diff --git a/include/osi_common.h b/include/osi_common.h index 9f5d413..25cae44 100644 --- a/include/osi_common.h +++ b/include/osi_common.h @@ -275,7 +275,7 @@ #define OSI_H_ENABLE (~OSI_H_DISABLE) #define OSI_BIT(nr) ((nveu32_t)1 << (((nveu32_t)nr) & 0x1FU)) -#define OSI_BIT_64(nr) ((nveu64_t)1 << (nr)) +#define OSI_BIT_64(nr) ((nveu64_t)1 << (((nveu32_t)nr) & 0x3FU)) #ifndef OSI_STRIPPED_LIB #define OSI_MGBE_MAC_3_00 0x30U diff --git a/include/osi_core.h b/include/osi_core.h index 219ff9f..92bfb55 100644 --- a/include/osi_core.h +++ b/include/osi_core.h @@ -192,6 +192,7 @@ typedef my_lint_64 nvel64_t; * @brief Maximum number of Secure Channels */ #define OSI_MAX_NUM_SC 8U +#define OSI_MAX_NUM_SC_T26x 48U /** * @brief MACSEC Secure Channel Identifier length */ @@ -1381,7 +1382,7 @@ struct osi_macsec_sc_info { */ struct osi_macsec_lut_status { /** List of max SC's supported */ - struct osi_macsec_sc_info sc_info[OSI_MAX_NUM_SC]; + struct osi_macsec_sc_info sc_info[OSI_MAX_NUM_SC_T26x]; /** next available BYP LUT index * valid values are from 0 to NVETHERNETRM_PIF$OSI_BYP_LUT_MAX_INDEX */ nveu16_t next_byp_idx; @@ -1711,7 +1712,7 @@ struct osi_core_priv_data { /** Dummy SCI/SC/SA etc LUTs programmed with dummy parameter when no * session setup. SCI LUT hit created with VF's MACID * valid values are from 0 to 0xFF for each array element */ - nveu8_t macsec_dummy_sc_macids[OSI_MAX_NUM_SC][OSI_ETH_ALEN]; + nveu8_t macsec_dummy_sc_macids[OSI_MAX_NUM_SC_T26x][OSI_ETH_ALEN]; /** MACSEC initialization state * valid vaues are 0(not initialized) and 1(Initialized) */ nveu32_t macsec_initialized; @@ -1875,6 +1876,9 @@ struct osi_core_priv_data { nve32_t speed; /** PCS BASE-R FEC enable */ nveu32_t pcs_base_r_fec_en; + /** skip auto neg for usxgmii mode. 
+ * 0(enable AN) and 1(disable AN) are the valid values */ + nveu32_t skip_usxgmii_an; }; /** diff --git a/osi/core/debug.c b/osi/core/debug.c index 91515d5..20f3f65 100644 --- a/osi/core/debug.c +++ b/osi/core/debug.c @@ -67,7 +67,7 @@ static void core_dump_struct(struct osi_core_priv_data *osi_core, */ void core_structs_dump(struct osi_core_priv_data *osi_core) { - struct core_local *l_core = (struct core_local *)osi_core; + struct core_local *l_core = (struct core_local *)((void *)osi_core); osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS, "CORE struct size = %lu", diff --git a/osi/core/eqos_core.c b/osi/core/eqos_core.c index 5042b92..9fbd168 100644 --- a/osi/core/eqos_core.c +++ b/osi/core/eqos_core.c @@ -3774,89 +3774,6 @@ static void eqos_get_hw_features(struct osi_core_priv_data *const osi_core, EQOS_MAC_HFR3_ASP_MASK); } -#ifndef OSI_STRIPPED_LIB -/** - * @brief eqos_padctl_rx_pins Enable/Disable RGMII Rx pins - * - * @param[in] osi_core: OSI Core private data structure. - * @param[in] enable: Enable/Disable EQOS RGMII padctrl Rx pins - * - * @pre - * - MAC needs to be out of reset and proper clock configured. - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static nve32_t eqos_padctl_rx_pins(struct osi_core_priv_data *const osi_core, - nveu32_t enable) -{ - nve32_t ret = 0; - nveu32_t value; - void *pad_addr = osi_core->padctrl.padctrl_base; - - if (pad_addr == OSI_NULL) { - ret = -1; - goto error; - } - if (enable == OSI_ENABLE) { - value = osi_readla(osi_core, (nveu8_t *)pad_addr + - osi_core->padctrl.offset_rx_ctl); - value |= EQOS_PADCTL_EQOS_E_INPUT; - osi_writela(osi_core, value, (nveu8_t *)pad_addr + - osi_core->padctrl.offset_rx_ctl); - value = osi_readla(osi_core, (nveu8_t *)pad_addr + - osi_core->padctrl.offset_rd0); - value |= EQOS_PADCTL_EQOS_E_INPUT; - osi_writela(osi_core, value, (nveu8_t *)pad_addr + - osi_core->padctrl.offset_rd0); - value = osi_readla(osi_core, (nveu8_t *)pad_addr + - osi_core->padctrl.offset_rd1); - value |= EQOS_PADCTL_EQOS_E_INPUT; - osi_writela(osi_core, value, (nveu8_t *)pad_addr + - osi_core->padctrl.offset_rd1); - value = osi_readla(osi_core, (nveu8_t *)pad_addr + - osi_core->padctrl.offset_rd2); - value |= EQOS_PADCTL_EQOS_E_INPUT; - osi_writela(osi_core, value, (nveu8_t *)pad_addr + - osi_core->padctrl.offset_rd2); - value = osi_readla(osi_core, (nveu8_t *)pad_addr + - osi_core->padctrl.offset_rd3); - value |= EQOS_PADCTL_EQOS_E_INPUT; - osi_writela(osi_core, value, (nveu8_t *)pad_addr + - osi_core->padctrl.offset_rd3); - } else { - value = osi_readla(osi_core, (nveu8_t *)pad_addr + - osi_core->padctrl.offset_rx_ctl); - value &= ~EQOS_PADCTL_EQOS_E_INPUT; - osi_writela(osi_core, value, (nveu8_t *)pad_addr + - osi_core->padctrl.offset_rx_ctl); - value = osi_readla(osi_core, (nveu8_t *)pad_addr + - osi_core->padctrl.offset_rd0); - value &= ~EQOS_PADCTL_EQOS_E_INPUT; - osi_writela(osi_core, value, (nveu8_t *)pad_addr + - osi_core->padctrl.offset_rd0); - value = osi_readla(osi_core, (nveu8_t *)pad_addr + - osi_core->padctrl.offset_rd1); - value &= ~EQOS_PADCTL_EQOS_E_INPUT; - osi_writela(osi_core, value, (nveu8_t *)pad_addr + - osi_core->padctrl.offset_rd1); - value = 
osi_readla(osi_core, (nveu8_t *)pad_addr + - osi_core->padctrl.offset_rd2); - value &= ~EQOS_PADCTL_EQOS_E_INPUT; - osi_writela(osi_core, value, (nveu8_t *)pad_addr + - osi_core->padctrl.offset_rd2); - value = osi_readla(osi_core, (nveu8_t *)pad_addr + - osi_core->padctrl.offset_rd3); - value &= ~EQOS_PADCTL_EQOS_E_INPUT; - osi_writela(osi_core, value, (nveu8_t *)pad_addr + - osi_core->padctrl.offset_rd3); - } - -error: - return ret; -} -#endif /* !OSI_STRIPPED_LIB */ - /** * @brief poll_for_mac_tx_rx_idle - check mac tx/rx idle or not * @@ -3939,16 +3856,7 @@ static nve32_t eqos_pre_pad_calibrate(struct osi_core_priv_data *const osi_core) goto error; } - if (osi_core->osd_ops.padctrl_mii_rx_pins != OSI_NULL) { - ret = osi_core->osd_ops.padctrl_mii_rx_pins(osi_core->osd, - OSI_DISABLE); - } -#ifndef OSI_STRIPPED_LIB - else { - ret = eqos_padctl_rx_pins(osi_core, OSI_DISABLE); - } -#endif /* !OSI_STRIPPED_LIB */ - + ret = osi_core->osd_ops.padctrl_mii_rx_pins(osi_core->osd, OSI_DISABLE); if (ret < 0) { goto error; } @@ -3957,15 +3865,7 @@ static nve32_t eqos_pre_pad_calibrate(struct osi_core_priv_data *const osi_core) error: /* roll back on fail */ hw_start_mac(osi_core); - if (osi_core->osd_ops.padctrl_mii_rx_pins != OSI_NULL) { - (void)osi_core->osd_ops.padctrl_mii_rx_pins(osi_core->osd, - OSI_ENABLE); - } -#ifndef OSI_STRIPPED_LIB - else { - (void)eqos_padctl_rx_pins(osi_core, OSI_ENABLE); - } -#endif /* !OSI_STRIPPED_LIB */ + (void)osi_core->osd_ops.padctrl_mii_rx_pins(osi_core->osd, OSI_ENABLE); /* Enable MAC RGSMIIIE - RGMII/SMII interrupts */ /* Read MAC IMR Register */ @@ -4003,15 +3903,8 @@ static nve32_t eqos_post_pad_calibrate( nveu32_t mac_pcs = 0; nveu32_t mac_isr = 0; - if (osi_core->osd_ops.padctrl_mii_rx_pins != OSI_NULL) { - ret = osi_core->osd_ops.padctrl_mii_rx_pins(osi_core->osd, - OSI_ENABLE); - } -#ifndef OSI_STRIPPED_LIB - else { - ret = eqos_padctl_rx_pins(osi_core, OSI_ENABLE); - } -#endif /* !OSI_STRIPPED_LIB */ + ret = 
osi_core->osd_ops.padctrl_mii_rx_pins(osi_core->osd, OSI_ENABLE); + /* handle only those MAC interrupts which are enabled */ mac_imr = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_IMR); diff --git a/osi/core/mgbe_core.c b/osi/core/mgbe_core.c index b11d47d..33ca547 100644 --- a/osi/core/mgbe_core.c +++ b/osi/core/mgbe_core.c @@ -255,7 +255,8 @@ static nve32_t mgbe_filter_args_validate(struct osi_core_priv_data *const osi_co } /* check for DMA channel index */ - if ((dma_chan > (l_core->num_max_chans - 0x1U)) && + if ((l_core->num_max_chans > 0x0U) && + (dma_chan > (l_core->num_max_chans - 0x1U)) && (dma_chan != OSI_CHAN_ANY)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, "invalid dma channel\n", @@ -340,6 +341,12 @@ static nve32_t check_mac_addr(nveu8_t const *mac_addr, nveu8_t *rch_addr) */ static void mgbe_free_rchlist_index(struct osi_core_priv_data *osi_core, const nve32_t rch_idx) { + + if ((rch_idx < 0) || (rch_idx >= (nve32_t)RCHLIST_SIZE)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Invalid rch_idx\n", rch_idx); + return ; + } osi_core->rch_index[rch_idx].in_use = OSI_NONE; osi_core->rch_index[rch_idx].dch = 0; osi_memset(&osi_core->rch_index[rch_idx].mac_address, 0, OSI_ETH_ALEN); @@ -654,6 +661,8 @@ static nve32_t mgbe_update_mac_addr_low_high_reg( goto fail; } + // To make sure idx is not more than max to address CERT INT30-C + idx = idx % OSI_MGBE_MAX_MAC_ADDRESS_FILTER_T26X; value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MAC_ADDRH((idx))); @@ -756,7 +765,7 @@ static nve32_t mgbe_update_mac_addr_low_high_reg( if (osi_core->mac != OSI_MAC_HW_MGBE_T26X) { /* Write XDCS configuration into MAC_DChSel_IndReg(x) */ /* Append DCS DMA channel to XDCS hot bit selection */ - xdcs_dds |= (OSI_BIT(dma_chan) | dma_chansel); + xdcs_dds |= (OSI_BIT_64(dma_chan) | dma_chansel); ret = mgbe_mac_indir_addr_write(osi_core, MGBE_MAC_DCHSEL, idx, xdcs_dds); } else { @@ -782,7 +791,9 @@ static nve32_t 
mgbe_update_mac_addr_low_high_reg( MGBE_MAC_ADDRH_SA); } - value |= ((rch_idx << MGBE_MAC_ADDRH_DCS_SHIFT) & MGBE_MAC_ADDRH_DCS); + // Restricting rch_idx to RCHLIST_SIZE to avoid CERT INT32-C + rch_idx %= RCHLIST_SIZE; + value |= (((nveu32_t)rch_idx << MGBE_MAC_ADDRH_DCS_SHIFT) & MGBE_MAC_ADDRH_DCS); osi_writela(osi_core, ((nveu32_t)addr[4] | ((nveu32_t)addr[5] << 8) | MGBE_MAC_ADDRH_AE | value), @@ -2462,9 +2473,8 @@ static nve32_t mgbe_configure_pdma(struct osi_core_priv_data *osi_core) osi_core->num_of_pdma); const nveu32_t rx_owrq = (MGBE_DMA_CHX_RX_CNTRL2_OWRQ_MCHAN / osi_core->num_of_pdma); - const nveu32_t tx_pbl = - ((((MGBE_TXQ_SIZE / OSI_MGBE_MAX_NUM_QUEUES) - - osi_core->mtu) / (MGBE_AXI_DATAWIDTH / 8U)) - 5U); + nveu32_t tx_pbl = 0U, max_txq_size = 0U, adjusted_txq_size = 0U; + nveu32_t divided_txq_size = 0U; /* Total Rx Queue size is 256KB */ const nveu32_t rx_pbl[OSI_MGBE_MAX_NUM_QUEUES] = { Q_SZ_DEPTH(224U) / 2U, Q_SZ_DEPTH(2U) / 2U, Q_SZ_DEPTH(2U) / 2U, @@ -2472,9 +2482,7 @@ static nve32_t mgbe_configure_pdma(struct osi_core_priv_data *osi_core) Q_SZ_DEPTH(2U) / 2U, Q_SZ_DEPTH(2U) / 2U, Q_SZ_DEPTH(2U) / 2U, Q_SZ_DEPTH(16U) / 2U }; - const nveu32_t tx_pbl_ufpga = - ((((MGBE_TXQ_SIZE_UFPGA / OSI_MGBE_MAX_NUM_QUEUES) - - osi_core->mtu) / (MGBE_AXI_DATAWIDTH / 8U)) - 5U); + nveu32_t tx_pbl_ufpga = 0U; /* uFPGA Rx Queue size is 64KB */ const nveu32_t rx_pbl_ufpga[OSI_MGBE_MAX_NUM_QUEUES] = { Q_SZ_DEPTH(40U)/2U, Q_SZ_DEPTH(2U)/2U, Q_SZ_DEPTH(2U)/2U, @@ -2498,9 +2506,49 @@ static nve32_t mgbe_configure_pdma(struct osi_core_priv_data *osi_core) * calculation by using above formula */ if (osi_core->pre_sil == OSI_ENABLE) { + max_txq_size = MGBE_TXQ_SIZE_UFPGA / OSI_MGBE_MAX_NUM_QUEUES; + if (osi_core->mtu > max_txq_size) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "Invalid MTU related to Q size received in pre-sil case\n", + osi_core->mtu); + ret = -1; + goto done; + } + adjusted_txq_size = max_txq_size - osi_core->mtu; + divided_txq_size = 
adjusted_txq_size / (MGBE_AXI_DATAWIDTH / 8U); + if (divided_txq_size < 5U) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "Invalid MTU received in pre-sil case\n", + osi_core->mtu); + ret = -1; + goto done; + } + tx_pbl_ufpga = + ((((MGBE_TXQ_SIZE_UFPGA / OSI_MGBE_MAX_NUM_QUEUES) - + osi_core->mtu) / (MGBE_AXI_DATAWIDTH / 8U)) - 5U); pbl = osi_valid_pbl_value(tx_pbl_ufpga); value |= (pbl << MGBE_PDMA_CHX_EXTCFG_PBL_SHIFT); } else { + max_txq_size = MGBE_TXQ_SIZE / OSI_MGBE_MAX_NUM_QUEUES; + if (osi_core->mtu > max_txq_size) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "Invalid MTU related to Q size received in silicon case\n", + osi_core->mtu); + ret = -1; + goto done; + } + adjusted_txq_size = max_txq_size - osi_core->mtu; + divided_txq_size = adjusted_txq_size / (MGBE_AXI_DATAWIDTH / 8U); + if (divided_txq_size < 5U) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "Invalid MTU received in silicon case\n", + osi_core->mtu); + ret = -1; + goto done; + } + tx_pbl = + ((((MGBE_TXQ_SIZE / OSI_MGBE_MAX_NUM_QUEUES) - + osi_core->mtu) / (MGBE_AXI_DATAWIDTH / 8U)) - 5U); pbl = osi_valid_pbl_value(tx_pbl); value |= (pbl << MGBE_PDMA_CHX_EXTCFG_PBL_SHIFT); } diff --git a/osi/core/vlan_filter.c b/osi/core/vlan_filter.c index 8ba35d1..9be01b7 100644 --- a/osi/core/vlan_filter.c +++ b/osi/core/vlan_filter.c @@ -293,8 +293,13 @@ static inline nve32_t add_vlan_id(struct osi_core_priv_data *osi_core, return allow_all_vid_tags(osi_core->base, OSI_ENABLE); } - osi_core->vf_bitmap |= OSI_BIT(vid_idx); + osi_core->vf_bitmap |= OSI_BIT_64(vid_idx); osi_core->vid[vid_idx] = vlan_id; + if (osi_core->vlan_filter_cnt >= VLAN_NUM_VID) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Reached Max number of VLAN flters\n", 0ULL); + return -1; + } osi_core->vlan_filter_cnt++; if (osi_core->vlan_filter_cnt > 0U) { @@ -381,7 +386,7 @@ static inline nve32_t dequeue_vid_to_add_filter_reg( return 0; } - osi_core->vf_bitmap |= OSI_BIT(vid_idx); + osi_core->vf_bitmap 
|= OSI_BIT_64(vid_idx); osi_core->vid[vid_idx] = vlan_id; val = osi_readl((nveu8_t *)osi_core->base + MAC_VLAN_TAG_DATA); @@ -394,6 +399,8 @@ static inline nve32_t dequeue_vid_to_add_filter_reg( } for (i = VLAN_HW_FILTER_FULL_IDX; i <= osi_core->vlan_filter_cnt; i++) { + // Fixed CERT ARR30-C by limiting the i to array max index + i %= (VLAN_NUM_VID - 1U); osi_core->vid[i] = osi_core->vid[i + 1U]; } @@ -433,7 +440,7 @@ static inline nve32_t del_vlan_id(struct osi_core_priv_data *osi_core, return dequeue_vlan_id(osi_core, idx); } - osi_core->vf_bitmap &= ~OSI_BIT(vid_idx); + osi_core->vf_bitmap &= ~OSI_BIT_64(vid_idx); osi_core->vid[vid_idx] = VLAN_ID_INVALID; ret = update_vlan_filters(osi_core, vid_idx, val); @@ -441,6 +448,11 @@ static inline nve32_t del_vlan_id(struct osi_core_priv_data *osi_core, return -1; } + if (osi_core->vlan_filter_cnt == 0U) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Number of vlan filters is invalid\n", 0ULL); + return -1; + } osi_core->vlan_filter_cnt--; if (osi_core->vlan_filter_cnt == 0U) { diff --git a/osi/core/xpcs.c b/osi/core/xpcs.c index 4714d60..d9cabfe 100644 --- a/osi/core/xpcs.c +++ b/osi/core/xpcs.c @@ -245,6 +245,30 @@ static inline nve32_t eqos_xpcs_set_speed(struct osi_core_priv_data *osi_core, } #endif +/** + * @brief update_an_status - update AN status. + * + * Algorithm: This routine initialize AN status based on + * the USXGMII mode selected. Later this value + * will be overwritten if AN is enabled. + * + * @param[in] osi_core: OSI core data structure. 
+ * @param[out] an_status: Predefined AN status + * + */ +static inline void update_an_status(const struct osi_core_priv_data *const osi_core, + nveu32_t *an_status) +{ + /* initialize an_status based on DT, later overwrite if AN is enabled */ + if (osi_core->phy_iface_mode == OSI_USXGMII_MODE_10G) { + *an_status = XPCS_USXG_AN_STS_SPEED_10000; + } else if (osi_core->phy_iface_mode == OSI_USXGMII_MODE_5G) { + *an_status = XPCS_USXG_AN_STS_SPEED_5000; + } else { + /* do nothing */ + } +} + /** * @brief xpcs_start - Start XPCS * @@ -267,15 +291,20 @@ nve32_t xpcs_start(struct osi_core_priv_data *osi_core) if ((osi_core->phy_iface_mode == OSI_USXGMII_MODE_10G) || (osi_core->phy_iface_mode == OSI_USXGMII_MODE_5G)) { - ctrl = xpcs_read(xpcs_base, XPCS_SR_MII_CTRL); - ctrl |= XPCS_SR_MII_CTRL_AN_ENABLE; - ret = xpcs_write_safety(osi_core, XPCS_SR_MII_CTRL, ctrl); - if (ret != 0) { - goto fail; - } - ret = xpcs_poll_for_an_complete(osi_core, &an_status); - if (ret < 0) { - goto fail; + /* initialize an_status based on DT, later overwrite if AN is enabled */ + update_an_status(osi_core, &an_status); + /* Skip AN in USXGMII mode if skip_usxgmii_an is configured in DT */ + if (osi_core->skip_usxgmii_an == OSI_DISABLE) { + ctrl = xpcs_read(xpcs_base, XPCS_SR_MII_CTRL); + ctrl |= XPCS_SR_MII_CTRL_AN_ENABLE; + ret = xpcs_write_safety(osi_core, XPCS_SR_MII_CTRL, ctrl); + if (ret != 0) { + goto fail; + } + ret = xpcs_poll_for_an_complete(osi_core, &an_status); + if (ret < 0) { + goto fail; + } } ret = xpcs_set_speed(osi_core, an_status); @@ -417,6 +446,7 @@ static nve32_t xpcs_uphy_lane_bring_up(struct osi_core_priv_data *osi_core, nveu32_t count; nve32_t ret = 0; nveu32_t once = 0; + nveu64_t retry_delay = OSI_DELAY_1US; const nveu32_t uphy_status_reg[OSI_MAX_MAC_IP_TYPES] = { EQOS_XPCS_WRAP_UPHY_STATUS, XPCS_WRAP_UPHY_STATUS, @@ -430,6 +460,15 @@ static nve32_t xpcs_uphy_lane_bring_up(struct osi_core_priv_data *osi_core, if ((osi_core->mac == OSI_MAC_HW_MGBE_T26X) ||
(osi_core->mac_ver == OSI_EQOS_MAC_5_40)) { retry = 1000U; + if (osi_core->uphy_gbe_mode == OSI_GBE_MODE_25G) { + /* Delay added as per HW team suggestion which is + * of 100msec if equalizer is enabled for every + * iteration of a lane bring sequence. So 100 * 1000 + * gives us a delay of 100msec for each retry of lane + * bringup + */ + retry_delay = 100U; + } } val = osi_readla(osi_core, @@ -466,10 +505,10 @@ static nve32_t xpcs_uphy_lane_bring_up(struct osi_core_priv_data *osi_core, * but added an extra count of 4 for safer side */ if (once == 0U) { - osi_core->osd_ops.udelay(OSI_DELAY_1US); + osi_core->osd_ops.udelay(retry_delay); once = 1U; } else { - osi_core->osd_ops.udelay(OSI_DELAY_4US); + osi_core->osd_ops.udelay(retry_delay); } } } diff --git a/osi/dma/debug.c b/osi/dma/debug.c index c8e4052..90d52ae 100644 --- a/osi/dma/debug.c +++ b/osi/dma/debug.c @@ -67,7 +67,7 @@ static void dump_struct(struct osi_dma_priv_data *osi_dma, */ void structs_dump(struct osi_dma_priv_data *osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)((void *)osi_dma); nveu32_t i = 0; osi_dma->osd_ops.printf(osi_dma, OSI_DEBUG_TYPE_STRUCTS, @@ -114,7 +114,7 @@ void structs_dump(struct osi_dma_priv_data *osi_dma) */ void reg_dump(struct osi_dma_priv_data *osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)((void *)osi_dma); unsigned int max_addr; unsigned int addr; unsigned int reg_val; @@ -164,6 +164,13 @@ static void rx_desc_dump(struct osi_dma_priv_data *osi_dma, unsigned int idx, struct osi_rx_desc *rx_desc = rx_ring->rx_desc + idx; struct osd_dma_ops *ops = &osi_dma->osd_ops; + if ((rx_ring->rx_desc_phy_addr) > + ((OSI_ULLONG_MAX) - (idx * sizeof(struct osi_rx_desc)))) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, + "Invalid rx addr !!!\n", 0ULL); + goto exit_func; + } + ops->printf(osi_dma, OSI_DEBUG_TYPE_DESC, "N [%02d %4p %04d %lx R_D] = 
%#x:%#x:%#x:%#x\n", chan, rx_desc, idx, @@ -171,6 +178,9 @@ static void rx_desc_dump(struct osi_dma_priv_data *osi_dma, unsigned int idx, rx_desc->rdes3, rx_desc->rdes2, rx_desc->rdes1, rx_desc->rdes0); +exit_func: + return; + } /** @@ -191,10 +201,21 @@ static void tx_desc_dump(struct osi_dma_priv_data *osi_dma, unsigned int f_idx, struct osd_dma_ops *ops = &osi_dma->osd_ops; unsigned int ctxt = 0, i = 0; + if (osi_dma->tx_ring_sz == 0U) { + ops->printf(osi_dma, OSI_DEBUG_TYPE_DESC, + "In Valid tx_ring_sz\n"); + goto exit_func; + } if (f_idx == l_idx) { tx_desc = tx_ring->tx_desc + f_idx; ctxt = tx_desc->tdes3 & TDES3_CTXT; + if ((tx_ring->tx_desc_phy_addr) > + ((OSI_ULLONG_MAX) - (f_idx * sizeof(struct osi_tx_desc)))) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, + "Invalid addr !!!\n", 0ULL); + goto exit_func; + } ops->printf(osi_dma, OSI_DEBUG_TYPE_DESC, "%s [%02d %4p %04d %lx %s] = %#x:%#x:%#x:%#x\n", (ctxt == TDES3_CTXT) ? "C" : "N", @@ -207,6 +228,12 @@ static void tx_desc_dump(struct osi_dma_priv_data *osi_dma, unsigned int f_idx, int cnt; if (f_idx > l_idx) { + if ((l_idx > (UINT_MAX - osi_dma->tx_ring_sz)) || + ((l_idx + osi_dma->tx_ring_sz) < f_idx)) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, + "Invalid idx !!!\n", 0ULL); + goto exit_func; + } cnt = (int)(l_idx + osi_dma->tx_ring_sz - f_idx); } else { cnt = (int)(l_idx - f_idx); @@ -216,6 +243,12 @@ static void tx_desc_dump(struct osi_dma_priv_data *osi_dma, unsigned int f_idx, tx_desc = tx_ring->tx_desc + i; ctxt = tx_desc->tdes3 & TDES3_CTXT; + if ((tx_ring->tx_desc_phy_addr) > + ((OSI_ULLONG_MAX) - (i * sizeof(struct osi_tx_desc)))) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, + "Invalid addr !!!\n", 0ULL); + break; + } ops->printf(osi_dma, OSI_DEBUG_TYPE_DESC, "%s [%02d %4p %04d %lx %s] = %#x:%#x:%#x:%#x\n", (ctxt == TDES3_CTXT) ? 
"C" : "N", @@ -228,6 +261,9 @@ static void tx_desc_dump(struct osi_dma_priv_data *osi_dma, unsigned int f_idx, INCR_TX_DESC_INDEX(i, osi_dma->tx_ring_sz); } } + +exit_func: + return; } /** @@ -243,6 +279,13 @@ static void tx_desc_dump(struct osi_dma_priv_data *osi_dma, unsigned int f_idx, void desc_dump(struct osi_dma_priv_data *osi_dma, unsigned int f_idx, unsigned int l_idx, unsigned int flag, unsigned int chan) { + + if ((osi_dma->tx_ring_sz == 0U) || (osi_dma->rx_ring_sz == 0U)) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, + "Invalid Tx/Rx ring size\n", 0ULL); + goto exit_func; + } + switch (flag & TXRX_DESC_DUMP_MASK) { case TX_DESC_DUMP: tx_desc_dump(osi_dma, f_idx, l_idx, @@ -256,5 +299,8 @@ void desc_dump(struct osi_dma_priv_data *osi_dma, unsigned int f_idx, "Invalid desc dump flag\n", 0ULL); break; } + +exit_func: + return; } #endif /* OSI_DEBUG */ diff --git a/osi/dma/dma_local.h b/osi/dma/dma_local.h index 03244aa..c37bb94 100644 --- a/osi/dma/dma_local.h +++ b/osi/dma/dma_local.h @@ -143,7 +143,7 @@ static inline void osi_dma_writel(nveu32_t val, void *addr) #define GET_TX_TS_PKTID(idx, c) (((idx) & (PKT_ID_CNT - 1U)) | \ (((c) + 1U) << CHAN_START_POSITION)) /* T264 has saperate logic to tell vdma number so we can use all 10 bits for pktid */ -#define GET_TX_TS_PKTID_T264(idx) ((++(idx)) & (PKT_ID_CNT_T264 - 1U)) +#define GET_TX_TS_PKTID_T264(idx) ((((idx) & 0x7FFFFFFFU) + 1U) & (PKT_ID_CNT_T264 - 1U)) /** @} */ /** @@ -338,8 +338,9 @@ static inline void update_rx_tail_ptr(const struct osi_dma_priv_data *const osi_ nveu64_t tailptr) { const nveu32_t chan_mask[OSI_MAX_MAC_IP_TYPES] = {0xFU, 0xFU, 0x3FU}; - - nveu32_t chan = dma_chan & chan_mask[osi_dma->mac]; + const nveu32_t local_mac = osi_dma->mac % OSI_MAX_MAC_IP_TYPES; + // Added bitwise with 0xFF to avoid CERT INT30-C error + nveu32_t chan = (dma_chan & chan_mask[local_mac]) & (0xFFU); const nveu32_t tail_ptr_reg[OSI_MAX_MAC_IP_TYPES] = { EQOS_DMA_CHX_RDTP(chan), MGBE_DMA_CHX_RDTLP(chan), 
diff --git a/osi/dma/eqos_dma.c b/osi/dma/eqos_dma.c index 88700bf..76d3c88 100644 --- a/osi/dma/eqos_dma.c +++ b/osi/dma/eqos_dma.c @@ -53,6 +53,7 @@ static void eqos_config_slot(struct osi_dma_priv_data *osi_dma, { nveu32_t value; nveu32_t intr; + nveu32_t local_chan = chan % OSI_EQOS_MAX_NUM_CHANS; #if 0 CHECK_CHAN_BOUND(chan); @@ -60,7 +61,7 @@ static void eqos_config_slot(struct osi_dma_priv_data *osi_dma, if (set == OSI_ENABLE) { /* Program SLOT CTRL register SIV and set ESC bit */ value = osi_dma_readl((nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_SLOT_CTRL(chan)); + EQOS_DMA_CHX_SLOT_CTRL(local_chan)); value &= ~EQOS_DMA_CHX_SLOT_SIV_MASK; /* remove overflow bits of interval */ intr = interval & EQOS_DMA_CHX_SLOT_SIV_MASK; @@ -68,15 +69,15 @@ static void eqos_config_slot(struct osi_dma_priv_data *osi_dma, /* Set ESC bit */ value |= EQOS_DMA_CHX_SLOT_ESC; osi_dma_writel(value, (nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_SLOT_CTRL(chan)); + EQOS_DMA_CHX_SLOT_CTRL(local_chan)); } else { /* Clear ESC bit of SLOT CTRL register */ value = osi_dma_readl((nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_SLOT_CTRL(chan)); + EQOS_DMA_CHX_SLOT_CTRL(local_chan)); value &= ~EQOS_DMA_CHX_SLOT_ESC; osi_dma_writel(value, (nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_SLOT_CTRL(chan)); + EQOS_DMA_CHX_SLOT_CTRL(local_chan)); } } diff --git a/osi/dma/mgbe_dma.c b/osi/dma/mgbe_dma.c index 00056ea..c5eaa77 100644 --- a/osi/dma/mgbe_dma.c +++ b/osi/dma/mgbe_dma.c @@ -44,25 +44,26 @@ static void mgbe_config_slot(struct osi_dma_priv_data *osi_dma, OSI_UNUSED unsigned int interval) { unsigned int value; + unsigned int local_chan = chan % OSI_MGBE_MAX_NUM_CHANS; #if 0 MGBE_CHECK_CHAN_BOUND(chan); #endif if (set == OSI_ENABLE) { /* Program SLOT CTRL register SIV and set ESC bit */ value = osi_dma_readl((unsigned char *)osi_dma->base + - MGBE_DMA_CHX_SLOT_CTRL(chan)); + MGBE_DMA_CHX_SLOT_CTRL(local_chan)); /* Set ESC bit */ value |= MGBE_DMA_CHX_SLOT_ESC; osi_dma_writel(value, (unsigned char 
*)osi_dma->base + - MGBE_DMA_CHX_SLOT_CTRL(chan)); + MGBE_DMA_CHX_SLOT_CTRL(local_chan)); } else { /* Clear ESC bit of SLOT CTRL register */ value = osi_dma_readl((unsigned char *)osi_dma->base + - MGBE_DMA_CHX_SLOT_CTRL(chan)); + MGBE_DMA_CHX_SLOT_CTRL(local_chan)); value &= ~MGBE_DMA_CHX_SLOT_ESC; osi_dma_writel(value, (unsigned char *)osi_dma->base + - MGBE_DMA_CHX_SLOT_CTRL(chan)); + MGBE_DMA_CHX_SLOT_CTRL(local_chan)); } } diff --git a/osi/dma/mgbe_dma.h b/osi/dma/mgbe_dma.h index f073fbd..4ee7461 100644 --- a/osi/dma/mgbe_dma.h +++ b/osi/dma/mgbe_dma.h @@ -59,6 +59,7 @@ #define MGBE_DMA_CHX_RDTLP(x) ((0x0080U * (x)) + 0x312CU) #define MGBE_DMA_CHX_RX_DESC_WR_RNG_OFFSET(x) ((0x0080U * (x)) + 0x317CU) +#define MAX_REG_OFFSET 0xFFFFU /** @} */ /** @} */ diff --git a/osi/dma/osi_dma.c b/osi/dma/osi_dma.c index 23ca1f1..13eb54f 100644 --- a/osi/dma/osi_dma.c +++ b/osi/dma/osi_dma.c @@ -195,7 +195,8 @@ static inline nve32_t dma_validate_args(const struct osi_dma_priv_data *const os nve32_t ret = 0; if ((osi_dma == OSI_NULL) || (osi_dma->base == OSI_NULL) || - (l_dma->init_done == OSI_DISABLE)) { + (l_dma->init_done == OSI_DISABLE) || + (osi_dma->mac >= OSI_MAX_MAC_IP_TYPES)) { ret = -1; } @@ -487,7 +488,9 @@ done: static inline void start_dma(const struct osi_dma_priv_data *const osi_dma, nveu32_t dma_chan) { const nveu32_t chan_mask[OSI_MAX_MAC_IP_TYPES] = {0xFU, 0xFU, 0x3FU}; - nveu32_t chan = dma_chan & chan_mask[osi_dma->mac]; + const nveu32_t local_mac = osi_dma->mac % OSI_MAX_MAC_IP_TYPES; + // Added bitwise with 0xFF to avoid CERT INT30-C error + nveu32_t chan = ((dma_chan & chan_mask[local_mac]) & (0xFFU)); const nveu32_t tx_dma_reg[OSI_MAX_MAC_IP_TYPES] = { EQOS_DMA_CHX_TX_CTRL(chan), MGBE_DMA_CHX_TX_CTRL(chan), @@ -501,15 +504,15 @@ static inline void start_dma(const struct osi_dma_priv_data *const osi_dma, nveu nveu32_t val; /* Start Tx DMA */ - val = osi_dma_readl((nveu8_t *)osi_dma->base + tx_dma_reg[osi_dma->mac]); + val = osi_dma_readl((nveu8_t 
*)osi_dma->base + tx_dma_reg[local_mac]); val |= OSI_BIT(0); - osi_dma_writel(val, (nveu8_t *)osi_dma->base + tx_dma_reg[osi_dma->mac]); + osi_dma_writel(val, (nveu8_t *)osi_dma->base + tx_dma_reg[local_mac]); /* Start Rx DMA */ - val = osi_dma_readl((nveu8_t *)osi_dma->base + rx_dma_reg[osi_dma->mac]); + val = osi_dma_readl((nveu8_t *)osi_dma->base + rx_dma_reg[local_mac]); val |= OSI_BIT(0); val &= ~OSI_BIT(31); - osi_dma_writel(val, (nveu8_t *)osi_dma->base + rx_dma_reg[osi_dma->mac]); + osi_dma_writel(val, (nveu8_t *)osi_dma->base + rx_dma_reg[local_mac]); } static nve32_t init_dma_channel(const struct osi_dma_priv_data *const osi_dma, @@ -518,7 +521,9 @@ static nve32_t init_dma_channel(const struct osi_dma_priv_data *const osi_dma, const nveu32_t chan_mask[OSI_MAX_MAC_IP_TYPES] = {0xFU, 0xFU, 0x3FU}; nveu32_t pbl = 0; nveu32_t pdma_chan = 0xFFU; - nveu32_t chan = dma_chan & chan_mask[osi_dma->mac]; + const nveu32_t local_mac = osi_dma->mac % OSI_MAX_MAC_IP_TYPES; + // Added bitwise with 0xFF to avoid CERT INT30-C error + nveu32_t chan = ((dma_chan & chan_mask[local_mac]) & (0xFFU)); nveu32_t riwt = osi_dma->rx_riwt & 0xFFFU; const nveu32_t intr_en_reg[OSI_MAX_MAC_IP_TYPES] = { EQOS_DMA_CHX_INTR_ENA(chan), @@ -714,7 +719,9 @@ exit_func: static nve32_t init_dma(const struct osi_dma_priv_data *osi_dma, nveu32_t channel) { const nveu32_t chan_mask[OSI_MAX_MAC_IP_TYPES] = {0xFU, 0xFU, 0x3FU}; - nveu32_t chan = channel & chan_mask[osi_dma->mac]; + const nveu32_t local_mac = osi_dma->mac % OSI_MAX_MAC_IP_TYPES; + // Added bitwise with 0xFF to avoid CERT INT30-C error + nveu32_t chan = ((channel & chan_mask[local_mac]) & (0xFFU)); nve32_t ret = 0; /* CERT ARR-30C issue observed without this check */ @@ -830,7 +837,9 @@ static inline void stop_dma(const struct osi_dma_priv_data *const osi_dma, nveu32_t dma_chan) { const nveu32_t chan_mask[OSI_MAX_MAC_IP_TYPES] = {0xFU, 0xFU, 0x3FU}; - nveu32_t chan = dma_chan & chan_mask[osi_dma->mac]; + const nveu32_t local_mac = 
osi_dma->mac % OSI_MAX_MAC_IP_TYPES; + // Added bitwise with 0xFF to avoid CERT INT30-C error + nveu32_t chan = ((dma_chan & chan_mask[local_mac]) & (0xFFU)); const nveu32_t dma_tx_reg[OSI_MAX_MAC_IP_TYPES] = { EQOS_DMA_CHX_TX_CTRL(chan), MGBE_DMA_CHX_TX_CTRL(chan), @@ -859,19 +868,27 @@ static inline void set_rx_riit_dma( const struct osi_dma_priv_data *const osi_dma, nveu32_t chan, nveu32_t riit) { + const nveu32_t local_chan = chan % OSI_MGBE_MAX_NUM_CHANS; const nveu32_t rx_wdt_reg[OSI_MAX_MAC_IP_TYPES] = { - EQOS_DMA_CHX_RX_WDT(chan), - MGBE_DMA_CHX_RX_WDT(chan), - MGBE_DMA_CHX_RX_WDT(chan) + EQOS_DMA_CHX_RX_WDT(local_chan), + MGBE_DMA_CHX_RX_WDT(local_chan), + MGBE_DMA_CHX_RX_WDT(local_chan) }; /* riit is in ns */ - const nveu32_t itw_val = { - (((riit * ((nveu32_t)MGBE_AXI_CLK_FREQ / OSI_ONE_MEGA_HZ)) / - (MGBE_DMA_CHX_RX_WDT_ITCU * OSI_MSEC_PER_SEC)) - & MGBE_DMA_CHX_RX_WDT_ITW_MAX) - }; + nveu32_t itw_val = 0U; + const nveu32_t freq_mghz = (MGBE_AXI_CLK_FREQ / OSI_ONE_MEGA_HZ); + const nveu32_t wdt_msec = (MGBE_DMA_CHX_RX_WDT_ITCU * OSI_MSEC_PER_SEC); nveu32_t val; + if (riit > (UINT_MAX / freq_mghz)) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, + "Invalid riit received\n", riit); + goto exit_func; + } + + itw_val = (((riit * freq_mghz) / wdt_msec) + & MGBE_DMA_CHX_RX_WDT_ITW_MAX); + if (osi_dma->use_riit != OSI_DISABLE && osi_dma->mac == OSI_MAC_HW_MGBE_T26X) { val = osi_dma_readl((nveu8_t *)osi_dma->base + @@ -882,6 +899,7 @@ static inline void set_rx_riit_dma( rx_wdt_reg[osi_dma->mac]); } +exit_func: return; } @@ -967,13 +985,15 @@ nve32_t osi_get_global_dma_status(struct osi_dma_priv_data *osi_dma, { const nveu32_t global_dma_status_reg_cnt[OSI_MAX_MAC_IP_TYPES] = {1, 1, 3}; struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; - const nveu32_t global_dma_status_reg[OSI_MAX_MAC_IP_TYPES] = { + nveu32_t global_dma_status_reg[OSI_MAX_MAC_IP_TYPES] = { HW_GLOBAL_DMA_STATUS, HW_GLOBAL_DMA_STATUS, MGBE_T26X_GLOBAL_DMA_STATUS, }; nve32_t ret = 
0; nveu32_t i; + nveu64_t temp_addr = 0U; + const nveu32_t local_mac = osi_dma->mac % OSI_MAX_MAC_IP_TYPES; #ifdef OSI_CL_FTRACE if ((osi_get_global_dma_status_cnt % 1000) == 0) @@ -984,10 +1004,14 @@ nve32_t osi_get_global_dma_status(struct osi_dma_priv_data *osi_dma, goto fail; } - for (i = 0U; i < global_dma_status_reg_cnt[osi_dma->mac]; i++) { + for (i = 0U; i < global_dma_status_reg_cnt[local_mac]; i++) { if (i < UINT_MAX) { + // Added check to avoid CERT INT30-C + global_dma_status_reg[local_mac] &= MAX_REG_OFFSET; + temp_addr = (nveu64_t)(global_dma_status_reg[local_mac] + + ((nveu64_t)i * 4U)); dma_status[i] = osi_dma_readl((nveu8_t *)osi_dma->base + - (global_dma_status_reg[osi_dma->mac] + (i * 4U))); + (nveu32_t)(temp_addr & (nveu64_t)MAX_REG_OFFSET)); } } fail: @@ -1410,7 +1434,7 @@ fail: nve32_t osi_dma_ioctl(struct osi_dma_priv_data *osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; struct osi_dma_ioctl_data *data; #ifdef OSI_CL_FTRACE @@ -1495,7 +1519,7 @@ static inline nve32_t osi_slot_args_validate(struct osi_dma_priv_data *osi_dma, nve32_t osi_config_slot_function(struct osi_dma_priv_data *osi_dma, nveu32_t set) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; nveu32_t i = 0U, chan = 0U, interval = 0U; struct osi_tx_ring *tx_ring = OSI_NULL; diff --git a/osi/dma/osi_dma_txrx.c b/osi/dma/osi_dma_txrx.c index aa404ea..db89bf0 100644 --- a/osi/dma/osi_dma_txrx.c +++ b/osi/dma/osi_dma_txrx.c @@ -79,7 +79,8 @@ static inline nve32_t validate_rx_completions_arg( if (osi_unlikely((osi_dma == OSI_NULL) || (more_data_avail == OSI_NULL) || - (chan >= l_dma->num_max_chans))) { + (chan >= l_dma->num_max_chans) || + (chan >= OSI_MGBE_MAX_NUM_CHANS))) { ret = -1; goto fail; } @@ -178,6 +179,9 @@ static inline void check_for_more_data_avail(struct osi_rx_ring *rx_ring, nve32_ * Rx packets, so that the OSD 
layer can decide to schedule * this function again. */ + if ((received_resv < 0) || (received > (INT_MAX - received_resv))) { + return; + } if ((received + received_resv) >= budget) { rx_desc = rx_ring->rx_desc + rx_ring->cur_rx_idx; rx_swcx = rx_ring->rx_swcx + rx_ring->cur_rx_idx; @@ -218,17 +222,26 @@ static inline nveu32_t compltd_rx_desc_cnt(struct osi_dma_priv_data *osi_dma, nveu32_t chan) { struct osi_rx_ring *rx_ring = osi_dma->rx_ring[chan]; - nveu32_t value, rx_desc_wr_idx, descr_compltd; + nveu32_t value = 0U , rx_desc_wr_idx = 0U, descr_compltd = 0U; + /* Already has a check for this in the caller + * but coverity tool is not able to recognize the same + */ + const nveu32_t local_chan = chan % OSI_MGBE_MAX_NUM_CHANS; value = osi_dma_readl((nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_RX_DESC_WR_RNG_OFFSET(chan)); - /* completed desc write back offset */ - rx_desc_wr_idx = ((value >> MGBE_RX_DESC_WR_RNG_RWDC_SHIFT ) & - (osi_dma->rx_ring_sz - 1)); - descr_compltd = (rx_desc_wr_idx - rx_ring->cur_rx_idx) & - (osi_dma->rx_ring_sz - 1U); + MGBE_DMA_CHX_RX_DESC_WR_RNG_OFFSET(local_chan)); + if (osi_dma->rx_ring_sz > 0U) { + /* completed desc write back offset */ + rx_desc_wr_idx = ((value >> MGBE_RX_DESC_WR_RNG_RWDC_SHIFT ) & + (osi_dma->rx_ring_sz - 1U)); + //If we remove this check we are seeing perf issues on mgbe3_0 of Ferrix + // if (rx_desc_wr_idx >= rx_ring->cur_rx_idx) { + descr_compltd = (rx_desc_wr_idx - rx_ring->cur_rx_idx) & + (osi_dma->rx_ring_sz - 1U); + // } + } /* offset/index start from 0, so add 1 to get final count */ - descr_compltd += 1U; + descr_compltd = (((descr_compltd) & ((nveu32_t)0x7FFFFFFFU)) + (1U)); return descr_compltd; } @@ -1135,7 +1148,7 @@ static inline void apply_write_barrier(struct osi_tx_ring *tx_ring) static inline void dump_tx_descriptors(struct osi_dma_priv_data *osi_dma, nveu32_t f_idx, nveu32_t l_idx, nveu32_t chan) { - if (osi_dma->enable_desc_dump == 1U) { + if ((osi_dma->enable_desc_dump == 1U) && (l_idx != 0U)) { 
desc_dump(osi_dma, f_idx, DECR_TX_DESC_INDEX(l_idx, osi_dma->tx_ring_sz), (TX_DESC_DUMP | TX_DESC_DUMP_TX), chan); } @@ -1205,7 +1218,9 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, #ifdef OSI_DEBUG nveu32_t f_idx = tx_ring->cur_tx_idx; #endif /* OSI_DEBUG */ - nveu32_t chan = dma_chan & chan_mask[osi_dma->mac]; + const nveu32_t local_mac = osi_dma->mac % OSI_MAX_MAC_IP_TYPES; + // Added bitwise with 0xFF to avoid CERT INT30-C error + nveu32_t chan = ((dma_chan & chan_mask[local_mac]) & (0xFFU)); const nveu32_t tail_ptr_reg[OSI_MAX_MAC_IP_TYPES] = { EQOS_DMA_CHX_TDTP(chan), MGBE_DMA_CHX_TDTLP(chan), @@ -1299,6 +1314,12 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, /* Fill remaining descriptors */ for (i = 0; i < desc_cnt; i++) { /* Increase the desc count for first descriptor */ + if (tx_ring->desc_cnt == UINT_MAX) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, + "dma_txrx: Reached Max Desc count\n", 0ULL); + ret = -1; + break; + } tx_ring->desc_cnt++; tx_desc->tdes0 = L32(tx_swcx->buf_phy_addr); @@ -1314,6 +1335,12 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, tx_swcx = tx_ring->tx_swcx + entry; } + if (tx_ring->desc_cnt == UINT_MAX) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, + "dma_txrx: Reached Max Desc count\n", 0ULL); + ret = -1; + goto fail; + } /* Mark it as LAST descriptor */ last_desc->tdes3 |= TDES3_LD; @@ -1356,7 +1383,7 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, tx_ring->cur_tx_idx = entry; /* Update the Tx tail pointer */ - osi_dma_writel(L32(tailptr), (nveu8_t *)osi_dma->base + tail_ptr_reg[osi_dma->mac]); + osi_dma_writel(L32(tailptr), (nveu8_t *)osi_dma->base + tail_ptr_reg[local_mac]); fail: return ret; @@ -1387,7 +1414,9 @@ static nve32_t rx_dma_desc_initialization(const struct osi_dma_priv_data *const nveu32_t dma_chan) { const nveu32_t chan_mask[OSI_MAX_MAC_IP_TYPES] = {0xFU, 0xFU, 0x3FU}; - nveu32_t chan = dma_chan & chan_mask[osi_dma->mac]; + const nveu32_t local_mac = osi_dma->mac 
% OSI_MAX_MAC_IP_TYPES; + // Added bitwise with 0xFF to avoid CERT INT30-C error + nveu32_t chan = ((dma_chan & chan_mask[local_mac]) & (0xFFU)); const nveu32_t start_addr_high_reg[OSI_MAX_MAC_IP_TYPES] = { EQOS_DMA_CHX_RDLH(chan), MGBE_DMA_CHX_RDLH(chan), @@ -1533,7 +1562,9 @@ static inline void set_tx_ring_len_and_start_addr(const struct osi_dma_priv_data nveu32_t len) { const nveu32_t chan_mask[OSI_MAX_MAC_IP_TYPES] = {0xFU, 0xFU, 0x3FU}; - nveu32_t chan = dma_chan & chan_mask[osi_dma->mac]; + const nveu32_t local_mac = osi_dma->mac % OSI_MAX_MAC_IP_TYPES; + // Added bitwise with 0xFF to avoid CERT INT30-C error + nveu32_t chan = ((dma_chan & chan_mask[local_mac]) & (0xFFU)); const nveu32_t ring_len_reg[OSI_MAX_MAC_IP_TYPES] = { EQOS_DMA_CHX_TDRL(chan), MGBE_DMA_CHX_TX_CNTRL2(chan), diff --git a/osi/nvmacsecrm/macsec.c b/osi/nvmacsecrm/macsec.c index dddcbe8..7671c8e 100644 --- a/osi/nvmacsecrm/macsec.c +++ b/osi/nvmacsecrm/macsec.c @@ -5360,8 +5360,12 @@ static struct osi_macsec_sc_info *find_existing_sc( &osi_core->macsec_lut_status[ctlr]; struct osi_macsec_sc_info *sc_found = OSI_NULL; nveu32_t i; + const nveu32_t max_num_sc[MAX_MACSEC_IP_TYPES] = { + OSI_MAX_NUM_SC, + OSI_MAX_NUM_SC_T26x + }; - for (i = 0; i < OSI_MAX_NUM_SC; i++) { + for (i = 0; i < max_num_sc[osi_core->macsec]; i++) { if (osi_macsec_memcmp(lut_status_ptr->sc_info[i].sci, sc->sci, (nve32_t)OSI_SCI_LEN) == OSI_NONE_SIGNED) { sc_found = &lut_status_ptr->sc_info[i]; @@ -5399,8 +5403,12 @@ static nveu32_t get_avail_sc_idx(const struct osi_core_priv_data *const osi_core const struct osi_macsec_lut_status *lut_status_ptr = &osi_core->macsec_lut_status[ctlr]; nveu32_t i; + const nveu32_t max_num_sc[MAX_MACSEC_IP_TYPES] = { + OSI_MAX_NUM_SC, + OSI_MAX_NUM_SC_T26x + }; - for (i = 0; i < OSI_MAX_NUM_SC; i++) { + for (i = 0; i < max_num_sc[osi_core->macsec]; i++) { if (lut_status_ptr->sc_info[i].an_valid == OSI_NONE) { break; } @@ -6021,10 +6029,14 @@ static nve32_t add_new_sc(struct 
osi_core_priv_data *const osi_core, struct osi_macsec_lut_status *lut_status_ptr; nveu32_t avail_sc_idx = 0; struct osi_macsec_sc_info *new_sc = OSI_NULL; + const nveu32_t max_num_sc[MAX_MACSEC_IP_TYPES] = { + OSI_MAX_NUM_SC, + OSI_MAX_NUM_SC_T26x + }; lut_status_ptr = &osi_core->macsec_lut_status[ctlr]; - if (lut_status_ptr->num_of_sc_used >= OSI_MAX_NUM_SC) { + if (lut_status_ptr->num_of_sc_used >= max_num_sc[osi_core->macsec]) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Err: Reached max SC LUT entries!\n", 0ULL); ret = -1; @@ -6032,7 +6044,7 @@ static nve32_t add_new_sc(struct osi_core_priv_data *const osi_core, } avail_sc_idx = get_avail_sc_idx(osi_core, ctlr); - if (avail_sc_idx == OSI_MAX_NUM_SC) { + if (avail_sc_idx == max_num_sc[osi_core->macsec]) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Err: NO free SC Index\n", 0ULL); ret = -1; @@ -6234,6 +6246,10 @@ static nve32_t delete_dummy_sc(struct osi_core_priv_data *const osi_core, nveu16_t kt_idx = 0U; const nveu8_t zero_mac[OSI_ETH_ALEN] = {0U}; nve32_t ret = 0; + const nveu32_t max_num_sc[MAX_MACSEC_IP_TYPES] = { + OSI_MAX_NUM_SC, + OSI_MAX_NUM_SC_T26x + }; /** Using a dummy parameters used in add_dummy_sc */ dummy_sc.sci[6] = 0xFFU; @@ -6246,7 +6262,7 @@ static nve32_t delete_dummy_sc(struct osi_core_priv_data *const osi_core, dummy_sc.pn_window = 0x10U; dummy_sc.flags = 0U; - for (i = 0U; i < OSI_MAX_NUM_SC; i++) { + for (i = 0U; i < max_num_sc[osi_core->macsec]; i++) { if (osi_macsec_memcmp(&osi_core->macsec_dummy_sc_macids[i][0], sc->sci, (nve32_t)OSI_ETH_ALEN) == OSI_NONE_SIGNED) { existing_sc = find_existing_sc(osi_core, &dummy_sc, @@ -6301,6 +6317,10 @@ static nve32_t add_dummy_sc(struct osi_core_priv_data *const osi_core, nveu8_t * nve32_t ret = 0; nveu8_t i = 0; const nveu8_t zero_mac[OSI_ETH_ALEN] = {0U}; + const nveu32_t max_num_sc[MAX_MACSEC_IP_TYPES] = { + OSI_MAX_NUM_SC, + OSI_MAX_NUM_SC_T26x + }; /** Using dummy SC parameters to create TX SC entry in LUTs */ sc.sci[6] = 0xFFU; 
@@ -6319,7 +6339,7 @@ static nve32_t add_dummy_sc(struct osi_core_priv_data *const osi_core, nveu8_t * "Failed to program dummy sc\n", (nveul64_t)ret); goto exit_func; } - for (i = 0U; i < OSI_MAX_NUM_SC; i++) { + for (i = 0U; i < max_num_sc[osi_core->macsec]; i++) { if (osi_macsec_memcmp(&osi_core->macsec_dummy_sc_macids[i][0], macsec_vf_mac, (nve32_t)OSI_ETH_ALEN) == OSI_NONE_SIGNED) { break;