diff --git a/drivers/net/ethernet/nvidia/nvethernet/Makefile b/drivers/net/ethernet/nvidia/nvethernet/Makefile index c75415f9..3900ee29 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/Makefile +++ b/drivers/net/ethernet/nvidia/nvethernet/Makefile @@ -22,6 +22,9 @@ ccflags-y += -DLINUX_OS -DNET30 -DNVPKCS_MACSEC -DLINUX_IVC \ -I$(srctree.nvidia-oot)/drivers/net/ethernet/nvidia/nvethernet/nvethernetrm/include endif +#ccflags-y += -DOSI_DEBUG -DMACSEC_SUPPORT -DDEBUG_MACSEC -DMACSEC_KEY_PROGRAM +ccflags-y += -DMACSEC_SUPPORT + nvethernet-objs:= ether_linux.o \ osd.o \ ethtool.o \ diff --git a/drivers/net/ethernet/nvidia/nvethernet/ether_export.h b/drivers/net/ethernet/nvidia/nvethernet/ether_export.h index 07ef02e9..b8e72a09 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/ether_export.h +++ b/drivers/net/ethernet/nvidia/nvethernet/ether_export.h @@ -39,6 +39,10 @@ struct ether_l2_filter { nveu32_t index; /** Ethernet MAC address to be added */ nveu8_t mac_addr[OSI_ETH_ALEN]; + /** packet duplication */ + nveu32_t pkt_dup; + /** dma channel */ + nveu32_t dma_chan; }; /** diff --git a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c index b33a99d3..474b7646 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c +++ b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c @@ -68,11 +68,12 @@ int ether_get_tx_ts(struct ether_priv_data *pdata) ioctl_data.cmd = OSI_CMD_GET_TX_TS; ioctl_data.tx_ts.pkt_id = pnode->pktid; + ioctl_data.tx_ts.vdma_id = pnode->vdmaid; ret = osi_handle_ioctl(pdata->osi_core, &ioctl_data); if (ret == 0) { /* get time stamp form ethernet server */ - dev_dbg(pdata->dev, "%s() pktid = %x, skb = %p\n", - __func__, pnode->pktid, pnode->skb); + dev_dbg(pdata->dev, "%s() pktid = %x, skb = %p, vdmaid = %x\n", + __func__, pnode->pktid, pnode->skb, pnode->vdmaid); if ((ioctl_data.tx_ts.nsec & OSI_MAC_TCR_TXTSSMIS) == OSI_MAC_TCR_TXTSSMIS) { @@ -423,6 +424,19 @@ static int 
ether_pad_calibrate(struct ether_priv_data *pdata) */ static void ether_disable_mgbe_clks(struct ether_priv_data *pdata) { + if (pdata->osi_core->mac != OSI_MAC_HW_MGBE_T26X) { + if (!IS_ERR_OR_NULL(pdata->eee_pcs_clk)) { + clk_disable_unprepare(pdata->eee_pcs_clk); + } + + if (!IS_ERR_OR_NULL(pdata->mac_div_clk)) { + clk_disable_unprepare(pdata->mac_div_clk); + } + if (!IS_ERR_OR_NULL(pdata->rx_pcs_clk)) { + clk_disable_unprepare(pdata->rx_pcs_clk); + } + } + if (!IS_ERR_OR_NULL(pdata->ptp_ref_clk)) { clk_disable_unprepare(pdata->ptp_ref_clk); } @@ -431,18 +445,10 @@ static void ether_disable_mgbe_clks(struct ether_priv_data *pdata) clk_disable_unprepare(pdata->app_clk); } - if (!IS_ERR_OR_NULL(pdata->eee_pcs_clk)) { - clk_disable_unprepare(pdata->eee_pcs_clk); - } - if (!IS_ERR_OR_NULL(pdata->mac_clk)) { clk_disable_unprepare(pdata->mac_clk); } - if (!IS_ERR_OR_NULL(pdata->mac_div_clk)) { - clk_disable_unprepare(pdata->mac_div_clk); - } - if (!IS_ERR_OR_NULL(pdata->tx_pcs_clk)) { clk_disable_unprepare(pdata->tx_pcs_clk); } @@ -451,10 +457,6 @@ static void ether_disable_mgbe_clks(struct ether_priv_data *pdata) clk_disable_unprepare(pdata->tx_clk); } - if (!IS_ERR_OR_NULL(pdata->rx_pcs_clk)) { - clk_disable_unprepare(pdata->rx_pcs_clk); - } - if (!IS_ERR_OR_NULL(pdata->rx_pcs_input_clk)) { clk_disable_unprepare(pdata->rx_pcs_input_clk); } @@ -476,28 +478,43 @@ static void ether_disable_mgbe_clks(struct ether_priv_data *pdata) */ static void ether_disable_eqos_clks(struct ether_priv_data *pdata) { - if (!IS_ERR_OR_NULL(pdata->axi_cbb_clk)) { - clk_disable_unprepare(pdata->axi_cbb_clk); - } - - if (!IS_ERR_OR_NULL(pdata->axi_clk)) { - clk_disable_unprepare(pdata->axi_clk); - } - if (!IS_ERR_OR_NULL(pdata->rx_clk)) { clk_disable_unprepare(pdata->rx_clk); } - if (!IS_ERR_OR_NULL(pdata->ptp_ref_clk)) { - clk_disable_unprepare(pdata->ptp_ref_clk); - } - if (!IS_ERR_OR_NULL(pdata->tx_clk)) { clk_disable_unprepare(pdata->tx_clk); } - if 
(!IS_ERR_OR_NULL(pdata->pllrefe_clk)) { - clk_disable_unprepare(pdata->pllrefe_clk); + if (pdata->osi_core->mac_ver == MAC_CORE_VER_TYPE_EQOS_5_40) { + if (!IS_ERR_OR_NULL(pdata->rx_pcs_input_clk)) { + clk_disable_unprepare(pdata->rx_pcs_input_clk); + } + if (!IS_ERR_OR_NULL(pdata->app_clk)) { + clk_disable_unprepare(pdata->app_clk); + } + if (!IS_ERR_OR_NULL(pdata->tx_pcs_clk)) { + clk_disable_unprepare(pdata->tx_pcs_clk); + } + if (!IS_ERR_OR_NULL(pdata->mac_clk)) { + clk_disable_unprepare(pdata->mac_clk); + } + } else { + if (!IS_ERR_OR_NULL(pdata->ptp_ref_clk)) { + clk_disable_unprepare(pdata->ptp_ref_clk); + } + + if (!IS_ERR_OR_NULL(pdata->axi_clk)) { + clk_disable_unprepare(pdata->axi_clk); + } + + if (!IS_ERR_OR_NULL(pdata->axi_cbb_clk)) { + clk_disable_unprepare(pdata->axi_cbb_clk); + } + + if (!IS_ERR_OR_NULL(pdata->pllrefe_clk)) { + clk_disable_unprepare(pdata->pllrefe_clk); + } } pdata->clks_enable = false; @@ -538,6 +555,7 @@ static int ether_enable_mgbe_clks(struct ether_priv_data *pdata) unsigned int uphy_gbe_mode = pdata->osi_core->uphy_gbe_mode; unsigned long rate = 0; int ret; + unsigned short mac= pdata->osi_core->mac; if (!IS_ERR_OR_NULL(pdata->rx_input_clk)) { ret = clk_prepare_enable(pdata->rx_input_clk); @@ -549,19 +567,14 @@ static int ether_enable_mgbe_clks(struct ether_priv_data *pdata) if (!IS_ERR_OR_NULL(pdata->rx_pcs_input_clk)) { ret = clk_prepare_enable(pdata->rx_pcs_input_clk); if (ret < 0) { - return ret; - } - } - - if (!IS_ERR_OR_NULL(pdata->rx_pcs_clk)) { - ret = clk_prepare_enable(pdata->rx_pcs_clk); - if (ret < 0) { - goto err_rx_pcs; + goto err_rx_pcs_input; } } if (!IS_ERR_OR_NULL(pdata->tx_clk)) { - if (uphy_gbe_mode == OSI_ENABLE) + if (uphy_gbe_mode == OSI_UPHY_GBE_MODE_25G) + rate = ETHER_MGBE_TXRX_CLK_XAUI_25G; + else if (uphy_gbe_mode == OSI_GBE_MODE_10G) rate = ETHER_MGBE_TX_CLK_USXGMII_10G; else rate = ETHER_MGBE_TX_CLK_USXGMII_5G; @@ -579,7 +592,9 @@ static int ether_enable_mgbe_clks(struct ether_priv_data *pdata) } 
if (!IS_ERR_OR_NULL(pdata->tx_pcs_clk)) { - if (uphy_gbe_mode == OSI_ENABLE) + if (uphy_gbe_mode == OSI_UPHY_GBE_MODE_25G) + rate = ETHER_MGBE_TXRX_PCS_CLK_XAUI_25G; + else if (uphy_gbe_mode == OSI_GBE_MODE_10G) rate = ETHER_MGBE_TX_PCS_CLK_USXGMII_10G; else rate = ETHER_MGBE_TX_PCS_CLK_USXGMII_5G; @@ -597,10 +612,26 @@ static int ether_enable_mgbe_clks(struct ether_priv_data *pdata) } } - if (!IS_ERR_OR_NULL(pdata->mac_div_clk)) { - ret = clk_prepare_enable(pdata->mac_div_clk); - if (ret < 0) { - goto err_mac_div; + if (mac != OSI_MAC_HW_MGBE_T26X) { + if (!IS_ERR_OR_NULL(pdata->rx_pcs_clk)) { + ret = clk_prepare_enable(pdata->rx_pcs_clk); + if (ret < 0) { + goto err_rx_pcs; + } + } + + if (!IS_ERR_OR_NULL(pdata->mac_div_clk)) { + ret = clk_prepare_enable(pdata->mac_div_clk); + if (ret < 0) { + goto err_mac_div; + } + } + + if (!IS_ERR_OR_NULL(pdata->eee_pcs_clk)) { + ret = clk_prepare_enable(pdata->eee_pcs_clk); + if (ret < 0) { + goto err_eee_pcs; + } } } @@ -611,13 +642,6 @@ static int ether_enable_mgbe_clks(struct ether_priv_data *pdata) } } - if (!IS_ERR_OR_NULL(pdata->eee_pcs_clk)) { - ret = clk_prepare_enable(pdata->eee_pcs_clk); - if (ret < 0) { - goto err_eee_pcs; - } - } - if (!IS_ERR_OR_NULL(pdata->app_clk)) { ret = clk_prepare_enable(pdata->app_clk); if (ret < 0) { @@ -641,18 +665,22 @@ err_ptp_ref: clk_disable_unprepare(pdata->app_clk); } err_app: - if (!IS_ERR_OR_NULL(pdata->eee_pcs_clk)) { - clk_disable_unprepare(pdata->eee_pcs_clk); - } -err_eee_pcs: if (!IS_ERR_OR_NULL(pdata->mac_clk)) { clk_disable_unprepare(pdata->mac_clk); } err_mac: - if (!IS_ERR_OR_NULL(pdata->mac_div_clk)) { + if (!IS_ERR_OR_NULL(pdata->eee_pcs_clk) && mac == OSI_MAC_HW_MGBE) { + clk_disable_unprepare(pdata->eee_pcs_clk); + } +err_eee_pcs: + if (!IS_ERR_OR_NULL(pdata->mac_div_clk) && mac == OSI_MAC_HW_MGBE) { clk_disable_unprepare(pdata->mac_div_clk); } err_mac_div: + if (!IS_ERR_OR_NULL(pdata->rx_pcs_clk) && mac == OSI_MAC_HW_MGBE) { + clk_disable_unprepare(pdata->rx_pcs_clk); + } +err_rx_pcs: if (!IS_ERR_OR_NULL(pdata->tx_pcs_clk)) { clk_disable_unprepare(pdata->tx_pcs_clk); } @@ -664,7 +692,7 @@ err_tx: if (!IS_ERR_OR_NULL(pdata->rx_pcs_clk)) { clk_disable_unprepare(pdata->rx_pcs_clk); } -err_rx_pcs: +err_rx_pcs_input: if (!IS_ERR_OR_NULL(pdata->rx_pcs_input_clk)) { clk_disable_unprepare(pdata->rx_pcs_input_clk); } @@ -672,6 +700,108 @@ err_rx_pcs: return ret; } +/** + * @brief Enable all MAC T26x EQOS related clks. + * + * Algorithm: Enables the clks by using clock subsystem provided API's. + * + * @param[in] pdata: OSD private data. + * + * @retval 0 on success + * @retval "negative value" on failure. + */ +static int ether_enable_eqos_clks_t26x(struct ether_priv_data *pdata) +{ + unsigned int uphy_gbe_mode = pdata->osi_core->uphy_gbe_mode; + unsigned long rate = 0; + int ret; + + if (!IS_ERR_OR_NULL(pdata->tx_pcs_clk)) { + if (uphy_gbe_mode == OSI_GBE_MODE_2_5G) + rate = ETHER_EQOS_TX_CLK_2_5G; + else + rate = ETHER_EQOS_TX_CLK_1000M; + ret = clk_set_rate(pdata->tx_pcs_clk, rate); + if (ret < 0) { + dev_err(pdata->dev, "failed to set EQOS tx_pcs_clk rate\n"); + return ret; + } + + ret = clk_prepare_enable(pdata->tx_pcs_clk); + if (ret < 0) { + return ret; + } + } + + if (!IS_ERR_OR_NULL(pdata->tx_clk)) { + if (uphy_gbe_mode == OSI_GBE_MODE_2_5G) + rate = ETHER_EQOS_UPHY_LX_TX_2_5G_CLK; + else + rate = ETHER_EQOS_UPHY_LX_TX_1G_CLK; + ret = clk_set_rate(pdata->tx_clk, rate); + if (ret < 0) { + dev_err(pdata->dev, "failed to set EQOS tx_clk rate\n"); + goto err_tx_pcs; + } + + ret = clk_prepare_enable(pdata->tx_clk); + if (ret < 0) { + goto err_tx_pcs; + } + } + + if (!IS_ERR_OR_NULL(pdata->mac_clk)) { + ret = clk_prepare_enable(pdata->mac_clk); + if (ret < 0) { + goto err_tx; + } + } + if (!IS_ERR_OR_NULL(pdata->app_clk)) { + ret = clk_prepare_enable(pdata->app_clk); + if (ret < 0) { + goto err_app; + } + } + if (!IS_ERR_OR_NULL(pdata->rx_pcs_input_clk)) { + ret = 
clk_prepare_enable(pdata->rx_pcs_input_clk); + if (ret < 0) { + goto err_rx_pcs_input; + } + } + if (!IS_ERR_OR_NULL(pdata->rx_clk)) { + ret = clk_prepare_enable(pdata->rx_clk); + if (ret < 0) { + goto err_rx; + } + } + + pdata->clks_enable = true; + return 0; + +err_rx: + if (!IS_ERR_OR_NULL(pdata->rx_pcs_input_clk)) { + clk_disable_unprepare(pdata->rx_pcs_input_clk); + } +err_rx_pcs_input: + if (!IS_ERR_OR_NULL(pdata->app_clk)) { + clk_disable_unprepare(pdata->app_clk); + } +err_app: + if (!IS_ERR_OR_NULL(pdata->mac_clk)) { + clk_disable_unprepare(pdata->mac_clk); + } +err_tx: + if (!IS_ERR_OR_NULL(pdata->tx_clk)) { + clk_disable_unprepare(pdata->tx_clk); + } +err_tx_pcs: + if (!IS_ERR_OR_NULL(pdata->tx_pcs_clk)) { + clk_disable_unprepare(pdata->tx_pcs_clk); + } + + return ret; +} + /** * @brief Enable all MAC EQOS related clks. * @@ -771,9 +901,11 @@ static int ether_enable_clks(struct ether_priv_data *pdata) if (pdata->osi_core->use_virtualization == OSI_DISABLE) { if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) { return ether_enable_mgbe_clks(pdata); + } else if (pdata->osi_core->mac_ver == MAC_CORE_VER_TYPE_EQOS_5_40) { + return ether_enable_eqos_clks_t26x(pdata); + } else { + return ether_enable_eqos_clks(pdata); } - - return ether_enable_eqos_clks(pdata); } return 0; @@ -848,14 +980,15 @@ int ether_conf_eee(struct ether_priv_data *pdata, unsigned int tx_lpi_enable) #endif /* !OSI_STRIPPED_LIB */ /** - * @brief Set MGBE MAC_DIV/TX clk rate + * @brief Set MGBE MAC_DIV/TX(T23x) or MAC(T26x) clk rate * - * Algorithm: Sets MGBE MAC_DIV clk_rate which will be MAC_TX/MACSEC clk rate. + * Algorithm: Sets MGBE MAC_DIV or MAC clk_rate which will + * be MAC_TX/MACSEC clk rate. * - * @param[in] mac_div_clk: Pointer to MAC_DIV clk. + * @param[in] mac_clk: Pointer to MAC_DIV or MAC clk. * @param[in] speed: PHY line speed. 
*/ -static inline void ether_set_mgbe_mac_div_rate(struct clk *mac_div_clk, +static inline void ether_set_mgbe_mac_div_rate(struct clk *mac_clk, int speed) { unsigned long rate; @@ -867,18 +1000,21 @@ static inline void ether_set_mgbe_mac_div_rate(struct clk *mac_div_clk, case SPEED_5000: rate = ETHER_MGBE_MAC_DIV_RATE_5G; break; + case SPEED_25000: + rate = ETHER_MGBE_MAC_DIV_RATE_25G; + break; case SPEED_10000: default: rate = ETHER_MGBE_MAC_DIV_RATE_10G; break; } - if (clk_set_rate(mac_div_clk, rate) < 0) - pr_err("%s(): failed to set mac_div_clk rate\n", __func__); + if (clk_set_rate(mac_clk, rate) < 0) + pr_err("%s(): failed to set mac_clk rate\n", __func__); } /** - * @brief Set EQOS TX clk rate + * @brief Set EQOS TX(T23x) or MAC/Macsec (T26x) clk rate * * @param[in] tx_clk: Pointer to Tx clk. * @param[in] speed: PHY line speed. @@ -895,6 +1031,9 @@ static inline void ether_set_eqos_tx_clk(struct clk *tx_clk, case SPEED_100: rate = ETHER_EQOS_TX_CLK_100M; break; + case SPEED_2500: + rate = ETHER_EQOS_TX_CLK_2_5G; + break; case SPEED_1000: default: rate = ETHER_EQOS_TX_CLK_1000M; @@ -902,7 +1041,7 @@ static inline void ether_set_eqos_tx_clk(struct clk *tx_clk, } if (clk_set_rate(tx_clk, rate) < 0) - pr_err("%s(): failed to set eqos tx_clk rate\n", __func__); + pr_err("%s(): failed to set eqos tx_clk/mac rate\n", __func__); } /** @@ -921,6 +1060,7 @@ static inline void set_speed_work_func(struct work_struct *work) struct net_device *dev = pdata->ndev; struct phy_device *phydev = pdata->phydev; nveu32_t iface_mode = pdata->osi_core->phy_iface_mode; + struct clk *mac_clk = NULL; #ifndef OSI_STRIPPED_LIB unsigned int eee_enable = OSI_DISABLE; #endif /* !OSI_STRIPPED_LIB */ @@ -971,8 +1111,9 @@ static inline void set_speed_work_func(struct work_struct *work) /* Set MGBE MAC_DIV/TX clk rate */ pdata->speed = speed; phy_print_status(phydev); - ether_set_mgbe_mac_div_rate(pdata->mac_div_clk, - pdata->speed); + mac_clk = (pdata->osi_core->mac == OSI_MAC_HW_MGBE_T26X)? 
pdata->mac_clk: + pdata->mac_div_clk; + ether_set_mgbe_mac_div_rate(mac_clk, pdata->speed); #ifndef OSI_STRIPPED_LIB if (pdata->eee_enabled && pdata->tx_lpi_enabled) { @@ -1006,6 +1147,13 @@ static void ether_en_dis_monitor_clks(struct ether_priv_data *pdata, else pdata->rx_pcs_m_enabled = true; } + if (!IS_ERR_OR_NULL(pdata->tx_m_clk) && !pdata->tx_m_enabled) { + if (clk_prepare_enable(pdata->tx_m_clk) < 0) + dev_err(pdata->dev, + "failed to enable tx_m_clk"); + else + pdata->tx_m_enabled = true; + } } else { /* Disable Monitoring clocks */ if (!IS_ERR_OR_NULL(pdata->rx_pcs_m_clk) && pdata->rx_pcs_m_enabled) { @@ -1017,6 +1165,11 @@ static void ether_en_dis_monitor_clks(struct ether_priv_data *pdata, clk_disable_unprepare(pdata->rx_m_clk); pdata->rx_m_enabled = false; } + + if (!IS_ERR_OR_NULL(pdata->tx_m_clk) && pdata->tx_m_enabled) { + clk_disable_unprepare(pdata->tx_m_clk); + pdata->tx_m_enabled = false; + } } } @@ -1035,9 +1188,14 @@ static void ether_adjust_link(struct net_device *dev) { struct ether_priv_data *pdata = netdev_priv(dev); nveu32_t iface_mode = pdata->osi_core->phy_iface_mode; + unsigned int uphy_gbe_mode = pdata->osi_core->uphy_gbe_mode; + struct osi_dma_priv_data *osi_dma = pdata->osi_dma; struct phy_device *phydev = pdata->phydev; int new_state = 0, speed_changed = 0, speed; unsigned long val; + unsigned short mac = pdata->osi_core->mac; + unsigned short mac_ver = pdata->osi_core->mac_ver; + struct clk *mac_clk = NULL; #ifndef OSI_STRIPPED_LIB unsigned int eee_enable = OSI_DISABLE; #endif /* !OSI_STRIPPED_LIB */ @@ -1064,8 +1222,10 @@ static void ether_adjust_link(struct net_device *dev) #endif /* !OSI_STRIPPED_LIB */ if (pdata->fixed_link == OSI_ENABLE) { - if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) { - if (iface_mode == OSI_XFI_MODE_10G) { + if (mac != OSI_MAC_HW_EQOS) { + if (iface_mode == OSI_XAUI_MODE_25G) { + phydev->speed = OSI_SPEED_25000; + } else if (iface_mode == OSI_XFI_MODE_10G) { phydev->speed = OSI_SPEED_10000; } else if 
(iface_mode == OSI_XFI_MODE_5G) { phydev->speed = OSI_SPEED_5000; @@ -1102,7 +1262,13 @@ static void ether_adjust_link(struct net_device *dev) * speed will be overwritten as per the * PHY interface mode */ speed = phydev->speed; - /* XFI mode = 10G: + /* + * XAUI mode = 25G: + * UPHY GBE mode = 25G + * MAC = 25G + * XLGPCS = 25G + * PHY line side = 25G + * XFI mode = 10G: * UPHY GBE mode = 10G * MAC = 10G * XPCS = 10G @@ -1122,19 +1288,38 @@ static void ether_adjust_link(struct net_device *dev) * MAC = 5G/2.5G ( same as PHY line speed) * XPCS = 5G * PHY line side = 5G/2.5G + * SGMII mode = 2.5G: + * UPHY GBE mode = 2.5G + * MAC = 2.5G + * PCS = 2.5G + * PHY line side = 2.5G + * SGMII mode = 1G: + * UPHY GBE mode = 1G + * MAC = 1G ( same as PHY line speed) + * PCS = 1G + * PHY line side = 1G/100M/10M */ - if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) { - /* MAC and XFI speed should match in XFI mode */ - if (iface_mode == OSI_XFI_MODE_10G) { + if ((mac != OSI_MAC_HW_EQOS) || + (mac_ver == MAC_CORE_VER_TYPE_EQOS_5_40)) { + /* MAC and XFI/XAUI speed should match in + * XFI/XAUI mode + */ + if (iface_mode == OSI_XAUI_MODE_25G) { + speed = OSI_SPEED_25000; + } else if (iface_mode == OSI_XFI_MODE_10G) { speed = OSI_SPEED_10000; } else if (iface_mode == OSI_XFI_MODE_5G) { speed = OSI_SPEED_5000; + } else if (uphy_gbe_mode == OSI_GBE_MODE_2_5G) { + speed = OSI_SPEED_2500; } } + ioctl_data.arg6_32 = speed; ret = osi_handle_ioctl(pdata->osi_core, &ioctl_data); if (ret < 0) { - if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) { + if ((mac != OSI_MAC_HW_EQOS) || + (mac_ver == MAC_CORE_VER_TYPE_EQOS_5_40)) { netdev_dbg(dev, "Retry set speed\n"); netif_carrier_off(dev); schedule_delayed_work(&pdata->set_speed_work, @@ -1155,6 +1340,12 @@ static void ether_adjust_link(struct net_device *dev) } } + if (pdata->osi_core->mac == OSI_MAC_HW_MGBE_T26X) { + osi_dma->ioctl_data.cmd = OSI_DMA_IOCTL_CMD_RX_RIIT_CONFIG; + osi_dma->ioctl_data.arg_u32 = speed; + osi_dma_ioctl(osi_dma); + } + 
ether_en_dis_monitor_clks(pdata, OSI_ENABLE); pdata->speed = speed; } @@ -1182,9 +1373,13 @@ static void ether_adjust_link(struct net_device *dev) } if (speed_changed) { - if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) { - ether_set_mgbe_mac_div_rate(pdata->mac_div_clk, - pdata->speed); + if((mac == OSI_MAC_HW_MGBE_T26X) || (mac == OSI_MAC_HW_MGBE)) { + mac_clk = (mac == OSI_MAC_HW_MGBE_T26X)? pdata->mac_clk: + pdata->mac_div_clk; + ether_set_mgbe_mac_div_rate(mac_clk, pdata->speed); + } else if(mac_ver == MAC_CORE_VER_TYPE_EQOS_5_40) { + ether_set_eqos_tx_clk(pdata->mac_clk, + phydev->speed); } else { if (pdata->osi_core->mac_ver == OSI_EQOS_MAC_5_30) { ether_set_eqos_tx_clk(pdata->tx_div_clk, @@ -1829,8 +2024,13 @@ static void free_rx_dma_resources(struct osi_dma_priv_data *osi_dma, unsigned long rx_desc_size = sizeof(struct osi_rx_desc) * osi_dma->rx_ring_sz; struct osi_rx_ring *rx_ring = NULL; unsigned int i, chan; + const nveu32_t max_dma_chan[OSI_MAX_MAC_IP_TYPES] = { + OSI_EQOS_MAX_NUM_CHANS, + OSI_MGBE_T23X_MAX_NUM_CHANS, + OSI_MGBE_MAX_NUM_CHANS + }; - for (i = 0; i < OSI_MGBE_MAX_NUM_CHANS; i++) { + for (i = 0; i < max_dma_chan[osi_dma->mac]; i++) { rx_ring = osi_dma->rx_ring[i]; chan = osi_dma->dma_chans[i]; @@ -2048,8 +2248,13 @@ static int ether_allocate_rx_dma_resources(struct osi_dma_priv_data *osi_dma, unsigned int chan; unsigned int i; int ret = 0; + const nveu32_t max_dma_chan[OSI_MAX_MAC_IP_TYPES] = { + OSI_EQOS_MAX_NUM_CHANS, + OSI_MGBE_T23X_MAX_NUM_CHANS, + OSI_MGBE_MAX_NUM_CHANS + }; - for (i = 0; i < OSI_MGBE_MAX_NUM_CHANS; i++) { + for (i = 0; i < max_dma_chan[osi_dma->mac]; i++) { chan = osi_dma->dma_chans[i]; if (chan != ETHER_INVALID_CHAN_NUM) { @@ -2097,8 +2302,13 @@ static void free_tx_dma_resources(struct osi_dma_priv_data *osi_dma, unsigned long tx_desc_size = sizeof(struct osi_tx_desc) * osi_dma->tx_ring_sz; struct osi_tx_ring *tx_ring = NULL; unsigned int i; + const nveu32_t max_dma_chan[OSI_MAX_MAC_IP_TYPES] = { + 
OSI_EQOS_MAX_NUM_CHANS, + OSI_MGBE_T23X_MAX_NUM_CHANS, + OSI_MGBE_MAX_NUM_CHANS + }; - for (i = 0; i < OSI_MGBE_MAX_NUM_CHANS; i++) { + for (i = 0; i < max_dma_chan[osi_dma->mac]; i++) { tx_ring = osi_dma->tx_ring[i]; if (tx_ring != NULL) { @@ -2200,8 +2410,13 @@ static int ether_allocate_tx_dma_resources(struct osi_dma_priv_data *osi_dma, unsigned int chan; unsigned int i; int ret = 0; + const nveu32_t max_dma_chan[OSI_MAX_MAC_IP_TYPES] = { + OSI_EQOS_MAX_NUM_CHANS, + OSI_MGBE_T23X_MAX_NUM_CHANS, + OSI_MGBE_MAX_NUM_CHANS + }; - for (i = 0; i < OSI_MGBE_MAX_NUM_CHANS; i++) { + for (i = 0; i < max_dma_chan[osi_dma->mac]; i++) { chan = osi_dma->dma_chans[i]; if (chan != ETHER_INVALID_CHAN_NUM) { @@ -2237,13 +2452,18 @@ exit: static void ether_init_invalid_chan_ring(struct osi_dma_priv_data *osi_dma) { unsigned int i; + const nveu32_t max_dma_chan[OSI_MAX_MAC_IP_TYPES] = { + OSI_EQOS_MAX_NUM_CHANS, + OSI_MGBE_T23X_MAX_NUM_CHANS, + OSI_MGBE_MAX_NUM_CHANS + }; - for (i = 0; i < OSI_MGBE_MAX_NUM_CHANS; i++) { + for (i = 0; i < max_dma_chan[osi_dma->mac]; i++) { osi_dma->tx_ring[i] = NULL; osi_dma->rx_ring[i] = NULL; } - for (i = osi_dma->num_dma_chans; i < OSI_MGBE_MAX_NUM_CHANS; i++) { + for (i = osi_dma->num_dma_chans; i < max_dma_chan[osi_dma->mac]; i++) { osi_dma->dma_chans[i] = ETHER_INVALID_CHAN_NUM; } } @@ -2393,6 +2613,16 @@ static int ether_update_mac_addr_filter(struct ether_priv_data *pdata, struct osi_dma_priv_data *osi_dma = pdata->osi_dma; nveu32_t dma_channel = osi_dma->dma_chans[0]; unsigned char bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + unsigned int MAC_index[OSI_MAX_MAC_IP_TYPES] = { + ETHER_MAC_ADDRESS_INDEX, + ETHER_MAC_ADDRESS_INDEX, + ETHER_MAC_ADDRESS_INDEX_T26X + }; + unsigned int BC_index[OSI_MAX_MAC_IP_TYPES] = { + ETHER_BC_ADDRESS_INDEX, + ETHER_BC_ADDRESS_INDEX, + ETHER_BC_ADDRESS_INDEX_T26X + }; if ((en_dis > OSI_ENABLE) || (uc_bc > ETHER_ADDRESS_MAC)) { dev_err(pdata->dev, @@ -2400,10 +2630,7 @@ static int 
ether_update_mac_addr_filter(struct ether_priv_data *pdata, __func__, en_dis, uc_bc); return -1; } - //TBD: T264 Use Rx DMA channel 1 for NET05 WAR - if (osi_core->pre_sil == 0x1U) { - dma_channel = osi_dma->dma_chans[1]; - } + memset(&ioctl_data->l2_filter, 0x0, sizeof(struct osi_filter)); /* Set MAC address with DCS set to route all legacy Rx * packets from RxQ0 to default DMA at index 0. @@ -2418,7 +2645,7 @@ static int ether_update_mac_addr_filter(struct ether_priv_data *pdata, } if (uc_bc == ETHER_ADDRESS_MAC) { - ioctl_data->l2_filter.index = ETHER_MAC_ADDRESS_INDEX; + ioctl_data->l2_filter.index = MAC_index[osi_core->mac]; memcpy(ioctl_data->l2_filter.mac_addr, osi_core->mac_addr, ETH_ALEN); } else { @@ -2427,8 +2654,9 @@ static int ether_update_mac_addr_filter(struct ether_priv_data *pdata, } else { dma_channel = osi_dma->dma_chans[0]; } - ioctl_data->l2_filter.index = ETHER_BC_ADDRESS_INDEX; + ioctl_data->l2_filter.index = BC_index[osi_core->mac]; memcpy(ioctl_data->l2_filter.mac_addr, bc_addr, ETH_ALEN); + ioctl_data->l2_filter.pkt_dup = OSI_ENABLE; } ioctl_data->l2_filter.dma_routing = OSI_ENABLE; ioctl_data->l2_filter.dma_chan = dma_channel; @@ -3404,11 +3632,15 @@ static unsigned short ether_select_queue(struct net_device *dev, priority = skb_vlan_tag_get_prio(skb); } - for (i = 0; i < osi_core->num_mtl_queues; i++) { - mtlq = osi_core->mtl_queues[i]; - if (pdata->txq_prio[mtlq] == priority) { - txqueue_select = (unsigned short)i; - break; + if ((osi_core->pre_sil == OSI_ENABLE) && (pdata->tx_queue_select != 0U)) { + txqueue_select = pdata->tx_queue_select; + } else { + for (i = 0; i < osi_core->num_mtl_queues; i++) { + mtlq = osi_core->mtl_queues[i]; + if (pdata->txq_prio[mtlq] == priority) { + txqueue_select = (unsigned short)i; + break; + } } } @@ -4562,6 +4794,89 @@ static void ether_set_vm_irq_chan_mask(struct ether_vm_irq_data *vm_irq_data, } } +/** + * @brief ether_get_rx_riit - Get the rx_riit value for speed. 
+ * + * Algorithm: Parse DT to get rx_riit value + * + * @param[in] pdev: Platform device instance. + * @param[in] pdata: OSD private data. + * + * @retval 0 on success + * @retval "negative value" on failure + */ +static int ether_get_rx_riit(struct platform_device *pdev, + struct ether_priv_data *pdata) +{ + struct osi_dma_priv_data *osi_dma = pdata->osi_dma; + struct device_node *speed_node, *temp; + unsigned int node = 0; + int ret = 0; + + speed_node = of_parse_phandle(pdev->dev.of_node, + "nvidia,mgbe-riit-config", 0); + if (speed_node == NULL) { + dev_warn(pdata->dev, "failed to find rx riit configuration, default disabled\n"); + osi_dma->use_riit = OSI_DISABLE; + return ret; + } + /* parse the number of riit configs */ + ret = of_property_read_u32(speed_node, "nvidia,speeds-num", + &osi_dma->num_of_riit); + if (ret != 0) { + dev_err(&pdev->dev, "failed to get number of rx riit (%d)\n", + ret); + return -EINVAL; + } + if (osi_dma->num_of_riit > OSI_MGBE_MAX_NUM_RIIT) { + dev_err(&pdev->dev, "Invalid Num. 
of RIIT's\n"); + return -EINVAL; + } + ret = of_get_child_count(speed_node); + if (ret != osi_dma->num_of_riit) { + dev_err(&pdev->dev, + "Mismatch in num_of_riit and riit config DT nodes\n"); + return -EINVAL; + } + for_each_child_of_node(speed_node, temp) { + if (node == osi_dma->num_of_riit) + break; + + ret = of_property_read_u32(temp, "nvidia,speed", + &osi_dma->rx_riit[node].speed); + if (ret != 0) { + dev_err(&pdev->dev, "failed to read riit speed\n"); + return -EINVAL; + } + if ((osi_dma->rx_riit[node].speed > OSI_SPEED_25000) && + (osi_dma->rx_riit[node].speed < OSI_SPEED_2500)) { + dev_err(&pdev->dev, "Invalid speed Number\n"); + return -EINVAL; + } + + ret = of_property_read_u32(temp, "nvidia,riit", + &osi_dma->rx_riit[node].riit); + if (ret != 0) { + dev_err(&pdev->dev, + "failed to read riit vaue\n"); + return -EINVAL; + } + if ((osi_dma->rx_riit[node].riit > OSI_MGBE_MAX_RX_RIIT_NSEC) || + (osi_dma->rx_riit[node].riit < OSI_MGBE_MIN_RX_RIIT_NSEC)) { + dev_err(&pdev->dev, + "invalid rx_riit, must be in ns range %d to %d\n", + OSI_MGBE_MIN_RX_RIIT_NSEC, + OSI_MGBE_MAX_RX_RIIT_NSEC); + return -EINVAL; + } + node++; + } + + osi_dma->use_riit = OSI_ENABLE; + return ret; +} + + /** * @brief ether_get_vdma_mapping - Get vDMA mapping data from DT. 
* @@ -5004,6 +5319,10 @@ static void ether_put_mgbe_clks(struct ether_priv_data *pdata) { struct device *dev = pdata->dev; + if (!IS_ERR_OR_NULL(pdata->rx_input_clk)) { + devm_clk_put(dev, pdata->rx_input_clk); + } + if (!IS_ERR_OR_NULL(pdata->ptp_ref_clk)) { devm_clk_put(dev, pdata->ptp_ref_clk); } @@ -5012,18 +5331,10 @@ static void ether_put_mgbe_clks(struct ether_priv_data *pdata) devm_clk_put(dev, pdata->app_clk); } - if (!IS_ERR_OR_NULL(pdata->eee_pcs_clk)) { - devm_clk_put(dev, pdata->eee_pcs_clk); - } - if (!IS_ERR_OR_NULL(pdata->mac_clk)) { devm_clk_put(dev, pdata->mac_clk); } - if (!IS_ERR_OR_NULL(pdata->mac_div_clk)) { - devm_clk_put(dev, pdata->mac_div_clk); - } - if (!IS_ERR_OR_NULL(pdata->tx_pcs_clk)) { devm_clk_put(dev, pdata->tx_pcs_clk); } @@ -5032,10 +5343,6 @@ static void ether_put_mgbe_clks(struct ether_priv_data *pdata) devm_clk_put(dev, pdata->tx_clk); } - if (!IS_ERR_OR_NULL(pdata->rx_pcs_clk)) { - devm_clk_put(dev, pdata->rx_pcs_clk); - } - if (!IS_ERR_OR_NULL(pdata->rx_pcs_input_clk)) { devm_clk_put(dev, pdata->rx_pcs_input_clk); } @@ -5047,6 +5354,22 @@ static void ether_put_mgbe_clks(struct ether_priv_data *pdata) if (!IS_ERR_OR_NULL(pdata->rx_m_clk)) { devm_clk_put(dev, pdata->rx_m_clk); } + + if (pdata->osi_core->mac == OSI_MAC_HW_MGBE_T26X) { + if (!IS_ERR_OR_NULL(pdata->tx_m_clk)) { + devm_clk_put(dev, pdata->tx_m_clk); + } + } else { + if (!IS_ERR_OR_NULL(pdata->eee_pcs_clk)) { + devm_clk_put(dev, pdata->eee_pcs_clk); + } + if (!IS_ERR_OR_NULL(pdata->mac_div_clk)) { + devm_clk_put(dev, pdata->mac_div_clk); + } + if (!IS_ERR_OR_NULL(pdata->rx_pcs_clk)) { + devm_clk_put(dev, pdata->rx_pcs_clk); + } + } } /** @@ -5063,37 +5386,51 @@ static void ether_put_eqos_clks(struct ether_priv_data *pdata) if (!IS_ERR_OR_NULL(pdata->tx_clk)) { devm_clk_put(dev, pdata->tx_clk); } - - if (!IS_ERR_OR_NULL(pdata->tx_div_clk)) { - devm_clk_put(dev, pdata->tx_div_clk); + if (!IS_ERR_OR_NULL(pdata->rx_clk)) { + devm_clk_put(dev, pdata->rx_clk); } - if 
(!IS_ERR_OR_NULL(pdata->rx_m_clk)) { devm_clk_put(dev, pdata->rx_m_clk); } - if (!IS_ERR_OR_NULL(pdata->rx_input_clk)) { - devm_clk_put(dev, pdata->rx_input_clk); - } - - if (!IS_ERR_OR_NULL(pdata->ptp_ref_clk)) { - devm_clk_put(dev, pdata->ptp_ref_clk); - } - - if (!IS_ERR_OR_NULL(pdata->rx_clk)) { - devm_clk_put(dev, pdata->rx_clk); - } - - if (!IS_ERR_OR_NULL(pdata->axi_clk)) { - devm_clk_put(dev, pdata->axi_clk); - } - - if (!IS_ERR_OR_NULL(pdata->axi_cbb_clk)) { - devm_clk_put(dev, pdata->axi_cbb_clk); - } - - if (!IS_ERR_OR_NULL(pdata->pllrefe_clk)) { - devm_clk_put(dev, pdata->pllrefe_clk); + if (pdata->osi_core->mac_ver == MAC_CORE_VER_TYPE_EQOS_5_40) { + if (!IS_ERR_OR_NULL(pdata->rx_pcs_m_clk)) { + devm_clk_put(dev, pdata->rx_pcs_m_clk); + } + if (!IS_ERR_OR_NULL(pdata->tx_m_clk)) { + devm_clk_put(dev, pdata->tx_m_clk); + } + if (!IS_ERR_OR_NULL(pdata->rx_pcs_input_clk)) { + devm_clk_put(dev, pdata->rx_pcs_input_clk); + } + if (!IS_ERR_OR_NULL(pdata->app_clk)) { + devm_clk_put(dev, pdata->app_clk); + } + if (!IS_ERR_OR_NULL(pdata->tx_pcs_clk)) { + devm_clk_put(dev, pdata->tx_pcs_clk); + } + if (!IS_ERR_OR_NULL(pdata->mac_clk)) { + devm_clk_put(dev, pdata->mac_clk); + } + }else { + if (!IS_ERR_OR_NULL(pdata->tx_div_clk)) { + devm_clk_put(dev, pdata->tx_div_clk); + } + if (!IS_ERR_OR_NULL(pdata->rx_input_clk)) { + devm_clk_put(dev, pdata->rx_input_clk); + } + if (!IS_ERR_OR_NULL(pdata->ptp_ref_clk)) { + devm_clk_put(dev, pdata->ptp_ref_clk); + } + if (!IS_ERR_OR_NULL(pdata->axi_clk)) { + devm_clk_put(dev, pdata->axi_clk); + } + if (!IS_ERR_OR_NULL(pdata->axi_cbb_clk)) { + devm_clk_put(dev, pdata->axi_cbb_clk); + } + if (!IS_ERR_OR_NULL(pdata->pllrefe_clk)) { + devm_clk_put(dev, pdata->pllrefe_clk); + } } } @@ -5130,7 +5467,10 @@ static int ether_set_mgbe_rx_fmon_rates(struct ether_priv_data *pdata) unsigned long rx_rate, rx_pcs_rate; int ret; - if (uphy_gbe_mode == OSI_ENABLE) { + if (uphy_gbe_mode == OSI_XAUI_MODE_25G) { + rx_rate = 
ETHER_MGBE_TXRX_CLK_XAUI_25G; + rx_pcs_rate = ETHER_MGBE_TXRX_PCS_CLK_XAUI_25G; + } else if (uphy_gbe_mode == OSI_GBE_MODE_10G) { rx_rate = ETHER_MGBE_RX_CLK_USXGMII_10G; rx_pcs_rate = ETHER_MGBE_RX_PCS_CLK_USXGMII_10G; } else { @@ -5189,13 +5529,6 @@ static int ether_get_mgbe_clks(struct ether_priv_data *pdata) goto err_rx_pcs_input; } - pdata->rx_pcs_clk = devm_clk_get(dev, "rx-pcs"); - if (IS_ERR(pdata->rx_pcs_clk)) { - ret = PTR_ERR(pdata->rx_pcs_clk); - dev_err(dev, "failed to get rx-pcs clk\n"); - goto err_rx_pcs; - } - pdata->tx_clk = devm_clk_get(dev, "tx"); if (IS_ERR(pdata->tx_clk)) { ret = PTR_ERR(pdata->tx_clk); @@ -5210,13 +5543,6 @@ static int ether_get_mgbe_clks(struct ether_priv_data *pdata) goto err_tx_pcs; } - pdata->mac_div_clk = devm_clk_get(dev, "mac-divider"); - if (IS_ERR(pdata->mac_div_clk)) { - ret = PTR_ERR(pdata->mac_div_clk); - dev_err(dev, "failed to get mac-divider clk\n"); - goto err_mac_div; - } - pdata->mac_clk = devm_clk_get(dev, "mac"); if (IS_ERR(pdata->mac_clk)) { ret = PTR_ERR(pdata->mac_clk); @@ -5224,13 +5550,6 @@ static int ether_get_mgbe_clks(struct ether_priv_data *pdata) goto err_mac; } - pdata->eee_pcs_clk = devm_clk_get(dev, "eee-pcs"); - if (IS_ERR(pdata->eee_pcs_clk)) { - ret = PTR_ERR(pdata->eee_pcs_clk); - dev_err(dev, "failed to get eee-pcs clk\n"); - goto err_eee_pcs; - } - pdata->app_clk = devm_clk_get(dev, "mgbe"); if (IS_ERR(pdata->app_clk)) { ret = PTR_ERR(pdata->app_clk); @@ -5252,29 +5571,58 @@ static int ether_get_mgbe_clks(struct ether_priv_data *pdata) goto err_rx_input; } + if (pdata->osi_core->mac == OSI_MAC_HW_MGBE_T26X) { + pdata->tx_m_clk = devm_clk_get(dev, "tx-m"); + if (IS_ERR(pdata->tx_m_clk)) { + ret = PTR_ERR(pdata->tx_m_clk); + dev_err(dev, "failed to get rx-input-m\n"); + goto err_rx_pcs; + } + } else { + pdata->rx_pcs_clk = devm_clk_get(dev, "rx-pcs"); + if (IS_ERR(pdata->rx_pcs_clk)) { + ret = PTR_ERR(pdata->rx_pcs_clk); + dev_err(dev, "failed to get rx-pcs clk\n"); + goto err_rx_pcs; + } + 
+ pdata->mac_div_clk = devm_clk_get(dev, "mac-divider"); + if (IS_ERR(pdata->mac_div_clk)) { + ret = PTR_ERR(pdata->mac_div_clk); + dev_err(dev, "failed to get mac-divider clk\n"); + goto err_mac_div; + } + pdata->eee_pcs_clk = devm_clk_get(dev, "eee-pcs"); + if (IS_ERR(pdata->eee_pcs_clk)) { + ret = PTR_ERR(pdata->eee_pcs_clk); + dev_err(dev, "failed to get eee-pcs clk\n"); + goto err_eee_pcs; + } + } + ret = ether_set_mgbe_rx_fmon_rates(pdata); if (ret < 0) goto err_rx_input; return 0; +err_eee_pcs: + devm_clk_put(dev, pdata->mac_div_clk); +err_mac_div: + devm_clk_put(dev, pdata->rx_pcs_clk); +err_rx_pcs: + devm_clk_put(dev, pdata->rx_input_clk); err_rx_input: devm_clk_put(dev, pdata->ptp_ref_clk); err_ptp_ref: devm_clk_put(dev, pdata->app_clk); err_app: - devm_clk_put(dev, pdata->eee_pcs_clk); -err_eee_pcs: devm_clk_put(dev, pdata->mac_clk); err_mac: - devm_clk_put(dev, pdata->mac_div_clk); -err_mac_div: devm_clk_put(dev, pdata->tx_pcs_clk); err_tx_pcs: devm_clk_put(dev, pdata->tx_clk); err_tx: - devm_clk_put(dev, pdata->rx_pcs_clk); -err_rx_pcs: devm_clk_put(dev, pdata->rx_pcs_input_clk); err_rx_pcs_input: devm_clk_put(dev, pdata->rx_pcs_m_clk); @@ -5299,24 +5647,80 @@ static int ether_get_eqos_clks(struct ether_priv_data *pdata) struct device *dev = pdata->dev; int ret; - /* Skip pll_refe clock initialisation for t18x platform */ - pdata->pllrefe_clk = devm_clk_get(dev, "pllrefe_vcoout"); - if (IS_ERR(pdata->pllrefe_clk)) { - dev_info(dev, "failed to get pllrefe_vcoout clk\n"); - } + if (pdata->osi_core->mac_ver == MAC_CORE_VER_TYPE_EQOS_5_40) { + pdata->mac_clk = devm_clk_get(dev, "eqos_mac"); + if (IS_ERR(pdata->mac_clk)) { + ret = PTR_ERR(pdata->mac_clk); + dev_err(dev, "failed to get eqos_mac clk\n"); + goto err_eqos_mac; + } - pdata->axi_cbb_clk = devm_clk_get(dev, "axi_cbb"); - if (IS_ERR(pdata->axi_cbb_clk)) { - ret = PTR_ERR(pdata->axi_cbb_clk); - dev_err(dev, "failed to get axi_cbb clk\n"); - goto err_axi_cbb; - } + pdata->tx_pcs_clk = 
devm_clk_get(dev, "eqos_tx_pcs"); + if (IS_ERR(pdata->tx_pcs_clk)) { + ret = PTR_ERR(pdata->tx_pcs_clk); + dev_err(dev, "failed to get tx_pcs_clk clk\n"); + goto err_tx_pcs; + } + pdata->app_clk = devm_clk_get(dev, "eqos"); + if (IS_ERR(pdata->app_clk)) { + ret = PTR_ERR(pdata->app_clk); + dev_err(dev, "failed to get app_clk clk\n"); + goto err_app; + } - pdata->axi_clk = devm_clk_get(dev, "eqos_axi"); - if (IS_ERR(pdata->axi_clk)) { - ret = PTR_ERR(pdata->axi_clk); - dev_err(dev, "failed to get eqos_axi clk\n"); - goto err_axi; + pdata->rx_pcs_input_clk = devm_clk_get(dev, "eqos_rx_pcs_input"); + if (IS_ERR(pdata->rx_pcs_input_clk)) { + ret = PTR_ERR(pdata->rx_pcs_input_clk); + dev_err(dev, "failed to get eqos_rx_pcs_input clk\n"); + goto err_rx_pcs_input; + } + + pdata->tx_m_clk = devm_clk_get(dev, "eqos_tx_m"); + if (IS_ERR(pdata->tx_m_clk)) { + ret = PTR_ERR(pdata->tx_m_clk); + dev_err(dev, "failed to get tx_m_clk clk\n"); + goto err_tx_m; + } + + pdata->rx_pcs_m_clk = devm_clk_get(dev, "eqos_rx_pcs_m"); + if (IS_ERR(pdata->rx_pcs_m_clk)) { + ret = PTR_ERR(pdata->rx_pcs_m_clk); + dev_err(dev, "failed to get eqos_rx_pcs_m clk\n"); + goto err_rx_pcs_m; + } + }else { + /* Skip pll_refe clock initialisation for t18x platform */ + pdata->pllrefe_clk = devm_clk_get(dev, "pllrefe_vcoout"); + if (IS_ERR(pdata->pllrefe_clk)) { + dev_info(dev, "failed to get pllrefe_vcoout clk\n"); + } + + pdata->axi_cbb_clk = devm_clk_get(dev, "axi_cbb"); + if (IS_ERR(pdata->axi_cbb_clk)) { + ret = PTR_ERR(pdata->axi_cbb_clk); + dev_err(dev, "failed to get axi_cbb clk\n"); + goto err_axi_cbb; + } + + pdata->axi_clk = devm_clk_get(dev, "eqos_axi"); + if (IS_ERR(pdata->axi_clk)) { + ret = PTR_ERR(pdata->axi_clk); + dev_err(dev, "failed to get eqos_axi clk\n"); + goto err_axi; + } + + pdata->ptp_ref_clk = devm_clk_get(dev, "eqos_ptp_ref"); + if (IS_ERR(pdata->ptp_ref_clk)) { + ret = PTR_ERR(pdata->ptp_ref_clk); + dev_err(dev, "failed to get eqos_ptp_ref clk\n"); + goto err_ptp_ref; + } + + 
pdata->tx_div_clk = devm_clk_get(dev, "eqos_tx_divider"); + if (IS_ERR(pdata->tx_div_clk)) { + ret = PTR_ERR(pdata->tx_div_clk); + dev_info(dev, "failed to get eqos_tx_divider clk\n"); + } } pdata->rx_clk = devm_clk_get(dev, "eqos_rx"); @@ -5326,13 +5730,6 @@ static int ether_get_eqos_clks(struct ether_priv_data *pdata) goto err_rx; } - pdata->ptp_ref_clk = devm_clk_get(dev, "eqos_ptp_ref"); - if (IS_ERR(pdata->ptp_ref_clk)) { - ret = PTR_ERR(pdata->ptp_ref_clk); - dev_err(dev, "failed to get eqos_ptp_ref clk\n"); - goto err_ptp_ref; - } - pdata->tx_clk = devm_clk_get(dev, "eqos_tx"); if (IS_ERR(pdata->tx_clk)) { ret = PTR_ERR(pdata->tx_clk); @@ -5352,12 +5749,6 @@ static int ether_get_eqos_clks(struct ether_priv_data *pdata) dev_info(dev, "failed to get eqos_rx_input clk\n"); } - pdata->tx_div_clk = devm_clk_get(dev, "eqos_tx_divider"); - if (IS_ERR(pdata->tx_div_clk)) { - ret = PTR_ERR(pdata->tx_div_clk); - dev_info(dev, "failed to get eqos_tx_divider clk\n"); - } - /* Set default rate to 1G */ if (!IS_ERR_OR_NULL(pdata->rx_input_clk)) { clk_set_rate(pdata->rx_input_clk, @@ -5365,20 +5756,47 @@ static int ether_get_eqos_clks(struct ether_priv_data *pdata) } return 0; - -err_tx: - devm_clk_put(dev, pdata->ptp_ref_clk); -err_ptp_ref: - devm_clk_put(dev, pdata->rx_clk); err_rx: - devm_clk_put(dev, pdata->axi_clk); + if (!IS_ERR_OR_NULL(pdata->tx_div_clk)) { + devm_clk_put(dev, pdata->tx_div_clk); + } +err_tx: + if (!IS_ERR_OR_NULL(pdata->rx_clk)) { + devm_clk_put(dev, pdata->rx_clk); + } +err_ptp_ref: + if (!IS_ERR_OR_NULL(pdata->axi_clk)) { + devm_clk_put(dev, pdata->axi_clk); + } err_axi: - devm_clk_put(dev, pdata->axi_cbb_clk); + if (!IS_ERR_OR_NULL(pdata->axi_cbb_clk)) { + devm_clk_put(dev, pdata->axi_cbb_clk); + } err_axi_cbb: if (!IS_ERR_OR_NULL(pdata->pllrefe_clk)) { devm_clk_put(dev, pdata->pllrefe_clk); } - +err_rx_pcs_m: + if (!IS_ERR_OR_NULL(pdata->tx_m_clk)) { + devm_clk_put(dev, pdata->tx_m_clk); + } +err_tx_m: + if 
(!IS_ERR_OR_NULL(pdata->rx_pcs_input_clk)) { + devm_clk_put(dev, pdata->rx_pcs_input_clk); + } +err_rx_pcs_input: + if (!IS_ERR_OR_NULL(pdata->app_clk)) { + devm_clk_put(dev, pdata->app_clk); + } +err_app: + if (!IS_ERR_OR_NULL(pdata->tx_pcs_clk)) { + devm_clk_put(dev, pdata->tx_pcs_clk); + } +err_tx_pcs: + if (!IS_ERR_OR_NULL(pdata->mac_clk)) { + devm_clk_put(dev, pdata->mac_clk); + } +err_eqos_mac: return ret; } @@ -5397,7 +5815,6 @@ static int ether_get_clks(struct ether_priv_data *pdata) if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) { return ether_get_mgbe_clks(pdata); } - return ether_get_eqos_clks(pdata); } @@ -5873,6 +6290,11 @@ static int ether_parse_dt(struct ether_priv_data *pdata) dev_info(dev, "setting default PTP clk rate as 312.5MHz\n"); pdata->ptp_ref_clock_speed = ETHER_DFLT_PTP_CLK; } + + if (osi_core->pre_sil == 0x1U) { + pdata->ptp_ref_clock_speed = ETHER_DFLT_PTP_CLK_UFPGA; + } + /* read promiscuous mode supported or not */ ret = of_property_read_u32(np, "nvidia,promisc_mode", &pdata->promisc_mode); @@ -5987,9 +6409,28 @@ static int ether_parse_dt(struct ether_priv_data *pdata) } } - if (osi_dma->num_dma_chans != osi_core->num_mtl_queues) { - dev_err(dev, "mismatch in numbers of DMA channel and MTL Q\n"); - return -EINVAL; + if (osi_core->mac != OSI_MAC_HW_MGBE_T26X) { + if (osi_dma->num_dma_chans != osi_core->num_mtl_queues) { + dev_err(dev, "mismatch in numbers of DMA channel and MTL Q\n"); + return -EINVAL; + } + + for (i = 0; i < osi_dma->num_dma_chans; i++) { + if (osi_dma->dma_chans[i] != osi_core->mtl_queues[i]) { + dev_err(dev, + "mismatch in DMA channel and MTL Q number at index %d\n", + i); + return -EINVAL; + } + if (osi_dma->dma_chans[i] == 0) { + ret = 0; + } + } + + if (ret != 0) { + dev_err(dev, "Q0 Must be enabled for rx path\n"); + return -EINVAL; + } } /* Allow to set non zero DMA channel for virtualization */ @@ -6000,23 +6441,6 @@ static int ether_parse_dt(struct ether_priv_data *pdata) ret = -1; } - for (i = 0; i < 
osi_dma->num_dma_chans; i++) { - if (osi_dma->dma_chans[i] != osi_core->mtl_queues[i]) { - dev_err(dev, - "mismatch in DMA channel and MTL Q number at index %d\n", - i); - return -EINVAL; - } - if (osi_dma->dma_chans[i] == 0) { - ret = 0; - } - } - - if (ret != 0) { - dev_err(dev, "Q0 Must be enabled for rx path\n"); - return -EINVAL; - } - ret = of_property_read_u32_array(np, "nvidia,rxq_enable_ctrl", tmp_value, osi_core->num_mtl_queues); @@ -6208,6 +6632,15 @@ static int ether_parse_dt(struct ether_priv_data *pdata) osi_dma->use_riwt = OSI_ENABLE; } + + if (osi_dma->mac == OSI_MAC_HW_MGBE_T26X) { + ret = ether_get_rx_riit(pdev, pdata); + if (ret < 0) { + dev_err(pdata->dev, "failed to get riit info\n"); + return ret; + } + } + /* rx_frames value to be set */ ret = of_property_read_u32(np, "nvidia,rx_frames", &osi_dma->rx_frames); @@ -6231,23 +6664,41 @@ static int ether_parse_dt(struct ether_priv_data *pdata) return -EINVAL; } - if (osi_core->mac != OSI_MAC_HW_EQOS) { - ret = of_property_read_u32(np, "nvidia,uphy-gbe-mode", - &osi_core->uphy_gbe_mode); - if (ret < 0) { + ret = of_property_read_u32(np, "nvidia,uphy-gbe-mode", + &osi_core->uphy_gbe_mode); + if (ret < 0) { + if (osi_core->mac != OSI_MAC_HW_EQOS) { dev_info(dev, "failed to read UPHY GBE mode" "- default to 10G\n"); - osi_core->uphy_gbe_mode = OSI_ENABLE; + osi_core->uphy_gbe_mode = OSI_GBE_MODE_10G; + } else { + dev_info(dev, + "failed to read UPHY GBE mode" + "- default to 1G\n"); + osi_core->uphy_gbe_mode = OSI_GBE_MODE_1G; } + } - if ((osi_core->uphy_gbe_mode != OSI_ENABLE) && - (osi_core->uphy_gbe_mode != OSI_DISABLE)) { + if (osi_core->mac != OSI_MAC_HW_EQOS) { + if ((osi_core->uphy_gbe_mode != OSI_GBE_MODE_5G) && + (osi_core->uphy_gbe_mode != OSI_GBE_MODE_10G) && + (osi_core->uphy_gbe_mode != OSI_UPHY_GBE_MODE_25G)) { dev_err(dev, "Invalid UPHY GBE mode" "- default to 10G\n"); - osi_core->uphy_gbe_mode = OSI_ENABLE; + osi_core->uphy_gbe_mode = OSI_GBE_MODE_10G; } + } else { + if 
((osi_core->uphy_gbe_mode != OSI_GBE_MODE_2_5G) && + (osi_core->uphy_gbe_mode != OSI_GBE_MODE_1G)) { + dev_err(dev, "Invalid UPHY GBE mode" + "- default to 1G\n"); + osi_core->uphy_gbe_mode = OSI_GBE_MODE_1G; + } + } + + if (osi_core->mac != OSI_MAC_HW_EQOS) { ret = of_property_read_u32(np, "nvidia,phy-iface-mode", &osi_core->phy_iface_mode); if (ret < 0) { @@ -6259,26 +6710,38 @@ static int ether_parse_dt(struct ether_priv_data *pdata) if ((osi_core->phy_iface_mode != OSI_XFI_MODE_10G) && (osi_core->phy_iface_mode != OSI_XFI_MODE_5G) && (osi_core->phy_iface_mode != OSI_USXGMII_MODE_10G) && - (osi_core->phy_iface_mode != OSI_USXGMII_MODE_5G)) { + (osi_core->phy_iface_mode != OSI_USXGMII_MODE_5G) && + (osi_core->phy_iface_mode != OSI_XAUI_MODE_25G)) { dev_err(dev, "Invalid PHY iface mode" "- default to 10G\n"); osi_core->phy_iface_mode = OSI_XFI_MODE_10G; } - /* GBE and XFI/USXGMII must be in same mode */ - if ((osi_core->uphy_gbe_mode == OSI_ENABLE) && + /* GBE and XAUI must be in same mode */ + if ((osi_core->uphy_gbe_mode == OSI_UPHY_GBE_MODE_25G) && ((osi_core->phy_iface_mode == OSI_XFI_MODE_5G) || - (osi_core->phy_iface_mode == OSI_USXGMII_MODE_5G))) { - dev_err(dev, "Invalid combination of UPHY 10GBE mode" - "and XFI/USXGMII 5G mode\n"); + (osi_core->phy_iface_mode == OSI_USXGMII_MODE_5G) || + (osi_core->phy_iface_mode == OSI_XFI_MODE_10G) || + (osi_core->phy_iface_mode == OSI_USXGMII_MODE_10G))) { + dev_err(dev, "Invalid combination of UPHY 25GBE mode" + "and XFI/USXGMII/XAUI mode\n"); return -EINVAL; } - if ((osi_core->uphy_gbe_mode == OSI_DISABLE) && + if ((osi_core->uphy_gbe_mode == OSI_GBE_MODE_10G) && + ((osi_core->phy_iface_mode == OSI_XFI_MODE_5G) || + (osi_core->phy_iface_mode == OSI_USXGMII_MODE_5G) || + (osi_core->phy_iface_mode == OSI_XAUI_MODE_25G))) { + dev_err(dev, "Invalid combination of UPHY 10GBE mode" + "and XFI/USXGMII/AUXA mode\n"); + return -EINVAL; + } + if ((osi_core->uphy_gbe_mode == OSI_GBE_MODE_5G) && ((osi_core->phy_iface_mode == 
OSI_XFI_MODE_10G) || - (osi_core->phy_iface_mode == OSI_USXGMII_MODE_10G))) { + (osi_core->phy_iface_mode == OSI_USXGMII_MODE_10G) || + (osi_core->phy_iface_mode == OSI_XAUI_MODE_25G))) { dev_err(dev, "Invalid combination of UPHY 5GBE mode" - "and XFI/USXGMII 10G mode\n"); + "and XFI/USXGMII/XAUI mode\n"); return -EINVAL; } } @@ -6424,6 +6887,7 @@ exit: static void ether_get_num_dma_chan_mtl_q(struct platform_device *pdev, unsigned int *num_dma_chans, unsigned int *mac, + unsigned int *macsec, unsigned int *num_mtl_queues) { struct device_node *np = pdev->dev.of_node; @@ -6434,32 +6898,43 @@ static void ether_get_num_dma_chan_mtl_q(struct platform_device *pdev, ret = of_device_is_compatible(np, "nvidia,nveqos"); if (ret != 0) { *mac = OSI_MAC_HW_EQOS; + *macsec = OSI_MACSEC_T23X; max_chans = OSI_EQOS_MAX_NUM_CHANS; } ret = of_device_is_compatible(np, "nvidia,nvmgbe"); if (ret != 0) { *mac = OSI_MAC_HW_MGBE; - max_chans = OSI_MGBE_MAX_NUM_CHANS; + max_chans = OSI_MGBE_T23X_MAX_NUM_CHANS; + *macsec = OSI_MACSEC_T23X; } ret = of_device_is_compatible(np, "nvidia,tegra234-eqos"); if (ret != 0) { *mac = OSI_MAC_HW_EQOS; + *macsec = OSI_MACSEC_T23X; max_chans = OSI_EQOS_MAX_NUM_CHANS; } ret = of_device_is_compatible(np, "nvidia,tegra234-mgbe"); if (ret != 0) { *mac = OSI_MAC_HW_MGBE; - max_chans = OSI_MGBE_MAX_NUM_PDMA_CHANS; + max_chans = OSI_MGBE_T23X_MAX_NUM_CHANS; + *macsec = OSI_MACSEC_T23X; } if (of_device_is_compatible(np, "nvidia,tegra264-mgbe")) { *mac = OSI_MAC_HW_MGBE_T26X; + *macsec = OSI_MACSEC_T26X; max_chans = OSI_MGBE_MAX_NUM_CHANS; } + if (of_device_is_compatible(np, "nvidia,tegra264-eqos")) { + *mac = OSI_MAC_HW_EQOS; + *macsec = OSI_MACSEC_T26X; + max_chans = OSI_EQOS_MAX_NUM_CHANS; + } + /* parse the number of DMA channels */ ret = of_property_read_u32(np, "nvidia,num-dma-chans", num_dma_chans); if (ret != 0) { @@ -6627,6 +7102,9 @@ static void init_filter_values(struct ether_priv_data *pdata) pdata->num_mac_addr_regs = ETHER_ADDR_REG_CNT_128; } else 
if (pdata->hw_feat.mac_addr32_sel == OSI_ENABLE) { pdata->num_mac_addr_regs = ETHER_ADDR_REG_CNT_64; + } else if (pdata->hw_feat.mac_addr_sel == + (ETHER_ADDR_REG_CNT_48 - 1U)) { + pdata->num_mac_addr_regs = ETHER_ADDR_REG_CNT_48; } else if (pdata->hw_feat.mac_addr_sel == (ETHER_ADDR_REG_CNT_32 - 1U)) { pdata->num_mac_addr_regs = ETHER_ADDR_REG_CNT_32; @@ -6657,10 +7135,7 @@ static void ether_init_rss(struct ether_priv_data *pdata, osi_core->rss.enable = 0; return; } - //TBD:diable rss for T264 - if (osi_core->pre_sil == 0x1U) { - osi_core->rss.enable = 0; - } + /* generate random key */ netdev_rss_key_fill(osi_core->rss.key, sizeof(osi_core->rss.key)); @@ -6693,16 +7168,21 @@ static void ether_init_rss(struct ether_priv_data *pdata, static int ether_probe(struct platform_device *pdev) { struct ether_priv_data *pdata; - unsigned int num_dma_chans, mac, num_mtl_queues, chan; + unsigned int num_dma_chans, mac, macsec, num_mtl_queues, chan; struct osi_core_priv_data *osi_core; struct osi_dma_priv_data *osi_dma; struct osi_ioctl *ioctl_data; struct net_device *ndev; int ret = 0, i; const char *if_name; + const nveu32_t max_dma_chan[OSI_MAX_MAC_IP_TYPES] = { + OSI_EQOS_MAX_NUM_CHANS, + OSI_MGBE_T23X_MAX_NUM_CHANS, + OSI_MGBE_MAX_NUM_CHANS + }; ether_get_num_dma_chan_mtl_q(pdev, &num_dma_chans, - &mac, &num_mtl_queues); + &mac, &macsec, &num_mtl_queues); if (mac == OSI_MAC_HW_MGBE) { ret = pinctrl_pm_select_default_state(&pdev->dev); @@ -6756,6 +7236,7 @@ static int ether_probe(struct platform_device *pdev) osi_dma->num_dma_chans = num_dma_chans; osi_core->mac = mac; + osi_core->macsec = macsec; osi_dma->mac = mac; osi_core->mtu = ndev->mtu; @@ -6852,7 +7333,7 @@ static int ether_probe(struct platform_device *pdev) /* store enabled dma channels into osi_core */ osi_core->num_dma_chans = osi_dma->num_dma_chans; memcpy(osi_core->dma_chans, osi_dma->dma_chans, - (sizeof(nveu32_t) * OSI_MGBE_MAX_NUM_CHANS)); + (sizeof(nveu32_t) * max_dma_chan[mac])); ndev->netdev_ops = 
ðer_netdev_ops; ether_set_ethtool_ops(ndev); diff --git a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h index e2aa5123..f4411048 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h +++ b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h @@ -112,6 +112,7 @@ * @brief Ethernet default PTP clock frequency */ #define ETHER_DFLT_PTP_CLK 312500000U +#define ETHER_DFLT_PTP_CLK_UFPGA 78125000U /** * @brief Ethernet default PTP default RxQ @@ -127,22 +128,28 @@ * @brief Ethernet clk rates */ #define ETHER_RX_INPUT_CLK_RATE 125000000UL +#define ETHER_MGBE_MAC_DIV_RATE_25G 781250000UL #define ETHER_MGBE_MAC_DIV_RATE_10G 312500000UL #define ETHER_MGBE_MAC_DIV_RATE_5G 156250000UL #define ETHER_MGBE_MAC_DIV_RATE_2_5G 78125000UL // gbe_pll2_txclkref (644 MHz) --> programmable link TX_CLK divider // --> link_Tx_clk --> fixed 1/2 gear box divider --> lane TX clk. +#define ETHER_MGBE_TXRX_CLK_XAUI_25G 805664000UL #define ETHER_MGBE_TX_CLK_USXGMII_10G 644531250UL #define ETHER_MGBE_TX_CLK_USXGMII_5G 322265625UL #define ETHER_MGBE_RX_CLK_USXGMII_10G 644531250UL #define ETHER_MGBE_RX_CLK_USXGMII_5G 322265625UL +#define ETHER_MGBE_TXRX_PCS_CLK_XAUI_25G 390625000UL #define ETHER_MGBE_TX_PCS_CLK_USXGMII_10G 156250000UL #define ETHER_MGBE_TX_PCS_CLK_USXGMII_5G 78125000UL #define ETHER_MGBE_RX_PCS_CLK_USXGMII_10G 156250000UL #define ETHER_MGBE_RX_PCS_CLK_USXGMII_5G 78125000UL +#define ETHER_EQOS_TX_CLK_2_5G 312500000UL #define ETHER_EQOS_TX_CLK_1000M 125000000UL #define ETHER_EQOS_TX_CLK_100M 25000000UL #define ETHER_EQOS_TX_CLK_10M 2500000UL +#define ETHER_EQOS_UPHY_LX_TX_2_5G_CLK 195312500UL +#define ETHER_EQOS_UPHY_LX_TX_1G_CLK 78125000UL /** * @brief 1 Second in Neno Second @@ -167,6 +174,7 @@ */ #define ETHER_ADDR_REG_CNT_128 128 #define ETHER_ADDR_REG_CNT_64 64 +#define ETHER_ADDR_REG_CNT_48 48 #define ETHER_ADDR_REG_CNT_32 32 #define ETHER_ADDR_REG_CNT_1 1 /** @} */ @@ -207,8 +215,10 @@ /** * @brief 
Broadcast and MAC address macros */ -#define ETHER_MAC_ADDRESS_INDEX 1U -#define ETHER_BC_ADDRESS_INDEX 0 +#define ETHER_MAC_ADDRESS_INDEX 1U +#define ETHER_BC_ADDRESS_INDEX 0U +#define ETHER_MAC_ADDRESS_INDEX_T26X 0U +#define ETHER_BC_ADDRESS_INDEX_T26X 1U #define ETHER_ADDRESS_MAC 1 #define ETHER_ADDRESS_BC 0 @@ -388,6 +398,8 @@ struct ether_tx_ts_skb_list { struct sk_buff *skb; /** packet id to identify timestamp */ unsigned int pktid; + /** vdmaid to identify timestamp */ + unsigned int vdmaid; /** SKB jiffies to find time */ unsigned long pkt_jiffies; }; @@ -411,13 +423,13 @@ struct ether_timestamp_skb_list { */ struct ether_xtra_stat_counters { /** rx skb allocation failure count */ - nveu64_t re_alloc_rxbuf_failed[OSI_MGBE_MAX_NUM_QUEUES]; + nveu64_t re_alloc_rxbuf_failed[OSI_MGBE_MAX_NUM_CHANS]; /** TX per channel interrupt count */ - nveu64_t tx_normal_irq_n[OSI_MGBE_MAX_NUM_QUEUES]; + nveu64_t tx_normal_irq_n[OSI_MGBE_MAX_NUM_CHANS]; /** TX per channel SW timer callback count */ - nveu64_t tx_usecs_swtimer_n[OSI_MGBE_MAX_NUM_QUEUES]; + nveu64_t tx_usecs_swtimer_n[OSI_MGBE_MAX_NUM_CHANS]; /** RX per channel interrupt count */ - nveu64_t rx_normal_irq_n[OSI_MGBE_MAX_NUM_QUEUES]; + nveu64_t rx_normal_irq_n[OSI_MGBE_MAX_NUM_CHANS]; /** link connect count */ nveu64_t link_connect_count; /** link disconnect count */ @@ -466,6 +478,8 @@ struct ether_priv_data { struct clk *tx_div_clk; /** Receive Monitoring clock */ struct clk *rx_m_clk; + /** Transmit Monitoring clock */ + struct clk *tx_m_clk; /** RX PCS monitoring clock */ struct clk *rx_pcs_m_clk; /** RX PCS input clock */ @@ -645,6 +659,8 @@ struct ether_priv_data { unsigned int fixed_link; /** Flag to represent rx_m clk enabled or not */ bool rx_m_enabled; + /** Flag to represent tx_m clk enabled or not */ + bool tx_m_enabled; /** Flag to represent rx_pcs_m clk enabled or not */ bool rx_pcs_m_enabled; /* Timer value in msec for ether_stats_work thread */ @@ -673,6 +689,8 @@ struct ether_priv_data { struct 
hwtstamp_config ptp_config; /** Flag to hold DT config to disable Rx csum in HW */ uint32_t disable_rx_csum; + /** select Tx queue/dma channel for testing */ + unsigned int tx_queue_select; }; /** diff --git a/drivers/net/ethernet/nvidia/nvethernet/ethtool.c b/drivers/net/ethernet/nvidia/nvethernet/ethtool.c index 261f4ddf..1d85f364 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/ethtool.c +++ b/drivers/net/ethernet/nvidia/nvethernet/ethtool.c @@ -105,6 +105,16 @@ static const struct ether_stats ether_dstrings_stats[] = { ETHER_DMA_EXTRA_STAT(tx_clean_n[7]), ETHER_DMA_EXTRA_STAT(tx_clean_n[8]), ETHER_DMA_EXTRA_STAT(tx_clean_n[9]), + ETHER_DMA_EXTRA_STAT(tx_clean_n[10]), + ETHER_DMA_EXTRA_STAT(tx_clean_n[11]), + ETHER_DMA_EXTRA_STAT(tx_clean_n[12]), + ETHER_DMA_EXTRA_STAT(tx_clean_n[13]), + ETHER_DMA_EXTRA_STAT(tx_clean_n[14]), + ETHER_DMA_EXTRA_STAT(tx_clean_n[15]), + ETHER_DMA_EXTRA_STAT(tx_clean_n[16]), + ETHER_DMA_EXTRA_STAT(tx_clean_n[17]), + ETHER_DMA_EXTRA_STAT(tx_clean_n[18]), + ETHER_DMA_EXTRA_STAT(tx_clean_n[19]), /* Tx/Rx frames */ ETHER_DMA_EXTRA_STAT(tx_pkt_n), @@ -114,26 +124,47 @@ static const struct ether_stats ether_dstrings_stats[] = { ETHER_DMA_EXTRA_STAT(tx_tso_pkt_n), /* Tx/Rx frames per channels/queues */ - ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[0]), - ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[1]), - ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[2]), - ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[3]), - ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[4]), - ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[5]), - ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[6]), - ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[7]), - ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[8]), - ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[9]), - ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[0]), - ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[1]), - ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[2]), - ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[3]), - ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[4]), - ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[5]), - ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[6]), - ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[7]), - ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[8]), - 
ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[9]), + ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[0]), + ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[1]), + ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[2]), + ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[3]), + ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[4]), + ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[5]), + ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[6]), + ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[7]), + ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[8]), + ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[9]), + ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[10]), + ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[11]), + ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[12]), + ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[13]), + ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[14]), + ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[15]), + ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[16]), + ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[17]), + ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[18]), + ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[19]), + + ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[0]), + ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[1]), + ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[2]), + ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[3]), + ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[4]), + ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[5]), + ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[6]), + ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[7]), + ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[8]), + ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[9]), + ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[10]), + ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[11]), + ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[12]), + ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[13]), + ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[14]), + ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[15]), + ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[16]), + ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[17]), + ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[18]), + ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[19]), }; /** @@ -1003,7 +1034,7 @@ static int ether_set_coalesce(struct net_device *dev, ETHER_MAX_RX_COALESCE_USEC); return -EINVAL; - } else if (osi_dma->mac == OSI_MAC_HW_MGBE && + } else if (osi_dma->mac != OSI_MAC_HW_EQOS && (ec->rx_coalesce_usecs > 
ETHER_MAX_RX_COALESCE_USEC || ec->rx_coalesce_usecs < ETHER_MGBE_MIN_RX_COALESCE_USEC)) { netdev_err(dev, diff --git a/drivers/net/ethernet/nvidia/nvethernet/ioctl.c b/drivers/net/ethernet/nvidia/nvethernet/ioctl.c index 7f8146ca..4ba3fab5 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/ioctl.c +++ b/drivers/net/ethernet/nvidia/nvethernet/ioctl.c @@ -562,9 +562,11 @@ static int ether_config_l2_filters(struct net_device *dev, return ret; } - if (osi_core->use_virtualization == OSI_DISABLE) { - dev_err(pdata->dev, "%s Ethernet virualization is not enabled\n", __func__); - return ret; + if (osi_core->pre_sil != OSI_ENABLE) { + if (osi_core->use_virtualization == OSI_DISABLE) { + dev_err(pdata->dev, "%s Ethernet virualization is not enabled\n", __func__); + return ret; + } } if (copy_from_user(&u_l2_filter, (void __user *)ifdata->ptr, sizeof(struct ether_l2_filter)) != 0U) { @@ -589,8 +591,13 @@ static int ether_config_l2_filters(struct net_device *dev, u_l2_filter.mac_addr, ETH_ALEN); ioctl_data.l2_filter.dma_routing = OSI_ENABLE; ioctl_data.l2_filter.addr_mask = OSI_DISABLE; - ioctl_data.l2_filter.dma_chan = osi_dma->dma_chans[0]; - ioctl_data.l2_filter.dma_chansel = OSI_BIT(osi_dma->dma_chans[0]); + ioctl_data.l2_filter.pkt_dup = u_l2_filter.pkt_dup; + if (ioctl_data.l2_filter.pkt_dup) { + ioctl_data.l2_filter.dma_chan = u_l2_filter.dma_chan; + } else { + ioctl_data.l2_filter.dma_chan = osi_dma->dma_chans[0]; + } + ioctl_data.l2_filter.dma_chansel = OSI_BIT_64(ioctl_data.l2_filter.dma_chan); ioctl_data.cmd = OSI_CMD_L2_FILTER; return osi_handle_ioctl(osi_core, &ioctl_data); } diff --git a/drivers/net/ethernet/nvidia/nvethernet/macsec.c b/drivers/net/ethernet/nvidia/nvethernet/macsec.c index 3140716a..16d30d53 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/macsec.c +++ b/drivers/net/ethernet/nvidia/nvethernet/macsec.c @@ -100,9 +100,9 @@ static int macsec_disable_car(struct macsec_priv_data *macsec_pdata) struct ether_priv_data *pdata = 
macsec_pdata->ether_pdata; PRINT_ENTRY(); - if (pdata->osi_core->mac == OSI_MAC_HW_MGBE) { - if (!IS_ERR_OR_NULL(macsec_pdata->mgbe_clk)) { - clk_disable_unprepare(macsec_pdata->mgbe_clk); + if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) { + if (!IS_ERR_OR_NULL(macsec_pdata->macsec_clk)) { + clk_disable_unprepare(macsec_pdata->macsec_clk); } } else { if (!IS_ERR_OR_NULL(macsec_pdata->eqos_tx_clk)) { @@ -129,9 +129,9 @@ static int macsec_enable_car(struct macsec_priv_data *macsec_pdata) int ret = 0; PRINT_ENTRY(); - if (pdata->osi_core->mac == OSI_MAC_HW_MGBE) { - if (!IS_ERR_OR_NULL(macsec_pdata->mgbe_clk)) { - ret = clk_prepare_enable(macsec_pdata->mgbe_clk); + if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) { + if (!IS_ERR_OR_NULL(macsec_pdata->macsec_clk)) { + ret = clk_prepare_enable(macsec_pdata->macsec_clk); if (ret < 0) { dev_err(dev, "failed to enable macsec clk\n"); goto exit; @@ -166,9 +166,9 @@ static int macsec_enable_car(struct macsec_priv_data *macsec_pdata) goto exit; err_ns_rst: - if (pdata->osi_core->mac == OSI_MAC_HW_MGBE) { - if (!IS_ERR_OR_NULL(macsec_pdata->mgbe_clk)) { - clk_disable_unprepare(macsec_pdata->mgbe_clk); + if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) { + if (!IS_ERR_OR_NULL(macsec_pdata->macsec_clk)) { + clk_disable_unprepare(macsec_pdata->macsec_clk); } } else { if (!IS_ERR_OR_NULL(macsec_pdata->eqos_rx_clk)) { @@ -223,7 +223,7 @@ int macsec_open(struct macsec_priv_data *macsec_pdata, IRQF_TRIGGER_NONE, macsec_pdata->irq_name[0], macsec_pdata); if (ret < 0) { - dev_err(dev, "failed to request irq %d\n", __LINE__); + dev_err(dev, "failed to request irq %d\n", ret); goto exit; } @@ -246,7 +246,7 @@ int macsec_open(struct macsec_priv_data *macsec_pdata, macsec_pdata); #endif if (ret < 0) { - dev_err(dev, "failed to request irq %d\n", __LINE__); + dev_err(dev, "failed to request irq %d\n", ret); goto err_ns_irq; } @@ -297,7 +297,26 @@ static int macsec_get_platform_res(struct macsec_priv_data *macsec_pdata) int ret = 0; PRINT_ENTRY(); - 
/* 1. Get resets */ + /* Get irqs */ + macsec_pdata->ns_irq = platform_get_irq_byname(pdev, "macsec-ns-irq"); + if (macsec_pdata->ns_irq < 0) { + dev_err(dev, "failed to get macsec-ns-irq\n"); + ret = macsec_pdata->ns_irq; + goto exit; + } + + macsec_pdata->s_irq = platform_get_irq_byname(pdev, "macsec-s-irq"); + if (macsec_pdata->s_irq < 0) { + dev_err(dev, "failed to get macsec-s-irq\n"); + ret = macsec_pdata->s_irq; + goto exit; + } + + if (pdata->osi_core->pre_sil == 0x1U) { + dev_warn(dev, "%s: Pre-silicon simulation, skipping reset/clk config\n", __func__); + goto exit; + } + /* Get resets */ macsec_pdata->ns_rst = devm_reset_control_get(dev, "macsec_ns_rst"); if (IS_ERR_OR_NULL(macsec_pdata->ns_rst)) { dev_err(dev, "Failed to get macsec_ns_rst\n"); @@ -305,12 +324,16 @@ static int macsec_get_platform_res(struct macsec_priv_data *macsec_pdata) goto exit; } - /* 2. Get clks */ - if (pdata->osi_core->mac == OSI_MAC_HW_MGBE) { - macsec_pdata->mgbe_clk = devm_clk_get(dev, "mgbe_macsec"); - if (IS_ERR(macsec_pdata->mgbe_clk)) { + /* Get clks */ + if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) { + if (pdata->osi_core->mac_ver == OSI_MGBE_MAC_3_10) { + macsec_pdata->macsec_clk = devm_clk_get(dev, "mgbe_macsec"); + } else { + macsec_pdata->macsec_clk = devm_clk_get(dev, "macsec"); + } + if (IS_ERR(macsec_pdata->macsec_clk)) { dev_err(dev, "failed to get macsec clk\n"); - ret = PTR_ERR(macsec_pdata->mgbe_clk); + ret = PTR_ERR(macsec_pdata->macsec_clk); goto exit; } } else { @@ -329,21 +352,6 @@ static int macsec_get_platform_res(struct macsec_priv_data *macsec_pdata) } } - /* 3. 
Get irqs */ - macsec_pdata->ns_irq = platform_get_irq_byname(pdev, "macsec-ns-irq"); - if (macsec_pdata->ns_irq < 0) { - dev_err(dev, "failed to get macsec-ns-irq\n"); - ret = macsec_pdata->ns_irq; - goto exit; - } - - macsec_pdata->s_irq = platform_get_irq_byname(pdev, "macsec-s-irq"); - if (macsec_pdata->s_irq < 0) { - dev_err(dev, "failed to get macsec-s-irq\n"); - ret = macsec_pdata->s_irq; - goto exit; - } - exit: PRINT_EXIT(); return ret; @@ -355,9 +363,9 @@ static void macsec_release_platform_res(struct macsec_priv_data *macsec_pdata) struct device *dev = pdata->dev; PRINT_ENTRY(); - if (pdata->osi_core->mac == OSI_MAC_HW_MGBE) { - if (!IS_ERR_OR_NULL(macsec_pdata->mgbe_clk)) { - devm_clk_put(dev, macsec_pdata->mgbe_clk); + if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) { + if (!IS_ERR_OR_NULL(macsec_pdata->macsec_clk)) { + devm_clk_put(dev, macsec_pdata->macsec_clk); } } else { if (!IS_ERR_OR_NULL(macsec_pdata->eqos_tx_clk)) { @@ -500,6 +508,12 @@ static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa, if (tb_sa[NV_MACSEC_SA_ATTR_LOWEST_PN]) { sc_info->lowest_pn = nla_get_u32(tb_sa[NV_MACSEC_SA_ATTR_LOWEST_PN]); } + if (tb_sa[NV_MACSEC_SA_ATTR_CONF_OFFSET]) { + sc_info->conf_offset = nla_get_u8(tb_sa[NV_MACSEC_SA_ATTR_CONF_OFFSET]); + } + if (tb_sa[NV_MACSEC_SA_ATTR_ENCRYPT]) { + sc_info->encrypt = nla_get_u8(tb_sa[NV_MACSEC_SA_ATTR_ENCRYPT]); + } #ifdef NVPKCS_MACSEC if (pkcs) { if (tb_sa[NV_MACSEC_SA_PKCS_KEY_WRAP]) { @@ -1374,6 +1388,15 @@ int macsec_probe(struct ether_priv_data *pdata) mutex_init(&pdata->macsec_pdata->lock); + /* Read MAC instance id and used in TZ api's */ + ret = of_property_read_u32(np, "nvidia,instance_id", &macsec_pdata->id); + if (ret != 0) { + dev_info(dev, + "DT instance_id missing, setting default to MGBE0\n"); + macsec_pdata->id = 0; + } + + osi_core->instance_id = macsec_pdata->id; /* Get OSI MACsec ops */ if (osi_init_macsec_ops(osi_core) != 0) { dev_err(dev, "osi_init_macsec_ops failed\n"); @@ -1406,6 +1429,7 
@@ int macsec_probe(struct ether_priv_data *pdata) macsec_pdata->nv_macsec_fam.module = THIS_MODULE; macsec_pdata->nv_macsec_fam.ops = nv_macsec_genl_ops; macsec_pdata->nv_macsec_fam.n_ops = ARRAY_SIZE(nv_macsec_genl_ops); + macsec_pdata->nv_macsec_fam.policy = nv_macsec_genl_policy; if (macsec_pdata->is_nv_macsec_fam_registered == OSI_DISABLE) { if (strlen(netdev_name(pdata->ndev)) >= GENL_NAMSIZ) { dev_err(dev, "Intf name %s of len %lu exceed nl_family name size\n", diff --git a/drivers/net/ethernet/nvidia/nvethernet/macsec.h b/drivers/net/ethernet/nvidia/nvethernet/macsec.h index 6cdb828a..83a9bd27 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/macsec.h +++ b/drivers/net/ethernet/nvidia/nvethernet/macsec.h @@ -7,14 +7,19 @@ #include #include #include -#include - +#include /** * @brief Expected number of inputs in BYP or SCI LUT sysfs config */ #define LUT_INPUTS_LEN 39 +/** + * @brief Maximum entries per 1 sysfs node + */ +#define MAX_ENTRIES_PER_SYSFS_NODE 24 +#define MAX_SA_ENTRIES_PER_SYSFS_NODE 33U + /** * @brief Expected number of extra inputs in BYP LUT sysfs config */ @@ -61,6 +66,8 @@ enum nv_macsec_sa_attrs { NV_MACSEC_SA_ATTR_AN, NV_MACSEC_SA_ATTR_PN, NV_MACSEC_SA_ATTR_LOWEST_PN, + NV_MACSEC_SA_ATTR_CONF_OFFSET, + NV_MACSEC_SA_ATTR_ENCRYPT, #ifdef NVPKCS_MACSEC NV_MACSEC_SA_PKCS_KEY_WRAP, NV_MACSEC_SA_PKCS_KEK_HANDLE, @@ -119,6 +126,8 @@ static const struct nla_policy nv_macsec_sa_genl_policy[NUM_NV_MACSEC_SA_ATTR] = [NV_MACSEC_SA_ATTR_AN] = { .type = NLA_U8 }, [NV_MACSEC_SA_ATTR_PN] = { .type = NLA_U32 }, [NV_MACSEC_SA_ATTR_LOWEST_PN] = { .type = NLA_U32 }, + [NV_MACSEC_SA_ATTR_CONF_OFFSET] = { .type = NLA_U8 }, + [NV_MACSEC_SA_ATTR_ENCRYPT] = { .type = NLA_U8 }, #ifdef NVPKCS_MACSEC [NV_MACSEC_SA_PKCS_KEY_WRAP] = { .type = NLA_BINARY, .len = NV_SAK_WRAPPED_LEN,}, @@ -135,9 +144,9 @@ static const struct nla_policy nv_macsec_tz_genl_policy[NUM_NV_MACSEC_TZ_ATTR] = [NV_MACSEC_TZ_ATTR_RW] = { .type = NLA_U8 }, [NV_MACSEC_TZ_ATTR_INDEX] = { .type = 
NLA_U8 }, #ifdef NVPKCS_MACSEC - [NV_MACSEC_SA_PKCS_KEY_WRAP] = { .type = NLA_BINARY, + [NV_MACSEC_TZ_PKCS_KEY_WRAP] = { .type = NLA_BINARY, .len = NV_SAK_WRAPPED_LEN,}, - [NV_MACSEC_SA_PKCS_KEK_HANDLE] = { .type = NLA_U64 }, + [NV_MACSEC_TZ_PKCS_KEK_HANDLE] = { .type = NLA_U64 }, #else [NV_MACSEC_TZ_ATTR_KEY] = { .type = NLA_BINARY, .len = OSI_KEY_LEN_256 }, @@ -154,6 +163,7 @@ static const struct nla_policy nv_macsec_genl_policy[NUM_NV_MACSEC_ATTR] = { [NV_MACSEC_ATTR_TXSC_PORT] = { .type = NLA_U16 }, [NV_MACSEC_ATTR_REPLAY_PROT_EN] = { .type = NLA_U32 }, [NV_MACSEC_ATTR_REPLAY_WINDOW] = { .type = NLA_U32 }, + [NV_MACSEC_ATTR_CIPHER_SUITE] = { .type = NLA_U32 }, [NV_MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED }, [NV_MACSEC_ATTR_TZ_CONFIG] = { .type = NLA_NESTED }, [NV_MACSEC_ATTR_TZ_KT_RESET] = { .type = NLA_NESTED }, @@ -209,11 +219,11 @@ struct nvpkcs_data { struct macsec_priv_data { /** Non secure reset */ struct reset_control *ns_rst; - /** MGBE Macsec clock */ - struct clk *mgbe_clk; - /** EQOS Macsec TX clock */ + /** MGBE/EQOS Macsec clock */ + struct clk *macsec_clk; + /** T23x EQOS Macsec TX clock */ struct clk *eqos_tx_clk; - /** EQOS Macsec RX clock */ + /** T23x EQOS Macsec RX clock */ struct clk *eqos_rx_clk; /** Secure irq */ int s_irq; @@ -243,6 +253,8 @@ struct macsec_priv_data { unsigned short next_supp_idx; /** macsec mutex lock */ struct mutex lock; + /** macsec hw instance id */ + unsigned int id; /** Macsec enable flag in DT */ unsigned int is_macsec_enabled_in_dt; /** Context family name */ diff --git a/drivers/net/ethernet/nvidia/nvethernet/osd.c b/drivers/net/ethernet/nvidia/nvethernet/osd.c index 8fd97a7f..8a3bf420 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/osd.c +++ b/drivers/net/ethernet/nvidia/nvethernet/osd.c @@ -27,7 +27,7 @@ static inline unsigned int ether_get_free_tx_ts_node(struct ether_priv_data *pda } static inline void add_skb_node(struct ether_priv_data *pdata, struct sk_buff *skb, - unsigned int pktid) { + 
unsigned int pktid, unsigned int vdmaid) { struct list_head *head_node, *temp_head_node; struct ether_tx_ts_skb_list *pnode = NULL; unsigned int idx; @@ -72,6 +72,7 @@ empty: pnode = &pdata->tx_ts_skb[idx]; pnode->skb = skb; pnode->pktid = pktid; + pnode->vdmaid = vdmaid; pnode->pkt_jiffies = now_jiffies; dev_dbg(pdata->dev, "%s() SKB %p added for pktid = %x time=%lu\n", @@ -790,7 +791,7 @@ static void osd_transmit_complete(void *priv, const struct osi_tx_swcx *swcx, ndev->stats.tx_packets++; if ((txdone_pkt_cx->flags & OSI_TXDONE_CX_TS_DELAYED) == OSI_TXDONE_CX_TS_DELAYED) { - add_skb_node(pdata, skb, txdone_pkt_cx->pktid); + add_skb_node(pdata, skb, txdone_pkt_cx->pktid, txdone_pkt_cx->vdmaid); /* Consume the timestamp immediately if already available */ if (ether_get_tx_ts(pdata) < 0) schedule_delayed_work(&pdata->tx_ts_work, diff --git a/drivers/net/ethernet/nvidia/nvethernet/sysfs.c b/drivers/net/ethernet/nvidia/nvethernet/sysfs.c index 46eaa440..401f9ec1 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/sysfs.c +++ b/drivers/net/ethernet/nvidia/nvethernet/sysfs.c @@ -76,6 +76,84 @@ static ssize_t ether_desc_dump_store(struct device *dev, static DEVICE_ATTR(desc_dump_enable, (S_IRUGO | S_IWUSR), ether_desc_dump_show, ether_desc_dump_store); + +/** + * @brief Shows current configured tx queue + * + * @param[in] dev: Device data. + * @param[in] attr: Device attribute + * @param[in] buf: Buffer to print the current Tx Q configuration + */ +static ssize_t ether_mac_tx_q_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev); + struct ether_priv_data *pdata = netdev_priv(ndev); + struct osi_core_priv_data *osi_core = pdata->osi_core; + char *start = buf; + + if (osi_core->pre_sil != OSI_ENABLE) { + dev_err(pdata->dev, "Not Allowed. Not pre-sil platform\n"); + return 0; + } + + if (!netif_running(ndev)) { + dev_err(pdata->dev, "Not Allowed. 
Ether interface is not up\n"); + return 0; + } + + buf += scnprintf(buf, PAGE_SIZE, "Current Tx queue: %d\n", + pdata->tx_queue_select); + return (buf - start); +} + +/** + * @brief Choose dma channel for Tx traffic or Tx queue select when non-zero + * + * @param[in] dev: Device data. + * @param[in] attr: Device attribute + * @param[in] buf: Buffer which contains dma channel number or Tx Q + * @param[in] size: size of buffer + * + * @return size of buffer. + */ +static ssize_t ether_mac_tx_q_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev); + struct ether_priv_data *pdata = netdev_priv(ndev); + struct osi_core_priv_data *osi_core = pdata->osi_core; + int ret, bufp = 0, dma_chanel = 0; + + if (osi_core->pre_sil != OSI_ENABLE) { + dev_err(pdata->dev, "Not Allowed. Not pre-sil platform\n"); + return 0; + } + + if (!netif_running(ndev)) { + dev_err(pdata->dev, "Not Allowed. Ether interface is not up\n"); + return size; + } + + ret = sscanf(buf + bufp, "%d", &dma_chanel); + if (ret != 1 || dma_chanel >= OSI_MGBE_MAX_NUM_CHANS) { + dev_err(pdata->dev, "Failed to parse args or invalid dma chan"); + goto exit; + } + pdata->tx_queue_select = dma_chanel; + +exit: + return size; +} + +/** + * @brief Sysfs attribute for MAC Tx Q + * + */ +static DEVICE_ATTR(mac_tx_q, (S_IRUGO | S_IWUSR), + ether_mac_tx_q_show, + ether_mac_tx_q_store); #endif /* OSI_DEBUG */ /** @@ -736,8 +814,11 @@ static void dump_byp_lut(char **buf_p, unsigned short ctlr_sel, struct osi_macsec_lut_config lut_config = {0}; char *buf = *buf_p; int i; + const unsigned int byp_lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_BYP_LUT_MAX_INDEX, + OSI_BYP_LUT_MAX_INDEX_T26X}; - for (i = 0; i <= OSI_BYP_LUT_MAX_INDEX; i++) { + for (i = 0; i <= byp_lut_max_index[osi_core->macsec]; i++) { memset(&lut_config, OSI_NONE, sizeof(lut_config)); lut_config.table_config.ctlr_sel = ctlr_sel; lut_config.lut_sel = 
OSI_LUT_SEL_BYPASS; @@ -820,6 +901,10 @@ static ssize_t macsec_byp_lut_store(struct device *dev, struct osi_core_priv_data *osi_core = pdata->osi_core; struct osi_macsec_lut_config lut_config; int ret, bufp, ctrl_port; + unsigned int macsec = osi_core->macsec; + const unsigned int byp_lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_BYP_LUT_MAX_INDEX, + OSI_BYP_LUT_MAX_INDEX_T26X}; if (!netif_running(ndev)) { dev_err(pdata->dev, "Not Allowed. Ether interface is not up\n"); @@ -844,9 +929,9 @@ static ssize_t macsec_byp_lut_store(struct device *dev, lut_config.lut_sel = OSI_LUT_SEL_BYPASS; lut_config.table_config.rw = OSI_LUT_WRITE; /* Rest of LUT attributes are filled by parse_inputs() */ - if (lut_config.table_config.index > OSI_BYP_LUT_MAX_INDEX) { + if (lut_config.table_config.index > byp_lut_max_index[macsec]) { dev_err(dev, "%s: Index can't be > %d\n", __func__, - OSI_BYP_LUT_MAX_INDEX); + byp_lut_max_index[macsec]); goto exit; } @@ -871,13 +956,13 @@ static DEVICE_ATTR(macsec_byp_lut, (S_IRUGO | S_IWUSR), macsec_byp_lut_store); /** - * @brief Shows the current macsec statitics counters + * @brief Shows the current Rx macsec statitics counters * * @param[in] dev: Device data. * @param[in] attr: Device attribute * @param[in] buf: Buffer to print the current counters */ -static ssize_t macsec_mmc_counters_show(struct device *dev, +static ssize_t macsec_mmc_counters_show_rx(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev); @@ -886,6 +971,9 @@ static ssize_t macsec_mmc_counters_show(struct device *dev, struct osi_macsec_mmc_counters *mmc = &osi_core->macsec_mmc; unsigned short i; char *start = buf; + unsigned int macsec = osi_core->macsec; + const unsigned int sc_idx_max[MAX_MACSEC_IP_TYPES] = { + OSI_SC_INDEX_MAX, OSI_SC_INDEX_MAX_T26X}; if (!netif_running(ndev)) { dev_err(pdata->dev, "Not Allowed. 
Ether interface is not up\n"); @@ -893,17 +981,6 @@ static ssize_t macsec_mmc_counters_show(struct device *dev, } osi_macsec_read_mmc(osi_core); - buf += scnprintf(buf, PAGE_SIZE, "tx_pkts_untaged:\t%llu\n", - mmc->tx_pkts_untaged); - buf += scnprintf(buf, PAGE_SIZE, "tx_pkts_too_long:\t%llu\n", - mmc->tx_pkts_too_long); - buf += scnprintf(buf, PAGE_SIZE, "tx_octets_protected:\t%llu\n", - mmc->tx_octets_protected); - for (i = 0; i < OSI_MACSEC_SC_INDEX_MAX; i++) { - buf += scnprintf(buf, PAGE_SIZE, "tx_pkts_protected sc%d:\t%llu\n", - i, mmc->tx_pkts_protected[i]); - } - buf += scnprintf(buf, PAGE_SIZE, "rx_pkts_no_tag: \t%llu\n", mmc->rx_pkts_no_tag); buf += scnprintf(buf, PAGE_SIZE, "rx_pkts_untagged:\t%llu\n", @@ -918,16 +995,18 @@ static ssize_t macsec_mmc_counters_show(struct device *dev, mmc->rx_pkts_overrun); buf += scnprintf(buf, PAGE_SIZE, "rx_octets_validated:\t%llu\n", mmc->rx_octets_validated); + buf += scnprintf(buf, PAGE_SIZE, "rx_octets_decrypted:\t%llu\n", + mmc->rx_octets_decrypted); - for (i = 0; i < OSI_MACSEC_SC_INDEX_MAX; i++) { + for (i = 0; i <= sc_idx_max[macsec]; i++) { buf += scnprintf(buf, PAGE_SIZE, "rx_pkts_invalid sc%d:\t%llu\n", i, mmc->in_pkts_invalid[i]); } - for (i = 0; i < OSI_MACSEC_SC_INDEX_MAX; i++) { + for (i = 0; i <= sc_idx_max[macsec]; i++) { buf += scnprintf(buf, PAGE_SIZE, "rx_pkts_delayed sc%d:\t%llu\n", i, mmc->rx_pkts_delayed[i]); } - for (i = 0; i < OSI_MACSEC_SC_INDEX_MAX; i++) { + for (i = 0; i <= sc_idx_max[macsec]; i++) { buf += scnprintf(buf, PAGE_SIZE, "rx_pkts_ok sc%d: \t%llu\n", i, mmc->rx_pkts_ok[i]); } @@ -936,11 +1015,64 @@ static ssize_t macsec_mmc_counters_show(struct device *dev, } /** - * @brief Sysfs attribute for MACsec irq stats + * @brief Sysfs attribute for MACsec rx mmc counters * */ -static DEVICE_ATTR(macsec_mmc_counters, (S_IRUGO | S_IWUSR), - macsec_mmc_counters_show, +static DEVICE_ATTR(macsec_mmc_counters_rx, (S_IRUGO | S_IWUSR), + macsec_mmc_counters_show_rx, + NULL); + + +/** + * @brief 
Shows the current Tx macsec statitics counters + * + * @param[in] dev: Device data. + * @param[in] attr: Device attribute + * @param[in] buf: Buffer to print the current counters + */ +static ssize_t macsec_mmc_counters_show_tx(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev); + struct ether_priv_data *pdata = netdev_priv(ndev); + struct osi_core_priv_data *osi_core = pdata->osi_core; + struct osi_macsec_mmc_counters *mmc = &osi_core->macsec_mmc; + unsigned short i; + char *start = buf; + unsigned int macsec = osi_core->macsec; + const unsigned int sc_idx_max[MAX_MACSEC_IP_TYPES] = { + OSI_SC_INDEX_MAX, OSI_SC_INDEX_MAX_T26X}; + + if (!netif_running(ndev)) { + dev_err(pdata->dev, "Not Allowed. Ether interface is not up\n"); + return 0; + } + + osi_macsec_read_mmc(osi_core); + buf += scnprintf(buf, PAGE_SIZE, "tx_pkts_untaged:\t%llu\n", + mmc->tx_pkts_untaged); + buf += scnprintf(buf, PAGE_SIZE, "tx_pkts_too_long:\t%llu\n", + mmc->tx_pkts_too_long); + buf += scnprintf(buf, PAGE_SIZE, "tx_octets_protected:\t%llu\n", + mmc->tx_octets_protected); + buf += scnprintf(buf, PAGE_SIZE, "tx_octets_encrypted:\t%llu\n", + mmc->tx_octets_encrypted); + for (i = 0; i <= sc_idx_max[macsec]; i++) { + buf += scnprintf(buf, PAGE_SIZE, "tx_pkts_protected sc%d:\t%llu\n", + i, mmc->tx_pkts_protected[i]); + buf += scnprintf(buf, PAGE_SIZE, "tx_pkts_encrypted sc%d:\t%llu\n", + i, mmc->tx_pkts_encrypted[i]); + } + + return (buf - start); +} + +/** + * @brief Sysfs attribute for MACsec tx mmc counters + * + */ +static DEVICE_ATTR(macsec_mmc_counters_tx, (S_IRUGO | S_IWUSR), + macsec_mmc_counters_show_tx, NULL); #ifdef DEBUG_MACSEC @@ -1007,6 +1139,11 @@ static ssize_t macsec_dbg_buffer_show(struct device *dev, dev_err(pdata->dev, "Not Allowed. 
Ether interface is not up\n"); return 0; } + if (osi_core->macsec == OSI_MACSEC_T26X) { + dev_err(pdata->dev, "T264 Doesn't support MACSEC debug buffer feature\n"); + return 0; + } + buf += scnprintf(buf, PAGE_SIZE, "Tx Dbg Buffers:\n"); dump_dbg_buffers(&buf, OSI_CTLR_SEL_TX, osi_core); @@ -1052,6 +1189,10 @@ static ssize_t macsec_dbg_events_store(struct device *dev, dev_err(pdata->dev, "Not Allowed. Ether interface is not up\n"); return size; } + if (osi_core->macsec == OSI_MACSEC_T26X) { + dev_err(pdata->dev, "T264 Doesn't support MACSEC debug buffer feature\n"); + return size; + } ret = sscanf(buf, "%hu %1x%1x%1x%1x%1x%1x%1x%1x%1x%1x%1x%1x", &controller, &events[11], &events[10], &events[9], &events[8], @@ -1097,65 +1238,34 @@ static DEVICE_ATTR(macsec_dbg_events, (S_IRUGO | S_IWUSR), #endif /* DEBUG_MACSEC */ /** - * @brief Shows the current SCI LUT configuration + * @brief Shows the current SCI LUT configuration for RX * * @param[in] dev: Device data. * @param[in] attr: Device attribute * @param[in] buf: Buffer to print the current SCI LUT configuration */ -static ssize_t macsec_sci_lut_show(struct device *dev, +static ssize_t macsec_sci_lut_show_rx(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev); struct ether_priv_data *pdata = netdev_priv(ndev); struct osi_core_priv_data *osi_core = pdata->osi_core; struct osi_macsec_lut_config lut_config = {0}; - unsigned int an_valid; int i; char *start = buf; + unsigned int macsec = osi_core->macsec; + const unsigned int sc_lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SC_INDEX_MAX, + OSI_SC_INDEX_MAX_T26X}; if (!netif_running(ndev)) { dev_err(pdata->dev, "Not Allowed. 
Ether interface is not up\n"); return 0; } - buf += scnprintf(buf, PAGE_SIZE, "Tx:\n"); - - for (i = 0; i <= OSI_SC_LUT_MAX_INDEX; i++) { - memset(&lut_config, OSI_NONE, sizeof(lut_config)); - lut_config.table_config.ctlr_sel = OSI_CTLR_SEL_TX; - lut_config.lut_sel = OSI_LUT_SEL_SCI; - lut_config.table_config.rw = OSI_LUT_READ; - lut_config.table_config.index = i; - if (osi_macsec_config_lut(osi_core, &lut_config) < 0) { - dev_err(dev, "%s: Failed to read SCI LUT\n", __func__); - goto exit; - } else { - buf += scnprintf(buf, PAGE_SIZE, "%d.\t", i); - if ((lut_config.flags & OSI_LUT_FLAGS_ENTRY_VALID) != - OSI_LUT_FLAGS_ENTRY_VALID) { - buf += scnprintf(buf, PAGE_SIZE, "Invalid\n"); - memset(&lut_config, 0, sizeof(lut_config)); - continue; - } - format_output(&buf, &lut_config); - /* Tx SCI LUT output field */ - an_valid = lut_config.sci_lut_out.an_valid; - buf += scnprintf(buf, PAGE_SIZE, "AN3: %d AN2: %d " - "AN1: %d AN0: %d ", - an_valid & OSI_AN3_VALID ? 1 : 0, - an_valid & OSI_AN2_VALID ? 1 : 0, - an_valid & OSI_AN1_VALID ? 1 : 0, - an_valid & OSI_AN0_VALID ? 1 : 0); - buf += scnprintf(buf, PAGE_SIZE, "sc_index: %d\n", - lut_config.sci_lut_out.sc_index); - memset(&lut_config, 0, sizeof(lut_config)); - } - } - buf += scnprintf(buf, PAGE_SIZE, "Rx:\n"); - for (i = 0; i <= OSI_SC_LUT_MAX_INDEX; i++) { + for (i = 0; i <= sc_lut_max_index[macsec]; i++) { memset(&lut_config, OSI_NONE, sizeof(lut_config)); lut_config.table_config.ctlr_sel = OSI_CTLR_SEL_RX; lut_config.lut_sel = OSI_LUT_SEL_SCI; @@ -1193,6 +1303,162 @@ exit: return (buf - start); } +/** + * @brief Sysfs attribute for MACsec SCI LUT config + * + */ +static DEVICE_ATTR(macsec_sci_lut_rx, (S_IRUGO | S_IWUSR), + macsec_sci_lut_show_rx, + NULL); + +/** + * @brief Shows the current SCI LUT configuration for TX from Index 0 to 23 + * + * @param[in] dev: Device data. 
+ * @param[in] attr: Device attribute + * @param[in] buf: Buffer to print the current SCI LUT configuration + */ +static ssize_t macsec_sci_lut_show_tx(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev); + struct ether_priv_data *pdata = netdev_priv(ndev); + struct osi_core_priv_data *osi_core = pdata->osi_core; + struct osi_macsec_lut_config lut_config = {0}; + unsigned int an_valid; + int i; + char *start = buf; + unsigned int macsec = osi_core->macsec; + const unsigned int sc_lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SC_INDEX_MAX, + OSI_SC_INDEX_MAX_T26X}; + + if (!netif_running(ndev)) { + dev_err(pdata->dev, "Not Allowed. Ether interface is not up\n"); + return 0; + } + + buf += scnprintf(buf, PAGE_SIZE, "Tx:\n"); + + for (i = 0; i < (sc_lut_max_index[macsec] > MAX_ENTRIES_PER_SYSFS_NODE? + MAX_ENTRIES_PER_SYSFS_NODE: sc_lut_max_index[macsec]); + i++) { + memset(&lut_config, OSI_NONE, sizeof(lut_config)); + lut_config.table_config.ctlr_sel = OSI_CTLR_SEL_TX; + lut_config.lut_sel = OSI_LUT_SEL_SCI; + lut_config.table_config.rw = OSI_LUT_READ; + lut_config.table_config.index = i; + if (osi_macsec_config_lut(osi_core, &lut_config) < 0) { + dev_err(dev, "%s: Failed to read SCI LUT\n", __func__); + goto exit; + } else { + buf += scnprintf(buf, PAGE_SIZE, "%d.\t", i); + if ((lut_config.flags & OSI_LUT_FLAGS_ENTRY_VALID) != + OSI_LUT_FLAGS_ENTRY_VALID) { + buf += scnprintf(buf, PAGE_SIZE, "Invalid\n"); + memset(&lut_config, 0, sizeof(lut_config)); + continue; + } + format_output(&buf, &lut_config); + /* Tx SCI LUT output field */ + an_valid = lut_config.sci_lut_out.an_valid; + buf += scnprintf(buf, PAGE_SIZE, "AN3: %d AN2: %d " + "AN1: %d AN0: %d ", + an_valid & OSI_AN3_VALID ? 1 : 0, + an_valid & OSI_AN2_VALID ? 1 : 0, + an_valid & OSI_AN1_VALID ? 1 : 0, + an_valid & OSI_AN0_VALID ? 
1 : 0); + buf += scnprintf(buf, PAGE_SIZE, "sc_index: %d\n", + lut_config.sci_lut_out.sc_index); + memset(&lut_config, 0, sizeof(lut_config)); + } + } + +exit: + return (buf - start); +} + +/** + * @brief Sysfs attribute for MACsec SCI LUT config + * + */ +static DEVICE_ATTR(macsec_sci_lut_tx, (S_IRUGO | S_IWUSR), + macsec_sci_lut_show_tx, + NULL); + +/** + * @brief Shows the current SCI LUT configuration for TX from Index 24 + * + * @param[in] dev: Device data. + * @param[in] attr: Device attribute + * @param[in] buf: Buffer to print the current SCI LUT configuration + */ +static ssize_t macsec_sci_lut_show_tx_2(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev); + struct ether_priv_data *pdata = netdev_priv(ndev); + struct osi_core_priv_data *osi_core = pdata->osi_core; + struct osi_macsec_lut_config lut_config = {0}; + unsigned int an_valid; + int i; + char *start = buf; + unsigned int macsec = osi_core->macsec; + const unsigned int sc_lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SC_INDEX_MAX, + OSI_SC_INDEX_MAX_T26X}; + + if (!netif_running(ndev)) { + dev_err(pdata->dev, "Not Allowed. 
Ether interface is not up\n"); + return 0; + } + + buf += scnprintf(buf, PAGE_SIZE, "Tx:\n"); + + for (i = MAX_ENTRIES_PER_SYSFS_NODE; i <= sc_lut_max_index[macsec]; i++) { + memset(&lut_config, OSI_NONE, sizeof(lut_config)); + lut_config.table_config.ctlr_sel = OSI_CTLR_SEL_TX; + lut_config.lut_sel = OSI_LUT_SEL_SCI; + lut_config.table_config.rw = OSI_LUT_READ; + lut_config.table_config.index = i; + if (osi_macsec_config_lut(osi_core, &lut_config) < 0) { + dev_err(dev, "%s: Failed to read SCI LUT\n", __func__); + goto exit; + } else { + buf += scnprintf(buf, PAGE_SIZE, "%d.\t", i); + if ((lut_config.flags & OSI_LUT_FLAGS_ENTRY_VALID) != + OSI_LUT_FLAGS_ENTRY_VALID) { + buf += scnprintf(buf, PAGE_SIZE, "Invalid\n"); + memset(&lut_config, 0, sizeof(lut_config)); + continue; + } + format_output(&buf, &lut_config); + /* Tx SCI LUT output field */ + an_valid = lut_config.sci_lut_out.an_valid; + buf += scnprintf(buf, PAGE_SIZE, "AN3: %d AN2: %d " + "AN1: %d AN0: %d ", + an_valid & OSI_AN3_VALID ? 1 : 0, + an_valid & OSI_AN2_VALID ? 1 : 0, + an_valid & OSI_AN1_VALID ? 1 : 0, + an_valid & OSI_AN0_VALID ? 1 : 0); + buf += scnprintf(buf, PAGE_SIZE, "sc_index: %d\n", + lut_config.sci_lut_out.sc_index); + memset(&lut_config, 0, sizeof(lut_config)); + } + } + +exit: + return (buf - start); +} + +/** + * @brief Sysfs attribute for MACsec SCI LUT config + * + */ +static DEVICE_ATTR(macsec_sci_lut_tx_2, (S_IRUGO | S_IWUSR), + macsec_sci_lut_show_tx_2, + NULL); + #define SCI_LUT_INPUTS 13 /** @@ -1218,6 +1484,10 @@ static ssize_t macsec_sci_lut_store(struct device *dev, int temp[OSI_SCI_LEN]; int i; int sc_index; + unsigned int macsec = osi_core->macsec; + const unsigned int sc_lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SC_INDEX_MAX, + OSI_SC_INDEX_MAX_T26X}; if (!netif_running(ndev)) { dev_err(pdata->dev, "Not Allowed. 
Ether interface is not up\n"); @@ -1243,14 +1513,14 @@ static ssize_t macsec_sci_lut_store(struct device *dev, lut_config.lut_sel = OSI_LUT_SEL_SCI; lut_config.table_config.rw = OSI_LUT_WRITE; /* Rest of LUT attributes are filled by parse_inputs() */ - if (lut_config.table_config.index > OSI_SC_LUT_MAX_INDEX) { + if (lut_config.table_config.index > sc_lut_max_index[macsec]) { dev_err(dev, "%s: Index can't be > %d\n", __func__, - OSI_SC_LUT_MAX_INDEX); + sc_lut_max_index[macsec]); goto exit; } - if (sc_index > OSI_SC_LUT_MAX_INDEX) { + if (sc_index > sc_lut_max_index[macsec]) { dev_err(dev, "%s: SC Index can't be > %d\n", __func__, - OSI_SC_LUT_MAX_INDEX); + sc_lut_max_index[macsec]); goto exit; } @@ -1285,18 +1555,20 @@ exit: * */ static DEVICE_ATTR(macsec_sci_lut, (S_IRUGO | S_IWUSR), - macsec_sci_lut_show, + NULL, macsec_sci_lut_store); #ifdef MACSEC_KEY_PROGRAM static void dump_kt(char **buf_p, unsigned short ctlr_sel, - struct osi_core_priv_data *osi_core) + struct osi_core_priv_data *osi_core, + unsigned short start_index, + unsigned short end_index) { struct osi_macsec_kt_config kt_config = {0}; char *buf = *buf_p; int i, j; - for (i = 0; i <= OSI_TABLE_INDEX_MAX; i++) { + for (i = start_index; i <= end_index; i++) { memset(&kt_config, OSI_NONE, sizeof(kt_config)); kt_config.table_config.ctlr_sel = ctlr_sel; kt_config.table_config.rw = OSI_LUT_READ; @@ -1345,6 +1617,9 @@ static ssize_t macsec_tx_kt_show(struct device *dev, struct ether_priv_data *pdata = netdev_priv(ndev); struct osi_core_priv_data *osi_core = pdata->osi_core; char *start = buf; + const nveu32_t lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SA_LUT_MAX_INDEX, + OSI_SA_LUT_MAX_INDEX_T26X}; if (!netif_running(ndev)) { dev_err(pdata->dev, "Not Allowed. 
Ether interface is not up\n"); @@ -1352,11 +1627,83 @@ static ssize_t macsec_tx_kt_show(struct device *dev, } buf += scnprintf(buf, PAGE_SIZE, "Tx:\n"); - dump_kt(&buf, OSI_CTLR_SEL_TX, osi_core); + dump_kt(&buf, OSI_CTLR_SEL_TX, osi_core, 0U, + (lut_max_index[osi_core->macsec] <= MAX_SA_ENTRIES_PER_SYSFS_NODE? + lut_max_index[osi_core->macsec]: + MAX_SA_ENTRIES_PER_SYSFS_NODE - 1U)); return (buf - start); } +/** + * @brief Shows the current macsec Tx key table from 33 to 65 indices + * + * @param[in] dev: Device data. + * @param[in] attr: Device attribute + * @param[in] buf: Buffer to print the current LUT configuration + */ +static ssize_t macsec_tx_kt_show_2(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev); + struct ether_priv_data *pdata = netdev_priv(ndev); + struct osi_core_priv_data *osi_core = pdata->osi_core; + char *start = buf; + const nveu32_t lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SA_LUT_MAX_INDEX, + OSI_SA_LUT_MAX_INDEX_T26X}; + + if (!netif_running(ndev)) { + dev_err(pdata->dev, "Not Allowed. Ether interface is not up\n"); + return 0; + } + if (lut_max_index[osi_core->macsec] <= MAX_SA_ENTRIES_PER_SYSFS_NODE) { + dev_err(pdata->dev, "2nd node of KT lut is not allowed for this platform\n"); + return 0; + } + + buf += scnprintf(buf, PAGE_SIZE, "Tx:\n"); + dump_kt(&buf, OSI_CTLR_SEL_TX, osi_core, MAX_SA_ENTRIES_PER_SYSFS_NODE, + (2U * MAX_SA_ENTRIES_PER_SYSFS_NODE) - 1U); + + return (buf - start); +} + +/** + * @brief Shows the current macsec Tx key table from 66 to 96 indices + * + * @param[in] dev: Device data. 
+ * @param[in] attr: Device attribute + * @param[in] buf: Buffer to print the current LUT configuration + */ +static ssize_t macsec_tx_kt_show_3(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev); + struct ether_priv_data *pdata = netdev_priv(ndev); + struct osi_core_priv_data *osi_core = pdata->osi_core; + char *start = buf; + const nveu32_t lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SA_LUT_MAX_INDEX, + OSI_SA_LUT_MAX_INDEX_T26X}; + + if (!netif_running(ndev)) { + dev_err(pdata->dev, "Not Allowed. Ether interface is not up\n"); + return 0; + } + if (lut_max_index[osi_core->macsec] <= (2U * MAX_SA_ENTRIES_PER_SYSFS_NODE)) { + dev_err(pdata->dev, "3rd node of KT lut is not allowed for this platform\n"); + return 0; + } + + buf += scnprintf(buf, PAGE_SIZE, "Tx:\n"); + dump_kt(&buf, OSI_CTLR_SEL_TX, osi_core, (2U * MAX_SA_ENTRIES_PER_SYSFS_NODE), + lut_max_index[osi_core->macsec]); + + return (buf - start); +} + + /** * @brief Shows the current macsec Rx key table * @@ -1371,6 +1718,9 @@ static ssize_t macsec_rx_kt_show(struct device *dev, struct ether_priv_data *pdata = netdev_priv(ndev); struct osi_core_priv_data *osi_core = pdata->osi_core; char *start = buf; + const nveu32_t lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SA_LUT_MAX_INDEX, + OSI_SA_LUT_MAX_INDEX_T26X}; if (!netif_running(ndev)) { dev_err(pdata->dev, "Not Allowed. Ether interface is not up\n"); @@ -1378,7 +1728,78 @@ static ssize_t macsec_rx_kt_show(struct device *dev, } buf += scnprintf(buf, PAGE_SIZE, "Rx:\n"); - dump_kt(&buf, OSI_CTLR_SEL_RX, osi_core); + dump_kt(&buf, OSI_CTLR_SEL_RX, osi_core, 0U, + (lut_max_index[osi_core->macsec] <= MAX_SA_ENTRIES_PER_SYSFS_NODE? + lut_max_index[osi_core->macsec]: + MAX_SA_ENTRIES_PER_SYSFS_NODE - 1U)); + + return (buf - start); +} + +/** + * @brief Shows the current macsec Rx key table from 33 to 65 indices + * + * @param[in] dev: Device data. 
+ * @param[in] attr: Device attribute + * @param[in] buf: Buffer to print the current LUT configuration + */ +static ssize_t macsec_rx_kt_show_2(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev); + struct ether_priv_data *pdata = netdev_priv(ndev); + struct osi_core_priv_data *osi_core = pdata->osi_core; + char *start = buf; + const nveu32_t lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SA_LUT_MAX_INDEX, + OSI_SA_LUT_MAX_INDEX_T26X}; + + if (!netif_running(ndev)) { + dev_err(pdata->dev, "Not Allowed. Ether interface is not up\n"); + return 0; + } + if (lut_max_index[osi_core->macsec] <= MAX_SA_ENTRIES_PER_SYSFS_NODE) { + dev_err(pdata->dev, "2nd node of KT lut is not allowed for this platform\n"); + return 0; + } + + buf += scnprintf(buf, PAGE_SIZE, "Rx:\n"); + dump_kt(&buf, OSI_CTLR_SEL_RX, osi_core, MAX_SA_ENTRIES_PER_SYSFS_NODE, + (2U * MAX_SA_ENTRIES_PER_SYSFS_NODE) - 1U); + + return (buf - start); +} + +/** + * @brief Shows the current macsec Rx key table from 66 to 96 indices + * + * @param[in] dev: Device data. + * @param[in] attr: Device attribute + * @param[in] buf: Buffer to print the current LUT configuration + */ +static ssize_t macsec_rx_kt_show_3(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev); + struct ether_priv_data *pdata = netdev_priv(ndev); + struct osi_core_priv_data *osi_core = pdata->osi_core; + char *start = buf; + const nveu32_t lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SA_LUT_MAX_INDEX, + OSI_SA_LUT_MAX_INDEX_T26X}; + + if (!netif_running(ndev)) { + dev_err(pdata->dev, "Not Allowed. 
Ether interface is not up\n"); + return 0; + } + if (lut_max_index[osi_core->macsec] <= (2U * MAX_SA_ENTRIES_PER_SYSFS_NODE)) { + dev_err(pdata->dev, "3rd node of KT lut is not allowed for this platform\n"); + return 0; + } + + buf += scnprintf(buf, PAGE_SIZE, "Rx:\n"); + dump_kt(&buf, OSI_CTLR_SEL_RX, osi_core, (2U * MAX_SA_ENTRIES_PER_SYSFS_NODE), + lut_max_index[osi_core->macsec]); return (buf - start); } @@ -1408,6 +1829,9 @@ static ssize_t macsec_kt_store(struct device *dev, struct osi_macsec_kt_config kt_config = {0}; int temp[OSI_KEY_LEN_256] = {0}; unsigned char sak[OSI_KEY_LEN_256] = {0}; + const nveu32_t lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SA_LUT_MAX_INDEX, + OSI_SA_LUT_MAX_INDEX_T26X}; int valid, index, ctlr, key256bit; int i, ret, bufp = 0; @@ -1441,7 +1865,7 @@ static ssize_t macsec_kt_store(struct device *dev, } } - if ((index > OSI_TABLE_INDEX_MAX) || + if ((index > lut_max_index[osi_core->macsec]) || (valid != OSI_ENABLE && valid != OSI_DISABLE) || (ctlr != OSI_CTLR_SEL_TX && ctlr != OSI_CTLR_SEL_RX)) { dev_err(pdata->dev, "%s: Invalid inputs\n", __func__); @@ -1455,7 +1879,7 @@ static ssize_t macsec_kt_store(struct device *dev, /* HKEY GENERATION */ tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); if (crypto_cipher_setkey(tfm, sak, OSI_KEY_LEN_128)) { - pr_err("%s: Failed to set cipher key for H generation", + dev_err(pdata->dev,"%s: Failed to set cipher key for H generation", __func__); goto exit; } @@ -1491,8 +1915,11 @@ static ssize_t macsec_kt_store(struct device *dev, ret = osi_macsec_config_kt(osi_core, &kt_config); if (ret < 0) { - pr_err("%s: Failed to set SAK", __func__); + dev_err(pdata->dev,"%s: Failed to set SAK", __func__); goto exit; + } else { + dev_err(pdata->dev,"%s: Added KT LUT idx: %d", __func__, + kt_config.table_config.index); } exit: @@ -1522,6 +1949,39 @@ static DEVICE_ATTR(macsec_tx_kt, (S_IRUGO | S_IWUSR), static DEVICE_ATTR(macsec_rx_kt, (S_IRUGO | S_IWUSR), macsec_rx_kt_show, NULL); + +/** + * @brief Sysfs 
attribute for MACsec key table (show Tx table) + * + */ +static DEVICE_ATTR(macsec_tx_kt_2, (S_IRUGO | S_IWUSR), + macsec_tx_kt_show_2, + NULL); + +/** + * @brief Sysfs attribute for MACsec key table (show Rx table) + * + */ +static DEVICE_ATTR(macsec_rx_kt_2, (S_IRUGO | S_IWUSR), + macsec_rx_kt_show_2, + NULL); +/** + * @brief Sysfs attribute for MACsec key table (show Tx table) + * + */ +static DEVICE_ATTR(macsec_tx_kt_3, (S_IRUGO | S_IWUSR), + macsec_tx_kt_show_3, + NULL); + +/** + * @brief Sysfs attribute for MACsec key table (show Rx table) + * + */ +static DEVICE_ATTR(macsec_rx_kt_3, (S_IRUGO | S_IWUSR), + macsec_rx_kt_show_3, + NULL); + + #endif /* MACSEC_KEY_PROGRAM */ static void dump_sc_state_lut(char **buf_p, unsigned short ctlr_sel, @@ -1530,8 +1990,11 @@ static void dump_sc_state_lut(char **buf_p, unsigned short ctlr_sel, struct osi_macsec_lut_config lut_config = {0}; char *buf = *buf_p; int i; + const unsigned int sc_lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SC_INDEX_MAX, + OSI_SC_INDEX_MAX_T26X}; - for (i = 0; i <= OSI_SC_LUT_MAX_INDEX; i++) { + for (i = 0; i <= sc_lut_max_index[osi_core->macsec]; i++) { memset(&lut_config, OSI_NONE, sizeof(lut_config)); lut_config.table_config.ctlr_sel = ctlr_sel; lut_config.table_config.rw = OSI_LUT_READ; @@ -1601,6 +2064,9 @@ static ssize_t macsec_sc_state_lut_store(struct device *dev, int index, ctlr; int ret; nveu32_t curr_an; + const unsigned int sc_lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SC_INDEX_MAX, + OSI_SC_INDEX_MAX_T26X}; if (!netif_running(ndev)) { dev_err(pdata->dev, "Not Allowed. 
Ether interface is not up\n"); @@ -1613,7 +2079,7 @@ static ssize_t macsec_sc_state_lut_store(struct device *dev, goto exit; } - if ((index > OSI_SC_LUT_MAX_INDEX) || + if ((index > sc_lut_max_index[osi_core->macsec]) || (ctlr != OSI_CTLR_SEL_TX && ctlr != OSI_CTLR_SEL_RX) || (curr_an >= OSI_MAX_NUM_SA)) { dev_err(pdata->dev, "%s:Invalid inputs", __func__); @@ -1652,8 +2118,11 @@ static void dump_sa_state_lut(char **buf_p, unsigned short ctlr_sel, struct osi_macsec_lut_config lut_config = {0}; char *buf = *buf_p; int i; + const nveu32_t lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SA_LUT_MAX_INDEX, + OSI_SA_LUT_MAX_INDEX_T26X}; - for (i = 0; i <= OSI_SA_LUT_MAX_INDEX; i++) { + for (i = 0; i <= lut_max_index[osi_core->macsec]; i++) { memset(&lut_config, OSI_NONE, sizeof(lut_config)); lut_config.table_config.ctlr_sel = ctlr_sel; lut_config.table_config.rw = OSI_LUT_READ; @@ -1742,6 +2211,9 @@ static ssize_t macsec_sa_state_lut_store(struct device *dev, int index, ctlr; int ret; unsigned int next_pn, lowest_pn; + const nveu32_t lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SA_LUT_MAX_INDEX, + OSI_SA_LUT_MAX_INDEX_T26X}; if (!netif_running(ndev)) { dev_err(pdata->dev, "Not Allowed. 
Ether interface is not up\n"); @@ -1754,7 +2226,7 @@ static ssize_t macsec_sa_state_lut_store(struct device *dev, goto exit; } - if ((index > OSI_SA_LUT_MAX_INDEX) || + if ((index > lut_max_index[osi_core->macsec]) || (ctlr != OSI_CTLR_SEL_TX && ctlr != OSI_CTLR_SEL_RX)) { dev_err(pdata->dev, "%s:Invalid inputs", __func__); goto exit; @@ -1790,49 +2262,72 @@ static DEVICE_ATTR(macsec_sa_state_lut, (S_IRUGO | S_IWUSR), static void dump_sc_param_lut(char **buf_p, unsigned short ctlr_sel, - struct osi_core_priv_data *osi_core) + struct osi_core_priv_data *osi_core, + unsigned short start_index, + unsigned short end_index) { struct osi_macsec_lut_config lut_config = {0}; char *buf = *buf_p; - int i; + char sci_zero[8] = {0}; + unsigned int i; - for (i = 0; i <= OSI_SC_LUT_MAX_INDEX; i++) { + for (i = start_index; i <= end_index; i++) { memset(&lut_config, OSI_NONE, sizeof(lut_config)); lut_config.table_config.ctlr_sel = ctlr_sel; lut_config.table_config.rw = OSI_LUT_READ; lut_config.table_config.index = i; lut_config.lut_sel = OSI_LUT_SEL_SC_PARAM; if (osi_macsec_config_lut(osi_core, &lut_config) < 0) { - pr_err("%s: Failed to read BYP LUT\n", __func__); + pr_err("%s: Failed to read SC PARAM LUT\n", __func__); goto exit; } switch (ctlr_sel) { case OSI_CTLR_SEL_TX: - buf += scnprintf(buf, PAGE_SIZE, - "%d.\tkey_idx_start: %d pn_max: %u " - "pn_threshold: %u tci %01x vlan_clear %01x sci: " SCI_FMT, - i, lut_config.sc_param_out.key_index_start, - lut_config.sc_param_out.pn_max, - lut_config.sc_param_out.pn_threshold, - lut_config.sc_param_out.tci, - lut_config.sc_param_out.vlan_in_clear, - lut_config.sc_param_out.sci[7], - lut_config.sc_param_out.sci[6], - lut_config.sc_param_out.sci[5], - lut_config.sc_param_out.sci[4], - lut_config.sc_param_out.sci[3], - lut_config.sc_param_out.sci[2], - lut_config.sc_param_out.sci[1], - lut_config.sc_param_out.sci[0]); + if (memcmp(lut_config.sc_param_out.sci, sci_zero, 8) != 0) { + buf += scnprintf(buf, PAGE_SIZE, + "%d.\tkey_idx: %d 
pn_max: %u " + "pn_threshold: %u tci %01x vlan_clr %01x " + "encrypt %01x offset %01x sci: " SCI_FMT, + i, lut_config.sc_param_out.key_index_start, + lut_config.sc_param_out.pn_max, + lut_config.sc_param_out.pn_threshold, + lut_config.sc_param_out.tci, + lut_config.sc_param_out.vlan_in_clear, + lut_config.sc_param_out.encrypt, + lut_config.sc_param_out.conf_offset, + lut_config.sc_param_out.sci[7], + lut_config.sc_param_out.sci[6], + lut_config.sc_param_out.sci[5], + lut_config.sc_param_out.sci[4], + lut_config.sc_param_out.sci[3], + lut_config.sc_param_out.sci[2], + lut_config.sc_param_out.sci[1], + lut_config.sc_param_out.sci[0]); + } else { + buf += scnprintf(buf, PAGE_SIZE, + "%d.\tkey_idx: %d pn_max: %u " + "pn_threshold: %u tci %01x vlan_clr %01x " + "encrypt %01x offset %01x sci: X", + i, lut_config.sc_param_out.key_index_start, + lut_config.sc_param_out.pn_max, + lut_config.sc_param_out.pn_threshold, + lut_config.sc_param_out.tci, + lut_config.sc_param_out.vlan_in_clear, + lut_config.sc_param_out.encrypt, + lut_config.sc_param_out.conf_offset); + } buf += scnprintf(buf, PAGE_SIZE, "\n"); break; case OSI_CTLR_SEL_RX: buf += scnprintf(buf, PAGE_SIZE, - "%d.\tkey_idx_start: %d pn_max: %u pn_window: %u\n", i, + "%d.\tkey_idx: %d pn_max: %u pn_window: %u " + "encrypt %01x offset %01x\n", i, lut_config.sc_param_out.key_index_start, lut_config.sc_param_out.pn_max, - lut_config.sc_param_out.pn_window); + lut_config.sc_param_out.pn_window, + lut_config.sc_param_out.encrypt, + lut_config.sc_param_out.conf_offset); break; default: goto exit; @@ -1844,13 +2339,14 @@ exit: } /** - * @brief Shows the current SC parameters LUT configuration + * @brief Shows the current SC parameters Tx LUT configuration for + * indices above 24 * * @param[in] dev: Device data. 
* @param[in] attr: Device attribute * @param[in] buf: Buffer to print the current LUT configuration */ -static ssize_t macsec_sc_param_lut_show(struct device *dev, +static ssize_t macsec_sc_param_tx_lut_show_2(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1858,6 +2354,44 @@ static ssize_t macsec_sc_param_lut_show(struct device *dev, struct ether_priv_data *pdata = netdev_priv(ndev); struct osi_core_priv_data *osi_core = pdata->osi_core; char *start = buf; + const unsigned int sc_lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SC_INDEX_MAX, + OSI_SC_INDEX_MAX_T26X}; + + if (!netif_running(ndev)) { + dev_err(pdata->dev, "Not Allowed. Ether interface is not up\n"); + return 0; + } + + if (sc_lut_max_index[osi_core->macsec] <= MAX_ENTRIES_PER_SYSFS_NODE) { + dev_err(pdata->dev, "2nd node of sc param lut is not allowed for this platform\n"); + return 0; + } + buf += scnprintf(buf, PAGE_SIZE, "Tx:\n"); + dump_sc_param_lut(&buf, OSI_CTLR_SEL_TX, osi_core, MAX_ENTRIES_PER_SYSFS_NODE + 1U, + sc_lut_max_index[osi_core->macsec]); + + return (buf - start); +} + +/** + * @brief Shows the current SC parameters Tx LUT configuration + * + * @param[in] dev: Device data. + * @param[in] attr: Device attribute + * @param[in] buf: Buffer to print the current LUT configuration + */ +static ssize_t macsec_sc_param_tx_lut_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev); + struct ether_priv_data *pdata = netdev_priv(ndev); + struct osi_core_priv_data *osi_core = pdata->osi_core; + char *start = buf; + const unsigned int sc_lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SC_INDEX_MAX, + OSI_SC_INDEX_MAX_T26X}; if (!netif_running(ndev)) { dev_err(pdata->dev, "Not Allowed. 
Ether interface is not up\n"); @@ -1865,15 +2399,45 @@ static ssize_t macsec_sc_param_lut_show(struct device *dev, } buf += scnprintf(buf, PAGE_SIZE, "Tx:\n"); - dump_sc_param_lut(&buf, OSI_CTLR_SEL_TX, osi_core); - - buf += scnprintf(buf, PAGE_SIZE, "Rx:\n"); - dump_sc_param_lut(&buf, OSI_CTLR_SEL_RX, osi_core); + dump_sc_param_lut(&buf, OSI_CTLR_SEL_TX, osi_core, 0U, + ((sc_lut_max_index[osi_core->macsec] > MAX_ENTRIES_PER_SYSFS_NODE) ? + MAX_ENTRIES_PER_SYSFS_NODE: sc_lut_max_index[osi_core->macsec])); return (buf - start); } -#define SC_PARAM_INPUTS_LEN 16 +/** + * @brief Shows the current SC parameters Rx LUT configuration + * + * @param[in] dev: Device data. + * @param[in] attr: Device attribute + * @param[in] buf: Buffer to print the current LUT configuration + */ +static ssize_t macsec_sc_param_rx_lut_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev); + struct ether_priv_data *pdata = netdev_priv(ndev); + struct osi_core_priv_data *osi_core = pdata->osi_core; + char *start = buf; + const unsigned int sc_lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SC_INDEX_MAX, + OSI_SC_INDEX_MAX_T26X}; + + if (!netif_running(ndev)) { + dev_err(pdata->dev, "Not Allowed. 
Ether interface is not up\n"); + return 0; + } + + buf += scnprintf(buf, PAGE_SIZE, "Rx:\n"); + dump_sc_param_lut(&buf, OSI_CTLR_SEL_RX, osi_core, 0U, + sc_lut_max_index[osi_core->macsec]); + + return (buf - start); +} + +#define SC_PARAM_INPUTS_LEN 18 /** * @brief Set the SC parameters LUT configuration @@ -1894,30 +2458,35 @@ static ssize_t macsec_sc_param_lut_store(struct device *dev, struct osi_core_priv_data *osi_core = pdata->osi_core; struct osi_macsec_lut_config lut_config = {0}; int index, ctlr; - int ret, i, tci, vlan_clear; + int ret, i, tci, vlan_clear, encrypt, offset; int sci[OSI_SCI_LEN] = {0}; unsigned int pn_max, pn_threshold, key_index_start, pn_window; + const unsigned int sc_lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SC_INDEX_MAX, + OSI_SC_INDEX_MAX_T26X}; + const unsigned int key_idx_max[MAX_MACSEC_IP_TYPES] = { + OSI_KEY_INDEX_MAX, OSI_KEY_INDEX_MAX_T26X }; if (!netif_running(ndev)) { dev_err(pdata->dev, "Not Allowed. Ether interface is not up\n"); return size; } - ret = sscanf(buf, "%d %d %u %u %u %u %d %d" SCI_FMT, + ret = sscanf(buf, "%d %d %u %u %u %u %d %d %d %d" SCI_FMT, &index, &ctlr, &key_index_start, &pn_max, &pn_threshold, &pn_window, - &tci, &vlan_clear, + &tci, &vlan_clear, &encrypt, &offset, &sci[7], &sci[6], &sci[5], &sci[4], &sci[3], &sci[2], &sci[1], &sci[0]); if (ret < SC_PARAM_INPUTS_LEN) { dev_err(pdata->dev, "%s: Failed to parse inputs", __func__); goto exit; } - - if ((index > OSI_SC_LUT_MAX_INDEX) || + if ((index > sc_lut_max_index[osi_core->macsec]) || (ctlr != OSI_CTLR_SEL_TX && ctlr != OSI_CTLR_SEL_RX) || - (key_index_start > OSI_KEY_INDEX_MAX) || - (pn_threshold > pn_max)) { + (key_index_start > key_idx_max[osi_core->macsec]) || + (pn_threshold > pn_max) || (encrypt > 1) || + (offset > 2)) { dev_err(pdata->dev, "%s:Invalid inputs", __func__); goto exit; } @@ -1932,6 +2501,9 @@ static ssize_t macsec_sc_param_lut_store(struct device *dev, lut_config.sc_param_out.pn_window = pn_window; lut_config.sc_param_out.tci = 
(unsigned char)tci; lut_config.sc_param_out.vlan_in_clear = (unsigned char)vlan_clear; + lut_config.sc_param_out.encrypt = (unsigned char)encrypt; + lut_config.sc_param_out.conf_offset = (unsigned char)offset; + for (i = 0; i < OSI_SCI_LEN; i++) { lut_config.sc_param_out.sci[i] = (unsigned char)sci[i]; } @@ -1953,9 +2525,32 @@ exit: * */ static DEVICE_ATTR(macsec_sc_param_lut, (S_IRUGO | S_IWUSR), - macsec_sc_param_lut_show, + NULL, macsec_sc_param_lut_store); +/** + * @brief Sysfs attribute for SC param Tx LUT configuration + * + */ +static DEVICE_ATTR(macsec_sc_param_tx_lut, S_IRUGO, + macsec_sc_param_tx_lut_show, + NULL); +/** + * @brief Sysfs attribute for SC param Tx LUT configuration for indices above MAX_ENTRIES_PER_SYSFS_NODE + * + */ +static DEVICE_ATTR(macsec_sc_param_tx_lut_2, S_IRUGO, + macsec_sc_param_tx_lut_show_2, + NULL); + +/** + * @brief Sysfs attribute for SC param Rx LUT configuration + * + */ +static DEVICE_ATTR(macsec_sc_param_rx_lut, S_IRUGO, + macsec_sc_param_rx_lut_show, + NULL); + /** * @brief Shows the current MACsec irq stats * @@ -2041,6 +2636,8 @@ static ssize_t ether_phy_iface_mode_show(struct device *dev, struct osi_core_priv_data *osi_core = pdata->osi_core; switch (osi_core->phy_iface_mode) { + case OSI_XAUI_MODE_25G: + return scnprintf(buf, PAGE_SIZE, "XAUI-25G\n"); case OSI_XFI_MODE_10G: return scnprintf(buf, PAGE_SIZE, "XFI-10G\n"); case OSI_XFI_MODE_5G: @@ -2080,7 +2677,9 @@ static ssize_t ether_phy_iface_mode_store(struct device *dev, return size; } - if (strncmp(buf, "XFI-10G", 7) == 0U) { + if (strncmp(buf, "XAUI-25G", 8) == 0U) { + osi_core->phy_iface_mode = OSI_XAUI_MODE_25G; + } else if (strncmp(buf, "XFI-10G", 7) == 0U) { osi_core->phy_iface_mode = OSI_XFI_MODE_10G; } else if (strncmp(buf, "XFI-5G", 6) == 0U) { osi_core->phy_iface_mode = OSI_XFI_MODE_5G; @@ -2123,9 +2722,19 @@ static ssize_t ether_uphy_gbe_mode_show(struct device *dev, struct ether_priv_data *pdata = netdev_priv(ndev); struct osi_core_priv_data *osi_core = 
pdata->osi_core; - return scnprintf(buf, PAGE_SIZE, "%s\n", - (osi_core->uphy_gbe_mode == OSI_ENABLE) ? - "10G" : "5G"); + switch (osi_core->uphy_gbe_mode) { + case OSI_UPHY_GBE_MODE_25G: + return scnprintf(buf, PAGE_SIZE, "25G\n"); + case OSI_GBE_MODE_10G: + return scnprintf(buf, PAGE_SIZE, "10G\n"); + case OSI_GBE_MODE_5G: + return scnprintf(buf, PAGE_SIZE, "5G\n"); + case OSI_GBE_MODE_2_5G: + return scnprintf(buf, PAGE_SIZE, "2_5G\n"); + default: + return scnprintf(buf, PAGE_SIZE, "1G\n"); + } + } /** @@ -2154,10 +2763,16 @@ static ssize_t ether_uphy_gbe_mode_store(struct device *dev, return size; } - if (strncmp(buf, "10G", 3) == 0U) { - osi_core->uphy_gbe_mode = OSI_ENABLE; + if (strncmp(buf, "25G", 3) == 0U) { + osi_core->uphy_gbe_mode = OSI_UPHY_GBE_MODE_25G; + } else if (strncmp(buf, "10G", 3) == 0U) { + osi_core->uphy_gbe_mode = OSI_GBE_MODE_10G; } else if (strncmp(buf, "5G", 2) == 0U) { - osi_core->uphy_gbe_mode = OSI_DISABLE; + osi_core->uphy_gbe_mode = OSI_GBE_MODE_5G; + } else if (strncmp(buf, "2_5G", 4) == 0U) { + osi_core->uphy_gbe_mode = OSI_GBE_MODE_2_5G; + } else if (strncmp(buf, "1G", 2) == 0U) { + osi_core->uphy_gbe_mode = OSI_GBE_MODE_1G; } else { dev_err(pdata->dev, "Invalid value passed. 
Valid values are 10G or 5G\n"); @@ -2208,7 +2823,7 @@ static ssize_t ether_mac_frp_show(struct device *dev, entry = &osi_core->frp_table[i]; data = &entry->data; j += scnprintf((buf + j), (PAGE_SIZE - j), - "[%d] ID:%d MD:0x%x ME:0x%x AF:%d RF:%d IM:%d NIC:%d FO:%d OKI:%d DCH:x%x\n", + "[%d] ID:%d MD:0x%x ME:0x%x AF:%d RF:%d IM:%d NIC:%d FO:%d OKI:%d DCH:x%lx\n", i, entry->frp_id, data->match_data, data->match_en, data->accept_frame, data->reject_frame, data->inverse_match, @@ -2696,18 +3311,29 @@ static struct attribute *ether_sysfs_attrs[] = { &dev_attr_macsec_irq_stats.attr, &dev_attr_macsec_byp_lut.attr, &dev_attr_macsec_sci_lut.attr, + &dev_attr_macsec_sci_lut_rx.attr, + &dev_attr_macsec_sci_lut_tx.attr, + &dev_attr_macsec_sci_lut_tx_2.attr, #ifdef MACSEC_KEY_PROGRAM &dev_attr_macsec_kt.attr, &dev_attr_macsec_tx_kt.attr, &dev_attr_macsec_rx_kt.attr, + &dev_attr_macsec_tx_kt_2.attr, + &dev_attr_macsec_rx_kt_2.attr, + &dev_attr_macsec_tx_kt_3.attr, + &dev_attr_macsec_rx_kt_3.attr, #endif /* MACSEC_KEY_PROGRAM */ &dev_attr_macsec_sc_state_lut.attr, &dev_attr_macsec_sa_state_lut.attr, &dev_attr_macsec_sc_param_lut.attr, + &dev_attr_macsec_sc_param_tx_lut.attr, + &dev_attr_macsec_sc_param_tx_lut_2.attr, + &dev_attr_macsec_sc_param_rx_lut.attr, &dev_attr_macsec_cipher.attr, &dev_attr_macsec_enable.attr, &dev_attr_macsec_an_status.attr, - &dev_attr_macsec_mmc_counters.attr, + &dev_attr_macsec_mmc_counters_tx.attr, + &dev_attr_macsec_mmc_counters_rx.attr, #ifdef DEBUG_MACSEC &dev_attr_macsec_loopback.attr, &dev_attr_macsec_dbg_buffers.attr, @@ -2752,6 +3378,7 @@ static struct attribute *ether_sysfs_attrs_without_macsec[] = { #if defined HSI_SUPPORT && defined(NV_VLTEST_BUILD) && (IS_ENABLED(CONFIG_TEGRA_HSIERRRPTINJ)) &dev_attr_hsi_enable.attr, #endif + &dev_attr_mac_tx_q.attr, #endif /* OSI_STRIPPED_LIB */ NULL }; @@ -3383,3 +4010,8 @@ void ether_sysfs_unregister(struct ether_priv_data *pdata) sysfs_remove_group(&dev->kobj, ðer_attribute_group_wo_macsec); } } + 
+MODULE_AUTHOR("NVIDIA Corporation"); +MODULE_DESCRIPTION("Mac/Macsec Sysfs driver"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL);