diff --git a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c index 58d83bc9..474b7646 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c +++ b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c @@ -424,6 +424,19 @@ static int ether_pad_calibrate(struct ether_priv_data *pdata) */ static void ether_disable_mgbe_clks(struct ether_priv_data *pdata) { + if (pdata->osi_core->mac != OSI_MAC_HW_MGBE_T26X) { + if (!IS_ERR_OR_NULL(pdata->eee_pcs_clk)) { + clk_disable_unprepare(pdata->eee_pcs_clk); + } + + if (!IS_ERR_OR_NULL(pdata->mac_div_clk)) { + clk_disable_unprepare(pdata->mac_div_clk); + } + if (!IS_ERR_OR_NULL(pdata->rx_pcs_clk)) { + clk_disable_unprepare(pdata->rx_pcs_clk); + } + } + if (!IS_ERR_OR_NULL(pdata->ptp_ref_clk)) { clk_disable_unprepare(pdata->ptp_ref_clk); } @@ -432,18 +445,10 @@ static void ether_disable_mgbe_clks(struct ether_priv_data *pdata) clk_disable_unprepare(pdata->app_clk); } - if (!IS_ERR_OR_NULL(pdata->eee_pcs_clk)) { - clk_disable_unprepare(pdata->eee_pcs_clk); - } - if (!IS_ERR_OR_NULL(pdata->mac_clk)) { clk_disable_unprepare(pdata->mac_clk); } - if (!IS_ERR_OR_NULL(pdata->mac_div_clk)) { - clk_disable_unprepare(pdata->mac_div_clk); - } - if (!IS_ERR_OR_NULL(pdata->tx_pcs_clk)) { clk_disable_unprepare(pdata->tx_pcs_clk); } @@ -452,10 +457,6 @@ static void ether_disable_mgbe_clks(struct ether_priv_data *pdata) clk_disable_unprepare(pdata->tx_clk); } - if (!IS_ERR_OR_NULL(pdata->rx_pcs_clk)) { - clk_disable_unprepare(pdata->rx_pcs_clk); - } - if (!IS_ERR_OR_NULL(pdata->rx_pcs_input_clk)) { clk_disable_unprepare(pdata->rx_pcs_input_clk); } @@ -477,28 +478,43 @@ static void ether_disable_mgbe_clks(struct ether_priv_data *pdata) */ static void ether_disable_eqos_clks(struct ether_priv_data *pdata) { - if (!IS_ERR_OR_NULL(pdata->axi_cbb_clk)) { - clk_disable_unprepare(pdata->axi_cbb_clk); - } - - if (!IS_ERR_OR_NULL(pdata->axi_clk)) { - 
clk_disable_unprepare(pdata->axi_clk); - } - if (!IS_ERR_OR_NULL(pdata->rx_clk)) { clk_disable_unprepare(pdata->rx_clk); } - if (!IS_ERR_OR_NULL(pdata->ptp_ref_clk)) { - clk_disable_unprepare(pdata->ptp_ref_clk); - } - if (!IS_ERR_OR_NULL(pdata->tx_clk)) { clk_disable_unprepare(pdata->tx_clk); } - if (!IS_ERR_OR_NULL(pdata->pllrefe_clk)) { - clk_disable_unprepare(pdata->pllrefe_clk); + if (pdata->osi_core->mac_ver == MAC_CORE_VER_TYPE_EQOS_5_40) { + if (!IS_ERR_OR_NULL(pdata->rx_pcs_input_clk)) { + clk_disable_unprepare(pdata->rx_pcs_input_clk); + } + if (!IS_ERR_OR_NULL(pdata->app_clk)) { + clk_disable_unprepare(pdata->app_clk); + } + if (!IS_ERR_OR_NULL(pdata->tx_pcs_clk)) { + clk_disable_unprepare(pdata->tx_pcs_clk); + } + if (!IS_ERR_OR_NULL(pdata->mac_clk)) { + clk_disable_unprepare(pdata->mac_clk); + } + } else { + if (!IS_ERR_OR_NULL(pdata->ptp_ref_clk)) { + clk_disable_unprepare(pdata->ptp_ref_clk); + } + + if (!IS_ERR_OR_NULL(pdata->axi_clk)) { + clk_disable_unprepare(pdata->axi_clk); + } + + if (!IS_ERR_OR_NULL(pdata->axi_cbb_clk)) { + clk_disable_unprepare(pdata->axi_cbb_clk); + } + + if (!IS_ERR_OR_NULL(pdata->pllrefe_clk)) { + clk_disable_unprepare(pdata->pllrefe_clk); + } } pdata->clks_enable = false; @@ -539,6 +555,7 @@ static int ether_enable_mgbe_clks(struct ether_priv_data *pdata) unsigned int uphy_gbe_mode = pdata->osi_core->uphy_gbe_mode; unsigned long rate = 0; int ret; + unsigned short mac= pdata->osi_core->mac; if (!IS_ERR_OR_NULL(pdata->rx_input_clk)) { ret = clk_prepare_enable(pdata->rx_input_clk); @@ -550,19 +567,14 @@ static int ether_enable_mgbe_clks(struct ether_priv_data *pdata) if (!IS_ERR_OR_NULL(pdata->rx_pcs_input_clk)) { ret = clk_prepare_enable(pdata->rx_pcs_input_clk); if (ret < 0) { - return ret; - } - } - - if (!IS_ERR_OR_NULL(pdata->rx_pcs_clk)) { - ret = clk_prepare_enable(pdata->rx_pcs_clk); - if (ret < 0) { - goto err_rx_pcs; + goto err_rx_pcs_input; } } if (!IS_ERR_OR_NULL(pdata->tx_clk)) { - if (uphy_gbe_mode == 
OSI_ENABLE) + if (uphy_gbe_mode == OSI_UPHY_GBE_MODE_25G) + rate = ETHER_MGBE_TXRX_CLK_XAUI_25G; + else if (uphy_gbe_mode == OSI_GBE_MODE_10G) rate = ETHER_MGBE_TX_CLK_USXGMII_10G; else rate = ETHER_MGBE_TX_CLK_USXGMII_5G; @@ -580,7 +592,9 @@ static int ether_enable_mgbe_clks(struct ether_priv_data *pdata) } if (!IS_ERR_OR_NULL(pdata->tx_pcs_clk)) { - if (uphy_gbe_mode == OSI_ENABLE) + if (uphy_gbe_mode == OSI_UPHY_GBE_MODE_25G) + rate = ETHER_MGBE_TXRX_PCS_CLK_XAUI_25G; + else if (uphy_gbe_mode == OSI_GBE_MODE_10G) rate = ETHER_MGBE_TX_PCS_CLK_USXGMII_10G; else rate = ETHER_MGBE_TX_PCS_CLK_USXGMII_5G; @@ -598,10 +612,26 @@ static int ether_enable_mgbe_clks(struct ether_priv_data *pdata) } } - if (!IS_ERR_OR_NULL(pdata->mac_div_clk)) { - ret = clk_prepare_enable(pdata->mac_div_clk); - if (ret < 0) { - goto err_mac_div; + if (mac != OSI_MAC_HW_MGBE_T26X) { + if (!IS_ERR_OR_NULL(pdata->rx_pcs_clk)) { + ret = clk_prepare_enable(pdata->rx_pcs_clk); + if (ret < 0) { + goto err_rx_pcs; + } + } + + if (!IS_ERR_OR_NULL(pdata->mac_div_clk)) { + ret = clk_prepare_enable(pdata->mac_div_clk); + if (ret < 0) { + goto err_mac_div; + } + } + + if (!IS_ERR_OR_NULL(pdata->eee_pcs_clk)) { + ret = clk_prepare_enable(pdata->eee_pcs_clk); + if (ret < 0) { + goto err_eee_pcs; + } } } @@ -612,13 +642,6 @@ static int ether_enable_mgbe_clks(struct ether_priv_data *pdata) } } - if (!IS_ERR_OR_NULL(pdata->eee_pcs_clk)) { - ret = clk_prepare_enable(pdata->eee_pcs_clk); - if (ret < 0) { - goto err_eee_pcs; - } - } - if (!IS_ERR_OR_NULL(pdata->app_clk)) { ret = clk_prepare_enable(pdata->app_clk); if (ret < 0) { @@ -642,18 +665,22 @@ err_ptp_ref: clk_disable_unprepare(pdata->app_clk); } err_app: - if (!IS_ERR_OR_NULL(pdata->eee_pcs_clk)) { - clk_disable_unprepare(pdata->eee_pcs_clk); - } -err_eee_pcs: if (!IS_ERR_OR_NULL(pdata->mac_clk)) { clk_disable_unprepare(pdata->mac_clk); } err_mac: - if (!IS_ERR_OR_NULL(pdata->mac_div_clk)) { + if (!IS_ERR_OR_NULL(pdata->eee_pcs_clk) && mac == 
OSI_MAC_HW_MGBE) {
+		clk_disable_unprepare(pdata->eee_pcs_clk);
+	}
+err_eee_pcs:
+	if (!IS_ERR_OR_NULL(pdata->mac_div_clk) && mac == OSI_MAC_HW_MGBE) {
 		clk_disable_unprepare(pdata->mac_div_clk);
 	}
 err_mac_div:
+	if (!IS_ERR_OR_NULL(pdata->rx_pcs_clk) && mac == OSI_MAC_HW_MGBE) {
+		clk_disable_unprepare(pdata->rx_pcs_clk);
+	}
+err_rx_pcs:
 	if (!IS_ERR_OR_NULL(pdata->tx_pcs_clk)) {
 		clk_disable_unprepare(pdata->tx_pcs_clk);
 	}
@@ -665,7 +692,4 @@ err_tx:
-	if (!IS_ERR_OR_NULL(pdata->rx_pcs_clk)) {
-		clk_disable_unprepare(pdata->rx_pcs_clk);
-	}
-err_rx_pcs:
+err_rx_pcs_input:
 	if (!IS_ERR_OR_NULL(pdata->rx_pcs_input_clk)) {
 		clk_disable_unprepare(pdata->rx_pcs_input_clk);
 	}
@@ -673,6 +697,108 @@ err_rx_pcs:
 	return ret;
 }
 
+/**
+ * @brief Enable all MAC T26x EQOS related clks.
+ *
+ * Algorithm: Enables the clks by using clock subsystem provided API's.
+ *
+ * @param[in] pdata: OSD private data.
+ *
+ * @retval 0 on success
+ * @retval "negative value" on failure.
+ */
+static int ether_enable_eqos_clks_t26x(struct ether_priv_data *pdata)
+{
+	unsigned int uphy_gbe_mode = pdata->osi_core->uphy_gbe_mode;
+	unsigned long rate = 0;
+	int ret;
+
+	if (!IS_ERR_OR_NULL(pdata->tx_pcs_clk)) {
+		if (uphy_gbe_mode == OSI_GBE_MODE_2_5G)
+			rate = ETHER_EQOS_TX_CLK_2_5G;
+		else
+			rate = ETHER_EQOS_TX_CLK_1000M;
+		ret = clk_set_rate(pdata->tx_pcs_clk, rate);
+		if (ret < 0) {
+			dev_err(pdata->dev, "failed to set EQOS tx_pcs_clk rate\n");
+			return ret;
+		}
+
+		ret = clk_prepare_enable(pdata->tx_pcs_clk);
+		if (ret < 0) {
+			/* tx_pcs_clk itself failed to enable; nothing to unwind */
+			return ret;
+		}
+	}
+
+	if (!IS_ERR_OR_NULL(pdata->tx_clk)) {
+		if (uphy_gbe_mode == OSI_GBE_MODE_2_5G)
+			rate = ETHER_EQOS_UPHY_LX_TX_2_5G_CLK;
+		else
+			rate = ETHER_EQOS_UPHY_LX_TX_1G_CLK;
+		ret = clk_set_rate(pdata->tx_clk, rate);
+		if (ret < 0) {
+			dev_err(pdata->dev, "failed to set EQOS tx_clk rate\n");
+			goto err_tx_pcs;
+		}
+
+		ret = clk_prepare_enable(pdata->tx_clk);
+		if (ret < 0) {
+			goto err_tx_pcs;
+		}
+	}
+
+	if (!IS_ERR_OR_NULL(pdata->mac_clk)) {
+		ret = 
clk_prepare_enable(pdata->mac_clk); + if (ret < 0) { + goto err_tx; + } + } + if (!IS_ERR_OR_NULL(pdata->app_clk)) { + ret = clk_prepare_enable(pdata->app_clk); + if (ret < 0) { + goto err_app; + } + } + if (!IS_ERR_OR_NULL(pdata->rx_pcs_input_clk)) { + ret = clk_prepare_enable(pdata->rx_pcs_input_clk); + if (ret < 0) { + goto err_rx_pcs_input; + } + } + if (!IS_ERR_OR_NULL(pdata->rx_clk)) { + ret = clk_prepare_enable(pdata->rx_clk); + if (ret < 0) { + goto err_rx; + } + } + + pdata->clks_enable = true; + return 0; + +err_rx: + if (!IS_ERR_OR_NULL(pdata->rx_pcs_input_clk)) { + clk_disable_unprepare(pdata->rx_pcs_input_clk); + } +err_rx_pcs_input: + if (!IS_ERR_OR_NULL(pdata->app_clk)) { + clk_disable_unprepare(pdata->app_clk); + } +err_app: + if (!IS_ERR_OR_NULL(pdata->mac_clk)) { + clk_disable_unprepare(pdata->mac_clk); + } +err_tx: + if (!IS_ERR_OR_NULL(pdata->tx_clk)) { + clk_disable_unprepare(pdata->tx_clk); + } +err_tx_pcs: + if (!IS_ERR_OR_NULL(pdata->tx_pcs_clk)) { + clk_disable_unprepare(pdata->tx_pcs_clk); + } + + return ret; +} + /** * @brief Enable all MAC EQOS related clks. * @@ -772,9 +901,11 @@ static int ether_enable_clks(struct ether_priv_data *pdata) if (pdata->osi_core->use_virtualization == OSI_DISABLE) { if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) { return ether_enable_mgbe_clks(pdata); + } else if (pdata->osi_core->mac_ver == MAC_CORE_VER_TYPE_EQOS_5_40) { + return ether_enable_eqos_clks_t26x(pdata); + } else { + return ether_enable_eqos_clks(pdata); } - - return ether_enable_eqos_clks(pdata); } return 0; @@ -849,14 +980,15 @@ int ether_conf_eee(struct ether_priv_data *pdata, unsigned int tx_lpi_enable) #endif /* !OSI_STRIPPED_LIB */ /** - * @brief Set MGBE MAC_DIV/TX clk rate + * @brief Set MGBE MAC_DIV/TX(T23x) or MAC(T26x) clk rate * - * Algorithm: Sets MGBE MAC_DIV clk_rate which will be MAC_TX/MACSEC clk rate. + * Algorithm: Sets MGBE MAC_DIV or MAC clk_rate which will + * be MAC_TX/MACSEC clk rate. 
* - * @param[in] mac_div_clk: Pointer to MAC_DIV clk. + * @param[in] mac_clk: Pointer to MAC_DIV or MAC clk. * @param[in] speed: PHY line speed. */ -static inline void ether_set_mgbe_mac_div_rate(struct clk *mac_div_clk, +static inline void ether_set_mgbe_mac_div_rate(struct clk *mac_clk, int speed) { unsigned long rate; @@ -868,18 +1000,21 @@ static inline void ether_set_mgbe_mac_div_rate(struct clk *mac_div_clk, case SPEED_5000: rate = ETHER_MGBE_MAC_DIV_RATE_5G; break; + case SPEED_25000: + rate = ETHER_MGBE_MAC_DIV_RATE_25G; + break; case SPEED_10000: default: rate = ETHER_MGBE_MAC_DIV_RATE_10G; break; } - if (clk_set_rate(mac_div_clk, rate) < 0) - pr_err("%s(): failed to set mac_div_clk rate\n", __func__); + if (clk_set_rate(mac_clk, rate) < 0) + pr_err("%s(): failed to set mac_clk rate\n", __func__); } /** - * @brief Set EQOS TX clk rate + * @brief Set EQOS TX(T23x) or MAC/Macsec (T26x) clk rate * * @param[in] tx_clk: Pointer to Tx clk. * @param[in] speed: PHY line speed. @@ -896,6 +1031,9 @@ static inline void ether_set_eqos_tx_clk(struct clk *tx_clk, case SPEED_100: rate = ETHER_EQOS_TX_CLK_100M; break; + case SPEED_2500: + rate = ETHER_EQOS_TX_CLK_2_5G; + break; case SPEED_1000: default: rate = ETHER_EQOS_TX_CLK_1000M; @@ -903,7 +1041,7 @@ static inline void ether_set_eqos_tx_clk(struct clk *tx_clk, } if (clk_set_rate(tx_clk, rate) < 0) - pr_err("%s(): failed to set eqos tx_clk rate\n", __func__); + pr_err("%s(): failed to set eqos tx_clk/mac rate\n", __func__); } /** @@ -922,6 +1060,7 @@ static inline void set_speed_work_func(struct work_struct *work) struct net_device *dev = pdata->ndev; struct phy_device *phydev = pdata->phydev; nveu32_t iface_mode = pdata->osi_core->phy_iface_mode; + struct clk *mac_clk = NULL; #ifndef OSI_STRIPPED_LIB unsigned int eee_enable = OSI_DISABLE; #endif /* !OSI_STRIPPED_LIB */ @@ -972,8 +1111,9 @@ static inline void set_speed_work_func(struct work_struct *work) /* Set MGBE MAC_DIV/TX clk rate */ pdata->speed = speed; 
 	phy_print_status(phydev);
-		ether_set_mgbe_mac_div_rate(pdata->mac_div_clk,
-					    pdata->speed);
+		mac_clk = (pdata->osi_core->mac == OSI_MAC_HW_MGBE_T26X) ? pdata->mac_clk :
+			  pdata->mac_div_clk;
+		ether_set_mgbe_mac_div_rate(mac_clk, pdata->speed);
 
 #ifndef OSI_STRIPPED_LIB
 	if (pdata->eee_enabled && pdata->tx_lpi_enabled) {
@@ -1007,6 +1147,13 @@ static void ether_en_dis_monitor_clks(struct ether_priv_data *pdata,
 			else
 				pdata->rx_pcs_m_enabled = true;
 		}
+		if (!IS_ERR_OR_NULL(pdata->tx_m_clk) && !pdata->tx_m_enabled) {
+			if (clk_prepare_enable(pdata->tx_m_clk) < 0)
+				dev_err(pdata->dev,
+					"failed to enable tx_m_clk");
+			else
+				pdata->tx_m_enabled = true;
+		}
 	} else {
 		/* Disable Monitoring clocks */
 		if (!IS_ERR_OR_NULL(pdata->rx_pcs_m_clk) && pdata->rx_pcs_m_enabled) {
@@ -1018,6 +1165,11 @@ static void ether_adjust_link(struct net_device *dev)
 			clk_disable_unprepare(pdata->rx_m_clk);
 			pdata->rx_m_enabled = false;
 		}
+
+		if (!IS_ERR_OR_NULL(pdata->tx_m_clk) && pdata->tx_m_enabled) {
+			clk_disable_unprepare(pdata->tx_m_clk);
+			pdata->tx_m_enabled = false;
+		}
 	}
 }
 
@@ -1036,10 +1188,14 @@ static void ether_adjust_link(struct net_device *dev)
 {
 	struct ether_priv_data *pdata = netdev_priv(dev);
 	nveu32_t iface_mode = pdata->osi_core->phy_iface_mode;
+	unsigned int uphy_gbe_mode = pdata->osi_core->uphy_gbe_mode;
 	struct osi_dma_priv_data *osi_dma = pdata->osi_dma;
 	struct phy_device *phydev = pdata->phydev;
 	int new_state = 0, speed_changed = 0, speed;
 	unsigned long val;
+	unsigned short mac = pdata->osi_core->mac;
+	unsigned short mac_ver = pdata->osi_core->mac_ver;
+	struct clk *mac_clk = NULL;
 #ifndef OSI_STRIPPED_LIB
 	unsigned int eee_enable = OSI_DISABLE;
 #endif /* !OSI_STRIPPED_LIB */
@@ -1066,8 +1222,10 @@ static void ether_adjust_link(struct net_device *dev)
 #endif /* !OSI_STRIPPED_LIB */
 
 	if (pdata->fixed_link == OSI_ENABLE) {
-		if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) {
-			if (iface_mode == 
OSI_XAUI_MODE_25G) { + phydev->speed = OSI_SPEED_25000; + } else if (iface_mode == OSI_XFI_MODE_10G) { phydev->speed = OSI_SPEED_10000; } else if (iface_mode == OSI_XFI_MODE_5G) { phydev->speed = OSI_SPEED_5000; @@ -1104,7 +1262,13 @@ static void ether_adjust_link(struct net_device *dev) * speed will be overwritten as per the * PHY interface mode */ speed = phydev->speed; - /* XFI mode = 10G: + /* + * XAUI mode = 25G: + * UPHY GBE mode = 25G + * MAC = 25G + * XLGPCS = 25G + * PHY line side = 25G + * XFI mode = 10G: * UPHY GBE mode = 10G * MAC = 10G * XPCS = 10G @@ -1124,19 +1288,38 @@ static void ether_adjust_link(struct net_device *dev) * MAC = 5G/2.5G ( same as PHY line speed) * XPCS = 5G * PHY line side = 5G/2.5G + * SGMII mode = 2.5G: + * UPHY GBE mode = 2.5G + * MAC = 2.5G + * PCS = 2.5G + * PHY line side = 2.5G + * SGMII mode = 1G: + * UPHY GBE mode = 1G + * MAC = 1G ( same as PHY line speed) + * PCS = 1G + * PHY line side = 1G/100M/10M */ - if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) { - /* MAC and XFI speed should match in XFI mode */ - if (iface_mode == OSI_XFI_MODE_10G) { + if ((mac != OSI_MAC_HW_EQOS) || + (mac_ver == MAC_CORE_VER_TYPE_EQOS_5_40)) { + /* MAC and XFI/XAUI speed should match in + * XFI/XAUI mode + */ + if (iface_mode == OSI_XAUI_MODE_25G) { + speed = OSI_SPEED_25000; + } else if (iface_mode == OSI_XFI_MODE_10G) { speed = OSI_SPEED_10000; } else if (iface_mode == OSI_XFI_MODE_5G) { speed = OSI_SPEED_5000; + } else if (uphy_gbe_mode == OSI_GBE_MODE_2_5G) { + speed = OSI_SPEED_2500; } } + ioctl_data.arg6_32 = speed; ret = osi_handle_ioctl(pdata->osi_core, &ioctl_data); if (ret < 0) { - if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) { + if ((mac != OSI_MAC_HW_EQOS) || + (mac_ver == MAC_CORE_VER_TYPE_EQOS_5_40)) { netdev_dbg(dev, "Retry set speed\n"); netif_carrier_off(dev); schedule_delayed_work(&pdata->set_speed_work, @@ -1190,9 +1373,13 @@ static void ether_adjust_link(struct net_device *dev) } if (speed_changed) { - if (pdata->osi_core->mac 
!= OSI_MAC_HW_EQOS) { - ether_set_mgbe_mac_div_rate(pdata->mac_div_clk, - pdata->speed); + if((mac == OSI_MAC_HW_MGBE_T26X) || (mac == OSI_MAC_HW_MGBE)) { + mac_clk = (mac == OSI_MAC_HW_MGBE_T26X)? pdata->mac_clk: + pdata->mac_div_clk; + ether_set_mgbe_mac_div_rate(mac_clk, pdata->speed); + } else if(mac_ver == MAC_CORE_VER_TYPE_EQOS_5_40) { + ether_set_eqos_tx_clk(pdata->mac_clk, + phydev->speed); } else { if (pdata->osi_core->mac_ver == OSI_EQOS_MAC_5_30) { ether_set_eqos_tx_clk(pdata->tx_div_clk, @@ -5132,6 +5319,10 @@ static void ether_put_mgbe_clks(struct ether_priv_data *pdata) { struct device *dev = pdata->dev; + if (!IS_ERR_OR_NULL(pdata->rx_input_clk)) { + devm_clk_put(dev, pdata->rx_input_clk); + } + if (!IS_ERR_OR_NULL(pdata->ptp_ref_clk)) { devm_clk_put(dev, pdata->ptp_ref_clk); } @@ -5140,18 +5331,10 @@ static void ether_put_mgbe_clks(struct ether_priv_data *pdata) devm_clk_put(dev, pdata->app_clk); } - if (!IS_ERR_OR_NULL(pdata->eee_pcs_clk)) { - devm_clk_put(dev, pdata->eee_pcs_clk); - } - if (!IS_ERR_OR_NULL(pdata->mac_clk)) { devm_clk_put(dev, pdata->mac_clk); } - if (!IS_ERR_OR_NULL(pdata->mac_div_clk)) { - devm_clk_put(dev, pdata->mac_div_clk); - } - if (!IS_ERR_OR_NULL(pdata->tx_pcs_clk)) { devm_clk_put(dev, pdata->tx_pcs_clk); } @@ -5160,10 +5343,6 @@ static void ether_put_mgbe_clks(struct ether_priv_data *pdata) devm_clk_put(dev, pdata->tx_clk); } - if (!IS_ERR_OR_NULL(pdata->rx_pcs_clk)) { - devm_clk_put(dev, pdata->rx_pcs_clk); - } - if (!IS_ERR_OR_NULL(pdata->rx_pcs_input_clk)) { devm_clk_put(dev, pdata->rx_pcs_input_clk); } @@ -5175,6 +5354,22 @@ static void ether_put_mgbe_clks(struct ether_priv_data *pdata) if (!IS_ERR_OR_NULL(pdata->rx_m_clk)) { devm_clk_put(dev, pdata->rx_m_clk); } + + if (pdata->osi_core->mac == OSI_MAC_HW_MGBE_T26X) { + if (!IS_ERR_OR_NULL(pdata->tx_m_clk)) { + devm_clk_put(dev, pdata->tx_m_clk); + } + } else { + if (!IS_ERR_OR_NULL(pdata->eee_pcs_clk)) { + devm_clk_put(dev, pdata->eee_pcs_clk); + } + if 
(!IS_ERR_OR_NULL(pdata->mac_div_clk)) { + devm_clk_put(dev, pdata->mac_div_clk); + } + if (!IS_ERR_OR_NULL(pdata->rx_pcs_clk)) { + devm_clk_put(dev, pdata->rx_pcs_clk); + } + } } /** @@ -5191,37 +5386,51 @@ static void ether_put_eqos_clks(struct ether_priv_data *pdata) if (!IS_ERR_OR_NULL(pdata->tx_clk)) { devm_clk_put(dev, pdata->tx_clk); } - - if (!IS_ERR_OR_NULL(pdata->tx_div_clk)) { - devm_clk_put(dev, pdata->tx_div_clk); + if (!IS_ERR_OR_NULL(pdata->rx_clk)) { + devm_clk_put(dev, pdata->rx_clk); } - if (!IS_ERR_OR_NULL(pdata->rx_m_clk)) { devm_clk_put(dev, pdata->rx_m_clk); } - if (!IS_ERR_OR_NULL(pdata->rx_input_clk)) { - devm_clk_put(dev, pdata->rx_input_clk); - } - - if (!IS_ERR_OR_NULL(pdata->ptp_ref_clk)) { - devm_clk_put(dev, pdata->ptp_ref_clk); - } - - if (!IS_ERR_OR_NULL(pdata->rx_clk)) { - devm_clk_put(dev, pdata->rx_clk); - } - - if (!IS_ERR_OR_NULL(pdata->axi_clk)) { - devm_clk_put(dev, pdata->axi_clk); - } - - if (!IS_ERR_OR_NULL(pdata->axi_cbb_clk)) { - devm_clk_put(dev, pdata->axi_cbb_clk); - } - - if (!IS_ERR_OR_NULL(pdata->pllrefe_clk)) { - devm_clk_put(dev, pdata->pllrefe_clk); + if (pdata->osi_core->mac_ver == MAC_CORE_VER_TYPE_EQOS_5_40) { + if (!IS_ERR_OR_NULL(pdata->rx_pcs_m_clk)) { + devm_clk_put(dev, pdata->rx_pcs_m_clk); + } + if (!IS_ERR_OR_NULL(pdata->tx_m_clk)) { + devm_clk_put(dev, pdata->tx_m_clk); + } + if (!IS_ERR_OR_NULL(pdata->rx_pcs_input_clk)) { + devm_clk_put(dev, pdata->rx_pcs_input_clk); + } + if (!IS_ERR_OR_NULL(pdata->app_clk)) { + devm_clk_put(dev, pdata->app_clk); + } + if (!IS_ERR_OR_NULL(pdata->tx_pcs_clk)) { + devm_clk_put(dev, pdata->tx_pcs_clk); + } + if (!IS_ERR_OR_NULL(pdata->mac_clk)) { + devm_clk_put(dev, pdata->mac_clk); + } + }else { + if (!IS_ERR_OR_NULL(pdata->tx_div_clk)) { + devm_clk_put(dev, pdata->tx_div_clk); + } + if (!IS_ERR_OR_NULL(pdata->rx_input_clk)) { + devm_clk_put(dev, pdata->rx_input_clk); + } + if (!IS_ERR_OR_NULL(pdata->ptp_ref_clk)) { + devm_clk_put(dev, pdata->ptp_ref_clk); + } + if 
(!IS_ERR_OR_NULL(pdata->axi_clk)) { + devm_clk_put(dev, pdata->axi_clk); + } + if (!IS_ERR_OR_NULL(pdata->axi_cbb_clk)) { + devm_clk_put(dev, pdata->axi_cbb_clk); + } + if (!IS_ERR_OR_NULL(pdata->pllrefe_clk)) { + devm_clk_put(dev, pdata->pllrefe_clk); + } } } @@ -5258,7 +5467,10 @@ static int ether_set_mgbe_rx_fmon_rates(struct ether_priv_data *pdata) unsigned long rx_rate, rx_pcs_rate; int ret; - if (uphy_gbe_mode == OSI_ENABLE) { + if (uphy_gbe_mode == OSI_XAUI_MODE_25G) { + rx_rate = ETHER_MGBE_TXRX_CLK_XAUI_25G; + rx_pcs_rate = ETHER_MGBE_TXRX_PCS_CLK_XAUI_25G; + } else if (uphy_gbe_mode == OSI_GBE_MODE_10G) { rx_rate = ETHER_MGBE_RX_CLK_USXGMII_10G; rx_pcs_rate = ETHER_MGBE_RX_PCS_CLK_USXGMII_10G; } else { @@ -5317,13 +5529,6 @@ static int ether_get_mgbe_clks(struct ether_priv_data *pdata) goto err_rx_pcs_input; } - pdata->rx_pcs_clk = devm_clk_get(dev, "rx-pcs"); - if (IS_ERR(pdata->rx_pcs_clk)) { - ret = PTR_ERR(pdata->rx_pcs_clk); - dev_err(dev, "failed to get rx-pcs clk\n"); - goto err_rx_pcs; - } - pdata->tx_clk = devm_clk_get(dev, "tx"); if (IS_ERR(pdata->tx_clk)) { ret = PTR_ERR(pdata->tx_clk); @@ -5338,13 +5543,6 @@ static int ether_get_mgbe_clks(struct ether_priv_data *pdata) goto err_tx_pcs; } - pdata->mac_div_clk = devm_clk_get(dev, "mac-divider"); - if (IS_ERR(pdata->mac_div_clk)) { - ret = PTR_ERR(pdata->mac_div_clk); - dev_err(dev, "failed to get mac-divider clk\n"); - goto err_mac_div; - } - pdata->mac_clk = devm_clk_get(dev, "mac"); if (IS_ERR(pdata->mac_clk)) { ret = PTR_ERR(pdata->mac_clk); @@ -5352,13 +5550,6 @@ static int ether_get_mgbe_clks(struct ether_priv_data *pdata) goto err_mac; } - pdata->eee_pcs_clk = devm_clk_get(dev, "eee-pcs"); - if (IS_ERR(pdata->eee_pcs_clk)) { - ret = PTR_ERR(pdata->eee_pcs_clk); - dev_err(dev, "failed to get eee-pcs clk\n"); - goto err_eee_pcs; - } - pdata->app_clk = devm_clk_get(dev, "mgbe"); if (IS_ERR(pdata->app_clk)) { ret = PTR_ERR(pdata->app_clk); @@ -5380,29 +5571,58 @@ static int 
ether_get_mgbe_clks(struct ether_priv_data *pdata)
 		goto err_rx_input;
 	}
 
+	if (pdata->osi_core->mac == OSI_MAC_HW_MGBE_T26X) {
+		pdata->tx_m_clk = devm_clk_get(dev, "tx-m");
+		if (IS_ERR(pdata->tx_m_clk)) {
+			ret = PTR_ERR(pdata->tx_m_clk);
+			dev_err(dev, "failed to get tx-m clk\n");
+			goto err_rx_pcs;
+		}
+	} else {
+		pdata->rx_pcs_clk = devm_clk_get(dev, "rx-pcs");
+		if (IS_ERR(pdata->rx_pcs_clk)) {
+			ret = PTR_ERR(pdata->rx_pcs_clk);
+			dev_err(dev, "failed to get rx-pcs clk\n");
+			goto err_rx_pcs;
+		}
+
+		pdata->mac_div_clk = devm_clk_get(dev, "mac-divider");
+		if (IS_ERR(pdata->mac_div_clk)) {
+			ret = PTR_ERR(pdata->mac_div_clk);
+			dev_err(dev, "failed to get mac-divider clk\n");
+			goto err_mac_div;
+		}
+		pdata->eee_pcs_clk = devm_clk_get(dev, "eee-pcs");
+		if (IS_ERR(pdata->eee_pcs_clk)) {
+			ret = PTR_ERR(pdata->eee_pcs_clk);
+			dev_err(dev, "failed to get eee-pcs clk\n");
+			goto err_eee_pcs;
+		}
+	}
+
 	ret = ether_set_mgbe_rx_fmon_rates(pdata);
 	if (ret < 0)
 		goto err_rx_input;
 
 	return 0;
 
+err_eee_pcs:
+	devm_clk_put(dev, pdata->mac_div_clk);
+err_mac_div:
+	devm_clk_put(dev, pdata->rx_pcs_clk);
+err_rx_pcs:
+	devm_clk_put(dev, pdata->rx_input_clk);
 err_rx_input:
 	devm_clk_put(dev, pdata->ptp_ref_clk);
 err_ptp_ref:
 	devm_clk_put(dev, pdata->app_clk);
 err_app:
-	devm_clk_put(dev, pdata->eee_pcs_clk);
-err_eee_pcs:
 	devm_clk_put(dev, pdata->mac_clk);
 err_mac:
-	devm_clk_put(dev, pdata->mac_div_clk);
-err_mac_div:
 	devm_clk_put(dev, pdata->tx_pcs_clk);
 err_tx_pcs:
 	devm_clk_put(dev, pdata->tx_clk);
 err_tx:
-	devm_clk_put(dev, pdata->rx_pcs_clk);
-err_rx_pcs:
 	devm_clk_put(dev, pdata->rx_pcs_input_clk);
 err_rx_pcs_input:
 	devm_clk_put(dev, pdata->rx_pcs_m_clk);
@@ -5427,24 +5647,80 @@ static int ether_get_eqos_clks(struct ether_priv_data *pdata)
 {
 	struct device *dev = pdata->dev;
 	int ret;
 
-	/* Skip pll_refe clock initialisation for t18x platform */
-	pdata->pllrefe_clk = devm_clk_get(dev, "pllrefe_vcoout");
-	if (IS_ERR(pdata->pllrefe_clk)) {
-		dev_info(dev, "failed to get 
pllrefe_vcoout clk\n"); - } + if (pdata->osi_core->mac_ver == MAC_CORE_VER_TYPE_EQOS_5_40) { + pdata->mac_clk = devm_clk_get(dev, "eqos_mac"); + if (IS_ERR(pdata->mac_clk)) { + ret = PTR_ERR(pdata->mac_clk); + dev_err(dev, "failed to get eqos_mac clk\n"); + goto err_eqos_mac; + } - pdata->axi_cbb_clk = devm_clk_get(dev, "axi_cbb"); - if (IS_ERR(pdata->axi_cbb_clk)) { - ret = PTR_ERR(pdata->axi_cbb_clk); - dev_err(dev, "failed to get axi_cbb clk\n"); - goto err_axi_cbb; - } + pdata->tx_pcs_clk = devm_clk_get(dev, "eqos_tx_pcs"); + if (IS_ERR(pdata->tx_pcs_clk)) { + ret = PTR_ERR(pdata->tx_pcs_clk); + dev_err(dev, "failed to get tx_pcs_clk clk\n"); + goto err_tx_pcs; + } + pdata->app_clk = devm_clk_get(dev, "eqos"); + if (IS_ERR(pdata->app_clk)) { + ret = PTR_ERR(pdata->app_clk); + dev_err(dev, "failed to get app_clk clk\n"); + goto err_app; + } - pdata->axi_clk = devm_clk_get(dev, "eqos_axi"); - if (IS_ERR(pdata->axi_clk)) { - ret = PTR_ERR(pdata->axi_clk); - dev_err(dev, "failed to get eqos_axi clk\n"); - goto err_axi; + pdata->rx_pcs_input_clk = devm_clk_get(dev, "eqos_rx_pcs_input"); + if (IS_ERR(pdata->rx_pcs_input_clk)) { + ret = PTR_ERR(pdata->rx_pcs_input_clk); + dev_err(dev, "failed to get eqos_rx_pcs_input clk\n"); + goto err_rx_pcs_input; + } + + pdata->tx_m_clk = devm_clk_get(dev, "eqos_tx_m"); + if (IS_ERR(pdata->tx_m_clk)) { + ret = PTR_ERR(pdata->tx_m_clk); + dev_err(dev, "failed to get tx_m_clk clk\n"); + goto err_tx_m; + } + + pdata->rx_pcs_m_clk = devm_clk_get(dev, "eqos_rx_pcs_m"); + if (IS_ERR(pdata->rx_pcs_m_clk)) { + ret = PTR_ERR(pdata->rx_pcs_m_clk); + dev_err(dev, "failed to get eqos_rx_pcs_m clk\n"); + goto err_rx_pcs_m; + } + }else { + /* Skip pll_refe clock initialisation for t18x platform */ + pdata->pllrefe_clk = devm_clk_get(dev, "pllrefe_vcoout"); + if (IS_ERR(pdata->pllrefe_clk)) { + dev_info(dev, "failed to get pllrefe_vcoout clk\n"); + } + + pdata->axi_cbb_clk = devm_clk_get(dev, "axi_cbb"); + if (IS_ERR(pdata->axi_cbb_clk)) { + ret 
= PTR_ERR(pdata->axi_cbb_clk); + dev_err(dev, "failed to get axi_cbb clk\n"); + goto err_axi_cbb; + } + + pdata->axi_clk = devm_clk_get(dev, "eqos_axi"); + if (IS_ERR(pdata->axi_clk)) { + ret = PTR_ERR(pdata->axi_clk); + dev_err(dev, "failed to get eqos_axi clk\n"); + goto err_axi; + } + + pdata->ptp_ref_clk = devm_clk_get(dev, "eqos_ptp_ref"); + if (IS_ERR(pdata->ptp_ref_clk)) { + ret = PTR_ERR(pdata->ptp_ref_clk); + dev_err(dev, "failed to get eqos_ptp_ref clk\n"); + goto err_ptp_ref; + } + + pdata->tx_div_clk = devm_clk_get(dev, "eqos_tx_divider"); + if (IS_ERR(pdata->tx_div_clk)) { + ret = PTR_ERR(pdata->tx_div_clk); + dev_info(dev, "failed to get eqos_tx_divider clk\n"); + } } pdata->rx_clk = devm_clk_get(dev, "eqos_rx"); @@ -5454,13 +5730,6 @@ static int ether_get_eqos_clks(struct ether_priv_data *pdata) goto err_rx; } - pdata->ptp_ref_clk = devm_clk_get(dev, "eqos_ptp_ref"); - if (IS_ERR(pdata->ptp_ref_clk)) { - ret = PTR_ERR(pdata->ptp_ref_clk); - dev_err(dev, "failed to get eqos_ptp_ref clk\n"); - goto err_ptp_ref; - } - pdata->tx_clk = devm_clk_get(dev, "eqos_tx"); if (IS_ERR(pdata->tx_clk)) { ret = PTR_ERR(pdata->tx_clk); @@ -5480,12 +5749,6 @@ static int ether_get_eqos_clks(struct ether_priv_data *pdata) dev_info(dev, "failed to get eqos_rx_input clk\n"); } - pdata->tx_div_clk = devm_clk_get(dev, "eqos_tx_divider"); - if (IS_ERR(pdata->tx_div_clk)) { - ret = PTR_ERR(pdata->tx_div_clk); - dev_info(dev, "failed to get eqos_tx_divider clk\n"); - } - /* Set default rate to 1G */ if (!IS_ERR_OR_NULL(pdata->rx_input_clk)) { clk_set_rate(pdata->rx_input_clk, @@ -5493,20 +5756,47 @@ static int ether_get_eqos_clks(struct ether_priv_data *pdata) } return 0; - -err_tx: - devm_clk_put(dev, pdata->ptp_ref_clk); -err_ptp_ref: - devm_clk_put(dev, pdata->rx_clk); err_rx: - devm_clk_put(dev, pdata->axi_clk); + if (!IS_ERR_OR_NULL(pdata->tx_div_clk)) { + devm_clk_put(dev, pdata->tx_div_clk); + } +err_tx: + if (!IS_ERR_OR_NULL(pdata->rx_clk)) { + devm_clk_put(dev, 
pdata->rx_clk); + } +err_ptp_ref: + if (!IS_ERR_OR_NULL(pdata->axi_clk)) { + devm_clk_put(dev, pdata->axi_clk); + } err_axi: - devm_clk_put(dev, pdata->axi_cbb_clk); + if (!IS_ERR_OR_NULL(pdata->axi_cbb_clk)) { + devm_clk_put(dev, pdata->axi_cbb_clk); + } err_axi_cbb: if (!IS_ERR_OR_NULL(pdata->pllrefe_clk)) { devm_clk_put(dev, pdata->pllrefe_clk); } - +err_rx_pcs_m: + if (!IS_ERR_OR_NULL(pdata->tx_m_clk)) { + devm_clk_put(dev, pdata->tx_m_clk); + } +err_tx_m: + if (!IS_ERR_OR_NULL(pdata->rx_pcs_input_clk)) { + devm_clk_put(dev, pdata->rx_pcs_input_clk); + } +err_rx_pcs_input: + if (!IS_ERR_OR_NULL(pdata->app_clk)) { + devm_clk_put(dev, pdata->app_clk); + } +err_app: + if (!IS_ERR_OR_NULL(pdata->tx_pcs_clk)) { + devm_clk_put(dev, pdata->tx_pcs_clk); + } +err_tx_pcs: + if (!IS_ERR_OR_NULL(pdata->mac_clk)) { + devm_clk_put(dev, pdata->mac_clk); + } +err_eqos_mac: return ret; } @@ -5525,7 +5815,6 @@ static int ether_get_clks(struct ether_priv_data *pdata) if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) { return ether_get_mgbe_clks(pdata); } - return ether_get_eqos_clks(pdata); } @@ -6375,23 +6664,41 @@ static int ether_parse_dt(struct ether_priv_data *pdata) return -EINVAL; } - if (osi_core->mac != OSI_MAC_HW_EQOS) { - ret = of_property_read_u32(np, "nvidia,uphy-gbe-mode", - &osi_core->uphy_gbe_mode); - if (ret < 0) { + ret = of_property_read_u32(np, "nvidia,uphy-gbe-mode", + &osi_core->uphy_gbe_mode); + if (ret < 0) { + if (osi_core->mac != OSI_MAC_HW_EQOS) { dev_info(dev, "failed to read UPHY GBE mode" "- default to 10G\n"); - osi_core->uphy_gbe_mode = OSI_ENABLE; + osi_core->uphy_gbe_mode = OSI_GBE_MODE_10G; + } else { + dev_info(dev, + "failed to read UPHY GBE mode" + "- default to 1G\n"); + osi_core->uphy_gbe_mode = OSI_GBE_MODE_1G; } + } - if ((osi_core->uphy_gbe_mode != OSI_ENABLE) && - (osi_core->uphy_gbe_mode != OSI_DISABLE)) { + if (osi_core->mac != OSI_MAC_HW_EQOS) { + if ((osi_core->uphy_gbe_mode != OSI_GBE_MODE_5G) && + (osi_core->uphy_gbe_mode != 
OSI_GBE_MODE_10G) && + (osi_core->uphy_gbe_mode != OSI_UPHY_GBE_MODE_25G)) { dev_err(dev, "Invalid UPHY GBE mode" "- default to 10G\n"); - osi_core->uphy_gbe_mode = OSI_ENABLE; + osi_core->uphy_gbe_mode = OSI_GBE_MODE_10G; } + } else { + if ((osi_core->uphy_gbe_mode != OSI_GBE_MODE_2_5G) && + (osi_core->uphy_gbe_mode != OSI_GBE_MODE_1G)) { + dev_err(dev, "Invalid UPHY GBE mode" + "- default to 1G\n"); + osi_core->uphy_gbe_mode = OSI_GBE_MODE_1G; + } + } + + if (osi_core->mac != OSI_MAC_HW_EQOS) { ret = of_property_read_u32(np, "nvidia,phy-iface-mode", &osi_core->phy_iface_mode); if (ret < 0) { @@ -6403,26 +6710,38 @@ static int ether_parse_dt(struct ether_priv_data *pdata) if ((osi_core->phy_iface_mode != OSI_XFI_MODE_10G) && (osi_core->phy_iface_mode != OSI_XFI_MODE_5G) && (osi_core->phy_iface_mode != OSI_USXGMII_MODE_10G) && - (osi_core->phy_iface_mode != OSI_USXGMII_MODE_5G)) { + (osi_core->phy_iface_mode != OSI_USXGMII_MODE_5G) && + (osi_core->phy_iface_mode != OSI_XAUI_MODE_25G)) { dev_err(dev, "Invalid PHY iface mode" "- default to 10G\n"); osi_core->phy_iface_mode = OSI_XFI_MODE_10G; } - /* GBE and XFI/USXGMII must be in same mode */ - if ((osi_core->uphy_gbe_mode == OSI_ENABLE) && + /* GBE and XAUI must be in same mode */ + if ((osi_core->uphy_gbe_mode == OSI_UPHY_GBE_MODE_25G) && ((osi_core->phy_iface_mode == OSI_XFI_MODE_5G) || - (osi_core->phy_iface_mode == OSI_USXGMII_MODE_5G))) { - dev_err(dev, "Invalid combination of UPHY 10GBE mode" - "and XFI/USXGMII 5G mode\n"); + (osi_core->phy_iface_mode == OSI_USXGMII_MODE_5G) || + (osi_core->phy_iface_mode == OSI_XFI_MODE_10G) || + (osi_core->phy_iface_mode == OSI_USXGMII_MODE_10G))) { + dev_err(dev, "Invalid combination of UPHY 25GBE mode" + "and XFI/USXGMII/XAUI mode\n"); return -EINVAL; } - if ((osi_core->uphy_gbe_mode == OSI_DISABLE) && + if ((osi_core->uphy_gbe_mode == OSI_GBE_MODE_10G) && + ((osi_core->phy_iface_mode == OSI_XFI_MODE_5G) || + (osi_core->phy_iface_mode == OSI_USXGMII_MODE_5G) || + 
(osi_core->phy_iface_mode == OSI_XAUI_MODE_25G))) { + dev_err(dev, "Invalid combination of UPHY 10GBE mode" + "and XFI/USXGMII/AUXA mode\n"); + return -EINVAL; + } + if ((osi_core->uphy_gbe_mode == OSI_GBE_MODE_5G) && ((osi_core->phy_iface_mode == OSI_XFI_MODE_10G) || - (osi_core->phy_iface_mode == OSI_USXGMII_MODE_10G))) { + (osi_core->phy_iface_mode == OSI_USXGMII_MODE_10G) || + (osi_core->phy_iface_mode == OSI_XAUI_MODE_25G))) { dev_err(dev, "Invalid combination of UPHY 5GBE mode" - "and XFI/USXGMII 10G mode\n"); + "and XFI/USXGMII/XAUI mode\n"); return -EINVAL; } } @@ -6610,6 +6929,12 @@ static void ether_get_num_dma_chan_mtl_q(struct platform_device *pdev, max_chans = OSI_MGBE_MAX_NUM_CHANS; } + if (of_device_is_compatible(np, "nvidia,tegra264-eqos")) { + *mac = OSI_MAC_HW_EQOS; + *macsec = OSI_MACSEC_T26X; + max_chans = OSI_EQOS_MAX_NUM_CHANS; + } + /* parse the number of DMA channels */ ret = of_property_read_u32(np, "nvidia,num-dma-chans", num_dma_chans); if (ret != 0) { diff --git a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h index 32ceaf9b..f4411048 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h +++ b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h @@ -128,22 +128,28 @@ * @brief Ethernet clk rates */ #define ETHER_RX_INPUT_CLK_RATE 125000000UL +#define ETHER_MGBE_MAC_DIV_RATE_25G 781250000UL #define ETHER_MGBE_MAC_DIV_RATE_10G 312500000UL #define ETHER_MGBE_MAC_DIV_RATE_5G 156250000UL #define ETHER_MGBE_MAC_DIV_RATE_2_5G 78125000UL // gbe_pll2_txclkref (644 MHz) --> programmable link TX_CLK divider // --> link_Tx_clk --> fixed 1/2 gear box divider --> lane TX clk. 
+#define ETHER_MGBE_TXRX_CLK_XAUI_25G 805664000UL #define ETHER_MGBE_TX_CLK_USXGMII_10G 644531250UL #define ETHER_MGBE_TX_CLK_USXGMII_5G 322265625UL #define ETHER_MGBE_RX_CLK_USXGMII_10G 644531250UL #define ETHER_MGBE_RX_CLK_USXGMII_5G 322265625UL +#define ETHER_MGBE_TXRX_PCS_CLK_XAUI_25G 390625000UL #define ETHER_MGBE_TX_PCS_CLK_USXGMII_10G 156250000UL #define ETHER_MGBE_TX_PCS_CLK_USXGMII_5G 78125000UL #define ETHER_MGBE_RX_PCS_CLK_USXGMII_10G 156250000UL #define ETHER_MGBE_RX_PCS_CLK_USXGMII_5G 78125000UL +#define ETHER_EQOS_TX_CLK_2_5G 312500000UL #define ETHER_EQOS_TX_CLK_1000M 125000000UL #define ETHER_EQOS_TX_CLK_100M 25000000UL #define ETHER_EQOS_TX_CLK_10M 2500000UL +#define ETHER_EQOS_UPHY_LX_TX_2_5G_CLK 195312500UL +#define ETHER_EQOS_UPHY_LX_TX_1G_CLK 78125000UL /** * @brief 1 Second in Neno Second @@ -472,6 +478,8 @@ struct ether_priv_data { struct clk *tx_div_clk; /** Receive Monitoring clock */ struct clk *rx_m_clk; + /** Transmit Monitoring clock */ + struct clk *tx_m_clk; /** RX PCS monitoring clock */ struct clk *rx_pcs_m_clk; /** RX PCS input clock */ @@ -651,6 +659,8 @@ struct ether_priv_data { unsigned int fixed_link; /** Flag to represent rx_m clk enabled or not */ bool rx_m_enabled; + /** Flag to represent tx_m clk enabled or not */ + bool tx_m_enabled; /** Flag to represent rx_pcs_m clk enabled or not */ bool rx_pcs_m_enabled; /* Timer value in msec for ether_stats_work thread */ diff --git a/drivers/net/ethernet/nvidia/nvethernet/macsec.c b/drivers/net/ethernet/nvidia/nvethernet/macsec.c index 58ffb29f..1b25fc14 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/macsec.c +++ b/drivers/net/ethernet/nvidia/nvethernet/macsec.c @@ -100,9 +100,9 @@ static int macsec_disable_car(struct macsec_priv_data *macsec_pdata) struct ether_priv_data *pdata = macsec_pdata->ether_pdata; PRINT_ENTRY(); - if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) { - if (!IS_ERR_OR_NULL(macsec_pdata->mgbe_clk)) { - clk_disable_unprepare(macsec_pdata->mgbe_clk); + if 
(pdata->osi_core->mac != OSI_MAC_HW_EQOS) { + if (!IS_ERR_OR_NULL(macsec_pdata->macsec_clk)) { + clk_disable_unprepare(macsec_pdata->macsec_clk); } } else { if (!IS_ERR_OR_NULL(macsec_pdata->eqos_tx_clk)) { @@ -130,8 +130,8 @@ static int macsec_enable_car(struct macsec_priv_data *macsec_pdata) PRINT_ENTRY(); if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) { - if (!IS_ERR_OR_NULL(macsec_pdata->mgbe_clk)) { - ret = clk_prepare_enable(macsec_pdata->mgbe_clk); + if (!IS_ERR_OR_NULL(macsec_pdata->macsec_clk)) { + ret = clk_prepare_enable(macsec_pdata->macsec_clk); if (ret < 0) { dev_err(dev, "failed to enable macsec clk\n"); goto exit; @@ -167,8 +167,8 @@ static int macsec_enable_car(struct macsec_priv_data *macsec_pdata) err_ns_rst: if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) { - if (!IS_ERR_OR_NULL(macsec_pdata->mgbe_clk)) { - clk_disable_unprepare(macsec_pdata->mgbe_clk); + if (!IS_ERR_OR_NULL(macsec_pdata->macsec_clk)) { + clk_disable_unprepare(macsec_pdata->macsec_clk); } } else { if (!IS_ERR_OR_NULL(macsec_pdata->eqos_rx_clk)) { @@ -326,10 +326,14 @@ static int macsec_get_platform_res(struct macsec_priv_data *macsec_pdata) /* Get clks */ if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) { - macsec_pdata->mgbe_clk = devm_clk_get(dev, "mgbe_macsec"); - if (IS_ERR(macsec_pdata->mgbe_clk)) { + if (pdata->osi_core->mac_ver == OSI_MGBE_MAC_3_10) { + macsec_pdata->macsec_clk = devm_clk_get(dev, "mgbe_macsec"); + } else { + macsec_pdata->macsec_clk = devm_clk_get(dev, "macsec"); + } + if (IS_ERR(macsec_pdata->macsec_clk)) { dev_err(dev, "failed to get macsec clk\n"); - ret = PTR_ERR(macsec_pdata->mgbe_clk); + ret = PTR_ERR(macsec_pdata->macsec_clk); goto exit; } } else { @@ -360,8 +364,8 @@ static void macsec_release_platform_res(struct macsec_priv_data *macsec_pdata) PRINT_ENTRY(); if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) { - if (!IS_ERR_OR_NULL(macsec_pdata->mgbe_clk)) { - devm_clk_put(dev, macsec_pdata->mgbe_clk); + if (!IS_ERR_OR_NULL(macsec_pdata->macsec_clk)) { + 
devm_clk_put(dev, macsec_pdata->macsec_clk); } } else { if (!IS_ERR_OR_NULL(macsec_pdata->eqos_tx_clk)) { diff --git a/drivers/net/ethernet/nvidia/nvethernet/macsec.h b/drivers/net/ethernet/nvidia/nvethernet/macsec.h index 0e207aba..31efb381 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/macsec.h +++ b/drivers/net/ethernet/nvidia/nvethernet/macsec.h @@ -218,11 +218,11 @@ struct nvpkcs_data { struct macsec_priv_data { /** Non secure reset */ struct reset_control *ns_rst; - /** MGBE Macsec clock */ - struct clk *mgbe_clk; - /** EQOS Macsec TX clock */ + /** MGBE/EQOS Macsec clock */ + struct clk *macsec_clk; + /** T23x EQOS Macsec TX clock */ struct clk *eqos_tx_clk; - /** EQOS Macsec RX clock */ + /** T23x EQOS Macsec RX clock */ struct clk *eqos_rx_clk; /** Secure irq */ int s_irq; diff --git a/drivers/net/ethernet/nvidia/nvethernet/sysfs.c b/drivers/net/ethernet/nvidia/nvethernet/sysfs.c index aa92f1da..401f9ec1 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/sysfs.c +++ b/drivers/net/ethernet/nvidia/nvethernet/sysfs.c @@ -2636,6 +2636,8 @@ static ssize_t ether_phy_iface_mode_show(struct device *dev, struct osi_core_priv_data *osi_core = pdata->osi_core; switch (osi_core->phy_iface_mode) { + case OSI_XAUI_MODE_25G: + return scnprintf(buf, PAGE_SIZE, "XAUI-25G\n"); case OSI_XFI_MODE_10G: return scnprintf(buf, PAGE_SIZE, "XFI-10G\n"); case OSI_XFI_MODE_5G: @@ -2675,7 +2677,9 @@ static ssize_t ether_phy_iface_mode_store(struct device *dev, return size; } - if (strncmp(buf, "XFI-10G", 7) == 0U) { + if (strncmp(buf, "XAUI-25G", 8) == 0U) { + osi_core->phy_iface_mode = OSI_XAUI_MODE_25G; + } else if (strncmp(buf, "XFI-10G", 7) == 0U) { osi_core->phy_iface_mode = OSI_XFI_MODE_10G; } else if (strncmp(buf, "XFI-5G", 6) == 0U) { osi_core->phy_iface_mode = OSI_XFI_MODE_5G; @@ -2718,9 +2722,19 @@ static ssize_t ether_uphy_gbe_mode_show(struct device *dev, struct ether_priv_data *pdata = netdev_priv(ndev); struct osi_core_priv_data *osi_core = pdata->osi_core; - 
return scnprintf(buf, PAGE_SIZE, "%s\n", - (osi_core->uphy_gbe_mode == OSI_ENABLE) ? - "10G" : "5G"); + switch (osi_core->uphy_gbe_mode) { + case OSI_UPHY_GBE_MODE_25G: + return scnprintf(buf, PAGE_SIZE, "25G\n"); + case OSI_GBE_MODE_10G: + return scnprintf(buf, PAGE_SIZE, "10G\n"); + case OSI_GBE_MODE_5G: + return scnprintf(buf, PAGE_SIZE, "5G\n"); + case OSI_GBE_MODE_2_5G: + return scnprintf(buf, PAGE_SIZE, "2_5G\n"); + default: + return scnprintf(buf, PAGE_SIZE, "1G\n"); + } + } /** @@ -2749,10 +2763,16 @@ static ssize_t ether_uphy_gbe_mode_store(struct device *dev, return size; } - if (strncmp(buf, "10G", 3) == 0U) { - osi_core->uphy_gbe_mode = OSI_ENABLE; + if (strncmp(buf, "25G", 3) == 0U) { + osi_core->uphy_gbe_mode = OSI_UPHY_GBE_MODE_25G; + } else if (strncmp(buf, "10G", 3) == 0U) { + osi_core->uphy_gbe_mode = OSI_GBE_MODE_10G; } else if (strncmp(buf, "5G", 2) == 0U) { - osi_core->uphy_gbe_mode = OSI_DISABLE; + osi_core->uphy_gbe_mode = OSI_GBE_MODE_5G; + } else if (strncmp(buf, "2_5G", 4) == 0U) { + osi_core->uphy_gbe_mode = OSI_GBE_MODE_2_5G; + } else if (strncmp(buf, "1G", 2) == 0U) { + osi_core->uphy_gbe_mode = OSI_GBE_MODE_1G; } else { dev_err(pdata->dev, "Invalid value passed. Valid values are 10G or 5G\n");