nvethernet: add lane bringup restart callback

Issue: In the current scenario, when the remote
interface is brought down and then up again, there
is no way to re-establish the link with the remote
device. As a result, data transfers fail even though
the interface on the other end has come back up.

Fix: When the interface on the other side goes down,
the MAC receives local faults, and in that state no
data should be sent to the MAC. Disable the network
Tx queues and initiate the lane bring-up process to
monitor the link status. Once the link is up again,
re-enable the network queues for data transfers.

Also add support for enabling driver logs via the
msglvl option of ethtool.
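
For example, the new driver log messages can be
toggled at runtime with ethtool (the interface name
eth0 below is only illustrative):

  ethtool -s eth0 msglvl drv on
  ethtool -s eth0 msglvl drv off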

Bug 3744088
Bug 3654543
Bug 3665378

Change-Id: I16cdee74e4e3ff6cd176924575f5005f385d4c5d
Signed-off-by: Narayan Reddy <narayanr@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2730876
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: Rakesh Goyal <rgoyal@nvidia.com>
Reviewed-by: Bhadram Varka <vbhadram@nvidia.com>
Reviewed-by: Srinivas Ramachandran <srinivasra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Narayan Reddy
2022-06-18 11:37:01 +00:00
committed by Revanth Kumar Uppala
parent f502700729
commit 99acf2912b
4 changed files with 54 additions and 1 deletion


@@ -884,6 +884,13 @@ static inline void set_speed_work_func(struct work_struct *work)
		return;
	}

	if (atomic_read(&pdata->set_speed_ref_cnt) == 1) {
		/* set_speed already going on either from workq or interrupt */
		return;
	}
	atomic_set(&pdata->set_speed_ref_cnt, OSI_ENABLE);

	/* Speed will be overwritten as per the PHY interface mode */
	speed = phydev->speed;
	/* MAC and XFI speed should match in XFI mode */
@@ -903,6 +910,7 @@ static inline void set_speed_work_func(struct work_struct *work)
		netdev_dbg(dev, "Retry set speed\n");
		schedule_delayed_work(&pdata->set_speed_work,
				      msecs_to_jiffies(1000));
		atomic_set(&pdata->set_speed_ref_cnt, OSI_DISABLE);
		return;
	}
@@ -918,6 +926,8 @@ static inline void set_speed_work_func(struct work_struct *work)
	}

	pdata->eee_active = ether_conf_eee(pdata, eee_enable);
	netif_carrier_on(dev);
	atomic_set(&pdata->set_speed_ref_cnt, OSI_DISABLE);
}

static void ether_en_dis_monitor_clks(struct ether_priv_data *pdata,
@@ -6567,6 +6577,7 @@ static int ether_probe(struct platform_device *pdev)
	pdata->rx_m_enabled = false;
	pdata->rx_pcs_m_enabled = false;
	atomic_set(&pdata->tx_ts_ref_cnt, -1);
	atomic_set(&pdata->set_speed_ref_cnt, OSI_DISABLE);

#ifdef ETHER_NVGRO
	__skb_queue_head_init(&pdata->mq);
	__skb_queue_head_init(&pdata->fq);


@@ -623,6 +623,10 @@ struct ether_priv_data {
	raw_spinlock_t txts_lock;
	/** Ref count for ether_get_tx_ts_func */
	atomic_t tx_ts_ref_cnt;
	/** Ref count for set_speed_work_func */
	atomic_t set_speed_ref_cnt;
	/** flag to enable logs using ethtool */
	u32 msg_enable;
};

/**


@@ -1603,6 +1603,20 @@ static int ether_set_ringparam(struct net_device *ndev,
	return ret;
}

static unsigned int ether_get_msglevel(struct net_device *ndev)
{
	struct ether_priv_data *pdata = netdev_priv(ndev);

	return pdata->msg_enable;
}

static void ether_set_msglevel(struct net_device *ndev, u32 level)
{
	struct ether_priv_data *pdata = netdev_priv(ndev);

	pdata->msg_enable = level;
}

/**
 * @brief Set of ethtool operations
 */
@@ -1634,6 +1648,8 @@ static const struct ethtool_ops ether_ethtool_ops = {
	.set_rxfh = ether_set_rxfh,
	.get_ringparam = ether_get_ringparam,
	.set_ringparam = ether_set_ringparam,
	.get_msglevel = ether_get_msglevel,
	.set_msglevel = ether_set_msglevel,
};

void ether_set_ethtool_ops(struct net_device *ndev)


@@ -881,6 +881,28 @@ static void osd_core_printf(struct osi_core_priv_data *osi_core,
}
#endif

static void osd_restart_lane_bringup(void *priv, unsigned int en_disable)
{
	struct ether_priv_data *pdata = (struct ether_priv_data *)priv;

	if (en_disable == OSI_DISABLE) {
		netif_tx_lock(pdata->ndev);
		netif_tx_stop_all_queues(pdata->ndev);
		netif_tx_unlock(pdata->ndev);
		schedule_delayed_work(&pdata->set_speed_work,
				      msecs_to_jiffies(500));
		if (netif_msg_drv(pdata)) {
			netdev_info(pdata->ndev, "Disable network Tx Queue\n");
		}
	} else if (en_disable == OSI_ENABLE) {
		netif_tx_lock(pdata->ndev);
		netif_tx_start_all_queues(pdata->ndev);
		netif_tx_unlock(pdata->ndev);
		if (netif_msg_drv(pdata)) {
			netdev_info(pdata->ndev, "Enable network Tx Queue\n");
		}
	}
}
void ether_assign_osd_ops(struct osi_core_priv_data *osi_core,
			  struct osi_dma_priv_data *osi_dma)
{
@@ -892,7 +914,7 @@ void ether_assign_osd_ops(struct osi_core_priv_data *osi_core,
#ifdef OSI_DEBUG
	osi_core->osd_ops.printf = osd_core_printf;
#endif
	osi_core->osd_ops.restart_lane_bringup = osd_restart_lane_bringup;
	osi_dma->osd_ops.transmit_complete = osd_transmit_complete;
	osi_dma->osd_ops.receive_packet = osd_receive_packet;
	osi_dma->osd_ops.realloc_buf = osd_realloc_buf;
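
The caller of the new restart_lane_bringup hook lives in the OSI core
library and is not part of this diff. The following is only a rough
sketch of how that caller might look; the function names are
hypothetical, it relies on the driver's existing OSI headers for types
and macros, and it assumes (based on the pattern of the other osd_ops
callbacks) that osi_core->osd holds the ether_priv_data pointer passed
back as priv:

static void osi_handle_local_fault(struct osi_core_priv_data *osi_core)
{
	/* Local fault on the lane: stop the netdev Tx queues and kick off
	 * the delayed set_speed work that monitors lane bring-up.
	 */
	osi_core->osd_ops.restart_lane_bringup(osi_core->osd, OSI_DISABLE);
}

static void osi_handle_lane_up(struct osi_core_priv_data *osi_core)
{
	/* Lane is back up: re-enable the netdev Tx queues for transfers. */
	osi_core->osd_ops.restart_lane_bringup(osi_core->osd, OSI_ENABLE);
}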