nvethernet: fix spin lock recursion

Issue:
1) dev_queue_xmit() acquired the per-queue tx lock and called the driver
transmit routine.
2) While the transmit was in progress, the common interrupt asserted and
its handler tried to acquire the same tx lock, resulting in spin lock
recursion.

Fix: Move the lock acquisition to tasklet context, with the tasklet
scheduled from the common ISR path.

Bug 3773016

Change-Id: I7cfd49beb1238286d3bccd9e4b9ccc054c4f6d30
Signed-off-by: Bhadram Varka <vbhadram@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2770227
Reviewed-by: Narayan Reddy <narayanr@nvidia.com>
Reviewed-by: Bitan Biswas <bbiswas@nvidia.com>
Reviewed-by: svcacv <svcacv@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Author:       Bhadram Varka
Date:         2022-09-01 20:59:24 +05:30
Committed by: Revanth Kumar Uppala
parent 1ffcf6d286
commit f667be2c63
3 changed files with 23 additions and 4 deletions
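
For reference, a minimal sketch of the recursion described in the Issue/Fix
above and of the deferral pattern the change adopts. This is illustrative
only; the exact call chain inside the driver is simplified.

/*
 * Before the fix (simplified):
 *
 *   CPU0: dev_queue_xmit()
 *           __netif_tx_lock(txq)          <-- per-queue tx lock taken
 *           driver .ndo_start_xmit()
 *             ... common interrupt fires on the same CPU ...
 *             common ISR -> osd_restart_lane_bringup()
 *               netif_tx_lock(ndev)       <-- spins on the lock this CPU
 *                                             already holds: spin lock
 *                                             recursion
 *
 * After the fix: the ISR path only records the requested state and
 * schedules a tasklet.  The tasklet runs in softirq context, after the
 * transmit path has released the per-queue lock, and performs the
 * netif_tx_lock()/stop/start work there.
 */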


@@ -2929,6 +2929,8 @@ static int ether_close(struct net_device *ndev)
 	/* stop tx ts pending SKB workqueue and remove skb nodes */
 	ether_flush_tx_ts_skb_list(pdata);
 
+	tasklet_kill(&pdata->lane_restart_task);
+
 	ether_stop_ivc(pdata);
 
 	if (pdata->xpcs_rst) {
@@ -6508,6 +6510,8 @@ static int ether_probe(struct platform_device *pdev)
 	pdata->rx_pcs_m_enabled = false;
 	atomic_set(&pdata->tx_ts_ref_cnt, -1);
 	atomic_set(&pdata->set_speed_ref_cnt, OSI_DISABLE);
+	tasklet_setup(&pdata->lane_restart_task,
+		      ether_restart_lane_bringup_task);
 #ifdef ETHER_NVGRO
 	__skb_queue_head_init(&pdata->mq);
 	__skb_queue_head_init(&pdata->fq);
@@ -6770,6 +6774,8 @@ static int ether_suspend_noirq(struct device *dev)
 	}
 #endif /* MACSEC_SUPPORT */
 
+	tasklet_kill(&pdata->lane_restart_task);
+
 	/* stop workqueue */
 	cancel_delayed_work_sync(&pdata->tx_ts_work);


@@ -607,6 +607,10 @@ struct ether_priv_data {
 	atomic_t set_speed_ref_cnt;
 	/** flag to enable logs using ethtool */
 	u32 msg_enable;
+	/** flag to indicate to start/stop the Tx */
+	unsigned int tx_start_stop;
+	/** Tasklet for restarting UPHY lanes */
+	struct tasklet_struct lane_restart_task;
 };
 
 /**
@@ -793,6 +797,7 @@ int ether_tc_setup_cbs(struct ether_priv_data *pdata,
  * @retval EAGAIN on Failure
  */
 int ether_get_tx_ts(struct ether_priv_data *pdata);
+void ether_restart_lane_bringup_task(struct tasklet_struct *t);
 #ifdef ETHER_NVGRO
 void ether_nvgro_purge_timer(struct timer_list *t);
 #endif /* ETHER_NVGRO */


@@ -881,11 +881,11 @@ static void osd_core_printf(struct osi_core_priv_data *osi_core,
 }
 #endif
 
-static void osd_restart_lane_bringup(void *priv, unsigned int en_disable)
+void ether_restart_lane_bringup_task(struct tasklet_struct *t)
 {
-	struct ether_priv_data *pdata = (struct ether_priv_data *)priv;
+	struct ether_priv_data *pdata = from_tasklet(pdata, t, lane_restart_task);
 
-	if (en_disable == OSI_DISABLE) {
+	if (pdata->tx_start_stop == OSI_DISABLE) {
 		netif_tx_lock(pdata->ndev);
 		netif_tx_stop_all_queues(pdata->ndev);
 		netif_tx_unlock(pdata->ndev);
@@ -893,7 +893,7 @@ static void osd_restart_lane_bringup(void *priv, unsigned int en_disable)
 		if (netif_msg_drv(pdata)) {
 			netdev_info(pdata->ndev, "Disable network Tx Queue\n");
 		}
-	} else if (en_disable == OSI_ENABLE) {
+	} else if (pdata->tx_start_stop == OSI_ENABLE) {
 		netif_tx_lock(pdata->ndev);
 		netif_tx_start_all_queues(pdata->ndev);
 		netif_tx_unlock(pdata->ndev);
@@ -903,6 +903,14 @@ static void osd_restart_lane_bringup(void *priv, unsigned int en_disable)
 	}
 }
 
+static void osd_restart_lane_bringup(void *priv, unsigned int en_disable)
+{
+	struct ether_priv_data *pdata = (struct ether_priv_data *)priv;
+
+	pdata->tx_start_stop = en_disable;
+	tasklet_hi_schedule(&pdata->lane_restart_task);
+}
+
 void ether_assign_osd_ops(struct osi_core_priv_data *osi_core,
 			  struct osi_dma_priv_data *osi_dma)
 {
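
Putting the hunks above together, a condensed, hedged sketch of the new
split between the OSD callback and the tasklet handler. Only the members
touched by this change are shown; the struct stub, the omitted logging,
and the simplified error handling are editorial, not the driver's exact
code, and OSI_ENABLE/OSI_DISABLE come from the OSI common headers.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Stub of the driver private data -- only the members used here. */
struct ether_priv_data {
	struct net_device *ndev;
	/* flag to indicate to start/stop the Tx */
	unsigned int tx_start_stop;
	/* Tasklet for restarting UPHY lanes */
	struct tasklet_struct lane_restart_task;
};

/* OSD callback: may be reached from the common ISR, so it must not take
 * the tx queue locks itself.  It records the request and schedules the
 * tasklet instead. */
static void osd_restart_lane_bringup(void *priv, unsigned int en_disable)
{
	struct ether_priv_data *pdata = (struct ether_priv_data *)priv;

	pdata->tx_start_stop = en_disable;
	tasklet_hi_schedule(&pdata->lane_restart_task);
}

/* Tasklet handler: runs in softirq context, after the interrupted
 * transmit has dropped the per-queue lock, so netif_tx_lock() is safe. */
void ether_restart_lane_bringup_task(struct tasklet_struct *t)
{
	struct ether_priv_data *pdata = from_tasklet(pdata, t,
						     lane_restart_task);

	netif_tx_lock(pdata->ndev);
	if (pdata->tx_start_stop == OSI_DISABLE)
		netif_tx_stop_all_queues(pdata->ndev);
	else if (pdata->tx_start_stop == OSI_ENABLE)
		netif_tx_start_all_queues(pdata->ndev);
	netif_tx_unlock(pdata->ndev);
}

The lifecycle hooks come from the first three hunks: tasklet_setup() in
probe, tasklet_kill() in close and in the noirq suspend path.
tasklet_hi_schedule() puts the handler on the high-priority softirq, so
the queue state change runs as soon as softirqs are processed after the
ISR returns.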