diff --git a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c index 6e538678..3e9c89b0 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c +++ b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c @@ -31,7 +31,8 @@ */ static void ether_get_tx_ts(struct work_struct *work) { - struct ether_priv_data *pdata = container_of(work, + struct delayed_work *dwork = to_delayed_work(work); + struct ether_priv_data *pdata = container_of(dwork, struct ether_priv_data, tx_ts_work); struct list_head *head_node, *temp_head_node; struct skb_shared_hwtstamps shhwtstamp; @@ -89,8 +90,10 @@ update_skb: } else { dev_dbg(pdata->dev, "Unable to retrieve TS from OSI\n"); miss_count++; - if (miss_count < TS_MISS_THRESHOLD) - schedule_work(&pdata->tx_ts_work); + if (miss_count < TS_MISS_THRESHOLD) { + schedule_delayed_work(&pdata->tx_ts_work, + msecs_to_jiffies(ETHER_TS_MS_TIMER)); + } } } } @@ -2691,7 +2694,7 @@ static inline void ether_flush_tx_ts_skb_list(struct ether_priv_data *pdata) struct list_head *head_node, *temp_head_node; /* stop workqueue */ - cancel_work_sync(&pdata->tx_ts_work); + cancel_delayed_work_sync(&pdata->tx_ts_work); /* Delete nodes from list and rest static memory for reuse */ if (!list_empty(&pdata->tx_ts_skb_head)) { @@ -2886,7 +2889,7 @@ static int ether_handle_tso(struct osi_tx_pkt_cx *tx_pkt_cx, netdev_dbg(skb->dev, "tcp_udp_hdrlen=%u\n", tx_pkt_cx->tcp_udp_hdrlen); netdev_dbg(skb->dev, "total_hdrlen =%u\n", tx_pkt_cx->total_hdrlen); - return ret; + return 1; } /** @@ -6235,7 +6238,7 @@ static int ether_probe(struct platform_device *pdev) osi_core->hw_feature = &pdata->hw_feat; INIT_LIST_HEAD(&pdata->mac_addr_list_head); INIT_LIST_HEAD(&pdata->tx_ts_skb_head); - INIT_WORK(&pdata->tx_ts_work, ether_get_tx_ts); + INIT_DELAYED_WORK(&pdata->tx_ts_work, ether_get_tx_ts); #ifdef ETHER_NVGRO __skb_queue_head_init(&pdata->mq); @@ -6348,6 +6351,9 @@ static int ether_suspend_noirq(struct device 
*dev) return -EBUSY; } + /* stop workqueue */ + cancel_delayed_work_sync(&pdata->tx_ts_work); + /* Stop workqueue while DUT is going to suspend state */ ether_stats_work_queue_stop(pdata); diff --git a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h index 6846ddb1..456af4e8 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h +++ b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h @@ -290,6 +290,12 @@ static inline int ether_avail_txdesc_cnt(struct osi_tx_ring *tx_ring) */ #define ETHER_STATS_TIMER 3U +/** + * @brief Timer to trigger Work queue periodically which reads TX timestamps + * for PTP packets. Timer is in milliseconds. + */ +#define ETHER_TS_MS_TIMER 1U + #define ETHER_VM_IRQ_TX_CHAN_MASK(x) BIT((x) * 2U) #define ETHER_VM_IRQ_RX_CHAN_MASK(x) BIT(((x) * 2U) + 1U) @@ -543,7 +549,7 @@ struct ether_priv_data { /** local L2 filter address list head pointer */ struct list_head mac_addr_list_head; /** skb tx timestamp update work queue */ - struct work_struct tx_ts_work; + struct delayed_work tx_ts_work; /** local skb list head */ struct list_head tx_ts_skb_head; /** pre allocated memory for ether_tx_ts_skb_list list */ diff --git a/drivers/net/ethernet/nvidia/nvethernet/osd.c b/drivers/net/ethernet/nvidia/nvethernet/osd.c index c0bf2be7..55c9b2de 100644 @@ -764,7 +764,8 @@ static void osd_transmit_complete(void *priv, void *buffer, unsigned long dmaadd skb, txdone_pkt_cx->pktid); list_add_tail(&pnode->list_head, &pdata->tx_ts_skb_head); - schedule_work(&pdata->tx_ts_work); + schedule_delayed_work(&pdata->tx_ts_work, + msecs_to_jiffies(ETHER_TS_MS_TIMER)); } else { dev_consume_skb_any(skb); }