Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git
nvethernet: Use delayed workqueue for tx timestamp
Issue:
- The workqueue for Tx timestamps runs without any delay, consuming many
  CPU cycles at hard ISR thread priority.
- ether_handle_tso() returns an incorrect value.

Fix:
- Use a delayed workqueue for Tx timestamp handling.
- Return the correct value from ether_handle_tso().

Bug 200780891
Bug 3400623

Change-Id: I2095a0634e079bf870ef87cd2de1d35fe24bafd4
Signed-off-by: Rakesh Goyal <rgoyal@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2610986
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: Revanth Kumar Uppala
Parent: a54d1536bd
Commit: 5f3e27f093
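The commit message above describes replacing a work item that rescheduled itself immediately with a delayed work item re-armed on a millisecond timer. The fragment below is a minimal, self-contained sketch of that kernel pattern, using hypothetical names (demo_priv, demo_ts_work, DEMO_TS_MS_TIMER) rather than the driver's own structures; the actual conversion is shown in the diff that follows.

/* Minimal sketch of the work_struct -> delayed_work conversion.
 * All names here are illustrative, not taken from the driver.
 */
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/workqueue.h>

#define DEMO_TS_MS_TIMER 1U	/* poll interval in milliseconds */

struct demo_priv {
	struct delayed_work ts_work;	/* was: struct work_struct */
	int pending;			/* pretend timestamps still outstanding */
};

static struct demo_priv demo;

static void demo_ts_work(struct work_struct *work)
{
	/* For delayed work, recover the container via to_delayed_work(). */
	struct delayed_work *dwork = to_delayed_work(work);
	struct demo_priv *priv = container_of(dwork, struct demo_priv, ts_work);

	if (priv->pending-- > 0) {
		/* Re-arm with a delay instead of rescheduling immediately,
		 * so the handler does not spin at high priority.
		 */
		schedule_delayed_work(&priv->ts_work,
				      msecs_to_jiffies(DEMO_TS_MS_TIMER));
	}
}

static int __init demo_init(void)
{
	demo.pending = 5;
	INIT_DELAYED_WORK(&demo.ts_work, demo_ts_work);
	schedule_delayed_work(&demo.ts_work,
			      msecs_to_jiffies(DEMO_TS_MS_TIMER));
	return 0;
}

static void __exit demo_exit(void)
{
	/* Delayed work must be cancelled with the _delayed_ variant. */
	cancel_delayed_work_sync(&demo.ts_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note that teardown must use cancel_delayed_work_sync() rather than cancel_work_sync(), which is why the flush and suspend paths in the diff below change as well.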
@@ -31,7 +31,8 @@
  */
 static void ether_get_tx_ts(struct work_struct *work)
 {
-        struct ether_priv_data *pdata = container_of(work,
+        struct delayed_work *dwork = to_delayed_work(work);
+        struct ether_priv_data *pdata = container_of(dwork,
                         struct ether_priv_data, tx_ts_work);
         struct list_head *head_node, *temp_head_node;
         struct skb_shared_hwtstamps shhwtstamp;
@@ -89,8 +90,10 @@ update_skb:
                 } else {
                         dev_dbg(pdata->dev, "Unable to retrieve TS from OSI\n");
                         miss_count++;
-                        if (miss_count < TS_MISS_THRESHOLD)
-                                schedule_work(&pdata->tx_ts_work);
+                        if (miss_count < TS_MISS_THRESHOLD) {
+                                schedule_delayed_work(&pdata->tx_ts_work,
+                                                msecs_to_jiffies(ETHER_TS_MS_TIMER));
+                        }
                 }
         }
 }
@@ -2691,7 +2694,7 @@ static inline void ether_flush_tx_ts_skb_list(struct ether_priv_data *pdata)
         struct list_head *head_node, *temp_head_node;
 
         /* stop workqueue */
-        cancel_work_sync(&pdata->tx_ts_work);
+        cancel_delayed_work_sync(&pdata->tx_ts_work);
 
         /* Delete nodes from list and rest static memory for reuse */
         if (!list_empty(&pdata->tx_ts_skb_head)) {
@@ -2886,7 +2889,7 @@ static int ether_handle_tso(struct osi_tx_pkt_cx *tx_pkt_cx,
         netdev_dbg(skb->dev, "tcp_udp_hdrlen=%u\n", tx_pkt_cx->tcp_udp_hdrlen);
         netdev_dbg(skb->dev, "total_hdrlen =%u\n", tx_pkt_cx->total_hdrlen);
 
-        return ret;
+        return 1;
 }
 
 /**
@@ -6235,7 +6238,7 @@ static int ether_probe(struct platform_device *pdev)
         osi_core->hw_feature = &pdata->hw_feat;
         INIT_LIST_HEAD(&pdata->mac_addr_list_head);
         INIT_LIST_HEAD(&pdata->tx_ts_skb_head);
-        INIT_WORK(&pdata->tx_ts_work, ether_get_tx_ts);
+        INIT_DELAYED_WORK(&pdata->tx_ts_work, ether_get_tx_ts);
 
 #ifdef ETHER_NVGRO
         __skb_queue_head_init(&pdata->mq);
@@ -6348,6 +6351,9 @@ static int ether_suspend_noirq(struct device *dev)
                 return -EBUSY;
         }
 
+        /* stop workqueue */
+        cancel_delayed_work_sync(&pdata->tx_ts_work);
+
         /* Stop workqueue while DUT is going to suspend state */
         ether_stats_work_queue_stop(pdata);
 
@@ -290,6 +290,12 @@ static inline int ether_avail_txdesc_cnt(struct osi_tx_ring *tx_ring)
  */
 #define ETHER_STATS_TIMER 3U
 
+/**
+ * @brief Timer to trigger Work queue periodically which read TX timestamp
+ * for PTP packets. Timer is in milisecond.
+ */
+#define ETHER_TS_MS_TIMER 1U
+
 #define ETHER_VM_IRQ_TX_CHAN_MASK(x) BIT((x) * 2U)
 #define ETHER_VM_IRQ_RX_CHAN_MASK(x) BIT(((x) * 2U) + 1U)
 
@@ -543,7 +549,7 @@ struct ether_priv_data {
         /** local L2 filter address list head pointer */
         struct list_head mac_addr_list_head;
         /** skb tx timestamp update work queue */
-        struct work_struct tx_ts_work;
+        struct delayed_work tx_ts_work;
         /** local skb list head */
         struct list_head tx_ts_skb_head;
         /** pre allocated memory for ether_tx_ts_skb_list list */
@@ -764,7 +764,8 @@ static void osd_transmit_complete(void *priv, void *buffer, unsigned long dmaadd
                                 skb, txdone_pkt_cx->pktid);
                         list_add_tail(&pnode->list_head,
                                       &pdata->tx_ts_skb_head);
-                        schedule_work(&pdata->tx_ts_work);
+                        schedule_delayed_work(&pdata->tx_ts_work,
+                                        msecs_to_jiffies(ETHER_TS_MS_TIMER));
                 } else {
                         dev_consume_skb_any(skb);
                 }