From e4f82ebb78d3eca156abbf548cdbce6a3b14af7d Mon Sep 17 00:00:00 2001 From: Bhadram Varka Date: Tue, 13 Aug 2019 19:54:46 +0530 Subject: [PATCH] nvethernet: use maximum transmit buffer size 1) Removes the limitation of 4KB per descriptor. 2) Adds maximum transmit buffer size as 0x3FFF. 3) Changes descriptors threshold considering TSO/GSO. Bug 200542488 Change-Id: Ic32d5e1e49df48b0da057349f8300d029410d322 Signed-off-by: Bhadram Varka Reviewed-on: https://git-master.nvidia.com/r/2174519 Reviewed-by: mobile promotions Tested-by: mobile promotions --- .../ethernet/nvidia/nvethernet/ether_linux.c | 7 +++---- .../ethernet/nvidia/nvethernet/ether_linux.h | 18 ++++++++++++------ drivers/net/ethernet/nvidia/nvethernet/osd.c | 3 ++- 3 files changed, 17 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c index 3f3712d7..6856893f 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c +++ b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c @@ -1385,7 +1385,7 @@ static int ether_tx_swcx_alloc(struct device *dev, struct skb_frag_struct *frag; unsigned int page_idx, page_offset; unsigned int max_data_len_per_txd = (unsigned int) - ETHER_MAX_DATA_LEN_PER_TXD_BUF; // 4KB + ETHER_TX_MAX_BUFF_SIZE; memset(tx_pkt_cx, 0, sizeof(*tx_pkt_cx)); @@ -1503,10 +1503,9 @@ static int ether_tx_swcx_alloc(struct device *dev, for (i = 0; i < num_frags; i++) { offset = 0; frag = &skb_shinfo(skb)->frags[i]; - len = frag->size; + len = skb_frag_size(frag); while (len) { tx_swcx = tx_ring->tx_swcx + cur_tx_idx; - if (unlikely(tx_swcx->len)) { goto desc_not_free; } @@ -1639,7 +1638,7 @@ static int ether_start_xmit(struct sk_buff *skb, struct net_device *ndev) osi_hw_transmit(osi_dma, chan); - if (ether_avail_txdesc_cnt(tx_ring) < TX_DESC_THRESHOLD) { + if (ether_avail_txdesc_cnt(tx_ring) <= ETHER_TX_DESC_THRESHOLD) { netif_stop_subqueue(ndev, qinx); netdev_dbg(ndev, "Tx ring[%d] insufficient 
desc.\n", chan); } diff --git a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h index 7a793343..9c34e528 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h +++ b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h @@ -120,14 +120,20 @@ /** * @brief Maximum buffer length per DMA descriptor (4KB). */ -#define ETHER_MAX_DATA_LEN_PER_TXD_BUF BIT(12) +#define ETHER_TX_MAX_BUFF_SIZE 0x3FFF -/** - * @brief In-case of TSO/GSO, Tx ring needs atleast MAX_SKB_FRAGS + - * one context descriptor + - * one descriptor for header/linear buffer payload +/* Descriptors required for maximum contiguous TSO/GSO packet + * one extra descriptor if there is linear buffer payload */ -#define TX_DESC_THRESHOLD (MAX_SKB_FRAGS + 2) +#define ETHER_TX_MAX_SPLIT ((GSO_MAX_SIZE / ETHER_TX_MAX_BUFF_SIZE) + 1) + +/* Maximum possible descriptors needed for an SKB: + * - Maximum number of SKB frags + * - Maximum descriptors for contiguous TSO/GSO packet + * - Possible context descriptor + * - Possible TSO header descriptor + */ +#define ETHER_TX_DESC_THRESHOLD (MAX_SKB_FRAGS + ETHER_TX_MAX_SPLIT + 2) /** *@brief Returns count of available transmit descriptors diff --git a/drivers/net/ethernet/nvidia/nvethernet/osd.c b/drivers/net/ethernet/nvidia/nvethernet/osd.c index 95798f1b..5275bd00 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/osd.c +++ b/drivers/net/ethernet/nvidia/nvethernet/osd.c @@ -311,7 +311,8 @@ void osd_transmit_complete(void *priv, void *buffer, unsigned long dmaaddr, txq = netdev_get_tx_queue(ndev, qinx); if (netif_tx_queue_stopped(txq) && - ether_avail_txdesc_cnt(tx_ring) >= TX_DESC_THRESHOLD) { + (ether_avail_txdesc_cnt(tx_ring) > + ETHER_TX_DESC_THRESHOLD)) { netif_tx_wake_queue(txq); netdev_dbg(ndev, "Tx ring[%d] - waking Txq\n", chan); }