nvethernet: use maximum transmit buffer size

1) Removes the 4KB-per-descriptor limitation.
2) Sets the maximum transmit buffer size to 0x3FFF.
3) Adjusts the descriptor threshold to account for TSO/GSO.

Bug 200542488

Change-Id: Ic32d5e1e49df48b0da057349f8300d029410d322
Signed-off-by: Bhadram Varka <vbhadram@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2174519
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Authored by Bhadram Varka on 2019-08-13 19:54:46 +05:30; committed by Revanth Kumar Uppala
parent 9c4438324c
commit e4f82ebb78
3 changed files with 17 additions and 11 deletions


@@ -1385,7 +1385,7 @@ static int ether_tx_swcx_alloc(struct device *dev,
 	struct skb_frag_struct *frag;
 	unsigned int page_idx, page_offset;
 	unsigned int max_data_len_per_txd = (unsigned int)
-		ETHER_MAX_DATA_LEN_PER_TXD_BUF; // 4KB
+		ETHER_TX_MAX_BUFF_SIZE;

 	memset(tx_pkt_cx, 0, sizeof(*tx_pkt_cx));
@@ -1503,10 +1503,9 @@ static int ether_tx_swcx_alloc(struct device *dev,
 	for (i = 0; i < num_frags; i++) {
 		offset = 0;
 		frag = &skb_shinfo(skb)->frags[i];
-		len = frag->size;
+		len = skb_frag_size(frag);
 		while (len) {
 			tx_swcx = tx_ring->tx_swcx + cur_tx_idx;
 			if (unlikely(tx_swcx->len)) {
 				goto desc_not_free;
 			}
@@ -1639,7 +1638,7 @@ static int ether_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	osi_hw_transmit(osi_dma, chan);

-	if (ether_avail_txdesc_cnt(tx_ring) < TX_DESC_THRESHOLD) {
+	if (ether_avail_txdesc_cnt(tx_ring) <= ETHER_TX_DESC_THRESHOLD) {
		netif_stop_subqueue(ndev, qinx);
		netdev_dbg(ndev, "Tx ring[%d] insufficient desc.\n", chan);
	}
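
Taken together, these hunks let a single fragment larger than the per-descriptor limit be split across several descriptors inside the existing while (len) loop. A minimal standalone sketch of that splitting arithmetic, assuming the 0x3FFF limit above (the txd_count() helper is hypothetical, not part of the driver):

#include <stdio.h>

/* Per-descriptor payload limit introduced by this commit. */
#define ETHER_TX_MAX_BUFF_SIZE 0x3FFFU

/* Hypothetical helper: how many Tx descriptors a buffer of 'len' bytes
 * consumes when each descriptor carries at most ETHER_TX_MAX_BUFF_SIZE
 * bytes, mirroring the driver's while (len) splitting loop. */
static unsigned int txd_count(unsigned int len)
{
	unsigned int count = 0;

	while (len) {
		unsigned int chunk = (len > ETHER_TX_MAX_BUFF_SIZE) ?
				     ETHER_TX_MAX_BUFF_SIZE : len;
		len -= chunk;
		count++;
	}
	return count;
}

int main(void)
{
	/* A 64KB TSO payload needs 5 descriptors at the 0x3FFF limit,
	 * versus 16 at the old 4KB (BIT(12)) limit. */
	printf("64KB payload -> %u descriptors\n", txd_count(65536U));
	return 0;
}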


@@ -120,14 +120,20 @@
 /**
  * @brief Maximum buffer length per DMA descriptor (4KB).
  */
-#define ETHER_MAX_DATA_LEN_PER_TXD_BUF	BIT(12)
+#define ETHER_TX_MAX_BUFF_SIZE	0x3FFF

-/**
- * @brief In-case of TSO/GSO, Tx ring needs atleast MAX_SKB_FRAGS +
- * one context descriptor +
- * one descriptor for header/linear buffer payload
- */
-#define TX_DESC_THRESHOLD	(MAX_SKB_FRAGS + 2)
+/* Descriptors required for maximum contiguous TSO/GSO packet
+ * one extra descriptor if there is linear buffer payload
+ */
+#define ETHER_TX_MAX_SPLIT	((GSO_MAX_SIZE / ETHER_TX_MAX_BUFF_SIZE) + 1)
+
+/* Maximum possible descriptors needed for an SKB:
+ * - Maximum number of SKB frags
+ * - Maximum descriptors for contiguous TSO/GSO packet
+ * - Possible context descriptor
+ * - Possible TSO header descriptor
+ */
+#define ETHER_TX_DESC_THRESHOLD	(MAX_SKB_FRAGS + ETHER_TX_MAX_SPLIT + 2)

 /**
  *@brief Returns count of available transmit descriptors
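
As a sanity check on the new threshold arithmetic, a small standalone program, assuming the common kernel values GSO_MAX_SIZE = 65536 and MAX_SKB_FRAGS = 17 (both are kernel-configuration dependent, so the numbers are illustrative):

#include <stdio.h>

/* Illustrative values: GSO_MAX_SIZE and MAX_SKB_FRAGS come from kernel
 * headers and vary with configuration. */
#define GSO_MAX_SIZE            65536U
#define MAX_SKB_FRAGS           17U
#define ETHER_TX_MAX_BUFF_SIZE  0x3FFFU

#define ETHER_TX_MAX_SPLIT      ((GSO_MAX_SIZE / ETHER_TX_MAX_BUFF_SIZE) + 1)
#define ETHER_TX_DESC_THRESHOLD (MAX_SKB_FRAGS + ETHER_TX_MAX_SPLIT + 2)

int main(void)
{
	/* 65536 / 16383 = 4 (integer division), so MAX_SPLIT = 5 */
	printf("ETHER_TX_MAX_SPLIT      = %u\n", ETHER_TX_MAX_SPLIT);
	/* 17 + 5 + 2 = 24 descriptors reserved per worst-case SKB */
	printf("ETHER_TX_DESC_THRESHOLD = %u\n", ETHER_TX_DESC_THRESHOLD);
	return 0;
}

With these values ETHER_TX_MAX_SPLIT comes out to 5 and ETHER_TX_DESC_THRESHOLD to 24, i.e. the queue is stopped while fewer than one worst-case SKB's worth of descriptors remains.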


@@ -311,7 +311,8 @@ void osd_transmit_complete(void *priv, void *buffer, unsigned long dmaaddr,
 	txq = netdev_get_tx_queue(ndev, qinx);

 	if (netif_tx_queue_stopped(txq) &&
-	    ether_avail_txdesc_cnt(tx_ring) >= TX_DESC_THRESHOLD) {
+	    (ether_avail_txdesc_cnt(tx_ring) >
+	    ETHER_TX_DESC_THRESHOLD)) {
		netif_tx_wake_queue(txq);
		netdev_dbg(ndev, "Tx ring[%d] - waking Txq\n", chan);
	}
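
The stop condition in ether_start_xmit() (avail <= threshold) and the wake condition here (avail > threshold) are exact complements, so the queue is woken only once a full worst-case SKB is again guaranteed to fit. A minimal model of that pairing (should_stop()/should_wake() are hypothetical names; the threshold value 24 is the illustrative result computed above):

#include <stdbool.h>
#include <stdio.h>

#define ETHER_TX_DESC_THRESHOLD 24U	/* illustrative value from above */

/* Hypothetical model of the decisions made in ether_start_xmit()
 * and osd_transmit_complete() after this commit. */
static bool should_stop(unsigned int avail)
{
	return avail <= ETHER_TX_DESC_THRESHOLD;
}

static bool should_wake(unsigned int avail)
{
	return avail > ETHER_TX_DESC_THRESHOLD;
}

int main(void)
{
	/* At exactly the threshold the queue stays stopped ... */
	printf("avail=24: stop=%d wake=%d\n", should_stop(24), should_wake(24));
	/* ... and is woken only once one more descriptor is free. */
	printf("avail=25: stop=%d wake=%d\n", should_stop(25), should_wake(25));
	return 0;
}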