nvethernet: use maximum transmit buffer size

1) Removes the 4KB-per-descriptor limit on transmit buffers.
2) Adds ETHER_TX_MAX_BUFF_SIZE (0x3FFF) as the maximum transmit buffer size per descriptor (see the sketch below).
3) Changes the Tx descriptor threshold to account for TSO/GSO packets.
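
A minimal user-space sketch (not part of the patch) of the arithmetic behind items 1) and 2): with the per-descriptor cap raised from 4KB to ETHER_TX_MAX_BUFF_SIZE (0x3FFF bytes), a large TSO payload consumes far fewer Tx descriptors. Only the ETHER_TX_MAX_BUFF_SIZE name and value come from this change; tx_descs_needed() and OLD_MAX_PER_TXD are hypothetical helpers for illustration.

#include <stdio.h>

#define ETHER_TX_MAX_BUFF_SIZE	0x3FFFU	/* new per-descriptor maximum (from this change) */
#define OLD_MAX_PER_TXD		0x1000U	/* previous 4KB limit */

/* Hypothetical helper: descriptors needed to carry 'len' bytes. */
static unsigned int tx_descs_needed(unsigned int len, unsigned int max_per_desc)
{
	return (len + max_per_desc - 1U) / max_per_desc;
}

int main(void)
{
	unsigned int len = 64U * 1024U;	/* e.g. one 64KB TSO payload */

	printf("old 4KB cap : %u descriptors\n",
	       tx_descs_needed(len, OLD_MAX_PER_TXD));		/* prints 16 */
	printf("new 0x3FFF  : %u descriptors\n",
	       tx_descs_needed(len, ETHER_TX_MAX_BUFF_SIZE));	/* prints 5 */
	return 0;
}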

Bug 200542488

Change-Id: Ic32d5e1e49df48b0da057349f8300d029410d322
Signed-off-by: Bhadram Varka <vbhadram@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2174519
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit e4f82ebb78 (parent 9c4438324c)
Author:    Bhadram Varka <vbhadram@nvidia.com>
Date:      2019-08-13 19:54:46 +05:30
Committer: Revanth Kumar Uppala
3 changed files with 17 additions and 11 deletions

@@ -1385,7 +1385,7 @@ static int ether_tx_swcx_alloc(struct device *dev,
 	struct skb_frag_struct *frag;
 	unsigned int page_idx, page_offset;
 	unsigned int max_data_len_per_txd = (unsigned int)
-			ETHER_MAX_DATA_LEN_PER_TXD_BUF; // 4KB
+			ETHER_TX_MAX_BUFF_SIZE;

 	memset(tx_pkt_cx, 0, sizeof(*tx_pkt_cx));
@@ -1503,10 +1503,9 @@ static int ether_tx_swcx_alloc(struct device *dev,
 	for (i = 0; i < num_frags; i++) {
 		offset = 0;
 		frag = &skb_shinfo(skb)->frags[i];
-		len = frag->size;
+		len = skb_frag_size(frag);

 		while (len) {
 			tx_swcx = tx_ring->tx_swcx + cur_tx_idx;
 			if (unlikely(tx_swcx->len)) {
 				goto desc_not_free;
 			}
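
For reference, a user-space sketch of the fragment walk in the hunk above: each fragment is consumed in chunks of at most ETHER_TX_MAX_BUFF_SIZE, with a running offset, and each chunk occupies one Tx descriptor. Only ETHER_TX_MAX_BUFF_SIZE comes from the patch; fill_one_desc() and walk_fragment() are hypothetical stand-ins.

#include <stdio.h>

#define ETHER_TX_MAX_BUFF_SIZE	0x3FFFU

/* Hypothetical stand-in for filling one Tx software context / descriptor. */
static void fill_one_desc(unsigned int offset, unsigned int len)
{
	printf("desc: frag offset=%u len=%u\n", offset, len);
}

static void walk_fragment(unsigned int frag_len)
{
	unsigned int offset = 0;

	/* Consume the fragment in chunks no larger than the per-descriptor cap. */
	while (frag_len) {
		unsigned int chunk = frag_len < ETHER_TX_MAX_BUFF_SIZE ?
				     frag_len : ETHER_TX_MAX_BUFF_SIZE;

		fill_one_desc(offset, chunk);
		offset += chunk;
		frag_len -= chunk;
	}
}

int main(void)
{
	walk_fragment(32U * 1024U);	/* a 32KB fragment -> 3 descriptors */
	return 0;
}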
@@ -1639,7 +1638,7 @@ static int ether_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	osi_hw_transmit(osi_dma, chan);

-	if (ether_avail_txdesc_cnt(tx_ring) < TX_DESC_THRESHOLD) {
+	if (ether_avail_txdesc_cnt(tx_ring) <= ETHER_TX_DESC_THRESHOLD) {
 		netif_stop_subqueue(ndev, qinx);
 		netdev_dbg(ndev, "Tx ring[%d] insufficient desc.\n", chan);
 	}
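
Finally, a hedged sketch of what "descriptor threshold considering TSO/GSO" means for the hunk above: the subqueue is stopped while the free descriptor count might not cover a worst-case TSO/GSO packet (many fragments, each split at 0x3FFF bytes, plus any setup descriptor the hardware may need). The actual value of ETHER_TX_DESC_THRESHOLD is not visible in this diff; every number below is an illustrative assumption only.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative assumptions, not values from the driver. */
#define ASSUMED_MAX_FRAGS	17U	/* linear part + page fragments */
#define ASSUMED_DESCS_PER_FRAG	3U	/* chunks after the 0x3FFF split */
#define ASSUMED_TX_THRESHOLD	(ASSUMED_MAX_FRAGS * ASSUMED_DESCS_PER_FRAG + 1U)

/* Mirrors the '<=' comparison in the hunk above, with assumed numbers. */
static bool should_stop_queue(unsigned int avail_descs)
{
	return avail_descs <= ASSUMED_TX_THRESHOLD;
}

int main(void)
{
	printf("assumed threshold = %u\n", ASSUMED_TX_THRESHOLD);	/* 52 */
	printf("60 free -> stop? %d\n", should_stop_queue(60U));	/* 0 */
	printf("40 free -> stop? %d\n", should_stop_queue(40U));	/* 1 */
	return 0;
}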