nvethernet: Add support for ioctls and HW offloads

1. ARP offload can be configured via a private ioctl.
   The application requesting ARP offload must provide
   the IP address to be configured for ARP offload in
   the ioctl data struct. Refer to struct ether_ifr_data
   and struct arp_offload_param for details (a userspace
   sketch follows this list).

2. Tx/Rx checksum offload can be configured via ethtool
   (see the ethtool examples after this list).

3. TCP segmentation offload (TSO) can be configured via ethtool.
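
As a minimal userspace sketch of the private ioctl path in item 1
(the struct layouts and command code below are illustrative
assumptions, not the driver's actual definitions; refer to the driver
headers for the real struct ether_ifr_data and
struct arp_offload_param):

	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <arpa/inet.h>
	#include <linux/if.h>
	#include <linux/sockios.h>

	/* Hypothetical mirror of the driver's ioctl payload. */
	struct arp_offload_param {
		unsigned char ip_addr[4];	/* IPv4 address to answer ARP for */
	};

	struct ether_ifr_data {
		unsigned int ifcmd;		/* private command code (assumed) */
		struct arp_offload_param arp;	/* command payload */
	};

	static int config_arp_offload(const char *ifname, const char *ip)
	{
		struct ether_ifr_data data;
		struct ifreq ifr;
		int sock, ret;

		memset(&data, 0, sizeof(data));
		memset(&ifr, 0, sizeof(ifr));

		data.ifcmd = 0;	/* assumed ARP-offload command code */
		inet_pton(AF_INET, ip, data.arp.ip_addr);

		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&data;

		sock = socket(AF_INET, SOCK_DGRAM, 0);
		if (sock < 0)
			return -1;

		/* SIOCDEVPRIVATE is the usual entry point for driver-private
		 * ioctls; the exact command this driver expects is assumed. */
		ret = ioctl(sock, SIOCDEVPRIVATE, &ifr);
		close(sock);
		return ret;
	}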

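For items 2 and 3, the standard ethtool feature toggles apply; eth0
here is only an example interface name:

	# show current offload state
	ethtool -k eth0

	# enable Rx and Tx checksum offload
	ethtool -K eth0 rx on tx on

	# enable TCP segmentation offload
	ethtool -K eth0 tso on
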
Bug 2571001

Change-Id: If639ab9049db97f200911af456ddb8cb8433fa12
Signed-off-by: Srinivas Ramachandran <srinivasra@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2109676
Reviewed-by: Rakesh Goyal <rgoyal@nvidia.com>
Reviewed-by: Narayan Reddy <narayanr@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Ashutosh Jha <ajha@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>

@@ -215,6 +215,7 @@ void osd_receive_packet(void *priv, void *rxring, unsigned int chan,
 			unsigned int dma_buf_len, void *rxpkt_cx)
 {
 	struct ether_priv_data *pdata = (struct ether_priv_data *)priv;
+	struct ether_rx_napi *rx_napi = pdata->rx_napi[chan];
 	struct osi_rx_ring *rx_ring = (struct osi_rx_ring *)rxring;
 	struct osi_rx_swcx *rx_swcx = rx_ring->rx_swcx + rx_ring->cur_rx_idx;
 	struct osi_rx_pkt_cx *rx_pkt_cx = (struct osi_rx_pkt_cx *)rxpkt_cx;
@@ -230,6 +231,12 @@ void osd_receive_packet(void *priv, void *rxring, unsigned int chan,
 		    OSI_PKT_CX_VALID)) {
 		skb_put(skb, rx_pkt_cx->pkt_len);
+		if (likely(rx_pkt_cx->rxcsum == OSI_CHECKSUM_UNNECESSARY)) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		} else {
+			skb->ip_summed = CHECKSUM_NONE;
+		}
+
 		if ((rx_pkt_cx->flags & OSI_PKT_CX_VLAN) == OSI_PKT_CX_VLAN) {
 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 					       rx_pkt_cx->vlan_tag);
@@ -238,7 +245,11 @@ void osd_receive_packet(void *priv, void *rxring, unsigned int chan,
 		skb->dev = ndev;
 		skb->protocol = eth_type_trans(skb, ndev);
 		ndev->stats.rx_bytes += skb->len;
-		netif_receive_skb(skb);
+		if (likely(ndev->features & NETIF_F_GRO)) {
+			napi_gro_receive(&rx_napi->napi, skb);
+		} else {
+			netif_receive_skb(skb);
+		}
 	} else {
 		ndev->stats.rx_crc_errors = pkt_err_stat->rx_crc_error;
 		ndev->stats.rx_errors++;
@@ -260,7 +271,9 @@ void osd_receive_packet(void *priv, void *rxring, unsigned int chan,
  * @buffer: Buffer address to free.
  * @dmaaddr: DMA address to unmap.
  * @len: Length of data.
- * @pkt_valid: Packet is valid or not
+ * @tx_done_pkt_cx: Pointer to struct which has Tx done status info.
+ *	This struct has flags to indicate a Tx error and whether the DMA
+ *	address is mapped from a paged or linear buffer.
  *
  * Algorithm:
  * 1) Updates stats for linux network stack.
@@ -274,20 +287,44 @@ void osd_receive_packet(void *priv, void *rxring, unsigned int chan,
  * Return: None.
  */
 void osd_transmit_complete(void *priv, void *buffer, unsigned long dmaaddr,
-			   unsigned int len, int pkt_valid)
+			   unsigned int len, void *tx_done_pkt_cx)
 {
+	struct osi_txdone_pkt_cx *txdone_pkt_cx = (struct osi_txdone_pkt_cx *)
+						  tx_done_pkt_cx;
 	struct ether_priv_data *pdata = (struct ether_priv_data *)priv;
+	struct osi_dma_priv_data *osi_dma = pdata->osi_dma;
 	struct sk_buff *skb = (struct sk_buff *)buffer;
-	struct net_device *ndev = pdata->ndev;
 	dma_addr_t dma_addr = (dma_addr_t)dmaaddr;
+	struct net_device *ndev = pdata->ndev;
+	struct osi_tx_ring *tx_ring;
+	struct netdev_queue *txq;
+	unsigned int chan;
 
 	ndev->stats.tx_packets++;
 	ndev->stats.tx_bytes += len;
 
-	if (dma_addr)
-		dma_unmap_single(pdata->dev, dmaaddr,
-				 len, DMA_TO_DEVICE);
+	if (dma_addr) {
+		if ((txdone_pkt_cx->flags & OSI_TXDONE_CX_PAGED_BUF) ==
+		    OSI_TXDONE_CX_PAGED_BUF) {
+			dma_unmap_page(pdata->dev, dmaaddr,
+				       len, DMA_TO_DEVICE);
+		} else {
+			dma_unmap_single(pdata->dev, dmaaddr,
+					 len, DMA_TO_DEVICE);
+		}
+	}
 
-	if (skb)
+	if (skb) {
+		chan = skb_get_queue_mapping(skb);
+		tx_ring = osi_dma->tx_ring[chan];
+		txq = netdev_get_tx_queue(ndev, chan);
+
+		if (netif_tx_queue_stopped(txq) &&
+		    ether_avail_txdesc_cnt(tx_ring) >= TX_DESC_THRESHOLD) {
+			netif_tx_wake_queue(txq);
+			netdev_dbg(ndev, "Tx ring[%d] - waking Txq\n", chan);
+		}
+
 		dev_consume_skb_any(skb);
+	}
 }
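
For context, the wake logic added to osd_transmit_complete() pairs
with a stop check on the transmit side. Below is a minimal sketch of
that counterpart, assuming a hypothetical per-channel descriptor-count
helper; this is not code from the commit:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	#define TX_DESC_THRESHOLD 32	/* assumed value; defined in the driver */

	/* Hypothetical helper standing in for ether_avail_txdesc_cnt(). */
	unsigned int tx_avail_desc_cnt(unsigned int chan);

	/* In the xmit path: stop this skb's queue when free descriptors run
	 * low, so the completion handler above can wake it once the count
	 * recovers past TX_DESC_THRESHOLD. */
	static void ether_maybe_stop_txq(struct sk_buff *skb,
					 struct net_device *ndev)
	{
		unsigned int chan = skb_get_queue_mapping(skb);
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, chan);

		if (tx_avail_desc_cnt(chan) < TX_DESC_THRESHOLD)
			netif_tx_stop_queue(txq);
	}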