nvethernet: Add support for ioctls and HW offloads

1. ARP offload can be configured via a private ioctl.
   The application requesting ARP offload must supply
   the IP address to be configured via the ioctl data
   structure; refer to struct ether_ifr_data and
   struct arp_offload_param for details. A hedged
   userspace sketch follows this list.

2. Tx/Rx checksum offload can be configured via ethtool
   (see the sketch after this list).

3. TCP segmentation offload (TSO) can be configured via
   ethtool (see the sketch after this list).
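
A minimal userspace sketch for item 1. The ioctl request number and
the ether_ifr_data/arp_offload_param layouts used here (ifcmd, ptr,
ip_addr) are assumptions for illustration only; check the driver
headers for the actual definitions.

  #include <net/if.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <sys/socket.h>
  #include <unistd.h>
  #include <linux/sockios.h>

  /* Assumed layouts; see the driver headers for the real ones. */
  struct arp_offload_param {
          unsigned char ip_addr[4];   /* IPv4 address to offload */
  };

  struct ether_ifr_data {
          unsigned int ifcmd;         /* driver-private command */
          void *ptr;                  /* command-specific payload */
  };

  static int ether_config_arp_offload(const char *ifname,
                                      const unsigned char ip[4])
  {
          struct arp_offload_param param = { { 0 } };
          struct ether_ifr_data data = { 0 };
          struct ifreq ifr = { 0 };
          int fd, ret;

          fd = socket(AF_INET, SOCK_DGRAM, 0);
          if (fd < 0)
                  return -1;
          memcpy(param.ip_addr, ip, sizeof(param.ip_addr));
          data.ifcmd = 0;     /* assumed: driver-defined ARP offload cmd */
          data.ptr = &param;
          strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
          ifr.ifr_data = (void *)&data;
          ret = ioctl(fd, SIOCDEVPRIVATE, &ifr);
          close(fd);
          return ret;
  }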
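
Items 2 and 3 go through the standard ethtool interface (from the
shell, e.g. ethtool -K eth0 tx on rx on tso on). The C sketch below
uses the legacy SIOCETHTOOL ioctl, which is generic Linux uapi rather
than anything specific to this driver; "eth0" is an example name.

  #include <net/if.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <sys/socket.h>
  #include <unistd.h>
  #include <linux/ethtool.h>
  #include <linux/sockios.h>

  /* Enable or disable one offload via the legacy ethtool ioctl. */
  static int ether_set_offload(const char *ifname, __u32 cmd, __u32 on)
  {
          struct ethtool_value eval = { .cmd = cmd, .data = on };
          struct ifreq ifr = { 0 };
          int fd, ret;

          fd = socket(AF_INET, SOCK_DGRAM, 0);
          if (fd < 0)
                  return -1;
          strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
          ifr.ifr_data = (void *)&eval;
          ret = ioctl(fd, SIOCETHTOOL, &ifr);
          close(fd);
          return ret;
  }

  /* Usage:
   *   ether_set_offload("eth0", ETHTOOL_STXCSUM, 1);
   *   ether_set_offload("eth0", ETHTOOL_SRXCSUM, 1);
   *   ether_set_offload("eth0", ETHTOOL_STSO, 1);
   */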

Bug 2571001

Change-Id: If639ab9049db97f200911af456ddb8cb8433fa12
Signed-off-by: Srinivas Ramachandran <srinivasra@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2109676
Reviewed-by: Rakesh Goyal <rgoyal@nvidia.com>
Reviewed-by: Narayan Reddy <narayanr@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Ashutosh Jha <ajha@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>

@@ -29,6 +29,8 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/of.h>
#include <osi_core.h>
@@ -41,6 +43,34 @@
#define ETHER_QUEUE_PRIO_DEFAULT 0U
#define ETHER_QUEUE_PRIO_MAX 7U
/* Map max. 4KB buffer per Tx descriptor */
#define ETHER_MAX_DATA_LEN_PER_TXD_BUF BIT(12)
/* In case of TSO/GSO, the Tx ring needs at most MAX_SKB_FRAGS +
 * one context descriptor +
 * one descriptor for the header/linear buffer payload
 */
#define TX_DESC_THRESHOLD (MAX_SKB_FRAGS + 2)
/**
* ether_avail_txdesc_cnt - Return the count of available Tx descriptors.
* @tx_ring: Tx ring instance associated with channel number
*
* Algorithm: Compute the difference between the current descriptor
* index and the descriptor index to be cleaned, modulo the ring size.
*
* Dependencies: MAC needs to be initialized and Tx ring allocated.
*
* Protection: None.
*
* Return: Number of available descriptors in the given Tx ring.
*/
static inline int ether_avail_txdesc_cnt(struct osi_tx_ring *tx_ring)
{
return ((tx_ring->clean_idx - tx_ring->cur_tx_idx - 1) &
(TX_DESC_CNT - 1));
}
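/* Worked example (illustrative, assuming TX_DESC_CNT = 256): with
 * clean_idx = 10 and cur_tx_idx = 250,
 * (10 - 250 - 1) & 255 = 15 descriptors are still free.
 * The -1 holds one slot back so that a completely full ring is never
 * confused with an empty one; TSO transmits can proceed only while
 * this count stays at or above TX_DESC_THRESHOLD.
 */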
/**
* struct ether_tx_napi - DMA Transmit Channel NAPI
* @chan: Transmit channel number
@@ -100,6 +130,7 @@ struct ether_rx_napi {
* @dma_mask: memory allocation mask
* @mac_loopback_mode: MAC loopback mode
* @q_prio: Array of MTL queue TX priority
* @hw_feat_cur_state: Current state of features enabled in HW
*/
struct ether_priv_data {
struct osi_core_priv_data *osi_core;
@@ -138,6 +169,7 @@ struct ether_priv_data {
int tx_irqs[ETHER_MAX_IRQS];
int rx_irqs[ETHER_MAX_IRQS];
unsigned long long dma_mask;
netdev_features_t hw_feat_cur_state;
/* for MAC loopback */
unsigned int mac_loopback_mode;