diff --git a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c
index 3d337282..5e389499 100644
--- a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c
+++ b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c
@@ -996,6 +996,59 @@ static int ether_close(struct net_device *dev)
         return ret;
 }
 
+/**
+ * ether_handle_tso - Helper function to check if TSO is used in given skb.
+ * @tx_pkt_cx: Pointer to packet context information structure.
+ * @skb: socket buffer.
+ *
+ * Algorithm:
+ * 1) Check if driver received a TSO/LSO/GSO packet
+ * 2) If so, store the packet details like MSS (Maximum Segment Size),
+ *    packet header length, packet payload length, tcp/udp header length.
+ *
+ * Dependencies: None.
+ *
+ * Protection: None.
+ *
+ * Return: 0 - Not a TSO packet, 1 - success, -ve value - failure.
+ */
+static int ether_handle_tso(struct osi_tx_pkt_cx *tx_pkt_cx,
+                            struct sk_buff *skb)
+{
+        int ret = 1;
+
+        if (skb_is_gso(skb) == 0) {
+                return 0;
+        }
+
+        if (skb_header_cloned(skb)) {
+                ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+                if (ret) {
+                        return ret;
+                }
+        }
+
+        /* Start filling packet details in tx_pkt_cx */
+        if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP)) {
+                tx_pkt_cx->tcp_udp_hdrlen = sizeof(struct udphdr);
+                tx_pkt_cx->mss = skb_shinfo(skb)->gso_size -
+                                 sizeof(struct udphdr);
+        } else {
+                tx_pkt_cx->tcp_udp_hdrlen = tcp_hdrlen(skb);
+                tx_pkt_cx->mss = skb_shinfo(skb)->gso_size;
+        }
+        tx_pkt_cx->total_hdrlen = skb_transport_offset(skb) +
+                                  tx_pkt_cx->tcp_udp_hdrlen;
+        tx_pkt_cx->payload_len = (skb->len - tx_pkt_cx->total_hdrlen);
+
+        netdev_dbg(skb->dev, "mss           =%u\n", tx_pkt_cx->mss);
+        netdev_dbg(skb->dev, "payload_len   =%u\n", tx_pkt_cx->payload_len);
+        netdev_dbg(skb->dev, "tcp_udp_hdrlen=%u\n", tx_pkt_cx->tcp_udp_hdrlen);
+        netdev_dbg(skb->dev, "total_hdrlen  =%u\n", tx_pkt_cx->total_hdrlen);
+
+        return ret;
+}
+
 /**
  * ether_tx_swcx_alloc - Tx ring software context allocation.
  * @dev: device instance associated with driver.
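The fields captured above are later programmed into the Tx descriptors so that the MAC itself performs the segmentation. As a rough illustration of how they relate, a minimal sketch (not part of the patch; the helper name is hypothetical, DIV_ROUND_UP is the standard kernel macro):

/* Number of on-wire segments the MAC generates for a TSO skb: each
 * segment carries at most mss bytes of payload and is prefixed with a
 * copy of the total_hdrlen header bytes. For example, payload_len = 64000
 * with mss = 1448 yields DIV_ROUND_UP(64000, 1448) = 45 segments.
 */
static inline unsigned int ether_tso_seg_cnt(const struct osi_tx_pkt_cx *tx_pkt_cx)
{
        return DIV_ROUND_UP(tx_pkt_cx->payload_len, tx_pkt_cx->mss);
}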
@@ -1020,16 +1073,39 @@ static int ether_tx_swcx_alloc(struct device *dev,
         struct osi_tx_pkt_cx *tx_pkt_cx = &tx_ring->tx_pkt_cx;
         unsigned int cur_tx_idx = tx_ring->cur_tx_idx;
         struct osi_tx_swcx *tx_swcx = NULL;
-        unsigned int len = 0;
-        int cnt = 0;
+        unsigned int len = 0, offset = 0, size = 0;
+        int cnt = 0, ret = 0, i, num_frags;
+        struct skb_frag_struct *frag;
+        unsigned int page_idx, page_offset;
+        unsigned int max_data_len_per_txd = (unsigned int)
+                                        ETHER_MAX_DATA_LEN_PER_TXD_BUF; // 4KB
 
         memset(tx_pkt_cx, 0, sizeof(*tx_pkt_cx));
 
+        ret = ether_handle_tso(tx_pkt_cx, skb);
+        if (unlikely(ret < 0)) {
+                dev_err(dev, "Unable to handle TSO packet (%d)\n", ret);
+                /* Caller will take care of consuming skb */
+                return ret;
+        }
+
+        if (ret == 0) {
+                dev_dbg(dev, "Not a TSO packet\n");
+                if (skb->ip_summed == CHECKSUM_PARTIAL) {
+                        tx_pkt_cx->flags |= OSI_PKT_CX_CSUM;
+                }
+        } else {
+                tx_pkt_cx->flags |= OSI_PKT_CX_TSO;
+        }
+
         if (unlikely(skb_vlan_tag_present(skb))) {
                 tx_pkt_cx->vtag_id = skb_vlan_tag_get(skb);
                 tx_pkt_cx->vtag_id |= (skb->priority << VLAN_PRIO_SHIFT);
-                tx_pkt_cx->flags = OSI_PKT_CX_VLAN;
+                tx_pkt_cx->flags |= OSI_PKT_CX_VLAN;
+        }
+
+        if (((tx_pkt_cx->flags & OSI_PKT_CX_VLAN) == OSI_PKT_CX_VLAN) ||
+            ((tx_pkt_cx->flags & OSI_PKT_CX_TSO) == OSI_PKT_CX_TSO)) {
                 tx_swcx = tx_ring->tx_swcx + cur_tx_idx;
                 if (tx_swcx->len) {
                         return 0;
@@ -1040,28 +1116,142 @@ static int ether_tx_swcx_alloc(struct device *dev,
                 INCR_TX_DESC_INDEX(cur_tx_idx, 1U);
         }
 
-        len = skb_headlen(skb);
-
-        tx_swcx = tx_ring->tx_swcx + cur_tx_idx;
-        if (tx_swcx->len) {
-                return 0;
+        if ((tx_pkt_cx->flags & OSI_PKT_CX_TSO) == OSI_PKT_CX_TSO) {
+                /* For TSO map the header in separate desc. */
+                len = tx_pkt_cx->total_hdrlen;
+        } else {
+                len = skb_headlen(skb);
         }
 
-        tx_swcx->buf_phy_addr = dma_map_single(dev, skb->data,
-                                               len, DMA_TO_DEVICE);
-        if (unlikely(dma_mapping_error(dev, tx_swcx->buf_phy_addr))) {
-                dev_err(dev, "failed to map Tx buffer\n");
-                return -ENOMEM;
+        /* Map the linear buffers from the skb first.
+         * For TSO only up to the TCP header is filled in the first desc.
+         */
+        while (len) {
+                tx_swcx = tx_ring->tx_swcx + cur_tx_idx;
+                if (unlikely(tx_swcx->len)) {
+                        goto desc_not_free;
+                }
+
+                size = min(len, max_data_len_per_txd);
+
+                tx_swcx->buf_phy_addr = dma_map_single(dev,
+                                                       skb->data + offset,
+                                                       size,
+                                                       DMA_TO_DEVICE);
+                if (unlikely(dma_mapping_error(dev,
+                                               tx_swcx->buf_phy_addr))) {
+                        dev_err(dev, "failed to map Tx buffer\n");
+                        ret = -ENOMEM;
+                        goto dma_map_failed;
+                }
+                tx_swcx->is_paged_buf = 0;
+
+                tx_swcx->len = size;
+                len -= size;
+                offset += size;
+                cnt++;
+                INCR_TX_DESC_INDEX(cur_tx_idx, 1U);
+        }
+
+        /* Map remaining payload from linear buffer
+         * to subsequent descriptors in case of TSO
+         */
+        if ((tx_pkt_cx->flags & OSI_PKT_CX_TSO) == OSI_PKT_CX_TSO) {
+                len = skb_headlen(skb) - tx_pkt_cx->total_hdrlen;
+                while (len) {
+                        tx_swcx = tx_ring->tx_swcx + cur_tx_idx;
+
+                        if (unlikely(tx_swcx->len)) {
+                                goto desc_not_free;
+                        }
+
+                        size = min(len, max_data_len_per_txd);
+                        tx_swcx->buf_phy_addr = dma_map_single(dev,
+                                                        skb->data + offset,
+                                                        size,
+                                                        DMA_TO_DEVICE);
+                        if (unlikely(dma_mapping_error(dev,
+                                                tx_swcx->buf_phy_addr))) {
+                                dev_err(dev, "failed to map Tx buffer\n");
+                                ret = -ENOMEM;
+                                goto dma_map_failed;
+                        }
+
+                        tx_swcx->is_paged_buf = 0;
+                        tx_swcx->len = size;
+                        len -= size;
+                        offset += size;
+                        cnt++;
+                        INCR_TX_DESC_INDEX(cur_tx_idx, 1U);
+                }
+        }
+
+        /* Process fragmented skbs */
+        num_frags = skb_shinfo(skb)->nr_frags;
+        for (i = 0; i < num_frags; i++) {
+                offset = 0;
+                frag = &skb_shinfo(skb)->frags[i];
+                len = frag->size;
+                while (len) {
+                        tx_swcx = tx_ring->tx_swcx + cur_tx_idx;
+
+                        if (unlikely(tx_swcx->len)) {
+                                goto desc_not_free;
+                        }
+
+                        size = min(len, max_data_len_per_txd);
+
+                        page_idx = (frag->page_offset + offset) >> PAGE_SHIFT;
+                        page_offset = (frag->page_offset + offset) & ~PAGE_MASK;
+                        tx_swcx->buf_phy_addr = dma_map_page(dev,
+                                                (frag->page.p + page_idx),
+                                                page_offset, size,
+                                                DMA_TO_DEVICE);
+                        if (unlikely(dma_mapping_error(dev,
+                                                tx_swcx->buf_phy_addr))) {
+                                dev_err(dev, "failed to map Tx buffer\n");
+                                ret = -ENOMEM;
+                                goto dma_map_failed;
+                        }
+                        tx_swcx->is_paged_buf = 1;
+
+                        tx_swcx->len = size;
+                        len -= size;
+                        offset += size;
+                        cnt++;
+                        INCR_TX_DESC_INDEX(cur_tx_idx, 1U);
+                }
         }
 
-        tx_swcx->len = len;
         tx_swcx->buf_virt_addr = skb;
-
-        cnt++;
-
         tx_pkt_cx->desc_cnt = cnt;
 
         return cnt;
+
+desc_not_free:
+        ret = 0;
+
+dma_map_failed:
+        /* Failed to fill current desc. Roll back previous descriptors. */
+        while (cnt) {
+                DECR_TX_DESC_INDEX(cur_tx_idx, 1U);
+                tx_swcx = tx_ring->tx_swcx + cur_tx_idx;
+                if (tx_swcx->buf_phy_addr) {
+                        if (tx_swcx->is_paged_buf) {
+                                dma_unmap_page(dev, tx_swcx->buf_phy_addr,
+                                               tx_swcx->len, DMA_TO_DEVICE);
+                        } else {
+                                dma_unmap_single(dev, tx_swcx->buf_phy_addr,
+                                                 tx_swcx->len, DMA_TO_DEVICE);
+                        }
+                        tx_swcx->buf_phy_addr = 0;
+                }
+                tx_swcx->len = 0;
+
+                tx_swcx->is_paged_buf = 0;
+                cnt--;
+        }
+        return ret;
 }
 
 /**
@@ -1129,8 +1319,8 @@ static int ether_start_xmit(struct sk_buff *skb, struct net_device *ndev)
         count = ether_tx_swcx_alloc(pdata->dev, tx_ring, skb);
         if (count <= 0) {
                 if (count == 0) {
-                        netif_stop_queue(ndev);
-                        netdev_err(ndev, "Tx ring is full\n");
+                        netif_stop_subqueue(ndev, chan);
+                        netdev_err(ndev, "Tx ring[%d] is full\n", chan);
                         return NETDEV_TX_BUSY;
                 }
                 dev_kfree_skb_any(skb);
@@ -1139,6 +1329,11 @@ static int ether_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
         osi_hw_transmit(osi_dma, chan);
 
+        if (ether_avail_txdesc_cnt(tx_ring) < TX_DESC_THRESHOLD) {
+                netif_stop_subqueue(ndev, chan);
+                netdev_dbg(ndev, "Tx ring[%d] insufficient desc.\n", chan);
+        }
+
         return NETDEV_TX_OK;
 }
 
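The stop condition added to ether_start_xmit() relies on each skb having a bounded descriptor footprint under the mapping scheme above. A minimal sketch of that worst-case budget (hypothetical helper, not part of the patch; ETHER_MAX_DATA_LEN_PER_TXD_BUF and TX_DESC_THRESHOLD are introduced below in ether_linux.h, DIV_ROUND_UP and skb_frag_size() are standard kernel helpers):

/* Rough upper bound on Tx descriptors one skb can consume: one context
 * descriptor (VLAN/TSO), the linear area split into 4KB chunks, and
 * each page fragment split into 4KB chunks.
 */
static unsigned int ether_txd_needed(struct sk_buff *skb)
{
        unsigned int needed = 1;        /* context descriptor */
        unsigned int i;

        needed += DIV_ROUND_UP(skb_headlen(skb),
                               ETHER_MAX_DATA_LEN_PER_TXD_BUF);

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                needed += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                       ETHER_MAX_DATA_LEN_PER_TXD_BUF);

        return needed;
}

When the linear area and every fragment fit in a single 4KB chunk this comes to MAX_SKB_FRAGS + 2 at most, which is what the TX_DESC_THRESHOLD check approximates; the separate TSO header descriptor can add one more in the worst case.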
"Failed" : "Success"); + pdata->hw_feat_cur_state &= ~NETIF_F_RXCSUM; + } + } + + return ret; +} + static const struct net_device_ops ether_netdev_ops = { .ndo_open = ether_open, .ndo_stop = ether_close, @@ -1263,6 +1515,7 @@ static const struct net_device_ops ether_netdev_ops = { .ndo_set_mac_address = ether_set_mac_addr, .ndo_change_mtu = ether_change_mtu, .ndo_select_queue = ether_select_queue, + .ndo_set_features = ether_set_features, }; /** @@ -2234,6 +2487,62 @@ static int ether_set_dma_mask(struct ether_priv_data *pdata) return ret; } +/** ether_set_ndev_features - Set the network device feature flags + * @ndev: Network device instance + * @pdata: OS dependent private data structure. + * + * Algorithm: + * 1) Check the HW features supported + * 2) Enable corresponding feature flag so that network subsystem of OS + * is aware of device capabilities. + * 3) Update current enable/disable state of features currently enabled + * + * Dependencies: Netdev allocated and HW features are already parsed. + * + * Protection: None + * + * Return: None. + */ +static void ether_set_ndev_features(struct net_device *ndev, + struct ether_priv_data *pdata) +{ + netdev_features_t features = 0; + + if (pdata->hw_feat.tso_en) { + features |= NETIF_F_TSO; + features |= NETIF_F_SG; + } + + if (pdata->hw_feat.tx_coe_sel) { + features |= NETIF_F_IP_CSUM; + features |= NETIF_F_IPV6_CSUM; + } + + if (pdata->hw_feat.rx_coe_sel) { + features |= NETIF_F_RXCSUM; + } + + /* GRO is independent of HW features */ + features |= NETIF_F_GRO; + + if (pdata->hw_feat.sa_vlan_ins) { + features |= NETIF_F_HW_VLAN_CTAG_TX; + } + + /* Rx VLAN tag detection enabled by default */ + features |= NETIF_F_HW_VLAN_CTAG_RX; + + /* Features available in HW */ + ndev->hw_features = features; + /* Features that can be changed by user */ + ndev->features = features; + /* Features that can be inherited by vlan devices */ + ndev->vlan_features = features; + + /* Set current state of features enabled by default in HW */ + pdata->hw_feat_cur_state = features; +} + /** * ether_probe - Ethernet platform driver probe. * @pdev: platform device associated with platform driver. @@ -2333,6 +2642,9 @@ static int ether_probe(struct platform_device *pdev) goto err_dma_mask; } + /* Set netdev features based on hw features */ + ether_set_ndev_features(ndev, pdata); + ret = osi_get_mac_version(osi_core->base, &osi_core->mac_ver); if (ret < 0) { dev_err(&pdev->dev, "failed to get MAC version (%u)\n", @@ -2355,13 +2667,6 @@ static int ether_probe(struct platform_device *pdev) ndev->netdev_ops = ðer_netdev_ops; ether_set_ethtool_ops(ndev); - ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; - if (pdata->hw_feat.sa_vlan_ins) { - ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; - } - - ndev->features |= ndev->hw_features; - ret = ether_alloc_napi(pdata); if (ret < 0) { dev_err(&pdev->dev, "failed to allocate NAPI\n"); diff --git a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h index 0747ef65..a30add26 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h +++ b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h @@ -29,6 +29,8 @@ #include #include #include +#include +#include #include #include @@ -41,6 +43,34 @@ #define ETHER_QUEUE_PRIO_DEFAULT 0U #define ETHER_QUEUE_PRIO_MAX 7U +/* Map max. 
diff --git a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h
index 0747ef65..a30add26 100644
--- a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h
+++ b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h
@@ -29,6 +29,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 
@@ -41,6 +43,34 @@
 #define ETHER_QUEUE_PRIO_DEFAULT 0U
 #define ETHER_QUEUE_PRIO_MAX 7U
 
+/* Map max. 4KB buffer per Tx descriptor */
+#define ETHER_MAX_DATA_LEN_PER_TXD_BUF BIT(12)
+
+/* In case of TSO/GSO, Tx ring needs at most MAX_SKB_FRAGS +
+ * one context descriptor +
+ * one descriptor for header/linear buffer payload
+ */
+#define TX_DESC_THRESHOLD (MAX_SKB_FRAGS + 2)
+
+/**
+ * ether_avail_txdesc_cnt - Return count of available Tx descriptors.
+ * @tx_ring: Tx ring instance associated with channel number
+ *
+ * Algorithm: Check the difference between current desc index
+ * and the desc. index to be cleaned.
+ *
+ * Dependencies: MAC needs to be initialized and Tx ring allocated.
+ *
+ * Protection: None.
+ *
+ * Return: Number of available descriptors in the given Tx ring.
+ */
+static inline int ether_avail_txdesc_cnt(struct osi_tx_ring *tx_ring)
+{
+        return ((tx_ring->clean_idx - tx_ring->cur_tx_idx - 1) &
+                (TX_DESC_CNT - 1));
+}
+
 /**
  * struct ether_tx_napi - DMA Transmit Channel NAPI
  * @chan: Transmit channel number
@@ -100,6 +130,7 @@ struct ether_rx_napi {
  * @dma_mask: memory allocation mask
  * @mac_loopback_mode: MAC loopback mode
  * @q_prio: Array of MTL queue TX priority
+ * @hw_feat_cur_state: Current state of features enabled in HW
  */
 struct ether_priv_data {
         struct osi_core_priv_data *osi_core;
@@ -138,6 +169,7 @@ struct ether_priv_data {
         int tx_irqs[ETHER_MAX_IRQS];
         int rx_irqs[ETHER_MAX_IRQS];
         unsigned long long dma_mask;
+        netdev_features_t hw_feat_cur_state;
 
         /* for MAC loopback */
         unsigned int mac_loopback_mode;
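To make the wrap-around arithmetic in ether_avail_txdesc_cnt() concrete, a small self-contained illustration (TX_DESC_CNT is assumed to be 256 here purely for the example; the real value is defined elsewhere in the driver and only needs to be a power of two):

#include <assert.h>

#define TX_DESC_CNT 256U        /* assumed value, for illustration only */

static unsigned int avail_txdesc(unsigned int clean_idx, unsigned int cur_tx_idx)
{
        return (clean_idx - cur_tx_idx - 1U) & (TX_DESC_CNT - 1U);
}

int main(void)
{
        assert(avail_txdesc(250U, 5U) == 244U);  /* 11 descriptors in flight across the wrap */
        assert(avail_txdesc(10U, 200U) == 65U);  /* 190 descriptors in flight, no wrap */
        assert(avail_txdesc(7U, 6U) == 0U);      /* ring full: one slot always kept unused */
        return 0;
}

The "- 1" deliberately keeps one descriptor unused so that cur_tx_idx == clean_idx always means an empty ring rather than a full one.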
"Failed" : "Success"); + + return ret; +} + /** * ether_priv_ioctl - Handle private IOCTLs * @ndev: network device structure @@ -146,6 +199,9 @@ int ether_handle_priv_ioctl(struct net_device *ndev, case ETHER_GET_AVB_ALGORITHM: ret = ether_get_avb_algo(ndev, &ifdata); break; + case ETHER_CONFIG_ARP_OFFLOAD: + ret = ether_config_arp_offload(pdata, &ifdata); + break; default: break; } diff --git a/drivers/net/ethernet/nvidia/nvethernet/ioctl.h b/drivers/net/ethernet/nvidia/nvethernet/ioctl.h index 93f0fb3c..a1b1443f 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/ioctl.h +++ b/drivers/net/ethernet/nvidia/nvethernet/ioctl.h @@ -17,11 +17,15 @@ #ifndef IOCTL_H #define IOCTL_H +#define NUM_BYTES_IN_IPADDR 4 +#define MAX_IP_ADDR_BYTE 0xFF /* Remote wakeup filter */ #define EQOS_RWK_FILTER_LENGTH 8 + /* private ioctl number*/ #define ETHER_AVB_ALGORITHM 27 #define ETHER_GET_AVB_ALGORITHM 46 +#define ETHER_CONFIG_ARP_OFFLOAD 36 /** * struct ether_ifr_data - Private data of struct ifreq @@ -57,6 +61,19 @@ struct ether_ifr_data { void *ptr; }; +/** + * struct arp_offload_param - Parameter to support ARP offload. + * @ip_addr: Byte array for decimal representation of IP address. + * For example, 192.168.1.3 is represented as + * ip_addr[0] = '192' + * ip_addr[1] = '168' + * ip_addr[2] = '1' + * ip_addr[3] = '3' + */ +struct arp_offload_param { + unsigned char ip_addr[NUM_BYTES_IN_IPADDR]; +}; + /* Private ioctl handler function */ int ether_handle_priv_ioctl(struct net_device *ndev, struct ifreq *ifr); diff --git a/drivers/net/ethernet/nvidia/nvethernet/osd.c b/drivers/net/ethernet/nvidia/nvethernet/osd.c index fc2696ba..34bc8c42 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/osd.c +++ b/drivers/net/ethernet/nvidia/nvethernet/osd.c @@ -215,6 +215,7 @@ void osd_receive_packet(void *priv, void *rxring, unsigned int chan, unsigned int dma_buf_len, void *rxpkt_cx) { struct ether_priv_data *pdata = (struct ether_priv_data *)priv; + struct ether_rx_napi *rx_napi = pdata->rx_napi[chan]; struct osi_rx_ring *rx_ring = (struct osi_rx_ring *)rxring; struct osi_rx_swcx *rx_swcx = rx_ring->rx_swcx + rx_ring->cur_rx_idx; struct osi_rx_pkt_cx *rx_pkt_cx = (struct osi_rx_pkt_cx *)rxpkt_cx; @@ -230,6 +231,12 @@ void osd_receive_packet(void *priv, void *rxring, unsigned int chan, OSI_PKT_CX_VALID)) { skb_put(skb, rx_pkt_cx->pkt_len); + if (likely(rx_pkt_cx->rxcsum == OSI_CHECKSUM_UNNECESSARY)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + skb->ip_summed = CHECKSUM_NONE; + } + if ((rx_pkt_cx->flags & OSI_PKT_CX_VLAN) == OSI_PKT_CX_VLAN) { __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_pkt_cx->vlan_tag); @@ -238,7 +245,11 @@ void osd_receive_packet(void *priv, void *rxring, unsigned int chan, skb->dev = ndev; skb->protocol = eth_type_trans(skb, ndev); ndev->stats.rx_bytes += skb->len; - netif_receive_skb(skb); + if (likely(ndev->features & NETIF_F_GRO)) { + napi_gro_receive(&rx_napi->napi, skb); + } else { + netif_receive_skb(skb); + } } else { ndev->stats.rx_crc_errors = pkt_err_stat->rx_crc_error; ndev->stats.rx_errors++; @@ -260,7 +271,9 @@ void osd_receive_packet(void *priv, void *rxring, unsigned int chan, * @buffer: Buffer address to free. * @dmaaddr: DMA address to unmap. * @len: Length of data. - * @pkt_valid: Packet is valid or not + * @tx_done_pkt_cx: Pointer to struct which has tx done status info. + * This struct has flags to indicate tx error, whether DMA address + * is mapped from paged/linear buffer. * * Algorithm: * 1) Updates stats for linux network stack. 
diff --git a/drivers/net/ethernet/nvidia/nvethernet/osd.c b/drivers/net/ethernet/nvidia/nvethernet/osd.c
index fc2696ba..34bc8c42 100644
--- a/drivers/net/ethernet/nvidia/nvethernet/osd.c
+++ b/drivers/net/ethernet/nvidia/nvethernet/osd.c
@@ -215,6 +215,7 @@ void osd_receive_packet(void *priv, void *rxring, unsigned int chan,
                         unsigned int dma_buf_len, void *rxpkt_cx)
 {
         struct ether_priv_data *pdata = (struct ether_priv_data *)priv;
+        struct ether_rx_napi *rx_napi = pdata->rx_napi[chan];
         struct osi_rx_ring *rx_ring = (struct osi_rx_ring *)rxring;
         struct osi_rx_swcx *rx_swcx = rx_ring->rx_swcx + rx_ring->cur_rx_idx;
         struct osi_rx_pkt_cx *rx_pkt_cx = (struct osi_rx_pkt_cx *)rxpkt_cx;
@@ -230,6 +231,12 @@ void osd_receive_packet(void *priv, void *rxring, unsigned int chan,
                        OSI_PKT_CX_VALID)) {
                 skb_put(skb, rx_pkt_cx->pkt_len);
 
+                if (likely(rx_pkt_cx->rxcsum == OSI_CHECKSUM_UNNECESSARY)) {
+                        skb->ip_summed = CHECKSUM_UNNECESSARY;
+                } else {
+                        skb->ip_summed = CHECKSUM_NONE;
+                }
+
                 if ((rx_pkt_cx->flags & OSI_PKT_CX_VLAN) == OSI_PKT_CX_VLAN) {
                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                                rx_pkt_cx->vlan_tag);
@@ -238,7 +245,11 @@ void osd_receive_packet(void *priv, void *rxring, unsigned int chan,
                 skb->dev = ndev;
                 skb->protocol = eth_type_trans(skb, ndev);
                 ndev->stats.rx_bytes += skb->len;
-                netif_receive_skb(skb);
+                if (likely(ndev->features & NETIF_F_GRO)) {
+                        napi_gro_receive(&rx_napi->napi, skb);
+                } else {
+                        netif_receive_skb(skb);
+                }
         } else {
                 ndev->stats.rx_crc_errors = pkt_err_stat->rx_crc_error;
                 ndev->stats.rx_errors++;
@@ -260,7 +271,9 @@ void osd_receive_packet(void *priv, void *rxring, unsigned int chan,
  * @buffer: Buffer address to free.
  * @dmaaddr: DMA address to unmap.
  * @len: Length of data.
- * @pkt_valid: Packet is valid or not
+ * @tx_done_pkt_cx: Pointer to struct which has tx done status info.
+ *                  This struct has flags to indicate tx error, whether DMA
+ *                  address is mapped from paged/linear buffer.
  *
  * Algorithm:
  * 1) Updates stats for linux network stack.
@@ -274,20 +287,44 @@ void osd_receive_packet(void *priv, void *rxring, unsigned int chan,
  * Return: None.
  */
 void osd_transmit_complete(void *priv, void *buffer, unsigned long dmaaddr,
-                           unsigned int len, int pkt_valid)
+                           unsigned int len, void *tx_done_pkt_cx)
 {
+        struct osi_txdone_pkt_cx *txdone_pkt_cx = (struct osi_txdone_pkt_cx *)
+                                                  tx_done_pkt_cx;
         struct ether_priv_data *pdata = (struct ether_priv_data *)priv;
+        struct osi_dma_priv_data *osi_dma = pdata->osi_dma;
         struct sk_buff *skb = (struct sk_buff *)buffer;
-        struct net_device *ndev = pdata->ndev;
         dma_addr_t dma_addr = (dma_addr_t)dmaaddr;
+        struct net_device *ndev = pdata->ndev;
+        struct osi_tx_ring *tx_ring;
+        struct netdev_queue *txq;
+        unsigned int chan;
 
         ndev->stats.tx_packets++;
         ndev->stats.tx_bytes += len;
 
-        if (dma_addr)
-                dma_unmap_single(pdata->dev, dmaaddr,
-                                 len, DMA_TO_DEVICE);
+        if (dma_addr) {
+                if ((txdone_pkt_cx->flags & OSI_TXDONE_CX_PAGED_BUF) ==
+                    OSI_TXDONE_CX_PAGED_BUF) {
+                        dma_unmap_page(pdata->dev, dmaaddr,
+                                       len, DMA_TO_DEVICE);
+                } else {
+                        dma_unmap_single(pdata->dev, dmaaddr,
+                                         len, DMA_TO_DEVICE);
+                }
+        }
+
+        if (skb) {
+                chan = skb_get_queue_mapping(skb);
+                tx_ring = osi_dma->tx_ring[chan];
+                txq = netdev_get_tx_queue(ndev, chan);
+
+                if (netif_tx_queue_stopped(txq) &&
+                    ether_avail_txdesc_cnt(tx_ring) >= TX_DESC_THRESHOLD) {
+                        netif_tx_wake_queue(txq);
+                        netdev_dbg(ndev, "Tx ring[%d] - waking Txq\n", chan);
+                }
 
-        if (skb)
                 dev_consume_skb_any(skb);
+        }
 }