From c97c4d7421b014d036f9837ec3940ebc3d9bd9fd Mon Sep 17 00:00:00 2001
From: Rakesh Goyal
Date: Wed, 6 Oct 2021 19:50:40 +0530
Subject: [PATCH] nvethernet: Handle error from osi_hw_transmit()

Issue:
- Errors returned by OSI DMA during transmission were not handled.

Fix: Free the memory and drop the skb for which osi_hw_transmit() failed.

Change-Id: I3e7776ee3a37144fba6103fe1fd2091f646c07a8
Signed-off-by: Rakesh Goyal
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2606423
Reviewed-by: Nagarjuna Kristam
Reviewed-by: svcacv
Reviewed-by: Bitan Biswas
Reviewed-by: svc_kernel_abi
Reviewed-by: mobile promotions
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions
---
 .../ethernet/nvidia/nvethernet/ether_linux.c  | 79 ++++++++++++++-----
 1 file changed, 58 insertions(+), 21 deletions(-)

diff --git a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c
index 2fda5926..b33b3434 100644
--- a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c
+++ b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -2984,6 +2984,48 @@ static int ether_handle_tso(struct osi_tx_pkt_cx *tx_pkt_cx,
 	return 1;
 }
 
+/**
+ * @brief Roll back previously filled Tx descriptors on failure.
+ *
+ * Algorithm:
+ * - Go over all descriptors until count is 0.
+ * - Unmap physical address.
+ * - Reset length.
+ * - Reset flags.
+ *
+ * @param[in] pdata: OSD private data.
+ * @param[in] tx_ring: Tx ring instance associated with channel number.
+ * @param[in] cur_tx_idx: Local descriptor index.
+ * @param[in] count: Number of descriptors filled.
+ */
+static void ether_tx_swcx_rollback(struct ether_priv_data *pdata,
+				   struct osi_tx_ring *tx_ring,
+				   unsigned int cur_tx_idx,
+				   unsigned int count)
+{
+	struct device *dev = pdata->dev;
+	struct osi_tx_swcx *tx_swcx = NULL;
+
+	while (count > 0) {
+		DECR_TX_DESC_INDEX(cur_tx_idx, 1U);
+		tx_swcx = tx_ring->tx_swcx + cur_tx_idx;
+		if (tx_swcx->buf_phy_addr) {
+			if ((tx_swcx->flags & OSI_PKT_CX_PAGED_BUF) ==
+			    OSI_PKT_CX_PAGED_BUF) {
+				dma_unmap_page(dev, tx_swcx->buf_phy_addr,
+					       tx_swcx->len, DMA_TO_DEVICE);
+			} else {
+				dma_unmap_single(dev, tx_swcx->buf_phy_addr,
+						 tx_swcx->len, DMA_TO_DEVICE);
+			}
+			tx_swcx->buf_phy_addr = 0;
+		}
+		tx_swcx->len = 0;
+		tx_swcx->flags = 0;
+		count--;
+	}
+}
+
 /**
  * @brief Tx ring software context allocation.
  *
@@ -3191,25 +3233,7 @@ desc_not_free:
 
 dma_map_failed:
 	/* Failed to fill current desc. Rollback previous desc's */
-	while (cnt > 0) {
-		DECR_TX_DESC_INDEX(cur_tx_idx, 1U);
-		tx_swcx = tx_ring->tx_swcx + cur_tx_idx;
-		if (tx_swcx->buf_phy_addr) {
-			if ((tx_swcx->flags & OSI_PKT_CX_PAGED_BUF) ==
-			    OSI_PKT_CX_PAGED_BUF) {
-				dma_unmap_page(dev, tx_swcx->buf_phy_addr,
-					       tx_swcx->len, DMA_TO_DEVICE);
-			} else {
-				dma_unmap_single(dev, tx_swcx->buf_phy_addr,
-						 tx_swcx->len, DMA_TO_DEVICE);
-			}
-			tx_swcx->buf_phy_addr = 0;
-		}
-		tx_swcx->len = 0;
-
-		tx_swcx->flags &= ~OSI_PKT_CX_PAGED_BUF;
-		cnt--;
-	}
+	ether_tx_swcx_rollback(pdata, tx_ring, cur_tx_idx, cnt);
 
 	return ret;
 }
@@ -3281,7 +3305,11 @@ static int ether_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	unsigned int qinx = skb_get_queue_mapping(skb);
 	unsigned int chan = osi_dma->dma_chans[qinx];
 	struct osi_tx_ring *tx_ring = osi_dma->tx_ring[chan];
+#ifdef OSI_ERR_DEBUG
+	unsigned int cur_tx_idx = tx_ring->cur_tx_idx;
+#endif
 	int count = 0;
+	int ret;
 
 	count = ether_tx_swcx_alloc(pdata, tx_ring, skb);
 	if (count <= 0) {
@@ -3294,7 +3322,16 @@ static int ether_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		return NETDEV_TX_OK;
 	}
 
-	osi_hw_transmit(osi_dma, chan);
+	ret = osi_hw_transmit(osi_dma, chan);
+#ifdef OSI_ERR_DEBUG
+	if (ret < 0) {
+		INCR_TX_DESC_INDEX(cur_tx_idx, count);
+		ether_tx_swcx_rollback(pdata, tx_ring, cur_tx_idx, count);
+		netdev_err(ndev, "%s() dropping corrupted skb\n", __func__);
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+#endif
 
 	if (ether_avail_txdesc_cnt(tx_ring) <= ETHER_TX_DESC_THRESHOLD) {
 		netif_stop_subqueue(ndev, qinx);