nvethernet: don't free/alloc descriptors in SC7

- dma_alloc_coherent is taking more time while
allocating the descriptors in the resume path.
- This change does not free the DMA descriptors.
- It only frees the DMA buffers in the Rx ring during suspend
and re-allocates the DMA buffers in the Rx ring during
resume.

Bug 5094704

Change-Id: If52471d11dd300c324a9c9bc1723fcb3d3e51bf7
Signed-off-by: Bhadram Varka <vbhadram@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3313272
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: Srinivas Ramachandran <srinivasra@nvidia.com>
This commit is contained in:
Bhadram Varka
2025-03-05 08:01:52 +00:00
committed by Jon Hunter
parent 4c9bdb3715
commit b5a64188d3

View File

@@ -7671,6 +7671,51 @@ void ether_shutdown(struct platform_device *pdev)
} }
#ifdef CONFIG_PM #ifdef CONFIG_PM
/**
 * @brief Free or re-populate the Rx ring buffers across suspend/resume.
 *
 * Walks every possible DMA channel for the current MAC type. Channels
 * whose Rx ring (or its software context) was never set up are skipped.
 * On suspend the Rx buffers are released while the descriptors are kept;
 * on resume fresh Rx buffers are allocated into the existing rings. If an
 * allocation fails mid-way on resume, the buffers already allocated on
 * earlier channels are rolled back.
 *
 * NOTE(review): the rollback does not touch the channel whose allocation
 * just failed — assumes ether_allocate_rx_buffers() releases its own
 * partial work on error; confirm against its implementation.
 *
 * @param[in] pdata:   OSD private data.
 * @param[in] suspend: OSI_ENABLE to free buffers (suspend path),
 *                     OSI_DISABLE to allocate them (resume path).
 *
 * @retval 0 on success.
 * @retval negative value from ether_allocate_rx_buffers() on failure.
 */
static s32 ether_handle_rx_buffers(struct ether_priv_data *pdata,
				   uint32_t suspend)
{
	/* Upper bound on DMA channels, indexed by MAC IP type */
	const nveu32_t max_dma_chan[OSI_MAX_MAC_IP_TYPES] = {
		OSI_EQOS_MAX_NUM_CHANS,
		OSI_MGBE_T23X_MAX_NUM_CHANS,
		OSI_MGBE_MAX_NUM_CHANS
	};
	struct osi_dma_priv_data *osi_dma = pdata->osi_dma;
	u32 idx, j;
	s32 ret = 0;

	for (idx = 0; idx < max_dma_chan[osi_dma->mac]; idx++) {
		struct osi_rx_ring *ring = osi_dma->rx_ring[idx];
		u32 chan = osi_dma->dma_chans[idx];

		/* Skip channels with no initialized Rx ring */
		if (!ring || !ring->rx_swcx)
			continue;

		if (suspend != 0U) {
			/* Suspend: drop the buffers, keep the descriptors */
			ether_free_rx_skbs(ring->rx_swcx, pdata,
					   osi_dma->rx_buf_len,
					   pdata->resv_buf_virt_addr, chan);
			continue;
		}

		/* Resume: refill this ring with fresh Rx buffers */
		ret = ether_allocate_rx_buffers(pdata, ring, chan);
		if (ret < 0) {
			/* Roll back buffers allocated on earlier channels */
			for (j = 0; j < idx; j++) {
				struct osi_rx_ring *prev = osi_dma->rx_ring[j];

				if (prev && prev->rx_swcx)
					ether_free_rx_skbs(prev->rx_swcx,
							   pdata,
							   osi_dma->rx_buf_len,
							   pdata->resv_buf_virt_addr,
							   osi_dma->dma_chans[j]);
			}
			return ret;
		}
	}

	return ret;
}
/** /**
* @brief Ethernet platform driver resume call. * @brief Ethernet platform driver resume call.
* *
@@ -7718,7 +7763,7 @@ static int ether_resume(struct ether_priv_data *pdata)
} }
osi_set_rx_buf_len(osi_dma); osi_set_rx_buf_len(osi_dma);
ret = ether_allocate_dma_resources(pdata); ret = ether_handle_rx_buffers(pdata, OSI_DISABLE);
if (ret < 0) { if (ret < 0) {
dev_err(dev, "failed to allocate dma resources\n"); dev_err(dev, "failed to allocate dma resources\n");
return ret; return ret;
@@ -7821,7 +7866,8 @@ int ether_suspend_noirq(struct device *dev)
OSI_DMA_INTR_DISABLE); OSI_DMA_INTR_DISABLE);
} }
free_dma_resources(pdata); if (ether_handle_rx_buffers(pdata, OSI_ENABLE) != 0)
dev_err(dev, "Failed to free the Rx buffers\n");
if (osi_core->mac != OSI_MAC_HW_EQOS) if (osi_core->mac != OSI_MAC_HW_EQOS)
pm_runtime_put_sync(pdata->dev); pm_runtime_put_sync(pdata->dev);