nvethernet: support for page pool

Add page pool support and enable it when the CONFIG_PAGE_POOL
config option is enabled.

Bug 200686236

Change-Id: I08efd68106182f65eac4e24b9f55baa22ce5968b
Signed-off-by: Bhadram Varka <vbhadram@nvidia.com>
Signed-off-by: Mohan Thadikamalla <mohant@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2532360
Reviewed-by: Narayan Reddy <narayanr@nvidia.com>
Author:       Bhadram Varka
Date:         2020-12-29 21:14:54 +05:30
Committed by: Revanth Kumar Uppala
Parent:       b55cba3cf7
Commit:       a79b1f58c0

3 changed files with 150 additions and 13 deletions
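For orientation, a minimal self-contained sketch (not part of this commit) of the kernel page pool lifecycle the change builds on: create a DMA-mapping pool, pull pre-mapped pages for Rx descriptors, recycle them, and destroy the pool on teardown. The device pointer, Rx buffer length, pool size and function name below are placeholders rather than driver fields:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/err.h>
	#include <linux/gfp.h>
	#include <net/page_pool.h>

	/* Illustrative lifecycle only; not driver code. */
	static int example_page_pool_lifecycle(struct device *dev,
					       unsigned int rx_buf_len)
	{
		struct page_pool_params pp = { 0 };
		struct page_pool *pool;
		struct page *page;
		dma_addr_t dma;

		pp.flags = PP_FLAG_DMA_MAP;       /* pool DMA-maps pages for us */
		pp.order = get_order(rx_buf_len); /* one compound page per Rx buffer */
		pp.pool_size = 256;               /* e.g. one slot per Rx descriptor */
		pp.nid = dev_to_node(dev);
		pp.dev = dev;
		pp.dma_dir = DMA_FROM_DEVICE;

		pool = page_pool_create(&pp);
		if (IS_ERR(pool))
			return PTR_ERR(pool);

		/* Refill path: take a pre-mapped page and fetch its DMA address. */
		page = page_pool_dev_alloc_pages(pool);
		if (page) {
			dma = page_pool_get_dma_addr(page);
			(void)dma;	/* would be written into an Rx descriptor */

			/* When the buffer is no longer needed, recycle it. */
			page_pool_recycle_direct(pool, page);
		}

		page_pool_destroy(pool);
		return 0;
	}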

@@ -1348,11 +1348,14 @@ static void ether_napi_enable(struct ether_priv_data *pdata)
  * @brief Free receive skbs
  *
  * @param[in] rx_swcx: Rx pkt SW context
- * @param[in] dev: device instance associated with driver.
+ * @param[in] pdata: Ethernet private data
  * @param[in] rx_buf_len: Receive buffer length
+ * @param[in] resv_buf_virt_addr: Reservered virtual buffer
  */
-static void ether_free_rx_skbs(struct osi_rx_swcx *rx_swcx, struct device *dev,
-			       unsigned int rx_buf_len, void *resv_buf_virt_addr)
+static void ether_free_rx_skbs(struct osi_rx_swcx *rx_swcx,
+			       struct ether_priv_data *pdata,
+			       unsigned int rx_buf_len,
+			       void *resv_buf_virt_addr)
 {
 	struct osi_rx_swcx *prx_swcx = NULL;
 	unsigned int i;
@@ -1362,9 +1365,16 @@ static void ether_free_rx_skbs(struct osi_rx_swcx *rx_swcx, struct device *dev,
 		if (prx_swcx->buf_virt_addr != NULL) {
 			if (resv_buf_virt_addr != prx_swcx->buf_virt_addr) {
-				dma_unmap_single(dev, prx_swcx->buf_phy_addr,
+#ifdef ETHER_PAGE_POOL
+				page_pool_put_full_page(pdata->page_pool,
+							prx_swcx->buf_virt_addr,
+							false);
+#else
+				dma_unmap_single(pdata->dev,
+						 prx_swcx->buf_phy_addr,
 						 rx_buf_len, DMA_FROM_DEVICE);
 				dev_kfree_skb_any(prx_swcx->buf_virt_addr);
+#endif
 			}
 			prx_swcx->buf_virt_addr = NULL;
 			prx_swcx->buf_phy_addr = 0;
@@ -1378,10 +1388,10 @@ static void ether_free_rx_skbs(struct osi_rx_swcx *rx_swcx, struct device *dev,
  * allocated_rx_dma_ring() API.
  *
  * @param[in] osi_dma: OSI DMA private data structure.
- * @param[in] dev: device instance associated with driver.
+ * @param[in] pdata: Ethernet private data.
  */
 static void free_rx_dma_resources(struct osi_dma_priv_data *osi_dma,
-				  struct device *dev)
+				  struct ether_priv_data *pdata)
 {
 	unsigned long rx_desc_size = sizeof(struct osi_rx_desc) * RX_DESC_CNT;
 	struct osi_rx_ring *rx_ring = NULL;
@@ -1392,14 +1402,14 @@ static void free_rx_dma_resources(struct osi_dma_priv_data *osi_dma,
 		if (rx_ring != NULL) {
 			if (rx_ring->rx_swcx != NULL) {
-				ether_free_rx_skbs(rx_ring->rx_swcx, dev,
+				ether_free_rx_skbs(rx_ring->rx_swcx, pdata,
 						   osi_dma->rx_buf_len,
 						   osi_dma->resv_buf_virt_addr);
 				kfree(rx_ring->rx_swcx);
 			}
 			if (rx_ring->rx_desc != NULL) {
-				dma_free_coherent(dev, rx_desc_size,
+				dma_free_coherent(pdata->dev, rx_desc_size,
 						  rx_ring->rx_desc,
 						  rx_ring->rx_desc_phy_addr);
 			}
@@ -1408,6 +1418,12 @@ static void free_rx_dma_resources(struct osi_dma_priv_data *osi_dma,
 			rx_ring = NULL;
 		}
 	}
+#ifdef ETHER_PAGE_POOL
+	if (pdata->page_pool) {
+		page_pool_destroy(pdata->page_pool);
+		pdata->page_pool = NULL;
+	}
+#endif
 }
 
 /**
@@ -1486,16 +1502,33 @@ err_rx_desc:
 static int ether_allocate_rx_buffers(struct ether_priv_data *pdata,
 				     struct osi_rx_ring *rx_ring)
 {
+#ifndef ETHER_PAGE_POOL
 	unsigned int rx_buf_len = pdata->osi_dma->rx_buf_len;
+#endif
 	struct osi_rx_swcx *rx_swcx = NULL;
 	unsigned int i = 0;
 
 	for (i = 0; i < RX_DESC_CNT; i++) {
+#ifndef ETHER_PAGE_POOL
 		struct sk_buff *skb = NULL;
+#else
		struct page *page = NULL;
+#endif
 		dma_addr_t dma_addr = 0;
 
 		rx_swcx = rx_ring->rx_swcx + i;
 
+#ifdef ETHER_PAGE_POOL
+		page = page_pool_dev_alloc_pages(pdata->page_pool);
+		if (!page) {
+			dev_err(pdata->dev,
+				"failed to allocate page pool buffer");
+			return -ENOMEM;
+		}
+
+		dma_addr = page_pool_get_dma_addr(page);
+		rx_swcx->buf_virt_addr = page;
+#else
 		skb = __netdev_alloc_skb_ip_align(pdata->ndev, rx_buf_len,
 						  GFP_KERNEL);
 		if (unlikely(skb == NULL)) {
@@ -1512,12 +1545,50 @@ static int ether_allocate_rx_buffers(struct ether_priv_data *pdata,
 		}
 
 		rx_swcx->buf_virt_addr = skb;
+#endif
 		rx_swcx->buf_phy_addr = dma_addr;
 	}
 
 	return 0;
 }
 
+#ifdef ETHER_PAGE_POOL
+/**
+ * @brief Create Rx buffer page pool
+ *
+ * Algorithm: Invokes page pool API to create Rx buffer pool.
+ *
+ * @param[in] pdata: OSD private data.
+ *
+ * @retval 0 on success
+ * @retval "negative value" on failure.
+ */
+static int ether_page_pool_create(struct ether_priv_data *pdata)
+{
+	struct osi_dma_priv_data *osi_dma = pdata->osi_dma;
+	struct page_pool_params pp_params = { 0 };
+	unsigned int num_pages;
+	int ret = 0;
+
+	pp_params.flags = PP_FLAG_DMA_MAP;
+	pp_params.pool_size = osi_dma->rx_buf_len;
+	num_pages = DIV_ROUND_UP(osi_dma->rx_buf_len, PAGE_SIZE);
+	pp_params.order = ilog2(roundup_pow_of_two(num_pages));
+	pp_params.nid = dev_to_node(pdata->dev);
+	pp_params.dev = pdata->dev;
+	pp_params.dma_dir = DMA_FROM_DEVICE;
+
+	pdata->page_pool = page_pool_create(&pp_params);
+	if (IS_ERR(pdata->page_pool)) {
+		ret = PTR_ERR(pdata->page_pool);
+		pdata->page_pool = NULL;
+		return ret;
+	}
+
+	return ret;
+}
+#endif
+
 /**
  * @brief Allocate Receive DMA channel ring resources.
  *
@@ -1543,6 +1614,14 @@ static int ether_allocate_rx_dma_resources(struct osi_dma_priv_data *osi_dma,
 		chan = osi_dma->dma_chans[i];
 
 		if (chan != OSI_INVALID_CHAN_NUM) {
+#ifdef ETHER_PAGE_POOL
+			ret = ether_page_pool_create(pdata);
+			if (ret < 0) {
+				pr_err("%s(): failed to create page pool\n",
+				       __func__);
+				goto exit;
+			}
+#endif
 			ret = allocate_rx_dma_resource(osi_dma, pdata->dev,
 						       chan);
 			if (ret != 0) {
@@ -1559,7 +1638,7 @@ static int ether_allocate_rx_dma_resources(struct osi_dma_priv_data *osi_dma,
 	return 0;
 
 exit:
-	free_rx_dma_resources(osi_dma, pdata->dev);
+	free_rx_dma_resources(osi_dma, pdata);
 	return ret;
 }
 
@@ -1744,7 +1823,7 @@ void free_dma_resources(struct ether_priv_data *pdata)
 	struct device *dev = pdata->dev;
 
 	free_tx_dma_resources(osi_dma, dev);
-	free_rx_dma_resources(osi_dma, dev);
+	free_rx_dma_resources(osi_dma, pdata);
 
 	/* unmap reserved DMA*/
 	if (osi_dma->resv_buf_phy_addr) {
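As a quick sanity check on the pp_params.order computation in ether_page_pool_create() above, here is a small standalone sketch with purely illustrative values (a hypothetical 9000-byte jumbo Rx buffer and 4 KiB pages, neither taken from this commit); DIV_ROUND_UP, roundup_pow_of_two and ilog2 are open-coded so it builds in userspace:

	#include <stdio.h>

	int main(void)
	{
		unsigned int page_size = 4096;   /* assumed PAGE_SIZE */
		unsigned int rx_buf_len = 9000;  /* hypothetical jumbo Rx buffer */
		unsigned int num_pages = (rx_buf_len + page_size - 1) / page_size;
		unsigned int pow2 = 1, order = 0;

		while (pow2 < num_pages)
			pow2 <<= 1;              /* roundup_pow_of_two(3) == 4 */
		while ((1U << order) < pow2)
			order++;                 /* ilog2(4) == 2 */

		/* Each pool allocation is a 2^order compound page large enough
		 * to cover one Rx buffer: here 4 pages (16 KiB) per 9000-byte frame.
		 */
		printf("num_pages=%u order=%u bytes_per_alloc=%u\n",
		       num_pages, order, page_size << order);
		return 0;
	}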

@@ -49,6 +49,12 @@
 #else
 #include <soc/tegra/fuse.h>
 #endif
+#if IS_ENABLED(CONFIG_PAGE_POOL)
+#if (KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE)
+#include <net/page_pool.h>
+#define ETHER_PAGE_POOL
+#endif
+#endif
 #include <osi_core.h>
 #include <osi_dma.h>
 #include <mmc.h>
@@ -446,6 +452,10 @@ struct ether_priv_data {
 	struct ether_ivc_ctxt ictxt;
 	/** VM channel info data associated with VM IRQ */
 	struct ether_vm_irq_data *vm_irq_data;
+#ifdef ETHER_PAGE_POOL
+	/** Pointer to page pool */
+	struct page_pool *page_pool;
+#endif
 #ifdef CONFIG_DEBUG_FS
 	/** Debug fs directory pointer */
 	struct dentry *dbgfs_dir;

@@ -132,10 +132,11 @@ static inline int ether_alloc_skb(struct ether_priv_data *pdata,
 				  unsigned int dma_rx_buf_len,
 				  unsigned int chan)
 {
+#ifndef ETHER_PAGE_POOL
 	struct sk_buff *skb = NULL;
 	dma_addr_t dma_addr;
+#endif
 	unsigned long val;
 
 	if ((rx_swcx->flags & OSI_RX_SWCX_REUSE) == OSI_RX_SWCX_REUSE) {
 		/* Skip buffer allocation and DMA mapping since
 		 * PTP software context will have valid buffer and
@@ -145,6 +146,7 @@ static inline int ether_alloc_skb(struct ether_priv_data *pdata,
 		return 0;
 	}
 
+#ifndef ETHER_PAGE_POOL
 	skb = netdev_alloc_skb_ip_align(pdata->ndev, dma_rx_buf_len);
 
 	if (unlikely(skb == NULL)) {
@@ -166,8 +168,26 @@ static inline int ether_alloc_skb(struct ether_priv_data *pdata,
 		return -ENOMEM;
 	}
+#else
+	rx_swcx->buf_virt_addr = page_pool_dev_alloc_pages(pdata->page_pool);
+	if (!rx_swcx->buf_virt_addr) {
+		dev_err(pdata->dev,
+			"page pool allocation failed using resv_buf\n");
+		rx_swcx->buf_virt_addr = pdata->osi_dma->resv_buf_virt_addr;
+		rx_swcx->buf_phy_addr = pdata->osi_dma->resv_buf_phy_addr;
+		rx_swcx->flags |= OSI_RX_SWCX_BUF_VALID;
+		val = pdata->osi_core->xstats.re_alloc_rxbuf_failed[chan];
+		pdata->osi_core->xstats.re_alloc_rxbuf_failed[chan] =
+			osi_update_stats_counter(val, 1UL);
+		return 0;
+	}
+	rx_swcx->buf_phy_addr = page_pool_get_dma_addr(rx_swcx->buf_virt_addr);
+#endif
 
+#ifndef ETHER_PAGE_POOL
 	rx_swcx->buf_virt_addr = skb;
 	rx_swcx->buf_phy_addr = dma_addr;
+#endif
 	rx_swcx->flags |= OSI_RX_SWCX_BUF_VALID;
 
 	return 0;
@@ -259,20 +279,45 @@ void osd_receive_packet(void *priv, struct osi_rx_ring *rx_ring,
 	struct ether_priv_data *pdata = (struct ether_priv_data *)priv;
 	struct osi_core_priv_data *osi_core = pdata->osi_core;
 	struct ether_rx_napi *rx_napi = pdata->rx_napi[chan];
+#ifdef ETHER_PAGE_POOL
+	struct page *page = (struct page *)rx_swcx->buf_virt_addr;
+	struct sk_buff *skb = NULL;
+#else
 	struct sk_buff *skb = (struct sk_buff *)rx_swcx->buf_virt_addr;
+#endif
 	dma_addr_t dma_addr = (dma_addr_t)rx_swcx->buf_phy_addr;
 	struct net_device *ndev = pdata->ndev;
 	struct osi_pkt_err_stats *pkt_err_stat = &pdata->osi_dma->pkt_err_stats;
 	struct skb_shared_hwtstamps *shhwtstamp;
 	unsigned long val;
 
+#ifndef ETHER_PAGE_POOL
 	dma_unmap_single(pdata->dev, dma_addr, dma_buf_len, DMA_FROM_DEVICE);
+#endif
 
 	/* Process only the Valid packets */
 	if (likely((rx_pkt_cx->flags & OSI_PKT_CX_VALID) ==
 		   OSI_PKT_CX_VALID)) {
+#ifdef ETHER_PAGE_POOL
+		skb = netdev_alloc_skb_ip_align(pdata->ndev,
+						rx_pkt_cx->pkt_len);
+		if (unlikely(!skb)) {
+			pdata->ndev->stats.rx_dropped++;
+			dev_err(pdata->dev,
+				"%s(): Error in allocating the skb\n",
+				__func__);
+			page_pool_recycle_direct(pdata->page_pool, page);
+			return;
+		}
+
+		dma_sync_single_for_cpu(pdata->dev, dma_addr,
+					rx_pkt_cx->pkt_len, DMA_FROM_DEVICE);
+		skb_copy_to_linear_data(skb, page_address(page),
+					rx_pkt_cx->pkt_len);
+		skb_put(skb, rx_pkt_cx->pkt_len);
+		page_pool_recycle_direct(pdata->page_pool, page);
+#else
 		skb_put(skb, rx_pkt_cx->pkt_len);
+#endif
 
 		if (likely((rx_pkt_cx->rxcsum & OSI_CHECKSUM_UNNECESSARY) ==
 			   OSI_CHECKSUM_UNNECESSARY)) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -315,6 +360,9 @@ void osd_receive_packet(void *priv, struct osi_rx_ring *rx_ring,
 		ndev->stats.rx_frame_errors = pkt_err_stat->rx_frame_error;
 		ndev->stats.rx_fifo_errors = osi_core->mmc.mmc_rx_fifo_overflow;
 		ndev->stats.rx_errors++;
+#ifdef ETHER_PAGE_POOL
+		page_pool_recycle_direct(pdata->page_pool, page);
+#endif
 		dev_kfree_skb_any(skb);
 	}
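To make the new Rx hot path easier to follow outside the #ifdef blocks above, here is a condensed sketch of what the page pool branch of osd_receive_packet() does; the function name and parameters are stand-ins for illustration, not driver symbols:

	#include <linux/dma-mapping.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <net/page_pool.h>

	/* Copy the received frame out of the pooled page into a fresh skb,
	 * then hand the page straight back to the pool for the next refill.
	 */
	static struct sk_buff *rx_copy_and_recycle(struct net_device *ndev,
						   struct device *dev,
						   struct page_pool *pool,
						   struct page *page,
						   dma_addr_t dma_addr,
						   unsigned int pkt_len)
	{
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(ndev, pkt_len);
		if (unlikely(!skb)) {
			/* Drop: return the page to the pool's direct cache. */
			page_pool_recycle_direct(pool, page);
			return NULL;
		}

		/* The pool mapped the page once (PP_FLAG_DMA_MAP), so only a
		 * CPU-side sync is needed before reading the payload.
		 */
		dma_sync_single_for_cpu(dev, dma_addr, pkt_len, DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, page_address(page), pkt_len);
		skb_put(skb, pkt_len);

		/* The copy is done, so the page can be reused immediately. */
		page_pool_recycle_direct(pool, page);
		return skb;
	}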