Files
linux-nv-oot/drivers/net/ethernet/nvidia/nvethernet/osd.c
Bhadram Varka 58301a3ef2 nvethernet: Update comments with Doxygen style
replace kernel doc comments with Doxygen style comments

Bug 200512422

Change-Id: I1445cab3fb6708ddc21b4bfacebe213ed22f7aa2
Signed-off-by: Bhadram Varka <vbhadram@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2180213
Reviewed-by: Narayan Reddy <narayanr@nvidia.com>
Tested-by: Narayan Reddy <narayanr@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Srinivas Ramachandran <srinivasra@nvidia.com>
Reviewed-by: Bitan Biswas <bbiswas@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
2022-11-01 14:27:07 +05:30

322 lines
9.4 KiB
C

/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "ether_linux.h"
#include <osd.h>
/**
 * @brief Adds delay in micro seconds.
 *
 * Algorithm: Invokes OSD delay function for adding delay
 *
 * @param[in] usec: Delay number in micro seconds.
 *
 * @note Thin pass-through to the kernel's udelay(), which busy-waits;
 * callers needing longer delays should use osd_usleep_range()/osd_msleep().
 */
void osd_udelay(unsigned long usec)
{
	udelay(usec);
}
/**
 * @brief Adds sleep in micro seconds
 *
 * Algorithm: Invokes OSD function to add sleep.
 *
 * @param[in] umin: Minimum sleep required in micro seconds.
 * @param[in] umax: Maximum sleep required in micro seconds.
 *
 * @note Thin pass-through to the kernel's usleep_range(); the actual wakeup
 * happens anywhere in [umin, umax]. Must be called from sleepable context.
 */
void osd_usleep_range(unsigned long umin, unsigned long umax)
{
	usleep_range(umin, umax);
}
/**
 * @brief Adds sleep in milli seconds.
 *
 * Algorithm: Invokes OSD function to add sleep.
 *
 * @param[in] msec: Minimum sleep required in milli seconds.
 *
 * @note Thin pass-through to the kernel's msleep(); must be called from
 * sleepable (non-atomic) context.
 */
void osd_msleep(unsigned int msec)
{
	msleep(msec);
}
/**
 * @brief Prints log at INFO log level
 *
 * Algorithm: Formats the variadic arguments into a bounded local buffer
 * and prints the result in the kernel log at INFO level via dev_info().
 *
 * @param[in] priv: OSD private data (struct ether_priv_data *).
 * @param[in] fmt: printf-style log format string.
 * @param[in] ...: Arguments matching the conversion specifiers in fmt.
 */
void osd_info(void *priv, const char *fmt, ...)
{
	struct ether_priv_data *pdata = (struct ether_priv_data *)priv;
	char buf[256];
	va_list args;

	/* The previous implementation passed fmt straight to dev_info(),
	 * which dropped the variadic arguments entirely and used a
	 * non-literal as the format string. Expand the arguments here and
	 * log the result verbatim via "%s" instead.
	 */
	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	dev_info(pdata->dev, "%s", buf);
}
/**
 * @brief Prints log at ERR log level
 *
 * Algorithm: Formats the variadic arguments into a bounded local buffer
 * and prints the result in the kernel log at ERR level via dev_err().
 *
 * @param[in] priv: OSD private data (struct ether_priv_data *).
 * @param[in] fmt: printf-style log format string.
 * @param[in] ...: Arguments matching the conversion specifiers in fmt.
 */
void osd_err(void *priv, const char *fmt, ...)
{
	struct ether_priv_data *pdata = (struct ether_priv_data *)priv;
	char buf[256];
	va_list args;

	/* The previous implementation passed fmt straight to dev_err(),
	 * which dropped the variadic arguments entirely and used a
	 * non-literal as the format string. Expand the arguments here and
	 * log the result verbatim via "%s" instead.
	 */
	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	dev_err(pdata->dev, "%s", buf);
}
/**
 * @brief Allocate and DMA map Rx buffer.
 *
 * Algorithm: Allocates a network buffer (skb) sized for one Rx DMA
 * transfer, maps skb->data for device-to-CPU DMA, and records both the
 * virtual and bus addresses in the ring's software context entry.
 *
 * @param[in] pdata: OSD private data structure.
 * @param[in] rx_swcx: Rx ring software context.
 * @param[in] dma_rx_buf_len: DMA Rx buffer length.
 *
 * @retval 0 on Sucess
 * @retval ENOMEM on failure.
 */
static inline int ether_alloc_skb(struct ether_priv_data *pdata,
				  struct osi_rx_swcx *rx_swcx,
				  unsigned int dma_rx_buf_len)
{
	dma_addr_t map_addr;
	struct sk_buff *nskb = netdev_alloc_skb_ip_align(pdata->ndev,
							 dma_rx_buf_len);

	if (unlikely(!nskb)) {
		dev_err(pdata->dev, "RX skb allocation failed\n");
		return -ENOMEM;
	}

	map_addr = dma_map_single(pdata->dev, nskb->data, dma_rx_buf_len,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pdata->dev, map_addr))) {
		dev_err(pdata->dev, "RX skb dma map failed\n");
		/* Mapping failed: give the freshly allocated skb back */
		dev_kfree_skb_any(nskb);
		return -ENOMEM;
	}

	/* Hand both addresses to the ring entry; the descriptor init
	 * path programs the hardware from buf_phy_addr.
	 */
	rx_swcx->buf_virt_addr = nskb;
	rx_swcx->buf_phy_addr = map_addr;

	return 0;
}
/**
 * @brief Re-fill DMA channel Rx ring.
 *
 * Algorithm: Re-fill Rx DMA channel ring until dirty rx index is equal
 * to current rx index.
 * 1) Invokes OSD layer to allocate the buffer and map the buffer to DMA
 * mappable address.
 * 2) Fill Rx descriptors with required data.
 * 3) Program DMA rx channel tail pointer.
 *
 * @param[in] pdata: OSD private data structure.
 * @param[in] rx_ring: DMA channel Rx ring instance.
 * @param[in] chan: DMA Rx channel number.
 *
 * @note On allocation failure the loop stops early (remaining slots stay
 * unfilled) but the tail pointer is still advanced for the slots that
 * were successfully refilled.
 */
static void ether_realloc_rx_skb(struct ether_priv_data *pdata,
				 struct osi_rx_ring *rx_ring,
				 unsigned int chan)
{
	struct osi_dma_priv_data *osi_dma = pdata->osi_dma;
	struct osi_core_priv_data *osi_core = pdata->osi_core;
	struct osi_rx_swcx *rx_swcx = NULL;
	struct osi_rx_desc *rx_desc = NULL;
	unsigned long val;
	int ret = 0;

	/* Walk every slot the Rx completion path has consumed, from the
	 * refill index up to (but not including) the current Rx index.
	 */
	while (rx_ring->refill_idx != rx_ring->cur_rx_idx) {
		rx_swcx = rx_ring->rx_swcx + rx_ring->refill_idx;
		rx_desc = rx_ring->rx_desc + rx_ring->refill_idx;

		ret = ether_alloc_skb(pdata, rx_swcx, osi_dma->rx_buf_len);
		if (ret < 0) {
			/* Out of memory: count the failure and retry this
			 * same slot on the next refill pass.
			 */
			val = osi_core->xstats.re_alloc_rxbuf_failed[chan];
			osi_core->xstats.re_alloc_rxbuf_failed[chan] =
				osi_update_stats_counter(val, 1UL);
			break;
		}

		/* Re-arm the descriptor (hands ownership back to HW) and
		 * advance the refill index with ring wrap-around.
		 */
		osi_rx_dma_desc_init(rx_swcx, rx_desc, osi_dma->use_riwt);
		INCR_RX_DESC_INDEX(rx_ring->refill_idx, 1U);
	}

	/* Tell the DMA engine how far the ring has been re-armed */
	osi_update_rx_tailptr(osi_dma, rx_ring, chan);
}
/**
 * @brief Handover received packet to network stack.
 *
 * Algorithm:
 * 1) Unmap the DMA buffer address.
 * 2) Updates socket buffer with len and ether type and handover to
 * Linux network stack.
 * 3) Refill the Rx ring based on threshold.
 *
 * @param[in] priv: OSD private data structure.
 * @param[in] rxring: Pointer to DMA channel Rx ring.
 * @param[in] chan: DMA Rx channel number.
 * @param[in] dma_buf_len: Rx DMA buffer length.
 * @param[in] rxpkt_cx: Received packet context.
 * @param[in] rx_pkt_swcx: Received packet sw context.
 *
 * @note Rx completion need to make sure that Rx descriptors processed properly.
 */
void osd_receive_packet(void *priv, void *rxring, unsigned int chan,
			unsigned int dma_buf_len, void *rxpkt_cx,
			void *rx_pkt_swcx)
{
	struct ether_priv_data *pdata = (struct ether_priv_data *)priv;
	struct ether_rx_napi *rx_napi = pdata->rx_napi[chan];
	struct osi_rx_ring *rx_ring = (struct osi_rx_ring *)rxring;
	struct osi_rx_swcx *rx_swcx = (struct osi_rx_swcx *)rx_pkt_swcx;
	struct osi_rx_pkt_cx *rx_pkt_cx = (struct osi_rx_pkt_cx *)rxpkt_cx;
	struct sk_buff *skb = (struct sk_buff *)rx_swcx->buf_virt_addr;
	dma_addr_t dma_addr = (dma_addr_t)rx_swcx->buf_phy_addr;
	struct net_device *ndev = pdata->ndev;
	struct osi_pkt_err_stats *pkt_err_stat = &pdata->osi_dma->pkt_err_stats;
	struct skb_shared_hwtstamps *shhwtstamp;
	unsigned long val;

	/* Give the buffer back to the CPU before touching skb->data */
	dma_unmap_single(pdata->dev, dma_addr, dma_buf_len, DMA_FROM_DEVICE);

	/* Process only the Valid packets */
	if (likely((rx_pkt_cx->flags & OSI_PKT_CX_VALID) ==
		   OSI_PKT_CX_VALID)) {
		skb_put(skb, rx_pkt_cx->pkt_len);

		/* HW checksum verdict -> skb checksum state */
		if (likely(rx_pkt_cx->rxcsum == OSI_CHECKSUM_UNNECESSARY)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else {
			skb->ip_summed = CHECKSUM_NONE;
		}

		/* HW-stripped VLAN tag: count it and re-attach to the skb */
		if ((rx_pkt_cx->flags & OSI_PKT_CX_VLAN) == OSI_PKT_CX_VLAN) {
			val = pdata->osi_dma->dstats.rx_vlan_pkt_n;
			pdata->osi_dma->dstats.rx_vlan_pkt_n =
				osi_update_stats_counter(val, 1UL);
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       rx_pkt_cx->vlan_tag);
		}

		/* Handle time stamp */
		if ((rx_pkt_cx->flags & OSI_PKT_CX_PTP) == OSI_PKT_CX_PTP) {
			shhwtstamp = skb_hwtstamps(skb);
			memset(shhwtstamp, 0,
			       sizeof(struct skb_shared_hwtstamps));
			shhwtstamp->hwtstamp = ns_to_ktime(rx_pkt_cx->ns);
		}

		skb->dev = ndev;
		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_bytes += skb->len;
		/* Prefer GRO when enabled, fall back to plain receive */
		if (likely(ndev->features & NETIF_F_GRO)) {
			napi_gro_receive(&rx_napi->napi, skb);
		} else {
			netif_receive_skb(skb);
		}
	} else {
		/* Snapshot of the cumulative HW CRC error counter (an
		 * assignment, not an increment, by design here).
		 */
		ndev->stats.rx_crc_errors = pkt_err_stat->rx_crc_error;
		ndev->stats.rx_errors++;
	}

	/* NOTE(review): rx_packets is bumped for errored frames as well;
	 * confirm whether only valid packets should be counted here.
	 */
	ndev->stats.rx_packets++;

	/* Slot consumed: ownership moved to the stack (or dropped),
	 * clear it so the refill path allocates a fresh buffer.
	 */
	rx_swcx->buf_virt_addr = NULL;
	rx_swcx->buf_phy_addr = 0;

	/* Batch refills: only re-arm the ring once >= 16 slots are empty */
	if (osi_get_refill_rx_desc_cnt(rx_ring) >= 16U)
		ether_realloc_rx_skb(pdata, rx_ring, chan);
}
/**
 * @brief osd_transmit_complete - Transmit completion routine.
 *
 * Algorithm:
 * 1) Updates stats for linux network stack.
 * 2) unmap and free the buffer DMA address and buffer.
 * 3) Time stamp will be update to stack if available.
 *
 * @param[in] priv: OSD private data structure.
 * @param[in] buffer: Buffer address to free.
 * @param[in] dmaaddr: DMA address to unmap.
 * @param[in] len: Length of data.
 * @param[in] tx_done_pkt_cx: Pointer to struct which has tx done status info.
 * This struct has flags to indicate tx error, whether DMA address
 * is mapped from paged/linear buffer.
 *
 * @note Tx completion need to make sure that Tx descriptors processed properly.
 * @note Called once per completed descriptor, so for a multi-fragment
 * frame buffer/dmaaddr may be NULL/0 for some calls; skb is only freed
 * on the call that carries it.
 */
void osd_transmit_complete(void *priv, void *buffer, unsigned long dmaaddr,
			   unsigned int len, void *tx_done_pkt_cx)
{
	struct osi_txdone_pkt_cx *txdone_pkt_cx = (struct osi_txdone_pkt_cx *)
						  tx_done_pkt_cx;
	struct ether_priv_data *pdata = (struct ether_priv_data *)priv;
	struct osi_dma_priv_data *osi_dma = pdata->osi_dma;
	struct sk_buff *skb = (struct sk_buff *)buffer;
	dma_addr_t dma_addr = (dma_addr_t)dmaaddr;
	struct skb_shared_hwtstamps shhwtstamp;
	struct net_device *ndev = pdata->ndev;
	struct osi_tx_ring *tx_ring;
	struct netdev_queue *txq;
	unsigned int chan, qinx;

	/* NOTE(review): counters are bumped unconditionally, even when the
	 * done-flags report a Tx error — confirm this is intentional.
	 */
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += len;

	/* Return the hardware Tx timestamp to the socket, if one was taken */
	if ((txdone_pkt_cx->flags & OSI_TXDONE_CX_TS) == OSI_TXDONE_CX_TS) {
		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(txdone_pkt_cx->ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}

	/* Unmap with the same API that mapped it: page fragments via
	 * dma_unmap_page(), linear data via dma_unmap_single().
	 */
	if (dma_addr) {
		if ((txdone_pkt_cx->flags & OSI_TXDONE_CX_PAGED_BUF) ==
		    OSI_TXDONE_CX_PAGED_BUF) {
			dma_unmap_page(pdata->dev, dmaaddr,
				       len, DMA_TO_DEVICE);
		} else {
			dma_unmap_single(pdata->dev, dmaaddr,
					 len, DMA_TO_DEVICE);
		}
	}

	if (skb) {
		/* index in array, netdev_get_tx_queue use index to get
		 * network queue.
		 */
		qinx = skb_get_queue_mapping(skb);
		chan = osi_dma->dma_chans[qinx];
		tx_ring = osi_dma->tx_ring[chan];
		txq = netdev_get_tx_queue(ndev, qinx);

		/* Re-wake a stopped queue once enough descriptors freed up */
		if (netif_tx_queue_stopped(txq) &&
		    ether_avail_txdesc_cnt(tx_ring) >= TX_DESC_THRESHOLD) {
			netif_tx_wake_queue(txq);
			netdev_dbg(ndev, "Tx ring[%d] - waking Txq\n", chan);
		}

		/* Successful-transmit free (avoids drop-tracing noise) */
		dev_consume_skb_any(skb);
	}
}