Mirror of git://nv-tegra.nvidia.com/kernel/nvethernetrm.git
osi: dma: Support for variable Tx/Rx ring length
o Adds support for variable Tx/Rx ring length.
o Default ring size of 1K is used if the ring size passed
  from OSD is zero.
Bug 3489814
Change-Id: I9c57b8d9e0c424bf39633998e0845fc97975de8f
Signed-off-by: Bhadram Varka <vbhadram@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/kernel/nvethernetrm/+/2652960
(cherry picked from commit 1f4753bbb3)
Reviewed-on: https://git-master.nvidia.com/r/c/kernel/nvethernetrm/+/2786266
Tested-by: Pohsun Su <pohsuns@nvidia.com>
Reviewed-by: Pohsun Su <pohsuns@nvidia.com>
Reviewed-by: Narayan Reddy <narayanr@nvidia.com>
Reviewed-by: Amulya Yarlagadda <ayarlagadda@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
parent f661fdd189
commit d54e470beb
committed by mobile promotions
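For context, a minimal OSD-side sketch of how the new ring-size fields added below might be populated before DMA init (the field names tx_ring_sz/rx_ring_sz and osi_init_dma_ops() are from this change; the wrapper function and chosen sizes are illustrative assumptions):

/* Illustrative OSD setup; osd_dma_setup() and the sizes are assumptions. */
static nve32_t osd_dma_setup(struct osi_dma_priv_data *osi_dma)
{
        /* Sizes must be a power of two within the per-MAC limits checked
         * by osi_init_dma_ops(); zero selects the default ring size
         * (1K for EQOS, 4K for MGBE). */
        osi_dma->tx_ring_sz = 512U;
        osi_dma->rx_ring_sz = 0U; /* falls back to the default */

        return osi_init_dma_ops(osi_dma);
}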
@@ -604,6 +604,10 @@ struct osi_dma_priv_data {
 #endif /* OSI_DEBUG */
         /** Flag which checks is ethernet server enabled(1) or disabled(0) */
         nveu32_t is_ethernet_server;
+        /** DMA Tx channel ring size */
+        nveu32_t tx_ring_sz;
+        /** DMA Rx channel ring size */
+        nveu32_t rx_ring_sz;
 };
 
 /**
@@ -909,7 +913,8 @@ nve32_t osi_stop_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan);
 *
 * @retval "Number of available free descriptors."
 */
-nveu32_t osi_get_refill_rx_desc_cnt(struct osi_rx_ring *rx_ring);
+nveu32_t osi_get_refill_rx_desc_cnt(struct osi_dma_priv_data *osi_dma,
+                                    unsigned int chan);
 
 /**
 * @brief osi_rx_dma_desc_init - DMA Rx descriptor init
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -29,8 +29,11 @@
 * @brief Helper macros for defining Tx/Rx descriptor count
 * @{
 */
-#define TX_DESC_CNT 1024U
-#define RX_DESC_CNT 1024U
+#define OSI_EQOS_TX_DESC_CNT 1024U
+#define OSI_EQOS_RX_DESC_CNT 1024U
+#define OSI_MGBE_TX_DESC_CNT 4096U
+#define OSI_MGBE_RX_DESC_CNT 4096U
+#define OSI_MGBE_MAX_RX_DESC_CNT 16384U
 /** @} */
 
 /** TSO Header length divisor */
@@ -43,14 +46,14 @@
 * @{
 */
 /** Increment the tx descriptor index */
-#define INCR_TX_DESC_INDEX(idx, i) ((idx) = ((idx) + (i)) & (TX_DESC_CNT - 1U))
+#define INCR_TX_DESC_INDEX(idx, x) ((idx) = ((idx) + (1U)) & ((x) - 1U))
 /** Increment the rx descriptor index */
-#define INCR_RX_DESC_INDEX(idx, i) ((idx) = ((idx) + (i)) & (RX_DESC_CNT - 1U))
+#define INCR_RX_DESC_INDEX(idx, x) ((idx) = ((idx) + (1U)) & ((x) - 1U))
 #ifndef OSI_STRIPPED_LIB
 /** Decrement the tx descriptor index */
-#define DECR_TX_DESC_INDEX(idx, i) ((idx) = ((idx) - (i)) & (TX_DESC_CNT - 1U))
+#define DECR_TX_DESC_INDEX(idx, x) ((idx) = ((idx) - (1U)) & ((x) - 1U))
 /** Decrement the rx descriptor index */
-#define DECR_RX_DESC_INDEX(idx, i) ((idx) = ((idx) - (i)) & (RX_DESC_CNT - 1U))
+#define DECR_RX_DESC_INDEX(idx, x) ((idx) = ((idx) - (1U)) & ((x) - 1U))
 #endif /* !OSI_STRIPPED_LIB */
 /** @} */
 #endif /* INCLUDED_OSI_DMA_TXRX_H */
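The reworked macros take the ring size as a parameter and wrap the index with a bitmask, which is exactly why the ring size must be a power of two: for power-of-two x (and idx < x), ((idx) + 1U) & ((x) - 1U) equals ((idx) + 1U) % (x). A small standalone check of the wraparound (macro copied from the hunk above; the harness itself is illustrative):

#include <assert.h>
#include <stdint.h>

typedef uint32_t nveu32_t;

/* Macro as introduced by this change */
#define INCR_TX_DESC_INDEX(idx, x) ((idx) = ((idx) + (1U)) & ((x) - 1U))

int main(void)
{
        nveu32_t idx = 1023U;

        INCR_TX_DESC_INDEX(idx, 1024U); /* 1023 wraps to 0 */
        assert(idx == 0U);
        INCR_TX_DESC_INDEX(idx, 1024U); /* 0 advances to 1 */
        assert(idx == 1U);
        return 0;
}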
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -205,7 +205,7 @@ static void tx_desc_dump(struct osi_dma_priv_data *osi_dma, unsigned int f_idx,
         int cnt;
 
         if (f_idx > l_idx) {
-                cnt = l_idx + TX_DESC_CNT - f_idx;
+                cnt = l_idx + osi_dma->tx_ring_sz - f_idx;
         } else {
                 cnt = l_idx - f_idx;
         }
@@ -223,7 +223,7 @@ static void tx_desc_dump(struct osi_dma_priv_data *osi_dma, unsigned int f_idx,
                         tx_desc->tdes3, tx_desc->tdes2,
                         tx_desc->tdes1, tx_desc->tdes0);
 
-                INCR_TX_DESC_INDEX(i, 1U);
+                INCR_TX_DESC_INDEX(i, osi_dma->tx_ring_sz);
         }
     }
 }
@@ -34,6 +34,14 @@
 #define MAX_DMA_INSTANCES 10U
 #endif
 
+/**
+ * @brief Default DMA Tx/Rx ring sizes for EQOS/MGBE.
+ */
+#define EQOS_DEFAULT_RING_SZ 1024U
+#define MGBE_DEFAULT_RING_SZ 4096U
+#define MGBE_MAX_RING_SZ 16384U
+#define HW_MIN_RING_SZ 4U
+
 /**
 * @brief MAC DMA Channel operations
 */
@@ -227,6 +235,15 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma,
 nve32_t dma_desc_init(struct osi_dma_priv_data *osi_dma,
                       struct dma_chan_ops *ops);
 
+static inline nveu32_t is_power_of_two(nveu32_t num)
+{
+        if ((num > 0U) && ((num & (num - 1U)) == 0U)) {
+                return OSI_ENABLE;
+        }
+
+        return OSI_DISABLE;
+}
+
 /**
 * @addtogroup Helper Helper MACROS
 *
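A quick note on the is_power_of_two() helper added above: a power of two has exactly one bit set, so num & (num - 1U) clears the lowest set bit and yields zero precisely for powers of two, while the num > 0U guard excludes zero. For example, 4096U & 4095U (0x1000 & 0x0FFF) == 0U, so 4096 is accepted, whereas 4097U & 4096U == 0x1000U != 0U, so 4097 is rejected and the init path below falls back to the default ring size.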
@@ -193,6 +193,8 @@ static nve32_t validate_func_ptrs(struct osi_dma_priv_data *osi_dma,
 nve32_t osi_init_dma_ops(struct osi_dma_priv_data *osi_dma)
 {
         struct dma_local *l_dma = (struct dma_local *)osi_dma;
+        nveu32_t default_rz[] = { EQOS_DEFAULT_RING_SZ, MGBE_DEFAULT_RING_SZ };
+        nveu32_t max_rz[] = { EQOS_DEFAULT_RING_SZ, MGBE_MAX_RING_SZ };
         typedef void (*init_ops_arr)(struct dma_chan_ops *temp);
         typedef void *(*safety_init)(void);
 
@@ -231,6 +233,26 @@ nve32_t osi_init_dma_ops(struct osi_dma_priv_data *osi_dma)
                 return -1;
         }
 
+        if ((osi_dma->tx_ring_sz == 0U) ||
+            !(is_power_of_two(osi_dma->tx_ring_sz)) ||
+            (osi_dma->tx_ring_sz < HW_MIN_RING_SZ) ||
+            (osi_dma->tx_ring_sz > default_rz[osi_dma->mac])) {
+                osi_dma->tx_ring_sz = default_rz[osi_dma->mac];
+                OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
+                            "DMA: Using default Tx ring size: \n",
+                            osi_dma->tx_ring_sz);
+        }
+
+        if ((osi_dma->rx_ring_sz == 0U) ||
+            !(is_power_of_two(osi_dma->rx_ring_sz)) ||
+            (osi_dma->rx_ring_sz < HW_MIN_RING_SZ) ||
+            (osi_dma->rx_ring_sz > max_rz[osi_dma->mac])) {
+                osi_dma->rx_ring_sz = default_rz[osi_dma->mac];
+                OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
+                            "DMA: Using default rx ring size: \n",
+                            osi_dma->tx_ring_sz);
+        }
+
         i_ops[osi_dma->mac](&g_ops[osi_dma->mac]);
 
         if (s_init[osi_dma->mac] != OSI_NULL) {
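The net effect of the two checks above: osi_dma->mac indexes the per-MAC tables, so for EQOS both rings are limited to the 1024 default, while for MGBE the Tx ring is capped at its 4096 default and the Rx ring may go up to MGBE_MAX_RING_SZ (16384). Any size that is zero, not a power of two, below HW_MIN_RING_SZ (4), or above the applicable cap is replaced by the per-MAC default, with an error logged. Illustrative outcomes for MGBE: tx_ring_sz = 8192 resets to 4096 (above the Tx cap); rx_ring_sz = 8192 is accepted (power of two, within 4..16384); rx_ring_sz = 3000 resets to 4096 (not a power of two).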
@@ -542,15 +564,19 @@ nve32_t osi_stop_dma(struct osi_dma_priv_data *osi_dma,
         return 0;
 }
 
-nveu32_t osi_get_refill_rx_desc_cnt(struct osi_rx_ring *rx_ring)
+nveu32_t osi_get_refill_rx_desc_cnt(struct osi_dma_priv_data *osi_dma,
+                                    unsigned int chan)
 {
+        struct osi_rx_ring *rx_ring = osi_dma->rx_ring[chan];
+
         if ((rx_ring == OSI_NULL) ||
-            (rx_ring->cur_rx_idx >= RX_DESC_CNT) ||
-            (rx_ring->refill_idx >= RX_DESC_CNT)) {
+            (rx_ring->cur_rx_idx >= osi_dma->rx_ring_sz) ||
+            (rx_ring->refill_idx >= osi_dma->rx_ring_sz)) {
                 return 0;
         }
 
-        return (rx_ring->cur_rx_idx - rx_ring->refill_idx) & (RX_DESC_CNT - 1U);
+        return (rx_ring->cur_rx_idx - rx_ring->refill_idx) &
+               (osi_dma->rx_ring_sz - 1U);
 }
 
 /**
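Because osi_get_refill_rx_desc_cnt() now looks up the ring from the channel number itself, callers switch from passing a ring pointer to passing the DMA context plus channel. A hedged sketch of the updated call site (osd_refill_rx_buffers() is a hypothetical OSD helper):

/* Old call: osi_get_refill_rx_desc_cnt(osi_dma->rx_ring[chan]); */
nveu32_t refill = osi_get_refill_rx_desc_cnt(osi_dma, chan);

if (refill > 0U) {
        osd_refill_rx_buffers(osi_dma, chan, refill); /* hypothetical */
}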
@@ -649,7 +675,7 @@ nve32_t osi_rx_dma_desc_init(struct osi_dma_priv_data *osi_dma,
 
         /* Refill buffers */
         while ((rx_ring->refill_idx != rx_ring->cur_rx_idx) &&
-               (rx_ring->refill_idx < RX_DESC_CNT)) {
+               (rx_ring->refill_idx < osi_dma->rx_ring_sz)) {
                 rx_swcx = rx_ring->rx_swcx + rx_ring->refill_idx;
                 rx_desc = rx_ring->rx_desc + rx_ring->refill_idx;
 
@@ -675,7 +701,7 @@ nve32_t osi_rx_dma_desc_init(struct osi_dma_priv_data *osi_dma,
                 rx_dma_handle_ioc(osi_dma, rx_ring, rx_desc);
                 rx_desc->rdes3 |= RDES3_OWN;
 
-                INCR_RX_DESC_INDEX(rx_ring->refill_idx, 1U);
+                INCR_RX_DESC_INDEX(rx_ring->refill_idx, osi_dma->rx_ring_sz);
         }
 
         /* Update the Rx tail ptr whenever buffer is replenished to
@@ -684,7 +710,7 @@ nve32_t osi_rx_dma_desc_init(struct osi_dma_priv_data *osi_dma,
         * knows to loop over to start of ring.
         */
         tailptr = rx_ring->rx_desc_phy_addr +
-                  (sizeof(struct osi_rx_desc) * (RX_DESC_CNT));
+                  (sizeof(struct osi_rx_desc) * (osi_dma->rx_ring_sz));
 
         if (osi_unlikely(tailptr < rx_ring->rx_desc_phy_addr)) {
                 /* Will not hit this case, used for CERT-C compliance */
@@ -148,7 +148,7 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma,
                 return ret;
         }
 
-        if (rx_ring->cur_rx_idx >= RX_DESC_CNT) {
+        if (rx_ring->cur_rx_idx >= osi_dma->rx_ring_sz) {
                 OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
                             "dma_txrx: Invalid cur_rx_idx\n", 0ULL);
                 return -1;
@@ -173,7 +173,7 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma,
                 }
 #endif /* OSI_DEBUG */
 
-                INCR_RX_DESC_INDEX(rx_ring->cur_rx_idx, 1U);
+                INCR_RX_DESC_INDEX(rx_ring->cur_rx_idx, osi_dma->rx_ring_sz);
 
                 if (osi_unlikely(rx_swcx->buf_virt_addr ==
                                  osi_dma->resv_buf_virt_addr)) {
@@ -264,7 +264,7 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma,
                         /* Context descriptor was consumed. Its skb
                         * and DMA mapping will be recycled
                         */
-                        INCR_RX_DESC_INDEX(rx_ring->cur_rx_idx, 1U);
+                        INCR_RX_DESC_INDEX(rx_ring->cur_rx_idx, osi_dma->rx_ring_sz);
                 }
                 if (osi_likely(osi_dma->osd_ops.receive_packet !=
                                OSI_NULL)) {
@@ -569,7 +569,7 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma,
         osi_dma->dstats.tx_clean_n[chan] =
                 osi_update_stats_counter(osi_dma->dstats.tx_clean_n[chan], 1U);
 
-        while ((entry != tx_ring->cur_tx_idx) && (entry < TX_DESC_CNT) &&
+        while ((entry != tx_ring->cur_tx_idx) && (entry < osi_dma->tx_ring_sz) &&
                (processed < budget)) {
                 osi_memset(txdone_pkt_cx, 0U, sizeof(*txdone_pkt_cx));
 
@@ -674,7 +674,7 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma,
                 tx_swcx->buf_virt_addr = OSI_NULL;
                 tx_swcx->buf_phy_addr = 0;
                 tx_swcx->flags = 0;
-                INCR_TX_DESC_INDEX(entry, 1U);
+                INCR_TX_DESC_INDEX(entry, osi_dma->tx_ring_sz);
 
                 /* Don't wait to update tx_ring->clean-idx. It will
                 * be used by OSD layer to determine the num. of available
@@ -988,7 +988,7 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma,
         nveu32_t i;
 
         entry = tx_ring->cur_tx_idx;
-        if (entry >= TX_DESC_CNT) {
+        if (entry >= osi_dma->tx_ring_sz) {
                 OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
                             "dma_txrx: Invalid cur_tx_idx\n", 0ULL);
                 return -1;
@@ -1040,7 +1040,7 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma,
                         /* update packet id */
                         tx_desc->tdes0 = pkt_id;
                 }
-                INCR_TX_DESC_INDEX(entry, 1U);
+                INCR_TX_DESC_INDEX(entry, osi_dma->tx_ring_sz);
 
                 /* Storing context descriptor to set DMA_OWN at last */
                 cx_desc = tx_desc;
@@ -1060,7 +1060,7 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma,
                 tx_swcx->pktid = pkt_id;
         }
 
-        INCR_TX_DESC_INDEX(entry, 1U);
+        INCR_TX_DESC_INDEX(entry, osi_dma->tx_ring_sz);
 
         first_desc = tx_desc;
         last_desc = tx_desc;
@@ -1076,7 +1076,7 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma,
                 /* set HW OWN bit for descriptor*/
                 tx_desc->tdes3 |= TDES3_OWN;
 
-                INCR_TX_DESC_INDEX(entry, 1U);
+                INCR_TX_DESC_INDEX(entry, osi_dma->tx_ring_sz);
                 last_desc = tx_desc;
                 tx_desc = tx_ring->tx_desc + entry;
                 tx_swcx = tx_ring->tx_swcx + entry;
@@ -1129,7 +1129,7 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma,
 #ifdef OSI_DEBUG
         if (osi_dma->enable_desc_dump == 1U) {
                 l_idx = entry;
-                desc_dump(osi_dma, f_idx, DECR_TX_DESC_INDEX(l_idx, 1U),
+                desc_dump(osi_dma, f_idx, DECR_TX_DESC_INDEX(l_idx, osi_dma->tx_ring_sz),
                           (TX_DESC_DUMP | TX_DESC_DUMP_TX), chan);
         }
 #endif /* OSI_DEBUG */
@@ -1197,7 +1197,7 @@ static nve32_t rx_dma_desc_initialization(struct osi_dma_priv_data *osi_dma,
         rx_ring->cur_rx_idx = 0;
         rx_ring->refill_idx = 0;
 
-        for (i = 0; i < RX_DESC_CNT; i++) {
+        for (i = 0; i < osi_dma->rx_ring_sz; i++) {
                 rx_swcx = rx_ring->rx_swcx + i;
                 rx_desc = rx_ring->rx_desc + i;
 
@@ -1235,7 +1235,7 @@ static nve32_t rx_dma_desc_initialization(struct osi_dma_priv_data *osi_dma,
         }
 
         tailptr = rx_ring->rx_desc_phy_addr +
-                  (sizeof(struct osi_rx_desc) * (RX_DESC_CNT));
+                  (sizeof(struct osi_rx_desc) * (osi_dma->rx_ring_sz));
 
         if (osi_unlikely((tailptr < rx_ring->rx_desc_phy_addr))) {
                 /* Will not hit this case */
@@ -1244,7 +1244,7 @@ static nve32_t rx_dma_desc_initialization(struct osi_dma_priv_data *osi_dma,
                 return -1;
         }
 
-        ops->set_rx_ring_len(osi_dma, chan, (RX_DESC_CNT - 1U));
+        ops->set_rx_ring_len(osi_dma, chan, (osi_dma->rx_ring_sz - 1U));
         ops->update_rx_tailptr(osi_dma->base, chan, tailptr);
         ops->set_rx_ring_start_addr(osi_dma->base, chan,
                                     rx_ring->rx_desc_phy_addr);
@@ -1331,7 +1331,7 @@ static nve32_t tx_dma_desc_init(struct osi_dma_priv_data *osi_dma,
                 return -1;
         }
 
-        for (j = 0; j < TX_DESC_CNT; j++) {
+        for (j = 0; j < osi_dma->tx_ring_sz; j++) {
                 tx_desc = tx_ring->tx_desc + j;
                 tx_swcx = tx_ring->tx_swcx + j;
 
@@ -1354,7 +1354,7 @@ static nve32_t tx_dma_desc_init(struct osi_dma_priv_data *osi_dma,
         tx_ring->slot_check = OSI_DISABLE;
 
         ops->set_tx_ring_len(osi_dma, chan,
-                             (TX_DESC_CNT - 1U));
+                             (osi_dma->tx_ring_sz - 1U));
         ops->set_tx_ring_start_addr(osi_dma->base, chan,
                                     tx_ring->tx_desc_phy_addr);
 }
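A convention worth noting across the final hunks: the Rx tail pointer is computed from the full ring size (rx_desc_phy_addr + sizeof(struct osi_rx_desc) * rx_ring_sz), while set_rx_ring_len()/set_tx_ring_len() are programmed with ring_sz - 1U, which suggests the hardware length registers hold a zero-based descriptor count. Since both values now derive from the same tx_ring_sz/rx_ring_sz fields, they stay consistent for any ring size the OSD configures.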