Mirror of git://nv-tegra.nvidia.com/kernel/nvethernetrm.git
osi: mgbe: configure MTL TXQ size
Issue: Currently the MTL TX FIFO is divided by 10 for MGBE since there
are 10 MTL TX queues, but at most 8 channels can be used in Linux and
at most 2 channels in QNX. On TX, the DMA-channel-to-MTL-queue mapping
is static, so the memory of 2 queues in Linux and of 8 queues in QNX
goes unused.

Fix: Divide the TX FIFO size across the enabled DMA channels only.

Bug 4443026
Bug 4283087
Bug 4266776

Change-Id: I92ac5da644f2df05503ac44979e0d16079cf9231
Signed-off-by: Bhadram Varka <vbhadram@nvidia.com>
Signed-off-by: Revanth Kumar Uppala <ruppala@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/kernel/nvethernetrm/+/3009823
Reviewed-by: Ashutosh Jha <ajha@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
parent 6c637ac71e
commit 606235ffb2
committed by mobile promotions
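As a worked example of the resizing this commit performs (a standalone
sketch, not driver code; 131072U is MGBE_MAC_TX_FIFO_SZ from the diff
below, and the size/256 - 1 encoding matches its FIFO_SZ macro):

    #include <stdio.h>

    /* Per-queue TX FIFO share, encoded as the hardware expects: (bytes/256)-1. */
    static unsigned int txq_enc(unsigned int total, unsigned int nqueues)
    {
        return ((total / nqueues) / 256U) - 1U;
    }

    int main(void)
    {
        unsigned int total = 131072U; /* MGBE_MAC_TX_FIFO_SZ, 128 KB */

        /* Old behaviour: fixed 10-way split -> 13056 usable bytes per queue */
        printf("10 queues: enc=%u (%u bytes)\n", txq_enc(total, 10U),
               (txq_enc(total, 10U) + 1U) * 256U);
        /* New behaviour: split across enabled channels only, e.g. 8 on Linux,
         * 2 on QNX -> 16384 and 65536 bytes per queue respectively */
        printf(" 8 queues: enc=%u (%u bytes)\n", txq_enc(total, 8U),
               (txq_enc(total, 8U) + 1U) * 256U);
        printf(" 2 queues: enc=%u (%u bytes)\n", txq_enc(total, 2U),
               (txq_enc(total, 2U) + 1U) * 256U);
        return 0;
    }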
@@ -185,6 +185,7 @@
 #define EQOS_DMA_CHX_STATUS(x) ((0x0080U * (x)) + 0x1160U)
 #define MGBE_DMA_CHX_STATUS(x) ((0x0080U * (x)) + 0x3160U)
 #define EQOS_DMA_CHX_IER(x) ((0x0080U * (x)) + 0x1134U)
+#define MGBE_MTL_CHX_TX_OP_MODE(x) ((0x0080U * (x)) + 0x1100U)
 
 /* FIXME add logic based on HW version */
 #define OSI_EQOS_MAX_NUM_CHANS 8U

@@ -47,6 +47,9 @@
  * @brief FIFO size helper macro
  */
 #define FIFO_SZ(x) ((((x) * 1024U) / 256U) - 1U)
+#define EQOS_MAC_XP_TX_FIFO_SZ 36864U
+#define EQOS_MAC_TX_FIFO_SZ 65536U
+#define MGBE_MAC_TX_FIFO_SZ 131072U
 
 /**
  * @brief Dynamic configuration helper macros.

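FIFO_SZ() takes a size in kilobytes and returns the hardware encoding,
(bytes / 256) - 1, while the three new totals are plain byte counts
(36 KB, 64 KB, 128 KB). A quick compile-time sanity check of that
relationship (a sketch, not part of the driver):

    #define FIFO_SZ(x) ((((x) * 1024U) / 256U) - 1U)

    /* 36 KB encodes as 143, and the encoding round-trips to
     * EQOS_MAC_XP_TX_FIFO_SZ (36864 bytes). */
    _Static_assert(FIFO_SZ(36U) == 143U, "36 KB -> 143");
    _Static_assert((FIFO_SZ(36U) + 1U) * 256U == 36864U, "round-trip");
    _Static_assert((FIFO_SZ(128U) + 1U) * 256U == 131072U, "128 KB = MGBE total");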
@@ -386,6 +389,8 @@ struct core_local {
 	/** l3l4 wildcard filter configured (OSI_ENABLE) / not configured (OSI_DISABLE) */
 	nveu32_t l3l4_wildcard_filter_configured;
 #endif /* L3L4_WILDCARD_FILTER */
+	/** TXFIFO size per queue */
+	nveu32_t tx_fifosz_perq;
 };
 
 /**

@@ -354,8 +354,7 @@ calibration_failed:
  * @retval 0 on success
  * @retval -1 on failure.
  */
-static nve32_t eqos_configure_mtl_queue(struct osi_core_priv_data *const osi_core,
-					nveu32_t q_inx)
+static nve32_t eqos_configure_mtl_queue(struct osi_core_priv_data *const osi_core)
 {
 	const struct core_local *l_core = (struct core_local *)(void *)osi_core;
 	const nveu32_t rx_fifo_sz[2U][OSI_EQOS_MAX_NUM_QUEUES] = {

@@ -364,12 +363,6 @@ static nve32_t eqos_configure_mtl_queue(struct osi_core_priv_data *const osi_cor
 		{ FIFO_SZ(36U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U),
 		  FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(16U) },
 	};
-	const nveu32_t tx_fifo_sz[2U][OSI_EQOS_MAX_NUM_QUEUES] = {
-		{ FIFO_SZ(9U), FIFO_SZ(9U), FIFO_SZ(9U), FIFO_SZ(9U),
-		  FIFO_SZ(1U), FIFO_SZ(1U), FIFO_SZ(1U), FIFO_SZ(1U) },
-		{ FIFO_SZ(8U), FIFO_SZ(8U), FIFO_SZ(8U), FIFO_SZ(8U),
-		  FIFO_SZ(8U), FIFO_SZ(8U), FIFO_SZ(8U), FIFO_SZ(8U) },
-	};
 	const nveu32_t rfd_rfa[OSI_EQOS_MAX_NUM_QUEUES] = {
 		FULL_MINUS_16_K,
 		FULL_MINUS_1_5K,

@@ -381,31 +374,44 @@ static nve32_t eqos_configure_mtl_queue(struct osi_core_priv_data *const osi_cor
 		FULL_MINUS_1_5K,
 	};
 	nveu32_t l_macv = (l_core->l_mac_ver & 0x1U);
-	nveu32_t que_idx = (q_inx & 0x7U);
+	nveu32_t value = 0, i, que_idx;
 	nveu32_t rx_fifo_sz_t = 0U;
-	nveu32_t tx_fifo_sz_t = 0U;
-	nveu32_t value = 0;
 	nve32_t ret = 0;
 
-	tx_fifo_sz_t = tx_fifo_sz[l_macv][que_idx];
+	for (i = 0U; i < osi_core->num_mtl_queues; i++) {
+		que_idx = osi_core->mtl_queues[i] & 0xFU;
 
 		ret = hw_flush_mtl_tx_queue(osi_core, que_idx);
-	if (ret < 0) {
+		if (ret < 0)
 			goto fail;
-	}
 
-	value = (tx_fifo_sz_t << EQOS_MTL_TXQ_SIZE_SHIFT);
+		value = (l_core->tx_fifosz_perq << EQOS_MTL_TXQ_SIZE_SHIFT);
 		/* Enable Store and Forward mode */
 		value |= EQOS_MTL_TSF;
 		/* Enable TxQ */
 		value |= EQOS_MTL_TXQEN;
-	osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MTL_CHX_TX_OP_MODE(que_idx));
+		osi_writela(osi_core, value, (nveu8_t *)osi_core->base +
+			    EQOS_MTL_CHX_TX_OP_MODE(que_idx));
+
+		/* Transmit Queue weight */
+		value = osi_readla(osi_core, (nveu8_t *)osi_core->base +
+				   EQOS_MTL_TXQ_QW(que_idx));
+		value |= EQOS_MTL_TXQ_QW_ISCQW;
+		osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MTL_TXQ_QW(que_idx));
+
+		/* Enable by default to configure forward error packets.
+		 * Since this is a local function this will always return success,
+		 * so no need to check for return value
+		 */
+		(void)hw_config_fw_err_pkts(osi_core, que_idx, OSI_ENABLE);
+	}
+
+	for (i = 0U; i < OSI_EQOS_MAX_NUM_QUEUES; i++) {
 		/* read RX Q0 Operating Mode Register */
 		value = osi_readla(osi_core, (nveu8_t *)osi_core->base +
-			EQOS_MTL_CHX_RX_OP_MODE(que_idx));
+				   EQOS_MTL_CHX_RX_OP_MODE(i));
 
-	rx_fifo_sz_t = rx_fifo_sz[l_macv][que_idx];
+		rx_fifo_sz_t = rx_fifo_sz[l_macv][i];
 		value |= (rx_fifo_sz_t << EQOS_MTL_RXQ_SIZE_SHIFT);
 		/* Enable Store and Forward mode */
 		value |= EQOS_MTL_RSF;

@@ -417,24 +423,18 @@ static nve32_t eqos_configure_mtl_queue(struct osi_core_priv_data *const osi_cor
 		value &= ~EQOS_MTL_RXQ_OP_MODE_RFD_MASK;
 		value &= ~EQOS_MTL_RXQ_OP_MODE_RFA_MASK;
 		value |= EQOS_MTL_RXQ_OP_MODE_EHFC;
-	value |= (rfd_rfa[que_idx] << EQOS_MTL_RXQ_OP_MODE_RFD_SHIFT) &
-		 EQOS_MTL_RXQ_OP_MODE_RFD_MASK;
-	value |= (rfd_rfa[que_idx] << EQOS_MTL_RXQ_OP_MODE_RFA_SHIFT) &
-		 EQOS_MTL_RXQ_OP_MODE_RFA_MASK;
-	osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MTL_CHX_RX_OP_MODE(que_idx));
-
-	/* Transmit Queue weight */
-	value = osi_readla(osi_core, (nveu8_t *)osi_core->base +
-			   EQOS_MTL_TXQ_QW(que_idx));
-	value |= EQOS_MTL_TXQ_QW_ISCQW;
-	osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MTL_TXQ_QW(que_idx));
+		value |= (rfd_rfa[i] << EQOS_MTL_RXQ_OP_MODE_RFD_SHIFT) &
+			 EQOS_MTL_RXQ_OP_MODE_RFD_MASK;
+		value |= (rfd_rfa[i] << EQOS_MTL_RXQ_OP_MODE_RFA_SHIFT) &
+			 EQOS_MTL_RXQ_OP_MODE_RFA_MASK;
+		osi_writela(osi_core, value, (nveu8_t *)osi_core->base +
+			    EQOS_MTL_CHX_RX_OP_MODE(i));
 
 		/* Enable Rx Queue Control */
-	value = osi_readla(osi_core, (nveu8_t *)osi_core->base +
-			   EQOS_MAC_RQC0R);
-	value |= ((osi_core->rxq_ctrl[que_idx] & EQOS_RXQ_EN_MASK) << (que_idx * 2U));
+		value = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_RQC0R);
+		value |= ((osi_core->rxq_ctrl[i] & EQOS_RXQ_EN_MASK) << (i * 2U));
 		osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_RQC0R);
+	}
 fail:
 	return ret;
 }

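The i * 2U shift in the rewritten RX loop reflects MAC_RQC0R packing a
2-bit enable field per RX queue. A minimal sketch of how the register
value accumulates (the rxq_ctrl[] values and the 0x3 mask are
illustrative assumptions, with 0/1/2 meaning disabled/AV/DCB in the
usual EQOS encoding):

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical per-queue enable modes: 0=disabled, 1=AV, 2=DCB */
        unsigned int rxq_ctrl[8] = { 2U, 0U, 0U, 0U, 1U, 0U, 0U, 2U };
        unsigned int value = 0U;

        for (unsigned int i = 0U; i < 8U; i++)
            value |= ((rxq_ctrl[i] & 0x3U) << (i * 2U)); /* 2 bits per queue */

        printf("MAC_RQC0R = 0x%04x\n", value); /* 0x8102 for the values above */
        return 0;
    }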
@@ -1263,7 +1263,6 @@ static void eqos_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core)
 static nve32_t eqos_core_init(struct osi_core_priv_data *const osi_core)
 {
 	nve32_t ret = 0;
-	nveu32_t qinx = 0;
 	nveu32_t value = 0;
 	nveu32_t value1 = 0;
 

@@ -1324,32 +1323,10 @@ static nve32_t eqos_core_init(struct osi_core_priv_data *const osi_core)
 		osi_writela(osi_core, value1, (nveu8_t *)osi_core->base + EQOS_MTL_RXQ_DMA_MAP1);
 	}
 
-	if (osi_unlikely(osi_core->num_mtl_queues > OSI_EQOS_MAX_NUM_QUEUES)) {
-		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
-			     "Number of queues is incorrect\n", 0ULL);
-		ret = -1;
+	ret = eqos_configure_mtl_queue(osi_core);
+	if (ret < 0)
 		goto fail;
-	}
-
-	/* Configure MTL Queues */
-	for (qinx = 0; qinx < osi_core->num_mtl_queues; qinx++) {
-		if (osi_unlikely(osi_core->mtl_queues[qinx] >=
-				 OSI_EQOS_MAX_NUM_QUEUES)) {
-			OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
-				     "Incorrect queues number\n", 0ULL);
-			ret = -1;
-			goto fail;
-		}
-		ret = eqos_configure_mtl_queue(osi_core, osi_core->mtl_queues[qinx]);
-		if (ret < 0) {
-			goto fail;
-		}
-		/* Enable by default to configure forward error packets.
-		 * Since this is a local function this will always return sucess,
-		 * so no need to check for return value
-		 */
-		(void)hw_config_fw_err_pkts(osi_core, osi_core->mtl_queues[qinx], OSI_ENABLE);
-	}
 
 	/* configure EQOS MAC HW */
 	eqos_configure_mac(osi_core);

@@ -2567,7 +2544,6 @@ static nve32_t eqos_config_ptp_rxq(struct osi_core_priv_data *const osi_core,
 {
 	nveu8_t *base = osi_core->base;
 	nveu32_t value = OSI_NONE;
-	nveu32_t i = 0U;
 
 	/* Validate the RX queue index argment */
 	if (rxq_idx >= OSI_EQOS_MAX_NUM_QUEUES) {

@@ -2590,21 +2566,6 @@ static nve32_t eqos_config_ptp_rxq(struct osi_core_priv_data *const osi_core,
 		return -1;
 	}
 
-	/* Validate PTP RX queue enable */
-	for (i = 0; i < osi_core->num_mtl_queues; i++) {
-		if (osi_core->mtl_queues[i] == rxq_idx) {
-			/* Given PTP RX queue is enabled */
-			break;
-		}
-	}
-
-	if (i == osi_core->num_mtl_queues) {
-		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
-			     "PTP RX queue not enabled\n",
-			     rxq_idx);
-		return -1;
-	}
-
 	/* Read MAC_RxQ_Ctrl1 */
 	value = osi_readla(osi_core, (nveu8_t *)base + EQOS_MAC_RQC1R);
 	if (enable == OSI_DISABLE) {

@@ -685,7 +685,6 @@ static nve32_t mgbe_config_ptp_rxq(struct osi_core_priv_data *const osi_core,
 {
 	nveu8_t *base = osi_core->base;
 	nveu32_t value = 0U;
-	nveu32_t i = 0U;
 
 	/* Validate the RX queue index argument */
 	if (rxq_idx >= OSI_MGBE_MAX_NUM_QUEUES) {

@@ -703,20 +702,6 @@ static nve32_t mgbe_config_ptp_rxq(struct osi_core_priv_data *const osi_core,
 		return -1;
 	}
 
-	/* Validate PTP RX queue enable */
-	for (i = 0; i < osi_core->num_mtl_queues; i++) {
-		if (osi_core->mtl_queues[i] == rxq_idx) {
-			/* Given PTP RX queue is enabled */
-			break;
-		}
-	}
-	if (i == osi_core->num_mtl_queues) {
-		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
-			     "PTP RX queue not enabled\n",
-			     rxq_idx);
-		return -1;
-	}
-
 	/* Read MAC_RxQ_Ctrl1 */
 	value = osi_readla(osi_core, base + MGBE_MAC_RQC1R);
 	/* Check for enable or disable */

@@ -1179,10 +1164,11 @@ done:
  * @retval 0 on success
  * @retval -1 on failure.
  */
-static nve32_t mgbe_configure_mtl_queue(struct osi_core_priv_data *osi_core,
-					nveu32_t hw_qinx)
+static nve32_t mgbe_configure_mtl_queue(struct osi_core_priv_data *osi_core)
 {
-	nveu32_t qinx = hw_qinx & 0xFU;
+	const struct core_local *l_core = (struct core_local *)(void *)osi_core;
+	nveu32_t qinx = 0U;
+	nveu32_t i = 0U;
 	/*
 	 * Total available Rx queue size is 192KB.
 	 * Below is the destribution among the Rx queueu -

@@ -1198,10 +1184,6 @@ static nve32_t mgbe_configure_mtl_queue(struct osi_core_priv_data *osi_core,
 		FIFO_SZ(160U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U),
 		FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(16U),
 	};
-	const nveu32_t tx_fifo_sz[OSI_MGBE_MAX_NUM_QUEUES] = {
-		TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ,
-		TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ,
-	};
 	const nveu32_t rfd_rfa[OSI_MGBE_MAX_NUM_QUEUES] = {
 		FULL_MINUS_32_K,
 		FULL_MINUS_1_5K,

@@ -1217,6 +1199,8 @@ static nve32_t mgbe_configure_mtl_queue(struct osi_core_priv_data *osi_core,
 	nveu32_t value = 0;
 	nve32_t ret = 0;
 
+	for (i = 0U; i < osi_core->num_mtl_queues; i++) {
+		qinx = osi_core->mtl_queues[i];
 		/* Program ETSALG (802.1Qaz) and RAA in MTL_Operation_Mode
 		 * register to initialize the MTL operation in case
 		 * of multiple Tx and Rx queues default : ETSALG WRR RAA SP

@@ -1237,9 +1221,8 @@ static nve32_t mgbe_configure_mtl_queue(struct osi_core_priv_data *osi_core,
 		 * default: 0x0 SP
 		 */
 		ret = hw_flush_mtl_tx_queue(osi_core, qinx);
-		if (ret < 0) {
+		if (ret < 0)
 			goto fail;
-		}
 
 		if (osi_unlikely((qinx >= OSI_MGBE_MAX_NUM_QUEUES) ||
 				 (osi_core->tc[qinx] >= OSI_MAX_TC_NUM))) {

@@ -1249,7 +1232,7 @@ static nve32_t mgbe_configure_mtl_queue(struct osi_core_priv_data *osi_core,
 			goto fail;
 		}
 
-		value = (tx_fifo_sz[qinx] << MGBE_MTL_TXQ_SIZE_SHIFT);
+		value = (l_core->tx_fifosz_perq << MGBE_MTL_TXQ_SIZE_SHIFT);
 		/* Enable Store and Forward mode */
 		value |= MGBE_MTL_TSF;
 		/*TTC not applicable for TX*/

@@ -1259,31 +1242,6 @@ static nve32_t mgbe_configure_mtl_queue(struct osi_core_priv_data *osi_core,
 		osi_writela(osi_core, value, (nveu8_t *)
 			    osi_core->base + MGBE_MTL_CHX_TX_OP_MODE(qinx));
 
-	/* read RX Q0 Operating Mode Register */
-	value = osi_readla(osi_core, (nveu8_t *)osi_core->base +
-		MGBE_MTL_CHX_RX_OP_MODE(qinx));
-	value |= (rx_fifo_sz[qinx] << MGBE_MTL_RXQ_SIZE_SHIFT);
-	/* Enable Store and Forward mode */
-	value |= MGBE_MTL_RSF;
-	/* Enable HW flow control */
-	value |= MGBE_MTL_RXQ_OP_MODE_EHFC;
-
-	osi_writela(osi_core, value, (nveu8_t *)osi_core->base +
-		MGBE_MTL_CHX_RX_OP_MODE(qinx));
-
-	/* Update RFA and RFD
-	 * RFA: Threshold for Activating Flow Control
-	 * RFD: Threshold for Deactivating Flow Control
-	 */
-	value = osi_readla(osi_core, (nveu8_t *)osi_core->base +
-		MGBE_MTL_RXQ_FLOW_CTRL(qinx));
-	value &= ~MGBE_MTL_RXQ_OP_MODE_RFD_MASK;
-	value &= ~MGBE_MTL_RXQ_OP_MODE_RFA_MASK;
-	value |= (rfd_rfa[qinx] << MGBE_MTL_RXQ_OP_MODE_RFD_SHIFT) & MGBE_MTL_RXQ_OP_MODE_RFD_MASK;
-	value |= (rfd_rfa[qinx] << MGBE_MTL_RXQ_OP_MODE_RFA_SHIFT) & MGBE_MTL_RXQ_OP_MODE_RFA_MASK;
-	osi_writela(osi_core, value, (nveu8_t *)osi_core->base +
-		MGBE_MTL_RXQ_FLOW_CTRL(qinx));
-
 		/* Transmit Queue weight, all TX weights are equal */
 		value = osi_readla(osi_core, (nveu8_t *)osi_core->base +
 				   MGBE_MTL_TCQ_QW(qinx));

@@ -1298,14 +1256,51 @@ static nve32_t mgbe_configure_mtl_queue(struct osi_core_priv_data *osi_core,
 		value |= OSI_MGBE_TXQ_AVALG_ETS;
 		osi_writela(osi_core, value, (nveu8_t *)osi_core->base +
 			    MGBE_MTL_TCQ_ETS_CR(osi_core->tc[qinx]));
+	}
+
+	for (i = 0U; i < OSI_MGBE_MAX_NUM_QUEUES; i++) {
+		/* read RX Q0 Operating Mode Register */
+		value = osi_readla(osi_core, (nveu8_t *)osi_core->base +
+				   MGBE_MTL_CHX_RX_OP_MODE(i));
+		value |= (rx_fifo_sz[i] << MGBE_MTL_RXQ_SIZE_SHIFT);
+		/* Enable Store and Forward mode */
+		value |= MGBE_MTL_RSF;
+		/* Enable HW flow control */
+		value |= MGBE_MTL_RXQ_OP_MODE_EHFC;
+
+		osi_writela(osi_core, value, (nveu8_t *)osi_core->base +
+			    MGBE_MTL_CHX_RX_OP_MODE(i));
+
+		/* Update RFA and RFD
+		 * RFA: Threshold for Activating Flow Control
+		 * RFD: Threshold for Deactivating Flow Control
+		 */
+		value = osi_readla(osi_core, (nveu8_t *)osi_core->base +
+				   MGBE_MTL_RXQ_FLOW_CTRL(i));
+		value &= ~MGBE_MTL_RXQ_OP_MODE_RFD_MASK;
+		value &= ~MGBE_MTL_RXQ_OP_MODE_RFA_MASK;
+		value |= (rfd_rfa[i] << MGBE_MTL_RXQ_OP_MODE_RFD_SHIFT) &
+			 MGBE_MTL_RXQ_OP_MODE_RFD_MASK;
+		value |= (rfd_rfa[i] << MGBE_MTL_RXQ_OP_MODE_RFA_SHIFT) &
+			 MGBE_MTL_RXQ_OP_MODE_RFA_MASK;
+		osi_writela(osi_core, value, (nveu8_t *)osi_core->base +
+			    MGBE_MTL_RXQ_FLOW_CTRL(i));
 
 		/* Enable Rx Queue Control */
-	value = osi_readla(osi_core, (nveu8_t *)osi_core->base +
-		MGBE_MAC_RQC0R);
-	value |= ((osi_core->rxq_ctrl[qinx] & MGBE_MAC_RXQC0_RXQEN_MASK) <<
-		(MGBE_MAC_RXQC0_RXQEN_SHIFT(qinx)));
-	osi_writela(osi_core, value, (nveu8_t *)osi_core->base +
-		MGBE_MAC_RQC0R);
+		value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MAC_RQC0R);
+		value |= ((osi_core->rxq_ctrl[i] & MGBE_MAC_RXQC0_RXQEN_MASK) <<
+			  (MGBE_MAC_RXQC0_RXQEN_SHIFT(i)));
+		osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MAC_RQC0R);
+
+		/* Enable by default to configure forward error packets.
+		 * Since this is a local function this will always return success,
+		 * so no need to check for return value
+		 */
+		ret = hw_config_fw_err_pkts(osi_core, i, OSI_ENABLE);
+		if (ret < 0)
+			goto fail;
+	}
 fail:
 	return ret;
 }

@@ -1748,7 +1743,12 @@ static nve32_t mgbe_hsi_inject_err(struct osi_core_priv_data *const osi_core,
  */
 static nve32_t mgbe_configure_mac(struct osi_core_priv_data *osi_core)
 {
-	nveu32_t value = 0U, max_queue = 0U, i = 0U;
+	const struct core_local *l_core = (struct core_local *)(void *)osi_core;
+	/* 4176U is the minimum space required to support jumbo frames */
+	nveu32_t max_value = UINT_MAX - 4176U;
+	nveu32_t value = 0U;
+	nveu32_t result = 0U;
+	nve32_t ret = 0;
 
 	/* TODO: Need to check if we need to enable anything in Tx configuration
 	 * value = osi_readla(osi_core,

@@ -1761,6 +1761,17 @@ static nve32_t mgbe_configure_mac(struct osi_core_priv_data *osi_core)
 	/* Enable Rx checksum offload engine by default */
 	value |= MGBE_MAC_RMCR_ACS | MGBE_MAC_RMCR_CST | MGBE_MAC_RMCR_IPC;
 
+	if (l_core->tx_fifosz_perq <= ((max_value / 256U) - 1U))
+		result = ((l_core->tx_fifosz_perq + 1U) * 256U) - 4176U;
+
+	if (osi_core->mtu > result) {
+		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
+			     "Invalid MTU, max allowed MTU should be less than:\n",
+			     (nveul64_t)result);
+		ret = -1;
+		goto err;
+	}
+
 	/* Jumbo Packet Enable */
 	if ((osi_core->mtu > OSI_DFLT_MTU_SIZE) &&
 	    (osi_core->mtu <= OSI_MTU_SIZE_9000)) {

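The new check bounds the MTU by the per-queue TX FIFO share minus a
4176-byte reservation. For instance, with the per-queue encoding 63
(the 16 KB share of the 8-queue Linux case) the bound works out as
follows (a standalone sketch; the overflow guard mirrors the max_value
test above):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int tx_fifosz_perq = 63U; /* e.g. 128 KB split across 8 queues */
        unsigned int max_value = UINT_MAX - 4176U;
        unsigned int result = 0U;

        if (tx_fifosz_perq <= ((max_value / 256U) - 1U))
            result = ((tx_fifosz_perq + 1U) * 256U) - 4176U;

        /* 16384 - 4176 = 12208: largest MTU this queue share can carry */
        printf("max MTU = %u\n", result);
        return 0;
    }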
@@ -1794,16 +1805,8 @@ static nve32_t mgbe_configure_mac(struct osi_core_priv_data *osi_core)
 	value = osi_readla(osi_core,
 			   (nveu8_t *)osi_core->base + MGBE_MAC_RQC1R);
 	value |= MGBE_MAC_RQC1R_MCBCQEN;
-	/* Set MCBCQ to highest enabled RX queue index */
-	for (i = 0; i < osi_core->num_mtl_queues; i++) {
-		if ((max_queue < osi_core->mtl_queues[i]) &&
-		    (osi_core->mtl_queues[i] < OSI_MGBE_MAX_NUM_QUEUES)) {
-			/* Update max queue number */
-			max_queue = osi_core->mtl_queues[i];
-		}
-	}
 	value &= ~(MGBE_MAC_RQC1R_MCBCQ);
-	value |= (max_queue << MGBE_MAC_RQC1R_MCBCQ_SHIFT);
+	value |= MGBE_MAX_RXQ_NUM;
 	osi_writela(osi_core, value,
 		    (nveu8_t *)osi_core->base + MGBE_MAC_RQC1R);
 

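MGBE_MAX_RXQ_NUM replaces the computed max_queue << MGBE_MAC_RQC1R_MCBCQ_SHIFT
with a constant: RX queue 9 (the highest of the 10 MGBE queues) placed
in the MCBCQ field at bits 11:8. The equivalence, as a compile-time
sketch:

    /* queue index 9 shifted to bit position 8 gives the hardcoded 0x900U */
    _Static_assert((9U << 8U) == 0x900U, "MGBE_MAX_RXQ_NUM = queue 9 in MCBCQ");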
@@ -1882,7 +1885,8 @@ static nve32_t mgbe_configure_mac(struct osi_core_priv_data *osi_core)
 	mgbe_config_rss(osi_core);
 #endif /* !OSI_STRIPPED_LIB */
 
-	return 0;
+err:
+	return ret;
 }
 
 /**

@@ -2015,7 +2019,6 @@ static nve32_t mgbe_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core)
 static nve32_t mgbe_core_init(struct osi_core_priv_data *const osi_core)
 {
 	nve32_t ret = 0;
-	nveu32_t qinx = 0;
 	nveu32_t value = 0;
 
 	/* reset mmc counters */

@@ -2051,22 +2054,9 @@ static nve32_t mgbe_core_init(struct osi_core_priv_data *const osi_core)
 	osi_writela(osi_core, value, (nveu8_t *)osi_core->base +
 		    MGBE_MAC_EXT_CNF);
 
-	/* Configure MTL Queues */
-	/* TODO: Iterate over Number MTL queues need to be removed */
-	for (qinx = 0; qinx < osi_core->num_mtl_queues; qinx++) {
-		ret = mgbe_configure_mtl_queue(osi_core, osi_core->mtl_queues[qinx]);
-		if (ret < 0) {
+	ret = mgbe_configure_mtl_queue(osi_core);
+	if (ret < 0)
 		goto fail;
-		}
-		/* Enable by default to configure forward error packets.
-		 * Since this is a local function this will always return sucess,
-		 * so no need to check for return value
-		 */
-		ret = hw_config_fw_err_pkts(osi_core, osi_core->mtl_queues[qinx], OSI_ENABLE);
-		if (ret < 0) {
-			goto fail;
-		}
-	}
 
 	/* configure MGBE MAC HW */
 	ret = mgbe_configure_mac(osi_core);

@@ -460,7 +460,6 @@
 #define MGBE_MTL_RXQ_DMA_MAP0 0x1030
 #define MGBE_MTL_RXQ_DMA_MAP1 0x1034
 #define MGBE_MTL_RXQ_DMA_MAP2 0x1038
-#define MGBE_MTL_CHX_TX_OP_MODE(x) ((0x0080U * (x)) + 0x1100U)
 #define MGBE_MTL_TCQ_ETS_CR(x) ((0x0080U * (x)) + 0x1110U)
 #define MGBE_MTL_TCQ_QW(x) ((0x0080U * (x)) + 0x1118U)
 #define MGBE_MTL_CHX_RX_OP_MODE(x) ((0x0080U * (x)) + 0x1140U)

@@ -509,7 +508,6 @@
 #define MGBE_MAC_RQC1R_MCBCQEN OSI_BIT(15)
 #define MGBE_MAC_RQC1R_MCBCQ (OSI_BIT(11) | OSI_BIT(10) | \
 			      OSI_BIT(9) | OSI_BIT(8))
-#define MGBE_MAC_RQC1R_MCBCQ_SHIFT 8U
 #define MGBE_IMR_RGSMIIIE OSI_BIT(0)
 #define MGBE_IMR_TSIE OSI_BIT(12)
 #define MGBE_ISR_TSIS OSI_BIT(12)

@@ -738,8 +736,8 @@
 /** @} */
 #endif /* !OSI_STRIPPED_LIB */
 
-/* TXQ Size 128KB is divided equally across 10 MTL Queues*/
-#define TX_FIFO_SZ (((((128U * 1024U)/OSI_MGBE_MAX_NUM_QUEUES)) / 256U) - 1U)
+/* Maximum RXQ number is 9. Values here is 9U << 8U, 8 here is bit position in register */
+#define MGBE_MAX_RXQ_NUM 0x900U
 
 /**
  * @addtogroup MGBE-MAC-HWFR MGBE MAC HW feature registers

@@ -501,6 +501,39 @@ fail:
 	return ret;
 }
 
+static nve32_t validate_txqueues_derive_txfifosz(struct osi_core_priv_data *const osi_core)
+{
+	struct core_local *l_core = (struct core_local *)(void *)osi_core;
+	const nveu32_t tx_fifosz[3U] = { EQOS_MAC_XP_TX_FIFO_SZ, EQOS_MAC_TX_FIFO_SZ,
+					 MGBE_MAC_TX_FIFO_SZ };
+	nveu32_t tx_queue_num = 0U, i = 0U;
+	nve32_t ret = 0;
+
+	if ((osi_core->num_mtl_queues == 0U) ||
+	    (osi_core->num_mtl_queues > l_core->num_max_chans)) {
+		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
+			     "Invalid number of MTL queues\n", (nveu64_t)osi_core->num_mtl_queues);
+		ret = -1;
+		goto fail;
+	}
+
+	for (i = 0U; i < osi_core->num_mtl_queues; i++) {
+		tx_queue_num = osi_core->mtl_queues[i];
+		if (tx_queue_num > l_core->num_max_chans) {
+			OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
+				     "Invalid Tx queue number\n", (nveu64_t)tx_queue_num);
+			ret = -1;
+			goto fail;
+		}
+	}
+
+	l_core->tx_fifosz_perq = ((tx_fifosz[l_core->l_mac_ver] / osi_core->num_mtl_queues) /
+				  256U) - 1U;
+
+fail:
+	return ret;
+}
+
 static nve32_t osi_get_mac_version(struct osi_core_priv_data *const osi_core, nveu32_t *mac_ver)
 {
 	struct core_local *l_core = (struct core_local *)(void *)osi_core;

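Putting the new helper's arithmetic in one place: the per-queue
encoding is (total / num_mtl_queues) / 256 - 1, with the total selected
by l_core->l_mac_ver. A standalone sketch over the three totals added
in this commit (the queue counts are illustrative):

    #include <stdio.h>

    int main(void)
    {
        /* EQOS_MAC_XP_TX_FIFO_SZ, EQOS_MAC_TX_FIFO_SZ, MGBE_MAC_TX_FIFO_SZ */
        const unsigned int tx_fifosz[3] = { 36864U, 65536U, 131072U };
        const char *name[3] = { "EQOS-XP", "EQOS", "MGBE" };
        const unsigned int queues[3] = { 4U, 8U, 8U }; /* illustrative counts */

        for (int m = 0; m < 3; m++) {
            unsigned int enc = ((tx_fifosz[m] / queues[m]) / 256U) - 1U;
            printf("%-7s %u queues -> tx_fifosz_perq=%u (%u bytes)\n",
                   name[m], queues[m], enc, (enc + 1U) * 256U);
        }
        return 0;
    }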
@@ -531,6 +564,11 @@ static nve32_t osi_hal_hw_core_init(struct osi_core_priv_data *const osi_core)
 		goto fail;
 	}
 
+	/* Validate and derive TXFIFO size per Queue */
+	ret = validate_txqueues_derive_txfifosz(osi_core);
+	if (ret < 0)
+		goto fail;
+
 	/* Bring MAC out of reset */
 	ret = hw_poll_for_swr(osi_core);
 	if (ret < 0) {

@@ -92,14 +92,10 @@
  * @brief Values defined for PBL settings
  * @{
  */
-/* Tx Queue size is 128KB */
-#define MGBE_TXQ_SIZE 131072U
 /* Rx Queue size is 192KB */
 #define MGBE_RXQ_SIZE 196608U
 /* MAX PBL value */
 #define MGBE_DMA_CHX_MAX_PBL 256U
 #define MGBE_DMA_CHX_MAX_PBL_VAL 0x200000U
-/* AXI Data width */
-#define MGBE_AXI_DATAWIDTH 128U
 /** @} */
 #endif

@@ -393,7 +393,32 @@ static inline void start_dma(const struct osi_dma_priv_data *const osi_dma, nveu
 	osi_writel(val, (nveu8_t *)osi_dma->base + rx_dma_reg[osi_dma->mac]);
 }
 
-static void init_dma_channel(const struct osi_dma_priv_data *const osi_dma,
+static inline nveu32_t calculate_tx_pbl(nveu32_t tx_fifo_perq, nveu32_t mtu)
+{
+	nveu32_t subtraction_result = 0U;
+	nveu32_t tx_pbl = 0U;
+
+	/*
+	 * Formula for TxPBL calculation is
+	 * (TxPBL) < ((TXQSize - MTU)/(DATAWIDTH/8)) - 5
+	 * if TxPBL exceeds the value of 256 then we need to make use of 256
+	 * as the TxPBL else we should be using the value which we get after
+	 * calculation by using above formula
+	 */
+	/* tx_pbl = ((((Total Q size / total enabled queues) - osi_dma->mtu) /
+	 * (MGBE_AXI_DATAWIDTH / 8U)) - 5U)
+	 */
+	if (tx_fifo_perq >= mtu) {
+		subtraction_result = tx_fifo_perq - mtu;
+
+		if (subtraction_result >= (5U * 16U))
+			tx_pbl = (subtraction_result / 16U) - 5U;
+	}
+
+	return tx_pbl;
+}
+
+static nve32_t init_dma_channel(const struct osi_dma_priv_data *const osi_dma,
 			     nveu32_t dma_chan)
 {
 	nveu32_t chan = dma_chan & 0xFU;

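To see the PBL formula in action, here is the helper reproduced
standalone with two sample inputs (the 16 KB share and the MTUs are
illustrative):

    #include <stdio.h>

    /* (TxPBL) < ((TXQSize - MTU)/(DATAWIDTH/8)) - 5, with the MGBE AXI data
     * width of 128 bits, i.e. 16 bytes per beat */
    static unsigned int calculate_tx_pbl(unsigned int tx_fifo_perq, unsigned int mtu)
    {
        unsigned int sub = 0U, tx_pbl = 0U;

        if (tx_fifo_perq >= mtu) {
            sub = tx_fifo_perq - mtu;
            if (sub >= (5U * 16U))
                tx_pbl = (sub / 16U) - 5U;
        }
        return tx_pbl;
    }

    int main(void)
    {
        /* (16384-1500)/16 - 5 = 925; the caller clamps anything >= 256 */
        printf("%u\n", calculate_tx_pbl(16384U, 1500U));
        /* jumbo frames: (16384-9000)/16 - 5 = 456, still clamped to max */
        printf("%u\n", calculate_tx_pbl(16384U, 9000U));
        return 0;
    }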
@@ -418,11 +443,6 @@ static void init_dma_channel(const struct osi_dma_priv_data *const osi_dma,
 		EQOS_DMA_CHX_RX_WDT(chan),
 		MGBE_DMA_CHX_RX_WDT(chan)
 	};
-	const nveu32_t tx_pbl[2] = {
-		EQOS_DMA_CHX_TX_CTRL_TXPBL_RECOMMENDED,
-		((((MGBE_TXQ_SIZE / osi_dma->num_dma_chans) -
-		   osi_dma->mtu) / (MGBE_AXI_DATAWIDTH / 8U)) - 5U)
-	};
 	const nveu32_t rx_pbl[2] = {
 		EQOS_DMA_CHX_RX_CTRL_RXPBL_RECOMMENDED,
 		((MGBE_RXQ_SIZE / osi_dma->num_dma_chans) / 2U)

@@ -446,7 +466,8 @@ static void init_dma_channel(const struct osi_dma_priv_data *const osi_dma,
 		MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN, owrq, owrq, owrq,
 		owrq, owrq, owrq, owrq, owrq, owrq
 	};
-	nveu32_t val;
+	nveu32_t val, tx_fifo_perq, tx_pbl, result = 0U;
+	nve32_t ret = 0;
 
 	/* Enable Transmit/Receive interrupts */
 	val = osi_readl((nveu8_t *)osi_dma->base + intr_en_reg[osi_dma->mac]);

@@ -462,22 +483,41 @@ static void init_dma_channel(const struct osi_dma_priv_data *const osi_dma,
 	val = osi_readl((nveu8_t *)osi_dma->base + tx_ctrl_reg[osi_dma->mac]);
 	val |= (DMA_CHX_TX_CTRL_OSP | DMA_CHX_TX_CTRL_TSE);
+
+	/* Getting Per TX Q memory */
 	if (osi_dma->mac == OSI_MAC_HW_EQOS) {
-		val |= tx_pbl[osi_dma->mac];
+		val |= EQOS_DMA_CHX_TX_CTRL_TXPBL_RECOMMENDED;
 	} else {
-		/*
-		 * Formula for TxPBL calculation is
-		 * (TxPBL) < ((TXQSize - MTU)/(DATAWIDTH/8)) - 5
-		 * if TxPBL exceeds the value of 256 then we need to make use of 256
-		 * as the TxPBL else we should be using the value whcih we get after
-		 * calculation by using above formula
-		 */
-		if (tx_pbl[osi_dma->mac] >= MGBE_DMA_CHX_MAX_PBL) {
+		/* Need to get per Q memeory assigned inside OSI core */
+		tx_fifo_perq = osi_readl((nveu8_t *)osi_dma->base +
+					 MGBE_MTL_CHX_TX_OP_MODE(chan));
+		/* Mask the bits to get the per queue memory from the register */
+		tx_fifo_perq = (tx_fifo_perq >> 16U) & 0x1FFU;
+		/* Need to multiply by 256 to get the actual memory size */
+		tx_fifo_perq = (tx_fifo_perq + 1U) * 256U;
+
+		if (tx_fifo_perq >= 4176U)
+			result = tx_fifo_perq - 4176U;
+
+		if (osi_dma->mtu > result) {
+			OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
+				    "Invalid MTU, max allowed MTU should be less than:\n", result);
+			ret = -1;
+			goto err;
+		}
+
+		tx_pbl = calculate_tx_pbl(tx_fifo_perq, osi_dma->mtu);
+
+		if (tx_pbl >= MGBE_DMA_CHX_MAX_PBL) {
+			/* setting maximum value of 32 which is 32 * 8
+			 * (because TxPBLx8 = 1) => 256 bytes
+			 */
 			val |= MGBE_DMA_CHX_MAX_PBL_VAL;
 		} else {
-			val |= ((tx_pbl[osi_dma->mac] / 8U) << MGBE_DMA_CHX_CTRL_PBL_SHIFT);
+			/* divide by 8 because TxPBLx8 = 1 */
+			val |= ((tx_pbl / 8U) << MGBE_DMA_CHX_CTRL_PBL_SHIFT);
 		}
 	}
 
 	osi_writel(val, (nveu8_t *)osi_dma->base + tx_ctrl_reg[osi_dma->mac]);
 
 	val = osi_readl((nveu8_t *)osi_dma->base + rx_ctrl_reg[osi_dma->mac]);

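The DMA layer no longer recomputes the split; it reads the TQS field
back from MTL_CHX_TX_OP_MODE. A sketch of the decode the new code
performs (the register value is hypothetical):

    #include <stdio.h>

    int main(void)
    {
        unsigned int reg = 0x003F0072U;           /* hypothetical MTL_CHX_TX_OP_MODE */
        unsigned int tqs = (reg >> 16U) & 0x1FFU; /* 9-bit TQS field -> 63 */
        unsigned int bytes = (tqs + 1U) * 256U;   /* (63+1)*256 = 16384 */

        printf("per-queue TX FIFO = %u bytes\n", bytes);
        return 0;
    }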
@@ -525,6 +565,9 @@ static void init_dma_channel(const struct osi_dma_priv_data *const osi_dma,
 		val |= (owrq_arr[osi_dma->num_dma_chans - 1U] << MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SHIFT);
 		osi_writel(val, (nveu8_t *)osi_dma->base + MGBE_DMA_CHX_RX_CNTRL2(chan));
 	}
+
+err:
+	return ret;
 }
 
 nve32_t osi_hw_dma_init(struct osi_dma_priv_data *osi_dma)

@@ -573,7 +616,10 @@ nve32_t osi_hw_dma_init(struct osi_dma_priv_data *osi_dma)
 	for (i = 0; i < osi_dma->num_dma_chans; i++) {
 		chan = osi_dma->dma_chans[i];
 
-		init_dma_channel(osi_dma, chan);
+		ret = init_dma_channel(osi_dma, chan);
+		if (ret < 0) {
+			goto fail;
+		}
 
 		ret = intr_fn[OSI_DMA_INTR_ENABLE](osi_dma,
 						   VIRT_INTR_CHX_CNTRL(chan),