osi: T264 VDMA feature and bring up changes

Bug 4043836

Ported from the change -
https://git-master.nvidia.com/r/c/nvethernet-docs/+/2896005

Change-Id: Iabbbde0d2733f04bba5d7128e7b8ac5956605424
Signed-off-by: Mahesh Patil <maheshp@nvidia.com>
Signed-off-by: Michael Hsu <mhsu@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/kernel/nvethernetrm/+/3149288
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Tested-by: Bhadram Varka <vbhadram@nvidia.com>
Tested-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-by: Ashutosh Jha <ajha@nvidia.com>
Reviewed-by: Bhadram Varka <vbhadram@nvidia.com>
Author: Mahesh Patil
Date: 2023-03-30 21:55:19 +00:00
Committed by: mobile promotions
Parent: d28da6a10b
Commit: 8c7f7328e8
18 changed files with 1134 additions and 392 deletions

View File

@@ -68,7 +68,7 @@
#define OSI_MTL_QUEUE_ENABLE 0x2U
#define OSI_MTL_QUEUE_MODEMAX 0x3U
#ifndef OSI_STRIPPED_LIB
#define OSI_MTL_MAX_NUM_QUEUES 10U
#define OSI_MAX_NUM_CHANS 48U
#endif
/** @} */
@@ -260,13 +260,13 @@ struct osi_stats {
/** Under Flow Error */
nveu64_t mgbe_tx_underflow_err;
/** RX buffer unavailable irq count */
nveu64_t rx_buf_unavail_irq_n[OSI_MTL_MAX_NUM_QUEUES];
nveu64_t rx_buf_unavail_irq_n[OSI_MAX_NUM_CHANS];
/** Transmit Process Stopped irq count */
nveu64_t tx_proc_stopped_irq_n[OSI_MTL_MAX_NUM_QUEUES];
nveu64_t tx_proc_stopped_irq_n[OSI_MAX_NUM_CHANS];
/** Transmit Buffer Unavailable irq count */
nveu64_t tx_buf_unavail_irq_n[OSI_MTL_MAX_NUM_QUEUES];
nveu64_t tx_buf_unavail_irq_n[OSI_MAX_NUM_CHANS];
/** Receive Process Stopped irq count */
nveu64_t rx_proc_stopped_irq_n[OSI_MTL_MAX_NUM_QUEUES];
nveu64_t rx_proc_stopped_irq_n[OSI_MAX_NUM_CHANS];
/** Receive Watchdog Timeout irq count */
nveu64_t rx_watchdog_irq_n;
/** Fatal Bus Error irq count */

View File

@@ -26,6 +26,11 @@
#include <nvethernet_type.h>
/**
* @brief Maximum number of supported MAC IP types (EQOS, MGBE, MGBE_T26X)
*/
#define OSI_MAX_MAC_IP_TYPES 3U
/**
* @addtogroup FC Flow Control Threshold Macros
*
@@ -206,7 +211,12 @@
/**
* @brief Maximum number of channels in MGBE
*/
//TBD: T264, NET04 supports only 10 VDMA
#define OSI_MGBE_MAX_NUM_CHANS 10U
/**
* @brief Maximum number of PDMA channels in MGBE
*/
#define OSI_MGBE_MAX_NUM_PDMA_CHANS 10U
/** @brief Maximum number of queues in MGBE */
#define OSI_MGBE_MAX_NUM_QUEUES 10U
#define OSI_EQOS_XP_MAX_CHANS 4U
@@ -228,6 +238,8 @@
#define OSI_MAC_HW_EQOS 0U
/** @brief flag indicating MGBE MAC */
#define OSI_MAC_HW_MGBE 1U
/** @brief flag indicating MGBE MAC on T26X */
#define OSI_MAC_HW_MGBE_T26X 2U
#define OSI_NULL ((void *)0)
/** Enable Flag */
@@ -254,6 +266,8 @@
#define OSI_EQOS_MAC_5_30 0x53U
/** @brief MGBE MAC version Orin */
#define OSI_MGBE_MAC_3_10 0x31U
//TBD: T264 NET04 version, update it later
#define OSI_MGBE_MAC_3_20 0x32U
/**
* @brief Maximum number of VM IRQs
@@ -291,4 +305,16 @@
/** @brief macro for 1 micro second delay */
#define OSI_DELAY_1US 1U
/**
* @brief OSI PDMA to VDMA mapping data
*/
struct osi_pdma_vdma_data {
/** PDMA channel */
nveu32_t pdma_chan;
/** Number of VDMA channels */
nveu32_t num_vdma_chans;
/** Array of VDMA channel list */
nveu32_t vdma_chans[OSI_MGBE_MAX_NUM_CHANS];
};
#endif /* OSI_COMMON_H */
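For reference, a minimal sketch of how this mapping might be populated, assuming a hypothetical 1:2 PDMA-to-VDMA split (the split policy is illustrative; real channel assignments come from platform data):

/* Illustrative only: fill a PDMA-to-VDMA map with an assumed 1:2 split */
struct osi_pdma_vdma_data map[2];
nveu32_t i;

for (i = 0U; i < 2U; i++) {
	map[i].pdma_chan = i;
	map[i].num_vdma_chans = 2U;
	map[i].vdma_chans[0] = (i * 2U);
	map[i].vdma_chans[1] = (i * 2U) + 1U;
}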

View File

@@ -272,6 +272,7 @@ typedef my_lint_64 nvel64_t;
#define OSI_SPEED_2500 2500
#define OSI_SPEED_5000 5000
#define OSI_SPEED_10000 10000
#define OSI_SPEED_25000 25000
#define TEN_POWER_9 0x3B9ACA00U
#define TWO_POWER_32 0x100000000ULL
@@ -1645,12 +1646,12 @@ struct osi_core_priv_data {
nveu32_t num_mtl_queues;
/** Array of MTL queues
* each array element has max value same as num_mtl_queues */
nveu32_t mtl_queues[OSI_MGBE_MAX_NUM_CHANS];
nveu32_t mtl_queues[OSI_MGBE_MAX_NUM_QUEUES];
/** List of MTL Rx queue mode that need to be enabled */
nveu32_t rxq_ctrl[OSI_MGBE_MAX_NUM_CHANS];
nveu32_t rxq_ctrl[OSI_MGBE_MAX_NUM_QUEUES];
/** Rx MTl Queue mapping based on User Priority field
* valid values are from 1 to 0xFF */
nveu32_t rxq_prio[OSI_MGBE_MAX_NUM_CHANS];
nveu32_t rxq_prio[OSI_MGBE_MAX_NUM_QUEUES];
/** MAC HW type EQOS based on DT compatible
* valid values are NVETHERNETRM_PIF$OSI_MAC_HW_EQOS and
* NVETHERNETRM_PIF$OSI_MAC_HW_MGBE*/
@@ -1683,7 +1684,7 @@ struct osi_core_priv_data {
nveu32_t dcs_en;
/** TQ:TC mapping
* valid values are from 0 to 7 */
nveu32_t tc[OSI_MGBE_MAX_NUM_CHANS];
nveu32_t tc[OSI_MGBE_MAX_NUM_PDMA_CHANS];
#ifndef OSI_STRIPPED_LIB
/** Memory mapped base address of HV window */
void *hv_base;
@@ -1740,6 +1741,14 @@ struct osi_core_priv_data {
nveu32_t mc_dmasel;
/** UPHY GBE mode (1 for 10G, 0 for 5G) */
nveu32_t uphy_gbe_mode;
/** number of PDMA's */
nveu32_t num_of_pdma;
/** Array of PDMA to VDMA mapping */
struct osi_pdma_vdma_data pdma_data[OSI_MGBE_MAX_NUM_PDMA_CHANS];
/** Number of channels enabled in MAC */
nveu32_t num_dma_chans;
/** Array of supported DMA channels */
nveu32_t dma_chans[OSI_MGBE_MAX_NUM_CHANS];
/** Array of VM IRQ's */
struct osi_vm_irq_data irq_data[OSI_MAX_VM_IRQS];
/** number of VM IRQ's
@@ -1763,6 +1772,8 @@ struct osi_core_priv_data {
#ifdef HSI_SUPPORT
struct osi_hsi_data hsi;
#endif
/** pre-silicon flag */
nveu32_t pre_sil;
};
/**

View File

@@ -790,6 +790,10 @@ struct osi_dma_priv_data {
* Max value is NVETHERNETCL_PIF$OSI_EQOS_RX_DESC_CNT/NVETHERNETCL_PIF$OSI_MGBE_RX_DESC_CNT
*/
nveu32_t rx_ring_sz;
/** number of PDMA's */
nveu32_t num_of_pdma;
/** Array of PDMA to VDMA mapping copy of osi_core */
struct osi_pdma_vdma_data pdma_data[OSI_MGBE_MAX_NUM_PDMA_CHANS];
};
/**
@@ -810,6 +814,8 @@ struct osi_dma_priv_data {
*
* @param[in] osi_dma: DMA private data.
* - Valid range: Any valid memory address except NULL.
* @param[out] dma_status: Stores the global DMA Interrupt status register value
* - Valid range: Any valid memory address except NULL.
*
* @retval 0 on success
* @retval -1 on failure - invalid argument
@@ -829,7 +835,8 @@ struct osi_dma_priv_data {
*
*/
#endif
nveu32_t osi_get_global_dma_status(struct osi_dma_priv_data *osi_dma);
nve32_t osi_get_global_dma_status(struct osi_dma_priv_data *osi_dma,
nveu32_t *const dma_status);
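A minimal caller sketch for the new two-argument form (assumption: the nve32_t return follows the 0-on-success, -1-on-failure convention used elsewhere in this API):

nveu32_t dma_status = 0U;

if (osi_get_global_dma_status(osi_dma, &dma_status) == 0) {
	/* each set bit is assumed to flag a VDMA channel with a
	 * pending interrupt; decode per-channel as needed */
}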
/**
* @brief

View File

@@ -45,9 +45,21 @@
#define MAC_CORE_VER_TYPE_MGBE 2U
/**
* @brief Maximum number of supported MAC IP types (EQOS and MGBE)
* @addtogroup MGBE PBL settings.
*
* @brief Values defined for PBL settings
* @{
*/
#define MAX_MAC_IP_TYPES 2U
/* Tx Queue size is 128KB */
#define MGBE_TXQ_SIZE 131072U
/* Rx Queue size is 192KB */
#define MGBE_RXQ_SIZE 196608U
/* MAX PBL value */
#define MGBE_DMA_CHX_MAX_PBL 256U
#define MGBE_DMA_CHX_MAX_PBL_VAL 0x200000U
/* AXI Data width */
#define MGBE_AXI_DATAWIDTH 128U
/** @} */
/**
* @brief osi_readl_poll_timeout - Periodically poll an address until
@@ -277,9 +289,12 @@ static inline nve32_t validate_mac_ver_update_chans(nveu32_t mac_ver,
ret = 1;
break;
case OSI_MGBE_MAC_3_10:
//TBD: T264 uFPGA reports mac version 3.2
case OSI_MGBE_MAC_3_20:
#ifndef OSI_STRIPPED_LIB
case OSI_MGBE_MAC_4_00:
#endif /* !OSI_STRIPPED_LIB */
//TBD: T264 number of dma channels?
*num_max_chans = OSI_MGBE_MAX_NUM_CHANS;
*l_mac_ver = MAC_CORE_VER_TYPE_MGBE;
ret = 1;

View File

@@ -27,7 +27,7 @@
#include "xpcs.h"
#include "macsec.h"
static inline nve32_t poll_check(struct osi_core_priv_data *const osi_core, nveu8_t *addr,
nve32_t poll_check(struct osi_core_priv_data *const osi_core, nveu8_t *addr,
nveu32_t bit_check, nveu32_t *value)
{
nveu32_t retry = RETRY_COUNT;
@@ -62,7 +62,11 @@ fail:
nve32_t hw_poll_for_swr(struct osi_core_priv_data *const osi_core)
{
nveu32_t dma_mode_val = 0U;
const nveu32_t dma_mode[2] = { EQOS_DMA_BMR, MGBE_DMA_MODE };
const nveu32_t dma_mode[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_BMR,
MGBE_DMA_MODE,
MGBE_DMA_MODE
};
void *addr = osi_core->base;
return poll_check(osi_core, ((nveu8_t *)addr + dma_mode[osi_core->mac]),
@@ -73,10 +77,26 @@ void hw_start_mac(struct osi_core_priv_data *const osi_core)
{
void *addr = osi_core->base;
nveu32_t value;
const nveu32_t mac_mcr_te_reg[2] = { EQOS_MAC_MCR, MGBE_MAC_TMCR };
const nveu32_t mac_mcr_re_reg[2] = { EQOS_MAC_MCR, MGBE_MAC_RMCR };
const nveu32_t set_bit_te[2] = { EQOS_MCR_TE, MGBE_MAC_TMCR_TE };
const nveu32_t set_bit_re[2] = { EQOS_MCR_RE, MGBE_MAC_RMCR_RE };
const nveu32_t mac_mcr_te_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MAC_MCR,
MGBE_MAC_TMCR,
MGBE_MAC_TMCR
};
const nveu32_t mac_mcr_re_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MAC_MCR,
MGBE_MAC_RMCR,
MGBE_MAC_RMCR
};
const nveu32_t set_bit_te[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MCR_TE,
MGBE_MAC_TMCR_TE,
MGBE_MAC_TMCR_TE
};
const nveu32_t set_bit_re[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MCR_RE,
MGBE_MAC_RMCR_RE,
MGBE_MAC_RMCR_RE
};
value = osi_readla(osi_core, ((nveu8_t *)addr + mac_mcr_te_reg[osi_core->mac]));
value |= set_bit_te[osi_core->mac];
@@ -91,10 +111,26 @@ void hw_stop_mac(struct osi_core_priv_data *const osi_core)
{
void *addr = osi_core->base;
nveu32_t value;
const nveu32_t mac_mcr_te_reg[2] = { EQOS_MAC_MCR, MGBE_MAC_TMCR };
const nveu32_t mac_mcr_re_reg[2] = { EQOS_MAC_MCR, MGBE_MAC_RMCR };
const nveu32_t clear_bit_te[2] = { EQOS_MCR_TE, MGBE_MAC_TMCR_TE };
const nveu32_t clear_bit_re[2] = { EQOS_MCR_RE, MGBE_MAC_RMCR_RE };
const nveu32_t mac_mcr_te_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MAC_MCR,
MGBE_MAC_TMCR,
MGBE_MAC_TMCR
};
const nveu32_t mac_mcr_re_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MAC_MCR,
MGBE_MAC_RMCR,
MGBE_MAC_RMCR
};
const nveu32_t clear_bit_te[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MCR_TE,
MGBE_MAC_TMCR_TE,
MGBE_MAC_TMCR_TE
};
const nveu32_t clear_bit_re[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MCR_RE,
MGBE_MAC_RMCR_RE,
MGBE_MAC_RMCR_RE
};
value = osi_readla(osi_core, ((nveu8_t *)addr + mac_mcr_te_reg[osi_core->mac]));
value &= ~clear_bit_te[osi_core->mac];
@@ -173,11 +209,16 @@ nve32_t hw_set_speed(struct osi_core_priv_data *const osi_core, const nve32_t sp
nveu32_t value;
nve32_t ret = 0;
void *base = osi_core->base;
const nveu32_t mac_mcr[2] = { EQOS_MAC_MCR, MGBE_MAC_TMCR };
const nveu32_t mac_mcr[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MAC_MCR,
MGBE_MAC_TMCR,
MGBE_MAC_TMCR
};
if (((osi_core->mac == OSI_MAC_HW_EQOS) && (speed > OSI_SPEED_1000)) ||
((osi_core->mac == OSI_MAC_HW_MGBE) && ((speed < OSI_SPEED_2500) ||
(speed > OSI_SPEED_10000)))) {
(((osi_core->mac == OSI_MAC_HW_MGBE) ||
(osi_core->mac == OSI_MAC_HW_MGBE_T26X)) &&
((speed < OSI_SPEED_2500) || (speed > OSI_SPEED_25000)))) {
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
"unsupported speed\n", (nveul64_t)speed);
ret = -1;
@@ -209,6 +250,10 @@ nve32_t hw_set_speed(struct osi_core_priv_data *const osi_core, const nve32_t sp
case OSI_SPEED_10000:
value &= ~MGBE_MAC_TMCR_SS_10G;
break;
case OSI_SPEED_25000:
value &= ~MGBE_MAC_TMCR_SS_10G;
value |= MGBE_MAC_TMCR_SS_SPEED_25G;
break;
default:
ret = -1;
break;
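For a quick bit-level check of the new 25G case, the speed-select arithmetic works out as follows (a sketch based on the TMCR macros added in this change):

/* MGBE_MAC_TMCR_SS_10G = OSI_BIT(31)|OSI_BIT(30)|OSI_BIT(29) = 0xE0000000U,
 * so clearing it zeroes the whole SS field; then
 * MGBE_MAC_TMCR_SS_SPEED_25G = OSI_BIT(29) = 0x20000000U selects 25G:
 *
 *   value &= ~0xE0000000U;   SS field -> 000
 *   value |=  0x20000000U;   SS field -> 001 (25G encoding)
 */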
@@ -231,8 +276,11 @@ nve32_t hw_flush_mtl_tx_queue(struct osi_core_priv_data *const osi_core,
nveu32_t tx_op_mode_val = 0U;
nveu32_t que_idx = (q_inx & 0xFU);
nveu32_t value;
const nveu32_t tx_op_mode[2] = { EQOS_MTL_CHX_TX_OP_MODE(que_idx),
MGBE_MTL_CHX_TX_OP_MODE(que_idx)};
const nveu32_t tx_op_mode[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_CHX_TX_OP_MODE(que_idx),
MGBE_MTL_CHX_TX_OP_MODE(que_idx),
MGBE_MTL_CHX_TX_OP_MODE(que_idx)
};
/* Read Tx Q Operating Mode Register and flush TxQ */
value = osi_readla(osi_core, ((nveu8_t *)addr + tx_op_mode[osi_core->mac]));
@@ -250,11 +298,17 @@ nve32_t hw_config_fw_err_pkts(struct osi_core_priv_data *osi_core,
nveu32_t val;
nve32_t ret = 0;
nveu32_t que_idx = (q_inx & 0xFU);
const nveu32_t rx_op_mode[2] = { EQOS_MTL_CHX_RX_OP_MODE(que_idx),
MGBE_MTL_CHX_RX_OP_MODE(que_idx)};
const nveu32_t rx_op_mode[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_CHX_RX_OP_MODE(que_idx),
MGBE_MTL_CHX_RX_OP_MODE(que_idx),
MGBE_MTL_CHX_RX_OP_MODE(que_idx)
};
#ifndef OSI_STRIPPED_LIB
const nveu32_t max_q[2] = { OSI_EQOS_MAX_NUM_QUEUES,
OSI_MGBE_MAX_NUM_QUEUES};
const nveu32_t max_q[OSI_MAX_MAC_IP_TYPES] = {
OSI_EQOS_MAX_NUM_QUEUES,
OSI_MGBE_MAX_NUM_QUEUES,
OSI_MGBE_MAX_NUM_QUEUES
};
/* Check for valid enable_fw_err_pkts and que_idx values */
if (((enable_fw_err_pkts != OSI_ENABLE) &&
(enable_fw_err_pkts != OSI_DISABLE)) ||
@@ -311,8 +365,16 @@ nve32_t hw_config_rxcsum_offload(struct osi_core_priv_data *const osi_core,
void *addr = osi_core->base;
nveu32_t value;
nve32_t ret = 0;
const nveu32_t rxcsum_mode[2] = { EQOS_MAC_MCR, MGBE_MAC_RMCR};
const nveu32_t ipc_value[2] = { EQOS_MCR_IPC, MGBE_MAC_RMCR_IPC};
const nveu32_t rxcsum_mode[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MAC_MCR,
MGBE_MAC_RMCR,
MGBE_MAC_RMCR
};
const nveu32_t ipc_value[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MCR_IPC,
MGBE_MAC_RMCR_IPC,
MGBE_MAC_RMCR_IPC
};
if ((enabled != OSI_ENABLE) && (enabled != OSI_DISABLE)) {
ret = -1;
@@ -337,9 +399,21 @@ nve32_t hw_set_systime_to_mac(struct osi_core_priv_data *const osi_core,
void *addr = osi_core->base;
nveu32_t mac_tcr = 0U;
nve32_t ret = 0;
const nveu32_t mac_tscr[2] = { EQOS_MAC_TCR, MGBE_MAC_TCR};
const nveu32_t mac_stsur[2] = { EQOS_MAC_STSUR, MGBE_MAC_STSUR};
const nveu32_t mac_stnsur[2] = { EQOS_MAC_STNSUR, MGBE_MAC_STNSUR};
const nveu32_t mac_tscr[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MAC_TCR,
MGBE_MAC_TCR,
MGBE_MAC_TCR
};
const nveu32_t mac_stsur[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MAC_STSUR,
MGBE_MAC_STSUR,
MGBE_MAC_STSUR
};
const nveu32_t mac_stnsur[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MAC_STNSUR,
MGBE_MAC_STNSUR,
MGBE_MAC_STNSUR
};
ret = poll_check(osi_core, ((nveu8_t *)addr + mac_tscr[osi_core->mac]),
MAC_TCR_TSINIT, &mac_tcr);
@@ -371,8 +445,16 @@ nve32_t hw_config_addend(struct osi_core_priv_data *const osi_core,
void *addr = osi_core->base;
nveu32_t mac_tcr = 0U;
nve32_t ret = 0;
const nveu32_t mac_tscr[2] = { EQOS_MAC_TCR, MGBE_MAC_TCR};
const nveu32_t mac_tar[2] = { EQOS_MAC_TAR, MGBE_MAC_TAR};
const nveu32_t mac_tscr[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MAC_TCR,
MGBE_MAC_TCR,
MGBE_MAC_TCR
};
const nveu32_t mac_tar[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MAC_TAR,
MGBE_MAC_TAR,
MGBE_MAC_TAR
};
ret = poll_check(osi_core, ((nveu8_t *)addr + mac_tscr[osi_core->mac]),
MAC_TCR_TSADDREG, &mac_tcr);
@@ -406,8 +488,16 @@ void hw_config_tscr(struct osi_core_priv_data *const osi_core, OSI_UNUSED const
nveu32_t i = 0U, temp = 0U;
#endif /* !OSI_STRIPPED_LIB */
nveu32_t value = 0x0U;
const nveu32_t mac_tscr[2] = { EQOS_MAC_TCR, MGBE_MAC_TCR};
const nveu32_t mac_pps[2] = { EQOS_MAC_PPS_CTL, MGBE_MAC_PPS_CTL};
const nveu32_t mac_tscr[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MAC_TCR,
MGBE_MAC_TCR,
MGBE_MAC_TCR
};
const nveu32_t mac_pps[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MAC_PPS_CTL,
MGBE_MAC_PPS_CTL,
MGBE_MAC_PPS_CTL
};
(void)ptp_filter; // unused
@@ -484,11 +574,16 @@ void hw_config_ssir(struct osi_core_priv_data *const osi_core)
nveu32_t val = 0U;
void *addr = osi_core->base;
const struct core_local *l_core = (struct core_local *)(void *)osi_core;
const nveu32_t mac_ssir[2] = { EQOS_MAC_SSIR, MGBE_MAC_SSIR};
const nveu32_t mac_ssir[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MAC_SSIR,
MGBE_MAC_SSIR,
MGBE_MAC_SSIR
};
const nveu32_t ptp_ssinc[3] = {OSI_PTP_SSINC_4, OSI_PTP_SSINC_6, OSI_PTP_SSINC_4};
/* by default Fine method is enabled */
/* Fix the SSINC value based on Exact MAC used */
//TBD: review for T264
val = ptp_ssinc[l_core->l_mac_ver];
val |= val << MAC_SSIR_SSINC_SHIFT;
@@ -653,9 +748,16 @@ static inline nve32_t hw_est_read(struct osi_core_priv_data *osi_core,
nve32_t retry = 1000;
nveu32_t val = 0U;
nve32_t ret;
const nveu32_t MTL_EST_GCL_CONTROL[MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_GCL_CONTROL, MGBE_MTL_EST_GCL_CONTROL};
const nveu32_t MTL_EST_DATA[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_DATA, MGBE_MTL_EST_DATA};
const nveu32_t MTL_EST_GCL_CONTROL[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_GCL_CONTROL,
MGBE_MTL_EST_GCL_CONTROL,
MGBE_MTL_EST_GCL_CONTROL
};
const nveu32_t MTL_EST_DATA[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_DATA,
MGBE_MTL_EST_DATA,
MGBE_MTL_EST_DATA
};
(void)gcla;
*data = 0U;
@@ -757,17 +859,23 @@ static nve32_t validate_btr(struct osi_core_priv_data *const osi_core,
nveu64_t btr_new = 0U;
nveu64_t old_btr, old_ctr;
nveu32_t btr_l, btr_h, ctr_l, ctr_h;
const nveu32_t MTL_EST_CONTROL[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL,
const nveu32_t MTL_EST_CONTROL[OSI_MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL,
MGBE_MTL_EST_CONTROL,
MGBE_MTL_EST_CONTROL};
const nveu32_t PTP_CYCLE_8[MAX_MAC_IP_TYPES] = {EQOS_8PTP_CYCLE,
const nveu32_t PTP_CYCLE_8[OSI_MAX_MAC_IP_TYPES] = {EQOS_8PTP_CYCLE,
MGBE_8PTP_CYCLE,
MGBE_8PTP_CYCLE};
const nveu32_t MTL_EST_BTR_LOW[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_BTR_LOW,
const nveu32_t MTL_EST_BTR_LOW[OSI_MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_BTR_LOW,
MGBE_MTL_EST_BTR_LOW,
MGBE_MTL_EST_BTR_LOW};
const nveu32_t MTL_EST_BTR_HIGH[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_BTR_HIGH,
const nveu32_t MTL_EST_BTR_HIGH[OSI_MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_BTR_HIGH,
MGBE_MTL_EST_BTR_HIGH,
MGBE_MTL_EST_BTR_HIGH};
const nveu32_t MTL_EST_CTR_LOW[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CTR_LOW,
const nveu32_t MTL_EST_CTR_LOW[OSI_MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CTR_LOW,
MGBE_MTL_EST_CTR_LOW,
MGBE_MTL_EST_CTR_LOW};
const nveu32_t MTL_EST_CTR_HIGH[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CTR_HIGH,
const nveu32_t MTL_EST_CTR_HIGH[OSI_MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CTR_HIGH,
MGBE_MTL_EST_CTR_HIGH,
MGBE_MTL_EST_CTR_HIGH};
const struct est_read hw_read_arr[4] = {
{&btr_l, MTL_EST_BTR_LOW[mac]},
@@ -847,10 +955,16 @@ static nve32_t gcl_validate(struct osi_core_priv_data *const osi_core,
const nveu32_t *btr, nveu32_t mac)
{
const struct core_local *l_core = (struct core_local *)(void *)osi_core;
const nveu32_t PTP_CYCLE_8[MAX_MAC_IP_TYPES] = {EQOS_8PTP_CYCLE,
MGBE_8PTP_CYCLE};
const nveu32_t MTL_EST_STATUS[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_STATUS,
MGBE_MTL_EST_STATUS};
const nveu32_t PTP_CYCLE_8[OSI_MAX_MAC_IP_TYPES] = {
EQOS_8PTP_CYCLE,
MGBE_8PTP_CYCLE,
MGBE_8PTP_CYCLE
};
const nveu32_t MTL_EST_STATUS[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_STATUS,
MGBE_MTL_EST_STATUS,
MGBE_MTL_EST_STATUS
};
nveu32_t i;
nveu64_t sum_ti = 0U;
nveu64_t sum_tin = 0U;
@@ -930,10 +1044,16 @@ static nve32_t hw_est_write(struct osi_core_priv_data *osi_core,
nve32_t retry = 1000;
nveu32_t val = 0x0;
nve32_t ret = 0;
const nveu32_t MTL_EST_DATA[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_DATA,
MGBE_MTL_EST_DATA};
const nveu32_t MTL_EST_GCL_CONTROL[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_GCL_CONTROL,
MGBE_MTL_EST_GCL_CONTROL};
const nveu32_t MTL_EST_DATA[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_DATA,
MGBE_MTL_EST_DATA,
MGBE_MTL_EST_DATA
};
const nveu32_t MTL_EST_GCL_CONTROL[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_GCL_CONTROL,
MGBE_MTL_EST_GCL_CONTROL,
MGBE_MTL_EST_GCL_CONTROL
};
osi_writela(osi_core, data, (nveu8_t *)osi_core->base +
MTL_EST_DATA[osi_core->mac]);
@@ -970,13 +1090,17 @@ static inline nve32_t configure_est_params(struct osi_core_priv_data *const osi_
nveu32_t i;
nve32_t ret;
nveu32_t addr = 0x0;
const nveu32_t MTL_EST_CTR_LOW[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CTR_LOW,
const nveu32_t MTL_EST_CTR_LOW[OSI_MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CTR_LOW,
MGBE_MTL_EST_CTR_LOW,
MGBE_MTL_EST_CTR_LOW};
const nveu32_t MTL_EST_CTR_HIGH[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CTR_HIGH,
const nveu32_t MTL_EST_CTR_HIGH[OSI_MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CTR_HIGH,
MGBE_MTL_EST_CTR_HIGH,
MGBE_MTL_EST_CTR_HIGH};
const nveu32_t MTL_EST_TER[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_TER,
const nveu32_t MTL_EST_TER[OSI_MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_TER,
MGBE_MTL_EST_TER,
MGBE_MTL_EST_TER};
const nveu32_t MTL_EST_LLR[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_LLR,
const nveu32_t MTL_EST_LLR[OSI_MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_LLR,
MGBE_MTL_EST_LLR,
MGBE_MTL_EST_LLR};
ret = hw_est_write(osi_core, MTL_EST_CTR_LOW[osi_core->mac], est->ctr[0], 0);
@@ -1056,12 +1180,21 @@ nve32_t hw_config_est(struct osi_core_priv_data *const osi_core,
nveu32_t val = 0x0;
void *base = osi_core->base;
nve32_t ret = 0;
const nveu32_t MTL_EST_CONTROL[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL,
MGBE_MTL_EST_CONTROL};
const nveu32_t MTL_EST_BTR_LOW[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_BTR_LOW,
MGBE_MTL_EST_BTR_LOW};
const nveu32_t MTL_EST_BTR_HIGH[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_BTR_HIGH,
MGBE_MTL_EST_BTR_HIGH};
const nveu32_t MTL_EST_CONTROL[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_CONTROL,
MGBE_MTL_EST_CONTROL,
MGBE_MTL_EST_CONTROL
};
const nveu32_t MTL_EST_BTR_LOW[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_BTR_LOW,
MGBE_MTL_EST_BTR_LOW,
MGBE_MTL_EST_BTR_LOW
};
const nveu32_t MTL_EST_BTR_HIGH[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_BTR_HIGH,
MGBE_MTL_EST_BTR_HIGH,
MGBE_MTL_EST_BTR_HIGH
};
if (est->en_dis == OSI_DISABLE) {
val = osi_readla(osi_core, (nveu8_t *)base +
@@ -1133,19 +1266,26 @@ static nve32_t hw_config_fpe_pec_enable(struct osi_core_priv_data *const osi_cor
nveu32_t temp = 0U, temp1 = 0U;
nveu32_t temp_shift = 0U;
nve32_t ret = 0;
const nveu32_t MTL_FPE_CTS[MAX_MAC_IP_TYPES] = {EQOS_MTL_FPE_CTS,
const nveu32_t MTL_FPE_CTS[OSI_MAX_MAC_IP_TYPES] = {EQOS_MTL_FPE_CTS,
MGBE_MTL_FPE_CTS,
MGBE_MTL_FPE_CTS};
const nveu32_t MAC_FPE_CTS[MAX_MAC_IP_TYPES] = {EQOS_MAC_FPE_CTS,
const nveu32_t MAC_FPE_CTS[OSI_MAX_MAC_IP_TYPES] = {EQOS_MAC_FPE_CTS,
MGBE_MAC_FPE_CTS,
MGBE_MAC_FPE_CTS};
const nveu32_t max_number_queue[MAX_MAC_IP_TYPES] = {OSI_EQOS_MAX_NUM_QUEUES,
const nveu32_t max_number_queue[OSI_MAX_MAC_IP_TYPES] = {OSI_EQOS_MAX_NUM_QUEUES,
OSI_MGBE_MAX_NUM_QUEUES,
OSI_MGBE_MAX_NUM_QUEUES};
const nveu32_t MAC_RQC1R[MAX_MAC_IP_TYPES] = {EQOS_MAC_RQC1R,
const nveu32_t MAC_RQC1R[OSI_MAX_MAC_IP_TYPES] = {EQOS_MAC_RQC1R,
MGBE_MAC_RQC1R,
MGBE_MAC_RQC1R};
const nveu32_t MAC_RQC1R_RQ[MAX_MAC_IP_TYPES] = {EQOS_MAC_RQC1R_FPRQ,
const nveu32_t MAC_RQC1R_RQ[OSI_MAX_MAC_IP_TYPES] = {EQOS_MAC_RQC1R_FPRQ,
MGBE_MAC_RQC1R_RQ,
MGBE_MAC_RQC1R_RQ};
const nveu32_t MAC_RQC1R_RQ_SHIFT[MAX_MAC_IP_TYPES] = {EQOS_MAC_RQC1R_FPRQ_SHIFT,
const nveu32_t MAC_RQC1R_RQ_SHIFT[OSI_MAX_MAC_IP_TYPES] = {EQOS_MAC_RQC1R_FPRQ_SHIFT,
MGBE_MAC_RQC1R_RQ_SHIFT,
MGBE_MAC_RQC1R_RQ_SHIFT};
const nveu32_t MTL_FPE_ADV[MAX_MAC_IP_TYPES] = {EQOS_MTL_FPE_ADV,
const nveu32_t MTL_FPE_ADV[OSI_MAX_MAC_IP_TYPES] = {EQOS_MTL_FPE_ADV,
MGBE_MTL_FPE_ADV,
MGBE_MTL_FPE_ADV};
val = osi_readla(osi_core, (nveu8_t *)osi_core->base +
@@ -1242,10 +1382,16 @@ nve32_t hw_config_fpe(struct osi_core_priv_data *const osi_core,
{
nveu32_t val = 0U;
nve32_t ret = 0;
const nveu32_t MTL_FPE_CTS[MAX_MAC_IP_TYPES] = {EQOS_MTL_FPE_CTS,
MGBE_MTL_FPE_CTS};
const nveu32_t MAC_FPE_CTS[MAX_MAC_IP_TYPES] = {EQOS_MAC_FPE_CTS,
MGBE_MAC_FPE_CTS};
const nveu32_t MTL_FPE_CTS[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_FPE_CTS,
MGBE_MTL_FPE_CTS,
MGBE_MTL_FPE_CTS
};
const nveu32_t MAC_FPE_CTS[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MAC_FPE_CTS,
MGBE_MAC_FPE_CTS,
MGBE_MAC_FPE_CTS
};
/* Only 8 TC */
if (fpe->tx_queue_preemption_enable > 0xFFU) {
@@ -1321,8 +1467,11 @@ error:
static inline void enable_mtl_interrupts(struct osi_core_priv_data *osi_core)
{
nveu32_t mtl_est_ir = OSI_DISABLE;
const nveu32_t MTL_EST_ITRE[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_ITRE,
MGBE_MTL_EST_ITRE};
const nveu32_t MTL_EST_ITRE[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_ITRE,
MGBE_MTL_EST_ITRE,
MGBE_MTL_EST_ITRE
};
mtl_est_ir = osi_readla(osi_core, (nveu8_t *)osi_core->base +
MTL_EST_ITRE[osi_core->mac]);
@@ -1352,10 +1501,16 @@ static inline void enable_mtl_interrupts(struct osi_core_priv_data *osi_core)
static inline void enable_fpe_interrupts(struct osi_core_priv_data *osi_core)
{
nveu32_t value = OSI_DISABLE;
const nveu32_t MAC_IER[MAX_MAC_IP_TYPES] = {EQOS_MAC_IMR,
MGBE_MAC_IER};
const nveu32_t IMR_FPEIE[MAX_MAC_IP_TYPES] = {EQOS_IMR_FPEIE,
MGBE_IMR_FPEIE};
const nveu32_t MAC_IER[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MAC_IMR,
MGBE_MAC_IER,
MGBE_MAC_IER
};
const nveu32_t IMR_FPEIE[OSI_MAX_MAC_IP_TYPES] = {
EQOS_IMR_FPEIE,
MGBE_IMR_FPEIE,
MGBE_IMR_FPEIE
};
/* Read MAC IER Register and enable Frame Preemption Interrupt
* Enable */
@@ -1408,38 +1563,86 @@ void hw_tsn_init(struct osi_core_priv_data *osi_core)
{
nveu32_t val = 0x0;
nveu32_t temp = 0U;
const nveu32_t MTL_EST_CONTROL[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL,
MGBE_MTL_EST_CONTROL};
const nveu32_t MTL_EST_CONTROL_PTOV[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL_PTOV,
MGBE_MTL_EST_CONTROL_PTOV};
const nveu32_t MTL_EST_PTOV_RECOMMEND[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_PTOV_RECOMMEND,
MGBE_MTL_EST_PTOV_RECOMMEND};
const nveu32_t MTL_EST_CONTROL_PTOV_SHIFT[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL_PTOV_SHIFT,
MGBE_MTL_EST_CONTROL_PTOV_SHIFT};
const nveu32_t MTL_EST_CONTROL_CTOV[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL_CTOV,
MGBE_MTL_EST_CONTROL_CTOV};
const nveu32_t MTL_EST_CTOV_RECOMMEND[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CTOV_RECOMMEND,
MGBE_MTL_EST_CTOV_RECOMMEND};
const nveu32_t MTL_EST_CONTROL_CTOV_SHIFT[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL_CTOV_SHIFT,
MGBE_MTL_EST_CONTROL_CTOV_SHIFT};
const nveu32_t MTL_EST_CONTROL_LCSE[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL_LCSE,
MGBE_MTL_EST_CONTROL_LCSE};
const nveu32_t MTL_EST_CONTROL_LCSE_VAL[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL_LCSE_VAL,
MGBE_MTL_EST_CONTROL_LCSE_VAL};
const nveu32_t MTL_EST_CONTROL_DDBF[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL_DDBF,
MGBE_MTL_EST_CONTROL_DDBF};
const nveu32_t MTL_EST_OVERHEAD[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_OVERHEAD,
MGBE_MTL_EST_OVERHEAD};
const nveu32_t MTL_EST_OVERHEAD_OVHD[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_OVERHEAD_OVHD,
MGBE_MTL_EST_OVERHEAD_OVHD};
const nveu32_t MTL_EST_OVERHEAD_RECOMMEND[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_OVERHEAD_RECOMMEND,
MGBE_MTL_EST_OVERHEAD_RECOMMEND};
const nveu32_t MAC_RQC1R[MAX_MAC_IP_TYPES] = {EQOS_MAC_RQC1R,
MGBE_MAC_RQC1R};
const nveu32_t MAC_RQC1R_RQ[MAX_MAC_IP_TYPES] = {EQOS_MAC_RQC1R_FPRQ,
MGBE_MAC_RQC1R_RQ};
const nveu32_t MAC_RQC1R_RQ_SHIFT[MAX_MAC_IP_TYPES] = {EQOS_MAC_RQC1R_FPRQ_SHIFT,
MGBE_MAC_RQC1R_RQ_SHIFT};
const nveu32_t MTL_EST_CONTROL[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_CONTROL,
MGBE_MTL_EST_CONTROL,
MGBE_MTL_EST_CONTROL
};
const nveu32_t MTL_EST_CONTROL_PTOV[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_CONTROL_PTOV,
MGBE_MTL_EST_CONTROL_PTOV,
MGBE_MTL_EST_CONTROL_PTOV
};
const nveu32_t MTL_EST_PTOV_RECOMMEND[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_PTOV_RECOMMEND,
MGBE_MTL_EST_PTOV_RECOMMEND,
MGBE_MTL_EST_PTOV_RECOMMEND
};
const nveu32_t MTL_EST_CONTROL_PTOV_SHIFT[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_CONTROL_PTOV_SHIFT,
MGBE_MTL_EST_CONTROL_PTOV_SHIFT,
MGBE_MTL_EST_CONTROL_PTOV_SHIFT
};
const nveu32_t MTL_EST_CONTROL_CTOV[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_CONTROL_CTOV,
MGBE_MTL_EST_CONTROL_CTOV,
MGBE_MTL_EST_CONTROL_CTOV
};
const nveu32_t MTL_EST_CTOV_RECOMMEND[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_CTOV_RECOMMEND,
MGBE_MTL_EST_CTOV_RECOMMEND,
MGBE_MTL_EST_CTOV_RECOMMEND
};
const nveu32_t MTL_EST_CONTROL_CTOV_SHIFT[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_CONTROL_CTOV_SHIFT,
MGBE_MTL_EST_CONTROL_CTOV_SHIFT,
MGBE_MTL_EST_CONTROL_CTOV_SHIFT
};
const nveu32_t MTL_EST_CONTROL_LCSE[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_CONTROL_LCSE,
MGBE_MTL_EST_CONTROL_LCSE,
MGBE_MTL_EST_CONTROL_LCSE
};
const nveu32_t MTL_EST_CONTROL_LCSE_VAL[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_CONTROL_LCSE_VAL,
MGBE_MTL_EST_CONTROL_LCSE_VAL,
MGBE_MTL_EST_CONTROL_LCSE_VAL
};
const nveu32_t MTL_EST_CONTROL_DDBF[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_CONTROL_DDBF,
MGBE_MTL_EST_CONTROL_DDBF,
MGBE_MTL_EST_CONTROL_DDBF
};
const nveu32_t MTL_EST_OVERHEAD[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_OVERHEAD,
MGBE_MTL_EST_OVERHEAD,
MGBE_MTL_EST_OVERHEAD
};
const nveu32_t MTL_EST_OVERHEAD_OVHD[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_OVERHEAD_OVHD,
MGBE_MTL_EST_OVERHEAD_OVHD,
MGBE_MTL_EST_OVERHEAD_OVHD
};
const nveu32_t MTL_EST_OVERHEAD_RECOMMEND[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_EST_OVERHEAD_RECOMMEND,
MGBE_MTL_EST_OVERHEAD_RECOMMEND,
MGBE_MTL_EST_OVERHEAD_RECOMMEND
};
const nveu32_t MAC_RQC1R[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MAC_RQC1R,
MGBE_MAC_RQC1R,
MGBE_MAC_RQC1R
};
const nveu32_t MAC_RQC1R_RQ[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MAC_RQC1R_FPRQ,
MGBE_MAC_RQC1R_RQ,
MGBE_MAC_RQC1R_RQ
};
const nveu32_t MAC_RQC1R_RQ_SHIFT[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MAC_RQC1R_FPRQ_SHIFT,
MGBE_MAC_RQC1R_RQ_SHIFT,
MGBE_MAC_RQC1R_RQ_SHIFT
};
/* Configure EST parameters */
save_gcl_params(osi_core);
@@ -1633,9 +1836,16 @@ static inline nveu64_t hsi_update_mmc_val(struct osi_core_priv_data *osi_core,
{
nveu64_t temp = 0;
nveu32_t value = osi_readl((nveu8_t *)osi_core->base + offset);
const nveu32_t MMC_CNTRL[MAX_MAC_IP_TYPES] = { EQOS_MMC_CNTRL, MGBE_MMC_CNTRL };
const nveu32_t MMC_CNTRL_CNTRST[MAX_MAC_IP_TYPES] = { EQOS_MMC_CNTRL_CNTRST,
MGBE_MMC_CNTRL_CNTRST };
const nveu32_t MMC_CNTRL[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MMC_CNTRL,
MGBE_MMC_CNTRL,
MGBE_MMC_CNTRL
};
const nveu32_t MMC_CNTRL_CNTRST[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MMC_CNTRL_CNTRST,
MGBE_MMC_CNTRL_CNTRST,
MGBE_MMC_CNTRL_CNTRST
};
temp = last_value + value;
if (temp < last_value) {
@@ -1667,16 +1877,31 @@ static inline nveu64_t hsi_update_mmc_val(struct osi_core_priv_data *osi_core,
void hsi_read_err(struct osi_core_priv_data *const osi_core)
{
struct osi_mmc_counters *mmc = &osi_core->mmc;
const nveu32_t RXCRCERROR[MAX_MAC_IP_TYPES] = { EQOS_MMC_RXCRCERROR,
MGBE_MMC_RXCRCERROR_L };
const nveu32_t RXIPV4_HDRERR_PKTS[MAX_MAC_IP_TYPES] = { EQOS_MMC_RXIPV4_HDRERR_PKTS,
MGBE_MMC_RXIPV4_HDRERR_PKTS_L };
const nveu32_t RXIPV6_HDRERR_PKTS[MAX_MAC_IP_TYPES] = { EQOS_MMC_RXIPV6_HDRERR_PKTS,
MGBE_MMC_RXIPV6_HDRERR_PKTS_L };
const nveu32_t RXUDP_ERR_PKTS[MAX_MAC_IP_TYPES] = { EQOS_MMC_RXUDP_ERR_PKTS,
MGBE_MMC_RXUDP_ERR_PKTS_L };
const nveu32_t RXTCP_ERR_PKTS[MAX_MAC_IP_TYPES] = { EQOS_MMC_RXTCP_ERR_PKTS,
MGBE_MMC_RXTCP_ERR_PKTS_L };
const nveu32_t RXCRCERROR[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MMC_RXCRCERROR,
MGBE_MMC_RXCRCERROR_L,
MGBE_MMC_RXCRCERROR_L
};
const nveu32_t RXIPV4_HDRERR_PKTS[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MMC_RXIPV4_HDRERR_PKTS,
MGBE_MMC_RXIPV4_HDRERR_PKTS_L,
MGBE_MMC_RXIPV4_HDRERR_PKTS_L
};
const nveu32_t RXIPV6_HDRERR_PKTS[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MMC_RXIPV6_HDRERR_PKTS,
MGBE_MMC_RXIPV6_HDRERR_PKTS_L,
MGBE_MMC_RXIPV6_HDRERR_PKTS_L
};
const nveu32_t RXUDP_ERR_PKTS[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MMC_RXUDP_ERR_PKTS,
MGBE_MMC_RXUDP_ERR_PKTS_L,
MGBE_MMC_RXUDP_ERR_PKTS_L
};
const nveu32_t RXTCP_ERR_PKTS[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MMC_RXTCP_ERR_PKTS,
MGBE_MMC_RXTCP_ERR_PKTS_L,
MGBE_MMC_RXTCP_ERR_PKTS_L
};
mmc->mmc_rx_crc_error = hsi_update_mmc_val(osi_core, mmc->mmc_rx_crc_error,
RXCRCERROR[osi_core->mac]);
@@ -1717,8 +1942,9 @@ static void prepare_l3l4_ctr_reg(const struct osi_core_priv_data *const osi_core
nveu32_t dma_routing_enable = OSI_BIT(0);
nveu32_t dst_addr_match = OSI_BIT(0);
#endif /* !OSI_STRIPPED_LIB */
const nveu32_t dma_chan_en_shift[2] = {
const nveu32_t dma_chan_en_shift[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MAC_L3L4_CTR_DMCHEN_SHIFT,
MGBE_MAC_L3L4_CTR_DMCHEN_SHIFT,
MGBE_MAC_L3L4_CTR_DMCHEN_SHIFT
};
nveu32_t value = 0U;
@@ -1932,10 +2158,16 @@ nve32_t hw_validate_avb_input(struct osi_core_priv_data *const osi_core,
const struct osi_core_avb_algorithm *const avb)
{
nve32_t ret = 0;
nveu32_t ETS_QW_ISCQW_MASK[MAX_MAC_IP_TYPES] = {EQOS_MTL_TXQ_ETS_QW_ISCQW_MASK,
MGBE_MTL_TCQ_ETS_QW_ISCQW_MASK};
nveu32_t ETS_SSCR_SSC_MASK[MAX_MAC_IP_TYPES] = {EQOS_MTL_TXQ_ETS_SSCR_SSC_MASK,
MGBE_MTL_TCQ_ETS_SSCR_SSC_MASK};
nveu32_t ETS_QW_ISCQW_MASK[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_TXQ_ETS_QW_ISCQW_MASK,
MGBE_MTL_TCQ_ETS_QW_ISCQW_MASK,
MGBE_MTL_TCQ_ETS_QW_ISCQW_MASK
};
nveu32_t ETS_SSCR_SSC_MASK[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MTL_TXQ_ETS_SSCR_SSC_MASK,
MGBE_MTL_TCQ_ETS_SSCR_SSC_MASK,
MGBE_MTL_TCQ_ETS_SSCR_SSC_MASK
};
nveu32_t ETS_HC_BOUND = 0x8000000U;
nveu32_t ETS_LC_BOUND = 0xF8000000U;
nveu32_t mac = osi_core->mac;

View File

@@ -161,7 +161,8 @@ struct est_read {
};
/** @} */
nve32_t poll_check(struct osi_core_priv_data *const osi_core, nveu8_t *addr,
nveu32_t bit_check, nveu32_t *value);
nve32_t hw_poll_for_swr(struct osi_core_priv_data *const osi_core);
void hw_start_mac(struct osi_core_priv_data *const osi_core);
void hw_stop_mac(struct osi_core_priv_data *const osi_core);

View File

@@ -45,6 +45,7 @@
/**
* @brief Maximum number of timestamps stored in OSI from HW FIFO.
*/
//TBD: does it change for T264?
#define MAX_TX_TS_CNT (PKT_ID_CNT * OSI_MGBE_MAX_NUM_CHANS)
/**

View File

@@ -200,7 +200,7 @@ static nve32_t validate_frp_args(struct osi_core_priv_data *const osi_core,
OSI_UNUSED nveu8_t pos,
nveu32_t *req_entries)
{
nveu32_t dma_sel_val[MAX_MAC_IP_TYPES] = {0xFFU, 0x3FFU};
nveu32_t dma_sel_val[OSI_MAX_MAC_IP_TYPES] = {0xFFU, 0x3FFU, 0x3FFU};
nve32_t ret = 0;
(void)pos;

View File

@@ -1477,6 +1477,11 @@ static nve32_t mgbe_hsi_configure(struct osi_core_priv_data *const osi_core,
{
nveu32_t value = 0U;
nve32_t ret = 0;
const nveu32_t xpcs_intr_ctrl_reg[OSI_MAX_MAC_IP_TYPES] = {
0,
XPCS_WRAP_INTERRUPT_CONTROL,
T26X_XPCS_WRAP_INTERRUPT_CONTROL
};
if (enable == OSI_ENABLE) {
osi_core->hsi.enabled = OSI_ENABLE;
@@ -1574,12 +1579,12 @@ static nve32_t mgbe_hsi_configure(struct osi_core_priv_data *const osi_core,
MGBE_WRAP_COMMON_INTR_ENABLE);
value = osi_readla(osi_core, (nveu8_t *)osi_core->xpcs_base +
XPCS_WRAP_INTERRUPT_CONTROL);
xpcs_intr_ctrl_reg[osi_core->mac]);
value |= XPCS_CORE_CORRECTABLE_ERR;
value |= XPCS_CORE_UNCORRECTABLE_ERR;
value |= XPCS_REGISTER_PARITY_ERR;
osi_writela(osi_core, value, (nveu8_t *)osi_core->xpcs_base +
XPCS_WRAP_INTERRUPT_CONTROL);
xpcs_intr_ctrl_reg[osi_core->mac]);
} else {
osi_core->hsi.enabled = OSI_DISABLE;
@@ -1641,12 +1646,12 @@ static nve32_t mgbe_hsi_configure(struct osi_core_priv_data *const osi_core,
MGBE_WRAP_COMMON_INTR_ENABLE);
value = osi_readla(osi_core, (nveu8_t *)osi_core->xpcs_base +
XPCS_WRAP_INTERRUPT_CONTROL);
xpcs_intr_ctrl_reg[osi_core->mac]);
value &= ~XPCS_CORE_CORRECTABLE_ERR;
value &= ~XPCS_CORE_UNCORRECTABLE_ERR;
value &= ~XPCS_REGISTER_PARITY_ERR;
osi_writela(osi_core, value, (nveu8_t *)osi_core->xpcs_base +
XPCS_WRAP_INTERRUPT_CONTROL);
xpcs_intr_ctrl_reg[osi_core->mac]);
}
fail:
return ret;
@@ -1842,6 +1847,158 @@ static void mgbe_configure_mac(struct osi_core_priv_data *osi_core)
#endif /* !OSI_STRIPPED_LIB */
}
/**
* @brief mgbe_dma_indir_addr_write - Writes a value to a DMA indirect register
*
* Algorithm: Write to Indirect DMA registers
*
* @param[in] osi_core: OSI core private data structure.
* @param[in] mode: Indirect DMA register to write.
* @param[in] chan: Indirect DMA channel register offset.
* @param[in] value: Data to be written to DMA indirect register
*
* @note MAC has to be out of reset.
*
* @retval 0 on success
* @retval -1 on failure.
*/
static nve32_t mgbe_dma_indir_addr_write(struct osi_core_priv_data *osi_core,
nveu32_t mode, nveu32_t chan, nveu32_t value)
{
nveu8_t *addr = (nveu8_t *)osi_core->base;
nveu32_t ctrl = 0;
nve32_t ret = 0;
nveu32_t val = 0U;
/* Write data to indirect register */
osi_writela(osi_core, value, addr + MGBE_DMA_INDIR_DATA);
ctrl |= (mode << MGBE_DMA_INDIR_CTRL_MSEL_SHIFT) &
MGBE_DMA_INDIR_CTRL_MSEL_MASK;
ctrl |= (chan << MGBE_DMA_INDIR_CTRL_AOFF_SHIFT) &
MGBE_DMA_INDIR_CTRL_AOFF_MASK;
ctrl |= MGBE_DMA_INDIR_CTRL_OB;
ctrl &= ~MGBE_DMA_INDIR_CTRL_CT;
/* Write cmd to indirect control register */
osi_writela(osi_core, ctrl, addr + MGBE_DMA_INDIR_CTRL);
/* poll for write operation to complete */
ret = poll_check(osi_core, addr + MGBE_DMA_INDIR_CTRL,
MGBE_DMA_INDIR_CTRL_OB, &val);
if (ret == -1) {
goto done;
}
done:
return ret;
}
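A hypothetical read counterpart, for illustration (assumption: the CT bit selects a read command, since the write path above explicitly clears it; this helper is not part of the change):

static nve32_t example_dma_indir_addr_read(struct osi_core_priv_data *osi_core,
					   nveu32_t mode, nveu32_t chan,
					   nveu32_t *value)
{
	nveu8_t *addr = (nveu8_t *)osi_core->base;
	nveu32_t ctrl = 0;
	nveu32_t val = 0U;
	nve32_t ret;

	/* compose mode select and address offset exactly as the write path */
	ctrl |= (mode << MGBE_DMA_INDIR_CTRL_MSEL_SHIFT) &
		MGBE_DMA_INDIR_CTRL_MSEL_MASK;
	ctrl |= (chan << MGBE_DMA_INDIR_CTRL_AOFF_SHIFT) &
		MGBE_DMA_INDIR_CTRL_AOFF_MASK;
	ctrl |= MGBE_DMA_INDIR_CTRL_CT;	/* assumed: command type = read */
	ctrl |= MGBE_DMA_INDIR_CTRL_OB;	/* start the operation */
	osi_writela(osi_core, ctrl, addr + MGBE_DMA_INDIR_CTRL);

	/* poll for OB to clear, then fetch the returned data */
	ret = poll_check(osi_core, addr + MGBE_DMA_INDIR_CTRL,
			 MGBE_DMA_INDIR_CTRL_OB, &val);
	if (ret == 0) {
		*value = osi_readla(osi_core, addr + MGBE_DMA_INDIR_DATA);
	}

	return ret;
}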
/**
* @brief mgbe_configure_pdma - Configure PDMA parameters and TC mapping
*
* Algorithm:
* 1) Program Tx/Rx PDMA PBL, ORR, OWR parameters
* 2) Program PDMA to TC mapping for Tx and Rx
*
* @param[in] osi_core: OSI core private data structure.
*
* @note MAC has to be out of reset.
*
* @retval 0 on success
* @retval -1 on failure.
*/
static nve32_t mgbe_configure_pdma(struct osi_core_priv_data *osi_core)
{
nveu32_t value = 0;
nve32_t ret = 0;
nveu32_t i, j, pdma_chan, vdma_chan;
//TBD: check values for T264
const nveu32_t tx_orr = (MGBE_DMA_CHX_TX_CNTRL2_ORRQ_RECOMMENDED /
osi_core->num_of_pdma);
const nveu32_t tx_pbl = ((((MGBE_TXQ_SIZE / osi_core->num_of_pdma) -
osi_core->mtu) / (MGBE_AXI_DATAWIDTH / 8U)) - 5U);
const nveu32_t rx_owrq = (MGBE_DMA_CHX_RX_CNTRL2_OWRQ_MCHAN /
osi_core->num_of_pdma);
const nveu32_t rx_pbl = ((MGBE_RXQ_SIZE / osi_core->num_of_pdma) / 2U);
for (i = 0 ; i < osi_core->num_of_pdma; i++) {
pdma_chan = osi_core->pdma_data[i].pdma_chan;
/* Update PDMA_CH(#i)_TxExtCfg register */
value = (tx_orr << MGBE_PDMA_CHX_TXRX_EXTCFG_ORRQ_SHIFT);
value |= (pdma_chan << MGBE_PDMA_CHX_TXRX_EXTCFG_P2TCMP_SHIFT) &
MGBE_PDMA_CHX_TXRX_EXTCFG_P2TCMP_MASK;
value |= MGBE_PDMA_CHX_TXRX_EXTCFG_PBLX8;
/*
* Formula for TxPBL calculation is
* (TxPBL) < ((TXQSize - MTU)/(DATAWIDTH/8)) - 5
* If the computed TxPBL exceeds 256, use 256 as the
* TxPBL; otherwise use the value computed by the
* formula above.
*/
if (tx_pbl >= MGBE_PDMA_CHX_EXTCFG_MAX_PBL) {
value |= MGBE_PDMA_CHX_EXTCFG_MAX_PBL_VAL;
} else {
value |= ((tx_pbl / 8U) <<
MGBE_PDMA_CHX_TXRX_EXTCFG_PBL_SHIFT) &
MGBE_PDMA_CHX_TXRX_EXTCFG_PBL_MASK;
}
ret = mgbe_dma_indir_addr_write(osi_core,
MGBE_PDMA_CHX_TX_EXTCFG, pdma_chan, value);
if (ret < 0) {
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
"MGBE_PDMA_CHX_TX_EXTCFG failed\n", 0ULL);
goto done;
}
/* Update PDMA_CH(#i)_RxExtCfg register */
value = (rx_owrq << MGBE_PDMA_CHX_TXRX_EXTCFG_ORRQ_SHIFT);
value |= (pdma_chan << MGBE_PDMA_CHX_TXRX_EXTCFG_P2TCMP_SHIFT) &
MGBE_PDMA_CHX_TXRX_EXTCFG_P2TCMP_MASK;
value |= MGBE_PDMA_CHX_TXRX_EXTCFG_PBLX8;
if (rx_pbl >= MGBE_PDMA_CHX_EXTCFG_MAX_PBL) {
value |= MGBE_PDMA_CHX_EXTCFG_MAX_PBL_VAL;
} else {
value |= ((rx_pbl / 8U) <<
MGBE_PDMA_CHX_TXRX_EXTCFG_PBL_SHIFT) &
MGBE_PDMA_CHX_TXRX_EXTCFG_PBL_MASK;
}
ret = mgbe_dma_indir_addr_write(osi_core,
MGBE_PDMA_CHX_RX_EXTCFG, pdma_chan, value);
if (ret < 0) {
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
"MGBE_PDMA_CHX_RX_EXTCFG failed\n", 0ULL);
goto done;
}
/* program the vdma's descriptor cache size and
* pre-fetch threshold */
for (j = 0 ; j < osi_core->pdma_data[i].num_vdma_chans; j++) {
vdma_chan = osi_core->pdma_data[i].vdma_chans[j];
//TBD: check descriptor size value is correct for T264
value = MGBE_XDMA_CHX_TXRX_DESC_CTRL_DCSZ &
MGBE_XDMA_CHX_TXRX_DESC_CTRL_DCSZ_MASK;
value |= (MGBE_XDMA_CHX_TXRX_DESC_CTRL_DPS <<
MGBE_XDMA_CHX_TXRX_DESC_CTRL_DPS_SHIFT) &
MGBE_XDMA_CHX_TXRX_DESC_CTRL_DPS_MASK;
ret = mgbe_dma_indir_addr_write(osi_core,
MGBE_VDMA_CHX_TX_DESC_CTRL, vdma_chan, value);
if (ret < 0) {
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
"MGBE_VDMA_CHX_TX_DESC_CTRL failed\n", 0ULL);
goto done;
}
ret = mgbe_dma_indir_addr_write(osi_core,
MGBE_VDMA_CHX_RX_DESC_CTRL, vdma_chan, value);
if (ret < 0) {
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
"MGBE_VDMA_CHX_RX_DESC_CTRL failed\n", 0ULL);
goto done;
}
}
}
done:
return ret;
}
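To make the sizing concrete, a worked pass through the formulas above with assumed inputs num_of_pdma = 4 and mtu = 9000:

/* tx_orr  = 64 / 4 = 16 outstanding read requests per PDMA
 * tx_pbl  = ((131072 / 4 - 9000) / (128 / 8)) - 5
 *         = ((32768 - 9000) / 16) - 5 = 1485 - 5 = 1480
 *           -> exceeds 256, so MGBE_PDMA_CHX_EXTCFG_MAX_PBL_VAL is set
 * rx_owrq = 64 / 4 = 16 outstanding write requests per PDMA
 * rx_pbl  = (196608 / 4) / 2 = 24576
 *           -> also capped at the 256 maximum
 */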
/**
* @brief mgbe_configure_dma - Configure DMA
*
@@ -1854,10 +2011,14 @@ static void mgbe_configure_mac(struct osi_core_priv_data *osi_core)
* @param[in] osi_core: OSI core private data structure.
*
* @note MAC has to be out of reset.
*
* @retval 0 on success
* @retval -1 on failure.
*/
static void mgbe_configure_dma(struct osi_core_priv_data *osi_core)
static nve32_t mgbe_configure_dma(struct osi_core_priv_data *osi_core)
{
nveu32_t value = 0;
nve32_t ret = 0;
/* Set AXI Undefined Burst Length */
value |= MGBE_DMA_SBUS_UNDEF;
@@ -1872,7 +2033,7 @@ static void mgbe_configure_dma(struct osi_core_priv_data *osi_core)
osi_writela(osi_core, value,
(nveu8_t *)osi_core->base + MGBE_DMA_SBUS);
if (osi_core->mac == OSI_MAC_HW_MGBE) {
/* Configure TDPS to 5 */
value = osi_readla(osi_core,
(nveu8_t *)osi_core->base + MGBE_DMA_TX_EDMA_CTRL);
@@ -1886,6 +2047,16 @@ static void mgbe_configure_dma(struct osi_core_priv_data *osi_core)
value |= MGBE_DMA_RX_EDMA_CTRL_RDPS;
osi_writela(osi_core, value,
(nveu8_t *)osi_core->base + MGBE_DMA_RX_EDMA_CTRL);
}
/* configure MGBE PDMA */
if (osi_core->mac == OSI_MAC_HW_MGBE_T26X) {
ret = mgbe_configure_pdma(osi_core);
if (ret < 0) {
goto done;
}
}
done:
return ret;
}
/**
@@ -2042,7 +2213,10 @@ static nve32_t mgbe_core_init(struct osi_core_priv_data *const osi_core)
mgbe_configure_mac(osi_core);
/* configure MGBE DMA */
mgbe_configure_dma(osi_core);
ret = mgbe_configure_dma(osi_core);
if (ret < 0) {
goto fail;
}
/* tsn initialization */
hw_tsn_init(osi_core);
@@ -2053,6 +2227,11 @@ static nve32_t mgbe_core_init(struct osi_core_priv_data *const osi_core)
#endif /* !L3L4_WILDCARD_FILTER */
ret = mgbe_dma_chan_to_vmirq_map(osi_core);
//TBD: debugging reset mmc counters for T264
if (osi_core->pre_sil == OSI_ENABLE) {
//TODO: removed in tot dev-main
//mgbe_reset_mmc(osi_core);
}
fail:
return ret;
}
@@ -2326,31 +2505,31 @@ done:
*
* @param[in] osi_core: OSI core private data structure.
* @param[in] dma_sr: Dma status register read value
* @param[in] qinx: Queue index
* @param[in] chan: DMA channel number
*/
static inline void mgbe_update_dma_sr_stats(struct osi_core_priv_data *osi_core,
nveu32_t dma_sr, nveu32_t qinx)
nveu32_t dma_sr, nveu32_t chan)
{
nveu64_t val;
if ((dma_sr & MGBE_DMA_CHX_STATUS_RBU) == MGBE_DMA_CHX_STATUS_RBU) {
val = osi_core->stats.rx_buf_unavail_irq_n[qinx];
osi_core->stats.rx_buf_unavail_irq_n[qinx] =
val = osi_core->stats.rx_buf_unavail_irq_n[chan];
osi_core->stats.rx_buf_unavail_irq_n[chan] =
osi_update_stats_counter(val, 1U);
}
if ((dma_sr & MGBE_DMA_CHX_STATUS_TPS) == MGBE_DMA_CHX_STATUS_TPS) {
val = osi_core->stats.tx_proc_stopped_irq_n[qinx];
osi_core->stats.tx_proc_stopped_irq_n[qinx] =
val = osi_core->stats.tx_proc_stopped_irq_n[chan];
osi_core->stats.tx_proc_stopped_irq_n[chan] =
osi_update_stats_counter(val, 1U);
}
if ((dma_sr & MGBE_DMA_CHX_STATUS_TBU) == MGBE_DMA_CHX_STATUS_TBU) {
val = osi_core->stats.tx_buf_unavail_irq_n[qinx];
osi_core->stats.tx_buf_unavail_irq_n[qinx] =
val = osi_core->stats.tx_buf_unavail_irq_n[chan];
osi_core->stats.tx_buf_unavail_irq_n[chan] =
osi_update_stats_counter(val, 1U);
}
if ((dma_sr & MGBE_DMA_CHX_STATUS_RPS) == MGBE_DMA_CHX_STATUS_RPS) {
val = osi_core->stats.rx_proc_stopped_irq_n[qinx];
osi_core->stats.rx_proc_stopped_irq_n[qinx] =
val = osi_core->stats.rx_proc_stopped_irq_n[chan];
osi_core->stats.rx_proc_stopped_irq_n[chan] =
osi_update_stats_counter(val, 1U);
}
if ((dma_sr & MGBE_DMA_CHX_STATUS_FBE) == MGBE_DMA_CHX_STATUS_FBE) {
@@ -2990,12 +3169,22 @@ static void mgbe_handle_hsi_intr(struct osi_core_priv_data *osi_core)
nveu32_t val2 = 0;
void *xpcs_base = osi_core->xpcs_base;
nveu64_t ce_count_threshold;
const nveu32_t xpcs_intr_ctrl_reg[OSI_MAX_MAC_IP_TYPES] = {
0,
XPCS_WRAP_INTERRUPT_CONTROL,
T26X_XPCS_WRAP_INTERRUPT_CONTROL
};
const nveu32_t xpcs_intr_sts_reg[OSI_MAX_MAC_IP_TYPES] = {
0,
XPCS_WRAP_INTERRUPT_STATUS,
T26X_XPCS_WRAP_INTERRUPT_STATUS
};
/* Handle HSI wrapper common interrupt */
mgbe_handle_hsi_wrap_common_intr(osi_core);
val = osi_readla(osi_core, (nveu8_t *)osi_core->xpcs_base +
XPCS_WRAP_INTERRUPT_STATUS);
xpcs_intr_sts_reg[osi_core->mac]);
if (((val & XPCS_CORE_UNCORRECTABLE_ERR) == XPCS_CORE_UNCORRECTABLE_ERR) ||
((val & XPCS_REGISTER_PARITY_ERR) == XPCS_REGISTER_PARITY_ERR)) {
osi_core->hsi.err_code[UE_IDX] = OSI_UNCORRECTABLE_ERR;
@@ -3003,11 +3192,11 @@ static void mgbe_handle_hsi_intr(struct osi_core_priv_data *osi_core)
osi_core->hsi.report_count_err[UE_IDX] = OSI_ENABLE;
/* Disable uncorrectable interrupts */
val2 = osi_readla(osi_core, (nveu8_t *)osi_core->xpcs_base +
XPCS_WRAP_INTERRUPT_CONTROL);
xpcs_intr_ctrl_reg[osi_core->mac]);
val2 &= ~XPCS_CORE_UNCORRECTABLE_ERR;
val2 &= ~XPCS_REGISTER_PARITY_ERR;
osi_writela(osi_core, val2, (nveu8_t *)osi_core->xpcs_base +
XPCS_WRAP_INTERRUPT_CONTROL);
xpcs_intr_ctrl_reg[osi_core->mac]);
}
if ((val & XPCS_CORE_CORRECTABLE_ERR) == XPCS_CORE_CORRECTABLE_ERR) {
osi_core->hsi.err_code[CE_IDX] = OSI_CORRECTABLE_ERR;
@@ -3022,7 +3211,7 @@ static void mgbe_handle_hsi_intr(struct osi_core_priv_data *osi_core)
}
osi_writela(osi_core, val, (nveu8_t *)osi_core->xpcs_base +
XPCS_WRAP_INTERRUPT_STATUS);
xpcs_intr_sts_reg[osi_core->mac]);
if (((val & XPCS_CORE_CORRECTABLE_ERR) == XPCS_CORE_CORRECTABLE_ERR) ||
((val & XPCS_CORE_UNCORRECTABLE_ERR) == XPCS_CORE_UNCORRECTABLE_ERR)) {
@@ -3051,8 +3240,9 @@ static void mgbe_handle_hsi_intr(struct osi_core_priv_data *osi_core)
static void mgbe_handle_common_intr(struct osi_core_priv_data *const osi_core)
{
void *base = osi_core->base;
nveu32_t dma_isr = 0;
nveu32_t qinx = 0;
nveu32_t dma_isr_ch0_15 = 0;
nveu32_t dma_isr_ch16_47 = 0;
nveu32_t chan = 0;
nveu32_t i = 0;
nveu32_t dma_sr = 0;
nveu32_t dma_ier = 0;
@@ -3064,28 +3254,33 @@ static void mgbe_handle_common_intr(struct osi_core_priv_data *const osi_core)
mgbe_handle_hsi_intr(osi_core);
}
#endif
dma_isr = osi_readla(osi_core, (nveu8_t *)base + MGBE_DMA_ISR);
if (dma_isr == OSI_NONE) {
dma_isr_ch0_15 = osi_readla(osi_core, (nveu8_t *)base +
MGBE_DMA_ISR_CH0_15);
if (osi_core->mac == OSI_MAC_HW_MGBE_T26X) {
dma_isr_ch16_47 = osi_readla(osi_core, (nveu8_t *)base +
MGBE_DMA_ISR_CH16_47);
}
if ((dma_isr_ch0_15 == OSI_NONE) && (dma_isr_ch16_47 == OSI_NONE)) {
goto done;
}
//FIXME Need to check how we can get the DMA channel here instead of
//MTL Queues
if ((dma_isr & MGBE_DMA_ISR_DCH0_DCH15_MASK) != OSI_NONE) {
if (((dma_isr_ch0_15 & MGBE_DMA_ISR_DCH0_DCH15_MASK) != OSI_NONE) ||
((dma_isr_ch16_47 & MGBE_DMA_ISR_DCH16_DCH47_MASK) != OSI_NONE)) {
/* Handle Non-TI/RI interrupts */
for (i = 0; i < osi_core->num_mtl_queues; i++) {
qinx = osi_core->mtl_queues[i];
for (i = 0; i < osi_core->num_dma_chans; i++) {
chan = osi_core->dma_chans[i];
if (qinx >= OSI_MGBE_MAX_NUM_CHANS) {
if (chan >= OSI_MGBE_MAX_NUM_CHANS) {
continue;
}
/* read dma channel status register */
dma_sr = osi_readla(osi_core, (nveu8_t *)base +
MGBE_DMA_CHX_STATUS(qinx));
MGBE_DMA_CHX_STATUS(chan));
/* read dma channel interrupt enable register */
dma_ier = osi_readla(osi_core, (nveu8_t *)base +
MGBE_DMA_CHX_IER(qinx));
MGBE_DMA_CHX_IER(chan));
/* process only those interrupts which we
* have enabled.
@@ -3101,22 +3296,22 @@ static void mgbe_handle_common_intr(struct osi_core_priv_data *const osi_core)
/* ack non TI/RI interrupts */
osi_writela(osi_core, dma_sr, (nveu8_t *)base +
MGBE_DMA_CHX_STATUS(qinx));
MGBE_DMA_CHX_STATUS(chan));
#ifndef OSI_STRIPPED_LIB
mgbe_update_dma_sr_stats(osi_core, dma_sr, qinx);
mgbe_update_dma_sr_stats(osi_core, dma_sr, chan);
#endif /* !OSI_STRIPPED_LIB */
}
}
/* Handle MAC interrupts */
if ((dma_isr & MGBE_DMA_ISR_MACIS) == MGBE_DMA_ISR_MACIS) {
if ((dma_isr_ch0_15 & MGBE_DMA_ISR_MACIS) == MGBE_DMA_ISR_MACIS) {
mgbe_handle_mac_intrs(osi_core);
}
/* Handle MTL interrupts */
mtl_isr = osi_readla(osi_core,
(nveu8_t *)base + MGBE_MTL_INTR_STATUS);
if ((dma_isr & MGBE_DMA_ISR_MTLIS) == MGBE_DMA_ISR_MTLIS) {
if ((dma_isr_ch0_15 & MGBE_DMA_ISR_MTLIS) == MGBE_DMA_ISR_MTLIS) {
mgbe_handle_mtl_intrs(osi_core, mtl_isr);
}
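For illustration, how a channel number might map onto the split status words (assumption: one bit per channel, with channels 16-47 reported in MGBE_DMA_ISR_CH16_47 as the register names suggest):

static nveu32_t example_chan_pending(nveu32_t dma_isr_ch0_15,
				     nveu32_t dma_isr_ch16_47,
				     nveu32_t chan)
{
	nveu32_t pending;

	if (chan < 16U) {
		pending = dma_isr_ch0_15 & OSI_BIT(chan);
	} else {
		pending = dma_isr_ch16_47 & OSI_BIT(chan - 16U);
	}

	return pending;
}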
@@ -3511,6 +3706,18 @@ static void mgbe_get_hw_features(struct osi_core_priv_data *const osi_core,
#ifndef OSI_STRIPPED_LIB
nveu32_t val = 0;
#endif /* !OSI_STRIPPED_LIB */
nve32_t ret = 0;
if (osi_core->pre_sil == OSI_ENABLE) {
/* TBD: T264 reset to get mac version for MGBE */
osi_writela(osi_core, 0x1U, ((nveu8_t *)osi_core->base + MGBE_DMA_MODE));
ret = hw_poll_for_swr(osi_core);
if (ret < 0) {
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
"T264 MGBE Reset failed\n", 0ULL);
goto done;
}
}
mac_hfr0 = osi_readla(osi_core, base + MGBE_MAC_HFR0);
mac_hfr1 = osi_readla(osi_core, base + MGBE_MAC_HFR1);
@@ -3675,6 +3882,8 @@ static void mgbe_get_hw_features(struct osi_core_priv_data *const osi_core,
MGBE_MAC_HFR3_TBSSEL_MASK);
hw_feat->num_tbs_ch = ((mac_hfr3 >> MGBE_MAC_HFR3_TBS_CH_SHIFT) &
MGBE_MAC_HFR3_TBS_CH_MASK);
done:
return;
}
/**

View File

@@ -435,6 +435,12 @@
#endif /* !OSI_STRIPPED_LIB */
/** @} */
#define MGBE_DMA_CHX_TX_CNTRL2_ORRQ_RECOMMENDED 64U
#define MGBE_DMA_CHX_TX_CNTRL2_ORRQ_SHIFT 24U
#define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN 32U
#define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_MCHAN 64U
#define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SHIFT 24U
#define MGBE_DMA_CHX_CTRL_PBL_SHIFT 16U
/**
* @addtogroup MGBE-DMA DMA register offsets
*
@@ -443,9 +449,12 @@
*/
#define MGBE_DMA_MODE 0x3000
#define MGBE_DMA_SBUS 0x3004
#define MGBE_DMA_ISR 0x3008
#define MGBE_DMA_ISR_CH0_15 0x3008
#define MGBE_DMA_TX_EDMA_CTRL 0x3040
#define MGBE_DMA_RX_EDMA_CTRL 0x3044
#define MGBE_DMA_INDIR_CTRL 0x3080
#define MGBE_DMA_INDIR_DATA 0x3084
#define MGBE_DMA_ISR_CH16_47 0x3090
#define MGBE_DMA_CHX_STATUS(x) ((0x0080U * (x)) + 0x3160U)
#define MGBE_DMA_CHX_IER(x) ((0x0080U * (x)) + 0x3138U)
/** @} */
@@ -514,6 +523,7 @@
#define MGBE_ISR_TSIS OSI_BIT(12)
#define MGBE_DMA_ISR_MACIS OSI_BIT(17)
#define MGBE_DMA_ISR_DCH0_DCH15_MASK 0x3FFU
#define MGBE_DMA_ISR_DCH16_DCH47_MASK 0xFFFFU
#define MGBE_DMA_CHX_STATUS_TI OSI_BIT(0)
#define MGBE_DMA_CHX_STATUS_RI OSI_BIT(6)
#define MGBE_MAC_ADDRH_AE OSI_BIT(31)
@@ -557,6 +567,7 @@
#define MGBE_MAC_TMCR_SS_2_5G (OSI_BIT(31) | OSI_BIT(30))
#define MGBE_MAC_TMCR_SS_5G (OSI_BIT(31) | OSI_BIT(29))
#define MGBE_MAC_TMCR_SS_10G (OSI_BIT(31) | OSI_BIT(30) | OSI_BIT(29))
#define MGBE_MAC_TMCR_SS_SPEED_25G OSI_BIT(29)
#define MGBE_MAC_TMCR_TE OSI_BIT(0)
#define MGBE_MAC_RMCR_RE OSI_BIT(0)
#define MGBE_MTL_TXQ_SIZE_SHIFT 16U
@@ -610,6 +621,42 @@
#define MGBE_MAC_EXT_CNF_DDS OSI_BIT(7)
/* TX timestamp */
#define MGBE_MAC_TSS_TXTSC OSI_BIT(15)
/* MGBE DMA IND CTRL register field masks */
#define MGBE_DMA_INDIR_CTRL_MSEL_MASK (OSI_BIT(24) | OSI_BIT(25) | \
OSI_BIT(26) | OSI_BIT(27))
#define MGBE_DMA_INDIR_CTRL_MSEL_SHIFT 24
#define MGBE_DMA_INDIR_CTRL_AOFF_MASK (OSI_BIT(8) | OSI_BIT(9) | \
OSI_BIT(10) | OSI_BIT(11) | \
OSI_BIT(12) | OSI_BIT(13) | \
OSI_BIT(14))
#define MGBE_DMA_INDIR_CTRL_AOFF_SHIFT 8
#define MGBE_DMA_INDIR_CTRL_CT OSI_BIT(1)
#define MGBE_DMA_INDIR_CTRL_OB OSI_BIT(0)
/* MGBE PDMA_CH(#i)_Tx/RxExtCfg register field masks */
#define MGBE_PDMA_CHX_TX_EXTCFG 0U
#define MGBE_PDMA_CHX_RX_EXTCFG 1U
#define MGBE_PDMA_CHX_TXRX_EXTCFG_ORRQ_SHIFT 8
#define MGBE_PDMA_CHX_TXRX_EXTCFG_P2TCMP_SHIFT 16
#define MGBE_PDMA_CHX_TXRX_EXTCFG_P2TCMP_MASK (OSI_BIT(16) | \
OSI_BIT(17) | OSI_BIT(18))
#define MGBE_PDMA_CHX_TXRX_EXTCFG_PBLX8 OSI_BIT(19)
#define MGBE_PDMA_CHX_TXRX_EXTCFG_PBL_MASK (OSI_BIT(24) | OSI_BIT(25) | \
OSI_BIT(26) | OSI_BIT(27) | \
OSI_BIT(28) | OSI_BIT(29))
#define MGBE_PDMA_CHX_TXRX_EXTCFG_PBL_SHIFT 24
#define MGBE_PDMA_CHX_EXTCFG_MAX_PBL 256U
#define MGBE_PDMA_CHX_EXTCFG_MAX_PBL_VAL 0x20000000U
/* MGBE PDMA_CH(#i)_Tx/RxDescCtrl register field masks */
#define MGBE_VDMA_CHX_TX_DESC_CTRL 4U
#define MGBE_VDMA_CHX_RX_DESC_CTRL 5U
#define MGBE_XDMA_CHX_TXRX_DESC_CTRL_DCSZ 3U
#define MGBE_XDMA_CHX_TXRX_DESC_CTRL_DCSZ_MASK (OSI_BIT(0) | OSI_BIT(1) | \
OSI_BIT(2))
#define MGBE_XDMA_CHX_TXRX_DESC_CTRL_DPS 4U
#define MGBE_XDMA_CHX_TXRX_DESC_CTRL_DPS_MASK (OSI_BIT(3) | OSI_BIT(4) | \
OSI_BIT(5))
#define MGBE_XDMA_CHX_TXRX_DESC_CTRL_DPS_SHIFT 3
/** @} */
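Worked field compositions from the masks above (values spelled out for clarity):

/* Indirect write to PDMA channel 5's TxExtCfg:
 *   MSEL = MGBE_PDMA_CHX_TX_EXTCFG (0) -> 0 << 24 = 0x00000000
 *   AOFF = 5                           -> 5 << 8  = 0x00000500
 *   OB   = OSI_BIT(0)                  ->           0x00000001
 *   ctrl = 0x00000501
 *
 * Tx/Rx descriptor control payload programmed by mgbe_configure_pdma:
 *   DCSZ  = 3 & 0x7         = 0x03
 *   DPS   = (4 << 3) & 0x38 = 0x20
 *   value = 0x23
 */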
#ifndef OSI_STRIPPED_LIB

View File

@@ -253,9 +253,11 @@ static nve32_t osi_hal_init_core_ops(struct osi_core_priv_data *const osi_core)
{
struct core_local *l_core = (struct core_local *)(void *)osi_core;
typedef void (*init_core_ops_arr)(struct core_ops *local_ops);
static struct core_ops g_ops[MAX_MAC_IP_TYPES];
init_core_ops_arr i_ops[MAX_MAC_IP_TYPES] = {
eqos_init_core_ops, mgbe_init_core_ops
static struct core_ops g_ops[OSI_MAX_MAC_IP_TYPES];
init_core_ops_arr i_ops[OSI_MAX_MAC_IP_TYPES][2] = {
{ eqos_init_core_ops, OSI_NULL },
{ mgbe_init_core_ops, OSI_NULL },
{ mgbe_init_core_ops, OSI_NULL }
};
nve32_t ret = -1;
@@ -269,13 +271,13 @@ static nve32_t osi_hal_init_core_ops(struct osi_core_priv_data *const osi_core)
goto exit;
}
if (osi_core->mac > OSI_MAC_HW_MGBE) {
if (osi_core->mac > OSI_MAC_HW_MGBE_T26X) {
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
"Invalid MAC HW type\n", 0ULL);
goto exit;
}
i_ops[osi_core->mac](&g_ops[osi_core->mac]);
i_ops[osi_core->mac][0](&g_ops[osi_core->mac]);
l_core->ops_p = &g_ops[osi_core->mac];
@@ -883,8 +885,9 @@ static nve32_t l3l4_find_match(const struct core_local *const l_core,
static nve32_t configure_l3l4_filter_valid_params(const struct osi_core_priv_data *const osi_core,
const struct osi_l3_l4_filter *const l3_l4)
{
const nveu32_t max_dma_chan[2] = {
const nveu32_t max_dma_chan[OSI_MAX_MAC_IP_TYPES] = {
OSI_EQOS_MAX_NUM_CHANS,
OSI_MGBE_MAX_NUM_CHANS,
OSI_MGBE_MAX_NUM_CHANS
};
nve32_t ret = -1;
@@ -1114,9 +1117,10 @@ static nve32_t configure_l3l4_filter(struct osi_core_priv_data *const osi_core,
nveu32_t filter_no = 0;
nveu32_t free_filter_no = UINT_MAX;
const struct core_local *l_core = (struct core_local *)(void *)osi_core;
const nveu32_t max_filter_no[2] = {
const nveu32_t max_filter_no[OSI_MAX_MAC_IP_TYPES] = {
EQOS_MAX_L3_L4_FILTER - 1U,
OSI_MGBE_MAX_L3_L4_FILTER - 1U,
OSI_MGBE_MAX_L3_L4_FILTER - 1U,
};
nve32_t ret = -1;

View File

@@ -45,6 +45,11 @@ static inline nve32_t xpcs_poll_for_an_complete(struct osi_core_priv_data *osi_c
nve32_t cond = 1;
nve32_t ret = 0;
if (osi_core->pre_sil == 0x1U) {
//TBD: T264 increase retry for uFPGA
retry = 10000;
}
/* 14. Poll for AN complete */
cond = 1;
count = 0;
@@ -279,27 +284,43 @@ static nve32_t xpcs_uphy_lane_bring_up(struct osi_core_priv_data *osi_core,
nveu32_t val = 0;
nveu32_t count;
nve32_t ret = 0;
const nveu32_t uphy_status_reg[OSI_MAX_MAC_IP_TYPES] = {
0,
XPCS_WRAP_UPHY_STATUS,
T26X_XPCS_WRAP_UPHY_STATUS
};
const nveu32_t uphy_init_ctrl_reg[OSI_MAX_MAC_IP_TYPES] = {
0,
XPCS_WRAP_UPHY_HW_INIT_CTRL,
T26X_XPCS_WRAP_UPHY_HW_INIT_CTRL
};
val = osi_readla(osi_core,
(nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_STATUS);
if ((val & XPCS_WRAP_UPHY_STATUS_TX_P_UP_STATUS) !=
XPCS_WRAP_UPHY_STATUS_TX_P_UP_STATUS) {
(nveu8_t *)xpcs_base + uphy_status_reg[osi_core->mac]);
if ((lane_init_en == XPCS_WRAP_UPHY_HW_INIT_CTRL_TX_EN) &&
((val & XPCS_WRAP_UPHY_STATUS_TX_P_UP_STATUS) ==
XPCS_WRAP_UPHY_STATUS_TX_P_UP_STATUS)) {
goto done;
} else {
val = osi_readla(osi_core,
(nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_HW_INIT_CTRL);
(nveu8_t *)xpcs_base +
uphy_init_ctrl_reg[osi_core->mac]);
val |= lane_init_en;
osi_writela(osi_core, val,
(nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_HW_INIT_CTRL);
(nveu8_t *)xpcs_base +
uphy_init_ctrl_reg[osi_core->mac]);
count = 0;
while (cond == COND_NOT_MET) {
if (count > retry) {
ret = -1;
goto fail;
goto done;
}
count++;
val = osi_readla(osi_core,
(nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_HW_INIT_CTRL);
(nveu8_t *)xpcs_base +
uphy_init_ctrl_reg[osi_core->mac]);
if ((val & lane_init_en) == OSI_NONE) {
/* exit loop */
cond = COND_MET;
@@ -313,7 +334,7 @@ static nve32_t xpcs_uphy_lane_bring_up(struct osi_core_priv_data *osi_core,
}
}
fail:
done:
return ret;
}
@@ -335,6 +356,11 @@ static nve32_t xpcs_check_pcs_lock_status(struct osi_core_priv_data *osi_core)
nveu32_t val = 0;
nveu32_t count;
nve32_t ret = 0;
const nveu32_t uphy_irq_sts_reg[OSI_MAX_MAC_IP_TYPES] = {
0,
XPCS_WRAP_INTERRUPT_STATUS,
T26X_XPCS_WRAP_INTERRUPT_STATUS
};
count = 0;
while (cond == COND_NOT_MET) {
@@ -345,7 +371,8 @@ static nve32_t xpcs_check_pcs_lock_status(struct osi_core_priv_data *osi_core)
count++;
val = osi_readla(osi_core,
(nveu8_t *)xpcs_base + XPCS_WRAP_IRQ_STATUS);
(nveu8_t *)xpcs_base +
uphy_irq_sts_reg[osi_core->mac]);
if ((val & XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS) ==
XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS) {
/* exit loop */
@@ -361,7 +388,8 @@ static nve32_t xpcs_check_pcs_lock_status(struct osi_core_priv_data *osi_core)
}
/* Clear the status */
osi_writela(osi_core, val, (nveu8_t *)xpcs_base + XPCS_WRAP_IRQ_STATUS);
osi_writela(osi_core, val, (nveu8_t *)xpcs_base +
uphy_irq_sts_reg[osi_core->mac]);
fail:
return ret;
}
@@ -394,6 +422,15 @@ static nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core)
goto fail;
}
if (osi_core->mac == OSI_MAC_HW_MGBE_T26X) {
if (xpcs_uphy_lane_bring_up(osi_core,
XPCS_WRAP_UPHY_HW_INIT_CTRL_RX_EN) < 0) {
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
"UPHY RX lane bring-up failed\n", 0ULL);
ret = -1;
goto fail;
}
} else {
if (l_core->lane_powered_up == OSI_ENABLE) {
goto step10;
}
@@ -459,9 +496,7 @@ static nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core)
ret = -1;
goto fail;
}
count++;
val = osi_readla(osi_core,
(nveu8_t *)osi_core->xpcs_base +
XPCS_WRAP_UPHY_RX_CONTROL_0_0);
@@ -533,6 +568,7 @@ step10:
/* Step16: wait for 30ms */
osi_core->osd_ops.udelay(30000U);
}
if (xpcs_check_pcs_lock_status(osi_core) < 0) {
if (l_core->lane_status == OSI_ENABLE) {
@@ -633,12 +669,15 @@ nve32_t xpcs_init(struct osi_core_priv_data *osi_core)
nveu32_t ctrl = 0;
nve32_t ret = 0;
if (osi_core->pre_sil == 0x1U) {
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
"Pre-silicon, skipping lane bring up", 0ULL);
} else {
if (xpcs_lane_bring_up(osi_core) < 0) {
ret = -1;
goto fail;
}
}
/* Switching to USXGMII Mode based on
* XPCS programming guideline 7.6
*/

View File

@@ -43,8 +43,12 @@
#define XPCS_VR_MII_AN_INTR_STS 0x7E0008
#define XPCS_WRAP_UPHY_HW_INIT_CTRL 0x8020
#define XPCS_WRAP_UPHY_STATUS 0x8044
#define XPCS_WRAP_IRQ_STATUS 0x8050
#define XPCS_WRAP_UPHY_RX_CONTROL_0_0 0x801C
#define XPCS_WRAP_INTERRUPT_STATUS 0x8050
#define T26X_XPCS_WRAP_UPHY_HW_INIT_CTRL 0x8034
#define T26X_XPCS_WRAP_UPHY_STATUS 0x8074
#define T26X_XPCS_WRAP_INTERRUPT_STATUS 0x8080
/** @} */
#ifndef OSI_STRIPPED_LIB
@@ -87,6 +91,7 @@
OSI_BIT(10))
#define XPCS_VR_XS_PCS_KR_CTRL_USXG_MODE_5G OSI_BIT(10)
#define XPCS_WRAP_UPHY_HW_INIT_CTRL_TX_EN OSI_BIT(0)
#define XPCS_WRAP_UPHY_HW_INIT_CTRL_RX_EN OSI_BIT(2)
#define XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS OSI_BIT(6)
#define XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_DATA_EN OSI_BIT(0)
#define XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_IDDQ OSI_BIT(4)
@@ -98,10 +103,11 @@
#define XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_PCS_PHY_RDY OSI_BIT(10)
#define XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_SW_OVRD OSI_BIT(31)
#define XPCS_WRAP_UPHY_STATUS_TX_P_UP_STATUS OSI_BIT(0)
#define XPCS_WRAP_UPHY_STATUS_RX_P_UP_STATUS OSI_BIT(2)
#ifdef HSI_SUPPORT
#define XPCS_WRAP_INTERRUPT_CONTROL 0x8048
#define XPCS_WRAP_INTERRUPT_STATUS 0x8050
#define T26X_XPCS_WRAP_INTERRUPT_CONTROL 0x8078
#define XPCS_CORE_CORRECTABLE_ERR OSI_BIT(10)
#define XPCS_CORE_UNCORRECTABLE_ERR OSI_BIT(9)
#define XPCS_REGISTER_PARITY_ERR OSI_BIT(8)

View File

@@ -145,7 +145,7 @@ static inline void osi_dma_writel(nveu32_t val, void *addr)
* @brief Maximum number of OSI DMA instances.
*/
#ifndef MAX_DMA_INSTANCES
#define MAX_DMA_INSTANCES 10U
#define MAX_DMA_INSTANCES OSI_MGBE_MAX_NUM_CHANS
#endif
/**
@@ -330,8 +330,9 @@ static inline void update_rx_tail_ptr(const struct osi_dma_priv_data *const osi_
nveu64_t tailptr)
{
nveu32_t chan = dma_chan & 0xFU;
const nveu32_t tail_ptr_reg[2] = {
const nveu32_t tail_ptr_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_RDTP(chan),
MGBE_DMA_CHX_RDTLP(chan),
MGBE_DMA_CHX_RDTLP(chan)
};

View File

@@ -39,6 +39,8 @@
* @brief MGBE DMA Channel register offsets
* @{
*/
#define MGBE_T26X_GLOBAL_DMA_STATUS 0x8800U
#define MGBE_DMA_CHX_TX_CTRL(x) ((0x0080U * (x)) + 0x3104U)
#define MGBE_DMA_CHX_RX_CTRL(x) ((0x0080U * (x)) + 0x3108U)
#ifndef OSI_STRIPPED_LIB
@@ -85,6 +87,11 @@
#define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_MCHAN 64U
#define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SHIFT 24U
#define MGBE_DMA_CHX_CTRL_PBL_SHIFT 16U
/* MGBE VDMA-to-TC mapping masks and shifts */
#define MGBE_TX_VDMA_TC_MASK (OSI_BIT(4) | OSI_BIT(5) | OSI_BIT(6))
#define MGBE_TX_VDMA_TC_SHIFT 4U
#define MGBE_RX_VDMA_TC_MASK (OSI_BIT(28) | OSI_BIT(29) | OSI_BIT(30))
#define MGBE_RX_VDMA_TC_SHIFT 28U
/** @} */
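/*
 * Illustrative sketch, assuming a hypothetical helper (not part of this
 * change): how the VDMA-to-TC fields above are programmed in
 * init_dma_channel(). On T26X, TC and PDMA are mapped 1:1, so the PDMA
 * channel number resolved from the VDMA is inserted into bits [6:4] of
 * DMA_CHX_TX_CTRL (and, on the Rx side, bits [30:28] of DMA_CHX_RX_CTRL)
 * by read-modify-write.
 */
static inline nveu32_t mgbe_set_tx_vdma_tc(nveu32_t ctrl, nveu32_t pdma_chan)
{
	ctrl &= ~MGBE_TX_VDMA_TC_MASK;
	ctrl |= (pdma_chan << MGBE_TX_VDMA_TC_SHIFT) & MGBE_TX_VDMA_TC_MASK;
	return ctrl;
}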
/**

View File

@@ -290,8 +290,16 @@ static nve32_t validate_func_ptrs(struct osi_dma_priv_data *osi_dma,
static nve32_t validate_ring_sz(const struct osi_dma_priv_data *osi_dma)
{
const nveu32_t default_rz[] = { EQOS_DEFAULT_RING_SZ, MGBE_DEFAULT_RING_SZ };
const nveu32_t max_rz[] = { EQOS_DEFAULT_RING_SZ, MGBE_MAX_RING_SZ };
const nveu32_t default_rz[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DEFAULT_RING_SZ,
MGBE_DEFAULT_RING_SZ,
MGBE_DEFAULT_RING_SZ
};
const nveu32_t max_rz[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DEFAULT_RING_SZ,
MGBE_MAX_RING_SZ,
MGBE_MAX_RING_SZ
};
nve32_t ret = 0;
if ((osi_dma->tx_ring_sz == 0U) ||
@@ -347,6 +355,12 @@ static nve32_t validate_dma_ops_params(struct osi_dma_priv_data *osi_dma)
ret = -1;
goto fail;
}
if (osi_dma->mac > OSI_MAC_HW_MGBE_T26X) {
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
"DMA: Invalid MAC HW type\n", 0ULL);
ret = -1;
goto fail;
}
if ((l_dma->magic_num != (nveu64_t)osi_dma) ||
(l_dma->init_done == OSI_ENABLE)) {
@@ -374,11 +388,12 @@ fail:
nve32_t osi_init_dma_ops(struct osi_dma_priv_data *osi_dma)
{
struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma;
static struct dma_chan_ops dma_gops[MAX_MAC_IP_TYPES];
static struct dma_chan_ops dma_gops[OSI_MAX_MAC_IP_TYPES];
#ifndef OSI_STRIPPED_LIB
typedef void (*init_ops_arr)(struct dma_chan_ops *temp);
const init_ops_arr i_ops[MAX_MAC_IP_TYPES] = {
eqos_init_dma_chan_ops, mgbe_init_dma_chan_ops
const init_ops_arr i_ops[OSI_MAX_MAC_IP_TYPES] = {
eqos_init_dma_chan_ops, mgbe_init_dma_chan_ops,
mgbe_init_dma_chan_ops
};
#endif
nve32_t ret = 0;
@@ -416,15 +431,54 @@ fail:
return ret;
}
static nve32_t vdma_to_pdma_map(const struct osi_dma_priv_data *const osi_dma,
nveu32_t vdma_chan, nveu32_t *const pdma_chan)
{
nve32_t ret = -1;
nveu32_t i, j;
nveu32_t vchan, pchan;
nveu32_t found = 0U;
if (pdma_chan == OSI_NULL) {
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
"pdma_chan is NULL\n", 0ULL);
goto done;
}
for (i = 0U; i < osi_dma->num_of_pdma; i++) {
pchan = osi_dma->pdma_data[i].pdma_chan;
for (j = 0U; j < osi_dma->pdma_data[i].num_vdma_chans; j++) {
vchan = osi_dma->pdma_data[i].vdma_chans[j];
if (vchan == vdma_chan) {
*pdma_chan = pchan;
ret = 0;
found = 1U;
break;
}
}
if (found == 1U) {
break;
}
}
if (found == 0U) {
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_HW_FAIL,
"vdma mapped to pdma not found, vdma", vdma_chan);
}
done:
return ret;
}
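/*
 * Usage sketch, assumed and not part of this change: the OSD layer is
 * expected to fill osi_dma->pdma_data[] with one entry per PDMA channel
 * plus the list of VDMA channels fanned into it; that is the table
 * vdma_to_pdma_map() walks above. Example mapping of VDMAs 0-3 onto
 * PDMA 0 (function name hypothetical):
 */
static void example_populate_pdma_map(struct osi_dma_priv_data *osi_dma)
{
	nveu32_t v;

	osi_dma->pdma_data[0].pdma_chan = 0U;
	osi_dma->pdma_data[0].num_vdma_chans = 4U;
	for (v = 0U; v < 4U; v++) {
		osi_dma->pdma_data[0].vdma_chans[v] = v;
	}
	osi_dma->num_of_pdma = 1U;
}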
static inline void start_dma(const struct osi_dma_priv_data *const osi_dma, nveu32_t dma_chan)
{
nveu32_t chan = dma_chan & 0xFU;
const nveu32_t tx_dma_reg[2] = {
const nveu32_t tx_dma_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_TX_CTRL(chan),
MGBE_DMA_CHX_TX_CTRL(chan),
MGBE_DMA_CHX_TX_CTRL(chan)
};
const nveu32_t rx_dma_reg[2] = {
const nveu32_t rx_dma_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_RX_CTRL(chan),
MGBE_DMA_CHX_RX_CTRL(chan),
MGBE_DMA_CHX_RX_CTRL(chan)
};
nveu32_t val;
@@ -444,26 +498,32 @@ static inline void start_dma(const struct osi_dma_priv_data *const osi_dma, nveu
static nve32_t init_dma_channel(const struct osi_dma_priv_data *const osi_dma,
nveu32_t dma_chan)
{
nveu32_t pdma_chan = 0xFFU;
nveu32_t chan = dma_chan & 0xFU;
nveu32_t riwt = osi_dma->rx_riwt & 0xFFFU;
const nveu32_t intr_en_reg[2] = {
const nveu32_t intr_en_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_INTR_ENA(chan),
MGBE_DMA_CHX_INTR_ENA(chan),
MGBE_DMA_CHX_INTR_ENA(chan)
};
const nveu32_t chx_ctrl_reg[2] = {
const nveu32_t chx_ctrl_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_CTRL(chan),
MGBE_DMA_CHX_CTRL(chan),
MGBE_DMA_CHX_CTRL(chan)
};
const nveu32_t tx_ctrl_reg[2] = {
const nveu32_t tx_ctrl_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_TX_CTRL(chan),
MGBE_DMA_CHX_TX_CTRL(chan)
MGBE_DMA_CHX_TX_CTRL(chan),
MGBE_DMA_CHX_TX_CTRL(chan)
};
const nveu32_t rx_ctrl_reg[2] = {
const nveu32_t rx_ctrl_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_RX_CTRL(chan),
MGBE_DMA_CHX_RX_CTRL(chan),
MGBE_DMA_CHX_RX_CTRL(chan)
};
const nveu32_t rx_wdt_reg[2] = {
const nveu32_t rx_wdt_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_RX_WDT(chan),
MGBE_DMA_CHX_RX_WDT(chan),
MGBE_DMA_CHX_RX_WDT(chan)
};
nveu32_t tx_pbl[2] = {
@@ -474,21 +534,31 @@ static nve32_t init_dma_channel(const struct osi_dma_priv_data *const osi_dma,
EQOS_DMA_CHX_RX_CTRL_RXPBL_RECOMMENDED,
((MGBE_RXQ_SIZE / osi_dma->num_dma_chans) / 2U)
};
const nveu32_t rwt_val[2] = {
const nveu32_t rwt_val[OSI_MAX_MAC_IP_TYPES] = {
(((riwt * (EQOS_AXI_CLK_FREQ / OSI_ONE_MEGA_HZ)) /
EQOS_DMA_CHX_RX_WDT_RWTU) & EQOS_DMA_CHX_RX_WDT_RWT_MASK),
(((riwt * ((nveu32_t)MGBE_AXI_CLK_FREQ / OSI_ONE_MEGA_HZ)) /
MGBE_DMA_CHX_RX_WDT_RWTU) & MGBE_DMA_CHX_RX_WDT_RWT_MASK),
(((riwt * ((nveu32_t)MGBE_AXI_CLK_FREQ / OSI_ONE_MEGA_HZ)) /
MGBE_DMA_CHX_RX_WDT_RWTU) & MGBE_DMA_CHX_RX_WDT_RWT_MASK)
};
const nveu32_t rwtu_val[2] = {
const nveu32_t rwtu_val[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_RX_WDT_RWTU_512_CYCLE,
MGBE_DMA_CHX_RX_WDT_RWTU_2048_CYCLE,
MGBE_DMA_CHX_RX_WDT_RWTU_2048_CYCLE
};
const nveu32_t rwtu_mask[2] = {
const nveu32_t rwtu_mask[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_RX_WDT_RWTU_MASK,
MGBE_DMA_CHX_RX_WDT_RWTU_MASK,
MGBE_DMA_CHX_RX_WDT_RWTU_MASK
};
const nveu32_t osp_tse[OSI_MAX_MAC_IP_TYPES] = {
(DMA_CHX_TX_CTRL_OSP | DMA_CHX_TX_CTRL_TSE),
(DMA_CHX_TX_CTRL_OSP | DMA_CHX_TX_CTRL_TSE),
DMA_CHX_TX_CTRL_TSE
};
const nveu32_t owrq = (MGBE_DMA_CHX_RX_CNTRL2_OWRQ_MCHAN / osi_dma->num_dma_chans);
//TBD: add more owrq_arr entries for T264?
const nveu32_t owrq_arr[OSI_MGBE_MAX_NUM_CHANS] = {
MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN, owrq, owrq, owrq,
owrq, owrq, owrq, owrq, owrq, owrq
@@ -518,18 +588,31 @@ static nve32_t init_dma_channel(const struct osi_dma_priv_data *const osi_dma,
val |= (DMA_CHX_INTR_TIE | DMA_CHX_INTR_RIE);
osi_dma_writel(val, (nveu8_t *)osi_dma->base + intr_en_reg[osi_dma->mac]);
if ((osi_dma->mac == OSI_MAC_HW_MGBE) ||
(osi_dma->mac == OSI_MAC_HW_EQOS)) {
/* Enable PBLx8 */
val = osi_dma_readl((nveu8_t *)osi_dma->base + chx_ctrl_reg[osi_dma->mac]);
val = osi_dma_readl((nveu8_t *)osi_dma->base +
chx_ctrl_reg[osi_dma->mac]);
val |= DMA_CHX_CTRL_PBLX8;
osi_dma_writel(val, (nveu8_t *)osi_dma->base + chx_ctrl_reg[osi_dma->mac]);
osi_dma_writel(val, (nveu8_t *)osi_dma->base +
chx_ctrl_reg[osi_dma->mac]);
}
if (osi_dma->mac == OSI_MAC_HW_MGBE_T26X) {
/* Find VDMA to PDMA mapping */
ret = vdma_to_pdma_map(osi_dma, dma_chan, &pdma_chan);
if (ret != 0) {
ret = -1;
goto exit_func;
}
}
/* Program OSP, TSO enable and TXPBL */
val = osi_dma_readl((nveu8_t *)osi_dma->base + tx_ctrl_reg[osi_dma->mac]);
val |= osp_tse[osi_dma->mac];
val |= (DMA_CHX_TX_CTRL_OSP | DMA_CHX_TX_CTRL_TSE);
if (osi_dma->mac == OSI_MAC_HW_EQOS) {
val |= tx_pbl[osi_dma->mac];
} else {
} else if (osi_dma->mac == OSI_MAC_HW_MGBE) {
/*
* Formula for TxPBL calculation is
* (TxPBL) < ((TXQSize - MTU)/(DATAWIDTH/8)) - 5
@@ -540,8 +623,16 @@ static nve32_t init_dma_channel(const struct osi_dma_priv_data *const osi_dma,
if (tx_pbl[osi_dma->mac] >= MGBE_DMA_CHX_MAX_PBL) {
val |= MGBE_DMA_CHX_MAX_PBL_VAL;
} else {
val |= ((tx_pbl[osi_dma->mac] / 8U) << MGBE_DMA_CHX_CTRL_PBL_SHIFT);
val |= ((tx_pbl[osi_dma->mac] / 8U) <<
MGBE_DMA_CHX_CTRL_PBL_SHIFT);
}
} else if (osi_dma->mac == OSI_MAC_HW_MGBE_T26X) {
/* Map Tx VDMAs to TC; TC and PDMA are mapped 1:1 */
val &= ~MGBE_TX_VDMA_TC_MASK;
val |= (pdma_chan << MGBE_TX_VDMA_TC_SHIFT) &
MGBE_TX_VDMA_TC_MASK;
} else {
/* do nothing */
}
osi_dma_writel(val, (nveu8_t *)osi_dma->base + tx_ctrl_reg[osi_dma->mac]);
@@ -556,35 +647,57 @@ static nve32_t init_dma_channel(const struct osi_dma_priv_data *const osi_dma,
val |= ((osi_dma->rx_buf_len - 30U) << DMA_CHX_RBSZ_SHIFT);
if (osi_dma->mac == OSI_MAC_HW_EQOS) {
val |= rx_pbl[osi_dma->mac];
} else {
} else if (osi_dma->mac == OSI_MAC_HW_MGBE) {
if (rx_pbl[osi_dma->mac] >= MGBE_DMA_CHX_MAX_PBL) {
val |= MGBE_DMA_CHX_MAX_PBL_VAL;
} else {
val |= ((rx_pbl[osi_dma->mac] / 8U) <<
MGBE_DMA_CHX_CTRL_PBL_SHIFT);
}
} else if (osi_dma->mac == OSI_MAC_HW_MGBE_T26X) {
/* Map Rx VDMAs to TC; TC and PDMA are mapped 1:1 */
val &= ~MGBE_RX_VDMA_TC_MASK;
val |= (pdma_chan << MGBE_RX_VDMA_TC_SHIFT) &
MGBE_RX_VDMA_TC_MASK;
} else {
/* do nothing */
}
osi_dma_writel(val, (nveu8_t *)osi_dma->base + rx_ctrl_reg[osi_dma->mac]);
if ((osi_dma->use_riwt == OSI_ENABLE) &&
(osi_dma->rx_riwt < UINT_MAX)) {
val = osi_dma_readl((nveu8_t *)osi_dma->base + rx_wdt_reg[osi_dma->mac]);
val = osi_dma_readl((nveu8_t *)osi_dma->base +
rx_wdt_reg[osi_dma->mac]);
val &= ~DMA_CHX_RX_WDT_RWT_MASK;
val |= rwt_val[osi_dma->mac];
osi_dma_writel(val, (nveu8_t *)osi_dma->base + rx_wdt_reg[osi_dma->mac]);
osi_dma_writel(val, (nveu8_t *)osi_dma->base +
rx_wdt_reg[osi_dma->mac]);
val = osi_dma_readl((nveu8_t *)osi_dma->base + rx_wdt_reg[osi_dma->mac]);
val = osi_dma_readl((nveu8_t *)osi_dma->base +
rx_wdt_reg[osi_dma->mac]);
val &= ~rwtu_mask[osi_dma->mac];
val |= rwtu_val[osi_dma->mac];
osi_dma_writel(val, (nveu8_t *)osi_dma->base + rx_wdt_reg[osi_dma->mac]);
osi_dma_writel(val, (nveu8_t *)osi_dma->base +
rx_wdt_reg[osi_dma->mac]);
}
if (osi_dma->mac == OSI_MAC_HW_MGBE) {
/* Update ORRQ in DMA_CH(#i)_Tx_Control2 register */
val = osi_dma_readl((nveu8_t *)osi_dma->base + MGBE_DMA_CHX_TX_CNTRL2(chan));
val |= (((MGBE_DMA_CHX_TX_CNTRL2_ORRQ_RECOMMENDED / osi_dma->num_dma_chans)) <<
val = osi_dma_readl((nveu8_t *)osi_dma->base +
MGBE_DMA_CHX_TX_CNTRL2(chan));
val |= (((MGBE_DMA_CHX_TX_CNTRL2_ORRQ_RECOMMENDED /
osi_dma->num_dma_chans)) <<
MGBE_DMA_CHX_TX_CNTRL2_ORRQ_SHIFT);
osi_dma_writel(val, (nveu8_t *)osi_dma->base + MGBE_DMA_CHX_TX_CNTRL2(chan));
osi_dma_writel(val, (nveu8_t *)osi_dma->base +
MGBE_DMA_CHX_TX_CNTRL2(chan));
/* Update OWRQ in DMA_CH(#i)_Rx_Control2 register */
val = osi_dma_readl((nveu8_t *)osi_dma->base + MGBE_DMA_CHX_RX_CNTRL2(chan));
val |= (owrq_arr[osi_dma->num_dma_chans - 1U] << MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SHIFT);
osi_dma_writel(val, (nveu8_t *)osi_dma->base + MGBE_DMA_CHX_RX_CNTRL2(chan));
val = osi_dma_readl((nveu8_t *)osi_dma->base +
MGBE_DMA_CHX_RX_CNTRL2(chan));
val |= (owrq_arr[osi_dma->num_dma_chans - 1U] <<
MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SHIFT);
osi_dma_writel(val, (nveu8_t *)osi_dma->base +
MGBE_DMA_CHX_RX_CNTRL2(chan));
}
/* success */
@@ -612,7 +725,7 @@ static nve32_t init_dma(const struct osi_dma_priv_data *osi_dma, nveu32_t channe
ret = intr_fn[OSI_DMA_INTR_ENABLE](osi_dma, VIRT_INTR_CHX_CNTRL(chan),
VIRT_INTR_CHX_STATUS(chan),
((osi_dma->mac == OSI_MAC_HW_MGBE) ?
((osi_dma->mac > OSI_MAC_HW_EQOS) ?
MGBE_DMA_CHX_STATUS(chan) : EQOS_DMA_CHX_STATUS(chan)),
OSI_BIT(OSI_DMA_CH_TX_INTR));
if (ret < 0) {
@@ -623,7 +736,7 @@ static nve32_t init_dma(const struct osi_dma_priv_data *osi_dma, nveu32_t channe
ret = intr_fn[OSI_DMA_INTR_ENABLE](osi_dma, VIRT_INTR_CHX_CNTRL(chan),
VIRT_INTR_CHX_STATUS(chan),
((osi_dma->mac == OSI_MAC_HW_MGBE) ?
((osi_dma->mac > OSI_MAC_HW_EQOS) ?
MGBE_DMA_CHX_STATUS(chan) : EQOS_DMA_CHX_STATUS(chan)),
OSI_BIT(OSI_DMA_CH_RX_INTR));
if (ret < 0) {
@@ -713,12 +826,14 @@ static inline void stop_dma(const struct osi_dma_priv_data *const osi_dma,
nveu32_t dma_chan)
{
nveu32_t chan = dma_chan & 0xFU;
const nveu32_t dma_tx_reg[2] = {
const nveu32_t dma_tx_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_TX_CTRL(chan),
MGBE_DMA_CHX_TX_CTRL(chan),
MGBE_DMA_CHX_TX_CTRL(chan)
};
const nveu32_t dma_rx_reg[2] = {
const nveu32_t dma_rx_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_RX_CTRL(chan),
MGBE_DMA_CHX_RX_CTRL(chan),
MGBE_DMA_CHX_RX_CTRL(chan)
};
nveu32_t val;
@@ -777,20 +892,34 @@ fail:
#ifdef OSI_CL_FTRACE
nveu32_t osi_get_global_dma_status_cnt = 0;
#endif /* OSI_CL_FTRACE */
nveu32_t osi_get_global_dma_status(struct osi_dma_priv_data *osi_dma)
nve32_t osi_get_global_dma_status(struct osi_dma_priv_data *osi_dma,
nveu32_t *const dma_status)
{
const nveu32_t global_dma_status_reg_cnt[OSI_MAX_MAC_IP_TYPES] = {1U, 1U, 3U};
struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma;
nveu32_t ret = 0U;
const nveu32_t global_dma_status_reg[OSI_MAX_MAC_IP_TYPES] = {
HW_GLOBAL_DMA_STATUS,
HW_GLOBAL_DMA_STATUS,
MGBE_T26X_GLOBAL_DMA_STATUS,
};
nve32_t ret = 0;
nveu32_t i;
#ifdef OSI_CL_FTRACE
if ((osi_get_global_dma_status_cnt % 1000) == 0)
slogf(0, 2, "%s : Function Entry\n", __func__);
#endif /* OSI_CL_FTRACE */
if (dma_validate_args(osi_dma, l_dma) < 0) {
if ((dma_validate_args(osi_dma, l_dma) < 0) || (dma_status == OSI_NULL)) {
ret = -1;
goto fail;
}
ret = osi_dma_readl((nveu8_t *)osi_dma->base + HW_GLOBAL_DMA_STATUS);
for (i = 0U; i < global_dma_status_reg_cnt[osi_dma->mac]; i++) {
if (i < UINT_MAX) {
dma_status[i] = osi_dma_readl((nveu8_t *)osi_dma->base +
(global_dma_status_reg[osi_dma->mac] + (i * 4U)));
}
}
fail:
#ifdef OSI_CL_FTRACE
if ((osi_get_global_dma_status_cnt++ % 1000) == 0)
@@ -831,7 +960,7 @@ nve32_t osi_handle_dma_intr(struct osi_dma_priv_data *osi_dma,
}
ret = intr_fn[en_dis](osi_dma, VIRT_INTR_CHX_CNTRL(chan),
VIRT_INTR_CHX_STATUS(chan), ((osi_dma->mac == OSI_MAC_HW_MGBE) ?
VIRT_INTR_CHX_STATUS(chan), ((osi_dma->mac > OSI_MAC_HW_EQOS) ?
MGBE_DMA_CHX_STATUS(chan) : EQOS_DMA_CHX_STATUS(chan)),
OSI_BIT(tx_rx));
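/*
 * Usage sketch, assumed caller and not part of this change:
 * osi_get_global_dma_status() now fills a caller-supplied buffer instead
 * of returning the status word. Legacy EQOS/MGBE produce one word, while
 * T26X reads three consecutive words starting at
 * MGBE_T26X_GLOBAL_DMA_STATUS (0x8800); sizing the buffer for the worst
 * case keeps a single call site for all MAC types.
 */
static nve32_t example_read_global_dma_status(struct osi_dma_priv_data *osi_dma)
{
	/* dma_status[0] is valid for all MACs; [1]/[2] only on T26X */
	nveu32_t dma_status[3] = { 0U, 0U, 0U };

	return osi_get_global_dma_status(osi_dma, dma_status);
}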

View File

@@ -32,7 +32,7 @@
#endif /* OSI_DEBUG */
/** DMA descriptor operations */
static struct desc_ops d_ops[MAX_MAC_IP_TYPES];
static struct desc_ops d_ops[OSI_MAX_MAC_IP_TYPES];
#if defined OSI_DEBUG && !defined OSI_STRIPPED_LIB
static inline void dump_rx_descriptors(struct osi_dma_priv_data *osi_dma,
@@ -1056,7 +1056,7 @@ static inline void set_swcx_pkt_id_for_ptp(struct osi_dma_priv_data *osi_dma,
nveu32_t pkt_id)
{
if (((tx_pkt_cx->flags & OSI_PKT_CX_PTP) == OSI_PKT_CX_PTP) &&
(osi_dma->mac == OSI_MAC_HW_MGBE)) {
(osi_dma->mac > OSI_MAC_HW_EQOS)) {
last_swcx->flags |= OSI_PKT_CX_PTP;
last_swcx->pktid = pkt_id;
}
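/*
 * Note: this change repeatedly rewrites (mac == OSI_MAC_HW_MGBE) as
 * (mac > OSI_MAC_HW_EQOS) so that both MGBE variants
 * (OSI_MAC_HW_MGBE = 1, OSI_MAC_HW_MGBE_T26X = 2) take the MGBE path.
 * A minimal sketch of the idiom as a hypothetical helper:
 */
static inline nveu32_t osi_mac_is_mgbe(nveu32_t mac)
{
	return (mac > OSI_MAC_HW_EQOS) ? 1U : 0U;
}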
@@ -1085,8 +1085,9 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma,
nveu32_t f_idx = tx_ring->cur_tx_idx;
#endif /* OSI_DEBUG */
nveu32_t chan = dma_chan & 0xFU;
const nveu32_t tail_ptr_reg[2] = {
const nveu32_t tail_ptr_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_TDTP(chan),
MGBE_DMA_CHX_TDTLP(chan),
MGBE_DMA_CHX_TDTLP(chan)
};
nve32_t cntx_desc_consumed;
@@ -1131,7 +1132,7 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma,
osi_dma->ptp_flag, osi_dma->mac);
if (cntx_desc_consumed == 1) {
if (((tx_pkt_cx->flags & OSI_PKT_CX_PTP) == OSI_PKT_CX_PTP) &&
(osi_dma->mac == OSI_MAC_HW_MGBE)) {
(osi_dma->mac > OSI_MAC_HW_EQOS)) {
/* mark packet id valid */
tx_desc->tdes3 |= TDES3_PIDV;
if ((osi_dma->ptp_flag & OSI_PTP_SYNC_ONESTEP) ==
@@ -1254,19 +1255,22 @@ static nve32_t rx_dma_desc_initialization(const struct osi_dma_priv_data *const
nveu32_t dma_chan)
{
nveu32_t chan = dma_chan & 0xFU;
const nveu32_t start_addr_high_reg[2] = {
const nveu32_t start_addr_high_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_RDLH(chan),
MGBE_DMA_CHX_RDLH(chan),
MGBE_DMA_CHX_RDLH(chan)
};
const nveu32_t start_addr_low_reg[2] = {
const nveu32_t start_addr_low_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_RDLA(chan),
MGBE_DMA_CHX_RDLA(chan),
MGBE_DMA_CHX_RDLA(chan)
};
const nveu32_t ring_len_reg[2] = {
const nveu32_t ring_len_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_RDRL(chan),
MGBE_DMA_CHX_RX_CNTRL2(chan),
MGBE_DMA_CHX_RX_CNTRL2(chan)
};
const nveu32_t mask[2] = { 0x3FFU, 0x3FFFU };
const nveu32_t mask[OSI_MAX_MAC_IP_TYPES] = { 0x3FFU, 0x3FFFU, 0x3FFFU };
struct osi_rx_ring *rx_ring = OSI_NULL;
struct osi_rx_desc *rx_desc = OSI_NULL;
struct osi_rx_swcx *rx_swcx = OSI_NULL;
@@ -1396,19 +1400,22 @@ static inline void set_tx_ring_len_and_start_addr(const struct osi_dma_priv_data
nveu32_t len)
{
nveu32_t chan = dma_chan & 0xFU;
const nveu32_t ring_len_reg[2] = {
const nveu32_t ring_len_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_TDRL(chan),
MGBE_DMA_CHX_TX_CNTRL2(chan),
MGBE_DMA_CHX_TX_CNTRL2(chan)
};
const nveu32_t start_addr_high_reg[2] = {
const nveu32_t start_addr_high_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_TDLH(chan),
MGBE_DMA_CHX_TDLH(chan),
MGBE_DMA_CHX_TDLH(chan)
};
const nveu32_t start_addr_low_reg[2] = {
const nveu32_t start_addr_low_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_TDLA(chan),
MGBE_DMA_CHX_TDLA(chan),
MGBE_DMA_CHX_TDLA(chan)
};
const nveu32_t mask[2] = { 0x3FFU, 0x3FFFU };
const nveu32_t mask[OSI_MAX_MAC_IP_TYPES] = { 0x3FFU, 0x3FFFU, 0x3FFFU };
nveu32_t val;
/* Program ring length */
@@ -1516,8 +1523,8 @@ void init_desc_ops(const struct osi_dma_priv_data *const osi_dma)
{
typedef void (*desc_ops_arr)(struct desc_ops *p_ops);
const desc_ops_arr desc_ops_a[2] = {
eqos_init_desc_ops, mgbe_init_desc_ops
const desc_ops_arr desc_ops_a[OSI_MAX_MAC_IP_TYPES] = {
eqos_init_desc_ops, mgbe_init_desc_ops, mgbe_init_desc_ops
};
desc_ops_a[osi_dma->mac](&d_ops[osi_dma->mac]);