nvethernetrm: add support for MGBE initialization

Adds MAC CORE and DMA initialization support for
MGBE MAC Controller.

Bug 200548572

Change-Id: I6796229852b47ff0748a848a6dbe9addab6ab74f
Signed-off-by: Bhadram Varka <vbhadram@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/kernel/nvethernetrm/+/2250401
Author: Bhadram Varka
Date:   2019-09-16 13:53:44 +05:30
parent 2f4dfb0594
commit 731abcdc1e
17 changed files with 1803 additions and 58 deletions


@@ -534,25 +534,25 @@ struct osi_mmc_counters {
  */
 struct osi_xtra_stat_counters {
 	/** RX buffer unavailable irq count */
-	nveu64_t rx_buf_unavail_irq_n[OSI_EQOS_MAX_NUM_QUEUES];
+	nveu64_t rx_buf_unavail_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
 	/** Transmit Process Stopped irq count */
-	nveu64_t tx_proc_stopped_irq_n[OSI_EQOS_MAX_NUM_QUEUES];
+	nveu64_t tx_proc_stopped_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
 	/** Transmit Buffer Unavailable irq count */
-	nveu64_t tx_buf_unavail_irq_n[OSI_EQOS_MAX_NUM_QUEUES];
+	nveu64_t tx_buf_unavail_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
 	/** Receive Process Stopped irq count */
-	nveu64_t rx_proc_stopped_irq_n[OSI_EQOS_MAX_NUM_QUEUES];
+	nveu64_t rx_proc_stopped_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
 	/** Receive Watchdog Timeout irq count */
 	nveu64_t rx_watchdog_irq_n;
 	/** Fatal Bus Error irq count */
 	nveu64_t fatal_bus_error_irq_n;
 	/** rx skb allocation failure count */
-	nveu64_t re_alloc_rxbuf_failed[OSI_EQOS_MAX_NUM_QUEUES];
+	nveu64_t re_alloc_rxbuf_failed[OSI_MGBE_MAX_NUM_QUEUES];
 	/** TX per channel interrupt count */
-	nveu64_t tx_normal_irq_n[OSI_EQOS_MAX_NUM_QUEUES];
+	nveu64_t tx_normal_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
 	/** TX per channel SW timer callback count */
-	nveu64_t tx_usecs_swtimer_n[OSI_EQOS_MAX_NUM_QUEUES];
+	nveu64_t tx_usecs_swtimer_n[OSI_MGBE_MAX_NUM_QUEUES];
 	/** RX per channel interrupt count */
-	nveu64_t rx_normal_irq_n[OSI_EQOS_MAX_NUM_QUEUES];
+	nveu64_t rx_normal_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
 	/** link connect count */
 	nveu64_t link_connect_count;
 	/** link disconnect count */
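Why these arrays grew: sizing the per-queue stats by the MGBE maximum (10) rather than the EQOS maximum (4) keeps a single structure layout for both controllers, with EQOS simply leaving the upper entries unused. A minimal sketch of the invariant this relies on (illustrative only, not part of the patch):

_Static_assert(OSI_MGBE_MAX_NUM_QUEUES >= OSI_EQOS_MAX_NUM_QUEUES,
	       "per-queue stats arrays must cover the widest MAC");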


@@ -142,15 +142,25 @@
 #define OSI_MAX_MTU_SIZE 16383U
 #define EQOS_DMA_CHX_STATUS(x) ((0x0080U * (x)) + 0x1160U)
+#define MGBE_DMA_CHX_STATUS(x) ((0x0080U * (x)) + 0x3160U)
+#define EQOS_DMA_CHX_IER(x) ((0x0080U * (x)) + 0x1134U)
 /* FIXME add logic based on HW version */
 #define OSI_EQOS_MAX_NUM_CHANS 4U
 #define OSI_EQOS_MAX_NUM_QUEUES 4U
+#define OSI_MGBE_MAX_NUM_CHANS 10U
+#define OSI_MGBE_MAX_NUM_QUEUES 10U
+/* HW supports 8 Hash table regs, but eqos_validate_core_regs only checks 4 */
+#define OSI_EQOS_MAX_HASH_REGS 4U
 #define MAC_VERSION 0x110
 #define MAC_VERSION_SNVER_MASK 0x7FU
 #define OSI_MAC_HW_EQOS 0U
+#define OSI_MAC_HW_MGBE 1U
+#define OSI_ETH_ALEN 6U
+#define OSI_MAX_VM_IRQS 5U
 #define OSI_NULL ((void *)0)
 #define OSI_ENABLE 1U
@@ -161,8 +171,28 @@
 #define OSI_EQOS_MAC_4_10 0x41U
 #define OSI_EQOS_MAC_5_00 0x50U
+#define OSI_EQOS_MAC_5_10 0x51U
 #define OSI_EQOS_MAC_5_30 0x53U
+#define OSI_MGBE_MAC_3_00 0x30U
+#define OSI_MGBE_MAC_3_10 0x31U
 #define OSI_MAX_VM_IRQS 5U
+#define OSI_IP4_FILTER 0U
+#define OSI_IP6_FILTER 1U
+
+#define CHECK_CHAN_BOUND(chan) \
+{ \
+	if ((chan) >= OSI_EQOS_MAX_NUM_CHANS) { \
+		return; \
+	} \
+}
+
+#define MGBE_CHECK_CHAN_BOUND(chan) \
+{ \
+	if ((chan) >= OSI_MGBE_MAX_NUM_CHANS) { \
+		return; \
+	} \
+}
+
 #ifndef OSI_STRIPPED_LIB
 #define OSI_L2_FILTER_INDEX_ANY 127U
@@ -176,6 +206,10 @@
 #define MAX_ETH_FRAME_LEN_DEFAULT \
 	(NV_ETH_FRAME_LEN + NV_ETH_FCS_LEN + NV_VLAN_HLEN)
+#define OSI_MTU_SIZE_16K 16000U
+#define OSI_MTU_SIZE_8K 8000U
+#define OSI_MTU_SIZE_4K 4000U
+#define OSI_MTU_SIZE_2K 2000U
 #define OSI_INVALID_CHAN_NUM 0xFFU
 #endif /* OSI_STRIPPED_LIB */
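Note that the two bound-check macros above expand to a bare return;, so they only work inside functions returning void: callers get a silent early-out instead of an out-of-range, channel-indexed register access. A usage sketch (hypothetical helper, illustrative only):

static void example_intr_helper(void *addr, nveu32_t chan)
{
	/* drop out-of-range channels before any chan-indexed MMIO */
	MGBE_CHECK_CHAN_BOUND(chan);
	/* ... accesses indexed by chan are now known to be in bounds ... */
}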


@@ -669,11 +669,11 @@ struct osi_core_priv_data {
 	/** Number of MTL queues enabled in MAC */
 	nveu32_t num_mtl_queues;
 	/** Array of MTL queues */
-	nveu32_t mtl_queues[OSI_EQOS_MAX_NUM_CHANS];
+	nveu32_t mtl_queues[OSI_MGBE_MAX_NUM_CHANS];
 	/** List of MTL Rx queue modes that need to be enabled */
-	nveu32_t rxq_ctrl[OSI_EQOS_MAX_NUM_CHANS];
+	nveu32_t rxq_ctrl[OSI_MGBE_MAX_NUM_CHANS];
 	/** Rx MTL Queue mapping based on User Priority field */
-	nveu32_t rxq_prio[OSI_EQOS_MAX_NUM_CHANS];
+	nveu32_t rxq_prio[OSI_MGBE_MAX_NUM_CHANS];
 	/** MAC HW type (EQOS or MGBE) based on DT compatible */
 	nveu32_t mac;
 	/** MAC version */


@@ -387,11 +387,11 @@ struct osi_tx_ring {
  */
 struct osi_xtra_dma_stat_counters {
 	/** Per Q TX packet count */
-	nveu64_t q_tx_pkt_n[OSI_EQOS_MAX_NUM_QUEUES];
+	nveu64_t q_tx_pkt_n[OSI_MGBE_MAX_NUM_QUEUES];
 	/** Per Q RX packet count */
-	nveu64_t q_rx_pkt_n[OSI_EQOS_MAX_NUM_QUEUES];
+	nveu64_t q_rx_pkt_n[OSI_MGBE_MAX_NUM_QUEUES];
 	/** Per Q TX complete call count */
-	nveu64_t tx_clean_n[OSI_EQOS_MAX_NUM_QUEUES];
+	nveu64_t tx_clean_n[OSI_MGBE_MAX_NUM_QUEUES];
 	/** Total number of tx packets count */
 	nveu64_t tx_pkt_n;
 	/** Total number of rx packets count */
@@ -413,7 +413,7 @@ struct osi_vm_irq_data {
 	/** Number of VM channels per VM IRQ */
 	nveu32_t num_vm_chans;
 	/** Array of VM channel list */
-	nveu32_t vm_chans[OSI_EQOS_MAX_NUM_CHANS];
+	nveu32_t vm_chans[OSI_MGBE_MAX_NUM_CHANS];
 };
 
 /**
@@ -443,9 +443,9 @@ struct osd_dma_ops {
  */
 struct osi_dma_priv_data {
 	/** Array of pointers to DMA Tx channel Ring */
-	struct osi_tx_ring *tx_ring[OSI_EQOS_MAX_NUM_CHANS];
+	struct osi_tx_ring *tx_ring[OSI_MGBE_MAX_NUM_CHANS];
 	/** Array of pointers to DMA Rx channel Ring */
-	struct osi_rx_ring *rx_ring[OSI_EQOS_MAX_NUM_CHANS];
+	struct osi_rx_ring *rx_ring[OSI_MGBE_MAX_NUM_CHANS];
 	/** Memory mapped base address of MAC IP */
 	void *base;
 	/** Pointer to OSD private data structure */
@@ -455,7 +455,7 @@ struct osi_dma_priv_data {
 	/** Number of channels enabled in MAC */
 	nveu32_t num_dma_chans;
 	/** Array of supported DMA channels */
-	nveu32_t dma_chans[OSI_EQOS_MAX_NUM_CHANS];
+	nveu32_t dma_chans[OSI_MGBE_MAX_NUM_CHANS];
 	/** DMA Rx channel buffer length at HW level */
 	nveu32_t rx_buf_len;
 	/** MTU size */
@@ -486,9 +486,9 @@ struct osi_dma_priv_data {
 	 * certain safety critical dma registers */
 	void *safety_config;
 	/** Array of DMA channel slot interval values from DT */
-	nveu32_t slot_interval[OSI_EQOS_MAX_NUM_CHANS];
+	nveu32_t slot_interval[OSI_MGBE_MAX_NUM_CHANS];
 	/** Array of DMA channel slot enabled status from DT */
-	nveu32_t slot_enabled[OSI_EQOS_MAX_NUM_CHANS];
+	nveu32_t slot_enabled[OSI_MGBE_MAX_NUM_CHANS];
 	/** number of VM IRQs */
 	nveu32_t num_vm_irqs;
 	/** Array of VM IRQs */
@@ -499,6 +499,8 @@ struct osi_dma_priv_data {
 	void *resv_buf_virt_addr;
 	/** Physical address of reserved DMA buffer */
 	nveu64_t resv_buf_phy_addr;
+	/** Tegra Pre-si platform info */
+	nveu32_t pre_si;
 };


@@ -53,4 +53,5 @@
 #define DECR_RX_DESC_INDEX(idx, i) ((idx) = ((idx) - (i)) & (RX_DESC_CNT - 1U))
 #endif /* !OSI_STRIPPED_LIB */
 /** @} */
+
 #endif /* INCLUDED_OSI_DMA_TXRX_H */


@@ -219,7 +219,8 @@ static inline nve32_t is_valid_mac_version(nveu32_t mac_ver)
 {
 	if ((mac_ver == OSI_EQOS_MAC_4_10) ||
 	    (mac_ver == OSI_EQOS_MAC_5_00) ||
-	    (mac_ver == OSI_EQOS_MAC_5_30)) {
+	    (mac_ver == OSI_EQOS_MAC_5_30) ||
+	    (mac_ver == OSI_MGBE_MAC_3_10)) {
 		return 1;
 	}


@@ -235,4 +235,17 @@ void eqos_init_core_ops(struct core_ops *ops);
  * - De-initialization: No
  */
 void ivc_init_core_ops(struct core_ops *ops);
+
+/**
+ * @brief mgbe_init_core_ops - Initialize MGBE core operations.
+ *
+ * @param[in] ops: Core operations pointer.
+ *
+ * @note
+ * API Group:
+ * - Initialization: Yes
+ * - Run time: No
+ * - De-initialization: No
+ */
+void mgbe_init_core_ops(struct core_ops *ops);
 #endif /* INCLUDED_CORE_LOCAL_H */

osi/core/mgbe_core.c (new file, 726 lines)

@@ -0,0 +1,726 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "../osi/common/common.h"
#include "../osi/common/type.h"
#include <osi_common.h>
#include <osi_core.h>
#include "mgbe_core.h"
#include "core_local.h"
/**
* @brief mgbe_poll_for_swr - Poll for software reset (SWR bit in DMA Mode)
*
* Algorithm: CAR reset will be issued through MAC reset pin.
* Waits for SWR reset to be cleared in DMA Mode register.
*
* @param[in] osi_core: OSI core private data structure.
*
* @note MAC needs to be out of reset and proper clock configured.
*
* @retval 0 on success
* @retval -1 on failure.
*/
static nve32_t mgbe_poll_for_swr(struct osi_core_priv_data *const osi_core)
{
void *addr = osi_core->base;
nveu32_t retry = 1000;
nveu32_t count;
nveu32_t dma_bmr = 0;
nve32_t cond = 1;
nveu32_t pre_si = osi_core->pre_si;
/* Performing software reset */
if (pre_si == 1U) {
osi_writel(OSI_ENABLE, (nveu8_t *)addr + MGBE_DMA_MODE);
}
/* Poll Until Poll Condition */
count = 0;
while (cond == 1) {
if (count > retry) {
return -1;
}
count++;
dma_bmr = osi_readl((nveu8_t *)addr + MGBE_DMA_MODE);
if ((dma_bmr & MGBE_DMA_MODE_SWR) == OSI_NONE) {
cond = 0;
} else {
/* sleep if SWR is set */
osi_core->osd_ops.msleep(1U);
}
}
return 0;
}
/**
* @brief mgbe_calculate_per_queue_fifo - Calculate per queue FIFO size
*
* Algorithm: The total Tx/Rx FIFO size read from the MAC HW is shared
* equally among the queues that are configured.
*
* @param[in] fifo_size: Total Tx/RX HW FIFO size.
* @param[in] queue_count: Total number of Queues configured.
*
* @note MAC has to be out of reset.
*
* @retval Queue size that needs to be programmed.
*/
static nveu32_t mgbe_calculate_per_queue_fifo(nveu32_t fifo_size,
nveu32_t queue_count)
{
nveu32_t q_fifo_size = 0; /* calculated fifo size per queue */
nveu32_t p_fifo = 0; /* per queue fifo size program value */
if (queue_count == 0U) {
return 0U;
}
/* calculate Tx/Rx fifo share per queue */
switch (fifo_size) {
case 0:
case 1:
case 2:
case 3:
q_fifo_size = FIFO_SIZE_KB(1U);
break;
case 4:
q_fifo_size = FIFO_SIZE_KB(2U);
break;
case 5:
q_fifo_size = FIFO_SIZE_KB(4U);
break;
case 6:
q_fifo_size = FIFO_SIZE_KB(8U);
break;
case 7:
q_fifo_size = FIFO_SIZE_KB(16U);
break;
case 8:
q_fifo_size = FIFO_SIZE_KB(32U);
break;
case 9:
q_fifo_size = FIFO_SIZE_KB(64U);
break;
case 10:
q_fifo_size = FIFO_SIZE_KB(128U);
break;
case 11:
q_fifo_size = FIFO_SIZE_KB(256U);
break;
default:
q_fifo_size = FIFO_SIZE_KB(1U);
break;
}
q_fifo_size = q_fifo_size / queue_count;
if (q_fifo_size < UINT_MAX) {
p_fifo = (q_fifo_size / 256U) - 1U;
}
return p_fifo;
}
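A worked pass through the arithmetic above, with illustrative inputs: an encoded fifo_size of 9 decodes to FIFO_SIZE_KB(64) = 65536 bytes. With 10 queues configured, q_fifo_size = 65536 / 10 = 6553 and p_fifo = (6553 / 256) - 1 = 24. Since the MTL queue-size fields count in 256-byte units minus one, the hardware allocates (24 + 1) * 256 = 6400 bytes per queue, so integer rounding costs less than one 256-byte block per queue.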
/**
* @brief mgbe_flush_mtl_tx_queue - Flush MTL Tx queue
*
* @param[in] osi_core: OSI core private data structure.
* @param[in] qinx: MTL queue index.
*
* @note 1) MAC should be out of reset and clocks enabled.
* 2) hw core initialized. see osi_hw_core_init().
*
* @retval 0 on success
* @retval -1 on failure.
*/
static nve32_t mgbe_flush_mtl_tx_queue(struct osi_core_priv_data *const osi_core,
const nveu32_t qinx)
{
void *addr = osi_core->base;
nveu32_t retry = 1000;
nveu32_t count;
nveu32_t value;
nve32_t cond = 1;
if (qinx >= OSI_MGBE_MAX_NUM_QUEUES) {
return -1;
}
/* Read Tx Q Operating Mode Register and flush TxQ */
value = osi_readl((nveu8_t *)addr +
MGBE_MTL_CHX_TX_OP_MODE(qinx));
value |= MGBE_MTL_QTOMR_FTQ;
osi_writel(value, (nveu8_t *)addr +
MGBE_MTL_CHX_TX_OP_MODE(qinx));
/* Poll Until FTQ bit resets for Successful Tx Q flush */
count = 0;
while (cond == 1) {
if (count > retry) {
return -1;
}
count++;
value = osi_readl((nveu8_t *)addr +
MGBE_MTL_CHX_TX_OP_MODE(qinx));
if ((value & MGBE_MTL_QTOMR_FTQ_LPOS) == OSI_NONE) {
cond = 0;
} else {
osi_core->osd_ops.msleep(1);
}
}
return 0;
}
/**
* @brief mgbe_configure_mtl_queue - Configure MTL Queue
*
* Algorithm: This takes care of configuring the below
* parameters for the MTL Queue
* 1) Mapping MTL Rx queue and DMA Rx channel
* 2) Flush TxQ
* 3) Enable Store and Forward mode for Tx, Rx
* 4) Configure Tx and Rx MTL Queue sizes
* 5) Configure TxQ weight
* 6) Enable Rx Queues
*
* @param[in] qinx: Queue number that need to be configured.
* @param[in] osi_core: OSI core private data.
* @param[in] tx_fifo: MTL TX queue size for a MTL queue.
* @param[in] rx_fifo: MTL RX queue size for a MTL queue.
*
* @note MAC has to be out of reset.
*
* @retval 0 on success
* @retval -1 on failure.
*/
static nve32_t mgbe_configure_mtl_queue(nveu32_t qinx,
struct osi_core_priv_data *osi_core,
nveu32_t tx_fifo,
nveu32_t rx_fifo)
{
nveu32_t value = 0;
nve32_t ret = 0;
/* Program ETSALG (802.1Qaz) and RAA in MTL_Operation_Mode
* register to initialize the MTL operation in case
* of multiple Tx and Rx queues default : ETSALG WRR RAA SP
*/
/* Program the priorities mapped to the Selected Traffic
* Classes in MTL_TC_Prty_Map0-3 registers. This register is
* to tell traffic class x should be blocked from transmitting
* for the specified pause time when a PFC packet is received
* with priorities matching the priorities programmed in this field
* default: 0x0
*/
/* Program the Transmit Selection Algorithm (TSA) in
* MTL_TC[n]_ETS_Control register for all the Selected
* Traffic Classes (n=0, 1, …, Max selected number of TCs - 1)
* Setting related to CBS will come here for TC.
* default: 0x0 SP
*/
ret = mgbe_flush_mtl_tx_queue(osi_core, qinx);
if (ret < 0) {
return ret;
}
value = (tx_fifo << MGBE_MTL_TXQ_SIZE_SHIFT);
/* Enable Store and Forward mode */
value |= MGBE_MTL_TSF;
/*TTC not applicable for TX*/
/* Enable TxQ */
value |= MGBE_MTL_TXQEN;
osi_writel(value, (nveu8_t *)
osi_core->base + MGBE_MTL_CHX_TX_OP_MODE(qinx));
/* read RX Q0 Operating Mode Register */
value = osi_readl((nveu8_t *)osi_core->base +
MGBE_MTL_CHX_RX_OP_MODE(qinx));
value |= (rx_fifo << MGBE_MTL_RXQ_SIZE_SHIFT);
/* Enable Store and Forward mode */
value |= MGBE_MTL_RSF;
osi_writel(value, (nveu8_t *)osi_core->base +
MGBE_MTL_CHX_RX_OP_MODE(qinx));
/* Transmit Queue weight */
value = osi_readl((nveu8_t *)osi_core->base +
MGBE_MTL_TXQ_QW(qinx));
value |= (MGBE_MTL_TXQ_QW_ISCQW + qinx);
osi_writel(value, (nveu8_t *)osi_core->base +
MGBE_MTL_TXQ_QW(qinx));
/* Enable Rx Queue Control */
value = osi_readl((nveu8_t *)osi_core->base +
MGBE_MAC_RQC0R);
value |= ((osi_core->rxq_ctrl[qinx] & MGBE_MAC_RXQC0_RXQEN_MASK) <<
(MGBE_MAC_RXQC0_RXQEN_SHIFT(qinx)));
osi_writel(value, (nveu8_t *)osi_core->base +
MGBE_MAC_RQC0R);
return 0;
}
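On the final RXQEN update above: each queue owns a 2-bit enable field in MGBE_MAC_RQC0R at shift (2 * qinx). Assuming rxq_ctrl[3] = 2U (the generic/DCB enable encoding used by Synopsys-style MACs), the OR-ed value works out to (2U & 0x3U) << 6 = 0x80, touching only queue 3's field.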
/**
* @brief mgbe_configure_mac - Configure MAC
*
* Algorithm: This takes care of configuring the below
* parameters for the MAC
* 1) Programming the MAC address
* 2) Enable required MAC control fields in MCR
* 3) Enable Multicast and Broadcast Queue
* 4) Disable MMC interrupts and Configure the MMC counters
* 5) Enable required MAC interrupts
*
* @param[in] osi_core: OSI core private data structure.
*
* @note MAC has to be out of reset.
*/
static void mgbe_configure_mac(struct osi_core_priv_data *osi_core)
{
nveu32_t value;
/* Update MAC address 0 high */
value = (((nveu32_t)osi_core->mac_addr[5] << 8U) |
((nveu32_t)osi_core->mac_addr[4]));
osi_writel(value, (nveu8_t *)osi_core->base + MGBE_MAC_MA0HR);
/* Update MAC address 0 Low */
value = (((nveu32_t)osi_core->mac_addr[3] << 24U) |
((nveu32_t)osi_core->mac_addr[2] << 16U) |
((nveu32_t)osi_core->mac_addr[1] << 8U) |
((nveu32_t)osi_core->mac_addr[0]));
osi_writel(value, (nveu8_t *)osi_core->base + MGBE_MAC_MA0LR);
/* TODO: Need to check if we need to enable anything in Tx configuration
* value = osi_readl((nveu8_t *)osi_core->base + MGBE_MAC_TMCR);
*/
/* Read MAC Rx Configuration Register */
value = osi_readl((nveu8_t *)osi_core->base + MGBE_MAC_RMCR);
/* Enable Automatic Pad or CRC Stripping */
/* Enable CRC stripping for Type packets */
/* Enable Rx checksum offload engine by default */
value |= MGBE_MAC_RMCR_ACS | MGBE_MAC_RMCR_CST | MGBE_MAC_RMCR_IPC;
osi_writel(value, (nveu8_t *)osi_core->base + MGBE_MAC_RMCR);
/* TODO: MCBC queue enable */
/* Disable all MMC interrupts */
/* Disable all MMC Tx interrupts */
osi_writel(OSI_NONE, (nveu8_t *)osi_core->base +
MGBE_MMC_TX_INTR_EN);
/* Disable all MMC RX interrupts */
osi_writel(OSI_NONE, (nveu8_t *)osi_core->base +
MGBE_MMC_RX_INTR_EN);
/* Configure MMC counters */
value = osi_readl((nveu8_t *)osi_core->base + MGBE_MMC_CNTRL);
value |= MGBE_MMC_CNTRL_CNTRST | MGBE_MMC_CNTRL_RSTONRD |
MGBE_MMC_CNTRL_CNTMCT | MGBE_MMC_CNTRL_CNTPRST;
osi_writel(value, (nveu8_t *)osi_core->base + MGBE_MMC_CNTRL);
/* Enable MAC interrupts */
/* Read MAC IMR Register */
value = osi_readl((nveu8_t *)osi_core->base + MGBE_MAC_IER);
/* RGSMIIIM - RGMII/SMII interrupt Enable */
/* TODO: LPI need to be enabled during EEE implementation */
value |= MGBE_IMR_RGSMIIIE;
osi_writel(value, (nveu8_t *)osi_core->base + MGBE_MAC_IER);
/* TODO: USP (user Priority) to RxQ Mapping */
}
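For the address programming at the top of mgbe_configure_mac(): the 6-byte MAC address is split across two registers, low-order bytes first. Taking 00:11:22:33:44:55 as an example (mac_addr[0] = 0x00 ... mac_addr[5] = 0x55), the writes above resolve to MA0HR = 0x00005544 and MA0LR = 0x33221100.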
/**
* @brief mgbe_configure_dma - Configure DMA
*
* Algorithm: This takes care of configuring the below
* parameters for the DMA
* 1) Programming different burst length for the DMA
* 2) Enable enhanced Address mode
* 3) Programming max read outstanding request limit
*
* @param[in] base: MGBE virtual base address.
*
* @note MAC has to be out of reset.
*/
static void mgbe_configure_dma(void *base)
{
nveu32_t value = 0;
/* AXI Burst Length 8*/
value |= MGBE_DMA_SBUS_BLEN8;
/* AXI Burst Length 16*/
value |= MGBE_DMA_SBUS_BLEN16;
/* Enhanced Address Mode Enable */
value |= MGBE_DMA_SBUS_EAME;
/* AXI Maximum Read Outstanding Request Limit = 31 */
value |= MGBE_DMA_SBUS_RD_OSR_LMT;
/* AXI Maximum Write Outstanding Request Limit = 31 */
value |= MGBE_DMA_SBUS_WR_OSR_LMT;
osi_writel(value, (nveu8_t *)base + MGBE_DMA_SBUS);
}
/**
* @brief mgbe_core_init - MGBE MAC, MTL and common DMA Initialization
*
* Algorithm: This function will take care of initializing MAC, MTL and
* common DMA registers.
*
* @param[in] osi_core: OSI core private data structure.
* @param[in] tx_fifo_size: MTL TX FIFO size
* @param[in] rx_fifo_size: MTL RX FIFO size
*
* @note 1) MAC should be out of reset. See osi_poll_for_swr() for details.
* 2) osi_core->base needs to be filled based on ioremap.
* 3) osi_core->num_mtl_queues needs to be filled.
* 4) osi_core->mtl_queues[qinx] need to be filled.
*
* @retval 0 on success
* @retval -1 on failure.
*/
static nve32_t mgbe_core_init(struct osi_core_priv_data *osi_core,
nveu32_t tx_fifo_size,
nveu32_t rx_fifo_size)
{
nve32_t ret = 0;
nveu32_t qinx = 0;
nveu32_t value = 0;
nveu32_t tx_fifo = 0;
nveu32_t rx_fifo = 0;
/* reset mmc counters */
osi_writel(MGBE_MMC_CNTRL_CNTRST, (nveu8_t *)osi_core->base +
MGBE_MMC_CNTRL);
/* Mapping MTL Rx queue and DMA Rx channel */
value = osi_readl((nveu8_t *)osi_core->base +
MGBE_MTL_RXQ_DMA_MAP0);
value |= MGBE_RXQ_TO_DMA_CHAN_MAP0;
osi_writel(value, (nveu8_t *)osi_core->base +
MGBE_MTL_RXQ_DMA_MAP0);
value = osi_readl((nveu8_t *)osi_core->base +
MGBE_MTL_RXQ_DMA_MAP1);
value |= MGBE_RXQ_TO_DMA_CHAN_MAP1;
osi_writel(value, (nveu8_t *)osi_core->base +
MGBE_MTL_RXQ_DMA_MAP1);
value = osi_readl((nveu8_t *)osi_core->base +
MGBE_MTL_RXQ_DMA_MAP2);
value |= MGBE_RXQ_TO_DMA_CHAN_MAP2;
osi_writel(value, (nveu8_t *)osi_core->base +
MGBE_MTL_RXQ_DMA_MAP2);
/* TODO: DCS enable */
/* Calculate value of Transmit queue fifo size to be programmed */
tx_fifo = mgbe_calculate_per_queue_fifo(tx_fifo_size,
osi_core->num_mtl_queues);
/* Calculate value of Receive queue fifo size to be programmed */
rx_fifo = mgbe_calculate_per_queue_fifo(rx_fifo_size,
osi_core->num_mtl_queues);
/* Configure MTL Queues */
/* TODO: Iterate over Number MTL queues need to be removed */
for (qinx = 0; qinx < osi_core->num_mtl_queues; qinx++) {
ret = mgbe_configure_mtl_queue(osi_core->mtl_queues[qinx],
osi_core, tx_fifo, rx_fifo);
if (ret < 0) {
return ret;
}
}
/* configure MGBE MAC HW */
mgbe_configure_mac(osi_core);
/* configure MGBE DMA */
mgbe_configure_dma(osi_core->base);
return ret;
}
/**
* @brief mgbe_handle_mac_intrs - Handle MAC interrupts
*
* Algorithm: This function takes care of handling the
* MAC interrupts, which include speed and mode detection.
*
* @param[in] osi_core: OSI core private data structure.
* @param[in] dma_isr: DMA ISR register read value.
*
* @note MAC interrupts need to be enabled
*/
static void mgbe_handle_mac_intrs(struct osi_core_priv_data *osi_core,
nveu32_t dma_isr)
{
#if 0 /* TODO: Need to re-visit */
mac_isr = osi_readl((nveu8_t *)osi_core->base + MGBE_MAC_ISR);
/* Handle MAC interrupts */
if ((dma_isr & MGBE_DMA_ISR_MACIS) != MGBE_DMA_ISR_MACIS) {
return;
}
/* TODO: Duplex/speed settings - it's not the same as EQOS for MGBE */
#endif
}
/**
* @brief mgbe_update_dma_sr_stats - stats for dma_status error
*
* Algorithm: increment error stats based on the corresponding bit field.
*
* @param[in] osi_core: OSI core private data structure.
* @param[in] dma_sr: DMA status register read value
* @param[in] qinx: Queue index
*/
static inline void mgbe_update_dma_sr_stats(struct osi_core_priv_data *osi_core,
nveu32_t dma_sr, nveu32_t qinx)
{
nveu64_t val;
if ((dma_sr & MGBE_DMA_CHX_STATUS_RBU) == MGBE_DMA_CHX_STATUS_RBU) {
val = osi_core->xstats.rx_buf_unavail_irq_n[qinx];
osi_core->xstats.rx_buf_unavail_irq_n[qinx] =
osi_update_stats_counter(val, 1U);
}
if ((dma_sr & MGBE_DMA_CHX_STATUS_TPS) == MGBE_DMA_CHX_STATUS_TPS) {
val = osi_core->xstats.tx_proc_stopped_irq_n[qinx];
osi_core->xstats.tx_proc_stopped_irq_n[qinx] =
osi_update_stats_counter(val, 1U);
}
if ((dma_sr & MGBE_DMA_CHX_STATUS_TBU) == MGBE_DMA_CHX_STATUS_TBU) {
val = osi_core->xstats.tx_buf_unavail_irq_n[qinx];
osi_core->xstats.tx_buf_unavail_irq_n[qinx] =
osi_update_stats_counter(val, 1U);
}
if ((dma_sr & MGBE_DMA_CHX_STATUS_RPS) == MGBE_DMA_CHX_STATUS_RPS) {
val = osi_core->xstats.rx_proc_stopped_irq_n[qinx];
osi_core->xstats.rx_proc_stopped_irq_n[qinx] =
osi_update_stats_counter(val, 1U);
}
if ((dma_sr & MGBE_DMA_CHX_STATUS_FBE) == MGBE_DMA_CHX_STATUS_FBE) {
val = osi_core->xstats.fatal_bus_error_irq_n;
osi_core->xstats.fatal_bus_error_irq_n =
osi_update_stats_counter(val, 1U);
}
}
/**
* @brief mgbe_handle_common_intr - Handles common interrupt.
*
* Algorithm: Clear common interrupt source.
*
* @param[in] osi_core: OSI core private data structure.
*
* @note MAC should be init and started. see osi_start_mac()
*/
static void mgbe_handle_common_intr(struct osi_core_priv_data *osi_core)
{
void *base = osi_core->base;
nveu32_t dma_isr = 0;
nveu32_t qinx = 0;
nveu32_t i = 0;
nveu32_t dma_sr = 0;
nveu32_t dma_ier = 0;
dma_isr = osi_readl((nveu8_t *)base + MGBE_DMA_ISR);
if (dma_isr == OSI_NONE) {
return;
}
//FIXME Need to check how we can get the DMA channel here instead of
//MTL Queues
if ((dma_isr & MGBE_DMA_ISR_DCH0_DCH15_MASK) != OSI_NONE) {
/* Handle Non-TI/RI interrupts */
for (i = 0; i < osi_core->num_mtl_queues; i++) {
qinx = osi_core->mtl_queues[i];
if (qinx >= OSI_MGBE_MAX_NUM_CHANS) {
continue;
}
/* read dma channel status register */
dma_sr = osi_readl((nveu8_t *)base +
MGBE_DMA_CHX_STATUS(qinx));
/* read dma channel interrupt enable register */
dma_ier = osi_readl((nveu8_t *)base +
MGBE_DMA_CHX_IER(qinx));
/* process only those interrupts which we
* have enabled.
*/
dma_sr = (dma_sr & dma_ier);
/* mask off RI and TI */
dma_sr &= ~(MGBE_DMA_CHX_STATUS_TI |
MGBE_DMA_CHX_STATUS_RI);
if (dma_sr == OSI_NONE) {
continue;
}
/* ack non TI/RI interrupts */
osi_writel(dma_sr, (nveu8_t *)base +
MGBE_DMA_CHX_STATUS(qinx));
mgbe_update_dma_sr_stats(osi_core, dma_sr, qinx);
}
}
mgbe_handle_mac_intrs(osi_core, dma_isr);
}
/**
* @brief mgbe_pad_calibrate - PAD calibration
*
* Algorithm: Since PAD calibration is not applicable for MGBE
* it returns zero.
*
* @param[in] osi_core: OSI core private data structure.
*
* @retval zero always
*/
static nve32_t mgbe_pad_calibrate(struct osi_core_priv_data *const osi_core)
{
return 0;
}
/**
* @brief mgbe_start_mac - Start MAC Tx/Rx engine
*
* Algorithm: Enable MAC Transmitter and Receiver
*
* @param[in] osi_core: OSI core private data structure.
*
* @note 1) MAC init should be complete. See osi_hw_core_init() and
* osi_hw_dma_init()
*/
static void mgbe_start_mac(struct osi_core_priv_data *const osi_core)
{
nveu32_t value;
void *addr = osi_core->base;
value = osi_readl((nveu8_t *)addr + MGBE_MAC_TMCR);
/* Enable MAC Transmit */
value |= MGBE_MAC_TMCR_TE;
osi_writel(value, (nveu8_t *)addr + MGBE_MAC_TMCR);
value = osi_readl((nveu8_t *)addr + MGBE_MAC_RMCR);
/* Enable MAC Receive */
value |= MGBE_MAC_RMCR_RE;
osi_writel(value, (nveu8_t *)addr + MGBE_MAC_RMCR);
}
/**
* @brief mgbe_stop_mac - Stop MAC Tx/Rx engine
*
* Algorithm: Disables MAC Transmitter and Receiver
*
* @param[in] osi_core: OSI core private data structure.
*
* @note MAC DMA deinit should be complete. See osi_hw_dma_deinit()
*/
static void mgbe_stop_mac(struct osi_core_priv_data *const osi_core)
{
nveu32_t value;
void *addr = osi_core->base;
value = osi_readl((nveu8_t *)addr + MGBE_MAC_TMCR);
/* Disable MAC Transmit */
value &= ~MGBE_MAC_TMCR_TE;
osi_writel(value, (nveu8_t *)addr + MGBE_MAC_TMCR);
value = osi_readl((nveu8_t *)addr + MGBE_MAC_RMCR);
/* Disable MAC Receive */
value &= ~MGBE_MAC_RMCR_RE;
osi_writel(value, (nveu8_t *)addr + MGBE_MAC_RMCR);
}
/**
* @brief mgbe_core_deinit - MGBE MAC core deinitialization
*
* Algorithm: This function will take care of deinitializing MAC
*
* @param[in] osi_core: OSI core private data structure.
*
* @note Required clks and resets has to be enabled
*/
static void mgbe_core_deinit(struct osi_core_priv_data *osi_core)
{
/* Stop the MAC by disabling both MAC Tx and Rx */
mgbe_stop_mac(osi_core);
}
/**
* @brief mgbe_init_core_ops - Initialize MGBE MAC core operations
*/
void mgbe_init_core_ops(struct core_ops *ops)
{
ops->poll_for_swr = mgbe_poll_for_swr;
ops->core_init = mgbe_core_init;
ops->core_deinit = mgbe_core_deinit;
ops->validate_regs = OSI_NULL;
ops->start_mac = mgbe_start_mac;
ops->stop_mac = mgbe_stop_mac;
ops->handle_common_intr = mgbe_handle_common_intr;
/* only MGBE supports full duplex */
ops->set_mode = OSI_NULL;
/* by default speed is 10G */
ops->set_speed = OSI_NULL;
ops->pad_calibrate = mgbe_pad_calibrate;
ops->set_mdc_clk_rate = OSI_NULL;
ops->flush_mtl_tx_queue = mgbe_flush_mtl_tx_queue;
ops->config_mac_loopback = OSI_NULL;
ops->set_avb_algorithm = OSI_NULL;
ops->get_avb_algorithm = OSI_NULL;
ops->config_fw_err_pkts = OSI_NULL;
ops->config_tx_status = OSI_NULL;
ops->config_rx_crc_check = OSI_NULL;
ops->config_flow_control = OSI_NULL;
ops->config_arp_offload = OSI_NULL;
ops->config_rxcsum_offload = OSI_NULL;
ops->config_mac_pkt_filter_reg = OSI_NULL;
ops->update_mac_addr_low_high_reg = OSI_NULL;
ops->config_l3_l4_filter_enable = OSI_NULL;
ops->config_l3_filters = OSI_NULL;
ops->update_ip4_addr = OSI_NULL;
ops->update_ip6_addr = OSI_NULL;
ops->config_l4_filters = OSI_NULL;
ops->update_l4_port_no = OSI_NULL;
ops->config_vlan_filtering = OSI_NULL;
ops->update_vlan_id = OSI_NULL;
ops->set_systime_to_mac = OSI_NULL;
ops->config_addend = OSI_NULL;
ops->adjust_mactime = OSI_NULL;
/* ops->get_systime_from_mac = OSI_NULL; */
ops->config_tscr = OSI_NULL;
ops->config_ssir = OSI_NULL;
ops->read_mmc = OSI_NULL;
ops->reset_mmc = OSI_NULL;
}

osi/core/mgbe_core.h (new file, 132 lines)

@@ -0,0 +1,132 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef MGBE_CORE_H_
#define MGBE_CORE_H_
/**
* @addtogroup MGBE-MAC MAC register offsets
*
* @brief MGBE MAC register offsets
* @{
*/
#define MGBE_MAC_RQC0R 0x00A0
#define MGBE_MAC_MA0HR 0x0300
#define MGBE_MAC_MA0LR 0x0304
#define MGBE_MAC_TMCR 0x0000
#define MGBE_MAC_RMCR 0x0004
#define MGBE_MMC_TX_INTR_EN 0x0810
#define MGBE_MMC_RX_INTR_EN 0x080C
#define MGBE_MMC_CNTRL 0x0800
#define MGBE_MAC_IER 0x00B4
#define MGBE_MAC_ISR 0x00B0
/** @} */
/**
* @addtogroup MGBE-DMA DMA register offsets
*
* @brief MGBE DMA register offsets
* @{
*/
#define MGBE_DMA_MODE 0x3000
#define MGBE_DMA_SBUS 0x3004
#define MGBE_DMA_ISR 0x3008
#define MGBE_DMA_CHX_STATUS(x) ((0x0080U * (x)) + 0x3160U)
#define MGBE_DMA_CHX_IER(x) ((0x0080U * (x)) + 0x3138U)
/** @} */
/**
* @addtogroup MGBE-MTL MTL register offsets
*
* @brief MGBE MTL register offsets
* @{
*/
#define MGBE_MTL_RXQ_DMA_MAP0 0x1030
#define MGBE_MTL_RXQ_DMA_MAP1 0x1034
#define MGBE_MTL_RXQ_DMA_MAP2 0x1038
#define MGBE_MTL_RXQ_DMA_MAP3 0x103C
#define MGBE_MTL_CHX_TX_OP_MODE(x) ((0x0080U * (x)) + 0x1100U)
#define MGBE_MTL_CHX_RX_OP_MODE(x) ((0x0080U * (x)) + 0x1140U)
#define MGBE_MTL_TXQ_QW(x) ((0x0080U * (x)) + 0x1118U)
/** @} */
/**
* @addtogroup HW Register BIT values
*
* @brief consists of corresponding MGBE MAC, MTL register bit values
* @{
*/
#define MGBE_DMA_MODE_SWR OSI_BIT(0)
#define MGBE_MTL_QTOMR_FTQ OSI_BIT(0)
#define MGBE_MTL_QTOMR_FTQ_LPOS OSI_BIT(0)
#define MGBE_MTL_TSF OSI_BIT(1)
#define MGBE_MTL_TXQEN OSI_BIT(3)
#define MGBE_MTL_RSF OSI_BIT(5)
#define MGBE_MTL_TXQ_QW_ISCQW OSI_BIT(4)
#define MGBE_MAC_RMCR_ACS OSI_BIT(1)
#define MGBE_MAC_RMCR_CST OSI_BIT(2)
#define MGBE_MAC_RMCR_IPC OSI_BIT(9)
#define MGBE_MAC_RXQC0_RXQEN_MASK 0x3U
#define MGBE_MAC_RXQC0_RXQEN_SHIFT(x) ((x) * 2U)
#define MGBE_MMC_CNTRL_CNTRST OSI_BIT(0)
#define MGBE_MMC_CNTRL_RSTONRD OSI_BIT(2)
#define MGBE_MMC_CNTRL_CNTMCT (OSI_BIT(4) | OSI_BIT(5))
#define MGBE_MMC_CNTRL_CNTPRST OSI_BIT(7)
#define MGBE_IMR_RGSMIIIE OSI_BIT(0)
#define MGBE_DMA_ISR_MACIS OSI_BIT(17)
#define MGBE_DMA_ISR_DCH0_DCH15_MASK 0xFFU
#define MGBE_DMA_CHX_STATUS_TPS OSI_BIT(1)
#define MGBE_DMA_CHX_STATUS_TBU OSI_BIT(2)
#define MGBE_DMA_CHX_STATUS_RBU OSI_BIT(7)
#define MGBE_DMA_CHX_STATUS_RPS OSI_BIT(8)
#define MGBE_DMA_CHX_STATUS_FBE OSI_BIT(12)
#define MGBE_DMA_CHX_STATUS_TI OSI_BIT(0)
#define MGBE_DMA_CHX_STATUS_RI OSI_BIT(6)
/* DMA SBUS */
#define MGBE_DMA_SBUS_BLEN8 OSI_BIT(2)
#define MGBE_DMA_SBUS_BLEN16 OSI_BIT(3)
#define MGBE_DMA_SBUS_EAME OSI_BIT(11)
#define MGBE_DMA_SBUS_RD_OSR_LMT 0x001F0000U
#define MGBE_DMA_SBUS_WR_OSR_LMT 0x1F000000U
#define MGBE_MAC_TMCR_TE OSI_BIT(0)
#define MGBE_MAC_RMCR_RE OSI_BIT(0)
#define MGBE_MTL_TXQ_SIZE_SHIFT 16U
#define MGBE_MTL_RXQ_SIZE_SHIFT 16U
#define MGBE_RXQ_TO_DMA_CHAN_MAP0 0x03020100U
#define MGBE_RXQ_TO_DMA_CHAN_MAP1 0x07060504U
#define MGBE_RXQ_TO_DMA_CHAN_MAP2 0x0B0A0908U
#define MGBE_RXQ_TO_DMA_CHAN_MAP3 0x0F0E0D0CU
/** @} */
/**
* @addtogroup MGBE-SIZE SIZE calculation helper Macros
*
* @brief SIZE calculation defines
* @{
*/
#define FIFO_SIZE_B(x) (x)
#define FIFO_SIZE_KB(x) ((x) * 1024U)
/** @} */
#endif /* MGBE_CORE_H_ */
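All per-channel offsets in this header follow one stride rule: each DMA channel owns an 0x80-byte register block in the 0x3100 range, so for example MGBE_DMA_CHX_STATUS(2) = (0x0080U * 2U) + 0x3160U = 0x3260U. The EQOS equivalents use the same stride from a 0x1100-range base, which is why only the base constants differ between the two macro sets.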


@@ -123,6 +123,19 @@ nve32_t osi_read_phy_reg(struct osi_core_priv_data *const osi_core,
 nve32_t osi_init_core_ops(struct osi_core_priv_data *const osi_core)
 {
+	typedef void (*init_ops_arr)(struct core_ops *);
+	typedef void *(*safety_init)(void);
+
+	init_ops_arr i_ops[2][2] = {
+		{ eqos_init_core_ops, ivc_init_core_ops },
+		{ mgbe_init_core_ops, OSI_NULL }
+	};
+
+	safety_init s_init[2][2] = {
+		{ eqos_get_core_safety_config, ivc_get_core_safety_config },
+		{ OSI_NULL, OSI_NULL }
+	};
+
 	if (osi_core == OSI_NULL) {
 		return -1;
 	}
@@ -134,30 +147,25 @@ nve32_t osi_init_core_ops(struct osi_core_priv_data *const osi_core)
 		return -1;
 	}
 
-	if (osi_core->mac != OSI_MAC_HW_EQOS) {
+	if (osi_core->mac > OSI_MAC_HW_MGBE) {
 		OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
 			     "Invalid MAC HW type\n", 0ULL);
 		return -1;
 	}
 
-	if (osi_core->use_virtualization == OSI_DISABLE) {
-		/* Get EQOS HW core ops */
-		eqos_init_core_ops(ops_p);
-		/* Explicitly set osi_core->safety_config = OSI_NULL if
-		 * a particular MAC version does not need SW safety
-		 * mechanisms like periodic read-verify.
-		 */
-		osi_core->safety_config =
-			(void *)eqos_get_core_safety_config();
-	} else {
-		/* Get IVC HW core ops */
-		ivc_init_core_ops(ops_p);
-		/* Explicitly set osi_core->safety_config = OSI_NULL if
-		 * a particular MAC version does not need SW safety
-		 * mechanisms like periodic read-verify.
-		 */
-		osi_core->safety_config =
-			(void *)ivc_get_core_safety_config();
+	if (osi_core->use_virtualization > OSI_ENABLE) {
+		OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
+			     "Invalid use_virtualization value\n", 0ULL);
+		return -1;
+	}
+
+	if (i_ops[osi_core->mac][osi_core->use_virtualization] != OSI_NULL) {
+		i_ops[osi_core->mac][osi_core->use_virtualization](ops_p);
+	}
+
+	if (s_init[osi_core->mac][osi_core->use_virtualization] != OSI_NULL) {
+		osi_core->safety_config =
+		s_init[osi_core->mac][osi_core->use_virtualization]();
 	}
 
 	if (validate_func_ptrs(osi_core) < 0) {
@@ -169,7 +177,6 @@ nve32_t osi_init_core_ops(struct osi_core_priv_data *const osi_core)
 	g_core.init_done = OSI_ENABLE;
 
 	return 0;
 }
-
 nve32_t osi_poll_for_mac_reset_complete(
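The table-driven dispatch above replaces the old EQOS/IVC if-else ladder: after the two range checks, osi_core->mac and osi_core->use_virtualization are provably valid 2D indices, and an OSI_NULL slot (MGBE has no IVC or safety-config variant yet) simply skips that step. A condensed sketch of the pattern (select_core_ops is a hypothetical name for illustration):

typedef void (*init_fn)(struct core_ops *);

static void select_core_ops(nveu32_t mac, nveu32_t virt,
			    struct core_ops *ops_p)
{
	static const init_fn i_ops[2][2] = {
		{ eqos_init_core_ops, ivc_init_core_ops },	/* OSI_MAC_HW_EQOS */
		{ mgbe_init_core_ops, OSI_NULL },		/* OSI_MAC_HW_MGBE */
	};

	/* caller has validated mac <= OSI_MAC_HW_MGBE, virt <= OSI_ENABLE */
	if (i_ops[mac][virt] != OSI_NULL) {
		i_ops[mac][virt](ops_p);
	}
}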


@@ -92,15 +92,39 @@ struct dma_local {
 	/** DMA channel operations */
 	struct dma_chan_ops ops;
 	/** Flag to represent OSI DMA software init done */
-	unsigned int init_done;
+	nveu32_t init_done;
 	/** Holds the MAC version of MAC controller */
 	nveu32_t mac_ver;
 	/** Represents whether DMA interrupts are VM or Non-VM */
 	nveu32_t vm_intr;
 };
 
+/**
+ * @brief eqos_init_dma_chan_ops - Initialize EQOS DMA operations.
+ *
+ * @param[in] ops: DMA channel operations pointer.
+ *
+ * @note
+ * API Group:
+ * - Initialization: Yes
+ * - Run time: No
+ * - De-initialization: No
+ */
 void eqos_init_dma_chan_ops(struct dma_chan_ops *ops);
 
+/**
+ * @brief mgbe_init_dma_chan_ops - Initialize MGBE DMA operations.
+ *
+ * @param[in] ops: DMA channel operations pointer.
+ *
+ * @note
+ * API Group:
+ * - Initialization: Yes
+ * - Run time: No
+ * - De-initialization: No
+ */
+void mgbe_init_dma_chan_ops(struct dma_chan_ops *ops);
+
 /**
  * @brief osi_hw_transmit - Initialize Tx DMA descriptors for a channel
  *


@@ -68,6 +68,8 @@
 	(RDES3_ERR_CRC | RDES3_ERR_GP | RDES3_ERR_WD | \
 	 RDES3_ERR_ORUN | RDES3_ERR_RE | RDES3_ERR_DRIB)
 
+/** MGBE error summary bits for Received packet */
+#define RDES3_ES_MGBE	0x8000U
 /**
  * @addtogroup EQOS_TxDesc Transmit Descriptors bit fields
  *

osi/dma/mgbe_dma.c (new file, 612 lines)

@@ -0,0 +1,612 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "../osi/common/common.h"
#include <osi_common.h>
#include "mgbe_dma.h"
#include "dma_local.h"
/**
* @brief mgbe_disable_chan_tx_intr - Disables DMA Tx channel interrupts.
*
* @param[in] addr: Base address indicating the start of
* memory mapped IO region of the MAC.
* @param[in] chan: DMA Tx channel number.
*
* @note 1) MAC needs to be out of reset and proper clocks need to be configured
* 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
* 3) Mapping of physical IRQ line to DMA channel need to be maintained at
* OSDependent layer and pass corresponding channel number.
*/
static void mgbe_disable_chan_tx_intr(void *addr, nveu32_t chan)
{
nveu32_t cntrl;
MGBE_CHECK_CHAN_BOUND(chan);
cntrl = osi_readl((nveu8_t *)addr +
MGBE_VIRT_INTR_CHX_CNTRL(chan));
cntrl &= ~MGBE_VIRT_INTR_CHX_CNTRL_TX;
osi_writel(cntrl, (nveu8_t *)addr +
MGBE_VIRT_INTR_CHX_CNTRL(chan));
}
/**
* @brief mgbe_enable_chan_tx_intr - Enable Tx channel interrupts.
*
* @param[in] addr: Base address indicating the start of
* memory mapped IO region of the MAC.
* @param[in] chan: DMA Tx channel number.
*
* @note 1) MAC needs to be out of reset and proper clocks need to be configured
* 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
* 3) Mapping of physical IRQ line to DMA channel need to be maintained at
* OSDependent layer and pass corresponding channel number.
*/
static void mgbe_enable_chan_tx_intr(void *addr, nveu32_t chan)
{
nveu32_t cntrl;
MGBE_CHECK_CHAN_BOUND(chan);
cntrl = osi_readl((nveu8_t *)addr +
MGBE_VIRT_INTR_CHX_CNTRL(chan));
cntrl |= MGBE_VIRT_INTR_CHX_CNTRL_TX;
osi_writel(cntrl, (nveu8_t *)addr +
MGBE_VIRT_INTR_CHX_CNTRL(chan));
}
/**
* @brief mgbe_disable_chan_rx_intr - Disable Rx channel interrupts.
*
* @param[in] addr: Base address indicating the start of
* memory mapped IO region of the MAC.
* @param[in] chan: DMA Rx channel number.
*
* @note 1) MAC needs to be out of reset and proper clocks need to be configured
* 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
* 3) Mapping of physical IRQ line to DMA channel need to be maintained at
* OSDependent layer and pass corresponding channel number.
*/
static void mgbe_disable_chan_rx_intr(void *addr, nveu32_t chan)
{
nveu32_t cntrl;
MGBE_CHECK_CHAN_BOUND(chan);
cntrl = osi_readl((nveu8_t *)addr +
MGBE_VIRT_INTR_CHX_CNTRL(chan));
cntrl &= ~MGBE_VIRT_INTR_CHX_CNTRL_RX;
osi_writel(cntrl, (nveu8_t *)addr +
MGBE_VIRT_INTR_CHX_CNTRL(chan));
}
/**
* @brief mgbe_enable_chan_rx_intr - Enable Rx channel interrupts.
*
* @param[in] addr: Base address indicating the start of
* memory mapped IO region of the MAC.
* @param[in] chan: DMA Rx channel number.
*
* @note 1) MAC needs to be out of reset and proper clocks need to be configured
* 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
*/
static void mgbe_enable_chan_rx_intr(void *addr, nveu32_t chan)
{
nveu32_t cntrl;
MGBE_CHECK_CHAN_BOUND(chan);
cntrl = osi_readl((nveu8_t *)addr +
MGBE_VIRT_INTR_CHX_CNTRL(chan));
cntrl |= MGBE_VIRT_INTR_CHX_CNTRL_RX;
osi_writel(cntrl, (nveu8_t *)addr +
MGBE_VIRT_INTR_CHX_CNTRL(chan));
}
/**
* @brief mgbe_set_tx_ring_len - Set DMA Tx ring length.
*
* Algorithm: Set DMA Tx channel ring length for specific channel.
*
* @param[in] osi_dma: OSI DMA data structure.
* @param[in] chan: DMA Tx channel number.
* @param[in] len: Length.
*/
static void mgbe_set_tx_ring_len(struct osi_dma_priv_data *osi_dma,
nveu32_t chan,
nveu32_t len)
{
void *addr = osi_dma->base;
MGBE_CHECK_CHAN_BOUND(chan);
osi_writel(len, (nveu8_t *)addr + MGBE_DMA_CHX_TDRL(chan));
}
/**
* @brief mgbe_set_tx_ring_start_addr - Set DMA Tx ring base address.
*
* Algorithm: Sets DMA Tx ring base address for specific channel.
*
* @param[in] addr: Base address indicating the start of
* memory mapped IO region of the MAC.
* @param[in] chan: DMA Tx channel number.
* @param[in] tx_desc: Tx desc base addess.
*/
static void mgbe_set_tx_ring_start_addr(void *addr, nveu32_t chan,
nveu64_t tx_desc)
{
nveu64_t temp;
MGBE_CHECK_CHAN_BOUND(chan);
temp = H32(tx_desc);
if (temp < UINT_MAX) {
osi_writel((nveu32_t)temp, (nveu8_t *)addr +
MGBE_DMA_CHX_TDLH(chan));
}
temp = L32(tx_desc);
if (temp < UINT_MAX) {
osi_writel((nveu32_t)temp, (nveu8_t *)addr +
MGBE_DMA_CHX_TDLA(chan));
}
}
/**
* @brief mgbe_update_tx_tailptr - Updates DMA Tx ring tail pointer.
*
* Algorithm: Updates DMA Tx ring tail pointer for specific channel.
*
* @param[in] addr: Base address indicating the start of
* memory mapped IO region of the MAC.
* @param[in] chan: DMA Tx channel number.
* @param[in] tailptr: DMA Tx ring tail pointer.
*
*
* @note 1) MAC needs to be out of reset and proper clocks need to be configured
* 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
*/
static void mgbe_update_tx_tailptr(void *addr, nveu32_t chan,
nveu64_t tailptr)
{
nveu64_t temp;
MGBE_CHECK_CHAN_BOUND(chan);
temp = L32(tailptr);
if (temp < UINT_MAX) {
osi_writel((nveu32_t)temp, (nveu8_t *)addr +
MGBE_DMA_CHX_TDTLP(chan));
}
}
/**
* @brief mgbe_set_rx_ring_len - Set Rx channel ring length.
*
* Algorithm: Sets DMA Rx channel ring length for specific DMA channel.
*
* @param[in] osi_dma: OSI DMA data structure.
* @param[in] chan: DMA Rx channel number.
* @param[in] len: Length
*/
static void mgbe_set_rx_ring_len(struct osi_dma_priv_data *osi_dma,
nveu32_t chan,
nveu32_t len)
{
void *addr = osi_dma->base;
MGBE_CHECK_CHAN_BOUND(chan);
osi_writel(len, (nveu8_t *)addr + MGBE_DMA_CHX_RDRL(chan));
}
/**
* @brief mgbe_set_rx_ring_start_addr - Set DMA Rx ring base address.
*
* Algorithm: Sets DMA Rx channel ring base address.
*
* @param[in] addr: Base address indicating the start of
* memory mapped IO region of the MAC.
* @param[in] chan: DMA Rx channel number.
* @param[in] tx_desc: DMA Rx desc base address.
*/
static void mgbe_set_rx_ring_start_addr(void *addr, nveu32_t chan,
nveu64_t tx_desc)
{
nveu64_t temp;
MGBE_CHECK_CHAN_BOUND(chan);
temp = H32(tx_desc);
if (temp < UINT_MAX) {
osi_writel((nveu32_t)temp, (nveu8_t *)addr +
MGBE_DMA_CHX_RDLH(chan));
}
temp = L32(tx_desc);
if (temp < UINT_MAX) {
osi_writel((nveu32_t)temp, (nveu8_t *)addr +
MGBE_DMA_CHX_RDLA(chan));
}
}
/**
* @brief mgbe_update_rx_tailptr - Update Rx ring tail pointer
*
* Algorithm: Updates DMA Rx channel tail pointer for specific channel.
*
* @param[in] addr: Base address indicating the start of
* memory mapped IO region of the MAC.
* @param[in] chan: DMA Rx channel number.
* @param[in] tailptr: Tail pointer
*
* @note 1) MAC needs to be out of reset and proper clocks need to be configured
* 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
*/
static void mgbe_update_rx_tailptr(void *addr, nveu32_t chan,
nveu64_t tailptr)
{
nveu64_t temp;
MGBE_CHECK_CHAN_BOUND(chan);
temp = H32(tailptr);
if (temp < UINT_MAX) {
osi_writel((nveu32_t)temp, (nveu8_t *)addr +
MGBE_DMA_CHX_RDTHP(chan));
}
temp = L32(tailptr);
if (temp < UINT_MAX) {
osi_writel((nveu32_t)temp, (nveu8_t *)addr +
MGBE_DMA_CHX_RDTLP(chan));
}
}
/**
* @brief mgbe_start_dma - Start DMA.
*
* Algorithm: Start Tx and Rx DMA for specific channel.
*
* @param[in] osi_dma: OSI DMA private data structure.
* @param[in] chan: DMA Tx/Rx channel number.
*
* @note 1) MAC needs to be out of reset and proper clocks need to be configured
* 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
*/
static void mgbe_start_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan)
{
nveu32_t val;
void *addr = osi_dma->base;
MGBE_CHECK_CHAN_BOUND(chan);
/* start Tx DMA */
val = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_TX_CTRL(chan));
val |= OSI_BIT(0);
osi_writel(val, (nveu8_t *)addr + MGBE_DMA_CHX_TX_CTRL(chan));
/* start Rx DMA */
val = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_RX_CTRL(chan));
val |= OSI_BIT(0);
osi_writel(val, (nveu8_t *)addr + MGBE_DMA_CHX_RX_CTRL(chan));
}
/**
* @brief mgbe_stop_dma - Stop DMA.
*
* Algorithm: Stop Tx and Rx DMA for specific channel.
*
* @param[in] osi_dma: OSI DMA private data structure.
* @param[in] chan: DMA Tx/Rx channel number.
*
* @note 1) MAC needs to be out of reset and proper clocks need to be configured
* 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
*/
static void mgbe_stop_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan)
{
nveu32_t val;
void *addr = osi_dma->base;
MGBE_CHECK_CHAN_BOUND(chan);
/* stop Tx DMA */
val = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_TX_CTRL(chan));
val &= ~OSI_BIT(0);
osi_writel(val, (nveu8_t *)addr + MGBE_DMA_CHX_TX_CTRL(chan));
/* stop Rx DMA */
val = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_RX_CTRL(chan));
val &= ~OSI_BIT(0);
osi_writel(val, (nveu8_t *)addr + MGBE_DMA_CHX_RX_CTRL(chan));
}
/**
* @brief mgbe_configure_dma_channel - Configure DMA channel
*
* Algorithm: This takes care of configuring the below
* parameters for the DMA channel
* 1) Enabling DMA channel interrupts
* 2) Enable 8xPBL mode
* 3) Program Tx, Rx PBL
* 4) Enable TSO if HW supports
* 5) Program Rx Watchdog timer
*
* @param[in] chan: DMA channel number that need to be configured.
* @param[in] osi_dma: OSI DMA private data structure.
*
* @note MAC has to be out of reset.
*/
static void mgbe_configure_dma_channel(nveu32_t chan,
struct osi_dma_priv_data *osi_dma)
{
nveu32_t value;
MGBE_CHECK_CHAN_BOUND(chan);
/* enable DMA channel interrupts */
/* Enable TIE and TBUE */
/* TIE - Transmit Interrupt Enable */
/* TBUE - Transmit Buffer Unavailable Enable */
/* RIE - Receive Interrupt Enable */
/* RBUE - Receive Buffer Unavailable Enable */
/* AIE - Abnormal Interrupt Summary Enable */
/* NIE - Normal Interrupt Summary Enable */
/* FBE - Fatal Bus Error Enable */
value = osi_readl((nveu8_t *)osi_dma->base +
MGBE_DMA_CHX_INTR_ENA(chan));
value |= MGBE_DMA_CHX_INTR_TIE | MGBE_DMA_CHX_INTR_TBUE |
MGBE_DMA_CHX_INTR_RIE | MGBE_DMA_CHX_INTR_RBUE |
MGBE_DMA_CHX_INTR_FBEE | MGBE_DMA_CHX_INTR_AIE |
MGBE_DMA_CHX_INTR_NIE;
/* For multi-IRQs to work NIE needs to be disabled */
/* TODO: do we need this ? */
value &= ~(MGBE_DMA_CHX_INTR_NIE);
osi_writel(value, (nveu8_t *)osi_dma->base +
MGBE_DMA_CHX_INTR_ENA(chan));
/* Enable 8xPBL mode */
value = osi_readl((nveu8_t *)osi_dma->base +
MGBE_DMA_CHX_CTRL(chan));
value |= MGBE_DMA_CHX_CTRL_PBLX8;
osi_writel(value, (nveu8_t *)osi_dma->base +
MGBE_DMA_CHX_CTRL(chan));
/* Configure DMA channel Transmit control register */
value = osi_readl((nveu8_t *)osi_dma->base +
MGBE_DMA_CHX_TX_CTRL(chan));
/* Enable OSF mode */
value |= MGBE_DMA_CHX_TX_CTRL_OSP;
/* TxPBL = 16 */
value |= MGBE_DMA_CHX_TX_CTRL_TXPBL_RECOMMENDED;
/* enable TSO by default if HW supports */
value |= MGBE_DMA_CHX_TX_CTRL_TSE;
osi_writel(value, (nveu8_t *)osi_dma->base +
MGBE_DMA_CHX_TX_CTRL(chan));
/* Configure DMA channel Receive control register */
/* Select Rx Buffer size. Needs to be rounded up to next multiple of
* bus width
*/
value = osi_readl((nveu8_t *)osi_dma->base +
MGBE_DMA_CHX_RX_CTRL(chan));
value |= (osi_dma->rx_buf_len << MGBE_DMA_CHX_RBSZ_SHIFT);
/* RXPBL = 16 */
value |= MGBE_DMA_CHX_RX_CTRL_RXPBL_RECOMMENDED;
osi_writel(value, (nveu8_t *)osi_dma->base +
MGBE_DMA_CHX_RX_CTRL(chan));
/* Set Receive Interrupt Watchdog Timer Count */
/* conversion of usec to RWIT value
* Eg:System clock is 62.5MHz, each clock cycle would then be 16ns
* For value 0x1 in watchdog timer,device would wait for 256 clk cycles,
* ie, (16ns x 256) => 4.096us (rounding off to 4us)
* So formula with above values is,ret = usec/4
*/
if ((osi_dma->use_riwt == OSI_ENABLE) &&
(osi_dma->rx_riwt < UINT_MAX)) {
value = osi_readl((nveu8_t *)osi_dma->base +
MGBE_DMA_CHX_RX_WDT(chan));
/* Mask the RWT value */
value &= ~MGBE_DMA_CHX_RX_WDT_RWT_MASK;
/* Conversion of usec to Rx Interrupt Watchdog Timer Count */
/* TODO: Need to fix AXI clock for silicon */
value |= ((osi_dma->rx_riwt *
((nveu32_t)13000000 / OSI_ONE_MEGA_HZ)) /
MGBE_DMA_CHX_RX_WDT_RWTU) &
MGBE_DMA_CHX_RX_WDT_RWT_MASK;
osi_writel(value, (nveu8_t *)osi_dma->base +
MGBE_DMA_CHX_RX_WDT(chan));
}
}
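Putting numbers to the watchdog conversion above (with the pre-silicon 13 MHz clock the TODO mentions): counts = (rx_riwt_usec * (13000000 / 1000000)) / 256, i.e. rx_riwt * 13 / 256. A 512 us coalescing budget therefore programs (512 * 13) / 256 = 26 into the RWT field, each count covering 256 clock cycles of delay before the watchdog fires.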
/**
* mgbe_dma_chan_to_vmirq_map - Map DMA channels to a specific VM IRQ.
*
* Algorithm: Programs HW to map DMA channels to specific VM.
*
* Dependencies: OSD layer needs to update number of VM channels and
* DMA channel list in osi_vm_irq_data.
*
* @param[in] osi_dma: OSI private data structure.
*
* Return: None.
*/
static void mgbe_dma_chan_to_vmirq_map(struct osi_dma_priv_data *osi_dma)
{
struct osi_vm_irq_data *irq_data;
nveu32_t i, j;
nveu32_t chan;
for (i = 0; i < osi_dma->num_vm_irqs; i++) {
irq_data = &osi_dma->irq_data[i];
for (j = 0; j < irq_data->num_vm_chans; j++) {
chan = irq_data->vm_chans[j];
if (chan >= OSI_MGBE_MAX_NUM_CHANS) {
continue;
}
osi_writel(OSI_BIT(i),
(nveu8_t *)osi_dma->base +
MGBE_VIRT_INTR_APB_CHX_CNTRL(chan));
}
}
	/* TODO: replace this magic wrapper offset/value with named macros */
	osi_writel(0xD, (nveu8_t *)osi_dma->base + 0x8400);
}
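To make the mapping concrete with illustrative values: with num_vm_irqs = 2, vm_chans = {0, 1} on IRQ 0 and vm_chans = {2, 3} on IRQ 1, channels 0 and 1 each get OSI_BIT(0) written to their MGBE_VIRT_INTR_APB_CHX_CNTRL register while channels 2 and 3 get OSI_BIT(1), steering each channel's wrapper interrupt onto the VM IRQ line that owns it.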
/**
* @brief mgbe_init_dma_channel - DMA channel INIT
*
* @param[in] osi_dma: OSI DMA private data structure.
*/
static nve32_t mgbe_init_dma_channel(struct osi_dma_priv_data *osi_dma)
{
nveu32_t chinx;
/* configure MGBE DMA channels */
for (chinx = 0; chinx < osi_dma->num_dma_chans; chinx++) {
mgbe_configure_dma_channel(osi_dma->dma_chans[chinx], osi_dma);
}
mgbe_dma_chan_to_vmirq_map(osi_dma);
return 0;
}
/**
* @brief mgbe_set_rx_buf_len - Set Rx buffer length
* Sets the Rx buffer length based on the new MTU size set.
*
* @param[in] osi_dma: OSI DMA private data structure.
*
* @note 1) MAC needs to be out of reset and proper clocks need to be configured
* 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
* 3) osi_dma->mtu need to be filled with current MTU size <= 9K
*/
static void mgbe_set_rx_buf_len(struct osi_dma_priv_data *osi_dma)
{
nveu32_t rx_buf_len;
if (osi_dma->mtu >= OSI_MTU_SIZE_8K) {
rx_buf_len = OSI_MTU_SIZE_16K;
} else if (osi_dma->mtu >= OSI_MTU_SIZE_4K) {
rx_buf_len = OSI_MTU_SIZE_8K;
} else if (osi_dma->mtu >= OSI_MTU_SIZE_2K) {
rx_buf_len = OSI_MTU_SIZE_4K;
} else if (osi_dma->mtu > MAX_ETH_FRAME_LEN_DEFAULT) {
rx_buf_len = OSI_MTU_SIZE_2K;
} else {
rx_buf_len = MAX_ETH_FRAME_LEN_DEFAULT;
}
/* Buffer alignment */
osi_dma->rx_buf_len = ((rx_buf_len + (MGBE_AXI_BUS_WIDTH - 1U)) &
~(MGBE_AXI_BUS_WIDTH - 1U));
}
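The final statement above is the usual round-up-to-a-power-of-two mask: aligned = (len + (MGBE_AXI_BUS_WIDTH - 1U)) & ~(MGBE_AXI_BUS_WIDTH - 1U). Assuming the conventional 1514 + 4 (FCS) + 4 (VLAN) = 1522-byte default frame length, the buffer length becomes (1522 + 15) & ~15 = 1536, a multiple of the 16-byte bus width.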
/**
* mgbe_get_global_dma_status - Gets DMA status.
*
* Algorithm: Returns global DMA Tx/Rx interrupt status
*
* @param[in] addr: MAC base address.
*
* Dependencies: None.
* Protection: None.
* Return: Global DMA Tx/Rx interrupt status register value.
*/
static nveu32_t mgbe_get_global_dma_status(void *addr)
{
return osi_readl((nveu8_t *)addr + MGBE_GLOBAL_DMA_STATUS);
}
/**
* mgbe_clear_vm_tx_intr - Clear VM Tx interrupt
*
* Algorithm: Clear Tx interrupt source at DMA and wrapper level.
*
* @param[in] addr: MAC base address.
* @param[in] chan: DMA Tx channel number.
*
* Dependencies: None.
* Protection: None.
* Return: None.
*/
static void mgbe_clear_vm_tx_intr(void *addr, nveu32_t chan)
{
MGBE_CHECK_CHAN_BOUND(chan);
osi_writel(MGBE_DMA_CHX_STATUS_CLEAR_TX,
(nveu8_t *)addr + MGBE_DMA_CHX_STATUS(chan));
osi_writel(MGBE_VIRT_INTR_CHX_STATUS_TX,
(nveu8_t *)addr + MGBE_VIRT_INTR_CHX_STATUS(chan));
}
/**
* mgbe_clear_vm_rx_intr - Clear VM Rx interrupt
*
* Algorithm: Clear Rx interrupt source at DMA and wrapper level.
*
* @param[in] addr: MAC base address.
* @param[in] chan: DMA Rx channel number.
*
* Dependencies: None.
* Protection: None.
* Return: None.
*/
static void mgbe_clear_vm_rx_intr(void *addr, nveu32_t chan)
{
MGBE_CHECK_CHAN_BOUND(chan);
osi_writel(MGBE_DMA_CHX_STATUS_CLEAR_RX,
(nveu8_t *)addr + MGBE_DMA_CHX_STATUS(chan));
osi_writel(MGBE_VIRT_INTR_CHX_STATUS_RX,
(nveu8_t *)addr + MGBE_VIRT_INTR_CHX_STATUS(chan));
}
void mgbe_init_dma_chan_ops(struct dma_chan_ops *ops)
{
ops->set_tx_ring_len = mgbe_set_tx_ring_len;
ops->set_rx_ring_len = mgbe_set_rx_ring_len;
ops->set_tx_ring_start_addr = mgbe_set_tx_ring_start_addr;
ops->set_rx_ring_start_addr = mgbe_set_rx_ring_start_addr;
ops->update_tx_tailptr = mgbe_update_tx_tailptr;
ops->update_rx_tailptr = mgbe_update_rx_tailptr;
ops->disable_chan_tx_intr = mgbe_disable_chan_tx_intr;
ops->enable_chan_tx_intr = mgbe_enable_chan_tx_intr;
ops->disable_chan_rx_intr = mgbe_disable_chan_rx_intr;
ops->enable_chan_rx_intr = mgbe_enable_chan_rx_intr;
ops->start_dma = mgbe_start_dma;
ops->stop_dma = mgbe_stop_dma;
ops->init_dma_channel = mgbe_init_dma_channel;
ops->set_rx_buf_len = mgbe_set_rx_buf_len;
ops->validate_regs = OSI_NULL;
ops->get_global_dma_status = mgbe_get_global_dma_status;
ops->clear_vm_tx_intr = mgbe_clear_vm_tx_intr;
ops->clear_vm_rx_intr = mgbe_clear_vm_rx_intr;
}

osi/dma/mgbe_dma.h (new file, 95 lines)
View File

@@ -0,0 +1,95 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_MGBE_DMA_H
#define INCLUDED_MGBE_DMA_H
/**
* @addtogroup MGBE-DMA-CH DMA Channel Register offsets
*
* @brief MGBE DMA Channel register offsets
* @{
*/
#define MGBE_DMA_CHX_TX_CTRL(x) ((0x0080U * (x)) + 0x3104U)
#define MGBE_DMA_CHX_RX_CTRL(x) ((0x0080U * (x)) + 0x3108U)
#define MGBE_DMA_CHX_INTR_ENA(x) ((0x0080U * (x)) + 0x3138U)
#define MGBE_DMA_CHX_CTRL(x) ((0x0080U * (x)) + 0x3100U)
#define MGBE_DMA_CHX_RX_WDT(x) ((0x0080U * (x)) + 0x313CU)
#define MGBE_DMA_CHX_TDRL(x) ((0x0080U * (x)) + 0x3130U)
#define MGBE_DMA_CHX_RDRL(x) ((0x0080U * (x)) + 0x3134U)
#define MGBE_DMA_CHX_TDLH(x) ((0x0080U * (x)) + 0x3110U)
#define MGBE_DMA_CHX_TDLA(x) ((0x0080U * (x)) + 0x3114U)
#define MGBE_DMA_CHX_TDTLP(x) ((0x0080U * (x)) + 0x3124U)
#define MGBE_DMA_CHX_TDTHP(x) ((0x0080U * (x)) + 0x3120U)
#define MGBE_DMA_CHX_RDLH(x) ((0x0080U * (x)) + 0x3118U)
#define MGBE_DMA_CHX_RDLA(x) ((0x0080U * (x)) + 0x311CU)
#define MGBE_DMA_CHX_RDTHP(x) ((0x0080U * (x)) + 0x3128U)
#define MGBE_DMA_CHX_RDTLP(x) ((0x0080U * (x)) + 0x312CU)
/** @} */
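/* Worked example of the per-channel offset arithmetic above, using the
 * 0x80 channel stride:
 *   MGBE_DMA_CHX_TX_CTRL(3) = (0x0080U * 3U) + 0x3104U
 *                           = 0x0180U + 0x3104U
 *                           = 0x3284U
 */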
/**
* @addtogroup MGBE-INTR INT Channel Register offsets
*
 * @brief MGBE Virtual Interrupt Channel register offsets
* @{
*/
#define MGBE_VIRT_INTR_CHX_STATUS(x) (0x8604U + ((x) * 8U))
#define MGBE_VIRT_INTR_CHX_CNTRL(x) (0x8600U + ((x) * 8U))
#define MGBE_VIRT_INTR_APB_CHX_CNTRL(x) (0x8200U + ((x) * 4U))
/** @} */
/**
 * @addtogroup MGBE BIT fields for MGBE Registers
*
* @brief Values defined for the MGBE registers
* @{
*/
#define MGBE_DMA_CHX_TX_CTRL_OSP OSI_BIT(4)
#define MGBE_DMA_CHX_TX_CTRL_TSE OSI_BIT(12)
#define MGBE_DMA_CHX_RX_WDT_RWT_MASK 0xFFU
#define MGBE_DMA_CHX_RX_WDT_RWTU 256U
#define MGBE_DMA_CHX_RBSZ_SHIFT 1U
#define MGBE_DMA_CHX_TX_CTRL_TXPBL_RECOMMENDED 0x100000U
#define MGBE_DMA_CHX_RX_CTRL_RXPBL_RECOMMENDED 0x100000U
#define MGBE_AXI_BUS_WIDTH 0x10U
#define MGBE_GLOBAL_DMA_STATUS 0x8700U
#define MGBE_DMA_CHX_CTRL_PBLX8 OSI_BIT(16)
#define MGBE_DMA_CHX_INTR_TIE OSI_BIT(0)
#define MGBE_DMA_CHX_INTR_TBUE OSI_BIT(2)
#define MGBE_DMA_CHX_INTR_RIE OSI_BIT(6)
#define MGBE_DMA_CHX_INTR_RBUE OSI_BIT(7)
#define MGBE_DMA_CHX_INTR_FBEE OSI_BIT(12)
#define MGBE_DMA_CHX_INTR_AIE OSI_BIT(14)
#define MGBE_DMA_CHX_INTR_NIE OSI_BIT(15)
#define MGBE_DMA_CHX_STATUS_TI OSI_BIT(0)
#define MGBE_DMA_CHX_STATUS_RI OSI_BIT(6)
#define MGBE_DMA_CHX_STATUS_NIS OSI_BIT(15)
#define MGBE_DMA_CHX_STATUS_CLEAR_TX (MGBE_DMA_CHX_STATUS_TI | \
MGBE_DMA_CHX_STATUS_NIS)
#define MGBE_DMA_CHX_STATUS_CLEAR_RX (MGBE_DMA_CHX_STATUS_RI | \
MGBE_DMA_CHX_STATUS_NIS)
#define MGBE_VIRT_INTR_CHX_STATUS_TX OSI_BIT(0)
#define MGBE_VIRT_INTR_CHX_STATUS_RX OSI_BIT(1)
#define MGBE_VIRT_INTR_CHX_CNTRL_TX OSI_BIT(0)
#define MGBE_VIRT_INTR_CHX_CNTRL_RX OSI_BIT(1)
/** @} */
#endif /* INCLUDED_MGBE_DMA_H */


@@ -79,7 +79,8 @@ static inline nve32_t validate_args(struct osi_dma_priv_data *osi_dma)
 static inline nve32_t validate_dma_chan_num(struct osi_dma_priv_data *osi_dma,
 					    nveu32_t chan)
 {
-	if (chan >= OSI_EQOS_MAX_NUM_CHANS) {
+	/* TODO: Get the max channel number based on mac/mac_ver */
+	if (chan >= OSI_MGBE_MAX_NUM_CHANS) {
 		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
 			    "Invalid DMA channel number\n", 0ULL);
 		return -1;
@@ -157,6 +158,18 @@ static nve32_t validate_func_ptrs(struct osi_dma_priv_data *osi_dma)
 nve32_t osi_init_dma_ops(struct osi_dma_priv_data *osi_dma)
 {
+	typedef void (*init_ops_arr)(struct dma_chan_ops *temp);
+	typedef void *(*safety_init)(void);
+
+	init_ops_arr i_ops[2] = {
+		eqos_init_dma_chan_ops, mgbe_init_dma_chan_ops
+	};
+	safety_init s_init[2] = {
+		eqos_get_dma_safety_config, OSI_NULL
+	};
+
 	if (osi_dma == OSI_NULL) {
 		return -1;
 	}
@@ -168,20 +181,17 @@ nve32_t osi_init_dma_ops(struct osi_dma_priv_data *osi_dma)
 		return -1;
 	}
-	if (osi_dma->mac != OSI_MAC_HW_EQOS) {
+	if (osi_dma->mac > OSI_MAC_HW_MGBE) {
 		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
 			    "DMA: Invalid MAC HW type\n", 0ULL);
 		return -1;
 	}
-	/* Get EQOS HW DMA operations */
-	eqos_init_dma_chan_ops(ops_p);
-	/* Explicitly set osi_dma->safety_config = OSI_NULL if
-	 * a particular MAC version does not need SW safety mechanisms
-	 * like periodic read-verify.
-	 */
-	osi_dma->safety_config = (void *)eqos_get_dma_safety_config();
+	i_ops[osi_dma->mac](ops_p);
+
+	if (s_init[osi_dma->mac] != OSI_NULL) {
+		osi_dma->safety_config = s_init[osi_dma->mac]();
+	}
 	if (validate_func_ptrs(osi_dma) < 0) {
 		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
@@ -203,7 +213,7 @@ nve32_t osi_hw_dma_init(struct osi_dma_priv_data *osi_dma)
 		return -1;
 	}
-	if (osi_dma->num_dma_chans > OSI_EQOS_MAX_NUM_CHANS) {
+	if (osi_dma->num_dma_chans > OSI_MGBE_MAX_NUM_CHANS) {
 		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
 			    "Invalid number of DMA channels\n", 0ULL);
 		return -1;
@@ -260,7 +270,7 @@ nve32_t osi_hw_dma_deinit(struct osi_dma_priv_data *osi_dma)
 		return -1;
 	}
-	if (osi_dma->num_dma_chans > OSI_EQOS_MAX_NUM_CHANS) {
+	if (osi_dma->num_dma_chans > OSI_MGBE_MAX_NUM_CHANS) {
 		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
 			    "Invalid number of DMA channels\n", 0ULL);
 		return -1;
@@ -493,6 +503,8 @@ static inline nve32_t rx_dma_desc_validate_args(
 	}
 	if (validate_dma_chan_num(osi_dma, chan) < 0) {
+		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
+			    "dma: Invalid channel\n", 0ULL);
 		return -1;
 	}
@@ -584,7 +596,11 @@ nve32_t osi_rx_dma_desc_init(struct osi_dma_priv_data *osi_dma,
 	}
 	rx_desc->rdes2 = 0;
-	rx_desc->rdes3 = (RDES3_IOC | RDES3_B1V);
+	rx_desc->rdes3 = RDES3_IOC;
+
+	if (osi_dma->mac == OSI_MAC_HW_EQOS) {
+		rx_desc->rdes3 |= RDES3_B1V;
+	}
 	/* Reset IOC bit if RWIT is enabled */
 	rx_dma_handle_ioc(osi_dma, rx_ring, rx_desc);
@@ -721,7 +737,7 @@ nve32_t osi_config_slot_function(struct osi_dma_priv_data *osi_dma,
 		chan = osi_dma->dma_chans[i];
 		if ((chan == 0x0U) ||
-		    (chan >= OSI_EQOS_MAX_NUM_CHANS)) {
+		    (chan >= OSI_MGBE_MAX_NUM_CHANS)) {
 			/* Ignore 0 and invalid channels */
 			continue;
 		}

osi/dma/osi_dma_local.h (new file)

@@ -0,0 +1,72 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_OSI_DMA_LOCAL_H
#define INCLUDED_OSI_DMA_LOCAL_H
#include <osi_dma.h>
#include "eqos_dma.h"
#include "mgbe_dma.h"
/* Function prototype needed for MISRA */
/**
* @brief dma_desc_init - Initialize DMA Tx/Rx descriptors
*
* @note
* Algorithm:
* - Transmit and Receive descriptors will be initialized with
* required values so that MAC DMA can understand and act accordingly.
*
* @param[in, out] osi_dma: OSI DMA private data structure.
*
* @note
* API Group:
* - Initialization: Yes
* - Run time: No
* - De-initialization: No
*
* @retval 0 on success
* @retval -1 on failure.
*/
nve32_t dma_desc_init(struct osi_dma_priv_data *osi_dma);
/**
* @addtogroup Helper Helper MACROS
*
* @brief EQOS generic helper MACROS.
* @{
*/
#define CHECK_CHAN_BOUND(chan) \
	{ \
		if ((chan) >= OSI_EQOS_MAX_NUM_CHANS) { \
			return; \
		} \
	}
#define BOOLEAN_FALSE (0U != 0U)
#define L32(data) ((data) & 0xFFFFFFFFU)
#define H32(data) (((data) & 0xFFFFFFFF00000000UL) >> 32UL)
/** @} */
#endif /* INCLUDED_OSI_DMA_LOCAL_H */
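The L32()/H32() helpers above split a 64-bit descriptor address for the paired low/high ring-address registers; a minimal sketch of their use (hypothetical helper, register names from mgbe_dma.h above):

/* Hypothetical helper: program a 64-bit Tx descriptor ring base by
 * splitting it across the high/low address registers.
 */
static void osd_set_tx_ring_base(void *base, nveu32_t chan,
				 nveu64_t ring_phy_addr)
{
	osi_writel((nveu32_t)H32(ring_phy_addr),
		   (nveu8_t *)base + MGBE_DMA_CHX_TDLH(chan));
	osi_writel((nveu32_t)L32(ring_phy_addr),
		   (nveu8_t *)base + MGBE_DMA_CHX_TDLA(chan));
}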


@@ -428,7 +428,9 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma,
 		rx_pkt_cx->flags |= OSI_PKT_CX_VALID;
 		if ((rx_desc->rdes3 & RDES3_LD) == RDES3_LD) {
-			if ((rx_desc->rdes3 & RDES3_ES_BITS) != 0U) {
+			if ((rx_desc->rdes3 &
+			    (((osi_dma->mac == OSI_MAC_HW_MGBE) ?
+			    RDES3_ES_MGBE : RDES3_ES_BITS))) != 0U) {
 				/* reset validity if any of the error bits
 				 * are set
 				 */
@@ -771,7 +773,8 @@ nve32_t osi_process_tx_completions(struct osi_dma_priv_data *osi_dma,
 		}
 	}
-	if (((tx_desc->tdes3 & TDES3_LD) == TDES3_LD) &&
+	if ((osi_dma->mac != OSI_MAC_HW_MGBE) &&
+	    ((tx_desc->tdes3 & TDES3_LD) == TDES3_LD) &&
 	    ((tx_desc->tdes3 & TDES3_CTXT) != TDES3_CTXT)) {
 		/* check tx tstamp status */
 		if ((tx_desc->tdes3 & TDES3_TTSS) == TDES3_TTSS) {
@@ -1232,7 +1235,12 @@ static nve32_t rx_dma_desc_initialization(struct osi_dma_priv_data *osi_dma,
 	}
 	rx_desc->rdes2 = 0;
-	rx_desc->rdes3 = (RDES3_IOC | RDES3_B1V);
+	rx_desc->rdes3 = RDES3_IOC;
+
+	if (osi_dma->mac == OSI_MAC_HW_EQOS) {
+		rx_desc->rdes3 |= RDES3_B1V;
+	}
+
 	/* reconfigure INTE bit if RX watchdog timer is enabled */
 	if (osi_dma->use_riwt == OSI_ENABLE) {
 		rx_desc->rdes3 &= ~RDES3_IOC;