osi: dma: interrupt enable/disable retry

Adds retry for interrupt enable/disable and
combines the interrupt handling path for EQOS/MGBE

Bug 3503523

Change-Id: Icc8b10cd786c878972e2e508ede3edb8d52addf8
Signed-off-by: Bhadram Varka <vbhadram@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/kernel/nvethernetrm/+/2652907
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Bhadram Varka
2022-01-17 08:37:42 +05:30
parent 1d10e8c3ff
commit 759a471d35
8 changed files with 132 additions and 504 deletions

View File

@@ -777,44 +777,6 @@ nve32_t osi_enable_chan_rx_intr(struct osi_dma_priv_data *osi_dma,
*/
nveu32_t osi_get_global_dma_status(struct osi_dma_priv_data *osi_dma);
/**
* @brief osi_clear_vm_tx_intr - Handles VM Tx interrupt source.
*
* Algorithm: Clear Tx interrupt source at wrapper level and DMA level.
*
* @param[in] osi_dma: DMA private data.
* @param[in] chan: DMA tx channel number.
*
* @note
* 1) MAC needs to be out of reset and proper clocks need to be configured.
* 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
*
* @retval 0 on success
* @retval -1 on failure.
*/
nve32_t osi_clear_vm_tx_intr(struct osi_dma_priv_data *osi_dma,
nveu32_t chan);
/**
* @brief osi_clear_vm_rx_intr - Handles VM Rx interrupt source.
*
* Algorithm: Clear Rx interrupt source at wrapper level and DMA level.
*
* @param[in] osi_dma: DMA private data.
* @param[in] chan: DMA rx channel number.
*
* @note
* 1) MAC needs to be out of reset and proper clocks need to be configured.
* 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
* 3) Mapping of physical IRQ line to DMA channel need to be maintained at
* OS Dependent layer and pass corresponding channel number.
*
* @retval 0 on success
* @retval -1 on failure.
*/
nve32_t osi_clear_vm_rx_intr(struct osi_dma_priv_data *osi_dma,
nveu32_t chan);
/**
* @brief Start DMA
*

View File

@@ -66,14 +66,6 @@ struct dma_chan_ops {
/** Called to update Rx ring tail pointer */
void (*update_rx_tailptr)(void *addr, nveu32_t chan,
nveu64_t tailptr);
/** Called to disable DMA Tx channel interrupts at wrapper level */
void (*disable_chan_tx_intr)(void *addr, nveu32_t chan);
/** Called to enable DMA Tx channel interrupts at wrapper level */
void (*enable_chan_tx_intr)(void *addr, nveu32_t chan);
/** Called to disable DMA Rx channel interrupts at wrapper level */
void (*disable_chan_rx_intr)(void *addr, nveu32_t chan);
/** Called to enable DMA Rx channel interrupts at wrapper level */
void (*enable_chan_rx_intr)(void *addr, nveu32_t chan);
/** Called to start the Tx/Rx DMA */
void (*start_dma)(struct osi_dma_priv_data *osi_dma, nveu32_t chan);
/** Called to stop the Tx/Rx DMA */
@@ -92,10 +84,6 @@ struct dma_chan_ops {
nveu32_t set,
nveu32_t interval);
#endif /* !OSI_STRIPPED_LIB */
/** Called to clear VM Tx interrupt */
void (*clear_vm_tx_intr)(void *addr, nveu32_t chan);
/** Called to clear VM Rx interrupt */
void (*clear_vm_rx_intr)(void *addr, nveu32_t chan);
};
/**
@@ -139,8 +127,6 @@ struct dma_local {
nveu32_t init_done;
/** Holds the MAC version of MAC controller */
nveu32_t mac_ver;
/** Represents whether DMA interrupts are VM or Non-VM */
nveu32_t vm_intr;
/** Magic number to validate osi_dma pointer */
nveu64_t magic_num;
/** Maximum number of DMA channels */

View File

@@ -145,159 +145,6 @@ static void eqos_dma_safety_init(struct osi_dma_priv_data *osi_dma)
osi_lock_init(&config->dma_safety_lock);
}
/**
 * @brief eqos_disable_chan_tx_intr - Disables DMA Tx channel interrupts.
 *
 * Acks any pending Tx interrupt (at both DMA level and wrapper level) and
 * then clears the Tx enable bit in the per-channel virtual interrupt
 * control register.
 *
 * @param[in] addr: Base address indicating the start of
 *                  memory mapped IO region of the MAC.
 * @param[in] chan: DMA Tx channel number.
 *
 * @pre
 * - MAC needs to be out of reset and proper clocks need to be configured
 * - DMA HW init need to be completed successfully, see osi_hw_dma_init
 * - Mapping of physical IRQ line to DMA channel need to be maintained at
 *   OSDependent layer and pass corresponding channel number.
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: Yes
 * - De-initialization: Yes
 */
static void eqos_disable_chan_tx_intr(void *addr, nveu32_t chan)
{
nveu32_t cntrl, status;
#if 0
CHECK_CHAN_BOUND(chan);
#endif
/* Ack a pending Tx irq before disabling: clear the DMA-level status
 * first, then the wrapper-level virtual interrupt status. */
status = osi_readl((nveu8_t *)addr +
EQOS_VIRT_INTR_CHX_STATUS(chan));
if ((status & EQOS_VIRT_INTR_CHX_STATUS_TX) ==
EQOS_VIRT_INTR_CHX_STATUS_TX) {
osi_writel(EQOS_DMA_CHX_STATUS_CLEAR_TX,
(nveu8_t *)addr + EQOS_DMA_CHX_STATUS(chan));
osi_writel(EQOS_VIRT_INTR_CHX_STATUS_TX,
(nveu8_t *)addr +
EQOS_VIRT_INTR_CHX_STATUS(chan));
}
/* Disable the irq: read-modify-write clears the Tx enable bit */
cntrl = osi_readl((nveu8_t *)addr +
EQOS_VIRT_INTR_CHX_CNTRL(chan));
cntrl &= ~EQOS_VIRT_INTR_CHX_CNTRL_TX;
osi_writel(cntrl, (nveu8_t *)addr +
EQOS_VIRT_INTR_CHX_CNTRL(chan));
}
/**
 * @brief eqos_enable_chan_tx_intr - Enable Tx channel interrupts.
 *
 * Sets the Tx enable bit in the per-channel virtual interrupt control
 * register at the wrapper level.
 *
 * @param[in] addr: Base address indicating the start of
 *                  memory mapped IO region of the MAC.
 * @param[in] chan: DMA Tx channel number.
 *
 * @pre
 * - MAC needs to be out of reset and proper clocks need to be configured
 * - DMA HW init need to be completed successfully, see osi_hw_dma_init
 * - Mapping of physical IRQ line to DMA channel need to be maintained at
 *   OSDependent layer and pass corresponding channel number.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: Yes
 * - De-initialization: No
 */
static void eqos_enable_chan_tx_intr(void *addr, nveu32_t chan)
{
	nveu8_t *ctrl_reg = (nveu8_t *)addr + EQOS_VIRT_INTR_CHX_CNTRL(chan);
	nveu32_t ctrl_val;
#if 0
	CHECK_CHAN_BOUND(chan);
#endif
	ctrl_val = osi_readl(ctrl_reg);
	ctrl_val |= EQOS_VIRT_INTR_CHX_CNTRL_TX;
	osi_writel(ctrl_val, ctrl_reg);
}
/**
 * @brief eqos_disable_chan_rx_intr - Disable Rx channel interrupts.
 *
 * Acks any pending Rx interrupt (at both DMA level and wrapper level) and
 * then clears the Rx enable bit in the per-channel virtual interrupt
 * control register.
 *
 * @param[in] addr: Base address indicating the start of
 *                  memory mapped IO region of the MAC.
 * @param[in] chan: DMA Rx channel number.
 *
 * @pre
 * - MAC needs to be out of reset and proper clocks need to be configured
 * - DMA HW init need to be completed successfully, see osi_hw_dma_init
 * - Mapping of physical IRQ line to DMA channel need to be maintained at
 *   OSDependent layer and pass corresponding channel number.
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: Yes
 * - De-initialization: Yes
 */
static void eqos_disable_chan_rx_intr(void *addr, nveu32_t chan)
{
nveu32_t cntrl, status;
#if 0
CHECK_CHAN_BOUND(chan);
#endif
/* Ack a pending Rx irq before disabling: clear the DMA-level status
 * first, then the wrapper-level virtual interrupt status. */
status = osi_readl((nveu8_t *)addr +
EQOS_VIRT_INTR_CHX_STATUS(chan));
if ((status & EQOS_VIRT_INTR_CHX_STATUS_RX) ==
EQOS_VIRT_INTR_CHX_STATUS_RX) {
osi_writel(EQOS_DMA_CHX_STATUS_CLEAR_RX,
(nveu8_t *)addr + EQOS_DMA_CHX_STATUS(chan));
osi_writel(EQOS_VIRT_INTR_CHX_STATUS_RX,
(nveu8_t *)addr +
EQOS_VIRT_INTR_CHX_STATUS(chan));
}
/* Disable irq: read-modify-write clears the Rx enable bit */
cntrl = osi_readl((nveu8_t *)addr +
EQOS_VIRT_INTR_CHX_CNTRL(chan));
cntrl &= ~EQOS_VIRT_INTR_CHX_CNTRL_RX;
osi_writel(cntrl, (nveu8_t *)addr +
EQOS_VIRT_INTR_CHX_CNTRL(chan));
}
/**
 * @brief eqos_enable_chan_rx_intr - Enable Rx channel interrupts.
 *
 * Sets the Rx enable bit in the per-channel virtual interrupt control
 * register at the wrapper level.
 *
 * @param[in] addr: Base address indicating the start of
 *                  memory mapped IO region of the MAC.
 * @param[in] chan: DMA Rx channel number.
 *
 * @pre
 * - MAC needs to be out of reset and proper clocks need to be configured
 * - DMA HW init need to be completed successfully, see osi_hw_dma_init
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: Yes
 * - De-initialization: No
 */
static void eqos_enable_chan_rx_intr(void *addr, nveu32_t chan)
{
	nveu8_t *ctrl_reg = (nveu8_t *)addr + EQOS_VIRT_INTR_CHX_CNTRL(chan);
	nveu32_t ctrl_val;
#if 0
	CHECK_CHAN_BOUND(chan);
#endif
	ctrl_val = osi_readl(ctrl_reg);
	ctrl_val |= EQOS_VIRT_INTR_CHX_CNTRL_RX;
	osi_writel(ctrl_val, ctrl_reg);
}
/**
* @brief eqos_set_tx_ring_len - Set DMA Tx ring length.
*
@@ -897,59 +744,6 @@ static void eqos_config_slot(struct osi_dma_priv_data *osi_dma,
}
#endif /* !OSI_STRIPPED_LIB */
/**
 * @brief eqos_clear_vm_tx_intr - Handle VM Tx interrupt
 *
 * Algorithm: Clear Tx interrupt source at DMA and wrapper level, then
 * disable the channel's Tx interrupt until the caller re-enables it.
 *
 * @param[in] addr: MAC base address.
 * @param[in] chan: DMA Tx channel number.
 *
 * @note
 * Dependencies: None.
 * Protection: None.
 *
 * @retval None.
 */
static void eqos_clear_vm_tx_intr(void *addr, nveu32_t chan)
{
#if 0
CHECK_CHAN_BOUND(chan);
#endif
/* Ack at DMA level first, then at the wrapper (virtual intr) level */
osi_writel(EQOS_DMA_CHX_STATUS_CLEAR_TX,
(nveu8_t *)addr + EQOS_DMA_CHX_STATUS(chan));
osi_writel(EQOS_VIRT_INTR_CHX_STATUS_TX,
(nveu8_t *)addr + EQOS_VIRT_INTR_CHX_STATUS(chan));
/* Mask further Tx interrupts until re-enabled by the caller */
eqos_disable_chan_tx_intr(addr, chan);
}
/**
 * @brief eqos_clear_vm_rx_intr - Handle VM Rx interrupt
 *
 * Algorithm: Clear Rx interrupt source at DMA and wrapper level, then
 * disable the channel's Rx interrupt until the caller re-enables it.
 *
 * @param[in] addr: MAC base address.
 * @param[in] chan: DMA Rx channel number.
 *
 * @note
 * Dependencies: None.
 * Protection: None.
 *
 * @retval None.
 */
static void eqos_clear_vm_rx_intr(void *addr, nveu32_t chan)
{
#if 0
CHECK_CHAN_BOUND(chan);
#endif
/* Ack at DMA level first, then at the wrapper (virtual intr) level */
osi_writel(EQOS_DMA_CHX_STATUS_CLEAR_RX,
(nveu8_t *)addr + EQOS_DMA_CHX_STATUS(chan));
osi_writel(EQOS_VIRT_INTR_CHX_STATUS_RX,
(nveu8_t *)addr + EQOS_VIRT_INTR_CHX_STATUS(chan));
/* Mask further Rx interrupts until re-enabled by the caller */
eqos_disable_chan_rx_intr(addr, chan);
}
/**
* @brief eqos_get_dma_safety_config - EQOS get DMA safety configuration
*/
@@ -971,10 +765,6 @@ void eqos_init_dma_chan_ops(struct dma_chan_ops *ops)
ops->set_rx_ring_start_addr = eqos_set_rx_ring_start_addr;
ops->update_tx_tailptr = eqos_update_tx_tailptr;
ops->update_rx_tailptr = eqos_update_rx_tailptr;
ops->disable_chan_tx_intr = eqos_disable_chan_tx_intr;
ops->enable_chan_tx_intr = eqos_enable_chan_tx_intr;
ops->disable_chan_rx_intr = eqos_disable_chan_rx_intr;
ops->enable_chan_rx_intr = eqos_enable_chan_rx_intr;
ops->start_dma = eqos_start_dma;
ops->stop_dma = eqos_stop_dma;
ops->init_dma_channel = eqos_init_dma_channel;
@@ -983,6 +773,4 @@ void eqos_init_dma_chan_ops(struct dma_chan_ops *ops)
ops->validate_regs = eqos_validate_dma_regs;
ops->config_slot = eqos_config_slot;
#endif /* !OSI_STRIPPED_LIB */
ops->clear_vm_tx_intr = eqos_clear_vm_tx_intr;
ops->clear_vm_rx_intr = eqos_clear_vm_rx_intr;
}

View File

@@ -55,9 +55,6 @@
#define EQOS_DMA_CHX_TDLH(x) ((0x0080U * (x)) + 0x1110U)
#define EQOS_DMA_CHX_TDLA(x) ((0x0080U * (x)) + 0x1114U)
#define EQOS_DMA_CHX_TDRL(x) ((0x0080U * (x)) + 0x112CU)
#define EQOS_VIRT_INTR_APB_CHX_CNTRL(x) (0x8200U + ((x) * 4U))
#define EQOS_VIRT_INTR_CHX_STATUS(x) (0x8604U + ((x) * 8U))
#define EQOS_VIRT_INTR_CHX_CNTRL(x) (0x8600U + ((x) * 8U))
/** @} */
/**
@@ -66,8 +63,6 @@
* @brief Values defined for the DMA channel registers
* @{
*/
#define EQOS_VIRT_INTR_CHX_STATUS_TX OSI_BIT(0)
#define EQOS_VIRT_INTR_CHX_STATUS_RX OSI_BIT(1)
#define EQOS_DMA_CHX_STATUS_TI OSI_BIT(0)
#define EQOS_DMA_CHX_STATUS_RI OSI_BIT(6)
#define EQOS_DMA_CHX_STATUS_NIS OSI_BIT(15)
@@ -76,9 +71,6 @@
#define EQOS_DMA_CHX_STATUS_CLEAR_RX \
(EQOS_DMA_CHX_STATUS_RI | EQOS_DMA_CHX_STATUS_NIS)
#define EQOS_VIRT_INTR_CHX_CNTRL_TX OSI_BIT(0)
#define EQOS_VIRT_INTR_CHX_CNTRL_RX OSI_BIT(1)
#define EQOS_DMA_CHX_INTR_TIE OSI_BIT(0)
#define EQOS_DMA_CHX_INTR_TBUE OSI_BIT(2)
#define EQOS_DMA_CHX_INTR_RIE OSI_BIT(6)

View File

@@ -30,7 +30,8 @@
* @{
*/
#define HW_GLOBAL_DMA_STATUS 0x8700U
#define VIRT_INTR_CHX_CNTRL(x) (0x8600U + ((x) * 8U))
#define VIRT_INTR_CHX_STATUS(x) (0x8604U + ((x) * 8U))
/** @} */
#endif /* INCLUDED_HW_COMMON_H */

View File

@@ -25,104 +25,6 @@
#include "mgbe_dma.h"
#include "dma_local.h"
/**
 * @brief mgbe_disable_chan_tx_intr - Disables DMA Tx channel interrupts.
 *
 * Clears the Tx enable bit in the per-channel virtual interrupt control
 * register at the wrapper level.
 *
 * @param[in] addr: Base address indicating the start of
 *                  memory mapped IO region of the MAC.
 * @param[in] chan: DMA Tx channel number.
 *
 * @note 1) MAC needs to be out of reset and proper clocks need to be configured
 *       2) DMA HW init need to be completed successfully, see osi_hw_dma_init
 *       3) Mapping of physical IRQ line to DMA channel need to be maintained at
 *          OSDependent layer and pass corresponding channel number.
 */
static void mgbe_disable_chan_tx_intr(void *addr, nveu32_t chan)
{
	nveu8_t *ctrl_reg = (nveu8_t *)addr + MGBE_VIRT_INTR_CHX_CNTRL(chan);
	nveu32_t ctrl_val;
#if 0
	MGBE_CHECK_CHAN_BOUND(chan);
#endif
	ctrl_val = osi_readl(ctrl_reg);
	ctrl_val &= ~MGBE_VIRT_INTR_CHX_CNTRL_TX;
	osi_writel(ctrl_val, ctrl_reg);
}
/**
 * @brief mgbe_enable_chan_tx_intr - Enable Tx channel interrupts.
 *
 * Sets the Tx enable bit in the per-channel virtual interrupt control
 * register at the wrapper level.
 *
 * @param[in] addr: Base address indicating the start of
 *                  memory mapped IO region of the MAC.
 * @param[in] chan: DMA Tx channel number.
 *
 * @note 1) MAC needs to be out of reset and proper clocks need to be configured
 *       2) DMA HW init need to be completed successfully, see osi_hw_dma_init
 *       3) Mapping of physical IRQ line to DMA channel need to be maintained at
 *          OSDependent layer and pass corresponding channel number.
 */
static void mgbe_enable_chan_tx_intr(void *addr, nveu32_t chan)
{
	nveu8_t *ctrl_reg = (nveu8_t *)addr + MGBE_VIRT_INTR_CHX_CNTRL(chan);
	nveu32_t ctrl_val;
#if 0
	MGBE_CHECK_CHAN_BOUND(chan);
#endif
	ctrl_val = osi_readl(ctrl_reg);
	ctrl_val |= MGBE_VIRT_INTR_CHX_CNTRL_TX;
	osi_writel(ctrl_val, ctrl_reg);
}
/**
 * @brief mgbe_disable_chan_rx_intr - Disable Rx channel interrupts.
 *
 * Clears the Rx enable bit in the per-channel virtual interrupt control
 * register at the wrapper level.
 *
 * @param[in] addr: Base address indicating the start of
 *                  memory mapped IO region of the MAC.
 * @param[in] chan: DMA Rx channel number.
 *
 * @note 1) MAC needs to be out of reset and proper clocks need to be configured
 *       2) DMA HW init need to be completed successfully, see osi_hw_dma_init
 *       3) Mapping of physical IRQ line to DMA channel need to be maintained at
 *          OSDependent layer and pass corresponding channel number.
 */
static void mgbe_disable_chan_rx_intr(void *addr, nveu32_t chan)
{
nveu32_t cntrl;
#if 0
MGBE_CHECK_CHAN_BOUND(chan);
#endif
/* Read-modify-write: clear the Rx enable bit, preserve other bits */
cntrl = osi_readl((nveu8_t *)addr +
MGBE_VIRT_INTR_CHX_CNTRL(chan));
cntrl &= ~MGBE_VIRT_INTR_CHX_CNTRL_RX;
osi_writel(cntrl, (nveu8_t *)addr +
MGBE_VIRT_INTR_CHX_CNTRL(chan));
}
/**
 * @brief mgbe_enable_chan_rx_intr - Enable Rx channel interrupts.
 *
 * Sets the Rx enable bit in the per-channel virtual interrupt control
 * register at the wrapper level.
 *
 * @param[in] addr: Base address indicating the start of
 *                  memory mapped IO region of the MAC.
 * @param[in] chan: DMA Rx channel number.
 *
 * @note 1) MAC needs to be out of reset and proper clocks need to be configured
 *       2) DMA HW init need to be completed successfully, see osi_hw_dma_init
 */
static void mgbe_enable_chan_rx_intr(void *addr, nveu32_t chan)
{
nveu32_t cntrl;
#if 0
MGBE_CHECK_CHAN_BOUND(chan);
#endif
/* Read-modify-write: set the Rx enable bit, preserve other bits */
cntrl = osi_readl((nveu8_t *)addr +
MGBE_VIRT_INTR_CHX_CNTRL(chan));
cntrl |= MGBE_VIRT_INTR_CHX_CNTRL_RX;
osi_writel(cntrl, (nveu8_t *)addr +
MGBE_VIRT_INTR_CHX_CNTRL(chan));
}
/**
* @brief mgbe_set_tx_ring_len - Set DMA Tx ring length.
*
@@ -636,48 +538,6 @@ static nve32_t mgbe_validate_dma_regs(OSI_UNUSED
return 0;
}
/**
 * @brief mgbe_clear_vm_tx_intr - Clear VM Tx interrupt
 *
 * Algorithm: Clear Tx interrupt source at DMA and wrapper level, then
 * disable the channel's Tx interrupt until the caller re-enables it.
 *
 * @param[in] addr: MAC base address.
 * @param[in] chan: DMA Tx channel number.
 */
static void mgbe_clear_vm_tx_intr(void *addr, nveu32_t chan)
{
#if 0
MGBE_CHECK_CHAN_BOUND(chan);
#endif
/* Ack at DMA level first, then at the wrapper (virtual intr) level */
osi_writel(MGBE_DMA_CHX_STATUS_CLEAR_TX,
(nveu8_t *)addr + MGBE_DMA_CHX_STATUS(chan));
osi_writel(MGBE_VIRT_INTR_CHX_STATUS_TX,
(nveu8_t *)addr + MGBE_VIRT_INTR_CHX_STATUS(chan));
/* Mask further Tx interrupts until re-enabled by the caller */
mgbe_disable_chan_tx_intr(addr, chan);
}
/**
 * @brief mgbe_clear_vm_rx_intr - Clear VM Rx interrupt
 *
 * Algorithm: Clear Rx interrupt source at DMA and wrapper level, then
 * disable the channel's Rx interrupt until the caller re-enables it.
 *
 * @param[in] addr: MAC base address.
 * @param[in] chan: DMA Rx channel number.
 */
static void mgbe_clear_vm_rx_intr(void *addr, nveu32_t chan)
{
#if 0
MGBE_CHECK_CHAN_BOUND(chan);
#endif
/* Ack at DMA level first, then at the wrapper (virtual intr) level */
osi_writel(MGBE_DMA_CHX_STATUS_CLEAR_RX,
(nveu8_t *)addr + MGBE_DMA_CHX_STATUS(chan));
osi_writel(MGBE_VIRT_INTR_CHX_STATUS_RX,
(nveu8_t *)addr + MGBE_VIRT_INTR_CHX_STATUS(chan));
/* Mask further Rx interrupts until re-enabled by the caller */
mgbe_disable_chan_rx_intr(addr, chan);
}
/**
* @brief mgbe_config_slot - Configure slot Checking for DMA channel
*
@@ -728,16 +588,10 @@ void mgbe_init_dma_chan_ops(struct dma_chan_ops *ops)
ops->set_rx_ring_start_addr = mgbe_set_rx_ring_start_addr;
ops->update_tx_tailptr = mgbe_update_tx_tailptr;
ops->update_rx_tailptr = mgbe_update_rx_tailptr;
ops->disable_chan_tx_intr = mgbe_disable_chan_tx_intr;
ops->enable_chan_tx_intr = mgbe_enable_chan_tx_intr;
ops->disable_chan_rx_intr = mgbe_disable_chan_rx_intr;
ops->enable_chan_rx_intr = mgbe_enable_chan_rx_intr;
ops->start_dma = mgbe_start_dma;
ops->stop_dma = mgbe_stop_dma;
ops->init_dma_channel = mgbe_init_dma_channel;
ops->set_rx_buf_len = mgbe_set_rx_buf_len;
ops->validate_regs = mgbe_validate_dma_regs;
ops->clear_vm_tx_intr = mgbe_clear_vm_tx_intr;
ops->clear_vm_rx_intr = mgbe_clear_vm_rx_intr;
ops->config_slot = mgbe_config_slot;
};

View File

@@ -67,15 +67,6 @@
#define MGBE_DMA_CHX_RDTLP(x) ((0x0080U * (x)) + 0x312CU)
/** @} */
/**
* @addtogroup MGBE_INTR INT Channel Register offsets
*
* @brief MGBE Virtual Interrupt Channel register offsets
* @{
*/
#define MGBE_VIRT_INTR_CHX_STATUS(x) (0x8604U + ((x) * 8U))
#define MGBE_VIRT_INTR_CHX_CNTRL(x) (0x8600U + ((x) * 8U))
#define MGBE_VIRT_INTR_APB_CHX_CNTRL(x) (0x8200U + ((x) * 4U))
/** @} */
/**
@@ -102,18 +93,7 @@
#define MGBE_DMA_CHX_INTR_FBEE OSI_BIT(12)
#define MGBE_DMA_CHX_INTR_AIE OSI_BIT(14)
#define MGBE_DMA_CHX_INTR_NIE OSI_BIT(15)
#define MGBE_DMA_CHX_STATUS_TI OSI_BIT(0)
#define MGBE_DMA_CHX_STATUS_RI OSI_BIT(6)
#define MGBE_DMA_CHX_STATUS_NIS OSI_BIT(15)
#define MGBE_DMA_CHX_SLOT_ESC OSI_BIT(0)
#define MGBE_DMA_CHX_STATUS_CLEAR_TX (MGBE_DMA_CHX_STATUS_TI | \
MGBE_DMA_CHX_STATUS_NIS)
#define MGBE_DMA_CHX_STATUS_CLEAR_RX (MGBE_DMA_CHX_STATUS_RI | \
MGBE_DMA_CHX_STATUS_NIS)
#define MGBE_VIRT_INTR_CHX_STATUS_TX OSI_BIT(0)
#define MGBE_VIRT_INTR_CHX_STATUS_RX OSI_BIT(1)
#define MGBE_VIRT_INTR_CHX_CNTRL_TX OSI_BIT(0)
#define MGBE_VIRT_INTR_CHX_CNTRL_RX OSI_BIT(1)
#define MGBE_DMA_CHX_TX_CNTRL2_ORRQ_RECOMMENDED 64U
#define MGBE_DMA_CHX_TX_CNTRL2_ORRQ_SHIFT 24U
#define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN 32U

View File

@@ -39,6 +39,86 @@ static struct dma_local g_dma[MAX_DMA_INSTANCES];
*/
static struct dma_chan_ops g_ops[MAX_MAC_IP_TYPES];
typedef nve32_t (*dma_intr_fn)(struct osi_dma_priv_data const *osi_dma,
nveu32_t intr_ctrl, nveu32_t intr_status,
nveu32_t dma_status, nveu32_t val);
static inline nve32_t enable_intr(struct osi_dma_priv_data const *osi_dma,
nveu32_t intr_ctrl, nveu32_t intr_status,
nveu32_t dma_status, nveu32_t val);
static inline nve32_t disable_intr(struct osi_dma_priv_data const *osi_dma,
nveu32_t intr_ctrl, nveu32_t intr_status,
nveu32_t dma_status, nveu32_t val);
static dma_intr_fn intr_fn[2] = { disable_intr, enable_intr };
/** Return @p val with the bit(s) in @p pos_val set. */
static inline nveu32_t set_pos_val(nveu32_t val, nveu32_t pos_val)
{
	nveu32_t result = val;

	result |= pos_val;
	return result;
}
/** Return @p val with the bit(s) in @p pos_val cleared. */
static inline nveu32_t clear_pos_val(nveu32_t val, nveu32_t pos_val)
{
	nveu32_t mask = ~pos_val;

	return (val & mask);
}
/**
 * @brief intr_en_dis_retry - Read-modify-write interrupt control with retry.
 *
 * Algorithm: Reads the interrupt control register, sets (enable) or clears
 * (disable) the requested bit(s) and writes the value back. The register
 * is read back to confirm the write took effect; on mismatch the whole
 * read-modify-write sequence is retried up to max_retries times.
 *
 * @param[in] base: MAC base address.
 * @param[in] intr_ctrl: Interrupt control register offset.
 * @param[in] val: Bit mask to set or clear.
 * @param[in] en_dis: OSI_DMA_INTR_ENABLE to set the bits,
 *                    OSI_DMA_INTR_DISABLE to clear them. Must be 0 or 1;
 *                    any other value indexes set_clr[] out of bounds.
 *
 * @retval 0 on success
 * @retval -1 when the read-back never matched the written value.
 */
static inline nve32_t intr_en_dis_retry(nveu8_t *base, nveu32_t intr_ctrl,
					nveu32_t val, nveu32_t en_dis)
{
	typedef nveu32_t (*set_clear)(nveu32_t val, nveu32_t pos);
	const set_clear set_clr[2] = { clear_pos_val, set_pos_val };
	/* Named bound instead of a magic number duplicated in the loop
	 * condition and the post-loop failure check. */
	const nveu32_t max_retries = 10U;
	nveu32_t cntrl1, cntrl2, i;

	for (i = 0U; i < max_retries; i++) {
		cntrl1 = osi_readl(base + intr_ctrl);
		cntrl1 = set_clr[en_dis](cntrl1, val);
		osi_writel(cntrl1, base + intr_ctrl);
		/* Read back to verify the update actually landed */
		cntrl2 = osi_readl(base + intr_ctrl);
		if (cntrl1 == cntrl2) {
			return 0;
		}
	}

	/* All retries exhausted */
	return -1;
}
/**
 * @brief enable_intr - Enable DMA channel interrupt bit(s) with retry.
 *
 * Sets @p val in the wrapper-level interrupt control register via
 * intr_en_dis_retry(). The status register parameters are unused on the
 * enable path (nothing is acked when enabling).
 *
 * @param[in] osi_dma: DMA private data.
 * @param[in] intr_ctrl: Interrupt control register offset.
 * @param[in] intr_status: Unused on the enable path.
 * @param[in] dma_status: Unused on the enable path.
 * @param[in] val: Interrupt bit(s) to enable.
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
static inline nve32_t enable_intr(struct osi_dma_priv_data const *osi_dma,
nveu32_t intr_ctrl, OSI_UNUSED nveu32_t intr_status,
OSI_UNUSED nveu32_t dma_status, nveu32_t val)
{
return intr_en_dis_retry((nveu8_t *)osi_dma->base, intr_ctrl,
val, OSI_DMA_INTR_ENABLE);
}
/**
 * @brief disable_intr - Ack and disable DMA channel interrupt bit(s).
 *
 * If the interrupt selected by @p val is pending in the wrapper-level
 * status register, the corresponding DMA-level and wrapper-level status
 * bits are cleared first; then the control bit is cleared with retry via
 * intr_en_dis_retry().
 *
 * @param[in] osi_dma: DMA private data.
 * @param[in] intr_ctrl: Interrupt control register offset.
 * @param[in] intr_status: Wrapper-level interrupt status register offset.
 * @param[in] dma_status: DMA channel status register offset (EQOS or MGBE).
 * @param[in] val: Interrupt bit to disable. status_val[] is indexed by
 *                 this value, so it must be OSI_BIT(0) (Tx, index 1) or
 *                 OSI_BIT(1) (Rx, index 2); anything else reads a zero or
 *                 out-of-bounds entry.
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
static inline nve32_t disable_intr(struct osi_dma_priv_data const *osi_dma,
nveu32_t intr_ctrl, nveu32_t intr_status,
nveu32_t dma_status, nveu32_t val)
{
nveu8_t *base = (nveu8_t *)osi_dma->base;
/* Index 1 = Tx clear mask, index 2 = Rx clear mask.
 * NOTE(review): EQOS_* clear masks are written even when dma_status is
 * an MGBE register — presumably the TI/RI/NIS bit layout is identical to
 * MGBE_DMA_CHX_STATUS_CLEAR_*; confirm against the MGBE defines. */
const nveu32_t status_val[4] = {
0,
EQOS_DMA_CHX_STATUS_CLEAR_TX,
EQOS_DMA_CHX_STATUS_CLEAR_RX,
0,
};
nveu32_t status;
status = osi_readl(base + intr_status);
if ((status & val) == val) {
/* Ack at DMA level first, then at the wrapper level */
osi_writel(status_val[val], base + dma_status);
osi_writel(val, base + intr_status);
}
return intr_en_dis_retry((nveu8_t *)osi_dma->base, intr_ctrl,
val, OSI_DMA_INTR_DISABLE);
}
struct osi_dma_priv_data *osi_get_dma(void)
{
nveu32_t i;
@@ -320,17 +400,32 @@ nve32_t osi_hw_dma_init(struct osi_dma_priv_data *osi_dma)
return ret;
}
if ((l_dma->mac_ver != OSI_EQOS_MAC_4_10) &&
(l_dma->mac_ver != OSI_EQOS_MAC_5_00)) {
l_dma->vm_intr = OSI_ENABLE;
}
/* Enable channel interrupts at wrapper level and start DMA */
for (i = 0; i < osi_dma->num_dma_chans; i++) {
chan = osi_dma->dma_chans[i];
l_dma->ops_p->enable_chan_tx_intr(osi_dma->base, chan);
l_dma->ops_p->enable_chan_rx_intr(osi_dma->base, chan);
ret = intr_fn[OSI_DMA_INTR_ENABLE](osi_dma,
VIRT_INTR_CHX_CNTRL(chan),
VIRT_INTR_CHX_STATUS(chan),
((osi_dma->mac == OSI_MAC_HW_MGBE) ?
MGBE_DMA_CHX_STATUS(chan) :
EQOS_DMA_CHX_STATUS(chan)),
OSI_BIT(OSI_DMA_CH_TX_INTR));
if (ret < 0) {
return ret;
}
ret = intr_fn[OSI_DMA_INTR_ENABLE](osi_dma,
VIRT_INTR_CHX_CNTRL(chan),
VIRT_INTR_CHX_STATUS(chan),
((osi_dma->mac == OSI_MAC_HW_MGBE) ?
MGBE_DMA_CHX_STATUS(chan) :
EQOS_DMA_CHX_STATUS(chan)),
OSI_BIT(OSI_DMA_CH_RX_INTR));
if (ret < 0) {
return ret;
}
l_dma->ops_p->start_dma(osi_dma, chan);
}
@@ -390,9 +485,12 @@ nve32_t osi_disable_chan_tx_intr(struct osi_dma_priv_data *osi_dma,
return -1;
}
l_dma->ops_p->disable_chan_tx_intr(osi_dma->base, chan);
return 0;
return intr_fn[OSI_DMA_INTR_DISABLE](osi_dma, VIRT_INTR_CHX_CNTRL(chan),
VIRT_INTR_CHX_STATUS(chan),
((osi_dma->mac == OSI_MAC_HW_MGBE) ?
MGBE_DMA_CHX_STATUS(chan) :
EQOS_DMA_CHX_STATUS(chan)),
OSI_BIT(OSI_DMA_CH_TX_INTR));
}
nve32_t osi_enable_chan_tx_intr(struct osi_dma_priv_data *osi_dma,
@@ -408,9 +506,12 @@ nve32_t osi_enable_chan_tx_intr(struct osi_dma_priv_data *osi_dma,
return -1;
}
l_dma->ops_p->enable_chan_tx_intr(osi_dma->base, chan);
return 0;
return intr_fn[OSI_DMA_INTR_ENABLE](osi_dma, VIRT_INTR_CHX_CNTRL(chan),
VIRT_INTR_CHX_STATUS(chan),
((osi_dma->mac == OSI_MAC_HW_MGBE) ?
MGBE_DMA_CHX_STATUS(chan) :
EQOS_DMA_CHX_STATUS(chan)),
OSI_BIT(OSI_DMA_CH_TX_INTR));
}
nve32_t osi_disable_chan_rx_intr(struct osi_dma_priv_data *osi_dma,
@@ -426,9 +527,12 @@ nve32_t osi_disable_chan_rx_intr(struct osi_dma_priv_data *osi_dma,
return -1;
}
l_dma->ops_p->disable_chan_rx_intr(osi_dma->base, chan);
return 0;
return intr_fn[OSI_DMA_INTR_DISABLE](osi_dma, VIRT_INTR_CHX_CNTRL(chan),
VIRT_INTR_CHX_STATUS(chan),
((osi_dma->mac == OSI_MAC_HW_MGBE) ?
MGBE_DMA_CHX_STATUS(chan) :
EQOS_DMA_CHX_STATUS(chan)),
OSI_BIT(OSI_DMA_CH_RX_INTR));
}
nve32_t osi_enable_chan_rx_intr(struct osi_dma_priv_data *osi_dma,
@@ -444,45 +548,12 @@ nve32_t osi_enable_chan_rx_intr(struct osi_dma_priv_data *osi_dma,
return -1;
}
l_dma->ops_p->enable_chan_rx_intr(osi_dma->base, chan);
return 0;
}
nve32_t osi_clear_vm_tx_intr(struct osi_dma_priv_data *osi_dma,
nveu32_t chan)
{
struct dma_local *l_dma = (struct dma_local *)osi_dma;
if (validate_args(osi_dma, l_dma) < 0) {
return -1;
}
if (validate_dma_chan_num(osi_dma, chan) < 0) {
return -1;
}
l_dma->ops_p->clear_vm_tx_intr(osi_dma->base, chan);
return 0;
}
nve32_t osi_clear_vm_rx_intr(struct osi_dma_priv_data *osi_dma,
nveu32_t chan)
{
struct dma_local *l_dma = (struct dma_local *)osi_dma;
if (validate_args(osi_dma, l_dma) < 0) {
return -1;
}
if (validate_dma_chan_num(osi_dma, chan) < 0) {
return -1;
}
l_dma->ops_p->clear_vm_rx_intr(osi_dma->base, chan);
return 0;
return intr_fn[OSI_DMA_INTR_ENABLE](osi_dma, VIRT_INTR_CHX_CNTRL(chan),
VIRT_INTR_CHX_STATUS(chan),
((osi_dma->mac == OSI_MAC_HW_MGBE) ?
MGBE_DMA_CHX_STATUS(chan) :
EQOS_DMA_CHX_STATUS(chan)),
OSI_BIT(OSI_DMA_CH_RX_INTR));
}
nveu32_t osi_get_global_dma_status(struct osi_dma_priv_data *osi_dma)
@@ -502,13 +573,6 @@ nve32_t osi_handle_dma_intr(struct osi_dma_priv_data *osi_dma,
nveu32_t en_dis)
{
struct dma_local *l_dma = (struct dma_local *)osi_dma;
typedef void (*dma_intr_fn)(void *base, nveu32_t ch);
dma_intr_fn fn[2][2][2] = {
{ { l_dma->ops_p->disable_chan_tx_intr, l_dma->ops_p->enable_chan_tx_intr },
{ l_dma->ops_p->disable_chan_rx_intr, l_dma->ops_p->enable_chan_rx_intr } },
{ { l_dma->ops_p->clear_vm_tx_intr, l_dma->ops_p->enable_chan_tx_intr },
{ l_dma->ops_p->clear_vm_rx_intr, l_dma->ops_p->enable_chan_rx_intr } }
};
if (validate_args(osi_dma, l_dma) < 0) {
return -1;
@@ -523,9 +587,10 @@ nve32_t osi_handle_dma_intr(struct osi_dma_priv_data *osi_dma,
return -1;
}
fn[l_dma->vm_intr][tx_rx][en_dis](osi_dma->base, chan);
return 0;
return intr_fn[en_dis](osi_dma, VIRT_INTR_CHX_CNTRL(chan),
VIRT_INTR_CHX_STATUS(chan), ((osi_dma->mac == OSI_MAC_HW_MGBE) ?
MGBE_DMA_CHX_STATUS(chan) : EQOS_DMA_CHX_STATUS(chan)),
OSI_BIT(tx_rx));
}
nve32_t osi_start_dma(struct osi_dma_priv_data *osi_dma,