osi: coe: do not use CoE channels

The MGBE driver must not use Camera Over Ethernet (CoE) DMA channels
for normal Rx/Tx traffic.

Track CoE channels separately from the normal DMA channels.

In some places the MTL queue number is used in place of the DMA channel
number, because the driver assumes a 1:1 mapping between DMA channels
and MTL queues. Fix those cases so that the driver never selects CoE
channels for Tx and never includes them in RSS or multicast/broadcast
queue selection.
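
For illustration only, a minimal sketch of how an OSD layer could
describe CoE channels through the new fields (the function name,
irq_idx and the channel numbers are hypothetical, not part of this
change):

    static void osd_populate_coe_chans(struct osi_dma_priv_data *osi_dma,
                                       struct osi_core_priv_data *osi_core,
                                       nveu32_t irq_idx)
    {
            /* Hypothetical example: DMA channels 8 and 9 are owned by the
             * camera CPU, so they are reported via dma_chans_coe[] instead
             * of the normal dma_chans[] list. */
            osi_dma->num_dma_chans_coe = 2U;
            osi_dma->dma_chans_coe[0] = 8U;
            osi_dma->dma_chans_coe[1] = 9U;

            /* Flag the VM IRQ that carries these channels as CoE so that
             * core code (mgbe_core_chan_is_coe()) can skip them. */
            osi_core->irq_data[irq_idx].is_coe = 1U;
    }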

Change-Id: I18da9a3e51168c9e3c95278beb179b44e8647f36
Signed-off-by: Igor Mitsyanko <imitsyanko@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/kernel/nvethernetrm/+/3330334
Reviewed-by: svcacv <svcacv@nvidia.com>
Author: Igor Mitsyanko
Date:   2025-03-29 18:04:17 +00:00
Parent: 10cedf73e6
Commit: 9aa28e802d
4 changed files with 136 additions and 46 deletions


@@ -1304,6 +1304,8 @@ struct osi_vm_irq_data {
* valid values are from 0 to NVETHERNETRM_PIF$OSI_EQOS_MAX_NUM_CHANS-1 for eqos
* and 0 to NVETHERNETRM_PIF$OSI_MGBE_MAX_NUM_CHANS-1 */
nveu32_t vm_chans[OSI_MGBE_MAX_NUM_CHANS];
/** Set to nonzero when the IRQ is used for Camera Over Ethernet (handled by the camera CPU) */
nveu8_t is_coe;
};
/**


@@ -724,6 +724,12 @@ struct osi_dma_priv_data {
* Valid array size is num_dma_chans
*/
nveu32_t dma_chans[OSI_MGBE_MAX_NUM_CHANS];
/** Number of DMA channels enabled in the MAC that are used for Camera Over Ethernet
*/
nveu32_t num_dma_chans_coe;
/** Array of DMA channels managed by the camera CPU.
* Valid array size is num_dma_chans_coe
*/
nveu8_t dma_chans_coe[OSI_MGBE_MAX_NUM_CHANS];
/** DMA Rx channel buffer length at HW level. Max value is related to mtu based
* on equation documented in sequence diagram of osi_set_rx_buf_len()
*/


@@ -2162,7 +2162,7 @@ static nve32_t mgbe_config_flow_control(struct osi_core_priv_data *const osi_cor
* @brief pcs_configure_fsm - Configure FSM for XPCS/XLGPCS
*
* @note
-* Algorithm: enable/disable the FSM timeout safety feature
+* Algorithm: enable/disable the FSM timeout safety feature
*
* @param[in, out] osi_core: OSI core private data structure.
* @param[in] enable: OSI_ENABLE for Enabling FSM timeout safety feature, else disable
@@ -2482,6 +2482,24 @@ static nve32_t mgbe_hsi_inject_err(struct osi_core_priv_data *const osi_core,
#endif
#endif
static inline nveu32_t
mgbe_core_chan_is_coe(const struct osi_core_priv_data * const osi_core,
nveu32_t chan_id)
{
for (nveu32_t irqn = 0U; irqn < osi_core->num_vm_irqs; irqn++) {
if (osi_core->irq_data[irqn].is_coe == 0U)
continue;
for (nveu32_t ch = 0U; ch < osi_core->irq_data[irqn].num_vm_chans; ch++) {
if (osi_core->irq_data[irqn].vm_chans[ch] == chan_id) {
return 1U;
}
}
}
return 0U;
}
/**
* @brief mgbe_configure_mac - Configure MAC
*
@@ -2555,13 +2573,14 @@ static void mgbe_configure_mac(struct osi_core_priv_data *osi_core)
(nveu8_t *)osi_core->base + MGBE_MAC_RQC1R);
value |= MGBE_MAC_RQC1R_MCBCQEN;
/* Set MCBCQ to highest enabled RX queue index */
-for (i = 0; i < osi_core->num_mtl_queues; i++) {
-if ((max_queue < osi_core->mtl_queues[i]) &&
-(osi_core->mtl_queues[i] < OSI_MGBE_MAX_NUM_QUEUES)) {
+for (i = 0; i < osi_core->num_dma_chans; i++) {
+if ((max_queue < osi_core->dma_chans[i]) &&
+(osi_core->dma_chans[i] < OSI_MGBE_MAX_NUM_QUEUES)) {
/* Update max queue number */
-max_queue = osi_core->mtl_queues[i];
+max_queue = osi_core->dma_chans[i];
}
}
value &= ~(MGBE_MAC_RQC1R_MCBCQ);
value |= (max_queue << MGBE_MAC_RQC1R_MCBCQ_SHIFT);
osi_writela(osi_core, value,
@@ -3074,15 +3093,20 @@ static nve32_t mgbe_core_init(struct osi_core_priv_data *const osi_core)
* Since this is a local function this will always return success,
* so no need to check for return value
*/
+if (mgbe_core_chan_is_coe(osi_core, osi_core->mtl_queues[qinx])) {
+ret = hw_config_fw_err_pkts(osi_core,
+osi_core->mtl_queues[qinx], OSI_DISABLE);
+} else {
+ret = hw_config_fw_err_pkts(osi_core,
+osi_core->mtl_queues[qinx], OSI_ENABLE);
+}
#ifndef OSI_STRIPPED_LIB
-ret = hw_config_fw_err_pkts(osi_core, osi_core->mtl_queues[qinx], OSI_ENABLE);
if (ret < 0) {
goto fail;
}
#else
-(void)hw_config_fw_err_pkts(osi_core, osi_core->mtl_queues[qinx], OSI_ENABLE);
#endif /* !OSI_STRIPPED_LIB */
(void)ret;
#endif
}
/* configure MGBE MAC HW */
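
The RSS-related change mentioned in the commit message is not shown in
this excerpt; conceptually the same helper gates queue selection there
as well. A hypothetical sketch (select_rss_queues() is illustrative,
not code from this commit):

    static void select_rss_queues(const struct osi_core_priv_data *const osi_core)
    {
            nveu32_t qinx;

            for (qinx = 0U; qinx < osi_core->num_mtl_queues; qinx++) {
                    /* Camera-owned queues must not receive host RSS traffic. */
                    if (mgbe_core_chan_is_coe(osi_core, osi_core->mtl_queues[qinx]) == 1U) {
                            continue;
                    }
                    /* ... include osi_core->mtl_queues[qinx] as an RSS target ... */
            }
    }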


@@ -266,6 +266,36 @@ static inline nve32_t validate_dma_chans(struct osi_dma_priv_data *osi_dma)
return ret;
}
/**
* @brief Function to validate array of CoE DMA channels.
*
* @param[in] osi_dma: OSI DMA private data structure.
*
* @note
* API Group:
* - Initialization: Yes
* - Run time: Yes
* - De-initialization: Yes
*
* @retval 0 on Success
* @retval -1 on Failure
*/
static inline nve32_t validate_coe_dma_chans(struct osi_dma_priv_data *osi_dma)
{
const struct dma_local *const l_dma = (struct dma_local *)(void *)osi_dma;
nveu32_t i = 0U;
nve32_t ret = 0;
for (i = 0; i < osi_dma->num_dma_chans_coe; i++) {
if (osi_dma->dma_chans_coe[i] > l_dma->num_max_chans) {
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
"Invalid CoE DMA channel number:\n",
osi_dma->dma_chans_coe[i]);
ret = -1;
}
}
return ret;
}
#ifndef OSI_STRIPPED_LIB
/**
* @brief Function to validate function pointers.
@@ -516,6 +546,37 @@ static inline void start_dma(const struct osi_dma_priv_data *const osi_dma, nveu
osi_dma_writel(val, (nveu8_t *)osi_dma->base + rx_dma_reg[local_mac]);
}
static inline void stop_dma(const struct osi_dma_priv_data *const osi_dma,
nveu32_t dma_chan)
{
const nveu32_t chan_mask[OSI_MAX_MAC_IP_TYPES] = {0xFU, 0xFU, 0x3FU};
const nveu32_t local_mac = osi_dma->mac % OSI_MAX_MAC_IP_TYPES;
// Added bitwise with 0xFF to avoid CERT INT30-C error
nveu32_t chan = ((dma_chan & chan_mask[local_mac]) & (0xFFU));
const nveu32_t dma_tx_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_TX_CTRL(chan),
MGBE_DMA_CHX_TX_CTRL(chan),
MGBE_DMA_CHX_TX_CTRL(chan)
};
const nveu32_t dma_rx_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_RX_CTRL(chan),
MGBE_DMA_CHX_RX_CTRL(chan),
MGBE_DMA_CHX_RX_CTRL(chan)
};
nveu32_t val;
/* Stop Tx DMA */
val = osi_dma_readl((nveu8_t *)osi_dma->base + dma_tx_reg[osi_dma->mac]);
val &= ~OSI_BIT(0);
osi_dma_writel(val, (nveu8_t *)osi_dma->base + dma_tx_reg[osi_dma->mac]);
/* Stop Rx DMA */
val = osi_dma_readl((nveu8_t *)osi_dma->base + dma_rx_reg[osi_dma->mac]);
val &= ~OSI_BIT(0);
val |= OSI_BIT(31);
osi_dma_writel(val, (nveu8_t *)osi_dma->base + dma_rx_reg[osi_dma->mac]);
}
static nve32_t init_dma_channel(const struct osi_dma_priv_data *const osi_dma,
nveu32_t dma_chan)
{
@@ -526,6 +587,7 @@ static nve32_t init_dma_channel(const struct osi_dma_priv_data *const osi_dma,
// Added bitwise with 0xFF to avoid CERT INT30-C error
nveu32_t chan = ((dma_chan & chan_mask[local_mac]) & (0xFFU));
nveu32_t riwt = osi_dma->rx_riwt & 0xFFFU;
const nveu32_t total_num_chans = osi_dma->num_dma_chans + osi_dma->num_dma_chans_coe;
const nveu32_t intr_en_reg[OSI_MAX_MAC_IP_TYPES] = {
EQOS_DMA_CHX_INTR_ENA(chan),
MGBE_DMA_CHX_INTR_ENA(chan),
@@ -558,7 +620,7 @@ static nve32_t init_dma_channel(const struct osi_dma_priv_data *const osi_dma,
const nveu32_t rx_pbl[2] = {
EQOS_DMA_CHX_RX_CTRL_RXPBL_RECOMMENDED,
((Q_SZ_DEPTH(MGBE_RXQ_SIZE/OSI_MGBE_MAX_NUM_QUEUES) /
-osi_dma->num_dma_chans) / 2U)
+total_num_chans) / 2U)
};
const nveu32_t rwt_val[OSI_MAX_MAC_IP_TYPES] = {
(((riwt * (EQOS_AXI_CLK_FREQ / OSI_ONE_MEGA_HZ)) /
@@ -583,7 +645,7 @@ static nve32_t init_dma_channel(const struct osi_dma_priv_data *const osi_dma,
(DMA_CHX_TX_CTRL_OSP | DMA_CHX_TX_CTRL_TSE),
DMA_CHX_TX_CTRL_TSE
};
-const nveu32_t owrq = (MGBE_DMA_CHX_RX_CNTRL2_OWRQ_MCHAN / osi_dma->num_dma_chans);
+const nveu32_t owrq = (MGBE_DMA_CHX_RX_CNTRL2_OWRQ_MCHAN / total_num_chans);
const nveu32_t owrq_arr[OSI_MGBE_T23X_MAX_NUM_CHANS] = {
MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN, owrq, owrq, owrq,
owrq, owrq, owrq, owrq, owrq, owrq
@@ -799,14 +861,16 @@ nve32_t osi_hw_dma_init(struct osi_dma_priv_data *osi_dma)
}
if ((osi_dma->num_dma_chans == 0U) ||
-(osi_dma->num_dma_chans > l_dma->num_max_chans)) {
+(osi_dma->num_dma_chans > l_dma->num_max_chans) ||
+(osi_dma->num_dma_chans_coe > l_dma->num_max_chans)) {
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
"Invalid number of DMA channels\n", 0ULL);
ret = -1;
goto fail;
}
-if (validate_dma_chans(osi_dma) < 0) {
+if ((validate_dma_chans(osi_dma) < 0) ||
+(validate_coe_dma_chans(osi_dma) < 0)) {
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
"DMA channels validation failed\n", 0ULL);
ret = -1;
@@ -826,6 +890,18 @@ nve32_t osi_hw_dma_init(struct osi_dma_priv_data *osi_dma)
}
}
/* Init DMA engine settings for CoE channels, but don't start the DMA */
for (i = 0; i < osi_dma->num_dma_chans_coe; i++) {
ret = init_dma_channel(osi_dma, osi_dma->dma_chans_coe[i]);
if (ret < 0) {
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
"DMA: Init CoE DMA channel failed\n", 0ULL);
goto fail;
}
stop_dma(osi_dma, osi_dma->dma_chans_coe[i]);
}
set_default_ptp_config(osi_dma);
fail:
#ifdef OSI_CL_FTRACE
@@ -834,37 +910,6 @@ fail:
return ret;
}
-static inline void stop_dma(const struct osi_dma_priv_data *const osi_dma,
-nveu32_t dma_chan)
-{
-const nveu32_t chan_mask[OSI_MAX_MAC_IP_TYPES] = {0xFU, 0xFU, 0x3FU};
-const nveu32_t local_mac = osi_dma->mac % OSI_MAX_MAC_IP_TYPES;
-// Added bitwise with 0xFF to avoid CERT INT30-C error
-nveu32_t chan = ((dma_chan & chan_mask[local_mac]) & (0xFFU));
-const nveu32_t dma_tx_reg[OSI_MAX_MAC_IP_TYPES] = {
-EQOS_DMA_CHX_TX_CTRL(chan),
-MGBE_DMA_CHX_TX_CTRL(chan),
-MGBE_DMA_CHX_TX_CTRL(chan)
-};
-const nveu32_t dma_rx_reg[OSI_MAX_MAC_IP_TYPES] = {
-EQOS_DMA_CHX_RX_CTRL(chan),
-MGBE_DMA_CHX_RX_CTRL(chan),
-MGBE_DMA_CHX_RX_CTRL(chan)
-};
-nveu32_t val;
-/* Stop Tx DMA */
-val = osi_dma_readl((nveu8_t *)osi_dma->base + dma_tx_reg[osi_dma->mac]);
-val &= ~OSI_BIT(0);
-osi_dma_writel(val, (nveu8_t *)osi_dma->base + dma_tx_reg[osi_dma->mac]);
-/* Stop Rx DMA */
-val = osi_dma_readl((nveu8_t *)osi_dma->base + dma_rx_reg[osi_dma->mac]);
-val &= ~OSI_BIT(0);
-val |= OSI_BIT(31);
-osi_dma_writel(val, (nveu8_t *)osi_dma->base + dma_rx_reg[osi_dma->mac]);
-}
static inline void set_rx_riit_dma(
const struct osi_dma_priv_data *const osi_dma,
nveu32_t chan, nveu32_t riit)
@@ -934,8 +979,15 @@ static inline void set_rx_riit(
for (i = 0; i < osi_dma->num_dma_chans; i++) {
chan = osi_dma->dma_chans[i];
set_rx_riit_dma(osi_dma, chan, riit);
}
for (i = 0; i < osi_dma->num_dma_chans_coe; i++) {
chan = osi_dma->dma_chans_coe[i];
set_rx_riit_dma(osi_dma, chan, 0U);
}
return;
}
@@ -953,14 +1005,16 @@ nve32_t osi_hw_dma_deinit(struct osi_dma_priv_data *osi_dma)
goto fail;
}
-if (osi_dma->num_dma_chans > l_dma->num_max_chans) {
+if ((osi_dma->num_dma_chans > l_dma->num_max_chans) ||
+(osi_dma->num_dma_chans_coe > l_dma->num_max_chans)) {
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
"Invalid number of DMA channels\n", 0ULL);
ret = -1;
goto fail;
}
-if (validate_dma_chans(osi_dma) < 0) {
+if ((validate_dma_chans(osi_dma) < 0) ||
+(validate_coe_dma_chans(osi_dma) < 0)) {
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
"DMA channels validation failed\n", 0ULL);
ret = -1;
@@ -971,6 +1025,10 @@ nve32_t osi_hw_dma_deinit(struct osi_dma_priv_data *osi_dma)
stop_dma(osi_dma, osi_dma->dma_chans[i]);
}
for (i = 0; i < osi_dma->num_dma_chans_coe; i++) {
stop_dma(osi_dma, osi_dma->dma_chans_coe[i]);
}
fail:
#ifdef OSI_CL_FTRACE
slogf(0, 2, "%s : Function Exit\n", __func__);