diff --git a/include/osi_common.h b/include/osi_common.h index a2d8acc..3b1834c 100644 --- a/include/osi_common.h +++ b/include/osi_common.h @@ -313,7 +313,7 @@ static inline nveu64_t osi_update_stats_counter(nveu64_t last_value, if (temp < last_value) { /* Stats overflow, so reset it to zero */ - return 0UL; + temp = 0UL; } return temp; diff --git a/include/osi_dma.h b/include/osi_dma.h index be6867d..77df993 100644 --- a/include/osi_dma.h +++ b/include/osi_dma.h @@ -118,7 +118,9 @@ /** Paged buffer */ #define OSI_PKT_CX_PAGED_BUF OSI_BIT(4) /** Rx packet has RSS hash */ +#ifndef OSI_STRIPPED_LIB #define OSI_PKT_CX_RSS OSI_BIT(5) +#endif /* !OSI_STRIPPED_LIB */ /** Valid packet */ #define OSI_PKT_CX_VALID OSI_BIT(10) /** Update Packet Length in Tx Desc3 */ @@ -127,18 +129,18 @@ #define OSI_PKT_CX_IP_CSUM OSI_BIT(12) /** @} */ +#ifndef OSI_STRIPPED_LIB /** * @addtogroup SLOT function context fields * * @brief These flags are used for DMA channel Slot context configuration * @{ */ -#ifndef OSI_STRIPPED_LIB #define OSI_SLOT_INTVL_DEFAULT 125U #define OSI_SLOT_INTVL_MAX 4095U -#endif /* !OSI_STRIPPED_LIB */ #define OSI_SLOT_NUM_MAX 16U /** @} */ +#endif /* !OSI_STRIPPED_LIB */ /** * @addtogroup EQOS-TX Tx done packet context fields @@ -208,7 +210,7 @@ /** @} */ - +#ifndef OSI_STRIPPED_LIB /** * @addtogroup RSS-HASH type * @@ -220,6 +222,7 @@ #define OSI_RX_PKT_HASH_TYPE_L3 0x2U #define OSI_RX_PKT_HASH_TYPE_L4 0x3U /** @} */ +#endif /* !OSI_STRIPPED_LIB */ /** * @addtogroup OSI-INTR OSI DMA interrupt handling macros. @@ -666,7 +669,7 @@ nveu32_t osi_get_global_dma_status(struct osi_dma_priv_data *osi_dma); * * @retval "Number of available free descriptors." */ -nveu32_t osi_get_refill_rx_desc_cnt(struct osi_dma_priv_data *osi_dma, +nveu32_t osi_get_refill_rx_desc_cnt(const struct osi_dma_priv_data *const osi_dma, nveu32_t chan); /** diff --git a/include/osi_dma_txrx.h b/include/osi_dma_txrx.h index 35834b6..325a0dd 100644 --- a/include/osi_dma_txrx.h +++ b/include/osi_dma_txrx.h @@ -32,7 +32,6 @@ #define OSI_EQOS_TX_DESC_CNT 1024U #define OSI_EQOS_RX_DESC_CNT 1024U #define OSI_MGBE_TX_DESC_CNT 4096U -#define OSI_MGBE_RX_DESC_CNT 4096U #define OSI_MGBE_MAX_RX_DESC_CNT 16384U /** @} */ diff --git a/osi/common/common.h b/osi/common/common.h index aeeb738..3d37f14 100644 --- a/osi/common/common.h +++ b/osi/common/common.h @@ -256,25 +256,25 @@ static inline void osi_writela(OSI_UNUSED void *priv, nveu32_t val, void *addr) * @retval 1 - for Valid MAC */ static inline nve32_t validate_mac_ver_update_chans(nveu32_t mac_ver, - nveu32_t *max_chans, + nveu32_t *num_max_chans, nveu32_t *l_mac_ver) { nve32_t ret; switch (mac_ver) { case OSI_EQOS_MAC_5_00: - *max_chans = OSI_EQOS_XP_MAX_CHANS; + *num_max_chans = OSI_EQOS_XP_MAX_CHANS; *l_mac_ver = MAC_CORE_VER_TYPE_EQOS; ret = 1; break; case OSI_EQOS_MAC_5_30: - *max_chans = OSI_EQOS_MAX_NUM_CHANS; + *num_max_chans = OSI_EQOS_MAX_NUM_CHANS; *l_mac_ver = MAC_CORE_VER_TYPE_EQOS_5_30; ret = 1; break; case OSI_MGBE_MAC_3_10: case OSI_MGBE_MAC_4_00: - *max_chans = OSI_MGBE_MAX_NUM_CHANS; + *num_max_chans = OSI_MGBE_MAX_NUM_CHANS; *l_mac_ver = MAC_CORE_VER_TYPE_MGBE; ret = 1; break; @@ -305,7 +305,7 @@ static inline void osi_memset(void *s, nveu32_t c, nveu64_t count) nveu64_t temp = count; if (s == OSI_NULL) { - return; + goto done; } xs = (nveu8_t *)s; while (temp != 0UL) { @@ -315,6 +315,8 @@ static inline void osi_memset(void *s, nveu32_t c, nveu64_t count) } temp--; } +done: + return; } /** @@ -332,38 +334,47 @@ static inline void osi_memset(void *s, 
nveu32_t c, nveu64_t count) */ static inline nve32_t osi_memcpy(void *dest, void *src, nveu64_t n) { - nve8_t *csrc = (nve8_t *)src; nve8_t *cdest = (nve8_t *)dest; + const nve8_t *csrc = (nve8_t *)src; + nve32_t ret = 0; nveu64_t i = 0; if ((src == OSI_NULL) || (dest == OSI_NULL)) { - return -1; + ret = -1; + goto fail; } for (i = 0; i < n; i++) { cdest[i] = csrc[i]; } - return 0; +fail: + return ret; } static inline nve32_t osi_memcmp(void *dest, void *src, nve32_t n) { + const nve8_t *const cdest = (nve8_t *)dest; + const nve8_t *const csrc = (nve8_t *)src; + nve32_t ret = 0; nve32_t i; - nve8_t *csrc = (nve8_t *)src; - nve8_t *cdest = (nve8_t *)dest; - if ((src == OSI_NULL) || (dest == OSI_NULL)) - return -1; + if ((src == OSI_NULL) || (dest == OSI_NULL)) { + ret = -1; + goto fail; + } for (i = 0; i < n; i++) { if (csrc[i] < cdest[i]) { - return -1; + ret = -1; + goto fail; } else if (csrc[i] > cdest[i]) { - return 1; + ret = 1; + goto fail; } else { /* Do Nothing */ } } - return 0; +fail: + return ret; } #endif diff --git a/osi/common/osi_common.c b/osi/common/osi_common.c index 18df8ff..3a369d6 100644 --- a/osi/common/osi_common.c +++ b/osi/common/osi_common.c @@ -31,7 +31,7 @@ void common_get_systime_from_mac(void *addr, nveu32_t mac, nveu32_t *sec, nveu64_t remain; nveul64_t ns; typedef nveul64_t (*get_time)(void *addr); - get_time i_ops[MAX_MAC_IP_TYPES] = { + const get_time i_ops[MAX_MAC_IP_TYPES] = { eqos_get_systime_from_mac, mgbe_get_systime_from_mac }; @@ -53,7 +53,7 @@ void common_get_systime_from_mac(void *addr, nveu32_t mac, nveu32_t *sec, nveu32_t common_is_mac_enabled(void *addr, nveu32_t mac) { typedef nveu32_t (*mac_enable_arr)(void *addr); - mac_enable_arr i_ops[MAX_MAC_IP_TYPES] = { + const mac_enable_arr i_ops[MAX_MAC_IP_TYPES] = { eqos_is_mac_enabled, mgbe_is_mac_enabled }; diff --git a/osi/dma/debug.c b/osi/dma/debug.c index 3ccb451..aecbfa8 100644 --- a/osi/dma/debug.c +++ b/osi/dma/debug.c @@ -250,6 +250,8 @@ void desc_dump(struct osi_dma_priv_data *osi_dma, unsigned int f_idx, rx_desc_dump(osi_dma, f_idx, chan); break; default: + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, + "Invalid desc dump flag\n", 0ULL); break; } } diff --git a/osi/dma/dma_local.h b/osi/dma/dma_local.h index 1820ab4..7c87e89 100644 --- a/osi/dma/dma_local.h +++ b/osi/dma/dma_local.h @@ -66,7 +66,7 @@ struct dma_chan_ops { */ struct desc_ops { /** Called to get receive checksum */ - void (*get_rx_csum)(struct osi_rx_desc *rx_desc, + void (*get_rx_csum)(const struct osi_rx_desc *const rx_desc, struct osi_rx_pkt_cx *rx_pkt_cx); #ifndef OSI_STRIPPED_LIB /** Called to get rx error stats */ @@ -80,10 +80,10 @@ struct desc_ops { struct osi_rx_pkt_cx *rx_pkt_cx); #endif /* !OSI_STRIPPED_LIB */ /** Called to get RX hw timestamp */ - int (*get_rx_hwstamp)(struct osi_dma_priv_data *osi_dma, - struct osi_rx_desc *rx_desc, - struct osi_rx_desc *context_desc, - struct osi_rx_pkt_cx *rx_pkt_cx); + nve32_t (*get_rx_hwstamp)(const struct osi_dma_priv_data *const osi_dma, + const struct osi_rx_desc *const rx_desc, + const struct osi_rx_desc *const context_desc, + struct osi_rx_pkt_cx *rx_pkt_cx); }; /** @@ -107,7 +107,7 @@ struct dma_local { /** Magic number to validate osi_dma pointer */ nveu64_t magic_num; /** Maximum number of DMA channels */ - nveu32_t max_chans; + nveu32_t num_max_chans; /** Exact MAC used across SOCs 0:Legacy EQOS, 1:Orin EQOS, 2:Orin MGBE */ nveu32_t l_mac_ver; }; @@ -141,14 +141,14 @@ void mgbe_init_dma_chan_ops(struct dma_chan_ops *ops); /** * @brief eqos_get_desc_ops - EQOS 
init DMA descriptor operations */ -void eqos_init_desc_ops(struct desc_ops *d_ops); +void eqos_init_desc_ops(struct desc_ops *p_dops); /** * @brief mgbe_get_desc_ops - MGBE init DMA descriptor operations */ -void mgbe_init_desc_ops(struct desc_ops *d_ops); +void mgbe_init_desc_ops(struct desc_ops *p_dops); -nve32_t init_desc_ops(struct osi_dma_priv_data *osi_dma); +nve32_t init_desc_ops(const struct osi_dma_priv_data *const osi_dma); /** * @brief osi_hw_transmit - Initialize Tx DMA descriptors for a channel @@ -199,33 +199,15 @@ nve32_t dma_desc_init(struct osi_dma_priv_data *osi_dma); static inline nveu32_t is_power_of_two(nveu32_t num) { + nveu32_t ret = OSI_DISABLE; + if ((num > 0U) && ((num & (num - 1U)) == 0U)) { - return OSI_ENABLE; + ret = OSI_ENABLE; } - return OSI_DISABLE; + return ret; } -/** - * @addtogroup Helper Helper MACROS - * - * @brief EQOS generic helper MACROS. - * @{ - */ -#define CHECK_CHAN_BOUND(chan) \ - { \ - if ((chan) >= OSI_EQOS_MAX_NUM_CHANS) { \ - return; \ - } \ - } - -#define MGBE_CHECK_CHAN_BOUND(chan) \ -{ \ - if ((chan) >= OSI_MGBE_MAX_NUM_CHANS) { \ - return; \ - } \ -} \ - #define BOOLEAN_FALSE (0U != 0U) #define L32(data) ((nveu32_t)((data) & 0xFFFFFFFFU)) #define H32(data) ((nveu32_t)(((data) & 0xFFFFFFFF00000000UL) >> 32UL)) diff --git a/osi/dma/eqos_desc.c b/osi/dma/eqos_desc.c index 54004d0..389ac72 100644 --- a/osi/dma/eqos_desc.c +++ b/osi/dma/eqos_desc.c @@ -115,7 +115,7 @@ static void eqos_get_rx_hash(OSI_UNUSED struct osi_rx_desc *rx_desc, * @param[in, out] rx_desc: Rx descriptor * @param[in, out] rx_pkt_cx: Per-Rx packet context structure */ -static void eqos_get_rx_csum(struct osi_rx_desc *rx_desc, +static void eqos_get_rx_csum(const struct osi_rx_desc *const rx_desc, struct osi_rx_pkt_cx *rx_pkt_cx) { nveu32_t pkt_type; @@ -125,51 +125,49 @@ static void eqos_get_rx_csum(struct osi_rx_desc *rx_desc, * Set none/unnecessary bit as well for other OS to check and * take proper actions. 
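The helpers that remain in dma_local.h do two small jobs that recur through the DMA code: the single-exit is_power_of_two() check used when validating ring sizes, and the L32()/H32() split of a 64-bit descriptor or tail-pointer address into the two 32-bit register writes. A minimal standalone sketch, with stdint types standing in for the nveu32_t/nveu64_t typedefs and a made-up descriptor address:

#include <stdint.h>
#include <stdio.h>

#define L32(data) ((uint32_t)((data) & 0xFFFFFFFFU))
#define H32(data) ((uint32_t)(((data) & 0xFFFFFFFF00000000ULL) >> 32U))

static inline uint32_t is_power_of_two(uint32_t num)
{
        uint32_t ret = 0U;

        /* A power of two has exactly one bit set, so num & (num - 1) is 0 */
        if ((num > 0U) && ((num & (num - 1U)) == 0U)) {
                ret = 1U;
        }

        return ret;
}

int main(void)
{
        uint64_t desc_phy_addr = 0x0000004512341040ULL; /* example address */

        printf("1024 power of two: %u\n", (unsigned int)is_power_of_two(1024U));
        printf("1000 power of two: %u\n", (unsigned int)is_power_of_two(1000U));
        printf("low word:  0x%08lX\n", (unsigned long)L32(desc_phy_addr));
        printf("high word: 0x%08lX\n", (unsigned long)H32(desc_phy_addr));
        return 0;
}
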
*/ - if ((rx_desc->rdes3 & RDES3_RS1V) != RDES3_RS1V) { - return; - } - - if ((rx_desc->rdes1 & - (RDES1_IPCE | RDES1_IPCB | RDES1_IPHE)) == OSI_DISABLE) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UNNECESSARY; - } - - if ((rx_desc->rdes1 & RDES1_IPCB) != OSI_DISABLE) { - return; - } - - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4; - if ((rx_desc->rdes1 & RDES1_IPHE) == RDES1_IPHE) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4_BAD; - } - - pkt_type = rx_desc->rdes1 & RDES1_PT_MASK; - if ((rx_desc->rdes1 & RDES1_IPV4) == RDES1_IPV4) { - if (pkt_type == RDES1_PT_UDP) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UDPv4; - } else if (pkt_type == RDES1_PT_TCP) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCPv4; - - } else { - /* Do nothing */ - } - } else if ((rx_desc->rdes1 & RDES1_IPV6) == RDES1_IPV6) { - if (pkt_type == RDES1_PT_UDP) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UDPv6; - } else if (pkt_type == RDES1_PT_TCP) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCPv6; - - } else { - /* Do nothing */ + if ((rx_desc->rdes3 & RDES3_RS1V) == RDES3_RS1V) { + if ((rx_desc->rdes1 & + (RDES1_IPCE | RDES1_IPCB | RDES1_IPHE)) == OSI_DISABLE) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UNNECESSARY; } - } else { - /* Do nothing */ + if ((rx_desc->rdes1 & RDES1_IPCB) != RDES1_IPCB) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4; + if ((rx_desc->rdes1 & RDES1_IPHE) == RDES1_IPHE) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4_BAD; + } + + pkt_type = rx_desc->rdes1 & RDES1_PT_MASK; + if ((rx_desc->rdes1 & RDES1_IPV4) == RDES1_IPV4) { + if (pkt_type == RDES1_PT_UDP) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UDPv4; + } else if (pkt_type == RDES1_PT_TCP) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCPv4; + + } else { + /* Do nothing */ + } + } else if ((rx_desc->rdes1 & RDES1_IPV6) == RDES1_IPV6) { + if (pkt_type == RDES1_PT_UDP) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UDPv6; + } else if (pkt_type == RDES1_PT_TCP) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCPv6; + + } else { + /* Do nothing */ + } + + } else { + /* Do nothing */ + } + + if ((rx_desc->rdes1 & RDES1_IPCE) == RDES1_IPCE) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCP_UDP_BAD; + } + } } - if ((rx_desc->rdes1 & RDES1_IPCE) == RDES1_IPCE) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCP_UDP_BAD; - } + return; } /** @@ -188,12 +186,13 @@ static void eqos_get_rx_csum(struct osi_rx_desc *rx_desc, * @retval -1 if TimeStamp is not available * @retval 0 if TimeStamp is available. 
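The rewritten eqos_get_rx_csum() folds the old early returns into one block guarded by RDES3_RS1V so the function keeps a single exit. A condensed sketch of that decode order follows; RDES3_RS1V, RDES1_IPCE and RDES1_IPCB use the bit positions shown in hw_desc.h, while RDES1_IPHE and the OSI_CHECKSUM_* flag values are illustrative stand-ins, and the per-protocol TCPv4/UDPv4/TCPv6/UDPv6 classification is omitted:

#include <stdint.h>

#define RDES3_RS1V  (1U << 26)          /* status word 1 valid */
#define RDES1_IPCE  (1U << 7)           /* IP payload (L4) checksum error */
#define RDES1_IPCB  (1U << 6)           /* checksum engine bypassed */
#define RDES1_IPHE  (1U << 3)           /* illustrative bit for IP header error */

#define CSUM_UNNECESSARY  0x1U          /* stand-ins for OSI_CHECKSUM_* */
#define CSUM_IPV4         0x2U
#define CSUM_IPV4_BAD     0x4U
#define CSUM_TCP_UDP_BAD  0x8U

static uint32_t decode_rx_csum(uint32_t rdes1, uint32_t rdes3)
{
        uint32_t csum = 0U;

        if ((rdes3 & RDES3_RS1V) == RDES3_RS1V) {
                if ((rdes1 & (RDES1_IPCE | RDES1_IPCB | RDES1_IPHE)) == 0U) {
                        csum |= CSUM_UNNECESSARY;
                }
                if ((rdes1 & RDES1_IPCB) != RDES1_IPCB) {
                        csum |= CSUM_IPV4;
                        if ((rdes1 & RDES1_IPHE) == RDES1_IPHE) {
                                csum |= CSUM_IPV4_BAD;
                        }
                        if ((rdes1 & RDES1_IPCE) == RDES1_IPCE) {
                                csum |= CSUM_TCP_UDP_BAD;
                        }
                }
        }

        return csum;    /* single exit, as in the refactored function */
}
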
*/ -static int eqos_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma, - struct osi_rx_desc *rx_desc, - struct osi_rx_desc *context_desc, - struct osi_rx_pkt_cx *rx_pkt_cx) +static nve32_t eqos_get_rx_hwstamp(const struct osi_dma_priv_data *const osi_dma, + const struct osi_rx_desc *const rx_desc, + const struct osi_rx_desc *const context_desc, + struct osi_rx_pkt_cx *rx_pkt_cx) { - int retry; + nve32_t ret = 0; + nve32_t retry; /* Check for RS1V/TSA/TD valid */ if (((rx_desc->rdes3 & RDES3_RS1V) == RDES3_RS1V) && @@ -207,7 +206,8 @@ static int eqos_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma, OSI_INVALID_VALUE) && (context_desc->rdes1 == OSI_INVALID_VALUE)) { - return -1; + ret = -1; + goto fail; } /* Update rx pkt context flags to indicate * PTP */ @@ -221,29 +221,31 @@ static int eqos_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma, } if (retry == 10) { /* Timed out waiting for Rx timestamp */ - return -1; + ret = -1; + goto fail; } rx_pkt_cx->ns = context_desc->rdes0 + (OSI_NSEC_PER_SEC * context_desc->rdes1); if (rx_pkt_cx->ns < context_desc->rdes0) { /* Will not hit this case */ - return -1; + ret = -1; + goto fail; } } else { - return -1; + ret = -1; } - - return 0; +fail: + return ret; } -void eqos_init_desc_ops(struct desc_ops *d_ops) +void eqos_init_desc_ops(struct desc_ops *p_dops) { #ifndef OSI_STRIPPED_LIB - d_ops->update_rx_err_stats = eqos_update_rx_err_stats; - d_ops->get_rx_vlan = eqos_get_rx_vlan; - d_ops->get_rx_hash = eqos_get_rx_hash; + p_dops->update_rx_err_stats = eqos_update_rx_err_stats; + p_dops->get_rx_vlan = eqos_get_rx_vlan; + p_dops->get_rx_hash = eqos_get_rx_hash; #endif /* !OSI_STRIPPED_LIB */ - d_ops->get_rx_csum = eqos_get_rx_csum; - d_ops->get_rx_hwstamp = eqos_get_rx_hwstamp; + p_dops->get_rx_csum = eqos_get_rx_csum; + p_dops->get_rx_hwstamp = eqos_get_rx_hwstamp; } diff --git a/osi/dma/eqos_dma.h b/osi/dma/eqos_dma.h index 4b48627..cb4fbf0 100644 --- a/osi/dma/eqos_dma.h +++ b/osi/dma/eqos_dma.h @@ -71,8 +71,6 @@ #define EQOS_DMA_CHX_STATUS_CLEAR_RX \ (EQOS_DMA_CHX_STATUS_RI | EQOS_DMA_CHX_STATUS_NIS) -#define EQOS_DMA_CHX_INTR_TIE OSI_BIT(0) -#define EQOS_DMA_CHX_INTR_RIE OSI_BIT(6) #ifdef OSI_DEBUG #define EQOS_DMA_CHX_INTR_TBUE OSI_BIT(2) #define EQOS_DMA_CHX_INTR_RBUE OSI_BIT(7) @@ -80,11 +78,6 @@ #define EQOS_DMA_CHX_INTR_AIE OSI_BIT(14) #define EQOS_DMA_CHX_INTR_NIE OSI_BIT(15) #endif -#define EQOS_DMA_CHX_TX_CTRL_OSF OSI_BIT(4) -#define EQOS_DMA_CHX_TX_CTRL_TSE OSI_BIT(12) -#define EQOS_DMA_CHX_CTRL_PBLX8 OSI_BIT(16) -#define EQOS_DMA_CHX_RBSZ_MASK 0x7FFEU -#define EQOS_DMA_CHX_RBSZ_SHIFT 1U #define EQOS_DMA_CHX_TX_CTRL_TXPBL_RECOMMENDED 0x200000U #define EQOS_DMA_CHX_RX_CTRL_RXPBL_RECOMMENDED 0xC0000U #define EQOS_DMA_CHX_RX_WDT_RWT_MASK 0xFFU @@ -95,99 +88,10 @@ /* Below macros are used for periodic reg validation for functional safety. * HW register mask - to mask out reserved and self-clearing bits */ -#define EQOS_DMA_CHX_CTRL_MASK 0x11D3FFFU -#define EQOS_DMA_CHX_TX_CTRL_MASK 0xF3F9010U -#define EQOS_DMA_CHX_RX_CTRL_MASK 0x8F3F7FE0U -#define EQOS_DMA_CHX_TDRL_MASK 0x3FFU -#define EQOS_DMA_CHX_RDRL_MASK 0x3FFU -#define EQOS_DMA_CHX_INTR_ENA_MASK 0xFFC7U #ifndef OSI_STRIPPED_LIB #define EQOS_DMA_CHX_SLOT_SIV_MASK 0xFFFU #define EQOS_DMA_CHX_SLOT_SIV_SHIFT 4U #define EQOS_DMA_CHX_SLOT_ESC 0x1U #endif /* !OSI_STRIPPED_LIB */ -/* To add new registers to validate,append at end of below macro list and - * increment EQOS_MAX_DMA_SAFETY_REGS. - * Using macros instead of enum due to misra error. 
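eqos_get_rx_hwstamp() now funnels every error through the ret/goto-fail path while keeping its bounded wait for the context descriptor. The shape of that wait, reduced to a self-contained helper with a caller-supplied poll callback and delay hook standing in for the descriptor read and osd_ops.udelay():

#include <stdint.h>

#define MAX_TS_RETRIES 10

/* Returns 0 if ready() succeeds within the retry budget, -1 on timeout. */
static int32_t poll_with_retries(int32_t (*ready)(void *ctx), void *ctx,
                                 void (*delay_us)(uint64_t usec))
{
        int32_t ret = 0;
        int32_t retry;

        for (retry = 0; retry < MAX_TS_RETRIES; retry++) {
                if (ready(ctx) != 0) {
                        break;          /* e.g. context descriptor now owned by SW */
                }
                delay_us(1ULL);
        }

        if (retry == MAX_TS_RETRIES) {
                ret = -1;               /* timed out; single error path */
        }

        return ret;
}
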
- */ -#define EQOS_DMA_CH0_CTRL_IDX 0U -#define EQOS_DMA_CH1_CTRL_IDX 1U -#define EQOS_DMA_CH2_CTRL_IDX 2U -#define EQOS_DMA_CH3_CTRL_IDX 3U -#define EQOS_DMA_CH4_CTRL_IDX 4U -#define EQOS_DMA_CH5_CTRL_IDX 5U -#define EQOS_DMA_CH6_CTRL_IDX 6U -#define EQOS_DMA_CH7_CTRL_IDX 7U -#define EQOS_DMA_CH0_TX_CTRL_IDX 8U -#define EQOS_DMA_CH1_TX_CTRL_IDX 9U -#define EQOS_DMA_CH2_TX_CTRL_IDX 10U -#define EQOS_DMA_CH3_TX_CTRL_IDX 11U -#define EQOS_DMA_CH4_TX_CTRL_IDX 12U -#define EQOS_DMA_CH5_TX_CTRL_IDX 13U -#define EQOS_DMA_CH6_TX_CTRL_IDX 14U -#define EQOS_DMA_CH7_TX_CTRL_IDX 15U -#define EQOS_DMA_CH0_RX_CTRL_IDX 16U -#define EQOS_DMA_CH1_RX_CTRL_IDX 17U -#define EQOS_DMA_CH2_RX_CTRL_IDX 18U -#define EQOS_DMA_CH3_RX_CTRL_IDX 19U -#define EQOS_DMA_CH4_RX_CTRL_IDX 20U -#define EQOS_DMA_CH5_RX_CTRL_IDX 21U -#define EQOS_DMA_CH6_RX_CTRL_IDX 22U -#define EQOS_DMA_CH7_RX_CTRL_IDX 23U -#define EQOS_DMA_CH0_TDRL_IDX 24U -#define EQOS_DMA_CH1_TDRL_IDX 25U -#define EQOS_DMA_CH2_TDRL_IDX 26U -#define EQOS_DMA_CH3_TDRL_IDX 27U -#define EQOS_DMA_CH4_TDRL_IDX 28U -#define EQOS_DMA_CH5_TDRL_IDX 29U -#define EQOS_DMA_CH6_TDRL_IDX 30U -#define EQOS_DMA_CH7_TDRL_IDX 31U -#define EQOS_DMA_CH0_RDRL_IDX 32U -#define EQOS_DMA_CH1_RDRL_IDX 33U -#define EQOS_DMA_CH2_RDRL_IDX 34U -#define EQOS_DMA_CH3_RDRL_IDX 35U -#define EQOS_DMA_CH4_RDRL_IDX 36U -#define EQOS_DMA_CH5_RDRL_IDX 37U -#define EQOS_DMA_CH6_RDRL_IDX 38U -#define EQOS_DMA_CH7_RDRL_IDX 39U -#define EQOS_DMA_CH0_INTR_ENA_IDX 40U -#define EQOS_DMA_CH1_INTR_ENA_IDX 41U -#define EQOS_DMA_CH2_INTR_ENA_IDX 42U -#define EQOS_DMA_CH3_INTR_ENA_IDX 43U -#define EQOS_DMA_CH4_INTR_ENA_IDX 44U -#define EQOS_DMA_CH5_INTR_ENA_IDX 45U -#define EQOS_DMA_CH6_INTR_ENA_IDX 46U -#define EQOS_DMA_CH7_INTR_ENA_IDX 47U -#define EQOS_MAX_DMA_SAFETY_REGS 48U /** @} */ - -/** - * @brief dma_func_safety - Struct used to store last written values of - * critical DMA HW registers. 
- */ -struct dma_func_safety { - /** Array of reg MMIO addresses (base EQoS + offset of reg) */ - void *reg_addr[EQOS_MAX_DMA_SAFETY_REGS]; - /** Array of bit-mask value of each corresponding reg - * (used to ignore self-clearing/reserved bits in reg) */ - nveu32_t reg_mask[EQOS_MAX_DMA_SAFETY_REGS]; - /** Array of value stored in each corresponding register */ - nveu32_t reg_val[EQOS_MAX_DMA_SAFETY_REGS]; - /** OSI lock variable used to protect writes to reg - * while validation is in-progress */ - nveu32_t dma_safety_lock; -}; - -/** - * @brief eqos_get_dma_safety_config - EQOS get DMA safety configuration - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @returns Pointer to DMA safety configuration - */ -void *eqos_get_dma_safety_config(void); #endif /* INCLUDED_EQOS_DMA_H */ diff --git a/osi/dma/hw_desc.h b/osi/dma/hw_desc.h index 45cf896..ddf27f0 100644 --- a/osi/dma/hw_desc.h +++ b/osi/dma/hw_desc.h @@ -45,22 +45,26 @@ #define RDES3_ERR_RE OSI_BIT(20) #define RDES3_ERR_DRIB OSI_BIT(19) #define RDES3_PKT_LEN 0x00007fffU -#define RDES3_LT (OSI_BIT(16) | OSI_BIT(17) | OSI_BIT(18)) -#define RDES3_LT_VT OSI_BIT(18) -#define RDES3_LT_DVT (OSI_BIT(16) | OSI_BIT(18)) -#define RDES3_RS0V OSI_BIT(25) #define RDES3_RS1V OSI_BIT(26) -#define RDES3_RSV OSI_BIT(26) -#define RDES0_OVT 0x0000FFFFU #define RDES3_TSD OSI_BIT(6) #define RDES3_TSA OSI_BIT(4) #define RDES1_TSA OSI_BIT(14) #define RDES1_TD OSI_BIT(15) +#ifndef OSI_STRIPPED_LIB +#define RDES3_LT (OSI_BIT(16) | OSI_BIT(17) | OSI_BIT(18)) +#define RDES3_LT_VT OSI_BIT(18) +#define RDES3_LT_DVT (OSI_BIT(16) | OSI_BIT(18)) +#define RDES0_OVT 0x0000FFFFU +#define RDES3_RS0V OSI_BIT(25) +#define RDES3_RSV OSI_BIT(26) #define RDES3_L34T 0x00F00000U #define RDES3_L34T_IPV4_TCP OSI_BIT(20) #define RDES3_L34T_IPV4_UDP OSI_BIT(21) #define RDES3_L34T_IPV6_TCP (OSI_BIT(23) | OSI_BIT(20)) #define RDES3_L34T_IPV6_UDP (OSI_BIT(23) | OSI_BIT(21)) +#define RDES3_ELLT_CVLAN 0x90000U +#define RDES3_ERR_MGBE_CRC (OSI_BIT(16) | OSI_BIT(17)) +#endif /* !OSI_STRIPPED_LIB */ #define RDES1_IPCE OSI_BIT(7) #define RDES1_IPCB OSI_BIT(6) @@ -73,7 +77,6 @@ #define RDES3_ELLT 0xF0000U #define RDES3_ELLT_IPHE 0x50000U #define RDES3_ELLT_CSUM_ERR 0x60000U -#define RDES3_ELLT_CVLAN 0x90000U /** @} */ /** Error Summary bits for Received packet */ @@ -83,7 +86,6 @@ /** MGBE error summary bits for Received packet */ #define RDES3_ES_MGBE 0x8000U -#define RDES3_ERR_MGBE_CRC (OSI_BIT(16) | OSI_BIT(17)) /** * @addtogroup EQOS_TxDesc Transmit Descriptors bit fields * diff --git a/osi/dma/mgbe_desc.c b/osi/dma/mgbe_desc.c index 7a3fb29..f3169f8 100644 --- a/osi/dma/mgbe_desc.c +++ b/osi/dma/mgbe_desc.c @@ -146,10 +146,10 @@ static void mgbe_get_rx_hash(struct osi_rx_desc *rx_desc, * @param[in] rx_desc: Rx descriptor * @param[in] rx_pkt_cx: Per-Rx packet context structure */ -static void mgbe_get_rx_csum(struct osi_rx_desc *rx_desc, +static void mgbe_get_rx_csum(const struct osi_rx_desc *const rx_desc, struct osi_rx_pkt_cx *rx_pkt_cx) { - unsigned int ellt = rx_desc->rdes3 & RDES3_ELLT; + nveu32_t ellt = rx_desc->rdes3 & RDES3_ELLT; /* Always include either checksum none/unnecessary * depending on status fields in desc. @@ -175,15 +175,17 @@ static void mgbe_get_rx_csum(struct osi_rx_desc *rx_desc, * @retval -1 if TimeStamp is not available * @retval 0 if TimeStamp is available. 
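mgbe_get_rx_csum() keys off the extended L3/L4 lookup-type field instead of individual bits; RDES3_ELLT and the two error encodings below are the values listed in hw_desc.h. The exact OSI_CHECKSUM_* flags the driver raises are not visible in this hunk, so the sketch only separates the error encodings from the no-error case:

#include <stdint.h>

#define RDES3_ELLT           0xF0000U   /* extended L3/L4 lookup type field */
#define RDES3_ELLT_IPHE      0x50000U   /* IP header error */
#define RDES3_ELLT_CSUM_ERR  0x60000U   /* L4 checksum error */

#define CSUM_UNNECESSARY  0x1U          /* illustrative flag values */
#define CSUM_BAD          0x2U

static uint32_t mgbe_decode_rx_csum(uint32_t rdes3)
{
        uint32_t ellt = rdes3 & RDES3_ELLT;
        uint32_t csum;

        if ((ellt == RDES3_ELLT_IPHE) || (ellt == RDES3_ELLT_CSUM_ERR)) {
                csum = CSUM_BAD;                /* header or payload checksum failed */
        } else {
                csum = CSUM_UNNECESSARY;        /* no checksum error reported */
        }

        return csum;
}
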
*/ -static int mgbe_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma, - struct osi_rx_desc *rx_desc, - struct osi_rx_desc *context_desc, - struct osi_rx_pkt_cx *rx_pkt_cx) +static nve32_t mgbe_get_rx_hwstamp(const struct osi_dma_priv_data *const osi_dma, + const struct osi_rx_desc *const rx_desc, + const struct osi_rx_desc *const context_desc, + struct osi_rx_pkt_cx *rx_pkt_cx) { - int retry; + nve32_t ret = 0; + nve32_t retry; if ((rx_desc->rdes3 & RDES3_CDA) != RDES3_CDA) { - return -1; + ret = -1; + goto fail; } for (retry = 0; retry < 10; retry++) { @@ -194,7 +196,8 @@ static int mgbe_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma, if ((context_desc->rdes0 == OSI_INVALID_VALUE) && (context_desc->rdes1 == OSI_INVALID_VALUE)) { /* Invalid time stamp */ - return -1; + ret = -1; + goto fail; } /* Update rx pkt context flags to indicate PTP */ rx_pkt_cx->flags |= OSI_PKT_CX_PTP; @@ -208,26 +211,27 @@ static int mgbe_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma, if (retry == 10) { /* Timed out waiting for Rx timestamp */ - return -1; + ret = -1; + goto fail; } rx_pkt_cx->ns = context_desc->rdes0 + (OSI_NSEC_PER_SEC * context_desc->rdes1); if (rx_pkt_cx->ns < context_desc->rdes0) { - /* Will not hit this case */ - return -1; + ret = -1; } - return 0; +fail: + return ret; } -void mgbe_init_desc_ops(struct desc_ops *d_ops) +void mgbe_init_desc_ops(struct desc_ops *p_dops) { #ifndef OSI_STRIPPED_LIB - d_ops->update_rx_err_stats = mgbe_update_rx_err_stats; - d_ops->get_rx_vlan = mgbe_get_rx_vlan; - d_ops->get_rx_hash = mgbe_get_rx_hash; + p_dops->update_rx_err_stats = mgbe_update_rx_err_stats; + p_dops->get_rx_vlan = mgbe_get_rx_vlan; + p_dops->get_rx_hash = mgbe_get_rx_hash; #endif /* !OSI_STRIPPED_LIB */ - d_ops->get_rx_csum = mgbe_get_rx_csum; - d_ops->get_rx_hwstamp = mgbe_get_rx_hwstamp; + p_dops->get_rx_csum = mgbe_get_rx_csum; + p_dops->get_rx_hwstamp = mgbe_get_rx_hwstamp; } diff --git a/osi/dma/mgbe_desc.h b/osi/dma/mgbe_desc.h index 8b0d5d0..d3bc340 100644 --- a/osi/dma/mgbe_desc.h +++ b/osi/dma/mgbe_desc.h @@ -23,6 +23,7 @@ #ifndef MGBE_DESC_H_ #define MGBE_DESC_H_ +#ifndef OSI_STRIPPED_LIB /** * @addtogroup MGBE MAC FRP Stats. 
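Both get_rx_hwstamp() paths assemble the PTP timestamp as rdes0 + OSI_NSEC_PER_SEC * rdes1 and use the ns < rdes0 comparison as an unsigned-overflow guard (as the in-tree comment notes, it cannot actually trigger with 32-bit descriptor fields). A standalone sketch, assuming OSI_NSEC_PER_SEC is 1000000000:

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL      /* assumed value of OSI_NSEC_PER_SEC */

/* Fills *ns from the context descriptor words; returns -1 if the sum wrapped. */
static int32_t assemble_rx_timestamp(uint32_t rdes0_nsec, uint32_t rdes1_sec,
                                     uint64_t *ns)
{
        int32_t ret = 0;

        *ns = (uint64_t)rdes0_nsec + (NSEC_PER_SEC * (uint64_t)rdes1_sec);
        if (*ns < (uint64_t)rdes0_nsec) {
                ret = -1;       /* 64-bit addition wrapped; timestamp invalid */
        }

        return ret;
}
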
* @@ -32,6 +33,7 @@ #define MGBE_RDES2_FRPSM OSI_BIT(10) #define MGBE_RDES3_FRPSL OSI_BIT(14) /** @} */ +#endif /* !OSI_STRIPPED_LIB */ #endif /* MGBE_DESC_H_ */ diff --git a/osi/dma/mgbe_dma.h b/osi/dma/mgbe_dma.h index d4a81f6..89032ca 100644 --- a/osi/dma/mgbe_dma.h +++ b/osi/dma/mgbe_dma.h @@ -32,17 +32,6 @@ #define MGBE_AXI_CLK_FREQ 480000000U /** @} */ -/** - * @@addtogroup Timestamp Capture Register - * @brief MGBE MAC Timestamp Register offset - * @{ - */ -#define MGBE_MAC_TSS 0X0D20 -#define MGBE_MAC_TS_NSEC 0x0D30 -#define MGBE_MAC_TS_SEC 0x0D34 -#define MGBE_MAC_TS_PID 0x0D38 -/** @} */ - /** * @addtogroup MGBE_DMA DMA Channel Register offsets * @@ -51,7 +40,9 @@ */ #define MGBE_DMA_CHX_TX_CTRL(x) ((0x0080U * (x)) + 0x3104U) #define MGBE_DMA_CHX_RX_CTRL(x) ((0x0080U * (x)) + 0x3108U) +#ifndef OSI_STRIPPED_LIB #define MGBE_DMA_CHX_SLOT_CTRL(x) ((0x0080U * (x)) + 0x310CU) +#endif /* !OSI_STRIPPED_LIB */ #define MGBE_DMA_CHX_INTR_ENA(x) ((0x0080U * (x)) + 0x3138U) #define MGBE_DMA_CHX_CTRL(x) ((0x0080U * (x)) + 0x3100U) #define MGBE_DMA_CHX_RX_WDT(x) ((0x0080U * (x)) + 0x313CU) @@ -60,10 +51,8 @@ #define MGBE_DMA_CHX_TDLH(x) ((0x0080U * (x)) + 0x3110U) #define MGBE_DMA_CHX_TDLA(x) ((0x0080U * (x)) + 0x3114U) #define MGBE_DMA_CHX_TDTLP(x) ((0x0080U * (x)) + 0x3124U) -#define MGBE_DMA_CHX_TDTHP(x) ((0x0080U * (x)) + 0x3120U) #define MGBE_DMA_CHX_RDLH(x) ((0x0080U * (x)) + 0x3118U) #define MGBE_DMA_CHX_RDLA(x) ((0x0080U * (x)) + 0x311CU) -#define MGBE_DMA_CHX_RDTHP(x) ((0x0080U * (x)) + 0x3128U) #define MGBE_DMA_CHX_RDTLP(x) ((0x0080U * (x)) + 0x312CU) /** @} */ @@ -75,17 +64,10 @@ * @brief Values defined for the MGBE registers * @{ */ -#define MGBE_DMA_CHX_TX_CTRL_OSP OSI_BIT(4) -#define MGBE_DMA_CHX_TX_CTRL_TSE OSI_BIT(12) #define MGBE_DMA_CHX_RX_WDT_RWT_MASK 0xFFU #define MGBE_DMA_CHX_RX_WDT_RWTU 2048U #define MGBE_DMA_CHX_RX_WDT_RWTU_2048_CYCLE 0x3000U #define MGBE_DMA_CHX_RX_WDT_RWTU_MASK 0x3000U -#define MGBE_DMA_CHX_RBSZ_MASK 0x7FFEU -#define MGBE_DMA_CHX_RBSZ_SHIFT 1U -#define MGBE_DMA_CHX_CTRL_PBLX8 OSI_BIT(16) -#define MGBE_DMA_CHX_INTR_TIE OSI_BIT(0) -#define MGBE_DMA_CHX_INTR_RIE OSI_BIT(6) #ifdef OSI_DEBUG #define MGBE_DMA_CHX_INTR_TBUE OSI_BIT(2) #define MGBE_DMA_CHX_INTR_RBUE OSI_BIT(7) @@ -93,15 +75,14 @@ #define MGBE_DMA_CHX_INTR_AIE OSI_BIT(14) #define MGBE_DMA_CHX_INTR_NIE OSI_BIT(15) #endif +#ifndef OSI_STRIPPED_LIB #define MGBE_DMA_CHX_SLOT_ESC OSI_BIT(0) +#endif /* !OSI_STRIPPED_LIB */ #define MGBE_DMA_CHX_TX_CNTRL2_ORRQ_RECOMMENDED 64U #define MGBE_DMA_CHX_TX_CNTRL2_ORRQ_SHIFT 24U #define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN 32U #define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_MCHAN 64U #define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SHIFT 24U -#define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN_PRESI 8U -#define MGBE_DMA_CHX_RX_CNTRL2_ORRQ_SCHAN_PRESI 16U -#define MGBE_DMA_RING_LENGTH_MASK 0xFFFFU #define MGBE_DMA_CHX_CTRL_PBL_SHIFT 16U /** @} */ @@ -111,35 +92,14 @@ * @brief Values defined for PBL settings * @{ */ -/* Tx and Rx Qsize is 64KB */ -#define MGBE_TXQ_RXQ_SIZE_FPGA 65536U /* Tx Queue size is 128KB */ #define MGBE_TXQ_SIZE 131072U /* Rx Queue size is 192KB */ #define MGBE_RXQ_SIZE 196608U /* MAX PBL value */ #define MGBE_DMA_CHX_MAX_PBL 256U +#define MGBE_DMA_CHX_MAX_PBL_VAL 0x200000U /* AXI Data width */ #define MGBE_AXI_DATAWIDTH 128U /** @} */ - -/** - * @addtogroup MGBE MAC timestamp registers bit field. 
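The new MGBE_DMA_CHX_MAX_PBL_VAL is simply the precomputed form of the expression it replaces in init_dma_channel(): (MGBE_DMA_CHX_MAX_PBL / 8U) << MGBE_DMA_CHX_CTRL_PBL_SHIFT, i.e. (256 / 8) << 16 = 0x20 << 16 = 0x200000. A compile-time check of that equivalence:

#include <assert.h>

#define MGBE_DMA_CHX_MAX_PBL        256U
#define MGBE_DMA_CHX_CTRL_PBL_SHIFT 16U
#define MGBE_DMA_CHX_MAX_PBL_VAL    0x200000U

/* (256 / 8) = 0x20; shifting left by 16 gives 0x200000 */
static_assert(((MGBE_DMA_CHX_MAX_PBL / 8U) << MGBE_DMA_CHX_CTRL_PBL_SHIFT) ==
              MGBE_DMA_CHX_MAX_PBL_VAL,
              "precomputed PBL register value must match the original expression");
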
- * - * @brief Values defined for the MGBE timestamp registers - * @{ - */ -#define MGBE_MAC_TSS_TXTSC OSI_BIT(15) -#define MGBE_MAC_TS_PID_MASK 0x3FFU -#define MGBE_MAC_TS_NSEC_MASK 0x7FFFFFFFU -/** @} */ - -/** - * @brief mgbe_get_dma_chan_ops - MGBE get DMA channel operations - * - * Algorithm: Returns pointer DMA channel operations structure. - * - * @returns Pointer to DMA channel operations structure - */ -struct osi_dma_chan_ops *mgbe_get_dma_chan_ops(void); #endif diff --git a/osi/dma/osi_dma.c b/osi/dma/osi_dma.c index b5e421e..5a6ba85 100644 --- a/osi/dma/osi_dma.c +++ b/osi/dma/osi_dma.c @@ -32,12 +32,10 @@ /** * @brief g_dma - DMA local data array. */ -static struct dma_local g_dma[MAX_DMA_INSTANCES]; /** * @brief g_ops - local DMA HW operations array. */ -static struct dma_chan_ops g_ops[MAX_MAC_IP_TYPES]; typedef nve32_t (*dma_intr_fn)(struct osi_dma_priv_data const *osi_dma, nveu32_t intr_ctrl, nveu32_t intr_status, @@ -64,8 +62,9 @@ static inline nve32_t intr_en_dis_retry(nveu8_t *base, nveu32_t intr_ctrl, nveu32_t val, nveu32_t en_dis) { typedef nveu32_t (*set_clear)(nveu32_t val, nveu32_t pos); - set_clear set_clr[2] = { clear_pos_val, set_pos_val }; + const set_clear set_clr[2] = { clear_pos_val, set_pos_val }; nveu32_t cntrl1, cntrl2, i; + nve32_t ret = -1; for (i = 0U; i < 10U; i++) { cntrl1 = osi_readl(base + intr_ctrl); @@ -74,18 +73,14 @@ static inline nve32_t intr_en_dis_retry(nveu8_t *base, nveu32_t intr_ctrl, cntrl2 = osi_readl(base + intr_ctrl); if (cntrl1 == cntrl2) { + ret = 0; break; } else { continue; } } - /* failure case retry failed */ - if (i == 10U) { - return -1; - } - - return 0; + return ret; } static inline nve32_t enable_intr(struct osi_dma_priv_data const *osi_dma, @@ -121,6 +116,8 @@ static inline nve32_t disable_intr(struct osi_dma_priv_data const *osi_dma, struct osi_dma_priv_data *osi_get_dma(void) { + static struct dma_local g_dma[MAX_DMA_INSTANCES]; + struct osi_dma_priv_data *osi_dma = OSI_NULL; nveu32_t i; for (i = 0U; i < MAX_DMA_INSTANCES; i++) { @@ -132,12 +129,14 @@ struct osi_dma_priv_data *osi_get_dma(void) } if (i == MAX_DMA_INSTANCES) { - return OSI_NULL; + goto fail; } g_dma[i].magic_num = (nveu64_t)&g_dma[i].osi_dma; - return &g_dma[i].osi_dma; + osi_dma = &g_dma[i].osi_dma; +fail: + return osi_dma; } /** @@ -155,15 +154,17 @@ struct osi_dma_priv_data *osi_get_dma(void) * @retval 0 on Success * @retval -1 on Failure */ -static inline nve32_t validate_args(struct osi_dma_priv_data *osi_dma, - struct dma_local *l_dma) +static inline nve32_t dma_validate_args(const struct osi_dma_priv_data *const osi_dma, + const struct dma_local *const l_dma) { + nve32_t ret = 0; + if ((osi_dma == OSI_NULL) || (osi_dma->base == OSI_NULL) || (l_dma->init_done == OSI_DISABLE)) { - return -1; + ret = -1; } - return 0; + return ret; } /** @@ -184,15 +185,16 @@ static inline nve32_t validate_args(struct osi_dma_priv_data *osi_dma, static inline nve32_t validate_dma_chan_num(struct osi_dma_priv_data *osi_dma, nveu32_t chan) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + const struct dma_local *const l_dma = (struct dma_local *)(void *)osi_dma; + nve32_t ret = 0; - if (chan >= l_dma->max_chans) { + if (chan >= l_dma->num_max_chans) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "Invalid DMA channel number\n", chan); - return -1; + ret = -1; } - return 0; + return ret; } /** @@ -211,19 +213,20 @@ static inline nve32_t validate_dma_chan_num(struct osi_dma_priv_data *osi_dma, */ static inline nve32_t validate_dma_chans(struct 
osi_dma_priv_data *osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; - nveu32_t i = 0; + const struct dma_local *const l_dma = (struct dma_local *)(void *)osi_dma; + nveu32_t i = 0U; + nve32_t ret = 0; for (i = 0; i < osi_dma->num_dma_chans; i++) { - if (osi_dma->dma_chans[i] > l_dma->max_chans) { + if (osi_dma->dma_chans[i] > l_dma->num_max_chans) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "Invalid DMA channel number:\n", osi_dma->dma_chans[i]); - return -1; + ret = -1; } } - return 0; + return ret; } #ifndef OSI_STRIPPED_LIB @@ -274,22 +277,26 @@ static nve32_t validate_func_ptrs(struct osi_dma_priv_data *osi_dma, nve32_t osi_init_dma_ops(struct osi_dma_priv_data *osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; - nveu32_t default_rz[] = { EQOS_DEFAULT_RING_SZ, MGBE_DEFAULT_RING_SZ }; - nveu32_t max_rz[] = { EQOS_DEFAULT_RING_SZ, MGBE_MAX_RING_SZ }; + const nveu32_t default_rz[] = { EQOS_DEFAULT_RING_SZ, MGBE_DEFAULT_RING_SZ }; + const nveu32_t max_rz[] = { EQOS_DEFAULT_RING_SZ, MGBE_MAX_RING_SZ }; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; typedef void (*init_ops_arr)(struct dma_chan_ops *temp); + static struct dma_chan_ops dma_gops[MAX_MAC_IP_TYPES]; + nve32_t ret = 0; - init_ops_arr i_ops[MAX_MAC_IP_TYPES] = { + const init_ops_arr i_ops[MAX_MAC_IP_TYPES] = { eqos_init_dma_chan_ops, mgbe_init_dma_chan_ops }; if (osi_dma == OSI_NULL) { - return -1; + ret = -1; + goto fail; } if ((l_dma->magic_num != (nveu64_t)osi_dma) || (l_dma->init_done == OSI_ENABLE)) { - return -1; + ret = -1; + goto fail; } if (osi_dma->is_ethernet_server != OSI_ENABLE) { @@ -300,56 +307,63 @@ nve32_t osi_init_dma_ops(struct osi_dma_priv_data *osi_dma) (osi_dma->osd_ops.printf == OSI_NULL) || #endif /* OSI_DEBUG */ (osi_dma->osd_ops.udelay == OSI_NULL)) { - return -1; + ret = -1; + goto fail; } } if (osi_dma->mac > OSI_MAC_HW_MGBE) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA: Invalid MAC HW type\n", 0ULL); - return -1; + ret = -1; + goto fail; } if ((osi_dma->tx_ring_sz == 0U) || - !(is_power_of_two(osi_dma->tx_ring_sz)) || + (is_power_of_two(osi_dma->tx_ring_sz) == 0U) || (osi_dma->tx_ring_sz < HW_MIN_RING_SZ) || (osi_dma->tx_ring_sz > default_rz[osi_dma->mac])) { - osi_dma->tx_ring_sz = default_rz[osi_dma->mac]; OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, - "DMA: Using default Tx ring size: \n", + "DMA: Invalid Tx ring size:\n", osi_dma->tx_ring_sz); + ret = -1; + goto fail; } if ((osi_dma->rx_ring_sz == 0U) || - !(is_power_of_two(osi_dma->rx_ring_sz)) || + (is_power_of_two(osi_dma->rx_ring_sz) == 0U) || (osi_dma->rx_ring_sz < HW_MIN_RING_SZ) || (osi_dma->rx_ring_sz > max_rz[osi_dma->mac])) { - osi_dma->rx_ring_sz = default_rz[osi_dma->mac]; OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, - "DMA: Using default rx ring size: \n", + "DMA: Invalid Rx ring size:\n", osi_dma->tx_ring_sz); + ret = -1; + goto fail; } - i_ops[osi_dma->mac](&g_ops[osi_dma->mac]); + i_ops[osi_dma->mac](&dma_gops[osi_dma->mac]); if (init_desc_ops(osi_dma) < 0) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA desc ops init failed\n", 0ULL); - return -1; + ret = -1; + goto fail; } #ifndef OSI_STRIPPED_LIB - if (validate_func_ptrs(osi_dma, &g_ops[osi_dma->mac]) < 0) { + if (validate_func_ptrs(osi_dma, &dma_gops[osi_dma->mac]) < 0) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA ops validation failed\n", 0ULL); - return -1; + ret = -1; + goto fail; } #endif - l_dma->ops_p = &g_ops[osi_dma->mac]; + l_dma->ops_p = &dma_gops[osi_dma->mac]; l_dma->init_done = 
OSI_ENABLE; - return 0; +fail: + return ret; } static inline void start_dma(const struct osi_dma_priv_data *const osi_dma, nveu32_t chan) @@ -454,7 +468,7 @@ static void init_dma_channel(const struct osi_dma_priv_data *const osi_dma, * calculation by using above formula */ if (tx_pbl[osi_dma->mac] >= MGBE_DMA_CHX_MAX_PBL) { - val |= ((MGBE_DMA_CHX_MAX_PBL / 8U) << MGBE_DMA_CHX_CTRL_PBL_SHIFT); + val |= MGBE_DMA_CHX_MAX_PBL_VAL; } else { val |= ((tx_pbl[osi_dma->mac] / 8U) << MGBE_DMA_CHX_CTRL_PBL_SHIFT); } @@ -468,7 +482,7 @@ static void init_dma_channel(const struct osi_dma_priv_data *const osi_dma, val |= rx_pbl[osi_dma->mac]; } else { if (rx_pbl[osi_dma->mac] >= MGBE_DMA_CHX_MAX_PBL) { - val |= ((MGBE_DMA_CHX_MAX_PBL / 8U) << MGBE_DMA_CHX_CTRL_PBL_SHIFT); + val |= MGBE_DMA_CHX_MAX_PBL_VAL; } else { val |= ((rx_pbl[osi_dma->mac] / 8U) << MGBE_DMA_CHX_CTRL_PBL_SHIFT); } @@ -504,39 +518,44 @@ static void init_dma_channel(const struct osi_dma_priv_data *const osi_dma, nve32_t osi_hw_dma_init(struct osi_dma_priv_data *osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; nveu32_t i, chan; - nve32_t ret; + nve32_t ret = 0; - if (validate_args(osi_dma, l_dma) < 0) { - return -1; + if (dma_validate_args(osi_dma, l_dma) < 0) { + ret = -1; + goto fail; } l_dma->mac_ver = osi_readl((nveu8_t *)osi_dma->base + MAC_VERSION) & MAC_VERSION_SNVER_MASK; if (validate_mac_ver_update_chans(l_dma->mac_ver, - &l_dma->max_chans, + &l_dma->num_max_chans, &l_dma->l_mac_ver) == 0) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "Invalid MAC version\n", (nveu64_t)l_dma->mac_ver); - return -1; + ret = -1; + goto fail; } - if (osi_dma->num_dma_chans > l_dma->max_chans) { + if ((osi_dma->num_dma_chans == 0U) || + (osi_dma->num_dma_chans > l_dma->num_max_chans)) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "Invalid number of DMA channels\n", 0ULL); - return -1; + ret = -1; + goto fail; } if (validate_dma_chans(osi_dma) < 0) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA channels validation failed\n", 0ULL); - return -1; + ret = -1; + goto fail; } ret = dma_desc_init(osi_dma); if (ret != 0) { - return ret; + goto fail; } /* Enable channel interrupts at wrapper level and start DMA */ @@ -553,7 +572,7 @@ nve32_t osi_hw_dma_init(struct osi_dma_priv_data *osi_dma) EQOS_DMA_CHX_STATUS(chan)), OSI_BIT(OSI_DMA_CH_TX_INTR)); if (ret < 0) { - return ret; + goto fail; } ret = intr_fn[OSI_DMA_INTR_ENABLE](osi_dma, @@ -564,7 +583,7 @@ nve32_t osi_hw_dma_init(struct osi_dma_priv_data *osi_dma) EQOS_DMA_CHX_STATUS(chan)), OSI_BIT(OSI_DMA_CH_RX_INTR)); if (ret < 0) { - return ret; + goto fail; } start_dma(osi_dma, chan); @@ -578,7 +597,8 @@ nve32_t osi_hw_dma_init(struct osi_dma_priv_data *osi_dma) osi_dma->ptp_flag = (OSI_PTP_SYNC_SLAVE | OSI_PTP_SYNC_TWOSTEP); } - return 0; +fail: + return ret; } static inline void stop_dma(const struct osi_dma_priv_data *const osi_dma, @@ -608,41 +628,49 @@ static inline void stop_dma(const struct osi_dma_priv_data *const osi_dma, nve32_t osi_hw_dma_deinit(struct osi_dma_priv_data *osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + nve32_t ret = 0; nveu32_t i; - if (validate_args(osi_dma, l_dma) < 0) { - return -1; + if (dma_validate_args(osi_dma, l_dma) < 0) { + ret = -1; + goto fail; } - if (osi_dma->num_dma_chans > l_dma->max_chans) { + if (osi_dma->num_dma_chans > l_dma->num_max_chans) { OSI_DMA_ERR(osi_dma->osd, 
OSI_LOG_ARG_INVALID, "Invalid number of DMA channels\n", 0ULL); - return -1; + ret = -1; + goto fail; } if (validate_dma_chans(osi_dma) < 0) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA channels validation failed\n", 0ULL); - return -1; + ret = -1; + goto fail; } for (i = 0; i < osi_dma->num_dma_chans; i++) { stop_dma(osi_dma, osi_dma->dma_chans[i]); } - return 0; +fail: + return ret; } nveu32_t osi_get_global_dma_status(struct osi_dma_priv_data *osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + nveu32_t ret = 0U; - if (validate_args(osi_dma, l_dma) < 0) { - return 0; + if (dma_validate_args(osi_dma, l_dma) < 0) { + goto fail; } - return osi_readl((nveu8_t *)osi_dma->base + HW_GLOBAL_DMA_STATUS); + ret = osi_readl((nveu8_t *)osi_dma->base + HW_GLOBAL_DMA_STATUS); +fail: + return ret; } nve32_t osi_handle_dma_intr(struct osi_dma_priv_data *osi_dma, @@ -650,44 +678,54 @@ nve32_t osi_handle_dma_intr(struct osi_dma_priv_data *osi_dma, nveu32_t tx_rx, nveu32_t en_dis) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + nve32_t ret = 0; - if (validate_args(osi_dma, l_dma) < 0) { - return -1; + if (dma_validate_args(osi_dma, l_dma) < 0) { + ret = -1; + goto fail; } if (validate_dma_chan_num(osi_dma, chan) < 0) { - return -1; + ret = -1; + goto fail; } if ((tx_rx > OSI_DMA_CH_RX_INTR) || (en_dis > OSI_DMA_INTR_ENABLE)) { - return -1; + ret = -1; + goto fail; } - return intr_fn[en_dis](osi_dma, VIRT_INTR_CHX_CNTRL(chan), + ret = intr_fn[en_dis](osi_dma, VIRT_INTR_CHX_CNTRL(chan), VIRT_INTR_CHX_STATUS(chan), ((osi_dma->mac == OSI_MAC_HW_MGBE) ? MGBE_DMA_CHX_STATUS(chan) : EQOS_DMA_CHX_STATUS(chan)), OSI_BIT(tx_rx)); + +fail: + return ret; } -nveu32_t osi_get_refill_rx_desc_cnt(struct osi_dma_priv_data *osi_dma, - unsigned int chan) +nveu32_t osi_get_refill_rx_desc_cnt(const struct osi_dma_priv_data *const osi_dma, + nveu32_t chan) { - struct osi_rx_ring *rx_ring = osi_dma->rx_ring[chan]; + const struct osi_rx_ring *const rx_ring = osi_dma->rx_ring[chan]; + nveu32_t ret = 0U; if ((rx_ring == OSI_NULL) || (rx_ring->cur_rx_idx >= osi_dma->rx_ring_sz) || (rx_ring->refill_idx >= osi_dma->rx_ring_sz)) { - return 0; + goto fail; } - return (rx_ring->cur_rx_idx - rx_ring->refill_idx) & + ret = (rx_ring->cur_rx_idx - rx_ring->refill_idx) & (osi_dma->rx_ring_sz - 1U); +fail: + return ret; } /** - * @brief rx_dma_desc_validate_args - DMA Rx descriptor init args Validate + * @brief rx_dma_desc_dma_validate_args - DMA Rx descriptor init args Validate * * Algorithm: Validates DMA Rx descriptor init argments. * @@ -704,30 +742,36 @@ nveu32_t osi_get_refill_rx_desc_cnt(struct osi_dma_priv_data *osi_dma, * @retval 0 on success * @retval -1 on failure. 
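osi_get_refill_rx_desc_cnt() computes the refill count as (cur_rx_idx - refill_idx) & (rx_ring_sz - 1U); because osi_init_dma_ops() now rejects ring sizes that are not powers of two, the mask gives the right answer even when the unsigned subtraction wraps. A small worked sketch (the null/bounds guards of the real function are omitted):

#include <stdint.h>
#include <stdio.h>

/* Assumes ring_sz is a power of two, as osi_init_dma_ops() enforces. */
static uint32_t refill_cnt(uint32_t cur_rx_idx, uint32_t refill_idx,
                           uint32_t ring_sz)
{
        return (cur_rx_idx - refill_idx) & (ring_sz - 1U);
}

int main(void)
{
        /* No wrap: consumer at 10, refill at 4 -> 6 descriptors to refill */
        printf("%u\n", (unsigned int)refill_cnt(10U, 4U, 1024U));
        /* Wrapped: consumer back at 2, refill still at 1020 -> again 6 */
        printf("%u\n", (unsigned int)refill_cnt(2U, 1020U, 1024U));
        return 0;
}
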
*/ -static inline nve32_t rx_dma_desc_validate_args( +static inline nve32_t rx_dma_desc_dma_validate_args( struct osi_dma_priv_data *osi_dma, struct dma_local *l_dma, - struct osi_rx_ring *rx_ring, + const struct osi_rx_ring *const rx_ring, nveu32_t chan) { - if (validate_args(osi_dma, l_dma) < 0) { - return -1; + nve32_t ret = 0; + + if (dma_validate_args(osi_dma, l_dma) < 0) { + ret = -1; + goto fail; } if (!((rx_ring != OSI_NULL) && (rx_ring->rx_swcx != OSI_NULL) && (rx_ring->rx_desc != OSI_NULL))) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma: Invalid pointers\n", 0ULL); - return -1; + ret = -1; + goto fail; } if (validate_dma_chan_num(osi_dma, chan) < 0) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma: Invalid channel\n", 0ULL); - return -1; + ret = -1; + goto fail; } - return 0; +fail: + return ret; } /** @@ -748,8 +792,8 @@ static inline nve32_t rx_dma_desc_validate_args( * - De-initialization: No * */ -static inline void rx_dma_handle_ioc(struct osi_dma_priv_data *osi_dma, - struct osi_rx_ring *rx_ring, +static inline void rx_dma_handle_ioc(const struct osi_dma_priv_data *const osi_dma, + const struct osi_rx_ring *const rx_ring, struct osi_rx_desc *rx_desc) { /* reset IOC bit if RWIT is enabled */ @@ -770,14 +814,16 @@ static inline void rx_dma_handle_ioc(struct osi_dma_priv_data *osi_dma, nve32_t osi_rx_dma_desc_init(struct osi_dma_priv_data *osi_dma, struct osi_rx_ring *rx_ring, nveu32_t chan) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; - nveu64_t tailptr = 0; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; struct osi_rx_swcx *rx_swcx = OSI_NULL; struct osi_rx_desc *rx_desc = OSI_NULL; + nveu64_t tailptr = 0; + nve32_t ret = 0; - if (rx_dma_desc_validate_args(osi_dma, l_dma, rx_ring, chan) < 0) { + if (rx_dma_desc_dma_validate_args(osi_dma, l_dma, rx_ring, chan) < 0) { /* Return on arguments validation failureĀ */ - return -1; + ret = -1; + goto fail; } /* Refill buffers */ @@ -823,27 +869,32 @@ nve32_t osi_rx_dma_desc_init(struct osi_dma_priv_data *osi_dma, /* Will not hit this case, used for CERT-C compliance */ OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma: Invalid tailptr\n", 0ULL); - return -1; + ret = -1; + goto fail; } update_rx_tail_ptr(osi_dma, chan, tailptr); - return 0; +fail: + return ret; } nve32_t osi_set_rx_buf_len(struct osi_dma_priv_data *osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; nveu32_t rx_buf_len; + nve32_t ret = 0; - if (validate_args(osi_dma, l_dma) < 0) { - return -1; + if (dma_validate_args(osi_dma, l_dma) < 0) { + ret = -1; + goto fail; } if (osi_dma->mtu > OSI_MAX_MTU_SIZE) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "Invalid MTU setting\n", 0ULL); - return -1; + ret = -1; + goto fail; } /* Add Ethernet header + FCS */ @@ -852,54 +903,64 @@ nve32_t osi_set_rx_buf_len(struct osi_dma_priv_data *osi_dma) /* Buffer alignment */ osi_dma->rx_buf_len = ((rx_buf_len + (AXI_BUS_WIDTH - 1U)) & ~(AXI_BUS_WIDTH - 1U)); - - return 0; +fail: + return ret; } nve32_t osi_dma_get_systime_from_mac(struct osi_dma_priv_data *const osi_dma, nveu32_t *sec, nveu32_t *nsec) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + nve32_t ret = 0; - if (validate_args(osi_dma, l_dma) < 0) { - return -1; + if (dma_validate_args(osi_dma, l_dma) < 0) { + ret = -1; } common_get_systime_from_mac(osi_dma->base, osi_dma->mac, sec, nsec); - return 0; + return ret; } nveu32_t 
osi_is_mac_enabled(struct osi_dma_priv_data *const osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + nveu32_t ret = OSI_DISABLE; - if (validate_args(osi_dma, l_dma) < 0) { - return OSI_DISABLE; + if (dma_validate_args(osi_dma, l_dma) < 0) { + goto fail; } - return common_is_mac_enabled(osi_dma->base, osi_dma->mac); + ret = common_is_mac_enabled(osi_dma->base, osi_dma->mac); +fail: + return ret; } nve32_t osi_hw_transmit(struct osi_dma_priv_data *osi_dma, nveu32_t chan) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + nve32_t ret = 0; - if (osi_unlikely(validate_args(osi_dma, l_dma) < 0)) { - return -1; + if (osi_unlikely(dma_validate_args(osi_dma, l_dma) < 0)) { + ret = -1; + goto fail; } if (osi_unlikely(validate_dma_chan_num(osi_dma, chan) < 0)) { - return -1; + ret = -1; + goto fail; } if (osi_unlikely(osi_dma->tx_ring[chan] == OSI_NULL)) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA: Invalid Tx ring\n", 0ULL); - return -1; + ret = -1; + goto fail; } - return hw_transmit(osi_dma, osi_dma->tx_ring[chan], chan); + ret = hw_transmit(osi_dma, osi_dma->tx_ring[chan], chan); +fail: + return ret; } #ifdef OSI_DEBUG @@ -908,7 +969,7 @@ nve32_t osi_dma_ioctl(struct osi_dma_priv_data *osi_dma) struct dma_local *l_dma = (struct dma_local *)osi_dma; struct osi_dma_ioctl_data *data; - if (osi_unlikely(validate_args(osi_dma, l_dma) < 0)) { + if (osi_unlikely(dma_validate_args(osi_dma, l_dma) < 0)) { return -1; } @@ -962,7 +1023,7 @@ static inline nve32_t osi_slot_args_validate(struct osi_dma_priv_data *osi_dma, struct dma_local *l_dma, nveu32_t set) { - if (validate_args(osi_dma, l_dma) < 0) { + if (dma_validate_args(osi_dma, l_dma) < 0) { return -1; } @@ -993,7 +1054,7 @@ nve32_t osi_config_slot_function(struct osi_dma_priv_data *osi_dma, chan = osi_dma->dma_chans[i]; if ((chan == 0x0U) || - (chan >= l_dma->max_chans)) { + (chan >= l_dma->num_max_chans)) { /* Ignore 0 and invalid channels */ continue; } diff --git a/osi/dma/osi_dma_txrx.c b/osi/dma/osi_dma_txrx.c index eeea0ce..29fd848 100644 --- a/osi/dma/osi_dma_txrx.c +++ b/osi/dma/osi_dma_txrx.c @@ -58,16 +58,18 @@ static struct desc_ops d_ops[MAX_MAC_IP_TYPES]; static inline nve32_t validate_rx_completions_arg( struct osi_dma_priv_data *osi_dma, nveu32_t chan, - nveu32_t *more_data_avail, + const nveu32_t *const more_data_avail, struct osi_rx_ring **rx_ring, struct osi_rx_pkt_cx **rx_pkt_cx) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + const struct dma_local *const l_dma = (struct dma_local *)(void *)osi_dma; + nve32_t ret = 0; if (osi_unlikely((osi_dma == OSI_NULL) || (more_data_avail == OSI_NULL) || - (chan >= l_dma->max_chans))) { - return -1; + (chan >= l_dma->num_max_chans))) { + ret = -1; + goto fail; } *rx_ring = osi_dma->rx_ring[chan]; @@ -75,17 +77,20 @@ static inline nve32_t validate_rx_completions_arg( OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "validate_input_rx_completions: Invalid pointers\n", 0ULL); - return -1; + ret = -1; + goto fail; } *rx_pkt_cx = &(*rx_ring)->rx_pkt_cx; if (osi_unlikely(*rx_pkt_cx == OSI_NULL)) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "validate_input_rx_completions: Invalid pointers\n", 0ULL); - return -1; + ret = -1; + goto fail; } - return 0; +fail: + return ret; } nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma, @@ -108,13 +113,15 @@ nve32_t osi_process_rx_completions(struct 
osi_dma_priv_data *osi_dma, ret = validate_rx_completions_arg(osi_dma, chan, more_data_avail, &rx_ring, &rx_pkt_cx); if (osi_unlikely(ret < 0)) { - return ret; + received = -1; + goto fail; } if (rx_ring->cur_rx_idx >= osi_dma->rx_ring_sz) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid cur_rx_idx\n", 0ULL); - return -1; + received = -1; + goto fail; } /* Reset flag to indicate if more Rx frames available to OSD layer */ @@ -248,7 +255,8 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma, OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid function pointer\n", 0ULL); - return -1; + received = -1; + goto fail; } } #ifndef OSI_STRIPPED_LIB @@ -280,6 +288,8 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma, } } #endif /* !OSI_STRIPPED_LIB */ + +fail: return received; } @@ -484,11 +494,13 @@ static inline nve32_t validate_tx_completions_arg( nveu32_t chan, struct osi_tx_ring **tx_ring) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + const struct dma_local *const l_dma = (struct dma_local *)(void *)osi_dma; + nve32_t ret = 0; if (osi_unlikely((osi_dma == OSI_NULL) || - (chan >= l_dma->max_chans))) { - return -1; + (chan >= l_dma->num_max_chans))) { + ret = -1; + goto fail; } *tx_ring = osi_dma->tx_ring[chan]; @@ -497,10 +509,11 @@ static inline nve32_t validate_tx_completions_arg( OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "validate_tx_completions_arg: Invalid pointers\n", 0ULL); - return -1; + ret = -1; + goto fail; } - - return 0; +fail: + return ret; } /** @@ -513,15 +526,15 @@ static inline nve32_t validate_tx_completions_arg( * @retval 1 if condition is true * @retval 0 if condition is false. */ -static inline unsigned int is_ptp_twostep_or_slave_mode(unsigned int ptp_flag) +static inline nveu32_t is_ptp_twostep_or_slave_mode(nveu32_t ptp_flag) { return (((ptp_flag & OSI_PTP_SYNC_SLAVE) == OSI_PTP_SYNC_SLAVE) || ((ptp_flag & OSI_PTP_SYNC_TWOSTEP) == OSI_PTP_SYNC_TWOSTEP)) ? OSI_ENABLE : OSI_DISABLE; } -int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, - unsigned int chan, int budget) +nve32_t osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, + nveu32_t chan, nve32_t budget) { struct osi_tx_ring *tx_ring = OSI_NULL; struct osi_txdone_pkt_cx *txdone_pkt_cx = OSI_NULL; @@ -535,7 +548,8 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, ret = validate_tx_completions_arg(osi_dma, chan, &tx_ring); if (osi_unlikely(ret < 0)) { - return ret; + processed = -1; + goto fail; } txdone_pkt_cx = &tx_ring->txdone_pkt_cx; @@ -642,7 +656,8 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid function pointer\n", 0ULL); - return -1; + processed = -1; + goto fail; } tx_desc->tdes3 = 0; @@ -664,6 +679,7 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, tx_ring->clean_idx = entry; } +fail: return processed; } @@ -692,18 +708,17 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, * @retval 1 - cntx desc used. 
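is_ptp_twostep_or_slave_mode() above, and the one-step/master counterpart further down, are plain bit-mask predicates over osi_dma->ptp_flag. A sketch with illustrative bit assignments, since the OSI_PTP_SYNC_* values are not part of this hunk:

#include <stdint.h>

#define PTP_SYNC_MASTER   (1U << 0)     /* illustrative bit assignments */
#define PTP_SYNC_SLAVE    (1U << 1)
#define PTP_SYNC_ONESTEP  (1U << 2)
#define PTP_SYNC_TWOSTEP  (1U << 3)

static inline uint32_t is_twostep_or_slave(uint32_t ptp_flag)
{
        /* True when either the slave or the two-step bit is set */
        return (((ptp_flag & PTP_SYNC_SLAVE) == PTP_SYNC_SLAVE) ||
                ((ptp_flag & PTP_SYNC_TWOSTEP) == PTP_SYNC_TWOSTEP)) ? 1U : 0U;
}

static inline uint32_t is_onestep_and_master(uint32_t ptp_flag)
{
        /* True only when both the master and the one-step bit are set */
        return (((ptp_flag & PTP_SYNC_MASTER) == PTP_SYNC_MASTER) &&
                ((ptp_flag & PTP_SYNC_ONESTEP) == PTP_SYNC_ONESTEP)) ? 1U : 0U;
}
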
*/ -static inline nve32_t need_cntx_desc(struct osi_tx_pkt_cx *tx_pkt_cx, - struct osi_tx_swcx *tx_swcx, - struct osi_tx_desc *tx_desc, - unsigned int ptp_sync_flag, - unsigned int mac) +static inline nve32_t need_cntx_desc(const struct osi_tx_pkt_cx *const tx_pkt_cx, + struct osi_tx_swcx *tx_swcx, + struct osi_tx_desc *tx_desc, + nveu32_t ptp_sync_flag, + nveu32_t mac) { nve32_t ret = 0; if (((tx_pkt_cx->flags & OSI_PKT_CX_VLAN) == OSI_PKT_CX_VLAN) || ((tx_pkt_cx->flags & OSI_PKT_CX_TSO) == OSI_PKT_CX_TSO) || ((tx_pkt_cx->flags & OSI_PKT_CX_PTP) == OSI_PKT_CX_PTP)) { - if ((tx_pkt_cx->flags & OSI_PKT_CX_VLAN) == OSI_PKT_CX_VLAN) { /* Set context type */ tx_desc->tdes3 |= TDES3_CTXT; @@ -730,24 +745,22 @@ static inline nve32_t need_cntx_desc(struct osi_tx_pkt_cx *tx_pkt_cx, /* This part of code must be at the end of function */ if ((tx_pkt_cx->flags & OSI_PKT_CX_PTP) == OSI_PKT_CX_PTP) { - if ((mac == OSI_MAC_HW_EQOS) && - ((ptp_sync_flag & OSI_PTP_SYNC_TWOSTEP) == - OSI_PTP_SYNC_TWOSTEP)){ - /* return the current ret value */ - return ret; - } + if (((mac == OSI_MAC_HW_EQOS) && + ((ptp_sync_flag & OSI_PTP_SYNC_TWOSTEP) == OSI_PTP_SYNC_TWOSTEP))) { + /* Doing nothing */ + } else { + /* Set context type */ + tx_desc->tdes3 |= TDES3_CTXT; + /* in case of One-step sync */ + if ((ptp_sync_flag & OSI_PTP_SYNC_ONESTEP) == + OSI_PTP_SYNC_ONESTEP) { + /* Set TDES3_OSTC */ + tx_desc->tdes3 |= TDES3_OSTC; + tx_desc->tdes3 &= ~TDES3_TCMSSV; + } - /* Set context type */ - tx_desc->tdes3 |= TDES3_CTXT; - /* in case of One-step sync */ - if ((ptp_sync_flag & OSI_PTP_SYNC_ONESTEP) == - OSI_PTP_SYNC_ONESTEP) { - /* Set TDES3_OSTC */ - tx_desc->tdes3 |= TDES3_OSTC; - tx_desc->tdes3 &= ~TDES3_TCMSSV; + ret = 1; } - - ret = 1; } } @@ -764,7 +777,7 @@ static inline nve32_t need_cntx_desc(struct osi_tx_pkt_cx *tx_pkt_cx, * @retval 1 if condition is true * @retval 0 if condition is false. */ -static inline unsigned int is_ptp_onestep_and_master_mode(unsigned int ptp_flag) +static inline nveu32_t is_ptp_onestep_and_master_mode(nveu32_t ptp_flag) { return (((ptp_flag & OSI_PTP_SYNC_MASTER) == OSI_PTP_SYNC_MASTER) && ((ptp_flag & OSI_PTP_SYNC_ONESTEP) == OSI_PTP_SYNC_ONESTEP)) ? @@ -798,13 +811,13 @@ static inline void fill_first_desc(struct osi_tx_ring *tx_ring, struct osi_tx_pkt_cx *tx_pkt_cx, struct osi_tx_desc *tx_desc, struct osi_tx_swcx *tx_swcx, - unsigned int ptp_flag) + nveu32_t ptp_flag) #else static inline void fill_first_desc(OSI_UNUSED struct osi_tx_ring *tx_ring, struct osi_tx_pkt_cx *tx_pkt_cx, struct osi_tx_desc *tx_desc, struct osi_tx_swcx *tx_swcx, - unsigned int ptp_flag) + nveu32_t ptp_flag) #endif /* !OSI_STRIPPED_LIB */ { tx_desc->tdes0 = L32(tx_swcx->buf_phy_addr); @@ -911,54 +924,63 @@ static inline void dmb_oshst(void) * @retval 0 on success * @retval -1 on failure. 
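need_cntx_desc() now handles the EQOS two-step exception inside the nested block instead of returning early. A reduced sketch of just the decision (the VLAN/MSS field programming is left out, and it is assumed, as the outer condition suggests, that VLAN and TSO packets each get a context entry); the flag encodings are stand-ins for OSI_PKT_CX_* and OSI_PTP_SYNC_*:

#include <stdint.h>

#define PKT_CX_VLAN      (1U << 0)      /* stand-in flag encodings */
#define PKT_CX_TSO       (1U << 1)
#define PKT_CX_PTP       (1U << 2)
#define PTP_SYNC_TWOSTEP (1U << 3)
#define MAC_HW_EQOS      0U

/* Returns 1 when a context descriptor should precede the packet. */
static int32_t needs_context_desc(uint32_t pkt_flags, uint32_t ptp_sync_flag,
                                  uint32_t mac)
{
        int32_t ret = 0;

        if ((pkt_flags & (PKT_CX_VLAN | PKT_CX_TSO)) != 0U) {
                ret = 1;
        }

        if ((pkt_flags & PKT_CX_PTP) == PKT_CX_PTP) {
                if ((mac == MAC_HW_EQOS) &&
                    ((ptp_sync_flag & PTP_SYNC_TWOSTEP) == PTP_SYNC_TWOSTEP)) {
                        /* EQOS + two-step sync: special case kept from the
                         * original code, no PTP context descriptor here */
                } else {
                        ret = 1;
                }
        }

        return ret;     /* single exit */
}
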
*/ -static inline nve32_t validate_ctx(struct osi_dma_priv_data *osi_dma, - struct osi_tx_pkt_cx *tx_pkt_cx) +static inline nve32_t validate_ctx(const struct osi_dma_priv_data *const osi_dma, + const struct osi_tx_pkt_cx *const tx_pkt_cx) { + nve32_t ret = 0; + if ((tx_pkt_cx->flags & OSI_PKT_CX_TSO) == OSI_PKT_CX_TSO) { if (osi_unlikely((tx_pkt_cx->tcp_udp_hdrlen / OSI_TSO_HDR_LEN_DIVISOR) > TDES3_THL_MASK)) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid TSO header len\n", (nveul64_t)tx_pkt_cx->tcp_udp_hdrlen); + ret = -1; goto fail; } else if (osi_unlikely(tx_pkt_cx->payload_len > TDES3_TPL_MASK)) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid TSO payload len\n", (nveul64_t)tx_pkt_cx->payload_len); + ret = -1; goto fail; } else if (osi_unlikely(tx_pkt_cx->mss > TDES2_MSS_MASK)) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid MSS\n", (nveul64_t)tx_pkt_cx->mss); + ret = -1; goto fail; + } else { + /* empty statement */ } } else if ((tx_pkt_cx->flags & OSI_PKT_CX_LEN) == OSI_PKT_CX_LEN) { if (osi_unlikely(tx_pkt_cx->payload_len > TDES3_PL_MASK)) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid frame len\n", (nveul64_t)tx_pkt_cx->payload_len); + ret = -1; goto fail; } + } else { + /* empty statement */ } if (osi_unlikely(tx_pkt_cx->vtag_id > TDES3_VT_MASK)) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid VTAG_ID\n", (nveul64_t)tx_pkt_cx->vtag_id); - goto fail; + ret = -1; } - return 0; fail: - return -1; + return ret; } nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, struct osi_tx_ring *tx_ring, nveu32_t chan) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; struct osi_tx_pkt_cx *tx_pkt_cx = OSI_NULL; struct osi_tx_desc *first_desc = OSI_NULL; struct osi_tx_desc *last_desc = OSI_NULL; @@ -978,13 +1000,15 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, nveu32_t desc_cnt = 0U; nveu64_t tailptr; nveu32_t entry = 0U; + nve32_t ret = 0; nveu32_t i; entry = tx_ring->cur_tx_idx; if (entry >= osi_dma->tx_ring_sz) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid cur_tx_idx\n", 0ULL); - return -1; + ret = -1; + goto fail; } tx_desc = tx_ring->tx_desc + entry; @@ -996,11 +1020,13 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, /* Will not hit this case */ OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid desc_cnt\n", 0ULL); - return -1; + ret = -1; + goto fail; } if (validate_ctx(osi_dma, tx_pkt_cx) < 0) { - return -1; + ret = -1; + goto fail; } #ifndef OSI_STRIPPED_LIB @@ -1135,7 +1161,8 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, /* Will not hit this case */ OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid tx_desc_phy_addr\n", 0ULL); - return -1; + ret = -1; + goto fail; } /* @@ -1147,7 +1174,8 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, /* Update the Tx tail pointer */ osi_writel(L32(tailptr), (nveu8_t *)osi_dma->base + tail_ptr_reg[osi_dma->mac]); - return 0; +fail: + return ret; } /** @@ -1172,7 +1200,7 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, * @retval 0 on success * @retval -1 on failure. 
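validate_ctx() now records -1 in a local ret and falls through to a common fail label rather than returning from each branch. A condensed sketch of that bounds-checking shape; the limits and the header-length divisor are illustrative stand-ins for TDES3_THL_MASK, TDES3_TPL_MASK, TDES2_MSS_MASK, TDES3_VT_MASK and OSI_TSO_HDR_LEN_DIVISOR:

#include <stdint.h>

#define TSO_HDR_LEN_DIVISOR   4U        /* illustrative divisor and limits */
#define MAX_TSO_HDR_LEN_UNITS 0x3FFU
#define MAX_TSO_PAYLOAD_LEN   0x3FFFFU
#define MAX_MSS               0x3FFFU
#define MAX_VTAG_ID           0xFFFFU

struct tx_pkt_ctx {
        uint32_t is_tso;
        uint32_t tcp_udp_hdrlen;
        uint32_t payload_len;
        uint32_t mss;
        uint32_t vtag_id;
};

static int32_t validate_tx_ctx(const struct tx_pkt_ctx *ctx)
{
        int32_t ret = 0;

        if (ctx->is_tso != 0U) {
                if ((ctx->tcp_udp_hdrlen / TSO_HDR_LEN_DIVISOR) >
                    MAX_TSO_HDR_LEN_UNITS) {
                        ret = -1;       /* header length field overflow */
                        goto fail;
                }
                if (ctx->payload_len > MAX_TSO_PAYLOAD_LEN) {
                        ret = -1;       /* payload length field overflow */
                        goto fail;
                }
                if (ctx->mss > MAX_MSS) {
                        ret = -1;       /* MSS field overflow */
                        goto fail;
                }
        }

        if (ctx->vtag_id > MAX_VTAG_ID) {
                ret = -1;               /* VLAN tag field overflow */
        }

fail:
        return ret;
}
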
*/ -static nve32_t rx_dma_desc_initialization(struct osi_dma_priv_data *osi_dma, +static nve32_t rx_dma_desc_initialization(const struct osi_dma_priv_data *const osi_dma, nveu32_t chan) { const nveu32_t start_addr_high_reg[2] = { @@ -1200,7 +1228,8 @@ static nve32_t rx_dma_desc_initialization(struct osi_dma_priv_data *osi_dma, if (osi_unlikely(rx_ring == OSI_NULL)) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid argument\n", 0ULL); - return -1; + ret = -1; + goto fail; }; rx_ring->cur_rx_idx = 0; @@ -1250,7 +1279,8 @@ static nve32_t rx_dma_desc_initialization(struct osi_dma_priv_data *osi_dma, /* Will not hit this case */ OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid phys address\n", 0ULL); - return -1; + ret = -1; + goto fail; } /* Update the HW DMA ring length */ @@ -1266,6 +1296,7 @@ static nve32_t rx_dma_desc_initialization(struct osi_dma_priv_data *osi_dma, osi_writel(L32(rx_ring->rx_desc_phy_addr), (nveu8_t *)osi_dma->base + start_addr_low_reg[osi_dma->mac]); +fail: return ret; } @@ -1293,18 +1324,19 @@ static nve32_t rx_dma_desc_initialization(struct osi_dma_priv_data *osi_dma, static nve32_t rx_dma_desc_init(struct osi_dma_priv_data *osi_dma) { nveu32_t chan = 0; - nveu32_t i; nve32_t ret = 0; + nveu32_t i; for (i = 0; i < osi_dma->num_dma_chans; i++) { chan = osi_dma->dma_chans[i]; ret = rx_dma_desc_initialization(osi_dma, chan); if (ret != 0) { - return ret; + goto fail; } } +fail: return ret; } @@ -1360,12 +1392,13 @@ static inline void set_tx_ring_len_and_start_addr(const struct osi_dma_priv_data * @retval 0 on success * @retval -1 on failure. */ -static nve32_t tx_dma_desc_init(struct osi_dma_priv_data *osi_dma) +static nve32_t tx_dma_desc_init(const struct osi_dma_priv_data *const osi_dma) { struct osi_tx_ring *tx_ring = OSI_NULL; struct osi_tx_desc *tx_desc = OSI_NULL; struct osi_tx_swcx *tx_swcx = OSI_NULL; nveu32_t chan = 0; + nve32_t ret = 0; nveu32_t i, j; for (i = 0; i < osi_dma->num_dma_chans; i++) { @@ -1375,7 +1408,8 @@ static nve32_t tx_dma_desc_init(struct osi_dma_priv_data *osi_dma) if (osi_unlikely(tx_ring == OSI_NULL)) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid pointers\n", 0ULL); - return -1; + ret = -1; + goto fail; } for (j = 0; j < osi_dma->tx_ring_sz; j++) { @@ -1406,7 +1440,8 @@ static nve32_t tx_dma_desc_init(struct osi_dma_priv_data *osi_dma) chan, (osi_dma->tx_ring_sz - 1U)); } - return 0; +fail: + return ret; } nve32_t dma_desc_init(struct osi_dma_priv_data *osi_dma) @@ -1415,26 +1450,27 @@ nve32_t dma_desc_init(struct osi_dma_priv_data *osi_dma) ret = tx_dma_desc_init(osi_dma); if (ret != 0) { - return ret; + goto fail; } ret = rx_dma_desc_init(osi_dma); if (ret != 0) { - return ret; + goto fail; } +fail: return ret; } -nve32_t init_desc_ops(struct osi_dma_priv_data *osi_dma) +nve32_t init_desc_ops(const struct osi_dma_priv_data *const osi_dma) { - typedef void (*desc_ops_arr)(struct desc_ops *); + typedef void (*desc_ops_arr)(struct desc_ops *p_ops); - desc_ops_arr desc_ops[2] = { + const desc_ops_arr desc_ops_a[2] = { eqos_init_desc_ops, mgbe_init_desc_ops }; - desc_ops[osi_dma->mac](&d_ops[osi_dma->mac]); + desc_ops_a[osi_dma->mac](&d_ops[osi_dma->mac]); /* TODO: validate function pointers */ return 0;
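
init_desc_ops() is one instance of the dispatch pattern the patch tightens throughout: a const array of init callbacks indexed by osi_dma->mac fills a per-MAC ops table that later code calls through. A minimal self-contained version of that shape (the 0 = EQOS / 1 = MGBE index values and the placeholder callback bodies are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

#define MAC_HW_EQOS      0U             /* assumed index values */
#define MAC_HW_MGBE      1U
#define MAX_MAC_IP_TYPES 2U

struct desc_ops_sketch {
        uint32_t (*get_rx_csum)(uint32_t rdes1, uint32_t rdes3);
};

/* Placeholder callback bodies, just to make the dispatch visible */
static uint32_t eqos_rx_csum(uint32_t rdes1, uint32_t rdes3) { (void)rdes3; return rdes1; }
static uint32_t mgbe_rx_csum(uint32_t rdes1, uint32_t rdes3) { (void)rdes1; return rdes3; }

static void eqos_init_ops(struct desc_ops_sketch *p_dops) { p_dops->get_rx_csum = eqos_rx_csum; }
static void mgbe_init_ops(struct desc_ops_sketch *p_dops) { p_dops->get_rx_csum = mgbe_rx_csum; }

/* Per-MAC ops table, filled once through a const array of init callbacks */
static struct desc_ops_sketch d_ops[MAX_MAC_IP_TYPES];

static void init_ops(uint32_t mac)
{
        typedef void (*init_fn)(struct desc_ops_sketch *p_ops);
        const init_fn init_tbl[MAX_MAC_IP_TYPES] = { eqos_init_ops, mgbe_init_ops };

        init_tbl[mac](&d_ops[mac]);
}

int main(void)
{
        init_ops(MAC_HW_EQOS);
        init_ops(MAC_HW_MGBE);
        printf("%u %u\n",
               (unsigned int)d_ops[MAC_HW_EQOS].get_rx_csum(1U, 2U),
               (unsigned int)d_ops[MAC_HW_MGBE].get_rx_csum(1U, 2U));
        return 0;
}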