diff --git a/include/config.tmk b/include/config.tmk index 415f044..dfbf7c2 100644 --- a/include/config.tmk +++ b/include/config.tmk @@ -47,4 +47,5 @@ endif #NV_COMPONENT_CFLAGS += -DMACSEC_KEY_PROGRAM HSI_SUPPORT := 1 MACSEC_SUPPORT := 1 +#MACSEC_KEY_PROGRAM := 1 ccflags-y += $(NV_COMPONENT_CFLAGS) diff --git a/include/mmc.h b/include/mmc.h index 3e5fa6d..b1b5cb2 100644 --- a/include/mmc.h +++ b/include/mmc.h @@ -60,6 +60,8 @@ struct osi_macsec_mmc_counters { /** This counter provides the number of octets after IVC passing * valid values are between 0 and UINT64_MAX */ nveul64_t rx_octets_validated; + /** This counter provides the number of octets after decryption */ + nveul64_t rx_octets_decrypted; /** This counter provides the number not valid packets * valid values are between 0 and UINT64_MAX */ nveul64_t rx_pkts_not_valid[OSI_MACSEC_SC_INDEX_MAX]; @@ -84,9 +86,13 @@ struct osi_macsec_mmc_counters { /** This counter provides the number of out packets protected * valid values are between 0 and UINT64_MAX */ nveul64_t tx_pkts_protected[OSI_MACSEC_SC_INDEX_MAX]; + /** This counter provides the number of out packets encrypted */ + nveul64_t tx_pkts_encrypted[OSI_MACSEC_SC_INDEX_MAX]; /** This counter provides the number of out octets protected/ * valid values are between 0 and UINT64_MAX */ nveul64_t tx_octets_protected; + /** This counter provides the number of out octets encrypted */ + nveul64_t tx_octets_encrypted; }; #endif /* MACSEC_SUPPORT */ #endif /* INCLUDED_MMC_H */ diff --git a/include/nvethernetrm_export.h b/include/nvethernetrm_export.h index 7bc46ab..9fa6cc9 100644 --- a/include/nvethernetrm_export.h +++ b/include/nvethernetrm_export.h @@ -137,7 +137,9 @@ struct osi_core_frp_cmd { * Bit[0] - DMA channel 0 * .. 
* Bit [N] - DMA channel N] */ - nveu32_t dma_sel; + nveu64_t dma_sel; + /** OSD DCHT */ + nveu8_t dcht; }; /** diff --git a/include/nvethernetrm_l3l4.h b/include/nvethernetrm_l3l4.h index d275e4c..b534a24 100644 --- a/include/nvethernetrm_l3l4.h +++ b/include/nvethernetrm_l3l4.h @@ -47,6 +47,9 @@ struct osi_l3_l4_filter { nveu32_t is_udp; /** ipv6 (OSI_L3L4_ENABLE) or ipv4 (OSI_L3L4_DISABLE) */ nveu32_t is_ipv6; + /** match combined L3, L4 filters (OSI_TRUE) or ignore L3,L4 + * combined filter match (OSI_FALSE) */ + nveu32_t is_l3l4_match_en; #endif /* !OSI_STRIPPED_LIB */ struct { /** ipv4 address diff --git a/include/osi_common.h b/include/osi_common.h index d2aee9f..7029234 100644 --- a/include/osi_common.h +++ b/include/osi_common.h @@ -51,6 +51,8 @@ #define OSI_LOCKED 0x1U /** @brief Number of Nano seconds per second */ #define OSI_NSEC_PER_SEC 1000000000ULL +#define OSI_MGBE_MAX_RX_RIIT_NSEC 17500U +#define OSI_MGBE_MIN_RX_RIIT_NSEC 535U #ifndef OSI_STRIPPED_LIB #define OSI_MAX_RX_COALESCE_USEC 1020U #define OSI_EQOS_MIN_RX_COALESCE_USEC 5U @@ -208,11 +210,14 @@ #define OSI_EQOS_MAX_NUM_QUEUES 8U /** @brief Maximum number of L3L4 filters supported */ #define OSI_MGBE_MAX_L3_L4_FILTER 8U +/** @brief Maximum number of L3L4 filters supported for T264 */ +#define OSI_MGBE_MAX_L3_L4_FILTER_T264 48U /** * @brief Maximum number of channels in MGBE */ -//TBD: T264, NET04 supports only 10 VDMA -#define OSI_MGBE_MAX_NUM_CHANS 10U +//TBD: T264, NET05 supports only 20 VDMA, change to 48 later +#define OSI_MGBE_MAX_NUM_CHANS 20U +#define OSI_MGBE_T23X_MAX_NUM_CHANS 10U /** * @brief Maximum number of PDMA channels in MGBE */ @@ -220,6 +225,8 @@ /** @brief Maximum number of queues in MGBE */ #define OSI_MGBE_MAX_NUM_QUEUES 10U #define OSI_EQOS_XP_MAX_CHANS 4U +/* max riit DT configs for supported speeds */ +#define OSI_MGBE_MAX_NUM_RIIT 4U /** * @brief Maximum number of Secure Channels supported @@ -241,6 +248,15 @@ /** @brief flag indicating MGBE MAC on T26X */ #define 
OSI_MAC_HW_MGBE_T26X 2U +/** MAC version type for EQOS version previous to 5.30 */ +#define MAC_CORE_VER_TYPE_EQOS 0U +/** MAC version type for EQOS version 5.30 */ +#define MAC_CORE_VER_TYPE_EQOS_5_30 1U +/** MAC version type for MGBE IP */ +#define MAC_CORE_VER_TYPE_MGBE 2U +/** MAC version type for T26x EQOS version 5.40 */ +#define MAC_CORE_VER_TYPE_EQOS_5_40 3U + #define OSI_NULL ((void *)0) /** Enable Flag */ #define OSI_ENABLE 1U @@ -252,6 +268,7 @@ #define OSI_H_ENABLE (~OSI_H_DISABLE) #define OSI_BIT(nr) ((nveu32_t)1 << (((nveu32_t)nr) & 0x1FU)) +#define OSI_BIT_64(nr) ((nveu64_t)1 << (nr)) #ifndef OSI_STRIPPED_LIB #define OSI_MGBE_MAC_3_00 0x30U @@ -264,10 +281,11 @@ #define OSI_EQOS_MAC_5_00 0x50U /** @brief EQOS MAC version Orin */ #define OSI_EQOS_MAC_5_30 0x53U +#define OSI_EQOS_MAC_5_40 0x54U /** @brief MGBE MAC version Orin */ #define OSI_MGBE_MAC_3_10 0x31U -//TBD: T264 NET04 version, update it later #define OSI_MGBE_MAC_3_20 0x32U +#define OSI_MGBE_MAC_4_20 0x42U /** * @brief Maximum number of VM IRQs diff --git a/include/osi_core.h b/include/osi_core.h index 445bc1a..8827f94 100644 --- a/include/osi_core.h +++ b/include/osi_core.h @@ -217,6 +217,7 @@ typedef my_lint_64 nvel64_t; #define EQOS_MAX_MAC_5_3_ADDRESS_FILTER 32U #define EQOS_MAX_L3_L4_FILTER 8U #define OSI_MGBE_MAX_MAC_ADDRESS_FILTER 32U +#define OSI_MGBE_MAX_MAC_ADDRESS_FILTER_T26X 48U #define OSI_DA_MATCH 0U #ifndef OSI_STRIPPED_LIB #define OSI_INV_MATCH 1U @@ -284,9 +285,18 @@ typedef my_lint_64 nvel64_t; * @brief Ethernet PHY Interface Modes */ #define OSI_XFI_MODE_10G 0U -#define OSI_XFI_MODE_5G 1U +#define OSI_XFI_MODE_5G 1U #define OSI_USXGMII_MODE_10G 2U #define OSI_USXGMII_MODE_5G 3U +#define OSI_XAUI_MODE_25G 4U +/** + * @brief Ethernet UPHY GBE Modes + */ +#define OSI_GBE_MODE_5G 0U +#define OSI_GBE_MODE_10G 1U +#define OSI_UPHY_GBE_MODE_25G 2U +#define OSI_GBE_MODE_1G 3U +#define OSI_GBE_MODE_2_5G 4U /** * @addtogroup IOCTL OPS MACROS @@ -502,7 +512,8 @@ typedef my_lint_64 
nvel64_t; #define VLAN_NUM_VID 4096U #define OSI_DELAY_1000US 1000U - +#define OSI_DELAY_1US 1U +#define RCHLIST_SIZE 48U /** * @addtogroup PTP PTP related information * @@ -740,7 +751,18 @@ struct osi_filter { /** src_dest: SA(1) or DA(0) */ nveu32_t src_dest; /** indicates one hot encoded DMA receive channels to program */ - nveu32_t dma_chansel; + nveu64_t dma_chansel; + /** Indicates packet duplication enable(1) disable (0) */ + nveu32_t pkt_dup; +}; + +/** + * @brief OSI core structure for RCHlist + */ +struct rchlist_index { + nveu8_t mac_address[OSI_ETH_ALEN]; + nveu32_t in_use; + nveu64_t dch; }; #ifndef OSI_STRIPPED_LIB @@ -1300,6 +1322,17 @@ struct osi_macsec_sc_info { /** flag indicating the prosition of vlan tag * valid values are either 0(vlan not in clear) or 1(vlan in clear) */ nveu8_t vlan_in_clear; + /** Indicates 1 bit for encription configuration + 0: Indicates disabled + 1: Indicates enabled + */ + nveu8_t encrypt; + /** Indicates 2 bit for confidentiality offset configuration + 0: Indicates offset as 0 + 1: Indicates offset as 30 + 2: Indicates offset as 50 + */ + nveu8_t conf_offset; }; /** @@ -1408,8 +1441,12 @@ struct osi_core_frp_data { /** Entry OK Index - Next Instruction * valid values are from 0 to 0xFF */ nveu8_t ok_index; + /** Entry dcht */ + nveu8_t dcht; /** Entry DMA Channel selection (1-bit for each channel) */ - nveu32_t dma_chsel; + nveu64_t dma_chsel; + /** Entry RChlist index */ + nve32_t rchlist_indx; }; /** @@ -1435,6 +1472,8 @@ struct osi_core_tx_ts { /** Packet ID for corresponding timestamp * valid values are from 1 to 0x3FF*/ nveu32_t pkt_id; + /** vdma ID for corresponding timestamp */ + nveu32_t vdma_id; /** Time in seconds*/ nveu32_t sec; /** Time in nano seconds */ @@ -1656,6 +1695,8 @@ struct osi_core_priv_data { * valid values are NVETHERNETRM_PIF$OSI_MAC_HW_EQOS and * NVETHERNETRM_PIF$OSI_MAC_HW_MGBE*/ nveu32_t mac; + /** MACSEC HW type based on DT compatible */ + nveu32_t macsec; /** MAC version * valid values 
are NVETHERNETRM_PIF$OSI_EQOS_MAC_5_00, * NVETHERNETRM_PIF$OSI_EQOS_MAC_5_30 @@ -1731,7 +1772,7 @@ struct osi_core_priv_data { #if !defined(L3L4_WILDCARD_FILTER) /** L3L4 filter bit bask, set index corresponding bit for * filter if filter enabled */ - nveu32_t l3l4_filter_bitmask; + nveu64_t l3l4_filter_bitmask; #endif /* !L3L4_WILDCARD_FILTER */ /** Flag which decides virtualization is enabled(1) or disabled(0) */ nveu32_t use_virtualization; @@ -1739,7 +1780,7 @@ struct osi_core_priv_data { struct osi_hw_features *hw_feature; /** MC packets Multiple DMA channel selection flags */ nveu32_t mc_dmasel; - /** UPHY GBE mode (1 for 10G, 0 for 5G) */ + /** UPHY GBE mode (2 for 25F, 1 for 10G, 0 for 5G) */ nveu32_t uphy_gbe_mode; /** number of PDMA's */ nveu32_t num_of_pdma; @@ -1754,7 +1795,8 @@ struct osi_core_priv_data { /** number of VM IRQ's * Fixed value filled by NvEthernet unit as 4*/ nveu32_t num_vm_irqs; - /** PHY interface mode (0/1 for XFI 10/5G, 2/3 for USXGMII 10/5) */ + /** PHY interface mode (0/1 for XFI 10/5G, 2/3 for USXGMII 10/5) + * (4 for XFI 25G) (5 for USXGMII 25G */ nveu32_t phy_iface_mode; /** MGBE MAC instance ID's * valid values are from 0 to 4 @@ -1774,6 +1816,8 @@ struct osi_core_priv_data { #endif /** pre-silicon flag */ nveu32_t pre_sil; + /** rCHlist bookkeeping **/ + struct rchlist_index rch_index[RCHLIST_SIZE]; }; /** diff --git a/include/osi_dma.h b/include/osi_dma.h index e343874..9dd0553 100644 --- a/include/osi_dma.h +++ b/include/osi_dma.h @@ -59,6 +59,7 @@ #define OSI_ONE_MEGA_HZ 1000000U /** @brief MAX ULLONG value */ #define OSI_ULLONG_MAX (~0ULL) +#define OSI_MSEC_PER_SEC 1000U /* Compiler hints for branch prediction */ #define osi_likely(x) __builtin_expect(!!(x), 1) @@ -141,6 +142,9 @@ #define OSI_PKT_CX_IP_CSUM OSI_BIT(12) /** @} */ +/** VDMA ID in TDESC0 **/ +#define OSI_PTP_VDMA_SHIFT 10U + #ifndef OSI_STRIPPED_LIB /** * @addtogroup SLOT function context fields @@ -265,6 +269,7 @@ #define OSI_DMA_IOCTL_CMD_STRUCTS_DUMP 2U 
#define OSI_DMA_IOCTL_CMD_DEBUG_INTR_CONFIG 3U #endif /* OSI_DEBUG */ +#define OSI_DMA_IOCTL_CMD_RX_RIIT_CONFIG 4U /** @} */ /** @@ -318,6 +323,16 @@ struct osi_pkt_err_stats { }; #endif /* !OSI_STRIPPED_LIB */ +/** + * @brief RX RIIT value for speed + */ +struct osi_rx_riit { + /** speed */ + nveu32_t speed; + /** riit value */ + nveu32_t riit; +}; + /** * @brief Receive Descriptor */ @@ -452,6 +467,8 @@ struct osi_tx_swcx { * Max value is NVETHERNETCL_PIF$UINT_MAX */ nveu32_t pktid; + /** VDMA id of packet for which TX packet sent for timestamp needed */ + nveu32_t vdmaid; /** dma channel number for osd use. * Max value is NVETHERNETCL_PIF$OSI_EQOS_MAX_NUM_CHANS or * NVETHERNETCL_PIF$OSI_MGBE_MAX_NUM_CHANS @@ -539,6 +556,8 @@ struct osi_txdone_pkt_cx { * Max value is NVETHERNETCL_PIF$UINT_MAX */ nveu32_t pktid; + /** Passing vdma id to map TX time to packet */ + nveu32_t vdmaid; }; /** @@ -606,12 +625,12 @@ struct osi_tx_ring { * @brief osi_xtra_dma_stat_counters - OSI DMA extra stats counters */ struct osi_xtra_dma_stat_counters { - /** Per Q TX packet count */ - nveu64_t q_tx_pkt_n[OSI_MGBE_MAX_NUM_QUEUES]; - /** Per Q RX packet count */ - nveu64_t q_rx_pkt_n[OSI_MGBE_MAX_NUM_QUEUES]; - /** Per Q TX complete call count */ - nveu64_t tx_clean_n[OSI_MGBE_MAX_NUM_QUEUES]; + /** Per chan TX packet count */ + nveu64_t chan_tx_pkt_n[OSI_MGBE_MAX_NUM_CHANS]; + /** Per chan RX packet count */ + nveu64_t chan_rx_pkt_n[OSI_MGBE_MAX_NUM_CHANS]; + /** Per chan TX complete call count */ + nveu64_t tx_clean_n[OSI_MGBE_MAX_NUM_CHANS]; /** Total number of tx packets count */ nveu64_t tx_pkt_n; /** Total number of rx packet count */ @@ -657,7 +676,7 @@ struct osd_dma_ops { #endif /* OSI_DEBUG */ }; -#ifdef OSI_DEBUG +//#ifdef OSI_DEBUG /** * @brief The OSI DMA IOCTL data structure. 
*/ @@ -667,7 +686,7 @@ struct osi_dma_ioctl_data { /** IOCTL command argument */ nveu32_t arg_u32; }; -#endif /* OSI_DEBUG */ +//#endif /* OSI_DEBUG */ /** * @brief The OSI DMA private data structure. @@ -721,6 +740,12 @@ struct osi_dma_priv_data { * NVETHERNETCL_PIF$OSI_DISABLE */ nveu32_t use_riwt; + /** Receive Interrupt Idle Timer in nsec */ + struct osi_rx_riit rx_riit[OSI_MGBE_MAX_NUM_RIIT]; + /** num of rx riit configs for different speeds */ + nveu32_t num_of_riit; + /** Flag which decides riit is enabled(1) or disabled(0) */ + nveu32_t use_riit; /** Max no of pkts to be received before triggering Rx interrupt. * Max value is NVETHERNETCL_PIF$UINT_MAX */ @@ -772,9 +797,9 @@ struct osi_dma_priv_data { * NVETHENETCL_PIF$OSI_PTP_SYNC_TWOSTEP - two step mode */ nveu32_t ptp_flag; -#ifdef OSI_DEBUG /** OSI DMA IOCTL data */ struct osi_dma_ioctl_data ioctl_data; +#ifdef OSI_DEBUG /** Flag to enable/disable descriptor dump */ nveu32_t enable_desc_dump; #endif /* OSI_DEBUG */ @@ -1444,7 +1469,7 @@ nveu32_t osi_is_mac_enabled(struct osi_dma_priv_data *const osi_dma); nve32_t osi_handle_dma_intr(struct osi_dma_priv_data *osi_dma, nveu32_t chan, nveu32_t tx_rx, nveu32_t en_dis); -#ifdef OSI_DEBUG +//#ifdef OSI_DEBUG /** * @brief * Description: OSI DMA IOCTL @@ -1468,7 +1493,7 @@ nve32_t osi_handle_dma_intr(struct osi_dma_priv_data *osi_dma, * @retval -1 on failure - invalid ioctl command within osi data structure */ nve32_t osi_dma_ioctl(struct osi_dma_priv_data *osi_dma); -#endif /* OSI_DEBUG */ +//#endif /* OSI_DEBUG */ #ifndef OSI_STRIPPED_LIB /** * @brief diff --git a/include/osi_macsec.h b/include/osi_macsec.h index 956f5f7..0377135 100644 --- a/include/osi_macsec.h +++ b/include/osi_macsec.h @@ -26,6 +26,7 @@ #include #ifdef MACSEC_SUPPORT + ////////////////////////////////////////////////////////////////////////// /* MACSEC OSI data structures */ ////////////////////////////////////////////////////////////////////////// @@ -51,6 +52,7 @@ #endif /* DEBUG_MACSEC 
*/ /** @brief maximum key index */ #define OSI_KEY_INDEX_MAX 31U +#define OSI_KEY_INDEX_MAX_T26X 95U /** @brief maximum PN by default */ #define OSI_PN_MAX_DEFAULT 0xFFFFFFFFU /** @brief threshold PN by default */ @@ -59,6 +61,8 @@ #define OSI_TCI_DEFAULT 0x1 /** @brief maximum SCs index */ #define OSI_SC_INDEX_MAX 15U +/** @brief maximum SCs index for T26X */ +#define OSI_SC_INDEX_MAX_T26X 47U /** * @brief Length of ethernet type field */ @@ -127,6 +131,10 @@ * @brief Helper macros for generic table CONFIG register programming * @{ */ +/** @brief MACSEC max ip types */ +#define MAX_MACSEC_IP_TYPES 2 +#define OSI_MACSEC_T23X 0U +#define OSI_MACSEC_T26X 1U /** @brief TX MACSEC controller */ #define OSI_CTLR_SEL_TX 0U /** @brief RX MACSEC controller */ @@ -137,13 +145,15 @@ /** @brief LUT write operation */ #define OSI_LUT_WRITE 1U #define OSI_RW_MAX 1U -/** @brief Maximum table index */ -#define OSI_TABLE_INDEX_MAX 31U /** @brief Maximum bypass lut table index */ -#define OSI_BYP_LUT_MAX_INDEX OSI_TABLE_INDEX_MAX -/** @brief Maximum number of SCs */ -#define OSI_SC_LUT_MAX_INDEX 15U -#define OSI_SA_LUT_MAX_INDEX OSI_TABLE_INDEX_MAX +#define OSI_BYP_LUT_MAX_INDEX 31U +/** @brief Maximum bypass lut table index for T26X */ +#define OSI_BYP_LUT_MAX_INDEX_T26X 47U +/** @brief Maximum number of SAs */ +#define OSI_SA_LUT_MAX_INDEX 31U +/** @brief Maximum number of SAs for T26X */ +#define OSI_SA_LUT_MAX_INDEX_T26X 95U + /** @} */ #ifdef DEBUG_MACSEC @@ -242,6 +252,10 @@ struct osi_sc_param_outputs { /** Indicates 1 bit VLAN IN CLEAR config * vlaid values are 0(vlan not in clear) and 1(vlan in clear) */ nveu8_t vlan_in_clear; + /** Indicates 1 bit Encription config */ + nveu8_t encrypt; + /** Indicates 2 bit confidentiality offset config */ + nveu8_t conf_offset; }; /** diff --git a/osi/core/common.h b/osi/core/common.h index f3f55d5..5b90c88 100644 --- a/osi/core/common.h +++ b/osi/core/common.h @@ -37,13 +37,6 @@ #define RETRY_DELAY 1U /** @} */ -/** MAC version type for 
EQOS version previous to 5.30 */ -#define MAC_CORE_VER_TYPE_EQOS 0U -/** MAC version type for EQOS version 5.30 */ -#define MAC_CORE_VER_TYPE_EQOS_5_30 1U -/** MAC version type for MGBE IP */ -#define MAC_CORE_VER_TYPE_MGBE 2U - /** * @addtogroup MGBE PBL settings. * @@ -54,13 +47,24 @@ #define MGBE_TXQ_SIZE 131072U /* Rx Queue size is 192KB */ #define MGBE_RXQ_SIZE 196608U -/* MAX PBL value */ -#define MGBE_DMA_CHX_MAX_PBL 256U -#define MGBE_DMA_CHX_MAX_PBL_VAL 0x200000U +/* uFPGA config Tx Queue size is 64KB */ +#define MGBE_TXQ_SIZE_UFPGA 65536U + +/* PBL values */ +#define MGBE_DMA_CHX_MAX_PBL 32U +#define MGBE_DMA_CHX_PBL_16 16U +#define MGBE_DMA_CHX_PBL_8 8U +#define MGBE_DMA_CHX_PBL_4 4U +#define MGBE_DMA_CHX_PBL_1 1U /* AXI Data width */ -#define MGBE_AXI_DATAWIDTH 128U +#define MGBE_AXI_DATAWIDTH 128U /** @} */ +/** + * @brief MTL Q size depth helper macro + */ +#define Q_SZ_DEPTH(x) (((x) * 1024U) / (MGBE_AXI_DATAWIDTH / 8U)) + /** * @brief osi_readl_poll_timeout - Periodically poll an address until * a condition is met or a timeout occurs @@ -254,6 +258,7 @@ static inline void osi_writela(OSI_UNUSED void *priv, nveu32_t val, void *addr) /** * @brief validate_mac_ver_update_chans - Validates mac version and update chan * + * @param[in] mac: MAC HW type. * @param[in] mac_ver: MAC version read. * @param[out] num_max_chans: Maximum channel number. * @param[out] l_mac_ver: local mac version. 
@@ -269,10 +274,16 @@ static inline void osi_writela(OSI_UNUSED void *priv, nveu32_t val, void *addr) * @retval 0 - for not Valid MAC * @retval 1 - for Valid MAC */ -static inline nve32_t validate_mac_ver_update_chans(nveu32_t mac_ver, +static inline nve32_t validate_mac_ver_update_chans(nveu32_t mac, + nveu32_t mac_ver, nveu32_t *num_max_chans, nveu32_t *l_mac_ver) { + const nveu32_t max_dma_chan[OSI_MAX_MAC_IP_TYPES] = { + OSI_EQOS_MAX_NUM_CHANS, + OSI_MGBE_T23X_MAX_NUM_CHANS, + OSI_MGBE_MAX_NUM_CHANS + }; nve32_t ret; switch (mac_ver) { @@ -288,14 +299,20 @@ static inline nve32_t validate_mac_ver_update_chans(nveu32_t mac_ver, *l_mac_ver = MAC_CORE_VER_TYPE_EQOS_5_30; ret = 1; break; + case OSI_EQOS_MAC_5_40: + *num_max_chans = OSI_EQOS_MAX_NUM_CHANS; + *l_mac_ver = MAC_CORE_VER_TYPE_EQOS_5_40; + ret = 1; + break; case OSI_MGBE_MAC_3_10: //TBD: T264 uFPGA reports mac version 3.2 case OSI_MGBE_MAC_3_20: + case OSI_MGBE_MAC_4_20: #ifndef OSI_STRIPPED_LIB case OSI_MGBE_MAC_4_00: #endif /* !OSI_STRIPPED_LIB */ //TBD: T264 number of dma channels? - *num_max_chans = OSI_MGBE_MAX_NUM_CHANS; + *num_max_chans = max_dma_chan[mac]; *l_mac_ver = MAC_CORE_VER_TYPE_MGBE; ret = 1; break; @@ -375,4 +392,46 @@ static inline nve32_t osi_memcmp(const void *dest, const void *src, nve32_t n) fail: return ret; } + +/** + * @brief osi_valid_pbl_value - returns the allowed pbl value. 
+ * @note + * Algorithm: + * - Check the pbl range and return allowed pbl value + * + * @param[in] pbl: Calculated PBL value + * + * @note Input parameter should be only nveu32_t type + * + * @note + * API Group: + * - Initialization: No + * - Run time: Yes + * - De-initialization: No + * + * @retval allowed pbl value + */ +static inline nveu32_t osi_valid_pbl_value(nveu32_t pbl_value) +{ + nveu32_t allowed_pbl; + nveu32_t pbl; + + /* 8xPBL mode is set */ + pbl = pbl_value / 8U; + + if (pbl >= MGBE_DMA_CHX_MAX_PBL) { + allowed_pbl = MGBE_DMA_CHX_MAX_PBL; + } else if (pbl >= MGBE_DMA_CHX_PBL_16) { + allowed_pbl = MGBE_DMA_CHX_PBL_16; + } else if (pbl >= MGBE_DMA_CHX_PBL_8) { + allowed_pbl = MGBE_DMA_CHX_PBL_8; + } else if (pbl >= MGBE_DMA_CHX_PBL_4) { + allowed_pbl = MGBE_DMA_CHX_PBL_4; + } else { + allowed_pbl = MGBE_DMA_CHX_PBL_1; + } + + return allowed_pbl; +} + #endif diff --git a/osi/core/core_common.c b/osi/core/core_common.c index 69ad2fb..602b015 100644 --- a/osi/core/core_common.c +++ b/osi/core/core_common.c @@ -26,6 +26,7 @@ #include "eqos_core.h" #include "xpcs.h" #include "macsec.h" +#include "osi_macsec.h" nve32_t poll_check(struct osi_core_priv_data *const osi_core, nveu8_t *addr, nveu32_t bit_check, nveu32_t *value) @@ -172,6 +173,7 @@ fail: return ret; } +#if 0 static nve32_t xpcs_init_start(struct osi_core_priv_data *const osi_core) { nve32_t ret = 0; @@ -203,6 +205,7 @@ static nve32_t xpcs_init_start(struct osi_core_priv_data *const osi_core) fail: return ret; } +#endif nve32_t hw_set_speed(struct osi_core_priv_data *const osi_core, const nve32_t speed) { @@ -261,8 +264,39 @@ nve32_t hw_set_speed(struct osi_core_priv_data *const osi_core, const nve32_t sp if (ret != -1) { osi_writela(osi_core, value, ((nveu8_t *)osi_core->base + mac_mcr[osi_core->mac])); - /* Validate PCS initialization */ - ret = xpcs_init_start(osi_core); + if (osi_core->mac != OSI_MAC_HW_EQOS) { + if (speed == OSI_SPEED_25000) { + ret = xlgpcs_init(osi_core); + if (ret < 0) { + 
goto fail; + } + + ret = xlgpcs_start(osi_core); + if (ret < 0) { + goto fail; + } + } else { + ret = xpcs_init(osi_core); + if (ret < 0) { + goto fail; + } + + ret = xpcs_start(osi_core); + if (ret < 0) { + goto fail; + } + } + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MAC_IER); + /* Enable Link Status interrupt only after lane bring up success */ + value |= MGBE_IMR_RGSMIIIE; + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MAC_IER); + } else if (osi_core->mac_ver == MAC_CORE_VER_TYPE_EQOS_5_40) { + //TDB: eqos sgmii pcs changes +// ret = eqos_xpcs_init(osi_core); +// if (ret < 0) { +// goto fail; +// } + } } fail: return ret; @@ -496,7 +530,7 @@ void hw_config_tscr(struct osi_core_priv_data *const osi_core, OSI_UNUSED const const nveu32_t mac_pps[OSI_MAX_MAC_IP_TYPES] = { EQOS_MAC_PPS_CTL, MGBE_MAC_PPS_CTL, - MGBE_MAC_TCR + MGBE_MAC_PPS_CTL }; (void)ptp_filter; // unused @@ -656,11 +690,6 @@ nve32_t hw_config_mac_pkt_filter_reg(struct osi_core_priv_data *const osi_core, value = osi_readla(osi_core, ((nveu8_t *)osi_core->base + MAC_PKT_FILTER_REG)); - /*Retain all other values */ - value &= (MAC_PFR_DAIF | MAC_PFR_DBF | MAC_PFR_SAIF | - MAC_PFR_SAF | MAC_PFR_PCF | MAC_PFR_VTFE | - MAC_PFR_IPFE | MAC_PFR_DNTU | MAC_PFR_RA); - if ((filter->oper_mode & OSI_OPER_EN_PERFECT) != OSI_DISABLE) { value |= MAC_PFR_HPF; } @@ -1324,7 +1353,7 @@ static nve32_t hw_config_fpe_pec_enable(struct osi_core_priv_data *const osi_cor osi_writela(osi_core, val, (nveu8_t *)osi_core->base + MAC_RQC1R[osi_core->mac & 0x1U]); - if (osi_core->mac == OSI_MAC_HW_MGBE) { + if (osi_core->mac != OSI_MAC_HW_EQOS) { val = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MAC_RQC4R); val &= ~MGBE_MAC_RQC4R_PMCBCQ; @@ -1402,10 +1431,11 @@ nve32_t hw_config_fpe(struct osi_core_priv_data *const osi_core, goto error; } - if (osi_core->mac == OSI_MAC_HW_MGBE) { + if (osi_core->mac != OSI_MAC_HW_EQOS) { #ifdef MACSEC_SUPPORT 
osi_lock_irq_enabled(&osi_core->macsec_fpe_lock); - /* MACSEC and FPE cannot coexist on MGBE refer bug 3484034 */ + /* MACSEC and FPE cannot coexist on MGBE of T234 refer bug 3484034 + * Both EQOS and MGBE of T264 cannot have macsec and fpe enabled simultaneously */ if (osi_core->is_macsec_enabled == OSI_ENABLE) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "FPE and MACSEC cannot co-exist\n", 0ULL); @@ -1431,7 +1461,7 @@ nve32_t hw_config_fpe(struct osi_core_priv_data *const osi_core, osi_writela(osi_core, val, (nveu8_t *)osi_core->base + MAC_FPE_CTS[osi_core->mac & 0x1U]); - if (osi_core->mac == OSI_MAC_HW_MGBE) { + if (osi_core->mac != OSI_MAC_HW_EQOS) { #ifdef MACSEC_SUPPORT osi_core->is_fpe_enabled = OSI_DISABLE; #endif /* MACSEC_SUPPORT */ @@ -1445,7 +1475,7 @@ nve32_t hw_config_fpe(struct osi_core_priv_data *const osi_core, } done: - if (osi_core->mac == OSI_MAC_HW_MGBE) { + if (osi_core->mac != OSI_MAC_HW_EQOS) { #ifdef MACSEC_SUPPORT osi_unlock_irq_enabled(&osi_core->macsec_fpe_lock); #endif /* MACSEC_SUPPORT */ @@ -1701,7 +1731,7 @@ void hw_tsn_init(struct osi_core_priv_data *osi_core) osi_writela(osi_core, val, (nveu8_t *)osi_core->base + MAC_RQC1R[osi_core->mac & 0x1U]); - if (osi_core->mac == OSI_MAC_HW_MGBE) { + if (osi_core->mac != OSI_MAC_HW_EQOS) { val = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MAC_RQC4R); val &= ~MGBE_MAC_RQC4R_PMCBCQ; @@ -1741,6 +1771,12 @@ nve32_t hsi_common_error_inject(struct osi_core_priv_data *osi_core, nveu32_t error_code) { nve32_t ret = 0; + const nveu32_t rx_isr_set[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_ISR_SET, + MACSEC_RX_ISR_SET_T26X}; + const nveu32_t common_isr_set[MAX_MACSEC_IP_TYPES] = { + MACSEC_COMMON_ISR_SET, + MACSEC_COMMON_ISR_SET_T26X}; switch (error_code) { case OSI_INBOUND_BUS_CRC_ERR: @@ -1756,7 +1792,7 @@ nve32_t hsi_common_error_inject(struct osi_core_priv_data *osi_core, case OSI_MACSEC_RX_CRC_ERR: osi_writela(osi_core, MACSEC_RX_MAC_CRC_ERROR, (nveu8_t *)osi_core->macsec_base + - 
MACSEC_RX_ISR_SET); + rx_isr_set[osi_core->macsec]); break; case OSI_MACSEC_TX_CRC_ERR: osi_writela(osi_core, MACSEC_TX_MAC_CRC_ERROR, @@ -1766,12 +1802,12 @@ nve32_t hsi_common_error_inject(struct osi_core_priv_data *osi_core, case OSI_MACSEC_RX_ICV_ERR: osi_writela(osi_core, MACSEC_RX_ICV_ERROR, (nveu8_t *)osi_core->macsec_base + - MACSEC_RX_ISR_SET); + rx_isr_set[osi_core->macsec]); break; case OSI_MACSEC_REG_VIOL_ERR: osi_writela(osi_core, MACSEC_SECURE_REG_VIOL, (nveu8_t *)osi_core->macsec_base + - MACSEC_COMMON_ISR_SET); + common_isr_set[osi_core->macsec]); break; case OSI_PHY_WRITE_VERIFY_ERR: osi_core->hsi.err_code[PHY_WRITE_VERIFY_FAIL_IDX] = OSI_PHY_WRITE_VERIFY_ERR; @@ -1971,7 +2007,10 @@ static void prepare_l3l4_ctr_reg(const struct osi_core_priv_data *const osi_core /* Enable L4 filters for SOURCE Port No matching */ value |= (l3_l4->data.src.port_match << MAC_L3L4_CTR_L4SPM_SHIFT) | (l3_l4->data.src.port_match_inv << MAC_L3L4_CTR_L4SPIM_SHIFT); - + if (osi_core->mac == OSI_MAC_HW_MGBE_T26X) { + /* Enable combined L3 and L4 filters */ + value |= l3_l4->data.is_l3l4_match_en << MAC_L3L4_CTR_L5TEN_SHIFT; + } /* set udp / tcp port matching bit (for l4) */ value |= l3_l4->data.is_udp << MAC_L3L4_CTR_L4PEN_SHIFT; diff --git a/osi/core/core_common.h b/osi/core/core_common.h index 5d59a1c..1ae354a 100644 --- a/osi/core/core_common.h +++ b/osi/core/core_common.h @@ -110,6 +110,7 @@ #ifndef OSI_STRIPPED_LIB #define MAC_L3L4_CTR_L3DAIM_SHIFT 5 #define MAC_L3L4_CTR_L4PEN_SHIFT 16 +#define MAC_L3L4_CTR_L5TEN_SHIFT 17 #define MAC_L3L4_CTR_L4SPM_SHIFT 18 #define MAC_L3L4_CTR_L4SPIM_SHIFT 19 #define MAC_L3L4_CTR_L4DPM_SHIFT 20 diff --git a/osi/core/core_local.h b/osi/core/core_local.h index 4f54226..22e40b2 100644 --- a/osi/core/core_local.h +++ b/osi/core/core_local.h @@ -229,6 +229,12 @@ struct core_ops { struct osi_core_frp_data *const data); /** Called to update FRP NVE and */ void (*update_frp_nve)(struct osi_core_priv_data *const osi_core, const nveu32_t nve); 
+ /** Called to get RCHList index */ + nve32_t (*get_rchlist_index)(struct osi_core_priv_data *const osi_core, + nveu8_t const *mac_addr); + /** Called to free RCHLIST index */ + void (*free_rchlist_index)(struct osi_core_priv_data *const osi_core, + const nve32_t rch_indx); #ifdef HSI_SUPPORT /** Interface function called to initialize HSI */ nve32_t (*core_hsi_configure)(struct osi_core_priv_data *const osi_core, @@ -316,7 +322,7 @@ struct core_l2 { struct dynamic_cfg { nveu32_t flags; /** L3_L4 filters */ - struct osi_l3_l4_filter l3_l4[OSI_MGBE_MAX_L3_L4_FILTER]; + struct osi_l3_l4_filter l3_l4[OSI_MGBE_MAX_L3_L4_FILTER_T264]; /** flow control */ nveu32_t flow_ctrl; /** AVB */ diff --git a/osi/core/eqos_core.c b/osi/core/eqos_core.c index 4a10a32..28e137d 100644 --- a/osi/core/eqos_core.c +++ b/osi/core/eqos_core.c @@ -2072,7 +2072,7 @@ static nve32_t eqos_update_mac_addr_low_high_reg( nveu32_t idx = filter->index; nveu32_t dma_routing_enable = filter->dma_routing; nveu32_t dma_chan = filter->dma_chan; - nveu32_t dma_chansel = filter->dma_chansel; + nveu64_t dma_chansel = filter->dma_chansel; nveu32_t addr_mask = filter->addr_mask; nveu32_t src_dest = filter->src_dest; nveu32_t value = OSI_DISABLE; @@ -4022,6 +4022,43 @@ static nve32_t eqos_post_pad_calibrate( return ret; } +/** + * @brief eqos_free_rchlist_index - Free index. + * + * Algorithm: This function just free the Receive channel index. + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] rch_idx: Receive channel index. + * + */ +static void eqos_free_rchlist_index(struct osi_core_priv_data *osi_core, + const nve32_t rch_idx) { + (void) osi_core; + (void) rch_idx; + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "Free RCHLIST not supported by EQOS\n", 0ULL); +} + +/** + * @brief eqos_rchlist_get_index - find free index + * + * Algorithm: This function gets free index for receive channel list. + * + * @param[in] osi_core: OSI core private data structure. 
+ * @param[in] mac_addr: Mac address. + * + * @retval -1 on failure. + +**/ +static nve32_t eqos_get_rchlist_index(struct osi_core_priv_data *osi_core, + nveu8_t const *mac_addr) { + (void) osi_core; + (void) mac_addr; + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "RCHLIST not supported by EQOS\n", 0ULL); + return -1; +} + #ifndef OSI_STRIPPED_LIB /** * @brief eqos_config_rss - Configure RSS @@ -4151,6 +4188,8 @@ void eqos_init_core_ops(struct core_ops *ops) ops->config_frp = eqos_config_frp; ops->update_frp_entry = eqos_update_frp_entry; ops->update_frp_nve = eqos_update_frp_nve; + ops->get_rchlist_index = eqos_get_rchlist_index; + ops->free_rchlist_index = eqos_free_rchlist_index; #if defined MACSEC_SUPPORT && !defined OSI_STRIPPED_LIB ops->read_macsec_reg = eqos_read_macsec_reg; ops->write_macsec_reg = eqos_write_macsec_reg; diff --git a/osi/core/frp.c b/osi/core/frp.c index 76ea304..fb84efe 100644 --- a/osi/core/frp.c +++ b/osi/core/frp.c @@ -200,7 +200,7 @@ static nve32_t validate_frp_args(struct osi_core_priv_data *const osi_core, OSI_UNUSED nveu8_t pos, nveu32_t *req_entries) { - nveu32_t dma_sel_val[OSI_MAX_MAC_IP_TYPES] = {0xFFU, 0x3FFU, 0x3FFU}; + nveu64_t dma_sel_val[OSI_MAX_MAC_IP_TYPES] = {0xFFU, 0x3FFU, 0xFFFFFFFFFFFFU}; nve32_t ret = 0; (void)pos; @@ -253,7 +253,9 @@ done: * @param[in] offset: Actual match data offset position. * @param[in] filter_mode: Filter mode from FRP command. * @param[in] next_frp_id: FRP ID to link this ID. - * @param[in] dma_sel: Indicate the DMA Channel Number (1-bit for each). + * @param[in] dcht: DMA Channel Selection Type. + * @param[in] rchlist_indx: Receive Channel list index. + * * * @retval 0 on success. * @retval -1 on failure. 
@@ -266,7 +268,9 @@ static nve32_t frp_entry_add(struct osi_core_priv_data *const osi_core, nveu8_t offset, nveu8_t filter_mode, nve32_t next_frp_id, - nveu32_t dma_sel) + nveu32_t dma_sel, + nveu8_t dcht, + nve32_t rchlist_indx) { struct osi_core_frp_entry *entry = OSI_NULL; struct osi_core_frp_data *data = OSI_NULL; @@ -340,6 +344,9 @@ static nve32_t frp_entry_add(struct osi_core_priv_data *const osi_core, /* Fill DCH */ data->dma_chsel = dma_sel; + /* Fill dcht & rchlist_indx */ + data->rchlist_indx = rchlist_indx; + data->dcht = dcht; /* Check for the remain data and update FRP flags */ if (md_pos < length) { /* Reset AF, RF and set NIC, OKI */ @@ -456,13 +463,15 @@ frp_hw_write_error: * @param[in] osi_core: OSI core private data structure. * @param[in] cmd: OSI FRP command structure. * @param[in] pos: Pointer to the FRP entry position. + * @param[in] rchlist_indx: Index to the rchlist. * * @retval 0 on success. * @retval -1 on failure. */ static nve32_t frp_add_proto(struct osi_core_priv_data *const osi_core, struct osi_core_frp_cmd *const cmd, - nveu8_t *pos) + nveu8_t *pos, + nve32_t rchlist_indx) { nve32_t ret, proto_oki; nveu8_t proto_entry = OSI_DISABLE; @@ -538,7 +547,8 @@ static nve32_t frp_add_proto(struct osi_core_priv_data *const osi_core, ret = frp_entry_add(osi_core, cmd->frp_id, *pos, proto_match, proto_lendth, proto_offset, OSI_FRP_MODE_LINK, - proto_oki, cmd->dma_sel); + proto_oki, cmd->dma_sel, cmd->dcht, + rchlist_indx); if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail add FRP protocol entry\n", @@ -627,6 +637,7 @@ static nve32_t frp_delete(struct osi_core_priv_data *const osi_core, nveu8_t i = 0U, pos = 0U, count = 0U; nve32_t frp_id = cmd->frp_id; nveu32_t frp_cnt = osi_core->frp_cnt; + struct osi_core_frp_entry *entry = OSI_NULL; /* Check for FRP entries */ if (frp_cnt == 0U) { @@ -646,6 +657,11 @@ static nve32_t frp_delete(struct osi_core_priv_data *const osi_core, goto done; } + /* Free the RCHLIST index */ + entry = 
&osi_core->frp_table[frp_id]; + ops_p->free_rchlist_index(osi_core, + entry->data.rchlist_indx); + /* Update the frp_table entry */ osi_memset(&osi_core->frp_table[pos], 0U, (sizeof(struct osi_core_frp_entry) * count)); @@ -692,6 +708,8 @@ static nve32_t frp_update(struct osi_core_priv_data *const osi_core, nveu8_t pos = 0U, count = 0U, req = 0U; nveu16_t req_16bit = 0U; nve32_t frp_id = cmd->frp_id; + struct osi_core_frp_entry *entry = OSI_NULL; + nve32_t rchlist_indx = 0; /* Validate given frp_id */ if (frp_entry_find(osi_core, frp_id, &pos, &count) < 0) { @@ -702,6 +720,9 @@ static nve32_t frp_update(struct osi_core_priv_data *const osi_core, goto done; } + entry = &osi_core->frp_table[frp_id]; + rchlist_indx = entry->data.rchlist_indx; + /* Parse match type and update command offset */ frp_parse_mtype(cmd); @@ -732,8 +753,8 @@ static nve32_t frp_update(struct osi_core_priv_data *const osi_core, goto done; } - /* Process and update FRP Command Protocal Entry */ - ret = frp_add_proto(osi_core, cmd, &pos); + /* Process and update FRP Command Protocol Entry */ + ret = frp_add_proto(osi_core, cmd, &pos, rchlist_indx); if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to parse match type\n", @@ -745,7 +766,8 @@ static nve32_t frp_update(struct osi_core_priv_data *const osi_core, ret = frp_entry_add(osi_core, frp_id, pos, cmd->match, cmd->match_length, cmd->offset, cmd->filter_mode, - cmd->next_frp_id, cmd->dma_sel); + cmd->next_frp_id, cmd->dma_sel, + cmd->dcht, rchlist_indx); if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to update FRP entry\n", @@ -784,6 +806,7 @@ static nve32_t frp_add(struct osi_core_priv_data *const osi_core, nve32_t ret; nveu8_t pos = 0U, count = 0U; nve32_t frp_id = cmd->frp_id; + nve32_t rchlist_indx = 0; /* Check for MAX FRP entries */ if (osi_core->frp_cnt >= OSI_FRP_MAX_ENTRY) { @@ -807,8 +830,20 @@ static nve32_t frp_add(struct osi_core_priv_data *const osi_core, /* Parse match type and update 
command offset */ frp_parse_mtype(cmd); + if (cmd->dcht == OSI_ENABLE) { + /* Find rchlist Free index */ + rchlist_indx = ops_p->get_rchlist_index(osi_core, OSI_NULL); + if (rchlist_indx < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "Fail to get rchlist index\n", + OSI_NONE); + goto done; + } + osi_core->rch_index[rchlist_indx/*ret*/].in_use = OSI_ENABLE; + } + /* Process and add FRP Command Protocal Entry */ - ret = frp_add_proto(osi_core, cmd, (nveu8_t *)&osi_core->frp_cnt); + ret = frp_add_proto(osi_core, cmd, (nveu8_t *)&osi_core->frp_cnt, rchlist_indx); if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to parse match type\n", @@ -820,7 +855,8 @@ static nve32_t frp_add(struct osi_core_priv_data *const osi_core, ret = frp_entry_add(osi_core, frp_id, (nveu8_t)(osi_core->frp_cnt & 0xFFU), cmd->match, cmd->match_length, cmd->offset, cmd->filter_mode, - cmd->next_frp_id, cmd->dma_sel); + cmd->next_frp_id, cmd->dma_sel, + cmd->dcht, rchlist_indx); if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to add FRP entry\n", diff --git a/osi/core/macsec.c b/osi/core/macsec.c index dc2a552..82fcf4d 100644 --- a/osi/core/macsec.c +++ b/osi/core/macsec.c @@ -29,14 +29,14 @@ #if 0 /* Qnx */ #define MACSEC_LOG(...) \ { \ - slogf(0, 6, ##__VA_ARGS__); \ - } + slogf(0, 6, ##__VA_ARGS__); \ +} #elif 0 /* Linux */ #include #define MACSEC_LOG(...) \ { \ - pr_debug(__VA_ARGS__); \ + pr_err(__VA_ARGS__); \ } #else #define MACSEC_LOG(...) 
@@ -77,6 +77,10 @@ static nve32_t poll_for_dbg_buf_update(struct osi_core_priv_data *const osi_core nve32_t cond = COND_NOT_MET; nve32_t ret = 0; nveu32_t count; + const nveu32_t dbg_buf_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_DEBUG_BUF_CONFIG_0, + MACSEC_DEBUG_BUF_CONFIG_0_T26X + }; count = 0; while (cond == COND_NOT_MET) { @@ -89,7 +93,7 @@ static nve32_t poll_for_dbg_buf_update(struct osi_core_priv_data *const osi_core dbg_buf_config = osi_readla(osi_core, (nveu8_t *)osi_core->macsec_base + - MACSEC_DEBUG_BUF_CONFIG_0); + dbg_buf_reg[osi_core->macsec]); if ((dbg_buf_config & MACSEC_DEBUG_BUF_CONFIG_0_UPDATE) == OSI_NONE) { cond = COND_MET; } @@ -130,11 +134,14 @@ static inline void write_dbg_buf_data( nveu8_t *base = (nveu8_t *)osi_core->macsec_base; nveu32_t i; + nveu32_t dbg_buf_reg[MAX_MACSEC_IP_TYPES] = {0}; /* Commit the dbg buffer to HW */ for (i = 0; i < DBG_BUF_LEN; i++) { + dbg_buf_reg[OSI_MACSEC_T23X] = MACSEC_DEBUG_BUF_DATA_0(i); + dbg_buf_reg[OSI_MACSEC_T26X] = MACSEC_DEBUG_BUF_DATA_0_T26X(i); osi_writela(osi_core, dbg_buf[i], base + - MACSEC_DEBUG_BUF_DATA_0(i)); + dbg_buf_reg[osi_core->macsec]); } } @@ -165,11 +172,14 @@ static inline void read_dbg_buf_data( nveu8_t *base = (nveu8_t *)osi_core->macsec_base; nveu32_t i; + nveu32_t dbg_buf_reg[MAX_MACSEC_IP_TYPES] = {0}; /* Read debug buffer from HW */ for (i = 0; i < DBG_BUF_LEN; i++) { + dbg_buf_reg[OSI_MACSEC_T23X] = MACSEC_DEBUG_BUF_DATA_0(i); + dbg_buf_reg[OSI_MACSEC_T26X] = MACSEC_DEBUG_BUF_DATA_0_T26X(i); dbg_buf[i] = osi_readla(osi_core, base + - MACSEC_DEBUG_BUF_DATA_0(i)); + dbg_buf_reg[osi_core->macsec]); } } @@ -203,10 +213,19 @@ static void write_tx_dbg_trigger_evts( nveu32_t flags = 0; nveu32_t tx_trigger_evts; nveu32_t debug_ctrl_reg; + nveu32_t macsec = osi_core->macsec; + const nveu32_t tx_dbg_ctrl_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_TX_DEBUG_CONTROL_0, + MACSEC_TX_DEBUG_CONTROL_0_T26X + }; + const nveu32_t tx_dbg_trig_en_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_TX_DEBUG_TRIGGER_EN_0, + 
MACSEC_TX_DEBUG_TRIGGER_EN_0_T26X + }; flags = dbg_buf_config->flags; tx_trigger_evts = osi_readla(osi_core, - base + MACSEC_TX_DEBUG_TRIGGER_EN_0); + base + tx_dbg_trig_en_reg[macsec]); if ((flags & OSI_TX_DBG_LKUP_MISS_EVT) != OSI_NONE) { tx_trigger_evts |= MACSEC_TX_DBG_LKUP_MISS; } else { @@ -245,16 +264,16 @@ static void write_tx_dbg_trigger_evts( MACSEC_LOG("%s: 0x%x", __func__, tx_trigger_evts); osi_writela(osi_core, tx_trigger_evts, - base + MACSEC_TX_DEBUG_TRIGGER_EN_0); + base + tx_dbg_trig_en_reg[macsec]); if (tx_trigger_evts != OSI_NONE) { /** Start the tx debug buffer capture */ debug_ctrl_reg = osi_readla(osi_core, - base + MACSEC_TX_DEBUG_CONTROL_0); + base + tx_dbg_ctrl_reg[macsec]); debug_ctrl_reg |= MACSEC_TX_DEBUG_CONTROL_0_START_CAP; MACSEC_LOG("%s: debug_ctrl_reg 0x%x", __func__, debug_ctrl_reg); osi_writela(osi_core, debug_ctrl_reg, - base + MACSEC_TX_DEBUG_CONTROL_0); + base + tx_dbg_ctrl_reg[macsec]); } } @@ -287,12 +306,16 @@ static void tx_dbg_trigger_evts( nveu8_t *base = (nveu8_t *)osi_core->macsec_base; nveu32_t flags = 0; nveu32_t tx_trigger_evts; + const nveu32_t tx_dbg_trig_en_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_TX_DEBUG_TRIGGER_EN_0, + MACSEC_TX_DEBUG_TRIGGER_EN_0_T26X + }; if (dbg_buf_config->rw == OSI_LUT_WRITE) { write_tx_dbg_trigger_evts(osi_core, dbg_buf_config); } else { tx_trigger_evts = osi_readla(osi_core, - base + MACSEC_TX_DEBUG_TRIGGER_EN_0); + base + tx_dbg_trig_en_reg[osi_core->macsec]); MACSEC_LOG("%s: 0x%x", __func__, tx_trigger_evts); if ((tx_trigger_evts & MACSEC_TX_DBG_LKUP_MISS) != OSI_NONE) { flags |= OSI_TX_DBG_LKUP_MISS_EVT; @@ -346,10 +369,19 @@ static void write_rx_dbg_trigger_evts( nveu32_t flags = 0; nveu32_t rx_trigger_evts = 0; nveu32_t debug_ctrl_reg; + nveu32_t macsec = osi_core->macsec; + const nveu32_t rx_dbg_ctrl_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_DEBUG_CONTROL_0, + MACSEC_RX_DEBUG_CONTROL_0_T26X + }; + const nveu32_t rx_trig_en_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_DEBUG_TRIGGER_EN_0, + 
MACSEC_RX_DEBUG_TRIGGER_EN_0_T26X + }; flags = dbg_buf_config->flags; rx_trigger_evts = osi_readla(osi_core, - base + MACSEC_RX_DEBUG_TRIGGER_EN_0); + base + rx_trig_en_reg[macsec]); if ((flags & OSI_RX_DBG_LKUP_MISS_EVT) != OSI_NONE) { rx_trigger_evts |= MACSEC_RX_DBG_LKUP_MISS; } else { @@ -387,16 +419,16 @@ static void write_rx_dbg_trigger_evts( } MACSEC_LOG("%s: 0x%x", __func__, rx_trigger_evts); osi_writela(osi_core, rx_trigger_evts, - base + MACSEC_RX_DEBUG_TRIGGER_EN_0); + base + rx_trig_en_reg[macsec]); if (rx_trigger_evts != OSI_NONE) { /** Start the tx debug buffer capture */ debug_ctrl_reg = osi_readla(osi_core, - base + MACSEC_RX_DEBUG_CONTROL_0); + base + rx_dbg_ctrl_reg[macsec]); debug_ctrl_reg |= MACSEC_RX_DEBUG_CONTROL_0_START_CAP; MACSEC_LOG("%s: debug_ctrl_reg 0x%x", __func__, debug_ctrl_reg); osi_writela(osi_core, debug_ctrl_reg, - base + MACSEC_RX_DEBUG_CONTROL_0); + base + rx_dbg_ctrl_reg[macsec]); } } @@ -429,12 +461,16 @@ static void rx_dbg_trigger_evts( nveu8_t *base = (nveu8_t *)osi_core->macsec_base; nveu32_t flags = 0; nveu32_t rx_trigger_evts = 0; + const nveu32_t rx_trig_en_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_DEBUG_TRIGGER_EN_0, + MACSEC_RX_DEBUG_TRIGGER_EN_0_T26X + }; if (dbg_buf_config->rw == OSI_LUT_WRITE) { write_rx_dbg_trigger_evts(osi_core, dbg_buf_config); } else { rx_trigger_evts = osi_readla(osi_core, - base + MACSEC_RX_DEBUG_TRIGGER_EN_0); + base + rx_trig_en_reg[osi_core->macsec]); MACSEC_LOG("%s: 0x%x", __func__, rx_trigger_evts); if ((rx_trigger_evts & MACSEC_RX_DBG_LKUP_MISS) != OSI_NONE) { flags |= OSI_RX_DBG_LKUP_MISS_EVT; @@ -545,13 +581,18 @@ static nve32_t macsec_dbg_buf_config(struct osi_core_priv_data *const osi_core, nveu8_t *base = (nveu8_t *)osi_core->macsec_base; nveu32_t dbg_config_reg = 0; nve32_t ret = 0; + nveu32_t macsec = osi_core->macsec; + const nveu32_t dbg_buf_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_DEBUG_BUF_CONFIG_0, + MACSEC_DEBUG_BUF_CONFIG_0_T26X + }; if 
(validate_inputs_macsec_dbg_buf_conf(osi_core, dbg_buf_config) < 0) { ret = -1; goto err; } - dbg_config_reg = osi_readla(osi_core, base + MACSEC_DEBUG_BUF_CONFIG_0); + dbg_config_reg = osi_readla(osi_core, base + dbg_buf_reg[macsec]); if (dbg_buf_config->ctlr_sel != OSI_NONE) { dbg_config_reg |= MACSEC_DEBUG_BUF_CONFIG_0_CTLR_SEL; @@ -570,7 +611,7 @@ static nve32_t macsec_dbg_buf_config(struct osi_core_priv_data *const osi_core, dbg_config_reg &= ~MACSEC_DEBUG_BUF_CONFIG_0_IDX_MASK; dbg_config_reg |= dbg_buf_config->index ; dbg_config_reg |= MACSEC_DEBUG_BUF_CONFIG_0_UPDATE; - osi_writela(osi_core, dbg_config_reg, base + MACSEC_DEBUG_BUF_CONFIG_0); + osi_writela(osi_core, dbg_config_reg, base + dbg_buf_reg[macsec]); ret = poll_for_dbg_buf_update(osi_core); if (ret < 0) { goto err; @@ -716,7 +757,42 @@ static inline nveul64_t update_macsec_mmc_val( static void macsec_read_mmc(struct osi_core_priv_data *const osi_core) { struct osi_macsec_mmc_counters *mmc = &osi_core->macsec_mmc; - nveu16_t i; + nveu32_t i; + nveu32_t macsec = osi_core->macsec; + const nveu32_t rx_notg_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_PKTS_NOTG_LO_0, + MACSEC_RX_PKTS_NOTG_LO_0_T26X + }; + const nveu32_t rx_untg_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_PKTS_UNTG_LO_0, + MACSEC_RX_PKTS_UNTG_LO_0_T26X + }; + const nveu32_t rx_badtg_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_PKTS_BADTAG_LO_0, + MACSEC_RX_PKTS_BADTAG_LO_0_T26X + }; + const nveu32_t rx_nosaerror_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_PKTS_NOSAERROR_LO_0, + MACSEC_RX_PKTS_NOSAERROR_LO_0_T26X + }; + const nveu32_t rx_nosa_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_PKTS_NOSA_LO_0, + MACSEC_RX_PKTS_NOSA_LO_0_T26X + }; + const nveu32_t rx_ovrrun_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_PKTS_OVRRUN_LO_0, + MACSEC_RX_PKTS_OVRRUN_LO_0_T26X + }; + const nveu32_t rx_vldtd_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_OCTETS_VLDTD_LO_0, + MACSEC_RX_OCTETS_VLDTD_LO_0_T26X + }; + const nveu32_t sc_idx_max[MAX_MACSEC_IP_TYPES] = { + OSI_SC_INDEX_MAX, 
OSI_SC_INDEX_MAX_T26X + }; + nveu32_t rx_pkt_late_scx_reg[MAX_MACSEC_IP_TYPES] = {0}; + nveu32_t rx_pkt_notvld_scx_reg[MAX_MACSEC_IP_TYPES] = {0}; + nveu32_t rx_pkt_ok_scx_reg[MAX_MACSEC_IP_TYPES] = {0}; mmc->tx_pkts_untaged = update_macsec_mmc_val(osi_core, MACSEC_TX_PKTS_UNTG_LO_0); @@ -725,36 +801,54 @@ static void macsec_read_mmc(struct osi_core_priv_data *const osi_core) mmc->tx_octets_protected = update_macsec_mmc_val(osi_core, MACSEC_TX_OCTETS_PRTCTD_LO_0); mmc->rx_pkts_no_tag = - update_macsec_mmc_val(osi_core, MACSEC_RX_PKTS_NOTG_LO_0); + update_macsec_mmc_val(osi_core, rx_notg_reg[osi_core->macsec]); mmc->rx_pkts_untagged = - update_macsec_mmc_val(osi_core, MACSEC_RX_PKTS_UNTG_LO_0); + update_macsec_mmc_val(osi_core, rx_untg_reg[osi_core->macsec]); mmc->rx_pkts_bad_tag = - update_macsec_mmc_val(osi_core, MACSEC_RX_PKTS_BADTAG_LO_0); + update_macsec_mmc_val(osi_core, rx_badtg_reg[osi_core->macsec]); mmc->rx_pkts_no_sa_err = - update_macsec_mmc_val(osi_core, MACSEC_RX_PKTS_NOSAERROR_LO_0); + update_macsec_mmc_val(osi_core, rx_nosaerror_reg[osi_core->macsec]); mmc->rx_pkts_no_sa = - update_macsec_mmc_val(osi_core, MACSEC_RX_PKTS_NOSA_LO_0); + update_macsec_mmc_val(osi_core, rx_nosa_reg[osi_core->macsec]); mmc->rx_pkts_overrun = - update_macsec_mmc_val(osi_core, MACSEC_RX_PKTS_OVRRUN_LO_0); + update_macsec_mmc_val(osi_core, rx_ovrrun_reg[osi_core->macsec]); mmc->rx_octets_validated = - update_macsec_mmc_val(osi_core, MACSEC_RX_OCTETS_VLDTD_LO_0); + update_macsec_mmc_val(osi_core, rx_vldtd_reg[osi_core->macsec]); + if (osi_core->macsec == OSI_MACSEC_T26X) { + mmc->tx_octets_encrypted = + update_macsec_mmc_val(osi_core, MACSEC_TX_OCTETS_ENCRYPTED_LO_0); + mmc->rx_octets_decrypted = + update_macsec_mmc_val(osi_core, MACSEC_RX_OCTETS_DECRYPD_LO_0); + } + + for (i = 0; i <= sc_idx_max[macsec]; i++) { + rx_pkt_late_scx_reg[OSI_MACSEC_T23X] = MACSEC_RX_PKTS_LATE_SCx_LO_0(i); + rx_pkt_late_scx_reg[OSI_MACSEC_T26X] = MACSEC_RX_PKTS_LATE_SCx_LO_0_T26X(i); + 
rx_pkt_notvld_scx_reg[OSI_MACSEC_T23X] = MACSEC_RX_PKTS_NOTVALID_SCx_LO_0(i); + rx_pkt_notvld_scx_reg[OSI_MACSEC_T26X] = MACSEC_RX_PKTS_NOTVALID_SCx_LO_0_T26X(i); + rx_pkt_ok_scx_reg[OSI_MACSEC_T23X] = MACSEC_RX_PKTS_OK_SCx_LO_0(i); + rx_pkt_ok_scx_reg[OSI_MACSEC_T26X] = MACSEC_RX_PKTS_OK_SCx_LO_0_T26X(i); - for (i = 0; i <= OSI_SC_INDEX_MAX; i++) { mmc->tx_pkts_protected[i] = update_macsec_mmc_val(osi_core, MACSEC_TX_PKTS_PROTECTED_SCx_LO_0(i)); + if (osi_core->macsec == OSI_MACSEC_T26X) { + mmc->tx_pkts_encrypted[i] = + update_macsec_mmc_val(osi_core, + MACSEC_TX_PKTS_ENCRYPTED_SCx_LO_0(i)); + } mmc->rx_pkts_late[i] = update_macsec_mmc_val(osi_core, - MACSEC_RX_PKTS_LATE_SCx_LO_0(i)); + rx_pkt_late_scx_reg[osi_core->macsec]); mmc->rx_pkts_delayed[i] = mmc->rx_pkts_late[i]; mmc->rx_pkts_not_valid[i] = update_macsec_mmc_val(osi_core, - MACSEC_RX_PKTS_NOTVALID_SCx_LO_0(i)); + rx_pkt_notvld_scx_reg[osi_core->macsec]); mmc->in_pkts_invalid[i] = mmc->rx_pkts_not_valid[i]; mmc->rx_pkts_unchecked[i] = mmc->rx_pkts_not_valid[i]; mmc->rx_pkts_ok[i] = update_macsec_mmc_val(osi_core, - MACSEC_RX_PKTS_OK_SCx_LO_0(i)); + rx_pkt_ok_scx_reg[osi_core->macsec]); } } @@ -796,8 +890,9 @@ static nve32_t macsec_enable(struct osi_core_priv_data *const osi_core, osi_lock_irq_enabled(&osi_core->macsec_fpe_lock); - /* MACSEC and FPE cannot coexist on MGBE refer bug 3484034 */ - if ((osi_core->mac == OSI_MAC_HW_MGBE) && + /* MACSEC and FPE cannot coexist on MGBE of T234 refer bug 3484034 + * Both EQOS and MGBE of T264 cannot have macsec and fpe enabled simultaneously */ + if ((osi_core->mac != OSI_MAC_HW_EQOS) && (enable == OSI_ENABLE) && (osi_core->is_fpe_enabled == OSI_ENABLE)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "MACSE and FPE cannot coexist on MGBE\n", 0ULL); @@ -945,14 +1040,19 @@ static nve32_t kt_key_write(struct osi_core_priv_data *const osi_core, return 0; } -static nve32_t validate_kt_config(const struct osi_macsec_kt_config *const kt_config) +static nve32_t 
validate_kt_config(struct osi_core_priv_data *const osi_core, + const struct osi_macsec_kt_config *const kt_config) { nve32_t ret = 0; + const nveu32_t kt_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SA_LUT_MAX_INDEX, + OSI_SA_LUT_MAX_INDEX_T26X + }; /* Validate KT config */ if ((kt_config->table_config.ctlr_sel > OSI_CTLR_SEL_MAX) || (kt_config->table_config.rw > OSI_RW_MAX) || - (kt_config->table_config.index > OSI_TABLE_INDEX_MAX)) { + (kt_config->table_config.index > kt_max_index[osi_core->macsec])) { ret = -1; goto err; } @@ -968,7 +1068,7 @@ static nve32_t macsec_kt_config(struct osi_core_priv_data *const osi_core, nveu32_t kt_config_reg = 0; nveu8_t *base = (nveu8_t *)osi_core->tz_base; - ret = validate_kt_config(kt_config); + ret = validate_kt_config(osi_core, kt_config); if (ret < 0) { goto err; } @@ -996,7 +1096,6 @@ static nve32_t macsec_kt_config(struct osi_core_priv_data *const osi_core, kt_config_reg |= MACSEC_KT_CONFIG_UPDATE; osi_writela(osi_core, kt_config_reg, base + MACSEC_GCM_KEYTABLE_CONFIG); - /* Wait for this KT update to finish */ ret = poll_for_kt_update(osi_core); if (ret < 0) { @@ -1405,9 +1504,18 @@ static nve32_t byp_lut_read(struct osi_core_priv_data *const osi_core, nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; nveu8_t *paddr = OSI_NULL; nve32_t ret = 0; + const nveu32_t tx_byp_lut_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_TX_BYP_LUT_VALID, + (index < 32U)? MACSEC_TX_BYP_LUT_VALID0_T26X: + MACSEC_TX_BYP_LUT_VALID1_T26X + }; + const nveu32_t rx_byp_lut_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_BYP_LUT_VALID, + (index < 32U)? 
MACSEC_RX_BYP_LUT_VALID0_T26X: + MACSEC_RX_BYP_LUT_VALID1_T26X + }; read_lut_data(osi_core, lut_data); - if (lut_read_inputs(lut_config, lut_data) != 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "LUT inputs error\n", 0ULL); @@ -1433,10 +1541,10 @@ static nve32_t byp_lut_read(struct osi_core_priv_data *const osi_core, switch (lut_config->table_config.ctlr_sel) { case OSI_CTLR_SEL_TX: - paddr = addr + MACSEC_TX_BYP_LUT_VALID; + paddr = addr + tx_byp_lut_reg[osi_core->macsec]; break; case OSI_CTLR_SEL_RX: - paddr = addr + MACSEC_RX_BYP_LUT_VALID; + paddr = addr + rx_byp_lut_reg[osi_core->macsec]; break; default: OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, @@ -1445,6 +1553,8 @@ static nve32_t byp_lut_read(struct osi_core_priv_data *const osi_core, break; } if (ret == OSI_NONE_SIGNED) { + /* update byp LUT index if it is > 32 */ + index &= 0x1FU; val = osi_readla(osi_core, paddr); if ((val & ((nveu32_t)(1U) << (index & 0x1FU))) != OSI_NONE) { flags |= OSI_LUT_FLAGS_ENTRY_VALID; @@ -1483,40 +1593,55 @@ static void tx_sci_lut_read(struct osi_core_priv_data *const osi_core, nveu32_t val = 0; nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; nveu32_t index = lut_config->table_config.index; + nveu32_t macsec = osi_core->macsec; + const nveu32_t tx_sci_lut_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_TX_SCI_LUT_VALID, + (index < 32U)? 
MACSEC_TX_SCI_LUT_VALID0_T26X: + MACSEC_TX_SCI_LUT_VALID1_T26X + }; + const nveu32_t dvlan_pkt[MAX_MACSEC_IP_TYPES] = { + MACSEC_TX_SCI_LUT_DVLAN_PKT, + MACSEC_TX_SCI_LUT_DVLAN_PKT_T26X + }; + const nveu32_t dvlan_pkt_out_in[MAX_MACSEC_IP_TYPES] = { + MACSEC_TX_SCI_LUT_DVLAN_OUTER_INNER_TAG_SEL, + MACSEC_TX_SCI_LUT_DVLAN_OUTER_INNER_TAG_SEL_T26X + }; - if ((lut_data[6] & MACSEC_LUT_AN0_VALID) == - MACSEC_LUT_AN0_VALID) { - lut_config->sci_lut_out.an_valid |= OSI_AN0_VALID; - } - if ((lut_data[6] & MACSEC_LUT_AN1_VALID) == - MACSEC_LUT_AN1_VALID) { - lut_config->sci_lut_out.an_valid |= OSI_AN1_VALID; - } - if ((lut_data[6] & MACSEC_LUT_AN2_VALID) == - MACSEC_LUT_AN2_VALID) { - lut_config->sci_lut_out.an_valid |= OSI_AN2_VALID; - } - if ((lut_data[6] & MACSEC_LUT_AN3_VALID) == - MACSEC_LUT_AN3_VALID) { - lut_config->sci_lut_out.an_valid |= OSI_AN3_VALID; - } + if ((lut_data[6] & MACSEC_LUT_AN0_VALID) == + MACSEC_LUT_AN0_VALID) { + lut_config->sci_lut_out.an_valid |= OSI_AN0_VALID; + } + if ((lut_data[6] & MACSEC_LUT_AN1_VALID) == + MACSEC_LUT_AN1_VALID) { + lut_config->sci_lut_out.an_valid |= OSI_AN1_VALID; + } + if ((lut_data[6] & MACSEC_LUT_AN2_VALID) == + MACSEC_LUT_AN2_VALID) { + lut_config->sci_lut_out.an_valid |= OSI_AN2_VALID; + } + if ((lut_data[6] & MACSEC_LUT_AN3_VALID) == + MACSEC_LUT_AN3_VALID) { + lut_config->sci_lut_out.an_valid |= OSI_AN3_VALID; + } - lut_config->sci_lut_out.sc_index = (lut_data[6] >> 17) & 0xFU; + lut_config->sci_lut_out.sc_index = (lut_data[6] >> 17) & 0x3FU; - if ((lut_data[6] & MACSEC_TX_SCI_LUT_DVLAN_PKT) == - MACSEC_TX_SCI_LUT_DVLAN_PKT) { - lut_config->flags |= OSI_LUT_FLAGS_DVLAN_PKT; - } - if ((lut_data[6] & MACSEC_TX_SCI_LUT_DVLAN_OUTER_INNER_TAG_SEL) == - MACSEC_TX_SCI_LUT_DVLAN_OUTER_INNER_TAG_SEL) { - lut_config->flags |= - OSI_LUT_FLAGS_DVLAN_OUTER_INNER_TAG_SEL; - } + if ((lut_data[6] & dvlan_pkt[macsec]) == dvlan_pkt[macsec]) { + lut_config->flags |= OSI_LUT_FLAGS_DVLAN_PKT; + } + if ((lut_data[6] & 
dvlan_pkt_out_in[macsec]) == + dvlan_pkt_out_in[macsec]) { + lut_config->flags |= + OSI_LUT_FLAGS_DVLAN_OUTER_INNER_TAG_SEL; + } - val = osi_readla(osi_core, addr+MACSEC_TX_SCI_LUT_VALID); - if ((val & ((nveu32_t)(1U) << (index & 0xFFU))) != OSI_NONE) { - lut_config->flags |= OSI_LUT_FLAGS_ENTRY_VALID; - } + /* update SCI LUT index if it is > 32 */ + index &= 0x1FU; + val = osi_readla(osi_core, addr+tx_sci_lut_reg[macsec]); + if ((val & ((nveu32_t)(1U) << (index & 0xFFU))) != OSI_NONE) { + lut_config->flags |= OSI_LUT_FLAGS_ENTRY_VALID; + } } /** @@ -1553,12 +1678,25 @@ static nve32_t sci_lut_read(struct osi_core_priv_data *const osi_core, nveu32_t val = 0; nveu32_t index = lut_config->table_config.index; nve32_t ret = 0; + nveu32_t macsec = osi_core->macsec; + const nveu32_t rx_sci_lut_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_SCI_LUT_VALID, + (index < 32U)? MACSEC_RX_SCI_LUT_VALID0_T26X: + MACSEC_RX_SCI_LUT_VALID1_T26X + }; + const nveu32_t sc_lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SC_INDEX_MAX, + OSI_SC_INDEX_MAX_T26X + }; - if (index > OSI_SC_LUT_MAX_INDEX) { + if (index > sc_lut_max_index[macsec]) { ret = -1; goto exit; } + read_lut_data(osi_core, lut_data); + /* update byp LUT index if it is > 32 */ + index &= 0x1FU; switch (lut_config->table_config.ctlr_sel) { case OSI_CTLR_SEL_TX: @@ -1580,7 +1718,7 @@ static nve32_t sci_lut_read(struct osi_core_priv_data *const osi_core, lut_config->sci_lut_out.sci[6] = (nveu8_t)((lut_data[1] >> 16) & 0xFFU); lut_config->sci_lut_out.sci[7] = (nveu8_t)((lut_data[1] >> 24) & 0xFFU); - lut_config->sci_lut_out.sc_index = (lut_data[2] >> 10) & 0xFU; + lut_config->sci_lut_out.sc_index = (lut_data[2] >> 10) & 0x3FU; if ((lut_data[2] & MACSEC_RX_SCI_LUT_PREEMPT_INACTIVE) != MACSEC_RX_SCI_LUT_PREEMPT_INACTIVE) { flags |= OSI_LUT_FLAGS_PREEMPT_VALID; @@ -1590,7 +1728,7 @@ static nve32_t sci_lut_read(struct osi_core_priv_data *const osi_core, } } - val = osi_readla(osi_core, addr+MACSEC_RX_SCI_LUT_VALID); + val = 
osi_readla(osi_core, addr+rx_sci_lut_reg[macsec]); if ((val & ((nveu32_t)(1U) << index)) != OSI_NONE) { lut_config->flags |= OSI_LUT_FLAGS_ENTRY_VALID; } @@ -1639,29 +1777,64 @@ static nve32_t sc_param_lut_read(struct osi_core_priv_data *const osi_core, switch (lut_config->table_config.ctlr_sel) { case OSI_CTLR_SEL_TX: - lut_config->sc_param_out.key_index_start = lut_data[0] & 0x1FU; - lut_config->sc_param_out.pn_max = (lut_data[0] >> 5) | - (lut_data[1] << 27); - lut_config->sc_param_out.pn_threshold = (lut_data[1] >> 5) | - (lut_data[2] << 27); - lut_config->sc_param_out.tci = (nveu8_t)((lut_data[2] >> 5) & 0x3U); - lut_config->sc_param_out.sci[0] = (nveu8_t)((lut_data[2] >> 8) & 0xFFU); - lut_config->sc_param_out.sci[1] = (nveu8_t)((lut_data[2] >> 16) & 0xFFU); - lut_config->sc_param_out.sci[2] = (nveu8_t)((lut_data[2] >> 24) & 0xFFU); - lut_config->sc_param_out.sci[3] = (nveu8_t)(lut_data[3] & 0xFFU); - lut_config->sc_param_out.sci[4] = (nveu8_t)((lut_data[3] >> 8) & 0xFFU); - lut_config->sc_param_out.sci[5] = (nveu8_t)((lut_data[3] >> 16) & 0xFFU); - lut_config->sc_param_out.sci[6] = (nveu8_t)((lut_data[3] >> 24) & 0xFFU); - lut_config->sc_param_out.sci[7] = (nveu8_t)(lut_data[4] & 0xFFU); - lut_config->sc_param_out.vlan_in_clear = - (nveu8_t)((lut_data[4] >> 8) & 0x1U); + if (osi_core->macsec == OSI_MACSEC_T23X) { + lut_config->sc_param_out.key_index_start = lut_data[0] & 0x1FU; + lut_config->sc_param_out.pn_max = (lut_data[0] >> 5) | + (lut_data[1] << 27); + lut_config->sc_param_out.pn_threshold = (lut_data[1] >> 5) | + (lut_data[2] << 27); + lut_config->sc_param_out.tci = (nveu8_t)((lut_data[2] >> 5) & 0x3U); + lut_config->sc_param_out.sci[0] = (nveu8_t)((lut_data[2] >> 8) & 0xFFU); + lut_config->sc_param_out.sci[1] = (nveu8_t)((lut_data[2] >> 16) & 0xFFU); + lut_config->sc_param_out.sci[2] = (nveu8_t)((lut_data[2] >> 24) & 0xFFU); + lut_config->sc_param_out.sci[3] = (nveu8_t)(lut_data[3] & 0xFFU); + lut_config->sc_param_out.sci[4] = (nveu8_t)((lut_data[3] 
>> 8) & 0xFFU); + lut_config->sc_param_out.sci[5] = (nveu8_t)((lut_data[3] >> 16) & 0xFFU); + lut_config->sc_param_out.sci[6] = (nveu8_t)((lut_data[3] >> 24) & 0xFFU); + lut_config->sc_param_out.sci[7] = (nveu8_t)(lut_data[4] & 0xFFU); + lut_config->sc_param_out.vlan_in_clear = + (nveu8_t)((lut_data[4] >> 8) & 0x1U); + } else { + lut_config->sc_param_out.key_index_start = lut_data[0] & 0x7FU; + lut_config->sc_param_out.pn_max = (lut_data[0] >> 7) | + (lut_data[1] << 25); + lut_config->sc_param_out.pn_threshold = (lut_data[1] >> 7) | + (lut_data[2] << 25); + lut_config->sc_param_out.tci = (nveu8_t)((lut_data[2] >> 7) & 0x7U); + lut_config->sc_param_out.sci[0] = (nveu8_t)((lut_data[2] >> 10) & 0xFFU); + lut_config->sc_param_out.sci[1] = (nveu8_t)((lut_data[2] >> 18) & 0xFFU); + lut_config->sc_param_out.sci[2] = + (nveu8_t)(((lut_data[2] >> 26) | (lut_data[3] << 6)) & 0xFFU); + lut_config->sc_param_out.sci[3] = (nveu8_t)((lut_data[3] >> 2) & 0xFFU); + lut_config->sc_param_out.sci[4] = (nveu8_t)((lut_data[3] >> 10) & 0xFFU); + lut_config->sc_param_out.sci[5] = (nveu8_t)((lut_data[3] >> 18) & 0xFFU); + lut_config->sc_param_out.sci[6] = + (nveu8_t)(((lut_data[3] >> 26) | (lut_data[4] << 6)) & 0xFFU); + lut_config->sc_param_out.sci[7] = (nveu8_t)((lut_data[4] >> 2) & 0xFFU); + lut_config->sc_param_out.vlan_in_clear = + (nveu8_t)((lut_data[4] >> 10) & 0x1U); + lut_config->sc_param_out.encrypt = + (nveu8_t)((lut_data[4] >> 11) & 0x1U); + lut_config->sc_param_out.conf_offset = + (nveu8_t)((lut_data[4] >> 12) & 0x3U); + } break; case OSI_CTLR_SEL_RX: - lut_config->sc_param_out.key_index_start = lut_data[0] & 0x1FU; - lut_config->sc_param_out.pn_window = (lut_data[0] >> 5) | + if (osi_core->macsec == OSI_MACSEC_T23X) { + lut_config->sc_param_out.key_index_start = lut_data[0] & 0x1FU; + lut_config->sc_param_out.pn_window = (lut_data[0] >> 5) | (lut_data[1] << 27); - lut_config->sc_param_out.pn_max = (lut_data[1] >> 5) | + lut_config->sc_param_out.pn_max = (lut_data[1] >> 5) | 
(lut_data[2] << 27); + } else { + lut_config->sc_param_out.key_index_start = lut_data[0] & 0x7FU; + lut_config->sc_param_out.pn_window = (lut_data[0] >> 7) | + (lut_data[1] << 25); + lut_config->sc_param_out.pn_max = (lut_data[1] >> 7) | + (lut_data[2] << 25); + lut_config->sc_param_out.encrypt = (nveu8_t)((lut_data[2] >> 7) & 0x1U); + lut_config->sc_param_out.conf_offset = (nveu8_t)((lut_data[2] >> 8) & 0x3U); + } break; default: OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, @@ -2001,16 +2174,27 @@ static nve32_t sc_state_lut_config(struct osi_core_priv_data *const osi_core, * - De-initialization: No */ static void rx_sc_param_lut_config( + struct osi_core_priv_data *const osi_core, const struct osi_macsec_lut_config *const lut_config, nveu32_t *const lut_data) { struct osi_sc_param_outputs entry = lut_config->sc_param_out; - lut_data[0] |= entry.key_index_start; - lut_data[0] |= entry.pn_window << 5; - lut_data[1] |= entry.pn_window >> 27; - lut_data[1] |= entry.pn_max << 5; - lut_data[2] |= entry.pn_max >> 27; + if (osi_core->macsec == OSI_MACSEC_T23X) { + lut_data[0] |= entry.key_index_start; + lut_data[0] |= entry.pn_window << 5; + lut_data[1] |= entry.pn_window >> 27; + lut_data[1] |= entry.pn_max << 5; + lut_data[2] |= entry.pn_max >> 27; + } else { + lut_data[0] |= entry.key_index_start; + lut_data[0] |= entry.pn_window << 7; + lut_data[1] |= entry.pn_window >> 25; + lut_data[1] |= entry.pn_max << 7; + lut_data[2] |= entry.pn_max >> 25; + lut_data[2] |= ((nveu32_t)entry.encrypt) << 7; + lut_data[2] |= ((nveu32_t)entry.conf_offset) << 8; + } } /** @@ -2034,26 +2218,49 @@ static void rx_sc_param_lut_config( * - De-initialization: No */ static void tx_sc_param_lut_config( + struct osi_core_priv_data *const osi_core, const struct osi_macsec_lut_config *const lut_config, nveu32_t *const lut_data) { struct osi_sc_param_outputs entry = lut_config->sc_param_out; - lut_data[0] |= entry.key_index_start; - lut_data[0] |= entry.pn_max << 5; - lut_data[1] |= 
entry.pn_max >> 27; - lut_data[1] |= entry.pn_threshold << 5; - lut_data[2] |= entry.pn_threshold >> 27; - lut_data[2] |= (nveu32_t)(entry.tci) << 5; - lut_data[2] |= ((nveu32_t)entry.sci[0]) << 8; - lut_data[2] |= ((nveu32_t)entry.sci[1]) << 16; - lut_data[2] |= ((nveu32_t)entry.sci[2]) << 24; - lut_data[3] |= ((nveu32_t)entry.sci[3]); - lut_data[3] |= ((nveu32_t)entry.sci[4]) << 8; - lut_data[3] |= ((nveu32_t)entry.sci[5]) << 16; - lut_data[3] |= ((nveu32_t)entry.sci[6]) << 24; - lut_data[4] |= ((nveu32_t)entry.sci[7]); - lut_data[4] |= ((nveu32_t)entry.vlan_in_clear) << 8; + if (osi_core->macsec == OSI_MACSEC_T23X) { + lut_data[0] |= entry.key_index_start; + lut_data[0] |= entry.pn_max << 5; + lut_data[1] |= entry.pn_max >> 27; + lut_data[1] |= entry.pn_threshold << 5; + lut_data[2] |= entry.pn_threshold >> 27; + lut_data[2] |= (nveu32_t)(entry.tci) << 5; + lut_data[2] |= ((nveu32_t)entry.sci[0]) << 8; + lut_data[2] |= ((nveu32_t)entry.sci[1]) << 16; + lut_data[2] |= ((nveu32_t)entry.sci[2]) << 24; + lut_data[3] |= ((nveu32_t)entry.sci[3]); + lut_data[3] |= ((nveu32_t)entry.sci[4]) << 8; + lut_data[3] |= ((nveu32_t)entry.sci[5]) << 16; + lut_data[3] |= ((nveu32_t)entry.sci[6]) << 24; + lut_data[4] |= ((nveu32_t)entry.sci[7]); + lut_data[4] |= ((nveu32_t)entry.vlan_in_clear) << 8; + } else { + lut_data[0] |= entry.key_index_start; + lut_data[0] |= entry.pn_max << 7; + lut_data[1] |= entry.pn_max >> 25; + lut_data[1] |= entry.pn_threshold << 7; + lut_data[2] |= entry.pn_threshold >> 25; + lut_data[2] |= (nveu32_t)(entry.tci) << 7; + lut_data[2] |= ((nveu32_t)entry.sci[0]) << 10; + lut_data[2] |= ((nveu32_t)entry.sci[1]) << 18; + lut_data[2] |= ((nveu32_t)entry.sci[2]) << 26; + lut_data[3] |= ((nveu32_t)entry.sci[2]) >> 6; + lut_data[3] |= ((nveu32_t)entry.sci[3]) << 2; + lut_data[3] |= ((nveu32_t)entry.sci[4]) << 10; + lut_data[3] |= ((nveu32_t)entry.sci[5]) << 18; + lut_data[3] |= ((nveu32_t)entry.sci[6]) << 26; + lut_data[4] |= ((nveu32_t)entry.sci[6]) >> 6; + 
 lut_data[4] |= ((nveu32_t)entry.sci[7]) << 2; + lut_data[4] |= ((nveu32_t)entry.vlan_in_clear) << 10; + lut_data[4] |= ((nveu32_t)entry.encrypt) << 11; + lut_data[4] |= ((nveu32_t)entry.conf_offset) << 12; + } } /** @@ -2088,18 +2295,29 @@ static nve32_t sc_param_lut_config(struct osi_core_priv_data *const osi_core, struct osi_macsec_table_config table_config = lut_config->table_config; struct osi_sc_param_outputs entry = lut_config->sc_param_out; nve32_t ret = 0; + const nveu32_t key_idx_max[MAX_MACSEC_IP_TYPES] = { + OSI_KEY_INDEX_MAX, OSI_KEY_INDEX_MAX_T26X + }; - if (entry.key_index_start > OSI_KEY_INDEX_MAX) { + if (entry.key_index_start > key_idx_max[osi_core->macsec]) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Invalid Key Index\n", 0ULL); ret = -1; goto exit; } + /* Confidentiality offset is de-PoRed from T264, hence conf_offset cannot be non-zero */ + if ((entry.encrypt > 1U) || (entry.conf_offset > 0U) || + (entry.vlan_in_clear > 1U)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "Invalid parameters\n", entry.conf_offset); + ret = -1; + goto exit; + } if (table_config.ctlr_sel == OSI_CTLR_SEL_TX) { - tx_sc_param_lut_config(lut_config, lut_data); + tx_sc_param_lut_config(osi_core, lut_config, lut_data); } else { - rx_sc_param_lut_config(lut_config, lut_data); + rx_sc_param_lut_config(osi_core, lut_config, lut_data); } commit_lut_data(osi_core, lut_data); @@ -2545,11 +2763,19 @@ exit: * @retval -1 on failure */ static void rx_sci_lut_config( + struct osi_core_priv_data *const osi_core, const struct osi_macsec_lut_config *const lut_config, nveu32_t *const lut_data) { nveu32_t flags = lut_config->flags; struct osi_sci_lut_outputs entry = lut_config->sci_lut_out; + const nveu32_t sc_idx_max[MAX_MACSEC_IP_TYPES] = { + OSI_SC_INDEX_MAX, OSI_SC_INDEX_MAX_T26X + }; + + if (entry.sc_index > sc_idx_max[osi_core->macsec]) { + goto exit; + } lut_data[0] |= ((nveu32_t)(entry.sci[0]) | (((nveu32_t)entry.sci[1]) << 8) | @@ -2574,6 +2800,8 @@ static void 
rx_sci_lut_config( } lut_data[2] |= entry.sc_index << 10; +exit: + return; } /** @@ -2602,6 +2830,7 @@ static void rx_sci_lut_config( * @retval -1 on failure */ static nve32_t tx_sci_lut_config( + struct osi_core_priv_data *const osi_core, const struct osi_macsec_lut_config *const lut_config, nveu32_t *const lut_data) { @@ -2609,6 +2838,14 @@ static nve32_t tx_sci_lut_config( struct osi_sci_lut_outputs entry = lut_config->sci_lut_out; nveu32_t an_valid = entry.an_valid; nve32_t ret = 0; + const nveu32_t dvlan[MAX_MACSEC_IP_TYPES] = { + MACSEC_TX_SCI_LUT_DVLAN_PKT, + MACSEC_TX_SCI_LUT_DVLAN_PKT_T26X + }; + const nveu32_t dvlan_tag[MAX_MACSEC_IP_TYPES] = { + MACSEC_TX_SCI_LUT_DVLAN_OUTER_INNER_TAG_SEL, + MACSEC_TX_SCI_LUT_DVLAN_OUTER_INNER_TAG_SEL_T26X + }; if (lut_config_inputs(lut_config, lut_data) != 0) { ret = -1; @@ -2632,12 +2869,12 @@ static nve32_t tx_sci_lut_config( lut_data[6] |= entry.sc_index << 17; if ((flags & OSI_LUT_FLAGS_DVLAN_PKT) == OSI_LUT_FLAGS_DVLAN_PKT) { - lut_data[6] |= MACSEC_TX_SCI_LUT_DVLAN_PKT; + lut_data[6] |= dvlan[osi_core->macsec]; } if ((flags & OSI_LUT_FLAGS_DVLAN_OUTER_INNER_TAG_SEL) == OSI_LUT_FLAGS_DVLAN_OUTER_INNER_TAG_SEL) { - lut_data[6] |= MACSEC_TX_SCI_LUT_DVLAN_OUTER_INNER_TAG_SEL; + lut_data[6] |= dvlan_tag[osi_core->macsec]; } exit: return ret; @@ -2679,17 +2916,35 @@ static nve32_t sci_lut_config(struct osi_core_priv_data *const osi_core, nveu32_t val = 0; nveu32_t index = lut_config->table_config.index; nve32_t ret = 0; + nveu32_t macsec = osi_core->macsec; + const nveu32_t tx_sci_lut_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_TX_SCI_LUT_VALID, + (index < 32U)? MACSEC_TX_SCI_LUT_VALID0_T26X: + MACSEC_TX_SCI_LUT_VALID1_T26X + }; + const nveu32_t rx_sci_lut_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_SCI_LUT_VALID, + (index < 32U)? 
MACSEC_RX_SCI_LUT_VALID0_T26X: + MACSEC_RX_SCI_LUT_VALID1_T26X + }; + const nveu32_t sc_lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SC_INDEX_MAX, + OSI_SC_INDEX_MAX_T26X + }; - if ((entry.sc_index > OSI_SC_INDEX_MAX) || - (lut_config->table_config.index > OSI_SC_LUT_MAX_INDEX)) { + if ((entry.sc_index > sc_lut_max_index[macsec]) || + (lut_config->table_config.index > sc_lut_max_index[macsec])) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "SCI LUT config err - Invalid Index\n", 0ULL); ret = -1; goto exit; } + /* update SCI LUT index if it is > 32 */ + index &= 0x1FU; + if (table_config.ctlr_sel == OSI_CTLR_SEL_TX) { - if (tx_sci_lut_config(lut_config, lut_data) < 0) { + if (tx_sci_lut_config(osi_core, lut_config, lut_data) < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to config tx sci LUT\n", 0ULL); ret = -1; @@ -2700,35 +2955,35 @@ static nve32_t sci_lut_config(struct osi_core_priv_data *const osi_core, if ((lut_config->flags & OSI_LUT_FLAGS_ENTRY_VALID) == OSI_LUT_FLAGS_ENTRY_VALID) { val = osi_readla(osi_core, addr + - MACSEC_TX_SCI_LUT_VALID); + tx_sci_lut_reg[macsec]); val |= ((nveu32_t)(1U) << index); osi_writela(osi_core, val, addr + - MACSEC_TX_SCI_LUT_VALID); + tx_sci_lut_reg[macsec]); } else { val = osi_readla(osi_core, addr + - MACSEC_TX_SCI_LUT_VALID); + tx_sci_lut_reg[macsec]); val &= ~((nveu32_t)(1U) << index); osi_writela(osi_core, val, addr + - MACSEC_TX_SCI_LUT_VALID); + tx_sci_lut_reg[macsec]); } } else { - rx_sci_lut_config(lut_config, lut_data); + rx_sci_lut_config(osi_core, lut_config, lut_data); commit_lut_data(osi_core, lut_data); if ((lut_config->flags & OSI_LUT_FLAGS_ENTRY_VALID) == OSI_LUT_FLAGS_ENTRY_VALID) { val = osi_readla(osi_core, addr + - MACSEC_RX_SCI_LUT_VALID); + rx_sci_lut_reg[macsec]); val |= ((nveu32_t)(1U) << index); osi_writela(osi_core, val, addr + - MACSEC_RX_SCI_LUT_VALID); + rx_sci_lut_reg[macsec]); } else { val = osi_readla(osi_core, addr + - MACSEC_RX_SCI_LUT_VALID); + rx_sci_lut_reg[macsec]); 
val &= ~((nveu32_t)(1U) << index); osi_writela(osi_core, val, addr + - MACSEC_RX_SCI_LUT_VALID); + rx_sci_lut_reg[macsec]); } } exit: @@ -2769,6 +3024,16 @@ static nve32_t byp_lut_config(struct osi_core_priv_data *const osi_core, nveu32_t val = 0; nveu32_t index = lut_config->table_config.index; nve32_t ret = 0; + const nveu32_t tx_byp_lut_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_TX_BYP_LUT_VALID, + (index < 32U)? MACSEC_TX_BYP_LUT_VALID0_T26X: + MACSEC_TX_BYP_LUT_VALID1_T26X + }; + const nveu32_t rx_byp_lut_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_BYP_LUT_VALID, + (index < 32U)? MACSEC_RX_BYP_LUT_VALID0_T26X: + MACSEC_RX_BYP_LUT_VALID1_T26X + }; if (lut_config_inputs(lut_config, lut_data) != 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, @@ -2794,21 +3059,23 @@ static nve32_t byp_lut_config(struct osi_core_priv_data *const osi_core, commit_lut_data(osi_core, lut_data); + /* update byp LUT index if it is > 32 */ + index &= 0x1FU; switch (lut_config->table_config.ctlr_sel) { case OSI_CTLR_SEL_TX: if ((flags & OSI_LUT_FLAGS_ENTRY_VALID) == OSI_LUT_FLAGS_ENTRY_VALID) { val = osi_readla(osi_core, addr + - MACSEC_TX_BYP_LUT_VALID); + tx_byp_lut_reg[osi_core->macsec]); val |= ((nveu32_t)(1U) << (index & 0x1FU)); osi_writela(osi_core, val, addr + - MACSEC_TX_BYP_LUT_VALID); + tx_byp_lut_reg[osi_core->macsec]); } else { val = osi_readla(osi_core, addr + - MACSEC_TX_BYP_LUT_VALID); + tx_byp_lut_reg[osi_core->macsec]); val &= ~((nveu32_t)(1U) << (index & 0x1FU)); osi_writela(osi_core, val, addr + - MACSEC_TX_BYP_LUT_VALID); + tx_byp_lut_reg[osi_core->macsec]); } break; @@ -2816,16 +3083,16 @@ static nve32_t byp_lut_config(struct osi_core_priv_data *const osi_core, if ((flags & OSI_LUT_FLAGS_ENTRY_VALID) == OSI_LUT_FLAGS_ENTRY_VALID) { val = osi_readla(osi_core, addr + - MACSEC_RX_BYP_LUT_VALID); + rx_byp_lut_reg[osi_core->macsec]); val |= ((nveu32_t)(1U) << (index & 0x1FU)); osi_writela(osi_core, val, addr + - MACSEC_RX_BYP_LUT_VALID); + 
rx_byp_lut_reg[osi_core->macsec]); } else { val = osi_readla(osi_core, addr + - MACSEC_RX_BYP_LUT_VALID); + rx_byp_lut_reg[osi_core->macsec]); val &= ~((nveu32_t)(1U) << (index & 0x1FU)); osi_writela(osi_core, val, addr + - MACSEC_RX_BYP_LUT_VALID); + rx_byp_lut_reg[osi_core->macsec]); } break; @@ -2915,14 +3182,19 @@ static inline nve32_t lut_data_write(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure */ -static nve32_t validate_lut_conf(const struct osi_macsec_lut_config *const lut_config) +static nve32_t validate_lut_conf(struct osi_core_priv_data *const osi_core, + const struct osi_macsec_lut_config *const lut_config) { nve32_t ret = 0; + const nveu32_t lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SA_LUT_MAX_INDEX, + OSI_SA_LUT_MAX_INDEX_T26X + }; /* Validate LUT config */ if ((lut_config->table_config.ctlr_sel > OSI_CTLR_SEL_MAX) || (lut_config->table_config.rw > OSI_RW_MAX) || - (lut_config->table_config.index > OSI_TABLE_INDEX_MAX) || + (lut_config->table_config.index > lut_max_index[osi_core->macsec]) || (lut_config->lut_sel > OSI_LUT_SEL_MAX)) { MACSEC_LOG("Validating LUT config failed. 
ctrl: %hu," " rw: %hu, index: %hu, lut_sel: %hu", @@ -2972,7 +3244,7 @@ static nve32_t macsec_lut_config(struct osi_core_priv_data *const osi_core, nveu32_t lut_config_reg; nveu8_t *base = (nveu8_t *)osi_core->macsec_base; - if (validate_lut_conf(lut_config) < 0) { + if (validate_lut_conf(osi_core, lut_config) < 0) { ret = -1; goto exit; } @@ -3051,16 +3323,22 @@ static inline void handle_rx_sc_invalid_key( { nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; nveu32_t clear = 0; + nveu32_t macsec = osi_core->macsec; + const nveu32_t reg_off[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_SC_KEY_INVALID_STS0_0, + MACSEC_RX_SC_KEY_INVALID_STS0_0_T26X + }; + const nveu32_t reg_count[MAX_MACSEC_IP_TYPES] = {2, 6}; + nveu32_t i; MACSEC_LOG("%s()\n", __func__); /** check which SC/AN had triggered and clear */ - /* rx_sc0_7 */ - clear = osi_readla(osi_core, addr + MACSEC_RX_SC_KEY_INVALID_STS0_0); - osi_writela(osi_core, clear, addr + MACSEC_RX_SC_KEY_INVALID_STS0_0); - /* rx_sc8_15 */ - clear = osi_readla(osi_core, addr + MACSEC_RX_SC_KEY_INVALID_STS1_0); - osi_writela(osi_core, clear, addr + MACSEC_RX_SC_KEY_INVALID_STS1_0); + for (i = 0U; i < reg_count[macsec]; i++) { + clear = osi_readla(osi_core, addr + reg_off[macsec] + (4U * i)); + osi_writela(osi_core, clear, addr + reg_off[macsec] + (4U * i)); + } + } /** @@ -3087,16 +3365,22 @@ static inline void handle_tx_sc_invalid_key( { nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; nveu32_t clear = 0; + nveu32_t macsec = osi_core->macsec; + const nveu32_t reg_off[MAX_MACSEC_IP_TYPES] = { + MACSEC_TX_SC_KEY_INVALID_STS0_0, + MACSEC_TX_SC_KEY_INVALID_STS0_0_T26X + }; + const nveu32_t reg_count[MAX_MACSEC_IP_TYPES] = {2, 6}; + nveu32_t i; MACSEC_LOG("%s()\n", __func__); /** check which SC/AN had triggered and clear */ - /* tx_sc0_7 */ - clear = osi_readla(osi_core, addr + MACSEC_TX_SC_KEY_INVALID_STS0_0); - osi_writela(osi_core, clear, addr + MACSEC_TX_SC_KEY_INVALID_STS0_0); - /* tx_sc8_15 */ - clear = osi_readla(osi_core, addr + 
MACSEC_TX_SC_KEY_INVALID_STS1_0); - osi_writela(osi_core, clear, addr + MACSEC_TX_SC_KEY_INVALID_STS1_0); + for (i = 0U; i < reg_count[macsec]; i++) { + clear = osi_readla(osi_core, addr + reg_off[macsec] + (4U*i)); + osi_writela(osi_core, clear, addr + reg_off[macsec] + (4U*i)); + } + } /** @@ -3151,17 +3435,21 @@ static inline void handle_rx_sc_replay_err( { nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; nveu32_t clear = 0; + nveu32_t macsec = osi_core->macsec; + const nveu32_t reg_off[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_SC_REPLAY_ERROR_STATUS0_0, + MACSEC_RX_SC_REPLAY_ERROR_STATUS0_0_T26X + }; + const nveu32_t reg_count[MAX_MACSEC_IP_TYPES] = {2, 4}; + nveu32_t i; + + for (i = 0U; i < reg_count[macsec]; i++) { + clear = osi_readla(osi_core, addr + + reg_off[macsec] + (4U*i)); + osi_writela(osi_core, clear, addr + + reg_off[macsec] + (4U*i)); + } - /* rx_sc0_7 */ - clear = osi_readla(osi_core, addr + - MACSEC_RX_SC_REPLAY_ERROR_STATUS0_0); - osi_writela(osi_core, clear, addr + - MACSEC_RX_SC_REPLAY_ERROR_STATUS0_0); - /* rx_sc8_15 */ - clear = osi_readla(osi_core, addr + - MACSEC_RX_SC_REPLAY_ERROR_STATUS1_0); - osi_writela(osi_core, clear, addr + - MACSEC_RX_SC_REPLAY_ERROR_STATUS1_0); } /** @@ -3188,18 +3476,22 @@ static inline void handle_rx_pn_exhausted( { nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; nveu32_t clear = 0; + nveu32_t macsec = osi_core->macsec; + const nveu32_t reg_off[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_SC_PN_EXHAUSTED_STATUS0_0, + MACSEC_RX_SC_PN_EXHAUSTED_STATUS0_0_T26X + }; + const nveu32_t reg_count[MAX_MACSEC_IP_TYPES] = {2, 4}; + nveu32_t i; /* Check which SC/AN had triggered and clear */ - /* rx_sc0_7 */ - clear = osi_readla(osi_core, addr + - MACSEC_RX_SC_PN_EXHAUSTED_STATUS0_0); - osi_writela(osi_core, clear, addr + - MACSEC_RX_SC_PN_EXHAUSTED_STATUS0_0); - /* rx_sc8_15 */ - clear = osi_readla(osi_core, addr + - MACSEC_RX_SC_PN_EXHAUSTED_STATUS1_0); - osi_writela(osi_core, clear, addr + - MACSEC_RX_SC_PN_EXHAUSTED_STATUS1_0); 
+ for (i = 0U; i < reg_count[macsec]; i++) { + clear = osi_readla(osi_core, addr + + reg_off[macsec] + (4U*i)); + osi_writela(osi_core, clear, addr + + reg_off[macsec] + (4U*i)); + } + } /** @@ -3225,12 +3517,20 @@ static inline void handle_tx_sc_err(struct osi_core_priv_data *const osi_core) { nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; nveu32_t clear = 0; + nveu32_t macsec = osi_core->macsec; + const nveu32_t reg_off[MAX_MACSEC_IP_TYPES] = { + MACSEC_TX_SC_ERROR_INTERRUPT_STATUS_0, + MACSEC_TX_SC_ERROR_INTERRUPT_STATUS0_0_T26X + }; + const nveu32_t reg_count[MAX_MACSEC_IP_TYPES] = {1, 2}; + nveu32_t i; - clear = osi_readla(osi_core, addr + - MACSEC_TX_SC_ERROR_INTERRUPT_STATUS_0); - osi_writela(osi_core, clear, addr + - MACSEC_TX_SC_ERROR_INTERRUPT_STATUS_0); - + for (i = 0U; i < reg_count[macsec]; i++) { + clear = osi_readla(osi_core, addr + + reg_off[macsec] + (4U*i)); + osi_writela(osi_core, clear, addr + + reg_off[macsec] + (4U*i)); + } } /** @@ -3257,18 +3557,20 @@ static inline void handle_tx_pn_threshold( { nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; nveu32_t clear = 0; + nveu32_t macsec = osi_core->macsec; + const nveu32_t reg_off[MAX_MACSEC_IP_TYPES] = { + MACSEC_TX_SC_PN_THRESHOLD_STATUS0_0, + MACSEC_TX_SC_PN_THRESHOLD_STATUS0_0_T26X + }; + const nveu32_t reg_count[MAX_MACSEC_IP_TYPES] = {2, 4}; + nveu32_t i; /* check which SC/AN had triggered and clear */ - /* tx_sc0_7 */ - clear = osi_readla(osi_core, addr + - MACSEC_TX_SC_PN_THRESHOLD_STATUS0_0); - osi_writela(osi_core, clear, addr + - MACSEC_TX_SC_PN_THRESHOLD_STATUS0_0); - /* tx_sc8_15 */ - clear = osi_readla(osi_core, addr + - MACSEC_TX_SC_PN_THRESHOLD_STATUS1_0); - osi_writela(osi_core, clear, addr + - MACSEC_TX_SC_PN_THRESHOLD_STATUS1_0); + for (i = 0U; i < reg_count[osi_core->macsec]; i++) { + clear = osi_readla(osi_core, addr + reg_off[macsec] + (4U*i)); + osi_writela(osi_core, clear, addr + reg_off[macsec] + (4U*i)); + } + } /** @@ -3295,18 +3597,22 @@ static inline void 
handle_tx_pn_exhausted( { nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; nveu32_t clear = 0; + nveu32_t macsec = osi_core->macsec; + const nveu32_t reg_off[MAX_MACSEC_IP_TYPES] = { + MACSEC_TX_SC_PN_EXHAUSTED_STATUS0_0, + MACSEC_TX_SC_PN_EXHAUSTED_STATUS0_0_T26X + }; + const nveu32_t reg_count[MAX_MACSEC_IP_TYPES] = {2, 4}; + nveu32_t i; + + for (i = 0U; i < reg_count[macsec]; i++) { + /* check which SC/AN had triggered and clear */ + clear = osi_readla(osi_core, addr + + reg_off[macsec] + (4U*i)); + osi_writela(osi_core, clear, addr + + reg_off[macsec] + (4U*i)); + } - /* check which SC/AN had triggered and clear */ - /* tx_sc0_7 */ - clear = osi_readla(osi_core, addr + - MACSEC_TX_SC_PN_EXHAUSTED_STATUS0_0); - osi_writela(osi_core, clear, addr + - MACSEC_TX_SC_PN_EXHAUSTED_STATUS0_0); - /* tx_sc8_15 */ - clear = osi_readla(osi_core, addr + - MACSEC_TX_SC_PN_EXHAUSTED_STATUS1_0); - osi_writela(osi_core, clear, addr + - MACSEC_TX_SC_PN_EXHAUSTED_STATUS1_0); } /** @@ -3336,25 +3642,42 @@ static inline void handle_dbg_evt_capture_done( { nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; nveu32_t trigger_evts = 0; + nveu32_t macsec = osi_core->macsec; + const nveu32_t tx_dbg_sts_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_TX_DEBUG_STATUS_0, + MACSEC_TX_DEBUG_STATUS_0_T26X + }; + const nveu32_t rx_dbg_sts_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_DEBUG_STATUS_0, + MACSEC_RX_DEBUG_STATUS_0_T26X + }; + const nveu32_t tx_trig_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_TX_DEBUG_TRIGGER_EN_0, + MACSEC_TX_DEBUG_TRIGGER_EN_0_T26X + }; + const nveu32_t rx_trig_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_DEBUG_TRIGGER_EN_0, + MACSEC_RX_DEBUG_TRIGGER_EN_0_T26X + }; if (ctrl_sel == OSI_CTLR_SEL_TX) { trigger_evts = osi_readla(osi_core, addr + - MACSEC_TX_DEBUG_STATUS_0); + tx_dbg_sts_reg[macsec]); osi_writela(osi_core, trigger_evts, addr + - MACSEC_TX_DEBUG_STATUS_0); + tx_dbg_sts_reg[macsec]); /* clear all trigger events */ trigger_evts = 0U; osi_writela(osi_core, trigger_evts, - addr + 
MACSEC_TX_DEBUG_TRIGGER_EN_0); + addr + tx_trig_reg[macsec]); } else { trigger_evts = osi_readla(osi_core, addr + - MACSEC_RX_DEBUG_STATUS_0); + rx_dbg_sts_reg[macsec]); osi_writela(osi_core, trigger_evts, addr + - MACSEC_RX_DEBUG_STATUS_0); + rx_dbg_sts_reg[macsec]); /* clear all trigger events */ trigger_evts = 0U; osi_writela(osi_core, trigger_evts, - addr + MACSEC_RX_DEBUG_TRIGGER_EN_0); + addr + rx_trig_reg[macsec]); } } @@ -3549,12 +3872,16 @@ static inline void handle_rx_irq(struct osi_core_priv_data *const osi_core) nveu32_t rx_isr; nveu32_t clear = 0; nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; + const nveu32_t rx_isr_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_ISR, + MACSEC_RX_ISR_T26X + }; #ifdef HSI_SUPPORT nveu64_t rx_crc_err = 0; nveu64_t rx_icv_err = 0; #endif - rx_isr = osi_readla(osi_core, addr + MACSEC_RX_ISR); + rx_isr = osi_readla(osi_core, addr + rx_isr_reg[osi_core->macsec]); MACSEC_LOG("%s(): rx_isr 0x%x\n", __func__, rx_isr); if ((rx_isr & MACSEC_RX_DBG_BUF_CAPTURE_DONE) == @@ -3612,7 +3939,7 @@ static inline void handle_rx_irq(struct osi_core_priv_data *const osi_core) } if (clear != OSI_NONE) { - osi_writela(osi_core, clear, addr + MACSEC_RX_ISR); + osi_writela(osi_core, clear, addr + rx_isr_reg[osi_core->macsec]); } } @@ -3645,8 +3972,13 @@ static inline void handle_common_irq(struct osi_core_priv_data *const osi_core) nveu32_t common_isr; nveu32_t clear = 0; nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; + nveu32_t macsec = osi_core->macsec; + const nveu32_t common_isr_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_COMMON_ISR, + MACSEC_COMMON_ISR_T26X + }; - common_isr = osi_readla(osi_core, addr + MACSEC_COMMON_ISR); + common_isr = osi_readla(osi_core, addr + common_isr_reg[macsec]); MACSEC_LOG("%s(): common_isr 0x%x\n", __func__, common_isr); if ((common_isr & MACSEC_SECURE_REG_VIOL) == MACSEC_SECURE_REG_VIOL) { @@ -3695,7 +4027,7 @@ static inline void handle_common_irq(struct osi_core_priv_data *const osi_core) clear |= 
MACSEC_TX_LKUP_MISS; } if (clear != OSI_NONE) { - osi_writela(osi_core, clear, addr + MACSEC_COMMON_ISR); + osi_writela(osi_core, clear, addr + common_isr_reg[macsec]); } } @@ -3726,6 +4058,11 @@ static void macsec_handle_irq(struct osi_core_priv_data *const osi_core) { nveu32_t irq_common_sr, common_isr; nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; + nveu32_t macsec = osi_core->macsec; + const nveu32_t common_isr_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_COMMON_ISR, + MACSEC_COMMON_ISR_T26X + }; irq_common_sr = osi_readla(osi_core, addr + MACSEC_INTERRUPT_COMMON_SR); MACSEC_LOG("%s(): common_sr 0x%x\n", __func__, irq_common_sr); @@ -3742,7 +4079,7 @@ static void macsec_handle_irq(struct osi_core_priv_data *const osi_core) handle_safety_err_irq(osi_core); } - common_isr = osi_readla(osi_core, addr + MACSEC_COMMON_ISR); + common_isr = osi_readla(osi_core, addr + common_isr_reg[macsec]); if (common_isr != OSI_NONE) { handle_common_irq(osi_core); } @@ -3875,13 +4212,17 @@ static nve32_t clear_byp_lut(struct osi_core_priv_data *const osi_core) struct osi_macsec_table_config *table_config = &lut_config.table_config; nveu16_t i, j; nve32_t ret = 0; + const nveu32_t byp_lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_BYP_LUT_MAX_INDEX, + OSI_BYP_LUT_MAX_INDEX_T26X + }; table_config->rw = OSI_LUT_WRITE; /* Tx/Rx BYP LUT */ lut_config.lut_sel = OSI_LUT_SEL_BYPASS; for (i = 0; i <= OSI_CTLR_SEL_MAX; i++) { table_config->ctlr_sel = i; - for (j = 0; j <= OSI_BYP_LUT_MAX_INDEX; j++) { + for (j = 0; j <= byp_lut_max_index[osi_core->macsec]; j++) { table_config->index = j; ret = macsec_lut_config(osi_core, &lut_config); if (ret < 0) { @@ -3923,13 +4264,17 @@ static nve32_t clear_sci_lut(struct osi_core_priv_data *const osi_core) struct osi_macsec_table_config *table_config = &lut_config.table_config; nveu16_t i, j; nve32_t ret = 0; + const nveu32_t sc_lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SC_INDEX_MAX, + OSI_SC_INDEX_MAX_T26X + }; table_config->rw = OSI_LUT_WRITE; /* Tx/Rx 
SCI LUT */ lut_config.lut_sel = OSI_LUT_SEL_SCI; for (i = 0; i <= OSI_CTLR_SEL_MAX; i++) { table_config->ctlr_sel = i; - for (j = 0; j <= OSI_SC_LUT_MAX_INDEX; j++) { + for (j = 0; j <= sc_lut_max_index[osi_core->macsec]; j++) { table_config->index = j; ret = macsec_lut_config(osi_core, &lut_config); if (ret < 0) { @@ -3971,13 +4316,17 @@ static nve32_t clear_sc_param_lut(struct osi_core_priv_data *const osi_core) struct osi_macsec_table_config *table_config = &lut_config.table_config; nveu16_t i, j; nve32_t ret = 0; + const nveu32_t sc_lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SC_INDEX_MAX, + OSI_SC_INDEX_MAX_T26X + }; table_config->rw = OSI_LUT_WRITE; /* Tx/Rx SC param LUT */ lut_config.lut_sel = OSI_LUT_SEL_SC_PARAM; for (i = 0; i <= OSI_CTLR_SEL_MAX; i++) { table_config->ctlr_sel = i; - for (j = 0; j <= OSI_SC_LUT_MAX_INDEX; j++) { + for (j = 0; j <= sc_lut_max_index[osi_core->macsec]; j++) { table_config->index = j; ret = macsec_lut_config(osi_core, &lut_config); if (ret < 0) { @@ -4020,13 +4369,17 @@ static nve32_t clear_sc_state_lut(struct osi_core_priv_data *const osi_core) struct osi_macsec_table_config *table_config = &lut_config.table_config; nveu16_t i, j; nve32_t ret = 0; + const nveu32_t sc_lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SC_INDEX_MAX, + OSI_SC_INDEX_MAX_T26X + }; table_config->rw = OSI_LUT_WRITE; /* Tx/Rx SC state */ lut_config.lut_sel = OSI_LUT_SEL_SC_STATE; for (i = 0; i <= OSI_CTLR_SEL_MAX; i++) { table_config->ctlr_sel = i; - for (j = 0; j <= OSI_SC_LUT_MAX_INDEX; j++) { + for (j = 0; j <= sc_lut_max_index[osi_core->macsec]; j++) { table_config->index = j; ret = macsec_lut_config(osi_core, &lut_config); if (ret < 0) { @@ -4069,12 +4422,17 @@ static nve32_t clear_sa_state_lut(struct osi_core_priv_data *const osi_core) struct osi_macsec_table_config *table_config = &lut_config.table_config; nveu16_t j; nve32_t ret = 0; + nveu32_t macsec = osi_core->macsec; + const nveu32_t sa_lut_max_index[MAX_MACSEC_IP_TYPES] = { + 
OSI_SA_LUT_MAX_INDEX, + OSI_SA_LUT_MAX_INDEX_T26X + }; table_config->rw = OSI_LUT_WRITE; /* Tx SA state LUT */ lut_config.lut_sel = OSI_LUT_SEL_SA_STATE; table_config->ctlr_sel = OSI_CTLR_SEL_TX; - for (j = 0; j <= OSI_SA_LUT_MAX_INDEX; j++) { + for (j = 0; j <= sa_lut_max_index[macsec]; j++) { table_config->index = j; ret = macsec_lut_config(osi_core, &lut_config); if (ret < 0) { @@ -4087,7 +4445,7 @@ static nve32_t clear_sa_state_lut(struct osi_core_priv_data *const osi_core) /* Rx SA state LUT */ lut_config.lut_sel = OSI_LUT_SEL_SA_STATE; table_config->ctlr_sel = OSI_CTLR_SEL_RX; - for (j = 0; j <= OSI_SA_LUT_MAX_INDEX; j++) { + for (j = 0; j <= sa_lut_max_index[macsec]; j++) { table_config->index = j; ret = macsec_lut_config(osi_core, &lut_config); if (ret < 0) { @@ -4136,6 +4494,12 @@ static nve32_t clear_lut(struct osi_core_priv_data *const osi_core) #endif struct osi_macsec_table_config *table_config = &lut_config.table_config; nve32_t ret = 0; +#ifdef MACSEC_KEY_PROGRAM + const nveu32_t lut_max_index[MAX_MACSEC_IP_TYPES] = { + OSI_SA_LUT_MAX_INDEX, + OSI_SA_LUT_MAX_INDEX_T26X + }; +#endif /* MACSEC_KEY_PROGRAM */ table_config->rw = OSI_LUT_WRITE; /* Clear all the LUT's which have a dedicated LUT valid bit per entry */ @@ -4166,7 +4530,7 @@ static nve32_t clear_lut(struct osi_core_priv_data *const osi_core) table_config->rw = OSI_LUT_WRITE; for (i = 0; i <= OSI_CTLR_SEL_MAX; i++) { table_config->ctlr_sel = i; - for (j = 0; j <= OSI_TABLE_INDEX_MAX; j++) { + for (j = 0; j <= lut_max_index[osi_core->macsec]; j++) { table_config->index = j; ret = macsec_kt_config(osi_core, &kt_config); if (ret < 0) { @@ -4509,6 +4873,15 @@ static void macsec_intr_config(struct osi_core_priv_data *const osi_core, nveu32 { nveu32_t val = 0; nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; + nveu32_t macsec = osi_core->macsec; + const nveu32_t common_imr_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_COMMON_IMR, + MACSEC_COMMON_IMR_T26X + }; + const nveu32_t 
rx_imr_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_IMR, + MACSEC_RX_IMR_T26X + }; (void)enable; val = osi_readla(osi_core, addr + MACSEC_TX_IMR); @@ -4522,7 +4895,7 @@ static void macsec_intr_config(struct osi_core_priv_data *const osi_core, nveu32 osi_writela(osi_core, val, addr + MACSEC_TX_IMR); MACSEC_LOG("Write MACSEC_TX_IMR: 0x%x\n", val); - val = osi_readla(osi_core, addr + MACSEC_RX_IMR); + val = osi_readla(osi_core, addr + rx_imr_reg[macsec]); MACSEC_LOG("Read MACSEC_RX_IMR: 0x%x\n", val); val |= (MACSEC_RX_DBG_BUF_CAPTURE_DONE_INT_EN | @@ -4531,16 +4904,16 @@ static void macsec_intr_config(struct osi_core_priv_data *const osi_core, nveu32 MACSEC_RX_AES_GCM_BUF_OVF_INT_EN | MACSEC_RX_PN_EXHAUSTED_INT_EN ); - osi_writela(osi_core, val, addr + MACSEC_RX_IMR); + osi_writela(osi_core, val, addr + rx_imr_reg[macsec]); MACSEC_LOG("Write MACSEC_RX_IMR: 0x%x\n", val); - val = osi_readla(osi_core, addr + MACSEC_COMMON_IMR); + val = osi_readla(osi_core, addr + common_imr_reg[macsec]); MACSEC_LOG("Read MACSEC_COMMON_IMR: 0x%x\n", val); val |= (MACSEC_RX_UNINIT_KEY_SLOT_INT_EN | MACSEC_RX_LKUP_MISS_INT_EN | MACSEC_TX_UNINIT_KEY_SLOT_INT_EN | MACSEC_TX_LKUP_MISS_INT_EN); - osi_writela(osi_core, val, addr + MACSEC_COMMON_IMR); + osi_writela(osi_core, val, addr + common_imr_reg[macsec]); MACSEC_LOG("Write MACSEC_COMMON_IMR: 0x%x\n", val); } @@ -4589,6 +4962,15 @@ static nve32_t macsec_initialize(struct osi_core_priv_data *const osi_core, nveu const struct core_local *l_core = (void *)osi_core; nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; nve32_t ret = 0; + nveu32_t macsec = osi_core->macsec; + const nveu32_t common_imr_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_COMMON_IMR, + MACSEC_COMMON_IMR_T26X + }; + const nveu32_t rx_imr_reg[MAX_MACSEC_IP_TYPES] = { + MACSEC_RX_IMR, + MACSEC_RX_IMR_T26X + }; /* Update MAC value as per macsec requirement */ l_core->ops_p->macsec_config_mac(osi_core, OSI_ENABLE); @@ -4655,16 +5037,16 @@ static nve32_t macsec_initialize(struct 
osi_core_priv_data *const osi_core, nveu /* set ICV error threshold to 1 */ osi_writela(osi_core, 1U, addr + MACSEC_RX_ICV_ERR_CNTRL); /* Enabling interrupts only related to HSI */ - val = osi_readla(osi_core, addr + MACSEC_RX_IMR); + val = osi_readla(osi_core, addr + rx_imr_reg[macsec]); MACSEC_LOG("Read MACSEC_RX_IMR: 0x%x\n", val); val |= (MACSEC_RX_ICV_ERROR_INT_EN | MACSEC_RX_MAC_CRC_ERROR_INT_EN); MACSEC_LOG("Write MACSEC_RX_IMR: 0x%x\n", val); - osi_writela(osi_core, val, addr + MACSEC_RX_IMR); + osi_writela(osi_core, val, addr + rx_imr_reg[macsec]); - val = osi_readla(osi_core, addr + MACSEC_COMMON_IMR); + val = osi_readla(osi_core, addr + common_imr_reg[macsec]); val |= MACSEC_SECURE_REG_VIOL_INT_EN; - osi_writela(osi_core, val, addr + MACSEC_COMMON_IMR); + osi_writela(osi_core, val, addr + common_imr_reg[macsec]); /* Set AES mode * Default power on reset is AES-GCM128, leave it. @@ -5071,12 +5453,19 @@ static void print_error(const struct osi_core_priv_data *const osi_core, */ static void add_upd_sc_err_cleanup(struct osi_core_priv_data *const osi_core, nveu8_t mask, nveu16_t ctlr, - const struct osi_macsec_sc_info *const sc) + const struct osi_macsec_sc_info *const sc +#ifdef MACSEC_KEY_PROGRAM + ,nveu16_t kt_idx +#endif + ) { struct osi_macsec_lut_config lut_config = {0}; struct osi_macsec_table_config *table_config; nve32_t ret_fail = 0; nveu8_t error_mask = mask; +#ifdef MACSEC_KEY_PROGRAM + struct osi_macsec_kt_config kt_config = {0}; +#endif if ((error_mask & CLEAR_SCI_LUT) != OSI_NONE) { /* Cleanup SCI LUT */ @@ -5127,7 +5516,7 @@ static void add_upd_sc_err_cleanup(struct osi_core_priv_data *const osi_core, table_config = &kt_config.table_config; table_config->ctlr_sel = ctlr; table_config->rw = OSI_LUT_WRITE; - table_config->index = *kt_idx; + table_config->index = kt_idx; ret_fail = macsec_kt_config(osi_core, &kt_config); print_error(osi_core, ret_fail); } @@ -5235,6 +5624,8 @@ static nve32_t add_upd_sc(struct osi_core_priv_data *const osi_core, 
lut_config.sc_param_out.pn_window = sc->pn_window; lut_config.sc_param_out.tci = OSI_TCI_DEFAULT; lut_config.sc_param_out.vlan_in_clear = sc->vlan_in_clear; + lut_config.sc_param_out.conf_offset = sc->conf_offset; + lut_config.sc_param_out.encrypt = sc->encrypt; ret = macsec_lut_config(osi_core, &lut_config); if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, @@ -5300,7 +5691,11 @@ static nve32_t add_upd_sc(struct osi_core_priv_data *const osi_core, } } exit: - add_upd_sc_err_cleanup(osi_core, error_mask, ctlr, sc); + add_upd_sc_err_cleanup(osi_core, error_mask, ctlr, sc +#ifdef MACSEC_KEY_PROGRAM + , *kt_idx +#endif + ); return ret; } @@ -5402,6 +5797,8 @@ static nve32_t add_new_sc(struct osi_core_priv_data *const osi_core, new_sc->pn_window = sc->pn_window; new_sc->flags = sc->flags; new_sc->vlan_in_clear = sc->vlan_in_clear; + new_sc->conf_offset = sc->conf_offset; + new_sc->encrypt = sc->encrypt; new_sc->sc_idx_start = avail_sc_idx; if (is_sc_valid == OSI_MACSEC_SC_VALID) { @@ -5524,6 +5921,8 @@ static nve32_t macsec_configure(struct osi_core_priv_data *const osi_core, tmp_sc_p->pn_window = sc->pn_window; tmp_sc_p->flags = sc->flags; tmp_sc_p->vlan_in_clear = sc->vlan_in_clear; + tmp_sc_p->encrypt = sc->encrypt; + tmp_sc_p->conf_offset = sc->conf_offset; tmp_sc_p->an_valid |= OSI_BIT(sc->curr_an & 0x1FU); @@ -6233,7 +6632,8 @@ nve32_t osi_macsec_config_dbg_buf( if ((osi_core != OSI_NULL) && (l_core->macsec_ops != OSI_NULL) && (l_core->macsec_ops->dbg_buf_config != OSI_NULL) && - (dbg_buf_config != OSI_NULL)) { + (dbg_buf_config != OSI_NULL) && + (osi_core->macsec != OSI_MACSEC_T26X)) { ret = l_core->macsec_ops->dbg_buf_config(osi_core, dbg_buf_config); } @@ -6274,7 +6674,8 @@ nve32_t osi_macsec_dbg_events_config( if ((osi_core != OSI_NULL) && (l_core->macsec_ops != OSI_NULL) && (l_core->macsec_ops->dbg_events_config != OSI_NULL) && - (dbg_buf_config != OSI_NULL)) { + (dbg_buf_config != OSI_NULL) && + (osi_core->macsec != OSI_MACSEC_T26X)) { ret = 
l_core->macsec_ops->dbg_events_config(osi_core, dbg_buf_config); } diff --git a/osi/core/macsec.h b/osi/core/macsec.h index 91f4e2d..cde0bbd 100644 --- a/osi/core/macsec.h +++ b/osi/core/macsec.h @@ -46,55 +46,89 @@ #define MACSEC_TX_IMR 0x4008 #define MACSEC_TX_ISR 0x400C #define MACSEC_RX_IMR 0x4048 +#define MACSEC_RX_IMR_T26X 0x4050 #define MACSEC_RX_ISR 0x404C +#define MACSEC_RX_ISR_T26X 0x4054 #define MACSEC_TX_SC_PN_THRESHOLD_STATUS0_0 0x4018 -#define MACSEC_TX_SC_PN_THRESHOLD_STATUS1_0 0x401C +#define MACSEC_TX_SC_PN_THRESHOLD_STATUS0_0_T26X 0x4018 #define MACSEC_TX_SC_PN_EXHAUSTED_STATUS0_0 0x4024 -#define MACSEC_TX_SC_PN_EXHAUSTED_STATUS1_0 0x4028 +#define MACSEC_TX_SC_PN_EXHAUSTED_STATUS0_0_T26X 0x4030 #define MACSEC_TX_SC_ERROR_INTERRUPT_STATUS_0 0x402C +#define MACSEC_TX_SC_ERROR_INTERRUPT_STATUS0_0_T26X 0x4048 #define MACSEC_RX_SC_PN_EXHAUSTED_STATUS0_0 0x405C -#define MACSEC_RX_SC_PN_EXHAUSTED_STATUS1_0 0x4060 +#define MACSEC_RX_SC_PN_EXHAUSTED_STATUS0_0_T26X 0x4064 #define MACSEC_RX_SC_REPLAY_ERROR_STATUS0_0 0x4090 -#define MACSEC_RX_SC_REPLAY_ERROR_STATUS1_0 0x4094 +#define MACSEC_RX_SC_REPLAY_ERROR_STATUS0_0_T26X 0x4084 #define MACSEC_STATS_CONTROL_0 0x900C #define MACSEC_TX_PKTS_UNTG_LO_0 0x9010 #define MACSEC_TX_OCTETS_PRTCTD_LO_0 0x9018 +#define MACSEC_TX_OCTETS_ENCRYPTED_LO_0 0x91A8 #define MACSEC_TX_PKTS_TOO_LONG_LO_0 0x9020 #define MACSEC_TX_PKTS_PROTECTED_SCx_LO_0(x) ((0x9028UL) + ((x) * 8UL)) +#define MACSEC_TX_PKTS_ENCRYPTED_SCx_LO_0(x) ((0x91B0UL) + ((x) * 8UL)) #define MACSEC_RX_PKTS_NOTG_LO_0 0x90B0 +#define MACSEC_RX_PKTS_NOTG_LO_0_T26X 0x9338 #define MACSEC_RX_PKTS_UNTG_LO_0 0x90A8 +#define MACSEC_RX_PKTS_UNTG_LO_0_T26X 0x9330 #define MACSEC_RX_PKTS_BADTAG_LO_0 0x90B8 +#define MACSEC_RX_PKTS_BADTAG_LO_0_T26X 0x9040 #define MACSEC_RX_PKTS_NOSA_LO_0 0x90C0 +#define MACSEC_RX_PKTS_NOSA_LO_0_T26X 0x9348 #define MACSEC_RX_PKTS_NOSAERROR_LO_0 0x90C8 +#define MACSEC_RX_PKTS_NOSAERROR_LO_0_T26X 0x9350 #define MACSEC_RX_PKTS_OVRRUN_LO_0 0x90D0 
+#define MACSEC_RX_PKTS_OVRRUN_LO_0_T26X 0x9358 #define MACSEC_RX_OCTETS_VLDTD_LO_0 0x90D8 -#define MACSEC_RX_PKTS_LATE_SCx_LO_0(x) ((0x90E0UL) + ((x) * 8UL)) -#define MACSEC_RX_PKTS_NOTVALID_SCx_LO_0(x) ((0x9160UL) + ((x) * 8UL)) -#define MACSEC_RX_PKTS_OK_SCx_LO_0(x) ((0x91E0UL) + ((x) * 8UL)) +#define MACSEC_RX_OCTETS_DECRYPD_LO_0 0x9368 +#define MACSEC_RX_OCTETS_VLDTD_LO_0_T26X 0x9360 +#define MACSEC_RX_PKTS_LATE_SCx_LO_0(x) ((0x90E0U) + ((x) * 8U)) +#define MACSEC_RX_PKTS_LATE_SCx_LO_0_T26X(x) ((0x9370U) + ((x) * 8U)) +#define MACSEC_RX_PKTS_NOTVALID_SCx_LO_0(x) ((0x9160U) + ((x) * 8U)) +#define MACSEC_RX_PKTS_NOTVALID_SCx_LO_0_T26X(x) ((0x94F0U) + ((x) * 8U)) +#define MACSEC_RX_PKTS_OK_SCx_LO_0(x) ((0x91E0U) + ((x) * 8U)) +#define MACSEC_RX_PKTS_OK_SCx_LO_0_T26X(x) ((0x9670U) + ((x) * 8U)) #define MACSEC_CONTROL0 0xD000 #define MACSEC_LUT_CONFIG 0xD004 #define MACSEC_LUT_DATA(x) ((0xD008U) + ((x) * 4U)) #define MACSEC_TX_BYP_LUT_VALID 0xD024 +#define MACSEC_TX_BYP_LUT_VALID0_T26X 0xD024U +#define MACSEC_TX_BYP_LUT_VALID1_T26X 0xD028U #define MACSEC_TX_SCI_LUT_VALID 0xD028 +#define MACSEC_TX_SCI_LUT_VALID0_T26X 0xD02CU +#define MACSEC_TX_SCI_LUT_VALID1_T26X 0xD030U #define MACSEC_RX_BYP_LUT_VALID 0xD02C +#define MACSEC_RX_BYP_LUT_VALID0_T26X 0xD034U +#define MACSEC_RX_BYP_LUT_VALID1_T26X 0xD038U #define MACSEC_RX_SCI_LUT_VALID 0xD030 +#define MACSEC_RX_SCI_LUT_VALID0_T26X 0xD03CU +#define MACSEC_RX_SCI_LUT_VALID1_T26X 0xD040U #define MACSEC_COMMON_IMR 0xD054 +#define MACSEC_COMMON_IMR_T26X 0xD064 #define MACSEC_COMMON_ISR 0xD058 +#define MACSEC_COMMON_ISR_T26X 0xD068 #define MACSEC_TX_SC_KEY_INVALID_STS0_0 0xD064 -#define MACSEC_TX_SC_KEY_INVALID_STS1_0 0xD068 +#define MACSEC_TX_SC_KEY_INVALID_STS0_0_T26X 0xD074 #define MACSEC_RX_SC_KEY_INVALID_STS0_0 0xD080 -#define MACSEC_RX_SC_KEY_INVALID_STS1_0 0xD084 +#define MACSEC_RX_SC_KEY_INVALID_STS0_0_T26X 0xD08C +#define MACSEC_TX_DEBUG_STATUS_0 0xD0C4 +#define MACSEC_TX_DEBUG_STATUS_0_T26X 0xD0D0 +#define 
MACSEC_TX_DEBUG_TRIGGER_EN_0 0xD09C +#define MACSEC_TX_DEBUG_TRIGGER_EN_0_T26X 0xD0A8 +#define MACSEC_RX_DEBUG_STATUS_0 0xD0F8 +#define MACSEC_RX_DEBUG_STATUS_0_T26X 0xD104 +#define MACSEC_RX_DEBUG_TRIGGER_EN_0 0xD0E0 +#define MACSEC_RX_DEBUG_TRIGGER_EN_0_T26X 0xD0EC -#define MACSEC_TX_DEBUG_STATUS_0 0xD0C4 -#define MACSEC_TX_DEBUG_TRIGGER_EN_0 0xD09C -#define MACSEC_RX_DEBUG_STATUS_0 0xD0F8 -#define MACSEC_RX_DEBUG_TRIGGER_EN_0 0xD0E0 #ifdef DEBUG_MACSEC #define MACSEC_TX_DEBUG_CONTROL_0 0xD098 +#define MACSEC_TX_DEBUG_CONTROL_0_T26X 0xD0A4 #define MACSEC_DEBUG_BUF_CONFIG_0 0xD0C8 +#define MACSEC_DEBUG_BUF_CONFIG_0_T26X 0xD0D4 #define MACSEC_DEBUG_BUF_DATA_0(x) ((0xD0CCU) + ((x) * 4U)) +#define MACSEC_DEBUG_BUF_DATA_0_T26X(x) ((0xD0D8U) + ((x) * 4U)) #define MACSEC_RX_DEBUG_CONTROL_0 0xD0DC +#define MACSEC_RX_DEBUG_CONTROL_0_T26X 0xD0E8 #endif /* DEBUG_MACSEC */ #define MACSEC_CONTROL1 0xE000 @@ -115,7 +149,8 @@ #define MACSEC_KT_CONFIG_UPDATE OSI_BIT(31) #define MACSEC_KT_CONFIG_CTLR_SEL OSI_BIT(25) #define MACSEC_KT_CONFIG_RW OSI_BIT(24) -#define MACSEC_KT_CONFIG_INDEX_MASK (OSI_BIT(4) | OSI_BIT(3) | OSI_BIT(2) |\ +#define MACSEC_KT_CONFIG_INDEX_MASK (OSI_BIT(6) | OSI_BIT(5) |\ + OSI_BIT(4) | OSI_BIT(3) | OSI_BIT(2) |\ OSI_BIT(1) | OSI_BIT(0)) #define MACSEC_KT_ENTRY_VALID OSI_BIT(0) /** @} */ @@ -144,7 +179,8 @@ #define MACSEC_LUT_CONFIG_LUT_SEL_MASK (OSI_BIT(18) | OSI_BIT(17) |\ OSI_BIT(16)) #define MACSEC_LUT_CONFIG_LUT_SEL_SHIFT 16 -#define MACSEC_LUT_CONFIG_INDEX_MASK (OSI_BIT(4) | OSI_BIT(3) | OSI_BIT(2) |\ +#define MACSEC_LUT_CONFIG_INDEX_MASK (OSI_BIT(6) | OSI_BIT(5) |\ + OSI_BIT(4) | OSI_BIT(3) | OSI_BIT(2) |\ OSI_BIT(1) | OSI_BIT(0)) /** @} */ /** @@ -153,9 +189,9 @@ * @brief Bit definitions of MACSEC_INTERRUPT_COMMON_STATUS register * @{ */ -#define MACSEC_COMMON_SR_SFTY_ERR OSI_BIT(2) -#define MACSEC_COMMON_SR_RX OSI_BIT(1) -#define MACSEC_COMMON_SR_TX OSI_BIT(0) +#define MACSEC_COMMON_SR_SFTY_ERR OSI_BIT(2) +#define MACSEC_COMMON_SR_RX OSI_BIT(1) 
+#define MACSEC_COMMON_SR_TX OSI_BIT(0) /** @} */ /* Helper MACROS to set which LUTs to be cleared in error scenario */ @@ -464,8 +500,12 @@ #define MACSEC_LUT_AN3_VALID OSI_BIT(16) /* DVLAN packet in LUT_DATA[6] register */ #define MACSEC_TX_SCI_LUT_DVLAN_PKT OSI_BIT(21) +#define MACSEC_TX_SCI_LUT_DVLAN_PKT_T26X OSI_BIT(23) + /* DVLAN outer/inner tag select in LUT_DATA[6] register */ -#define MACSEC_TX_SCI_LUT_DVLAN_OUTER_INNER_TAG_SEL OSI_BIT(22) +#define MACSEC_TX_SCI_LUT_DVLAN_OUTER_INNER_TAG_SEL OSI_BIT(22) +#define MACSEC_TX_SCI_LUT_DVLAN_OUTER_INNER_TAG_SEL_T26X OSI_BIT(24) + /* SA State LUT entry valid in LUT_DATA[0] register */ #define MACSEC_SA_STATE_LUT_ENTRY_VALID OSI_BIT(0) @@ -486,10 +526,12 @@ #ifdef HSI_SUPPORT /* Set RX ISR set interrupt status bit */ #define MACSEC_RX_ISR_SET 0x4050U +#define MACSEC_RX_ISR_SET_T26X 0x4058U /* Set TX ISR set interrupt status bit */ #define MACSEC_TX_ISR_SET 0x4010U /* Set Common ISR set interrupt status bit */ -#define MACSEC_COMMON_ISR_SET 0xd05cU +#define MACSEC_COMMON_ISR_SET 0xD05cU +#define MACSEC_COMMON_ISR_SET_T26X 0xD06cU #endif #endif /* INCLUDED_MACSEC_H */ diff --git a/osi/core/mgbe_core.c b/osi/core/mgbe_core.c index 323ce82..db24b9e 100644 --- a/osi/core/mgbe_core.c +++ b/osi/core/mgbe_core.c @@ -89,7 +89,18 @@ static nve32_t mgbe_mac_indir_addr_write(struct osi_core_priv_data *osi_core, nveu32_t addr_offset, nveu32_t value) { + const nveu32_t ac_msel_mask[OSI_MAX_MAC_IP_TYPES] = { + 0, + MGBE_MAC_INDIR_AC_MSEL, + MGBE_MAC_INDIR_AC_MSEL_T26X + }; + const nveu32_t ac_msel_shift[OSI_MAX_MAC_IP_TYPES] = { + 0, + MGBE_MAC_INDIR_AC_MSEL_SHIFT, + MGBE_MAC_INDIR_AC_MSEL_SHIFT_T264 + }; void *base = osi_core->base; + nveu32_t mac = osi_core->mac; nveu32_t addr = 0; nve32_t ret = 0; @@ -100,9 +111,8 @@ static nve32_t mgbe_mac_indir_addr_write(struct osi_core_priv_data *osi_core, addr = osi_readla(osi_core, (nveu8_t *)base + MGBE_MAC_INDIR_AC); /* update Mode Select */ - addr &= ~(MGBE_MAC_INDIR_AC_MSEL); - addr 
|= ((mc_no << MGBE_MAC_INDIR_AC_MSEL_SHIFT) & - MGBE_MAC_INDIR_AC_MSEL); + addr &= ~ac_msel_mask[mac]; + addr |= ((mc_no << ac_msel_shift[mac]) & ac_msel_mask[mac]); /* update Address Offset */ addr &= ~(MGBE_MAC_INDIR_AC_AOFF); @@ -148,7 +158,18 @@ static nve32_t mgbe_mac_indir_addr_read(struct osi_core_priv_data *osi_core, nveu32_t addr_offset, nveu32_t *value) { + const nveu32_t ac_msel_mask[OSI_MAX_MAC_IP_TYPES] = { + 0, + MGBE_MAC_INDIR_AC_MSEL, + MGBE_MAC_INDIR_AC_MSEL_T26X + }; + const nveu32_t ac_msel_shift[OSI_MAX_MAC_IP_TYPES] = { + 0, + MGBE_MAC_INDIR_AC_MSEL_SHIFT, + MGBE_MAC_INDIR_AC_MSEL_SHIFT_T264 + }; void *base = osi_core->base; + nveu32_t mac = osi_core->mac; nveu32_t addr = 0; nve32_t ret = 0; @@ -156,9 +177,8 @@ static nve32_t mgbe_mac_indir_addr_read(struct osi_core_priv_data *osi_core, addr = osi_readla(osi_core, (nveu8_t *)base + MGBE_MAC_INDIR_AC); /* update Mode Select */ - addr &= ~(MGBE_MAC_INDIR_AC_MSEL); - addr |= ((mc_no << MGBE_MAC_INDIR_AC_MSEL_SHIFT) & - MGBE_MAC_INDIR_AC_MSEL); + addr &= ~ac_msel_mask[mac]; + addr |= ((mc_no << ac_msel_shift[mac]) & ac_msel_mask[mac]); /* update Address Offset */ addr &= ~(MGBE_MAC_INDIR_AC_AOFF); @@ -206,17 +226,28 @@ fail: static nve32_t mgbe_filter_args_validate(struct osi_core_priv_data *const osi_core, const struct osi_filter *filter) { + struct core_local *l_core = (struct core_local *)(void *)osi_core; + const nveu32_t idx_max[OSI_MAX_MAC_IP_TYPES] = { + 0, + OSI_MGBE_MAX_MAC_ADDRESS_FILTER, + OSI_MGBE_MAX_MAC_ADDRESS_FILTER_T26X + }; + const nveu64_t chansel_max[OSI_MAX_MAC_IP_TYPES] = { + 0, + MGBE_MAC_XDCS_DMA_MAX, + MGBE_MAC_XDCS_DMA_MAX_T26X + }; + nveu32_t mac = osi_core->mac; nveu32_t idx = filter->index; nveu32_t dma_routing_enable = filter->dma_routing; nveu32_t dma_chan = filter->dma_chan; nveu32_t addr_mask = filter->addr_mask; nveu32_t src_dest = filter->src_dest; - nveu32_t dma_chansel = filter->dma_chansel; + nveu64_t dma_chansel = filter->dma_chansel; nve32_t ret = 0; - (void) 
osi_core; - /* check for valid index (0 to 31) */ - if (idx >= OSI_MGBE_MAX_MAC_ADDRESS_FILTER) { + /* check for valid index */ + if (idx >= idx_max[mac]) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "invalid MAC filter index\n", idx); @@ -224,8 +255,8 @@ static nve32_t mgbe_filter_args_validate(struct osi_core_priv_data *const osi_co goto fail; } - /* check for DMA channel index (0 to 9) */ - if ((dma_chan > (OSI_MGBE_MAX_NUM_CHANS - 0x1U)) && + /* check for DMA channel index */ + if ((dma_chan > (l_core->num_max_chans - 0x1U)) && (dma_chan != OSI_CHAN_ANY)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, "invalid dma channel\n", @@ -235,7 +266,7 @@ static nve32_t mgbe_filter_args_validate(struct osi_core_priv_data *const osi_co } /* validate dma_chansel argument */ - if (dma_chansel > MGBE_MAC_XDCS_DMA_MAX) { + if (dma_chansel > chansel_max[mac]) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, "invalid dma_chansel value\n", dma_chansel); @@ -273,6 +304,313 @@ fail: return ret; } +/** + * @brief check_mac_addr - Compare macaddress with rchannel address + * + * Algorithm: This function just validates macaddress with rchannel address. + * + * @param[in] mac_addr: Mac address. + * @param[in] rch_addr: Receive channel address. + * + * @retval 0 on success + * @retval -1 on failure. + */ +static nve32_t check_mac_addr(nveu8_t const *mac_addr, nveu8_t *rch_addr) +{ + nve32_t i = 0; + nve32_t ret = OSI_NONE; + + for (i = 0; i < 6; i ++) { + if (*(mac_addr + i) != *(rch_addr + i)) { + ret = -1; + break; + } + } + + return ret; +} + +/** + * @brief mgbe_free_rchlist_index - Free index. + * + * Algorithm: This function just free the Receive channel index. + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] rch_idx: Receive channel index. 
+ * + */ +static void mgbe_free_rchlist_index(struct osi_core_priv_data *osi_core, + const nve32_t rch_idx) { + osi_core->rch_index[rch_idx].in_use = OSI_NONE; + osi_core->rch_index[rch_idx].dch = 0; + osi_memset(&osi_core->rch_index[rch_idx].mac_address, 0, OSI_ETH_ALEN); +} + +/** + * @brief mgbe_get_rchlist_index - find free index + * + * Algorithm: This function gets free index for receive channel list. + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] mac_addr: Mac address. + * + * @retval 0 on success + * @retval -1 on failure. + +**/ +static nve32_t mgbe_get_rchlist_index(struct osi_core_priv_data *osi_core, + nveu8_t const *mac_addr) { + nve32_t ret = -1; + nveu32_t i = 0; + + if (mac_addr != OSI_NULL) { + for (i = 0; i < RCHLIST_SIZE; i++) { + if (osi_core->rch_index[i].in_use == OSI_NONE) { + continue; + } + if (check_mac_addr(mac_addr, + osi_core->rch_index[i].mac_address) + == OSI_NONE) { + ret = i; + goto done; + } + } + } + + for (i = 0; i < RCHLIST_SIZE; i++) { + if (osi_core->rch_index[i].in_use == OSI_NONE) { + ret = i; + break; + } + } +done: + return ret; + +} + +/** + * @brief mgbe_write_rchlist - add/update rchlist index with new value + * + * Algorithm: This function will write Receive channel list entry registers into HW. + * This function should be called 2 times, 1 for 0-31 channel update, + * 2nd for 32-47 channel update, data filed in 2nd read should be 0 for bit + * 48-63. + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] acc_mode: 1 - continuation, 0 - single acccess + * @param[in] addr: Rchlist register address. + * @param[in/out] data: Rchlist register data. + * @param[in] read_write: Rchlist read - 0, write - 1 + * + * @note MAC should be init and started. see osi_start_mac() + * + * @retval 0 on success + * @retval -1 on failure. 
+ */ +static nve32_t mgbe_rchlist_write(struct osi_core_priv_data *osi_core, + nveu32_t acc_mode, nveu32_t addr, + nveu32_t *data, nveu32_t read_write) +{ + nve32_t ret = 0; + nveu8_t *base = osi_core->base; + nveu32_t val = 0U; + if ((acc_mode != OSI_ENABLE) && (acc_mode != OSI_DISABLE)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Invalid acc_mode argment\n", + acc_mode); + ret = -1; + goto done; + } + + if ((read_write != OSI_ENABLE) && (read_write != OSI_DISABLE)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Invalid read_write argment\n", + read_write); + ret = -1; + goto done; + } + + /* Wait for ready */ + ret = osi_readl_poll_timeout((base + MGBE_MTL_RXP_IND_CS), + (osi_core->osd_ops.udelay), + (val), + ((val & MGBE_MTL_RXP_IND_CS_BUSY) == + OSI_NONE), + (MGBE_MTL_RCHlist_READ_UDELAY), + (MGBE_MTL_RCHlist_READ_RETRY)); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "Fail to read/write\n", + val); + ret = -1; + goto done; + } + + if (read_write == OSI_ENABLE) { + /* Write data into MTL_RXP_Indirect_Acc_Data */ + osi_writela(osi_core, *data, base + MGBE_MTL_RXP_IND_DATA); + } + + /* Program MTL_RXP_Indirect_Acc_Control_Status */ + val = osi_readla(osi_core, base + MGBE_MTL_RXP_IND_CS); + /* Reset ACCSEL bit */ + val &= ~MGBE_MTL_RXP_IND_CS_ACCSEL; + /* ACCSEL for Rxchlist 0x2 */ + val |= MGBE_MTL_RXP_IND_RCH_ACCSEL; + if (acc_mode == OSI_ENABLE){ + val |= (MGBE_MTL_RXP_IND_CS_CRWEN | MGBE_MTL_RXP_IND_CS_CRWSEL); + } else { + val &= ~(MGBE_MTL_RXP_IND_CS_CRWEN | MGBE_MTL_RXP_IND_CS_CRWSEL); + } + /* Set WRRDN for write */ + if (read_write == OSI_ENABLE) { + val |= MGBE_MTL_RXP_IND_CS_WRRDN; + } else { + val &= ~MGBE_MTL_RXP_IND_CS_WRRDN; + } + + /* Clear and add ADDR */ + val &= ~MGBE_MTL_RXP_IND_CS_ADDR; + val |= (addr & MGBE_MTL_RXP_IND_CS_ADDR); + /* Start write */ + val |= MGBE_MTL_RXP_IND_CS_BUSY; + osi_writela(osi_core, val, base + MGBE_MTL_RXP_IND_CS); + + /* Wait for complete */ + ret = 
osi_readl_poll_timeout((base + MGBE_MTL_RXP_IND_CS), + (osi_core->osd_ops.udelay), + (val), + ((val & MGBE_MTL_RXP_IND_CS_BUSY) == + OSI_NONE), + (MGBE_MTL_RCHlist_READ_UDELAY), + (MGBE_MTL_RCHlist_READ_RETRY)); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "Fail to write\n", + ret); + ret = -1; + } + + if (read_write == OSI_DISABLE) { + /* Write data from MTL_RXP_Indirect_Acc_Data */ + *data = osi_readla(osi_core, base + MGBE_MTL_RXP_IND_DATA); + } + +done: + return ret; +} + +/** + * @brief mgbe_rchlist_add_del - Add or delete based on the chnnel + * + * Algorithm: This function will add or delete the receive channel list. + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] filter: OSI filter structure. + * @param[in] add_del: Rchlist add - 1, del - 0 + * @param[in/out] idx: Rchlist index. + * @param[in/out] rch: rch status + * if rch0_data and rch1_data is zero than rch is zero. + * if rch0_data and rch1_data is non zero than rch is nozero. + * + * @note MAC should be init and started. see osi_start_mac() + * + * @retval 0 on success + * @retval -1 on failure. 
+ */ +static nve32_t mgbe_rchlist_add_del(struct osi_core_priv_data *osi_core, + const struct osi_filter *filter, + nveu32_t add_del, nve32_t *idx, nveu32_t *rch) +{ + nveu32_t rch0_data = 0x0U; + nveu32_t rch1_data = 0x0U; + nve32_t rch_idx = 0; + nve32_t ret = 0; + nveu32_t dma_chan = filter->dma_chan; + + rch_idx = mgbe_get_rchlist_index(osi_core, filter->mac_addr); + if (rch_idx < 0) { + /* error case */ + ret = -1; + goto fail; + } + + if (idx != OSI_NULL) { + *idx = rch_idx; + } + + /* read currentl channel in rchlist for index */ + if (osi_core->rch_index[rch_idx].in_use != OSI_NONE) { + /* handle error */ + ret = mgbe_rchlist_write(osi_core, 0, + (rch_idx * 16) + 0, &rch0_data , 0); + if (ret != OSI_NONE) { + /* error case */ + goto fail; + } + if (osi_core->num_dma_chans > 32) { + ret = mgbe_rchlist_write(osi_core, 0, + (rch_idx * 16) + 1, &rch1_data , 0); + if (ret != OSI_NONE) { + /* error case */ + goto fail; + } + } + } + + if (add_del) { + /* add */ + if (dma_chan < 32) { + rch0_data |= (nveu32_t) (1 << dma_chan); + } else { + rch1_data |= (nveu32_t) (1 << (dma_chan - 1)); + } + } else { + /* add */ + if (dma_chan < 32) { + rch0_data &= (nveu32_t) ~(1 << dma_chan); + } else { + rch1_data &= (nveu32_t) ~(1 << (dma_chan - 1)); + } + } + + if (rch0_data == 0U && rch1_data == 0U) { + *rch = OSI_DISABLE; + } else { + *rch = OSI_ENABLE; + } + + /* coresponding to each index there will be 2 entries address 0_0 and 0_1 */ + ret = mgbe_rchlist_write(osi_core, 0, (rch_idx * 16) + 0, &rch0_data , 1); + if (ret != OSI_NONE) { + /* error case */ + goto fail; + } + + if (osi_core->num_dma_chans > 32) { + ret = mgbe_rchlist_write(osi_core, 0, (rch_idx * 16) + 1, &rch1_data, 1); + if (ret != OSI_NONE) { + /* error case */ + goto fail; + } + } + + osi_core->rch_index[rch_idx].dch = rch1_data; + osi_core->rch_index[rch_idx].dch |= ((osi_core->rch_index[rch_idx].dch << 32) | rch0_data); + if (add_del) { + /* add */ + osi_core->rch_index[rch_idx].in_use = OSI_ENABLE; + 
osi_memcpy(&osi_core->rch_index[rch_idx], filter->mac_addr, OSI_ETH_ALEN); + } else { + /* delete */ + if (osi_core->rch_index[rch_idx].dch == 0) { + mgbe_free_rchlist_index(osi_core, rch_idx); + } + } +fail: + return ret; +} + /** * @brief mgbe_update_mac_addr_low_high_reg- Update L2 address in filter * register @@ -295,15 +633,24 @@ static nve32_t mgbe_update_mac_addr_low_high_reg( struct osi_core_priv_data *const osi_core, const struct osi_filter *filter) { + const nveu32_t dch_dpc_reg[OSI_MAX_MAC_IP_TYPES] = { + 0xFF, /* place holder */ + MGBE_MAC_DCHSEL, + MGBE_MAC_DPCSEL + }; + nveu32_t idx = filter->index; nveu32_t dma_chan = filter->dma_chan; nveu32_t addr_mask = filter->addr_mask; nveu32_t src_dest = filter->src_dest; const nveu8_t *addr = filter->mac_addr; - nveu32_t dma_chansel = filter->dma_chansel; - nveu32_t xdcs_check; + nveu64_t dma_chansel = filter->dma_chansel; + nveu32_t dpsel_value; nveu32_t value = 0x0U; nve32_t ret = 0; + nve32_t rch_idx = 0; + nveu32_t rch = 0x0U; + nveu32_t xdcs_dds; /* Validate filter values */ if (mgbe_filter_args_validate(osi_core, filter) < 0) { @@ -317,27 +664,68 @@ static nve32_t mgbe_update_mac_addr_low_high_reg( /* read current value at index preserve XDCS current value */ ret = mgbe_mac_indir_addr_read(osi_core, - MGBE_MAC_DCHSEL, + dch_dpc_reg[osi_core->mac], idx, - &xdcs_check); + &xdcs_dds); if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "indirect register read failed\n", 0ULL); goto fail; } - /* preserve last XDCS bits */ - xdcs_check &= MGBE_MAC_XDCS_DMA_MAX; + dpsel_value = xdcs_dds; + /* Incase of T264 + 1. DCH filed is extended to have 48 channel number as binary in DCH field so + it shhould be used by detfault for all unicast packets + 2. XDCH and XDCHT are used to tell about 2 cases but as we have number of DMA + channels as 48 , we should use XDCH as rchlistindex and XDCHT as 1 + algo: + 1. write DCH bit as binary representation for channel number by default + 2. 
DDS bit for that index should be 0 , xDCS/XDCT is don't care + 3. If request to add one more channel for that index + (check by seeing DCH field is not 0xffff and AE bit 0x1 or not) + a) set DDS bit to 1 for that L2 index + b) write hot bit representation of channel in rchlist for free index. + use a 48*64 bit array for book keeping + set bits for earlier dch and new dch as hot bit representation + c) set XDCS as rchindex and XDCST as 1 + d) DCH filed is don't care but we need to have non zero value + 4. If request to delete one channel (expect all delete request one after other) + a) if DDS filed is 1 for that index, if yes, it is using rxchanlist. + b) read rxchlist and update bit as channel asked for delate, if only 1 channel + get binary repesentation of that channel, update DCH + reset XDCHT and XDCH to 0 for index + DDS filed set to 0 for index + c) if DDS files is 0, do not duplication for that index + clear DCH files to 0xffff, AE bit set to 0x0 + */ - /* High address reset DCS and AE bits and XDCS in MAC_DChSel_IndReg */ + /* preserve last XDCS bits */ + xdcs_dds &= ((osi_core->mac == OSI_MAC_HW_MGBE) ? + MGBE_MAC_XDCS_DMA_MAX : UINT_MAX); + + /* High address reset DCS and AE bits and XDCS in MAC_DChSel_IndReg or + * reset DDS bit in DPCSel reg + */ if ((filter->oper_mode & OSI_OPER_ADDR_DEL) != OSI_NONE) { - xdcs_check &= ~OSI_BIT(dma_chan); - ret = mgbe_mac_indir_addr_write(osi_core, MGBE_MAC_DCHSEL, - idx, xdcs_check); - value &= ~(MGBE_MAC_ADDRH_DCS); + if (osi_core->mac != OSI_MAC_HW_MGBE_T26X && + filter->pkt_dup != OSI_NONE) { + ret = mgbe_rchlist_add_del(osi_core, filter, 0, &rch_idx, &rch); + } + if (osi_core->mac != OSI_MAC_HW_MGBE_T26X || rch == OSI_DISABLE) { + xdcs_dds &= ((osi_core->mac == OSI_MAC_HW_MGBE) ? 
+ ~OSI_BIT(dma_chan) : ~OSI_BIT(1)); + ret = mgbe_mac_indir_addr_write(osi_core, + dch_dpc_reg[osi_core->mac], + idx, xdcs_dds); + value &= ~(MGBE_MAC_ADDRH_DCS); + } /* XDCS values is always maintained */ - if (xdcs_check == OSI_DISABLE) { + if ((osi_core->mac == OSI_MAC_HW_MGBE) && + (xdcs_dds == OSI_DISABLE)) { + value &= ~(MGBE_MAC_ADDRH_AE); + } else { value &= ~(MGBE_MAC_ADDRH_AE); } @@ -350,15 +738,14 @@ static nve32_t mgbe_update_mac_addr_low_high_reg( /* Add DMA channel to value in binary */ value = OSI_NONE; value |= ((dma_chan << MGBE_MAC_ADDRH_DCS_SHIFT) & MGBE_MAC_ADDRH_DCS); - if (idx != 0U) { /* Add Address mask */ value |= ((addr_mask << MGBE_MAC_ADDRH_MBC_SHIFT) & - MGBE_MAC_ADDRH_MBC); + MGBE_MAC_ADDRH_MBC); /* Setting Source/Destination Address match valid */ value |= ((src_dest << MGBE_MAC_ADDRH_SA_SHIFT) & - MGBE_MAC_ADDRH_SA); + MGBE_MAC_ADDRH_SA); } osi_writela(osi_core, @@ -371,10 +758,60 @@ static nve32_t mgbe_update_mac_addr_low_high_reg( ((nveu32_t)addr[2] << 16) | ((nveu32_t)addr[3] << 24)), (nveu8_t *)osi_core->base + MGBE_MAC_ADDRL((idx))); - /* Write XDCS configuration into MAC_DChSel_IndReg(x) */ - /* Append DCS DMA channel to XDCS hot bit selection */ - xdcs_check |= (OSI_BIT(dma_chan) | dma_chansel); - ret = mgbe_mac_indir_addr_write(osi_core, MGBE_MAC_DCHSEL, idx, xdcs_check); + if (osi_core->mac != OSI_MAC_HW_MGBE_T26X) { + /* Write XDCS configuration into MAC_DChSel_IndReg(x) */ + /* Append DCS DMA channel to XDCS hot bit selection */ + xdcs_dds |= (OSI_BIT(dma_chan) | dma_chansel); + ret = mgbe_mac_indir_addr_write(osi_core, MGBE_MAC_DCHSEL, + idx, xdcs_dds); + } else { + /* check for packet duplicate 0 - disable, 1 - enable */ + if (filter->pkt_dup != OSI_NONE) { + dpsel_value |= MGBE_MAC_DPCSEL_DDS; + ret = mgbe_rchlist_add_del(osi_core, filter, + 1, &rch_idx, &rch); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "rchlist add del failed\n", 0ULL); + goto fail; + } + value = OSI_NONE; + + if (idx != 0U) 
{ + /* Add Address mask */ + value |= ((addr_mask << MGBE_MAC_ADDRH_MBC_SHIFT) & + MGBE_MAC_ADDRH_MBC); + + /* Setting Source/Destination Address match valid */ + value |= ((src_dest << MGBE_MAC_ADDRH_SA_SHIFT) & + MGBE_MAC_ADDRH_SA); + } + + value |= ((rch_idx << MGBE_MAC_ADDRH_DCS_SHIFT) & MGBE_MAC_ADDRH_DCS); + osi_writela(osi_core, + ((nveu32_t)addr[4] | ((nveu32_t)addr[5] << 8) | + MGBE_MAC_ADDRH_AE | value), + (nveu8_t *)osi_core->base + MGBE_MAC_ADDRH((idx))); + + osi_writela(osi_core, + ((nveu32_t)addr[0] | ((nveu32_t)addr[1] << 8) | + ((nveu32_t)addr[2] << 16) | ((nveu32_t)addr[3] << 24)), + (nveu8_t *)osi_core->base + MGBE_MAC_ADDRL((idx))); + } else { + /* No duplication */ + xdcs_dds &= ~(MGBE_MAC_XDCS_DMA_MAX | MGBE_MAC_XDCST_DMA_MAX); + dpsel_value &= ~MGBE_MAC_DPCSEL_DDS; + } + /* TODO add error check */ + ret = mgbe_mac_indir_addr_write(osi_core, MGBE_MAC_DPCSEL, + idx, dpsel_value); + + if (osi_core->mac != OSI_MAC_HW_MGBE_T26X) { + ret = mgbe_mac_indir_addr_write(osi_core, + MGBE_MAC_DCHSEL, idx, + xdcs_dds); + } + } } fail: return ret; @@ -450,6 +887,11 @@ static nve32_t mgbe_l3l4_filter_write(struct osi_core_priv_data *osi_core, void *base = osi_core->base; nveu32_t addr = 0; nve32_t ret = 0; + const nveu32_t fnum[OSI_MAX_MAC_IP_TYPES] = { + MGBE_MAC_L3L4_ADDR_CTR_IDDR_FNUM, + MGBE_MAC_L3L4_ADDR_CTR_IDDR_FNUM, + MGBE_MAC_L3L4_ADDR_CTR_IDDR_FNUM_T264, + }; /* Write MAC_L3_L4_Data register value */ osi_writela(osi_core, value, @@ -460,9 +902,9 @@ static nve32_t mgbe_l3l4_filter_write(struct osi_core_priv_data *osi_core, (nveu8_t *)base + MGBE_MAC_L3L4_ADDR_CTR); /* update filter number */ - addr &= ~(MGBE_MAC_L3L4_ADDR_CTR_IDDR_FNUM); + addr &= ~(fnum[osi_core->mac]); addr |= ((filter_no << MGBE_MAC_L3L4_ADDR_CTR_IDDR_FNUM_SHIFT) & - MGBE_MAC_L3L4_ADDR_CTR_IDDR_FNUM); + (fnum[osi_core->mac])); /* update filter type */ addr &= ~(MGBE_MAC_L3L4_ADDR_CTR_IDDR_FTYPE); @@ -523,10 +965,22 @@ static nve32_t mgbe_config_l3l4_filters(struct 
osi_core_priv_data *const osi_cor #endif /* !OSI_STRIPPED_LIB */ nveu32_t l3_addr1_reg = 0; nveu32_t ctr_reg = 0; - nveu32_t filter_no = filter_no_r & (OSI_MGBE_MAX_L3_L4_FILTER - 1U); + const nveu32_t max_filter_no[OSI_MAX_MAC_IP_TYPES] = { + EQOS_MAX_L3_L4_FILTER - 1U, + OSI_MGBE_MAX_L3_L4_FILTER - 1U, + OSI_MGBE_MAX_L3_L4_FILTER_T264 - 1U, + }; + nveu32_t filter_no = filter_no_r; nve32_t err; nve32_t ret = -1; + if (filter_no_r > max_filter_no[osi_core->mac]) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "Filter number is more than allowed\n", + filter_no_r); + goto exit_func; + } + prepare_l3l4_registers(osi_core, l3_l4, #ifndef OSI_STRIPPED_LIB &l3_addr0_reg, @@ -647,11 +1101,11 @@ static nve32_t mgbe_config_vlan_filtering(struct osi_core_priv_data *osi_core, } /* Read MAC PFR value set VTFE bit */ - value = osi_readla(osi_core, base + MGBE_MAC_PFR); + value = osi_readla(osi_core, base + MAC_PKT_FILTER_REG); value &= ~(MGBE_MAC_PFR_VTFE); value |= ((filter_enb_dis << MGBE_MAC_PFR_VTFE_SHIFT) & MGBE_MAC_PFR_VTFE); - osi_writela(osi_core, value, base + MGBE_MAC_PFR); + osi_writela(osi_core, value, base + MAC_PKT_FILTER_REG); /* Read MAC VLAN TR register value set VTIM bit */ value = osi_readla(osi_core, base + MGBE_MAC_VLAN_TR); @@ -971,6 +1425,8 @@ static nve32_t mgbe_frp_write(struct osi_core_priv_data *osi_core, /* Program MTL_RXP_Indirect_Acc_Control_Status */ val = osi_readla(osi_core, base + MGBE_MTL_RXP_IND_CS); + /* Reset RCH bit */ + val &= ~MGBE_MTL_RXP_IND_RCH_ACCSEL; /* Currently acc_sel is always 0 which means FRP Indirect Access Selection * is Access FRP Instruction Table @@ -1025,8 +1481,34 @@ static nve32_t mgbe_update_frp_entry(struct osi_core_priv_data *const osi_core, { nveu32_t val = 0U, tmp = 0U; nve32_t ret = -1; + nveu32_t rch0_data = 0x0U; + nveu32_t rch1_data = 0x0U; + nve32_t rch_idx = 0; nveu32_t pos = (pos_val & 0xFFU); + if (osi_core->mac == OSI_MAC_HW_MGBE_T26X && data->dcht == OSI_ENABLE) { + if (data->accept_frame == 
OSI_ENABLE) { + rch0_data = (nveu32_t) (data->dma_chsel & 0xFFFFFFFF); + rch1_data = (nveu32_t) ((data->dma_chsel >> 32) & 0xFFFFFFFF); + ret = mgbe_rchlist_write(osi_core, OSI_DISABLE, (rch_idx * 16) + 0, + &rch0_data, OSI_ENABLE); + if (ret != OSI_NONE) { + /* error case */ + ret = -1; + goto done; + } + if (osi_core->num_dma_chans > 32) { + mgbe_rchlist_write(osi_core, OSI_DISABLE, (rch_idx * 16) + 1, + &rch1_data, OSI_ENABLE); + if (ret != OSI_NONE) { + /* error case */ + ret = -1; + goto done; + } + } + } + } + /** Write Match Data into IE0 **/ val = data->match_data; ret = mgbe_frp_write(osi_core, OSI_DISABLE, MGBE_MTL_FRP_IE0(pos), val); @@ -1063,12 +1545,13 @@ static nve32_t mgbe_update_frp_entry(struct osi_core_priv_data *const osi_core, /* Set NIC Bit */ val |= MGBE_MTL_FRP_IE2_NC; } + if (osi_core->mac == OSI_MAC_HW_MGBE_T26X && data->dcht == OSI_ENABLE) { + val |= MGBE_MTL_FRP_IE2_DCHT; + } tmp = data->frame_offset; val |= ((tmp << MGBE_MTL_FRP_IE2_FO_SHIFT) & MGBE_MTL_FRP_IE2_FO); tmp = data->ok_index; val |= ((tmp << MGBE_MTL_FRP_IE2_OKI_SHIFT) & MGBE_MTL_FRP_IE2_OKI); - tmp = data->dma_chsel; - val |= ((tmp << MGBE_MTL_FRP_IE2_DCH_SHIFT) & MGBE_MTL_FRP_IE2_DCH); ret = mgbe_frp_write(osi_core, OSI_DISABLE, MGBE_MTL_FRP_IE2(pos), val); if (ret < 0) { /* FRP IE2 Write fail */ @@ -1077,13 +1560,21 @@ static nve32_t mgbe_update_frp_entry(struct osi_core_priv_data *const osi_core, } /** Write DCH into IE3 **/ - val = (data->dma_chsel & MGBE_MTL_FRP_IE3_DCH_MASK); + if (osi_core->mac == OSI_MAC_HW_MGBE_T26X) { + if (data->dcht == OSI_DISABLE) { + val = (data->dma_chsel & MGBE_MTL_FRP_IE3_DCH_MASK); + } else { + val = 0; + } + } else { + val = (data->dma_chsel & MGBE_MTL_FRP_IE3_DCH_MASK); + } + ret = mgbe_frp_write(osi_core, OSI_DISABLE, MGBE_MTL_FRP_IE3(pos), val); if (ret < 0) { /* DCH Write fail */ ret = -1; } - done: return ret; } @@ -1144,9 +1635,9 @@ static nve32_t mgbe_configure_mtl_queue(struct osi_core_priv_data *osi_core, { nveu32_t qinx = hw_qinx 
& 0xFU; /* - * Total available Rx queue size is 192KB. + * Total available Rx queue size is 192KB in T23x, 256KB in T26x. * Below is the destribution among the Rx queueu - - * Q0 - 160KB + * Q0 - 160KB for T23x and 224KB for T26x * Q1 to Q8 - 2KB each = 8 * 2KB = 16KB * Q9 - 16KB (MVBCQ) * @@ -1154,14 +1645,35 @@ static nve32_t mgbe_configure_mtl_queue(struct osi_core_priv_data *osi_core, * * vale= (size in KB / 256) - 1U */ - const nveu32_t rx_fifo_sz[OSI_MGBE_MAX_NUM_QUEUES] = { - FIFO_SZ(160U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), - FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(16U), + const nveu32_t rx_fifo_sz[OSI_MAX_MAC_IP_TYPES][OSI_MGBE_MAX_NUM_QUEUES] = { + { + 0U ,0U ,0U ,0U ,0U ,0U ,0U ,0U ,0U ,0U + }, + { + FIFO_SZ(160U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), + FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), + FIFO_SZ(2U), FIFO_SZ(16U) + }, + { + FIFO_SZ(224U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), + FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), + FIFO_SZ(2U), FIFO_SZ(16U) + }, }; const nveu32_t tx_fifo_sz[OSI_MGBE_MAX_NUM_QUEUES] = { TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, }; + const nveu32_t ufpga_tx_fifo_sz[OSI_MGBE_MAX_NUM_QUEUES] = { + TX_FIFO_SZ_UFPGA, TX_FIFO_SZ_UFPGA, TX_FIFO_SZ_UFPGA, + TX_FIFO_SZ_UFPGA, TX_FIFO_SZ_UFPGA, TX_FIFO_SZ_UFPGA, + TX_FIFO_SZ_UFPGA, TX_FIFO_SZ_UFPGA, TX_FIFO_SZ_UFPGA, + TX_FIFO_SZ_UFPGA + }; + const nveu32_t ufpga_rx_fifo_sz[OSI_MGBE_MAX_NUM_QUEUES] = { + FIFO_SZ(40U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), + FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(8U), + }; const nveu32_t rfd_rfa[OSI_MGBE_MAX_NUM_QUEUES] = { FULL_MINUS_32_K, FULL_MINUS_1_5K, @@ -1209,20 +1721,37 @@ static nve32_t mgbe_configure_mtl_queue(struct osi_core_priv_data *osi_core, goto fail; } - value = (tx_fifo_sz[qinx] << MGBE_MTL_TXQ_SIZE_SHIFT); + value = osi_readla(osi_core, (nveu8_t *)osi_core->base 
+ + MGBE_MTL_CHX_TX_OP_MODE(qinx)); + value &= ~MGBE_MTL_Q_SIZE_MASK; + if (osi_core->pre_sil == OSI_ENABLE) { + value |= (ufpga_tx_fifo_sz[qinx] << MGBE_MTL_TXQ_SIZE_SHIFT); + } else { + value |= (tx_fifo_sz[qinx] << MGBE_MTL_TXQ_SIZE_SHIFT); + } /* Enable Store and Forward mode */ value |= MGBE_MTL_TSF; /*TTC not applicable for TX*/ /* Enable TxQ */ value |= MGBE_MTL_TXQEN; - value |= (osi_core->tc[qinx] << MGBE_MTL_CHX_TX_OP_MODE_Q2TC_SH); + + if (osi_core->mac == OSI_MAC_HW_MGBE) { + /* Q2TCMAP is reserved for T26x */ + value &= ~MGBE_MTL_TX_OP_MODE_Q2TCMAP; + value |= (osi_core->tc[qinx] << MGBE_MTL_CHX_TX_OP_MODE_Q2TC_SH); + } osi_writela(osi_core, value, (nveu8_t *) osi_core->base + MGBE_MTL_CHX_TX_OP_MODE(qinx)); /* read RX Q0 Operating Mode Register */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_CHX_RX_OP_MODE(qinx)); - value |= (rx_fifo_sz[qinx] << MGBE_MTL_RXQ_SIZE_SHIFT); + value &= ~MGBE_MTL_Q_SIZE_MASK; + if (osi_core->pre_sil == OSI_ENABLE) { + value |= (ufpga_rx_fifo_sz[qinx] << MGBE_MTL_RXQ_SIZE_SHIFT); + } else { + value |= (rx_fifo_sz[osi_core->mac][qinx] << MGBE_MTL_RXQ_SIZE_SHIFT); + } /* Enable Store and Forward mode */ value |= MGBE_MTL_RSF; /* Enable HW flow control */ @@ -1482,6 +2011,11 @@ static nve32_t mgbe_hsi_configure(struct osi_core_priv_data *const osi_core, XPCS_WRAP_INTERRUPT_CONTROL, T26X_XPCS_WRAP_INTERRUPT_CONTROL }; + const nveu32_t intr_en[OSI_MAX_MAC_IP_TYPES] = { + 0, + MGBE_WRAP_COMMON_INTR_ENABLE, + MGBE_T26X_WRAP_COMMON_INTR_ENABLE + }; if (enable == OSI_ENABLE) { osi_core->hsi.enabled = OSI_ENABLE; @@ -1571,12 +2105,12 @@ static nve32_t mgbe_hsi_configure(struct osi_core_priv_data *const osi_core, (nveu8_t *)osi_core->base + MGBE_DMA_ECC_INTERRUPT_ENABLE); value = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_WRAP_COMMON_INTR_ENABLE); + intr_en[osi_core->mac]); value |= MGBE_REGISTER_PARITY_ERR; value |= MGBE_CORE_CORRECTABLE_ERR; value |= MGBE_CORE_UNCORRECTABLE_ERR; 
osi_writela(osi_core, value, (nveu8_t *)osi_core->base + - MGBE_WRAP_COMMON_INTR_ENABLE); + intr_en[osi_core->mac]); value = osi_readla(osi_core, (nveu8_t *)osi_core->xpcs_base + xpcs_intr_ctrl_reg[osi_core->mac]); @@ -1638,12 +2172,12 @@ static nve32_t mgbe_hsi_configure(struct osi_core_priv_data *const osi_core, (nveu8_t *)osi_core->base + MGBE_DMA_ECC_INTERRUPT_ENABLE); value = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_WRAP_COMMON_INTR_ENABLE); + intr_en[osi_core->mac]); value &= ~MGBE_REGISTER_PARITY_ERR; value &= ~MGBE_CORE_CORRECTABLE_ERR; value &= ~MGBE_CORE_UNCORRECTABLE_ERR; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + - MGBE_WRAP_COMMON_INTR_ENABLE); + intr_en[osi_core->mac]); value = osi_readla(osi_core, (nveu8_t *)osi_core->xpcs_base + xpcs_intr_ctrl_reg[osi_core->mac]); @@ -1724,6 +2258,11 @@ static nve32_t mgbe_hsi_inject_err(struct osi_core_priv_data *const osi_core, static void mgbe_configure_mac(struct osi_core_priv_data *osi_core) { nveu32_t value = 0U, max_queue = 0U, i = 0U; + const nveu32_t intr_en[OSI_MAX_MAC_IP_TYPES] = { + 0, + MGBE_WRAP_COMMON_INTR_ENABLE, + MGBE_T26X_WRAP_COMMON_INTR_ENABLE + }; /* TODO: Need to check if we need to enable anything in Tx configuration * value = osi_readla(osi_core, @@ -1809,13 +2348,27 @@ static void mgbe_configure_mac(struct osi_core_priv_data *osi_core) value |= MGBE_IMR_TSIE; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MAC_IER); + /* Mask the mmc counters interrupts */ + value = MGBE_MMC_IPC_RX_INT_MASK_VALUE; + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + + MGBE_MMC_IPC_RX_INT_MASK); + /* Enable common interrupt at wrapper level */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_WRAP_COMMON_INTR_ENABLE); + intr_en[osi_core->mac]); value |= MGBE_MAC_SBD_INTR; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + - MGBE_WRAP_COMMON_INTR_ENABLE); + intr_en[osi_core->mac]); + if (osi_core->mac == OSI_MAC_HW_MGBE_T26X) { + /* 
configure L3L4 filter index to be 48 in Rx desc2 */ + value = osi_readla(osi_core, + (nveu8_t *)osi_core->base + MAC_PKT_FILTER_REG); + value &= ~MGBE_MAC_PFR_DHLFRS_MASK; + value |= MGBE_MAC_PFR_DHLFRS; + osi_writela(osi_core, value, + (nveu8_t *)osi_core->base + MAC_PKT_FILTER_REG); + } /* Enable VLAN configuration */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MAC_VLAN_TR); @@ -1909,16 +2462,32 @@ static nve32_t mgbe_configure_pdma(struct osi_core_priv_data *osi_core) { nveu32_t value = 0; nve32_t ret = 0; - + nveu32_t pbl = 0; nveu32_t i, j, pdma_chan, vdma_chan; - //TBD: check values for T264 const nveu32_t tx_orr = (MGBE_DMA_CHX_TX_CNTRL2_ORRQ_RECOMMENDED / osi_core->num_of_pdma); - const nveu32_t tx_pbl = ((((MGBE_TXQ_SIZE / osi_core->num_of_pdma) - - osi_core->mtu) / (MGBE_AXI_DATAWIDTH / 8U)) - 5U); const nveu32_t rx_owrq = (MGBE_DMA_CHX_RX_CNTRL2_OWRQ_MCHAN / osi_core->num_of_pdma); - const nveu32_t rx_pbl = ((MGBE_RXQ_SIZE / osi_core->num_of_pdma) / 2U); + const nveu32_t tx_pbl = + ((((MGBE_TXQ_SIZE / OSI_MGBE_MAX_NUM_QUEUES) - + osi_core->mtu) / (MGBE_AXI_DATAWIDTH / 8U)) - 5U); + /* Total Rx Queue size is 256KB */ + const nveu32_t rx_pbl[OSI_MGBE_MAX_NUM_QUEUES] = { + Q_SZ_DEPTH(224U) / 2U, Q_SZ_DEPTH(2U) / 2U, Q_SZ_DEPTH(2U) / 2U, + Q_SZ_DEPTH(2U) / 2U, Q_SZ_DEPTH(2U) / 2U, Q_SZ_DEPTH(2U) / 2U, + Q_SZ_DEPTH(2U) / 2U, Q_SZ_DEPTH(2U) / 2U, Q_SZ_DEPTH(2U) / 2U, + Q_SZ_DEPTH(16U) / 2U + }; + const nveu32_t tx_pbl_ufpga = + ((((MGBE_TXQ_SIZE_UFPGA / OSI_MGBE_MAX_NUM_QUEUES) - + osi_core->mtu) / (MGBE_AXI_DATAWIDTH / 8U)) - 5U); + /* uFPGA Rx Queue size is 64KB */ + const nveu32_t rx_pbl_ufpga[OSI_MGBE_MAX_NUM_QUEUES] = { + Q_SZ_DEPTH(40U)/2U, Q_SZ_DEPTH(2U)/2U, Q_SZ_DEPTH(2U)/2U, + Q_SZ_DEPTH(2U)/2U, Q_SZ_DEPTH(2U)/2U, Q_SZ_DEPTH(2U), + Q_SZ_DEPTH(2U)/2U, Q_SZ_DEPTH(2U)/2U, Q_SZ_DEPTH(2U)/2U, + Q_SZ_DEPTH(8U)/2U + }; for (i = 0 ; i < osi_core->num_of_pdma; i++) { pdma_chan = osi_core->pdma_data[i].pdma_chan; @@ -1930,16 +2499,16 @@ static 
nve32_t mgbe_configure_pdma(struct osi_core_priv_data *osi_core) /* * Formula for TxPBL calculation is * (TxPBL) < ((TXQSize - MTU)/(DATAWIDTH/8)) - 5 - * if TxPBL exceeds the value of 256 then we need to make - * use of 256 as the TxPBL else we should be using the - * value whcih we get after calculation by using above formula + * if TxPBL exceeds the value of 256 then we need to make use of 256 + * as the TxPBL else we should be using the value whcih we get after + * calculation by using above formula */ - if (tx_pbl>= MGBE_PDMA_CHX_EXTCFG_MAX_PBL) { - value |= MGBE_PDMA_CHX_EXTCFG_MAX_PBL_VAL; + if (osi_core->pre_sil == OSI_ENABLE) { + pbl = osi_valid_pbl_value(tx_pbl_ufpga); + value |= (pbl << MGBE_PDMA_CHX_EXTCFG_PBL_SHIFT); } else { - value |= ((tx_pbl / 8U) << - MGBE_PDMA_CHX_TXRX_EXTCFG_PBL_SHIFT) & - MGBE_PDMA_CHX_TXRX_EXTCFG_PBL_MASK; + pbl = osi_valid_pbl_value(tx_pbl); + value |= (pbl << MGBE_PDMA_CHX_EXTCFG_PBL_SHIFT); } ret = mgbe_dma_indir_addr_write(osi_core, MGBE_PDMA_CHX_TX_EXTCFG, pdma_chan, value); @@ -1948,19 +2517,21 @@ static nve32_t mgbe_configure_pdma(struct osi_core_priv_data *osi_core) "MGBE_PDMA_CHX_TX_EXTCFG failed\n", 0ULL); goto done; } - /* Update PDMA_CH(#i)_RxExtCfg register */ value = (rx_owrq << MGBE_PDMA_CHX_TXRX_EXTCFG_ORRQ_SHIFT); value |= (pdma_chan << MGBE_PDMA_CHX_TXRX_EXTCFG_P2TCMP_SHIFT) & MGBE_PDMA_CHX_TXRX_EXTCFG_P2TCMP_MASK; value |= MGBE_PDMA_CHX_TXRX_EXTCFG_PBLX8; - if (rx_pbl>= MGBE_PDMA_CHX_EXTCFG_MAX_PBL) { - value |= MGBE_PDMA_CHX_EXTCFG_MAX_PBL_VAL; + + if (osi_core->pre_sil == OSI_ENABLE) { + pbl = osi_valid_pbl_value(rx_pbl_ufpga[i]); + value |= (pbl << MGBE_PDMA_CHX_EXTCFG_PBL_SHIFT); } else { - value |= (((rx_pbl / 8U)) << - MGBE_PDMA_CHX_TXRX_EXTCFG_PBL_SHIFT) & - MGBE_PDMA_CHX_TXRX_EXTCFG_PBL_MASK; + pbl = osi_valid_pbl_value(rx_pbl[i]); + value |= (pbl << MGBE_PDMA_CHX_EXTCFG_PBL_SHIFT); } + + value |= MGBE_PDMA_CHX_RX_EXTCFG_RXPEN; ret = mgbe_dma_indir_addr_write(osi_core, MGBE_PDMA_CHX_RX_EXTCFG, 
pdma_chan, value); if (ret < 0) { @@ -1973,12 +2544,16 @@ static nve32_t mgbe_configure_pdma(struct osi_core_priv_data *osi_core) * pre-fetch threshold */ for (j = 0 ; j < osi_core->pdma_data[i].num_vdma_chans; j++) { vdma_chan = osi_core->pdma_data[i].vdma_chans[j]; - //TBD: check descriptor size value is correct for T264 - value = MGBE_XDMA_CHX_TXRX_DESC_CTRL_DCSZ & - MGBE_XDMA_CHX_TXRX_DESC_CTRL_DCSZ_MASK; - value |= (MGBE_XDMA_CHX_TXRX_DESC_CTRL_DPS << - MGBE_XDMA_CHX_TXRX_DESC_CTRL_DPS_SHIFT) & - MGBE_XDMA_CHX_TXRX_DESC_CTRL_DPS_MASK; + if (osi_core->pre_sil == OSI_ENABLE) { + value = MGBE_VDMA_CHX_TXRX_DESC_CTRL_DCSZ_UFPGA & + MGBE_VDMA_CHX_TXRX_DESC_CTRL_DCSZ_MASK; + } else { + value = MGBE_VDMA_CHX_TXRX_DESC_CTRL_DCSZ & + MGBE_VDMA_CHX_TXRX_DESC_CTRL_DCSZ_MASK; + } + value |= (MGBE_VDMA_CHX_TXRX_DESC_CTRL_DPS << + MGBE_VDMA_CHX_TXRX_DESC_CTRL_DPS_SHIFT) & + MGBE_VDMA_CHX_TXRX_DESC_CTRL_DPS_MASK; ret = mgbe_dma_indir_addr_write(osi_core, MGBE_VDMA_CHX_TX_DESC_CTRL, vdma_chan, value); if (ret < 0) { @@ -1995,6 +2570,20 @@ static nve32_t mgbe_configure_pdma(struct osi_core_priv_data *osi_core) } } } + + value = osi_readla(osi_core, + (nveu8_t *)osi_core->base + MGBE_DMA_MODE); + /* set DMA_Mode register DSCB bit */ + value |= MGBE_DMA_MODE_DSCB; + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_DMA_MODE); + /* poll for Tx/Rx Dcache calculations complete and fixed */ + ret = poll_check(osi_core, (nveu8_t *)osi_core->base + MGBE_DMA_MODE, + MGBE_DMA_MODE_DSCB, &value); + if (ret == -1) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "MGBE_DMA_MODE_DSCB timeout\n", 0ULL); + } + done: return ret; } @@ -2073,8 +2662,13 @@ done: static nve32_t mgbe_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) { #ifndef OSI_STRIPPED_LIB - nveu32_t sid[4] = { MGBE0_SID, MGBE1_SID, MGBE2_SID, MGBE3_SID }; + nveu32_t sid[3][4] = { + { 0U, 0U, 0U, 0U }, + { MGBE0_SID, MGBE1_SID, MGBE2_SID, MGBE3_SID }, + { MGBE0_SID_T264, MGBE1_SID_T264, MGBE2_SID_T264, 
MGBE3_SID_T264 } + }; #endif + struct core_local *l_core = (struct core_local *)(void *)osi_core; struct osi_vm_irq_data *irq_data; nve32_t ret = 0; nveu32_t i, j; @@ -2087,7 +2681,7 @@ static nve32_t mgbe_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) for (j = 0; j < irq_data->num_vm_chans; j++) { chan = irq_data->vm_chans[j]; - if (chan >= OSI_MGBE_MAX_NUM_CHANS) { + if (chan >= l_core->num_max_chans) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Invalid channel number\n", chan); ret = -1; @@ -2112,18 +2706,14 @@ static nve32_t mgbe_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) ret = -1; goto exit; } + osi_writela(osi_core, MGBE_SID_VAL1(sid[osi_core->mac][osi_core->instance_id]), + (nveu8_t *)osi_core->hv_base + MGBE_WRAP_AXI_ASID0_CTRL); - osi_writela(osi_core, MGBE_SID_VAL1(sid[osi_core->instance_id]), - (nveu8_t *)osi_core->hv_base + - MGBE_WRAP_AXI_ASID0_CTRL); + osi_writela(osi_core, MGBE_SID_VAL1(sid[osi_core->mac][osi_core->instance_id]), + (nveu8_t *)osi_core->hv_base + MGBE_WRAP_AXI_ASID1_CTRL); - osi_writela(osi_core, MGBE_SID_VAL1(sid[osi_core->instance_id]), - (nveu8_t *)osi_core->hv_base + - MGBE_WRAP_AXI_ASID1_CTRL); - - osi_writela(osi_core, MGBE_SID_VAL2(sid[osi_core->instance_id]), - (nveu8_t *)osi_core->hv_base + - MGBE_WRAP_AXI_ASID2_CTRL); + osi_writela(osi_core, MGBE_SID_VAL2(sid[osi_core->mac][osi_core->instance_id]), + (nveu8_t *)osi_core->hv_base + MGBE_WRAP_AXI_ASID2_CTRL); } #endif @@ -2179,14 +2769,15 @@ static nve32_t mgbe_core_init(struct osi_core_priv_data *const osi_core) value |= MGBE_RXQ_TO_DMA_MAP_DDMACH; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_RXQ_DMA_MAP2); - - /* Enable XDCS in MAC_Extended_Configuration */ - value = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_MAC_EXT_CNF); - value |= MGBE_MAC_EXT_CNF_DDS; - osi_writela(osi_core, value, (nveu8_t *)osi_core->base + - MGBE_MAC_EXT_CNF); - + /* T264 DDS bit moved */ + if (osi_core->mac != OSI_MAC_HW_MGBE_T26X) 
{ + /* Enable DDS in MAC_Extended_Configuration */ + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MGBE_MAC_EXT_CNF); + value |= MGBE_MAC_EXT_CNF_DDS; + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + + MGBE_MAC_EXT_CNF); + } /* Configure MTL Queues */ /* TODO: Iterate over Number MTL queues need to be removed */ for (qinx = 0; qinx < osi_core->num_mtl_queues; qinx++) { @@ -2232,6 +2823,8 @@ static nve32_t mgbe_core_init(struct osi_core_priv_data *const osi_core) //TODO: removed in tot dev-main //mgbe_reset_mmc(osi_core); } + osi_memset(&osi_core->rch_index, 0, + sizeof(struct rchlist_index)*RCHLIST_SIZE); fail: return ret; } @@ -2381,6 +2974,7 @@ static void mgbe_handle_mac_intrs(struct osi_core_priv_data *osi_core) nveu32_t mac_isr = 0; nveu32_t tx_errors = 0; nveu8_t *base = (nveu8_t *)osi_core->base; + nveu32_t pktid = 0U; #ifdef HSI_SUPPORT nveu64_t tx_frame_err = 0; #endif @@ -2481,7 +3075,11 @@ static void mgbe_handle_mac_intrs(struct osi_core_priv_data *osi_core) l_core->ts[i].nsec = osi_readla(osi_core, base + MGBE_MAC_TSNSSEC); l_core->ts[i].in_use = OSI_ENABLE; - l_core->ts[i].pkt_id = osi_readla(osi_core, base + MGBE_MAC_TSPKID); + pktid = osi_readla(osi_core, base + MGBE_MAC_TSPKID); + l_core->ts[i].pkt_id = (pktid & MGBE_PKTID_MASK); + if (osi_core->mac == OSI_MAC_HW_MGBE_T26X) { + l_core->ts[i].vdma_id = (pktid & MGBE_VDMAID_MASK); + } l_core->ts[i].sec = osi_readla(osi_core, base + MGBE_MAC_TSSEC); /* Add time stamp to end of list */ l_core->ts[i].next = head->prev->next; @@ -2644,10 +3242,12 @@ static nve32_t mgbe_set_avb_algorithm(struct osi_core_priv_data *const osi_core, /* Set TXQEN mode as per input struct after masking 3 bit */ value |= ((avb->oper_mode << MGBE_MTL_TX_OP_MODE_TXQEN_SHIFT) & MGBE_MTL_TX_OP_MODE_TXQEN); - /* Set TC mapping */ - value &= ~MGBE_MTL_TX_OP_MODE_Q2TCMAP; - value |= ((tcinx << MGBE_MTL_TX_OP_MODE_Q2TCMAP_SHIFT) & - MGBE_MTL_TX_OP_MODE_Q2TCMAP); + if (osi_core->mac == OSI_MAC_HW_MGBE) { + /* Set 
TC mapping */ + value &= ~MGBE_MTL_TX_OP_MODE_Q2TCMAP; + value |= ((tcinx << MGBE_MTL_TX_OP_MODE_Q2TCMAP_SHIFT) & + MGBE_MTL_TX_OP_MODE_Q2TCMAP); + } osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_CHX_TX_OP_MODE(qinx)); @@ -2706,13 +3306,15 @@ static nve32_t mgbe_set_avb_algorithm(struct osi_core_priv_data *const osi_core, MGBE_MTL_TCQ_ETS_HCR(tcinx)); osi_writela(osi_core, OSI_DISABLE, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_ETS_LCR(tcinx)); - - value = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_MTL_CHX_TX_OP_MODE(qinx)); - value &= ~MGBE_MTL_TX_OP_MODE_Q2TCMAP; - value |= (osi_core->tc[qinx] << MGBE_MTL_CHX_TX_OP_MODE_Q2TC_SH); - osi_writela(osi_core, value, (nveu8_t *)osi_core->base + - MGBE_MTL_CHX_TX_OP_MODE(qinx)); + if (osi_core->mac == OSI_MAC_HW_MGBE) { + /* Q2TCMAP is reserved for T26x */ + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MGBE_MTL_CHX_TX_OP_MODE(qinx)); + value &= ~MGBE_MTL_TX_OP_MODE_Q2TCMAP; + value |= (osi_core->tc[qinx] << MGBE_MTL_CHX_TX_OP_MODE_Q2TC_SH); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + + MGBE_MTL_CHX_TX_OP_MODE(qinx)); + } } done: @@ -2750,6 +3352,14 @@ static nve32_t mgbe_get_avb_algorithm(struct osi_core_priv_data *const osi_core, nveu32_t qinx = 0U; nveu32_t tcinx = 0U; + if (osi_core->mac == OSI_MAC_HW_MGBE_T26X) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OPNOTSUPP, + "Not supported for T26x\n", + 0ULL); + ret = -1; + goto fail; + } + if ((avb->qindex >= OSI_MGBE_MAX_NUM_QUEUES) || (avb->qindex == OSI_NONE)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, @@ -3099,9 +3709,19 @@ static void mgbe_handle_hsi_wrap_common_intr(struct osi_core_priv_data *osi_core nveu32_t val = 0; nveu32_t val2 = 0; nveu64_t ce_count_threshold; + const nveu32_t intr_en[OSI_MAX_MAC_IP_TYPES] = { + 0, + MGBE_WRAP_COMMON_INTR_ENABLE, + MGBE_T26X_WRAP_COMMON_INTR_ENABLE + }; + const nveu32_t intr_status[OSI_MAX_MAC_IP_TYPES] = { + 0, + MGBE_WRAP_COMMON_INTR_STATUS, + 
MGBE_T26X_WRAP_COMMON_INTR_STATUS + }; val = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_WRAP_COMMON_INTR_STATUS); + intr_en[osi_core->mac]); if (((val & MGBE_REGISTER_PARITY_ERR) == MGBE_REGISTER_PARITY_ERR) || ((val & MGBE_CORE_UNCORRECTABLE_ERR) == MGBE_CORE_UNCORRECTABLE_ERR)) { osi_core->hsi.err_code[UE_IDX] = OSI_UNCORRECTABLE_ERR; @@ -3109,11 +3729,11 @@ static void mgbe_handle_hsi_wrap_common_intr(struct osi_core_priv_data *osi_core osi_core->hsi.report_count_err[UE_IDX] = OSI_ENABLE; /* Disable the interrupt */ val2 = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_WRAP_COMMON_INTR_ENABLE); + intr_en[osi_core->mac]); val2 &= ~MGBE_REGISTER_PARITY_ERR; val2 &= ~MGBE_CORE_UNCORRECTABLE_ERR; osi_writela(osi_core, val2, (nveu8_t *)osi_core->base + - MGBE_WRAP_COMMON_INTR_ENABLE); + intr_en[osi_core->mac]); } if ((val & MGBE_CORE_CORRECTABLE_ERR) == MGBE_CORE_CORRECTABLE_ERR) { osi_core->hsi.err_code[CE_IDX] = OSI_CORRECTABLE_ERR; @@ -3128,7 +3748,7 @@ static void mgbe_handle_hsi_wrap_common_intr(struct osi_core_priv_data *osi_core } val &= ~MGBE_MAC_SBD_INTR; osi_writela(osi_core, val, (nveu8_t *)osi_core->base + - MGBE_WRAP_COMMON_INTR_STATUS); + intr_status[osi_core->mac]); if (((val & MGBE_CORE_CORRECTABLE_ERR) == MGBE_CORE_CORRECTABLE_ERR) || ((val & MGBE_CORE_UNCORRECTABLE_ERR) == MGBE_CORE_UNCORRECTABLE_ERR)) { @@ -3239,6 +3859,17 @@ static void mgbe_handle_hsi_intr(struct osi_core_priv_data *osi_core) */ static void mgbe_handle_common_intr(struct osi_core_priv_data *const osi_core) { + struct core_local *l_core = (struct core_local *)(void *)osi_core; + const nveu32_t intr_en[OSI_MAX_MAC_IP_TYPES] = { + 0, + MGBE_WRAP_COMMON_INTR_ENABLE, + MGBE_T26X_WRAP_COMMON_INTR_ENABLE + }; + const nveu32_t intr_status[OSI_MAX_MAC_IP_TYPES] = { + 0, + MGBE_WRAP_COMMON_INTR_STATUS, + MGBE_T26X_WRAP_COMMON_INTR_STATUS + }; void *base = osi_core->base; nveu32_t dma_isr_ch0_15 = 0; nveu32_t dma_isr_ch16_47 = 0; @@ -3271,7 +3902,7 @@ static void 
mgbe_handle_common_intr(struct osi_core_priv_data *const osi_core) for (i = 0; i < osi_core->num_dma_chans; i++) { chan = osi_core->dma_chans[i]; - if (chan >= OSI_MGBE_MAX_NUM_CHANS) { + if (chan >= l_core->num_max_chans) { continue; } @@ -3317,12 +3948,12 @@ static void mgbe_handle_common_intr(struct osi_core_priv_data *const osi_core) /* Clear common interrupt status in wrapper register */ osi_writela(osi_core, MGBE_MAC_SBD_INTR, - (nveu8_t *)base + MGBE_WRAP_COMMON_INTR_STATUS); + (nveu8_t *)base + intr_status[osi_core->mac]); val = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_WRAP_COMMON_INTR_ENABLE); + intr_en[osi_core->mac]); val |= MGBE_MAC_SBD_INTR; osi_writela(osi_core, val, (nveu8_t *)osi_core->base + - MGBE_WRAP_COMMON_INTR_ENABLE); + intr_en[osi_core->mac]); /* Clear FRP Interrupts in MTL_RXP_Interrupt_Control_Status */ val = osi_readla(osi_core, (nveu8_t *)base + MGBE_MTL_RXP_INTR_CS); @@ -3622,7 +4253,13 @@ static void mgbe_configure_eee(struct osi_core_priv_data *const osi_core, nveu32_t tic_counter = 0; void *addr = osi_core->base; - if (xpcs_eee(osi_core, tx_lpi_enabled) != 0) { + if (osi_core->uphy_gbe_mode == OSI_UPHY_GBE_MODE_25G) { + if (xlgpcs_eee(osi_core, tx_lpi_enabled) != 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "xlgpcs_eee call failed\n", 0ULL); + return; + } + } else if (xpcs_eee(osi_core, tx_lpi_enabled) != 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "xpcs_eee call failed\n", 0ULL); return; @@ -3698,7 +4335,18 @@ static void mgbe_configure_eee(struct osi_core_priv_data *const osi_core, static void mgbe_get_hw_features(struct osi_core_priv_data *const osi_core, struct osi_hw_features *hw_feat) { + const nveu32_t addmac_addrsel_shift[OSI_MAX_MAC_IP_TYPES] = { + 0, + MGBE_MAC_HFR0_ADDMACADRSEL_SHIFT, + MGBE_T26X_MAC_HFR0_ADDMACADRSEL_SHIFT + }; + const nveu32_t addmac_addrsel_mask[OSI_MAX_MAC_IP_TYPES] = { + 0, + MGBE_MAC_HFR0_ADDMACADRSEL_MASK, + MGBE_T26X_MAC_HFR0_ADDMACADRSEL_MASK + }; nveu8_t 
*base = (nveu8_t *)osi_core->base; + nveu32_t mac = osi_core->mac; nveu32_t mac_hfr0 = 0; nveu32_t mac_hfr1 = 0; nveu32_t mac_hfr2 = 0; @@ -3706,7 +4354,7 @@ static void mgbe_get_hw_features(struct osi_core_priv_data *const osi_core, #ifndef OSI_STRIPPED_LIB nveu32_t val = 0; #endif /* !OSI_STRIPPED_LIB */ - nveu32_t ret = 0; + nve32_t ret = 0; if (osi_core->pre_sil == OSI_ENABLE) { /* TBD: T264 reset to get mac version for MGBE */ @@ -3756,9 +4404,8 @@ static void mgbe_get_hw_features(struct osi_core_priv_data *const osi_core, MGBE_MAC_HFR0_TXCOESEL_MASK); hw_feat->rx_coe_sel = ((mac_hfr0 >> MGBE_MAC_HFR0_RXCOESEL_SHIFT) & MGBE_MAC_HFR0_RXCOESEL_MASK); - hw_feat->mac_addr_sel = - ((mac_hfr0 >> MGBE_MAC_HFR0_ADDMACADRSEL_SHIFT) & - MGBE_MAC_HFR0_ADDMACADRSEL_MASK); + hw_feat->mac_addr_sel = ((mac_hfr0 >> addmac_addrsel_shift[mac]) & + addmac_addrsel_mask[mac]); hw_feat->act_phy_sel = ((mac_hfr0 >> MGBE_MAC_HFR0_PHYSEL_SHIFT) & MGBE_MAC_HFR0_PHYSEL_MASK); hw_feat->tsstssel = ((mac_hfr0 >> MGBE_MAC_HFR0_TSSTSSEL_SHIFT) & @@ -4148,11 +4795,15 @@ static void mgbe_config_for_macsec(struct osi_core_priv_data *const osi_core, /* Configure IPG {EIPG,IPG} value according to macsec IAS in * MAC_Tx_Configuration and MAC_Extended_Configuration * IPG (12 B[default] + 32 B[sectag]) = 352 bits + * IPG (12 B[default] + 32 B[sectag] + 15B[if encryption is supported]) = 472 bits */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MAC_TMCR); value &= ~MGBE_MAC_TMCR_IPG_MASK; value |= MGBE_MAC_TMCR_IFP; + if (osi_core->mac == OSI_MAC_HW_MGBE_T26X) { + value |= MGBE_MAC_TMCR_IPG; + } osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MAC_TMCR); value = osi_readla(osi_core, (nveu8_t *)osi_core->base + @@ -4217,6 +4868,8 @@ void mgbe_init_core_ops(struct core_ops *ops) ops->config_frp = mgbe_config_frp; ops->update_frp_entry = mgbe_update_frp_entry; ops->update_frp_nve = mgbe_update_frp_nve; + ops->get_rchlist_index = mgbe_get_rchlist_index; + 
ops->free_rchlist_index = mgbe_free_rchlist_index; #if defined MACSEC_SUPPORT && !defined OSI_STRIPPED_LIB ops->read_macsec_reg = mgbe_read_macsec_reg; ops->write_macsec_reg = mgbe_write_macsec_reg; diff --git a/osi/core/mgbe_core.h b/osi/core/mgbe_core.h index 27ecb6e..2d469d0 100644 --- a/osi/core/mgbe_core.h +++ b/osi/core/mgbe_core.h @@ -25,7 +25,6 @@ #define INCLUDED_MGBE_CORE_H_ #ifndef OSI_STRIPPED_LIB -#define MGBE_MAC_PFR 0x0008 #define MGBE_MAC_RX_FLW_CTRL 0x0090 #define MGBE_MAC_RQC2R 0x00A8 #define MGBE_MAC_QX_TX_FLW_CTRL(x) ((0x0004U * (x)) + 0x0070U) @@ -45,6 +44,8 @@ #define MGBE_WRAP_AXI_ASID0_CTRL 0x8400 #define MGBE_WRAP_AXI_ASID1_CTRL 0x8404 #define MGBE_WRAP_AXI_ASID2_CTRL 0x8408 +#define MGBE_MAC_PFR_DHLFRS OSI_BIT(12) +#define MGBE_MAC_PFR_DHLFRS_MASK (OSI_BIT(12) | OSI_BIT(11)) #define MGBE_MAC_PFR_VTFE OSI_BIT(16) #define MGBE_MAC_PFR_IPFE OSI_BIT(20) #define MGBE_MAC_PFR_IPFE_SHIFT 20 @@ -58,6 +59,10 @@ #define MGBE1_SID ((nveu32_t)0x49U) #define MGBE2_SID ((nveu32_t)0x4AU) #define MGBE3_SID ((nveu32_t)0x4BU) +#define MGBE0_SID_T264 ((nveu32_t)0x0U) +#define MGBE1_SID_T264 ((nveu32_t)0x0U) +#define MGBE2_SID_T264 ((nveu32_t)0x0U) +#define MGBE3_SID_T264 ((nveu32_t)0x0U) #define MGBE_MAC_PAUSE_TIME 0xFFFF0000U #define MGBE_MAC_PAUSE_TIME_MASK 0xFFFF0000U #define MGBE_MAC_VLAN_TR_VTHM OSI_BIT(25) @@ -80,9 +85,21 @@ #define MGBE_MAC_RQC1R_PTPQ_SHIFT 24U #define MGBE_MAC_RQC1R_PTPQ (OSI_BIT(27) | OSI_BIT(26) | \ OSI_BIT(25) | OSI_BIT(24)) +#define MGBE_PKTID_MASK (OSI_BIT(9) | OSI_BIT(8) | \ + OSI_BIT(7) | OSI_BIT(6) | \ + OSI_BIT(5) | OSI_BIT(4) | \ + OSI_BIT(3) | OSI_BIT(2) | \ + OSI_BIT(1) | OSI_BIT(0)) +/* T264 VDMA ID bits */ +#define MGBE_VDMAID_MASK (OSI_BIT(23) | OSI_BIT(22) | \ + OSI_BIT(21) | OSI_BIT(20) | \ + OSI_BIT(19) | OSI_BIT(18) | \ + OSI_BIT(17) | OSI_BIT(16)) #define MGBE_MAC_RMCR_LM OSI_BIT(10) #define MGBE_MAC_RMCR_ARPEN OSI_BIT(31) #define MGBE_MAC_QX_TX_FLW_CTRL_TFE OSI_BIT(1) +#define MGBE_MAC_TMCR_IFP OSI_BIT(11) 
+#define MGBE_MAC_TMCR_IPG OSI_BIT(8) | OSI_BIT(9) #define MGBE_MAC_RQC1R_TPQC0 OSI_BIT(21) #define MGBE_MAC_RQC1R_OMCBCQ OSI_BIT(20) #define MGBE_MAC_RSS_CTRL_RSSE OSI_BIT(0) @@ -150,11 +167,26 @@ #endif /* !OSI_STRIPPED_LIB */ +#define MGBE_PKTID_MASK (OSI_BIT(9) | OSI_BIT(8) | \ + OSI_BIT(7) | OSI_BIT(6) | \ + OSI_BIT(5) | OSI_BIT(4) | \ + OSI_BIT(3) | OSI_BIT(2) | \ + OSI_BIT(1) | OSI_BIT(0)) +/* T264 VDMA ID bits */ +#define MGBE_VDMAID_MASK (OSI_BIT(23) | OSI_BIT(22) | \ + OSI_BIT(21) | OSI_BIT(20) | \ + OSI_BIT(19) | OSI_BIT(18) | \ + OSI_BIT(17) | OSI_BIT(16)) + +#define MGBE_MAC_PFR_DHLFRS OSI_BIT(12) +#define MGBE_MAC_PFR_DHLFRS_MASK (OSI_BIT(12) | OSI_BIT(11)) + #define MGBE_CORE_MAC_STSR 0x0D08 #define MGBE_CORE_MAC_STNSR 0x0D0C #define MGBE_CORE_MAC_STNSR_TSSS_MASK 0x7FFFFFFFU #define MGBE_MAC_TMCR_IPG_MASK 0x700U #define MGBE_MAC_TMCR_IFP OSI_BIT(11) +#define MGBE_MAC_TMCR_IPG OSI_BIT(8) | OSI_BIT(9) #define MGBE_MAC_RX_TX_STS 0x00B8 #define MGBE_MTL_EST_CONTROL 0x1050 #define MGBE_MTL_EST_OVERHEAD 0x1054 @@ -268,6 +300,10 @@ OSI_BIT(5) | OSI_BIT(4) | \ OSI_BIT(3) | OSI_BIT(2) | \ OSI_BIT(1) | OSI_BIT(0)) + +#define MGBE_MTL_RCHlist_READ_UDELAY 1U +#define MGBE_MTL_RCHlist_READ_RETRY 1000U + /* FRP Interrupt Control and Status register */ #define MGBE_MTL_RXP_INTR_CS_PDRFIE OSI_BIT(19) #define MGBE_MTL_RXP_INTR_CS_FOOVIE OSI_BIT(18) @@ -296,6 +332,7 @@ OSI_BIT(11) | OSI_BIT(10) | \ OSI_BIT(9) | OSI_BIT(8)) #define MGBE_MTL_FRP_IE2_FO_SHIFT 8U +#define MGBE_MTL_FRP_IE2_DCHT OSI_BIT(7) #define MGBE_MTL_FRP_IE2_NC OSI_BIT(3) #define MGBE_MTL_FRP_IE2_IM OSI_BIT(2) #define MGBE_MTL_FRP_IE2_RF OSI_BIT(1) @@ -303,7 +340,10 @@ #define MGBE_MTL_FRP_IE3_DCH_MASK 0xFFFFU /* Indirect register defines */ #define MGBE_MTL_RXP_IND_CS_BUSY OSI_BIT(31) +#define MGBE_MTL_RXP_IND_RCH_ACCSEL OSI_BIT(25) #define MGBE_MTL_RXP_IND_CS_ACCSEL OSI_BIT(24) +#define MGBE_MTL_RXP_IND_CS_CRWEN OSI_BIT(18) +#define MGBE_MTL_RXP_IND_CS_CRWSEL OSI_BIT(17) #define 
MGBE_MTL_RXP_IND_CS_WRRDN OSI_BIT(16) #define MGBE_MTL_RXP_IND_CS_ADDR (OSI_BIT(9) | OSI_BIT(8) | \ OSI_BIT(7) | OSI_BIT(6) | \ @@ -346,6 +386,7 @@ #define MGBE_MMC_TX_INTR_EN 0x0810 #define MGBE_MMC_RX_INTR_EN 0x080C #define MGBE_MMC_CNTRL 0x0800 +#define MGBE_MMC_IPC_RX_INT_MASK 0x0A5C #define MGBE_MAC_L3L4_ADDR_CTR 0x0C00 #define MGBE_MAC_L3L4_DATA 0x0C04 #define MGBE_MAC_TCR 0x0D00 @@ -366,7 +407,8 @@ * @brief MGBE Wrapper register offsets * @{ */ -#define MGBE_WRAP_COMMON_INTR_ENABLE 0x8704 +#define MGBE_WRAP_COMMON_INTR_ENABLE 0x8704 +#define MGBE_T26X_WRAP_COMMON_INTR_ENABLE 0x880C #ifdef HSI_SUPPORT #define MGBE_REGISTER_PARITY_ERR OSI_BIT(5) @@ -383,6 +425,7 @@ #endif #define MGBE_MAC_SBD_INTR OSI_BIT(2) #define MGBE_WRAP_COMMON_INTR_STATUS 0x8708 +#define MGBE_T26X_WRAP_COMMON_INTR_STATUS 0x8810 #define MGBE_VIRT_INTR_APB_CHX_CNTRL(x) (0x8200U + ((x) * 4U)) #define MGBE_VIRTUAL_APB_ERR_CTRL 0x8300 /** @} */ @@ -396,15 +439,22 @@ * @{ */ #define MGBE_MAC_XDCS_DMA_MAX 0x3FFU +#define MGBE_MAC_XDCS_DMA_MAX_T26X 0xFFFFFFFFFFFFU +#define MGBE_MAC_XDCST_DMA_MAX OSI_BIT(16) #define MGBE_MAC_INDIR_AC_OB_WAIT 10U #define MGBE_MAC_INDIR_AC_OB_RETRY 10U +#define MGBE_MAC_INDIR_AC_MSEL_T26X (OSI_BIT(26) | OSI_BIT(27) | \ + OSI_BIT(28) | OSI_BIT(29)) #define MGBE_MAC_DCHSEL 0U +#define MGBE_MAC_DPCSEL 0x3U +#define MGBE_MAC_DPCSEL_DDS OSI_BIT(1) /* MGBE_MAC_INDIR_AC register defines */ #define MGBE_MAC_INDIR_AC_MSEL (OSI_BIT(19) | OSI_BIT(18) | \ OSI_BIT(17) | OSI_BIT(16)) #define MGBE_MAC_INDIR_AC_MSEL_SHIFT 16U +#define MGBE_MAC_INDIR_AC_MSEL_SHIFT_T264 26U #define MGBE_MAC_INDIR_AC_AOFF (OSI_BIT(15) | OSI_BIT(14) | \ OSI_BIT(13) | OSI_BIT(12) | \ OSI_BIT(11) | OSI_BIT(10) | \ @@ -514,6 +564,7 @@ #define MGBE_MMC_CNTRL_RSTONRD OSI_BIT(2) #define MGBE_MMC_CNTRL_CNTMCT (OSI_BIT(4) | OSI_BIT(5)) #define MGBE_MMC_CNTRL_CNTPRST OSI_BIT(7) +#define MGBE_MMC_IPC_RX_INT_MASK_VALUE 0x3FFF3FFFU #define MGBE_MAC_RQC1R_MCBCQEN OSI_BIT(15) #define MGBE_MAC_RQC1R_MCBCQ 
(OSI_BIT(11) | OSI_BIT(10) | \ OSI_BIT(9) | OSI_BIT(8)) @@ -539,6 +590,9 @@ OSI_BIT(19) | OSI_BIT(18) | \ OSI_BIT(17) | OSI_BIT(16)) #define MGBE_MAC_ADDRH_DCS_SHIFT 16 +#define MGBE_MAC_L3L4_ADDR_CTR_IDDR_FNUM_T264 (OSI_BIT(12) | OSI_BIT(13) | \ + OSI_BIT(14) | OSI_BIT(15) | \ + OSI_BIT(16) | OSI_BIT(17)) #define MGBE_MAC_L3L4_ADDR_CTR_IDDR_FNUM (OSI_BIT(12) | OSI_BIT(13) | \ OSI_BIT(14) | OSI_BIT(15)) #define MGBE_MAC_L3L4_ADDR_CTR_IDDR_FNUM_SHIFT 12 @@ -572,12 +626,12 @@ #define MGBE_MAC_RMCR_RE OSI_BIT(0) #define MGBE_MTL_TXQ_SIZE_SHIFT 16U #define MGBE_MTL_RXQ_SIZE_SHIFT 16U +#define MGBE_MTL_Q_SIZE_MASK (OSI_BIT(21) | OSI_BIT(20) | OSI_BIT(19) | \ + OSI_BIT(18) | OSI_BIT(17) | OSI_BIT(16)) #define MGBE_RXQ_TO_DMA_CHAN_MAP0 0x03020100U #define MGBE_RXQ_TO_DMA_CHAN_MAP1 0x07060504U #define MGBE_RXQ_TO_DMA_CHAN_MAP2 0x0B0A0908U #define MGBE_RXQ_TO_DMA_MAP_DDMACH 0x80808080U -#define MGBE_MTL_TXQ_SIZE_SHIFT 16U -#define MGBE_MTL_RXQ_SIZE_SHIFT 16U #define MGBE_MAC_RMCR_GPSL_MSK 0x3FFF0000U #define MGBE_MAC_TCR_TSUPDT OSI_BIT(3) #define MGBE_MAC_STNSUR_ADDSUB_SHIFT 31U @@ -586,6 +640,7 @@ #define MGBE_MTL_RXQ_OP_MODE_RFA_MASK 0x0000007EU #define MGBE_MTL_RXQ_OP_MODE_RFD_SHIFT 17U #define MGBE_MTL_RXQ_OP_MODE_RFD_MASK 0x007E0000U +#define MGBE_DMA_MODE_DSCB OSI_BIT(16) #if defined(MACSEC_SUPPORT) /** * MACSEC Recommended value @@ -623,7 +678,8 @@ #define MGBE_MAC_TSS_TXTSC OSI_BIT(15) /* MGBE DMA IND CTRL register field masks */ #define MGBE_DMA_INDIR_CTRL_MSEL_MASK (OSI_BIT(24) | OSI_BIT(25) | \ - OSI_BIT(26) | OSI_BIT(27)) + OSI_BIT(26) | OSI_BIT(27) | \ + OSI_BIT(28)) #define MGBE_DMA_INDIR_CTRL_MSEL_SHIFT 24 #define MGBE_DMA_INDIR_CTRL_AOFF_MASK (OSI_BIT(8) | OSI_BIT(9) | \ OSI_BIT(10) | OSI_BIT(11) | \ @@ -640,23 +696,21 @@ #define MGBE_PDMA_CHX_TXRX_EXTCFG_P2TCMP_MASK (OSI_BIT(16) | \ OSI_BIT(17) | OSI_BIT(18)) #define MGBE_PDMA_CHX_TXRX_EXTCFG_PBLX8 OSI_BIT(19) -#define MGBE_PDMA_CHX_TXRX_EXTCFG_PBL_MASK (OSI_BIT(24) | OSI_BIT(25) | \ - OSI_BIT(26) | 
OSI_BIT(27) | \ - OSI_BIT(28) | OSI_BIT(29)) -#define MGBE_PDMA_CHX_TXRX_EXTCFG_PBL_SHIFT 24 -#define MGBE_PDMA_CHX_EXTCFG_MAX_PBL 256U -#define MGBE_PDMA_CHX_EXTCFG_MAX_PBL_VAL 0x20000000U +#define MGBE_PDMA_CHX_EXTCFG_PBL_SHIFT 24U + +#define MGBE_PDMA_CHX_RX_EXTCFG_RXPEN OSI_BIT(31) /* MGBE PDMA_CH(#i)_Tx/RxDescCtrl register field masks */ #define MGBE_VDMA_CHX_TX_DESC_CTRL 4U #define MGBE_VDMA_CHX_RX_DESC_CTRL 5U -#define MGBE_XDMA_CHX_TXRX_DESC_CTRL_DCSZ 3U -#define MGBE_XDMA_CHX_TXRX_DESC_CTRL_DCSZ_MASK (OSI_BIT(0) | OSI_BIT(1) | \ +#define MGBE_VDMA_CHX_TXRX_DESC_CTRL_DCSZ 5U +#define MGBE_VDMA_CHX_TXRX_DESC_CTRL_DCSZ_UFPGA 3U +#define MGBE_VDMA_CHX_TXRX_DESC_CTRL_DCSZ_MASK (OSI_BIT(0) | OSI_BIT(1) | \ OSI_BIT(2)) -#define MGBE_XDMA_CHX_TXRX_DESC_CTRL_DPS 4U -#define MGBE_XDMA_CHX_TXRX_DESC_CTRL_DPS_MASK (OSI_BIT(3) | OSI_BIT(4) | \ +#define MGBE_VDMA_CHX_TXRX_DESC_CTRL_DPS 3U +#define MGBE_VDMA_CHX_TXRX_DESC_CTRL_DPS_MASK (OSI_BIT(3) | OSI_BIT(4) | \ OSI_BIT(5)) -#define MGBE_XDMA_CHX_TXRX_DESC_CTRL_DPS_SHIFT 3 +#define MGBE_VDMA_CHX_TXRX_DESC_CTRL_DPS_SHIFT 3 /** @} */ #ifndef OSI_STRIPPED_LIB @@ -785,7 +839,9 @@ #endif /* !OSI_STRIPPED_LIB */ /* TXQ Size 128KB is divided equally across 10 MTL Queues*/ -#define TX_FIFO_SZ (((((128U * 1024U)/OSI_MGBE_MAX_NUM_QUEUES)) / 256U) - 1U) +#define TX_FIFO_SZ (((((MGBE_TXQ_SIZE)/OSI_MGBE_MAX_NUM_QUEUES)) / 256U) - 1U) +#define TX_FIFO_SZ_UFPGA (((((MGBE_TXQ_SIZE_UFPGA)/OSI_MGBE_MAX_NUM_QUEUES)) / 256U) - 1U) + /** * @addtogroup MGBE-MAC-HWFR MGBE MAC HW feature registers @@ -854,7 +910,9 @@ #define MGBE_MAC_HFR0_RXCOESEL_SHIFT 16U #define MGBE_MAC_HFR0_ADDMACADRSEL_MASK 0x1FU +#define MGBE_T26X_MAC_HFR0_ADDMACADRSEL_MASK 0x3FU #define MGBE_MAC_HFR0_ADDMACADRSEL_SHIFT 18U +#define MGBE_T26X_MAC_HFR0_ADDMACADRSEL_SHIFT 17U #define MGBE_MAC_HFR0_PHYSEL_MASK 0x3U #define MGBE_MAC_HFR0_PHYSEL_SHIFT 23U diff --git a/osi/core/osi_hal.c b/osi/core/osi_hal.c index 2ba8a12..d9a0d15 100644 --- a/osi/core/osi_hal.c +++ 
b/osi/core/osi_hal.c @@ -522,7 +522,7 @@ static nve32_t osi_get_mac_version(struct osi_core_priv_data *const osi_core, nv *mac_ver = osi_readla(osi_core, ((nveu8_t *)osi_core->base + (nve32_t)MAC_VERSION)) & MAC_VERSION_SNVER_MASK; - if (validate_mac_ver_update_chans(*mac_ver, &l_core->num_max_chans, + if (validate_mac_ver_update_chans(osi_core->mac, *mac_ver, &l_core->num_max_chans, &l_core->l_mac_ver) == 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid MAC version\n", (nveu64_t)*mac_ver) @@ -885,15 +885,11 @@ static nve32_t l3l4_find_match(const struct core_local *const l_core, static nve32_t configure_l3l4_filter_valid_params(const struct osi_core_priv_data *const osi_core, const struct osi_l3_l4_filter *const l3_l4) { - const nveu32_t max_dma_chan[OSI_MAX_MAC_IP_TYPES] = { - OSI_EQOS_MAX_NUM_CHANS, - OSI_MGBE_MAX_NUM_CHANS, - OSI_MGBE_MAX_NUM_CHANS - }; + struct core_local *l_core = (struct core_local *)(void *)osi_core; nve32_t ret = -1; /* validate dma channel */ - if (l3_l4->dma_chan > max_dma_chan[osi_core->mac]) { + if (l3_l4->dma_chan > l_core->num_max_chans) { OSI_CORE_ERR((osi_core->osd), (OSI_LOG_ARG_OUTOFBOUND), ("L3L4: Wrong DMA channel: "), (l3_l4->dma_chan)); goto exit_func; @@ -973,6 +969,7 @@ static nve32_t configure_l3l4_filter_helper(struct osi_core_priv_data *const osi { struct osi_l3_l4_filter *cfg_l3_l4; struct core_local *const l_core = (struct core_local *)(void *)osi_core; + const nveu32_t filter_mask[OSI_MAX_MAC_IP_TYPES] = { 0x1F, 0x1F, 0x3F }; nve32_t ret; ret = l_core->ops_p->config_l3l4_filters(osi_core, filter_no, l3_l4); @@ -994,7 +991,8 @@ static nve32_t configure_l3l4_filter_helper(struct osi_core_priv_data *const osi #if !defined(L3L4_WILDCARD_FILTER) /* update filter mask bit */ - osi_core->l3l4_filter_bitmask |= ((nveu32_t)1U << (filter_no & 0x1FU)); + osi_core->l3l4_filter_bitmask |= ((nveu64_t)1U << + (filter_no & filter_mask[osi_core->mac])); #endif /* !L3L4_WILDCARD_FILTER */ } else { /* Clear the filter 
data. @@ -1007,7 +1005,8 @@ static nve32_t configure_l3l4_filter_helper(struct osi_core_priv_data *const osi #if !defined(L3L4_WILDCARD_FILTER) /* update filter mask bit */ - osi_core->l3l4_filter_bitmask &= ~((nveu32_t)1U << (filter_no & 0x1FU)); + osi_core->l3l4_filter_bitmask &= ~((nveu64_t)1U << + (filter_no & filter_mask[osi_core->mac])); #endif /* !L3L4_WILDCARD_FILTER */ } @@ -1120,7 +1119,7 @@ static nve32_t configure_l3l4_filter(struct osi_core_priv_data *const osi_core, const nveu32_t max_filter_no[OSI_MAX_MAC_IP_TYPES] = { EQOS_MAX_L3_L4_FILTER - 1U, OSI_MGBE_MAX_L3_L4_FILTER - 1U, - OSI_MGBE_MAX_L3_L4_FILTER - 1U, + OSI_MGBE_MAX_L3_L4_FILTER_T264 - 1U, }; nve32_t ret = -1; @@ -1748,11 +1747,20 @@ static inline void free_tx_ts(struct osi_core_priv_data *osi_core, nveu32_t count = 0U; while ((temp != head) && (count < MAX_TX_TS_CNT)) { - if (((temp->pkt_id >> CHAN_START_POSITION) & chan) == chan) { - temp->next->prev = temp->prev; - temp->prev->next = temp->next; - /* reset in_use for temp node from the link */ - temp->in_use = OSI_DISABLE; + if (osi_core->mac != OSI_MAC_HW_MGBE_T26X) { + if (((temp->pkt_id >> CHAN_START_POSITION) & chan) == chan) { + temp->next->prev = temp->prev; + temp->prev->next = temp->next; + /* reset in_use for temp node from the link */ + temp->in_use = OSI_DISABLE; + } + } else { + if (temp->vdma_id == chan) { + temp->next->prev = temp->prev; + temp->prev->next = temp->next; + /* reset in_use for temp node from the link */ + temp->in_use = OSI_DISABLE; + } } count++; temp = temp->next; @@ -1842,6 +1850,7 @@ static inline nve32_t get_tx_ts(struct osi_core_priv_data *osi_core, temp = temp->next; continue; } else if ((temp->pkt_id == ts->pkt_id) && + (temp->vdma_id == ts->vdma_id) && (temp->in_use != OSI_NONE)) { ts->sec = temp->sec; ts->nsec = temp->nsec; @@ -2222,8 +2231,13 @@ fail: static void cfg_l3_l4_filter(struct core_local *l_core) { nveu32_t i = 0U; + const nveu32_t max_filter_no[OSI_MAX_MAC_IP_TYPES] = { + 
EQOS_MAX_L3_L4_FILTER, + OSI_MGBE_MAX_L3_L4_FILTER, + OSI_MGBE_MAX_L3_L4_FILTER_T264, + }; - for (i = 0U; i < OSI_MGBE_MAX_L3_L4_FILTER; i++) { + for (i = 0U; i < max_filter_no[l_core->osi_core.mac]; i++) { if (l_core->cfg.l3_l4[i].filter_enb_dis == OSI_L3L4_DISABLE) { /* filter not enabled */ continue; diff --git a/osi/core/xpcs.c b/osi/core/xpcs.c index 33f0845..4a0042d 100644 --- a/osi/core/xpcs.c +++ b/osi/core/xpcs.c @@ -263,6 +263,91 @@ fail: return ret; } +/** + * @brief xlgpcs_start - Start XLGPCS + * + * Algorithm: This routine enables AN and set speed based on AN status + * + * @param[in] osi_core: OSI core data structure. + * + * @retval 0 on success + * @retval -1 on failure. + */ +nve32_t xlgpcs_start(struct osi_core_priv_data *osi_core) +{ + void *xpcs_base = osi_core->xpcs_base; + nveu32_t retry = RETRY_COUNT; + nveu32_t count = 0; + nveu32_t ctrl = 0; + nve32_t ret = 0; + nve32_t cond = COND_NOT_MET; + + if (xpcs_base == OSI_NULL) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "XLGPCS base is NULL", 0ULL); + ret = -1; + goto fail; + } + /* * XLGPCS programming guideline IAS section 7.1.3.2.2.2 + */ + /* 4 Poll SR_PCS_CTRL1 reg RST bit */ + ctrl = xpcs_read(xpcs_base, XLGPCS_SR_PCS_CTRL1); + ctrl |= XLGPCS_SR_PCS_CTRL1_RST; + xpcs_write(xpcs_base, XLGPCS_SR_PCS_CTRL1, ctrl); + + count = 0; + while (cond == 1) { + if (count > retry) { + ret = -1; + goto fail; + } + count++; + ctrl = xpcs_read(xpcs_base, XLGPCS_SR_PCS_CTRL1); + if ((ctrl & XLGPCS_SR_PCS_CTRL1_RST) == 0U) { + cond = 0; + } else { + /* Maximum wait delay as per HW team is 10msec. 
+ * So add a loop for 1000 iterations with 1usec delay, + * so that if check get satisfies before 1msec will come + * out of loop and it can save some boot time + */ + osi_core->osd_ops.udelay(10U); + } + } + /* 5 Program SR_AN_CTRL reg AN_EN bit to disable auto-neg */ + ctrl = xpcs_read(xpcs_base, XLGPCS_SR_AN_CTRL); + ctrl &= ~XLGPCS_SR_AN_CTRL_AN_EN; + ret = xpcs_write_safety(osi_core, XLGPCS_SR_AN_CTRL, ctrl); + if (ret != 0) { + goto fail; + } + + /* 6 Wait for SR_PCS_STS1 reg RLU bit to set */ + cond = COND_NOT_MET; + count = 0; + while (cond == COND_NOT_MET) { + if (count > retry) { + ret = -1; + break; + } + count++; + ctrl = xpcs_read(xpcs_base, XLGPCS_SR_PCS_STS1); + if ((ctrl & XLGPCS_SR_PCS_STS1_RLU) == + XLGPCS_SR_PCS_STS1_RLU) { + cond = COND_MET; + } else { + /* Maximum wait delay as per HW team is 10msec. + * So add a loop for 1000 iterations with 1usec delay, + * so that if check get satisfies before 1msec will come + * out of loop and it can save some boot time + */ + osi_core->osd_ops.udelay(10U); + } + } +fail: + return ret; +} + /** * @brief xpcs_uphy_lane_bring_up - Bring up UPHY Tx/Rx lanes * @@ -697,7 +782,7 @@ nve32_t xpcs_init(struct osi_core_priv_data *osi_core) ctrl = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_KR_CTRL); ctrl &= ~(XPCS_VR_XS_PCS_KR_CTRL_USXG_MODE_MASK); - if (osi_core->uphy_gbe_mode == OSI_DISABLE) { + if (osi_core->uphy_gbe_mode == OSI_GBE_MODE_5G) { ctrl |= XPCS_VR_XS_PCS_KR_CTRL_USXG_MODE_5G; } } @@ -717,6 +802,129 @@ fail: return ret; } + +/** + * @brief xlgpcs_init - XLGPCS initialization + * + * Algorithm: This routine initialize XLGPCS in USXMII mode. + * + * @param[in] osi_core: OSI core data structure. + * + * @retval 0 on success + * @retval -1 on failure. 
+ */ +nve32_t xlgpcs_init(struct osi_core_priv_data *osi_core) +{ + void *xpcs_base = osi_core->xpcs_base; + nveu32_t retry = 1000; + nveu32_t count; + nveu32_t ctrl = 0; + nve32_t cond = COND_NOT_MET; + nve32_t ret = 0; + nveu32_t value = 0; + + if (osi_core->xpcs_base == OSI_NULL) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "XLGPCS base is NULL", 0ULL); + ret = -1; + goto fail; + } + + if (osi_core->pre_sil == 0x1U) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "Pre-silicon, skipping lane bring up", 0ULL); + } else { + /* Select XLGPCS in wrapper register */ + if ((osi_core->mac == OSI_MAC_HW_MGBE_T26X) && + (osi_core->uphy_gbe_mode == OSI_UPHY_GBE_MODE_25G)) { + value = osi_readla(osi_core, (nveu8_t *)osi_core->xpcs_base + + T26X_XPCS_WRAP_CONFIG_0); + value |= OSI_BIT(0); + osi_writela(osi_core, value, (nveu8_t *)osi_core->xpcs_base + + T26X_XPCS_WRAP_CONFIG_0); + } + + if (xpcs_lane_bring_up(osi_core) < 0) { + ret = -1; + goto fail; + } + } + /* Switching to USXGMII Mode to 25G based on + * XLGPCS programming guideline IAS section 7.1.3.2.2.1 + */ + /* 1.Program SR_PCS_CTRL1 reg SS_5_2 bits */ + ctrl = xpcs_read(xpcs_base, XLGPCS_SR_PCS_CTRL1); + ctrl &= ~XLGPCS_SR_PCS_CTRL1_SS5_2_MASK; + ctrl |= XLGPCS_SR_PCS_CTRL1_SS5_2; + ret = xpcs_write_safety(osi_core, XLGPCS_SR_PCS_CTRL1, ctrl); + if (ret != 0) { + goto fail; + } + /* 2.Program SR_PCS_CTRL2 reg PCS_TYPE_SEL bits */ + ctrl = xpcs_read(xpcs_base, XLGPCS_SR_PCS_CTRL2); + ctrl &= ~XLGPCS_SR_PCS_CTRL2_PCS_TYPE_SEL_MASK; + ctrl |= XLGPCS_SR_PCS_CTRL2_PCS_TYPE_SEL; + ret = xpcs_write_safety(osi_core, XLGPCS_SR_PCS_CTRL2, ctrl); + if (ret != 0) { + goto fail; + } + /* 3.Program SR_PMA_CTRL2 reg PMA_TYPE bits */ + ctrl = xpcs_read(xpcs_base, XLGPCS_SR_PMA_CTRL2); + ctrl &= ~XLGPCS_SR_PMA_CTRL2_PMA_TYPE_MASK; + ctrl |= XLGPCS_SR_PMA_CTRL2_PMA_TYPE; + ret = xpcs_write_safety(osi_core, XLGPCS_SR_PMA_CTRL2, ctrl); + if (ret != 0) { + goto fail; + } + /* 4.NA [Program VR_PCS_MMD Digital Control3 
reg EN_50G bit + * to disable 50G] 25G mode selected for T264 */ + /* 5.Program VR_PCS_MMD Digital Control3 reg CNS_EN bit to 1 to + * enable 25G as per manual */ + ctrl = xpcs_read(xpcs_base, XLGPCS_VR_PCS_DIG_CTRL3); + ctrl |= XLGPCS_VR_PCS_DIG_CTRL3_CNS_EN; + ret = xpcs_write_safety(osi_core, XLGPCS_VR_PCS_DIG_CTRL3, ctrl); + if (ret != 0) { + goto fail; + } + + /* 6.NA. Enable RS FEC */ + /* 7. Enable BASE-R FEC */ + ctrl = xpcs_read(xpcs_base, XLGPCS_SR_PMA_KR_FEC_CTRL); + ctrl |= XLGPCS_SR_PMA_KR_FEC_CTRL_FEC_EN; + ret = xpcs_write_safety(osi_core, XLGPCS_SR_PMA_KR_FEC_CTRL, ctrl); + if (ret != 0) { + goto fail; + } + + /* 8.NA, Configure PHY to 25G rate */ + /* 9.Program VR_PCS_DIG_CTRL1 reg VR_RST bit */ + ctrl = xpcs_read(xpcs_base, XLGPCS_VR_PCS_DIG_CTRL1); + ctrl |= XLGPCS_VR_PCS_DIG_CTRL1_VR_RST; + xpcs_write(xpcs_base, XLGPCS_VR_PCS_DIG_CTRL1, ctrl); + /* 10.Wait for VR_PCS_DIG_CTRL1 reg VR_RST bit to self clear */ + count = 0; + while (cond == COND_NOT_MET) { + if (count > retry) { + ret = -1; + goto fail; + } + count++; + ctrl = xpcs_read(xpcs_base, XLGPCS_VR_PCS_DIG_CTRL1); + if ((ctrl & XLGPCS_VR_PCS_DIG_CTRL1_VR_RST) == 0U) { + cond = 0; + } else { + /* Maximum wait delay as per HW team is 10msec. + * So add a loop for 1000 iterations with 1usec delay, + * so that if check get satisfies before 1msec will come + * out of loop and it can save some boot time + */ + osi_core->osd_ops.udelay(10U); + } + } +fail: + return ret; +} + #ifndef OSI_STRIPPED_LIB /** * @brief xpcs_eee - XPCS enable/disable EEE @@ -781,4 +989,93 @@ nve32_t xpcs_eee(struct osi_core_priv_data *osi_core, nveu32_t en_dis) fail: return ret; } + +/** + * @brief xlgpcs_eee - XLGPCS enable/disable EEE + * + * Algorithm: This routine update register related to EEE + * for XLGPCS. + * + * @param[in] osi_core: OSI core data structure. + * @param[in] en_dis: enable - 1 or disable - 0 + * + * @retval 0 on success + * @retval -1 on failure. 
+ */ +nve32_t xlgpcs_eee(struct osi_core_priv_data *osi_core, nveu32_t en_dis) +{ + void *xpcs_base = osi_core->xpcs_base; + nveu32_t val = 0x0U; + nve32_t ret = 0; + nveu32_t retry = 1000U; + nveu32_t count = 0; + nve32_t cond = COND_NOT_MET; + + if ((en_dis != OSI_ENABLE) && (en_dis != OSI_DISABLE)) { + ret = -1; + goto fail; + } + + if (xpcs_base == OSI_NULL) { + ret = -1; + goto fail; + } + + if (en_dis == OSI_DISABLE) { + val = xpcs_read(xpcs_base, XLGPCS_VR_PCS_EEE_MCTRL); + val &= ~XPCS_VR_XS_PCS_EEE_MCTRL0_LTX_EN; + val &= ~XPCS_VR_XS_PCS_EEE_MCTRL0_LRX_EN; + ret = xpcs_write_safety(osi_core, XLGPCS_VR_PCS_EEE_MCTRL, val); + /* To disable EEE on TX side, the software must wait for + * TX LPI to enter TX_ACTIVE state by reading + * VR_PCS_DIG_STS Register + */ + while (cond == COND_NOT_MET) { + if (count > retry) { + ret = -1; + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "EEE active state timeout!", 0ULL); + goto fail; + } + count++; + val = xpcs_read(xpcs_base, XLGPCS_VR_PCS_DIG_STS); + if ((val & XLGPCS_VR_PCS_DIG_STSLTXRX_STATE) == 0U) { + cond = 0; + } else { + osi_core->osd_ops.udelay(100U); + } + } + } else { + + /* 1. Check if DWC_xlgpcs supports the EEE feature + * by reading the SR_PCS_EEE_ABL reg. For 25G always enabled + * by default + */ + + /* 2. Program various timers used in the EEE mode depending on + * the clk_eee_i clock frequency. default timers are same as + * IEEE std clk_eee_i() is 108MHz. MULT_FACT_100NS = 9 + * because 9.2ns*10 = 92 which is between 80 and 120 this + * leads to default setting match. + */ + + /* 3. NA. [If FEC is enabled in the KR mode] */ + /* 4. NA. [Enable fast_sim mode] */ + /* 5. NA [If RS FEC is enabled, program AM interval and RS FEC] + */ + /* 6. NA [Fast wake is not enabled default] */ + /* 7. 
Enable the EEE feature on Tx and Rx path */ + val = xpcs_read(xpcs_base, XLGPCS_VR_PCS_EEE_MCTRL); + val |= (XPCS_VR_XS_PCS_EEE_MCTRL0_LTX_EN | + XPCS_VR_XS_PCS_EEE_MCTRL0_LRX_EN); + ret = xpcs_write_safety(osi_core, XLGPCS_VR_PCS_EEE_MCTRL, val); + if (ret != 0) { + goto fail; + } + /* 8. NA [If PMA service interface is XLAUI or CAUI] */ + } +fail: + return ret; +} + #endif /* !OSI_STRIPPED_LIB */ diff --git a/osi/core/xpcs.h b/osi/core/xpcs.h index aaa5fe8..802ce09 100644 --- a/osi/core/xpcs.h +++ b/osi/core/xpcs.h @@ -48,16 +48,65 @@ #define T26X_XPCS_WRAP_UPHY_HW_INIT_CTRL 0x8034 #define T26X_XPCS_WRAP_UPHY_STATUS 0x8074 #define T26X_XPCS_WRAP_INTERRUPT_STATUS 0x8080 +#define T26X_XPCS_WRAP_CONFIG_0 0x8094 + +/** @} */ + +/** + * @addtogroup XLGPCS Register offsets + * + * @brief XLGPCS register offsets + * @{ + */ +#define XLGPCS_SR_PMA_CTRL2 0x4001c +#define XLGPCS_SR_PMA_KR_FEC_CTRL 0x402ac +#define XLGPCS_SR_PCS_CTRL1 0xc0000 +#define XLGPCS_SR_PCS_STS1 0xc0004 +#define XLGPCS_SR_PCS_CTRL2 0xc001c +#define XLGPCS_VR_PCS_DIG_CTRL1 0xe0000 +#define XLGPCS_VR_PCS_DIG_CTRL3 0xe000c +#define XLGPCS_SR_AN_CTRL 0x1c0000 + +/** @} */ + +/** + * @addtogroup XLGPCS-BIT Register bit fileds + * + * @brief XLGPCS register bit fields and values + * @{ + */ +#define XLGPCS_SR_PCS_CTRL1_RST OSI_BIT(15) +#define XLGPCS_SR_AN_CTRL_AN_EN OSI_BIT(12) +#define XLGPCS_SR_PCS_STS1_RLU OSI_BIT(2) +#define XLGPCS_SR_PCS_CTRL1_SS5_2 OSI_BIT(2) | OSI_BIT(4) +#define XLGPCS_SR_PCS_CTRL1_SS5_2_MASK OSI_BIT(5) | OSI_BIT(4) | \ + OSI_BIT(3) | OSI_BIT(2) +#define XLGPCS_SR_PCS_CTRL2_PCS_TYPE_SEL OSI_BIT(2) | OSI_BIT(1) | \ + OSI_BIT(0) +#define XLGPCS_SR_PCS_CTRL2_PCS_TYPE_SEL_MASK OSI_BIT(3) | OSI_BIT(2) | \ + OSI_BIT(1) | OSI_BIT(0) +#define XLGPCS_SR_PMA_CTRL2_PMA_TYPE OSI_BIT(5) | OSI_BIT(4) | \ + OSI_BIT(3) | OSI_BIT(0) +#define XLGPCS_SR_PMA_CTRL2_PMA_TYPE_MASK 0x7F +#define XLGPCS_VR_PCS_DIG_CTRL3_CNS_EN OSI_BIT(0) +#define XLGPCS_VR_PCS_DIG_CTRL1_VR_RST OSI_BIT(15) +#define 
XLGPCS_SR_PMA_KR_FEC_CTRL_FEC_EN OSI_BIT(0) /** @} */ #ifndef OSI_STRIPPED_LIB #define XPCS_VR_XS_PCS_EEE_MCTRL0 0xE0018 #define XPCS_VR_XS_PCS_EEE_MCTRL1 0xE002C +#define XLGPCS_VR_PCS_EEE_MCTRL 0xe0018 +#define XLGPCS_VR_PCS_DIG_STS 0xe0040 #define XPCS_VR_XS_PCS_EEE_MCTRL1_TRN_LPI OSI_BIT(0) #define XPCS_VR_XS_PCS_EEE_MCTRL0_LTX_EN OSI_BIT(0) #define XPCS_VR_XS_PCS_EEE_MCTRL0_LRX_EN OSI_BIT(1) +#define XLGPCS_VR_PCS_DIG_STSLTXRX_STATE (OSI_BIT(15) | OSI_BIT(14) | \ + OSI_BIT(13) | OSI_BIT(12) | \ + OSI_BIT(11) | OSI_BIT(10)) + #endif /* !OSI_STRIPPED_LIB */ /** @@ -124,8 +173,11 @@ nve32_t xpcs_init(struct osi_core_priv_data *osi_core); nve32_t xpcs_start(struct osi_core_priv_data *osi_core); +nve32_t xlgpcs_init(struct osi_core_priv_data *osi_core); +nve32_t xlgpcs_start(struct osi_core_priv_data *osi_core); #ifndef OSI_STRIPPED_LIB nve32_t xpcs_eee(struct osi_core_priv_data *osi_core, nveu32_t en_dis); +nve32_t xlgpcs_eee(struct osi_core_priv_data *osi_core, nveu32_t en_dis); #endif /* !OSI_STRIPPED_LIB */ /** diff --git a/osi/dma/dma_local.h b/osi/dma/dma_local.h index 0887ae2..6f80648 100644 --- a/osi/dma/dma_local.h +++ b/osi/dma/dma_local.h @@ -41,6 +41,7 @@ /** * @brief validate_dma_mac_ver_update_chans - Validates mac version and update chan * + * @param[in] mac: MAC HW type. * @param[in] mac_ver: MAC version read. * @param[out] num_max_chans: Maximum channel number. * @param[out] l_mac_ver: local mac version. 
@@ -56,10 +57,16 @@ * @retval 0 - for not Valid MAC * @retval 1 - for Valid MAC */ -static inline nve32_t validate_dma_mac_ver_update_chans(nveu32_t mac_ver, +static inline nve32_t validate_dma_mac_ver_update_chans(nveu32_t mac, + nveu32_t mac_ver, nveu32_t *num_max_chans, nveu32_t *l_mac_ver) { + const nveu32_t max_dma_chan[OSI_MAX_MAC_IP_TYPES] = { + OSI_EQOS_MAX_NUM_CHANS, + OSI_MGBE_T23X_MAX_NUM_CHANS, + OSI_MGBE_MAX_NUM_CHANS + }; nve32_t ret; switch (mac_ver) { @@ -69,17 +76,21 @@ static inline nve32_t validate_dma_mac_ver_update_chans(nveu32_t mac_ver, *l_mac_ver = MAC_CORE_VER_TYPE_EQOS; ret = 1; break; -#endif +#endif /* !OSI_STRIPPED_LIB */ case OSI_EQOS_MAC_5_30: *num_max_chans = OSI_EQOS_MAX_NUM_CHANS; *l_mac_ver = MAC_CORE_VER_TYPE_EQOS_5_30; ret = 1; break; case OSI_MGBE_MAC_3_10: + //TBD: T264 uFPGA reports mac version 3.2 + case OSI_MGBE_MAC_3_20: + case OSI_MGBE_MAC_4_20: #ifndef OSI_STRIPPED_LIB case OSI_MGBE_MAC_4_00: #endif /* !OSI_STRIPPED_LIB */ - *num_max_chans = OSI_MGBE_MAX_NUM_CHANS; + //TBD: T264 number of dma channels? + *num_max_chans = max_dma_chan[mac]; *l_mac_ver = MAC_CORE_VER_TYPE_MGBE; ret = 1; break; @@ -135,10 +146,13 @@ static inline void osi_dma_writel(nveu32_t val, void *addr) */ #define CHAN_START_POSITION 6U #define PKT_ID_CNT ((nveu32_t)1 << CHAN_START_POSITION) +#define PKT_ID_CNT_T264 ((nveu32_t)1 << 10) /* First 6 bytes of idx and last 4 bytes of chan(+1 to avoid pkt_id to be 0) */ #define INC_TX_TS_PKTID(idx) ((idx) = (((idx) & 0x7FFFFFFFU) + 1U)) #define GET_TX_TS_PKTID(idx, c) (((idx) & (PKT_ID_CNT - 1U)) | \ (((c) + 1U) << CHAN_START_POSITION)) +/* T264 has saperate logic to tell vdma number so we can use all 10 bits for pktid */ +#define GET_TX_TS_PKTID_T264(idx) ((++(idx)) & (PKT_ID_CNT_T264 - 1U)) /** @} */ /** @@ -210,8 +224,11 @@ struct dma_local { * PacketID for PTP TS. * MSB 4-bits of channel number and LSB 6-bits of local * index(PKT_ID_CNT). 
+ * In T264, it is 9 bits PKTID */ nveu32_t pkt_id; + /** VDMA number for T264 */ + nveu32_t vdma_id; /** Flag to represent OSI DMA software init done */ nveu32_t init_done; /** Holds the MAC version of MAC controller */ @@ -329,7 +346,9 @@ static inline void update_rx_tail_ptr(const struct osi_dma_priv_data *const osi_ nveu32_t dma_chan, nveu64_t tailptr) { - nveu32_t chan = dma_chan & 0xFU; + const nveu32_t chan_mask[OSI_MAX_MAC_IP_TYPES] = {0xFU, 0xFU, 0x3FU}; + + nveu32_t chan = dma_chan & chan_mask[osi_dma->mac]; const nveu32_t tail_ptr_reg[OSI_MAX_MAC_IP_TYPES] = { EQOS_DMA_CHX_RDTP(chan), MGBE_DMA_CHX_RDTLP(chan), diff --git a/osi/dma/mgbe_dma.h b/osi/dma/mgbe_dma.h index a44858e..41c222c 100644 --- a/osi/dma/mgbe_dma.h +++ b/osi/dma/mgbe_dma.h @@ -57,6 +57,8 @@ #define MGBE_DMA_CHX_RDLH(x) ((0x0080U * (x)) + 0x3118U) #define MGBE_DMA_CHX_RDLA(x) ((0x0080U * (x)) + 0x311CU) #define MGBE_DMA_CHX_RDTLP(x) ((0x0080U * (x)) + 0x312CU) +#define MGBE_DMA_CHX_RX_DESC_WR_RNG_OFFSET(x) ((0x0080U * (x)) + 0x317CU) + /** @} */ /** @} */ @@ -71,6 +73,12 @@ #define MGBE_DMA_CHX_RX_WDT_RWTU 2048U #define MGBE_DMA_CHX_RX_WDT_RWTU_2048_CYCLE 0x3000U #define MGBE_DMA_CHX_RX_WDT_RWTU_MASK 0x3000U +#define MGBE_DMA_CHX_RX_WDT_ITW_MASK 0x7C000000U +#define MGBE_DMA_CHX_RX_WDT_ITW_SHIFT 26U +#define MGBE_DMA_CHX_RX_WDT_ITW_MAX 0x1FU +#define MGBE_DMA_CHX_RX_WDT_ITW_DEFAULT 1100U +#define MGBE_DMA_CHX_RX_WDT_ITCU 256U + #ifdef OSI_DEBUG #define MGBE_DMA_CHX_INTR_TBUE OSI_BIT(2) #define MGBE_DMA_CHX_INTR_RBUE OSI_BIT(7) @@ -92,6 +100,8 @@ #define MGBE_TX_VDMA_TC_SHIFT 4 #define MGBE_RX_VDMA_TC_MASK (OSI_BIT(28) | OSI_BIT(29) | OSI_BIT(30)) #define MGBE_RX_VDMA_TC_SHIFT 28 +#define MGBE_RX_DESC_WR_RNG_RWDC_SHIFT 16 + /** @} */ /** diff --git a/osi/dma/osi_dma.c b/osi/dma/osi_dma.c index 5b8d03d..fe0ec6d 100644 --- a/osi/dma/osi_dma.c +++ b/osi/dma/osi_dma.c @@ -33,6 +33,44 @@ #endif /* OSI_DEBUG */ #include "hw_common.h" +#if 1 // copied from osi/core/common.h + +/** + * @brief MTL 
Q size depth helper macro + */ +#define Q_SZ_DEPTH(x) (((x) * 1024U) / (MGBE_AXI_DATAWIDTH / 8U)) + +/* PBL values */ +//redefined #define MGBE_DMA_CHX_MAX_PBL 32U +#define MGBE_DMA_CHX_PBL_16 16U +#define MGBE_DMA_CHX_PBL_8 8U +#define MGBE_DMA_CHX_PBL_4 4U +#define MGBE_DMA_CHX_PBL_1 1U + +static inline nveu32_t osi_valid_pbl_value(nveu32_t pbl_value) +{ + nveu32_t allowed_pbl; + nveu32_t pbl; + + /* 8xPBL mode is set */ + pbl = pbl_value / 8U; + + if (pbl >= MGBE_DMA_CHX_MAX_PBL) { + allowed_pbl = MGBE_DMA_CHX_MAX_PBL; + } else if (pbl >= MGBE_DMA_CHX_PBL_16) { + allowed_pbl = MGBE_DMA_CHX_PBL_16; + } else if (pbl >= MGBE_DMA_CHX_PBL_8) { + allowed_pbl = MGBE_DMA_CHX_PBL_8; + } else if (pbl >= MGBE_DMA_CHX_PBL_4) { + allowed_pbl = MGBE_DMA_CHX_PBL_4; + } else { + allowed_pbl = MGBE_DMA_CHX_PBL_1; + } + + return allowed_pbl; +} +#endif + /** * @brief g_dma - DMA local data array. */ @@ -470,7 +508,8 @@ done: } static inline void start_dma(const struct osi_dma_priv_data *const osi_dma, nveu32_t dma_chan) { - nveu32_t chan = dma_chan & 0xFU; + const nveu32_t chan_mask[OSI_MAX_MAC_IP_TYPES] = {0xFU, 0xFU, 0x3FU}; + nveu32_t chan = dma_chan & chan_mask[osi_dma->mac]; const nveu32_t tx_dma_reg[OSI_MAX_MAC_IP_TYPES] = { EQOS_DMA_CHX_TX_CTRL(chan), MGBE_DMA_CHX_TX_CTRL(chan), @@ -498,8 +537,10 @@ static inline void start_dma(const struct osi_dma_priv_data *const osi_dma, nveu static nve32_t init_dma_channel(const struct osi_dma_priv_data *const osi_dma, nveu32_t dma_chan) { + const nveu32_t chan_mask[OSI_MAX_MAC_IP_TYPES] = {0xFU, 0xFU, 0x3FU}; + nveu32_t pbl = 0; nveu32_t pdma_chan = 0xFFU; - nveu32_t chan = dma_chan & 0xFU; + nveu32_t chan = dma_chan & chan_mask[osi_dma->mac]; nveu32_t riwt = osi_dma->rx_riwt & 0xFFFU; const nveu32_t intr_en_reg[OSI_MAX_MAC_IP_TYPES] = { EQOS_DMA_CHX_INTR_ENA(chan), @@ -532,7 +573,8 @@ static nve32_t init_dma_channel(const struct osi_dma_priv_data *const osi_dma, }; const nveu32_t rx_pbl[2] = { EQOS_DMA_CHX_RX_CTRL_RXPBL_RECOMMENDED, 
- ((MGBE_RXQ_SIZE / osi_dma->num_dma_chans) / 2U) + ((Q_SZ_DEPTH(MGBE_RXQ_SIZE/OSI_MGBE_MAX_NUM_QUEUES) / + osi_dma->num_dma_chans) / 2U) }; const nveu32_t rwt_val[OSI_MAX_MAC_IP_TYPES] = { (((riwt * (EQOS_AXI_CLK_FREQ / OSI_ONE_MEGA_HZ)) / @@ -558,8 +600,7 @@ static nve32_t init_dma_channel(const struct osi_dma_priv_data *const osi_dma, DMA_CHX_TX_CTRL_TSE }; const nveu32_t owrq = (MGBE_DMA_CHX_RX_CNTRL2_OWRQ_MCHAN / osi_dma->num_dma_chans); - //TBD: owrq_arr add more entries for T264? - const nveu32_t owrq_arr[OSI_MGBE_MAX_NUM_CHANS] = { + const nveu32_t owrq_arr[OSI_MGBE_T23X_MAX_NUM_CHANS] = { MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN, owrq, owrq, owrq, owrq, owrq, owrq, owrq, owrq, owrq }; @@ -620,12 +661,8 @@ static nve32_t init_dma_channel(const struct osi_dma_priv_data *const osi_dma, * as the TxPBL else we should be using the value whcih we get after * calculation by using above formula */ - if (tx_pbl[osi_dma->mac] >= MGBE_DMA_CHX_MAX_PBL) { - val |= MGBE_DMA_CHX_MAX_PBL_VAL; - } else { - val |= ((tx_pbl[osi_dma->mac] / 8U) << - MGBE_DMA_CHX_CTRL_PBL_SHIFT); - } + pbl = osi_valid_pbl_value(tx_pbl[osi_dma->mac]); + val |= (pbl << MGBE_DMA_CHX_CTRL_PBL_SHIFT); } else if (osi_dma->mac == OSI_MAC_HW_MGBE_T26X) { /* Map Tx VDMA's to TC. TC and PDMA mapped 1 to 1 */ val &= ~MGBE_TX_VDMA_TC_MASK; @@ -648,12 +685,8 @@ static nve32_t init_dma_channel(const struct osi_dma_priv_data *const osi_dma, if (osi_dma->mac == OSI_MAC_HW_EQOS) { val |= rx_pbl[osi_dma->mac]; } else if (osi_dma->mac == OSI_MAC_HW_MGBE){ - if (rx_pbl[osi_dma->mac] >= MGBE_DMA_CHX_MAX_PBL) { - val |= MGBE_DMA_CHX_MAX_PBL_VAL; - } else { - val |= ((rx_pbl[osi_dma->mac] / 8U) << - MGBE_DMA_CHX_CTRL_PBL_SHIFT); - } + pbl = osi_valid_pbl_value(rx_pbl[osi_dma->mac]); + val |= (pbl << MGBE_DMA_CHX_CTRL_PBL_SHIFT); } else if (osi_dma->mac == OSI_MAC_HW_MGBE_T26X) { /* Map Rx VDMA's to TC. 
TC and PDMA mapped 1 to 1 */ val &= ~MGBE_RX_VDMA_TC_MASK; @@ -671,7 +704,7 @@ static nve32_t init_dma_channel(const struct osi_dma_priv_data *const osi_dma, val &= ~DMA_CHX_RX_WDT_RWT_MASK; val |= rwt_val[osi_dma->mac]; osi_dma_writel(val, (nveu8_t *)osi_dma->base + - rx_wdt_reg[osi_dma->mac]); + rx_wdt_reg[osi_dma->mac]); val = osi_dma_readl((nveu8_t *)osi_dma->base + rx_wdt_reg[osi_dma->mac]); @@ -777,7 +810,7 @@ nve32_t osi_hw_dma_init(struct osi_dma_priv_data *osi_dma) l_dma->mac_ver = osi_dma_readl((nveu8_t *)osi_dma->base + MAC_VERSION) & MAC_VERSION_SNVER_MASK; - if (validate_dma_mac_ver_update_chans(l_dma->mac_ver, + if (validate_dma_mac_ver_update_chans(osi_dma->mac, l_dma->mac_ver, &l_dma->num_max_chans, &l_dma->l_mac_ver) == 0) { OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, @@ -825,7 +858,8 @@ fail: static inline void stop_dma(const struct osi_dma_priv_data *const osi_dma, nveu32_t dma_chan) { - nveu32_t chan = dma_chan & 0xFU; + const nveu32_t chan_mask[OSI_MAX_MAC_IP_TYPES] = {0xFU, 0xFU, 0x3FU}; + nveu32_t chan = dma_chan & chan_mask[osi_dma->mac]; const nveu32_t dma_tx_reg[OSI_MAX_MAC_IP_TYPES] = { EQOS_DMA_CHX_TX_CTRL(chan), MGBE_DMA_CHX_TX_CTRL(chan), @@ -850,6 +884,71 @@ static inline void stop_dma(const struct osi_dma_priv_data *const osi_dma, osi_dma_writel(val, (nveu8_t *)osi_dma->base + dma_rx_reg[osi_dma->mac]); } +static inline void set_rx_riit_dma( + const struct osi_dma_priv_data *const osi_dma, + nveu32_t chan, nveu32_t riit) +{ + const nveu32_t rx_wdt_reg[OSI_MAX_MAC_IP_TYPES] = { + EQOS_DMA_CHX_RX_WDT(chan), + MGBE_DMA_CHX_RX_WDT(chan), + MGBE_DMA_CHX_RX_WDT(chan) + }; + /* riit is in ns */ + const nveu32_t itw_val = { + (((riit * ((nveu32_t)MGBE_AXI_CLK_FREQ / OSI_ONE_MEGA_HZ)) / + (MGBE_DMA_CHX_RX_WDT_ITCU * OSI_MSEC_PER_SEC)) + & MGBE_DMA_CHX_RX_WDT_ITW_MAX) + }; + nveu32_t val; + + if (osi_dma->use_riit != OSI_DISABLE && + osi_dma->mac == OSI_MAC_HW_MGBE_T26X) { + val = osi_dma_readl((nveu8_t *)osi_dma->base + + 
rx_wdt_reg[osi_dma->mac]); + val &= ~MGBE_DMA_CHX_RX_WDT_ITW_MASK; + val |= (itw_val << MGBE_DMA_CHX_RX_WDT_ITW_SHIFT); + osi_dma_writel(val, (nveu8_t *)osi_dma->base + + rx_wdt_reg[osi_dma->mac]); + } + + return; +} + +static inline void set_rx_riit( + const struct osi_dma_priv_data *const osi_dma, nveu32_t speed) +{ + nveu32_t i, chan, riit; + nveu32_t found =OSI_DISABLE; + + for (i = 0; i < osi_dma->num_of_riit; i++) { + if (osi_dma->rx_riit[i].speed == speed) { + riit = osi_dma->rx_riit[i].riit; + found = OSI_ENABLE; + break; + } + } + + if (found != OSI_ENABLE) { + /* use default ~1us value */ + riit = MGBE_DMA_CHX_RX_WDT_ITW_DEFAULT; + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, + "Invalid speed value, using default riit 1us\n", + speed); + } + + /* riit is in nsec */ + if ((riit > (osi_dma->rx_riwt * OSI_MSEC_PER_SEC))) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, + "Invalid riit value, using default 1us\n", riit); + } + + for (i = 0; i < osi_dma->num_dma_chans; i++) { + chan = osi_dma->dma_chans[i]; + set_rx_riit_dma(osi_dma, chan, riit); + } + return; +} + nve32_t osi_hw_dma_deinit(struct osi_dma_priv_data *osi_dma) { struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; @@ -1331,7 +1430,7 @@ fail: return ret; } -#ifdef OSI_DEBUG + nve32_t osi_dma_ioctl(struct osi_dma_priv_data *osi_dma) { struct dma_local *l_dma = (struct dma_local *)osi_dma; @@ -1347,6 +1446,7 @@ nve32_t osi_dma_ioctl(struct osi_dma_priv_data *osi_dma) data = &osi_dma->ioctl_data; switch (data->cmd) { +#ifdef OSI_DEBUG case OSI_DMA_IOCTL_CMD_REG_DUMP: reg_dump(osi_dma); break; @@ -1356,6 +1456,10 @@ nve32_t osi_dma_ioctl(struct osi_dma_priv_data *osi_dma) case OSI_DMA_IOCTL_CMD_DEBUG_INTR_CONFIG: l_dma->ops_p->debug_intr_config(osi_dma); break; +#endif /* OSI_DEBUG */ + case OSI_DMA_IOCTL_CMD_RX_RIIT_CONFIG: + set_rx_riit(osi_dma, data->arg_u32); + break; default: OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA: Invalid IOCTL command", 0ULL); @@ -1367,7 +1471,6 @@ 
nve32_t osi_dma_ioctl(struct osi_dma_priv_data *osi_dma) #endif /* OSI_CL_FTRACE */ return 0; } -#endif /* OSI_DEBUG */ #ifndef OSI_STRIPPED_LIB diff --git a/osi/dma/osi_dma_txrx.c b/osi/dma/osi_dma_txrx.c index 41ff6c2..d5bb480 100644 --- a/osi/dma/osi_dma_txrx.c +++ b/osi/dma/osi_dma_txrx.c @@ -194,6 +194,42 @@ static inline void check_for_more_data_avail(struct osi_rx_ring *rx_ring, nve32_ #ifdef OSI_CL_FTRACE nveu32_t osi_process_rx_completions_cnt = 0; #endif /* OSI_CL_FTRACE */ + +/** + * @brief compltd_rxdesc_cnt - number of Rx descriptors completed by HW + * + * @note + * Algorithm: + * - This routine will be invoked by OSI layer internally to get the + * available Rx descriptor to process by SW. + * + * @note + * API Group: + * - Initialization: No + * - Run time: Yes + * - De-initialization: No + * + * @param[in, out] osi_dma: Pointer to OSI DMA private data structure. + * @param[in] chan: DMA channel number for which stats should be incremented. + */ +static inline nveu32_t compltd_rx_desc_cnt(struct osi_dma_priv_data *osi_dma, + nveu32_t chan) +{ + struct osi_rx_ring *rx_ring = osi_dma->rx_ring[chan]; + nveu32_t value, rx_desc_wr_idx, descr_compltd; + + value = osi_dma_readl((nveu8_t *)osi_dma->base + + MGBE_DMA_CHX_RX_DESC_WR_RNG_OFFSET(chan)); + /* completed desc write back offset */ + rx_desc_wr_idx = ((value >> MGBE_RX_DESC_WR_RNG_RWDC_SHIFT ) & + (osi_dma->rx_ring_sz - 1)); + descr_compltd = (rx_desc_wr_idx - rx_ring->cur_rx_idx) & + (osi_dma->rx_ring_sz - 1U); + /* offset/index start from 0, so add 1 to get final count */ + descr_compltd += 1U; + return descr_compltd; +} + nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma, nveu32_t chan, nve32_t budget, nveu32_t *more_data_avail) @@ -209,6 +245,7 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma, nve32_t received_resv = 0; #endif /* !OSI_STRIPPED_LIB */ nve32_t ret = 0; + nveu32_t rx_desc_compltd; #ifdef OSI_CL_FTRACE if ((osi_process_rx_completions_cnt % 
1000) == 0) @@ -229,6 +266,11 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma, /* Reset flag to indicate if more Rx frames available to OSD layer */ *more_data_avail = OSI_NONE; + if (osi_dma->mac == OSI_MAC_HW_MGBE_T26X) { + rx_desc_compltd = compltd_rx_desc_cnt(osi_dma, chan); + budget = (budget > ((nve32_t)rx_desc_compltd)? ((nve32_t)rx_desc_compltd): budget); + } + while ((received < budget) #ifndef OSI_STRIPPED_LIB && (received_resv < budget) @@ -298,9 +340,9 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma, process_rx_desc(osi_dma, rx_ring, rx_desc, rx_swcx, rx_pkt_cx, chan, rx_ring_mask); #ifndef OSI_STRIPPED_LIB - osi_dma->dstats.q_rx_pkt_n[chan] = + osi_dma->dstats.chan_rx_pkt_n[chan] = dma_update_stats_counter( - osi_dma->dstats.q_rx_pkt_n[chan], + osi_dma->dstats.chan_rx_pkt_n[chan], 1UL); osi_dma->dstats.rx_pkt_n = dma_update_stats_counter(osi_dma->dstats.rx_pkt_n, 1UL); @@ -341,8 +383,8 @@ fail: static inline void inc_tx_pkt_stats(struct osi_dma_priv_data *osi_dma, nveu32_t chan) { - osi_dma->dstats.q_tx_pkt_n[chan] = - dma_update_stats_counter(osi_dma->dstats.q_tx_pkt_n[chan], 1UL); + osi_dma->dstats.chan_tx_pkt_n[chan] = + dma_update_stats_counter(osi_dma->dstats.chan_tx_pkt_n[chan], 1UL); osi_dma->dstats.tx_pkt_n = dma_update_stats_counter(osi_dma->dstats.tx_pkt_n, 1UL); } @@ -549,7 +591,7 @@ static inline nve32_t process_last_desc(struct osi_dma_priv_data *osi_dma, /* check for Last Descriptor */ if ((tx_desc->tdes3 & TDES3_LD) == TDES3_LD) { if (((tx_desc->tdes3 & TDES3_ES_BITS) != 0U) && - (osi_dma->mac != OSI_MAC_HW_MGBE)) { + (osi_dma->mac == OSI_MAC_HW_EQOS)) { txdone_pkt_cx->flags |= OSI_TXDONE_CX_ERROR; #ifndef OSI_STRIPPED_LIB /* fill packet error stats */ @@ -633,7 +675,7 @@ nve32_t osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, processed = process_last_desc(osi_dma, tx_desc, txdone_pkt_cx, processed); #endif - if (osi_dma->mac != OSI_MAC_HW_MGBE) { + if (osi_dma->mac == 
OSI_MAC_HW_EQOS) { update_tx_done_ts(tx_desc, txdone_pkt_cx); } else if (((tx_swcx->flags & OSI_PKT_CX_PTP) == OSI_PKT_CX_PTP) && // if not master in onestep mode @@ -642,6 +684,9 @@ nve32_t osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, OSI_ENABLE) && ((tx_desc->tdes3 & TDES3_CTXT) == 0U)) { txdone_pkt_cx->pktid = tx_swcx->pktid; + if (osi_dma->mac == OSI_MAC_HW_MGBE_T26X) { + txdone_pkt_cx->vdmaid = tx_swcx->vdmaid; + } txdone_pkt_cx->flags |= OSI_TXDONE_CX_TS_DELAYED; } else { /* Do nothing here */ @@ -1073,6 +1118,7 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, struct osi_tx_ring *tx_ring, nveu32_t dma_chan) { + const nveu32_t chan_mask[OSI_MAX_MAC_IP_TYPES] = {0xFU, 0xFU, 0x3FU}; struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; struct osi_tx_pkt_cx *tx_pkt_cx = OSI_NULL; struct osi_tx_desc *first_desc = OSI_NULL; @@ -1084,7 +1130,7 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, #ifdef OSI_DEBUG nveu32_t f_idx = tx_ring->cur_tx_idx; #endif /* OSI_DEBUG */ - nveu32_t chan = dma_chan & 0xFU; + nveu32_t chan = dma_chan & chan_mask[osi_dma->mac]; const nveu32_t tail_ptr_reg[OSI_MAX_MAC_IP_TYPES] = { EQOS_DMA_CHX_TDTP(chan), MGBE_DMA_CHX_TDTLP(chan), @@ -1092,6 +1138,7 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, }; nve32_t cntx_desc_consumed; nveu32_t pkt_id = 0x0U; + nveu32_t vdma_id = 0x0U; nveu32_t desc_cnt = 0U; nveu64_t tailptr; nveu32_t entry = 0U; @@ -1140,11 +1187,16 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, /* packet ID for Onestep is 0x0 always */ pkt_id = OSI_NONE; } else { - INC_TX_TS_PKTID(l_dma->pkt_id); - pkt_id = GET_TX_TS_PKTID(l_dma->pkt_id, chan); + if (osi_dma->mac != OSI_MAC_HW_MGBE_T26X) { + pkt_id = GET_TX_TS_PKTID(l_dma->pkt_id, chan); + } else { + pkt_id = GET_TX_TS_PKTID_T264(l_dma->pkt_id); + vdma_id = chan; + tx_desc->tdes0 = (vdma_id << OSI_PTP_VDMA_SHIFT); + } } /* update packet id */ - tx_desc->tdes0 = pkt_id; + tx_desc->tdes0 |= pkt_id; } 
INCR_TX_DESC_INDEX(entry, osi_dma->tx_ring_sz); @@ -1254,7 +1306,8 @@ fail: static nve32_t rx_dma_desc_initialization(const struct osi_dma_priv_data *const osi_dma, nveu32_t dma_chan) { - nveu32_t chan = dma_chan & 0xFU; + const nveu32_t chan_mask[OSI_MAX_MAC_IP_TYPES] = {0xFU, 0xFU, 0x3FU}; + nveu32_t chan = dma_chan & chan_mask[osi_dma->mac]; const nveu32_t start_addr_high_reg[OSI_MAX_MAC_IP_TYPES] = { EQOS_DMA_CHX_RDLH(chan), MGBE_DMA_CHX_RDLH(chan), @@ -1399,7 +1452,8 @@ static inline void set_tx_ring_len_and_start_addr(const struct osi_dma_priv_data nveu32_t dma_chan, nveu32_t len) { - nveu32_t chan = dma_chan & 0xFU; + const nveu32_t chan_mask[OSI_MAX_MAC_IP_TYPES] = {0xFU, 0xFU, 0x3FU}; + nveu32_t chan = dma_chan & chan_mask[osi_dma->mac]; const nveu32_t ring_len_reg[OSI_MAX_MAC_IP_TYPES] = { EQOS_DMA_CHX_TDRL(chan), MGBE_DMA_CHX_TX_CNTRL2(chan),