diff --git a/include/config.tmk b/include/config.tmk new file mode 100644 index 0000000..8df4e70 --- /dev/null +++ b/include/config.tmk @@ -0,0 +1,44 @@ +# copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+# +############################################################################### + +# Set the Makefile config macros to zero by default +OSI_STRIPPED_LIB := 0 +OSI_DEBUG := 0 +DEBUG_MACSEC := 0 + +ifeq ($(NV_BUILD_CONFIGURATION_IS_SAFETY),1) + NV_COMPONENT_CFLAGS += -DOSI_STRIPPED_LIB + OSI_STRIPPED_LIB := 1 +else + NV_COMPONENT_CFLAGS += -DOSI_DEBUG + NV_COMPONENT_CFLAGS += -DDEBUG_MACSEC + OSI_DEBUG := 1 + DEBUG_MACSEC := 1 +endif +NV_COMPONENT_CFLAGS += -DHSI_SUPPORT +NV_COMPONENT_CFLAGS += -DMACSEC_SUPPORT +NV_COMPONENT_CFLAGS += -DLOG_OSI + +#NV_COMPONENT_CFLAGS += -DMACSEC_KEY_PROGRAM +HSI_SUPPORT := 1 +MACSEC_SUPPORT := 1 +ccflags-y += $(NV_COMPONENT_CFLAGS) diff --git a/include/ivc_core.h b/include/ivc_core.h index e8da34f..2be9f17 100644 --- a/include/ivc_core.h +++ b/include/ivc_core.h @@ -38,7 +38,7 @@ /** * @brief IVC commands between OSD & OSI. */ -typedef enum ivc_cmd { +typedef enum { core_init = 1, core_deinit, write_phy_reg, @@ -46,8 +46,7 @@ typedef enum ivc_cmd { handle_ioctl, init_macsec, deinit_macsec, - handle_ns_irq_macsec, - handle_s_irq_macsec, + handle_irq_macsec, lut_config_macsec, kt_config_macsec, cipher_config, @@ -58,13 +57,15 @@ typedef enum ivc_cmd { dbg_buf_config_macsec, dbg_events_config_macsec, macsec_get_sc_lut_key_index, - macsec_update_mtu_size, + nvethmgr_get_status, + nvethmgr_verify_ts, + nvethmgr_get_avb_perf, }ivc_cmd; /** * @brief IVC arguments structure. */ -typedef struct ivc_args { +typedef struct { /** Number of arguments */ nveu32_t count; /** arguments */ @@ -74,7 +75,7 @@ typedef struct ivc_args { /** * @brief IVC core argument structure. 
*/ -typedef struct ivc_core_args { +typedef struct { /** Number of MTL queues enabled in MAC */ nveu32_t num_mtl_queues; /** Array of MTL queues */ @@ -85,8 +86,6 @@ typedef struct ivc_core_args { nveu32_t rxq_prio[OSI_EQOS_MAX_NUM_CHANS]; /** Ethernet MAC address */ nveu8_t mac_addr[OSI_ETH_ALEN]; - /** Tegra Pre-si platform info */ - nveu32_t pre_si; /** VLAN tag stripping enable(1) or disable(0) */ nveu32_t strip_vlan_tag; /** pause frame support */ @@ -103,15 +102,15 @@ typedef struct ivc_core_args { * @brief macsec config structure. */ #ifdef MACSEC_SUPPORT -typedef struct macsec_config { +typedef struct { /** MACsec secure channel basic information */ struct osi_macsec_sc_info sc_info; /** MACsec enable or disable */ - unsigned int enable; + nveu32_t enable; /** MACsec controller */ - unsigned short ctlr; + nveu16_t ctlr; /** MACsec KT index */ - unsigned short kt_idx; + nveu16_t kt_idx; /** MACsec KT index */ nveu32_t key_index; /** MACsec SCI */ @@ -133,19 +132,20 @@ typedef struct ivc_msg_common { /** message count, used for debug */ nveu32_t count; + /** IVC argument structure */ + ivc_args args; + union { - /** IVC argument structure */ - ivc_args args; -#ifndef OSI_STRIPPED_LIB /** avb algorithm structure */ struct osi_core_avb_algorithm avb_algo; -#endif /** OSI filter structure */ struct osi_filter filter; /** OSI HW features */ struct osi_hw_features hw_feat; /** MMC counters */ - struct osi_mmc_counters mmc; + struct osi_mmc_counters mmc_s; + /** OSI stats counters */ + struct osi_stats stats_s; /** core argument structure */ ivc_core_args init_args; /** ioctl command structure */ @@ -186,14 +186,4 @@ typedef struct ivc_msg_common { */ nve32_t osd_ivc_send_cmd(void *priv, ivc_msg_common_t *ivc_buf, nveu32_t len); - -/** - * @brief ivc_get_core_safety_config - Get core safety config - * - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: Yes - */ -void *ivc_get_core_safety_config(void); #endif /* IVC_CORE_H */ diff 
--git a/include/mmc.h b/include/mmc.h index 0d3c7ab..3b0a864 100644 --- a/include/mmc.h +++ b/include/mmc.h @@ -23,568 +23,9 @@ #ifndef INCLUDED_MMC_H #define INCLUDED_MMC_H -#include "../osi/common/type.h" +#include #include "osi_common.h" -/** - * @brief osi_mmc_counters - The structure to hold RMON counter values - */ -struct osi_mmc_counters { - /** This counter provides the number of bytes transmitted, exclusive of - * preamble and retried bytes, in good and bad packets */ - nveu64_t mmc_tx_octetcount_gb; - /** This counter provides upper 32 bits of transmitted octet count */ - nveu64_t mmc_tx_octetcount_gb_h; - /** This counter provides the number of good and - * bad packets transmitted, exclusive of retried packets */ - nveu64_t mmc_tx_framecount_gb; - /** This counter provides upper 32 bits of transmitted good and bad - * packets count */ - nveu64_t mmc_tx_framecount_gb_h; - /** This counter provides number of good broadcast - * packets transmitted */ - nveu64_t mmc_tx_broadcastframe_g; - /** This counter provides upper 32 bits of transmitted good broadcast - * packets count */ - nveu64_t mmc_tx_broadcastframe_g_h; - /** This counter provides number of good multicast - * packets transmitted */ - nveu64_t mmc_tx_multicastframe_g; - /** This counter provides upper 32 bits of transmitted good multicast - * packet count */ - nveu64_t mmc_tx_multicastframe_g_h; - /** This counter provides the number of good and bad packets - * transmitted with length 64 bytes, exclusive of preamble and - * retried packets */ - nveu64_t mmc_tx_64_octets_gb; - /** This counter provides upper 32 bits of transmitted 64 octet size - * good and bad packets count */ - nveu64_t mmc_tx_64_octets_gb_h; - /** This counter provides the number of good and bad packets - * transmitted with length 65-127 bytes, exclusive of preamble and - * retried packets */ - nveu64_t mmc_tx_65_to_127_octets_gb; - /** Provides upper 32 bits of transmitted 65-to-127 octet size good and - * bad packets count */ 
- nveu64_t mmc_tx_65_to_127_octets_gb_h; - /** This counter provides the number of good and bad packets - * transmitted with length 128-255 bytes, exclusive of preamble and - * retried packets */ - nveu64_t mmc_tx_128_to_255_octets_gb; - /** This counter provides upper 32 bits of transmitted 128-to-255 - * octet size good and bad packets count */ - nveu64_t mmc_tx_128_to_255_octets_gb_h; - /** This counter provides the number of good and bad packets - * transmitted with length 256-511 bytes, exclusive of preamble and - * retried packets */ - nveu64_t mmc_tx_256_to_511_octets_gb; - /** This counter provides upper 32 bits of transmitted 256-to-511 - * octet size good and bad packets count. */ - nveu64_t mmc_tx_256_to_511_octets_gb_h; - /** This counter provides the number of good and bad packets - * transmitted with length 512-1023 bytes, exclusive of preamble and - * retried packets */ - nveu64_t mmc_tx_512_to_1023_octets_gb; - /** This counter provides upper 32 bits of transmitted 512-to-1023 - * octet size good and bad packets count.*/ - nveu64_t mmc_tx_512_to_1023_octets_gb_h; - /** This counter provides the number of good and bad packets - * transmitted with length 1024-max bytes, exclusive of preamble and - * retried packets */ - nveu64_t mmc_tx_1024_to_max_octets_gb; - /** This counter provides upper 32 bits of transmitted 1024-tomaxsize - * octet size good and bad packets count. 
*/ - nveu64_t mmc_tx_1024_to_max_octets_gb_h; - /** This counter provides the number of good and bad unicast packets */ - nveu64_t mmc_tx_unicast_gb; - /** This counter provides upper 32 bits of transmitted good bad - * unicast packets count */ - nveu64_t mmc_tx_unicast_gb_h; - /** This counter provides the number of good and bad - * multicast packets */ - nveu64_t mmc_tx_multicast_gb; - /** This counter provides upper 32 bits of transmitted good bad - * multicast packets count */ - nveu64_t mmc_tx_multicast_gb_h; - /** This counter provides the number of good and bad - * broadcast packets */ - nveu64_t mmc_tx_broadcast_gb; - /** This counter provides upper 32 bits of transmitted good bad - * broadcast packets count */ - nveu64_t mmc_tx_broadcast_gb_h; - /** This counter provides the number of abort packets due to - * underflow error */ - nveu64_t mmc_tx_underflow_error; - /** This counter provides upper 32 bits of abort packets due to - * underflow error */ - nveu64_t mmc_tx_underflow_error_h; - /** This counter provides the number of successfully transmitted - * packets after a single collision in the half-duplex mode */ - nveu64_t mmc_tx_singlecol_g; - /** This counter provides the number of successfully transmitted - * packets after a multi collision in the half-duplex mode */ - nveu64_t mmc_tx_multicol_g; - /** This counter provides the number of successfully transmitted - * after a deferral in the half-duplex mode */ - nveu64_t mmc_tx_deferred; - /** This counter provides the number of packets aborted because of - * late collision error */ - nveu64_t mmc_tx_latecol; - /** This counter provides the number of packets aborted because of - * excessive (16) collision errors */ - nveu64_t mmc_tx_exesscol; - /** This counter provides the number of packets aborted because of - * carrier sense error (no carrier or loss of carrier) */ - nveu64_t mmc_tx_carrier_error; - /** This counter provides the number of bytes transmitted, - * exclusive of preamble, only in good 
packets */ - nveu64_t mmc_tx_octetcount_g; - /** This counter provides upper 32 bytes of bytes transmitted, - * exclusive of preamble, only in good packets */ - nveu64_t mmc_tx_octetcount_g_h; - /** This counter provides the number of good packets transmitted */ - nveu64_t mmc_tx_framecount_g; - /** This counter provides upper 32 bytes of good packets transmitted */ - nveu64_t mmc_tx_framecount_g_h; - /** This counter provides the number of packets aborted because of - * excessive deferral error - * (deferred for more than two max-sized packet times) */ - nveu64_t mmc_tx_excessdef; - /** This counter provides the number of good Pause - * packets transmitted */ - nveu64_t mmc_tx_pause_frame; - /** This counter provides upper 32 bytes of good Pause - * packets transmitted */ - nveu64_t mmc_tx_pause_frame_h; - /** This counter provides the number of good VLAN packets transmitted */ - nveu64_t mmc_tx_vlan_frame_g; - /** This counter provides upper 32 bytes of good VLAN packets - * transmitted */ - nveu64_t mmc_tx_vlan_frame_g_h; - /** This counter provides the number of packets transmitted without - * errors and with length greater than the maxsize (1,518 or 1,522 bytes - * for VLAN tagged packets; 2000 bytes */ - nveu64_t mmc_tx_osize_frame_g; - /** This counter provides the number of good and bad packets received */ - nveu64_t mmc_rx_framecount_gb; - /** This counter provides upper 32 bytes of good and bad packets - * received */ - nveu64_t mmc_rx_framecount_gb_h; - /** This counter provides the number of bytes received by DWC_ther_qos, - * exclusive of preamble, in good and bad packets */ - nveu64_t mmc_rx_octetcount_gb; - /** This counter provides upper 32 bytes of bytes received by - * DWC_ether_qos, exclusive of preamble, in good and bad packets */ - nveu64_t mmc_rx_octetcount_gb_h; - /** This counter provides the number of bytes received by DWC_ether_qos, - * exclusive of preamble, in good and bad packets */ - nveu64_t mmc_rx_octetcount_g; - /** This counter 
provides upper 32 bytes of bytes received by - * DWC_ether_qos, exclusive of preamble, in good and bad packets */ - nveu64_t mmc_rx_octetcount_g_h; - /** This counter provides the number of good - * broadcast packets received */ - nveu64_t mmc_rx_broadcastframe_g; - /** This counter provides upper 32 bytes of good - * broadcast packets received */ - nveu64_t mmc_rx_broadcastframe_g_h; - /** This counter provides the number of good - * multicast packets received */ - nveu64_t mmc_rx_multicastframe_g; - /** This counter provides upper 32 bytes of good - * multicast packets received */ - nveu64_t mmc_rx_multicastframe_g_h; - /** This counter provides the number of packets - * received with CRC error */ - nveu64_t mmc_rx_crc_error; - /** This counter provides upper 32 bytes of packets - * received with CRC error */ - nveu64_t mmc_rx_crc_error_h; - /** This counter provides the number of packets received with - * alignment (dribble) error. It is valid only in 10/100 mode */ - nveu64_t mmc_rx_align_error; - /** This counter provides the number of packets received with - * runt (length less than 64 bytes and CRC error) error */ - nveu64_t mmc_rx_runt_error; - /** This counter provides the number of giant packets received with - * length (including CRC) greater than 1,518 bytes (1,522 bytes for - * VLAN tagged) and with CRC error */ - nveu64_t mmc_rx_jabber_error; - /** This counter provides the number of packets received with length - * less than 64 bytes, without any errors */ - nveu64_t mmc_rx_undersize_g; - /** This counter provides the number of packets received without error, - * with length greater than the maxsize */ - nveu64_t mmc_rx_oversize_g; - /** This counter provides the number of good and bad packets received - * with length 64 bytes, exclusive of the preamble */ - nveu64_t mmc_rx_64_octets_gb; - /** This counter provides upper 32 bytes of good and bad packets - * received with length 64 bytes, exclusive of the preamble */ - nveu64_t mmc_rx_64_octets_gb_h; 
- /** This counter provides the number of good and bad packets received - * with length 65-127 bytes, exclusive of the preamble */ - nveu64_t mmc_rx_65_to_127_octets_gb; - /** This counter provides upper 32 bytes of good and bad packets - * received with length 65-127 bytes, exclusive of the preamble */ - nveu64_t mmc_rx_65_to_127_octets_gb_h; - /** This counter provides the number of good and bad packets received - * with length 128-255 bytes, exclusive of the preamble */ - nveu64_t mmc_rx_128_to_255_octets_gb; - /** This counter provides upper 32 bytes of good and bad packets - * received with length 128-255 bytes, exclusive of the preamble */ - nveu64_t mmc_rx_128_to_255_octets_gb_h; - /** This counter provides the number of good and bad packets received - * with length 256-511 bytes, exclusive of the preamble */ - nveu64_t mmc_rx_256_to_511_octets_gb; - /** This counter provides upper 32 bytes of good and bad packets - * received with length 256-511 bytes, exclusive of the preamble */ - nveu64_t mmc_rx_256_to_511_octets_gb_h; - /** This counter provides the number of good and bad packets received - * with length 512-1023 bytes, exclusive of the preamble */ - nveu64_t mmc_rx_512_to_1023_octets_gb; - /** This counter provides upper 32 bytes of good and bad packets - * received with length 512-1023 bytes, exclusive of the preamble */ - nveu64_t mmc_rx_512_to_1023_octets_gb_h; - /** This counter provides the number of good and bad packets received - * with length 1024-maxbytes, exclusive of the preamble */ - nveu64_t mmc_rx_1024_to_max_octets_gb; - /** This counter provides upper 32 bytes of good and bad packets - * received with length 1024-maxbytes, exclusive of the preamble */ - nveu64_t mmc_rx_1024_to_max_octets_gb_h; - /** This counter provides the number of good unicast packets received */ - nveu64_t mmc_rx_unicast_g; - /** This counter provides upper 32 bytes of good unicast packets - * received */ - nveu64_t mmc_rx_unicast_g_h; - /** This counter provides 
the number of packets received with length - * error (Length Type field not equal to packet size), for all packets - * with valid length field */ - nveu64_t mmc_rx_length_error; - /** This counter provides upper 32 bytes of packets received with - * length error (Length Type field not equal to packet size), for all - * packets with valid length field */ - nveu64_t mmc_rx_length_error_h; - /** This counter provides the number of packets received with length - * field not equal to the valid packet size (greater than 1,500 but - * less than 1,536) */ - nveu64_t mmc_rx_outofrangetype; - /** This counter provides upper 32 bytes of packets received with - * length field not equal to the valid packet size (greater than 1,500 - * but less than 1,536) */ - nveu64_t mmc_rx_outofrangetype_h; - /** This counter provides the number of good and valid Pause packets - * received */ - nveu64_t mmc_rx_pause_frames; - /** This counter provides upper 32 bytes of good and valid Pause packets - * received */ - nveu64_t mmc_rx_pause_frames_h; - /** This counter provides the number of missed received packets - * because of FIFO overflow in DWC_ether_qos */ - nveu64_t mmc_rx_fifo_overflow; - /** This counter provides upper 32 bytes of missed received packets - * because of FIFO overflow in DWC_ether_qos */ - nveu64_t mmc_rx_fifo_overflow_h; - /** This counter provides the number of good and bad VLAN packets - * received */ - nveu64_t mmc_rx_vlan_frames_gb; - /** This counter provides upper 32 bytes of good and bad VLAN packets - * received */ - nveu64_t mmc_rx_vlan_frames_gb_h; - /** This counter provides the number of packets received with error - * because of watchdog timeout error */ - nveu64_t mmc_rx_watchdog_error; - /** This counter provides the number of packets received with Receive - * error or Packet Extension error on the GMII or MII interface */ - nveu64_t mmc_rx_receive_error; - /** This counter provides the number of packets received with Receive - * error or Packet Extension 
error on the GMII or MII interface */ - nveu64_t mmc_rx_ctrl_frames_g; - /** This counter provides the number of microseconds Tx LPI is asserted - * in the MAC controller */ - nveu64_t mmc_tx_lpi_usec_cntr; - /** This counter provides the number of times MAC controller has - * entered Tx LPI. */ - nveu64_t mmc_tx_lpi_tran_cntr; - /** This counter provides the number of microseconds Rx LPI is asserted - * in the MAC controller */ - nveu64_t mmc_rx_lpi_usec_cntr; - /** This counter provides the number of times MAC controller has - * entered Rx LPI.*/ - nveu64_t mmc_rx_lpi_tran_cntr; - /** This counter provides the number of good IPv4 datagrams received - * with the TCP, UDP, or ICMP payload */ - nveu64_t mmc_rx_ipv4_gd; - /** This counter provides upper 32 bytes of good IPv4 datagrams received - * with the TCP, UDP, or ICMP payload */ - nveu64_t mmc_rx_ipv4_gd_h; - /** RxIPv4 Header Error Packets */ - nveu64_t mmc_rx_ipv4_hderr; - /** RxIPv4 of upper 32 bytes of Header Error Packets */ - nveu64_t mmc_rx_ipv4_hderr_h; - /** This counter provides the number of IPv4 datagram packets received - * that did not have a TCP, UDP, or ICMP payload */ - nveu64_t mmc_rx_ipv4_nopay; - /** This counter provides upper 32 bytes of IPv4 datagram packets - * received that did not have a TCP, UDP, or ICMP payload */ - nveu64_t mmc_rx_ipv4_nopay_h; - /** This counter provides the number of good IPv4 datagrams received - * with fragmentation */ - nveu64_t mmc_rx_ipv4_frag; - /** This counter provides upper 32 bytes of good IPv4 datagrams received - * with fragmentation */ - nveu64_t mmc_rx_ipv4_frag_h; - /** This counter provides the number of good IPv4 datagrams received - * that had a UDP payload with checksum disabled */ - nveu64_t mmc_rx_ipv4_udsbl; - /** This counter provides upper 32 bytes of good IPv4 datagrams received - * that had a UDP payload with checksum disabled */ - nveu64_t mmc_rx_ipv4_udsbl_h; - /** This counter provides the number of good IPv6 datagrams received - * 
with the TCP, UDP, or ICMP payload */ - nveu64_t mmc_rx_ipv6_gd_octets; - /** This counter provides upper 32 bytes of good IPv6 datagrams received - * with the TCP, UDP, or ICMP payload */ - nveu64_t mmc_rx_ipv6_gd_octets_h; - /** This counter provides the number of IPv6 datagrams received - * with header (length or version mismatch) errors */ - nveu64_t mmc_rx_ipv6_hderr_octets; - /** This counter provides the number of IPv6 datagrams received - * with header (length or version mismatch) errors */ - nveu64_t mmc_rx_ipv6_hderr_octets_h; - /** This counter provides the number of IPv6 datagram packets received - * that did not have a TCP, UDP, or ICMP payload */ - nveu64_t mmc_rx_ipv6_nopay_octets; - /** This counter provides upper 32 bytes of IPv6 datagram packets - * received that did not have a TCP, UDP, or ICMP payload */ - nveu64_t mmc_rx_ipv6_nopay_octets_h; - /* Protocols */ - /** This counter provides the number of good IP datagrams received by - * DWC_ether_qos with a good UDP payload */ - nveu64_t mmc_rx_udp_gd; - /** This counter provides upper 32 bytes of good IP datagrams received - * by DWC_ether_qos with a good UDP payload */ - nveu64_t mmc_rx_udp_gd_h; - /** This counter provides the number of good IP datagrams received by - * DWC_ether_qos with a good UDP payload. This counter is not updated - * when the RxIPv4_UDP_Checksum_Disabled_Packets counter is - * incremented */ - nveu64_t mmc_rx_udp_err; - /** This counter provides upper 32 bytes of good IP datagrams received - * by DWC_ether_qos with a good UDP payload. 
This counter is not updated - * when the RxIPv4_UDP_Checksum_Disabled_Packets counter is - * incremented */ - nveu64_t mmc_rx_udp_err_h; - /** This counter provides the number of good IP datagrams received - * with a good TCP payload */ - nveu64_t mmc_rx_tcp_gd; - /** This counter provides the number of good IP datagrams received - * with a good TCP payload */ - nveu64_t mmc_rx_tcp_gd_h; - /** This counter provides upper 32 bytes of good IP datagrams received - * with a good TCP payload */ - nveu64_t mmc_rx_tcp_err; - /** This counter provides upper 32 bytes of good IP datagrams received - * with a good TCP payload */ - nveu64_t mmc_rx_tcp_err_h; - /** This counter provides the number of good IP datagrams received - * with a good ICMP payload */ - nveu64_t mmc_rx_icmp_gd; - /** This counter provides upper 32 bytes of good IP datagrams received - * with a good ICMP payload */ - nveu64_t mmc_rx_icmp_gd_h; - /** This counter provides the number of good IP datagrams received - * whose ICMP payload has a checksum error */ - nveu64_t mmc_rx_icmp_err; - /** This counter provides upper 32 bytes of good IP datagrams received - * whose ICMP payload has a checksum error */ - nveu64_t mmc_rx_icmp_err_h; - /** This counter provides the number of bytes received by DWC_ether_qos - * in good IPv4 datagrams encapsulating TCP, UDP, or ICMP data. - * (Ethernet header, FCS, pad, or IP pad bytes are not included - * in this counter */ - nveu64_t mmc_rx_ipv4_gd_octets; - /** This counter provides upper 32 bytes received by DWC_ether_qos - * in good IPv4 datagrams encapsulating TCP, UDP, or ICMP data. - * (Ethernet header, FCS, pad, or IP pad bytes are not included - * in this counter */ - nveu64_t mmc_rx_ipv4_gd_octets_h; - /** This counter provides the number of bytes received in IPv4 datagram - * with header errors (checksum, length, version mismatch). The value - * in the Length field of IPv4 header is used to update this counter. 
- * (Ethernet header, FCS, pad, or IP pad bytes are not included - * in this counter */ - nveu64_t mmc_rx_ipv4_hderr_octets; - /** This counter provides upper 32 bytes received in IPv4 datagram - * with header errors (checksum, length, version mismatch). The value - * in the Length field of IPv4 header is used to update this counter. - * (Ethernet header, FCS, pad, or IP pad bytes are not included - * in this counter */ - nveu64_t mmc_rx_ipv4_hderr_octets_h; - /** This counter provides the number of bytes received in IPv4 datagram - * that did not have a TCP, UDP, or ICMP payload. The value in the - * Length field of IPv4 header is used to update this counter. - * (Ethernet header, FCS, pad, or IP pad bytes are not included - * in this counter */ - nveu64_t mmc_rx_ipv4_nopay_octets; - /** This counter provides upper 32 bytes received in IPv4 datagram - * that did not have a TCP, UDP, or ICMP payload. The value in the - * Length field of IPv4 header is used to update this counter. - * (Ethernet header, FCS, pad, or IP pad bytes are not included - * in this counter */ - nveu64_t mmc_rx_ipv4_nopay_octets_h; - /** This counter provides the number of bytes received in fragmented - * IPv4 datagrams. The value in the Length field of IPv4 header is - * used to update this counter. (Ethernet header, FCS, pad, or IP pad - * bytes are not included in this counter */ - nveu64_t mmc_rx_ipv4_frag_octets; - /** This counter provides upper 32 bytes received in fragmented - * IPv4 datagrams. The value in the Length field of IPv4 header is - * used to update this counter. (Ethernet header, FCS, pad, or IP pad - * bytes are not included in this counter */ - nveu64_t mmc_rx_ipv4_frag_octets_h; - /** This counter provides the number of bytes received in a UDP segment - * that had the UDP checksum disabled. This counter does not count IP - * Header bytes. 
(Ethernet header, FCS, pad, or IP pad bytes are not - * included in this counter */ - nveu64_t mmc_rx_ipv4_udsbl_octets; - /** This counter provides upper 32 bytes received in a UDP segment - * that had the UDP checksum disabled. This counter does not count IP - * Header bytes. (Ethernet header, FCS, pad, or IP pad bytes are not - * included in this counter */ - nveu64_t mmc_rx_ipv4_udsbl_octets_h; - /** This counter provides the number of bytes received in good IPv6 - * datagrams encapsulating TCP, UDP, or ICMP data. (Ethernet header, - * FCS, pad, or IP pad bytes are not included in this counter */ - nveu64_t mmc_rx_ipv6_gd; - /** This counter provides upper 32 bytes received in good IPv6 - * datagrams encapsulating TCP, UDP, or ICMP data. (Ethernet header, - * FCS, pad, or IP pad bytes are not included in this counter */ - nveu64_t mmc_rx_ipv6_gd_h; - /** This counter provides the number of bytes received in IPv6 datagrams - * with header errors (length, version mismatch). The value in the - * Length field of IPv6 header is used to update this counter. - * (Ethernet header, FCS, pad, or IP pad bytes are not included in - * this counter */ - nveu64_t mmc_rx_ipv6_hderr; - /** This counter provides upper 32 bytes received in IPv6 datagrams - * with header errors (length, version mismatch). The value in the - * Length field of IPv6 header is used to update this counter. - * (Ethernet header, FCS, pad, or IP pad bytes are not included in - * this counter */ - nveu64_t mmc_rx_ipv6_hderr_h; - /** This counter provides the number of bytes received in IPv6 - * datagrams that did not have a TCP, UDP, or ICMP payload. The value - * in the Length field of IPv6 header is used to update this counter. - * (Ethernet header, FCS, pad, or IP pad bytes are not included - * in this counter */ - nveu64_t mmc_rx_ipv6_nopay; - /** This counter provides upper 32 bytes received in IPv6 - * datagrams that did not have a TCP, UDP, or ICMP payload. 
The value - * in the Length field of IPv6 header is used to update this counter. - * (Ethernet header, FCS, pad, or IP pad bytes are not included - * in this counter */ - nveu64_t mmc_rx_ipv6_nopay_h; - /* Protocols */ - /** This counter provides the number of bytes received in a good UDP - * segment. This counter does not count IP header bytes */ - nveu64_t mmc_rx_udp_gd_octets; - /** This counter provides upper 32 bytes received in a good UDP - * segment. This counter does not count IP header bytes */ - nveu64_t mmc_rx_udp_gd_octets_h; - /** This counter provides the number of bytes received in a UDP - * segment that had checksum errors. This counter does not count - * IP header bytes */ - nveu64_t mmc_rx_udp_err_octets; - /** This counter provides upper 32 bytes received in a UDP - * segment that had checksum errors. This counter does not count - * IP header bytes */ - nveu64_t mmc_rx_udp_err_octets_h; - /** This counter provides the number of bytes received in a good - * TCP segment. This counter does not count IP header bytes */ - nveu64_t mmc_rx_tcp_gd_octets; - /** This counter provides upper 32 bytes received in a good - * TCP segment. This counter does not count IP header bytes */ - nveu64_t mmc_rx_tcp_gd_octets_h; - /** This counter provides the number of bytes received in a TCP - * segment that had checksum errors. This counter does not count - * IP header bytes */ - nveu64_t mmc_rx_tcp_err_octets; - /** This counter provides upper 32 bytes received in a TCP - * segment that had checksum errors. This counter does not count - * IP header bytes */ - nveu64_t mmc_rx_tcp_err_octets_h; - /** This counter provides the number of bytes received in a good - * ICMP segment. This counter does not count IP header bytes */ - nveu64_t mmc_rx_icmp_gd_octets; - /** This counter provides upper 32 bytes received in a good - * ICMP segment. 
This counter does not count IP header bytes */ - nveu64_t mmc_rx_icmp_gd_octets_h; - /** This counter provides the number of bytes received in a ICMP - * segment that had checksum errors. This counter does not count - * IP header bytes */ - nveu64_t mmc_rx_icmp_err_octets; - /** This counter provides upper 32 bytes received in a ICMP - * segment that had checksum errors. This counter does not count - * IP header bytes */ - nveu64_t mmc_rx_icmp_err_octets_h; - /** This counter provides the number of additional mPackets - * transmitted due to preemption */ - unsigned long mmc_tx_fpe_frag_cnt; - /** This counter provides the count of number of times a hold - * request is given to MAC */ - unsigned long mmc_tx_fpe_hold_req_cnt; - /** This counter provides the number of MAC frames with reassembly - * errors on the Receiver, due to mismatch in the fragment - * count value */ - unsigned long mmc_rx_packet_reass_err_cnt; - /** This counter the number of received MAC frames rejected - * due to unknown SMD value and MAC frame fragments rejected due - * to arriving with an SMD-C when there was no preceding preempted - * frame */ - unsigned long mmc_rx_packet_smd_err_cnt; - /** This counter provides the number of MAC frames that were - * successfully reassembled and delivered to MAC */ - unsigned long mmc_rx_packet_asm_ok_cnt; - /** This counter provides the number of additional mPackets received - * due to preemption */ - unsigned long mmc_rx_fpe_fragment_cnt; -}; - -/** - * @brief osi_xtra_stat_counters - OSI core extra stat counters - */ -struct osi_xtra_stat_counters { - /** RX buffer unavailable irq count */ - nveu64_t rx_buf_unavail_irq_n[OSI_MGBE_MAX_NUM_QUEUES]; - /** Transmit Process Stopped irq count */ - nveu64_t tx_proc_stopped_irq_n[OSI_MGBE_MAX_NUM_QUEUES]; - /** Transmit Buffer Unavailable irq count */ - nveu64_t tx_buf_unavail_irq_n[OSI_MGBE_MAX_NUM_QUEUES]; - /** Receive Process Stopped irq count */ - nveu64_t rx_proc_stopped_irq_n[OSI_MGBE_MAX_NUM_QUEUES]; - 
/** Receive Watchdog Timeout irq count */ - nveu64_t rx_watchdog_irq_n; - /** Fatal Bus Error irq count */ - nveu64_t fatal_bus_error_irq_n; - /** rx skb allocation failure count */ - nveu64_t re_alloc_rxbuf_failed[OSI_MGBE_MAX_NUM_QUEUES]; - /** TX per channel interrupt count */ - nveu64_t tx_normal_irq_n[OSI_MGBE_MAX_NUM_QUEUES]; - /** TX per channel SW timer callback count */ - nveu64_t tx_usecs_swtimer_n[OSI_MGBE_MAX_NUM_QUEUES]; - /** RX per channel interrupt count */ - nveu64_t rx_normal_irq_n[OSI_MGBE_MAX_NUM_QUEUES]; - /** link connect count */ - nveu64_t link_connect_count; - /** link disconnect count */ - nveu64_t link_disconnect_count; - /** lock fail count node addition */ - nveu64_t ts_lock_add_fail; - /** lock fail count node removal */ - nveu64_t ts_lock_del_fail; -}; - #ifdef MACSEC_SUPPORT /** * @brief The structure hold macsec statistics counters diff --git a/osi/common/type.h b/include/nvethernet_type.h similarity index 92% rename from osi/common/type.h rename to include/nvethernet_type.h index d2ed7c7..b80e8fc 100644 --- a/osi/common/type.h +++ b/include/nvethernet_type.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -37,8 +37,6 @@ typedef unsigned int my_uint32_t; typedef int my_int32_t; /** intermediate type for unsigned short */ typedef unsigned short my_uint16_t; -/** intermediate type for short */ -typedef short my_int16_t; /** intermediate type for char */ typedef char my_int8_t; /** intermediate type for unsigned char */ @@ -55,8 +53,6 @@ typedef my_uint32_t nveu32_t; typedef my_int32_t nve32_t; /** typedef equivalent to unsigned short */ typedef my_uint16_t nveu16_t; -/** typedef equivalent to short */ -typedef my_int16_t nve16_t; /** typedef equivalent to char */ typedef my_int8_t nve8_t; /** typedef equivalent to unsigned char */ @@ -68,3 +64,4 @@ typedef my_uint64_t nveu64_t; /** @} */ #endif /* INCLUDED_TYPE_H */ + diff --git a/include/nvethernetrm_export.h b/include/nvethernetrm_export.h new file mode 100644 index 0000000..f8d1d84 --- /dev/null +++ b/include/nvethernetrm_export.h @@ -0,0 +1,775 @@ +/* + * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef INCLUDED_NVETHERNETRM_EXPORT_H +#define INCLUDED_NVETHERNETRM_EXPORT_H + +#include + +/** + * @addtogroup Helper MACROS + * + * @brief EQOS generic helper MACROS. + * @{ + */ +#define OSI_GCL_SIZE_256 256U +#define OSI_MAX_TC_NUM 8U +/* Ethernet Address length */ +#define OSI_ETH_ALEN 6U +/** @} */ + +/** + * @addtogroup Flexible Receive Parser related information + * + * @brief Flexible Receive Parser commands, table size and other defines + * @{ + */ +/* Match data defines */ +#define OSI_FRP_MATCH_DATA_MAX 12U +/** @} */ + +/** + * @addtogroup MTL queue operation mode + * + * @brief MTL queue operation mode options + * @{ + */ +#define OSI_MTL_QUEUE_AVB 0x1U +#define OSI_MTL_QUEUE_ENABLE 0x2U +#define OSI_MTL_QUEUE_MODEMAX 0x3U +#ifndef OSI_STRIPPED_LIB +#define OSI_MTL_MAX_NUM_QUEUES 10U +#endif +/** @} */ + +/** + * @addtogroup EQOS_MTL MTL queue AVB algorithm mode + * + * @brief MTL AVB queue algorithm type + * @{ + */ +#define OSI_MTL_TXQ_AVALG_CBS 1U +#define OSI_MTL_TXQ_AVALG_SP 0U +/** @} */ + +#ifndef OSI_STRIPPED_LIB +/** + * @addtogroup Helper MACROS + * + * @brief EQOS generic helper MACROS. 
+ * @{ + */ +/* L2 DA filter mode(enable/disable) */ +#define OSI_OPER_EN_L2_DA_INV OSI_BIT(4) +#define OSI_OPER_DIS_L2_DA_INV OSI_BIT(5) +#endif /* !OSI_STRIPPED_LIB */ + +/* Ethernet Address length */ +#define OSI_ETH_ALEN 6U +#define OSI_MAX_TC_NUM 8U +/** @} */ + +#pragma pack(push, 1) +/** + * @brief FRP command structure for OSD to OSI + */ +struct osi_core_frp_cmd { + /** FRP Command type */ + nveu32_t cmd; + /** OSD FRP ID */ + nve32_t frp_id; + /** OSD match data type */ + nveu8_t match_type; + /** OSD match data */ + nveu8_t match[OSI_FRP_MATCH_DATA_MAX]; + /** OSD match data length */ + nveu8_t match_length; + /** OSD Offset */ + nveu8_t offset; + /** OSD FRP filter mode flag */ + nveu8_t filter_mode; + /** OSD FRP Link ID */ + nve32_t next_frp_id; + /** OSD DMA Channel Selection + * Bit selection of DMA channels to route the frame + * Bit[0] - DMA channel 0 + * .. + * Bit [N] - DMA channel N] */ + nveu32_t dma_sel; +}; + +/** + * @brief OSI Core avb data structure per queue. + */ +struct osi_core_avb_algorithm { + /** TX Queue/TC index */ + nveu32_t qindex; + /** CBS Algorithm enable(1) or disable(0) */ + nveu32_t algo; + /** When this bit is set, the accumulated credit parameter in the + * credit-based shaper algorithm logic is not reset to zero when + * there is positive credit and no packet to transmit in the channel. 
+ * + * Expected values are enable(1) or disable(0) */ + nveu32_t credit_control; + /** idleSlopeCredit value required for CBS + * Max value for EQOS - 0x000FFFFFU + * Max value for MGBE - 0x001FFFFFU */ + nveu32_t idle_slope; + /** sendSlopeCredit value required for CBS + * Max value for EQOS - 0x0000FFFFU + * Max value for MGBE - 0x00003FFFU */ + nveu32_t send_slope; + /** hiCredit value required for CBS + * Max value - 0x1FFFFFFFU */ + nveu32_t hi_credit; + /** lowCredit value required for CBS + * Max value - 0x1FFFFFFFU */ + nveu32_t low_credit; + /** Transmit queue operating mode + * + * 00: disable + * + * 01: avb + * + * 10: enable */ + nveu32_t oper_mode; + /** TC index + * value 0 to 7 represent 8 TC */ + nveu32_t tcindex; +}; + +/** + * @brief OSI Core EST structure + */ +struct osi_est_config { + /** enable/disable */ + nveu32_t en_dis; + /** 64 bit base time register + * if both values are 0, take ptp time to avoid BTRE + * index 0 for nsec, index 1 for sec + */ + nveu32_t btr[2]; + /** 64 bit base time offset index 0 for nsec, index 1 for sec + * 32 bits for Seconds, 32 bits for nanoseconds (max 10^9) */ + nveu32_t btr_offset[2]; + /** 40 bits cycle time register, index 0 for nsec, index 1 for sec + * 8 bits for Seconds, 32 bits for nanoseconds (max 10^9) */ + nveu32_t ctr[2]; + /** Configured Time Interval width(24 bits) + 7 bits + * extension register */ + nveu32_t ter; + /** size of the gate control list Max 256 entries + * valid value range (1-255)*/ + nveu32_t llr; + /** data array 8 bit gate op + 24 execution time + * MGBE HW support GCL depth 256 */ + nveu32_t gcl[OSI_GCL_SIZE_256]; +}; + +/** + * @brief OSI Core FPE structure + */ +struct osi_fpe_config { + /** Queue Mask 1 - preemption 0 - express + * bit representation*/ + nveu32_t tx_queue_preemption_enable; + /** RQ for all preemptable packets which are not filtered + * based on user priority or SA-DA + * Value range for EQOS 1-7 + * Value range for MGBE 1-9 */ + nveu32_t rq; +}; + +/** + * 
@brief OSI Core error stats structure + */ +struct osi_stats { + /** Constant Gate Control Error */ + nveu64_t const_gate_ctr_err; + /** Head-Of-Line Blocking due to Scheduling */ + nveu64_t head_of_line_blk_sch; + /** Per TC Schedule Error */ + nveu64_t hlbs_q[OSI_MAX_TC_NUM]; + /** Head-Of-Line Blocking due to Frame Size */ + nveu64_t head_of_line_blk_frm; + /** Per TC Frame Size Error */ + nveu64_t hlbf_q[OSI_MAX_TC_NUM]; + /** BTR Error */ + nveu64_t base_time_reg_err; + /** Switch to Software Owned List Complete */ + nveu64_t sw_own_list_complete; +#ifndef OSI_STRIPPED_LIB + /** IP Header Error */ + nveu64_t mgbe_ip_header_err; + /** Jabber time out Error */ + nveu64_t mgbe_jabber_timeout_err; + /** Payload Checksum Error */ + nveu64_t mgbe_payload_cs_err; + /** Under Flow Error */ + nveu64_t mgbe_tx_underflow_err; + /** RX buffer unavailable irq count */ + nveu64_t rx_buf_unavail_irq_n[OSI_MTL_MAX_NUM_QUEUES]; + /** Transmit Process Stopped irq count */ + nveu64_t tx_proc_stopped_irq_n[OSI_MTL_MAX_NUM_QUEUES]; + /** Transmit Buffer Unavailable irq count */ + nveu64_t tx_buf_unavail_irq_n[OSI_MTL_MAX_NUM_QUEUES]; + /** Receive Process Stopped irq count */ + nveu64_t rx_proc_stopped_irq_n[OSI_MTL_MAX_NUM_QUEUES]; + /** Receive Watchdog Timeout irq count */ + nveu64_t rx_watchdog_irq_n; + /** Fatal Bus Error irq count */ + nveu64_t fatal_bus_error_irq_n; + /** lock fail count node addition */ + nveu64_t ts_lock_add_fail; + /** lock fail count node removal */ + nveu64_t ts_lock_del_fail; +#endif +}; + +/** + * @brief osi_mmc_counters - The structure to hold RMON counter values + */ +struct osi_mmc_counters { + /** This counter provides the number of bytes transmitted, exclusive of + * preamble and retried bytes, in good and bad packets */ + nveu64_t mmc_tx_octetcount_gb; + /** This counter provides upper 32 bits of transmitted octet count */ + nveu64_t mmc_tx_octetcount_gb_h; + /** This counter provides the number of good and + * bad packets transmitted, 
exclusive of retried packets */ + nveu64_t mmc_tx_framecount_gb; + /** This counter provides upper 32 bits of transmitted good and bad + * packets count */ + nveu64_t mmc_tx_framecount_gb_h; + /** This counter provides number of good broadcast + * packets transmitted */ + nveu64_t mmc_tx_broadcastframe_g; + /** This counter provides upper 32 bits of transmitted good broadcast + * packets count */ + nveu64_t mmc_tx_broadcastframe_g_h; + /** This counter provides number of good multicast + * packets transmitted */ + nveu64_t mmc_tx_multicastframe_g; + /** This counter provides upper 32 bits of transmitted good multicast + * packet count */ + nveu64_t mmc_tx_multicastframe_g_h; + /** This counter provides the number of good and bad packets + * transmitted with length 64 bytes, exclusive of preamble and + * retried packets */ + nveu64_t mmc_tx_64_octets_gb; + /** This counter provides upper 32 bits of transmitted 64 octet size + * good and bad packets count */ + nveu64_t mmc_tx_64_octets_gb_h; + /** This counter provides the number of good and bad packets + * transmitted with length 65-127 bytes, exclusive of preamble and + * retried packets */ + nveu64_t mmc_tx_65_to_127_octets_gb; + /** Provides upper 32 bits of transmitted 65-to-127 octet size good and + * bad packets count */ + nveu64_t mmc_tx_65_to_127_octets_gb_h; + /** This counter provides the number of good and bad packets + * transmitted with length 128-255 bytes, exclusive of preamble and + * retried packets */ + nveu64_t mmc_tx_128_to_255_octets_gb; + /** This counter provides upper 32 bits of transmitted 128-to-255 + * octet size good and bad packets count */ + nveu64_t mmc_tx_128_to_255_octets_gb_h; + /** This counter provides the number of good and bad packets + * transmitted with length 256-511 bytes, exclusive of preamble and + * retried packets */ + nveu64_t mmc_tx_256_to_511_octets_gb; + /** This counter provides upper 32 bits of transmitted 256-to-511 + * octet size good and bad packets count. 
*/ + nveu64_t mmc_tx_256_to_511_octets_gb_h; + /** This counter provides the number of good and bad packets + * transmitted with length 512-1023 bytes, exclusive of preamble and + * retried packets */ + nveu64_t mmc_tx_512_to_1023_octets_gb; + /** This counter provides upper 32 bits of transmitted 512-to-1023 + * octet size good and bad packets count.*/ + nveu64_t mmc_tx_512_to_1023_octets_gb_h; + /** This counter provides the number of good and bad packets + * transmitted with length 1024-max bytes, exclusive of preamble and + * retried packets */ + nveu64_t mmc_tx_1024_to_max_octets_gb; + /** This counter provides upper 32 bits of transmitted 1024-tomaxsize + * octet size good and bad packets count. */ + nveu64_t mmc_tx_1024_to_max_octets_gb_h; + /** This counter provides the number of good and bad unicast packets */ + nveu64_t mmc_tx_unicast_gb; + /** This counter provides upper 32 bits of transmitted good bad + * unicast packets count */ + nveu64_t mmc_tx_unicast_gb_h; + /** This counter provides the number of good and bad + * multicast packets */ + nveu64_t mmc_tx_multicast_gb; + /** This counter provides upper 32 bits of transmitted good bad + * multicast packets count */ + nveu64_t mmc_tx_multicast_gb_h; + /** This counter provides the number of good and bad + * broadcast packets */ + nveu64_t mmc_tx_broadcast_gb; + /** This counter provides upper 32 bits of transmitted good bad + * broadcast packets count */ + nveu64_t mmc_tx_broadcast_gb_h; + /** This counter provides the number of abort packets due to + * underflow error */ + nveu64_t mmc_tx_underflow_error; + /** This counter provides upper 32 bits of abort packets due to + * underflow error */ + nveu64_t mmc_tx_underflow_error_h; + /** This counter provides the number of successfully transmitted + * packets after a single collision in the half-duplex mode */ + nveu64_t mmc_tx_singlecol_g; + /** This counter provides the number of successfully transmitted + * packets after a multi collision in the 
half-duplex mode */ + nveu64_t mmc_tx_multicol_g; + /** This counter provides the number of successfully transmitted + * after a deferral in the half-duplex mode */ + nveu64_t mmc_tx_deferred; + /** This counter provides the number of packets aborted because of + * late collision error */ + nveu64_t mmc_tx_latecol; + /** This counter provides the number of packets aborted because of + * excessive (16) collision errors */ + nveu64_t mmc_tx_exesscol; + /** This counter provides the number of packets aborted because of + * carrier sense error (no carrier or loss of carrier) */ + nveu64_t mmc_tx_carrier_error; + /** This counter provides the number of bytes transmitted, + * exclusive of preamble, only in good packets */ + nveu64_t mmc_tx_octetcount_g; + /** This counter provides upper 32 bytes of bytes transmitted, + * exclusive of preamble, only in good packets */ + nveu64_t mmc_tx_octetcount_g_h; + /** This counter provides the number of good packets transmitted */ + nveu64_t mmc_tx_framecount_g; + /** This counter provides upper 32 bytes of good packets transmitted */ + nveu64_t mmc_tx_framecount_g_h; + /** This counter provides the number of packets aborted because of + * excessive deferral error + * (deferred for more than two max-sized packet times) */ + nveu64_t mmc_tx_excessdef; + /** This counter provides the number of good Pause + * packets transmitted */ + nveu64_t mmc_tx_pause_frame; + /** This counter provides upper 32 bytes of good Pause + * packets transmitted */ + nveu64_t mmc_tx_pause_frame_h; + /** This counter provides the number of good VLAN packets transmitted */ + nveu64_t mmc_tx_vlan_frame_g; + /** This counter provides upper 32 bytes of good VLAN packets + * transmitted */ + nveu64_t mmc_tx_vlan_frame_g_h; + /** This counter provides the number of packets transmitted without + * errors and with length greater than the maxsize (1,518 or 1,522 bytes + * for VLAN tagged packets; 2000 bytes */ + nveu64_t mmc_tx_osize_frame_g; + /** This counter 
provides the number of good and bad packets received */ + nveu64_t mmc_rx_framecount_gb; + /** This counter provides upper 32 bytes of good and bad packets + * received */ + nveu64_t mmc_rx_framecount_gb_h; + /** This counter provides the number of bytes received by DWC_ether_qos, + * exclusive of preamble, in good and bad packets */ + nveu64_t mmc_rx_octetcount_gb; + /** This counter provides upper 32 bytes of bytes received by + * DWC_ether_qos, exclusive of preamble, in good and bad packets */ + nveu64_t mmc_rx_octetcount_gb_h; + /** This counter provides the number of bytes received by DWC_ether_qos, + * exclusive of preamble, in good and bad packets */ + nveu64_t mmc_rx_octetcount_g; + /** This counter provides upper 32 bytes of bytes received by + * DWC_ether_qos, exclusive of preamble, in good and bad packets */ + nveu64_t mmc_rx_octetcount_g_h; + /** This counter provides the number of good + * broadcast packets received */ + nveu64_t mmc_rx_broadcastframe_g; + /** This counter provides upper 32 bytes of good + * broadcast packets received */ + nveu64_t mmc_rx_broadcastframe_g_h; + /** This counter provides the number of good + * multicast packets received */ + nveu64_t mmc_rx_multicastframe_g; + /** This counter provides upper 32 bytes of good + * multicast packets received */ + nveu64_t mmc_rx_multicastframe_g_h; + /** This counter provides the number of packets + * received with CRC error */ + nveu64_t mmc_rx_crc_error; + /** This counter provides upper 32 bytes of packets + * received with CRC error */ + nveu64_t mmc_rx_crc_error_h; + /** This counter provides the number of packets received with + * alignment (dribble) error. 
It is valid only in 10/100 mode */ + nveu64_t mmc_rx_align_error; + /** This counter provides the number of packets received with + * runt (length less than 64 bytes and CRC error) error */ + nveu64_t mmc_rx_runt_error; + /** This counter provides the number of giant packets received with + * length (including CRC) greater than 1,518 bytes (1,522 bytes for + * VLAN tagged) and with CRC error */ + nveu64_t mmc_rx_jabber_error; + /** This counter provides the number of packets received with length + * less than 64 bytes, without any errors */ + nveu64_t mmc_rx_undersize_g; + /** This counter provides the number of packets received without error, + * with length greater than the maxsize */ + nveu64_t mmc_rx_oversize_g; + /** This counter provides the number of good and bad packets received + * with length 64 bytes, exclusive of the preamble */ + nveu64_t mmc_rx_64_octets_gb; + /** This counter provides upper 32 bytes of good and bad packets + * received with length 64 bytes, exclusive of the preamble */ + nveu64_t mmc_rx_64_octets_gb_h; + /** This counter provides the number of good and bad packets received + * with length 65-127 bytes, exclusive of the preamble */ + nveu64_t mmc_rx_65_to_127_octets_gb; + /** This counter provides upper 32 bytes of good and bad packets + * received with length 65-127 bytes, exclusive of the preamble */ + nveu64_t mmc_rx_65_to_127_octets_gb_h; + /** This counter provides the number of good and bad packets received + * with length 128-255 bytes, exclusive of the preamble */ + nveu64_t mmc_rx_128_to_255_octets_gb; + /** This counter provides upper 32 bytes of good and bad packets + * received with length 128-255 bytes, exclusive of the preamble */ + nveu64_t mmc_rx_128_to_255_octets_gb_h; + /** This counter provides the number of good and bad packets received + * with length 256-511 bytes, exclusive of the preamble */ + nveu64_t mmc_rx_256_to_511_octets_gb; + /** This counter provides upper 32 bytes of good and bad packets + * received 
with length 256-511 bytes, exclusive of the preamble */ + nveu64_t mmc_rx_256_to_511_octets_gb_h; + /** This counter provides the number of good and bad packets received + * with length 512-1023 bytes, exclusive of the preamble */ + nveu64_t mmc_rx_512_to_1023_octets_gb; + /** This counter provides upper 32 bytes of good and bad packets + * received with length 512-1023 bytes, exclusive of the preamble */ + nveu64_t mmc_rx_512_to_1023_octets_gb_h; + /** This counter provides the number of good and bad packets received + * with length 1024-maxbytes, exclusive of the preamble */ + nveu64_t mmc_rx_1024_to_max_octets_gb; + /** This counter provides upper 32 bytes of good and bad packets + * received with length 1024-maxbytes, exclusive of the preamble */ + nveu64_t mmc_rx_1024_to_max_octets_gb_h; + /** This counter provides the number of good unicast packets received */ + nveu64_t mmc_rx_unicast_g; + /** This counter provides upper 32 bytes of good unicast packets + * received */ + nveu64_t mmc_rx_unicast_g_h; + /** This counter provides the number of packets received with length + * error (Length Type field not equal to packet size), for all packets + * with valid length field */ + nveu64_t mmc_rx_length_error; + /** This counter provides upper 32 bytes of packets received with + * length error (Length Type field not equal to packet size), for all + * packets with valid length field */ + nveu64_t mmc_rx_length_error_h; + /** This counter provides the number of packets received with length + * field not equal to the valid packet size (greater than 1,500 but + * less than 1,536) */ + nveu64_t mmc_rx_outofrangetype; + /** This counter provides upper 32 bytes of packets received with + * length field not equal to the valid packet size (greater than 1,500 + * but less than 1,536) */ + nveu64_t mmc_rx_outofrangetype_h; + /** This counter provides the number of good and valid Pause packets + * received */ + nveu64_t mmc_rx_pause_frames; + /** This counter provides upper 32 
bytes of good and valid Pause packets + * received */ + nveu64_t mmc_rx_pause_frames_h; + /** This counter provides the number of missed received packets + * because of FIFO overflow in DWC_ether_qos */ + nveu64_t mmc_rx_fifo_overflow; + /** This counter provides upper 32 bytes of missed received packets + * because of FIFO overflow in DWC_ether_qos */ + nveu64_t mmc_rx_fifo_overflow_h; + /** This counter provides the number of good and bad VLAN packets + * received */ + nveu64_t mmc_rx_vlan_frames_gb; + /** This counter provides upper 32 bytes of good and bad VLAN packets + * received */ + nveu64_t mmc_rx_vlan_frames_gb_h; + /** This counter provides the number of packets received with error + * because of watchdog timeout error */ + nveu64_t mmc_rx_watchdog_error; + /** This counter provides the number of packets received with Receive + * error or Packet Extension error on the GMII or MII interface */ + nveu64_t mmc_rx_receive_error; + /** This counter provides the number of packets received with Receive + * error or Packet Extension error on the GMII or MII interface */ + nveu64_t mmc_rx_ctrl_frames_g; + /** This counter provides the number of microseconds Tx LPI is asserted + * in the MAC controller */ + nveu64_t mmc_tx_lpi_usec_cntr; + /** This counter provides the number of times MAC controller has + * entered Tx LPI. 
*/ + nveu64_t mmc_tx_lpi_tran_cntr; + /** This counter provides the number of microseconds Rx LPI is asserted + * in the MAC controller */ + nveu64_t mmc_rx_lpi_usec_cntr; + /** This counter provides the number of times MAC controller has + * entered Rx LPI.*/ + nveu64_t mmc_rx_lpi_tran_cntr; + /** This counter provides the number of good IPv4 datagrams received + * with the TCP, UDP, or ICMP payload */ + nveu64_t mmc_rx_ipv4_gd; + /** This counter provides upper 32 bytes of good IPv4 datagrams received + * with the TCP, UDP, or ICMP payload */ + nveu64_t mmc_rx_ipv4_gd_h; + /** RxIPv4 Header Error Packets */ + nveu64_t mmc_rx_ipv4_hderr; + /** RxIPv4 of upper 32 bytes of Header Error Packets */ + nveu64_t mmc_rx_ipv4_hderr_h; + /** This counter provides the number of IPv4 datagram packets received + * that did not have a TCP, UDP, or ICMP payload */ + nveu64_t mmc_rx_ipv4_nopay; + /** This counter provides upper 32 bytes of IPv4 datagram packets + * received that did not have a TCP, UDP, or ICMP payload */ + nveu64_t mmc_rx_ipv4_nopay_h; + /** This counter provides the number of good IPv4 datagrams received + * with fragmentation */ + nveu64_t mmc_rx_ipv4_frag; + /** This counter provides upper 32 bytes of good IPv4 datagrams received + * with fragmentation */ + nveu64_t mmc_rx_ipv4_frag_h; + /** This counter provides the number of good IPv4 datagrams received + * that had a UDP payload with checksum disabled */ + nveu64_t mmc_rx_ipv4_udsbl; + /** This counter provides upper 32 bytes of good IPv4 datagrams received + * that had a UDP payload with checksum disabled */ + nveu64_t mmc_rx_ipv4_udsbl_h; + /** This counter provides the number of good IPv6 datagrams received + * with the TCP, UDP, or ICMP payload */ + nveu64_t mmc_rx_ipv6_gd_octets; + /** This counter provides upper 32 bytes of good IPv6 datagrams received + * with the TCP, UDP, or ICMP payload */ + nveu64_t mmc_rx_ipv6_gd_octets_h; + /** This counter provides the number of IPv6 datagrams received + * 
with header (length or version mismatch) errors */ + nveu64_t mmc_rx_ipv6_hderr_octets; + /** This counter provides the number of IPv6 datagrams received + * with header (length or version mismatch) errors */ + nveu64_t mmc_rx_ipv6_hderr_octets_h; + /** This counter provides the number of IPv6 datagram packets received + * that did not have a TCP, UDP, or ICMP payload */ + nveu64_t mmc_rx_ipv6_nopay_octets; + /** This counter provides upper 32 bytes of IPv6 datagram packets + * received that did not have a TCP, UDP, or ICMP payload */ + nveu64_t mmc_rx_ipv6_nopay_octets_h; + /* Protocols */ + /** This counter provides the number of good IP datagrams received by + * DWC_ether_qos with a good UDP payload */ + nveu64_t mmc_rx_udp_gd; + /** This counter provides upper 32 bytes of good IP datagrams received + * by DWC_ether_qos with a good UDP payload */ + nveu64_t mmc_rx_udp_gd_h; + /** This counter provides the number of good IP datagrams received by + * DWC_ether_qos with a good UDP payload. This counter is not updated + * when the RxIPv4_UDP_Checksum_Disabled_Packets counter is + * incremented */ + nveu64_t mmc_rx_udp_err; + /** This counter provides upper 32 bytes of good IP datagrams received + * by DWC_ether_qos with a good UDP payload. 
This counter is not updated + * when the RxIPv4_UDP_Checksum_Disabled_Packets counter is + * incremented */ + nveu64_t mmc_rx_udp_err_h; + /** This counter provides the number of good IP datagrams received + * with a good TCP payload */ + nveu64_t mmc_rx_tcp_gd; + /** This counter provides upper 32 bytes of good IP datagrams received + * with a good TCP payload */ + nveu64_t mmc_rx_tcp_gd_h; + /** This counter provides the number of good IP datagrams received + * whose TCP payload has a checksum error */ + nveu64_t mmc_rx_tcp_err; + /** This counter provides upper 32 bytes of good IP datagrams received + * whose TCP payload has a checksum error */ + nveu64_t mmc_rx_tcp_err_h; + /** This counter provides the number of good IP datagrams received + * with a good ICMP payload */ + nveu64_t mmc_rx_icmp_gd; + /** This counter provides upper 32 bytes of good IP datagrams received + * with a good ICMP payload */ + nveu64_t mmc_rx_icmp_gd_h; + /** This counter provides the number of good IP datagrams received + * whose ICMP payload has a checksum error */ + nveu64_t mmc_rx_icmp_err; + /** This counter provides upper 32 bytes of good IP datagrams received + * whose ICMP payload has a checksum error */ + nveu64_t mmc_rx_icmp_err_h; + /** This counter provides the number of bytes received by DWC_ether_qos + * in good IPv4 datagrams encapsulating TCP, UDP, or ICMP data. + * (Ethernet header, FCS, pad, or IP pad bytes are not included + * in this counter */ + nveu64_t mmc_rx_ipv4_gd_octets; + /** This counter provides upper 32 bytes received by DWC_ether_qos + * in good IPv4 datagrams encapsulating TCP, UDP, or ICMP data. + * (Ethernet header, FCS, pad, or IP pad bytes are not included + * in this counter */ + nveu64_t mmc_rx_ipv4_gd_octets_h; + /** This counter provides the number of bytes received in IPv4 datagram + * with header errors (checksum, length, version mismatch). The value + * in the Length field of IPv4 header is used to update this counter. 
+ * (Ethernet header, FCS, pad, or IP pad bytes are not included + * in this counter */ + nveu64_t mmc_rx_ipv4_hderr_octets; + /** This counter provides upper 32 bytes received in IPv4 datagram + * with header errors (checksum, length, version mismatch). The value + * in the Length field of IPv4 header is used to update this counter. + * (Ethernet header, FCS, pad, or IP pad bytes are not included + * in this counter */ + nveu64_t mmc_rx_ipv4_hderr_octets_h; + /** This counter provides the number of bytes received in IPv4 datagram + * that did not have a TCP, UDP, or ICMP payload. The value in the + * Length field of IPv4 header is used to update this counter. + * (Ethernet header, FCS, pad, or IP pad bytes are not included + * in this counter */ + nveu64_t mmc_rx_ipv4_nopay_octets; + /** This counter provides upper 32 bytes received in IPv4 datagram + * that did not have a TCP, UDP, or ICMP payload. The value in the + * Length field of IPv4 header is used to update this counter. + * (Ethernet header, FCS, pad, or IP pad bytes are not included + * in this counter */ + nveu64_t mmc_rx_ipv4_nopay_octets_h; + /** This counter provides the number of bytes received in fragmented + * IPv4 datagrams. The value in the Length field of IPv4 header is + * used to update this counter. (Ethernet header, FCS, pad, or IP pad + * bytes are not included in this counter */ + nveu64_t mmc_rx_ipv4_frag_octets; + /** This counter provides upper 32 bytes received in fragmented + * IPv4 datagrams. The value in the Length field of IPv4 header is + * used to update this counter. (Ethernet header, FCS, pad, or IP pad + * bytes are not included in this counter */ + nveu64_t mmc_rx_ipv4_frag_octets_h; + /** This counter provides the number of bytes received in a UDP segment + * that had the UDP checksum disabled. This counter does not count IP + * Header bytes. 
(Ethernet header, FCS, pad, or IP pad bytes are not + * included in this counter */ + nveu64_t mmc_rx_ipv4_udsbl_octets; + /** This counter provides upper 32 bytes received in a UDP segment + * that had the UDP checksum disabled. This counter does not count IP + * Header bytes. (Ethernet header, FCS, pad, or IP pad bytes are not + * included in this counter */ + nveu64_t mmc_rx_ipv4_udsbl_octets_h; + /** This counter provides the number of bytes received in good IPv6 + * datagrams encapsulating TCP, UDP, or ICMP data. (Ethernet header, + * FCS, pad, or IP pad bytes are not included in this counter */ + nveu64_t mmc_rx_ipv6_gd; + /** This counter provides upper 32 bytes received in good IPv6 + * datagrams encapsulating TCP, UDP, or ICMP data. (Ethernet header, + * FCS, pad, or IP pad bytes are not included in this counter */ + nveu64_t mmc_rx_ipv6_gd_h; + /** This counter provides the number of bytes received in IPv6 datagrams + * with header errors (length, version mismatch). The value in the + * Length field of IPv6 header is used to update this counter. + * (Ethernet header, FCS, pad, or IP pad bytes are not included in + * this counter */ + nveu64_t mmc_rx_ipv6_hderr; + /** This counter provides upper 32 bytes received in IPv6 datagrams + * with header errors (length, version mismatch). The value in the + * Length field of IPv6 header is used to update this counter. + * (Ethernet header, FCS, pad, or IP pad bytes are not included in + * this counter */ + nveu64_t mmc_rx_ipv6_hderr_h; + /** This counter provides the number of bytes received in IPv6 + * datagrams that did not have a TCP, UDP, or ICMP payload. The value + * in the Length field of IPv6 header is used to update this counter. + * (Ethernet header, FCS, pad, or IP pad bytes are not included + * in this counter */ + nveu64_t mmc_rx_ipv6_nopay; + /** This counter provides upper 32 bytes received in IPv6 + * datagrams that did not have a TCP, UDP, or ICMP payload. 
The value + * in the Length field of IPv6 header is used to update this counter. + * (Ethernet header, FCS, pad, or IP pad bytes are not included + * in this counter */ + nveu64_t mmc_rx_ipv6_nopay_h; + /* Protocols */ + /** This counter provides the number of bytes received in a good UDP + * segment. This counter does not count IP header bytes */ + nveu64_t mmc_rx_udp_gd_octets; + /** This counter provides upper 32 bytes received in a good UDP + * segment. This counter does not count IP header bytes */ + nveu64_t mmc_rx_udp_gd_octets_h; + /** This counter provides the number of bytes received in a UDP + * segment that had checksum errors. This counter does not count + * IP header bytes */ + nveu64_t mmc_rx_udp_err_octets; + /** This counter provides upper 32 bytes received in a UDP + * segment that had checksum errors. This counter does not count + * IP header bytes */ + nveu64_t mmc_rx_udp_err_octets_h; + /** This counter provides the number of bytes received in a good + * TCP segment. This counter does not count IP header bytes */ + nveu64_t mmc_rx_tcp_gd_octets; + /** This counter provides upper 32 bytes received in a good + * TCP segment. This counter does not count IP header bytes */ + nveu64_t mmc_rx_tcp_gd_octets_h; + /** This counter provides the number of bytes received in a TCP + * segment that had checksum errors. This counter does not count + * IP header bytes */ + nveu64_t mmc_rx_tcp_err_octets; + /** This counter provides upper 32 bytes received in a TCP + * segment that had checksum errors. This counter does not count + * IP header bytes */ + nveu64_t mmc_rx_tcp_err_octets_h; + /** This counter provides the number of bytes received in a good + * ICMP segment. This counter does not count IP header bytes */ + nveu64_t mmc_rx_icmp_gd_octets; + /** This counter provides upper 32 bytes received in a good + * ICMP segment. 
This counter does not count IP header bytes */ + nveu64_t mmc_rx_icmp_gd_octets_h; + /** This counter provides the number of bytes received in a ICMP + * segment that had checksum errors. This counter does not count + * IP header bytes */ + nveu64_t mmc_rx_icmp_err_octets; + /** This counter provides upper 32 bytes received in a ICMP + * segment that had checksum errors. This counter does not count + * IP header bytes */ + nveu64_t mmc_rx_icmp_err_octets_h; + /** This counter provides the number of additional mPackets + * transmitted due to preemption */ + nveu64_t mmc_tx_fpe_frag_cnt; + /** This counter provides the count of number of times a hold + * request is given to MAC */ + nveu64_t mmc_tx_fpe_hold_req_cnt; + /** This counter provides the number of MAC frames with reassembly + * errors on the Receiver, due to mismatch in the fragment + * count value */ + nveu64_t mmc_rx_packet_reass_err_cnt; + /** This counter the number of received MAC frames rejected + * due to unknown SMD value and MAC frame fragments rejected due + * to arriving with an SMD-C when there was no preceding preempted + * frame */ + nveu64_t mmc_rx_packet_smd_err_cnt; + /** This counter provides the number of MAC frames that were + * successfully reassembled and delivered to MAC */ + nveu64_t mmc_rx_packet_asm_ok_cnt; + /** This counter provides the number of additional mPackets received + * due to preemption */ + nveu64_t mmc_rx_fpe_fragment_cnt; +}; + +#pragma pack(pop) +#endif /* INCLUDED_NVETHERNETRM_EXPORT_H */ diff --git a/include/nvethernetrm_l3l4.h b/include/nvethernetrm_l3l4.h new file mode 100644 index 0000000..dd8619a --- /dev/null +++ b/include/nvethernetrm_l3l4.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef INCLUDED_NVETHERNETRM_L3L4_H +#define INCLUDED_NVETHERNETRM_L3L4_H + +#include + +/** helper macro for enable */ +#define OSI_TRUE ((nveu32_t)1U) + +/** helper macro to disable */ +#define OSI_FALSE ((nveu32_t)0U) + +/** + * @brief L3/L4 filter function dependent parameter + */ +struct osi_l3_l4_filter { + /** filter data */ + struct { +#ifndef OSI_STRIPPED_LIB + /** udp (OSI_TRUE) or tcp (OSI_FALSE) */ + nveu32_t is_udp; + /** ipv6 (OSI_TRUE) or ipv4 (OSI_FALSE) */ + nveu32_t is_ipv6; +#endif /* !OSI_STRIPPED_LIB */ + /** destination ip address information */ + struct { + /** ipv4 address */ + nveu8_t ip4_addr[4]; +#ifndef OSI_STRIPPED_LIB + /** ipv6 address */ + nveu16_t ip6_addr[8]; + /** Port number */ + nveu16_t port_no; + /** addr match enable (OSI_TRUE) or disable (OSI_FALSE) */ + nveu32_t addr_match; + /** perfect(OSI_FALSE) or inverse(OSI_TRUE) match for address */ + nveu32_t addr_match_inv; + /** port match enable (OSI_TRUE) or disable (OSI_FALSE) */ + nveu32_t port_match; + /** perfect(OSI_FALSE) or inverse(OSI_TRUE) match for port */ + nveu32_t port_match_inv; +#endif /* !OSI_STRIPPED_LIB */ + } dst; +#ifndef OSI_STRIPPED_LIB + /** ip address and port information */ + struct { + /** ipv4 address */ + nveu8_t ip4_addr[4]; + /** ipv6 address */ + nveu16_t ip6_addr[8]; + /** Port number */ + nveu16_t port_no; + /** addr match enable (OSI_TRUE) or disable (OSI_FALSE) */ + nveu32_t addr_match; + /** perfect(OSI_FALSE) or inverse(OSI_TRUE) match for address */ + nveu32_t addr_match_inv; + /** port match enable (OSI_TRUE) or disable (OSI_FALSE) */ + nveu32_t port_match; + /** perfect(OSI_FALSE) or inverse(OSI_TRUE) match for port */ + nveu32_t port_match_inv; + } src; +#endif /* !OSI_STRIPPED_LIB */ + } data; +#ifndef OSI_STRIPPED_LIB + /** Represents whether DMA routing enabled (OSI_TRUE) or not (OSI_FALSE) */ + nveu32_t dma_routing_enable; +#endif /* !OSI_STRIPPED_LIB */ + /** DMA channel number of routing enabled */ + nveu32_t dma_chan; + 
/** filter enable (OSI_TRUE) or disable (OSI_FALSE) */ + nveu32_t filter_enb_dis; +}; + +#endif /* INCLUDED_NVETHERNETRM_L3L4_H */ diff --git a/include/osi_common.h b/include/osi_common.h index 8f34012..14050eb 100644 --- a/include/osi_common.h +++ b/include/osi_common.h @@ -23,7 +23,7 @@ #ifndef INCLUDED_OSI_COMMON_H #define INCLUDED_OSI_COMMON_H -#include "../osi/common/type.h" +#include /** * @addtogroup FC Flow Control Threshold Macros @@ -32,22 +32,9 @@ * the flow control is asserted or de-asserted * @{ */ -#define FULL_MINUS_1_5K (unsigned int)1 -#define FULL_MINUS_2_K (unsigned int)2 -#define FULL_MINUS_2_5K (unsigned int)3 -#define FULL_MINUS_3_K (unsigned int)4 -#define FULL_MINUS_4_K (unsigned int)6 -#define FULL_MINUS_6_K (unsigned int)10 -#define FULL_MINUS_10_K (unsigned int)18 -#define FULL_MINUS_13_K (unsigned int)24 -#define FULL_MINUS_14_K (unsigned int)26 -#define FULL_MINUS_16_K (unsigned int)30 -#define FULL_MINUS_18_K (unsigned int)34 -#define FULL_MINUS_21_K (unsigned int)40 -#define FULL_MINUS_24_K (unsigned int)46 -#define FULL_MINUS_29_K (unsigned int)56 -#define FULL_MINUS_31_K (unsigned int)60 -#define FULL_MINUS_32_K (unsigned int)62 +#define FULL_MINUS_1_5K ((nveu32_t)1) +#define FULL_MINUS_16_K ((nveu32_t)30) +#define FULL_MINUS_32_K ((nveu32_t)62) /** @} */ /** @@ -66,13 +53,46 @@ #define OSI_MAX_TX_COALESCE_USEC 1020U #define OSI_MIN_TX_COALESCE_USEC 32U #define OSI_MIN_TX_COALESCE_FRAMES 1U +#define OSI_PAUSE_FRAMES_DISABLE 0U +#define OSI_PAUSE_FRAMES_ENABLE 1U #endif /* !OSI_STRIPPED_LIB */ /* Compiler hints for branch prediction */ #define osi_unlikely(x) __builtin_expect(!!(x), 0) /** @} */ +/** + * @addtogroup Helper MACROS + * + * @brief EQOS generic helper MACROS. 
+ * @{ + */ +#define OSI_MAX_24BITS 0xFFFFFFU +#define OSI_MAX_28BITS 0xFFFFFFFU +#define OSI_MAX_32BITS 0xFFFFFFFFU +#define OSI_MASK_16BITS 0xFFFFU +#define OSI_MASK_20BITS 0xFFFFFU +#define OSI_MASK_24BITS 0xFFFFFFU +#define OSI_GCL_SIZE_64 64U +#define OSI_GCL_SIZE_128 128U +#define OSI_GCL_SIZE_512 512U +#define OSI_GCL_SIZE_1024 1024U +/** @} */ + #ifndef OSI_STRIPPED_LIB +/** + * @addtogroup Helper MACROS + * + * @brief EQOS generic helper MACROS. + * @{ + */ +#define OSI_PTP_REQ_CLK_FREQ 250000000U +#define OSI_FLOW_CTRL_DISABLE 0U +#define OSI_ADDRESS_32BIT 0 +#define OSI_ADDRESS_40BIT 1 +#define OSI_ADDRESS_48BIT 2 +/** @ } */ + /** * @addtogroup - LPI-Timers LPI configuration macros * @@ -120,47 +140,22 @@ /** @} */ #endif /* !OSI_STRIPPED_LIB */ -/** - * @addtogroup Helper Helper MACROS - * - * @brief EQOS generic helper MACROS. - * @{ - */ -#ifndef OSI_STRIPPED_LIB -#define OSI_PAUSE_FRAMES_ENABLE 1U -#define OSI_PTP_REQ_CLK_FREQ 250000000U -#define OSI_FLOW_CTRL_DISABLE 0U -#define OSI_MAX_24BITS 0xFFFFFFU -#define OSI_MAX_28BITS 0xFFFFFFFU -#define OSI_MAX_32BITS 0xFFFFFFFFU -#define OSI_MASK_16BITS 0xFFFFU -#define OSI_MASK_20BITS 0xFFFFFU -#define OSI_MASK_24BITS 0xFFFFFFU -#define OSI_GCL_SIZE_64 64U -#define OSI_GCL_SIZE_128 128U -#define OSI_GCL_SIZE_256 256U -#define OSI_GCL_SIZE_512 512U -#define OSI_GCL_SIZE_1024 1024U - #define OSI_POLL_COUNT 1000U - -#define OSI_ADDRESS_32BIT 0 -#define OSI_ADDRESS_40BIT 1 -#define OSI_ADDRESS_48BIT 2 -#endif /* !OSI_STRIPPED_LIB */ - #ifndef UINT_MAX #define UINT_MAX (~0U) #endif #ifndef INT_MAX #define INT_MAX (0x7FFFFFFF) +#ifndef OSI_LLONG_MAX +#define OSI_LLONG_MAX (0x7FFFFFFFFFFFFFFF) +#endif #endif /** @} */ /** - * @addtogroup Helper Helper MACROS + * @addtogroup Generic helper MACROS * - * @brief EQOS generic helper MACROS. + * @brief These are Generic helper macros used at various places. 
* @{ */ #define OSI_UCHAR_MAX (0xFFU) @@ -168,22 +163,24 @@ /* Logging defines */ /* log levels */ -#define OSI_LOG_INFO 1U +#define OSI_LOG_INFO 1U +#ifndef OSI_STRIPPED_LIB #define OSI_LOG_WARN 2U +#endif /* OSI_STRIPPED_LIB */ #define OSI_LOG_ERR 3U /* Error types */ #define OSI_LOG_ARG_OUTOFBOUND 1U #define OSI_LOG_ARG_INVALID 2U #define OSI_LOG_ARG_HW_FAIL 4U -#define OSI_LOG_WARN 2U -#ifndef OSI_STRIPPED_LIB #define OSI_LOG_ARG_OPNOTSUPP 3U -#endif /* !OSI_STRIPPED_LIB */ /* Default maximum Giant Packet Size Limit is 16K */ #define OSI_MAX_MTU_SIZE 16383U + +#ifdef UPDATED_PAD_CAL /* MAC Tx/Rx Idle retry and delay count */ #define OSI_TXRX_IDLE_RETRY 5000U #define OSI_DELAY_COUNT 10U +#endif #define EQOS_DMA_CHX_STATUS(x) ((0x0080U * (x)) + 0x1160U) #define MGBE_DMA_CHX_STATUS(x) ((0x0080U * (x)) + 0x3160U) @@ -200,15 +197,16 @@ /* MACSEC max SC's supported 16*/ #define OSI_MACSEC_SC_INDEX_MAX 16 +#ifndef OSI_STRIPPED_LIB /* HW supports 8 Hash table regs, but eqos_validate_core_regs only checks 4 */ #define OSI_EQOS_MAX_HASH_REGS 4U +#endif /* OSI_STRIPPED_LIB */ #define MAC_VERSION 0x110 #define MAC_VERSION_SNVER_MASK 0x7FU #define OSI_MAC_HW_EQOS 0U #define OSI_MAC_HW_MGBE 1U -#define OSI_ETH_ALEN 6U #define OSI_MAX_VM_IRQS 5U #define OSI_NULL ((void *)0) @@ -216,37 +214,30 @@ #define OSI_NONE 0U #define OSI_NONE_SIGNED 0 #define OSI_DISABLE 0U +#define OSI_H_DISABLE 0x10101010U +#define OSI_H_ENABLE (~OSI_H_DISABLE) #define OSI_BIT(nr) ((nveu32_t)1 << (nr)) -#define OSI_EQOS_MAC_4_10 0x41U -#define OSI_EQOS_MAC_5_00 0x50U -#define OSI_EQOS_MAC_5_10 0x51U -#define OSI_EQOS_MAC_5_30 0x53U +#ifndef OSI_STRIPPED_LIB #define OSI_MGBE_MAC_3_00 0x30U -#define OSI_MGBE_MAC_3_10 0x31U +#define OSI_EQOS_MAC_4_10 0x41U +#define OSI_EQOS_MAC_5_10 0x51U #define OSI_MGBE_MAC_4_00 0x40U +#endif /* OSI_STRIPPED_LIB */ + +#define OSI_EQOS_MAC_5_00 0x50U +#define OSI_EQOS_MAC_5_30 0x53U +#define OSI_MGBE_MAC_3_10 0x31U #define OSI_MAX_VM_IRQS 5U -#define OSI_IP4_FILTER 0U 
-#define OSI_IP6_FILTER 1U #ifndef OSI_STRIPPED_LIB -#define OSI_L2_FILTER_INDEX_ANY 127U #define OSI_HASH_FILTER_MODE 1U #define OSI_L4_FILTER_TCP 0U #define OSI_L4_FILTER_UDP 1U #define OSI_PERFECT_FILTER_MODE 0U -#define NV_ETH_FCS_LEN 0x4U -#define NV_ETH_FRAME_LEN 1514U - -#define MAX_ETH_FRAME_LEN_DEFAULT \ - (NV_ETH_FRAME_LEN + NV_ETH_FCS_LEN + NV_VLAN_HLEN) -#define OSI_MTU_SIZE_16K 16000U -#define OSI_MTU_SIZE_8K 8000U -#define OSI_MTU_SIZE_4K 4000U -#define OSI_MTU_SIZE_2K 2000U #define OSI_INVALID_CHAN_NUM 0xFFU #endif /* OSI_STRIPPED_LIB */ /** @} */ @@ -262,31 +253,8 @@ #define OSI_DEBUG_TYPE_REG 2U #define OSI_DEBUG_TYPE_STRUCTS 3U #endif /* OSI_DEBUG */ - -#ifndef OSI_STRIPPED_LIB -/** - * @addtogroup MTL queue operation mode - * - * @brief MTL queue operation mode options - * @{ - */ -#define OSI_MTL_QUEUE_DISABLED 0x0U -#define OSI_MTL_QUEUE_AVB 0x1U -#define OSI_MTL_QUEUE_ENABLE 0x2U -#define OSI_MTL_QUEUE_MODEMAX 0x3U /** @} */ -/** - * @addtogroup EQOS_MTL MTL queue AVB algorithm mode - * - * @brief MTL AVB queue algorithm type - * @{ - */ -#define OSI_MTL_TXQ_AVALG_CBS 1U -#define OSI_MTL_TXQ_AVALG_SP 0U -/** @} */ -#endif /* OSI_STRIPPED_LIB */ - /** * @brief unused function attribute */ @@ -320,7 +288,7 @@ static inline nveu64_t osi_update_stats_counter(nveu64_t last_value, if (temp < last_value) { /* Stats overflow, so reset it to zero */ - return 0UL; + temp = 0UL; } return temp; diff --git a/include/osi_core.h b/include/osi_core.h index 6648a7a..97e13b7 100644 --- a/include/osi_core.h +++ b/include/osi_core.h @@ -23,6 +23,8 @@ #ifndef INCLUDED_OSI_CORE_H #define INCLUDED_OSI_CORE_H +#include "nvethernetrm_export.h" +#include "nvethernetrm_l3l4.h" #include #include "mmc.h" @@ -36,18 +38,79 @@ struct ivc_msg_common; /* Following added to avoid misraC 4.6 * Here we are defining intermediate type */ -/** intermediate type for unsigned short */ -typedef unsigned short my_uint16_t; /** intermediate type for long long */ typedef long long 
my_lint_64; -/* Actual type used in code */ -/** typedef equivalent to unsigned short */ -typedef my_uint16_t nveu16_t; /** typedef equivalent to long long */ typedef my_lint_64 nvel64_t; /** @} */ +#ifndef OSI_STRIPPED_LIB +#define OSI_OPER_EN_L2_DA_INV OSI_BIT(4) +#define OSI_OPER_DIS_L2_DA_INV OSI_BIT(5) +#define OSI_PTP_SNAP_TRANSPORT 1U +#define OSI_VLAN_ACTION_DEL 0x0U +#define OSI_VLAN_ACTION_ADD OSI_BIT(31) +#define OSI_RXQ_ROUTE_PTP 0U +#define EQOS_MAX_HTR_REGS 8U + +/** + * @addtogroup RSS related information + * + * @brief RSS hash key and table size. + * @{ + */ +#define OSI_RSS_HASH_KEY_SIZE 40U +#define OSI_RSS_MAX_TABLE_SIZE 128U +/** @} */ + +#define OSI_CMD_RESET_MMC 12U +#define OSI_CMD_MDC_CONFIG 1U +#define OSI_CMD_MAC_LB 14U +#define OSI_CMD_FLOW_CTRL 15U +#define OSI_CMD_CONFIG_TXSTATUS 27U +#define OSI_CMD_CONFIG_RX_CRC_CHECK 25U +#define OSI_CMD_CONFIG_EEE 32U +#define OSI_CMD_ARP_OFFLOAD 30U +#define OSI_CMD_UPDATE_VLAN_ID 26U +#define OSI_CMD_VLAN_FILTER 31U +#define OSI_CMD_CONFIG_PTP_OFFLOAD 34U +#define OSI_CMD_PTP_RXQ_ROUTE 35U +#define OSI_CMD_CONFIG_RSS 37U +#define OSI_CMD_CONFIG_FW_ERR 29U +#define OSI_CMD_SET_MODE 16U +#define OSI_CMD_POLL_FOR_MAC_RST 4U +#define OSI_CMD_GET_MAC_VER 10U + +/** + * @addtogroup PTP-offload PTP offload defines + * @{ + */ +#define OSI_PTP_MAX_PORTID 0xFFFFU +#define OSI_PTP_MAX_DOMAIN 0xFFU +#define OSI_PTP_SNAP_ORDINARY 0U +#define OSI_PTP_SNAP_P2P 3U +/** @} */ + +#define OSI_MAC_TCR_TSMASTERENA OSI_BIT(15) +#define OSI_MAC_TCR_TSEVENTENA OSI_BIT(14) +#define OSI_MAC_TCR_TSENALL OSI_BIT(8) +#define OSI_MAC_TCR_SNAPTYPSEL_3 (OSI_BIT(16) | OSI_BIT(17)) +#define OSI_MAC_TCR_SNAPTYPSEL_2 OSI_BIT(17) +#define OSI_MAC_TCR_CSC OSI_BIT(19) +#define OSI_MAC_TCR_AV8021ASMEN OSI_BIT(28) + +#define OSI_FLOW_CTRL_RX OSI_BIT(1) + +#define OSI_INSTANCE_ID_MBGE0 0 +#define OSI_INSTANCE_ID_MGBE1 1 +#define OSI_INSTANCE_ID_MGBE2 2 +#define OSI_INSTANCE_ID_MGBE3 3 +#define OSI_INSTANCE_ID_EQOS 4 + +#endif /* 
!OSI_STRIPPED_LIB */ + + #ifdef MACSEC_SUPPORT /** * @addtogroup MACSEC related helper MACROs @@ -63,16 +126,6 @@ typedef my_lint_64 nvel64_t; /** @} */ #endif /* MACSEC_SUPPORT */ -/** - * @addtogroup PTP PTP related information - * - * @brief PTP SSINC values - * @{ - */ -#define OSI_PTP_SSINC_16 16U -#define OSI_PTP_SSINC_4 4U -/** @} */ - /** * @addtogroup PTP PTP related information * @@ -83,6 +136,7 @@ typedef my_lint_64 nvel64_t; #define OSI_PTP_M2M_SECONDARY 2U /** @} */ + /** * @addtogroup EQOS_PTP PTP Helper MACROS * @@ -91,55 +145,47 @@ typedef my_lint_64 nvel64_t; */ #define OSI_MAC_TCR_TSENA OSI_BIT(0) #define OSI_MAC_TCR_TSCFUPDT OSI_BIT(1) -#define OSI_MAC_TCR_TSENALL OSI_BIT(8) #define OSI_MAC_TCR_TSCTRLSSR OSI_BIT(9) #define OSI_MAC_TCR_TSVER2ENA OSI_BIT(10) #define OSI_MAC_TCR_TSIPENA OSI_BIT(11) #define OSI_MAC_TCR_TSIPV6ENA OSI_BIT(12) #define OSI_MAC_TCR_TSIPV4ENA OSI_BIT(13) -#define OSI_MAC_TCR_TSEVENTENA OSI_BIT(14) -#define OSI_MAC_TCR_TSMASTERENA OSI_BIT(15) #define OSI_MAC_TCR_SNAPTYPSEL_1 OSI_BIT(16) -#define OSI_MAC_TCR_SNAPTYPSEL_2 OSI_BIT(17) -#define OSI_MAC_TCR_CSC OSI_BIT(19) -#define OSI_MAC_TCR_AV8021ASMEN OSI_BIT(28) -#define OSI_MAC_TCR_SNAPTYPSEL_3 (OSI_BIT(16) | OSI_BIT(17)) #define OSI_MAC_TCR_TXTSSMIS OSI_BIT(31) /** @} */ /** - * @addtogroup Helper Helper MACROS + * @addtogroup Helper MACROS * * @brief EQOS generic helper MACROS. 
* @{ */ #define EQOS_DMA_CHX_IER(x) ((0x0080U * (x)) + 0x1134U) #define EQOS_MAX_MAC_ADDRESS_FILTER 128U +#define EQOS_MAX_MAC_5_3_ADDRESS_FILTER 32U #define EQOS_MAX_L3_L4_FILTER 8U -#define EQOS_MAX_HTR_REGS 8U #define OSI_MGBE_MAX_MAC_ADDRESS_FILTER 32U #define OSI_DA_MATCH 0U +#ifndef OSI_STRIPPED_LIB #define OSI_INV_MATCH 1U +#endif /* !OSI_STRIPPED_LIB */ #define OSI_AMASK_DISABLE 0U #define OSI_CHAN_ANY 0xFFU -#define OSI_MAX_TC_NUM 8U #define OSI_DFLT_MTU_SIZE 1500U #define OSI_MTU_SIZE_9000 9000U +/* Reg ETHER_QOS_AUTO_CAL_CONFIG_0[AUTO_CAL_PD/PU_OFFSET] max value */ +#define OSI_PAD_CAL_CONFIG_PD_PU_OFFSET_MAX 0x1FU + +#ifndef OSI_STRIPPED_LIB /* HW supports 8 Hash table regs, but eqos_validate_core_regs only checks 4 */ #define OSI_EQOS_MAX_HASH_REGS 4U -#define OSI_ETH_ALEN 6U +#endif /* !OSI_STRIPPED_LIB */ #define OSI_FLOW_CTRL_TX OSI_BIT(0) -#define OSI_FLOW_CTRL_RX OSI_BIT(1) #define OSI_FULL_DUPLEX 1 #define OSI_HALF_DUPLEX 0 -#define OSI_IP4_FILTER 0U -#define OSI_IP6_FILTER 1U -#define OSI_IPV6_MATCH 1U -#define OSI_IPV4_MATCH 0U - /* L2 filter operations supported by OSI layer. These operation modes shall be * set by OSD driver as input to update registers accordingly. 
*/ @@ -147,16 +193,12 @@ typedef my_lint_64 nvel64_t; #define OSI_OPER_DIS_PROMISC OSI_BIT(1) #define OSI_OPER_EN_ALLMULTI OSI_BIT(2) #define OSI_OPER_DIS_ALLMULTI OSI_BIT(3) -#define OSI_OPER_EN_L2_DA_INV OSI_BIT(4) -#define OSI_OPER_DIS_L2_DA_INV OSI_BIT(5) #define OSI_OPER_EN_PERFECT OSI_BIT(6) #define OSI_OPER_DIS_PERFECT OSI_BIT(7) #define OSI_OPER_ADDR_UPDATE OSI_BIT(8) #define OSI_OPER_ADDR_DEL OSI_BIT(9) -#define OSI_PAUSE_FRAMES_DISABLE 1U #define OSI_PFT_MATCH 0U -#define OSI_SOURCE_MATCH 0U #define OSI_SA_MATCH 1U #define OSI_SPEED_10 10 @@ -177,42 +219,20 @@ typedef my_lint_64 nvel64_t; * @brief Ethernet PHY Interface Modes */ #define OSI_XFI_MODE_10G 0U -#define OSI_XFI_MODE_5G 1U +#define OSI_XFI_MODE_5G 1U #define OSI_USXGMII_MODE_10G 2U #define OSI_USXGMII_MODE_5G 3U -/** - * @addtogroup PTP-offload PTP offload defines - * @{ - */ -#define OSI_PTP_SNAP_ORDINARY 0U -#define OSI_PTP_SNAP_TRANSPORT 1U -#define OSI_PTP_SNAP_P2P 3U -#define OSI_PTP_MAX_PORTID 0xFFFFU -#define OSI_PTP_MAX_DOMAIN 0xFFU - /** * @addtogroup IOCTL OPS MACROS * * @brief IOCTL OPS for runtime commands * @{ */ -#define OSI_CMD_MDC_CONFIG 1U -#define OSI_CMD_RESTORE_REGISTER 2U #define OSI_CMD_L3L4_FILTER 3U -#define OSI_CMD_POLL_FOR_MAC_RST 4U -#define OSI_CMD_START_MAC 5U -#define OSI_CMD_STOP_MAC 6U #define OSI_CMD_COMMON_ISR 7U #define OSI_CMD_PAD_CALIBRATION 8U #define OSI_CMD_READ_MMC 9U -#define OSI_CMD_GET_MAC_VER 10U -#define OSI_CMD_VALIDATE_CORE_REG 11U -#define OSI_CMD_RESET_MMC 12U -#define OSI_CMD_SAVE_REGISTER 13U -#define OSI_CMD_MAC_LB 14U -#define OSI_CMD_FLOW_CTRL 15U -#define OSI_CMD_SET_MODE 16U #define OSI_CMD_SET_SPEED 17U #define OSI_CMD_L2_FILTER 18U #define OSI_CMD_RXCSUM_OFFLOAD 19U @@ -221,19 +241,9 @@ typedef my_lint_64 nvel64_t; #define OSI_CMD_CONFIG_PTP 22U #define OSI_CMD_GET_AVB 23U #define OSI_CMD_SET_AVB 24U -#define OSI_CMD_CONFIG_RX_CRC_CHECK 25U -#define OSI_CMD_UPDATE_VLAN_ID 26U -#define OSI_CMD_CONFIG_TXSTATUS 27U #define 
OSI_CMD_GET_HW_FEAT 28U -#define OSI_CMD_CONFIG_FW_ERR 29U -#define OSI_CMD_ARP_OFFLOAD 30U -#define OSI_CMD_VLAN_FILTER 31U -#define OSI_CMD_CONFIG_EEE 32U #define OSI_CMD_SET_SYSTOHW_TIME 33U -#define OSI_CMD_CONFIG_PTP_OFFLOAD 34U -#define OSI_CMD_PTP_RXQ_ROUTE 35U #define OSI_CMD_CONFIG_FRP 36U -#define OSI_CMD_CONFIG_RSS 37U #define OSI_CMD_CONFIG_EST 38U #define OSI_CMD_CONFIG_FPE 39U #define OSI_CMD_READ_REG 40U @@ -254,8 +264,18 @@ typedef my_lint_64 nvel64_t; #ifdef HSI_SUPPORT #define OSI_CMD_HSI_CONFIGURE 51U #endif +#ifdef OSI_DEBUG +#define OSI_CMD_DEBUG_INTR_CONFIG 52U +#endif +#define OSI_CMD_SUSPEND 53U +#define OSI_CMD_RESUME 54U +#ifdef HSI_SUPPORT +#define OSI_CMD_HSI_INJECT_ERR 55U +#endif +#define OSI_CMD_READ_STATS 56U /** @} */ +#ifdef LOG_OSI /** * @brief OSI error macro definition, * @param[in] priv: OSD private data OR NULL @@ -281,30 +301,21 @@ typedef my_lint_64 nvel64_t; osi_core->osd_ops.ops_log(priv, __func__, __LINE__, \ OSI_LOG_INFO, type, err, loga); \ } +#else +#define OSI_CORE_ERR(priv, type, err, loga) +#define OSI_CORE_INFO(priv, type, err, loga) +#endif #define VLAN_NUM_VID 4096U -#define OSI_VLAN_ACTION_ADD OSI_BIT(31) -#define OSI_VLAN_ACTION_DEL 0x0U -#define OSI_RXQ_ROUTE_PTP 0U #define OSI_DELAY_1000US 1000U #define OSI_DELAY_1US 1U -/** - * @addtogroup RSS related information - * - * @brief RSS hash key and table size. 
- * @{ - */ -#define OSI_RSS_HASH_KEY_SIZE 40U -#define OSI_RSS_MAX_TABLE_SIZE 128U -/** @} */ /** - * @addtogroup PTP related information + * @addtogroup PTP PTP related information * * @brief PTP SSINC values * @{ */ -#define OSI_PTP_SSINC_16 16U #define OSI_PTP_SSINC_4 4U #define OSI_PTP_SSINC_6 6U /** @} */ @@ -315,13 +326,16 @@ typedef my_lint_64 nvel64_t; * @brief Flexible Receive Parser commands, table size and other defines * @{ */ +#ifndef OSI_STRIPPED_LIB +#define OSI_FRP_CMD_MAX 3U +#define OSI_FRP_MATCH_MAX 10U +#endif /* !OSI_STRIPPED_LIB */ #define OSI_FRP_MAX_ENTRY 256U #define OSI_FRP_OFFSET_MAX 64U /* FRP Command types */ #define OSI_FRP_CMD_ADD 0U #define OSI_FRP_CMD_UPDATE 1U #define OSI_FRP_CMD_DEL 2U -#define OSI_FRP_CMD_MAX 3U /* FRP Filter mode defines */ #define OSI_FRP_MODE_ROUTE 0U #define OSI_FRP_MODE_DROP 1U @@ -333,7 +347,6 @@ typedef my_lint_64 nvel64_t; #define OSI_FRP_MODE_IM_LINK 7U #define OSI_FRP_MODE_MAX 8U /* Match data defines */ -#define OSI_FRP_MATCH_DATA_MAX 12U #define OSI_FRP_MATCH_NORMAL 0U #define OSI_FRP_MATCH_L2_DA 1U #define OSI_FRP_MATCH_L2_SA 2U @@ -344,32 +357,30 @@ typedef my_lint_64 nvel64_t; #define OSI_FRP_MATCH_L4_S_TPORT 7U #define OSI_FRP_MATCH_L4_D_TPORT 8U #define OSI_FRP_MATCH_VLAN 9U -#define OSI_FRP_MATCH_MAX 10U /** @} */ +#define XPCS_WRITE_FAIL_CODE -9 + #ifdef HSI_SUPPORT /** - * @addtogroup hsi_err_code_idx + * @addtogroup osi_hsi_err_code_idx * - * @brief data index for hsi_err_code array + * @brief data index for osi_hsi_err_code array * @{ */ -#define REPORTER_IDX 2U - #define UE_IDX 0U #define CE_IDX 1U #define RX_CRC_ERR_IDX 2U #define TX_FRAME_ERR_IDX 3U #define RX_CSUM_ERR_IDX 4U #define AUTONEG_ERR_IDX 5U - +#define XPCS_WRITE_FAIL_IDX 6U #define MACSEC_RX_CRC_ERR_IDX 0U #define MACSEC_TX_CRC_ERR_IDX 1U #define MACSEC_RX_ICV_ERR_IDX 2U +#define MACSEC_REG_VIOL_ERR_IDX 3U /** @} */ -extern nveu32_t hsi_err_code[][3]; - /** * @addtogroup HSI_TIME_THRESHOLD * @@ -388,13 +399,14 @@ extern 
nveu32_t hsi_err_code[][3]; /** * @brief Maximum number of different mac error code + * HSI_SW_ERR_CODE + Two (Corrected and Uncorrected error code) */ -#define HSI_MAX_MAC_ERROR_CODE 6U +#define OSI_HSI_MAX_MAC_ERROR_CODE 7U /** * @brief Maximum number of different macsec error code */ -#define HSI_MAX_MACSEC_ERROR_CODE 3U +#define HSI_MAX_MACSEC_ERROR_CODE 4U /** * @addtogroup HSI_SW_ERR_CODE @@ -409,7 +421,26 @@ extern nveu32_t hsi_err_code[][3]; #define OSI_MACSEC_RX_CRC_ERR 0x1005U #define OSI_MACSEC_TX_CRC_ERR 0x1006U #define OSI_MACSEC_RX_ICV_ERR 0x1007U +#define OSI_MACSEC_REG_VIOL_ERR 0x1008U +#define OSI_XPCS_WRITE_FAIL_ERR 0x1009U +#define OSI_HSI_MGBE0_UE_CODE 0x2A00U +#define OSI_HSI_MGBE1_UE_CODE 0x2A01U +#define OSI_HSI_MGBE2_UE_CODE 0x2A02U +#define OSI_HSI_MGBE3_UE_CODE 0x2A03U +#define OSI_HSI_EQOS0_UE_CODE 0x28ADU + +#define OSI_HSI_MGBE0_CE_CODE 0x2E08U +#define OSI_HSI_MGBE1_CE_CODE 0x2E09U +#define OSI_HSI_MGBE2_CE_CODE 0x2E0AU +#define OSI_HSI_MGBE3_CE_CODE 0x2E0BU +#define OSI_HSI_EQOS0_CE_CODE 0x2DE6U + +#define OSI_HSI_MGBE0_REPORTER_ID 0x8019U +#define OSI_HSI_MGBE1_REPORTER_ID 0x801AU +#define OSI_HSI_MGBE2_REPORTER_ID 0x801BU +#define OSI_HSI_MGBE3_REPORTER_ID 0x801CU +#define OSI_HSI_EQOS0_REPORTER_ID 0x8009U /** @} */ #endif @@ -443,39 +474,20 @@ struct osi_filter { nveu32_t dma_chansel; }; +#ifndef OSI_STRIPPED_LIB /** * @brief OSI core structure for RXQ route */ struct osi_rxq_route { #define OSI_RXQ_ROUTE_PTP 0U /** Indicates RX routing type OSI_RXQ_ROUTE_* */ - unsigned int route_type; + nveu32_t route_type; /** RXQ routing enable(1) disable (0) */ - unsigned int enable; + nveu32_t enable; /** RX queue index */ - unsigned int idx; -}; - -/** - * @brief L3/L4 filter function dependent parameter - */ -struct osi_l3_l4_filter { - /** Indicates the index of the filter to be modified. 
- * Filter index must be between 0 - 7 */ - nveu32_t filter_no; - /** filter enable(1) or disable(0) */ - nveu32_t filter_enb_dis; - /** source(0) or destination(1) */ - nveu32_t src_dst_addr_match; - /** perfect(0) or inverse(1) */ - nveu32_t perfect_inverse_match; - /** ipv4 address */ - nveu8_t ip4_addr[4]; - /** ipv6 address */ - nveu16_t ip6_addr[8]; - /** Port number */ - nveu16_t port_no; + nveu32_t idx; }; +#endif /** * @brief struct osi_hw_features - MAC HW supported features. @@ -800,64 +812,6 @@ struct osi_vlan_filter { nveu32_t perfect_inverse_match; }; -/** - * @brief FRP Instruction configuration structure - */ -struct osi_core_frp_data { - /* Entry Match Data */ - unsigned int match_data; - /* Entry Match Enable mask */ - unsigned int match_en; - /* Entry Accept frame flag */ - unsigned char accept_frame; - /* Entry Reject Frame flag */ - unsigned char reject_frame; - /* Entry Inverse match flag */ - unsigned char inverse_match; - /* Entry Next Instruction Control match flag */ - unsigned char next_ins_ctrl; - /* Entry Frame offset in the packet data */ - unsigned char frame_offset; - /* Entry OK Index - Next Instruction */ - unsigned char ok_index; - /* Entry DMA Channel selection (1-bit for each channel) */ - unsigned int dma_chsel; -}; - -/** - * @brief FRP command structure for OSD to OSI - */ -struct osi_core_frp_cmd { - /* FRP Command type */ - unsigned int cmd; - /* OSD FRP ID */ - int frp_id; - /* OSD match data type */ - unsigned char match_type; - /* OSD match data */ - unsigned char match[OSI_FRP_MATCH_DATA_MAX]; - /* OSD match data length */ - unsigned char match_length; - /* OSD Offset */ - unsigned char offset; - /* OSD FRP filter mode flag */ - unsigned char filter_mode; - /* OSD FRP Link ID */ - int next_frp_id; - /* OSD DMA Channel Selection */ - unsigned int dma_sel; -}; - -/** - * @brief FRP Instruction table entry configuration structure - */ -struct osi_core_frp_entry { - /* FRP ID */ - int frp_id; - /* FRP Entry data structure 
*/ - struct osi_core_frp_data data; -}; - /** * @brief L2 filter function dependent parameter */ @@ -868,118 +822,58 @@ struct osi_l2_da_filter { nveu32_t perfect_inverse_match; }; -/** - * @brief OSI Core avb data structure per queue. - */ -struct osi_core_avb_algorithm { - /** TX Queue/TC index */ - nveu32_t qindex; - /** CBS Algorithm enable(1) or disable(0) */ - nveu32_t algo; - /** When this bit is set, the accumulated credit parameter in the - * credit-based shaper algorithm logic is not reset to zero when - * there is positive credit and no packet to transmit in Channel. - * - * Expected values are enable(1) or disable(0) */ - nveu32_t credit_control; - /** idleSlopeCredit value required for CBS */ - nveu32_t idle_slope; - /** sendSlopeCredit value required for CBS */ - nveu32_t send_slope; - /** hiCredit value required for CBS */ - nveu32_t hi_credit; - /** lowCredit value required for CBS */ - nveu32_t low_credit; - /** Transmit queue operating mode - * - * 00: disable - * - * 01: avb - * - * 10: enable */ - nveu32_t oper_mode; - /** TC index */ - unsigned int tcindex; -}; -#endif /* !OSI_STRIPPED_LIB */ - /** * @brief struct ptp_offload_param - Parameter to support PTP offload. */ struct osi_pto_config { /** enable(0) / disable(1) */ - unsigned int en_dis; + nveu32_t en_dis; /** Flag for Master mode. * OSI_ENABLE for master OSI_DISABLE for slave */ - unsigned int master; + nveu32_t master; /** Flag to Select PTP packets for Taking Snapshots */ - unsigned int snap_type; + nveu32_t snap_type; /** ptp domain */ - unsigned int domain_num; + nveu32_t domain_num; /** The PTP Offload function qualifies received PTP * packet with unicast Destination address * 0 - only multicast, 1 - unicast and multicast */ - unsigned int mc_uc; + nveu32_t mc_uc; /** Port identification */ - unsigned int portid; + nveu32_t portid; }; /** - * @brief OSI Core EST structure + * @brief osi_core_rss - Struture used to store RSS Hash key and table + * information. 
*/ -struct osi_est_config { - /** enable/disable */ - unsigned int en_dis; - /** 64 bit base time register - * if both vlaues are 0, take ptp time to avoid BTRE - * index 0 for nsec, index 1 for sec - */ - unsigned int btr[2]; - /** 64 bit base time offset index 0 for nsec, index 1 for sec */ - unsigned int btr_offset[2]; - /** 40 bit cycle time register, index 0 for nsec, index 1 for sec */ - unsigned int ctr[2]; - /** Configured Time Interval width + 7 bit extension register */ - unsigned int ter; - /** size of the gate control list */ - unsigned int llr; - /** data array 8 bit gate op + 24 execution time - * MGBE HW support GCL depth 256 */ - unsigned int gcl[OSI_GCL_SIZE_256]; +struct osi_core_rss { + /** Flag to represent to enable RSS or not */ + nveu32_t enable; + /** Array for storing RSS Hash key */ + nveu8_t key[OSI_RSS_HASH_KEY_SIZE]; + /** Array for storing RSS Hash table */ + nveu32_t table[OSI_RSS_MAX_TABLE_SIZE]; }; /** - * @brief OSI Core FPE structure + * @brief Max num of MAC core registers to backup. It should be max of or >= + * (EQOS_MAX_BAK_IDX=380, coreX,...etc) backup registers. */ -struct osi_fpe_config { - /** Queue Mask 1 preemption 0- express bit representation */ - unsigned int tx_queue_preemption_enable; - /** RQ for all preemptable packets which are not filtered - * based on user priority or SA-DA - */ - unsigned int rq; -}; +#define CORE_MAX_BAK_IDX 700U /** - * @brief OSI Core TSN error stats structure + * @brief core_backup - Struct used to store backup of core HW registers. 
*/ -struct osi_tsn_stats { - /** Constant Gate Control Error */ - unsigned long const_gate_ctr_err; - /** Head-Of-Line Blocking due to Scheduling */ - unsigned long head_of_line_blk_sch; - /** Per TC Schedule Error */ - unsigned long hlbs_q[OSI_MAX_TC_NUM]; - /** Head-Of-Line Blocking due to Frame Size */ - unsigned long head_of_line_blk_frm; - /** Per TC Frame Size Error */ - unsigned long hlbf_q[OSI_MAX_TC_NUM]; - /** BTR Error */ - unsigned long base_time_reg_err; - /** Switch to Software Owned List Complete */ - unsigned long sw_own_list_complete; +struct core_backup { + /** Array of reg MMIO addresses (base of MAC + offset of reg) */ + void *reg_addr[CORE_MAX_BAK_IDX]; + /** Array of value stored in each corresponding register */ + nveu32_t reg_val[CORE_MAX_BAK_IDX]; }; +#endif /* !OSI_STRIPPED_LIB */ + /** * @brief PTP configuration structure */ @@ -1029,19 +923,6 @@ struct osi_ptp_config { nveu32_t ptp_rx_queue; }; -/** - * @brief osi_core_rss - Struture used to store RSS Hash key and table - * information. - */ -struct osi_core_rss { - /** Flag to represent to enable RSS or not */ - unsigned int enable; - /** Array for storing RSS Hash key */ - unsigned char key[OSI_RSS_HASH_KEY_SIZE]; - /** Array for storing RSS Hash table */ - unsigned int table[OSI_RSS_MAX_TABLE_SIZE]; -}; - /** * @brief osi_core_ptp_tsc_data - Struture used to store TSC and PTP time * information. @@ -1057,22 +938,6 @@ struct osi_core_ptp_tsc_data { nveu32_t tsc_low_bits; }; -/** - * @brief Max num of MAC core registers to backup. It should be max of or >= - * (EQOS_MAX_BAK_IDX=380, coreX,...etc) backup registers. - */ -#define CORE_MAX_BAK_IDX 700U - -/** - * @brief core_backup - Struct used to store backup of core HW registers. 
- */ -struct core_backup { - /** Array of reg MMIO addresses (base of MAC + offset of reg) */ - void *reg_addr[CORE_MAX_BAK_IDX]; - /** Array of value stored in each corresponding register */ - nveu32_t reg_val[CORE_MAX_BAK_IDX]; -}; - /** * @brief OSI VM IRQ data */ @@ -1090,7 +955,7 @@ struct osi_vm_irq_data { */ struct osd_core_ops { /** padctrl rx pin disable/enable callback */ - int (*padctrl_mii_rx_pins)(void *priv, nveu32_t enable); + nve32_t (*padctrl_mii_rx_pins)(void *priv, nveu32_t enable); /** logging callback */ void (*ops_log)(void *priv, const nve8_t *func, nveu32_t line, nveu32_t level, nveu32_t type, const nve8_t *err, @@ -1106,7 +971,7 @@ struct osd_core_ops { nveu32_t len); #ifdef MACSEC_SUPPORT /** Program macsec key table through Trust Zone callback */ - nve32_t (*macsec_tz_kt_config)(void *priv, unsigned char cmd, + nve32_t (*macsec_tz_kt_config)(void *priv, nveu8_t cmd, void *const kt_config, void *const genl_info); #endif /* MACSEC_SUPPORT */ @@ -1116,6 +981,8 @@ struct osd_core_ops { nveu32_t type, const char *fmt, ...); #endif + /** Lane bringup restart callback */ + void (*restart_lane_bringup)(void *priv, nveu32_t en_disable); }; #ifdef MACSEC_SUPPORT @@ -1126,7 +993,7 @@ struct osi_macsec_sc_info { /** Secure channel identifier */ nveu8_t sci[OSI_SCI_LEN]; /** Secure association key */ - nveu8_t sak[OSI_KEY_LEN_128]; + nveu8_t sak[OSI_KEY_LEN_256]; #ifdef MACSEC_KEY_PROGRAM /** Secure association key */ nveu8_t hkey[OSI_KEY_LEN_128]; @@ -1204,6 +1071,40 @@ struct osi_macsec_irq_stats { }; #endif /* MACSEC_SUPPORT */ +/** + * @brief FRP Instruction configuration structure + */ +struct osi_core_frp_data { + /** Entry Match Data */ + nveu32_t match_data; + /** Entry Match Enable mask */ + nveu32_t match_en; + /** Entry Accept frame flag */ + nveu8_t accept_frame; + /** Entry Reject Frame flag */ + nveu8_t reject_frame; + /** Entry Inverse match flag */ + nveu8_t inverse_match; + /** Entry Next Instruction Control match flag */ + nveu8_t 
next_ins_ctrl; + /** Entry Frame offset in the packet data */ + nveu8_t frame_offset; + /** Entry OK Index - Next Instruction */ + nveu8_t ok_index; + /** Entry DMA Channel selection (1-bit for each channel) */ + nveu32_t dma_chsel; +}; + +/** + * @brief FRP Instruction table entry configuration structure + */ +struct osi_core_frp_entry { + /** FRP ID */ + nve32_t frp_id; + /** FRP Entry data structure */ + struct osi_core_frp_data data; +}; + /** * @brief Core time stamp data strcuture */ @@ -1250,21 +1151,21 @@ struct osi_ioctl { struct osi_l3_l4_filter l3l4_filter; /* HW feature structure */ struct osi_hw_features hw_feat; -#ifndef OSI_STRIPPED_LIB - /* AVB structure */ + /** AVB structure */ struct osi_core_avb_algorithm avb; - /* VLAN filter structure */ +#ifndef OSI_STRIPPED_LIB + /** VLAN filter structure */ struct osi_vlan_filter vlan_filter; -#endif /* !OSI_STRIPPED_LIB */ - /* PTP offload config structure*/ + /** PTP offload config structure*/ struct osi_pto_config pto_config; - /* RXQ route structure */ + /** RXQ route structure */ struct osi_rxq_route rxq_route; - /* FRP structure */ +#endif /* !OSI_STRIPPED_LIB */ + /** FRP structure */ struct osi_core_frp_cmd frp_cmd; - /* EST structure */ + /** EST structure */ struct osi_est_config est; - /* FRP structure */ + /** FRP structure */ struct osi_fpe_config fpe; /** PTP configuration settings */ struct osi_ptp_config ptp_config; @@ -1281,33 +1182,23 @@ struct core_padctrl { /** Memory mapped base address of eqos padctrl registers */ void *padctrl_base; /** EQOS_RD0_0 register offset */ - unsigned int offset_rd0; + nveu32_t offset_rd0; /** EQOS_RD1_0 register offset */ - unsigned int offset_rd1; + nveu32_t offset_rd1; /** EQOS_RD2_0 register offset */ - unsigned int offset_rd2; + nveu32_t offset_rd2; /** EQOS_RD3_0 register offset */ - unsigned int offset_rd3; + nveu32_t offset_rd3; /** RX_CTL_0 register offset */ - unsigned int offset_rx_ctl; + nveu32_t offset_rx_ctl; /** is pad calibration in progress 
*/ - unsigned int is_pad_cal_in_progress; + nveu32_t is_pad_cal_in_progress; /** This flag set/reset using priv ioctl and DT entry */ - unsigned int pad_calibration_enable; -}; - -/** - * @brief OSI CORE packet error stats - */ -struct osi_core_pkt_err_stats { - /** IP Header Error */ - nveu64_t mgbe_ip_header_err; - /** Jabber time out Error */ - nveu64_t mgbe_jabber_timeout_err; - /** Payload Checksum Error */ - nveu64_t mgbe_payload_cs_err; - /** Under Flow Error */ - nveu64_t mgbe_tx_underflow_err; + nveu32_t pad_calibration_enable; + /** Reg ETHER_QOS_AUTO_CAL_CONFIG_0[AUTO_CAL_PD_OFFSET] value */ + nveu32_t pad_auto_cal_pd_offset; + /** Reg ETHER_QOS_AUTO_CAL_CONFIG_0[AUTO_CAL_PU_OFFSET] value */ + nveu32_t pad_auto_cal_pu_offset; }; #ifdef HSI_SUPPORT @@ -1322,11 +1213,11 @@ struct osi_hsi_data { /** error count threshold to report error */ nveu32_t err_count_threshold; /** HSI reporter ID */ - nveu32_t reporter_id; + nveu16_t reporter_id; /** HSI error codes */ - nveu32_t err_code[HSI_MAX_MAC_ERROR_CODE]; + nveu32_t err_code[OSI_HSI_MAX_MAC_ERROR_CODE]; /** HSI MAC report count threshold based error */ - nveu32_t report_count_err[HSI_MAX_MAC_ERROR_CODE]; + nveu32_t report_count_err[OSI_HSI_MAX_MAC_ERROR_CODE]; /** Indicates if error reporting to FSI is pending */ nveu32_t report_err; /** HSI MACSEC error codes */ @@ -1353,6 +1244,10 @@ struct osi_hsi_data { nveu64_t tx_frame_err_count; /** tx frame error count threshold hit */ nveu64_t tx_frame_err_threshold; + /** Rx UDP error injection count */ + nveu64_t inject_udp_err_count; + /** Rx CRC error injection count */ + nveu64_t inject_crc_err_count; }; #endif @@ -1362,8 +1257,6 @@ struct osi_hsi_data { struct osi_core_priv_data { /** Memory mapped base address of MAC IP */ void *base; - /** Memory mapped base address of HV window */ - void *hv_base; /** Memory mapped base address of DMA window of MAC IP */ void *dma_base; /** Memory mapped base address of XPCS IP */ @@ -1389,7 +1282,7 @@ struct 
osi_core_priv_data { /** FPE HW configuration initited to enable/disable * 1- FPE HW configuration initiated to enable * 0- FPE HW configuration initiated to disable */ - unsigned int is_fpe_enabled; + nveu32_t is_fpe_enabled; #endif /* MACSEC_SUPPORT */ /** Pointer to OSD private data structure */ void *osd; @@ -1403,24 +1296,16 @@ struct osi_core_priv_data { nveu32_t rxq_ctrl[OSI_MGBE_MAX_NUM_CHANS]; /** Rx MTl Queue mapping based on User Priority field */ nveu32_t rxq_prio[OSI_MGBE_MAX_NUM_CHANS]; - /** TQ:TC mapping */ - unsigned int tc[OSI_MGBE_MAX_NUM_CHANS]; - /** Residual queue valid with FPE support */ - unsigned int residual_queue; /** MAC HW type EQOS based on DT compatible */ nveu32_t mac; /** MAC version */ nveu32_t mac_ver; /** HW supported feature list */ struct osi_hw_features *hw_feat; - /** MDC clock rate */ - nveu32_t mdc_cr; /** MTU size */ nveu32_t mtu; /** Ethernet MAC address */ nveu8_t mac_addr[OSI_ETH_ALEN]; - /** DT entry to enable(1) or disable(0) pause frame support */ - nveu32_t pause_frames; /** Current flow control settings */ nveu32_t flow_ctrl; /** PTP configuration settings */ @@ -1429,49 +1314,56 @@ struct osi_core_priv_data { nveu32_t default_addend; /** mmc counter structure */ struct osi_mmc_counters mmc; - /** xtra sw error counters */ - struct osi_xtra_stat_counters xstats; /** DMA channel selection enable (1) */ nveu32_t dcs_en; - /** Functional safety config to do periodic read-verify of - * certain safety critical registers */ - void *safety_config; - /** Backup config to save/restore registers during suspend/resume */ - struct core_backup backup_config; + /** TQ:TC mapping */ + nveu32_t tc[OSI_MGBE_MAX_NUM_CHANS]; +#ifndef OSI_STRIPPED_LIB + /** Memory mapped base address of HV window */ + void *hv_base; + /** csr clock is to program LPI 1 us tick timer register. 
+ * Value stored in MHz + */ + nveu32_t csr_clk_speed; + nveu64_t vf_bitmap; + /** Array to maintain VLAN filters */ + nveu16_t vid[VLAN_NUM_VID]; + /** Count of number of VLAN filters in vid array */ + nveu16_t vlan_filter_cnt; + /** RSS core structure */ + struct osi_core_rss rss; + /** DT entry to enable(1) or disable(0) pause frame support */ + nveu32_t pause_frames; +#endif + /** Residual queue valid with FPE support */ + nveu32_t residual_queue; + /** FRP Instruction Table */ + struct osi_core_frp_entry frp_table[OSI_FRP_MAX_ENTRY]; + /** Number of valid Entries in the FRP Instruction Table */ + nveu32_t frp_cnt; + /* Switch to Software Owned List Complete. + * 1 - Successful and User configured GCL in placed + */ + nveu32_t est_ready; + /* FPE enabled, verify and respose done with peer device + * 1- Successful and can be used between P2P device + */ + nveu32_t fpe_ready; + /** MAC stats counters */ + struct osi_stats stats; + /** eqos pad control structure */ + struct core_padctrl padctrl; + /** MDC clock rate */ + nveu32_t mdc_cr; /** VLAN tag stripping enable(1) or disable(0) */ nveu32_t strip_vlan_tag; /** L3L4 filter bit bask, set index corresponding bit for * filter if filter enabled */ nveu32_t l3l4_filter_bitmask; - /** csr clock is to program LPI 1 us tick timer register. 
- * Value stored in MHz - */ - nveu32_t csr_clk_speed; - /** Tegra Pre-si platform info */ - nveu32_t pre_si; /** Flag which decides virtualization is enabled(1) or disabled(0) */ nveu32_t use_virtualization; - unsigned long vf_bitmap; - /** Array to maintaion VLAN filters */ - unsigned short vid[VLAN_NUM_VID]; - /** Count of number of VLAN filters in vid array */ - unsigned short vlan_filter_cnt; - /** FRP Instruction Table */ - struct osi_core_frp_entry frp_table[OSI_FRP_MAX_ENTRY]; - /** Number of valid Entries in the FRP Instruction Table */ - unsigned int frp_cnt; - /** RSS core structure */ - struct osi_core_rss rss; /** HW supported feature list */ struct osi_hw_features *hw_feature; - /** Switch to Software Owned List Complete. - * 1 - Successful and User configured GCL in placed */ - unsigned int est_ready; - /** FPE enabled, verify and respose done with peer device - * 1- Sucessful and can be used between P2P device */ - unsigned int fpe_ready; - /** TSN stats counters */ - struct osi_tsn_stats tsn_stats; /** MC packets Multiple DMA channel selection flags */ nveu32_t mc_dmasel; /** UPHY GBE mode (1 for 10G, 0 for 5G) */ @@ -1482,12 +1374,8 @@ struct osi_core_priv_data { nveu32_t num_vm_irqs; /** PHY interface mode (0/1 for XFI 10/5G, 2/3 for USXGMII 10/5) */ nveu32_t phy_iface_mode; - /** eqos pad control structure */ - struct core_padctrl padctrl; /** MGBE MAC instance ID's */ nveu32_t instance_id; - /** Packet error stats */ - struct osi_core_pkt_err_stats pkt_err_stats; /** Ethernet controller MAC to MAC Time sync role * 1 - Primary interface, 2 - secondary interface, 0 - inactive interface */ @@ -1500,41 +1388,6 @@ struct osi_core_priv_data { #endif }; -/** - * @brief osi_poll_for_mac_reset_complete - Poll Software reset bit in MAC HW - * - * @note - * Algorithm: - * - Invokes EQOS routine to check for SWR (software reset) - * bit in DMA Basic mode register to make sure IP reset was successful. 
- * - * @param[in] osi_core: OSI Core private data structure. - * - * @pre MAC needs to be out of reset and proper clock configured. - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_004 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ - -nve32_t osi_poll_for_mac_reset_complete( - struct osi_core_priv_data *const osi_core); - /** * @brief osi_hw_core_init - EQOS MAC, MTL and common DMA initialization. * @@ -1543,8 +1396,6 @@ nve32_t osi_poll_for_mac_reset_complete( * - Invokes EQOS MAC, MTL and common DMA register init code. * * @param[in, out] osi_core: OSI core private data structure. - * @param[in] tx_fifo_size: OSI core private data structure. - * @param[in] rx_fifo_size: OSI core private data structure. * * @pre * - MAC should be out of reset. See osi_poll_for_mac_reset_complete() @@ -1572,8 +1423,7 @@ nve32_t osi_poll_for_mac_reset_complete( * @retval 0 on success * @retval -1 on failure. */ -nve32_t osi_hw_core_init(struct osi_core_priv_data *const osi_core, - nveu32_t tx_fifo_size, nveu32_t rx_fifo_size); +nve32_t osi_hw_core_init(struct osi_core_priv_data *const osi_core); /** * @brief osi_hw_core_deinit - EQOS MAC deinitialization. @@ -1607,314 +1457,6 @@ nve32_t osi_hw_core_init(struct osi_core_priv_data *const osi_core, */ nve32_t osi_hw_core_deinit(struct osi_core_priv_data *const osi_core); -/** - * @brief osi_start_mac - Start MAC Tx/Rx engine - * - * @note - * Algorithm: - * - Enable MAC Tx and Rx engine. - * - * @param[in] osi_core: OSI core private data structure. - * - * @pre MAC init should be complete. 
See osi_hw_core_init() and - * osi_hw_dma_init() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_008 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_start_mac(struct osi_core_priv_data *const osi_core); - -/** - * @brief osi_stop_mac - Stop MAC Tx/Rx engine - * - * @note - * Algorithm: - * - Stop MAC Tx and Rx engine - * - * @param[in] osi_core: OSI core private data structure. - * - * @pre MAC DMA deinit should be complete. See osi_hw_dma_deinit() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_009 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: No - * - De-initialization: Yes - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_stop_mac(struct osi_core_priv_data *const osi_core); - -/** - * @brief osi_common_isr - Common ISR. - * - * @note - * Algorithm: - * - Takes care of handling the common interrupts accordingly as per - * the MAC IP - * - * @param[in] osi_core: OSI core private data structure. - * - * @pre MAC should be init and started. see osi_start_mac() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_010 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: Yes - * - Signal handler: Yes - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -nve32_t osi_common_isr(struct osi_core_priv_data *const osi_core); - -/** - * @brief osi_set_mode - Set FD/HD mode. - * - * @note - * Algorithm: - * - Takes care of setting HD or FD mode accordingly as per the MAC IP - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] mode: Operating mode. (OSI_FULL_DUPLEX/OSI_HALF_DUPLEX) - * - * @pre MAC should be init and started. see osi_start_mac() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_011 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_set_mode(struct osi_core_priv_data *const osi_core, - const nve32_t mode); - -/** - * @brief osi_set_speed - Set operating speed. - * - * @note - * Algorithm: - * - Takes care of setting the operating speed accordingly as per - * the MAC IP. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] speed: Operating speed. - * - * @pre MAC should be init and started. see osi_start_mac() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_012 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_set_speed(struct osi_core_priv_data *const osi_core, - const nve32_t speed); - -/** - * @brief osi_pad_calibrate - PAD calibration - * - * @note - * Algorithm: - * - Takes care of doing the pad calibration - * accordingly as per the MAC IP. - * - * @param[in] osi_core: OSI core private data structure. 
- * - * @pre - * - MAC should out of reset and clocks enabled. - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_013 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 value on failure or pad calibration is disabled - */ -nve32_t osi_pad_calibrate(struct osi_core_priv_data *const osi_core); - -/** - * @brief osi_config_fw_err_pkts - Configure forwarding of error packets - * - * @note - * Algorithm: - * - Configure MAC to enable/disable forwarding of error packets. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] qinx: Q index. Max OSI_EQOS_MAX_NUM_QUEUES. - * @param[in] fw_err: Enable or disable forwarding of error packets. - * 0: Disable 1: Enable - * - * @pre MAC should be init and started. see osi_start_mac() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_020 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_config_fw_err_pkts(struct osi_core_priv_data *const osi_core, - const nveu32_t qinx, const nveu32_t fw_err); - -/** - * @brief osi_config_rxcsum_offload - Configure RX checksum offload in MAC. - * - * @note - * Algorithm: - * - Invokes EQOS config RX checksum offload routine. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] enable: Enable/disable flag. 0: Disable 1: Enable - * - * @pre MAC should be init and started. 
see osi_start_mac() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_017 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_config_rxcsum_offload(struct osi_core_priv_data *const osi_core, - const nveu32_t enable); - -/** - * @brief osi_l2_filter - configure L2 mac filter. - * - * @note - * Algorithm: - * - This sequence is used to configure MAC in different packet - * processing modes like promiscuous, multicast, unicast, - * hash unicast/multicast and perfect/inverse matching for L2 DA - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] filter: OSI filter structure. - * - * @pre - * - MAC should be initialized and started. see osi_start_mac() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_018 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_l2_filter(struct osi_core_priv_data *const osi_core, - const struct osi_filter *filter); - /** * @brief osi_write_phy_reg - Write to a PHY register through MAC over MDIO bus. * @@ -1959,42 +1501,6 @@ nve32_t osi_write_phy_reg(struct osi_core_priv_data *const osi_core, const nveu32_t phyaddr, const nveu32_t phyreg, const nveu16_t phydata); -/** - * @brief osi_read_mmc - invoke function to read actual registers and update - * structure variable mmc - * - * @note - * Algorithm: - * - Read the registers, mask reserve bits if required, update - * structure. 
- * - * @param[in] osi_core: OSI core private data structure. - * - * @pre - * - MAC should be init and started. see osi_start_mac() - * - osi_core->osd should be populated - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_014 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_read_mmc(struct osi_core_priv_data *const osi_core); - /** * @brief osi_read_phy_reg - Read from a PHY register through MAC over MDIO bus. * @@ -2065,283 +1571,6 @@ nve32_t osi_read_phy_reg(struct osi_core_priv_data *const osi_core, */ nve32_t osi_init_core_ops(struct osi_core_priv_data *const osi_core); -/** - * @brief osi_set_systime_to_mac - Handles setting of system time. - * - * @note - * Algorithm: - * - Set current system time to MAC. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] sec: Seconds to be configured. - * @param[in] nsec: Nano seconds to be configured. - * - * @pre MAC should be init and started. see osi_start_mac() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_005 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_set_systime_to_mac(struct osi_core_priv_data *const osi_core, - const nveu32_t sec, const nveu32_t nsec); - -/** - * @brief osi_adjust_freq - Adjust frequency - * - * @note - * Algorithm: - * - Adjust a drift of +/- comp nanoseconds per second. 
- * "Compensation" is the difference in frequency between - * the master and slave clocks in Parts Per Billion. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] ppb: Parts per Billion - * - * @pre MAC should be init and started. see osi_start_mac() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_023 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_adjust_freq(struct osi_core_priv_data *const osi_core, nve32_t ppb); - -/** - * @brief osi_adjust_time - Adjust MAC time with system time - * - * @note - * Algorithm: - * - Adjust/update the MAC time (delta time from MAC to system time - * passed in nanoseconds, can be + or -). - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] nsec_delta: Delta time in nano seconds - * - * @pre - * - MAC should be init and started. see osi_start_mac() - * - osi_core->ptp_config.one_nsec_accuracy need to be set to 1 - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_022 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_adjust_time(struct osi_core_priv_data *const osi_core, - nvel64_t nsec_delta); - -/** - * @brief osi_ptp_configuration - Configure PTP - * - * @note - * Algorithm: - * - Configure the PTP registers that are required for PTP. - * - * @param[in] osi_core: OSI core private data structure. 
- * @param[in] enable: Enable or disable Time Stamping. 0: Disable 1: Enable - * - * @pre - * - MAC should be init and started. see osi_start_mac() - * - osi->ptp_config.ptp_filter need to be filled accordingly to the - * filter that need to be set for PTP packets. Please check osi_ptp_config - * structure declaration on the bit fields that need to be filled. - * - osi->ptp_config.ptp_clock need to be filled with the ptp system clk. - * Currently it is set to 62500000Hz. - * - osi->ptp_config.ptp_ref_clk_rate need to be filled with the ptp - * reference clock that platform supports. - * - osi->ptp_config.sec need to be filled with current time of seconds - * - osi->ptp_config.nsec need to be filled with current time of nseconds - * - osi->base need to be filled with the ioremapped base address - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_021 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_ptp_configuration(struct osi_core_priv_data *const osi_core, - const nveu32_t enable); - -/* MAC version specific implementation function prototypes added here - * for misra compliance to have - * 1. Visible prototype for all functions. - * 2. Only one prototype for all function. - */ -void *eqos_get_core_safety_config(void); - -/** - * @brief osi_l3l4_filter - invoke OSI call to add L3/L4 - * filters. - * - * @note - * Algorithm: - * - This routine is to enable/disable L3/l4 filter. - * Check for DCS enable as well as validate channel - * number if dcs_enable is set. After validation, configure L3(IPv4/IPv6) - * filters register for given address. 
Based on input arguments update - * IPv4/IPv6 source/destination address for L3 layer filtering or source and - * destination Port Number for L4(TCP/UDP) layer - * filtering. - * - * @param[in, out] osi_core: OSI core private data structure. - * @param[in] l_filter: L3L4 filter data structure. - * @param[in] type: L3 filter (ipv4(0) or ipv6(1)) - * or L4 filter (tcp(0) or udp(1)) - * @param[in] dma_routing_enable: filter based dma routing enable(1) - * @param[in] dma_chan: dma channel for routing based on filter. - * Max OSI_EQOS_MAX_NUM_CHANS. - * @param[in] is_l4_filter: API call for L3 filter(0) or L4 filter(1) - * - * @pre - * - MAC should be init and started. see osi_start_mac() - * - Concurrent invocations to configure filters is not supported. - * OSD driver shall serialize calls. - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_019 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_l3l4_filter(struct osi_core_priv_data *const osi_core, - const struct osi_l3_l4_filter l_filter, - const nveu32_t type, - const nveu32_t dma_routing_enable, - const nveu32_t dma_chan, - const nveu32_t is_l4_filter); - -/** - * @brief osi_get_mac_version - Reading MAC version - * - * @note - * Algorithm: - * - Reads MAC version and check whether its valid or not. - * - * @param[in] osi_core: OSI core private data structure. - * @param[out] mac_ver: holds mac version. - * - * @pre MAC has to be out of reset. 
- * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_015 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_get_mac_version(struct osi_core_priv_data *const osi_core, - nveu32_t *mac_ver); - -/** - * @brief osi_get_hw_features - Reading MAC HW features - * - * @param[in] osi_core: OSI core private data structure. - * @param[out] hw_feat: holds the supported features of the hardware. - * - * @pre MAC has to be out of reset. - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_016 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_get_hw_features(struct osi_core_priv_data *const osi_core, - struct osi_hw_features *hw_feat); - /** * @brief osi_handle_ioctl - API to handle runtime command * @@ -2351,14 +1580,8 @@ nve32_t osi_get_hw_features(struct osi_core_priv_data *const osi_core, * - OSI_CMD_MDC_CONFIG * Derive MDC clock based on provided AXI_CBB clk * arg1_u32 - CSR (AXI CBB) clock rate. 
- * - OSI_CMD_RESTORE_REGISTER - * Restore backup of MAC MMIO address space * - OSI_CMD_POLL_FOR_MAC_RST * Poll Software reset bit in MAC HW - * - OSI_CMD_START_MAC - * Start MAC Tx/Rx engine - * - OSI_CMD_STOP_MAC - * Stop MAC Tx/Rx engine * - OSI_CMD_COMMON_ISR * Common ISR handler * - OSI_CMD_PAD_CALIBRATION @@ -2369,13 +1592,9 @@ nve32_t osi_get_hw_features(struct osi_core_priv_data *const osi_core, * - OSI_CMD_GET_MAC_VER * Reading MAC version * arg1_u32 - holds mac version - * - OSI_CMD_VALIDATE_CORE_REG - * Read-validate HW registers for func safety * - OSI_CMD_RESET_MMC * invoke function to reset MMC counter and data * structure - * - OSI_CMD_SAVE_REGISTER - * Take backup of MAC MMIO address space * - OSI_CMD_MAC_LB * Configure MAC loopback * - OSI_CMD_FLOW_CTRL @@ -2539,333 +1758,4 @@ nve32_t osi_handle_ioctl(struct osi_core_priv_data *osi_core, * @retval NULL on failure. */ struct osi_core_priv_data *osi_get_core(void); - -/** - * @brief osi_hal_handle_ioctl - HW function API to handle runtime command - * - * @note - * Algorithm: - * - Handle runtime commands to OSI - * - OSI_CMD_MDC_CONFIG - * Derive MDC clock based on provided AXI_CBB clk - * arg1_u32 - CSR (AXI CBB) clock rate. 
- * - OSI_CMD_RESTORE_REGISTER - * Restore backup of MAC MMIO address space - * - OSI_CMD_POLL_FOR_MAC_RST - * Poll Software reset bit in MAC HW - * - OSI_CMD_START_MAC - * Start MAC Tx/Rx engine - * - OSI_CMD_STOP_MAC - * Stop MAC Tx/Rx engine - * - OSI_CMD_COMMON_ISR - * Common ISR handler - * - OSI_CMD_PAD_CALIBRATION - * PAD calibration - * - OSI_CMD_READ_MMC - * invoke function to read actual registers and update - * structure variable mmc - * - OSI_CMD_GET_MAC_VER - * Reading MAC version - * arg1_u32 - holds mac version - * - OSI_CMD_VALIDATE_CORE_REG - * Read-validate HW registers for func safety - * - OSI_CMD_RESET_MMC - * invoke function to reset MMC counter and data - * structure - * - OSI_CMD_SAVE_REGISTER - * Take backup of MAC MMIO address space - * - OSI_CMD_MAC_LB - * Configure MAC loopback - * - OSI_CMD_FLOW_CTRL - * Configure flow control settings - * arg1_u32 - Enable or disable flow control settings - * - OSI_CMD_SET_MODE - * Set Full/Half Duplex mode. - * arg1_u32 - mode - * - OSI_CMD_SET_SPEED - * Set Operating speed - * arg1_u32 - Operating speed - * - OSI_CMD_L2_FILTER - * configure L2 mac filter - * l2_filter_struct - OSI filter structure - * - OSI_CMD_RXCSUM_OFFLOAD - * Configure RX checksum offload in MAC - * arg1_u32 - enable(1)/disable(0) - * - OSI_CMD_ADJ_FREQ - * Adjust frequency - * arg6_u32 - Parts per Billion - * - OSI_CMD_ADJ_TIME - * Adjust MAC time with system time - * arg1_u32 - Delta time in nano seconds - * - OSI_CMD_CONFIG_PTP - * Configure PTP - * arg1_u32 - Enable(1) or disable(0) Time Stamping - * - OSI_CMD_GET_AVB - * Get CBS algo and parameters - * avb_struct - osi core avb data structure - * - OSI_CMD_SET_AVB - * Set CBS algo and parameters - * avb_struct - osi core avb data structure - * - OSI_CMD_CONFIG_RX_CRC_CHECK - * Configure CRC Checking for Received Packets - * arg1_u32 - Enable or disable checking of CRC field in - * received pkts - * - OSI_CMD_UPDATE_VLAN_ID - * invoke osi call to update VLAN ID - * arg1_u32 - 
VLAN ID - * - OSI_CMD_CONFIG_TXSTATUS - * Configure Tx packet status reporting - * Enable(1) or disable(0) tx packet status reporting - * - OSI_CMD_GET_HW_FEAT - * Reading MAC HW features - * hw_feat_struct - holds the supported features of the hardware - * - OSI_CMD_CONFIG_FW_ERR - * Configure forwarding of error packets - * arg1_u32 - queue index, Max OSI_EQOS_MAX_NUM_QUEUES - * arg2_u32 - FWD error enable(1)/disable(0) - * - OSI_CMD_ARP_OFFLOAD - * Configure ARP offload in MAC - * arg1_u32 - Enable/disable flag - * arg7_u8_p - Char array representation of IP address - * - OSI_CMD_VLAN_FILTER - * OSI call for configuring VLAN filter - * vlan_filter - vlan filter structure - * - OSI_CMD_CONFIG_EEE - * Configure EEE LPI in MAC - * arg1_u32 - Enable (1)/disable (0) tx lpi - * arg2_u32 - Tx LPI entry timer in usecs upto - * OSI_MAX_TX_LPI_TIMER (in steps of 8usec) - * - OSI_CMD_L3L4_FILTER - * invoke OSI call to add L3/L4 - * l3l4_filter - l3_l4 filter structure - * arg1_u32 - L3 filter (ipv4(0) or ipv6(1)) - * or L4 filter (tcp(0) or udp(1) - * arg2_u32 - filter based dma routing enable(1) - * arg3_u32 - dma channel for routing based on filter. - * Max OSI_EQOS_MAX_NUM_CHANS. 
- * arg4_u32 - API call for L3 filter(0) or L4 filter(1) - * - OSI_CMD_SET_SYSTOHW_TIME - * set system to MAC hardware - * arg1_u32 - sec - * arg1_u32 - nsec - * - OSI_CMD_CONFIG_PTP_OFFLOAD - * enable/disable PTP offload feature - * pto_config - ptp offload structure - * - OSI_CMD_PTP_RXQ_ROUTE - * rxq routing to secific queue - * rxq_route - rxq routing information in structure - * - OSI_CMD_CONFIG_FRP - * Issue FRP command to HW - * frp_cmd - FRP command parameter - * - OSI_CMD_CONFIG_RSS - * Configure RSS - * - OSI_CMD_CONFIG_EST - * Configure EST registers and GCL to hw - * est - EST configuration structure - * - OSI_CMD_CONFIG_FPE - * Configuration FPE register and preemptable queue - * fpe - FPE configuration structure - * - * - OSI_CMD_GET_TX_TS - * Command to get TX timestamp for PTP packet - * ts - OSI core timestamp structure - * - * - OSI_CMD_FREE_TS - * Command to free old timestamp for PTP packet - * chan - DMA channel number +1. 0 will be used for onestep - * - * - OSI_CMD_CAP_TSC_PTP - * Capture TSC and PTP time stamp - * ptp_tsc_data - output structure with time - * - * - OSI_CMD_CONF_M2M_TS - * Enable/Disable MAC to MAC time sync for Secondary interface - * enable_disable - 1 - enable, 0- disable - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] data: void pointer pointing to osi_ioctl - * - * @pre MAC should be init and started. see osi_start_mac() - * - * @note - * Traceability Details: - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, - struct osi_ioctl *data); -/** - * @brief osi_hal_hw_core_init - HW API for EQOS MAC, MTL and common DMA - * initialization. 
- * - * @note - * Algorithm: - * - Invokes EQOS MAC, MTL and common DMA register init code. - * - * @param[in, out] osi_core: OSI core private data structure. - * @param[in] tx_fifo_size: OSI core private data structure. - * @param[in] rx_fifo_size: OSI core private data structure. - * - * @pre - * - MAC should be out of reset. See osi_poll_for_mac_reset_complete() - * for details. - * - osi_core->base needs to be filled based on ioremap. - * - osi_core->num_mtl_queues needs to be filled. - * - osi_core->mtl_queues[qinx] need to be filled. - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_006 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_hal_hw_core_init(struct osi_core_priv_data *const osi_core, - nveu32_t tx_fifo_size, nveu32_t rx_fifo_size); - -/** - * @brief osi_hal_hw_core_deinit - HW API for MAC deinitialization. - * - * @note - * Algorithm: - * - Stops MAC transmission and reception. - * - * @param[in] osi_core: OSI core private data structure. - * - * @pre MAC has to be out of reset. - * - * @note - * Traceability Details: - * - SWUD_ID: TODO - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: No - * - De-initialization: Yes - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_hal_hw_core_deinit(struct osi_core_priv_data *const osi_core); - -/** - * @brief osi_hal_write_phy_reg - HW API to Write to a PHY register through MAC - * over MDIO bus. 
- * - * @note - * Algorithm: - * - Before proceeding for reading for PHY register check whether any MII - * operation going on MDIO bus by polling MAC_GMII_BUSY bit. - * - Program data into MAC MDIO data register. - * - Populate required parameters like phy address, phy register etc,, - * in MAC MDIO Address register. write and GMII busy bits needs to be set - * in this operation. - * - Write into MAC MDIO address register poll for GMII busy for MDIO - * operation to complete. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] phyaddr: PHY address (PHY ID) associated with PHY - * @param[in] phyreg: Register which needs to be write to PHY. - * @param[in] phydata: Data to write to a PHY register. - * - * @pre MAC should be init and started. see osi_start_mac() - * - * @note - * Traceability Details: - * - SWUD_ID: TODO - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_hal_write_phy_reg(struct osi_core_priv_data *const osi_core, - const nveu32_t phyaddr, const nveu32_t phyreg, - const nveu16_t phydata); - -/** - * @brief osi_hal_read_phy_reg - HW API to Read from a PHY register through MAC - * over MDIO bus. - * - * @note - * Algorithm: - * - Before proceeding for reading for PHY register check whether any MII - * operation going on MDIO bus by polling MAC_GMII_BUSY bit. - * - Populate required parameters like phy address, phy register etc,, - * in program it in MAC MDIO Address register. Read and GMII busy bits - * needs to be set in this operation. - * - Write into MAC MDIO address register poll for GMII busy for MDIO - * operation to complete. After this data will be available at MAC MDIO - * data register. 
- * - * @param[in] osi_core: OSI core private data structure. - * @param[in] phyaddr: PHY address (PHY ID) associated with PHY - * @param[in] phyreg: Register which needs to be read from PHY. - * - * @pre MAC should be init and started. see osi_start_mac() - * - * @note - * Traceability Details: - * - SWUD_ID: TODO - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval data from PHY register on success - * @retval -1 on failure - */ -nve32_t osi_hal_read_phy_reg(struct osi_core_priv_data *const osi_core, - const nveu32_t phyaddr, const nveu32_t phyreg); #endif /* INCLUDED_OSI_CORE_H */ - diff --git a/include/osi_dma.h b/include/osi_dma.h index 934784f..9151c39 100644 --- a/include/osi_dma.h +++ b/include/osi_dma.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -45,7 +45,6 @@ * @brief EQOS generic helper MACROS. 
* @{ */ -#define OSI_NET_IP_ALIGN 0x2U #define NV_VLAN_HLEN 0x4U #define OSI_ETH_HLEN 0xEU @@ -67,6 +66,7 @@ #define OSI_VM_IRQ_RX_CHAN_MASK(x) OSI_BIT(((x) * 2U) + 1U) /** @} */ +#ifdef LOG_OSI /** * OSI error macro definition, * @param[in] priv: OSD private data OR NULL @@ -94,6 +94,10 @@ OSI_LOG_INFO, type, err, loga);\ } #endif /* !OSI_STRIPPED_LIB */ +#else +#define OSI_DMA_ERR(priv, type, err, loga) +#endif /* LOG_OSI */ + /** * @addtogroup EQOS-PKT Packet context fields * @@ -119,7 +123,9 @@ /** Paged buffer */ #define OSI_PKT_CX_PAGED_BUF OSI_BIT(4) /** Rx packet has RSS hash */ +#ifndef OSI_STRIPPED_LIB #define OSI_PKT_CX_RSS OSI_BIT(5) +#endif /* !OSI_STRIPPED_LIB */ /** Valid packet */ #define OSI_PKT_CX_VALID OSI_BIT(10) /** Update Packet Length in Tx Desc3 */ @@ -128,18 +134,18 @@ #define OSI_PKT_CX_IP_CSUM OSI_BIT(12) /** @} */ +#ifndef OSI_STRIPPED_LIB /** * @addtogroup SLOT function context fields * * @brief These flags are used for DMA channel Slot context configuration * @{ */ -#ifndef OSI_STRIPPED_LIB #define OSI_SLOT_INTVL_DEFAULT 125U #define OSI_SLOT_INTVL_MAX 4095U -#endif /* !OSI_STRIPPED_LIB */ #define OSI_SLOT_NUM_MAX 16U /** @} */ +#endif /* !OSI_STRIPPED_LIB */ /** * @addtogroup EQOS-TX Tx done packet context fields @@ -147,7 +153,7 @@ * @brief These flags used to convey transmit done packet context information, * whether transmitted packet used a paged buffer, whether transmitted packet * has an tx error, whether transmitted packet has an TS - * + * * @{ */ /** Flag to indicate if buffer programmed in desc. is DMA map'd from @@ -209,7 +215,7 @@ /** @} */ - +#ifndef OSI_STRIPPED_LIB /** * @addtogroup RSS-HASH type * @@ -221,6 +227,7 @@ #define OSI_RX_PKT_HASH_TYPE_L3 0x2U #define OSI_RX_PKT_HASH_TYPE_L4 0x3U /** @} */ +#endif /* !OSI_STRIPPED_LIB */ /** * @addtogroup OSI-INTR OSI DMA interrupt handling macros. 
@@ -244,6 +251,7 @@ #ifdef OSI_DEBUG #define OSI_DMA_IOCTL_CMD_REG_DUMP 1U #define OSI_DMA_IOCTL_CMD_STRUCTS_DUMP 2U +#define OSI_DMA_IOCTL_CMD_DEBUG_INTR_CONFIG 3U #endif /* OSI_DEBUG */ /** @} */ @@ -252,6 +260,7 @@ */ #define OSI_TX_MAX_BUFF_SIZE 0x3FFFU +#ifndef OSI_STRIPPED_LIB /** * @brief OSI packet error stats */ @@ -287,14 +296,15 @@ struct osi_pkt_err_stats { /** FRP Parsed count, includes accept * routing-bypass, or result-bypass count. */ - unsigned long frp_parsed; + nveu64_t frp_parsed; /** FRP Dropped count */ - unsigned long frp_dropped; + nveu64_t frp_dropped; /** FRP Parsing Error count */ - unsigned long frp_err; + nveu64_t frp_err; /** FRP Incomplete Parsing */ - unsigned long frp_incomplete; + nveu64_t frp_incomplete; }; +#endif /* !OSI_STRIPPED_LIB */ /** * @brief Receive Descriptor @@ -322,6 +332,8 @@ struct osi_rx_swcx { nveu32_t len; /** Flags to share info about Rx swcx between OSD and OSI */ nveu32_t flags; + /** nvsocket data index */ + nveu64_t data_idx; }; /** @@ -333,16 +345,18 @@ struct osi_rx_pkt_cx { nveu32_t flags; /** Stores the Rx csum */ nveu32_t rxcsum; - /** Stores the VLAN tag ID in received packet */ - nveu32_t vlan_tag; /** Length of received packet */ nveu32_t pkt_len; + /** TS in nsec for the received packet */ + nveul64_t ns; +#ifndef OSI_STRIPPED_LIB + /** Stores the VLAN tag ID in received packet */ + nveu32_t vlan_tag; /** Stores received packet hash */ nveu32_t rx_hash; /** Store type of packet for which hash carries at rx_hash */ nveu32_t rx_hash_type; - /** TS in nsec for the received packet */ - nveul64_t ns; +#endif /* !OSI_STRIPPED_LIB */ }; /** @@ -374,20 +388,22 @@ struct osi_tx_swcx { void *buf_virt_addr; /** Length of buffer */ nveu32_t len; +#ifndef OSI_STRIPPED_LIB /** Flag to keep track of whether buffer pointed by buf_phy_addr * is a paged buffer/linear buffer */ nveu32_t is_paged_buf; +#endif /* !OSI_STRIPPED_LIB */ /** Flag to keep track of SWCX * Bit 0 is_paged_buf - whether buffer pointed by 
buf_phy_addr * is a paged buffer/linear buffer * Bit 1 PTP hwtime form timestamp registers */ - unsigned int flags; + nveu32_t flags; /** Packet id of packet for which TX timestamp needed */ - unsigned int pktid; + nveu32_t pktid; /** dma channel number for osd use */ nveu32_t chan; - /** reserved field 1 for future use */ - nveu64_t rsvd1; + /** nvsocket data index */ + nveu64_t data_idx; /** reserved field 2 for future use */ nveu64_t rsvd2; }; @@ -438,7 +454,7 @@ struct osi_txdone_pkt_cx { * bit is set in fields */ nveul64_t ns; /** Passing packet id to map TX time to packet */ - unsigned int pktid; + nveu32_t pktid; }; /** @@ -456,18 +472,23 @@ struct osi_tx_ring { nveu32_t cur_tx_idx; /** Descriptor index for descriptor cleanup */ nveu32_t clean_idx; +#ifndef OSI_STRIPPED_LIB /** Slot function check */ nveu32_t slot_check; /** Slot number */ nveu32_t slot_number; +#endif /* !OSI_STRIPPED_LIB */ /** Transmit packet context */ struct osi_tx_pkt_cx tx_pkt_cx; /** Transmit complete packet context information */ struct osi_txdone_pkt_cx txdone_pkt_cx; /** Number of packets or frames transmitted */ nveu32_t frame_cnt; + /** flag to skip memory barrier */ + nveu32_t skip_dmb; }; +#ifndef OSI_STRIPPED_LIB /** * @brief osi_xtra_dma_stat_counters - OSI DMA extra stats counters */ @@ -489,6 +510,7 @@ struct osi_xtra_dma_stat_counters { /** Total number of TSO packet count */ nveu64_t tx_tso_pkt_n; }; +#endif /* !OSI_STRIPPED_LIB */ struct osi_dma_priv_data; @@ -522,13 +544,17 @@ struct osd_dma_ops { #endif /* OSI_DEBUG */ }; +#ifdef OSI_DEBUG /** * @brief The OSI DMA IOCTL data structure. */ struct osi_dma_ioctl_data { /** IOCTL command number */ nveu32_t cmd; + /** IOCTL command argument */ + nveu32_t arg_u32; }; +#endif /* OSI_DEBUG */ /** * @brief The OSI DMA private data structure. 
@@ -552,10 +578,12 @@ struct osi_dma_priv_data { nveu32_t rx_buf_len; /** MTU size */ nveu32_t mtu; +#ifndef OSI_STRIPPED_LIB /** Packet error stats */ struct osi_pkt_err_stats pkt_err_stats; /** Extra DMA stats */ struct osi_xtra_dma_stat_counters dstats; +#endif /* !OSI_STRIPPED_LIB */ /** Receive Interrupt Watchdog Timer Count Units */ nveu32_t rx_riwt; /** Flag which decides riwt is enabled(1) or disabled(0) */ @@ -572,33 +600,30 @@ struct osi_dma_priv_data { nveu32_t tx_frames; /** Flag which decides tx_frames is enabled(1) or disabled(0) */ nveu32_t use_tx_frames; + /** DMA callback ops structure */ + struct osd_dma_ops osd_ops; +#ifndef OSI_STRIPPED_LIB /** Flag which decides virtualization is enabled(1) or disabled(0) */ nveu32_t use_virtualization; - /** Functional safety config to do periodic read-verify of - * certain safety critical dma registers */ - void *safety_config; /** Array of DMA channel slot snterval value from DT */ nveu32_t slot_interval[OSI_MGBE_MAX_NUM_CHANS]; /** Array of DMA channel slot enabled status from DT*/ nveu32_t slot_enabled[OSI_MGBE_MAX_NUM_CHANS]; - /** DMA callback ops structure */ - struct osd_dma_ops osd_ops; /** Virtual address of reserved DMA buffer */ void *resv_buf_virt_addr; /** Physical address of reserved DMA buffer */ nveu64_t resv_buf_phy_addr; - /** Tegra Pre-si platform info */ - nveu32_t pre_si; +#endif /* !OSI_STRIPPED_LIB */ /** PTP flags * OSI_PTP_SYNC_MASTER - acting as master * OSI_PTP_SYNC_SLAVE - acting as slave * OSI_PTP_SYNC_ONESTEP - one-step mode * OSI_PTP_SYNC_TWOSTEP - two step mode */ - unsigned int ptp_flag; + nveu32_t ptp_flag; +#ifdef OSI_DEBUG /** OSI DMA IOCTL data */ struct osi_dma_ioctl_data ioctl_data; -#ifdef OSI_DEBUG /** Flag to enable/disable descriptor dump */ nveu32_t enable_desc_dump; #endif /* OSI_DEBUG */ @@ -610,158 +635,6 @@ struct osi_dma_priv_data { nveu32_t rx_ring_sz; }; -/** - * @brief osi_disable_chan_tx_intr - Disables DMA Tx channel interrupts. 
- * - * @note - * Algorithm: - * - Disables Tx interrupts at wrapper level. - * - * @param[in] osi_dma: OSI DMA private data. - * @param[in] chan: DMA Tx channel number. Max OSI_EQOS_MAX_NUM_CHANS. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured. - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - Mapping of physical IRQ line to DMA channel need to be maintained at - * OS Dependent layer and pass corresponding channel number. - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETCL_001 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: Yes - * - Signal handler: Yes - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: Yes - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_disable_chan_tx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan); - -/** - * @brief osi_enable_chan_tx_intr - Enable DMA Tx channel interrupts. - * - * @note - * Algorithm: - * - Enables Tx interrupts at wrapper level. - * - * @param[in] osi_dma: OSI DMA private data. - * @param[in] chan: DMA Tx channel number. Max OSI_EQOS_MAX_NUM_CHANS. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured. - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - Mapping of physical IRQ line to DMA channel need to be maintained at - * OS Dependent layer and pass corresponding channel number. - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETCL_002 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: Yes - * - Signal handler: Yes - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -nve32_t osi_enable_chan_tx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan); - -/** - * @brief osi_disable_chan_rx_intr - Disable DMA Rx channel interrupts. - * - * @note - * Algorithm: - * - Disables Rx interrupts at wrapper level. - * - * @param[in] osi_dma: OSI DMA private data. - * @param[in] chan: DMA Rx channel number. Max OSI_EQOS_MAX_NUM_CHANS. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured. - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - Mapping of physical IRQ line to DMA channel need to be maintained at - * OS Dependent layer and pass corresponding channel number. - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETCL_003 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: Yes - * - Signal handler: Yes - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: Yes - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_disable_chan_rx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan); - -/** - * @brief osi_enable_chan_rx_intr - Enable DMA Rx channel interrupts. - * - * @note - * Algorithm: - * - Enables Rx interrupts at wrapper level. - * - * @param[in] osi_dma: OSI DMA private data. - * @param[in] chan: DMA Rx channel number. Max OSI_EQOS_MAX_NUM_CHANS. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured. - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - Mapping of physical IRQ line to DMA channel need to be maintained at - * OS Dependent layer and pass corresponding channel number. 
- * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETCL_004 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: Yes - * - Signal handler: Yes - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_enable_chan_rx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan); - /** * @brief osi_get_global_dma_status - Gets DMA status. * @@ -777,114 +650,6 @@ nve32_t osi_enable_chan_rx_intr(struct osi_dma_priv_data *osi_dma, */ nveu32_t osi_get_global_dma_status(struct osi_dma_priv_data *osi_dma); -/** - * @brief osi_clear_vm_tx_intr - Handles VM Tx interrupt source. - * - * Algorithm: Clear Tx interrupt source at wrapper level and DMA level. - * - * @param[in] osi_dma: DMA private data. - * @param[in] chan: DMA tx channel number. - * - * @note - * 1) MAC needs to be out of reset and proper clocks need to be configured. - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_clear_vm_tx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan); - -/** - * @brief osi_clear_vm_rx_intr - Handles VM Rx interrupt source. - * - * Algorithm: Clear Rx interrupt source at wrapper level and DMA level. - * - * @param[in] osi_dma: DMA private data. - * @param[in] chan: DMA rx channel number. - * - * @note - * 1) MAC needs to be out of reset and proper clocks need to be configured. - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - * 3) Mapping of physical IRQ line to DMA channel need to be maintained at - * OS Dependent layer and pass corresponding channel number. - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -nve32_t osi_clear_vm_rx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan); - -/** - * @brief Start DMA - * - * @note - * Algorithm: - * - Start the DMA for specific MAC - * - * @param[in] osi_dma: OSI DMA private data. - * @param[in] chan: DMA Tx/Rx channel number. Max OSI_EQOS_MAX_NUM_CHANS. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured. - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETCL_005 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_start_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan); - -/** - * @brief osi_stop_dma - Stop DMA - * - * @note - * Algorithm: - * - Stop the DMA for specific MAC - * - * @param[in] osi_dma: OSI DMA private data. - * @param[in] chan: DMA Tx/Rx channel number. Max OSI_EQOS_MAX_NUM_CHANS. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured. - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETCL_006 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: No - * - De-initialization: Yes - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -nve32_t osi_stop_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan); - /** * @brief osi_get_refill_rx_desc_cnt - Rx descriptors count that needs to refill * @@ -913,8 +678,8 @@ nve32_t osi_stop_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan); * * @retval "Number of available free descriptors." */ -nveu32_t osi_get_refill_rx_desc_cnt(struct osi_dma_priv_data *osi_dma, - unsigned int chan); +nveu32_t osi_get_refill_rx_desc_cnt(const struct osi_dma_priv_data *const osi_dma, + nveu32_t chan); /** * @brief osi_rx_dma_desc_init - DMA Rx descriptor init @@ -1349,6 +1114,7 @@ nveu32_t osi_is_mac_enabled(struct osi_dma_priv_data *const osi_dma); nve32_t osi_handle_dma_intr(struct osi_dma_priv_data *osi_dma, nveu32_t chan, nveu32_t tx_rx, nveu32_t en_dis); +#ifdef OSI_DEBUG /** * @brief osi_dma_ioctl - OSI DMA IOCTL * @@ -1365,44 +1131,8 @@ nve32_t osi_handle_dma_intr(struct osi_dma_priv_data *osi_dma, * @retval -1 on failure. */ nve32_t osi_dma_ioctl(struct osi_dma_priv_data *osi_dma); +#endif /* OSI_DEBUG */ #ifndef OSI_STRIPPED_LIB -/** - * @brief - Read-validate HW registers for func safety. - * - * @note - * Algorithm: - * - Reads pre-configured list of DMA configuration registers - * and compares with last written value for any modifications. - * - * @param[in] osi_dma: OSI DMA private data structure. - * - * @pre - * - MAC has to be out of reset. - * - osi_hw_dma_init has to be called. Internally this would initialize - * the safety_config (see osi_dma_priv_data) based on MAC version and - * which specific registers needs to be validated periodically. 
- * - Invoke this call if (osi_dma_priv_data->safety_config != OSI_NULL) - * - * @note - * Traceability Details: - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_validate_dma_regs(struct osi_dma_priv_data *osi_dma); - /** * @brief osi_clear_tx_pkt_err_stats - Clear tx packet error stats. * diff --git a/include/osi_dma_txrx.h b/include/osi_dma_txrx.h index 97b3607..325a0dd 100644 --- a/include/osi_dma_txrx.h +++ b/include/osi_dma_txrx.h @@ -32,7 +32,6 @@ #define OSI_EQOS_TX_DESC_CNT 1024U #define OSI_EQOS_RX_DESC_CNT 1024U #define OSI_MGBE_TX_DESC_CNT 4096U -#define OSI_MGBE_RX_DESC_CNT 4096U #define OSI_MGBE_MAX_RX_DESC_CNT 16384U /** @} */ @@ -49,9 +48,11 @@ #define INCR_TX_DESC_INDEX(idx, x) ((idx) = ((idx) + (1U)) & ((x) - 1U)) /** Increment the rx descriptor index */ #define INCR_RX_DESC_INDEX(idx, x) ((idx) = ((idx) + (1U)) & ((x) - 1U)) -#ifndef OSI_STRIPPED_LIB +#ifdef OSI_DEBUG /** Decrement the tx descriptor index */ #define DECR_TX_DESC_INDEX(idx, x) ((idx) = ((idx) - (1U)) & ((x) - 1U)) +#endif /* OSI_DEBUG */ +#ifndef OSI_STRIPPED_LIB /** Decrement the rx descriptor index */ #define DECR_RX_DESC_INDEX(idx, x) ((idx) = ((idx) - (1U)) & ((x) - 1U)) #endif /* !OSI_STRIPPED_LIB */ diff --git a/include/osi_macsec.h b/include/osi_macsec.h index 8d98bd3..d3598cd 100644 --- a/include/osi_macsec.h +++ b/include/osi_macsec.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -41,7 +41,9 @@ #define OSI_AN2_VALID OSI_BIT(2) #define OSI_AN3_VALID OSI_BIT(3) #define OSI_MAX_NUM_SA 4U +#ifdef DEBUG_MACSEC #define OSI_CURR_AN_MAX 3 +#endif /* DEBUG_MACSEC */ #define OSI_KEY_INDEX_MAX 31U #define OSI_PN_MAX_DEFAULT 0xFFFFFFFFU #define OSI_PN_THRESHOLD_DEFAULT 0xC0000000U @@ -97,7 +99,7 @@ /** @} */ /** - * @addtogroup Generic table CONFIG register helpers macros + * @addtogroup MACSEC-Generic table CONFIG register helpers macros * * @brief Helper macros for generic table CONFIG register programming * @{ @@ -114,14 +116,13 @@ #define OSI_SA_LUT_MAX_INDEX OSI_TABLE_INDEX_MAX /** @} */ +#ifdef DEBUG_MACSEC /** * @addtogroup Debug buffer table CONFIG register helpers macros * * @brief Helper macros for debug buffer table CONFIG register programming * @{ */ -#define OSI_DBG_TBL_READ OSI_LUT_READ -#define OSI_DBG_TBL_WRITE OSI_LUT_WRITE /* Num of Tx debug buffers */ #define OSI_TX_DBG_BUF_IDX_MAX 12U /* Num of Rx debug buffers */ @@ -140,6 +141,7 @@ #define OSI_RX_DBG_ICV_ERROR_EVT OSI_BIT(10) #define OSI_RX_DBG_CAPTURE_EVT OSI_BIT(11) /** @} */ +#endif /* DEBUG_MACSEC*/ /** * @addtogroup AES ciphers @@ -152,27 +154,22 @@ /** @} */ /** - * @addtogroup MACSEC Misc helper macro's + * @addtogroup MACSEC related helper MACROs * - * @brief MACSEC Helper macro's + * @brief MACSEC generic helper MACROs * @{ */ #define OSI_MACSEC_TX_EN OSI_BIT(0) #define OSI_MACSEC_RX_EN OSI_BIT(1) -/* MACSEC SECTAG + ICV + 2B ethertype adds upto 34B */ -#define MACSEC_TAG_ICV_LEN 34U -/* MACSEC TZ key config cmd */ -#define OSI_MACSEC_CMD_TZ_CONFIG 0x1 -/* MACSEC TZ key table entries reset cmd */ -#define OSI_MACSEC_CMD_TZ_KT_RESET 0x2 /** @} */ /** * @brief Indicates different operations on MACSEC SA */ +#ifdef MACSEC_KEY_PROGRAM #define OSI_CREATE_SA 1U +#endif /* MACSEC_KEY_PROGRAM */ #define OSI_ENABLE_SA 2U 
-#define OSI_DISABLE_SA 3U /** * @brief MACSEC SA State LUT entry outputs structure @@ -238,6 +235,7 @@ struct osi_macsec_table_config { nveu16_t index; }; +#if defined(MACSEC_KEY_PROGRAM) || defined(LINUX_OS) /** * @brief MACSEC Key Table entry structure */ @@ -247,6 +245,7 @@ struct osi_kt_entry { /** Indicates Hash-key */ nveu8_t h[OSI_KEY_LEN_128]; }; +#endif /* MACSEC_KEY_PROGRAM */ /** * @brief MACSEC BYP/SCI LUT entry inputs structure @@ -296,6 +295,7 @@ struct osi_macsec_lut_config { struct osi_sa_state_outputs sa_state_out; }; +#if defined(MACSEC_KEY_PROGRAM) || defined(LINUX_OS) /** * @brief MACSEC Key Table config data structure */ @@ -307,6 +307,7 @@ struct osi_macsec_kt_config { /** Indicates key table entry valid or not, bit 31 */ nveu32_t flags; }; +#endif /* MACSEC_KEY_PROGRAM */ /** * @brief MACSEC Debug buffer config data structure @@ -333,10 +334,8 @@ struct osi_macsec_core_ops { nveu32_t mtu); /** macsec de-init */ nve32_t (*deinit)(struct osi_core_priv_data *const osi_core); - /** Non Secure irq handler */ - void (*handle_ns_irq)(struct osi_core_priv_data *const osi_core); - /** Secure irq handler */ - void (*handle_s_irq)(struct osi_core_priv_data *const osi_core); + /** Macsec irq handler */ + void (*handle_irq)(struct osi_core_priv_data *const osi_core); /** macsec lut config */ nve32_t (*lut_config)(struct osi_core_priv_data *const osi_core, struct osi_macsec_lut_config *const lut_config); @@ -348,9 +347,11 @@ struct osi_macsec_core_ops { /** macsec cipher config */ nve32_t (*cipher_config)(struct osi_core_priv_data *const osi_core, nveu32_t cipher); +#ifdef DEBUG_MACSEC /** macsec loopback config */ nve32_t (*loopback_config)(struct osi_core_priv_data *const osi_core, nveu32_t enable); +#endif /* DEBUG_MACSEC */ /** macsec enable */ nve32_t (*macsec_en)(struct osi_core_priv_data *const osi_core, nveu32_t enable); @@ -361,19 +362,24 @@ struct osi_macsec_core_ops { nveu16_t *kt_idx); /** macsec read mmc counters */ void (*read_mmc)(struct 
osi_core_priv_data *const osi_core); +#ifdef DEBUG_MACSEC /** macsec debug buffer config */ nve32_t (*dbg_buf_config)(struct osi_core_priv_data *const osi_core, struct osi_macsec_dbg_buf_config *const dbg_buf_config); /** macsec debug buffer config */ nve32_t (*dbg_events_config)(struct osi_core_priv_data *const osi_core, struct osi_macsec_dbg_buf_config *const dbg_buf_config); +#endif /* DEBUG_MACSEC */ /** macsec get Key Index start for a given SCI */ nve32_t (*get_sc_lut_key_index)(struct osi_core_priv_data *const osi_core, nveu8_t *sci, nveu32_t *key_index, nveu16_t ctlr); /** macsec set MTU size */ nve32_t (*update_mtu)(struct osi_core_priv_data *const osi_core, nveu32_t mtu); - +#ifdef DEBUG_MACSEC + /** macsec interrupts configuration */ + void (*intr_config)(struct osi_core_priv_data *const osi_core, nveu32_t enable); +#endif /* DEBUG_MACSEC */ }; ////////////////////////////////////////////////////////////////////////// @@ -461,12 +467,12 @@ nve32_t osi_macsec_init(struct osi_core_priv_data *const osi_core, nve32_t osi_macsec_deinit(struct osi_core_priv_data *const osi_core); /** - * @brief osi_macsec_ns_isr - macsec non-secure irq handler + * @brief osi_macsec_isr - macsec irq handler * * @note * Algorithm: * - Return -1 if osi core or ops is null - * - handles non-secure macsec interrupts + * - handles macsec interrupts * - Refer to MACSEC column of <<******, (sequence diagram)>> for API details. * - TraceID: *********** * @@ -482,31 +488,7 @@ nve32_t osi_macsec_deinit(struct osi_core_priv_data *const osi_core); * * @retval none */ -void osi_macsec_ns_isr(struct osi_core_priv_data *const osi_core); - -/** - * @brief osi_macsec_s_isr - macsec secure irq handler - * - * @note - * Algorithm: - * - Return -1 if osi core or ops is null - * - handles secure macsec interrupts - * - Refer to MACSEC column of <<******, (sequence diagram)>> for API details. 
- * - TraceID: *********** - * - * @param[in] osi_core: OSI core private data structure - * - * @pre MACSEC needs to be out of reset and proper clock configured. - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval none - */ -void osi_macsec_s_isr(struct osi_core_priv_data *const osi_core); +void osi_macsec_isr(struct osi_core_priv_data *const osi_core); /** * @brief osi_macsec_config_lut - Read or write to macsec LUTs @@ -535,6 +517,7 @@ void osi_macsec_s_isr(struct osi_core_priv_data *const osi_core); nve32_t osi_macsec_config_lut(struct osi_core_priv_data *const osi_core, struct osi_macsec_lut_config *const lut_config); +#ifdef MACSEC_KEY_PROGRAM /** * @brief osi_macsec_config_kt - API to read or update the keys * @@ -561,6 +544,7 @@ nve32_t osi_macsec_config_lut(struct osi_core_priv_data *const osi_core, */ nve32_t osi_macsec_config_kt(struct osi_core_priv_data *const osi_core, struct osi_macsec_kt_config *const kt_config); +#endif /* MACSEC_KEY_PROGRAM */ /** * @brief osi_macsec_cipher_config - API to update the cipher @@ -589,6 +573,7 @@ nve32_t osi_macsec_config_kt(struct osi_core_priv_data *const osi_core, nve32_t osi_macsec_cipher_config(struct osi_core_priv_data *const osi_core, nveu32_t cipher); +#ifdef DEBUG_MACSEC /** * @brief osi_macsec_loopback - API to enable/disable macsec loopback * @@ -613,8 +598,10 @@ nve32_t osi_macsec_cipher_config(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure */ + nve32_t osi_macsec_loopback(struct osi_core_priv_data *const osi_core, nveu32_t enable); +#endif /* DEBUG_MACSEC */ /** * @brief osi_macsec_en - API to enable/disable macsec @@ -657,6 +644,7 @@ nve32_t osi_macsec_en(struct osi_core_priv_data *const osi_core, * * @param[in] osi_core: OSI core private data structure * @param[in] sc: Pointer to the sc that needs to be added/deleted/updated + * @param[in] enable: macsec enable/disable selection * @param[in] ctlr: 
Controller selected * @param[out] kt_idx: Pointer to the kt_index passed to OSD * @@ -701,6 +689,7 @@ nve32_t osi_macsec_config(struct osi_core_priv_data *const osi_core, */ nve32_t osi_macsec_read_mmc(struct osi_core_priv_data *const osi_core); +#ifdef DEBUG_MACSEC /** * @brief osi_macsec_config_dbg_buf - Reads the debug buffer captured * @@ -756,7 +745,7 @@ nve32_t osi_macsec_config_dbg_buf( nve32_t osi_macsec_dbg_events_config( struct osi_core_priv_data *const osi_core, struct osi_macsec_dbg_buf_config *const dbg_buf_config); - +#endif /* DEBUG_MACSEC */ /** * @brief osi_macsec_get_sc_lut_key_index - API to get key index for a given SCI * diff --git a/osi/common/common.h b/osi/common/common.h index d2b9082..31de8d2 100644 --- a/osi/common/common.h +++ b/osi/common/common.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -22,11 +22,11 @@ #ifndef INCLUDED_COMMON_H #define INCLUDED_COMMON_H -#include "../osi/common/type.h" +#include #include /** - * @addtogroup Generic helper macros + * @addtogroup Generic helper MACROS * * @brief These are Generic helper macros used at various places. * @{ @@ -37,6 +37,12 @@ #define RETRY_DELAY 1U /** @} */ +/** MAC version type for EQOS version previous to 5.30 */ +#define MAC_CORE_VER_TYPE_EQOS 0U +/** MAC version type for EQOS version 5.30 */ +#define MAC_CORE_VER_TYPE_EQOS_5_30 1U +/** MAC version type for MGBE IP */ +#define MAC_CORE_VER_TYPE_MGBE 2U /** * @brief Maximum number of supported MAC IP types (EQOS and MGBE) @@ -48,8 +54,9 @@ * a condition is met or a timeout occurs * * @param[in] addr: Memory mapped address. + * @param[in] fn: function to be used. * @param[in] val: Variable to read the value. 
- * @param[in] cond: Break condition (usually involving @val). + * @param[in] cond: Break condition. * @param[in] delay_us: Maximum time to sleep between reads in us. * @param[in] retry: Retry count. @@ -60,9 +67,9 @@ */ #define osi_readl_poll_timeout(addr, fn, val, cond, delay_us, retry) \ ({ \ - unsigned int count = 0; \ + nveu32_t count = 0; \ while (count++ < retry) { \ - val = osi_readl((unsigned char *)addr); \ + val = osi_readl((nveu8_t *)addr); \ if ((cond)) { \ break; \ } \ @@ -234,7 +241,8 @@ static inline void osi_writela(OSI_UNUSED void *priv, nveu32_t val, void *addr) * @brief validate_mac_ver_update_chans - Validates mac version and update chan * * @param[in] mac_ver: MAC version read. - * @param[out] max_chans: Maximum channel number. + * @param[out] num_max_chans: Maximum channel number. + * @param[out] l_mac_ver: local mac version. * * @note MAC has to be out of reset. * @@ -248,26 +256,36 @@ static inline void osi_writela(OSI_UNUSED void *priv, nveu32_t val, void *addr) * @retval 1 - for Valid MAC */ static inline nve32_t validate_mac_ver_update_chans(nveu32_t mac_ver, - nveu32_t *max_chans) + nveu32_t *num_max_chans, + nveu32_t *l_mac_ver) { + nve32_t ret; + switch (mac_ver) { - case OSI_EQOS_MAC_4_10: case OSI_EQOS_MAC_5_00: - *max_chans = OSI_EQOS_XP_MAX_CHANS; + *num_max_chans = OSI_EQOS_XP_MAX_CHANS; + *l_mac_ver = MAC_CORE_VER_TYPE_EQOS; + ret = 1; break; case OSI_EQOS_MAC_5_30: - *max_chans = OSI_EQOS_MAX_NUM_CHANS; + *num_max_chans = OSI_EQOS_MAX_NUM_CHANS; + *l_mac_ver = MAC_CORE_VER_TYPE_EQOS_5_30; + ret = 1; break; - case OSI_MGBE_MAC_3_00: case OSI_MGBE_MAC_3_10: +#ifndef OSI_STRIPPED_LIB case OSI_MGBE_MAC_4_00: - *max_chans = OSI_MGBE_MAX_NUM_CHANS; +#endif /* !OSI_STRIPPED_LIB */ + *num_max_chans = OSI_MGBE_MAX_NUM_CHANS; + *l_mac_ver = MAC_CORE_VER_TYPE_MGBE; + ret = 1; break; default: - return 0; + ret = 0; + break; } - return 1; + return ret; } /** @@ -289,7 +307,7 @@ static inline void osi_memset(void *s, nveu32_t c, nveu64_t 
count) nveu64_t temp = count; if (s == OSI_NULL) { - return; + goto done; } xs = (nveu8_t *)s; while (temp != 0UL) { @@ -299,6 +317,8 @@ static inline void osi_memset(void *s, nveu32_t c, nveu64_t count) } temp--; } +done: + return; } /** @@ -314,38 +334,49 @@ static inline void osi_memset(void *s, nveu32_t c, nveu64_t count) * - Run time: Yes * - De-initialization: No */ -static inline nve32_t osi_memcpy(void *dest, void *src, nveu64_t n) +static inline nve32_t osi_memcpy(void *dest, const void *src, nveu64_t n) { - nve8_t *csrc = (nve8_t *)src; - nve8_t *cdest = (nve8_t *)dest; + nve8_t *cdest = dest; + const nve8_t *csrc = src; + nve32_t ret = 0; nveu64_t i = 0; - if (src == OSI_NULL || dest == OSI_NULL) { - return -1; + if ((src == OSI_NULL) || (dest == OSI_NULL)) { + ret = -1; + goto fail; } for (i = 0; i < n; i++) { cdest[i] = csrc[i]; } - return 0; +fail: + return ret; } -static inline nve32_t osi_memcmp(void *dest, void *src, nve32_t n) +static inline nve32_t osi_memcmp(const void *dest, const void *src, nve32_t n) { + const nve8_t *const cdest = dest; + const nve8_t *const csrc = src; + nve32_t ret = 0; nve32_t i; - nve8_t *csrc = (nve8_t *)src; - nve8_t *cdest = (nve8_t *)dest; - if (src == OSI_NULL || dest == OSI_NULL) - return -1; + if ((src == OSI_NULL) || (dest == OSI_NULL)) { + ret = -1; + goto fail; + } for (i = 0; i < n; i++) { if (csrc[i] < cdest[i]) { - return -1; + ret = -1; + goto fail; } else if (csrc[i] > cdest[i]) { - return 1; + ret = 1; + goto fail; + } else { + /* Do Nothing */ } } - return 0; +fail: + return ret; } #endif diff --git a/osi/common/mgbe_common.h b/osi/common/mgbe_common.h index 7ebffeb..5ba8380 100644 --- a/osi/common/mgbe_common.h +++ b/osi/common/mgbe_common.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -24,7 +24,7 @@ #define INCLUDED_MGBE_COMMON_H /** - * @addtogroup MGBE-MAC MGBE MAC common HW feature registers + * @addtogroup MGBE-MAC MAC register offsets * * @{ */ diff --git a/osi/common/osi_common.c b/osi/common/osi_common.c index 18df8ff..3a369d6 100644 --- a/osi/common/osi_common.c +++ b/osi/common/osi_common.c @@ -31,7 +31,7 @@ void common_get_systime_from_mac(void *addr, nveu32_t mac, nveu32_t *sec, nveu64_t remain; nveul64_t ns; typedef nveul64_t (*get_time)(void *addr); - get_time i_ops[MAX_MAC_IP_TYPES] = { + const get_time i_ops[MAX_MAC_IP_TYPES] = { eqos_get_systime_from_mac, mgbe_get_systime_from_mac }; @@ -53,7 +53,7 @@ void common_get_systime_from_mac(void *addr, nveu32_t mac, nveu32_t *sec, nveu32_t common_is_mac_enabled(void *addr, nveu32_t mac) { typedef nveu32_t (*mac_enable_arr)(void *addr); - mac_enable_arr i_ops[MAX_MAC_IP_TYPES] = { + const mac_enable_arr i_ops[MAX_MAC_IP_TYPES] = { eqos_is_mac_enabled, mgbe_is_mac_enabled }; diff --git a/osi/core/Makefile.interface.tmk b/osi/core/Makefile.interface.tmk index 4637979..4abf439 100644 --- a/osi/core/Makefile.interface.tmk +++ b/osi/core/Makefile.interface.tmk @@ -24,13 +24,12 @@ # ############################################################################### -ifdef NV_INTERFACE_FLAG_SHARED_LIBRARY_SECTION -NV_INTERFACE_NAME := nvethernetrm -NV_INTERFACE_EXPORTS := lib$(NV_INTERFACE_NAME) +ifdef NV_INTERFACE_FLAG_STATIC_LIBRARY_SECTION +NV_COMPONENT_NAME := nvethernetrm +NV_INTERFACE_COMPONENT_DIR := . 
NV_INTERFACE_PUBLIC_INCLUDES := \ ./include endif - # Local Variables: # indent-tabs-mode: t # tab-width: 8 diff --git a/osi/core/Makefile.tmk b/osi/core/Makefile.tmk index 521160a..ecb6fcf 100644 --- a/osi/core/Makefile.tmk +++ b/osi/core/Makefile.tmk @@ -22,7 +22,7 @@ # ############################################################################### -ifdef NV_COMPONENT_FLAG_SHARED_LIBRARY_SECTION +ifdef NV_COMPONENT_FLAG_STATIC_LIBRARY_SECTION include $(NV_BUILD_START_COMPONENT) NV_COMPONENT_STRICT_WARNINGS_qnx_64 := 1 @@ -30,42 +30,37 @@ NV_COMPONENT_STRICT_WARNINGS_qnx_64 := 1 NV_COMPONENT_NAME := nvethernetrm NV_COMPONENT_OWN_INTERFACE_DIR := . NV_COMPONENT_SOURCES := \ - eqos_core.c \ - eqos_mmc.c \ - osi_core.c \ - vlan_filter.c \ - osi_hal.c \ - ivc_core.c \ - frp.c \ - mgbe_core.c \ - xpcs.c \ - mgbe_mmc.c \ - debug.c \ - core_common.c \ + $(NV_SOURCE)/nvethernetrm/osi/core/eqos_core.c \ + $(NV_SOURCE)/nvethernetrm/osi/core/eqos_mmc.c \ + $(NV_SOURCE)/nvethernetrm/osi/core/osi_core.c \ + $(NV_SOURCE)/nvethernetrm/osi/core/osi_hal.c \ + $(NV_SOURCE)/nvethernetrm/osi/core/ivc_core.c \ + $(NV_SOURCE)/nvethernetrm/osi/core/frp.c \ + $(NV_SOURCE)/nvethernetrm/osi/core/mgbe_core.c \ + $(NV_SOURCE)/nvethernetrm/osi/core/xpcs.c \ + $(NV_SOURCE)/nvethernetrm/osi/core/mgbe_mmc.c \ + $(NV_SOURCE)/nvethernetrm/osi/core/core_common.c \ $(NV_SOURCE)/nvethernetrm/osi/common/osi_common.c \ $(NV_SOURCE)/nvethernetrm/osi/common/eqos_common.c \ $(NV_SOURCE)/nvethernetrm/osi/common/mgbe_common.c \ $(NV_SOURCE)/nvethernetrm/osi/core/macsec.c -#NV_COMPONENT_CFLAGS += -DMACSEC_SUPPORT -#NV_COMPONENT_CFLAGS += -DMACSEC_KEY_PROGRAM -#NV_COMPONENT_CFLAGS += -DDEBUG_MACSEC - -ifeq ($(NV_BUILD_CONFIGURATION_OS_IS_LINUX),1) - NV_COMPONENT_CFLAGS += -DLINUX_OS -else ifeq ($(NV_BUILD_CONFIGURATION_OS_IS_QNX),1) - NV_COMPONENT_CFLAGS += -DQNX_OS -endif - -ifeq ($(NV_BUILD_CONFIGURATION_IS_SAFETY),0) -NV_COMPONENT_CFLAGS += -DOSI_DEBUG -endif - NV_COMPONENT_INCLUDES := \ 
$(NV_SOURCE)/nvethernetrm/include \ $(NV_SOURCE)/nvethernetrm/osi/common/include -include $(NV_BUILD_SHARED_LIBRARY) +include $(NV_SOURCE)/nvethernetrm/include/config.tmk + +ifeq ($(OSI_DEBUG),1) +NV_COMPONENT_SOURCES += $(NV_SOURCE)/nvethernetrm/osi/core/debug.c +endif + +ifeq ($(OSI_STRIPPED_LIB),0) +NV_COMPONENT_SOURCES += \ + $(NV_SOURCE)/nvethernetrm/osi/core/vlan_filter.c +endif + +include $(NV_BUILD_STATIC_LIBRARY) endif # Local Variables: diff --git a/osi/core/core_common.c b/osi/core/core_common.c index 0d218a6..02fbfaf 100644 --- a/osi/core/core_common.c +++ b/osi/core/core_common.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -24,12 +24,596 @@ #include "core_common.h" #include "mgbe_core.h" #include "eqos_core.h" +#include "xpcs.h" +#include "macsec.h" + +static inline nve32_t poll_check(struct osi_core_priv_data *const osi_core, nveu8_t *addr, + nveu32_t bit_check, nveu32_t *value) +{ + nveu32_t retry = RETRY_COUNT; + nve32_t cond = COND_NOT_MET; + nveu32_t count; + nve32_t ret = 0; + + /* Poll Until Poll Condition */ + count = 0; + while (cond == COND_NOT_MET) { + if (count > retry) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "poll_check: timeout\n", 0ULL); + ret = -1; + goto fail; + } + + count++; + + *value = osi_readla(osi_core, addr); + if ((*value & bit_check) == OSI_NONE) { + cond = COND_MET; + } else { + osi_core->osd_ops.udelay(OSI_DELAY_1000US); + } + } +fail: + return ret; +} + + +nve32_t hw_poll_for_swr(struct osi_core_priv_data *const osi_core) +{ + nveu32_t dma_mode_val = 0U; + const nveu32_t dma_mode[2] = { EQOS_DMA_BMR, MGBE_DMA_MODE }; + void *addr = osi_core->base; + + return poll_check(osi_core, ((nveu8_t *)addr + dma_mode[osi_core->mac]), + DMA_MODE_SWR, 
&dma_mode_val); +} + +void hw_start_mac(struct osi_core_priv_data *const osi_core) +{ + void *addr = osi_core->base; + nveu32_t value; + const nveu32_t mac_mcr_te_reg[2] = { EQOS_MAC_MCR, MGBE_MAC_TMCR }; + const nveu32_t mac_mcr_re_reg[2] = { EQOS_MAC_MCR, MGBE_MAC_RMCR }; + const nveu32_t set_bit_te[2] = { EQOS_MCR_TE, MGBE_MAC_TMCR_TE }; + const nveu32_t set_bit_re[2] = { EQOS_MCR_RE, MGBE_MAC_RMCR_RE }; + + value = osi_readla(osi_core, ((nveu8_t *)addr + mac_mcr_te_reg[osi_core->mac])); + value |= set_bit_te[osi_core->mac]; + osi_writela(osi_core, value, ((nveu8_t *)addr + mac_mcr_te_reg[osi_core->mac])); + + value = osi_readla(osi_core, ((nveu8_t *)addr + mac_mcr_re_reg[osi_core->mac])); + value |= set_bit_re[osi_core->mac]; + osi_writela(osi_core, value, ((nveu8_t *)addr + mac_mcr_re_reg[osi_core->mac])); +} + +void hw_stop_mac(struct osi_core_priv_data *const osi_core) +{ + void *addr = osi_core->base; + nveu32_t value; + const nveu32_t mac_mcr_te_reg[2] = { EQOS_MAC_MCR, MGBE_MAC_TMCR }; + const nveu32_t mac_mcr_re_reg[2] = { EQOS_MAC_MCR, MGBE_MAC_RMCR }; + const nveu32_t clear_bit_te[2] = { EQOS_MCR_TE, MGBE_MAC_TMCR_TE }; + const nveu32_t clear_bit_re[2] = { EQOS_MCR_RE, MGBE_MAC_RMCR_RE }; + + value = osi_readla(osi_core, ((nveu8_t *)addr + mac_mcr_te_reg[osi_core->mac])); + value &= ~clear_bit_te[osi_core->mac]; + osi_writela(osi_core, value, ((nveu8_t *)addr + mac_mcr_te_reg[osi_core->mac])); + + value = osi_readla(osi_core, ((nveu8_t *)addr + mac_mcr_re_reg[osi_core->mac])); + value &= ~clear_bit_re[osi_core->mac]; + osi_writela(osi_core, value, ((nveu8_t *)addr + mac_mcr_re_reg[osi_core->mac])); +} + +nve32_t hw_set_mode(struct osi_core_priv_data *const osi_core, const nve32_t mode) +{ + void *base = osi_core->base; + nveu32_t mcr_val; + nve32_t ret = 0; + const nveu32_t bit_set[2] = { EQOS_MCR_DO, EQOS_MCR_DM }; + const nveu32_t clear_bit[2] = { EQOS_MCR_DM, EQOS_MCR_DO }; + + /* don't allow only if loopback mode is other than 0 or 1 */ + if ((mode 
!= OSI_FULL_DUPLEX) && (mode != OSI_HALF_DUPLEX)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Invalid duplex mode\n", 0ULL); + ret = -1; + goto fail; + } + + if (osi_core->mac == OSI_MAC_HW_EQOS) { + mcr_val = osi_readla(osi_core, (nveu8_t *)base + EQOS_MAC_MCR); + mcr_val |= bit_set[mode]; + mcr_val &= ~clear_bit[mode]; + osi_writela(osi_core, mcr_val, ((nveu8_t *)base + EQOS_MAC_MCR)); + } +fail: + return ret; +} + +nve32_t hw_set_speed(struct osi_core_priv_data *const osi_core, const nve32_t speed) +{ + nveu32_t value; + nve32_t ret = 0; + void *base = osi_core->base; + const nveu32_t mac_mcr[2] = { EQOS_MAC_MCR, MGBE_MAC_TMCR }; + + if (((osi_core->mac == OSI_MAC_HW_EQOS) && (speed > OSI_SPEED_1000)) || + ((osi_core->mac == OSI_MAC_HW_MGBE) && ((speed < OSI_SPEED_2500) || + (speed > OSI_SPEED_10000)))) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "unsupported speed\n", (nveul64_t)speed); + ret = -1; + goto fail; + } + + value = osi_readla(osi_core, ((nveu8_t *)base + mac_mcr[osi_core->mac])); + switch (speed) { + case OSI_SPEED_10: + value |= EQOS_MCR_PS; + value &= ~EQOS_MCR_FES; + break; + case OSI_SPEED_100: + value |= EQOS_MCR_PS; + value |= EQOS_MCR_FES; + break; + case OSI_SPEED_1000: + value &= ~EQOS_MCR_PS; + value &= ~EQOS_MCR_FES; + break; + case OSI_SPEED_2500: + value |= MGBE_MAC_TMCR_SS_2_5G; + break; + case OSI_SPEED_5000: + value |= MGBE_MAC_TMCR_SS_5G; + break; + case OSI_SPEED_10000: + value &= ~MGBE_MAC_TMCR_SS_10G; + break; + default: + if (osi_core->mac == OSI_MAC_HW_EQOS) { + value &= ~EQOS_MCR_PS; + value &= ~EQOS_MCR_FES; + } else if (osi_core->mac == OSI_MAC_HW_MGBE) { + value &= ~MGBE_MAC_TMCR_SS_10G; + } else { + /* Do Nothing */ + } + break; + } + osi_writela(osi_core, value, ((nveu8_t *)osi_core->base + mac_mcr[osi_core->mac])); + + if (osi_core->mac == OSI_MAC_HW_MGBE) { + ret = xpcs_init(osi_core); + if (ret < 0) { + goto fail; + } + + ret = xpcs_start(osi_core); + if (ret < 0) { + goto fail; + } + } +fail: + 
return ret; +} + + +nve32_t hw_flush_mtl_tx_queue(struct osi_core_priv_data *const osi_core, + const nveu32_t q_inx) +{ + void *addr = osi_core->base; + nveu32_t tx_op_mode_val = 0U; + nveu32_t que_idx = (q_inx & 0xFU); + nveu32_t value; + const nveu32_t tx_op_mode[2] = { EQOS_MTL_CHX_TX_OP_MODE(que_idx), + MGBE_MTL_CHX_TX_OP_MODE(que_idx)}; + + /* Read Tx Q Operating Mode Register and flush TxQ */ + value = osi_readla(osi_core, ((nveu8_t *)addr + tx_op_mode[osi_core->mac])); + value |= MTL_QTOMR_FTQ; + osi_writela(osi_core, value, ((nveu8_t *)addr + tx_op_mode[osi_core->mac])); + + /* Poll Until FTQ bit resets for Successful Tx Q flush */ + return poll_check(osi_core, ((nveu8_t *)addr + tx_op_mode[osi_core->mac]), + MTL_QTOMR_FTQ, &tx_op_mode_val); +} + +nve32_t hw_config_fw_err_pkts(struct osi_core_priv_data *osi_core, + const nveu32_t q_inx, const nveu32_t enable_fw_err_pkts) +{ + nveu32_t val; + nve32_t ret = 0; + nveu32_t que_idx = (q_inx & 0xFU); + const nveu32_t rx_op_mode[2] = { EQOS_MTL_CHX_RX_OP_MODE(que_idx), + MGBE_MTL_CHX_RX_OP_MODE(que_idx)}; +#ifndef OSI_STRIPPED_LIB + const nveu32_t max_q[2] = { OSI_EQOS_MAX_NUM_QUEUES, + OSI_MGBE_MAX_NUM_QUEUES}; + /* Check for valid enable_fw_err_pkts and que_idx values */ + if (((enable_fw_err_pkts != OSI_ENABLE) && + (enable_fw_err_pkts != OSI_DISABLE)) || + (que_idx >= max_q[osi_core->mac])) { + ret = -1; + goto fail; + } + + /* Read MTL RXQ Operation_Mode Register */ + val = osi_readla(osi_core, ((nveu8_t *)osi_core->base + + rx_op_mode[osi_core->mac])); + + /* enable_fw_err_pkts, 1 is for enable and 0 is for disable */ + if (enable_fw_err_pkts == OSI_ENABLE) { + /* When enable_fw_err_pkts bit is set, all packets except + * the runt error packets are forwarded to the application + * or DMA. 
+ */ + val |= MTL_RXQ_OP_MODE_FEP; + } else { + /* When this bit is reset, the Rx queue drops packets with error + * status (CRC error, GMII_ER, watchdog timeout, or overflow) + */ + val &= ~MTL_RXQ_OP_MODE_FEP; + } + + /* Write to FEP bit of MTL RXQ Operation Mode Register to enable or + * disable the forwarding of error packets to DMA or application. + */ + osi_writela(osi_core, val, ((nveu8_t *)osi_core->base + + rx_op_mode[osi_core->mac])); +fail: + return ret; +#else + /* using void to skip the misra error of unused variable */ + (void)enable_fw_err_pkts; + /* Read MTL RXQ Operation_Mode Register */ + val = osi_readla(osi_core, ((nveu8_t *)osi_core->base + + rx_op_mode[osi_core->mac])); + val |= MTL_RXQ_OP_MODE_FEP; + /* Write to FEP bit of MTL RXQ Operation Mode Register to enable or + * disable the forwarding of error packets to DMA or application. + */ + osi_writela(osi_core, val, ((nveu8_t *)osi_core->base + + rx_op_mode[osi_core->mac])); + + return ret; +#endif /* !OSI_STRIPPED_LIB */ +} + +nve32_t hw_config_rxcsum_offload(struct osi_core_priv_data *const osi_core, + nveu32_t enabled) +{ + void *addr = osi_core->base; + nveu32_t value; + nve32_t ret = 0; + const nveu32_t rxcsum_mode[2] = { EQOS_MAC_MCR, MGBE_MAC_RMCR}; + const nveu32_t ipc_value[2] = { EQOS_MCR_IPC, MGBE_MAC_RMCR_IPC}; + + if ((enabled != OSI_ENABLE) && (enabled != OSI_DISABLE)) { + ret = -1; + goto fail; + } + + value = osi_readla(osi_core, ((nveu8_t *)addr + rxcsum_mode[osi_core->mac])); + if (enabled == OSI_ENABLE) { + value |= ipc_value[osi_core->mac]; + } else { + value &= ~ipc_value[osi_core->mac]; + } + + osi_writela(osi_core, value, ((nveu8_t *)addr + rxcsum_mode[osi_core->mac])); +fail: + return ret; +} + +nve32_t hw_set_systime_to_mac(struct osi_core_priv_data *const osi_core, + const nveu32_t sec, const nveu32_t nsec) +{ + void *addr = osi_core->base; + nveu32_t mac_tcr = 0U; + nve32_t ret = 0; + const nveu32_t mac_tscr[2] = { EQOS_MAC_TCR, MGBE_MAC_TCR}; + const nveu32_t 
mac_stsur[2] = { EQOS_MAC_STSUR, MGBE_MAC_STSUR}; + const nveu32_t mac_stnsur[2] = { EQOS_MAC_STNSUR, MGBE_MAC_STNSUR}; + + ret = poll_check(osi_core, ((nveu8_t *)addr + mac_tscr[osi_core->mac]), + MAC_TCR_TSINIT, &mac_tcr); + if (ret == -1) { + goto fail; + } + + /* write seconds value to MAC_System_Time_Seconds_Update register */ + osi_writela(osi_core, sec, ((nveu8_t *)addr + mac_stsur[osi_core->mac])); + + /* write nano seconds value to MAC_System_Time_Nanoseconds_Update + * register + */ + osi_writela(osi_core, nsec, ((nveu8_t *)addr + mac_stnsur[osi_core->mac])); + + /* issue command to update the configured secs and nsecs values */ + mac_tcr |= MAC_TCR_TSINIT; + osi_writela(osi_core, mac_tcr, ((nveu8_t *)addr + mac_tscr[osi_core->mac])); + + ret = poll_check(osi_core, ((nveu8_t *)addr + mac_tscr[osi_core->mac]), + MAC_TCR_TSINIT, &mac_tcr); +fail: + return ret; +} + +nve32_t hw_config_addend(struct osi_core_priv_data *const osi_core, + const nveu32_t addend) +{ + void *addr = osi_core->base; + nveu32_t mac_tcr = 0U; + nve32_t ret = 0; + const nveu32_t mac_tscr[2] = { EQOS_MAC_TCR, MGBE_MAC_TCR}; + const nveu32_t mac_tar[2] = { EQOS_MAC_TAR, MGBE_MAC_TAR}; + + ret = poll_check(osi_core, ((nveu8_t *)addr + mac_tscr[osi_core->mac]), + MAC_TCR_TSADDREG, &mac_tcr); + if (ret == -1) { + goto fail; + } + + /* write addend value to MAC_Timestamp_Addend register */ + osi_writela(osi_core, addend, ((nveu8_t *)addr + mac_tar[osi_core->mac])); + + /* issue command to update the configured addend value */ + mac_tcr |= MAC_TCR_TSADDREG; + osi_writela(osi_core, mac_tcr, ((nveu8_t *)addr + mac_tscr[osi_core->mac])); + + ret = poll_check(osi_core, ((nveu8_t *)addr + mac_tscr[osi_core->mac]), + MAC_TCR_TSADDREG, &mac_tcr); +fail: + return ret; +} + +#ifndef OSI_STRIPPED_LIB +void hw_config_tscr(struct osi_core_priv_data *const osi_core, const nveu32_t ptp_filter) +#else +void hw_config_tscr(struct osi_core_priv_data *const osi_core, OSI_UNUSED const nveu32_t ptp_filter) 
+#endif /* !OSI_STRIPPED_LIB */ +{ + void *addr = osi_core->base; + struct core_local *l_core = (struct core_local *)(void *)osi_core; + nveu32_t mac_tcr = 0U; +#ifndef OSI_STRIPPED_LIB + nveu32_t i = 0U, temp = 0U; +#endif /* !OSI_STRIPPED_LIB */ + nveu32_t value = 0x0U; + const nveu32_t mac_tscr[2] = { EQOS_MAC_TCR, MGBE_MAC_TCR}; + const nveu32_t mac_pps[2] = { EQOS_MAC_PPS_CTL, MGBE_MAC_PPS_CTL}; + +#ifndef OSI_STRIPPED_LIB + if (ptp_filter != OSI_DISABLE) { + mac_tcr = (OSI_MAC_TCR_TSENA | OSI_MAC_TCR_TSCFUPDT | OSI_MAC_TCR_TSCTRLSSR); + for (i = 0U; i < 32U; i++) { + temp = ptp_filter & OSI_BIT(i); + + switch (temp) { + case OSI_MAC_TCR_SNAPTYPSEL_1: + mac_tcr |= OSI_MAC_TCR_SNAPTYPSEL_1; + break; + case OSI_MAC_TCR_SNAPTYPSEL_2: + mac_tcr |= OSI_MAC_TCR_SNAPTYPSEL_2; + break; + case OSI_MAC_TCR_SNAPTYPSEL_3: + mac_tcr |= OSI_MAC_TCR_SNAPTYPSEL_3; + break; + case OSI_MAC_TCR_TSIPV4ENA: + mac_tcr |= OSI_MAC_TCR_TSIPV4ENA; + break; + case OSI_MAC_TCR_TSIPV6ENA: + mac_tcr |= OSI_MAC_TCR_TSIPV6ENA; + break; + case OSI_MAC_TCR_TSEVENTENA: + mac_tcr |= OSI_MAC_TCR_TSEVENTENA; + break; + case OSI_MAC_TCR_TSMASTERENA: + mac_tcr |= OSI_MAC_TCR_TSMASTERENA; + break; + case OSI_MAC_TCR_TSVER2ENA: + mac_tcr |= OSI_MAC_TCR_TSVER2ENA; + break; + case OSI_MAC_TCR_TSIPENA: + mac_tcr |= OSI_MAC_TCR_TSIPENA; + break; + case OSI_MAC_TCR_AV8021ASMEN: + mac_tcr |= OSI_MAC_TCR_AV8021ASMEN; + break; + case OSI_MAC_TCR_TSENALL: + mac_tcr |= OSI_MAC_TCR_TSENALL; + break; + case OSI_MAC_TCR_CSC: + mac_tcr |= OSI_MAC_TCR_CSC; + break; + default: + break; + } + } + } else { + /* Disabling the MAC time stamping */ + mac_tcr = OSI_DISABLE; + } +#else + mac_tcr = (OSI_MAC_TCR_TSENA | OSI_MAC_TCR_TSCFUPDT | OSI_MAC_TCR_TSCTRLSSR + | OSI_MAC_TCR_TSVER2ENA | OSI_MAC_TCR_TSIPENA | OSI_MAC_TCR_TSIPV6ENA | + OSI_MAC_TCR_TSIPV4ENA | OSI_MAC_TCR_SNAPTYPSEL_1); +#endif /* !OSI_STRIPPED_LIB */ + + osi_writela(osi_core, mac_tcr, ((nveu8_t *)addr + mac_tscr[osi_core->mac])); + + value = 
osi_readla(osi_core, (nveu8_t *)addr + mac_pps[osi_core->mac]); + value &= ~MAC_PPS_CTL_PPSCTRL0; + if (l_core->pps_freq == OSI_ENABLE) { + value |= OSI_ENABLE; + } + osi_writela(osi_core, value, ((nveu8_t *)addr + mac_pps[osi_core->mac])); +} + +void hw_config_ssir(struct osi_core_priv_data *const osi_core) +{ + nveu32_t val = 0U; + void *addr = osi_core->base; + const struct core_local *l_core = (struct core_local *)(void *)osi_core; + const nveu32_t mac_ssir[2] = { EQOS_MAC_SSIR, MGBE_MAC_SSIR}; + const nveu32_t ptp_ssinc[3] = {OSI_PTP_SSINC_4, OSI_PTP_SSINC_6, OSI_PTP_SSINC_4}; + + /* by default Fine method is enabled */ + /* Fix the SSINC value based on Exact MAC used */ + val = ptp_ssinc[l_core->l_mac_ver]; + + val |= val << MAC_SSIR_SSINC_SHIFT; + /* update Sub-second Increment Value */ + osi_writela(osi_core, val, ((nveu8_t *)addr + mac_ssir[osi_core->mac])); +} + +nve32_t hw_ptp_tsc_capture(struct osi_core_priv_data *const osi_core, + struct osi_core_ptp_tsc_data *data) +{ +#ifndef OSI_STRIPPED_LIB + const struct core_local *l_core = (struct core_local *)osi_core; +#endif /* !OSI_STRIPPED_LIB */ + void *addr = osi_core->base; + nveu32_t tsc_ptp = 0U; + nve32_t ret = 0; + +#ifndef OSI_STRIPPED_LIB + /* This code is NA for Orin use case */ + if (l_core->l_mac_ver < MAC_CORE_VER_TYPE_EQOS_5_30) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "ptp_tsc: older IP\n", 0ULL); + ret = -1; + goto done; + } +#endif /* !OSI_STRIPPED_LIB */ + + osi_writela(osi_core, OSI_ENABLE, (nveu8_t *)osi_core->base + WRAP_SYNC_TSC_PTP_CAPTURE); + + ret = poll_check(osi_core, ((nveu8_t *)addr + WRAP_SYNC_TSC_PTP_CAPTURE), + OSI_ENABLE, &tsc_ptp); + if (ret == -1) { + goto done; + } + + data->tsc_low_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + + WRAP_TSC_CAPTURE_LOW); + data->tsc_high_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + + WRAP_TSC_CAPTURE_HIGH); + data->ptp_low_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + + WRAP_PTP_CAPTURE_LOW); + 
data->ptp_high_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + + WRAP_PTP_CAPTURE_HIGH); +done: + return ret; +} + +#ifndef OSI_STRIPPED_LIB +static inline void config_l2_da_perfect_inverse_match( + struct osi_core_priv_data *osi_core, + nveu32_t perfect_inverse_match) +{ + nveu32_t value = 0U; + + value = osi_readla(osi_core, ((nveu8_t *)osi_core->base + MAC_PKT_FILTER_REG)); + value &= ~MAC_PFR_DAIF; + if (perfect_inverse_match == OSI_INV_MATCH) { + /* Set DA Inverse Filtering */ + value |= MAC_PFR_DAIF; + } + osi_writela(osi_core, value, ((nveu8_t *)osi_core->base + MAC_PKT_FILTER_REG)); +} +#endif /* !OSI_STRIPPED_LIB */ + +nve32_t hw_config_mac_pkt_filter_reg(struct osi_core_priv_data *const osi_core, + const struct osi_filter *filter) +{ + nveu32_t value = 0U; + nve32_t ret = 0; + + value = osi_readla(osi_core, ((nveu8_t *)osi_core->base + MAC_PKT_FILTER_REG)); + + /*Retain all other values */ + value &= (MAC_PFR_DAIF | MAC_PFR_DBF | MAC_PFR_SAIF | + MAC_PFR_SAF | MAC_PFR_PCF | MAC_PFR_VTFE | + MAC_PFR_IPFE | MAC_PFR_DNTU | MAC_PFR_RA); + + if ((filter->oper_mode & OSI_OPER_EN_PERFECT) != OSI_DISABLE) { + value |= MAC_PFR_HPF; + } + +#ifndef OSI_STRIPPED_LIB + if ((filter->oper_mode & OSI_OPER_DIS_PERFECT) != OSI_DISABLE) { + value &= ~MAC_PFR_HPF; + } + + if ((filter->oper_mode & OSI_OPER_EN_PROMISC) != OSI_DISABLE) { + value |= MAC_PFR_PR; + } + + if ((filter->oper_mode & OSI_OPER_DIS_PROMISC) != OSI_DISABLE) { + value &= ~MAC_PFR_PR; + } + + if ((filter->oper_mode & OSI_OPER_EN_ALLMULTI) != OSI_DISABLE) { + value |= MAC_PFR_PM; + } + + if ((filter->oper_mode & OSI_OPER_DIS_ALLMULTI) != OSI_DISABLE) { + value &= ~MAC_PFR_PM; + } +#endif /* !OSI_STRIPPED_LIB */ + + osi_writela(osi_core, value, + ((nveu8_t *)osi_core->base + MAC_PKT_FILTER_REG)); + +#ifndef OSI_STRIPPED_LIB + if ((filter->oper_mode & OSI_OPER_EN_L2_DA_INV) != OSI_DISABLE) { + config_l2_da_perfect_inverse_match(osi_core, OSI_INV_MATCH); + } + + if ((filter->oper_mode & 
OSI_OPER_DIS_L2_DA_INV) != OSI_DISABLE) { + config_l2_da_perfect_inverse_match(osi_core, OSI_PFT_MATCH); + } +#else + value = osi_readla(osi_core, ((nveu8_t *)osi_core->base + MAC_PKT_FILTER_REG)); + value &= ~MAC_PFR_DAIF; + osi_writela(osi_core, value, ((nveu8_t *)osi_core->base + MAC_PKT_FILTER_REG)); + +#endif /* !OSI_STRIPPED_LIB */ + + return ret; +} + +nve32_t hw_config_l3_l4_filter_enable(struct osi_core_priv_data *const osi_core, + const nveu32_t filter_enb_dis) +{ + nveu32_t value = 0U; + void *base = osi_core->base; + nve32_t ret = 0; + + /* validate filter_enb_dis argument */ + if ((filter_enb_dis != OSI_ENABLE) && (filter_enb_dis != OSI_DISABLE)) { + OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + "Invalid filter_enb_dis value\n", + filter_enb_dis); + ret = -1; + goto fail; + } + + value = osi_readla(osi_core, ((nveu8_t *)base + MAC_PKT_FILTER_REG)); + value &= ~(MAC_PFR_IPFE); + value |= ((filter_enb_dis << MAC_PFR_IPFE_SHIFT) & MAC_PFR_IPFE); + osi_writela(osi_core, value, ((nveu8_t *)base + MAC_PKT_FILTER_REG)); +fail: + return ret; +} /** * @brief hw_est_read - indirect read the GCL to Software own list * (SWOL) * - * @param[in] base: MAC base IOVA address. + * @param[in] osi_core: OSI core private data structure. * @param[in] addr_val: Address offset for indirect write. * @param[in] data: Data to be written at offset. * @param[in] gcla: Gate Control List Address, 0 for ETS register. @@ -53,8 +637,7 @@ static inline nve32_t hw_est_read(struct osi_core_priv_data *osi_core, nve32_t ret; const nveu32_t MTL_EST_GCL_CONTROL[MAX_MAC_IP_TYPES] = { EQOS_MTL_EST_GCL_CONTROL, MGBE_MTL_EST_GCL_CONTROL}; - const nveu32_t MTL_EST_DATA[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_DATA, - MGBE_MTL_EST_DATA}; + const nveu32_t MTL_EST_DATA[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_DATA, MGBE_MTL_EST_DATA}; *data = 0U; val &= ~MTL_EST_ADDR_MASK; @@ -94,6 +677,7 @@ err: * * @param[in] osi_core: OSI core private data structure. * @param[in] est: Configuration input argument. 
+ * @param[in] btr: Base time register value. * @param[in] mac: MAC index * * @note MAC should be init and started. see osi_start_mac() @@ -101,11 +685,11 @@ err: * @retval 0 on success * @retval -1 on failure. */ -nve32_t gcl_validate(struct osi_core_priv_data *const osi_core, - struct osi_est_config *const est, - const nveu32_t *btr, nveu32_t mac) +static nve32_t gcl_validate(struct osi_core_priv_data *const osi_core, + struct osi_est_config *const est, + const nveu32_t *btr, nveu32_t mac) { - const struct core_local *l_core = (struct core_local *)osi_core; + const struct core_local *l_core = (struct core_local *)(void *)osi_core; const nveu32_t PTP_CYCLE_8[MAX_MAC_IP_TYPES] = {EQOS_8PTP_CYCLE, MGBE_8PTP_CYCLE}; const nveu32_t MTL_EST_CONTROL[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL, @@ -129,7 +713,7 @@ nve32_t gcl_validate(struct osi_core_priv_data *const osi_core, nveu32_t bunk = 0U; nveu32_t est_status; nveu64_t old_btr, old_ctr; - nve32_t ret; + nve32_t ret = 0; nveu32_t val = 0U; nveu64_t rem = 0U; const struct est_read hw_read_arr[4] = { @@ -138,11 +722,50 @@ nve32_t gcl_validate(struct osi_core_priv_data *const osi_core, {&ctr_l, MTL_EST_CTR_LOW[mac]}, {&ctr_h, MTL_EST_CTR_HIGH[mac]}}; + if (est->en_dis > OSI_ENABLE) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "input argument en_dis value\n", + (nveul64_t)est->en_dis); + ret = -1; + goto done; + } + if (est->llr > l_core->gcl_dep) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "input argument more than GCL depth\n", (nveul64_t)est->llr); - return -1; + ret = -1; + goto done; + } + + /* 24 bit configure time in GCL + 7) */ + if (est->ter > 0x7FFFFFFFU) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "invalid TER value\n", + (nveul64_t)est->ter); + ret = -1; + goto done; + } + + /* nenosec register value can't be more than 10^9 nsec */ + if ((est->ctr[0] > OSI_NSEC_PER_SEC) || + (est->btr[0] > OSI_NSEC_PER_SEC) || + (est->ctr[1] > 0xFFU)) { + OSI_CORE_ERR(osi_core->osd, 
OSI_LOG_ARG_INVALID, + "input argument CTR/BTR nsec is invalid\n", + 0ULL); + ret = -1; + goto done; + } + + /* if btr + offset is more than limit */ + if ((est->btr[0] > (OSI_NSEC_PER_SEC - est->btr_offset[0])) || + (est->btr[1] > (UINT_MAX - est->btr_offset[1]))) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "input argument BTR offset is invalid\n", + 0ULL); + ret = -1; + goto done; } ctr = ((nveu64_t)est->ctr[1] * OSI_NSEC_PER_SEC) + est->ctr[0]; @@ -155,12 +778,13 @@ nve32_t gcl_validate(struct osi_core_priv_data *const osi_core, ((ctr - sum_tin) >= PTP_CYCLE_8[mac])) { continue; } else if (((ctr - sum_ti) != 0U) && - ((ctr - sum_ti) < PTP_CYCLE_8[mac])) { + ((ctr - sum_ti) < PTP_CYCLE_8[mac])) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CTR issue due to trancate\n", (nveul64_t)i); - return -1; + ret = -1; + goto done; } else { //do nothing } @@ -171,16 +795,17 @@ nve32_t gcl_validate(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "validation of GCL entry failed\n", (nveul64_t)i); - return -1; + ret = -1; + goto done; } /* Check for BTR in case of new ETS while current GCL enabled */ - val = osi_readla(osi_core, - (nveu8_t *)osi_core->base + + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + MTL_EST_CONTROL[mac]); if ((val & MTL_EST_CONTROL_EEST) != MTL_EST_CONTROL_EEST) { - return 0; + ret = 0; + goto done; } /* Read EST_STATUS for bunk */ @@ -200,7 +825,7 @@ nve32_t gcl_validate(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Reading failed for index\n", (nveul64_t)i); - return ret; + goto done; } } @@ -211,18 +836,1000 @@ nve32_t gcl_validate(struct osi_core_priv_data *const osi_core, if ((rem != OSI_NONE) && (rem < PTP_CYCLE_8[mac])) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "invalid BTR", (nveul64_t)rem); - return -1; + ret = -1; + goto done; } } else if (btr_new > old_btr) { rem = (btr_new - old_btr) % old_ctr; if ((rem != OSI_NONE) && 
(rem < PTP_CYCLE_8[mac])) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "invalid BTR", (nveul64_t)rem); - return -1; + ret = -1; + goto done; } } else { // Nothing to do } - return 0; +done: + return ret; +} + +/** + * @brief hw_est_write - indirect write the GCL to Software own list + * (SWOL) + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] addr_val: Address offset for indirect write. + * @param[in] data: Data to be written at offset. + * @param[in] gcla: Gate Control List Address, 0 for ETS register. + * 1 for GCL memory. + * + * @note MAC should be init and started. see osi_start_mac() + * + * @retval 0 on success + * @retval -1 on failure. + */ +static nve32_t hw_est_write(struct osi_core_priv_data *osi_core, + nveu32_t addr_val, nveu32_t data, + nveu32_t gcla) +{ + nve32_t retry = 1000; + nveu32_t val = 0x0; + nve32_t ret = 0; + const nveu32_t MTL_EST_DATA[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_DATA, + MGBE_MTL_EST_DATA}; + const nveu32_t MTL_EST_GCL_CONTROL[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_GCL_CONTROL, + MGBE_MTL_EST_GCL_CONTROL}; + + osi_writela(osi_core, data, (nveu8_t *)osi_core->base + + MTL_EST_DATA[osi_core->mac]); + + val &= ~MTL_EST_ADDR_MASK; + val |= (gcla == 1U) ? 0x0U : MTL_EST_GCRR; + val |= MTL_EST_SRWO; + val |= addr_val; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MTL_EST_GCL_CONTROL[osi_core->mac]); + + while (--retry > 0) { + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MTL_EST_GCL_CONTROL[osi_core->mac]); + if ((val & MTL_EST_SRWO) == MTL_EST_SRWO) { + osi_core->osd_ops.udelay(OSI_DELAY_1US); + continue; + } + + break; + } + + if (((val & MTL_EST_ERR0) == MTL_EST_ERR0) || + (retry <= 0)) { + ret = -1; + } + + return ret; +} + +/** + * @brief hw_config_est - Read Setting for GCL from input and update + * registers. 
+ * + * Algorithm: + * 1) Write TER, LLR and EST control register + * 2) Update GCL to sw own GCL (MTL_EST_Status bit SWOL will tell which is + * owned by SW) and store which GCL is in use currently in sw. + * 3) TODO set DBGB and DBGM for debugging + * 4) EST_data and GCRR to 1, update entry sno in ADDR and put data at + * est_gcl_data enable GCL MTL_EST_SSWL and wait for self clear or use + * SWLC in MTL_EST_Status. Please note new GCL will be pushed for each entry. + * 5) Configure btr. Update btr based on current time (current time + * should be updated based on PTP by this time) + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] est: EST configuration input argument. + * + * @note MAC should be init and started. see osi_start_mac() + * + * @retval 0 on success + * @retval -1 on failure. + */ +nve32_t hw_config_est(struct osi_core_priv_data *const osi_core, + struct osi_est_config *const est) +{ + nveu32_t btr[2] = {0}; + nveu32_t val = 0x0; + void *base = osi_core->base; + nveu32_t i; + nve32_t ret = 0; + nveu32_t addr = 0x0; + const nveu32_t MTL_EST_CONTROL[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL, + MGBE_MTL_EST_CONTROL}; + const nveu32_t MTL_EST_BTR_LOW[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_BTR_LOW, + MGBE_MTL_EST_BTR_LOW}; + const nveu32_t MTL_EST_BTR_HIGH[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_BTR_HIGH, + MGBE_MTL_EST_BTR_HIGH}; + const nveu32_t MTL_EST_CTR_LOW[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CTR_LOW, + MGBE_MTL_EST_CTR_LOW}; + const nveu32_t MTL_EST_CTR_HIGH[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CTR_HIGH, + MGBE_MTL_EST_CTR_HIGH}; + const nveu32_t MTL_EST_TER[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_TER, + MGBE_MTL_EST_TER}; + const nveu32_t MTL_EST_LLR[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_LLR, + MGBE_MTL_EST_LLR}; + + if ((osi_core->hw_feature != OSI_NULL) && + (osi_core->hw_feature->est_sel == OSI_DISABLE)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "EST not supported in HW\n", 0ULL); + ret = -1; + goto done; + } + + if (est->en_dis 
== OSI_DISABLE) { + val = osi_readla(osi_core, (nveu8_t *)base + + MTL_EST_CONTROL[osi_core->mac]); + val &= ~MTL_EST_EEST; + osi_writela(osi_core, val, (nveu8_t *)base + + MTL_EST_CONTROL[osi_core->mac]); + + ret = 0; + } else { + btr[0] = est->btr[0]; + btr[1] = est->btr[1]; + if ((btr[0] == 0U) && (btr[1] == 0U)) { + common_get_systime_from_mac(osi_core->base, + osi_core->mac, + &btr[1], &btr[0]); + } + + if (gcl_validate(osi_core, est, btr, osi_core->mac) < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "GCL validation failed\n", 0LL); + ret = -1; + goto done; + } + + ret = hw_est_write(osi_core, MTL_EST_CTR_LOW[osi_core->mac], est->ctr[0], 0); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "GCL CTR[0] failed\n", 0LL); + goto done; + } + /* check for est->ctr[i] not more than FF, TODO as per hw config + * parameter we can have max 0x3 as this value in sec */ + est->ctr[1] &= MTL_EST_CTR_HIGH_MAX; + ret = hw_est_write(osi_core, MTL_EST_CTR_HIGH[osi_core->mac], est->ctr[1], 0); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "GCL CTR[1] failed\n", 0LL); + goto done; + } + + ret = hw_est_write(osi_core, MTL_EST_TER[osi_core->mac], est->ter, 0); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "GCL TER failed\n", 0LL); + goto done; + } + + ret = hw_est_write(osi_core, MTL_EST_LLR[osi_core->mac], est->llr, 0); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "GCL LLR failed\n", 0LL); + goto done; + } + + /* Write GCL table */ + for (i = 0U; i < est->llr; i++) { + addr = i; + addr = addr << MTL_EST_ADDR_SHIFT; + addr &= MTL_EST_ADDR_MASK; + ret = hw_est_write(osi_core, addr, est->gcl[i], 1); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "GCL enties write failed\n", + (nveul64_t)i); + goto done; + } + } + + /* Write parameters */ + ret = hw_est_write(osi_core, MTL_EST_BTR_LOW[osi_core->mac], + btr[0] + est->btr_offset[0], OSI_DISABLE); + if (ret < 
0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "GCL BTR[0] failed\n", + (btr[0] + est->btr_offset[0])); + goto done; + } + + ret = hw_est_write(osi_core, MTL_EST_BTR_HIGH[osi_core->mac], + btr[1] + est->btr_offset[1], OSI_DISABLE); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "GCL BTR[1] failed\n", + (btr[1] + est->btr_offset[1])); + goto done; + } + + val = osi_readla(osi_core, (nveu8_t *)base + + MTL_EST_CONTROL[osi_core->mac]); + /* Store table */ + val |= MTL_EST_SSWL; + val |= MTL_EST_EEST; + val |= MTL_EST_QHLBF; + osi_writela(osi_core, val, (nveu8_t *)base + + MTL_EST_CONTROL[osi_core->mac]); + } +done: + return ret; +} + +/** + * @brief hw_config_fpe - Read Setting for preemption and express for TC + * and update registers. + * + * Algorithm: + * 1) Check for TC enable and TC has masked for setting to preemptable. + * 2) update FPE control status register + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] fpe: FPE configuration input argument. + * + * @note MAC should be init and started. see osi_start_mac() + * + * @retval 0 on success + * @retval -1 on failure. 
+ */ +nve32_t hw_config_fpe(struct osi_core_priv_data *const osi_core, + struct osi_fpe_config *const fpe) +{ + nveu32_t i = 0U; + nveu32_t val = 0U; + nveu32_t temp = 0U, temp1 = 0U; + nveu32_t temp_shift = 0U; + nve32_t ret = 0; + const nveu32_t MTL_FPE_CTS[MAX_MAC_IP_TYPES] = {EQOS_MTL_FPE_CTS, + MGBE_MTL_FPE_CTS}; + const nveu32_t MAC_FPE_CTS[MAX_MAC_IP_TYPES] = {EQOS_MAC_FPE_CTS, + MGBE_MAC_FPE_CTS}; + const nveu32_t max_number_queue[MAX_MAC_IP_TYPES] = {OSI_EQOS_MAX_NUM_QUEUES, + OSI_MGBE_MAX_NUM_QUEUES}; + const nveu32_t MAC_RQC1R[MAX_MAC_IP_TYPES] = {EQOS_MAC_RQC1R, + MGBE_MAC_RQC1R}; + const nveu32_t MAC_RQC1R_RQ[MAX_MAC_IP_TYPES] = {EQOS_MAC_RQC1R_FPRQ, + MGBE_MAC_RQC1R_RQ}; + const nveu32_t MAC_RQC1R_RQ_SHIFT[MAX_MAC_IP_TYPES] = {EQOS_MAC_RQC1R_FPRQ_SHIFT, + MGBE_MAC_RQC1R_RQ_SHIFT}; + const nveu32_t MTL_FPE_ADV[MAX_MAC_IP_TYPES] = {EQOS_MTL_FPE_ADV, + MGBE_MTL_FPE_ADV}; + + if ((osi_core->hw_feature != OSI_NULL) && + (osi_core->hw_feature->fpe_sel == OSI_DISABLE)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "FPE not supported in HW\n", 0ULL); + ret = -1; + goto error; + } + + /* Only 8 TC */ + if (fpe->tx_queue_preemption_enable > 0xFFU) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "FPE input tx_queue_preemption_enable is invalid\n", + (nveul64_t)fpe->tx_queue_preemption_enable); + ret = -1; + goto error; + } + + if (osi_core->mac == OSI_MAC_HW_MGBE) { +#ifdef MACSEC_SUPPORT + osi_lock_irq_enabled(&osi_core->macsec_fpe_lock); + /* MACSEC and FPE cannot coexist on MGBE refer bug 3484034 */ + if (osi_core->is_macsec_enabled == OSI_ENABLE) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "FPE and MACSEC cannot co-exist\n", 0ULL); + ret = -1; + goto done; + } +#endif /* MACSEC_SUPPORT */ + } + + osi_core->fpe_ready = OSI_DISABLE; + + if (((fpe->tx_queue_preemption_enable << MTL_FPE_CTS_PEC_SHIFT) & + MTL_FPE_CTS_PEC) == OSI_DISABLE) { + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MTL_FPE_CTS[osi_core->mac]); + val &= 
~MTL_FPE_CTS_PEC; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MTL_FPE_CTS[osi_core->mac]); + + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MAC_FPE_CTS[osi_core->mac]); + val &= ~MAC_FPE_CTS_EFPE; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MAC_FPE_CTS[osi_core->mac]); + + if (osi_core->mac == OSI_MAC_HW_MGBE) { +#ifdef MACSEC_SUPPORT + osi_core->is_fpe_enabled = OSI_DISABLE; +#endif /* MACSEC_SUPPORT */ + } + ret = 0; + } else { + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MTL_FPE_CTS[osi_core->mac]); + val &= ~MTL_FPE_CTS_PEC; + for (i = 0U; i < OSI_MAX_TC_NUM; i++) { + /* max 8 bit for this structure fot TC/TXQ. Set the TC for express or + * preemption. Default is express for a TC. DWCXG_NUM_TC = 8 */ + temp = OSI_BIT(i); + if ((fpe->tx_queue_preemption_enable & temp) == temp) { + temp_shift = i; + temp_shift += MTL_FPE_CTS_PEC_SHIFT; + /* set queue for preemtable */ + if (temp_shift < MTL_FPE_CTS_PEC_MAX_SHIFT) { + temp1 = OSI_ENABLE; + temp1 = temp1 << temp_shift; + val |= temp1; + } else { + /* Do nothing */ + } + } + } + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MTL_FPE_CTS[osi_core->mac]); + + if ((fpe->rq == 0x0U) || (fpe->rq >= max_number_queue[osi_core->mac])) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "FPE init failed due to wrong RQ\n", fpe->rq); + ret = -1; + goto done; + } + + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MAC_RQC1R[osi_core->mac]); + val &= ~MAC_RQC1R_RQ[osi_core->mac]; + temp = fpe->rq; + temp = temp << MAC_RQC1R_RQ_SHIFT[osi_core->mac]; + temp = (temp & MAC_RQC1R_RQ[osi_core->mac]); + val |= temp; + osi_core->residual_queue = fpe->rq; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MAC_RQC1R[osi_core->mac]); + + if (osi_core->mac == OSI_MAC_HW_MGBE) { + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MGBE_MAC_RQC4R); + val &= ~MGBE_MAC_RQC4R_PMCBCQ; + temp = fpe->rq; + temp = temp << MGBE_MAC_RQC4R_PMCBCQ_SHIFT; + temp 
= (temp & MGBE_MAC_RQC4R_PMCBCQ); + val |= temp; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MGBE_MAC_RQC4R); + } + /* initiate SVER for SMD-V and SMD-R. NOTE(review): value is read from MTL_FPE_CTS but SVER is set and written to MAC_FPE_CTS — confirm the read should not be from MAC_FPE_CTS */ + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MTL_FPE_CTS[osi_core->mac]); + val |= MAC_FPE_CTS_SVER; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MAC_FPE_CTS[osi_core->mac]); + + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MTL_FPE_ADV[osi_core->mac]); + val &= ~MTL_FPE_ADV_HADV_MASK; + //(minimum_fragment_size +IPG/EIPG + Preamble) *.8 ~98ns for 10G + val |= MTL_FPE_ADV_HADV_VAL; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MTL_FPE_ADV[osi_core->mac]); + + if (osi_core->mac == OSI_MAC_HW_MGBE) { +#ifdef MACSEC_SUPPORT + osi_core->is_fpe_enabled = OSI_ENABLE; +#endif /* MACSEC_SUPPORT */ + } + } +done: + + if (osi_core->mac == OSI_MAC_HW_MGBE) { +#ifdef MACSEC_SUPPORT + osi_unlock_irq_enabled(&osi_core->macsec_fpe_lock); +#endif /* MACSEC_SUPPORT */ + } + +error: + return ret; +} + +/** + * @brief enable_mtl_interrupts - Enable MTL interrupts + * + * Algorithm: enable MTL interrupts for EST + * + * @param[in] osi_core: OSI core private data structure. + * + * @note MAC should be init and started.
see osi_start_mac() + */ +static inline void enable_mtl_interrupts(struct osi_core_priv_data *osi_core) +{ + nveu32_t mtl_est_ir = OSI_DISABLE; + const nveu32_t MTL_EST_ITRE[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_ITRE, + MGBE_MTL_EST_ITRE}; + + mtl_est_ir = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MTL_EST_ITRE[osi_core->mac]); + /* enable only MTL interrupt related to + * Constant Gate Control Error + * Head-Of-Line Blocking due to Scheduling + * Head-Of-Line Blocking due to Frame Size + * BTR Error + * Switch to S/W owned list Complete + */ + mtl_est_ir |= (MTL_EST_ITRE_CGCE | MTL_EST_ITRE_IEHS | + MTL_EST_ITRE_IEHF | MTL_EST_ITRE_IEBE | + MTL_EST_ITRE_IECC); + osi_writela(osi_core, mtl_est_ir, (nveu8_t *)osi_core->base + + MTL_EST_ITRE[osi_core->mac]); +} + +/** + * @brief enable_fpe_interrupts - Enable FPE interrupts + * + * Algorithm: enable FPE interrupts + * + * @param[in] osi_core: OSI core private data structure. + * + * @note MAC should be init and started. see osi_start_mac() + */ +static inline void enable_fpe_interrupts(struct osi_core_priv_data *osi_core) +{ + nveu32_t value = OSI_DISABLE; + const nveu32_t MAC_IER[MAX_MAC_IP_TYPES] = {EQOS_MAC_IMR, + MGBE_MAC_IER}; + const nveu32_t IMR_FPEIE[MAX_MAC_IP_TYPES] = {EQOS_IMR_FPEIE, + MGBE_IMR_FPEIE}; + + /* Read MAC IER Register and enable Frame Preemption Interrupt + * Enable */ + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MAC_IER[osi_core->mac]); + value |= IMR_FPEIE[osi_core->mac]; + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + + MAC_IER[osi_core->mac]); +} + +/** + * @brief save_gcl_params - save GCL configs in local core structure + * + * @param[in] osi_core: OSI core private data structure. + * + * @note MAC should be init and started.
see osi_start_mac() + */ +static inline void save_gcl_params(struct osi_core_priv_data *osi_core) +{ + struct core_local *l_core = (struct core_local *)(void *)osi_core; + const nveu32_t gcl_widhth[4] = {0, OSI_MAX_24BITS, OSI_MAX_28BITS, + OSI_MAX_32BITS}; + const nveu32_t gcl_ti_mask[4] = {0, OSI_MASK_16BITS, OSI_MASK_20BITS, + OSI_MASK_24BITS}; + const nveu32_t gcl_depthth[6] = {0, OSI_GCL_SIZE_64, OSI_GCL_SIZE_128, + OSI_GCL_SIZE_256, OSI_GCL_SIZE_512, + OSI_GCL_SIZE_1024}; + + if ((osi_core->hw_feature->gcl_width == 0U) || + (osi_core->hw_feature->gcl_width > 3U)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Wrong HW feature GCL width\n", + (nveul64_t)osi_core->hw_feature->gcl_width); + } else { + l_core->gcl_width_val = + gcl_widhth[osi_core->hw_feature->gcl_width]; + l_core->ti_mask = gcl_ti_mask[osi_core->hw_feature->gcl_width]; + } + + if ((osi_core->hw_feature->gcl_depth == 0U) || + (osi_core->hw_feature->gcl_depth > 5U)) { + /* Do Nothing */ + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Wrong HW feature GCL depth\n", + (nveul64_t)osi_core->hw_feature->gcl_depth); + } else { + l_core->gcl_dep = gcl_depthth[osi_core->hw_feature->gcl_depth]; + } +} + +/** + * @brief hw_tsn_init - initialize TSN feature + * + * Algorithm: + * 1) If hardware support EST, + * a) Set default EST configuration + * b) Set enable interrupts + * 2) If hardware supports FPE + * a) Set default FPE configuration + * b) enable interrupts + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] est_sel: EST HW support present or not + * @param[in] fpe_sel: FPE HW support present or not + * + * @note MAC should be init and started. 
see osi_start_mac() + */ +void hw_tsn_init(struct osi_core_priv_data *osi_core, + nveu32_t est_sel, nveu32_t fpe_sel) +{ + nveu32_t val = 0x0; + nveu32_t temp = 0U; + const nveu32_t MTL_EST_CONTROL[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL, + MGBE_MTL_EST_CONTROL}; + const nveu32_t MTL_EST_CONTROL_PTOV[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL_PTOV, + MGBE_MTL_EST_CONTROL_PTOV}; + const nveu32_t MTL_EST_PTOV_RECOMMEND[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_PTOV_RECOMMEND, + MGBE_MTL_EST_PTOV_RECOMMEND}; + const nveu32_t MTL_EST_CONTROL_PTOV_SHIFT[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL_PTOV_SHIFT, + MGBE_MTL_EST_CONTROL_PTOV_SHIFT}; + const nveu32_t MTL_EST_CONTROL_CTOV[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL_CTOV, + MGBE_MTL_EST_CONTROL_CTOV}; + const nveu32_t MTL_EST_CTOV_RECOMMEND[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CTOV_RECOMMEND, + MGBE_MTL_EST_CTOV_RECOMMEND}; + const nveu32_t MTL_EST_CONTROL_CTOV_SHIFT[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL_CTOV_SHIFT, + MGBE_MTL_EST_CONTROL_CTOV_SHIFT}; + const nveu32_t MTL_EST_CONTROL_LCSE[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL_LCSE, + MGBE_MTL_EST_CONTROL_LCSE}; + const nveu32_t MTL_EST_CONTROL_LCSE_VAL[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL_LCSE_VAL, + MGBE_MTL_EST_CONTROL_LCSE_VAL}; + const nveu32_t MTL_EST_CONTROL_DDBF[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL_DDBF, + MGBE_MTL_EST_CONTROL_DDBF}; + const nveu32_t MTL_EST_OVERHEAD[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_OVERHEAD, + MGBE_MTL_EST_OVERHEAD}; + const nveu32_t MTL_EST_OVERHEAD_OVHD[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_OVERHEAD_OVHD, + MGBE_MTL_EST_OVERHEAD_OVHD}; + const nveu32_t MTL_EST_OVERHEAD_RECOMMEND[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_OVERHEAD_RECOMMEND, + MGBE_MTL_EST_OVERHEAD_RECOMMEND}; + const nveu32_t MAC_RQC1R[MAX_MAC_IP_TYPES] = {EQOS_MAC_RQC1R, + MGBE_MAC_RQC1R}; + const nveu32_t MAC_RQC1R_RQ[MAX_MAC_IP_TYPES] = {EQOS_MAC_RQC1R_FPRQ, + MGBE_MAC_RQC1R_RQ}; + const nveu32_t MAC_RQC1R_RQ_SHIFT[MAX_MAC_IP_TYPES] = {EQOS_MAC_RQC1R_FPRQ_SHIFT, + 
MGBE_MAC_RQC1R_RQ_SHIFT}; + + if (est_sel == OSI_ENABLE) { + save_gcl_params(osi_core); + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MTL_EST_CONTROL[osi_core->mac]); + + /* + * PTOV PTP clock period * 6 + * dual-port RAM based asynchronous FIFO controllers or + * Single-port RAM based synchronous FIFO controllers + * CTOV 96 x Tx clock period + * : + * : + * set other default value + */ + val &= ~MTL_EST_CONTROL_PTOV[osi_core->mac]; + temp = MTL_EST_PTOV_RECOMMEND[osi_core->mac]; + temp = temp << MTL_EST_CONTROL_PTOV_SHIFT[osi_core->mac]; + val |= temp; + + val &= ~MTL_EST_CONTROL_CTOV[osi_core->mac]; + temp = MTL_EST_CTOV_RECOMMEND[osi_core->mac]; + temp = temp << MTL_EST_CONTROL_CTOV_SHIFT[osi_core->mac]; + val |= temp; + + /*Loop Count to report Scheduling Error*/ + val &= ~MTL_EST_CONTROL_LCSE[osi_core->mac]; + val |= MTL_EST_CONTROL_LCSE_VAL[osi_core->mac]; + + if (osi_core->mac == OSI_MAC_HW_EQOS) { + val &= ~EQOS_MTL_EST_CONTROL_DFBS; + } + val &= ~MTL_EST_CONTROL_DDBF[osi_core->mac]; + val |= MTL_EST_CONTROL_DDBF[osi_core->mac]; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MTL_EST_CONTROL[osi_core->mac]); + + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MTL_EST_OVERHEAD[osi_core->mac]); + val &= ~MTL_EST_OVERHEAD_OVHD[osi_core->mac]; + /* As per hardware programming info */ + val |= MTL_EST_OVERHEAD_RECOMMEND[osi_core->mac]; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MTL_EST_OVERHEAD[osi_core->mac]); + + enable_mtl_interrupts(osi_core); + } + + if (fpe_sel == OSI_ENABLE) { + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MAC_RQC1R[osi_core->mac]); + val &= ~MAC_RQC1R_RQ[osi_core->mac]; + temp = osi_core->residual_queue; + temp = temp << MAC_RQC1R_RQ_SHIFT[osi_core->mac]; + temp = (temp & MAC_RQC1R_RQ[osi_core->mac]); + val |= temp; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MAC_RQC1R[osi_core->mac]); + + if (osi_core->mac == OSI_MAC_HW_MGBE) { + val = osi_readla(osi_core, 
(nveu8_t *)osi_core->base + + MGBE_MAC_RQC4R); + val &= ~MGBE_MAC_RQC4R_PMCBCQ; + temp = osi_core->residual_queue; + temp = temp << MGBE_MAC_RQC4R_PMCBCQ_SHIFT; + temp = (temp & MGBE_MAC_RQC4R_PMCBCQ); + val |= temp; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MGBE_MAC_RQC4R); + } + + enable_fpe_interrupts(osi_core); + } + + /* CBS setting for TC or TXQ for default configuration + user application should use IOCTL to set CBS as per requirement + */ +} + +#ifdef HSI_SUPPORT +/** + * @brief hsi_common_error_inject + * + * Algorithm: + * - For macsec HSI: trigger interrupt using MACSEC_*_INTERRUPT_SET_0 register + * - For mmc counter based: trigger interrupt by incrementing count by threshold value + * - For rest: Directly set the error detected as there is no other mean to induce error + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] error_code: Ethernet HSI error code + * + * @note MAC should be init and started. see osi_start_mac() + * + * @retval 0 on success + * @retval -1 on failure. 
+ */ +nve32_t hsi_common_error_inject(struct osi_core_priv_data *osi_core, + nveu32_t error_code) +{ + nve32_t ret = 0; + + switch (error_code) { + case OSI_INBOUND_BUS_CRC_ERR: + osi_core->hsi.inject_crc_err_count = + osi_update_stats_counter(osi_core->hsi.inject_crc_err_count, + osi_core->hsi.err_count_threshold); + break; + case OSI_RECEIVE_CHECKSUM_ERR: + osi_core->hsi.inject_udp_err_count = + osi_update_stats_counter(osi_core->hsi.inject_udp_err_count, + osi_core->hsi.err_count_threshold); + break; + case OSI_MACSEC_RX_CRC_ERR: + osi_writela(osi_core, MACSEC_RX_MAC_CRC_ERROR, + (nveu8_t *)osi_core->macsec_base + + MACSEC_RX_ISR_SET); + break; + case OSI_MACSEC_TX_CRC_ERR: + osi_writela(osi_core, MACSEC_TX_MAC_CRC_ERROR, + (nveu8_t *)osi_core->macsec_base + + MACSEC_TX_ISR_SET); + break; + case OSI_MACSEC_RX_ICV_ERR: + osi_writela(osi_core, MACSEC_RX_ICV_ERROR, + (nveu8_t *)osi_core->macsec_base + + MACSEC_RX_ISR_SET); + break; + case OSI_MACSEC_REG_VIOL_ERR: + osi_writela(osi_core, MACSEC_SECURE_REG_VIOL, + (nveu8_t *)osi_core->macsec_base + + MACSEC_COMMON_ISR_SET); + break; + case OSI_TX_FRAME_ERR: + osi_core->hsi.report_count_err[TX_FRAME_ERR_IDX] = OSI_ENABLE; + osi_core->hsi.err_code[TX_FRAME_ERR_IDX] = OSI_TX_FRAME_ERR; + osi_core->hsi.report_err = OSI_ENABLE; + break; + case OSI_PCS_AUTONEG_ERR: + osi_core->hsi.err_code[AUTONEG_ERR_IDX] = OSI_PCS_AUTONEG_ERR; + osi_core->hsi.report_err = OSI_ENABLE; + osi_core->hsi.report_count_err[AUTONEG_ERR_IDX] = OSI_ENABLE; + break; + case OSI_XPCS_WRITE_FAIL_ERR: + osi_core->hsi.err_code[XPCS_WRITE_FAIL_IDX] = OSI_XPCS_WRITE_FAIL_ERR; + osi_core->hsi.report_err = OSI_ENABLE; + osi_core->hsi.report_count_err[XPCS_WRITE_FAIL_IDX] = OSI_ENABLE; + break; + default: + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "Invalid error code\n", (nveu32_t)error_code); + ret = -1; + break; + } + + return ret; +} +#endif + +/** + * @brief prepare_l3l4_ctr_reg - Prepare control register for L3L4 filters. 
+ * + * @note + * Algorithm: + * - This sequence is used to prepare L3L4 control register for SA and DA Port Number matching. + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] l3_l4: Pointer to l3 l4 filter structure (#osi_l3_l4_filter) + * @param[out] ctr_reg: Pointer to L3L4 CTR register value + * + * @note 1) MAC should be init and started. see osi_start_mac() + * + * @retval L3L4 CTR register value + */ +static void prepare_l3l4_ctr_reg(const struct osi_core_priv_data *const osi_core, + const struct osi_l3_l4_filter *const l3_l4, + nveu32_t *ctr_reg) +{ +#ifndef OSI_STRIPPED_LIB + nveu32_t dma_routing_enable = l3_l4->dma_routing_enable; + nveu32_t dst_addr_match = l3_l4->data.dst.addr_match; +#else + nveu32_t dma_routing_enable = OSI_TRUE; + nveu32_t dst_addr_match = OSI_TRUE; +#endif /* !OSI_STRIPPED_LIB */ + const nveu32_t dma_chan_en_shift[2] = { + EQOS_MAC_L3L4_CTR_DMCHEN_SHIFT, + MGBE_MAC_L3L4_CTR_DMCHEN_SHIFT + }; + nveu32_t value = 0U; + + /* set routing dma channel */ + value |= dma_routing_enable << (dma_chan_en_shift[osi_core->mac] & 0x1FU); + value |= l3_l4->dma_chan << MAC_L3L4_CTR_DMCHN_SHIFT; + + /* Enable L3 filters for IPv4 DESTINATION addr matching */ + value |= dst_addr_match << MAC_L3L4_CTR_L3DAM_SHIFT; + +#ifndef OSI_STRIPPED_LIB + /* Enable L3 filters for IPv4 DESTINATION addr INV matching */ + value |= l3_l4->data.dst.addr_match_inv << MAC_L3L4_CTR_L3DAIM_SHIFT; + + /* Enable L3 filters for IPv4 SOURCE addr matching */ + value |= (l3_l4->data.src.addr_match << MAC_L3L4_CTR_L3SAM_SHIFT) | + (l3_l4->data.src.addr_match_inv << MAC_L3L4_CTR_L3SAIM_SHIFT); + + /* Enable L4 filters for DESTINATION port No matching */ + value |= (l3_l4->data.dst.port_match << MAC_L3L4_CTR_L4DPM_SHIFT) | + (l3_l4->data.dst.port_match_inv << MAC_L3L4_CTR_L4DPIM_SHIFT); + + /* Enable L4 filters for SOURCE Port No matching */ + value |= (l3_l4->data.src.port_match << MAC_L3L4_CTR_L4SPM_SHIFT) | + (l3_l4->data.src.port_match_inv << 
MAC_L3L4_CTR_L4SPIM_SHIFT); + + /* set udp / tcp port matching bit (for l4) */ + value |= l3_l4->data.is_udp << MAC_L3L4_CTR_L4PEN_SHIFT; + + /* set ipv4 / ipv6 protocol matching bit (for l3) */ + value |= l3_l4->data.is_ipv6 << MAC_L3L4_CTR_L3PEN_SHIFT; +#endif /* !OSI_STRIPPED_LIB */ + + *ctr_reg = value; +} + +/** + * @brief prepare_l3_addr_registers - prepare register data for IPv4/IPv6 address filtering + * + * @note + * Algorithm: + * - Update IPv4/IPv6 source/destination address for L3 layer filtering. + * - For IPv4, both source/destination address can be configured but + * for IPv6, only one of the source/destination address can be configured. + * + * @param[in] l3_l4: Pointer to l3 l4 filter structure (#osi_l3_l4_filter) + * @param[out] l3_addr1_reg: Pointer to L3 ADDR1 register value + * + * @note 1) MAC should be init and started. see osi_start_mac() + */ +static void prepare_l3_addr_registers(const struct osi_l3_l4_filter *const l3_l4, +#ifndef OSI_STRIPPED_LIB + nveu32_t *l3_addr0_reg, + nveu32_t *l3_addr2_reg, + nveu32_t *l3_addr3_reg, +#endif /* !OSI_STRIPPED_LIB */ + nveu32_t *l3_addr1_reg) +{ +#ifndef OSI_STRIPPED_LIB + if (l3_l4->data.is_ipv6 == OSI_TRUE) { + const nveu16_t *addr; + /* For IPv6, either source address or destination + * address only one of them can be enabled + */ + if (l3_l4->data.src.addr_match == OSI_TRUE) { + /* select src address only */ + addr = l3_l4->data.src.ip6_addr; + } else { + /* select dst address only */ + addr = l3_l4->data.dst.ip6_addr; + } + /* update Bits[31:0] of 128-bit IP addr */ + *l3_addr0_reg = addr[7] | ((nveu32_t)addr[6] << 16); + + /* update Bits[63:32] of 128-bit IP addr */ + *l3_addr1_reg = addr[5] | ((nveu32_t)addr[4] << 16); + + /* update Bits[95:64] of 128-bit IP addr */ + *l3_addr2_reg = addr[3] | ((nveu32_t)addr[2] << 16); + + /* update Bits[127:96] of 128-bit IP addr */ + *l3_addr3_reg = addr[1] | ((nveu32_t)addr[0] << 16); + } else { +#endif /* !OSI_STRIPPED_LIB */ + const nveu8_t *addr; + 
nveu32_t value; + +#ifndef OSI_STRIPPED_LIB + /* set source address */ + addr = l3_l4->data.src.ip4_addr; + value = addr[3]; + value |= (nveu32_t)addr[2] << 8; + value |= (nveu32_t)addr[1] << 16; + value |= (nveu32_t)addr[0] << 24; + *l3_addr0_reg = value; +#endif /* !OSI_STRIPPED_LIB */ + + /* set destination address */ + addr = l3_l4->data.dst.ip4_addr; + value = addr[3]; + value |= (nveu32_t)addr[2] << 8; + value |= (nveu32_t)addr[1] << 16; + value |= (nveu32_t)addr[0] << 24; + *l3_addr1_reg = value; +#ifndef OSI_STRIPPED_LIB + } +#endif /* !OSI_STRIPPED_LIB */ +} + +#ifndef OSI_STRIPPED_LIB +/** + * @brief prepare_l4_port_register - program source and destination port number + * + * @note + * Algorithm: + * - Program l4 address register with source and destination port numbers. + * + * @param[in] l3_l4: Pointer to l3 l4 filter structure (#osi_l3_l4_filter) + * @param[out] l4_addr_reg: Pointer to L3 ADDR0 register value + * + * @note 1) MAC should be init and started. see osi_start_mac() + * 3) DCS bits should be enabled in RXQ to DMA mapping register + */ +static void prepare_l4_port_register(const struct osi_l3_l4_filter *const l3_l4, + nveu32_t *l4_addr_reg) +{ + nveu32_t value = 0U; + + /* set source port */ + value |= ((nveu32_t)l3_l4->data.src.port_no + & MGBE_MAC_L4_ADDR_SP_MASK); + + /* set destination port */ + value |= (((nveu32_t)l3_l4->data.dst.port_no << + MGBE_MAC_L4_ADDR_DP_SHIFT) & MGBE_MAC_L4_ADDR_DP_MASK); + + *l4_addr_reg = value; +} +#endif /* !OSI_STRIPPED_LIB */ + +/** + * @brief prepare_l3l4_registers - function to prepare l3l4 registers + * + * @note + * Algorithm: + * - If filter to be enabled, + * - Prepare l3 ip address registers using prepare_l3_addr_registers(). + * - Prepare l4 port register using prepare_l4_port_register(). + * - Prepare l3l4 control register using prepare_l3l4_ctr_reg(). + * + * @param[in] osi_core: OSI core private data structure. 
+ * @param[in] l3_l4: Pointer to l3 l4 filter structure (#osi_l3_l4_filter) + * @param[out] l3_addr1_reg: Pointer to L3 ADDR1 register value + * @param[out] ctr_reg: Pointer to L3L4 CTR register value + * + * @note 1) MAC should be init and started. see osi_start_mac() + * 2) osi_core->osd should be populated + * 3) DCS bits should be enabled in RXQ to DMA mapping register + */ +void prepare_l3l4_registers(const struct osi_core_priv_data *const osi_core, + const struct osi_l3_l4_filter *const l3_l4, +#ifndef OSI_STRIPPED_LIB + nveu32_t *l3_addr0_reg, + nveu32_t *l3_addr2_reg, + nveu32_t *l3_addr3_reg, + nveu32_t *l4_addr_reg, +#endif /* !OSI_STRIPPED_LIB */ + nveu32_t *l3_addr1_reg, + nveu32_t *ctr_reg) +{ + /* prepare regiser data if filter to be enabled */ + if (l3_l4->filter_enb_dis == OSI_TRUE) { + /* prepare l3 filter ip address register data */ + prepare_l3_addr_registers(l3_l4, +#ifndef OSI_STRIPPED_LIB + l3_addr0_reg, + l3_addr2_reg, + l3_addr3_reg, +#endif /* !OSI_STRIPPED_LIB */ + l3_addr1_reg); + +#ifndef OSI_STRIPPED_LIB + /* prepare l4 filter port register data */ + prepare_l4_port_register(l3_l4, l4_addr_reg); +#endif /* !OSI_STRIPPED_LIB */ + + /* prepare control register data */ + prepare_l3l4_ctr_reg(osi_core, l3_l4, ctr_reg); + } +} + +/** + * @brief hw_validate_avb_input- validate input arguments + * + * Algorithm: + * 1) Check if idle slope is valid + * 2) Check if send slope is valid + * 3) Check if hi credit is valid + * 4) Check if low credit is valid + * + * @param[in] osi_core: osi core priv data structure + * @param[in] avb: structure having configuration for avb algorithm + * + * @note 1) MAC should be init and started. see osi_start_mac() + * + * @retval 0 on success + * @retval -1 on failure. 
+ */ +nve32_t hw_validate_avb_input(struct osi_core_priv_data *const osi_core, + const struct osi_core_avb_algorithm *const avb) +{ + nve32_t ret = 0; + nveu32_t ETS_QW_ISCQW_MASK[MAX_MAC_IP_TYPES] = {EQOS_MTL_TXQ_ETS_QW_ISCQW_MASK, + MGBE_MTL_TCQ_ETS_QW_ISCQW_MASK}; + nveu32_t ETS_SSCR_SSC_MASK[MAX_MAC_IP_TYPES] = {EQOS_MTL_TXQ_ETS_SSCR_SSC_MASK, + MGBE_MTL_TCQ_ETS_SSCR_SSC_MASK}; + nveu32_t ETS_HC_BOUND = 0x8000000U; + nveu32_t ETS_LC_BOUND = 0xF8000000U; + nveu32_t mac = osi_core->mac; + + if (avb->idle_slope > ETS_QW_ISCQW_MASK[mac]) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Invalid idle_slope\n", + (nveul64_t)avb->idle_slope); + ret = -1; + goto fail; + } + if (avb->send_slope > ETS_SSCR_SSC_MASK[mac]) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Invalid send_slope\n", + (nveul64_t)avb->send_slope); + ret = -1; + goto fail; + } + if (avb->hi_credit > ETS_HC_BOUND) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Invalid hi credit\n", + (nveul64_t)avb->hi_credit); + ret = -1; + goto fail; + } + if ((avb->low_credit < ETS_LC_BOUND) && + (avb->low_credit != 0U)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Invalid low credit\n", + (nveul64_t)avb->low_credit); + ret = -1; + goto fail; + } +fail: + return ret; } diff --git a/osi/core/core_common.h b/osi/core/core_common.h index 81b69a7..190d0c6 100644 --- a/osi/core/core_common.h +++ b/osi/core/core_common.h @@ -24,11 +24,20 @@ #define INCLUDED_CORE_COMMON_H #include "core_local.h" + +#ifndef OSI_STRIPPED_LIB +#define MAC_PFR_PR OSI_BIT(0) +#define MAC_TCR_TSCFUPDT OSI_BIT(1) +#define MAC_TCR_TSCTRLSSR OSI_BIT(9) +#define MAC_PFR_PM OSI_BIT(4) +#endif /* !OSI_STRIPPED_LIB */ + +#define MTL_EST_ADDR_SHIFT 8 #define MTL_EST_ADDR_MASK (OSI_BIT(8) | OSI_BIT(9) | \ OSI_BIT(10) | OSI_BIT(11) | \ OSI_BIT(12) | OSI_BIT(13) | \ OSI_BIT(14) | OSI_BIT(15) | \ - OSI_BIT(16) | (17) | \ + OSI_BIT(16) | OSI_BIT(17) | \ OSI_BIT(18) | OSI_BIT(19)) #define MTL_EST_SRWO OSI_BIT(0) #define
MTL_EST_R1W0 OSI_BIT(1) @@ -38,6 +47,75 @@ #define MTL_EST_ERR0 OSI_BIT(20) #define MTL_EST_CONTROL_EEST OSI_BIT(0) #define MTL_EST_STATUS_SWOL OSI_BIT(7) +/* EST control OSI_BIT map */ +#define MTL_EST_EEST OSI_BIT(0) +#define MTL_EST_SSWL OSI_BIT(1) +#define MTL_EST_QHLBF OSI_BIT(3) +#define MTL_EST_CTR_HIGH_MAX 0xFFU +#define MTL_EST_ITRE_CGCE OSI_BIT(4) +#define MTL_EST_ITRE_IEHS OSI_BIT(3) +#define MTL_EST_ITRE_IEHF OSI_BIT(2) +#define MTL_EST_ITRE_IEBE OSI_BIT(1) +#define MTL_EST_ITRE_IECC OSI_BIT(0) +/* MTL_FPE_CTRL_STS */ +#define MTL_FPE_CTS_PEC (OSI_BIT(8) | OSI_BIT(9) | \ + OSI_BIT(10) | OSI_BIT(11) | \ + OSI_BIT(12) | OSI_BIT(13) | \ + OSI_BIT(14) | OSI_BIT(15)) +#define MTL_FPE_CTS_PEC_SHIFT 8U +#define MTL_FPE_CTS_PEC_MAX_SHIFT 16U +#define MAC_FPE_CTS_EFPE OSI_BIT(0) +#define MAC_FPE_CTS_SVER OSI_BIT(1) +/* MTL FPE adv registers */ +#define MTL_FPE_ADV_HADV_MASK (0xFFFFU) +#define MTL_FPE_ADV_HADV_VAL 100U +#define DMA_MODE_SWR OSI_BIT(0) +#define MTL_QTOMR_FTQ OSI_BIT(0) +#define MTL_RXQ_OP_MODE_FEP OSI_BIT(4) +#define MAC_TCR_TSINIT OSI_BIT(2) +#define MAC_TCR_TSADDREG OSI_BIT(5) +#define MAC_PPS_CTL_PPSCTRL0 (OSI_BIT(3) | OSI_BIT(2) |\ + OSI_BIT(1) | OSI_BIT(0)) +#define MAC_SSIR_SSINC_SHIFT 16U +#define MAC_PFR_DAIF OSI_BIT(3) +#define MAC_PFR_DBF OSI_BIT(5) +#define MAC_PFR_PCF (OSI_BIT(6) | OSI_BIT(7)) +#define MAC_PFR_SAIF OSI_BIT(8) +#define MAC_PFR_SAF OSI_BIT(9) +#define MAC_PFR_HPF OSI_BIT(10) +#define MAC_PFR_VTFE OSI_BIT(16) +#define MAC_PFR_IPFE OSI_BIT(20) +#define MAC_PFR_IPFE_SHIFT 20U +#define MAC_PFR_DNTU OSI_BIT(21) +#define MAC_PFR_RA OSI_BIT(31) + +#define WRAP_SYNC_TSC_PTP_CAPTURE 0x800CU +#define WRAP_TSC_CAPTURE_LOW 0x8010U +#define WRAP_TSC_CAPTURE_HIGH 0x8014U +#define WRAP_PTP_CAPTURE_LOW 0x8018U +#define WRAP_PTP_CAPTURE_HIGH 0x801CU +#define MAC_PKT_FILTER_REG 0x0008 +#define HW_MAC_IER 0x00B4U +#define WRAP_COMMON_INTR_ENABLE 0x8704U + +/* common l3 l4 register bit fields for eqos and mgbe */ +#ifndef OSI_STRIPPED_LIB 
+#define MAC_L3L4_CTR_L3PEN_SHIFT 0 +#define MAC_L3L4_CTR_L3SAM_SHIFT 2 +#define MAC_L3L4_CTR_L3SAIM_SHIFT 3 +#endif /* !OSI_STRIPPED_LIB */ +#define MAC_L3L4_CTR_L3DAM_SHIFT 4 +#ifndef OSI_STRIPPED_LIB +#define MAC_L3L4_CTR_L3DAIM_SHIFT 5 +#define MAC_L3L4_CTR_L4PEN_SHIFT 16 +#define MAC_L3L4_CTR_L4SPM_SHIFT 18 +#define MAC_L3L4_CTR_L4SPIM_SHIFT 19 +#define MAC_L3L4_CTR_L4DPM_SHIFT 20 +#define MAC_L3L4_CTR_L4DPIM_SHIFT 21 +#endif /* !OSI_STRIPPED_LIB */ +#define MAC_L3L4_CTR_DMCHN_SHIFT 24 +#define EQOS_MAC_L3L4_CTR_DMCHEN_SHIFT 28 +#define MGBE_MAC_L3L4_CTR_DMCHEN_SHIFT 31 /** * @addtogroup typedef related info @@ -47,15 +125,57 @@ */ struct est_read { - /* variable pointer */ + /** variable pointer */ nveu32_t *var; - /* memory register/address offset */ + /** memory register/address offset */ nveu32_t addr; }; /** @} */ -nve32_t gcl_validate(struct osi_core_priv_data *const osi_core, - struct osi_est_config *const est, - const nveu32_t *btr, nveu32_t mac); +nve32_t hw_poll_for_swr(struct osi_core_priv_data *const osi_core); +void hw_start_mac(struct osi_core_priv_data *const osi_core); +void hw_stop_mac(struct osi_core_priv_data *const osi_core); +nve32_t hw_set_mode(struct osi_core_priv_data *const osi_core, const nve32_t mode); +nve32_t hw_set_speed(struct osi_core_priv_data *const osi_core, const nve32_t speed); +nve32_t hw_flush_mtl_tx_queue(struct osi_core_priv_data *const osi_core, + const nveu32_t q_inx); +nve32_t hw_config_fw_err_pkts(struct osi_core_priv_data *osi_core, + const nveu32_t q_inx, const nveu32_t enable_fw_err_pkts); +nve32_t hw_config_rxcsum_offload(struct osi_core_priv_data *const osi_core, + nveu32_t enabled); +nve32_t hw_set_systime_to_mac(struct osi_core_priv_data *const osi_core, + const nveu32_t sec, const nveu32_t nsec); +nve32_t hw_config_addend(struct osi_core_priv_data *const osi_core, + const nveu32_t addend); +void hw_config_tscr(struct osi_core_priv_data *const osi_core, const nveu32_t ptp_filter); +void hw_config_ssir(struct 
osi_core_priv_data *const osi_core); +nve32_t hw_ptp_tsc_capture(struct osi_core_priv_data *const osi_core, + struct osi_core_ptp_tsc_data *data); +nve32_t hw_config_mac_pkt_filter_reg(struct osi_core_priv_data *const osi_core, + const struct osi_filter *filter); +nve32_t hw_config_l3_l4_filter_enable(struct osi_core_priv_data *const osi_core, + const nveu32_t filter_enb_dis); +nve32_t hw_config_est(struct osi_core_priv_data *const osi_core, + struct osi_est_config *const est); +nve32_t hw_config_fpe(struct osi_core_priv_data *const osi_core, + struct osi_fpe_config *const fpe); +void hw_tsn_init(struct osi_core_priv_data *osi_core, + nveu32_t est_sel, nveu32_t fpe_sel); +void prepare_l3l4_registers(const struct osi_core_priv_data *const osi_core, + const struct osi_l3_l4_filter *const l3_l4, +#ifndef OSI_STRIPPED_LIB + nveu32_t *l3_addr0_reg, + nveu32_t *l3_addr2_reg, + nveu32_t *l3_addr3_reg, + nveu32_t *l4_addr_reg, +#endif /* !OSI_STRIPPED_LIB */ + nveu32_t *l3_addr1_reg, + nveu32_t *ctr_reg); +#ifdef HSI_SUPPORT +nve32_t hsi_common_error_inject(struct osi_core_priv_data *osi_core, + nveu32_t error_code); +#endif +nve32_t hw_validate_avb_input(struct osi_core_priv_data *const osi_core, + const struct osi_core_avb_algorithm *const avb); #endif /* INCLUDED_CORE_COMMON_H */ diff --git a/osi/core/core_local.h b/osi/core/core_local.h index d616c53..3677475 100644 --- a/osi/core/core_local.h +++ b/osi/core/core_local.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -43,13 +43,51 @@ */ #define MAX_TX_TS_CNT (PKT_ID_CNT * OSI_MGBE_MAX_NUM_CHANS) +/** + * @brief FIFO size helper macro + */ +#define FIFO_SZ(x) ((((x) * 1024U) / 256U) - 1U) + +/** + * @brief Dynamic configuration helper macros. 
+ */ +#define DYNAMIC_CFG_L3_L4 OSI_BIT(0) +#define DYNAMIC_CFG_AVB OSI_BIT(2) +#define DYNAMIC_CFG_L2 OSI_BIT(3) +#define DYNAMIC_CFG_L2_IDX 3U +#define DYNAMIC_CFG_RXCSUM OSI_BIT(4) +#define DYNAMIC_CFG_PTP OSI_BIT(7) +#define DYNAMIC_CFG_EST OSI_BIT(8) +#define DYNAMIC_CFG_FPE OSI_BIT(9) +#define DYNAMIC_CFG_FRP OSI_BIT(10) + +#ifndef OSI_STRIPPED_LIB +#define DYNAMIC_CFG_FC OSI_BIT(1) +#define DYNAMIC_CFG_VLAN OSI_BIT(5) +#define DYNAMIC_CFG_EEE OSI_BIT(6) +#define DYNAMIC_CFG_FC_IDX 1U +#define DYNAMIC_CFG_VLAN_IDX 5U +#define DYNAMIC_CFG_EEE_IDX 6U +#endif /* !OSI_STRIPPED_LIB */ + +#define DYNAMIC_CFG_L3_L4_IDX 0U +#define DYNAMIC_CFG_AVB_IDX 2U +#define DYNAMIC_CFG_L2_IDX 3U +#define DYNAMIC_CFG_RXCSUM_IDX 4U +#define DYNAMIC_CFG_PTP_IDX 7U +#define DYNAMIC_CFG_EST_IDX 8U +#define DYNAMIC_CFG_FPE_IDX 9U +#define DYNAMIC_CFG_FRP_IDX 10U + +#define OSI_SUSPENDED OSI_BIT(0) + + /** * interface core ops */ struct if_core_ops { /** Interface function called to initialize MAC and MTL registers */ - nve32_t (*if_core_init)(struct osi_core_priv_data *const osi_core, - nveu32_t tx_fifo_size, nveu32_t rx_fifo_size); + nve32_t (*if_core_init)(struct osi_core_priv_data *const osi_core); /** Interface function called to deinitialize MAC and MTL registers */ nve32_t (*if_core_deinit)(struct osi_core_priv_data *const osi_core); /** Interface function called to write into a PHY reg over MDIO bus */ @@ -72,103 +110,26 @@ struct if_core_ops { * @brief Initialize MAC & MTL core operations. 
*/ struct core_ops { - /** Called to poll for software reset bit */ - nve32_t (*poll_for_swr)(struct osi_core_priv_data *const osi_core); /** Called to initialize MAC and MTL registers */ - nve32_t (*core_init)(struct osi_core_priv_data *const osi_core, - const nveu32_t tx_fifo_size, - const nveu32_t rx_fifo_size); - /** Called to deinitialize MAC and MTL registers */ - void (*core_deinit)(struct osi_core_priv_data *const osi_core); - /** Called to start MAC Tx and Rx engine */ - void (*start_mac)(struct osi_core_priv_data *const osi_core); - /** Called to stop MAC Tx and Rx engine */ - void (*stop_mac)(struct osi_core_priv_data *const osi_core); + nve32_t (*core_init)(struct osi_core_priv_data *const osi_core); /** Called to handle common interrupt */ void (*handle_common_intr)(struct osi_core_priv_data *const osi_core); - /** Called to set the mode at MAC (full/duplex) */ - nve32_t (*set_mode)(struct osi_core_priv_data *const osi_core, - const nve32_t mode); - /** Called to set the speed at MAC */ - nve32_t (*set_speed)(struct osi_core_priv_data *const osi_core, - const nve32_t speed); /** Called to do pad caliberation */ nve32_t (*pad_calibrate)(struct osi_core_priv_data *const osi_core); - /** Called to configure MTL RxQ to forward the err pkt */ - nve32_t (*config_fw_err_pkts)(struct osi_core_priv_data *const osi_core, - const nveu32_t qinx, - const nveu32_t fw_err); - /** Called to configure Rx Checksum offload engine */ - nve32_t (*config_rxcsum_offload)( - struct osi_core_priv_data *const osi_core, - const nveu32_t enabled); - /** Called to config mac packet filter */ - nve32_t (*config_mac_pkt_filter_reg)( - struct osi_core_priv_data *const osi_core, - const struct osi_filter *filter); /** Called to update MAC address 1-127 */ nve32_t (*update_mac_addr_low_high_reg)( struct osi_core_priv_data *const osi_core, const struct osi_filter *filter); - /** Called to configure l3/L4 filter */ - nve32_t (*config_l3_l4_filter_enable)( - struct osi_core_priv_data 
*const osi_core, - const nveu32_t enable); - /** Called to configure L3 filter */ - nve32_t (*config_l3_filters)(struct osi_core_priv_data *const osi_core, - const nveu32_t filter_no, - const nveu32_t enb_dis, - const nveu32_t ipv4_ipv6_match, - const nveu32_t src_dst_addr_match, - const nveu32_t perfect_inverse_match, - const nveu32_t dma_routing_enable, - const nveu32_t dma_chan); - /** Called to update ip4 src or desc address */ - nve32_t (*update_ip4_addr)(struct osi_core_priv_data *const osi_core, - const nveu32_t filter_no, - const nveu8_t addr[], - const nveu32_t src_dst_addr_match); - /** Called to update ip6 address */ - nve32_t (*update_ip6_addr)(struct osi_core_priv_data *const osi_core, - const nveu32_t filter_no, - const nveu16_t addr[]); - /** Called to configure L4 filter */ - nve32_t (*config_l4_filters)(struct osi_core_priv_data *const osi_core, - const nveu32_t filter_no, - const nveu32_t enb_dis, - const nveu32_t tcp_udp_match, - const nveu32_t src_dst_port_match, - const nveu32_t perfect_inverse_match, - const nveu32_t dma_routing_enable, - const nveu32_t dma_chan); - /** Called to update L4 Port for filter packet */ - nve32_t (*update_l4_port_no)(struct osi_core_priv_data *const osi_core, - const nveu32_t filter_no, - const nveu16_t port_no, - const nveu32_t src_dst_port_match); - /** Called to set the addend value to adjust the time */ - nve32_t (*config_addend)(struct osi_core_priv_data *const osi_core, - const nveu32_t addend); + /** Called to configure L3L4 filter */ + nve32_t (*config_l3l4_filters)(struct osi_core_priv_data *const osi_core, + nveu32_t filter_no, + const struct osi_l3_l4_filter *const l3_l4); /** Called to adjust the mac time */ nve32_t (*adjust_mactime)(struct osi_core_priv_data *const osi_core, const nveu32_t sec, const nveu32_t nsec, const nveu32_t neg_adj, const nveu32_t one_nsec_accuracy); - /** Called to set current system time to MAC */ - nve32_t (*set_systime_to_mac)(struct osi_core_priv_data *const osi_core, - 
const nveu32_t sec, - const nveu32_t nsec); - /** Called to configure the TimeStampControl register */ - void (*config_tscr)(struct osi_core_priv_data *const osi_core, - const nveu32_t ptp_filter); - /** Called to configure the sub second increment register */ - void (*config_ssir)(struct osi_core_priv_data *const osi_core, - const nveu32_t ptp_clock); - /** Called to configure the PTP RX packets Queue */ - nve32_t (*config_ptp_rxq)(struct osi_core_priv_data *const osi_core, - const unsigned int rxq_idx, - const unsigned int enable); /** Called to update MMC counter from HW register */ void (*read_mmc)(struct osi_core_priv_data *const osi_core); /** Called to write into a PHY reg over MDIO bus */ @@ -180,6 +141,9 @@ struct core_ops { nve32_t (*read_phy_reg)(struct osi_core_priv_data *const osi_core, const nveu32_t phyaddr, const nveu32_t phyreg); + /** Called to get HW features */ + nve32_t (*get_hw_features)(struct osi_core_priv_data *const osi_core, + struct osi_hw_features *hw_feat); /** Called to read reg */ nveu32_t (*read_reg)(struct osi_core_priv_data *const osi_core, const nve32_t reg); @@ -195,20 +159,12 @@ struct core_ops { nveu32_t (*write_macsec_reg)(struct osi_core_priv_data *const osi_core, const nveu32_t val, const nve32_t reg); +#ifndef OSI_STRIPPED_LIB + void (*macsec_config_mac)(struct osi_core_priv_data *const osi_core, + const nveu32_t enable); +#endif /* !OSI_STRIPPED_LIB */ #endif /* MACSEC_SUPPORT */ #ifndef OSI_STRIPPED_LIB - /** Called periodically to read and validate safety critical - * registers against last written value */ - nve32_t (*validate_regs)(struct osi_core_priv_data *const osi_core); - /** Called to flush MTL Tx queue */ - nve32_t (*flush_mtl_tx_queue)(struct osi_core_priv_data *const osi_core, - const nveu32_t qinx); - /** Called to set av parameter */ - nve32_t (*set_avb_algorithm)(struct osi_core_priv_data *const osi_core, - const struct osi_core_avb_algorithm *const avb); - /** Called to get av parameter */ - nve32_t 
(*get_avb_algorithm)(struct osi_core_priv_data *const osi_core, - struct osi_core_avb_algorithm *const avb); /** Called to configure the MTL to forward/drop tx status */ nve32_t (*config_tx_status)(struct osi_core_priv_data *const osi_core, const nveu32_t tx_status); @@ -224,6 +180,9 @@ struct core_ops { nve32_t (*config_arp_offload)(struct osi_core_priv_data *const osi_core, const nveu32_t enable, const nveu8_t *ip_addr); + /** Called to configure HW PTP offload feature */ + nve32_t (*config_ptp_offload)(struct osi_core_priv_data *const osi_core, + struct osi_pto_config *const pto_config); /** Called to configure VLAN filtering */ nve32_t (*config_vlan_filtering)( struct osi_core_priv_data *const osi_core, @@ -236,10 +195,6 @@ struct core_ops { void (*configure_eee)(struct osi_core_priv_data *const osi_core, const nveu32_t tx_lpi_enabled, const nveu32_t tx_lpi_timer); - /** Called to save MAC register space during SoC suspend */ - nve32_t (*save_registers)(struct osi_core_priv_data *const osi_core); - /** Called to restore MAC control registers during SoC resume */ - nve32_t (*restore_registers)(struct osi_core_priv_data *const osi_core); /** Called to set MDC clock rate for MDIO operation */ void (*set_mdc_clk_rate)(struct osi_core_priv_data *const osi_core, const nveu64_t csr_clk_rate); @@ -247,63 +202,54 @@ struct core_ops { nve32_t (*config_mac_loopback)( struct osi_core_priv_data *const osi_core, const nveu32_t lb_mode); -#endif /* !OSI_STRIPPED_LIB */ - /** Called to get HW features */ - nve32_t (*get_hw_features)(struct osi_core_priv_data *const osi_core, - struct osi_hw_features *hw_feat); /** Called to configure RSS for MAC */ nve32_t (*config_rss)(struct osi_core_priv_data *osi_core); - /** Called to update GCL config */ - int (*hw_config_est)(struct osi_core_priv_data *const osi_core, - struct osi_est_config *const est); - /** Called to update FPE config */ - int (*hw_config_fpe)(struct osi_core_priv_data *const osi_core, - struct osi_fpe_config *const 
fpe); - /** Called to configure FRP engine */ - int (*config_frp)(struct osi_core_priv_data *const osi_core, - const unsigned int enabled); - /** Called to update FRP Instruction Table entry */ - int (*update_frp_entry)(struct osi_core_priv_data *const osi_core, - const unsigned int pos, - struct osi_core_frp_data *const data); - /** Called to update FRP NVE and */ - int (*update_frp_nve)(struct osi_core_priv_data *const osi_core, - const unsigned int nve); - /** Called to configure HW PTP offload feature */ - int (*config_ptp_offload)(struct osi_core_priv_data *const osi_core, - struct osi_pto_config *const pto_config); -#ifdef MACSEC_SUPPORT - void (*macsec_config_mac)(struct osi_core_priv_data *const osi_core, + /** Called to configure the PTP RX packets Queue */ + nve32_t (*config_ptp_rxq)(struct osi_core_priv_data *const osi_core, + const nveu32_t rxq_idx, const nveu32_t enable); -#endif /* MACSEC_SUPPORT */ - int (*ptp_tsc_capture)(struct osi_core_priv_data *const osi_core, - struct osi_core_ptp_tsc_data *data); +#endif /* !OSI_STRIPPED_LIB */ + /** Called to set av parameter */ + nve32_t (*set_avb_algorithm)(struct osi_core_priv_data *const osi_core, + const struct osi_core_avb_algorithm *const avb); + /** Called to get av parameter */ + nve32_t (*get_avb_algorithm)(struct osi_core_priv_data *const osi_core, + struct osi_core_avb_algorithm *const avb); + /** Called to configure FRP engine */ + nve32_t (*config_frp)(struct osi_core_priv_data *const osi_core, + const nveu32_t enabled); + /** Called to update FRP Instruction Table entry */ + nve32_t (*update_frp_entry)(struct osi_core_priv_data *const osi_core, + const nveu32_t pos, + struct osi_core_frp_data *const data); + /** Called to update FRP NVE and */ + nve32_t (*update_frp_nve)(struct osi_core_priv_data *const osi_core, + const nveu32_t nve); #ifdef HSI_SUPPORT /** Interface function called to initialize HSI */ - int (*core_hsi_configure)(struct osi_core_priv_data *const osi_core, + nve32_t 
 (*core_hsi_configure)(struct osi_core_priv_data *const osi_core, const nveu32_t enable); + /** Interface function called to inject error */ + nve32_t (*core_hsi_inject_err)(struct osi_core_priv_data *const osi_core, + const nveu32_t error_code); #endif }; /** * @brief constant values for drift MAC to MAC sync. */ -#ifndef DRIFT_CAL -#define DRIFT_CAL 1 -#define I_COMPONENT_BY_10 3 -#define P_COMPONENT_BY_10 7 -#define WEIGHT_BY_10 10 -#define CONST_FACTOR 8 //(1sec/125ns) -#define MAX_FREQ 85000000LL -#endif -#define EQOS_SEC_OFFSET 0xB08 -#define EQOS_NSEC_OFFSET 0xB0C -#define MGBE_SEC_OFFSET 0xD08 -#define MGBE_NSEC_OFFSET 0xD0C -#define ETHER_NSEC_MASK 0x7FFFFFFF -#define SERVO_STATS_0 0 -#define SERVO_STATS_1 1 -#define SERVO_STATS_2 2 +/* DRIFT_CAL compile-time guard dropped; PTP servo tuning constants below remain in use */ +#define I_COMPONENT_BY_10 3LL +#define P_COMPONENT_BY_10 7LL +#define WEIGHT_BY_10 10LL +#define MAX_FREQ_POS 250000000LL +#define MAX_FREQ_NEG -250000000LL +#define SERVO_STATS_0 0U +#define SERVO_STATS_1 1U +#define SERVO_STATS_2 2U +#define OSI_NSEC_PER_SEC_SIGNED 1000000000LL + +#define ETHER_NSEC_MASK 0x7FFFFFFFU /** * @brief servo data structure. 
@@ -330,6 +276,64 @@ struct core_ptp_servo { nveu32_t m2m_lock; }; +/** + * @brief AVB dynamic config storage structure + */ +struct core_avb { + /** Represend whether AVB config done or not */ + nveu32_t used; + /** AVB data structure */ + struct osi_core_avb_algorithm avb_info; +}; + +/** + * @brief VLAN dynamic config storage structure + */ +struct core_vlan { + /** VID to be stored */ + nveu32_t vid; + /** Represens whether VLAN config done or not */ + nveu32_t used; +}; + +/** + * @brief L2 filter dynamic config storage structure + */ +struct core_l2 { + nveu32_t used; + struct osi_filter filter; +}; + +/** + * @brief Dynamic config storage structure + */ +struct dynamic_cfg { + nveu32_t flags; + /** L3_L4 filters */ + struct osi_l3_l4_filter l3_l4[OSI_MGBE_MAX_L3_L4_FILTER]; + /** flow control */ + nveu32_t flow_ctrl; + /** AVB */ + struct core_avb avb[OSI_MGBE_MAX_NUM_QUEUES]; + /** RXCSUM */ + nveu32_t rxcsum; + /** VLAN arguments storage */ + struct core_vlan vlan[VLAN_NUM_VID]; + /** LPI parameters storage */ + nveu32_t tx_lpi_enabled; + nveu32_t tx_lpi_timer; + /** PTP information storage */ + nveu32_t ptp; + /** EST information storage */ + struct osi_est_config est; + /** FPE information storage */ + struct osi_fpe_config fpe; + /** L2 filter storage */ + struct osi_filter l2_filter; + /** L2 filter configuration */ + struct core_l2 l2[EQOS_MAX_MAC_ADDRESS_FILTER]; +}; + /** * @brief Core local data structure. 
 */ @@ -351,7 +355,7 @@ struct core_local { /** This is the head node for PTP packet ID queue */ struct osi_core_tx_ts tx_ts_head; /** Maximum number of queues/channels */ - nveu32_t max_chans; + nveu32_t num_max_chans; /** GCL depth supported by HW */ nveu32_t gcl_dep; /** Max GCL width (time + gate) value supported by HW */ @@ -370,8 +374,43 @@ struct core_local { nveu32_t pps_freq; /** Time interval mask for GCL entry */ nveu32_t ti_mask; + /** Hardware dynamic configuration context */ + struct dynamic_cfg cfg; + /** Hardware dynamic configuration state */ + nveu32_t state; + /** XPCS Lane bringup/Block lock status */ + nveu32_t lane_status; + /** Exact MAC used across SOCs 0:Legacy EQOS, 1:Orin EQOS, 2:Orin MGBE */ + nveu32_t l_mac_ver; +#if defined(L3L4_WILDCARD_FILTER) + /** l3l4 wildcard filter configured (OSI_ENABLE) / not configured (OSI_DISABLE) */ + nveu32_t l3l4_wildcard_filter_configured; +#endif /* L3L4_WILDCARD_FILTER */ }; +/** + * @brief update_counter_u - Increment nveu32_t counter + * + * @param[out] value: Pointer to value to be incremented. + * @param[in] incr: increment value + * + * @note + * API Group: + * - Initialization: Yes + * - Run time: No + * - De-initialization: No + */ +static inline void update_counter_u(nveu32_t *value, nveu32_t incr) +{ + nveu32_t temp = *value + incr; + + if (temp < *value) { + /* Overflow, so reset it to zero */ + temp = 0U; + } + *value = temp; +} + /** * @brief eqos_init_core_ops - Initialize EQOS core operations. * @@ -385,19 +424,6 @@ struct core_local { */ void eqos_init_core_ops(struct core_ops *ops); -/** - * @brief ivc_init_core_ops - Initialize IVC core operations. - * - * @param[in] ops: Core operations pointer. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - */ -void ivc_init_core_ops(struct core_ops *ops); - /** * @brief mgbe_init_core_ops - Initialize MGBE core operations.
* diff --git a/osi/core/debug.c b/osi/core/debug.c index 8b16296..622edf7 100644 --- a/osi/core/debug.c +++ b/osi/core/debug.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -32,10 +32,10 @@ * */ static void core_dump_struct(struct osi_core_priv_data *osi_core, - unsigned char *ptr, + nveu8_t *ptr, unsigned long size) { - nveu32_t i = 0, rem, j; + nveu32_t i = 0, rem, j = 0; unsigned long temp; if (ptr == OSI_NULL) { @@ -72,40 +72,40 @@ void core_structs_dump(struct osi_core_priv_data *osi_core) osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS, "CORE struct size = %lu", sizeof(struct osi_core_priv_data)); - core_dump_struct(osi_core, (unsigned char *)osi_core, + core_dump_struct(osi_core, (nveu8_t *)osi_core, sizeof(struct osi_core_priv_data)); #ifdef MACSEC_SUPPORT osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS, "MACSEC ops size = %lu", sizeof(struct osi_macsec_core_ops)); - core_dump_struct(osi_core, (unsigned char *)osi_core->macsec_ops, + core_dump_struct(osi_core, (nveu8_t *)osi_core->macsec_ops, sizeof(struct osi_macsec_core_ops)); osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS, "MACSEC LUT status size = %lu", sizeof(struct osi_macsec_lut_status)); - core_dump_struct(osi_core, (unsigned char *)osi_core->macsec_ops, + core_dump_struct(osi_core, (nveu8_t *)osi_core->macsec_ops, sizeof(struct osi_macsec_lut_status)); #endif osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS, "HW features size = %lu", sizeof(struct osi_hw_features)); - core_dump_struct(osi_core, (unsigned char *)osi_core->hw_feature, + core_dump_struct(osi_core, (nveu8_t *)osi_core->hw_feature, sizeof(struct osi_hw_features)); osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS, "core local size 
= %lu", sizeof(struct core_local)); - core_dump_struct(osi_core, (unsigned char *)l_core, + core_dump_struct(osi_core, (nveu8_t *)l_core, sizeof(struct core_local)); osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS, "core ops size = %lu", sizeof(struct core_ops)); - core_dump_struct(osi_core, (unsigned char *)l_core->ops_p, + core_dump_struct(osi_core, (nveu8_t *)l_core->ops_p, sizeof(struct core_ops)); osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS, "if_ops_p struct size = %lu", sizeof(struct if_core_ops)); - core_dump_struct(osi_core, (unsigned char *)l_core->if_ops_p, + core_dump_struct(osi_core, (nveu8_t *)l_core->if_ops_p, sizeof(struct if_core_ops)); } @@ -116,9 +116,9 @@ void core_structs_dump(struct osi_core_priv_data *osi_core) */ void core_reg_dump(struct osi_core_priv_data *osi_core) { - unsigned int max_addr; - unsigned int addr = 0x0; - unsigned int reg_val; + nveu32_t max_addr; + nveu32_t addr = 0x0; + nveu32_t reg_val; switch (osi_core->mac_ver) { case OSI_EQOS_MAC_5_00: diff --git a/osi/core/debug.h b/osi/core/debug.h index 5029510..60d06e1 100644 --- a/osi/core/debug.h +++ b/osi/core/debug.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -20,6 +20,7 @@ * DEALINGS IN THE SOFTWARE. */ +#ifdef OSI_DEBUG #ifndef INCLUDED_CORE_DEBUG_H #define INCLUDED_CORE_DEBUG_H @@ -32,3 +33,4 @@ void core_reg_dump(struct osi_core_priv_data *osi_core); void core_structs_dump(struct osi_core_priv_data *osi_core); #endif /* INCLUDED_CORE_DEBUG_H*/ +#endif /* OSI_DEBUG */ diff --git a/osi/core/eqos_core.c b/osi/core/eqos_core.c index 4ab9a96..2d987fe 100644 --- a/osi/core/eqos_core.c +++ b/osi/core/eqos_core.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. 
All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -26,8 +26,8 @@ #include "eqos_core.h" #include "eqos_mmc.h" #include "core_local.h" -#include "vlan_filter.h" #include "core_common.h" +#include "macsec.h" #ifdef UPDATED_PAD_CAL /* @@ -39,364 +39,7 @@ static nve32_t eqos_pre_pad_calibrate( struct osi_core_priv_data *const osi_core); #endif /* UPDATED_PAD_CAL */ -/** - * @brief eqos_core_safety_config - EQOS MAC core safety configuration - */ -static struct core_func_safety eqos_core_safety_config; - -/** - * @brief eqos_ptp_tsc_capture - read PTP and TSC registers - * - * Algorithm: - * - write 1 to ETHER_QOS_WRAP_SYNC_TSC_PTP_CAPTURE_0 - * - wait till ETHER_QOS_WRAP_SYNC_TSC_PTP_CAPTURE_0 is 0x0 - * - read and return following registers - * ETHER_QOS_WRAP_TSC_CAPTURE_LOW_0 - * ETHER_QOS_WRAP_TSC_CAPTURE_HIGH_0 - * ETHER_QOS_WRAP_PTP_CAPTURE_LOW_0 - * ETHER_QOS_WRAP_PTP_CAPTURE_HIGH_0 - * - * @param[in] base: EQOS virtual base address. - * @param[out]: osi_core_ptp_tsc_data register - * - * @note MAC needs to be out of reset and proper clock configured. TSC and PTP - * registers should be configured. - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static nve32_t eqos_ptp_tsc_capture(struct osi_core_priv_data *const osi_core, - struct osi_core_ptp_tsc_data *data) -{ - nveu32_t retry = 20U; - nveu32_t count = 0U, val = 0U; - nve32_t cond = COND_NOT_MET; - nve32_t ret = -1; - - if (osi_core->mac_ver < OSI_EQOS_MAC_5_30) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "ptp_tsc: older IP\n", 0ULL); - goto done; - } - osi_writela(osi_core, OSI_ENABLE, (nveu8_t *)osi_core->base + - EQOS_WRAP_SYNC_TSC_PTP_CAPTURE); - - /* Poll Until Poll Condition */ - while (cond == COND_NOT_MET) { - if (count > retry) { - /* Max retries reached */ - goto done; - } - - count++; - - val = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_WRAP_SYNC_TSC_PTP_CAPTURE); - if ((val & OSI_ENABLE) == OSI_NONE) { - cond = COND_MET; - } else { - /* delay if SWR is set */ - osi_core->osd_ops.udelay(1U); - } - } - - data->tsc_low_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_WRAP_TSC_CAPTURE_LOW); - data->tsc_high_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_WRAP_TSC_CAPTURE_HIGH); - data->ptp_low_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_WRAP_PTP_CAPTURE_LOW); - data->ptp_high_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_WRAP_PTP_CAPTURE_HIGH); - ret = 0; -done: - return ret; -} - -/** - * @brief eqos_core_safety_writel - Write to safety critical register. - * - * @note - * Algorithm: - * - Acquire RW lock, so that eqos_validate_core_regs does not run while - * updating the safety critical register. - * - call osi_writela() to actually update the memory mapped register. - * - Store the same value in eqos_core_safety_config->reg_val[idx], - * so that this latest value will be compared when eqos_validate_core_regs - * is scheduled. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] val: Value to be written. - * @param[in] addr: memory mapped register address to be written to. 
- * @param[in] idx: Index of register corresponding to enum func_safety_core_regs. - * - * @pre MAC has to be out of reset, and clocks supplied. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: Yes - */ -static inline void eqos_core_safety_writel( - struct osi_core_priv_data *const osi_core, - nveu32_t val, void *addr, - nveu32_t idx) -{ - struct core_func_safety *config = &eqos_core_safety_config; - - osi_lock_irq_enabled(&config->core_safety_lock); - osi_writela(osi_core, val, addr); - config->reg_val[idx] = (val & config->reg_mask[idx]); - osi_unlock_irq_enabled(&config->core_safety_lock); -} - -/** - * @brief Initialize the eqos_core_safety_config. - * - * @note - * Algorithm: - * - Populate the list of safety critical registers and provide - * the address of the register - * - Register mask (to ignore reserved/self-critical bits in the reg). - * See eqos_validate_core_regs which can be invoked periodically to compare - * the last written value to this register vs the actual value read when - * eqos_validate_core_regs is scheduled. - * - * @param[in] osi_core: OSI core private data structure. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - */ -static void eqos_core_safety_init(struct osi_core_priv_data *const osi_core) -{ - struct core_func_safety *config = &eqos_core_safety_config; - nveu8_t *base = (nveu8_t *)osi_core->base; - nveu32_t val; - nveu32_t i, idx; - - /* Initialize all reg address to NULL, since we may not use - * some regs depending on the number of MTL queues enabled. 
- */ - for (i = EQOS_MAC_MCR_IDX; i < EQOS_MAX_CORE_SAFETY_REGS; i++) { - config->reg_addr[i] = OSI_NULL; - } - - /* Store reg addresses to run periodic read MAC registers.*/ - config->reg_addr[EQOS_MAC_MCR_IDX] = base + EQOS_MAC_MCR; - config->reg_addr[EQOS_MAC_PFR_IDX] = base + EQOS_MAC_PFR; - for (i = 0U; i < OSI_EQOS_MAX_HASH_REGS; i++) { - config->reg_addr[EQOS_MAC_HTR0_IDX + i] = - base + EQOS_MAC_HTR_REG(i); - } - config->reg_addr[EQOS_MAC_Q0_TXFC_IDX] = base + - EQOS_MAC_QX_TX_FLW_CTRL(0U); - config->reg_addr[EQOS_MAC_RQC0R_IDX] = base + EQOS_MAC_RQC0R; - config->reg_addr[EQOS_MAC_RQC1R_IDX] = base + EQOS_MAC_RQC1R; - config->reg_addr[EQOS_MAC_RQC2R_IDX] = base + EQOS_MAC_RQC2R; - config->reg_addr[EQOS_MAC_IMR_IDX] = base + EQOS_MAC_IMR; - config->reg_addr[EQOS_MAC_MA0HR_IDX] = base + EQOS_MAC_MA0HR; - config->reg_addr[EQOS_MAC_MA0LR_IDX] = base + EQOS_MAC_MA0LR; - config->reg_addr[EQOS_MAC_TCR_IDX] = base + EQOS_MAC_TCR; - config->reg_addr[EQOS_MAC_SSIR_IDX] = base + EQOS_MAC_SSIR; - config->reg_addr[EQOS_MAC_TAR_IDX] = base + EQOS_MAC_TAR; - config->reg_addr[EQOS_PAD_AUTO_CAL_CFG_IDX] = base + - EQOS_PAD_AUTO_CAL_CFG; - /* MTL registers */ - config->reg_addr[EQOS_MTL_RXQ_DMA_MAP0_IDX] = base + - EQOS_MTL_RXQ_DMA_MAP0; - for (i = 0U; i < osi_core->num_mtl_queues; i++) { - idx = osi_core->mtl_queues[i]; - if (idx >= OSI_EQOS_MAX_NUM_CHANS) { - continue; - } - - config->reg_addr[EQOS_MTL_CH0_TX_OP_MODE_IDX + idx] = base + - EQOS_MTL_CHX_TX_OP_MODE(idx); - config->reg_addr[EQOS_MTL_TXQ0_QW_IDX + idx] = base + - EQOS_MTL_TXQ_QW(idx); - config->reg_addr[EQOS_MTL_CH0_RX_OP_MODE_IDX + idx] = base + - EQOS_MTL_CHX_RX_OP_MODE(idx); - } - /* DMA registers */ - config->reg_addr[EQOS_DMA_SBUS_IDX] = base + EQOS_DMA_SBUS; - - /* Update the register mask to ignore reserved bits/self-clearing bits. 
- * MAC registers */ - config->reg_mask[EQOS_MAC_MCR_IDX] = EQOS_MAC_MCR_MASK; - config->reg_mask[EQOS_MAC_PFR_IDX] = EQOS_MAC_PFR_MASK; - for (i = 0U; i < OSI_EQOS_MAX_HASH_REGS; i++) { - config->reg_mask[EQOS_MAC_HTR0_IDX + i] = EQOS_MAC_HTR_MASK; - } - config->reg_mask[EQOS_MAC_Q0_TXFC_IDX] = EQOS_MAC_QX_TXFC_MASK; - config->reg_mask[EQOS_MAC_RQC0R_IDX] = EQOS_MAC_RQC0R_MASK; - config->reg_mask[EQOS_MAC_RQC1R_IDX] = EQOS_MAC_RQC1R_MASK; - config->reg_mask[EQOS_MAC_RQC2R_IDX] = EQOS_MAC_RQC2R_MASK; - config->reg_mask[EQOS_MAC_IMR_IDX] = EQOS_MAC_IMR_MASK; - config->reg_mask[EQOS_MAC_MA0HR_IDX] = EQOS_MAC_MA0HR_MASK; - config->reg_mask[EQOS_MAC_MA0LR_IDX] = EQOS_MAC_MA0LR_MASK; - config->reg_mask[EQOS_MAC_TCR_IDX] = EQOS_MAC_TCR_MASK; - config->reg_mask[EQOS_MAC_SSIR_IDX] = EQOS_MAC_SSIR_MASK; - config->reg_mask[EQOS_MAC_TAR_IDX] = EQOS_MAC_TAR_MASK; - config->reg_mask[EQOS_PAD_AUTO_CAL_CFG_IDX] = - EQOS_PAD_AUTO_CAL_CFG_MASK; - /* MTL registers */ - config->reg_mask[EQOS_MTL_RXQ_DMA_MAP0_IDX] = EQOS_RXQ_DMA_MAP0_MASK; - for (i = 0U; i < osi_core->num_mtl_queues; i++) { - idx = osi_core->mtl_queues[i]; - if (idx >= OSI_EQOS_MAX_NUM_CHANS) { - continue; - } - - config->reg_mask[EQOS_MTL_CH0_TX_OP_MODE_IDX + idx] = - EQOS_MTL_TXQ_OP_MODE_MASK; - config->reg_mask[EQOS_MTL_TXQ0_QW_IDX + idx] = - EQOS_MTL_TXQ_QW_MASK; - config->reg_mask[EQOS_MTL_CH0_RX_OP_MODE_IDX + idx] = - EQOS_MTL_RXQ_OP_MODE_MASK; - } - /* DMA registers */ - config->reg_mask[EQOS_DMA_SBUS_IDX] = EQOS_DMA_SBUS_MASK; - - /* Initialize current power-on-reset values of these registers */ - for (i = EQOS_MAC_MCR_IDX; i < EQOS_MAX_CORE_SAFETY_REGS; i++) { - if (config->reg_addr[i] == OSI_NULL) { - continue; - } - - val = osi_readla(osi_core, - (nveu8_t *)config->reg_addr[i]); - config->reg_val[i] = val & config->reg_mask[i]; - } - - osi_lock_init(&config->core_safety_lock); -} - -/** - * @brief Initialize the OSI core private data backup config array - * - * @note - * Algorithm: - * - Populate the list 
of core registers to be saved during suspend. - * Fill the address of each register in structure. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @param[in] osi_core: OSI core private data structure. - */ -static void eqos_core_backup_init(struct osi_core_priv_data *const osi_core) -{ - struct core_backup *config = &osi_core->backup_config; - nveu8_t *base = (nveu8_t *)osi_core->base; - nveu32_t i; - - /* MAC registers backup */ - config->reg_addr[EQOS_MAC_MCR_BAK_IDX] = base + EQOS_MAC_MCR; - config->reg_addr[EQOS_MAC_EXTR_BAK_IDX] = base + EQOS_MAC_EXTR; - config->reg_addr[EQOS_MAC_PFR_BAK_IDX] = base + EQOS_MAC_PFR; - config->reg_addr[EQOS_MAC_VLAN_TAG_BAK_IDX] = base + - EQOS_MAC_VLAN_TAG; - config->reg_addr[EQOS_MAC_VLANTIR_BAK_IDX] = base + EQOS_MAC_VLANTIR; - config->reg_addr[EQOS_MAC_RX_FLW_CTRL_BAK_IDX] = base + - EQOS_MAC_RX_FLW_CTRL; - config->reg_addr[EQOS_MAC_RQC0R_BAK_IDX] = base + EQOS_MAC_RQC0R; - config->reg_addr[EQOS_MAC_RQC1R_BAK_IDX] = base + EQOS_MAC_RQC1R; - config->reg_addr[EQOS_MAC_RQC2R_BAK_IDX] = base + EQOS_MAC_RQC2R; - config->reg_addr[EQOS_MAC_ISR_BAK_IDX] = base + EQOS_MAC_ISR; - config->reg_addr[EQOS_MAC_IMR_BAK_IDX] = base + EQOS_MAC_IMR; - config->reg_addr[EQOS_MAC_PMTCSR_BAK_IDX] = base + EQOS_MAC_PMTCSR; - config->reg_addr[EQOS_MAC_LPI_CSR_BAK_IDX] = base + EQOS_MAC_LPI_CSR; - config->reg_addr[EQOS_MAC_LPI_TIMER_CTRL_BAK_IDX] = base + - EQOS_MAC_LPI_TIMER_CTRL; - config->reg_addr[EQOS_MAC_LPI_EN_TIMER_BAK_IDX] = base + - EQOS_MAC_LPI_EN_TIMER; - config->reg_addr[EQOS_MAC_ANS_BAK_IDX] = base + EQOS_MAC_ANS; - config->reg_addr[EQOS_MAC_PCS_BAK_IDX] = base + EQOS_MAC_PCS; - if (osi_core->mac_ver == OSI_EQOS_MAC_5_00) { - config->reg_addr[EQOS_5_00_MAC_ARPPA_BAK_IDX] = base + - EQOS_5_00_MAC_ARPPA; - } - config->reg_addr[EQOS_MMC_CNTRL_BAK_IDX] = base + EQOS_MMC_CNTRL; - if (osi_core->mac_ver == OSI_EQOS_MAC_4_10) { - config->reg_addr[EQOS_4_10_MAC_ARPPA_BAK_IDX] = base + - 
EQOS_4_10_MAC_ARPPA; - } - config->reg_addr[EQOS_MAC_TCR_BAK_IDX] = base + EQOS_MAC_TCR; - config->reg_addr[EQOS_MAC_SSIR_BAK_IDX] = base + EQOS_MAC_SSIR; - config->reg_addr[EQOS_MAC_STSR_BAK_IDX] = base + EQOS_MAC_STSR; - config->reg_addr[EQOS_MAC_STNSR_BAK_IDX] = base + EQOS_MAC_STNSR; - config->reg_addr[EQOS_MAC_STSUR_BAK_IDX] = base + EQOS_MAC_STSUR; - config->reg_addr[EQOS_MAC_STNSUR_BAK_IDX] = base + EQOS_MAC_STNSUR; - config->reg_addr[EQOS_MAC_TAR_BAK_IDX] = base + EQOS_MAC_TAR; - config->reg_addr[EQOS_DMA_BMR_BAK_IDX] = base + EQOS_DMA_BMR; - config->reg_addr[EQOS_DMA_SBUS_BAK_IDX] = base + EQOS_DMA_SBUS; - config->reg_addr[EQOS_DMA_ISR_BAK_IDX] = base + EQOS_DMA_ISR; - config->reg_addr[EQOS_MTL_OP_MODE_BAK_IDX] = base + EQOS_MTL_OP_MODE; - config->reg_addr[EQOS_MTL_RXQ_DMA_MAP0_BAK_IDX] = base + - EQOS_MTL_RXQ_DMA_MAP0; - - for (i = 0; i < EQOS_MAX_HTR_REGS; i++) { - config->reg_addr[EQOS_MAC_HTR_REG_BAK_IDX(i)] = base + - EQOS_MAC_HTR_REG(i); - } - for (i = 0; i < OSI_EQOS_MAX_NUM_QUEUES; i++) { - config->reg_addr[EQOS_MAC_QX_TX_FLW_CTRL_BAK_IDX(i)] = base + - EQOS_MAC_QX_TX_FLW_CTRL(i); - } - for (i = 0; i < EQOS_MAX_MAC_ADDRESS_FILTER; i++) { - config->reg_addr[EQOS_MAC_ADDRH_BAK_IDX(i)] = base + - EQOS_MAC_ADDRH(i); - config->reg_addr[EQOS_MAC_ADDRL_BAK_IDX(i)] = base + - EQOS_MAC_ADDRL(i); - } - for (i = 0; i < EQOS_MAX_L3_L4_FILTER; i++) { - config->reg_addr[EQOS_MAC_L3L4_CTR_BAK_IDX(i)] = base + - EQOS_MAC_L3L4_CTR(i); - config->reg_addr[EQOS_MAC_L4_ADR_BAK_IDX(i)] = base + - EQOS_MAC_L4_ADR(i); - config->reg_addr[EQOS_MAC_L3_AD0R_BAK_IDX(i)] = base + - EQOS_MAC_L3_AD0R(i); - config->reg_addr[EQOS_MAC_L3_AD1R_BAK_IDX(i)] = base + - EQOS_MAC_L3_AD1R(i); - config->reg_addr[EQOS_MAC_L3_AD2R_BAK_IDX(i)] = base + - EQOS_MAC_L3_AD2R(i); - config->reg_addr[EQOS_MAC_L3_AD3R_BAK_IDX(i)] = base + - EQOS_MAC_L3_AD3R(i); - } - for (i = 0; i < OSI_EQOS_MAX_NUM_QUEUES; i++) { - config->reg_addr[EQOS_MTL_CHX_TX_OP_MODE_BAK_IDX(i)] = base + - 
EQOS_MTL_CHX_TX_OP_MODE(i); - config->reg_addr[EQOS_MTL_TXQ_ETS_CR_BAK_IDX(i)] = base + - EQOS_MTL_TXQ_ETS_CR(i); - config->reg_addr[EQOS_MTL_TXQ_QW_BAK_IDX(i)] = base + - EQOS_MTL_TXQ_QW(i); - config->reg_addr[EQOS_MTL_TXQ_ETS_SSCR_BAK_IDX(i)] = base + - EQOS_MTL_TXQ_ETS_SSCR(i); - config->reg_addr[EQOS_MTL_TXQ_ETS_HCR_BAK_IDX(i)] = base + - EQOS_MTL_TXQ_ETS_HCR(i); - config->reg_addr[EQOS_MTL_TXQ_ETS_LCR_BAK_IDX(i)] = base + - EQOS_MTL_TXQ_ETS_LCR(i); - config->reg_addr[EQOS_MTL_CHX_RX_OP_MODE_BAK_IDX(i)] = base + - EQOS_MTL_CHX_RX_OP_MODE(i); - } - - /* Wrapper register backup */ - config->reg_addr[EQOS_CLOCK_CTRL_0_BAK_IDX] = base + - EQOS_CLOCK_CTRL_0; - config->reg_addr[EQOS_AXI_ASID_CTRL_BAK_IDX] = base + - EQOS_AXI_ASID_CTRL; - config->reg_addr[EQOS_PAD_CRTL_BAK_IDX] = base + EQOS_PAD_CRTL; - config->reg_addr[EQOS_PAD_AUTO_CAL_CFG_BAK_IDX] = base + - EQOS_PAD_AUTO_CAL_CFG; -} - +#ifndef OSI_STRIPPED_LIB /** * @brief eqos_config_flow_control - Configure MAC flow control settings * @@ -430,7 +73,7 @@ static nve32_t eqos_config_flow_control( /* return on invalid argument */ if (flw_ctrl > (OSI_FLOW_CTRL_RX | OSI_FLOW_CTRL_TX)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "flw_ctr: invalid input\n", 0ULL); return -1; } @@ -455,9 +98,7 @@ static nve32_t eqos_config_flow_control( } /* Write to MAC Tx Flow control Register of Q0 */ - eqos_core_safety_writel(osi_core, val, (nveu8_t *)addr + - EQOS_MAC_QX_TX_FLW_CTRL(0U), - EQOS_MAC_Q0_TXFC_IDX); + osi_writela(osi_core, val, (nveu8_t *)addr + EQOS_MAC_QX_TX_FLW_CTRL(0U)); /* Configure MAC Rx Flow control*/ /* Read MAC Rx Flow control Register */ @@ -481,359 +122,7 @@ static nve32_t eqos_config_flow_control( return 0; } - -/** - * @brief eqos_config_fw_err_pkts - Configure forwarding of error packets - * - * @note - * Algorithm: - * - Validate fw_err and return -1 if fails. - * - Enable or disable forward error packet confiration based on fw_err. 
- * - Refer to EQOS column of <> for API details. - * - TraceID: ETHERNET_NVETHERNETRM_020 - * - * @param[in] osi_core: OSI core private data structure. Used param base. - * @param[in] qinx: Queue index. Max value OSI_EQOS_MAX_NUM_CHANS-1. - * @param[in] fw_err: Enable(OSI_ENABLE) or Disable(OSI_DISABLE) the forwarding of error packets - * - * @pre MAC should be initialized and started. see osi_start_mac() - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -static nve32_t eqos_config_fw_err_pkts( - struct osi_core_priv_data *const osi_core, - const nveu32_t qinx, - const nveu32_t fw_err) -{ - void *addr = osi_core->base; - nveu32_t val; - - /* Check for valid fw_err and qinx values */ - if (((fw_err != OSI_ENABLE) && (fw_err != OSI_DISABLE)) || - (qinx >= OSI_EQOS_MAX_NUM_CHANS)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "config_fw_err: invalid input\n", 0ULL); - return -1; - } - - /* Read MTL RXQ Operation_Mode Register */ - val = osi_readla(osi_core, - (nveu8_t *)addr + EQOS_MTL_CHX_RX_OP_MODE(qinx)); - - /* fw_err, 1 is for enable and 0 is for disable */ - if (fw_err == OSI_ENABLE) { - /* When fw_err bit is set, all packets except the runt error - * packets are forwarded to the application or DMA. - */ - val |= EQOS_MTL_RXQ_OP_MODE_FEP; - } else if (fw_err == OSI_DISABLE) { - /* When this bit is reset, the Rx queue drops packets with error - * status (CRC error, GMII_ER, watchdog timeout, or overflow) - */ - val &= ~EQOS_MTL_RXQ_OP_MODE_FEP; - } else { - /* Nothing here */ - } - - /* Write to FEP bit of MTL RXQ operation Mode Register to enable or - * disable the forwarding of error packets to DMA or application. 
- */ - eqos_core_safety_writel(osi_core, val, (nveu8_t *)addr + - EQOS_MTL_CHX_RX_OP_MODE(qinx), - EQOS_MTL_CH0_RX_OP_MODE_IDX + qinx); - - return 0; -} - -/** - * @brief eqos_poll_for_swr - Poll for software reset (SWR bit in DMA Mode) - * - * @note - * Algorithm: - * - Waits for SWR reset to be cleared in DMA Mode register for max polling count of 1000. - * - Sleeps for 1 milli sec for each iteration. - * - Refer to EQOS column of <> for API details. - * - TraceID: ETHERNET_NVETHERNETRM_004 - * - * @param[in] osi_core: OSI core private data structure.Used param base, osd_ops.usleep_range. - * - * @pre MAC needs to be out of reset and proper clock configured. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @retval 0 on success if reset is success - * @retval -1 on if reset didnot happen in timeout. - */ -static nve32_t eqos_poll_for_swr(struct osi_core_priv_data *const osi_core) -{ - void *addr = osi_core->base; - nveu32_t retry = RETRY_COUNT; - nveu32_t count; - nveu32_t dma_bmr = 0; - nve32_t cond = COND_NOT_MET; - nveu32_t pre_si = osi_core->pre_si; - - if (pre_si == OSI_ENABLE) { - osi_writela(osi_core, OSI_ENABLE, - (nveu8_t *)addr + EQOS_DMA_BMR); - } - /* add delay of 10 usec */ - osi_core->osd_ops.usleep_range(9, 11); - - /* Poll Until Poll Condition */ - count = 0; - while (cond == COND_NOT_MET) { - if (count > retry) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "poll_for_swr: timeout\n", 0ULL); - return -1; - } - - count++; - - - dma_bmr = osi_readla(osi_core, - (nveu8_t *)addr + EQOS_DMA_BMR); - if ((dma_bmr & EQOS_DMA_BMR_SWR) != EQOS_DMA_BMR_SWR) { - cond = COND_MET; - } else { - osi_core->osd_ops.msleep(1U); - } - } - - return 0; -} - -/** - * @brief eqos_set_speed - Set operating speed - * - * @note - * Algorithm: - * - Based on the speed (10/100/1000Mbps) MAC will be configured - * accordingly. - * - If invalid value for speed, configure for 1000Mbps. 
- * - Refer to EQOS column of <> for API details. - * - TraceID: ETHERNET_NVETHERNETRM_012 - * - * @param[in] base: EQOS virtual base address. - * @param[in] speed: Operating speed. Valid values are OSI_SPEED_* - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @pre MAC should be initialized and started. see osi_start_mac() - */ -static int eqos_set_speed(struct osi_core_priv_data *const osi_core, - const nve32_t speed) -{ - nveu32_t mcr_val; - void *base = osi_core->base; - - mcr_val = osi_readla(osi_core, (nveu8_t *)base + EQOS_MAC_MCR); - switch (speed) { - default: - mcr_val &= ~EQOS_MCR_PS; - mcr_val &= ~EQOS_MCR_FES; - break; - case OSI_SPEED_1000: - mcr_val &= ~EQOS_MCR_PS; - mcr_val &= ~EQOS_MCR_FES; - break; - case OSI_SPEED_100: - mcr_val |= EQOS_MCR_PS; - mcr_val |= EQOS_MCR_FES; - break; - case OSI_SPEED_10: - mcr_val |= EQOS_MCR_PS; - mcr_val &= ~EQOS_MCR_FES; - break; - } - - eqos_core_safety_writel(osi_core, mcr_val, - (unsigned char *)osi_core->base + EQOS_MAC_MCR, - EQOS_MAC_MCR_IDX); - return 0; -} - -/** - * @brief eqos_set_mode - Set operating mode - * - * @note - * Algorithm: - * - Based on the mode (HALF/FULL Duplex) MAC will be configured - * accordingly. - * - If invalid value for mode, return -1. - * - Refer to EQOS column of <> for API details. - * - TraceID: ETHERNET_NVETHERNETRM_011 - * - * @param[in] osi_core: OSI core private data structure. used param is base. - * @param[in] mode: Operating mode. (OSI_FULL_DUPLEX/OSI_HALF_DUPLEX) - * - * @pre MAC should be initialized and started. see osi_start_mac() - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static nve32_t eqos_set_mode(struct osi_core_priv_data *const osi_core, - const nve32_t mode) -{ - void *base = osi_core->base; - nveu32_t mcr_val; - - mcr_val = osi_readla(osi_core, (nveu8_t *)base + EQOS_MAC_MCR); - if (mode == OSI_FULL_DUPLEX) { - mcr_val |= EQOS_MCR_DM; - /* DO (disable receive own) bit is not applicable, don't care */ - mcr_val &= ~EQOS_MCR_DO; - } else if (mode == OSI_HALF_DUPLEX) { - mcr_val &= ~EQOS_MCR_DM; - /* Set DO (disable receive own) bit */ - mcr_val |= EQOS_MCR_DO; - } else { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "set_mode: invalid mode\n", 0ULL); - return -1; - /* Nothing here */ - } - eqos_core_safety_writel(osi_core, mcr_val, - (nveu8_t *)base + EQOS_MAC_MCR, - EQOS_MAC_MCR_IDX); - return 0; -} - -/** - * @brief eqos_calculate_per_queue_fifo - Calculate per queue FIFO size - * - * @note - * Algorithm: - * - Identify Total Tx/Rx HW FIFO size in KB based on fifo_size - * - Divide the same for each queue. - * - Correct the size to its nearest value of 256B to 32K with next correction value - * which is a 2power(2^x). - * - Correct for 9K and Max of 36K also. - * - i.e if share is >256 and < 512, set it to 256. - * - SWUD_ID: ETHERNET_NVETHERNETRM_006_1 - * - * @param[in] mac_ver: MAC version value. - * @param[in] fifo_size: Total Tx/RX HW FIFO size. - * @param[in] queue_count: Total number of Queues configured. - * - * @pre MAC has to be out of reset. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @retval Queue size that need to be programmed. 
- */ -static nveu32_t eqos_calculate_per_queue_fifo(nveu32_t mac_ver, - nveu32_t fifo_size, - nveu32_t queue_count) -{ - nveu32_t q_fifo_size = 0; /* calculated fifo size per queue */ - nveu32_t p_fifo = EQOS_256; /* per queue fifo size program value */ - - if (queue_count == 0U) { - return 0U; - } - - /* calculate Tx/Rx fifo share per queue */ - switch (fifo_size) { - case 0: - q_fifo_size = FIFO_SIZE_B(128U); - break; - case 1: - q_fifo_size = FIFO_SIZE_B(256U); - break; - case 2: - q_fifo_size = FIFO_SIZE_B(512U); - break; - case 3: - q_fifo_size = FIFO_SIZE_KB(1U); - break; - case 4: - q_fifo_size = FIFO_SIZE_KB(2U); - break; - case 5: - q_fifo_size = FIFO_SIZE_KB(4U); - break; - case 6: - q_fifo_size = FIFO_SIZE_KB(8U); - break; - case 7: - q_fifo_size = FIFO_SIZE_KB(16U); - break; - case 8: - q_fifo_size = FIFO_SIZE_KB(32U); - break; - case 9: - if (mac_ver == OSI_EQOS_MAC_5_30) { - q_fifo_size = FIFO_SIZE_KB(64U); - } else { - q_fifo_size = FIFO_SIZE_KB(36U); - } - break; - case 10: - q_fifo_size = FIFO_SIZE_KB(128U); - break; - case 11: - q_fifo_size = FIFO_SIZE_KB(256U); - break; - default: - q_fifo_size = FIFO_SIZE_KB(36U); - break; - } - - q_fifo_size = q_fifo_size / queue_count; - - if (q_fifo_size >= FIFO_SIZE_KB(36U)) { - p_fifo = EQOS_36K; - } else if (q_fifo_size >= FIFO_SIZE_KB(32U)) { - p_fifo = EQOS_32K; - } else if (q_fifo_size >= FIFO_SIZE_KB(16U)) { - p_fifo = EQOS_16K; - } else if (q_fifo_size == FIFO_SIZE_KB(9U)) { - p_fifo = EQOS_9K; - } else if (q_fifo_size >= FIFO_SIZE_KB(8U)) { - p_fifo = EQOS_8K; - } else if (q_fifo_size >= FIFO_SIZE_KB(4U)) { - p_fifo = EQOS_4K; - } else if (q_fifo_size >= FIFO_SIZE_KB(2U)) { - p_fifo = EQOS_2K; - } else if (q_fifo_size >= FIFO_SIZE_KB(1U)) { - p_fifo = EQOS_1K; - } else if (q_fifo_size >= FIFO_SIZE_B(512U)) { - p_fifo = EQOS_512; - } else if (q_fifo_size >= FIFO_SIZE_B(256U)) { - p_fifo = EQOS_256; - } else { - /* Nothing here */ - } - - return p_fifo; -} +#endif /* !OSI_STRIPPED_LIB */ #ifdef 
UPDATED_PAD_CAL /** @@ -896,14 +185,17 @@ static nve32_t eqos_pad_calibrate(struct osi_core_priv_data *const osi_core) /* 3. Set AUTO_CAL_ENABLE and AUTO_CAL_START in * reg ETHER_QOS_AUTO_CAL_CONFIG_0. + * Set pad_auto_cal pd/pu offset values */ value = osi_readla(osi_core, (nveu8_t *)ioaddr + EQOS_PAD_AUTO_CAL_CFG); + value &= ~EQOS_PAD_CRTL_PU_OFFSET_MASK; + value &= ~EQOS_PAD_CRTL_PD_OFFSET_MASK; + value |= osi_core->padctrl.pad_auto_cal_pu_offset; + value |= (osi_core->padctrl.pad_auto_cal_pd_offset << 8U); value |= EQOS_PAD_AUTO_CAL_CFG_START | EQOS_PAD_AUTO_CAL_CFG_ENABLE; - eqos_core_safety_writel(osi_core, value, (nveu8_t *)ioaddr + - EQOS_PAD_AUTO_CAL_CFG, - EQOS_PAD_AUTO_CAL_CFG_IDX); + osi_writela(osi_core, value, (nveu8_t *)ioaddr + EQOS_PAD_AUTO_CAL_CFG); /* 4. Wait on 10 to 12 us before start checking for calibration done. * This delay is consumed in delay inside while loop. @@ -988,14 +280,19 @@ static nve32_t eqos_pad_calibrate(struct osi_core_priv_data *const osi_core) osi_core->osd_ops.usleep_range(1, 3); /* 3. Set AUTO_CAL_ENABLE and AUTO_CAL_START in * reg ETHER_QOS_AUTO_CAL_CONFIG_0. + * Set pad_auto_cal pd/pu offset values */ + value = osi_readla(osi_core, (nveu8_t *)ioaddr + EQOS_PAD_AUTO_CAL_CFG); + value &= ~EQOS_PAD_CRTL_PU_OFFSET_MASK; + value &= ~EQOS_PAD_CRTL_PD_OFFSET_MASK; + value |= osi_core->padctrl.pad_auto_cal_pu_offset; + value |= (osi_core->padctrl.pad_auto_cal_pd_offset << 8U); value |= EQOS_PAD_AUTO_CAL_CFG_START | EQOS_PAD_AUTO_CAL_CFG_ENABLE; - eqos_core_safety_writel(osi_core, value, (nveu8_t *)ioaddr + - EQOS_PAD_AUTO_CAL_CFG, - EQOS_PAD_AUTO_CAL_CFG_IDX); + osi_writela(osi_core, value, (nveu8_t *)ioaddr + EQOS_PAD_AUTO_CAL_CFG); + /* 4. Wait on 1 to 3 us before start checking for calibration done. * This delay is consumed in delay inside while loop. 
*/ @@ -1026,190 +323,6 @@ calibration_failed: } #endif /* UPDATED_PAD_CAL */ -/** - * @brief eqos_flush_mtl_tx_queue - Flush MTL Tx queue - * - * @note - * Algorithm: - * - Validate qinx for maximum value of OSI_EQOS_MAX_NUM_QUEUES and return -1 if fails. - * - Configure EQOS_MTL_CHX_TX_OP_MODE to flush corresponding MTL queue. - * - Wait on EQOS_MTL_QTOMR_FTQ_LPOS bit set for a loop of 1000 with a sleep of - * 1 milli second between itertions. - * - return 0 if EQOS_MTL_QTOMR_FTQ_LPOS is set else -1. - * - SWUD_ID: ETHERNET_NVETHERNETRM_006_2 - * - * @param[in] osi_core: OSI core private data structure. Used param base, osd_ops.msleep. - * @param[in] qinx: MTL queue index. Max value is OSI_EQOS_MAX_NUM_QUEUES-1. - * - * @note - * - MAC should out of reset and clocks enabled. - * - hw core initialized. see osi_hw_core_init(). - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -static nve32_t eqos_flush_mtl_tx_queue( - struct osi_core_priv_data *const osi_core, - const nveu32_t qinx) -{ - void *addr = osi_core->base; - nveu32_t retry = RETRY_COUNT; - nveu32_t count; - nveu32_t value; - nve32_t cond = COND_NOT_MET; - - if (qinx >= OSI_EQOS_MAX_NUM_QUEUES) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "flush_mtl_tx_queue: invalid input\n", 0ULL); - return -1; - } - - /* Read Tx Q Operating Mode Register and flush TxQ */ - value = osi_readla(osi_core, (nveu8_t *)addr + - EQOS_MTL_CHX_TX_OP_MODE(qinx)); - value |= EQOS_MTL_QTOMR_FTQ; - eqos_core_safety_writel(osi_core, value, (nveu8_t *)addr + - EQOS_MTL_CHX_TX_OP_MODE(qinx), - EQOS_MTL_CH0_TX_OP_MODE_IDX + qinx); - - /* Poll Until FTQ bit resets for Successful Tx Q flush */ - count = 0; - while (cond == COND_NOT_MET) { - if (count > retry) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "Poll FTQ bit timeout\n", 0ULL); - return -1; - } - - count++; - osi_core->osd_ops.msleep(1); - - value = 
osi_readla(osi_core, (nveu8_t *)addr + - EQOS_MTL_CHX_TX_OP_MODE(qinx)); - - if ((value & EQOS_MTL_QTOMR_FTQ_LPOS) == 0U) { - cond = COND_MET; - } - } - - return 0; -} - -/** - * @brief update_ehfc_rfa_rfd - Update EHFC, RFD and RSA values - * - * @note - * Algorithm: - * - Caculates and stores the RSD (Threshold for Deactivating - * Flow control) and RSA (Threshold for Activating Flow Control) values - * based on the Rx FIFO size and also enables HW flow control. - * - Maping detials for rx_fifo are:(minimum EQOS_4K) - * - EQOS_4K, configure FULL_MINUS_2_5K for RFD and FULL_MINUS_1_5K for RFA - * - EQOS_8K, configure FULL_MINUS_4_K for RFD and FULL_MINUS_6_K for RFA - * - EQOS_16K, configure FULL_MINUS_4_K for RFD and FULL_MINUS_10_K for RFA - * - EQOS_32K, configure FULL_MINUS_4_K for RFD and FULL_MINUS_16_K for RFA - * - EQOS_9K/Deafult, configure FULL_MINUS_3_K for RFD and FULL_MINUS_2_K for RFA - * - SWUD_ID: ETHERNET_NVETHERNETRM_006_3 - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @param[in] rx_fifo: Rx FIFO size. 
- * @param[out] value: Stores RFD and RSA values - */ -void update_ehfc_rfa_rfd(nveu32_t rx_fifo, nveu32_t *value) -{ - if (rx_fifo >= EQOS_4K) { - /* Enable HW Flow Control */ - *value |= EQOS_MTL_RXQ_OP_MODE_EHFC; - - switch (rx_fifo) { - case EQOS_4K: - /* Update RFD */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_2_5K << - EQOS_MTL_RXQ_OP_MODE_RFD_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_1_5K << - EQOS_MTL_RXQ_OP_MODE_RFA_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - break; - case EQOS_8K: - /* Update RFD */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_4_K << - EQOS_MTL_RXQ_OP_MODE_RFD_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_6_K << - EQOS_MTL_RXQ_OP_MODE_RFA_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - break; - case EQOS_9K: - /* Update RFD */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_3_K << - EQOS_MTL_RXQ_OP_MODE_RFD_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_2_K << - EQOS_MTL_RXQ_OP_MODE_RFA_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - break; - case EQOS_16K: - /* Update RFD */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_4_K << - EQOS_MTL_RXQ_OP_MODE_RFD_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_10_K << - EQOS_MTL_RXQ_OP_MODE_RFA_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - break; - case EQOS_32K: - /* Update RFD */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_4_K << - EQOS_MTL_RXQ_OP_MODE_RFD_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_16_K << - EQOS_MTL_RXQ_OP_MODE_RFA_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - break; 
- default: - /* Use 9K values */ - /* Update RFD */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_3_K << - EQOS_MTL_RXQ_OP_MODE_RFD_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_2_K << - EQOS_MTL_RXQ_OP_MODE_RFA_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - break; - } - } -} - /** \cond DO_NOT_DOCUMENT */ /** * @brief eqos_configure_mtl_queue - Configure MTL Queue @@ -1241,32 +354,59 @@ void update_ehfc_rfa_rfd(nveu32_t rx_fifo, nveu32_t *value) * @retval 0 on success * @retval -1 on failure. */ -static nve32_t eqos_configure_mtl_queue(nveu32_t qinx, - struct osi_core_priv_data *const osi_core, - nveu32_t tx_fifo, - nveu32_t rx_fifo) +static nve32_t eqos_configure_mtl_queue(struct osi_core_priv_data *const osi_core, + nveu32_t q_inx) { + const struct core_local *l_core = (struct core_local *)(void *)osi_core; + const nveu32_t rx_fifo_sz[2U][OSI_EQOS_MAX_NUM_QUEUES] = { + { FIFO_SZ(9U), FIFO_SZ(9U), FIFO_SZ(9U), FIFO_SZ(9U), + FIFO_SZ(1U), FIFO_SZ(1U), FIFO_SZ(1U), FIFO_SZ(1U) }, + { FIFO_SZ(36U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), + FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(16U) }, + }; + const nveu32_t tx_fifo_sz[2U][OSI_EQOS_MAX_NUM_QUEUES] = { + { FIFO_SZ(9U), FIFO_SZ(9U), FIFO_SZ(9U), FIFO_SZ(9U), + FIFO_SZ(1U), FIFO_SZ(1U), FIFO_SZ(1U), FIFO_SZ(1U) }, + { FIFO_SZ(8U), FIFO_SZ(8U), FIFO_SZ(8U), FIFO_SZ(8U), + FIFO_SZ(8U), FIFO_SZ(8U), FIFO_SZ(8U), FIFO_SZ(8U) }, + }; + const nveu32_t rfd_rfa[OSI_EQOS_MAX_NUM_QUEUES] = { + FULL_MINUS_16_K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + }; + nveu32_t l_macv = (l_core->l_mac_ver & 0x1U); + nveu32_t que_idx = (q_inx & 0x7U); + nveu32_t rx_fifo_sz_t = 0U; + nveu32_t tx_fifo_sz_t = 0U; nveu32_t value = 0; nve32_t ret = 0; - ret = eqos_flush_mtl_tx_queue(osi_core, qinx); + tx_fifo_sz_t = tx_fifo_sz[l_macv][que_idx]; + + 
ret = hw_flush_mtl_tx_queue(osi_core, que_idx); if (ret < 0) { - return ret; + goto fail; } - value = (tx_fifo << EQOS_MTL_TXQ_SIZE_SHIFT); + value = (tx_fifo_sz_t << EQOS_MTL_TXQ_SIZE_SHIFT); /* Enable Store and Forward mode */ value |= EQOS_MTL_TSF; /* Enable TxQ */ value |= EQOS_MTL_TXQEN; - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MTL_CHX_TX_OP_MODE(qinx), - EQOS_MTL_CH0_TX_OP_MODE_IDX + qinx); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MTL_CHX_TX_OP_MODE(que_idx)); /* read RX Q0 Operating Mode Register */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_MTL_CHX_RX_OP_MODE(qinx)); - value |= (rx_fifo << EQOS_MTL_RXQ_SIZE_SHIFT); + EQOS_MTL_CHX_RX_OP_MODE(que_idx)); + + rx_fifo_sz_t = rx_fifo_sz[l_macv][que_idx]; + value |= (rx_fifo_sz_t << EQOS_MTL_RXQ_SIZE_SHIFT); /* Enable Store and Forward mode */ value |= EQOS_MTL_RSF; /* Update EHFL, RFA and RFD @@ -1274,85 +414,32 @@ static nve32_t eqos_configure_mtl_queue(nveu32_t qinx, * RFA: Threshold for Activating Flow Control * RFD: Threshold for Deactivating Flow Control */ - update_ehfc_rfa_rfd(rx_fifo, &value); - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MTL_CHX_RX_OP_MODE(qinx), - EQOS_MTL_CH0_RX_OP_MODE_IDX + qinx); + value &= ~EQOS_MTL_RXQ_OP_MODE_RFD_MASK; + value &= ~EQOS_MTL_RXQ_OP_MODE_RFA_MASK; + value |= EQOS_MTL_RXQ_OP_MODE_EHFC; + value |= (rfd_rfa[que_idx] << EQOS_MTL_RXQ_OP_MODE_RFD_SHIFT) & + EQOS_MTL_RXQ_OP_MODE_RFD_MASK; + value |= (rfd_rfa[que_idx] << EQOS_MTL_RXQ_OP_MODE_RFA_SHIFT) & + EQOS_MTL_RXQ_OP_MODE_RFA_MASK; + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MTL_CHX_RX_OP_MODE(que_idx)); /* Transmit Queue weight */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_MTL_TXQ_QW(qinx)); - value |= (EQOS_MTL_TXQ_QW_ISCQW + qinx); - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MTL_TXQ_QW(qinx), - EQOS_MTL_TXQ0_QW_IDX + qinx); + 
EQOS_MTL_TXQ_QW(que_idx)); + value |= EQOS_MTL_TXQ_QW_ISCQW; + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MTL_TXQ_QW(que_idx)); /* Enable Rx Queue Control */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_RQC0R); - value |= ((osi_core->rxq_ctrl[qinx] & EQOS_RXQ_EN_MASK) << (qinx * 2U)); - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MAC_RQC0R, EQOS_MAC_RQC0R_IDX); + value |= ((osi_core->rxq_ctrl[que_idx] & EQOS_RXQ_EN_MASK) << (que_idx * 2U)); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_RQC0R); - return 0; +fail: + return ret; } /** \endcond */ -/** - * @brief eqos_config_rxcsum_offload - Enable/Disable rx checksum offload in HW - * - * @note - * Algorithm: - * - VAlidate enabled param and return -1 if invalid. - * - Read the MAC configuration register. - * - Enable/disable the IP checksum offload engine COE in MAC receiver based on enabled. - * - Update the MAC configuration register. - * - Refer to OSI column of <> for sequence - * of execution. - * - TraceID:ETHERNET_NVETHERNETRM_017 - * - * @param[in] osi_core: OSI core private data structure. Used param is base. - * @param[in] enabled: Flag to indicate feature is to be enabled(OSI_ENABLE)/disabled(OSI_DISABLE). - * - * @pre MAC should be initialized and started. see osi_start_mac() - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static nve32_t eqos_config_rxcsum_offload( - struct osi_core_priv_data *const osi_core, - const nveu32_t enabled) -{ - void *addr = osi_core->base; - nveu32_t mac_mcr; - - if ((enabled != OSI_ENABLE) && (enabled != OSI_DISABLE)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "rxsum_offload: invalid input\n", 0ULL); - return -1; - } - - mac_mcr = osi_readla(osi_core, (nveu8_t *)addr + EQOS_MAC_MCR); - - if (enabled == OSI_ENABLE) { - mac_mcr |= EQOS_MCR_IPC; - } else { - mac_mcr &= ~EQOS_MCR_IPC; - } - - eqos_core_safety_writel(osi_core, mac_mcr, - (nveu8_t *)addr + EQOS_MAC_MCR, - EQOS_MAC_MCR_IDX); - - return 0; -} - /** * @brief eqos_config_frp - Enable/Disale RX Flexible Receive Parser in HW * @@ -1369,18 +456,19 @@ static nve32_t eqos_config_rxcsum_offload( * @retval 0 on success * @retval -1 on failure. */ -static int eqos_config_frp(struct osi_core_priv_data *const osi_core, - const unsigned int enabled) +static nve32_t eqos_config_frp(struct osi_core_priv_data *const osi_core, + const nveu32_t enabled) { - unsigned char *base = osi_core->base; - unsigned int op_mode = 0U, val = 0U; - int ret = 0; + nveu8_t *base = osi_core->base; + nveu32_t op_mode = 0U, val = 0U; + nve32_t ret = 0; - if (enabled != OSI_ENABLE && enabled != OSI_DISABLE) { + if ((enabled != OSI_ENABLE) && (enabled != OSI_DISABLE)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid enable input\n", enabled); - return -1; + ret = -1; + goto done; } /* Disable RE */ @@ -1435,6 +523,7 @@ frp_enable_re: val |= EQOS_MCR_RE; osi_writela(osi_core, val, base + EQOS_MAC_MCR); +done: return ret; } @@ -1444,25 +533,26 @@ frp_enable_re: * Algorithm: * * @param[in] osi_core: OSI core private data. - * @param[in] enabled: Flag to indicate feature is to be enabled/disabled. + * @param[in] nve: Number of Valid Entries. * * @note MAC should be init and started. see osi_start_mac() * * @retval 0 on success * @retval -1 on failure. 
*/ -static int eqos_update_frp_nve(struct osi_core_priv_data *const osi_core, - const unsigned int nve) +static nve32_t eqos_update_frp_nve(struct osi_core_priv_data *const osi_core, + const nveu32_t nve) { - unsigned int val; - unsigned char *base = osi_core->base; + nveu32_t val; + nveu8_t *base = osi_core->base; + nve32_t ret = -1; /* Validate the NVE value */ if (nve >= OSI_FRP_MAX_ENTRY) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid NVE value\n", nve); - return -1; + goto done; } /* Update NVE and NPE in MTL_RXP_Control_Status register */ @@ -1474,7 +564,10 @@ static int eqos_update_frp_nve(struct osi_core_priv_data *const osi_core, val |= ((nve << EQOS_MTL_RXP_CS_NPE_SHIFT) & EQOS_MTL_RXP_CS_NPE); osi_writela(osi_core, val, base + EQOS_MTL_RXP_CS); - return 0; + ret = 0; + +done: + return ret; } /** @@ -1491,13 +584,13 @@ static int eqos_update_frp_nve(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure. */ -static int eqos_frp_write(struct osi_core_priv_data *osi_core, - unsigned int addr, - unsigned int data) +static nve32_t eqos_frp_write(struct osi_core_priv_data *osi_core, + nveu32_t addr, + nveu32_t data) { - int ret = 0; - unsigned char *base = osi_core->base; - unsigned int val = 0U; + nve32_t ret = 0; + nveu8_t *base = osi_core->base; + nveu32_t val = 0U; /* Wait for ready */ ret = osi_readl_poll_timeout((base + EQOS_MTL_RXP_IND_CS), @@ -1511,7 +604,8 @@ static int eqos_frp_write(struct osi_core_priv_data *osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to write\n", val); - return -1; + ret = -1; + goto done; } /* Write data into MTL_RXP_Indirect_Acc_Data */ @@ -1540,9 +634,10 @@ static int eqos_frp_write(struct osi_core_priv_data *osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to write\n", val); - return -1; + ret = -1; } +done: return ret; } @@ -1560,19 +655,20 @@ static int eqos_frp_write(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 
on failure. */ -static int eqos_update_frp_entry(struct osi_core_priv_data *const osi_core, - const unsigned int pos, - struct osi_core_frp_data *const data) +static nve32_t eqos_update_frp_entry(struct osi_core_priv_data *const osi_core, + const nveu32_t pos, + struct osi_core_frp_data *const data) { - unsigned int val = 0U, tmp = 0U; - int ret = -1; + nveu32_t val = 0U, tmp = 0U; + nve32_t ret = -1; /* Validate pos value */ if (pos >= OSI_FRP_MAX_ENTRY) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid FRP table entry\n", pos); - return -1; + ret = -1; + goto done; } /** Write Match Data into IE0 **/ @@ -1580,7 +676,8 @@ static int eqos_update_frp_entry(struct osi_core_priv_data *const osi_core, ret = eqos_frp_write(osi_core, EQOS_MTL_FRP_IE0(pos), val); if (ret < 0) { /* Match Data Write fail */ - return -1; + ret = -1; + goto done; } /** Write Match Enable into IE1 **/ @@ -1588,7 +685,8 @@ static int eqos_update_frp_entry(struct osi_core_priv_data *const osi_core, ret = eqos_frp_write(osi_core, EQOS_MTL_FRP_IE1(pos), val); if (ret < 0) { /* Match Enable Write fail */ - return -1; + ret = -1; + goto done; } /** Write AF, RF, IM, NIC, FO and OKI into IE2 **/ @@ -1618,7 +716,8 @@ static int eqos_update_frp_entry(struct osi_core_priv_data *const osi_core, ret = eqos_frp_write(osi_core, EQOS_MTL_FRP_IE2(pos), val); if (ret < 0) { /* FRP IE2 Write fail */ - return -1; + ret = -1; + goto done; } /** Write DCH into IE3 **/ @@ -1626,9 +725,10 @@ static int eqos_update_frp_entry(struct osi_core_priv_data *const osi_core, ret = eqos_frp_write(osi_core, EQOS_MTL_FRP_IE3(pos), val); if (ret < 0) { /* DCH Write fail */ - return -1; + ret = -1; } +done: return ret; } @@ -1697,9 +797,7 @@ static void eqos_configure_rxq_priority( mfix_var2 <<= mfix_var1; val |= (temp & mfix_var2); /* Priorities Selected in the Receive Queue 0 */ - eqos_core_safety_writel(osi_core, val, - (nveu8_t *)osi_core->base + - EQOS_MAC_RQC2R, EQOS_MAC_RQC2R_IDX); + osi_writela(osi_core, val, 
(nveu8_t *)osi_core->base + EQOS_MAC_RQC2R); } } @@ -1717,21 +815,20 @@ static void eqos_configure_rxq_priority( * @retval 0 on success * @retval -1 on failure */ -static int eqos_hsi_configure(struct osi_core_priv_data *const osi_core, +static nve32_t eqos_hsi_configure(struct osi_core_priv_data *const osi_core, const nveu32_t enable) { nveu32_t value; if (enable == OSI_ENABLE) { osi_core->hsi.enabled = OSI_ENABLE; - osi_core->hsi.reporter_id = hsi_err_code[osi_core->instance_id][REPORTER_IDX]; + osi_core->hsi.reporter_id = OSI_HSI_EQOS0_REPORTER_ID; /* T23X-EQOS_HSIv2-19: Enabling of Consistency Monitor for TX Frame Errors */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_IMR); value |= EQOS_IMR_TXESIE; - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MAC_IMR, EQOS_MAC_IMR_IDX); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_IMR); /* T23X-EQOS_HSIv2-1: Enabling of Memory ECC */ value = osi_readla(osi_core, @@ -1747,14 +844,14 @@ static int eqos_hsi_configure(struct osi_core_priv_data *const osi_core, /* T23X-EQOS_HSIv2-5: Enabling and Initialization of Transaction Timeout */ value = (0x198U << EQOS_TMR_SHIFT) & EQOS_TMR_MASK; - value |= (0x2U << EQOS_LTMRMD_SHIFT) & EQOS_LTMRMD_MASK; - value |= (0x1U << EQOS_NTMRMD_SHIFT) & EQOS_NTMRMD_MASK; + value |= ((nveu32_t)0x2U << EQOS_LTMRMD_SHIFT) & EQOS_LTMRMD_MASK; + value |= ((nveu32_t)0x2U << EQOS_NTMRMD_SHIFT) & EQOS_NTMRMD_MASK; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_FSM_ACT_TIMER); /* T23X-EQOS_HSIv2-3: Enabling and Initialization of Watchdog */ /* T23X-EQOS_HSIv2-4: Enabling of Consistency Monitor for FSM States */ - // TODO: enable EQOS_TMOUTEN + /* TODO enable EQOS_TMOUTEN. 
Bug 3584387 */ value = EQOS_PRTYEN; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_FSM_CONTROL); @@ -1798,8 +895,7 @@ static int eqos_hsi_configure(struct osi_core_priv_data *const osi_core, value = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_IMR); value &= ~EQOS_IMR_TXESIE; - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MAC_IMR, EQOS_MAC_IMR_IDX); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_IMR); /* T23X-EQOS_HSIv2-1: Disable of Memory ECC */ value = osi_readla(osi_core, @@ -1845,7 +941,51 @@ static int eqos_hsi_configure(struct osi_core_priv_data *const osi_core, } return 0; } + +/** + * @brief eqos_hsi_inject_err - inject error + * + * @note + * Algorithm: + * - Use error injection method induce error + * + * @param[in, out] osi_core: OSI core private data structure. + * @param[in] type: UE_IDX/CE_IDX + * + * @retval 0 on success + * @retval -1 on failure + */ + +static nve32_t eqos_hsi_inject_err(struct osi_core_priv_data *const osi_core, + const nveu32_t error_code) +{ + nveu32_t value; + nve32_t ret = 0; + + switch (error_code) { + case OSI_HSI_EQOS0_CE_CODE: + value = (EQOS_MTL_DBG_CTL_EIEC | EQOS_MTL_DBG_CTL_EIEE); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + + EQOS_MTL_DBG_CTL); + break; + case OSI_HSI_EQOS0_UE_CODE: + value = EQOS_MTL_DPP_ECC_EIC_BLEI; + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + + EQOS_MTL_DPP_ECC_EIC); + + value = (EQOS_MTL_DBG_CTL_EIEC | EQOS_MTL_DBG_CTL_EIEE); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + + EQOS_MTL_DBG_CTL); + break; + default: + ret = hsi_common_error_inject(osi_core, error_code); + break; + } + + return ret; +} #endif + /** * @brief eqos_configure_mac - Configure MAC * @@ -1905,8 +1045,7 @@ static void eqos_configure_mac(struct osi_core_priv_data *const osi_core) /* do nothing for default mtu size */ } - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MAC_MCR, 
EQOS_MAC_MCR_IDX); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_MCR); /* Enable common interrupt at wrapper level */ if (osi_core->mac_ver >= OSI_EQOS_MAC_5_30) { @@ -1933,12 +1072,11 @@ static void eqos_configure_mac(struct osi_core_priv_data *const osi_core) /* Routing Multicast and Broadcast depending on mac version */ value &= ~(EQOS_MAC_RQC1R_MCBCQ); if (osi_core->mac_ver > OSI_EQOS_MAC_5_00) { - value |= EQOS_MAC_RQC1R_MCBCQ7 << EQOS_MAC_RQC1R_MCBCQ_SHIFT; + value |= ((nveu32_t)EQOS_MAC_RQC1R_MCBCQ7) << EQOS_MAC_RQC1R_MCBCQ_SHIFT; } else { - value |= EQOS_MAC_RQC1R_MCBCQ3 << EQOS_MAC_RQC1R_MCBCQ_SHIFT; + value |= ((nveu32_t)EQOS_MAC_RQC1R_MCBCQ3) << EQOS_MAC_RQC1R_MCBCQ_SHIFT; } - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MAC_RQC1R, EQOS_MAC_RQC1R_IDX); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_RQC1R); /* Disable all MMC interrupts */ /* Disable all MMC Tx Interrupts */ @@ -1966,8 +1104,7 @@ static void eqos_configure_mac(struct osi_core_priv_data *const osi_core) /* RGSMIIIE - RGMII/SMII interrupt Enable. * LPIIE is not enabled. 
MMC LPI counters is maintained in HW */ value |= EQOS_IMR_RGSMIIIE; - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MAC_IMR, EQOS_MAC_IMR_IDX); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_IMR); /* Enable VLAN configuration */ value = osi_readla(osi_core, @@ -1995,6 +1132,7 @@ static void eqos_configure_mac(struct osi_core_priv_data *const osi_core) osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_VLANTIR); +#ifndef OSI_STRIPPED_LIB /* Configure default flow control settings */ if (osi_core->pause_frames != OSI_PAUSE_FRAMES_DISABLE) { osi_core->flow_ctrl = (OSI_FLOW_CTRL_TX | OSI_FLOW_CTRL_RX); @@ -2005,6 +1143,8 @@ static void eqos_configure_mac(struct osi_core_priv_data *const osi_core) 0ULL); } } +#endif /* !OSI_STRIPPED_LIB */ + /* USP (user Priority) to RxQ Mapping, only if DCS not enabled */ if (osi_core->dcs_en != OSI_ENABLE) { eqos_configure_rxq_priority(osi_core); @@ -2047,9 +1187,7 @@ static void eqos_configure_dma(struct osi_core_priv_data *const osi_core) /* AXI Maximum Write Outstanding Request Limit = 31 */ value |= EQOS_DMA_SBUS_WR_OSR_LMT; - eqos_core_safety_writel(osi_core, value, - (nveu8_t *)base + EQOS_DMA_SBUS, - EQOS_DMA_SBUS_IDX); + osi_writela(osi_core, value, (nveu8_t *)base + EQOS_DMA_SBUS); value = osi_readla(osi_core, (nveu8_t *)base + EQOS_DMA_BMR); value |= EQOS_DMA_BMR_DPSW; @@ -2057,191 +1195,6 @@ static void eqos_configure_dma(struct osi_core_priv_data *const osi_core) } /** \endcond */ -/** - * @brief eqos_enable_mtl_interrupts - Enable MTL interrupts - * - * Algorithm: enable MTL interrupts for EST - * - * @param[in] osi_core: OSI core private data structure. - * - * @note MAC should be init and started. 
see osi_start_mac() - */ -static inline void eqos_enable_mtl_interrupts( - struct osi_core_priv_data *const osi_core) -{ - unsigned int mtl_est_ir = OSI_DISABLE; - void *addr = osi_core->base; - - mtl_est_ir = osi_readla(osi_core, (unsigned char *) - addr + EQOS_MTL_EST_ITRE); - /* enable only MTL interrupt realted to - * Constant Gate Control Error - * Head-Of-Line Blocking due to Scheduling - * Head-Of-Line Blocking due to Frame Size - * BTR Error - * Switch to S/W owned list Complete - */ - mtl_est_ir |= (EQOS_MTL_EST_ITRE_CGCE | EQOS_MTL_EST_ITRE_IEHS | - EQOS_MTL_EST_ITRE_IEHF | EQOS_MTL_EST_ITRE_IEBE | - EQOS_MTL_EST_ITRE_IECC); - osi_writela(osi_core, mtl_est_ir, - (unsigned char *)addr + EQOS_MTL_EST_ITRE); -} - -/** - * @brief eqos_enable_fpe_interrupts - Enable MTL interrupts - * - * Algorithm: enable FPE interrupts - * - * @param[in] osi_core: OSI core private data structure. - * - * @note MAC should be init and started. see osi_start_mac() - */ -static inline void eqos_enable_fpe_interrupts( - struct osi_core_priv_data *const osi_core) -{ - unsigned int value = OSI_DISABLE; - void *addr = osi_core->base; - - /* Read MAC IER Register and enable Frame Preemption Interrupt - * Enable */ - value = osi_readla(osi_core, (unsigned char *)addr + EQOS_MAC_IMR); - value |= EQOS_IMR_FPEIE; - osi_writela(osi_core, value, (unsigned char *)addr + EQOS_MAC_IMR); -} - -/** - * @brief eqos_save_gcl_params - save GCL configs in local core structure - * - * @param[in] osi_core: OSI core private data structure. - * - * @note MAC should be init and started. 
see osi_start_mac() - */ -static inline void eqos_save_gcl_params(struct osi_core_priv_data *osi_core) -{ - struct core_local *l_core = (struct core_local *)osi_core; - unsigned int gcl_widhth[4] = {0, OSI_MAX_24BITS, OSI_MAX_28BITS, - OSI_MAX_32BITS}; - nveu32_t gcl_ti_mask[4] = {0, OSI_MASK_16BITS, OSI_MASK_20BITS, - OSI_MASK_24BITS}; - unsigned int gcl_depthth[6] = {0, OSI_GCL_SIZE_64, OSI_GCL_SIZE_128, - OSI_GCL_SIZE_256, OSI_GCL_SIZE_512, - OSI_GCL_SIZE_1024}; - - if ((osi_core->hw_feature->gcl_width == 0) || - (osi_core->hw_feature->gcl_width > 3)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Wrong HW feature GCL width\n", - (unsigned long long)osi_core->hw_feature->gcl_width); - } else { - l_core->gcl_width_val = - gcl_widhth[osi_core->hw_feature->gcl_width]; - l_core->ti_mask = gcl_ti_mask[osi_core->hw_feature->gcl_width]; - } - - if ((osi_core->hw_feature->gcl_depth == 0) || - (osi_core->hw_feature->gcl_depth > 5)) { - /* Do Nothing */ - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Wrong HW feature GCL depth\n", - (unsigned long long)osi_core->hw_feature->gcl_depth); - } else { - l_core->gcl_dep = gcl_depthth[osi_core->hw_feature->gcl_depth]; - } -} - -/** - * @brief eqos_tsn_init - initialize TSN feature - * - * Algorithm: - * 1) If hardware support EST, - * a) Set default EST configuration - * b) Set enable interrupts - * 2) If hardware supports FPE - * a) Set default FPE configuration - * b) enable interrupts - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] est_sel: EST HW support present or not - * @param[in] fpe_sel: FPE HW support present or not - * - * @note MAC should be init and started. 
see osi_start_mac() - */ -static void eqos_tsn_init(struct osi_core_priv_data *osi_core, - unsigned int est_sel, unsigned int fpe_sel) -{ - unsigned int val = 0x0; - unsigned int temp = 0U; - - if (est_sel == OSI_ENABLE) { - eqos_save_gcl_params(osi_core); - val = osi_readla(osi_core, (unsigned char *)osi_core->base + - EQOS_MTL_EST_CONTROL); - - /* - * PTOV PTP clock period * 6 - * dual-port RAM based asynchronous FIFO controllers or - * Single-port RAM based synchronous FIFO controllers - * CTOV 96 x Tx clock period - * : - * : - * set other default value - */ - val &= ~EQOS_MTL_EST_CONTROL_PTOV; - if (osi_core->pre_si == OSI_ENABLE) { - /* 6*1/(78.6 MHz) in ns*/ - temp = (6U * 13U); - } else { - temp = EQOS_MTL_EST_PTOV_RECOMMEND; - } - temp = temp << EQOS_MTL_EST_CONTROL_PTOV_SHIFT; - val |= temp; - - val &= ~EQOS_MTL_EST_CONTROL_CTOV; - temp = EQOS_MTL_EST_CTOV_RECOMMEND; - temp = temp << EQOS_MTL_EST_CONTROL_CTOV_SHIFT; - val |= temp; - - /*Loop Count to report Scheduling Error*/ - val &= ~EQOS_MTL_EST_CONTROL_LCSE; - val |= EQOS_MTL_EST_CONTROL_LCSE_VAL; - - val &= ~(EQOS_MTL_EST_CONTROL_DDBF | - EQOS_MTL_EST_CONTROL_DFBS); - val |= EQOS_MTL_EST_CONTROL_DDBF; - - osi_writela(osi_core, val, (nveu8_t *)osi_core->base + - EQOS_MTL_EST_CONTROL); - - val = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_MTL_EST_OVERHEAD); - val &= ~EQOS_MTL_EST_OVERHEAD_OVHD; - /* As per hardware team recommendation */ - val |= EQOS_MTL_EST_OVERHEAD_RECOMMEND; - osi_writela(osi_core, val, (nveu8_t *)osi_core->base + - EQOS_MTL_EST_OVERHEAD); - - eqos_enable_mtl_interrupts(osi_core); - } - - if (fpe_sel == OSI_ENABLE) { - val = osi_readla(osi_core, (unsigned char *)osi_core->base + - EQOS_MAC_RQC1R); - val &= ~EQOS_MAC_RQC1R_FPRQ; - temp = osi_core->residual_queue; - temp = temp << EQOS_MAC_RQC1R_FPRQ_SHIFT; - temp = (temp & EQOS_MAC_RQC1R_FPRQ); - val |= temp; - osi_writela(osi_core, val, (unsigned char *)osi_core->base + - EQOS_MAC_RQC1R); - - 
eqos_enable_fpe_interrupts(osi_core); - } - - /* CBS setting for TC should be by user application/IOCTL as - * per requirement */ -} - /** * @brief Map DMA channels to a specific VM IRQ. * @@ -2260,10 +1213,6 @@ static void eqos_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) nveu32_t i, j; nveu32_t chan; - if (osi_core->mac_ver < OSI_EQOS_MAC_5_30) { - return; - } - for (i = 0; i < osi_core->num_vm_irqs; i++) { irq_data = &osi_core->irq_data[i]; for (j = 0; j < irq_data->num_vm_chans; j++) { @@ -2276,7 +1225,7 @@ static void eqos_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) EQOS_VIRT_INTR_APB_CHX_CNTRL(chan)); } osi_writel(OSI_BIT(irq_data->vm_num), - (nveu8_t *)osi_core->base + VIRTUAL_APB_ERR_CTRL); + (nveu8_t *)osi_core->base + VIRTUAL_APB_ERR_CTRL); } } @@ -2292,9 +1241,8 @@ static void eqos_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) * - TraceID:ETHERNET_NVETHERNETRM_006 * * @param[in] osi_core: OSI core private data structure. Used params are - * - base, dcs_en, num_mtl_queues, mtl_queues, mtu, stip_vlan_tag, pause_frames, l3l4_filter_bitmask - * @param[in] tx_fifo_size: MTL TX FIFO size. Max 11. - * @param[in] rx_fifo_size: MTL RX FIFO size. Max 11. + * - base, dcs_en, num_mtl_queues, mtl_queues, mtu, stip_vlan_tag, pause_frames, + * l3l4_filter_bitmask * * @pre * - MAC should be out of reset. See osi_poll_for_mac_reset_complete() @@ -2312,27 +1260,20 @@ static void eqos_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) * @retval 0 on success * @retval -1 on failure. 
*/ -static nve32_t eqos_core_init(struct osi_core_priv_data *const osi_core, - const nveu32_t tx_fifo_size, - const nveu32_t rx_fifo_size) +static nve32_t eqos_core_init(struct osi_core_priv_data *const osi_core) { nve32_t ret = 0; nveu32_t qinx = 0; nveu32_t value = 0; nveu32_t value1 = 0; - nveu32_t tx_fifo = 0; - nveu32_t rx_fifo = 0; - - eqos_core_safety_init(osi_core); - eqos_core_backup_init(osi_core); #ifndef UPDATED_PAD_CAL /* PAD calibration */ ret = eqos_pad_calibrate(osi_core); if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "eqos pad calibration failed\n", 0ULL); - return ret; + goto fail; } #endif /* !UPDATED_PAD_CAL */ @@ -2341,6 +1282,7 @@ static nve32_t eqos_core_init(struct osi_core_priv_data *const osi_core, (nveu8_t *)osi_core->base + EQOS_MMC_CNTRL); if (osi_core->use_virtualization == OSI_DISABLE) { +#ifndef OSI_STRIPPED_LIB if (osi_core->hv_base != OSI_NULL) { osi_writela(osi_core, EQOS_5_30_ASID_CTRL_VAL, (nveu8_t *)osi_core->hv_base + @@ -2350,6 +1292,7 @@ static nve32_t eqos_core_init(struct osi_core_priv_data *const osi_core, (nveu8_t *)osi_core->hv_base + EQOS_AXI_ASID1_CTRL); } +#endif if (osi_core->mac_ver < OSI_EQOS_MAC_5_30) { /* AXI ASID CTRL for channel 0 to 3 */ @@ -2375,45 +1318,37 @@ static nve32_t eqos_core_init(struct osi_core_priv_data *const osi_core, value1 = EQOS_RXQ_TO_DMA_CHAN_MAP1; } - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MTL_RXQ_DMA_MAP0, - EQOS_MTL_RXQ_DMA_MAP0_IDX); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MTL_RXQ_DMA_MAP0); if (osi_core->mac_ver >= OSI_EQOS_MAC_5_30) { - eqos_core_safety_writel(osi_core, value1, - (nveu8_t *)osi_core->base + - EQOS_MTL_RXQ_DMA_MAP1, - EQOS_MTL_RXQ_DMA_MAP1_IDX); + osi_writela(osi_core, value1, (nveu8_t *)osi_core->base + EQOS_MTL_RXQ_DMA_MAP1); } if (osi_unlikely(osi_core->num_mtl_queues > OSI_EQOS_MAX_NUM_QUEUES)) { - OSI_CORE_ERR(OSI_NULL, 
OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Number of queues is incorrect\n", 0ULL); - return -1; + ret = -1; + goto fail; } - /* Calculate value of Transmit queue fifo size to be programmed */ - tx_fifo = eqos_calculate_per_queue_fifo(osi_core->mac_ver, - tx_fifo_size, - osi_core->num_mtl_queues); - /* Calculate value of Receive queue fifo size to be programmed */ - rx_fifo = eqos_calculate_per_queue_fifo(osi_core->mac_ver, - rx_fifo_size, - osi_core->num_mtl_queues); - /* Configure MTL Queues */ for (qinx = 0; qinx < osi_core->num_mtl_queues; qinx++) { if (osi_unlikely(osi_core->mtl_queues[qinx] >= OSI_EQOS_MAX_NUM_QUEUES)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Incorrect queues number\n", 0ULL); - return -1; + ret = -1; + goto fail; } - ret = eqos_configure_mtl_queue(osi_core->mtl_queues[qinx], - osi_core, tx_fifo, rx_fifo); + ret = eqos_configure_mtl_queue(osi_core, osi_core->mtl_queues[qinx]); if (ret < 0) { - return ret; + goto fail; } + /* Enable by default to configure forward error packets. 
+ * Since this is a local function this will always return sucess, + * so no need to check for return value + */ + (void)hw_config_fw_err_pkts(osi_core, osi_core->mtl_queues[qinx], OSI_ENABLE); } /* configure EQOS MAC HW */ @@ -2424,15 +1359,17 @@ static nve32_t eqos_core_init(struct osi_core_priv_data *const osi_core, /* tsn initialization */ if (osi_core->hw_feature != OSI_NULL) { - eqos_tsn_init(osi_core, osi_core->hw_feature->est_sel, - osi_core->hw_feature->fpe_sel); + hw_tsn_init(osi_core, osi_core->hw_feature->est_sel, + osi_core->hw_feature->fpe_sel); } /* initialize L3L4 Filters variable */ osi_core->l3l4_filter_bitmask = OSI_NONE; - eqos_dma_chan_to_vmirq_map(osi_core); - + if (osi_core->mac_ver >= OSI_EQOS_MAC_5_30) { + eqos_dma_chan_to_vmirq_map(osi_core); + } +fail: return ret; } @@ -2448,11 +1385,11 @@ static nve32_t eqos_core_init(struct osi_core_priv_data *const osi_core, */ static void eqos_handle_mac_fpe_intrs(struct osi_core_priv_data *osi_core) { - unsigned int val = 0; + nveu32_t val = 0; /* interrupt bit clear on read as CSR_SW is reset */ val = osi_readla(osi_core, - (unsigned char *)osi_core->base + EQOS_MAC_FPE_CTS); + (nveu8_t *)osi_core->base + EQOS_MAC_FPE_CTS); if ((val & EQOS_MAC_FPE_CTS_RVER) == EQOS_MAC_FPE_CTS_RVER) { val &= ~EQOS_MAC_FPE_CTS_RVER; @@ -2486,7 +1423,58 @@ static void eqos_handle_mac_fpe_intrs(struct osi_core_priv_data *osi_core) } osi_writela(osi_core, val, - (unsigned char *)osi_core->base + EQOS_MAC_FPE_CTS); + (nveu8_t *)osi_core->base + EQOS_MAC_FPE_CTS); +} + +/** + * @brief eqos_handle_mac_link_intrs + * + * Algorithm: This function takes care of handling the + * MAC link interrupts. + * + * @param[in] osi_core: OSI core private data structure. 
+ * + * @note MAC interrupts need to be enabled + */ +static void eqos_handle_mac_link_intrs(struct osi_core_priv_data *osi_core) +{ + nveu32_t mac_pcs = 0; + nve32_t ret = 0; + + mac_pcs = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_PCS); + /* check whether Link is UP or NOT - if not return. */ + if ((mac_pcs & EQOS_MAC_PCS_LNKSTS) == EQOS_MAC_PCS_LNKSTS) { + /* check for Link mode (full/half duplex) */ + if ((mac_pcs & EQOS_MAC_PCS_LNKMOD) == EQOS_MAC_PCS_LNKMOD) { + ret = hw_set_mode(osi_core, OSI_FULL_DUPLEX); + if (osi_unlikely(ret < 0)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "set mode in full duplex failed\n", 0ULL); + } + } else { + ret = hw_set_mode(osi_core, OSI_HALF_DUPLEX); + if (osi_unlikely(ret < 0)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "set mode in half duplex failed\n", 0ULL); + } + } + + /* set speed at MAC level */ + /* TODO: set_tx_clk needs to be done */ + /* Maybe through workqueue for QNX */ + /* hw_set_speed is treated as void since it is + * an internal functin which will be always success + */ + if ((mac_pcs & EQOS_MAC_PCS_LNKSPEED) == EQOS_MAC_PCS_LNKSPEED_10) { + (void)hw_set_speed(osi_core, OSI_SPEED_10); + } else if ((mac_pcs & EQOS_MAC_PCS_LNKSPEED) == EQOS_MAC_PCS_LNKSPEED_100) { + (void)hw_set_speed(osi_core, OSI_SPEED_100); + } else if ((mac_pcs & EQOS_MAC_PCS_LNKSPEED) == EQOS_MAC_PCS_LNKSPEED_1000) { + (void)hw_set_speed(osi_core, OSI_SPEED_1000); + } else { + /* Nothing here */ + } + } } /** @@ -2501,7 +1489,7 @@ static void eqos_handle_mac_fpe_intrs(struct osi_core_priv_data *osi_core) * - RGMII/SMII MAC interrupt * - If link is down * - Identify speed and mode changes from EQOS_MAC_PCS register and configure the same by calling - * eqos_set_speed(), eqos_set_mode()(proceed even on error for this call) API's. + * hw_set_speed(), hw_set_mode()(proceed even on error for this call) API's. * - SWUD_ID: ETHERNET_NVETHERNETRM_010_1 * * @param[in] osi_core: OSI core private data structure. 
Used param base. @@ -2519,104 +1507,51 @@ static void eqos_handle_mac_intrs(struct osi_core_priv_data *const osi_core, nveu32_t dma_isr) { nveu32_t mac_imr = 0; - nveu32_t mac_pcs = 0; nveu32_t mac_isr = 0; - nve32_t ret = 0; #ifdef HSI_SUPPORT nveu64_t tx_frame_err = 0; #endif - mac_isr = osi_readla(osi_core, - (nveu8_t *)osi_core->base + EQOS_MAC_ISR); + mac_isr = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_ISR); -#ifdef HSI_SUPPORT - if (osi_core->mac_ver >= OSI_EQOS_MAC_5_30) { - /* T23X-EQOS_HSIv2-19: Consistency Monitor for TX Frame */ - if ((dma_isr & EQOS_DMA_ISR_TXSTSIS) == EQOS_DMA_ISR_TXSTSIS) { - osi_core->hsi.tx_frame_err_count = - osi_update_stats_counter(osi_core->hsi.tx_frame_err_count, - 1UL); - tx_frame_err = osi_core->hsi.tx_frame_err_count / - osi_core->hsi.err_count_threshold; - if (osi_core->hsi.tx_frame_err_threshold < tx_frame_err) { - osi_core->hsi.tx_frame_err_threshold = tx_frame_err; - osi_core->hsi.report_count_err[TX_FRAME_ERR_IDX] = OSI_ENABLE; - } - osi_core->hsi.err_code[TX_FRAME_ERR_IDX] = - OSI_TX_FRAME_ERR; - osi_core->hsi.report_err = OSI_ENABLE; - } - } -#endif /* Handle MAC interrupts */ - if ((dma_isr & EQOS_DMA_ISR_MACIS) != EQOS_DMA_ISR_MACIS) { - return; - } - - /* handle only those MAC interrupts which are enabled */ - mac_imr = osi_readla(osi_core, - (nveu8_t *)osi_core->base + EQOS_MAC_IMR); - mac_isr = (mac_isr & mac_imr); - - /* RGMII/SMII interrupt */ - if (((mac_isr & EQOS_MAC_ISR_RGSMIIS) != EQOS_MAC_ISR_RGSMIIS) && - ((mac_isr & EQOS_MAC_IMR_FPEIS) != EQOS_MAC_IMR_FPEIS)) { - return; - } - - if (((mac_isr & EQOS_MAC_IMR_FPEIS) == EQOS_MAC_IMR_FPEIS) && - ((mac_imr & EQOS_IMR_FPEIE) == EQOS_IMR_FPEIE)) { - eqos_handle_mac_fpe_intrs(osi_core); - mac_isr &= ~EQOS_MAC_IMR_FPEIS; - } - osi_writela(osi_core, mac_isr, - (nveu8_t *) osi_core->base + EQOS_MAC_ISR); - - mac_pcs = osi_readla(osi_core, - (nveu8_t *)osi_core->base + EQOS_MAC_PCS); - /* check whether Link is UP or NOT - if not return. 
*/ - if ((mac_pcs & EQOS_MAC_PCS_LNKSTS) != EQOS_MAC_PCS_LNKSTS) { - return; - } - - /* check for Link mode (full/half duplex) */ - if ((mac_pcs & EQOS_MAC_PCS_LNKMOD) == EQOS_MAC_PCS_LNKMOD) { - ret = eqos_set_mode(osi_core, OSI_FULL_DUPLEX); - if (osi_unlikely(ret < 0)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "set mode in full duplex failed\n", 0ULL); + if ((dma_isr & EQOS_DMA_ISR_MACIS) == EQOS_DMA_ISR_MACIS) { +#ifdef HSI_SUPPORT + if (osi_core->mac_ver >= OSI_EQOS_MAC_5_30) { + /* T23X-EQOS_HSIv2-19: Consistency Monitor for TX Frame */ + if ((dma_isr & EQOS_DMA_ISR_TXSTSIS) == EQOS_DMA_ISR_TXSTSIS) { + osi_core->hsi.tx_frame_err_count = + osi_update_stats_counter(osi_core->hsi.tx_frame_err_count, + 1UL); + tx_frame_err = osi_core->hsi.tx_frame_err_count / + osi_core->hsi.err_count_threshold; + if (osi_core->hsi.tx_frame_err_threshold < tx_frame_err) { + osi_core->hsi.tx_frame_err_threshold = tx_frame_err; + osi_core->hsi.report_count_err[TX_FRAME_ERR_IDX] = + OSI_ENABLE; + } + osi_core->hsi.err_code[TX_FRAME_ERR_IDX] = OSI_TX_FRAME_ERR; + osi_core->hsi.report_err = OSI_ENABLE; + } } - } else { - ret = eqos_set_mode(osi_core, OSI_HALF_DUPLEX); - if (osi_unlikely(ret < 0)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "set mode in half duplex failed\n", 0ULL); +#endif + /* handle only those MAC interrupts which are enabled */ + mac_imr = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_IMR); + mac_isr = (mac_isr & mac_imr); + + if (((mac_isr & EQOS_MAC_IMR_FPEIS) == EQOS_MAC_IMR_FPEIS) && + ((mac_imr & EQOS_IMR_FPEIE) == EQOS_IMR_FPEIE)) { + eqos_handle_mac_fpe_intrs(osi_core); + } + + if ((mac_isr & EQOS_MAC_ISR_RGSMIIS) == EQOS_MAC_ISR_RGSMIIS) { + eqos_handle_mac_link_intrs(osi_core); } } - /* set speed at MAC level */ - /* TODO: set_tx_clk needs to be done */ - /* Maybe through workqueue for QNX */ - if ((mac_pcs & EQOS_MAC_PCS_LNKSPEED) == EQOS_MAC_PCS_LNKSPEED_10) { - eqos_set_speed(osi_core, OSI_SPEED_10); - } else if ((mac_pcs & 
EQOS_MAC_PCS_LNKSPEED) == - EQOS_MAC_PCS_LNKSPEED_100) { - eqos_set_speed(osi_core, OSI_SPEED_100); - } else if ((mac_pcs & EQOS_MAC_PCS_LNKSPEED) == - EQOS_MAC_PCS_LNKSPEED_1000) { - eqos_set_speed(osi_core, OSI_SPEED_1000); - } else { - /* Nothing here */ - } - - if (((mac_isr & EQOS_MAC_IMR_FPEIS) == EQOS_MAC_IMR_FPEIS) && - ((mac_imr & EQOS_IMR_FPEIE) == EQOS_IMR_FPEIE)) { - eqos_handle_mac_fpe_intrs(osi_core); - mac_isr &= ~EQOS_MAC_IMR_FPEIS; - } - osi_writela(osi_core, mac_isr, - (unsigned char *)osi_core->base + EQOS_MAC_ISR); + return; } +#ifndef OSI_STRIPPED_LIB /** \cond DO_NOT_DOCUMENT */ /** * @brief update_dma_sr_stats - stats for dma_status error @@ -2642,37 +1577,38 @@ static inline void update_dma_sr_stats( nveu64_t val; if ((dma_sr & EQOS_DMA_CHX_STATUS_RBU) == EQOS_DMA_CHX_STATUS_RBU) { - val = osi_core->xstats.rx_buf_unavail_irq_n[qinx]; - osi_core->xstats.rx_buf_unavail_irq_n[qinx] = + val = osi_core->stats.rx_buf_unavail_irq_n[qinx]; + osi_core->stats.rx_buf_unavail_irq_n[qinx] = osi_update_stats_counter(val, 1U); } if ((dma_sr & EQOS_DMA_CHX_STATUS_TPS) == EQOS_DMA_CHX_STATUS_TPS) { - val = osi_core->xstats.tx_proc_stopped_irq_n[qinx]; - osi_core->xstats.tx_proc_stopped_irq_n[qinx] = + val = osi_core->stats.tx_proc_stopped_irq_n[qinx]; + osi_core->stats.tx_proc_stopped_irq_n[qinx] = osi_update_stats_counter(val, 1U); } if ((dma_sr & EQOS_DMA_CHX_STATUS_TBU) == EQOS_DMA_CHX_STATUS_TBU) { - val = osi_core->xstats.tx_buf_unavail_irq_n[qinx]; - osi_core->xstats.tx_buf_unavail_irq_n[qinx] = + val = osi_core->stats.tx_buf_unavail_irq_n[qinx]; + osi_core->stats.tx_buf_unavail_irq_n[qinx] = osi_update_stats_counter(val, 1U); } if ((dma_sr & EQOS_DMA_CHX_STATUS_RPS) == EQOS_DMA_CHX_STATUS_RPS) { - val = osi_core->xstats.rx_proc_stopped_irq_n[qinx]; - osi_core->xstats.rx_proc_stopped_irq_n[qinx] = + val = osi_core->stats.rx_proc_stopped_irq_n[qinx]; + osi_core->stats.rx_proc_stopped_irq_n[qinx] = osi_update_stats_counter(val, 1U); } if ((dma_sr & 
EQOS_DMA_CHX_STATUS_RWT) == EQOS_DMA_CHX_STATUS_RWT) { - val = osi_core->xstats.rx_watchdog_irq_n; - osi_core->xstats.rx_watchdog_irq_n = + val = osi_core->stats.rx_watchdog_irq_n; + osi_core->stats.rx_watchdog_irq_n = osi_update_stats_counter(val, 1U); } if ((dma_sr & EQOS_DMA_CHX_STATUS_FBE) == EQOS_DMA_CHX_STATUS_FBE) { - val = osi_core->xstats.fatal_bus_error_irq_n; - osi_core->xstats.fatal_bus_error_irq_n = + val = osi_core->stats.fatal_bus_error_irq_n; + osi_core->stats.fatal_bus_error_irq_n = osi_update_stats_counter(val, 1U); } } /** \endcond */ +#endif /* !OSI_STRIPPED_LIB */ /** * @brief eqos_handle_mtl_intrs - Handle MTL interrupts @@ -2692,37 +1628,37 @@ static inline void update_dma_sr_stats( */ static void eqos_handle_mtl_intrs(struct osi_core_priv_data *osi_core) { - unsigned int val = 0U; - unsigned int sch_err = 0U; - unsigned int frm_err = 0U; - unsigned int temp = 0U; - unsigned int i = 0; - unsigned long stat_val = 0U; - unsigned int value = 0U; + nveu32_t val = 0U; + nveu32_t sch_err = 0U; + nveu32_t frm_err = 0U; + nveu32_t temp = 0U; + nveu32_t i = 0; + nveul64_t stat_val = 0U; + nveu32_t value = 0U; val = osi_readla(osi_core, - (unsigned char *)osi_core->base + EQOS_MTL_EST_STATUS); + (nveu8_t *)osi_core->base + EQOS_MTL_EST_STATUS); val &= (EQOS_MTL_EST_STATUS_CGCE | EQOS_MTL_EST_STATUS_HLBS | EQOS_MTL_EST_STATUS_HLBF | EQOS_MTL_EST_STATUS_BTRE | EQOS_MTL_EST_STATUS_SWLC); /* return if interrupt is not related to EST */ if (val == OSI_DISABLE) { - return; + goto done; } /* increase counter write 1 back will clear */ if ((val & EQOS_MTL_EST_STATUS_CGCE) == EQOS_MTL_EST_STATUS_CGCE) { osi_core->est_ready = OSI_DISABLE; - stat_val = osi_core->tsn_stats.const_gate_ctr_err; - osi_core->tsn_stats.const_gate_ctr_err = + stat_val = osi_core->stats.const_gate_ctr_err; + osi_core->stats.const_gate_ctr_err = osi_update_stats_counter(stat_val, 1U); } if ((val & EQOS_MTL_EST_STATUS_HLBS) == EQOS_MTL_EST_STATUS_HLBS) { osi_core->est_ready = OSI_DISABLE; 
- stat_val = osi_core->tsn_stats.head_of_line_blk_sch; - osi_core->tsn_stats.head_of_line_blk_sch = + stat_val = osi_core->stats.head_of_line_blk_sch; + osi_core->stats.head_of_line_blk_sch = osi_update_stats_counter(stat_val, 1U); /* Need to read MTL_EST_Sch_Error register and cleared */ sch_err = osi_readla(osi_core, (nveu8_t *)osi_core->base + @@ -2731,8 +1667,8 @@ static void eqos_handle_mtl_intrs(struct osi_core_priv_data *osi_core) temp = OSI_ENABLE; temp = temp << i; if ((sch_err & temp) == temp) { - stat_val = osi_core->tsn_stats.hlbs_q[i]; - osi_core->tsn_stats.hlbs_q[i] = + stat_val = osi_core->stats.hlbs_q[i]; + osi_core->stats.hlbs_q[i] = osi_update_stats_counter(stat_val, 1U); } } @@ -2747,7 +1683,7 @@ static void eqos_handle_mtl_intrs(struct osi_core_priv_data *osi_core) value &= ~EQOS_MTL_EST_CONTROL_EEST; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MTL_EST_CONTROL); - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Disabling EST due to HLBS, correct GCL\n", OSI_NONE); } @@ -2755,8 +1691,8 @@ static void eqos_handle_mtl_intrs(struct osi_core_priv_data *osi_core) if ((val & EQOS_MTL_EST_STATUS_HLBF) == EQOS_MTL_EST_STATUS_HLBF) { osi_core->est_ready = OSI_DISABLE; - stat_val = osi_core->tsn_stats.head_of_line_blk_frm; - osi_core->tsn_stats.head_of_line_blk_frm = + stat_val = osi_core->stats.head_of_line_blk_frm; + osi_core->stats.head_of_line_blk_frm = osi_update_stats_counter(stat_val, 1U); /* Need to read MTL_EST_Frm_Size_Error register and cleared */ frm_err = osi_readla(osi_core, (nveu8_t *)osi_core->base + @@ -2765,8 +1701,8 @@ static void eqos_handle_mtl_intrs(struct osi_core_priv_data *osi_core) temp = OSI_ENABLE; temp = temp << i; if ((frm_err & temp) == temp) { - stat_val = osi_core->tsn_stats.hlbf_q[i]; - osi_core->tsn_stats.hlbf_q[i] = + stat_val = osi_core->stats.hlbf_q[i]; + osi_core->stats.hlbf_q[i] = osi_update_stats_counter(stat_val, 1U); } } @@ -2782,7 +1718,7 @@ static 
void eqos_handle_mtl_intrs(struct osi_core_priv_data *osi_core) value &= ~EQOS_MTL_EST_CONTROL_EEST; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MTL_EST_CONTROL); - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Disabling EST due to HLBF, correct GCL\n", OSI_NONE); } @@ -2793,21 +1729,24 @@ static void eqos_handle_mtl_intrs(struct osi_core_priv_data *osi_core) EQOS_MTL_EST_STATUS_BTRE) { osi_core->est_ready = OSI_ENABLE; } - stat_val = osi_core->tsn_stats.sw_own_list_complete; - osi_core->tsn_stats.sw_own_list_complete = + stat_val = osi_core->stats.sw_own_list_complete; + osi_core->stats.sw_own_list_complete = osi_update_stats_counter(stat_val, 1U); } if ((val & EQOS_MTL_EST_STATUS_BTRE) == EQOS_MTL_EST_STATUS_BTRE) { osi_core->est_ready = OSI_DISABLE; - stat_val = osi_core->tsn_stats.base_time_reg_err; - osi_core->tsn_stats.base_time_reg_err = + stat_val = osi_core->stats.base_time_reg_err; + osi_core->stats.base_time_reg_err = osi_update_stats_counter(stat_val, 1U); osi_core->est_ready = OSI_DISABLE; } /* clear EST status register as interrupt is handled */ osi_writela(osi_core, val, (nveu8_t *)osi_core->base + EQOS_MTL_EST_STATUS); + +done: + return; } #ifdef HSI_SUPPORT @@ -2838,8 +1777,7 @@ static void eqos_handle_hsi_intr(struct osi_core_priv_data *const osi_core) EQOS_WRAP_COMMON_INTR_STATUS); if (((val & EQOS_REGISTER_PARITY_ERR) == EQOS_REGISTER_PARITY_ERR) || ((val & EQOS_CORE_UNCORRECTABLE_ERR) == EQOS_CORE_UNCORRECTABLE_ERR)) { - osi_core->hsi.err_code[UE_IDX] = - hsi_err_code[osi_core->instance_id][UE_IDX]; + osi_core->hsi.err_code[UE_IDX] = OSI_HSI_EQOS0_UE_CODE; osi_core->hsi.report_err = OSI_ENABLE; osi_core->hsi.report_count_err[UE_IDX] = OSI_ENABLE; /* Disable the interrupt */ @@ -2851,8 +1789,7 @@ static void eqos_handle_hsi_intr(struct osi_core_priv_data *const osi_core) EQOS_WRAP_COMMON_INTR_ENABLE); } if ((val & EQOS_CORE_CORRECTABLE_ERR) == EQOS_CORE_CORRECTABLE_ERR) { - 
osi_core->hsi.err_code[CE_IDX] = - hsi_err_code[osi_core->instance_id][CE_IDX]; + osi_core->hsi.err_code[CE_IDX] = OSI_HSI_EQOS0_CE_CODE; osi_core->hsi.report_err = OSI_ENABLE; osi_core->hsi.ce_count = osi_update_stats_counter(osi_core->hsi.ce_count, 1UL); @@ -2897,7 +1834,8 @@ static void eqos_handle_hsi_intr(struct osi_core_priv_data *const osi_core) * Algorithm: * - Reads DMA ISR register * - Returns if calue is 0. - * - Handle Non-TI/RI interrupts for all MTL queues and increments #osi_core_priv_data->xstats + * - Handle Non-TI/RI interrupts for all MTL queues and + * increments #osi_core_priv_data->stats * based on error detected per cahnnel. * - Calls eqos_handle_mac_intrs() to handle MAC interrupts. * - Refer to EQOS column of <> for API details. @@ -2935,138 +1873,66 @@ static void eqos_handle_common_intr(struct osi_core_priv_data *const osi_core) } dma_isr = osi_readla(osi_core, (nveu8_t *)base + EQOS_DMA_ISR); - if (dma_isr == 0U) { - return; - } + if (dma_isr != 0U) { + //FIXME Need to check how we can get the DMA channel here instead of + //MTL Queues + if ((dma_isr & EQOS_DMA_CHAN_INTR_STATUS) != 0U) { + /* Handle Non-TI/RI interrupts */ + for (i = 0; i < osi_core->num_mtl_queues; i++) { + qinx = osi_core->mtl_queues[i]; + if (qinx >= OSI_EQOS_MAX_NUM_CHANS) { + continue; + } - //FIXME Need to check how we can get the DMA channel here instead of - //MTL Queues - if ((dma_isr & EQOS_DMA_CHAN_INTR_STATUS) != 0U) { - /* Handle Non-TI/RI interrupts */ - for (i = 0; i < osi_core->num_mtl_queues; i++) { - qinx = osi_core->mtl_queues[i]; - if (qinx >= OSI_EQOS_MAX_NUM_CHANS) { - continue; - } + /* read dma channel status register */ + dma_sr = osi_readla(osi_core, (nveu8_t *)base + + EQOS_DMA_CHX_STATUS(qinx)); + /* read dma channel interrupt enable register */ + dma_ier = osi_readla(osi_core, (nveu8_t *)base + + EQOS_DMA_CHX_IER(qinx)); - /* read dma channel status register */ - dma_sr = osi_readla(osi_core, (nveu8_t *)base + + /* process only those 
interrupts which we + * have enabled. + */ + dma_sr = (dma_sr & dma_ier); + + /* mask off RI and TI */ + dma_sr &= ~(OSI_BIT(6) | OSI_BIT(0)); + if (dma_sr == 0U) { + continue; + } + + /* ack non ti/ri ints */ + osi_writela(osi_core, dma_sr, (nveu8_t *)base + EQOS_DMA_CHX_STATUS(qinx)); - /* read dma channel interrupt enable register */ - dma_ier = osi_readla(osi_core, (nveu8_t *)base + - EQOS_DMA_CHX_IER(qinx)); - - /* process only those interrupts which we - * have enabled. - */ - dma_sr = (dma_sr & dma_ier); - - /* mask off RI and TI */ - dma_sr &= ~(OSI_BIT(6) | OSI_BIT(0)); - if (dma_sr == 0U) { - continue; +#ifndef OSI_STRIPPED_LIB + update_dma_sr_stats(osi_core, dma_sr, qinx); +#endif /* !OSI_STRIPPED_LIB */ } - - /* ack non ti/ri ints */ - osi_writela(osi_core, dma_sr, (nveu8_t *)base + - EQOS_DMA_CHX_STATUS(qinx)); - update_dma_sr_stats(osi_core, dma_sr, qinx); } + + eqos_handle_mac_intrs(osi_core, dma_isr); + + /* Handle MTL inerrupts */ + mtl_isr = osi_readla(osi_core, (nveu8_t *)base + EQOS_MTL_INTR_STATUS); + if (((mtl_isr & EQOS_MTL_IS_ESTIS) == EQOS_MTL_IS_ESTIS) && + ((dma_isr & EQOS_DMA_ISR_MTLIS) == EQOS_DMA_ISR_MTLIS)) { + eqos_handle_mtl_intrs(osi_core); + mtl_isr &= ~EQOS_MTL_IS_ESTIS; + osi_writela(osi_core, mtl_isr, (nveu8_t *)base + EQOS_MTL_INTR_STATUS); + } + + /* Clear FRP Interrupt MTL_RXP_Interrupt_Control_Status */ + frp_isr = osi_readla(osi_core, (nveu8_t *)base + EQOS_MTL_RXP_INTR_CS); + frp_isr |= (EQOS_MTL_RXP_INTR_CS_NVEOVIS | EQOS_MTL_RXP_INTR_CS_NPEOVIS | + EQOS_MTL_RXP_INTR_CS_FOOVIS | EQOS_MTL_RXP_INTR_CS_PDRFIS); + osi_writela(osi_core, frp_isr, (nveu8_t *)base + EQOS_MTL_RXP_INTR_CS); + } else { + /* Do Nothing */ } - - eqos_handle_mac_intrs(osi_core, dma_isr); - /* Handle MTL inerrupts */ - mtl_isr = osi_readla(osi_core, - (unsigned char *)base + EQOS_MTL_INTR_STATUS); - if (((mtl_isr & EQOS_MTL_IS_ESTIS) == EQOS_MTL_IS_ESTIS) && - ((dma_isr & EQOS_DMA_ISR_MTLIS) == EQOS_DMA_ISR_MTLIS)) { - eqos_handle_mtl_intrs(osi_core); 
- mtl_isr &= ~EQOS_MTL_IS_ESTIS; - osi_writela(osi_core, mtl_isr, (unsigned char *)base + - EQOS_MTL_INTR_STATUS); - } - - /* Clear FRP Interrupt MTL_RXP_Interrupt_Control_Status */ - frp_isr = osi_readla(osi_core, - (unsigned char *)base + EQOS_MTL_RXP_INTR_CS); - frp_isr |= (EQOS_MTL_RXP_INTR_CS_NVEOVIS | - EQOS_MTL_RXP_INTR_CS_NPEOVIS | - EQOS_MTL_RXP_INTR_CS_FOOVIS | - EQOS_MTL_RXP_INTR_CS_PDRFIS); - osi_writela(osi_core, frp_isr, - (unsigned char *)base + EQOS_MTL_RXP_INTR_CS); } -/** - * @brief eqos_start_mac - Start MAC Tx/Rx engine - * - * @note - * Algorithm: - * - Enable MAC Transmitter and Receiver in EQOS_MAC_MCR_IDX - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_008 - * - * @param[in] osi_core: OSI core private data structure. - * - * @pre - * - MAC init should be complete. See osi_hw_core_init() and - * osi_hw_dma_init() - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - */ -static void eqos_start_mac(struct osi_core_priv_data *const osi_core) -{ - nveu32_t value; - void *addr = osi_core->base; - - value = osi_readla(osi_core, (nveu8_t *)addr + EQOS_MAC_MCR); - /* Enable MAC Transmit */ - /* Enable MAC Receive */ - value |= EQOS_MCR_TE | EQOS_MCR_RE; - eqos_core_safety_writel(osi_core, value, - (nveu8_t *)addr + EQOS_MAC_MCR, - EQOS_MAC_MCR_IDX); -} - -/** - * @brief eqos_stop_mac - Stop MAC Tx/Rx engine - * - * @note - * Algorithm: - * - Disable MAC Transmitter and Receiver in EQOS_MAC_MCR_IDX - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_007 - * - * @param[in] osi_core: OSI core private data structure. - * - * @pre MAC DMA deinit should be complete. 
See osi_hw_dma_deinit() - * - * @note - * API Group: - * - Initialization: No - * - Run time: No - * - De-initialization: Yes - */ -static void eqos_stop_mac(struct osi_core_priv_data *const osi_core) -{ - nveu32_t value; - void *addr = osi_core->base; - - value = osi_readla(osi_core, (nveu8_t *)addr + EQOS_MAC_MCR); - /* Disable MAC Transmit */ - /* Disable MAC Receive */ - value &= ~EQOS_MCR_TE; - value &= ~EQOS_MCR_RE; - eqos_core_safety_writel(osi_core, value, - (nveu8_t *)addr + EQOS_MAC_MCR, - EQOS_MAC_MCR_IDX); -} - -#ifdef MACSEC_SUPPORT +#if defined(MACSEC_SUPPORT) && !defined(OSI_STRIPPED_LIB) /** * @brief eqos_config_mac_tx - Enable/Disable MAC Tx * @@ -3095,140 +1961,16 @@ static void eqos_config_mac_tx(struct osi_core_priv_data *const osi_core, value = osi_readla(osi_core, (nveu8_t *)addr + EQOS_MAC_MCR); /* Enable MAC Transmit */ value |= EQOS_MCR_TE; - eqos_core_safety_writel(osi_core, value, - (nveu8_t *)addr + EQOS_MAC_MCR, - EQOS_MAC_MCR_IDX); + osi_writela(osi_core, value, (nveu8_t *)addr + EQOS_MAC_MCR); } else { value = osi_readla(osi_core, (nveu8_t *)addr + EQOS_MAC_MCR); /* Disable MAC Transmit */ value &= ~EQOS_MCR_TE; - eqos_core_safety_writel(osi_core, value, - (nveu8_t *)addr + EQOS_MAC_MCR, - EQOS_MAC_MCR_IDX); + osi_writela(osi_core, value, (nveu8_t *)addr + EQOS_MAC_MCR); } } #endif /* MACSEC_SUPPORT */ -/** - * @brief eqos_config_l2_da_perfect_inverse_match - configure register for - * inverse or perfect match. - * - * @note - * Algorithm: - * - use perfect_inverse_match filed to set perfect/inverse matching for L2 DA. - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_018 - * - * @param[in] base: Base address from OSI core private data structure. - * @param[in] perfect_inverse_match: OSI_INV_MATCH - inverse mode else - perfect mode - * - * @pre MAC should be initialized and started. 
see osi_start_mac() - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 always - */ -static inline nve32_t eqos_config_l2_da_perfect_inverse_match( - struct osi_core_priv_data *const osi_core, - nveu32_t perfect_inverse_match) -{ - nveu32_t value = 0U; - - value = osi_readla(osi_core, - (nveu8_t *)osi_core->base + EQOS_MAC_PFR); - value &= ~EQOS_MAC_PFR_DAIF; - if (perfect_inverse_match == OSI_INV_MATCH) { - value |= EQOS_MAC_PFR_DAIF; - } - eqos_core_safety_writel(osi_core, value, - (nveu8_t *)osi_core->base + EQOS_MAC_PFR, - EQOS_MAC_PFR_IDX); - - return 0; -} - -/** - * @brief eqos_config_mac_pkt_filter_reg - configure mac filter register. - * - * @note - * - This sequence is used to configure MAC in different pkt - * processing modes like promiscuous, multicast, unicast, - * hash unicast/multicast based on input filter arguments. - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_018 - * - * @param[in] osi_core: OSI core private data structure. Used param base. - * @param[in] filter: OSI filter structure. used param oper_mode. - * - * @pre MAC should be initialized and started. 
see osi_start_mac() - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 always - */ -static nve32_t eqos_config_mac_pkt_filter_reg( - struct osi_core_priv_data *const osi_core, - const struct osi_filter *filter) -{ - nveu32_t value = 0U; - nve32_t ret = 0; - - value = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_PFR); - - /*Retain all other values */ - value &= (EQOS_MAC_PFR_DAIF | EQOS_MAC_PFR_DBF | EQOS_MAC_PFR_SAIF | - EQOS_MAC_PFR_SAF | EQOS_MAC_PFR_PCF | EQOS_MAC_PFR_VTFE | - EQOS_MAC_PFR_IPFE | EQOS_MAC_PFR_DNTU | EQOS_MAC_PFR_RA); - - if ((filter->oper_mode & OSI_OPER_EN_PROMISC) != OSI_DISABLE) { - value |= EQOS_MAC_PFR_PR; - } - - if ((filter->oper_mode & OSI_OPER_DIS_PROMISC) != OSI_DISABLE) { - value &= ~EQOS_MAC_PFR_PR; - } - - if ((filter->oper_mode & OSI_OPER_EN_ALLMULTI) != OSI_DISABLE) { - value |= EQOS_MAC_PFR_PM; - } - - if ((filter->oper_mode & OSI_OPER_DIS_ALLMULTI) != OSI_DISABLE) { - value &= ~EQOS_MAC_PFR_PM; - } - - if ((filter->oper_mode & OSI_OPER_EN_PERFECT) != OSI_DISABLE) { - value |= EQOS_MAC_PFR_HPF; - } - - if ((filter->oper_mode & OSI_OPER_DIS_PERFECT) != OSI_DISABLE) { - value &= ~EQOS_MAC_PFR_HPF; - } - - - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MAC_PFR, EQOS_MAC_PFR_IDX); - - if ((filter->oper_mode & OSI_OPER_EN_L2_DA_INV) != OSI_DISABLE) { - ret = eqos_config_l2_da_perfect_inverse_match(osi_core, - OSI_INV_MATCH); - } - - if ((filter->oper_mode & OSI_OPER_DIS_L2_DA_INV) != OSI_DISABLE) { - ret = eqos_config_l2_da_perfect_inverse_match(osi_core, - OSI_PFT_MATCH); - } - - return ret; -} - /** * @brief eqos_update_mac_addr_helper - Function to update DCS and MBC; helper function for * eqos_update_mac_addr_low_high_reg() @@ -3246,9 +1988,9 @@ static nve32_t eqos_config_mac_pkt_filter_reg( * @param[in] osi_core: OSI core private data structure. Used param base. 
* @param[out] value: nveu32_t pointer which has value read from register. * @param[in] idx: Refer #osi_filter->index for details. - * @param[in] dma_routing_enable: Refer #osi_filter->dma_routing for details. * @param[in] dma_chan: Refer #osi_filter->dma_chan for details. * @param[in] addr_mask: Refer #osi_filter->addr_mask for details. + * @param[in] src_dest: source/destination MAC address. * * @pre * - MAC should be initialized and started. see osi_start_mac() @@ -3272,6 +2014,7 @@ static inline nve32_t eqos_update_mac_addr_helper( OSI_UNUSED const nveu32_t src_dest) { nveu32_t temp; + nve32_t ret = 0; /* PDC bit of MAC_Ext_Configuration register is set so binary * value representation form index 32-127 else hot-bit @@ -3303,11 +2046,11 @@ static inline nve32_t eqos_update_mac_addr_helper( OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "invalid address index for MBC\n", 0ULL); - return -1; + ret = -1; } } - return 0; + return ret; } /** @@ -3321,7 +2064,7 @@ static inline nve32_t eqos_update_mac_addr_helper( * * @param[in] osi_core: OSI core private data structure. * @param[out] value: nveu32_t pointer which has value read from register. 
- * @param[in] idx: filter index + * @param[in] filter_idx: filter index * @param[in] dma_routing_enable: dma channel routing enable(1) * @param[in] dma_chan: dma channel number * @@ -3337,44 +2080,45 @@ static inline nve32_t eqos_update_mac_addr_helper( */ static void eqos_l2_filter_delete(struct osi_core_priv_data *osi_core, nveu32_t *value, - const nveu32_t idx, + const nveu32_t filter_idx, const nveu32_t dma_routing_enable, const nveu32_t dma_chan) { nveu32_t dcs_check = *value; nveu32_t temp = OSI_DISABLE; + nveu32_t idx = (filter_idx & 0xFFU); osi_writela(osi_core, OSI_MAX_32BITS, (nveu8_t *)osi_core->base + EQOS_MAC_ADDRL((idx))); *value |= OSI_MASK_16BITS; - if (dma_routing_enable == OSI_DISABLE || - osi_core->mac_ver < OSI_EQOS_MAC_5_00) { - *value &= ~(EQOS_MAC_ADDRH_AE | EQOS_MAC_ADDRH_DCS); - osi_writela(osi_core, *value, (nveu8_t *)osi_core->base + - EQOS_MAC_ADDRH((idx))); - return; - } - - dcs_check &= EQOS_MAC_ADDRH_DCS; - dcs_check = dcs_check >> EQOS_MAC_ADDRH_DCS_SHIFT; - - if (idx >= EQOS_MAX_MAC_ADDR_REG) { - dcs_check = OSI_DISABLE; - } else { - temp = OSI_BIT(dma_chan); - dcs_check &= ~(temp); - } - - if (dcs_check == OSI_DISABLE) { + if ((dma_routing_enable == OSI_DISABLE) || + (osi_core->mac_ver < OSI_EQOS_MAC_5_00)) { *value &= ~(EQOS_MAC_ADDRH_AE | EQOS_MAC_ADDRH_DCS); osi_writela(osi_core, *value, (nveu8_t *)osi_core->base + EQOS_MAC_ADDRH((idx))); } else { - *value &= ~(EQOS_MAC_ADDRH_DCS); - *value |= (dcs_check << EQOS_MAC_ADDRH_DCS_SHIFT); - osi_writela(osi_core, *value, (nveu8_t *)osi_core->base + - EQOS_MAC_ADDRH((idx))); + + dcs_check &= EQOS_MAC_ADDRH_DCS; + dcs_check = dcs_check >> EQOS_MAC_ADDRH_DCS_SHIFT; + + if (idx >= EQOS_MAX_MAC_ADDR_REG) { + dcs_check = OSI_DISABLE; + } else { + temp = OSI_BIT(dma_chan); + dcs_check &= ~(temp); + } + + if (dcs_check == OSI_DISABLE) { + *value &= ~(EQOS_MAC_ADDRH_AE | EQOS_MAC_ADDRH_DCS); + osi_writela(osi_core, *value, (nveu8_t *)osi_core->base + + EQOS_MAC_ADDRH((idx))); + } else { + 
*value &= ~(EQOS_MAC_ADDRH_DCS); + *value |= (dcs_check << EQOS_MAC_ADDRH_DCS_SHIFT); + osi_writela(osi_core, *value, (nveu8_t *)osi_core->base + + EQOS_MAC_ADDRH((idx))); + } } return; @@ -3413,6 +2157,7 @@ static nve32_t eqos_update_mac_addr_low_high_reg( struct osi_core_priv_data *const osi_core, const struct osi_filter *filter) { + const struct core_local *l_core = (struct core_local *)(void *)osi_core; nveu32_t idx = filter->index; nveu32_t dma_routing_enable = filter->dma_routing; nveu32_t dma_chan = filter->dma_chan; @@ -3420,13 +2165,16 @@ static nve32_t eqos_update_mac_addr_low_high_reg( nveu32_t src_dest = filter->src_dest; nveu32_t value = OSI_DISABLE; nve32_t ret = 0; + const nveu32_t eqos_max_madd[2] = {EQOS_MAX_MAC_ADDRESS_FILTER, + EQOS_MAX_MAC_5_3_ADDRESS_FILTER}; - if ((idx > (EQOS_MAX_MAC_ADDRESS_FILTER - 0x1U)) || + if ((idx >= eqos_max_madd[l_core->l_mac_ver]) || (dma_chan >= OSI_EQOS_MAX_NUM_CHANS)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "invalid MAC filter index or channel number\n", 0ULL); - return -1; + ret = -1; + goto fail; } /* read current value at index preserve DCS current value */ @@ -3437,42 +2185,40 @@ static nve32_t eqos_update_mac_addr_low_high_reg( if ((filter->oper_mode & OSI_OPER_ADDR_DEL) != OSI_NONE) { eqos_l2_filter_delete(osi_core, &value, idx, dma_routing_enable, dma_chan); - return 0; + } else { + ret = eqos_update_mac_addr_helper(osi_core, &value, idx, dma_chan, + addr_mask, src_dest); + /* Check return value from helper code */ + if (ret == -1) { + goto fail; + } + + /* Update AE bit if OSI_OPER_ADDR_UPDATE is set */ + if ((filter->oper_mode & OSI_OPER_ADDR_UPDATE) == OSI_OPER_ADDR_UPDATE) { + value |= EQOS_MAC_ADDRH_AE; + } + + /* Setting Source/Destination Address match valid for 1 to 32 index */ + if (((idx > 0U) && (idx < EQOS_MAX_MAC_ADDR_REG)) && (src_dest <= OSI_SA_MATCH)) { + value = (value | ((src_dest << EQOS_MAC_ADDRH_SA_SHIFT) & + EQOS_MAC_ADDRH_SA)); + } + + osi_writela(osi_core, 
((nveu32_t)filter->mac_address[4] | + ((nveu32_t)filter->mac_address[5] << 8) | value), + (nveu8_t *)osi_core->base + EQOS_MAC_ADDRH((idx))); + + osi_writela(osi_core, ((nveu32_t)filter->mac_address[0] | + ((nveu32_t)filter->mac_address[1] << 8) | + ((nveu32_t)filter->mac_address[2] << 16) | + ((nveu32_t)filter->mac_address[3] << 24)), + (nveu8_t *)osi_core->base + EQOS_MAC_ADDRL((idx))); } - - ret = eqos_update_mac_addr_helper(osi_core, &value, idx, dma_chan, - addr_mask, src_dest); - /* Check return value from helper code */ - if (ret == -1) { - return ret; - } - - /* Update AE bit if OSI_OPER_ADDR_UPDATE is set */ - if ((filter->oper_mode & OSI_OPER_ADDR_UPDATE) == - OSI_OPER_ADDR_UPDATE) { - value |= EQOS_MAC_ADDRH_AE; - } - - /* Setting Source/Destination Address match valid for 1 to 32 index */ - if (((idx > 0U) && (idx < EQOS_MAX_MAC_ADDR_REG)) && - (src_dest <= OSI_SA_MATCH)) { - value = (value | ((src_dest << EQOS_MAC_ADDRH_SA_SHIFT) & - EQOS_MAC_ADDRH_SA)); - } - - osi_writela(osi_core, ((nveu32_t)filter->mac_address[4] | - ((nveu32_t)filter->mac_address[5] << 8) | value), - (nveu8_t *)osi_core->base + EQOS_MAC_ADDRH((idx))); - - osi_writela(osi_core, ((nveu32_t)filter->mac_address[0] | - ((nveu32_t)filter->mac_address[1] << 8) | - ((nveu32_t)filter->mac_address[2] << 16) | - ((nveu32_t)filter->mac_address[3] << 24)), - (nveu8_t *)osi_core->base + EQOS_MAC_ADDRL((idx))); - +fail: return ret; } +#ifndef OSI_STRIPPED_LIB /** * @brief eqos_config_ptp_offload - Enable/Disable PTP offload * @@ -3489,14 +2235,14 @@ static nve32_t eqos_update_mac_addr_low_high_reg( * @retval 0 on success * @retval -1 on failure. 
*/ -static int eqos_config_ptp_offload(struct osi_core_priv_data *osi_core, +static nve32_t eqos_config_ptp_offload(struct osi_core_priv_data *const osi_core, struct osi_pto_config *const pto_config) { - unsigned char *addr = (unsigned char *)osi_core->base; - int ret = 0; - unsigned int value = 0x0U; - unsigned int ptc_value = 0x0U; - unsigned int port_id = 0x0U; + nveu8_t *addr = (nveu8_t *)osi_core->base; + nve32_t ret = 0; + nveu32_t value = 0x0U; + nveu32_t ptc_value = 0x0U; + nveu32_t port_id = 0x0U; /* Read MAC TCR */ value = osi_readla(osi_core, (nveu8_t *)addr + EQOS_MAC_TCR); @@ -3511,8 +2257,7 @@ static int eqos_config_ptp_offload(struct osi_core_priv_data *osi_core, if (pto_config->en_dis == OSI_DISABLE) { osi_core->ptp_config.ptp_filter = value; osi_writela(osi_core, ptc_value, addr + EQOS_MAC_PTO_CR); - eqos_core_safety_writel(osi_core, value, addr + - EQOS_MAC_TCR, EQOS_MAC_TCR_IDX); + osi_writela(osi_core, value, addr + EQOS_MAC_TCR); osi_writela(osi_core, OSI_NONE, addr + EQOS_MAC_PIDR0); osi_writela(osi_core, OSI_NONE, addr + EQOS_MAC_PIDR1); osi_writela(osi_core, OSI_NONE, addr + EQOS_MAC_PIDR2); @@ -3565,8 +2310,7 @@ static int eqos_config_ptp_offload(struct osi_core_priv_data *osi_core, osi_core->ptp_config.ptp_filter = value; /** Write PTO_CR and TCR registers */ osi_writela(osi_core, ptc_value, addr + EQOS_MAC_PTO_CR); - eqos_core_safety_writel(osi_core, value, addr + EQOS_MAC_TCR, - EQOS_MAC_TCR_IDX); + osi_writela(osi_core, value, addr + EQOS_MAC_TCR); /* Port ID for PTP offload packet created */ port_id = pto_config->portid & EQOS_MAC_PIDR_PID_MASK; osi_writela(osi_core, port_id, addr + EQOS_MAC_PIDR0); @@ -3575,877 +2319,73 @@ static int eqos_config_ptp_offload(struct osi_core_priv_data *osi_core, return ret; } +#endif /* !OSI_STRIPPED_LIB */ /** - * @brief eqos_config_l3_l4_filter_enable - register write to enable L3/L4 - * filters. + * @brief eqos_config_l3l4_filters - Config L3L4 filters. 
* * @note * Algorithm: - * - This routine to update filter_enb_dis value in IP filter enable register. - * - TraceID:ETHERNET_NVETHERNETRM_019 - * - * @param[in] osi_core: OSI core private data. - * @param[in] filter_enb_dis: enable/disable - * - * @pre MAC should be initialized and started. see osi_start_mac() - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -static nve32_t eqos_config_l3_l4_filter_enable( - struct osi_core_priv_data *const osi_core, - const nveu32_t filter_enb_dis) -{ - nveu32_t value = 0U; - void *base = osi_core->base; - value = osi_readla(osi_core, (nveu8_t *)base + EQOS_MAC_PFR); - value &= ~(EQOS_MAC_PFR_IPFE); - value |= ((filter_enb_dis << EQOS_MAC_PFR_IPFE_SHIFT) & - EQOS_MAC_PFR_IPFE); - eqos_core_safety_writel(osi_core, value, (nveu8_t *)base + EQOS_MAC_PFR, - EQOS_MAC_PFR_IDX); - - return 0; -} - -/** - * @brief eqos_update_ip4_addr - configure register for IPV4 address filtering - * - * @note - * Algorithm: - * - Validate addr for null, filter_no for max value and return -1 on failure. - * - Update IPv4 source/destination address for L3 layer filtering. - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_019 - * - * @param[in] osi_core: OSI core private data structure. Used param base. - * @param[in] filter_no: filter index. Refer #osi_l3_l4_filter->filter_no for details. - * @param[in] addr: ipv4 address. Refer #osi_l3_l4_filter->ip4_addr for details. - * @param[in] src_dst_addr_match: Refer #osi_l3_l4_filter->src_dst_addr_match for details. - * - * @pre 1) MAC should be initialized and started. see osi_start_mac() - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static nve32_t eqos_update_ip4_addr(struct osi_core_priv_data *const osi_core, - const nveu32_t filter_no, - const nveu8_t addr[], - const nveu32_t src_dst_addr_match) -{ - void *base = osi_core->base; - nveu32_t value = 0U; - nveu32_t temp = 0U; - - if (addr == OSI_NULL) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "invalid address\n", 0ULL); - return -1; - } - - if (filter_no > (EQOS_MAX_L3_L4_FILTER - 0x1U)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "invalid filter index for L3/L4 filter\n", - (nveul64_t)filter_no); - return -1; - } - - value = addr[3]; - temp = (nveu32_t)addr[2] << 8; - value |= temp; - temp = (nveu32_t)addr[1] << 16; - value |= temp; - temp = (nveu32_t)addr[0] << 24; - value |= temp; - if (src_dst_addr_match == OSI_SOURCE_MATCH) { - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3_AD0R(filter_no)); - } else { - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3_AD1R(filter_no)); - } - - return 0; -} - -/** - * @brief eqos_update_ip6_addr - add ipv6 address in register - * - * @note - * Algorithm: - * - Validate addr for null, filter_no for max value and return -1 on failure. - * - Update IPv6 source/destination address for L3 layer filtering. - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_019 - * - * @param[in] osi_core: OSI core private data structure. Used param base. - * @param[in] filter_no: filter index. Refer #osi_l3_l4_filter->filter_no for details. - * @param[in] addr: ipv4 address. Refer #osi_l3_l4_filter->ip6_addr for details. - * - * @pre MAC should be initialized and started. see osi_start_mac() - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static nve32_t eqos_update_ip6_addr(struct osi_core_priv_data *const osi_core, - const nveu32_t filter_no, - const nveu16_t addr[]) -{ - void *base = osi_core->base; - nveu32_t value = 0U; - nveu32_t temp = 0U; - - if (addr == OSI_NULL) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "invalid address\n", 0ULL); - return -1; - } - - if (filter_no > (EQOS_MAX_L3_L4_FILTER - 0x1U)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "invalid filter index for L3/L4 filter\n", - (nveul64_t)filter_no); - return -1; - } - - /* update Bits[31:0] of 128-bit IP addr */ - value = addr[7]; - temp = (nveu32_t)addr[6] << 16; - value |= temp; - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3_AD0R(filter_no)); - /* update Bits[63:32] of 128-bit IP addr */ - value = addr[5]; - temp = (nveu32_t)addr[4] << 16; - value |= temp; - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3_AD1R(filter_no)); - /* update Bits[95:64] of 128-bit IP addr */ - value = addr[3]; - temp = (nveu32_t)addr[2] << 16; - value |= temp; - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3_AD2R(filter_no)); - /* update Bits[127:96] of 128-bit IP addr */ - value = addr[1]; - temp = (nveu32_t)addr[0] << 16; - value |= temp; - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3_AD3R(filter_no)); - - return 0; -} - -/** - * @brief eqos_update_l4_port_no -program source port no - * - * @note - * Algorithm: - * - Validate filter_no for max value and return -1 on failure. - * - Update port_no based on src_dst_port_match to confiure L4 layer filtering. - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_019 - * - * @param[in] osi_core: OSI core private data structure. Used param base. - * @param[in] filter_no: filter index. Refer #osi_l3_l4_filter->filter_no for details. - * @param[in] port_no: ipv4 address. Refer #osi_l3_l4_filter->port_no for details. 
- * @param[in] src_dst_port_match: Refer #osi_l3_l4_filter->src_dst_port_match for details. - * - * @pre - * - MAC should be initialized and started. see osi_start_mac() - * - osi_core->osd should be populated. - * - DCS bits should be enabled in RXQ to DMA mapping register - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -static nve32_t eqos_update_l4_port_no( - struct osi_core_priv_data *const osi_core, - const nveu32_t filter_no, - const nveu16_t port_no, - const nveu32_t src_dst_port_match) -{ - void *base = osi_core->base; - nveu32_t value = 0U; - nveu32_t temp = 0U; - - if (filter_no > (EQOS_MAX_L3_L4_FILTER - 0x1U)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "invalid filter index for L3/L4 filter\n", - (nveul64_t)filter_no); - return -1; - } - - value = osi_readla(osi_core, - (nveu8_t *)base + EQOS_MAC_L4_ADR(filter_no)); - if (src_dst_port_match == OSI_SOURCE_MATCH) { - value &= ~EQOS_MAC_L4_SP_MASK; - value |= ((nveu32_t)port_no & EQOS_MAC_L4_SP_MASK); - } else { - value &= ~EQOS_MAC_L4_DP_MASK; - temp = port_no; - value |= ((temp << EQOS_MAC_L4_DP_SHIFT) & EQOS_MAC_L4_DP_MASK); - } - osi_writela(osi_core, value, - (nveu8_t *)base + EQOS_MAC_L4_ADR(filter_no)); - - return 0; -} - -/** \cond DO_NOT_DOCUMENT */ -/** - * @brief eqos_set_dcs - check and update dma routing register - * - * @note - * Algorithm: - * - Check for request for DCS_enable as well as validate chan - * number and dcs_enable is set. After validation, this sequence is used - * to configure L3((IPv4/IPv6) filters for address matching. + * - This sequence is used to configure L3L4 filters for SA and DA Port Number matching. + * - Prepare register data using prepare_l3l4_registers(). + * - Write l3l4 reigsters using mgbe_l3l4_filter_write(). + * - Return 0 on success. + * - Return -1 on any register failure. 
* * @param[in] osi_core: OSI core private data structure. - * @param[in] value: nveu32_t value for caller - * @param[in] dma_routing_enable: filter based dma routing enable(1) - * @param[in] dma_chan: dma channel for routing based on filter + * @param[in] filter_no_r: filter index + * @param[in] l3_l4: Pointer to l3 l4 filter structure (#osi_l3_l4_filter) * - * @pre - * - MAC should be initialized and started. see osi_start_mac() - * - DCS bit of RxQ should be enabled for dynamic channel selection - * in filter support - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - *@return updated nveu32_t value - */ -static inline nveu32_t eqos_set_dcs( - struct osi_core_priv_data *const osi_core, - nveu32_t value, - nveu32_t dma_routing_enable, - nveu32_t dma_chan) -{ - nveu32_t t_val = value; - - if ((dma_routing_enable == OSI_ENABLE) && (dma_chan < - OSI_EQOS_MAX_NUM_CHANS) && (osi_core->dcs_en == - OSI_ENABLE)) { - t_val |= ((dma_routing_enable << - EQOS_MAC_L3L4_CTR_DMCHEN0_SHIFT) & - EQOS_MAC_L3L4_CTR_DMCHEN0); - t_val |= ((dma_chan << - EQOS_MAC_L3L4_CTR_DMCHN0_SHIFT) & - EQOS_MAC_L3L4_CTR_DMCHN0); - } - - return t_val; -} - -/** - * @brief eqos_helper_l3l4_bitmask - helper function to set L3L4 - * bitmask. - * - * @note - * Algorithm: - * - set bit corresponding to L3l4 filter index - * - * @param[out] bitmask: bit mask OSI core private data structure. - * @param[in] filter_no: filter index - * @param[in] value: 0 - disable otherwise - l3/l4 filter enabled - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @pre MAC should be initialized and started. 
see osi_start_mac() - * - */ -static inline void eqos_helper_l3l4_bitmask(nveu32_t *bitmask, - nveu32_t filter_no, - nveu32_t value) -{ - nveu32_t temp; - - /* Set bit mask for index */ - temp = OSI_ENABLE; - temp = temp << filter_no; - /* check against all bit fields for L3L4 filter enable */ - if ((value & EQOS_MAC_L3L4_CTRL_ALL) != OSI_DISABLE) { - *bitmask |= temp; - } else { - *bitmask &= ~temp; - } -} -/** \endcond */ - -/** - * @brief eqos_config_l3_filters - config L3 filters. - * - * @note - * Algorithm: - * - Validate filter_no for maximum and hannel number if dma_routing_enable - * is OSI_ENABLE and reitrn -1 if fails. - * - Configure L3 filter register based on all arguments(except for osi_core and dma_routing_enable) - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_019 - * - * @param[in, out] osi_core: OSI core private data structure. Used param is base. - * @param[in] filter_no: filter index. Max EQOS_MAX_L3_L4_FILTER - 1. - * @param[in] enb_dis: OSI_ENABLE - enable otherwise - disable L3 filter. - * @param[in] ipv4_ipv6_match: OSI_IPV6_MATCH - IPv6, otherwise - IPv4. - * @param[in] src_dst_addr_match: OSI_SOURCE_MATCH - source, otherwise - destination. - * @param[in] perfect_inverse_match: normal match(0) or inverse map(1). - * @param[in] dma_routing_enable: Valid value OSI_ENABLE, invalid otherwise. - * @param[in] dma_chan: dma channel for routing based on filter. Max OSI_EQOS_MAX_NUM_CHANS-1. - * - * @pre - * - MAC should be initialized and started. see osi_start_mac() - * - osi_core->osd should be populated. - * - DCS bit of RxQ should be enabled for dynamic channel selection - * in filter support - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No + * @note 1) MAC should be init and started. see osi_start_mac() + * 2) osi_core->osd should be populated * * @retval 0 on success * @retval -1 on failure. 
*/ -static nve32_t eqos_config_l3_filters( - struct osi_core_priv_data *const osi_core, - const nveu32_t filter_no, - const nveu32_t enb_dis, - const nveu32_t ipv4_ipv6_match, - const nveu32_t src_dst_addr_match, - const nveu32_t perfect_inverse_match, - const nveu32_t dma_routing_enable, - const nveu32_t dma_chan) -{ - nveu32_t value = 0U; - void *base = osi_core->base; - - if (filter_no > (EQOS_MAX_L3_L4_FILTER - 0x1U)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "invalid filter index for L3/L4 filter\n", - (nveul64_t)filter_no); - return -1; - } - - if ((dma_routing_enable == OSI_ENABLE) && - (dma_chan > (OSI_EQOS_MAX_NUM_CHANS - 1U))) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "Wrong DMA channel\n", (nveul64_t)dma_chan); - return -1; - } - - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L3L4_CTR_L3PEN0; - value |= (ipv4_ipv6_match & EQOS_MAC_L3L4_CTR_L3PEN0); - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - - /* For IPv6 either SA/DA can be checked not both */ - if (ipv4_ipv6_match == OSI_IPV6_MATCH) { - if (enb_dis == OSI_ENABLE) { - if (src_dst_addr_match == OSI_SOURCE_MATCH) { - /* Enable L3 filters for IPv6 SOURCE addr - * matching - */ - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L3_IP6_CTRL_CLEAR; - value |= ((EQOS_MAC_L3L4_CTR_L3SAM0 | - (perfect_inverse_match << - EQOS_MAC_L3L4_CTR_L3SAI_SHIFT)) & - ((EQOS_MAC_L3L4_CTR_L3SAM0 | - EQOS_MAC_L3L4_CTR_L3SAIM0))); - value |= eqos_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - - } else { - /* Enable L3 filters for IPv6 DESTINATION addr - * matching - */ - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L3_IP6_CTRL_CLEAR; - value |= ((EQOS_MAC_L3L4_CTR_L3DAM0 | - (perfect_inverse_match << 
- EQOS_MAC_L3L4_CTR_L3DAI_SHIFT)) & - ((EQOS_MAC_L3L4_CTR_L3DAM0 | - EQOS_MAC_L3L4_CTR_L3DAIM0))); - value |= eqos_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - } - } else { - /* Disable L3 filters for IPv6 SOURCE/DESTINATION addr - * matching - */ - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~(EQOS_MAC_L3_IP6_CTRL_CLEAR | - EQOS_MAC_L3L4_CTR_L3PEN0); - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - } - } else { - if (src_dst_addr_match == OSI_SOURCE_MATCH) { - if (enb_dis == OSI_ENABLE) { - /* Enable L3 filters for IPv4 SOURCE addr - * matching - */ - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L3_IP4_SA_CTRL_CLEAR; - value |= ((EQOS_MAC_L3L4_CTR_L3SAM0 | - (perfect_inverse_match << - EQOS_MAC_L3L4_CTR_L3SAI_SHIFT)) & - ((EQOS_MAC_L3L4_CTR_L3SAM0 | - EQOS_MAC_L3L4_CTR_L3SAIM0))); - value |= eqos_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - } else { - /* Disable L3 filters for IPv4 SOURCE addr - * matching - */ - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L3_IP4_SA_CTRL_CLEAR; - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - } - } else { - if (enb_dis == OSI_ENABLE) { - /* Enable L3 filters for IPv4 DESTINATION addr - * matching - */ - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L3_IP4_DA_CTRL_CLEAR; - value |= ((EQOS_MAC_L3L4_CTR_L3DAM0 | - (perfect_inverse_match << - EQOS_MAC_L3L4_CTR_L3DAI_SHIFT)) & - ((EQOS_MAC_L3L4_CTR_L3DAM0 | - EQOS_MAC_L3L4_CTR_L3DAIM0))); - value |= eqos_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - osi_writela(osi_core, value, (nveu8_t *)base + 
- EQOS_MAC_L3L4_CTR(filter_no)); - } else { - /* Disable L3 filters for IPv4 DESTINATION addr - * matching - */ - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L3_IP4_DA_CTRL_CLEAR; - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - } - } - } - - /* Set bit corresponding to filter index if value is non-zero */ - eqos_helper_l3l4_bitmask(&osi_core->l3l4_filter_bitmask, - filter_no, value); - - return 0; -} - -/** - * @brief eqos_config_l4_filters - Config L4 filters. - * - * @note - * Algorithm: - * - Validate filter_no for maximum and hannel number if dma_routing_enable - * is OSI_ENABLE and reitrn -1 if fails. - * - Configure L4 filter register based on all arguments(except for osi_core and dma_routing_enable) - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_019 - * - * @param[in, out] osi_core: OSI core private data structure. Used param is base. - * @param[in] filter_no: filter index. Max EQOS_MAX_L3_L4_FILTER - 1. - * @param[in] enb_dis: OSI_ENABLE - enable, otherwise - disable L4 filter - * @param[in] tcp_udp_match: 1 - udp, 0 - tcp - * @param[in] src_dst_port_match: OSI_SOURCE_MATCH - source port, otherwise - dest port - * @param[in] perfect_inverse_match: normal match(0) or inverse map(1) - * @param[in] dma_routing_enable: Valid value OSI_ENABLE, invalid otherwise. - * @param[in] dma_chan: dma channel for routing based on filter. Max OSI_EQOS_MAX_NUM_CHANS-1. - * - * @pre - * - MAC should be initialized and started. see osi_start_mac() - * - osi_core->osd should be populated. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static nve32_t eqos_config_l4_filters( - struct osi_core_priv_data *const osi_core, - const nveu32_t filter_no, - const nveu32_t enb_dis, - const nveu32_t tcp_udp_match, - const nveu32_t src_dst_port_match, - const nveu32_t perfect_inverse_match, - const nveu32_t dma_routing_enable, - const nveu32_t dma_chan) +static nve32_t eqos_config_l3l4_filters(struct osi_core_priv_data *const osi_core, + nveu32_t filter_no_r, + const struct osi_l3_l4_filter *const l3_l4) { void *base = osi_core->base; - nveu32_t value = 0U; +#ifndef OSI_STRIPPED_LIB + nveu32_t l3_addr0_reg = 0; + nveu32_t l3_addr2_reg = 0; + nveu32_t l3_addr3_reg = 0; + nveu32_t l4_addr_reg = 0; +#endif /* !OSI_STRIPPED_LIB */ + nveu32_t l3_addr1_reg = 0; + nveu32_t ctr_reg = 0; + nveu32_t filter_no = filter_no_r & (OSI_MGBE_MAX_L3_L4_FILTER - 1U); - if (filter_no > (EQOS_MAX_L3_L4_FILTER - 0x1U)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "invalid filter index for L3/L4 filter\n", - (nveul64_t)filter_no); - return -1; - } + prepare_l3l4_registers(osi_core, l3_l4, +#ifndef OSI_STRIPPED_LIB + &l3_addr0_reg, + &l3_addr2_reg, + &l3_addr3_reg, + &l4_addr_reg, +#endif /* !OSI_STRIPPED_LIB */ + &l3_addr1_reg, + &ctr_reg); - if ((dma_routing_enable == OSI_ENABLE) && - (dma_chan > (OSI_EQOS_MAX_NUM_CHANS - 1U))) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "Wrong DMA channel\n", (nveu32_t)dma_chan); - return -1; - } +#ifndef OSI_STRIPPED_LIB + /* Update l3 ip addr MGBE_MAC_L3_AD0R register */ + osi_writela(osi_core, l3_addr0_reg, (nveu8_t *)base + EQOS_MAC_L3_AD0R(filter_no)); - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L3L4_CTR_L4PEN0; - value |= ((tcp_udp_match << EQOS_MAC_L3L4_CTR_L4PEN0_SHIFT) - & EQOS_MAC_L3L4_CTR_L4PEN0); - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); + /* Update l3 ip addr MGBE_MAC_L3_AD2R register */ + osi_writela(osi_core, l3_addr2_reg, (nveu8_t *)base + 
EQOS_MAC_L3_AD2R(filter_no)); - if (src_dst_port_match == OSI_SOURCE_MATCH) { - if (enb_dis == OSI_ENABLE) { - /* Enable L4 filters for SOURCE Port No matching */ - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L4_SP_CTRL_CLEAR; - value |= ((EQOS_MAC_L3L4_CTR_L4SPM0 | - (perfect_inverse_match << - EQOS_MAC_L3L4_CTR_L4SPI_SHIFT)) & - (EQOS_MAC_L3L4_CTR_L4SPM0 | - EQOS_MAC_L3L4_CTR_L4SPIM0)); - value |= eqos_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - } else { - /* Disable L4 filters for SOURCE Port No matching */ - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L4_SP_CTRL_CLEAR; - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - } - } else { - if (enb_dis == OSI_ENABLE) { - /* Enable L4 filters for DESTINATION port No - * matching - */ - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L4_DP_CTRL_CLEAR; - value |= ((EQOS_MAC_L3L4_CTR_L4DPM0 | - (perfect_inverse_match << - EQOS_MAC_L3L4_CTR_L4DPI_SHIFT)) & - (EQOS_MAC_L3L4_CTR_L4DPM0 | - EQOS_MAC_L3L4_CTR_L4DPIM0)); - value |= eqos_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - } else { - /* Disable L4 filters for DESTINATION port No - * matching - */ - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L4_DP_CTRL_CLEAR; - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - } - } - /* Set bit corresponding to filter index if value is non-zero */ - eqos_helper_l3l4_bitmask(&osi_core->l3l4_filter_bitmask, - filter_no, value); + /* Update l3 ip addr MGBE_MAC_L3_AD3R register */ + osi_writela(osi_core, l3_addr3_reg, (nveu8_t *)base + 
EQOS_MAC_L3_AD3R(filter_no)); - return 0; -} + /* Update l4 port EQOS_MAC_L4_ADR register */ + osi_writela(osi_core, l4_addr_reg, (nveu8_t *)base + EQOS_MAC_L4_ADR(filter_no)); +#endif /* !OSI_STRIPPED_LIB */ -/** - * @brief eqos_poll_for_tsinit_complete - Poll for time stamp init complete - * - * @note - * Algorithm: - * - Read TSINIT value from MAC TCR register until it is equal to zero. - * - Max loop count of 1000 with 1 ms delay between iterations. - * - SWUD_ID: ETHERNET_NVETHERNETRM_005_1 - * - * @param[in] osi_core: OSI core private data structure. Used param is base, osd_ops.udelay. - * @param[in, out] mac_tcr: Address to store time stamp control register read - * value - * - * @pre MAC should be initialized and started. see osi_start_mac() - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -static inline nve32_t eqos_poll_for_tsinit_complete( - struct osi_core_priv_data *const osi_core, - nveu32_t *mac_tcr) -{ - nveu32_t retry = RETRY_COUNT; - nveu32_t count; - nve32_t cond = COND_NOT_MET; + /* Update l3 ip addr MGBE_MAC_L3_AD1R register */ + osi_writela(osi_core, l3_addr1_reg, (nveu8_t *)base + EQOS_MAC_L3_AD1R(filter_no)); - /* Wait for previous(if any) Initialize Timestamp value - * update to complete - */ - count = 0; - while (cond == COND_NOT_MET) { - if (count > retry) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "poll_for_tsinit: timeout\n", 0ULL); - return -1; - } - /* Read and Check TSINIT in MAC_Timestamp_Control register */ - *mac_tcr = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_MAC_TCR); - if ((*mac_tcr & EQOS_MAC_TCR_TSINIT) == 0U) { - cond = COND_MET; - } - - count++; - osi_core->osd_ops.udelay(OSI_DELAY_1000US); - } - - return 0; -} - -/** - * @brief eqos_set_systime_to_mac - Set system time - * - * @note - * Algorithm: - * - Updates system time (seconds and nano seconds) in hardware registers. 
- * - Calls eqos_poll_for_tsinit_complete() before and after setting time. - * - return -1 if API fails. - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_005 - * - * @param[in] osi_core: OSI core private data structure. Used param is base. - * @param[in] sec: Seconds to be configured - * @param[in] nsec: Nano Seconds to be configured - * - * @pre MAC should be initialized and started. see osi_start_mac() - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -static nve32_t eqos_set_systime_to_mac( - struct osi_core_priv_data *const osi_core, - const nveu32_t sec, - const nveu32_t nsec) -{ - void *addr = osi_core->base; - nveu32_t mac_tcr; - nve32_t ret; - - /* To be sure previous write was flushed (if Any) */ - ret = eqos_poll_for_tsinit_complete(osi_core, &mac_tcr); - if (ret == -1) { - return -1; - } - - /* write seconds value to MAC_System_Time_Seconds_Update register */ - osi_writela(osi_core, sec, (nveu8_t *)addr + EQOS_MAC_STSUR); - - /* write nano seconds value to MAC_System_Time_Nanoseconds_Update - * register - */ - osi_writela(osi_core, nsec, (nveu8_t *)addr + EQOS_MAC_STNSUR); - - /* issue command to update the configured secs and nsecs values */ - mac_tcr |= EQOS_MAC_TCR_TSINIT; - eqos_core_safety_writel(osi_core, mac_tcr, - (nveu8_t *)addr + EQOS_MAC_TCR, - EQOS_MAC_TCR_IDX); - - ret = eqos_poll_for_tsinit_complete(osi_core, &mac_tcr); - if (ret == -1) { - return -1; - } - - return 0; -} - -/** - * @brief eqos_poll_for_addend_complete - Poll for addend value write complete - * - * @note - * Algorithm: - * - Read TSADDREG value from MAC TCR register until it is equal to zero. - * - Max loop count of 1000 with 1 ms delay between iterations. - * - SWUD_ID: ETHERNET_NVETHERNETRM_023_1 - * - * @param[in] osi_core: OSI core private data structure. Used param is base, osd_ops.udelay. 
- * @param[in, out] mac_tcr: Address to store time stamp control register read - * value - * - * @pre MAC should be initialized and started. see osi_start_mac() - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -static inline nve32_t eqos_poll_for_addend_complete( - struct osi_core_priv_data *const osi_core, - nveu32_t *mac_tcr) -{ - nveu32_t retry = RETRY_COUNT; - nveu32_t count; - nve32_t cond = COND_NOT_MET; - - /* Wait for previous(if any) addend value update to complete */ - /* Poll */ - count = 0; - while (cond == COND_NOT_MET) { - if (count > retry) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "poll_for_addend: timeout\n", 0ULL); - return -1; - } - /* Read and Check TSADDREG in MAC_Timestamp_Control register */ - *mac_tcr = osi_readla(osi_core, - (nveu8_t *)osi_core->base + EQOS_MAC_TCR); - if ((*mac_tcr & EQOS_MAC_TCR_TSADDREG) == 0U) { - cond = COND_MET; - } - - count++; - osi_core->osd_ops.udelay(OSI_DELAY_1000US); - } - - return 0; -} - -/** - * @brief eqos_config_addend - Configure addend - * - * @note - * Algorithm: - * - Updates the Addend value in HW register - * - Calls eqos_poll_for_addend_complete() before and after setting time. - * - return -1 if API fails. - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_023 - * - * @param[in] osi_core: OSI core private data structure. Used param is base. - * @param[in] addend: Addend value to be configured - * - * @pre MAC should be initialized and started. see osi_start_mac() - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static nve32_t eqos_config_addend(struct osi_core_priv_data *const osi_core, - const nveu32_t addend) -{ - nveu32_t mac_tcr; - nve32_t ret; - - /* To be sure previous write was flushed (if Any) */ - ret = eqos_poll_for_addend_complete(osi_core, &mac_tcr); - if (ret == -1) { - return -1; - } - - /* write addend value to MAC_Timestamp_Addend register */ - eqos_core_safety_writel(osi_core, addend, - (nveu8_t *)osi_core->base + EQOS_MAC_TAR, - EQOS_MAC_TAR_IDX); - - /* issue command to update the configured addend value */ - mac_tcr |= EQOS_MAC_TCR_TSADDREG; - eqos_core_safety_writel(osi_core, mac_tcr, - (nveu8_t *)osi_core->base + EQOS_MAC_TCR, - EQOS_MAC_TCR_IDX); - - ret = eqos_poll_for_addend_complete(osi_core, &mac_tcr); - if (ret == -1) { - return -1; - } + /* Write CTR register */ + osi_writela(osi_core, ctr_reg, (nveu8_t *)base + EQOS_MAC_L3L4_CTR(filter_no)); return 0; } @@ -4482,14 +2422,16 @@ static inline nve32_t eqos_poll_for_update_ts_complete( nveu32_t retry = RETRY_COUNT; nveu32_t count; nve32_t cond = COND_NOT_MET; + nve32_t ret = 0; /* Wait for previous(if any) time stamp value update to complete */ count = 0; while (cond == COND_NOT_MET) { if (count > retry) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "poll_for_update_ts: timeout\n", 0ULL); - return -1; + ret = -1; + goto fail; } /* Read and Check TSUPDT in MAC_Timestamp_Control register */ *mac_tcr = osi_readla(osi_core, (nveu8_t *)osi_core->base + @@ -4501,8 +2443,8 @@ static inline nve32_t eqos_poll_for_update_ts_complete( count++; osi_core->osd_ops.udelay(OSI_DELAY_1000US); } - - return 0; +fail: + return ret; } @@ -4542,16 +2484,16 @@ static nve32_t eqos_adjust_mactime(struct osi_core_priv_data *const osi_core, const nveu32_t one_nsec_accuracy) { void *addr = osi_core->base; - nveu32_t mac_tcr; + nveu32_t mac_tcr = 0U; nveu32_t value = 0; nveul64_t temp = 0; nveu32_t sec1 = sec; nveu32_t nsec1 = nsec; - nve32_t ret; + nve32_t ret = 0; 
ret = eqos_poll_for_update_ts_complete(osi_core, &mac_tcr); if (ret == -1) { - return -1; + goto fail; } if (add_sub != 0U) { @@ -4597,106 +2539,15 @@ static nve32_t eqos_adjust_mactime(struct osi_core_priv_data *const osi_core, * specified in MAC_STSUR and MAC_STNSUR */ mac_tcr |= EQOS_MAC_TCR_TSUPDT; - eqos_core_safety_writel(osi_core, mac_tcr, - (nveu8_t *)addr + EQOS_MAC_TCR, - EQOS_MAC_TCR_IDX); + osi_writela(osi_core, mac_tcr, (nveu8_t *)addr + EQOS_MAC_TCR); ret = eqos_poll_for_update_ts_complete(osi_core, &mac_tcr); - if (ret == -1) { - return -1; - } - return 0; +fail: + return ret; } -/** \cond DO_NOT_DOCUMENT */ -/** - * @brief eqos_config_tscr - Configure Time Stamp Register - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] ptp_filter: PTP rx filter parameters - * - * @pre MAC should be initialized and started. see osi_start_mac() - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - */ -static void eqos_config_tscr(struct osi_core_priv_data *const osi_core, - const nveu32_t ptp_filter) -{ - void *addr = osi_core->base; - struct core_local *l_core = (struct core_local *)osi_core; - nveu32_t mac_tcr = 0U, i = 0U, temp = 0U; - nveu32_t value = 0x0U; - - if (ptp_filter != OSI_DISABLE) { - mac_tcr = (OSI_MAC_TCR_TSENA | - OSI_MAC_TCR_TSCFUPDT | - OSI_MAC_TCR_TSCTRLSSR); - - for (i = 0U; i < 32U; i++) { - temp = ptp_filter & OSI_BIT(i); - - switch (temp) { - case OSI_MAC_TCR_SNAPTYPSEL_1: - mac_tcr |= OSI_MAC_TCR_SNAPTYPSEL_1; - break; - case OSI_MAC_TCR_SNAPTYPSEL_2: - mac_tcr |= OSI_MAC_TCR_SNAPTYPSEL_2; - break; - case OSI_MAC_TCR_TSIPV4ENA: - mac_tcr |= OSI_MAC_TCR_TSIPV4ENA; - break; - case OSI_MAC_TCR_TSIPV6ENA: - mac_tcr |= OSI_MAC_TCR_TSIPV6ENA; - break; - case OSI_MAC_TCR_TSEVENTENA: - mac_tcr |= OSI_MAC_TCR_TSEVENTENA; - break; - case OSI_MAC_TCR_TSMASTERENA: - mac_tcr |= OSI_MAC_TCR_TSMASTERENA; - break; - case OSI_MAC_TCR_TSVER2ENA: - mac_tcr |= OSI_MAC_TCR_TSVER2ENA; - 
break; - case OSI_MAC_TCR_TSIPENA: - mac_tcr |= OSI_MAC_TCR_TSIPENA; - break; - case OSI_MAC_TCR_AV8021ASMEN: - mac_tcr |= OSI_MAC_TCR_AV8021ASMEN; - break; - case OSI_MAC_TCR_TSENALL: - mac_tcr |= OSI_MAC_TCR_TSENALL; - break; - case OSI_MAC_TCR_CSC: - mac_tcr |= OSI_MAC_TCR_CSC; - break; - default: - /* To avoid MISRA violation */ - mac_tcr |= mac_tcr; - break; - } - } - } else { - /* Disabling the MAC time stamping */ - mac_tcr = OSI_DISABLE; - } - - eqos_core_safety_writel(osi_core, mac_tcr, - (nveu8_t *)addr + EQOS_MAC_TCR, - EQOS_MAC_TCR_IDX); - value = osi_readla(osi_core, (nveu8_t *)addr + EQOS_MAC_PPS_CTL); - value &= ~EQOS_MAC_PPS_CTL_PPSCTRL0; - if (l_core->pps_freq == OSI_ENABLE) { - value |= OSI_ENABLE; - } - osi_writela(osi_core, value, (nveu8_t *)addr + EQOS_MAC_PPS_CTL); -} -/** \endcond */ - +#ifndef OSI_STRIPPED_LIB /** * @brief eqos_config_ptp_rxq - To config PTP RX packets queue * @@ -4710,13 +2561,13 @@ static void eqos_config_tscr(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static int eqos_config_ptp_rxq(struct osi_core_priv_data *osi_core, - const unsigned int rxq_idx, - const unsigned int enable) +static nve32_t eqos_config_ptp_rxq(struct osi_core_priv_data *const osi_core, + const nveu32_t rxq_idx, + const nveu32_t enable) { - unsigned char *base = osi_core->base; - unsigned int value = OSI_NONE; - unsigned int i = 0U; + nveu8_t *base = osi_core->base; + nveu32_t value = OSI_NONE; + nveu32_t i = 0U; /* Validate the RX queue index argment */ if (rxq_idx >= OSI_EQOS_MAX_NUM_QUEUES) { @@ -4732,7 +2583,7 @@ static int eqos_config_ptp_rxq(struct osi_core_priv_data *osi_core, } /* Validate enable argument */ - if (enable != OSI_ENABLE && enable != OSI_DISABLE) { + if ((enable != OSI_ENABLE) && (enable != OSI_DISABLE)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid enable input\n", enable); @@ -4784,402 +2635,7 @@ static int eqos_config_ptp_rxq(struct osi_core_priv_data *osi_core, return 0; } - -/** - * @brief eqos_config_ssir - Configure SSIR register - * - * @note - * Algorithm: - * - Calculate SSIR - * - For Coarse method(EQOS_MAC_TCR_TSCFUPDT not set in TCR register), ((1/ptp_clock) * 1000000000). - * - For fine correction use predeined value based on MAC version OSI_PTP_SSINC_16 if MAC version - * less than OSI_EQOS_MAC_4_10 and OSI_PTP_SSINC_4 if otherwise. - * - If EQOS_MAC_TCR_TSCTRLSSR bit not set in TCR register, set accurasy to 0.465ns. - * - i.e new val = val * 1000/465; - * - Program the calculated value to EQOS_MAC_SSIR register - * - Refer to EQOS column of <> for API details. - * - SWUD_ID: ETHERNET_NVETHERNETRM_021_1 - * - * @param[in] osi_core: OSI core private data structure. Used param is base, mac_ver. - * - * @pre MAC should be initialized and started. 
see osi_start_mac() - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - */ -static void eqos_config_ssir(struct osi_core_priv_data *const osi_core, - const unsigned int ptp_clock) -{ - nveul64_t val; - nveu32_t mac_tcr; - void *addr = osi_core->base; - - mac_tcr = osi_readla(osi_core, (nveu8_t *)addr + EQOS_MAC_TCR); - - if ((mac_tcr & EQOS_MAC_TCR_TSCFUPDT) == EQOS_MAC_TCR_TSCFUPDT) { - if (osi_core->mac_ver <= OSI_EQOS_MAC_4_10) { - val = OSI_PTP_SSINC_16; - } else if (osi_core->mac_ver == OSI_EQOS_MAC_5_30) { - val = OSI_PTP_SSINC_6; - } else { - val = OSI_PTP_SSINC_4; - } - } else { - /* convert the PTP required clock frequency to nano second for - * COARSE correction. - * Formula: ((1/ptp_clock) * 1000000000) - */ - val = ((1U * OSI_NSEC_PER_SEC) / ptp_clock); - } - - /* 0.465ns accurecy */ - if ((mac_tcr & EQOS_MAC_TCR_TSCTRLSSR) == 0U) { - if (val < UINT_MAX) { - val = (val * 1000U) / 465U; - } - } - - val |= val << EQOS_MAC_SSIR_SSINC_SHIFT; - /* update Sub-second Increment Value */ - if (val < UINT_MAX) { - eqos_core_safety_writel(osi_core, (nveu32_t)val, - (nveu8_t *)addr + EQOS_MAC_SSIR, - EQOS_MAC_SSIR_IDX); - } -} - -/** - * @brief eqos_core_deinit - EQOS MAC core deinitialization - * - * @note - * Algorithm: - * - This function calls eqos_stop_mac() - * - TraceId:ETHERNET_NVETHERNETRM_007 - * - * @param[in] osi_core: OSI core private data structure. Used param is base. - * - * @pre Required clks and resets has to be enabled - * - * @note - * API Group: - * - Initialization: No - * - Run time: No - * - De-initialization: Yes - */ -static void eqos_core_deinit(struct osi_core_priv_data *const osi_core) -{ - /* Stop the MAC by disabling both MAC Tx and Rx */ - eqos_stop_mac(osi_core); -} - -/** - * @brief eqos_hw_est_write - indirect write the GCL to Software own list - * (SWOL) - * - * @param[in] base: MAC base IOVA address. - * @param[in] addr_val: Address offset for indirect write. 
- * @param[in] data: Data to be written at offset. - * @param[in] gcla: Gate Control List Address, 0 for ETS register. - * 1 for GCL memory. - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. - */ -static int eqos_hw_est_write(struct osi_core_priv_data *osi_core, - unsigned int addr_val, - unsigned int data, unsigned int gcla) -{ - void *base = osi_core->base; - int retry = 1000; - unsigned int val = 0x0; - - osi_writela(osi_core, data, (unsigned char *)base + EQOS_MTL_EST_DATA); - - val &= ~EQOS_MTL_EST_ADDR_MASK; - val |= (gcla == 1U) ? 0x0U : EQOS_MTL_EST_GCRR; - val |= EQOS_MTL_EST_SRWO; - val |= addr_val; - osi_writela(osi_core, val, - (unsigned char *)base + EQOS_MTL_EST_GCL_CONTROL); - - while (--retry > 0) { - osi_core->osd_ops.udelay(OSI_DELAY_1US); - val = osi_readla(osi_core, (unsigned char *)base + - EQOS_MTL_EST_GCL_CONTROL); - if ((val & EQOS_MTL_EST_SRWO) == EQOS_MTL_EST_SRWO) { - continue; - } - - break; - } - - if (((val & EQOS_MTL_EST_ERR0) == EQOS_MTL_EST_ERR0) || - (retry <= 0)) { - return -1; - } - - return 0; -} - -/** - * @brief eqos_hw_config_est - Read Setting for GCL from input and update - * registers. - * - * Algorithm: - * 1) Write TER, LLR and EST control register - * 2) Update GCL to sw own GCL (MTL_EST_Status bit SWOL will tell which is - * owned by SW) and store which GCL is in use currently in sw. - * 3) TODO set DBGB and DBGM for debugging - * 4) EST_data and GCRR to 1, update entry sno in ADDR and put data at - * est_gcl_data enable GCL MTL_EST_SSWL and wait for self clear or use - * SWLC in MTL_EST_Status. Please note new GCL will be pushed for each entry. - * 5) Configure btr. Update btr based on current time (current time - * should be updated based on PTP by this time) - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] est: EST configuration input argument. - * - * @note MAC should be init and started. 
see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. - */ -static int eqos_hw_config_est(struct osi_core_priv_data *osi_core, - struct osi_est_config *est) -{ - void *base = osi_core->base; - unsigned int btr[2] = {0}; - unsigned int val = 0x0; - unsigned int addr = 0x0; - unsigned int i; - int ret = 0; - - if ((osi_core->hw_feature != OSI_NULL) && - (osi_core->hw_feature->est_sel == OSI_DISABLE)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "EST not supported in HW\n", 0ULL); - return -1; - } - - if (est->en_dis == OSI_DISABLE) { - val = osi_readla(osi_core, - (nveu8_t *)base + EQOS_MTL_EST_CONTROL); - val &= ~EQOS_MTL_EST_CONTROL_EEST; - osi_writela(osi_core, val, - (nveu8_t *)base + EQOS_MTL_EST_CONTROL); - return 0; - } - - btr[0] = est->btr[0]; - btr[1] = est->btr[1]; - - if (btr[0] == 0U && btr[1] == 0U) { - common_get_systime_from_mac(osi_core->base, osi_core->mac, - &btr[1], &btr[0]); - } - - if (gcl_validate(osi_core, est, btr, osi_core->mac) < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL validation failed\n", 0LL); - return -1; - } - - ret = eqos_hw_est_write(osi_core, EQOS_MTL_EST_CTR_LOW, est->ctr[0], - OSI_DISABLE); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL CTR[0] failed\n", 0LL); - return ret; - } - /* check for est->ctr[i] not more than FF, TODO as per hw config - * parameter we can have max 0x3 as this value in sec */ - est->ctr[1] &= EQOS_MTL_EST_CTR_HIGH_MAX; - ret = eqos_hw_est_write(osi_core, EQOS_MTL_EST_CTR_HIGH, est->ctr[1], - OSI_DISABLE); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL CTR[1] failed\n", 0LL); - return ret; - } - - ret = eqos_hw_est_write(osi_core, EQOS_MTL_EST_TER, est->ter, - OSI_DISABLE); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL TER failed\n", 0LL); - return ret; - } - - ret = eqos_hw_est_write(osi_core, EQOS_MTL_EST_LLR, est->llr, - OSI_DISABLE); - if (ret < 0) { - 
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL LLR failed\n", 0LL); - return ret; - } - - /* Write GCL table */ - for (i = 0U; i < est->llr; i++) { - addr = i; - addr = addr << EQOS_MTL_EST_ADDR_SHIFT; - addr &= EQOS_MTL_EST_ADDR_MASK; - ret = eqos_hw_est_write(osi_core, addr, est->gcl[i], - OSI_ENABLE); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL enties write failed\n", - (unsigned long long)i); - return ret; - } - } - - /* Write parameters */ - ret = eqos_hw_est_write(osi_core, EQOS_MTL_EST_BTR_LOW, - btr[0] + est->btr_offset[0], OSI_DISABLE); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL BTR[0] failed\n", - (unsigned long long)(btr[0] + - est->btr_offset[0])); - return ret; - } - - ret = eqos_hw_est_write(osi_core, EQOS_MTL_EST_BTR_HIGH, - btr[1] + est->btr_offset[1], OSI_DISABLE); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL BTR[1] failed\n", - (unsigned long long)(btr[1] + - est->btr_offset[1])); - return ret; - } - - val = osi_readla(osi_core, (unsigned char *) - base + EQOS_MTL_EST_CONTROL); - /* Store table */ - val |= EQOS_MTL_EST_CONTROL_SSWL; - val |= EQOS_MTL_EST_CONTROL_EEST; - val |= EQOS_MTL_EST_CONTROL_QHLBF; - osi_writela(osi_core, val, (nveu8_t *)base + EQOS_MTL_EST_CONTROL); - - return ret; -} - -/** - * @brief eqos_hw_config_fep - Read Setting for preemption and express for TC - * and update registers. - * - * Algorithm: - * 1) Check for TC enable and TC has masked for setting to preemptable. - * 2) update FPE control status register - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] fpe: FPE configuration input argument. - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static int eqos_hw_config_fpe(struct osi_core_priv_data *osi_core, - struct osi_fpe_config *fpe) -{ - unsigned int i = 0U; - unsigned int val = 0U; - unsigned int temp = 0U, temp1 = 0U; - unsigned int temp_shift = 0U; - - if ((osi_core->hw_feature != OSI_NULL) && - (osi_core->hw_feature->fpe_sel == OSI_DISABLE)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "FPE not supported in HW\n", 0ULL); - return -1; - } - - osi_core->fpe_ready = OSI_DISABLE; - - - if (((fpe->tx_queue_preemption_enable << EQOS_MTL_FPE_CTS_PEC_SHIFT) & - EQOS_MTL_FPE_CTS_PEC) == OSI_DISABLE) { - val = osi_readla(osi_core, - (nveu8_t *)osi_core->base + EQOS_MTL_FPE_CTS); - val &= ~EQOS_MTL_FPE_CTS_PEC; - osi_writela(osi_core, val, - (nveu8_t *)osi_core->base + EQOS_MTL_FPE_CTS); - - val = osi_readla(osi_core, - (nveu8_t *)osi_core->base + EQOS_MAC_FPE_CTS); - val &= ~EQOS_MAC_FPE_CTS_EFPE; - osi_writela(osi_core, val, (nveu8_t *)osi_core->base + - EQOS_MAC_FPE_CTS); - - return 0; - } - - val = osi_readla(osi_core, - (nveu8_t *)osi_core->base + EQOS_MTL_FPE_CTS); - val &= ~EQOS_MTL_FPE_CTS_PEC; - for (i = 0U; i < OSI_MAX_TC_NUM; i++) { - /* max 8 bit for this structure fot TC/TXQ. Set the TC for express or - * preemption. Default is express for a TC. 
DWCXG_NUM_TC = 8 */ - temp = OSI_BIT(i); - if ((fpe->tx_queue_preemption_enable & temp) == temp) { - temp_shift = i; - temp_shift += EQOS_MTL_FPE_CTS_PEC_SHIFT; - /* set queue for preemtable */ - if (temp_shift < EQOS_MTL_FPE_CTS_PEC_MAX_SHIFT) { - temp1 = OSI_ENABLE; - temp1 = temp1 << temp_shift; - val |= temp1; - } else { - /* Do nothing */ - } - } - } - osi_writela(osi_core, val, - (nveu8_t *)osi_core->base + EQOS_MTL_FPE_CTS); - - /* Setting RQ as RxQ 0 is not allowed */ - if (fpe->rq == 0x0U || fpe->rq >= OSI_EQOS_MAX_NUM_CHANS) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "EST init failed due to wrong RQ\n", fpe->rq); - return -1; - } - - val = osi_readla(osi_core, - (unsigned char *)osi_core->base + EQOS_MAC_RQC1R); - val &= ~EQOS_MAC_RQC1R_FPRQ; - temp = fpe->rq; - temp = temp << EQOS_MAC_RQC1R_FPRQ_SHIFT; - temp = (temp & EQOS_MAC_RQC1R_FPRQ); - val |= temp; - /* update RQ in OSI CORE struct */ - osi_core->residual_queue = fpe->rq; - osi_writela(osi_core, val, - (unsigned char *)osi_core->base + EQOS_MAC_RQC1R); - - /* initiate SVER for SMD-V and SMD-R */ - val = osi_readla(osi_core, - (unsigned char *)osi_core->base + EQOS_MTL_FPE_CTS); - val |= EQOS_MAC_FPE_CTS_SVER; - osi_writela(osi_core, val, - (unsigned char *)osi_core->base + EQOS_MAC_FPE_CTS); - - val = osi_readla(osi_core, - (unsigned char *)osi_core->base + EQOS_MTL_FPE_ADV); - val &= ~EQOS_MTL_FPE_ADV_HADV_MASK; - /* (minimum_fragment_size +IPG/EIPG + Preamble) *.8 ~98ns for10G */ - val |= EQOS_MTL_FPE_ADV_HADV_VAL; - osi_writela(osi_core, val, - (unsigned char *)osi_core->base + EQOS_MTL_FPE_ADV); - - return 0; -} +#endif /* !OSI_STRIPPED_LIB */ /** \cond DO_NOT_DOCUMENT */ /** @@ -5205,13 +2661,15 @@ static inline nve32_t poll_for_mii_idle(struct osi_core_priv_data *osi_core) nveu32_t mac_gmiiar; nveu32_t count; nve32_t cond = COND_NOT_MET; + nve32_t ret = 0; count = 0; while (cond == COND_NOT_MET) { if (count > retry) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "MII 
operation timed out\n", 0ULL); - return -1; + ret = -1; + goto fail; } count++; @@ -5225,8 +2683,8 @@ static inline nve32_t poll_for_mii_idle(struct osi_core_priv_data *osi_core) osi_core->osd_ops.udelay(10U); } } - - return 0; +fail: + return ret; } /** \endcond */ @@ -5276,7 +2734,7 @@ static nve32_t eqos_write_phy_reg(struct osi_core_priv_data *const osi_core, ret = poll_for_mii_idle(osi_core); if (ret < 0) { /* poll_for_mii_idle fail */ - return ret; + goto fail; } /* C45 register access */ @@ -5329,7 +2787,9 @@ static nve32_t eqos_write_phy_reg(struct osi_core_priv_data *const osi_core, osi_writela(osi_core, mac_gmiiar, (nveu8_t *)osi_core->base + EQOS_MAC_MDIO_ADDRESS); /* wait for MII write operation to complete */ - return poll_for_mii_idle(osi_core); + ret = poll_for_mii_idle(osi_core); +fail: + return ret; } /** @@ -5377,7 +2837,7 @@ static nve32_t eqos_read_phy_reg(struct osi_core_priv_data *const osi_core, ret = poll_for_mii_idle(osi_core); if (ret < 0) { /* poll_for_mii_idle fail */ - return ret; + goto fail; } /* C45 register access */ if ((phyreg & OSI_MII_ADDR_C45) == OSI_MII_ADDR_C45) { @@ -5424,14 +2884,16 @@ static nve32_t eqos_read_phy_reg(struct osi_core_priv_data *const osi_core, ret = poll_for_mii_idle(osi_core); if (ret < 0) { /* poll_for_mii_idle fail */ - return ret; + goto fail; } mac_gmiidr = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_MDIO_DATA); data = (mac_gmiidr & EQOS_MAC_GMIIDR_GD_MASK); - return (nve32_t)data; + ret = (nve32_t)data; +fail: + return ret; } /** @@ -5485,12 +2947,23 @@ static nveu32_t eqos_write_reg(struct osi_core_priv_data *const osi_core, * - Initialization: Yes * - Run time: Yes * - De-initialization: Yes - * @retval data from register on success + * @retval data from register on success and 0xffffffff on failure */ static nveu32_t eqos_read_macsec_reg(struct osi_core_priv_data *const osi_core, const nve32_t reg) { - return osi_readla(osi_core, (nveu8_t *)osi_core->macsec_base + reg); + nveu32_t ret 
= 0; + + if (osi_core->macsec_ops != OSI_NULL) { + ret = osi_readla(osi_core, (nveu8_t *)osi_core->macsec_base + + reg); + } else { + /* macsec is not supported or not enabled in DT */ + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "read reg failed", 0ULL); + ret = 0xffffffff; + } + return ret; } /** @@ -5505,13 +2978,23 @@ static nveu32_t eqos_read_macsec_reg(struct osi_core_priv_data *const osi_core, * - Initialization: Yes * - Run time: Yes * - De-initialization: Yes - * @retval 0 + * @retval 0 on success or 0xffffffff on error */ static nveu32_t eqos_write_macsec_reg(struct osi_core_priv_data *const osi_core, const nveu32_t val, const nve32_t reg) { - osi_writela(osi_core, val, (nveu8_t *)osi_core->macsec_base + reg); - return 0; + nveu32_t ret = 0; + + if (osi_core->macsec_ops != OSI_NULL) { + osi_writela(osi_core, val, (nveu8_t *)osi_core->macsec_base + + reg); + } else { + /* macsec is not supported or not enabled in DT */ + OSI_CORE_ERR(osi_core->osd, + OSI_LOG_ARG_HW_FAIL, "write reg failed", 0ULL); + ret = 0xffffffff; + } + return ret; } #endif /* MACSEC_SUPPORT */ @@ -5549,67 +3032,6 @@ static inline void eqos_disable_tx_lpi( (nveu8_t *)addr + EQOS_MAC_LPI_CSR); } -/** - * @brief Read-validate HW registers for functional safety. - * - * @note - * Algorithm: - * - Reads pre-configured list of MAC/MTL configuration registers - * and compares with last written value for any modifications. - * - * @param[in] osi_core: OSI core private data structure. - * - * @pre - * - MAC has to be out of reset. - * - osi_hw_core_init has to be called. Internally this would initialize - * the safety_config (see osi_core_priv_data) based on MAC version and - * which specific registers needs to be validated periodically. - * - Invoke this call if (osi_core_priv_data->safety_config != OSI_NULL) - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static nve32_t eqos_validate_core_regs( - struct osi_core_priv_data *const osi_core) -{ - struct core_func_safety *config = - (struct core_func_safety *)osi_core->safety_config; - nveu32_t cur_val; - nveu32_t i; - - osi_lock_irq_enabled(&config->core_safety_lock); - for (i = EQOS_MAC_MCR_IDX; i < EQOS_MAX_CORE_SAFETY_REGS; i++) { - if (config->reg_addr[i] == OSI_NULL) { - continue; - } - cur_val = osi_readla(osi_core, - (nveu8_t *)config->reg_addr[i]); - cur_val &= config->reg_mask[i]; - - if (cur_val == config->reg_val[i]) { - continue; - } else { - /* Register content differs from what was written. - * Return error and let safety manager (NVGaurd etc.) - * take care of corrective action. - */ - osi_unlock_irq_enabled(&config->core_safety_lock); - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "register mismatch\n", 0ULL); - return -1; - } - } - osi_unlock_irq_enabled(&config->core_safety_lock); - - return 0; -} - /** * @brief eqos_config_rx_crc_check - Configure CRC Checking for Rx Packets * @@ -5642,7 +3064,7 @@ static nve32_t eqos_config_rx_crc_check( /* return on invalid argument */ if ((crc_chk != OSI_ENABLE) && (crc_chk != OSI_DISABLE)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "rx_crc: invalid input\n", 0ULL); return -1; } @@ -5699,7 +3121,7 @@ static nve32_t eqos_config_tx_status(struct osi_core_priv_data *const osi_core, /* don't allow if tx_status is other than 0 or 1 */ if ((tx_status != OSI_ENABLE) && (tx_status != OSI_DISABLE)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "tx_status: invalid input\n", 0ULL); return -1; } @@ -5728,6 +3150,7 @@ static nve32_t eqos_config_tx_status(struct osi_core_priv_data *const osi_core, return 0; } +#endif /* !OSI_STRIPPED_LIB */ /** * @brief eqos_set_avb_algorithm - Set TxQ/TC avb config @@ -5771,21 +3194,21 @@ static nve32_t eqos_set_avb_algorithm( if (avb == OSI_NULL) { OSI_CORE_ERR(osi_core->osd, 
OSI_LOG_ARG_INVALID, "avb structure is NULL\n", 0ULL); - return ret; + goto done; } /* queue index in range */ if (avb->qindex >= OSI_EQOS_MAX_NUM_QUEUES) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid Queue index\n", (nveul64_t)avb->qindex); - return ret; + goto done; } /* queue oper_mode in range check*/ if (avb->oper_mode >= OSI_MTL_QUEUE_MODEMAX) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid Queue mode\n", (nveul64_t)avb->qindex); - return ret; + goto done; } /* can't set AVB mode for queue 0 */ @@ -5793,7 +3216,7 @@ static nve32_t eqos_set_avb_algorithm( OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OPNOTSUPP, "Not allowed to set AVB for Q0\n", (nveul64_t)avb->qindex); - return ret; + goto done; } qinx = avb->qindex; @@ -5803,9 +3226,7 @@ static nve32_t eqos_set_avb_algorithm( /* Set TxQ/TC mode as per input struct after masking 3 bit */ value |= (avb->oper_mode << EQOS_MTL_TXQEN_MASK_SHIFT) & EQOS_MTL_TXQEN_MASK; - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MTL_CHX_TX_OP_MODE(qinx), - EQOS_MTL_CH0_TX_OP_MODE_IDX + qinx); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MTL_CHX_TX_OP_MODE(qinx)); /* Set Algo and Credit control */ value = OSI_DISABLE; @@ -5829,10 +3250,8 @@ static nve32_t eqos_set_avb_algorithm( EQOS_MTL_TXQ_QW(qinx)); value &= ~EQOS_MTL_TXQ_ETS_QW_ISCQW_MASK; value |= avb->idle_slope & EQOS_MTL_TXQ_ETS_QW_ISCQW_MASK; - eqos_core_safety_writel(osi_core, value, - (nveu8_t *)osi_core->base + - EQOS_MTL_TXQ_QW(qinx), - EQOS_MTL_TXQ0_QW_IDX + qinx); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + + EQOS_MTL_TXQ_QW(qinx)); /* Set Hi credit */ value = avb->hi_credit & EQOS_MTL_TXQ_ETS_HCR_HC_MASK; @@ -5845,9 +3264,24 @@ static nve32_t eqos_set_avb_algorithm( value = avb->low_credit & EQOS_MTL_TXQ_ETS_LCR_LC_MASK; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MTL_TXQ_ETS_LCR(qinx)); + } else { + /* Reset register values to POR/initialized values */ + 
osi_writela(osi_core, OSI_DISABLE, (nveu8_t *)osi_core->base + + EQOS_MTL_TXQ_ETS_SSCR(qinx)); + + osi_writela(osi_core, EQOS_MTL_TXQ_QW_ISCQW, + (nveu8_t *)osi_core->base + EQOS_MTL_TXQ_QW(qinx)); + + osi_writela(osi_core, OSI_DISABLE, (nveu8_t *)osi_core->base + + EQOS_MTL_TXQ_ETS_HCR(qinx)); + + osi_writela(osi_core, OSI_DISABLE, (nveu8_t *)osi_core->base + + EQOS_MTL_TXQ_ETS_LCR(qinx)); } - return 0; + ret = 0; +done: + return ret; } /** @@ -5891,13 +3325,13 @@ static nve32_t eqos_get_avb_algorithm(struct osi_core_priv_data *const osi_core, if (avb == OSI_NULL) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "avb structure is NULL\n", 0ULL); - return ret; + goto done; } if (avb->qindex >= OSI_EQOS_MAX_NUM_QUEUES) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid Queue index\n", (nveul64_t)avb->qindex); - return ret; + goto done; } qinx = avb->qindex; @@ -5938,9 +3372,13 @@ static nve32_t eqos_get_avb_algorithm(struct osi_core_priv_data *const osi_core, EQOS_MTL_TXQ_ETS_LCR(qinx)); avb->low_credit = value & EQOS_MTL_TXQ_ETS_LCR_LC_MASK; - return 0; + ret = 0; + +done: + return ret; } +#ifndef OSI_STRIPPED_LIB /** * @brief eqos_config_arp_offload - Enable/Disable ARP offload * @@ -5995,7 +3433,7 @@ static nve32_t eqos_config_arp_offload( EQOS_5_00_MAC_ARPPA); } else { /* Unsupported MAC ver */ - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "arp_offload: invalid HW\n", 0ULL); return -1; } @@ -6005,9 +3443,7 @@ static nve32_t eqos_config_arp_offload( mac_mcr &= ~EQOS_MCR_ARPEN; } - eqos_core_safety_writel(osi_core, mac_mcr, - (nveu8_t *)addr + EQOS_MAC_MCR, - EQOS_MAC_MCR_IDX); + osi_writela(osi_core, mac_mcr, (nveu8_t *)addr + EQOS_MAC_MCR); return 0; } @@ -6049,21 +3485,21 @@ static nve32_t eqos_config_vlan_filtering( if ((filter_enb_dis != OSI_ENABLE) && (filter_enb_dis != OSI_DISABLE)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "vlan_filter: 
invalid input\n", 0ULL); return -1; } if ((perfect_hash_filtering != OSI_ENABLE) && (perfect_hash_filtering != OSI_DISABLE)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "vlan_filter: invalid input\n", 0ULL); return -1; } if ((perfect_inverse_match != OSI_ENABLE) && (perfect_inverse_match != OSI_DISABLE)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "vlan_filter: invalid input\n", 0ULL); return -1; } @@ -6071,8 +3507,7 @@ static nve32_t eqos_config_vlan_filtering( value = osi_readla(osi_core, (nveu8_t *)base + EQOS_MAC_PFR); value &= ~(EQOS_MAC_PFR_VTFE); value |= ((filter_enb_dis << EQOS_MAC_PFR_SHIFT) & EQOS_MAC_PFR_VTFE); - eqos_core_safety_writel(osi_core, value, (nveu8_t *)base + EQOS_MAC_PFR, - EQOS_MAC_PFR_IDX); + osi_writela(osi_core, value, (nveu8_t *)base + EQOS_MAC_PFR); value = osi_readla(osi_core, (nveu8_t *)base + EQOS_MAC_VLAN_TR); value &= ~(EQOS_MAC_VLAN_TR_VTIM | EQOS_MAC_VLAN_TR_VTHM); @@ -6177,74 +3612,6 @@ static void eqos_configure_eee(struct osi_core_priv_data *const osi_core, } } -/** - * @brief Function to store a backup of MAC register space during SOC suspend. - * - * @note - * Algorithm: - * - Read registers to be backed up as per struct core_backup and - * store the register values in memory. - * - * @param[in] osi_core: OSI core private data structure. - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on Success - */ -static inline nve32_t eqos_save_registers( - struct osi_core_priv_data *const osi_core) -{ - nveu32_t i; - struct core_backup *config = &osi_core->backup_config; - - for (i = 0; i < EQOS_MAX_BAK_IDX; i++) { - if (config->reg_addr[i] != OSI_NULL) { - config->reg_val[i] = osi_readla(osi_core, - config->reg_addr[i]); - } - } - - return 0; -} - -/** - * @brief Function to restore the backup of MAC registers during SOC resume. 
- * - * @note - * Algorithm: - * - Restore the register values from the in memory backup taken using - * eqos_save_registers(). - * - * @param[in] osi_core: OSI core private data structure. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @retval 0 on Success - */ -static inline nve32_t eqos_restore_registers( - struct osi_core_priv_data *const osi_core) -{ - nveu32_t i; - struct core_backup *config = &osi_core->backup_config; - - for (i = 0; i < EQOS_MAX_BAK_IDX; i++) { - if (config->reg_addr[i] != OSI_NULL) { - osi_writela(osi_core, config->reg_val[i], - config->reg_addr[i]); - } - } - - return 0; -} - /** * @brief eqos_set_mdc_clk_rate - Derive MDC clock based on provided AXI_CBB clk * @@ -6347,9 +3714,7 @@ static nve32_t eqos_config_mac_loopback( (nveu8_t *)addr + EQOS_CLOCK_CTRL_0); /* Write to MAC Configuration Register */ - eqos_core_safety_writel(osi_core, mcr_val, - (nveu8_t *)addr + EQOS_MAC_MCR, - EQOS_MAC_MCR_IDX); + osi_writela(osi_core, mcr_val, (nveu8_t *)addr + EQOS_MAC_MCR); return 0; } @@ -6363,10 +3728,10 @@ static nve32_t eqos_get_hw_features(struct osi_core_priv_data *const osi_core, nveu32_t mac_hfr2 = 0; nveu32_t mac_hfr3 = 0; - mac_hfr0 = eqos_read_reg(osi_core, EQOS_MAC_HFR0); - mac_hfr1 = eqos_read_reg(osi_core, EQOS_MAC_HFR1); - mac_hfr2 = eqos_read_reg(osi_core, EQOS_MAC_HFR2); - mac_hfr3 = eqos_read_reg(osi_core, EQOS_MAC_HFR3); + mac_hfr0 = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_HFR0); + mac_hfr1 = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_HFR1); + mac_hfr2 = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_HFR2); + mac_hfr3 = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_HFR3); hw_feat->mii_sel = ((mac_hfr0 >> EQOS_MAC_HFR0_MIISEL_SHIFT) & EQOS_MAC_HFR0_MIISEL_MASK); @@ -6496,8 +3861,8 @@ static nve32_t eqos_get_hw_features(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static int eqos_padctl_rx_pins(struct osi_core_priv_data *const osi_core, - unsigned int enable) +static nve32_t eqos_padctl_rx_pins(struct osi_core_priv_data *const osi_core, + nveu32_t enable) { nveu32_t value; void *pad_addr = osi_core->padctrl.padctrl_base; @@ -6532,7 +3897,7 @@ static int eqos_padctl_rx_pins(struct osi_core_priv_data *const osi_core, osi_writela(osi_core, value, (nveu8_t *)pad_addr + osi_core->padctrl.offset_rd3); } else { - value = osi_readla(osi_core, (unsigned char *)pad_addr + + value = osi_readla(osi_core, (nveu8_t *)pad_addr + osi_core->padctrl.offset_rx_ctl); value &= ~EQOS_PADCTL_EQOS_E_INPUT; osi_writela(osi_core, value, (nveu8_t *)pad_addr + @@ -6574,7 +3939,7 @@ static int eqos_padctl_rx_pins(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure. */ -static inline int poll_for_mac_tx_rx_idle(struct osi_core_priv_data *osi_core) +static inline nve32_t poll_for_mac_tx_rx_idle(struct osi_core_priv_data *osi_core) { nveu32_t retry = 0; nveu32_t mac_debug; @@ -6621,7 +3986,7 @@ static inline int poll_for_mac_tx_rx_idle(struct osi_core_priv_data *osi_core) * @retval negative value on failure. 
*/ -static int eqos_pre_pad_calibrate(struct osi_core_priv_data *const osi_core) +static nve32_t eqos_pre_pad_calibrate(struct osi_core_priv_data *const osi_core) { nve32_t ret = 0; nveu32_t value; @@ -6630,9 +3995,8 @@ static int eqos_pre_pad_calibrate(struct osi_core_priv_data *const osi_core) /* Read MAC IMR Register */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_IMR); value &= ~(EQOS_IMR_RGSMIIIE); - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MAC_IMR, EQOS_MAC_IMR_IDX); - eqos_stop_mac(osi_core); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_IMR); + hw_stop_mac(osi_core); ret = poll_for_mii_idle(osi_core); if (ret < 0) { goto error; @@ -6656,7 +4020,7 @@ static int eqos_pre_pad_calibrate(struct osi_core_priv_data *const osi_core) return ret; error: /* roll back on fail */ - eqos_start_mac(osi_core); + hw_start_mac(osi_core); if (osi_core->osd_ops.padctrl_mii_rx_pins != OSI_NULL) { (void)osi_core->osd_ops.padctrl_mii_rx_pins(osi_core->osd, OSI_ENABLE); @@ -6666,10 +4030,9 @@ error: /* Enable MAC RGSMIIIE - RGMII/SMII interrupts */ /* Read MAC IMR Register */ - value = osi_readl((unsigned char *)osi_core->base + EQOS_MAC_IMR); + value = osi_readl((nveu8_t *)osi_core->base + EQOS_MAC_IMR); value |= EQOS_IMR_RGSMIIIE; - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MAC_IMR, EQOS_MAC_IMR_IDX); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_IMR); return ret; } @@ -6721,15 +4084,15 @@ static nve32_t eqos_post_pad_calibrate( /* do nothing */ } } - eqos_start_mac(osi_core); + hw_start_mac(osi_core); /* Enable MAC RGSMIIIE - RGMII/SMII interrupts */ mac_imr |= EQOS_IMR_RGSMIIIE; - eqos_core_safety_writel(osi_core, mac_imr, (nveu8_t *)osi_core->base + - EQOS_MAC_IMR, EQOS_MAC_IMR_IDX); + osi_writela(osi_core, mac_imr, (nveu8_t *)osi_core->base + EQOS_MAC_IMR); return ret; } #endif /* UPDATED_PAD_CAL */ +#ifndef OSI_STRIPPED_LIB /** * @brief 
eqos_config_rss - Configure RSS * @@ -6739,15 +4102,17 @@ static nve32_t eqos_post_pad_calibrate( * * @retval -1 Always */ -static nve32_t eqos_config_rss(struct osi_core_priv_data *const osi_core) +static nve32_t eqos_config_rss(struct osi_core_priv_data *osi_core) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + (void) osi_core; + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "RSS not supported by EQOS\n", 0ULL); return -1; } +#endif /* !OSI_STRIPPED_LIB */ -#ifdef MACSEC_SUPPORT +#if defined(MACSEC_SUPPORT) && !defined(OSI_STRIPPED_LIB) /** * @brief eqos_config_for_macsec - Configure MAC according to macsec IAS * @@ -6777,9 +4142,9 @@ static void eqos_config_for_macsec(struct osi_core_priv_data *const osi_core, nveu32_t value = 0U, temp = 0U; if ((enable != OSI_ENABLE) && (enable != OSI_DISABLE)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Failed to config EQOS per MACSEC\n", 0ULL); - return; + goto done; } if (osi_core->mac_ver == OSI_EQOS_MAC_5_30) { /* stop MAC Tx */ @@ -6847,90 +4212,54 @@ static void eqos_config_for_macsec(struct osi_core_priv_data *const osi_core, OSI_LOG_ARG_HW_FAIL, "Error: osi_core->hw_feature is NULL\n", 0ULL); } +done: + return; } #endif /* MACSEC_SUPPORT */ -/** - * @brief eqos_get_core_safety_config - EQOS MAC safety configuration - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - */ -void *eqos_get_core_safety_config(void) -{ - return &eqos_core_safety_config; -} - void eqos_init_core_ops(struct core_ops *ops) { - ops->poll_for_swr = eqos_poll_for_swr; ops->core_init = eqos_core_init; - ops->core_deinit = eqos_core_deinit; - ops->start_mac = eqos_start_mac; - ops->stop_mac = eqos_stop_mac; ops->handle_common_intr = eqos_handle_common_intr; - ops->set_mode = eqos_set_mode; - ops->set_speed = eqos_set_speed; ops->pad_calibrate = eqos_pad_calibrate; - ops->config_fw_err_pkts = eqos_config_fw_err_pkts; - 
ops->config_rxcsum_offload = eqos_config_rxcsum_offload; - ops->config_mac_pkt_filter_reg = eqos_config_mac_pkt_filter_reg; ops->update_mac_addr_low_high_reg = eqos_update_mac_addr_low_high_reg; - ops->config_l3_l4_filter_enable = eqos_config_l3_l4_filter_enable; - ops->config_l3_filters = eqos_config_l3_filters; - ops->update_ip4_addr = eqos_update_ip4_addr; - ops->update_ip6_addr = eqos_update_ip6_addr; - ops->config_l4_filters = eqos_config_l4_filters; - ops->update_l4_port_no = eqos_update_l4_port_no; - ops->set_systime_to_mac = eqos_set_systime_to_mac; - ops->config_addend = eqos_config_addend; ops->adjust_mactime = eqos_adjust_mactime; - ops->config_tscr = eqos_config_tscr; - ops->config_ssir = eqos_config_ssir; ops->read_mmc = eqos_read_mmc; ops->write_phy_reg = eqos_write_phy_reg; ops->read_phy_reg = eqos_read_phy_reg; + ops->get_hw_features = eqos_get_hw_features; ops->read_reg = eqos_read_reg; ops->write_reg = eqos_write_reg; + ops->set_avb_algorithm = eqos_set_avb_algorithm; + ops->get_avb_algorithm = eqos_get_avb_algorithm; + ops->config_frp = eqos_config_frp; + ops->update_frp_entry = eqos_update_frp_entry; + ops->update_frp_nve = eqos_update_frp_nve; #ifdef MACSEC_SUPPORT ops->read_macsec_reg = eqos_read_macsec_reg; ops->write_macsec_reg = eqos_write_macsec_reg; +#ifndef OSI_STRIPPED_LIB + ops->macsec_config_mac = eqos_config_for_macsec; +#endif /* !OSI_STRIPPED_LIB */ #endif /* MACSEC_SUPPORT */ - ops->get_hw_features = eqos_get_hw_features; + ops->config_l3l4_filters = eqos_config_l3l4_filters; #ifndef OSI_STRIPPED_LIB ops->config_tx_status = eqos_config_tx_status; ops->config_rx_crc_check = eqos_config_rx_crc_check; ops->config_flow_control = eqos_config_flow_control; ops->config_arp_offload = eqos_config_arp_offload; ops->config_ptp_offload = eqos_config_ptp_offload; - ops->validate_regs = eqos_validate_core_regs; - ops->flush_mtl_tx_queue = eqos_flush_mtl_tx_queue; - ops->set_avb_algorithm = eqos_set_avb_algorithm; - ops->get_avb_algorithm = 
eqos_get_avb_algorithm; ops->config_vlan_filtering = eqos_config_vlan_filtering; ops->reset_mmc = eqos_reset_mmc; ops->configure_eee = eqos_configure_eee; - ops->save_registers = eqos_save_registers; - ops->restore_registers = eqos_restore_registers; ops->set_mdc_clk_rate = eqos_set_mdc_clk_rate; ops->config_mac_loopback = eqos_config_mac_loopback; -#endif /* !OSI_STRIPPED_LIB */ - ops->hw_config_est = eqos_hw_config_est; - ops->hw_config_fpe = eqos_hw_config_fpe; - ops->config_ptp_rxq = eqos_config_ptp_rxq; - ops->config_frp = eqos_config_frp; - ops->update_frp_entry = eqos_update_frp_entry; - ops->update_frp_nve = eqos_update_frp_nve; ops->config_rss = eqos_config_rss; -#ifdef MACSEC_SUPPORT - ops->macsec_config_mac = eqos_config_for_macsec; -#endif /* MACSEC_SUPPORT */ - ops->ptp_tsc_capture = eqos_ptp_tsc_capture; + ops->config_ptp_rxq = eqos_config_ptp_rxq; +#endif /* !OSI_STRIPPED_LIB */ #ifdef HSI_SUPPORT ops->core_hsi_configure = eqos_hsi_configure; + ops->core_hsi_inject_err = eqos_hsi_inject_err; #endif } diff --git a/osi/core/eqos_core.h b/osi/core/eqos_core.h index c3b503a..68000e8 100644 --- a/osi/core/eqos_core.h +++ b/osi/core/eqos_core.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -24,6 +24,95 @@ #define INCLUDED_EQOS_CORE_H #ifndef OSI_STRIPPED_LIB +#define EQOS_MAC_PFR 0x0008 +#define EQOS_MAC_LPI_CSR 0x00D0 +#define EQOS_MAC_LPI_TIMER_CTRL 0x00D4 +#define EQOS_MAC_LPI_EN_TIMER 0x00D8 +#define EQOS_MAC_RX_FLW_CTRL 0x0090 +#define EQOS_MAC_STNSR 0x0B0C +#define EQOS_MAC_STSR 0x0B08 +#define EQOS_MAC_MA0LR 0x0304 +#define EQOS_MAC_PIDR0 0x0BC4 +#define EQOS_MAC_PTO_CR 0x0BC0 +#define EQOS_MAC_PIDR1 0x0BC8 +#define EQOS_MAC_PIDR2 0x0BCC +#define EQOS_MAC_PMTCSR 0x00C0 +#define EQOS_MAC_QX_TX_FLW_CTRL(x) ((0x0004U * (x)) + 0x0070U) +#define EQOS_MAC_MA0HR 0x0300 +#define EQOS_4_10_MAC_ARPPA 0x0AE0 +#define EQOS_5_00_MAC_ARPPA 0x0210 +#define EQOS_CLOCK_CTRL_0 0x8000U +#define EQOS_APB_ERR_STATUS 0x8214U + +#define EQOS_MAC_PFR_VTFE OSI_BIT(16) +#define EQOS_MAC_PFR_IPFE OSI_BIT(20) +#define EQOS_MAC_PFR_IPFE_SHIFT 20U +#define EQOS_MAC_MA0HR_IDX 11U +#define EQOS_5_30_SID 0x3U +#define EQOS_5_30_SID_CH3 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_24) +#define EQOS_5_30_SID_CH2 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_16) +#define EQOS_5_30_SID_CH1 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_8) +#define EQOS_5_30_SID_CH7 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_24) +#define EQOS_5_30_SID_CH6 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_16) +#define EQOS_5_30_SID_CH5 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_8) +#define EQOS_5_30_ASID_CTRL_VAL ((EQOS_5_30_SID_CH3) |\ + (EQOS_5_30_SID_CH2) |\ + (EQOS_5_30_SID_CH1) |\ + (EQOS_5_30_SID)) +#define EQOS_5_30_ASID1_CTRL_VAL ((EQOS_5_30_SID_CH7) |\ + (EQOS_5_30_SID_CH6) |\ + (EQOS_5_30_SID_CH5) |\ + (EQOS_5_30_SID)) +#define EQOS_MAC_MA0HR_MASK 0xFFFFFU +#define EQOS_MAC_IMR_MASK 0x67039U +#define EQOS_MAC_HTR_MASK 0xFFFFFFFFU +#define EQOS_MAC_HTR0_IDX 2U +#define EQOS_MAC_HTR_REG(x) ((0x0004U * (x)) + 0x0010U) +#define EQOS_DMA_SBUS_MASK 0xDF1F3CFFU +#define 
EQOS_DMA_CHX_STATUS_FBE OSI_BIT(10) +#define EQOS_DMA_CHX_STATUS_TBU OSI_BIT(2) +#define EQOS_DMA_CHX_STATUS_RBU OSI_BIT(7) +#define EQOS_DMA_CHX_STATUS_RPS OSI_BIT(8) +#define EQOS_DMA_CHX_STATUS_RWT OSI_BIT(9) +#define EQOS_DMA_CHX_STATUS_TPS OSI_BIT(1) +#define EQOS_MAC_RQC0R_MASK 0xFFU +#define EQOS_MAC_QX_TX_FLW_CTRL_TFE OSI_BIT(1) +#define EQOS_MAC_QX_TXFC_MASK 0xFFFF00F2U +#define EQOS_MAC_Q0_TXFC_IDX 6U +#define EQOS_MAC_PTO_CR_ASYNCEN OSI_BIT(1) +#define EQOS_MAC_RQC1R_OMCBCQ OSI_BIT(28) +#define EQOS_MAC_PIDR_PID_MASK 0XFFFFU +#define EQOS_MAC_PFR_MASK 0x803107FFU +#define EQOS_MAC_PAUSE_TIME 0xFFFF0000U +#define EQOS_MAC_PAUSE_TIME_MASK 0xFFFF0000U +#define EQOS_MAC_MCR_MASK 0xFFFFFF7FU +#define EQOS_MAC_MA0LR_IDX 12U +#define EQOS_MAC_MA0LR_MASK 0xFFFFFFFFU +#define EQOS_MAC_PTO_CR_DN (OSI_BIT(15) | OSI_BIT(14) | \ + OSI_BIT(13) | OSI_BIT(12) | \ + OSI_BIT(11) | OSI_BIT(10) | \ + OSI_BIT(9) | OSI_BIT(8)) +#define EQOS_MAC_PTO_CR_DN_SHIFT 8U +#define EQOS_MAC_PTO_CR_APDREQEN OSI_BIT(2) +#define EQOS_MAC_PTO_CR_PTOEN OSI_BIT(0) + +#define EQOS_MCR_IPG_MASK 0x7000000U +#define EQOS_MCR_IPG_SHIFT 24U +#define EQOS_MCR_IPG 0x7U +#define EQOS_MAC_TCR_TSENMACADDR OSI_BIT(18) +#define EQOS_MAC_TCR_SNAPTYPSEL_SHIFT 16U +#define EQOS_MAC_TAR_IDX 15U +#define EQOS_MAC_SSIR_IDX 14U +#define EQOS_MAC_RX_FLW_CTRL_RFE OSI_BIT(0) +#define EQOS_MAC_TCR_MASK 0x1107FF03U +#define EQOS_MAC_TAR_MASK 0xFFFFFFFFU +#define EQOS_MAC_SSIR_MASK 0xFFFF00U +#define EQOS_MAC_RQC2R_MASK 0xFFFFFFFFU +#define EQOS_MAC_RQC1R_TPQC (OSI_BIT(22) | OSI_BIT(23)) +#define EQOS_MAC_RQC1R_TPQC0 OSI_BIT(22) +#define EQOS_MAC_RQC1R_PTPQ (OSI_BIT(6) | OSI_BIT(5) | \ + OSI_BIT(4)) +#define EQOS_MAC_RQC1R_PTPQ_SHIFT 4U /** * @addtogroup EQOS-MDC MDC Clock Selection defines * @@ -39,34 +128,116 @@ #define EQOS_CSR_300_500M 0x6 /* MDC = clk_csr/204 */ #define EQOS_CSR_500_800M 0x7 /* MDC = clk_csr/324 */ /** @} */ +#define EQOS_MAC_LPI_CSR_LPITE OSI_BIT(20) +#define EQOS_MAC_LPI_CSR_LPITXA OSI_BIT(19) 
+#define EQOS_MAC_LPI_CSR_PLS OSI_BIT(17) +#define EQOS_MAC_LPI_CSR_LPIEN OSI_BIT(16) #endif /* !OSI_STRIPPED_LIB */ +#define EQOS_MTL_EST_CONTROL 0x0C50 +#define EQOS_MTL_EST_OVERHEAD 0x0C54 +#define EQOS_MTL_EST_STATUS 0x0C58 +#define EQOS_MTL_EST_SCH_ERR 0x0C60 +#define EQOS_MTL_EST_FRMS_ERR 0x0C64 +#define EQOS_MTL_EST_ITRE 0x0C70 +#define EQOS_MTL_EST_GCL_CONTROL 0x0C80 +#define EQOS_MTL_EST_DATA 0x0C84 +#define EQOS_MTL_FPE_CTS 0x0C90 +#define EQOS_MTL_FPE_ADV 0x0C94 +#define EQOS_MTL_RXP_CS 0x0CA0 +#define EQOS_MTL_RXP_INTR_CS 0x0CA4 +#define EQOS_MTL_RXP_IND_CS 0x0CB0 +#define EQOS_MTL_RXP_IND_DATA 0x0CB4 +#define EQOS_MTL_TXQ_ETS_CR(x) ((0x0040U * (x)) + 0x0D10U) +#define EQOS_MTL_TXQ_ETS_SSCR(x) ((0x0040U * (x)) + 0x0D1CU) +#define EQOS_MTL_TXQ_ETS_HCR(x) ((0x0040U * (x)) + 0x0D20U) +#define EQOS_MTL_TXQ_ETS_LCR(x) ((0x0040U * (x)) + 0x0D24U) +#define EQOS_MTL_INTR_STATUS 0x0C20 +#define EQOS_MTL_OP_MODE 0x0C00 +#define EQOS_MAC_FPE_CTS 0x0234 +#define EQOS_IMR_FPEIE OSI_BIT(17) +#define EQOS_MTL_FRP_IE2_DCH_SHIFT 24U +#define EQOS_DMA_ISR_MTLIS OSI_BIT(16) /** - * @addtogroup EQOS-SIZE SIZE calculation helper Macros + * @addtogroup EQOS-MTL-FRP FRP Indirect Access register defines * - * @brief SIZE calculation defines + * @brief EQOS MTL FRP register defines * @{ */ -#define FIFO_SIZE_B(x) (x) -#define FIFO_SIZE_KB(x) ((x) * 1024U) +#define EQOS_MTL_FRP_READ_UDELAY 1U +#define EQOS_MTL_FRP_READ_RETRY 10000U + +/* FRP Control and Status register defines */ +#define EQOS_MTL_RXP_CS_RXPI OSI_BIT(31) +#define EQOS_MTL_RXP_CS_NPE (OSI_BIT(23) | OSI_BIT(22) | \ + OSI_BIT(21) | OSI_BIT(20) | \ + OSI_BIT(19) | OSI_BIT(18) | \ + OSI_BIT(17) | OSI_BIT(16)) +#define EQOS_MTL_RXP_CS_NPE_SHIFT 16U +#define EQOS_MTL_RXP_CS_NVE (OSI_BIT(7) | OSI_BIT(6) | \ + OSI_BIT(5) | OSI_BIT(4) | \ + OSI_BIT(3) | OSI_BIT(2) | \ + OSI_BIT(1) | OSI_BIT(0)) +/* Indirect register defines */ +#define EQOS_MTL_RXP_IND_CS_BUSY OSI_BIT(31) +#define EQOS_MTL_RXP_IND_CS_WRRDN OSI_BIT(16) 
+#define EQOS_MTL_RXP_IND_CS_ADDR (OSI_BIT(9) | OSI_BIT(8) | \ + OSI_BIT(7) | OSI_BIT(6) | \ + OSI_BIT(5) | OSI_BIT(4) | \ + OSI_BIT(3) | OSI_BIT(2) | \ + OSI_BIT(1) | OSI_BIT(0)) /** @} */ -/** - * @addtogroup EQOS-QUEUE QUEUE fifo size programmable values - * - * @brief Queue FIFO size programmable values - * @{ - */ -#define EQOS_256 0x00U -#define EQOS_512 0x01U -#define EQOS_1K 0x03U -#define EQOS_2K 0x07U -#define EQOS_4K 0x0FU -#define EQOS_8K 0x1FU -#define EQOS_9K 0x23U -#define EQOS_16K 0x3FU -#define EQOS_32K 0x7FU -#define EQOS_36K 0x8FU -/** @} */ +/* FRP Interrupt Control and Status register */ +#define EQOS_MTL_RXP_INTR_CS_PDRFIE OSI_BIT(19) +#define EQOS_MTL_RXP_INTR_CS_FOOVIE OSI_BIT(18) +#define EQOS_MTL_RXP_INTR_CS_NPEOVIE OSI_BIT(17) +#define EQOS_MTL_RXP_INTR_CS_NVEOVIE OSI_BIT(16) +#define EQOS_MTL_RXP_INTR_CS_PDRFIS OSI_BIT(3) +#define EQOS_MTL_RXP_INTR_CS_FOOVIS OSI_BIT(2) +#define EQOS_MTL_RXP_INTR_CS_NPEOVIS OSI_BIT(1) +#define EQOS_MTL_RXP_INTR_CS_NVEOVIS OSI_BIT(0) + +#ifndef OSI_STRIPPED_LIB +#define EQOS_RXQ_DMA_MAP0_MASK 0x13131313U +#define EQOS_MTL_TXQ_QW_MASK 0x1FFFFFU +#define EQOS_PAD_AUTO_CAL_CFG_MASK 0x7FFFFFFFU +#define EQOS_MTL_TXQ_OP_MODE_MASK 0xFF007EU +#define EQOS_MTL_RXQ_OP_MODE_MASK 0xFFFFFFBU +#define EQOS_MAC_RQC1R_MASK 0xF77077U +#endif /* !OSI_STRIPPED_LIB */ +#define EQOS_MTL_TXQ_ETS_SSCR_SSC_MASK 0x00003FFFU +#define EQOS_MTL_TXQ_ETS_QW_ISCQW_MASK 0x000FFFFFU +#define EQOS_MTL_TXQ_ETS_HCR_HC_MASK 0x1FFFFFFFU +#define EQOS_MTL_TXQ_ETS_LCR_LC_MASK 0x1FFFFFFFU +#define EQOS_MTL_TXQ_ETS_CR_CC OSI_BIT(3) +#define EQOS_MTL_TXQ_ETS_CR_AVALG OSI_BIT(2) +#define EQOS_MTL_TXQ_ETS_CR_CC_SHIFT 3U +#define EQOS_MTL_TXQ_ETS_CR_AVALG_SHIFT 2U +#define EQOS_MAC_RQC1R_FPRQ (OSI_BIT(26) | OSI_BIT(25) | \ + OSI_BIT(24)) +#define EQOS_MAC_RQC1R_FPRQ_SHIFT 24U +/* Indirect Instruction Table defines */ +#define EQOS_MTL_FRP_IE0(x) (((x) * 0x4U) + 0x0U) +#define EQOS_MTL_FRP_IE1(x) (((x) * 0x4U) + 0x1U) +#define EQOS_MTL_FRP_IE2(x) 
(((x) * 0x4U) + 0x2U) +#define EQOS_MTL_FRP_IE3(x) (((x) * 0x4U) + 0x3U) +#define EQOS_MTL_FRP_IE2_DCH (OSI_BIT(31) | OSI_BIT(30) | \ + OSI_BIT(29) | OSI_BIT(28) | \ + OSI_BIT(27) | OSI_BIT(26) | \ + OSI_BIT(25) | OSI_BIT(24)) +#define EQOS_MTL_FRP_IE2_OKI (OSI_BIT(23) | OSI_BIT(22) | \ + OSI_BIT(21) | OSI_BIT(20) | \ + OSI_BIT(19) | OSI_BIT(18) | \ + OSI_BIT(17) | OSI_BIT(16)) +#define EQOS_MTL_FRP_IE2_OKI_SHIFT 16U +#define EQOS_MTL_FRP_IE2_FO (OSI_BIT(13) | OSI_BIT(12) | \ + OSI_BIT(11) | OSI_BIT(10) | \ + OSI_BIT(9) | OSI_BIT(8)) +#define EQOS_MTL_FRP_IE2_FO_SHIFT 8U +#define EQOS_MTL_FRP_IE2_NC OSI_BIT(3) +#define EQOS_MTL_FRP_IE2_IM OSI_BIT(2) +#define EQOS_MTL_FRP_IE2_RF OSI_BIT(1) +#define EQOS_MTL_FRP_IE2_AF OSI_BIT(0) /** * @addtogroup EQOS-HW Hardware Register offsets @@ -76,63 +247,52 @@ */ #define EQOS_MAC_MCR 0x0000 #define EQOS_MAC_EXTR 0x0004 -#define EQOS_MAC_PFR 0x0008 -#define EQOS_MAC_WATCH 0x000C -#define EQOS_MAC_HTR_REG(x) ((0x0004U * (x)) + 0x0010U) #define EQOS_MAC_VLAN_TAG 0x0050 #define EQOS_MAC_VLANTIR 0x0060 -#define EQOS_MAC_QX_TX_FLW_CTRL(x) ((0x0004U * (x)) + 0x0070U) -#define EQOS_MAC_RX_FLW_CTRL 0x0090 #define EQOS_MAC_RQC0R 0x00A0 #define EQOS_MAC_RQC1R 0x00A4 #define EQOS_MAC_RQC2R 0x00A8 #define EQOS_MAC_ISR 0x00B0 #define EQOS_MAC_IMR 0x00B4 -#define EQOS_MAC_PMTCSR 0x00C0 -#define EQOS_MAC_LPI_CSR 0x00D0 -#define EQOS_MAC_LPI_TIMER_CTRL 0x00D4 -#define EQOS_MAC_LPI_EN_TIMER 0x00D8 #ifndef OSI_STRIPPED_LIB #define EQOS_MAC_1US_TIC_CNTR 0x00DC -#endif /* !OSI_STRIPPED_LIB */ #define EQOS_MAC_ANS 0x00E4 +#endif /* !OSI_STRIPPED_LIB */ #define EQOS_MAC_PCS 0x00F8 + +#ifdef UPDATED_PAD_CAL #define EQOS_MAC_DEBUG 0x0114 +#define EQOS_MAC_DEBUG_RPESTS OSI_BIT(0) +#define EQOS_MAC_DEBUG_TPESTS OSI_BIT(16) +#endif + #define EQOS_MAC_MDIO_ADDRESS 0x0200 #define EQOS_MAC_MDIO_DATA 0x0204 -#define EQOS_5_00_MAC_ARPPA 0x0210 -#define EQOS_MAC_CSR_SW_CTL 0x0230 -#define EQOS_MAC_FPE_CTS 0x0234 -#define EQOS_MAC_MA0HR 0x0300 #define 
EQOS_MAC_ADDRH(x) ((0x0008U * (x)) + 0x0300U) -#define EQOS_MAC_MA0LR 0x0304 #define EQOS_MAC_ADDRL(x) ((0x0008U * (x)) + 0x0304U) #define EQOS_MMC_CNTRL 0x0700 #define EQOS_MMC_TX_INTR_MASK 0x0710 #define EQOS_MMC_RX_INTR_MASK 0x070C #define EQOS_MMC_IPC_RX_INTR_MASK 0x0800 #define EQOS_MAC_L3L4_CTR(x) ((0x0030U * (x)) + 0x0900U) -#define EQOS_MAC_L4_ADR(x) ((0x0030U * (x)) + 0x0904U) -#define EQOS_MAC_L3_AD0R(x) ((0x0030U * (x)) + 0x0910U) #define EQOS_MAC_L3_AD1R(x) ((0x0030U * (x)) + 0x0914U) +#ifndef OSI_STRIPPED_LIB +#define EQOS_MAC_L3_AD0R(x) ((0x0030U * (x)) + 0x0910U) #define EQOS_MAC_L3_AD2R(x) ((0x0030U * (x)) + 0x0918U) #define EQOS_MAC_L3_AD3R(x) ((0x0030U * (x)) + 0x091CU) -#define EQOS_4_10_MAC_ARPPA 0x0AE0 +#define EQOS_MAC_L4_ADR(x) ((0x0030U * (x)) + 0x0904U) +#endif /* !OSI_STRIPPED_LIB */ #define EQOS_MAC_TCR 0x0B00 #define EQOS_MAC_SSIR 0x0B04 -#define EQOS_MAC_STSR 0x0B08 -#define EQOS_MAC_STNSR 0x0B0C #define EQOS_MAC_STSUR 0x0B10 #define EQOS_MAC_STNSUR 0x0B14 #define EQOS_MAC_TAR 0x0B18 -#define EQOS_MAC_PTO_CR 0x0BC0 -#define EQOS_MAC_PIDR0 0x0BC4 -#define EQOS_MAC_PIDR1 0x0BC8 -#define EQOS_MAC_PIDR2 0x0BCC #define EQOS_MAC_PPS_CTL 0x0B70 #define EQOS_DMA_BMR 0x1000 #define EQOS_DMA_SBUS 0x1004 #define EQOS_DMA_ISR 0x1008 +#define EQOS_PTP_CLK_SPEED 208333334U +#define EQOS_X_PTP_CLK_SPEED 312500000U /** @} */ /** @@ -141,36 +301,11 @@ * @brief EQOS MTL HW Register offsets * @{ */ -#define EQOS_MTL_OP_MODE 0x0C00 -#define EQOS_MTL_INTR_STATUS 0x0C20 #define EQOS_MTL_RXQ_DMA_MAP0 0x0C30 #define EQOS_MTL_RXQ_DMA_MAP1 0x0C34 -#define EQOS_MTL_EST_CONTROL 0x0C50 -#define EQOS_MTL_EST_OVERHEAD 0x0C54 -#define EQOS_MTL_EST_STATUS 0x0C58 -#define EQOS_MTL_EST_SCH_ERR 0x0C60 -#define EQOS_MTL_EST_FRMS_ERR 0x0C64 -#define EQOS_MTL_EST_FRMC_ERR 0x0C68 -#define EQOS_MTL_EST_ITRE 0x0C70 -#define EQOS_MTL_EST_GCL_CONTROL 0x0C80 -#define EQOS_MTL_EST_DATA 0x0C84 -#define EQOS_MTL_FPE_CTS 0x0C90 -#define EQOS_MTL_FPE_ADV 0x0C94 -#define EQOS_MTL_RXP_CS 
0x0CA0 -#define EQOS_MTL_RXP_INTR_CS 0x0CA4 -#define EQOS_MTL_RXP_DROP_CNT 0x0CA8 -#define EQOS_MTL_RXP_ERROR_CNT 0x0CAC -#define EQOS_MTL_RXP_IND_CS 0x0CB0 -#define EQOS_MTL_RXP_IND_DATA 0x0CB4 #define EQOS_MTL_CHX_TX_OP_MODE(x) ((0x0040U * (x)) + 0x0D00U) -#define EQOS_MTL_TXQ_DEBUG(x) ((0x0040U * (x)) + 0x0D08U) -#define EQOS_MTL_TXQ_ETS_CR(x) ((0x0040U * (x)) + 0x0D10U) #define EQOS_MTL_TXQ_QW(x) ((0x0040U * (x)) + 0x0D18U) -#define EQOS_MTL_TXQ_ETS_SSCR(x) ((0x0040U * (x)) + 0x0D1CU) -#define EQOS_MTL_TXQ_ETS_HCR(x) ((0x0040U * (x)) + 0x0D20U) -#define EQOS_MTL_TXQ_ETS_LCR(x) ((0x0040U * (x)) + 0x0D24U) #define EQOS_MTL_CHX_RX_OP_MODE(x) ((0x0040U * (x)) + 0x0D30U) -#define EQOS_MTL_RXQ_DEBUG(x) ((0x0040U * (x)) + 0x0D38U) /** @} */ /** @@ -179,8 +314,6 @@ * @brief EQOS Wrapper register offsets * @{ */ -#define EQOS_CLOCK_CTRL_0 0x8000U -#define EQOS_APB_ERR_STATUS 0x8214U #define EQOS_AXI_ASID_CTRL 0x8400U #define EQOS_AXI_ASID1_CTRL 0x8404U #define EQOS_PAD_CRTL 0x8800U @@ -189,16 +322,15 @@ #define EQOS_VIRT_INTR_APB_CHX_CNTRL(x) (0x8200U + ((x) * 4U)) #define VIRTUAL_APB_ERR_CTRL 0x8300 #define EQOS_WRAP_COMMON_INTR_ENABLE 0x8704 + +#ifdef HSI_SUPPORT #define EQOS_REGISTER_PARITY_ERR OSI_BIT(5) #define EQOS_CORE_CORRECTABLE_ERR OSI_BIT(4) #define EQOS_CORE_UNCORRECTABLE_ERR OSI_BIT(3) +#endif + #define EQOS_MAC_SBD_INTR OSI_BIT(2) #define EQOS_WRAP_COMMON_INTR_STATUS 0x8708 -#define EQOS_WRAP_SYNC_TSC_PTP_CAPTURE 0x800CU -#define EQOS_WRAP_TSC_CAPTURE_LOW 0x8010U -#define EQOS_WRAP_TSC_CAPTURE_HIGH 0x8014U -#define EQOS_WRAP_PTP_CAPTURE_LOW 0x8018U -#define EQOS_WRAP_PTP_CAPTURE_HIGH 0x801CU /** @} */ @@ -217,15 +349,13 @@ #define EQOS_PAD_AUTO_CAL_CFG_START OSI_BIT(31) #define EQOS_PAD_AUTO_CAL_STAT_ACTIVE OSI_BIT(31) #define EQOS_PAD_CRTL_E_INPUT_OR_E_PWRD OSI_BIT(31) -#define EQOS_MCR_IPG_MASK 0x7000000U -#define EQOS_MCR_IPG_SHIFT 24U -#define EQOS_MCR_IPG 0x7U +#define EQOS_PAD_CRTL_PD_OFFSET_MASK 0x1F00U +#define EQOS_PAD_CRTL_PU_OFFSET_MASK 0x1FU 
#define EQOS_MCR_IPC OSI_BIT(27) #define EQOS_MMC_CNTRL_CNTRST OSI_BIT(0) #define EQOS_MMC_CNTRL_RSTONRD OSI_BIT(2) #define EQOS_MMC_CNTRL_CNTPRST OSI_BIT(4) #define EQOS_MMC_CNTRL_CNTPRSTLVL OSI_BIT(5) -#define EQOS_MTL_QTOMR_FTQ OSI_BIT(0) #define EQOS_MTL_TSF OSI_BIT(1) #define EQOS_MTL_TXQEN OSI_BIT(3) #define EQOS_MTL_RSF OSI_BIT(5) @@ -242,12 +372,6 @@ #define EQOS_MCR_CST OSI_BIT(21) #define EQOS_MCR_GPSLCE OSI_BIT(23) #define EQOS_IMR_RGSMIIIE OSI_BIT(0) -#define EQOS_IMR_PCSLCHGIE OSI_BIT(1) -#define EQOS_IMR_PCSANCIE OSI_BIT(2) -#define EQOS_IMR_PMTIE OSI_BIT(4) -#define EQOS_IMR_LPIIE OSI_BIT(5) -#define EQOS_IMR_TXESIE OSI_BIT(13) -#define EQOS_IMR_FPEIE OSI_BIT(17) #define EQOS_MAC_PCS_LNKSTS OSI_BIT(19) #define EQOS_MAC_PCS_LNKMOD OSI_BIT(16) #define EQOS_MAC_PCS_LNKSPEED (OSI_BIT(17) | OSI_BIT(18)) @@ -260,15 +384,10 @@ #define EQOS_MAC_VLANTR_DOVLTC OSI_BIT(20) #define EQOS_MAC_VLANTR_ERIVLT OSI_BIT(27) #define EQOS_MAC_VLANTIRR_CSVL OSI_BIT(19) -#define EQOS_MAC_DEBUG_RPESTS OSI_BIT(0) -#define EQOS_MAC_DEBUG_TPESTS OSI_BIT(16) #define EQOS_DMA_SBUS_BLEN8 OSI_BIT(2) #define EQOS_DMA_SBUS_BLEN16 OSI_BIT(3) #define EQOS_DMA_SBUS_EAME OSI_BIT(11) -#define EQOS_DMA_BMR_SWR OSI_BIT(0) #define EQOS_DMA_BMR_DPSW OSI_BIT(8) -#define EQOS_MAC_RQC1R_TPQC (OSI_BIT(22) | OSI_BIT(23)) -#define EQOS_MAC_RQC1R_TPQC0 OSI_BIT(22) #define EQOS_MAC_RQC1R_MCBCQ (OSI_BIT(18) | OSI_BIT(17) |\ OSI_BIT(16)) #define EQOS_MAC_RQC1R_MCBCQ_SHIFT 16U @@ -276,162 +395,62 @@ #define EQOS_MAC_RQC1R_MCBCQ7 0x7U #define EQOS_MAC_RQC1R_MCBCQEN OSI_BIT(20) -#define EQOS_MAC_RQC1R_FPRQ (OSI_BIT(26) | OSI_BIT(25) | \ - OSI_BIT(24)) -#define EQOS_MAC_RQC1R_FPRQ_SHIFT 24U -#define EQOS_MAC_RQC1R_PTPQ (OSI_BIT(6) | OSI_BIT(5) | \ - OSI_BIT(4)) -#define EQOS_MAC_RQC1R_OMCBCQ OSI_BIT(28) -#define EQOS_MAC_RQC1R_PTPQ_SHIFT 4U -#define EQOS_MAC_PPS_CTL_PPSCTRL0 (OSI_BIT(3) | OSI_BIT(2) |\ - OSI_BIT(1) | OSI_BIT(0)) -#define EQOS_MTL_QTOMR_FTQ_LPOS OSI_BIT(0) -#define EQOS_DMA_ISR_MTLIS 
OSI_BIT(16) #define EQOS_DMA_ISR_MACIS OSI_BIT(17) + +#ifdef HSI_SUPPORT #define EQOS_DMA_ISR_TXSTSIS OSI_BIT(13) +#define EQOS_IMR_TXESIE OSI_BIT(13) +#endif + #define EQOS_MAC_ISR_RGSMIIS OSI_BIT(0) #define EQOS_MAC_IMR_FPEIS OSI_BIT(17) #define EQOS_MTL_TXQ_QW_ISCQW OSI_BIT(4) +#define EQOS_RXQ_EN_MASK (OSI_BIT(0) | OSI_BIT(1)) #define EQOS_DMA_SBUS_RD_OSR_LMT 0x001F0000U #define EQOS_DMA_SBUS_WR_OSR_LMT 0x1F000000U #define EQOS_MTL_TXQ_SIZE_SHIFT 16U #define EQOS_MTL_RXQ_SIZE_SHIFT 20U #ifndef OSI_STRIPPED_LIB #define EQOS_MAC_ENABLE_LM OSI_BIT(12) -#define EQOS_MAC_VLANTIRR_VLTI OSI_BIT(20) -#define EQOS_DMA_SBUS_BLEN4 OSI_BIT(1) -#define EQOS_IMR_LPIIE OSI_BIT(5) -#define EQOS_IMR_PCSLCHGIE OSI_BIT(1) -#define EQOS_IMR_PCSANCIE OSI_BIT(2) -#define EQOS_IMR_PMTIE OSI_BIT(4) -#define EQOS_MAC_ISR_LPIIS OSI_BIT(5) -#define EQOS_MAC_LPI_CSR_LPITE OSI_BIT(20) -#define EQOS_MAC_LPI_CSR_LPITXA OSI_BIT(19) -#define EQOS_MAC_LPI_CSR_PLS OSI_BIT(17) -#define EQOS_MAC_LPI_CSR_LPIEN OSI_BIT(16) #define EQOS_MCR_ARPEN OSI_BIT(31) #define EQOS_RX_CLK_SEL OSI_BIT(8) +#define EQOS_MTL_OP_MODE_DTXSTS OSI_BIT(1) +#define EQOS_MAC_VLAN_TR 0x0050U +#define EQOS_MAC_VLAN_TR_VTIM OSI_BIT(17) +#define EQOS_MAC_VLAN_TR_VTIM_SHIFT 17 +#define EQOS_MAC_VLAN_TR_VTHM OSI_BIT(25) +#define EQOS_MAC_STNSR_TSSS_MASK 0x7FFFFFFFU +#define EQOS_MAC_PFR_SHIFT 16 +#define EQOS_MTL_OP_MODE_DTXSTS OSI_BIT(1) +#define EQOS_MAC_EXTR_DCRCC OSI_BIT(16) #define EQOS_MTL_TXQ_ETS_SSCR_SSC_MASK 0x00003FFFU #define EQOS_MTL_TXQ_ETS_QW_ISCQW_MASK 0x000FFFFFU #define EQOS_MTL_TXQ_ETS_HCR_HC_MASK 0x1FFFFFFFU #define EQOS_MTL_TXQ_ETS_LCR_LC_MASK 0x1FFFFFFFU -#define EQOS_MTL_TXQ_ETS_CR_SLC_MASK (OSI_BIT(6) | OSI_BIT(5) | \ - OSI_BIT(4)) -#define EQOS_MTL_TXQ_ETS_CR_CC OSI_BIT(3) #define EQOS_MTL_TXQ_ETS_CR_AVALG OSI_BIT(2) -#define EQOS_MTL_TXQ_ETS_CR_CC_SHIFT 3U #define EQOS_MTL_TXQ_ETS_CR_AVALG_SHIFT 2U -#define EQOS_MTL_TXQEN_MASK (OSI_BIT(3) | OSI_BIT(2)) -#define EQOS_MTL_TXQEN_MASK_SHIFT 2U -#define 
EQOS_MTL_OP_MODE_DTXSTS OSI_BIT(1) -#define EQOS_MAC_VLAN_TR 0x0050U -#define EQOS_MAC_VLAN_TFR 0x0054U -#define EQOS_MAC_VLAN_HTR 0x0058U -#define EQOS_MAC_VLAN_TR_ETV OSI_BIT(16) -#define EQOS_MAC_VLAN_TR_VTIM OSI_BIT(17) -#define EQOS_MAC_VLAN_TR_VTIM_SHIFT 17 -#define EQOS_MAC_VLAN_TR_VTHM OSI_BIT(25) -#define EQOS_MAC_VLAN_TR_VL 0xFFFFU -#define EQOS_MAC_VLAN_HTR_VLHT 0xFFFFU -#define EQOS_MAC_STNSR_TSSS_MASK 0x7FFFFFFFU -#define EQOS_MAC_VLAN_TR_ETV_SHIFT 16U -#define EQOS_MAC_PFR_HUC OSI_BIT(1) -#define EQOS_MAC_PFR_HMC OSI_BIT(2) -#define EQOS_MAC_MAX_HTR_REG_LEN 8U -#define EQOS_MAC_L3L4_CTR_L3HSBM0 (OSI_BIT(6) | OSI_BIT(7) | \ - OSI_BIT(8) | OSI_BIT(9) | \ - OSI_BIT(10)) -#define EQOS_MAC_L3L4_CTR_L3HDBM0 (OSI_BIT(11) | OSI_BIT(12) | \ - OSI_BIT(13) | OSI_BIT(14) | \ - OSI_BIT(15)) -#define EQOS_MAC_PFR_SHIFT 16 -#define EQOS_MTL_RXQ_OP_MODE_FEP OSI_BIT(4) -#define EQOS_MTL_OP_MODE_FRPE OSI_BIT(15) -#define EQOS_MTL_OP_MODE_DTXSTS OSI_BIT(1) +#define EQOS_MTL_TXQ_ETS_CR_CC OSI_BIT(3) +#define EQOS_MTL_TXQ_ETS_CR_CC_SHIFT 3U #define EQOS_MAC_EXTR_PDC OSI_BIT(19) -#define EQOS_MTL_TXQ_DEBUG_TRCSTS 0x6U -#define EQOS_MTL_TXQ_DEBUG_TXQSTS OSI_BIT(4) -#define EQOS_MTL_RXQ_DEBUG_PRXQ 0x3FFF0000U -#define EQOS_MTL_RXQ_DEBUG_RXQSTS 0x30U -#define EQOS_MAC_EXTR_DCRCC OSI_BIT(16) #define EQOS_MAC_EXTR_EIPGEN OSI_BIT(24) #define EQOS_MAC_EXTR_EIPG_MASK 0x3E000000U #define EQOS_MAC_EXTR_EIPG_SHIFT 25U #define EQOS_MAC_EXTR_EIPG 0x3U #endif /* !OSI_STRIPPED_LIB */ -#define EQOS_MTL_RXQ_OP_MODE_FEP OSI_BIT(4) -#define EQOS_MAC_QX_TX_FLW_CTRL_TFE OSI_BIT(1) -#define EQOS_MAC_RX_FLW_CTRL_RFE OSI_BIT(0) -#define EQOS_MAC_PAUSE_TIME 0xFFFF0000U -#define EQOS_MAC_PAUSE_TIME_MASK 0xFFFF0000U +#define EQOS_MTL_TXQEN_MASK (OSI_BIT(3) | OSI_BIT(2)) +#define EQOS_MTL_TXQEN_MASK_SHIFT 2U +#define EQOS_MTL_OP_MODE_FRPE OSI_BIT(15) +#define EQOS_MAC_EXTR_PDC OSI_BIT(19) #define EQOS_MTL_RXQ_OP_MODE_EHFC OSI_BIT(7) #define EQOS_MTL_RXQ_OP_MODE_RFA_SHIFT 8U #define 
EQOS_MTL_RXQ_OP_MODE_RFA_MASK 0x00003F00U #define EQOS_MTL_RXQ_OP_MODE_RFD_SHIFT 14U #define EQOS_MTL_RXQ_OP_MODE_RFD_MASK 0x000FC000U -#define EQOS_MAC_PFR_PR OSI_BIT(0) -#define EQOS_MAC_PFR_DAIF OSI_BIT(3) -#define EQOS_MAC_PFR_PM OSI_BIT(4) -#define EQOS_MAC_PFR_DBF OSI_BIT(5) -#define EQOS_MAC_PFR_PCF (OSI_BIT(6) | OSI_BIT(7)) -#define EQOS_MAC_PFR_SAIF OSI_BIT(8) -#define EQOS_MAC_PFR_SAF OSI_BIT(9) -#define EQOS_MAC_PFR_HPF OSI_BIT(10) -#define EQOS_MAC_PFR_VTFE OSI_BIT(16) -#define EQOS_MAC_PFR_IPFE OSI_BIT(20) -#define EQOS_MAC_PFR_IPFE_SHIFT 20U -#define EQOS_MAC_PFR_DNTU OSI_BIT(21) -#define EQOS_MAC_PFR_RA OSI_BIT(31) +#ifndef OSI_STRIPPED_LIB #define EQOS_MAC_L4_SP_MASK 0x0000FFFFU #define EQOS_MAC_L4_DP_MASK 0xFFFF0000U #define EQOS_MAC_L4_DP_SHIFT 16 -#define EQOS_MAC_L3L4_CTR_L4SPM0 OSI_BIT(18) -#define EQOS_MAC_L3L4_CTR_L4SPIM0 OSI_BIT(19) -#define EQOS_MAC_L3L4_CTR_L4SPI_SHIFT 19 -#define EQOS_MAC_L3L4_CTR_L4DPM0 OSI_BIT(20) -#define EQOS_MAC_L3L4_CTR_L4DPIM0 OSI_BIT(21) -#define EQOS_MAC_L3L4_CTR_L4DPI_SHIFT 21 -#define EQOS_MAC_L3L4_CTR_L4PEN0 OSI_BIT(16) -#define EQOS_MAC_L3L4_CTR_L4PEN0_SHIFT 16 -#define EQOS_MAC_L3L4_CTR_L3PEN0 OSI_BIT(0) -#define EQOS_MAC_L3L4_CTR_L3SAM0 OSI_BIT(2) -#define EQOS_MAC_L3L4_CTR_L3SAIM0 OSI_BIT(3) -#define EQOS_MAC_L3L4_CTR_L3SAI_SHIFT 3 -#define EQOS_MAC_L3L4_CTR_L3DAM0 OSI_BIT(4) -#define EQOS_MAC_L3L4_CTR_L3DAIM0 OSI_BIT(5) -#define EQOS_MAC_L3L4_CTR_L3DAI_SHIFT 5 -#define EQOS_MAC_L3L4_CTR_DMCHEN0 OSI_BIT(28) -#define EQOS_MAC_L3L4_CTR_DMCHEN0_SHIFT 28 -#define EQOS_MAC_L3L4_CTR_DMCHN0 (OSI_BIT(24) | OSI_BIT(25) | \ - OSI_BIT(26) | OSI_BIT(27)) -#define EQOS_MAC_L3L4_CTR_DMCHN0_SHIFT 24 -#define EQOS_MAC_L3_IP6_CTRL_CLEAR (EQOS_MAC_L3L4_CTR_L3SAM0 | \ - EQOS_MAC_L3L4_CTR_L3SAIM0 | \ - EQOS_MAC_L3L4_CTR_L3DAM0 | \ - EQOS_MAC_L3L4_CTR_L3DAIM0 | \ - EQOS_MAC_L3L4_CTR_DMCHEN0 | \ - EQOS_MAC_L3L4_CTR_DMCHN0) -#define EQOS_MAC_L3_IP4_SA_CTRL_CLEAR (EQOS_MAC_L3L4_CTR_L3SAM0 | \ - EQOS_MAC_L3L4_CTR_L3SAIM0 | \ - 
EQOS_MAC_L3L4_CTR_DMCHEN0 | \ - EQOS_MAC_L3L4_CTR_DMCHN0) -#define EQOS_MAC_L3_IP4_DA_CTRL_CLEAR (EQOS_MAC_L3L4_CTR_L3DAM0 | \ - EQOS_MAC_L3L4_CTR_L3DAIM0 | \ - EQOS_MAC_L3L4_CTR_DMCHEN0 | \ - EQOS_MAC_L3L4_CTR_DMCHN0) -#define EQOS_MAC_L4_SP_CTRL_CLEAR (EQOS_MAC_L3L4_CTR_L4SPM0 | \ - EQOS_MAC_L3L4_CTR_L4SPIM0 | \ - EQOS_MAC_L3L4_CTR_DMCHEN0 | \ - EQOS_MAC_L3L4_CTR_DMCHN0) -#define EQOS_MAC_L4_DP_CTRL_CLEAR (EQOS_MAC_L3L4_CTR_L4DPM0 | \ - EQOS_MAC_L3L4_CTR_L4DPIM0 | \ - EQOS_MAC_L3L4_CTR_DMCHEN0 | \ - EQOS_MAC_L3L4_CTR_DMCHN0) -#define EQOS_MAC_L3L4_CTRL_ALL (EQOS_MAC_L3_IP6_CTRL_CLEAR | \ - EQOS_MAC_L3_IP4_SA_CTRL_CLEAR | \ - EQOS_MAC_L3_IP4_DA_CTRL_CLEAR | \ - EQOS_MAC_L4_SP_CTRL_CLEAR | \ - EQOS_MAC_L4_DP_CTRL_CLEAR) +#endif /* !OSI_STRIPPED_LIB */ #define EQOS_MAC_ADDRH_DCS (OSI_BIT(23) | OSI_BIT(22) | \ OSI_BIT(21) | OSI_BIT(20) | \ OSI_BIT(19) | OSI_BIT(18) | \ @@ -448,26 +467,8 @@ #define EQOS_MAC_ADDRH_AE OSI_BIT(31) #define EQOS_MAC_RQC2_PSRQ_MASK ((nveu32_t)0xFF) #define EQOS_MAC_RQC2_PSRQ_SHIFT 8U -#define EQOS_MAC_VLAN_TR_ETV_SHIFT 16U -#define EQOS_MAC_MAX_HTR_REG_LEN 8U -#define EQOS_MAC_TCR_TSENMACADDR OSI_BIT(18) -#define EQOS_MAC_TCR_SNAPTYPSEL_SHIFT 16U -#define EQOS_MAC_TCR_TSCTRLSSR OSI_BIT(9) -#define EQOS_MAC_TCR_TSADDREG OSI_BIT(5) -#define EQOS_MAC_TCR_TSINIT OSI_BIT(2) #define EQOS_MAC_TCR_TSUPDT OSI_BIT(3) -#define EQOS_MAC_TCR_TSCFUPDT OSI_BIT(1) -#define EQOS_MAC_PTO_CR_DN (OSI_BIT(15) | OSI_BIT(14) | \ - OSI_BIT(13) | OSI_BIT(12) | \ - OSI_BIT(11) | OSI_BIT(10) | \ - OSI_BIT(9) | OSI_BIT(8)) -#define EQOS_MAC_PTO_CR_DN_SHIFT 8U -#define EQOS_MAC_PTO_CR_APDREQEN OSI_BIT(2) -#define EQOS_MAC_PTO_CR_ASYNCEN OSI_BIT(1) -#define EQOS_MAC_PTO_CR_PTOEN OSI_BIT(0) -#define EQOS_MAC_PIDR_PID_MASK 0XFFFFU #define EQOS_MAC_STNSUR_ADDSUB_SHIFT 31U -#define EQOS_MAC_SSIR_SSINC_SHIFT 16U #define EQOS_MAC_GMIIDR_GD_WR_MASK 0xFFFF0000U #define EQOS_MAC_GMIIDR_GD_MASK 0xFFFFU #define EQOS_MDIO_PHY_ADDR_SHIFT 21U @@ -485,12 +486,6 @@ #define 
EQOS_MDIO_DATA_REG_DEV_ADDR_SHIFT 16U #define EQOS_DMA_CHAN_INTR_STATUS 0xFU -#define EQOS_DMA_CHX_STATUS_TPS OSI_BIT(1) -#define EQOS_DMA_CHX_STATUS_TBU OSI_BIT(2) -#define EQOS_DMA_CHX_STATUS_RBU OSI_BIT(7) -#define EQOS_DMA_CHX_STATUS_RPS OSI_BIT(8) -#define EQOS_DMA_CHX_STATUS_RWT OSI_BIT(9) -#define EQOS_DMA_CHX_STATUS_FBE OSI_BIT(10) #define EQOS_ASID_CTRL_SHIFT_24 24U #define EQOS_ASID_CTRL_SHIFT_16 16U @@ -511,21 +506,6 @@ (TEGRA_SID_EQOS_CH6) |\ (TEGRA_SID_EQOS_CH5) |\ (TEGRA_SID_EQOS)) -#define EQOS_5_30_SID 0x3U -#define EQOS_5_30_SID_CH3 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_24) -#define EQOS_5_30_SID_CH2 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_16) -#define EQOS_5_30_SID_CH1 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_8) -#define EQOS_5_30_ASID_CTRL_VAL ((EQOS_5_30_SID_CH3) |\ - (EQOS_5_30_SID_CH2) |\ - (EQOS_5_30_SID_CH1) |\ - (EQOS_5_30_SID)) -#define EQOS_5_30_SID_CH7 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_24) -#define EQOS_5_30_SID_CH6 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_16) -#define EQOS_5_30_SID_CH5 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_8) -#define EQOS_5_30_ASID1_CTRL_VAL ((EQOS_5_30_SID_CH7) |\ - (EQOS_5_30_SID_CH6) |\ - (EQOS_5_30_SID_CH5) |\ - (EQOS_5_30_SID)) #define EQOS_MMC_INTR_DISABLE 0xFFFFFFFFU /* MAC FPE control/statusOSI_BITmap */ @@ -534,19 +514,8 @@ #define EQOS_MAC_FPE_CTS_TVER OSI_BIT(18) #define EQOS_MAC_FPE_CTS_RRSP OSI_BIT(17) #define EQOS_MAC_FPE_CTS_RVER OSI_BIT(16) -#define EQOS_MAC_FPE_CTS_SVER OSI_BIT(1) #define EQOS_MAC_FPE_CTS_SRSP OSI_BIT(2) -/* MTL_FPE_CTRL_STS */ -#define EQOS_MTL_FPE_CTS_PEC (OSI_BIT(8) | OSI_BIT(9) | \ - OSI_BIT(10) | OSI_BIT(11) | \ - OSI_BIT(12) | OSI_BIT(13) | \ - OSI_BIT(14) | OSI_BIT(15)) -#define EQOS_MTL_FPE_CTS_PEC_SHIFT 8U -#define EQOS_MTL_FPE_CTS_PEC_MAX_SHIFT 16U -/* MTL FPE adv registers */ -#define EQOS_MTL_FPE_ADV_HADV_MASK (0xFFFFU) -#define EQOS_MTL_FPE_ADV_HADV_VAL 100U /* MTL_EST_CONTROL */ #define EQOS_MTL_EST_CONTROL_PTOV (OSI_BIT(24) | OSI_BIT(25) | \ OSI_BIT(26) | 
OSI_BIT(27) | \ @@ -563,19 +532,10 @@ #define EQOS_MTL_EST_CONTROL_CTOV_SHIFT 12U #define EQOS_MTL_EST_CTOV_RECOMMEND 94U #define EQOS_8PTP_CYCLE 40U -#ifdef MACSEC_SUPPORT -/* MACSEC Recommended value*/ -#define EQOS_MTL_EST_CTOV_MACSEC_RECOMMEND 758U -#endif /* MACSEC_SUPPORT */ -#define EQOS_MTL_EST_CONTROL_TILS (OSI_BIT(8) | OSI_BIT(9) | \ - OSI_BIT(10)) #define EQOS_MTL_EST_CONTROL_LCSE (OSI_BIT(6) | OSI_BIT(5)) -#define EQOS_MTL_EST_CONTROL_LCSE_SHIFT 5U #define EQOS_MTL_EST_CONTROL_LCSE_VAL 0U #define EQOS_MTL_EST_CONTROL_DFBS OSI_BIT(5) #define EQOS_MTL_EST_CONTROL_DDBF OSI_BIT(4) -#define EQOS_MTL_EST_CONTROL_QHLBF OSI_BIT(3) -#define EQOS_MTL_EST_CONTROL_SSWL OSI_BIT(1) #define EQOS_MTL_EST_CONTROL_EEST OSI_BIT(0) #define EQOS_MTL_EST_OVERHEAD_OVHD (OSI_BIT(5) | OSI_BIT(4) | \ OSI_BIT(3) | OSI_BIT(2) | \ @@ -583,28 +543,18 @@ #define EQOS_MTL_EST_OVERHEAD_RECOMMEND 0x17U /* EST GCL controlOSI_BITmap */ #define EQOS_MTL_EST_ADDR_SHIFT 8U -#define EQOS_MTL_EST_ADDR_MASK (OSI_BIT(8) | OSI_BIT(9) | \ - OSI_BIT(10) | OSI_BIT(11) | \ - OSI_BIT(12) | OSI_BIT(13) | \ - OSI_BIT(14) | OSI_BIT(15) | \ - OSI_BIT(16) | OSI_BIT(17) | \ - OSI_BIT(18) | OSI_BIT(19)) -#define EQOS_MTL_EST_SRWO OSI_BIT(0) -#define EQOS_MTL_EST_GCRR OSI_BIT(2) -#define EQOS_MTL_EST_ERR0 OSI_BIT(20) /* EST GCRA addresses */ -#define EQOS_MTL_EST_BTR_LOW ((unsigned int)0x0 << \ +#define EQOS_MTL_EST_BTR_LOW ((nveu32_t)0x0 << \ EQOS_MTL_EST_ADDR_SHIFT) -#define EQOS_MTL_EST_BTR_HIGH ((unsigned int)0x1 << \ +#define EQOS_MTL_EST_BTR_HIGH ((nveu32_t)0x1 << \ EQOS_MTL_EST_ADDR_SHIFT) -#define EQOS_MTL_EST_CTR_LOW ((unsigned int)0x2 << \ +#define EQOS_MTL_EST_CTR_LOW ((nveu32_t)0x2 << \ EQOS_MTL_EST_ADDR_SHIFT) -#define EQOS_MTL_EST_CTR_HIGH ((unsigned int)0x3 << \ +#define EQOS_MTL_EST_CTR_HIGH ((nveu32_t)0x3 << \ EQOS_MTL_EST_ADDR_SHIFT) -#define EQOS_MTL_EST_CTR_HIGH_MAX 0xFFU -#define EQOS_MTL_EST_TER ((unsigned int)0x4 << \ +#define EQOS_MTL_EST_TER ((nveu32_t)0x4 << \ 
EQOS_MTL_EST_ADDR_SHIFT) -#define EQOS_MTL_EST_LLR ((unsigned int)0x5 << \ +#define EQOS_MTL_EST_LLR ((nveu32_t)0x5 << \ EQOS_MTL_EST_ADDR_SHIFT) /*EST MTL interrupt STATUS and ERR*/ #define EQOS_MTL_IS_ESTIS OSI_BIT(18) @@ -614,295 +564,18 @@ #define EQOS_MTL_EST_STATUS_HLBF OSI_BIT(2) #define EQOS_MTL_EST_STATUS_BTRE OSI_BIT(1) #define EQOS_MTL_EST_STATUS_SWLC OSI_BIT(0) -#define EQOS_MTL_EST_ITRE_CGCE OSI_BIT(4) -#define EQOS_MTL_EST_ITRE_IEHS OSI_BIT(3) -#define EQOS_MTL_EST_ITRE_IEHF OSI_BIT(2) -#define EQOS_MTL_EST_ITRE_IEBE OSI_BIT(1) -#define EQOS_MTL_EST_ITRE_IECC OSI_BIT(0) +#if defined(MACSEC_SUPPORT) && !defined(OSI_STRIPPED_LIB) +/* MACSEC Recommended value*/ +#define EQOS_MTL_EST_CTOV_MACSEC_RECOMMEND 758U +#endif /* MACSEC_SUPPORT */ +#ifdef UPDATED_PAD_CAL /* EQOS RGMII Rx padctrl registers E_INPUT bit */ -#define EQOS_PADCTL_EQOS_E_INPUT OSI_BIT(6) - +#define EQOS_PADCTL_EQOS_E_INPUT OSI_BIT(6) +#endif /** @} */ void update_ehfc_rfa_rfd(nveu32_t rx_fifo, nveu32_t *value); -/** - * @addtogroup EQOS-Safety-Register EQOS Safety Register Mask - * - * @brief EQOS HW register masks and index - * @{ - */ -#define EQOS_MAC_MCR_MASK 0xFFFFFF7FU -#define EQOS_MAC_PFR_MASK 0x803107FFU -#define EQOS_MAC_HTR_MASK 0xFFFFFFFFU -#define EQOS_MAC_QX_TXFC_MASK 0xFFFF00F2U -#define EQOS_MAC_RQC0R_MASK 0xFFU -#define EQOS_MAC_RQC1R_MASK 0xF77077U -#define EQOS_MAC_RQC2R_MASK 0xFFFFFFFFU -#define EQOS_MAC_IMR_MASK 0x67039U -#define EQOS_MAC_MA0HR_MASK 0xFFFFFU -#define EQOS_MAC_MA0LR_MASK 0xFFFFFFFFU -#define EQOS_MAC_TCR_MASK 0x1107FF03U -#define EQOS_MAC_SSIR_MASK 0xFFFF00U -#define EQOS_MAC_TAR_MASK 0xFFFFFFFFU -#define EQOS_RXQ_DMA_MAP0_MASK 0x13131313U -#define EQOS_RXQ_EN_MASK (OSI_BIT(0) | OSI_BIT(1)) -#define EQOS_MTL_TXQ_OP_MODE_MASK 0xFF007EU -#define EQOS_MTL_TXQ_QW_MASK 0x1FFFFFU -#define EQOS_MTL_RXQ_OP_MODE_MASK 0xFFFFFFBU -#define EQOS_PAD_AUTO_CAL_CFG_MASK 0x7FFFFFFFU -#define EQOS_DMA_SBUS_MASK 0xDF1F3CFFU - -/* To add new registers to validate,append 
at end of this list and increment - * EQOS_MAX_CORE_SAFETY_REGS. - * Using macro instead of enum due to misra error. - */ -#define EQOS_MAC_MCR_IDX 0U -#define EQOS_MAC_PFR_IDX 1U -#define EQOS_MAC_HTR0_IDX 2U -#define EQOS_MAC_HTR1_IDX 3U -#define EQOS_MAC_HTR2_IDX 4U -#define EQOS_MAC_HTR3_IDX 5U -#define EQOS_MAC_Q0_TXFC_IDX 6U -#define EQOS_MAC_RQC0R_IDX 7U -#define EQOS_MAC_RQC1R_IDX 8U -#define EQOS_MAC_RQC2R_IDX 9U -#define EQOS_MAC_IMR_IDX 10U -#define EQOS_MAC_MA0HR_IDX 11U -#define EQOS_MAC_MA0LR_IDX 12U -#define EQOS_MAC_TCR_IDX 13U -#define EQOS_MAC_SSIR_IDX 14U -#define EQOS_MAC_TAR_IDX 15U -#define EQOS_PAD_AUTO_CAL_CFG_IDX 16U -#define EQOS_MTL_RXQ_DMA_MAP0_IDX 17U -#define EQOS_MTL_CH0_TX_OP_MODE_IDX 18U -#define EQOS_MTL_CH1_TX_OP_MODE_IDX 19U -#define EQOS_MTL_CH2_TX_OP_MODE_IDX 20U -#define EQOS_MTL_CH3_TX_OP_MODE_IDX 21U -#define EQOS_MTL_CH4_TX_OP_MODE_IDX 22U -#define EQOS_MTL_CH5_TX_OP_MODE_IDX 23U -#define EQOS_MTL_CH6_TX_OP_MODE_IDX 24U -#define EQOS_MTL_CH7_TX_OP_MODE_IDX 25U -#define EQOS_MTL_TXQ0_QW_IDX 26U -#define EQOS_MTL_TXQ1_QW_IDX 27U -#define EQOS_MTL_TXQ2_QW_IDX 28U -#define EQOS_MTL_TXQ3_QW_IDX 29U -#define EQOS_MTL_TXQ4_QW_IDX 30U -#define EQOS_MTL_TXQ5_QW_IDX 31U -#define EQOS_MTL_TXQ6_QW_IDX 32U -#define EQOS_MTL_TXQ7_QW_IDX 33U -#define EQOS_MTL_CH0_RX_OP_MODE_IDX 34U -#define EQOS_MTL_CH1_RX_OP_MODE_IDX 35U -#define EQOS_MTL_CH2_RX_OP_MODE_IDX 36U -#define EQOS_MTL_CH3_RX_OP_MODE_IDX 37U -#define EQOS_MTL_CH4_RX_OP_MODE_IDX 38U -#define EQOS_MTL_CH5_RX_OP_MODE_IDX 39U -#define EQOS_MTL_CH6_RX_OP_MODE_IDX 40U -#define EQOS_MTL_CH7_RX_OP_MODE_IDX 41U -#define EQOS_MTL_CH8_RX_OP_MODE_IDX 42U -#define EQOS_DMA_SBUS_IDX 43U -#define EQOS_MTL_RXQ_DMA_MAP1_IDX 44U -#define EQOS_MAX_CORE_SAFETY_REGS 45U -/** @} */ - -/** - * @addtogroup EQOS-MTL FRP Indirect Access register defines - * - * @brief EQOS MTL register offsets - * @{ - */ -#define EQOS_MTL_FRP_READ_UDELAY 1U -#define EQOS_MTL_FRP_READ_RETRY 10000U - -/* FRP Control and 
Status register defines */ -#define EQOS_MTL_RXP_CS_RXPI OSI_BIT(31) -#define EQOS_MTL_RXP_CS_NPE (OSI_BIT(23) | OSI_BIT(22) | \ - OSI_BIT(21) | OSI_BIT(20) | \ - OSI_BIT(19) | OSI_BIT(18) | \ - OSI_BIT(17) | OSI_BIT(16)) -#define EQOS_MTL_RXP_CS_NPE_SHIFT 16U -#define EQOS_MTL_RXP_CS_NVE (OSI_BIT(7) | OSI_BIT(6) | \ - OSI_BIT(5) | OSI_BIT(4) | \ - OSI_BIT(3) | OSI_BIT(2) | \ - OSI_BIT(1) | OSI_BIT(0)) -/* FRP Interrupt Control and Status register */ -#define EQOS_MTL_RXP_INTR_CS_PDRFIE OSI_BIT(19) -#define EQOS_MTL_RXP_INTR_CS_FOOVIE OSI_BIT(18) -#define EQOS_MTL_RXP_INTR_CS_NPEOVIE OSI_BIT(17) -#define EQOS_MTL_RXP_INTR_CS_NVEOVIE OSI_BIT(16) -#define EQOS_MTL_RXP_INTR_CS_PDRFIS OSI_BIT(3) -#define EQOS_MTL_RXP_INTR_CS_FOOVIS OSI_BIT(2) -#define EQOS_MTL_RXP_INTR_CS_NPEOVIS OSI_BIT(1) -#define EQOS_MTL_RXP_INTR_CS_NVEOVIS OSI_BIT(0) -/* Indirect Instruction Table defines */ -#define EQOS_MTL_FRP_IE0(x) ((x) * 0x4U + 0x0U) -#define EQOS_MTL_FRP_IE1(x) ((x) * 0x4U + 0x1U) -#define EQOS_MTL_FRP_IE2(x) ((x) * 0x4U + 0x2U) -#define EQOS_MTL_FRP_IE3(x) ((x) * 0x4U + 0x3U) -#define EQOS_MTL_FRP_IE2_DCH (OSI_BIT(31) | OSI_BIT(30) | \ - OSI_BIT(29) | OSI_BIT(28) | \ - OSI_BIT(27) | OSI_BIT(26) | \ - OSI_BIT(25) | OSI_BIT(24)) -#define EQOS_MTL_FRP_IE2_DCH_SHIFT 24U -#define EQOS_MTL_FRP_IE2_DCH_MASK 0xFFU -#define EQOS_MTL_FRP_IE2_OKI (OSI_BIT(23) | OSI_BIT(22) | \ - OSI_BIT(21) | OSI_BIT(20) | \ - OSI_BIT(19) | OSI_BIT(18) | \ - OSI_BIT(17) | OSI_BIT(16)) -#define EQOS_MTL_FRP_IE2_OKI_SHIFT 16U -#define EQOS_MTL_FRP_IE2_FO (OSI_BIT(13) | OSI_BIT(12) | \ - OSI_BIT(11) | OSI_BIT(10) | \ - OSI_BIT(9) | OSI_BIT(8)) -#define EQOS_MTL_FRP_IE2_FO_SHIFT 8U -#define EQOS_MTL_FRP_IE2_NC OSI_BIT(3) -#define EQOS_MTL_FRP_IE2_IM OSI_BIT(2) -#define EQOS_MTL_FRP_IE2_RF OSI_BIT(1) -#define EQOS_MTL_FRP_IE2_AF OSI_BIT(0) -/* Indirect register defines */ -#define EQOS_MTL_RXP_IND_CS_BUSY OSI_BIT(31) -#define EQOS_MTL_RXP_IND_CS_RXPEIEC (OSI_BIT(22) | OSI_BIT(21)) -#define 
EQOS_MTL_RXP_IND_CS_RXPEIEE OSI_BIT(20) -#define EQOS_MTL_RXP_IND_CS_WRRDN OSI_BIT(16) -#define EQOS_MTL_RXP_IND_CS_ADDR (OSI_BIT(9) | OSI_BIT(8) | \ - OSI_BIT(7) | OSI_BIT(6) | \ - OSI_BIT(5) | OSI_BIT(4) | \ - OSI_BIT(3) | OSI_BIT(2) | \ - OSI_BIT(1) | OSI_BIT(0)) -/** @} */ - -/** - * @brief core_func_safety - Struct used to store last written values of - * critical core HW registers. - */ -struct core_func_safety { - /** Array of reg MMIO addresses (base of EQoS + offset of reg) */ - void *reg_addr[EQOS_MAX_CORE_SAFETY_REGS]; - /** Array of bit-mask value of each corresponding reg - * (used to ignore self-clearing/reserved bits in reg) */ - nveu32_t reg_mask[EQOS_MAX_CORE_SAFETY_REGS]; - /** Array of value stored in each corresponding register */ - nveu32_t reg_val[EQOS_MAX_CORE_SAFETY_REGS]; - /** OSI lock variable used to protect writes to reg while - * validation is in-progress */ - nveu32_t core_safety_lock; -}; - -/** - * @addtogroup EQOS_HW EQOS HW BACKUP registers - * - * @brief Definitions related to taking backup of EQOS core registers. - * @{ - */ - -/* Hardware Register offsets to be backed up during suspend. - * - * Do not change the order of these macros. To add new registers to be - * backed up, append to end of list before EQOS_MAX_MAC_BAK_IDX, and - * update EQOS_MAX_MAC_BAK_IDX based on new macro. 
- */ -#define EQOS_MAC_MCR_BAK_IDX 0U -#define EQOS_MAC_EXTR_BAK_IDX ((EQOS_MAC_MCR_BAK_IDX + 1U)) -#define EQOS_MAC_PFR_BAK_IDX ((EQOS_MAC_EXTR_BAK_IDX + 1U)) -#define EQOS_MAC_VLAN_TAG_BAK_IDX ((EQOS_MAC_PFR_BAK_IDX + 1U)) -#define EQOS_MAC_VLANTIR_BAK_IDX ((EQOS_MAC_VLAN_TAG_BAK_IDX + 1U)) -#define EQOS_MAC_RX_FLW_CTRL_BAK_IDX ((EQOS_MAC_VLANTIR_BAK_IDX + 1U)) -#define EQOS_MAC_RQC0R_BAK_IDX ((EQOS_MAC_RX_FLW_CTRL_BAK_IDX + 1U)) -#define EQOS_MAC_RQC1R_BAK_IDX ((EQOS_MAC_RQC0R_BAK_IDX + 1U)) -#define EQOS_MAC_RQC2R_BAK_IDX ((EQOS_MAC_RQC1R_BAK_IDX + 1U)) -#define EQOS_MAC_ISR_BAK_IDX ((EQOS_MAC_RQC2R_BAK_IDX + 1U)) -#define EQOS_MAC_IMR_BAK_IDX ((EQOS_MAC_ISR_BAK_IDX + 1U)) -#define EQOS_MAC_PMTCSR_BAK_IDX ((EQOS_MAC_IMR_BAK_IDX + 1U)) -#define EQOS_MAC_LPI_CSR_BAK_IDX ((EQOS_MAC_PMTCSR_BAK_IDX + 1U)) -#define EQOS_MAC_LPI_TIMER_CTRL_BAK_IDX ((EQOS_MAC_LPI_CSR_BAK_IDX + 1U)) -#define EQOS_MAC_LPI_EN_TIMER_BAK_IDX ((EQOS_MAC_LPI_TIMER_CTRL_BAK_IDX + 1U)) -#define EQOS_MAC_ANS_BAK_IDX ((EQOS_MAC_LPI_EN_TIMER_BAK_IDX + 1U)) -#define EQOS_MAC_PCS_BAK_IDX ((EQOS_MAC_ANS_BAK_IDX + 1U)) -#define EQOS_5_00_MAC_ARPPA_BAK_IDX ((EQOS_MAC_PCS_BAK_IDX + 1U)) -#define EQOS_MMC_CNTRL_BAK_IDX ((EQOS_5_00_MAC_ARPPA_BAK_IDX + 1U)) -#define EQOS_4_10_MAC_ARPPA_BAK_IDX ((EQOS_MMC_CNTRL_BAK_IDX + 1U)) -#define EQOS_MAC_TCR_BAK_IDX ((EQOS_4_10_MAC_ARPPA_BAK_IDX + 1U)) -#define EQOS_MAC_SSIR_BAK_IDX ((EQOS_MAC_TCR_BAK_IDX + 1U)) -#define EQOS_MAC_STSR_BAK_IDX ((EQOS_MAC_SSIR_BAK_IDX + 1U)) -#define EQOS_MAC_STNSR_BAK_IDX ((EQOS_MAC_STSR_BAK_IDX + 1U)) -#define EQOS_MAC_STSUR_BAK_IDX ((EQOS_MAC_STNSR_BAK_IDX + 1U)) -#define EQOS_MAC_STNSUR_BAK_IDX ((EQOS_MAC_STSUR_BAK_IDX + 1U)) -#define EQOS_MAC_TAR_BAK_IDX ((EQOS_MAC_STNSUR_BAK_IDX + 1U)) -#define EQOS_DMA_BMR_BAK_IDX ((EQOS_MAC_TAR_BAK_IDX + 1U)) -#define EQOS_DMA_SBUS_BAK_IDX ((EQOS_DMA_BMR_BAK_IDX + 1U)) -#define EQOS_DMA_ISR_BAK_IDX ((EQOS_DMA_SBUS_BAK_IDX + 1U)) -#define EQOS_MTL_OP_MODE_BAK_IDX ((EQOS_DMA_ISR_BAK_IDX + 1U)) 
-#define EQOS_MTL_RXQ_DMA_MAP0_BAK_IDX ((EQOS_MTL_OP_MODE_BAK_IDX + 1U)) -/* x varies from 0-7, 8 HTR registers total */ -#define EQOS_MAC_HTR_REG_BAK_IDX(x) ((EQOS_MTL_RXQ_DMA_MAP0_BAK_IDX + 1U + \ - (x))) -/* x varies from 0-7, 8 queues total */ -#define EQOS_MAC_QX_TX_FLW_CTRL_BAK_IDX(x) ((EQOS_MAC_HTR_REG_BAK_IDX(0U) \ - + EQOS_MAX_HTR_REGS + (x))) -/* x varies from 0-127, 128 L2 DA/SA filters total */ -#define EQOS_MAC_ADDRH_BAK_IDX(x) ((EQOS_MAC_QX_TX_FLW_CTRL_BAK_IDX(0U) \ - + OSI_EQOS_MAX_NUM_QUEUES + (x))) -#define EQOS_MAC_ADDRL_BAK_IDX(x) ((EQOS_MAC_ADDRH_BAK_IDX(0U) + \ - EQOS_MAX_MAC_ADDRESS_FILTER + (x))) -/* x varies from 0-7, 8 L3/L4 filters total */ -#define EQOS_MAC_L3L4_CTR_BAK_IDX(x) ((EQOS_MAC_ADDRL_BAK_IDX(0U) + \ - EQOS_MAX_MAC_ADDRESS_FILTER + (x))) -#define EQOS_MAC_L4_ADR_BAK_IDX(x) ((EQOS_MAC_L3L4_CTR_BAK_IDX(0U) + \ - EQOS_MAX_L3_L4_FILTER + (x))) -#define EQOS_MAC_L3_AD0R_BAK_IDX(x) ((EQOS_MAC_L4_ADR_BAK_IDX(0U) + \ - EQOS_MAX_L3_L4_FILTER + (x))) -#define EQOS_MAC_L3_AD1R_BAK_IDX(x) ((EQOS_MAC_L3_AD0R_BAK_IDX(0U) + \ - EQOS_MAX_L3_L4_FILTER + (x))) -#define EQOS_MAC_L3_AD2R_BAK_IDX(x) ((EQOS_MAC_L3_AD1R_BAK_IDX(0U) + \ - EQOS_MAX_L3_L4_FILTER + (x))) -#define EQOS_MAC_L3_AD3R_BAK_IDX(x) ((EQOS_MAC_L3_AD2R_BAK_IDX(0U) + \ - EQOS_MAX_L3_L4_FILTER + (x))) - -/* MTL HW Register offsets - * - * Do not change the order of these macros. To add new registers to be - * backed up, append to end of list before EQOS_MAX_MTL_BAK_IDX, and - * update EQOS_MAX_MTL_BAK_IDX based on new macro. 
- */ -/* x varies from 0-7, 8 queues total */ -#define EQOS_MTL_CHX_TX_OP_MODE_BAK_IDX(x) ((EQOS_MAC_L3_AD3R_BAK_IDX(0U) \ - + EQOS_MAX_L3_L4_FILTER + (x))) -#define EQOS_MTL_TXQ_ETS_CR_BAK_IDX(x) ((EQOS_MTL_CHX_TX_OP_MODE_BAK_IDX(0U) \ - + OSI_EQOS_MAX_NUM_QUEUES + (x))) -#define EQOS_MTL_TXQ_QW_BAK_IDX(x) ((EQOS_MTL_TXQ_ETS_CR_BAK_IDX(0U) + \ - OSI_EQOS_MAX_NUM_QUEUES + (x))) -#define EQOS_MTL_TXQ_ETS_SSCR_BAK_IDX(x) ((EQOS_MTL_TXQ_QW_BAK_IDX(0U) \ - + OSI_EQOS_MAX_NUM_QUEUES + \ - (x))) -#define EQOS_MTL_TXQ_ETS_HCR_BAK_IDX(x) ((EQOS_MTL_TXQ_ETS_SSCR_BAK_IDX(0U) + \ - OSI_EQOS_MAX_NUM_QUEUES + (x))) -#define EQOS_MTL_TXQ_ETS_LCR_BAK_IDX(x) ((EQOS_MTL_TXQ_ETS_HCR_BAK_IDX(0U) + \ - OSI_EQOS_MAX_NUM_QUEUES + (x))) -#define EQOS_MTL_CHX_RX_OP_MODE_BAK_IDX(x) \ - ((EQOS_MTL_TXQ_ETS_LCR_BAK_IDX(0U) + \ - OSI_EQOS_MAX_NUM_QUEUES + (x))) - -/* EQOS Wrapper register offsets to be saved during suspend - * - * Do not change the order of these macros. To add new registers to be - * backed up, append to end of list before EQOS_MAX_WRAPPER_BAK_IDX, - * and update EQOS_MAX_WRAPPER_BAK_IDX based on new macro. - */ -#define EQOS_CLOCK_CTRL_0_BAK_IDX ((EQOS_MTL_CHX_RX_OP_MODE_BAK_IDX(0U) \ - + OSI_EQOS_MAX_NUM_QUEUES)) -#define EQOS_AXI_ASID_CTRL_BAK_IDX ((EQOS_CLOCK_CTRL_0_BAK_IDX + 1U)) -#define EQOS_PAD_CRTL_BAK_IDX ((EQOS_AXI_ASID_CTRL_BAK_IDX + 1U)) -#define EQOS_PAD_AUTO_CAL_CFG_BAK_IDX ((EQOS_PAD_CRTL_BAK_IDX + 1U)) -/* EQOS_PAD_AUTO_CAL_STAT is Read-only. Skip backup/restore */ - -/* To add new registers to backup during suspend, and restore during resume - * add it before this line, and increment EQOS_MAC_BAK_IDX accordingly. 
- */ - -#ifndef OSI_STRIPPED_LIB -#define EQOS_MAX_BAK_IDX ((EQOS_PAD_AUTO_CAL_CFG_BAK_IDX + 1U)) -#endif /* !OSI_STRIPPED_LIB */ -/** @} */ - /** * @addtogroup EQOS-MAC-Feature EQOS MAC HW feature registers bit fields * @@ -1041,9 +714,6 @@ struct core_func_safety { #define EQOS_MAC_HFR3_DVLAN_MASK 0x1U #define EQOS_MAC_HFR3_DVLAN_SHIFT 5U -#define EQOS_MAC_HFR3_PDUPSEL_MASK 0x1U -#define EQOS_MAC_HFR3_PDUPSEL_SHIFT 9U - #define EQOS_MAC_HFR3_FRPSEL_MASK 0x1U #define EQOS_MAC_HFR3_FRPSEL_SHIFT 10U @@ -1116,12 +786,16 @@ struct core_func_safety { #define EQOS_TMR_SHIFT 0U #define EQOS_TMR_MASK 0x3FFU #define EQOS_MAC_FSM_CONTROL 0x148U -#define EQOS_TMOUTEN OSI_BIT(0) #define EQOS_PRTYEN OSI_BIT(1) #define EQOS_MAC_DPP_FSM_INTERRUPT_STATUS 0x140U #define EQOS_MTL_DPP_CONTROL 0xCE0U #define EQOS_EDPP OSI_BIT(0) #define EQOS_MAC_DPP_FSM_INTERRUPT_STATUS 0x140U +#define EQOS_MTL_DBG_CTL 0xC08U +#define EQOS_MTL_DBG_CTL_EIEC OSI_BIT(18) +#define EQOS_MTL_DBG_CTL_EIEE OSI_BIT(16) +#define EQOS_MTL_DPP_ECC_EIC 0xCE4U +#define EQOS_MTL_DPP_ECC_EIC_BLEI OSI_BIT(0) /** @} */ #endif diff --git a/osi/core/eqos_mmc.c b/osi/core/eqos_mmc.c index e0de057..670d702 100644 --- a/osi/core/eqos_mmc.c +++ b/osi/core/eqos_mmc.c @@ -54,7 +54,7 @@ static inline nveu64_t update_mmc_val(struct osi_core_priv_data *const osi_core, nveu64_t last_value, nveu64_t offset) { - nveu64_t temp; + nveu64_t temp = 0; nveu32_t value = osi_readla(osi_core, (nveu8_t *)osi_core->base + offset); @@ -65,11 +65,9 @@ static inline nveu64_t update_mmc_val(struct osi_core_priv_data *const osi_core, "Value overflow resetting all counters\n", (nveul64_t)offset); eqos_reset_mmc(osi_core); - } else { - return temp; } - return 0; + return temp; } /** diff --git a/osi/core/frp.c b/osi/core/frp.c index 4b0c953..2dea183 100644 --- a/osi/core/frp.c +++ b/osi/core/frp.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. 
All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -33,7 +33,7 @@ * */ static void frp_entry_copy(struct osi_core_frp_entry *dst, - struct osi_core_frp_entry *src) + struct osi_core_frp_entry *const src) { dst->frp_id = src->frp_id; dst->data.match_data = src->data.match_data; @@ -61,13 +61,14 @@ static void frp_entry_copy(struct osi_core_frp_entry *dst, * @retval 0 on success. * @retval -1 on failure. */ -static int frp_entry_find(struct osi_core_priv_data *const osi_core, - int frp_id, - unsigned char *start, - unsigned char *no_entries) +static nve32_t frp_entry_find(struct osi_core_priv_data *const osi_core, + nve32_t frp_id, + nveu8_t *start, + nveu8_t *no_entries) { - unsigned char count = OSI_NONE, found = OSI_NONE; + nveu8_t count = OSI_NONE, found = OSI_NONE; struct osi_core_frp_entry *entry = OSI_NULL; + nve32_t ret = 0; /* Parse the FRP table for give frp_id */ for (count = 0U; count < osi_core->frp_cnt; count++) { @@ -80,17 +81,17 @@ static int frp_entry_find(struct osi_core_priv_data *const osi_core, found = OSI_ENABLE; } else { /* Increment entries */ - *no_entries = (unsigned char) (*no_entries + 1U); + *no_entries = (nveu8_t)(*no_entries + 1U); } } } if (found == OSI_NONE) { /* No entry found return error */ - return -1; + ret = -1; } - return 0; + return ret; } /** @@ -104,34 +105,38 @@ static int frp_entry_find(struct osi_core_priv_data *const osi_core, * * @retval No of FRP entries required. 
*/ -static unsigned char frp_req_entries(unsigned char offset, - unsigned char match_length) +static nveu8_t frp_req_entries(nveu8_t offset, + nveu8_t match_length) { - unsigned char req = 0U; + nveu8_t req = 0U; + nveu8_t temp_match_length = match_length; - /* Validate for match_length */ - if ((match_length == OSI_NONE) || - (match_length > OSI_FRP_MATCH_DATA_MAX)) { + /* Validate for temp_match_length */ + if ((temp_match_length == OSI_NONE) || + (temp_match_length > OSI_FRP_MATCH_DATA_MAX)) { /* return zero */ - return req; + goto done; } /* Check does the given length can fit in fist entry */ - if (match_length <= (unsigned char) FRP_OFFSET_BYTES(offset)) { + if (temp_match_length <= (nveu8_t)FRP_OFFSET_BYTES(offset)) { /* Require one entry */ - return 1U; + req = 1U; + goto done; } /* Initialize req as 1U and decrement length by FRP_OFFSET_BYTES */ req = 1U; - match_length = (unsigned char) (match_length - (unsigned char) FRP_OFFSET_BYTES(offset)); - if ((match_length / FRP_MD_SIZE) < OSI_FRP_MATCH_DATA_MAX) { - req = (unsigned char) (req + (match_length / FRP_MD_SIZE)); - if ((match_length % FRP_MD_SIZE) != OSI_NONE) { + temp_match_length = (nveu8_t)(temp_match_length - + (nveu8_t)FRP_OFFSET_BYTES(offset)); + if ((temp_match_length / FRP_MD_SIZE) < OSI_FRP_MATCH_DATA_MAX) { + req = (nveu8_t)(req + (temp_match_length / FRP_MD_SIZE)); + if ((temp_match_length % FRP_MD_SIZE) != OSI_NONE) { /* Need one more entry */ - req = (unsigned char) (req + 1U); + req = (nveu8_t)(req + 1U); } } +done: return req; } @@ -144,7 +149,7 @@ static unsigned char frp_req_entries(unsigned char offset, * @param[in] data: FRP entry data pointer. 
* */ -static void frp_entry_mode_parse(unsigned char filter_mode, +static void frp_entry_mode_parse(nveu8_t filter_mode, struct osi_core_frp_data *data) { switch (filter_mode) { @@ -189,7 +194,7 @@ static void frp_entry_mode_parse(unsigned char filter_mode, data->inverse_match = OSI_DISABLE; break; default: - //OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + //OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, // "Invalid filter mode argment\n", // filter_mode); break; @@ -205,6 +210,7 @@ static void frp_entry_mode_parse(unsigned char filter_mode, * * @param[in] osi_core: OSI core private data structure. * @param[in] frp_id: FRP ID to add. + * @param[in] pos: FRP entry position. * @param[in] match: Pointer to match data. * @param[in] length: Match data length. * @param[in] offset: Actual match data offset position. @@ -215,30 +221,34 @@ static void frp_entry_mode_parse(unsigned char filter_mode, * @retval 0 on success. * @retval -1 on failure. */ -static int frp_entry_add(struct osi_core_priv_data *const osi_core, - int frp_id, - unsigned char pos, - unsigned char *const match, - unsigned char length, - unsigned char offset, - unsigned char filter_mode, - int next_frp_id, - unsigned int dma_sel) +static nve32_t frp_entry_add(struct osi_core_priv_data *const osi_core, + nve32_t frp_id, + nveu8_t pos, + nveu8_t *const match, + nveu8_t length, + nveu8_t offset, + nveu8_t filter_mode, + nve32_t next_frp_id, + nveu32_t dma_sel) { struct osi_core_frp_entry *entry = OSI_NULL; struct osi_core_frp_data *data = OSI_NULL; - unsigned int req_entries = 0U; - unsigned char ok_index = 0U; - unsigned char fo_t = 0U; - unsigned char fp_t = 0U; - unsigned char i = 0U, j = 0U, md_pos = 0U; + nveu32_t req_entries = 0U; + nveu8_t ok_index = 0U; + nveu8_t fo_t = 0U; + nveu8_t fp_t = 0U; + nveu8_t i = 0U, j = 0U, md_pos = 0U; + nveu8_t temp_pos = pos; + nve32_t ret; + nveu32_t dma_sel_val[MAX_MAC_IP_TYPES] = {0xFFU, 0x3FF}; /* Validate length */ if (length > OSI_FRP_MATCH_DATA_MAX) { 
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, "Invalid match length\n", length); - return -1; + ret = -1; + goto done; } /* Validate filter_mode */ @@ -246,7 +256,8 @@ static int frp_entry_add(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid filter mode argment\n", filter_mode); - return -1; + ret = -1; + goto done; } /* Validate offset */ @@ -254,27 +265,38 @@ static int frp_entry_add(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid offset value\n", offset); - return -1; + ret = -1; + goto done; + } + + /* Validate channel selection */ + if (dma_sel > dma_sel_val[osi_core->mac]) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Invalid DMA selection\n", + (nveu64_t)dma_sel); + ret = -1; + goto done; } /* Check for avilable space */ req_entries = frp_req_entries(offset, length); if ((req_entries >= OSI_FRP_MAX_ENTRY) || - (req_entries + pos) >= OSI_FRP_MAX_ENTRY) { + ((req_entries + temp_pos) >= OSI_FRP_MAX_ENTRY)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "No space to update FRP ID\n", OSI_NONE); - return -1; + ret = -1; + goto done; } /* Validate next_frp_id index ok_index */ - if (filter_mode == OSI_FRP_MODE_LINK || - filter_mode == OSI_FRP_MODE_IM_LINK) { + if ((filter_mode == OSI_FRP_MODE_LINK) || + (filter_mode == OSI_FRP_MODE_IM_LINK)) { if (frp_entry_find(osi_core, next_frp_id, &i, &j) < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "No Link FRP ID index found\n", OSI_NONE); - i = (unsigned char) next_frp_id; + i = (nveu8_t)next_frp_id; } ok_index = i; } @@ -285,7 +307,7 @@ static int frp_entry_add(struct osi_core_priv_data *const osi_core, md_pos = 0U; for (i = 0U; i < req_entries; i++) { /* Get FRP entry*/ - entry = &osi_core->frp_table[pos]; + entry = &osi_core->frp_table[temp_pos]; data = &entry->data; /* Fill FRP ID */ @@ -295,9 +317,9 @@ static int frp_entry_add(struct osi_core_priv_data *const osi_core, data->match_data 
= OSI_NONE; data->match_en = OSI_NONE; for (j = fp_t; j < FRP_MD_SIZE; j++) { - data->match_data |= ((unsigned int)match[md_pos]) + data->match_data |= ((nveu32_t)match[md_pos]) << (j * FRP_ME_BYTE_SHIFT); - data->match_en |= ((unsigned int)FRP_ME_BYTE << + data->match_en |= ((nveu32_t)FRP_ME_BYTE << (j * FRP_ME_BYTE_SHIFT)); md_pos++; if (md_pos >= length) { @@ -323,10 +345,10 @@ static int frp_entry_add(struct osi_core_priv_data *const osi_core, data->next_ins_ctrl = OSI_ENABLE; /* Init next FRP entry */ - pos++; + temp_pos++; fo_t++; fp_t = OSI_NONE; - data->ok_index = pos; + data->ok_index = temp_pos; } else { data->next_ins_ctrl = OSI_DISABLE; data->ok_index = OSI_DISABLE; @@ -334,14 +356,16 @@ static int frp_entry_add(struct osi_core_priv_data *const osi_core, } /* Check and fill final OKI */ - if (filter_mode == OSI_FRP_MODE_LINK || - filter_mode == OSI_FRP_MODE_IM_LINK) { + if ((filter_mode == OSI_FRP_MODE_LINK) || + (filter_mode == OSI_FRP_MODE_IM_LINK)) { /* Update NIC and OKI in final entry */ data->next_ins_ctrl = OSI_ENABLE; data->ok_index = ok_index; } - return 0; + ret = 0; +done: + return ret; } /** @@ -350,16 +374,19 @@ static int frp_entry_add(struct osi_core_priv_data *const osi_core, * Algorithm: Update FRP table into HW. * * @param[in] osi_core: OSI core private data structure. + * @param[in] ops_p: Core operations data structure. * * @retval 0 on success. * @retval -1 on failure. 
*/ -static int frp_hw_write(struct osi_core_priv_data *const osi_core, - struct core_ops *ops_p) +nve32_t frp_hw_write(struct osi_core_priv_data *const osi_core, + struct core_ops *const ops_p) { - int ret = -1, tmp = -1; + nve32_t ret = 0; + nve32_t tmp = 0; struct osi_core_frp_entry *entry; - unsigned int frp_cnt = osi_core->frp_cnt, i = OSI_NONE; + struct osi_core_frp_data bypass_entry = {}; + nveu32_t frp_cnt = osi_core->frp_cnt, i = OSI_NONE; /* Disable the FRP in HW */ ret = ops_p->config_frp(osi_core, OSI_DISABLE); @@ -371,29 +398,55 @@ static int frp_hw_write(struct osi_core_priv_data *const osi_core, goto hw_write_enable_frp; } - /* Write FRP entries into HW */ - for (i = 0; i < frp_cnt; i++) { - entry = &osi_core->frp_table[i]; - ret = ops_p->update_frp_entry(osi_core, i, &entry->data); + /* Check space for XCS BYPASS rule */ + if ((frp_cnt + 1U) > OSI_FRP_MAX_ENTRY) { + ret = -1; + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "No space for rules\n", OSI_NONE); + goto error; + } + + /* Check HW table size for non-zero */ + if (frp_cnt != 0U) { + /* Write FRP entries into HW */ + for (i = 0; i < frp_cnt; i++) { + entry = &osi_core->frp_table[i]; + ret = ops_p->update_frp_entry(osi_core, i, + &entry->data); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "Fail to update FRP entry\n", + OSI_NONE); + goto hw_write_enable_frp; + } + } + + /* Write BYPASS rule for XDCS */ + bypass_entry.match_en = 0x0U; + bypass_entry.accept_frame = 1; + bypass_entry.reject_frame = 1; + ret = ops_p->update_frp_entry(osi_core, frp_cnt, &bypass_entry); if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, - "Fail to update FRP entry\n", + "Fail to update BYPASS entry\n", OSI_NONE); goto hw_write_enable_frp; } - } - /* Update the NVE */ - ret = ops_p->update_frp_nve(osi_core, (frp_cnt - 1U)); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, - "Fail to update FRP NVE\n", - OSI_NONE); - } + /* Update the NVE */ + ret = 
ops_p->update_frp_nve(osi_core, frp_cnt); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "Fail to update FRP NVE\n", + OSI_NONE); + } - /* Enable the FRP in HW */ + /* Enable the FRP in HW */ hw_write_enable_frp: - tmp = ops_p->config_frp(osi_core, OSI_ENABLE); + tmp = ops_p->config_frp(osi_core, OSI_ENABLE); + } + +error: return (ret < 0) ? ret : tmp; } @@ -409,17 +462,17 @@ hw_write_enable_frp: * @retval 0 on success. * @retval -1 on failure. */ -static int frp_add_proto(struct osi_core_priv_data *const osi_core, - struct osi_core_frp_cmd *const cmd, - unsigned char *pos) +static nve32_t frp_add_proto(struct osi_core_priv_data *const osi_core, + struct osi_core_frp_cmd *const cmd, + nveu8_t *pos) { - int ret = -1, proto_oki = -1; - unsigned char proto_entry = OSI_DISABLE; - unsigned char req = 0U; - unsigned char proto_match[FRP_PROTO_LENGTH]; - unsigned char proto_lendth; - unsigned char proto_offset; - unsigned char match_type = cmd->match_type; + nve32_t ret, proto_oki; + nveu8_t proto_entry = OSI_DISABLE; + nveu8_t req = 0U; + nveu8_t proto_match[FRP_PROTO_LENGTH]; + nveu8_t proto_lendth; + nveu8_t proto_offset; + nveu8_t match_type = cmd->match_type; switch (match_type) { case OSI_FRP_MATCH_L4_S_UPORT: @@ -462,16 +515,18 @@ static int frp_add_proto(struct osi_core_priv_data *const osi_core, /* Check and Add protocol FRP entire */ if (proto_entry == OSI_ENABLE) { /* Check for space */ - req = (unsigned char) (frp_req_entries(cmd->offset, cmd->match_length) + 1U); + req = (nveu8_t)(frp_req_entries(cmd->offset, cmd->match_length) + 1U); if (*pos > (OSI_FRP_MAX_ENTRY - req)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail add FRP protocol entry\n", OSI_NONE); - return -1; + ret = -1; + goto done; } /* Add protocol FRP entire */ - proto_oki = *pos + 1; + proto_oki = (nve32_t)*pos; + proto_oki += 1; ret = frp_entry_add(osi_core, cmd->frp_id, *pos, proto_match, proto_lendth, proto_offset, OSI_FRP_MODE_LINK, @@ -480,14 +535,16 @@ 
static int frp_add_proto(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail add FRP protocol entry\n", OSI_NONE); - return ret; + goto done; } /* Increment pos value */ - *pos = (unsigned char) (*pos + 1U); + *pos = (nveu8_t)(*pos + (nveu8_t)1); } - return 0; + ret = 0; +done: + return ret; } /** @@ -495,15 +552,13 @@ static int frp_add_proto(struct osi_core_priv_data *const osi_core, * * Algorithm: Parse give FRP command match type and update it's offset. * - * @param[in] osi_core: OSI core private data structure. * @param[in] cmd: OSI FRP command structure. * */ -static void frp_parse_mtype(OSI_UNUSED struct osi_core_priv_data *const osi_core, - struct osi_core_frp_cmd *const cmd) +static void frp_parse_mtype(struct osi_core_frp_cmd *const cmd) { - unsigned char offset; - unsigned char match_type = cmd->match_type; + nveu8_t offset; + nveu8_t match_type = cmd->match_type; switch (match_type) { case OSI_FRP_MATCH_L2_DA: @@ -549,26 +604,28 @@ static void frp_parse_mtype(OSI_UNUSED struct osi_core_priv_data *const osi_core * Algorithm: Parse give FRP delete command and update it on OSI data and HW. * * @param[in] osi_core: OSI core private data structure. + * @param[in] ops_p: Core operations data structure. * @param[in] cmd: OSI FRP command structure. * * @retval 0 on success. * @retval -1 on failure. 
*/ -static int frp_delete(struct osi_core_priv_data *const osi_core, - struct core_ops *ops_p, - struct osi_core_frp_cmd *const cmd) +static nve32_t frp_delete(struct osi_core_priv_data *const osi_core, + struct core_ops *ops_p, + struct osi_core_frp_cmd *const cmd) { - int ret = -1; - unsigned char i = 0U, pos = 0U, count = 0U; - int frp_id = cmd->frp_id; - unsigned int frp_cnt = osi_core->frp_cnt; + nve32_t ret; + nveu8_t i = 0U, pos = 0U, count = 0U; + nve32_t frp_id = cmd->frp_id; + nveu32_t frp_cnt = osi_core->frp_cnt; /* Check for FRP entries */ if (frp_cnt == 0U) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "No FRP entries in the table\n", OSI_NONE); - return -1; + ret = -1; + goto done; } /* Find the FRP entry */ @@ -576,15 +633,17 @@ static int frp_delete(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "No FRP entry found to delete\n", OSI_NONE); - return -1; + ret = -1; + goto done; } /* Validate pos and count */ - if (((unsigned int)pos + count) > frp_cnt) { + if (((nveu32_t)pos + count) > frp_cnt) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Invalid FRP entry index\n", OSI_NONE); - return -1; + ret = -1; + goto done; } /* Update the frp_table entry */ @@ -592,12 +651,15 @@ static int frp_delete(struct osi_core_priv_data *const osi_core, (sizeof(struct osi_core_frp_entry) * count)); /* Move in FRP table entries by count */ - for (i = (unsigned char) (pos + count); i <= frp_cnt; i++) { + for (i = (nveu8_t)(pos + count); i <= frp_cnt; i++) { frp_entry_copy(&osi_core->frp_table[pos], &osi_core->frp_table[i]); pos++; } + /* Update the frp_cnt entry */ + osi_core->frp_cnt = (frp_cnt - count); + /* Write FRP Table into HW */ ret = frp_hw_write(osi_core, ops_p); if (ret < 0) { @@ -606,9 +668,7 @@ static int frp_delete(struct osi_core_priv_data *const osi_core, OSI_NONE); } - /* Update the frp_cnt entry */ - osi_core->frp_cnt = (frp_cnt - count); - +done: return ret; } @@ -618,29 +678,31 @@ static int 
frp_delete(struct osi_core_priv_data *const osi_core, * Algorithm: Parse give FRP update command and update it on OSI data and HW. * * @param[in] osi_core: OSI core private data structure. + * @param[in] ops_p: Core operations data structure. * @param[in] cmd: OSI FRP command structure. * * @retval 0 on success. * @retval -1 on failure. */ -static int frp_update(struct osi_core_priv_data *const osi_core, - struct core_ops *ops_p, - struct osi_core_frp_cmd *const cmd) +static nve32_t frp_update(struct osi_core_priv_data *const osi_core, + struct core_ops *ops_p, + struct osi_core_frp_cmd *const cmd) { - int ret = -1; - unsigned char pos = 0U, count = 0U, req = 0U; - int frp_id = cmd->frp_id; + nve32_t ret; + nveu8_t pos = 0U, count = 0U, req = 0U; + nve32_t frp_id = cmd->frp_id; /* Validate given frp_id */ if (frp_entry_find(osi_core, frp_id, &pos, &count) < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "No FRP entry found\n", OSI_NONE); - return -1; + ret = -1; + goto done; } /* Parse match type and update command offset */ - frp_parse_mtype(osi_core, cmd); + frp_parse_mtype(cmd); /* Calculate the required FRP entries for Update Command. 
*/ req = frp_req_entries(cmd->offset, cmd->match_length); @@ -662,7 +724,8 @@ static int frp_update(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Old and New required FRP entries mismatch\n", OSI_NONE); - return -1; + ret = -1; + goto done; } /* Process and update FRP Command Protocal Entry */ @@ -671,7 +734,7 @@ static int frp_update(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to parse match type\n", OSI_NONE); - return ret; + goto done; } /* Update FRP entries */ @@ -683,7 +746,7 @@ static int frp_update(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to update FRP entry\n", OSI_NONE); - return ret; + goto done; } /* Write FRP Table into HW */ @@ -694,6 +757,7 @@ static int frp_update(struct osi_core_priv_data *const osi_core, OSI_NONE); } +done: return ret; } @@ -703,26 +767,28 @@ static int frp_update(struct osi_core_priv_data *const osi_core, * Algorithm: Parse give FRP Add command and update it on OSI data and HW. * * @param[in] osi_core: OSI core private data structure. + * @param[in] ops_p: Core operations data structure. * @param[in] cmd: OSI FRP command structure. * * @retval 0 on success. * @retval -1 on failure. 
*/ -static int frp_add(struct osi_core_priv_data *const osi_core, - struct core_ops *ops_p, - struct osi_core_frp_cmd *const cmd) +static nve32_t frp_add(struct osi_core_priv_data *const osi_core, + struct core_ops *ops_p, + struct osi_core_frp_cmd *const cmd) { - int ret = -1; - unsigned char pos = 0U, count = 0U; - int frp_id = cmd->frp_id; - unsigned int nve = osi_core->frp_cnt; + nve32_t ret; + nveu8_t pos = 0U, count = 0U; + nve32_t frp_id = cmd->frp_id; + nveu32_t nve = osi_core->frp_cnt; /* Check for MAX FRP entries */ if (nve >= OSI_FRP_MAX_ENTRY) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, "FRP etries are full\n", nve); - return -1; + ret = -1; + goto done; } /* Check the FRP entry already exists */ @@ -731,23 +797,24 @@ static int frp_add(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "FRP entry already exists\n", OSI_NONE); - return -1; + ret = -1; + goto done; } /* Parse match type and update command offset */ - frp_parse_mtype(osi_core, cmd); + frp_parse_mtype(cmd); /* Process and add FRP Command Protocal Entry */ - ret = frp_add_proto(osi_core, cmd, (unsigned char *)&nve); + ret = frp_add_proto(osi_core, cmd, (nveu8_t *)&nve); if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to parse match type\n", OSI_NONE); - return ret; + goto done; } /* Add Match data FRP Entry */ - ret = frp_entry_add(osi_core, frp_id, (unsigned char)nve, + ret = frp_entry_add(osi_core, frp_id, (nveu8_t)nve, cmd->match, cmd->match_length, cmd->offset, cmd->filter_mode, cmd->next_frp_id, cmd->dma_sel); @@ -755,7 +822,7 @@ static int frp_add(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to add FRP entry\n", nve); - return ret; + goto done; } osi_core->frp_cnt = nve + frp_req_entries(cmd->offset, cmd->match_length); @@ -768,6 +835,7 @@ static int frp_add(struct osi_core_priv_data *const osi_core, OSI_NONE); } +done: return ret; } @@ -777,16 +845,17 @@ 
static int frp_add(struct osi_core_priv_data *const osi_core, * Algorithm: Parse give FRP command and update it on OSI data and HW. * * @param[in] osi_core: OSI core private data structure. + * @param[in] ops_p: Core operations data structure. * @param[in] cmd: OSI FRP command structure. * * @retval 0 on success. * @retval -1 on failure. */ -int setup_frp(struct osi_core_priv_data *const osi_core, - struct core_ops *ops_p, - struct osi_core_frp_cmd *const cmd) +nve32_t setup_frp(struct osi_core_priv_data *const osi_core, + struct core_ops *ops_p, + struct osi_core_frp_cmd *const cmd) { - int ret = -1; + nve32_t ret = -1; switch (cmd->cmd) { case OSI_FRP_CMD_ADD: @@ -817,20 +886,3 @@ int setup_frp(struct osi_core_priv_data *const osi_core, return ret; } - -/** - * @brief init_frp - Initialize FRP. - * - * Algorithm: Reset all the data in the FRP table Initialize FRP count to zero. - * - * @param[in] osi_core: OSI core private data structure. - * - */ -void init_frp(struct osi_core_priv_data *const osi_core) -{ - /* Reset the NVE count to zero */ - osi_core->frp_cnt = 0U; - /* Clear all instruction of FRP */ - osi_memset(osi_core->frp_table, 0U, - (sizeof(struct osi_core_frp_entry) * OSI_FRP_MAX_ENTRY)); -} diff --git a/osi/core/frp.h b/osi/core/frp.h index d1092b7..0e902c1 100644 --- a/osi/core/frp.h +++ b/osi/core/frp.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -64,21 +64,20 @@ * @retval 0 on success. * @retval -1 on failure. 
*/ -int setup_frp(struct osi_core_priv_data *const osi_core, - struct core_ops *ops_p, - struct osi_core_frp_cmd *const cmd); +nve32_t setup_frp(struct osi_core_priv_data *const osi_core, + struct core_ops *ops_p, + struct osi_core_frp_cmd *const cmd); /** - * @brief init_frp - Init the FRP Instruction Table. + * @brief frp_hw_write - Update HW FRP table. + * + * Algorithm: Update FRP table into HW. * * @param[in] osi_core: OSI core private data structure. * - * @note - * 1) MAC and PHY should be init and started. see osi_start_mac() - * - * @retval 0 on success + * @retval 0 on success. * @retval -1 on failure. */ -void init_frp(struct osi_core_priv_data *const osi_core); - +nve32_t frp_hw_write(struct osi_core_priv_data *const osi_core, + struct core_ops *const ops_p); #endif /* FRP_H */ diff --git a/osi/core/ivc_core.c b/osi/core/ivc_core.c index fe40e26..555b023 100644 --- a/osi/core/ivc_core.c +++ b/osi/core/ivc_core.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -30,11 +30,6 @@ #include "../osi/common/common.h" #include "macsec.h" -/** - * @brief ivc_safety_config - EQOS MAC core safety configuration - */ -static struct core_func_safety ivc_safety_config; - /** * @brief ivc_handle_ioctl - marshell input argument to handle runtime command * @@ -55,27 +50,40 @@ static nve32_t ivc_handle_ioctl(struct osi_core_priv_data *osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = handle_ioctl; - msg.status = osi_memcpy((void *)&msg.data.ioctl_data, - (void *)data, - sizeof(struct osi_ioctl)); + /* osi_memcpy is treated as void since it is + * an internal functin which will be always success + */ + (void)osi_memcpy((void *)&msg.data.ioctl_data, (void *)data, + sizeof(struct osi_ioctl)); if (data->cmd == OSI_CMD_CONFIG_PTP) { - osi_memcpy((void *)&msg.data.ioctl_data.ptp_config, - (void *)&osi_core->ptp_config, - sizeof(struct osi_ptp_config)); + (void)osi_memcpy((void *)&msg.data.ioctl_data.ptp_config, + (void *)&osi_core->ptp_config, + sizeof(struct osi_ptp_config)); } ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); - if (data->cmd == OSI_CMD_READ_MMC) { - msg.status = osi_memcpy((void *)&osi_core->mmc, - (void *)&msg.data.mmc, - sizeof(struct osi_mmc_counters)); - } else { - msg.status = osi_memcpy((void *)data, - (void *)&msg.data.ioctl_data, - sizeof(struct osi_ioctl)); + switch (data->cmd) { + case OSI_CMD_READ_MMC: + (void)osi_memcpy((void *)&osi_core->mmc, + (void *)&msg.data.mmc_s, + sizeof(struct osi_mmc_counters)); + break; + + case OSI_CMD_READ_STATS: + (void)osi_memcpy((void *)&osi_core->stats, + (void *)&msg.data.stats_s, + sizeof(struct osi_stats)); + break; + + default: + (void)osi_memcpy((void *)data, + (void *)&msg.data.ioctl_data, + sizeof(struct osi_ioctl)); + break; } + return ret; } @@ -83,15 +91,11 @@ static nve32_t ivc_handle_ioctl(struct 
osi_core_priv_data *osi_core, * @brief ivc_core_init - EQOS MAC, MTL and common DMA Initialization * * @param[in] osi_core: OSI core private data structure. - * @param[in] tx_fifo_size: MTL TX FIFO size - * @param[in] rx_fifo_size: MTL RX FIFO size * * @retval 0 on success * @retval -1 on failure. */ -static nve32_t ivc_core_init(struct osi_core_priv_data *const osi_core, - OSI_UNUSED nveu32_t tx_fifo_size, - OSI_UNUSED nveu32_t rx_fifo_size) +static nve32_t ivc_core_init(struct osi_core_priv_data *const osi_core) { ivc_msg_common_t msg; @@ -117,8 +121,7 @@ static void ivc_core_deinit(struct osi_core_priv_data *const osi_core) osi_memset(&msg, 0, sizeof(msg)); - msg.cmd = handle_ioctl; - msg.data.ioctl_data.cmd = OSI_CMD_STOP_MAC; + msg.cmd = core_deinit; ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); if (ret < 0) { @@ -151,10 +154,10 @@ static nve32_t ivc_write_phy_reg(struct osi_core_priv_data *const osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = write_phy_reg; - msg.data.args.arguments[index++] = phyaddr; - msg.data.args.arguments[index++] = phyreg; - msg.data.args.arguments[index++] = phydata; - msg.data.args.count = index; + msg.args.arguments[index++] = phyaddr; + msg.args.arguments[index++] = phyreg; + msg.args.arguments[index++] = phydata; + msg.args.count = index; return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); } @@ -182,14 +185,15 @@ static nve32_t ivc_read_phy_reg(struct osi_core_priv_data *const osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = read_phy_reg; - msg.data.args.arguments[index++] = phyaddr; - msg.data.args.arguments[index++] = phyreg; - msg.data.args.count = index; + msg.args.arguments[index++] = phyaddr; + msg.args.arguments[index++] = phyreg; + msg.args.count = index; return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); } #ifdef MACSEC_SUPPORT +#ifdef DEBUG_MACSEC /** * @brief ivc_macsec_dbg_events_config - Configure Debug events * @@ -199,7 +203,7 @@ static nve32_t 
ivc_read_phy_reg(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure. */ -static int ivc_macsec_dbg_events_config( +static nve32_t ivc_macsec_dbg_events_config( struct osi_core_priv_data *const osi_core, struct osi_macsec_dbg_buf_config *const dbg_buf_config) { @@ -210,19 +214,19 @@ static int ivc_macsec_dbg_events_config( msg.cmd = dbg_events_config_macsec; - msg.status = osi_memcpy((void *)&msg.data.dbg_buf_config, - (void *)dbg_buf_config, - sizeof(struct osi_macsec_dbg_buf_config)); + (void)osi_memcpy((void *)&msg.data.dbg_buf_config, + (void *)dbg_buf_config, + sizeof(struct osi_macsec_dbg_buf_config)); ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); if (ret != 0) { - return ret; + goto done; } - msg.status = osi_memcpy((void *)dbg_buf_config, - (void *)&msg.data.dbg_buf_config, - sizeof(struct osi_macsec_dbg_buf_config)); - + (void)osi_memcpy((void *)dbg_buf_config, + (void *)&msg.data.dbg_buf_config, + sizeof(struct osi_macsec_dbg_buf_config)); +done: return ret; } @@ -235,7 +239,7 @@ static int ivc_macsec_dbg_events_config( * @retval 0 on Success * @retval -1 on Failure */ -static int ivc_macsec_dbg_buf_config( +static nve32_t ivc_macsec_dbg_buf_config( struct osi_core_priv_data *const osi_core, struct osi_macsec_dbg_buf_config *const dbg_buf_config) { @@ -246,21 +250,22 @@ static int ivc_macsec_dbg_buf_config( msg.cmd = dbg_buf_config_macsec; - msg.status = osi_memcpy((void *)&msg.data.dbg_buf_config, - (void *)dbg_buf_config, - sizeof(struct osi_macsec_dbg_buf_config)); + (void)osi_memcpy((void *)&msg.data.dbg_buf_config, + (void *)dbg_buf_config, + sizeof(struct osi_macsec_dbg_buf_config)); ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); if (ret != 0) { - return ret; + goto done; } - msg.status = osi_memcpy((void *)dbg_buf_config, - (void *) &msg.data.dbg_buf_config, - sizeof(struct osi_macsec_dbg_buf_config)); - + (void)osi_memcpy((void *)dbg_buf_config, + (void *) 
&msg.data.dbg_buf_config, + sizeof(struct osi_macsec_dbg_buf_config)); +done: return ret; } +#endif /* DEBUG_MACSEC */ /** * @brief macsec_read_mmc - To read statitics registers and update structure @@ -284,27 +289,26 @@ static void ivc_macsec_read_mmc(struct osi_core_priv_data *const osi_core) msg.status = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); - msg.status = osi_memcpy((void *)&osi_core->macsec_mmc, - (void *) &msg.data.macsec_mmc, - sizeof(struct osi_macsec_mmc_counters)); - msg.status = osi_memcpy((void *)&osi_core->macsec_irq_stats, - (void *) &msg.data.macsec_irq_stats, - sizeof(struct osi_macsec_irq_stats)); + (void)osi_memcpy((void *)&osi_core->macsec_mmc, + (void *) &msg.data.macsec_mmc, + sizeof(struct osi_macsec_mmc_counters)); + (void)osi_memcpy((void *)&osi_core->macsec_irq_stats, + (void *) &msg.data.macsec_irq_stats, + sizeof(struct osi_macsec_irq_stats)); } /** * @brief ivc_get_sc_lut_key_index - Macsec get Key_index * * @param[in] osi_core: OSI Core private data structure. - * @param[in] sc: Secure Channel info. - * @param[in] enable: enable or disable. + * @param[in] sci: Secure Channel info. + * @param[out] key_index: Key table index to program SAK. * @param[in] ctlr: Controller instance. - * @param[[out] kt_idx: Key table index to program SAK. 
* * @retval 0 on Success * @retval -1 on Failure */ -static int ivc_get_sc_lut_key_index(struct osi_core_priv_data *const osi_core, +static nve32_t ivc_get_sc_lut_key_index(struct osi_core_priv_data *const osi_core, nveu8_t *sci, nveu32_t *key_index, nveu16_t ctlr) { @@ -314,17 +318,16 @@ static int ivc_get_sc_lut_key_index(struct osi_core_priv_data *const osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = macsec_get_sc_lut_key_index; - msg.status = osi_memcpy((void *) &msg.data.macsec_cfg.sci, - (void *)sci, - OSI_SCI_LEN); + (void)osi_memcpy((void *) &msg.data.macsec_cfg.sci, + (void *)sci, + OSI_SCI_LEN); msg.data.macsec_cfg.ctlr = ctlr; ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); - if (ret != 0) { - return ret; + if (ret == 0) { + *key_index = msg.data.macsec_cfg.key_index; } - *key_index = msg.data.macsec_cfg.key_index; return ret; } @@ -335,15 +338,15 @@ static int ivc_get_sc_lut_key_index(struct osi_core_priv_data *const osi_core, * @param[in] sc: Secure Channel info. * @param[in] enable: enable or disable. * @param[in] ctlr: Controller instance. - * @param[[out] kt_idx: Key table index to program SAK. + * @param[out] kt_idx: Key table index to program SAK. 
* * @retval 0 on Success * @retval -1 on Failure */ -static int ivc_macsec_config(struct osi_core_priv_data *const osi_core, +static nve32_t ivc_macsec_config(struct osi_core_priv_data *const osi_core, struct osi_macsec_sc_info *const sc, - unsigned int enable, unsigned short ctlr, - unsigned short *kt_idx) + nveu32_t enable, nveu16_t ctlr, + nveu16_t *kt_idx) { ivc_msg_common_t msg; nve32_t ret = 0; @@ -351,47 +354,23 @@ static int ivc_macsec_config(struct osi_core_priv_data *const osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = config_macsec; - msg.status = osi_memcpy((void *) &msg.data.macsec_cfg.sc_info, - (void *)sc, - sizeof(struct osi_macsec_sc_info)); + (void)osi_memcpy((void *) &msg.data.macsec_cfg.sc_info, + (void *)sc, + sizeof(struct osi_macsec_sc_info)); msg.data.macsec_cfg.enable = enable; msg.data.macsec_cfg.ctlr = ctlr; msg.data.macsec_cfg.kt_idx = *kt_idx; ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); if (ret != 0) { - return ret; + goto done; } *kt_idx = msg.data.macsec_cfg.kt_idx; +done: return ret; } -/** - * @brief ivc_macsec_update_mtu - Update MACSEC mtu. - * - * @param[in] osi_core: OSI Core private data structure. - * @param[in] mtu: MACSEC MTU len. - * - * @retval 0 on Success - * @retval -1 on Failure - */ -static nve32_t ivc_macsec_update_mtu(struct osi_core_priv_data *const osi_core, - nveu32_t mtu) -{ - ivc_msg_common_t msg; - nveu32_t index = 0; - - osi_memset(&msg, 0, sizeof(msg)); - - msg.cmd = macsec_update_mtu_size; - msg.data.args.arguments[index] = mtu; - index++; - msg.data.args.count = index; - - return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); -} - /** * @brief ivc_macsec_enable - Enable or disable Macsec. 
* @@ -401,8 +380,8 @@ static nve32_t ivc_macsec_update_mtu(struct osi_core_priv_data *const osi_core, * @retval 0 on Success * @retval -1 on Failure */ -static int ivc_macsec_enable(struct osi_core_priv_data *const osi_core, - unsigned int enable) +static nve32_t ivc_macsec_enable(struct osi_core_priv_data *const osi_core, + nveu32_t enable) { ivc_msg_common_t msg; nveu32_t index = 0; @@ -410,13 +389,14 @@ static int ivc_macsec_enable(struct osi_core_priv_data *const osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = en_macsec; - msg.data.args.arguments[index] = enable; + msg.args.arguments[index] = enable; index++; - msg.data.args.count = index; + msg.args.count = index; return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); } +#ifdef DEBUG_MACSEC /** * @brief ivc_macsec_loopback_config - Loopback configure. * @@ -426,8 +406,8 @@ static int ivc_macsec_enable(struct osi_core_priv_data *const osi_core, * @retval 0 on Success * @retval -1 on Failure */ -static int ivc_macsec_loopback_config(struct osi_core_priv_data *const osi_core, - unsigned int enable) +static nve32_t ivc_macsec_loopback_config(struct osi_core_priv_data *const osi_core, + nveu32_t enable) { ivc_msg_common_t msg; nveu32_t index = 0; @@ -435,12 +415,13 @@ static int ivc_macsec_loopback_config(struct osi_core_priv_data *const osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = loopback_config_macsec; - msg.data.args.arguments[index] = enable; + msg.args.arguments[index] = enable; index++; - msg.data.args.count = index; + msg.args.count = index; return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); } +#endif /* DEBUG_MACSEC */ #ifdef MACSEC_KEY_PROGRAM /** @@ -461,18 +442,18 @@ static nve32_t ivc_macsec_kt_config(struct osi_core_priv_data *const osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = kt_config_macsec; - msg.status = osi_memcpy((void *) &msg.data.kt_config, - (void *)kt_config, - sizeof(struct osi_macsec_kt_config)); + (void)osi_memcpy((void *) 
&msg.data.kt_config, + (void *)kt_config, + sizeof(struct osi_macsec_kt_config)); ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); if (ret != 0) { return ret; } - msg.status = osi_memcpy((void *)kt_config, - (void *)&msg.data.kt_config, - sizeof(struct osi_macsec_kt_config)); + (void)osi_memcpy((void *)kt_config, + (void *)&msg.data.kt_config, + sizeof(struct osi_macsec_kt_config)); return ret; } #endif /* MACSEC_KEY_PROGRAM */ @@ -486,8 +467,8 @@ static nve32_t ivc_macsec_kt_config(struct osi_core_priv_data *const osi_core, * @retval 0 on Success * @retval -1 on Failure */ -static int ivc_macsec_cipher_config(struct osi_core_priv_data *const osi_core, - unsigned int cipher) +static nve32_t ivc_macsec_cipher_config(struct osi_core_priv_data *const osi_core, + nveu32_t cipher) { ivc_msg_common_t msg; nveu32_t index = 0; @@ -495,9 +476,9 @@ static int ivc_macsec_cipher_config(struct osi_core_priv_data *const osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = cipher_config; - msg.data.args.arguments[index] = cipher; + msg.args.arguments[index] = cipher; index++; - msg.data.args.count = index; + msg.args.count = index; return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); } @@ -519,48 +500,35 @@ static nve32_t ivc_macsec_lut_config(struct osi_core_priv_data *const osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = lut_config_macsec; - msg.status = osi_memcpy((void *) &msg.data.lut_config, - (void *)lut_config, - sizeof(struct osi_macsec_lut_config)); + (void)osi_memcpy((void *) &msg.data.lut_config, + (void *)lut_config, + sizeof(struct osi_macsec_lut_config)); ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); if (ret != 0) { - return ret; + goto done; } - msg.status = osi_memcpy((void *)lut_config, - (void *)&msg.data.lut_config, - sizeof(struct osi_macsec_lut_config)); + (void)osi_memcpy((void *)lut_config, + (void *)&msg.data.lut_config, + sizeof(struct osi_macsec_lut_config)); +done: return ret; } /** - * @brief 
ivc_macsec_handle_s_irq - handle s irq. + * @brief ivc_macsec_handle_irq - handle macsec irq. * * @param[in] osi_core: OSI Core private data structure. * */ -static void ivc_macsec_handle_s_irq(OSI_UNUSED +static void ivc_macsec_handle_irq(OSI_UNUSED struct osi_core_priv_data *const osi_core) { OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID, "Nothing to handle \n", 0ULL); } -/** - * @brief ivc_macsec_handle_ns_irq - handle ns irq. - * - * @param[in] osi_core: OSI Core private data structure. - * - */ - -static void ivc_macsec_handle_ns_irq(OSI_UNUSED - struct osi_core_priv_data *const osi_core) -{ - OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID, - "Nothing to handle \n", 0ULL); -} - /** * @brief ivc_macsec_deinit - De Initialize. * @@ -570,7 +538,7 @@ static void ivc_macsec_handle_ns_irq(OSI_UNUSED * @retval -1 on Failure */ -static int ivc_macsec_deinit(struct osi_core_priv_data *const osi_core) +static nve32_t ivc_macsec_deinit(struct osi_core_priv_data *const osi_core) { ivc_msg_common_t msg; @@ -585,12 +553,12 @@ static int ivc_macsec_deinit(struct osi_core_priv_data *const osi_core) * @brief ivc_macsec_init -Initialize. * * @param[in] osi_core: OSI Core private data structure. - * @param[in] genl_info: Generic netlink information structure. + * @param[in] mtu: mtu to be set. 
* * @retval 0 on Success * @retval -1 on Failure */ -static int ivc_macsec_init(struct osi_core_priv_data *const osi_core, +static nve32_t ivc_macsec_init(struct osi_core_priv_data *const osi_core, nveu32_t mtu) { ivc_msg_common_t msg; @@ -599,9 +567,9 @@ static int ivc_macsec_init(struct osi_core_priv_data *const osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = init_macsec; - msg.data.args.arguments[index] = mtu; + msg.args.arguments[index] = mtu; index++; - msg.data.args.count = index; + msg.args.count = index; return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); } @@ -621,32 +589,24 @@ void ivc_init_macsec_ops(void *macsecops) ops->init = ivc_macsec_init; ops->deinit = ivc_macsec_deinit; - ops->handle_ns_irq = ivc_macsec_handle_ns_irq; - ops->handle_s_irq = ivc_macsec_handle_s_irq; + ops->handle_irq = ivc_macsec_handle_irq; ops->lut_config = ivc_macsec_lut_config; #ifdef MACSEC_KEY_PROGRAM ops->kt_config = ivc_macsec_kt_config; #endif /* MACSEC_KEY_PROGRAM */ ops->cipher_config = ivc_macsec_cipher_config; - ops->loopback_config = ivc_macsec_loopback_config; ops->macsec_en = ivc_macsec_enable; ops->config = ivc_macsec_config; ops->read_mmc = ivc_macsec_read_mmc; - ops->dbg_buf_config = ivc_macsec_dbg_buf_config; +#ifdef DEBUG_MACSEC + ops->loopback_config = ivc_macsec_loopback_config; ops->dbg_events_config = ivc_macsec_dbg_events_config; + ops->dbg_buf_config = ivc_macsec_dbg_buf_config; +#endif /* DEBUG_MACSEC */ ops->get_sc_lut_key_index = ivc_get_sc_lut_key_index; - ops->update_mtu = ivc_macsec_update_mtu; } #endif -/** - * @brief ivc_get_core_safety_config - EQOS MAC safety configuration - */ -void *ivc_get_core_safety_config(void) -{ - return &ivc_safety_config; -} - /** * @brief vir_ivc_core_deinit - MAC core deinitialization * diff --git a/osi/core/macsec.c b/osi/core/macsec.c index a1c545f..e2f039f 100644 --- a/osi/core/macsec.c +++ b/osi/core/macsec.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. 
All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -26,22 +26,23 @@ #include "../osi/common/common.h" #include "core_local.h" -#if defined(DEBUG_MACSEC) && defined(QNX_OS) -#define LOG(...) \ +#if 0 /* Qnx */ +#define MACSEC_LOG(...) \ { \ - slogf(0, 2, ##__VA_ARGS__); \ + slogf(0, 6, ##__VA_ARGS__); \ } -#elif defined(DEBUG_MACSEC) && defined(LINUX_OS) +#elif 0 /* Linux */ #include -#define LOG(...) \ +#define MACSEC_LOG(...) \ { \ - pr_err(##__VA_ARGS__); \ + pr_debug(__VA_ARGS__); \ } #else -#define LOG(...) +#define MACSEC_LOG(...) #endif +#ifdef DEBUG_MACSEC /** * @brief poll_for_dbg_buf_update - Query the status of a debug buffer update. * @@ -70,6 +71,7 @@ static nve32_t poll_for_dbg_buf_update(struct osi_core_priv_data *const osi_core nveu32_t retry = RETRY_COUNT; nveu32_t dbg_buf_config; nve32_t cond = COND_NOT_MET; + nve32_t ret = 0; nveu32_t count; count = 0; @@ -77,7 +79,8 @@ static nve32_t poll_for_dbg_buf_update(struct osi_core_priv_data *const osi_core if (count > retry) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "timeout!\n", 0ULL); - return -1; + ret = -1; + goto err; } dbg_buf_config = osi_readla(osi_core, @@ -91,8 +94,8 @@ static nve32_t poll_for_dbg_buf_update(struct osi_core_priv_data *const osi_core /* wait on UPDATE bit to reset */ osi_core->osd_ops.udelay(RETRY_DELAY); } - - return 0; +err: + return ret; } @@ -194,7 +197,8 @@ static void write_tx_dbg_trigger_evts( nveu8_t *base = (nveu8_t *)osi_core->macsec_base; nveu32_t flags = 0; - nveu32_t tx_trigger_evts, debug_ctrl_reg; + nveu32_t tx_trigger_evts; + nveu32_t debug_ctrl_reg; flags = dbg_buf_config->flags; tx_trigger_evts = osi_readla(osi_core, @@ -235,7 +239,7 @@ static void write_tx_dbg_trigger_evts( tx_trigger_evts &= ~MACSEC_TX_DBG_CAPTURE; } - LOG("%s: 0x%x", __func__, 
tx_trigger_evts); + MACSEC_LOG("%s: 0x%x", __func__, tx_trigger_evts); osi_writela(osi_core, tx_trigger_evts, base + MACSEC_TX_DEBUG_TRIGGER_EN_0); if (tx_trigger_evts != OSI_NONE) { @@ -243,7 +247,7 @@ static void write_tx_dbg_trigger_evts( debug_ctrl_reg = osi_readla(osi_core, base + MACSEC_TX_DEBUG_CONTROL_0); debug_ctrl_reg |= MACSEC_TX_DEBUG_CONTROL_0_START_CAP; - LOG("%s: debug_ctrl_reg 0x%x", __func__, + MACSEC_LOG("%s: debug_ctrl_reg 0x%x", __func__, debug_ctrl_reg); osi_writela(osi_core, debug_ctrl_reg, base + MACSEC_TX_DEBUG_CONTROL_0); @@ -280,12 +284,12 @@ static void tx_dbg_trigger_evts( nveu32_t flags = 0; nveu32_t tx_trigger_evts; - if (dbg_buf_config->rw == OSI_DBG_TBL_WRITE) { + if (dbg_buf_config->rw == OSI_LUT_WRITE) { write_tx_dbg_trigger_evts(osi_core, dbg_buf_config); } else { tx_trigger_evts = osi_readla(osi_core, base + MACSEC_TX_DEBUG_TRIGGER_EN_0); - LOG("%s: 0x%x", __func__, tx_trigger_evts); + MACSEC_LOG("%s: 0x%x", __func__, tx_trigger_evts); if ((tx_trigger_evts & MACSEC_TX_DBG_LKUP_MISS) != OSI_NONE) { flags |= OSI_TX_DBG_LKUP_MISS_EVT; } @@ -336,7 +340,8 @@ static void write_rx_dbg_trigger_evts( nveu8_t *base = (nveu8_t *)osi_core->macsec_base; nveu32_t flags = 0; - nveu32_t rx_trigger_evts = 0, debug_ctrl_reg; + nveu32_t rx_trigger_evts = 0; + nveu32_t debug_ctrl_reg; flags = dbg_buf_config->flags; rx_trigger_evts = osi_readla(osi_core, @@ -376,7 +381,7 @@ static void write_rx_dbg_trigger_evts( } else { rx_trigger_evts &= ~MACSEC_RX_DBG_CAPTURE; } - LOG("%s: 0x%x", __func__, rx_trigger_evts); + MACSEC_LOG("%s: 0x%x", __func__, rx_trigger_evts); osi_writela(osi_core, rx_trigger_evts, base + MACSEC_RX_DEBUG_TRIGGER_EN_0); if (rx_trigger_evts != OSI_NONE) { @@ -384,7 +389,7 @@ static void write_rx_dbg_trigger_evts( debug_ctrl_reg = osi_readla(osi_core, base + MACSEC_RX_DEBUG_CONTROL_0); debug_ctrl_reg |= MACSEC_RX_DEBUG_CONTROL_0_START_CAP; - LOG("%s: debug_ctrl_reg 0x%x", __func__, + MACSEC_LOG("%s: debug_ctrl_reg 0x%x", __func__, 
debug_ctrl_reg); osi_writela(osi_core, debug_ctrl_reg, base + MACSEC_RX_DEBUG_CONTROL_0); @@ -421,12 +426,12 @@ static void rx_dbg_trigger_evts( nveu32_t flags = 0; nveu32_t rx_trigger_evts = 0; - if (dbg_buf_config->rw == OSI_DBG_TBL_WRITE) { + if (dbg_buf_config->rw == OSI_LUT_WRITE) { write_rx_dbg_trigger_evts(osi_core, dbg_buf_config); } else { rx_trigger_evts = osi_readla(osi_core, base + MACSEC_RX_DEBUG_TRIGGER_EN_0); - LOG("%s: 0x%x", __func__, rx_trigger_evts); + MACSEC_LOG("%s: 0x%x", __func__, rx_trigger_evts); if ((rx_trigger_evts & MACSEC_RX_DBG_LKUP_MISS) != OSI_NONE) { flags |= OSI_RX_DBG_LKUP_MISS_EVT; } @@ -477,12 +482,16 @@ static nve32_t validate_inputs_macsec_dbg_buf_conf( const struct osi_core_priv_data *const osi_core, const struct osi_macsec_dbg_buf_config *const dbg_buf_config) { + nve32_t ret = 0; + + (void) osi_core; /* Validate inputs */ if ((dbg_buf_config->rw > OSI_RW_MAX) || (dbg_buf_config->ctlr_sel > OSI_CTLR_SEL_MAX)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Params validation failed\n", 0ULL); - return -1; + ret = -1; + goto err; } if (((dbg_buf_config->ctlr_sel == OSI_CTLR_SEL_TX) && @@ -491,9 +500,11 @@ static nve32_t validate_inputs_macsec_dbg_buf_conf( (dbg_buf_config->index > OSI_RX_DBG_BUF_IDX_MAX))) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Wrong index \n", dbg_buf_config->index); - return -1; + ret = -1; + goto err; } - return 0; +err: + return ret; } /** @@ -532,7 +543,8 @@ static nve32_t macsec_dbg_buf_config(struct osi_core_priv_data *const osi_core, nve32_t ret = 0; if (validate_inputs_macsec_dbg_buf_conf(osi_core, dbg_buf_config) < 0) { - return -1; + ret = -1; + goto err; } dbg_config_reg = osi_readla(osi_core, base + MACSEC_DEBUG_BUF_CONFIG_0); @@ -557,13 +569,14 @@ static nve32_t macsec_dbg_buf_config(struct osi_core_priv_data *const osi_core, osi_writela(osi_core, dbg_config_reg, base + MACSEC_DEBUG_BUF_CONFIG_0); ret = poll_for_dbg_buf_update(osi_core); if (ret < 0) { - return ret; + goto err; } 
- if (dbg_buf_config->rw == OSI_NONE) { + if (dbg_buf_config->rw == OSI_LUT_READ) { read_dbg_buf_data(osi_core, dbg_buf_config->dbg_buf); } - return 0; +err: + return ret; } /** @@ -597,17 +610,19 @@ static nve32_t macsec_dbg_events_config( { nveu64_t events = 0; nveu32_t i, flags = dbg_buf_config->flags; + nve32_t ret = 0; /* Validate inputs */ if ((dbg_buf_config->rw > OSI_RW_MAX) || (dbg_buf_config->ctlr_sel > OSI_CTLR_SEL_MAX)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Params validation failed!\n", 0ULL); - return -1; + ret = -1; + goto err; } /* Only one event allowed to configure at a time */ - if ((flags != OSI_NONE) && (dbg_buf_config->rw == OSI_DBG_TBL_WRITE)) { + if ((flags != OSI_NONE) && (dbg_buf_config->rw == OSI_LUT_WRITE)) { for (i = 0; i < 32U; i++) { if ((flags & ((nveu32_t)(1U) << i)) != OSI_NONE) { CERT_C__POST_INC__U64(events); @@ -616,7 +631,8 @@ static nve32_t macsec_dbg_events_config( if (events > 1U) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Don't allow more than one debug events set\n", flags); - return -1; + ret = -1; + goto err; } } @@ -632,9 +648,10 @@ static nve32_t macsec_dbg_events_config( "Unknown controller select\n", 0ULL); break; } - - return 0; +err: + return ret; } +#endif /* DEBUG_MACSEC */ /** * @brief update_macsec_mmc_val - Reads specific macsec mmc counters @@ -662,7 +679,8 @@ static inline nveul64_t update_macsec_mmc_val( struct osi_core_priv_data *osi_core, nveu64_t offset) { - nveul64_t value_lo, value_hi; + nveul64_t value_lo; + nveul64_t value_hi; value_lo = osi_readla(osi_core, (nveu8_t *)osi_core->macsec_base + offset); @@ -786,26 +804,21 @@ static nve32_t macsec_enable(struct osi_core_priv_data *const osi_core, } val = osi_readla(osi_core, base + MACSEC_CONTROL0); - OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID, - "Read MACSEC_CONTROL0: \n", val); + MACSEC_LOG("Read MACSEC_CONTROL0: 0x%x \n", val); if ((enable & OSI_MACSEC_TX_EN) == OSI_MACSEC_TX_EN) { - OSI_CORE_INFO(osi_core->osd, 
OSI_LOG_ARG_INVALID, - "Enabling macsec TX \n", 0ULL); + MACSEC_LOG("Enabling macsec TX\n"); val |= (MACSEC_TX_EN); } else { - OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID, - "Disabling macsec TX \n", 0ULL); + MACSEC_LOG("Disabling macsec TX\n"); val &= ~(MACSEC_TX_EN); } if ((enable & OSI_MACSEC_RX_EN) == OSI_MACSEC_RX_EN) { - OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID, - "Enabling macsec RX \n", 0ULL); + MACSEC_LOG("Enabling macsec RX\n"); val |= (MACSEC_RX_EN); } else { - OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID, - "Disabling macsec RX \n", 0ULL); + MACSEC_LOG("Disabling macsec RX\n"); val &= ~(MACSEC_RX_EN); } @@ -816,7 +829,7 @@ static nve32_t macsec_enable(struct osi_core_priv_data *const osi_core, osi_core->is_macsec_enabled = OSI_DISABLE; } - LOG("Write MACSEC_CONTROL0: 0x%x\n", val); + MACSEC_LOG("Write MACSEC_CONTROL0: 0x%x\n", val); osi_writela(osi_core, val, base + MACSEC_CONTROL0); exit: @@ -943,13 +956,17 @@ static nve32_t kt_key_write(struct osi_core_priv_data *const osi_core, static nve32_t validate_kt_config(const struct osi_macsec_kt_config *const kt_config) { + nve32_t ret = 0; + /* Validate KT config */ if ((kt_config->table_config.ctlr_sel > OSI_CTLR_SEL_MAX) || (kt_config->table_config.rw > OSI_RW_MAX) || (kt_config->table_config.index > OSI_TABLE_INDEX_MAX)) { - return -1; + ret = -1; + goto err; } - return 0; +err: + return ret; } @@ -962,7 +979,7 @@ static nve32_t macsec_kt_config(struct osi_core_priv_data *const osi_core, ret = validate_kt_config(kt_config); if (ret < 0) { - return ret; + goto err; } kt_config_reg = osi_readla(osi_core, base + MACSEC_GCM_KEYTABLE_CONFIG); @@ -977,7 +994,7 @@ static nve32_t macsec_kt_config(struct osi_core_priv_data *const osi_core, /* For write operation, load the lut_data registers */ ret = kt_key_write(osi_core, kt_config); if (ret < 0) { - return ret; + goto err; } } else { kt_config_reg &= ~MACSEC_KT_CONFIG_RW; @@ -992,15 +1009,16 @@ static nve32_t macsec_kt_config(struct 
osi_core_priv_data *const osi_core, /* Wait for this KT update to finish */ ret = poll_for_kt_update(osi_core); if (ret < 0) { - return ret; + goto err; } if (kt_config->table_config.rw == OSI_NONE) { ret = kt_key_read(osi_core, kt_config); if (ret < 0) { - return ret; + goto err; } } +err: return ret; } #endif /* MACSEC_KEY_PROGRAM */ @@ -1036,6 +1054,7 @@ static inline nve32_t poll_for_lut_update(struct osi_core_priv_data *osi_core) nveu32_t lut_config; nveu32_t count; nve32_t cond = 1; + nve32_t ret = 0; count = 0; while (cond == 1) { @@ -1044,7 +1063,8 @@ static inline nve32_t poll_for_lut_update(struct osi_core_priv_data *osi_core) OSI_LOG_ARG_HW_FAIL, "LUT update timed out\n", 0ULL); - return -1; + ret = -1; + goto exit; } count++; @@ -1060,8 +1080,8 @@ static inline nve32_t poll_for_lut_update(struct osi_core_priv_data *osi_core) osi_core->osd_ops.udelay(RETRY_DELAY); } } - - return 0; +exit: + return ret; } /** @@ -1295,7 +1315,7 @@ static void lut_read_inputs_vlan(const nveu32_t *const lut_data, * @retval -1 for failure */ static nve32_t lut_read_inputs(struct osi_macsec_lut_config *const lut_config, - nveu32_t *const lut_data) + const nveu32_t *const lut_data) { struct osi_lut_inputs entry = {0}; nveu32_t flags = 0; @@ -1387,7 +1407,8 @@ static nve32_t byp_lut_read(struct osi_core_priv_data *const osi_core, struct osi_macsec_lut_config *const lut_config) { nveu32_t lut_data[MACSEC_LUT_DATA_REG_CNT] = {0}; - nveu32_t flags = 0, val = 0; + nveu32_t flags = 0; + nveu32_t val = 0; nveu32_t index = lut_config->table_config.index; nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; nveu8_t *paddr = OSI_NULL; @@ -1398,7 +1419,8 @@ static nve32_t byp_lut_read(struct osi_core_priv_data *const osi_core, if (lut_read_inputs(lut_config, lut_data) != 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "LUT inputs error\n", 0ULL); - return -1; + ret = -1; + goto err; } /* Lookup output */ @@ -1437,6 +1459,7 @@ static nve32_t byp_lut_read(struct osi_core_priv_data 
*const osi_core, } lut_config->flags |= flags; } +err: return ret; } @@ -1540,7 +1563,8 @@ static nve32_t sci_lut_read(struct osi_core_priv_data *const osi_core, nve32_t ret = 0; if (index > OSI_SC_LUT_MAX_INDEX) { - return -1; + ret = -1; + goto exit; } read_lut_data(osi_core, lut_data); @@ -1549,7 +1573,8 @@ static nve32_t sci_lut_read(struct osi_core_priv_data *const osi_core, if (lut_read_inputs(lut_config, lut_data) != 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "LUT inputs error\n", 0ULL); - return -1; + ret = -1; + goto exit; } tx_sci_lut_read(osi_core, lut_config, lut_data); break; @@ -1584,8 +1609,7 @@ static nve32_t sci_lut_read(struct osi_core_priv_data *const osi_core, ret = -1; break; } - - /* Lookup output */ +exit: return ret; } @@ -1654,7 +1678,6 @@ static nve32_t sc_param_lut_read(struct osi_core_priv_data *const osi_core, break; } - /* Lookup output */ return ret; } @@ -1922,7 +1945,7 @@ static void tx_sa_state_lut_config(const struct osi_macsec_lut_config *const lut * @retval -1 on failure */ static nve32_t sa_state_lut_config(struct osi_core_priv_data *const osi_core, - struct osi_macsec_lut_config *const lut_config) + const struct osi_macsec_lut_config *const lut_config) { nveu32_t lut_data[MACSEC_LUT_DATA_REG_CNT] = {0}; struct osi_macsec_table_config table_config = lut_config->table_config; @@ -2083,7 +2106,7 @@ static void tx_sc_param_lut_config( * @retval -1 on failure */ static nve32_t sc_param_lut_config(struct osi_core_priv_data *const osi_core, - struct osi_macsec_lut_config *const lut_config) + const struct osi_macsec_lut_config *const lut_config) { nveu32_t lut_data[MACSEC_LUT_DATA_REG_CNT] = {0}; struct osi_macsec_table_config table_config = lut_config->table_config; @@ -2093,7 +2116,8 @@ static nve32_t sc_param_lut_config(struct osi_core_priv_data *const osi_core, if (entry.key_index_start > OSI_KEY_INDEX_MAX) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Invalid Key Index\n", 0ULL); - return -1; + ret = -1; + goto 
exit; } switch (table_config.ctlr_sel) { @@ -2111,7 +2135,7 @@ static nve32_t sc_param_lut_config(struct osi_core_priv_data *const osi_core, } commit_lut_data(osi_core, lut_data); - +exit: return ret; } @@ -2491,18 +2515,20 @@ static void lut_config_preempt_mask(const struct osi_macsec_lut_config *const lu * @retval 0 on success * @retval -1 on failure */ -static nve32_t lut_config_inputs(struct osi_macsec_lut_config *const lut_config, +static nve32_t lut_config_inputs(const struct osi_macsec_lut_config *const lut_config, nveu32_t *const lut_data) { struct osi_lut_inputs entry = lut_config->lut_in; nveu32_t flags = lut_config->flags; nveu32_t i, j = OSI_LUT_FLAGS_BYTE0_PATTERN_VALID; + nve32_t ret = 0; for (i = 0; i < OSI_LUT_BYTE_PATTERN_MAX; i++) { if ((flags & j) == j) { if (entry.byte_pattern_offset[i] > OSI_LUT_BYTE_PATTERN_MAX_OFFSET) { - return -1; + ret = -1; + goto exit; } } j <<= 1; @@ -2512,14 +2538,16 @@ static nve32_t lut_config_inputs(struct osi_macsec_lut_config *const lut_config, OSI_LUT_FLAGS_BYTE0_PATTERN_VALID) { if (entry.byte_pattern_offset[0] > OSI_LUT_BYTE_PATTERN_MAX_OFFSET) { - return -1; + ret = -1; + goto exit; } } if ((flags & OSI_LUT_FLAGS_VLAN_VALID) == OSI_LUT_FLAGS_VLAN_VALID) { if ((entry.vlan_pcp > OSI_VLAN_PCP_MAX) || (entry.vlan_id > OSI_VLAN_ID_MAX)) { - return -1; + ret = -1; + goto exit; } } @@ -2529,8 +2557,8 @@ static nve32_t lut_config_inputs(struct osi_macsec_lut_config *const lut_config, lut_config_vlan(lut_config, lut_data); lut_config_byte_pattern(lut_config, lut_data); lut_config_preempt_mask(lut_config, lut_data); - - return 0; +exit: + return ret; } /** @@ -2563,9 +2591,11 @@ static nve32_t rx_sci_lut_config( { nveu32_t flags = lut_config->flags; struct osi_sci_lut_outputs entry = lut_config->sci_lut_out; + nve32_t ret = 0; if (entry.sc_index > OSI_SC_INDEX_MAX) { - return -1; + ret = -1; + goto exit; } lut_data[0] |= ((nveu32_t)(entry.sci[0]) | @@ -2591,12 +2621,12 @@ static nve32_t rx_sci_lut_config( } lut_data[2] 
|= entry.sc_index << 10; - - return 0; +exit: + return ret; } /** - * @brief rx_sci_lut_config - update lut_data from lut_config for sci_lut + * @brief tx_sci_lut_config - update lut_data from lut_config for sci_lut * * @note * Algorithm: @@ -2621,15 +2651,17 @@ static nve32_t rx_sci_lut_config( * @retval -1 on failure */ static nve32_t tx_sci_lut_config( - struct osi_macsec_lut_config *const lut_config, + const struct osi_macsec_lut_config *const lut_config, nveu32_t *const lut_data) { nveu32_t flags = lut_config->flags; struct osi_sci_lut_outputs entry = lut_config->sci_lut_out; nveu32_t an_valid = entry.an_valid; + nve32_t ret = 0; if (lut_config_inputs(lut_config, lut_data) != 0) { - return -1; + ret = -1; + goto exit; } /* Lookup result fields */ @@ -2656,7 +2688,8 @@ static nve32_t tx_sci_lut_config( OSI_LUT_FLAGS_DVLAN_OUTER_INNER_TAG_SEL) { lut_data[6] |= MACSEC_TX_SCI_LUT_DVLAN_OUTER_INNER_TAG_SEL; } - return 0; +exit: + return ret; } /** @@ -2686,7 +2719,7 @@ static nve32_t tx_sci_lut_config( * @retval -1 on failure */ static nve32_t sci_lut_config(struct osi_core_priv_data *const osi_core, - struct osi_macsec_lut_config *const lut_config) + const struct osi_macsec_lut_config *const lut_config) { nveu32_t lut_data[MACSEC_LUT_DATA_REG_CNT] = {0}; struct osi_macsec_table_config table_config = lut_config->table_config; @@ -2700,7 +2733,8 @@ static nve32_t sci_lut_config(struct osi_core_priv_data *const osi_core, (lut_config->table_config.index > OSI_SC_LUT_MAX_INDEX)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "SCI LUT config err - Invalid Index\n", 0ULL); - return -1; + ret = -1; + goto exit; } switch (table_config.ctlr_sel) { @@ -2708,7 +2742,8 @@ static nve32_t sci_lut_config(struct osi_core_priv_data *const osi_core, if (tx_sci_lut_config(lut_config, lut_data) < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to config tx sci LUT\n", 0ULL); - return -1; + ret = -1; + goto exit; } commit_lut_data(osi_core, lut_data); @@ -2732,7 
+2767,8 @@ static nve32_t sci_lut_config(struct osi_core_priv_data *const osi_core, if (rx_sci_lut_config(lut_config, lut_data) < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to config rx sci LUT\n", 0ULL); - return -1; + ret = -1; + goto exit; } commit_lut_data(osi_core, lut_data); @@ -2758,6 +2794,7 @@ static nve32_t sci_lut_config(struct osi_core_priv_data *const osi_core, ret = -1; break; } +exit: return ret; } @@ -2787,7 +2824,7 @@ static nve32_t sci_lut_config(struct osi_core_priv_data *const osi_core, * @retval -1 on failure */ static nve32_t byp_lut_config(struct osi_core_priv_data *const osi_core, - struct osi_macsec_lut_config *const lut_config) + const struct osi_macsec_lut_config *const lut_config) { nveu32_t lut_data[MACSEC_LUT_DATA_REG_CNT] = {0}; nveu32_t flags = lut_config->flags; @@ -2799,7 +2836,8 @@ static nve32_t byp_lut_config(struct osi_core_priv_data *const osi_core, if (lut_config_inputs(lut_config, lut_data) != 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "LUT inputs error\n", 0ULL); - return -1; + ret = -1; + goto exit; } /* Lookup output */ @@ -2860,7 +2898,7 @@ static nve32_t byp_lut_config(struct osi_core_priv_data *const osi_core, ret = -1; break; } - +exit: return ret; } @@ -2888,7 +2926,7 @@ static nve32_t byp_lut_config(struct osi_core_priv_data *const osi_core, * @retval -1 on failure */ static inline nve32_t lut_data_write(struct osi_core_priv_data *const osi_core, - struct osi_macsec_lut_config *const lut_config) + const struct osi_macsec_lut_config *const lut_config) { nve32_t ret = 0; @@ -2942,19 +2980,23 @@ static inline nve32_t lut_data_write(struct osi_core_priv_data *const osi_core, */ static nve32_t validate_lut_conf(const struct osi_macsec_lut_config *const lut_config) { + nve32_t ret = 0; + /* Validate LUT config */ if ((lut_config->table_config.ctlr_sel > OSI_CTLR_SEL_MAX) || (lut_config->table_config.rw > OSI_RW_MAX) || (lut_config->table_config.index > OSI_TABLE_INDEX_MAX) || 
(lut_config->lut_sel > OSI_LUT_SEL_MAX)) { - LOG("Validating LUT config failed. ctrl: %hu," + MACSEC_LOG("Validating LUT config failed. ctrl: %hu," " rw: %hu, index: %hu, lut_sel: %hu", lut_config->table_config.ctlr_sel, lut_config->table_config.rw, lut_config->table_config.index, lut_config->lut_sel); - return -1; + ret = -1; + goto exit; } - return 0; +exit: + return ret; } /** @@ -2994,13 +3036,14 @@ static nve32_t macsec_lut_config(struct osi_core_priv_data *const osi_core, nveu8_t *base = (nveu8_t *)osi_core->macsec_base; if (validate_lut_conf(lut_config) < 0) { - return -1; + ret = -1; + goto exit; } /* Wait for previous LUT update to finish */ ret = poll_for_lut_update(osi_core); if (ret < 0) { - return ret; + goto exit; } lut_config_reg = osi_readla(osi_core, base + MACSEC_LUT_CONFIG); @@ -3015,7 +3058,7 @@ static nve32_t macsec_lut_config(struct osi_core_priv_data *const osi_core, /* For write operation, load the lut_data registers */ ret = lut_data_write(osi_core, lut_config); if (ret < 0) { - return ret; + goto exit; } } else { lut_config_reg &= ~MACSEC_LUT_CONFIG_RW; @@ -3034,17 +3077,17 @@ static nve32_t macsec_lut_config(struct osi_core_priv_data *const osi_core, /* Wait for this LUT update to finish */ ret = poll_for_lut_update(osi_core); if (ret < 0) { - return ret; + goto exit; } if (lut_config->table_config.rw == OSI_NONE) { ret = lut_data_read(osi_core, lut_config); if (ret < 0) { - return ret; + goto exit; } } - - return 0; +exit: + return ret; } /** @@ -3072,7 +3115,7 @@ static inline void handle_rx_sc_invalid_key( nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; nveu32_t clear = 0; - LOG("%s()\n", __func__); + MACSEC_LOG("%s()\n", __func__); /** check which SC/AN had triggered and clear */ /* rx_sc0_7 */ @@ -3108,7 +3151,7 @@ static inline void handle_tx_sc_invalid_key( nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; nveu32_t clear = 0; - LOG("%s()\n", __func__); + MACSEC_LOG("%s()\n", __func__); /** check which SC/AN had triggered and 
clear */ /* tx_sc0_7 */ @@ -3141,9 +3184,10 @@ static inline void handle_tx_sc_invalid_key( static inline void handle_safety_err_irq( const struct osi_core_priv_data *const osi_core) { + (void) osi_core; OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID, "Safety Error Handler \n", 0ULL); - LOG("%s()\n", __func__); + MACSEC_LOG("%s()\n", __func__); } /** @@ -3417,7 +3461,7 @@ static inline void handle_tx_irq(struct osi_core_priv_data *const osi_core) #endif tx_isr = osi_readla(osi_core, addr + MACSEC_TX_ISR); - LOG("%s(): tx_isr 0x%x\n", __func__, tx_isr); + MACSEC_LOG("%s(): tx_isr 0x%x\n", __func__, tx_isr); if ((tx_isr & MACSEC_TX_DBG_BUF_CAPTURE_DONE) == MACSEC_TX_DBG_BUF_CAPTURE_DONE) { handle_dbg_evt_capture_done(osi_core, OSI_CTLR_SEL_TX); @@ -3509,7 +3553,8 @@ static inline void handle_tx_irq(struct osi_core_priv_data *const osi_core) */ static inline void handle_rx_irq(struct osi_core_priv_data *const osi_core) { - nveu32_t rx_isr, clear = 0; + nveu32_t rx_isr; + nveu32_t clear = 0; nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; #ifdef HSI_SUPPORT nveu64_t rx_crc_err = 0; @@ -3517,7 +3562,7 @@ static inline void handle_rx_irq(struct osi_core_priv_data *const osi_core) #endif rx_isr = osi_readla(osi_core, addr + MACSEC_RX_ISR); - LOG("%s(): rx_isr 0x%x\n", __func__, rx_isr); + MACSEC_LOG("%s(): rx_isr 0x%x\n", __func__, rx_isr); if ((rx_isr & MACSEC_RX_DBG_BUF_CAPTURE_DONE) == MACSEC_RX_DBG_BUF_CAPTURE_DONE) { @@ -3616,15 +3661,23 @@ static inline void handle_rx_irq(struct osi_core_priv_data *const osi_core) */ static inline void handle_common_irq(struct osi_core_priv_data *const osi_core) { - nveu32_t common_isr, clear = 0; + nveu32_t common_isr; + nveu32_t clear = 0; nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; common_isr = osi_readla(osi_core, addr + MACSEC_COMMON_ISR); - LOG("%s(): common_isr 0x%x\n", __func__, common_isr); + MACSEC_LOG("%s(): common_isr 0x%x\n", __func__, common_isr); if ((common_isr & MACSEC_SECURE_REG_VIOL) == 
MACSEC_SECURE_REG_VIOL) { CERT_C__POST_INC__U64(osi_core->macsec_irq_stats.secure_reg_viol); clear |= MACSEC_SECURE_REG_VIOL; +#ifdef HSI_SUPPORT + if (osi_core->hsi.enabled == OSI_ENABLE) { + osi_core->hsi.macsec_err_code[MACSEC_REG_VIOL_ERR_IDX] = + OSI_MACSEC_REG_VIOL_ERR; + osi_core->hsi.macsec_report_err = OSI_ENABLE; + } +#endif } if ((common_isr & MACSEC_RX_UNINIT_KEY_SLOT) == @@ -3656,7 +3709,7 @@ static inline void handle_common_irq(struct osi_core_priv_data *const osi_core) } /** - * @brief macsec_handle_ns_irq - Non-secure interrupt handler + * @brief macsec_handle_irq - Macsec interrupt handler * * @note * Algorithm: @@ -3678,13 +3731,13 @@ static inline void handle_common_irq(struct osi_core_priv_data *const osi_core) * - Run time: Yes * - De-initialization: No */ -static void macsec_handle_ns_irq(struct osi_core_priv_data *const osi_core) +static void macsec_handle_irq(struct osi_core_priv_data *const osi_core) { nveu32_t irq_common_sr, common_isr; nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; irq_common_sr = osi_readla(osi_core, addr + MACSEC_INTERRUPT_COMMON_SR); - LOG("%s(): common_sr 0x%x\n", __func__, irq_common_sr); + MACSEC_LOG("%s(): common_sr 0x%x\n", __func__, irq_common_sr); if ((irq_common_sr & MACSEC_COMMON_SR_TX) == MACSEC_COMMON_SR_TX) { handle_tx_irq(osi_core); } @@ -3704,38 +3757,6 @@ static void macsec_handle_ns_irq(struct osi_core_priv_data *const osi_core) } } -/** - * @brief macsec_handle_s_irq - secure interrupt handler - * - * @note - * Algorithm: - * - Handles common interrupts - * - Refer to MACSEC column of <<******, (sequence diagram)>> for API details. - * - TraceID: *********** - * - * @param[in] osi_core: OSI core private data structure. used param macsec_base - * - * @pre MACSEC needs to be out of reset and proper clock configured. 
- * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - */ -static void macsec_handle_s_irq(struct osi_core_priv_data *const osi_core) -{ - nveu32_t common_isr; - nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; - - LOG("%s()\n", __func__); - - common_isr = osi_readla(osi_core, addr + MACSEC_COMMON_ISR); - if (common_isr != OSI_NONE) { - handle_common_irq(osi_core); - } -} - /** * @brief macsec_cipher_config - Configures the cipher type * @@ -3764,6 +3785,7 @@ static nve32_t macsec_cipher_config(struct osi_core_priv_data *const osi_core, { nveu8_t *base = (nveu8_t *)osi_core->macsec_base; nveu32_t val; + nve32_t ret = 0; val = osi_readla(osi_core, base + MACSEC_GCM_AES_CONTROL_0); @@ -3776,13 +3798,16 @@ static nve32_t macsec_cipher_config(struct osi_core_priv_data *const osi_core, val |= MACSEC_TX_AES_MODE_AES256; val |= MACSEC_RX_AES_MODE_AES256; } else { - return -1; + ret = -1; + goto exit; } osi_writela(osi_core, val, base + MACSEC_GCM_AES_CONTROL_0); - return 0; +exit: + return ret; } +#ifdef DEBUG_MACSEC /** * @brief macsec_loopback_config - Configures the loopback mode * @@ -3812,6 +3837,7 @@ static nve32_t macsec_loopback_config( { nveu8_t *base = (nveu8_t *)osi_core->macsec_base; nveu32_t val; + nve32_t ret = 0; val = osi_readla(osi_core, base + MACSEC_CONTROL1); @@ -3820,12 +3846,15 @@ static nve32_t macsec_loopback_config( } else if (enable == OSI_DISABLE) { val &= ~MACSEC_LOOPBACK_MODE_EN; } else { - return -1; + ret = -1; + goto exit; } osi_writela(osi_core, val, base + MACSEC_CONTROL1); - return 0; +exit: + return ret; } +#endif /* DEBUG_MACSEC */ /** * @brief clear_byp_lut - Clears the bypass lut @@ -3867,11 +3896,11 @@ static nve32_t clear_byp_lut(struct osi_core_priv_data *const osi_core) if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Error clearing CTLR:BYPASS LUT:INDEX: \n", j); - return ret; + goto exit; } } } - +exit: return ret; } @@ -3915,10 +3944,11 @@ static nve32_t 
clear_sci_lut(struct osi_core_priv_data *const osi_core) if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Error clearing CTLR:SCI LUT:INDEX: \n", j); - return ret; + goto exit; } } } +exit: return ret; } @@ -3962,10 +3992,11 @@ static nve32_t clear_sc_param_lut(struct osi_core_priv_data *const osi_core) if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Error clearing CTLR:SC PARAM LUT:INDEX: \n", j); - return ret; + goto exit; } } } +exit: return ret; } @@ -4010,10 +4041,11 @@ static nve32_t clear_sc_state_lut(struct osi_core_priv_data *const osi_core) if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Error clearing CTLR:SC STATE LUT:INDEX: \n", j); - return ret; + goto exit; } } } +exit: return ret; } @@ -4057,7 +4089,7 @@ static nve32_t clear_sa_state_lut(struct osi_core_priv_data *const osi_core) if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Error clearing TX CTLR:SA STATE LUT:INDEX: \n", j); - return ret; + goto exit; } } @@ -4070,9 +4102,10 @@ static nve32_t clear_sa_state_lut(struct osi_core_priv_data *const osi_core) if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Error clearing RX CTLR:SA STATE LUT:INDEX: \n", j); - return ret; + goto exit; } } +exit: return ret; } @@ -4117,23 +4150,23 @@ static nve32_t clear_lut(struct osi_core_priv_data *const osi_core) /* Clear all the LUT's which have a dedicated LUT valid bit per entry */ ret = clear_byp_lut(osi_core); if (ret < 0) { - return ret; + goto exit; } ret = clear_sci_lut(osi_core); if (ret < 0) { - return ret; + goto exit; } ret = clear_sc_param_lut(osi_core); if (ret < 0) { - return ret; + goto exit; } ret = clear_sc_state_lut(osi_core); if (ret < 0) { - return ret; + goto exit; } ret = clear_sa_state_lut(osi_core); if (ret < 0) { - return ret; + goto exit; } #ifdef MACSEC_KEY_PROGRAM @@ -4148,12 +4181,12 @@ static nve32_t clear_lut(struct osi_core_priv_data *const osi_core) if (ret < 0) { OSI_CORE_ERR(osi_core->osd, 
OSI_LOG_ARG_HW_FAIL, "Error clearing KT LUT:INDEX: \n", j); - return ret; + goto exit; } } } #endif /* MACSEC_KEY_PROGRAM */ - +exit: return ret; } @@ -4182,13 +4215,16 @@ static nve32_t clear_lut(struct osi_core_priv_data *const osi_core) static nve32_t macsec_deinit(struct osi_core_priv_data *const osi_core) { nveu32_t i; +#if defined(MACSEC_SUPPORT) && !defined(OSI_STRIPPED_LIB) const struct core_local *l_core = (void *)osi_core; +#endif for (i = OSI_CTLR_SEL_TX; i <= OSI_CTLR_SEL_RX; i++) { osi_memset(&osi_core->macsec_lut_status[i], OSI_NONE, sizeof(struct osi_macsec_lut_status)); } +#if defined(MACSEC_SUPPORT) && !defined(OSI_STRIPPED_LIB) /* Update MAC as per macsec requirement */ if (l_core->ops_p->macsec_config_mac != OSI_NULL) { l_core->ops_p->macsec_config_mac(osi_core, OSI_DISABLE); @@ -4196,6 +4232,7 @@ static nve32_t macsec_deinit(struct osi_core_priv_data *const osi_core) OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed config MAC per macsec\n", 0ULL); } +#endif return 0; } @@ -4228,28 +4265,30 @@ static nve32_t macsec_update_mtu(struct osi_core_priv_data *const osi_core, { nveu32_t val = 0; nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; + nve32_t ret = 0; if (mtu > OSI_MAX_MTU_SIZE) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Invalid MTU received!!\n", mtu); - return -1; + ret = -1; + goto exit; } /* Set MTU */ val = osi_readla(osi_core, addr + MACSEC_TX_MTU_LEN); - LOG("Read MACSEC_TX_MTU_LEN: 0x%x\n", val); + MACSEC_LOG("Read MACSEC_TX_MTU_LEN: 0x%x\n", val); val &= ~(MTU_LENGTH_MASK); val |= (mtu & MTU_LENGTH_MASK); - LOG("Write MACSEC_TX_MTU_LEN: 0x%x\n", val); + MACSEC_LOG("Write MACSEC_TX_MTU_LEN: 0x%x\n", val); osi_writela(osi_core, val, addr + MACSEC_TX_MTU_LEN); val = osi_readla(osi_core, addr + MACSEC_RX_MTU_LEN); - LOG("Read MACSEC_RX_MTU_LEN: 0x%x\n", val); + MACSEC_LOG("Read MACSEC_RX_MTU_LEN: 0x%x\n", val); val &= ~(MTU_LENGTH_MASK); val |= (mtu & MTU_LENGTH_MASK); - LOG("Write MACSEC_RX_MTU_LEN: 0x%x\n", val); + 
MACSEC_LOG("Write MACSEC_RX_MTU_LEN: 0x%x\n", val); osi_writela(osi_core, val, addr + MACSEC_RX_MTU_LEN); - - return 0; +exit: + return ret; } /** @@ -4304,9 +4343,9 @@ static nve32_t set_byp_lut(struct osi_core_priv_data *const osi_core) if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to set BYP for BC addr\n", (nveul64_t)ret); - return ret; + goto exit; } else { - osi_core->macsec_lut_status[i].next_byp_idx = + osi_core->macsec_lut_status[i].next_byp_idx = (nveu16_t ) ((osi_core->macsec_lut_status[i].next_byp_idx & 0xFFU) + 1U); } } @@ -4324,17 +4363,91 @@ static nve32_t set_byp_lut(struct osi_core_priv_data *const osi_core) OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to set BYP for MKPDU multicast DA\n", (nveul64_t)ret); - return ret; + goto exit; } else { - osi_core->macsec_lut_status[i].next_byp_idx = + osi_core->macsec_lut_status[i].next_byp_idx = (nveu16_t ) ((osi_core->macsec_lut_status[i].next_byp_idx & 0xFFU) + 1U); } } - return 0; +exit: + return ret; } +#ifdef DEBUG_MACSEC +static void macsec_intr_config(struct osi_core_priv_data *const osi_core, nveu32_t enable) +{ + nveu32_t val = 0; + nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; + + if (enable == OSI_ENABLE) { + val = osi_readla(osi_core, addr + MACSEC_TX_IMR); + MACSEC_LOG("Read MACSEC_TX_IMR: 0x%x\n", val); + val |= (MACSEC_TX_DBG_BUF_CAPTURE_DONE_INT_EN | + MACSEC_TX_MTU_CHECK_FAIL_INT_EN | + MACSEC_TX_SC_AN_NOT_VALID_INT_EN | + MACSEC_TX_AES_GCM_BUF_OVF_INT_EN | + MACSEC_TX_PN_EXHAUSTED_INT_EN | + MACSEC_TX_PN_THRSHLD_RCHD_INT_EN); + osi_writela(osi_core, val, addr + MACSEC_TX_IMR); + MACSEC_LOG("Write MACSEC_TX_IMR: 0x%x\n", val); + + val = osi_readla(osi_core, addr + MACSEC_RX_IMR); + MACSEC_LOG("Read MACSEC_RX_IMR: 0x%x\n", val); + + val |= (MACSEC_RX_DBG_BUF_CAPTURE_DONE_INT_EN | + RX_REPLAY_ERROR_INT_EN | + MACSEC_RX_MTU_CHECK_FAIL_INT_EN | + MACSEC_RX_AES_GCM_BUF_OVF_INT_EN | + MACSEC_RX_PN_EXHAUSTED_INT_EN + ); + osi_writela(osi_core, val, addr + 
MACSEC_RX_IMR); + MACSEC_LOG("Write MACSEC_RX_IMR: 0x%x\n", val); + + val = osi_readla(osi_core, addr + MACSEC_COMMON_IMR); + MACSEC_LOG("Read MACSEC_COMMON_IMR: 0x%x\n", val); + val |= (MACSEC_RX_UNINIT_KEY_SLOT_INT_EN | + MACSEC_RX_LKUP_MISS_INT_EN | + MACSEC_TX_UNINIT_KEY_SLOT_INT_EN | + MACSEC_TX_LKUP_MISS_INT_EN); + osi_writela(osi_core, val, addr + MACSEC_COMMON_IMR); + MACSEC_LOG("Write MACSEC_COMMON_IMR: 0x%x\n", val); + } else { + val = osi_readla(osi_core, addr + MACSEC_TX_IMR); + MACSEC_LOG("Read MACSEC_TX_IMR: 0x%x\n", val); + val &= (~MACSEC_TX_DBG_BUF_CAPTURE_DONE_INT_EN & + ~MACSEC_TX_MTU_CHECK_FAIL_INT_EN & + ~MACSEC_TX_SC_AN_NOT_VALID_INT_EN & + ~MACSEC_TX_AES_GCM_BUF_OVF_INT_EN & + ~MACSEC_TX_PN_EXHAUSTED_INT_EN & + ~MACSEC_TX_PN_THRSHLD_RCHD_INT_EN); + osi_writela(osi_core, val, addr + MACSEC_TX_IMR); + MACSEC_LOG("Write MACSEC_TX_IMR: 0x%x\n", val); + + val = osi_readla(osi_core, addr + MACSEC_RX_IMR); + MACSEC_LOG("Read MACSEC_RX_IMR: 0x%x\n", val); + val &= (~MACSEC_RX_DBG_BUF_CAPTURE_DONE_INT_EN & + ~RX_REPLAY_ERROR_INT_EN & + ~MACSEC_RX_MTU_CHECK_FAIL_INT_EN & + ~MACSEC_RX_AES_GCM_BUF_OVF_INT_EN & + ~MACSEC_RX_PN_EXHAUSTED_INT_EN + ); + osi_writela(osi_core, val, addr + MACSEC_RX_IMR); + MACSEC_LOG("Write MACSEC_RX_IMR: 0x%x\n", val); + + val = osi_readla(osi_core, addr + MACSEC_COMMON_IMR); + MACSEC_LOG("Read MACSEC_COMMON_IMR: 0x%x\n", val); + val &= (~MACSEC_RX_UNINIT_KEY_SLOT_INT_EN & + ~MACSEC_RX_LKUP_MISS_INT_EN & + ~MACSEC_TX_UNINIT_KEY_SLOT_INT_EN & + ~MACSEC_TX_LKUP_MISS_INT_EN); + osi_writela(osi_core, val, addr + MACSEC_COMMON_IMR); + MACSEC_LOG("Write MACSEC_COMMON_IMR: 0x%x\n", val); + } +} +#endif /* DEBUG_MACSEC */ + /** - * @brief macsec_init - Inititlizes macsec + * @brief macsec_initialize - Inititlizes macsec * * @note * Algorithm: @@ -4370,14 +4483,16 @@ static nve32_t set_byp_lut(struct osi_core_priv_data *const osi_core) * @retval 0 for success * @retval -1 for failure */ -static nve32_t macsec_init(struct 
osi_core_priv_data *const osi_core, - nveu32_t mtu) +static nve32_t macsec_initialize(struct osi_core_priv_data *const osi_core, nveu32_t mtu) { nveu32_t val = 0; +#if defined(MACSEC_SUPPORT) && !defined(OSI_STRIPPED_LIB) const struct core_local *l_core = (void *)osi_core; +#endif nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; nve32_t ret = 0; +#if defined(MACSEC_SUPPORT) && !defined(OSI_STRIPPED_LIB) /* Update MAC value as per macsec requirement */ if (l_core->ops_p->macsec_config_mac != OSI_NULL) { l_core->ops_p->macsec_config_mac(osi_core, OSI_ENABLE); @@ -4385,11 +4500,11 @@ static nve32_t macsec_init(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to config mac per macsec\n", 0ULL); } - +#endif /* Set MTU */ ret = macsec_update_mtu(osi_core, mtu); if (ret < 0) { - return ret; + goto exit; } /* set TX/RX SOT, as SOT value different for eqos. @@ -4397,83 +4512,64 @@ static nve32_t macsec_init(struct osi_core_priv_data *const osi_core, */ if (osi_core->mac == OSI_MAC_HW_EQOS) { val = osi_readla(osi_core, addr + MACSEC_TX_SOT_DELAY); - LOG("Read MACSEC_TX_SOT_DELAY: 0x%x\n", val); + MACSEC_LOG("Read MACSEC_TX_SOT_DELAY: 0x%x\n", val); val &= ~(SOT_LENGTH_MASK); val |= (EQOS_MACSEC_SOT_DELAY & SOT_LENGTH_MASK); - LOG("Write MACSEC_TX_SOT_DELAY: 0x%x\n", val); + MACSEC_LOG("Write MACSEC_TX_SOT_DELAY: 0x%x\n", val); osi_writela(osi_core, val, addr + MACSEC_TX_SOT_DELAY); val = osi_readla(osi_core, addr + MACSEC_RX_SOT_DELAY); - LOG("Read MACSEC_RX_SOT_DELAY: 0x%x\n", val); + MACSEC_LOG("Read MACSEC_RX_SOT_DELAY: 0x%x\n", val); val &= ~(SOT_LENGTH_MASK); val |= (EQOS_MACSEC_SOT_DELAY & SOT_LENGTH_MASK); - LOG("Write MACSEC_RX_SOT_DELAY: 0x%x\n", val); + MACSEC_LOG("Write MACSEC_RX_SOT_DELAY: 0x%x\n", val); osi_writela(osi_core, val, addr + MACSEC_RX_SOT_DELAY); } /* Set essential MACsec control configuration */ val = osi_readla(osi_core, addr + MACSEC_CONTROL0); - LOG("Read MACSEC_CONTROL0: 0x%x\n", val); + 
MACSEC_LOG("Read MACSEC_CONTROL0: 0x%x\n", val); val |= (MACSEC_TX_LKUP_MISS_NS_INTR | MACSEC_RX_LKUP_MISS_NS_INTR | MACSEC_TX_LKUP_MISS_BYPASS | MACSEC_RX_LKUP_MISS_BYPASS); val &= ~(MACSEC_VALIDATE_FRAMES_MASK); val |= MACSEC_VALIDATE_FRAMES_STRICT; val |= MACSEC_RX_REPLAY_PROT_EN; - LOG("Write MACSEC_CONTROL0: 0x%x\n", val); + MACSEC_LOG("Write MACSEC_CONTROL0: 0x%x\n", val); osi_writela(osi_core, val, addr + MACSEC_CONTROL0); val = osi_readla(osi_core, addr + MACSEC_CONTROL1); - LOG("Read MACSEC_CONTROL1: 0x%x\n", val); + MACSEC_LOG("Read MACSEC_CONTROL1: 0x%x\n", val); val |= (MACSEC_RX_MTU_CHECK_EN | MACSEC_TX_LUT_PRIO_BYP | MACSEC_TX_MTU_CHECK_EN); - LOG("Write MACSEC_CONTROL1: 0x%x\n", val); + MACSEC_LOG("Write MACSEC_CONTROL1: 0x%x\n", val); osi_writela(osi_core, val, addr + MACSEC_CONTROL1); val = osi_readla(osi_core, addr + MACSEC_STATS_CONTROL_0); - LOG("Read MACSEC_STATS_CONTROL_0: 0x%x\n", val); + MACSEC_LOG("Read MACSEC_STATS_CONTROL_0: 0x%x\n", val); /* set STATS rollover bit */ val |= MACSEC_STATS_CONTROL0_CNT_RL_OVR_CPY; - LOG("Write MACSEC_STATS_CONTROL_0: 0x%x\n", val); + MACSEC_LOG("Write MACSEC_STATS_CONTROL_0: 0x%x\n", val); osi_writela(osi_core, val, addr + MACSEC_STATS_CONTROL_0); - /* Enable default interrupts needed */ + /* Enable default HSI related interrupts needed */ val = osi_readla(osi_core, addr + MACSEC_TX_IMR); - LOG("Read MACSEC_TX_IMR: 0x%x\n", val); - val |= (MACSEC_TX_DBG_BUF_CAPTURE_DONE_INT_EN | - MACSEC_TX_MTU_CHECK_FAIL_INT_EN | - MACSEC_TX_MAC_CRC_ERROR_INT_EN | - MACSEC_TX_SC_AN_NOT_VALID_INT_EN | - MACSEC_TX_AES_GCM_BUF_OVF_INT_EN | - MACSEC_TX_PN_EXHAUSTED_INT_EN | - MACSEC_TX_PN_THRSHLD_RCHD_INT_EN); - LOG("Write MACSEC_TX_IMR: 0x%x\n", val); + MACSEC_LOG("Read MACSEC_TX_IMR: 0x%x\n", val); + val |= MACSEC_TX_MAC_CRC_ERROR_INT_EN; + MACSEC_LOG("Write MACSEC_TX_IMR: 0x%x\n", val); osi_writela(osi_core, val, addr + MACSEC_TX_IMR); /* set ICV error threshold to 1 */ osi_writela(osi_core, 1U, addr + 
MACSEC_RX_ICV_ERR_CNTRL); - + /* Enabling interrupts only related to HSI */ val = osi_readla(osi_core, addr + MACSEC_RX_IMR); - LOG("Read MACSEC_RX_IMR: 0x%x\n", val); - - val |= (MACSEC_RX_DBG_BUF_CAPTURE_DONE_INT_EN | - MACSEC_RX_ICV_ERROR_INT_EN | RX_REPLAY_ERROR_INT_EN | - MACSEC_RX_MTU_CHECK_FAIL_INT_EN | - MACSEC_RX_MAC_CRC_ERROR_INT_EN | - MACSEC_RX_AES_GCM_BUF_OVF_INT_EN | - MACSEC_RX_PN_EXHAUSTED_INT_EN - ); - LOG("Write MACSEC_RX_IMR: 0x%x\n", val); + MACSEC_LOG("Read MACSEC_RX_IMR: 0x%x\n", val); + val |= (MACSEC_RX_ICV_ERROR_INT_EN | + MACSEC_RX_MAC_CRC_ERROR_INT_EN); + MACSEC_LOG("Write MACSEC_RX_IMR: 0x%x\n", val); osi_writela(osi_core, val, addr + MACSEC_RX_IMR); val = osi_readla(osi_core, addr + MACSEC_COMMON_IMR); - LOG("Read MACSEC_COMMON_IMR: 0x%x\n", val); - - val |= (MACSEC_SECURE_REG_VIOL_INT_EN | - MACSEC_RX_UNINIT_KEY_SLOT_INT_EN | - MACSEC_RX_LKUP_MISS_INT_EN | - MACSEC_TX_UNINIT_KEY_SLOT_INT_EN | - MACSEC_TX_LKUP_MISS_INT_EN); - LOG("Write MACSEC_COMMON_IMR: 0x%x\n", val); + val |= MACSEC_SECURE_REG_VIOL_INT_EN; osi_writela(osi_core, val, addr + MACSEC_COMMON_IMR); /* Set AES mode @@ -4485,9 +4581,11 @@ static nve32_t macsec_init(struct osi_core_priv_data *const osi_core, if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Invalidating all LUT's failed\n", (nveul64_t)ret); - return ret; + goto exit; } - return set_byp_lut(osi_core); + ret = set_byp_lut(osi_core); +exit: + return ret; } /** @@ -4521,16 +4619,17 @@ static struct osi_macsec_sc_info *find_existing_sc( { struct osi_macsec_lut_status *lut_status_ptr = &osi_core->macsec_lut_status[ctlr]; + struct osi_macsec_sc_info *sc_found = OSI_NULL; nveu32_t i; for (i = 0; i < OSI_MAX_NUM_SC; i++) { if (osi_memcmp(lut_status_ptr->sc_info[i].sci, sc->sci, (nve32_t)OSI_SCI_LEN) == OSI_NONE_SIGNED) { - return &lut_status_ptr->sc_info[i]; + sc_found = &lut_status_ptr->sc_info[i]; } } - return OSI_NULL; + return sc_found; } /** @@ -4564,7 +4663,7 @@ static nveu32_t 
get_avail_sc_idx(const struct osi_core_priv_data *const osi_core for (i = 0; i < OSI_MAX_NUM_SC; i++) { if (lut_status_ptr->sc_info[i].an_valid == OSI_NONE) { - return i; + break; } } return i; @@ -4609,24 +4708,28 @@ static nve32_t macsec_get_key_index(struct osi_core_priv_data *const osi_core, (ctlr > OSI_CTLR_SEL_MAX)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Params validation failed\n", 0ULL); - return -1; + ret = -1; + goto exit; } ret = osi_memcpy(sc.sci, sci, OSI_SCI_LEN); if (ret < OSI_NONE_SIGNED) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "memcpy failed\n", 0ULL); - return -1; + ret = -1; + goto exit; } sc_info = find_existing_sc(osi_core, &sc, ctlr); if (sc_info == OSI_NULL) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "SCI Not found\n", 0ULL); - return -1; + ret = -1; + goto exit; } *key_index = (sc_info->sc_idx_start * OSI_MAX_NUM_SA); - return 0; +exit: + return ret; } /** @@ -4670,7 +4773,7 @@ static nve32_t del_upd_sc(struct osi_core_priv_data *const osi_core, #endif /* MACSEC_KEY_PROGRAM */ struct osi_macsec_lut_config lut_config = {0}; struct osi_macsec_table_config *table_config; - nve32_t ret; + nve32_t ret = 0; /* All input/output fields are already zero'd in declaration. * Write all 0's to LUT index to clear everything @@ -4686,13 +4789,14 @@ static nve32_t del_upd_sc(struct osi_core_priv_data *const osi_core, if (existing_sc->curr_an == sc->curr_an) { /* 1. SCI LUT */ lut_config.lut_sel = OSI_LUT_SEL_SCI; - table_config->index = (nveu16_t)(existing_sc->sc_idx_start); + table_config->index = (nveu16_t)(existing_sc->sc_idx_start & 0xFFU); ret = macsec_lut_config(osi_core, &lut_config); if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to del SCI LUT idx\n", sc->sc_idx_start); - return -1; + ret = -1; + goto exit; } /* 2. 
SC Param LUT */ @@ -4701,7 +4805,8 @@ static nve32_t del_upd_sc(struct osi_core_priv_data *const osi_core, if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to del SC param\n", (nveul64_t)ret); - return -1; + ret = -1; + goto exit; } /* 3. SC state LUT */ @@ -4710,24 +4815,26 @@ static nve32_t del_upd_sc(struct osi_core_priv_data *const osi_core, if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to del SC state\n", (nveul64_t)ret); - return -1; + ret = -1; + goto exit; } } /* 4. SA State LUT */ lut_config.lut_sel = OSI_LUT_SEL_SA_STATE; - table_config->index = (nveu16_t)((existing_sc->sc_idx_start * OSI_MAX_NUM_SA) + - sc->curr_an); + table_config->index = (nveu16_t)(((existing_sc->sc_idx_start * OSI_MAX_NUM_SA) + + sc->curr_an) & (0xFFU)); ret = macsec_lut_config(osi_core, &lut_config); if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to del SA state\n", (nveul64_t)ret); - return -1; + ret = -1; + goto exit; } /* Store key table index returned to osd */ - *kt_idx = (nveu16_t)((existing_sc->sc_idx_start * OSI_MAX_NUM_SA) + - sc->curr_an); + *kt_idx = (nveu16_t)(((existing_sc->sc_idx_start * OSI_MAX_NUM_SA) + + sc->curr_an) & (0xFFU)); #ifdef MACSEC_KEY_PROGRAM /* 5. 
Key LUT */ table_config = &kt_config.table_config; @@ -4739,13 +4846,14 @@ static nve32_t del_upd_sc(struct osi_core_priv_data *const osi_core, if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to del SAK\n", (nveul64_t)ret); - return -1; + ret = -1; + goto exit; } #endif /* MACSEC_KEY_PROGRAM */ existing_sc->an_valid &= ~OSI_BIT(sc->curr_an); - - return 0; +exit: + return ret; } /** @@ -4771,6 +4879,7 @@ static nve32_t del_upd_sc(struct osi_core_priv_data *const osi_core, static void print_error(const struct osi_core_priv_data *const osi_core, nve32_t ret) { + (void) osi_core; if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to config macsec\n", (nveul64_t)ret); @@ -4808,6 +4917,87 @@ static void copy_rev_order(nveu8_t *dst_buff, const nveu8_t *src_buff, nveu16_t } } +/** + * @brief add_upd_sc_err_cleanup - Helper function to handle error conditions in add_upd_sc + * + * @note + * Algorithm: + * - Depending on the error_mask passed clear the LUTs + * - Refer to MACSEC column of <<******, (sequence diagram)>> for API details. + * - TraceID: *********** + * + * @param[in] osi_core: OSI core private data structure. used param macsec_base + * @param[in] mask: Error mask that indicate which LUTs need to be cleared + * @param[in] ctlr: Controller to be selected + * @param[in] sc: Pointer to the SC that was intended to be added + * + * @pre MACSEC needs to be out of reset and proper clock configured. 
+ * + * @note + * API Group: + * - Initialization: No + * - Run time: Yes + * - De-initialization: No + */ +static void add_upd_sc_err_cleanup(struct osi_core_priv_data *const osi_core, + nveu8_t mask, nveu16_t ctlr, + const struct osi_macsec_sc_info *const sc) +{ + struct osi_macsec_lut_config lut_config = {0}; + struct osi_macsec_table_config *table_config; + nve32_t ret_fail = 0; + nveu8_t error_mask = mask; + + if ((error_mask & OSI_BIT(3)) != OSI_NONE) { + /* Cleanup SCI LUT */ + error_mask &= ((~OSI_BIT(3)) & (0xFFU)); + osi_memset(&lut_config, 0, sizeof(lut_config)); + table_config = &lut_config.table_config; + table_config->ctlr_sel = ctlr; + table_config->rw = OSI_LUT_WRITE; + lut_config.lut_sel = OSI_LUT_SEL_SCI; + table_config->index = (nveu16_t)(sc->sc_idx_start & 0xFFU); + ret_fail = macsec_lut_config(osi_core, &lut_config); + print_error(osi_core, ret_fail); + } + if ((error_mask & OSI_BIT(2)) != OSI_NONE) { + /* cleanup SC param */ + error_mask &= ((~OSI_BIT(2)) & (0xFFU)); + osi_memset(&lut_config, 0, sizeof(lut_config)); + table_config = &lut_config.table_config; + table_config->ctlr_sel = ctlr; + lut_config.lut_sel = OSI_LUT_SEL_SC_PARAM; + table_config->index = (nveu16_t)(sc->sc_idx_start & 0xFFU); + ret_fail = macsec_lut_config(osi_core, &lut_config); + print_error(osi_core, ret_fail); + } + if ((error_mask & OSI_BIT(1)) != OSI_NONE) { + /* Cleanup SA state LUT */ + error_mask &= ((~OSI_BIT(1)) & (0xFFU)); + osi_memset(&lut_config, 0, sizeof(lut_config)); + table_config = &lut_config.table_config; + table_config->ctlr_sel = ctlr; + table_config->rw = OSI_LUT_WRITE; + lut_config.lut_sel = OSI_LUT_SEL_SA_STATE; + table_config->index = (nveu16_t)(((sc->sc_idx_start & 0xFU) * + OSI_MAX_NUM_SA) + sc->curr_an); + ret_fail = macsec_lut_config(osi_core, &lut_config); + print_error(osi_core, ret_fail); + } +#ifdef MACSEC_KEY_PROGRAM + if ((error_mask & OSI_BIT(0)) != OSI_NONE) { + error_mask &= ((~OSI_BIT(0)) & (0xFFU)); + osi_memset(&kt_config, 0, 
sizeof(kt_config)); + table_config = &kt_config.table_config; + table_config->ctlr_sel = ctlr; + table_config->rw = OSI_LUT_WRITE; + table_config->index = *kt_idx; + ret_fail = macsec_kt_config(osi_core, &kt_config); + print_error(osi_core, ret_fail); + } +#endif /* MACSEC_KEY_PROGRAM */ +} + /** * @brief add_upd_sc - add or update an SC * @@ -4823,7 +5013,7 @@ static void copy_rev_order(nveu8_t *dst_buff, const nveu8_t *src_buff, nveu16_t * - TraceID: *********** * * @param[in] osi_core: OSI core private data structure. used param macsec_base - * @param[in] existing_sc: Pointer to the existing sc + * @param[in] sc: Pointer to the existing sc * @param[in] ctlr: Controller to be selected * @param[out] kt_idx: Key index to be passed to osd * @@ -4839,13 +5029,14 @@ static void copy_rev_order(nveu8_t *dst_buff, const nveu8_t *src_buff, nveu16_t * @retval -1 on failure */ static nve32_t add_upd_sc(struct osi_core_priv_data *const osi_core, - struct osi_macsec_sc_info *const sc, - nveu16_t ctlr, nveu16_t *kt_idx) + const struct osi_macsec_sc_info *const sc, + nveu16_t ctlr, nveu16_t *kt_idx) { struct osi_macsec_lut_config lut_config = {0}; struct osi_macsec_table_config *table_config; - nve32_t ret; + nve32_t ret = 0; nveu32_t i; + nveu8_t error_mask = 0; #ifdef MACSEC_KEY_PROGRAM struct osi_macsec_kt_config kt_config = {0}; #endif /* MACSEC_KEY_PROGRAM */ @@ -4870,7 +5061,8 @@ static nve32_t add_upd_sc(struct osi_core_priv_data *const osi_core, if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to set SAK\n", (nveul64_t)ret); - return -1; + ret = -1; + goto exit; } } #endif /* MACSEC_KEY_PROGRAM */ @@ -4890,13 +5082,14 @@ static nve32_t add_upd_sc(struct osi_core_priv_data *const osi_core, if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to set SA state\n", (nveul64_t)ret); - goto err_sa_state; + error_mask |= OSI_BIT(0); + goto exit; } /* 3. 
SC param LUT */ lut_config.flags = OSI_NONE; lut_config.lut_sel = OSI_LUT_SEL_SC_PARAM; - table_config->index = (nveu16_t)(sc->sc_idx_start); + table_config->index = (nveu16_t)(sc->sc_idx_start & 0xFFU); copy_rev_order(lut_config.sc_param_out.sci, sc->sci, OSI_SCI_LEN); lut_config.sc_param_out.key_index_start = ((sc->sc_idx_start & 0xFU) * @@ -4910,7 +5103,8 @@ static nve32_t add_upd_sc(struct osi_core_priv_data *const osi_core, if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to set SC param\n", (nveul64_t)ret); - goto err_sc_param; + error_mask |= OSI_BIT(1); + goto exit; } /* 4. SCI LUT */ @@ -4931,7 +5125,8 @@ static nve32_t add_upd_sc(struct osi_core_priv_data *const osi_core, if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to set SCI LUT\n", (nveul64_t)ret); - goto err_sci; + error_mask |= OSI_BIT(2); + goto exit; } if (sc->flags == OSI_ENABLE_SA) { @@ -4944,56 +5139,13 @@ static nve32_t add_upd_sc(struct osi_core_priv_data *const osi_core, if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to set SC state\n", (nveul64_t)ret); - goto err_sc_state; + error_mask |= OSI_BIT(3); + goto exit; } } - return 0; - -err_sc_state: - /* Cleanup SCI LUT */ - osi_memset(&lut_config, 0, sizeof(lut_config)); - table_config = &lut_config.table_config; - table_config->ctlr_sel = ctlr; - table_config->rw = OSI_LUT_WRITE; - lut_config.lut_sel = OSI_LUT_SEL_SCI; - table_config->index = (nveu16_t)(sc->sc_idx_start); - ret = macsec_lut_config(osi_core, &lut_config); - print_error(osi_core, ret); - -err_sci: - /* cleanup SC param */ - osi_memset(&lut_config, 0, sizeof(lut_config)); - table_config = &lut_config.table_config; - table_config->ctlr_sel = ctlr; - lut_config.lut_sel = OSI_LUT_SEL_SC_PARAM; - table_config->index = (nveu16_t)(sc->sc_idx_start); - ret = macsec_lut_config(osi_core, &lut_config); - print_error(osi_core, ret); - -err_sc_param: - /* Cleanup SA state LUT */ - osi_memset(&lut_config, 0, 
sizeof(lut_config)); - table_config = &lut_config.table_config; - table_config->ctlr_sel = ctlr; - table_config->rw = OSI_LUT_WRITE; - lut_config.lut_sel = OSI_LUT_SEL_SA_STATE; - table_config->index = (nveu16_t)(((sc->sc_idx_start & 0xFU) * - OSI_MAX_NUM_SA) + sc->curr_an); - ret = macsec_lut_config(osi_core, &lut_config); - print_error(osi_core, ret); - -err_sa_state: -#ifdef MACSEC_KEY_PROGRAM - osi_memset(&kt_config, 0, sizeof(kt_config)); - table_config = &kt_config.table_config; - table_config->ctlr_sel = ctlr; - table_config->rw = OSI_LUT_WRITE; - table_config->index = *kt_idx; - ret = macsec_kt_config(osi_core, &kt_config); - print_error(osi_core, ret); -#endif /* MACSEC_KEY_PROGRAM */ - - return -1; +exit: + add_upd_sc_err_cleanup(osi_core, error_mask, ctlr, sc); + return ret; } /** @@ -5023,13 +5175,15 @@ err_sa_state: static nve32_t macsec_config_validate_inputs(nveu32_t enable, nveu16_t ctlr, const nveu16_t *kt_idx) { + nve32_t ret = 0; + /* Validate inputs */ if (((enable != OSI_ENABLE) && (enable != OSI_DISABLE)) || ((ctlr != OSI_CTLR_SEL_TX) && (ctlr != OSI_CTLR_SEL_RX)) || (kt_idx == OSI_NULL)) { - return -1; + ret = -1; } - return 0; + return ret; } /** @@ -5122,21 +5276,24 @@ static nve32_t add_new_sc(struct osi_core_priv_data *const osi_core, if (lut_status_ptr->num_of_sc_used >= OSI_MAX_NUM_SC) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Err: Reached max SC LUT entries!\n", 0ULL); - return -1; + ret = -1; + goto exit; } avail_sc_idx = get_avail_sc_idx(osi_core, ctlr); if (avail_sc_idx == OSI_MAX_NUM_SC) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Err: NO free SC Index\n", 0ULL); - return -1; + ret = -1; + goto exit; } new_sc = &lut_status_ptr->sc_info[avail_sc_idx]; ret = memcpy_sci_sak_hkey(new_sc, sc); if (ret < OSI_NONE_SIGNED) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "memcpy Failed\n", 0ULL); - return -1; + ret = -1; + goto exit; } new_sc->curr_an = sc->curr_an; new_sc->next_pn = sc->next_pn; @@ -5150,20 +5307,22 @@ 
static nve32_t add_new_sc(struct osi_core_priv_data *const osi_core, OSI_NONE_SIGNED) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "failed to add new SC\n", 0ULL); - return -1; + ret = -1; + goto exit; } else { /* Update lut status */ lut_status_ptr->num_of_sc_used++; - LOG("%s: Added new SC ctlr: %u " + MACSEC_LOG("%s: Added new SC ctlr: %u " "Total active SCs: %u", __func__, ctlr, lut_status_ptr->num_of_sc_used); - return 0; } +exit: + return ret; } /** - * @brief config_macsec - API to update LUTs for addition/deletion of SC/SA + * @brief macsec_configure - API to update LUTs for addition/deletion of SC/SA * * @note * Algorithm: @@ -5180,6 +5339,7 @@ static nve32_t add_new_sc(struct osi_core_priv_data *const osi_core, * * @param[in] osi_core: OSI core private data structure. used param macsec_base * @param[in] sc: Pointer to the sc that need to be added/deleted/updated + * @param[in] enable: enable or disable * @param[in] ctlr: Controller to be selected * @param[out] kt_idx: Key index to be passed to osd * @@ -5194,21 +5354,22 @@ static nve32_t add_new_sc(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure */ -static nve32_t config_macsec(struct osi_core_priv_data *const osi_core, - struct osi_macsec_sc_info *const sc, - nveu32_t enable, nveu16_t ctlr, - nveu16_t *kt_idx) +static nve32_t macsec_configure(struct osi_core_priv_data *const osi_core, + struct osi_macsec_sc_info *const sc, + nveu32_t enable, nveu16_t ctlr, + nveu16_t *kt_idx) { struct osi_macsec_sc_info *existing_sc = OSI_NULL; struct osi_macsec_sc_info tmp_sc; struct osi_macsec_sc_info *tmp_sc_p = &tmp_sc; struct osi_macsec_lut_status *lut_status_ptr; - nve32_t ret; + nve32_t ret = 0; if (macsec_config_validate_inputs(enable, ctlr, kt_idx) < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Input validation failed\n", 0ULL); - return -1; + ret = -1; + goto exit; } lut_status_ptr = &osi_core->macsec_lut_status[ctlr]; @@ -5219,20 +5380,23 @@ static 
nve32_t config_macsec(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "trying to delete non-existing SC ?\n", 0ULL); - return -1; + ret = -1; + goto exit; } else { - LOG("%s: Adding new SC/SA: ctlr: %hu", __func__, ctlr); - return add_new_sc(osi_core, sc, ctlr, kt_idx); + MACSEC_LOG("%s: Adding new SC/SA: ctlr: %hu", __func__, ctlr); + ret = add_new_sc(osi_core, sc, ctlr, kt_idx); + goto exit; } } else { - LOG("%s: Updating existing SC", __func__); + MACSEC_LOG("%s: Updating existing SC", __func__); if (enable == OSI_DISABLE) { - LOG("%s: Deleting existing SA", __func__); + MACSEC_LOG("%s: Deleting existing SA", __func__); if (del_upd_sc(osi_core, existing_sc, sc, ctlr, kt_idx) != OSI_NONE_SIGNED) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "failed to del SA\n", 0ULL); - return -1; + ret = -1; + goto exit; } else { if ((existing_sc->an_valid == OSI_NONE) && (lut_status_ptr->num_of_sc_used != OSI_NONE)) { @@ -5241,7 +5405,7 @@ static nve32_t config_macsec(struct osi_core_priv_data *const osi_core, sizeof(*existing_sc)); } - return 0; + goto exit; } } else { /* Take backup copy. 
@@ -5253,7 +5417,8 @@ static nve32_t config_macsec(struct osi_core_priv_data *const osi_core, if (ret < OSI_NONE_SIGNED) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "memcpy Failed\n", 0ULL); - return -1; + ret = -1; + goto exit; } tmp_sc_p->curr_an = sc->curr_an; tmp_sc_p->next_pn = sc->next_pn; @@ -5266,18 +5431,20 @@ static nve32_t config_macsec(struct osi_core_priv_data *const osi_core, OSI_NONE_SIGNED) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "failed to add new SA\n", 0ULL); - return -1; + ret = -1; + goto exit; } else { - LOG("%s: Updated new SC ctlr: %u " + MACSEC_LOG("%s: Updated new SC ctlr: %u " "Total active SCs: %u", __func__, ctlr, lut_status_ptr->num_of_sc_used); /* Now commit the changes */ *existing_sc = *tmp_sc_p; - return 0; } } } +exit: + return ret; } /** @@ -5308,24 +5475,27 @@ static nve32_t config_macsec(struct osi_core_priv_data *const osi_core, nve32_t osi_init_macsec_ops(struct osi_core_priv_data *const osi_core) { static struct osi_macsec_core_ops virt_macsec_ops; + nve32_t ret = 0; static struct osi_macsec_core_ops macsec_ops = { - .init = macsec_init, + .init = macsec_initialize, .deinit = macsec_deinit, - .handle_ns_irq = macsec_handle_ns_irq, - .handle_s_irq = macsec_handle_s_irq, + .handle_irq = macsec_handle_irq, .lut_config = macsec_lut_config, #ifdef MACSEC_KEY_PROGRAM .kt_config = macsec_kt_config, #endif /* MACSEC_KEY_PROGRAM */ .cipher_config = macsec_cipher_config, - .loopback_config = macsec_loopback_config, .macsec_en = macsec_enable, - .config = config_macsec, + .config = macsec_configure, .read_mmc = macsec_read_mmc, - .dbg_buf_config = macsec_dbg_buf_config, - .dbg_events_config = macsec_dbg_events_config, .get_sc_lut_key_index = macsec_get_key_index, .update_mtu = macsec_update_mtu, +#ifdef DEBUG_MACSEC + .loopback_config = macsec_loopback_config, + .dbg_buf_config = macsec_dbg_buf_config, + .dbg_events_config = macsec_dbg_events_config, + .intr_config = macsec_intr_config, +#endif }; if 
(osi_core->use_virtualization == OSI_ENABLE) { @@ -5333,11 +5503,13 @@ nve32_t osi_init_macsec_ops(struct osi_core_priv_data *const osi_core) ivc_init_macsec_ops(osi_core->macsec_ops); } else { if (osi_core->macsec_base == OSI_NULL) { - return -1; + ret = -1; + goto exit; } osi_core->macsec_ops = &macsec_ops; } - return 0; +exit: + return ret; } /** @@ -5368,12 +5540,14 @@ nve32_t osi_init_macsec_ops(struct osi_core_priv_data *const osi_core) nve32_t osi_macsec_init(struct osi_core_priv_data *const osi_core, nveu32_t mtu) { + nve32_t ret = -1; + if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->init != OSI_NULL)) { - return osi_core->macsec_ops->init(osi_core, mtu); + ret = osi_core->macsec_ops->init(osi_core, mtu); } - return -1; + return ret; } /** @@ -5401,20 +5575,22 @@ nve32_t osi_macsec_init(struct osi_core_priv_data *const osi_core, */ nve32_t osi_macsec_deinit(struct osi_core_priv_data *const osi_core) { + nve32_t ret = -1; + if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->deinit != OSI_NULL)) { - return osi_core->macsec_ops->deinit(osi_core); + ret = osi_core->macsec_ops->deinit(osi_core); } - return -1; + return ret; } /** - * @brief osi_macsec_ns_isr - macsec non-secure irq handler + * @brief osi_macsec_isr - macsec irq handler * * @note * Algorithm: * - Return -1 if osi core or ops is null - * - handles non-secure macsec interrupts + * - handles macsec interrupts * - Refer to MACSEC column of <<******, (sequence diagram)>> for API details. 
* - TraceID: *********** * @@ -5428,39 +5604,11 @@ nve32_t osi_macsec_deinit(struct osi_core_priv_data *const osi_core) * - Run time: Yes * - De-initialization: No */ -void osi_macsec_ns_isr(struct osi_core_priv_data *const osi_core) +void osi_macsec_isr(struct osi_core_priv_data *const osi_core) { if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && - (osi_core->macsec_ops->handle_ns_irq != OSI_NULL)) { - osi_core->macsec_ops->handle_ns_irq(osi_core); - } -} - -/** - * @brief osi_macsec_s_isr - macsec secure irq handler - * - * @note - * Algorithm: - * - Return -1 if osi core or ops is null - * - handles secure macsec interrupts - * - Refer to MACSEC column of <<******, (sequence diagram)>> for API details. - * - TraceID: *********** - * - * @param[in] osi_core: OSI core private data structure - * - * @pre MACSEC needs to be out of reset and proper clock configured. - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - */ -void osi_macsec_s_isr(struct osi_core_priv_data *const osi_core) -{ - if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && - (osi_core->macsec_ops->handle_s_irq != OSI_NULL)) { - osi_core->macsec_ops->handle_s_irq(osi_core); + (osi_core->macsec_ops->handle_irq != OSI_NULL)) { + osi_core->macsec_ops->handle_irq(osi_core); } } @@ -5491,12 +5639,14 @@ void osi_macsec_s_isr(struct osi_core_priv_data *const osi_core) nve32_t osi_macsec_config_lut(struct osi_core_priv_data *const osi_core, struct osi_macsec_lut_config *const lut_config) { + nve32_t ret = -1; + if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->lut_config != OSI_NULL)) { - return osi_core->macsec_ops->lut_config(osi_core, lut_config); + ret = osi_core->macsec_ops->lut_config(osi_core, lut_config); } - return -1; + return ret; } /** @@ -5529,13 +5679,15 @@ nve32_t osi_macsec_get_sc_lut_key_index(struct osi_core_priv_data *const osi_cor nveu8_t *sci, nveu32_t 
*key_index, nveu16_t ctlr) { + nve32_t ret = -1; + if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->get_sc_lut_key_index != OSI_NULL)) { - return osi_core->macsec_ops->get_sc_lut_key_index(osi_core, sci, key_index, + ret = osi_core->macsec_ops->get_sc_lut_key_index(osi_core, sci, key_index, ctlr); } - return -1; + return ret; } /** @@ -5565,12 +5717,14 @@ nve32_t osi_macsec_get_sc_lut_key_index(struct osi_core_priv_data *const osi_cor nve32_t osi_macsec_update_mtu(struct osi_core_priv_data *const osi_core, nveu32_t mtu) { + nve32_t ret = -1; + if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->update_mtu != OSI_NULL)) { - return osi_core->macsec_ops->update_mtu(osi_core, mtu); + ret = osi_core->macsec_ops->update_mtu(osi_core, mtu); } - return -1; + return ret; } #ifdef MACSEC_KEY_PROGRAM @@ -5601,13 +5755,15 @@ nve32_t osi_macsec_update_mtu(struct osi_core_priv_data *const osi_core, nve32_t osi_macsec_config_kt(struct osi_core_priv_data *const osi_core, struct osi_macsec_kt_config *const kt_config) { + nve32_t ret = -1; + if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->kt_config != OSI_NULL) && (kt_config != OSI_NULL)) { - return osi_core->macsec_ops->kt_config(osi_core, kt_config); + ret = osi_core->macsec_ops->kt_config(osi_core, kt_config); } - return -1; + return ret; } #endif /* MACSEC_KEY_PROGRAM */ @@ -5638,14 +5794,17 @@ nve32_t osi_macsec_config_kt(struct osi_core_priv_data *const osi_core, nve32_t osi_macsec_cipher_config(struct osi_core_priv_data *const osi_core, nveu32_t cipher) { + nve32_t ret = -1; + if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->cipher_config != OSI_NULL)) { - return osi_core->macsec_ops->cipher_config(osi_core, cipher); + ret = osi_core->macsec_ops->cipher_config(osi_core, cipher); } - return -1; + return ret; } +#ifdef DEBUG_MACSEC /** * @brief osi_macsec_loopback 
- API to enable/disable macsec loopback * @@ -5673,14 +5832,16 @@ nve32_t osi_macsec_cipher_config(struct osi_core_priv_data *const osi_core, nve32_t osi_macsec_loopback(struct osi_core_priv_data *const osi_core, nveu32_t enable) { + nve32_t ret = -1; if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->loopback_config != OSI_NULL)) { - return osi_core->macsec_ops->loopback_config(osi_core, enable); + ret = osi_core->macsec_ops->loopback_config(osi_core, enable); } - return -1; + return ret; } +#endif /* DEBUG_MACSEC */ /** * @brief osi_macsec_en - API to enable/disable macsec @@ -5710,18 +5871,20 @@ nve32_t osi_macsec_loopback(struct osi_core_priv_data *const osi_core, nve32_t osi_macsec_en(struct osi_core_priv_data *const osi_core, nveu32_t enable) { + nve32_t ret = -1; + if (((enable & OSI_MACSEC_TX_EN) != OSI_MACSEC_TX_EN) && ((enable & OSI_MACSEC_RX_EN) != OSI_MACSEC_RX_EN) && (enable != OSI_DISABLE)) { - return -1; + goto exit; } if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->macsec_en != OSI_NULL)) { - return osi_core->macsec_ops->macsec_en(osi_core, enable); + ret = osi_core->macsec_ops->macsec_en(osi_core, enable); } - - return -1; +exit: + return ret; } /** @@ -5737,6 +5900,7 @@ nve32_t osi_macsec_en(struct osi_core_priv_data *const osi_core, * * @param[in] osi_core: OSI core private data structure * @param[in] sc: Pointer to the sc that needs to be added/deleted/updated + * @param[in] enable: enable or disable * @param[in] ctlr: Controller selected * @param[out] kt_idx: Pointer to the kt_index passed to OSD * @@ -5756,18 +5920,20 @@ nve32_t osi_macsec_config(struct osi_core_priv_data *const osi_core, nveu32_t enable, nveu16_t ctlr, nveu16_t *kt_idx) { + nve32_t ret = -1; + if (((enable != OSI_ENABLE) && (enable != OSI_DISABLE)) || (ctlr > OSI_CTLR_SEL_MAX) || (kt_idx == OSI_NULL)) { - return -1; + goto exit; } if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) 
&& (osi_core->macsec_ops->config != OSI_NULL) && (sc != OSI_NULL)) { - return osi_core->macsec_ops->config(osi_core, sc, + ret = osi_core->macsec_ops->config(osi_core, sc, enable, ctlr, kt_idx); } - - return -1; +exit: + return ret; } /** @@ -5795,15 +5961,17 @@ nve32_t osi_macsec_config(struct osi_core_priv_data *const osi_core, */ nve32_t osi_macsec_read_mmc(struct osi_core_priv_data *const osi_core) { + nve32_t ret = -1; + if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->read_mmc != OSI_NULL)) { osi_core->macsec_ops->read_mmc(osi_core); - return 0; + ret = 0; } - - return -1; + return ret; } +#ifdef DEBUG_MACSEC /** * @brief osi_macsec_config_dbg_buf - Reads the debug buffer captured * @@ -5832,14 +6000,15 @@ nve32_t osi_macsec_config_dbg_buf( struct osi_core_priv_data *const osi_core, struct osi_macsec_dbg_buf_config *const dbg_buf_config) { + nve32_t ret = -1; if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->dbg_buf_config != OSI_NULL)) { - return osi_core->macsec_ops->dbg_buf_config(osi_core, + ret = osi_core->macsec_ops->dbg_buf_config(osi_core, dbg_buf_config); } - return -1; + return ret; } /** @@ -5870,14 +6039,16 @@ nve32_t osi_macsec_dbg_events_config( struct osi_core_priv_data *const osi_core, struct osi_macsec_dbg_buf_config *const dbg_buf_config) { + nve32_t ret = -1; if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->dbg_events_config != OSI_NULL)) { - return osi_core->macsec_ops->dbg_events_config(osi_core, + ret = osi_core->macsec_ops->dbg_events_config(osi_core, dbg_buf_config); } - return -1; + return ret; } +#endif /* DEBUG_MACSEC */ #endif /* MACSEC_SUPPORT */ diff --git a/osi/core/macsec.h b/osi/core/macsec.h index 7d027d0..aabe9a2 100644 --- a/osi/core/macsec.h +++ b/osi/core/macsec.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. 
All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -48,8 +48,10 @@ * @brief MACsec controller register offsets * @{ */ +#ifdef MACSEC_KEY_PROGRAM #define MACSEC_GCM_KEYTABLE_CONFIG 0x0000 #define MACSEC_GCM_KEYTABLE_DATA(x) ((0x0004U) + ((x) * 4U)) +#endif /* MACSEC_KEY_PROGRAM */ #define MACSEC_RX_ICV_ERR_CNTRL 0x4000 #define MACSEC_INTERRUPT_COMMON_SR 0x4004 #define MACSEC_TX_IMR 0x4008 @@ -89,7 +91,6 @@ #define MACSEC_TX_SCI_LUT_VALID 0xD028 #define MACSEC_RX_BYP_LUT_VALID 0xD02C #define MACSEC_RX_SCI_LUT_VALID 0xD030 - #define MACSEC_COMMON_IMR 0xD054 #define MACSEC_COMMON_ISR 0xD058 #define MACSEC_TX_SC_KEY_INVALID_STS0_0 0xD064 @@ -97,14 +98,16 @@ #define MACSEC_RX_SC_KEY_INVALID_STS0_0 0xD080 #define MACSEC_RX_SC_KEY_INVALID_STS1_0 0xD084 -#define MACSEC_TX_DEBUG_CONTROL_0 0xD098 -#define MACSEC_TX_DEBUG_TRIGGER_EN_0 0xD09C #define MACSEC_TX_DEBUG_STATUS_0 0xD0C4 +#define MACSEC_TX_DEBUG_TRIGGER_EN_0 0xD09C +#define MACSEC_RX_DEBUG_STATUS_0 0xD0F8 +#define MACSEC_RX_DEBUG_TRIGGER_EN_0 0xD0E0 +#ifdef DEBUG_MACSEC +#define MACSEC_TX_DEBUG_CONTROL_0 0xD098 #define MACSEC_DEBUG_BUF_CONFIG_0 0xD0C8 #define MACSEC_DEBUG_BUF_DATA_0(x) ((0xD0CCU) + ((x) * 4U)) #define MACSEC_RX_DEBUG_CONTROL_0 0xD0DC -#define MACSEC_RX_DEBUG_TRIGGER_EN_0 0xD0E0 -#define MACSEC_RX_DEBUG_STATUS_0 0xD0F8 +#endif /* DEBUG_MACSEC */ #define MACSEC_CONTROL1 0xE000 #define MACSEC_GCM_AES_CONTROL_0 0xE004 @@ -114,6 +117,7 @@ #define MACSEC_RX_SOT_DELAY 0xE01C /** @} */ +#ifdef MACSEC_KEY_PROGRAM /** * @addtogroup MACSEC_GCM_KEYTABLE_CONFIG register * @@ -138,6 +142,7 @@ #define MACSEC_KT_DATA_REG_SAK_CNT 8U #define MACSEC_KT_DATA_REG_H_CNT 4U /** @} */ +#endif /* MACSEC_KEY_PROGRAM */ /** * @addtogroup MACSEC_LUT_CONFIG register @@ -188,7 +193,9 @@ * @brief Bit definitions of MACSEC_CONTROL1 register * @{ */ +#ifdef DEBUG_MACSEC #define MACSEC_LOOPBACK_MODE_EN 
OSI_BIT(31) +#endif /* DEBUG_MACSEC */ #define MACSEC_RX_MTU_CHECK_EN OSI_BIT(16) #define MACSEC_TX_LUT_PRIO_BYP OSI_BIT(2) #define MACSEC_TX_MTU_CHECK_EN OSI_BIT(0) @@ -215,10 +222,12 @@ * @{ */ #define MACSEC_SECURE_REG_VIOL_INT_EN OSI_BIT(31) +#ifdef DEBUG_MACSEC #define MACSEC_RX_UNINIT_KEY_SLOT_INT_EN OSI_BIT(17) #define MACSEC_RX_LKUP_MISS_INT_EN OSI_BIT(16) #define MACSEC_TX_UNINIT_KEY_SLOT_INT_EN OSI_BIT(1) #define MACSEC_TX_LKUP_MISS_INT_EN OSI_BIT(0) +#endif /* DEBUG_MACSEC */ /** @} */ /** @@ -227,11 +236,12 @@ * @brief Bit definitions of TX_INTERRUPT_MASK register * @{ */ +#define MACSEC_TX_MAC_CRC_ERROR_INT_EN OSI_BIT(16) +#ifdef DEBUG_MACSEC #define MACSEC_TX_DBG_BUF_CAPTURE_DONE_INT_EN OSI_BIT(22) #define MACSEC_TX_MTU_CHECK_FAIL_INT_EN OSI_BIT(19) #define MACSEC_TX_AES_GCM_BUF_OVF_INT_EN OSI_BIT(18) #define MACSEC_TX_SC_AN_NOT_VALID_INT_EN OSI_BIT(17) -#define MACSEC_TX_MAC_CRC_ERROR_INT_EN OSI_BIT(16) #define MACSEC_TX_PN_EXHAUSTED_INT_EN OSI_BIT(1) #define MACSEC_TX_PN_THRSHLD_RCHD_INT_EN OSI_BIT(0) /** @} */ @@ -243,12 +253,13 @@ * @{ */ #define MACSEC_RX_DBG_BUF_CAPTURE_DONE_INT_EN OSI_BIT(22) -#define MACSEC_RX_ICV_ERROR_INT_EN OSI_BIT(21) #define RX_REPLAY_ERROR_INT_EN OSI_BIT(20) #define MACSEC_RX_MTU_CHECK_FAIL_INT_EN OSI_BIT(19) #define MACSEC_RX_AES_GCM_BUF_OVF_INT_EN OSI_BIT(18) -#define MACSEC_RX_MAC_CRC_ERROR_INT_EN OSI_BIT(16) #define MACSEC_RX_PN_EXHAUSTED_INT_EN OSI_BIT(1) +#endif /* DEBUG_MACSEC */ +#define MACSEC_RX_ICV_ERROR_INT_EN OSI_BIT(21) +#define MACSEC_RX_MAC_CRC_ERROR_INT_EN OSI_BIT(16) /** @} */ /** @@ -264,6 +275,16 @@ #define MACSEC_TX_LKUP_MISS OSI_BIT(0) /** @} */ +/** + * @addtogroup MACSEC_STATS_CONTROL_0 register + * + * @brief Bit definitions of MACSEC_STATS_CONTROL_0 register + * @{ + */ +#define MACSEC_STATS_CONTROL0_CNT_RL_OVR_CPY OSI_BIT(1) +/** @} */ + + /** * @addtogroup MACSEC_TX_ISR register * @@ -294,15 +315,7 @@ #define MACSEC_RX_PN_EXHAUSTED OSI_BIT(1) /** @} */ -/** - * @addtogroup 
MACSEC_STATS_CONTROL_0 register - * - * @brief Bit definitions of MACSEC_STATS_CONTROL_0 register - * @{ - */ -#define MACSEC_STATS_CONTROL0_CNT_RL_OVR_CPY OSI_BIT(1) -/** @} */ - +#ifdef DEBUG_MACSEC /** * @addtogroup MACSEC_DEBUG_BUF_CONFIG_0 register * @@ -361,21 +374,14 @@ */ #define MACSEC_RX_DEBUG_CONTROL_0_START_CAP OSI_BIT(31) /** @} */ +#endif /* DEBUG_MACSEC */ #define MTU_LENGTH_MASK 0xFFFFU #define SOT_LENGTH_MASK 0xFFU #define EQOS_MACSEC_SOT_DELAY 0x4EU /** - * @addtogroup TX/RX_BYP/SCI_LUT_VALID register - * - * @brief Bit definitions of LUT_VALID registers - * @{ - */ -/** @} */ - -/** - * @addtogroup TX/RX LUT bit fields in LUT_DATA registers + * @addtogroup MACSEC-LUT TX/RX LUT bit fields in LUT_DATA registers * * @brief Helper macros for LUT data programming * @{ @@ -439,8 +445,21 @@ #define MACSEC_RX_SCI_LUT_PREEMPT_INACTIVE OSI_BIT(9) /** @} */ +#ifdef DEBUG_MACSEC /* debug buffer data read/write length */ #define DBG_BUF_LEN 4U +#endif /* DEBUG_MACSEC */ +#ifdef MACSEC_KEY_PROGRAM #define INTEGER_LEN 4U +#endif /* MACSEC_KEY_PROGRAM */ + +#ifdef HSI_SUPPORT +/* Set RX ISR set interrupt status bit */ +#define MACSEC_RX_ISR_SET 0x4050U +/* Set TX ISR set interrupt status bit */ +#define MACSEC_TX_ISR_SET 0x4010U +/* Set Common ISR set interrupt status bit */ +#define MACSEC_COMMON_ISR_SET 0xd05cU +#endif #endif /* INCLUDED_MACSEC_H */ diff --git a/osi/core/mgbe_core.c b/osi/core/mgbe_core.c index 18d03e2..ebb0660 100644 --- a/osi/core/mgbe_core.c +++ b/osi/core/mgbe_core.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -21,7 +21,6 @@ */ #include "../osi/common/common.h" -#include "../osi/common/type.h" #include #include #include @@ -29,254 +28,8 @@ #include "core_local.h" #include "xpcs.h" #include "mgbe_mmc.h" -#include "vlan_filter.h" #include "core_common.h" - -/** - * @brief mgbe_ptp_tsc_capture - read PTP and TSC registers - * - * Algorithm: - * - write 1 to MGBE_WRAP_SYNC_TSC_PTP_CAPTURE_0 - * - wait till MGBE_WRAP_SYNC_TSC_PTP_CAPTURE_0 is 0x0 - * - read and return following registers - * MGBE_WRAP _TSC_CAPTURE_LOW_0 - * MGBE_WRAP _TSC_CAPTURE_HIGH_0 - * MGBE_WRAP _PTP_CAPTURE_LOW_0 - * MGBE_WRAP _PTP_CAPTURE_HIGH_0 - * - * @param[in] base: MGBE virtual base address. - * @param[out]: osi_core_ptp_tsc_data register - * - * @note MAC needs to be out of reset and proper clock configured. TSC and PTP - * registers should be configured. - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static nve32_t mgbe_ptp_tsc_capture(struct osi_core_priv_data *const osi_core, - struct osi_core_ptp_tsc_data *data) -{ - nveu32_t retry = 20U; - nveu32_t count = 0U, val = 0U; - nve32_t cond = COND_NOT_MET; - nve32_t ret = -1; - - osi_writela(osi_core, OSI_ENABLE, (nveu8_t *)osi_core->base + - MGBE_WRAP_SYNC_TSC_PTP_CAPTURE); - - /* Poll Until Poll Condition */ - while (cond == COND_NOT_MET) { - if (count > retry) { - /* Max retries reached */ - goto done; - } - - count++; - - val = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_WRAP_SYNC_TSC_PTP_CAPTURE); - if ((val & OSI_ENABLE) == OSI_NONE) { - cond = COND_MET; - } else { - /* delay if SWR is set */ - osi_core->osd_ops.udelay(1U); - } - } - - data->tsc_low_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_WRAP_TSC_CAPTURE_LOW); - data->tsc_high_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_WRAP_TSC_CAPTURE_HIGH); - data->ptp_low_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_WRAP_PTP_CAPTURE_LOW); - data->ptp_high_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_WRAP_PTP_CAPTURE_HIGH); - ret = 0; -done: - return ret; -} - -/** - * @brief mgbe_config_fw_err_pkts - Configure forwarding of error packets - * - * Algorithm: When FEP bit is reset, the Rx queue drops packets with - * error status (CRC error, GMII_ER, watchdog timeout, or overflow). - * When FEP bit is set, all packets except the runt error packets - * are forwarded to the application or DMA. - * - * @param[in] addr: Base address indicating the start of memory mapped IO - * region of the MAC. - * @param[in] qinx: Q index - * @param[in] enable_fw_err_pkts: Enable or Disable the forwarding of error - * packets - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static int mgbe_config_fw_err_pkts(struct osi_core_priv_data *osi_core, - const unsigned int qinx, - const unsigned int enable_fw_err_pkts) -{ - unsigned int val; - - /* Check for valid enable_fw_err_pkts and qinx values */ - if ((enable_fw_err_pkts!= OSI_ENABLE && - enable_fw_err_pkts != OSI_DISABLE) || - (qinx >= OSI_MGBE_MAX_NUM_CHANS)) { - return -1; - } - - /* Read MTL RXQ Operation_Mode Register */ - val = osi_readla(osi_core, (unsigned char *)osi_core->base + - MGBE_MTL_CHX_RX_OP_MODE(qinx)); - - /* enable_fw_err_pkts, 1 is for enable and 0 is for disable */ - if (enable_fw_err_pkts == OSI_ENABLE) { - /* When enable_fw_err_pkts bit is set, all packets except - * the runt error packets are forwarded to the application - * or DMA. - */ - val |= MGBE_MTL_RXQ_OP_MODE_FEP; - } else { - /* When this bit is reset, the Rx queue drops packets with error - * status (CRC error, GMII_ER, watchdog timeout, or overflow) - */ - val &= ~MGBE_MTL_RXQ_OP_MODE_FEP; - } - - /* Write to FEP bit of MTL RXQ Operation Mode Register to enable or - * disable the forwarding of error packets to DMA or application. - */ - osi_writela(osi_core, val, (unsigned char *)osi_core->base + - MGBE_MTL_CHX_RX_OP_MODE(qinx)); - - return 0; -} - -/** - * @brief mgbe_poll_for_swr - Poll for software reset (SWR bit in DMA Mode) - * - * Algorithm: CAR reset will be issued through MAC reset pin. - * Waits for SWR reset to be cleared in DMA Mode register. - * - * @param[in] osi_core: OSI core private data structure. - * - * @note MAC needs to be out of reset and proper clock configured. - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static nve32_t mgbe_poll_for_swr(struct osi_core_priv_data *const osi_core) -{ - void *addr = osi_core->base; - nveu32_t retry = 1000; - nveu32_t count; - nveu32_t dma_bmr = 0; - nve32_t cond = 1; - nveu32_t pre_si = osi_core->pre_si; - - /* Performing software reset */ - if (pre_si == OSI_ENABLE) { - osi_writela(osi_core, OSI_ENABLE, - (nveu8_t *)addr + MGBE_DMA_MODE); - } - - /* Poll Until Poll Condition */ - count = 0; - while (cond == 1) { - if (count > retry) { - return -1; - } - - count++; - - dma_bmr = osi_readla(osi_core, (nveu8_t *)addr + MGBE_DMA_MODE); - if ((dma_bmr & MGBE_DMA_MODE_SWR) == OSI_NONE) { - cond = 0; - } else { - /* sleep if SWR is set */ - osi_core->osd_ops.msleep(1U); - } - } - - return 0; -} - -/** - * @brief mgbe_calculate_per_queue_fifo - Calculate per queue FIFO size - * - * Algorithm: Total Tx/Rx FIFO size which is read from - * MAC HW is being shared equally among the queues that are - * configured. - * - * @param[in] fifo_size: Total Tx/RX HW FIFO size. - * @param[in] queue_count: Total number of Queues configured. - * - * @note MAC has to be out of reset. - * - * @retval Queue size that need to be programmed. 
- */ -static nveu32_t mgbe_calculate_per_queue_fifo(nveu32_t fifo_size, - nveu32_t queue_count) -{ - nveu32_t q_fifo_size = 0; /* calculated fifo size per queue */ - nveu32_t p_fifo = 0; /* per queue fifo size program value */ - - if (queue_count == 0U) { - return 0U; - } - - /* calculate Tx/Rx fifo share per queue */ - switch (fifo_size) { - case 0: - case 1: - case 2: - case 3: - q_fifo_size = FIFO_SIZE_KB(1U); - break; - case 4: - q_fifo_size = FIFO_SIZE_KB(2U); - break; - case 5: - q_fifo_size = FIFO_SIZE_KB(4U); - break; - case 6: - q_fifo_size = FIFO_SIZE_KB(8U); - break; - case 7: - q_fifo_size = FIFO_SIZE_KB(16U); - break; - case 8: - q_fifo_size = FIFO_SIZE_KB(32U); - break; - case 9: - q_fifo_size = FIFO_SIZE_KB(64U); - break; - case 10: - q_fifo_size = FIFO_SIZE_KB(128U); - break; - case 11: - q_fifo_size = FIFO_SIZE_KB(256U); - break; - case 12: - /* Size mapping not found for 192KB, so assigned 12 */ - q_fifo_size = FIFO_SIZE_KB(192U); - break; - default: - q_fifo_size = FIFO_SIZE_KB(1U); - break; - } - - q_fifo_size = q_fifo_size / queue_count; - - if (q_fifo_size < UINT_MAX) { - p_fifo = (q_fifo_size / 256U) - 1U; - } - - return p_fifo; -} +#include "macsec.h" /** * @brief mgbe_poll_for_mac_accrtl - Poll for Indirect Access control and status @@ -285,17 +38,18 @@ static nveu32_t mgbe_calculate_per_queue_fifo(nveu32_t fifo_size, * Algorithm: Waits for waits for transfer busy bit to be cleared in * MAC Indirect address control register to complete operations. * - * @param[in] addr: MGBE virtual base address. + * @param[in] osi_core: osi core priv data structure * * @note MAC needs to be out of reset and proper clock configured. * * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_poll_for_mac_acrtl(struct osi_core_priv_data *osi_core) +static nve32_t mgbe_poll_for_mac_acrtl(struct osi_core_priv_data *osi_core) { nveu32_t count = 0U; nveu32_t mac_indir_addr_ctrl = 0U; + nve32_t ret = -1; /* Poll Until MAC_Indir_Access_Ctrl OB is clear */ while (count < MGBE_MAC_INDIR_AC_OB_RETRY) { @@ -304,7 +58,8 @@ static int mgbe_poll_for_mac_acrtl(struct osi_core_priv_data *osi_core) MGBE_MAC_INDIR_AC); if ((mac_indir_addr_ctrl & MGBE_MAC_INDIR_AC_OB) == OSI_NONE) { /* OB is clear exit the loop */ - return 0; + ret = 0; + break; } /* wait for 10 usec for OB clear and retry */ @@ -312,7 +67,7 @@ static int mgbe_poll_for_mac_acrtl(struct osi_core_priv_data *osi_core) count++; } - return -1; + return ret; } /** @@ -320,7 +75,7 @@ static int mgbe_poll_for_mac_acrtl(struct osi_core_priv_data *osi_core) * * Algorithm: writes MAC Indirect AC register * - * @param[in] base: MGBE virtual base address. + * @param[in] osi_core: osi core priv data structure * @param[in] mc_no: MAC AC Mode Select number * @param[in] addr_offset: MAC AC Address Offset. * @param[in] value: MAC AC register value @@ -330,13 +85,14 @@ static int mgbe_poll_for_mac_acrtl(struct osi_core_priv_data *osi_core) * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_mac_indir_addr_write(struct osi_core_priv_data *osi_core, - nveu32_t mc_no, - nveu32_t addr_offset, - nveu32_t value) +static nve32_t mgbe_mac_indir_addr_write(struct osi_core_priv_data *osi_core, + nveu32_t mc_no, + nveu32_t addr_offset, + nveu32_t value) { void *base = osi_core->base; nveu32_t addr = 0; + nve32_t ret = 0; /* Write MAC_Indir_Access_Data register value */ osi_writela(osi_core, value, (nveu8_t *)base + MGBE_MAC_INDIR_DATA); @@ -365,12 +121,12 @@ static int mgbe_mac_indir_addr_write(struct osi_core_priv_data *osi_core, /* Wait until OB bit reset */ if (mgbe_poll_for_mac_acrtl(osi_core) < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to write MAC_Indir_Access_Ctrl\n", mc_no); - return -1; + ret = -1; } - return 0; + return ret; } /** @@ -378,7 +134,7 @@ static int mgbe_mac_indir_addr_write(struct osi_core_priv_data *osi_core, * * Algorithm: Reads MAC Indirect AC register * - * @param[in] base: MGBE virtual base address. + * @param[in] osi_core: osi core priv data structure * @param[in] mc_no: MAC AC Mode Select number * @param[in] addr_offset: MAC AC Address Offset. * @param[in] value: Pointer MAC AC register value @@ -388,13 +144,14 @@ static int mgbe_mac_indir_addr_write(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_mac_indir_addr_read(struct osi_core_priv_data *osi_core, - nveu32_t mc_no, - nveu32_t addr_offset, - nveu32_t *value) +static nve32_t mgbe_mac_indir_addr_read(struct osi_core_priv_data *osi_core, + nveu32_t mc_no, + nveu32_t addr_offset, + nveu32_t *value) { void *base = osi_core->base; nveu32_t addr = 0; + nve32_t ret = 0; /* Program MAC_Indir_Access_Ctrl */ addr = osi_readla(osi_core, (nveu8_t *)base + MGBE_MAC_INDIR_AC); @@ -420,116 +177,15 @@ static int mgbe_mac_indir_addr_read(struct osi_core_priv_data *osi_core, /* Wait until OB bit reset */ if (mgbe_poll_for_mac_acrtl(osi_core) < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to write MAC_Indir_Access_Ctrl\n", mc_no); - return -1; + ret = -1; + goto fail; } /* Read MAC_Indir_Access_Data register value */ *value = osi_readla(osi_core, (nveu8_t *)base + MGBE_MAC_INDIR_DATA); - return 0; -} - -/** - * @brief mgbe_config_l2_da_perfect_inverse_match - configure register for - * inverse or perfect match. - * - * Algorithm: This sequence is used to select perfect/inverse matching - * for L2 DA - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] perfect_inverse_match: 1 - inverse mode 0- perfect mode - * - * @note MAC should be init and started. see osi_start_mac() - */ -static inline void mgbe_config_l2_da_perfect_inverse_match( - struct osi_core_priv_data *osi_core, - unsigned int perfect_inverse_match) -{ - unsigned int value = 0U; - - value = osi_readla(osi_core, - (unsigned char *)osi_core->base + MGBE_MAC_PFR); - value &= ~MGBE_MAC_PFR_DAIF; - if (perfect_inverse_match == OSI_INV_MATCH) { - /* Set DA Inverse Filtering */ - value |= MGBE_MAC_PFR_DAIF; - } - osi_writela(osi_core, value, - (unsigned char *)osi_core->base + MGBE_MAC_PFR); -} - -/** - * @brief mgbe_config_mac_pkt_filter_reg - configure mac filter register. 
- * - * Algorithm: This sequence is used to configure MAC in differnet pkt - * processing modes like promiscuous, multicast, unicast, - * hash unicast/multicast. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] filter: OSI filter structure. - * - * @note 1) MAC should be initialized and started. see osi_start_mac() - * - * @retval 0 always - */ -static int mgbe_config_mac_pkt_filter_reg(struct osi_core_priv_data *osi_core, - const struct osi_filter *filter) -{ - unsigned int value = 0U; - int ret = 0; - - value = osi_readla(osi_core, - (unsigned char *)osi_core->base + MGBE_MAC_PFR); - - /* Retain all other values */ - value &= (MGBE_MAC_PFR_DAIF | MGBE_MAC_PFR_DBF | MGBE_MAC_PFR_SAIF | - MGBE_MAC_PFR_SAF | MGBE_MAC_PFR_PCF | MGBE_MAC_PFR_VTFE | - MGBE_MAC_PFR_IPFE | MGBE_MAC_PFR_DNTU | MGBE_MAC_PFR_RA); - - if ((filter->oper_mode & OSI_OPER_EN_PROMISC) != OSI_DISABLE) { - /* Set Promiscuous Mode Bit */ - value |= MGBE_MAC_PFR_PR; - } - - if ((filter->oper_mode & OSI_OPER_DIS_PROMISC) != OSI_DISABLE) { - /* Reset Promiscuous Mode Bit */ - value &= ~MGBE_MAC_PFR_PR; - } - - if ((filter->oper_mode & OSI_OPER_EN_ALLMULTI) != OSI_DISABLE) { - /* Set Pass All Multicast Bit */ - value |= MGBE_MAC_PFR_PM; - } - - if ((filter->oper_mode & OSI_OPER_DIS_ALLMULTI) != OSI_DISABLE) { - /* Reset Pass All Multicast Bit */ - value &= ~MGBE_MAC_PFR_PM; - } - - if ((filter->oper_mode & OSI_OPER_EN_PERFECT) != OSI_DISABLE) { - /* Set Hash or Perfect Filter Bit */ - value |= MGBE_MAC_PFR_HPF; - } - - if ((filter->oper_mode & OSI_OPER_DIS_PERFECT) != OSI_DISABLE) { - /* Reset Hash or Perfect Filter Bit */ - value &= ~MGBE_MAC_PFR_HPF; - } - - osi_writela(osi_core, value, - (unsigned char *)osi_core->base + MGBE_MAC_PFR); - - if ((filter->oper_mode & OSI_OPER_EN_L2_DA_INV) != OSI_DISABLE) { - mgbe_config_l2_da_perfect_inverse_match(osi_core, - OSI_INV_MATCH); - } - - if ((filter->oper_mode & OSI_OPER_DIS_L2_DA_INV) != OSI_DISABLE) { - 
mgbe_config_l2_da_perfect_inverse_match(osi_core, - OSI_PFT_MATCH); - } - +fail: return ret; } @@ -548,8 +204,8 @@ static int mgbe_config_mac_pkt_filter_reg(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_filter_args_validate(struct osi_core_priv_data *const osi_core, - const struct osi_filter *filter) +static nve32_t mgbe_filter_args_validate(struct osi_core_priv_data *const osi_core, + const struct osi_filter *filter) { nveu32_t idx = filter->index; nveu32_t dma_routing_enable = filter->dma_routing; @@ -557,22 +213,26 @@ static int mgbe_filter_args_validate(struct osi_core_priv_data *const osi_core, nveu32_t addr_mask = filter->addr_mask; nveu32_t src_dest = filter->src_dest; nveu32_t dma_chansel = filter->dma_chansel; + nve32_t ret = 0; + (void) osi_core; /* check for valid index (0 to 31) */ if (idx >= OSI_MGBE_MAX_MAC_ADDRESS_FILTER) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "invalid MAC filter index\n", idx); - return -1; + ret = -1; + goto fail; } /* check for DMA channel index (0 to 9) */ - if ((dma_chan > OSI_MGBE_MAX_NUM_CHANS - 0x1U) && - (dma_chan != OSI_CHAN_ANY)){ + if ((dma_chan > (OSI_MGBE_MAX_NUM_CHANS - 0x1U)) && + (dma_chan != OSI_CHAN_ANY)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, "invalid dma channel\n", (nveul64_t)dma_chan); - return -1; + ret = -1; + goto fail; } /* validate dma_chansel argument */ @@ -580,35 +240,38 @@ static int mgbe_filter_args_validate(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, "invalid dma_chansel value\n", dma_chansel); - return -1; + ret = -1; + goto fail; } /* validate addr_mask argument */ if (addr_mask > MGBE_MAB_ADDRH_MBC_MAX_MASK) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid addr_mask value\n", addr_mask); - return -1; + ret = -1; + goto fail; } /* validate src_dest argument */ - if (src_dest != OSI_SA_MATCH && src_dest != 
OSI_DA_MATCH) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + if ((src_dest != OSI_SA_MATCH) && (src_dest != OSI_DA_MATCH)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid src_dest value\n", src_dest); - return -1; + ret = -1; + goto fail; } /* validate dma_routing_enable argument */ - if (dma_routing_enable != OSI_ENABLE && - dma_routing_enable != OSI_DISABLE) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + if ((dma_routing_enable != OSI_ENABLE) && + (dma_routing_enable != OSI_DISABLE)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid dma_routing value\n", dma_routing_enable); - return -1; + ret = -1; } - - return 0; +fail: + return ret; } /** @@ -629,7 +292,7 @@ static int mgbe_filter_args_validate(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_update_mac_addr_low_high_reg( +static nve32_t mgbe_update_mac_addr_low_high_reg( struct osi_core_priv_data *const osi_core, const struct osi_filter *filter) { @@ -646,7 +309,8 @@ static int mgbe_update_mac_addr_low_high_reg( /* Validate filter values */ if (mgbe_filter_args_validate(osi_core, filter) < 0) { /* Filter argments validation got failed */ - return -1; + ret = -1; + goto fail; } value = osi_readla(osi_core, (nveu8_t *)osi_core->base + @@ -660,7 +324,7 @@ static int mgbe_update_mac_addr_low_high_reg( if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "indirect register read failed\n", 0ULL); - return -1; + goto fail; } /* preserve last XDCS bits */ @@ -682,46 +346,38 @@ static int mgbe_update_mac_addr_low_high_reg( osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MAC_ADDRH((idx))); osi_writela(osi_core, OSI_MAX_32BITS, - (unsigned char *)osi_core->base + MGBE_MAC_ADDRL((idx))); + (nveu8_t *)osi_core->base + MGBE_MAC_ADDRL((idx))); + } else { + /* Add DMA channel to value in binary */ + value = OSI_NONE; + value |= ((dma_chan << MGBE_MAC_ADDRH_DCS_SHIFT) & MGBE_MAC_ADDRH_DCS); - return 0; + if 
(idx != 0U) { + /* Add Address mask */ + value |= ((addr_mask << MGBE_MAC_ADDRH_MBC_SHIFT) & + MGBE_MAC_ADDRH_MBC); + + /* Setting Source/Destination Address match valid */ + value |= ((src_dest << MGBE_MAC_ADDRH_SA_SHIFT) & + MGBE_MAC_ADDRH_SA); + } + + osi_writela(osi_core, + ((nveu32_t)addr[4] | ((nveu32_t)addr[5] << 8) | + MGBE_MAC_ADDRH_AE | value), + (nveu8_t *)osi_core->base + MGBE_MAC_ADDRH((idx))); + + osi_writela(osi_core, + ((nveu32_t)addr[0] | ((nveu32_t)addr[1] << 8) | + ((nveu32_t)addr[2] << 16) | ((nveu32_t)addr[3] << 24)), + (nveu8_t *)osi_core->base + MGBE_MAC_ADDRL((idx))); + + /* Write XDCS configuration into MAC_DChSel_IndReg(x) */ + /* Append DCS DMA channel to XDCS hot bit selection */ + xdcs_check |= (OSI_BIT(dma_chan) | dma_chansel); + ret = mgbe_mac_indir_addr_write(osi_core, MGBE_MAC_DCHSEL, idx, xdcs_check); } - - /* Add DMA channel to value in binary */ - value = OSI_NONE; - value |= ((dma_chan << MGBE_MAC_ADDRH_DCS_SHIFT) & - MGBE_MAC_ADDRH_DCS); - - if (idx != 0U) { - /* Add Address mask */ - value |= ((addr_mask << MGBE_MAC_ADDRH_MBC_SHIFT) & - MGBE_MAC_ADDRH_MBC); - - /* Setting Source/Destination Address match valid */ - value |= ((src_dest << MGBE_MAC_ADDRH_SA_SHIFT) & - MGBE_MAC_ADDRH_SA); - } - - osi_writela(osi_core, ((unsigned int)addr[4] | - ((unsigned int)addr[5] << 8) | - MGBE_MAC_ADDRH_AE | - value), - (unsigned char *)osi_core->base + MGBE_MAC_ADDRH((idx))); - - osi_writela(osi_core, ((unsigned int)addr[0] | - ((unsigned int)addr[1] << 8) | - ((unsigned int)addr[2] << 16) | - ((unsigned int)addr[3] << 24)), - (unsigned char *)osi_core->base + MGBE_MAC_ADDRL((idx))); - - /* Write XDCS configuration into MAC_DChSel_IndReg(x) */ - /* Append DCS DMA channel to XDCS hot bit selection */ - xdcs_check |= (OSI_BIT(dma_chan) | dma_chansel); - ret = mgbe_mac_indir_addr_write(osi_core, - MGBE_MAC_DCHSEL, - idx, - xdcs_check); - +fail: return ret; } @@ -731,32 +387,34 @@ static int mgbe_update_mac_addr_low_high_reg( * Algorithm: Waits 
for waits for transfer busy bit to be cleared in * L3_L4 address control register to complete filter register operations. * - * @param[in] addr: MGBE virtual base address. + * @param[in] osi_core: osi core priv data structure * * @note MAC needs to be out of reset and proper clock configured. * * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_poll_for_l3l4crtl(struct osi_core_priv_data *osi_core) +static nve32_t mgbe_poll_for_l3l4crtl(struct osi_core_priv_data *osi_core) { - unsigned int retry = 10; - unsigned int count; - unsigned int l3l4_addr_ctrl = 0; - int cond = 1; + nveu32_t retry = 10; + nveu32_t count; + nveu32_t l3l4_addr_ctrl = 0; + nve32_t cond = 1; + nve32_t ret = 0; /* Poll Until L3_L4_Address_Control XB is clear */ count = 0; while (cond == 1) { if (count > retry) { /* Return error after max retries */ - return -1; + ret = -1; + goto fail; } count++; l3l4_addr_ctrl = osi_readla(osi_core, - (unsigned char *)osi_core->base + + (nveu8_t *)osi_core->base + MGBE_MAC_L3L4_ADDR_CTR); if ((l3l4_addr_ctrl & MGBE_MAC_L3L4_ADDR_CTR_XB) == OSI_NONE) { /* Set cond to 0 to exit loop */ @@ -766,8 +424,8 @@ static int mgbe_poll_for_l3l4crtl(struct osi_core_priv_data *osi_core) osi_core->osd_ops.udelay(MGBE_MAC_XB_WAIT); } } - - return 0; +fail: + return ret; } /** @@ -775,7 +433,7 @@ static int mgbe_poll_for_l3l4crtl(struct osi_core_priv_data *osi_core) * * Algorithm: writes L3_L4 filter register * - * @param[in] base: MGBE virtual base address. + * @param[in] osi_core: osi core priv data structure * @param[in] filter_no: MGBE L3_L4 filter number * @param[in] filter_type: MGBE L3_L4 filter register type. * @param[in] value: MGBE L3_L4 filter register value @@ -785,21 +443,22 @@ static int mgbe_poll_for_l3l4crtl(struct osi_core_priv_data *osi_core) * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_l3l4_filter_write(struct osi_core_priv_data *osi_core, - unsigned int filter_no, - unsigned int filter_type, - unsigned int value) +static nve32_t mgbe_l3l4_filter_write(struct osi_core_priv_data *osi_core, + nveu32_t filter_no, + nveu32_t filter_type, + nveu32_t value) { void *base = osi_core->base; - unsigned int addr = 0; + nveu32_t addr = 0; + nve32_t ret = 0; /* Write MAC_L3_L4_Data register value */ osi_writela(osi_core, value, - (unsigned char *)base + MGBE_MAC_L3L4_DATA); + (nveu8_t *)base + MGBE_MAC_L3L4_DATA); /* Program MAC_L3_L4_Address_Control */ addr = osi_readla(osi_core, - (unsigned char *)base + MGBE_MAC_L3L4_ADDR_CTR); + (nveu8_t *)base + MGBE_MAC_L3L4_ADDR_CTR); /* update filter number */ addr &= ~(MGBE_MAC_L3L4_ADDR_CTR_IDDR_FNUM); @@ -819,717 +478,119 @@ static int mgbe_l3l4_filter_write(struct osi_core_priv_data *osi_core, /* Write MGBE_MAC_L3L4_ADDR_CTR */ osi_writela(osi_core, addr, - (unsigned char *)base + MGBE_MAC_L3L4_ADDR_CTR); + (nveu8_t *)base + MGBE_MAC_L3L4_ADDR_CTR); /* Wait untile XB bit reset */ if (mgbe_poll_for_l3l4crtl(osi_core) < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to write L3_L4_Address_Control\n", filter_type); - return -1; - } - - return 0; -} - -/** - * @brief mgbe_l3l4_filter_read - L3_L4 filter register read. - * - * Algorithm: writes L3_L4 filter register - * - * @param[in] base: MGBE virtual base address. - * @param[in] filter_no: MGBE L3_L4 filter number - * @param[in] filter_type: MGBE L3_L4 filter register type. - * @param[in] *value: Pointer MGBE L3_L4 filter register value - * - * @note MAC needs to be out of reset and proper clock configured. - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static int mgbe_l3l4_filter_read(struct osi_core_priv_data *osi_core, - unsigned int filter_no, - unsigned int filter_type, - unsigned int *value) -{ - void *base = osi_core->base; - unsigned int addr = 0; - - /* Program MAC_L3_L4_Address_Control */ - addr = osi_readla(osi_core, - (unsigned char *)base + MGBE_MAC_L3L4_ADDR_CTR); - - /* update filter number */ - addr &= ~(MGBE_MAC_L3L4_ADDR_CTR_IDDR_FNUM); - addr |= ((filter_no << MGBE_MAC_L3L4_ADDR_CTR_IDDR_FNUM_SHIFT) & - MGBE_MAC_L3L4_ADDR_CTR_IDDR_FNUM); - - /* update filter type */ - addr &= ~(MGBE_MAC_L3L4_ADDR_CTR_IDDR_FTYPE); - addr |= ((filter_type << MGBE_MAC_L3L4_ADDR_CTR_IDDR_FTYPE_SHIFT) & - MGBE_MAC_L3L4_ADDR_CTR_IDDR_FTYPE); - - /* Set TT field 1 for read */ - addr |= MGBE_MAC_L3L4_ADDR_CTR_TT; - - /* Set XB bit to initiate write */ - addr |= MGBE_MAC_L3L4_ADDR_CTR_XB; - - /* Write MGBE_MAC_L3L4_ADDR_CTR */ - osi_writela(osi_core, addr, - (unsigned char *)base + MGBE_MAC_L3L4_ADDR_CTR); - - /* Wait untile XB bit reset */ - if (mgbe_poll_for_l3l4crtl(osi_core) < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "Fail to read L3L4 Address\n", - filter_type); - return -1; - } - - /* Read the MGBE_MAC_L3L4_DATA for filter register data */ - *value = osi_readla(osi_core, - (unsigned char *)base + MGBE_MAC_L3L4_DATA); - return 0; -} - -/** - * @brief mgbe_update_ip4_addr - configure register for IPV4 address filtering - * - * Algorithm: This sequence is used to update IPv4 source/destination - * Address for L3 layer filtering - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] filter_no: filter index - * @param[in] addr: ipv4 address - * @param[in] src_dst_addr_match: 0 - source addr otherwise - dest addr - * - * @note 1) MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static int mgbe_update_ip4_addr(struct osi_core_priv_data *osi_core, - const unsigned int filter_no, - const unsigned char addr[], - const unsigned int src_dst_addr_match) -{ - unsigned int value = 0U; - unsigned int temp = 0U; - int ret = 0; - - if (addr == OSI_NULL) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "invalid address\n", - 0ULL); - return -1; - } - - if (filter_no >= OSI_MGBE_MAX_L3_L4_FILTER) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "invalid filter index for L3/L4 filter\n", - (unsigned long long)filter_no); - return -1; - } - - /* validate src_dst_addr_match argument */ - if (src_dst_addr_match != OSI_SOURCE_MATCH && - src_dst_addr_match != OSI_INV_MATCH) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Invalid src_dst_addr_match value\n", - src_dst_addr_match); - return -1; - } - - value = addr[3]; - temp = (unsigned int)addr[2] << 8; - value |= temp; - temp = (unsigned int)addr[1] << 16; - value |= temp; - temp = (unsigned int)addr[0] << 24; - value |= temp; - if (src_dst_addr_match == OSI_SOURCE_MATCH) { - ret = mgbe_l3l4_filter_write(osi_core, - filter_no, - MGBE_MAC_L3_AD0R, - value); - } else { - ret = mgbe_l3l4_filter_write(osi_core, - filter_no, - MGBE_MAC_L3_AD1R, - value); + ret = -1; } return ret; } /** - * @brief mgbe_update_ip6_addr - add ipv6 address in register + * @brief mgbe_config_l3l4_filters - Config L3L4 filters. * - * Algorithm: This sequence is used to update IPv6 source/destination - * Address for L3 layer filtering + * @note + * Algorithm: + * - This sequence is used to configure L3L4 filters for SA and DA Port Number matching. + * - Prepare register data using prepare_l3l4_registers(). + * - Write l3l4 reigsters using mgbe_l3l4_filter_write(). + * - Return 0 on success. + * - Return -1 on any register failure. * * @param[in] osi_core: OSI core private data structure. 
- * @param[in] filter_no: filter index - * @param[in] addr: ipv6 adderss + * @param[in] filter_no_r: filter index + * @param[in] l3_l4: Pointer to l3 l4 filter structure (#osi_l3_l4_filter) * * @note 1) MAC should be init and started. see osi_start_mac() + * 2) osi_core->osd should be populated * * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_update_ip6_addr(struct osi_core_priv_data *osi_core, - const unsigned int filter_no, - const unsigned short addr[]) +static nve32_t mgbe_config_l3l4_filters(struct osi_core_priv_data *const osi_core, + nveu32_t filter_no_r, + const struct osi_l3_l4_filter *const l3_l4) { - unsigned int value = 0U; - unsigned int temp = 0U; - int ret = 0; +#ifndef OSI_STRIPPED_LIB + nveu32_t l3_addr0_reg = 0; + nveu32_t l3_addr2_reg = 0; + nveu32_t l3_addr3_reg = 0; + nveu32_t l4_addr_reg = 0; +#endif /* !OSI_STRIPPED_LIB */ + nveu32_t l3_addr1_reg = 0; + nveu32_t ctr_reg = 0; + nveu32_t filter_no = filter_no_r & (OSI_MGBE_MAX_L3_L4_FILTER - 1U); + nve32_t err; + nve32_t ret = -1; - if (addr == OSI_NULL) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "invalid address\n", - 0ULL); - return -1; - } + prepare_l3l4_registers(osi_core, l3_l4, +#ifndef OSI_STRIPPED_LIB + &l3_addr0_reg, + &l3_addr2_reg, + &l3_addr3_reg, + &l4_addr_reg, +#endif /* !OSI_STRIPPED_LIB */ + &l3_addr1_reg, + &ctr_reg); - if (filter_no >= OSI_MGBE_MAX_L3_L4_FILTER) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "invalid filter index for L3/L4 filter\n", - (unsigned long long)filter_no); - return -1; - } - - /* update Bits[31:0] of 128-bit IP addr */ - value = addr[7]; - temp = (unsigned int)addr[6] << 16; - value |= temp; - - ret = mgbe_l3l4_filter_write(osi_core, filter_no, - MGBE_MAC_L3_AD0R, value); - if (ret < 0) { +#ifndef OSI_STRIPPED_LIB + /* Update l3 ip addr MGBE_MAC_L3_AD0R register */ + err = mgbe_l3l4_filter_write(osi_core, filter_no, MGBE_MAC_L3_AD0R, l3_addr0_reg); + if (err < 0) { /* Write MGBE_MAC_L3_AD0R fail return error 
*/ - return ret; + goto exit_func; } - /* update Bits[63:32] of 128-bit IP addr */ - value = addr[5]; - temp = (unsigned int)addr[4] << 16; - value |= temp; - ret = mgbe_l3l4_filter_write(osi_core, filter_no, - MGBE_MAC_L3_AD1R, value); - if (ret < 0) { - /* Write MGBE_MAC_L3_AD1R fail return error */ - return ret; - } - /* update Bits[95:64] of 128-bit IP addr */ - value = addr[3]; - temp = (unsigned int)addr[2] << 16; - value |= temp; - - ret = mgbe_l3l4_filter_write(osi_core, filter_no, - MGBE_MAC_L3_AD2R, value); - if (ret < 0) { + /* Update l3 ip addr MGBE_MAC_L3_AD2R register */ + err = mgbe_l3l4_filter_write(osi_core, filter_no, MGBE_MAC_L3_AD2R, l3_addr2_reg); + if (err < 0) { /* Write MGBE_MAC_L3_AD2R fail return error */ - return ret; + goto exit_func; } - /* update Bits[127:96] of 128-bit IP addr */ - value = addr[1]; - temp = (unsigned int)addr[0] << 16; - value |= temp; - - return mgbe_l3l4_filter_write(osi_core, filter_no, - MGBE_MAC_L3_AD3R, value); -} - -/** - * @brief mgbe_config_l3_l4_filter_enable - register write to enable L3/L4 - * filters. - * - * Algorithm: This routine to enable/disable L3/l4 filter - * - * @param[in] osi_core: OSI core private data structure. - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static int mgbe_config_l3_l4_filter_enable( - struct osi_core_priv_data *const osi_core, - unsigned int filter_enb_dis) -{ - unsigned int value = 0U; - void *base = osi_core->base; - - /* validate filter_enb_dis argument */ - if (filter_enb_dis != OSI_ENABLE && filter_enb_dis != OSI_DISABLE) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "Invalid filter_enb_dis value\n", - filter_enb_dis); - return -1; + /* Update l3 ip addr MGBE_MAC_L3_AD3R register */ + err = mgbe_l3l4_filter_write(osi_core, filter_no, MGBE_MAC_L3_AD3R, l3_addr3_reg); + if (err < 0) { + /* Write MGBE_MAC_L3_AD3R fail return error */ + goto exit_func; } - value = osi_readla(osi_core, (unsigned char *)base + MGBE_MAC_PFR); - value &= ~(MGBE_MAC_PFR_IPFE); - value |= ((filter_enb_dis << MGBE_MAC_PFR_IPFE_SHIFT) & - MGBE_MAC_PFR_IPFE); - osi_writela(osi_core, value, (unsigned char *)base + MGBE_MAC_PFR); + /* Update l4 port register MGBE_MAC_L4_ADDR register */ + err = mgbe_l3l4_filter_write(osi_core, filter_no, MGBE_MAC_L4_ADDR, l4_addr_reg); + if (err < 0) { + /* Write MGBE_MAC_L4_ADDR fail return error */ + goto exit_func; + } +#endif /* !OSI_STRIPPED_LIB */ - return 0; -} - -/** - * @brief mgbe_update_l4_port_no -program source port no - * - * Algorithm: sequence is used to update Source Port Number for - * L4(TCP/UDP) layer filtering. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] filter_no: filter index - * @param[in] port_no: port number - * @param[in] src_dst_port_match: 0 - source port, otherwise - dest port - * - * @note 1) MAC should be init and started. see osi_start_mac() - * 2) osi_core->osd should be populated - * 3) DCS bits should be enabled in RXQ to DMA mapping register - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static int mgbe_update_l4_port_no(struct osi_core_priv_data *osi_core, - unsigned int filter_no, - unsigned short port_no, - unsigned int src_dst_port_match) -{ - unsigned int value = 0U; - unsigned int temp = 0U; - int ret = 0; - - if (filter_no >= OSI_MGBE_MAX_L3_L4_FILTER) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "invalid filter index for L3/L4 filter\n", - (unsigned long long)filter_no); - return -1; + /* Update l3 ip addr MGBE_MAC_L3_AD1R register */ + err = mgbe_l3l4_filter_write(osi_core, filter_no, MGBE_MAC_L3_AD1R, l3_addr1_reg); + if (err < 0) { + /* Write MGBE_MAC_L3_AD1R fail return error */ + goto exit_func; } - ret = mgbe_l3l4_filter_read(osi_core, filter_no, - MGBE_MAC_L4_ADDR, &value); - if (ret < 0) { - /* Read MGBE_MAC_L4_ADDR fail return error */ - return ret; - } - - if (src_dst_port_match == OSI_SOURCE_MATCH) { - value &= ~MGBE_MAC_L4_ADDR_SP_MASK; - value |= ((unsigned int)port_no & MGBE_MAC_L4_ADDR_SP_MASK); - } else { - value &= ~MGBE_MAC_L4_ADDR_DP_MASK; - temp = port_no; - value |= ((temp << MGBE_MAC_L4_ADDR_DP_SHIFT) & - MGBE_MAC_L4_ADDR_DP_MASK); - } - - return mgbe_l3l4_filter_write(osi_core, filter_no, - MGBE_MAC_L4_ADDR, value); -} - -/** - * @brief mgbe_set_dcs - check and update dma routing register - * - * Algorithm: Check for request for DCS_enable as well as validate chan - * number and dcs_enable is set. After validation, this sequence is used - * to configure L3((IPv4/IPv6) filters for address matching. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] value: unsigned int value for caller - * @param[in] dma_routing_enable: filter based dma routing enable(1) - * @param[in] dma_chan: dma channel for routing based on filter - * - * @note 1) MAC IP should be out of reset and need to be initialized - * as the requirements. 
- * 2) DCS bit of RxQ should be enabled for dynamic channel selection - * in filter support - * - * @retval updated unsigned int value param - */ -static inline unsigned int mgbe_set_dcs(struct osi_core_priv_data *osi_core, - unsigned int value, - unsigned int dma_routing_enable, - unsigned int dma_chan) -{ - if ((dma_routing_enable == OSI_ENABLE) && (dma_chan < - OSI_MGBE_MAX_NUM_CHANS) && (osi_core->dcs_en == - OSI_ENABLE)) { - value |= ((dma_routing_enable << - MGBE_MAC_L3L4_CTR_DMCHEN0_SHIFT) & - MGBE_MAC_L3L4_CTR_DMCHEN0); - value |= ((dma_chan << - MGBE_MAC_L3L4_CTR_DMCHN0_SHIFT) & - MGBE_MAC_L3L4_CTR_DMCHN0); - } - - return value; -} - -/** - * @brief mgbe_helper_l3l4_bitmask - helper function to set L3L4 - * bitmask. - * - * Algorithm: set bit corresponding to L3l4 filter index - * - * @param[in] bitmask: bit mask OSI core private data structure. - * @param[in] filter_no: filter index - * @param[in] value: 0 - disable otherwise - l3/l4 filter enabled - * - * @note 1) MAC should be init and started. see osi_start_mac() - */ -static inline void mgbe_helper_l3l4_bitmask(unsigned int *bitmask, - unsigned int filter_no, - unsigned int value) -{ - unsigned int temp; - - temp = OSI_ENABLE; - temp = temp << filter_no; - - /* check against all bit fields for L3L4 filter enable */ - if ((value & MGBE_MAC_L3L4_CTRL_ALL) != OSI_DISABLE) { - /* Set bit mask for index */ - *bitmask |= temp; - } else { - /* Reset bit mask for index */ - *bitmask &= ~temp; - } -} - -/** - * @brief mgbe_config_l3_filters - config L3 filters. - * - * Algorithm: Check for DCS_enable as well as validate channel - * number and if dcs_enable is set. After validation, code flow - * is used to configure L3((IPv4/IPv6) filters resister - * for address matching. - * - * @param[in] osi_core: OSI core private data structure. 
- * @param[in] filter_no: filter index - * @param[in] enb_dis: 1 - enable otherwise - disable L3 filter - * @param[in] ipv4_ipv6_match: 1 - IPv6, otherwise - IPv4 - * @param[in] src_dst_addr_match: 0 - source, otherwise - destination - * @param[in] perfect_inverse_match: normal match(0) or inverse map(1) - * @param[in] dma_routing_enable: filter based dma routing enable(1) - * @param[in] dma_chan: dma channel for routing based on filter - * - * @note 1) MAC should be init and started. see osi_start_mac() - * 2) osi_core->osd should be populated - * 3) DCS bit of RxQ should be enabled for dynamic channel selection - * in filter support - * - * @retval 0 on success - * @retval -1 on failure. - */ -static int mgbe_config_l3_filters(struct osi_core_priv_data *osi_core, - unsigned int filter_no, - unsigned int enb_dis, - unsigned int ipv4_ipv6_match, - unsigned int src_dst_addr_match, - unsigned int perfect_inverse_match, - unsigned int dma_routing_enable, - unsigned int dma_chan) -{ - unsigned int value = 0U; - int ret = 0; - - if (filter_no >= OSI_MGBE_MAX_L3_L4_FILTER) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "invalid filter index for L3/L4 filter\n", - (unsigned long long)filter_no); - return -1; - } - /* validate enb_dis argument */ - if (enb_dis != OSI_ENABLE && enb_dis != OSI_DISABLE) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Invalid filter_enb_dis value\n", - enb_dis); - return -1; - } - /* validate ipv4_ipv6_match argument */ - if (ipv4_ipv6_match != OSI_IPV6_MATCH && - ipv4_ipv6_match != OSI_IPV4_MATCH) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Invalid ipv4_ipv6_match value\n", - ipv4_ipv6_match); - return -1; - } - /* validate src_dst_addr_match argument */ - if (src_dst_addr_match != OSI_SOURCE_MATCH && - src_dst_addr_match != OSI_INV_MATCH) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Invalid src_dst_addr_match value\n", - src_dst_addr_match); - return -1; - } - /* validate perfect_inverse_match 
argument */ - if (perfect_inverse_match != OSI_ENABLE && - perfect_inverse_match != OSI_DISABLE) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Invalid perfect_inverse_match value\n", - perfect_inverse_match); - return -1; - } - if ((dma_routing_enable == OSI_ENABLE) && - (dma_chan > OSI_MGBE_MAX_NUM_CHANS - 1U)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "Wrong DMA channel\n", - (unsigned long long)dma_chan); - return -1; - } - - ret = mgbe_l3l4_filter_read(osi_core, filter_no, - MGBE_MAC_L3L4_CTR, &value); - if (ret < 0) { - /* MGBE_MAC_L3L4_CTR read fail return here */ - return ret; - } - - value &= ~MGBE_MAC_L3L4_CTR_L3PEN0; - value |= (ipv4_ipv6_match & MGBE_MAC_L3L4_CTR_L3PEN0); - - /* For IPv6 either SA/DA can be checked not both */ - if (ipv4_ipv6_match == OSI_IPV6_MATCH) { - if (enb_dis == OSI_ENABLE) { - if (src_dst_addr_match == OSI_SOURCE_MATCH) { - /* Enable L3 filters for IPv6 SOURCE addr - * matching - */ - value &= ~MGBE_MAC_L3_IP6_CTRL_CLEAR; - value |= ((MGBE_MAC_L3L4_CTR_L3SAM0 | - perfect_inverse_match << - MGBE_MAC_L3L4_CTR_L3SAIM0_SHIFT) & - ((MGBE_MAC_L3L4_CTR_L3SAM0 | - MGBE_MAC_L3L4_CTR_L3SAIM0))); - value |= mgbe_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - - } else { - /* Enable L3 filters for IPv6 DESTINATION addr - * matching - */ - value &= ~MGBE_MAC_L3_IP6_CTRL_CLEAR; - value |= ((MGBE_MAC_L3L4_CTR_L3DAM0 | - perfect_inverse_match << - MGBE_MAC_L3L4_CTR_L3DAIM0_SHIFT) & - ((MGBE_MAC_L3L4_CTR_L3DAM0 | - MGBE_MAC_L3L4_CTR_L3DAIM0))); - value |= mgbe_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - } - } else { - /* Disable L3 filters for IPv6 SOURCE/DESTINATION addr - * matching - */ - value &= ~(MGBE_MAC_L3_IP6_CTRL_CLEAR | - MGBE_MAC_L3L4_CTR_L3PEN0); - } - } else { - if (src_dst_addr_match == OSI_SOURCE_MATCH) { - if (enb_dis == OSI_ENABLE) { - /* Enable L3 filters for IPv4 SOURCE addr - * matching - */ - value &= ~MGBE_MAC_L3_IP4_SA_CTRL_CLEAR; - value |= 
((MGBE_MAC_L3L4_CTR_L3SAM0 | - perfect_inverse_match << - MGBE_MAC_L3L4_CTR_L3SAIM0_SHIFT) & - ((MGBE_MAC_L3L4_CTR_L3SAM0 | - MGBE_MAC_L3L4_CTR_L3SAIM0))); - value |= mgbe_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - } else { - /* Disable L3 filters for IPv4 SOURCE addr - * matching - */ - value &= ~MGBE_MAC_L3_IP4_SA_CTRL_CLEAR; - } - } else { - if (enb_dis == OSI_ENABLE) { - /* Enable L3 filters for IPv4 DESTINATION addr - * matching - */ - value &= ~MGBE_MAC_L3_IP4_DA_CTRL_CLEAR; - value |= ((MGBE_MAC_L3L4_CTR_L3DAM0 | - perfect_inverse_match << - MGBE_MAC_L3L4_CTR_L3DAIM0_SHIFT) & - ((MGBE_MAC_L3L4_CTR_L3DAM0 | - MGBE_MAC_L3L4_CTR_L3DAIM0))); - value |= mgbe_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - } else { - /* Disable L3 filters for IPv4 DESTINATION addr - * matching - */ - value &= ~MGBE_MAC_L3_IP4_DA_CTRL_CLEAR; - } - } - } - - ret = mgbe_l3l4_filter_write(osi_core, filter_no, - MGBE_MAC_L3L4_CTR, value); - if (ret < 0) { + /* Write CTR register */ + err = mgbe_l3l4_filter_write(osi_core, filter_no, MGBE_MAC_L3L4_CTR, ctr_reg); + if (err < 0) { /* Write MGBE_MAC_L3L4_CTR fail return error */ - return ret; + goto exit_func; } - /* Set bit corresponding to filter index if value is non-zero */ - mgbe_helper_l3l4_bitmask(&osi_core->l3l4_filter_bitmask, - filter_no, value); - - return ret; -} - -/** - * @brief mgbe_config_l4_filters - Config L4 filters. - * - * Algorithm: This sequence is used to configure L4(TCP/UDP) filters for - * SA and DA Port Number matching - * - * @param[in] osi_core: OSI core private data structure. 
- * @param[in] filter_no: filter index - * @param[in] enb_dis: 1 - enable, otherwise - disable L4 filter - * @param[in] tcp_udp_match: 1 - udp, 0 - tcp - * @param[in] src_dst_port_match: 0 - source port, otherwise - dest port - * @param[in] perfect_inverse_match: normal match(0) or inverse map(1) - * @param[in] dma_routing_enable: filter based dma routing enable(1) - * @param[in] dma_chan: dma channel for routing based on filter - * - * @note 1) MAC should be init and started. see osi_start_mac() - * 2) osi_core->osd should be populated - * - * @retval 0 on success - * @retval -1 on failure. - */ -static int mgbe_config_l4_filters(struct osi_core_priv_data *osi_core, - unsigned int filter_no, - unsigned int enb_dis, - unsigned int tcp_udp_match, - unsigned int src_dst_port_match, - unsigned int perfect_inverse_match, - unsigned int dma_routing_enable, - unsigned int dma_chan) -{ - unsigned int value = 0U; - int ret = 0; - - if (filter_no >= OSI_MGBE_MAX_L3_L4_FILTER) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "invalid filter index for L3/L4 filter\n", - (unsigned long long)filter_no); - return -1; - } - /* validate enb_dis argument */ - if (enb_dis != OSI_ENABLE && enb_dis != OSI_DISABLE) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Invalid filter_enb_dis value\n", - enb_dis); - return -1; - } - /* validate tcp_udp_match argument */ - if (tcp_udp_match != OSI_ENABLE && tcp_udp_match != OSI_DISABLE) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Invalid tcp_udp_match value\n", - tcp_udp_match); - return -1; - } - /* validate src_dst_port_match argument */ - if (src_dst_port_match != OSI_SOURCE_MATCH && - src_dst_port_match != OSI_INV_MATCH) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Invalid src_dst_port_match value\n", - src_dst_port_match); - return -1; - } - /* validate perfect_inverse_match argument */ - if (perfect_inverse_match != OSI_ENABLE && - perfect_inverse_match != OSI_DISABLE) { - 
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Invalid perfect_inverse_match value\n", - perfect_inverse_match); - return -1; - } - if ((dma_routing_enable == OSI_ENABLE) && - (dma_chan > OSI_MGBE_MAX_NUM_CHANS - 1U)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "Wrong DMA channel\n", - (unsigned int)dma_chan); - return -1; - } - - ret = mgbe_l3l4_filter_read(osi_core, filter_no, - MGBE_MAC_L3L4_CTR, &value); - if (ret < 0) { - /* MGBE_MAC_L3L4_CTR read fail return here */ - return ret; - } - - value &= ~MGBE_MAC_L3L4_CTR_L4PEN0; - value |= ((tcp_udp_match << 16) & MGBE_MAC_L3L4_CTR_L4PEN0); - - if (src_dst_port_match == OSI_SOURCE_MATCH) { - if (enb_dis == OSI_ENABLE) { - /* Enable L4 filters for SOURCE Port No matching */ - value &= ~MGBE_MAC_L4_SP_CTRL_CLEAR; - value |= ((MGBE_MAC_L3L4_CTR_L4SPM0 | - perfect_inverse_match << - MGBE_MAC_L3L4_CTR_L4SPIM0_SHIFT) & - (MGBE_MAC_L3L4_CTR_L4SPM0 | - MGBE_MAC_L3L4_CTR_L4SPIM0)); - value |= mgbe_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - } else { - /* Disable L4 filters for SOURCE Port No matching */ - value &= ~MGBE_MAC_L4_SP_CTRL_CLEAR; - } - } else { - if (enb_dis == OSI_ENABLE) { - /* Enable L4 filters for DESTINATION port No - * matching - */ - value &= ~MGBE_MAC_L4_DP_CTRL_CLEAR; - value |= ((MGBE_MAC_L3L4_CTR_L4DPM0 | - perfect_inverse_match << - MGBE_MAC_L3L4_CTR_L4DPIM0_SHIFT) & - (MGBE_MAC_L3L4_CTR_L4DPM0 | - MGBE_MAC_L3L4_CTR_L4DPIM0)); - value |= mgbe_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - } else { - /* Disable L4 filters for DESTINATION port No - * matching - */ - value &= ~MGBE_MAC_L4_DP_CTRL_CLEAR; - } - } - - ret = mgbe_l3l4_filter_write(osi_core, filter_no, - MGBE_MAC_L3L4_CTR, value); - if (ret < 0) { - /* Write MGBE_MAC_L3L4_CTR fail return error */ - return ret; - } - - /* Set bit corresponding to filter index if value is non-zero */ - mgbe_helper_l3l4_bitmask(&osi_core->l3l4_filter_bitmask, - filter_no, value); + /* success */ + ret = 0; + 
+exit_func: return ret; } +#ifndef OSI_STRIPPED_LIB /** * @brief mgbe_config_vlan_filter_reg - config vlan filter register * @@ -1547,13 +608,13 @@ static int mgbe_config_l4_filters(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_config_vlan_filtering(struct osi_core_priv_data *osi_core, - unsigned int filter_enb_dis, - unsigned int perfect_hash_filtering, - unsigned int perfect_inverse_match) +static nve32_t mgbe_config_vlan_filtering(struct osi_core_priv_data *osi_core, + const nveu32_t filter_enb_dis, + const nveu32_t perfect_hash_filtering, + const nveu32_t perfect_inverse_match) { - unsigned int value; - unsigned char *base = osi_core->base; + nveu32_t value; + nveu8_t *base = osi_core->base; /* validate perfect_inverse_match argument */ if (perfect_hash_filtering == OSI_HASH_FILTER_MODE) { @@ -1570,7 +631,7 @@ static int mgbe_config_vlan_filtering(struct osi_core_priv_data *osi_core, } /* validate filter_enb_dis argument */ - if (filter_enb_dis != OSI_ENABLE && filter_enb_dis != OSI_DISABLE) { + if ((filter_enb_dis != OSI_ENABLE) && (filter_enb_dis != OSI_DISABLE)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid filter_enb_dis value\n", filter_enb_dis); @@ -1578,8 +639,8 @@ static int mgbe_config_vlan_filtering(struct osi_core_priv_data *osi_core, } /* validate perfect_inverse_match argument */ - if (perfect_inverse_match != OSI_ENABLE && - perfect_inverse_match != OSI_DISABLE) { + if ((perfect_inverse_match != OSI_ENABLE) && + (perfect_inverse_match != OSI_DISABLE)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid perfect_inverse_match value\n", perfect_inverse_match); @@ -1618,13 +679,13 @@ static int mgbe_config_vlan_filtering(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_config_ptp_rxq(struct osi_core_priv_data *const osi_core, - const unsigned int rxq_idx, - const unsigned int enable) +static nve32_t mgbe_config_ptp_rxq(struct osi_core_priv_data *const osi_core, + const nveu32_t rxq_idx, + const nveu32_t enable) { - unsigned char *base = osi_core->base; - unsigned int value = 0U; - unsigned int i = 0U; + nveu8_t *base = osi_core->base; + nveu32_t value = 0U; + nveu32_t i = 0U; /* Validate the RX queue index argument */ if (rxq_idx >= OSI_MGBE_MAX_NUM_QUEUES) { @@ -1635,7 +696,7 @@ static int mgbe_config_ptp_rxq(struct osi_core_priv_data *const osi_core, } /* Validate enable argument */ - if (enable != OSI_ENABLE && enable != OSI_DISABLE) { + if ((enable != OSI_ENABLE) && (enable != OSI_DISABLE)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid enable input\n", enable); @@ -1686,60 +747,6 @@ static int mgbe_config_ptp_rxq(struct osi_core_priv_data *const osi_core, return 0; } -/** - * @brief mgbe_flush_mtl_tx_queue - Flush MTL Tx queue - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] qinx: MTL queue index. - * - * @note 1) MAC should out of reset and clocks enabled. - * 2) hw core initialized. see osi_hw_core_init(). - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static nve32_t mgbe_flush_mtl_tx_queue( - struct osi_core_priv_data *const osi_core, - const nveu32_t qinx) -{ - void *addr = osi_core->base; - nveu32_t retry = 1000; - nveu32_t count; - nveu32_t value; - nve32_t cond = 1; - - if (qinx >= OSI_MGBE_MAX_NUM_QUEUES) { - return -1; - } - - /* Read Tx Q Operating Mode Register and flush TxQ */ - value = osi_readla(osi_core, (nveu8_t *)addr + - MGBE_MTL_CHX_TX_OP_MODE(qinx)); - value |= MGBE_MTL_QTOMR_FTQ; - osi_writela(osi_core, value, (nveu8_t *)addr + - MGBE_MTL_CHX_TX_OP_MODE(qinx)); - - /* Poll Until FTQ bit resets for Successful Tx Q flush */ - count = 0; - while (cond == 1) { - if (count > retry) { - return -1; - } - - count++; - - value = osi_readla(osi_core, (nveu8_t *)addr + - MGBE_MTL_CHX_TX_OP_MODE(qinx)); - if ((value & MGBE_MTL_QTOMR_FTQ_LPOS) == OSI_NONE) { - cond = 0; - } else { - osi_core->osd_ops.msleep(1); - } - } - - return 0; -} - /** * @brief mgbe_config_mac_loopback - Configure MAC to support loopback * @@ -1752,19 +759,19 @@ static nve32_t mgbe_flush_mtl_tx_queue( * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_config_mac_loopback(struct osi_core_priv_data *const osi_core, - unsigned int lb_mode) +static nve32_t mgbe_config_mac_loopback(struct osi_core_priv_data *const osi_core, + nveu32_t lb_mode) { - unsigned int value; + nveu32_t value; void *addr = osi_core->base; /* don't allow only if loopback mode is other than 0 or 1 */ - if (lb_mode != OSI_ENABLE && lb_mode != OSI_DISABLE) { + if ((lb_mode != OSI_ENABLE) && (lb_mode != OSI_DISABLE)) { return -1; } /* Read MAC Configuration Register */ - value = osi_readla(osi_core, (unsigned char *)addr + MGBE_MAC_RMCR); + value = osi_readla(osi_core, (nveu8_t *)addr + MGBE_MAC_RMCR); if (lb_mode == OSI_ENABLE) { /* Enable Loopback Mode */ value |= MGBE_MAC_RMCR_LM; @@ -1772,7 +779,7 @@ static int mgbe_config_mac_loopback(struct osi_core_priv_data *const osi_core, value &= ~MGBE_MAC_RMCR_LM; } - osi_writela(osi_core, value, (unsigned char *)addr + MGBE_MAC_RMCR); + osi_writela(osi_core, value, (nveu8_t *)addr + MGBE_MAC_RMCR); return 0; } @@ -1797,77 +804,39 @@ static int mgbe_config_mac_loopback(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_config_arp_offload(struct osi_core_priv_data *const osi_core, - const unsigned int enable, - const unsigned char *ip_addr) +static nve32_t mgbe_config_arp_offload(struct osi_core_priv_data *const osi_core, + const nveu32_t enable, + const nveu8_t *ip_addr) { - unsigned int mac_rmcr; - unsigned int val; + nveu32_t mac_rmcr; + nveu32_t val; void *addr = osi_core->base; - if (enable != OSI_ENABLE && enable != OSI_DISABLE) { + if ((enable != OSI_ENABLE) && (enable != OSI_DISABLE)) { return -1; } - mac_rmcr = osi_readla(osi_core, (unsigned char *)addr + MGBE_MAC_RMCR); + mac_rmcr = osi_readla(osi_core, (nveu8_t *)addr + MGBE_MAC_RMCR); if (enable == OSI_ENABLE) { - val = (((unsigned int)ip_addr[0]) << 24) | - (((unsigned int)ip_addr[1]) << 16) | - (((unsigned int)ip_addr[2]) << 8) | - (((unsigned int)ip_addr[3])); + val = (((nveu32_t)ip_addr[0]) << 24) | + (((nveu32_t)ip_addr[1]) << 16) | + (((nveu32_t)ip_addr[2]) << 8) | + (((nveu32_t)ip_addr[3])); osi_writela(osi_core, val, - (unsigned char *)addr + MGBE_MAC_ARPPA); + (nveu8_t *)addr + MGBE_MAC_ARPPA); mac_rmcr |= MGBE_MAC_RMCR_ARPEN; } else { mac_rmcr &= ~MGBE_MAC_RMCR_ARPEN; } - osi_writela(osi_core, mac_rmcr, (unsigned char *)addr + MGBE_MAC_RMCR); - - return 0; -} - -/** - * @brief mgbe_config_rxcsum_offload - Enable/Disale rx checksum offload in HW - * - * Algorithm: - * 1) Read the MAC configuration register. - * 2) Enable the IP checksum offload engine COE in MAC receiver. - * 3) Update the MAC configuration register. - * - * @param[in] addr: MGBE virtual base address. - * @param[in] enabled: Flag to indicate feature is to be enabled/disabled. - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static int mgbe_config_rxcsum_offload( - struct osi_core_priv_data *const osi_core, - unsigned int enabled) -{ - void *addr = osi_core->base; - unsigned int mac_rmcr; - - if (enabled != OSI_ENABLE && enabled != OSI_DISABLE) { - return -1; - } - - mac_rmcr = osi_readla(osi_core, (unsigned char *)addr + MGBE_MAC_RMCR); - if (enabled == OSI_ENABLE) { - mac_rmcr |= MGBE_MAC_RMCR_IPC; - } else { - mac_rmcr &= ~MGBE_MAC_RMCR_IPC; - } - - osi_writela(osi_core, mac_rmcr, (unsigned char *)addr + MGBE_MAC_RMCR); + osi_writela(osi_core, mac_rmcr, (nveu8_t *)addr + MGBE_MAC_RMCR); return 0; } +#endif /* !OSI_STRIPPED_LIB */ /** * @brief mgbe_config_frp - Enable/Disale RX Flexible Receive Parser in HW @@ -1885,18 +854,19 @@ static int mgbe_config_rxcsum_offload( * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_config_frp(struct osi_core_priv_data *const osi_core, - const unsigned int enabled) +static nve32_t mgbe_config_frp(struct osi_core_priv_data *const osi_core, + const nveu32_t enabled) { - unsigned char *base = osi_core->base; - unsigned int op_mode = 0U, val = 0U; - int ret = -1; + nveu8_t *base = osi_core->base; + nveu32_t op_mode = 0U, val = 0U; + nve32_t ret = 0; - if (enabled != OSI_ENABLE && enabled != OSI_DISABLE) { + if ((enabled != OSI_ENABLE) && (enabled != OSI_DISABLE)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid enable input\n", enabled); - return -1; + ret = -1; + goto done; } op_mode = osi_readla(osi_core, base + MGBE_MTL_OP_MODE); @@ -1917,7 +887,8 @@ static int mgbe_config_frp(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to enable FRP\n", val); - return -1; + ret = -1; + goto done; } /* Enable FRP Interrupts in MTL_RXP_Interrupt_Control_Status */ @@ -1944,7 +915,8 @@ static int mgbe_config_frp(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to disable FRP\n", val); - return -1; + ret = -1; + goto done; } /* Disable 
FRP Interrupts in MTL_RXP_Interrupt_Control_Status */ @@ -1956,7 +928,8 @@ static int mgbe_config_frp(struct osi_core_priv_data *const osi_core, osi_writela(osi_core, val, base + MGBE_MTL_RXP_INTR_CS); } - return 0; +done: + return ret; } /** @@ -1976,20 +949,21 @@ static int mgbe_config_frp(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_frp_write(struct osi_core_priv_data *osi_core, - unsigned int acc_sel, - unsigned int addr, - unsigned int data) +static nve32_t mgbe_frp_write(struct osi_core_priv_data *osi_core, + nveu32_t acc_sel, + nveu32_t addr, + nveu32_t data) { - int ret = 0; - unsigned char *base = osi_core->base; - unsigned int val = 0U; + nve32_t ret = 0; + nveu8_t *base = osi_core->base; + nveu32_t val = 0U; - if (acc_sel != OSI_ENABLE && acc_sel != OSI_DISABLE) { + if ((acc_sel != OSI_ENABLE) && (acc_sel != OSI_DISABLE)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid acc_sel argment\n", acc_sel); - return -1; + ret = -1; + goto done; } /* Wait for ready */ @@ -2004,7 +978,8 @@ static int mgbe_frp_write(struct osi_core_priv_data *osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to write\n", val); - return -1; + ret = -1; + goto done; } /* Write data into MTL_RXP_Indirect_Acc_Data */ @@ -2041,9 +1016,10 @@ static int mgbe_frp_write(struct osi_core_priv_data *osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to write\n", val); - return -1; + ret = -1; } +done: return ret; } @@ -2061,19 +1037,20 @@ static int mgbe_frp_write(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_update_frp_entry(struct osi_core_priv_data *const osi_core, - const unsigned int pos, - struct osi_core_frp_data *const data) +static nve32_t mgbe_update_frp_entry(struct osi_core_priv_data *const osi_core, + const nveu32_t pos, + struct osi_core_frp_data *const data) { - unsigned int val = 0U, tmp = 0U; - int ret = -1; + nveu32_t val = 0U, tmp = 0U; + nve32_t ret = -1; /* Validate pos value */ if (pos >= OSI_FRP_MAX_ENTRY) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid FRP table entry\n", pos); - return -1; + ret = -1; + goto done; } /** Write Match Data into IE0 **/ @@ -2081,7 +1058,8 @@ static int mgbe_update_frp_entry(struct osi_core_priv_data *const osi_core, ret = mgbe_frp_write(osi_core, OSI_DISABLE, MGBE_MTL_FRP_IE0(pos), val); if (ret < 0) { /* Match Data Write fail */ - return -1; + ret = -1; + goto done; } /** Write Match Enable into IE1 **/ @@ -2089,7 +1067,8 @@ static int mgbe_update_frp_entry(struct osi_core_priv_data *const osi_core, ret = mgbe_frp_write(osi_core, OSI_DISABLE, MGBE_MTL_FRP_IE1(pos), val); if (ret < 0) { /* Match Enable Write fail */ - return -1; + ret = -1; + goto done; } /** Write AF, RF, IM, NIC, FO and OKI into IE2 **/ @@ -2119,7 +1098,8 @@ static int mgbe_update_frp_entry(struct osi_core_priv_data *const osi_core, ret = mgbe_frp_write(osi_core, OSI_DISABLE, MGBE_MTL_FRP_IE2(pos), val); if (ret < 0) { /* FRP IE2 Write fail */ - return -1; + ret = -1; + goto done; } /** Write DCH into IE3 **/ @@ -2127,9 +1107,10 @@ static int mgbe_update_frp_entry(struct osi_core_priv_data *const osi_core, ret = mgbe_frp_write(osi_core, OSI_DISABLE, MGBE_MTL_FRP_IE3(pos), val); if (ret < 0) { /* DCH Write fail */ - return -1; + ret = -1; } +done: return ret; } @@ -2138,26 +1119,28 @@ static int mgbe_update_frp_entry(struct osi_core_priv_data *const osi_core, * * Algorithm: * - * @param[in] addr: MGBE virtual base address. - * @param[in] enabled: Flag to indicate feature is to be enabled/disabled. 
+ * @param[in] osi_core: osi core priv data structure + * @param[in] nve: Number of Valid Entries. * * @note MAC should be init and started. see osi_start_mac() * * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_update_frp_nve(struct osi_core_priv_data *const osi_core, - const unsigned int nve) +static nve32_t mgbe_update_frp_nve(struct osi_core_priv_data *const osi_core, + const nveu32_t nve) { - unsigned int val; - unsigned char *base = osi_core->base; + nveu32_t val; + nveu8_t *base = osi_core->base; + nve32_t ret; /* Validate the NVE value */ if (nve >= OSI_FRP_MAX_ENTRY) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid NVE value\n", nve); - return -1; + ret = -1; + goto done; } /* Update NVE and NPE in MTL_RXP_Control_Status register */ @@ -2169,100 +1152,10 @@ static int mgbe_update_frp_nve(struct osi_core_priv_data *const osi_core, val |= ((nve << MGBE_MTL_RXP_CS_NPE_SHIFT) & MGBE_MTL_RXP_CS_NPE); osi_writela(osi_core, val, base + MGBE_MTL_RXP_CS); - return 0; -} + ret = 0; -/** - * @brief update_rfa_rfd - Update RFD and RSA values - * - * Algorithm: Calulates and stores the RSD (Threshold for Dectivating - * Flow control) and RSA (Threshold for Activating Flow Control) values - * based on the Rx FIFO size - * - * @param[in] rx_fifo: Rx FIFO size. 
- * @param[in] value: Stores RFD and RSA values - */ -static void update_rfa_rfd(unsigned int rx_fifo, unsigned int *value) -{ - switch (rx_fifo) { - case MGBE_21K: - /* Update RFD */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_4_K << - MGBE_MTL_RXQ_OP_MODE_RFD_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_18_K << - MGBE_MTL_RXQ_OP_MODE_RFA_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - break; - case MGBE_24K: - /* Update RFD */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_4_K << - MGBE_MTL_RXQ_OP_MODE_RFD_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_21_K << - MGBE_MTL_RXQ_OP_MODE_RFA_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - break; - case MGBE_27K: - /* Update RFD */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_4_K << - MGBE_MTL_RXQ_OP_MODE_RFD_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_24_K << - MGBE_MTL_RXQ_OP_MODE_RFA_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - break; - case MGBE_32K: - /* Update RFD */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_4_K << - MGBE_MTL_RXQ_OP_MODE_RFD_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_29_K << - MGBE_MTL_RXQ_OP_MODE_RFA_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - break; - case MGBE_38K: - case MGBE_48K: - case MGBE_64K: - case MGBE_96K: - case MGBE_192K: - /* Update RFD */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_4_K << - MGBE_MTL_RXQ_OP_MODE_RFD_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_32_K << - MGBE_MTL_RXQ_OP_MODE_RFA_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - break; - case MGBE_19K: 
- default: - /* Update RFD */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_4_K << - MGBE_MTL_RXQ_OP_MODE_RFD_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_16_K << - MGBE_MTL_RXQ_OP_MODE_RFA_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - break; - } +done: + return ret; } /** @@ -2278,21 +1171,49 @@ static void update_rfa_rfd(unsigned int rx_fifo, unsigned int *value) * 6) Enable Rx Queues * 7) Enable TX Underflow Interrupt for MTL Q * - * @param[in] qinx: Queue number that need to be configured. - * @param[in] osi_core: OSI core private data. - * @param[in] tx_fifo: MTL TX queue size for a MTL queue. - * @param[in] rx_fifo: MTL RX queue size for a MTL queue. + * @param[in] osi_core: OSI core private data structure. + * @param[in] hw_qinx: Queue number that need to be configured. * * @note MAC has to be out of reset. * * @retval 0 on success * @retval -1 on failure. */ -static nve32_t mgbe_configure_mtl_queue(nveu32_t qinx, - struct osi_core_priv_data *osi_core, - nveu32_t tx_fifo, - nveu32_t rx_fifo) +static nve32_t mgbe_configure_mtl_queue(struct osi_core_priv_data *osi_core, + nveu32_t hw_qinx) { + nveu32_t qinx = hw_qinx & 0xFU; + /* + * Total available Rx queue size is 192KB. 
+ * Below is the destribution among the Rx queueu - + * Q0 - 160KB + * Q1 to Q8 - 2KB each = 8 * 2KB = 16KB + * Q9 - 16KB (MVBCQ) + * + * Formula to calculate the value to be programmed in HW + * + * vale= (size in KB / 256) - 1U + */ + const nveu32_t rx_fifo_sz[OSI_MGBE_MAX_NUM_QUEUES] = { + FIFO_SZ(160U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), + FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(16U), + }; + const nveu32_t tx_fifo_sz[OSI_MGBE_MAX_NUM_QUEUES] = { + TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, + TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, + }; + const nveu32_t rfd_rfa[OSI_MGBE_MAX_NUM_QUEUES] = { + FULL_MINUS_32_K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + }; nveu32_t value = 0; nve32_t ret = 0; @@ -2315,25 +1236,33 @@ static nve32_t mgbe_configure_mtl_queue(nveu32_t qinx, * Setting related to CBS will come here for TC. 
* default: 0x0 SP */ - ret = mgbe_flush_mtl_tx_queue(osi_core, qinx); + ret = hw_flush_mtl_tx_queue(osi_core, qinx); if (ret < 0) { - return ret; + goto fail; } - value = (tx_fifo << MGBE_MTL_TXQ_SIZE_SHIFT); + if (osi_unlikely((qinx >= OSI_MGBE_MAX_NUM_QUEUES) || + (osi_core->tc[qinx] >= OSI_MAX_TC_NUM))) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Incorrect queues/TC number\n", 0ULL); + ret = -1; + goto fail; + } + + value = (tx_fifo_sz[qinx] << MGBE_MTL_TXQ_SIZE_SHIFT); /* Enable Store and Forward mode */ value |= MGBE_MTL_TSF; /*TTC not applicable for TX*/ /* Enable TxQ */ value |= MGBE_MTL_TXQEN; value |= (osi_core->tc[qinx] << MGBE_MTL_CHX_TX_OP_MODE_Q2TC_SH); - osi_writela(osi_core, value, (unsigned char *) + osi_writela(osi_core, value, (nveu8_t *) osi_core->base + MGBE_MTL_CHX_TX_OP_MODE(qinx)); /* read RX Q0 Operating Mode Register */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_CHX_RX_OP_MODE(qinx)); - value |= (rx_fifo << MGBE_MTL_RXQ_SIZE_SHIFT); + value |= (rx_fifo_sz[qinx] << MGBE_MTL_RXQ_SIZE_SHIFT); /* Enable Store and Forward mode */ value |= MGBE_MTL_RSF; /* Enable HW flow control */ @@ -2346,18 +1275,30 @@ static nve32_t mgbe_configure_mtl_queue(nveu32_t qinx, * RFA: Threshold for Activating Flow Control * RFD: Threshold for Deactivating Flow Control */ - value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_RXQ_FLOW_CTRL(qinx)); - update_rfa_rfd(rx_fifo, &value); - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + value &= ~MGBE_MTL_RXQ_OP_MODE_RFD_MASK; + value &= ~MGBE_MTL_RXQ_OP_MODE_RFA_MASK; + value |= (rfd_rfa[qinx] << MGBE_MTL_RXQ_OP_MODE_RFD_SHIFT) & MGBE_MTL_RXQ_OP_MODE_RFD_MASK; + value |= (rfd_rfa[qinx] << MGBE_MTL_RXQ_OP_MODE_RFA_SHIFT) & MGBE_MTL_RXQ_OP_MODE_RFA_MASK; + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_RXQ_FLOW_CTRL(qinx)); - /* Transmit Queue weight */ + /* Transmit Queue 
weight, all TX weights are equal */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_QW(qinx)); - value |= (MGBE_MTL_TCQ_QW_ISCQW + qinx); + value |= MGBE_MTL_TCQ_QW_ISCQW; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_QW(qinx)); + + /* Default ETS tx selection algo */ + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MGBE_MTL_TCQ_ETS_CR(osi_core->tc[qinx])); + value &= ~MGBE_MTL_TCQ_ETS_CR_AVALG; + value |= OSI_MGBE_TXQ_AVALG_ETS; + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + + MGBE_MTL_TCQ_ETS_CR(osi_core->tc[qinx])); + /* Enable Rx Queue Control */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MAC_RQC0R); @@ -2365,16 +1306,11 @@ static nve32_t mgbe_configure_mtl_queue(nveu32_t qinx, (MGBE_MAC_RXQC0_RXQEN_SHIFT(qinx))); osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MAC_RQC0R); - - /* Enable TX Underflow Interrupt for MTL Q */ - value = osi_readl((unsigned char *)osi_core->base + - MGBE_MTL_QINT_ENABLE(qinx)); - value |= MGBE_MTL_QINT_TXUIE; - osi_writel(value, (unsigned char *)osi_core->base + - MGBE_MTL_QINT_ENABLE(qinx)); - return 0; +fail: + return ret; } +#ifndef OSI_STRIPPED_LIB /** * @brief mgbe_rss_write_reg - Write into RSS registers * @@ -2390,16 +1326,16 @@ static nve32_t mgbe_configure_mtl_queue(nveu32_t qinx, * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_rss_write_reg(struct osi_core_priv_data *osi_core, - unsigned int idx, - unsigned int value, - unsigned int is_key) +static nve32_t mgbe_rss_write_reg(struct osi_core_priv_data *osi_core, + nveu32_t idx, + nveu32_t value, + nveu32_t is_key) { - unsigned char *addr = (unsigned char *)osi_core->base; - unsigned int retry = 100; - unsigned int ctrl = 0; - unsigned int count = 0; - int cond = 1; + nveu8_t *addr = (nveu8_t *)osi_core->base; + nveu32_t retry = 100; + nveu32_t ctrl = 0; + nveu32_t count = 0; + nve32_t cond = 1; /* data into RSS Lookup Table or RSS Hash Key */ osi_writela(osi_core, value, addr + MGBE_MAC_RSS_DATA); @@ -2416,7 +1352,7 @@ static int mgbe_rss_write_reg(struct osi_core_priv_data *osi_core, /* poll for write operation to complete */ while (cond == 1) { if (count > retry) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to update RSS Hash key or table\n", 0ULL); return -1; @@ -2447,12 +1383,12 @@ static int mgbe_rss_write_reg(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_config_rss(struct osi_core_priv_data *osi_core) +static nve32_t mgbe_config_rss(struct osi_core_priv_data *osi_core) { - unsigned char *addr = (unsigned char *)osi_core->base; - unsigned int value = 0; - unsigned int i = 0, j = 0; - int ret = 0; + nveu8_t *addr = (nveu8_t *)osi_core->base; + nveu32_t value = 0; + nveu32_t i = 0, j = 0; + nve32_t ret = 0; if (osi_core->rss.enable == OSI_DISABLE) { /* RSS not supported */ @@ -2466,10 +1402,10 @@ static int mgbe_config_rss(struct osi_core_priv_data *osi_core) /* Program the hash key */ for (i = 0; i < OSI_RSS_HASH_KEY_SIZE; i += 4U) { - value = ((unsigned int)osi_core->rss.key[i] | - (unsigned int)osi_core->rss.key[i + 1U] << 8U | - (unsigned int)osi_core->rss.key[i + 2U] << 16U | - (unsigned int)osi_core->rss.key[i + 3U] << 24U); + value = ((nveu32_t)osi_core->rss.key[i] | + ((nveu32_t)osi_core->rss.key[i + 1U] << 8U) | + ((nveu32_t)osi_core->rss.key[i + 2U] << 16U) | + ((nveu32_t)osi_core->rss.key[i + 3U] << 24U)); ret = mgbe_rss_write_reg(osi_core, j, value, OSI_ENABLE); if (ret < 0) { return ret; @@ -2506,10 +1442,10 @@ static int mgbe_config_rss(struct osi_core_priv_data *osi_core) * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_config_flow_control(struct osi_core_priv_data *const osi_core, - const nveu32_t flw_ctrl) +static nve32_t mgbe_config_flow_control(struct osi_core_priv_data *const osi_core, + const nveu32_t flw_ctrl) { - unsigned int val; + nveu32_t val; void *addr = osi_core->base; /* return on invalid argument */ @@ -2520,7 +1456,7 @@ static int mgbe_config_flow_control(struct osi_core_priv_data *const osi_core, /* Configure MAC Tx Flow control */ /* Read MAC Tx Flow control Register of Q0 */ val = osi_readla(osi_core, - (unsigned char *)addr + MGBE_MAC_QX_TX_FLW_CTRL(0U)); + (nveu8_t *)addr + MGBE_MAC_QX_TX_FLW_CTRL(0U)); /* flw_ctrl BIT0: 1 is for tx flow ctrl enable * flw_ctrl BIT0: 0 is for tx flow ctrl disable @@ -2538,12 +1474,12 @@ static int mgbe_config_flow_control(struct osi_core_priv_data *const osi_core, /* Write to MAC Tx Flow control Register of Q0 */ osi_writela(osi_core, val, - (unsigned char *)addr + MGBE_MAC_QX_TX_FLW_CTRL(0U)); + (nveu8_t *)addr + MGBE_MAC_QX_TX_FLW_CTRL(0U)); /* Configure MAC Rx Flow control*/ /* Read MAC Rx Flow control Register */ val = osi_readla(osi_core, - (unsigned char *)addr + MGBE_MAC_RX_FLW_CTRL); + (nveu8_t *)addr + MGBE_MAC_RX_FLW_CTRL); /* flw_ctrl BIT1: 1 is for rx flow ctrl enable * flw_ctrl BIT1: 0 is for rx flow ctrl disable @@ -2558,10 +1494,11 @@ static int mgbe_config_flow_control(struct osi_core_priv_data *const osi_core, /* Write to MAC Rx Flow control Register */ osi_writela(osi_core, val, - (unsigned char *)addr + MGBE_MAC_RX_FLW_CTRL); + (nveu8_t *)addr + MGBE_MAC_RX_FLW_CTRL); return 0; } +#endif /* !OSI_STRIPPED_LIB */ #ifdef HSI_SUPPORT /** @@ -2575,28 +1512,28 @@ static int mgbe_config_flow_control(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure */ -static int mgbe_hsi_configure(struct osi_core_priv_data *const osi_core, - const nveu32_t enable) +static nve32_t mgbe_hsi_configure(struct osi_core_priv_data *const osi_core, + const nveu32_t enable) { 
nveu32_t value = 0U; - int ret = 0; + nve32_t ret = 0; + const nveu16_t osi_hsi_reporter_id[] = { + OSI_HSI_MGBE0_REPORTER_ID, + OSI_HSI_MGBE1_REPORTER_ID, + OSI_HSI_MGBE2_REPORTER_ID, + OSI_HSI_MGBE3_REPORTER_ID, + }; if (enable == OSI_ENABLE) { osi_core->hsi.enabled = OSI_ENABLE; - osi_core->hsi.reporter_id = hsi_err_code[osi_core->instance_id][REPORTER_IDX]; + osi_core->hsi.reporter_id = osi_hsi_reporter_id[osi_core->instance_id]; - /* T23X-MGBE_HSIv2-10 Enable PCS ECC */ - value = (EN_ERR_IND | FEC_EN); - ret = xpcs_write_safety(osi_core, XPCS_BASE_PMA_MMD_SR_PMA_KR_FEC_CTRL, value); - if (ret != 0) { - return ret; - } /* T23X-MGBE_HSIv2-12:Initialization of Transaction Timeout in PCS */ /* T23X-MGBE_HSIv2-11:Initialization of Watchdog Timer */ value = (0xCCU << XPCS_SFTY_1US_MULT_SHIFT) & XPCS_SFTY_1US_MULT_MASK; ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_SFTY_TMR_CTRL, value); if (ret != 0) { - return ret; + goto fail; } /* T23X-MGBE_HSIv2-1 Configure ECC */ value = osi_readla(osi_core, @@ -2612,15 +1549,15 @@ static int mgbe_hsi_configure(struct osi_core_priv_data *const osi_core, /* T23X-MGBE_HSIv2-5: Enabling and Initialization of Transaction Timeout */ value = (0x198U << MGBE_TMR_SHIFT) & MGBE_TMR_MASK; - value |= (0x0U << MGBE_CTMR_SHIFT) & MGBE_CTMR_MASK; - value |= (0x2U << MGBE_LTMRMD_SHIFT) & MGBE_LTMRMD_MASK; - value |= (0x1U << MGBE_NTMRMD_SHIFT) & MGBE_NTMRMD_MASK; + value |= ((nveu32_t)0x0U << MGBE_CTMR_SHIFT) & MGBE_CTMR_MASK; + value |= ((nveu32_t)0x2U << MGBE_LTMRMD_SHIFT) & MGBE_LTMRMD_MASK; + value |= ((nveu32_t)0x2U << MGBE_NTMRMD_SHIFT) & MGBE_NTMRMD_MASK; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_DWCXG_CORE_MAC_FSM_ACT_TIMER); /* T23X-MGBE_HSIv2-3: Enabling and Initialization of Watchdog Timer */ /* T23X-MGBE_HSIv2-4: Enabling of Consistency Monitor for XGMAC FSM State */ - // TODO: enable MGBE_TMOUTEN. + /* TODO enable MGBE_TMOUTEN. 
Bug 3584387 */ value = MGBE_PRTYEN; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MAC_FSM_CONTROL); @@ -2675,15 +1612,10 @@ static int mgbe_hsi_configure(struct osi_core_priv_data *const osi_core, } else { osi_core->hsi.enabled = OSI_DISABLE; - /* T23X-MGBE_HSIv2-10 Disable PCS ECC */ - ret = xpcs_write_safety(osi_core, XPCS_BASE_PMA_MMD_SR_PMA_KR_FEC_CTRL, 0); - if (ret != 0) { - return ret; - } /* T23X-MGBE_HSIv2-11:Deinitialization of Watchdog Timer */ ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_SFTY_TMR_CTRL, 0); if (ret != 0) { - return ret; + goto fail; } /* T23X-MGBE_HSIv2-1 Disable ECC */ value = osi_readla(osi_core, @@ -2742,6 +1674,56 @@ static int mgbe_hsi_configure(struct osi_core_priv_data *const osi_core, osi_writela(osi_core, value, (nveu8_t *)osi_core->xpcs_base + XPCS_WRAP_INTERRUPT_CONTROL); } +fail: + return ret; +} + +/** + * @brief mgbe_hsi_inject_err - Inject error + * + * Algorithm: Use error injection method to induce error + * + * @param[in] osi_core: OSI core private data structure. 
+ * @param[in] error_code: HSI Error code + * + * @retval 0 on success + * @retval -1 on failure + */ +static nve32_t mgbe_hsi_inject_err(struct osi_core_priv_data *const osi_core, + const nveu32_t error_code) +{ + const nveu32_t val_ce = (MGBE_MTL_DEBUG_CONTROL_FDBGEN | + MGBE_MTL_DEBUG_CONTROL_DBGMOD | + MGBE_MTL_DEBUG_CONTROL_FIFORDEN | + MGBE_MTL_DEBUG_CONTROL_EIEE | + MGBE_MTL_DEBUG_CONTROL_EIEC); + + const nveu32_t val_ue = (MGBE_MTL_DEBUG_CONTROL_FDBGEN | + MGBE_MTL_DEBUG_CONTROL_DBGMOD | + MGBE_MTL_DEBUG_CONTROL_FIFORDEN | + MGBE_MTL_DEBUG_CONTROL_EIEE); + nve32_t ret = 0; + + switch (error_code) { + case OSI_HSI_MGBE0_CE_CODE: + case OSI_HSI_MGBE1_CE_CODE: + case OSI_HSI_MGBE2_CE_CODE: + case OSI_HSI_MGBE3_CE_CODE: + osi_writela(osi_core, val_ce, (nveu8_t *)osi_core->base + + MGBE_MTL_DEBUG_CONTROL); + break; + case OSI_HSI_MGBE0_UE_CODE: + case OSI_HSI_MGBE1_UE_CODE: + case OSI_HSI_MGBE2_UE_CODE: + case OSI_HSI_MGBE3_UE_CODE: + osi_writela(osi_core, val_ue, (nveu8_t *)osi_core->base + + MGBE_MTL_DEBUG_CONTROL); + break; + default: + ret = hsi_common_error_inject(osi_core, error_code); + break; + } + return ret; } #endif @@ -2764,9 +1746,9 @@ static int mgbe_hsi_configure(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_configure_mac(struct osi_core_priv_data *osi_core) +static nve32_t mgbe_configure_mac(struct osi_core_priv_data *osi_core) { - unsigned int value = 0U, max_queue = 0U, i = 0U; + nveu32_t value = 0U, max_queue = 0U, i = 0U; /* TODO: Need to check if we need to enable anything in Tx configuration * value = osi_readla(osi_core, @@ -2780,14 +1762,14 @@ static int mgbe_configure_mac(struct osi_core_priv_data *osi_core) value |= MGBE_MAC_RMCR_ACS | MGBE_MAC_RMCR_CST | MGBE_MAC_RMCR_IPC; /* Jumbo Packet Enable */ - if (osi_core->mtu > OSI_DFLT_MTU_SIZE && - osi_core->mtu <= OSI_MTU_SIZE_9000) { + if ((osi_core->mtu > OSI_DFLT_MTU_SIZE) && + (osi_core->mtu <= OSI_MTU_SIZE_9000)) { value |= MGBE_MAC_RMCR_JE; } else if (osi_core->mtu > OSI_MTU_SIZE_9000){ /* if MTU greater 9K use GPSLCE */ value |= MGBE_MAC_RMCR_GPSLCE | MGBE_MAC_RMCR_WD; value &= ~MGBE_MAC_RMCR_GPSL_MSK; - value |= ((OSI_MAX_MTU_SIZE << 16) & MGBE_MAC_RMCR_GPSL_MSK); + value |= ((((nveu32_t)OSI_MAX_MTU_SIZE) << 16U) & MGBE_MAC_RMCR_GPSL_MSK); } else { value &= ~MGBE_MAC_RMCR_JE; value &= ~MGBE_MAC_RMCR_GPSLCE; @@ -2795,10 +1777,10 @@ static int mgbe_configure_mac(struct osi_core_priv_data *osi_core) } osi_writela(osi_core, value, - (unsigned char *)osi_core->base + MGBE_MAC_RMCR); + (nveu8_t *)osi_core->base + MGBE_MAC_RMCR); value = osi_readla(osi_core, - (unsigned char *)osi_core->base + MGBE_MAC_TMCR); + (nveu8_t *)osi_core->base + MGBE_MAC_TMCR); /* DDIC bit set is needed to improve MACSEC Tput */ value |= MGBE_MAC_TMCR_DDIC; /* Jabber Disable */ @@ -2806,11 +1788,11 @@ static int mgbe_configure_mac(struct osi_core_priv_data *osi_core) value |= MGBE_MAC_TMCR_JD; } osi_writela(osi_core, value, - (unsigned char *)osi_core->base + MGBE_MAC_TMCR); + (nveu8_t *)osi_core->base + MGBE_MAC_TMCR); /* Enable Multicast and Broadcast Queue */ value = osi_readla(osi_core, - (unsigned char *)osi_core->base + MGBE_MAC_RQC1R); + (nveu8_t *)osi_core->base + MGBE_MAC_RQC1R); value |= 
MGBE_MAC_RQC1R_MCBCQEN; /* Set MCBCQ to highest enabled RX queue index */ for (i = 0; i < osi_core->num_mtl_queues; i++) { @@ -2823,7 +1805,7 @@ static int mgbe_configure_mac(struct osi_core_priv_data *osi_core) value &= ~(MGBE_MAC_RQC1R_MCBCQ); value |= (max_queue << MGBE_MAC_RQC1R_MCBCQ_SHIFT); osi_writela(osi_core, value, - (unsigned char *)osi_core->base + MGBE_MAC_RQC1R); + (nveu8_t *)osi_core->base + MGBE_MAC_RQC1R); /* Disable all MMC nve32_terrupts */ /* Disable all MMC Tx nve32_terrupts */ @@ -2847,19 +1829,22 @@ static int mgbe_configure_mac(struct osi_core_priv_data *osi_core) /* RGSMIIIM - RGMII/SMII interrupt and TSIE Enable */ /* TXESIE - Transmit Error Status Interrupt Enable */ /* TODO: LPI need to be enabled during EEE implementation */ - value |= (MGBE_IMR_RGSMIIIE | MGBE_IMR_TSIE | MGBE_IMR_TXESIE); +#ifndef OSI_STRIPPED_LIB + value |= (MGBE_IMR_TXESIE); +#endif + value |= (MGBE_IMR_RGSMIIIE | MGBE_IMR_TSIE); osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MAC_IER); /* Enable common interrupt at wrapper level */ - value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_WRAP_COMMON_INTR_ENABLE); value |= MGBE_MAC_SBD_INTR; - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_WRAP_COMMON_INTR_ENABLE); /* Enable VLAN configuration */ value = osi_readla(osi_core, - (unsigned char *)osi_core->base + MGBE_MAC_VLAN_TR); + (nveu8_t *)osi_core->base + MGBE_MAC_VLAN_TR); /* Enable VLAN Tag in RX Status * Disable double VLAN Tag processing on TX and RX */ @@ -2869,17 +1854,18 @@ static int mgbe_configure_mac(struct osi_core_priv_data *osi_core) } value |= MGBE_MAC_VLANTR_EVLRXS | MGBE_MAC_VLANTR_DOVLTC; osi_writela(osi_core, value, - (unsigned char *)osi_core->base + MGBE_MAC_VLAN_TR); + (nveu8_t *)osi_core->base + MGBE_MAC_VLAN_TR); value = osi_readla(osi_core, - (unsigned char *)osi_core->base + 
MGBE_MAC_VLANTIR); + (nveu8_t *)osi_core->base + MGBE_MAC_VLANTIR); /* Enable VLAN tagging through context descriptor */ value |= MGBE_MAC_VLANTIR_VLTI; /* insert/replace C_VLAN in 13th & 14th bytes of transmitted frames */ value &= ~MGBE_MAC_VLANTIRR_CSVL; osi_writela(osi_core, value, - (unsigned char *)osi_core->base + MGBE_MAC_VLANTIR); + (nveu8_t *)osi_core->base + MGBE_MAC_VLANTIR); +#ifndef OSI_STRIPPED_LIB /* Configure default flow control settings */ if (osi_core->pause_frames == OSI_PAUSE_FRAMES_ENABLE) { osi_core->flow_ctrl = (OSI_FLOW_CTRL_TX | OSI_FLOW_CTRL_RX); @@ -2893,7 +1879,10 @@ static int mgbe_configure_mac(struct osi_core_priv_data *osi_core) /* TODO: USP (user Priority) to RxQ Mapping */ /* RSS cofiguration */ - return mgbe_config_rss(osi_core); + mgbe_config_rss(osi_core); +#endif /* !OSI_STRIPPED_LIB */ + + return 0; } /** @@ -2909,8 +1898,7 @@ static int mgbe_configure_mac(struct osi_core_priv_data *osi_core) * * @note MAC has to be out of reset. */ -static void mgbe_configure_dma(struct osi_core_priv_data *osi_core, - nveu32_t pre_si) +static void mgbe_configure_dma(struct osi_core_priv_data *osi_core) { nveu32_t value = 0; @@ -2931,308 +1919,18 @@ static void mgbe_configure_dma(struct osi_core_priv_data *osi_core, /* Configure TDPS to 5 */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_DMA_TX_EDMA_CTRL); - if (pre_si == OSI_ENABLE) { - /* For Pre silicon TDPS Value is 3 */ - value |= MGBE_DMA_TX_EDMA_CTRL_TDPS_PRESI; - } else { - value |= MGBE_DMA_TX_EDMA_CTRL_TDPS; - } + value |= MGBE_DMA_TX_EDMA_CTRL_TDPS; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_DMA_TX_EDMA_CTRL); /* Configure RDPS to 5 */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_DMA_RX_EDMA_CTRL); - if (pre_si == OSI_ENABLE) { - /* For Pre silicon RDPS Value is 3 */ - value |= MGBE_DMA_RX_EDMA_CTRL_RDPS_PRESI; - } else { - value |= MGBE_DMA_RX_EDMA_CTRL_RDPS; - } + value |= MGBE_DMA_RX_EDMA_CTRL_RDPS; osi_writela(osi_core, value, 
(nveu8_t *)osi_core->base + MGBE_DMA_RX_EDMA_CTRL); } -/** - * @brief Initialize the osi_core->backup_config. - * - * Algorithm: Populate the list of core registers to be saved during suspend. - * Fill the address of each register in structure. - * - * @param[in] osi_core: OSI core private data structure. - * - * @retval none - */ -static void mgbe_core_backup_init(struct osi_core_priv_data *const osi_core) -{ - struct core_backup *config = &osi_core->backup_config; - unsigned char *base = (unsigned char *)osi_core->base; - unsigned int i; - - /* MAC registers backup */ - config->reg_addr[MGBE_MAC_TMCR_BAK_IDX] = base + MGBE_MAC_TMCR; - config->reg_addr[MGBE_MAC_RMCR_BAK_IDX] = base + MGBE_MAC_RMCR; - config->reg_addr[MGBE_MAC_PFR_BAK_IDX] = base + MGBE_MAC_PFR; - config->reg_addr[MGBE_MAC_VLAN_TAG_BAK_IDX] = base + - MGBE_MAC_VLAN_TR; - config->reg_addr[MGBE_MAC_VLANTIR_BAK_IDX] = base + MGBE_MAC_VLANTIR; - config->reg_addr[MGBE_MAC_RX_FLW_CTRL_BAK_IDX] = base + - MGBE_MAC_RX_FLW_CTRL; - config->reg_addr[MGBE_MAC_RQC0R_BAK_IDX] = base + MGBE_MAC_RQC0R; - config->reg_addr[MGBE_MAC_RQC1R_BAK_IDX] = base + MGBE_MAC_RQC1R; - config->reg_addr[MGBE_MAC_RQC2R_BAK_IDX] = base + MGBE_MAC_RQC2R; - config->reg_addr[MGBE_MAC_ISR_BAK_IDX] = base + MGBE_MAC_ISR; - config->reg_addr[MGBE_MAC_IER_BAK_IDX] = base + MGBE_MAC_IER; - config->reg_addr[MGBE_MAC_PMTCSR_BAK_IDX] = base + MGBE_MAC_PMTCSR; - config->reg_addr[MGBE_MAC_LPI_CSR_BAK_IDX] = base + MGBE_MAC_LPI_CSR; - config->reg_addr[MGBE_MAC_LPI_TIMER_CTRL_BAK_IDX] = base + - MGBE_MAC_LPI_TIMER_CTRL; - config->reg_addr[MGBE_MAC_LPI_EN_TIMER_BAK_IDX] = base + - MGBE_MAC_LPI_EN_TIMER; - config->reg_addr[MGBE_MAC_TCR_BAK_IDX] = base + MGBE_MAC_TCR; - config->reg_addr[MGBE_MAC_SSIR_BAK_IDX] = base + MGBE_MAC_SSIR; - config->reg_addr[MGBE_MAC_STSR_BAK_IDX] = base + MGBE_MAC_STSR; - config->reg_addr[MGBE_MAC_STNSR_BAK_IDX] = base + MGBE_MAC_STNSR; - config->reg_addr[MGBE_MAC_STSUR_BAK_IDX] = base + MGBE_MAC_STSUR; - 
config->reg_addr[MGBE_MAC_STNSUR_BAK_IDX] = base + MGBE_MAC_STNSUR; - config->reg_addr[MGBE_MAC_TAR_BAK_IDX] = base + MGBE_MAC_TAR; - config->reg_addr[MGBE_DMA_BMR_BAK_IDX] = base + MGBE_DMA_MODE; - config->reg_addr[MGBE_DMA_SBUS_BAK_IDX] = base + MGBE_DMA_SBUS; - config->reg_addr[MGBE_DMA_ISR_BAK_IDX] = base + MGBE_DMA_ISR; - config->reg_addr[MGBE_MTL_OP_MODE_BAK_IDX] = base + MGBE_MTL_OP_MODE; - config->reg_addr[MGBE_MTL_RXQ_DMA_MAP0_BAK_IDX] = base + - MGBE_MTL_RXQ_DMA_MAP0; - - for (i = 0; i < MGBE_MAX_HTR_REGS; i++) { - config->reg_addr[MGBE_MAC_HTR_REG_BAK_IDX(i)] = base + - MGBE_MAC_HTR_REG(i); - } - for (i = 0; i < OSI_MGBE_MAX_NUM_QUEUES; i++) { - config->reg_addr[MGBE_MAC_QX_TX_FLW_CTRL_BAK_IDX(i)] = base + - MGBE_MAC_QX_TX_FLW_CTRL(i); - } - for (i = 0; i < OSI_MGBE_MAX_MAC_ADDRESS_FILTER; i++) { - config->reg_addr[MGBE_MAC_ADDRH_BAK_IDX(i)] = base + - MGBE_MAC_ADDRH(i); - config->reg_addr[MGBE_MAC_ADDRL_BAK_IDX(i)] = base + - MGBE_MAC_ADDRL(i); - } - for (i = 0; i < OSI_MGBE_MAX_NUM_QUEUES; i++) { - config->reg_addr[MGBE_MTL_CHX_TX_OP_MODE_BAK_IDX(i)] = base + - MGBE_MTL_CHX_TX_OP_MODE(i); - config->reg_addr[MGBE_MTL_CHX_RX_OP_MODE_BAK_IDX(i)] = base + - MGBE_MTL_CHX_RX_OP_MODE(i); - } - for (i = 0; i < OSI_MAX_TC_NUM; i++) { - config->reg_addr[MGBE_MTL_TXQ_ETS_CR_BAK_IDX(i)] = base + - MGBE_MTL_TCQ_ETS_CR(i); - config->reg_addr[MGBE_MTL_TXQ_QW_BAK_IDX(i)] = base + - MGBE_MTL_TCQ_QW(i); - config->reg_addr[MGBE_MTL_TXQ_ETS_SSCR_BAK_IDX(i)] = base + - MGBE_MTL_TCQ_ETS_SSCR(i); - config->reg_addr[MGBE_MTL_TXQ_ETS_HCR_BAK_IDX(i)] = base + - MGBE_MTL_TCQ_ETS_HCR(i); - config->reg_addr[MGBE_MTL_TXQ_ETS_LCR_BAK_IDX(i)] = base + - MGBE_MTL_TCQ_ETS_LCR(i); - } - - /* TODO: Add wrapper register backup */ -} - -/** - * @brief mgbe_enable_mtl_interrupts - Enable MTL interrupts - * - * Algorithm: enable MTL interrupts for EST - * - * @param[in] osi_core: OSI core private data structure. - * - * @note MAC should be init and started. 
see osi_start_mac() - */ -static inline void mgbe_enable_mtl_interrupts( - struct osi_core_priv_data *osi_core) -{ - unsigned int mtl_est_ir = OSI_DISABLE; - - mtl_est_ir = osi_readla(osi_core, (unsigned char *) - osi_core->base + MGBE_MTL_EST_ITRE); - /* enable only MTL interrupt realted to - * Constant Gate Control Error - * Head-Of-Line Blocking due to Scheduling - * Head-Of-Line Blocking due to Frame Size - * BTR Error - * Switch to S/W owned list Complete - */ - mtl_est_ir |= (MGBE_MTL_EST_ITRE_CGCE | MGBE_MTL_EST_ITRE_IEHS | - MGBE_MTL_EST_ITRE_IEHF | MGBE_MTL_EST_ITRE_IEBE | - MGBE_MTL_EST_ITRE_IECC); - osi_writela(osi_core, mtl_est_ir, - (unsigned char *)osi_core->base + MGBE_MTL_EST_ITRE); -} - -/** - * @brief mgbe_enable_fpe_interrupts - Enable MTL interrupts - * - * Algorithm: enable FPE interrupts - * - * @param[in] osi_core: OSI core private data structure. - * - * @note MAC should be init and started. see osi_start_mac() - */ -static inline void mgbe_enable_fpe_interrupts( - struct osi_core_priv_data *osi_core) -{ - unsigned int value = OSI_DISABLE; - - /* Read MAC IER Register and enable Frame Preemption Interrupt - * Enable */ - value = osi_readla(osi_core, (unsigned char *) - osi_core->base + MGBE_MAC_IER); - value |= MGBE_IMR_FPEIE; - osi_writela(osi_core, value, (unsigned char *) - osi_core->base + MGBE_MAC_IER); -} - -/** - * @brief mgbe_save_gcl_params - save GCL configs in local core structure - * - * @param[in] osi_core: OSI core private data structure. - * - * @note MAC should be init and started. 
see osi_start_mac() - */ -static inline void mgbe_save_gcl_params(struct osi_core_priv_data *osi_core) -{ - struct core_local *l_core = (struct core_local *)osi_core; - unsigned int gcl_widhth[4] = {0, OSI_MAX_24BITS, OSI_MAX_28BITS, - OSI_MAX_32BITS}; - nveu32_t gcl_ti_mask[4] = {0, OSI_MASK_16BITS, OSI_MASK_20BITS, - OSI_MASK_24BITS}; - unsigned int gcl_depthth[6] = {0, OSI_GCL_SIZE_64, OSI_GCL_SIZE_128, - OSI_GCL_SIZE_256, OSI_GCL_SIZE_512, - OSI_GCL_SIZE_1024}; - - if (osi_core->hw_feature->gcl_width == 0 || - osi_core->hw_feature->gcl_width > 3) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Wrong HW feature GCL width\n", - (unsigned long long)osi_core->hw_feature->gcl_width); - } else { - l_core->gcl_width_val = - gcl_widhth[osi_core->hw_feature->gcl_width]; - l_core->ti_mask = gcl_ti_mask[osi_core->hw_feature->gcl_width]; - } - - if (osi_core->hw_feature->gcl_depth == 0 || - osi_core->hw_feature->gcl_depth > 5) { - /* Do Nothing */ - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Wrong HW feature GCL depth\n", - (unsigned long long)osi_core->hw_feature->gcl_depth); - } else { - l_core->gcl_dep = gcl_depthth[osi_core->hw_feature->gcl_depth]; - } -} - -/** - * @brief mgbe_tsn_init - initialize TSN feature - * - * Algorithm: - * 1) If hardware support EST, - * a) Set default EST configuration - * b) Set enable interrupts - * 2) If hardware supports FPE - * a) Set default FPE configuration - * b) enable interrupts - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] est_sel: EST HW support present or not - * @param[in] fpe_sel: FPE HW support present or not - * - * @note MAC should be init and started. 
see osi_start_mac() - */ -static void mgbe_tsn_init(struct osi_core_priv_data *osi_core, - unsigned int est_sel, unsigned int fpe_sel) -{ - unsigned int val = 0x0; - unsigned int temp = 0U; - - if (est_sel == OSI_ENABLE) { - mgbe_save_gcl_params(osi_core); - val = osi_readla(osi_core, (unsigned char *)osi_core->base + - MGBE_MTL_EST_CONTROL); - - /* - * PTOV PTP clock period * 6 - * dual-port RAM based asynchronous FIFO controllers or - * Single-port RAM based synchronous FIFO controllers - * CTOV 96 x Tx clock period - * : - * : - * set other default value - */ - val &= ~MGBE_MTL_EST_CONTROL_PTOV; - if (osi_core->pre_si == OSI_ENABLE) { - /* 6*1/(78.6 MHz) in ns*/ - temp = (6U * 13U); - } else { - temp = MGBE_MTL_EST_PTOV_RECOMMEND; - } - temp = temp << MGBE_MTL_EST_CONTROL_PTOV_SHIFT; - val |= temp; - - val &= ~MGBE_MTL_EST_CONTROL_CTOV; - temp = MGBE_MTL_EST_CTOV_RECOMMEND; - temp = temp << MGBE_MTL_EST_CONTROL_CTOV_SHIFT; - val |= temp; - - /*Loop Count to report Scheduling Error*/ - val &= ~MGBE_MTL_EST_CONTROL_LCSE; - val |= MGBE_MTL_EST_CONTROL_LCSE_VAL; - - val &= ~MGBE_MTL_EST_CONTROL_DDBF; - val |= MGBE_MTL_EST_CONTROL_DDBF; - osi_writela(osi_core, val, (unsigned char *)osi_core->base + - MGBE_MTL_EST_CONTROL); - - val = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_MTL_EST_OVERHEAD); - val &= ~MGBE_MTL_EST_OVERHEAD_OVHD; - /* As per hardware programming info */ - val |= MGBE_MTL_EST_OVERHEAD_RECOMMEND; - osi_writela(osi_core, val, (nveu8_t *)osi_core->base + - MGBE_MTL_EST_OVERHEAD); - - mgbe_enable_mtl_interrupts(osi_core); - } - - if (fpe_sel == OSI_ENABLE) { - val = osi_readla(osi_core, (unsigned char *)osi_core->base + - MGBE_MAC_RQC1R); - val &= ~MGBE_MAC_RQC1R_RQ; - temp = osi_core->residual_queue; - temp = temp << MGBE_MAC_RQC1R_RQ_SHIFT; - temp = (temp & MGBE_MAC_RQC1R_RQ); - val |= temp; - osi_writela(osi_core, val, (unsigned char *)osi_core->base + - MGBE_MAC_RQC1R); - - val = osi_readla(osi_core, (nveu8_t *)osi_core->base + - 
MGBE_MAC_RQC4R); - val &= ~MGBE_MAC_RQC4R_PMCBCQ; - temp = osi_core->residual_queue; - temp = temp << MGBE_MAC_RQC4R_PMCBCQ_SHIFT; - temp = (temp & MGBE_MAC_RQC4R_PMCBCQ); - val |= temp; - osi_writela(osi_core, val, (nveu8_t *)osi_core->base + - MGBE_MAC_RQC4R); - - mgbe_enable_fpe_interrupts(osi_core); - } - - /* CBS setting for TC or TXQ for default configuration - user application should use IOCTL to set CBS as per requirement - */ -} - /** * @brief Map DMA channels to a specific VM IRQ. * @@ -3246,7 +1944,9 @@ static void mgbe_tsn_init(struct osi_core_priv_data *osi_core, */ static nve32_t mgbe_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) { +#ifndef OSI_STRIPPED_LIB nveu32_t sid[4] = { MGBE0_SID, MGBE1_SID, MGBE2_SID, MGBE3_SID }; +#endif struct osi_vm_irq_data *irq_data; nveu32_t i, j; nveu32_t chan; @@ -3269,6 +1969,7 @@ static nve32_t mgbe_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) (nveu8_t *)osi_core->base + MGBE_VIRTUAL_APB_ERR_CTRL); } +#ifndef OSI_STRIPPED_LIB if ((osi_core->use_virtualization == OSI_DISABLE) && (osi_core->hv_base != OSI_NULL)) { if (osi_core->instance_id > 3U) { @@ -3290,7 +1991,7 @@ static nve32_t mgbe_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) (nveu8_t *)osi_core->hv_base + MGBE_WRAP_AXI_ASID2_CTRL); } - +#endif return 0; } @@ -3302,8 +2003,6 @@ static nve32_t mgbe_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) * common DMA registers. * * @param[in] osi_core: OSI core private data structure. - * @param[in] tx_fifo_size: MTL TX FIFO size - * @param[in] rx_fifo_size: MTL RX FIFO size * * @note 1) MAC should be out of reset. See osi_poll_for_swr() for details. * 2) osi_core->base needs to be filled based on ioremap. @@ -3313,17 +2012,11 @@ static nve32_t mgbe_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) * @retval 0 on success * @retval -1 on failure. 
*/ -static nve32_t mgbe_core_init(struct osi_core_priv_data *osi_core, - nveu32_t tx_fifo_size, - nveu32_t rx_fifo_size) +static nve32_t mgbe_core_init(struct osi_core_priv_data *const osi_core) { nve32_t ret = 0; nveu32_t qinx = 0; nveu32_t value = 0; - nveu32_t tx_fifo = 0; - nveu32_t rx_fifo = 0; - - mgbe_core_backup_init(osi_core); /* reset mmc counters */ osi_writela(osi_core, MGBE_MMC_CNTRL_CNTRST, (nveu8_t *)osi_core->base + @@ -3334,21 +2027,21 @@ static nve32_t mgbe_core_init(struct osi_core_priv_data *osi_core, MGBE_MTL_RXQ_DMA_MAP0); value |= MGBE_RXQ_TO_DMA_CHAN_MAP0; value |= MGBE_RXQ_TO_DMA_MAP_DDMACH; - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_RXQ_DMA_MAP0); value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_RXQ_DMA_MAP1); value |= MGBE_RXQ_TO_DMA_CHAN_MAP1; value |= MGBE_RXQ_TO_DMA_MAP_DDMACH; - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_RXQ_DMA_MAP1); value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_RXQ_DMA_MAP2); value |= MGBE_RXQ_TO_DMA_CHAN_MAP2; value |= MGBE_RXQ_TO_DMA_MAP_DDMACH; - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_RXQ_DMA_MAP2); /* Enable XDCS in MAC_Extended_Configuration */ @@ -3358,50 +2051,41 @@ static nve32_t mgbe_core_init(struct osi_core_priv_data *osi_core, osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MAC_EXT_CNF); - if (osi_core->pre_si == OSI_ENABLE) { - /* For pre silicon Tx and Rx Queue sizes are 64KB */ - tx_fifo_size = MGBE_TX_FIFO_SIZE_64KB; - rx_fifo_size = MGBE_RX_FIFO_SIZE_64KB; - } else { - /* Actual HW RAM size for Tx is 128KB and Rx is 192KB */ - tx_fifo_size = MGBE_TX_FIFO_SIZE_128KB; - rx_fifo_size = MGBE_RX_FIFO_SIZE_192KB; - } - - /* Calculate value of Transmit queue fifo size to be programmed */ - 
tx_fifo = mgbe_calculate_per_queue_fifo(tx_fifo_size, - osi_core->num_mtl_queues); - - /* Calculate value of Receive queue fifo size to be programmed */ - rx_fifo = mgbe_calculate_per_queue_fifo(rx_fifo_size, - osi_core->num_mtl_queues); - /* Configure MTL Queues */ /* TODO: Iterate over Number MTL queues need to be removed */ for (qinx = 0; qinx < osi_core->num_mtl_queues; qinx++) { - ret = mgbe_configure_mtl_queue(osi_core->mtl_queues[qinx], - osi_core, tx_fifo, rx_fifo); + ret = mgbe_configure_mtl_queue(osi_core, osi_core->mtl_queues[qinx]); if (ret < 0) { - return ret; + goto fail; + } + /* Enable by default to configure forward error packets. + * Since this is a local function this will always return success, + * so no need to check for return value + */ + ret = hw_config_fw_err_pkts(osi_core, osi_core->mtl_queues[qinx], OSI_ENABLE); + if (ret < 0) { + goto fail; } } /* configure MGBE MAC HW */ ret = mgbe_configure_mac(osi_core); if (ret < 0) { - return ret; + goto fail; } /* configure MGBE DMA */ - mgbe_configure_dma(osi_core, osi_core->pre_si); + mgbe_configure_dma(osi_core); /* tsn initialization */ if (osi_core->hw_feature != OSI_NULL) { - mgbe_tsn_init(osi_core, osi_core->hw_feature->est_sel, - osi_core->hw_feature->fpe_sel); + hw_tsn_init(osi_core, osi_core->hw_feature->est_sel, + osi_core->hw_feature->fpe_sel); } - return mgbe_dma_chan_to_vmirq_map(osi_core); + ret = mgbe_dma_chan_to_vmirq_map(osi_core); +fail: + return ret; } /** @@ -3417,10 +2101,10 @@ static nve32_t mgbe_core_init(struct osi_core_priv_data *osi_core, */ static void mgbe_handle_mac_fpe_intrs(struct osi_core_priv_data *osi_core) { - unsigned int val = 0; + nveu32_t val = 0; /* interrupt bit clear on read as CSR_SW is reset */ - val = osi_readla(osi_core, (unsigned char *) + val = osi_readla(osi_core, (nveu8_t *) osi_core->base + MGBE_MAC_FPE_CTS); if ((val & MGBE_MAC_FPE_CTS_RVER) == MGBE_MAC_FPE_CTS_RVER) {
osi_core_priv_data *osi_core) val &= ~MGBE_MAC_FPE_CTS_EFPE; } - osi_writela(osi_core, val, (unsigned char *) + osi_writela(osi_core, val, (nveu8_t *) osi_core->base + MGBE_MAC_FPE_CTS); } @@ -3487,87 +2171,115 @@ static inline nveu32_t get_free_ts_idx(struct core_local *l_core) * MAC nve32_terrupts which includes speed, mode detection. * * @param[in] osi_core: OSI core private data structure. - * @param[in] dma_isr: DMA ISR register read value. * * @note MAC nve32_terrupts need to be enabled */ -static void mgbe_handle_mac_intrs(struct osi_core_priv_data *osi_core, - nveu32_t dma_isr) +static void mgbe_handle_mac_intrs(struct osi_core_priv_data *osi_core) { - struct core_local *l_core = (struct core_local *)osi_core; + struct core_local *l_core = (struct core_local *)(void *)osi_core; nveu32_t mac_isr = 0; nveu32_t mac_ier = 0; nveu32_t tx_errors = 0; + nveu8_t *base = (nveu8_t *)osi_core->base; +#ifdef HSI_SUPPORT + nveu64_t tx_frame_err = 0; +#endif - mac_isr = osi_readla(osi_core, - (unsigned char *)osi_core->base + MGBE_MAC_ISR); - /* Handle MAC interrupts */ - if ((dma_isr & MGBE_DMA_ISR_MACIS) != MGBE_DMA_ISR_MACIS) { - return; + mac_isr = osi_readla(osi_core, base + MGBE_MAC_ISR); + + /* Check for Link status change interrupt */ + if ((mac_isr & MGBE_MAC_ISR_LSI) == OSI_ENABLE) { + /* For Local fault need to stop network data and restart the LANE bringup */ + if ((mac_isr & MGBE_MAC_ISR_LS_MASK) == MGBE_MAC_ISR_LS_LOCAL_FAULT) { + osi_core->osd_ops.restart_lane_bringup(osi_core->osd, OSI_DISABLE); + } else if ((mac_isr & MGBE_MAC_ISR_LS_MASK) == MGBE_MAC_ISR_LS_LINK_OK) { + osi_core->osd_ops.restart_lane_bringup(osi_core->osd, OSI_ENABLE); + } else { + /* Do Nothing */ + } } - mac_ier = osi_readla(osi_core, - (unsigned char *)osi_core->base + MGBE_MAC_IER); + mac_ier = osi_readla(osi_core, base + MGBE_MAC_IER); if (((mac_isr & MGBE_MAC_IMR_FPEIS) == MGBE_MAC_IMR_FPEIS) && ((mac_ier & MGBE_IMR_FPEIE) == MGBE_IMR_FPEIE)) { mgbe_handle_mac_fpe_intrs(osi_core); 
- mac_isr &= ~MGBE_MAC_IMR_FPEIS; } + /* Check for any MAC Transmit Error Status Interrupt */ if ((mac_isr & MGBE_IMR_TXESIE) == MGBE_IMR_TXESIE) { /* Check for the type of Tx error by reading MAC_Rx_Tx_Status * register */ - tx_errors = osi_readl((unsigned char *)osi_core->base + - MGBE_MAC_RX_TX_STS); + tx_errors = osi_readl(base + MGBE_MAC_RX_TX_STS); +#ifndef OSI_STRIPPED_LIB if ((tx_errors & MGBE_MAC_TX_TJT) == MGBE_MAC_TX_TJT) { /* increment Tx Jabber timeout stats */ - osi_core->pkt_err_stats.mgbe_jabber_timeout_err = + osi_core->stats.mgbe_jabber_timeout_err = osi_update_stats_counter( - osi_core->pkt_err_stats.mgbe_jabber_timeout_err, - 1UL); + osi_core->stats.mgbe_jabber_timeout_err, + 1UL); } if ((tx_errors & MGBE_MAC_TX_IHE) == MGBE_MAC_TX_IHE) { /* IP Header Error */ - osi_core->pkt_err_stats.mgbe_ip_header_err = + osi_core->stats.mgbe_ip_header_err = osi_update_stats_counter( - osi_core->pkt_err_stats.mgbe_ip_header_err, - 1UL); + osi_core->stats.mgbe_ip_header_err, + 1UL); } if ((tx_errors & MGBE_MAC_TX_PCE) == MGBE_MAC_TX_PCE) { /* Payload Checksum error */ - osi_core->pkt_err_stats.mgbe_payload_cs_err = + osi_core->stats.mgbe_payload_cs_err = osi_update_stats_counter( - osi_core->pkt_err_stats.mgbe_payload_cs_err, - 1UL); + osi_core->stats.mgbe_payload_cs_err, + 1UL); } +#endif /* !OSI_STRIPPED_LIB */ + +#ifdef HSI_SUPPORT + tx_errors &= (MGBE_MAC_TX_TJT | MGBE_MAC_TX_IHE | MGBE_MAC_TX_PCE); + if (tx_errors != OSI_NONE) { + osi_core->hsi.tx_frame_err_count = + osi_update_stats_counter( + osi_core->hsi.tx_frame_err_count, 1UL); + tx_frame_err = osi_core->hsi.tx_frame_err_count / + osi_core->hsi.err_count_threshold; + if (osi_core->hsi.tx_frame_err_threshold < + tx_frame_err) { + osi_core->hsi.tx_frame_err_threshold = tx_frame_err; + osi_core->hsi.report_count_err[TX_FRAME_ERR_IDX] = OSI_ENABLE; + } + osi_core->hsi.err_code[TX_FRAME_ERR_IDX] = OSI_TX_FRAME_ERR; + osi_core->hsi.report_err = OSI_ENABLE; + } +#endif } - osi_writela(osi_core, mac_isr, - 
(unsigned char *)osi_core->base + MGBE_MAC_ISR); if ((mac_isr & MGBE_ISR_TSIS) == MGBE_ISR_TSIS) { struct osi_core_tx_ts *head = &l_core->tx_ts_head; if (__sync_fetch_and_add(&l_core->ts_lock, 1) == 1U) { /* mask return as initial value is returned always */ (void)__sync_fetch_and_sub(&l_core->ts_lock, 1); - osi_core->xstats.ts_lock_add_fail = - osi_update_stats_counter( - osi_core->xstats.ts_lock_add_fail, 1U); +#ifndef OSI_STRIPPED_LIB + osi_core->stats.ts_lock_add_fail = + osi_update_stats_counter(osi_core->stats.ts_lock_add_fail, 1U); +#endif /* !OSI_STRIPPED_LIB */ goto done; } /* TXTSC bit should get reset when all timestamp read */ - while (((osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_MAC_TSS) & - MGBE_MAC_TSS_TXTSC) == MGBE_MAC_TSS_TXTSC)) { + while (((osi_readla(osi_core, base + MGBE_MAC_TSS) & + MGBE_MAC_TSS_TXTSC) == MGBE_MAC_TSS_TXTSC)) { nveu32_t i = get_free_ts_idx(l_core); if (i == MAX_TX_TS_CNT) { struct osi_core_tx_ts *temp = l_core->tx_ts_head.next; - /* Remove oldest stale TS from list to make space for new TS */ + /* Remove oldest stale TS from list to make + * space for new TS + */ OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID, - "Removing TS from queue pkt_id\n", temp->pkt_id); + "Removing TS from queue pkt_id\n", + temp->pkt_id); temp->in_use = OSI_DISABLE; /* remove temp node from the link */ @@ -3576,22 +2288,16 @@ static void mgbe_handle_mac_intrs(struct osi_core_priv_data *osi_core, i = get_free_ts_idx(l_core); if (i == MAX_TX_TS_CNT) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, - "TS queue is full\n", i); + "TS queue is full\n", i); break; } } - l_core->ts[i].nsec = osi_readla(osi_core, - (nveu8_t *)osi_core->base + - MGBE_MAC_TSNSSEC); + l_core->ts[i].nsec = osi_readla(osi_core, base + MGBE_MAC_TSNSSEC); l_core->ts[i].in_use = OSI_ENABLE; - l_core->ts[i].pkt_id = osi_readla(osi_core, - (nveu8_t *)osi_core->base + - MGBE_MAC_TSPKID); - l_core->ts[i].sec = osi_readla(osi_core, - (nveu8_t *)osi_core->base + - 
MGBE_MAC_TSSEC); + l_core->ts[i].pkt_id = osi_readla(osi_core, base + MGBE_MAC_TSPKID); + l_core->ts[i].sec = osi_readla(osi_core, base + MGBE_MAC_TSSEC); /* Add time stamp to end of list */ l_core->ts[i].next = head->prev->next; head->prev->next = &l_core->ts[i]; @@ -3603,13 +2309,10 @@ static void mgbe_handle_mac_intrs(struct osi_core_priv_data *osi_core, (void)__sync_fetch_and_sub(&l_core->ts_lock, 1); } done: - mac_isr &= ~MGBE_ISR_TSIS; - - osi_writela(osi_core, mac_isr, - (unsigned char *)osi_core->base + MGBE_MAC_ISR); - /* TODO: Duplex/speed settigs - Its not same as EQOS for MGBE */ + return; } +#ifndef OSI_STRIPPED_LIB /** * @brief mgbe_update_dma_sr_stats - stats for dma_status error * @@ -3625,31 +2328,32 @@ static inline void mgbe_update_dma_sr_stats(struct osi_core_priv_data *osi_core, nveu64_t val; if ((dma_sr & MGBE_DMA_CHX_STATUS_RBU) == MGBE_DMA_CHX_STATUS_RBU) { - val = osi_core->xstats.rx_buf_unavail_irq_n[qinx]; - osi_core->xstats.rx_buf_unavail_irq_n[qinx] = + val = osi_core->stats.rx_buf_unavail_irq_n[qinx]; + osi_core->stats.rx_buf_unavail_irq_n[qinx] = osi_update_stats_counter(val, 1U); } if ((dma_sr & MGBE_DMA_CHX_STATUS_TPS) == MGBE_DMA_CHX_STATUS_TPS) { - val = osi_core->xstats.tx_proc_stopped_irq_n[qinx]; - osi_core->xstats.tx_proc_stopped_irq_n[qinx] = + val = osi_core->stats.tx_proc_stopped_irq_n[qinx]; + osi_core->stats.tx_proc_stopped_irq_n[qinx] = osi_update_stats_counter(val, 1U); } if ((dma_sr & MGBE_DMA_CHX_STATUS_TBU) == MGBE_DMA_CHX_STATUS_TBU) { - val = osi_core->xstats.tx_buf_unavail_irq_n[qinx]; - osi_core->xstats.tx_buf_unavail_irq_n[qinx] = + val = osi_core->stats.tx_buf_unavail_irq_n[qinx]; + osi_core->stats.tx_buf_unavail_irq_n[qinx] = osi_update_stats_counter(val, 1U); } if ((dma_sr & MGBE_DMA_CHX_STATUS_RPS) == MGBE_DMA_CHX_STATUS_RPS) { - val = osi_core->xstats.rx_proc_stopped_irq_n[qinx]; - osi_core->xstats.rx_proc_stopped_irq_n[qinx] = + val = osi_core->stats.rx_proc_stopped_irq_n[qinx]; + 
osi_core->stats.rx_proc_stopped_irq_n[qinx] = osi_update_stats_counter(val, 1U); } if ((dma_sr & MGBE_DMA_CHX_STATUS_FBE) == MGBE_DMA_CHX_STATUS_FBE) { - val = osi_core->xstats.fatal_bus_error_irq_n; - osi_core->xstats.fatal_bus_error_irq_n = + val = osi_core->stats.fatal_bus_error_irq_n; + osi_core->stats.fatal_bus_error_irq_n = osi_update_stats_counter(val, 1U); } } +#endif /* !OSI_STRIPPED_LIB */ /** * @brief mgbe_set_avb_algorithm - Set TxQ/TC avb config @@ -3674,65 +2378,65 @@ static inline void mgbe_update_dma_sr_stats(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_set_avb_algorithm( +static nve32_t mgbe_set_avb_algorithm( struct osi_core_priv_data *const osi_core, const struct osi_core_avb_algorithm *const avb) { - unsigned int value; - int ret = -1; - unsigned int qinx = 0U; - unsigned int tcinx = 0U; + nveu32_t value; + nve32_t ret = -1; + nveu32_t qinx = 0U; + nveu32_t tcinx = 0U; if (avb == OSI_NULL) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "avb structure is NULL\n", 0ULL); - return ret; + goto done; } /* queue index in range */ if (avb->qindex >= OSI_MGBE_MAX_NUM_QUEUES) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid Queue index\n", - (unsigned long long)avb->qindex); - return ret; + (nveul64_t)avb->qindex); + goto done; } /* queue oper_mode in range check*/ if (avb->oper_mode >= OSI_MTL_QUEUE_MODEMAX) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid Queue mode\n", - (unsigned long long)avb->qindex); - return ret; + (nveul64_t)avb->qindex); + goto done; } /* Validate algo is valid */ if (avb->algo > OSI_MTL_TXQ_AVALG_CBS) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid Algo input\n", - (unsigned long long)avb->tcindex); - return ret; + (nveul64_t)avb->algo); + goto done; } /* can't set AVB mode for queue 0 */ if ((avb->qindex == 0U) && (avb->oper_mode == OSI_MTL_QUEUE_AVB)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OPNOTSUPP, "Not allowed to set AVB 
for Q0\n", - (unsigned long long)avb->qindex); - return ret; + (nveul64_t)avb->qindex); + goto done; } /* TC index range check */ if ((avb->tcindex == 0U) || (avb->tcindex >= OSI_MAX_TC_NUM)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid Queue TC mapping\n", - (unsigned long long)avb->tcindex); - return ret; + (nveul64_t)avb->tcindex); + goto done; } qinx = avb->qindex; tcinx = avb->tcindex; - value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_CHX_TX_OP_MODE(qinx)); value &= ~MGBE_MTL_TX_OP_MODE_TXQEN; /* Set TXQEN mode as per input struct after masking 3 bit */ @@ -3742,54 +2446,77 @@ static int mgbe_set_avb_algorithm( value &= ~MGBE_MTL_TX_OP_MODE_Q2TCMAP; value |= ((tcinx << MGBE_MTL_TX_OP_MODE_Q2TCMAP_SHIFT) & MGBE_MTL_TX_OP_MODE_Q2TCMAP); - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_CHX_TX_OP_MODE(qinx)); /* Set Algo and Credit control */ - value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_ETS_CR(tcinx)); + value &= ~MGBE_MTL_TCQ_ETS_CR_AVALG; + value &= ~MGBE_MTL_TCQ_ETS_CR_CC; if (avb->algo == OSI_MTL_TXQ_AVALG_CBS) { - value &= ~MGBE_MTL_TCQ_ETS_CR_CC; value |= (avb->credit_control << MGBE_MTL_TCQ_ETS_CR_CC_SHIFT) & MGBE_MTL_TCQ_ETS_CR_CC; + value |= (OSI_MTL_TXQ_AVALG_CBS << MGBE_MTL_TCQ_ETS_CR_AVALG_SHIFT) & + MGBE_MTL_TCQ_ETS_CR_AVALG; + } else { + value |= (OSI_MGBE_TXQ_AVALG_ETS << MGBE_MTL_TCQ_ETS_CR_AVALG_SHIFT) & + MGBE_MTL_TCQ_ETS_CR_AVALG; } - value &= ~MGBE_MTL_TCQ_ETS_CR_AVALG; - value |= (avb->algo << MGBE_MTL_TCQ_ETS_CR_AVALG_SHIFT) & - MGBE_MTL_TCQ_ETS_CR_AVALG; - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_ETS_CR(tcinx)); if (avb->algo == OSI_MTL_TXQ_AVALG_CBS) { /* Set Idle slope credit*/ 
- value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_QW(tcinx)); value &= ~MGBE_MTL_TCQ_ETS_QW_ISCQW_MASK; value |= avb->idle_slope & MGBE_MTL_TCQ_ETS_QW_ISCQW_MASK; - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_QW(tcinx)); /* Set Send slope credit */ - value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_ETS_SSCR(tcinx)); value &= ~MGBE_MTL_TCQ_ETS_SSCR_SSC_MASK; value |= avb->send_slope & MGBE_MTL_TCQ_ETS_SSCR_SSC_MASK; - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_ETS_SSCR(tcinx)); /* Set Hi credit */ value = avb->hi_credit & MGBE_MTL_TCQ_ETS_HCR_HC_MASK; - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_ETS_HCR(tcinx)); - /* low credit is -ve number, osi_write need a unsigned int + /* low credit is -ve number, osi_write need a nveu32_t * take only 28:0 bits from avb->low_credit */ value = avb->low_credit & MGBE_MTL_TCQ_ETS_LCR_LC_MASK; - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_ETS_LCR(tcinx)); + } else { + /* Reset register values to POR/initialized values */ + osi_writela(osi_core, MGBE_MTL_TCQ_QW_ISCQW, (nveu8_t *)osi_core->base + + MGBE_MTL_TCQ_QW(tcinx)); + osi_writela(osi_core, OSI_DISABLE, (nveu8_t *)osi_core->base + + MGBE_MTL_TCQ_ETS_SSCR(tcinx)); + osi_writela(osi_core, OSI_DISABLE, (nveu8_t *)osi_core->base + + MGBE_MTL_TCQ_ETS_HCR(tcinx)); + osi_writela(osi_core, OSI_DISABLE, (nveu8_t *)osi_core->base + + MGBE_MTL_TCQ_ETS_LCR(tcinx)); + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MGBE_MTL_CHX_TX_OP_MODE(qinx)); + 
value &= ~MGBE_MTL_TX_OP_MODE_Q2TCMAP; + value |= (osi_core->tc[qinx] << MGBE_MTL_CHX_TX_OP_MODE_Q2TC_SH); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + + MGBE_MTL_CHX_TX_OP_MODE(qinx)); } - return 0; + ret = 0; + +done: + return ret; } /** @@ -3815,30 +2542,32 @@ static int mgbe_set_avb_algorithm( * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_get_avb_algorithm(struct osi_core_priv_data *const osi_core, - struct osi_core_avb_algorithm *const avb) +static nve32_t mgbe_get_avb_algorithm(struct osi_core_priv_data *const osi_core, + struct osi_core_avb_algorithm *const avb) { - unsigned int value; - int ret = -1; - unsigned int qinx = 0U; - unsigned int tcinx = 0U; + nveu32_t value; + nve32_t ret = 0; + nveu32_t qinx = 0U; + nveu32_t tcinx = 0U; if (avb == OSI_NULL) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "avb structure is NULL\n", 0ULL); - return ret; + ret = -1; + goto fail; } if (avb->qindex >= OSI_MGBE_MAX_NUM_QUEUES) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid Queue index\n", - (unsigned long long)avb->qindex); - return ret; + (nveul64_t)avb->qindex); + ret = -1; + goto fail; } qinx = avb->qindex; - value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_CHX_TX_OP_MODE(qinx)); /* Get TxQ/TC mode as per input struct after masking 3:2 bit */ @@ -3851,7 +2580,7 @@ static int mgbe_get_avb_algorithm(struct osi_core_priv_data *const osi_core, tcinx = avb->tcindex; /* Get Algo and Credit control */ - value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_ETS_CR(tcinx)); avb->credit_control = (value & MGBE_MTL_TCQ_ETS_CR_CC) >> MGBE_MTL_TCQ_ETS_CR_CC_SHIFT; @@ -3860,29 +2589,29 @@ static int mgbe_get_avb_algorithm(struct osi_core_priv_data *const osi_core, if (avb->algo == OSI_MTL_TXQ_AVALG_CBS) { /* Get Idle slope credit*/ - value = osi_readla(osi_core, 
(unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_QW(tcinx)); avb->idle_slope = value & MGBE_MTL_TCQ_ETS_QW_ISCQW_MASK; /* Get Send slope credit */ - value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_ETS_SSCR(tcinx)); avb->send_slope = value & MGBE_MTL_TCQ_ETS_SSCR_SSC_MASK; /* Get Hi credit */ - value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_ETS_HCR(tcinx)); avb->hi_credit = value & MGBE_MTL_TCQ_ETS_HCR_HC_MASK; /* Get Low credit for which bit 31:29 are unknown * return 28:0 valid bits to application */ - value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_ETS_LCR(tcinx)); avb->low_credit = value & MGBE_MTL_TCQ_ETS_LCR_LC_MASK; } - - return 0; +fail: + return ret; } /** @@ -3898,44 +2627,47 @@ static int mgbe_get_avb_algorithm(struct osi_core_priv_data *const osi_core, * There is one status interrupt which says swich to SWOL complete. * * @param[in] osi_core: osi core priv data structure + * @param[in] mtl_isr: MTL interrupt status value * * @note MAC should be init and started. 
see osi_start_mac() */ static void mgbe_handle_mtl_intrs(struct osi_core_priv_data *osi_core, - unsigned int mtl_isr) + nveu32_t mtl_isr) { - unsigned int val = 0U; - unsigned int sch_err = 0U; - unsigned int frm_err = 0U; - unsigned int temp = 0U; - unsigned int i = 0; - unsigned long stat_val = 0U; - unsigned int value = 0U; - unsigned int qstatus = 0U; - unsigned int qinx = 0U; + nveu32_t val = 0U; + nveu32_t sch_err = 0U; + nveu32_t frm_err = 0U; + nveu32_t temp = 0U; + nveu32_t i = 0; + nveul64_t stat_val = 0U; + nveu32_t value = 0U; + nveu32_t qstatus = 0U; + nveu32_t qinx = 0U; /* Check for all MTL queues */ for (i = 0; i < osi_core->num_mtl_queues; i++) { qinx = osi_core->mtl_queues[i]; - if (mtl_isr & OSI_BIT(qinx)) { + if ((mtl_isr & OSI_BIT(qinx)) == OSI_BIT(qinx)) { /* check if Q has underflow error */ - qstatus = osi_readl((unsigned char *)osi_core->base + + qstatus = osi_readl((nveu8_t *)osi_core->base + MGBE_MTL_QINT_STATUS(qinx)); /* Transmit Queue Underflow Interrupt Status */ - if (qstatus & MGBE_MTL_QINT_TXUNIFS) { - osi_core->pkt_err_stats.mgbe_tx_underflow_err = + if ((qstatus & MGBE_MTL_QINT_TXUNIFS) == MGBE_MTL_QINT_TXUNIFS) { +#ifndef OSI_STRIPPED_LIB + osi_core->stats.mgbe_tx_underflow_err = osi_update_stats_counter( - osi_core->pkt_err_stats.mgbe_tx_underflow_err, + osi_core->stats.mgbe_tx_underflow_err, 1UL); +#endif /* !OSI_STRIPPED_LIB */ } /* Clear interrupt status by writing back with 1 */ - osi_writel(1U, (unsigned char *)osi_core->base + + osi_writel(1U, (nveu8_t *)osi_core->base + MGBE_MTL_QINT_STATUS(qinx)); } } if ((mtl_isr & MGBE_MTL_IS_ESTIS) != MGBE_MTL_IS_ESTIS) { - return; + goto done; } val = osi_readla(osi_core, @@ -3946,21 +2678,21 @@ static void mgbe_handle_mtl_intrs(struct osi_core_priv_data *osi_core, /* return if interrupt is not related to EST */ if (val == OSI_DISABLE) { - return; + goto done; } /* increase counter write 1 back will clear */ if ((val & MGBE_MTL_EST_STATUS_CGCE) == MGBE_MTL_EST_STATUS_CGCE) { 
osi_core->est_ready = OSI_DISABLE; - stat_val = osi_core->tsn_stats.const_gate_ctr_err; - osi_core->tsn_stats.const_gate_ctr_err = + stat_val = osi_core->stats.const_gate_ctr_err; + osi_core->stats.const_gate_ctr_err = osi_update_stats_counter(stat_val, 1U); } if ((val & MGBE_MTL_EST_STATUS_HLBS) == MGBE_MTL_EST_STATUS_HLBS) { osi_core->est_ready = OSI_DISABLE; - stat_val = osi_core->tsn_stats.head_of_line_blk_sch; - osi_core->tsn_stats.head_of_line_blk_sch = + stat_val = osi_core->stats.head_of_line_blk_sch; + osi_core->stats.head_of_line_blk_sch = osi_update_stats_counter(stat_val, 1U); /* Need to read MTL_EST_Sch_Error register and cleared */ sch_err = osi_readla(osi_core, (nveu8_t *)osi_core->base + @@ -3969,28 +2701,28 @@ static void mgbe_handle_mtl_intrs(struct osi_core_priv_data *osi_core, temp = OSI_ENABLE; temp = temp << i; if ((sch_err & temp) == temp) { - stat_val = osi_core->tsn_stats.hlbs_q[i]; - osi_core->tsn_stats.hlbs_q[i] = + stat_val = osi_core->stats.hlbs_q[i]; + osi_core->stats.hlbs_q[i] = osi_update_stats_counter(stat_val, 1U); } } sch_err &= 0xFFU; //only 8 TC allowed so clearing all osi_writela(osi_core, sch_err, (nveu8_t *)osi_core->base + MGBE_MTL_EST_SCH_ERR); - /* Reset EST with print to configure it properly */ + /* Reset EST with prnve32_t to configure it properly */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_EST_CONTROL); value &= ~MGBE_MTL_EST_EEST; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_EST_CONTROL); - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Disabling EST due to HLBS, correct GCL\n", OSI_NONE); } if ((val & MGBE_MTL_EST_STATUS_HLBF) == MGBE_MTL_EST_STATUS_HLBF) { osi_core->est_ready = OSI_DISABLE; - stat_val = osi_core->tsn_stats.head_of_line_blk_frm; - osi_core->tsn_stats.head_of_line_blk_frm = + stat_val = osi_core->stats.head_of_line_blk_frm; + osi_core->stats.head_of_line_blk_frm = osi_update_stats_counter(stat_val, 1U); /* 
Need to read MTL_EST_Frm_Size_Error register and cleared */ frm_err = osi_readla(osi_core, (nveu8_t *)osi_core->base + @@ -3999,8 +2731,8 @@ static void mgbe_handle_mtl_intrs(struct osi_core_priv_data *osi_core, temp = OSI_ENABLE; temp = temp << i; if ((frm_err & temp) == temp) { - stat_val = osi_core->tsn_stats.hlbf_q[i]; - osi_core->tsn_stats.hlbf_q[i] = + stat_val = osi_core->stats.hlbf_q[i]; + osi_core->stats.hlbf_q[i] = osi_update_stats_counter(stat_val, 1U); } } @@ -4008,7 +2740,7 @@ static void mgbe_handle_mtl_intrs(struct osi_core_priv_data *osi_core, osi_writela(osi_core, frm_err, (nveu8_t *)osi_core->base + MGBE_MTL_EST_FRMS_ERR); - /* Reset EST with print to configure it properly */ + /* Reset EST with prnve32_t to configure it properly */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_EST_CONTROL); /* DDBF 1 means don't drop packets */ @@ -4017,7 +2749,7 @@ static void mgbe_handle_mtl_intrs(struct osi_core_priv_data *osi_core, value &= ~MGBE_MTL_EST_EEST; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_EST_CONTROL); - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Disabling EST due to HLBF, correct GCL\n", OSI_NONE); } @@ -4028,15 +2760,15 @@ static void mgbe_handle_mtl_intrs(struct osi_core_priv_data *osi_core, MGBE_MTL_EST_STATUS_BTRE) { osi_core->est_ready = OSI_ENABLE; } - stat_val = osi_core->tsn_stats.sw_own_list_complete; - osi_core->tsn_stats.sw_own_list_complete = + stat_val = osi_core->stats.sw_own_list_complete; + osi_core->stats.sw_own_list_complete = osi_update_stats_counter(stat_val, 1U); } if ((val & MGBE_MTL_EST_STATUS_BTRE) == MGBE_MTL_EST_STATUS_BTRE) { osi_core->est_ready = OSI_DISABLE; - stat_val = osi_core->tsn_stats.base_time_reg_err; - osi_core->tsn_stats.base_time_reg_err = + stat_val = osi_core->stats.base_time_reg_err; + osi_core->stats.base_time_reg_err = osi_update_stats_counter(stat_val, 1U); osi_core->est_ready = OSI_DISABLE; } @@ -4044,11 
+2776,12 @@ static void mgbe_handle_mtl_intrs(struct osi_core_priv_data *osi_core, osi_writela(osi_core, val, (nveu8_t *)osi_core->base + MGBE_MTL_EST_STATUS); - mtl_isr &= ~MGBE_MTL_IS_ESTIS; - osi_writela(osi_core, mtl_isr, (unsigned char *)osi_core->base + - MGBE_MTL_INTR_STATUS); +done: + return; } +#ifndef OSI_STRIPPED_LIB + /** * @brief mgbe_config_ptp_offload - Enable/Disable PTP offload * @@ -4064,17 +2797,17 @@ static void mgbe_handle_mtl_intrs(struct osi_core_priv_data *osi_core, * @retval -1 on failure. */ -static int mgbe_config_ptp_offload(struct osi_core_priv_data *const osi_core, - struct osi_pto_config *const pto_config) +static nve32_t mgbe_config_ptp_offload(struct osi_core_priv_data *const osi_core, + struct osi_pto_config *const pto_config) { - unsigned char *addr = (unsigned char *)osi_core->base; - int ret = 0; - unsigned int value = 0x0U; - unsigned int ptc_value = 0x0U; - unsigned int port_id = 0x0U; + nveu8_t *addr = (nveu8_t *)osi_core->base; + nve32_t ret = 0; + nveu32_t value = 0x0U; + nveu32_t ptc_value = 0x0U; + nveu32_t port_id = 0x0U; /* Read MAC TCR */ - value = osi_readla(osi_core, (unsigned char *)addr + MGBE_MAC_TCR); + value = osi_readla(osi_core, (nveu8_t *)addr + MGBE_MAC_TCR); /* clear old configuration */ value &= ~(MGBE_MAC_TCR_TSENMACADDR | OSI_MAC_TCR_SNAPTYPSEL_3 | @@ -4153,6 +2886,7 @@ static int mgbe_config_ptp_offload(struct osi_core_priv_data *const osi_core, return ret; } +#endif /* !OSI_STRIPPED_LIB */ #ifdef HSI_SUPPORT /** @@ -4172,13 +2906,19 @@ static void mgbe_handle_hsi_intr(struct osi_core_priv_data *osi_core) nveu32_t val2 = 0; void *xpcs_base = osi_core->xpcs_base; nveu64_t ce_count_threshold; + const nveu32_t osi_hsi_err_code[][2] = { + {OSI_HSI_MGBE0_UE_CODE, OSI_HSI_MGBE0_CE_CODE}, + {OSI_HSI_MGBE1_UE_CODE, OSI_HSI_MGBE1_CE_CODE}, + {OSI_HSI_MGBE2_UE_CODE, OSI_HSI_MGBE2_CE_CODE}, + {OSI_HSI_MGBE3_UE_CODE, OSI_HSI_MGBE3_CE_CODE}, + }; val = osi_readla(osi_core, (nveu8_t *)osi_core->base + 
MGBE_WRAP_COMMON_INTR_STATUS); if (((val & MGBE_REGISTER_PARITY_ERR) == MGBE_REGISTER_PARITY_ERR) || ((val & MGBE_CORE_UNCORRECTABLE_ERR) == MGBE_CORE_UNCORRECTABLE_ERR)) { osi_core->hsi.err_code[UE_IDX] = - hsi_err_code[osi_core->instance_id][UE_IDX]; + osi_hsi_err_code[osi_core->instance_id][UE_IDX]; osi_core->hsi.report_err = OSI_ENABLE; osi_core->hsi.report_count_err[UE_IDX] = OSI_ENABLE; /* Disable the interrupt */ @@ -4191,7 +2931,7 @@ static void mgbe_handle_hsi_intr(struct osi_core_priv_data *osi_core) } if ((val & MGBE_CORE_CORRECTABLE_ERR) == MGBE_CORE_CORRECTABLE_ERR) { osi_core->hsi.err_code[CE_IDX] = - hsi_err_code[osi_core->instance_id][CE_IDX]; + osi_hsi_err_code[osi_core->instance_id][CE_IDX]; osi_core->hsi.report_err = OSI_ENABLE; osi_core->hsi.ce_count = osi_update_stats_counter(osi_core->hsi.ce_count, 1UL); @@ -4230,7 +2970,7 @@ static void mgbe_handle_hsi_intr(struct osi_core_priv_data *osi_core) XPCS_WRAP_INTERRUPT_STATUS); if (((val & XPCS_CORE_UNCORRECTABLE_ERR) == XPCS_CORE_UNCORRECTABLE_ERR) || ((val & XPCS_REGISTER_PARITY_ERR) == XPCS_REGISTER_PARITY_ERR)) { - osi_core->hsi.err_code[UE_IDX] = hsi_err_code[osi_core->instance_id][UE_IDX]; + osi_core->hsi.err_code[UE_IDX] = osi_hsi_err_code[osi_core->instance_id][UE_IDX]; osi_core->hsi.report_err = OSI_ENABLE; osi_core->hsi.report_count_err[UE_IDX] = OSI_ENABLE; /* Disable uncorrectable interrupts */ @@ -4242,7 +2982,7 @@ static void mgbe_handle_hsi_intr(struct osi_core_priv_data *osi_core) XPCS_WRAP_INTERRUPT_CONTROL); } if ((val & XPCS_CORE_CORRECTABLE_ERR) == XPCS_CORE_CORRECTABLE_ERR) { - osi_core->hsi.err_code[CE_IDX] = hsi_err_code[osi_core->instance_id][CE_IDX]; + osi_core->hsi.err_code[CE_IDX] = osi_hsi_err_code[osi_core->instance_id][CE_IDX]; osi_core->hsi.report_err = OSI_ENABLE; osi_core->hsi.ce_count = osi_update_stats_counter(osi_core->hsi.ce_count, 1UL); @@ -4280,16 +3020,16 @@ static void mgbe_handle_hsi_intr(struct osi_core_priv_data *osi_core) * * @note MAC should be init and 
started. see osi_start_mac() */ -static void mgbe_handle_common_intr(struct osi_core_priv_data *osi_core) +static void mgbe_handle_common_intr(struct osi_core_priv_data *const osi_core) { void *base = osi_core->base; - unsigned int dma_isr = 0; - unsigned int qinx = 0; - unsigned int i = 0; - unsigned int dma_sr = 0; - unsigned int dma_ier = 0; - unsigned int mtl_isr = 0; - unsigned int val = 0; + nveu32_t dma_isr = 0; + nveu32_t qinx = 0; + nveu32_t i = 0; + nveu32_t dma_sr = 0; + nveu32_t dma_ier = 0; + nveu32_t mtl_isr = 0; + nveu32_t val = 0; #ifdef HSI_SUPPORT if (osi_core->hsi.enabled == OSI_ENABLE) { @@ -4298,7 +3038,7 @@ static void mgbe_handle_common_intr(struct osi_core_priv_data *osi_core) #endif dma_isr = osi_readla(osi_core, (nveu8_t *)base + MGBE_DMA_ISR); if (dma_isr == OSI_NONE) { - return; + goto done; } //FIXME Need to check how we can get the DMA channel here instead of @@ -4334,26 +3074,31 @@ static void mgbe_handle_common_intr(struct osi_core_priv_data *osi_core) /* ack non ti/ri nve32_ts */ osi_writela(osi_core, dma_sr, (nveu8_t *)base + MGBE_DMA_CHX_STATUS(qinx)); +#ifndef OSI_STRIPPED_LIB mgbe_update_dma_sr_stats(osi_core, dma_sr, qinx); +#endif /* !OSI_STRIPPED_LIB */ } } - mgbe_handle_mac_intrs(osi_core, dma_isr); + /* Handle MAC interrupts */ + if ((dma_isr & MGBE_DMA_ISR_MACIS) == MGBE_DMA_ISR_MACIS) { + mgbe_handle_mac_intrs(osi_core); + } /* Handle MTL inerrupts */ mtl_isr = osi_readla(osi_core, - (unsigned char *)base + MGBE_MTL_INTR_STATUS); + (nveu8_t *)base + MGBE_MTL_INTR_STATUS); if ((dma_isr & MGBE_DMA_ISR_MTLIS) == MGBE_DMA_ISR_MTLIS) { mgbe_handle_mtl_intrs(osi_core, mtl_isr); } /* Clear common interrupt status in wrapper register */ osi_writela(osi_core, MGBE_MAC_SBD_INTR, - (unsigned char *)base + MGBE_WRAP_COMMON_INTR_STATUS); - val = osi_readla(osi_core, (unsigned char *)osi_core->base + + (nveu8_t *)base + MGBE_WRAP_COMMON_INTR_STATUS); + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + 
MGBE_WRAP_COMMON_INTR_ENABLE); val |= MGBE_MAC_SBD_INTR; - osi_writela(osi_core, val, (unsigned char *)osi_core->base + + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + MGBE_WRAP_COMMON_INTR_ENABLE); /* Clear FRP Interrupts in MTL_RXP_Interrupt_Control_Status */ @@ -4363,6 +3108,9 @@ static void mgbe_handle_common_intr(struct osi_core_priv_data *osi_core) MGBE_MTL_RXP_INTR_CS_FOOVIS | MGBE_MTL_RXP_INTR_CS_PDRFIS); osi_writela(osi_core, val, (nveu8_t *)base + MGBE_MTL_RXP_INTR_CS); + +done: + return; } /** @@ -4381,58 +3129,7 @@ static nve32_t mgbe_pad_calibrate(OSI_UNUSED return 0; } -/** - * @brief mgbe_start_mac - Start MAC Tx/Rx engine - * - * Algorithm: Enable MAC Transmitter and Receiver - * - * @param[in] osi_core: OSI core private data structure. - * - * @note 1) MAC init should be complete. See osi_hw_core_init() and - * osi_hw_dma_init() - */ -static void mgbe_start_mac(struct osi_core_priv_data *const osi_core) -{ - nveu32_t value; - void *addr = osi_core->base; - - value = osi_readla(osi_core, (nveu8_t *)addr + MGBE_MAC_TMCR); - /* Enable MAC Transmit */ - value |= MGBE_MAC_TMCR_TE; - osi_writela(osi_core, value, (nveu8_t *)addr + MGBE_MAC_TMCR); - - value = osi_readla(osi_core, (nveu8_t *)addr + MGBE_MAC_RMCR); - /* Enable MAC Receive */ - value |= MGBE_MAC_RMCR_RE; - osi_writela(osi_core, value, (nveu8_t *)addr + MGBE_MAC_RMCR); -} - -/** - * @brief mgbe_stop_mac - Stop MAC Tx/Rx engine - * - * Algorithm: Disables MAC Transmitter and Receiver - * - * @param[in] osi_core: OSI core private data structure. - * - * @note MAC DMA deinit should be complete. 
See osi_hw_dma_deinit() - */ -static void mgbe_stop_mac(struct osi_core_priv_data *const osi_core) -{ - nveu32_t value; - void *addr = osi_core->base; - - value = osi_readla(osi_core, (nveu8_t *)addr + MGBE_MAC_TMCR); - /* Disable MAC Transmit */ - value &= ~MGBE_MAC_TMCR_TE; - osi_writela(osi_core, value, (nveu8_t *)addr + MGBE_MAC_TMCR); - - value = osi_readla(osi_core, (nveu8_t *)addr + MGBE_MAC_RMCR); - /* Disable MAC Receive */ - value &= ~MGBE_MAC_RMCR_RE; - osi_writela(osi_core, value, (nveu8_t *)addr + MGBE_MAC_RMCR); -} - -#ifdef MACSEC_SUPPORT +#if defined(MACSEC_SUPPORT) && !defined(OSI_STRIPPED_LIB) /** * @brief mgbe_config_mac_tx - Enable/Disable MAC Tx * @@ -4463,68 +3160,6 @@ static void mgbe_config_mac_tx(struct osi_core_priv_data *const osi_core, } #endif /* MACSEC_SUPPORT */ -/** - * @brief mgbe_core_deinit - MGBE MAC core deinitialization - * - * Algorithm: This function will take care of deinitializing MAC - * - * @param[in] osi_core: OSI core private data structure. - * - * @note Required clks and resets has to be enabled - */ -static void mgbe_core_deinit(struct osi_core_priv_data *osi_core) -{ - /* Stop the MAC by disabling both MAC Tx and Rx */ - mgbe_stop_mac(osi_core); -} - -/** - * @brief mgbe_set_speed - Set operating speed - * - * Algorithm: Based on the speed (2.5G/5G/10G) MAC will be configured - * accordingly. - * - * @param[in] osi_core: OSI core private data. - * @param[in] speed: Operating speed. - * - * @note MAC should be init and started. 
see osi_start_mac() - */ -static int mgbe_set_speed(struct osi_core_priv_data *const osi_core, - const int speed) -{ - unsigned int value = 0; - - value = osi_readla(osi_core, - (unsigned char *) osi_core->base + MGBE_MAC_TMCR); - - switch (speed) { - case OSI_SPEED_2500: - value |= MGBE_MAC_TMCR_SS_2_5G; - break; - case OSI_SPEED_5000: - value |= MGBE_MAC_TMCR_SS_5G; - break; - case OSI_SPEED_10000: - value &= ~MGBE_MAC_TMCR_SS_10G; - break; - default: - /* setting default to 10G */ - value &= ~MGBE_MAC_TMCR_SS_10G; - break; - } - - osi_writela(osi_core, value, (unsigned char *) - osi_core->base + MGBE_MAC_TMCR); - - if (xpcs_init(osi_core) < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "xpcs_init failed\n", OSI_NONE); - return -1; - } - - return xpcs_start(osi_core); -} - /** * @brief mgbe_mdio_busy_wait - MDIO busy wait loop * @@ -4532,23 +3167,25 @@ static int mgbe_set_speed(struct osi_core_priv_data *const osi_core, * * @param[in] osi_core: OSI core data struture. */ -static int mgbe_mdio_busy_wait(struct osi_core_priv_data *const osi_core) +static nve32_t mgbe_mdio_busy_wait(struct osi_core_priv_data *const osi_core) { /* half second timeout */ - unsigned int retry = 50000; - unsigned int mac_gmiiar; - unsigned int count; - int cond = 1; + nveu32_t retry = 50000; + nveu32_t mac_gmiiar; + nveu32_t count; + nve32_t cond = 1; + nve32_t ret = 0; count = 0; while (cond == 1) { if (count > retry) { - return -1; + ret = -1; + goto fail; } count++; - mac_gmiiar = osi_readla(osi_core, (unsigned char *) + mac_gmiiar = osi_readla(osi_core, (nveu8_t *) osi_core->base + MGBE_MDIO_SCCD); if ((mac_gmiiar & MGBE_MDIO_SCCD_SBUSY) == 0U) { cond = 0; @@ -4556,169 +3193,7 @@ static int mgbe_mdio_busy_wait(struct osi_core_priv_data *const osi_core) osi_core->osd_ops.udelay(10U); } } - - return 0; -} - -/* - * @brief mgbe_save_registers Function to store a backup of - * MAC register space during SOC suspend. 
- * - * Algorithm: Read registers to be backed up as per struct core_backup and - * store the register values in memory. - * - * @param[in] osi_core: OSI core private data structure. - * - * @retval 0 on success - * @retval -1 on failure. - */ -static inline int mgbe_save_registers( - struct osi_core_priv_data *const osi_core) -{ - unsigned int i = 0; - struct core_backup *config = &osi_core->backup_config; - int ret = 0; - - /* Save direct access registers */ - for (i = 0; i < MGBE_DIRECT_MAX_BAK_IDX; i++) { - if (config->reg_addr[i] != OSI_NULL) { - /* Read the register and store into reg_val */ - config->reg_val[i] = osi_readla(osi_core, - config->reg_addr[i]); - } - } - - /* Save L3 and L4 indirect addressing registers */ - for (i = 0; i < OSI_MGBE_MAX_L3_L4_FILTER; i++) { - ret = mgbe_l3l4_filter_read(osi_core, i, MGBE_MAC_L3L4_CTR, - &config->reg_val[MGBE_MAC_L3L4_CTR_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L3L4_CTR read fail return here */ - return ret; - } - ret = mgbe_l3l4_filter_read(osi_core, i, MGBE_MAC_L4_ADDR, - &config->reg_val[MGBE_MAC_L4_ADR_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L4_ADDR read fail return here */ - return ret; - } - ret = mgbe_l3l4_filter_read(osi_core, i, MGBE_MAC_L3_AD0R, - &config->reg_val[MGBE_MAC_L3_AD0R_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L3_AD0R read fail return here */ - return ret; - } - ret = mgbe_l3l4_filter_read(osi_core, i, MGBE_MAC_L3_AD1R, - &config->reg_val[MGBE_MAC_L3_AD1R_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L3_AD1R read fail return here */ - return ret; - } - ret = mgbe_l3l4_filter_read(osi_core, i, MGBE_MAC_L3_AD2R, - &config->reg_val[MGBE_MAC_L3_AD2R_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L3_AD2R read fail return here */ - return ret; - } - ret = mgbe_l3l4_filter_read(osi_core, i, MGBE_MAC_L3_AD3R, - &config->reg_val[MGBE_MAC_L3_AD3R_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L3_AD3R read fail return here */ - return ret; - } - } - - /* Save MAC_DChSel_IndReg indirect 
addressing registers */ - for (i = 0; i < OSI_MGBE_MAX_MAC_ADDRESS_FILTER; i++) { - ret = mgbe_mac_indir_addr_read(osi_core, MGBE_MAC_DCHSEL, - i, &config->reg_val[MGBE_MAC_DCHSEL_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_DCHSEL read fail return here */ - return ret; - } - } - - return ret; -} - -/** - * @brief mgbe_restore_registers Function to restore the backup of - * MAC registers during SOC resume. - * - * Algorithm: Restore the register values from the in memory backup taken using - * mgbe_save_registers(). - * - * @param[in] osi_core: OSI core private data structure. - * - * @retval 0 on success - * @retval -1 on failure. - */ -static inline int mgbe_restore_registers( - struct osi_core_priv_data *const osi_core) -{ - unsigned int i = 0; - struct core_backup *config = &osi_core->backup_config; - int ret = 0; - - /* Restore direct access registers */ - for (i = 0; i < MGBE_MAX_BAK_IDX; i++) { - if (config->reg_addr[i] != OSI_NULL) { - /* Write back the saved register value */ - osi_writela(osi_core, config->reg_val[i], - config->reg_addr[i]); - } - } - - /* Restore L3 and L4 indirect addressing registers */ - for (i = 0; i < OSI_MGBE_MAX_L3_L4_FILTER; i++) { - ret = mgbe_l3l4_filter_write(osi_core, i, MGBE_MAC_L3L4_CTR, - config->reg_val[MGBE_MAC_L3L4_CTR_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L3L4_CTR write fail return here */ - return ret; - } - ret = mgbe_l3l4_filter_write(osi_core, i, MGBE_MAC_L4_ADDR, - config->reg_val[MGBE_MAC_L4_ADR_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L4_ADDR write fail return here */ - return ret; - } - ret = mgbe_l3l4_filter_write(osi_core, i, MGBE_MAC_L3_AD0R, - config->reg_val[MGBE_MAC_L3_AD0R_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L3_AD0R write fail return here */ - return ret; - } - ret = mgbe_l3l4_filter_write(osi_core, i, MGBE_MAC_L3_AD1R, - config->reg_val[MGBE_MAC_L3_AD1R_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L3_AD1R write fail return here */ - return ret; - } - ret = 
mgbe_l3l4_filter_write(osi_core, i, MGBE_MAC_L3_AD2R, - config->reg_val[MGBE_MAC_L3_AD2R_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L3_AD2R write fail return here */ - return ret; - } - ret = mgbe_l3l4_filter_write(osi_core, i, MGBE_MAC_L3_AD3R, - config->reg_val[MGBE_MAC_L3_AD3R_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L3_AD3R write fail return here */ - return ret; - } - } - - /* Restore MAC_DChSel_IndReg indirect addressing registers */ - for (i = 0; i < OSI_MGBE_MAX_MAC_ADDRESS_FILTER; i++) { - ret = mgbe_mac_indir_addr_write(osi_core, MGBE_MAC_DCHSEL, - i, config->reg_val[MGBE_MAC_DCHSEL_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_DCHSEL write fail return here */ - return ret; - } - } - +fail: return ret; } @@ -4737,13 +3212,13 @@ static inline int mgbe_restore_registers( * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_write_phy_reg(struct osi_core_priv_data *osi_core, - unsigned int phyaddr, - unsigned int phyreg, - unsigned short phydata) +static nve32_t mgbe_write_phy_reg(struct osi_core_priv_data *const osi_core, + const nveu32_t phyaddr, + const nveu32_t phyreg, + const nveu16_t phydata) { - int ret = 0; - unsigned int reg; + nve32_t ret = 0; + nveu32_t reg; /* Wait for any previous MII read/write operation to complete */ ret = mgbe_mdio_busy_wait(osi_core); @@ -4752,7 +3227,7 @@ static int mgbe_write_phy_reg(struct osi_core_priv_data *osi_core, OSI_LOG_ARG_HW_FAIL, "MII operation timed out\n", 0ULL); - return ret; + goto fail; } /* set MDIO address register */ @@ -4762,12 +3237,12 @@ static int mgbe_write_phy_reg(struct osi_core_priv_data *osi_core, /* set port address and register address */ reg |= (phyaddr << MGBE_MDIO_SCCA_PA_SHIFT) | (phyreg & MGBE_MDIO_SCCA_RA_MASK); - osi_writela(osi_core, reg, (unsigned char *) + osi_writela(osi_core, reg, (nveu8_t *) osi_core->base + MGBE_MDIO_SCCA); /* Program Data register */ reg = phydata | - (MGBE_MDIO_SCCD_CMD_WR << MGBE_MDIO_SCCD_CMD_SHIFT) | + (((nveu32_t)MGBE_MDIO_SCCD_CMD_WR) 
<< MGBE_MDIO_SCCD_CMD_SHIFT) | MGBE_MDIO_SCCD_SBUSY; /** @@ -4776,17 +3251,10 @@ static int mgbe_write_phy_reg(struct osi_core_priv_data *osi_core, * On Silicon AXI/APB clock is 408MHz. To achive maximum MDC clock * of 2.5MHz only CR need to be set to 5. */ - if (osi_core->pre_si) { - reg |= (MGBE_MDIO_SCCD_CRS | - ((0x1U & MGBE_MDIO_SCCD_CR_MASK) << - MGBE_MDIO_SCCD_CR_SHIFT)); - } else { - reg &= ~MGBE_MDIO_SCCD_CRS; - reg |= ((0x5U & MGBE_MDIO_SCCD_CR_MASK) << - MGBE_MDIO_SCCD_CR_SHIFT); - } + reg &= ~MGBE_MDIO_SCCD_CRS; + reg |= ((((nveu32_t)0x5U) & MGBE_MDIO_SCCD_CR_MASK) << MGBE_MDIO_SCCD_CR_SHIFT); - osi_writela(osi_core, reg, (unsigned char *) + osi_writela(osi_core, reg, (nveu8_t *) osi_core->base + MGBE_MDIO_SCCD); /* wait for MII write operation to complete */ @@ -4796,10 +3264,9 @@ static int mgbe_write_phy_reg(struct osi_core_priv_data *osi_core, OSI_LOG_ARG_HW_FAIL, "MII operation timed out\n", 0ULL); - return ret; } - - return 0; +fail: + return ret; } /** @@ -4816,13 +3283,13 @@ static int mgbe_write_phy_reg(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_read_phy_reg(struct osi_core_priv_data *osi_core, - unsigned int phyaddr, - unsigned int phyreg) +static nve32_t mgbe_read_phy_reg(struct osi_core_priv_data *const osi_core, + const nveu32_t phyaddr, + const nveu32_t phyreg) { - unsigned int reg; - unsigned int data; - int ret = 0; + nveu32_t reg; + nveu32_t data; + nve32_t ret = 0; ret = mgbe_mdio_busy_wait(osi_core); if (ret < 0) { @@ -4830,7 +3297,7 @@ static int mgbe_read_phy_reg(struct osi_core_priv_data *osi_core, OSI_LOG_ARG_HW_FAIL, "MII operation timed out\n", 0ULL); - return ret; + goto fail; } /* set MDIO address register */ @@ -4840,11 +3307,11 @@ static int mgbe_read_phy_reg(struct osi_core_priv_data *osi_core, /* set port address and register address */ reg |= (phyaddr << MGBE_MDIO_SCCA_PA_SHIFT) | (phyreg & MGBE_MDIO_SCCA_RA_MASK); - osi_writela(osi_core, reg, (unsigned char *) + osi_writela(osi_core, reg, (nveu8_t *) osi_core->base + MGBE_MDIO_SCCA); /* Program Data register */ - reg = (MGBE_MDIO_SCCD_CMD_RD << MGBE_MDIO_SCCD_CMD_SHIFT) | + reg = (((nveu32_t)MGBE_MDIO_SCCD_CMD_RD) << MGBE_MDIO_SCCD_CMD_SHIFT) | MGBE_MDIO_SCCD_SBUSY; /** @@ -4853,17 +3320,10 @@ static int mgbe_read_phy_reg(struct osi_core_priv_data *osi_core, * On Silicon AXI/APB clock is 408MHz. To achive maximum MDC clock * of 2.5MHz only CR need to be set to 5. 
*/ - if (osi_core->pre_si) { - reg |= (MGBE_MDIO_SCCD_CRS | - ((0x1U & MGBE_MDIO_SCCD_CR_MASK) << - MGBE_MDIO_SCCD_CR_SHIFT)); - } else { - reg &= ~MGBE_MDIO_SCCD_CRS; - reg |= ((0x5U & MGBE_MDIO_SCCD_CR_MASK) << - MGBE_MDIO_SCCD_CR_SHIFT); - } + reg &= ~MGBE_MDIO_SCCD_CRS; + reg |= ((((nveu32_t)0x5U) & MGBE_MDIO_SCCD_CR_MASK) << MGBE_MDIO_SCCD_CR_SHIFT); - osi_writela(osi_core, reg, (unsigned char *) + osi_writela(osi_core, reg, (nveu8_t *) osi_core->base + MGBE_MDIO_SCCD); ret = mgbe_mdio_busy_wait(osi_core); @@ -4872,350 +3332,19 @@ static int mgbe_read_phy_reg(struct osi_core_priv_data *osi_core, OSI_LOG_ARG_HW_FAIL, "MII operation timed out\n", 0ULL); - return ret; + goto fail; } - reg = osi_readla(osi_core, (unsigned char *) + reg = osi_readla(osi_core, (nveu8_t *) osi_core->base + MGBE_MDIO_SCCD); data = (reg & MGBE_MDIO_SCCD_SDATA_MASK); - return (int)data; -} - -/** - * @brief mgbe_hw_est_write - indirect write the GCL to Software own list - * (SWOL) - * - * @param[in] base: MAC base IOVA address. - * @param[in] addr_val: Address offset for indirect write. - * @param[in] data: Data to be written at offset. - * @param[in] gcla: Gate Control List Address, 0 for ETS register. - * 1 for GCL memory. - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. - */ -static int mgbe_hw_est_write(struct osi_core_priv_data *osi_core, - unsigned int addr_val, unsigned int data, - unsigned int gcla) -{ - int retry = 1000; - unsigned int val = 0x0; - - osi_writela(osi_core, data, (unsigned char *)osi_core->base + - MGBE_MTL_EST_DATA); - - val &= ~MGBE_MTL_EST_ADDR_MASK; - val |= (gcla == 1U) ? 
0x0U : MGBE_MTL_EST_GCRR; - val |= MGBE_MTL_EST_SRWO; - val |= addr_val; - osi_writela(osi_core, val, (unsigned char *)osi_core->base + - MGBE_MTL_EST_GCL_CONTROL); - - while (--retry > 0) { - osi_core->osd_ops.udelay(OSI_DELAY_1US); - val = osi_readla(osi_core, (unsigned char *)osi_core->base + - MGBE_MTL_EST_GCL_CONTROL); - if ((val & MGBE_MTL_EST_SRWO) == MGBE_MTL_EST_SRWO) { - continue; - } - - break; - } - - if ((val & MGBE_MTL_EST_ERR0) == MGBE_MTL_EST_ERR0 || - (retry <= 0)) { - return -1; - } - - return 0; -} - -/** - * @brief mgbe_hw_config_est - Read Setting for GCL from input and update - * registers. - * - * Algorithm: - * 1) Write TER, LLR and EST control register - * 2) Update GCL to sw own GCL (MTL_EST_Status bit SWOL will tell which is - * owned by SW) and store which GCL is in use currently in sw. - * 3) TODO set DBGB and DBGM for debugging - * 4) EST_data and GCRR to 1, update entry sno in ADDR and put data at - * est_gcl_data enable GCL MTL_EST_SSWL and wait for self clear or use - * SWLC in MTL_EST_Status. Please note new GCL will be pushed for each entry. - * 5) Configure btr. Update btr based on current time (current time - * should be updated based on PTP by this time) - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] est: EST configuration input argument. - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static int mgbe_hw_config_est(struct osi_core_priv_data *osi_core, - struct osi_est_config *est) -{ - unsigned int btr[2] = {0}; - unsigned int val = 0x0; - void *base = osi_core->base; - unsigned int i; - int ret = 0; - unsigned int addr = 0x0; - - if ((osi_core->hw_feature != OSI_NULL) && - (osi_core->hw_feature->est_sel == OSI_DISABLE)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "EST not supported in HW\n", 0ULL); - return -1; - } - - if (est->en_dis == OSI_DISABLE) { - val = osi_readla(osi_core, (unsigned char *) - base + MGBE_MTL_EST_CONTROL); - val &= ~MGBE_MTL_EST_EEST; - osi_writela(osi_core, val, (unsigned char *) - base + MGBE_MTL_EST_CONTROL); - - return 0; - } - - btr[0] = est->btr[0]; - btr[1] = est->btr[1]; - if (btr[0] == 0U && btr[1] == 0U) { - common_get_systime_from_mac(osi_core->base, - osi_core->mac, - &btr[1], &btr[0]); - } - - if (gcl_validate(osi_core, est, btr, osi_core->mac) < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL validation failed\n", 0LL); - return -1; - } - - ret = mgbe_hw_est_write(osi_core, MGBE_MTL_EST_CTR_LOW, est->ctr[0], 0); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL CTR[0] failed\n", 0LL); - return ret; - } - /* check for est->ctr[i] not more than FF, TODO as per hw config - * parameter we can have max 0x3 as this value in sec */ - est->ctr[1] &= MGBE_MTL_EST_CTR_HIGH_MAX; - ret = mgbe_hw_est_write(osi_core, MGBE_MTL_EST_CTR_HIGH, est->ctr[1], 0); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL CTR[1] failed\n", 0LL); - return ret; - } - - ret = mgbe_hw_est_write(osi_core, MGBE_MTL_EST_TER, est->ter, 0); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL TER failed\n", 0LL); - return ret; - } - - ret = mgbe_hw_est_write(osi_core, MGBE_MTL_EST_LLR, est->llr, 0); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL LLR failed\n", 0LL); - return ret; - } - - /* Write GCL table */ - for (i 
= 0U; i < est->llr; i++) { - addr = i; - addr = addr << MGBE_MTL_EST_ADDR_SHIFT; - addr &= MGBE_MTL_EST_ADDR_MASK; - ret = mgbe_hw_est_write(osi_core, addr, est->gcl[i], 1); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL enties write failed\n", - (unsigned long long)i); - return ret; - } - } - - /* Write parameters */ - ret = mgbe_hw_est_write(osi_core, MGBE_MTL_EST_BTR_LOW, - btr[0] + est->btr_offset[0], OSI_DISABLE); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL BTR[0] failed\n", - (unsigned long long)(btr[0] + - est->btr_offset[0])); - return ret; - } - - ret = mgbe_hw_est_write(osi_core, MGBE_MTL_EST_BTR_HIGH, - btr[1] + est->btr_offset[1], OSI_DISABLE); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL BTR[1] failed\n", - (unsigned long long)(btr[1] + - est->btr_offset[1])); - return ret; - } - - val = osi_readla(osi_core, (unsigned char *) - base + MGBE_MTL_EST_CONTROL); - /* Store table */ - val |= MGBE_MTL_EST_SSWL; - val |= MGBE_MTL_EST_EEST; - val |= MGBE_MTL_EST_QHLBF; - osi_writela(osi_core, val, (unsigned char *) - base + MGBE_MTL_EST_CONTROL); - - return ret; -} - -/** - * @brief mgbe_hw_config_fep - Read Setting for preemption and express for TC - * and update registers. - * - * Algorithm: - * 1) Check for TC enable and TC has masked for setting to preemptable. - * 2) update FPE control status register - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] fpe: FPE configuration input argument. - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static int mgbe_hw_config_fpe(struct osi_core_priv_data *osi_core, - struct osi_fpe_config *fpe) -{ - unsigned int i = 0U; - unsigned int val = 0U; - unsigned int temp = 0U, temp1 = 0U; - unsigned int temp_shift = 0U; - int ret = 0; - - if ((osi_core->hw_feature != OSI_NULL) && - (osi_core->hw_feature->fpe_sel == OSI_DISABLE)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "FPE not supported in HW\n", 0ULL); - return -1; - } - -#ifdef MACSEC_SUPPORT - osi_lock_irq_enabled(&osi_core->macsec_fpe_lock); - /* MACSEC and FPE cannot coexist on MGBE refer bug 3484034 */ - if (osi_core->is_macsec_enabled == OSI_ENABLE) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "FPE and MACSEC cannot co-exist\n", 0ULL); - ret = -1; - goto exit; - } -#endif /* MACSEC_SUPPORT */ - - osi_core->fpe_ready = OSI_DISABLE; - - if (((fpe->tx_queue_preemption_enable << MGBE_MTL_FPE_CTS_PEC_SHIFT) & - MGBE_MTL_FPE_CTS_PEC) == OSI_DISABLE) { - val = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_MTL_FPE_CTS); - val &= ~MGBE_MTL_FPE_CTS_PEC; - osi_writela(osi_core, val, (nveu8_t *)osi_core->base + - MGBE_MTL_FPE_CTS); - - val = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_MAC_FPE_CTS); - val &= ~MGBE_MAC_FPE_CTS_EFPE; - osi_writela(osi_core, val, (nveu8_t *)osi_core->base + - MGBE_MAC_FPE_CTS); - -#ifdef MACSEC_SUPPORT - osi_core->is_fpe_enabled = OSI_DISABLE; -#endif /* MACSEC_SUPPORT */ - ret = 0; - goto exit; - } - - val = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_MTL_FPE_CTS); - val &= ~MGBE_MTL_FPE_CTS_PEC; - for (i = 0U; i < OSI_MAX_TC_NUM; i++) { - /* max 8 bit for this structure fot TC/TXQ. Set the TC for express or - * preemption. Default is express for a TC. 
DWCXG_NUM_TC = 8 */ - temp = OSI_BIT(i); - if ((fpe->tx_queue_preemption_enable & temp) == temp) { - temp_shift = i; - temp_shift += MGBE_MTL_FPE_CTS_PEC_SHIFT; - /* set queue for preemtable */ - if (temp_shift < MGBE_MTL_FPE_CTS_PEC_MAX_SHIFT) { - temp1 = OSI_ENABLE; - temp1 = temp1 << temp_shift; - val |= temp1; - } else { - /* Do nothing */ - } - } - } - osi_writela(osi_core, val, (nveu8_t *)osi_core->base + - MGBE_MTL_FPE_CTS); - - if (fpe->rq == 0x0U || fpe->rq >= OSI_MGBE_MAX_NUM_CHANS) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "FPE init failed due to wrong RQ\n", fpe->rq); - ret = -1; - goto exit; - } - - val = osi_readla(osi_core, (unsigned char *) - osi_core->base + MGBE_MAC_RQC1R); - val &= ~MGBE_MAC_RQC1R_RQ; - temp = fpe->rq; - temp = temp << MGBE_MAC_RQC1R_RQ_SHIFT; - temp = (temp & MGBE_MAC_RQC1R_RQ); - val |= temp; - osi_core->residual_queue = fpe->rq; - osi_writela(osi_core, val, (unsigned char *) - osi_core->base + MGBE_MAC_RQC1R); - - val = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MAC_RQC4R); - val &= ~MGBE_MAC_RQC4R_PMCBCQ; - temp = fpe->rq; - temp = temp << MGBE_MAC_RQC4R_PMCBCQ_SHIFT; - temp = (temp & MGBE_MAC_RQC4R_PMCBCQ); - val |= temp; - osi_writela(osi_core, val, (nveu8_t *)osi_core->base + MGBE_MAC_RQC4R); - - /* initiate SVER for SMD-V and SMD-R */ - val = osi_readla(osi_core, (unsigned char *) - osi_core->base + MGBE_MTL_FPE_CTS); - val |= MGBE_MAC_FPE_CTS_SVER; - osi_writela(osi_core, val, (unsigned char *) - osi_core->base + MGBE_MAC_FPE_CTS); - - val = osi_readla(osi_core, (unsigned char *) - osi_core->base + MGBE_MTL_FPE_ADV); - val &= ~MGBE_MTL_FPE_ADV_HADV_MASK; - //(minimum_fragment_size +IPG/EIPG + Preamble) *.8 ~98ns for10G - val |= MGBE_MTL_FPE_ADV_HADV_VAL; - osi_writela(osi_core, val, (unsigned char *) - osi_core->base + MGBE_MTL_FPE_ADV); - -#ifdef MACSEC_SUPPORT - osi_core->is_fpe_enabled = OSI_ENABLE; -#endif /* MACSEC_SUPPORT */ - -exit: - -#ifdef MACSEC_SUPPORT - 
osi_unlock_irq_enabled(&osi_core->macsec_fpe_lock); -#endif /* MACSEC_SUPPORT */ + ret = (nve32_t)data; +fail: return ret; } +#ifndef OSI_STRIPPED_LIB /** * @brief mgbe_disable_tx_lpi - Helper function to disable Tx LPI. * @@ -5229,14 +3358,14 @@ exit: */ static inline void mgbe_disable_tx_lpi(struct osi_core_priv_data *osi_core) { - unsigned int lpi_csr = 0; + nveu32_t lpi_csr = 0; /* Disable LPI control bits */ - lpi_csr = osi_readla(osi_core, (unsigned char *) + lpi_csr = osi_readla(osi_core, (nveu8_t *) osi_core->base + MGBE_MAC_LPI_CSR); lpi_csr &= ~(MGBE_MAC_LPI_CSR_LPITE | MGBE_MAC_LPI_CSR_LPITXA | MGBE_MAC_LPI_CSR_PLS | MGBE_MAC_LPI_CSR_LPIEN); - osi_writela(osi_core, lpi_csr, (unsigned char *) + osi_writela(osi_core, lpi_csr, (nveu8_t *) osi_core->base + MGBE_MAC_LPI_CSR); } @@ -5259,14 +3388,14 @@ static inline void mgbe_disable_tx_lpi(struct osi_core_priv_data *osi_core) * MAC/PHY should be initialized * */ -static void mgbe_configure_eee(struct osi_core_priv_data *osi_core, - unsigned int tx_lpi_enabled, - unsigned int tx_lpi_timer) +static void mgbe_configure_eee(struct osi_core_priv_data *const osi_core, + const nveu32_t tx_lpi_enabled, + const nveu32_t tx_lpi_timer) { - unsigned int lpi_csr = 0; - unsigned int lpi_timer_ctrl = 0; - unsigned int lpi_entry_timer = 0; - unsigned int tic_counter = 0; + nveu32_t lpi_csr = 0; + nveu32_t lpi_timer_ctrl = 0; + nveu32_t lpi_entry_timer = 0; + nveu32_t tic_counter = 0; void *addr = osi_core->base; if (xpcs_eee(osi_core, tx_lpi_enabled) != 0) { @@ -5293,7 +3422,7 @@ static void mgbe_configure_eee(struct osi_core_priv_data *osi_core, MGBE_LPI_LS_TIMER_MASK); lpi_timer_ctrl |= (MGBE_DEFAULT_LPI_TW_TIMER & MGBE_LPI_TW_TIMER_MASK); - osi_writela(osi_core, lpi_timer_ctrl, (unsigned char *)addr + + osi_writela(osi_core, lpi_timer_ctrl, (nveu8_t *)addr + MGBE_MAC_LPI_TIMER_CTRL); /* 4. 
For GMII, read the link status of the PHY chip by @@ -5308,7 +3437,7 @@ static void mgbe_configure_eee(struct osi_core_priv_data *osi_core, /* Should be same as (ABP clock freq - 1) = 12 = 0xC, currently * from define but we should get it from pdata->clock TODO */ tic_counter = MGBE_1US_TIC_COUNTER; - osi_writela(osi_core, tic_counter, (unsigned char *)addr + + osi_writela(osi_core, tic_counter, (nveu8_t *)addr + MGBE_MAC_1US_TIC_COUNT); /* 6. Program the MAC_LPI_Auto_Entry_Timer register (LPIET) @@ -5318,7 +3447,7 @@ static void mgbe_configure_eee(struct osi_core_priv_data *osi_core, * to enter LPI mode after all tx is complete. Default 1sec */ lpi_entry_timer |= (tx_lpi_timer & MGBE_LPI_ENTRY_TIMER_MASK); - osi_writela(osi_core, lpi_entry_timer, (unsigned char *)addr + + osi_writela(osi_core, lpi_entry_timer, (nveu8_t *)addr + MGBE_MAC_LPI_EN_TIMER); /* 7. Set LPIATE and LPITXA (bit[20:19]) of @@ -5329,27 +3458,28 @@ static void mgbe_configure_eee(struct osi_core_priv_data *osi_core, * enters the LPI mode after completing all scheduled * packets and remain IDLE for the time indicated by LPIET. 
*/ - lpi_csr = osi_readla(osi_core, (unsigned char *) + lpi_csr = osi_readla(osi_core, (nveu8_t *) addr + MGBE_MAC_LPI_CSR); lpi_csr |= (MGBE_MAC_LPI_CSR_LPITE | MGBE_MAC_LPI_CSR_LPITXA | MGBE_MAC_LPI_CSR_PLS | MGBE_MAC_LPI_CSR_LPIEN); - osi_writela(osi_core, lpi_csr, (unsigned char *) + osi_writela(osi_core, lpi_csr, (nveu8_t *) addr + MGBE_MAC_LPI_CSR); } else { /* Disable LPI control bits */ mgbe_disable_tx_lpi(osi_core); } } +#endif /* !OSI_STRIPPED_LIB */ -static int mgbe_get_hw_features(struct osi_core_priv_data *osi_core, - struct osi_hw_features *hw_feat) +static nve32_t mgbe_get_hw_features(struct osi_core_priv_data *const osi_core, + struct osi_hw_features *hw_feat) { - unsigned char *base = (unsigned char *)osi_core->base; - unsigned int mac_hfr0 = 0; - unsigned int mac_hfr1 = 0; - unsigned int mac_hfr2 = 0; - unsigned int mac_hfr3 = 0; - unsigned int val = 0; + nveu8_t *base = (nveu8_t *)osi_core->base; + nveu32_t mac_hfr0 = 0; + nveu32_t mac_hfr1 = 0; + nveu32_t mac_hfr2 = 0; + nveu32_t mac_hfr3 = 0; + nveu32_t val = 0; mac_hfr0 = osi_readla(osi_core, base + MGBE_MAC_HFR0); mac_hfr1 = osi_readla(osi_core, base + MGBE_MAC_HFR1); @@ -5507,179 +3637,13 @@ static int mgbe_get_hw_features(struct osi_core_priv_data *osi_core, return 0; } -/** - * @brief mgbe_poll_for_tsinit_complete - Poll for time stamp init complete - * - * Algorithm: Read TSINIT value from MAC TCR register until it is - * equal to zero. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] mac_tcr: Address to store time stamp control register read value - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static inline int mgbe_poll_for_tsinit_complete( - struct osi_core_priv_data *osi_core, - unsigned int *mac_tcr) -{ - unsigned int retry = 0U; - - while (retry < OSI_POLL_COUNT) { - /* Read and Check TSINIT in MAC_Timestamp_Control register */ - *mac_tcr = osi_readla(osi_core, (unsigned char *) - osi_core->base + MGBE_MAC_TCR); - if ((*mac_tcr & MGBE_MAC_TCR_TSINIT) == 0U) { - return 0; - } - - retry++; - osi_core->osd_ops.udelay(OSI_DELAY_1000US); - } - - return -1; -} - -/** - * @brief mgbe_set_systime - Set system time - * - * Algorithm: Updates system time (seconds and nano seconds) - * in hardware registers - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] sec: Seconds to be configured - * @param[in] nsec: Nano Seconds to be configured - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. - */ -static int mgbe_set_systime_to_mac(struct osi_core_priv_data *osi_core, - unsigned int sec, - unsigned int nsec) -{ - unsigned int mac_tcr; - void *addr = osi_core->base; - int ret; - - /* To be sure previous write was flushed (if Any) */ - ret = mgbe_poll_for_tsinit_complete(osi_core, &mac_tcr); - if (ret == -1) { - return -1; - } - - /* write seconds value to MAC_System_Time_Seconds_Update register */ - osi_writela(osi_core, sec, (unsigned char *)addr + MGBE_MAC_STSUR); - - /* write nano seconds value to MAC_System_Time_Nanoseconds_Update - * register - */ - osi_writela(osi_core, nsec, (unsigned char *)addr + MGBE_MAC_STNSUR); - - /* issue command to update the configured secs and nsecs values */ - mac_tcr |= MGBE_MAC_TCR_TSINIT; - osi_writela(osi_core, mac_tcr, (unsigned char *)addr + MGBE_MAC_TCR); - - ret = mgbe_poll_for_tsinit_complete(osi_core, &mac_tcr); - if (ret == -1) { - return -1; - } - - return 0; -} - -/** - * @brief mgbe_poll_for_addend_complete - Poll for addend value write complete - * - * Algorithm: Read TSADDREG 
value from MAC TCR register until it is - * equal to zero. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] mac_tcr: Address to store time stamp control register read value - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. - */ -static inline int mgbe_poll_for_addend_complete( - struct osi_core_priv_data *osi_core, - unsigned int *mac_tcr) -{ - unsigned int retry = 0U; - - /* Poll */ - while (retry < OSI_POLL_COUNT) { - /* Read and Check TSADDREG in MAC_Timestamp_Control register */ - *mac_tcr = osi_readla(osi_core, (unsigned char *) - osi_core->base + MGBE_MAC_TCR); - if ((*mac_tcr & MGBE_MAC_TCR_TSADDREG) == 0U) { - return 0; - } - - retry++; - osi_core->osd_ops.udelay(OSI_DELAY_1000US); - } - - return -1; -} - -/** - * @brief mgbe_config_addend - Configure addend - * - * Algorithm: Updates the Addend value in HW register - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] addend: Addend value to be configured - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static int mgbe_config_addend(struct osi_core_priv_data *osi_core, - unsigned int addend) -{ - unsigned int mac_tcr; - void *addr = osi_core->base; - int ret; - - /* To be sure previous write was flushed (if Any) */ - ret = mgbe_poll_for_addend_complete(osi_core, &mac_tcr); - if (ret == -1) { - return -1; - } - - /* write addend value to MAC_Timestamp_Addend register */ - osi_writela(osi_core, addend, (unsigned char *)addr + MGBE_MAC_TAR); - - /* issue command to update the configured addend value */ - mac_tcr |= MGBE_MAC_TCR_TSADDREG; - osi_writela(osi_core, mac_tcr, (unsigned char *)addr + MGBE_MAC_TCR); - - ret = mgbe_poll_for_addend_complete(osi_core, &mac_tcr); - if (ret == -1) { - return -1; - } - - return 0; -} - /** * @brief mgbe_poll_for_update_ts_complete - Poll for update time stamp * * Algorithm: Read time stamp update value from TCR register until it is * equal to zero. * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. + * @param[in] osi_core: OSI core private data structure. * @param[in] mac_tcr: Address to store time stamp control register read value * * @note MAC should be init and started. see osi_start_mac() @@ -5687,25 +3651,27 @@ static int mgbe_config_addend(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static inline int mgbe_poll_for_update_ts_complete( +static inline nve32_t mgbe_poll_for_update_ts_complete( struct osi_core_priv_data *osi_core, - unsigned int *mac_tcr) + nveu32_t *mac_tcr) { - unsigned int retry = 0U; + nveu32_t retry = 0U; + nve32_t ret = -1; while (retry < OSI_POLL_COUNT) { /* Read and Check TSUPDT in MAC_Timestamp_Control register */ - *mac_tcr = osi_readla(osi_core, (unsigned char *) + *mac_tcr = osi_readla(osi_core, (nveu8_t *) osi_core->base + MGBE_MAC_TCR); if ((*mac_tcr & MGBE_MAC_TCR_TSUPDT) == 0U) { - return 0; + ret = 0; + break; } retry++; osi_core->osd_ops.udelay(OSI_DELAY_1000US); } - return -1; + return ret; } /** @@ -5713,8 +3679,7 @@ static inline int mgbe_poll_for_update_ts_complete( * * Algorithm: Update MAC time with system time * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. + * @param[in] osi_core: OSI core private data structure. * @param[in] sec: Seconds to be configured * @param[in] nsec: Nano seconds to be configured * @param[in] add_sub: To decide on add/sub with system time @@ -5726,21 +3691,25 @@ static inline int mgbe_poll_for_update_ts_complete( * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_adjust_mactime(struct osi_core_priv_data *osi_core, - unsigned int sec, unsigned int nsec, - unsigned int add_sub, - unsigned int one_nsec_accuracy) +static nve32_t mgbe_adjust_mactime(struct osi_core_priv_data *const osi_core, + const nveu32_t sec, const nveu32_t nsec, + const nveu32_t add_sub, + const nveu32_t one_nsec_accuracy) { void *addr = osi_core->base; - unsigned int mac_tcr; - unsigned int value = 0; - unsigned long long temp = 0; - int ret; + nveu32_t mac_tcr; + nveu32_t value = 0; + nveul64_t temp = 0; + nveu32_t temp_sec; + nveu32_t temp_nsec; + nve32_t ret = 0; + temp_sec = sec; + temp_nsec = nsec; /* To be sure previous write was flushed (if Any) */ ret = mgbe_poll_for_update_ts_complete(osi_core, &mac_tcr); if (ret == -1) { - return -1; + goto fail; } if (add_sub != 0U) { @@ -5748,9 +3717,9 @@ static int mgbe_adjust_mactime(struct osi_core_priv_data *osi_core, * the system time, then MAC_STSUR reg should be * programmed with (2^32 – ) */ - temp = (TWO_POWER_32 - sec); + temp = (TWO_POWER_32 - temp_sec); if (temp < UINT_MAX) { - sec = (unsigned int)temp; + temp_sec = (nveu32_t)temp; } else { /* do nothing here */ } @@ -5762,192 +3731,35 @@ static int mgbe_adjust_mactime(struct osi_core_priv_data *osi_core, * (2^32 - if MAC_TCR.TSCTRLSSR is reset) */ if (one_nsec_accuracy == OSI_ENABLE) { - if (nsec < UINT_MAX) { - nsec = (TEN_POWER_9 - nsec); + if (temp_nsec < UINT_MAX) { + temp_nsec = (TEN_POWER_9 - temp_nsec); } } else { - if (nsec < UINT_MAX) { - nsec = (TWO_POWER_31 - nsec); + if (temp_nsec < UINT_MAX) { + temp_nsec = (TWO_POWER_31 - temp_nsec); } } } /* write seconds value to MAC_System_Time_Seconds_Update register */ - osi_writela(osi_core, sec, (unsigned char *)addr + MGBE_MAC_STSUR); + osi_writela(osi_core, temp_sec, (nveu8_t *)addr + MGBE_MAC_STSUR); /* write nano seconds value and add_sub to * MAC_System_Time_Nanoseconds_Update register */ - value |= nsec; + value |= temp_nsec; value |= (add_sub << 
MGBE_MAC_STNSUR_ADDSUB_SHIFT); - osi_writela(osi_core, value, (unsigned char *)addr + MGBE_MAC_STNSUR); + osi_writela(osi_core, value, (nveu8_t *)addr + MGBE_MAC_STNSUR); /* issue command to initialize system time with the value * specified in MAC_STSUR and MAC_STNSUR */ mac_tcr |= MGBE_MAC_TCR_TSUPDT; - osi_writela(osi_core, mac_tcr, (unsigned char *)addr + MGBE_MAC_TCR); + osi_writela(osi_core, mac_tcr, (nveu8_t *)addr + MGBE_MAC_TCR); ret = mgbe_poll_for_update_ts_complete(osi_core, &mac_tcr); - if (ret == -1) { - return -1; - } - - return 0; -} - -/** - * @brief mgbe_config_tscr - Configure Time Stamp Register - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] ptp_filter: PTP rx filter parameters - * - * @note MAC should be init and started. see osi_start_mac() - */ -static void mgbe_config_tscr(struct osi_core_priv_data *osi_core, - unsigned int ptp_filter) -{ - struct core_local *l_core = (struct core_local *)osi_core; - unsigned int mac_tcr = 0; - nveu32_t value = 0x0U; - void *addr = osi_core->base; - - if (ptp_filter != OSI_DISABLE) { - mac_tcr = (OSI_MAC_TCR_TSENA | - OSI_MAC_TCR_TSCFUPDT | - OSI_MAC_TCR_TSCTRLSSR); - - if ((ptp_filter & OSI_MAC_TCR_SNAPTYPSEL_1) == - OSI_MAC_TCR_SNAPTYPSEL_1) { - mac_tcr |= OSI_MAC_TCR_SNAPTYPSEL_1; - } - if ((ptp_filter & OSI_MAC_TCR_SNAPTYPSEL_2) == - OSI_MAC_TCR_SNAPTYPSEL_2) { - mac_tcr |= OSI_MAC_TCR_SNAPTYPSEL_2; - } - if ((ptp_filter & OSI_MAC_TCR_SNAPTYPSEL_3) == - OSI_MAC_TCR_SNAPTYPSEL_3) { - mac_tcr |= OSI_MAC_TCR_SNAPTYPSEL_3; - } - if ((ptp_filter & OSI_MAC_TCR_TSIPV4ENA) == - OSI_MAC_TCR_TSIPV4ENA) { - mac_tcr |= OSI_MAC_TCR_TSIPV4ENA; - } - if ((ptp_filter & OSI_MAC_TCR_TSIPV6ENA) == - OSI_MAC_TCR_TSIPV6ENA) { - mac_tcr |= OSI_MAC_TCR_TSIPV6ENA; - } - if ((ptp_filter & OSI_MAC_TCR_TSEVENTENA) == - OSI_MAC_TCR_TSEVENTENA) { - mac_tcr |= OSI_MAC_TCR_TSEVENTENA; - } - if ((ptp_filter & OSI_MAC_TCR_TSMASTERENA) == - OSI_MAC_TCR_TSMASTERENA) { - 
mac_tcr |= OSI_MAC_TCR_TSMASTERENA; - } - if ((ptp_filter & OSI_MAC_TCR_TSVER2ENA) == - OSI_MAC_TCR_TSVER2ENA) { - mac_tcr |= OSI_MAC_TCR_TSVER2ENA; - } - if ((ptp_filter & OSI_MAC_TCR_TSIPENA) == - OSI_MAC_TCR_TSIPENA) { - mac_tcr |= OSI_MAC_TCR_TSIPENA; - } - if ((ptp_filter & OSI_MAC_TCR_AV8021ASMEN) == - OSI_MAC_TCR_AV8021ASMEN) { - mac_tcr |= OSI_MAC_TCR_AV8021ASMEN; - } - if ((ptp_filter & OSI_MAC_TCR_TSENALL) == - OSI_MAC_TCR_TSENALL) { - mac_tcr |= OSI_MAC_TCR_TSENALL; - } - if ((ptp_filter & OSI_MAC_TCR_CSC) == - OSI_MAC_TCR_CSC) { - mac_tcr |= OSI_MAC_TCR_CSC; - } - } else { - /* Disabling the MAC time stamping */ - mac_tcr = OSI_DISABLE; - } - - osi_writela(osi_core, mac_tcr, (unsigned char *)addr + MGBE_MAC_TCR); - - value = osi_readla(osi_core, (nveu8_t *)addr + MGBE_MAC_PPS_CTL); - value &= ~MGBE_MAC_PPS_CTL_PPSCTRL0; - if (l_core->pps_freq == OSI_ENABLE) { - value |= OSI_ENABLE; - } - osi_writela(osi_core, value, (nveu8_t *)addr + MGBE_MAC_PPS_CTL); -} - -/** - * @brief mgbe_config_ssir - Configure SSIR - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] ptp_clock: PTP required clock frequency - * - * @note MAC should be init and started. see osi_start_mac() - */ -static void mgbe_config_ssir(struct osi_core_priv_data *const osi_core, - const unsigned int ptp_clock) -{ - unsigned long long val; - unsigned int mac_tcr; - void *addr = osi_core->base; - - mac_tcr = osi_readla(osi_core, (unsigned char *)addr + MGBE_MAC_TCR); - - /* convert the PTP required clock frequency to nano second. 
- * formula is : ((1/ptp_clock) * 1000000000) - * where, ptp_clock = OSI_PTP_REQ_CLK_FREQ if FINE correction - * and ptp_clock = PTP reference clock if COARSE correction - */ - if ((mac_tcr & MGBE_MAC_TCR_TSCFUPDT) == MGBE_MAC_TCR_TSCFUPDT) { - if (osi_core->pre_si == OSI_ENABLE) { - val = OSI_PTP_SSINC_16; - } else { - /* For silicon */ - val = OSI_PTP_SSINC_4; - } - } else { - val = ((1U * OSI_NSEC_PER_SEC) / ptp_clock); - } - - /* 0.465ns accurecy */ - if ((mac_tcr & MGBE_MAC_TCR_TSCTRLSSR) == 0U) { - if (val < UINT_MAX) { - val = (val * 1000U) / 465U; - } - } - - val |= (val << MGBE_MAC_SSIR_SSINC_SHIFT); - - /* update Sub-second Increment Value */ - if (val < UINT_MAX) { - osi_writela(osi_core, (unsigned int)val, - (unsigned char *)addr + MGBE_MAC_SSIR); - } -} - -/** - * @brief mgbe_set_mode - Setting the mode. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] mode: mode to be set. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: Yes - * @retval 0 - */ -static nve32_t mgbe_set_mode(OSI_UNUSED - struct osi_core_priv_data *const osi_core, - OSI_UNUSED const nve32_t mode) -{ - return 0; +fail: + return ret; } /** @@ -5964,7 +3776,7 @@ static nve32_t mgbe_set_mode(OSI_UNUSED * @retval 0 */ static nveu32_t mgbe_read_reg(struct osi_core_priv_data *const osi_core, - const nve32_t reg) + const nve32_t reg) { return osi_readla(osi_core, (nveu8_t *)osi_core->base + reg); } @@ -6033,25 +3845,7 @@ static nveu32_t mgbe_write_macsec_reg(struct osi_core_priv_data *const osi_core, } #endif /* MACSEC_SUPPORT */ -/** - * @brief mgbe_validate_core_regs - Validates MGBE core registers. - * - * @param[in] osi_core: OSI core private data structure. 
- * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: Yes - * @retval 0 - */ -static nve32_t mgbe_validate_core_regs( - OSI_UNUSED - struct osi_core_priv_data *const osi_core) -{ - return 0; -} - +#ifndef OSI_STRIPPED_LIB /** * @brief eqos_write_reg - Write a reg * @@ -6114,8 +3908,9 @@ static void mgbe_set_mdc_clk_rate(OSI_UNUSED const nveu64_t csr_clk_rate) { } +#endif /* !OSI_STRIPPED_LIB */ -#ifdef MACSEC_SUPPORT +#if defined(MACSEC_SUPPORT) && !defined(OSI_STRIPPED_LIB) /** * @brief mgbe_config_for_macsec - Configure MAC according to macsec IAS * @@ -6145,9 +3940,9 @@ static void mgbe_config_for_macsec(struct osi_core_priv_data *const osi_core, nveu32_t value = 0U, temp = 0U; if ((enable != OSI_ENABLE) && (enable != OSI_DISABLE)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Failed to config MGBE per MACSEC\n", 0ULL); - return; + goto done; } /* stop MAC Tx */ mgbe_config_mac_tx(osi_core, OSI_DISABLE); @@ -6209,6 +4004,8 @@ static void mgbe_config_for_macsec(struct osi_core_priv_data *const osi_core, 0ULL); } } +done: + return; } #endif /* MACSEC_SUPPORT */ @@ -6217,68 +4014,46 @@ static void mgbe_config_for_macsec(struct osi_core_priv_data *const osi_core, */ void mgbe_init_core_ops(struct core_ops *ops) { - ops->poll_for_swr = mgbe_poll_for_swr; ops->core_init = mgbe_core_init; - ops->core_deinit = mgbe_core_deinit; - ops->validate_regs = mgbe_validate_core_regs; - ops->start_mac = mgbe_start_mac; - ops->stop_mac = mgbe_stop_mac; ops->handle_common_intr = mgbe_handle_common_intr; - /* only MGBE supports full duplex */ - ops->set_mode = mgbe_set_mode; - /* by default speed is 10G */ - ops->set_speed = mgbe_set_speed; ops->pad_calibrate = mgbe_pad_calibrate; - ops->set_mdc_clk_rate = mgbe_set_mdc_clk_rate; - ops->flush_mtl_tx_queue = mgbe_flush_mtl_tx_queue; - ops->config_mac_loopback = mgbe_config_mac_loopback; + ops->update_mac_addr_low_high_reg = 
mgbe_update_mac_addr_low_high_reg; + ops->adjust_mactime = mgbe_adjust_mactime; + ops->read_mmc = mgbe_read_mmc; + ops->write_phy_reg = mgbe_write_phy_reg; + ops->read_phy_reg = mgbe_read_phy_reg; + ops->get_hw_features = mgbe_get_hw_features; + ops->read_reg = mgbe_read_reg; + ops->write_reg = mgbe_write_reg; ops->set_avb_algorithm = mgbe_set_avb_algorithm; - ops->get_avb_algorithm = mgbe_get_avb_algorithm, - ops->config_fw_err_pkts = mgbe_config_fw_err_pkts; + ops->get_avb_algorithm = mgbe_get_avb_algorithm; + ops->config_frp = mgbe_config_frp; + ops->update_frp_entry = mgbe_update_frp_entry; + ops->update_frp_nve = mgbe_update_frp_nve; +#ifdef MACSEC_SUPPORT + ops->read_macsec_reg = mgbe_read_macsec_reg; + ops->write_macsec_reg = mgbe_write_macsec_reg; +#ifndef OSI_STRIPPED_LIB + ops->macsec_config_mac = mgbe_config_for_macsec; +#endif /* !OSI_STRIPPED_LIB */ +#endif /* MACSEC_SUPPORT */ + ops->config_l3l4_filters = mgbe_config_l3l4_filters; +#ifndef OSI_STRIPPED_LIB ops->config_tx_status = mgbe_config_tx_status; ops->config_rx_crc_check = mgbe_config_rx_crc_check; ops->config_flow_control = mgbe_config_flow_control; ops->config_arp_offload = mgbe_config_arp_offload; ops->config_ptp_offload = mgbe_config_ptp_offload; - ops->config_rxcsum_offload = mgbe_config_rxcsum_offload; - ops->config_mac_pkt_filter_reg = mgbe_config_mac_pkt_filter_reg; - ops->update_mac_addr_low_high_reg = mgbe_update_mac_addr_low_high_reg; - ops->config_l3_l4_filter_enable = mgbe_config_l3_l4_filter_enable; - ops->config_l3_filters = mgbe_config_l3_filters; - ops->update_ip4_addr = mgbe_update_ip4_addr; - ops->update_ip6_addr = mgbe_update_ip6_addr; - ops->config_l4_filters = mgbe_config_l4_filters; - ops->update_l4_port_no = mgbe_update_l4_port_no; ops->config_vlan_filtering = mgbe_config_vlan_filtering; - ops->set_systime_to_mac = mgbe_set_systime_to_mac; - ops->config_addend = mgbe_config_addend; - ops->adjust_mactime = mgbe_adjust_mactime; - ops->config_tscr = mgbe_config_tscr; - 
ops->config_ssir = mgbe_config_ssir, - ops->config_ptp_rxq = mgbe_config_ptp_rxq; - ops->write_phy_reg = mgbe_write_phy_reg; - ops->read_phy_reg = mgbe_read_phy_reg; - ops->save_registers = mgbe_save_registers; - ops->restore_registers = mgbe_restore_registers; - ops->read_mmc = mgbe_read_mmc; ops->reset_mmc = mgbe_reset_mmc; ops->configure_eee = mgbe_configure_eee; - ops->get_hw_features = mgbe_get_hw_features; + ops->set_mdc_clk_rate = mgbe_set_mdc_clk_rate; + ops->config_mac_loopback = mgbe_config_mac_loopback; ops->config_rss = mgbe_config_rss; - ops->hw_config_est = mgbe_hw_config_est; - ops->hw_config_fpe = mgbe_hw_config_fpe; - ops->config_frp = mgbe_config_frp; - ops->update_frp_entry = mgbe_update_frp_entry; - ops->update_frp_nve = mgbe_update_frp_nve; - ops->ptp_tsc_capture = mgbe_ptp_tsc_capture; - ops->write_reg = mgbe_write_reg; - ops->read_reg = mgbe_read_reg; -#ifdef MACSEC_SUPPORT - ops->write_macsec_reg = mgbe_write_macsec_reg; - ops->read_macsec_reg = mgbe_read_macsec_reg; - ops->macsec_config_mac = mgbe_config_for_macsec; -#endif /* MACSEC_SUPPORT */ + ops->config_ptp_rxq = mgbe_config_ptp_rxq; +#endif /* !OSI_STRIPPED_LIB */ #ifdef HSI_SUPPORT ops->core_hsi_configure = mgbe_hsi_configure; + ops->core_hsi_inject_err = mgbe_hsi_inject_err; #endif }; diff --git a/osi/core/mgbe_core.h b/osi/core/mgbe_core.h index 8fc3368..691432b 100644 --- a/osi/core/mgbe_core.h +++ b/osi/core/mgbe_core.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -23,126 +23,47 @@ #ifndef MGBE_CORE_H_ #define MGBE_CORE_H_ -/** - * @addtogroup - MGBE-LPI LPI configuration macros - * - * @brief LPI timers and config register field masks. 
- * @{ - */ -/* LPI LS timer - minimum time (in milliseconds) for which the link status from - * PHY should be up before the LPI pattern can be transmitted to the PHY. - * Default 1sec. - */ -#define MGBE_DEFAULT_LPI_LS_TIMER (unsigned int)1000 -#define MGBE_LPI_LS_TIMER_MASK 0x3FFU -#define MGBE_LPI_LS_TIMER_SHIFT 16U -/* LPI TW timer - minimum time (in microseconds) for which MAC wait after it - * stops transmitting LPI pattern before resuming normal tx. - * Default 21us - */ -#define MGBE_DEFAULT_LPI_TW_TIMER 0x15U -#define MGBE_LPI_TW_TIMER_MASK 0xFFFFU -/* LPI entry timer - Time in microseconds that MAC will wait to enter LPI mode - * after all tx is complete. - * Default 1sec. - */ -#define MGBE_LPI_ENTRY_TIMER_MASK 0xFFFF8U -/* 1US TIC counter - This counter should be programmed with the number of clock - * cycles of CSR clock that constitutes a period of 1us. - * it should be APB clock in MHZ i.e 480-1 for silicon and 13MHZ-1 for uFPGA - */ -#define MGBE_1US_TIC_COUNTER 0x1DF - -/** @} */ - -/** - * @addtogroup MGBE-MAC MAC register offsets - * - * @brief MGBE MAC register offsets - * @{ - */ -#define MGBE_MAC_TMCR 0x0000 -#define MGBE_MAC_RMCR 0x0004 -#define MGBE_MAC_PFR 0x0008 -#define MGBE_MAC_HTR_REG(x) ((0x0004U * (x)) + 0x0010U) -#define MGBE_MAC_VLAN_TR 0x0050 -#define MGBE_MAC_VLANTIR 0x0060 -#define MGBE_MAC_QX_TX_FLW_CTRL(x) ((0x0004U * (x)) + 0x0070U) -#define MGBE_MAC_RX_FLW_CTRL 0x0090 -#define MGBE_MAC_RQC4R 0x0094 -#define MGBE_MAC_RQC0R 0x00A0 -#define MGBE_MAC_RQC1R 0x00A4 -#define MGBE_MAC_RQC2R 0x00A8 -#define MGBE_MAC_ISR 0x00B0 -#define MGBE_MAC_IER 0x00B4 -#define MGBE_MAC_RX_TX_STS 0x00B8 -#define MGBE_MAC_PMTCSR 0x00C0 -#define MGBE_MAC_LPI_CSR 0x00D0 -#define MGBE_MAC_LPI_TIMER_CTRL 0x00D4 -#define MGBE_MAC_LPI_EN_TIMER 0x00D8 -#define MGBE_MAC_1US_TIC_COUNT 0x00DC -#define MGBE_MAC_EXT_CNF 0x0140 -#define MGBE_MDIO_SCCD 0x0204 -#define MGBE_MDIO_SCCA 0x0200 -#define MGBE_MAC_FPE_CTS 0x0280 -#define MGBE_MAC_CSR_SW_CTL 0x0290 
-#define MGBE_MAC_MA0HR 0x0300 -#define MGBE_MAC_ADDRH(x) ((0x0008U * (x)) + 0x0300U) -#define MGBE_MAC_MA0LR 0x0304 -#define MGBE_MAC_ADDRL(x) ((0x0008U * (x)) + 0x0304U) -#define MGBE_MAC_INDIR_AC 0x0700 -#define MGBE_MAC_INDIR_DATA 0x0704 -#define MGBE_MMC_TX_INTR_EN 0x0810 -#define MGBE_MMC_RX_INTR_EN 0x080C -#define MGBE_MMC_CNTRL 0x0800 -#define MGBE_MAC_L3L4_ADDR_CTR 0x0C00 -#define MGBE_MAC_L3L4_DATA 0x0C04 -#define MGBE_MAC_ARPPA 0x0C10 -#define MGBE_MAC_RSS_CTRL 0x0C80 -#define MGBE_MAC_RSS_ADDR 0x0C88 -#define MGBE_MAC_RSS_DATA 0x0C8C -#define MGBE_MAC_TCR 0x0D00 -#define MGBE_MAC_SSIR 0x0D04 -#define MGBE_MAC_STSR 0x0D08 -#define MGBE_MAC_STNSR 0x0D0C -#define MGBE_MAC_STSUR 0x0D10 -#define MGBE_MAC_STNSUR 0x0D14 -#define MGBE_MAC_TAR 0x0D18 -#define MGBE_MAC_TSS 0x0D20 -#define MGBE_MAC_TSNSSEC 0x0D30 -#define MGBE_MAC_TSSEC 0x0D34 -#define MGBE_MAC_TSPKID 0x0D38 -#define MGBE_MAC_PPS_CTL 0x0D70 -#define MGBE_MAC_PTO_CR 0x0DC0 -#define MGBE_MAC_PIDR0 0x0DC4 -#define MGBE_MAC_PIDR1 0x0DC8 -#define MGBE_MAC_PIDR2 0x0DCC -/** @} */ - -/** - * @addtogroup MGBE-WRAPPER MGBE Wrapper register offsets - * - * @brief MGBE Wrapper register offsets - * @{ - */ -#define MGBE_WRAP_AXI_ASID0_CTRL 0x8400 -#define MGBE_WRAP_AXI_ASID1_CTRL 0x8404 -#define MGBE_WRAP_AXI_ASID2_CTRL 0x8408 -#define MGBE_WRAP_COMMON_INTR_ENABLE 0x8704 -#define MGBE_REGISTER_PARITY_ERR OSI_BIT(5) -#define MGBE_CORE_CORRECTABLE_ERR OSI_BIT(4) -#define MGBE_CORE_UNCORRECTABLE_ERR OSI_BIT(3) -#define MGBE_MAC_SBD_INTR OSI_BIT(2) -#define MGBE_WRAP_COMMON_INTR_STATUS 0x8708 -#define MGBE_VIRT_INTR_APB_CHX_CNTRL(x) (0x8200U + ((x) * 4U)) -#define MGBE_VIRTUAL_APB_ERR_CTRL 0x8300 -#define MGBE_WRAP_SYNC_TSC_PTP_CAPTURE 0x800CU -#define MGBE_WRAP_TSC_CAPTURE_LOW 0x8010U -#define MGBE_WRAP_TSC_CAPTURE_HIGH 0x8014U -#define MGBE_WRAP_PTP_CAPTURE_LOW 0x8018U -#define MGBE_WRAP_PTP_CAPTURE_HIGH 0x801CU -/** @} */ - +#ifndef OSI_STRIPPED_LIB +#define MGBE_MAC_PFR 0x0008 +#define MGBE_MAC_RX_FLW_CTRL 
0x0090 +#define MGBE_MAC_RQC2R 0x00A8 +#define MGBE_MAC_QX_TX_FLW_CTRL(x) ((0x0004U * (x)) + 0x0070U) +#define MGBE_MAC_ARPPA 0x0C10 +#define MGBE_MAC_LPI_CSR 0x00D0 +#define MGBE_MAC_LPI_TIMER_CTRL 0x00D4 +#define MGBE_MAC_LPI_EN_TIMER 0x00D8 +#define MGBE_MAC_RSS_CTRL 0x0C80 +#define MGBE_MAC_RSS_ADDR 0x0C88 +#define MGBE_MAC_RSS_DATA 0x0C8C +#define MGBE_MAC_STSR 0x0D08 +#define MGBE_MAC_STNSR 0x0D0C +#define MGBE_MAC_PTO_CR 0x0DC0 +#define MGBE_MAC_PIDR0 0x0DC4 +#define MGBE_MAC_PIDR1 0x0DC8 +#define MGBE_MAC_PIDR2 0x0DCC +#define MGBE_MAC_PMTCSR 0x00C0 +#define MGBE_MAC_HTR_REG(x) ((0x0004U * (x)) + 0x0010U) +#define MGBE_WRAP_AXI_ASID0_CTRL 0x8400 +#define MGBE_WRAP_AXI_ASID1_CTRL 0x8404 +#define MGBE_WRAP_AXI_ASID2_CTRL 0x8408 +#define MGBE_MAC_PFR_VTFE OSI_BIT(16) +#define MGBE_MAC_PFR_IPFE OSI_BIT(20) +#define MGBE_MAC_PFR_IPFE_SHIFT 20 +#define MGBE_SID_VAL1(x) (((x) << 24U) |\ + ((x) << 16U) |\ + ((x) << 8U) |\ + (x)) +#define MGBE_SID_VAL2(x) (((x) << 8U) |\ + (x)) +#define MGBE0_SID ((nveu32_t)0x6U) +#define MGBE1_SID ((nveu32_t)0x49U) +#define MGBE2_SID ((nveu32_t)0x4AU) +#define MGBE3_SID ((nveu32_t)0x4BU) +#define MGBE_MAC_PAUSE_TIME 0xFFFF0000U +#define MGBE_MAC_PAUSE_TIME_MASK 0xFFFF0000U +#define MGBE_MAC_VLAN_TR_VTHM OSI_BIT(25) +#define MGBE_MAC_VLAN_TR_VTIM OSI_BIT(17) +#define MGBE_MAC_VLAN_TR_VTIM_SHIFT 17 /** * @addtogroup MGBE MAC hash table defines * @@ -153,8 +74,323 @@ #define MGBE_MAX_HTR_REGS 4U /** @} */ +#define MGBE_MAX_VLAN_FILTER 32U +#define MGBE_MAC_RX_FLW_CTRL_RFE OSI_BIT(0) +#define MGBE_MAC_STNSR_TSSS_MASK 0x7FFFFFFFU +#define MGBE_MAC_TCR_SNAPTYPSEL_SHIFT 16U +#define MGBE_MAC_TCR_TSENMACADDR OSI_BIT(18) +#define MGBE_MAC_TMCR_IPG_MASK 0x700U +#define MGBE_MAC_RQC1R_PTPQ_SHIFT 24U +#define MGBE_MAC_RQC1R_PTPQ (OSI_BIT(27) | OSI_BIT(26) | \ + OSI_BIT(25) | OSI_BIT(24)) +#define MGBE_MAC_RMCR_LM OSI_BIT(10) +#define MGBE_MAC_RMCR_ARPEN OSI_BIT(31) +#define MGBE_MAC_QX_TX_FLW_CTRL_TFE OSI_BIT(1) +#define MGBE_MAC_TMCR_IFP 
OSI_BIT(11) +#define MGBE_MAC_RQC1R_TPQC0 OSI_BIT(21) +#define MGBE_MAC_RQC1R_OMCBCQ OSI_BIT(20) +#define MGBE_MAC_RSS_CTRL_RSSE OSI_BIT(0) +#define MGBE_MAC_RSS_CTRL_IP2TE OSI_BIT(1) +#define MGBE_MAC_RSS_CTRL_TCP4TE OSI_BIT(2) +#define MGBE_MAC_RSS_CTRL_UDP4TE OSI_BIT(3) +#define MGBE_MAC_RSS_ADDR_ADDRT OSI_BIT(2) +#define MGBE_MAC_RSS_ADDR_RSSIA_SHIFT 8U +#define MGBE_MAC_RSS_ADDR_OB OSI_BIT(0) +#define MGBE_MAC_RSS_ADDR_CT OSI_BIT(1) /** - * @addtogroup MGBE MAC Mode Select Group + * @addtogroup - MGBE-LPI LPI configuration macros + * + * @brief LPI timers and config register field masks. + * @{ + */ +/* LPI LS timer - minimum time (in milliseconds) for which the link status from + * PHY should be up before the LPI pattern can be transmitted to the PHY. + * Default 1sec. + */ +#define MGBE_DEFAULT_LPI_LS_TIMER ((nveu32_t)1000) +#define MGBE_LPI_LS_TIMER_MASK 0x3FFU +#define MGBE_LPI_LS_TIMER_SHIFT 16U +/* LPI TW timer - minimum time (in microseconds) for which MAC wait after it + * stops transmitting LPI pattern before resuming normal tx. + * Default 21us + */ +#define MGBE_DEFAULT_LPI_TW_TIMER 0x15U +#define MGBE_LPI_TW_TIMER_MASK 0xFFFFU +/* LPI entry timer - Time in microseconds that MAC will wait to enter LPI mode + * after all tx is complete. + * Default 1sec. + */ +#define MGBE_LPI_ENTRY_TIMER_MASK 0xFFFF8U +/* 1US TIC counter - This counter should be programmed with the number of clock + * cycles of CSR clock that constitutes a period of 1us. 
+ * it should be APB clock in MHZ i.e 480-1 for silicon and 13MHZ-1 for uFPGA + */ +#define MGBE_1US_TIC_COUNTER 0x1DF +#define MGBE_MAC_1US_TIC_COUNT 0x00DC +/** @} */ +#define MGBE_MAC_PTO_CR_PTOEN OSI_BIT(0) +#define MGBE_MAC_PTO_CR_ASYNCEN OSI_BIT(1) +#define MGBE_MAC_PTO_CR_APDREQEN OSI_BIT(2) +#define MGBE_MAC_PTO_CR_DN (OSI_BIT(15) | OSI_BIT(14) | \ + OSI_BIT(13) | OSI_BIT(12) | \ + OSI_BIT(11) | OSI_BIT(10) | \ + OSI_BIT(9) | OSI_BIT(8)) +#define MGBE_MAC_PTO_CR_DN_SHIFT 8U +#define MGBE_DMA_CHX_STATUS_RPS OSI_BIT(8) +#define MGBE_DMA_CHX_STATUS_TPS OSI_BIT(1) +#define MGBE_DMA_CHX_STATUS_TBU OSI_BIT(2) +#define MGBE_DMA_CHX_STATUS_RBU OSI_BIT(7) +#define MGBE_DMA_CHX_STATUS_FBE OSI_BIT(12) + +#define MGBE_MAC_LPI_CSR_LPITE OSI_BIT(20) +#define MGBE_MAC_LPI_CSR_LPITXA OSI_BIT(19) +#define MGBE_MAC_LPI_CSR_PLS OSI_BIT(17) +#define MGBE_MAC_LPI_CSR_LPIEN OSI_BIT(16) +#define MGBE_MAC_PFR_VTFE_SHIFT 16 +#define MGBE_MAC_PIDR_PID_MASK 0XFFFFU + +#define MGBE_MTL_RXP_BYPASS_CNT 2U +#define MGBE_MAC_FPE_CTS_SVER OSI_BIT(1) + +#endif /* !OSI_STRIPPED_LIB */ + +#define MGBE_MAC_RX_TX_STS 0x00B8 +#define MGBE_MTL_EST_CONTROL 0x1050 +#define MGBE_MTL_EST_OVERHEAD 0x1054 +#define MGBE_MTL_EST_STATUS 0x1058 +#define MGBE_MTL_EST_SCH_ERR 0x1060 +#define MGBE_MTL_EST_FRMS_ERR 0x1064 +#define MGBE_MTL_EST_ITRE 0x1070 +#define MGBE_MTL_EST_GCL_CONTROL 0x1080 +#define MGBE_MTL_EST_DATA 0x1084 +#define MGBE_MAC_RQC4R 0x0094 +#define MGBE_MAC_FPE_CTS 0x0280 +#define MGBE_MTL_RXP_CS 0x10A0 +#define MGBE_MTL_RXP_INTR_CS 0x10A4 +#define MGBE_MTL_RXP_IND_CS 0x10B0 +#define MGBE_MTL_RXP_IND_DATA 0x10B4 + +#define MGBE_MAC_TX_PCE OSI_BIT(13) +#define MGBE_MAC_TX_IHE OSI_BIT(12) +#define MGBE_MAC_TX_TJT OSI_BIT(0) +#define MGBE_MTL_TCQ_ETS_HCR(x) ((0x0080U * (x)) + 0x1120U) +#define MGBE_MTL_TCQ_ETS_LCR(x) ((0x0080U * (x)) + 0x1124U) +#define MGBE_MTL_TCQ_ETS_SSCR(x) ((0x0080U * (x)) + 0x111CU) +#define MGBE_MTL_OP_MODE 0x1000 +#define MGBE_MTL_INTR_STATUS 0x1020 +#define 
MGBE_MTL_FPE_CTS 0x1090 +#define MGBE_MTL_FPE_ADV 0x1094 + +#define MGBE_MTL_QINT_STATUS(x) ((0x0080U * (x)) + 0x1174U) +#define MGBE_MTL_TCQ_ETS_CR_AVALG_SHIFT 0U +#define MGBE_MTL_QINT_TXUNIFS OSI_BIT(0) +#define MGBE_MTL_TX_OP_MODE_Q2TCMAP (OSI_BIT(10) | OSI_BIT(9) |\ + OSI_BIT(8)) +#define MGBE_MTL_TX_OP_MODE_Q2TCMAP_SHIFT 8U +#define MGBE_MTL_TX_OP_MODE_TXQEN (OSI_BIT(3) | OSI_BIT(2)) +#define MGBE_MTL_TX_OP_MODE_TXQEN_SHIFT 2U +#define MGBE_MTL_TCQ_ETS_CR_CC OSI_BIT(3) +#define MGBE_MTL_TCQ_ETS_CR_CC_SHIFT 3U +#define MGBE_MTL_TCQ_ETS_QW_ISCQW_MASK 0x001FFFFFU +#define MGBE_MTL_TCQ_ETS_SSCR_SSC_MASK 0x0000FFFFU +#define MGBE_MTL_TCQ_ETS_HCR_HC_MASK 0x1FFFFFFFU +#define MGBE_MTL_TCQ_ETS_LCR_LC_MASK 0x1FFFFFFFU + +#define MGBE_8PTP_CYCLE 26U +#define MGBE_PTP_CLK_SPEED 312500000U +#define MGBE_DMA_ISR_MTLIS OSI_BIT(16) +#define MGBE_IMR_TXESIE OSI_BIT(13) +#define MGBE_IMR_FPEIE OSI_BIT(15) +#ifndef OSI_STRIPPED_LIB +#define MGBE_MAC_EXT_CNF_EIPG 0x1U +#define MGBE_MAC_EXT_CNF_EIPG_MASK 0x7FU +#endif /* !OSI_STRIPPED_LIB */ +#define MGBE_MAC_RQC4R_PMCBCQ (OSI_BIT(27) | OSI_BIT(26) | \ + OSI_BIT(25) | OSI_BIT(24)) +#define MGBE_MAC_RQC4R_PMCBCQ_SHIFT 24U +#define MGBE_MAC_RQC1R_RQ_SHIFT 4U +#define MGBE_MTL_EST_EEST OSI_BIT(0) +/* EST GCL controlOSI_BITmap */ +#define MGBE_MTL_EST_ADDR_SHIFT 8 +/*EST MTL interrupt STATUS and ERR*/ +#define MGBE_MTL_IS_ESTIS OSI_BIT(18) +/* MTL_EST_STATUS*/ +#define MGBE_MTL_EST_STATUS_CGCE OSI_BIT(4) +#define MGBE_MTL_EST_STATUS_HLBS OSI_BIT(3) +#define MGBE_MTL_EST_STATUS_HLBF OSI_BIT(2) +#define MGBE_MTL_EST_STATUS_BTRE OSI_BIT(1) +#define MGBE_MTL_EST_STATUS_SWLC OSI_BIT(0) +/* MAC FPE control/statusOSI_BITmap */ +#define MGBE_MAC_FPE_CTS_EFPE OSI_BIT(0) +#define MGBE_MAC_FPE_CTS_TRSP OSI_BIT(19) +#define MGBE_MAC_FPE_CTS_TVER OSI_BIT(18) +#define MGBE_MAC_FPE_CTS_RVER OSI_BIT(16) +#define MGBE_MAC_FPE_CTS_SRSP OSI_BIT(2) +/* MTL FPE adv registers */ +#define MGBE_MAC_IMR_FPEIS OSI_BIT(16) +#define MGBE_MAC_FPE_CTS_RRSP 
OSI_BIT(17) +/* MTL_EST_CONTROL */ +#define MGBE_MTL_EST_CONTROL_PTOV (OSI_BIT(23) | OSI_BIT(24) | \ + OSI_BIT(25) | OSI_BIT(26) | \ + OSI_BIT(27) | OSI_BIT(28) | \ + OSI_BIT(29) | OSI_BIT(30) | \ + OSI_BIT(31)) +#define MGBE_MTL_EST_CONTROL_PTOV_SHIFT 23U +#define MGBE_MTL_EST_PTOV_RECOMMEND 32U +#define MGBE_MTL_EST_CONTROL_CTOV (OSI_BIT(11) | OSI_BIT(12) | \ + OSI_BIT(13) | OSI_BIT(14) | \ + OSI_BIT(15) | OSI_BIT(16) | \ + OSI_BIT(17) | OSI_BIT(18) | \ + OSI_BIT(19) | OSI_BIT(20) | \ + OSI_BIT(21) | OSI_BIT(22)) +#define MGBE_MTL_EST_CONTROL_CTOV_SHIFT 11U +#define MGBE_MTL_EST_CTOV_RECOMMEND 42U +#define MGBE_MAC_RQC1R_RQ (OSI_BIT(7) | OSI_BIT(6) | \ + OSI_BIT(5) | OSI_BIT(4)) + +/** + * @addtogroup MGBE-MTL-FRP FRP Indirect Access register defines + * + * @brief MGBE MTL FRP register defines + * @{ + */ +#define MGBE_MTL_FRP_READ_UDELAY 1U +#define MGBE_MTL_FRP_READ_RETRY 1000U + +#define MGBE_MTL_OP_MODE_FRPE OSI_BIT(15) +/* FRP Control and Status register defines */ +#define MGBE_MTL_RXP_CS_RXPI OSI_BIT(31) +#define MGBE_MTL_RXP_CS_NPE (OSI_BIT(23) | OSI_BIT(22) | \ + OSI_BIT(21) | OSI_BIT(20) | \ + OSI_BIT(19) | OSI_BIT(18) | \ + OSI_BIT(17) | OSI_BIT(16)) +#define MGBE_MTL_RXP_CS_NPE_SHIFT 16U +#define MGBE_MTL_RXP_CS_NVE (OSI_BIT(7) | OSI_BIT(6) | \ + OSI_BIT(5) | OSI_BIT(4) | \ + OSI_BIT(3) | OSI_BIT(2) | \ + OSI_BIT(1) | OSI_BIT(0)) +/* FRP Interrupt Control and Status register */ +#define MGBE_MTL_RXP_INTR_CS_PDRFIE OSI_BIT(19) +#define MGBE_MTL_RXP_INTR_CS_FOOVIE OSI_BIT(18) +#define MGBE_MTL_RXP_INTR_CS_NPEOVIE OSI_BIT(17) +#define MGBE_MTL_RXP_INTR_CS_NVEOVIE OSI_BIT(16) +#define MGBE_MTL_RXP_INTR_CS_PDRFIS OSI_BIT(3) +#define MGBE_MTL_RXP_INTR_CS_FOOVIS OSI_BIT(2) +#define MGBE_MTL_RXP_INTR_CS_NPEOVIS OSI_BIT(1) +#define MGBE_MTL_RXP_INTR_CS_NVEOVIS OSI_BIT(0) +/* Indirect Instruction Table defines */ +#define MGBE_MTL_FRP_IE0(x) (((x) * 0x4U) + 0x0U) +#define MGBE_MTL_FRP_IE1(x) (((x) * 0x4U) + 0x1U) +#define MGBE_MTL_FRP_IE2(x) (((x) * 0x4U) + 
0x2U) +#define MGBE_MTL_FRP_IE3(x) (((x) * 0x4U) + 0x3U) +#define MGBE_MTL_FRP_IE2_DCH (OSI_BIT(31) | OSI_BIT(30) | \ + OSI_BIT(29) | OSI_BIT(28) | \ + OSI_BIT(27) | OSI_BIT(26) | \ + OSI_BIT(25) | OSI_BIT(24)) +#define MGBE_MTL_FRP_IE2_DCH_SHIFT 24U +#define MGBE_MTL_FRP_IE2_OKI (OSI_BIT(23) | OSI_BIT(22) | \ + OSI_BIT(21) | OSI_BIT(20) | \ + OSI_BIT(19) | OSI_BIT(18) | \ + OSI_BIT(17) | OSI_BIT(16)) +#define MGBE_MTL_FRP_IE2_OKI_SHIFT 16U +#define MGBE_MTL_FRP_IE2_FO (OSI_BIT(13) | OSI_BIT(12) | \ + OSI_BIT(11) | OSI_BIT(10) | \ + OSI_BIT(9) | OSI_BIT(8)) +#define MGBE_MTL_FRP_IE2_FO_SHIFT 8U +#define MGBE_MTL_FRP_IE2_NC OSI_BIT(3) +#define MGBE_MTL_FRP_IE2_IM OSI_BIT(2) +#define MGBE_MTL_FRP_IE2_RF OSI_BIT(1) +#define MGBE_MTL_FRP_IE2_AF OSI_BIT(0) +#define MGBE_MTL_FRP_IE3_DCH_MASK 0xFFFFU +/* Indirect register defines */ +#define MGBE_MTL_RXP_IND_CS_BUSY OSI_BIT(31) +#define MGBE_MTL_RXP_IND_CS_ACCSEL OSI_BIT(24) +#define MGBE_MTL_RXP_IND_CS_WRRDN OSI_BIT(16) +#define MGBE_MTL_RXP_IND_CS_ADDR (OSI_BIT(9) | OSI_BIT(8) | \ + OSI_BIT(7) | OSI_BIT(6) | \ + OSI_BIT(5) | OSI_BIT(4) | \ + OSI_BIT(3) | OSI_BIT(2) | \ + OSI_BIT(1) | OSI_BIT(0)) +/** @} */ + +/** + * @addtogroup MGBE MTL queue ETS algorithm mode + * + * @brief MTL queue algorithm type + * @{ + */ +#define OSI_MGBE_TXQ_AVALG_ETS 2U +#define MGBE_MTL_TCQ_ETS_CR_AVALG (OSI_BIT(1) | OSI_BIT(0)) +/** @} */ + +/** + * @addtogroup MGBE-MAC MAC register offsets + * + * @brief MGBE MAC register offsets + * @{ + */ +#define MGBE_MAC_TMCR 0x0000 +#define MGBE_MAC_RMCR 0x0004 +#define MGBE_MAC_VLAN_TR 0x0050 +#define MGBE_MAC_VLANTIR 0x0060 +#define MGBE_MAC_RQC0R 0x00A0 +#define MGBE_MAC_RQC1R 0x00A4 +#define MGBE_MAC_ISR 0x00B0 +#define MGBE_MAC_IER 0x00B4 +#define MGBE_MAC_EXT_CNF 0x0140 +#define MGBE_MDIO_SCCD 0x0204 +#define MGBE_MDIO_SCCA 0x0200 +#define MGBE_MAC_ADDRH(x) ((0x0008U * (x)) + 0x0300U) +#define MGBE_MAC_ADDRL(x) ((0x0008U * (x)) + 0x0304U) +#define MGBE_MAC_INDIR_AC 0x0700 +#define 
MGBE_MAC_INDIR_DATA 0x0704 +#define MGBE_MMC_TX_INTR_EN 0x0810 +#define MGBE_MMC_RX_INTR_EN 0x080C +#define MGBE_MMC_CNTRL 0x0800 +#define MGBE_MAC_L3L4_ADDR_CTR 0x0C00 +#define MGBE_MAC_L3L4_DATA 0x0C04 +#define MGBE_MAC_TCR 0x0D00 +#define MGBE_MAC_SSIR 0x0D04 +#define MGBE_MAC_STSUR 0x0D10 +#define MGBE_MAC_STNSUR 0x0D14 +#define MGBE_MAC_TAR 0x0D18 +#define MGBE_MAC_TSS 0x0D20 +#define MGBE_MAC_TSNSSEC 0x0D30 +#define MGBE_MAC_TSSEC 0x0D34 +#define MGBE_MAC_TSPKID 0x0D38 +#define MGBE_MAC_PPS_CTL 0x0D70 +/** @} */ + +/** + * @addtogroup MGBE-WRAPPER MGBE Wrapper register offsets + * + * @brief MGBE Wrapper register offsets + * @{ + */ +#define MGBE_WRAP_COMMON_INTR_ENABLE 0x8704 + +#ifdef HSI_SUPPORT +#define MGBE_REGISTER_PARITY_ERR OSI_BIT(5) +#define MGBE_CORE_CORRECTABLE_ERR OSI_BIT(4) +#define MGBE_CORE_UNCORRECTABLE_ERR OSI_BIT(3) + +#define MGBE_MTL_DEBUG_CONTROL 0x1008U +#define MGBE_MTL_DEBUG_CONTROL_FDBGEN OSI_BIT(0) +#define MGBE_MTL_DEBUG_CONTROL_DBGMOD OSI_BIT(1) +#define MGBE_MTL_DEBUG_CONTROL_FIFORDEN OSI_BIT(10) +#define MGBE_MTL_DEBUG_CONTROL_EIEE OSI_BIT(16) +#define MGBE_MTL_DEBUG_CONTROL_EIEC OSI_BIT(18) + +#endif +#define MGBE_MAC_SBD_INTR OSI_BIT(2) +#define MGBE_WRAP_COMMON_INTR_STATUS 0x8708 +#define MGBE_VIRT_INTR_APB_CHX_CNTRL(x) (0x8200U + ((x) * 4U)) +#define MGBE_VIRTUAL_APB_ERR_CTRL 0x8300 +/** @} */ + + +/** + * @addtogroup MGBE-MAC-MODE MAC Mode Select Group * * @brief MGBE MAC Indirect Access control and status for * Mode Select type defines. 
@@ -165,13 +401,6 @@ #define MGBE_MAC_INDIR_AC_OB_RETRY 10U #define MGBE_MAC_DCHSEL 0U -#define MGBE_MAC_PCCTRL 1U -#define MGBE_MAC_PCNTRL 2U -#define MGBE_MAC_DPCSEL 3U -#define MGBE_MAC_VPCSEL 4U -#define MGBE_MAC_LPCSEL 5U -#define MGBE_MAC_APCSEL 6U -#define MGBE_MAC_PC_STATUS 7U /* MGBE_MAC_INDIR_AC register defines */ #define MGBE_MAC_INDIR_AC_MSEL (OSI_BIT(19) | OSI_BIT(18) | \ @@ -182,84 +411,29 @@ OSI_BIT(11) | OSI_BIT(10) | \ OSI_BIT(9) | OSI_BIT(8)) #define MGBE_MAC_INDIR_AC_AOFF_SHIFT 8U -#define MGBE_MAC_INDIR_AC_AUTO OSI_BIT(5) #define MGBE_MAC_INDIR_AC_CMD OSI_BIT(1) #define MGBE_MAC_INDIR_AC_OB OSI_BIT(0) /** @} */ /** - * @addtogroup MGBE MAC L3L4 defines + * @addtogroup MGBE-L3L4 MAC L3L4 defines * * @brief MGBE L3L4 Address Control register * IDDR filter filed type defines * @{ */ -#define MGBE_MAX_VLAN_FILTER 32U #define MGBE_MAC_XB_WAIT 10U #define MGBE_MAC_L3L4_CTR 0x0 -#define MGBE_MAC_L4_ADDR 0x1 -#define MGBE_MAC_L3_AD0R 0x4 #define MGBE_MAC_L3_AD1R 0x5 +#ifndef OSI_STRIPPED_LIB +#define MGBE_MAC_L3_AD0R 0x4 #define MGBE_MAC_L3_AD2R 0x6 #define MGBE_MAC_L3_AD3R 0x7 - -#define MGBE_MAC_L3L4_CTR_DMCHEN0 OSI_BIT(31) -#define MGBE_MAC_L3L4_CTR_DMCHEN0_SHIFT 31 -#define MGBE_MAC_L3L4_CTR_DMCHN0 (OSI_BIT(24) | OSI_BIT(25) | \ - OSI_BIT(26) | OSI_BIT(27)) -#define MGBE_MAC_L3L4_CTR_DMCHN0_SHIFT 24 -#define MGBE_MAC_L3L4_CTR_L4DPIM0 OSI_BIT(21) -#define MGBE_MAC_L3L4_CTR_L4DPIM0_SHIFT 21 -#define MGBE_MAC_L3L4_CTR_L4DPM0 OSI_BIT(20) -#define MGBE_MAC_L3L4_CTR_L4SPIM0 OSI_BIT(19) -#define MGBE_MAC_L3L4_CTR_L4SPIM0_SHIFT 19 -#define MGBE_MAC_L3L4_CTR_L4SPM0 OSI_BIT(18) -#define MGBE_MAC_L3L4_CTR_L4PEN0 OSI_BIT(16) -#define MGBE_MAC_L3L4_CTR_L3HDBM0 (OSI_BIT(11) | OSI_BIT(12) | \ - OSI_BIT(13) | OSI_BIT(14) | \ - OSI_BIT(15)) -#define MGBE_MAC_L3L4_CTR_L3HSBM0 (OSI_BIT(6) | OSI_BIT(7) | \ - OSI_BIT(8) | OSI_BIT(9) | \ - OSI_BIT(10)) -#define MGBE_MAC_L3L4_CTR_L3DAIM0 OSI_BIT(5) -#define MGBE_MAC_L3L4_CTR_L3DAIM0_SHIFT 5 -#define 
MGBE_MAC_L3L4_CTR_L3DAM0 OSI_BIT(4) -#define MGBE_MAC_L3L4_CTR_L3SAIM0 OSI_BIT(3) -#define MGBE_MAC_L3L4_CTR_L3SAIM0_SHIFT 3 -#define MGBE_MAC_L3L4_CTR_L3SAM0 OSI_BIT(2) -#define MGBE_MAC_L3L4_CTR_L3PEN0 OSI_BIT(0) -#define MGBE_MAC_L3_IP6_CTRL_CLEAR (MGBE_MAC_L3L4_CTR_L3SAM0 | \ - MGBE_MAC_L3L4_CTR_L3SAIM0 | \ - MGBE_MAC_L3L4_CTR_L3DAM0 | \ - MGBE_MAC_L3L4_CTR_L3DAIM0 | \ - MGBE_MAC_L3L4_CTR_DMCHEN0 | \ - MGBE_MAC_L3L4_CTR_DMCHN0) -#define MGBE_MAC_L3_IP4_SA_CTRL_CLEAR (MGBE_MAC_L3L4_CTR_L3SAM0 | \ - MGBE_MAC_L3L4_CTR_L3SAIM0 | \ - MGBE_MAC_L3L4_CTR_DMCHEN0 | \ - MGBE_MAC_L3L4_CTR_DMCHN0) -#define MGBE_MAC_L3_IP4_DA_CTRL_CLEAR (MGBE_MAC_L3L4_CTR_L3DAM0 | \ - MGBE_MAC_L3L4_CTR_L3DAIM0 | \ - MGBE_MAC_L3L4_CTR_DMCHEN0 | \ - MGBE_MAC_L3L4_CTR_DMCHN0) -#define MGBE_MAC_L4_SP_CTRL_CLEAR (MGBE_MAC_L3L4_CTR_L4SPM0 | \ - MGBE_MAC_L3L4_CTR_L4SPIM0 | \ - MGBE_MAC_L3L4_CTR_DMCHEN0 | \ - MGBE_MAC_L3L4_CTR_DMCHN0) -#define MGBE_MAC_L4_DP_CTRL_CLEAR (MGBE_MAC_L3L4_CTR_L4DPM0 | \ - MGBE_MAC_L3L4_CTR_L4DPIM0 | \ - MGBE_MAC_L3L4_CTR_DMCHEN0 | \ - MGBE_MAC_L3L4_CTR_DMCHN0) -#define MGBE_MAC_L3L4_CTRL_ALL (MGBE_MAC_L3_IP6_CTRL_CLEAR | \ - MGBE_MAC_L3_IP4_SA_CTRL_CLEAR | \ - MGBE_MAC_L3_IP4_DA_CTRL_CLEAR | \ - MGBE_MAC_L4_SP_CTRL_CLEAR | \ - MGBE_MAC_L4_DP_CTRL_CLEAR) +#define MGBE_MAC_L4_ADDR 0x1 #define MGBE_MAC_L4_ADDR_SP_MASK 0x0000FFFFU #define MGBE_MAC_L4_ADDR_DP_MASK 0xFFFF0000U #define MGBE_MAC_L4_ADDR_DP_SHIFT 16 -#define MGBE_MAC_PPS_CTL_PPSCTRL0 (OSI_BIT(3) | OSI_BIT(2) |\ - OSI_BIT(1) | OSI_BIT(0)) +#endif /* !OSI_STRIPPED_LIB */ /** @} */ /** @@ -283,118 +457,16 @@ * @brief MGBE MTL register offsets * @{ */ -#define MGBE_MTL_OP_MODE 0x1000 -#define MGBE_MTL_INTR_STATUS 0x1020 #define MGBE_MTL_RXQ_DMA_MAP0 0x1030 #define MGBE_MTL_RXQ_DMA_MAP1 0x1034 #define MGBE_MTL_RXQ_DMA_MAP2 0x1038 -#define MGBE_MTL_RXQ_DMA_MAP3 0x103b -#define MGBE_MTL_EST_CONTROL 0x1050 -#define MGBE_MTL_EST_OVERHEAD 0x1054 -#define MGBE_MTL_EST_STATUS 0x1058 -#define MGBE_MTL_EST_SCH_ERR 0x1060 
-#define MGBE_MTL_EST_FRMS_ERR 0x1064 -#define MGBE_MTL_EST_FRMC_ERR 0x1068 -#define MGBE_MTL_EST_ITRE 0x1070 -#define MGBE_MTL_EST_GCL_CONTROL 0x1080 -#define MGBE_MTL_EST_DATA 0x1084 -#define MGBE_MTL_FPE_CTS 0x1090 -#define MGBE_MTL_FPE_ADV 0x1094 #define MGBE_MTL_CHX_TX_OP_MODE(x) ((0x0080U * (x)) + 0x1100U) #define MGBE_MTL_TCQ_ETS_CR(x) ((0x0080U * (x)) + 0x1110U) #define MGBE_MTL_TCQ_QW(x) ((0x0080U * (x)) + 0x1118U) -#define MGBE_MTL_TCQ_ETS_SSCR(x) ((0x0080U * (x)) + 0x111CU) -#define MGBE_MTL_TCQ_ETS_HCR(x) ((0x0080U * (x)) + 0x1120U) -#define MGBE_MTL_TCQ_ETS_LCR(x) ((0x0080U * (x)) + 0x1124U) #define MGBE_MTL_CHX_RX_OP_MODE(x) ((0x0080U * (x)) + 0x1140U) #define MGBE_MTL_RXQ_FLOW_CTRL(x) ((0x0080U * (x)) + 0x1150U) -#define MGBE_MTL_QINT_ENABLE(x) ((0x0080U * (x)) + 0x1170U) -#define MGBE_MTL_QINT_STATUS(x) ((0x0080U * (x)) + 0x1174U) -#define MGBE_MTL_TC_PRTY_MAP0 0x1040 -#define MGBE_MTL_TC_PRTY_MAP1 0x1044 -#define MGBE_MTL_RXP_CS 0x10A0 -#define MGBE_MTL_RXP_INTR_CS 0x10A4 -#define MGBE_MTL_RXP_IND_CS 0x10B0 -#define MGBE_MTL_RXP_IND_DATA 0x10B4 /** @} */ -/** - * @addtogroup MGBE-MTL FRP Indirect Access register defines - * - * @brief MGBE MTL register offsets - * @{ - */ -#define MGBE_MTL_FRP_READ_UDELAY 1U -#define MGBE_MTL_FRP_READ_RETRY 1000U - -#define MGBE_MTL_OP_MODE_FRPE OSI_BIT(15) -/* FRP Control and Status register defines */ -#define MGBE_MTL_RXP_CS_RXPI OSI_BIT(31) -#define MGBE_MTL_RXP_CS_PIPE (OSI_BIT(30) | OSI_BIT(29) | \ - OSI_BIT(28)) -#define MGBE_MTL_RXP_CS_NPE (OSI_BIT(23) | OSI_BIT(22) | \ - OSI_BIT(21) | OSI_BIT(20) | \ - OSI_BIT(19) | OSI_BIT(18) | \ - OSI_BIT(17) | OSI_BIT(16)) -#define MGBE_MTL_RXP_CS_NPE_SHIFT 16U -#define MGBE_MTL_RXP_CS_FPE_RCH (OSI_BIT(15) | OSI_BIT(14) | \ - OSI_BIT(13) | OSI_BIT(12)) -#define MGBE_MTL_RXP_CS_NVE (OSI_BIT(7) | OSI_BIT(6) | \ - OSI_BIT(5) | OSI_BIT(4) | \ - OSI_BIT(3) | OSI_BIT(2) | \ - OSI_BIT(1) | OSI_BIT(0)) -/* FRP Interrupt Control and Status register */ -#define 
MGBE_MTL_RXP_INTR_CS_PDRFIE OSI_BIT(19) -#define MGBE_MTL_RXP_INTR_CS_FOOVIE OSI_BIT(18) -#define MGBE_MTL_RXP_INTR_CS_NPEOVIE OSI_BIT(17) -#define MGBE_MTL_RXP_INTR_CS_NVEOVIE OSI_BIT(16) -#define MGBE_MTL_RXP_INTR_CS_PDRFIS OSI_BIT(3) -#define MGBE_MTL_RXP_INTR_CS_FOOVIS OSI_BIT(2) -#define MGBE_MTL_RXP_INTR_CS_NPEOVIS OSI_BIT(1) -#define MGBE_MTL_RXP_INTR_CS_NVEOVIS OSI_BIT(0) -/* Indirect Instruction Table defines */ -#define MGBE_MTL_FRP_IE0(x) ((x) * 0x4U + 0x0U) -#define MGBE_MTL_FRP_IE1(x) ((x) * 0x4U + 0x1U) -#define MGBE_MTL_FRP_IE2(x) ((x) * 0x4U + 0x2U) -#define MGBE_MTL_FRP_IE3(x) ((x) * 0x4U + 0x3U) -#define MGBE_MTL_FRP_IE2_DCH (OSI_BIT(31) | OSI_BIT(30) | \ - OSI_BIT(29) | OSI_BIT(28) | \ - OSI_BIT(27) | OSI_BIT(26) | \ - OSI_BIT(25) | OSI_BIT(24)) -#define MGBE_MTL_FRP_IE2_DCH_SHIFT 24U -#define MGBE_MTL_FRP_IE2_DCH_MASK 0xFFU -#define MGBE_MTL_FRP_IE2_OKI (OSI_BIT(23) | OSI_BIT(22) | \ - OSI_BIT(21) | OSI_BIT(20) | \ - OSI_BIT(19) | OSI_BIT(18) | \ - OSI_BIT(17) | OSI_BIT(16)) -#define MGBE_MTL_FRP_IE2_OKI_SHIFT 16U -#define MGBE_MTL_FRP_IE2_FO (OSI_BIT(13) | OSI_BIT(12) | \ - OSI_BIT(11) | OSI_BIT(10) | \ - OSI_BIT(9) | OSI_BIT(8)) -#define MGBE_MTL_FRP_IE2_FO_SHIFT 8U -#define MGBE_MTL_FRP_IE2_NC OSI_BIT(3) -#define MGBE_MTL_FRP_IE2_IM OSI_BIT(2) -#define MGBE_MTL_FRP_IE2_RF OSI_BIT(1) -#define MGBE_MTL_FRP_IE2_AF OSI_BIT(0) -#define MGBE_MTL_FRP_IE3_DCH_MASK 0xFFFFU -/* Indirect register defines */ -#define MGBE_MTL_RXP_DROP_CNT 0U -#define MGBE_MTL_RXP_ERROR_CNT 1U -#define MGBE_MTL_RXP_BYPASS_CNT 2U -#define MGBE_MTL_RXP_ACCEPT_CNT(x) ((0x10 * (x)) + 0x40) -#define MGBE_MTL_RXP_IND_CS_BUSY OSI_BIT(31) -#define MGBE_MTL_RXP_IND_CS_ACCSEL OSI_BIT(24) -#define MGBE_MTL_RXP_IND_CS_RXPEIEC (OSI_BIT(22) | OSI_BIT(21)) -#define MGBE_MTL_RXP_IND_CS_RXPEIEE OSI_BIT(20) -#define MGBE_MTL_RXP_IND_CS_CRWEN OSI_BIT(18) -#define MGBE_MTL_RXP_IND_CS_CRWSEL OSI_BIT(17) -#define MGBE_MTL_RXP_IND_CS_WRRDN OSI_BIT(16) -#define MGBE_MTL_RXP_IND_CS_ADDR 
(OSI_BIT(9) | OSI_BIT(8) | \ - OSI_BIT(7) | OSI_BIT(6) | \ - OSI_BIT(5) | OSI_BIT(4) | \ - OSI_BIT(3) | OSI_BIT(2) | \ - OSI_BIT(1) | OSI_BIT(0)) -/** @} */ /** * @addtogroup HW Register BIT values @@ -402,38 +474,16 @@ * @brief consists of corresponding MGBE MAC, MTL register bit values * @{ */ -#define MGBE_DMA_MODE_SWR OSI_BIT(0) -#define MGBE_MTL_TCQ_ETS_CR_SLC_MASK (OSI_BIT(6) | OSI_BIT(5) | \ - OSI_BIT(4)) -#define MGBE_MTL_TCQ_ETS_CR_CC OSI_BIT(3) -#define MGBE_MTL_TCQ_ETS_CR_CC_SHIFT 3U -#define MGBE_MTL_TCQ_ETS_CR_AVALG (OSI_BIT(1) | OSI_BIT(0)) -#define MGBE_MTL_TCQ_ETS_CR_AVALG_SHIFT 0U -#define MGBE_MTL_TCQ_ETS_QW_ISCQW_MASK 0x001FFFFFU -#define MGBE_MTL_TCQ_ETS_SSCR_SSC_MASK 0x0000FFFFU -#define MGBE_MTL_TCQ_ETS_HCR_HC_MASK 0x1FFFFFFFU -#define MGBE_MTL_TCQ_ETS_LCR_LC_MASK 0x1FFFFFFFU -#define MGBE_MTL_TX_OP_MODE_Q2TCMAP (OSI_BIT(10) | OSI_BIT(9) |\ - OSI_BIT(8)) -#define MGBE_MTL_TX_OP_MODE_Q2TCMAP_SHIFT 8U -#define MGBE_MTL_TX_OP_MODE_TXQEN (OSI_BIT(3) | OSI_BIT(2)) -#define MGBE_MTL_TX_OP_MODE_TXQEN_SHIFT 2U #define MGBE_MTL_CHX_TX_OP_MODE_Q2TC_SH 8U -#define MGBE_MTL_QTOMR_FTQ OSI_BIT(0) -#define MGBE_MTL_QTOMR_FTQ_LPOS OSI_BIT(0) #define MGBE_MTL_TSF OSI_BIT(1) #define MGBE_MTL_TXQEN OSI_BIT(3) #define MGBE_MTL_RSF OSI_BIT(5) #define MGBE_MTL_TCQ_QW_ISCQW OSI_BIT(4) -#define MGBE_MTL_QINT_TXUNIFS OSI_BIT(0) -#define MGBE_MTL_QINT_TXUIE OSI_BIT(0) #define MGBE_MAC_RMCR_ACS OSI_BIT(1) #define MGBE_MAC_RMCR_CST OSI_BIT(2) #define MGBE_MAC_RMCR_IPC OSI_BIT(9) #define MGBE_MAC_RXQC0_RXQEN_MASK 0x3U #define MGBE_MAC_RXQC0_RXQEN_SHIFT(x) ((x) * 2U) -#define MGBE_MAC_RMCR_LM OSI_BIT(10) -#define MGBE_MAC_RMCR_ARPEN OSI_BIT(31) #define MGBE_MDIO_SCCD_SBUSY OSI_BIT(22) #define MGBE_MDIO_SCCA_DA_SHIFT 21U #define MGBE_MDIO_SCCA_DA_MASK 0x1FU @@ -450,65 +500,24 @@ #define MGBE_MAC_RMCR_GPSLCE OSI_BIT(6) #define MGBE_MAC_RMCR_WD OSI_BIT(7) #define MGBE_MAC_RMCR_JE OSI_BIT(8) -#define MGBE_MAC_TMCR_IFP OSI_BIT(11) #define MGBE_MAC_TMCR_DDIC OSI_BIT(1) -#define 
MGBE_MAC_TMCR_IPG_MASK 0x700U #define MGBE_MAC_TMCR_JD OSI_BIT(16) #define MGBE_MMC_CNTRL_CNTRST OSI_BIT(0) #define MGBE_MMC_CNTRL_RSTONRD OSI_BIT(2) #define MGBE_MMC_CNTRL_CNTMCT (OSI_BIT(4) | OSI_BIT(5)) #define MGBE_MMC_CNTRL_CNTPRST OSI_BIT(7) -#define MGBE_MAC_RQC1R_PTPQ (OSI_BIT(27) | OSI_BIT(26) | \ - OSI_BIT(25) | OSI_BIT(24)) -#define MGBE_MAC_RQC1R_PTPQ_SHIFT 24U -#define MGBE_MAC_RQC1R_TPQC1 OSI_BIT(22) -#define MGBE_MAC_RQC1R_TPQC0 OSI_BIT(21) -#define MGBE_MAC_RQC1R_OMCBCQ OSI_BIT(20) #define MGBE_MAC_RQC1R_MCBCQEN OSI_BIT(15) #define MGBE_MAC_RQC1R_MCBCQ (OSI_BIT(11) | OSI_BIT(10) | \ OSI_BIT(9) | OSI_BIT(8)) #define MGBE_MAC_RQC1R_MCBCQ_SHIFT 8U -#define MGBE_MAC_RQC1R_MCBCQ_DEFAULT 9U -#define MGBE_MAC_RQC1R_RQ (OSI_BIT(7) | OSI_BIT(6) | \ - OSI_BIT(5) | OSI_BIT(4)) -#define MGBE_MAC_RQC1R_RQ_SHIFT 4U -#define MGBE_MAC_RQC4R_PMCBCQ (OSI_BIT(27) | OSI_BIT(26) | \ - OSI_BIT(25) | OSI_BIT(24)) -#define MGBE_MAC_RQC4R_PMCBCQ_SHIFT 24U #define MGBE_IMR_RGSMIIIE OSI_BIT(0) #define MGBE_IMR_TSIE OSI_BIT(12) -#define MGBE_IMR_TXESIE OSI_BIT(13) -#define MGBE_IMR_FPEIE OSI_BIT(15) -#define MGBE_MAC_IMR_FPEIS OSI_BIT(16) #define MGBE_ISR_TSIS OSI_BIT(12) -#define MGBE_DMA_ISR_MTLIS OSI_BIT(16) #define MGBE_DMA_ISR_MACIS OSI_BIT(17) #define MGBE_DMA_ISR_DCH0_DCH15_MASK 0x3FFU -#define MGBE_DMA_CHX_STATUS_TPS OSI_BIT(1) -#define MGBE_DMA_CHX_STATUS_TBU OSI_BIT(2) -#define MGBE_DMA_CHX_STATUS_RBU OSI_BIT(7) -#define MGBE_DMA_CHX_STATUS_RPS OSI_BIT(8) -#define MGBE_DMA_CHX_STATUS_FBE OSI_BIT(12) #define MGBE_DMA_CHX_STATUS_TI OSI_BIT(0) #define MGBE_DMA_CHX_STATUS_RI OSI_BIT(6) -#define MGBE_MAC_PFR_PR OSI_BIT(0) -#define MGBE_MAC_PFR_HUC OSI_BIT(1) -#define MGBE_MAC_PFR_DAIF OSI_BIT(3) -#define MGBE_MAC_PFR_PM OSI_BIT(4) -#define MGBE_MAC_PFR_DBF OSI_BIT(5) -#define MGBE_MAC_PFR_PCF (OSI_BIT(6) | OSI_BIT(7)) -#define MGBE_MAC_PFR_SAIF OSI_BIT(8) -#define MGBE_MAC_PFR_SAF OSI_BIT(9) -#define MGBE_MAC_PFR_HPF OSI_BIT(10) -#define MGBE_MAC_PFR_VTFE OSI_BIT(16) 
-#define MGBE_MAC_PFR_VTFE_SHIFT 16 -#define MGBE_MAC_PFR_IPFE OSI_BIT(20) -#define MGBE_MAC_PFR_IPFE_SHIFT 20 -#define MGBE_MAC_PFR_DNTU OSI_BIT(21) -#define MGBE_MAC_PFR_VUCC OSI_BIT(22) -#define MGBE_MAC_PFR_RA OSI_BIT(31) #define MGBE_MAC_ADDRH_AE OSI_BIT(31) -#define MGBE_MAC_ADDRH_AE_SHIFT 31 #define MGBE_MAC_ADDRH_SA OSI_BIT(30) #define MGBE_MAC_ADDRH_SA_SHIFT 30 #define MGBE_MAB_ADDRH_MBC_MAX_MASK 0x3FU @@ -529,30 +538,15 @@ #define MGBE_MAC_L3L4_ADDR_CTR_IDDR_FTYPE_SHIFT 8 #define MGBE_MAC_L3L4_ADDR_CTR_TT OSI_BIT(1) #define MGBE_MAC_L3L4_ADDR_CTR_XB OSI_BIT(0) -#define MGBE_MAC_VLAN_TR_ETV OSI_BIT(16) -#define MGBE_MAC_VLAN_TR_VTIM OSI_BIT(17) -#define MGBE_MAC_VLAN_TR_VTIM_SHIFT 17 -#define MGBE_MAC_VLAN_TR_VTHM OSI_BIT(25) -#define MGBE_MAC_VLANTR_EVLS_ALWAYS_STRIP ((unsigned int)0x3 << 21U) +#define MGBE_MAC_VLANTR_EVLS_ALWAYS_STRIP ((nveu32_t)0x3 << 21U) #define MGBE_MAC_VLANTR_EVLRXS OSI_BIT(24) #define MGBE_MAC_VLANTR_DOVLTC OSI_BIT(20) #define MGBE_MAC_VLANTIR_VLTI OSI_BIT(20) #define MGBE_MAC_VLANTIRR_CSVL OSI_BIT(19) -#define MGBE_MAC_LPI_CSR_LPITE OSI_BIT(20) -#define MGBE_MAC_LPI_CSR_LPITXA OSI_BIT(19) -#define MGBE_MAC_LPI_CSR_PLS OSI_BIT(17) -#define MGBE_MAC_LPI_CSR_LPIEN OSI_BIT(16) -#define MGBE_MAC_RSS_CTRL_RSSE OSI_BIT(0) -#define MGBE_MAC_RSS_CTRL_IP2TE OSI_BIT(1) -#define MGBE_MAC_RSS_CTRL_TCP4TE OSI_BIT(2) -#define MGBE_MAC_RSS_CTRL_UDP4TE OSI_BIT(3) -#define MGBE_MAC_RSS_ADDR_ADDRT OSI_BIT(2) -#define MGBE_MAC_RSS_ADDR_RSSIA_SHIFT 8U -#define MGBE_MAC_RSS_ADDR_OB OSI_BIT(0) -#define MGBE_MAC_RSS_ADDR_CT OSI_BIT(1) -#define MGBE_MAC_TX_TJT OSI_BIT(0) -#define MGBE_MAC_TX_IHE OSI_BIT(12) -#define MGBE_MAC_TX_PCE OSI_BIT(13) +#define MGBE_MAC_ISR_LSI OSI_BIT(0) +#define MGBE_MAC_ISR_LS_MASK (OSI_BIT(25) | OSI_BIT(24)) +#define MGBE_MAC_ISR_LS_LOCAL_FAULT OSI_BIT(25) +#define MGBE_MAC_ISR_LS_LINK_OK 0U /* DMA SBUS */ #define MGBE_DMA_SBUS_UNDEF OSI_BIT(0) #define MGBE_DMA_SBUS_BLEN256 OSI_BIT(7) @@ -561,8 +555,6 @@ #define 
MGBE_DMA_SBUS_WR_OSR_LMT 0x3F000000U #define MGBE_DMA_TX_EDMA_CTRL_TDPS 0x00000005U #define MGBE_DMA_RX_EDMA_CTRL_RDPS 0x00000005U -#define MGBE_DMA_TX_EDMA_CTRL_TDPS_PRESI 0x00000003U -#define MGBE_DMA_RX_EDMA_CTRL_RDPS_PRESI 0x00000003U #define MGBE_MAC_TMCR_SS_2_5G (OSI_BIT(31) | OSI_BIT(30)) #define MGBE_MAC_TMCR_SS_5G (OSI_BIT(31) | OSI_BIT(29)) #define MGBE_MAC_TMCR_SS_10G (OSI_BIT(31) | OSI_BIT(30) | OSI_BIT(29)) @@ -573,197 +565,55 @@ #define MGBE_RXQ_TO_DMA_CHAN_MAP0 0x03020100U #define MGBE_RXQ_TO_DMA_CHAN_MAP1 0x07060504U #define MGBE_RXQ_TO_DMA_CHAN_MAP2 0x0B0A0908U -#define MGBE_RXQ_TO_DMA_CHAN_MAP3 0x0F0E0D0CU #define MGBE_RXQ_TO_DMA_MAP_DDMACH 0x80808080U #define MGBE_MTL_TXQ_SIZE_SHIFT 16U #define MGBE_MTL_RXQ_SIZE_SHIFT 16U #define MGBE_MAC_RMCR_GPSL_MSK 0x3FFF0000U -#define MGBE_MTL_RXQ_OP_MODE_FEP OSI_BIT(4) -#define MGBE_MAC_TCR_TSCFUPDT OSI_BIT(1) -#define MGBE_MAC_TCR_TSINIT OSI_BIT(2) #define MGBE_MAC_TCR_TSUPDT OSI_BIT(3) -#define MGBE_MAC_TCR_TSADDREG OSI_BIT(5) -#define MGBE_MAC_TCR_TSCTRLSSR OSI_BIT(9) -#define MGBE_MAC_TCR_TSENMACADDR OSI_BIT(18) -#define MGBE_MAC_TCR_SNAPTYPSEL_SHIFT 16U #define MGBE_MAC_STNSUR_ADDSUB_SHIFT 31U -#define MGBE_MAC_SSIR_SSINC_SHIFT 16U -#define MGBE_MAC_STNSR_TSSS_MASK 0x7FFFFFFFU -#define MGBE_MAC_PTO_CR_PTOEN OSI_BIT(0) -#define MGBE_MAC_PTO_CR_ASYNCEN OSI_BIT(1) -#define MGBE_MAC_PTO_CR_APDREQEN OSI_BIT(2) -#define MGBE_MAC_PTO_CR_DN (OSI_BIT(15) | OSI_BIT(14) | \ - OSI_BIT(13) | OSI_BIT(12) | \ - OSI_BIT(11) | OSI_BIT(10) | \ - OSI_BIT(9) | OSI_BIT(8)) -#define MGBE_MAC_PTO_CR_DN_SHIFT 8U -#define MGBE_MAC_PIDR_PID_MASK 0XFFFFU -#define MGBE_MAC_QX_TX_FLW_CTRL_TFE OSI_BIT(1) -#define MGBE_MAC_RX_FLW_CTRL_RFE OSI_BIT(0) -#define MGBE_MAC_PAUSE_TIME 0xFFFF0000U -#define MGBE_MAC_PAUSE_TIME_MASK 0xFFFF0000U #define MGBE_MTL_RXQ_OP_MODE_EHFC OSI_BIT(7) #define MGBE_MTL_RXQ_OP_MODE_RFA_SHIFT 1U #define MGBE_MTL_RXQ_OP_MODE_RFA_MASK 0x0000007EU #define MGBE_MTL_RXQ_OP_MODE_RFD_SHIFT 17U #define 
MGBE_MTL_RXQ_OP_MODE_RFD_MASK 0x007E0000U -/* MAC FPE control/statusOSI_BITmap */ -#define MGBE_MAC_FPE_CTS_EFPE OSI_BIT(0) -#define MGBE_MAC_FPE_CTS_TRSP OSI_BIT(19) -#define MGBE_MAC_FPE_CTS_TVER OSI_BIT(18) -#define MGBE_MAC_FPE_CTS_RRSP OSI_BIT(17) -#define MGBE_MAC_FPE_CTS_RVER OSI_BIT(16) -#define MGBE_MAC_FPE_CTS_SVER OSI_BIT(1) -#define MGBE_MAC_FPE_CTS_SRSP OSI_BIT(2) -/* MTL_FPE_CTRL_STS */ -#define MGBE_MTL_FPE_CTS_PEC (OSI_BIT(8) | OSI_BIT(9) | \ - OSI_BIT(10) | OSI_BIT(11) | \ - OSI_BIT(12) | OSI_BIT(13) | \ - OSI_BIT(14) | OSI_BIT(15)) -#define MGBE_MTL_FPE_CTS_PEC_SHIFT 8U -#define MGBE_MTL_FPE_CTS_PEC_MAX_SHIFT 16U -/* MTL FPE adv registers */ -#define MGBE_MTL_FPE_ADV_HADV_MASK (0xFFFFU) -#define MGBE_MTL_FPE_ADV_HADV_VAL 100U -/* MTL_EST_CONTROL */ -#define MGBE_MTL_EST_CONTROL_PTOV (OSI_BIT(23) | OSI_BIT(24) | \ - OSI_BIT(25) | OSI_BIT(26) | \ - OSI_BIT(27) | OSI_BIT(28) | \ - OSI_BIT(29) | OSI_BIT(30) | \ - OSI_BIT(31)) -#define MGBE_MTL_EST_CONTROL_PTOV_SHIFT 23U -#define MGBE_MTL_EST_PTOV_RECOMMEND 32U -#define MGBE_MTL_EST_CONTROL_CTOV (OSI_BIT(11) | OSI_BIT(12) | \ - OSI_BIT(13) | OSI_BIT(14) | \ - OSI_BIT(15) | OSI_BIT(16) | \ - OSI_BIT(17) | OSI_BIT(18) | \ - OSI_BIT(19) | OSI_BIT(20) | \ - OSI_BIT(21) | OSI_BIT(22)) -#define MGBE_MTL_EST_CONTROL_CTOV_SHIFT 11U -#define MGBE_MTL_EST_CTOV_RECOMMEND 42U -#define MGBE_8PTP_CYCLE 26U -#ifdef MACSEC_SUPPORT +#if defined(MACSEC_SUPPORT) && !defined(OSI_STRIPPED_LIB) /** * MACSEC Recommended value * By default PCS and UPHY are present */ #define MGBE_MTL_EST_CTOV_MACSEC_RECOMMEND 295U #endif /* MACSEC_SUPPORT */ -#define MGBE_MTL_EST_CONTROL_TILS (OSI_BIT(8) | OSI_BIT(9) | \ - OSI_BIT(10)) #define MGBE_MTL_EST_CONTROL_LCSE (OSI_BIT(7) | OSI_BIT(6)) #define MGBE_MTL_EST_CONTROL_LCSE_VAL 0U -#define MGBE_MTL_EST_CONTROL_LCSE_SHIFT 6U #define MGBE_MTL_EST_CONTROL_DDBF OSI_BIT(4) -#define MGBE_MTL_EST_CONTROL_SSWL OSI_BIT(1) #define MGBE_MTL_EST_OVERHEAD_OVHD (OSI_BIT(0) | OSI_BIT(1) | \ OSI_BIT(2) | 
OSI_BIT(3) | \ OSI_BIT(4) | OSI_BIT(5)) #define MGBE_MTL_EST_OVERHEAD_RECOMMEND 56U -/* EST controlOSI_BITmap */ -#define MGBE_MTL_EST_EEST OSI_BIT(0) -#define MGBE_MTL_EST_SSWL OSI_BIT(1) -#define MGBE_MTL_EST_QHLBF OSI_BIT(3) /* EST GCL controlOSI_BITmap */ #define MGBE_MTL_EST_ADDR_SHIFT 8 -#define MGBE_MTL_EST_ADDR_MASK (OSI_BIT(8) | OSI_BIT(9) | \ - OSI_BIT(10) | OSI_BIT(11) | \ - OSI_BIT(12) | OSI_BIT(13) | \ - OSI_BIT(14) | OSI_BIT(15) | \ - OSI_BIT(16) | OSI_BIT(17) | \ - OSI_BIT(18) | OSI_BIT(19)) -#define MGBE_MTL_EST_SRWO OSI_BIT(0) -#define MGBE_MTL_EST_GCRR OSI_BIT(2) -#define MGBE_MTL_EST_ERR0 OSI_BIT(20) /* EST GCRA addresses */ -#define MGBE_MTL_EST_BTR_LOW ((unsigned int)0x0 << \ +#define MGBE_MTL_EST_BTR_LOW ((nveu32_t)0x0 << \ MGBE_MTL_EST_ADDR_SHIFT) -#define MGBE_MTL_EST_BTR_HIGH ((unsigned int)0x1 << \ +#define MGBE_MTL_EST_BTR_HIGH ((nveu32_t)0x1 << \ MGBE_MTL_EST_ADDR_SHIFT) -#define MGBE_MTL_EST_CTR_LOW ((unsigned int)0x2 << \ +#define MGBE_MTL_EST_CTR_LOW ((nveu32_t)0x2 << \ MGBE_MTL_EST_ADDR_SHIFT) -#define MGBE_MTL_EST_CTR_HIGH ((unsigned int)0x3 << \ +#define MGBE_MTL_EST_CTR_HIGH ((nveu32_t)0x3 << \ MGBE_MTL_EST_ADDR_SHIFT) -#define MGBE_MTL_EST_CTR_HIGH_MAX 0xFFU -#define MGBE_MTL_EST_TER ((unsigned int)0x4 << \ +#define MGBE_MTL_EST_TER ((nveu32_t)0x4 << \ MGBE_MTL_EST_ADDR_SHIFT) -#define MGBE_MTL_EST_LLR ((unsigned int)0x5 << \ +#define MGBE_MTL_EST_LLR ((nveu32_t)0x5 << \ MGBE_MTL_EST_ADDR_SHIFT) /*EST MTL interrupt STATUS and ERR*/ #define MGBE_MTL_IS_ESTIS OSI_BIT(18) -/* MTL_EST_STATUS*/ -#define MGBE_MTL_EST_STATUS_CGCE OSI_BIT(4) -#define MGBE_MTL_EST_STATUS_HLBS OSI_BIT(3) -#define MGBE_MTL_EST_STATUS_HLBF OSI_BIT(2) -#define MGBE_MTL_EST_STATUS_BTRE OSI_BIT(1) -#define MGBE_MTL_EST_STATUS_SWLC OSI_BIT(0) -#define MGBE_MTL_EST_ITRE_CGCE OSI_BIT(4) -#define MGBE_MTL_EST_ITRE_IEHS OSI_BIT(3) -#define MGBE_MTL_EST_ITRE_IEHF OSI_BIT(2) -#define MGBE_MTL_EST_ITRE_IEBE OSI_BIT(1) -#define MGBE_MTL_EST_ITRE_IECC OSI_BIT(0) #define 
MGBE_MAC_EXT_CNF_DDS OSI_BIT(7) -#define MGBE_MAC_EXT_CNF_EIPG 0x1U -#define MGBE_MAC_EXT_CNF_EIPG_MASK 0x7FU /* TX timestamp */ #define MGBE_MAC_TSS_TXTSC OSI_BIT(15) -#define MGBE0_SID ((nveu32_t)0x6U) -#define MGBE1_SID ((nveu32_t)0x49U) -#define MGBE2_SID ((nveu32_t)0x4AU) -#define MGBE3_SID ((nveu32_t)0x4BU) -#define MGBE_SID_VAL1(x) (((x) << 24U) |\ - ((x) << 16U) |\ - ((x) << 8U) |\ - (x)) -#define MGBE_SID_VAL2(x) (((x) << 8U) |\ - (x)) -/** @} */ - -/** - * @addtogroup MGBE-QUEUE QUEUE fifo size programmable values - * - * @brief Queue FIFO size programmable values - * @{ - */ -/* Formula is "Programmed value = (x + 1 )*256" - * Total Rx buf size is 192KB so 192*1024 = (x + 1)*256 - * which gives x as 0x2FF - */ -#define MGBE_19K 0x4BU /* For Ten MTL queues */ -#define MGBE_21K 0x53U /* For Nine MTL queues */ -#define MGBE_24K 0x5FU /* For Eight MTL queues */ -#define MGBE_27K 0x6BU /* For Seven MTL queues */ -#define MGBE_32K 0x7FU /* For Six MTL queues */ -#define MGBE_38K 0x97U /* For Five MTL queues */ -#define MGBE_48K 0xBFU /* For Four MTL queues */ -#define MGBE_64K 0xFFU /* For Three MTL queues */ -#define MGBE_96K 0x17FU /* For Two MTL queues */ -#define MGBE_192K 0x2FFU /* For One MTL queue */ -/** @} */ - -/** - * @addtogroup MGBE-SIZE SIZE calculation helper Macros - * - * @brief SIZE calculation defines - * @{ - */ -#define FIFO_SIZE_B(x) (x) -#define FIFO_SIZE_KB(x) ((x) * 1024U) -/** @} */ - -/** - * @addtogroup MGBE-QSIZE Queue SIZE Mapping Macros - * - * @brief Tx and Rx Queue SIZE Mapping defines - * @{ - */ -#define MGBE_TX_FIFO_SIZE_64KB 9U -#define MGBE_RX_FIFO_SIZE_64KB 9U -#define MGBE_TX_FIFO_SIZE_128KB 10U -#define MGBE_RX_FIFO_SIZE_192KB 12U /** @} */ +#ifndef OSI_STRIPPED_LIB /** * @addtogroup MGBE-HW-BACKUP * @@ -877,18 +727,22 @@ OSI_MGBE_MAX_L3_L4_FILTER + (x))) /* x varies from 0-31, 32 VLAN tag filters total */ -#define MGBE_MAC_VLAN_BAK_IDX(x) ((MGBE_MAC_L3_AD3R_BAK_IDX(0) + \ +#define MGBE_MAC_VLAN_BAK_IDX(x) 
((MGBE_MAC_L3_AD3R_BAK_IDX(0U) + \ OSI_MGBE_MAX_L3_L4_FILTER + (x))) /* Add MAC_DChSel_IndReg */ -#define MGBE_MAC_DCHSEL_BAK_IDX(x) ((MGBE_MAC_VLAN_BAK_IDX(0) + \ +#define MGBE_MAC_DCHSEL_BAK_IDX(x) ((MGBE_MAC_VLAN_BAK_IDX(0U) + \ MGBE_MAX_VLAN_FILTER + 1U)) -#define MGBE_MAX_BAK_IDX ((MGBE_MAC_DCHSEL_BAK_IDX(0) + \ +#define MGBE_MAX_BAK_IDX ((MGBE_MAC_DCHSEL_BAK_IDX(0U) + \ OSI_MGBE_MAX_MAC_ADDRESS_FILTER + 1U)) /** @} */ +#endif /* !OSI_STRIPPED_LIB */ + +/* TXQ Size 128KB is divided equally across 10 MTL Queues*/ +#define TX_FIFO_SZ (((((128U * 1024U)/OSI_MGBE_MAX_NUM_QUEUES)) / 256U) - 1U) /** - * @addtogroup MGBE-MAC MGBE MAC HW feature registers + * @addtogroup MGBE-MAC-HWFR MGBE MAC HW feature registers * * @brief Helps in identifying the features that are set in MAC HW * @{ @@ -962,7 +816,6 @@ #define MGBE_MAC_HFR0_TSSTSSEL_MASK 0x3U #define MGBE_MAC_HFR0_TSSTSSEL_SHIFT 25U -#define MGBE_MAC_HFR0_SAVLANINS_MASK 0x1U #define MGBE_MAC_HFR0_SAVLANINS_SHIFT 27U #define MGBE_MAC_HFR0_VXN_MASK 0x1U @@ -1134,7 +987,6 @@ #define MGBE_MTL_ECC_TSOED OSI_BIT(4) #define MGBE_MTL_ECC_DESCED OSI_BIT(5) #define MGBE_MAC_FSM_CONTROL 0x158U -#define MGBE_TMOUTEN OSI_BIT(0) #define MGBE_PRTYEN OSI_BIT(1) #define MGBE_MAC_DPP_FSM_INTERRUPT_STATUS 0x150U #define MGBE_MTL_DPP_CONTROL 0x10E0U diff --git a/osi/core/mgbe_mmc.c b/osi/core/mgbe_mmc.c index 75ed121..57e65bb 100644 --- a/osi/core/mgbe_mmc.c +++ b/osi/core/mgbe_mmc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -27,7 +27,7 @@ #include "mgbe_core.h" /** - * @brief update_mmc_val - function to read register and return value to callee + * @brief mgbe_update_mmc_val - function to read register and return value to callee * * Algorithm: Read the registers, check for boundary, if more, reset * counters else return same to caller. @@ -43,12 +43,12 @@ * @retval 0 on MMC counters overflow * @retval value on current MMC counter value. */ -static inline unsigned long update_mmc_val(struct osi_core_priv_data *osi_core, - unsigned long last_value, - unsigned long offset) +static inline nveu64_t mgbe_update_mmc_val(struct osi_core_priv_data *osi_core, + nveu64_t last_value, + nveu64_t offset) { - unsigned long temp; - unsigned int value = osi_readl((unsigned char *)osi_core->base + + nveu64_t temp = 0; + nveu32_t value = osi_readl((nveu8_t *)osi_core->base + offset); temp = last_value + value; @@ -56,13 +56,11 @@ static inline unsigned long update_mmc_val(struct osi_core_priv_data *osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, "Value overflow resetting all counters\n", - (unsigned long long)offset); + (nveul64_t)offset); mgbe_reset_mmc(osi_core); - } else { - return temp; } - return 0; + return temp; } /** @@ -75,14 +73,14 @@ static inline unsigned long update_mmc_val(struct osi_core_priv_data *osi_core, * 1) MAC should be init and started. 
see osi_start_mac() * 2) osi_core->osd should be populated */ -void mgbe_reset_mmc(struct osi_core_priv_data *osi_core) +void mgbe_reset_mmc(struct osi_core_priv_data *const osi_core) { - unsigned int value; + nveu32_t value; - value = osi_readl((unsigned char *)osi_core->base + MGBE_MMC_CNTRL); + value = osi_readl((nveu8_t *)osi_core->base + MGBE_MMC_CNTRL); /* self-clear bit in one clock cycle */ value |= MGBE_MMC_CNTRL_CNTRST; - osi_writel(value, (unsigned char *)osi_core->base + MGBE_MMC_CNTRL); + osi_writel(value, (nveu8_t *)osi_core->base + MGBE_MMC_CNTRL); osi_memset(&osi_core->mmc, 0U, sizeof(struct osi_mmc_counters)); } @@ -99,461 +97,461 @@ void mgbe_reset_mmc(struct osi_core_priv_data *osi_core) * 1) MAC should be init and started. see osi_start_mac() * 2) osi_core->osd should be populated */ -void mgbe_read_mmc(struct osi_core_priv_data *osi_core) +void mgbe_read_mmc(struct osi_core_priv_data *const osi_core) { struct osi_mmc_counters *mmc = &osi_core->mmc; mmc->mmc_tx_octetcount_gb = - update_mmc_val(osi_core, mmc->mmc_tx_octetcount_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_octetcount_gb, MMC_TXOCTETCOUNT_GB_L); mmc->mmc_tx_octetcount_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_octetcount_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_octetcount_gb_h, MMC_TXOCTETCOUNT_GB_H); mmc->mmc_tx_framecount_gb = - update_mmc_val(osi_core, mmc->mmc_tx_framecount_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_framecount_gb, MMC_TXPACKETCOUNT_GB_L); mmc->mmc_tx_framecount_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_framecount_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_framecount_gb_h, MMC_TXPACKETCOUNT_GB_H); mmc->mmc_tx_broadcastframe_g = - update_mmc_val(osi_core, mmc->mmc_tx_broadcastframe_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_broadcastframe_g, MMC_TXBROADCASTPACKETS_G_L); mmc->mmc_tx_broadcastframe_g_h = - update_mmc_val(osi_core, mmc->mmc_tx_broadcastframe_g_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_broadcastframe_g_h, 
MMC_TXBROADCASTPACKETS_G_H); mmc->mmc_tx_multicastframe_g = - update_mmc_val(osi_core, mmc->mmc_tx_multicastframe_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_multicastframe_g, MMC_TXMULTICASTPACKETS_G_L); mmc->mmc_tx_multicastframe_g_h = - update_mmc_val(osi_core, mmc->mmc_tx_multicastframe_g_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_multicastframe_g_h, MMC_TXMULTICASTPACKETS_G_H); mmc->mmc_tx_64_octets_gb = - update_mmc_val(osi_core, mmc->mmc_tx_64_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_64_octets_gb, MMC_TX64OCTETS_GB_L); mmc->mmc_tx_64_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_64_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_64_octets_gb_h, MMC_TX64OCTETS_GB_H); mmc->mmc_tx_65_to_127_octets_gb = - update_mmc_val(osi_core, mmc->mmc_tx_65_to_127_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_65_to_127_octets_gb, MMC_TX65TO127OCTETS_GB_L); mmc->mmc_tx_65_to_127_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_65_to_127_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_65_to_127_octets_gb_h, MMC_TX65TO127OCTETS_GB_H); mmc->mmc_tx_128_to_255_octets_gb = - update_mmc_val(osi_core, mmc->mmc_tx_128_to_255_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_128_to_255_octets_gb, MMC_TX128TO255OCTETS_GB_L); mmc->mmc_tx_128_to_255_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_128_to_255_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_128_to_255_octets_gb_h, MMC_TX128TO255OCTETS_GB_H); mmc->mmc_tx_256_to_511_octets_gb = - update_mmc_val(osi_core, mmc->mmc_tx_256_to_511_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_256_to_511_octets_gb, MMC_TX256TO511OCTETS_GB_L); mmc->mmc_tx_256_to_511_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_256_to_511_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_256_to_511_octets_gb_h, MMC_TX256TO511OCTETS_GB_H); mmc->mmc_tx_512_to_1023_octets_gb = - update_mmc_val(osi_core, mmc->mmc_tx_512_to_1023_octets_gb, + mgbe_update_mmc_val(osi_core, 
mmc->mmc_tx_512_to_1023_octets_gb, MMC_TX512TO1023OCTETS_GB_L); mmc->mmc_tx_512_to_1023_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_512_to_1023_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_512_to_1023_octets_gb_h, MMC_TX512TO1023OCTETS_GB_H); mmc->mmc_tx_1024_to_max_octets_gb = - update_mmc_val(osi_core, mmc->mmc_tx_1024_to_max_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_1024_to_max_octets_gb, MMC_TX1024TOMAXOCTETS_GB_L); mmc->mmc_tx_1024_to_max_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_1024_to_max_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_1024_to_max_octets_gb_h, MMC_TX1024TOMAXOCTETS_GB_H); mmc->mmc_tx_unicast_gb = - update_mmc_val(osi_core, mmc->mmc_tx_unicast_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_unicast_gb, MMC_TXUNICASTPACKETS_GB_L); mmc->mmc_tx_unicast_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_unicast_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_unicast_gb_h, MMC_TXUNICASTPACKETS_GB_H); mmc->mmc_tx_multicast_gb = - update_mmc_val(osi_core, mmc->mmc_tx_multicast_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_multicast_gb, MMC_TXMULTICASTPACKETS_GB_L); mmc->mmc_tx_multicast_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_multicast_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_multicast_gb_h, MMC_TXMULTICASTPACKETS_GB_H); mmc->mmc_tx_broadcast_gb = - update_mmc_val(osi_core, mmc->mmc_tx_broadcast_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_broadcast_gb, MMC_TXBROADCASTPACKETS_GB_L); mmc->mmc_tx_broadcast_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_broadcast_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_broadcast_gb_h, MMC_TXBROADCASTPACKETS_GB_H); mmc->mmc_tx_underflow_error = - update_mmc_val(osi_core, mmc->mmc_tx_underflow_error, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_underflow_error, MMC_TXUNDERFLOWERROR_L); mmc->mmc_tx_underflow_error_h = - update_mmc_val(osi_core, mmc->mmc_tx_underflow_error_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_underflow_error_h, 
MMC_TXUNDERFLOWERROR_H); mmc->mmc_tx_singlecol_g = - update_mmc_val(osi_core, mmc->mmc_tx_singlecol_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_singlecol_g, MMC_TXSINGLECOL_G); mmc->mmc_tx_multicol_g = - update_mmc_val(osi_core, mmc->mmc_tx_multicol_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_multicol_g, MMC_TXMULTICOL_G); mmc->mmc_tx_deferred = - update_mmc_val(osi_core, mmc->mmc_tx_deferred, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_deferred, MMC_TXDEFERRED); mmc->mmc_tx_latecol = - update_mmc_val(osi_core, mmc->mmc_tx_latecol, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_latecol, MMC_TXLATECOL); mmc->mmc_tx_exesscol = - update_mmc_val(osi_core, mmc->mmc_tx_exesscol, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_exesscol, MMC_TXEXESSCOL); mmc->mmc_tx_carrier_error = - update_mmc_val(osi_core, mmc->mmc_tx_carrier_error, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_carrier_error, MMC_TXCARRIERERROR); mmc->mmc_tx_octetcount_g = - update_mmc_val(osi_core, mmc->mmc_tx_octetcount_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_octetcount_g, MMC_TXOCTETCOUNT_G_L); mmc->mmc_tx_octetcount_g_h = - update_mmc_val(osi_core, mmc->mmc_tx_octetcount_g_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_octetcount_g_h, MMC_TXOCTETCOUNT_G_H); mmc->mmc_tx_framecount_g = - update_mmc_val(osi_core, mmc->mmc_tx_framecount_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_framecount_g, MMC_TXPACKETSCOUNT_G_L); mmc->mmc_tx_framecount_g_h = - update_mmc_val(osi_core, mmc->mmc_tx_framecount_g_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_framecount_g_h, MMC_TXPACKETSCOUNT_G_H); mmc->mmc_tx_excessdef = - update_mmc_val(osi_core, mmc->mmc_tx_excessdef, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_excessdef, MMC_TXEXECESS_DEFERRED); mmc->mmc_tx_pause_frame = - update_mmc_val(osi_core, mmc->mmc_tx_pause_frame, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_pause_frame, MMC_TXPAUSEPACKETS_L); mmc->mmc_tx_pause_frame_h = - update_mmc_val(osi_core, mmc->mmc_tx_pause_frame_h, + 
mgbe_update_mmc_val(osi_core, mmc->mmc_tx_pause_frame_h, MMC_TXPAUSEPACKETS_H); mmc->mmc_tx_vlan_frame_g = - update_mmc_val(osi_core, mmc->mmc_tx_vlan_frame_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_vlan_frame_g, MMC_TXVLANPACKETS_G_L); mmc->mmc_tx_vlan_frame_g_h = - update_mmc_val(osi_core, mmc->mmc_tx_vlan_frame_g_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_vlan_frame_g_h, MMC_TXVLANPACKETS_G_H); mmc->mmc_rx_framecount_gb = - update_mmc_val(osi_core, mmc->mmc_rx_framecount_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_framecount_gb, MMC_RXPACKETCOUNT_GB_L); mmc->mmc_rx_framecount_gb_h = - update_mmc_val(osi_core, mmc->mmc_rx_framecount_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_framecount_gb_h, MMC_RXPACKETCOUNT_GB_H); mmc->mmc_rx_octetcount_gb = - update_mmc_val(osi_core, mmc->mmc_rx_octetcount_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_octetcount_gb, MMC_RXOCTETCOUNT_GB_L); mmc->mmc_rx_octetcount_gb_h = - update_mmc_val(osi_core, mmc->mmc_rx_octetcount_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_octetcount_gb_h, MMC_RXOCTETCOUNT_GB_H); mmc->mmc_rx_octetcount_g = - update_mmc_val(osi_core, mmc->mmc_rx_octetcount_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_octetcount_g, MMC_RXOCTETCOUNT_G_L); mmc->mmc_rx_octetcount_g_h = - update_mmc_val(osi_core, mmc->mmc_rx_octetcount_g_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_octetcount_g_h, MMC_RXOCTETCOUNT_G_H); mmc->mmc_rx_broadcastframe_g = - update_mmc_val(osi_core, mmc->mmc_rx_broadcastframe_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_broadcastframe_g, MMC_RXBROADCASTPACKETS_G_L); mmc->mmc_rx_broadcastframe_g_h = - update_mmc_val(osi_core, mmc->mmc_rx_broadcastframe_g_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_broadcastframe_g_h, MMC_RXBROADCASTPACKETS_G_H); mmc->mmc_rx_multicastframe_g = - update_mmc_val(osi_core, mmc->mmc_rx_multicastframe_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_multicastframe_g, MMC_RXMULTICASTPACKETS_G_L); mmc->mmc_rx_multicastframe_g_h = - 
update_mmc_val(osi_core, mmc->mmc_rx_multicastframe_g_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_multicastframe_g_h, MMC_RXMULTICASTPACKETS_G_H); mmc->mmc_rx_crc_error = - update_mmc_val(osi_core, mmc->mmc_rx_crc_error, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_crc_error, MMC_RXCRCERROR_L); mmc->mmc_rx_crc_error_h = - update_mmc_val(osi_core, mmc->mmc_rx_crc_error_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_crc_error_h, MMC_RXCRCERROR_H); mmc->mmc_rx_align_error = - update_mmc_val(osi_core, mmc->mmc_rx_align_error, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_align_error, MMC_RXALIGNMENTERROR); mmc->mmc_rx_runt_error = - update_mmc_val(osi_core, mmc->mmc_rx_runt_error, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_runt_error, MMC_RXRUNTERROR); mmc->mmc_rx_jabber_error = - update_mmc_val(osi_core, mmc->mmc_rx_jabber_error, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_jabber_error, MMC_RXJABBERERROR); mmc->mmc_rx_undersize_g = - update_mmc_val(osi_core, mmc->mmc_rx_undersize_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_undersize_g, MMC_RXUNDERSIZE_G); mmc->mmc_rx_oversize_g = - update_mmc_val(osi_core, mmc->mmc_rx_oversize_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_oversize_g, MMC_RXOVERSIZE_G); mmc->mmc_rx_64_octets_gb = - update_mmc_val(osi_core, mmc->mmc_rx_64_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_64_octets_gb, MMC_RX64OCTETS_GB_L); mmc->mmc_rx_64_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_rx_64_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_64_octets_gb_h, MMC_RX64OCTETS_GB_H); mmc->mmc_rx_65_to_127_octets_gb = - update_mmc_val(osi_core, mmc->mmc_rx_65_to_127_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_65_to_127_octets_gb, MMC_RX65TO127OCTETS_GB_L); mmc->mmc_rx_65_to_127_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_rx_65_to_127_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_65_to_127_octets_gb_h, MMC_RX65TO127OCTETS_GB_H); mmc->mmc_rx_128_to_255_octets_gb = - update_mmc_val(osi_core, 
mmc->mmc_rx_128_to_255_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_128_to_255_octets_gb, MMC_RX128TO255OCTETS_GB_L); mmc->mmc_rx_128_to_255_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_rx_128_to_255_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_128_to_255_octets_gb_h, MMC_RX128TO255OCTETS_GB_H); mmc->mmc_rx_256_to_511_octets_gb = - update_mmc_val(osi_core, mmc->mmc_rx_256_to_511_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_256_to_511_octets_gb, MMC_RX256TO511OCTETS_GB_L); mmc->mmc_rx_256_to_511_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_rx_256_to_511_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_256_to_511_octets_gb_h, MMC_RX256TO511OCTETS_GB_H); mmc->mmc_rx_512_to_1023_octets_gb = - update_mmc_val(osi_core, mmc->mmc_rx_512_to_1023_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_512_to_1023_octets_gb, MMC_RX512TO1023OCTETS_GB_L); mmc->mmc_rx_512_to_1023_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_rx_512_to_1023_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_512_to_1023_octets_gb_h, MMC_RX512TO1023OCTETS_GB_H); mmc->mmc_rx_1024_to_max_octets_gb = - update_mmc_val(osi_core, mmc->mmc_rx_1024_to_max_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_1024_to_max_octets_gb, MMC_RX1024TOMAXOCTETS_GB_L); mmc->mmc_rx_1024_to_max_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_rx_1024_to_max_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_1024_to_max_octets_gb_h, MMC_RX1024TOMAXOCTETS_GB_H); mmc->mmc_rx_unicast_g = - update_mmc_val(osi_core, mmc->mmc_rx_unicast_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_unicast_g, MMC_RXUNICASTPACKETS_G_L); mmc->mmc_rx_unicast_g_h = - update_mmc_val(osi_core, mmc->mmc_rx_unicast_g_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_unicast_g_h, MMC_RXUNICASTPACKETS_G_H); mmc->mmc_rx_length_error = - update_mmc_val(osi_core, mmc->mmc_rx_length_error, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_length_error, MMC_RXLENGTHERROR_L); 
mmc->mmc_rx_length_error_h = - update_mmc_val(osi_core, mmc->mmc_rx_length_error_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_length_error_h, MMC_RXLENGTHERROR_H); mmc->mmc_rx_outofrangetype = - update_mmc_val(osi_core, mmc->mmc_rx_outofrangetype, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_outofrangetype, MMC_RXOUTOFRANGETYPE_L); mmc->mmc_rx_outofrangetype_h = - update_mmc_val(osi_core, mmc->mmc_rx_outofrangetype_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_outofrangetype_h, MMC_RXOUTOFRANGETYPE_H); mmc->mmc_rx_pause_frames = - update_mmc_val(osi_core, mmc->mmc_rx_pause_frames, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_pause_frames, MMC_RXPAUSEPACKETS_L); mmc->mmc_rx_pause_frames_h = - update_mmc_val(osi_core, mmc->mmc_rx_pause_frames_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_pause_frames_h, MMC_RXPAUSEPACKETS_H); mmc->mmc_rx_fifo_overflow = - update_mmc_val(osi_core, mmc->mmc_rx_fifo_overflow, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_fifo_overflow, MMC_RXFIFOOVERFLOW_L); mmc->mmc_rx_fifo_overflow_h = - update_mmc_val(osi_core, mmc->mmc_rx_fifo_overflow_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_fifo_overflow_h, MMC_RXFIFOOVERFLOW_H); mmc->mmc_rx_vlan_frames_gb = - update_mmc_val(osi_core, mmc->mmc_rx_vlan_frames_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_vlan_frames_gb, MMC_RXVLANPACKETS_GB_L); mmc->mmc_rx_vlan_frames_gb_h = - update_mmc_val(osi_core, mmc->mmc_rx_vlan_frames_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_vlan_frames_gb_h, MMC_RXVLANPACKETS_GB_H); mmc->mmc_rx_watchdog_error = - update_mmc_val(osi_core, mmc->mmc_rx_watchdog_error, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_watchdog_error, MMC_RXWATCHDOGERROR); mmc->mmc_tx_lpi_usec_cntr = - update_mmc_val(osi_core, mmc->mmc_tx_lpi_usec_cntr, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_lpi_usec_cntr, MMC_TXLPIUSECCNTR); mmc->mmc_tx_lpi_tran_cntr = - update_mmc_val(osi_core, mmc->mmc_tx_lpi_tran_cntr, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_lpi_tran_cntr, 
MMC_TXLPITRANCNTR); mmc->mmc_rx_lpi_usec_cntr = - update_mmc_val(osi_core, mmc->mmc_rx_lpi_usec_cntr, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_lpi_usec_cntr, MMC_RXLPIUSECCNTR); mmc->mmc_rx_lpi_tran_cntr = - update_mmc_val(osi_core, mmc->mmc_rx_lpi_tran_cntr, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_lpi_tran_cntr, MMC_RXLPITRANCNTR); mmc->mmc_rx_ipv4_gd = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd, MMC_RXIPV4_GD_PKTS_L); mmc->mmc_rx_ipv4_gd_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd_h, MMC_RXIPV4_GD_PKTS_H); mmc->mmc_rx_ipv4_hderr = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr, MMC_RXIPV4_HDRERR_PKTS_L); mmc->mmc_rx_ipv4_hderr_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr_h, MMC_RXIPV4_HDRERR_PKTS_H); mmc->mmc_rx_ipv4_nopay = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay, MMC_RXIPV4_NOPAY_PKTS_L); mmc->mmc_rx_ipv4_nopay_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay_h, MMC_RXIPV4_NOPAY_PKTS_H); mmc->mmc_rx_ipv4_frag = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag, MMC_RXIPV4_FRAG_PKTS_L); mmc->mmc_rx_ipv4_frag_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag_h, MMC_RXIPV4_FRAG_PKTS_H); mmc->mmc_rx_ipv4_udsbl = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl, MMC_RXIPV4_UBSBL_PKTS_L); mmc->mmc_rx_ipv4_udsbl_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl_h, MMC_RXIPV4_UBSBL_PKTS_H); mmc->mmc_rx_ipv6_gd = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd, + 
mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd, MMC_RXIPV6_GD_PKTS_L); mmc->mmc_rx_ipv6_gd_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd_h, MMC_RXIPV6_GD_PKTS_H); mmc->mmc_rx_ipv6_hderr = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr, MMC_RXIPV6_HDRERR_PKTS_L); mmc->mmc_rx_ipv6_hderr_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr_h, MMC_RXIPV6_HDRERR_PKTS_H); mmc->mmc_rx_ipv6_nopay = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay, MMC_RXIPV6_NOPAY_PKTS_L); mmc->mmc_rx_ipv6_nopay_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay_h, MMC_RXIPV6_NOPAY_PKTS_H); mmc->mmc_rx_udp_gd = - update_mmc_val(osi_core, mmc->mmc_rx_udp_gd, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_gd, MMC_RXUDP_GD_PKTS_L); mmc->mmc_rx_udp_gd_h = - update_mmc_val(osi_core, mmc->mmc_rx_udp_gd_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_gd_h, MMC_RXUDP_GD_PKTS_H); mmc->mmc_rx_udp_err = - update_mmc_val(osi_core, mmc->mmc_rx_udp_err, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_err, MMC_RXUDP_ERR_PKTS_L); mmc->mmc_rx_udp_err_h = - update_mmc_val(osi_core, mmc->mmc_rx_udp_err_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_err_h, MMC_RXUDP_ERR_PKTS_H); mmc->mmc_rx_tcp_gd = - update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd, MMC_RXTCP_GD_PKTS_L); mmc->mmc_rx_tcp_gd_h = - update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd_h, MMC_RXTCP_GD_PKTS_H); mmc->mmc_rx_tcp_err = - update_mmc_val(osi_core, mmc->mmc_rx_tcp_err, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_err, MMC_RXTCP_ERR_PKTS_L); mmc->mmc_rx_tcp_err_h = - update_mmc_val(osi_core, mmc->mmc_rx_tcp_err_h, + 
mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_err_h, MMC_RXTCP_ERR_PKTS_H); mmc->mmc_rx_icmp_gd = - update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd, MMC_RXICMP_GD_PKTS_L); mmc->mmc_rx_icmp_gd_h = - update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd_h, MMC_RXICMP_GD_PKTS_H); mmc->mmc_rx_icmp_err = - update_mmc_val(osi_core, mmc->mmc_rx_icmp_err, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_err, MMC_RXICMP_ERR_PKTS_L); mmc->mmc_rx_icmp_err_h = - update_mmc_val(osi_core, mmc->mmc_rx_icmp_err_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_err_h, MMC_RXICMP_ERR_PKTS_H); mmc->mmc_rx_ipv4_gd_octets = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd_octets, MMC_RXIPV4_GD_OCTETS_L); mmc->mmc_rx_ipv4_gd_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd_octets_h, MMC_RXIPV4_GD_OCTETS_H); mmc->mmc_rx_ipv4_hderr_octets = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr_octets, MMC_RXIPV4_HDRERR_OCTETS_L); mmc->mmc_rx_ipv4_hderr_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr_octets_h, MMC_RXIPV4_HDRERR_OCTETS_H); mmc->mmc_rx_ipv4_nopay_octets = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay_octets, MMC_RXIPV4_NOPAY_OCTETS_L); mmc->mmc_rx_ipv4_nopay_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay_octets_h, MMC_RXIPV4_NOPAY_OCTETS_H); mmc->mmc_rx_ipv4_frag_octets = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag_octets, MMC_RXIPV4_FRAG_OCTETS_L); mmc->mmc_rx_ipv4_frag_octets_h = - update_mmc_val(osi_core, 
mmc->mmc_rx_ipv4_frag_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag_octets_h, MMC_RXIPV4_FRAG_OCTETS_H); mmc->mmc_rx_ipv4_udsbl_octets = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl_octets, MMC_RXIPV4_UDP_CHKSM_DIS_OCT_L); mmc->mmc_rx_ipv4_udsbl_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl_octets_h, MMC_RXIPV4_UDP_CHKSM_DIS_OCT_H); mmc->mmc_rx_udp_gd_octets = - update_mmc_val(osi_core, mmc->mmc_rx_udp_gd_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_gd_octets, MMC_RXUDP_GD_OCTETS_L); mmc->mmc_rx_udp_gd_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_udp_gd_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_gd_octets_h, MMC_RXUDP_GD_OCTETS_H); mmc->mmc_rx_ipv6_gd_octets = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd_octets, MMC_RXIPV6_GD_OCTETS_L); mmc->mmc_rx_ipv6_gd_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd_octets_h, MMC_RXIPV6_GD_OCTETS_H); mmc->mmc_rx_ipv6_hderr_octets = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr_octets, MMC_RXIPV6_HDRERR_OCTETS_L); mmc->mmc_rx_ipv6_hderr_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr_octets_h, MMC_RXIPV6_HDRERR_OCTETS_H); mmc->mmc_rx_ipv6_nopay_octets = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay_octets, MMC_RXIPV6_NOPAY_OCTETS_L); mmc->mmc_rx_ipv6_nopay_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay_octets_h, MMC_RXIPV6_NOPAY_OCTETS_H); mmc->mmc_rx_udp_err_octets = - update_mmc_val(osi_core, 
mmc->mmc_rx_udp_err_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_err_octets, MMC_RXUDP_ERR_OCTETS_L); mmc->mmc_rx_udp_err_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_udp_err_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_err_octets_h, MMC_RXUDP_ERR_OCTETS_H); mmc->mmc_rx_tcp_gd_octets = - update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd_octets, MMC_RXTCP_GD_OCTETS_L); mmc->mmc_rx_tcp_gd_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd_octets_h, MMC_RXTCP_GD_OCTETS_H); mmc->mmc_rx_tcp_err_octets = - update_mmc_val(osi_core, mmc->mmc_rx_tcp_err_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_err_octets, MMC_RXTCP_ERR_OCTETS_L); mmc->mmc_rx_tcp_err_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_tcp_err_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_err_octets_h, MMC_RXTCP_ERR_OCTETS_H); mmc->mmc_rx_icmp_gd_octets = - update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd_octets, MMC_RXICMP_GD_OCTETS_L); mmc->mmc_rx_icmp_gd_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd_octets_h, MMC_RXICMP_GD_OCTETS_H); mmc->mmc_rx_icmp_err_octets = - update_mmc_val(osi_core, mmc->mmc_rx_icmp_err_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_err_octets, MMC_RXICMP_ERR_OCTETS_L); mmc->mmc_rx_icmp_err_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_icmp_err_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_err_octets_h, MMC_RXICMP_ERR_OCTETS_H); mmc->mmc_tx_fpe_frag_cnt = - update_mmc_val(osi_core, mmc->mmc_tx_fpe_frag_cnt, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_fpe_frag_cnt, MMC_TX_FPE_FRAG_COUNTER); mmc->mmc_tx_fpe_hold_req_cnt = - update_mmc_val(osi_core, mmc->mmc_tx_fpe_hold_req_cnt, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_fpe_hold_req_cnt, 
MMC_TX_HOLD_REQ_COUNTER); mmc->mmc_rx_packet_reass_err_cnt = - update_mmc_val(osi_core, mmc->mmc_rx_packet_reass_err_cnt, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_packet_reass_err_cnt, MMC_RX_PKT_ASSEMBLY_ERR_CNTR); mmc->mmc_rx_packet_smd_err_cnt = - update_mmc_val(osi_core, mmc->mmc_rx_packet_smd_err_cnt, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_packet_smd_err_cnt, MMC_RX_PKT_SMD_ERR_CNTR); mmc->mmc_rx_packet_asm_ok_cnt = - update_mmc_val(osi_core, mmc->mmc_rx_packet_asm_ok_cnt, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_packet_asm_ok_cnt, MMC_RX_PKT_ASSEMBLY_OK_CNTR); mmc->mmc_rx_fpe_fragment_cnt = - update_mmc_val(osi_core, mmc->mmc_rx_fpe_fragment_cnt, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_fpe_fragment_cnt, MMC_RX_FPE_FRAG_CNTR); } diff --git a/osi/core/mgbe_mmc.h b/osi/core/mgbe_mmc.h index 957577d..ac97c46 100644 --- a/osi/core/mgbe_mmc.h +++ b/osi/core/mgbe_mmc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -67,13 +67,6 @@ #define MMC_TXVLANPACKETS_G_H 0x008A0 #define MMC_TXLPIUSECCNTR 0x008A4 #define MMC_TXLPITRANCNTR 0x008A8 -#define MMC_PRIO_INT_STATUS 0x008CC -#define MMC_TX_PER_PRIO_STATUS 0x008D0 -#define MMC_TX_PER_PRIO_PKT_GB 0x008D4 -#define MMC_TX_PER_PRIO_PFC_PKT_GB 0x008D8 -#define MMC_TX_PER_PRIO_GPFC_PKT_GB 0x008DC -#define MMC_TX_PER_PRIO_OCTET_GB_L 0x008E0 -#define MMC_TX_PER_PRIO_OCTET_GB_H 0x008E4 #define MMC_RXPACKETCOUNT_GB_L 0x00900 #define MMC_RXPACKETCOUNT_GB_H 0x00904 @@ -118,24 +111,9 @@ #define MMC_RXWATCHDOGERROR 0x009A0 #define MMC_RXLPIUSECCNTR 0x009A4 #define MMC_RXLPITRANCNTR 0x009A8 -#define MMC_RX_DISCARD_PKTS_GB_L 0x009AC -#define MMC_RX_DISCARD_PKTS_GB_H 0x009B0 -#define MMC_RX_DISCARD_OCTET_GB_L 0x009B4 -#define MMC_RX_DISCARD_OCTET_GB_H 0x009B8 #define MMC_RXALIGNMENTERROR 0x009BC -#define MMC_RX_PER_PRIO_STATUS 0x009D0 -#define MMC_RX_PER_PRIO_PKT_GB 0x009D4 -#define MMC_RX_PER_PRIO_PKT_B 0x009D8 -#define MMC_RX_PER_PRIO_PFC_PKT_GB 0x009DC -#define MMC_RX_PER_PRIO_OCTET_GB_L 0x009E0 -#define MMC_RX_PER_PRIO_OCTET_GB_H 0x009E4 -#define MMC_RX_PER_PRIO_DISCARD_GB 0x009E8 -#define MMC_FPE_TX_INT 0x00A00 -#define MMC_FPE_TX_INT_MASK 0x00A04 #define MMC_TX_FPE_FRAG_COUNTER 0x00A08 #define MMC_TX_HOLD_REQ_COUNTER 0x00A0C -#define MMC_FPE_RX_INT 0x00A20 -#define MMC_FPE_RX_INT_MASK 0x00A24 #define MMC_RX_PKT_ASSEMBLY_ERR_CNTR 0x00A28 #define MMC_RX_PKT_SMD_ERR_CNTR 0x00A2C #define MMC_RX_PKT_ASSEMBLY_OK_CNTR 0x00A30 @@ -147,8 +125,6 @@ #define MMC_TXEXESSCOL 0x00A50 #define MMC_TXCARRIERERROR 0x00A54 #define MMC_TXEXECESS_DEFERRED 0x00A58 -#define MMC_IPC_RX_INT_MASK 0x00A5C -#define MMC_IPC_RX_INT 0x00A60 #define MMC_RXIPV4_GD_PKTS_L 0x00A64 #define MMC_RXIPV4_GD_PKTS_H 0x00A68 #define MMC_RXIPV4_HDRERR_PKTS_L 0x00A6C @@ -220,7 +196,7 @@ * 1) MAC should be init and started. 
see osi_start_mac() * 2) osi_core->osd should be populated */ -void mgbe_read_mmc(struct osi_core_priv_data *osi_core); +void mgbe_read_mmc(struct osi_core_priv_data *const osi_core); /** * @brief mgbe_reset_mmc - To reset MMC registers and ether_mmc_counter @@ -232,5 +208,5 @@ void mgbe_read_mmc(struct osi_core_priv_data *osi_core); * 1) MAC should be init and started. see osi_start_mac() * 2) osi_core->osd should be populated */ -void mgbe_reset_mmc(struct osi_core_priv_data *osi_core); +void mgbe_reset_mmc(struct osi_core_priv_data *const osi_core); #endif diff --git a/osi/core/osi_core.c b/osi/core/osi_core.c index 75c9e72..f59884d 100644 --- a/osi/core/osi_core.c +++ b/osi/core/osi_core.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -25,40 +25,13 @@ #include "core_local.h" #include "../osi/common/common.h" -#ifdef HSI_SUPPORT -/** - * @brief hsi_err_code - Arry of error code and reporter ID to be use by - * each Ethernet controller instance - * a condition is met or a timeout occurs - * Below is the data: - * uncorrectable_error_code, correctable_error_code, reporter ID - * hsi_err_code[0] to hsi_err_code[3] for MGBE instance - * hsi_err_code[4] is for EQOS - */ -nveu32_t hsi_err_code[][3] = { - {0x2A00, 0x2E08, 0x8019}, - {0x2A01, 0x2E09, 0x801A}, - {0x2A02, 0x2E0A, 0x801B}, - {0x2A03, 0x2E0B, 0x801C}, - {0x28AD, 0x2DE6, 0x8009}, -}; -#endif - -/** - * @brief g_core - Static core local data array - */ static struct core_local g_core[MAX_CORE_INSTANCES]; -/** - * @brief if_ops - Static core interface operations for virtual/non-virtual - * case - */ -static struct if_core_ops if_ops[MAX_INTERFACE_OPS]; - /** * @brief Function to validate function pointers. 
* * @param[in] osi_core: OSI Core private data structure. + * @param[in] if_ops_p: pointer to interface core operations. * * @note * API Group: @@ -74,34 +47,39 @@ static nve32_t validate_if_func_ptrs(struct osi_core_priv_data *const osi_core, { nveu32_t i = 0; void *temp_ops = (void *)if_ops_p; + nve32_t ret = 0; #if __SIZEOF_POINTER__ == 8 nveu64_t *l_ops = (nveu64_t *)temp_ops; #elif __SIZEOF_POINTER__ == 4 nveu32_t *l_ops = (nveu32_t *)temp_ops; #else - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Undefined architecture\n", 0ULL); - return -1; + ret = -1; + goto fail; #endif + (void) osi_core; for (i = 0; i < (sizeof(*if_ops_p) / (nveu64_t)__SIZEOF_POINTER__); i++) { if (*l_ops == 0U) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "failed at index : ", i); - return -1; + ret = -1; + goto fail; } l_ops++; } - - return 0; +fail: + return ret; } /** * @brief Function to validate input arguments of API. * * @param[in] osi_core: OSI Core private data structure. + * @param[in] l_core: Core local private data structure. 
* * @note * API Group: @@ -115,17 +93,20 @@ static nve32_t validate_if_func_ptrs(struct osi_core_priv_data *const osi_core, static inline nve32_t validate_if_args(struct osi_core_priv_data *const osi_core, struct core_local *l_core) { + nve32_t ret = 0; + if ((osi_core == OSI_NULL) || (l_core->if_init_done == OSI_DISABLE) || (l_core->magic_num != (nveu64_t)osi_core)) { - return -1; + ret = -1; } - return 0; + return ret; } struct osi_core_priv_data *osi_get_core(void) { nveu32_t i; + struct osi_core_priv_data *osi_core = OSI_NULL; for (i = 0U; i < MAX_CORE_INSTANCES; i++) { if (g_core[i].if_init_done == OSI_ENABLE) { @@ -136,7 +117,7 @@ struct osi_core_priv_data *osi_get_core(void) } if (i == MAX_CORE_INSTANCES) { - return OSI_NULL; + goto fail; } g_core[i].magic_num = (nveu64_t)&g_core[i].osi_core; @@ -145,45 +126,55 @@ struct osi_core_priv_data *osi_get_core(void) g_core[i].tx_ts_head.next = &g_core[i].tx_ts_head; g_core[i].pps_freq = OSI_DISABLE; - return &g_core[i].osi_core; + osi_core = &g_core[i].osi_core; + osi_memset(osi_core, 0, sizeof(struct osi_core_priv_data)); +fail: + return osi_core; } struct osi_core_priv_data *get_role_pointer(nveu32_t role) { nveu32_t i; + struct osi_core_priv_data *ret_ptr = OSI_NULL; if ((role != OSI_PTP_M2M_PRIMARY) && (role != OSI_PTP_M2M_SECONDARY)) { - return OSI_NULL; + goto done; } /* Current approch to give pointer for 1st role */ for (i = 0U; i < MAX_CORE_INSTANCES; i++) { if ((g_core[i].if_init_done == OSI_ENABLE) && (g_core[i].ether_m2m_role == role)) { - return &g_core[i].osi_core; + ret_ptr = &g_core[i].osi_core; + break; } } - return OSI_NULL; +done: + return ret_ptr; } nve32_t osi_init_core_ops(struct osi_core_priv_data *const osi_core) { - struct core_local *l_core = (struct core_local *)osi_core; - nve32_t ret = -1; + struct core_local *l_core = (struct core_local *)(void *)osi_core; + static struct if_core_ops if_ops[MAX_INTERFACE_OPS]; + nve32_t ret = 0; if (osi_core == OSI_NULL) { - return -1; + ret = -1; + 
goto fail; } if (osi_core->use_virtualization > OSI_ENABLE) { - return ret; + ret = -1; + goto fail; } if ((l_core->magic_num != (nveu64_t)osi_core) || (l_core->if_init_done == OSI_ENABLE)) { - return -1; + ret = -1; + goto fail; } l_core->if_ops_p = &if_ops[osi_core->use_virtualization]; @@ -195,16 +186,17 @@ nve32_t osi_init_core_ops(struct osi_core_priv_data *const osi_core) } if (validate_if_func_ptrs(osi_core, l_core->if_ops_p) < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Interface function validation failed\n", 0ULL); - return -1; + ret = -1; + goto fail; } ret = l_core->if_ops_p->if_init_core_ops(osi_core); if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "if_init_core_ops failed\n", 0ULL); - return ret; + goto fail; } l_core->ts_lock = OSI_DISABLE; l_core->ether_m2m_role = osi_core->m2m_role; @@ -228,11 +220,11 @@ nve32_t osi_init_core_ops(struct osi_core_priv_data *const osi_core) if (osi_core->pps_frq <= OSI_ENABLE) { l_core->pps_freq = osi_core->pps_frq; } else { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "invalid pps_frq\n", (nveu64_t)osi_core->pps_frq); ret = -1; } - +fail: return ret; } @@ -240,67 +232,79 @@ nve32_t osi_write_phy_reg(struct osi_core_priv_data *const osi_core, const nveu32_t phyaddr, const nveu32_t phyreg, const nveu16_t phydata) { - struct core_local *l_core = (struct core_local *)osi_core; + nve32_t ret = -1; + struct core_local *l_core = (struct core_local *)(void *)osi_core; if (validate_if_args(osi_core, l_core) < 0) { - return -1; + goto fail; } - return l_core->if_ops_p->if_write_phy_reg(osi_core, phyaddr, phyreg, - phydata); + ret = l_core->if_ops_p->if_write_phy_reg(osi_core, phyaddr, phyreg, + phydata); +fail: + return ret; } nve32_t osi_read_phy_reg(struct osi_core_priv_data *const osi_core, const nveu32_t phyaddr, const nveu32_t phyreg) { - struct 
core_local *l_core = (struct core_local *)osi_core; + nve32_t ret = -1; + struct core_local *l_core = (struct core_local *)(void *)osi_core; if (validate_if_args(osi_core, l_core) < 0) { - return -1; + goto fail; } - return l_core->if_ops_p->if_read_phy_reg(osi_core, phyaddr, phyreg); + ret = l_core->if_ops_p->if_read_phy_reg(osi_core, phyaddr, phyreg); +fail: + return ret; } -nve32_t osi_hw_core_init(struct osi_core_priv_data *const osi_core, - nveu32_t tx_fifo_size, nveu32_t rx_fifo_size) +nve32_t osi_hw_core_init(struct osi_core_priv_data *const osi_core) { - struct core_local *l_core = (struct core_local *)osi_core; + nve32_t ret = -1; + struct core_local *l_core = (struct core_local *)(void *)osi_core; if (validate_if_args(osi_core, l_core) < 0) { - return -1; + goto fail; } - return l_core->if_ops_p->if_core_init(osi_core, tx_fifo_size, - rx_fifo_size); + ret = l_core->if_ops_p->if_core_init(osi_core); +fail: + return ret; } nve32_t osi_hw_core_deinit(struct osi_core_priv_data *const osi_core) { - struct core_local *l_core = (struct core_local *)osi_core; + nve32_t ret = -1; + struct core_local *l_core = (struct core_local *)(void *)osi_core; if (validate_if_args(osi_core, l_core) < 0) { - return -1; + goto fail; } - return l_core->if_ops_p->if_core_deinit(osi_core); + ret = l_core->if_ops_p->if_core_deinit(osi_core); +fail: + return ret; } nve32_t osi_handle_ioctl(struct osi_core_priv_data *osi_core, struct osi_ioctl *data) { - struct core_local *l_core = (struct core_local *)osi_core; + struct core_local *l_core = (struct core_local *)(void *)osi_core; nve32_t ret = -1; if (validate_if_args(osi_core, l_core) < 0) { - return ret; + goto fail; } if (data == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CORE: Invalid argument\n", 0ULL); - return ret; + goto fail; } - return l_core->if_ops_p->if_handle_ioctl(osi_core, data); + ret = l_core->if_ops_p->if_handle_ioctl(osi_core, data); +fail: + return 
ret; } diff --git a/osi/core/osi_hal.c b/osi/core/osi_hal.c index 0407070..4364127 100644 --- a/osi/core/osi_hal.c +++ b/osi/core/osi_hal.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -24,16 +24,19 @@ #include #include "core_local.h" #include "../osi/common/common.h" -#include "vlan_filter.h" +#include "core_common.h" +#include "eqos_core.h" +#include "mgbe_core.h" #include "frp.h" #ifdef OSI_DEBUG #include "debug.h" #endif /* OSI_DEBUG */ - +#ifndef OSI_STRIPPED_LIB +#include "vlan_filter.h" +#endif /** * @brief g_ops - Static core operations array. */ -static struct core_ops g_ops[MAX_MAC_IP_TYPES]; /** * @brief Function to validate input arguments of API. @@ -51,15 +54,17 @@ static struct core_ops g_ops[MAX_MAC_IP_TYPES]; * @retval -1 on Failure */ static inline nve32_t validate_args(struct osi_core_priv_data *const osi_core, - struct core_local *l_core) + struct core_local *const l_core) { + nve32_t ret = 0; + if ((osi_core == OSI_NULL) || (osi_core->base == OSI_NULL) || (l_core->init_done == OSI_DISABLE) || (l_core->magic_num != (nveu64_t)osi_core)) { - return -1; + ret = -1; } - return 0; + return ret; } /** @@ -81,79 +86,152 @@ static nve32_t validate_func_ptrs(struct osi_core_priv_data *const osi_core, struct core_ops *ops_p) { nveu32_t i = 0; + nve32_t ret = 0; void *temp_ops = (void *)ops_p; #if __SIZEOF_POINTER__ == 8 nveu64_t *l_ops = (nveu64_t *)temp_ops; #elif __SIZEOF_POINTER__ == 4 nveu32_t *l_ops = (nveu32_t *)temp_ops; #else - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Undefined architecture\n", 0ULL); - return -1; + ret = -1; + goto fail; #endif + (void) osi_core; for (i = 0; i < (sizeof(*ops_p) / (nveu64_t)__SIZEOF_POINTER__); 
i++) { if (*l_ops == 0U) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "core: fn ptr validation failed at\n", (nveu64_t)i); - return -1; + ret = -1; + goto fail; } l_ops++; } - - return 0; +fail: + return ret; } -nve32_t osi_hal_write_phy_reg(struct osi_core_priv_data *const osi_core, - const nveu32_t phyaddr, const nveu32_t phyreg, - const nveu16_t phydata) +/** + * @brief osi_hal_write_phy_reg - HW API to Write to a PHY register through MAC + * over MDIO bus. + * + * @note + * Algorithm: + * - Before proceeding for reading for PHY register check whether any MII + * operation going on MDIO bus by polling MAC_GMII_BUSY bit. + * - Program data into MAC MDIO data register. + * - Populate required parameters like phy address, phy register etc,, + * in MAC MDIO Address register. write and GMII busy bits needs to be set + * in this operation. + * - Write into MAC MDIO address register poll for GMII busy for MDIO + * operation to complete. + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] phyaddr: PHY address (PHY ID) associated with PHY + * @param[in] phyreg: Register which needs to be write to PHY. + * @param[in] phydata: Data to write to a PHY register. + * + * @pre MAC should be init and started. see osi_start_mac() + * + * @note + * Traceability Details: + * - SWUD_ID: TODO + * + * @usage + * - Allowed context for the API call + * - Interrupt handler: No + * - Signal handler: No + * - Thread safe: No + * - Async/Sync: Sync + * - Required Privileges: None + * - API Group: + * - Initialization: Yes + * - Run time: Yes + * - De-initialization: No + * + * @retval 0 on success + * @retval -1 on failure. 
+ */ +static nve32_t osi_hal_write_phy_reg(struct osi_core_priv_data *const osi_core, + const nveu32_t phyaddr, const nveu32_t phyreg, + const nveu16_t phydata) { - struct core_local *l_core = (struct core_local *)osi_core; - - if (validate_args(osi_core, l_core) < 0) { - return -1; - } + struct core_local *l_core = (struct core_local *)(void *)osi_core; return l_core->ops_p->write_phy_reg(osi_core, phyaddr, phyreg, phydata); } -nve32_t osi_hal_read_phy_reg(struct osi_core_priv_data *const osi_core, - const nveu32_t phyaddr, const nveu32_t phyreg) -{ - struct core_local *l_core = (struct core_local *)osi_core; +/** + * @brief osi_hal_read_phy_reg - HW API to Read from a PHY register through MAC + * over MDIO bus. + * + * @note + * Algorithm: + * - Before proceeding for reading for PHY register check whether any MII + * operation going on MDIO bus by polling MAC_GMII_BUSY bit. + * - Populate required parameters like phy address, phy register etc,, + * in program it in MAC MDIO Address register. Read and GMII busy bits + * needs to be set in this operation. + * - Write into MAC MDIO address register poll for GMII busy for MDIO + * operation to complete. After this data will be available at MAC MDIO + * data register. + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] phyaddr: PHY address (PHY ID) associated with PHY + * @param[in] phyreg: Register which needs to be read from PHY. + * + * @pre MAC should be init and started. 
see osi_start_mac() + * + * @note + * Traceability Details: + * - SWUD_ID: TODO + * + * @usage + * - Allowed context for the API call + * - Interrupt handler: No + * - Signal handler: No + * - Thread safe: No + * - Async/Sync: Sync + * - Required Privileges: None + * - API Group: + * - Initialization: Yes + * - Run time: Yes + * - De-initialization: No + * + * @retval data from PHY register on success + * @retval -1 on failure + */ +static nve32_t osi_hal_read_phy_reg(struct osi_core_priv_data *const osi_core, + const nveu32_t phyaddr, const nveu32_t phyreg) - if (validate_args(osi_core, l_core) < 0) { - return -1; - } +{ + struct core_local *l_core = (struct core_local *)(void *)osi_core; return l_core->ops_p->read_phy_reg(osi_core, phyaddr, phyreg); } static nve32_t osi_hal_init_core_ops(struct osi_core_priv_data *const osi_core) { - struct core_local *l_core = (struct core_local *)osi_core; - typedef void (*init_ops_arr)(struct core_ops *); - typedef void *(*safety_init)(void); - - init_ops_arr i_ops[MAX_MAC_IP_TYPES][MAX_MAC_IP_TYPES] = { + nve32_t ret = -1; + struct core_local *l_core = (struct core_local *)(void *)osi_core; + typedef void (*init_core_ops_arr)(struct core_ops *local_ops); + static struct core_ops g_ops[MAX_MAC_IP_TYPES]; + init_core_ops_arr i_ops[MAX_MAC_IP_TYPES][MAX_MAC_IP_TYPES] = { { eqos_init_core_ops, OSI_NULL }, { mgbe_init_core_ops, OSI_NULL } }; - safety_init s_init[MAX_MAC_IP_TYPES][MAX_MAC_IP_TYPES] = { - { eqos_get_core_safety_config, ivc_get_core_safety_config }, - { OSI_NULL, OSI_NULL } - }; - if (osi_core == OSI_NULL) { - return -1; + goto exit; } if ((l_core->magic_num != (nveu64_t)osi_core) || (l_core->init_done == OSI_ENABLE)) { - return -1; + goto exit; } if ((osi_core->osd_ops.ops_log == OSI_NULL) || @@ -163,54 +241,40 @@ static nve32_t osi_hal_init_core_ops(struct osi_core_priv_data *const osi_core) (osi_core->osd_ops.printf == OSI_NULL) || #endif /* OSI_DEBUG */ (osi_core->osd_ops.usleep_range == OSI_NULL)) { - return -1; 
+ goto exit; } if (osi_core->mac > OSI_MAC_HW_MGBE) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid MAC HW type\n", 0ULL); - return -1; + goto exit; } if (osi_core->use_virtualization > OSI_ENABLE) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid use_virtualization value\n", 0ULL); - return -1; + goto exit; } if (i_ops[osi_core->mac][osi_core->use_virtualization] != OSI_NULL) { i_ops[osi_core->mac][osi_core->use_virtualization](&g_ops[osi_core->mac]); } - if (s_init[osi_core->mac][osi_core->use_virtualization] != OSI_NULL) { - osi_core->safety_config = - s_init[osi_core->mac][osi_core->use_virtualization](); - } - if (validate_func_ptrs(osi_core, &g_ops[osi_core->mac]) < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "core: function ptrs validation failed\n", 0ULL); - return -1; + goto exit; } l_core->ops_p = &g_ops[osi_core->mac]; l_core->init_done = OSI_ENABLE; - return 0; -} - -nve32_t osi_poll_for_mac_reset_complete( - struct osi_core_priv_data *const osi_core) -{ - struct core_local *l_core = (struct core_local *)osi_core; - - if (validate_args(osi_core, l_core) < 0) { - return -1; - } - - return l_core->ops_p->poll_for_swr(osi_core); + ret = 0; +exit: + return ret; } +#ifndef OSI_STRIPPED_LIB /** * @brief init_vlan_filters - Helper function to init all VLAN SW information. 
* @@ -220,7 +284,7 @@ nve32_t osi_poll_for_mac_reset_complete( */ static inline void init_vlan_filters(struct osi_core_priv_data *const osi_core) { - unsigned int i = 0U; + nveu32_t i = 0U; for (i = 0; i < VLAN_NUM_VID; i++) { osi_core->vid[i] = VLAN_ID_INVALID; @@ -229,176 +293,341 @@ static inline void init_vlan_filters(struct osi_core_priv_data *const osi_core) osi_core->vf_bitmap = 0U; osi_core->vlan_filter_cnt = 0U; } +#endif -nve32_t osi_hal_hw_core_init(struct osi_core_priv_data *const osi_core, - nveu32_t tx_fifo_size, nveu32_t rx_fifo_size) +/** + * @brief osi_hal_hw_core_deinit - HW API for MAC deinitialization. + * + * @note + * Algorithm: + * - Stops MAC transmission and reception. + * + * @param[in] osi_core: OSI core private data structure. + * + * @pre MAC has to be out of reset. + * + * @note + * Traceability Details: + * - SWUD_ID: TODO + * + * @usage + * - Allowed context for the API call + * - Interrupt handler: No + * - Signal handler: No + * - Thread safe: No + * - Async/Sync: Sync + * - Required Privileges: None + * - API Group: + * - Initialization: No + * - Run time: No + * - De-initialization: Yes + * + * @retval 0 on success + * @retval -1 on failure. 
+ */ +static nve32_t osi_hal_hw_core_deinit(struct osi_core_priv_data *const osi_core) { - struct core_local *l_core = (struct core_local *)osi_core; - nve32_t ret; + struct core_local *l_core = (struct core_local *)(void *)osi_core; - if (validate_args(osi_core, l_core) < 0) { - return -1; + /* Stop the MAC */ + hw_stop_mac(osi_core); + + /* Disable MAC interrupts */ + osi_writela(osi_core, 0U, ((nveu8_t *)osi_core->base + HW_MAC_IER)); + + if (l_core->l_mac_ver != MAC_CORE_VER_TYPE_EQOS) { + osi_writela(osi_core, 0U, + ((nveu8_t *)osi_core->base + WRAP_COMMON_INTR_ENABLE)); } - init_vlan_filters(osi_core); + /* Handle the common interrupt if any status bits set */ + l_core->ops_p->handle_common_intr(osi_core); - /* Init FRP */ - init_frp(osi_core); + l_core->hw_init_successful = OSI_DISABLE; - ret = l_core->ops_p->core_init(osi_core, tx_fifo_size, rx_fifo_size); + if (l_core->state != OSI_SUSPENDED) { + /* Reset restore operation flags on interface down */ + l_core->cfg.flags = OSI_DISABLE; + } - if (ret == 0) { - l_core->hw_init_successful = OSI_ENABLE; + l_core->state = OSI_DISABLE; + + return 0; +} + +/** + * @brief div_u64 - Calls a function which returns quotient + * + * @param[in] dividend: Dividend + * @param[in] divisor: Divisor + * + * @pre MAC IP should be out of reset and need to be initialized as the + * requirements. + * + * + * @note + * API Group: + * - Initialization: No + * - Run time: Yes + * - De-initialization: No + * @returns Quotient + */ +static inline nveu64_t div_u64(nveu64_t dividend, + nveu64_t divisor) +{ + nveu64_t remain; + + return div_u64_rem(dividend, divisor, &remain); +} + +/** + * @brief osi_ptp_configuration - Configure PTP + * + * @note + * Algorithm: + * - Configure the PTP registers that are required for PTP. + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] enable: Enable or disable Time Stamping. 0: Disable 1: Enable + * + * @pre + * - MAC should be init and started. 
see osi_start_mac() + * - osi->ptp_config.ptp_filter need to be filled accordingly to the + * filter that need to be set for PTP packets. Please check osi_ptp_config + * structure declaration on the bit fields that need to be filled. + * - osi->ptp_config.ptp_clock need to be filled with the ptp system clk. + * Currently it is set to 62500000Hz. + * - osi->ptp_config.ptp_ref_clk_rate need to be filled with the ptp + * reference clock that platform supports. + * - osi->ptp_config.sec need to be filled with current time of seconds + * - osi->ptp_config.nsec need to be filled with current time of nseconds + * - osi->base need to be filled with the ioremapped base address + * + * @note + * Traceability Details: + * - SWUD_ID: ETHERNET_NVETHERNETRM_021 + * + * @usage + * - Allowed context for the API call + * - Interrupt handler: No + * - Signal handler: No + * - Thread safe: No + * - Async/Sync: Sync + * - Required Privileges: None + * - API Group: + * - Initialization: Yes + * - Run time: Yes + * - De-initialization: No + * + * @retval 0 on success + * @retval -1 on failure. 
+ */ +static nve32_t osi_ptp_configuration(struct osi_core_priv_data *const osi_core, + OSI_UNUSED const nveu32_t enable) +{ +#ifndef OSI_STRIPPED_LIB + struct core_local *l_core = (struct core_local *)(void *)osi_core; +#endif /* !OSI_STRIPPED_LIB */ + nve32_t ret = 0; + nveu64_t temp = 0, temp1 = 0, temp2 = 0; + nveu64_t ssinc = 0; + +#ifndef OSI_STRIPPED_LIB + if (enable == OSI_DISABLE) { + /* disable hw time stamping */ + /* Program MAC_Timestamp_Control Register */ + hw_config_tscr(osi_core, OSI_DISABLE); + /* Disable PTP RX Queue routing */ + ret = l_core->ops_p->config_ptp_rxq(osi_core, + osi_core->ptp_config.ptp_rx_queue, + OSI_DISABLE); + } else { +#endif /* !OSI_STRIPPED_LIB */ + /* Program MAC_Timestamp_Control Register */ + hw_config_tscr(osi_core, osi_core->ptp_config.ptp_filter); + + /* Program Sub Second Increment Register */ + hw_config_ssir(osi_core); + + /* formula for calculating addend value is + * TSAR = (2^32 * 1000) / (ptp_ref_clk_rate in MHz * SSINC) + * 2^x * y == (y << x), hence + * 2^32 * 1000 == (1000 << 32) + * so addend = (2^32 * 1000)/(ptp_ref_clk_rate in MHZ * SSINC); + */ + ssinc = OSI_PTP_SSINC_4; + if (osi_core->mac_ver == OSI_EQOS_MAC_5_30) { + ssinc = OSI_PTP_SSINC_6; + } + + temp = ((nveu64_t)1000 << 32); + temp = (nveu64_t)temp * 1000000U; + + temp1 = div_u64(temp, + (nveu64_t)osi_core->ptp_config.ptp_ref_clk_rate); + + temp2 = div_u64(temp1, (nveu64_t)ssinc); + + if (temp2 < UINT_MAX) { + osi_core->default_addend = (nveu32_t)temp2; + } else { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "core: temp2 >= UINT_MAX\n", 0ULL); + ret = -1; + goto fail; + } + + /* Program addend value */ + ret = hw_config_addend(osi_core, osi_core->default_addend); + + /* Set current time */ + if (ret == 0) { + ret = hw_set_systime_to_mac(osi_core, + osi_core->ptp_config.sec, + osi_core->ptp_config.nsec); +#ifndef OSI_STRIPPED_LIB + if (ret == 0) { + /* Enable PTP RX Queue routing */ + ret = l_core->ops_p->config_ptp_rxq(osi_core, + 
osi_core->ptp_config.ptp_rx_queue, + OSI_ENABLE); + } +#endif /* !OSI_STRIPPED_LIB */ + } +#ifndef OSI_STRIPPED_LIB + } +#endif /* !OSI_STRIPPED_LIB */ +fail: + return ret; +} + +static nve32_t osi_get_mac_version(struct osi_core_priv_data *const osi_core, nveu32_t *mac_ver) +{ + struct core_local *l_core = (struct core_local *)(void *)osi_core; + nve32_t ret = 0; + + *mac_ver = osi_readla(osi_core, ((nveu8_t *)osi_core->base + (nve32_t)MAC_VERSION)) & + MAC_VERSION_SNVER_MASK; + + if (validate_mac_ver_update_chans(*mac_ver, &l_core->num_max_chans, + &l_core->l_mac_ver) == 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Invalid MAC version\n", (nveu64_t)*mac_ver) + ret = -1; } return ret; } -nve32_t osi_hal_hw_core_deinit(struct osi_core_priv_data *const osi_core) +static nve32_t osi_hal_hw_core_init(struct osi_core_priv_data *const osi_core) { - struct core_local *l_core = (struct core_local *)osi_core; + struct core_local *l_core = (struct core_local *)(void *)osi_core; + const nveu32_t ptp_ref_clk_rate[3] = {EQOS_X_PTP_CLK_SPEED, EQOS_PTP_CLK_SPEED, + MGBE_PTP_CLK_SPEED}; + nve32_t ret; - if (validate_args(osi_core, l_core) < 0) { - return -1; + ret = osi_get_mac_version(osi_core, &osi_core->mac_ver); + if (ret < 0) { + goto fail; } - l_core->hw_init_successful = OSI_DISABLE; - l_core->ops_p->core_deinit(osi_core); - - /* FIXME: Should be fixed */ - //l_core->init_done = OSI_DISABLE; - //l_core->magic_num = 0; - - return 0; -} - -nve32_t osi_start_mac(struct osi_core_priv_data *const osi_core) -{ - struct core_local *l_core = (struct core_local *)osi_core; - - if (validate_args(osi_core, l_core) < 0) { - return -1; + /* Bring MAC out of reset */ + ret = hw_poll_for_swr(osi_core); + if (ret < 0) { + goto fail; } - l_core->ops_p->start_mac(osi_core); +#ifndef OSI_STRIPPED_LIB + init_vlan_filters(osi_core); - return 0; -} +#endif /* !OSI_STRIPPED_LIB */ -nve32_t osi_stop_mac(struct osi_core_priv_data *const osi_core) -{ - struct core_local *l_core = 
(struct core_local *)osi_core; - - if (validate_args(osi_core, l_core) < 0) { - return -1; + ret = l_core->ops_p->core_init(osi_core); + if (ret < 0) { + goto fail; } - l_core->ops_p->stop_mac(osi_core); + /* By default set MAC to Full duplex mode. + * Since this is a local function it will always return sucess, + * so no need to check for return value + */ + (void)hw_set_mode(osi_core, OSI_FULL_DUPLEX); - return 0; -} - -nve32_t osi_common_isr(struct osi_core_priv_data *const osi_core) -{ - struct core_local *l_core = (struct core_local *)osi_core; - - if (validate_args(osi_core, l_core) < 0) { - return -1; + /* By default enable rxcsum */ + ret = hw_config_rxcsum_offload(osi_core, OSI_ENABLE); + if (ret == 0) { + l_core->cfg.rxcsum = OSI_ENABLE; + l_core->cfg.flags |= DYNAMIC_CFG_RXCSUM; } - l_core->ops_p->handle_common_intr(osi_core); - - return 0; -} - -nve32_t osi_set_mode(struct osi_core_priv_data *const osi_core, - const nve32_t mode) -{ - struct core_local *l_core = (struct core_local *)osi_core; - - if (validate_args(osi_core, l_core) < 0) { - return -1; + /* Set default PTP settings */ + osi_core->ptp_config.ptp_rx_queue = 3U; + osi_core->ptp_config.ptp_ref_clk_rate = ptp_ref_clk_rate[l_core->l_mac_ver]; + osi_core->ptp_config.ptp_filter = OSI_MAC_TCR_TSENA | OSI_MAC_TCR_TSCFUPDT | + OSI_MAC_TCR_TSCTRLSSR | OSI_MAC_TCR_TSVER2ENA | + OSI_MAC_TCR_TSIPENA | OSI_MAC_TCR_TSIPV6ENA | + OSI_MAC_TCR_TSIPV4ENA | OSI_MAC_TCR_SNAPTYPSEL_1; + osi_core->ptp_config.sec = 0; + osi_core->ptp_config.nsec = 0; + osi_core->ptp_config.one_nsec_accuracy = OSI_ENABLE; + ret = osi_ptp_configuration(osi_core, OSI_ENABLE); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Fail to configure PTP\n", 0ULL); + goto fail; } - return l_core->ops_p->set_mode(osi_core, mode); -} - -nve32_t osi_set_speed(struct osi_core_priv_data *const osi_core, - const nve32_t speed) -{ - struct core_local *l_core = (struct core_local *)osi_core; - - if (validate_args(osi_core, 
l_core) < 0) { - return -1; - } - - return l_core->ops_p->set_speed(osi_core, speed); -} - -nve32_t osi_pad_calibrate(struct osi_core_priv_data *const osi_core) -{ - struct core_local *l_core = (struct core_local *)osi_core; - - if (validate_args(osi_core, l_core) < 0) { - return -1; - } - - return l_core->ops_p->pad_calibrate(osi_core); -} - -nve32_t osi_config_fw_err_pkts(struct osi_core_priv_data *const osi_core, - const nveu32_t qinx, const nveu32_t fw_err) -{ - struct core_local *l_core = (struct core_local *)osi_core; - - if (validate_args(osi_core, l_core) < 0) { - return -1; - } - - /* Configure Forwarding of Error packets */ - return l_core->ops_p->config_fw_err_pkts(osi_core, qinx, fw_err); + /* Start the MAC */ + hw_start_mac(osi_core); + + l_core->lane_status = OSI_ENABLE; + l_core->hw_init_successful = OSI_ENABLE; + +fail: + return ret; } +#ifndef OSI_STRIPPED_LIB static nve32_t conf_ptp_offload(struct osi_core_priv_data *const osi_core, struct osi_pto_config *const pto_config) { - struct core_local *l_core = (struct core_local *)osi_core; - int ret = -1; + struct core_local *l_core = (struct core_local *)(void *)osi_core; + nve32_t ret = -1; /* Validate input arguments */ if (pto_config == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "pto_config is NULL\n", 0ULL); return ret; } - if (pto_config->mc_uc != OSI_ENABLE && - pto_config->mc_uc != OSI_DISABLE) { + if ((pto_config->mc_uc != OSI_ENABLE) && + (pto_config->mc_uc != OSI_DISABLE)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "invalid mc_uc flag value\n", (nveul64_t)pto_config->mc_uc); return ret; } - if (pto_config->en_dis != OSI_ENABLE && - pto_config->en_dis != OSI_DISABLE) { + if ((pto_config->en_dis != OSI_ENABLE) && + (pto_config->en_dis != OSI_DISABLE)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "invalid enable flag value\n", (nveul64_t)pto_config->en_dis); return ret; } - if (pto_config->snap_type != 
OSI_PTP_SNAP_ORDINARY && - pto_config->snap_type != OSI_PTP_SNAP_TRANSPORT && - pto_config->snap_type != OSI_PTP_SNAP_P2P) { + if ((pto_config->snap_type != OSI_PTP_SNAP_ORDINARY) && + (pto_config->snap_type != OSI_PTP_SNAP_TRANSPORT) && + (pto_config->snap_type != OSI_PTP_SNAP_P2P)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "invalid SNAP type value\n", (nveul64_t)pto_config->snap_type); return ret; } - if (pto_config->master != OSI_ENABLE && - pto_config->master != OSI_DISABLE) { + if ((pto_config->master != OSI_ENABLE) && + (pto_config->master != OSI_DISABLE)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "invalid master flag value\n", (nveul64_t)pto_config->master); @@ -438,29 +667,54 @@ static nve32_t conf_ptp_offload(struct osi_core_priv_data *const osi_core, return ret; } +#endif /* !OSI_STRIPPED_LIB */ -nve32_t osi_l2_filter(struct osi_core_priv_data *const osi_core, - const struct osi_filter *filter) +/** + * @brief osi_l2_filter - configure L2 mac filter. + * + * @note + * Algorithm: + * - This sequence is used to configure MAC in different packet + * processing modes like promiscuous, multicast, unicast, + * hash unicast/multicast and perfect/inverse matching for L2 DA + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] filter: OSI filter structure. + * + * @pre + * - MAC should be initialized and started. see osi_start_mac() + * + * @note + * Traceability Details: + * - SWUD_ID: ETHERNET_NVETHERNETRM_018 + * + * @usage + * - Allowed context for the API call + * - Interrupt handler: No + * - Signal handler: No + * - Thread safe: No + * - Async/Sync: Sync + * - Required Privileges: None + * - API Group: + * - Initialization: Yes + * - Run time: Yes + * - De-initialization: No + * + * @retval 0 on success + * @retval -1 on failure. 
+ */ +static nve32_t osi_l2_filter(struct osi_core_priv_data *const osi_core, + const struct osi_filter *filter) { - struct core_local *l_core = (struct core_local *)osi_core; - nve32_t ret; + struct core_local *l_core = (struct core_local *)(void *)osi_core; + nve32_t ret = 0; - if ((validate_args(osi_core, l_core) < 0) || (filter == OSI_NULL)) { - return -1; - } - - if (filter == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "CORE: filter is NULL\n", 0ULL); - return -1; - } - - ret = l_core->ops_p->config_mac_pkt_filter_reg(osi_core, filter); + ret = hw_config_mac_pkt_filter_reg(osi_core, filter); if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "failed to configure MAC packet filter register\n", 0ULL); - return ret; + goto fail; } if (((filter->oper_mode & OSI_OPER_ADDR_UPDATE) != OSI_NONE) || @@ -472,224 +726,437 @@ nve32_t osi_l2_filter(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "DCS requested. Conflicts with DT config\n", 0ULL); - return ret; + goto fail; } ret = l_core->ops_p->update_mac_addr_low_high_reg(osi_core, filter); } +fail: return ret; } /** - * @brief helper_l4_filter helper function for l4 filtering - * - * @param[in] osi_core: OSI Core private data structure. - * @param[in] l_filter: filter structure - * @param[in] type: filter type l3 or l4 - * @param[in] dma_routing_enable: dma routing enable (1) or disable (0) - * @param[in] dma_chan: dma channel - * - * @pre MAC needs to be out of reset and proper clock configured. + * @brief l3l4_find_match - function to find filter match * * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No + * Algorithm: + * - Search through filter list l_core->cfg.l3_l4[] and find for a + * match with l3_l4 input data. + * - Filter data matches, store the filter index into filter_no. + * - Store first found filter index into free_filter_no. 
+ * - Return 0 on match. + * - Return -1 on failure. * - * @retval 0 on Success - * @retval -1 on Failure + * @param[in] l_core: OSI local core data structure. + * @param[in] l3_l4: Pointer to l3 l4 filter structure (#osi_l3_l4_filter) + * @param[out] filter_no: pointer to filter index + * @param[out] free_filter_no: pointer to free filter index + * @param[in] max_filter_no: maximum allowed filter number + * + * @pre + * - MAC should be initialized and started. see osi_start_mac() + * + * @retval 0 on success + * @retval -1 on failure. */ -static inline nve32_t helper_l4_filter( - struct osi_core_priv_data *const osi_core, - struct core_ops *ops_p, - struct osi_l3_l4_filter l_filter, - nveu32_t type, - nveu32_t dma_routing_enable, - nveu32_t dma_chan) +static nve32_t l3l4_find_match(const struct core_local *const l_core, + const struct osi_l3_l4_filter *const l3_l4, + nveu32_t *filter_no, + nveu32_t *free_filter_no, + nveu32_t max_filter_no) { - nve32_t ret = 0; + nveu32_t i; + nve32_t ret = -1; + nveu32_t found_free_index = 0; + nve32_t filter_size = (nve32_t)sizeof(l3_l4->data); +#if defined(L3L4_WILDCARD_FILTER) + nveu32_t start_idx = 1; /* leave first one for TCP wildcard */ +#else + nveu32_t start_idx = 0; +#endif /* L3L4_WILDCARD_FILTER */ - ret = ops_p->config_l4_filters(osi_core, - l_filter.filter_no, - l_filter.filter_enb_dis, - type, - l_filter.src_dst_addr_match, - l_filter.perfect_inverse_match, - dma_routing_enable, - dma_chan); - if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "failed to configure L4 filters\n", 0ULL); - return ret; - } + /* init free index value to invalid value */ + *free_filter_no = UINT_MAX; - return ops_p->update_l4_port_no(osi_core, - l_filter.filter_no, - l_filter.port_no, - l_filter.src_dst_addr_match); -} + for (i = start_idx; i <= max_filter_no; i++) { + if (l_core->cfg.l3_l4[i].filter_enb_dis == OSI_FALSE) { + /* filter not enabled, save free index */ + if (found_free_index == 0U) { + *free_filter_no = i; + 
found_free_index = 1; + } + continue; + } -/** - * @brief helper_l3_filter helper function for l3 filtering - * - * @param[in] osi_core: OSI Core private data structure. - * @param[in] l_filter: filter structure - * @param[in] type: filter type l3 or l4 - * @param[in] dma_routing_enable: dma routing enable (1) or disable (0) - * @param[in] dma_chan: dma channel - * - * @pre MAC needs to be out of reset and proper clock configured. - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on Success - * @retval -1 on Failure - */ -static inline nve32_t helper_l3_filter( - struct osi_core_priv_data *const osi_core, - struct core_ops *ops_p, - struct osi_l3_l4_filter l_filter, - nveu32_t type, - nveu32_t dma_routing_enable, - nveu32_t dma_chan) -{ - nve32_t ret = 0; + if (osi_memcmp(&(l_core->cfg.l3_l4[i].data), &(l3_l4->data), + filter_size) != 0) { + /* data do not match */ + continue; + } - ret = ops_p->config_l3_filters(osi_core, - l_filter.filter_no, - l_filter.filter_enb_dis, - type, - l_filter.src_dst_addr_match, - l_filter.perfect_inverse_match, - dma_routing_enable, - dma_chan); - if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "failed to configure L3 filters\n", 0ULL); - return ret; - } - - if (type == OSI_IP6_FILTER) { - ret = ops_p->update_ip6_addr(osi_core, l_filter.filter_no, - l_filter.ip6_addr); - } else if (type == OSI_IP4_FILTER) { - ret = ops_p->update_ip4_addr(osi_core, l_filter.filter_no, - l_filter.ip4_addr, - l_filter.src_dst_addr_match); - } else { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "Invalid L3 filter type\n", 0ULL); - return -1; + /* found a match */ + ret = 0; + *filter_no = i; + break; } return ret; } -nve32_t osi_l3l4_filter(struct osi_core_priv_data *const osi_core, - const struct osi_l3_l4_filter l_filter, - const nveu32_t type, const nveu32_t dma_routing_enable, - const nveu32_t dma_chan, const nveu32_t is_l4_filter) +/** + * @brief 
configure_l3l4_filter_valid_params - parameter validation function for l3l4 configuration + * + * @note + * Algorithm: + * - Validate all the l3_l4 structure parameter. + * - Verify routing dma channel id value. + * - Vefify each enable/disable parameters is <= OSI_TRUE. + * - Return -1 if parameter validation fails. + * - Return 0 on success. + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] l3_l4: Pointer to l3 l4 filter structure (#osi_l3_l4_filter) + * + * @pre + * - MAC should be initialized and started. see osi_start_mac() + * + * @retval 0 on success + * @retval -1 on failure. + */ +static nve32_t configure_l3l4_filter_valid_params(const struct osi_core_priv_data *const osi_core, + const struct osi_l3_l4_filter *const l3_l4) { - struct core_local *l_core = (struct core_local *)osi_core; + const nveu32_t max_dma_chan[2] = { + OSI_EQOS_MAX_NUM_CHANS, + OSI_MGBE_MAX_NUM_CHANS + }; nve32_t ret = -1; - if (validate_args(osi_core, l_core) < 0) { - return -1; + /* validate dma channel */ + if (l3_l4->dma_chan > max_dma_chan[osi_core->mac]) { + OSI_CORE_ERR((osi_core->osd), (OSI_LOG_ARG_OUTOFBOUND), + ("L3L4: Wrong DMA channel: "), (l3_l4->dma_chan)); + goto exit_func; } - if ((dma_routing_enable == OSI_ENABLE) && - (osi_core->dcs_en != OSI_ENABLE)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "dma routing enabled but dcs disabled in DT\n", - 0ULL); - return ret; + /* valate enb parameters */ + if ((l3_l4->filter_enb_dis +#ifndef OSI_STRIPPED_LIB + | l3_l4->dma_routing_enable | + l3_l4->data.is_udp | + l3_l4->data.is_ipv6 | + l3_l4->data.src.port_match | + l3_l4->data.src.addr_match | + l3_l4->data.dst.port_match | + l3_l4->data.dst.addr_match | + l3_l4->data.src.port_match_inv | + l3_l4->data.src.addr_match_inv | + l3_l4->data.dst.port_match_inv | + l3_l4->data.dst.addr_match_inv +#endif /* !OSI_STRIPPED_LIB */ + ) > OSI_TRUE) { + OSI_CORE_ERR((osi_core->osd), (OSI_LOG_ARG_OUTOFBOUND), + ("L3L4: one of the enb param > OSI_TRUE: 
"), 0); + goto exit_func; } - if (is_l4_filter == OSI_ENABLE) { - ret = helper_l4_filter(osi_core, l_core->ops_p, l_filter, type, - dma_routing_enable, dma_chan); - } else { - ret = helper_l3_filter(osi_core, l_core->ops_p, l_filter, type, - dma_routing_enable, dma_chan); +#ifndef OSI_STRIPPED_LIB + /* validate port/addr enb bits */ + if (l3_l4->filter_enb_dis == OSI_TRUE) { + if ((l3_l4->data.src.port_match | l3_l4->data.src.addr_match | + l3_l4->data.dst.port_match | l3_l4->data.dst.addr_match) + == OSI_FALSE) { + OSI_CORE_ERR((osi_core->osd), (OSI_LOG_ARG_OUTOFBOUND), + ("L3L4: None of the enb bits are not set: "), 0); + goto exit_func; + } + if ((l3_l4->data.is_ipv6 & l3_l4->data.src.addr_match & + l3_l4->data.dst.addr_match) != OSI_FALSE) { + OSI_CORE_ERR((osi_core->osd), (OSI_LOG_ARG_OUTOFBOUND), + ("L3L4: Both ip6 addr match bits are set\n"), 0); + goto exit_func; + } } +#endif /* !OSI_STRIPPED_LIB */ - if (ret < 0) { - OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID, - "L3/L4 helper function failed\n", 0ULL); - return ret; - } + /* success */ + ret = 0; - if (osi_core->l3l4_filter_bitmask != OSI_DISABLE) { - ret = l_core->ops_p->config_l3_l4_filter_enable(osi_core, - OSI_ENABLE); - } else { - ret = l_core->ops_p->config_l3_l4_filter_enable(osi_core, - OSI_DISABLE); - } +exit_func: return ret; } -nve32_t osi_config_rxcsum_offload(struct osi_core_priv_data *const osi_core, - const nveu32_t enable) +/** + * @brief configure_l3l4_filter_helper - helper function for l3l4 configuration + * + * @note + * Algorithm: + * - Confifure l3l4 filter using l_core->ops_p->config_l3l4_filters(). + * Return -1 if config_l3l4_filters() fails. + * - Store the filter into l_core->cfg.l3_l4[] and enable + * l3l4 filter if any of the filter index enabled currently. + * + * @param[in] osi_core: OSI core private data structure. 
+ * @param[in] filter_no: pointer to filter number + * @param[in] l3_l4: Pointer to l3 l4 filter structure (#osi_l3_l4_filter) + * + * @pre + * - MAC should be initialized and started. see osi_start_mac() + * + * @retval 0 on success + * @retval -1 on failure. + */ +static nve32_t configure_l3l4_filter_helper(struct osi_core_priv_data *const osi_core, + nveu32_t filter_no, + const struct osi_l3_l4_filter *const l3_l4) { - struct core_local *l_core = (struct core_local *)osi_core; + struct osi_l3_l4_filter *cfg_l3_l4; + struct core_local *const l_core = (struct core_local *)(void *)osi_core; + nve32_t ret; - if (validate_args(osi_core, l_core) < 0) { - return -1; + ret = l_core->ops_p->config_l3l4_filters(osi_core, filter_no, l3_l4); + if (ret < 0) { + OSI_CORE_ERR((osi_core->osd), (OSI_LOG_ARG_HW_FAIL), + ("Failed to config L3L4 filters: "), (filter_no)); + goto exit_func; } - return l_core->ops_p->config_rxcsum_offload(osi_core, enable); + cfg_l3_l4 = &(l_core->cfg.l3_l4[filter_no]); + if (l3_l4->filter_enb_dis == OSI_TRUE) { + /* Store the filter. + * osi_memcpy is an internal function and it cannot fail, hence + * ignoring return value. + */ + (void)osi_memcpy(cfg_l3_l4, l3_l4, sizeof(struct osi_l3_l4_filter)); + OSI_CORE_INFO((osi_core->osd), (OSI_LOG_ARG_OUTOFBOUND), + ("L3L4: ADD: "), (filter_no)); + + /* update filter mask bit */ + osi_core->l3l4_filter_bitmask |= ((nveu32_t)1U << (filter_no & 0x1FU)); + } else { + /* Clear the filter data. + * osi_memset is an internal function and it cannot fail, hence + * ignoring return value. 
+ */ + (void)osi_memset(cfg_l3_l4, 0, sizeof(struct osi_l3_l4_filter)); + OSI_CORE_INFO((osi_core->osd), (OSI_LOG_ARG_OUTOFBOUND), + ("L3L4: DELETE: "), (filter_no)); + + /* update filter mask bit */ + osi_core->l3l4_filter_bitmask &= ~((nveu32_t)1U << (filter_no & 0x1FU)); + } + + if (osi_core->l3l4_filter_bitmask != 0U) { + /* enable l3l4 filter */ + ret = hw_config_l3_l4_filter_enable(osi_core, OSI_ENABLE); + } else { + /* disable l3l4 filter */ + ret = hw_config_l3_l4_filter_enable(osi_core, OSI_DISABLE); + } + +exit_func: + + return ret; } -nve32_t osi_set_systime_to_mac(struct osi_core_priv_data *const osi_core, - const nveu32_t sec, const nveu32_t nsec) +#if defined(L3L4_WILDCARD_FILTER) +/** + * @brief l3l4_add_wildcard_filter - function to configure wildcard filter. + * + * @note + * Algorithm: + * - Configure TCP wildcard filter at index 0 using configure_l3l4_filter_helper(). + * + * @param[in] osi_core: OSI Core private data structure. + * @param[in] max_filter_no: maximum allowed filter number + * + * @pre + * - MAC should be initialized and started. see osi_start_mac() + */ +static void l3l4_add_wildcard_filter(struct osi_core_priv_data *const osi_core, + nveu32_t max_filter_no) { - struct core_local *l_core = (struct core_local *)osi_core; + nve32_t err = -1; + struct osi_l3_l4_filter *l3l4_filter; + struct core_local *l_core = (struct core_local *)(void *)osi_core; - if (validate_args(osi_core, l_core) < 0) { - return -1; + /* use max filter index to confiture wildcard filter */ + if (l_core->l3l4_wildcard_filter_configured != OSI_ENABLE) { + /* Configure TCP wildcard filter at index 0. + * INV IP4 filter with SA (0) + DA (0) with UDP perfect match with + * SP (0) + DP (0) with no routing enabled. + * - TCP packets will have a IP filter match and will be routed to default DMA. + * - UDP packets will have a IP match but no L4 match, hence HW goes for + * next filter index for finding match. 
+ */ + l3l4_filter = &(l_core->cfg.l3_l4[0]); + osi_memset(l3l4_filter, 0, sizeof(struct osi_l3_l4_filter)); + l3l4_filter->filter_enb_dis = OSI_TRUE; + l3l4_filter->data.is_udp = OSI_TRUE; + l3l4_filter->data.src.addr_match = OSI_TRUE; + l3l4_filter->data.src.addr_match_inv = OSI_TRUE; + l3l4_filter->data.src.port_match = OSI_TRUE; + l3l4_filter->data.dst.addr_match = OSI_TRUE; + l3l4_filter->data.dst.addr_match_inv = OSI_TRUE; + l3l4_filter->data.dst.port_match = OSI_TRUE; + + /* configure wildcard at last filter index */ + err = configure_l3l4_filter_helper(osi_core, 0, l3l4_filter); + if (err < 0) { + /* wildcard config failed */ + OSI_CORE_ERR((osi_core->osd), (OSI_LOG_ARG_INVALID), + ("L3L4: TCP wildcard config failed: "), (0UL)); + } } - return l_core->ops_p->set_systime_to_mac(osi_core, sec, nsec); + if (err >= 0) { + /* wildcard config success */ + l_core->l3l4_wildcard_filter_configured = OSI_ENABLE; + OSI_CORE_INFO((osi_core->osd), (OSI_LOG_ARG_INVALID), + ("L3L4: Wildcard config success"), (0UL)); + } +} +#endif /* L3L4_WILDCARD_FILTER */ + +/** + * @brief configure_l3l4_filter - function to configure l3l4 filter. + * + * @note + * Algorithm: + * - Validate all the l3_l4 structure parameter using configure_l3l4_filter_valid_params(). + * Return -1 if parameter validation fails. + * - For filter enable case, + * -> If filter already enabled, return -1 to report error. + * -> Otherwise find free index and configure filter using configure_l3l4_filter_helper(). + * - For filter disable case, + * -> If filter match not found, return 0 to report caller that filter already removed. + * -> Otherwise disable filter using configure_l3l4_filter_helper(). + * - Return -1 if configure_l3l4_filter_helper() fails. + * - Return 0 on success. + * + * @param[in] osi_core: OSI Core private data structure. + * @param[in] l3_l4: Pointer to l3 l4 filter structure (#osi_l3_l4_filter) + * + * @pre + * - MAC should be initialized and started. 
see osi_start_mac() + * + * @retval 0 on success + * @retval -1 on failure. + */ +static nve32_t configure_l3l4_filter(struct osi_core_priv_data *const osi_core, + const struct osi_l3_l4_filter *const l3_l4) +{ + nve32_t err; + nveu32_t filter_no = 0; + nveu32_t free_filter_no = UINT_MAX; + const struct core_local *l_core = (struct core_local *)(void *)osi_core; + const nveu32_t max_filter_no[2] = { + EQOS_MAX_L3_L4_FILTER - 1U, + OSI_MGBE_MAX_L3_L4_FILTER - 1U, + }; + nve32_t ret = -1; + + if (configure_l3l4_filter_valid_params(osi_core, l3_l4) < 0) { + /* parameter validation failed */ + goto exit_func; + } + + /* search for a duplicate filter request or find for free index */ + err = l3l4_find_match(l_core, l3_l4, &filter_no, &free_filter_no, + max_filter_no[osi_core->mac]); + + if (l3_l4->filter_enb_dis == OSI_TRUE) { + if (err == 0) { + /* duplicate filter request */ + OSI_CORE_ERR((osi_core->osd), (OSI_LOG_ARG_HW_FAIL), + ("L3L4: Failed: duplicate filter: "), (filter_no)); + goto exit_func; + } + + /* check free index */ + if (free_filter_no > max_filter_no[osi_core->mac]) { + /* no free entry found */ + OSI_CORE_INFO((osi_core->osd), (OSI_LOG_ARG_HW_FAIL), + ("L3L4: Failed: no free filter: "), (free_filter_no)); + goto exit_func; + } + filter_no = free_filter_no; + } else { + if (err < 0) { + /* no match found */ + OSI_CORE_INFO((osi_core->osd), (OSI_LOG_ARG_HW_FAIL), + ("L3L4: delete: no filter match: "), (filter_no)); + /* filter already deleted, return success */ + ret = 0; + goto exit_func; + } + } + +#if defined(L3L4_WILDCARD_FILTER) + /* setup l3l4 wildcard filter for l3l4 */ + l3l4_add_wildcard_filter(osi_core, max_filter_no[osi_core->mac]); + if (l_core->l3l4_wildcard_filter_configured != OSI_ENABLE) { + OSI_CORE_ERR((osi_core->osd), (OSI_LOG_ARG_HW_FAIL), + ("L3L4: Rejected: wildcard is not enabled: "), (filter_no)); + goto exit_func; + } +#endif /* L3L4_WILDCARD_FILTER */ + + /* configure l3l4 filter */ + err = 
configure_l3l4_filter_helper(osi_core, filter_no, l3_l4); + if (err < 0) { + /* filter config failed */ + OSI_CORE_ERR((osi_core->osd), (OSI_LOG_ARG_HW_FAIL), + ("L3L4: configure_l3l4_filter_helper() failed"), (filter_no)); + goto exit_func; + } + + /* success */ + ret = 0; + +exit_func: + + return ret; } /** - * @brief div_u64 - Calls a function which returns quotient - * - * @param[in] dividend: Dividend - * @param[in] divisor: Divisor - * - * @pre MAC IP should be out of reset and need to be initialized as the - * requirements. - * + * @brief osi_adjust_freq - Adjust frequency * * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * @returns Quotient + * Algorithm: + * - Adjust a drift of +/- comp nanoseconds per second. + * "Compensation" is the difference in frequency between + * the master and slave clocks in Parts Per Billion. + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] ppb: Parts per Billion + * + * @pre MAC should be init and started. see osi_start_mac() + * + * @note + * Traceability Details: + * - SWUD_ID: ETHERNET_NVETHERNETRM_023 + * + * @usage + * - Allowed context for the API call + * - Interrupt handler: No + * - Signal handler: No + * - Thread safe: No + * - Async/Sync: Sync + * - Required Privileges: None + * - API Group: + * - Initialization: No + * - Run time: Yes + * - De-initialization: No + * + * @retval 0 on success + * @retval -1 on failure. 
*/ -static inline nveu64_t div_u64(nveu64_t dividend, - nveu64_t divisor) +static nve32_t osi_adjust_freq(struct osi_core_priv_data *const osi_core, nve32_t ppb) { - nveu64_t remain; - - return div_u64_rem(dividend, divisor, &remain); -} - -nve32_t osi_adjust_freq(struct osi_core_priv_data *const osi_core, nve32_t ppb) -{ - struct core_local *l_core = (struct core_local *)osi_core; - nveu64_t adj; nveu64_t temp; nveu32_t diff = 0; @@ -698,10 +1165,6 @@ nve32_t osi_adjust_freq(struct osi_core_priv_data *const osi_core, nve32_t ppb) nve32_t ret = -1; nve32_t ppb1 = ppb; - if (validate_args(osi_core, l_core) < 0) { - return -1; - } - addend = osi_core->default_addend; if (ppb1 < 0) { neg_adj = 1U; @@ -719,18 +1182,18 @@ nve32_t osi_adjust_freq(struct osi_core_priv_data *const osi_core, nve32_t ppb) if (temp < UINT_MAX) { diff = (nveu32_t)temp; } else { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, "temp > UINT_MAX\n", - 0ULL); - return ret; + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "temp > UINT_MAX\n", + (nvel64_t)temp); + goto fail; } if (neg_adj == 0U) { if (addend <= (UINT_MAX - diff)) { addend = (addend + diff); } else { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "addend > UINT_MAX\n", 0ULL); - return ret; + goto fail; } } else { if (addend > diff) { @@ -738,29 +1201,30 @@ nve32_t osi_adjust_freq(struct osi_core_priv_data *const osi_core, nve32_t ppb) } else if (addend < diff) { addend = diff - addend; } else { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "addend = diff\n", 0ULL); } } - return l_core->ops_p->config_addend(osi_core, addend); + ret = hw_config_addend(osi_core, addend); + +fail: + return ret; } -nve32_t osi_adjust_time(struct osi_core_priv_data *const osi_core, - nvel64_t nsec_delta) +static nve32_t osi_adjust_time(struct osi_core_priv_data *const osi_core, + nvel64_t nsec_delta) { - struct core_local *l_core = (struct core_local 
*)osi_core; + struct core_local *l_core = (struct core_local *)(void *)osi_core; nveu32_t neg_adj = 0; nveu32_t sec = 0, nsec = 0; + nveu32_t cur_sec = 0, cur_nsec = 0; nveu64_t quotient; nveu64_t reminder = 0; nveu64_t udelta = 0; nve32_t ret = -1; nvel64_t nsec_delta1 = nsec_delta; - - if (validate_args(osi_core, l_core) < 0) { - return -1; - } + nvel64_t calculate; if (nsec_delta1 < 0) { neg_adj = 1; @@ -774,119 +1238,47 @@ nve32_t osi_adjust_time(struct osi_core_priv_data *const osi_core, if (quotient <= UINT_MAX) { sec = (nveu32_t)quotient; } else { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "quotient > UINT_MAX\n", 0ULL); - return ret; + goto fail; } if (reminder <= UINT_MAX) { nsec = (nveu32_t)reminder; } else { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "reminder > UINT_MAX\n", 0ULL); - return ret; + goto fail; } - return l_core->ops_p->adjust_mactime(osi_core, sec, nsec, neg_adj, - osi_core->ptp_config.one_nsec_accuracy); -} + common_get_systime_from_mac(osi_core->base, + osi_core->mac, &cur_sec, &cur_nsec); + calculate = ((nvel64_t)cur_sec * OSI_NSEC_PER_SEC_SIGNED) + (nvel64_t)cur_nsec; -nve32_t osi_ptp_configuration(struct osi_core_priv_data *const osi_core, - const nveu32_t enable) -{ - struct core_local *l_core = (struct core_local *)osi_core; - nve32_t ret = 0; - nveu64_t temp = 0, temp1 = 0, temp2 = 0; - nveu64_t ssinc = 0; - - if (validate_args(osi_core, l_core) < 0) { - return -1; - } - - if (enable == OSI_DISABLE) { - /* disable hw time stamping */ - /* Program MAC_Timestamp_Control Register */ - l_core->ops_p->config_tscr(osi_core, OSI_DISABLE); - /* Disable PTP RX Queue routing */ - ret = l_core->ops_p->config_ptp_rxq(osi_core, - osi_core->ptp_config.ptp_rx_queue, - OSI_DISABLE); + if (neg_adj == 1U) { + if ((calculate + nsec_delta) < 0LL) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Wrong delta, put time in -ve\n", 0ULL); + 
ret = -1; + goto fail; + } } else { - /* Program MAC_Timestamp_Control Register */ - l_core->ops_p->config_tscr(osi_core, - osi_core->ptp_config.ptp_filter); - - if (osi_core->pre_si == OSI_ENABLE) { - if (osi_core->mac == OSI_MAC_HW_MGBE) { - /* FIXME: Pass it from OSD */ - osi_core->ptp_config.ptp_clock = 78125000U; - osi_core->ptp_config.ptp_ref_clk_rate = - 78125000U; - } else { - /* FIXME: Pass it from OSD */ - osi_core->ptp_config.ptp_clock = 312500000U; - osi_core->ptp_config.ptp_ref_clk_rate = - 312500000U; - } - } - /* Program Sub Second Increment Register */ - l_core->ops_p->config_ssir(osi_core, - osi_core->ptp_config.ptp_clock); - - /* formula for calculating addend value is - * TSAR = (2^32 * 1000) / (ptp_ref_clk_rate in MHz * SSINC) - * 2^x * y == (y << x), hence - * 2^32 * 1000 == (1000 << 32) - * so addend = (2^32 * 1000)/(ptp_ref_clk_rate in MHZ * SSINC); - */ - if ((osi_core->pre_si == OSI_ENABLE) && - ((osi_core->mac == OSI_MAC_HW_MGBE) || - (osi_core->mac_ver <= OSI_EQOS_MAC_4_10))) { - ssinc = OSI_PTP_SSINC_16; - } else { - ssinc = OSI_PTP_SSINC_4; - if (osi_core->mac_ver == OSI_EQOS_MAC_5_30) { - ssinc = OSI_PTP_SSINC_6; - } - } - - temp = ((nveu64_t)1000 << 32); - temp = (nveu64_t)temp * 1000000U; - - temp1 = div_u64(temp, - (nveu64_t)osi_core->ptp_config.ptp_ref_clk_rate); - - temp2 = div_u64(temp1, (nveu64_t)ssinc); - - if (temp2 < UINT_MAX) { - osi_core->default_addend = (nveu32_t)temp2; - } else { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "core: temp2 >= UINT_MAX\n", 0ULL); - return -1; - } - - /* Program addend value */ - ret = l_core->ops_p->config_addend(osi_core, - osi_core->default_addend); - - /* Set current time */ - if (ret == 0) { - ret = l_core->ops_p->set_systime_to_mac(osi_core, - osi_core->ptp_config.sec, - osi_core->ptp_config.nsec); - if (ret == 0) { - /* Enable PTP RX Queue routing */ - ret = l_core->ops_p->config_ptp_rxq(osi_core, - osi_core->ptp_config.ptp_rx_queue, - OSI_ENABLE); - } + /* Addition of 2 sec for 
compensate Max nanosec factors*/ + if (cur_sec > (UINT_MAX - sec - 2U)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Not Supported sec beyond UINT_max\n", 0ULL); + ret = -1; + goto fail; } } + ret = l_core->ops_p->adjust_mactime(osi_core, sec, nsec, neg_adj, + osi_core->ptp_config.one_nsec_accuracy); +fail: return ret; } +#ifndef OSI_STRIPPED_LIB /** * @brief rxq_route_config - Enable PTP RX packets routing * @@ -904,7 +1296,7 @@ nve32_t osi_ptp_configuration(struct osi_core_priv_data *const osi_core, static nve32_t rxq_route_config(struct osi_core_priv_data *const osi_core, const struct osi_rxq_route *rxq_route) { - struct core_local *l_core = (struct core_local *)osi_core; + struct core_local *l_core = (struct core_local *)(void *)osi_core; if (rxq_route->route_type != OSI_RXQ_ROUTE_PTP) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, @@ -918,96 +1310,6 @@ static nve32_t rxq_route_config(struct osi_core_priv_data *const osi_core, rxq_route->enable); } -nve32_t osi_read_mmc(struct osi_core_priv_data *const osi_core) -{ - struct core_local *l_core = (struct core_local *)osi_core; - - if (validate_args(osi_core, l_core) < 0) { - return -1; - } - - l_core->ops_p->read_mmc(osi_core); - - return 0; -} - -nve32_t osi_get_mac_version(struct osi_core_priv_data *const osi_core, - nveu32_t *mac_ver) -{ - struct core_local *l_core = (struct core_local *)osi_core; - - if (validate_args(osi_core, l_core) < 0) { - return -1; - } - - if (mac_ver == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "mac_ver is NULL\n", 0ULL); - return -1; - } - - *mac_ver = ((l_core->ops_p->read_reg(osi_core, (nve32_t)MAC_VERSION)) & - MAC_VERSION_SNVER_MASK); - - if (validate_mac_ver_update_chans(*mac_ver, &l_core->max_chans) == 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "Invalid MAC version\n", (nveu64_t)*mac_ver) - return -1; - } - - return 0; -} - -#ifndef OSI_STRIPPED_LIB -/** - * @brief validate_core_regs - Read-validate HW registers for func safety. 
- * - * @note - * Algorithm: - * - Reads pre-configured list of MAC/MTL configuration registers - * and compares with last written value for any modifications. - * - * @param[in] osi_core: OSI core private data structure. - * - * @pre - * - MAC has to be out of reset. - * - osi_hal_hw_core_init has to be called. Internally this would initialize - * the safety_config (see osi_core_priv_data) based on MAC version and - * which specific registers needs to be validated periodically. - * - Invoke this call if (osi_core_priv_data->safety_config != OSI_NULL) - * - * @note - * Traceability Details: - * - * @note - * Classification: - * - Interrupt: No - * - Signal handler: No - * - Thread safe: No - * - Required Privileges: None - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -static nve32_t validate_core_regs(struct osi_core_priv_data *const osi_core) -{ - struct core_local *l_core = (struct core_local *)osi_core; - - if (osi_core->safety_config == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "CORE: Safety config is NULL\n", 0ULL); - return -1; - } - - return l_core->ops_p->validate_regs(osi_core); -} - /** * @brief vlan_id_update - invoke osi call to update VLAN ID * @@ -1042,9 +1344,9 @@ static nve32_t validate_core_regs(struct osi_core_priv_data *const osi_core) static nve32_t vlan_id_update(struct osi_core_priv_data *const osi_core, const nveu32_t vid) { - struct core_local *l_core = (struct core_local *)osi_core; - unsigned int action = vid & VLAN_ACTION_MASK; - unsigned short vlan_id = vid & VLAN_VID_MASK; + struct core_local *const l_core = (struct core_local *)(void *)osi_core; + nveu32_t action = vid & VLAN_ACTION_MASK; + nveu16_t vlan_id = (nveu16_t)(vid & VLAN_VID_MASK); if ((osi_core->mac_ver == OSI_EQOS_MAC_4_10) || (osi_core->mac_ver == OSI_EQOS_MAC_5_00)) { @@ -1055,7 +1357,7 @@ static nve32_t vlan_id_update(struct 
osi_core_priv_data *const osi_core, if (((action != OSI_VLAN_ACTION_ADD) && (action != OSI_VLAN_ACTION_DEL)) || (vlan_id >= VLAN_NUM_VID)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CORE: Invalid action/vlan_id\n", 0ULL); /* Unsupported action */ return -1; @@ -1101,12 +1403,12 @@ static nve32_t vlan_id_update(struct osi_core_priv_data *const osi_core, static nve32_t conf_eee(struct osi_core_priv_data *const osi_core, nveu32_t tx_lpi_enabled, nveu32_t tx_lpi_timer) { - struct core_local *l_core = (struct core_local *)osi_core; + struct core_local *l_core = (struct core_local *)(void *)osi_core; if ((tx_lpi_timer >= OSI_MAX_TX_LPI_TIMER) || (tx_lpi_timer <= OSI_MIN_TX_LPI_TIMER) || ((tx_lpi_timer % OSI_MIN_TX_LPI_TIMER) != OSI_NONE)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid Tx LPI timer value\n", (nveul64_t)tx_lpi_timer); return -1; @@ -1117,57 +1419,6 @@ static nve32_t conf_eee(struct osi_core_priv_data *const osi_core, return 0; } -/** - * @brief configure_frp - Configure the FRP offload entry in the - * Instruction Table. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] cmd: FRP command data structure. - * - * @pre - * - MAC and PHY should be init and started. see osi_start_mac() - * - * @note - * Traceability Details: - * - * @note - * Classification: - * - Interrupt: No - * - Signal handler: No - * - Thread safe: No - * - Required Privileges: None - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static int configure_frp(struct osi_core_priv_data *const osi_core, - struct osi_core_frp_cmd *const cmd) -{ - struct core_local *l_core = (struct core_local *)osi_core; - - if (cmd == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "Invalid argment\n", OSI_NONE); - return -1; - } - - /* Check for supported MAC version */ - if ((osi_core->mac == OSI_MAC_HW_EQOS) && - (osi_core->mac_ver < OSI_EQOS_MAC_5_10)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, - "MAC doesn't support FRP\n", OSI_NONE); - return -1; - } - - return setup_frp(osi_core, l_core->ops_p, cmd); -} - /** * @brief config_arp_offload - Configure ARP offload in MAC. * @@ -1206,16 +1457,16 @@ static nve32_t conf_arp_offload(struct osi_core_priv_data *const osi_core, const nveu32_t flags, const nveu8_t *ip_addr) { - struct core_local *l_core = (struct core_local *)osi_core; + struct core_local *l_core = (struct core_local *)(void *)osi_core; if (ip_addr == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CORE: ip_addr is NULL\n", 0ULL); return -1; } if ((flags != OSI_ENABLE) && (flags != OSI_DISABLE)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid ARP offload enable/disable flag\n", 0ULL); return -1; } @@ -1257,11 +1508,11 @@ static nve32_t conf_arp_offload(struct osi_core_priv_data *const osi_core, static nve32_t conf_mac_loopback(struct osi_core_priv_data *const osi_core, const nveu32_t lb_mode) { - struct core_local *l_core = (struct core_local *)osi_core; + struct core_local *l_core = (struct core_local *)(void *)osi_core; /* don't allow only if loopback mode is other than 0 or 1 */ - if (lb_mode != OSI_ENABLE && lb_mode != OSI_DISABLE) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + if ((lb_mode != OSI_ENABLE) && (lb_mode != OSI_DISABLE)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid loopback mode\n", 0ULL); return -1; } @@ -1270,6 
+1521,62 @@ static nve32_t conf_mac_loopback(struct osi_core_priv_data *const osi_core, } #endif /* !OSI_STRIPPED_LIB */ +/** + * @brief configure_frp - Configure the FRP offload entry in the + * Instruction Table. + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] cmd: FRP command data structure. + * + * @pre + * - MAC and PHY should be init and started. see osi_start_mac() + * + * @note + * Traceability Details: + * + * @note + * Classification: + * - Interrupt: No + * - Signal handler: No + * - Thread safe: No + * - Required Privileges: None + * + * @note + * API Group: + * - Initialization: No + * - Run time: Yes + * - De-initialization: No + * + * @retval 0 on success + * @retval -1 on failure. + */ +static nve32_t configure_frp(struct osi_core_priv_data *const osi_core, + struct osi_core_frp_cmd *const cmd) +{ + struct core_local *l_core = (struct core_local *)(void *)osi_core; + nve32_t ret; + + if (cmd == OSI_NULL) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "FRP command invalid\n", 0ULL); + ret = -1; + goto done; + } + + /* Check for supported MAC version */ + if ((osi_core->mac == OSI_MAC_HW_EQOS) && + (osi_core->mac_ver < OSI_EQOS_MAC_5_30)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "MAC doesn't support FRP\n", OSI_NONE); + ret = -1; + goto done; + } + + ret = setup_frp(osi_core, l_core->ops_p, cmd); +done: + return ret; +} + /** * @brief config_est - Read Setting for GCL from input and update * registers. 
@@ -1313,23 +1620,28 @@ static nve32_t conf_mac_loopback(struct osi_core_priv_data *const osi_core, static nve32_t config_est(struct osi_core_priv_data *osi_core, struct osi_est_config *est) { - struct core_local *l_core = (struct core_local *)osi_core; + nve32_t ret; if (est == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "EST data is NULL", 0ULL); - return -1; + ret = -1; + goto done; } if ((osi_core->flow_ctrl & OSI_FLOW_CTRL_TX) == OSI_FLOW_CTRL_TX) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "TX Flow control enabled, please disable it", 0ULL); - return -1; + ret = -1; + goto done; } - return l_core->ops_p->hw_config_est(osi_core, est); + ret = hw_config_est(osi_core, est); + +done: + return ret; } /** @@ -1368,15 +1680,19 @@ static nve32_t config_est(struct osi_core_priv_data *osi_core, static nve32_t config_fpe(struct osi_core_priv_data *osi_core, struct osi_fpe_config *fpe) { - struct core_local *l_core = (struct core_local *)osi_core; + nve32_t ret; if (fpe == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "FPE data is NULL", 0ULL); - return -1; + ret = -1; + goto done; } - return l_core->ops_p->hw_config_fpe(osi_core, fpe); + ret = hw_config_fpe(osi_core, fpe); + +done: + return ret; } /** @@ -1393,7 +1709,7 @@ static nve32_t config_fpe(struct osi_core_priv_data *osi_core, static inline void free_tx_ts(struct osi_core_priv_data *osi_core, nveu32_t chan) { - struct core_local *l_core = (struct core_local *)osi_core; + struct core_local *l_core = (struct core_local *)(void *)osi_core; struct osi_core_tx_ts *head = &l_core->tx_ts_head; struct osi_core_tx_ts *temp = l_core->tx_ts_head.next; nveu32_t count = 0U; @@ -1410,6 +1726,29 @@ static inline void free_tx_ts(struct osi_core_priv_data *osi_core, } } +/** + * @brief Return absolute difference + * Algorithm: + * - calculate 
absolute positive difference + * + * @param[in] a - First input argument + * @param[in] b - Second input argument + * + * @retval absolute difference + */ +static inline nveul64_t eth_abs(nveul64_t a, nveul64_t b) +{ + nveul64_t temp = 0ULL; + + if (a > b) { + temp = (a - b); + } else { + temp = (b - a); + } + + return temp; +} + /** * @brief Parses internal ts structure array and update time stamp if packet * id matches. @@ -1427,24 +1766,48 @@ static inline void free_tx_ts(struct osi_core_priv_data *osi_core, static inline nve32_t get_tx_ts(struct osi_core_priv_data *osi_core, struct osi_core_tx_ts *ts) { - struct core_local *l_core = (struct core_local *)osi_core; + struct core_local *l_core = (struct core_local *)(void *)osi_core; struct osi_core_tx_ts *temp = l_core->tx_ts_head.next; - struct osi_core_tx_ts *head = &l_core->tx_ts_head; + struct osi_core_tx_ts const *head = &l_core->tx_ts_head; nve32_t ret = -1; nveu32_t count = 0U; + nveu32_t nsec, sec, temp_nsec; + nveul64_t temp_val = 0ULL; + nveul64_t ts_val = 0ULL; + + common_get_systime_from_mac(osi_core->base, osi_core->mac, &sec, &nsec); + ts_val = (sec * OSI_NSEC_PER_SEC) + nsec; if (__sync_fetch_and_add(&l_core->ts_lock, 1) == 1U) { /* mask return as initial value is returned always */ (void)__sync_fetch_and_sub(&l_core->ts_lock, 1); - osi_core->xstats.ts_lock_del_fail = +#ifndef OSI_STRIPPED_LIB + osi_core->stats.ts_lock_del_fail = osi_update_stats_counter( - osi_core->xstats.ts_lock_del_fail, 1U); + osi_core->stats.ts_lock_del_fail, 1U); +#endif goto done; } while ((temp != head) && (count < MAX_TX_TS_CNT)) { - if ((temp->pkt_id == ts->pkt_id) && + temp_nsec = temp->nsec & ETHER_NSEC_MASK; + temp_val = (temp->sec * OSI_NSEC_PER_SEC) + temp_nsec; + + if ((eth_abs(ts_val, temp_val) > OSI_NSEC_PER_SEC) && (temp->in_use != OSI_NONE)) { + /* remove old node from the link */ + temp->next->prev = temp->prev; + temp->prev->next = temp->next; + /* Clear in_use fields */ + temp->in_use = OSI_DISABLE; + 
OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID, + "Removing stale TS from queue pkt_id\n", + (nveul64_t)temp->pkt_id); + count++; + temp = temp->next; + continue; + } else if ((temp->pkt_id == ts->pkt_id) && + (temp->in_use != OSI_NONE)) { ts->sec = temp->sec; ts->nsec = temp->nsec; /* remove temp node from the link */ @@ -1454,7 +1817,10 @@ static inline nve32_t get_tx_ts(struct osi_core_priv_data *osi_core, temp->in_use = OSI_DISABLE; ret = 0; break; + } else { + /* empty case */ } + count++; temp = temp->next; } @@ -1465,71 +1831,115 @@ done: return ret; } -#if DRIFT_CAL -/** - * @brief read time counters from HW register - * - * Algorithm: - * - read HW time counters and take care of roll-over - * - * @param[in] addr: base address - * @param[in] mac: IP type - * @param[out] sec: sec counter - * @param[out] nsec: nsec counter - */ -static void read_sec_ns(void *addr, nveu32_t mac, - nveu32_t *sec, - nveu32_t *nsec) -{ - nveu32_t ns1, ns2; - nveu32_t time_reg_offset[][2] = {{EQOS_SEC_OFFSET, EQOS_NSEC_OFFSET}, - {MGBE_SEC_OFFSET, MGBE_NSEC_OFFSET}}; - - ns1 = osi_readl((nveu8_t *)addr + time_reg_offset[mac][1]); - ns1 = (ns1 & ETHER_NSEC_MASK); - - *sec = osi_readl((nveu8_t *)addr + time_reg_offset[mac][0]); - - ns2 = osi_readl((nveu8_t *)addr + time_reg_offset[mac][1]); - ns2 = (ns2 & ETHER_NSEC_MASK); - - /* if ns1 is greater than ns2, it means nsec counter rollover - * happened. In that case read the updated sec counter again - */ - if (ns1 >= ns2) { - *sec = osi_readl((nveu8_t *)addr + time_reg_offset[mac][0]); - *nsec = ns2; - } else { - *nsec = ns1; - } -} - /** * @brief calculate time drift between primary and secondary - * interface. + * interface and update current time. * Algorithm: * - Get drift using last difference = 0 and * current differance as MGBE time - EQOS time * drift = current differance with which EQOS should * update. 
* - * @param[in] sec: primary interface sec counter - * @param[in] nsec: primary interface nsec counter - * @param[in] secondary_sec: Secondary interface sec counter - * @param[in] secondary_nsec: Secondary interface nsec counter + * @param[in] osi_core: OSI core data structure for primary interface. + * @param[in] sec_osi_core: OSI core data structure for seconday interface. + * @param[out] primary_time: primary interface time pointer + * @param[out] secondary_time: Secondary interface time pointer * * @retval calculated drift value */ -static inline nvel64_t dirft_calculation(nveu32_t sec, nveu32_t nsec, - nveu32_t secondary_sec, - nveu32_t secondary_nsec) +static inline nvel64_t dirft_calculation(struct osi_core_priv_data *const osi_core, + struct osi_core_priv_data *const sec_osi_core, + nvel64_t *primary_time, + nvel64_t *secondary_time) { + nve32_t ret; + nveu32_t sec = 0x0; + nveu32_t nsec = 0x0; + nveu32_t secondary_sec = 0x0; + nveu32_t secondary_nsec = 0x0; nvel64_t val = 0LL; + nveul64_t temp = 0x0U; + nveul64_t time1 = 0x0U; + nveul64_t time2 = 0x0U; + struct osi_core_ptp_tsc_data ptp_tsc1; + struct osi_core_ptp_tsc_data ptp_tsc2; - val = (nvel64_t)sec - (nvel64_t)secondary_sec; - val = (nvel64_t)(val * 1000000000LL); - val += (nvel64_t)nsec - (nvel64_t)secondary_nsec; + ret = hw_ptp_tsc_capture(osi_core, &ptp_tsc1); + if (ret != 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "CORE: TSC PTP capture failed for primary\n", 0ULL); + goto fail; + } + ret = hw_ptp_tsc_capture(sec_osi_core, &ptp_tsc2); + if (ret != 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "CORE: TSC PTP capture failed for secondary\n", 0ULL); + goto fail; + } + + time1 = ((nveul64_t)((nveul64_t)ptp_tsc1.tsc_high_bits << 32) + + (nveul64_t)ptp_tsc1.tsc_low_bits); + sec = ptp_tsc1.ptp_high_bits; + nsec = ptp_tsc1.ptp_low_bits; + if ((OSI_LLONG_MAX - (nvel64_t)nsec) > ((nvel64_t)sec * OSI_NSEC_PER_SEC_SIGNED)) { + *primary_time = ((nvel64_t)sec * 
OSI_NSEC_PER_SEC_SIGNED) + (nvel64_t)nsec; + } else { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "CORE: Negative primary PTP time\n", 0ULL); + goto fail; + } + + time2 = ((nveul64_t)((nveul64_t)ptp_tsc2.tsc_high_bits << 32) + + (nveul64_t)ptp_tsc2.tsc_low_bits); + secondary_sec = ptp_tsc2.ptp_high_bits; + secondary_nsec = ptp_tsc2.ptp_low_bits; + + if ((OSI_LLONG_MAX - (nvel64_t)secondary_nsec) > + ((nvel64_t)secondary_sec * OSI_NSEC_PER_SEC_SIGNED)) { + *secondary_time = ((nvel64_t)secondary_sec * OSI_NSEC_PER_SEC_SIGNED) + + (nvel64_t)secondary_nsec; + } else { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "CORE: Negative secondary PTP time\n", 0ULL); + goto fail; + } + + if (time2 > time1) { + temp = time2 - time1; + if ((OSI_LLONG_MAX - (nvel64_t)temp) > *secondary_time) { + *secondary_time -= (nvel64_t)temp; + } else { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "CORE: sec time crossing limit\n", 0ULL); + goto fail; + } + } else if (time1 >= time2) { + temp = time1 - time2; + if ((OSI_LLONG_MAX - (nvel64_t)temp) > *secondary_time) { + *secondary_time += (nvel64_t)temp; + } else { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "CORE: sec time crossing limit\n", 0ULL); + goto fail; + } + } else { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "CORE: wrong drift\n", 0ULL); + goto fail; + } + /* 0 is lowest possible valid time value which represent + * 1 Jan, 1970 + */ + if ((*primary_time >= 0) && (*secondary_time >= 0)) { + val = (*primary_time - *secondary_time); + } else { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "CORE: negative time\n", 0ULL); + goto fail; + } + +fail: return val; } @@ -1547,10 +1957,11 @@ static inline nvel64_t dirft_calculation(nveu32_t sec, nveu32_t nsec, * @retval calculated frequency adjustment value in ppb */ static inline nve32_t freq_offset_calculate(struct osi_core_priv_data *sec_osi_core, - nvel64_t offset, nvel64_t secondary_time) + nvel64_t offset, + nvel64_t secondary_time) { 
struct core_ptp_servo *s; - struct core_local *secondary_osi_lcore = (struct core_local *)sec_osi_core; + struct core_local *secondary_osi_lcore = (struct core_local *)(void *)sec_osi_core; nvel64_t ki_term, ppb = 0; nvel64_t cofficient; @@ -1561,9 +1972,11 @@ static inline nve32_t freq_offset_calculate(struct osi_core_priv_data *sec_osi_c * it should be corrected with adjust time * threshold value 1 sec */ - if (offset >= 1000000000 || offset <= -1000000000) { + if ((offset >= 1000000000LL) || (offset <= -1000000000LL)) { s->count = SERVO_STATS_0; /* JUMP */ - return (nve32_t) s->last_ppb; + s->drift = 0; + s->last_ppb = 0; + goto fail; } switch (s->count) { @@ -1579,19 +1992,36 @@ static inline nve32_t freq_offset_calculate(struct osi_core_priv_data *sec_osi_c /* Make sure the first sample is older than the second. */ if (s->local[0] >= s->local[1]) { + s->offset[0] = s->offset[1]; + s->local[0] = s->local[1]; s->count = SERVO_STATS_0; break; } /* Adjust drift by the measured frequency offset. 
*/ cofficient = (1000000000LL - s->drift) / (s->local[1] - s->local[0]); - s->drift += cofficient * s->offset[1]; + if ((cofficient == 0) || + (((cofficient < 0) && (s->offset[1] < 0)) && + ((OSI_LLONG_MAX / cofficient) < s->offset[1])) || + ((cofficient < 0) && ((-OSI_LLONG_MAX / cofficient) > s->offset[1])) || + ((s->offset[1] < 0) && ((-OSI_LLONG_MAX / cofficient) > s->offset[1]))) { + /* do nothing */ + } else { + if (((s->drift >= 0) && ((OSI_LLONG_MAX - s->drift) < (cofficient * s->offset[1]))) || + ((s->drift < 0) && ((-OSI_LLONG_MAX - s->drift) > (cofficient * s->offset[1])))) { + /* Do nothing */ + } else { + s->drift += cofficient * s->offset[1]; + } + } /* update this with constant */ - if (s->drift < -MAX_FREQ) { - s->drift = -MAX_FREQ; - } else if (s->drift > MAX_FREQ) { - s->drift = MAX_FREQ; + if (s->drift < MAX_FREQ_NEG) { + s->drift = MAX_FREQ_NEG; + } else if (s->drift > MAX_FREQ_POS) { + s->drift = MAX_FREQ_POS; + } else { + /* Do Nothing */ } ppb = s->drift; @@ -1603,22 +2033,49 @@ static inline nve32_t freq_offset_calculate(struct osi_core_priv_data *sec_osi_c case SERVO_STATS_2: s->offset[1] = offset; s->local[1] = secondary_time; + if (s->local[0] >= s->local[1]) { + s->offset[0] = s->offset[1]; + s->local[0] = s->local[1]; + s->count = SERVO_STATS_0; + break; + } + cofficient = (1000000000LL) / (s->local[1] - s->local[0]); + + if ((cofficient != 0) && (offset < 0) && + (((offset / WEIGHT_BY_10) < (-OSI_LLONG_MAX / (s->const_i * cofficient))) || + ((offset / WEIGHT_BY_10) < (-OSI_LLONG_MAX / (s->const_p * cofficient))))) { + s->count = SERVO_STATS_0; + break; + } + + if ((cofficient != 0) && (offset > 0) && + (((offset / WEIGHT_BY_10) > (OSI_LLONG_MAX / (cofficient * s->const_i))) || + ((offset / WEIGHT_BY_10) > (OSI_LLONG_MAX / (cofficient * s->const_p))))) { + s->count = SERVO_STATS_0; + break; + } + /* calculate ppb */ - ki_term = (s->const_i * cofficient * offset * WEIGHT_BY_10) / (100);//weight; - ppb = (s->const_p * cofficient * offset 
* WEIGHT_BY_10) / (100) + s->drift + + ki_term = ((s->const_i * cofficient * offset) / WEIGHT_BY_10); + ppb = (s->const_p * cofficient * offset / WEIGHT_BY_10) + s->drift + ki_term; /* FIXME tune cofficients */ - if (ppb < -MAX_FREQ) { - ppb = -MAX_FREQ; - } else if (ppb > MAX_FREQ) { - ppb = MAX_FREQ; + if (ppb < MAX_FREQ_NEG) { + ppb = MAX_FREQ_NEG; + } else if (ppb > MAX_FREQ_POS) { + ppb = MAX_FREQ_POS; } else { - s->drift += ki_term; + if (((s->drift >= 0) && ((OSI_LLONG_MAX - s->drift) < ki_term)) || + ((s->drift < 0) && ((-OSI_LLONG_MAX - s->drift) > ki_term))) { + } else { + + s->drift += ki_term; + } + s->offset[0] = s->offset[1]; + s->local[0] = s->local[1]; } - s->offset[0] = s->offset[1]; - s->local[0] = s->local[1]; break; default: break; @@ -1626,85 +2083,390 @@ static inline nve32_t freq_offset_calculate(struct osi_core_priv_data *sec_osi_c s->last_ppb = ppb; +fail: + if ((ppb > INT_MAX) || (ppb < -INT_MAX)) { + ppb = 0LL; + } + return (nve32_t)ppb; } -#endif -nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, - struct osi_ioctl *data) +static void cfg_l3_l4_filter(struct core_local *l_core) { - struct core_local *l_core = (struct core_local *)osi_core; - struct core_ops *ops_p; + nveu32_t i = 0U; + + for (i = 0U; i < OSI_MGBE_MAX_L3_L4_FILTER; i++) { + if (l_core->cfg.l3_l4[i].filter_enb_dis == OSI_FALSE) { + /* filter not enabled */ + continue; + } + + (void)configure_l3l4_filter_helper( + (struct osi_core_priv_data *)(void *)l_core, + i, &l_core->cfg.l3_l4[i]); + +#if defined(L3L4_WILDCARD_FILTER) + if (i == 0U) { + /* first filter supposed to be tcp wildcard filter */ + l_core->l3l4_wildcard_filter_configured = OSI_ENABLE; + } +#endif /* L3L4_WILDCARD_FILTER */ + } +} + +static void cfg_l2_filter(struct core_local *l_core) +{ + nveu32_t i; + + (void)osi_l2_filter((struct osi_core_priv_data *)(void *)l_core, + &l_core->cfg.l2_filter); + + for (i = 0U; i < EQOS_MAX_MAC_ADDRESS_FILTER; i++) { + if (l_core->cfg.l2[i].used == 
OSI_DISABLE) { + continue; + } + + (void)osi_l2_filter((struct osi_core_priv_data *)(void *)l_core, + &l_core->cfg.l2[i].filter); + } +} + +static void cfg_rxcsum(struct core_local *l_core) +{ + (void)hw_config_rxcsum_offload((struct osi_core_priv_data *)(void *)l_core, + l_core->cfg.rxcsum); +} + +#ifndef OSI_STRIPPED_LIB +static void cfg_vlan(struct core_local *l_core) +{ + nveu32_t i; + + for (i = 0U; i < VLAN_NUM_VID; i++) { + if (l_core->cfg.vlan[i].used == OSI_DISABLE) { + continue; + } + + (void)vlan_id_update((struct osi_core_priv_data *)(void *)l_core, + (l_core->cfg.vlan[i].vid | OSI_VLAN_ACTION_ADD)); + } +} + +static void cfg_fc(struct core_local *l_core) +{ + (void)l_core->ops_p->config_flow_control((struct osi_core_priv_data *)(void *)l_core, + l_core->cfg.flow_ctrl); +} + +static void cfg_eee(struct core_local *l_core) +{ + (void)conf_eee((struct osi_core_priv_data *)(void *)l_core, + l_core->cfg.tx_lpi_enabled, + l_core->cfg.tx_lpi_timer); +} +#endif /* !OSI_STRIPPED_LIB */ + +static void cfg_avb(struct core_local *l_core) +{ + nveu32_t i; + + for (i = 0U; i < OSI_MGBE_MAX_NUM_QUEUES; i++) { + if (l_core->cfg.avb[i].used == OSI_DISABLE) { + continue; + } + + (void)l_core->ops_p->set_avb_algorithm((struct osi_core_priv_data *)(void *)l_core, + &l_core->cfg.avb[i].avb_info); + } +} + +static void cfg_est(struct core_local *l_core) +{ + (void)config_est((struct osi_core_priv_data *)(void *)l_core, + &l_core->cfg.est); +} + +static void cfg_fpe(struct core_local *l_core) +{ + (void)config_fpe((struct osi_core_priv_data *)(void *)l_core, + &l_core->cfg.fpe); +} + +static void cfg_ptp(struct core_local *l_core) +{ + struct osi_core_priv_data *osi_core = (struct osi_core_priv_data *)(void *)l_core; + struct osi_ioctl ioctl_data = {}; + + ioctl_data.arg1_u32 = l_core->cfg.ptp; + ioctl_data.cmd = OSI_CMD_CONFIG_PTP; + + (void)osi_handle_ioctl(osi_core, &ioctl_data); +} + +static void cfg_frp(struct core_local *l_core) +{ + struct osi_core_priv_data *osi_core 
= (struct osi_core_priv_data *)(void *)l_core; + + (void)frp_hw_write(osi_core, l_core->ops_p); +} + +static void apply_dynamic_cfg(struct osi_core_priv_data *osi_core) +{ + struct core_local *l_core = (struct core_local *)(void *)osi_core; + typedef void (*cfg_fn)(struct core_local *local_core); + const cfg_fn fn[11] = { + [DYNAMIC_CFG_L3_L4_IDX] = cfg_l3_l4_filter, + [DYNAMIC_CFG_L2_IDX] = cfg_l2_filter, + [DYNAMIC_CFG_RXCSUM_IDX] = cfg_rxcsum, +#ifndef OSI_STRIPPED_LIB + [DYNAMIC_CFG_VLAN_IDX] = cfg_vlan, + [DYNAMIC_CFG_FC_IDX] = cfg_fc, + [DYNAMIC_CFG_EEE_IDX] = cfg_eee, +#endif /* !OSI_STRIPPED_LIB */ + [DYNAMIC_CFG_AVB_IDX] = cfg_avb, + [DYNAMIC_CFG_EST_IDX] = cfg_est, + [DYNAMIC_CFG_FPE_IDX] = cfg_fpe, + [DYNAMIC_CFG_PTP_IDX] = cfg_ptp, + [DYNAMIC_CFG_FRP_IDX] = cfg_frp + }; + nveu32_t flags = l_core->cfg.flags; + nveu32_t i = 0U; + + while (flags > 0U) { + if ((flags & OSI_ENABLE) == OSI_ENABLE) { + fn[i](l_core); + } + + flags = flags >> 1U; + update_counter_u(&i, 1U); + } +} + +static void store_l2_filter(struct osi_core_priv_data *osi_core, + struct osi_filter *filter) +{ + struct core_local *l_core = (struct core_local *)(void *)osi_core; + + if ((filter->oper_mode & OSI_OPER_ADDR_UPDATE) == OSI_OPER_ADDR_UPDATE) { + (void)osi_memcpy(&l_core->cfg.l2[filter->index].filter, filter, + sizeof(struct osi_filter)); + l_core->cfg.l2[filter->index].used = OSI_ENABLE; + } else if ((filter->oper_mode & OSI_OPER_ADDR_DEL) == OSI_OPER_ADDR_DEL) { + l_core->cfg.l2[filter->index].used = OSI_DISABLE; + } else { + (void)osi_memcpy(&l_core->cfg.l2_filter, filter, + sizeof(struct osi_filter)); + } +} + +/** + * @brief osi_hal_handle_ioctl - HW function API to handle runtime command + * + * @note + * Algorithm: + * - Handle runtime commands to OSI + * - OSI_CMD_MDC_CONFIG + * Derive MDC clock based on provided AXI_CBB clk + * arg1_u32 - CSR (AXI CBB) clock rate. 
+ * - OSI_CMD_RESTORE_REGISTER + * Restore backup of MAC MMIO address space + * - OSI_CMD_POLL_FOR_MAC_RST + * Poll Software reset bit in MAC HW + * - OSI_CMD_COMMON_ISR + * Common ISR handler + * - OSI_CMD_PAD_CALIBRATION + * PAD calibration + * - OSI_CMD_READ_MMC + * invoke function to read actual registers and update + * structure variable mmc + * - OSI_CMD_GET_MAC_VER + * Reading MAC version + * arg1_u32 - holds mac version + * - OSI_CMD_VALIDATE_CORE_REG + * Read-validate HW registers for func safety + * - OSI_CMD_RESET_MMC + * invoke function to reset MMC counter and data + * structure + * - OSI_CMD_SAVE_REGISTER + * Take backup of MAC MMIO address space + * - OSI_CMD_MAC_LB + * Configure MAC loopback + * - OSI_CMD_FLOW_CTRL + * Configure flow control settings + * arg1_u32 - Enable or disable flow control settings + * - OSI_CMD_SET_MODE + * Set Full/Half Duplex mode. + * arg1_u32 - mode + * - OSI_CMD_SET_SPEED + * Set Operating speed + * arg1_u32 - Operating speed + * - OSI_CMD_L2_FILTER + * configure L2 mac filter + * l2_filter_struct - OSI filter structure + * - OSI_CMD_RXCSUM_OFFLOAD + * Configure RX checksum offload in MAC + * arg1_u32 - enable(1)/disable(0) + * - OSI_CMD_ADJ_FREQ + * Adjust frequency + * arg6_u32 - Parts per Billion + * - OSI_CMD_ADJ_TIME + * Adjust MAC time with system time + * arg1_u32 - Delta time in nano seconds + * - OSI_CMD_CONFIG_PTP + * Configure PTP + * arg1_u32 - Enable(1) or disable(0) Time Stamping + * - OSI_CMD_GET_AVB + * Get CBS algo and parameters + * avb_struct - osi core avb data structure + * - OSI_CMD_SET_AVB + * Set CBS algo and parameters + * avb_struct - osi core avb data structure + * - OSI_CMD_CONFIG_RX_CRC_CHECK + * Configure CRC Checking for Received Packets + * arg1_u32 - Enable or disable checking of CRC field in + * received pkts + * - OSI_CMD_UPDATE_VLAN_ID + * invoke osi call to update VLAN ID + * arg1_u32 - VLAN ID + * - OSI_CMD_CONFIG_TXSTATUS + * Configure Tx packet status reporting + * Enable(1) or 
disable(0) tx packet status reporting + * - OSI_CMD_GET_HW_FEAT + * Reading MAC HW features + * hw_feat_struct - holds the supported features of the hardware + * - OSI_CMD_CONFIG_FW_ERR + * Configure forwarding of error packets + * arg1_u32 - queue index, Max OSI_EQOS_MAX_NUM_QUEUES + * arg2_u32 - FWD error enable(1)/disable(0) + * - OSI_CMD_ARP_OFFLOAD + * Configure ARP offload in MAC + * arg1_u32 - Enable/disable flag + * arg7_u8_p - Char array representation of IP address + * - OSI_CMD_VLAN_FILTER + * OSI call for configuring VLAN filter + * vlan_filter - vlan filter structure + * - OSI_CMD_CONFIG_EEE + * Configure EEE LPI in MAC + * arg1_u32 - Enable (1)/disable (0) tx lpi + * arg2_u32 - Tx LPI entry timer in usecs upto + * OSI_MAX_TX_LPI_TIMER (in steps of 8usec) + * - OSI_CMD_L3L4_FILTER + * invoke OSI call to add L3/L4 + * l3l4_filter - l3_l4 filter structure + * arg1_u32 - L3 filter (ipv4(0) or ipv6(1)) + * or L4 filter (tcp(0) or udp(1) + * arg2_u32 - filter based dma routing enable(1) + * arg3_u32 - dma channel for routing based on filter. + * Max OSI_EQOS_MAX_NUM_CHANS. 
+ * arg4_u32 - API call for L3 filter(0) or L4 filter(1) + * - OSI_CMD_SET_SYSTOHW_TIME + * set system to MAC hardware + * arg1_u32 - sec + * arg1_u32 - nsec + * - OSI_CMD_CONFIG_PTP_OFFLOAD + * enable/disable PTP offload feature + * pto_config - ptp offload structure + * - OSI_CMD_PTP_RXQ_ROUTE + * rxq routing to secific queue + * rxq_route - rxq routing information in structure + * - OSI_CMD_CONFIG_FRP + * Issue FRP command to HW + * frp_cmd - FRP command parameter + * - OSI_CMD_CONFIG_RSS + * Configure RSS + * - OSI_CMD_CONFIG_EST + * Configure EST registers and GCL to hw + * est - EST configuration structure + * - OSI_CMD_CONFIG_FPE + * Configuration FPE register and preemptable queue + * fpe - FPE configuration structure + * + * - OSI_CMD_GET_TX_TS + * Command to get TX timestamp for PTP packet + * ts - OSI core timestamp structure + * + * - OSI_CMD_FREE_TS + * Command to free old timestamp for PTP packet + * chan - DMA channel number +1. 0 will be used for onestep + * + * - OSI_CMD_CAP_TSC_PTP + * Capture TSC and PTP time stamp + * ptp_tsc_data - output structure with time + * + * - OSI_CMD_CONF_M2M_TS + * Enable/Disable MAC to MAC time sync for Secondary interface + * enable_disable - 1 - enable, 0- disable + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] data: void pointer pointing to osi_ioctl + * + * @pre MAC should be init and started. see osi_start_mac() + * + * @note + * Traceability Details: + * + * @usage + * - Allowed context for the API call + * - Interrupt handler: No + * - Signal handler: No + * - Thread safe: No + * - Async/Sync: Sync + * - Required Privileges: None + * - API Group: + * - Initialization: No + * - Run time: Yes + * - De-initialization: No + * + * @retval 0 on success + * @retval -1 on failure. 
+ */ +static nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, + struct osi_ioctl *data) +{ + struct core_local *l_core = (struct core_local *)(void *)osi_core; + const struct core_ops *ops_p; nve32_t ret = -1; -#if DRIFT_CAL struct osi_core_priv_data *sec_osi_core; struct core_local *secondary_osi_lcore; - struct core_ops *secondary_ops_p; - nvel64_t drift_value = 0x0; nveu32_t sec = 0x0; nveu32_t nsec = 0x0; - nveu32_t secondary_sec = 0x0; - nveu32_t secondary_nsec = 0x0; + nvel64_t drift_value = 0x0; nve32_t freq_adj_value = 0x0; - nvel64_t secondary_time; -#endif - - if (validate_args(osi_core, l_core) < 0) { - return ret; - } + nvel64_t secondary_time = 0x0; + nvel64_t primary_time = 0x0; ops_p = l_core->ops_p; - if (data == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "CORE: Invalid argument\n", 0ULL); - return -1; - } - switch (data->cmd) { -#ifndef OSI_STRIPPED_LIB - case OSI_CMD_RESTORE_REGISTER: - ret = ops_p->restore_registers(osi_core); - break; - case OSI_CMD_L3L4_FILTER: - ret = osi_l3l4_filter(osi_core, data->l3l4_filter, - data->arg1_u32, data->arg2_u32, - data->arg3_u32, data->arg4_u32); + ret = configure_l3l4_filter(osi_core, &data->l3l4_filter); + if (ret == 0) { + l_core->cfg.flags |= DYNAMIC_CFG_L3_L4; + } break; +#ifndef OSI_STRIPPED_LIB case OSI_CMD_MDC_CONFIG: ops_p->set_mdc_clk_rate(osi_core, data->arg5_u64); ret = 0; break; - case OSI_CMD_VALIDATE_CORE_REG: - ret = validate_core_regs(osi_core); - break; - case OSI_CMD_RESET_MMC: ops_p->reset_mmc(osi_core); ret = 0; break; - case OSI_CMD_SAVE_REGISTER: - ret = ops_p->save_registers(osi_core); - break; - case OSI_CMD_MAC_LB: ret = conf_mac_loopback(osi_core, data->arg1_u32); break; case OSI_CMD_FLOW_CTRL: ret = ops_p->config_flow_control(osi_core, data->arg1_u32); - break; + if (ret == 0) { + l_core->cfg.flow_ctrl = data->arg1_u32; + l_core->cfg.flags |= DYNAMIC_CFG_FC; + } - case OSI_CMD_GET_AVB: - ret = ops_p->get_avb_algorithm(osi_core, &data->avb); - break; 
- - case OSI_CMD_SET_AVB: - ret = ops_p->set_avb_algorithm(osi_core, &data->avb); break; case OSI_CMD_CONFIG_RX_CRC_CHECK: @@ -1713,16 +2475,24 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, case OSI_CMD_UPDATE_VLAN_ID: ret = vlan_id_update(osi_core, data->arg1_u32); + if (ret == 0) { + if ((data->arg1_u32 & VLAN_ACTION_MASK) == OSI_VLAN_ACTION_ADD) { + l_core->cfg.vlan[data->arg1_u32 & VLAN_VID_MASK].vid = + data->arg1_u32 & VLAN_VID_MASK; + l_core->cfg.vlan[data->arg1_u32 & VLAN_VID_MASK].used = OSI_ENABLE; + } else { + l_core->cfg.vlan[data->arg1_u32 & VLAN_VID_MASK].used = OSI_DISABLE; + } + + l_core->cfg.flags |= DYNAMIC_CFG_VLAN; + } + break; case OSI_CMD_CONFIG_TXSTATUS: ret = ops_p->config_tx_status(osi_core, data->arg1_u32); break; - case OSI_CMD_CONFIG_FW_ERR: - ret = ops_p->config_fw_err_pkts(osi_core, data->arg1_u32, - data->arg2_u32); - break; case OSI_CMD_ARP_OFFLOAD: ret = conf_arp_offload(osi_core, data->arg1_u32, @@ -1738,21 +2508,49 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, case OSI_CMD_CONFIG_EEE: ret = conf_eee(osi_core, data->arg1_u32, data->arg2_u32); + if (ret == 0) { + l_core->cfg.tx_lpi_enabled = data->arg1_u32; + l_core->cfg.tx_lpi_timer = data->arg2_u32; + l_core->cfg.flags |= DYNAMIC_CFG_EEE; + } + + break; + case OSI_CMD_CONFIG_FW_ERR: + ret = hw_config_fw_err_pkts(osi_core, data->arg1_u32, data->arg2_u32); break; -#endif /* !OSI_STRIPPED_LIB */ case OSI_CMD_POLL_FOR_MAC_RST: - ret = ops_p->poll_for_swr(osi_core); + ret = hw_poll_for_swr(osi_core); break; - case OSI_CMD_START_MAC: - ops_p->start_mac(osi_core); - ret = 0; + case OSI_CMD_GET_MAC_VER: + ret = osi_get_mac_version(osi_core, &data->arg1_u32); break; - case OSI_CMD_STOP_MAC: - ops_p->stop_mac(osi_core); - ret = 0; + case OSI_CMD_SET_MODE: + ret = hw_set_mode(osi_core, data->arg6_32); + break; +#endif /* !OSI_STRIPPED_LIB */ + + case OSI_CMD_GET_AVB: + ret = ops_p->get_avb_algorithm(osi_core, &data->avb); + break; + + case 
OSI_CMD_SET_AVB: + if (data->avb.algo == OSI_MTL_TXQ_AVALG_CBS) { + ret = hw_validate_avb_input(osi_core, &data->avb); + if (ret != 0) { + break; + } + } + + ret = ops_p->set_avb_algorithm(osi_core, &data->avb); + if (ret == 0) { + (void)osi_memcpy(&l_core->cfg.avb[data->avb.qindex].avb_info, + &data->avb, sizeof(struct osi_core_avb_algorithm)); + l_core->cfg.avb[data->avb.qindex].used = OSI_ENABLE; + l_core->cfg.flags |= DYNAMIC_CFG_AVB; + } break; case OSI_CMD_COMMON_ISR: @@ -1769,31 +2567,32 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, ret = 0; break; - case OSI_CMD_GET_MAC_VER: - ret = osi_get_mac_version(osi_core, &data->arg1_u32); - break; - - case OSI_CMD_SET_MODE: - ret = ops_p->set_mode(osi_core, data->arg6_32); - break; - case OSI_CMD_SET_SPEED: - ret = ops_p->set_speed(osi_core, data->arg6_32); + ret = hw_set_speed(osi_core, data->arg6_32); break; case OSI_CMD_L2_FILTER: ret = osi_l2_filter(osi_core, &data->l2_filter); + if (ret == 0) { + store_l2_filter(osi_core, &data->l2_filter); + l_core->cfg.flags |= DYNAMIC_CFG_L2; + } + break; case OSI_CMD_RXCSUM_OFFLOAD: - ret = ops_p->config_rxcsum_offload(osi_core, data->arg1_u32); + ret = hw_config_rxcsum_offload(osi_core, data->arg1_u32); + if (ret == 0) { + l_core->cfg.rxcsum = data->arg1_u32; + l_core->cfg.flags |= DYNAMIC_CFG_RXCSUM; + } + break; case OSI_CMD_ADJ_FREQ: ret = osi_adjust_freq(osi_core, data->arg6_32); -#if DRIFT_CAL if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CORE: adjust freq failed\n", 0ULL); break; } @@ -1804,7 +2603,7 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, } sec_osi_core = get_role_pointer(OSI_PTP_M2M_SECONDARY); - secondary_osi_lcore = (struct core_local *)sec_osi_core; + secondary_osi_lcore = (struct core_local *)(void *)sec_osi_core; if ((validate_args(sec_osi_core, secondary_osi_lcore) < 0) || (secondary_osi_lcore->hw_init_successful != OSI_ENABLE) || 
(secondary_osi_lcore->m2m_tsync != OSI_ENABLE)) { @@ -1812,16 +2611,10 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, } if (l_core->ether_m2m_role == OSI_PTP_M2M_PRIMARY) { - drift_value = 0x0; - osi_lock_irq_enabled(&secondary_osi_lcore->serv.m2m_lock); - read_sec_ns(sec_osi_core->base, - sec_osi_core->mac, &secondary_sec, &secondary_nsec); - read_sec_ns(osi_core->base, - osi_core->mac, &sec, &nsec); - osi_unlock_irq_enabled(&secondary_osi_lcore->serv.m2m_lock); + drift_value = dirft_calculation(osi_core, sec_osi_core, + &primary_time, + &secondary_time); - drift_value = dirft_calculation(sec, nsec, secondary_sec, secondary_nsec); - secondary_time = (secondary_sec * 1000000000LL) + secondary_nsec; secondary_osi_lcore->serv.const_i = I_COMPONENT_BY_10; secondary_osi_lcore->serv.const_p = P_COMPONENT_BY_10; freq_adj_value = freq_offset_calculate(sec_osi_core, @@ -1831,6 +2624,13 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, /* call adjust time as JUMP happened */ ret = osi_adjust_time(sec_osi_core, drift_value); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "CORE: adjust_time failed\n", + 0ULL); + } else { + ret = osi_adjust_freq(sec_osi_core, 0); + } } else { ret = osi_adjust_freq(sec_osi_core, freq_adj_value); @@ -1838,19 +2638,19 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, } if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CORE: adjust_freq for sec_controller failed\n", 0ULL); ret = 0; } -#endif + break; case OSI_CMD_ADJ_TIME: ret = osi_adjust_time(osi_core, data->arg8_64); -#if DRIFT_CAL + if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CORE: adjust_time failed\n", 0ULL); break; } @@ -1861,7 +2661,7 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, } sec_osi_core = get_role_pointer(OSI_PTP_M2M_SECONDARY); - 
secondary_osi_lcore = (struct core_local *)sec_osi_core; + secondary_osi_lcore = (struct core_local *)(void *)sec_osi_core; if ((validate_args(sec_osi_core, secondary_osi_lcore) < 0) || (secondary_osi_lcore->hw_init_successful != OSI_ENABLE) || (secondary_osi_lcore->m2m_tsync != OSI_ENABLE)) { @@ -1870,37 +2670,36 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, if (l_core->ether_m2m_role == OSI_PTP_M2M_PRIMARY) { drift_value = 0x0; - osi_lock_irq_enabled(&secondary_osi_lcore->serv.m2m_lock); - read_sec_ns(sec_osi_core->base, - sec_osi_core->mac, &secondary_sec, &secondary_nsec); - read_sec_ns(osi_core->base, - osi_core->mac, &sec, &nsec); - osi_unlock_irq_enabled(&secondary_osi_lcore->serv.m2m_lock); - drift_value = dirft_calculation(sec, nsec, - secondary_sec, - secondary_nsec); + drift_value = dirft_calculation(osi_core, sec_osi_core, + &primary_time, + &secondary_time); ret = osi_adjust_time(sec_osi_core, drift_value); if (ret == 0) { secondary_osi_lcore->serv.count = SERVO_STATS_0; secondary_osi_lcore->serv.drift = 0; secondary_osi_lcore->serv.last_ppb = 0; + ret = osi_adjust_freq(sec_osi_core, 0); } } if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CORE: adjust_time for sec_controller failed\n", 0ULL); ret = 0; } -#endif + break; case OSI_CMD_CONFIG_PTP: ret = osi_ptp_configuration(osi_core, data->arg1_u32); -#if DRIFT_CAL + if (ret == 0) { + l_core->cfg.ptp = data->arg1_u32; + l_core->cfg.flags |= DYNAMIC_CFG_PTP; + } + if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CORE: configure_ptp failed\n", 0ULL); break; } @@ -1911,7 +2710,7 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, } sec_osi_core = get_role_pointer(OSI_PTP_M2M_SECONDARY); - secondary_osi_lcore = (struct core_local *)sec_osi_core; + secondary_osi_lcore = (struct core_local *)(void *)sec_osi_core; if 
((validate_args(sec_osi_core, secondary_osi_lcore) < 0) || (secondary_osi_lcore->hw_init_successful != OSI_ENABLE) || (secondary_osi_lcore->m2m_tsync != OSI_ENABLE)) { @@ -1924,7 +2723,7 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, secondary_osi_lcore->serv.drift = 0; secondary_osi_lcore->serv.last_ppb = 0; } -#endif + break; case OSI_CMD_GET_HW_FEAT: @@ -1932,11 +2731,10 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, break; case OSI_CMD_SET_SYSTOHW_TIME: - ret = ops_p->set_systime_to_mac(osi_core, data->arg1_u32, - data->arg2_u32); -#if DRIFT_CAL + ret = hw_set_systime_to_mac(osi_core, data->arg1_u32, data->arg2_u32); + if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CORE: set systohw time failed\n", 0ULL); break; } @@ -1947,7 +2745,7 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, } sec_osi_core = get_role_pointer(OSI_PTP_M2M_SECONDARY); - secondary_osi_lcore = (struct core_local *)sec_osi_core; + secondary_osi_lcore = (struct core_local *)(void *)sec_osi_core; if ((validate_args(sec_osi_core, secondary_osi_lcore) < 0) || (secondary_osi_lcore->hw_init_successful != OSI_ENABLE) || (secondary_osi_lcore->m2m_tsync != OSI_ENABLE)) { @@ -1955,28 +2753,27 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, } if (l_core->ether_m2m_role == OSI_PTP_M2M_PRIMARY) { - drift_value = 0x0; osi_lock_irq_enabled(&secondary_osi_lcore->serv.m2m_lock); - read_sec_ns(osi_core->base, + common_get_systime_from_mac(osi_core->base, osi_core->mac, &sec, &nsec); osi_unlock_irq_enabled(&secondary_osi_lcore->serv.m2m_lock); - secondary_ops_p = secondary_osi_lcore->ops_p; - ret = secondary_ops_p->set_systime_to_mac(sec_osi_core, sec, - nsec); + ret = hw_set_systime_to_mac(sec_osi_core, sec, nsec); if (ret == 0) { secondary_osi_lcore->serv.count = SERVO_STATS_0; secondary_osi_lcore->serv.drift = 0; secondary_osi_lcore->serv.last_ppb = 0; + ret = 
osi_adjust_freq(sec_osi_core, 0); } } if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CORE: set_time for sec_controller failed\n", 0ULL); ret = 0; } -#endif + break; +#ifndef OSI_STRIPPED_LIB case OSI_CMD_CONFIG_PTP_OFFLOAD: ret = conf_ptp_offload(osi_core, &data->pto_config); break; @@ -1985,20 +2782,34 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, ret = rxq_route_config(osi_core, &data->rxq_route); break; - case OSI_CMD_CONFIG_FRP: - ret = configure_frp(osi_core, &data->frp_cmd); - break; - case OSI_CMD_CONFIG_RSS: ret = ops_p->config_rss(osi_core); break; +#endif /* !OSI_STRIPPED_LIB */ + case OSI_CMD_CONFIG_FRP: + ret = configure_frp(osi_core, &data->frp_cmd); + l_core->cfg.flags |= DYNAMIC_CFG_FRP; + break; + case OSI_CMD_CONFIG_EST: ret = config_est(osi_core, &data->est); + if (ret == 0) { + (void)osi_memcpy(&l_core->cfg.est, &data->est, + sizeof(struct osi_est_config)); + l_core->cfg.flags |= DYNAMIC_CFG_EST; + } + break; case OSI_CMD_CONFIG_FPE: ret = config_fpe(osi_core, &data->fpe); + if (ret == 0) { + (void)osi_memcpy(&l_core->cfg.fpe, &data->fpe, + sizeof(struct osi_fpe_config)); + l_core->cfg.flags |= DYNAMIC_CFG_FPE; + } + break; case OSI_CMD_READ_REG: @@ -2030,6 +2841,12 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, case OSI_CMD_MAC_MTU: ret = 0; +#ifdef MACSEC_SUPPORT + if ((osi_core->macsec_ops != OSI_NULL) && + (osi_core->macsec_ops->update_mtu != OSI_NULL)) { + ret = osi_core->macsec_ops->update_mtu(osi_core, data->arg1_u32); + } +#endif /* MACSEC_SUPPORT */ break; #ifdef OSI_DEBUG @@ -2043,7 +2860,7 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, break; #endif /* OSI_DEBUG */ case OSI_CMD_CAP_TSC_PTP: - ret = ops_p->ptp_tsc_capture(osi_core, &data->ptp_tsc); + ret = hw_ptp_tsc_capture(osi_core, &data->ptp_tsc); break; case OSI_CMD_CONF_M2M_TS: @@ -2056,9 +2873,33 @@ nve32_t osi_hal_handle_ioctl(struct 
osi_core_priv_data *osi_core, case OSI_CMD_HSI_CONFIGURE: ret = ops_p->core_hsi_configure(osi_core, data->arg1_u32); break; + case OSI_CMD_HSI_INJECT_ERR: + ret = ops_p->core_hsi_inject_err(osi_core, data->arg1_u32); + break; #endif + +#ifdef OSI_DEBUG + case OSI_CMD_DEBUG_INTR_CONFIG: +#ifdef DEBUG_MACSEC + osi_core->macsec_ops->intr_config(osi_core, data->arg1_u32); +#endif + ret = 0; + break; +#endif + case OSI_CMD_SUSPEND: + l_core->state = OSI_SUSPENDED; + ret = osi_hal_hw_core_deinit(osi_core); + break; + case OSI_CMD_RESUME: + ret = osi_hal_hw_core_init(osi_core); + if (ret < 0) { + break; + } + + apply_dynamic_cfg(osi_core); + break; default: - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CORE: Incorrect command\n", (nveul64_t)data->cmd); break; @@ -2067,24 +2908,6 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, return ret; } -nve32_t osi_get_hw_features(struct osi_core_priv_data *const osi_core, - struct osi_hw_features *hw_feat) -{ - struct core_local *l_core = (struct core_local *)osi_core; - - if (validate_args(osi_core, l_core) < 0) { - return -1; - } - - if (hw_feat == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "CORE: Invalid hw_feat\n", 0ULL); - return -1; - } - - return l_core->ops_p->get_hw_features(osi_core, hw_feat); -} - void hw_interface_init_core_ops(struct if_core_ops *if_ops_p) { if_ops_p->if_core_init = osi_hal_hw_core_init; diff --git a/osi/core/vlan_filter.c b/osi/core/vlan_filter.c index 4f99be2..9f8fdaf 100644 --- a/osi/core/vlan_filter.c +++ b/osi/core/vlan_filter.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -20,6 +20,7 @@ * DEALINGS IN THE SOFTWARE. 
*/ +#ifndef OSI_STRIPPED_LIB #include "../osi/common/common.h" #include "vlan_filter.h" @@ -35,11 +36,11 @@ * @return Index from VID array if match found. * @return Return VLAN_HW_FILTER_FULL_IDX if not found. */ -static inline unsigned int get_vlan_filter_idx( +static inline nveu32_t get_vlan_filter_idx( struct osi_core_priv_data *osi_core, - unsigned short vlan_id) + nveu16_t vlan_id) { - unsigned int vid_idx = VLAN_HW_FILTER_FULL_IDX; + nveu32_t vid_idx = VLAN_HW_FILTER_FULL_IDX; unsigned long bitmap = osi_core->vf_bitmap; unsigned long temp = 0U; @@ -48,7 +49,7 @@ static inline unsigned int get_vlan_filter_idx( if (osi_core->vid[temp] == vlan_id) { /* vlan ID match found */ - vid_idx = (unsigned int)temp; + vid_idx = (nveu32_t)temp; break; } @@ -70,11 +71,11 @@ static inline unsigned int get_vlan_filter_idx( * * @return 0 on success */ -static inline int allow_all_vid_tags(unsigned char *base, - unsigned int pass_all_vids) +static inline nve32_t allow_all_vid_tags(nveu8_t *base, + nveu32_t pass_all_vids) { - unsigned int vlan_tag_reg = 0; - unsigned int hash_filter_reg = 0; + nveu32_t vlan_tag_reg = 0; + nveu32_t hash_filter_reg = 0; vlan_tag_reg = osi_readl(base + MAC_VLAN_TAG_CTRL); hash_filter_reg = osi_readl(base + MAC_VLAN_HASH_FILTER); @@ -84,7 +85,7 @@ static inline int allow_all_vid_tags(unsigned char *base, hash_filter_reg |= VLAN_HASH_ALLOW_ALL; } else { vlan_tag_reg &= ~MAC_VLAN_TAG_CTRL_VHTM; - hash_filter_reg &= (unsigned int) ~VLAN_HASH_ALLOW_ALL; + hash_filter_reg &= (nveu32_t) ~VLAN_HASH_ALLOW_ALL; } osi_writel(vlan_tag_reg, base + MAC_VLAN_TAG_CTRL); @@ -107,11 +108,11 @@ static inline int allow_all_vid_tags(unsigned char *base, * @return 0 on Success. 
* @return negative value on failure */ -static inline int is_vlan_id_enqueued(struct osi_core_priv_data *osi_core, - unsigned short vlan_id, - unsigned int *idx) +static inline nve32_t is_vlan_id_enqueued(struct osi_core_priv_data *osi_core, + nveu16_t vlan_id, + nveu32_t *idx) { - unsigned int i = 0; + nveu32_t i = 0; if (osi_core->vlan_filter_cnt == VLAN_HW_FILTER_FULL_IDX) { /* No elements in SW queue to search */ @@ -140,11 +141,11 @@ static inline int is_vlan_id_enqueued(struct osi_core_priv_data *osi_core, * @return 0 on success. * @return negative value on failure. */ -static inline int enqueue_vlan_id(struct osi_core_priv_data *osi_core, - unsigned short vlan_id) +static inline nve32_t enqueue_vlan_id(struct osi_core_priv_data *osi_core, + nveu16_t vlan_id) { - int ret = 0; - unsigned int idx; + nve32_t ret = 0; + nveu32_t idx; if (osi_core->vlan_filter_cnt == VLAN_NUM_VID) { /* Entire SW queue full */ @@ -154,7 +155,7 @@ static inline int enqueue_vlan_id(struct osi_core_priv_data *osi_core, /* Check if requested vlan_id alredy queued */ ret = is_vlan_id_enqueued(osi_core, vlan_id, &idx); if (ret == 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "VLAN ID already programmed\n", 0ULL); return -1; @@ -177,25 +178,25 @@ static inline int enqueue_vlan_id(struct osi_core_priv_data *osi_core, * @return 0 on success. * @return -1 on failure. 
*/ -static inline int poll_for_vlan_filter_reg_rw( +static inline nve32_t poll_for_vlan_filter_reg_rw( struct osi_core_priv_data *osi_core) { - unsigned int retry = 10; - unsigned int count; - unsigned int val = 0; - int cond = 1; + nveu32_t retry = 10; + nveu32_t count; + nveu32_t val = 0; + nve32_t cond = 1; count = 0; while (cond == 1) { if (count > retry) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "VLAN filter update timedout\n", 0ULL); return -1; } count++; - val = osi_readl((unsigned char *)osi_core->base + + val = osi_readl((nveu8_t *)osi_core->base + MAC_VLAN_TAG_CTRL); if ((val & MAC_VLAN_TAG_CTRL_OB) == OSI_NONE) { /* Set cond to 0 to exit loop */ @@ -222,17 +223,17 @@ static inline int poll_for_vlan_filter_reg_rw( * @return 0 on success * @return -1 on failure. */ -static inline int update_vlan_filters(struct osi_core_priv_data *osi_core, - unsigned int vid_idx, - unsigned int val) +static inline nve32_t update_vlan_filters(struct osi_core_priv_data *osi_core, + nveu32_t vid_idx, + nveu32_t val) { - unsigned char *base = (unsigned char *)osi_core->base; - int ret = 0; + nveu8_t *base = (nveu8_t *)osi_core->base; + nve32_t ret = 0; osi_writel(val, base + MAC_VLAN_TAG_DATA); val = osi_readl(base + MAC_VLAN_TAG_CTRL); - val &= (unsigned int) ~MAC_VLAN_TAG_CTRL_OFS_MASK; + val &= (nveu32_t) ~MAC_VLAN_TAG_CTRL_OFS_MASK; val |= vid_idx << MAC_VLAN_TAG_CTRL_OFS_SHIFT; val &= ~MAC_VLAN_TAG_CTRL_CT; val |= MAC_VLAN_TAG_CTRL_OB; @@ -240,7 +241,7 @@ static inline int update_vlan_filters(struct osi_core_priv_data *osi_core, ret = poll_for_vlan_filter_reg_rw(osi_core); if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to update VLAN filters\n", 0ULL); return -1; } @@ -259,13 +260,13 @@ static inline int update_vlan_filters(struct osi_core_priv_data *osi_core, * @return 0 on success * @return -1 on failure. 
*/ -static inline int add_vlan_id(struct osi_core_priv_data *osi_core, +static inline nve32_t add_vlan_id(struct osi_core_priv_data *osi_core, struct core_ops *ops_p, - unsigned short vlan_id) + nveu16_t vlan_id) { - unsigned int vid_idx = 0; - unsigned int val = 0; - int ret = 0; + nveu32_t vid_idx = 0; + nveu32_t val = 0; + nve32_t ret = 0; /* Check if VLAN ID already programmed */ vid_idx = get_vlan_filter_idx(osi_core, vlan_id); @@ -277,7 +278,7 @@ static inline int add_vlan_id(struct osi_core_priv_data *osi_core, } /* Get free index to add the VID */ - vid_idx = (unsigned int) __builtin_ctzl(~osi_core->vf_bitmap); + vid_idx = (nveu32_t) __builtin_ctzl(~osi_core->vf_bitmap); /* If there is no free filter index add into SW VLAN filter queue to store */ if (vid_idx == VLAN_HW_FILTER_FULL_IDX) { /* Add VLAN ID to SW queue */ @@ -299,14 +300,14 @@ static inline int add_vlan_id(struct osi_core_priv_data *osi_core, OSI_DISABLE, OSI_DISABLE); if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Failed to enable VLAN filtering\n", 0ULL); return -1; } } - val = osi_readl((unsigned char *)osi_core->base + MAC_VLAN_TAG_DATA); - val &= (unsigned int) ~VLAN_VID_MASK; + val = osi_readl((nveu8_t *)osi_core->base + MAC_VLAN_TAG_DATA); + val &= (nveu32_t) ~VLAN_VID_MASK; val |= (vlan_id | MAC_VLAN_TAG_DATA_ETV | MAC_VLAN_TAG_DATA_VEN); return update_vlan_filters(osi_core, vid_idx, val); @@ -325,10 +326,10 @@ static inline int add_vlan_id(struct osi_core_priv_data *osi_core, * @return 0 on success * @return -1 on failure. 
*/ -static inline int dequeue_vlan_id(struct osi_core_priv_data *osi_core, - unsigned int idx) +static inline nve32_t dequeue_vlan_id(struct osi_core_priv_data *osi_core, + nveu32_t idx) { - unsigned int i; + nveu32_t i; if (osi_core->vlan_filter_cnt == VLAN_HW_MAX_NRVF) { return -1; @@ -336,14 +337,14 @@ static inline int dequeue_vlan_id(struct osi_core_priv_data *osi_core, /* Left shift the array elements by one for the VID order */ for (i = idx; i <= osi_core->vlan_filter_cnt; i++) { - osi_core->vid[i] = osi_core->vid[i + 1]; + osi_core->vid[i] = osi_core->vid[i + 1U]; } osi_core->vid[i] = VLAN_ID_INVALID; osi_core->vlan_filter_cnt--; if (osi_core->vlan_filter_cnt == VLAN_HW_MAX_NRVF) { - allow_all_vid_tags(osi_core->base, OSI_DISABLE); + return allow_all_vid_tags(osi_core->base, OSI_DISABLE); } return 0; @@ -363,14 +364,14 @@ static inline int dequeue_vlan_id(struct osi_core_priv_data *osi_core, * @return 0 on success * @return -1 on failure. */ -static inline int dequeue_vid_to_add_filter_reg( +static inline nve32_t dequeue_vid_to_add_filter_reg( struct osi_core_priv_data *osi_core, - unsigned int vid_idx) + nveu32_t vid_idx) { - unsigned int val = 0; - unsigned short vlan_id = 0; - unsigned int i = 0; - int ret = 0; + nveu32_t val = 0; + nveu16_t vlan_id = 0; + nveu32_t i = 0; + nve32_t ret = 0; vlan_id = osi_core->vid[VLAN_HW_FILTER_FULL_IDX]; if (vlan_id == VLAN_ID_INVALID) { @@ -380,8 +381,8 @@ static inline int dequeue_vid_to_add_filter_reg( osi_core->vf_bitmap |= OSI_BIT(vid_idx); osi_core->vid[vid_idx] = vlan_id; - val = osi_readl((unsigned char *)osi_core->base + MAC_VLAN_TAG_DATA); - val &= (unsigned int) ~VLAN_VID_MASK; + val = osi_readl((nveu8_t *)osi_core->base + MAC_VLAN_TAG_DATA); + val &= (nveu32_t) ~VLAN_VID_MASK; val |= (vlan_id | MAC_VLAN_TAG_DATA_ETV | MAC_VLAN_TAG_DATA_VEN); ret = update_vlan_filters(osi_core, vid_idx, val); @@ -390,7 +391,7 @@ static inline int dequeue_vid_to_add_filter_reg( } for (i = VLAN_HW_FILTER_FULL_IDX; i <= 
osi_core->vlan_filter_cnt; i++) { - osi_core->vid[i] = osi_core->vid[i + 1]; + osi_core->vid[i] = osi_core->vid[i + 1U]; } osi_core->vid[i] = VLAN_ID_INVALID; @@ -409,14 +410,14 @@ static inline int dequeue_vid_to_add_filter_reg( * @return 0 on success * @return -1 on failure. */ -static inline int del_vlan_id(struct osi_core_priv_data *osi_core, +static inline nve32_t del_vlan_id(struct osi_core_priv_data *osi_core, struct core_ops *ops_p, - unsigned short vlan_id) + nveu16_t vlan_id) { - unsigned int vid_idx = 0; - unsigned int val = 0; - unsigned int idx; - int ret = 0; + nveu32_t vid_idx = 0; + nveu32_t val = 0; + nveu32_t idx; + nve32_t ret = 0; /* Search for vlan filter index to be deleted */ vid_idx = get_vlan_filter_idx(osi_core, vlan_id); @@ -445,26 +446,29 @@ static inline int del_vlan_id(struct osi_core_priv_data *osi_core, OSI_DISABLE, OSI_DISABLE); if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Failed to disable VLAN filtering\n", 0ULL); return -1; } } if (osi_core->vlan_filter_cnt == VLAN_HW_MAX_NRVF) { - allow_all_vid_tags(osi_core->base, OSI_DISABLE); + ret = allow_all_vid_tags(osi_core->base, OSI_DISABLE); + if (ret < 0) { + return -1; + } } /* if SW queue is not empty dequeue from SW queue and update filter */ return dequeue_vid_to_add_filter_reg(osi_core, vid_idx); } -int update_vlan_id(struct osi_core_priv_data *osi_core, +nve32_t update_vlan_id(struct osi_core_priv_data *osi_core, struct core_ops *ops_p, - unsigned int vid) + nveu32_t vid) { - unsigned int action = vid & VLAN_ACTION_MASK; - unsigned short vlan_id = vid & VLAN_VID_MASK; + nveu32_t action = vid & VLAN_ACTION_MASK; + nveu16_t vlan_id = (nveu16_t)(vid & VLAN_VID_MASK); if (action == OSI_VLAN_ACTION_ADD) { return add_vlan_id(osi_core, ops_p, vlan_id); @@ -472,3 +476,4 @@ int update_vlan_id(struct osi_core_priv_data *osi_core, return del_vlan_id(osi_core, ops_p, vlan_id); } +#endif /* !OSI_STRIPPED_LIB */ diff --git 
a/osi/core/vlan_filter.h b/osi/core/vlan_filter.h index d4406ce..e60568f 100644 --- a/osi/core/vlan_filter.h +++ b/osi/core/vlan_filter.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -26,6 +26,7 @@ #include #include "core_local.h" +#ifndef OSI_STRIPPED_LIB /** * @addtogroup MAC-VLAN MAC VLAN configuration registers and bit fields * @@ -36,7 +37,7 @@ #define MAC_VLAN_TAG_CTRL 0x50 #define MAC_VLAN_TAG_DATA 0x54 #define MAC_VLAN_HASH_FILTER 0x58 -#define MAC_VLAN_TAG_CTRL_OFS_MASK 0x7C +#define MAC_VLAN_TAG_CTRL_OFS_MASK 0x7CU #define MAC_VLAN_TAG_CTRL_OFS_SHIFT 2U #define MAC_VLAN_TAG_CTRL_CT OSI_BIT(1) #define MAC_VLAN_TAG_CTRL_OB OSI_BIT(0) @@ -53,9 +54,9 @@ */ #define VLAN_HW_MAX_NRVF 32U #define VLAN_HW_FILTER_FULL_IDX VLAN_HW_MAX_NRVF -#define VLAN_VID_MASK 0xFFFF -#define VLAN_ID_INVALID 0xFFFF -#define VLAN_HASH_ALLOW_ALL 0xFFFF +#define VLAN_VID_MASK 0xFFFFU +#define VLAN_ID_INVALID 0xFFFFU +#define VLAN_HASH_ALLOW_ALL 0xFFFFU #define VLAN_ACTION_MASK OSI_BIT(31) /** @} */ @@ -70,7 +71,7 @@ * @return 0 on success * @return -1 on failure. */ -int update_vlan_id(struct osi_core_priv_data *osi_core, - struct core_ops *ops_p, - unsigned int vid); +nve32_t update_vlan_id(struct osi_core_priv_data *osi_core, + struct core_ops *ops_p, nveu32_t vid); +#endif /* !OSI_STRIPPED_LIB */ #endif /* VLAN_FILTER_H */ diff --git a/osi/core/xpcs.c b/osi/core/xpcs.c index 6ba0d31..b61cacb 100644 --- a/osi/core/xpcs.c +++ b/osi/core/xpcs.c @@ -21,6 +21,7 @@ */ #include "xpcs.h" +#include "core_local.h" /** * @brief xpcs_poll_for_an_complete - Polling for AN complete. @@ -34,22 +35,22 @@ * @retval 0 on success * @retval -1 on failure. 
*/ -static inline int xpcs_poll_for_an_complete(struct osi_core_priv_data *osi_core, - unsigned int *an_status) +static inline nve32_t xpcs_poll_for_an_complete(struct osi_core_priv_data *osi_core, + nveu32_t *an_status) { void *xpcs_base = osi_core->xpcs_base; - unsigned int status = 0; - unsigned int retry = 1000; - unsigned int count; - int cond = 1; - int ret = 0; + nveu32_t status = 0; + nveu32_t retry = 1000; + nveu32_t count; + nve32_t cond = 1; + nve32_t ret = 0; /* 14. Poll for AN complete */ cond = 1; count = 0; while (cond == 1) { if (count > retry) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "XPCS AN completion timed out\n", 0ULL); #ifdef HSI_SUPPORT if (osi_core->hsi.enabled == OSI_ENABLE) { @@ -59,7 +60,8 @@ static inline int xpcs_poll_for_an_complete(struct osi_core_priv_data *osi_core, osi_core->hsi.report_count_err[AUTONEG_ERR_IDX] = OSI_ENABLE; } #endif - return -1; + ret = -1; + goto fail; } count++; @@ -73,20 +75,22 @@ static inline int xpcs_poll_for_an_complete(struct osi_core_priv_data *osi_core, status &= ~XPCS_VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR; ret = xpcs_write_safety(osi_core, XPCS_VR_MII_AN_INTR_STS, status); if (ret != 0) { - return ret; + goto fail; } cond = 0; } } if ((status & XPCS_USXG_AN_STS_SPEED_MASK) == 0U) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "XPCS AN completed with zero speed\n", 0ULL); - return -1; + ret = -1; + goto fail; } *an_status = status; - return 0; +fail: + return ret; } /** @@ -100,11 +104,11 @@ static inline int xpcs_poll_for_an_complete(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure */ -static inline int xpcs_set_speed(struct osi_core_priv_data *osi_core, - unsigned int status) +static inline nve32_t xpcs_set_speed(struct osi_core_priv_data *osi_core, + nveu32_t status) { - unsigned int speed = status & XPCS_USXG_AN_STS_SPEED_MASK; - unsigned int ctrl = 0; + 
nveu32_t speed = status & XPCS_USXG_AN_STS_SPEED_MASK; + nveu32_t ctrl = 0; void *xpcs_base = osi_core->xpcs_base; ctrl = xpcs_read(xpcs_base, XPCS_SR_MII_CTRL); @@ -141,21 +145,21 @@ static inline int xpcs_set_speed(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. */ -int xpcs_start(struct osi_core_priv_data *osi_core) +nve32_t xpcs_start(struct osi_core_priv_data *osi_core) { void *xpcs_base = osi_core->xpcs_base; - unsigned int an_status = 0; - unsigned int retry = RETRY_COUNT; - unsigned int count = 0; - unsigned int ctrl = 0; - int ret = 0; - int cond = COND_NOT_MET; + nveu32_t an_status = 0; + nveu32_t retry = RETRY_COUNT; + nveu32_t count = 0; + nveu32_t ctrl = 0; + nve32_t ret = 0; + nve32_t cond = COND_NOT_MET; if (osi_core->xpcs_base == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "XPCS base is NULL", 0ULL); - /* TODO: Remove this once silicon arrives */ - return 0; + ret = -1; + goto fail; } if ((osi_core->phy_iface_mode == OSI_USXGMII_MODE_10G) || @@ -164,16 +168,16 @@ int xpcs_start(struct osi_core_priv_data *osi_core) ctrl |= XPCS_SR_MII_CTRL_AN_ENABLE; ret = xpcs_write_safety(osi_core, XPCS_SR_MII_CTRL, ctrl); if (ret != 0) { - return ret; + goto fail; } ret = xpcs_poll_for_an_complete(osi_core, &an_status); if (ret < 0) { - return ret; + goto fail; } ret = xpcs_set_speed(osi_core, an_status); if (ret != 0) { - return ret; + goto fail; } /* USXGMII Rate Adaptor Reset before data transfer */ ctrl = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_DIG_CTRL1); @@ -181,7 +185,8 @@ int xpcs_start(struct osi_core_priv_data *osi_core) xpcs_write(xpcs_base, XPCS_VR_XS_PCS_DIG_CTRL1, ctrl); while (cond == COND_NOT_MET) { if (count > retry) { - return -1; + ret = -1; + goto fail; } count++; @@ -200,7 +205,8 @@ int xpcs_start(struct osi_core_priv_data *osi_core) count = 0; while (cond == COND_NOT_MET) { if (count > retry) { - return -1; + ret = -1; + break; } count++; @@ 
-210,11 +216,16 @@ int xpcs_start(struct osi_core_priv_data *osi_core) XPCS_SR_XS_PCS_STS1_RLU) { cond = COND_MET; } else { - osi_core->osd_ops.udelay(1000U); + /* Maximum wait delay as per HW team is 1msec. + * So add a loop for 1000 iterations with 1usec delay, + * so that if check get satisfies before 1msec will come + * out of loop and it can save some boot time + */ + osi_core->osd_ops.udelay(1U); } } - - return 0; +fail: + return ret; } /** @@ -230,46 +241,50 @@ int xpcs_start(struct osi_core_priv_data *osi_core) * @retval -1 on failure. */ static nve32_t xpcs_uphy_lane_bring_up(struct osi_core_priv_data *osi_core, - unsigned int lane_init_en) + nveu32_t lane_init_en) { void *xpcs_base = osi_core->xpcs_base; - nveu32_t retry = XPCS_RETRY_COUNT; + nveu32_t retry = 5U; nve32_t cond = COND_NOT_MET; nveu32_t val = 0; nveu32_t count; + nve32_t ret = 0; val = osi_readla(osi_core, (nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_STATUS); - if ((val & XPCS_WRAP_UPHY_STATUS_TX_P_UP_STATUS) == + if ((val & XPCS_WRAP_UPHY_STATUS_TX_P_UP_STATUS) != XPCS_WRAP_UPHY_STATUS_TX_P_UP_STATUS) { - /* return success if TX lane is already UP */ - return 0; - } - - val = osi_readla(osi_core, - (nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_HW_INIT_CTRL); - val |= lane_init_en; - osi_writela(osi_core, val, - (nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_HW_INIT_CTRL); - - count = 0; - while (cond == COND_NOT_MET) { - if (count > retry) { - return -1; - } - count++; - val = osi_readla(osi_core, - (nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_HW_INIT_CTRL); - if ((val & lane_init_en) == OSI_NONE) { - /* exit loop */ - cond = COND_MET; - } else { - osi_core->osd_ops.udelay(500U); + (nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_HW_INIT_CTRL); + val |= lane_init_en; + osi_writela(osi_core, val, + (nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_HW_INIT_CTRL); + + count = 0; + while (cond == COND_NOT_MET) { + if (count > retry) { + ret = -1; + goto fail; + } + count++; + + val = osi_readla(osi_core, + (nveu8_t *)xpcs_base + 
XPCS_WRAP_UPHY_HW_INIT_CTRL); + if ((val & lane_init_en) == OSI_NONE) { + /* exit loop */ + cond = COND_MET; + } else { + /* Max wait time is 1usec. + * Most of the time loop got exited in first iteration. + * but added an extra count of 4 for safer side + */ + osi_core->osd_ops.udelay(1U); + } } } - return 0; +fail: + return ret; } /** @@ -285,15 +300,17 @@ static nve32_t xpcs_uphy_lane_bring_up(struct osi_core_priv_data *osi_core, static nve32_t xpcs_check_pcs_lock_status(struct osi_core_priv_data *osi_core) { void *xpcs_base = osi_core->xpcs_base; - nveu32_t retry = XPCS_RETRY_COUNT; + nveu32_t retry = RETRY_COUNT; nve32_t cond = COND_NOT_MET; nveu32_t val = 0; nveu32_t count; + nve32_t ret = 0; count = 0; while (cond == COND_NOT_MET) { if (count > retry) { - return -1; + ret = -1; + goto fail; } count++; @@ -304,14 +321,19 @@ static nve32_t xpcs_check_pcs_lock_status(struct osi_core_priv_data *osi_core) /* exit loop */ cond = COND_MET; } else { - osi_core->osd_ops.udelay(500U); + /* Maximum wait delay as per HW team is 1msec. 
+ * So add a loop for 1000 iterations with 1usec delay, + * so that if the check gets satisfied before 1msec we come + * out of the loop and it can save some boot time + */ + osi_core->osd_ops.udelay(1U); } } /* Clear the status */ osi_writela(osi_core, val, (nveu8_t *)xpcs_base + XPCS_WRAP_IRQ_STATUS); - - return 0; +fail: + return ret; } /** @@ -327,16 +349,19 @@ static nve32_t xpcs_check_pcs_lock_status(struct osi_core_priv_data *osi_core) */ static nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core) { - unsigned int retry = 1000; - unsigned int count; + struct core_local *l_core = (struct core_local *)(void *)osi_core; + nveu32_t retry = 7U; + nveu32_t count; nveu32_t val = 0; - int cond; + nve32_t cond; + nve32_t ret = 0; if (xpcs_uphy_lane_bring_up(osi_core, XPCS_WRAP_UPHY_HW_INIT_CTRL_TX_EN) < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "UPHY TX lane bring-up failed\n", 0ULL); - return -1; + ret = -1; + goto fail; } val = osi_readla(osi_core, @@ -389,7 +414,8 @@ static nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core) count = 0; while (cond == COND_NOT_MET) { if (count > retry) { - return -1; + ret = -1; + goto fail; } count++; @@ -397,10 +423,17 @@ static nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core) val = osi_readla(osi_core, (nveu8_t *)osi_core->xpcs_base + XPCS_WRAP_UPHY_RX_CONTROL_0_0); - if ((val & XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_CAL_EN) == 0) { + if ((val & XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_CAL_EN) == 0U) { cond = COND_MET; } else { - osi_core->osd_ops.udelay(1000U); + /* Maximum wait delay as per HW team is 100 usec. + * But most of the time as per experiments it takes + * around 14usec to satisfy the condition, so add a + * minimum delay of 14usec and loop it for 7 times. + * With this 14usec delay the condition gets satisfied + * in the first iteration itself.
+ */ + osi_core->osd_ops.udelay(14U); } } @@ -433,12 +466,20 @@ static nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core) XPCS_WRAP_UPHY_RX_CONTROL_0_0); if (xpcs_check_pcs_lock_status(osi_core) < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "Failed to get PCS block lock\n", 0ULL); - return -1; + if (l_core->lane_status == OSI_ENABLE) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "Failed to get PCS block lock\n", 0ULL); + l_core->lane_status = OSI_DISABLE; + } + ret = -1; + goto fail; + } else { + OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "PCS block lock SUCCESS\n", 0ULL); + l_core->lane_status = OSI_ENABLE; } - - return 0; +fail: + return ret; } /** @@ -451,28 +492,25 @@ static nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core) * @retval 0 on success * @retval -1 on failure. */ -int xpcs_init(struct osi_core_priv_data *osi_core) +nve32_t xpcs_init(struct osi_core_priv_data *osi_core) { void *xpcs_base = osi_core->xpcs_base; - unsigned int retry = 1000; - unsigned int count; - unsigned int ctrl = 0; - int cond = 1; - int ret = 0; + nveu32_t retry = 1000; + nveu32_t count; + nveu32_t ctrl = 0; + nve32_t cond = 1; + nve32_t ret = 0; if (osi_core->xpcs_base == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "XPCS base is NULL", 0ULL); - /* TODO: Remove this once silicon arrives */ - return 0; + ret = -1; + goto fail; } - if (osi_core->pre_si != OSI_ENABLE) { - if (xpcs_lane_bring_up(osi_core) < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "TX/RX lane bring-up failed\n", 0ULL); - return -1; - } + if (xpcs_lane_bring_up(osi_core) < 0) { + ret = -1; + goto fail; } /* Switching to USXGMII Mode based on @@ -484,7 +522,7 @@ int xpcs_init(struct osi_core_priv_data *osi_core) ctrl |= XPCS_SR_XS_PCS_CTRL2_PCS_TYPE_SEL_BASE_R; ret = xpcs_write_safety(osi_core, XPCS_SR_XS_PCS_CTRL2, ctrl); if (ret != 0) { - return ret; + goto fail; } /* 2. 
enable USXGMII Mode inside DWC_xpcs */ @@ -501,7 +539,7 @@ int xpcs_init(struct osi_core_priv_data *osi_core) ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_KR_CTRL, ctrl); if (ret != 0) { - return ret; + goto fail; } /* 4. Program PHY to operate at 10Gbps/5Gbps/2Gbps * this step not required since PHY speed programming @@ -512,7 +550,7 @@ int xpcs_init(struct osi_core_priv_data *osi_core) ctrl |= XPCS_VR_XS_PCS_DIG_CTRL1_USXG_EN; ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_DIG_CTRL1, ctrl); if (ret != 0) { - return ret; + goto fail; } /* XPCS_VR_XS_PCS_DIG_CTRL1_VR_RST bit is self clearing @@ -528,7 +566,8 @@ int xpcs_init(struct osi_core_priv_data *osi_core) count = 0; while (cond == 1) { if (count > retry) { - return -1; + ret = -1; + goto fail; } count++; @@ -551,13 +590,13 @@ int xpcs_init(struct osi_core_priv_data *osi_core) ctrl &= ~XPCS_SR_AN_CTRL_AN_EN; ret = xpcs_write_safety(osi_core, XPCS_SR_AN_CTRL, ctrl); if (ret != 0) { - return ret; + goto fail; } ctrl = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_DIG_CTRL1); ctrl |= XPCS_VR_XS_PCS_DIG_CTRL1_CL37_BP; ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_DIG_CTRL1, ctrl); if (ret != 0) { - return ret; + goto fail; } } @@ -569,10 +608,11 @@ int xpcs_init(struct osi_core_priv_data *osi_core) /* 11. XPCS configured as MAC-side USGMII - NA */ /* 13. TODO: If there is interrupt enabled for AN interrupt */ - - return 0; +fail: + return ret; } +#ifndef OSI_STRIPPED_LIB /** * @brief xpcs_eee - XPCS enable/disable EEE * @@ -585,54 +625,55 @@ int xpcs_init(struct osi_core_priv_data *osi_core) * @retval 0 on success * @retval -1 on failure. 
*/ -int xpcs_eee(struct osi_core_priv_data *osi_core, unsigned int en_dis) +nve32_t xpcs_eee(struct osi_core_priv_data *osi_core, nveu32_t en_dis) { void *xpcs_base = osi_core->xpcs_base; - unsigned int val = 0x0U; - int ret = 0; + nveu32_t val = 0x0U; + nve32_t ret = 0; - if (en_dis != OSI_ENABLE && en_dis != OSI_DISABLE) { - return -1; + if ((en_dis != OSI_ENABLE) && (en_dis != OSI_DISABLE)) { + ret = -1; + goto fail; } - if (xpcs_base == OSI_NULL) - return -1; + if (xpcs_base == OSI_NULL) { + ret = -1; + goto fail; + } if (en_dis == OSI_DISABLE) { val = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_EEE_MCTRL0); val &= ~XPCS_VR_XS_PCS_EEE_MCTRL0_LTX_EN; val &= ~XPCS_VR_XS_PCS_EEE_MCTRL0_LRX_EN; ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_EEE_MCTRL0, val); + } else { + + /* 1. Check if DWC_xpcs supports the EEE feature by + * reading the SR_XS_PCS_EEE_ABL register + * 1000BASEX-Only is different config then else so can (skip) + */ + + /* 2. Program various timers used in the EEE mode depending on the + * clk_eee_i clock frequency. default times are same as IEEE std + * clk_eee_i() is 102MHz. MULT_FACT_100NS = 9 because 9.8ns*10 = 98 + * which is between 80 and 120 this leads to default setting match + */ + + val = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_EEE_MCTRL0); + /* 3. If FEC is enabled in the KR mode (skip in FPGA)*/ + /* 4. enable the EEE feature on the Tx path and Rx path */ + val |= (XPCS_VR_XS_PCS_EEE_MCTRL0_LTX_EN | + XPCS_VR_XS_PCS_EEE_MCTRL0_LRX_EN); + ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_EEE_MCTRL0, val); if (ret != 0) { - return ret; + goto fail; } - return 0; + /* Transparent Tx LPI Mode Enable */ + val = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_EEE_MCTRL1); + val |= XPCS_VR_XS_PCS_EEE_MCTRL1_TRN_LPI; + ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_EEE_MCTRL1, val); } - - /* 1. Check if DWC_xpcs supports the EEE feature by - * reading the SR_XS_PCS_EEE_ABL register - * 1000BASEX-Only is different config then else so can (skip) */ - - /* 2. 
Program various timers used in the EEE mode depending on the - * clk_eee_i clock frequency. default times are same as IEEE std - * clk_eee_i() is 102MHz. MULT_FACT_100NS = 9 because 9.8ns*10 = 98 - * which is between 80 and 120 this leads to default setting match */ - - val = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_EEE_MCTRL0); - /* 3. If FEC is enabled in the KR mode (skip in FPGA)*/ - /* 4. enable the EEE feature on the Tx path and Rx path */ - val |= (XPCS_VR_XS_PCS_EEE_MCTRL0_LTX_EN | - XPCS_VR_XS_PCS_EEE_MCTRL0_LRX_EN); - ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_EEE_MCTRL0, val); - if (ret != 0) { - return ret; - } - /* Transparent Tx LPI Mode Enable */ - val = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_EEE_MCTRL1); - val |= XPCS_VR_XS_PCS_EEE_MCTRL1_TRN_LPI; - ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_EEE_MCTRL1, val); - if (ret != 0) { - return ret; - } - return 0; +fail: + return ret; } +#endif /* !OSI_STRIPPED_LIB */ diff --git a/osi/core/xpcs.h b/osi/core/xpcs.h index 070e441..be788e3 100644 --- a/osi/core/xpcs.h +++ b/osi/core/xpcs.h @@ -26,15 +26,6 @@ #include "../osi/common/common.h" #include -/** - * @addtogroup XPCS helper macros - * - * @brief XPCS helper macros. 
- * @{ - */ -#define XPCS_RETRY_COUNT (RETRY_COUNT * (2U)) -/** @} */ - /** * @addtogroup XPCS Register offsets * @@ -42,24 +33,27 @@ * @{ */ #define XPCS_ADDRESS 0x03FC -#define XPCS_SR_XS_PCS_CTRL1 0xC0000 #define XPCS_SR_XS_PCS_STS1 0xC0004 #define XPCS_SR_XS_PCS_CTRL2 0xC001C -#define XPCS_SR_XS_PCS_EEE_ABL 0xC0050 -#define XPCS_SR_XS_PCS_EEE_ABL2 0xC0054 #define XPCS_VR_XS_PCS_DIG_CTRL1 0xE0000 #define XPCS_VR_XS_PCS_KR_CTRL 0xE001C #define XPCS_SR_AN_CTRL 0x1C0000 #define XPCS_SR_MII_CTRL 0x7C0000 #define XPCS_VR_MII_AN_INTR_STS 0x7E0008 -#define XPCS_VR_XS_PCS_EEE_MCTRL0 0xE0018 -#define XPCS_VR_XS_PCS_EEE_MCTRL1 0xE002C #define XPCS_WRAP_UPHY_HW_INIT_CTRL 0x8020 #define XPCS_WRAP_UPHY_STATUS 0x8044 #define XPCS_WRAP_IRQ_STATUS 0x8050 #define XPCS_WRAP_UPHY_RX_CONTROL_0_0 0x801C /** @} */ +#ifndef OSI_STRIPPED_LIB +#define XPCS_VR_XS_PCS_EEE_MCTRL0 0xE0018 +#define XPCS_VR_XS_PCS_EEE_MCTRL1 0xE002C + +#define XPCS_VR_XS_PCS_EEE_MCTRL1_TRN_LPI OSI_BIT(0) +#define XPCS_VR_XS_PCS_EEE_MCTRL0_LTX_EN OSI_BIT(0) +#define XPCS_VR_XS_PCS_EEE_MCTRL0_LRX_EN OSI_BIT(1) +#endif /* !OSI_STRIPPED_LIB */ /** * @addtogroup XPCS-BIT Register bit fileds @@ -67,16 +61,12 @@ * @brief XPCS register bit fields * @{ */ -#define XPCS_SR_XS_PCS_CTRL1_RST OSI_BIT(15) #define XPCS_SR_XS_PCS_CTRL2_PCS_TYPE_SEL_BASE_R 0x0U #define XPCS_SR_XS_PCS_STS1_RLU OSI_BIT(2) #define XPCS_VR_XS_PCS_DIG_CTRL1_USXG_EN OSI_BIT(9) #define XPCS_VR_XS_PCS_DIG_CTRL1_VR_RST OSI_BIT(15) #define XPCS_VR_XS_PCS_DIG_CTRL1_USRA_RST OSI_BIT(10) #define XPCS_VR_XS_PCS_DIG_CTRL1_CL37_BP OSI_BIT(12) -#define XPCS_VR_XS_PCS_EEE_MCTRL1_TRN_LPI OSI_BIT(0) -#define XPCS_VR_XS_PCS_EEE_MCTRL0_LTX_EN OSI_BIT(0) -#define XPCS_VR_XS_PCS_EEE_MCTRL0_LRX_EN OSI_BIT(1) #define XPCS_SR_AN_CTRL_AN_EN OSI_BIT(12) #define XPCS_SR_MII_CTRL_AN_ENABLE OSI_BIT(12) #define XPCS_VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR OSI_BIT(0) @@ -95,7 +85,6 @@ OSI_BIT(10)) #define XPCS_VR_XS_PCS_KR_CTRL_USXG_MODE_5G OSI_BIT(10) #define 
XPCS_WRAP_UPHY_HW_INIT_CTRL_TX_EN OSI_BIT(0) -#define XPCS_WRAP_UPHY_HW_INIT_CTRL_RX_EN OSI_BIT(2) #define XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS OSI_BIT(6) #define XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_DATA_EN OSI_BIT(0) #define XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_IDDQ OSI_BIT(4) @@ -114,20 +103,19 @@ #define XPCS_CORE_CORRECTABLE_ERR OSI_BIT(10) #define XPCS_CORE_UNCORRECTABLE_ERR OSI_BIT(9) #define XPCS_REGISTER_PARITY_ERR OSI_BIT(8) -#define XPCS_BASE_PMA_MMD_SR_PMA_KR_FEC_CTRL 0x402AC -#define EN_ERR_IND OSI_BIT(1) -#define FEC_EN OSI_BIT(0) #define XPCS_VR_XS_PCS_SFTY_UE_INTR0 0xE03C0 #define XPCS_VR_XS_PCS_SFTY_CE_INTR 0xE03C8 #define XPCS_VR_XS_PCS_SFTY_TMR_CTRL 0xE03D4 -#define XPCS_SFTY_1US_MULT_MASK 0xFF +#define XPCS_SFTY_1US_MULT_MASK 0xFFU #define XPCS_SFTY_1US_MULT_SHIFT 0U #endif /** @} */ -int xpcs_init(struct osi_core_priv_data *osi_core); -int xpcs_start(struct osi_core_priv_data *osi_core); -int xpcs_eee(struct osi_core_priv_data *osi_core, unsigned int en_dis); +nve32_t xpcs_init(struct osi_core_priv_data *osi_core); +nve32_t xpcs_start(struct osi_core_priv_data *osi_core); +#ifndef OSI_STRIPPED_LIB +nve32_t xpcs_eee(struct osi_core_priv_data *osi_core, nveu32_t en_dis); +#endif /* !OSI_STRIPPED_LIB */ /** * @brief xpcs_read - read from xpcs. @@ -139,11 +127,11 @@ int xpcs_eee(struct osi_core_priv_data *osi_core, unsigned int en_dis); * * @retval value read from xpcs register. 
*/ -static inline unsigned int xpcs_read(void *xpcs_base, unsigned int reg_addr) +static inline nveu32_t xpcs_read(void *xpcs_base, nveu32_t reg_addr) { osi_writel(((reg_addr >> XPCS_REG_ADDR_SHIFT) & XPCS_REG_ADDR_MASK), - ((unsigned char *)xpcs_base + XPCS_ADDRESS)); - return osi_readl((unsigned char *)xpcs_base + + ((nveu8_t *)xpcs_base + XPCS_ADDRESS)); + return osi_readl((nveu8_t *)xpcs_base + ((reg_addr) & XPCS_REG_VALUE_MASK)); } @@ -156,12 +144,12 @@ static inline unsigned int xpcs_read(void *xpcs_base, unsigned int reg_addr) * @param[in] reg_addr: register address for writing * @param[in] val: write value to register address */ -static inline void xpcs_write(void *xpcs_base, unsigned int reg_addr, - unsigned int val) +static inline void xpcs_write(void *xpcs_base, nveu32_t reg_addr, + nveu32_t val) { osi_writel(((reg_addr >> XPCS_REG_ADDR_SHIFT) & XPCS_REG_ADDR_MASK), - ((unsigned char *)xpcs_base + XPCS_ADDRESS)); - osi_writel(val, (unsigned char *)xpcs_base + + ((nveu8_t *)xpcs_base + XPCS_ADDRESS)); + osi_writel(val, (nveu8_t *)xpcs_base + (((reg_addr) & XPCS_REG_VALUE_MASK))); } @@ -176,28 +164,33 @@ static inline void xpcs_write(void *xpcs_base, unsigned int reg_addr, * @param[in] val: write value to register address * * @retval 0 on success - * @retval -1 on failure. 
+ * @retval XPCS_WRITE_FAIL_CODE on failure * */ -static inline int xpcs_write_safety(struct osi_core_priv_data *osi_core, - unsigned int reg_addr, - unsigned int val) +static inline nve32_t xpcs_write_safety(struct osi_core_priv_data *osi_core, + nveu32_t reg_addr, + nveu32_t val) { void *xpcs_base = osi_core->xpcs_base; - unsigned int read_val; - int retry = 10; + nveu32_t read_val; + nve32_t retry = 10; + nve32_t ret = XPCS_WRITE_FAIL_CODE; while (--retry > 0) { xpcs_write(xpcs_base, reg_addr, val); read_val = xpcs_read(xpcs_base, reg_addr); if (val == read_val) { - return 0; + ret = 0; + break; } osi_core->osd_ops.udelay(OSI_DELAY_1US); } - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "xpcs_write_safety failed", reg_addr); - return -1; + if (ret != 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "xpcs_write_safety failed", reg_addr); + } + + return ret; } #endif diff --git a/osi/dma/Makefile.interface.tmk b/osi/dma/Makefile.interface.tmk index c12901e..5df87ee 100644 --- a/osi/dma/Makefile.interface.tmk +++ b/osi/dma/Makefile.interface.tmk @@ -26,7 +26,11 @@ ifdef NV_INTERFACE_FLAG_SHARED_LIBRARY_SECTION NV_INTERFACE_NAME := nvethernetcl +ifeq ($(NV_BUILD_CONFIGURATION_IS_SAFETY), 0) NV_INTERFACE_EXPORTS := lib$(NV_INTERFACE_NAME) +else +NV_INTERFACE_EXPORTS := lib$(NV_INTERFACE_NAME)_safety +endif NV_INTERFACE_PUBLIC_INCLUDES := \ ./include endif diff --git a/osi/dma/Makefile.tmk b/osi/dma/Makefile.tmk index 7e1e52d..fbb585a 100644 --- a/osi/dma/Makefile.tmk +++ b/osi/dma/Makefile.tmk @@ -30,13 +30,10 @@ NV_COMPONENT_STRICT_WARNINGS_qnx_64 := 1 NV_COMPONENT_NAME := nvethernetcl NV_COMPONENT_OWN_INTERFACE_DIR := . 
NV_COMPONENT_SOURCES := \ - eqos_dma.c \ - osi_dma.c \ - osi_dma_txrx.c \ - mgbe_dma.c \ - eqos_desc.c \ - mgbe_desc.c \ - debug.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/osi_dma.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/osi_dma_txrx.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/eqos_desc.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/mgbe_desc.c \ $(NV_SOURCE)/nvethernetrm/osi/common/osi_common.c \ $(NV_SOURCE)/nvethernetrm/osi/common/eqos_common.c \ $(NV_SOURCE)/nvethernetrm/osi/common/mgbe_common.c @@ -45,10 +42,17 @@ NV_COMPONENT_INCLUDES := \ $(NV_SOURCE)/nvethernetrm/include \ $(NV_SOURCE)/nvethernetrm/osi/common/include -ifeq ($(NV_BUILD_CONFIGURATION_IS_SAFETY),0) -NV_COMPONENT_CFLAGS += -DOSI_DEBUG +include $(NV_SOURCE)/nvethernetrm/include/config.tmk + +ifeq ($(OSI_DEBUG),1) +NV_COMPONENT_SOURCES += $(NV_SOURCE)/nvethernetrm/osi/dma/debug.c endif +ifeq ($(OSI_STRIPPED_LIB),0) +NV_COMPONENT_SOURCES += \ + $(NV_SOURCE)/nvethernetrm/osi/dma/mgbe_dma.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/eqos_dma.c +endif include $(NV_BUILD_SHARED_LIBRARY) endif diff --git a/osi/dma/debug.c b/osi/dma/debug.c index 3ccb451..fc14ef7 100644 --- a/osi/dma/debug.c +++ b/osi/dma/debug.c @@ -35,7 +35,7 @@ static void dump_struct(struct osi_dma_priv_data *osi_dma, unsigned char *ptr, unsigned long size) { - nveu32_t i = 0, rem, j; + nveu32_t i = 0, rem, j = 0; unsigned long temp; if (ptr == OSI_NULL) { @@ -129,7 +129,9 @@ void reg_dump(struct osi_dma_priv_data *osi_dma) max_addr = 0x14EC; break; case OSI_MGBE_MAC_3_10: +#ifndef OSI_STRIPPED_LIB case OSI_MGBE_MAC_4_00: +#endif addr = 0x3100; max_addr = 0x35FC; break; @@ -205,9 +207,9 @@ static void tx_desc_dump(struct osi_dma_priv_data *osi_dma, unsigned int f_idx, int cnt; if (f_idx > l_idx) { - cnt = l_idx + osi_dma->tx_ring_sz - f_idx; + cnt = (int)(l_idx + osi_dma->tx_ring_sz - f_idx); } else { - cnt = l_idx - f_idx; + cnt = (int)(l_idx - f_idx); } for (i = f_idx; cnt >= 0; cnt--) { @@ -250,6 +252,8 @@ void desc_dump(struct osi_dma_priv_data 
*osi_dma, unsigned int f_idx, rx_desc_dump(osi_dma, f_idx, chan); break; default: + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, + "Invalid desc dump flag\n", 0ULL); break; } } diff --git a/osi/dma/dma_local.h b/osi/dma/dma_local.h index 465fd03..b73d6af 100644 --- a/osi/dma/dma_local.h +++ b/osi/dma/dma_local.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -24,8 +24,10 @@ #ifndef INCLUDED_DMA_LOCAL_H #define INCLUDED_DMA_LOCAL_H +#include "../osi/common/common.h" #include #include "eqos_dma.h" +#include "mgbe_dma.h" /** * @brief Maximum number of OSI DMA instances. @@ -46,56 +48,17 @@ * @brief MAC DMA Channel operations */ struct dma_chan_ops { - /** Called to set Transmit Ring length */ - void (*set_tx_ring_len)(struct osi_dma_priv_data *osi_dma, - nveu32_t chan, - nveu32_t len); - /** Called to set Transmit Ring Base address */ - void (*set_tx_ring_start_addr)(void *addr, nveu32_t chan, - nveu64_t base_addr); - /** Called to update Tx Ring tail pointer */ - void (*update_tx_tailptr)(void *addr, nveu32_t chan, - nveu64_t tailptr); - /** Called to set Receive channel ring length */ - void (*set_rx_ring_len)(struct osi_dma_priv_data *osi_dma, - nveu32_t chan, - nveu32_t len); - /** Called to set receive channel ring base address */ - void (*set_rx_ring_start_addr)(void *addr, nveu32_t chan, - nveu64_t base_addr); - /** Called to update Rx ring tail pointer */ - void (*update_rx_tailptr)(void *addr, nveu32_t chan, - nveu64_t tailptr); - /** Called to disable DMA Tx channel interrupts at wrapper level */ - void (*disable_chan_tx_intr)(void *addr, nveu32_t chan); - /** Called to enable DMA Tx channel interrupts at wrapper level */ - void (*enable_chan_tx_intr)(void *addr, nveu32_t chan); - /** Called 
to disable DMA Rx channel interrupts at wrapper level */ - void (*disable_chan_rx_intr)(void *addr, nveu32_t chan); - /** Called to enable DMA Rx channel interrupts at wrapper level */ - void (*enable_chan_rx_intr)(void *addr, nveu32_t chan); - /** Called to start the Tx/Rx DMA */ - void (*start_dma)(struct osi_dma_priv_data *osi_dma, nveu32_t chan); - /** Called to stop the Tx/Rx DMA */ - void (*stop_dma)(struct osi_dma_priv_data *osi_dma, nveu32_t chan); - /** Called to initialize the DMA channel */ - nve32_t (*init_dma_channel)(struct osi_dma_priv_data *osi_dma); - /** Called to set Rx buffer length */ - void (*set_rx_buf_len)(struct osi_dma_priv_data *osi_dma); #ifndef OSI_STRIPPED_LIB - /** Called periodically to read and validate safety critical - * registers against last written value */ - nve32_t (*validate_regs)(struct osi_dma_priv_data *osi_dma); /** Called to configure the DMA channel slot function */ void (*config_slot)(struct osi_dma_priv_data *osi_dma, nveu32_t chan, nveu32_t set, nveu32_t interval); #endif /* !OSI_STRIPPED_LIB */ - /** Called to clear VM Tx interrupt */ - void (*clear_vm_tx_intr)(void *addr, nveu32_t chan); - /** Called to clear VM Rx interrupt */ - void (*clear_vm_rx_intr)(void *addr, nveu32_t chan); +#ifdef OSI_DEBUG + /** Called to enable/disable debug interrupt */ + void (*debug_intr_config)(struct osi_dma_priv_data *osi_dma); +#endif }; /** @@ -103,8 +66,9 @@ struct dma_chan_ops { */ struct desc_ops { /** Called to get receive checksum */ - void (*get_rx_csum)(struct osi_rx_desc *rx_desc, + void (*get_rx_csum)(const struct osi_rx_desc *const rx_desc, struct osi_rx_pkt_cx *rx_pkt_cx); +#ifndef OSI_STRIPPED_LIB /** Called to get rx error stats */ void (*update_rx_err_stats)(struct osi_rx_desc *rx_desc, struct osi_pkt_err_stats *stats); @@ -114,11 +78,12 @@ struct desc_ops { /** Called to get rx HASH from descriptor */ void (*get_rx_hash)(struct osi_rx_desc *rx_desc, struct osi_rx_pkt_cx *rx_pkt_cx); +#endif /* !OSI_STRIPPED_LIB */ 
/** Called to get RX hw timestamp */ - int (*get_rx_hwstamp)(struct osi_dma_priv_data *osi_dma, - struct osi_rx_desc *rx_desc, - struct osi_rx_desc *context_desc, - struct osi_rx_pkt_cx *rx_pkt_cx); + nve32_t (*get_rx_hwstamp)(const struct osi_dma_priv_data *const osi_dma, + const struct osi_rx_desc *const rx_desc, + const struct osi_rx_desc *const context_desc, + struct osi_rx_pkt_cx *rx_pkt_cx); }; /** @@ -139,14 +104,15 @@ struct dma_local { nveu32_t init_done; /** Holds the MAC version of MAC controller */ nveu32_t mac_ver; - /** Represents whether DMA interrupts are VM or Non-VM */ - nveu32_t vm_intr; /** Magic number to validate osi_dma pointer */ nveu64_t magic_num; /** Maximum number of DMA channels */ - nveu32_t max_chans; + nveu32_t num_max_chans; + /** Exact MAC used across SOCs 0:Legacy EQOS, 1:Orin EQOS, 2:Orin MGBE */ + nveu32_t l_mac_ver; }; +#ifndef OSI_STRIPPED_LIB /** * @brief eqos_init_dma_chan_ops - Initialize eqos DMA operations. * @@ -172,18 +138,19 @@ void eqos_init_dma_chan_ops(struct dma_chan_ops *ops); * - De-initialization: No */ void mgbe_init_dma_chan_ops(struct dma_chan_ops *ops); +#endif /* !OSI_STRIPPED_LIB */ /** * @brief eqos_get_desc_ops - EQOS init DMA descriptor operations */ -void eqos_init_desc_ops(struct desc_ops *d_ops); +void eqos_init_desc_ops(struct desc_ops *p_dops); /** * @brief mgbe_get_desc_ops - MGBE init DMA descriptor operations */ -void mgbe_init_desc_ops(struct desc_ops *d_ops); +void mgbe_init_desc_ops(struct desc_ops *p_dops); -nve32_t init_desc_ops(struct osi_dma_priv_data *osi_dma); +nve32_t init_desc_ops(const struct osi_dma_priv_data *const osi_dma); /** * @brief osi_hw_transmit - Initialize Tx DMA descriptors for a channel @@ -196,8 +163,7 @@ nve32_t init_desc_ops(struct osi_dma_priv_data *osi_dma); * * @param[in, out] osi_dma: OSI DMA private data. * @param[in] tx_ring: DMA Tx ring. - * @param[in] ops: DMA channel operations. - * @param[in] chan: DMA Tx channel number. Max OSI_EQOS_MAX_NUM_CHANS. 
+ * @param[in] dma_chan: DMA Tx channel number. Max OSI_EQOS_MAX_NUM_CHANS. * * @note * API Group: @@ -207,8 +173,7 @@ nve32_t init_desc_ops(struct osi_dma_priv_data *osi_dma); */ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, struct osi_tx_ring *tx_ring, - struct dma_chan_ops *ops, - nveu32_t chan); + nveu32_t dma_chan); /* Function prototype needed for misra */ @@ -232,41 +197,36 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, * @retval 0 on success * @retval -1 on failure. */ -nve32_t dma_desc_init(struct osi_dma_priv_data *osi_dma, - struct dma_chan_ops *ops); +nve32_t dma_desc_init(struct osi_dma_priv_data *osi_dma); static inline nveu32_t is_power_of_two(nveu32_t num) { + nveu32_t ret = OSI_DISABLE; + if ((num > 0U) && ((num & (num - 1U)) == 0U)) { - return OSI_ENABLE; + ret = OSI_ENABLE; } - return OSI_DISABLE; + return ret; } -/** - * @addtogroup Helper Helper MACROS - * - * @brief EQOS generic helper MACROS. - * @{ - */ -#define CHECK_CHAN_BOUND(chan) \ - { \ - if ((chan) >= OSI_EQOS_MAX_NUM_CHANS) { \ - return; \ - } \ - } - -#define MGBE_CHECK_CHAN_BOUND(chan) \ -{ \ - if ((chan) >= OSI_MGBE_MAX_NUM_CHANS) { \ - return; \ - } \ -} \ - #define BOOLEAN_FALSE (0U != 0U) #define L32(data) ((nveu32_t)((data) & 0xFFFFFFFFU)) #define H32(data) ((nveu32_t)(((data) & 0xFFFFFFFF00000000UL) >> 32UL)) + +static inline void update_rx_tail_ptr(const struct osi_dma_priv_data *const osi_dma, + nveu32_t dma_chan, + nveu64_t tailptr) +{ + nveu32_t chan = dma_chan & 0xFU; + const nveu32_t tail_ptr_reg[2] = { + EQOS_DMA_CHX_RDTP(chan), + MGBE_DMA_CHX_RDTLP(chan) + }; + + osi_writel(L32(tailptr), (nveu8_t *)osi_dma->base + tail_ptr_reg[osi_dma->mac]); +} + /** @} */ #endif /* INCLUDED_DMA_LOCAL_H */ diff --git a/osi/dma/eqos_desc.c b/osi/dma/eqos_desc.c index f45b200..389ac72 100644 --- a/osi/dma/eqos_desc.c +++ b/osi/dma/eqos_desc.c @@ -23,6 +23,7 @@ #include "dma_local.h" #include "hw_desc.h" +#ifndef OSI_STRIPPED_LIB /** * @brief eqos_get_rx_vlan - Get 
Rx VLAN from descriptor * @@ -77,6 +78,22 @@ static inline void eqos_update_rx_err_stats(struct osi_rx_desc *rx_desc, } } +/** + * @brief eqos_get_rx_hash - Get Rx packet hash from descriptor if valid + * + * Algorithm: This routine will be invoked by OSI layer itself to get received + * packet Hash from descriptor if RSS hash is valid and it also sets the type + * of RSS hash. + * + * @param[in] rx_desc: Rx Descriptor. + * @param[in] rx_pkt_cx: Per-Rx packet context structure + */ +static void eqos_get_rx_hash(OSI_UNUSED struct osi_rx_desc *rx_desc, + OSI_UNUSED struct osi_rx_pkt_cx *rx_pkt_cx) +{ +} +#endif /* !OSI_STRIPPED_LIB */ + /** * @brief eqos_get_rx_csum - Get the Rx checksum from descriptor if valid * @@ -98,7 +115,7 @@ static inline void eqos_update_rx_err_stats(struct osi_rx_desc *rx_desc, * @param[in, out] rx_desc: Rx descriptor * @param[in, out] rx_pkt_cx: Per-Rx packet context structure */ -static void eqos_get_rx_csum(struct osi_rx_desc *rx_desc, +static void eqos_get_rx_csum(const struct osi_rx_desc *const rx_desc, struct osi_rx_pkt_cx *rx_pkt_cx) { nveu32_t pkt_type; @@ -108,66 +125,49 @@ static void eqos_get_rx_csum(struct osi_rx_desc *rx_desc, * Set none/unnecessary bit as well for other OS to check and * take proper actions. 
*/ - if ((rx_desc->rdes3 & RDES3_RS1V) != RDES3_RS1V) { - return; - } - - if ((rx_desc->rdes1 & - (RDES1_IPCE | RDES1_IPCB | RDES1_IPHE)) == OSI_DISABLE) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UNNECESSARY; - } - - if ((rx_desc->rdes1 & RDES1_IPCB) != OSI_DISABLE) { - return; - } - - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4; - if ((rx_desc->rdes1 & RDES1_IPHE) == RDES1_IPHE) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4_BAD; - } - - pkt_type = rx_desc->rdes1 & RDES1_PT_MASK; - if ((rx_desc->rdes1 & RDES1_IPV4) == RDES1_IPV4) { - if (pkt_type == RDES1_PT_UDP) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UDPv4; - } else if (pkt_type == RDES1_PT_TCP) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCPv4; - - } else { - /* Do nothing */ - } - } else if ((rx_desc->rdes1 & RDES1_IPV6) == RDES1_IPV6) { - if (pkt_type == RDES1_PT_UDP) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UDPv6; - } else if (pkt_type == RDES1_PT_TCP) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCPv6; - - } else { - /* Do nothing */ + if ((rx_desc->rdes3 & RDES3_RS1V) == RDES3_RS1V) { + if ((rx_desc->rdes1 & + (RDES1_IPCE | RDES1_IPCB | RDES1_IPHE)) == OSI_DISABLE) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UNNECESSARY; } - } else { - /* Do nothing */ + if ((rx_desc->rdes1 & RDES1_IPCB) != RDES1_IPCB) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4; + if ((rx_desc->rdes1 & RDES1_IPHE) == RDES1_IPHE) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4_BAD; + } + + pkt_type = rx_desc->rdes1 & RDES1_PT_MASK; + if ((rx_desc->rdes1 & RDES1_IPV4) == RDES1_IPV4) { + if (pkt_type == RDES1_PT_UDP) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UDPv4; + } else if (pkt_type == RDES1_PT_TCP) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCPv4; + + } else { + /* Do nothing */ + } + } else if ((rx_desc->rdes1 & RDES1_IPV6) == RDES1_IPV6) { + if (pkt_type == RDES1_PT_UDP) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UDPv6; + } else if (pkt_type == RDES1_PT_TCP) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCPv6; + + } else { + /* Do nothing */ + } + + } else { + /* Do nothing */ + } + + if 
((rx_desc->rdes1 & RDES1_IPCE) == RDES1_IPCE) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCP_UDP_BAD; + } + } } - if ((rx_desc->rdes1 & RDES1_IPCE) == RDES1_IPCE) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCP_UDP_BAD; - } -} - -/** - * @brief eqos_get_rx_hash - Get Rx packet hash from descriptor if valid - * - * Algorithm: This routine will be invoked by OSI layer itself to get received - * packet Hash from descriptor if RSS hash is valid and it also sets the type - * of RSS hash. - * - * @param[in] rx_desc: Rx Descriptor. - * @param[in] rx_pkt_cx: Per-Rx packet context structure - */ -static void eqos_get_rx_hash(OSI_UNUSED struct osi_rx_desc *rx_desc, - OSI_UNUSED struct osi_rx_pkt_cx *rx_pkt_cx) -{ + return; } /** @@ -186,12 +186,13 @@ static void eqos_get_rx_hash(OSI_UNUSED struct osi_rx_desc *rx_desc, * @retval -1 if TimeStamp is not available * @retval 0 if TimeStamp is available. */ -static int eqos_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma, - struct osi_rx_desc *rx_desc, - struct osi_rx_desc *context_desc, - struct osi_rx_pkt_cx *rx_pkt_cx) +static nve32_t eqos_get_rx_hwstamp(const struct osi_dma_priv_data *const osi_dma, + const struct osi_rx_desc *const rx_desc, + const struct osi_rx_desc *const context_desc, + struct osi_rx_pkt_cx *rx_pkt_cx) { - int retry; + nve32_t ret = 0; + nve32_t retry; /* Check for RS1V/TSA/TD valid */ if (((rx_desc->rdes3 & RDES3_RS1V) == RDES3_RS1V) && @@ -205,7 +206,8 @@ static int eqos_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma, OSI_INVALID_VALUE) && (context_desc->rdes1 == OSI_INVALID_VALUE)) { - return -1; + ret = -1; + goto fail; } /* Update rx pkt context flags to indicate * PTP */ @@ -219,27 +221,31 @@ static int eqos_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma, } if (retry == 10) { /* Timed out waiting for Rx timestamp */ - return -1; + ret = -1; + goto fail; } rx_pkt_cx->ns = context_desc->rdes0 + (OSI_NSEC_PER_SEC * context_desc->rdes1); if (rx_pkt_cx->ns < context_desc->rdes0) { /* Will not hit this case */ - 
return -1; + ret = -1; + goto fail; } } else { - return -1; + ret = -1; } - - return 0; +fail: + return ret; } -void eqos_init_desc_ops(struct desc_ops *d_ops) +void eqos_init_desc_ops(struct desc_ops *p_dops) { - d_ops->get_rx_csum = eqos_get_rx_csum; - d_ops->update_rx_err_stats = eqos_update_rx_err_stats; - d_ops->get_rx_vlan = eqos_get_rx_vlan; - d_ops->get_rx_hash = eqos_get_rx_hash; - d_ops->get_rx_hwstamp = eqos_get_rx_hwstamp; +#ifndef OSI_STRIPPED_LIB + p_dops->update_rx_err_stats = eqos_update_rx_err_stats; + p_dops->get_rx_vlan = eqos_get_rx_vlan; + p_dops->get_rx_hash = eqos_get_rx_hash; +#endif /* !OSI_STRIPPED_LIB */ + p_dops->get_rx_csum = eqos_get_rx_csum; + p_dops->get_rx_hwstamp = eqos_get_rx_hwstamp; } diff --git a/osi/dma/eqos_dma.c b/osi/dma/eqos_dma.c index 095ddbf..6f6fc6d 100644 --- a/osi/dma/eqos_dma.c +++ b/osi/dma/eqos_dma.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -20,825 +20,10 @@ * DEALINGS IN THE SOFTWARE. */ +#ifndef OSI_STRIPPED_LIB #include "../osi/common/common.h" #include "dma_local.h" #include "eqos_dma.h" -#include "../osi/common/type.h" - -/** - * @brief eqos_dma_safety_config - EQOS MAC DMA safety configuration - */ -static struct dma_func_safety eqos_dma_safety_config; - -/** - * @brief Write to safety critical register. - * - * @note - * Algorithm: - * - Acquire RW lock, so that eqos_validate_dma_regs does not run while - * updating the safety critical register. - * - call osi_writel() to actually update the memory mapped register. - * - Store the same value in eqos_dma_safety_config->reg_val[idx], so that - * this latest value will be compared when eqos_validate_dma_regs is - * scheduled. - * - * @param[in] osi_dma: OSI DMA private data structure. 
- * @param[in] val: Value to be written. - * @param[in] addr: memory mapped register address to be written to. - * @param[in] idx: Index of register corresponding to enum func_safety_dma_regs. - * - * @pre MAC has to be out of reset, and clocks supplied. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: Yes - */ -static inline void eqos_dma_safety_writel(struct osi_dma_priv_data *osi_dma, - nveu32_t val, void *addr, - nveu32_t idx) -{ - struct dma_func_safety *config = &eqos_dma_safety_config; - - osi_lock_irq_enabled(&config->dma_safety_lock); - osi_writela(osi_dma->osd, val, addr); - config->reg_val[idx] = (val & config->reg_mask[idx]); - osi_unlock_irq_enabled(&config->dma_safety_lock); -} - -/** - * @brief Initialize the eqos_dma_safety_config. - * - * @param[in] osi_dma: OSI DMA private data structure. - * - * @note - * Algorithm: - * - Populate the list of safety critical registers and provide - * - the address of the register - * - Register mask (to ignore reserved/self-critical bits in the reg). - * See eqos_validate_dma_regs which can be invoked periodically to compare - * the last written value to this register vs the actual value read when - * eqos_validate_dma_regs is scheduled. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - */ -static void eqos_dma_safety_init(struct osi_dma_priv_data *osi_dma) -{ - struct dma_func_safety *config = &eqos_dma_safety_config; - nveu8_t *base = (nveu8_t *)osi_dma->base; - nveu32_t val; - nveu32_t i, idx; - - /* Initialize all reg address to NULL, since we may not use - * some regs depending on the number of DMA chans enabled. 
- */ - for (i = EQOS_DMA_CH0_CTRL_IDX; i < EQOS_MAX_DMA_SAFETY_REGS; i++) { - config->reg_addr[i] = OSI_NULL; - } - - for (i = 0U; i < osi_dma->num_dma_chans; i++) { - idx = osi_dma->dma_chans[i]; -#if 0 - CHECK_CHAN_BOUND(idx); -#endif - config->reg_addr[EQOS_DMA_CH0_CTRL_IDX + idx] = base + - EQOS_DMA_CHX_CTRL(idx); - config->reg_addr[EQOS_DMA_CH0_TX_CTRL_IDX + idx] = base + - EQOS_DMA_CHX_TX_CTRL(idx); - config->reg_addr[EQOS_DMA_CH0_RX_CTRL_IDX + idx] = base + - EQOS_DMA_CHX_RX_CTRL(idx); - config->reg_addr[EQOS_DMA_CH0_TDRL_IDX + idx] = base + - EQOS_DMA_CHX_TDRL(idx); - config->reg_addr[EQOS_DMA_CH0_RDRL_IDX + idx] = base + - EQOS_DMA_CHX_RDRL(idx); - config->reg_addr[EQOS_DMA_CH0_INTR_ENA_IDX + idx] = base + - EQOS_DMA_CHX_INTR_ENA(idx); - - config->reg_mask[EQOS_DMA_CH0_CTRL_IDX + idx] = - EQOS_DMA_CHX_CTRL_MASK; - config->reg_mask[EQOS_DMA_CH0_TX_CTRL_IDX + idx] = - EQOS_DMA_CHX_TX_CTRL_MASK; - config->reg_mask[EQOS_DMA_CH0_RX_CTRL_IDX + idx] = - EQOS_DMA_CHX_RX_CTRL_MASK; - config->reg_mask[EQOS_DMA_CH0_TDRL_IDX + idx] = - EQOS_DMA_CHX_TDRL_MASK; - config->reg_mask[EQOS_DMA_CH0_RDRL_IDX + idx] = - EQOS_DMA_CHX_RDRL_MASK; - config->reg_mask[EQOS_DMA_CH0_INTR_ENA_IDX + idx] = - EQOS_DMA_CHX_INTR_ENA_MASK; - } - - /* Initialize current power-on-reset values of these registers. */ - for (i = EQOS_DMA_CH0_CTRL_IDX; i < EQOS_MAX_DMA_SAFETY_REGS; i++) { - if (config->reg_addr[i] == OSI_NULL) { - continue; - } - val = osi_readl((nveu8_t *)config->reg_addr[i]); - config->reg_val[i] = val & config->reg_mask[i]; - } - - osi_lock_init(&config->dma_safety_lock); -} - -/** - * @brief eqos_disable_chan_tx_intr - Disables DMA Tx channel interrupts. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Tx channel number. 
- * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - Mapping of physical IRQ line to DMA channel need to be maintained at - * OSDependent layer and pass corresponding channel number. - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: Yes - */ -static void eqos_disable_chan_tx_intr(void *addr, nveu32_t chan) -{ - nveu32_t cntrl, status; - -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - /* Clear irq before disabling */ - status = osi_readl((nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_STATUS(chan)); - if ((status & EQOS_VIRT_INTR_CHX_STATUS_TX) == - EQOS_VIRT_INTR_CHX_STATUS_TX) { - osi_writel(EQOS_DMA_CHX_STATUS_CLEAR_TX, - (nveu8_t *)addr + EQOS_DMA_CHX_STATUS(chan)); - osi_writel(EQOS_VIRT_INTR_CHX_STATUS_TX, - (nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_STATUS(chan)); - } - - /* Disable the irq */ - cntrl = osi_readl((nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_CNTRL(chan)); - cntrl &= ~EQOS_VIRT_INTR_CHX_CNTRL_TX; - osi_writel(cntrl, (nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_CNTRL(chan)); -} - -/** - * @brief eqos_enable_chan_tx_intr - Enable Tx channel interrupts. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Tx channel number. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - Mapping of physical IRQ line to DMA channel need to be maintained at - * OSDependent layer and pass corresponding channel number. 
- * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - */ -static void eqos_enable_chan_tx_intr(void *addr, nveu32_t chan) -{ - nveu32_t cntrl; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - cntrl = osi_readl((nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_CNTRL(chan)); - cntrl |= EQOS_VIRT_INTR_CHX_CNTRL_TX; - osi_writel(cntrl, (nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_CNTRL(chan)); -} - -/** - * @brief eqos_disable_chan_rx_intr - Disable Rx channel interrupts. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Rx channel number. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - Mapping of physical IRQ line to DMA channel need to be maintained at - * OSDependent layer and pass corresponding channel number. - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: Yes - */ -static void eqos_disable_chan_rx_intr(void *addr, nveu32_t chan) -{ - nveu32_t cntrl, status; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - /* Clear irq before disabling */ - status = osi_readl((nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_STATUS(chan)); - if ((status & EQOS_VIRT_INTR_CHX_STATUS_RX) == - EQOS_VIRT_INTR_CHX_STATUS_RX) { - osi_writel(EQOS_DMA_CHX_STATUS_CLEAR_RX, - (nveu8_t *)addr + EQOS_DMA_CHX_STATUS(chan)); - osi_writel(EQOS_VIRT_INTR_CHX_STATUS_RX, - (nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_STATUS(chan)); - } - - /* Disable irq */ - cntrl = osi_readl((nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_CNTRL(chan)); - cntrl &= ~EQOS_VIRT_INTR_CHX_CNTRL_RX; - osi_writel(cntrl, (nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_CNTRL(chan)); -} - -/** - * @brief eqos_enable_chan_rx_intr - Enable Rx channel interrupts. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. 
- * @param[in] chan: DMA Rx channel number. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - */ -static void eqos_enable_chan_rx_intr(void *addr, nveu32_t chan) -{ - nveu32_t cntrl; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - cntrl = osi_readl((nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_CNTRL(chan)); - cntrl |= EQOS_VIRT_INTR_CHX_CNTRL_RX; - osi_writel(cntrl, (nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_CNTRL(chan)); -} - -/** - * @brief eqos_set_tx_ring_len - Set DMA Tx ring length. - * - * @note - * Algorithm: - * - Set DMA Tx channel ring length for specific channel. - * - * @param[in] osi_dma: OSI DMA private data structure. - * @param[in] chan: DMA Tx channel number. - * @param[in] len: Length. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - */ -static void eqos_set_tx_ring_len(struct osi_dma_priv_data *osi_dma, - nveu32_t chan, - nveu32_t len) -{ - void *addr = osi_dma->base; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - eqos_dma_safety_writel(osi_dma, len, (nveu8_t *)addr + - EQOS_DMA_CHX_TDRL(chan), - EQOS_DMA_CH0_TDRL_IDX + chan); -} - -/** - * @brief eqos_set_tx_ring_start_addr - Set DMA Tx ring base address. - * - * @note - * Algorithm: - * - Sets DMA Tx ring base address for specific channel. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Tx channel number. - * @param[in] tx_desc: Tx desc base address. 
- * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - */ -static void eqos_set_tx_ring_start_addr(void *addr, nveu32_t chan, - nveu64_t tx_desc) -{ - nveu64_t tmp; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - tmp = H32(tx_desc); - if (tmp < UINT_MAX) { - osi_writel((nveu32_t)tmp, (nveu8_t *)addr + - EQOS_DMA_CHX_TDLH(chan)); - } - - tmp = L32(tx_desc); - if (tmp < UINT_MAX) { - osi_writel((nveu32_t)tmp, (nveu8_t *)addr + - EQOS_DMA_CHX_TDLA(chan)); - } -} - -/** - * @brief eqos_update_tx_tailptr - Updates DMA Tx ring tail pointer. - * - * @note - * Algorithm: - * - Updates DMA Tx ring tail pointer for specific channel. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Tx channel number. - * @param[in] tailptr: DMA Tx ring tail pointer. - * - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - */ -static void eqos_update_tx_tailptr(void *addr, nveu32_t chan, - nveu64_t tailptr) -{ - nveu64_t tmp; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - tmp = L32(tailptr); - if (tmp < UINT_MAX) { - osi_writel((nveu32_t)tmp, (nveu8_t *)addr + - EQOS_DMA_CHX_TDTP(chan)); - } -} - -/** - * @brief eqos_set_rx_ring_len - Set Rx channel ring length. - * - * @note - * Algorithm: - * - Sets DMA Rx channel ring length for specific DMA channel. - * - * @param[in] osi_dma: OSI DMA private data structure. - * @param[in] chan: DMA Rx channel number. 
- * @param[in] len: Length - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - */ -static void eqos_set_rx_ring_len(struct osi_dma_priv_data *osi_dma, - nveu32_t chan, - nveu32_t len) -{ - void *addr = osi_dma->base; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - eqos_dma_safety_writel(osi_dma, len, (nveu8_t *)addr + - EQOS_DMA_CHX_RDRL(chan), - EQOS_DMA_CH0_RDRL_IDX + chan); -} - -/** - * @brief eqos_set_rx_ring_start_addr - Set DMA Rx ring base address. - * - * @note - * Algorithm: - * - Sets DMA Rx channel ring base address. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Rx channel number. - * @param[in] rx_desc: DMA Rx desc base address. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - */ -static void eqos_set_rx_ring_start_addr(void *addr, nveu32_t chan, - nveu64_t rx_desc) -{ - nveu64_t tmp; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - tmp = H32(rx_desc); - if (tmp < UINT_MAX) { - osi_writel((nveu32_t)tmp, (nveu8_t *)addr + - EQOS_DMA_CHX_RDLH(chan)); - } - - tmp = L32(rx_desc); - if (tmp < UINT_MAX) { - osi_writel((nveu32_t)tmp, (nveu8_t *)addr + - EQOS_DMA_CHX_RDLA(chan)); - } -} - -/** - * @brief eqos_update_rx_tailptr - Update Rx ring tail pointer - * - * @note - * Algorithm: - * - Updates DMA Rx channel tail pointer for specific channel. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Rx channel number. 
- * @param[in] tailptr: Tail pointer - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - */ -static void eqos_update_rx_tailptr(void *addr, nveu32_t chan, - nveu64_t tailptr) -{ - nveu64_t tmp; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - tmp = L32(tailptr); - if (tmp < UINT_MAX) { - osi_writel((nveu32_t)tmp, (nveu8_t *)addr + - EQOS_DMA_CHX_RDTP(chan)); - } -} - -/** - * @brief eqos_start_dma - Start DMA. - * - * @note - * Algorithm: - * - Start Tx and Rx DMA for specific channel. - * - * @param[in] osi_dma: OSI DMA private data structure. - * @param[in] chan: DMA Tx/Rx channel number. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - */ -static void eqos_start_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan) -{ - nveu32_t val; - void *addr = osi_dma->base; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - /* start Tx DMA */ - val = osi_readla(osi_dma->osd, - (nveu8_t *)addr + EQOS_DMA_CHX_TX_CTRL(chan)); - val |= OSI_BIT(0); - eqos_dma_safety_writel(osi_dma, val, (nveu8_t *)addr + - EQOS_DMA_CHX_TX_CTRL(chan), - EQOS_DMA_CH0_TX_CTRL_IDX + chan); - - /* start Rx DMA */ - val = osi_readla(osi_dma->osd, - (nveu8_t *)addr + EQOS_DMA_CHX_RX_CTRL(chan)); - val |= OSI_BIT(0); - val &= ~OSI_BIT(31); - eqos_dma_safety_writel(osi_dma, val, (nveu8_t *)addr + - EQOS_DMA_CHX_RX_CTRL(chan), - EQOS_DMA_CH0_RX_CTRL_IDX + chan); -} - -/** - * @brief eqos_stop_dma - Stop DMA. - * - * @note - * Algorithm: - * - Start Tx and Rx DMA for specific channel. - * - * @param[in] osi_dma: OSI DMA private data structure. - * @param[in] chan: DMA Tx/Rx channel number. 
- * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - * @note - * API Group: - * - Initialization: No - * - Run time: No - * - De-initialization: Yes - */ -static void eqos_stop_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan) -{ - nveu32_t val; - void *addr = osi_dma->base; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - /* stop Tx DMA */ - val = osi_readla(osi_dma->osd, - (nveu8_t *)addr + EQOS_DMA_CHX_TX_CTRL(chan)); - val &= ~OSI_BIT(0); - eqos_dma_safety_writel(osi_dma, val, (nveu8_t *)addr + - EQOS_DMA_CHX_TX_CTRL(chan), - EQOS_DMA_CH0_TX_CTRL_IDX + chan); - - /* stop Rx DMA */ - val = osi_readla(osi_dma->osd, - (nveu8_t *)addr + EQOS_DMA_CHX_RX_CTRL(chan)); - val &= ~OSI_BIT(0); - val |= OSI_BIT(31); - eqos_dma_safety_writel(osi_dma, val, (nveu8_t *)addr + - EQOS_DMA_CHX_RX_CTRL(chan), - EQOS_DMA_CH0_RX_CTRL_IDX + chan); -} - -/** - * @brief eqos_configure_dma_channel - Configure DMA channel - * - * @note - * Algorithm: - * - This takes care of configuring the below - * parameters for the DMA channel - * - Enabling DMA channel interrupts - * - Enable 8xPBL mode - * - Program Tx, Rx PBL - * - Enable TSO if HW supports - * - Program Rx Watchdog timer - * - * @param[in] chan: DMA channel number that need to be configured. - * @param[in] osi_dma: OSI DMA private data structure. - * - * @pre MAC has to be out of reset. 
- * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - */ -static void eqos_configure_dma_channel(nveu32_t chan, - struct osi_dma_priv_data *osi_dma) -{ - nveu32_t value; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - /* enable DMA channel interrupts */ - /* Enable TIE and TBUE */ - /* TIE - Transmit Interrupt Enable */ - /* TBUE - Transmit Buffer Unavailable Enable */ - /* RIE - Receive Interrupt Enable */ - /* RBUE - Receive Buffer Unavailable Enable */ - /* AIE - Abnormal Interrupt Summary Enable */ - /* NIE - Normal Interrupt Summary Enable */ - /* FBE - Fatal Bus Error Enable */ - value = osi_readl((nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_INTR_ENA(chan)); - if (osi_dma->use_virtualization == OSI_DISABLE) { - value |= EQOS_DMA_CHX_INTR_TBUE | - EQOS_DMA_CHX_INTR_RBUE; - } - - value |= EQOS_DMA_CHX_INTR_TIE | EQOS_DMA_CHX_INTR_RIE | - EQOS_DMA_CHX_INTR_FBEE | EQOS_DMA_CHX_INTR_AIE | - EQOS_DMA_CHX_INTR_NIE; - /* For multi-irqs to work nie needs to be disabled */ - value &= ~(EQOS_DMA_CHX_INTR_NIE); - eqos_dma_safety_writel(osi_dma, value, (nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_INTR_ENA(chan), - EQOS_DMA_CH0_INTR_ENA_IDX + chan); - - /* Enable 8xPBL mode */ - value = osi_readl((nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_CTRL(chan)); - value |= EQOS_DMA_CHX_CTRL_PBLX8; - eqos_dma_safety_writel(osi_dma, value, (nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_CTRL(chan), - EQOS_DMA_CH0_CTRL_IDX + chan); - - /* Configure DMA channel Transmit control register */ - value = osi_readl((nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_TX_CTRL(chan)); - /* Enable OSF mode */ - value |= EQOS_DMA_CHX_TX_CTRL_OSF; - /* TxPBL = 32*/ - value |= EQOS_DMA_CHX_TX_CTRL_TXPBL_RECOMMENDED; - /* enable TSO by default if HW supports */ - value |= EQOS_DMA_CHX_TX_CTRL_TSE; - - eqos_dma_safety_writel(osi_dma, value, (nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_TX_CTRL(chan), - EQOS_DMA_CH0_TX_CTRL_IDX + chan); - - /* Configure DMA channel Receive control 
register */ - /* Select Rx Buffer size. Needs to be rounded up to next multiple of - * bus width - */ - value = osi_readl((nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_RX_CTRL(chan)); - - /* clear previous Rx buffer size */ - value &= ~EQOS_DMA_CHX_RBSZ_MASK; - - value |= (osi_dma->rx_buf_len << EQOS_DMA_CHX_RBSZ_SHIFT); - /* RXPBL = 12 */ - value |= EQOS_DMA_CHX_RX_CTRL_RXPBL_RECOMMENDED; - eqos_dma_safety_writel(osi_dma, value, (nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_RX_CTRL(chan), - EQOS_DMA_CH0_RX_CTRL_IDX + chan); - - /* Set Receive Interrupt Watchdog Timer Count */ - /* conversion of usec to RWIT value - * Eg: System clock is 125MHz, each clock cycle would then be 8ns - * For value 0x1 in RWT, device would wait for 512 clk cycles with - * RWTU as 0x1, - * ie, (8ns x 512) => 4.096us (rounding off to 4us) - * So formula with above values is,ret = usec/4 - */ - if ((osi_dma->use_riwt == OSI_ENABLE) && - (osi_dma->rx_riwt < UINT_MAX)) { - value = osi_readl((nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_RX_WDT(chan)); - /* Mask the RWT and RWTU value */ - value &= ~(EQOS_DMA_CHX_RX_WDT_RWT_MASK | - EQOS_DMA_CHX_RX_WDT_RWTU_MASK); - /* Conversion of usec to Rx Interrupt Watchdog Timer Count */ - value |= ((osi_dma->rx_riwt * - (EQOS_AXI_CLK_FREQ / OSI_ONE_MEGA_HZ)) / - EQOS_DMA_CHX_RX_WDT_RWTU) & - EQOS_DMA_CHX_RX_WDT_RWT_MASK; - value |= EQOS_DMA_CHX_RX_WDT_RWTU_512_CYCLE; - osi_writel(value, (nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_RX_WDT(chan)); - } -} - -/** - * @brief eqos_init_dma_channel - DMA channel INIT - * - * @param[in] osi_dma: OSI DMA private data structure. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static nve32_t eqos_init_dma_channel(struct osi_dma_priv_data *osi_dma) -{ - nveu32_t chinx; - - eqos_dma_safety_init(osi_dma); - - /* configure EQOS DMA channels */ - for (chinx = 0; chinx < osi_dma->num_dma_chans; chinx++) { - eqos_configure_dma_channel(osi_dma->dma_chans[chinx], osi_dma); - } - - return 0; -} - -/** - * @brief eqos_set_rx_buf_len - Set Rx buffer length - * Sets the Rx buffer length based on the new MTU size set. - * - * @param[in, out] osi_dma: OSI DMA private data structure. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - osi_dma->mtu need to be filled with current MTU size <= 9K - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - */ -static void eqos_set_rx_buf_len(struct osi_dma_priv_data *osi_dma) -{ - nveu32_t rx_buf_len = 0U; - - /* Add Ethernet header + VLAN header + NET IP align size to MTU */ - if (osi_dma->mtu <= OSI_MAX_MTU_SIZE) { - rx_buf_len = osi_dma->mtu + OSI_ETH_HLEN + NV_VLAN_HLEN + - OSI_NET_IP_ALIGN; - } else { - rx_buf_len = OSI_MAX_MTU_SIZE + OSI_ETH_HLEN + NV_VLAN_HLEN + - OSI_NET_IP_ALIGN; - } - - /* Buffer alignment */ - osi_dma->rx_buf_len = ((rx_buf_len + (EQOS_AXI_BUS_WIDTH - 1U)) & - ~(EQOS_AXI_BUS_WIDTH - 1U)); -} - -#ifndef OSI_STRIPPED_LIB -/** - * @brief Read-validate HW registers for functional safety. - * - * @note - * Algorithm: - * - Reads pre-configured list of MAC/MTL configuration registers - * and compares with last written value for any modifications. - * - * @param[in] osi_dma: OSI DMA private data structure. - * - * @pre - * - MAC has to be out of reset. - * - osi_hw_dma_init has to be called. Internally this would initialize - * the safety_config (see osi_dma_priv_data) based on MAC version and - * which specific registers needs to be validated periodically. 
- * - Invoke this call if (osi_dma_priv_data->safety_config != OSI_NULL) - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -static nve32_t eqos_validate_dma_regs(struct osi_dma_priv_data *osi_dma) -{ - struct dma_func_safety *config = - (struct dma_func_safety *)osi_dma->safety_config; - nveu32_t cur_val; - nveu32_t i; - - osi_lock_irq_enabled(&config->dma_safety_lock); - for (i = EQOS_DMA_CH0_CTRL_IDX; i < EQOS_MAX_DMA_SAFETY_REGS; i++) { - if (config->reg_addr[i] == OSI_NULL) { - continue; - } - - cur_val = osi_readl((nveu8_t *)config->reg_addr[i]); - cur_val &= config->reg_mask[i]; - - if (cur_val == config->reg_val[i]) { - continue; - } else { - /* Register content differs from what was written. - * Return error and let safety manager (NVGaurd etc.) - * take care of corrective action. - */ - osi_unlock_irq_enabled(&config->dma_safety_lock); - return -1; - } - } - osi_unlock_irq_enabled(&config->dma_safety_lock); - - return 0; -} /** * @brief eqos_config_slot - Configure slot Checking for DMA channel @@ -895,94 +80,66 @@ static void eqos_config_slot(struct osi_dma_priv_data *osi_dma, EQOS_DMA_CHX_SLOT_CTRL(chan)); } } -#endif /* !OSI_STRIPPED_LIB */ +#ifdef OSI_DEBUG /** - * @brief eqos_clear_vm_tx_intr - Handle VM Tx interrupt + * @brief Enable/disable debug interrupt * - * @param[in] addr: MAC base address. - * @param[in] chan: DMA Tx channel number. + * @param[in] osi_dma: OSI DMA private data structure. * - * Algorithm: Clear Tx interrupt source at DMA and wrapper level. - * - * @note - * Dependencies: None. - * Protection: None. - * @retval None. 
+ * Algorithm: + * - if osi_dma->ioctl_data.arg_u32 == OSI_ENABLE enable debug interrupt + * - else disable bebug inerrupts */ -static void eqos_clear_vm_tx_intr(void *addr, nveu32_t chan) +static void eqos_debug_intr_config(struct osi_dma_priv_data *osi_dma) { -#if 0 - CHECK_CHAN_BOUND(chan); + nveu32_t chinx; + nveu32_t chan; + nveu32_t val; + nveu32_t enable = osi_dma->ioctl_data.arg_u32; + + if (enable == OSI_ENABLE) { + for (chinx = 0; chinx < osi_dma->num_dma_chans; chinx++) { + chan = osi_dma->dma_chans[chinx]; + val = osi_readl((nveu8_t *)osi_dma->base + + EQOS_DMA_CHX_INTR_ENA(chan)); + + val |= (EQOS_DMA_CHX_INTR_AIE | + EQOS_DMA_CHX_INTR_FBEE | + EQOS_DMA_CHX_INTR_RBUE | + EQOS_DMA_CHX_INTR_TBUE | + EQOS_DMA_CHX_INTR_NIE); + osi_writel(val, (nveu8_t *)osi_dma->base + + EQOS_DMA_CHX_INTR_ENA(chan)); + } + + } else { + for (chinx = 0; chinx < osi_dma->num_dma_chans; chinx++) { + chan = osi_dma->dma_chans[chinx]; + val = osi_readl((nveu8_t *)osi_dma->base + + EQOS_DMA_CHX_INTR_ENA(chan)); + val &= (~EQOS_DMA_CHX_INTR_AIE & + ~EQOS_DMA_CHX_INTR_FBEE & + ~EQOS_DMA_CHX_INTR_RBUE & + ~EQOS_DMA_CHX_INTR_TBUE & + ~EQOS_DMA_CHX_INTR_NIE); + osi_writel(val, (nveu8_t *)osi_dma->base + + EQOS_DMA_CHX_INTR_ENA(chan)); + } + } +} #endif - osi_writel(EQOS_DMA_CHX_STATUS_CLEAR_TX, - (nveu8_t *)addr + EQOS_DMA_CHX_STATUS(chan)); - osi_writel(EQOS_VIRT_INTR_CHX_STATUS_TX, - (nveu8_t *)addr + EQOS_VIRT_INTR_CHX_STATUS(chan)); - eqos_disable_chan_tx_intr(addr, chan); -} - -/** - * @brief eqos_clear_vm_rx_intr - Handle VM Rx interrupt - * - * @param[in] addr: MAC base address. - * @param[in] chan: DMA Rx channel number. - * - * Algorithm: Clear Rx interrupt source at DMA and wrapper level. - * - * @note - * Dependencies: None. - * Protection: None. - * - * @retval None. 
- */ -static void eqos_clear_vm_rx_intr(void *addr, nveu32_t chan) -{ -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - osi_writel(EQOS_DMA_CHX_STATUS_CLEAR_RX, - (nveu8_t *)addr + EQOS_DMA_CHX_STATUS(chan)); - osi_writel(EQOS_VIRT_INTR_CHX_STATUS_RX, - (nveu8_t *)addr + EQOS_VIRT_INTR_CHX_STATUS(chan)); - - eqos_disable_chan_rx_intr(addr, chan); -} - -/** - * @brief eqos_get_dma_safety_config - EQOS get DMA safety configuration - */ -void *eqos_get_dma_safety_config(void) -{ - return &eqos_dma_safety_config; -} - -/** +/* * @brief eqos_init_dma_chan_ops - Initialize EQOS DMA operations. * * @param[in] ops: DMA channel operations pointer. */ void eqos_init_dma_chan_ops(struct dma_chan_ops *ops) { - ops->set_tx_ring_len = eqos_set_tx_ring_len; - ops->set_rx_ring_len = eqos_set_rx_ring_len; - ops->set_tx_ring_start_addr = eqos_set_tx_ring_start_addr; - ops->set_rx_ring_start_addr = eqos_set_rx_ring_start_addr; - ops->update_tx_tailptr = eqos_update_tx_tailptr; - ops->update_rx_tailptr = eqos_update_rx_tailptr; - ops->disable_chan_tx_intr = eqos_disable_chan_tx_intr; - ops->enable_chan_tx_intr = eqos_enable_chan_tx_intr; - ops->disable_chan_rx_intr = eqos_disable_chan_rx_intr; - ops->enable_chan_rx_intr = eqos_enable_chan_rx_intr; - ops->start_dma = eqos_start_dma; - ops->stop_dma = eqos_stop_dma; - ops->init_dma_channel = eqos_init_dma_channel; - ops->set_rx_buf_len = eqos_set_rx_buf_len; -#ifndef OSI_STRIPPED_LIB - ops->validate_regs = eqos_validate_dma_regs; ops->config_slot = eqos_config_slot; -#endif /* !OSI_STRIPPED_LIB */ - ops->clear_vm_tx_intr = eqos_clear_vm_tx_intr; - ops->clear_vm_rx_intr = eqos_clear_vm_rx_intr; +#ifdef OSI_DEBUG + ops->debug_intr_config = eqos_debug_intr_config; +#endif } +#endif /* !OSI_STRIPPED_LIB */ diff --git a/osi/dma/eqos_dma.h b/osi/dma/eqos_dma.h index 7644438..cb4fbf0 100644 --- a/osi/dma/eqos_dma.h +++ b/osi/dma/eqos_dma.h @@ -55,9 +55,6 @@ #define EQOS_DMA_CHX_TDLH(x) ((0x0080U * (x)) + 0x1110U) #define EQOS_DMA_CHX_TDLA(x) 
((0x0080U * (x)) + 0x1114U) #define EQOS_DMA_CHX_TDRL(x) ((0x0080U * (x)) + 0x112CU) -#define EQOS_VIRT_INTR_APB_CHX_CNTRL(x) (0x8200U + ((x) * 4U)) -#define EQOS_VIRT_INTR_CHX_STATUS(x) (0x8604U + ((x) * 8U)) -#define EQOS_VIRT_INTR_CHX_CNTRL(x) (0x8600U + ((x) * 8U)) /** @} */ /** @@ -66,8 +63,6 @@ * @brief Values defined for the DMA channel registers * @{ */ -#define EQOS_VIRT_INTR_CHX_STATUS_TX OSI_BIT(0) -#define EQOS_VIRT_INTR_CHX_STATUS_RX OSI_BIT(1) #define EQOS_DMA_CHX_STATUS_TI OSI_BIT(0) #define EQOS_DMA_CHX_STATUS_RI OSI_BIT(6) #define EQOS_DMA_CHX_STATUS_NIS OSI_BIT(15) @@ -76,21 +71,13 @@ #define EQOS_DMA_CHX_STATUS_CLEAR_RX \ (EQOS_DMA_CHX_STATUS_RI | EQOS_DMA_CHX_STATUS_NIS) -#define EQOS_VIRT_INTR_CHX_CNTRL_TX OSI_BIT(0) -#define EQOS_VIRT_INTR_CHX_CNTRL_RX OSI_BIT(1) - -#define EQOS_DMA_CHX_INTR_TIE OSI_BIT(0) +#ifdef OSI_DEBUG #define EQOS_DMA_CHX_INTR_TBUE OSI_BIT(2) -#define EQOS_DMA_CHX_INTR_RIE OSI_BIT(6) #define EQOS_DMA_CHX_INTR_RBUE OSI_BIT(7) #define EQOS_DMA_CHX_INTR_FBEE OSI_BIT(12) #define EQOS_DMA_CHX_INTR_AIE OSI_BIT(14) #define EQOS_DMA_CHX_INTR_NIE OSI_BIT(15) -#define EQOS_DMA_CHX_TX_CTRL_OSF OSI_BIT(4) -#define EQOS_DMA_CHX_TX_CTRL_TSE OSI_BIT(12) -#define EQOS_DMA_CHX_CTRL_PBLX8 OSI_BIT(16) -#define EQOS_DMA_CHX_RBSZ_MASK 0x7FFEU -#define EQOS_DMA_CHX_RBSZ_SHIFT 1U +#endif #define EQOS_DMA_CHX_TX_CTRL_TXPBL_RECOMMENDED 0x200000U #define EQOS_DMA_CHX_RX_CTRL_RXPBL_RECOMMENDED 0xC0000U #define EQOS_DMA_CHX_RX_WDT_RWT_MASK 0xFFU @@ -101,100 +88,10 @@ /* Below macros are used for periodic reg validation for functional safety. 
* HW register mask - to mask out reserved and self-clearing bits */ -#define EQOS_DMA_CHX_CTRL_MASK 0x11D3FFFU -#define EQOS_DMA_CHX_TX_CTRL_MASK 0xF3F9010U -#define EQOS_DMA_CHX_RX_CTRL_MASK 0x8F3F7FE0U -#define EQOS_DMA_CHX_TDRL_MASK 0x3FFU -#define EQOS_DMA_CHX_RDRL_MASK 0x3FFU -#define EQOS_DMA_CHX_INTR_ENA_MASK 0xFFC7U #ifndef OSI_STRIPPED_LIB #define EQOS_DMA_CHX_SLOT_SIV_MASK 0xFFFU #define EQOS_DMA_CHX_SLOT_SIV_SHIFT 4U #define EQOS_DMA_CHX_SLOT_ESC 0x1U #endif /* !OSI_STRIPPED_LIB */ -/* To add new registers to validate,append at end of below macro list and - * increment EQOS_MAX_DMA_SAFETY_REGS. - * Using macros instead of enum due to misra error. - */ -#define EQOS_DMA_CH0_CTRL_IDX 0U -#define EQOS_DMA_CH1_CTRL_IDX 1U -#define EQOS_DMA_CH2_CTRL_IDX 2U -#define EQOS_DMA_CH3_CTRL_IDX 3U -#define EQOS_DMA_CH4_CTRL_IDX 4U -#define EQOS_DMA_CH5_CTRL_IDX 5U -#define EQOS_DMA_CH6_CTRL_IDX 6U -#define EQOS_DMA_CH7_CTRL_IDX 7U -#define EQOS_DMA_CH0_TX_CTRL_IDX 8U -#define EQOS_DMA_CH1_TX_CTRL_IDX 9U -#define EQOS_DMA_CH2_TX_CTRL_IDX 10U -#define EQOS_DMA_CH3_TX_CTRL_IDX 11U -#define EQOS_DMA_CH4_TX_CTRL_IDX 12U -#define EQOS_DMA_CH5_TX_CTRL_IDX 13U -#define EQOS_DMA_CH6_TX_CTRL_IDX 14U -#define EQOS_DMA_CH7_TX_CTRL_IDX 15U -#define EQOS_DMA_CH0_RX_CTRL_IDX 16U -#define EQOS_DMA_CH1_RX_CTRL_IDX 17U -#define EQOS_DMA_CH2_RX_CTRL_IDX 18U -#define EQOS_DMA_CH3_RX_CTRL_IDX 19U -#define EQOS_DMA_CH4_RX_CTRL_IDX 20U -#define EQOS_DMA_CH5_RX_CTRL_IDX 21U -#define EQOS_DMA_CH6_RX_CTRL_IDX 22U -#define EQOS_DMA_CH7_RX_CTRL_IDX 23U -#define EQOS_DMA_CH0_TDRL_IDX 24U -#define EQOS_DMA_CH1_TDRL_IDX 25U -#define EQOS_DMA_CH2_TDRL_IDX 26U -#define EQOS_DMA_CH3_TDRL_IDX 27U -#define EQOS_DMA_CH4_TDRL_IDX 28U -#define EQOS_DMA_CH5_TDRL_IDX 29U -#define EQOS_DMA_CH6_TDRL_IDX 30U -#define EQOS_DMA_CH7_TDRL_IDX 31U -#define EQOS_DMA_CH0_RDRL_IDX 32U -#define EQOS_DMA_CH1_RDRL_IDX 33U -#define EQOS_DMA_CH2_RDRL_IDX 34U -#define EQOS_DMA_CH3_RDRL_IDX 35U -#define EQOS_DMA_CH4_RDRL_IDX 
36U -#define EQOS_DMA_CH5_RDRL_IDX 37U -#define EQOS_DMA_CH6_RDRL_IDX 38U -#define EQOS_DMA_CH7_RDRL_IDX 39U -#define EQOS_DMA_CH0_INTR_ENA_IDX 40U -#define EQOS_DMA_CH1_INTR_ENA_IDX 41U -#define EQOS_DMA_CH2_INTR_ENA_IDX 42U -#define EQOS_DMA_CH3_INTR_ENA_IDX 43U -#define EQOS_DMA_CH4_INTR_ENA_IDX 44U -#define EQOS_DMA_CH5_INTR_ENA_IDX 45U -#define EQOS_DMA_CH6_INTR_ENA_IDX 46U -#define EQOS_DMA_CH7_INTR_ENA_IDX 47U -#define EQOS_MAX_DMA_SAFETY_REGS 48U -#define EQOS_AXI_BUS_WIDTH 0x10U /** @} */ - -/** - * @brief dma_func_safety - Struct used to store last written values of - * critical DMA HW registers. - */ -struct dma_func_safety { - /** Array of reg MMIO addresses (base EQoS + offset of reg) */ - void *reg_addr[EQOS_MAX_DMA_SAFETY_REGS]; - /** Array of bit-mask value of each corresponding reg - * (used to ignore self-clearing/reserved bits in reg) */ - nveu32_t reg_mask[EQOS_MAX_DMA_SAFETY_REGS]; - /** Array of value stored in each corresponding register */ - nveu32_t reg_val[EQOS_MAX_DMA_SAFETY_REGS]; - /** OSI lock variable used to protect writes to reg - * while validation is in-progress */ - nveu32_t dma_safety_lock; -}; - -/** - * @brief eqos_get_dma_safety_config - EQOS get DMA safety configuration - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @returns Pointer to DMA safety configuration - */ -void *eqos_get_dma_safety_config(void); #endif /* INCLUDED_EQOS_DMA_H */ diff --git a/osi/dma/hw_common.h b/osi/dma/hw_common.h index a7b6335..474c7d3 100644 --- a/osi/dma/hw_common.h +++ b/osi/dma/hw_common.h @@ -30,7 +30,17 @@ * @{ */ #define HW_GLOBAL_DMA_STATUS 0x8700U +#define VIRT_INTR_CHX_CNTRL(x) (0x8600U + ((x) * 8U)) +#define VIRT_INTR_CHX_STATUS(x) (0x8604U + ((x) * 8U)) +#define AXI_BUS_WIDTH 0x10U +#define DMA_CHX_INTR_TIE OSI_BIT(0) +#define DMA_CHX_INTR_RIE OSI_BIT(6) +#define DMA_CHX_CTRL_PBLX8 OSI_BIT(16) +#define DMA_CHX_TX_CTRL_OSP OSI_BIT(4) +#define DMA_CHX_TX_CTRL_TSE OSI_BIT(12) 
+#define DMA_CHX_RBSZ_MASK 0x7FFEU +#define DMA_CHX_RBSZ_SHIFT 1U +#define DMA_CHX_RX_WDT_RWT_MASK 0xFFU /** @} */ #endif /* INCLUDED_HW_COMMON_H */ - diff --git a/osi/dma/hw_desc.h b/osi/dma/hw_desc.h index 45cf896..ddf27f0 100644 --- a/osi/dma/hw_desc.h +++ b/osi/dma/hw_desc.h @@ -45,22 +45,26 @@ #define RDES3_ERR_RE OSI_BIT(20) #define RDES3_ERR_DRIB OSI_BIT(19) #define RDES3_PKT_LEN 0x00007fffU -#define RDES3_LT (OSI_BIT(16) | OSI_BIT(17) | OSI_BIT(18)) -#define RDES3_LT_VT OSI_BIT(18) -#define RDES3_LT_DVT (OSI_BIT(16) | OSI_BIT(18)) -#define RDES3_RS0V OSI_BIT(25) #define RDES3_RS1V OSI_BIT(26) -#define RDES3_RSV OSI_BIT(26) -#define RDES0_OVT 0x0000FFFFU #define RDES3_TSD OSI_BIT(6) #define RDES3_TSA OSI_BIT(4) #define RDES1_TSA OSI_BIT(14) #define RDES1_TD OSI_BIT(15) +#ifndef OSI_STRIPPED_LIB +#define RDES3_LT (OSI_BIT(16) | OSI_BIT(17) | OSI_BIT(18)) +#define RDES3_LT_VT OSI_BIT(18) +#define RDES3_LT_DVT (OSI_BIT(16) | OSI_BIT(18)) +#define RDES0_OVT 0x0000FFFFU +#define RDES3_RS0V OSI_BIT(25) +#define RDES3_RSV OSI_BIT(26) #define RDES3_L34T 0x00F00000U #define RDES3_L34T_IPV4_TCP OSI_BIT(20) #define RDES3_L34T_IPV4_UDP OSI_BIT(21) #define RDES3_L34T_IPV6_TCP (OSI_BIT(23) | OSI_BIT(20)) #define RDES3_L34T_IPV6_UDP (OSI_BIT(23) | OSI_BIT(21)) +#define RDES3_ELLT_CVLAN 0x90000U +#define RDES3_ERR_MGBE_CRC (OSI_BIT(16) | OSI_BIT(17)) +#endif /* !OSI_STRIPPED_LIB */ #define RDES1_IPCE OSI_BIT(7) #define RDES1_IPCB OSI_BIT(6) @@ -73,7 +77,6 @@ #define RDES3_ELLT 0xF0000U #define RDES3_ELLT_IPHE 0x50000U #define RDES3_ELLT_CSUM_ERR 0x60000U -#define RDES3_ELLT_CVLAN 0x90000U /** @} */ /** Error Summary bits for Received packet */ @@ -83,7 +86,6 @@ /** MGBE error summary bits for Received packet */ #define RDES3_ES_MGBE 0x8000U -#define RDES3_ERR_MGBE_CRC (OSI_BIT(16) | OSI_BIT(17)) /** * @addtogroup EQOS_TxDesc Transmit Descriptors bit fields * diff --git a/osi/dma/libnvethernetcl.export b/osi/dma/libnvethernetcl.export index 311e3bc..8fdb285 100644 --- 
a/osi/dma/libnvethernetcl.export +++ b/osi/dma/libnvethernetcl.export @@ -23,8 +23,6 @@ # libnvethernetcl interface export # ############################################################################### -osi_start_dma -osi_stop_dma osi_get_refill_rx_desc_cnt osi_rx_dma_desc_init osi_set_rx_buf_len diff --git a/osi/dma/libnvethernetcl_safety.export b/osi/dma/libnvethernetcl_safety.export new file mode 100644 index 0000000..5e62c6c --- /dev/null +++ b/osi/dma/libnvethernetcl_safety.export @@ -0,0 +1,39 @@ +################################### tell Emacs this is a -*- makefile-gmake -*- +# +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+# +# libnvethernetcl safety interface export +# +############################################################################### +osi_get_refill_rx_desc_cnt +osi_rx_dma_desc_init +osi_set_rx_buf_len +osi_hw_transmit +osi_process_tx_completions +osi_process_rx_completions +osi_hw_dma_init +osi_hw_dma_deinit +osi_init_dma_ops +osi_dma_get_systime_from_mac +osi_is_mac_enabled +osi_get_dma +osi_handle_dma_intr +osi_get_global_dma_status diff --git a/osi/dma/mgbe_desc.c b/osi/dma/mgbe_desc.c index ef12db5..6b15b67 100644 --- a/osi/dma/mgbe_desc.c +++ b/osi/dma/mgbe_desc.c @@ -24,6 +24,7 @@ #include "hw_desc.h" #include "mgbe_desc.h" +#ifndef OSI_STRIPPED_LIB /** * @brief mgbe_get_rx_vlan - Get Rx VLAN from descriptor * @@ -94,34 +95,6 @@ static inline void mgbe_update_rx_err_stats(struct osi_rx_desc *rx_desc, } } -/** - * @brief mgbe_get_rx_csum - Get the Rx checksum from descriptor if valid - * - * Algorithm: - * 1) Check if the descriptor has any checksum validation errors. - * 2) If none, set a per packet context flag indicating no err in - * Rx checksum - * 3) The OSD layer will mark the packet appropriately to skip - * IP/TCP/UDP checksum validation in software based on whether - * COE is enabled for the device. - * - * @param[in] rx_desc: Rx descriptor - * @param[in] rx_pkt_cx: Per-Rx packet context structure - */ -static void mgbe_get_rx_csum(struct osi_rx_desc *rx_desc, - struct osi_rx_pkt_cx *rx_pkt_cx) -{ - unsigned int ellt = rx_desc->rdes3 & RDES3_ELLT; - - /* Always include either checksum none/unnecessary - * depending on status fields in desc. - * Hence no need to explicitly add OSI_PKT_CX_CSUM flag. 
- */ - if ((ellt != RDES3_ELLT_IPHE) && (ellt != RDES3_ELLT_CSUM_ERR)) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UNNECESSARY; - } -} - /** * @brief mgbe_get_rx_hash - Get Rx packet hash from descriptor if valid * @@ -157,8 +130,60 @@ static void mgbe_get_rx_hash(struct osi_rx_desc *rx_desc, rx_pkt_cx->rx_hash = rx_desc->rdes1; rx_pkt_cx->flags |= OSI_PKT_CX_RSS; } +#endif /* !OSI_STRIPPED_LIB */ -/** +/** + * @brief mgbe_get_rx_csum - Get the Rx checksum from descriptor if valid + * + * Algorithm: + * 1) Check if the descriptor has any checksum validation errors. + * 2) If none, set a per packet context flag indicating no err in + * Rx checksum + * 3) The OSD layer will mark the packet appropriately to skip + * IP/TCP/UDP checksum validation in software based on whether + * COE is enabled for the device. + * + * @param[in] rx_desc: Rx descriptor + * @param[in] rx_pkt_cx: Per-Rx packet context structure + */ +static void mgbe_get_rx_csum(const struct osi_rx_desc *const rx_desc, + struct osi_rx_pkt_cx *rx_pkt_cx) +{ + nveu32_t ellt = rx_desc->rdes3 & RDES3_ELLT; + nveu32_t pkt_type; + + /* Always include either checksum none/unnecessary + * depending on status fields in desc. + * Hence no need to explicitly add OSI_PKT_CX_CSUM flag. 
+ */ + if ((ellt != RDES3_ELLT_IPHE) && (ellt != RDES3_ELLT_CSUM_ERR)) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UNNECESSARY; + } + + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4; + if (ellt == RDES3_ELLT_IPHE) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4_BAD; + } + + pkt_type = rx_desc->rdes3 & MGBE_RDES3_PT_MASK; + if (pkt_type == MGBE_RDES3_PT_IPV4_TCP) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCPv4; + } else if (pkt_type == MGBE_RDES3_PT_IPV4_UDP) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UDPv4; + } else if (pkt_type == MGBE_RDES3_PT_IPV6_TCP) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCPv6; + } else if (pkt_type == MGBE_RDES3_PT_IPV6_UDP) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UDPv6; + } else { + /* Do nothing */ + } + + if (ellt == RDES3_ELLT_CSUM_ERR) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCP_UDP_BAD; + } +} + +/** * @brief mgbe_get_rx_hwstamp - Get Rx HW Time stamp * * Algorithm: @@ -174,15 +199,17 @@ static void mgbe_get_rx_hash(struct osi_rx_desc *rx_desc, * @retval -1 if TimeStamp is not available * @retval 0 if TimeStamp is available. 
*/ -static int mgbe_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma, - struct osi_rx_desc *rx_desc, - struct osi_rx_desc *context_desc, - struct osi_rx_pkt_cx *rx_pkt_cx) +static nve32_t mgbe_get_rx_hwstamp(const struct osi_dma_priv_data *const osi_dma, + const struct osi_rx_desc *const rx_desc, + const struct osi_rx_desc *const context_desc, + struct osi_rx_pkt_cx *rx_pkt_cx) { - int retry; + nve32_t ret = 0; + nve32_t retry; if ((rx_desc->rdes3 & RDES3_CDA) != RDES3_CDA) { - return -1; + ret = -1; + goto fail; } for (retry = 0; retry < 10; retry++) { @@ -193,7 +220,8 @@ static int mgbe_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma, if ((context_desc->rdes0 == OSI_INVALID_VALUE) && (context_desc->rdes1 == OSI_INVALID_VALUE)) { /* Invalid time stamp */ - return -1; + ret = -1; + goto fail; } /* Update rx pkt context flags to indicate PTP */ rx_pkt_cx->flags |= OSI_PKT_CX_PTP; @@ -207,24 +235,27 @@ static int mgbe_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma, if (retry == 10) { /* Timed out waiting for Rx timestamp */ - return -1; + ret = -1; + goto fail; } rx_pkt_cx->ns = context_desc->rdes0 + (OSI_NSEC_PER_SEC * context_desc->rdes1); if (rx_pkt_cx->ns < context_desc->rdes0) { - /* Will not hit this case */ - return -1; + ret = -1; } - return 0; +fail: + return ret; } -void mgbe_init_desc_ops(struct desc_ops *d_ops) +void mgbe_init_desc_ops(struct desc_ops *p_dops) { - d_ops->get_rx_csum = mgbe_get_rx_csum; - d_ops->update_rx_err_stats = mgbe_update_rx_err_stats; - d_ops->get_rx_vlan = mgbe_get_rx_vlan; - d_ops->get_rx_hash = mgbe_get_rx_hash; - d_ops->get_rx_hwstamp = mgbe_get_rx_hwstamp; +#ifndef OSI_STRIPPED_LIB + p_dops->update_rx_err_stats = mgbe_update_rx_err_stats; + p_dops->get_rx_vlan = mgbe_get_rx_vlan; + p_dops->get_rx_hash = mgbe_get_rx_hash; +#endif /* !OSI_STRIPPED_LIB */ + p_dops->get_rx_csum = mgbe_get_rx_csum; + p_dops->get_rx_hwstamp = mgbe_get_rx_hwstamp; } diff --git a/osi/dma/mgbe_desc.h b/osi/dma/mgbe_desc.h index 8b0d5d0..ae7bda1 
100644 --- a/osi/dma/mgbe_desc.h +++ b/osi/dma/mgbe_desc.h @@ -23,6 +23,7 @@ #ifndef MGBE_DESC_H_ #define MGBE_DESC_H_ +#ifndef OSI_STRIPPED_LIB /** * @addtogroup MGBE MAC FRP Stats. * @@ -32,6 +33,20 @@ #define MGBE_RDES2_FRPSM OSI_BIT(10) #define MGBE_RDES3_FRPSL OSI_BIT(14) /** @} */ +#endif /* !OSI_STRIPPED_LIB */ + +/** + * @addtogroup MGBE RDESC bits. + * + * @brief Values defined for the MGBE rx descriptor bit fields + * @{ + */ + +#define MGBE_RDES3_PT_MASK (OSI_BIT(20) | OSI_BIT(21) | OSI_BIT(22) | OSI_BIT(23)) +#define MGBE_RDES3_PT_IPV4_TCP OSI_BIT(20) +#define MGBE_RDES3_PT_IPV4_UDP OSI_BIT(21) +#define MGBE_RDES3_PT_IPV6_TCP (OSI_BIT(20) | OSI_BIT(23)) +#define MGBE_RDES3_PT_IPV6_UDP (OSI_BIT(21) | OSI_BIT(23)) +/** @} */ #endif /* MGBE_DESC_H_ */ - diff --git a/osi/dma/mgbe_dma.c b/osi/dma/mgbe_dma.c index eab9f3b..997d49e 100644 --- a/osi/dma/mgbe_dma.c +++ b/osi/dma/mgbe_dma.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -20,664 +20,12 @@ * DEALINGS IN THE SOFTWARE. */ +#ifndef OSI_STRIPPED_LIB #include "../osi/common/common.h" #include #include "mgbe_dma.h" #include "dma_local.h" -/** - * @brief mgbe_disable_chan_tx_intr - Disables DMA Tx channel interrupts. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Tx channel number. - * - * @note 1) MAC needs to be out of reset and proper clocks need to be configured - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - * 3) Mapping of physical IRQ line to DMA channel need to be maintained at - * OSDependent layer and pass corresponding channel number. 
- */ -static void mgbe_disable_chan_tx_intr(void *addr, nveu32_t chan) -{ - nveu32_t cntrl; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - cntrl = osi_readl((nveu8_t *)addr + - MGBE_VIRT_INTR_CHX_CNTRL(chan)); - cntrl &= ~MGBE_VIRT_INTR_CHX_CNTRL_TX; - osi_writel(cntrl, (nveu8_t *)addr + - MGBE_VIRT_INTR_CHX_CNTRL(chan)); -} - -/** - * @brief mgbe_enable_chan_tx_intr - Enable Tx channel interrupts. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Tx channel number. - * - * @note 1) MAC needs to be out of reset and proper clocks need to be configured - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - * 3) Mapping of physical IRQ line to DMA channel need to be maintained at - * OSDependent layer and pass corresponding channel number. - */ -static void mgbe_enable_chan_tx_intr(void *addr, nveu32_t chan) -{ - nveu32_t cntrl; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - cntrl = osi_readl((nveu8_t *)addr + - MGBE_VIRT_INTR_CHX_CNTRL(chan)); - cntrl |= MGBE_VIRT_INTR_CHX_CNTRL_TX; - osi_writel(cntrl, (nveu8_t *)addr + - MGBE_VIRT_INTR_CHX_CNTRL(chan)); -} - -/** - * @brief mgbe_disable_chan_rx_intr - Disable Rx channel interrupts. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Rx channel number. - * - * @note 1) MAC needs to be out of reset and proper clocks need to be configured - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - * 3) Mapping of physical IRQ line to DMA channel need to be maintained at - * OSDependent layer and pass corresponding channel number. 
- */ -static void mgbe_disable_chan_rx_intr(void *addr, nveu32_t chan) -{ - nveu32_t cntrl; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - cntrl = osi_readl((nveu8_t *)addr + - MGBE_VIRT_INTR_CHX_CNTRL(chan)); - cntrl &= ~MGBE_VIRT_INTR_CHX_CNTRL_RX; - osi_writel(cntrl, (nveu8_t *)addr + - MGBE_VIRT_INTR_CHX_CNTRL(chan)); -} - -/** - * @brief mgbe_enable_chan_rx_intr - Enable Rx channel interrupts. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Rx channel number. - * - * @note 1) MAC needs to be out of reset and proper clocks need to be configured - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - */ -static void mgbe_enable_chan_rx_intr(void *addr, nveu32_t chan) -{ - nveu32_t cntrl; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - cntrl = osi_readl((nveu8_t *)addr + - MGBE_VIRT_INTR_CHX_CNTRL(chan)); - cntrl |= MGBE_VIRT_INTR_CHX_CNTRL_RX; - osi_writel(cntrl, (nveu8_t *)addr + - MGBE_VIRT_INTR_CHX_CNTRL(chan)); -} - -/** - * @brief mgbe_set_tx_ring_len - Set DMA Tx ring length. - * - * Algorithm: Set DMA Tx channel ring length for specific channel. - * - * @param[in] osi_dma: OSI DMA data structure. - * @param[in] chan: DMA Tx channel number. - * @param[in] len: Length. - */ -static void mgbe_set_tx_ring_len(struct osi_dma_priv_data *osi_dma, - nveu32_t chan, - nveu32_t len) -{ - void *addr = osi_dma->base; - nveu32_t value; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - value = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_TX_CNTRL2(chan)); - value |= (len & MGBE_DMA_RING_LENGTH_MASK); - osi_writel(value, (nveu8_t *)addr + MGBE_DMA_CHX_TX_CNTRL2(chan)); -} - -/** - * @brief mgbe_set_tx_ring_start_addr - Set DMA Tx ring base address. - * - * Algorithm: Sets DMA Tx ring base address for specific channel. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Tx channel number. 
- * @param[in] tx_desc: Tx desc base addess. - */ -static void mgbe_set_tx_ring_start_addr(void *addr, nveu32_t chan, - nveu64_t tx_desc) -{ - nveu64_t temp; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - temp = H32(tx_desc); - if (temp < UINT_MAX) { - osi_writel((nveu32_t)temp, (nveu8_t *)addr + - MGBE_DMA_CHX_TDLH(chan)); - } - - temp = L32(tx_desc); - if (temp < UINT_MAX) { - osi_writel((nveu32_t)temp, (nveu8_t *)addr + - MGBE_DMA_CHX_TDLA(chan)); - } -} - -/** - * @brief mgbe_update_tx_tailptr - Updates DMA Tx ring tail pointer. - * - * Algorithm: Updates DMA Tx ring tail pointer for specific channel. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Tx channel number. - * @param[in] tailptr: DMA Tx ring tail pointer. - * - * - * @note 1) MAC needs to be out of reset and proper clocks need to be configured - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - */ -static void mgbe_update_tx_tailptr(void *addr, nveu32_t chan, - nveu64_t tailptr) -{ - nveu64_t temp; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - temp = L32(tailptr); - if (temp < UINT_MAX) { - osi_writel((nveu32_t)temp, (nveu8_t *)addr + - MGBE_DMA_CHX_TDTLP(chan)); - } -} - -/** - * @brief mgbe_set_rx_ring_len - Set Rx channel ring length. - * - * Algorithm: Sets DMA Rx channel ring length for specific DMA channel. - * - * @param[in] osi_dma: OSI DMA data structure. - * @param[in] chan: DMA Rx channel number. - * @param[in] len: Length - */ -static void mgbe_set_rx_ring_len(struct osi_dma_priv_data *osi_dma, - nveu32_t chan, - nveu32_t len) -{ - void *addr = osi_dma->base; - nveu32_t value; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - value = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_RX_CNTRL2(chan)); - value |= (len & MGBE_DMA_RING_LENGTH_MASK); - osi_writel(value, (nveu8_t *)addr + MGBE_DMA_CHX_RX_CNTRL2(chan)); -} - -/** - * @brief mgbe_set_rx_ring_start_addr - Set DMA Rx ring base address. 
- * - * Algorithm: Sets DMA Rx channel ring base address. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Rx channel number. - * @param[in] tx_desc: DMA Rx desc base address. - */ -static void mgbe_set_rx_ring_start_addr(void *addr, nveu32_t chan, - nveu64_t tx_desc) -{ - nveu64_t temp; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - temp = H32(tx_desc); - if (temp < UINT_MAX) { - osi_writel((nveu32_t)temp, (nveu8_t *)addr + - MGBE_DMA_CHX_RDLH(chan)); - } - - temp = L32(tx_desc); - if (temp < UINT_MAX) { - osi_writel((nveu32_t)temp, (nveu8_t *)addr + - MGBE_DMA_CHX_RDLA(chan)); - } -} - -/** - * @brief mgbe_update_rx_tailptr - Update Rx ring tail pointer - * - * Algorithm: Updates DMA Rx channel tail pointer for specific channel. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Rx channel number. - * @param[in] tailptr: Tail pointer - * - * @note 1) MAC needs to be out of reset and proper clocks need to be configured - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - */ -static void mgbe_update_rx_tailptr(void *addr, nveu32_t chan, - nveu64_t tailptr) -{ - nveu64_t temp; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - temp = H32(tailptr); - if (temp < UINT_MAX) { - osi_writel((nveu32_t)temp, (nveu8_t *)addr + - MGBE_DMA_CHX_RDTHP(chan)); - } - - temp = L32(tailptr); - if (temp < UINT_MAX) { - osi_writel((nveu32_t)temp, (nveu8_t *)addr + - MGBE_DMA_CHX_RDTLP(chan)); - } -} - -/** - * @brief mgbe_start_dma - Start DMA. - * - * Algorithm: Start Tx and Rx DMA for specific channel. - * - * @param[in] osi_dma: OSI DMA private data structure. - * @param[in] chan: DMA Tx/Rx channel number. 
- * - * @note 1) MAC needs to be out of reset and proper clocks need to be configured - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - */ -static void mgbe_start_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan) -{ - nveu32_t val; - void *addr = osi_dma->base; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - /* start Tx DMA */ - val = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_TX_CTRL(chan)); - val |= OSI_BIT(0); - osi_writel(val, (nveu8_t *)addr + MGBE_DMA_CHX_TX_CTRL(chan)); - - /* start Rx DMA */ - val = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_RX_CTRL(chan)); - val |= OSI_BIT(0); - val &= ~OSI_BIT(31); - osi_writel(val, (nveu8_t *)addr + MGBE_DMA_CHX_RX_CTRL(chan)); -} - -/** - * @brief mgbe_stop_dma - Stop DMA. - * - * Algorithm: Start Tx and Rx DMA for specific channel. - * - * @param[in] osi_dma: OSI DMA private data structure. - * @param[in] chan: DMA Tx/Rx channel number. - * - * @note 1) MAC needs to be out of reset and proper clocks need to be configured - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - */ -static void mgbe_stop_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan) -{ - nveu32_t val; - void *addr = osi_dma->base; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - /* stop Tx DMA */ - val = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_TX_CTRL(chan)); - val &= ~OSI_BIT(0); - osi_writel(val, (nveu8_t *)addr + MGBE_DMA_CHX_TX_CTRL(chan)); - - /* stop Rx DMA */ - val = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_RX_CTRL(chan)); - val &= ~OSI_BIT(0); - val |= OSI_BIT(31); - osi_writel(val, (nveu8_t *)addr + MGBE_DMA_CHX_RX_CTRL(chan)); -} - -/** - * @brief mgbe_configure_dma_channel - Configure DMA channel - * - * Algorithm: This takes care of configuring the below - * parameters for the DMA channel - * 1) Enabling DMA channel interrupts - * 2) Enable 8xPBL mode - * 3) Program Tx, Rx PBL - * 4) Enable TSO if HW supports - * 5) Program Rx Watchdog timer - * 6) Program Out Standing DMA Read Requests - * 
7) Program Out Standing DMA write Requests - * - * @param[in] chan: DMA channel number that need to be configured. - * @param[in] owrq: out standing write dma requests - * @param[in] orrq: out standing read dma requests - * @param[in] osi_dma: OSI DMA private data structure. - * - * @note MAC has to be out of reset. - */ -static void mgbe_configure_dma_channel(nveu32_t chan, - nveu32_t owrq, - nveu32_t orrq, - struct osi_dma_priv_data *osi_dma) -{ - nveu32_t value; - nveu32_t txpbl; - nveu32_t rxpbl; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - /* enable DMA channel interrupts */ - /* Enable TIE and TBUE */ - /* TIE - Transmit Interrupt Enable */ - /* TBUE - Transmit Buffer Unavailable Enable */ - /* RIE - Receive Interrupt Enable */ - /* RBUE - Receive Buffer Unavailable Enable */ - /* AIE - Abnormal Interrupt Summary Enable */ - /* NIE - Normal Interrupt Summary Enable */ - /* FBE - Fatal Bus Error Enable */ - value = osi_readl((nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_INTR_ENA(chan)); - value |= MGBE_DMA_CHX_INTR_TIE | MGBE_DMA_CHX_INTR_TBUE | - MGBE_DMA_CHX_INTR_RIE | MGBE_DMA_CHX_INTR_RBUE | - MGBE_DMA_CHX_INTR_FBEE | MGBE_DMA_CHX_INTR_AIE | - MGBE_DMA_CHX_INTR_NIE; - - /* For multi-irqs to work nie needs to be disabled */ - /* TODO: do we need this ? 
*/ - value &= ~(MGBE_DMA_CHX_INTR_NIE); - osi_writel(value, (nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_INTR_ENA(chan)); - - /* Enable 8xPBL mode */ - value = osi_readl((nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_CTRL(chan)); - value |= MGBE_DMA_CHX_CTRL_PBLX8; - osi_writel(value, (nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_CTRL(chan)); - - /* Configure DMA channel Transmit control register */ - value = osi_readl((nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_TX_CTRL(chan)); - /* Enable OSF mode */ - value |= MGBE_DMA_CHX_TX_CTRL_OSP; - - /* - * Formula for TxPBL calculation is - * (TxPBL) < ((TXQSize - MTU)/(DATAWIDTH/8)) - 5 - * if TxPBL exceeds the value of 256 then we need to make use of 256 - * as the TxPBL else we should be using the value whcih we get after - * calculation by using above formula - */ - if (osi_dma->pre_si == OSI_ENABLE) { - txpbl = ((((MGBE_TXQ_RXQ_SIZE_FPGA / osi_dma->num_dma_chans) - - osi_dma->mtu) / (MGBE_AXI_DATAWIDTH / 8U)) - 5U); - } else { - txpbl = ((((MGBE_TXQ_SIZE / osi_dma->num_dma_chans) - - osi_dma->mtu) / (MGBE_AXI_DATAWIDTH / 8U)) - 5U); - } - - /* Since PBLx8 is set, so txpbl/8 will be the value that - * need to be programmed - */ - if (txpbl >= MGBE_DMA_CHX_MAX_PBL) { - value |= ((MGBE_DMA_CHX_MAX_PBL / 8U) << - MGBE_DMA_CHX_CTRL_PBL_SHIFT); - } else { - value |= ((txpbl / 8U) << MGBE_DMA_CHX_CTRL_PBL_SHIFT); - } - - /* enable TSO by default if HW supports */ - value |= MGBE_DMA_CHX_TX_CTRL_TSE; - - osi_writel(value, (nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_TX_CTRL(chan)); - - /* Configure DMA channel Receive control register */ - /* Select Rx Buffer size. 
Needs to be rounded up to next multiple of - * bus width - */ - value = osi_readl((nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_RX_CTRL(chan)); - - /* clear previous Rx buffer size */ - value &= ~MGBE_DMA_CHX_RBSZ_MASK; - value |= (osi_dma->rx_buf_len << MGBE_DMA_CHX_RBSZ_SHIFT); - /* RxPBL calculation is - * RxPBL <= Rx Queue Size/2 - */ - if (osi_dma->pre_si == OSI_ENABLE) { - rxpbl = (((MGBE_TXQ_RXQ_SIZE_FPGA / osi_dma->num_dma_chans) / - 2U) << MGBE_DMA_CHX_CTRL_PBL_SHIFT); - } else { - rxpbl = (((MGBE_RXQ_SIZE / osi_dma->num_dma_chans) / 2U) << - MGBE_DMA_CHX_CTRL_PBL_SHIFT); - } - /* Since PBLx8 is set, so rxpbl/8 will be the value that - * need to be programmed - */ - if (rxpbl >= MGBE_DMA_CHX_MAX_PBL) { - value |= ((MGBE_DMA_CHX_MAX_PBL / 8) << - MGBE_DMA_CHX_CTRL_PBL_SHIFT); - } else { - value |= ((rxpbl / 8) << MGBE_DMA_CHX_CTRL_PBL_SHIFT); - } - - osi_writel(value, (nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_RX_CTRL(chan)); - - /* Set Receive Interrupt Watchdog Timer Count */ - /* conversion of usec to RWIT value - * Eg:System clock is 62.5MHz, each clock cycle would then be 16ns - * For value 0x1 in watchdog timer,device would wait for 256 clk cycles, - * ie, (16ns x 256) => 4.096us (rounding off to 4us) - * So formula with above values is,ret = usec/4 - */ - /* NOTE: Bug 3287883: If RWTU value programmed then driver needs - * to follow below order - - * 1. First write RWT field with non-zero value. - * 2. Program RWTU field of register - * DMA_CH(#i)_Rx_Interrupt_Watchdog_Time. 
- */ - if ((osi_dma->use_riwt == OSI_ENABLE) && - (osi_dma->rx_riwt < UINT_MAX)) { - value = osi_readl((nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_RX_WDT(chan)); - /* Mask the RWT value */ - value &= ~MGBE_DMA_CHX_RX_WDT_RWT_MASK; - /* Conversion of usec to Rx Interrupt Watchdog Timer Count */ - /* TODO: Need to fix AXI clock for silicon */ - value |= ((osi_dma->rx_riwt * - ((nveu32_t)MGBE_AXI_CLK_FREQ / OSI_ONE_MEGA_HZ)) / - MGBE_DMA_CHX_RX_WDT_RWTU) & - MGBE_DMA_CHX_RX_WDT_RWT_MASK; - osi_writel(value, (nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_RX_WDT(chan)); - - value = osi_readl((nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_RX_WDT(chan)); - value &= ~(MGBE_DMA_CHX_RX_WDT_RWTU_MASK << - MGBE_DMA_CHX_RX_WDT_RWTU_SHIFT); - value |= (MGBE_DMA_CHX_RX_WDT_RWTU_2048_CYCLE << - MGBE_DMA_CHX_RX_WDT_RWTU_SHIFT); - osi_writel(value, (nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_RX_WDT(chan)); - } - - /* Update ORRQ in DMA_CH(#i)_Tx_Control2 register */ - value = osi_readl((nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_TX_CNTRL2(chan)); - value |= (orrq << MGBE_DMA_CHX_TX_CNTRL2_ORRQ_SHIFT); - osi_writel(value, (nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_TX_CNTRL2(chan)); - - /* Update OWRQ in DMA_CH(#i)_Rx_Control2 register */ - value = osi_readl((nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_RX_CNTRL2(chan)); - value |= (owrq << MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SHIFT); - osi_writel(value, (nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_RX_CNTRL2(chan)); -} - -/** - * @brief mgbe_init_dma_channel - DMA channel INIT - * - * @param[in] osi_dma: OSI DMA private data structure. - */ -static nve32_t mgbe_init_dma_channel(struct osi_dma_priv_data *osi_dma) -{ - nveu32_t chinx; - nveu32_t owrq; - nveu32_t orrq; - - /* DMA Read Out Standing Requests */ - /* For Presi ORRQ is 16 in case of schannel and 64 in case of mchannel. 
- * For Si ORRQ is 64 in case of single and multi channel - */ - orrq = (MGBE_DMA_CHX_TX_CNTRL2_ORRQ_RECOMMENDED / - osi_dma->num_dma_chans); - if ((osi_dma->num_dma_chans == 1U) && (osi_dma->pre_si == OSI_ENABLE)) { - /* For Presi ORRQ is 16 in a single channel configuration - * so overwrite only for this configuration - */ - orrq = MGBE_DMA_CHX_RX_CNTRL2_ORRQ_SCHAN_PRESI; - } - - /* DMA Write Out Standing Requests */ - /* For Presi OWRQ is 8 and for Si it is 32 in case of single channel. - * For Multi Channel OWRQ is 64 for both si and presi - */ - if (osi_dma->num_dma_chans == 1U) { - if (osi_dma->pre_si == OSI_ENABLE) { - owrq = MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN_PRESI; - } else { - owrq = MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN; - } - } else { - owrq = (MGBE_DMA_CHX_RX_CNTRL2_OWRQ_MCHAN / - osi_dma->num_dma_chans); - } - - /* configure MGBE DMA channels */ - for (chinx = 0; chinx < osi_dma->num_dma_chans; chinx++) { - mgbe_configure_dma_channel(osi_dma->dma_chans[chinx], - owrq, orrq, osi_dma); - } - - return 0; -} - -/** - * @brief mgbe_set_rx_buf_len - Set Rx buffer length - * Sets the Rx buffer length based on the new MTU size set. - * - * @param[in] osi_dma: OSI DMA private data structure. - * - * @note 1) MAC needs to be out of reset and proper clocks need to be configured - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - * 3) osi_dma->mtu need to be filled with current MTU size <= 9K - */ -static void mgbe_set_rx_buf_len(struct osi_dma_priv_data *osi_dma) -{ - nveu32_t rx_buf_len; - - /* Add Ethernet header + FCS + NET IP align size to MTU */ - rx_buf_len = osi_dma->mtu + OSI_ETH_HLEN + - NV_VLAN_HLEN + OSI_NET_IP_ALIGN; - /* Buffer alignment */ - osi_dma->rx_buf_len = ((rx_buf_len + (MGBE_AXI_BUS_WIDTH - 1U)) & - ~(MGBE_AXI_BUS_WIDTH - 1U)); -} - -/** - * @brief Read-validate HW registers for functional safety. 
- * - * @note - * Algorithm: - * - Reads pre-configured list of MAC/MTL configuration registers - * and compares with last written value for any modifications. - * - * @param[in] osi_dma: OSI DMA private data structure. - * - * @pre - * - MAC has to be out of reset. - * - osi_hw_dma_init has to be called. Internally this would initialize - * the safety_config (see osi_dma_priv_data) based on MAC version and - * which specific registers needs to be validated periodically. - * - Invoke this call if (osi_dma_priv_data->safety_config != OSI_NULL) - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -static nve32_t mgbe_validate_dma_regs(OSI_UNUSED - struct osi_dma_priv_data *osi_dma) -{ - /* TODO: for mgbe */ - return 0; -} - -/** - * @brief mgbe_clear_vm_tx_intr - Clear VM Tx interrupt - * - * Algorithm: Clear Tx interrupt source at DMA and wrapper level. - * - * @param[in] addr: MAC base address. - * @param[in] chan: DMA Tx channel number. - */ -static void mgbe_clear_vm_tx_intr(void *addr, nveu32_t chan) -{ -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - osi_writel(MGBE_DMA_CHX_STATUS_CLEAR_TX, - (nveu8_t *)addr + MGBE_DMA_CHX_STATUS(chan)); - osi_writel(MGBE_VIRT_INTR_CHX_STATUS_TX, - (nveu8_t *)addr + MGBE_VIRT_INTR_CHX_STATUS(chan)); - - mgbe_disable_chan_tx_intr(addr, chan); -} - -/** - * @brief mgbe_clear_vm_rx_intr - Clear VM Rx interrupt - * - * @param[in] addr: MAC base address. - * @param[in] chan: DMA Tx channel number. - * - * Algorithm: Clear Rx interrupt source at DMA and wrapper level. 
- */ -static void mgbe_clear_vm_rx_intr(void *addr, nveu32_t chan) -{ -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - osi_writel(MGBE_DMA_CHX_STATUS_CLEAR_RX, - (nveu8_t *)addr + MGBE_DMA_CHX_STATUS(chan)); - osi_writel(MGBE_VIRT_INTR_CHX_STATUS_RX, - (nveu8_t *)addr + MGBE_VIRT_INTR_CHX_STATUS(chan)); - - mgbe_disable_chan_rx_intr(addr, chan); -} - /** * @brief mgbe_config_slot - Configure slot Checking for DMA channel * @@ -720,24 +68,60 @@ static void mgbe_config_slot(struct osi_dma_priv_data *osi_dma, } } +#ifdef OSI_DEBUG +/** + * @brief Enable/disable debug interrupt + * + * @param[in] osi_dma: OSI DMA private data structure. + * + * Algorithm: + * - if osi_dma->ioctl_data.arg_u32 == OSI_ENABLE enable debug interrupt + * - else disable bebug inerrupts + */ +static void mgbe_debug_intr_config(struct osi_dma_priv_data *osi_dma) +{ + nveu32_t chinx; + nveu32_t chan; + nveu32_t val; + nveu32_t enable = osi_dma->ioctl_data.arg_u32; + + if (enable == OSI_ENABLE) { + for (chinx = 0; chinx < osi_dma->num_dma_chans; chinx++) { + chan = osi_dma->dma_chans[chinx]; + val = osi_readl((nveu8_t *)osi_dma->base + + MGBE_DMA_CHX_INTR_ENA(chan)); + + val |= (MGBE_DMA_CHX_INTR_AIE | + MGBE_DMA_CHX_INTR_FBEE | + MGBE_DMA_CHX_INTR_RBUE | + MGBE_DMA_CHX_INTR_TBUE | + MGBE_DMA_CHX_INTR_NIE); + osi_writel(val, (nveu8_t *)osi_dma->base + + MGBE_DMA_CHX_INTR_ENA(chan)); + } + + } else { + for (chinx = 0; chinx < osi_dma->num_dma_chans; chinx++) { + chan = osi_dma->dma_chans[chinx]; + val = osi_readl((nveu8_t *)osi_dma->base + + MGBE_DMA_CHX_INTR_ENA(chan)); + val &= (~MGBE_DMA_CHX_INTR_AIE & + ~MGBE_DMA_CHX_INTR_FBEE & + ~MGBE_DMA_CHX_INTR_RBUE & + ~MGBE_DMA_CHX_INTR_TBUE & + ~MGBE_DMA_CHX_INTR_NIE); + osi_writel(val, (nveu8_t *)osi_dma->base + + MGBE_DMA_CHX_INTR_ENA(chan)); + } + } +} +#endif + void mgbe_init_dma_chan_ops(struct dma_chan_ops *ops) { - ops->set_tx_ring_len = mgbe_set_tx_ring_len; - ops->set_rx_ring_len = mgbe_set_rx_ring_len; - ops->set_tx_ring_start_addr = 
mgbe_set_tx_ring_start_addr; - ops->set_rx_ring_start_addr = mgbe_set_rx_ring_start_addr; - ops->update_tx_tailptr = mgbe_update_tx_tailptr; - ops->update_rx_tailptr = mgbe_update_rx_tailptr; - ops->disable_chan_tx_intr = mgbe_disable_chan_tx_intr; - ops->enable_chan_tx_intr = mgbe_enable_chan_tx_intr; - ops->disable_chan_rx_intr = mgbe_disable_chan_rx_intr; - ops->enable_chan_rx_intr = mgbe_enable_chan_rx_intr; - ops->start_dma = mgbe_start_dma; - ops->stop_dma = mgbe_stop_dma; - ops->init_dma_channel = mgbe_init_dma_channel; - ops->set_rx_buf_len = mgbe_set_rx_buf_len; - ops->validate_regs = mgbe_validate_dma_regs; - ops->clear_vm_tx_intr = mgbe_clear_vm_tx_intr; - ops->clear_vm_rx_intr = mgbe_clear_vm_rx_intr; ops->config_slot = mgbe_config_slot; +#ifdef OSI_DEBUG + ops->debug_intr_config = mgbe_debug_intr_config; +#endif }; +#endif /* !OSI_STRIPPED_LIB */ diff --git a/osi/dma/mgbe_dma.h b/osi/dma/mgbe_dma.h index 9321501..89032ca 100644 --- a/osi/dma/mgbe_dma.h +++ b/osi/dma/mgbe_dma.h @@ -32,17 +32,6 @@ #define MGBE_AXI_CLK_FREQ 480000000U /** @} */ -/** - * @@addtogroup Timestamp Capture Register - * @brief MGBE MAC Timestamp Register offset - * @{ - */ -#define MGBE_MAC_TSS 0X0D20 -#define MGBE_MAC_TS_NSEC 0x0D30 -#define MGBE_MAC_TS_SEC 0x0D34 -#define MGBE_MAC_TS_PID 0x0D38 -/** @} */ - /** * @addtogroup MGBE_DMA DMA Channel Register offsets * @@ -51,7 +40,9 @@ */ #define MGBE_DMA_CHX_TX_CTRL(x) ((0x0080U * (x)) + 0x3104U) #define MGBE_DMA_CHX_RX_CTRL(x) ((0x0080U * (x)) + 0x3108U) +#ifndef OSI_STRIPPED_LIB #define MGBE_DMA_CHX_SLOT_CTRL(x) ((0x0080U * (x)) + 0x310CU) +#endif /* !OSI_STRIPPED_LIB */ #define MGBE_DMA_CHX_INTR_ENA(x) ((0x0080U * (x)) + 0x3138U) #define MGBE_DMA_CHX_CTRL(x) ((0x0080U * (x)) + 0x3100U) #define MGBE_DMA_CHX_RX_WDT(x) ((0x0080U * (x)) + 0x313CU) @@ -60,22 +51,11 @@ #define MGBE_DMA_CHX_TDLH(x) ((0x0080U * (x)) + 0x3110U) #define MGBE_DMA_CHX_TDLA(x) ((0x0080U * (x)) + 0x3114U) #define MGBE_DMA_CHX_TDTLP(x) ((0x0080U * (x)) + 
0x3124U) -#define MGBE_DMA_CHX_TDTHP(x) ((0x0080U * (x)) + 0x3120U) #define MGBE_DMA_CHX_RDLH(x) ((0x0080U * (x)) + 0x3118U) #define MGBE_DMA_CHX_RDLA(x) ((0x0080U * (x)) + 0x311CU) -#define MGBE_DMA_CHX_RDTHP(x) ((0x0080U * (x)) + 0x3128U) #define MGBE_DMA_CHX_RDTLP(x) ((0x0080U * (x)) + 0x312CU) /** @} */ -/** - * @addtogroup MGBE_INTR INT Channel Register offsets - * - * @brief MGBE Virtural Interrupt Channel register offsets - * @{ - */ -#define MGBE_VIRT_INTR_CHX_STATUS(x) (0x8604U + ((x) * 8U)) -#define MGBE_VIRT_INTR_CHX_CNTRL(x) (0x8600U + ((x) * 8U)) -#define MGBE_VIRT_INTR_APB_CHX_CNTRL(x) (0x8200U + ((x) * 4U)) /** @} */ /** @@ -84,44 +64,25 @@ * @brief Values defined for the MGBE registers * @{ */ -#define MGBE_DMA_CHX_TX_CTRL_OSP OSI_BIT(4) -#define MGBE_DMA_CHX_TX_CTRL_TSE OSI_BIT(12) #define MGBE_DMA_CHX_RX_WDT_RWT_MASK 0xFFU #define MGBE_DMA_CHX_RX_WDT_RWTU 2048U -#define MGBE_DMA_CHX_RX_WDT_RWTU_2048_CYCLE 3U -#define MGBE_DMA_CHX_RX_WDT_RWTU_MASK 3U -#define MGBE_DMA_CHX_RX_WDT_RWTU_SHIFT 12U -#define MGBE_DMA_CHX_RBSZ_MASK 0x7FFEU -#define MGBE_DMA_CHX_RBSZ_SHIFT 1U -#define MGBE_AXI_BUS_WIDTH 0x10U -#define MGBE_DMA_CHX_CTRL_PBLX8 OSI_BIT(16) -#define MGBE_DMA_CHX_INTR_TIE OSI_BIT(0) +#define MGBE_DMA_CHX_RX_WDT_RWTU_2048_CYCLE 0x3000U +#define MGBE_DMA_CHX_RX_WDT_RWTU_MASK 0x3000U +#ifdef OSI_DEBUG #define MGBE_DMA_CHX_INTR_TBUE OSI_BIT(2) -#define MGBE_DMA_CHX_INTR_RIE OSI_BIT(6) #define MGBE_DMA_CHX_INTR_RBUE OSI_BIT(7) #define MGBE_DMA_CHX_INTR_FBEE OSI_BIT(12) #define MGBE_DMA_CHX_INTR_AIE OSI_BIT(14) #define MGBE_DMA_CHX_INTR_NIE OSI_BIT(15) -#define MGBE_DMA_CHX_STATUS_TI OSI_BIT(0) -#define MGBE_DMA_CHX_STATUS_RI OSI_BIT(6) -#define MGBE_DMA_CHX_STATUS_NIS OSI_BIT(15) +#endif +#ifndef OSI_STRIPPED_LIB #define MGBE_DMA_CHX_SLOT_ESC OSI_BIT(0) -#define MGBE_DMA_CHX_STATUS_CLEAR_TX (MGBE_DMA_CHX_STATUS_TI | \ - MGBE_DMA_CHX_STATUS_NIS) -#define MGBE_DMA_CHX_STATUS_CLEAR_RX (MGBE_DMA_CHX_STATUS_RI | \ - MGBE_DMA_CHX_STATUS_NIS) -#define 
MGBE_VIRT_INTR_CHX_STATUS_TX OSI_BIT(0) -#define MGBE_VIRT_INTR_CHX_STATUS_RX OSI_BIT(1) -#define MGBE_VIRT_INTR_CHX_CNTRL_TX OSI_BIT(0) -#define MGBE_VIRT_INTR_CHX_CNTRL_RX OSI_BIT(1) +#endif /* !OSI_STRIPPED_LIB */ #define MGBE_DMA_CHX_TX_CNTRL2_ORRQ_RECOMMENDED 64U #define MGBE_DMA_CHX_TX_CNTRL2_ORRQ_SHIFT 24U #define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN 32U #define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_MCHAN 64U #define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SHIFT 24U -#define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN_PRESI 8U -#define MGBE_DMA_CHX_RX_CNTRL2_ORRQ_SCHAN_PRESI 16U -#define MGBE_DMA_RING_LENGTH_MASK 0xFFFFU #define MGBE_DMA_CHX_CTRL_PBL_SHIFT 16U /** @} */ @@ -131,35 +92,14 @@ * @brief Values defined for PBL settings * @{ */ -/* Tx and Rx Qsize is 64KB */ -#define MGBE_TXQ_RXQ_SIZE_FPGA 65536U /* Tx Queue size is 128KB */ #define MGBE_TXQ_SIZE 131072U /* Rx Queue size is 192KB */ #define MGBE_RXQ_SIZE 196608U /* MAX PBL value */ #define MGBE_DMA_CHX_MAX_PBL 256U +#define MGBE_DMA_CHX_MAX_PBL_VAL 0x200000U /* AXI Data width */ #define MGBE_AXI_DATAWIDTH 128U /** @} */ - -/** - * @addtogroup MGBE MAC timestamp registers bit field. - * - * @brief Values defined for the MGBE timestamp registers - * @{ - */ -#define MGBE_MAC_TSS_TXTSC OSI_BIT(15) -#define MGBE_MAC_TS_PID_MASK 0x3FFU -#define MGBE_MAC_TS_NSEC_MASK 0x7FFFFFFFU -/** @} */ - -/** - * @brief mgbe_get_dma_chan_ops - MGBE get DMA channel operations - * - * Algorithm: Returns pointer DMA channel operations structure. - * - * @returns Pointer to DMA channel operations structure - */ -struct osi_dma_chan_ops *mgbe_get_dma_chan_ops(void); #endif diff --git a/osi/dma/osi_dma.c b/osi/dma/osi_dma.c index 6d7e16f..be19708 100644 --- a/osi/dma/osi_dma.c +++ b/osi/dma/osi_dma.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -32,15 +32,92 @@ /** * @brief g_dma - DMA local data array. */ -static struct dma_local g_dma[MAX_DMA_INSTANCES]; /** * @brief g_ops - local DMA HW operations array. */ -static struct dma_chan_ops g_ops[MAX_MAC_IP_TYPES]; + +typedef nve32_t (*dma_intr_fn)(struct osi_dma_priv_data const *osi_dma, + nveu32_t intr_ctrl, nveu32_t intr_status, + nveu32_t dma_status, nveu32_t val); +static inline nve32_t enable_intr(struct osi_dma_priv_data const *osi_dma, + nveu32_t intr_ctrl, nveu32_t intr_status, + nveu32_t dma_status, nveu32_t val); +static inline nve32_t disable_intr(struct osi_dma_priv_data const *osi_dma, + nveu32_t intr_ctrl, nveu32_t intr_status, + nveu32_t dma_status, nveu32_t val); +static dma_intr_fn intr_fn[2] = { disable_intr, enable_intr }; + +static inline nveu32_t set_pos_val(nveu32_t val, nveu32_t pos_val) +{ + return (val | pos_val); +} + +static inline nveu32_t clear_pos_val(nveu32_t val, nveu32_t pos_val) +{ + return (val & ~pos_val); +} + +static inline nve32_t intr_en_dis_retry(nveu8_t *base, nveu32_t intr_ctrl, + nveu32_t val, nveu32_t en_dis) +{ + typedef nveu32_t (*set_clear)(nveu32_t val, nveu32_t pos); + const set_clear set_clr[2] = { clear_pos_val, set_pos_val }; + nveu32_t cntrl1, cntrl2, i; + nve32_t ret = -1; + + for (i = 0U; i < 10U; i++) { + cntrl1 = osi_readl(base + intr_ctrl); + cntrl1 = set_clr[en_dis](cntrl1, val); + osi_writel(cntrl1, base + intr_ctrl); + + cntrl2 = osi_readl(base + intr_ctrl); + if (cntrl1 == cntrl2) { + ret = 0; + break; + } else { + continue; + } + } + + return ret; +} + +static inline nve32_t enable_intr(struct osi_dma_priv_data const *osi_dma, + nveu32_t intr_ctrl, OSI_UNUSED nveu32_t intr_status, + OSI_UNUSED nveu32_t dma_status, nveu32_t val) +{ + return intr_en_dis_retry((nveu8_t *)osi_dma->base, intr_ctrl, + val, OSI_DMA_INTR_ENABLE); +} + +static inline 
nve32_t disable_intr(struct osi_dma_priv_data const *osi_dma, + nveu32_t intr_ctrl, nveu32_t intr_status, + nveu32_t dma_status, nveu32_t val) +{ + nveu8_t *base = (nveu8_t *)osi_dma->base; + const nveu32_t status_val[4] = { + 0, + EQOS_DMA_CHX_STATUS_CLEAR_TX, + EQOS_DMA_CHX_STATUS_CLEAR_RX, + 0, + }; + nveu32_t status; + + status = osi_readl(base + intr_status); + if ((status & val) == val) { + osi_writel(status_val[val], base + dma_status); + osi_writel(val, base + intr_status); + } + + return intr_en_dis_retry((nveu8_t *)osi_dma->base, intr_ctrl, + val, OSI_DMA_INTR_DISABLE); +} struct osi_dma_priv_data *osi_get_dma(void) { + static struct dma_local g_dma[MAX_DMA_INSTANCES]; + struct osi_dma_priv_data *osi_dma = OSI_NULL; nveu32_t i; for (i = 0U; i < MAX_DMA_INSTANCES; i++) { @@ -52,12 +129,14 @@ struct osi_dma_priv_data *osi_get_dma(void) } if (i == MAX_DMA_INSTANCES) { - return OSI_NULL; + goto fail; } g_dma[i].magic_num = (nveu64_t)&g_dma[i].osi_dma; - return &g_dma[i].osi_dma; + osi_dma = &g_dma[i].osi_dma; +fail: + return osi_dma; } /** @@ -75,15 +154,17 @@ struct osi_dma_priv_data *osi_get_dma(void) * @retval 0 on Success * @retval -1 on Failure */ -static inline nve32_t validate_args(struct osi_dma_priv_data *osi_dma, - struct dma_local *l_dma) +static inline nve32_t dma_validate_args(const struct osi_dma_priv_data *const osi_dma, + const struct dma_local *const l_dma) { + nve32_t ret = 0; + if ((osi_dma == OSI_NULL) || (osi_dma->base == OSI_NULL) || (l_dma->init_done == OSI_DISABLE)) { - return -1; + ret = -1; } - return 0; + return ret; } /** @@ -104,15 +185,16 @@ static inline nve32_t validate_args(struct osi_dma_priv_data *osi_dma, static inline nve32_t validate_dma_chan_num(struct osi_dma_priv_data *osi_dma, nveu32_t chan) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + const struct dma_local *const l_dma = (struct dma_local *)(void *)osi_dma; + nve32_t ret = 0; - if (chan >= l_dma->max_chans) { - OSI_DMA_ERR(OSI_NULL, 
OSI_LOG_ARG_INVALID, + if (chan >= l_dma->num_max_chans) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "Invalid DMA channel number\n", chan); - return -1; + ret = -1; } - return 0; + return ret; } /** @@ -131,21 +213,23 @@ static inline nve32_t validate_dma_chan_num(struct osi_dma_priv_data *osi_dma, */ static inline nve32_t validate_dma_chans(struct osi_dma_priv_data *osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; - nveu32_t i = 0; + const struct dma_local *const l_dma = (struct dma_local *)(void *)osi_dma; + nveu32_t i = 0U; + nve32_t ret = 0; for (i = 0; i < osi_dma->num_dma_chans; i++) { - if (osi_dma->dma_chans[i] > l_dma->max_chans) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + if (osi_dma->dma_chans[i] > l_dma->num_max_chans) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "Invalid DMA channel number:\n", osi_dma->dma_chans[i]); - return -1; + ret = -1; } } - return 0; + return ret; } +#ifndef OSI_STRIPPED_LIB /** * @brief Function to validate function pointers. 
* @@ -171,14 +255,15 @@ static nve32_t validate_func_ptrs(struct osi_dma_priv_data *osi_dma, #elif __SIZEOF_POINTER__ == 4 nveu32_t *l_ops = (nveu32_t *)temp_ops; #else - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA: Undefined architecture\n", 0ULL); return -1; #endif + (void) osi_dma; for (i = 0; i < (sizeof(*ops_p) / (nveu64_t)__SIZEOF_POINTER__); i++) { if (*l_ops == 0U) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma: fn ptr validation failed at\n", (nveu64_t)i); return -1; @@ -189,30 +274,31 @@ static nve32_t validate_func_ptrs(struct osi_dma_priv_data *osi_dma, return 0; } +#endif nve32_t osi_init_dma_ops(struct osi_dma_priv_data *osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; - nveu32_t default_rz[] = { EQOS_DEFAULT_RING_SZ, MGBE_DEFAULT_RING_SZ }; - nveu32_t max_rz[] = { EQOS_DEFAULT_RING_SZ, MGBE_MAX_RING_SZ }; + const nveu32_t default_rz[] = { EQOS_DEFAULT_RING_SZ, MGBE_DEFAULT_RING_SZ }; + const nveu32_t max_rz[] = { EQOS_DEFAULT_RING_SZ, MGBE_MAX_RING_SZ }; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + static struct dma_chan_ops dma_gops[MAX_MAC_IP_TYPES]; +#ifndef OSI_STRIPPED_LIB typedef void (*init_ops_arr)(struct dma_chan_ops *temp); - typedef void *(*safety_init)(void); - - init_ops_arr i_ops[MAX_MAC_IP_TYPES] = { + const init_ops_arr i_ops[MAX_MAC_IP_TYPES] = { eqos_init_dma_chan_ops, mgbe_init_dma_chan_ops }; - - safety_init s_init[MAX_MAC_IP_TYPES] = { - eqos_get_dma_safety_config, OSI_NULL - }; +#endif + nve32_t ret = 0; if (osi_dma == OSI_NULL) { - return -1; + ret = -1; + goto fail; } if ((l_dma->magic_num != (nveu64_t)osi_dma) || (l_dma->init_done == OSI_ENABLE)) { - return -1; + ret = -1; + goto fail; } if (osi_dma->is_ethernet_server != OSI_ENABLE) { @@ -223,115 +309,295 @@ nve32_t osi_init_dma_ops(struct osi_dma_priv_data *osi_dma) (osi_dma->osd_ops.printf == OSI_NULL) || #endif /* OSI_DEBUG */ 
(osi_dma->osd_ops.udelay == OSI_NULL)) { - return -1; + ret = -1; + goto fail; } } if (osi_dma->mac > OSI_MAC_HW_MGBE) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA: Invalid MAC HW type\n", 0ULL); - return -1; + ret = -1; + goto fail; } if ((osi_dma->tx_ring_sz == 0U) || - !(is_power_of_two(osi_dma->tx_ring_sz)) || + (is_power_of_two(osi_dma->tx_ring_sz) == 0U) || (osi_dma->tx_ring_sz < HW_MIN_RING_SZ) || (osi_dma->tx_ring_sz > default_rz[osi_dma->mac])) { - osi_dma->tx_ring_sz = default_rz[osi_dma->mac]; - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "DMA: Using default Tx ring size: \n", + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, + "DMA: Invalid Tx ring size:\n", osi_dma->tx_ring_sz); + ret = -1; + goto fail; } if ((osi_dma->rx_ring_sz == 0U) || - !(is_power_of_two(osi_dma->rx_ring_sz)) || + (is_power_of_two(osi_dma->rx_ring_sz) == 0U) || (osi_dma->rx_ring_sz < HW_MIN_RING_SZ) || (osi_dma->rx_ring_sz > max_rz[osi_dma->mac])) { - osi_dma->rx_ring_sz = default_rz[osi_dma->mac]; - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "DMA: Using default rx ring size: \n", + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, + "DMA: Invalid Rx ring size:\n", osi_dma->tx_ring_sz); + ret = -1; + goto fail; } - - i_ops[osi_dma->mac](&g_ops[osi_dma->mac]); - - if (s_init[osi_dma->mac] != OSI_NULL) { - osi_dma->safety_config = s_init[osi_dma->mac](); - } - +#ifndef OSI_STRIPPED_LIB + i_ops[osi_dma->mac](&dma_gops[osi_dma->mac]); +#endif if (init_desc_ops(osi_dma) < 0) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA desc ops init failed\n", 0ULL); - return -1; + ret = -1; + goto fail; } - if (validate_func_ptrs(osi_dma, &g_ops[osi_dma->mac]) < 0) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, +#ifndef OSI_STRIPPED_LIB + if (validate_func_ptrs(osi_dma, &dma_gops[osi_dma->mac]) < 0) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA ops validation failed\n", 0ULL); - 
return -1; + ret = -1; + goto fail; } +#endif - l_dma->ops_p = &g_ops[osi_dma->mac]; + l_dma->ops_p = &dma_gops[osi_dma->mac]; l_dma->init_done = OSI_ENABLE; - return 0; +fail: + return ret; +} + +static inline void start_dma(const struct osi_dma_priv_data *const osi_dma, nveu32_t dma_chan) +{ + nveu32_t chan = dma_chan & 0xFU; + const nveu32_t tx_dma_reg[2] = { + EQOS_DMA_CHX_TX_CTRL(chan), + MGBE_DMA_CHX_TX_CTRL(chan) + }; + const nveu32_t rx_dma_reg[2] = { + EQOS_DMA_CHX_RX_CTRL(chan), + MGBE_DMA_CHX_RX_CTRL(chan) + }; + nveu32_t val; + + /* Start Tx DMA */ + val = osi_readl((nveu8_t *)osi_dma->base + tx_dma_reg[osi_dma->mac]); + val |= OSI_BIT(0); + osi_writel(val, (nveu8_t *)osi_dma->base + tx_dma_reg[osi_dma->mac]); + + /* Start Rx DMA */ + val = osi_readl((nveu8_t *)osi_dma->base + rx_dma_reg[osi_dma->mac]); + val |= OSI_BIT(0); + val &= ~OSI_BIT(31); + osi_writel(val, (nveu8_t *)osi_dma->base + rx_dma_reg[osi_dma->mac]); +} + +static void init_dma_channel(const struct osi_dma_priv_data *const osi_dma, + nveu32_t dma_chan) +{ + nveu32_t chan = dma_chan & 0xFU; + nveu32_t riwt = osi_dma->rx_riwt & 0xFFFU; + const nveu32_t intr_en_reg[2] = { + EQOS_DMA_CHX_INTR_ENA(chan), + MGBE_DMA_CHX_INTR_ENA(chan) + }; + const nveu32_t chx_ctrl_reg[2] = { + EQOS_DMA_CHX_CTRL(chan), + MGBE_DMA_CHX_CTRL(chan) + }; + const nveu32_t tx_ctrl_reg[2] = { + EQOS_DMA_CHX_TX_CTRL(chan), + MGBE_DMA_CHX_TX_CTRL(chan) + }; + const nveu32_t rx_ctrl_reg[2] = { + EQOS_DMA_CHX_RX_CTRL(chan), + MGBE_DMA_CHX_RX_CTRL(chan) + }; + const nveu32_t rx_wdt_reg[2] = { + EQOS_DMA_CHX_RX_WDT(chan), + MGBE_DMA_CHX_RX_WDT(chan) + }; + const nveu32_t tx_pbl[2] = { + EQOS_DMA_CHX_TX_CTRL_TXPBL_RECOMMENDED, + ((((MGBE_TXQ_SIZE / osi_dma->num_dma_chans) - + osi_dma->mtu) / (MGBE_AXI_DATAWIDTH / 8U)) - 5U) + }; + const nveu32_t rx_pbl[2] = { + EQOS_DMA_CHX_RX_CTRL_RXPBL_RECOMMENDED, + ((MGBE_RXQ_SIZE / osi_dma->num_dma_chans) / 2U) + }; + const nveu32_t rwt_val[2] = { + (((riwt * (EQOS_AXI_CLK_FREQ / 
OSI_ONE_MEGA_HZ)) / + EQOS_DMA_CHX_RX_WDT_RWTU) & EQOS_DMA_CHX_RX_WDT_RWT_MASK), + (((riwt * ((nveu32_t)MGBE_AXI_CLK_FREQ / OSI_ONE_MEGA_HZ)) / + MGBE_DMA_CHX_RX_WDT_RWTU) & MGBE_DMA_CHX_RX_WDT_RWT_MASK) + }; + const nveu32_t rwtu_val[2] = { + EQOS_DMA_CHX_RX_WDT_RWTU_512_CYCLE, + MGBE_DMA_CHX_RX_WDT_RWTU_2048_CYCLE + }; + const nveu32_t rwtu_mask[2] = { + EQOS_DMA_CHX_RX_WDT_RWTU_MASK, + MGBE_DMA_CHX_RX_WDT_RWTU_MASK + }; + const nveu32_t owrq = (MGBE_DMA_CHX_RX_CNTRL2_OWRQ_MCHAN / osi_dma->num_dma_chans); + const nveu32_t owrq_arr[OSI_MGBE_MAX_NUM_CHANS] = { + MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN, owrq, owrq, owrq, + owrq, owrq, owrq, owrq, owrq, owrq + }; + nveu32_t val; + + /* Enable Transmit/Receive interrupts */ + val = osi_readl((nveu8_t *)osi_dma->base + intr_en_reg[osi_dma->mac]); + val |= (DMA_CHX_INTR_TIE | DMA_CHX_INTR_RIE); + osi_writel(val, (nveu8_t *)osi_dma->base + intr_en_reg[osi_dma->mac]); + + /* Enable PBLx8 */ + val = osi_readl((nveu8_t *)osi_dma->base + chx_ctrl_reg[osi_dma->mac]); + val |= DMA_CHX_CTRL_PBLX8; + osi_writel(val, (nveu8_t *)osi_dma->base + chx_ctrl_reg[osi_dma->mac]); + + /* Program OSP, TSO enable and TXPBL */ + val = osi_readl((nveu8_t *)osi_dma->base + tx_ctrl_reg[osi_dma->mac]); + val |= (DMA_CHX_TX_CTRL_OSP | DMA_CHX_TX_CTRL_TSE); + + if (osi_dma->mac == OSI_MAC_HW_EQOS) { + val |= tx_pbl[osi_dma->mac]; + } else { + /* + * Formula for TxPBL calculation is + * (TxPBL) < ((TXQSize - MTU)/(DATAWIDTH/8)) - 5 + * if TxPBL exceeds the value of 256 then we need to make use of 256 + * as the TxPBL else we should be using the value which we get after + * calculation by using above formula + */ + if (tx_pbl[osi_dma->mac] >= MGBE_DMA_CHX_MAX_PBL) { + val |= MGBE_DMA_CHX_MAX_PBL_VAL; + } else { + val |= ((tx_pbl[osi_dma->mac] / 8U) << MGBE_DMA_CHX_CTRL_PBL_SHIFT); + } + } + osi_writel(val, (nveu8_t *)osi_dma->base + tx_ctrl_reg[osi_dma->mac]); + + val = osi_readl((nveu8_t *)osi_dma->base + rx_ctrl_reg[osi_dma->mac]); + val &= 
~DMA_CHX_RBSZ_MASK; + /** Subtract 30 bytes again which were added for buffer address alignment + * HW doesn't need those extra 30 bytes. If data length received more than + * below programmed value then it will result in two descriptors which + * eventually be dropped by OSI. Subtracting 30 bytes so that HW doesn't receive + * unwanted length data. + **/ + val |= ((osi_dma->rx_buf_len - 30U) << DMA_CHX_RBSZ_SHIFT); + if (osi_dma->mac == OSI_MAC_HW_EQOS) { + val |= rx_pbl[osi_dma->mac]; + } else { + if (rx_pbl[osi_dma->mac] >= MGBE_DMA_CHX_MAX_PBL) { + val |= MGBE_DMA_CHX_MAX_PBL_VAL; + } else { + val |= ((rx_pbl[osi_dma->mac] / 8U) << MGBE_DMA_CHX_CTRL_PBL_SHIFT); + } + } + osi_writel(val, (nveu8_t *)osi_dma->base + rx_ctrl_reg[osi_dma->mac]); + + if ((osi_dma->use_riwt == OSI_ENABLE) && + (osi_dma->rx_riwt < UINT_MAX)) { + val = osi_readl((nveu8_t *)osi_dma->base + rx_wdt_reg[osi_dma->mac]); + val &= ~DMA_CHX_RX_WDT_RWT_MASK; + val |= rwt_val[osi_dma->mac]; + osi_writel(val, (nveu8_t *)osi_dma->base + rx_wdt_reg[osi_dma->mac]); + + val = osi_readl((nveu8_t *)osi_dma->base + rx_wdt_reg[osi_dma->mac]); + val &= ~rwtu_mask[osi_dma->mac]; + val |= rwtu_val[osi_dma->mac]; + osi_writel(val, (nveu8_t *)osi_dma->base + rx_wdt_reg[osi_dma->mac]); + } + + if (osi_dma->mac == OSI_MAC_HW_MGBE) { + /* Update ORRQ in DMA_CH(#i)_Tx_Control2 register */ + val = osi_readl((nveu8_t *)osi_dma->base + MGBE_DMA_CHX_TX_CNTRL2(chan)); + val |= (((MGBE_DMA_CHX_TX_CNTRL2_ORRQ_RECOMMENDED / osi_dma->num_dma_chans)) << + MGBE_DMA_CHX_TX_CNTRL2_ORRQ_SHIFT); + osi_writel(val, (nveu8_t *)osi_dma->base + MGBE_DMA_CHX_TX_CNTRL2(chan)); + + /* Update OWRQ in DMA_CH(#i)_Rx_Control2 register */ + val = osi_readl((nveu8_t *)osi_dma->base + MGBE_DMA_CHX_RX_CNTRL2(chan)); + val |= (owrq_arr[osi_dma->num_dma_chans - 1U] << MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SHIFT); + osi_writel(val, (nveu8_t *)osi_dma->base + MGBE_DMA_CHX_RX_CNTRL2(chan)); + } } nve32_t osi_hw_dma_init(struct osi_dma_priv_data *osi_dma) { - struct 
dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; nveu32_t i, chan; - nve32_t ret; + nve32_t ret = 0; - if (validate_args(osi_dma, l_dma) < 0) { - return -1; + if (dma_validate_args(osi_dma, l_dma) < 0) { + ret = -1; + goto fail; } l_dma->mac_ver = osi_readl((nveu8_t *)osi_dma->base + MAC_VERSION) & MAC_VERSION_SNVER_MASK; if (validate_mac_ver_update_chans(l_dma->mac_ver, - &l_dma->max_chans) == 0) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + &l_dma->num_max_chans, + &l_dma->l_mac_ver) == 0) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "Invalid MAC version\n", (nveu64_t)l_dma->mac_ver); - return -1; + ret = -1; + goto fail; } - if (osi_dma->num_dma_chans > l_dma->max_chans) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + if ((osi_dma->num_dma_chans == 0U) || + (osi_dma->num_dma_chans > l_dma->num_max_chans)) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "Invalid number of DMA channels\n", 0ULL); - return -1; + ret = -1; + goto fail; } if (validate_dma_chans(osi_dma) < 0) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA channels validation failed\n", 0ULL); - return -1; + ret = -1; + goto fail; } - ret = l_dma->ops_p->init_dma_channel(osi_dma); - if (ret < 0) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "dma: init dma channel failed\n", 0ULL); - return ret; - } - - ret = dma_desc_init(osi_dma, l_dma->ops_p); + ret = dma_desc_init(osi_dma); if (ret != 0) { - return ret; - } - - if ((l_dma->mac_ver != OSI_EQOS_MAC_4_10) && - (l_dma->mac_ver != OSI_EQOS_MAC_5_00)) { - l_dma->vm_intr = OSI_ENABLE; + goto fail; } /* Enable channel interrupts at wrapper level and start DMA */ for (i = 0; i < osi_dma->num_dma_chans; i++) { chan = osi_dma->dma_chans[i]; - l_dma->ops_p->enable_chan_tx_intr(osi_dma->base, chan); - l_dma->ops_p->enable_chan_rx_intr(osi_dma->base, chan); - l_dma->ops_p->start_dma(osi_dma, chan); + init_dma_channel(osi_dma, 
chan); + + ret = intr_fn[OSI_DMA_INTR_ENABLE](osi_dma, + VIRT_INTR_CHX_CNTRL(chan), + VIRT_INTR_CHX_STATUS(chan), + ((osi_dma->mac == OSI_MAC_HW_MGBE) ? + MGBE_DMA_CHX_STATUS(chan) : + EQOS_DMA_CHX_STATUS(chan)), + OSI_BIT(OSI_DMA_CH_TX_INTR)); + if (ret < 0) { + goto fail; + } + + ret = intr_fn[OSI_DMA_INTR_ENABLE](osi_dma, + VIRT_INTR_CHX_CNTRL(chan), + VIRT_INTR_CHX_STATUS(chan), + ((osi_dma->mac == OSI_MAC_HW_MGBE) ? + MGBE_DMA_CHX_STATUS(chan) : + EQOS_DMA_CHX_STATUS(chan)), + OSI_BIT(OSI_DMA_CH_RX_INTR)); + if (ret < 0) { + goto fail; + } + + start_dma(osi_dma, chan); } /** @@ -342,158 +608,81 @@ nve32_t osi_hw_dma_init(struct osi_dma_priv_data *osi_dma) osi_dma->ptp_flag = (OSI_PTP_SYNC_SLAVE | OSI_PTP_SYNC_TWOSTEP); } - return 0; +fail: + return ret; +} + +static inline void stop_dma(const struct osi_dma_priv_data *const osi_dma, + nveu32_t dma_chan) +{ + nveu32_t chan = dma_chan & 0xFU; + const nveu32_t dma_tx_reg[2] = { + EQOS_DMA_CHX_TX_CTRL(chan), + MGBE_DMA_CHX_TX_CTRL(chan) + }; + const nveu32_t dma_rx_reg[2] = { + EQOS_DMA_CHX_RX_CTRL(chan), + MGBE_DMA_CHX_RX_CTRL(chan) + }; + nveu32_t val; + + /* Stop Tx DMA */ + val = osi_readl((nveu8_t *)osi_dma->base + dma_tx_reg[osi_dma->mac]); + val &= ~OSI_BIT(0); + osi_writel(val, (nveu8_t *)osi_dma->base + dma_tx_reg[osi_dma->mac]); + + /* Stop Rx DMA */ + val = osi_readl((nveu8_t *)osi_dma->base + dma_rx_reg[osi_dma->mac]); + val &= ~OSI_BIT(0); + val |= OSI_BIT(31); + osi_writel(val, (nveu8_t *)osi_dma->base + dma_rx_reg[osi_dma->mac]); } nve32_t osi_hw_dma_deinit(struct osi_dma_priv_data *osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + nve32_t ret = 0; nveu32_t i; - if (validate_args(osi_dma, l_dma) < 0) { - return -1; + if (dma_validate_args(osi_dma, l_dma) < 0) { + ret = -1; + goto fail; } - if (osi_dma->num_dma_chans > l_dma->max_chans) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + if (osi_dma->num_dma_chans > 
l_dma->num_max_chans) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "Invalid number of DMA channels\n", 0ULL); - return -1; + ret = -1; + goto fail; } if (validate_dma_chans(osi_dma) < 0) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA channels validation failed\n", 0ULL); - return -1; + ret = -1; + goto fail; } for (i = 0; i < osi_dma->num_dma_chans; i++) { - l_dma->ops_p->stop_dma(osi_dma, osi_dma->dma_chans[i]); + stop_dma(osi_dma, osi_dma->dma_chans[i]); } - /* FIXME: Need to fix */ -// l_dma->magic_num = 0; -// l_dma->init_done = OSI_DISABLE; - - return 0; -} - -nve32_t osi_disable_chan_tx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan) -{ - struct dma_local *l_dma = (struct dma_local *)osi_dma; - - if (validate_args(osi_dma, l_dma) < 0) { - return -1; - } - - if (validate_dma_chan_num(osi_dma, chan) < 0) { - return -1; - } - - l_dma->ops_p->disable_chan_tx_intr(osi_dma->base, chan); - - return 0; -} - -nve32_t osi_enable_chan_tx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan) -{ - struct dma_local *l_dma = (struct dma_local *)osi_dma; - - if (validate_args(osi_dma, l_dma) < 0) { - return -1; - } - - if (validate_dma_chan_num(osi_dma, chan) < 0) { - return -1; - } - - l_dma->ops_p->enable_chan_tx_intr(osi_dma->base, chan); - - return 0; -} - -nve32_t osi_disable_chan_rx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan) -{ - struct dma_local *l_dma = (struct dma_local *)osi_dma; - - if (validate_args(osi_dma, l_dma) < 0) { - return -1; - } - - if (validate_dma_chan_num(osi_dma, chan) < 0) { - return -1; - } - - l_dma->ops_p->disable_chan_rx_intr(osi_dma->base, chan); - - return 0; -} - -nve32_t osi_enable_chan_rx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan) -{ - struct dma_local *l_dma = (struct dma_local *)osi_dma; - - if (validate_args(osi_dma, l_dma) < 0) { - return -1; - } - - if (validate_dma_chan_num(osi_dma, chan) < 0) { - return -1; - } - - 
l_dma->ops_p->enable_chan_rx_intr(osi_dma->base, chan); - - return 0; -} - -nve32_t osi_clear_vm_tx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan) -{ - struct dma_local *l_dma = (struct dma_local *)osi_dma; - - if (validate_args(osi_dma, l_dma) < 0) { - return -1; - } - - if (validate_dma_chan_num(osi_dma, chan) < 0) { - return -1; - } - - l_dma->ops_p->clear_vm_tx_intr(osi_dma->base, chan); - - return 0; -} - -nve32_t osi_clear_vm_rx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan) -{ - struct dma_local *l_dma = (struct dma_local *)osi_dma; - - if (validate_args(osi_dma, l_dma) < 0) { - return -1; - } - - if (validate_dma_chan_num(osi_dma, chan) < 0) { - return -1; - } - - l_dma->ops_p->clear_vm_rx_intr(osi_dma->base, chan); - - return 0; +fail: + return ret; } nveu32_t osi_get_global_dma_status(struct osi_dma_priv_data *osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + nveu32_t ret = 0U; - if (validate_args(osi_dma, l_dma) < 0) { - return 0; + if (dma_validate_args(osi_dma, l_dma) < 0) { + goto fail; } - return osi_readl((nveu8_t *)osi_dma->base + HW_GLOBAL_DMA_STATUS); + ret = osi_readl((nveu8_t *)osi_dma->base + HW_GLOBAL_DMA_STATUS); +fail: + return ret; } nve32_t osi_handle_dma_intr(struct osi_dma_priv_data *osi_dma, @@ -501,86 +690,54 @@ nve32_t osi_handle_dma_intr(struct osi_dma_priv_data *osi_dma, nveu32_t tx_rx, nveu32_t en_dis) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; - typedef void (*dma_intr_fn)(void *base, nveu32_t ch); - dma_intr_fn fn[2][2][2] = { - { { l_dma->ops_p->disable_chan_tx_intr, l_dma->ops_p->enable_chan_tx_intr }, - { l_dma->ops_p->disable_chan_rx_intr, l_dma->ops_p->enable_chan_rx_intr } }, - { { l_dma->ops_p->clear_vm_tx_intr, l_dma->ops_p->enable_chan_tx_intr }, - { l_dma->ops_p->clear_vm_rx_intr, l_dma->ops_p->enable_chan_rx_intr } } - }; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + nve32_t ret 
= 0; - if (validate_args(osi_dma, l_dma) < 0) { - return -1; + if (dma_validate_args(osi_dma, l_dma) < 0) { + ret = -1; + goto fail; } if (validate_dma_chan_num(osi_dma, chan) < 0) { - return -1; + ret = -1; + goto fail; } if ((tx_rx > OSI_DMA_CH_RX_INTR) || (en_dis > OSI_DMA_INTR_ENABLE)) { - return -1; + ret = -1; + goto fail; } - fn[l_dma->vm_intr][tx_rx][en_dis](osi_dma->base, chan); + ret = intr_fn[en_dis](osi_dma, VIRT_INTR_CHX_CNTRL(chan), + VIRT_INTR_CHX_STATUS(chan), ((osi_dma->mac == OSI_MAC_HW_MGBE) ? + MGBE_DMA_CHX_STATUS(chan) : EQOS_DMA_CHX_STATUS(chan)), + OSI_BIT(tx_rx)); - return 0; +fail: + return ret; } -nve32_t osi_start_dma(struct osi_dma_priv_data *osi_dma, - nveu32_t chan) +nveu32_t osi_get_refill_rx_desc_cnt(const struct osi_dma_priv_data *const osi_dma, + nveu32_t chan) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; - - if (validate_args(osi_dma, l_dma) < 0) { - return -1; - } - - if (validate_dma_chan_num(osi_dma, chan) < 0) { - return -1; - } - - l_dma->ops_p->start_dma(osi_dma, chan); - - return 0; -} - -nve32_t osi_stop_dma(struct osi_dma_priv_data *osi_dma, - nveu32_t chan) -{ - struct dma_local *l_dma = (struct dma_local *)osi_dma; - - if (validate_args(osi_dma, l_dma) < 0) { - return -1; - } - - if (validate_dma_chan_num(osi_dma, chan) < 0) { - return -1; - } - - l_dma->ops_p->stop_dma(osi_dma, chan); - - return 0; -} - -nveu32_t osi_get_refill_rx_desc_cnt(struct osi_dma_priv_data *osi_dma, - unsigned int chan) -{ - struct osi_rx_ring *rx_ring = osi_dma->rx_ring[chan]; + const struct osi_rx_ring *const rx_ring = osi_dma->rx_ring[chan]; + nveu32_t ret = 0U; if ((rx_ring == OSI_NULL) || (rx_ring->cur_rx_idx >= osi_dma->rx_ring_sz) || (rx_ring->refill_idx >= osi_dma->rx_ring_sz)) { - return 0; + goto fail; } - return (rx_ring->cur_rx_idx - rx_ring->refill_idx) & + ret = (rx_ring->cur_rx_idx - rx_ring->refill_idx) & (osi_dma->rx_ring_sz - 1U); +fail: + return ret; } /** - * @brief rx_dma_desc_validate_args - DMA Rx descriptor 
init args Validate + * @brief rx_dma_desc_dma_validate_args - DMA Rx descriptor init args Validate * * Algorithm: Validates DMA Rx descriptor init argments. * @@ -597,30 +754,36 @@ nveu32_t osi_get_refill_rx_desc_cnt(struct osi_dma_priv_data *osi_dma, * @retval 0 on success * @retval -1 on failure. */ -static inline nve32_t rx_dma_desc_validate_args( +static inline nve32_t rx_dma_desc_dma_validate_args( struct osi_dma_priv_data *osi_dma, struct dma_local *l_dma, - struct osi_rx_ring *rx_ring, + const struct osi_rx_ring *const rx_ring, nveu32_t chan) { - if (validate_args(osi_dma, l_dma) < 0) { - return -1; + nve32_t ret = 0; + + if (dma_validate_args(osi_dma, l_dma) < 0) { + ret = -1; + goto fail; } if (!((rx_ring != OSI_NULL) && (rx_ring->rx_swcx != OSI_NULL) && (rx_ring->rx_desc != OSI_NULL))) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma: Invalid pointers\n", 0ULL); - return -1; + ret = -1; + goto fail; } if (validate_dma_chan_num(osi_dma, chan) < 0) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma: Invalid channel\n", 0ULL); - return -1; + ret = -1; + goto fail; } - return 0; +fail: + return ret; } /** @@ -641,8 +804,8 @@ static inline nve32_t rx_dma_desc_validate_args( * - De-initialization: No * */ -static inline void rx_dma_handle_ioc(struct osi_dma_priv_data *osi_dma, - struct osi_rx_ring *rx_ring, +static inline void rx_dma_handle_ioc(const struct osi_dma_priv_data *const osi_dma, + const struct osi_rx_ring *const rx_ring, struct osi_rx_desc *rx_desc) { /* reset IOC bit if RWIT is enabled */ @@ -663,14 +826,16 @@ static inline void rx_dma_handle_ioc(struct osi_dma_priv_data *osi_dma, nve32_t osi_rx_dma_desc_init(struct osi_dma_priv_data *osi_dma, struct osi_rx_ring *rx_ring, nveu32_t chan) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; - nveu64_t tailptr = 0; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; struct 
osi_rx_swcx *rx_swcx = OSI_NULL; struct osi_rx_desc *rx_desc = OSI_NULL; + nveu64_t tailptr = 0; + nve32_t ret = 0; - if (rx_dma_desc_validate_args(osi_dma, l_dma, rx_ring, chan) < 0) { + if (rx_dma_desc_dma_validate_args(osi_dma, l_dma, rx_ring, chan) < 0) { /* Return on arguments validation failure */ - return -1; + ret = -1; + goto fail; } /* Refill buffers */ @@ -714,103 +879,139 @@ nve32_t osi_rx_dma_desc_init(struct osi_dma_priv_data *osi_dma, if (osi_unlikely(tailptr < rx_ring->rx_desc_phy_addr)) { /* Will not hit this case, used for CERT-C compliance */ - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma: Invalid tailptr\n", 0ULL); - return -1; + ret = -1; + goto fail; } - l_dma->ops_p->update_rx_tailptr(osi_dma->base, chan, tailptr); + update_rx_tail_ptr(osi_dma, chan, tailptr); - return 0; +fail: + return ret; } nve32_t osi_set_rx_buf_len(struct osi_dma_priv_data *osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + nveu32_t rx_buf_len; + nve32_t ret = 0; - if (dma_validate_args(osi_dma, l_dma) < 0) { - return -1; + if (dma_validate_args(osi_dma, l_dma) < 0) { + ret = -1; + goto fail; } - l_dma->ops_p->set_rx_buf_len(osi_dma); + if (osi_dma->mtu > OSI_MAX_MTU_SIZE) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, + "Invalid MTU setting\n", 0ULL); + ret = -1; + goto fail; + } - return 0; + /* Add Ethernet header + FCS */ + rx_buf_len = osi_dma->mtu + OSI_ETH_HLEN + NV_VLAN_HLEN; + + /* Add 30 bytes (15 bytes extra at head portion for alignment and 15 bytes + * extra to cover tail portion) again for the buffer address alignment + */ + rx_buf_len += 30U; + + /* Buffer alignment */ + osi_dma->rx_buf_len = ((rx_buf_len + (AXI_BUS_WIDTH - 1U)) & + ~(AXI_BUS_WIDTH - 1U)); + +fail: + return ret; } nve32_t osi_dma_get_systime_from_mac(struct osi_dma_priv_data *const osi_dma, nveu32_t *sec, nveu32_t *nsec) { - struct dma_local *l_dma = 
(struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + nve32_t ret = 0; - if (validate_args(osi_dma, l_dma) < 0) { - return -1; + if (dma_validate_args(osi_dma, l_dma) < 0) { + ret = -1; } common_get_systime_from_mac(osi_dma->base, osi_dma->mac, sec, nsec); - return 0; + return ret; } nveu32_t osi_is_mac_enabled(struct osi_dma_priv_data *const osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + nveu32_t ret = OSI_DISABLE; - if (validate_args(osi_dma, l_dma) < 0) { - return OSI_DISABLE; + if (dma_validate_args(osi_dma, l_dma) < 0) { + goto fail; } - return common_is_mac_enabled(osi_dma->base, osi_dma->mac); + ret = common_is_mac_enabled(osi_dma->base, osi_dma->mac); +fail: + return ret; } nve32_t osi_hw_transmit(struct osi_dma_priv_data *osi_dma, nveu32_t chan) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + nve32_t ret = 0; - if (osi_unlikely(validate_args(osi_dma, l_dma) < 0)) { - return -1; + if (osi_unlikely(dma_validate_args(osi_dma, l_dma) < 0)) { + ret = -1; + goto fail; } if (osi_unlikely(validate_dma_chan_num(osi_dma, chan) < 0)) { - return -1; + ret = -1; + goto fail; } if (osi_unlikely(osi_dma->tx_ring[chan] == OSI_NULL)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA: Invalid Tx ring\n", 0ULL); - return -1; + ret = -1; + goto fail; } - return hw_transmit(osi_dma, osi_dma->tx_ring[chan], l_dma->ops_p, chan); + ret = hw_transmit(osi_dma, osi_dma->tx_ring[chan], chan); +fail: + return ret; } +#ifdef OSI_DEBUG nve32_t osi_dma_ioctl(struct osi_dma_priv_data *osi_dma) { struct dma_local *l_dma = (struct dma_local *)osi_dma; struct osi_dma_ioctl_data *data; - if (osi_unlikely(validate_args(osi_dma, l_dma) < 0)) { + if (osi_unlikely(dma_validate_args(osi_dma, l_dma) < 0)) { return -1; } data = 
&osi_dma->ioctl_data; switch (data->cmd) { -#ifdef OSI_DEBUG case OSI_DMA_IOCTL_CMD_REG_DUMP: reg_dump(osi_dma); break; case OSI_DMA_IOCTL_CMD_STRUCTS_DUMP: structs_dump(osi_dma); break; -#endif /* OSI_DEBUG */ + case OSI_DMA_IOCTL_CMD_DEBUG_INTR_CONFIG: + l_dma->ops_p->debug_intr_config(osi_dma); + break; default: - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA: Invalid IOCTL command", 0ULL); return -1; } return 0; } +#endif /* OSI_DEBUG */ #ifndef OSI_STRIPPED_LIB @@ -840,7 +1041,7 @@ static inline nve32_t osi_slot_args_validate(struct osi_dma_priv_data *osi_dma, struct dma_local *l_dma, nveu32_t set) { - if (validate_args(osi_dma, l_dma) < 0) { + if (dma_validate_args(osi_dma, l_dma) < 0) { return -1; } @@ -871,7 +1072,7 @@ nve32_t osi_config_slot_function(struct osi_dma_priv_data *osi_dma, chan = osi_dma->dma_chans[i]; if ((chan == 0x0U) || - (chan >= l_dma->max_chans)) { + (chan >= l_dma->num_max_chans)) { /* Ignore 0 and invalid channels */ continue; } @@ -902,17 +1103,6 @@ nve32_t osi_config_slot_function(struct osi_dma_priv_data *osi_dma, return 0; } -nve32_t osi_validate_dma_regs(struct osi_dma_priv_data *osi_dma) -{ - struct dma_local *l_dma = (struct dma_local *)osi_dma; - - if (validate_args(osi_dma, l_dma) < 0) { - return -1; - } - - return l_dma->ops_p->validate_regs(osi_dma); -} - nve32_t osi_txring_empty(struct osi_dma_priv_data *osi_dma, nveu32_t chan) { struct osi_tx_ring *tx_ring = osi_dma->tx_ring[chan]; diff --git a/osi/dma/osi_dma_txrx.c b/osi/dma/osi_dma_txrx.c index 336ed32..93c529e 100644 --- a/osi/dma/osi_dma_txrx.c +++ b/osi/dma/osi_dma_txrx.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -32,45 +32,6 @@ static struct desc_ops d_ops[MAX_MAC_IP_TYPES]; -/** - * @brief get_rx_err_stats - Detect Errors from Rx Descriptor - * - * @note - * Algorithm: - * - This routine will be invoked by OSI layer itself which - * checks for the Last Descriptor and updates the receive status errors - * accordingly. - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @param[in] rx_desc: Rx Descriptor. - * @param[in, out] pkt_err_stats: Packet error stats which stores the errors - * reported - */ -static inline void get_rx_err_stats(struct osi_rx_desc *rx_desc, - struct osi_pkt_err_stats *pkt_err_stats) -{ - /* increment rx crc if we see CE bit set */ - if ((rx_desc->rdes3 & RDES3_ERR_CRC) == RDES3_ERR_CRC) { - pkt_err_stats->rx_crc_error = - osi_update_stats_counter( - pkt_err_stats->rx_crc_error, - 1UL); - } - - /* increment rx frame error if we see RE bit set */ - if ((rx_desc->rdes3 & RDES3_ERR_RE) == RDES3_ERR_RE) { - pkt_err_stats->rx_frame_error = - osi_update_stats_counter( - pkt_err_stats->rx_frame_error, - 1UL); - } -} - /** * @brief validate_rx_completions_arg- Validate input argument of rx_completions * @@ -97,34 +58,39 @@ static inline void get_rx_err_stats(struct osi_rx_desc *rx_desc, static inline nve32_t validate_rx_completions_arg( struct osi_dma_priv_data *osi_dma, nveu32_t chan, - nveu32_t *more_data_avail, + const nveu32_t *const more_data_avail, struct osi_rx_ring **rx_ring, struct osi_rx_pkt_cx **rx_pkt_cx) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + const struct dma_local *const l_dma = (struct dma_local *)(void *)osi_dma; + nve32_t ret = 0; if (osi_unlikely((osi_dma == OSI_NULL) || (more_data_avail == OSI_NULL) || - (chan >= l_dma->max_chans))) { - return -1; + (chan >= l_dma->num_max_chans))) { + ret = -1; + goto fail; } 
*rx_ring = osi_dma->rx_ring[chan]; if (osi_unlikely(*rx_ring == OSI_NULL)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "validate_input_rx_completions: Invalid pointers\n", 0ULL); - return -1; + ret = -1; + goto fail; } *rx_pkt_cx = &(*rx_ring)->rx_pkt_cx; if (osi_unlikely(*rx_pkt_cx == OSI_NULL)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "validate_input_rx_completions: Invalid pointers\n", 0ULL); - return -1; + ret = -1; + goto fail; } - return 0; +fail: + return ret; } nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma, @@ -139,34 +105,42 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma, struct osi_rx_desc *context_desc = OSI_NULL; nveu32_t ip_type = osi_dma->mac; nve32_t received = 0; +#ifndef OSI_STRIPPED_LIB nve32_t received_resv = 0; +#endif /* !OSI_STRIPPED_LIB */ nve32_t ret = 0; ret = validate_rx_completions_arg(osi_dma, chan, more_data_avail, &rx_ring, &rx_pkt_cx); if (osi_unlikely(ret < 0)) { - return ret; + received = -1; + goto fail; } if (rx_ring->cur_rx_idx >= osi_dma->rx_ring_sz) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid cur_rx_idx\n", 0ULL); - return -1; + received = -1; + goto fail; } /* Reset flag to indicate if more Rx frames available to OSD layer */ *more_data_avail = OSI_NONE; - while ((received < budget) && (received_resv < budget)) { - osi_memset(rx_pkt_cx, 0U, sizeof(*rx_pkt_cx)); + while ((received < budget) +#ifndef OSI_STRIPPED_LIB + && (received_resv < budget) +#endif /* !OSI_STRIPPED_LIB */ + ) { rx_desc = rx_ring->rx_desc + rx_ring->cur_rx_idx; - rx_swcx = rx_ring->rx_swcx + rx_ring->cur_rx_idx; /* check for data availability */ if ((rx_desc->rdes3 & RDES3_OWN) == RDES3_OWN) { break; } -#ifdef OSI_DEBUG + rx_swcx = rx_ring->rx_swcx + rx_ring->cur_rx_idx; + osi_memset(rx_pkt_cx, 0U, sizeof(*rx_pkt_cx)); +#if defined 
OSI_DEBUG && !defined OSI_STRIPPED_LIB if (osi_dma->enable_desc_dump == 1U) { desc_dump(osi_dma, rx_ring->cur_rx_idx, rx_ring->cur_rx_idx, RX_DESC_DUMP, chan); @@ -175,6 +149,7 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma, INCR_RX_DESC_INDEX(rx_ring->cur_rx_idx, osi_dma->rx_ring_sz); +#ifndef OSI_STRIPPED_LIB if (osi_unlikely(rx_swcx->buf_virt_addr == osi_dma->resv_buf_virt_addr)) { rx_swcx->buf_virt_addr = OSI_NULL; @@ -187,6 +162,7 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma, } continue; } +#endif /* !OSI_STRIPPED_LIB */ /* packet already processed */ if ((rx_swcx->flags & OSI_RX_SWCX_PROCESSED) == @@ -227,19 +203,22 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma, * are set */ rx_pkt_cx->flags &= ~OSI_PKT_CX_VALID; +#ifndef OSI_STRIPPED_LIB d_ops[ip_type].update_rx_err_stats(rx_desc, &osi_dma->pkt_err_stats); +#endif /* !OSI_STRIPPED_LIB */ } /* Check if COE Rx checksum is valid */ d_ops[ip_type].get_rx_csum(rx_desc, rx_pkt_cx); +#ifndef OSI_STRIPPED_LIB /* Get Rx VLAN from descriptor */ d_ops[ip_type].get_rx_vlan(rx_desc, rx_pkt_cx); /* get_rx_hash for RSS */ d_ops[ip_type].get_rx_hash(rx_desc, rx_pkt_cx); - +#endif /* !OSI_STRIPPED_LIB */ context_desc = rx_ring->rx_desc + rx_ring->cur_rx_idx; /* Get rx time stamp */ ret = d_ops[ip_type].get_rx_hwstamp(osi_dma, rx_desc, @@ -273,21 +252,25 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma, osi_dma->rx_buf_len, rx_pkt_cx, rx_swcx); } else { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid function pointer\n", 0ULL); - return -1; + received = -1; + goto fail; } } +#ifndef OSI_STRIPPED_LIB osi_dma->dstats.q_rx_pkt_n[chan] = osi_update_stats_counter( osi_dma->dstats.q_rx_pkt_n[chan], 1UL); osi_dma->dstats.rx_pkt_n = osi_update_stats_counter(osi_dma->dstats.rx_pkt_n, 1UL); +#endif /* !OSI_STRIPPED_LIB */ received++; } +#ifndef OSI_STRIPPED_LIB /* If 
budget is done, check if HW ring still has unprocessed * Rx packets, so that the OSD layer can decide to schedule * this function again. @@ -304,10 +287,13 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma, *more_data_avail = OSI_ENABLE; } } +#endif /* !OSI_STRIPPED_LIB */ +fail: return received; } +#ifndef OSI_STRIPPED_LIB /** * @brief inc_tx_pkt_stats - Increment Tx packet count Stats * @@ -437,7 +423,6 @@ static inline void get_tx_err_stats(struct osi_tx_desc *tx_desc, } } -#ifndef OSI_STRIPPED_LIB nve32_t osi_clear_tx_pkt_err_stats(struct osi_dma_priv_data *osi_dma) { nve32_t ret = -1; @@ -509,23 +494,26 @@ static inline nve32_t validate_tx_completions_arg( nveu32_t chan, struct osi_tx_ring **tx_ring) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + const struct dma_local *const l_dma = (struct dma_local *)(void *)osi_dma; + nve32_t ret = 0; if (osi_unlikely((osi_dma == OSI_NULL) || - (chan >= l_dma->max_chans))) { - return -1; + (chan >= l_dma->num_max_chans))) { + ret = -1; + goto fail; } *tx_ring = osi_dma->tx_ring[chan]; if (osi_unlikely(*tx_ring == OSI_NULL)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "validate_tx_completions_arg: Invalid pointers\n", 0ULL); - return -1; + ret = -1; + goto fail; } - - return 0; +fail: + return ret; } /** @@ -538,15 +526,15 @@ static inline nve32_t validate_tx_completions_arg( * @retval 1 if condition is true * @retval 0 if condition is false. */ -static inline unsigned int is_ptp_twostep_or_slave_mode(unsigned int ptp_flag) +static inline nveu32_t is_ptp_twostep_or_slave_mode(nveu32_t ptp_flag) { return (((ptp_flag & OSI_PTP_SYNC_SLAVE) == OSI_PTP_SYNC_SLAVE) || ((ptp_flag & OSI_PTP_SYNC_TWOSTEP) == OSI_PTP_SYNC_TWOSTEP)) ? 
OSI_ENABLE : OSI_DISABLE; } -int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, - unsigned int chan, int budget) +nve32_t osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, + nveu32_t chan, nve32_t budget) { struct osi_tx_ring *tx_ring = OSI_NULL; struct osi_txdone_pkt_cx *txdone_pkt_cx = OSI_NULL; @@ -560,15 +548,17 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, ret = validate_tx_completions_arg(osi_dma, chan, &tx_ring); if (osi_unlikely(ret < 0)) { - return ret; + processed = -1; + goto fail; } txdone_pkt_cx = &tx_ring->txdone_pkt_cx; entry = tx_ring->clean_idx; +#ifndef OSI_STRIPPED_LIB osi_dma->dstats.tx_clean_n[chan] = osi_update_stats_counter(osi_dma->dstats.tx_clean_n[chan], 1U); - +#endif /* !OSI_STRIPPED_LIB */ while ((entry != tx_ring->cur_tx_idx) && (entry < osi_dma->tx_ring_sz) && (processed < budget)) { osi_memset(txdone_pkt_cx, 0U, sizeof(*txdone_pkt_cx)); @@ -592,11 +582,15 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, if (((tx_desc->tdes3 & TDES3_ES_BITS) != 0U) && (osi_dma->mac != OSI_MAC_HW_MGBE)) { txdone_pkt_cx->flags |= OSI_TXDONE_CX_ERROR; +#ifndef OSI_STRIPPED_LIB /* fill packet error stats */ get_tx_err_stats(tx_desc, &osi_dma->pkt_err_stats); +#endif /* !OSI_STRIPPED_LIB */ } else { +#ifndef OSI_STRIPPED_LIB inc_tx_pkt_stats(osi_dma, chan); +#endif /* !OSI_STRIPPED_LIB */ } if (processed < INT_MAX) { @@ -659,10 +653,11 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, tx_swcx, txdone_pkt_cx); } else { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid function pointer\n", 0ULL); - return -1; + processed = -1; + goto fail; } tx_desc->tdes3 = 0; @@ -674,6 +669,7 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, tx_swcx->buf_virt_addr = OSI_NULL; tx_swcx->buf_phy_addr = 0; tx_swcx->flags = 0; + tx_swcx->data_idx = 0; INCR_TX_DESC_INDEX(entry, osi_dma->tx_ring_sz); /* Don't 
wait to update tx_ring->clean-idx. It will @@ -684,6 +680,7 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, tx_ring->clean_idx = entry; } +fail: return processed; } @@ -712,18 +709,17 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, * @retval 1 - cntx desc used. */ -static inline nve32_t need_cntx_desc(struct osi_tx_pkt_cx *tx_pkt_cx, - struct osi_tx_swcx *tx_swcx, - struct osi_tx_desc *tx_desc, - unsigned int ptp_sync_flag, - unsigned int mac) +static inline nve32_t need_cntx_desc(const struct osi_tx_pkt_cx *const tx_pkt_cx, + struct osi_tx_swcx *tx_swcx, + struct osi_tx_desc *tx_desc, + nveu32_t ptp_sync_flag, + nveu32_t mac) { nve32_t ret = 0; if (((tx_pkt_cx->flags & OSI_PKT_CX_VLAN) == OSI_PKT_CX_VLAN) || ((tx_pkt_cx->flags & OSI_PKT_CX_TSO) == OSI_PKT_CX_TSO) || ((tx_pkt_cx->flags & OSI_PKT_CX_PTP) == OSI_PKT_CX_PTP)) { - if ((tx_pkt_cx->flags & OSI_PKT_CX_VLAN) == OSI_PKT_CX_VLAN) { /* Set context type */ tx_desc->tdes3 |= TDES3_CTXT; @@ -750,24 +746,22 @@ static inline nve32_t need_cntx_desc(struct osi_tx_pkt_cx *tx_pkt_cx, /* This part of code must be at the end of function */ if ((tx_pkt_cx->flags & OSI_PKT_CX_PTP) == OSI_PKT_CX_PTP) { - if ((mac == OSI_MAC_HW_EQOS) && - ((ptp_sync_flag & OSI_PTP_SYNC_TWOSTEP) == - OSI_PTP_SYNC_TWOSTEP)){ - /* return the current ret value */ - return ret; - } + if (((mac == OSI_MAC_HW_EQOS) && + ((ptp_sync_flag & OSI_PTP_SYNC_TWOSTEP) == OSI_PTP_SYNC_TWOSTEP))) { + /* Doing nothing */ + } else { + /* Set context type */ + tx_desc->tdes3 |= TDES3_CTXT; + /* in case of One-step sync */ + if ((ptp_sync_flag & OSI_PTP_SYNC_ONESTEP) == + OSI_PTP_SYNC_ONESTEP) { + /* Set TDES3_OSTC */ + tx_desc->tdes3 |= TDES3_OSTC; + tx_desc->tdes3 &= ~TDES3_TCMSSV; + } - /* Set context type */ - tx_desc->tdes3 |= TDES3_CTXT; - /* in case of One-step sync */ - if ((ptp_sync_flag & OSI_PTP_SYNC_ONESTEP) == - OSI_PTP_SYNC_ONESTEP) { - /* Set TDES3_OSTC */ - tx_desc->tdes3 |= TDES3_OSTC; - tx_desc->tdes3 
&= ~TDES3_TCMSSV; + ret = 1; } - - ret = 1; } } @@ -784,7 +778,7 @@ static inline nve32_t need_cntx_desc(struct osi_tx_pkt_cx *tx_pkt_cx, * @retval 1 if condition is true * @retval 0 if condition is false. */ -static inline unsigned int is_ptp_onestep_and_master_mode(unsigned int ptp_flag) +static inline nveu32_t is_ptp_onestep_and_master_mode(nveu32_t ptp_flag) { return (((ptp_flag & OSI_PTP_SYNC_MASTER) == OSI_PTP_SYNC_MASTER) && ((ptp_flag & OSI_PTP_SYNC_ONESTEP) == OSI_PTP_SYNC_ONESTEP)) ? @@ -813,11 +807,19 @@ static inline unsigned int is_ptp_onestep_and_master_mode(unsigned int ptp_flag) * @param[in, out] tx_desc: Pointer to transmit descriptor to be filled. * @param[in] tx_swcx: Pointer to corresponding tx descriptor software context. */ +#ifndef OSI_STRIPPED_LIB static inline void fill_first_desc(struct osi_tx_ring *tx_ring, struct osi_tx_pkt_cx *tx_pkt_cx, struct osi_tx_desc *tx_desc, struct osi_tx_swcx *tx_swcx, - unsigned int ptp_flag) + nveu32_t ptp_flag) +#else +static inline void fill_first_desc(OSI_UNUSED struct osi_tx_ring *tx_ring, + struct osi_tx_pkt_cx *tx_pkt_cx, + struct osi_tx_desc *tx_desc, + struct osi_tx_swcx *tx_swcx, + nveu32_t ptp_flag) +#endif /* !OSI_STRIPPED_LIB */ { tx_desc->tdes0 = L32(tx_swcx->buf_phy_addr); tx_desc->tdes1 = H32(tx_swcx->buf_phy_addr); @@ -876,6 +878,7 @@ static inline void fill_first_desc(struct osi_tx_ring *tx_ring, tx_desc->tdes3 &= ~TDES3_TPL_MASK; tx_desc->tdes3 |= tx_pkt_cx->payload_len; } else { +#ifndef OSI_STRIPPED_LIB if ((tx_ring->slot_check == OSI_ENABLE) && (tx_ring->slot_number < OSI_SLOT_NUM_MAX)) { /* Fill Slot number */ @@ -884,6 +887,7 @@ static inline void fill_first_desc(struct osi_tx_ring *tx_ring, tx_ring->slot_number = ((tx_ring->slot_number + 1U) % OSI_SLOT_NUM_MAX); } +#endif /* !OSI_STRIPPED_LIB */ } } @@ -921,55 +925,64 @@ static inline void dmb_oshst(void) * @retval 0 on success * @retval -1 on failure. 
*/ -static inline nve32_t validate_ctx(struct osi_dma_priv_data *osi_dma, - struct osi_tx_pkt_cx *tx_pkt_cx) +static inline nve32_t validate_ctx(const struct osi_dma_priv_data *const osi_dma, + const struct osi_tx_pkt_cx *const tx_pkt_cx) { + nve32_t ret = 0; + + (void) osi_dma; if ((tx_pkt_cx->flags & OSI_PKT_CX_TSO) == OSI_PKT_CX_TSO) { if (osi_unlikely((tx_pkt_cx->tcp_udp_hdrlen / OSI_TSO_HDR_LEN_DIVISOR) > TDES3_THL_MASK)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid TSO header len\n", (nveul64_t)tx_pkt_cx->tcp_udp_hdrlen); + ret = -1; goto fail; } else if (osi_unlikely(tx_pkt_cx->payload_len > TDES3_TPL_MASK)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid TSO payload len\n", (nveul64_t)tx_pkt_cx->payload_len); + ret = -1; goto fail; } else if (osi_unlikely(tx_pkt_cx->mss > TDES2_MSS_MASK)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid MSS\n", (nveul64_t)tx_pkt_cx->mss); + ret = -1; goto fail; + } else { + /* empty statement */ } } else if ((tx_pkt_cx->flags & OSI_PKT_CX_LEN) == OSI_PKT_CX_LEN) { if (osi_unlikely(tx_pkt_cx->payload_len > TDES3_PL_MASK)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid frame len\n", (nveul64_t)tx_pkt_cx->payload_len); + ret = -1; goto fail; } + } else { + /* empty statement */ } if (osi_unlikely(tx_pkt_cx->vtag_id > TDES3_VT_MASK)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid VTAG_ID\n", (nveul64_t)tx_pkt_cx->vtag_id); - goto fail; + ret = -1; } - return 0; fail: - return -1; + return ret; } nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, struct osi_tx_ring *tx_ring, - struct dma_chan_ops *ops, - nveu32_t chan) + nveu32_t dma_chan) { - struct dma_local *l_dma = (struct dma_local 
*)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; struct osi_tx_pkt_cx *tx_pkt_cx = OSI_NULL; struct osi_tx_desc *first_desc = OSI_NULL; struct osi_tx_desc *last_desc = OSI_NULL; @@ -980,18 +993,25 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, nveu32_t f_idx = tx_ring->cur_tx_idx; nveu32_t l_idx = 0; #endif /* OSI_DEBUG */ + nveu32_t chan = dma_chan & 0xFU; + const nveu32_t tail_ptr_reg[2] = { + EQOS_DMA_CHX_TDTP(chan), + MGBE_DMA_CHX_TDTLP(chan) + }; nve32_t cntx_desc_consumed; nveu32_t pkt_id = 0x0U; nveu32_t desc_cnt = 0U; nveu64_t tailptr; nveu32_t entry = 0U; + nve32_t ret = 0; nveu32_t i; entry = tx_ring->cur_tx_idx; if (entry >= osi_dma->tx_ring_sz) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid cur_tx_idx\n", 0ULL); - return -1; + ret = -1; + goto fail; } tx_desc = tx_ring->tx_desc + entry; @@ -1001,15 +1021,18 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, desc_cnt = tx_pkt_cx->desc_cnt; if (osi_unlikely(desc_cnt == 0U)) { /* Will not hit this case */ - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid desc_cnt\n", 0ULL); - return -1; + ret = -1; + goto fail; } if (validate_ctx(osi_dma, tx_pkt_cx) < 0) { - return -1; + ret = -1; + goto fail; } +#ifndef OSI_STRIPPED_LIB /* Context descriptor for VLAN/TSO */ if ((tx_pkt_cx->flags & OSI_PKT_CX_VLAN) == OSI_PKT_CX_VLAN) { osi_dma->dstats.tx_vlan_pkt_n = @@ -1022,6 +1045,7 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, osi_update_stats_counter(osi_dma->dstats.tx_tso_pkt_n, 1UL); } +#endif /* !OSI_STRIPPED_LIB */ cntx_desc_consumed = need_cntx_desc(tx_pkt_cx, tx_swcx, tx_desc, osi_dma->ptp_flag, osi_dma->mac); @@ -1124,7 +1148,9 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, * We need to make sure Tx descriptor updated above is really updated * before setting up the DMA, hence add memory write barrier here. 
*/ - dmb_oshst(); + if (tx_ring->skip_dmb == 0U) { + dmb_oshst(); + } #ifdef OSI_DEBUG if (osi_dma->enable_desc_dump == 1U) { @@ -1138,9 +1164,10 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, (entry * sizeof(struct osi_tx_desc)); if (osi_unlikely(tailptr < tx_ring->tx_desc_phy_addr)) { /* Will not hit this case */ - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid tx_desc_phy_addr\n", 0ULL); - return -1; + ret = -1; + goto fail; } /* @@ -1149,9 +1176,11 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, */ tx_ring->cur_tx_idx = entry; - ops->update_tx_tailptr(osi_dma->base, chan, tailptr); + /* Update the Tx tail pointer */ + osi_writel(L32(tailptr), (nveu8_t *)osi_dma->base + tail_ptr_reg[osi_dma->mac]); - return 0; +fail: + return ret; } /** @@ -1176,22 +1205,37 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, * @retval 0 on success * @retval -1 on failure. */ -static nve32_t rx_dma_desc_initialization(struct osi_dma_priv_data *osi_dma, - nveu32_t chan, - struct dma_chan_ops *ops) +static nve32_t rx_dma_desc_initialization(const struct osi_dma_priv_data *const osi_dma, + nveu32_t dma_chan) { + nveu32_t chan = dma_chan & 0xFU; + const nveu32_t start_addr_high_reg[2] = { + EQOS_DMA_CHX_RDLH(chan), + MGBE_DMA_CHX_RDLH(chan) + }; + const nveu32_t start_addr_low_reg[2] = { + EQOS_DMA_CHX_RDLA(chan), + MGBE_DMA_CHX_RDLA(chan) + }; + const nveu32_t ring_len_reg[2] = { + EQOS_DMA_CHX_RDRL(chan), + MGBE_DMA_CHX_RX_CNTRL2(chan) + }; + const nveu32_t mask[2] = { 0x3FFU, 0x3FFFU }; struct osi_rx_ring *rx_ring = OSI_NULL; struct osi_rx_desc *rx_desc = OSI_NULL; struct osi_rx_swcx *rx_swcx = OSI_NULL; nveu64_t tailptr = 0; - nveu32_t i; nve32_t ret = 0; + nveu32_t val; + nveu32_t i; rx_ring = osi_dma->rx_ring[chan]; if (osi_unlikely(rx_ring == OSI_NULL)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid argument\n", 0ULL); - 
return -1; + ret = -1; + goto fail; }; rx_ring->cur_rx_idx = 0; @@ -1239,16 +1283,26 @@ static nve32_t rx_dma_desc_initialization(struct osi_dma_priv_data *osi_dma, if (osi_unlikely((tailptr < rx_ring->rx_desc_phy_addr))) { /* Will not hit this case */ - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid phys address\n", 0ULL); - return -1; + ret = -1; + goto fail; } - ops->set_rx_ring_len(osi_dma, chan, (osi_dma->rx_ring_sz - 1U)); - ops->update_rx_tailptr(osi_dma->base, chan, tailptr); - ops->set_rx_ring_start_addr(osi_dma->base, chan, - rx_ring->rx_desc_phy_addr); + /* Update the HW DMA ring length */ + val = osi_readl((nveu8_t *)osi_dma->base + ring_len_reg[osi_dma->mac]); + val |= (osi_dma->rx_ring_sz - 1U) & mask[osi_dma->mac]; + osi_writel(val, (nveu8_t *)osi_dma->base + ring_len_reg[osi_dma->mac]); + update_rx_tail_ptr(osi_dma, chan, tailptr); + + /* Program Ring start address */ + osi_writel(H32(rx_ring->rx_desc_phy_addr), + (nveu8_t *)osi_dma->base + start_addr_high_reg[osi_dma->mac]); + osi_writel(L32(rx_ring->rx_desc_phy_addr), + (nveu8_t *)osi_dma->base + start_addr_low_reg[osi_dma->mac]); + +fail: return ret; } @@ -1273,25 +1327,58 @@ static nve32_t rx_dma_desc_initialization(struct osi_dma_priv_data *osi_dma, * @retval 0 on success * @retval -1 on failure. 
*/ -static nve32_t rx_dma_desc_init(struct osi_dma_priv_data *osi_dma, - struct dma_chan_ops *ops) +static nve32_t rx_dma_desc_init(struct osi_dma_priv_data *osi_dma) { nveu32_t chan = 0; - nveu32_t i; nve32_t ret = 0; + nveu32_t i; for (i = 0; i < osi_dma->num_dma_chans; i++) { chan = osi_dma->dma_chans[i]; - ret = rx_dma_desc_initialization(osi_dma, chan, ops); + ret = rx_dma_desc_initialization(osi_dma, chan); if (ret != 0) { - return ret; + goto fail; } } +fail: return ret; } +static inline void set_tx_ring_len_and_start_addr(const struct osi_dma_priv_data *const osi_dma, + nveu64_t tx_desc_phy_addr, + nveu32_t dma_chan, + nveu32_t len) +{ + nveu32_t chan = dma_chan & 0xFU; + const nveu32_t ring_len_reg[2] = { + EQOS_DMA_CHX_TDRL(chan), + MGBE_DMA_CHX_TX_CNTRL2(chan) + }; + const nveu32_t start_addr_high_reg[2] = { + EQOS_DMA_CHX_TDLH(chan), + MGBE_DMA_CHX_TDLH(chan) + }; + const nveu32_t start_addr_low_reg[2] = { + EQOS_DMA_CHX_TDLA(chan), + MGBE_DMA_CHX_TDLA(chan) + }; + const nveu32_t mask[2] = { 0x3FFU, 0x3FFFU }; + nveu32_t val; + + /* Program ring length */ + val = osi_readl((nveu8_t *)osi_dma->base + ring_len_reg[osi_dma->mac]); + val |= len & mask[osi_dma->mac]; + osi_writel(val, (nveu8_t *)osi_dma->base + ring_len_reg[osi_dma->mac]); + + /* Program tx ring start address */ + osi_writel(H32(tx_desc_phy_addr), + (nveu8_t *)osi_dma->base + start_addr_high_reg[osi_dma->mac]); + osi_writel(L32(tx_desc_phy_addr), + (nveu8_t *)osi_dma->base + start_addr_low_reg[osi_dma->mac]); +} + /** * @brief tx_dma_desc_init - Initialize DMA Transmit descriptors. * @@ -1312,13 +1399,13 @@ static nve32_t rx_dma_desc_init(struct osi_dma_priv_data *osi_dma, * @retval 0 on success * @retval -1 on failure. 
*/ -static nve32_t tx_dma_desc_init(struct osi_dma_priv_data *osi_dma, - struct dma_chan_ops *ops) +static nve32_t tx_dma_desc_init(const struct osi_dma_priv_data *const osi_dma) { struct osi_tx_ring *tx_ring = OSI_NULL; struct osi_tx_desc *tx_desc = OSI_NULL; struct osi_tx_swcx *tx_swcx = OSI_NULL; nveu32_t chan = 0; + nve32_t ret = 0; nveu32_t i, j; for (i = 0; i < osi_dma->num_dma_chans; i++) { @@ -1326,9 +1413,10 @@ static nve32_t tx_dma_desc_init(struct osi_dma_priv_data *osi_dma, tx_ring = osi_dma->tx_ring[chan]; if (osi_unlikely(tx_ring == OSI_NULL)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid pointers\n", 0ULL); - return -1; + ret = -1; + goto fail; } for (j = 0; j < osi_dma->tx_ring_sz; j++) { @@ -1349,46 +1437,47 @@ static nve32_t tx_dma_desc_init(struct osi_dma_priv_data *osi_dma, tx_ring->cur_tx_idx = 0; tx_ring->clean_idx = 0; +#ifndef OSI_STRIPPED_LIB /* Slot function parameter initialization */ tx_ring->slot_number = 0U; tx_ring->slot_check = OSI_DISABLE; +#endif /* !OSI_STRIPPED_LIB */ - ops->set_tx_ring_len(osi_dma, chan, - (osi_dma->tx_ring_sz - 1U)); - ops->set_tx_ring_start_addr(osi_dma->base, chan, - tx_ring->tx_desc_phy_addr); - } - - return 0; -} - -nve32_t dma_desc_init(struct osi_dma_priv_data *osi_dma, - struct dma_chan_ops *ops) -{ - nve32_t ret = 0; - - ret = tx_dma_desc_init(osi_dma, ops); - if (ret != 0) { - return ret; - } - - ret = rx_dma_desc_init(osi_dma, ops); - if (ret != 0) { - return ret; + set_tx_ring_len_and_start_addr(osi_dma, tx_ring->tx_desc_phy_addr, + chan, (osi_dma->tx_ring_sz - 1U)); } +fail: return ret; } -nve32_t init_desc_ops(struct osi_dma_priv_data *osi_dma) +nve32_t dma_desc_init(struct osi_dma_priv_data *osi_dma) { - typedef void (*desc_ops_arr)(struct desc_ops *); + nve32_t ret = 0; - desc_ops_arr desc_ops[2] = { + ret = tx_dma_desc_init(osi_dma); + if (ret != 0) { + goto fail; + } + + ret = rx_dma_desc_init(osi_dma); + if (ret != 0) { + 
goto fail; + } + +fail: + return ret; +} + +nve32_t init_desc_ops(const struct osi_dma_priv_data *const osi_dma) +{ + typedef void (*desc_ops_arr)(struct desc_ops *p_ops); + + const desc_ops_arr desc_ops_a[2] = { eqos_init_desc_ops, mgbe_init_desc_ops }; - desc_ops[osi_dma->mac](&d_ops[osi_dma->mac]); + desc_ops_a[osi_dma->mac](&d_ops[osi_dma->mac]); /* TODO: validate function pointers */ return 0; diff --git a/osi/core/libnvethernetrm.export b/osi/dma/staticlib/Makefile.interface.tmk similarity index 69% rename from osi/core/libnvethernetrm.export rename to osi/dma/staticlib/Makefile.interface.tmk index d27755a..c87a743 100644 --- a/osi/core/libnvethernetrm.export +++ b/osi/dma/staticlib/Makefile.interface.tmk @@ -20,30 +20,19 @@ # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # -# libnvethernetrm interface export +# libnvethernetcl interface makefile fragment # ############################################################################### -osi_init_core_ops -osi_write_phy_reg -osi_read_phy_reg -osi_hw_core_init -osi_hw_core_deinit -osi_get_core -osi_handle_ioctl -#Below need to be enabled when MACSEC is enabled -#osi_macsec_en -#osi_macsec_deinit -#osi_macsec_ns_isr -#osi_macsec_s_isr -#osi_macsec_init -#osi_macsec_cipher_config -#osi_macsec_config -#osi_init_macsec_ops -#osi_macsec_config_lut -#osi_macsec_loopback -#osi_macsec_read_mmc -#osi_macsec_config_dbg_buf -#osi_macsec_dbg_events_config -#osi_macsec_config_kt -#osi_macsec_get_sc_lut_key_index -#osi_macsec_update_mtu + +ifdef NV_INTERFACE_FLAG_STATIC_LIBRARY_SECTION +NV_INTERFACE_NAME := nvethernetcl +NV_INTERFACE_COMPONENT_DIR := . 
+NV_INTERFACE_PUBLIC_INCLUDES := \ + ./include +endif + +# Local Variables: +# indent-tabs-mode: t +# tab-width: 8 +# End: +# vi: set tabstop=8 noexpandtab: diff --git a/osi/dma/staticlib/Makefile.tmk b/osi/dma/staticlib/Makefile.tmk new file mode 100644 index 0000000..c033b19 --- /dev/null +++ b/osi/dma/staticlib/Makefile.tmk @@ -0,0 +1,54 @@ +################################### tell Emacs this is a -*- makefile-gmake -*- +# +# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. +# +############################################################################### + +ifdef NV_COMPONENT_FLAG_STATIC_LIBRARY_SECTION +include $(NV_BUILD_START_COMPONENT) + +NV_COMPONENT_STRICT_WARNINGS_qnx_64 := 1 + +NV_COMPONENT_NAME := nvethernetcl +NV_COMPONENT_OWN_INTERFACE_DIR := . 
+NV_COMPONENT_SOURCES := \ + $(NV_SOURCE)/nvethernetrm/osi/dma/eqos_dma.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/osi_dma.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/osi_dma_txrx.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/mgbe_dma.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/eqos_desc.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/mgbe_desc.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/debug.c \ + $(NV_SOURCE)/nvethernetrm/osi/common/osi_common.c \ + $(NV_SOURCE)/nvethernetrm/osi/common/eqos_common.c \ + $(NV_SOURCE)/nvethernetrm/osi/common/mgbe_common.c + +NV_COMPONENT_INCLUDES := \ + $(NV_SOURCE)/nvethernetrm/include \ + $(NV_SOURCE)/nvethernetrm/osi/common/include + +ifeq ($(NV_BUILD_CONFIGURATION_IS_SAFETY),0) + NV_COMPONENT_CFLAGS += -DOSI_DEBUG +else + NV_COMPONENT_CFLAGS += -DOSI_STRIPPED_LIB +endif +include $(NV_BUILD_STATIC_LIBRARY) +endif