mirror of git://nv-tegra.nvidia.com/kernel/nvethernetrm.git
synced 2025-12-22 17:34:29 +03:00
Merge remote-tracking branch 'origin/dev/vbhadram/nvethernet_rel-35' into rel-35
Bug 3918941
Change-Id: I377214043c0a564eea4ce481313e34f90ada846b
Signed-off-by: Bhadram Varka <vbhadram@nvidia.com>
include/config.tmk (new file, 44 lines added)
@@ -0,0 +1,44 @@
+# copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+#
+###############################################################################
+
+# Set the Makefile config macros to zero by default
+OSI_STRIPPED_LIB := 0
+OSI_DEBUG := 0
+DEBUG_MACSEC := 0
+
+ifeq ($(NV_BUILD_CONFIGURATION_IS_SAFETY),1)
+NV_COMPONENT_CFLAGS += -DOSI_STRIPPED_LIB
+OSI_STRIPPED_LIB := 1
+else
+NV_COMPONENT_CFLAGS += -DOSI_DEBUG
+NV_COMPONENT_CFLAGS += -DDEBUG_MACSEC
+OSI_DEBUG := 1
+DEBUG_MACSEC := 1
+endif
+NV_COMPONENT_CFLAGS += -DHSI_SUPPORT
+NV_COMPONENT_CFLAGS += -DMACSEC_SUPPORT
+NV_COMPONENT_CFLAGS += -DLOG_OSI
+
+#NV_COMPONENT_CFLAGS += -DMACSEC_KEY_PROGRAM
+HSI_SUPPORT := 1
+MACSEC_SUPPORT := 1
+ccflags-y += $(NV_COMPONENT_CFLAGS)
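The flags this config exports end up as -D preprocessor switches on the OSI sources (via NV_COMPONENT_CFLAGS / ccflags-y). A minimal, purely illustrative sketch of the kind of conditional compilation these defines drive; the function and messages below are hypothetical and not part of this commit:

#include <stdio.h>

/* Illustrative only: reacts to the -D flags set by include/config.tmk. */
static void report_build_config(void)
{
#ifdef OSI_STRIPPED_LIB
	/* Safety build: debug and MACsec-debug paths are compiled out. */
	printf("stripped (safety) OSI build\n");
#else
	printf("full OSI build\n");
#ifdef OSI_DEBUG
	printf("OSI debug instrumentation enabled\n");
#endif
#ifdef DEBUG_MACSEC
	printf("MACsec debug enabled\n");
#endif
#endif
#ifdef MACSEC_SUPPORT
	printf("MACsec support compiled in\n");
#endif
#ifdef HSI_SUPPORT
	printf("HSI support compiled in\n");
#endif
}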
@@ -38,7 +38,7 @@
 /**
  * @brief IVC commands between OSD & OSI.
  */
-typedef enum ivc_cmd {
+typedef enum {
     core_init = 1,
     core_deinit,
     write_phy_reg,
@@ -46,8 +46,7 @@ typedef enum ivc_cmd {
     handle_ioctl,
     init_macsec,
     deinit_macsec,
-    handle_ns_irq_macsec,
-    handle_s_irq_macsec,
+    handle_irq_macsec,
     lut_config_macsec,
     kt_config_macsec,
     cipher_config,
@@ -58,13 +57,15 @@ typedef enum ivc_cmd {
     dbg_buf_config_macsec,
     dbg_events_config_macsec,
     macsec_get_sc_lut_key_index,
-    macsec_update_mtu_size,
+    nvethmgr_get_status,
+    nvethmgr_verify_ts,
+    nvethmgr_get_avb_perf,
 }ivc_cmd;
 
 /**
  * @brief IVC arguments structure.
  */
-typedef struct ivc_args {
+typedef struct {
     /** Number of arguments */
     nveu32_t count;
     /** arguments */
@@ -74,7 +75,7 @@ typedef struct ivc_args {
 /**
  * @brief IVC core argument structure.
  */
-typedef struct ivc_core_args {
+typedef struct {
     /** Number of MTL queues enabled in MAC */
     nveu32_t num_mtl_queues;
     /** Array of MTL queues */
@@ -85,8 +86,6 @@ typedef struct ivc_core_args {
     nveu32_t rxq_prio[OSI_EQOS_MAX_NUM_CHANS];
     /** Ethernet MAC address */
     nveu8_t mac_addr[OSI_ETH_ALEN];
-    /** Tegra Pre-si platform info */
-    nveu32_t pre_si;
     /** VLAN tag stripping enable(1) or disable(0) */
     nveu32_t strip_vlan_tag;
     /** pause frame support */
@@ -103,15 +102,15 @@ typedef struct ivc_core_args {
  * @brief macsec config structure.
  */
 #ifdef MACSEC_SUPPORT
-typedef struct macsec_config {
+typedef struct {
     /** MACsec secure channel basic information */
     struct osi_macsec_sc_info sc_info;
     /** MACsec enable or disable */
-    unsigned int enable;
+    nveu32_t enable;
     /** MACsec controller */
-    unsigned short ctlr;
+    nveu16_t ctlr;
     /** MACsec KT index */
-    unsigned short kt_idx;
+    nveu16_t kt_idx;
     /** MACsec KT index */
     nveu32_t key_index;
     /** MACsec SCI */
@@ -133,19 +132,20 @@ typedef struct ivc_msg_common {
     /** message count, used for debug */
     nveu32_t count;
 
-    union {
     /** IVC argument structure */
     ivc_args args;
-#ifndef OSI_STRIPPED_LIB
+    union {
     /** avb algorithm structure */
     struct osi_core_avb_algorithm avb_algo;
-#endif
     /** OSI filter structure */
     struct osi_filter filter;
     /** OSI HW features */
     struct osi_hw_features hw_feat;
     /** MMC counters */
-    struct osi_mmc_counters mmc;
+    struct osi_mmc_counters mmc_s;
+    /** OSI stats counters */
+    struct osi_stats stats_s;
     /** core argument structure */
     ivc_core_args init_args;
     /** ioctl command structure */
@@ -186,14 +186,4 @@ typedef struct ivc_msg_common {
  */
 nve32_t osd_ivc_send_cmd(void *priv, ivc_msg_common_t *ivc_buf,
                          nveu32_t len);
 
-/**
- * @brief ivc_get_core_safety_config - Get core safety config
- *
- * API Group:
- * - Initialization: Yes
- * - Run time: Yes
- * - De-initialization: Yes
- */
-void *ivc_get_core_safety_config(void);
 #endif /* IVC_CORE_H */
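For orientation, a hedged sketch of how an OSD-side caller might issue one of the new nvethmgr_* commands through osd_ivc_send_cmd(). It assumes ivc_msg_common_t exposes a 'cmd' member of type ivc_cmd, which is not visible in the hunks above; only the enum values, the 'count' field, and the osd_ivc_send_cmd() signature come from this diff:

/* Illustrative sketch only, not taken from this commit. */
static nve32_t osd_request_mgr_status(void *priv)
{
	ivc_msg_common_t msg = {0};

	msg.cmd = nvethmgr_get_status; /* assumed 'cmd' member carrying the new enum value */
	msg.count = 0U;                /* "message count, used for debug" per the structure above */

	/* Sends the request over IVC and returns the OSI error code. */
	return osd_ivc_send_cmd(priv, &msg, sizeof(msg));
}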
include/mmc.h (561 lines changed)
@@ -23,568 +23,9 @@
 #ifndef INCLUDED_MMC_H
 #define INCLUDED_MMC_H
 
-#include "../osi/common/type.h"
+#include <nvethernet_type.h>
 #include "osi_common.h"
 
-/**
- * @brief osi_mmc_counters - The structure to hold RMON counter values
- */
-struct osi_mmc_counters {
-    /** This counter provides the number of bytes transmitted, exclusive of
-     * preamble and retried bytes, in good and bad packets */
-    nveu64_t mmc_tx_octetcount_gb;
-    /** This counter provides upper 32 bits of transmitted octet count */
-    nveu64_t mmc_tx_octetcount_gb_h;
-    /** This counter provides the number of good and
-     * bad packets transmitted, exclusive of retried packets */
-    nveu64_t mmc_tx_framecount_gb;
-    /** This counter provides upper 32 bits of transmitted good and bad
-     * packets count */
-    nveu64_t mmc_tx_framecount_gb_h;
-    /** This counter provides number of good broadcast
-     * packets transmitted */
-    nveu64_t mmc_tx_broadcastframe_g;
-    /** This counter provides upper 32 bits of transmitted good broadcast
-     * packets count */
-    nveu64_t mmc_tx_broadcastframe_g_h;
-    /** This counter provides number of good multicast
-     * packets transmitted */
-    nveu64_t mmc_tx_multicastframe_g;
-    /** This counter provides upper 32 bits of transmitted good multicast
-     * packet count */
-    nveu64_t mmc_tx_multicastframe_g_h;
-    /** This counter provides the number of good and bad packets
-     * transmitted with length 64 bytes, exclusive of preamble and
-     * retried packets */
-    nveu64_t mmc_tx_64_octets_gb;
-    /** This counter provides upper 32 bits of transmitted 64 octet size
-     * good and bad packets count */
-    nveu64_t mmc_tx_64_octets_gb_h;
-    /** This counter provides the number of good and bad packets
-     * transmitted with length 65-127 bytes, exclusive of preamble and
-     * retried packets */
-    nveu64_t mmc_tx_65_to_127_octets_gb;
-    /** Provides upper 32 bits of transmitted 65-to-127 octet size good and
-     * bad packets count */
-    nveu64_t mmc_tx_65_to_127_octets_gb_h;
-    /** This counter provides the number of good and bad packets
-     * transmitted with length 128-255 bytes, exclusive of preamble and
-     * retried packets */
-    nveu64_t mmc_tx_128_to_255_octets_gb;
-    /** This counter provides upper 32 bits of transmitted 128-to-255
-     * octet size good and bad packets count */
-    nveu64_t mmc_tx_128_to_255_octets_gb_h;
-    /** This counter provides the number of good and bad packets
-     * transmitted with length 256-511 bytes, exclusive of preamble and
-     * retried packets */
-    nveu64_t mmc_tx_256_to_511_octets_gb;
-    /** This counter provides upper 32 bits of transmitted 256-to-511
-     * octet size good and bad packets count. */
-    nveu64_t mmc_tx_256_to_511_octets_gb_h;
-    /** This counter provides the number of good and bad packets
-     * transmitted with length 512-1023 bytes, exclusive of preamble and
-     * retried packets */
-    nveu64_t mmc_tx_512_to_1023_octets_gb;
-    /** This counter provides upper 32 bits of transmitted 512-to-1023
-     * octet size good and bad packets count.*/
-    nveu64_t mmc_tx_512_to_1023_octets_gb_h;
-    /** This counter provides the number of good and bad packets
-     * transmitted with length 1024-max bytes, exclusive of preamble and
-     * retried packets */
-    nveu64_t mmc_tx_1024_to_max_octets_gb;
-    /** This counter provides upper 32 bits of transmitted 1024-tomaxsize
-     * octet size good and bad packets count. */
-    nveu64_t mmc_tx_1024_to_max_octets_gb_h;
-    /** This counter provides the number of good and bad unicast packets */
-    nveu64_t mmc_tx_unicast_gb;
-    /** This counter provides upper 32 bits of transmitted good bad
-     * unicast packets count */
-    nveu64_t mmc_tx_unicast_gb_h;
-    /** This counter provides the number of good and bad
-     * multicast packets */
-    nveu64_t mmc_tx_multicast_gb;
-    /** This counter provides upper 32 bits of transmitted good bad
-     * multicast packets count */
-    nveu64_t mmc_tx_multicast_gb_h;
-    /** This counter provides the number of good and bad
-     * broadcast packets */
-    nveu64_t mmc_tx_broadcast_gb;
-    /** This counter provides upper 32 bits of transmitted good bad
-     * broadcast packets count */
-    nveu64_t mmc_tx_broadcast_gb_h;
-    /** This counter provides the number of abort packets due to
-     * underflow error */
-    nveu64_t mmc_tx_underflow_error;
-    /** This counter provides upper 32 bits of abort packets due to
-     * underflow error */
-    nveu64_t mmc_tx_underflow_error_h;
-    /** This counter provides the number of successfully transmitted
-     * packets after a single collision in the half-duplex mode */
-    nveu64_t mmc_tx_singlecol_g;
-    /** This counter provides the number of successfully transmitted
-     * packets after a multi collision in the half-duplex mode */
-    nveu64_t mmc_tx_multicol_g;
-    /** This counter provides the number of successfully transmitted
-     * after a deferral in the half-duplex mode */
-    nveu64_t mmc_tx_deferred;
-    /** This counter provides the number of packets aborted because of
-     * late collision error */
-    nveu64_t mmc_tx_latecol;
-    /** This counter provides the number of packets aborted because of
-     * excessive (16) collision errors */
-    nveu64_t mmc_tx_exesscol;
-    /** This counter provides the number of packets aborted because of
-     * carrier sense error (no carrier or loss of carrier) */
-    nveu64_t mmc_tx_carrier_error;
-    /** This counter provides the number of bytes transmitted,
-     * exclusive of preamble, only in good packets */
-    nveu64_t mmc_tx_octetcount_g;
-    /** This counter provides upper 32 bytes of bytes transmitted,
-     * exclusive of preamble, only in good packets */
-    nveu64_t mmc_tx_octetcount_g_h;
-    /** This counter provides the number of good packets transmitted */
-    nveu64_t mmc_tx_framecount_g;
-    /** This counter provides upper 32 bytes of good packets transmitted */
-    nveu64_t mmc_tx_framecount_g_h;
-    /** This counter provides the number of packets aborted because of
-     * excessive deferral error
-     * (deferred for more than two max-sized packet times) */
-    nveu64_t mmc_tx_excessdef;
-    /** This counter provides the number of good Pause
-     * packets transmitted */
-    nveu64_t mmc_tx_pause_frame;
-    /** This counter provides upper 32 bytes of good Pause
-     * packets transmitted */
-    nveu64_t mmc_tx_pause_frame_h;
-    /** This counter provides the number of good VLAN packets transmitted */
-    nveu64_t mmc_tx_vlan_frame_g;
-    /** This counter provides upper 32 bytes of good VLAN packets
-     * transmitted */
-    nveu64_t mmc_tx_vlan_frame_g_h;
-    /** This counter provides the number of packets transmitted without
-     * errors and with length greater than the maxsize (1,518 or 1,522 bytes
-     * for VLAN tagged packets; 2000 bytes */
-    nveu64_t mmc_tx_osize_frame_g;
-    /** This counter provides the number of good and bad packets received */
-    nveu64_t mmc_rx_framecount_gb;
-    /** This counter provides upper 32 bytes of good and bad packets
-     * received */
-    nveu64_t mmc_rx_framecount_gb_h;
-    /** This counter provides the number of bytes received by DWC_ther_qos,
-     * exclusive of preamble, in good and bad packets */
-    nveu64_t mmc_rx_octetcount_gb;
-    /** This counter provides upper 32 bytes of bytes received by
-     * DWC_ether_qos, exclusive of preamble, in good and bad packets */
-    nveu64_t mmc_rx_octetcount_gb_h;
-    /** This counter provides the number of bytes received by DWC_ether_qos,
-     * exclusive of preamble, in good and bad packets */
-    nveu64_t mmc_rx_octetcount_g;
-    /** This counter provides upper 32 bytes of bytes received by
-     * DWC_ether_qos, exclusive of preamble, in good and bad packets */
-    nveu64_t mmc_rx_octetcount_g_h;
-    /** This counter provides the number of good
-     * broadcast packets received */
-    nveu64_t mmc_rx_broadcastframe_g;
-    /** This counter provides upper 32 bytes of good
-     * broadcast packets received */
-    nveu64_t mmc_rx_broadcastframe_g_h;
-    /** This counter provides the number of good
-     * multicast packets received */
-    nveu64_t mmc_rx_multicastframe_g;
-    /** This counter provides upper 32 bytes of good
-     * multicast packets received */
-    nveu64_t mmc_rx_multicastframe_g_h;
-    /** This counter provides the number of packets
-     * received with CRC error */
-    nveu64_t mmc_rx_crc_error;
-    /** This counter provides upper 32 bytes of packets
-     * received with CRC error */
-    nveu64_t mmc_rx_crc_error_h;
-    /** This counter provides the number of packets received with
-     * alignment (dribble) error. It is valid only in 10/100 mode */
-    nveu64_t mmc_rx_align_error;
-    /** This counter provides the number of packets received with
-     * runt (length less than 64 bytes and CRC error) error */
-    nveu64_t mmc_rx_runt_error;
-    /** This counter provides the number of giant packets received with
-     * length (including CRC) greater than 1,518 bytes (1,522 bytes for
-     * VLAN tagged) and with CRC error */
-    nveu64_t mmc_rx_jabber_error;
-    /** This counter provides the number of packets received with length
-     * less than 64 bytes, without any errors */
-    nveu64_t mmc_rx_undersize_g;
-    /** This counter provides the number of packets received without error,
-     * with length greater than the maxsize */
-    nveu64_t mmc_rx_oversize_g;
-    /** This counter provides the number of good and bad packets received
-     * with length 64 bytes, exclusive of the preamble */
-    nveu64_t mmc_rx_64_octets_gb;
-    /** This counter provides upper 32 bytes of good and bad packets
-     * received with length 64 bytes, exclusive of the preamble */
-    nveu64_t mmc_rx_64_octets_gb_h;
-    /** This counter provides the number of good and bad packets received
-     * with length 65-127 bytes, exclusive of the preamble */
-    nveu64_t mmc_rx_65_to_127_octets_gb;
-    /** This counter provides upper 32 bytes of good and bad packets
-     * received with length 65-127 bytes, exclusive of the preamble */
-    nveu64_t mmc_rx_65_to_127_octets_gb_h;
-    /** This counter provides the number of good and bad packets received
-     * with length 128-255 bytes, exclusive of the preamble */
-    nveu64_t mmc_rx_128_to_255_octets_gb;
-    /** This counter provides upper 32 bytes of good and bad packets
-     * received with length 128-255 bytes, exclusive of the preamble */
-    nveu64_t mmc_rx_128_to_255_octets_gb_h;
-    /** This counter provides the number of good and bad packets received
-     * with length 256-511 bytes, exclusive of the preamble */
-    nveu64_t mmc_rx_256_to_511_octets_gb;
-    /** This counter provides upper 32 bytes of good and bad packets
-     * received with length 256-511 bytes, exclusive of the preamble */
-    nveu64_t mmc_rx_256_to_511_octets_gb_h;
-    /** This counter provides the number of good and bad packets received
-     * with length 512-1023 bytes, exclusive of the preamble */
-    nveu64_t mmc_rx_512_to_1023_octets_gb;
-    /** This counter provides upper 32 bytes of good and bad packets
-     * received with length 512-1023 bytes, exclusive of the preamble */
-    nveu64_t mmc_rx_512_to_1023_octets_gb_h;
-    /** This counter provides the number of good and bad packets received
-     * with length 1024-maxbytes, exclusive of the preamble */
-    nveu64_t mmc_rx_1024_to_max_octets_gb;
-    /** This counter provides upper 32 bytes of good and bad packets
-     * received with length 1024-maxbytes, exclusive of the preamble */
-    nveu64_t mmc_rx_1024_to_max_octets_gb_h;
-    /** This counter provides the number of good unicast packets received */
-    nveu64_t mmc_rx_unicast_g;
-    /** This counter provides upper 32 bytes of good unicast packets
-     * received */
-    nveu64_t mmc_rx_unicast_g_h;
-    /** This counter provides the number of packets received with length
-     * error (Length Type field not equal to packet size), for all packets
-     * with valid length field */
-    nveu64_t mmc_rx_length_error;
-    /** This counter provides upper 32 bytes of packets received with
-     * length error (Length Type field not equal to packet size), for all
-     * packets with valid length field */
-    nveu64_t mmc_rx_length_error_h;
-    /** This counter provides the number of packets received with length
-     * field not equal to the valid packet size (greater than 1,500 but
-     * less than 1,536) */
-    nveu64_t mmc_rx_outofrangetype;
-    /** This counter provides upper 32 bytes of packets received with
-     * length field not equal to the valid packet size (greater than 1,500
-     * but less than 1,536) */
-    nveu64_t mmc_rx_outofrangetype_h;
-    /** This counter provides the number of good and valid Pause packets
-     * received */
-    nveu64_t mmc_rx_pause_frames;
-    /** This counter provides upper 32 bytes of good and valid Pause packets
-     * received */
-    nveu64_t mmc_rx_pause_frames_h;
-    /** This counter provides the number of missed received packets
-     * because of FIFO overflow in DWC_ether_qos */
-    nveu64_t mmc_rx_fifo_overflow;
-    /** This counter provides upper 32 bytes of missed received packets
-     * because of FIFO overflow in DWC_ether_qos */
-    nveu64_t mmc_rx_fifo_overflow_h;
-    /** This counter provides the number of good and bad VLAN packets
-     * received */
-    nveu64_t mmc_rx_vlan_frames_gb;
-    /** This counter provides upper 32 bytes of good and bad VLAN packets
-     * received */
-    nveu64_t mmc_rx_vlan_frames_gb_h;
-    /** This counter provides the number of packets received with error
-     * because of watchdog timeout error */
-    nveu64_t mmc_rx_watchdog_error;
-    /** This counter provides the number of packets received with Receive
-     * error or Packet Extension error on the GMII or MII interface */
-    nveu64_t mmc_rx_receive_error;
-    /** This counter provides the number of packets received with Receive
-     * error or Packet Extension error on the GMII or MII interface */
-    nveu64_t mmc_rx_ctrl_frames_g;
-    /** This counter provides the number of microseconds Tx LPI is asserted
-     * in the MAC controller */
-    nveu64_t mmc_tx_lpi_usec_cntr;
-    /** This counter provides the number of times MAC controller has
-     * entered Tx LPI. */
-    nveu64_t mmc_tx_lpi_tran_cntr;
-    /** This counter provides the number of microseconds Rx LPI is asserted
-     * in the MAC controller */
-    nveu64_t mmc_rx_lpi_usec_cntr;
-    /** This counter provides the number of times MAC controller has
-     * entered Rx LPI.*/
-    nveu64_t mmc_rx_lpi_tran_cntr;
-    /** This counter provides the number of good IPv4 datagrams received
-     * with the TCP, UDP, or ICMP payload */
-    nveu64_t mmc_rx_ipv4_gd;
-    /** This counter provides upper 32 bytes of good IPv4 datagrams received
-     * with the TCP, UDP, or ICMP payload */
-    nveu64_t mmc_rx_ipv4_gd_h;
-    /** RxIPv4 Header Error Packets */
-    nveu64_t mmc_rx_ipv4_hderr;
-    /** RxIPv4 of upper 32 bytes of Header Error Packets */
-    nveu64_t mmc_rx_ipv4_hderr_h;
-    /** This counter provides the number of IPv4 datagram packets received
-     * that did not have a TCP, UDP, or ICMP payload */
-    nveu64_t mmc_rx_ipv4_nopay;
-    /** This counter provides upper 32 bytes of IPv4 datagram packets
-     * received that did not have a TCP, UDP, or ICMP payload */
-    nveu64_t mmc_rx_ipv4_nopay_h;
-    /** This counter provides the number of good IPv4 datagrams received
-     * with fragmentation */
-    nveu64_t mmc_rx_ipv4_frag;
-    /** This counter provides upper 32 bytes of good IPv4 datagrams received
-     * with fragmentation */
-    nveu64_t mmc_rx_ipv4_frag_h;
-    /** This counter provides the number of good IPv4 datagrams received
-     * that had a UDP payload with checksum disabled */
-    nveu64_t mmc_rx_ipv4_udsbl;
-    /** This counter provides upper 32 bytes of good IPv4 datagrams received
-     * that had a UDP payload with checksum disabled */
-    nveu64_t mmc_rx_ipv4_udsbl_h;
-    /** This counter provides the number of good IPv6 datagrams received
-     * with the TCP, UDP, or ICMP payload */
-    nveu64_t mmc_rx_ipv6_gd_octets;
-    /** This counter provides upper 32 bytes of good IPv6 datagrams received
-     * with the TCP, UDP, or ICMP payload */
-    nveu64_t mmc_rx_ipv6_gd_octets_h;
-    /** This counter provides the number of IPv6 datagrams received
-     * with header (length or version mismatch) errors */
-    nveu64_t mmc_rx_ipv6_hderr_octets;
-    /** This counter provides the number of IPv6 datagrams received
-     * with header (length or version mismatch) errors */
-    nveu64_t mmc_rx_ipv6_hderr_octets_h;
-    /** This counter provides the number of IPv6 datagram packets received
-     * that did not have a TCP, UDP, or ICMP payload */
-    nveu64_t mmc_rx_ipv6_nopay_octets;
-    /** This counter provides upper 32 bytes of IPv6 datagram packets
-     * received that did not have a TCP, UDP, or ICMP payload */
-    nveu64_t mmc_rx_ipv6_nopay_octets_h;
-    /* Protocols */
-    /** This counter provides the number of good IP datagrams received by
-     * DWC_ether_qos with a good UDP payload */
-    nveu64_t mmc_rx_udp_gd;
-    /** This counter provides upper 32 bytes of good IP datagrams received
-     * by DWC_ether_qos with a good UDP payload */
-    nveu64_t mmc_rx_udp_gd_h;
-    /** This counter provides the number of good IP datagrams received by
-     * DWC_ether_qos with a good UDP payload. This counter is not updated
-     * when the RxIPv4_UDP_Checksum_Disabled_Packets counter is
-     * incremented */
-    nveu64_t mmc_rx_udp_err;
-    /** This counter provides upper 32 bytes of good IP datagrams received
-     * by DWC_ether_qos with a good UDP payload. This counter is not updated
-     * when the RxIPv4_UDP_Checksum_Disabled_Packets counter is
-     * incremented */
-    nveu64_t mmc_rx_udp_err_h;
-    /** This counter provides the number of good IP datagrams received
-     * with a good TCP payload */
-    nveu64_t mmc_rx_tcp_gd;
-    /** This counter provides the number of good IP datagrams received
-     * with a good TCP payload */
-    nveu64_t mmc_rx_tcp_gd_h;
-    /** This counter provides upper 32 bytes of good IP datagrams received
-     * with a good TCP payload */
-    nveu64_t mmc_rx_tcp_err;
-    /** This counter provides upper 32 bytes of good IP datagrams received
-     * with a good TCP payload */
-    nveu64_t mmc_rx_tcp_err_h;
-    /** This counter provides the number of good IP datagrams received
-     * with a good ICMP payload */
-    nveu64_t mmc_rx_icmp_gd;
-    /** This counter provides upper 32 bytes of good IP datagrams received
-     * with a good ICMP payload */
-    nveu64_t mmc_rx_icmp_gd_h;
-    /** This counter provides the number of good IP datagrams received
-     * whose ICMP payload has a checksum error */
-    nveu64_t mmc_rx_icmp_err;
-    /** This counter provides upper 32 bytes of good IP datagrams received
-     * whose ICMP payload has a checksum error */
-    nveu64_t mmc_rx_icmp_err_h;
-    /** This counter provides the number of bytes received by DWC_ether_qos
-     * in good IPv4 datagrams encapsulating TCP, UDP, or ICMP data.
-     * (Ethernet header, FCS, pad, or IP pad bytes are not included
-     * in this counter */
-    nveu64_t mmc_rx_ipv4_gd_octets;
-    /** This counter provides upper 32 bytes received by DWC_ether_qos
-     * in good IPv4 datagrams encapsulating TCP, UDP, or ICMP data.
-     * (Ethernet header, FCS, pad, or IP pad bytes are not included
-     * in this counter */
-    nveu64_t mmc_rx_ipv4_gd_octets_h;
-    /** This counter provides the number of bytes received in IPv4 datagram
-     * with header errors (checksum, length, version mismatch). The value
-     * in the Length field of IPv4 header is used to update this counter.
-     * (Ethernet header, FCS, pad, or IP pad bytes are not included
-     * in this counter */
-    nveu64_t mmc_rx_ipv4_hderr_octets;
-    /** This counter provides upper 32 bytes received in IPv4 datagram
-     * with header errors (checksum, length, version mismatch). The value
-     * in the Length field of IPv4 header is used to update this counter.
-     * (Ethernet header, FCS, pad, or IP pad bytes are not included
-     * in this counter */
-    nveu64_t mmc_rx_ipv4_hderr_octets_h;
-    /** This counter provides the number of bytes received in IPv4 datagram
-     * that did not have a TCP, UDP, or ICMP payload. The value in the
-     * Length field of IPv4 header is used to update this counter.
-     * (Ethernet header, FCS, pad, or IP pad bytes are not included
-     * in this counter */
-    nveu64_t mmc_rx_ipv4_nopay_octets;
-    /** This counter provides upper 32 bytes received in IPv4 datagram
-     * that did not have a TCP, UDP, or ICMP payload. The value in the
-     * Length field of IPv4 header is used to update this counter.
-     * (Ethernet header, FCS, pad, or IP pad bytes are not included
-     * in this counter */
-    nveu64_t mmc_rx_ipv4_nopay_octets_h;
-    /** This counter provides the number of bytes received in fragmented
-     * IPv4 datagrams. The value in the Length field of IPv4 header is
-     * used to update this counter. (Ethernet header, FCS, pad, or IP pad
-     * bytes are not included in this counter */
-    nveu64_t mmc_rx_ipv4_frag_octets;
-    /** This counter provides upper 32 bytes received in fragmented
-     * IPv4 datagrams. The value in the Length field of IPv4 header is
-     * used to update this counter. (Ethernet header, FCS, pad, or IP pad
-     * bytes are not included in this counter */
-    nveu64_t mmc_rx_ipv4_frag_octets_h;
-    /** This counter provides the number of bytes received in a UDP segment
-     * that had the UDP checksum disabled. This counter does not count IP
-     * Header bytes. (Ethernet header, FCS, pad, or IP pad bytes are not
-     * included in this counter */
-    nveu64_t mmc_rx_ipv4_udsbl_octets;
-    /** This counter provides upper 32 bytes received in a UDP segment
-     * that had the UDP checksum disabled. This counter does not count IP
-     * Header bytes. (Ethernet header, FCS, pad, or IP pad bytes are not
-     * included in this counter */
-    nveu64_t mmc_rx_ipv4_udsbl_octets_h;
-    /** This counter provides the number of bytes received in good IPv6
-     * datagrams encapsulating TCP, UDP, or ICMP data. (Ethernet header,
-     * FCS, pad, or IP pad bytes are not included in this counter */
-    nveu64_t mmc_rx_ipv6_gd;
-    /** This counter provides upper 32 bytes received in good IPv6
-     * datagrams encapsulating TCP, UDP, or ICMP data. (Ethernet header,
-     * FCS, pad, or IP pad bytes are not included in this counter */
-    nveu64_t mmc_rx_ipv6_gd_h;
-    /** This counter provides the number of bytes received in IPv6 datagrams
-     * with header errors (length, version mismatch). The value in the
-     * Length field of IPv6 header is used to update this counter.
-     * (Ethernet header, FCS, pad, or IP pad bytes are not included in
-     * this counter */
-    nveu64_t mmc_rx_ipv6_hderr;
-    /** This counter provides upper 32 bytes received in IPv6 datagrams
-     * with header errors (length, version mismatch). The value in the
-     * Length field of IPv6 header is used to update this counter.
-     * (Ethernet header, FCS, pad, or IP pad bytes are not included in
-     * this counter */
-    nveu64_t mmc_rx_ipv6_hderr_h;
-    /** This counter provides the number of bytes received in IPv6
-     * datagrams that did not have a TCP, UDP, or ICMP payload. The value
-     * in the Length field of IPv6 header is used to update this counter.
-     * (Ethernet header, FCS, pad, or IP pad bytes are not included
-     * in this counter */
-    nveu64_t mmc_rx_ipv6_nopay;
-    /** This counter provides upper 32 bytes received in IPv6
-     * datagrams that did not have a TCP, UDP, or ICMP payload. The value
-     * in the Length field of IPv6 header is used to update this counter.
-     * (Ethernet header, FCS, pad, or IP pad bytes are not included
-     * in this counter */
-    nveu64_t mmc_rx_ipv6_nopay_h;
-    /* Protocols */
-    /** This counter provides the number of bytes received in a good UDP
-     * segment. This counter does not count IP header bytes */
-    nveu64_t mmc_rx_udp_gd_octets;
-    /** This counter provides upper 32 bytes received in a good UDP
-     * segment. This counter does not count IP header bytes */
-    nveu64_t mmc_rx_udp_gd_octets_h;
-    /** This counter provides the number of bytes received in a UDP
-     * segment that had checksum errors. This counter does not count
-     * IP header bytes */
-    nveu64_t mmc_rx_udp_err_octets;
-    /** This counter provides upper 32 bytes received in a UDP
-     * segment that had checksum errors. This counter does not count
-     * IP header bytes */
-    nveu64_t mmc_rx_udp_err_octets_h;
-    /** This counter provides the number of bytes received in a good
-     * TCP segment. This counter does not count IP header bytes */
-    nveu64_t mmc_rx_tcp_gd_octets;
-    /** This counter provides upper 32 bytes received in a good
-     * TCP segment. This counter does not count IP header bytes */
-    nveu64_t mmc_rx_tcp_gd_octets_h;
-    /** This counter provides the number of bytes received in a TCP
-     * segment that had checksum errors. This counter does not count
-     * IP header bytes */
-    nveu64_t mmc_rx_tcp_err_octets;
-    /** This counter provides upper 32 bytes received in a TCP
-     * segment that had checksum errors. This counter does not count
-     * IP header bytes */
-    nveu64_t mmc_rx_tcp_err_octets_h;
-    /** This counter provides the number of bytes received in a good
-     * ICMP segment. This counter does not count IP header bytes */
-    nveu64_t mmc_rx_icmp_gd_octets;
-    /** This counter provides upper 32 bytes received in a good
-     * ICMP segment. This counter does not count IP header bytes */
-    nveu64_t mmc_rx_icmp_gd_octets_h;
-    /** This counter provides the number of bytes received in a ICMP
-     * segment that had checksum errors. This counter does not count
-     * IP header bytes */
-    nveu64_t mmc_rx_icmp_err_octets;
-    /** This counter provides upper 32 bytes received in a ICMP
-     * segment that had checksum errors. This counter does not count
-     * IP header bytes */
-    nveu64_t mmc_rx_icmp_err_octets_h;
-    /** This counter provides the number of additional mPackets
-     * transmitted due to preemption */
-    unsigned long mmc_tx_fpe_frag_cnt;
-    /** This counter provides the count of number of times a hold
-     * request is given to MAC */
-    unsigned long mmc_tx_fpe_hold_req_cnt;
-    /** This counter provides the number of MAC frames with reassembly
-     * errors on the Receiver, due to mismatch in the fragment
-     * count value */
-    unsigned long mmc_rx_packet_reass_err_cnt;
-    /** This counter the number of received MAC frames rejected
-     * due to unknown SMD value and MAC frame fragments rejected due
-     * to arriving with an SMD-C when there was no preceding preempted
-     * frame */
-    unsigned long mmc_rx_packet_smd_err_cnt;
-    /** This counter provides the number of MAC frames that were
-     * successfully reassembled and delivered to MAC */
-    unsigned long mmc_rx_packet_asm_ok_cnt;
-    /** This counter provides the number of additional mPackets received
-     * due to preemption */
-    unsigned long mmc_rx_fpe_fragment_cnt;
-};
-
-/**
- * @brief osi_xtra_stat_counters - OSI core extra stat counters
- */
-struct osi_xtra_stat_counters {
-    /** RX buffer unavailable irq count */
-    nveu64_t rx_buf_unavail_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
-    /** Transmit Process Stopped irq count */
-    nveu64_t tx_proc_stopped_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
-    /** Transmit Buffer Unavailable irq count */
-    nveu64_t tx_buf_unavail_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
-    /** Receive Process Stopped irq count */
-    nveu64_t rx_proc_stopped_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
-    /** Receive Watchdog Timeout irq count */
-    nveu64_t rx_watchdog_irq_n;
-    /** Fatal Bus Error irq count */
-    nveu64_t fatal_bus_error_irq_n;
-    /** rx skb allocation failure count */
-    nveu64_t re_alloc_rxbuf_failed[OSI_MGBE_MAX_NUM_QUEUES];
-    /** TX per channel interrupt count */
-    nveu64_t tx_normal_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
-    /** TX per channel SW timer callback count */
-    nveu64_t tx_usecs_swtimer_n[OSI_MGBE_MAX_NUM_QUEUES];
-    /** RX per channel interrupt count */
-    nveu64_t rx_normal_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
-    /** link connect count */
-    nveu64_t link_connect_count;
-    /** link disconnect count */
-    nveu64_t link_disconnect_count;
-    /** lock fail count node addition */
-    nveu64_t ts_lock_add_fail;
-    /** lock fail count node removal */
-    nveu64_t ts_lock_del_fail;
-};
-
 #ifdef MACSEC_SUPPORT
 /**
  * @brief The structure hold macsec statistics counters
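Most of the RMON counters removed above come in pairs, a base field plus an `_h` field documented as the upper 32 bits. A hedged sketch of how a reader of these statistics could fold such a pair into one 64-bit value; the helper name is hypothetical, only the field pairing and nveu64_t come from the headers in this change:

#include <nvethernet_type.h>

/* Illustrative only: combine a low/high RMON counter pair as described in
 * the osi_mmc_counters comments (the "_h" member holds the upper 32 bits). */
static inline nveu64_t mmc_counter64(nveu64_t lo, nveu64_t hi)
{
	/* Only the low 32 bits of each field are significant here. */
	return (lo & 0xFFFFFFFFULL) | ((hi & 0xFFFFFFFFULL) << 32);
}

/* Usage sketch, given an osi_mmc_counters instance 'm':
 *   nveu64_t tx_octets = mmc_counter64(m.mmc_tx_octetcount_gb,
 *                                      m.mmc_tx_octetcount_gb_h);
 */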
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -37,8 +37,6 @@ typedef unsigned int my_uint32_t;
 typedef int my_int32_t;
 /** intermediate type for unsigned short */
 typedef unsigned short my_uint16_t;
-/** intermediate type for short */
-typedef short my_int16_t;
 /** intermediate type for char */
 typedef char my_int8_t;
 /** intermediate type for unsigned char */
@@ -55,8 +53,6 @@ typedef my_uint32_t nveu32_t;
 typedef my_int32_t nve32_t;
 /** typedef equivalent to unsigned short */
 typedef my_uint16_t nveu16_t;
-/** typedef equivalent to short */
-typedef my_int16_t nve16_t;
 /** typedef equivalent to char */
 typedef my_int8_t nve8_t;
 /** typedef equivalent to unsigned char */
@@ -68,3 +64,4 @@ typedef my_uint64_t nveu64_t;
 /** @} */
 
 #endif /* INCLUDED_TYPE_H */
+
include/nvethernetrm_export.h (new file, 775 lines added)
@@ -0,0 +1,775 @@
+/*
+ * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef INCLUDED_NVETHERNETRM_EXPORT_H
+#define INCLUDED_NVETHERNETRM_EXPORT_H
+
+#include <nvethernet_type.h>
+
+/**
+ * @addtogroup Helper MACROS
+ *
+ * @brief EQOS generic helper MACROS.
+ * @{
+ */
+#define OSI_GCL_SIZE_256 256U
+#define OSI_MAX_TC_NUM 8U
+/* Ethernet Address length */
+#define OSI_ETH_ALEN 6U
+/** @} */
+
+/**
+ * @addtogroup Flexible Receive Parser related information
+ *
+ * @brief Flexible Receive Parser commands, table size and other defines
+ * @{
+ */
+/* Match data defines */
+#define OSI_FRP_MATCH_DATA_MAX 12U
+/** @} */
+
+/**
+ * @addtogroup MTL queue operation mode
+ *
+ * @brief MTL queue operation mode options
+ * @{
+ */
+#define OSI_MTL_QUEUE_AVB 0x1U
+#define OSI_MTL_QUEUE_ENABLE 0x2U
+#define OSI_MTL_QUEUE_MODEMAX 0x3U
+#ifndef OSI_STRIPPED_LIB
+#define OSI_MTL_MAX_NUM_QUEUES 10U
+#endif
+/** @} */
+
+/**
+ * @addtogroup EQOS_MTL MTL queue AVB algorithm mode
+ *
+ * @brief MTL AVB queue algorithm type
+ * @{
+ */
+#define OSI_MTL_TXQ_AVALG_CBS 1U
+#define OSI_MTL_TXQ_AVALG_SP 0U
+/** @} */
+
+#ifndef OSI_STRIPPED_LIB
+/**
+ * @addtogroup Helper MACROS
+ *
+ * @brief EQOS generic helper MACROS.
+ * @{
+ */
+/* L2 DA filter mode(enable/disable) */
+#define OSI_OPER_EN_L2_DA_INV OSI_BIT(4)
+#define OSI_OPER_DIS_L2_DA_INV OSI_BIT(5)
+#endif /* !OSI_STRIPPED_LIB */
+
+/* Ethernet Address length */
+#define OSI_ETH_ALEN 6U
+#define OSI_MAX_TC_NUM 8U
+/** @} */
+
+#pragma pack(push, 1)
+/**
+ * @brief FRP command structure for OSD to OSI
+ */
+struct osi_core_frp_cmd {
+    /** FRP Command type */
+    nveu32_t cmd;
+    /** OSD FRP ID */
+    nve32_t frp_id;
+    /** OSD match data type */
+    nveu8_t match_type;
+    /** OSD match data */
+    nveu8_t match[OSI_FRP_MATCH_DATA_MAX];
+    /** OSD match data length */
+    nveu8_t match_length;
+    /** OSD Offset */
+    nveu8_t offset;
+    /** OSD FRP filter mode flag */
+    nveu8_t filter_mode;
+    /** OSD FRP Link ID */
+    nve32_t next_frp_id;
+    /** OSD DMA Channel Selection
+     * Bit selection of DMA channels to route the frame
+     * Bit[0] - DMA channel 0
+     * ..
+     * Bit [N] - DMA channel N] */
+    nveu32_t dma_sel;
+};
+
+/**
+ * @brief OSI Core avb data structure per queue.
+ */
+struct osi_core_avb_algorithm {
+    /** TX Queue/TC index */
+    nveu32_t qindex;
+    /** CBS Algorithm enable(1) or disable(0) */
+    nveu32_t algo;
+    /** When this bit is set, the accumulated credit parameter in the
+     * credit-based shaper algorithm logic is not reset to zero when
+     * there is positive credit and no packet to transmit in the channel.
+     *
+     * Expected values are enable(1) or disable(0) */
+    nveu32_t credit_control;
+    /** idleSlopeCredit value required for CBS
+     * Max value for EQOS - 0x000FFFFFU
+     * Max value for MGBE - 0x001FFFFFU */
+    nveu32_t idle_slope;
+    /** sendSlopeCredit value required for CBS
+     * Max value for EQOS - 0x0000FFFFU
+     * Max value for MGBE - 0x00003FFFU */
+    nveu32_t send_slope;
+    /** hiCredit value required for CBS
+     * Max value - 0x1FFFFFFFU */
+    nveu32_t hi_credit;
+    /** lowCredit value required for CBS
+     * Max value - 0x1FFFFFFFU */
+    nveu32_t low_credit;
+    /** Transmit queue operating mode
+     *
+     * 00: disable
+     *
+     * 01: avb
+     *
+     * 10: enable */
+    nveu32_t oper_mode;
+    /** TC index
+     * value 0 to 7 represent 8 TC */
+    nveu32_t tcindex;
+};
+
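As a rough illustration of how the osi_core_avb_algorithm structure above could be filled for a credit-based-shaper queue; the numeric credit values are placeholders chosen within the documented EQOS maxima, not recommendations from this commit, and the header inclusion assumes nvethernetrm_export.h is on the include path:

#include "nvethernetrm_export.h"

/* Illustrative only: configure one TX queue for CBS (credit-based shaper). */
static void fill_cbs_example(struct osi_core_avb_algorithm *avb)
{
	avb->qindex = 2U;                     /* TX queue to shape */
	avb->tcindex = 2U;                    /* traffic class 0-7 */
	avb->algo = OSI_MTL_TXQ_AVALG_CBS;    /* CBS algorithm enable(1) */
	avb->oper_mode = OSI_MTL_QUEUE_AVB;   /* queue operating mode: avb */
	avb->credit_control = 0U;             /* allow credit reset when idle */
	avb->idle_slope = 0x400U;             /* placeholder, below EQOS max 0x000FFFFFU */
	avb->send_slope = 0x200U;             /* placeholder, below EQOS max 0x0000FFFFU */
	avb->hi_credit = 0x1000U;             /* placeholder, below max 0x1FFFFFFFU */
	avb->low_credit = 0x1000U;            /* placeholder, below max 0x1FFFFFFFU */
}

The real slope and credit values depend on link speed and the bandwidth reserved for the AVB stream.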
+/**
+ * @brief OSI Core EST structure
+ */
+struct osi_est_config {
+    /** enable/disable */
+    nveu32_t en_dis;
+    /** 64 bit base time register
+     * if both values are 0, take ptp time to avoid BTRE
+     * index 0 for nsec, index 1 for sec
+     */
+    nveu32_t btr[2];
+    /** 64 bit base time offset index 0 for nsec, index 1 for sec
+     * 32 bits for Seconds, 32 bits for nanoseconds (max 10^9) */
+    nveu32_t btr_offset[2];
+    /** 40 bits cycle time register, index 0 for nsec, index 1 for sec
+     * 8 bits for Seconds, 32 bits for nanoseconds (max 10^9) */
+    nveu32_t ctr[2];
+    /** Configured Time Interval width(24 bits) + 7 bits
+     * extension register */
+    nveu32_t ter;
+    /** size of the gate control list Max 256 entries
+     * valid value range (1-255)*/
+    nveu32_t llr;
+    /** data array 8 bit gate op + 24 execution time
+     * MGBE HW support GCL depth 256 */
+    nveu32_t gcl[OSI_GCL_SIZE_256];
+};
+
+/**
+ * @brief OSI Core FPE structure
+ */
+struct osi_fpe_config {
+    /** Queue Mask 1 - preemption 0 - express
+     * bit representation*/
+    nveu32_t tx_queue_preemption_enable;
+    /** RQ for all preemptable packets which are not filtered
+     * based on user priority or SA-DA
+     * Value range for EQOS 1-7
+     * Value range for MGBE 1-9 */
+    nveu32_t rq;
+};
+
+/**
+ * @brief OSI Core error stats structure
+ */
+struct osi_stats {
+    /** Constant Gate Control Error */
+    nveu64_t const_gate_ctr_err;
+    /** Head-Of-Line Blocking due to Scheduling */
+    nveu64_t head_of_line_blk_sch;
+    /** Per TC Schedule Error */
+    nveu64_t hlbs_q[OSI_MAX_TC_NUM];
+    /** Head-Of-Line Blocking due to Frame Size */
+    nveu64_t head_of_line_blk_frm;
+    /** Per TC Frame Size Error */
+    nveu64_t hlbf_q[OSI_MAX_TC_NUM];
+    /** BTR Error */
+    nveu64_t base_time_reg_err;
+    /** Switch to Software Owned List Complete */
+    nveu64_t sw_own_list_complete;
+#ifndef OSI_STRIPPED_LIB
+    /** IP Header Error */
+    nveu64_t mgbe_ip_header_err;
+    /** Jabber time out Error */
+    nveu64_t mgbe_jabber_timeout_err;
+    /** Payload Checksum Error */
+    nveu64_t mgbe_payload_cs_err;
+    /** Under Flow Error */
+    nveu64_t mgbe_tx_underflow_err;
+    /** RX buffer unavailable irq count */
+    nveu64_t rx_buf_unavail_irq_n[OSI_MTL_MAX_NUM_QUEUES];
+    /** Transmit Process Stopped irq count */
+    nveu64_t tx_proc_stopped_irq_n[OSI_MTL_MAX_NUM_QUEUES];
+    /** Transmit Buffer Unavailable irq count */
+    nveu64_t tx_buf_unavail_irq_n[OSI_MTL_MAX_NUM_QUEUES];
+    /** Receive Process Stopped irq count */
+    nveu64_t rx_proc_stopped_irq_n[OSI_MTL_MAX_NUM_QUEUES];
+    /** Receive Watchdog Timeout irq count */
+    nveu64_t rx_watchdog_irq_n;
+    /** Fatal Bus Error irq count */
+    nveu64_t fatal_bus_error_irq_n;
+    /** lock fail count node addition */
+    nveu64_t ts_lock_add_fail;
+    /** lock fail count node removal */
+    nveu64_t ts_lock_del_fail;
+#endif
+};
+
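For orientation, a hedged sketch of filling the osi_est_config gate control list defined above. The GCL_ENTRY() helper and its bit layout (upper 8 bits gate mask, lower 24 bits interval) are assumptions consistent with the "8 bit gate op + 24 execution time" field comment, not a documented encoding from this commit:

#include "nvethernetrm_export.h"

/* Hypothetical helper packing one GCL entry per the gcl[] field comment. */
#define GCL_ENTRY(gates, interval_ns) \
	((((nveu32_t)(gates) & 0xFFU) << 24) | ((nveu32_t)(interval_ns) & 0xFFFFFFU))

/* Illustrative only: a two-entry, 500 us gating cycle. */
static void fill_est_example(struct osi_est_config *est)
{
	est->en_dis = 1U;        /* enable EST */
	est->btr[0] = 0U;        /* nsec; with sec also 0, PTP time is used (per comment) */
	est->btr[1] = 0U;        /* sec */
	est->btr_offset[0] = 0U; /* no base time offset */
	est->btr_offset[1] = 0U;
	est->ctr[0] = 500000U;   /* cycle time, nanoseconds part */
	est->ctr[1] = 0U;        /* cycle time, seconds part */
	est->ter = 0U;           /* no time interval extension */
	est->llr = 2U;           /* two valid GCL entries */
	est->gcl[0] = GCL_ENTRY(0x01U, 300000U); /* queue 0 open for 300 us */
	est->gcl[1] = GCL_ENTRY(0xFEU, 200000U); /* remaining queues for 200 us */
}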
/**
|
||||||
|
* @brief osi_mmc_counters - The structure to hold RMON counter values
|
||||||
|
*/
|
||||||
|
struct osi_mmc_counters {
|
||||||
|
/** This counter provides the number of bytes transmitted, exclusive of
|
||||||
|
* preamble and retried bytes, in good and bad packets */
|
||||||
|
nveu64_t mmc_tx_octetcount_gb;
|
||||||
|
/** This counter provides upper 32 bits of transmitted octet count */
|
||||||
|
nveu64_t mmc_tx_octetcount_gb_h;
|
||||||
|
/** This counter provides the number of good and
|
||||||
|
* bad packets transmitted, exclusive of retried packets */
|
||||||
|
nveu64_t mmc_tx_framecount_gb;
|
||||||
|
/** This counter provides upper 32 bits of transmitted good and bad
|
||||||
|
* packets count */
|
||||||
|
nveu64_t mmc_tx_framecount_gb_h;
|
||||||
|
/** This counter provides number of good broadcast
|
||||||
|
* packets transmitted */
|
||||||
|
nveu64_t mmc_tx_broadcastframe_g;
|
||||||
|
/** This counter provides upper 32 bits of transmitted good broadcast
|
||||||
|
* packets count */
|
||||||
|
nveu64_t mmc_tx_broadcastframe_g_h;
|
||||||
|
/** This counter provides number of good multicast
|
||||||
|
* packets transmitted */
|
||||||
|
nveu64_t mmc_tx_multicastframe_g;
|
||||||
|
/** This counter provides upper 32 bits of transmitted good multicast
|
||||||
|
* packet count */
|
||||||
|
nveu64_t mmc_tx_multicastframe_g_h;
|
||||||
|
/** This counter provides the number of good and bad packets
|
||||||
|
* transmitted with length 64 bytes, exclusive of preamble and
|
||||||
|
* retried packets */
|
||||||
|
nveu64_t mmc_tx_64_octets_gb;
|
||||||
|
/** This counter provides upper 32 bits of transmitted 64 octet size
|
||||||
|
* good and bad packets count */
|
||||||
|
nveu64_t mmc_tx_64_octets_gb_h;
|
||||||
|
/** This counter provides the number of good and bad packets
|
||||||
|
* transmitted with length 65-127 bytes, exclusive of preamble and
|
||||||
|
* retried packets */
|
||||||
|
nveu64_t mmc_tx_65_to_127_octets_gb;
|
||||||
|
/** Provides upper 32 bits of transmitted 65-to-127 octet size good and
|
||||||
|
* bad packets count */
|
||||||
|
nveu64_t mmc_tx_65_to_127_octets_gb_h;
|
||||||
|
/** This counter provides the number of good and bad packets
|
||||||
|
* transmitted with length 128-255 bytes, exclusive of preamble and
|
||||||
|
* retried packets */
|
||||||
|
nveu64_t mmc_tx_128_to_255_octets_gb;
|
||||||
|
/** This counter provides upper 32 bits of transmitted 128-to-255
|
||||||
|
* octet size good and bad packets count */
|
||||||
|
nveu64_t mmc_tx_128_to_255_octets_gb_h;
|
||||||
|
/** This counter provides the number of good and bad packets
|
||||||
|
* transmitted with length 256-511 bytes, exclusive of preamble and
|
||||||
|
* retried packets */
|
||||||
|
nveu64_t mmc_tx_256_to_511_octets_gb;
|
||||||
|
/** This counter provides upper 32 bits of transmitted 256-to-511
|
||||||
|
* octet size good and bad packets count. */
|
||||||
|
nveu64_t mmc_tx_256_to_511_octets_gb_h;
|
||||||
|
/** This counter provides the number of good and bad packets
|
||||||
|
* transmitted with length 512-1023 bytes, exclusive of preamble and
|
||||||
|
* retried packets */
|
||||||
|
nveu64_t mmc_tx_512_to_1023_octets_gb;
|
||||||
|
/** This counter provides upper 32 bits of transmitted 512-to-1023
|
||||||
|
* octet size good and bad packets count.*/
|
||||||
|
nveu64_t mmc_tx_512_to_1023_octets_gb_h;
|
||||||
|
/** This counter provides the number of good and bad packets
|
||||||
|
* transmitted with length 1024-max bytes, exclusive of preamble and
|
||||||
|
* retried packets */
|
||||||
|
nveu64_t mmc_tx_1024_to_max_octets_gb;
|
||||||
|
/** This counter provides upper 32 bits of transmitted 1024-tomaxsize
|
||||||
|
* octet size good and bad packets count. */
|
||||||
|
nveu64_t mmc_tx_1024_to_max_octets_gb_h;
|
||||||
|
/** This counter provides the number of good and bad unicast packets */
|
||||||
|
nveu64_t mmc_tx_unicast_gb;
|
||||||
|
/** This counter provides upper 32 bits of transmitted good bad
|
||||||
|
* unicast packets count */
|
||||||
|
nveu64_t mmc_tx_unicast_gb_h;
|
||||||
|
/** This counter provides the number of good and bad
|
||||||
|
* multicast packets */
|
||||||
|
nveu64_t mmc_tx_multicast_gb;
|
||||||
|
/** This counter provides upper 32 bits of transmitted good bad
|
||||||
|
* multicast packets count */
|
||||||
|
nveu64_t mmc_tx_multicast_gb_h;
|
||||||
|
/** This counter provides the number of good and bad
|
||||||
|
* broadcast packets */
|
||||||
|
nveu64_t mmc_tx_broadcast_gb;
|
||||||
|
/** This counter provides upper 32 bits of transmitted good bad
|
||||||
|
* broadcast packets count */
|
||||||
|
nveu64_t mmc_tx_broadcast_gb_h;
|
||||||
|
/** This counter provides the number of abort packets due to
|
||||||
|
* underflow error */
|
||||||
|
nveu64_t mmc_tx_underflow_error;
|
||||||
|
/** This counter provides upper 32 bits of abort packets due to
|
||||||
|
* underflow error */
|
||||||
|
nveu64_t mmc_tx_underflow_error_h;
|
||||||
|
/** This counter provides the number of successfully transmitted
|
||||||
|
* packets after a single collision in the half-duplex mode */
|
||||||
|
nveu64_t mmc_tx_singlecol_g;
|
||||||
|
/** This counter provides the number of successfully transmitted
|
||||||
|
* packets after a multi collision in the half-duplex mode */
|
||||||
|
nveu64_t mmc_tx_multicol_g;
|
||||||
|
/** This counter provides the number of successfully transmitted
|
||||||
|
* packets after a deferral in the half-duplex mode */
|
||||||
|
nveu64_t mmc_tx_deferred;
|
||||||
|
/** This counter provides the number of packets aborted because of
|
||||||
|
* late collision error */
|
||||||
|
nveu64_t mmc_tx_latecol;
|
||||||
|
/** This counter provides the number of packets aborted because of
|
||||||
|
* excessive (16) collision errors */
|
||||||
|
nveu64_t mmc_tx_exesscol;
|
||||||
|
/** This counter provides the number of packets aborted because of
|
||||||
|
* carrier sense error (no carrier or loss of carrier) */
|
||||||
|
nveu64_t mmc_tx_carrier_error;
|
||||||
|
/** This counter provides the number of bytes transmitted,
|
||||||
|
* exclusive of preamble, only in good packets */
|
||||||
|
nveu64_t mmc_tx_octetcount_g;
|
||||||
|
/** This counter provides upper 32 bytes of bytes transmitted,
|
||||||
|
* exclusive of preamble, only in good packets */
|
||||||
|
nveu64_t mmc_tx_octetcount_g_h;
|
||||||
|
/** This counter provides the number of good packets transmitted */
|
||||||
|
nveu64_t mmc_tx_framecount_g;
|
||||||
|
/** This counter provides upper 32 bytes of good packets transmitted */
|
||||||
|
nveu64_t mmc_tx_framecount_g_h;
|
||||||
|
/** This counter provides the number of packets aborted because of
|
||||||
|
* excessive deferral error
|
||||||
|
* (deferred for more than two max-sized packet times) */
|
||||||
|
nveu64_t mmc_tx_excessdef;
|
||||||
|
/** This counter provides the number of good Pause
|
||||||
|
* packets transmitted */
|
||||||
|
nveu64_t mmc_tx_pause_frame;
|
||||||
|
/** This counter provides upper 32 bytes of good Pause
|
||||||
|
* packets transmitted */
|
||||||
|
nveu64_t mmc_tx_pause_frame_h;
|
||||||
|
/** This counter provides the number of good VLAN packets transmitted */
|
||||||
|
nveu64_t mmc_tx_vlan_frame_g;
|
||||||
|
/** This counter provides upper 32 bytes of good VLAN packets
|
||||||
|
* transmitted */
|
||||||
|
nveu64_t mmc_tx_vlan_frame_g_h;
|
||||||
|
/** This counter provides the number of packets transmitted without
|
||||||
|
* errors and with length greater than the maxsize (1,518 or 1,522 bytes
|
||||||
|
* for VLAN tagged packets; 2000 bytes) */
|
||||||
|
nveu64_t mmc_tx_osize_frame_g;
|
||||||
|
/** This counter provides the number of good and bad packets received */
|
||||||
|
nveu64_t mmc_rx_framecount_gb;
|
||||||
|
/** This counter provides upper 32 bytes of good and bad packets
|
||||||
|
* received */
|
||||||
|
nveu64_t mmc_rx_framecount_gb_h;
|
||||||
|
/** This counter provides the number of bytes received by DWC_ether_qos,
|
||||||
|
* exclusive of preamble, in good and bad packets */
|
||||||
|
nveu64_t mmc_rx_octetcount_gb;
|
||||||
|
/** This counter provides upper 32 bytes of bytes received by
|
||||||
|
* DWC_ether_qos, exclusive of preamble, in good and bad packets */
|
||||||
|
nveu64_t mmc_rx_octetcount_gb_h;
|
||||||
|
/** This counter provides the number of bytes received by DWC_ether_qos,
|
||||||
|
* exclusive of preamble, only in good packets */
|
||||||
|
nveu64_t mmc_rx_octetcount_g;
|
||||||
|
/** This counter provides upper 32 bits of bytes received by
|
||||||
|
* DWC_ether_qos, exclusive of preamble, only in good packets */
|
||||||
|
nveu64_t mmc_rx_octetcount_g_h;
|
||||||
|
/** This counter provides the number of good
|
||||||
|
* broadcast packets received */
|
||||||
|
nveu64_t mmc_rx_broadcastframe_g;
|
||||||
|
/** This counter provides upper 32 bytes of good
|
||||||
|
* broadcast packets received */
|
||||||
|
nveu64_t mmc_rx_broadcastframe_g_h;
|
||||||
|
/** This counter provides the number of good
|
||||||
|
* multicast packets received */
|
||||||
|
nveu64_t mmc_rx_multicastframe_g;
|
||||||
|
/** This counter provides upper 32 bytes of good
|
||||||
|
* multicast packets received */
|
||||||
|
nveu64_t mmc_rx_multicastframe_g_h;
|
||||||
|
/** This counter provides the number of packets
|
||||||
|
* received with CRC error */
|
||||||
|
nveu64_t mmc_rx_crc_error;
|
||||||
|
/** This counter provides upper 32 bytes of packets
|
||||||
|
* received with CRC error */
|
||||||
|
nveu64_t mmc_rx_crc_error_h;
|
||||||
|
/** This counter provides the number of packets received with
|
||||||
|
* alignment (dribble) error. It is valid only in 10/100 mode */
|
||||||
|
nveu64_t mmc_rx_align_error;
|
||||||
|
/** This counter provides the number of packets received with
|
||||||
|
* runt (length less than 64 bytes and CRC error) error */
|
||||||
|
nveu64_t mmc_rx_runt_error;
|
||||||
|
/** This counter provides the number of giant packets received with
|
||||||
|
* length (including CRC) greater than 1,518 bytes (1,522 bytes for
|
||||||
|
* VLAN tagged) and with CRC error */
|
||||||
|
nveu64_t mmc_rx_jabber_error;
|
||||||
|
/** This counter provides the number of packets received with length
|
||||||
|
* less than 64 bytes, without any errors */
|
||||||
|
nveu64_t mmc_rx_undersize_g;
|
||||||
|
/** This counter provides the number of packets received without error,
|
||||||
|
* with length greater than the maxsize */
|
||||||
|
nveu64_t mmc_rx_oversize_g;
|
||||||
|
/** This counter provides the number of good and bad packets received
|
||||||
|
* with length 64 bytes, exclusive of the preamble */
|
||||||
|
nveu64_t mmc_rx_64_octets_gb;
|
||||||
|
/** This counter provides upper 32 bytes of good and bad packets
|
||||||
|
* received with length 64 bytes, exclusive of the preamble */
|
||||||
|
nveu64_t mmc_rx_64_octets_gb_h;
|
||||||
|
/** This counter provides the number of good and bad packets received
|
||||||
|
* with length 65-127 bytes, exclusive of the preamble */
|
||||||
|
nveu64_t mmc_rx_65_to_127_octets_gb;
|
||||||
|
/** This counter provides upper 32 bytes of good and bad packets
|
||||||
|
* received with length 65-127 bytes, exclusive of the preamble */
|
||||||
|
nveu64_t mmc_rx_65_to_127_octets_gb_h;
|
||||||
|
/** This counter provides the number of good and bad packets received
|
||||||
|
* with length 128-255 bytes, exclusive of the preamble */
|
||||||
|
nveu64_t mmc_rx_128_to_255_octets_gb;
|
||||||
|
/** This counter provides upper 32 bytes of good and bad packets
|
||||||
|
* received with length 128-255 bytes, exclusive of the preamble */
|
||||||
|
nveu64_t mmc_rx_128_to_255_octets_gb_h;
|
||||||
|
/** This counter provides the number of good and bad packets received
|
||||||
|
* with length 256-511 bytes, exclusive of the preamble */
|
||||||
|
nveu64_t mmc_rx_256_to_511_octets_gb;
|
||||||
|
/** This counter provides upper 32 bytes of good and bad packets
|
||||||
|
* received with length 256-511 bytes, exclusive of the preamble */
|
||||||
|
nveu64_t mmc_rx_256_to_511_octets_gb_h;
|
||||||
|
/** This counter provides the number of good and bad packets received
|
||||||
|
* with length 512-1023 bytes, exclusive of the preamble */
|
||||||
|
nveu64_t mmc_rx_512_to_1023_octets_gb;
|
||||||
|
/** This counter provides upper 32 bytes of good and bad packets
|
||||||
|
* received with length 512-1023 bytes, exclusive of the preamble */
|
||||||
|
nveu64_t mmc_rx_512_to_1023_octets_gb_h;
|
||||||
|
/** This counter provides the number of good and bad packets received
|
||||||
|
* with length 1024-maxbytes, exclusive of the preamble */
|
||||||
|
nveu64_t mmc_rx_1024_to_max_octets_gb;
|
||||||
|
/** This counter provides upper 32 bytes of good and bad packets
|
||||||
|
* received with length 1024-maxbytes, exclusive of the preamble */
|
||||||
|
nveu64_t mmc_rx_1024_to_max_octets_gb_h;
|
||||||
|
/** This counter provides the number of good unicast packets received */
|
||||||
|
nveu64_t mmc_rx_unicast_g;
|
||||||
|
/** This counter provides upper 32 bytes of good unicast packets
|
||||||
|
* received */
|
||||||
|
nveu64_t mmc_rx_unicast_g_h;
|
||||||
|
/** This counter provides the number of packets received with length
|
||||||
|
* error (Length Type field not equal to packet size), for all packets
|
||||||
|
* with valid length field */
|
||||||
|
nveu64_t mmc_rx_length_error;
|
||||||
|
/** This counter provides upper 32 bytes of packets received with
|
||||||
|
* length error (Length Type field not equal to packet size), for all
|
||||||
|
* packets with valid length field */
|
||||||
|
nveu64_t mmc_rx_length_error_h;
|
||||||
|
/** This counter provides the number of packets received with length
|
||||||
|
* field not equal to the valid packet size (greater than 1,500 but
|
||||||
|
* less than 1,536) */
|
||||||
|
nveu64_t mmc_rx_outofrangetype;
|
||||||
|
/** This counter provides upper 32 bytes of packets received with
|
||||||
|
* length field not equal to the valid packet size (greater than 1,500
|
||||||
|
* but less than 1,536) */
|
||||||
|
nveu64_t mmc_rx_outofrangetype_h;
|
||||||
|
/** This counter provides the number of good and valid Pause packets
|
||||||
|
* received */
|
||||||
|
nveu64_t mmc_rx_pause_frames;
|
||||||
|
/** This counter provides upper 32 bytes of good and valid Pause packets
|
||||||
|
* received */
|
||||||
|
nveu64_t mmc_rx_pause_frames_h;
|
||||||
|
/** This counter provides the number of missed received packets
|
||||||
|
* because of FIFO overflow in DWC_ether_qos */
|
||||||
|
nveu64_t mmc_rx_fifo_overflow;
|
||||||
|
/** This counter provides upper 32 bytes of missed received packets
|
||||||
|
* because of FIFO overflow in DWC_ether_qos */
|
||||||
|
nveu64_t mmc_rx_fifo_overflow_h;
|
||||||
|
/** This counter provides the number of good and bad VLAN packets
|
||||||
|
* received */
|
||||||
|
nveu64_t mmc_rx_vlan_frames_gb;
|
||||||
|
/** This counter provides upper 32 bytes of good and bad VLAN packets
|
||||||
|
* received */
|
||||||
|
nveu64_t mmc_rx_vlan_frames_gb_h;
|
||||||
|
/** This counter provides the number of packets received with error
|
||||||
|
* because of watchdog timeout error */
|
||||||
|
nveu64_t mmc_rx_watchdog_error;
|
||||||
|
/** This counter provides the number of packets received with Receive
|
||||||
|
* error or Packet Extension error on the GMII or MII interface */
|
||||||
|
nveu64_t mmc_rx_receive_error;
|
||||||
|
/** This counter provides the number of good control packets received */
|
||||||
|
nveu64_t mmc_rx_ctrl_frames_g;
|
||||||
|
/** This counter provides the number of microseconds Tx LPI is asserted
|
||||||
|
* in the MAC controller */
|
||||||
|
nveu64_t mmc_tx_lpi_usec_cntr;
|
||||||
|
/** This counter provides the number of times MAC controller has
|
||||||
|
* entered Tx LPI. */
|
||||||
|
nveu64_t mmc_tx_lpi_tran_cntr;
|
||||||
|
/** This counter provides the number of microseconds Rx LPI is asserted
|
||||||
|
* in the MAC controller */
|
||||||
|
nveu64_t mmc_rx_lpi_usec_cntr;
|
||||||
|
/** This counter provides the number of times MAC controller has
|
||||||
|
* entered Rx LPI.*/
|
||||||
|
nveu64_t mmc_rx_lpi_tran_cntr;
|
||||||
|
/** This counter provides the number of good IPv4 datagrams received
|
||||||
|
* with the TCP, UDP, or ICMP payload */
|
||||||
|
nveu64_t mmc_rx_ipv4_gd;
|
||||||
|
/** This counter provides upper 32 bytes of good IPv4 datagrams received
|
||||||
|
* with the TCP, UDP, or ICMP payload */
|
||||||
|
nveu64_t mmc_rx_ipv4_gd_h;
|
||||||
|
/** RxIPv4 Header Error Packets */
|
||||||
|
nveu64_t mmc_rx_ipv4_hderr;
|
||||||
|
/** RxIPv4 of upper 32 bytes of Header Error Packets */
|
||||||
|
nveu64_t mmc_rx_ipv4_hderr_h;
|
||||||
|
/** This counter provides the number of IPv4 datagram packets received
|
||||||
|
* that did not have a TCP, UDP, or ICMP payload */
|
||||||
|
nveu64_t mmc_rx_ipv4_nopay;
|
||||||
|
/** This counter provides upper 32 bytes of IPv4 datagram packets
|
||||||
|
* received that did not have a TCP, UDP, or ICMP payload */
|
||||||
|
nveu64_t mmc_rx_ipv4_nopay_h;
|
||||||
|
/** This counter provides the number of good IPv4 datagrams received
|
||||||
|
* with fragmentation */
|
||||||
|
nveu64_t mmc_rx_ipv4_frag;
|
||||||
|
/** This counter provides upper 32 bytes of good IPv4 datagrams received
|
||||||
|
* with fragmentation */
|
||||||
|
nveu64_t mmc_rx_ipv4_frag_h;
|
||||||
|
/** This counter provides the number of good IPv4 datagrams received
|
||||||
|
* that had a UDP payload with checksum disabled */
|
||||||
|
nveu64_t mmc_rx_ipv4_udsbl;
|
||||||
|
/** This counter provides upper 32 bytes of good IPv4 datagrams received
|
||||||
|
* that had a UDP payload with checksum disabled */
|
||||||
|
nveu64_t mmc_rx_ipv4_udsbl_h;
|
||||||
|
/** This counter provides the number of good IPv6 datagrams received
|
||||||
|
* with the TCP, UDP, or ICMP payload */
|
||||||
|
nveu64_t mmc_rx_ipv6_gd_octets;
|
||||||
|
/** This counter provides upper 32 bytes of good IPv6 datagrams received
|
||||||
|
* with the TCP, UDP, or ICMP payload */
|
||||||
|
nveu64_t mmc_rx_ipv6_gd_octets_h;
|
||||||
|
/** This counter provides the number of IPv6 datagrams received
|
||||||
|
* with header (length or version mismatch) errors */
|
||||||
|
nveu64_t mmc_rx_ipv6_hderr_octets;
|
||||||
|
/** This counter provides upper 32 bits of IPv6 datagrams received
|
||||||
|
* with header (length or version mismatch) errors */
|
||||||
|
nveu64_t mmc_rx_ipv6_hderr_octets_h;
|
||||||
|
/** This counter provides the number of IPv6 datagram packets received
|
||||||
|
* that did not have a TCP, UDP, or ICMP payload */
|
||||||
|
nveu64_t mmc_rx_ipv6_nopay_octets;
|
||||||
|
/** This counter provides upper 32 bytes of IPv6 datagram packets
|
||||||
|
* received that did not have a TCP, UDP, or ICMP payload */
|
||||||
|
nveu64_t mmc_rx_ipv6_nopay_octets_h;
|
||||||
|
/* Protocols */
|
||||||
|
/** This counter provides the number of good IP datagrams received by
|
||||||
|
* DWC_ether_qos with a good UDP payload */
|
||||||
|
nveu64_t mmc_rx_udp_gd;
|
||||||
|
/** This counter provides upper 32 bytes of good IP datagrams received
|
||||||
|
* by DWC_ether_qos with a good UDP payload */
|
||||||
|
nveu64_t mmc_rx_udp_gd_h;
|
||||||
|
/** This counter provides the number of good IP datagrams received by
|
||||||
|
* DWC_ether_qos whose UDP payload has a checksum error. This counter is not updated
|
||||||
|
* when the RxIPv4_UDP_Checksum_Disabled_Packets counter is
|
||||||
|
* incremented */
|
||||||
|
nveu64_t mmc_rx_udp_err;
|
||||||
|
/** This counter provides upper 32 bits of good IP datagrams received
|
||||||
|
* by DWC_ether_qos whose UDP payload has a checksum error. This counter is not updated
|
||||||
|
* when the RxIPv4_UDP_Checksum_Disabled_Packets counter is
|
||||||
|
* incremented */
|
||||||
|
nveu64_t mmc_rx_udp_err_h;
|
||||||
|
/** This counter provides the number of good IP datagrams received
|
||||||
|
* with a good TCP payload */
|
||||||
|
nveu64_t mmc_rx_tcp_gd;
|
||||||
|
/** This counter provides upper 32 bits of good IP datagrams received
|
||||||
|
* with a good TCP payload */
|
||||||
|
nveu64_t mmc_rx_tcp_gd_h;
|
||||||
|
/** This counter provides the number of good IP datagrams received
|
||||||
|
* whose TCP payload has a checksum error */
|
||||||
|
nveu64_t mmc_rx_tcp_err;
|
||||||
|
/** This counter provides upper 32 bits of good IP datagrams received
|
||||||
|
* whose TCP payload has a checksum error */
|
||||||
|
nveu64_t mmc_rx_tcp_err_h;
|
||||||
|
/** This counter provides the number of good IP datagrams received
|
||||||
|
* with a good ICMP payload */
|
||||||
|
nveu64_t mmc_rx_icmp_gd;
|
||||||
|
/** This counter provides upper 32 bytes of good IP datagrams received
|
||||||
|
* with a good ICMP payload */
|
||||||
|
nveu64_t mmc_rx_icmp_gd_h;
|
||||||
|
/** This counter provides the number of good IP datagrams received
|
||||||
|
* whose ICMP payload has a checksum error */
|
||||||
|
nveu64_t mmc_rx_icmp_err;
|
||||||
|
/** This counter provides upper 32 bytes of good IP datagrams received
|
||||||
|
* whose ICMP payload has a checksum error */
|
||||||
|
nveu64_t mmc_rx_icmp_err_h;
|
||||||
|
/** This counter provides the number of bytes received by DWC_ether_qos
|
||||||
|
* in good IPv4 datagrams encapsulating TCP, UDP, or ICMP data.
|
||||||
|
* (Ethernet header, FCS, pad, or IP pad bytes are not included
|
||||||
|
* in this counter */
|
||||||
|
nveu64_t mmc_rx_ipv4_gd_octets;
|
||||||
|
/** This counter provides upper 32 bytes received by DWC_ether_qos
|
||||||
|
* in good IPv4 datagrams encapsulating TCP, UDP, or ICMP data.
|
||||||
|
* (Ethernet header, FCS, pad, or IP pad bytes are not included
|
||||||
|
* in this counter */
|
||||||
|
nveu64_t mmc_rx_ipv4_gd_octets_h;
|
||||||
|
/** This counter provides the number of bytes received in IPv4 datagram
|
||||||
|
* with header errors (checksum, length, version mismatch). The value
|
||||||
|
* in the Length field of IPv4 header is used to update this counter.
|
||||||
|
* (Ethernet header, FCS, pad, or IP pad bytes are not included
|
||||||
|
* in this counter */
|
||||||
|
nveu64_t mmc_rx_ipv4_hderr_octets;
|
||||||
|
/** This counter provides upper 32 bytes received in IPv4 datagram
|
||||||
|
* with header errors (checksum, length, version mismatch). The value
|
||||||
|
* in the Length field of IPv4 header is used to update this counter.
|
||||||
|
* (Ethernet header, FCS, pad, or IP pad bytes are not included
|
||||||
|
* in this counter */
|
||||||
|
nveu64_t mmc_rx_ipv4_hderr_octets_h;
|
||||||
|
/** This counter provides the number of bytes received in IPv4 datagram
|
||||||
|
* that did not have a TCP, UDP, or ICMP payload. The value in the
|
||||||
|
* Length field of IPv4 header is used to update this counter.
|
||||||
|
* (Ethernet header, FCS, pad, or IP pad bytes are not included
|
||||||
|
* in this counter */
|
||||||
|
nveu64_t mmc_rx_ipv4_nopay_octets;
|
||||||
|
/** This counter provides upper 32 bytes received in IPv4 datagram
|
||||||
|
* that did not have a TCP, UDP, or ICMP payload. The value in the
|
||||||
|
* Length field of IPv4 header is used to update this counter.
|
||||||
|
* (Ethernet header, FCS, pad, or IP pad bytes are not included
|
||||||
|
* in this counter */
|
||||||
|
nveu64_t mmc_rx_ipv4_nopay_octets_h;
|
||||||
|
/** This counter provides the number of bytes received in fragmented
|
||||||
|
* IPv4 datagrams. The value in the Length field of IPv4 header is
|
||||||
|
* used to update this counter. (Ethernet header, FCS, pad, or IP pad
|
||||||
|
* bytes are not included in this counter */
|
||||||
|
nveu64_t mmc_rx_ipv4_frag_octets;
|
||||||
|
/** This counter provides upper 32 bytes received in fragmented
|
||||||
|
* IPv4 datagrams. The value in the Length field of IPv4 header is
|
||||||
|
* used to update this counter. (Ethernet header, FCS, pad, or IP pad
|
||||||
|
* bytes are not included in this counter */
|
||||||
|
nveu64_t mmc_rx_ipv4_frag_octets_h;
|
||||||
|
/** This counter provides the number of bytes received in a UDP segment
|
||||||
|
* that had the UDP checksum disabled. This counter does not count IP
|
||||||
|
* Header bytes. (Ethernet header, FCS, pad, or IP pad bytes are not
|
||||||
|
* included in this counter */
|
||||||
|
nveu64_t mmc_rx_ipv4_udsbl_octets;
|
||||||
|
/** This counter provides upper 32 bytes received in a UDP segment
|
||||||
|
* that had the UDP checksum disabled. This counter does not count IP
|
||||||
|
* Header bytes. (Ethernet header, FCS, pad, or IP pad bytes are not
|
||||||
|
* included in this counter */
|
||||||
|
nveu64_t mmc_rx_ipv4_udsbl_octets_h;
|
||||||
|
/** This counter provides the number of bytes received in good IPv6
|
||||||
|
* datagrams encapsulating TCP, UDP, or ICMP data. (Ethernet header,
|
||||||
|
* FCS, pad, or IP pad bytes are not included in this counter */
|
||||||
|
nveu64_t mmc_rx_ipv6_gd;
|
||||||
|
/** This counter provides upper 32 bytes received in good IPv6
|
||||||
|
* datagrams encapsulating TCP, UDP, or ICMP data. (Ethernet header,
|
||||||
|
* FCS, pad, or IP pad bytes are not included in this counter */
|
||||||
|
nveu64_t mmc_rx_ipv6_gd_h;
|
||||||
|
/** This counter provides the number of bytes received in IPv6 datagrams
|
||||||
|
* with header errors (length, version mismatch). The value in the
|
||||||
|
* Length field of IPv6 header is used to update this counter.
|
||||||
|
* (Ethernet header, FCS, pad, or IP pad bytes are not included in
|
||||||
|
* this counter */
|
||||||
|
nveu64_t mmc_rx_ipv6_hderr;
|
||||||
|
/** This counter provides upper 32 bytes received in IPv6 datagrams
|
||||||
|
* with header errors (length, version mismatch). The value in the
|
||||||
|
* Length field of IPv6 header is used to update this counter.
|
||||||
|
* (Ethernet header, FCS, pad, or IP pad bytes are not included in
|
||||||
|
* this counter */
|
||||||
|
nveu64_t mmc_rx_ipv6_hderr_h;
|
||||||
|
/** This counter provides the number of bytes received in IPv6
|
||||||
|
* datagrams that did not have a TCP, UDP, or ICMP payload. The value
|
||||||
|
* in the Length field of IPv6 header is used to update this counter.
|
||||||
|
* (Ethernet header, FCS, pad, or IP pad bytes are not included
|
||||||
|
* in this counter */
|
||||||
|
nveu64_t mmc_rx_ipv6_nopay;
|
||||||
|
/** This counter provides upper 32 bytes received in IPv6
|
||||||
|
* datagrams that did not have a TCP, UDP, or ICMP payload. The value
|
||||||
|
* in the Length field of IPv6 header is used to update this counter.
|
||||||
|
* (Ethernet header, FCS, pad, or IP pad bytes are not included
|
||||||
|
* in this counter */
|
||||||
|
nveu64_t mmc_rx_ipv6_nopay_h;
|
||||||
|
/* Protocols */
|
||||||
|
/** This counter provides the number of bytes received in a good UDP
|
||||||
|
* segment. This counter does not count IP header bytes */
|
||||||
|
nveu64_t mmc_rx_udp_gd_octets;
|
||||||
|
/** This counter provides upper 32 bytes received in a good UDP
|
||||||
|
* segment. This counter does not count IP header bytes */
|
||||||
|
nveu64_t mmc_rx_udp_gd_octets_h;
|
||||||
|
/** This counter provides the number of bytes received in a UDP
|
||||||
|
* segment that had checksum errors. This counter does not count
|
||||||
|
* IP header bytes */
|
||||||
|
nveu64_t mmc_rx_udp_err_octets;
|
||||||
|
/** This counter provides upper 32 bytes received in a UDP
|
||||||
|
* segment that had checksum errors. This counter does not count
|
||||||
|
* IP header bytes */
|
||||||
|
nveu64_t mmc_rx_udp_err_octets_h;
|
||||||
|
/** This counter provides the number of bytes received in a good
|
||||||
|
* TCP segment. This counter does not count IP header bytes */
|
||||||
|
nveu64_t mmc_rx_tcp_gd_octets;
|
||||||
|
/** This counter provides upper 32 bytes received in a good
|
||||||
|
* TCP segment. This counter does not count IP header bytes */
|
||||||
|
nveu64_t mmc_rx_tcp_gd_octets_h;
|
||||||
|
/** This counter provides the number of bytes received in a TCP
|
||||||
|
* segment that had checksum errors. This counter does not count
|
||||||
|
* IP header bytes */
|
||||||
|
nveu64_t mmc_rx_tcp_err_octets;
|
||||||
|
/** This counter provides upper 32 bytes received in a TCP
|
||||||
|
* segment that had checksum errors. This counter does not count
|
||||||
|
* IP header bytes */
|
||||||
|
nveu64_t mmc_rx_tcp_err_octets_h;
|
||||||
|
/** This counter provides the number of bytes received in a good
|
||||||
|
* ICMP segment. This counter does not count IP header bytes */
|
||||||
|
nveu64_t mmc_rx_icmp_gd_octets;
|
||||||
|
/** This counter provides upper 32 bytes received in a good
|
||||||
|
* ICMP segment. This counter does not count IP header bytes */
|
||||||
|
nveu64_t mmc_rx_icmp_gd_octets_h;
|
||||||
|
/** This counter provides the number of bytes received in an ICMP
|
||||||
|
* segment that had checksum errors. This counter does not count
|
||||||
|
* IP header bytes */
|
||||||
|
nveu64_t mmc_rx_icmp_err_octets;
|
||||||
|
/** This counter provides upper 32 bits of bytes received in an ICMP
|
||||||
|
* segment that had checksum errors. This counter does not count
|
||||||
|
* IP header bytes */
|
||||||
|
nveu64_t mmc_rx_icmp_err_octets_h;
|
||||||
|
/** This counter provides the number of additional mPackets
|
||||||
|
* transmitted due to preemption */
|
||||||
|
nveu64_t mmc_tx_fpe_frag_cnt;
|
||||||
|
/** This counter provides the count of number of times a hold
|
||||||
|
* request is given to MAC */
|
||||||
|
nveu64_t mmc_tx_fpe_hold_req_cnt;
|
||||||
|
/** This counter provides the number of MAC frames with reassembly
|
||||||
|
* errors on the Receiver, due to mismatch in the fragment
|
||||||
|
* count value */
|
||||||
|
nveu64_t mmc_rx_packet_reass_err_cnt;
|
||||||
|
/** This counter provides the number of received MAC frames rejected
|
||||||
|
* due to unknown SMD value and MAC frame fragments rejected due
|
||||||
|
* to arriving with an SMD-C when there was no preceding preempted
|
||||||
|
* frame */
|
||||||
|
nveu64_t mmc_rx_packet_smd_err_cnt;
|
||||||
|
/** This counter provides the number of MAC frames that were
|
||||||
|
* successfully reassembled and delivered to MAC */
|
||||||
|
nveu64_t mmc_rx_packet_asm_ok_cnt;
|
||||||
|
/** This counter provides the number of additional mPackets received
|
||||||
|
* due to preemption */
|
||||||
|
nveu64_t mmc_rx_fpe_fragment_cnt;
|
||||||
|
};
|
||||||
|
|
||||||
|
#pragma pack(pop)
|
||||||
|
#endif /* INCLUDED_NVETHERNETRM_EXPORT_H */
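/* Illustrative sketch (not part of the header): each "_h" member above is
 * documented as holding the upper 32 bits of its companion counter, so a
 * consumer of these statistics can fold the two halves into one 64-bit
 * value. The helper below is hypothetical and only shows the arithmetic. */
static inline nveu64_t example_combine_mmc_counter(nveu64_t lo, nveu64_t hi)
{
	return ((hi & 0xFFFFFFFFULL) << 32) | (lo & 0xFFFFFFFFULL);
}
/* e.g. example_combine_mmc_counter(mmc_tx_octetcount_g, mmc_tx_octetcount_g_h)
 * yields the full 64-bit count of bytes transmitted in good packets. */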
|
||||||
95
include/nvethernetrm_l3l4.h
Normal file
@@ -0,0 +1,95 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
|
||||||
|
*
|
||||||
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
|
* to deal in the Software without restriction, including without limitation
|
||||||
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||||
|
* and/or sell copies of the Software, and to permit persons to whom the
|
||||||
|
* Software is furnished to do so, subject to the following conditions:
|
||||||
|
*
|
||||||
|
* The above copyright notice and this permission notice shall be included in
|
||||||
|
* all copies or substantial portions of the Software.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||||
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||||
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||||
|
* DEALINGS IN THE SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef INCLUDED_NVETHERNETRM_L3L4_H
|
||||||
|
#define INCLUDED_NVETHERNETRM_L3L4_H
|
||||||
|
|
||||||
|
#include <nvethernet_type.h>
|
||||||
|
|
||||||
|
/** helper macro for enable */
|
||||||
|
#define OSI_TRUE ((nveu32_t)1U)
|
||||||
|
|
||||||
|
/** helper macro to disable */
|
||||||
|
#define OSI_FALSE ((nveu32_t)0U)
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief L3/L4 filter function dependent parameter
|
||||||
|
*/
|
||||||
|
struct osi_l3_l4_filter {
|
||||||
|
/** filter data */
|
||||||
|
struct {
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
|
/** udp (OSI_TRUE) or tcp (OSI_FALSE) */
|
||||||
|
nveu32_t is_udp;
|
||||||
|
/** ipv6 (OSI_TRUE) or ipv4 (OSI_FALSE) */
|
||||||
|
nveu32_t is_ipv6;
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
|
/** destination ip address information */
|
||||||
|
struct {
|
||||||
|
/** ipv4 address */
|
||||||
|
nveu8_t ip4_addr[4];
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
|
/** ipv6 address */
|
||||||
|
nveu16_t ip6_addr[8];
|
||||||
|
/** Port number */
|
||||||
|
nveu16_t port_no;
|
||||||
|
/** addr match enable (OSI_TRUE) or disable (OSI_FALSE) */
|
||||||
|
nveu32_t addr_match;
|
||||||
|
/** perfect(OSI_FALSE) or inverse(OSI_TRUE) match for address */
|
||||||
|
nveu32_t addr_match_inv;
|
||||||
|
/** port match enable (OSI_TRUE) or disable (OSI_FALSE) */
|
||||||
|
nveu32_t port_match;
|
||||||
|
/** perfect(OSI_FALSE) or inverse(OSI_TRUE) match for port */
|
||||||
|
nveu32_t port_match_inv;
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
|
} dst;
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
|
/** ip address and port information */
|
||||||
|
struct {
|
||||||
|
/** ipv4 address */
|
||||||
|
nveu8_t ip4_addr[4];
|
||||||
|
/** ipv6 address */
|
||||||
|
nveu16_t ip6_addr[8];
|
||||||
|
/** Port number */
|
||||||
|
nveu16_t port_no;
|
||||||
|
/** addr match enable (OSI_TRUE) or disable (OSI_FALSE) */
|
||||||
|
nveu32_t addr_match;
|
||||||
|
/** perfect(OSI_FALSE) or inverse(OSI_TRUE) match for address */
|
||||||
|
nveu32_t addr_match_inv;
|
||||||
|
/** port match enable (OSI_TRUE) or disable (OSI_FALSE) */
|
||||||
|
nveu32_t port_match;
|
||||||
|
/** perfect(OSI_FALSE) or inverse(OSI_TRUE) match for port */
|
||||||
|
nveu32_t port_match_inv;
|
||||||
|
} src;
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
|
} data;
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
|
/** Represents whether DMA routing enabled (OSI_TRUE) or not (OSI_FALSE) */
|
||||||
|
nveu32_t dma_routing_enable;
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
|
/** DMA channel number of routing enabled */
|
||||||
|
nveu32_t dma_chan;
|
||||||
|
/** filter enable (OSI_TRUE) or disable (OSI_FALSE) */
|
||||||
|
nveu32_t filter_enb_dis;
|
||||||
|
};
|
||||||
|
|
||||||
|
#endif /* INCLUDED_NVETHERNETRM_L3L4_H */
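/* Illustrative sketch (assumes a build without OSI_STRIPPED_LIB): filling an
 * osi_l3_l4_filter so it matches a destination IPv4 address and routes hits
 * to DMA channel 2. The address and channel are arbitrary example values,
 * and how the structure is then handed to the core layer is outside this
 * header; remaining members are left at their existing values. */
#ifndef OSI_STRIPPED_LIB
static inline void example_fill_ipv4_dst_filter(struct osi_l3_l4_filter *flt)
{
	flt->data.is_udp = OSI_FALSE;             /* TCP L4 type */
	flt->data.is_ipv6 = OSI_FALSE;            /* IPv4 filter */
	flt->data.dst.ip4_addr[0] = 192U;
	flt->data.dst.ip4_addr[1] = 168U;
	flt->data.dst.ip4_addr[2] = 1U;
	flt->data.dst.ip4_addr[3] = 10U;
	flt->data.dst.addr_match = OSI_TRUE;      /* enable address match */
	flt->data.dst.addr_match_inv = OSI_FALSE; /* perfect (non-inverse) match */
	flt->dma_routing_enable = OSI_TRUE;       /* route matches to a DMA channel */
	flt->dma_chan = 2U;
	flt->filter_enb_dis = OSI_TRUE;           /* enable the filter */
}
#endif /* !OSI_STRIPPED_LIB */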
|
||||||
@@ -23,7 +23,7 @@
|
|||||||
#ifndef INCLUDED_OSI_COMMON_H
|
#ifndef INCLUDED_OSI_COMMON_H
|
||||||
#define INCLUDED_OSI_COMMON_H
|
#define INCLUDED_OSI_COMMON_H
|
||||||
|
|
||||||
#include "../osi/common/type.h"
|
#include <nvethernet_type.h>
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @addtogroup FC Flow Control Threshold Macros
|
* @addtogroup FC Flow Control Threshold Macros
|
||||||
@@ -32,22 +32,9 @@
|
|||||||
* the flow control is asserted or de-asserted
|
* the flow control is asserted or de-asserted
|
||||||
* @{
|
* @{
|
||||||
*/
|
*/
|
||||||
#define FULL_MINUS_1_5K (unsigned int)1
|
#define FULL_MINUS_1_5K ((nveu32_t)1)
|
||||||
#define FULL_MINUS_2_K (unsigned int)2
|
#define FULL_MINUS_16_K ((nveu32_t)30)
|
||||||
#define FULL_MINUS_2_5K (unsigned int)3
|
#define FULL_MINUS_32_K ((nveu32_t)62)
|
||||||
#define FULL_MINUS_3_K (unsigned int)4
|
|
||||||
#define FULL_MINUS_4_K (unsigned int)6
|
|
||||||
#define FULL_MINUS_6_K (unsigned int)10
|
|
||||||
#define FULL_MINUS_10_K (unsigned int)18
|
|
||||||
#define FULL_MINUS_13_K (unsigned int)24
|
|
||||||
#define FULL_MINUS_14_K (unsigned int)26
|
|
||||||
#define FULL_MINUS_16_K (unsigned int)30
|
|
||||||
#define FULL_MINUS_18_K (unsigned int)34
|
|
||||||
#define FULL_MINUS_21_K (unsigned int)40
|
|
||||||
#define FULL_MINUS_24_K (unsigned int)46
|
|
||||||
#define FULL_MINUS_29_K (unsigned int)56
|
|
||||||
#define FULL_MINUS_31_K (unsigned int)60
|
|
||||||
#define FULL_MINUS_32_K (unsigned int)62
|
|
||||||
/** @} */
|
/** @} */
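/* Note (derived from the values above and the removed entries on the left):
 * the encoded threshold appears to follow (threshold_in_KB - 1) * 2, e.g.
 * FULL_MINUS_1_5K -> 1, FULL_MINUS_16_K -> (16 - 1) * 2 = 30 and
 * FULL_MINUS_32_K -> (32 - 1) * 2 = 62. */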
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -66,13 +53,46 @@
|
|||||||
#define OSI_MAX_TX_COALESCE_USEC 1020U
|
#define OSI_MAX_TX_COALESCE_USEC 1020U
|
||||||
#define OSI_MIN_TX_COALESCE_USEC 32U
|
#define OSI_MIN_TX_COALESCE_USEC 32U
|
||||||
#define OSI_MIN_TX_COALESCE_FRAMES 1U
|
#define OSI_MIN_TX_COALESCE_FRAMES 1U
|
||||||
|
#define OSI_PAUSE_FRAMES_DISABLE 0U
|
||||||
|
#define OSI_PAUSE_FRAMES_ENABLE 1U
|
||||||
#endif /* !OSI_STRIPPED_LIB */
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
|
|
||||||
/* Compiler hints for branch prediction */
|
/* Compiler hints for branch prediction */
|
||||||
#define osi_unlikely(x) __builtin_expect(!!(x), 0)
|
#define osi_unlikely(x) __builtin_expect(!!(x), 0)
|
||||||
/** @} */
|
/** @} */
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @addtogroup Helper MACROS
|
||||||
|
*
|
||||||
|
* @brief EQOS generic helper MACROS.
|
||||||
|
* @{
|
||||||
|
*/
|
||||||
|
#define OSI_MAX_24BITS 0xFFFFFFU
|
||||||
|
#define OSI_MAX_28BITS 0xFFFFFFFU
|
||||||
|
#define OSI_MAX_32BITS 0xFFFFFFFFU
|
||||||
|
#define OSI_MASK_16BITS 0xFFFFU
|
||||||
|
#define OSI_MASK_20BITS 0xFFFFFU
|
||||||
|
#define OSI_MASK_24BITS 0xFFFFFFU
|
||||||
|
#define OSI_GCL_SIZE_64 64U
|
||||||
|
#define OSI_GCL_SIZE_128 128U
|
||||||
|
#define OSI_GCL_SIZE_512 512U
|
||||||
|
#define OSI_GCL_SIZE_1024 1024U
|
||||||
|
/** @} */
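/* Illustrative sketch: the mask macros above are plain bit masks, used for
 * example to trim a value to its lower 24 bits before programming a 24-bit
 * field. The helper name is hypothetical. */
static inline nveu32_t example_mask_to_24bits(nveu32_t val)
{
	return (val & OSI_MASK_24BITS);
}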
|
||||||
|
|
||||||
#ifndef OSI_STRIPPED_LIB
|
#ifndef OSI_STRIPPED_LIB
|
||||||
|
/**
|
||||||
|
* @addtogroup Helper MACROS
|
||||||
|
*
|
||||||
|
* @brief EQOS generic helper MACROS.
|
||||||
|
* @{
|
||||||
|
*/
|
||||||
|
#define OSI_PTP_REQ_CLK_FREQ 250000000U
|
||||||
|
#define OSI_FLOW_CTRL_DISABLE 0U
|
||||||
|
#define OSI_ADDRESS_32BIT 0
|
||||||
|
#define OSI_ADDRESS_40BIT 1
|
||||||
|
#define OSI_ADDRESS_48BIT 2
|
||||||
|
/** @} */
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @addtogroup - LPI-Timers LPI configuration macros
|
* @addtogroup - LPI-Timers LPI configuration macros
|
||||||
*
|
*
|
||||||
@@ -120,47 +140,22 @@
|
|||||||
/** @} */
|
/** @} */
|
||||||
#endif /* !OSI_STRIPPED_LIB */
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
|
|
||||||
/**
|
|
||||||
* @addtogroup Helper Helper MACROS
|
|
||||||
*
|
|
||||||
* @brief EQOS generic helper MACROS.
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
#ifndef OSI_STRIPPED_LIB
|
|
||||||
#define OSI_PAUSE_FRAMES_ENABLE 1U
|
|
||||||
#define OSI_PTP_REQ_CLK_FREQ 250000000U
|
|
||||||
#define OSI_FLOW_CTRL_DISABLE 0U
|
|
||||||
#define OSI_MAX_24BITS 0xFFFFFFU
|
|
||||||
#define OSI_MAX_28BITS 0xFFFFFFFU
|
|
||||||
#define OSI_MAX_32BITS 0xFFFFFFFFU
|
|
||||||
#define OSI_MASK_16BITS 0xFFFFU
|
|
||||||
#define OSI_MASK_20BITS 0xFFFFFU
|
|
||||||
#define OSI_MASK_24BITS 0xFFFFFFU
|
|
||||||
#define OSI_GCL_SIZE_64 64U
|
|
||||||
#define OSI_GCL_SIZE_128 128U
|
|
||||||
#define OSI_GCL_SIZE_256 256U
|
|
||||||
#define OSI_GCL_SIZE_512 512U
|
|
||||||
#define OSI_GCL_SIZE_1024 1024U
|
|
||||||
|
|
||||||
#define OSI_POLL_COUNT 1000U
|
#define OSI_POLL_COUNT 1000U
|
||||||
|
|
||||||
#define OSI_ADDRESS_32BIT 0
|
|
||||||
#define OSI_ADDRESS_40BIT 1
|
|
||||||
#define OSI_ADDRESS_48BIT 2
|
|
||||||
#endif /* !OSI_STRIPPED_LIB */
|
|
||||||
|
|
||||||
#ifndef UINT_MAX
|
#ifndef UINT_MAX
|
||||||
#define UINT_MAX (~0U)
|
#define UINT_MAX (~0U)
|
||||||
#endif
|
#endif
|
||||||
#ifndef INT_MAX
|
#ifndef INT_MAX
|
||||||
#define INT_MAX (0x7FFFFFFF)
|
#define INT_MAX (0x7FFFFFFF)
|
||||||
|
#ifndef OSI_LLONG_MAX
|
||||||
|
#define OSI_LLONG_MAX (0x7FFFFFFFFFFFFFFF)
|
||||||
|
#endif
|
||||||
#endif
|
#endif
|
||||||
/** @} */
|
/** @} */
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @addtogroup Helper Helper MACROS
|
* @addtogroup Generic helper MACROS
|
||||||
*
|
*
|
||||||
* @brief EQOS generic helper MACROS.
|
* @brief These are Generic helper macros used at various places.
|
||||||
* @{
|
* @{
|
||||||
*/
|
*/
|
||||||
#define OSI_UCHAR_MAX (0xFFU)
|
#define OSI_UCHAR_MAX (0xFFU)
|
||||||
@@ -169,21 +164,23 @@
|
|||||||
/* log levels */
|
/* log levels */
|
||||||
|
|
||||||
#define OSI_LOG_INFO 1U
|
#define OSI_LOG_INFO 1U
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
#define OSI_LOG_WARN 2U
|
#define OSI_LOG_WARN 2U
|
||||||
|
#endif /* OSI_STRIPPED_LIB */
|
||||||
#define OSI_LOG_ERR 3U
|
#define OSI_LOG_ERR 3U
|
||||||
/* Error types */
|
/* Error types */
|
||||||
#define OSI_LOG_ARG_OUTOFBOUND 1U
|
#define OSI_LOG_ARG_OUTOFBOUND 1U
|
||||||
#define OSI_LOG_ARG_INVALID 2U
|
#define OSI_LOG_ARG_INVALID 2U
|
||||||
#define OSI_LOG_ARG_HW_FAIL 4U
|
#define OSI_LOG_ARG_HW_FAIL 4U
|
||||||
#define OSI_LOG_WARN 2U
|
|
||||||
#ifndef OSI_STRIPPED_LIB
|
|
||||||
#define OSI_LOG_ARG_OPNOTSUPP 3U
|
#define OSI_LOG_ARG_OPNOTSUPP 3U
|
||||||
#endif /* !OSI_STRIPPED_LIB */
|
|
||||||
/* Default maximum Giant Packet Size Limit is 16K */
|
/* Default maximum Giant Packet Size Limit is 16K */
|
||||||
#define OSI_MAX_MTU_SIZE 16383U
|
#define OSI_MAX_MTU_SIZE 16383U
|
||||||
|
|
||||||
|
#ifdef UPDATED_PAD_CAL
|
||||||
/* MAC Tx/Rx Idle retry and delay count */
|
/* MAC Tx/Rx Idle retry and delay count */
|
||||||
#define OSI_TXRX_IDLE_RETRY 5000U
|
#define OSI_TXRX_IDLE_RETRY 5000U
|
||||||
#define OSI_DELAY_COUNT 10U
|
#define OSI_DELAY_COUNT 10U
|
||||||
|
#endif
|
||||||
|
|
||||||
#define EQOS_DMA_CHX_STATUS(x) ((0x0080U * (x)) + 0x1160U)
|
#define EQOS_DMA_CHX_STATUS(x) ((0x0080U * (x)) + 0x1160U)
|
||||||
#define MGBE_DMA_CHX_STATUS(x) ((0x0080U * (x)) + 0x3160U)
|
#define MGBE_DMA_CHX_STATUS(x) ((0x0080U * (x)) + 0x3160U)
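/* Worked example: EQOS_DMA_CHX_STATUS(2) = (0x0080 * 2) + 0x1160 = 0x1260,
 * the DMA status register offset for channel 2; the MGBE variant differs
 * only in its 0x3160 base. */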
|
||||||
@@ -200,15 +197,16 @@
|
|||||||
/* MACSEC max SC's supported 16*/
|
/* MACSEC max SC's supported 16*/
|
||||||
#define OSI_MACSEC_SC_INDEX_MAX 16
|
#define OSI_MACSEC_SC_INDEX_MAX 16
|
||||||
|
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
/* HW supports 8 Hash table regs, but eqos_validate_core_regs only checks 4 */
|
/* HW supports 8 Hash table regs, but eqos_validate_core_regs only checks 4 */
|
||||||
#define OSI_EQOS_MAX_HASH_REGS 4U
|
#define OSI_EQOS_MAX_HASH_REGS 4U
|
||||||
|
#endif /* OSI_STRIPPED_LIB */
|
||||||
|
|
||||||
#define MAC_VERSION 0x110
|
#define MAC_VERSION 0x110
|
||||||
#define MAC_VERSION_SNVER_MASK 0x7FU
|
#define MAC_VERSION_SNVER_MASK 0x7FU
|
||||||
|
|
||||||
#define OSI_MAC_HW_EQOS 0U
|
#define OSI_MAC_HW_EQOS 0U
|
||||||
#define OSI_MAC_HW_MGBE 1U
|
#define OSI_MAC_HW_MGBE 1U
|
||||||
#define OSI_ETH_ALEN 6U
|
|
||||||
#define OSI_MAX_VM_IRQS 5U
|
#define OSI_MAX_VM_IRQS 5U
|
||||||
|
|
||||||
#define OSI_NULL ((void *)0)
|
#define OSI_NULL ((void *)0)
|
||||||
@@ -216,37 +214,30 @@
|
|||||||
#define OSI_NONE 0U
|
#define OSI_NONE 0U
|
||||||
#define OSI_NONE_SIGNED 0
|
#define OSI_NONE_SIGNED 0
|
||||||
#define OSI_DISABLE 0U
|
#define OSI_DISABLE 0U
|
||||||
|
#define OSI_H_DISABLE 0x10101010U
|
||||||
|
#define OSI_H_ENABLE (~OSI_H_DISABLE)
|
||||||
|
|
||||||
#define OSI_BIT(nr) ((nveu32_t)1 << (nr))
|
#define OSI_BIT(nr) ((nveu32_t)1 << (nr))
|
||||||
|
|
||||||
#define OSI_EQOS_MAC_4_10 0x41U
|
#ifndef OSI_STRIPPED_LIB
|
||||||
#define OSI_EQOS_MAC_5_00 0x50U
|
|
||||||
#define OSI_EQOS_MAC_5_10 0x51U
|
|
||||||
#define OSI_EQOS_MAC_5_30 0x53U
|
|
||||||
#define OSI_MGBE_MAC_3_00 0x30U
|
#define OSI_MGBE_MAC_3_00 0x30U
|
||||||
#define OSI_MGBE_MAC_3_10 0x31U
|
#define OSI_EQOS_MAC_4_10 0x41U
|
||||||
|
#define OSI_EQOS_MAC_5_10 0x51U
|
||||||
#define OSI_MGBE_MAC_4_00 0x40U
|
#define OSI_MGBE_MAC_4_00 0x40U
|
||||||
|
#endif /* OSI_STRIPPED_LIB */
|
||||||
|
|
||||||
|
#define OSI_EQOS_MAC_5_00 0x50U
|
||||||
|
#define OSI_EQOS_MAC_5_30 0x53U
|
||||||
|
#define OSI_MGBE_MAC_3_10 0x31U
|
||||||
|
|
||||||
#define OSI_MAX_VM_IRQS 5U
|
#define OSI_MAX_VM_IRQS 5U
|
||||||
#define OSI_IP4_FILTER 0U
|
|
||||||
#define OSI_IP6_FILTER 1U
|
|
||||||
|
|
||||||
#ifndef OSI_STRIPPED_LIB
|
#ifndef OSI_STRIPPED_LIB
|
||||||
#define OSI_L2_FILTER_INDEX_ANY 127U
|
|
||||||
#define OSI_HASH_FILTER_MODE 1U
|
#define OSI_HASH_FILTER_MODE 1U
|
||||||
#define OSI_L4_FILTER_TCP 0U
|
#define OSI_L4_FILTER_TCP 0U
|
||||||
#define OSI_L4_FILTER_UDP 1U
|
#define OSI_L4_FILTER_UDP 1U
|
||||||
#define OSI_PERFECT_FILTER_MODE 0U
|
#define OSI_PERFECT_FILTER_MODE 0U
|
||||||
|
|
||||||
#define NV_ETH_FCS_LEN 0x4U
|
|
||||||
#define NV_ETH_FRAME_LEN 1514U
|
|
||||||
|
|
||||||
#define MAX_ETH_FRAME_LEN_DEFAULT \
|
|
||||||
(NV_ETH_FRAME_LEN + NV_ETH_FCS_LEN + NV_VLAN_HLEN)
|
|
||||||
#define OSI_MTU_SIZE_16K 16000U
|
|
||||||
#define OSI_MTU_SIZE_8K 8000U
|
|
||||||
#define OSI_MTU_SIZE_4K 4000U
|
|
||||||
#define OSI_MTU_SIZE_2K 2000U
|
|
||||||
#define OSI_INVALID_CHAN_NUM 0xFFU
|
#define OSI_INVALID_CHAN_NUM 0xFFU
|
||||||
#endif /* OSI_STRIPPED_LIB */
|
#endif /* OSI_STRIPPED_LIB */
|
||||||
/** @} */
|
/** @} */
|
||||||
@@ -262,31 +253,8 @@
|
|||||||
#define OSI_DEBUG_TYPE_REG 2U
|
#define OSI_DEBUG_TYPE_REG 2U
|
||||||
#define OSI_DEBUG_TYPE_STRUCTS 3U
|
#define OSI_DEBUG_TYPE_STRUCTS 3U
|
||||||
#endif /* OSI_DEBUG */
|
#endif /* OSI_DEBUG */
|
||||||
|
|
||||||
#ifndef OSI_STRIPPED_LIB
|
|
||||||
/**
|
|
||||||
* @addtogroup MTL queue operation mode
|
|
||||||
*
|
|
||||||
* @brief MTL queue operation mode options
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
#define OSI_MTL_QUEUE_DISABLED 0x0U
|
|
||||||
#define OSI_MTL_QUEUE_AVB 0x1U
|
|
||||||
#define OSI_MTL_QUEUE_ENABLE 0x2U
|
|
||||||
#define OSI_MTL_QUEUE_MODEMAX 0x3U
|
|
||||||
/** @} */
|
/** @} */
|
||||||
|
|
||||||
/**
|
|
||||||
* @addtogroup EQOS_MTL MTL queue AVB algorithm mode
|
|
||||||
*
|
|
||||||
* @brief MTL AVB queue algorithm type
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
#define OSI_MTL_TXQ_AVALG_CBS 1U
|
|
||||||
#define OSI_MTL_TXQ_AVALG_SP 0U
|
|
||||||
/** @} */
|
|
||||||
#endif /* OSI_STRIPPED_LIB */
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief unused function attribute
|
* @brief unused function attribute
|
||||||
*/
|
*/
|
||||||
@@ -320,7 +288,7 @@ static inline nveu64_t osi_update_stats_counter(nveu64_t last_value,
|
|||||||
|
|
||||||
if (temp < last_value) {
|
if (temp < last_value) {
|
||||||
/* Stats overflow, so reset it to zero */
|
/* Stats overflow, so reset it to zero */
|
||||||
return 0UL;
|
temp = 0UL;
|
||||||
}
|
}
|
||||||
|
|
||||||
return temp;
|
return temp;
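/* Illustrative sketch: typical use of the overflow-safe helper above to
 * accumulate a 64-bit statistics counter. The helper's second parameter is
 * assumed to be the increment, as the visible body suggests. */
static inline void example_update_counter(nveu64_t *counter, nveu64_t incr)
{
	*counter = osi_update_stats_counter(*counter, incr);
}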
|
||||||
|
|||||||
1636
include/osi_core.h
File diff suppressed because it is too large
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -45,7 +45,6 @@
|
|||||||
* @brief EQOS generic helper MACROS.
|
* @brief EQOS generic helper MACROS.
|
||||||
* @{
|
* @{
|
||||||
*/
|
*/
|
||||||
#define OSI_NET_IP_ALIGN 0x2U
|
|
||||||
#define NV_VLAN_HLEN 0x4U
|
#define NV_VLAN_HLEN 0x4U
|
||||||
#define OSI_ETH_HLEN 0xEU
|
#define OSI_ETH_HLEN 0xEU
|
||||||
|
|
||||||
@@ -67,6 +66,7 @@
|
|||||||
#define OSI_VM_IRQ_RX_CHAN_MASK(x) OSI_BIT(((x) * 2U) + 1U)
|
#define OSI_VM_IRQ_RX_CHAN_MASK(x) OSI_BIT(((x) * 2U) + 1U)
|
||||||
/** @} */
|
/** @} */
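/* Illustrative sketch: composing the Rx interrupt mask for DMA channels 0
 * and 1 with the macro above (bit 1 and bit 3 respectively). */
static inline nveu32_t example_rx_irq_mask_chan0_and_1(void)
{
	return OSI_VM_IRQ_RX_CHAN_MASK(0U) | OSI_VM_IRQ_RX_CHAN_MASK(1U);
}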
|
||||||
|
|
||||||
|
#ifdef LOG_OSI
|
||||||
/**
|
/**
|
||||||
* OSI error macro definition,
|
* OSI error macro definition,
|
||||||
* @param[in] priv: OSD private data OR NULL
|
* @param[in] priv: OSD private data OR NULL
|
||||||
@@ -94,6 +94,10 @@
|
|||||||
OSI_LOG_INFO, type, err, loga);\
|
OSI_LOG_INFO, type, err, loga);\
|
||||||
}
|
}
|
||||||
#endif /* !OSI_STRIPPED_LIB */
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
|
#else
|
||||||
|
#define OSI_DMA_ERR(priv, type, err, loga)
|
||||||
|
#endif /* LOG_OSI */
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @addtogroup EQOS-PKT Packet context fields
|
* @addtogroup EQOS-PKT Packet context fields
|
||||||
*
|
*
|
||||||
@@ -119,7 +123,9 @@
|
|||||||
/** Paged buffer */
|
/** Paged buffer */
|
||||||
#define OSI_PKT_CX_PAGED_BUF OSI_BIT(4)
|
#define OSI_PKT_CX_PAGED_BUF OSI_BIT(4)
|
||||||
/** Rx packet has RSS hash */
|
/** Rx packet has RSS hash */
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
#define OSI_PKT_CX_RSS OSI_BIT(5)
|
#define OSI_PKT_CX_RSS OSI_BIT(5)
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
/** Valid packet */
|
/** Valid packet */
|
||||||
#define OSI_PKT_CX_VALID OSI_BIT(10)
|
#define OSI_PKT_CX_VALID OSI_BIT(10)
|
||||||
/** Update Packet Length in Tx Desc3 */
|
/** Update Packet Length in Tx Desc3 */
|
||||||
@@ -128,18 +134,18 @@
|
|||||||
#define OSI_PKT_CX_IP_CSUM OSI_BIT(12)
|
#define OSI_PKT_CX_IP_CSUM OSI_BIT(12)
|
||||||
/** @} */
|
/** @} */
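/* Illustrative sketch: the OSI_PKT_CX_* values are bit flags tested against
 * a packet context "flags" word, e.g. checking that a received packet was
 * marked valid. The helper name is hypothetical. */
static inline nveu32_t example_pkt_cx_is_valid(nveu32_t flags)
{
	return ((flags & OSI_PKT_CX_VALID) == OSI_PKT_CX_VALID) ? 1U : 0U;
}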
|
||||||
|
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
/**
|
/**
|
||||||
* @addtogroup SLOT function context fields
|
* @addtogroup SLOT function context fields
|
||||||
*
|
*
|
||||||
* @brief These flags are used for DMA channel Slot context configuration
|
* @brief These flags are used for DMA channel Slot context configuration
|
||||||
* @{
|
* @{
|
||||||
*/
|
*/
|
||||||
#ifndef OSI_STRIPPED_LIB
|
|
||||||
#define OSI_SLOT_INTVL_DEFAULT 125U
|
#define OSI_SLOT_INTVL_DEFAULT 125U
|
||||||
#define OSI_SLOT_INTVL_MAX 4095U
|
#define OSI_SLOT_INTVL_MAX 4095U
|
||||||
#endif /* !OSI_STRIPPED_LIB */
|
|
||||||
#define OSI_SLOT_NUM_MAX 16U
|
#define OSI_SLOT_NUM_MAX 16U
|
||||||
/** @} */
|
/** @} */
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @addtogroup EQOS-TX Tx done packet context fields
|
* @addtogroup EQOS-TX Tx done packet context fields
|
||||||
@@ -209,7 +215,7 @@
|
|||||||
|
|
||||||
/** @} */
|
/** @} */
|
||||||
|
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
/**
|
/**
|
||||||
* @addtogroup RSS-HASH type
|
* @addtogroup RSS-HASH type
|
||||||
*
|
*
|
||||||
@@ -221,6 +227,7 @@
|
|||||||
#define OSI_RX_PKT_HASH_TYPE_L3 0x2U
|
#define OSI_RX_PKT_HASH_TYPE_L3 0x2U
|
||||||
#define OSI_RX_PKT_HASH_TYPE_L4 0x3U
|
#define OSI_RX_PKT_HASH_TYPE_L4 0x3U
|
||||||
/** @} */
|
/** @} */
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @addtogroup OSI-INTR OSI DMA interrupt handling macros.
|
* @addtogroup OSI-INTR OSI DMA interrupt handling macros.
|
||||||
@@ -244,6 +251,7 @@
|
|||||||
#ifdef OSI_DEBUG
|
#ifdef OSI_DEBUG
|
||||||
#define OSI_DMA_IOCTL_CMD_REG_DUMP 1U
|
#define OSI_DMA_IOCTL_CMD_REG_DUMP 1U
|
||||||
#define OSI_DMA_IOCTL_CMD_STRUCTS_DUMP 2U
|
#define OSI_DMA_IOCTL_CMD_STRUCTS_DUMP 2U
|
||||||
|
#define OSI_DMA_IOCTL_CMD_DEBUG_INTR_CONFIG 3U
|
||||||
#endif /* OSI_DEBUG */
|
#endif /* OSI_DEBUG */
|
||||||
/** @} */
|
/** @} */
|
||||||
|
|
||||||
@@ -252,6 +260,7 @@
|
|||||||
*/
|
*/
|
||||||
#define OSI_TX_MAX_BUFF_SIZE 0x3FFFU
|
#define OSI_TX_MAX_BUFF_SIZE 0x3FFFU
|
||||||
|
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
/**
|
/**
|
||||||
* @brief OSI packet error stats
|
* @brief OSI packet error stats
|
||||||
*/
|
*/
|
||||||
@@ -287,14 +296,15 @@ struct osi_pkt_err_stats {
|
|||||||
/** FRP Parsed count, includes accept
|
/** FRP Parsed count, includes accept
|
||||||
* routing-bypass, or result-bypass count.
|
* routing-bypass, or result-bypass count.
|
||||||
*/
|
*/
|
||||||
unsigned long frp_parsed;
|
nveu64_t frp_parsed;
|
||||||
/** FRP Dropped count */
|
/** FRP Dropped count */
|
||||||
unsigned long frp_dropped;
|
nveu64_t frp_dropped;
|
||||||
/** FRP Parsing Error count */
|
/** FRP Parsing Error count */
|
||||||
unsigned long frp_err;
|
nveu64_t frp_err;
|
||||||
/** FRP Incomplete Parsing */
|
/** FRP Incomplete Parsing */
|
||||||
unsigned long frp_incomplete;
|
nveu64_t frp_incomplete;
|
||||||
};
|
};
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Receive Descriptor
|
* @brief Receive Descriptor
|
||||||
@@ -322,6 +332,8 @@ struct osi_rx_swcx {
|
|||||||
nveu32_t len;
|
nveu32_t len;
|
||||||
/** Flags to share info about Rx swcx between OSD and OSI */
|
/** Flags to share info about Rx swcx between OSD and OSI */
|
||||||
nveu32_t flags;
|
nveu32_t flags;
|
||||||
|
/** nvsocket data index */
|
||||||
|
nveu64_t data_idx;
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -333,16 +345,18 @@ struct osi_rx_pkt_cx {
|
|||||||
nveu32_t flags;
|
nveu32_t flags;
|
||||||
/** Stores the Rx csum */
|
/** Stores the Rx csum */
|
||||||
nveu32_t rxcsum;
|
nveu32_t rxcsum;
|
||||||
/** Stores the VLAN tag ID in received packet */
|
|
||||||
nveu32_t vlan_tag;
|
|
||||||
/** Length of received packet */
|
/** Length of received packet */
|
||||||
nveu32_t pkt_len;
|
nveu32_t pkt_len;
|
||||||
|
/** TS in nsec for the received packet */
|
||||||
|
nveul64_t ns;
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
|
/** Stores the VLAN tag ID in received packet */
|
||||||
|
nveu32_t vlan_tag;
|
||||||
/** Stores received packet hash */
|
/** Stores received packet hash */
|
||||||
nveu32_t rx_hash;
|
nveu32_t rx_hash;
|
||||||
/** Store type of packet for which hash carries at rx_hash */
|
/** Store type of packet for which hash carries at rx_hash */
|
||||||
nveu32_t rx_hash_type;
|
nveu32_t rx_hash_type;
|
||||||
/** TS in nsec for the received packet */
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
nveul64_t ns;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -374,20 +388,22 @@ struct osi_tx_swcx {
|
|||||||
void *buf_virt_addr;
|
void *buf_virt_addr;
|
||||||
/** Length of buffer */
|
/** Length of buffer */
|
||||||
nveu32_t len;
|
nveu32_t len;
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
/** Flag to keep track of whether buffer pointed by buf_phy_addr
|
/** Flag to keep track of whether buffer pointed by buf_phy_addr
|
||||||
* is a paged buffer/linear buffer */
|
* is a paged buffer/linear buffer */
|
||||||
nveu32_t is_paged_buf;
|
nveu32_t is_paged_buf;
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
/** Flag to keep track of SWCX
|
/** Flag to keep track of SWCX
|
||||||
* Bit 0 is_paged_buf - whether buffer pointed by buf_phy_addr
|
* Bit 0 is_paged_buf - whether buffer pointed by buf_phy_addr
|
||||||
* is a paged buffer/linear buffer
|
* is a paged buffer/linear buffer
|
||||||
* Bit 1 PTP hwtime form timestamp registers */
|
* Bit 1 PTP hwtime form timestamp registers */
|
||||||
unsigned int flags;
|
nveu32_t flags;
|
||||||
/** Packet id of packet for which TX timestamp needed */
|
/** Packet id of packet for which TX timestamp needed */
|
||||||
unsigned int pktid;
|
nveu32_t pktid;
|
||||||
/** dma channel number for osd use */
|
/** dma channel number for osd use */
|
||||||
nveu32_t chan;
|
nveu32_t chan;
|
||||||
/** reserved field 1 for future use */
|
/** nvsocket data index */
|
||||||
nveu64_t rsvd1;
|
nveu64_t data_idx;
|
||||||
/** reserved field 2 for future use */
|
/** reserved field 2 for future use */
|
||||||
nveu64_t rsvd2;
|
nveu64_t rsvd2;
|
||||||
};
|
};
|
||||||
@@ -438,7 +454,7 @@ struct osi_txdone_pkt_cx {
|
|||||||
* bit is set in fields */
|
* bit is set in fields */
|
||||||
nveul64_t ns;
|
nveul64_t ns;
|
||||||
/** Passing packet id to map TX time to packet */
|
/** Passing packet id to map TX time to packet */
|
||||||
unsigned int pktid;
|
nveu32_t pktid;
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -456,18 +472,23 @@ struct osi_tx_ring {
|
|||||||
nveu32_t cur_tx_idx;
|
nveu32_t cur_tx_idx;
|
||||||
/** Descriptor index for descriptor cleanup */
|
/** Descriptor index for descriptor cleanup */
|
||||||
nveu32_t clean_idx;
|
nveu32_t clean_idx;
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
/** Slot function check */
|
/** Slot function check */
|
||||||
nveu32_t slot_check;
|
nveu32_t slot_check;
|
||||||
/** Slot number */
|
/** Slot number */
|
||||||
nveu32_t slot_number;
|
nveu32_t slot_number;
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
/** Transmit packet context */
|
/** Transmit packet context */
|
||||||
struct osi_tx_pkt_cx tx_pkt_cx;
|
struct osi_tx_pkt_cx tx_pkt_cx;
|
||||||
/** Transmit complete packet context information */
|
/** Transmit complete packet context information */
|
||||||
struct osi_txdone_pkt_cx txdone_pkt_cx;
|
struct osi_txdone_pkt_cx txdone_pkt_cx;
|
||||||
/** Number of packets or frames transmitted */
|
/** Number of packets or frames transmitted */
|
||||||
nveu32_t frame_cnt;
|
nveu32_t frame_cnt;
|
||||||
|
/** flag to skip memory barrier */
|
||||||
|
nveu32_t skip_dmb;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
/**
|
/**
|
||||||
* @brief osi_xtra_dma_stat_counters - OSI DMA extra stats counters
|
* @brief osi_xtra_dma_stat_counters - OSI DMA extra stats counters
|
||||||
*/
|
*/
|
||||||
@@ -489,6 +510,7 @@ struct osi_xtra_dma_stat_counters {
|
|||||||
/** Total number of TSO packet count */
|
/** Total number of TSO packet count */
|
||||||
nveu64_t tx_tso_pkt_n;
|
nveu64_t tx_tso_pkt_n;
|
||||||
};
|
};
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
|
|
||||||
struct osi_dma_priv_data;
|
struct osi_dma_priv_data;
|
||||||
|
|
||||||
@@ -522,13 +544,17 @@ struct osd_dma_ops {
|
|||||||
#endif /* OSI_DEBUG */
|
#endif /* OSI_DEBUG */
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#ifdef OSI_DEBUG
|
||||||
/**
|
/**
|
||||||
* @brief The OSI DMA IOCTL data structure.
|
* @brief The OSI DMA IOCTL data structure.
|
||||||
*/
|
*/
|
||||||
struct osi_dma_ioctl_data {
|
struct osi_dma_ioctl_data {
|
||||||
/** IOCTL command number */
|
/** IOCTL command number */
|
||||||
nveu32_t cmd;
|
nveu32_t cmd;
|
||||||
|
/** IOCTL command argument */
|
||||||
|
nveu32_t arg_u32;
|
||||||
};
|
};
|
||||||
|
#endif /* OSI_DEBUG */
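/* Illustrative sketch (OSI_DEBUG builds only): preparing the IOCTL data a
 * caller places in osi_dma_priv_data.ioctl_data before invoking the DMA
 * ioctl entry point (the entry point itself is not shown in this header;
 * the zero argument is an assumption, a register dump needs none). */
#ifdef OSI_DEBUG
static inline void example_request_reg_dump(struct osi_dma_ioctl_data *data)
{
	data->cmd = OSI_DMA_IOCTL_CMD_REG_DUMP;
	data->arg_u32 = 0U;
}
#endif /* OSI_DEBUG */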
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief The OSI DMA private data structure.
|
* @brief The OSI DMA private data structure.
|
||||||
@@ -552,10 +578,12 @@ struct osi_dma_priv_data {
|
|||||||
nveu32_t rx_buf_len;
|
nveu32_t rx_buf_len;
|
||||||
/** MTU size */
|
/** MTU size */
|
||||||
nveu32_t mtu;
|
nveu32_t mtu;
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
/** Packet error stats */
|
/** Packet error stats */
|
||||||
struct osi_pkt_err_stats pkt_err_stats;
|
struct osi_pkt_err_stats pkt_err_stats;
|
||||||
/** Extra DMA stats */
|
/** Extra DMA stats */
|
||||||
struct osi_xtra_dma_stat_counters dstats;
|
struct osi_xtra_dma_stat_counters dstats;
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
/** Receive Interrupt Watchdog Timer Count Units */
|
/** Receive Interrupt Watchdog Timer Count Units */
|
||||||
nveu32_t rx_riwt;
|
nveu32_t rx_riwt;
|
||||||
/** Flag which decides riwt is enabled(1) or disabled(0) */
|
/** Flag which decides riwt is enabled(1) or disabled(0) */
|
||||||
@@ -572,33 +600,30 @@ struct osi_dma_priv_data {
|
|||||||
nveu32_t tx_frames;
|
nveu32_t tx_frames;
|
||||||
/** Flag which decides tx_frames is enabled(1) or disabled(0) */
|
/** Flag which decides tx_frames is enabled(1) or disabled(0) */
|
||||||
nveu32_t use_tx_frames;
|
nveu32_t use_tx_frames;
|
||||||
|
/** DMA callback ops structure */
|
||||||
|
struct osd_dma_ops osd_ops;
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
/** Flag which decides virtualization is enabled(1) or disabled(0) */
|
/** Flag which decides virtualization is enabled(1) or disabled(0) */
|
||||||
nveu32_t use_virtualization;
|
nveu32_t use_virtualization;
|
||||||
/** Functional safety config to do periodic read-verify of
|
|
||||||
* certain safety critical dma registers */
|
|
||||||
void *safety_config;
|
|
||||||
/** Array of DMA channel slot interval value from DT */
|
/** Array of DMA channel slot interval value from DT */
|
||||||
nveu32_t slot_interval[OSI_MGBE_MAX_NUM_CHANS];
|
nveu32_t slot_interval[OSI_MGBE_MAX_NUM_CHANS];
|
||||||
/** Array of DMA channel slot enabled status from DT*/
|
/** Array of DMA channel slot enabled status from DT*/
|
||||||
nveu32_t slot_enabled[OSI_MGBE_MAX_NUM_CHANS];
|
nveu32_t slot_enabled[OSI_MGBE_MAX_NUM_CHANS];
|
||||||
/** DMA callback ops structure */
|
|
||||||
struct osd_dma_ops osd_ops;
|
|
||||||
/** Virtual address of reserved DMA buffer */
|
/** Virtual address of reserved DMA buffer */
|
||||||
void *resv_buf_virt_addr;
|
void *resv_buf_virt_addr;
|
||||||
/** Physical address of reserved DMA buffer */
|
/** Physical address of reserved DMA buffer */
|
||||||
nveu64_t resv_buf_phy_addr;
|
nveu64_t resv_buf_phy_addr;
|
||||||
/** Tegra Pre-si platform info */
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
nveu32_t pre_si;
|
|
||||||
/** PTP flags
|
/** PTP flags
|
||||||
* OSI_PTP_SYNC_MASTER - acting as master
|
* OSI_PTP_SYNC_MASTER - acting as master
|
||||||
* OSI_PTP_SYNC_SLAVE - acting as slave
|
* OSI_PTP_SYNC_SLAVE - acting as slave
|
||||||
* OSI_PTP_SYNC_ONESTEP - one-step mode
|
* OSI_PTP_SYNC_ONESTEP - one-step mode
|
||||||
* OSI_PTP_SYNC_TWOSTEP - two step mode
|
* OSI_PTP_SYNC_TWOSTEP - two step mode
|
||||||
*/
|
*/
|
||||||
unsigned int ptp_flag;
|
nveu32_t ptp_flag;
|
||||||
|
#ifdef OSI_DEBUG
|
||||||
/** OSI DMA IOCTL data */
|
/** OSI DMA IOCTL data */
|
||||||
struct osi_dma_ioctl_data ioctl_data;
|
struct osi_dma_ioctl_data ioctl_data;
|
||||||
#ifdef OSI_DEBUG
|
|
||||||
/** Flag to enable/disable descriptor dump */
|
/** Flag to enable/disable descriptor dump */
|
||||||
nveu32_t enable_desc_dump;
|
nveu32_t enable_desc_dump;
|
||||||
#endif /* OSI_DEBUG */
|
#endif /* OSI_DEBUG */
|
||||||
@@ -610,158 +635,6 @@ struct osi_dma_priv_data {
 	nveu32_t rx_ring_sz;
 };

-/**
- * @brief osi_disable_chan_tx_intr - Disables DMA Tx channel interrupts.
- *
- * @note
- * Algorithm:
- *  - Disables Tx interrupts at wrapper level.
- *
- * @param[in] osi_dma: OSI DMA private data.
- * @param[in] chan: DMA Tx channel number. Max OSI_EQOS_MAX_NUM_CHANS.
- *
- * @pre
- *  - MAC needs to be out of reset and proper clocks need to be configured.
- *  - DMA HW init need to be completed successfully, see osi_hw_dma_init
- *  - Mapping of physical IRQ line to DMA channel need to be maintained at
- *    OS Dependent layer and pass corresponding channel number.
- *
- * @note
- * Traceability Details:
- *  - SWUD_ID: ETHERNET_NVETHERNETCL_001
- *
- * @usage
- * - Allowed context for the API call
- *   - Interrupt handler: Yes
- *   - Signal handler: Yes
- *   - Thread safe: No
- *   - Async/Sync: Sync
- *   - Required Privileges: None
- * - API Group:
- *   - Initialization: No
- *   - Run time: Yes
- *   - De-initialization: Yes
- *
- * @retval 0 on success
- * @retval -1 on failure.
- */
-nve32_t osi_disable_chan_tx_intr(struct osi_dma_priv_data *osi_dma,
-				 nveu32_t chan);
-
-/**
- * @brief osi_enable_chan_tx_intr - Enable DMA Tx channel interrupts.
- *
- * @note
- * Algorithm:
- *  - Enables Tx interrupts at wrapper level.
- *
- * @param[in] osi_dma: OSI DMA private data.
- * @param[in] chan: DMA Tx channel number. Max OSI_EQOS_MAX_NUM_CHANS.
- *
- * @pre
- *  - MAC needs to be out of reset and proper clocks need to be configured.
- *  - DMA HW init need to be completed successfully, see osi_hw_dma_init
- *  - Mapping of physical IRQ line to DMA channel need to be maintained at
- *    OS Dependent layer and pass corresponding channel number.
- *
- * @note
- * Traceability Details:
- *  - SWUD_ID: ETHERNET_NVETHERNETCL_002
- *
- * @usage
- * - Allowed context for the API call
- *   - Interrupt handler: Yes
- *   - Signal handler: Yes
- *   - Thread safe: No
- *   - Async/Sync: Sync
- *   - Required Privileges: None
- * - API Group:
- *   - Initialization: Yes
- *   - Run time: Yes
- *   - De-initialization: No
- *
- * @retval 0 on success
- * @retval -1 on failure.
- */
-nve32_t osi_enable_chan_tx_intr(struct osi_dma_priv_data *osi_dma,
-				nveu32_t chan);
-
-/**
- * @brief osi_disable_chan_rx_intr - Disable DMA Rx channel interrupts.
- *
- * @note
- * Algorithm:
- *  - Disables Rx interrupts at wrapper level.
- *
- * @param[in] osi_dma: OSI DMA private data.
- * @param[in] chan: DMA Rx channel number. Max OSI_EQOS_MAX_NUM_CHANS.
- *
- * @pre
- *  - MAC needs to be out of reset and proper clocks need to be configured.
- *  - DMA HW init need to be completed successfully, see osi_hw_dma_init
- *  - Mapping of physical IRQ line to DMA channel need to be maintained at
- *    OS Dependent layer and pass corresponding channel number.
- *
- * @note
- * Traceability Details:
- *  - SWUD_ID: ETHERNET_NVETHERNETCL_003
- *
- * @usage
- * - Allowed context for the API call
- *   - Interrupt handler: Yes
- *   - Signal handler: Yes
- *   - Thread safe: No
- *   - Async/Sync: Sync
- *   - Required Privileges: None
- * - API Group:
- *   - Initialization: No
- *   - Run time: Yes
- *   - De-initialization: Yes
- *
- * @retval 0 on success
- * @retval -1 on failure.
- */
-nve32_t osi_disable_chan_rx_intr(struct osi_dma_priv_data *osi_dma,
-				 nveu32_t chan);
-
-/**
- * @brief osi_enable_chan_rx_intr - Enable DMA Rx channel interrupts.
- *
- * @note
- * Algorithm:
- *  - Enables Rx interrupts at wrapper level.
- *
- * @param[in] osi_dma: OSI DMA private data.
- * @param[in] chan: DMA Rx channel number. Max OSI_EQOS_MAX_NUM_CHANS.
- *
- * @pre
- *  - MAC needs to be out of reset and proper clocks need to be configured.
- *  - DMA HW init need to be completed successfully, see osi_hw_dma_init
- *  - Mapping of physical IRQ line to DMA channel need to be maintained at
- *    OS Dependent layer and pass corresponding channel number.
- *
- * @note
- * Traceability Details:
- *  - SWUD_ID: ETHERNET_NVETHERNETCL_004
- *
- * @usage
- * - Allowed context for the API call
- *   - Interrupt handler: Yes
- *   - Signal handler: Yes
- *   - Thread safe: No
- *   - Async/Sync: Sync
- *   - Required Privileges: None
- * - API Group:
- *   - Initialization: Yes
- *   - Run time: Yes
- *   - De-initialization: No
- *
- * @retval 0 on success
- * @retval -1 on failure.
- */
-nve32_t osi_enable_chan_rx_intr(struct osi_dma_priv_data *osi_dma,
-				nveu32_t chan);
-
 /**
  * @brief osi_get_global_dma_status - Gets DMA status.
  *
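The per-channel enable/disable helpers above are removed in this change; the surviving osi_handle_dma_intr() entry point (declared later in this header) takes the channel, a Tx/Rx selector and an enable/disable flag. A minimal usage sketch, not part of this diff; the two selector values are placeholders, the real tx_rx/en_dis encodings come from osi_dma.h:

#define SKETCH_RX_INTR     1U   /* assumed tx_rx selector for the Rx direction */
#define SKETCH_INTR_ENABLE 1U   /* assumed en_dis value meaning "enable" */

static nve32_t rx_napi_complete(struct osi_dma_priv_data *osi_dma, nveu32_t chan)
{
	/* One call replaces osi_enable_chan_rx_intr()/osi_disable_chan_rx_intr() */
	return osi_handle_dma_intr(osi_dma, chan, SKETCH_RX_INTR, SKETCH_INTR_ENABLE);
}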
@@ -777,114 +650,6 @@ nve32_t osi_enable_chan_rx_intr(struct osi_dma_priv_data *osi_dma,
  */
 nveu32_t osi_get_global_dma_status(struct osi_dma_priv_data *osi_dma);

-/**
- * @brief osi_clear_vm_tx_intr - Handles VM Tx interrupt source.
- *
- * Algorithm: Clear Tx interrupt source at wrapper level and DMA level.
- *
- * @param[in] osi_dma: DMA private data.
- * @param[in] chan: DMA tx channel number.
- *
- * @note
- * 1) MAC needs to be out of reset and proper clocks need to be configured.
- * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
- *
- * @retval 0 on success
- * @retval -1 on failure.
- */
-nve32_t osi_clear_vm_tx_intr(struct osi_dma_priv_data *osi_dma,
-			     nveu32_t chan);
-
-/**
- * @brief osi_clear_vm_rx_intr - Handles VM Rx interrupt source.
- *
- * Algorithm: Clear Rx interrupt source at wrapper level and DMA level.
- *
- * @param[in] osi_dma: DMA private data.
- * @param[in] chan: DMA rx channel number.
- *
- * @note
- * 1) MAC needs to be out of reset and proper clocks need to be configured.
- * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
- * 3) Mapping of physical IRQ line to DMA channel need to be maintained at
- *    OS Dependent layer and pass corresponding channel number.
- *
- * @retval 0 on success
- * @retval -1 on failure.
- */
-nve32_t osi_clear_vm_rx_intr(struct osi_dma_priv_data *osi_dma,
-			     nveu32_t chan);
-
-/**
- * @brief Start DMA
- *
- * @note
- * Algorithm:
- *  - Start the DMA for specific MAC
- *
- * @param[in] osi_dma: OSI DMA private data.
- * @param[in] chan: DMA Tx/Rx channel number. Max OSI_EQOS_MAX_NUM_CHANS.
- *
- * @pre
- *  - MAC needs to be out of reset and proper clocks need to be configured.
- *  - DMA HW init need to be completed successfully, see osi_hw_dma_init
- *
- * @note
- * Traceability Details:
- *  - SWUD_ID: ETHERNET_NVETHERNETCL_005
- *
- * @usage
- * - Allowed context for the API call
- *   - Interrupt handler: No
- *   - Signal handler: No
- *   - Thread safe: No
- *   - Async/Sync: Sync
- *   - Required Privileges: None
- * - API Group:
- *   - Initialization: Yes
- *   - Run time: No
- *   - De-initialization: No
- *
- * @retval 0 on success
- * @retval -1 on failure.
- */
-nve32_t osi_start_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan);
-
-/**
- * @brief osi_stop_dma - Stop DMA
- *
- * @note
- * Algorithm:
- *  - Stop the DMA for specific MAC
- *
- * @param[in] osi_dma: OSI DMA private data.
- * @param[in] chan: DMA Tx/Rx channel number. Max OSI_EQOS_MAX_NUM_CHANS.
- *
- * @pre
- *  - MAC needs to be out of reset and proper clocks need to be configured.
- *  - DMA HW init need to be completed successfully, see osi_hw_dma_init
- *
- * @note
- * Traceability Details:
- *  - SWUD_ID: ETHERNET_NVETHERNETCL_006
- *
- * @usage
- * - Allowed context for the API call
- *   - Interrupt handler: No
- *   - Signal handler: No
- *   - Thread safe: No
- *   - Async/Sync: Sync
- *   - Required Privileges: None
- * - API Group:
- *   - Initialization: No
- *   - Run time: No
- *   - De-initialization: Yes
- *
- * @retval 0 on success
- * @retval -1 on failure.
- */
-nve32_t osi_stop_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan);
-
 /**
  * @brief osi_get_refill_rx_desc_cnt - Rx descriptors count that needs to refill
  *
@@ -913,8 +678,8 @@ nve32_t osi_stop_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan);
  *
  * @retval "Number of available free descriptors."
  */
-nveu32_t osi_get_refill_rx_desc_cnt(struct osi_dma_priv_data *osi_dma,
-				    unsigned int chan);
+nveu32_t osi_get_refill_rx_desc_cnt(const struct osi_dma_priv_data *const osi_dma,
+				    nveu32_t chan);

 /**
  * @brief osi_rx_dma_desc_init - DMA Rx descriptor init
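A small usage sketch for the now const-qualified refill query, assuming a hypothetical OSD-side buffer helper (osd_alloc_and_queue_rx_buf is not a real API):

static void osd_refill_rx(struct osi_dma_priv_data *osi_dma, nveu32_t chan)
{
	nveu32_t refill = osi_get_refill_rx_desc_cnt(osi_dma, chan);

	while (refill > 0U) {
		osd_alloc_and_queue_rx_buf(osi_dma, chan);   /* hypothetical OSD helper */
		refill--;
	}
}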
@@ -1349,6 +1114,7 @@ nveu32_t osi_is_mac_enabled(struct osi_dma_priv_data *const osi_dma);
 nve32_t osi_handle_dma_intr(struct osi_dma_priv_data *osi_dma,
 			    nveu32_t chan, nveu32_t tx_rx, nveu32_t en_dis);

+#ifdef OSI_DEBUG
 /**
  * @brief osi_dma_ioctl - OSI DMA IOCTL
  *
@@ -1365,44 +1131,8 @@ nve32_t osi_handle_dma_intr(struct osi_dma_priv_data *osi_dma,
  * @retval -1 on failure.
  */
 nve32_t osi_dma_ioctl(struct osi_dma_priv_data *osi_dma);
+#endif /* OSI_DEBUG */
 #ifndef OSI_STRIPPED_LIB
-/**
- * @brief - Read-validate HW registers for func safety.
- *
- * @note
- * Algorithm:
- *  - Reads pre-configured list of DMA configuration registers
- *    and compares with last written value for any modifications.
- *
- * @param[in] osi_dma: OSI DMA private data structure.
- *
- * @pre
- *  - MAC has to be out of reset.
- *  - osi_hw_dma_init has to be called. Internally this would initialize
- *    the safety_config (see osi_dma_priv_data) based on MAC version and
- *    which specific registers needs to be validated periodically.
- *  - Invoke this call if (osi_dma_priv_data->safety_config != OSI_NULL)
- *
- * @note
- * Traceability Details:
- *
- * @usage
- * - Allowed context for the API call
- *   - Interrupt handler: No
- *   - Signal handler: No
- *   - Thread safe: No
- *   - Async/Sync: Sync
- *   - Required Privileges: None
- * - API Group:
- *   - Initialization: No
- *   - Run time: Yes
- *   - De-initialization: No
- *
- * @retval 0 on success
- * @retval -1 on failure.
- */
-nve32_t osi_validate_dma_regs(struct osi_dma_priv_data *osi_dma);
-
 /**
  * @brief osi_clear_tx_pkt_err_stats - Clear tx packet error stats.
  *
@@ -32,7 +32,6 @@
 #define OSI_EQOS_TX_DESC_CNT		1024U
 #define OSI_EQOS_RX_DESC_CNT		1024U
 #define OSI_MGBE_TX_DESC_CNT		4096U
-#define OSI_MGBE_RX_DESC_CNT		4096U
 #define OSI_MGBE_MAX_RX_DESC_CNT	16384U
 /** @} */

@@ -49,9 +48,11 @@
 #define INCR_TX_DESC_INDEX(idx, x)	((idx) = ((idx) + (1U)) & ((x) - 1U))
 /** Increment the rx descriptor index */
 #define INCR_RX_DESC_INDEX(idx, x)	((idx) = ((idx) + (1U)) & ((x) - 1U))
-#ifndef OSI_STRIPPED_LIB
+#ifdef OSI_DEBUG
 /** Decrement the tx descriptor index */
 #define DECR_TX_DESC_INDEX(idx, x)	((idx) = ((idx) - (1U)) & ((x) - 1U))
+#endif /* OSI_DEBUG */
+#ifndef OSI_STRIPPED_LIB
 /** Decrement the rx descriptor index */
 #define DECR_RX_DESC_INDEX(idx, x)	((idx) = ((idx) - (1U)) & ((x) - 1U))
 #endif /* !OSI_STRIPPED_LIB */
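The ring-index macros rely on the descriptor counts above being powers of two, so masking with (x - 1U) wraps the index without a modulo. A tiny worked example:

static void ring_index_wrap_example(void)
{
	/* With x = 1024 descriptors, (x - 1U) = 0x3FF acts as the wrap mask. */
	nveu32_t idx = 1023U;

	INCR_TX_DESC_INDEX(idx, 1024U);   /* (1023 + 1) & 0x3FF -> idx == 0 */
	INCR_TX_DESC_INDEX(idx, 1024U);   /* (0 + 1) & 0x3FF    -> idx == 1 */
}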
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -41,7 +41,9 @@
 #define OSI_AN2_VALID			OSI_BIT(2)
 #define OSI_AN3_VALID			OSI_BIT(3)
 #define OSI_MAX_NUM_SA			4U
+#ifdef DEBUG_MACSEC
 #define OSI_CURR_AN_MAX			3
+#endif /* DEBUG_MACSEC */
 #define OSI_KEY_INDEX_MAX		31U
 #define OSI_PN_MAX_DEFAULT		0xFFFFFFFFU
 #define OSI_PN_THRESHOLD_DEFAULT	0xC0000000U
@@ -97,7 +99,7 @@
 /** @} */

 /**
- * @addtogroup Generic table CONFIG register helpers macros
+ * @addtogroup MACSEC-Generic table CONFIG register helpers macros
 *
 * @brief Helper macros for generic table CONFIG register programming
 * @{
@@ -114,14 +116,13 @@
 #define OSI_SA_LUT_MAX_INDEX		OSI_TABLE_INDEX_MAX
 /** @} */

+#ifdef DEBUG_MACSEC
 /**
 * @addtogroup Debug buffer table CONFIG register helpers macros
 *
 * @brief Helper macros for debug buffer table CONFIG register programming
 * @{
 */
-#define OSI_DBG_TBL_READ		OSI_LUT_READ
-#define OSI_DBG_TBL_WRITE		OSI_LUT_WRITE
 /* Num of Tx debug buffers */
 #define OSI_TX_DBG_BUF_IDX_MAX		12U
 /* Num of Rx debug buffers */
@@ -140,6 +141,7 @@
 #define OSI_RX_DBG_ICV_ERROR_EVT	OSI_BIT(10)
 #define OSI_RX_DBG_CAPTURE_EVT		OSI_BIT(11)
 /** @} */
+#endif /* DEBUG_MACSEC*/

 /**
 * @addtogroup AES ciphers
@@ -152,27 +154,22 @@
 /** @} */

 /**
- * @addtogroup MACSEC Misc helper macro's
+ * @addtogroup MACSEC related helper MACROs
 *
- * @brief MACSEC Helper macro's
+ * @brief MACSEC generic helper MACROs
 * @{
 */
 #define OSI_MACSEC_TX_EN		OSI_BIT(0)
 #define OSI_MACSEC_RX_EN		OSI_BIT(1)
-/* MACSEC SECTAG + ICV + 2B ethertype adds upto 34B */
-#define MACSEC_TAG_ICV_LEN		34U
-/* MACSEC TZ key config cmd */
-#define OSI_MACSEC_CMD_TZ_CONFIG	0x1
-/* MACSEC TZ key table entries reset cmd */
-#define OSI_MACSEC_CMD_TZ_KT_RESET	0x2
 /** @} */

 /**
 * @brief Indicates different operations on MACSEC SA
 */
+#ifdef MACSEC_KEY_PROGRAM
 #define OSI_CREATE_SA			1U
+#endif /* MACSEC_KEY_PROGRAM */
 #define OSI_ENABLE_SA			2U
-#define OSI_DISABLE_SA			3U

 /**
 * @brief MACSEC SA State LUT entry outputs structure
@@ -238,6 +235,7 @@ struct osi_macsec_table_config {
 	nveu16_t index;
 };

+#if defined(MACSEC_KEY_PROGRAM) || defined(LINUX_OS)
 /**
 * @brief MACSEC Key Table entry structure
 */
@@ -247,6 +245,7 @@ struct osi_kt_entry {
 	/** Indicates Hash-key */
 	nveu8_t h[OSI_KEY_LEN_128];
 };
+#endif /* MACSEC_KEY_PROGRAM */

 /**
 * @brief MACSEC BYP/SCI LUT entry inputs structure
@@ -296,6 +295,7 @@ struct osi_macsec_lut_config {
 	struct osi_sa_state_outputs sa_state_out;
 };

+#if defined(MACSEC_KEY_PROGRAM) || defined(LINUX_OS)
 /**
 * @brief MACSEC Key Table config data structure
 */
@@ -307,6 +307,7 @@ struct osi_macsec_kt_config {
 	/** Indicates key table entry valid or not, bit 31 */
 	nveu32_t flags;
 };
+#endif /* MACSEC_KEY_PROGRAM */

 /**
 * @brief MACSEC Debug buffer config data structure
@@ -333,10 +334,8 @@ struct osi_macsec_core_ops {
 			     nveu32_t mtu);
 	/** macsec de-init */
 	nve32_t (*deinit)(struct osi_core_priv_data *const osi_core);
-	/** Non Secure irq handler */
-	void (*handle_ns_irq)(struct osi_core_priv_data *const osi_core);
-	/** Secure irq handler */
-	void (*handle_s_irq)(struct osi_core_priv_data *const osi_core);
+	/** Macsec irq handler */
+	void (*handle_irq)(struct osi_core_priv_data *const osi_core);
 	/** macsec lut config */
 	nve32_t (*lut_config)(struct osi_core_priv_data *const osi_core,
 			      struct osi_macsec_lut_config *const lut_config);
@@ -348,9 +347,11 @@ struct osi_macsec_core_ops {
 	/** macsec cipher config */
 	nve32_t (*cipher_config)(struct osi_core_priv_data *const osi_core,
 				 nveu32_t cipher);
+#ifdef DEBUG_MACSEC
 	/** macsec loopback config */
 	nve32_t (*loopback_config)(struct osi_core_priv_data *const osi_core,
 				   nveu32_t enable);
+#endif /* DEBUG_MACSEC */
 	/** macsec enable */
 	nve32_t (*macsec_en)(struct osi_core_priv_data *const osi_core,
 			     nveu32_t enable);
@@ -361,19 +362,24 @@ struct osi_macsec_core_ops {
 			       nveu16_t *kt_idx);
 	/** macsec read mmc counters */
 	void (*read_mmc)(struct osi_core_priv_data *const osi_core);
+#ifdef DEBUG_MACSEC
 	/** macsec debug buffer config */
 	nve32_t (*dbg_buf_config)(struct osi_core_priv_data *const osi_core,
 		struct osi_macsec_dbg_buf_config *const dbg_buf_config);
 	/** macsec debug buffer config */
 	nve32_t (*dbg_events_config)(struct osi_core_priv_data *const osi_core,
 		struct osi_macsec_dbg_buf_config *const dbg_buf_config);
+#endif /* DEBUG_MACSEC */
 	/** macsec get Key Index start for a given SCI */
 	nve32_t (*get_sc_lut_key_index)(struct osi_core_priv_data *const osi_core,
 		nveu8_t *sci, nveu32_t *key_index, nveu16_t ctlr);
 	/** macsec set MTU size */
 	nve32_t (*update_mtu)(struct osi_core_priv_data *const osi_core,
 			      nveu32_t mtu);
+#ifdef DEBUG_MACSEC
+	/** macsec interrupts configuration */
+	void (*intr_config)(struct osi_core_priv_data *const osi_core, nveu32_t enable);
+#endif /* DEBUG_MACSEC */
 };

 //////////////////////////////////////////////////////////////////////////
@@ -461,12 +467,12 @@ nve32_t osi_macsec_init(struct osi_core_priv_data *const osi_core,
 nve32_t osi_macsec_deinit(struct osi_core_priv_data *const osi_core);

 /**
- * @brief osi_macsec_ns_isr - macsec non-secure irq handler
+ * @brief osi_macsec_isr - macsec irq handler
 *
 * @note
 * Algorithm:
 *  - Return -1 if osi core or ops is null
- *  - handles non-secure macsec interrupts
+ *  - handles macsec interrupts
 *  - Refer to MACSEC column of <<******, (sequence diagram)>> for API details.
 *  - TraceID: ***********
 *
@@ -482,31 +488,7 @@ nve32_t osi_macsec_deinit(struct osi_core_priv_data *const osi_core);
 *
 * @retval none
 */
-void osi_macsec_ns_isr(struct osi_core_priv_data *const osi_core);
+void osi_macsec_isr(struct osi_core_priv_data *const osi_core);

-/**
- * @brief osi_macsec_s_isr - macsec secure irq handler
- *
- * @note
- * Algorithm:
- *  - Return -1 if osi core or ops is null
- *  - handles secure macsec interrupts
- *  - Refer to MACSEC column of <<******, (sequence diagram)>> for API details.
- *  - TraceID: ***********
- *
- * @param[in] osi_core: OSI core private data structure
- *
- * @pre MACSEC needs to be out of reset and proper clock configured.
- *
- * @note
- * API Group:
- * - Initialization: No
- * - Run time: Yes
- * - De-initialization: No
- *
- * @retval none
- */
-void osi_macsec_s_isr(struct osi_core_priv_data *const osi_core);
-
 /**
 * @brief osi_macsec_config_lut - Read or write to macsec LUTs
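With the secure/non-secure split collapsed into one handler, the OSD layer can funnel its single MACsec interrupt line into osi_macsec_isr(). A minimal sketch, not part of this diff; the IRQ wrapper name is illustrative, only osi_macsec_isr() is the real API:

static void osd_macsec_irq_handler(void *data)
{
	struct osi_core_priv_data *osi_core = data;

	osi_macsec_isr(osi_core);
}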
@@ -535,6 +517,7 @@ void osi_macsec_s_isr(struct osi_core_priv_data *const osi_core);
 nve32_t osi_macsec_config_lut(struct osi_core_priv_data *const osi_core,
 			      struct osi_macsec_lut_config *const lut_config);

+#ifdef MACSEC_KEY_PROGRAM
 /**
 * @brief osi_macsec_config_kt - API to read or update the keys
 *
@@ -561,6 +544,7 @@ nve32_t osi_macsec_config_lut(struct osi_core_priv_data *const osi_core,
 */
 nve32_t osi_macsec_config_kt(struct osi_core_priv_data *const osi_core,
 			     struct osi_macsec_kt_config *const kt_config);
+#endif /* MACSEC_KEY_PROGRAM */

 /**
 * @brief osi_macsec_cipher_config - API to update the cipher
@@ -589,6 +573,7 @@ nve32_t osi_macsec_config_kt(struct osi_core_priv_data *const osi_core,
 nve32_t osi_macsec_cipher_config(struct osi_core_priv_data *const osi_core,
 				 nveu32_t cipher);

+#ifdef DEBUG_MACSEC
 /**
 * @brief osi_macsec_loopback - API to enable/disable macsec loopback
 *
@@ -613,8 +598,10 @@ nve32_t osi_macsec_cipher_config(struct osi_core_priv_data *const osi_core,
 * @retval 0 on success
 * @retval -1 on failure
 */
+
 nve32_t osi_macsec_loopback(struct osi_core_priv_data *const osi_core,
 			    nveu32_t enable);
+#endif /* DEBUG_MACSEC */

 /**
 * @brief osi_macsec_en - API to enable/disable macsec
@@ -657,6 +644,7 @@ nve32_t osi_macsec_en(struct osi_core_priv_data *const osi_core,
 *
 * @param[in] osi_core: OSI core private data structure
 * @param[in] sc: Pointer to the sc that needs to be added/deleted/updated
+ * @param[in] enable: macsec enable/disable selection
 * @param[in] ctlr: Controller selected
 * @param[out] kt_idx: Pointer to the kt_index passed to OSD
 *
@@ -701,6 +689,7 @@ nve32_t osi_macsec_config(struct osi_core_priv_data *const osi_core,
 */
 nve32_t osi_macsec_read_mmc(struct osi_core_priv_data *const osi_core);

+#ifdef DEBUG_MACSEC
 /**
 * @brief osi_macsec_config_dbg_buf - Reads the debug buffer captured
 *
@@ -756,7 +745,7 @@ nve32_t osi_macsec_config_dbg_buf(
 nve32_t osi_macsec_dbg_events_config(
 		struct osi_core_priv_data *const osi_core,
 		struct osi_macsec_dbg_buf_config *const dbg_buf_config);
-
+#endif /* DEBUG_MACSEC */
 /**
 * @brief osi_macsec_get_sc_lut_key_index - API to get key index for a given SCI
 *
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -22,11 +22,11 @@
 #ifndef INCLUDED_COMMON_H
 #define INCLUDED_COMMON_H

-#include "../osi/common/type.h"
+#include <nvethernet_type.h>
 #include <osi_common.h>

 /**
- * @addtogroup Generic helper macros
+ * @addtogroup Generic helper MACROS
 *
 * @brief These are Generic helper macros used at various places.
 * @{
@@ -37,6 +37,12 @@
 #define RETRY_DELAY	1U
 /** @} */

+/** MAC version type for EQOS version previous to 5.30 */
+#define MAC_CORE_VER_TYPE_EQOS		0U
+/** MAC version type for EQOS version 5.30 */
+#define MAC_CORE_VER_TYPE_EQOS_5_30	1U
+/** MAC version type for MGBE IP */
+#define MAC_CORE_VER_TYPE_MGBE		2U

 /**
 * @brief Maximum number of supported MAC IP types (EQOS and MGBE)
@@ -48,8 +54,9 @@
 * a condition is met or a timeout occurs
 *
 * @param[in] addr: Memory mapped address.
+ * @param[in] fn: function to be used.
 * @param[in] val: Variable to read the value.
- * @param[in] cond: Break condition (usually involving @val).
+ * @param[in] cond: Break condition.
 * @param[in] delay_us: Maximum time to sleep between reads in us.
 * @param[in] retry: Retry count.
@@ -60,9 +67,9 @@
 */
 #define osi_readl_poll_timeout(addr, fn, val, cond, delay_us, retry) \
 ({ \
-	unsigned int count = 0; \
+	nveu32_t count = 0; \
 	while (count++ < retry) { \
-		val = osi_readl((unsigned char *)addr); \
+		val = osi_readl((nveu8_t *)addr); \
 		if ((cond)) { \
 			break; \
 		} \
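An illustrative use of the polling helper, assuming a memory-mapped register whose bit 0 is a self-clearing reset bit; the register offset, bit and retry budget below are made up for the sketch:

static nve32_t wait_for_bit0_clear(void *reg_base)
{
	nveu32_t val = 0U;

	/* Poll until bit 0 clears or the retry budget (1000 reads) runs out. */
	osi_readl_poll_timeout(reg_base, osi_readl, val,
			       ((val & OSI_BIT(0)) == 0U),
			       RETRY_DELAY, 1000U);

	return ((val & OSI_BIT(0)) == 0U) ? 0 : -1;
}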
@@ -234,7 +241,8 @@ static inline void osi_writela(OSI_UNUSED void *priv, nveu32_t val, void *addr)
 * @brief validate_mac_ver_update_chans - Validates mac version and update chan
 *
 * @param[in] mac_ver: MAC version read.
- * @param[out] max_chans: Maximum channel number.
+ * @param[out] num_max_chans: Maximum channel number.
+ * @param[out] l_mac_ver: local mac version.
 *
 * @note MAC has to be out of reset.
 *
@@ -248,26 +256,36 @@ static inline void osi_writela(OSI_UNUSED void *priv, nveu32_t val, void *addr)
 * @retval 1 - for Valid MAC
 */
 static inline nve32_t validate_mac_ver_update_chans(nveu32_t mac_ver,
-						    nveu32_t *max_chans)
+						    nveu32_t *num_max_chans,
+						    nveu32_t *l_mac_ver)
 {
+	nve32_t ret;
+
 	switch (mac_ver) {
-	case OSI_EQOS_MAC_4_10:
 	case OSI_EQOS_MAC_5_00:
-		*max_chans = OSI_EQOS_XP_MAX_CHANS;
+		*num_max_chans = OSI_EQOS_XP_MAX_CHANS;
+		*l_mac_ver = MAC_CORE_VER_TYPE_EQOS;
+		ret = 1;
 		break;
 	case OSI_EQOS_MAC_5_30:
-		*max_chans = OSI_EQOS_MAX_NUM_CHANS;
+		*num_max_chans = OSI_EQOS_MAX_NUM_CHANS;
+		*l_mac_ver = MAC_CORE_VER_TYPE_EQOS_5_30;
+		ret = 1;
 		break;
-	case OSI_MGBE_MAC_3_00:
 	case OSI_MGBE_MAC_3_10:
+#ifndef OSI_STRIPPED_LIB
 	case OSI_MGBE_MAC_4_00:
-		*max_chans = OSI_MGBE_MAX_NUM_CHANS;
+#endif /* !OSI_STRIPPED_LIB */
+		*num_max_chans = OSI_MGBE_MAX_NUM_CHANS;
+		*l_mac_ver = MAC_CORE_VER_TYPE_MGBE;
+		ret = 1;
 		break;
 	default:
-		return 0;
+		ret = 0;
+		break;
 	}

-	return 1;
+	return ret;
 }

 /**
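A small usage sketch of the reworked helper, which now reports both the channel count and the local MAC version type through its output parameters:

static void example_mac_ver_check(nveu32_t mac_ver)
{
	nveu32_t num_chans = 0U;
	nveu32_t l_mac_ver = 0U;

	if (validate_mac_ver_update_chans(mac_ver, &num_chans, &l_mac_ver) == 0) {
		return; /* unsupported MAC version */
	}
	/* e.g. OSI_EQOS_MAC_5_30 yields num_chans == OSI_EQOS_MAX_NUM_CHANS
	 * and l_mac_ver == MAC_CORE_VER_TYPE_EQOS_5_30 */
}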
@@ -289,7 +307,7 @@ static inline void osi_memset(void *s, nveu32_t c, nveu64_t count)
 	nveu64_t temp = count;

 	if (s == OSI_NULL) {
-		return;
+		goto done;
 	}
 	xs = (nveu8_t *)s;
 	while (temp != 0UL) {
@@ -299,6 +317,8 @@ static inline void osi_memset(void *s, nveu32_t c, nveu64_t count)
 		}
 		temp--;
 	}
+done:
+	return;
 }

 /**
@@ -314,38 +334,49 @@ static inline void osi_memset(void *s, nveu32_t c, nveu64_t count)
 * - Run time: Yes
 * - De-initialization: No
 */
-static inline nve32_t osi_memcpy(void *dest, void *src, nveu64_t n)
+static inline nve32_t osi_memcpy(void *dest, const void *src, nveu64_t n)
 {
-	nve8_t *csrc = (nve8_t *)src;
-	nve8_t *cdest = (nve8_t *)dest;
+	nve8_t *cdest = dest;
+	const nve8_t *csrc = src;
+	nve32_t ret = 0;
 	nveu64_t i = 0;

-	if (src == OSI_NULL || dest == OSI_NULL) {
-		return -1;
+	if ((src == OSI_NULL) || (dest == OSI_NULL)) {
+		ret = -1;
+		goto fail;
 	}
 	for (i = 0; i < n; i++) {
 		cdest[i] = csrc[i];
 	}

-	return 0;
+fail:
+	return ret;
 }

-static inline nve32_t osi_memcmp(void *dest, void *src, nve32_t n)
+static inline nve32_t osi_memcmp(const void *dest, const void *src, nve32_t n)
 {
+	const nve8_t *const cdest = dest;
+	const nve8_t *const csrc = src;
+	nve32_t ret = 0;
 	nve32_t i;
-	nve8_t *csrc = (nve8_t *)src;
-	nve8_t *cdest = (nve8_t *)dest;

-	if (src == OSI_NULL || dest == OSI_NULL)
-		return -1;
+	if ((src == OSI_NULL) || (dest == OSI_NULL)) {
+		ret = -1;
+		goto fail;
+	}

 	for (i = 0; i < n; i++) {
 		if (csrc[i] < cdest[i]) {
-			return -1;
+			ret = -1;
+			goto fail;
 		} else if (csrc[i] > cdest[i]) {
-			return 1;
+			ret = 1;
+			goto fail;
+		} else {
+			/* Do Nothing */
 		}
 	}
-	return 0;
+fail:
+	return ret;
 }
 #endif
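A short usage sketch of the reworked helpers, which now funnel every exit through a single return:

static nve32_t copy_and_check(void)
{
	nveu8_t mac_a[6] = {0x00U, 0x04U, 0x4BU, 0x11U, 0x22U, 0x33U};
	nveu8_t mac_b[6] = {0U};

	(void)osi_memcpy(mac_b, mac_a, 6UL);
	/* 0 means the six bytes match; -1/1 indicate a mismatch or a NULL input */
	return osi_memcmp(mac_b, mac_a, 6);
}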
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -24,7 +24,7 @@
 #define INCLUDED_MGBE_COMMON_H

 /**
- * @addtogroup MGBE-MAC MGBE MAC common HW feature registers
+ * @addtogroup MGBE-MAC MAC register offsets
 *
 * @{
 */
@@ -31,7 +31,7 @@ void common_get_systime_from_mac(void *addr, nveu32_t mac, nveu32_t *sec,
 	nveu64_t remain;
 	nveul64_t ns;
 	typedef nveul64_t (*get_time)(void *addr);
-	get_time i_ops[MAX_MAC_IP_TYPES] = {
+	const get_time i_ops[MAX_MAC_IP_TYPES] = {
 		eqos_get_systime_from_mac, mgbe_get_systime_from_mac
 	};

@@ -53,7 +53,7 @@ void common_get_systime_from_mac(void *addr, nveu32_t mac, nveu32_t *sec,
 nveu32_t common_is_mac_enabled(void *addr, nveu32_t mac)
 {
 	typedef nveu32_t (*mac_enable_arr)(void *addr);
-	mac_enable_arr i_ops[MAX_MAC_IP_TYPES] = {
+	const mac_enable_arr i_ops[MAX_MAC_IP_TYPES] = {
 		eqos_is_mac_enabled, mgbe_is_mac_enabled
 	};

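The dispatch tables above are now const, so they can sit in read-only data. The general shape of that pattern, as a stand-alone sketch (illustrative only, not library code):

typedef nveu32_t (*mac_enabled_fn)(void *addr);

static nveu32_t is_mac_enabled_example(void *addr, nveu32_t mac)
{
	/* const table indexed by MAC IP type: 0 = EQOS, 1 = MGBE */
	const mac_enabled_fn ops[2] = { eqos_is_mac_enabled, mgbe_is_mac_enabled };

	return ops[mac](addr);   /* caller must pass mac as 0 or 1 */
}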
@@ -24,13 +24,12 @@
 #
 ###############################################################################

-ifdef NV_INTERFACE_FLAG_SHARED_LIBRARY_SECTION
-NV_INTERFACE_NAME := nvethernetrm
-NV_INTERFACE_EXPORTS := lib$(NV_INTERFACE_NAME)
+ifdef NV_INTERFACE_FLAG_STATIC_LIBRARY_SECTION
+NV_COMPONENT_NAME := nvethernetrm
+NV_INTERFACE_COMPONENT_DIR := .
 NV_INTERFACE_PUBLIC_INCLUDES := \
	./include
 endif

 # Local Variables:
 # indent-tabs-mode: t
 # tab-width: 8
@@ -22,7 +22,7 @@
 #
 ###############################################################################

-ifdef NV_COMPONENT_FLAG_SHARED_LIBRARY_SECTION
+ifdef NV_COMPONENT_FLAG_STATIC_LIBRARY_SECTION
 include $(NV_BUILD_START_COMPONENT)

 NV_COMPONENT_STRICT_WARNINGS_qnx_64 := 1
@@ -30,42 +30,37 @@ NV_COMPONENT_STRICT_WARNINGS_qnx_64 := 1
 NV_COMPONENT_NAME := nvethernetrm
 NV_COMPONENT_OWN_INTERFACE_DIR := .
 NV_COMPONENT_SOURCES := \
-	eqos_core.c \
-	eqos_mmc.c \
-	osi_core.c \
-	vlan_filter.c \
-	osi_hal.c \
-	ivc_core.c \
-	frp.c \
-	mgbe_core.c \
-	xpcs.c \
-	mgbe_mmc.c \
-	debug.c \
-	core_common.c \
+	$(NV_SOURCE)/nvethernetrm/osi/core/eqos_core.c \
+	$(NV_SOURCE)/nvethernetrm/osi/core/eqos_mmc.c \
+	$(NV_SOURCE)/nvethernetrm/osi/core/osi_core.c \
+	$(NV_SOURCE)/nvethernetrm/osi/core/osi_hal.c \
+	$(NV_SOURCE)/nvethernetrm/osi/core/ivc_core.c \
+	$(NV_SOURCE)/nvethernetrm/osi/core/frp.c \
+	$(NV_SOURCE)/nvethernetrm/osi/core/mgbe_core.c \
+	$(NV_SOURCE)/nvethernetrm/osi/core/xpcs.c \
+	$(NV_SOURCE)/nvethernetrm/osi/core/mgbe_mmc.c \
+	$(NV_SOURCE)/nvethernetrm/osi/core/core_common.c \
	$(NV_SOURCE)/nvethernetrm/osi/common/osi_common.c \
	$(NV_SOURCE)/nvethernetrm/osi/common/eqos_common.c \
	$(NV_SOURCE)/nvethernetrm/osi/common/mgbe_common.c \
	$(NV_SOURCE)/nvethernetrm/osi/core/macsec.c

-#NV_COMPONENT_CFLAGS += -DMACSEC_SUPPORT
-#NV_COMPONENT_CFLAGS += -DMACSEC_KEY_PROGRAM
-#NV_COMPONENT_CFLAGS += -DDEBUG_MACSEC
-
-ifeq ($(NV_BUILD_CONFIGURATION_OS_IS_LINUX),1)
-NV_COMPONENT_CFLAGS += -DLINUX_OS
-else ifeq ($(NV_BUILD_CONFIGURATION_OS_IS_QNX),1)
-NV_COMPONENT_CFLAGS += -DQNX_OS
-endif
-
-ifeq ($(NV_BUILD_CONFIGURATION_IS_SAFETY),0)
-NV_COMPONENT_CFLAGS += -DOSI_DEBUG
-endif
-
 NV_COMPONENT_INCLUDES := \
	$(NV_SOURCE)/nvethernetrm/include \
	$(NV_SOURCE)/nvethernetrm/osi/common/include

-include $(NV_BUILD_SHARED_LIBRARY)
+include $(NV_SOURCE)/nvethernetrm/include/config.tmk
+
+ifeq ($(OSI_DEBUG),1)
+NV_COMPONENT_SOURCES += $(NV_SOURCE)/nvethernetrm/osi/core/debug.c
+endif
+
+ifeq ($(OSI_STRIPPED_LIB),0)
+NV_COMPONENT_SOURCES += \
+	$(NV_SOURCE)/nvethernetrm/osi/core/vlan_filter.c
+endif
+
+include $(NV_BUILD_STATIC_LIBRARY)
 endif

 # Local Variables:
(One file's diff is suppressed because it is too large.)
@@ -24,11 +24,20 @@
 #define INCLUDED_CORE_COMMON_H

 #include "core_local.h"

+#ifndef OSI_STRIPPED_LIB
+#define MAC_PFR_PR			OSI_BIT(0)
+#define MAC_TCR_TSCFUPDT		OSI_BIT(1)
+#define MAC_TCR_TSCTRLSSR		OSI_BIT(9)
+#define MAC_PFR_PM			OSI_BIT(4)
+#endif /* !OSI_STRIPPED_LIB */
+
+#define MTL_EST_ADDR_SHIFT		8
 #define MTL_EST_ADDR_MASK		(OSI_BIT(8) | OSI_BIT(9) | \
					 OSI_BIT(10) | OSI_BIT(11) | \
					 OSI_BIT(12) | OSI_BIT(13) | \
					 OSI_BIT(14) | OSI_BIT(15) | \
-					 OSI_BIT(16) | (17) | \
+					 OSI_BIT(16) | (17U) | \
					 OSI_BIT(18) | OSI_BIT(19))
 #define MTL_EST_SRWO			OSI_BIT(0)
 #define MTL_EST_R1W0			OSI_BIT(1)
@@ -38,6 +47,75 @@
 #define MTL_EST_ERR0			OSI_BIT(20)
 #define MTL_EST_CONTROL_EEST		OSI_BIT(0)
 #define MTL_EST_STATUS_SWOL		OSI_BIT(7)
+/* EST control OSI_BIT map */
+#define MTL_EST_EEST			OSI_BIT(0)
+#define MTL_EST_SSWL			OSI_BIT(1)
+#define MTL_EST_QHLBF			OSI_BIT(3)
+#define MTL_EST_CTR_HIGH_MAX		0xFFU
+#define MTL_EST_ITRE_CGCE		OSI_BIT(4)
+#define MTL_EST_ITRE_IEHS		OSI_BIT(3)
+#define MTL_EST_ITRE_IEHF		OSI_BIT(2)
+#define MTL_EST_ITRE_IEBE		OSI_BIT(1)
+#define MTL_EST_ITRE_IECC		OSI_BIT(0)
+/* MTL_FPE_CTRL_STS */
+#define MTL_FPE_CTS_PEC			(OSI_BIT(8) | OSI_BIT(9) | \
+					 OSI_BIT(10) | OSI_BIT(11) | \
+					 OSI_BIT(12) | OSI_BIT(13) | \
+					 OSI_BIT(14) | OSI_BIT(15))
+#define MTL_FPE_CTS_PEC_SHIFT		8U
+#define MTL_FPE_CTS_PEC_MAX_SHIFT	16U
+#define MAC_FPE_CTS_EFPE		OSI_BIT(0)
+#define MAC_FPE_CTS_SVER		OSI_BIT(1)
+/* MTL FPE adv registers */
+#define MTL_FPE_ADV_HADV_MASK		(0xFFFFU)
+#define MTL_FPE_ADV_HADV_VAL		100U
+#define DMA_MODE_SWR			OSI_BIT(0)
+#define MTL_QTOMR_FTQ			OSI_BIT(0)
+#define MTL_RXQ_OP_MODE_FEP		OSI_BIT(4)
+#define MAC_TCR_TSINIT			OSI_BIT(2)
+#define MAC_TCR_TSADDREG		OSI_BIT(5)
+#define MAC_PPS_CTL_PPSCTRL0		(OSI_BIT(3) | OSI_BIT(2) |\
+					 OSI_BIT(1) | OSI_BIT(0))
+#define MAC_SSIR_SSINC_SHIFT		16U
+#define MAC_PFR_DAIF			OSI_BIT(3)
+#define MAC_PFR_DBF			OSI_BIT(5)
+#define MAC_PFR_PCF			(OSI_BIT(6) | OSI_BIT(7))
+#define MAC_PFR_SAIF			OSI_BIT(8)
+#define MAC_PFR_SAF			OSI_BIT(9)
+#define MAC_PFR_HPF			OSI_BIT(10)
+#define MAC_PFR_VTFE			OSI_BIT(16)
+#define MAC_PFR_IPFE			OSI_BIT(20)
+#define MAC_PFR_IPFE_SHIFT		20U
+#define MAC_PFR_DNTU			OSI_BIT(21)
+#define MAC_PFR_RA			OSI_BIT(31)
+
+#define WRAP_SYNC_TSC_PTP_CAPTURE	0x800CU
+#define WRAP_TSC_CAPTURE_LOW		0x8010U
+#define WRAP_TSC_CAPTURE_HIGH		0x8014U
+#define WRAP_PTP_CAPTURE_LOW		0x8018U
+#define WRAP_PTP_CAPTURE_HIGH		0x801CU
+#define MAC_PKT_FILTER_REG		0x0008
+#define HW_MAC_IER			0x00B4U
+#define WRAP_COMMON_INTR_ENABLE		0x8704U
+
+/* common l3 l4 register bit fields for eqos and mgbe */
+#ifndef OSI_STRIPPED_LIB
+#define MAC_L3L4_CTR_L3PEN_SHIFT	0
+#define MAC_L3L4_CTR_L3SAM_SHIFT	2
+#define MAC_L3L4_CTR_L3SAIM_SHIFT	3
+#endif /* !OSI_STRIPPED_LIB */
+#define MAC_L3L4_CTR_L3DAM_SHIFT	4
+#ifndef OSI_STRIPPED_LIB
+#define MAC_L3L4_CTR_L3DAIM_SHIFT	5
+#define MAC_L3L4_CTR_L4PEN_SHIFT	16
+#define MAC_L3L4_CTR_L4SPM_SHIFT	18
+#define MAC_L3L4_CTR_L4SPIM_SHIFT	19
+#define MAC_L3L4_CTR_L4DPM_SHIFT	20
+#define MAC_L3L4_CTR_L4DPIM_SHIFT	21
+#endif /* !OSI_STRIPPED_LIB */
+#define MAC_L3L4_CTR_DMCHN_SHIFT	24
+#define EQOS_MAC_L3L4_CTR_DMCHEN_SHIFT	28
+#define MGBE_MAC_L3L4_CTR_DMCHEN_SHIFT	31
+
 /**
 * @addtogroup typedef related info
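These MAC_PFR_* bits are typically OR-ed into one packet-filter value before the core layer writes it out (for example through hw_config_mac_pkt_filter_reg() declared below). A small illustrative composition; the field meanings in the comments are inferred from the macro names:

static nveu32_t build_pkt_filter_example(void)
{
	nveu32_t pfr = 0U;

	pfr |= MAC_PFR_HPF;    /* bit 10, hash/perfect filter selection */
	pfr |= MAC_PFR_VTFE;   /* bit 16, VLAN tag filtering */
	return pfr;
}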
@@ -47,15 +125,57 @@
 */

 struct est_read {
-	/* variable pointer */
+	/** variable pointer */
	nveu32_t *var;
-	/* memory register/address offset */
+	/** memory register/address offset */
	nveu32_t addr;
 };

 /** @} */

-nve32_t gcl_validate(struct osi_core_priv_data *const osi_core,
-		     struct osi_est_config *const est,
-		     const nveu32_t *btr, nveu32_t mac);
+nve32_t hw_poll_for_swr(struct osi_core_priv_data *const osi_core);
+void hw_start_mac(struct osi_core_priv_data *const osi_core);
+void hw_stop_mac(struct osi_core_priv_data *const osi_core);
+nve32_t hw_set_mode(struct osi_core_priv_data *const osi_core, const nve32_t mode);
+nve32_t hw_set_speed(struct osi_core_priv_data *const osi_core, const nve32_t speed);
+nve32_t hw_flush_mtl_tx_queue(struct osi_core_priv_data *const osi_core,
+			      const nveu32_t q_inx);
+nve32_t hw_config_fw_err_pkts(struct osi_core_priv_data *osi_core,
+			      const nveu32_t q_inx, const nveu32_t enable_fw_err_pkts);
+nve32_t hw_config_rxcsum_offload(struct osi_core_priv_data *const osi_core,
+				 nveu32_t enabled);
+nve32_t hw_set_systime_to_mac(struct osi_core_priv_data *const osi_core,
+			      const nveu32_t sec, const nveu32_t nsec);
+nve32_t hw_config_addend(struct osi_core_priv_data *const osi_core,
+			 const nveu32_t addend);
+void hw_config_tscr(struct osi_core_priv_data *const osi_core, const nveu32_t ptp_filter);
+void hw_config_ssir(struct osi_core_priv_data *const osi_core);
+nve32_t hw_ptp_tsc_capture(struct osi_core_priv_data *const osi_core,
+			   struct osi_core_ptp_tsc_data *data);
+nve32_t hw_config_mac_pkt_filter_reg(struct osi_core_priv_data *const osi_core,
+				     const struct osi_filter *filter);
+nve32_t hw_config_l3_l4_filter_enable(struct osi_core_priv_data *const osi_core,
+				      const nveu32_t filter_enb_dis);
+nve32_t hw_config_est(struct osi_core_priv_data *const osi_core,
+		      struct osi_est_config *const est);
+nve32_t hw_config_fpe(struct osi_core_priv_data *const osi_core,
+		      struct osi_fpe_config *const fpe);
+void hw_tsn_init(struct osi_core_priv_data *osi_core,
+		 nveu32_t est_sel, nveu32_t fpe_sel);
+void prepare_l3l4_registers(const struct osi_core_priv_data *const osi_core,
+			    const struct osi_l3_l4_filter *const l3_l4,
+#ifndef OSI_STRIPPED_LIB
+			    nveu32_t *l3_addr0_reg,
+			    nveu32_t *l3_addr2_reg,
+			    nveu32_t *l3_addr3_reg,
+			    nveu32_t *l4_addr_reg,
+#endif /* !OSI_STRIPPED_LIB */
+			    nveu32_t *l3_addr1_reg,
+			    nveu32_t *ctr_reg);
+#ifdef HSI_SUPPORT
+nve32_t hsi_common_error_inject(struct osi_core_priv_data *osi_core,
+				nveu32_t error_code);
+#endif
+nve32_t hw_validate_avb_input(struct osi_core_priv_data *const osi_core,
+			      const struct osi_core_avb_algorithm *const avb);
 #endif /* INCLUDED_CORE_COMMON_H */
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -43,13 +43,51 @@
 */
 #define MAX_TX_TS_CNT (PKT_ID_CNT * OSI_MGBE_MAX_NUM_CHANS)

+/**
+ * @brief FIFO size helper macro
+ */
+#define FIFO_SZ(x)	((((x) * 1024U) / 256U) - 1U)
+
+/**
+ * @brief Dynamic configuration helper macros.
+ */
+#define DYNAMIC_CFG_L3_L4		OSI_BIT(0)
+#define DYNAMIC_CFG_AVB			OSI_BIT(2)
+#define DYNAMIC_CFG_L2			OSI_BIT(3)
+#define DYNAMIC_CFG_L2_IDX		3U
+#define DYNAMIC_CFG_RXCSUM		OSI_BIT(4)
+#define DYNAMIC_CFG_PTP			OSI_BIT(7)
+#define DYNAMIC_CFG_EST			OSI_BIT(8)
+#define DYNAMIC_CFG_FPE			OSI_BIT(9)
+#define DYNAMIC_CFG_FRP			OSI_BIT(10)
+
+#ifndef OSI_STRIPPED_LIB
+#define DYNAMIC_CFG_FC			OSI_BIT(1)
+#define DYNAMIC_CFG_VLAN		OSI_BIT(5)
+#define DYNAMIC_CFG_EEE			OSI_BIT(6)
+#define DYNAMIC_CFG_FC_IDX		1U
+#define DYNAMIC_CFG_VLAN_IDX		5U
+#define DYNAMIC_CFG_EEE_IDX		6U
+#endif /* !OSI_STRIPPED_LIB */
+
+#define DYNAMIC_CFG_L3_L4_IDX		0U
+#define DYNAMIC_CFG_AVB_IDX		2U
+#define DYNAMIC_CFG_L2_IDX		3U
+#define DYNAMIC_CFG_RXCSUM_IDX		4U
+#define DYNAMIC_CFG_PTP_IDX		7U
+#define DYNAMIC_CFG_EST_IDX		8U
+#define DYNAMIC_CFG_FPE_IDX		9U
+#define DYNAMIC_CFG_FRP_IDX		10U
+
+#define OSI_SUSPENDED			OSI_BIT(0)
+
 /**
 * interface core ops
 */
 struct if_core_ops {
	/** Interface function called to initialize MAC and MTL registers */
-	nve32_t (*if_core_init)(struct osi_core_priv_data *const osi_core,
-				nveu32_t tx_fifo_size, nveu32_t rx_fifo_size);
+	nve32_t (*if_core_init)(struct osi_core_priv_data *const osi_core);
	/** Interface function called to deinitialize MAC and MTL registers */
	nve32_t (*if_core_deinit)(struct osi_core_priv_data *const osi_core);
	/** Interface function called to write into a PHY reg over MDIO bus */
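A short worked example of the new helpers: FIFO_SZ() converts a FIFO size in KB into the register encoding (256-byte units, minus one), and each DYNAMIC_CFG_x bit has a matching _IDX value used as an array slot for the same feature.

static const nveu32_t example_fifo_bits = FIFO_SZ(16U);        /* ((16 * 1024) / 256) - 1 = 63 */
static const nveu32_t example_cfg_bit   = DYNAMIC_CFG_PTP;     /* OSI_BIT(7) */
static const nveu32_t example_cfg_idx   = DYNAMIC_CFG_PTP_IDX; /* 7: index paired with the PTP bit */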
@@ -72,103 +110,26 @@ struct if_core_ops {
 * @brief Initialize MAC & MTL core operations.
 */
 struct core_ops {
-	/** Called to poll for software reset bit */
-	nve32_t (*poll_for_swr)(struct osi_core_priv_data *const osi_core);
	/** Called to initialize MAC and MTL registers */
-	nve32_t (*core_init)(struct osi_core_priv_data *const osi_core,
-			     const nveu32_t tx_fifo_size,
-			     const nveu32_t rx_fifo_size);
-	/** Called to deinitialize MAC and MTL registers */
-	void (*core_deinit)(struct osi_core_priv_data *const osi_core);
-	/** Called to start MAC Tx and Rx engine */
-	void (*start_mac)(struct osi_core_priv_data *const osi_core);
-	/** Called to stop MAC Tx and Rx engine */
-	void (*stop_mac)(struct osi_core_priv_data *const osi_core);
+	nve32_t (*core_init)(struct osi_core_priv_data *const osi_core);
	/** Called to handle common interrupt */
	void (*handle_common_intr)(struct osi_core_priv_data *const osi_core);
-	/** Called to set the mode at MAC (full/duplex) */
-	nve32_t (*set_mode)(struct osi_core_priv_data *const osi_core,
-			    const nve32_t mode);
-	/** Called to set the speed at MAC */
-	nve32_t (*set_speed)(struct osi_core_priv_data *const osi_core,
-			     const nve32_t speed);
	/** Called to do pad caliberation */
	nve32_t (*pad_calibrate)(struct osi_core_priv_data *const osi_core);
-	/** Called to configure MTL RxQ to forward the err pkt */
-	nve32_t (*config_fw_err_pkts)(struct osi_core_priv_data *const osi_core,
-				      const nveu32_t qinx,
-				      const nveu32_t fw_err);
-	/** Called to configure Rx Checksum offload engine */
-	nve32_t (*config_rxcsum_offload)(
-				struct osi_core_priv_data *const osi_core,
-				const nveu32_t enabled);
-	/** Called to config mac packet filter */
-	nve32_t (*config_mac_pkt_filter_reg)(
-				struct osi_core_priv_data *const osi_core,
-				const struct osi_filter *filter);
	/** Called to update MAC address 1-127 */
	nve32_t (*update_mac_addr_low_high_reg)(
				struct osi_core_priv_data *const osi_core,
				const struct osi_filter *filter);
-	/** Called to configure l3/L4 filter */
-	nve32_t (*config_l3_l4_filter_enable)(
-				struct osi_core_priv_data *const osi_core,
-				const nveu32_t enable);
+	/** Called to configure L3L4 filter */
+	nve32_t (*config_l3l4_filters)(struct osi_core_priv_data *const osi_core,
+				       nveu32_t filter_no,
+				       const struct osi_l3_l4_filter *const l3_l4);
-	/** Called to configure L3 filter */
-	nve32_t (*config_l3_filters)(struct osi_core_priv_data *const osi_core,
-				     const nveu32_t filter_no,
-				     const nveu32_t enb_dis,
-				     const nveu32_t ipv4_ipv6_match,
-				     const nveu32_t src_dst_addr_match,
-				     const nveu32_t perfect_inverse_match,
-				     const nveu32_t dma_routing_enable,
-				     const nveu32_t dma_chan);
-	/** Called to update ip4 src or desc address */
-	nve32_t (*update_ip4_addr)(struct osi_core_priv_data *const osi_core,
-				   const nveu32_t filter_no,
|
|
||||||
const nveu8_t addr[],
|
|
||||||
const nveu32_t src_dst_addr_match);
|
|
||||||
/** Called to update ip6 address */
|
|
||||||
nve32_t (*update_ip6_addr)(struct osi_core_priv_data *const osi_core,
|
|
||||||
const nveu32_t filter_no,
|
|
||||||
const nveu16_t addr[]);
|
|
||||||
/** Called to configure L4 filter */
|
|
||||||
nve32_t (*config_l4_filters)(struct osi_core_priv_data *const osi_core,
|
|
||||||
const nveu32_t filter_no,
|
|
||||||
const nveu32_t enb_dis,
|
|
||||||
const nveu32_t tcp_udp_match,
|
|
||||||
const nveu32_t src_dst_port_match,
|
|
||||||
const nveu32_t perfect_inverse_match,
|
|
||||||
const nveu32_t dma_routing_enable,
|
|
||||||
const nveu32_t dma_chan);
|
|
||||||
/** Called to update L4 Port for filter packet */
|
|
||||||
nve32_t (*update_l4_port_no)(struct osi_core_priv_data *const osi_core,
|
|
||||||
const nveu32_t filter_no,
|
|
||||||
const nveu16_t port_no,
|
|
||||||
const nveu32_t src_dst_port_match);
|
|
||||||
/** Called to set the addend value to adjust the time */
|
|
||||||
nve32_t (*config_addend)(struct osi_core_priv_data *const osi_core,
|
|
||||||
const nveu32_t addend);
|
|
||||||
/** Called to adjust the mac time */
|
/** Called to adjust the mac time */
|
||||||
nve32_t (*adjust_mactime)(struct osi_core_priv_data *const osi_core,
|
nve32_t (*adjust_mactime)(struct osi_core_priv_data *const osi_core,
|
||||||
const nveu32_t sec,
|
const nveu32_t sec,
|
||||||
const nveu32_t nsec,
|
const nveu32_t nsec,
|
||||||
const nveu32_t neg_adj,
|
const nveu32_t neg_adj,
|
||||||
const nveu32_t one_nsec_accuracy);
|
const nveu32_t one_nsec_accuracy);
|
||||||
/** Called to set current system time to MAC */
|
|
||||||
nve32_t (*set_systime_to_mac)(struct osi_core_priv_data *const osi_core,
|
|
||||||
const nveu32_t sec,
|
|
||||||
const nveu32_t nsec);
|
|
||||||
/** Called to configure the TimeStampControl register */
|
|
||||||
void (*config_tscr)(struct osi_core_priv_data *const osi_core,
|
|
||||||
const nveu32_t ptp_filter);
|
|
||||||
/** Called to configure the sub second increment register */
|
|
||||||
void (*config_ssir)(struct osi_core_priv_data *const osi_core,
|
|
||||||
const nveu32_t ptp_clock);
|
|
||||||
/** Called to configure the PTP RX packets Queue */
|
|
||||||
nve32_t (*config_ptp_rxq)(struct osi_core_priv_data *const osi_core,
|
|
||||||
const unsigned int rxq_idx,
|
|
||||||
const unsigned int enable);
|
|
||||||
/** Called to update MMC counter from HW register */
|
/** Called to update MMC counter from HW register */
|
||||||
void (*read_mmc)(struct osi_core_priv_data *const osi_core);
|
void (*read_mmc)(struct osi_core_priv_data *const osi_core);
|
||||||
/** Called to write into a PHY reg over MDIO bus */
|
/** Called to write into a PHY reg over MDIO bus */
|
||||||
@@ -180,6 +141,9 @@ struct core_ops {
|
|||||||
nve32_t (*read_phy_reg)(struct osi_core_priv_data *const osi_core,
|
nve32_t (*read_phy_reg)(struct osi_core_priv_data *const osi_core,
|
||||||
const nveu32_t phyaddr,
|
const nveu32_t phyaddr,
|
||||||
const nveu32_t phyreg);
|
const nveu32_t phyreg);
|
||||||
|
/** Called to get HW features */
|
||||||
|
nve32_t (*get_hw_features)(struct osi_core_priv_data *const osi_core,
|
||||||
|
struct osi_hw_features *hw_feat);
|
||||||
/** Called to read reg */
|
/** Called to read reg */
|
||||||
nveu32_t (*read_reg)(struct osi_core_priv_data *const osi_core,
|
nveu32_t (*read_reg)(struct osi_core_priv_data *const osi_core,
|
||||||
const nve32_t reg);
|
const nve32_t reg);
|
||||||
@@ -195,20 +159,12 @@ struct core_ops {
|
|||||||
nveu32_t (*write_macsec_reg)(struct osi_core_priv_data *const osi_core,
|
nveu32_t (*write_macsec_reg)(struct osi_core_priv_data *const osi_core,
|
||||||
const nveu32_t val,
|
const nveu32_t val,
|
||||||
const nve32_t reg);
|
const nve32_t reg);
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
|
void (*macsec_config_mac)(struct osi_core_priv_data *const osi_core,
|
||||||
|
const nveu32_t enable);
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
#endif /* MACSEC_SUPPORT */
|
#endif /* MACSEC_SUPPORT */
|
||||||
#ifndef OSI_STRIPPED_LIB
|
#ifndef OSI_STRIPPED_LIB
|
||||||
/** Called periodically to read and validate safety critical
|
|
||||||
* registers against last written value */
|
|
||||||
nve32_t (*validate_regs)(struct osi_core_priv_data *const osi_core);
|
|
||||||
/** Called to flush MTL Tx queue */
|
|
||||||
nve32_t (*flush_mtl_tx_queue)(struct osi_core_priv_data *const osi_core,
|
|
||||||
const nveu32_t qinx);
|
|
||||||
/** Called to set av parameter */
|
|
||||||
nve32_t (*set_avb_algorithm)(struct osi_core_priv_data *const osi_core,
|
|
||||||
const struct osi_core_avb_algorithm *const avb);
|
|
||||||
/** Called to get av parameter */
|
|
||||||
nve32_t (*get_avb_algorithm)(struct osi_core_priv_data *const osi_core,
|
|
||||||
struct osi_core_avb_algorithm *const avb);
|
|
||||||
/** Called to configure the MTL to forward/drop tx status */
|
/** Called to configure the MTL to forward/drop tx status */
|
||||||
nve32_t (*config_tx_status)(struct osi_core_priv_data *const osi_core,
|
nve32_t (*config_tx_status)(struct osi_core_priv_data *const osi_core,
|
||||||
const nveu32_t tx_status);
|
const nveu32_t tx_status);
|
||||||
@@ -224,6 +180,9 @@ struct core_ops {
|
|||||||
nve32_t (*config_arp_offload)(struct osi_core_priv_data *const osi_core,
|
nve32_t (*config_arp_offload)(struct osi_core_priv_data *const osi_core,
|
||||||
const nveu32_t enable,
|
const nveu32_t enable,
|
||||||
const nveu8_t *ip_addr);
|
const nveu8_t *ip_addr);
|
||||||
|
/** Called to configure HW PTP offload feature */
|
||||||
|
nve32_t (*config_ptp_offload)(struct osi_core_priv_data *const osi_core,
|
||||||
|
struct osi_pto_config *const pto_config);
|
||||||
/** Called to configure VLAN filtering */
|
/** Called to configure VLAN filtering */
|
||||||
nve32_t (*config_vlan_filtering)(
|
nve32_t (*config_vlan_filtering)(
|
||||||
struct osi_core_priv_data *const osi_core,
|
struct osi_core_priv_data *const osi_core,
|
||||||
@@ -236,10 +195,6 @@ struct core_ops {
|
|||||||
void (*configure_eee)(struct osi_core_priv_data *const osi_core,
|
void (*configure_eee)(struct osi_core_priv_data *const osi_core,
|
||||||
const nveu32_t tx_lpi_enabled,
|
const nveu32_t tx_lpi_enabled,
|
||||||
const nveu32_t tx_lpi_timer);
|
const nveu32_t tx_lpi_timer);
|
||||||
/** Called to save MAC register space during SoC suspend */
|
|
||||||
nve32_t (*save_registers)(struct osi_core_priv_data *const osi_core);
|
|
||||||
/** Called to restore MAC control registers during SoC resume */
|
|
||||||
nve32_t (*restore_registers)(struct osi_core_priv_data *const osi_core);
|
|
||||||
/** Called to set MDC clock rate for MDIO operation */
|
/** Called to set MDC clock rate for MDIO operation */
|
||||||
void (*set_mdc_clk_rate)(struct osi_core_priv_data *const osi_core,
|
void (*set_mdc_clk_rate)(struct osi_core_priv_data *const osi_core,
|
||||||
const nveu64_t csr_clk_rate);
|
const nveu64_t csr_clk_rate);
|
||||||
@@ -247,63 +202,54 @@ struct core_ops {
|
|||||||
nve32_t (*config_mac_loopback)(
|
nve32_t (*config_mac_loopback)(
|
||||||
struct osi_core_priv_data *const osi_core,
|
struct osi_core_priv_data *const osi_core,
|
||||||
const nveu32_t lb_mode);
|
const nveu32_t lb_mode);
|
||||||
#endif /* !OSI_STRIPPED_LIB */
|
|
||||||
/** Called to get HW features */
|
|
||||||
nve32_t (*get_hw_features)(struct osi_core_priv_data *const osi_core,
|
|
||||||
struct osi_hw_features *hw_feat);
|
|
||||||
/** Called to configure RSS for MAC */
|
/** Called to configure RSS for MAC */
|
||||||
nve32_t (*config_rss)(struct osi_core_priv_data *osi_core);
|
nve32_t (*config_rss)(struct osi_core_priv_data *osi_core);
|
||||||
/** Called to update GCL config */
|
/** Called to configure the PTP RX packets Queue */
|
||||||
int (*hw_config_est)(struct osi_core_priv_data *const osi_core,
|
nve32_t (*config_ptp_rxq)(struct osi_core_priv_data *const osi_core,
|
||||||
struct osi_est_config *const est);
|
const nveu32_t rxq_idx,
|
||||||
/** Called to update FPE config */
|
const nveu32_t enable);
|
||||||
int (*hw_config_fpe)(struct osi_core_priv_data *const osi_core,
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
struct osi_fpe_config *const fpe);
|
/** Called to set av parameter */
|
||||||
|
nve32_t (*set_avb_algorithm)(struct osi_core_priv_data *const osi_core,
|
||||||
|
const struct osi_core_avb_algorithm *const avb);
|
||||||
|
/** Called to get av parameter */
|
||||||
|
nve32_t (*get_avb_algorithm)(struct osi_core_priv_data *const osi_core,
|
||||||
|
struct osi_core_avb_algorithm *const avb);
|
||||||
/** Called to configure FRP engine */
|
/** Called to configure FRP engine */
|
||||||
int (*config_frp)(struct osi_core_priv_data *const osi_core,
|
nve32_t (*config_frp)(struct osi_core_priv_data *const osi_core,
|
||||||
const unsigned int enabled);
|
const nveu32_t enabled);
|
||||||
/** Called to update FRP Instruction Table entry */
|
/** Called to update FRP Instruction Table entry */
|
||||||
int (*update_frp_entry)(struct osi_core_priv_data *const osi_core,
|
nve32_t (*update_frp_entry)(struct osi_core_priv_data *const osi_core,
|
||||||
const unsigned int pos,
|
const nveu32_t pos,
|
||||||
struct osi_core_frp_data *const data);
|
struct osi_core_frp_data *const data);
|
||||||
/** Called to update FRP NVE and */
|
/** Called to update FRP NVE and */
|
||||||
int (*update_frp_nve)(struct osi_core_priv_data *const osi_core,
|
nve32_t (*update_frp_nve)(struct osi_core_priv_data *const osi_core,
|
||||||
const unsigned int nve);
|
const nveu32_t nve);
|
||||||
/** Called to configure HW PTP offload feature */
|
|
||||||
int (*config_ptp_offload)(struct osi_core_priv_data *const osi_core,
|
|
||||||
struct osi_pto_config *const pto_config);
|
|
||||||
#ifdef MACSEC_SUPPORT
|
|
||||||
void (*macsec_config_mac)(struct osi_core_priv_data *const osi_core,
|
|
||||||
const nveu32_t enable);
|
|
||||||
#endif /* MACSEC_SUPPORT */
|
|
||||||
int (*ptp_tsc_capture)(struct osi_core_priv_data *const osi_core,
|
|
||||||
struct osi_core_ptp_tsc_data *data);
|
|
||||||
#ifdef HSI_SUPPORT
|
#ifdef HSI_SUPPORT
|
||||||
/** Interface function called to initialize HSI */
|
/** Interface function called to initialize HSI */
|
||||||
int (*core_hsi_configure)(struct osi_core_priv_data *const osi_core,
|
nve32_t (*core_hsi_configure)(struct osi_core_priv_data *const osi_core,
|
||||||
const nveu32_t enable);
|
const nveu32_t enable);
|
||||||
|
/** Interface function called to inject error */
|
||||||
|
nve32_t (*core_hsi_inject_err)(struct osi_core_priv_data *const osi_core,
|
||||||
|
const nveu32_t error_code);
|
||||||
#endif
|
#endif
|
||||||
};
|
};
|
||||||
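The core_ops table above is a function-pointer dispatch layer: each MAC generation (EQOS, MGBE) fills in its own implementations and common code calls through the pointers. Below is a hedged sketch of that calling pattern; it only uses callbacks that survive this change (pad_calibrate, core_init, read_mmc), but the function itself and its error handling are illustrative, not taken from the suppressed core files.

/* Illustrative dispatch through the per-MAC operations table (a sketch;
 * assumes ops_p was populated by eqos_init_core_ops()/mgbe_init_core_ops()).
 */
static nve32_t example_core_bringup(struct osi_core_priv_data *osi_core,
				    struct core_ops *ops_p)
{
	nve32_t ret;

	ret = ops_p->pad_calibrate(osi_core);
	if (ret < 0) {
		return ret;
	}

	/* New signature in this change: FIFO sizes are no longer passed in */
	ret = ops_p->core_init(osi_core);
	if (ret == 0) {
		ops_p->read_mmc(osi_core);	/* e.g. snapshot HW MMC counters */
	}

	return ret;
}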
|
|
||||||
 /**
  * @brief constant values for drift MAC to MAC sync.
  */
-#ifndef DRIFT_CAL
-#define DRIFT_CAL 1
-#define I_COMPONENT_BY_10 3
-#define P_COMPONENT_BY_10 7
-#define WEIGHT_BY_10 10
-#define CONST_FACTOR 8 //(1sec/125ns)
-#define MAX_FREQ 85000000LL
-#endif
-#define EQOS_SEC_OFFSET 0xB08
-#define EQOS_NSEC_OFFSET 0xB0C
-#define MGBE_SEC_OFFSET 0xD08
-#define MGBE_NSEC_OFFSET 0xD0C
-#define ETHER_NSEC_MASK 0x7FFFFFFF
-#define SERVO_STATS_0 0
-#define SERVO_STATS_1 1
-#define SERVO_STATS_2 2
+/* No longer needed since DRIFT CAL is not used */
+#define I_COMPONENT_BY_10 3LL
+#define P_COMPONENT_BY_10 7LL
+#define WEIGHT_BY_10 10LL
+#define MAX_FREQ_POS 250000000LL
+#define MAX_FREQ_NEG -250000000LL
+#define SERVO_STATS_0 0U
+#define SERVO_STATS_1 1U
+#define SERVO_STATS_2 2U
+#define OSI_NSEC_PER_SEC_SIGNED 1000000000LL
+
+#define ETHER_NSEC_MASK 0x7FFFFFFFU

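These constants read like the gains of a simple proportional-integral servo for MAC-to-MAC drift correction: the _BY_10 suffix suggests fixed-point gains of 0.3 and 0.7, with the result clamped between MAX_FREQ_NEG and MAX_FREQ_POS. The actual control law lives in the EQOS/MGBE core files whose diffs are suppressed above, so the following is only a hedged sketch of how such constants are conventionally combined; the function, its arguments, and the use of long long are assumptions for illustration.

/* Hypothetical PI servo step (NOT the driver's actual implementation):
 * combine proportional and integral terms scaled by tenths, then clamp
 * the resulting frequency adjustment to the allowed range.
 */
static long long example_servo_step(long long offset_ns, long long drift_sum_ns)
{
	long long adj = ((P_COMPONENT_BY_10 * offset_ns) +
			 (I_COMPONENT_BY_10 * drift_sum_ns)) / WEIGHT_BY_10;

	if (adj > MAX_FREQ_POS) {
		adj = MAX_FREQ_POS;
	} else if (adj < MAX_FREQ_NEG) {
		adj = MAX_FREQ_NEG;
	}

	return adj;
}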
 /**
  * @brief servo data structure.
@@ -330,6 +276,64 @@ struct core_ptp_servo {
 	nveu32_t m2m_lock;
 };
+
+/**
+ * @brief AVB dynamic config storage structure
+ */
+struct core_avb {
+	/** Represend whether AVB config done or not */
+	nveu32_t used;
+	/** AVB data structure */
+	struct osi_core_avb_algorithm avb_info;
+};
+
+/**
+ * @brief VLAN dynamic config storage structure
+ */
+struct core_vlan {
+	/** VID to be stored */
+	nveu32_t vid;
+	/** Represens whether VLAN config done or not */
+	nveu32_t used;
+};
+
+/**
+ * @brief L2 filter dynamic config storage structure
+ */
+struct core_l2 {
+	nveu32_t used;
+	struct osi_filter filter;
+};
+
+/**
+ * @brief Dynamic config storage structure
+ */
+struct dynamic_cfg {
+	nveu32_t flags;
+	/** L3_L4 filters */
+	struct osi_l3_l4_filter l3_l4[OSI_MGBE_MAX_L3_L4_FILTER];
+	/** flow control */
+	nveu32_t flow_ctrl;
+	/** AVB */
+	struct core_avb avb[OSI_MGBE_MAX_NUM_QUEUES];
+	/** RXCSUM */
+	nveu32_t rxcsum;
+	/** VLAN arguments storage */
+	struct core_vlan vlan[VLAN_NUM_VID];
+	/** LPI parameters storage */
+	nveu32_t tx_lpi_enabled;
+	nveu32_t tx_lpi_timer;
+	/** PTP information storage */
+	nveu32_t ptp;
+	/** EST information storage */
+	struct osi_est_config est;
+	/** FPE information storage */
+	struct osi_fpe_config fpe;
+	/** L2 filter storage */
+	struct osi_filter l2_filter;
+	/** L2 filter configuration */
+	struct core_l2 l2[EQOS_MAX_MAC_ADDRESS_FILTER];
+};
+
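Taken together, struct dynamic_cfg is a shadow copy of run-time configuration (L3/L4 and L2 filters, AVB, VLAN, EEE, PTP, EST/FPE) that the core layer can replay after a suspend/resume or MAC reset, with the DYNAMIC_CFG_* bits recording which pieces are valid. The sketch below shows how such a record might be captured; it is illustrative only, and the qindex field name is assumed from the public OSI AVB structure rather than taken from this hunk.

/* Illustrative only: remember an applied AVB configuration so it can be
 * replayed later (assumes 'cfg' is the struct dynamic_cfg instance kept
 * inside struct core_local).
 */
static void example_store_avb(struct dynamic_cfg *cfg,
			      const struct osi_core_avb_algorithm *avb)
{
	if (avb->qindex < OSI_MGBE_MAX_NUM_QUEUES) {
		cfg->avb[avb->qindex].avb_info = *avb;	/* struct copy */
		cfg->avb[avb->qindex].used = OSI_ENABLE;
		cfg->flags |= DYNAMIC_CFG_AVB;
	}
}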
 /**
  * @brief Core local data structure.
  */
@@ -351,7 +355,7 @@ struct core_local {
 	/** This is the head node for PTP packet ID queue */
 	struct osi_core_tx_ts tx_ts_head;
 	/** Maximum number of queues/channels */
-	nveu32_t max_chans;
+	nveu32_t num_max_chans;
 	/** GCL depth supported by HW */
 	nveu32_t gcl_dep;
 	/** Max GCL width (time + gate) value supported by HW */
@@ -370,8 +374,43 @@ struct core_local {
 	nveu32_t pps_freq;
 	/** Time interval mask for GCL entry */
 	nveu32_t ti_mask;
+	/** Hardware dynamic configuration context */
+	struct dynamic_cfg cfg;
+	/** Hardware dynamic configuration state */
+	nveu32_t state;
+	/** XPCS Lane bringup/Block lock status */
+	nveu32_t lane_status;
+	/** Exact MAC used across SOCs 0:Legacy EQOS, 1:Orin EQOS, 2:Orin MGBE */
+	nveu32_t l_mac_ver;
+#if defined(L3L4_WILDCARD_FILTER)
+	/** l3l4 wildcard filter configured (OSI_ENABLE) / not configured (OSI_DISABLE) */
+	nveu32_t l3l4_wildcard_filter_configured;
+#endif /* L3L4_WILDCARD_FILTER */
 };
+
+/**
+ * @brief update_counter_u - Increment nveu32_t counter
+ *
+ * @param[out] value: Pointer to value to be incremented.
+ * @param[in] incr: increment value
+ *
+ * @note
+ * API Group:
+ * - Initialization: Yes
+ * - Run time: No
+ * - De-initialization: No
+ */
+static inline void update_counter_u(nveu32_t *value, nveu32_t incr)
+{
+	nveu32_t temp = *value + incr;
+
+	if (temp < *value) {
+		/* Overflow, so reset it to zero */
+		*value = 0U;
+	}
+	*value = temp;
+}
+
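update_counter_u() wraps a plain nveu32_t addition with an overflow check before the new total is stored. A short usage sketch (the counter name is hypothetical, not from this commit):

/* Hypothetical statistics update using the helper above. */
nveu32_t tx_ts_drops = 0U;

update_counter_u(&tx_ts_drops, 1U);	/* normal increment */
update_counter_u(&tx_ts_drops, 16U);	/* bulk increment, overflow-checked */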
 /**
  * @brief eqos_init_core_ops - Initialize EQOS core operations.
  *
@@ -385,19 +424,6 @@ struct core_local {
  */
 void eqos_init_core_ops(struct core_ops *ops);
 
-/**
- * @brief ivc_init_core_ops - Initialize IVC core operations.
- *
- * @param[in] ops: Core operations pointer.
- *
- * @note
- * API Group:
- * - Initialization: Yes
- * - Run time: No
- * - De-initialization: No
- */
-void ivc_init_core_ops(struct core_ops *ops);
-
 /**
  * @brief mgbe_init_core_ops - Initialize MGBE core operations.
  *
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -32,10 +32,10 @@
  *
  */
 static void core_dump_struct(struct osi_core_priv_data *osi_core,
-			     unsigned char *ptr,
+			     nveu8_t *ptr,
 			     unsigned long size)
 {
-	nveu32_t i = 0, rem, j;
+	nveu32_t i = 0, rem, j = 0;
 	unsigned long temp;
 
 	if (ptr == OSI_NULL) {
@@ -72,40 +72,40 @@ void core_structs_dump(struct osi_core_priv_data *osi_core)
 	osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS,
 				 "CORE struct size = %lu",
 				 sizeof(struct osi_core_priv_data));
-	core_dump_struct(osi_core, (unsigned char *)osi_core,
+	core_dump_struct(osi_core, (nveu8_t *)osi_core,
 			 sizeof(struct osi_core_priv_data));
 #ifdef MACSEC_SUPPORT
 	osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS,
 				 "MACSEC ops size = %lu",
 				 sizeof(struct osi_macsec_core_ops));
-	core_dump_struct(osi_core, (unsigned char *)osi_core->macsec_ops,
+	core_dump_struct(osi_core, (nveu8_t *)osi_core->macsec_ops,
 			 sizeof(struct osi_macsec_core_ops));
 
 	osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS,
 				 "MACSEC LUT status size = %lu",
 				 sizeof(struct osi_macsec_lut_status));
-	core_dump_struct(osi_core, (unsigned char *)osi_core->macsec_ops,
+	core_dump_struct(osi_core, (nveu8_t *)osi_core->macsec_ops,
 			 sizeof(struct osi_macsec_lut_status));
 #endif
 	osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS,
 				 "HW features size = %lu",
 				 sizeof(struct osi_hw_features));
-	core_dump_struct(osi_core, (unsigned char *)osi_core->hw_feature,
+	core_dump_struct(osi_core, (nveu8_t *)osi_core->hw_feature,
 			 sizeof(struct osi_hw_features));
 	osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS,
 				 "core local size = %lu",
 				 sizeof(struct core_local));
-	core_dump_struct(osi_core, (unsigned char *)l_core,
+	core_dump_struct(osi_core, (nveu8_t *)l_core,
 			 sizeof(struct core_local));
 	osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS,
 				 "core ops size = %lu",
 				 sizeof(struct core_ops));
-	core_dump_struct(osi_core, (unsigned char *)l_core->ops_p,
+	core_dump_struct(osi_core, (nveu8_t *)l_core->ops_p,
 			 sizeof(struct core_ops));
 	osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS,
 				 "if_ops_p struct size = %lu",
 				 sizeof(struct if_core_ops));
-	core_dump_struct(osi_core, (unsigned char *)l_core->if_ops_p,
+	core_dump_struct(osi_core, (nveu8_t *)l_core->if_ops_p,
 			 sizeof(struct if_core_ops));
 }
 
@@ -116,9 +116,9 @@ void core_structs_dump(struct osi_core_priv_data *osi_core)
  */
 void core_reg_dump(struct osi_core_priv_data *osi_core)
 {
-	unsigned int max_addr;
-	unsigned int addr = 0x0;
-	unsigned int reg_val;
+	nveu32_t max_addr;
+	nveu32_t addr = 0x0;
+	nveu32_t reg_val;
 
 	switch (osi_core->mac_ver) {
 	case OSI_EQOS_MAC_5_00:
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -20,6 +20,7 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
+#ifdef OSI_DEBUG
 #ifndef INCLUDED_CORE_DEBUG_H
 #define INCLUDED_CORE_DEBUG_H
 
@@ -32,3 +33,4 @@ void core_reg_dump(struct osi_core_priv_data *osi_core);
 void core_structs_dump(struct osi_core_priv_data *osi_core);
 
 #endif /* INCLUDED_CORE_DEBUG_H*/
+#endif /* OSI_DEBUG */
3775 osi/core/eqos_core.c
File diff suppressed because it is too large
@@ -54,7 +54,7 @@ static inline nveu64_t update_mmc_val(struct osi_core_priv_data *const osi_core,
 					nveu64_t last_value,
 					nveu64_t offset)
 {
-	nveu64_t temp;
+	nveu64_t temp = 0;
 	nveu32_t value = osi_readla(osi_core,
 				    (nveu8_t *)osi_core->base + offset);
 
@@ -65,11 +65,9 @@ static inline nveu64_t update_mmc_val(struct osi_core_priv_data *const osi_core,
 			     "Value overflow resetting all counters\n",
 			     (nveul64_t)offset);
 		eqos_reset_mmc(osi_core);
-	} else {
-		return temp;
 	}
 
-	return 0;
+	return temp;
 }
 
 /**
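The reworked update_mmc_val() follows the single-exit style used elsewhere in the library: temp starts at zero, the overflow branch resets the MMC block via eqos_reset_mmc(), and one return hands back the result. The accumulation itself (64-bit running counter plus a 32-bit hardware register read) sits in the part of the function not shown in this hunk, so the following is only a hedged sketch of that usual pattern, with an invented function name:

/* Hedged sketch of the typical MMC accumulation check (the real logic is in
 * the elided portion of update_mmc_val()).
 */
static nveu64_t example_accumulate_mmc(nveu64_t last_value, nveu32_t hw_reg_value)
{
	nveu64_t temp = last_value + (nveu64_t)hw_reg_value;

	if (temp < last_value) {
		/* overflow: caller resets the whole MMC block and restarts from 0 */
		return 0ULL;
	}

	return temp;
}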
322 osi/core/frp.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -33,7 +33,7 @@
  *
  */
 static void frp_entry_copy(struct osi_core_frp_entry *dst,
-			   struct osi_core_frp_entry *src)
+			   struct osi_core_frp_entry *const src)
 {
 	dst->frp_id = src->frp_id;
 	dst->data.match_data = src->data.match_data;
@@ -61,13 +61,14 @@ static void frp_entry_copy(struct osi_core_frp_entry *dst,
|
|||||||
* @retval 0 on success.
|
* @retval 0 on success.
|
||||||
* @retval -1 on failure.
|
* @retval -1 on failure.
|
||||||
*/
|
*/
|
||||||
static int frp_entry_find(struct osi_core_priv_data *const osi_core,
|
static nve32_t frp_entry_find(struct osi_core_priv_data *const osi_core,
|
||||||
int frp_id,
|
nve32_t frp_id,
|
||||||
unsigned char *start,
|
nveu8_t *start,
|
||||||
unsigned char *no_entries)
|
nveu8_t *no_entries)
|
||||||
{
|
{
|
||||||
unsigned char count = OSI_NONE, found = OSI_NONE;
|
nveu8_t count = OSI_NONE, found = OSI_NONE;
|
||||||
struct osi_core_frp_entry *entry = OSI_NULL;
|
struct osi_core_frp_entry *entry = OSI_NULL;
|
||||||
|
nve32_t ret = 0;
|
||||||
|
|
||||||
/* Parse the FRP table for give frp_id */
|
/* Parse the FRP table for give frp_id */
|
||||||
for (count = 0U; count < osi_core->frp_cnt; count++) {
|
for (count = 0U; count < osi_core->frp_cnt; count++) {
|
||||||
@@ -80,17 +81,17 @@ static int frp_entry_find(struct osi_core_priv_data *const osi_core,
|
|||||||
found = OSI_ENABLE;
|
found = OSI_ENABLE;
|
||||||
} else {
|
} else {
|
||||||
/* Increment entries */
|
/* Increment entries */
|
||||||
*no_entries = (unsigned char) (*no_entries + 1U);
|
*no_entries = (nveu8_t)(*no_entries + 1U);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (found == OSI_NONE) {
|
if (found == OSI_NONE) {
|
||||||
/* No entry found return error */
|
/* No entry found return error */
|
||||||
return -1;
|
ret = -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -104,34 +105,38 @@ static int frp_entry_find(struct osi_core_priv_data *const osi_core,
|
|||||||
*
|
*
|
||||||
* @retval No of FRP entries required.
|
* @retval No of FRP entries required.
|
||||||
*/
|
*/
|
||||||
static unsigned char frp_req_entries(unsigned char offset,
|
static nveu8_t frp_req_entries(nveu8_t offset,
|
||||||
unsigned char match_length)
|
nveu8_t match_length)
|
||||||
{
|
{
|
||||||
unsigned char req = 0U;
|
nveu8_t req = 0U;
|
||||||
|
nveu8_t temp_match_length = match_length;
|
||||||
|
|
||||||
/* Validate for match_length */
|
/* Validate for temp_match_length */
|
||||||
if ((match_length == OSI_NONE) ||
|
if ((temp_match_length == OSI_NONE) ||
|
||||||
(match_length > OSI_FRP_MATCH_DATA_MAX)) {
|
(temp_match_length > OSI_FRP_MATCH_DATA_MAX)) {
|
||||||
/* return zero */
|
/* return zero */
|
||||||
return req;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Check does the given length can fit in fist entry */
|
/* Check does the given length can fit in fist entry */
|
||||||
if (match_length <= (unsigned char) FRP_OFFSET_BYTES(offset)) {
|
if (temp_match_length <= (nveu8_t)FRP_OFFSET_BYTES(offset)) {
|
||||||
/* Require one entry */
|
/* Require one entry */
|
||||||
return 1U;
|
req = 1U;
|
||||||
|
goto done;
|
||||||
}
|
}
|
||||||
/* Initialize req as 1U and decrement length by FRP_OFFSET_BYTES */
|
/* Initialize req as 1U and decrement length by FRP_OFFSET_BYTES */
|
||||||
req = 1U;
|
req = 1U;
|
||||||
match_length = (unsigned char) (match_length - (unsigned char) FRP_OFFSET_BYTES(offset));
|
temp_match_length = (nveu8_t)(temp_match_length -
|
||||||
if ((match_length / FRP_MD_SIZE) < OSI_FRP_MATCH_DATA_MAX) {
|
(nveu8_t)FRP_OFFSET_BYTES(offset));
|
||||||
req = (unsigned char) (req + (match_length / FRP_MD_SIZE));
|
if ((temp_match_length / FRP_MD_SIZE) < OSI_FRP_MATCH_DATA_MAX) {
|
||||||
if ((match_length % FRP_MD_SIZE) != OSI_NONE) {
|
req = (nveu8_t)(req + (temp_match_length / FRP_MD_SIZE));
|
||||||
|
if ((temp_match_length % FRP_MD_SIZE) != OSI_NONE) {
|
||||||
/* Need one more entry */
|
/* Need one more entry */
|
||||||
req = (unsigned char) (req + 1U);
|
req = (nveu8_t)(req + 1U);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
done:
|
||||||
return req;
|
return req;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -144,7 +149,7 @@ static unsigned char frp_req_entries(unsigned char offset,
|
|||||||
* @param[in] data: FRP entry data pointer.
|
* @param[in] data: FRP entry data pointer.
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
static void frp_entry_mode_parse(unsigned char filter_mode,
|
static void frp_entry_mode_parse(nveu8_t filter_mode,
|
||||||
struct osi_core_frp_data *data)
|
struct osi_core_frp_data *data)
|
||||||
{
|
{
|
||||||
switch (filter_mode) {
|
switch (filter_mode) {
|
||||||
@@ -189,7 +194,7 @@ static void frp_entry_mode_parse(unsigned char filter_mode,
|
|||||||
data->inverse_match = OSI_DISABLE;
|
data->inverse_match = OSI_DISABLE;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
//OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
|
//OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
|
||||||
// "Invalid filter mode argment\n",
|
// "Invalid filter mode argment\n",
|
||||||
// filter_mode);
|
// filter_mode);
|
||||||
break;
|
break;
|
||||||
@@ -205,6 +210,7 @@ static void frp_entry_mode_parse(unsigned char filter_mode,
|
|||||||
*
|
*
|
||||||
* @param[in] osi_core: OSI core private data structure.
|
* @param[in] osi_core: OSI core private data structure.
|
||||||
* @param[in] frp_id: FRP ID to add.
|
* @param[in] frp_id: FRP ID to add.
|
||||||
|
* @param[in] pos: FRP entry position.
|
||||||
* @param[in] match: Pointer to match data.
|
* @param[in] match: Pointer to match data.
|
||||||
* @param[in] length: Match data length.
|
* @param[in] length: Match data length.
|
||||||
* @param[in] offset: Actual match data offset position.
|
* @param[in] offset: Actual match data offset position.
|
||||||
@@ -215,30 +221,34 @@ static void frp_entry_mode_parse(unsigned char filter_mode,
|
|||||||
* @retval 0 on success.
|
* @retval 0 on success.
|
||||||
* @retval -1 on failure.
|
* @retval -1 on failure.
|
||||||
*/
|
*/
|
||||||
static int frp_entry_add(struct osi_core_priv_data *const osi_core,
|
static nve32_t frp_entry_add(struct osi_core_priv_data *const osi_core,
|
||||||
int frp_id,
|
nve32_t frp_id,
|
||||||
unsigned char pos,
|
nveu8_t pos,
|
||||||
unsigned char *const match,
|
nveu8_t *const match,
|
||||||
unsigned char length,
|
nveu8_t length,
|
||||||
unsigned char offset,
|
nveu8_t offset,
|
||||||
unsigned char filter_mode,
|
nveu8_t filter_mode,
|
||||||
int next_frp_id,
|
nve32_t next_frp_id,
|
||||||
unsigned int dma_sel)
|
nveu32_t dma_sel)
|
||||||
{
|
{
|
||||||
struct osi_core_frp_entry *entry = OSI_NULL;
|
struct osi_core_frp_entry *entry = OSI_NULL;
|
||||||
struct osi_core_frp_data *data = OSI_NULL;
|
struct osi_core_frp_data *data = OSI_NULL;
|
||||||
unsigned int req_entries = 0U;
|
nveu32_t req_entries = 0U;
|
||||||
unsigned char ok_index = 0U;
|
nveu8_t ok_index = 0U;
|
||||||
unsigned char fo_t = 0U;
|
nveu8_t fo_t = 0U;
|
||||||
unsigned char fp_t = 0U;
|
nveu8_t fp_t = 0U;
|
||||||
unsigned char i = 0U, j = 0U, md_pos = 0U;
|
nveu8_t i = 0U, j = 0U, md_pos = 0U;
|
||||||
|
nveu8_t temp_pos = pos;
|
||||||
|
nve32_t ret;
|
||||||
|
nveu32_t dma_sel_val[MAX_MAC_IP_TYPES] = {0xFFU, 0x3FF};
|
||||||
|
|
||||||
/* Validate length */
|
/* Validate length */
|
||||||
if (length > OSI_FRP_MATCH_DATA_MAX) {
|
if (length > OSI_FRP_MATCH_DATA_MAX) {
|
||||||
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND,
|
||||||
"Invalid match length\n",
|
"Invalid match length\n",
|
||||||
length);
|
length);
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Validate filter_mode */
|
/* Validate filter_mode */
|
||||||
@@ -246,7 +256,8 @@ static int frp_entry_add(struct osi_core_priv_data *const osi_core,
|
|||||||
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
|
||||||
"Invalid filter mode argment\n",
|
"Invalid filter mode argment\n",
|
||||||
filter_mode);
|
filter_mode);
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Validate offset */
|
/* Validate offset */
|
||||||
@@ -254,27 +265,38 @@ static int frp_entry_add(struct osi_core_priv_data *const osi_core,
|
|||||||
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
|
||||||
"Invalid offset value\n",
|
"Invalid offset value\n",
|
||||||
offset);
|
offset);
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto done;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Validate channel selection */
|
||||||
|
if (dma_sel > dma_sel_val[osi_core->mac]) {
|
||||||
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
|
||||||
|
"Invalid DMA selection\n",
|
||||||
|
(nveu64_t)dma_sel);
|
||||||
|
ret = -1;
|
||||||
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Check for avilable space */
|
/* Check for avilable space */
|
||||||
req_entries = frp_req_entries(offset, length);
|
req_entries = frp_req_entries(offset, length);
|
||||||
if ((req_entries >= OSI_FRP_MAX_ENTRY) ||
|
if ((req_entries >= OSI_FRP_MAX_ENTRY) ||
|
||||||
(req_entries + pos) >= OSI_FRP_MAX_ENTRY) {
|
((req_entries + temp_pos) >= OSI_FRP_MAX_ENTRY)) {
|
||||||
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
|
||||||
"No space to update FRP ID\n",
|
"No space to update FRP ID\n",
|
||||||
OSI_NONE);
|
OSI_NONE);
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Validate next_frp_id index ok_index */
|
/* Validate next_frp_id index ok_index */
|
||||||
if (filter_mode == OSI_FRP_MODE_LINK ||
|
if ((filter_mode == OSI_FRP_MODE_LINK) ||
|
||||||
filter_mode == OSI_FRP_MODE_IM_LINK) {
|
(filter_mode == OSI_FRP_MODE_IM_LINK)) {
|
||||||
if (frp_entry_find(osi_core, next_frp_id, &i, &j) < 0) {
|
if (frp_entry_find(osi_core, next_frp_id, &i, &j) < 0) {
|
||||||
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
||||||
"No Link FRP ID index found\n",
|
"No Link FRP ID index found\n",
|
||||||
OSI_NONE);
|
OSI_NONE);
|
||||||
i = (unsigned char) next_frp_id;
|
i = (nveu8_t)next_frp_id;
|
||||||
}
|
}
|
||||||
ok_index = i;
|
ok_index = i;
|
||||||
}
|
}
|
||||||
@@ -285,7 +307,7 @@ static int frp_entry_add(struct osi_core_priv_data *const osi_core,
|
|||||||
md_pos = 0U;
|
md_pos = 0U;
|
||||||
for (i = 0U; i < req_entries; i++) {
|
for (i = 0U; i < req_entries; i++) {
|
||||||
/* Get FRP entry*/
|
/* Get FRP entry*/
|
||||||
entry = &osi_core->frp_table[pos];
|
entry = &osi_core->frp_table[temp_pos];
|
||||||
data = &entry->data;
|
data = &entry->data;
|
||||||
|
|
||||||
/* Fill FRP ID */
|
/* Fill FRP ID */
|
||||||
@@ -295,9 +317,9 @@ static int frp_entry_add(struct osi_core_priv_data *const osi_core,
|
|||||||
data->match_data = OSI_NONE;
|
data->match_data = OSI_NONE;
|
||||||
data->match_en = OSI_NONE;
|
data->match_en = OSI_NONE;
|
||||||
for (j = fp_t; j < FRP_MD_SIZE; j++) {
|
for (j = fp_t; j < FRP_MD_SIZE; j++) {
|
||||||
data->match_data |= ((unsigned int)match[md_pos])
|
data->match_data |= ((nveu32_t)match[md_pos])
|
||||||
<< (j * FRP_ME_BYTE_SHIFT);
|
<< (j * FRP_ME_BYTE_SHIFT);
|
||||||
data->match_en |= ((unsigned int)FRP_ME_BYTE <<
|
data->match_en |= ((nveu32_t)FRP_ME_BYTE <<
|
||||||
(j * FRP_ME_BYTE_SHIFT));
|
(j * FRP_ME_BYTE_SHIFT));
|
||||||
md_pos++;
|
md_pos++;
|
||||||
if (md_pos >= length) {
|
if (md_pos >= length) {
|
||||||
@@ -323,10 +345,10 @@ static int frp_entry_add(struct osi_core_priv_data *const osi_core,
|
|||||||
data->next_ins_ctrl = OSI_ENABLE;
|
data->next_ins_ctrl = OSI_ENABLE;
|
||||||
|
|
||||||
/* Init next FRP entry */
|
/* Init next FRP entry */
|
||||||
pos++;
|
temp_pos++;
|
||||||
fo_t++;
|
fo_t++;
|
||||||
fp_t = OSI_NONE;
|
fp_t = OSI_NONE;
|
||||||
data->ok_index = pos;
|
data->ok_index = temp_pos;
|
||||||
} else {
|
} else {
|
||||||
data->next_ins_ctrl = OSI_DISABLE;
|
data->next_ins_ctrl = OSI_DISABLE;
|
||||||
data->ok_index = OSI_DISABLE;
|
data->ok_index = OSI_DISABLE;
|
||||||
@@ -334,14 +356,16 @@ static int frp_entry_add(struct osi_core_priv_data *const osi_core,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Check and fill final OKI */
|
/* Check and fill final OKI */
|
||||||
if (filter_mode == OSI_FRP_MODE_LINK ||
|
if ((filter_mode == OSI_FRP_MODE_LINK) ||
|
||||||
filter_mode == OSI_FRP_MODE_IM_LINK) {
|
(filter_mode == OSI_FRP_MODE_IM_LINK)) {
|
||||||
/* Update NIC and OKI in final entry */
|
/* Update NIC and OKI in final entry */
|
||||||
data->next_ins_ctrl = OSI_ENABLE;
|
data->next_ins_ctrl = OSI_ENABLE;
|
||||||
data->ok_index = ok_index;
|
data->ok_index = ok_index;
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
ret = 0;
|
||||||
|
done:
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -350,16 +374,19 @@ static int frp_entry_add(struct osi_core_priv_data *const osi_core,
|
|||||||
* Algorithm: Update FRP table into HW.
|
* Algorithm: Update FRP table into HW.
|
||||||
*
|
*
|
||||||
* @param[in] osi_core: OSI core private data structure.
|
* @param[in] osi_core: OSI core private data structure.
|
||||||
|
* @param[in] ops_p: Core operations data structure.
|
||||||
*
|
*
|
||||||
* @retval 0 on success.
|
* @retval 0 on success.
|
||||||
* @retval -1 on failure.
|
* @retval -1 on failure.
|
||||||
*/
|
*/
|
||||||
static int frp_hw_write(struct osi_core_priv_data *const osi_core,
|
nve32_t frp_hw_write(struct osi_core_priv_data *const osi_core,
|
||||||
struct core_ops *ops_p)
|
struct core_ops *const ops_p)
|
||||||
{
|
{
|
||||||
int ret = -1, tmp = -1;
|
nve32_t ret = 0;
|
||||||
|
nve32_t tmp = 0;
|
||||||
struct osi_core_frp_entry *entry;
|
struct osi_core_frp_entry *entry;
|
||||||
unsigned int frp_cnt = osi_core->frp_cnt, i = OSI_NONE;
|
struct osi_core_frp_data bypass_entry = {};
|
||||||
|
nveu32_t frp_cnt = osi_core->frp_cnt, i = OSI_NONE;
|
||||||
|
|
||||||
/* Disable the FRP in HW */
|
/* Disable the FRP in HW */
|
||||||
ret = ops_p->config_frp(osi_core, OSI_DISABLE);
|
ret = ops_p->config_frp(osi_core, OSI_DISABLE);
|
||||||
@@ -371,10 +398,21 @@ static int frp_hw_write(struct osi_core_priv_data *const osi_core,
|
|||||||
goto hw_write_enable_frp;
|
goto hw_write_enable_frp;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Check space for XCS BYPASS rule */
|
||||||
|
if ((frp_cnt + 1U) > OSI_FRP_MAX_ENTRY) {
|
||||||
|
ret = -1;
|
||||||
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
||||||
|
"No space for rules\n", OSI_NONE);
|
||||||
|
goto error;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Check HW table size for non-zero */
|
||||||
|
if (frp_cnt != 0U) {
|
||||||
/* Write FRP entries into HW */
|
/* Write FRP entries into HW */
|
||||||
for (i = 0; i < frp_cnt; i++) {
|
for (i = 0; i < frp_cnt; i++) {
|
||||||
entry = &osi_core->frp_table[i];
|
entry = &osi_core->frp_table[i];
|
||||||
ret = ops_p->update_frp_entry(osi_core, i, &entry->data);
|
ret = ops_p->update_frp_entry(osi_core, i,
|
||||||
|
&entry->data);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
||||||
"Fail to update FRP entry\n",
|
"Fail to update FRP entry\n",
|
||||||
@@ -383,8 +421,20 @@ static int frp_hw_write(struct osi_core_priv_data *const osi_core,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Write BYPASS rule for XDCS */
|
||||||
|
bypass_entry.match_en = 0x0U;
|
||||||
|
bypass_entry.accept_frame = 1;
|
||||||
|
bypass_entry.reject_frame = 1;
|
||||||
|
ret = ops_p->update_frp_entry(osi_core, frp_cnt, &bypass_entry);
|
||||||
|
if (ret < 0) {
|
||||||
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
||||||
|
"Fail to update BYPASS entry\n",
|
||||||
|
OSI_NONE);
|
||||||
|
goto hw_write_enable_frp;
|
||||||
|
}
|
||||||
|
|
||||||
/* Update the NVE */
|
/* Update the NVE */
|
||||||
ret = ops_p->update_frp_nve(osi_core, (frp_cnt - 1U));
|
ret = ops_p->update_frp_nve(osi_core, frp_cnt);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
||||||
"Fail to update FRP NVE\n",
|
"Fail to update FRP NVE\n",
|
||||||
@@ -394,6 +444,9 @@ static int frp_hw_write(struct osi_core_priv_data *const osi_core,
|
|||||||
/* Enable the FRP in HW */
|
/* Enable the FRP in HW */
|
||||||
hw_write_enable_frp:
|
hw_write_enable_frp:
|
||||||
tmp = ops_p->config_frp(osi_core, OSI_ENABLE);
|
tmp = ops_p->config_frp(osi_core, OSI_ENABLE);
|
||||||
|
}
|
||||||
|
|
||||||
|
error:
|
||||||
return (ret < 0) ? ret : tmp;
|
return (ret < 0) ? ret : tmp;
|
||||||
}
|
}
|
||||||
|
|
||||||
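The new frp_hw_write() code appends one extra rule after the user-programmed entries: what the commit labels a BYPASS rule for XDCS, an entry with match_en cleared and both accept_frame and reject_frame set, and the NVE register is then programmed with frp_cnt so that this extra slot is covered. A small sketch of the appended entry, using the field names from struct osi_core_frp_data as they appear above (the interpretation of the accept/reject combination is the commit's, not verified here):

/* Sketch of the terminating bypass rule appended by frp_hw_write(). */
struct osi_core_frp_data bypass = {0};

bypass.match_en     = 0x0U;	/* no explicit match data */
bypass.accept_frame = 1;	/* accept ... */
bypass.reject_frame = 1;	/* ... and reject set together, per the BYPASS comment */

/* Programmed at index frp_cnt, so NVE becomes frp_cnt (entries 0..frp_cnt). */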
@@ -409,17 +462,17 @@ hw_write_enable_frp:
|
|||||||
* @retval 0 on success.
|
* @retval 0 on success.
|
||||||
* @retval -1 on failure.
|
* @retval -1 on failure.
|
||||||
*/
|
*/
|
||||||
static int frp_add_proto(struct osi_core_priv_data *const osi_core,
|
static nve32_t frp_add_proto(struct osi_core_priv_data *const osi_core,
|
||||||
struct osi_core_frp_cmd *const cmd,
|
struct osi_core_frp_cmd *const cmd,
|
||||||
unsigned char *pos)
|
nveu8_t *pos)
|
||||||
{
|
{
|
||||||
int ret = -1, proto_oki = -1;
|
nve32_t ret, proto_oki;
|
||||||
unsigned char proto_entry = OSI_DISABLE;
|
nveu8_t proto_entry = OSI_DISABLE;
|
||||||
unsigned char req = 0U;
|
nveu8_t req = 0U;
|
||||||
unsigned char proto_match[FRP_PROTO_LENGTH];
|
nveu8_t proto_match[FRP_PROTO_LENGTH];
|
||||||
unsigned char proto_lendth;
|
nveu8_t proto_lendth;
|
||||||
unsigned char proto_offset;
|
nveu8_t proto_offset;
|
||||||
unsigned char match_type = cmd->match_type;
|
nveu8_t match_type = cmd->match_type;
|
||||||
|
|
||||||
switch (match_type) {
|
switch (match_type) {
|
||||||
case OSI_FRP_MATCH_L4_S_UPORT:
|
case OSI_FRP_MATCH_L4_S_UPORT:
|
||||||
@@ -462,16 +515,18 @@ static int frp_add_proto(struct osi_core_priv_data *const osi_core,
|
|||||||
/* Check and Add protocol FRP entire */
|
/* Check and Add protocol FRP entire */
|
||||||
if (proto_entry == OSI_ENABLE) {
|
if (proto_entry == OSI_ENABLE) {
|
||||||
/* Check for space */
|
/* Check for space */
|
||||||
req = (unsigned char) (frp_req_entries(cmd->offset, cmd->match_length) + 1U);
|
req = (nveu8_t)(frp_req_entries(cmd->offset, cmd->match_length) + 1U);
|
||||||
if (*pos > (OSI_FRP_MAX_ENTRY - req)) {
|
if (*pos > (OSI_FRP_MAX_ENTRY - req)) {
|
||||||
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
||||||
"Fail add FRP protocol entry\n",
|
"Fail add FRP protocol entry\n",
|
||||||
OSI_NONE);
|
OSI_NONE);
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Add protocol FRP entire */
|
/* Add protocol FRP entire */
|
||||||
proto_oki = *pos + 1;
|
proto_oki = (nve32_t)*pos;
|
||||||
|
proto_oki += 1;
|
||||||
ret = frp_entry_add(osi_core, cmd->frp_id, *pos,
|
ret = frp_entry_add(osi_core, cmd->frp_id, *pos,
|
||||||
proto_match, proto_lendth,
|
proto_match, proto_lendth,
|
||||||
proto_offset, OSI_FRP_MODE_LINK,
|
proto_offset, OSI_FRP_MODE_LINK,
|
||||||
@@ -480,14 +535,16 @@ static int frp_add_proto(struct osi_core_priv_data *const osi_core,
|
|||||||
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
||||||
"Fail add FRP protocol entry\n",
|
"Fail add FRP protocol entry\n",
|
||||||
OSI_NONE);
|
OSI_NONE);
|
||||||
return ret;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Increment pos value */
|
/* Increment pos value */
|
||||||
*pos = (unsigned char) (*pos + 1U);
|
*pos = (nveu8_t)(*pos + (nveu8_t)1);
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
ret = 0;
|
||||||
|
done:
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -495,15 +552,13 @@ static int frp_add_proto(struct osi_core_priv_data *const osi_core,
|
|||||||
*
|
*
|
||||||
* Algorithm: Parse give FRP command match type and update it's offset.
|
* Algorithm: Parse give FRP command match type and update it's offset.
|
||||||
*
|
*
|
||||||
* @param[in] osi_core: OSI core private data structure.
|
|
||||||
* @param[in] cmd: OSI FRP command structure.
|
* @param[in] cmd: OSI FRP command structure.
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
static void frp_parse_mtype(OSI_UNUSED struct osi_core_priv_data *const osi_core,
|
static void frp_parse_mtype(struct osi_core_frp_cmd *const cmd)
|
||||||
struct osi_core_frp_cmd *const cmd)
|
|
||||||
{
|
{
|
||||||
unsigned char offset;
|
nveu8_t offset;
|
||||||
unsigned char match_type = cmd->match_type;
|
nveu8_t match_type = cmd->match_type;
|
||||||
|
|
||||||
switch (match_type) {
|
switch (match_type) {
|
||||||
case OSI_FRP_MATCH_L2_DA:
|
case OSI_FRP_MATCH_L2_DA:
|
||||||
@@ -549,26 +604,28 @@ static void frp_parse_mtype(OSI_UNUSED struct osi_core_priv_data *const osi_core
|
|||||||
* Algorithm: Parse give FRP delete command and update it on OSI data and HW.
|
* Algorithm: Parse give FRP delete command and update it on OSI data and HW.
|
||||||
*
|
*
|
||||||
* @param[in] osi_core: OSI core private data structure.
|
* @param[in] osi_core: OSI core private data structure.
|
||||||
|
* @param[in] ops_p: Core operations data structure.
|
||||||
* @param[in] cmd: OSI FRP command structure.
|
* @param[in] cmd: OSI FRP command structure.
|
||||||
*
|
*
|
||||||
* @retval 0 on success.
|
* @retval 0 on success.
|
||||||
* @retval -1 on failure.
|
* @retval -1 on failure.
|
||||||
*/
|
*/
|
||||||
static int frp_delete(struct osi_core_priv_data *const osi_core,
|
static nve32_t frp_delete(struct osi_core_priv_data *const osi_core,
|
||||||
struct core_ops *ops_p,
|
struct core_ops *ops_p,
|
||||||
struct osi_core_frp_cmd *const cmd)
|
struct osi_core_frp_cmd *const cmd)
|
||||||
{
|
{
|
||||||
int ret = -1;
|
nve32_t ret;
|
||||||
unsigned char i = 0U, pos = 0U, count = 0U;
|
nveu8_t i = 0U, pos = 0U, count = 0U;
|
||||||
int frp_id = cmd->frp_id;
|
nve32_t frp_id = cmd->frp_id;
|
||||||
unsigned int frp_cnt = osi_core->frp_cnt;
|
nveu32_t frp_cnt = osi_core->frp_cnt;
|
||||||
|
|
||||||
/* Check for FRP entries */
|
/* Check for FRP entries */
|
||||||
if (frp_cnt == 0U) {
|
if (frp_cnt == 0U) {
|
||||||
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
||||||
"No FRP entries in the table\n",
|
"No FRP entries in the table\n",
|
||||||
OSI_NONE);
|
OSI_NONE);
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Find the FRP entry */
|
/* Find the FRP entry */
|
||||||
@@ -576,15 +633,17 @@ static int frp_delete(struct osi_core_priv_data *const osi_core,
|
|||||||
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
||||||
"No FRP entry found to delete\n",
|
"No FRP entry found to delete\n",
|
||||||
OSI_NONE);
|
OSI_NONE);
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Validate pos and count */
|
/* Validate pos and count */
|
||||||
if (((unsigned int)pos + count) > frp_cnt) {
|
if (((nveu32_t)pos + count) > frp_cnt) {
|
||||||
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
||||||
"Invalid FRP entry index\n",
|
"Invalid FRP entry index\n",
|
||||||
OSI_NONE);
|
OSI_NONE);
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Update the frp_table entry */
|
/* Update the frp_table entry */
|
||||||
@@ -592,12 +651,15 @@ static int frp_delete(struct osi_core_priv_data *const osi_core,
|
|||||||
(sizeof(struct osi_core_frp_entry) * count));
|
(sizeof(struct osi_core_frp_entry) * count));
|
||||||
|
|
||||||
/* Move in FRP table entries by count */
|
/* Move in FRP table entries by count */
|
||||||
for (i = (unsigned char) (pos + count); i <= frp_cnt; i++) {
|
for (i = (nveu8_t)(pos + count); i <= frp_cnt; i++) {
|
||||||
frp_entry_copy(&osi_core->frp_table[pos],
|
frp_entry_copy(&osi_core->frp_table[pos],
|
||||||
&osi_core->frp_table[i]);
|
&osi_core->frp_table[i]);
|
||||||
pos++;
|
pos++;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Update the frp_cnt entry */
|
||||||
|
osi_core->frp_cnt = (frp_cnt - count);
|
||||||
|
|
||||||
/* Write FRP Table into HW */
|
/* Write FRP Table into HW */
|
||||||
ret = frp_hw_write(osi_core, ops_p);
|
ret = frp_hw_write(osi_core, ops_p);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
@@ -606,9 +668,7 @@ static int frp_delete(struct osi_core_priv_data *const osi_core,
|
|||||||
OSI_NONE);
|
OSI_NONE);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Update the frp_cnt entry */
|
done:
|
||||||
osi_core->frp_cnt = (frp_cnt - count);
|
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -618,29 +678,31 @@ static int frp_delete(struct osi_core_priv_data *const osi_core,
|
|||||||
* Algorithm: Parse give FRP update command and update it on OSI data and HW.
|
* Algorithm: Parse give FRP update command and update it on OSI data and HW.
|
||||||
*
|
*
|
||||||
* @param[in] osi_core: OSI core private data structure.
|
* @param[in] osi_core: OSI core private data structure.
|
||||||
|
* @param[in] ops_p: Core operations data structure.
|
||||||
* @param[in] cmd: OSI FRP command structure.
|
* @param[in] cmd: OSI FRP command structure.
|
||||||
*
|
*
|
||||||
* @retval 0 on success.
|
* @retval 0 on success.
|
||||||
* @retval -1 on failure.
|
* @retval -1 on failure.
|
||||||
*/
|
*/
|
||||||
static int frp_update(struct osi_core_priv_data *const osi_core,
|
static nve32_t frp_update(struct osi_core_priv_data *const osi_core,
|
||||||
struct core_ops *ops_p,
|
struct core_ops *ops_p,
|
||||||
struct osi_core_frp_cmd *const cmd)
|
struct osi_core_frp_cmd *const cmd)
|
||||||
{
|
{
|
||||||
int ret = -1;
|
nve32_t ret;
|
||||||
unsigned char pos = 0U, count = 0U, req = 0U;
|
nveu8_t pos = 0U, count = 0U, req = 0U;
|
||||||
int frp_id = cmd->frp_id;
|
nve32_t frp_id = cmd->frp_id;
|
||||||
|
|
||||||
/* Validate given frp_id */
|
/* Validate given frp_id */
|
||||||
if (frp_entry_find(osi_core, frp_id, &pos, &count) < 0) {
|
if (frp_entry_find(osi_core, frp_id, &pos, &count) < 0) {
|
||||||
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
|
||||||
"No FRP entry found\n",
|
"No FRP entry found\n",
|
||||||
OSI_NONE);
|
OSI_NONE);
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Parse match type and update command offset */
|
/* Parse match type and update command offset */
|
||||||
frp_parse_mtype(osi_core, cmd);
|
frp_parse_mtype(cmd);
|
||||||
|
|
||||||
/* Calculate the required FRP entries for Update Command. */
|
/* Calculate the required FRP entries for Update Command. */
|
||||||
req = frp_req_entries(cmd->offset, cmd->match_length);
|
req = frp_req_entries(cmd->offset, cmd->match_length);
|
||||||
@@ -662,7 +724,8 @@ static int frp_update(struct osi_core_priv_data *const osi_core,
|
|||||||
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
|
||||||
"Old and New required FRP entries mismatch\n",
|
"Old and New required FRP entries mismatch\n",
|
||||||
OSI_NONE);
|
OSI_NONE);
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Process and update FRP Command Protocal Entry */
|
/* Process and update FRP Command Protocal Entry */
|
||||||
@@ -671,7 +734,7 @@ static int frp_update(struct osi_core_priv_data *const osi_core,
|
|||||||
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
||||||
"Fail to parse match type\n",
|
"Fail to parse match type\n",
|
||||||
OSI_NONE);
|
OSI_NONE);
|
||||||
return ret;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Update FRP entries */
|
/* Update FRP entries */
|
||||||
@@ -683,7 +746,7 @@ static int frp_update(struct osi_core_priv_data *const osi_core,
|
|||||||
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
||||||
"Fail to update FRP entry\n",
|
"Fail to update FRP entry\n",
|
||||||
OSI_NONE);
|
OSI_NONE);
|
||||||
return ret;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Write FRP Table into HW */
|
/* Write FRP Table into HW */
|
||||||
@@ -694,6 +757,7 @@ static int frp_update(struct osi_core_priv_data *const osi_core,
|
|||||||
OSI_NONE);
|
OSI_NONE);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
done:
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -703,26 +767,28 @@ static int frp_update(struct osi_core_priv_data *const osi_core,
|
|||||||
* Algorithm: Parse the given FRP Add command and apply it to the OSI data and HW.
|
* Algorithm: Parse the given FRP Add command and apply it to the OSI data and HW.
|
||||||
*
|
*
|
||||||
* @param[in] osi_core: OSI core private data structure.
|
* @param[in] osi_core: OSI core private data structure.
|
||||||
|
* @param[in] ops_p: Core operations data structure.
|
||||||
* @param[in] cmd: OSI FRP command structure.
|
* @param[in] cmd: OSI FRP command structure.
|
||||||
*
|
*
|
||||||
* @retval 0 on success.
|
* @retval 0 on success.
|
||||||
* @retval -1 on failure.
|
* @retval -1 on failure.
|
||||||
*/
|
*/
|
||||||
static int frp_add(struct osi_core_priv_data *const osi_core,
|
static nve32_t frp_add(struct osi_core_priv_data *const osi_core,
|
||||||
struct core_ops *ops_p,
|
struct core_ops *ops_p,
|
||||||
struct osi_core_frp_cmd *const cmd)
|
struct osi_core_frp_cmd *const cmd)
|
||||||
{
|
{
|
||||||
int ret = -1;
|
nve32_t ret;
|
||||||
unsigned char pos = 0U, count = 0U;
|
nveu8_t pos = 0U, count = 0U;
|
||||||
int frp_id = cmd->frp_id;
|
nve32_t frp_id = cmd->frp_id;
|
||||||
unsigned int nve = osi_core->frp_cnt;
|
nveu32_t nve = osi_core->frp_cnt;
|
||||||
|
|
||||||
/* Check for MAX FRP entries */
|
/* Check for MAX FRP entries */
|
||||||
if (nve >= OSI_FRP_MAX_ENTRY) {
|
if (nve >= OSI_FRP_MAX_ENTRY) {
|
||||||
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND,
|
||||||
"FRP etries are full\n",
|
"FRP etries are full\n",
|
||||||
nve);
|
nve);
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Check the FRP entry already exists */
|
/* Check the FRP entry already exists */
|
||||||
@@ -731,23 +797,24 @@ static int frp_add(struct osi_core_priv_data *const osi_core,
|
|||||||
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
|
||||||
"FRP entry already exists\n",
|
"FRP entry already exists\n",
|
||||||
OSI_NONE);
|
OSI_NONE);
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Parse match type and update command offset */
|
/* Parse match type and update command offset */
|
||||||
frp_parse_mtype(osi_core, cmd);
|
frp_parse_mtype(cmd);
|
||||||
|
|
||||||
/* Process and add FRP Command Protocol Entry */
|
/* Process and add FRP Command Protocol Entry */
|
||||||
ret = frp_add_proto(osi_core, cmd, (unsigned char *)&nve);
|
ret = frp_add_proto(osi_core, cmd, (nveu8_t *)&nve);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
||||||
"Fail to parse match type\n",
|
"Fail to parse match type\n",
|
||||||
OSI_NONE);
|
OSI_NONE);
|
||||||
return ret;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Add Match data FRP Entry */
|
/* Add Match data FRP Entry */
|
||||||
ret = frp_entry_add(osi_core, frp_id, (unsigned char)nve,
|
ret = frp_entry_add(osi_core, frp_id, (nveu8_t)nve,
|
||||||
cmd->match, cmd->match_length,
|
cmd->match, cmd->match_length,
|
||||||
cmd->offset, cmd->filter_mode,
|
cmd->offset, cmd->filter_mode,
|
||||||
cmd->next_frp_id, cmd->dma_sel);
|
cmd->next_frp_id, cmd->dma_sel);
|
||||||
@@ -755,7 +822,7 @@ static int frp_add(struct osi_core_priv_data *const osi_core,
|
|||||||
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
|
||||||
"Fail to add FRP entry\n",
|
"Fail to add FRP entry\n",
|
||||||
nve);
|
nve);
|
||||||
return ret;
|
goto done;
|
||||||
}
|
}
|
||||||
osi_core->frp_cnt = nve + frp_req_entries(cmd->offset,
|
osi_core->frp_cnt = nve + frp_req_entries(cmd->offset,
|
||||||
cmd->match_length);
|
cmd->match_length);
|
||||||
@@ -768,6 +835,7 @@ static int frp_add(struct osi_core_priv_data *const osi_core,
|
|||||||
OSI_NONE);
|
OSI_NONE);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
done:
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -777,16 +845,17 @@ static int frp_add(struct osi_core_priv_data *const osi_core,
|
|||||||
* Algorithm: Parse the given FRP command and apply it to the OSI data and HW.
|
* Algorithm: Parse the given FRP command and apply it to the OSI data and HW.
|
||||||
*
|
*
|
||||||
* @param[in] osi_core: OSI core private data structure.
|
* @param[in] osi_core: OSI core private data structure.
|
||||||
|
* @param[in] ops_p: Core operations data structure.
|
||||||
* @param[in] cmd: OSI FRP command structure.
|
* @param[in] cmd: OSI FRP command structure.
|
||||||
*
|
*
|
||||||
* @retval 0 on success.
|
* @retval 0 on success.
|
||||||
* @retval -1 on failure.
|
* @retval -1 on failure.
|
||||||
*/
|
*/
|
||||||
int setup_frp(struct osi_core_priv_data *const osi_core,
|
nve32_t setup_frp(struct osi_core_priv_data *const osi_core,
|
||||||
struct core_ops *ops_p,
|
struct core_ops *ops_p,
|
||||||
struct osi_core_frp_cmd *const cmd)
|
struct osi_core_frp_cmd *const cmd)
|
||||||
{
|
{
|
||||||
int ret = -1;
|
nve32_t ret = -1;
|
||||||
|
|
||||||
switch (cmd->cmd) {
|
switch (cmd->cmd) {
|
||||||
case OSI_FRP_CMD_ADD:
|
case OSI_FRP_CMD_ADD:
|
||||||
@@ -817,20 +886,3 @@ int setup_frp(struct osi_core_priv_data *const osi_core,
|
|||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief init_frp - Initialize FRP.
|
|
||||||
*
|
|
||||||
* Algorithm: Reset all the data in the FRP table and initialize the FRP count to zero.
|
|
||||||
*
|
|
||||||
* @param[in] osi_core: OSI core private data structure.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
void init_frp(struct osi_core_priv_data *const osi_core)
|
|
||||||
{
|
|
||||||
/* Reset the NVE count to zero */
|
|
||||||
osi_core->frp_cnt = 0U;
|
|
||||||
/* Clear all instruction of FRP */
|
|
||||||
osi_memset(osi_core->frp_table, 0U,
|
|
||||||
(sizeof(struct osi_core_frp_entry) * OSI_FRP_MAX_ENTRY));
|
|
||||||
}
|
|
||||||
|
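The frp.c hunks above converge every error path on a single done: label instead of returning early. A minimal sketch of that pattern, with validate_request() as a hypothetical stand-in for the entry checks, reads:

static nve32_t frp_example_op(struct osi_core_priv_data *const osi_core,
			      struct core_ops *const ops_p)
{
	nve32_t ret = 0;

	/* hypothetical stand-in for the frp_entry_find()/frp_req_entries() checks */
	if (validate_request(osi_core) < 0) {
		ret = -1;
		goto done;
	}

	/* success path: push the in-memory FRP table to hardware */
	ret = frp_hw_write(osi_core, ops_p);

done:
	return ret;
}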
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -64,21 +64,20 @@
|
|||||||
* @retval 0 on success.
|
* @retval 0 on success.
|
||||||
* @retval -1 on failure.
|
* @retval -1 on failure.
|
||||||
*/
|
*/
|
||||||
int setup_frp(struct osi_core_priv_data *const osi_core,
|
nve32_t setup_frp(struct osi_core_priv_data *const osi_core,
|
||||||
struct core_ops *ops_p,
|
struct core_ops *ops_p,
|
||||||
struct osi_core_frp_cmd *const cmd);
|
struct osi_core_frp_cmd *const cmd);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief init_frp - Init the FRP Instruction Table.
|
* @brief frp_hw_write - Update HW FRP table.
|
||||||
|
*
|
||||||
|
* Algorithm: Update FRP table into HW.
|
||||||
*
|
*
|
||||||
* @param[in] osi_core: OSI core private data structure.
|
* @param[in] osi_core: OSI core private data structure.
|
||||||
*
|
*
|
||||||
* @note
|
* @retval 0 on success.
|
||||||
* 1) MAC and PHY should be init and started. see osi_start_mac()
|
|
||||||
*
|
|
||||||
* @retval 0 on success
|
|
||||||
* @retval -1 on failure.
|
* @retval -1 on failure.
|
||||||
*/
|
*/
|
||||||
void init_frp(struct osi_core_priv_data *const osi_core);
|
nve32_t frp_hw_write(struct osi_core_priv_data *const osi_core,
|
||||||
|
struct core_ops *const ops_p);
|
||||||
#endif /* FRP_H */
|
#endif /* FRP_H */
|
||||||
|
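With setup_frp() exported from this header, a caller fills an osi_core_frp_cmd and lets the command switch dispatch to the add, update, or delete path. A hedged usage sketch (the example field values are assumptions; the field names come from the hunks above):

struct osi_core_frp_cmd cmd = {0};

cmd.cmd = OSI_FRP_CMD_ADD;     /* other command values dispatch to update/delete */
cmd.frp_id = 1;
cmd.offset = 0U;               /* assumed example values */
cmd.match_length = 4U;

if (setup_frp(osi_core, ops_p, &cmd) < 0) {
	/* 0 on success, -1 on failure per the @retval documentation */
}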
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -30,11 +30,6 @@
|
|||||||
#include "../osi/common/common.h"
|
#include "../osi/common/common.h"
|
||||||
#include "macsec.h"
|
#include "macsec.h"
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief ivc_safety_config - EQOS MAC core safety configuration
|
|
||||||
*/
|
|
||||||
static struct core_func_safety ivc_safety_config;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief ivc_handle_ioctl - marshal input arguments to handle a runtime command
|
* @brief ivc_handle_ioctl - marshal input arguments to handle a runtime command
|
||||||
*
|
*
|
||||||
@@ -55,27 +50,40 @@ static nve32_t ivc_handle_ioctl(struct osi_core_priv_data *osi_core,
|
|||||||
osi_memset(&msg, 0, sizeof(msg));
|
osi_memset(&msg, 0, sizeof(msg));
|
||||||
|
|
||||||
msg.cmd = handle_ioctl;
|
msg.cmd = handle_ioctl;
|
||||||
msg.status = osi_memcpy((void *)&msg.data.ioctl_data,
|
/* osi_memcpy is treated as void since it is
|
||||||
(void *)data,
|
* an internal function which always succeeds
|
||||||
|
*/
|
||||||
|
(void)osi_memcpy((void *)&msg.data.ioctl_data, (void *)data,
|
||||||
sizeof(struct osi_ioctl));
|
sizeof(struct osi_ioctl));
|
||||||
|
|
||||||
if (data->cmd == OSI_CMD_CONFIG_PTP) {
|
if (data->cmd == OSI_CMD_CONFIG_PTP) {
|
||||||
osi_memcpy((void *)&msg.data.ioctl_data.ptp_config,
|
(void)osi_memcpy((void *)&msg.data.ioctl_data.ptp_config,
|
||||||
(void *)&osi_core->ptp_config,
|
(void *)&osi_core->ptp_config,
|
||||||
sizeof(struct osi_ptp_config));
|
sizeof(struct osi_ptp_config));
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
||||||
|
|
||||||
if (data->cmd == OSI_CMD_READ_MMC) {
|
switch (data->cmd) {
|
||||||
msg.status = osi_memcpy((void *)&osi_core->mmc,
|
case OSI_CMD_READ_MMC:
|
||||||
(void *)&msg.data.mmc,
|
(void)osi_memcpy((void *)&osi_core->mmc,
|
||||||
|
(void *)&msg.data.mmc_s,
|
||||||
sizeof(struct osi_mmc_counters));
|
sizeof(struct osi_mmc_counters));
|
||||||
} else {
|
break;
|
||||||
msg.status = osi_memcpy((void *)data,
|
|
||||||
|
case OSI_CMD_READ_STATS:
|
||||||
|
(void)osi_memcpy((void *)&osi_core->stats,
|
||||||
|
(void *)&msg.data.stats_s,
|
||||||
|
sizeof(struct osi_stats));
|
||||||
|
break;
|
||||||
|
|
||||||
|
default:
|
||||||
|
(void)osi_memcpy((void *)data,
|
||||||
(void *)&msg.data.ioctl_data,
|
(void *)&msg.data.ioctl_data,
|
||||||
sizeof(struct osi_ioctl));
|
sizeof(struct osi_ioctl));
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
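Every IVC helper in this file follows the same fill, send, copy-back round trip over osd_ops.ivc_send(). A condensed sketch of that contract (the command value is only illustrative):

ivc_msg_common_t msg;
nve32_t ret;

osi_memset(&msg, 0, sizeof(msg));
msg.cmd = read_phy_reg;        /* any command understood by the server side */

/* blocking request/response over IVC; the reply is written back into msg */
ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
if (ret == 0) {
	/* copy whichever reply payload the command defines out of msg.data */
}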
||||||
@@ -83,15 +91,11 @@ static nve32_t ivc_handle_ioctl(struct osi_core_priv_data *osi_core,
|
|||||||
* @brief ivc_core_init - EQOS MAC, MTL and common DMA Initialization
|
* @brief ivc_core_init - EQOS MAC, MTL and common DMA Initialization
|
||||||
*
|
*
|
||||||
* @param[in] osi_core: OSI core private data structure.
|
* @param[in] osi_core: OSI core private data structure.
|
||||||
* @param[in] tx_fifo_size: MTL TX FIFO size
|
|
||||||
* @param[in] rx_fifo_size: MTL RX FIFO size
|
|
||||||
*
|
*
|
||||||
* @retval 0 on success
|
* @retval 0 on success
|
||||||
* @retval -1 on failure.
|
* @retval -1 on failure.
|
||||||
*/
|
*/
|
||||||
static nve32_t ivc_core_init(struct osi_core_priv_data *const osi_core,
|
static nve32_t ivc_core_init(struct osi_core_priv_data *const osi_core)
|
||||||
OSI_UNUSED nveu32_t tx_fifo_size,
|
|
||||||
OSI_UNUSED nveu32_t rx_fifo_size)
|
|
||||||
{
|
{
|
||||||
ivc_msg_common_t msg;
|
ivc_msg_common_t msg;
|
||||||
|
|
||||||
@@ -117,8 +121,7 @@ static void ivc_core_deinit(struct osi_core_priv_data *const osi_core)
|
|||||||
|
|
||||||
osi_memset(&msg, 0, sizeof(msg));
|
osi_memset(&msg, 0, sizeof(msg));
|
||||||
|
|
||||||
msg.cmd = handle_ioctl;
|
msg.cmd = core_deinit;
|
||||||
msg.data.ioctl_data.cmd = OSI_CMD_STOP_MAC;
|
|
||||||
|
|
||||||
ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
@@ -151,10 +154,10 @@ static nve32_t ivc_write_phy_reg(struct osi_core_priv_data *const osi_core,
|
|||||||
osi_memset(&msg, 0, sizeof(msg));
|
osi_memset(&msg, 0, sizeof(msg));
|
||||||
|
|
||||||
msg.cmd = write_phy_reg;
|
msg.cmd = write_phy_reg;
|
||||||
msg.data.args.arguments[index++] = phyaddr;
|
msg.args.arguments[index++] = phyaddr;
|
||||||
msg.data.args.arguments[index++] = phyreg;
|
msg.args.arguments[index++] = phyreg;
|
||||||
msg.data.args.arguments[index++] = phydata;
|
msg.args.arguments[index++] = phydata;
|
||||||
msg.data.args.count = index;
|
msg.args.count = index;
|
||||||
|
|
||||||
return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
||||||
}
|
}
|
||||||
@@ -182,14 +185,15 @@ static nve32_t ivc_read_phy_reg(struct osi_core_priv_data *const osi_core,
|
|||||||
osi_memset(&msg, 0, sizeof(msg));
|
osi_memset(&msg, 0, sizeof(msg));
|
||||||
|
|
||||||
msg.cmd = read_phy_reg;
|
msg.cmd = read_phy_reg;
|
||||||
msg.data.args.arguments[index++] = phyaddr;
|
msg.args.arguments[index++] = phyaddr;
|
||||||
msg.data.args.arguments[index++] = phyreg;
|
msg.args.arguments[index++] = phyreg;
|
||||||
msg.data.args.count = index;
|
msg.args.count = index;
|
||||||
|
|
||||||
return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef MACSEC_SUPPORT
|
#ifdef MACSEC_SUPPORT
|
||||||
|
#ifdef DEBUG_MACSEC
|
||||||
/**
|
/**
|
||||||
* @brief ivc_macsec_dbg_events_config - Configure Debug events
|
* @brief ivc_macsec_dbg_events_config - Configure Debug events
|
||||||
*
|
*
|
||||||
@@ -199,7 +203,7 @@ static nve32_t ivc_read_phy_reg(struct osi_core_priv_data *const osi_core,
|
|||||||
* @retval 0 on success
|
* @retval 0 on success
|
||||||
* @retval -1 on failure.
|
* @retval -1 on failure.
|
||||||
*/
|
*/
|
||||||
static int ivc_macsec_dbg_events_config(
|
static nve32_t ivc_macsec_dbg_events_config(
|
||||||
struct osi_core_priv_data *const osi_core,
|
struct osi_core_priv_data *const osi_core,
|
||||||
struct osi_macsec_dbg_buf_config *const dbg_buf_config)
|
struct osi_macsec_dbg_buf_config *const dbg_buf_config)
|
||||||
{
|
{
|
||||||
@@ -210,19 +214,19 @@ static int ivc_macsec_dbg_events_config(
|
|||||||
|
|
||||||
msg.cmd = dbg_events_config_macsec;
|
msg.cmd = dbg_events_config_macsec;
|
||||||
|
|
||||||
msg.status = osi_memcpy((void *)&msg.data.dbg_buf_config,
|
(void)osi_memcpy((void *)&msg.data.dbg_buf_config,
|
||||||
(void *)dbg_buf_config,
|
(void *)dbg_buf_config,
|
||||||
sizeof(struct osi_macsec_dbg_buf_config));
|
sizeof(struct osi_macsec_dbg_buf_config));
|
||||||
|
|
||||||
ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
||||||
if (ret != 0) {
|
if (ret != 0) {
|
||||||
return ret;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
msg.status = osi_memcpy((void *)dbg_buf_config,
|
(void)osi_memcpy((void *)dbg_buf_config,
|
||||||
(void *)&msg.data.dbg_buf_config,
|
(void *)&msg.data.dbg_buf_config,
|
||||||
sizeof(struct osi_macsec_dbg_buf_config));
|
sizeof(struct osi_macsec_dbg_buf_config));
|
||||||
|
done:
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -235,7 +239,7 @@ static int ivc_macsec_dbg_events_config(
|
|||||||
* @retval 0 on Success
|
* @retval 0 on Success
|
||||||
* @retval -1 on Failure
|
* @retval -1 on Failure
|
||||||
*/
|
*/
|
||||||
static int ivc_macsec_dbg_buf_config(
|
static nve32_t ivc_macsec_dbg_buf_config(
|
||||||
struct osi_core_priv_data *const osi_core,
|
struct osi_core_priv_data *const osi_core,
|
||||||
struct osi_macsec_dbg_buf_config *const dbg_buf_config)
|
struct osi_macsec_dbg_buf_config *const dbg_buf_config)
|
||||||
{
|
{
|
||||||
@@ -246,21 +250,22 @@ static int ivc_macsec_dbg_buf_config(
|
|||||||
|
|
||||||
msg.cmd = dbg_buf_config_macsec;
|
msg.cmd = dbg_buf_config_macsec;
|
||||||
|
|
||||||
msg.status = osi_memcpy((void *)&msg.data.dbg_buf_config,
|
(void)osi_memcpy((void *)&msg.data.dbg_buf_config,
|
||||||
(void *)dbg_buf_config,
|
(void *)dbg_buf_config,
|
||||||
sizeof(struct osi_macsec_dbg_buf_config));
|
sizeof(struct osi_macsec_dbg_buf_config));
|
||||||
|
|
||||||
ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
||||||
if (ret != 0) {
|
if (ret != 0) {
|
||||||
return ret;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
msg.status = osi_memcpy((void *)dbg_buf_config,
|
(void)osi_memcpy((void *)dbg_buf_config,
|
||||||
(void *) &msg.data.dbg_buf_config,
|
(void *) &msg.data.dbg_buf_config,
|
||||||
sizeof(struct osi_macsec_dbg_buf_config));
|
sizeof(struct osi_macsec_dbg_buf_config));
|
||||||
|
done:
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
#endif /* DEBUG_MACSEC */
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief macsec_read_mmc - To read statistics registers and update the structure
|
* @brief macsec_read_mmc - To read statistics registers and update the structure
|
||||||
@@ -284,10 +289,10 @@ static void ivc_macsec_read_mmc(struct osi_core_priv_data *const osi_core)
|
|||||||
|
|
||||||
msg.status = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
msg.status = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
||||||
|
|
||||||
msg.status = osi_memcpy((void *)&osi_core->macsec_mmc,
|
(void)osi_memcpy((void *)&osi_core->macsec_mmc,
|
||||||
(void *) &msg.data.macsec_mmc,
|
(void *) &msg.data.macsec_mmc,
|
||||||
sizeof(struct osi_macsec_mmc_counters));
|
sizeof(struct osi_macsec_mmc_counters));
|
||||||
msg.status = osi_memcpy((void *)&osi_core->macsec_irq_stats,
|
(void)osi_memcpy((void *)&osi_core->macsec_irq_stats,
|
||||||
(void *) &msg.data.macsec_irq_stats,
|
(void *) &msg.data.macsec_irq_stats,
|
||||||
sizeof(struct osi_macsec_irq_stats));
|
sizeof(struct osi_macsec_irq_stats));
|
||||||
}
|
}
|
||||||
@@ -296,15 +301,14 @@ static void ivc_macsec_read_mmc(struct osi_core_priv_data *const osi_core)
|
|||||||
* @brief ivc_get_sc_lut_key_index - Macsec get Key_index
|
* @brief ivc_get_sc_lut_key_index - Macsec get Key_index
|
||||||
*
|
*
|
||||||
* @param[in] osi_core: OSI Core private data structure.
|
* @param[in] osi_core: OSI Core private data structure.
|
||||||
* @param[in] sc: Secure Channel info.
|
* @param[in] sci: Secure Channel info.
|
||||||
* @param[in] enable: enable or disable.
|
* @param[out] key_index: Key table index to program SAK.
|
||||||
* @param[in] ctlr: Controller instance.
|
* @param[in] ctlr: Controller instance.
|
||||||
* @param[[out] kt_idx: Key table index to program SAK.
|
|
||||||
*
|
*
|
||||||
* @retval 0 on Success
|
* @retval 0 on Success
|
||||||
* @retval -1 on Failure
|
* @retval -1 on Failure
|
||||||
*/
|
*/
|
||||||
static int ivc_get_sc_lut_key_index(struct osi_core_priv_data *const osi_core,
|
static nve32_t ivc_get_sc_lut_key_index(struct osi_core_priv_data *const osi_core,
|
||||||
nveu8_t *sci, nveu32_t *key_index,
|
nveu8_t *sci, nveu32_t *key_index,
|
||||||
nveu16_t ctlr)
|
nveu16_t ctlr)
|
||||||
{
|
{
|
||||||
@@ -314,17 +318,16 @@ static int ivc_get_sc_lut_key_index(struct osi_core_priv_data *const osi_core,
|
|||||||
osi_memset(&msg, 0, sizeof(msg));
|
osi_memset(&msg, 0, sizeof(msg));
|
||||||
|
|
||||||
msg.cmd = macsec_get_sc_lut_key_index;
|
msg.cmd = macsec_get_sc_lut_key_index;
|
||||||
msg.status = osi_memcpy((void *) &msg.data.macsec_cfg.sci,
|
(void)osi_memcpy((void *) &msg.data.macsec_cfg.sci,
|
||||||
(void *)sci,
|
(void *)sci,
|
||||||
OSI_SCI_LEN);
|
OSI_SCI_LEN);
|
||||||
msg.data.macsec_cfg.ctlr = ctlr;
|
msg.data.macsec_cfg.ctlr = ctlr;
|
||||||
|
|
||||||
ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
||||||
if (ret != 0) {
|
if (ret == 0) {
|
||||||
return ret;
|
*key_index = msg.data.macsec_cfg.key_index;
|
||||||
}
|
}
|
||||||
|
|
||||||
*key_index = msg.data.macsec_cfg.key_index;
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
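A caller-side sketch for the key-index lookup, reached through the ops table registered later in this diff as ops->get_sc_lut_key_index (the zero controller selector and local names are assumptions; OSI_SCI_LEN comes from the code above):

nveu8_t sci[OSI_SCI_LEN] = {0};  /* secure channel identifier to look up */
nveu32_t key_index = 0;

if (ops->get_sc_lut_key_index(osi_core, sci, &key_index, 0U) == 0) {
	/* key_index identifies the key-table slot used to program the SAK */
}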
||||||
@@ -335,15 +338,15 @@ static int ivc_get_sc_lut_key_index(struct osi_core_priv_data *const osi_core,
|
|||||||
* @param[in] sc: Secure Channel info.
|
* @param[in] sc: Secure Channel info.
|
||||||
* @param[in] enable: enable or disable.
|
* @param[in] enable: enable or disable.
|
||||||
* @param[in] ctlr: Controller instance.
|
* @param[in] ctlr: Controller instance.
|
||||||
* @param[[out] kt_idx: Key table index to program SAK.
|
* @param[out] kt_idx: Key table index to program SAK.
|
||||||
*
|
*
|
||||||
* @retval 0 on Success
|
* @retval 0 on Success
|
||||||
* @retval -1 on Failure
|
* @retval -1 on Failure
|
||||||
*/
|
*/
|
||||||
static int ivc_macsec_config(struct osi_core_priv_data *const osi_core,
|
static nve32_t ivc_macsec_config(struct osi_core_priv_data *const osi_core,
|
||||||
struct osi_macsec_sc_info *const sc,
|
struct osi_macsec_sc_info *const sc,
|
||||||
unsigned int enable, unsigned short ctlr,
|
nveu32_t enable, nveu16_t ctlr,
|
||||||
unsigned short *kt_idx)
|
nveu16_t *kt_idx)
|
||||||
{
|
{
|
||||||
ivc_msg_common_t msg;
|
ivc_msg_common_t msg;
|
||||||
nve32_t ret = 0;
|
nve32_t ret = 0;
|
||||||
@@ -351,7 +354,7 @@ static int ivc_macsec_config(struct osi_core_priv_data *const osi_core,
|
|||||||
osi_memset(&msg, 0, sizeof(msg));
|
osi_memset(&msg, 0, sizeof(msg));
|
||||||
|
|
||||||
msg.cmd = config_macsec;
|
msg.cmd = config_macsec;
|
||||||
msg.status = osi_memcpy((void *) &msg.data.macsec_cfg.sc_info,
|
(void)osi_memcpy((void *) &msg.data.macsec_cfg.sc_info,
|
||||||
(void *)sc,
|
(void *)sc,
|
||||||
sizeof(struct osi_macsec_sc_info));
|
sizeof(struct osi_macsec_sc_info));
|
||||||
msg.data.macsec_cfg.enable = enable;
|
msg.data.macsec_cfg.enable = enable;
|
||||||
@@ -360,38 +363,14 @@ static int ivc_macsec_config(struct osi_core_priv_data *const osi_core,
|
|||||||
|
|
||||||
ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
||||||
if (ret != 0) {
|
if (ret != 0) {
|
||||||
return ret;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
*kt_idx = msg.data.macsec_cfg.kt_idx;
|
*kt_idx = msg.data.macsec_cfg.kt_idx;
|
||||||
|
done:
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief ivc_macsec_update_mtu - Update MACSEC mtu.
|
|
||||||
*
|
|
||||||
* @param[in] osi_core: OSI Core private data structure.
|
|
||||||
* @param[in] mtu: MACSEC MTU len.
|
|
||||||
*
|
|
||||||
* @retval 0 on Success
|
|
||||||
* @retval -1 on Failure
|
|
||||||
*/
|
|
||||||
static nve32_t ivc_macsec_update_mtu(struct osi_core_priv_data *const osi_core,
|
|
||||||
nveu32_t mtu)
|
|
||||||
{
|
|
||||||
ivc_msg_common_t msg;
|
|
||||||
nveu32_t index = 0;
|
|
||||||
|
|
||||||
osi_memset(&msg, 0, sizeof(msg));
|
|
||||||
|
|
||||||
msg.cmd = macsec_update_mtu_size;
|
|
||||||
msg.data.args.arguments[index] = mtu;
|
|
||||||
index++;
|
|
||||||
msg.data.args.count = index;
|
|
||||||
|
|
||||||
return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief ivc_macsec_enable - Enable or disable Macsec.
|
* @brief ivc_macsec_enable - Enable or disable Macsec.
|
||||||
*
|
*
|
||||||
@@ -401,8 +380,8 @@ static nve32_t ivc_macsec_update_mtu(struct osi_core_priv_data *const osi_core,
|
|||||||
* @retval 0 on Success
|
* @retval 0 on Success
|
||||||
* @retval -1 on Failure
|
* @retval -1 on Failure
|
||||||
*/
|
*/
|
||||||
static int ivc_macsec_enable(struct osi_core_priv_data *const osi_core,
|
static nve32_t ivc_macsec_enable(struct osi_core_priv_data *const osi_core,
|
||||||
unsigned int enable)
|
nveu32_t enable)
|
||||||
{
|
{
|
||||||
ivc_msg_common_t msg;
|
ivc_msg_common_t msg;
|
||||||
nveu32_t index = 0;
|
nveu32_t index = 0;
|
||||||
@@ -410,13 +389,14 @@ static int ivc_macsec_enable(struct osi_core_priv_data *const osi_core,
|
|||||||
osi_memset(&msg, 0, sizeof(msg));
|
osi_memset(&msg, 0, sizeof(msg));
|
||||||
|
|
||||||
msg.cmd = en_macsec;
|
msg.cmd = en_macsec;
|
||||||
msg.data.args.arguments[index] = enable;
|
msg.args.arguments[index] = enable;
|
||||||
index++;
|
index++;
|
||||||
msg.data.args.count = index;
|
msg.args.count = index;
|
||||||
|
|
||||||
return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef DEBUG_MACSEC
|
||||||
/**
|
/**
|
||||||
* @brief ivc_macsec_loopback_config - Loopback configure.
|
* @brief ivc_macsec_loopback_config - Loopback configure.
|
||||||
*
|
*
|
||||||
@@ -426,8 +406,8 @@ static int ivc_macsec_enable(struct osi_core_priv_data *const osi_core,
|
|||||||
* @retval 0 on Success
|
* @retval 0 on Success
|
||||||
* @retval -1 on Failure
|
* @retval -1 on Failure
|
||||||
*/
|
*/
|
||||||
static int ivc_macsec_loopback_config(struct osi_core_priv_data *const osi_core,
|
static nve32_t ivc_macsec_loopback_config(struct osi_core_priv_data *const osi_core,
|
||||||
unsigned int enable)
|
nveu32_t enable)
|
||||||
{
|
{
|
||||||
ivc_msg_common_t msg;
|
ivc_msg_common_t msg;
|
||||||
nveu32_t index = 0;
|
nveu32_t index = 0;
|
||||||
@@ -435,12 +415,13 @@ static int ivc_macsec_loopback_config(struct osi_core_priv_data *const osi_core,
|
|||||||
osi_memset(&msg, 0, sizeof(msg));
|
osi_memset(&msg, 0, sizeof(msg));
|
||||||
|
|
||||||
msg.cmd = loopback_config_macsec;
|
msg.cmd = loopback_config_macsec;
|
||||||
msg.data.args.arguments[index] = enable;
|
msg.args.arguments[index] = enable;
|
||||||
index++;
|
index++;
|
||||||
msg.data.args.count = index;
|
msg.args.count = index;
|
||||||
|
|
||||||
return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
||||||
}
|
}
|
||||||
|
#endif /* DEBUG_MACSEC */
|
||||||
|
|
||||||
#ifdef MACSEC_KEY_PROGRAM
|
#ifdef MACSEC_KEY_PROGRAM
|
||||||
/**
|
/**
|
||||||
@@ -461,7 +442,7 @@ static nve32_t ivc_macsec_kt_config(struct osi_core_priv_data *const osi_core,
|
|||||||
osi_memset(&msg, 0, sizeof(msg));
|
osi_memset(&msg, 0, sizeof(msg));
|
||||||
|
|
||||||
msg.cmd = kt_config_macsec;
|
msg.cmd = kt_config_macsec;
|
||||||
msg.status = osi_memcpy((void *) &msg.data.kt_config,
|
(void)osi_memcpy((void *) &msg.data.kt_config,
|
||||||
(void *)kt_config,
|
(void *)kt_config,
|
||||||
sizeof(struct osi_macsec_kt_config));
|
sizeof(struct osi_macsec_kt_config));
|
||||||
|
|
||||||
@@ -470,7 +451,7 @@ static nve32_t ivc_macsec_kt_config(struct osi_core_priv_data *const osi_core,
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
msg.status = osi_memcpy((void *)kt_config,
|
(void)osi_memcpy((void *)kt_config,
|
||||||
(void *)&msg.data.kt_config,
|
(void *)&msg.data.kt_config,
|
||||||
sizeof(struct osi_macsec_kt_config));
|
sizeof(struct osi_macsec_kt_config));
|
||||||
return ret;
|
return ret;
|
||||||
@@ -486,8 +467,8 @@ static nve32_t ivc_macsec_kt_config(struct osi_core_priv_data *const osi_core,
|
|||||||
* @retval 0 on Success
|
* @retval 0 on Success
|
||||||
* @retval -1 on Failure
|
* @retval -1 on Failure
|
||||||
*/
|
*/
|
||||||
static int ivc_macsec_cipher_config(struct osi_core_priv_data *const osi_core,
|
static nve32_t ivc_macsec_cipher_config(struct osi_core_priv_data *const osi_core,
|
||||||
unsigned int cipher)
|
nveu32_t cipher)
|
||||||
{
|
{
|
||||||
ivc_msg_common_t msg;
|
ivc_msg_common_t msg;
|
||||||
nveu32_t index = 0;
|
nveu32_t index = 0;
|
||||||
@@ -495,9 +476,9 @@ static int ivc_macsec_cipher_config(struct osi_core_priv_data *const osi_core,
|
|||||||
osi_memset(&msg, 0, sizeof(msg));
|
osi_memset(&msg, 0, sizeof(msg));
|
||||||
|
|
||||||
msg.cmd = cipher_config;
|
msg.cmd = cipher_config;
|
||||||
msg.data.args.arguments[index] = cipher;
|
msg.args.arguments[index] = cipher;
|
||||||
index++;
|
index++;
|
||||||
msg.data.args.count = index;
|
msg.args.count = index;
|
||||||
|
|
||||||
return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
||||||
}
|
}
|
||||||
@@ -519,42 +500,29 @@ static nve32_t ivc_macsec_lut_config(struct osi_core_priv_data *const osi_core,
|
|||||||
osi_memset(&msg, 0, sizeof(msg));
|
osi_memset(&msg, 0, sizeof(msg));
|
||||||
|
|
||||||
msg.cmd = lut_config_macsec;
|
msg.cmd = lut_config_macsec;
|
||||||
msg.status = osi_memcpy((void *) &msg.data.lut_config,
|
(void)osi_memcpy((void *) &msg.data.lut_config,
|
||||||
(void *)lut_config,
|
(void *)lut_config,
|
||||||
sizeof(struct osi_macsec_lut_config));
|
sizeof(struct osi_macsec_lut_config));
|
||||||
|
|
||||||
ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
||||||
if (ret != 0) {
|
if (ret != 0) {
|
||||||
return ret;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
msg.status = osi_memcpy((void *)lut_config,
|
(void)osi_memcpy((void *)lut_config,
|
||||||
(void *)&msg.data.lut_config,
|
(void *)&msg.data.lut_config,
|
||||||
sizeof(struct osi_macsec_lut_config));
|
sizeof(struct osi_macsec_lut_config));
|
||||||
|
done:
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief ivc_macsec_handle_s_irq - handle s irq.
|
* @brief ivc_macsec_handle_irq - handle macsec irq.
|
||||||
*
|
*
|
||||||
* @param[in] osi_core: OSI Core private data structure.
|
* @param[in] osi_core: OSI Core private data structure.
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
static void ivc_macsec_handle_s_irq(OSI_UNUSED
|
static void ivc_macsec_handle_irq(OSI_UNUSED
|
||||||
struct osi_core_priv_data *const osi_core)
|
|
||||||
{
|
|
||||||
OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID,
|
|
||||||
"Nothing to handle \n", 0ULL);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief ivc_macsec_handle_ns_irq - handle ns irq.
|
|
||||||
*
|
|
||||||
* @param[in] osi_core: OSI Core private data structure.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
static void ivc_macsec_handle_ns_irq(OSI_UNUSED
|
|
||||||
struct osi_core_priv_data *const osi_core)
|
struct osi_core_priv_data *const osi_core)
|
||||||
{
|
{
|
||||||
OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID,
|
OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID,
|
||||||
@@ -570,7 +538,7 @@ static void ivc_macsec_handle_ns_irq(OSI_UNUSED
|
|||||||
* @retval -1 on Failure
|
* @retval -1 on Failure
|
||||||
*/
|
*/
|
||||||
|
|
||||||
static int ivc_macsec_deinit(struct osi_core_priv_data *const osi_core)
|
static nve32_t ivc_macsec_deinit(struct osi_core_priv_data *const osi_core)
|
||||||
{
|
{
|
||||||
ivc_msg_common_t msg;
|
ivc_msg_common_t msg;
|
||||||
|
|
||||||
@@ -585,12 +553,12 @@ static int ivc_macsec_deinit(struct osi_core_priv_data *const osi_core)
|
|||||||
* @brief ivc_macsec_init - Initialize.
|
* @brief ivc_macsec_init - Initialize.
|
||||||
*
|
*
|
||||||
* @param[in] osi_core: OSI Core private data structure.
|
* @param[in] osi_core: OSI Core private data structure.
|
||||||
* @param[in] genl_info: Generic netlink information structure.
|
* @param[in] mtu: mtu to be set.
|
||||||
*
|
*
|
||||||
* @retval 0 on Success
|
* @retval 0 on Success
|
||||||
* @retval -1 on Failure
|
* @retval -1 on Failure
|
||||||
*/
|
*/
|
||||||
static int ivc_macsec_init(struct osi_core_priv_data *const osi_core,
|
static nve32_t ivc_macsec_init(struct osi_core_priv_data *const osi_core,
|
||||||
nveu32_t mtu)
|
nveu32_t mtu)
|
||||||
{
|
{
|
||||||
ivc_msg_common_t msg;
|
ivc_msg_common_t msg;
|
||||||
@@ -599,9 +567,9 @@ static int ivc_macsec_init(struct osi_core_priv_data *const osi_core,
|
|||||||
osi_memset(&msg, 0, sizeof(msg));
|
osi_memset(&msg, 0, sizeof(msg));
|
||||||
|
|
||||||
msg.cmd = init_macsec;
|
msg.cmd = init_macsec;
|
||||||
msg.data.args.arguments[index] = mtu;
|
msg.args.arguments[index] = mtu;
|
||||||
index++;
|
index++;
|
||||||
msg.data.args.count = index;
|
msg.args.count = index;
|
||||||
|
|
||||||
return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
|
||||||
}
|
}
|
||||||
@@ -621,32 +589,24 @@ void ivc_init_macsec_ops(void *macsecops)
|
|||||||
|
|
||||||
ops->init = ivc_macsec_init;
|
ops->init = ivc_macsec_init;
|
||||||
ops->deinit = ivc_macsec_deinit;
|
ops->deinit = ivc_macsec_deinit;
|
||||||
ops->handle_ns_irq = ivc_macsec_handle_ns_irq;
|
ops->handle_irq = ivc_macsec_handle_irq;
|
||||||
ops->handle_s_irq = ivc_macsec_handle_s_irq;
|
|
||||||
ops->lut_config = ivc_macsec_lut_config;
|
ops->lut_config = ivc_macsec_lut_config;
|
||||||
#ifdef MACSEC_KEY_PROGRAM
|
#ifdef MACSEC_KEY_PROGRAM
|
||||||
ops->kt_config = ivc_macsec_kt_config;
|
ops->kt_config = ivc_macsec_kt_config;
|
||||||
#endif /* MACSEC_KEY_PROGRAM */
|
#endif /* MACSEC_KEY_PROGRAM */
|
||||||
ops->cipher_config = ivc_macsec_cipher_config;
|
ops->cipher_config = ivc_macsec_cipher_config;
|
||||||
ops->loopback_config = ivc_macsec_loopback_config;
|
|
||||||
ops->macsec_en = ivc_macsec_enable;
|
ops->macsec_en = ivc_macsec_enable;
|
||||||
ops->config = ivc_macsec_config;
|
ops->config = ivc_macsec_config;
|
||||||
ops->read_mmc = ivc_macsec_read_mmc;
|
ops->read_mmc = ivc_macsec_read_mmc;
|
||||||
ops->dbg_buf_config = ivc_macsec_dbg_buf_config;
|
#ifdef DEBUG_MACSEC
|
||||||
|
ops->loopback_config = ivc_macsec_loopback_config;
|
||||||
ops->dbg_events_config = ivc_macsec_dbg_events_config;
|
ops->dbg_events_config = ivc_macsec_dbg_events_config;
|
||||||
|
ops->dbg_buf_config = ivc_macsec_dbg_buf_config;
|
||||||
|
#endif /* DEBUG_MACSEC */
|
||||||
ops->get_sc_lut_key_index = ivc_get_sc_lut_key_index;
|
ops->get_sc_lut_key_index = ivc_get_sc_lut_key_index;
|
||||||
ops->update_mtu = ivc_macsec_update_mtu;
|
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief ivc_get_core_safety_config - EQOS MAC safety configuration
|
|
||||||
*/
|
|
||||||
void *ivc_get_core_safety_config(void)
|
|
||||||
{
|
|
||||||
return &ivc_safety_config;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief vir_ivc_core_deinit - MAC core deinitialization
|
* @brief vir_ivc_core_deinit - MAC core deinitialization
|
||||||
*
|
*
|
||||||
|
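Because the loopback and debug-buffer callbacks are now registered only when DEBUG_MACSEC is defined, any caller reaching them through the ops table needs the same guard. A hedged sketch (the OSI_NULL check and OSI_ENABLE argument are assumptions taken from common OSI conventions, not from the hunks above):

#ifdef DEBUG_MACSEC
	if (ops->loopback_config != OSI_NULL) {
		ret = ops->loopback_config(osi_core, OSI_ENABLE);
	}
#endif /* DEBUG_MACSEC */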
|||||||
File diff suppressed because it is too large
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -48,8 +48,10 @@
|
|||||||
* @brief MACsec controller register offsets
|
* @brief MACsec controller register offsets
|
||||||
* @{
|
* @{
|
||||||
*/
|
*/
|
||||||
|
#ifdef MACSEC_KEY_PROGRAM
|
||||||
#define MACSEC_GCM_KEYTABLE_CONFIG 0x0000
|
#define MACSEC_GCM_KEYTABLE_CONFIG 0x0000
|
||||||
#define MACSEC_GCM_KEYTABLE_DATA(x) ((0x0004U) + ((x) * 4U))
|
#define MACSEC_GCM_KEYTABLE_DATA(x) ((0x0004U) + ((x) * 4U))
|
||||||
|
#endif /* MACSEC_KEY_PROGRAM */
|
||||||
#define MACSEC_RX_ICV_ERR_CNTRL 0x4000
|
#define MACSEC_RX_ICV_ERR_CNTRL 0x4000
|
||||||
#define MACSEC_INTERRUPT_COMMON_SR 0x4004
|
#define MACSEC_INTERRUPT_COMMON_SR 0x4004
|
||||||
#define MACSEC_TX_IMR 0x4008
|
#define MACSEC_TX_IMR 0x4008
|
||||||
@@ -89,7 +91,6 @@
|
|||||||
#define MACSEC_TX_SCI_LUT_VALID 0xD028
|
#define MACSEC_TX_SCI_LUT_VALID 0xD028
|
||||||
#define MACSEC_RX_BYP_LUT_VALID 0xD02C
|
#define MACSEC_RX_BYP_LUT_VALID 0xD02C
|
||||||
#define MACSEC_RX_SCI_LUT_VALID 0xD030
|
#define MACSEC_RX_SCI_LUT_VALID 0xD030
|
||||||
|
|
||||||
#define MACSEC_COMMON_IMR 0xD054
|
#define MACSEC_COMMON_IMR 0xD054
|
||||||
#define MACSEC_COMMON_ISR 0xD058
|
#define MACSEC_COMMON_ISR 0xD058
|
||||||
#define MACSEC_TX_SC_KEY_INVALID_STS0_0 0xD064
|
#define MACSEC_TX_SC_KEY_INVALID_STS0_0 0xD064
|
||||||
@@ -97,14 +98,16 @@
|
|||||||
#define MACSEC_RX_SC_KEY_INVALID_STS0_0 0xD080
|
#define MACSEC_RX_SC_KEY_INVALID_STS0_0 0xD080
|
||||||
#define MACSEC_RX_SC_KEY_INVALID_STS1_0 0xD084
|
#define MACSEC_RX_SC_KEY_INVALID_STS1_0 0xD084
|
||||||
|
|
||||||
#define MACSEC_TX_DEBUG_CONTROL_0 0xD098
|
|
||||||
#define MACSEC_TX_DEBUG_TRIGGER_EN_0 0xD09C
|
|
||||||
#define MACSEC_TX_DEBUG_STATUS_0 0xD0C4
|
#define MACSEC_TX_DEBUG_STATUS_0 0xD0C4
|
||||||
|
#define MACSEC_TX_DEBUG_TRIGGER_EN_0 0xD09C
|
||||||
|
#define MACSEC_RX_DEBUG_STATUS_0 0xD0F8
|
||||||
|
#define MACSEC_RX_DEBUG_TRIGGER_EN_0 0xD0E0
|
||||||
|
#ifdef DEBUG_MACSEC
|
||||||
|
#define MACSEC_TX_DEBUG_CONTROL_0 0xD098
|
||||||
#define MACSEC_DEBUG_BUF_CONFIG_0 0xD0C8
|
#define MACSEC_DEBUG_BUF_CONFIG_0 0xD0C8
|
||||||
#define MACSEC_DEBUG_BUF_DATA_0(x) ((0xD0CCU) + ((x) * 4U))
|
#define MACSEC_DEBUG_BUF_DATA_0(x) ((0xD0CCU) + ((x) * 4U))
|
||||||
#define MACSEC_RX_DEBUG_CONTROL_0 0xD0DC
|
#define MACSEC_RX_DEBUG_CONTROL_0 0xD0DC
|
||||||
#define MACSEC_RX_DEBUG_TRIGGER_EN_0 0xD0E0
|
#endif /* DEBUG_MACSEC */
|
||||||
#define MACSEC_RX_DEBUG_STATUS_0 0xD0F8
|
|
||||||
|
|
||||||
#define MACSEC_CONTROL1 0xE000
|
#define MACSEC_CONTROL1 0xE000
|
||||||
#define MACSEC_GCM_AES_CONTROL_0 0xE004
|
#define MACSEC_GCM_AES_CONTROL_0 0xE004
|
||||||
@@ -114,6 +117,7 @@
|
|||||||
#define MACSEC_RX_SOT_DELAY 0xE01C
|
#define MACSEC_RX_SOT_DELAY 0xE01C
|
||||||
/** @} */
|
/** @} */
|
||||||
|
|
||||||
|
#ifdef MACSEC_KEY_PROGRAM
|
||||||
/**
|
/**
|
||||||
* @addtogroup MACSEC_GCM_KEYTABLE_CONFIG register
|
* @addtogroup MACSEC_GCM_KEYTABLE_CONFIG register
|
||||||
*
|
*
|
||||||
@@ -138,6 +142,7 @@
|
|||||||
#define MACSEC_KT_DATA_REG_SAK_CNT 8U
|
#define MACSEC_KT_DATA_REG_SAK_CNT 8U
|
||||||
#define MACSEC_KT_DATA_REG_H_CNT 4U
|
#define MACSEC_KT_DATA_REG_H_CNT 4U
|
||||||
/** @} */
|
/** @} */
|
||||||
|
#endif /* MACSEC_KEY_PROGRAM */
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @addtogroup MACSEC_LUT_CONFIG register
|
* @addtogroup MACSEC_LUT_CONFIG register
|
||||||
@@ -188,7 +193,9 @@
|
|||||||
* @brief Bit definitions of MACSEC_CONTROL1 register
|
* @brief Bit definitions of MACSEC_CONTROL1 register
|
||||||
* @{
|
* @{
|
||||||
*/
|
*/
|
||||||
|
#ifdef DEBUG_MACSEC
|
||||||
#define MACSEC_LOOPBACK_MODE_EN OSI_BIT(31)
|
#define MACSEC_LOOPBACK_MODE_EN OSI_BIT(31)
|
||||||
|
#endif /* DEBUG_MACSEC */
|
||||||
#define MACSEC_RX_MTU_CHECK_EN OSI_BIT(16)
|
#define MACSEC_RX_MTU_CHECK_EN OSI_BIT(16)
|
||||||
#define MACSEC_TX_LUT_PRIO_BYP OSI_BIT(2)
|
#define MACSEC_TX_LUT_PRIO_BYP OSI_BIT(2)
|
||||||
#define MACSEC_TX_MTU_CHECK_EN OSI_BIT(0)
|
#define MACSEC_TX_MTU_CHECK_EN OSI_BIT(0)
|
||||||
@@ -215,10 +222,12 @@
|
|||||||
* @{
|
* @{
|
||||||
*/
|
*/
|
||||||
#define MACSEC_SECURE_REG_VIOL_INT_EN OSI_BIT(31)
|
#define MACSEC_SECURE_REG_VIOL_INT_EN OSI_BIT(31)
|
||||||
|
#ifdef DEBUG_MACSEC
|
||||||
#define MACSEC_RX_UNINIT_KEY_SLOT_INT_EN OSI_BIT(17)
|
#define MACSEC_RX_UNINIT_KEY_SLOT_INT_EN OSI_BIT(17)
|
||||||
#define MACSEC_RX_LKUP_MISS_INT_EN OSI_BIT(16)
|
#define MACSEC_RX_LKUP_MISS_INT_EN OSI_BIT(16)
|
||||||
#define MACSEC_TX_UNINIT_KEY_SLOT_INT_EN OSI_BIT(1)
|
#define MACSEC_TX_UNINIT_KEY_SLOT_INT_EN OSI_BIT(1)
|
||||||
#define MACSEC_TX_LKUP_MISS_INT_EN OSI_BIT(0)
|
#define MACSEC_TX_LKUP_MISS_INT_EN OSI_BIT(0)
|
||||||
|
#endif /* DEBUG_MACSEC */
|
||||||
/** @} */
|
/** @} */
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -227,11 +236,12 @@
|
|||||||
* @brief Bit definitions of TX_INTERRUPT_MASK register
|
* @brief Bit definitions of TX_INTERRUPT_MASK register
|
||||||
* @{
|
* @{
|
||||||
*/
|
*/
|
||||||
|
#define MACSEC_TX_MAC_CRC_ERROR_INT_EN OSI_BIT(16)
|
||||||
|
#ifdef DEBUG_MACSEC
|
||||||
#define MACSEC_TX_DBG_BUF_CAPTURE_DONE_INT_EN OSI_BIT(22)
|
#define MACSEC_TX_DBG_BUF_CAPTURE_DONE_INT_EN OSI_BIT(22)
|
||||||
#define MACSEC_TX_MTU_CHECK_FAIL_INT_EN OSI_BIT(19)
|
#define MACSEC_TX_MTU_CHECK_FAIL_INT_EN OSI_BIT(19)
|
||||||
#define MACSEC_TX_AES_GCM_BUF_OVF_INT_EN OSI_BIT(18)
|
#define MACSEC_TX_AES_GCM_BUF_OVF_INT_EN OSI_BIT(18)
|
||||||
#define MACSEC_TX_SC_AN_NOT_VALID_INT_EN OSI_BIT(17)
|
#define MACSEC_TX_SC_AN_NOT_VALID_INT_EN OSI_BIT(17)
|
||||||
#define MACSEC_TX_MAC_CRC_ERROR_INT_EN OSI_BIT(16)
|
|
||||||
#define MACSEC_TX_PN_EXHAUSTED_INT_EN OSI_BIT(1)
|
#define MACSEC_TX_PN_EXHAUSTED_INT_EN OSI_BIT(1)
|
||||||
#define MACSEC_TX_PN_THRSHLD_RCHD_INT_EN OSI_BIT(0)
|
#define MACSEC_TX_PN_THRSHLD_RCHD_INT_EN OSI_BIT(0)
|
||||||
/** @} */
|
/** @} */
|
||||||
@@ -243,12 +253,13 @@
|
|||||||
* @{
|
* @{
|
||||||
*/
|
*/
|
||||||
#define MACSEC_RX_DBG_BUF_CAPTURE_DONE_INT_EN OSI_BIT(22)
|
#define MACSEC_RX_DBG_BUF_CAPTURE_DONE_INT_EN OSI_BIT(22)
|
||||||
#define MACSEC_RX_ICV_ERROR_INT_EN OSI_BIT(21)
|
|
||||||
#define RX_REPLAY_ERROR_INT_EN OSI_BIT(20)
|
#define RX_REPLAY_ERROR_INT_EN OSI_BIT(20)
|
||||||
#define MACSEC_RX_MTU_CHECK_FAIL_INT_EN OSI_BIT(19)
|
#define MACSEC_RX_MTU_CHECK_FAIL_INT_EN OSI_BIT(19)
|
||||||
#define MACSEC_RX_AES_GCM_BUF_OVF_INT_EN OSI_BIT(18)
|
#define MACSEC_RX_AES_GCM_BUF_OVF_INT_EN OSI_BIT(18)
|
||||||
#define MACSEC_RX_MAC_CRC_ERROR_INT_EN OSI_BIT(16)
|
|
||||||
#define MACSEC_RX_PN_EXHAUSTED_INT_EN OSI_BIT(1)
|
#define MACSEC_RX_PN_EXHAUSTED_INT_EN OSI_BIT(1)
|
||||||
|
#endif /* DEBUG_MACSEC */
|
||||||
|
#define MACSEC_RX_ICV_ERROR_INT_EN OSI_BIT(21)
|
||||||
|
#define MACSEC_RX_MAC_CRC_ERROR_INT_EN OSI_BIT(16)
|
||||||
/** @} */
|
/** @} */
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -264,6 +275,16 @@
|
|||||||
#define MACSEC_TX_LKUP_MISS OSI_BIT(0)
|
#define MACSEC_TX_LKUP_MISS OSI_BIT(0)
|
||||||
/** @} */
|
/** @} */
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @addtogroup MACSEC_STATS_CONTROL_0 register
|
||||||
|
*
|
||||||
|
* @brief Bit definitions of MACSEC_STATS_CONTROL_0 register
|
||||||
|
* @{
|
||||||
|
*/
|
||||||
|
#define MACSEC_STATS_CONTROL0_CNT_RL_OVR_CPY OSI_BIT(1)
|
||||||
|
/** @} */
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @addtogroup MACSEC_TX_ISR register
|
* @addtogroup MACSEC_TX_ISR register
|
||||||
*
|
*
|
||||||
@@ -294,15 +315,7 @@
|
|||||||
#define MACSEC_RX_PN_EXHAUSTED OSI_BIT(1)
|
#define MACSEC_RX_PN_EXHAUSTED OSI_BIT(1)
|
||||||
/** @} */
|
/** @} */
|
||||||
|
|
||||||
/**
|
#ifdef DEBUG_MACSEC
|
||||||
* @addtogroup MACSEC_STATS_CONTROL_0 register
|
|
||||||
*
|
|
||||||
* @brief Bit definitions of MACSEC_STATS_CONTROL_0 register
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
#define MACSEC_STATS_CONTROL0_CNT_RL_OVR_CPY OSI_BIT(1)
|
|
||||||
/** @} */
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @addtogroup MACSEC_DEBUG_BUF_CONFIG_0 register
|
* @addtogroup MACSEC_DEBUG_BUF_CONFIG_0 register
|
||||||
*
|
*
|
||||||
@@ -361,21 +374,14 @@
|
|||||||
*/
|
*/
|
||||||
#define MACSEC_RX_DEBUG_CONTROL_0_START_CAP OSI_BIT(31)
|
#define MACSEC_RX_DEBUG_CONTROL_0_START_CAP OSI_BIT(31)
|
||||||
/** @} */
|
/** @} */
|
||||||
|
#endif /* DEBUG_MACSEC */
|
||||||
|
|
||||||
#define MTU_LENGTH_MASK 0xFFFFU
|
#define MTU_LENGTH_MASK 0xFFFFU
|
||||||
#define SOT_LENGTH_MASK 0xFFU
|
#define SOT_LENGTH_MASK 0xFFU
|
||||||
#define EQOS_MACSEC_SOT_DELAY 0x4EU
|
#define EQOS_MACSEC_SOT_DELAY 0x4EU
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @addtogroup TX/RX_BYP/SCI_LUT_VALID register
|
* @addtogroup MACSEC-LUT TX/RX LUT bit fields in LUT_DATA registers
|
||||||
*
|
|
||||||
* @brief Bit definitions of LUT_VALID registers
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
/** @} */
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @addtogroup TX/RX LUT bit fields in LUT_DATA registers
|
|
||||||
*
|
*
|
||||||
* @brief Helper macros for LUT data programming
|
* @brief Helper macros for LUT data programming
|
||||||
* @{
|
* @{
|
||||||
@@ -439,8 +445,21 @@
|
|||||||
#define MACSEC_RX_SCI_LUT_PREEMPT_INACTIVE OSI_BIT(9)
|
#define MACSEC_RX_SCI_LUT_PREEMPT_INACTIVE OSI_BIT(9)
|
||||||
/** @} */
|
/** @} */
|
||||||
|
|
||||||
|
#ifdef DEBUG_MACSEC
|
||||||
/* debug buffer data read/write length */
|
/* debug buffer data read/write length */
|
||||||
#define DBG_BUF_LEN 4U
|
#define DBG_BUF_LEN 4U
|
||||||
|
#endif /* DEBUG_MACSEC */
|
||||||
|
#ifdef MACSEC_KEY_PROGRAM
|
||||||
#define INTEGER_LEN 4U
|
#define INTEGER_LEN 4U
|
||||||
|
#endif /* MACSEC_KEY_PROGRAM */
|
||||||
|
|
||||||
|
#ifdef HSI_SUPPORT
|
||||||
|
/* Set RX ISR set interrupt status bit */
|
||||||
|
#define MACSEC_RX_ISR_SET 0x4050U
|
||||||
|
/* Set TX ISR set interrupt status bit */
|
||||||
|
#define MACSEC_TX_ISR_SET 0x4010U
|
||||||
|
/* Set Common ISR set interrupt status bit */
|
||||||
|
#define MACSEC_COMMON_ISR_SET 0xd05cU
|
||||||
|
#endif
|
||||||
|
|
||||||
#endif /* INCLUDED_MACSEC_H */
|
#endif /* INCLUDED_MACSEC_H */
|
||||||
|
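The *_ISR_SET offsets added under HSI_SUPPORT let software force an interrupt status bit, e.g. for HSI error-injection checks. A hedged sketch (the bit position and the macsec_base member access are assumptions; only the offset macro and the osi_writel() idiom come from this patch):

#ifdef HSI_SUPPORT
/* Illustration only: latch a common-interrupt status bit via the set register. */
static void macsec_force_common_irq(struct osi_core_priv_data *const osi_core)
{
	osi_writel(OSI_BIT(0), (nveu8_t *)osi_core->macsec_base +
		   MACSEC_COMMON_ISR_SET);
}
#endif /* HSI_SUPPORT */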
|||||||
3863
osi/core/mgbe_core.c
File diff suppressed because it is too large
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -27,7 +27,7 @@
|
|||||||
#include "mgbe_core.h"
|
#include "mgbe_core.h"
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief update_mmc_val - function to read a register and return the value to the caller
|
* @brief mgbe_update_mmc_val - function to read a register and return the value to the caller
|
||||||
*
|
*
|
||||||
* Algorithm: Read the register and check it against the boundary; if exceeded, reset
|
* Algorithm: Read the register and check it against the boundary; if exceeded, reset
|
||||||
* the counters, else return the accumulated value to the caller.
|
* the counters, else return the accumulated value to the caller.
|
||||||
@@ -43,12 +43,12 @@
|
|||||||
* @retval 0 on MMC counters overflow
|
* @retval 0 on MMC counters overflow
|
||||||
* @retval value on current MMC counter value.
|
* @retval value on current MMC counter value.
|
||||||
*/
|
*/
|
||||||
static inline unsigned long update_mmc_val(struct osi_core_priv_data *osi_core,
|
static inline nveu64_t mgbe_update_mmc_val(struct osi_core_priv_data *osi_core,
|
||||||
unsigned long last_value,
|
nveu64_t last_value,
|
||||||
unsigned long offset)
|
nveu64_t offset)
|
||||||
{
|
{
|
||||||
unsigned long temp;
|
nveu64_t temp = 0;
|
||||||
unsigned int value = osi_readl((unsigned char *)osi_core->base +
|
nveu32_t value = osi_readl((nveu8_t *)osi_core->base +
|
||||||
offset);
|
offset);
|
||||||
|
|
||||||
temp = last_value + value;
|
temp = last_value + value;
|
||||||
@@ -56,13 +56,11 @@ static inline unsigned long update_mmc_val(struct osi_core_priv_data *osi_core,
|
|||||||
OSI_CORE_ERR(osi_core->osd,
|
OSI_CORE_ERR(osi_core->osd,
|
||||||
OSI_LOG_ARG_OUTOFBOUND,
|
OSI_LOG_ARG_OUTOFBOUND,
|
||||||
"Value overflow resetting all counters\n",
|
"Value overflow resetting all counters\n",
|
||||||
(unsigned long long)offset);
|
(nveul64_t)offset);
|
||||||
mgbe_reset_mmc(osi_core);
|
mgbe_reset_mmc(osi_core);
|
||||||
} else {
|
|
||||||
return temp;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return temp;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -75,14 +73,14 @@ static inline unsigned long update_mmc_val(struct osi_core_priv_data *osi_core,
|
|||||||
* 1) MAC should be init and started. see osi_start_mac()
|
* 1) MAC should be init and started. see osi_start_mac()
|
||||||
* 2) osi_core->osd should be populated
|
* 2) osi_core->osd should be populated
|
||||||
*/
|
*/
|
||||||
void mgbe_reset_mmc(struct osi_core_priv_data *osi_core)
|
void mgbe_reset_mmc(struct osi_core_priv_data *const osi_core)
|
||||||
{
|
{
|
||||||
unsigned int value;
|
nveu32_t value;
|
||||||
|
|
||||||
value = osi_readl((unsigned char *)osi_core->base + MGBE_MMC_CNTRL);
|
value = osi_readl((nveu8_t *)osi_core->base + MGBE_MMC_CNTRL);
|
||||||
/* self-clear bit in one clock cycle */
|
/* self-clear bit in one clock cycle */
|
||||||
value |= MGBE_MMC_CNTRL_CNTRST;
|
value |= MGBE_MMC_CNTRL_CNTRST;
|
||||||
osi_writel(value, (unsigned char *)osi_core->base + MGBE_MMC_CNTRL);
|
osi_writel(value, (nveu8_t *)osi_core->base + MGBE_MMC_CNTRL);
|
||||||
osi_memset(&osi_core->mmc, 0U, sizeof(struct osi_mmc_counters));
|
osi_memset(&osi_core->mmc, 0U, sizeof(struct osi_mmc_counters));
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -99,461 +97,461 @@ void mgbe_reset_mmc(struct osi_core_priv_data *osi_core)
|
|||||||
* 1) MAC should be init and started. see osi_start_mac()
|
* 1) MAC should be init and started. see osi_start_mac()
|
||||||
* 2) osi_core->osd should be populated
|
* 2) osi_core->osd should be populated
|
||||||
*/
|
*/
|
||||||
void mgbe_read_mmc(struct osi_core_priv_data *osi_core)
|
void mgbe_read_mmc(struct osi_core_priv_data *const osi_core)
|
||||||
{
|
{
|
||||||
struct osi_mmc_counters *mmc = &osi_core->mmc;
|
struct osi_mmc_counters *mmc = &osi_core->mmc;
|
||||||
|
|
||||||
mmc->mmc_tx_octetcount_gb =
|
mmc->mmc_tx_octetcount_gb =
|
||||||
update_mmc_val(osi_core, mmc->mmc_tx_octetcount_gb,
|
mgbe_update_mmc_val(osi_core, mmc->mmc_tx_octetcount_gb,
|
||||||
MMC_TXOCTETCOUNT_GB_L);
|
MMC_TXOCTETCOUNT_GB_L);
|
||||||
mmc->mmc_tx_octetcount_gb_h =
|
mmc->mmc_tx_octetcount_gb_h =
|
||||||
update_mmc_val(osi_core, mmc->mmc_tx_octetcount_gb_h,
|
mgbe_update_mmc_val(osi_core, mmc->mmc_tx_octetcount_gb_h,
|
||||||
MMC_TXOCTETCOUNT_GB_H);
|
MMC_TXOCTETCOUNT_GB_H);
|
||||||
mmc->mmc_tx_framecount_gb =
|
mmc->mmc_tx_framecount_gb =
|
||||||
update_mmc_val(osi_core, mmc->mmc_tx_framecount_gb,
|
mgbe_update_mmc_val(osi_core, mmc->mmc_tx_framecount_gb,
|
||||||
MMC_TXPACKETCOUNT_GB_L);
|
MMC_TXPACKETCOUNT_GB_L);
|
||||||
mmc->mmc_tx_framecount_gb_h =
|
mmc->mmc_tx_framecount_gb_h =
|
||||||
update_mmc_val(osi_core, mmc->mmc_tx_framecount_gb_h,
|
mgbe_update_mmc_val(osi_core, mmc->mmc_tx_framecount_gb_h,
|
||||||
MMC_TXPACKETCOUNT_GB_H);
|
MMC_TXPACKETCOUNT_GB_H);
|
||||||
mmc->mmc_tx_broadcastframe_g =
|
mmc->mmc_tx_broadcastframe_g =
|
||||||
update_mmc_val(osi_core, mmc->mmc_tx_broadcastframe_g,
|
mgbe_update_mmc_val(osi_core, mmc->mmc_tx_broadcastframe_g,
|
||||||
MMC_TXBROADCASTPACKETS_G_L);
|
MMC_TXBROADCASTPACKETS_G_L);
|
||||||
mmc->mmc_tx_broadcastframe_g_h =
|
mmc->mmc_tx_broadcastframe_g_h =
|
||||||
update_mmc_val(osi_core, mmc->mmc_tx_broadcastframe_g_h,
|
mgbe_update_mmc_val(osi_core, mmc->mmc_tx_broadcastframe_g_h,
|
||||||
MMC_TXBROADCASTPACKETS_G_H);
|
MMC_TXBROADCASTPACKETS_G_H);
|
||||||
mmc->mmc_tx_multicastframe_g =
|
mmc->mmc_tx_multicastframe_g =
|
||||||
update_mmc_val(osi_core, mmc->mmc_tx_multicastframe_g,
|
mgbe_update_mmc_val(osi_core, mmc->mmc_tx_multicastframe_g,
|
||||||
MMC_TXMULTICASTPACKETS_G_L);
|
MMC_TXMULTICASTPACKETS_G_L);
|
||||||
mmc->mmc_tx_multicastframe_g_h =
|
mmc->mmc_tx_multicastframe_g_h =
|
||||||
update_mmc_val(osi_core, mmc->mmc_tx_multicastframe_g_h,
|
mgbe_update_mmc_val(osi_core, mmc->mmc_tx_multicastframe_g_h,
|
||||||
MMC_TXMULTICASTPACKETS_G_H);
|
MMC_TXMULTICASTPACKETS_G_H);
|
||||||
mmc->mmc_tx_64_octets_gb =
|
mmc->mmc_tx_64_octets_gb =
|
||||||
update_mmc_val(osi_core, mmc->mmc_tx_64_octets_gb,
|
mgbe_update_mmc_val(osi_core, mmc->mmc_tx_64_octets_gb,
|
||||||
MMC_TX64OCTETS_GB_L);
|
MMC_TX64OCTETS_GB_L);
|
||||||
mmc->mmc_tx_64_octets_gb_h =
|
mmc->mmc_tx_64_octets_gb_h =
|
||||||
update_mmc_val(osi_core, mmc->mmc_tx_64_octets_gb_h,
|
mgbe_update_mmc_val(osi_core, mmc->mmc_tx_64_octets_gb_h,
|
||||||
MMC_TX64OCTETS_GB_H);
|
MMC_TX64OCTETS_GB_H);
|
||||||
mmc->mmc_tx_65_to_127_octets_gb =
|
mmc->mmc_tx_65_to_127_octets_gb =
|
||||||
update_mmc_val(osi_core, mmc->mmc_tx_65_to_127_octets_gb,
|
mgbe_update_mmc_val(osi_core, mmc->mmc_tx_65_to_127_octets_gb,
|
||||||
MMC_TX65TO127OCTETS_GB_L);
|
MMC_TX65TO127OCTETS_GB_L);
|
||||||
mmc->mmc_tx_65_to_127_octets_gb_h =
|
mmc->mmc_tx_65_to_127_octets_gb_h =
|
		mgbe_update_mmc_val(osi_core, mmc->mmc_tx_65_to_127_octets_gb_h, MMC_TX65TO127OCTETS_GB_H);
	mmc->mmc_tx_128_to_255_octets_gb = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_128_to_255_octets_gb, MMC_TX128TO255OCTETS_GB_L);
	mmc->mmc_tx_128_to_255_octets_gb_h = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_128_to_255_octets_gb_h, MMC_TX128TO255OCTETS_GB_H);
	mmc->mmc_tx_256_to_511_octets_gb = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_256_to_511_octets_gb, MMC_TX256TO511OCTETS_GB_L);
	mmc->mmc_tx_256_to_511_octets_gb_h = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_256_to_511_octets_gb_h, MMC_TX256TO511OCTETS_GB_H);
	mmc->mmc_tx_512_to_1023_octets_gb = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_512_to_1023_octets_gb, MMC_TX512TO1023OCTETS_GB_L);
	mmc->mmc_tx_512_to_1023_octets_gb_h = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_512_to_1023_octets_gb_h, MMC_TX512TO1023OCTETS_GB_H);
	mmc->mmc_tx_1024_to_max_octets_gb = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_1024_to_max_octets_gb, MMC_TX1024TOMAXOCTETS_GB_L);
	mmc->mmc_tx_1024_to_max_octets_gb_h = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_1024_to_max_octets_gb_h, MMC_TX1024TOMAXOCTETS_GB_H);
	mmc->mmc_tx_unicast_gb = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_unicast_gb, MMC_TXUNICASTPACKETS_GB_L);
	mmc->mmc_tx_unicast_gb_h = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_unicast_gb_h, MMC_TXUNICASTPACKETS_GB_H);
	mmc->mmc_tx_multicast_gb = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_multicast_gb, MMC_TXMULTICASTPACKETS_GB_L);
	mmc->mmc_tx_multicast_gb_h = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_multicast_gb_h, MMC_TXMULTICASTPACKETS_GB_H);
	mmc->mmc_tx_broadcast_gb = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_broadcast_gb, MMC_TXBROADCASTPACKETS_GB_L);
	mmc->mmc_tx_broadcast_gb_h = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_broadcast_gb_h, MMC_TXBROADCASTPACKETS_GB_H);
	mmc->mmc_tx_underflow_error = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_underflow_error, MMC_TXUNDERFLOWERROR_L);
	mmc->mmc_tx_underflow_error_h = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_underflow_error_h, MMC_TXUNDERFLOWERROR_H);
	mmc->mmc_tx_singlecol_g = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_singlecol_g, MMC_TXSINGLECOL_G);
	mmc->mmc_tx_multicol_g = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_multicol_g, MMC_TXMULTICOL_G);
	mmc->mmc_tx_deferred = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_deferred, MMC_TXDEFERRED);
	mmc->mmc_tx_latecol = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_latecol, MMC_TXLATECOL);
	mmc->mmc_tx_exesscol = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_exesscol, MMC_TXEXESSCOL);
	mmc->mmc_tx_carrier_error = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_carrier_error, MMC_TXCARRIERERROR);
	mmc->mmc_tx_octetcount_g = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_octetcount_g, MMC_TXOCTETCOUNT_G_L);
	mmc->mmc_tx_octetcount_g_h = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_octetcount_g_h, MMC_TXOCTETCOUNT_G_H);
	mmc->mmc_tx_framecount_g = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_framecount_g, MMC_TXPACKETSCOUNT_G_L);
	mmc->mmc_tx_framecount_g_h = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_framecount_g_h, MMC_TXPACKETSCOUNT_G_H);
	mmc->mmc_tx_excessdef = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_excessdef, MMC_TXEXECESS_DEFERRED);
	mmc->mmc_tx_pause_frame = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_pause_frame, MMC_TXPAUSEPACKETS_L);
	mmc->mmc_tx_pause_frame_h = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_pause_frame_h, MMC_TXPAUSEPACKETS_H);
	mmc->mmc_tx_vlan_frame_g = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_vlan_frame_g, MMC_TXVLANPACKETS_G_L);
	mmc->mmc_tx_vlan_frame_g_h = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_vlan_frame_g_h, MMC_TXVLANPACKETS_G_H);
	mmc->mmc_rx_framecount_gb = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_framecount_gb, MMC_RXPACKETCOUNT_GB_L);
	mmc->mmc_rx_framecount_gb_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_framecount_gb_h, MMC_RXPACKETCOUNT_GB_H);
	mmc->mmc_rx_octetcount_gb = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_octetcount_gb, MMC_RXOCTETCOUNT_GB_L);
	mmc->mmc_rx_octetcount_gb_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_octetcount_gb_h, MMC_RXOCTETCOUNT_GB_H);
	mmc->mmc_rx_octetcount_g = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_octetcount_g, MMC_RXOCTETCOUNT_G_L);
	mmc->mmc_rx_octetcount_g_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_octetcount_g_h, MMC_RXOCTETCOUNT_G_H);
	mmc->mmc_rx_broadcastframe_g = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_broadcastframe_g, MMC_RXBROADCASTPACKETS_G_L);
	mmc->mmc_rx_broadcastframe_g_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_broadcastframe_g_h, MMC_RXBROADCASTPACKETS_G_H);
	mmc->mmc_rx_multicastframe_g = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_multicastframe_g, MMC_RXMULTICASTPACKETS_G_L);
	mmc->mmc_rx_multicastframe_g_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_multicastframe_g_h, MMC_RXMULTICASTPACKETS_G_H);
	mmc->mmc_rx_crc_error = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_crc_error, MMC_RXCRCERROR_L);
	mmc->mmc_rx_crc_error_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_crc_error_h, MMC_RXCRCERROR_H);
	mmc->mmc_rx_align_error = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_align_error, MMC_RXALIGNMENTERROR);
	mmc->mmc_rx_runt_error = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_runt_error, MMC_RXRUNTERROR);
	mmc->mmc_rx_jabber_error = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_jabber_error, MMC_RXJABBERERROR);
	mmc->mmc_rx_undersize_g = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_undersize_g, MMC_RXUNDERSIZE_G);
	mmc->mmc_rx_oversize_g = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_oversize_g, MMC_RXOVERSIZE_G);
	mmc->mmc_rx_64_octets_gb = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_64_octets_gb, MMC_RX64OCTETS_GB_L);
	mmc->mmc_rx_64_octets_gb_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_64_octets_gb_h, MMC_RX64OCTETS_GB_H);
	mmc->mmc_rx_65_to_127_octets_gb = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_65_to_127_octets_gb, MMC_RX65TO127OCTETS_GB_L);
	mmc->mmc_rx_65_to_127_octets_gb_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_65_to_127_octets_gb_h, MMC_RX65TO127OCTETS_GB_H);
	mmc->mmc_rx_128_to_255_octets_gb = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_128_to_255_octets_gb, MMC_RX128TO255OCTETS_GB_L);
	mmc->mmc_rx_128_to_255_octets_gb_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_128_to_255_octets_gb_h, MMC_RX128TO255OCTETS_GB_H);
	mmc->mmc_rx_256_to_511_octets_gb = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_256_to_511_octets_gb, MMC_RX256TO511OCTETS_GB_L);
	mmc->mmc_rx_256_to_511_octets_gb_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_256_to_511_octets_gb_h, MMC_RX256TO511OCTETS_GB_H);
	mmc->mmc_rx_512_to_1023_octets_gb = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_512_to_1023_octets_gb, MMC_RX512TO1023OCTETS_GB_L);
	mmc->mmc_rx_512_to_1023_octets_gb_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_512_to_1023_octets_gb_h, MMC_RX512TO1023OCTETS_GB_H);
	mmc->mmc_rx_1024_to_max_octets_gb = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_1024_to_max_octets_gb, MMC_RX1024TOMAXOCTETS_GB_L);
	mmc->mmc_rx_1024_to_max_octets_gb_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_1024_to_max_octets_gb_h, MMC_RX1024TOMAXOCTETS_GB_H);
	mmc->mmc_rx_unicast_g = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_unicast_g, MMC_RXUNICASTPACKETS_G_L);
	mmc->mmc_rx_unicast_g_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_unicast_g_h, MMC_RXUNICASTPACKETS_G_H);
	mmc->mmc_rx_length_error = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_length_error, MMC_RXLENGTHERROR_L);
	mmc->mmc_rx_length_error_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_length_error_h, MMC_RXLENGTHERROR_H);
	mmc->mmc_rx_outofrangetype = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_outofrangetype, MMC_RXOUTOFRANGETYPE_L);
	mmc->mmc_rx_outofrangetype_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_outofrangetype_h, MMC_RXOUTOFRANGETYPE_H);
	mmc->mmc_rx_pause_frames = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_pause_frames, MMC_RXPAUSEPACKETS_L);
	mmc->mmc_rx_pause_frames_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_pause_frames_h, MMC_RXPAUSEPACKETS_H);
	mmc->mmc_rx_fifo_overflow = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_fifo_overflow, MMC_RXFIFOOVERFLOW_L);
	mmc->mmc_rx_fifo_overflow_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_fifo_overflow_h, MMC_RXFIFOOVERFLOW_H);
	mmc->mmc_rx_vlan_frames_gb = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_vlan_frames_gb, MMC_RXVLANPACKETS_GB_L);
	mmc->mmc_rx_vlan_frames_gb_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_vlan_frames_gb_h, MMC_RXVLANPACKETS_GB_H);
	mmc->mmc_rx_watchdog_error = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_watchdog_error, MMC_RXWATCHDOGERROR);
	mmc->mmc_tx_lpi_usec_cntr = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_lpi_usec_cntr, MMC_TXLPIUSECCNTR);
	mmc->mmc_tx_lpi_tran_cntr = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_lpi_tran_cntr, MMC_TXLPITRANCNTR);
	mmc->mmc_rx_lpi_usec_cntr = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_lpi_usec_cntr, MMC_RXLPIUSECCNTR);
	mmc->mmc_rx_lpi_tran_cntr = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_lpi_tran_cntr, MMC_RXLPITRANCNTR);
	mmc->mmc_rx_ipv4_gd = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd, MMC_RXIPV4_GD_PKTS_L);
	mmc->mmc_rx_ipv4_gd_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd_h, MMC_RXIPV4_GD_PKTS_H);
	mmc->mmc_rx_ipv4_hderr = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr, MMC_RXIPV4_HDRERR_PKTS_L);
	mmc->mmc_rx_ipv4_hderr_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr_h, MMC_RXIPV4_HDRERR_PKTS_H);
	mmc->mmc_rx_ipv4_nopay = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay, MMC_RXIPV4_NOPAY_PKTS_L);
	mmc->mmc_rx_ipv4_nopay_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay_h, MMC_RXIPV4_NOPAY_PKTS_H);
	mmc->mmc_rx_ipv4_frag = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag, MMC_RXIPV4_FRAG_PKTS_L);
	mmc->mmc_rx_ipv4_frag_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag_h, MMC_RXIPV4_FRAG_PKTS_H);
	mmc->mmc_rx_ipv4_udsbl = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl, MMC_RXIPV4_UBSBL_PKTS_L);
	mmc->mmc_rx_ipv4_udsbl_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl_h, MMC_RXIPV4_UBSBL_PKTS_H);
	mmc->mmc_rx_ipv6_gd = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd, MMC_RXIPV6_GD_PKTS_L);
	mmc->mmc_rx_ipv6_gd_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd_h, MMC_RXIPV6_GD_PKTS_H);
	mmc->mmc_rx_ipv6_hderr = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr, MMC_RXIPV6_HDRERR_PKTS_L);
	mmc->mmc_rx_ipv6_hderr_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr_h, MMC_RXIPV6_HDRERR_PKTS_H);
	mmc->mmc_rx_ipv6_nopay = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay, MMC_RXIPV6_NOPAY_PKTS_L);
	mmc->mmc_rx_ipv6_nopay_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay_h, MMC_RXIPV6_NOPAY_PKTS_H);
	mmc->mmc_rx_udp_gd = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_gd, MMC_RXUDP_GD_PKTS_L);
	mmc->mmc_rx_udp_gd_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_gd_h, MMC_RXUDP_GD_PKTS_H);
	mmc->mmc_rx_udp_err = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_err, MMC_RXUDP_ERR_PKTS_L);
	mmc->mmc_rx_udp_err_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_err_h, MMC_RXUDP_ERR_PKTS_H);
	mmc->mmc_rx_tcp_gd = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd, MMC_RXTCP_GD_PKTS_L);
	mmc->mmc_rx_tcp_gd_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd_h, MMC_RXTCP_GD_PKTS_H);
	mmc->mmc_rx_tcp_err = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_err, MMC_RXTCP_ERR_PKTS_L);
	mmc->mmc_rx_tcp_err_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_err_h, MMC_RXTCP_ERR_PKTS_H);
	mmc->mmc_rx_icmp_gd = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd, MMC_RXICMP_GD_PKTS_L);
	mmc->mmc_rx_icmp_gd_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd_h, MMC_RXICMP_GD_PKTS_H);
	mmc->mmc_rx_icmp_err = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_err, MMC_RXICMP_ERR_PKTS_L);
	mmc->mmc_rx_icmp_err_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_err_h, MMC_RXICMP_ERR_PKTS_H);
	mmc->mmc_rx_ipv4_gd_octets = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd_octets, MMC_RXIPV4_GD_OCTETS_L);
	mmc->mmc_rx_ipv4_gd_octets_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd_octets_h, MMC_RXIPV4_GD_OCTETS_H);
	mmc->mmc_rx_ipv4_hderr_octets = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr_octets, MMC_RXIPV4_HDRERR_OCTETS_L);
	mmc->mmc_rx_ipv4_hderr_octets_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr_octets_h, MMC_RXIPV4_HDRERR_OCTETS_H);
	mmc->mmc_rx_ipv4_nopay_octets = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay_octets, MMC_RXIPV4_NOPAY_OCTETS_L);
	mmc->mmc_rx_ipv4_nopay_octets_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay_octets_h, MMC_RXIPV4_NOPAY_OCTETS_H);
	mmc->mmc_rx_ipv4_frag_octets = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag_octets, MMC_RXIPV4_FRAG_OCTETS_L);
	mmc->mmc_rx_ipv4_frag_octets_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag_octets_h, MMC_RXIPV4_FRAG_OCTETS_H);
	mmc->mmc_rx_ipv4_udsbl_octets = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl_octets, MMC_RXIPV4_UDP_CHKSM_DIS_OCT_L);
	mmc->mmc_rx_ipv4_udsbl_octets_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl_octets_h, MMC_RXIPV4_UDP_CHKSM_DIS_OCT_H);
	mmc->mmc_rx_udp_gd_octets = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_gd_octets, MMC_RXUDP_GD_OCTETS_L);
	mmc->mmc_rx_udp_gd_octets_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_gd_octets_h, MMC_RXUDP_GD_OCTETS_H);
	mmc->mmc_rx_ipv6_gd_octets = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd_octets, MMC_RXIPV6_GD_OCTETS_L);
	mmc->mmc_rx_ipv6_gd_octets_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd_octets_h, MMC_RXIPV6_GD_OCTETS_H);
	mmc->mmc_rx_ipv6_hderr_octets = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr_octets, MMC_RXIPV6_HDRERR_OCTETS_L);
	mmc->mmc_rx_ipv6_hderr_octets_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr_octets_h, MMC_RXIPV6_HDRERR_OCTETS_H);
	mmc->mmc_rx_ipv6_nopay_octets = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay_octets, MMC_RXIPV6_NOPAY_OCTETS_L);
	mmc->mmc_rx_ipv6_nopay_octets_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay_octets_h, MMC_RXIPV6_NOPAY_OCTETS_H);
	mmc->mmc_rx_udp_err_octets = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_err_octets, MMC_RXUDP_ERR_OCTETS_L);
	mmc->mmc_rx_udp_err_octets_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_err_octets_h, MMC_RXUDP_ERR_OCTETS_H);
	mmc->mmc_rx_tcp_gd_octets = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd_octets, MMC_RXTCP_GD_OCTETS_L);
	mmc->mmc_rx_tcp_gd_octets_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd_octets_h, MMC_RXTCP_GD_OCTETS_H);
	mmc->mmc_rx_tcp_err_octets = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_err_octets, MMC_RXTCP_ERR_OCTETS_L);
	mmc->mmc_rx_tcp_err_octets_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_err_octets_h, MMC_RXTCP_ERR_OCTETS_H);
	mmc->mmc_rx_icmp_gd_octets = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd_octets, MMC_RXICMP_GD_OCTETS_L);
	mmc->mmc_rx_icmp_gd_octets_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd_octets_h, MMC_RXICMP_GD_OCTETS_H);
	mmc->mmc_rx_icmp_err_octets = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_err_octets, MMC_RXICMP_ERR_OCTETS_L);
	mmc->mmc_rx_icmp_err_octets_h = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_err_octets_h, MMC_RXICMP_ERR_OCTETS_H);
	mmc->mmc_tx_fpe_frag_cnt = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_fpe_frag_cnt, MMC_TX_FPE_FRAG_COUNTER);
	mmc->mmc_tx_fpe_hold_req_cnt = mgbe_update_mmc_val(osi_core, mmc->mmc_tx_fpe_hold_req_cnt, MMC_TX_HOLD_REQ_COUNTER);
	mmc->mmc_rx_packet_reass_err_cnt = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_packet_reass_err_cnt, MMC_RX_PKT_ASSEMBLY_ERR_CNTR);
	mmc->mmc_rx_packet_smd_err_cnt = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_packet_smd_err_cnt, MMC_RX_PKT_SMD_ERR_CNTR);
	mmc->mmc_rx_packet_asm_ok_cnt = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_packet_asm_ok_cnt, MMC_RX_PKT_ASSEMBLY_OK_CNTR);
	mmc->mmc_rx_fpe_fragment_cnt = mgbe_update_mmc_val(osi_core, mmc->mmc_rx_fpe_fragment_cnt, MMC_RX_FPE_FRAG_CNTR);
}
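The hunks above only swap the helper that folds each MMC hardware register into the running software counter (update_mmc_val becomes the MGBE-local mgbe_update_mmc_val). As an orientation aid, a minimal sketch of what such an accumulate-on-read helper typically looks like follows; the body and the example_ prefix are assumptions for illustration, not the actual implementation in this commit.

/* Illustrative sketch only: fold a read-on-clear MMC register into the
 * previously stored value. Uses osi_readl() and osi_core->base exactly as
 * they appear elsewhere in this change; the real helper lives with the
 * MGBE MMC code and may differ in detail. */
static inline nveu64_t example_update_mmc_val(struct osi_core_priv_data *osi_core,
					      nveu64_t last_value, nveu64_t offset)
{
	nveu32_t value = osi_readl((nveu8_t *)osi_core->base + offset);

	return last_value + value;
}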
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -67,13 +67,6 @@
 #define MMC_TXVLANPACKETS_G_H		0x008A0
 #define MMC_TXLPIUSECCNTR		0x008A4
 #define MMC_TXLPITRANCNTR		0x008A8
-#define MMC_PRIO_INT_STATUS		0x008CC
-#define MMC_TX_PER_PRIO_STATUS		0x008D0
-#define MMC_TX_PER_PRIO_PKT_GB		0x008D4
-#define MMC_TX_PER_PRIO_PFC_PKT_GB	0x008D8
-#define MMC_TX_PER_PRIO_GPFC_PKT_GB	0x008DC
-#define MMC_TX_PER_PRIO_OCTET_GB_L	0x008E0
-#define MMC_TX_PER_PRIO_OCTET_GB_H	0x008E4
 #define MMC_RXPACKETCOUNT_GB_L		0x00900
 #define MMC_RXPACKETCOUNT_GB_H		0x00904
@@ -118,24 +111,9 @@
 #define MMC_RXWATCHDOGERROR		0x009A0
 #define MMC_RXLPIUSECCNTR		0x009A4
 #define MMC_RXLPITRANCNTR		0x009A8
-#define MMC_RX_DISCARD_PKTS_GB_L	0x009AC
-#define MMC_RX_DISCARD_PKTS_GB_H	0x009B0
-#define MMC_RX_DISCARD_OCTET_GB_L	0x009B4
-#define MMC_RX_DISCARD_OCTET_GB_H	0x009B8
 #define MMC_RXALIGNMENTERROR		0x009BC
-#define MMC_RX_PER_PRIO_STATUS		0x009D0
-#define MMC_RX_PER_PRIO_PKT_GB		0x009D4
-#define MMC_RX_PER_PRIO_PKT_B		0x009D8
-#define MMC_RX_PER_PRIO_PFC_PKT_GB	0x009DC
-#define MMC_RX_PER_PRIO_OCTET_GB_L	0x009E0
-#define MMC_RX_PER_PRIO_OCTET_GB_H	0x009E4
-#define MMC_RX_PER_PRIO_DISCARD_GB	0x009E8
-#define MMC_FPE_TX_INT			0x00A00
-#define MMC_FPE_TX_INT_MASK		0x00A04
 #define MMC_TX_FPE_FRAG_COUNTER		0x00A08
 #define MMC_TX_HOLD_REQ_COUNTER		0x00A0C
-#define MMC_FPE_RX_INT			0x00A20
-#define MMC_FPE_RX_INT_MASK		0x00A24
 #define MMC_RX_PKT_ASSEMBLY_ERR_CNTR	0x00A28
 #define MMC_RX_PKT_SMD_ERR_CNTR		0x00A2C
 #define MMC_RX_PKT_ASSEMBLY_OK_CNTR	0x00A30
@@ -147,8 +125,6 @@
 #define MMC_TXEXESSCOL			0x00A50
 #define MMC_TXCARRIERERROR		0x00A54
 #define MMC_TXEXECESS_DEFERRED		0x00A58
-#define MMC_IPC_RX_INT_MASK		0x00A5C
-#define MMC_IPC_RX_INT			0x00A60
 #define MMC_RXIPV4_GD_PKTS_L		0x00A64
 #define MMC_RXIPV4_GD_PKTS_H		0x00A68
 #define MMC_RXIPV4_HDRERR_PKTS_L	0x00A6C
@@ -220,7 +196,7 @@
 * 1) MAC should be init and started. see osi_start_mac()
 * 2) osi_core->osd should be populated
 */
-void mgbe_read_mmc(struct osi_core_priv_data *osi_core);
+void mgbe_read_mmc(struct osi_core_priv_data *const osi_core);

 /**
 * @brief mgbe_reset_mmc - To reset MMC registers and ether_mmc_counter
@@ -232,5 +208,5 @@ void mgbe_read_mmc(struct osi_core_priv_data *osi_core);
 * 1) MAC should be init and started. see osi_start_mac()
 * 2) osi_core->osd should be populated
 */
-void mgbe_reset_mmc(struct osi_core_priv_data *osi_core);
+void mgbe_reset_mmc(struct osi_core_priv_data *const osi_core);
 #endif
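The hunks in the next file repeatedly convert early returns into a single-exit pattern. A minimal sketch of the shape is shown here, with a hypothetical example_op() used purely for illustration; the real functions in the diff follow the same outline.

/* Illustrative sketch only: early "return -1;" sites become
 * "ret = -1; goto fail;" so each function has exactly one exit point,
 * a common MISRA C style choice. */
static nve32_t example_op(struct osi_core_priv_data *const osi_core)
{
	nve32_t ret = 0;

	if (osi_core == OSI_NULL) {
		ret = -1;
		goto fail;
	}

	/* ... actual work ... */
fail:
	return ret;
}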
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -25,40 +25,13 @@
 #include "core_local.h"
 #include "../osi/common/common.h"

-#ifdef HSI_SUPPORT
-/**
- * @brief hsi_err_code - Arry of error code and reporter ID to be use by
- * each Ethernet controller instance
- * a condition is met or a timeout occurs
- * Below is the data:
- * uncorrectable_error_code, correctable_error_code, reporter ID
- * hsi_err_code[0] to hsi_err_code[3] for MGBE instance
- * hsi_err_code[4] is for EQOS
- */
-nveu32_t hsi_err_code[][3] = {
-	{0x2A00, 0x2E08, 0x8019},
-	{0x2A01, 0x2E09, 0x801A},
-	{0x2A02, 0x2E0A, 0x801B},
-	{0x2A03, 0x2E0B, 0x801C},
-	{0x28AD, 0x2DE6, 0x8009},
-};
-#endif
-
-/**
- * @brief g_core - Static core local data array
- */
 static struct core_local g_core[MAX_CORE_INSTANCES];

-/**
- * @brief if_ops - Static core interface operations for virtual/non-virtual
- * case
- */
-static struct if_core_ops if_ops[MAX_INTERFACE_OPS];
-
 /**
 * @brief Function to validate function pointers.
 *
 * @param[in] osi_core: OSI Core private data structure.
+ * @param[in] if_ops_p: pointer to interface core operations.
 *
 * @note
 * API Group:
@@ -74,34 +47,39 @@ static nve32_t validate_if_func_ptrs(struct osi_core_priv_data *const osi_core,
 {
 	nveu32_t i = 0;
 	void *temp_ops = (void *)if_ops_p;
+	nve32_t ret = 0;
 #if __SIZEOF_POINTER__ == 8
 	nveu64_t *l_ops = (nveu64_t *)temp_ops;
 #elif __SIZEOF_POINTER__ == 4
 	nveu32_t *l_ops = (nveu32_t *)temp_ops;
 #else
-	OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
+	OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
 		     "Undefined architecture\n", 0ULL);
-	return -1;
+	ret = -1;
+	goto fail;
 #endif
+	(void) osi_core;

 	for (i = 0; i < (sizeof(*if_ops_p) / (nveu64_t)__SIZEOF_POINTER__);
 	     i++) {
 		if (*l_ops == 0U) {
-			OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
+			OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
 				     "failed at index : ", i);
-			return -1;
+			ret = -1;
+			goto fail;
 		}

 		l_ops++;
 	}
+fail:
-	return 0;
+	return ret;
 }

 /**
 * @brief Function to validate input arguments of API.
 *
 * @param[in] osi_core: OSI Core private data structure.
+ * @param[in] l_core: Core local private data structure.
 *
 * @note
 * API Group:
@@ -115,17 +93,20 @@ static nve32_t validate_if_func_ptrs(struct osi_core_priv_data *const osi_core,
 static inline nve32_t validate_if_args(struct osi_core_priv_data *const osi_core,
 				       struct core_local *l_core)
 {
+	nve32_t ret = 0;
+
 	if ((osi_core == OSI_NULL) || (l_core->if_init_done == OSI_DISABLE) ||
 	    (l_core->magic_num != (nveu64_t)osi_core)) {
-		return -1;
+		ret = -1;
 	}

-	return 0;
+	return ret;
 }

 struct osi_core_priv_data *osi_get_core(void)
 {
 	nveu32_t i;
+	struct osi_core_priv_data *osi_core = OSI_NULL;
+
 	for (i = 0U; i < MAX_CORE_INSTANCES; i++) {
 		if (g_core[i].if_init_done == OSI_ENABLE) {
@@ -136,7 +117,7 @@ struct osi_core_priv_data *osi_get_core(void)
 	}

 	if (i == MAX_CORE_INSTANCES) {
-		return OSI_NULL;
+		goto fail;
 	}

 	g_core[i].magic_num = (nveu64_t)&g_core[i].osi_core;
@@ -145,45 +126,55 @@ struct osi_core_priv_data *osi_get_core(void)
 	g_core[i].tx_ts_head.next = &g_core[i].tx_ts_head;
 	g_core[i].pps_freq = OSI_DISABLE;

-	return &g_core[i].osi_core;
+	osi_core = &g_core[i].osi_core;
+	osi_memset(osi_core, 0, sizeof(struct osi_core_priv_data));
+fail:
+	return osi_core;
 }

 struct osi_core_priv_data *get_role_pointer(nveu32_t role)
 {
 	nveu32_t i;
+	struct osi_core_priv_data *ret_ptr = OSI_NULL;
+
 	if ((role != OSI_PTP_M2M_PRIMARY) &&
 	    (role != OSI_PTP_M2M_SECONDARY)) {
-		return OSI_NULL;
+		goto done;
 	}

 	/* Current approch to give pointer for 1st role */
 	for (i = 0U; i < MAX_CORE_INSTANCES; i++) {
 		if ((g_core[i].if_init_done == OSI_ENABLE) &&
 		    (g_core[i].ether_m2m_role == role)) {
-			return &g_core[i].osi_core;
+			ret_ptr = &g_core[i].osi_core;
+			break;
 		}
 	}

-	return OSI_NULL;
+done:
+	return ret_ptr;
 }

 nve32_t osi_init_core_ops(struct osi_core_priv_data *const osi_core)
 {
-	struct core_local *l_core = (struct core_local *)osi_core;
-	nve32_t ret = -1;
+	struct core_local *l_core = (struct core_local *)(void *)osi_core;
+	static struct if_core_ops if_ops[MAX_INTERFACE_OPS];
+	nve32_t ret = 0;

 	if (osi_core == OSI_NULL) {
-		return -1;
+		ret = -1;
+		goto fail;
 	}

 	if (osi_core->use_virtualization > OSI_ENABLE) {
-		return ret;
+		ret = -1;
+		goto fail;
 	}

 	if ((l_core->magic_num != (nveu64_t)osi_core) ||
 	    (l_core->if_init_done == OSI_ENABLE)) {
-		return -1;
+		ret = -1;
+		goto fail;
 	}

 	l_core->if_ops_p = &if_ops[osi_core->use_virtualization];
@@ -195,16 +186,17 @@ nve32_t osi_init_core_ops(struct osi_core_priv_data *const osi_core)
 	}

 	if (validate_if_func_ptrs(osi_core, l_core->if_ops_p) < 0) {
-		OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
+		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
 			     "Interface function validation failed\n", 0ULL);
-		return -1;
+		ret = -1;
+		goto fail;
 	}

 	ret = l_core->if_ops_p->if_init_core_ops(osi_core);
 	if (ret < 0) {
-		OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
+		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
 			     "if_init_core_ops failed\n", 0ULL);
-		return ret;
+		goto fail;
 	}
 	l_core->ts_lock = OSI_DISABLE;
 	l_core->ether_m2m_role = osi_core->m2m_role;
@@ -228,11 +220,11 @@ nve32_t osi_init_core_ops(struct osi_core_priv_data *const osi_core)
 	if (osi_core->pps_frq <= OSI_ENABLE) {
 		l_core->pps_freq = osi_core->pps_frq;
 	} else {
-		OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
+		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
 			     "invalid pps_frq\n", (nveu64_t)osi_core->pps_frq);
 		ret = -1;
 	}
+fail:
 	return ret;
 }
@@ -240,67 +232,79 @@ nve32_t osi_write_phy_reg(struct osi_core_priv_data *const osi_core,
 			  const nveu32_t phyaddr, const nveu32_t phyreg,
 			  const nveu16_t phydata)
 {
-	struct core_local *l_core = (struct core_local *)osi_core;
+	nve32_t ret = -1;
+	struct core_local *l_core = (struct core_local *)(void *)osi_core;

 	if (validate_if_args(osi_core, l_core) < 0) {
-		return -1;
+		goto fail;
 	}

-	return l_core->if_ops_p->if_write_phy_reg(osi_core, phyaddr, phyreg,
+	ret = l_core->if_ops_p->if_write_phy_reg(osi_core, phyaddr, phyreg,
 						  phydata);
+fail:
+	return ret;
 }

 nve32_t osi_read_phy_reg(struct osi_core_priv_data *const osi_core,
 			 const nveu32_t phyaddr, const nveu32_t phyreg)
 {
-	struct core_local *l_core = (struct core_local *)osi_core;
+	nve32_t ret = -1;
+	struct core_local *l_core = (struct core_local *)(void *)osi_core;

 	if (validate_if_args(osi_core, l_core) < 0) {
-		return -1;
+		goto fail;
 	}

-	return l_core->if_ops_p->if_read_phy_reg(osi_core, phyaddr, phyreg);
+	ret = l_core->if_ops_p->if_read_phy_reg(osi_core, phyaddr, phyreg);
+fail:
+	return ret;
 }

-nve32_t osi_hw_core_init(struct osi_core_priv_data *const osi_core,
-			 nveu32_t tx_fifo_size, nveu32_t rx_fifo_size)
+nve32_t osi_hw_core_init(struct osi_core_priv_data *const osi_core)
 {
-	struct core_local *l_core = (struct core_local *)osi_core;
+	nve32_t ret = -1;
+	struct core_local *l_core = (struct core_local *)(void *)osi_core;

 	if (validate_if_args(osi_core, l_core) < 0) {
-		return -1;
+		goto fail;
 	}

-	return l_core->if_ops_p->if_core_init(osi_core, tx_fifo_size,
-					      rx_fifo_size);
+	ret = l_core->if_ops_p->if_core_init(osi_core);
+fail:
+	return ret;
 }

 nve32_t osi_hw_core_deinit(struct osi_core_priv_data *const osi_core)
 {
-	struct core_local *l_core = (struct core_local *)osi_core;
+	nve32_t ret = -1;
+	struct core_local *l_core = (struct core_local *)(void *)osi_core;

 	if (validate_if_args(osi_core, l_core) < 0) {
-		return -1;
+		goto fail;
 	}

-	return l_core->if_ops_p->if_core_deinit(osi_core);
+	ret = l_core->if_ops_p->if_core_deinit(osi_core);
+fail:
+	return ret;
 }

 nve32_t osi_handle_ioctl(struct osi_core_priv_data *osi_core,
 			 struct osi_ioctl *data)
 {
-	struct core_local *l_core = (struct core_local *)osi_core;
+	struct core_local *l_core = (struct core_local *)(void *)osi_core;
 	nve32_t ret = -1;

 	if (validate_if_args(osi_core, l_core) < 0) {
-		return ret;
+		goto fail;
 	}

 	if (data == OSI_NULL) {
-		OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
+		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
 			     "CORE: Invalid argument\n", 0ULL);
-		return ret;
+		goto fail;
 	}

-	return l_core->if_ops_p->if_handle_ioctl(osi_core, data);
+	ret = l_core->if_ops_p->if_handle_ioctl(osi_core, data);
+fail:
+	return ret;
 }

osi/core/osi_hal.c (2517 changed lines): file diff suppressed because it is too large.

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -20,6 +20,7 @@
 * DEALINGS IN THE SOFTWARE.
 */

+#ifndef OSI_STRIPPED_LIB
 #include "../osi/common/common.h"
 #include "vlan_filter.h"

@@ -35,11 +36,11 @@
 * @return Index from VID array if match found.
 * @return Return VLAN_HW_FILTER_FULL_IDX if not found.
 */
-static inline unsigned int get_vlan_filter_idx(
+static inline nveu32_t get_vlan_filter_idx(
 				struct osi_core_priv_data *osi_core,
-				unsigned short vlan_id)
+				nveu16_t vlan_id)
 {
-	unsigned int vid_idx = VLAN_HW_FILTER_FULL_IDX;
+	nveu32_t vid_idx = VLAN_HW_FILTER_FULL_IDX;
 	unsigned long bitmap = osi_core->vf_bitmap;
 	unsigned long temp = 0U;

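The VLAN filter hunks below replace plain C integer types with the OSI sized typedefs. The width mapping assumed by that rewrite is sketched here for reference only; the authoritative definitions live in the OSI type headers, not in this diff.

/* Assumed width mapping (illustrative, not the real header): */
typedef unsigned char      nveu8_t;   /* replaces unsigned char  */
typedef unsigned short     nveu16_t;  /* replaces unsigned short */
typedef unsigned int       nveu32_t;  /* replaces unsigned int   */
typedef int                nve32_t;   /* replaces int            */
typedef unsigned long long nveu64_t;  /* 64-bit unsigned         */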
@@ -48,7 +49,7 @@ static inline unsigned int get_vlan_filter_idx(

 		if (osi_core->vid[temp] == vlan_id) {
 			/* vlan ID match found */
-			vid_idx = (unsigned int)temp;
+			vid_idx = (nveu32_t)temp;
 			break;
 		}

@@ -70,11 +71,11 @@ static inline unsigned int get_vlan_filter_idx(
 *
 * @return 0 on success
 */
-static inline int allow_all_vid_tags(unsigned char *base,
-				     unsigned int pass_all_vids)
+static inline nve32_t allow_all_vid_tags(nveu8_t *base,
+					 nveu32_t pass_all_vids)
 {
-	unsigned int vlan_tag_reg = 0;
-	unsigned int hash_filter_reg = 0;
+	nveu32_t vlan_tag_reg = 0;
+	nveu32_t hash_filter_reg = 0;

 	vlan_tag_reg = osi_readl(base + MAC_VLAN_TAG_CTRL);
 	hash_filter_reg = osi_readl(base + MAC_VLAN_HASH_FILTER);
@@ -84,7 +85,7 @@ static inline int allow_all_vid_tags(unsigned char *base,
 		hash_filter_reg |= VLAN_HASH_ALLOW_ALL;
 	} else {
 		vlan_tag_reg &= ~MAC_VLAN_TAG_CTRL_VHTM;
-		hash_filter_reg &= (unsigned int) ~VLAN_HASH_ALLOW_ALL;
+		hash_filter_reg &= (nveu32_t) ~VLAN_HASH_ALLOW_ALL;
 	}

 	osi_writel(vlan_tag_reg, base + MAC_VLAN_TAG_CTRL);
@@ -107,11 +108,11 @@ static inline int allow_all_vid_tags(unsigned char *base,
 * @return 0 on Success.
 * @return negative value on failure
 */
-static inline int is_vlan_id_enqueued(struct osi_core_priv_data *osi_core,
-				      unsigned short vlan_id,
-				      unsigned int *idx)
+static inline nve32_t is_vlan_id_enqueued(struct osi_core_priv_data *osi_core,
+					  nveu16_t vlan_id,
+					  nveu32_t *idx)
 {
-	unsigned int i = 0;
+	nveu32_t i = 0;

 	if (osi_core->vlan_filter_cnt == VLAN_HW_FILTER_FULL_IDX) {
 		/* No elements in SW queue to search */
@@ -140,11 +141,11 @@ static inline int is_vlan_id_enqueued(struct osi_core_priv_data *osi_core,
 * @return 0 on success.
 * @return negative value on failure.
 */
-static inline int enqueue_vlan_id(struct osi_core_priv_data *osi_core,
-				  unsigned short vlan_id)
+static inline nve32_t enqueue_vlan_id(struct osi_core_priv_data *osi_core,
+				      nveu16_t vlan_id)
 {
-	int ret = 0;
-	unsigned int idx;
+	nve32_t ret = 0;
+	nveu32_t idx;

 	if (osi_core->vlan_filter_cnt == VLAN_NUM_VID) {
 		/* Entire SW queue full */
@@ -154,7 +155,7 @@ static inline int enqueue_vlan_id(struct osi_core_priv_data *osi_core,
 	/* Check if requested vlan_id alredy queued */
 	ret = is_vlan_id_enqueued(osi_core, vlan_id, &idx);
 	if (ret == 0) {
-		OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
+		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
 			     "VLAN ID already programmed\n",
 			     0ULL);
 		return -1;
@@ -177,25 +178,25 @@ static inline int enqueue_vlan_id(struct osi_core_priv_data *osi_core,
 * @return 0 on success.
 * @return -1 on failure.
 */
-static inline int poll_for_vlan_filter_reg_rw(
+static inline nve32_t poll_for_vlan_filter_reg_rw(
 				struct osi_core_priv_data *osi_core)
 {
-	unsigned int retry = 10;
-	unsigned int count;
-	unsigned int val = 0;
-	int cond = 1;
+	nveu32_t retry = 10;
+	nveu32_t count;
+	nveu32_t val = 0;
+	nve32_t cond = 1;

 	count = 0;
 	while (cond == 1) {
 		if (count > retry) {
-			OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL,
+			OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
 				     "VLAN filter update timedout\n", 0ULL);
 			return -1;
 		}

 		count++;

-		val = osi_readl((unsigned char *)osi_core->base +
+		val = osi_readl((nveu8_t *)osi_core->base +
 				MAC_VLAN_TAG_CTRL);
 		if ((val & MAC_VLAN_TAG_CTRL_OB) == OSI_NONE) {
 			/* Set cond to 0 to exit loop */
@@ -222,17 +223,17 @@ static inline int poll_for_vlan_filter_reg_rw(
 * @return 0 on success
 * @return -1 on failure.
 */
-static inline int update_vlan_filters(struct osi_core_priv_data *osi_core,
-				      unsigned int vid_idx,
-				      unsigned int val)
+static inline nve32_t update_vlan_filters(struct osi_core_priv_data *osi_core,
+					  nveu32_t vid_idx,
+					  nveu32_t val)
 {
-	unsigned char *base = (unsigned char *)osi_core->base;
-	int ret = 0;
+	nveu8_t *base = (nveu8_t *)osi_core->base;
+	nve32_t ret = 0;

 	osi_writel(val, base + MAC_VLAN_TAG_DATA);

 	val = osi_readl(base + MAC_VLAN_TAG_CTRL);
-	val &= (unsigned int) ~MAC_VLAN_TAG_CTRL_OFS_MASK;
+	val &= (nveu32_t) ~MAC_VLAN_TAG_CTRL_OFS_MASK;
 	val |= vid_idx << MAC_VLAN_TAG_CTRL_OFS_SHIFT;
 	val &= ~MAC_VLAN_TAG_CTRL_CT;
 	val |= MAC_VLAN_TAG_CTRL_OB;
@@ -240,7 +241,7 @@ static inline int update_vlan_filters(struct osi_core_priv_data *osi_core,

 	ret = poll_for_vlan_filter_reg_rw(osi_core);
 	if (ret < 0) {
-		OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL,
+		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
 			     "Failed to update VLAN filters\n", 0ULL);
 		return -1;
 	}
@@ -259,13 +260,13 @@ static inline int update_vlan_filters(struct osi_core_priv_data *osi_core,
 * @return 0 on success
 * @return -1 on failure.
 */
-static inline int add_vlan_id(struct osi_core_priv_data *osi_core,
-			      struct core_ops *ops_p,
-			      unsigned short vlan_id)
+static inline nve32_t add_vlan_id(struct osi_core_priv_data *osi_core,
+				  struct core_ops *ops_p,
+				  nveu16_t vlan_id)
 {
-	unsigned int vid_idx = 0;
-	unsigned int val = 0;
-	int ret = 0;
+	nveu32_t vid_idx = 0;
+	nveu32_t val = 0;
+	nve32_t ret = 0;

 	/* Check if VLAN ID already programmed */
vid_idx = get_vlan_filter_idx(osi_core, vlan_id);
|
vid_idx = get_vlan_filter_idx(osi_core, vlan_id);
|
||||||
@@ -277,7 +278,7 @@ static inline int add_vlan_id(struct osi_core_priv_data *osi_core,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Get free index to add the VID */
|
/* Get free index to add the VID */
|
||||||
vid_idx = (unsigned int) __builtin_ctzl(~osi_core->vf_bitmap);
|
vid_idx = (nveu32_t) __builtin_ctzl(~osi_core->vf_bitmap);
|
||||||
/* If there is no free filter index add into SW VLAN filter queue to store */
|
/* If there is no free filter index add into SW VLAN filter queue to store */
|
||||||
if (vid_idx == VLAN_HW_FILTER_FULL_IDX) {
|
if (vid_idx == VLAN_HW_FILTER_FULL_IDX) {
|
||||||
/* Add VLAN ID to SW queue */
|
/* Add VLAN ID to SW queue */
|
||||||
@@ -299,14 +300,14 @@ static inline int add_vlan_id(struct osi_core_priv_data *osi_core,
|
|||||||
OSI_DISABLE,
|
OSI_DISABLE,
|
||||||
OSI_DISABLE);
|
OSI_DISABLE);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
|
||||||
"Failed to enable VLAN filtering\n", 0ULL);
|
"Failed to enable VLAN filtering\n", 0ULL);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
val = osi_readl((unsigned char *)osi_core->base + MAC_VLAN_TAG_DATA);
|
val = osi_readl((nveu8_t *)osi_core->base + MAC_VLAN_TAG_DATA);
|
||||||
val &= (unsigned int) ~VLAN_VID_MASK;
|
val &= (nveu32_t) ~VLAN_VID_MASK;
|
||||||
val |= (vlan_id | MAC_VLAN_TAG_DATA_ETV | MAC_VLAN_TAG_DATA_VEN);
|
val |= (vlan_id | MAC_VLAN_TAG_DATA_ETV | MAC_VLAN_TAG_DATA_VEN);
|
||||||
|
|
||||||
return update_vlan_filters(osi_core, vid_idx, val);
|
return update_vlan_filters(osi_core, vid_idx, val);
|
||||||
@@ -325,10 +326,10 @@ static inline int add_vlan_id(struct osi_core_priv_data *osi_core,
|
|||||||
* @return 0 on success
|
* @return 0 on success
|
||||||
* @return -1 on failure.
|
* @return -1 on failure.
|
||||||
*/
|
*/
|
||||||
static inline int dequeue_vlan_id(struct osi_core_priv_data *osi_core,
|
static inline nve32_t dequeue_vlan_id(struct osi_core_priv_data *osi_core,
|
||||||
unsigned int idx)
|
nveu32_t idx)
|
||||||
{
|
{
|
||||||
unsigned int i;
|
nveu32_t i;
|
||||||
|
|
||||||
if (osi_core->vlan_filter_cnt == VLAN_HW_MAX_NRVF) {
|
if (osi_core->vlan_filter_cnt == VLAN_HW_MAX_NRVF) {
|
||||||
return -1;
|
return -1;
|
||||||
@@ -336,14 +337,14 @@ static inline int dequeue_vlan_id(struct osi_core_priv_data *osi_core,
|
|||||||
|
|
||||||
/* Left shift the array elements by one for the VID order */
|
/* Left shift the array elements by one for the VID order */
|
||||||
for (i = idx; i <= osi_core->vlan_filter_cnt; i++) {
|
for (i = idx; i <= osi_core->vlan_filter_cnt; i++) {
|
||||||
osi_core->vid[i] = osi_core->vid[i + 1];
|
osi_core->vid[i] = osi_core->vid[i + 1U];
|
||||||
}
|
}
|
||||||
|
|
||||||
osi_core->vid[i] = VLAN_ID_INVALID;
|
osi_core->vid[i] = VLAN_ID_INVALID;
|
||||||
osi_core->vlan_filter_cnt--;
|
osi_core->vlan_filter_cnt--;
|
||||||
|
|
||||||
if (osi_core->vlan_filter_cnt == VLAN_HW_MAX_NRVF) {
|
if (osi_core->vlan_filter_cnt == VLAN_HW_MAX_NRVF) {
|
||||||
allow_all_vid_tags(osi_core->base, OSI_DISABLE);
|
return allow_all_vid_tags(osi_core->base, OSI_DISABLE);
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
@@ -363,14 +364,14 @@ static inline int dequeue_vlan_id(struct osi_core_priv_data *osi_core,
|
|||||||
* @return 0 on success
|
* @return 0 on success
|
||||||
* @return -1 on failure.
|
* @return -1 on failure.
|
||||||
*/
|
*/
|
||||||
static inline int dequeue_vid_to_add_filter_reg(
|
static inline nve32_t dequeue_vid_to_add_filter_reg(
|
||||||
struct osi_core_priv_data *osi_core,
|
struct osi_core_priv_data *osi_core,
|
||||||
unsigned int vid_idx)
|
nveu32_t vid_idx)
|
||||||
{
|
{
|
||||||
unsigned int val = 0;
|
nveu32_t val = 0;
|
||||||
unsigned short vlan_id = 0;
|
nveu16_t vlan_id = 0;
|
||||||
unsigned int i = 0;
|
nveu32_t i = 0;
|
||||||
int ret = 0;
|
nve32_t ret = 0;
|
||||||
|
|
||||||
vlan_id = osi_core->vid[VLAN_HW_FILTER_FULL_IDX];
|
vlan_id = osi_core->vid[VLAN_HW_FILTER_FULL_IDX];
|
||||||
if (vlan_id == VLAN_ID_INVALID) {
|
if (vlan_id == VLAN_ID_INVALID) {
|
||||||
@@ -380,8 +381,8 @@ static inline int dequeue_vid_to_add_filter_reg(
|
|||||||
osi_core->vf_bitmap |= OSI_BIT(vid_idx);
|
osi_core->vf_bitmap |= OSI_BIT(vid_idx);
|
||||||
osi_core->vid[vid_idx] = vlan_id;
|
osi_core->vid[vid_idx] = vlan_id;
|
||||||
|
|
||||||
val = osi_readl((unsigned char *)osi_core->base + MAC_VLAN_TAG_DATA);
|
val = osi_readl((nveu8_t *)osi_core->base + MAC_VLAN_TAG_DATA);
|
||||||
val &= (unsigned int) ~VLAN_VID_MASK;
|
val &= (nveu32_t) ~VLAN_VID_MASK;
|
||||||
val |= (vlan_id | MAC_VLAN_TAG_DATA_ETV | MAC_VLAN_TAG_DATA_VEN);
|
val |= (vlan_id | MAC_VLAN_TAG_DATA_ETV | MAC_VLAN_TAG_DATA_VEN);
|
||||||
|
|
||||||
ret = update_vlan_filters(osi_core, vid_idx, val);
|
ret = update_vlan_filters(osi_core, vid_idx, val);
|
||||||
@@ -390,7 +391,7 @@ static inline int dequeue_vid_to_add_filter_reg(
|
|||||||
}
|
}
|
||||||
|
|
||||||
for (i = VLAN_HW_FILTER_FULL_IDX; i <= osi_core->vlan_filter_cnt; i++) {
|
for (i = VLAN_HW_FILTER_FULL_IDX; i <= osi_core->vlan_filter_cnt; i++) {
|
||||||
osi_core->vid[i] = osi_core->vid[i + 1];
|
osi_core->vid[i] = osi_core->vid[i + 1U];
|
||||||
}
|
}
|
||||||
|
|
||||||
osi_core->vid[i] = VLAN_ID_INVALID;
|
osi_core->vid[i] = VLAN_ID_INVALID;
|
||||||
@@ -409,14 +410,14 @@ static inline int dequeue_vid_to_add_filter_reg(
|
|||||||
* @return 0 on success
|
* @return 0 on success
|
||||||
* @return -1 on failure.
|
* @return -1 on failure.
|
||||||
*/
|
*/
|
||||||
static inline int del_vlan_id(struct osi_core_priv_data *osi_core,
|
static inline nve32_t del_vlan_id(struct osi_core_priv_data *osi_core,
|
||||||
struct core_ops *ops_p,
|
struct core_ops *ops_p,
|
||||||
unsigned short vlan_id)
|
nveu16_t vlan_id)
|
||||||
{
|
{
|
||||||
unsigned int vid_idx = 0;
|
nveu32_t vid_idx = 0;
|
||||||
unsigned int val = 0;
|
nveu32_t val = 0;
|
||||||
unsigned int idx;
|
nveu32_t idx;
|
||||||
int ret = 0;
|
nve32_t ret = 0;
|
||||||
|
|
||||||
/* Search for vlan filter index to be deleted */
|
/* Search for vlan filter index to be deleted */
|
||||||
vid_idx = get_vlan_filter_idx(osi_core, vlan_id);
|
vid_idx = get_vlan_filter_idx(osi_core, vlan_id);
|
||||||
@@ -445,26 +446,29 @@ static inline int del_vlan_id(struct osi_core_priv_data *osi_core,
|
|||||||
OSI_DISABLE,
|
OSI_DISABLE,
|
||||||
OSI_DISABLE);
|
OSI_DISABLE);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
|
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
|
||||||
"Failed to disable VLAN filtering\n", 0ULL);
|
"Failed to disable VLAN filtering\n", 0ULL);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (osi_core->vlan_filter_cnt == VLAN_HW_MAX_NRVF) {
|
if (osi_core->vlan_filter_cnt == VLAN_HW_MAX_NRVF) {
|
||||||
allow_all_vid_tags(osi_core->base, OSI_DISABLE);
|
ret = allow_all_vid_tags(osi_core->base, OSI_DISABLE);
|
||||||
|
if (ret < 0) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* if SW queue is not empty dequeue from SW queue and update filter */
|
/* if SW queue is not empty dequeue from SW queue and update filter */
|
||||||
return dequeue_vid_to_add_filter_reg(osi_core, vid_idx);
|
return dequeue_vid_to_add_filter_reg(osi_core, vid_idx);
|
||||||
}
|
}
|
||||||
|
|
||||||
int update_vlan_id(struct osi_core_priv_data *osi_core,
|
nve32_t update_vlan_id(struct osi_core_priv_data *osi_core,
|
||||||
struct core_ops *ops_p,
|
struct core_ops *ops_p,
|
||||||
unsigned int vid)
|
nveu32_t vid)
|
||||||
{
|
{
|
||||||
unsigned int action = vid & VLAN_ACTION_MASK;
|
nveu32_t action = vid & VLAN_ACTION_MASK;
|
||||||
unsigned short vlan_id = vid & VLAN_VID_MASK;
|
nveu16_t vlan_id = (nveu16_t)(vid & VLAN_VID_MASK);
|
||||||
|
|
||||||
if (action == OSI_VLAN_ACTION_ADD) {
|
if (action == OSI_VLAN_ACTION_ADD) {
|
||||||
return add_vlan_id(osi_core, ops_p, vlan_id);
|
return add_vlan_id(osi_core, ops_p, vlan_id);
|
||||||
@@ -472,3 +476,4 @@ int update_vlan_id(struct osi_core_priv_data *osi_core,
|
|||||||
|
|
||||||
return del_vlan_id(osi_core, ops_p, vlan_id);
|
return del_vlan_id(osi_core, ops_p, vlan_id);
|
||||||
}
|
}
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
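The hunks above keep update_vlan_id() as the single public entry point; it packs the requested action and the VLAN ID into one nveu32_t argument. A minimal caller-side sketch, assuming OSI_VLAN_ACTION_ADD occupies the action bit selected by VLAN_ACTION_MASK (the helper below is hypothetical and not part of this change):

    /* Hypothetical helper illustrating the vid encoding consumed by update_vlan_id(). */
    static nve32_t example_add_then_delete_vid(struct osi_core_priv_data *osi_core,
                                               struct core_ops *ops_p,
                                               nveu16_t vlan_id)
    {
        nve32_t ret;

        /* bit 31 (VLAN_ACTION_MASK) selects the action, the low bits carry the VID */
        ret = update_vlan_id(osi_core, ops_p,
                             OSI_VLAN_ACTION_ADD | (nveu32_t)vlan_id);
        if (ret < 0) {
            return ret;
        }

        /* any encoding without the ADD action bit requests a delete */
        return update_vlan_id(osi_core, ops_p, (nveu32_t)vlan_id);
    }

The hunks that follow apply the same typedef migration and OSI_STRIPPED_LIB guarding to the matching VLAN filter header (note the VLAN_FILTER_H include guard).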
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -26,6 +26,7 @@
 #include <osi_core.h>
 #include "core_local.h"
 
+#ifndef OSI_STRIPPED_LIB
 /**
 * @addtogroup MAC-VLAN MAC VLAN configuration registers and bit fields
 *
@@ -36,7 +37,7 @@
 #define MAC_VLAN_TAG_CTRL 0x50
 #define MAC_VLAN_TAG_DATA 0x54
 #define MAC_VLAN_HASH_FILTER 0x58
-#define MAC_VLAN_TAG_CTRL_OFS_MASK 0x7C
+#define MAC_VLAN_TAG_CTRL_OFS_MASK 0x7CU
 #define MAC_VLAN_TAG_CTRL_OFS_SHIFT 2U
 #define MAC_VLAN_TAG_CTRL_CT OSI_BIT(1)
 #define MAC_VLAN_TAG_CTRL_OB OSI_BIT(0)
@@ -53,9 +54,9 @@
 */
 #define VLAN_HW_MAX_NRVF 32U
 #define VLAN_HW_FILTER_FULL_IDX VLAN_HW_MAX_NRVF
-#define VLAN_VID_MASK 0xFFFF
+#define VLAN_VID_MASK 0xFFFFU
-#define VLAN_ID_INVALID 0xFFFF
+#define VLAN_ID_INVALID 0xFFFFU
-#define VLAN_HASH_ALLOW_ALL 0xFFFF
+#define VLAN_HASH_ALLOW_ALL 0xFFFFU
 #define VLAN_ACTION_MASK OSI_BIT(31)
 /** @} */
 
@@ -70,7 +71,7 @@
 * @return 0 on success
 * @return -1 on failure.
 */
-int update_vlan_id(struct osi_core_priv_data *osi_core,
-struct core_ops *ops_p,
-unsigned int vid);
+nve32_t update_vlan_id(struct osi_core_priv_data *osi_core,
+struct core_ops *ops_p, nveu32_t vid);
+#endif /* !OSI_STRIPPED_LIB */
 #endif /* VLAN_FILTER_H */
245
osi/core/xpcs.c
@@ -21,6 +21,7 @@
 */
 
 #include "xpcs.h"
+#include "core_local.h"
 
 /**
 * @brief xpcs_poll_for_an_complete - Polling for AN complete.
@@ -34,22 +35,22 @@
 * @retval 0 on success
 * @retval -1 on failure.
 */
-static inline int xpcs_poll_for_an_complete(struct osi_core_priv_data *osi_core,
+static inline nve32_t xpcs_poll_for_an_complete(struct osi_core_priv_data *osi_core,
-unsigned int *an_status)
+nveu32_t *an_status)
 {
 void *xpcs_base = osi_core->xpcs_base;
-unsigned int status = 0;
+nveu32_t status = 0;
-unsigned int retry = 1000;
+nveu32_t retry = 1000;
-unsigned int count;
+nveu32_t count;
-int cond = 1;
+nve32_t cond = 1;
-int ret = 0;
+nve32_t ret = 0;
 
 /* 14. Poll for AN complete */
 cond = 1;
 count = 0;
 while (cond == 1) {
 if (count > retry) {
-OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL,
+OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
 "XPCS AN completion timed out\n", 0ULL);
 #ifdef HSI_SUPPORT
 if (osi_core->hsi.enabled == OSI_ENABLE) {
@@ -59,7 +60,8 @@ static inline int xpcs_poll_for_an_complete(struct osi_core_priv_data *osi_core,
 osi_core->hsi.report_count_err[AUTONEG_ERR_IDX] = OSI_ENABLE;
 }
 #endif
-return -1;
+ret = -1;
+goto fail;
 }
 
 count++;
@@ -73,20 +75,22 @@ static inline int xpcs_poll_for_an_complete(struct osi_core_priv_data *osi_core,
 status &= ~XPCS_VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR;
 ret = xpcs_write_safety(osi_core, XPCS_VR_MII_AN_INTR_STS, status);
 if (ret != 0) {
-return ret;
+goto fail;
 }
 cond = 0;
 }
 }
 
 if ((status & XPCS_USXG_AN_STS_SPEED_MASK) == 0U) {
-OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL,
+OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
 "XPCS AN completed with zero speed\n", 0ULL);
-return -1;
+ret = -1;
+goto fail;
 }
 
 *an_status = status;
-return 0;
+fail:
+return ret;
 }
 
 /**
@@ -100,11 +104,11 @@ static inline int xpcs_poll_for_an_complete(struct osi_core_priv_data *osi_core,
 * @retval 0 on success
 * @retval -1 on failure
 */
-static inline int xpcs_set_speed(struct osi_core_priv_data *osi_core,
+static inline nve32_t xpcs_set_speed(struct osi_core_priv_data *osi_core,
-unsigned int status)
+nveu32_t status)
 {
-unsigned int speed = status & XPCS_USXG_AN_STS_SPEED_MASK;
+nveu32_t speed = status & XPCS_USXG_AN_STS_SPEED_MASK;
-unsigned int ctrl = 0;
+nveu32_t ctrl = 0;
 void *xpcs_base = osi_core->xpcs_base;
 
 ctrl = xpcs_read(xpcs_base, XPCS_SR_MII_CTRL);
@@ -141,21 +145,21 @@ static inline int xpcs_set_speed(struct osi_core_priv_data *osi_core,
 * @retval 0 on success
 * @retval -1 on failure.
 */
-int xpcs_start(struct osi_core_priv_data *osi_core)
+nve32_t xpcs_start(struct osi_core_priv_data *osi_core)
 {
 void *xpcs_base = osi_core->xpcs_base;
-unsigned int an_status = 0;
+nveu32_t an_status = 0;
-unsigned int retry = RETRY_COUNT;
+nveu32_t retry = RETRY_COUNT;
-unsigned int count = 0;
+nveu32_t count = 0;
-unsigned int ctrl = 0;
+nveu32_t ctrl = 0;
-int ret = 0;
+nve32_t ret = 0;
-int cond = COND_NOT_MET;
+nve32_t cond = COND_NOT_MET;
 
 if (osi_core->xpcs_base == OSI_NULL) {
-OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL,
+OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
 "XPCS base is NULL", 0ULL);
-/* TODO: Remove this once silicon arrives */
-return 0;
+ret = -1;
+goto fail;
 }
 
 if ((osi_core->phy_iface_mode == OSI_USXGMII_MODE_10G) ||
@@ -164,16 +168,16 @@ int xpcs_start(struct osi_core_priv_data *osi_core)
 ctrl |= XPCS_SR_MII_CTRL_AN_ENABLE;
 ret = xpcs_write_safety(osi_core, XPCS_SR_MII_CTRL, ctrl);
 if (ret != 0) {
-return ret;
+goto fail;
 }
 ret = xpcs_poll_for_an_complete(osi_core, &an_status);
 if (ret < 0) {
-return ret;
+goto fail;
 }
 
 ret = xpcs_set_speed(osi_core, an_status);
 if (ret != 0) {
-return ret;
+goto fail;
 }
 /* USXGMII Rate Adaptor Reset before data transfer */
 ctrl = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_DIG_CTRL1);
@@ -181,7 +185,8 @@ int xpcs_start(struct osi_core_priv_data *osi_core)
 xpcs_write(xpcs_base, XPCS_VR_XS_PCS_DIG_CTRL1, ctrl);
 while (cond == COND_NOT_MET) {
 if (count > retry) {
-return -1;
+ret = -1;
+goto fail;
 }
 
 count++;
@@ -200,7 +205,8 @@ int xpcs_start(struct osi_core_priv_data *osi_core)
 count = 0;
 while (cond == COND_NOT_MET) {
 if (count > retry) {
-return -1;
+ret = -1;
+break;
 }
 
 count++;
@@ -210,11 +216,16 @@ int xpcs_start(struct osi_core_priv_data *osi_core)
 XPCS_SR_XS_PCS_STS1_RLU) {
 cond = COND_MET;
 } else {
-osi_core->osd_ops.udelay(1000U);
+/* Maximum wait delay as per HW team is 1msec.
+* So add a loop for 1000 iterations with 1usec delay,
+* so that if check get satisfies before 1msec will come
+* out of loop and it can save some boot time
+*/
+osi_core->osd_ops.udelay(1U);
 }
 }
-return 0;
+fail:
+return ret;
 }
 
 /**
@@ -230,22 +241,19 @@ int xpcs_start(struct osi_core_priv_data *osi_core)
 * @retval -1 on failure.
 */
 static nve32_t xpcs_uphy_lane_bring_up(struct osi_core_priv_data *osi_core,
-unsigned int lane_init_en)
+nveu32_t lane_init_en)
 {
 void *xpcs_base = osi_core->xpcs_base;
-nveu32_t retry = XPCS_RETRY_COUNT;
+nveu32_t retry = 5U;
 nve32_t cond = COND_NOT_MET;
 nveu32_t val = 0;
 nveu32_t count;
+nve32_t ret = 0;
 
 val = osi_readla(osi_core,
 (nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_STATUS);
-if ((val & XPCS_WRAP_UPHY_STATUS_TX_P_UP_STATUS) ==
+if ((val & XPCS_WRAP_UPHY_STATUS_TX_P_UP_STATUS) !=
 XPCS_WRAP_UPHY_STATUS_TX_P_UP_STATUS) {
-/* return success if TX lane is already UP */
-return 0;
-}
 
 val = osi_readla(osi_core,
 (nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_HW_INIT_CTRL);
 val |= lane_init_en;
@@ -255,7 +263,8 @@ static nve32_t xpcs_uphy_lane_bring_up(struct osi_core_priv_data *osi_core,
 count = 0;
 while (cond == COND_NOT_MET) {
 if (count > retry) {
-return -1;
+ret = -1;
+goto fail;
 }
 count++;
 
@@ -265,11 +274,17 @@ static nve32_t xpcs_uphy_lane_bring_up(struct osi_core_priv_data *osi_core,
 /* exit loop */
 cond = COND_MET;
 } else {
-osi_core->osd_ops.udelay(500U);
+/* Max wait time is 1usec.
+* Most of the time loop got exited in first iteration.
+* but added an extra count of 4 for safer side
+*/
+osi_core->osd_ops.udelay(1U);
+}
 }
 }
 
-return 0;
+fail:
+return ret;
 }
 
 /**
@@ -285,15 +300,17 @@ static nve32_t xpcs_uphy_lane_bring_up(struct osi_core_priv_data *osi_core,
 static nve32_t xpcs_check_pcs_lock_status(struct osi_core_priv_data *osi_core)
 {
 void *xpcs_base = osi_core->xpcs_base;
-nveu32_t retry = XPCS_RETRY_COUNT;
+nveu32_t retry = RETRY_COUNT;
 nve32_t cond = COND_NOT_MET;
 nveu32_t val = 0;
 nveu32_t count;
+nve32_t ret = 0;
 
 count = 0;
 while (cond == COND_NOT_MET) {
 if (count > retry) {
-return -1;
+ret = -1;
+goto fail;
 }
 count++;
 
@@ -304,14 +321,19 @@ static nve32_t xpcs_check_pcs_lock_status(struct osi_core_priv_data *osi_core)
 /* exit loop */
 cond = COND_MET;
 } else {
-osi_core->osd_ops.udelay(500U);
+/* Maximum wait delay as per HW team is 1msec.
+* So add a loop for 1000 iterations with 1usec delay,
+* so that if check get satisfies before 1msec will come
+* out of loop and it can save some boot time
+*/
+osi_core->osd_ops.udelay(1U);
 }
 }
 
 /* Clear the status */
 osi_writela(osi_core, val, (nveu8_t *)xpcs_base + XPCS_WRAP_IRQ_STATUS);
-return 0;
+fail:
+return ret;
 }
 
 /**
@@ -327,16 +349,19 @@ static nve32_t xpcs_check_pcs_lock_status(struct osi_core_priv_data *osi_core)
 */
 static nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core)
 {
-unsigned int retry = 1000;
-unsigned int count;
+struct core_local *l_core = (struct core_local *)(void *)osi_core;
+nveu32_t retry = 7U;
+nveu32_t count;
 nveu32_t val = 0;
-int cond;
+nve32_t cond;
+nve32_t ret = 0;
 
 if (xpcs_uphy_lane_bring_up(osi_core,
 XPCS_WRAP_UPHY_HW_INIT_CTRL_TX_EN) < 0) {
-OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL,
+OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
 "UPHY TX lane bring-up failed\n", 0ULL);
-return -1;
+ret = -1;
+goto fail;
 }
 
 val = osi_readla(osi_core,
@@ -389,7 +414,8 @@ static nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core)
 count = 0;
 while (cond == COND_NOT_MET) {
 if (count > retry) {
-return -1;
+ret = -1;
+goto fail;
 }
 
 count++;
@@ -397,10 +423,17 @@ static nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core)
 val = osi_readla(osi_core,
 (nveu8_t *)osi_core->xpcs_base +
 XPCS_WRAP_UPHY_RX_CONTROL_0_0);
-if ((val & XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_CAL_EN) == 0) {
+if ((val & XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_CAL_EN) == 0U) {
 cond = COND_MET;
 } else {
-osi_core->osd_ops.udelay(1000U);
+/* Maximum wait delay as per HW team is 100 usec.
+* But most of the time as per experiments it takes
+* around 14usec to satisy the condition, so add a
+* minimum delay of 14usec and loop it for 7times.
+* With this 14usec delay condition gets satifies
+* in first iteration itself.
+*/
+osi_core->osd_ops.udelay(14U);
 }
 }
 
@@ -433,12 +466,20 @@ static nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core)
 XPCS_WRAP_UPHY_RX_CONTROL_0_0);
 
 if (xpcs_check_pcs_lock_status(osi_core) < 0) {
-OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL,
+if (l_core->lane_status == OSI_ENABLE) {
+OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
 "Failed to get PCS block lock\n", 0ULL);
-return -1;
+l_core->lane_status = OSI_DISABLE;
 }
+ret = -1;
-return 0;
+goto fail;
+} else {
+OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
+"PCS block lock SUCCESS\n", 0ULL);
+l_core->lane_status = OSI_ENABLE;
+}
+fail:
+return ret;
 }
 
 /**
@@ -451,28 +492,25 @@ static nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core)
 * @retval 0 on success
 * @retval -1 on failure.
 */
-int xpcs_init(struct osi_core_priv_data *osi_core)
+nve32_t xpcs_init(struct osi_core_priv_data *osi_core)
 {
 void *xpcs_base = osi_core->xpcs_base;
-unsigned int retry = 1000;
+nveu32_t retry = 1000;
-unsigned int count;
+nveu32_t count;
-unsigned int ctrl = 0;
+nveu32_t ctrl = 0;
-int cond = 1;
+nve32_t cond = 1;
-int ret = 0;
+nve32_t ret = 0;
 
 if (osi_core->xpcs_base == OSI_NULL) {
-OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL,
+OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
 "XPCS base is NULL", 0ULL);
-/* TODO: Remove this once silicon arrives */
-return 0;
+ret = -1;
+goto fail;
 }
 
-if (osi_core->pre_si != OSI_ENABLE) {
 if (xpcs_lane_bring_up(osi_core) < 0) {
-OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL,
-"TX/RX lane bring-up failed\n", 0ULL);
-return -1;
-}
+ret = -1;
+goto fail;
 }
 
 /* Switching to USXGMII Mode based on
@@ -484,7 +522,7 @@ int xpcs_init(struct osi_core_priv_data *osi_core)
 ctrl |= XPCS_SR_XS_PCS_CTRL2_PCS_TYPE_SEL_BASE_R;
 ret = xpcs_write_safety(osi_core, XPCS_SR_XS_PCS_CTRL2, ctrl);
 if (ret != 0) {
-return ret;
+goto fail;
 }
 /* 2. enable USXGMII Mode inside DWC_xpcs */
 
@@ -501,7 +539,7 @@ int xpcs_init(struct osi_core_priv_data *osi_core)
 
 ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_KR_CTRL, ctrl);
 if (ret != 0) {
-return ret;
+goto fail;
 }
 /* 4. Program PHY to operate at 10Gbps/5Gbps/2Gbps
 * this step not required since PHY speed programming
@@ -512,7 +550,7 @@ int xpcs_init(struct osi_core_priv_data *osi_core)
 ctrl |= XPCS_VR_XS_PCS_DIG_CTRL1_USXG_EN;
 ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_DIG_CTRL1, ctrl);
 if (ret != 0) {
-return ret;
+goto fail;
 }
 
 /* XPCS_VR_XS_PCS_DIG_CTRL1_VR_RST bit is self clearing
@@ -528,7 +566,8 @@ int xpcs_init(struct osi_core_priv_data *osi_core)
 count = 0;
 while (cond == 1) {
 if (count > retry) {
-return -1;
+ret = -1;
+goto fail;
 }
 
 count++;
@@ -551,13 +590,13 @@ int xpcs_init(struct osi_core_priv_data *osi_core)
 ctrl &= ~XPCS_SR_AN_CTRL_AN_EN;
 ret = xpcs_write_safety(osi_core, XPCS_SR_AN_CTRL, ctrl);
 if (ret != 0) {
-return ret;
+goto fail;
 }
 ctrl = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_DIG_CTRL1);
 ctrl |= XPCS_VR_XS_PCS_DIG_CTRL1_CL37_BP;
 ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_DIG_CTRL1, ctrl);
 if (ret != 0) {
-return ret;
+goto fail;
 }
 }
 
@@ -569,10 +608,11 @@ int xpcs_init(struct osi_core_priv_data *osi_core)
 /* 11. XPCS configured as MAC-side USGMII - NA */
 
 /* 13. TODO: If there is interrupt enabled for AN interrupt */
-return 0;
+fail:
+return ret;
 }
 
+#ifndef OSI_STRIPPED_LIB
 /**
 * @brief xpcs_eee - XPCS enable/disable EEE
 *
@@ -585,38 +625,39 @@ int xpcs_init(struct osi_core_priv_data *osi_core)
 * @retval 0 on success
 * @retval -1 on failure.
 */
-int xpcs_eee(struct osi_core_priv_data *osi_core, unsigned int en_dis)
+nve32_t xpcs_eee(struct osi_core_priv_data *osi_core, nveu32_t en_dis)
 {
 void *xpcs_base = osi_core->xpcs_base;
-unsigned int val = 0x0U;
+nveu32_t val = 0x0U;
-int ret = 0;
+nve32_t ret = 0;
 
-if (en_dis != OSI_ENABLE && en_dis != OSI_DISABLE) {
+if ((en_dis != OSI_ENABLE) && (en_dis != OSI_DISABLE)) {
-return -1;
+ret = -1;
+goto fail;
 }
 
-if (xpcs_base == OSI_NULL)
-return -1;
+if (xpcs_base == OSI_NULL) {
+ret = -1;
+goto fail;
+}
 
 if (en_dis == OSI_DISABLE) {
 val = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_EEE_MCTRL0);
 val &= ~XPCS_VR_XS_PCS_EEE_MCTRL0_LTX_EN;
 val &= ~XPCS_VR_XS_PCS_EEE_MCTRL0_LRX_EN;
 ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_EEE_MCTRL0, val);
-if (ret != 0) {
-return ret;
-}
-return 0;
-}
+} else {
 
 /* 1. Check if DWC_xpcs supports the EEE feature by
 * reading the SR_XS_PCS_EEE_ABL register
-* 1000BASEX-Only is different config then else so can (skip) */
+* 1000BASEX-Only is different config then else so can (skip)
+*/
 
 /* 2. Program various timers used in the EEE mode depending on the
 * clk_eee_i clock frequency. default times are same as IEEE std
 * clk_eee_i() is 102MHz. MULT_FACT_100NS = 9 because 9.8ns*10 = 98
-* which is between 80 and 120 this leads to default setting match */
+* which is between 80 and 120 this leads to default setting match
+*/
 
 val = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_EEE_MCTRL0);
 /* 3. If FEC is enabled in the KR mode (skip in FPGA)*/
@@ -625,14 +666,14 @@ int xpcs_eee(struct osi_core_priv_data *osi_core, unsigned int en_dis)
 XPCS_VR_XS_PCS_EEE_MCTRL0_LRX_EN);
 ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_EEE_MCTRL0, val);
 if (ret != 0) {
-return ret;
+goto fail;
 }
 /* Transparent Tx LPI Mode Enable */
 val = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_EEE_MCTRL1);
 val |= XPCS_VR_XS_PCS_EEE_MCTRL1_TRN_LPI;
 ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_EEE_MCTRL1, val);
-if (ret != 0) {
-return ret;
 }
-return 0;
+fail:
+return ret;
 }
+#endif /* !OSI_STRIPPED_LIB */
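Most of the xpcs.c changes above convert early returns into a single-exit pattern and shrink each polling delay while keeping the overall timeout budget (for example, 1000 iterations of 1usec instead of one 1000usec sleep, so the loop can exit as soon as the hardware is ready). A condensed sketch of that pattern, assuming the condition check is an arbitrary register poll; the helper name below is illustrative only and not part of the commit:

    static nve32_t example_poll_single_exit(struct osi_core_priv_data *osi_core)
    {
        nveu32_t retry = 1000U;   /* worst-case budget: retry x 1usec ~= 1msec */
        nveu32_t count = 0U;
        nve32_t cond = COND_NOT_MET;
        nve32_t ret = 0;

        while (cond == COND_NOT_MET) {
            if (count > retry) {
                ret = -1;         /* record the failure ... */
                goto fail;        /* ... and leave through the common exit */
            }
            count++;

            if (example_hw_condition_met(osi_core) == OSI_ENABLE) { /* hypothetical check */
                cond = COND_MET;
            } else {
                /* fine-grained delay: exits early once the hardware is ready */
                osi_core->osd_ops.udelay(1U);
            }
        }

    fail:
        return ret;
    }

The next hunks make the matching declaration and macro changes in the XPCS header.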
@@ -26,15 +26,6 @@
 #include "../osi/common/common.h"
 #include <osi_core.h>
 
-/**
-* @addtogroup XPCS helper macros
-*
-* @brief XPCS helper macros.
-* @{
-*/
-#define XPCS_RETRY_COUNT (RETRY_COUNT * (2U))
-/** @} */
 
 /**
 * @addtogroup XPCS Register offsets
 *
@@ -42,24 +33,27 @@
 * @{
 */
 #define XPCS_ADDRESS 0x03FC
-#define XPCS_SR_XS_PCS_CTRL1 0xC0000
 #define XPCS_SR_XS_PCS_STS1 0xC0004
 #define XPCS_SR_XS_PCS_CTRL2 0xC001C
-#define XPCS_SR_XS_PCS_EEE_ABL 0xC0050
-#define XPCS_SR_XS_PCS_EEE_ABL2 0xC0054
 #define XPCS_VR_XS_PCS_DIG_CTRL1 0xE0000
 #define XPCS_VR_XS_PCS_KR_CTRL 0xE001C
 #define XPCS_SR_AN_CTRL 0x1C0000
 #define XPCS_SR_MII_CTRL 0x7C0000
 #define XPCS_VR_MII_AN_INTR_STS 0x7E0008
-#define XPCS_VR_XS_PCS_EEE_MCTRL0 0xE0018
-#define XPCS_VR_XS_PCS_EEE_MCTRL1 0xE002C
 #define XPCS_WRAP_UPHY_HW_INIT_CTRL 0x8020
 #define XPCS_WRAP_UPHY_STATUS 0x8044
 #define XPCS_WRAP_IRQ_STATUS 0x8050
 #define XPCS_WRAP_UPHY_RX_CONTROL_0_0 0x801C
 /** @} */
 
+#ifndef OSI_STRIPPED_LIB
+#define XPCS_VR_XS_PCS_EEE_MCTRL0 0xE0018
+#define XPCS_VR_XS_PCS_EEE_MCTRL1 0xE002C
+
+#define XPCS_VR_XS_PCS_EEE_MCTRL1_TRN_LPI OSI_BIT(0)
+#define XPCS_VR_XS_PCS_EEE_MCTRL0_LTX_EN OSI_BIT(0)
+#define XPCS_VR_XS_PCS_EEE_MCTRL0_LRX_EN OSI_BIT(1)
+#endif /* !OSI_STRIPPED_LIB */
 
 /**
 * @addtogroup XPCS-BIT Register bit fileds
@@ -67,16 +61,12 @@
 * @brief XPCS register bit fields
 * @{
 */
-#define XPCS_SR_XS_PCS_CTRL1_RST OSI_BIT(15)
 #define XPCS_SR_XS_PCS_CTRL2_PCS_TYPE_SEL_BASE_R 0x0U
 #define XPCS_SR_XS_PCS_STS1_RLU OSI_BIT(2)
 #define XPCS_VR_XS_PCS_DIG_CTRL1_USXG_EN OSI_BIT(9)
 #define XPCS_VR_XS_PCS_DIG_CTRL1_VR_RST OSI_BIT(15)
 #define XPCS_VR_XS_PCS_DIG_CTRL1_USRA_RST OSI_BIT(10)
 #define XPCS_VR_XS_PCS_DIG_CTRL1_CL37_BP OSI_BIT(12)
-#define XPCS_VR_XS_PCS_EEE_MCTRL1_TRN_LPI OSI_BIT(0)
-#define XPCS_VR_XS_PCS_EEE_MCTRL0_LTX_EN OSI_BIT(0)
-#define XPCS_VR_XS_PCS_EEE_MCTRL0_LRX_EN OSI_BIT(1)
 #define XPCS_SR_AN_CTRL_AN_EN OSI_BIT(12)
 #define XPCS_SR_MII_CTRL_AN_ENABLE OSI_BIT(12)
 #define XPCS_VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR OSI_BIT(0)
@@ -95,7 +85,6 @@
 OSI_BIT(10))
 #define XPCS_VR_XS_PCS_KR_CTRL_USXG_MODE_5G OSI_BIT(10)
 #define XPCS_WRAP_UPHY_HW_INIT_CTRL_TX_EN OSI_BIT(0)
-#define XPCS_WRAP_UPHY_HW_INIT_CTRL_RX_EN OSI_BIT(2)
 #define XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS OSI_BIT(6)
 #define XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_DATA_EN OSI_BIT(0)
 #define XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_IDDQ OSI_BIT(4)
@@ -114,20 +103,19 @@
 #define XPCS_CORE_CORRECTABLE_ERR OSI_BIT(10)
 #define XPCS_CORE_UNCORRECTABLE_ERR OSI_BIT(9)
 #define XPCS_REGISTER_PARITY_ERR OSI_BIT(8)
-#define XPCS_BASE_PMA_MMD_SR_PMA_KR_FEC_CTRL 0x402AC
-#define EN_ERR_IND OSI_BIT(1)
-#define FEC_EN OSI_BIT(0)
 #define XPCS_VR_XS_PCS_SFTY_UE_INTR0 0xE03C0
 #define XPCS_VR_XS_PCS_SFTY_CE_INTR 0xE03C8
 #define XPCS_VR_XS_PCS_SFTY_TMR_CTRL 0xE03D4
-#define XPCS_SFTY_1US_MULT_MASK 0xFF
+#define XPCS_SFTY_1US_MULT_MASK 0xFFU
 #define XPCS_SFTY_1US_MULT_SHIFT 0U
 #endif
 /** @} */
 
-int xpcs_init(struct osi_core_priv_data *osi_core);
-int xpcs_start(struct osi_core_priv_data *osi_core);
-int xpcs_eee(struct osi_core_priv_data *osi_core, unsigned int en_dis);
+nve32_t xpcs_init(struct osi_core_priv_data *osi_core);
+nve32_t xpcs_start(struct osi_core_priv_data *osi_core);
+#ifndef OSI_STRIPPED_LIB
+nve32_t xpcs_eee(struct osi_core_priv_data *osi_core, nveu32_t en_dis);
+#endif /* !OSI_STRIPPED_LIB */
 
 /**
 * @brief xpcs_read - read from xpcs.
@@ -139,11 +127,11 @@ int xpcs_eee(struct osi_core_priv_data *osi_core, unsigned int en_dis);
 *
 * @retval value read from xpcs register.
 */
-static inline unsigned int xpcs_read(void *xpcs_base, unsigned int reg_addr)
+static inline nveu32_t xpcs_read(void *xpcs_base, nveu32_t reg_addr)
 {
 osi_writel(((reg_addr >> XPCS_REG_ADDR_SHIFT) & XPCS_REG_ADDR_MASK),
-((unsigned char *)xpcs_base + XPCS_ADDRESS));
+((nveu8_t *)xpcs_base + XPCS_ADDRESS));
-return osi_readl((unsigned char *)xpcs_base +
+return osi_readl((nveu8_t *)xpcs_base +
 ((reg_addr) & XPCS_REG_VALUE_MASK));
 }
 
@@ -156,12 +144,12 @@ static inline unsigned int xpcs_read(void *xpcs_base, unsigned int reg_addr)
 * @param[in] reg_addr: register address for writing
 * @param[in] val: write value to register address
 */
-static inline void xpcs_write(void *xpcs_base, unsigned int reg_addr,
+static inline void xpcs_write(void *xpcs_base, nveu32_t reg_addr,
-unsigned int val)
+nveu32_t val)
 {
 osi_writel(((reg_addr >> XPCS_REG_ADDR_SHIFT) & XPCS_REG_ADDR_MASK),
-((unsigned char *)xpcs_base + XPCS_ADDRESS));
+((nveu8_t *)xpcs_base + XPCS_ADDRESS));
-osi_writel(val, (unsigned char *)xpcs_base +
+osi_writel(val, (nveu8_t *)xpcs_base +
 (((reg_addr) & XPCS_REG_VALUE_MASK)));
 }
 
@@ -176,28 +164,33 @@ static inline void xpcs_write(void *xpcs_base, unsigned int reg_addr,
 * @param[in] val: write value to register address
 *
 * @retval 0 on success
-* @retval -1 on failure.
+* @retval XPCS_WRITE_FAIL_CODE on failure
 *
 */
-static inline int xpcs_write_safety(struct osi_core_priv_data *osi_core,
+static inline nve32_t xpcs_write_safety(struct osi_core_priv_data *osi_core,
-unsigned int reg_addr,
+nveu32_t reg_addr,
-unsigned int val)
+nveu32_t val)
 {
 void *xpcs_base = osi_core->xpcs_base;
-unsigned int read_val;
+nveu32_t read_val;
-int retry = 10;
+nve32_t retry = 10;
+nve32_t ret = XPCS_WRITE_FAIL_CODE;
 
 while (--retry > 0) {
 xpcs_write(xpcs_base, reg_addr, val);
 read_val = xpcs_read(xpcs_base, reg_addr);
 if (val == read_val) {
-return 0;
+ret = 0;
+break;
 }
 osi_core->osd_ops.udelay(OSI_DELAY_1US);
 }
 
-OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL,
+if (ret != 0) {
+OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
 "xpcs_write_safety failed", reg_addr);
-return -1;
+}
+
+return ret;
 }
 #endif
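xpcs_write_safety() above now reports failure through XPCS_WRITE_FAIL_CODE rather than a bare -1 and logs only when the write never sticks. A short usage sketch, mirroring how xpcs_start() programs the MII control register (the wrapper function itself is illustrative, not part of the header):

    static nve32_t example_enable_xpcs_an(struct osi_core_priv_data *osi_core)
    {
        nveu32_t ctrl;

        ctrl = xpcs_read(osi_core->xpcs_base, XPCS_SR_MII_CTRL);
        ctrl |= XPCS_SR_MII_CTRL_AN_ENABLE;

        /* the value is written and read back up to 10 times; a non-zero
         * return (XPCS_WRITE_FAIL_CODE) means the read-back never matched */
        return xpcs_write_safety(osi_core, XPCS_SR_MII_CTRL, ctrl);
    }

The remaining hunks adjust the build files and the DMA-side sources for the new config.tmk switches.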
@@ -26,7 +26,11 @@
 
 ifdef NV_INTERFACE_FLAG_SHARED_LIBRARY_SECTION
 NV_INTERFACE_NAME := nvethernetcl
+ifeq ($(NV_BUILD_CONFIGURATION_IS_SAFETY), 0)
 NV_INTERFACE_EXPORTS := lib$(NV_INTERFACE_NAME)
+else
+NV_INTERFACE_EXPORTS := lib$(NV_INTERFACE_NAME)_safety
+endif
 NV_INTERFACE_PUBLIC_INCLUDES := \
 ./include
 endif
@@ -30,13 +30,10 @@ NV_COMPONENT_STRICT_WARNINGS_qnx_64 := 1
 NV_COMPONENT_NAME := nvethernetcl
 NV_COMPONENT_OWN_INTERFACE_DIR := .
 NV_COMPONENT_SOURCES := \
-eqos_dma.c \
-osi_dma.c \
-osi_dma_txrx.c \
-mgbe_dma.c \
-eqos_desc.c \
-mgbe_desc.c \
-debug.c \
+$(NV_SOURCE)/nvethernetrm/osi/dma/osi_dma.c \
+$(NV_SOURCE)/nvethernetrm/osi/dma/osi_dma_txrx.c \
+$(NV_SOURCE)/nvethernetrm/osi/dma/eqos_desc.c \
+$(NV_SOURCE)/nvethernetrm/osi/dma/mgbe_desc.c \
 $(NV_SOURCE)/nvethernetrm/osi/common/osi_common.c \
 $(NV_SOURCE)/nvethernetrm/osi/common/eqos_common.c \
 $(NV_SOURCE)/nvethernetrm/osi/common/mgbe_common.c
@@ -45,10 +42,17 @@ NV_COMPONENT_INCLUDES := \
 $(NV_SOURCE)/nvethernetrm/include \
 $(NV_SOURCE)/nvethernetrm/osi/common/include
 
-ifeq ($(NV_BUILD_CONFIGURATION_IS_SAFETY),0)
-NV_COMPONENT_CFLAGS += -DOSI_DEBUG
+include $(NV_SOURCE)/nvethernetrm/include/config.tmk
+
+ifeq ($(OSI_DEBUG),1)
+NV_COMPONENT_SOURCES += $(NV_SOURCE)/nvethernetrm/osi/dma/debug.c
 endif
 
+ifeq ($(OSI_STRIPPED_LIB),0)
+NV_COMPONENT_SOURCES += \
+$(NV_SOURCE)/nvethernetrm/osi/dma/mgbe_dma.c \
+$(NV_SOURCE)/nvethernetrm/osi/dma/eqos_dma.c
+endif
+
 include $(NV_BUILD_SHARED_LIBRARY)
 endif
@@ -35,7 +35,7 @@ static void dump_struct(struct osi_dma_priv_data *osi_dma,
 unsigned char *ptr,
 unsigned long size)
 {
-nveu32_t i = 0, rem, j;
+nveu32_t i = 0, rem, j = 0;
 unsigned long temp;
 
 if (ptr == OSI_NULL) {
@@ -129,7 +129,9 @@ void reg_dump(struct osi_dma_priv_data *osi_dma)
 max_addr = 0x14EC;
 break;
 case OSI_MGBE_MAC_3_10:
+#ifndef OSI_STRIPPED_LIB
 case OSI_MGBE_MAC_4_00:
+#endif
 addr = 0x3100;
 max_addr = 0x35FC;
 break;
@@ -205,9 +207,9 @@ static void tx_desc_dump(struct osi_dma_priv_data *osi_dma, unsigned int f_idx,
 int cnt;
 
 if (f_idx > l_idx) {
-cnt = l_idx + osi_dma->tx_ring_sz - f_idx;
+cnt = (int)(l_idx + osi_dma->tx_ring_sz - f_idx);
 } else {
-cnt = l_idx - f_idx;
+cnt = (int)(l_idx - f_idx);
 }
 
 for (i = f_idx; cnt >= 0; cnt--) {
@@ -250,6 +252,8 @@ void desc_dump(struct osi_dma_priv_data *osi_dma, unsigned int f_idx,
 rx_desc_dump(osi_dma, f_idx, chan);
 break;
 default:
+OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
+"Invalid desc dump flag\n", 0ULL);
 break;
 }
 }
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -24,8 +24,10 @@
|
|||||||
#ifndef INCLUDED_DMA_LOCAL_H
|
#ifndef INCLUDED_DMA_LOCAL_H
|
||||||
#define INCLUDED_DMA_LOCAL_H
|
#define INCLUDED_DMA_LOCAL_H
|
||||||
|
|
||||||
|
#include "../osi/common/common.h"
|
||||||
#include <osi_dma.h>
|
#include <osi_dma.h>
|
||||||
#include "eqos_dma.h"
|
#include "eqos_dma.h"
|
||||||
|
#include "mgbe_dma.h"
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Maximum number of OSI DMA instances.
|
* @brief Maximum number of OSI DMA instances.
|
||||||
@@ -46,56 +48,17 @@
|
|||||||
* @brief MAC DMA Channel operations
|
* @brief MAC DMA Channel operations
|
||||||
*/
|
*/
|
||||||
struct dma_chan_ops {
|
struct dma_chan_ops {
|
||||||
/** Called to set Transmit Ring length */
|
|
||||||
void (*set_tx_ring_len)(struct osi_dma_priv_data *osi_dma,
|
|
||||||
nveu32_t chan,
|
|
||||||
nveu32_t len);
|
|
||||||
/** Called to set Transmit Ring Base address */
|
|
||||||
void (*set_tx_ring_start_addr)(void *addr, nveu32_t chan,
|
|
||||||
nveu64_t base_addr);
|
|
||||||
/** Called to update Tx Ring tail pointer */
|
|
||||||
void (*update_tx_tailptr)(void *addr, nveu32_t chan,
|
|
||||||
nveu64_t tailptr);
|
|
||||||
/** Called to set Receive channel ring length */
|
|
||||||
void (*set_rx_ring_len)(struct osi_dma_priv_data *osi_dma,
|
|
||||||
nveu32_t chan,
|
|
||||||
nveu32_t len);
|
|
||||||
/** Called to set receive channel ring base address */
|
|
||||||
void (*set_rx_ring_start_addr)(void *addr, nveu32_t chan,
|
|
||||||
nveu64_t base_addr);
|
|
||||||
/** Called to update Rx ring tail pointer */
|
|
||||||
void (*update_rx_tailptr)(void *addr, nveu32_t chan,
|
|
||||||
nveu64_t tailptr);
|
|
||||||
/** Called to disable DMA Tx channel interrupts at wrapper level */
|
|
||||||
void (*disable_chan_tx_intr)(void *addr, nveu32_t chan);
|
|
||||||
/** Called to enable DMA Tx channel interrupts at wrapper level */
|
|
||||||
void (*enable_chan_tx_intr)(void *addr, nveu32_t chan);
|
|
||||||
/** Called to disable DMA Rx channel interrupts at wrapper level */
|
|
||||||
void (*disable_chan_rx_intr)(void *addr, nveu32_t chan);
|
|
||||||
/** Called to enable DMA Rx channel interrupts at wrapper level */
|
|
||||||
void (*enable_chan_rx_intr)(void *addr, nveu32_t chan);
|
|
||||||
/** Called to start the Tx/Rx DMA */
|
|
||||||
void (*start_dma)(struct osi_dma_priv_data *osi_dma, nveu32_t chan);
|
|
||||||
/** Called to stop the Tx/Rx DMA */
|
|
||||||
void (*stop_dma)(struct osi_dma_priv_data *osi_dma, nveu32_t chan);
|
|
||||||
/** Called to initialize the DMA channel */
|
|
||||||
nve32_t (*init_dma_channel)(struct osi_dma_priv_data *osi_dma);
|
|
||||||
/** Called to set Rx buffer length */
|
|
||||||
void (*set_rx_buf_len)(struct osi_dma_priv_data *osi_dma);
|
|
||||||
#ifndef OSI_STRIPPED_LIB
|
#ifndef OSI_STRIPPED_LIB
|
||||||
/** Called periodically to read and validate safety critical
|
|
||||||
* registers against last written value */
|
|
||||||
nve32_t (*validate_regs)(struct osi_dma_priv_data *osi_dma);
|
|
||||||
/** Called to configure the DMA channel slot function */
|
/** Called to configure the DMA channel slot function */
|
||||||
void (*config_slot)(struct osi_dma_priv_data *osi_dma,
|
void (*config_slot)(struct osi_dma_priv_data *osi_dma,
|
||||||
nveu32_t chan,
|
nveu32_t chan,
|
||||||
nveu32_t set,
|
nveu32_t set,
|
||||||
nveu32_t interval);
|
nveu32_t interval);
|
||||||
#endif /* !OSI_STRIPPED_LIB */
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
/** Called to clear VM Tx interrupt */
|
#ifdef OSI_DEBUG
|
||||||
void (*clear_vm_tx_intr)(void *addr, nveu32_t chan);
|
/** Called to enable/disable debug interrupt */
|
||||||
/** Called to clear VM Rx interrupt */
|
void (*debug_intr_config)(struct osi_dma_priv_data *osi_dma);
|
||||||
void (*clear_vm_rx_intr)(void *addr, nveu32_t chan);
|
#endif
|
||||||
};
|
};
|
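For illustration, the struct above is a plain table of function pointers that the core populates once per MAC family and then calls through at run time; a minimal, self-contained sketch of that pattern (all sk_ names and stub bodies are hypothetical, not part of this change):

#include <stdint.h>

typedef uint32_t nveu32_t;

struct sk_chan_ops {
	void (*enable_chan_tx_intr)(void *addr, nveu32_t chan);
	void (*disable_chan_tx_intr)(void *addr, nveu32_t chan);
};

/* Stub implementations standing in for the real per-MAC hooks. */
static void sk_enable_tx(void *addr, nveu32_t chan) { (void)addr; (void)chan; }
static void sk_disable_tx(void *addr, nveu32_t chan) { (void)addr; (void)chan; }

/* Populate the table once at init time; callers then dispatch through it. */
static void sk_init_chan_ops(struct sk_chan_ops *ops)
{
	ops->enable_chan_tx_intr = sk_enable_tx;
	ops->disable_chan_tx_intr = sk_disable_tx;
}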
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -103,8 +66,9 @@ struct dma_chan_ops {
|
|||||||
*/
|
*/
|
||||||
struct desc_ops {
|
struct desc_ops {
|
||||||
/** Called to get receive checksum */
|
/** Called to get receive checksum */
|
||||||
void (*get_rx_csum)(struct osi_rx_desc *rx_desc,
|
void (*get_rx_csum)(const struct osi_rx_desc *const rx_desc,
|
||||||
struct osi_rx_pkt_cx *rx_pkt_cx);
|
struct osi_rx_pkt_cx *rx_pkt_cx);
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
/** Called to get rx error stats */
|
/** Called to get rx error stats */
|
||||||
void (*update_rx_err_stats)(struct osi_rx_desc *rx_desc,
|
void (*update_rx_err_stats)(struct osi_rx_desc *rx_desc,
|
||||||
struct osi_pkt_err_stats *stats);
|
struct osi_pkt_err_stats *stats);
|
||||||
@@ -114,10 +78,11 @@ struct desc_ops {
|
|||||||
/** Called to get rx HASH from descriptor */
|
/** Called to get rx HASH from descriptor */
|
||||||
void (*get_rx_hash)(struct osi_rx_desc *rx_desc,
|
void (*get_rx_hash)(struct osi_rx_desc *rx_desc,
|
||||||
struct osi_rx_pkt_cx *rx_pkt_cx);
|
struct osi_rx_pkt_cx *rx_pkt_cx);
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
/** Called to get RX hw timestamp */
|
/** Called to get RX hw timestamp */
|
||||||
int (*get_rx_hwstamp)(struct osi_dma_priv_data *osi_dma,
|
nve32_t (*get_rx_hwstamp)(const struct osi_dma_priv_data *const osi_dma,
|
||||||
struct osi_rx_desc *rx_desc,
|
const struct osi_rx_desc *const rx_desc,
|
||||||
struct osi_rx_desc *context_desc,
|
const struct osi_rx_desc *const context_desc,
|
||||||
struct osi_rx_pkt_cx *rx_pkt_cx);
|
struct osi_rx_pkt_cx *rx_pkt_cx);
|
||||||
};
|
};
|
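A minimal sketch of how a receive path might call through a desc_ops-style table after fetching a completed descriptor; the sk_ types and names below are illustrative stand-ins, not the real OSI structures:

#include <stddef.h>

struct sk_rx_desc { unsigned int rdes0, rdes1, rdes2, rdes3; };
struct sk_rx_pkt_cx { unsigned int rxcsum, flags; };

struct sk_desc_ops {
	void (*get_rx_csum)(const struct sk_rx_desc *desc, struct sk_rx_pkt_cx *cx);
};

/* Per-packet processing: consult the descriptor ops before handing the
 * packet context to the OS-dependent layer. */
static void sk_process_rx(const struct sk_desc_ops *ops,
			  const struct sk_rx_desc *desc,
			  struct sk_rx_pkt_cx *cx)
{
	if (ops->get_rx_csum != NULL) {
		ops->get_rx_csum(desc, cx);
	}
}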
||||||
|
|
||||||
@@ -139,14 +104,15 @@ struct dma_local {
|
|||||||
nveu32_t init_done;
|
nveu32_t init_done;
|
||||||
/** Holds the MAC version of MAC controller */
|
/** Holds the MAC version of MAC controller */
|
||||||
nveu32_t mac_ver;
|
nveu32_t mac_ver;
|
||||||
/** Represents whether DMA interrupts are VM or Non-VM */
|
|
||||||
nveu32_t vm_intr;
|
|
||||||
/** Magic number to validate osi_dma pointer */
|
/** Magic number to validate osi_dma pointer */
|
||||||
nveu64_t magic_num;
|
nveu64_t magic_num;
|
||||||
/** Maximum number of DMA channels */
|
/** Maximum number of DMA channels */
|
||||||
nveu32_t max_chans;
|
nveu32_t num_max_chans;
|
||||||
|
/** Exact MAC used across SOCs 0:Legacy EQOS, 1:Orin EQOS, 2:Orin MGBE */
|
||||||
|
nveu32_t l_mac_ver;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
/**
|
/**
|
||||||
* @brief eqos_init_dma_chan_ops - Initialize eqos DMA operations.
|
* @brief eqos_init_dma_chan_ops - Initialize eqos DMA operations.
|
||||||
*
|
*
|
||||||
@@ -172,18 +138,19 @@ void eqos_init_dma_chan_ops(struct dma_chan_ops *ops);
|
|||||||
* - De-initialization: No
|
* - De-initialization: No
|
||||||
*/
|
*/
|
||||||
void mgbe_init_dma_chan_ops(struct dma_chan_ops *ops);
|
void mgbe_init_dma_chan_ops(struct dma_chan_ops *ops);
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief eqos_get_desc_ops - EQOS init DMA descriptor operations
|
* @brief eqos_get_desc_ops - EQOS init DMA descriptor operations
|
||||||
*/
|
*/
|
||||||
void eqos_init_desc_ops(struct desc_ops *d_ops);
|
void eqos_init_desc_ops(struct desc_ops *p_dops);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief mgbe_get_desc_ops - MGBE init DMA descriptor operations
|
* @brief mgbe_get_desc_ops - MGBE init DMA descriptor operations
|
||||||
*/
|
*/
|
||||||
void mgbe_init_desc_ops(struct desc_ops *d_ops);
|
void mgbe_init_desc_ops(struct desc_ops *p_dops);
|
||||||
|
|
||||||
nve32_t init_desc_ops(struct osi_dma_priv_data *osi_dma);
|
nve32_t init_desc_ops(const struct osi_dma_priv_data *const osi_dma);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief osi_hw_transmit - Initialize Tx DMA descriptors for a channel
|
* @brief osi_hw_transmit - Initialize Tx DMA descriptors for a channel
|
||||||
@@ -196,8 +163,7 @@ nve32_t init_desc_ops(struct osi_dma_priv_data *osi_dma);
|
|||||||
*
|
*
|
||||||
* @param[in, out] osi_dma: OSI DMA private data.
|
* @param[in, out] osi_dma: OSI DMA private data.
|
||||||
* @param[in] tx_ring: DMA Tx ring.
|
* @param[in] tx_ring: DMA Tx ring.
|
||||||
* @param[in] ops: DMA channel operations.
|
* @param[in] dma_chan: DMA Tx channel number. Max OSI_EQOS_MAX_NUM_CHANS.
|
||||||
* @param[in] chan: DMA Tx channel number. Max OSI_EQOS_MAX_NUM_CHANS.
|
|
||||||
*
|
*
|
||||||
* @note
|
* @note
|
||||||
* API Group:
|
* API Group:
|
||||||
@@ -207,8 +173,7 @@ nve32_t init_desc_ops(struct osi_dma_priv_data *osi_dma);
|
|||||||
*/
|
*/
|
||||||
nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma,
|
nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma,
|
||||||
struct osi_tx_ring *tx_ring,
|
struct osi_tx_ring *tx_ring,
|
||||||
struct dma_chan_ops *ops,
|
nveu32_t dma_chan);
|
||||||
nveu32_t chan);
|
|
||||||
|
|
||||||
/* Function prototype needed for misra */
|
/* Function prototype needed for misra */
|
||||||
|
|
||||||
@@ -232,41 +197,36 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma,
|
|||||||
* @retval 0 on success
|
* @retval 0 on success
|
||||||
* @retval -1 on failure.
|
* @retval -1 on failure.
|
||||||
*/
|
*/
|
||||||
nve32_t dma_desc_init(struct osi_dma_priv_data *osi_dma,
|
nve32_t dma_desc_init(struct osi_dma_priv_data *osi_dma);
|
||||||
struct dma_chan_ops *ops);
|
|
||||||
|
|
||||||
static inline nveu32_t is_power_of_two(nveu32_t num)
|
static inline nveu32_t is_power_of_two(nveu32_t num)
|
||||||
{
|
{
|
||||||
|
nveu32_t ret = OSI_DISABLE;
|
||||||
|
|
||||||
if ((num > 0U) && ((num & (num - 1U)) == 0U)) {
|
if ((num > 0U) && ((num & (num - 1U)) == 0U)) {
|
||||||
return OSI_ENABLE;
|
ret = OSI_ENABLE;
|
||||||
}
|
}
|
||||||
|
|
||||||
return OSI_DISABLE;
|
return ret;
|
||||||
}
|
}
|
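The check above relies on the identity that clearing the lowest set bit of a power of two yields zero; a standalone version of the same test, with a couple of sanity checks:

#include <assert.h>
#include <stdint.h>

static uint32_t sk_is_power_of_two(uint32_t num)
{
	/* num & (num - 1) clears the lowest set bit; only a power of two
	 * (exactly one bit set) becomes zero. */
	return ((num > 0U) && ((num & (num - 1U)) == 0U)) ? 1U : 0U;
}

int main(void)
{
	assert(sk_is_power_of_two(1U) == 1U);
	assert(sk_is_power_of_two(64U) == 1U);
	assert(sk_is_power_of_two(0U) == 0U);
	assert(sk_is_power_of_two(12U) == 0U);
	return 0;
}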
||||||
|
|
||||||
/**
|
|
||||||
* @addtogroup Helper Helper MACROS
|
|
||||||
*
|
|
||||||
* @brief EQOS generic helper MACROS.
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
#define CHECK_CHAN_BOUND(chan) \
|
|
||||||
{ \
|
|
||||||
if ((chan) >= OSI_EQOS_MAX_NUM_CHANS) { \
|
|
||||||
return; \
|
|
||||||
} \
|
|
||||||
}
|
|
||||||
|
|
||||||
#define MGBE_CHECK_CHAN_BOUND(chan) \
|
|
||||||
{ \
|
|
||||||
if ((chan) >= OSI_MGBE_MAX_NUM_CHANS) { \
|
|
||||||
return; \
|
|
||||||
} \
|
|
||||||
} \
|
|
||||||
|
|
||||||
#define BOOLEAN_FALSE (0U != 0U)
|
#define BOOLEAN_FALSE (0U != 0U)
|
||||||
#define L32(data) ((nveu32_t)((data) & 0xFFFFFFFFU))
|
#define L32(data) ((nveu32_t)((data) & 0xFFFFFFFFU))
|
||||||
#define H32(data) ((nveu32_t)(((data) & 0xFFFFFFFF00000000UL) >> 32UL))
|
#define H32(data) ((nveu32_t)(((data) & 0xFFFFFFFF00000000UL) >> 32UL))
|
||||||
|
|
||||||
|
static inline void update_rx_tail_ptr(const struct osi_dma_priv_data *const osi_dma,
|
||||||
|
nveu32_t dma_chan,
|
||||||
|
nveu64_t tailptr)
|
||||||
|
{
|
||||||
|
nveu32_t chan = dma_chan & 0xFU;
|
||||||
|
const nveu32_t tail_ptr_reg[2] = {
|
||||||
|
EQOS_DMA_CHX_RDTP(chan),
|
||||||
|
MGBE_DMA_CHX_RDTLP(chan)
|
||||||
|
};
|
||||||
|
|
||||||
|
osi_writel(L32(tailptr), (nveu8_t *)osi_dma->base + tail_ptr_reg[osi_dma->mac]);
|
||||||
|
}
|
||||||
|
|
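The new helper above indexes a per-MAC register table and writes only the low 32 bits of the tail pointer; the L32()/H32() split it builds on can be exercised standalone (SK_ copies of the macros shown for illustration):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SK_L32(data) ((uint32_t)((data) & 0xFFFFFFFFU))
#define SK_H32(data) ((uint32_t)(((data) & 0xFFFFFFFF00000000UL) >> 32UL))

int main(void)
{
	uint64_t desc_base = 0x0000001234ABCD00UL;

	assert(SK_L32(desc_base) == 0x34ABCD00U);
	assert(SK_H32(desc_base) == 0x00000012U);
	printf("low=0x%08x high=0x%08x\n", SK_L32(desc_base), SK_H32(desc_base));
	return 0;
}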
||||||
/** @} */
|
/** @} */
|
||||||
|
|
||||||
#endif /* INCLUDED_DMA_LOCAL_H */
|
#endif /* INCLUDED_DMA_LOCAL_H */
|
||||||
|
|||||||
@@ -23,6 +23,7 @@
|
|||||||
#include "dma_local.h"
|
#include "dma_local.h"
|
||||||
#include "hw_desc.h"
|
#include "hw_desc.h"
|
||||||
|
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
/**
|
/**
|
||||||
* @brief eqos_get_rx_vlan - Get Rx VLAN from descriptor
|
* @brief eqos_get_rx_vlan - Get Rx VLAN from descriptor
|
||||||
*
|
*
|
||||||
@@ -77,6 +78,22 @@ static inline void eqos_update_rx_err_stats(struct osi_rx_desc *rx_desc,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief eqos_get_rx_hash - Get Rx packet hash from descriptor if valid
|
||||||
|
*
|
||||||
|
* Algorithm: This routine will be invoked by OSI layer itself to get received
|
||||||
|
* packet Hash from descriptor if RSS hash is valid and it also sets the type
|
||||||
|
* of RSS hash.
|
||||||
|
*
|
||||||
|
* @param[in] rx_desc: Rx Descriptor.
|
||||||
|
* @param[in] rx_pkt_cx: Per-Rx packet context structure
|
||||||
|
*/
|
||||||
|
static void eqos_get_rx_hash(OSI_UNUSED struct osi_rx_desc *rx_desc,
|
||||||
|
OSI_UNUSED struct osi_rx_pkt_cx *rx_pkt_cx)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief eqos_get_rx_csum - Get the Rx checksum from descriptor if valid
|
* @brief eqos_get_rx_csum - Get the Rx checksum from descriptor if valid
|
||||||
*
|
*
|
||||||
@@ -98,7 +115,7 @@ static inline void eqos_update_rx_err_stats(struct osi_rx_desc *rx_desc,
|
|||||||
* @param[in, out] rx_desc: Rx descriptor
|
* @param[in, out] rx_desc: Rx descriptor
|
||||||
* @param[in, out] rx_pkt_cx: Per-Rx packet context structure
|
* @param[in, out] rx_pkt_cx: Per-Rx packet context structure
|
||||||
*/
|
*/
|
||||||
static void eqos_get_rx_csum(struct osi_rx_desc *rx_desc,
|
static void eqos_get_rx_csum(const struct osi_rx_desc *const rx_desc,
|
||||||
struct osi_rx_pkt_cx *rx_pkt_cx)
|
struct osi_rx_pkt_cx *rx_pkt_cx)
|
||||||
{
|
{
|
||||||
nveu32_t pkt_type;
|
nveu32_t pkt_type;
|
||||||
@@ -108,19 +125,13 @@ static void eqos_get_rx_csum(struct osi_rx_desc *rx_desc,
|
|||||||
* Set none/unnecessary bit as well for other OS to check and
|
* Set none/unnecessary bit as well for other OS to check and
|
||||||
* take proper actions.
|
* take proper actions.
|
||||||
*/
|
*/
|
||||||
if ((rx_desc->rdes3 & RDES3_RS1V) != RDES3_RS1V) {
|
if ((rx_desc->rdes3 & RDES3_RS1V) == RDES3_RS1V) {
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if ((rx_desc->rdes1 &
|
if ((rx_desc->rdes1 &
|
||||||
(RDES1_IPCE | RDES1_IPCB | RDES1_IPHE)) == OSI_DISABLE) {
|
(RDES1_IPCE | RDES1_IPCB | RDES1_IPHE)) == OSI_DISABLE) {
|
||||||
rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UNNECESSARY;
|
rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UNNECESSARY;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((rx_desc->rdes1 & RDES1_IPCB) != OSI_DISABLE) {
|
if ((rx_desc->rdes1 & RDES1_IPCB) != RDES1_IPCB) {
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4;
|
rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4;
|
||||||
if ((rx_desc->rdes1 & RDES1_IPHE) == RDES1_IPHE) {
|
if ((rx_desc->rdes1 & RDES1_IPHE) == RDES1_IPHE) {
|
||||||
rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4_BAD;
|
rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4_BAD;
|
||||||
@@ -153,21 +164,10 @@ static void eqos_get_rx_csum(struct osi_rx_desc *rx_desc,
|
|||||||
if ((rx_desc->rdes1 & RDES1_IPCE) == RDES1_IPCE) {
|
if ((rx_desc->rdes1 & RDES1_IPCE) == RDES1_IPCE) {
|
||||||
rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCP_UDP_BAD;
|
rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCP_UDP_BAD;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
return;
|
||||||
* @brief eqos_get_rx_hash - Get Rx packet hash from descriptor if valid
|
|
||||||
*
|
|
||||||
* Algorithm: This routine will be invoked by OSI layer itself to get received
|
|
||||||
* packet Hash from descriptor if RSS hash is valid and it also sets the type
|
|
||||||
* of RSS hash.
|
|
||||||
*
|
|
||||||
* @param[in] rx_desc: Rx Descriptor.
|
|
||||||
* @param[in] rx_pkt_cx: Per-Rx packet context structure
|
|
||||||
*/
|
|
||||||
static void eqos_get_rx_hash(OSI_UNUSED struct osi_rx_desc *rx_desc,
|
|
||||||
OSI_UNUSED struct osi_rx_pkt_cx *rx_pkt_cx)
|
|
||||||
{
|
|
||||||
}
|
}
|
||||||
|
|
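The routine above only accumulates bits in rx_pkt_cx->rxcsum; a hedged sketch of how a caller in the OS-dependent layer might interpret the result. The flag values below are illustrative only, not the real OSI_CHECKSUM_* definitions:

#include <stdio.h>

/* Illustrative flag values; the real OSI_CHECKSUM_* macros live in osi_dma.h. */
#define SK_CSUM_UNNECESSARY  (1U << 0)
#define SK_CSUM_IPV4         (1U << 1)
#define SK_CSUM_IPV4_BAD     (1U << 2)
#define SK_CSUM_TCP_UDP_BAD  (1U << 3)

static const char *sk_csum_verdict(unsigned int rxcsum)
{
	if ((rxcsum & (SK_CSUM_IPV4_BAD | SK_CSUM_TCP_UDP_BAD)) != 0U) {
		return "bad checksum reported, let the stack re-verify";
	}
	if ((rxcsum & SK_CSUM_UNNECESSARY) != 0U) {
		return "hardware verified, skip software checksum";
	}
	return "no offload info, verify in software";
}

int main(void)
{
	printf("%s\n", sk_csum_verdict(SK_CSUM_UNNECESSARY | SK_CSUM_IPV4));
	return 0;
}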
||||||
/**
|
/**
|
||||||
@@ -186,12 +186,13 @@ static void eqos_get_rx_hash(OSI_UNUSED struct osi_rx_desc *rx_desc,
|
|||||||
* @retval -1 if TimeStamp is not available
|
* @retval -1 if TimeStamp is not available
|
||||||
* @retval 0 if TimeStamp is available.
|
* @retval 0 if TimeStamp is available.
|
||||||
*/
|
*/
|
||||||
static int eqos_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma,
|
static nve32_t eqos_get_rx_hwstamp(const struct osi_dma_priv_data *const osi_dma,
|
||||||
struct osi_rx_desc *rx_desc,
|
const struct osi_rx_desc *const rx_desc,
|
||||||
struct osi_rx_desc *context_desc,
|
const struct osi_rx_desc *const context_desc,
|
||||||
struct osi_rx_pkt_cx *rx_pkt_cx)
|
struct osi_rx_pkt_cx *rx_pkt_cx)
|
||||||
{
|
{
|
||||||
int retry;
|
nve32_t ret = 0;
|
||||||
|
nve32_t retry;
|
||||||
|
|
||||||
/* Check for RS1V/TSA/TD valid */
|
/* Check for RS1V/TSA/TD valid */
|
||||||
if (((rx_desc->rdes3 & RDES3_RS1V) == RDES3_RS1V) &&
|
if (((rx_desc->rdes3 & RDES3_RS1V) == RDES3_RS1V) &&
|
||||||
@@ -205,7 +206,8 @@ static int eqos_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma,
|
|||||||
OSI_INVALID_VALUE) &&
|
OSI_INVALID_VALUE) &&
|
||||||
(context_desc->rdes1 ==
|
(context_desc->rdes1 ==
|
||||||
OSI_INVALID_VALUE)) {
|
OSI_INVALID_VALUE)) {
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto fail;
|
||||||
}
|
}
|
||||||
/* Update rx pkt context flags to indicate
|
/* Update rx pkt context flags to indicate
|
||||||
* PTP */
|
* PTP */
|
||||||
@@ -219,27 +221,31 @@ static int eqos_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma,
|
|||||||
}
|
}
|
||||||
if (retry == 10) {
|
if (retry == 10) {
|
||||||
/* Timed out waiting for Rx timestamp */
|
/* Timed out waiting for Rx timestamp */
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
rx_pkt_cx->ns = context_desc->rdes0 +
|
rx_pkt_cx->ns = context_desc->rdes0 +
|
||||||
(OSI_NSEC_PER_SEC * context_desc->rdes1);
|
(OSI_NSEC_PER_SEC * context_desc->rdes1);
|
||||||
if (rx_pkt_cx->ns < context_desc->rdes0) {
|
if (rx_pkt_cx->ns < context_desc->rdes0) {
|
||||||
/* Will not hit this case */
|
/* Will not hit this case */
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto fail;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
return -1;
|
ret = -1;
|
||||||
}
|
}
|
||||||
|
fail:
|
||||||
return 0;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
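The timestamp math above folds the seconds field (rdes1) and nanoseconds field (rdes0) of the context descriptor into a single nanosecond count, with a wrap guard; the same computation in isolation (sk_ names are illustrative):

#include <assert.h>
#include <stdint.h>

#define SK_NSEC_PER_SEC 1000000000U

/* Returns 0 and fills *ns on success, -1 on (theoretical) wrap. */
static int sk_desc_to_ns(uint32_t secs, uint32_t nsecs, uint64_t *ns)
{
	uint64_t val = (uint64_t)nsecs + ((uint64_t)secs * SK_NSEC_PER_SEC);

	if (val < nsecs) {
		return -1;  /* cannot happen with 32-bit inputs; mirrors the guard above */
	}
	*ns = val;
	return 0;
}

int main(void)
{
	uint64_t ns = 0U;

	assert(sk_desc_to_ns(2U, 500U, &ns) == 0 && ns == 2000000500UL);
	return 0;
}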
||||||
void eqos_init_desc_ops(struct desc_ops *d_ops)
|
void eqos_init_desc_ops(struct desc_ops *p_dops)
|
||||||
{
|
{
|
||||||
d_ops->get_rx_csum = eqos_get_rx_csum;
|
#ifndef OSI_STRIPPED_LIB
|
||||||
d_ops->update_rx_err_stats = eqos_update_rx_err_stats;
|
p_dops->update_rx_err_stats = eqos_update_rx_err_stats;
|
||||||
d_ops->get_rx_vlan = eqos_get_rx_vlan;
|
p_dops->get_rx_vlan = eqos_get_rx_vlan;
|
||||||
d_ops->get_rx_hash = eqos_get_rx_hash;
|
p_dops->get_rx_hash = eqos_get_rx_hash;
|
||||||
d_ops->get_rx_hwstamp = eqos_get_rx_hwstamp;
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
|
p_dops->get_rx_csum = eqos_get_rx_csum;
|
||||||
|
p_dops->get_rx_hwstamp = eqos_get_rx_hwstamp;
|
||||||
}
|
}
|
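eqos_init_desc_ops() and mgbe_init_desc_ops() fill the same table; a hedged sketch of how a generic init routine could pick one based on a MAC identifier. The sk_ enum values are illustrative and do not reproduce the real l_mac_ver encoding:

enum sk_mac { SK_MAC_EQOS = 0, SK_MAC_MGBE = 1 };

struct sk_dops { int which; };

static void sk_eqos_init_desc_ops(struct sk_dops *ops) { ops->which = 0; }
static void sk_mgbe_init_desc_ops(struct sk_dops *ops) { ops->which = 1; }

/* Dispatch once at init time; -1 signals an unknown MAC. */
static int sk_init_desc_ops(enum sk_mac mac, struct sk_dops *ops)
{
	switch (mac) {
	case SK_MAC_EQOS:
		sk_eqos_init_desc_ops(ops);
		return 0;
	case SK_MAC_MGBE:
		sk_mgbe_init_desc_ops(ops);
		return 0;
	default:
		return -1;
	}
}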
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -20,825 +20,10 @@
|
|||||||
* DEALINGS IN THE SOFTWARE.
|
* DEALINGS IN THE SOFTWARE.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
#include "../osi/common/common.h"
|
#include "../osi/common/common.h"
|
||||||
#include "dma_local.h"
|
#include "dma_local.h"
|
||||||
#include "eqos_dma.h"
|
#include "eqos_dma.h"
|
||||||
#include "../osi/common/type.h"
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief eqos_dma_safety_config - EQOS MAC DMA safety configuration
|
|
||||||
*/
|
|
||||||
static struct dma_func_safety eqos_dma_safety_config;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Write to safety critical register.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* Algorithm:
|
|
||||||
* - Acquire RW lock, so that eqos_validate_dma_regs does not run while
|
|
||||||
* updating the safety critical register.
|
|
||||||
* - call osi_writel() to actually update the memory mapped register.
|
|
||||||
* - Store the same value in eqos_dma_safety_config->reg_val[idx], so that
|
|
||||||
* this latest value will be compared when eqos_validate_dma_regs is
|
|
||||||
* scheduled.
|
|
||||||
*
|
|
||||||
* @param[in] osi_dma: OSI DMA private data structure.
|
|
||||||
* @param[in] val: Value to be written.
|
|
||||||
* @param[in] addr: memory mapped register address to be written to.
|
|
||||||
* @param[in] idx: Index of register corresponding to enum func_safety_dma_regs.
|
|
||||||
*
|
|
||||||
* @pre MAC has to be out of reset, and clocks supplied.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* API Group:
|
|
||||||
* - Initialization: Yes
|
|
||||||
* - Run time: Yes
|
|
||||||
* - De-initialization: Yes
|
|
||||||
*/
|
|
||||||
static inline void eqos_dma_safety_writel(struct osi_dma_priv_data *osi_dma,
|
|
||||||
nveu32_t val, void *addr,
|
|
||||||
nveu32_t idx)
|
|
||||||
{
|
|
||||||
struct dma_func_safety *config = &eqos_dma_safety_config;
|
|
||||||
|
|
||||||
osi_lock_irq_enabled(&config->dma_safety_lock);
|
|
||||||
osi_writela(osi_dma->osd, val, addr);
|
|
||||||
config->reg_val[idx] = (val & config->reg_mask[idx]);
|
|
||||||
osi_unlock_irq_enabled(&config->dma_safety_lock);
|
|
||||||
}
|
|
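The helper above captures a common functional-safety pattern: take a lock, write the register, and record the masked value so a later validation pass can compare what the hardware holds against what was last written. A simplified, self-contained sketch of that write-and-validate pair; the sk_ names and the plain pthread mutex stand in for the real OSI lock primitives and register accessors:

#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

#define SK_NUM_SAFETY_REGS 4U

struct sk_safety {
	pthread_mutex_t lock;
	volatile uint32_t *reg_addr[SK_NUM_SAFETY_REGS];
	uint32_t reg_mask[SK_NUM_SAFETY_REGS];
	uint32_t reg_val[SK_NUM_SAFETY_REGS];
};

/* Write the register and remember the masked value for later validation. */
static void sk_safety_writel(struct sk_safety *cfg, uint32_t val, uint32_t idx)
{
	pthread_mutex_lock(&cfg->lock);
	*cfg->reg_addr[idx] = val;
	cfg->reg_val[idx] = val & cfg->reg_mask[idx];
	pthread_mutex_unlock(&cfg->lock);
}

/* Periodic check: re-read each register and compare against the shadow copy. */
static int sk_safety_validate(struct sk_safety *cfg)
{
	int ret = 0;
	uint32_t i;

	pthread_mutex_lock(&cfg->lock);
	for (i = 0U; i < SK_NUM_SAFETY_REGS; i++) {
		if (cfg->reg_addr[i] == NULL) {
			continue;
		}
		if ((*cfg->reg_addr[i] & cfg->reg_mask[i]) != cfg->reg_val[i]) {
			ret = -1;
			break;
		}
	}
	pthread_mutex_unlock(&cfg->lock);
	return ret;
}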
||||||
|
|
||||||
/**
|
|
||||||
* @brief Initialize the eqos_dma_safety_config.
|
|
||||||
*
|
|
||||||
* @param[in] osi_dma: OSI DMA private data structure.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* Algorithm:
|
|
||||||
* - Populate the list of safety critical registers and provide
|
|
||||||
* - the address of the register
|
|
||||||
* - Register mask (to ignore reserved/self-clearing bits in the reg).
|
|
||||||
* See eqos_validate_dma_regs which can be invoked periodically to compare
|
|
||||||
* the last written value to this register vs the actual value read when
|
|
||||||
* eqos_validate_dma_regs is scheduled.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* API Group:
|
|
||||||
* - Initialization: Yes
|
|
||||||
* - Run time: No
|
|
||||||
* - De-initialization: No
|
|
||||||
*/
|
|
||||||
static void eqos_dma_safety_init(struct osi_dma_priv_data *osi_dma)
|
|
||||||
{
|
|
||||||
struct dma_func_safety *config = &eqos_dma_safety_config;
|
|
||||||
nveu8_t *base = (nveu8_t *)osi_dma->base;
|
|
||||||
nveu32_t val;
|
|
||||||
nveu32_t i, idx;
|
|
||||||
|
|
||||||
/* Initialize all reg address to NULL, since we may not use
|
|
||||||
* some regs depending on the number of DMA chans enabled.
|
|
||||||
*/
|
|
||||||
for (i = EQOS_DMA_CH0_CTRL_IDX; i < EQOS_MAX_DMA_SAFETY_REGS; i++) {
|
|
||||||
config->reg_addr[i] = OSI_NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (i = 0U; i < osi_dma->num_dma_chans; i++) {
|
|
||||||
idx = osi_dma->dma_chans[i];
|
|
||||||
#if 0
|
|
||||||
CHECK_CHAN_BOUND(idx);
|
|
||||||
#endif
|
|
||||||
config->reg_addr[EQOS_DMA_CH0_CTRL_IDX + idx] = base +
|
|
||||||
EQOS_DMA_CHX_CTRL(idx);
|
|
||||||
config->reg_addr[EQOS_DMA_CH0_TX_CTRL_IDX + idx] = base +
|
|
||||||
EQOS_DMA_CHX_TX_CTRL(idx);
|
|
||||||
config->reg_addr[EQOS_DMA_CH0_RX_CTRL_IDX + idx] = base +
|
|
||||||
EQOS_DMA_CHX_RX_CTRL(idx);
|
|
||||||
config->reg_addr[EQOS_DMA_CH0_TDRL_IDX + idx] = base +
|
|
||||||
EQOS_DMA_CHX_TDRL(idx);
|
|
||||||
config->reg_addr[EQOS_DMA_CH0_RDRL_IDX + idx] = base +
|
|
||||||
EQOS_DMA_CHX_RDRL(idx);
|
|
||||||
config->reg_addr[EQOS_DMA_CH0_INTR_ENA_IDX + idx] = base +
|
|
||||||
EQOS_DMA_CHX_INTR_ENA(idx);
|
|
||||||
|
|
||||||
config->reg_mask[EQOS_DMA_CH0_CTRL_IDX + idx] =
|
|
||||||
EQOS_DMA_CHX_CTRL_MASK;
|
|
||||||
config->reg_mask[EQOS_DMA_CH0_TX_CTRL_IDX + idx] =
|
|
||||||
EQOS_DMA_CHX_TX_CTRL_MASK;
|
|
||||||
config->reg_mask[EQOS_DMA_CH0_RX_CTRL_IDX + idx] =
|
|
||||||
EQOS_DMA_CHX_RX_CTRL_MASK;
|
|
||||||
config->reg_mask[EQOS_DMA_CH0_TDRL_IDX + idx] =
|
|
||||||
EQOS_DMA_CHX_TDRL_MASK;
|
|
||||||
config->reg_mask[EQOS_DMA_CH0_RDRL_IDX + idx] =
|
|
||||||
EQOS_DMA_CHX_RDRL_MASK;
|
|
||||||
config->reg_mask[EQOS_DMA_CH0_INTR_ENA_IDX + idx] =
|
|
||||||
EQOS_DMA_CHX_INTR_ENA_MASK;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Initialize current power-on-reset values of these registers. */
|
|
||||||
for (i = EQOS_DMA_CH0_CTRL_IDX; i < EQOS_MAX_DMA_SAFETY_REGS; i++) {
|
|
||||||
if (config->reg_addr[i] == OSI_NULL) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
val = osi_readl((nveu8_t *)config->reg_addr[i]);
|
|
||||||
config->reg_val[i] = val & config->reg_mask[i];
|
|
||||||
}
|
|
||||||
|
|
||||||
osi_lock_init(&config->dma_safety_lock);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief eqos_disable_chan_tx_intr - Disables DMA Tx channel interrupts.
|
|
||||||
*
|
|
||||||
* @param[in] addr: Base address indicating the start of
|
|
||||||
* memory mapped IO region of the MAC.
|
|
||||||
* @param[in] chan: DMA Tx channel number.
|
|
||||||
*
|
|
||||||
* @pre
|
|
||||||
* - MAC needs to be out of reset and proper clocks need to be configured
|
|
||||||
* - DMA HW init need to be completed successfully, see osi_hw_dma_init
|
|
||||||
* - Mapping of physical IRQ line to DMA channel need to be maintained at
|
|
||||||
* OSDependent layer and pass corresponding channel number.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* API Group:
|
|
||||||
* - Initialization: No
|
|
||||||
* - Run time: Yes
|
|
||||||
* - De-initialization: Yes
|
|
||||||
*/
|
|
||||||
static void eqos_disable_chan_tx_intr(void *addr, nveu32_t chan)
|
|
||||||
{
|
|
||||||
nveu32_t cntrl, status;
|
|
||||||
|
|
||||||
#if 0
|
|
||||||
CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
/* Clear irq before disabling */
|
|
||||||
status = osi_readl((nveu8_t *)addr +
|
|
||||||
EQOS_VIRT_INTR_CHX_STATUS(chan));
|
|
||||||
if ((status & EQOS_VIRT_INTR_CHX_STATUS_TX) ==
|
|
||||||
EQOS_VIRT_INTR_CHX_STATUS_TX) {
|
|
||||||
osi_writel(EQOS_DMA_CHX_STATUS_CLEAR_TX,
|
|
||||||
(nveu8_t *)addr + EQOS_DMA_CHX_STATUS(chan));
|
|
||||||
osi_writel(EQOS_VIRT_INTR_CHX_STATUS_TX,
|
|
||||||
(nveu8_t *)addr +
|
|
||||||
EQOS_VIRT_INTR_CHX_STATUS(chan));
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Disable the irq */
|
|
||||||
cntrl = osi_readl((nveu8_t *)addr +
|
|
||||||
EQOS_VIRT_INTR_CHX_CNTRL(chan));
|
|
||||||
cntrl &= ~EQOS_VIRT_INTR_CHX_CNTRL_TX;
|
|
||||||
osi_writel(cntrl, (nveu8_t *)addr +
|
|
||||||
EQOS_VIRT_INTR_CHX_CNTRL(chan));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief eqos_enable_chan_tx_intr - Enable Tx channel interrupts.
|
|
||||||
*
|
|
||||||
* @param[in] addr: Base address indicating the start of
|
|
||||||
* memory mapped IO region of the MAC.
|
|
||||||
* @param[in] chan: DMA Tx channel number.
|
|
||||||
*
|
|
||||||
* @pre
|
|
||||||
* - MAC needs to be out of reset and proper clocks need to be configured
|
|
||||||
* - DMA HW init need to be completed successfully, see osi_hw_dma_init
|
|
||||||
* - Mapping of physical IRQ line to DMA channel need to be maintained at
|
|
||||||
* OSDependent layer and pass corresponding channel number.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* API Group:
|
|
||||||
* - Initialization: Yes
|
|
||||||
* - Run time: Yes
|
|
||||||
* - De-initialization: No
|
|
||||||
*/
|
|
||||||
static void eqos_enable_chan_tx_intr(void *addr, nveu32_t chan)
|
|
||||||
{
|
|
||||||
nveu32_t cntrl;
|
|
||||||
#if 0
|
|
||||||
CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
cntrl = osi_readl((nveu8_t *)addr +
|
|
||||||
EQOS_VIRT_INTR_CHX_CNTRL(chan));
|
|
||||||
cntrl |= EQOS_VIRT_INTR_CHX_CNTRL_TX;
|
|
||||||
osi_writel(cntrl, (nveu8_t *)addr +
|
|
||||||
EQOS_VIRT_INTR_CHX_CNTRL(chan));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief eqos_disable_chan_rx_intr - Disable Rx channel interrupts.
|
|
||||||
*
|
|
||||||
* @param[in] addr: Base address indicating the start of
|
|
||||||
* memory mapped IO region of the MAC.
|
|
||||||
* @param[in] chan: DMA Rx channel number.
|
|
||||||
*
|
|
||||||
* @pre
|
|
||||||
* - MAC needs to be out of reset and proper clocks need to be configured
|
|
||||||
* - DMA HW init need to be completed successfully, see osi_hw_dma_init
|
|
||||||
* - Mapping of physical IRQ line to DMA channel need to be maintained at
|
|
||||||
* OSDependent layer and pass corresponding channel number.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* API Group:
|
|
||||||
* - Initialization: No
|
|
||||||
* - Run time: Yes
|
|
||||||
* - De-initialization: Yes
|
|
||||||
*/
|
|
||||||
static void eqos_disable_chan_rx_intr(void *addr, nveu32_t chan)
|
|
||||||
{
|
|
||||||
nveu32_t cntrl, status;
|
|
||||||
#if 0
|
|
||||||
CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
/* Clear irq before disabling */
|
|
||||||
status = osi_readl((nveu8_t *)addr +
|
|
||||||
EQOS_VIRT_INTR_CHX_STATUS(chan));
|
|
||||||
if ((status & EQOS_VIRT_INTR_CHX_STATUS_RX) ==
|
|
||||||
EQOS_VIRT_INTR_CHX_STATUS_RX) {
|
|
||||||
osi_writel(EQOS_DMA_CHX_STATUS_CLEAR_RX,
|
|
||||||
(nveu8_t *)addr + EQOS_DMA_CHX_STATUS(chan));
|
|
||||||
osi_writel(EQOS_VIRT_INTR_CHX_STATUS_RX,
|
|
||||||
(nveu8_t *)addr +
|
|
||||||
EQOS_VIRT_INTR_CHX_STATUS(chan));
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Disable irq */
|
|
||||||
cntrl = osi_readl((nveu8_t *)addr +
|
|
||||||
EQOS_VIRT_INTR_CHX_CNTRL(chan));
|
|
||||||
cntrl &= ~EQOS_VIRT_INTR_CHX_CNTRL_RX;
|
|
||||||
osi_writel(cntrl, (nveu8_t *)addr +
|
|
||||||
EQOS_VIRT_INTR_CHX_CNTRL(chan));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief eqos_enable_chan_rx_intr - Enable Rx channel interrupts.
|
|
||||||
*
|
|
||||||
* @param[in] addr: Base address indicating the start of
|
|
||||||
* memory mapped IO region of the MAC.
|
|
||||||
* @param[in] chan: DMA Rx channel number.
|
|
||||||
*
|
|
||||||
* @pre
|
|
||||||
* - MAC needs to be out of reset and proper clocks need to be configured
|
|
||||||
* - DMA HW init need to be completed successfully, see osi_hw_dma_init
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* API Group:
|
|
||||||
* - Initialization: Yes
|
|
||||||
* - Run time: Yes
|
|
||||||
* - De-initialization: No
|
|
||||||
*/
|
|
||||||
static void eqos_enable_chan_rx_intr(void *addr, nveu32_t chan)
|
|
||||||
{
|
|
||||||
nveu32_t cntrl;
|
|
||||||
#if 0
|
|
||||||
CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
cntrl = osi_readl((nveu8_t *)addr +
|
|
||||||
EQOS_VIRT_INTR_CHX_CNTRL(chan));
|
|
||||||
cntrl |= EQOS_VIRT_INTR_CHX_CNTRL_RX;
|
|
||||||
osi_writel(cntrl, (nveu8_t *)addr +
|
|
||||||
EQOS_VIRT_INTR_CHX_CNTRL(chan));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief eqos_set_tx_ring_len - Set DMA Tx ring length.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* Algorithm:
|
|
||||||
* - Set DMA Tx channel ring length for specific channel.
|
|
||||||
*
|
|
||||||
* @param[in] osi_dma: OSI DMA private data structure.
|
|
||||||
* @param[in] chan: DMA Tx channel number.
|
|
||||||
* @param[in] len: Length.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* API Group:
|
|
||||||
* - Initialization: Yes
|
|
||||||
* - Run time: No
|
|
||||||
* - De-initialization: No
|
|
||||||
*/
|
|
||||||
static void eqos_set_tx_ring_len(struct osi_dma_priv_data *osi_dma,
|
|
||||||
nveu32_t chan,
|
|
||||||
nveu32_t len)
|
|
||||||
{
|
|
||||||
void *addr = osi_dma->base;
|
|
||||||
#if 0
|
|
||||||
CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
eqos_dma_safety_writel(osi_dma, len, (nveu8_t *)addr +
|
|
||||||
EQOS_DMA_CHX_TDRL(chan),
|
|
||||||
EQOS_DMA_CH0_TDRL_IDX + chan);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief eqos_set_tx_ring_start_addr - Set DMA Tx ring base address.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* Algorithm:
|
|
||||||
* - Sets DMA Tx ring base address for specific channel.
|
|
||||||
*
|
|
||||||
* @param[in] addr: Base address indicating the start of
|
|
||||||
* memory mapped IO region of the MAC.
|
|
||||||
* @param[in] chan: DMA Tx channel number.
|
|
||||||
* @param[in] tx_desc: Tx desc base address.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* API Group:
|
|
||||||
* - Initialization: Yes
|
|
||||||
* - Run time: No
|
|
||||||
* - De-initialization: No
|
|
||||||
*/
|
|
||||||
static void eqos_set_tx_ring_start_addr(void *addr, nveu32_t chan,
|
|
||||||
nveu64_t tx_desc)
|
|
||||||
{
|
|
||||||
nveu64_t tmp;
|
|
||||||
#if 0
|
|
||||||
CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
tmp = H32(tx_desc);
|
|
||||||
if (tmp < UINT_MAX) {
|
|
||||||
osi_writel((nveu32_t)tmp, (nveu8_t *)addr +
|
|
||||||
EQOS_DMA_CHX_TDLH(chan));
|
|
||||||
}
|
|
||||||
|
|
||||||
tmp = L32(tx_desc);
|
|
||||||
if (tmp < UINT_MAX) {
|
|
||||||
osi_writel((nveu32_t)tmp, (nveu8_t *)addr +
|
|
||||||
EQOS_DMA_CHX_TDLA(chan));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief eqos_update_tx_tailptr - Updates DMA Tx ring tail pointer.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* Algorithm:
|
|
||||||
* - Updates DMA Tx ring tail pointer for specific channel.
|
|
||||||
*
|
|
||||||
* @param[in] addr: Base address indicating the start of
|
|
||||||
* memory mapped IO region of the MAC.
|
|
||||||
* @param[in] chan: DMA Tx channel number.
|
|
||||||
* @param[in] tailptr: DMA Tx ring tail pointer.
|
|
||||||
*
|
|
||||||
*
|
|
||||||
* @pre
|
|
||||||
* - MAC needs to be out of reset and proper clocks need to be configured
|
|
||||||
* - DMA HW init need to be completed successfully, see osi_hw_dma_init
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* API Group:
|
|
||||||
* - Initialization: No
|
|
||||||
* - Run time: Yes
|
|
||||||
* - De-initialization: No
|
|
||||||
*/
|
|
||||||
static void eqos_update_tx_tailptr(void *addr, nveu32_t chan,
|
|
||||||
nveu64_t tailptr)
|
|
||||||
{
|
|
||||||
nveu64_t tmp;
|
|
||||||
#if 0
|
|
||||||
CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
tmp = L32(tailptr);
|
|
||||||
if (tmp < UINT_MAX) {
|
|
||||||
osi_writel((nveu32_t)tmp, (nveu8_t *)addr +
|
|
||||||
EQOS_DMA_CHX_TDTP(chan));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief eqos_set_rx_ring_len - Set Rx channel ring length.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* Algorithm:
|
|
||||||
* - Sets DMA Rx channel ring length for specific DMA channel.
|
|
||||||
*
|
|
||||||
* @param[in] osi_dma: OSI DMA private data structure.
|
|
||||||
* @param[in] chan: DMA Rx channel number.
|
|
||||||
* @param[in] len: Length
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* API Group:
|
|
||||||
* - Initialization: Yes
|
|
||||||
* - Run time: No
|
|
||||||
* - De-initialization: No
|
|
||||||
*/
|
|
||||||
static void eqos_set_rx_ring_len(struct osi_dma_priv_data *osi_dma,
|
|
||||||
nveu32_t chan,
|
|
||||||
nveu32_t len)
|
|
||||||
{
|
|
||||||
void *addr = osi_dma->base;
|
|
||||||
#if 0
|
|
||||||
CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
eqos_dma_safety_writel(osi_dma, len, (nveu8_t *)addr +
|
|
||||||
EQOS_DMA_CHX_RDRL(chan),
|
|
||||||
EQOS_DMA_CH0_RDRL_IDX + chan);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief eqos_set_rx_ring_start_addr - Set DMA Rx ring base address.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* Algorithm:
|
|
||||||
* - Sets DMA Rx channel ring base address.
|
|
||||||
*
|
|
||||||
* @param[in] addr: Base address indicating the start of
|
|
||||||
* memory mapped IO region of the MAC.
|
|
||||||
* @param[in] chan: DMA Rx channel number.
|
|
||||||
* @param[in] rx_desc: DMA Rx desc base address.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* API Group:
|
|
||||||
* - Initialization: Yes
|
|
||||||
* - Run time: No
|
|
||||||
* - De-initialization: No
|
|
||||||
*/
|
|
||||||
static void eqos_set_rx_ring_start_addr(void *addr, nveu32_t chan,
|
|
||||||
nveu64_t rx_desc)
|
|
||||||
{
|
|
||||||
nveu64_t tmp;
|
|
||||||
#if 0
|
|
||||||
CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
tmp = H32(rx_desc);
|
|
||||||
if (tmp < UINT_MAX) {
|
|
||||||
osi_writel((nveu32_t)tmp, (nveu8_t *)addr +
|
|
||||||
EQOS_DMA_CHX_RDLH(chan));
|
|
||||||
}
|
|
||||||
|
|
||||||
tmp = L32(rx_desc);
|
|
||||||
if (tmp < UINT_MAX) {
|
|
||||||
osi_writel((nveu32_t)tmp, (nveu8_t *)addr +
|
|
||||||
EQOS_DMA_CHX_RDLA(chan));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief eqos_update_rx_tailptr - Update Rx ring tail pointer
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* Algorithm:
|
|
||||||
* - Updates DMA Rx channel tail pointer for specific channel.
|
|
||||||
*
|
|
||||||
* @param[in] addr: Base address indicating the start of
|
|
||||||
* memory mapped IO region of the MAC.
|
|
||||||
* @param[in] chan: DMA Rx channel number.
|
|
||||||
* @param[in] tailptr: Tail pointer
|
|
||||||
*
|
|
||||||
* @pre
|
|
||||||
* - MAC needs to be out of reset and proper clocks need to be configured
|
|
||||||
* - DMA HW init need to be completed successfully, see osi_hw_dma_init
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* API Group:
|
|
||||||
* - Initialization: Yes
|
|
||||||
* - Run time: Yes
|
|
||||||
* - De-initialization: No
|
|
||||||
*/
|
|
||||||
static void eqos_update_rx_tailptr(void *addr, nveu32_t chan,
|
|
||||||
nveu64_t tailptr)
|
|
||||||
{
|
|
||||||
nveu64_t tmp;
|
|
||||||
#if 0
|
|
||||||
CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
tmp = L32(tailptr);
|
|
||||||
if (tmp < UINT_MAX) {
|
|
||||||
osi_writel((nveu32_t)tmp, (nveu8_t *)addr +
|
|
||||||
EQOS_DMA_CHX_RDTP(chan));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief eqos_start_dma - Start DMA.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* Algorithm:
|
|
||||||
* - Start Tx and Rx DMA for specific channel.
|
|
||||||
*
|
|
||||||
* @param[in] osi_dma: OSI DMA private data structure.
|
|
||||||
* @param[in] chan: DMA Tx/Rx channel number.
|
|
||||||
*
|
|
||||||
* @pre
|
|
||||||
* - MAC needs to be out of reset and proper clocks need to be configured
|
|
||||||
* - DMA HW init need to be completed successfully, see osi_hw_dma_init
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* API Group:
|
|
||||||
* - Initialization: Yes
|
|
||||||
* - Run time: No
|
|
||||||
* - De-initialization: No
|
|
||||||
*/
|
|
||||||
static void eqos_start_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan)
|
|
||||||
{
|
|
||||||
nveu32_t val;
|
|
||||||
void *addr = osi_dma->base;
|
|
||||||
#if 0
|
|
||||||
CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
/* start Tx DMA */
|
|
||||||
val = osi_readla(osi_dma->osd,
|
|
||||||
(nveu8_t *)addr + EQOS_DMA_CHX_TX_CTRL(chan));
|
|
||||||
val |= OSI_BIT(0);
|
|
||||||
eqos_dma_safety_writel(osi_dma, val, (nveu8_t *)addr +
|
|
||||||
EQOS_DMA_CHX_TX_CTRL(chan),
|
|
||||||
EQOS_DMA_CH0_TX_CTRL_IDX + chan);
|
|
||||||
|
|
||||||
/* start Rx DMA */
|
|
||||||
val = osi_readla(osi_dma->osd,
|
|
||||||
(nveu8_t *)addr + EQOS_DMA_CHX_RX_CTRL(chan));
|
|
||||||
val |= OSI_BIT(0);
|
|
||||||
val &= ~OSI_BIT(31);
|
|
||||||
eqos_dma_safety_writel(osi_dma, val, (nveu8_t *)addr +
|
|
||||||
EQOS_DMA_CHX_RX_CTRL(chan),
|
|
||||||
EQOS_DMA_CH0_RX_CTRL_IDX + chan);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief eqos_stop_dma - Stop DMA.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* Algorithm:
|
|
||||||
* - Stop Tx and Rx DMA for specific channel.
|
|
||||||
*
|
|
||||||
* @param[in] osi_dma: OSI DMA private data structure.
|
|
||||||
* @param[in] chan: DMA Tx/Rx channel number.
|
|
||||||
*
|
|
||||||
* @pre
|
|
||||||
* - MAC needs to be out of reset and proper clocks need to be configured
|
|
||||||
* - DMA HW init need to be completed successfully, see osi_hw_dma_init
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* API Group:
|
|
||||||
* - Initialization: No
|
|
||||||
* - Run time: No
|
|
||||||
* - De-initialization: Yes
|
|
||||||
*/
|
|
||||||
static void eqos_stop_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan)
|
|
||||||
{
|
|
||||||
nveu32_t val;
|
|
||||||
void *addr = osi_dma->base;
|
|
||||||
#if 0
|
|
||||||
CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
/* stop Tx DMA */
|
|
||||||
val = osi_readla(osi_dma->osd,
|
|
||||||
(nveu8_t *)addr + EQOS_DMA_CHX_TX_CTRL(chan));
|
|
||||||
val &= ~OSI_BIT(0);
|
|
||||||
eqos_dma_safety_writel(osi_dma, val, (nveu8_t *)addr +
|
|
||||||
EQOS_DMA_CHX_TX_CTRL(chan),
|
|
||||||
EQOS_DMA_CH0_TX_CTRL_IDX + chan);
|
|
||||||
|
|
||||||
/* stop Rx DMA */
|
|
||||||
val = osi_readla(osi_dma->osd,
|
|
||||||
(nveu8_t *)addr + EQOS_DMA_CHX_RX_CTRL(chan));
|
|
||||||
val &= ~OSI_BIT(0);
|
|
||||||
val |= OSI_BIT(31);
|
|
||||||
eqos_dma_safety_writel(osi_dma, val, (nveu8_t *)addr +
|
|
||||||
EQOS_DMA_CHX_RX_CTRL(chan),
|
|
||||||
EQOS_DMA_CH0_RX_CTRL_IDX + chan);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief eqos_configure_dma_channel - Configure DMA channel
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* Algorithm:
|
|
||||||
* - This takes care of configuring the below
|
|
||||||
* parameters for the DMA channel
|
|
||||||
* - Enabling DMA channel interrupts
|
|
||||||
* - Enable 8xPBL mode
|
|
||||||
* - Program Tx, Rx PBL
|
|
||||||
* - Enable TSO if HW supports
|
|
||||||
* - Program Rx Watchdog timer
|
|
||||||
*
|
|
||||||
* @param[in] chan: DMA channel number that need to be configured.
|
|
||||||
* @param[in] osi_dma: OSI DMA private data structure.
|
|
||||||
*
|
|
||||||
* @pre MAC has to be out of reset.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* API Group:
|
|
||||||
* - Initialization: Yes
|
|
||||||
* - Run time: No
|
|
||||||
* - De-initialization: No
|
|
||||||
*/
|
|
||||||
static void eqos_configure_dma_channel(nveu32_t chan,
|
|
||||||
struct osi_dma_priv_data *osi_dma)
|
|
||||||
{
|
|
||||||
nveu32_t value;
|
|
||||||
#if 0
|
|
||||||
CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
/* enable DMA channel interrupts */
|
|
||||||
/* Enable TIE and TBUE */
|
|
||||||
/* TIE - Transmit Interrupt Enable */
|
|
||||||
/* TBUE - Transmit Buffer Unavailable Enable */
|
|
||||||
/* RIE - Receive Interrupt Enable */
|
|
||||||
/* RBUE - Receive Buffer Unavailable Enable */
|
|
||||||
/* AIE - Abnormal Interrupt Summary Enable */
|
|
||||||
/* NIE - Normal Interrupt Summary Enable */
|
|
||||||
/* FBE - Fatal Bus Error Enable */
|
|
||||||
value = osi_readl((nveu8_t *)osi_dma->base +
|
|
||||||
EQOS_DMA_CHX_INTR_ENA(chan));
|
|
||||||
if (osi_dma->use_virtualization == OSI_DISABLE) {
|
|
||||||
value |= EQOS_DMA_CHX_INTR_TBUE |
|
|
||||||
EQOS_DMA_CHX_INTR_RBUE;
|
|
||||||
}
|
|
||||||
|
|
||||||
value |= EQOS_DMA_CHX_INTR_TIE | EQOS_DMA_CHX_INTR_RIE |
|
|
||||||
EQOS_DMA_CHX_INTR_FBEE | EQOS_DMA_CHX_INTR_AIE |
|
|
||||||
EQOS_DMA_CHX_INTR_NIE;
|
|
||||||
/* For multi-IRQs to work, NIE needs to be disabled */
|
|
||||||
value &= ~(EQOS_DMA_CHX_INTR_NIE);
|
|
||||||
eqos_dma_safety_writel(osi_dma, value, (nveu8_t *)osi_dma->base +
|
|
||||||
EQOS_DMA_CHX_INTR_ENA(chan),
|
|
||||||
EQOS_DMA_CH0_INTR_ENA_IDX + chan);
|
|
||||||
|
|
||||||
/* Enable 8xPBL mode */
|
|
||||||
value = osi_readl((nveu8_t *)osi_dma->base +
|
|
||||||
EQOS_DMA_CHX_CTRL(chan));
|
|
||||||
value |= EQOS_DMA_CHX_CTRL_PBLX8;
|
|
||||||
eqos_dma_safety_writel(osi_dma, value, (nveu8_t *)osi_dma->base +
|
|
||||||
EQOS_DMA_CHX_CTRL(chan),
|
|
||||||
EQOS_DMA_CH0_CTRL_IDX + chan);
|
|
||||||
|
|
||||||
/* Configure DMA channel Transmit control register */
|
|
||||||
value = osi_readl((nveu8_t *)osi_dma->base +
|
|
||||||
EQOS_DMA_CHX_TX_CTRL(chan));
|
|
||||||
/* Enable OSF mode */
|
|
||||||
value |= EQOS_DMA_CHX_TX_CTRL_OSF;
|
|
||||||
/* TxPBL = 32*/
|
|
||||||
value |= EQOS_DMA_CHX_TX_CTRL_TXPBL_RECOMMENDED;
|
|
||||||
/* enable TSO by default if HW supports */
|
|
||||||
value |= EQOS_DMA_CHX_TX_CTRL_TSE;
|
|
||||||
|
|
||||||
eqos_dma_safety_writel(osi_dma, value, (nveu8_t *)osi_dma->base +
|
|
||||||
EQOS_DMA_CHX_TX_CTRL(chan),
|
|
||||||
EQOS_DMA_CH0_TX_CTRL_IDX + chan);
|
|
||||||
|
|
||||||
/* Configure DMA channel Receive control register */
|
|
||||||
/* Select Rx Buffer size. Needs to be rounded up to next multiple of
|
|
||||||
* bus width
|
|
||||||
*/
|
|
||||||
value = osi_readl((nveu8_t *)osi_dma->base +
|
|
||||||
EQOS_DMA_CHX_RX_CTRL(chan));
|
|
||||||
|
|
||||||
/* clear previous Rx buffer size */
|
|
||||||
value &= ~EQOS_DMA_CHX_RBSZ_MASK;
|
|
||||||
|
|
||||||
value |= (osi_dma->rx_buf_len << EQOS_DMA_CHX_RBSZ_SHIFT);
|
|
||||||
/* RXPBL = 12 */
|
|
||||||
value |= EQOS_DMA_CHX_RX_CTRL_RXPBL_RECOMMENDED;
|
|
||||||
eqos_dma_safety_writel(osi_dma, value, (nveu8_t *)osi_dma->base +
|
|
||||||
EQOS_DMA_CHX_RX_CTRL(chan),
|
|
||||||
EQOS_DMA_CH0_RX_CTRL_IDX + chan);
|
|
||||||
|
|
||||||
/* Set Receive Interrupt Watchdog Timer Count */
|
|
||||||
/* conversion of usec to RWIT value
|
|
||||||
* Eg: System clock is 125MHz, each clock cycle would then be 8ns
|
|
||||||
* For value 0x1 in RWT, device would wait for 512 clk cycles with
|
|
||||||
* RWTU as 0x1,
|
|
||||||
* ie, (8ns x 512) => 4.096us (rounding off to 4us)
|
|
||||||
* So formula with above values is, ret = usec/4
|
|
||||||
*/
|
|
||||||
if ((osi_dma->use_riwt == OSI_ENABLE) &&
|
|
||||||
(osi_dma->rx_riwt < UINT_MAX)) {
|
|
||||||
value = osi_readl((nveu8_t *)osi_dma->base +
|
|
||||||
EQOS_DMA_CHX_RX_WDT(chan));
|
|
||||||
/* Mask the RWT and RWTU value */
|
|
||||||
value &= ~(EQOS_DMA_CHX_RX_WDT_RWT_MASK |
|
|
||||||
EQOS_DMA_CHX_RX_WDT_RWTU_MASK);
|
|
||||||
/* Conversion of usec to Rx Interrupt Watchdog Timer Count */
|
|
||||||
value |= ((osi_dma->rx_riwt *
|
|
||||||
(EQOS_AXI_CLK_FREQ / OSI_ONE_MEGA_HZ)) /
|
|
||||||
EQOS_DMA_CHX_RX_WDT_RWTU) &
|
|
||||||
EQOS_DMA_CHX_RX_WDT_RWT_MASK;
|
|
||||||
value |= EQOS_DMA_CHX_RX_WDT_RWTU_512_CYCLE;
|
|
||||||
osi_writel(value, (nveu8_t *)osi_dma->base +
|
|
||||||
EQOS_DMA_CHX_RX_WDT(chan));
|
|
||||||
}
|
|
||||||
}
|
|
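A worked example of the usec-to-RWT conversion described in the comment above, assuming the 125 MHz clock and 512-cycle RWTU quoted there (so one RWT unit is roughly 4.096 us); the sk_ names are illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t clk_mhz = 125U;  /* clock cycles per microsecond */
	uint32_t rwtu    = 512U;  /* clock cycles per RWT unit */
	uint32_t usec    = 64U;   /* desired watchdog timeout in microseconds */

	/* (64 * 125) / 512 = 15 after integer division, i.e. ~61 us actual */
	uint32_t rwt = (usec * clk_mhz) / rwtu;

	printf("RWT count for %u us: %u\n", usec, rwt);
	return 0;
}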
||||||
|
|
||||||
/**
|
|
||||||
* @brief eqos_init_dma_channel - DMA channel INIT
|
|
||||||
*
|
|
||||||
* @param[in] osi_dma: OSI DMA private data structure.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* API Group:
|
|
||||||
* - Initialization: Yes
|
|
||||||
* - Run time: No
|
|
||||||
* - De-initialization: No
|
|
||||||
*
|
|
||||||
* @retval 0 on success
|
|
||||||
* @retval -1 on failure.
|
|
||||||
*/
|
|
||||||
static nve32_t eqos_init_dma_channel(struct osi_dma_priv_data *osi_dma)
|
|
||||||
{
|
|
||||||
nveu32_t chinx;
|
|
||||||
|
|
||||||
eqos_dma_safety_init(osi_dma);
|
|
||||||
|
|
||||||
/* configure EQOS DMA channels */
|
|
||||||
for (chinx = 0; chinx < osi_dma->num_dma_chans; chinx++) {
|
|
||||||
eqos_configure_dma_channel(osi_dma->dma_chans[chinx], osi_dma);
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief eqos_set_rx_buf_len - Set Rx buffer length
|
|
||||||
* Sets the Rx buffer length based on the new MTU size set.
|
|
||||||
*
|
|
||||||
* @param[in, out] osi_dma: OSI DMA private data structure.
|
|
||||||
*
|
|
||||||
* @pre
|
|
||||||
* - MAC needs to be out of reset and proper clocks need to be configured
|
|
||||||
* - DMA HW init need to be completed successfully, see osi_hw_dma_init
|
|
||||||
* - osi_dma->mtu need to be filled with current MTU size <= 9K
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* API Group:
|
|
||||||
* - Initialization: No
|
|
||||||
* - Run time: Yes
|
|
||||||
* - De-initialization: No
|
|
||||||
*/
|
|
||||||
static void eqos_set_rx_buf_len(struct osi_dma_priv_data *osi_dma)
|
|
||||||
{
|
|
||||||
nveu32_t rx_buf_len = 0U;
|
|
||||||
|
|
||||||
/* Add Ethernet header + VLAN header + NET IP align size to MTU */
|
|
||||||
if (osi_dma->mtu <= OSI_MAX_MTU_SIZE) {
|
|
||||||
rx_buf_len = osi_dma->mtu + OSI_ETH_HLEN + NV_VLAN_HLEN +
|
|
||||||
OSI_NET_IP_ALIGN;
|
|
||||||
} else {
|
|
||||||
rx_buf_len = OSI_MAX_MTU_SIZE + OSI_ETH_HLEN + NV_VLAN_HLEN +
|
|
||||||
OSI_NET_IP_ALIGN;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Buffer alignment */
|
|
||||||
osi_dma->rx_buf_len = ((rx_buf_len + (EQOS_AXI_BUS_WIDTH - 1U)) &
|
|
||||||
~(EQOS_AXI_BUS_WIDTH - 1U));
|
|
||||||
}
|
|
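The buffer-length rounding above uses the usual (len + (W - 1)) & ~(W - 1) idiom to align up to the AXI bus width; a standalone check of the same arithmetic, using the 16-byte width from EQOS_AXI_BUS_WIDTH:

#include <assert.h>
#include <stdint.h>

#define SK_AXI_BUS_WIDTH 0x10U  /* 16-byte bus width, as in EQOS_AXI_BUS_WIDTH */

static uint32_t sk_align_buf_len(uint32_t len)
{
	return (len + (SK_AXI_BUS_WIDTH - 1U)) & ~(SK_AXI_BUS_WIDTH - 1U);
}

int main(void)
{
	assert(sk_align_buf_len(1522U) == 1536U);  /* rounded up */
	assert(sk_align_buf_len(1536U) == 1536U);  /* already aligned */
	return 0;
}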
||||||
|
|
||||||
#ifndef OSI_STRIPPED_LIB
|
|
||||||
/**
|
|
||||||
* @brief Read-validate HW registers for functional safety.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* Algorithm:
|
|
||||||
* - Reads pre-configured list of MAC/MTL configuration registers
|
|
||||||
* and compares with last written value for any modifications.
|
|
||||||
*
|
|
||||||
* @param[in] osi_dma: OSI DMA private data structure.
|
|
||||||
*
|
|
||||||
* @pre
|
|
||||||
* - MAC has to be out of reset.
|
|
||||||
* - osi_hw_dma_init has to be called. Internally this would initialize
|
|
||||||
* the safety_config (see osi_dma_priv_data) based on MAC version and
|
|
||||||
* which specific registers needs to be validated periodically.
|
|
||||||
* - Invoke this call if (osi_dma_priv_data->safety_config != OSI_NULL)
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* API Group:
|
|
||||||
* - Initialization: No
|
|
||||||
* - Run time: Yes
|
|
||||||
* - De-initialization: No
|
|
||||||
*
|
|
||||||
* @retval 0 on success
|
|
||||||
* @retval -1 on failure.
|
|
||||||
*/
|
|
||||||
static nve32_t eqos_validate_dma_regs(struct osi_dma_priv_data *osi_dma)
|
|
||||||
{
|
|
||||||
struct dma_func_safety *config =
|
|
||||||
(struct dma_func_safety *)osi_dma->safety_config;
|
|
||||||
nveu32_t cur_val;
|
|
||||||
nveu32_t i;
|
|
||||||
|
|
||||||
osi_lock_irq_enabled(&config->dma_safety_lock);
|
|
||||||
for (i = EQOS_DMA_CH0_CTRL_IDX; i < EQOS_MAX_DMA_SAFETY_REGS; i++) {
|
|
||||||
if (config->reg_addr[i] == OSI_NULL) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
cur_val = osi_readl((nveu8_t *)config->reg_addr[i]);
|
|
||||||
cur_val &= config->reg_mask[i];
|
|
||||||
|
|
||||||
if (cur_val == config->reg_val[i]) {
|
|
||||||
continue;
|
|
||||||
} else {
|
|
||||||
/* Register content differs from what was written.
|
|
||||||
* Return error and let safety manager (NVGuard etc.)
|
|
||||||
* take care of corrective action.
|
|
||||||
*/
|
|
||||||
osi_unlock_irq_enabled(&config->dma_safety_lock);
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
osi_unlock_irq_enabled(&config->dma_safety_lock);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief eqos_config_slot - Configure slot Checking for DMA channel
|
* @brief eqos_config_slot - Configure slot Checking for DMA channel
|
||||||
@@ -895,94 +80,66 @@ static void eqos_config_slot(struct osi_dma_priv_data *osi_dma,
|
|||||||
EQOS_DMA_CHX_SLOT_CTRL(chan));
|
EQOS_DMA_CHX_SLOT_CTRL(chan));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif /* !OSI_STRIPPED_LIB */
|
|
||||||
|
|
||||||
|
#ifdef OSI_DEBUG
|
||||||
/**
|
/**
|
||||||
* @brief eqos_clear_vm_tx_intr - Handle VM Tx interrupt
|
* @brief Enable/disable debug interrupt
|
||||||
*
|
*
|
||||||
* @param[in] addr: MAC base address.
|
* @param[in] osi_dma: OSI DMA private data structure.
|
||||||
* @param[in] chan: DMA Tx channel number.
|
|
||||||
*
|
*
|
||||||
* Algorithm: Clear Tx interrupt source at DMA and wrapper level.
|
* Algorithm:
|
||||||
*
|
* - if osi_dma->ioctl_data.arg_u32 == OSI_ENABLE, enable debug interrupts
|
||||||
* @note
|
* - else, disable debug interrupts
|
||||||
* Dependencies: None.
|
|
||||||
* Protection: None.
|
|
||||||
* @retval None.
|
|
||||||
*/
|
*/
|
||||||
static void eqos_clear_vm_tx_intr(void *addr, nveu32_t chan)
|
static void eqos_debug_intr_config(struct osi_dma_priv_data *osi_dma)
|
||||||
{
|
{
|
||||||
#if 0
|
nveu32_t chinx;
|
||||||
CHECK_CHAN_BOUND(chan);
|
nveu32_t chan;
|
||||||
|
nveu32_t val;
|
||||||
|
nveu32_t enable = osi_dma->ioctl_data.arg_u32;
|
||||||
|
|
||||||
|
if (enable == OSI_ENABLE) {
|
||||||
|
for (chinx = 0; chinx < osi_dma->num_dma_chans; chinx++) {
|
||||||
|
chan = osi_dma->dma_chans[chinx];
|
||||||
|
val = osi_readl((nveu8_t *)osi_dma->base +
|
||||||
|
EQOS_DMA_CHX_INTR_ENA(chan));
|
||||||
|
|
||||||
|
val |= (EQOS_DMA_CHX_INTR_AIE |
|
||||||
|
EQOS_DMA_CHX_INTR_FBEE |
|
||||||
|
EQOS_DMA_CHX_INTR_RBUE |
|
||||||
|
EQOS_DMA_CHX_INTR_TBUE |
|
||||||
|
EQOS_DMA_CHX_INTR_NIE);
|
||||||
|
osi_writel(val, (nveu8_t *)osi_dma->base +
|
||||||
|
EQOS_DMA_CHX_INTR_ENA(chan));
|
||||||
|
}
|
||||||
|
|
||||||
|
} else {
|
||||||
|
for (chinx = 0; chinx < osi_dma->num_dma_chans; chinx++) {
|
||||||
|
chan = osi_dma->dma_chans[chinx];
|
||||||
|
val = osi_readl((nveu8_t *)osi_dma->base +
|
||||||
|
EQOS_DMA_CHX_INTR_ENA(chan));
|
||||||
|
val &= (~EQOS_DMA_CHX_INTR_AIE &
|
||||||
|
~EQOS_DMA_CHX_INTR_FBEE &
|
||||||
|
~EQOS_DMA_CHX_INTR_RBUE &
|
||||||
|
~EQOS_DMA_CHX_INTR_TBUE &
|
||||||
|
~EQOS_DMA_CHX_INTR_NIE);
|
||||||
|
osi_writel(val, (nveu8_t *)osi_dma->base +
|
||||||
|
EQOS_DMA_CHX_INTR_ENA(chan));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
#endif
|
#endif
|
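A hedged sketch of how an OS-dependent layer could drive the new OSI_DEBUG-only hook: stash the enable flag in the ioctl argument and call through the ops table. The sk_ types below compress the real osi_dma structures and are illustrative only:

#include <stddef.h>

#define SK_OSI_ENABLE 1U

struct sk_ioctl_data { unsigned int arg_u32; };

struct sk_osi_dma {
	struct sk_ioctl_data ioctl_data;
	void (*debug_intr_config)(struct sk_osi_dma *osi_dma);
};

/* Request debug interrupts on or off via the debug hook, if present. */
static void sk_set_debug_intr(struct sk_osi_dma *osi_dma, unsigned int enable)
{
	osi_dma->ioctl_data.arg_u32 = enable;
	if (osi_dma->debug_intr_config != NULL) {
		osi_dma->debug_intr_config(osi_dma);
	}
}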
||||||
osi_writel(EQOS_DMA_CHX_STATUS_CLEAR_TX,
|
|
||||||
(nveu8_t *)addr + EQOS_DMA_CHX_STATUS(chan));
|
|
||||||
osi_writel(EQOS_VIRT_INTR_CHX_STATUS_TX,
|
|
||||||
(nveu8_t *)addr + EQOS_VIRT_INTR_CHX_STATUS(chan));
|
|
||||||
|
|
||||||
eqos_disable_chan_tx_intr(addr, chan);
|
/*
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief eqos_clear_vm_rx_intr - Handle VM Rx interrupt
|
|
||||||
*
|
|
||||||
* @param[in] addr: MAC base address.
|
|
||||||
* @param[in] chan: DMA Rx channel number.
|
|
||||||
*
|
|
||||||
* Algorithm: Clear Rx interrupt source at DMA and wrapper level.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* Dependencies: None.
|
|
||||||
* Protection: None.
|
|
||||||
*
|
|
||||||
* @retval None.
|
|
||||||
*/
|
|
||||||
static void eqos_clear_vm_rx_intr(void *addr, nveu32_t chan)
|
|
||||||
{
|
|
||||||
#if 0
|
|
||||||
CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
osi_writel(EQOS_DMA_CHX_STATUS_CLEAR_RX,
|
|
||||||
(nveu8_t *)addr + EQOS_DMA_CHX_STATUS(chan));
|
|
||||||
osi_writel(EQOS_VIRT_INTR_CHX_STATUS_RX,
|
|
||||||
(nveu8_t *)addr + EQOS_VIRT_INTR_CHX_STATUS(chan));
|
|
||||||
|
|
||||||
eqos_disable_chan_rx_intr(addr, chan);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief eqos_get_dma_safety_config - EQOS get DMA safety configuration
|
|
||||||
*/
|
|
||||||
void *eqos_get_dma_safety_config(void)
|
|
||||||
{
|
|
||||||
return &eqos_dma_safety_config;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief eqos_init_dma_chan_ops - Initialize EQOS DMA operations.
|
* @brief eqos_init_dma_chan_ops - Initialize EQOS DMA operations.
|
||||||
*
|
*
|
||||||
* @param[in] ops: DMA channel operations pointer.
|
* @param[in] ops: DMA channel operations pointer.
|
||||||
*/
|
*/
|
||||||
void eqos_init_dma_chan_ops(struct dma_chan_ops *ops)
|
void eqos_init_dma_chan_ops(struct dma_chan_ops *ops)
|
||||||
{
|
{
|
||||||
ops->set_tx_ring_len = eqos_set_tx_ring_len;
|
|
||||||
ops->set_rx_ring_len = eqos_set_rx_ring_len;
|
|
||||||
ops->set_tx_ring_start_addr = eqos_set_tx_ring_start_addr;
|
|
||||||
ops->set_rx_ring_start_addr = eqos_set_rx_ring_start_addr;
|
|
||||||
ops->update_tx_tailptr = eqos_update_tx_tailptr;
|
|
||||||
ops->update_rx_tailptr = eqos_update_rx_tailptr;
|
|
||||||
ops->disable_chan_tx_intr = eqos_disable_chan_tx_intr;
|
|
||||||
ops->enable_chan_tx_intr = eqos_enable_chan_tx_intr;
|
|
||||||
ops->disable_chan_rx_intr = eqos_disable_chan_rx_intr;
|
|
||||||
ops->enable_chan_rx_intr = eqos_enable_chan_rx_intr;
|
|
||||||
ops->start_dma = eqos_start_dma;
|
|
||||||
ops->stop_dma = eqos_stop_dma;
|
|
||||||
ops->init_dma_channel = eqos_init_dma_channel;
|
|
||||||
ops->set_rx_buf_len = eqos_set_rx_buf_len;
|
|
||||||
#ifndef OSI_STRIPPED_LIB
|
|
||||||
ops->validate_regs = eqos_validate_dma_regs;
|
|
||||||
ops->config_slot = eqos_config_slot;
|
ops->config_slot = eqos_config_slot;
|
||||||
#endif /* !OSI_STRIPPED_LIB */
|
#ifdef OSI_DEBUG
|
||||||
ops->clear_vm_tx_intr = eqos_clear_vm_tx_intr;
|
ops->debug_intr_config = eqos_debug_intr_config;
|
||||||
ops->clear_vm_rx_intr = eqos_clear_vm_rx_intr;
|
#endif
|
||||||
}
|
}
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
|
|||||||
@@ -55,9 +55,6 @@
|
|||||||
#define EQOS_DMA_CHX_TDLH(x) ((0x0080U * (x)) + 0x1110U)
|
#define EQOS_DMA_CHX_TDLH(x) ((0x0080U * (x)) + 0x1110U)
|
||||||
#define EQOS_DMA_CHX_TDLA(x) ((0x0080U * (x)) + 0x1114U)
|
#define EQOS_DMA_CHX_TDLA(x) ((0x0080U * (x)) + 0x1114U)
|
||||||
#define EQOS_DMA_CHX_TDRL(x) ((0x0080U * (x)) + 0x112CU)
|
#define EQOS_DMA_CHX_TDRL(x) ((0x0080U * (x)) + 0x112CU)
|
||||||
#define EQOS_VIRT_INTR_APB_CHX_CNTRL(x) (0x8200U + ((x) * 4U))
|
|
||||||
#define EQOS_VIRT_INTR_CHX_STATUS(x) (0x8604U + ((x) * 8U))
|
|
||||||
#define EQOS_VIRT_INTR_CHX_CNTRL(x) (0x8600U + ((x) * 8U))
|
|
||||||
/** @} */
|
/** @} */
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -66,8 +63,6 @@
|
|||||||
* @brief Values defined for the DMA channel registers
|
* @brief Values defined for the DMA channel registers
|
||||||
* @{
|
* @{
|
||||||
*/
|
*/
|
||||||
#define EQOS_VIRT_INTR_CHX_STATUS_TX OSI_BIT(0)
|
|
||||||
#define EQOS_VIRT_INTR_CHX_STATUS_RX OSI_BIT(1)
|
|
||||||
#define EQOS_DMA_CHX_STATUS_TI OSI_BIT(0)
|
#define EQOS_DMA_CHX_STATUS_TI OSI_BIT(0)
|
||||||
#define EQOS_DMA_CHX_STATUS_RI OSI_BIT(6)
|
#define EQOS_DMA_CHX_STATUS_RI OSI_BIT(6)
|
||||||
#define EQOS_DMA_CHX_STATUS_NIS OSI_BIT(15)
|
#define EQOS_DMA_CHX_STATUS_NIS OSI_BIT(15)
|
||||||
@@ -76,21 +71,13 @@
|
|||||||
#define EQOS_DMA_CHX_STATUS_CLEAR_RX \
|
#define EQOS_DMA_CHX_STATUS_CLEAR_RX \
|
||||||
(EQOS_DMA_CHX_STATUS_RI | EQOS_DMA_CHX_STATUS_NIS)
|
(EQOS_DMA_CHX_STATUS_RI | EQOS_DMA_CHX_STATUS_NIS)
|
||||||
|
|
||||||
#define EQOS_VIRT_INTR_CHX_CNTRL_TX OSI_BIT(0)
|
#ifdef OSI_DEBUG
|
||||||
#define EQOS_VIRT_INTR_CHX_CNTRL_RX OSI_BIT(1)
|
|
||||||
|
|
||||||
#define EQOS_DMA_CHX_INTR_TIE OSI_BIT(0)
|
|
||||||
#define EQOS_DMA_CHX_INTR_TBUE OSI_BIT(2)
|
#define EQOS_DMA_CHX_INTR_TBUE OSI_BIT(2)
|
||||||
#define EQOS_DMA_CHX_INTR_RIE OSI_BIT(6)
|
|
||||||
#define EQOS_DMA_CHX_INTR_RBUE OSI_BIT(7)
|
#define EQOS_DMA_CHX_INTR_RBUE OSI_BIT(7)
|
||||||
#define EQOS_DMA_CHX_INTR_FBEE OSI_BIT(12)
|
#define EQOS_DMA_CHX_INTR_FBEE OSI_BIT(12)
|
||||||
#define EQOS_DMA_CHX_INTR_AIE OSI_BIT(14)
|
#define EQOS_DMA_CHX_INTR_AIE OSI_BIT(14)
|
||||||
#define EQOS_DMA_CHX_INTR_NIE OSI_BIT(15)
|
#define EQOS_DMA_CHX_INTR_NIE OSI_BIT(15)
|
||||||
#define EQOS_DMA_CHX_TX_CTRL_OSF OSI_BIT(4)
|
#endif
|
||||||
#define EQOS_DMA_CHX_TX_CTRL_TSE OSI_BIT(12)
|
|
||||||
#define EQOS_DMA_CHX_CTRL_PBLX8 OSI_BIT(16)
|
|
||||||
#define EQOS_DMA_CHX_RBSZ_MASK 0x7FFEU
|
|
||||||
#define EQOS_DMA_CHX_RBSZ_SHIFT 1U
|
|
||||||
#define EQOS_DMA_CHX_TX_CTRL_TXPBL_RECOMMENDED 0x200000U
|
#define EQOS_DMA_CHX_TX_CTRL_TXPBL_RECOMMENDED 0x200000U
|
||||||
#define EQOS_DMA_CHX_RX_CTRL_RXPBL_RECOMMENDED 0xC0000U
|
#define EQOS_DMA_CHX_RX_CTRL_RXPBL_RECOMMENDED 0xC0000U
|
||||||
#define EQOS_DMA_CHX_RX_WDT_RWT_MASK 0xFFU
|
#define EQOS_DMA_CHX_RX_WDT_RWT_MASK 0xFFU
|
||||||
@@ -101,100 +88,10 @@
|
|||||||
/* Below macros are used for periodic reg validation for functional safety.
|
/* Below macros are used for periodic reg validation for functional safety.
|
||||||
* HW register mask - to mask out reserved and self-clearing bits
|
* HW register mask - to mask out reserved and self-clearing bits
|
||||||
*/
|
*/
|
||||||
#define EQOS_DMA_CHX_CTRL_MASK 0x11D3FFFU
|
|
||||||
#define EQOS_DMA_CHX_TX_CTRL_MASK 0xF3F9010U
|
|
||||||
#define EQOS_DMA_CHX_RX_CTRL_MASK 0x8F3F7FE0U
|
|
||||||
#define EQOS_DMA_CHX_TDRL_MASK 0x3FFU
|
|
||||||
#define EQOS_DMA_CHX_RDRL_MASK 0x3FFU
|
|
||||||
#define EQOS_DMA_CHX_INTR_ENA_MASK 0xFFC7U
|
|
||||||
#ifndef OSI_STRIPPED_LIB
|
#ifndef OSI_STRIPPED_LIB
|
||||||
#define EQOS_DMA_CHX_SLOT_SIV_MASK 0xFFFU
|
#define EQOS_DMA_CHX_SLOT_SIV_MASK 0xFFFU
|
||||||
#define EQOS_DMA_CHX_SLOT_SIV_SHIFT 4U
|
#define EQOS_DMA_CHX_SLOT_SIV_SHIFT 4U
|
||||||
#define EQOS_DMA_CHX_SLOT_ESC 0x1U
|
#define EQOS_DMA_CHX_SLOT_ESC 0x1U
|
||||||
#endif /* !OSI_STRIPPED_LIB */
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
/* To add new registers to validate,append at end of below macro list and
|
|
||||||
* increment EQOS_MAX_DMA_SAFETY_REGS.
|
|
||||||
* Using macros instead of enum due to misra error.
|
|
||||||
*/
|
|
||||||
#define EQOS_DMA_CH0_CTRL_IDX 0U
|
|
||||||
#define EQOS_DMA_CH1_CTRL_IDX 1U
|
|
||||||
#define EQOS_DMA_CH2_CTRL_IDX 2U
|
|
||||||
#define EQOS_DMA_CH3_CTRL_IDX 3U
|
|
||||||
#define EQOS_DMA_CH4_CTRL_IDX 4U
|
|
||||||
#define EQOS_DMA_CH5_CTRL_IDX 5U
|
|
||||||
#define EQOS_DMA_CH6_CTRL_IDX 6U
|
|
||||||
#define EQOS_DMA_CH7_CTRL_IDX 7U
|
|
||||||
#define EQOS_DMA_CH0_TX_CTRL_IDX 8U
|
|
||||||
#define EQOS_DMA_CH1_TX_CTRL_IDX 9U
|
|
||||||
#define EQOS_DMA_CH2_TX_CTRL_IDX 10U
|
|
||||||
#define EQOS_DMA_CH3_TX_CTRL_IDX 11U
|
|
||||||
#define EQOS_DMA_CH4_TX_CTRL_IDX 12U
|
|
||||||
#define EQOS_DMA_CH5_TX_CTRL_IDX 13U
|
|
||||||
#define EQOS_DMA_CH6_TX_CTRL_IDX 14U
|
|
||||||
#define EQOS_DMA_CH7_TX_CTRL_IDX 15U
|
|
||||||
#define EQOS_DMA_CH0_RX_CTRL_IDX 16U
|
|
||||||
#define EQOS_DMA_CH1_RX_CTRL_IDX 17U
|
|
||||||
#define EQOS_DMA_CH2_RX_CTRL_IDX 18U
|
|
||||||
#define EQOS_DMA_CH3_RX_CTRL_IDX 19U
|
|
||||||
#define EQOS_DMA_CH4_RX_CTRL_IDX 20U
|
|
||||||
#define EQOS_DMA_CH5_RX_CTRL_IDX 21U
|
|
||||||
#define EQOS_DMA_CH6_RX_CTRL_IDX 22U
|
|
||||||
#define EQOS_DMA_CH7_RX_CTRL_IDX 23U
|
|
||||||
#define EQOS_DMA_CH0_TDRL_IDX 24U
|
|
||||||
#define EQOS_DMA_CH1_TDRL_IDX 25U
|
|
||||||
#define EQOS_DMA_CH2_TDRL_IDX 26U
|
|
||||||
#define EQOS_DMA_CH3_TDRL_IDX 27U
|
|
||||||
#define EQOS_DMA_CH4_TDRL_IDX 28U
|
|
||||||
#define EQOS_DMA_CH5_TDRL_IDX 29U
|
|
||||||
#define EQOS_DMA_CH6_TDRL_IDX 30U
|
|
||||||
#define EQOS_DMA_CH7_TDRL_IDX 31U
|
|
||||||
#define EQOS_DMA_CH0_RDRL_IDX 32U
|
|
||||||
#define EQOS_DMA_CH1_RDRL_IDX 33U
|
|
||||||
#define EQOS_DMA_CH2_RDRL_IDX 34U
|
|
||||||
#define EQOS_DMA_CH3_RDRL_IDX 35U
|
|
||||||
#define EQOS_DMA_CH4_RDRL_IDX 36U
|
|
||||||
#define EQOS_DMA_CH5_RDRL_IDX 37U
|
|
||||||
#define EQOS_DMA_CH6_RDRL_IDX 38U
|
|
||||||
#define EQOS_DMA_CH7_RDRL_IDX 39U
|
|
||||||
#define EQOS_DMA_CH0_INTR_ENA_IDX 40U
|
|
||||||
#define EQOS_DMA_CH1_INTR_ENA_IDX 41U
|
|
||||||
#define EQOS_DMA_CH2_INTR_ENA_IDX 42U
|
|
||||||
#define EQOS_DMA_CH3_INTR_ENA_IDX 43U
|
|
||||||
#define EQOS_DMA_CH4_INTR_ENA_IDX 44U
|
|
||||||
#define EQOS_DMA_CH5_INTR_ENA_IDX 45U
|
|
||||||
#define EQOS_DMA_CH6_INTR_ENA_IDX 46U
|
|
||||||
#define EQOS_DMA_CH7_INTR_ENA_IDX 47U
|
|
||||||
#define EQOS_MAX_DMA_SAFETY_REGS 48U
|
|
||||||
#define EQOS_AXI_BUS_WIDTH 0x10U
|
|
||||||
/** @} */
|
/** @} */
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief dma_func_safety - Struct used to store last written values of
|
|
||||||
* critical DMA HW registers.
|
|
||||||
*/
|
|
||||||
struct dma_func_safety {
|
|
||||||
/** Array of reg MMIO addresses (base EQoS + offset of reg) */
|
|
||||||
void *reg_addr[EQOS_MAX_DMA_SAFETY_REGS];
|
|
||||||
/** Array of bit-mask value of each corresponding reg
|
|
||||||
* (used to ignore self-clearing/reserved bits in reg) */
|
|
||||||
nveu32_t reg_mask[EQOS_MAX_DMA_SAFETY_REGS];
|
|
||||||
/** Array of value stored in each corresponding register */
|
|
||||||
nveu32_t reg_val[EQOS_MAX_DMA_SAFETY_REGS];
|
|
||||||
/** OSI lock variable used to protect writes to reg
|
|
||||||
* while validation is in-progress */
|
|
||||||
nveu32_t dma_safety_lock;
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief eqos_get_dma_safety_config - EQOS get DMA safety configuration
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* API Group:
|
|
||||||
* - Initialization: Yes
|
|
||||||
* - Run time: No
|
|
||||||
* - De-initialization: No
|
|
||||||
*
|
|
||||||
* @returns Pointer to DMA safety configuration
|
|
||||||
*/
|
|
||||||
void *eqos_get_dma_safety_config(void);
|
|
||||||
#endif /* INCLUDED_EQOS_DMA_H */
|
#endif /* INCLUDED_EQOS_DMA_H */
|
||||||
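For orientation, the dma_func_safety bookkeeping removed from eqos_dma.h above (per-register MMIO address, bit-mask and last-written value arrays, sized by EQOS_MAX_DMA_SAFETY_REGS) implies a very simple periodic check. The sketch below is illustrative only and is not code from this commit; it reuses the osi_readl()/OSI_NULL helpers seen elsewhere in this change and leaves dma_safety_lock handling to the caller.

static nve32_t eqos_validate_dma_regs_sketch(struct dma_func_safety *cfg)
{
    nveu32_t i;
    nveu32_t val;

    /* Caller is expected to hold cfg->dma_safety_lock so a concurrent
     * register write cannot race with the comparison below. */
    for (i = 0U; i < EQOS_MAX_DMA_SAFETY_REGS; i++) {
        if (cfg->reg_addr[i] == OSI_NULL) {
            continue;
        }
        /* Mask out reserved/self-clearing bits before comparing against
         * the last value written by the driver. */
        val = osi_readl((nveu8_t *)cfg->reg_addr[i]) & cfg->reg_mask[i];
        if (val != (cfg->reg_val[i] & cfg->reg_mask[i])) {
            return -1; /* register content changed behind the driver's back */
        }
    }

    return 0;
}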
@@ -30,7 +30,17 @@
  * @{
  */
 #define HW_GLOBAL_DMA_STATUS 0x8700U
+#define VIRT_INTR_CHX_CNTRL(x) (0x8600U + ((x) * 8U))
+#define VIRT_INTR_CHX_STATUS(x) (0x8604U + ((x) * 8U))
+#define AXI_BUS_WIDTH 0x10U
+#define DMA_CHX_INTR_TIE OSI_BIT(0)
+#define DMA_CHX_INTR_RIE OSI_BIT(6)
+#define DMA_CHX_CTRL_PBLX8 OSI_BIT(16)
+#define DMA_CHX_TX_CTRL_OSP OSI_BIT(4)
+#define DMA_CHX_TX_CTRL_TSE OSI_BIT(12)
+#define DMA_CHX_RBSZ_MASK 0x7FFEU
+#define DMA_CHX_RBSZ_SHIFT 1U
+#define DMA_CHX_RX_WDT_RWT_MASK 0xFFU
 /** @} */

 #endif /* INCLUDED_HW_COMMON_H */
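The new VIRT_INTR_CHX_CNTRL(x)/VIRT_INTR_CHX_STATUS(x) offsets give each DMA channel an 8-byte virtual interrupt control/status pair starting at 0x8600, the same layout the per-MAC enable helpers in this change operate on. A minimal sketch (illustrative only, not part of the commit) of setting the Tx enable bit for one channel, reusing osi_readl()/osi_writel() and OSI_BIT() from this code base:

static void virt_intr_chan_tx_enable_sketch(void *base, nveu32_t chan)
{
    nveu32_t ctrl;

    /* Read-modify-write the per-channel virtual interrupt control word */
    ctrl = osi_readl((nveu8_t *)base + VIRT_INTR_CHX_CNTRL(chan));
    ctrl |= OSI_BIT(0); /* bit 0 = Tx enable, bit 1 = Rx enable */
    osi_writel(ctrl, (nveu8_t *)base + VIRT_INTR_CHX_CNTRL(chan));
}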
@@ -45,22 +45,26 @@
 #define RDES3_ERR_RE OSI_BIT(20)
 #define RDES3_ERR_DRIB OSI_BIT(19)
 #define RDES3_PKT_LEN 0x00007fffU
-#define RDES3_LT (OSI_BIT(16) | OSI_BIT(17) | OSI_BIT(18))
-#define RDES3_LT_VT OSI_BIT(18)
-#define RDES3_LT_DVT (OSI_BIT(16) | OSI_BIT(18))
-#define RDES3_RS0V OSI_BIT(25)
 #define RDES3_RS1V OSI_BIT(26)
-#define RDES3_RSV OSI_BIT(26)
-#define RDES0_OVT 0x0000FFFFU
 #define RDES3_TSD OSI_BIT(6)
 #define RDES3_TSA OSI_BIT(4)
 #define RDES1_TSA OSI_BIT(14)
 #define RDES1_TD OSI_BIT(15)
+#ifndef OSI_STRIPPED_LIB
+#define RDES3_LT (OSI_BIT(16) | OSI_BIT(17) | OSI_BIT(18))
+#define RDES3_LT_VT OSI_BIT(18)
+#define RDES3_LT_DVT (OSI_BIT(16) | OSI_BIT(18))
+#define RDES0_OVT 0x0000FFFFU
+#define RDES3_RS0V OSI_BIT(25)
+#define RDES3_RSV OSI_BIT(26)
 #define RDES3_L34T 0x00F00000U
 #define RDES3_L34T_IPV4_TCP OSI_BIT(20)
 #define RDES3_L34T_IPV4_UDP OSI_BIT(21)
 #define RDES3_L34T_IPV6_TCP (OSI_BIT(23) | OSI_BIT(20))
 #define RDES3_L34T_IPV6_UDP (OSI_BIT(23) | OSI_BIT(21))
+#define RDES3_ELLT_CVLAN 0x90000U
+#define RDES3_ERR_MGBE_CRC (OSI_BIT(16) | OSI_BIT(17))
+#endif /* !OSI_STRIPPED_LIB */

 #define RDES1_IPCE OSI_BIT(7)
 #define RDES1_IPCB OSI_BIT(6)
@@ -73,7 +77,6 @@
 #define RDES3_ELLT 0xF0000U
 #define RDES3_ELLT_IPHE 0x50000U
 #define RDES3_ELLT_CSUM_ERR 0x60000U
-#define RDES3_ELLT_CVLAN 0x90000U
 /** @} */

 /** Error Summary bits for Received packet */
@@ -83,7 +86,6 @@

 /** MGBE error summary bits for Received packet */
 #define RDES3_ES_MGBE 0x8000U
-#define RDES3_ERR_MGBE_CRC (OSI_BIT(16) | OSI_BIT(17))
 /**
  * @addtogroup EQOS_TxDesc Transmit Descriptors bit fields
  *
@@ -23,8 +23,6 @@
 # libnvethernetcl interface export
 #
 ###############################################################################
-osi_start_dma
-osi_stop_dma
 osi_get_refill_rx_desc_cnt
 osi_rx_dma_desc_init
 osi_set_rx_buf_len
39
osi/dma/libnvethernetcl_safety.export
Normal file
@@ -0,0 +1,39 @@
+################################### tell Emacs this is a -*- makefile-gmake -*-
+#
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+#
+# libnvethernetcl safety interface export
+#
+###############################################################################
+osi_get_refill_rx_desc_cnt
+osi_rx_dma_desc_init
+osi_set_rx_buf_len
+osi_hw_transmit
+osi_process_tx_completions
+osi_process_rx_completions
+osi_hw_dma_init
+osi_hw_dma_deinit
+osi_init_dma_ops
+osi_dma_get_systime_from_mac
+osi_is_mac_enabled
+osi_get_dma
+osi_handle_dma_intr
+osi_get_global_dma_status
@@ -24,6 +24,7 @@
 #include "hw_desc.h"
 #include "mgbe_desc.h"

+#ifndef OSI_STRIPPED_LIB
 /**
  * @brief mgbe_get_rx_vlan - Get Rx VLAN from descriptor
  *
@@ -94,34 +95,6 @@ static inline void mgbe_update_rx_err_stats(struct osi_rx_desc *rx_desc,
 	}
 }

-/**
- * @brief mgbe_get_rx_csum - Get the Rx checksum from descriptor if valid
- *
- * Algorithm:
- * 1) Check if the descriptor has any checksum validation errors.
- * 2) If none, set a per packet context flag indicating no err in
- * Rx checksum
- * 3) The OSD layer will mark the packet appropriately to skip
- * IP/TCP/UDP checksum validation in software based on whether
- * COE is enabled for the device.
- *
- * @param[in] rx_desc: Rx descriptor
- * @param[in] rx_pkt_cx: Per-Rx packet context structure
- */
-static void mgbe_get_rx_csum(struct osi_rx_desc *rx_desc,
-                             struct osi_rx_pkt_cx *rx_pkt_cx)
-{
-    unsigned int ellt = rx_desc->rdes3 & RDES3_ELLT;
-
-    /* Always include either checksum none/unnecessary
-     * depending on status fields in desc.
-     * Hence no need to explicitly add OSI_PKT_CX_CSUM flag.
-     */
-    if ((ellt != RDES3_ELLT_IPHE) && (ellt != RDES3_ELLT_CSUM_ERR)) {
-        rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UNNECESSARY;
-    }
-}
-
 /**
  * @brief mgbe_get_rx_hash - Get Rx packet hash from descriptor if valid
  *
@@ -157,6 +130,58 @@ static void mgbe_get_rx_hash(struct osi_rx_desc *rx_desc,
     rx_pkt_cx->rx_hash = rx_desc->rdes1;
     rx_pkt_cx->flags |= OSI_PKT_CX_RSS;
 }
+#endif /* !OSI_STRIPPED_LIB */

+/**
+ * @brief mgbe_get_rx_csum - Get the Rx checksum from descriptor if valid
+ *
+ * Algorithm:
+ * 1) Check if the descriptor has any checksum validation errors.
+ * 2) If none, set a per packet context flag indicating no err in
+ * Rx checksum
+ * 3) The OSD layer will mark the packet appropriately to skip
+ * IP/TCP/UDP checksum validation in software based on whether
+ * COE is enabled for the device.
+ *
+ * @param[in] rx_desc: Rx descriptor
+ * @param[in] rx_pkt_cx: Per-Rx packet context structure
+ */
+static void mgbe_get_rx_csum(const struct osi_rx_desc *const rx_desc,
+                             struct osi_rx_pkt_cx *rx_pkt_cx)
+{
+    nveu32_t ellt = rx_desc->rdes3 & RDES3_ELLT;
+    nveu32_t pkt_type;
+
+    /* Always include either checksum none/unnecessary
+     * depending on status fields in desc.
+     * Hence no need to explicitly add OSI_PKT_CX_CSUM flag.
+     */
+    if ((ellt != RDES3_ELLT_IPHE) && (ellt != RDES3_ELLT_CSUM_ERR)) {
+        rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UNNECESSARY;
+    }
+
+    rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4;
+    if (ellt == RDES3_ELLT_IPHE) {
+        rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4_BAD;
+    }
+
+    pkt_type = rx_desc->rdes3 & MGBE_RDES3_PT_MASK;
+    if (pkt_type == MGBE_RDES3_PT_IPV4_TCP) {
+        rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCPv4;
+    } else if (pkt_type == MGBE_RDES3_PT_IPV4_UDP) {
+        rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UDPv4;
+    } else if (pkt_type == MGBE_RDES3_PT_IPV6_TCP) {
+        rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCPv6;
+    } else if (pkt_type == MGBE_RDES3_PT_IPV6_UDP) {
+        rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UDPv6;
+    } else {
+        /* Do nothing */
+    }
+
+    if (ellt == RDES3_ELLT_CSUM_ERR) {
+        rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCP_UDP_BAD;
+    }
+}
+
 /**
  * @brief mgbe_get_rx_hwstamp - Get Rx HW Time stamp
@@ -174,15 +199,17 @@ static void mgbe_get_rx_hash(struct osi_rx_desc *rx_desc,
  * @retval -1 if TimeStamp is not available
  * @retval 0 if TimeStamp is available.
  */
-static int mgbe_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma,
-                               struct osi_rx_desc *rx_desc,
-                               struct osi_rx_desc *context_desc,
+static nve32_t mgbe_get_rx_hwstamp(const struct osi_dma_priv_data *const osi_dma,
+                                   const struct osi_rx_desc *const rx_desc,
+                                   const struct osi_rx_desc *const context_desc,
                                struct osi_rx_pkt_cx *rx_pkt_cx)
 {
-    int retry;
+    nve32_t ret = 0;
+    nve32_t retry;

     if ((rx_desc->rdes3 & RDES3_CDA) != RDES3_CDA) {
-        return -1;
+        ret = -1;
+        goto fail;
     }

     for (retry = 0; retry < 10; retry++) {
@@ -193,7 +220,8 @@ static int mgbe_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma,
         if ((context_desc->rdes0 == OSI_INVALID_VALUE) &&
             (context_desc->rdes1 == OSI_INVALID_VALUE)) {
             /* Invalid time stamp */
-            return -1;
+            ret = -1;
+            goto fail;
         }
         /* Update rx pkt context flags to indicate PTP */
         rx_pkt_cx->flags |= OSI_PKT_CX_PTP;
@@ -207,24 +235,27 @@ static int mgbe_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma,

     if (retry == 10) {
         /* Timed out waiting for Rx timestamp */
-        return -1;
+        ret = -1;
+        goto fail;
     }

     rx_pkt_cx->ns = context_desc->rdes0 +
                     (OSI_NSEC_PER_SEC * context_desc->rdes1);
     if (rx_pkt_cx->ns < context_desc->rdes0) {
-        /* Will not hit this case */
-        return -1;
+        ret = -1;
     }

-    return 0;
+fail:
+    return ret;
 }

-void mgbe_init_desc_ops(struct desc_ops *d_ops)
+void mgbe_init_desc_ops(struct desc_ops *p_dops)
 {
-    d_ops->get_rx_csum = mgbe_get_rx_csum;
-    d_ops->update_rx_err_stats = mgbe_update_rx_err_stats;
-    d_ops->get_rx_vlan = mgbe_get_rx_vlan;
-    d_ops->get_rx_hash = mgbe_get_rx_hash;
-    d_ops->get_rx_hwstamp = mgbe_get_rx_hwstamp;
+#ifndef OSI_STRIPPED_LIB
+    p_dops->update_rx_err_stats = mgbe_update_rx_err_stats;
+    p_dops->get_rx_vlan = mgbe_get_rx_vlan;
+    p_dops->get_rx_hash = mgbe_get_rx_hash;
+#endif /* !OSI_STRIPPED_LIB */
+    p_dops->get_rx_csum = mgbe_get_rx_csum;
+    p_dops->get_rx_hwstamp = mgbe_get_rx_hwstamp;
 }
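The rxcsum flags that mgbe_get_rx_csum() now fills in are meant to be interpreted by the OS-dependent (OSD) layer, as the doc comment above notes. The fragment below is a hypothetical consumer, not part of this commit; mark_csum_none()/mark_csum_unnecessary() stand in for whatever the OSD uses to tell its network stack to redo or skip software checksum validation.

static void osd_rxcsum_sketch(const struct osi_rx_pkt_cx *rx_pkt_cx)
{
    if ((rx_pkt_cx->rxcsum & OSI_CHECKSUM_TCP_UDP_BAD) != 0U) {
        /* COE flagged an L4 checksum error: force software verification */
        mark_csum_none();
    } else if ((rx_pkt_cx->rxcsum & OSI_CHECKSUM_UNNECESSARY) != 0U) {
        /* COE already validated the packet: skip software checksum */
        mark_csum_unnecessary();
    } else {
        mark_csum_none();
    }
}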
@@ -23,6 +23,7 @@
 #ifndef MGBE_DESC_H_
 #define MGBE_DESC_H_

+#ifndef OSI_STRIPPED_LIB
 /**
  * @addtogroup MGBE MAC FRP Stats.
  *
@@ -32,6 +33,20 @@
 #define MGBE_RDES2_FRPSM OSI_BIT(10)
 #define MGBE_RDES3_FRPSL OSI_BIT(14)
 /** @} */
+#endif /* !OSI_STRIPPED_LIB */
+
+/**
+ * @addtogroup MGBE RDESC bits.
+ *
+ * @brief Values defined for the MGBE rx descriptor bit fields
+ * @{
+ */
+
+#define MGBE_RDES3_PT_MASK (OSI_BIT(20) | OSI_BIT(21) | OSI_BIT(22) | OSI_BIT(23))
+#define MGBE_RDES3_PT_IPV4_TCP OSI_BIT(20)
+#define MGBE_RDES3_PT_IPV4_UDP OSI_BIT(21)
+#define MGBE_RDES3_PT_IPV6_TCP (OSI_BIT(20) | OSI_BIT(23))
+#define MGBE_RDES3_PT_IPV6_UDP (OSI_BIT(21) | OSI_BIT(23))
+/** @} */

 #endif /* MGBE_DESC_H_ */
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -20,664 +20,12 @@
|
|||||||
* DEALINGS IN THE SOFTWARE.
|
* DEALINGS IN THE SOFTWARE.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
#include "../osi/common/common.h"
|
#include "../osi/common/common.h"
|
||||||
#include <osi_common.h>
|
#include <osi_common.h>
|
||||||
#include "mgbe_dma.h"
|
#include "mgbe_dma.h"
|
||||||
#include "dma_local.h"
|
#include "dma_local.h"
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief mgbe_disable_chan_tx_intr - Disables DMA Tx channel interrupts.
|
|
||||||
*
|
|
||||||
* @param[in] addr: Base address indicating the start of
|
|
||||||
* memory mapped IO region of the MAC.
|
|
||||||
* @param[in] chan: DMA Tx channel number.
|
|
||||||
*
|
|
||||||
* @note 1) MAC needs to be out of reset and proper clocks need to be configured
|
|
||||||
* 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
|
|
||||||
* 3) Mapping of physical IRQ line to DMA channel need to be maintained at
|
|
||||||
* OSDependent layer and pass corresponding channel number.
|
|
||||||
*/
|
|
||||||
static void mgbe_disable_chan_tx_intr(void *addr, nveu32_t chan)
|
|
||||||
{
|
|
||||||
nveu32_t cntrl;
|
|
||||||
#if 0
|
|
||||||
MGBE_CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
cntrl = osi_readl((nveu8_t *)addr +
|
|
||||||
MGBE_VIRT_INTR_CHX_CNTRL(chan));
|
|
||||||
cntrl &= ~MGBE_VIRT_INTR_CHX_CNTRL_TX;
|
|
||||||
osi_writel(cntrl, (nveu8_t *)addr +
|
|
||||||
MGBE_VIRT_INTR_CHX_CNTRL(chan));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief mgbe_enable_chan_tx_intr - Enable Tx channel interrupts.
|
|
||||||
*
|
|
||||||
* @param[in] addr: Base address indicating the start of
|
|
||||||
* memory mapped IO region of the MAC.
|
|
||||||
* @param[in] chan: DMA Tx channel number.
|
|
||||||
*
|
|
||||||
* @note 1) MAC needs to be out of reset and proper clocks need to be configured
|
|
||||||
* 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
|
|
||||||
* 3) Mapping of physical IRQ line to DMA channel need to be maintained at
|
|
||||||
* OSDependent layer and pass corresponding channel number.
|
|
||||||
*/
|
|
||||||
static void mgbe_enable_chan_tx_intr(void *addr, nveu32_t chan)
|
|
||||||
{
|
|
||||||
nveu32_t cntrl;
|
|
||||||
#if 0
|
|
||||||
MGBE_CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
cntrl = osi_readl((nveu8_t *)addr +
|
|
||||||
MGBE_VIRT_INTR_CHX_CNTRL(chan));
|
|
||||||
cntrl |= MGBE_VIRT_INTR_CHX_CNTRL_TX;
|
|
||||||
osi_writel(cntrl, (nveu8_t *)addr +
|
|
||||||
MGBE_VIRT_INTR_CHX_CNTRL(chan));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief mgbe_disable_chan_rx_intr - Disable Rx channel interrupts.
|
|
||||||
*
|
|
||||||
* @param[in] addr: Base address indicating the start of
|
|
||||||
* memory mapped IO region of the MAC.
|
|
||||||
* @param[in] chan: DMA Rx channel number.
|
|
||||||
*
|
|
||||||
* @note 1) MAC needs to be out of reset and proper clocks need to be configured
|
|
||||||
* 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
|
|
||||||
* 3) Mapping of physical IRQ line to DMA channel need to be maintained at
|
|
||||||
* OSDependent layer and pass corresponding channel number.
|
|
||||||
*/
|
|
||||||
static void mgbe_disable_chan_rx_intr(void *addr, nveu32_t chan)
|
|
||||||
{
|
|
||||||
nveu32_t cntrl;
|
|
||||||
#if 0
|
|
||||||
MGBE_CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
cntrl = osi_readl((nveu8_t *)addr +
|
|
||||||
MGBE_VIRT_INTR_CHX_CNTRL(chan));
|
|
||||||
cntrl &= ~MGBE_VIRT_INTR_CHX_CNTRL_RX;
|
|
||||||
osi_writel(cntrl, (nveu8_t *)addr +
|
|
||||||
MGBE_VIRT_INTR_CHX_CNTRL(chan));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief mgbe_enable_chan_rx_intr - Enable Rx channel interrupts.
|
|
||||||
*
|
|
||||||
* @param[in] addr: Base address indicating the start of
|
|
||||||
* memory mapped IO region of the MAC.
|
|
||||||
* @param[in] chan: DMA Rx channel number.
|
|
||||||
*
|
|
||||||
* @note 1) MAC needs to be out of reset and proper clocks need to be configured
|
|
||||||
* 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
|
|
||||||
*/
|
|
||||||
static void mgbe_enable_chan_rx_intr(void *addr, nveu32_t chan)
|
|
||||||
{
|
|
||||||
nveu32_t cntrl;
|
|
||||||
#if 0
|
|
||||||
MGBE_CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
cntrl = osi_readl((nveu8_t *)addr +
|
|
||||||
MGBE_VIRT_INTR_CHX_CNTRL(chan));
|
|
||||||
cntrl |= MGBE_VIRT_INTR_CHX_CNTRL_RX;
|
|
||||||
osi_writel(cntrl, (nveu8_t *)addr +
|
|
||||||
MGBE_VIRT_INTR_CHX_CNTRL(chan));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief mgbe_set_tx_ring_len - Set DMA Tx ring length.
|
|
||||||
*
|
|
||||||
* Algorithm: Set DMA Tx channel ring length for specific channel.
|
|
||||||
*
|
|
||||||
* @param[in] osi_dma: OSI DMA data structure.
|
|
||||||
* @param[in] chan: DMA Tx channel number.
|
|
||||||
* @param[in] len: Length.
|
|
||||||
*/
|
|
||||||
static void mgbe_set_tx_ring_len(struct osi_dma_priv_data *osi_dma,
|
|
||||||
nveu32_t chan,
|
|
||||||
nveu32_t len)
|
|
||||||
{
|
|
||||||
void *addr = osi_dma->base;
|
|
||||||
nveu32_t value;
|
|
||||||
#if 0
|
|
||||||
MGBE_CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
value = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_TX_CNTRL2(chan));
|
|
||||||
value |= (len & MGBE_DMA_RING_LENGTH_MASK);
|
|
||||||
osi_writel(value, (nveu8_t *)addr + MGBE_DMA_CHX_TX_CNTRL2(chan));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief mgbe_set_tx_ring_start_addr - Set DMA Tx ring base address.
|
|
||||||
*
|
|
||||||
* Algorithm: Sets DMA Tx ring base address for specific channel.
|
|
||||||
*
|
|
||||||
* @param[in] addr: Base address indicating the start of
|
|
||||||
* memory mapped IO region of the MAC.
|
|
||||||
* @param[in] chan: DMA Tx channel number.
|
|
||||||
* @param[in] tx_desc: Tx desc base address.
|
|
||||||
*/
|
|
||||||
static void mgbe_set_tx_ring_start_addr(void *addr, nveu32_t chan,
|
|
||||||
nveu64_t tx_desc)
|
|
||||||
{
|
|
||||||
nveu64_t temp;
|
|
||||||
#if 0
|
|
||||||
MGBE_CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
temp = H32(tx_desc);
|
|
||||||
if (temp < UINT_MAX) {
|
|
||||||
osi_writel((nveu32_t)temp, (nveu8_t *)addr +
|
|
||||||
MGBE_DMA_CHX_TDLH(chan));
|
|
||||||
}
|
|
||||||
|
|
||||||
temp = L32(tx_desc);
|
|
||||||
if (temp < UINT_MAX) {
|
|
||||||
osi_writel((nveu32_t)temp, (nveu8_t *)addr +
|
|
||||||
MGBE_DMA_CHX_TDLA(chan));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief mgbe_update_tx_tailptr - Updates DMA Tx ring tail pointer.
|
|
||||||
*
|
|
||||||
* Algorithm: Updates DMA Tx ring tail pointer for specific channel.
|
|
||||||
*
|
|
||||||
* @param[in] addr: Base address indicating the start of
|
|
||||||
* memory mapped IO region of the MAC.
|
|
||||||
* @param[in] chan: DMA Tx channel number.
|
|
||||||
* @param[in] tailptr: DMA Tx ring tail pointer.
|
|
||||||
*
|
|
||||||
*
|
|
||||||
* @note 1) MAC needs to be out of reset and proper clocks need to be configured
|
|
||||||
* 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
|
|
||||||
*/
|
|
||||||
static void mgbe_update_tx_tailptr(void *addr, nveu32_t chan,
|
|
||||||
nveu64_t tailptr)
|
|
||||||
{
|
|
||||||
nveu64_t temp;
|
|
||||||
#if 0
|
|
||||||
MGBE_CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
temp = L32(tailptr);
|
|
||||||
if (temp < UINT_MAX) {
|
|
||||||
osi_writel((nveu32_t)temp, (nveu8_t *)addr +
|
|
||||||
MGBE_DMA_CHX_TDTLP(chan));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief mgbe_set_rx_ring_len - Set Rx channel ring length.
|
|
||||||
*
|
|
||||||
* Algorithm: Sets DMA Rx channel ring length for specific DMA channel.
|
|
||||||
*
|
|
||||||
* @param[in] osi_dma: OSI DMA data structure.
|
|
||||||
* @param[in] chan: DMA Rx channel number.
|
|
||||||
* @param[in] len: Length
|
|
||||||
*/
|
|
||||||
static void mgbe_set_rx_ring_len(struct osi_dma_priv_data *osi_dma,
|
|
||||||
nveu32_t chan,
|
|
||||||
nveu32_t len)
|
|
||||||
{
|
|
||||||
void *addr = osi_dma->base;
|
|
||||||
nveu32_t value;
|
|
||||||
#if 0
|
|
||||||
MGBE_CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
value = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_RX_CNTRL2(chan));
|
|
||||||
value |= (len & MGBE_DMA_RING_LENGTH_MASK);
|
|
||||||
osi_writel(value, (nveu8_t *)addr + MGBE_DMA_CHX_RX_CNTRL2(chan));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief mgbe_set_rx_ring_start_addr - Set DMA Rx ring base address.
|
|
||||||
*
|
|
||||||
* Algorithm: Sets DMA Rx channel ring base address.
|
|
||||||
*
|
|
||||||
* @param[in] addr: Base address indicating the start of
|
|
||||||
* memory mapped IO region of the MAC.
|
|
||||||
* @param[in] chan: DMA Rx channel number.
|
|
||||||
* @param[in] tx_desc: DMA Rx desc base address.
|
|
||||||
*/
|
|
||||||
static void mgbe_set_rx_ring_start_addr(void *addr, nveu32_t chan,
|
|
||||||
nveu64_t tx_desc)
|
|
||||||
{
|
|
||||||
nveu64_t temp;
|
|
||||||
#if 0
|
|
||||||
MGBE_CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
temp = H32(tx_desc);
|
|
||||||
if (temp < UINT_MAX) {
|
|
||||||
osi_writel((nveu32_t)temp, (nveu8_t *)addr +
|
|
||||||
MGBE_DMA_CHX_RDLH(chan));
|
|
||||||
}
|
|
||||||
|
|
||||||
temp = L32(tx_desc);
|
|
||||||
if (temp < UINT_MAX) {
|
|
||||||
osi_writel((nveu32_t)temp, (nveu8_t *)addr +
|
|
||||||
MGBE_DMA_CHX_RDLA(chan));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief mgbe_update_rx_tailptr - Update Rx ring tail pointer
|
|
||||||
*
|
|
||||||
* Algorithm: Updates DMA Rx channel tail pointer for specific channel.
|
|
||||||
*
|
|
||||||
* @param[in] addr: Base address indicating the start of
|
|
||||||
* memory mapped IO region of the MAC.
|
|
||||||
* @param[in] chan: DMA Rx channel number.
|
|
||||||
* @param[in] tailptr: Tail pointer
|
|
||||||
*
|
|
||||||
* @note 1) MAC needs to be out of reset and proper clocks need to be configured
|
|
||||||
* 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
|
|
||||||
*/
|
|
||||||
static void mgbe_update_rx_tailptr(void *addr, nveu32_t chan,
|
|
||||||
nveu64_t tailptr)
|
|
||||||
{
|
|
||||||
nveu64_t temp;
|
|
||||||
#if 0
|
|
||||||
MGBE_CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
temp = H32(tailptr);
|
|
||||||
if (temp < UINT_MAX) {
|
|
||||||
osi_writel((nveu32_t)temp, (nveu8_t *)addr +
|
|
||||||
MGBE_DMA_CHX_RDTHP(chan));
|
|
||||||
}
|
|
||||||
|
|
||||||
temp = L32(tailptr);
|
|
||||||
if (temp < UINT_MAX) {
|
|
||||||
osi_writel((nveu32_t)temp, (nveu8_t *)addr +
|
|
||||||
MGBE_DMA_CHX_RDTLP(chan));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief mgbe_start_dma - Start DMA.
|
|
||||||
*
|
|
||||||
* Algorithm: Start Tx and Rx DMA for specific channel.
|
|
||||||
*
|
|
||||||
* @param[in] osi_dma: OSI DMA private data structure.
|
|
||||||
* @param[in] chan: DMA Tx/Rx channel number.
|
|
||||||
*
|
|
||||||
* @note 1) MAC needs to be out of reset and proper clocks need to be configured
|
|
||||||
* 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
|
|
||||||
*/
|
|
||||||
static void mgbe_start_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan)
|
|
||||||
{
|
|
||||||
nveu32_t val;
|
|
||||||
void *addr = osi_dma->base;
|
|
||||||
#if 0
|
|
||||||
MGBE_CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
/* start Tx DMA */
|
|
||||||
val = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_TX_CTRL(chan));
|
|
||||||
val |= OSI_BIT(0);
|
|
||||||
osi_writel(val, (nveu8_t *)addr + MGBE_DMA_CHX_TX_CTRL(chan));
|
|
||||||
|
|
||||||
/* start Rx DMA */
|
|
||||||
val = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_RX_CTRL(chan));
|
|
||||||
val |= OSI_BIT(0);
|
|
||||||
val &= ~OSI_BIT(31);
|
|
||||||
osi_writel(val, (nveu8_t *)addr + MGBE_DMA_CHX_RX_CTRL(chan));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief mgbe_stop_dma - Stop DMA.
|
|
||||||
*
|
|
||||||
* Algorithm: Stop Tx and Rx DMA for specific channel.
|
|
||||||
*
|
|
||||||
* @param[in] osi_dma: OSI DMA private data structure.
|
|
||||||
* @param[in] chan: DMA Tx/Rx channel number.
|
|
||||||
*
|
|
||||||
* @note 1) MAC needs to be out of reset and proper clocks need to be configured
|
|
||||||
* 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
|
|
||||||
*/
|
|
||||||
static void mgbe_stop_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan)
|
|
||||||
{
|
|
||||||
nveu32_t val;
|
|
||||||
void *addr = osi_dma->base;
|
|
||||||
#if 0
|
|
||||||
MGBE_CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
/* stop Tx DMA */
|
|
||||||
val = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_TX_CTRL(chan));
|
|
||||||
val &= ~OSI_BIT(0);
|
|
||||||
osi_writel(val, (nveu8_t *)addr + MGBE_DMA_CHX_TX_CTRL(chan));
|
|
||||||
|
|
||||||
/* stop Rx DMA */
|
|
||||||
val = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_RX_CTRL(chan));
|
|
||||||
val &= ~OSI_BIT(0);
|
|
||||||
val |= OSI_BIT(31);
|
|
||||||
osi_writel(val, (nveu8_t *)addr + MGBE_DMA_CHX_RX_CTRL(chan));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief mgbe_configure_dma_channel - Configure DMA channel
|
|
||||||
*
|
|
||||||
* Algorithm: This takes care of configuring the below
|
|
||||||
* parameters for the DMA channel
|
|
||||||
* 1) Enabling DMA channel interrupts
|
|
||||||
* 2) Enable 8xPBL mode
|
|
||||||
* 3) Program Tx, Rx PBL
|
|
||||||
* 4) Enable TSO if HW supports
|
|
||||||
* 5) Program Rx Watchdog timer
|
|
||||||
* 6) Program Out Standing DMA Read Requests
|
|
||||||
* 7) Program Out Standing DMA write Requests
|
|
||||||
*
|
|
||||||
* @param[in] chan: DMA channel number that need to be configured.
|
|
||||||
* @param[in] owrq: out standing write dma requests
|
|
||||||
* @param[in] orrq: out standing read dma requests
|
|
||||||
* @param[in] osi_dma: OSI DMA private data structure.
|
|
||||||
*
|
|
||||||
* @note MAC has to be out of reset.
|
|
||||||
*/
|
|
||||||
static void mgbe_configure_dma_channel(nveu32_t chan,
|
|
||||||
nveu32_t owrq,
|
|
||||||
nveu32_t orrq,
|
|
||||||
struct osi_dma_priv_data *osi_dma)
|
|
||||||
{
|
|
||||||
nveu32_t value;
|
|
||||||
nveu32_t txpbl;
|
|
||||||
nveu32_t rxpbl;
|
|
||||||
#if 0
|
|
||||||
MGBE_CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
/* enable DMA channel interrupts */
|
|
||||||
/* Enable TIE and TBUE */
|
|
||||||
/* TIE - Transmit Interrupt Enable */
|
|
||||||
/* TBUE - Transmit Buffer Unavailable Enable */
|
|
||||||
/* RIE - Receive Interrupt Enable */
|
|
||||||
/* RBUE - Receive Buffer Unavailable Enable */
|
|
||||||
/* AIE - Abnormal Interrupt Summary Enable */
|
|
||||||
/* NIE - Normal Interrupt Summary Enable */
|
|
||||||
/* FBE - Fatal Bus Error Enable */
|
|
||||||
value = osi_readl((nveu8_t *)osi_dma->base +
|
|
||||||
MGBE_DMA_CHX_INTR_ENA(chan));
|
|
||||||
value |= MGBE_DMA_CHX_INTR_TIE | MGBE_DMA_CHX_INTR_TBUE |
|
|
||||||
MGBE_DMA_CHX_INTR_RIE | MGBE_DMA_CHX_INTR_RBUE |
|
|
||||||
MGBE_DMA_CHX_INTR_FBEE | MGBE_DMA_CHX_INTR_AIE |
|
|
||||||
MGBE_DMA_CHX_INTR_NIE;
|
|
||||||
|
|
||||||
/* For multi-irqs to work nie needs to be disabled */
|
|
||||||
/* TODO: do we need this ? */
|
|
||||||
value &= ~(MGBE_DMA_CHX_INTR_NIE);
|
|
||||||
osi_writel(value, (nveu8_t *)osi_dma->base +
|
|
||||||
MGBE_DMA_CHX_INTR_ENA(chan));
|
|
||||||
|
|
||||||
/* Enable 8xPBL mode */
|
|
||||||
value = osi_readl((nveu8_t *)osi_dma->base +
|
|
||||||
MGBE_DMA_CHX_CTRL(chan));
|
|
||||||
value |= MGBE_DMA_CHX_CTRL_PBLX8;
|
|
||||||
osi_writel(value, (nveu8_t *)osi_dma->base +
|
|
||||||
MGBE_DMA_CHX_CTRL(chan));
|
|
||||||
|
|
||||||
/* Configure DMA channel Transmit control register */
|
|
||||||
value = osi_readl((nveu8_t *)osi_dma->base +
|
|
||||||
MGBE_DMA_CHX_TX_CTRL(chan));
|
|
||||||
/* Enable OSF mode */
|
|
||||||
value |= MGBE_DMA_CHX_TX_CTRL_OSP;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Formula for TxPBL calculation is
|
|
||||||
* (TxPBL) < ((TXQSize - MTU)/(DATAWIDTH/8)) - 5
|
|
||||||
* if TxPBL exceeds the value of 256 then we need to make use of 256
|
|
||||||
* as the TxPBL else we should be using the value which we get after
|
|
||||||
* calculation by using above formula
|
|
||||||
*/
|
|
||||||
if (osi_dma->pre_si == OSI_ENABLE) {
|
|
||||||
txpbl = ((((MGBE_TXQ_RXQ_SIZE_FPGA / osi_dma->num_dma_chans) -
|
|
||||||
osi_dma->mtu) / (MGBE_AXI_DATAWIDTH / 8U)) - 5U);
|
|
||||||
} else {
|
|
||||||
txpbl = ((((MGBE_TXQ_SIZE / osi_dma->num_dma_chans) -
|
|
||||||
osi_dma->mtu) / (MGBE_AXI_DATAWIDTH / 8U)) - 5U);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Since PBLx8 is set, so txpbl/8 will be the value that
|
|
||||||
* need to be programmed
|
|
||||||
*/
|
|
||||||
if (txpbl >= MGBE_DMA_CHX_MAX_PBL) {
|
|
||||||
value |= ((MGBE_DMA_CHX_MAX_PBL / 8U) <<
|
|
||||||
MGBE_DMA_CHX_CTRL_PBL_SHIFT);
|
|
||||||
} else {
|
|
||||||
value |= ((txpbl / 8U) << MGBE_DMA_CHX_CTRL_PBL_SHIFT);
|
|
||||||
}
|
|
||||||
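To make the TxPBL bound above concrete (numbers chosen purely for illustration, not taken from this commit): with MGBE_TXQ_SIZE = 131072 bytes, a single DMA channel, an MTU of 1500 and MGBE_AXI_DATAWIDTH = 128 bits, the formula gives ((131072 - 1500) / 16) - 5 = 8093. That exceeds MGBE_DMA_CHX_MAX_PBL (256), so 256 is used instead, and because PBLx8 mode is enabled the value actually written to the PBL field is 256 / 8 = 32.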
|
|
||||||
/* enable TSO by default if HW supports */
|
|
||||||
value |= MGBE_DMA_CHX_TX_CTRL_TSE;
|
|
||||||
|
|
||||||
osi_writel(value, (nveu8_t *)osi_dma->base +
|
|
||||||
MGBE_DMA_CHX_TX_CTRL(chan));
|
|
||||||
|
|
||||||
/* Configure DMA channel Receive control register */
|
|
||||||
/* Select Rx Buffer size. Needs to be rounded up to next multiple of
|
|
||||||
* bus width
|
|
||||||
*/
|
|
||||||
value = osi_readl((nveu8_t *)osi_dma->base +
|
|
||||||
MGBE_DMA_CHX_RX_CTRL(chan));
|
|
||||||
|
|
||||||
/* clear previous Rx buffer size */
|
|
||||||
value &= ~MGBE_DMA_CHX_RBSZ_MASK;
|
|
||||||
value |= (osi_dma->rx_buf_len << MGBE_DMA_CHX_RBSZ_SHIFT);
|
|
||||||
/* RxPBL calculation is
|
|
||||||
* RxPBL <= Rx Queue Size/2
|
|
||||||
*/
|
|
||||||
if (osi_dma->pre_si == OSI_ENABLE) {
|
|
||||||
rxpbl = (((MGBE_TXQ_RXQ_SIZE_FPGA / osi_dma->num_dma_chans) /
|
|
||||||
2U) << MGBE_DMA_CHX_CTRL_PBL_SHIFT);
|
|
||||||
} else {
|
|
||||||
rxpbl = (((MGBE_RXQ_SIZE / osi_dma->num_dma_chans) / 2U) <<
|
|
||||||
MGBE_DMA_CHX_CTRL_PBL_SHIFT);
|
|
||||||
}
|
|
||||||
/* Since PBLx8 is set, so rxpbl/8 will be the value that
|
|
||||||
* need to be programmed
|
|
||||||
*/
|
|
||||||
if (rxpbl >= MGBE_DMA_CHX_MAX_PBL) {
|
|
||||||
value |= ((MGBE_DMA_CHX_MAX_PBL / 8) <<
|
|
||||||
MGBE_DMA_CHX_CTRL_PBL_SHIFT);
|
|
||||||
} else {
|
|
||||||
value |= ((rxpbl / 8) << MGBE_DMA_CHX_CTRL_PBL_SHIFT);
|
|
||||||
}
|
|
||||||
|
|
||||||
osi_writel(value, (nveu8_t *)osi_dma->base +
|
|
||||||
MGBE_DMA_CHX_RX_CTRL(chan));
|
|
||||||
|
|
||||||
/* Set Receive Interrupt Watchdog Timer Count */
|
|
||||||
/* conversion of usec to RWIT value
|
|
||||||
* Eg:System clock is 62.5MHz, each clock cycle would then be 16ns
|
|
||||||
* For value 0x1 in watchdog timer,device would wait for 256 clk cycles,
|
|
||||||
* ie, (16ns x 256) => 4.096us (rounding off to 4us)
|
|
||||||
* So formula with above values is,ret = usec/4
|
|
||||||
*/
|
|
||||||
/* NOTE: Bug 3287883: If RWTU value programmed then driver needs
|
|
||||||
* to follow below order -
|
|
||||||
* 1. First write RWT field with non-zero value.
|
|
||||||
* 2. Program RWTU field of register
|
|
||||||
* DMA_CH(#i)_Rx_Interrupt_Watchdog_Time.
|
|
||||||
*/
|
|
||||||
if ((osi_dma->use_riwt == OSI_ENABLE) &&
|
|
||||||
(osi_dma->rx_riwt < UINT_MAX)) {
|
|
||||||
value = osi_readl((nveu8_t *)osi_dma->base +
|
|
||||||
MGBE_DMA_CHX_RX_WDT(chan));
|
|
||||||
/* Mask the RWT value */
|
|
||||||
value &= ~MGBE_DMA_CHX_RX_WDT_RWT_MASK;
|
|
||||||
/* Conversion of usec to Rx Interrupt Watchdog Timer Count */
|
|
||||||
/* TODO: Need to fix AXI clock for silicon */
|
|
||||||
value |= ((osi_dma->rx_riwt *
|
|
||||||
((nveu32_t)MGBE_AXI_CLK_FREQ / OSI_ONE_MEGA_HZ)) /
|
|
||||||
MGBE_DMA_CHX_RX_WDT_RWTU) &
|
|
||||||
MGBE_DMA_CHX_RX_WDT_RWT_MASK;
|
|
||||||
osi_writel(value, (nveu8_t *)osi_dma->base +
|
|
||||||
MGBE_DMA_CHX_RX_WDT(chan));
|
|
||||||
|
|
||||||
value = osi_readl((nveu8_t *)osi_dma->base +
|
|
||||||
MGBE_DMA_CHX_RX_WDT(chan));
|
|
||||||
value &= ~(MGBE_DMA_CHX_RX_WDT_RWTU_MASK <<
|
|
||||||
MGBE_DMA_CHX_RX_WDT_RWTU_SHIFT);
|
|
||||||
value |= (MGBE_DMA_CHX_RX_WDT_RWTU_2048_CYCLE <<
|
|
||||||
MGBE_DMA_CHX_RX_WDT_RWTU_SHIFT);
|
|
||||||
osi_writel(value, (nveu8_t *)osi_dma->base +
|
|
||||||
MGBE_DMA_CHX_RX_WDT(chan));
|
|
||||||
}
|
|
||||||
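For a feel of the watchdog conversion above (illustrative values only): with MGBE_AXI_CLK_FREQ = 480000000, i.e. 480 cycles per microsecond, and the 2048-cycle RWTU unit selected above, a requested rx_riwt of 100 microseconds becomes (100 * 480) / 2048 = 23, so RWT = 23 is programmed. That corresponds to 23 * 2048 = 47104 AXI clock cycles, roughly 98 microseconds; integer truncation always rounds the effective timeout slightly down.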
|
|
||||||
/* Update ORRQ in DMA_CH(#i)_Tx_Control2 register */
|
|
||||||
value = osi_readl((nveu8_t *)osi_dma->base +
|
|
||||||
MGBE_DMA_CHX_TX_CNTRL2(chan));
|
|
||||||
value |= (orrq << MGBE_DMA_CHX_TX_CNTRL2_ORRQ_SHIFT);
|
|
||||||
osi_writel(value, (nveu8_t *)osi_dma->base +
|
|
||||||
MGBE_DMA_CHX_TX_CNTRL2(chan));
|
|
||||||
|
|
||||||
/* Update OWRQ in DMA_CH(#i)_Rx_Control2 register */
|
|
||||||
value = osi_readl((nveu8_t *)osi_dma->base +
|
|
||||||
MGBE_DMA_CHX_RX_CNTRL2(chan));
|
|
||||||
value |= (owrq << MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SHIFT);
|
|
||||||
osi_writel(value, (nveu8_t *)osi_dma->base +
|
|
||||||
MGBE_DMA_CHX_RX_CNTRL2(chan));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief mgbe_init_dma_channel - DMA channel INIT
|
|
||||||
*
|
|
||||||
* @param[in] osi_dma: OSI DMA private data structure.
|
|
||||||
*/
|
|
||||||
static nve32_t mgbe_init_dma_channel(struct osi_dma_priv_data *osi_dma)
|
|
||||||
{
|
|
||||||
nveu32_t chinx;
|
|
||||||
nveu32_t owrq;
|
|
||||||
nveu32_t orrq;
|
|
||||||
|
|
||||||
/* DMA Read Out Standing Requests */
|
|
||||||
/* For Presi ORRQ is 16 in case of schannel and 64 in case of mchannel.
|
|
||||||
* For Si ORRQ is 64 in case of single and multi channel
|
|
||||||
*/
|
|
||||||
orrq = (MGBE_DMA_CHX_TX_CNTRL2_ORRQ_RECOMMENDED /
|
|
||||||
osi_dma->num_dma_chans);
|
|
||||||
if ((osi_dma->num_dma_chans == 1U) && (osi_dma->pre_si == OSI_ENABLE)) {
|
|
||||||
/* For Presi ORRQ is 16 in a single channel configuration
|
|
||||||
* so overwrite only for this configuration
|
|
||||||
*/
|
|
||||||
orrq = MGBE_DMA_CHX_RX_CNTRL2_ORRQ_SCHAN_PRESI;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* DMA Write Out Standing Requests */
|
|
||||||
/* For Presi OWRQ is 8 and for Si it is 32 in case of single channel.
|
|
||||||
* For Multi Channel OWRQ is 64 for both si and presi
|
|
||||||
*/
|
|
||||||
if (osi_dma->num_dma_chans == 1U) {
|
|
||||||
if (osi_dma->pre_si == OSI_ENABLE) {
|
|
||||||
owrq = MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN_PRESI;
|
|
||||||
} else {
|
|
||||||
owrq = MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
owrq = (MGBE_DMA_CHX_RX_CNTRL2_OWRQ_MCHAN /
|
|
||||||
osi_dma->num_dma_chans);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* configure MGBE DMA channels */
|
|
||||||
for (chinx = 0; chinx < osi_dma->num_dma_chans; chinx++) {
|
|
||||||
mgbe_configure_dma_channel(osi_dma->dma_chans[chinx],
|
|
||||||
owrq, orrq, osi_dma);
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief mgbe_set_rx_buf_len - Set Rx buffer length
|
|
||||||
* Sets the Rx buffer length based on the new MTU size set.
|
|
||||||
*
|
|
||||||
* @param[in] osi_dma: OSI DMA private data structure.
|
|
||||||
*
|
|
||||||
* @note 1) MAC needs to be out of reset and proper clocks need to be configured
|
|
||||||
* 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
|
|
||||||
* 3) osi_dma->mtu need to be filled with current MTU size <= 9K
|
|
||||||
*/
|
|
||||||
static void mgbe_set_rx_buf_len(struct osi_dma_priv_data *osi_dma)
|
|
||||||
{
|
|
||||||
nveu32_t rx_buf_len;
|
|
||||||
|
|
||||||
/* Add Ethernet header + FCS + NET IP align size to MTU */
|
|
||||||
rx_buf_len = osi_dma->mtu + OSI_ETH_HLEN +
|
|
||||||
NV_VLAN_HLEN + OSI_NET_IP_ALIGN;
|
|
||||||
/* Buffer alignment */
|
|
||||||
osi_dma->rx_buf_len = ((rx_buf_len + (MGBE_AXI_BUS_WIDTH - 1U)) &
|
|
||||||
~(MGBE_AXI_BUS_WIDTH - 1U));
|
|
||||||
}
|
|
||||||
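As a quick check of the round-up in mgbe_set_rx_buf_len() above (header sizes assumed for illustration: OSI_ETH_HLEN = 14, NV_VLAN_HLEN = 4, OSI_NET_IP_ALIGN = 2, with MGBE_AXI_BUS_WIDTH = 16): an MTU of 9000 gives 9000 + 14 + 4 + 2 = 9020, and (9020 + 15) & ~15 = 9024, so the Rx buffer length is padded up to the next 16-byte AXI boundary.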
|
|
||||||
/**
|
|
||||||
* @brief Read-validate HW registers for functional safety.
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* Algorithm:
|
|
||||||
* - Reads pre-configured list of MAC/MTL configuration registers
|
|
||||||
* and compares with last written value for any modifications.
|
|
||||||
*
|
|
||||||
* @param[in] osi_dma: OSI DMA private data structure.
|
|
||||||
*
|
|
||||||
* @pre
|
|
||||||
* - MAC has to be out of reset.
|
|
||||||
* - osi_hw_dma_init has to be called. Internally this would initialize
|
|
||||||
* the safety_config (see osi_dma_priv_data) based on MAC version and
|
|
||||||
* which specific registers needs to be validated periodically.
|
|
||||||
* - Invoke this call if (osi_dma_priv_data->safety_config != OSI_NULL)
|
|
||||||
*
|
|
||||||
* @note
|
|
||||||
* API Group:
|
|
||||||
* - Initialization: No
|
|
||||||
* - Run time: Yes
|
|
||||||
* - De-initialization: No
|
|
||||||
*
|
|
||||||
* @retval 0 on success
|
|
||||||
* @retval -1 on failure.
|
|
||||||
*/
|
|
||||||
static nve32_t mgbe_validate_dma_regs(OSI_UNUSED
|
|
||||||
struct osi_dma_priv_data *osi_dma)
|
|
||||||
{
|
|
||||||
/* TODO: for mgbe */
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief mgbe_clear_vm_tx_intr - Clear VM Tx interrupt
|
|
||||||
*
|
|
||||||
* Algorithm: Clear Tx interrupt source at DMA and wrapper level.
|
|
||||||
*
|
|
||||||
* @param[in] addr: MAC base address.
|
|
||||||
* @param[in] chan: DMA Tx channel number.
|
|
||||||
*/
|
|
||||||
static void mgbe_clear_vm_tx_intr(void *addr, nveu32_t chan)
|
|
||||||
{
|
|
||||||
#if 0
|
|
||||||
MGBE_CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
osi_writel(MGBE_DMA_CHX_STATUS_CLEAR_TX,
|
|
||||||
(nveu8_t *)addr + MGBE_DMA_CHX_STATUS(chan));
|
|
||||||
osi_writel(MGBE_VIRT_INTR_CHX_STATUS_TX,
|
|
||||||
(nveu8_t *)addr + MGBE_VIRT_INTR_CHX_STATUS(chan));
|
|
||||||
|
|
||||||
mgbe_disable_chan_tx_intr(addr, chan);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief mgbe_clear_vm_rx_intr - Clear VM Rx interrupt
|
|
||||||
*
|
|
||||||
* @param[in] addr: MAC base address.
|
|
||||||
* @param[in] chan: DMA Tx channel number.
|
|
||||||
*
|
|
||||||
* Algorithm: Clear Rx interrupt source at DMA and wrapper level.
|
|
||||||
*/
|
|
||||||
static void mgbe_clear_vm_rx_intr(void *addr, nveu32_t chan)
|
|
||||||
{
|
|
||||||
#if 0
|
|
||||||
MGBE_CHECK_CHAN_BOUND(chan);
|
|
||||||
#endif
|
|
||||||
osi_writel(MGBE_DMA_CHX_STATUS_CLEAR_RX,
|
|
||||||
(nveu8_t *)addr + MGBE_DMA_CHX_STATUS(chan));
|
|
||||||
osi_writel(MGBE_VIRT_INTR_CHX_STATUS_RX,
|
|
||||||
(nveu8_t *)addr + MGBE_VIRT_INTR_CHX_STATUS(chan));
|
|
||||||
|
|
||||||
mgbe_disable_chan_rx_intr(addr, chan);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief mgbe_config_slot - Configure slot Checking for DMA channel
|
* @brief mgbe_config_slot - Configure slot Checking for DMA channel
|
||||||
*
|
*
|
||||||
@@ -720,24 +68,60 @@ static void mgbe_config_slot(struct osi_dma_priv_data *osi_dma,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef OSI_DEBUG
|
||||||
|
/**
|
||||||
|
* @brief Enable/disable debug interrupt
|
||||||
|
*
|
||||||
|
* @param[in] osi_dma: OSI DMA private data structure.
|
||||||
|
*
|
||||||
|
* Algorithm:
|
||||||
|
* - if osi_dma->ioctl_data.arg_u32 == OSI_ENABLE enable debug interrupt
|
||||||
|
* - else disable debug interrupts
|
||||||
|
*/
|
||||||
|
static void mgbe_debug_intr_config(struct osi_dma_priv_data *osi_dma)
|
||||||
|
{
|
||||||
|
nveu32_t chinx;
|
||||||
|
nveu32_t chan;
|
||||||
|
nveu32_t val;
|
||||||
|
nveu32_t enable = osi_dma->ioctl_data.arg_u32;
|
||||||
|
|
||||||
|
if (enable == OSI_ENABLE) {
|
||||||
|
for (chinx = 0; chinx < osi_dma->num_dma_chans; chinx++) {
|
||||||
|
chan = osi_dma->dma_chans[chinx];
|
||||||
|
val = osi_readl((nveu8_t *)osi_dma->base +
|
||||||
|
MGBE_DMA_CHX_INTR_ENA(chan));
|
||||||
|
|
||||||
|
val |= (MGBE_DMA_CHX_INTR_AIE |
|
||||||
|
MGBE_DMA_CHX_INTR_FBEE |
|
||||||
|
MGBE_DMA_CHX_INTR_RBUE |
|
||||||
|
MGBE_DMA_CHX_INTR_TBUE |
|
||||||
|
MGBE_DMA_CHX_INTR_NIE);
|
||||||
|
osi_writel(val, (nveu8_t *)osi_dma->base +
|
||||||
|
MGBE_DMA_CHX_INTR_ENA(chan));
|
||||||
|
}
|
||||||
|
|
||||||
|
} else {
|
||||||
|
for (chinx = 0; chinx < osi_dma->num_dma_chans; chinx++) {
|
||||||
|
chan = osi_dma->dma_chans[chinx];
|
||||||
|
val = osi_readl((nveu8_t *)osi_dma->base +
|
||||||
|
MGBE_DMA_CHX_INTR_ENA(chan));
|
||||||
|
val &= (~MGBE_DMA_CHX_INTR_AIE &
|
||||||
|
~MGBE_DMA_CHX_INTR_FBEE &
|
||||||
|
~MGBE_DMA_CHX_INTR_RBUE &
|
||||||
|
~MGBE_DMA_CHX_INTR_TBUE &
|
||||||
|
~MGBE_DMA_CHX_INTR_NIE);
|
||||||
|
osi_writel(val, (nveu8_t *)osi_dma->base +
|
||||||
|
MGBE_DMA_CHX_INTR_ENA(chan));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
void mgbe_init_dma_chan_ops(struct dma_chan_ops *ops)
|
void mgbe_init_dma_chan_ops(struct dma_chan_ops *ops)
|
||||||
{
|
{
|
||||||
ops->set_tx_ring_len = mgbe_set_tx_ring_len;
|
|
||||||
ops->set_rx_ring_len = mgbe_set_rx_ring_len;
|
|
||||||
ops->set_tx_ring_start_addr = mgbe_set_tx_ring_start_addr;
|
|
||||||
ops->set_rx_ring_start_addr = mgbe_set_rx_ring_start_addr;
|
|
||||||
ops->update_tx_tailptr = mgbe_update_tx_tailptr;
|
|
||||||
ops->update_rx_tailptr = mgbe_update_rx_tailptr;
|
|
||||||
ops->disable_chan_tx_intr = mgbe_disable_chan_tx_intr;
|
|
||||||
ops->enable_chan_tx_intr = mgbe_enable_chan_tx_intr;
|
|
||||||
ops->disable_chan_rx_intr = mgbe_disable_chan_rx_intr;
|
|
||||||
ops->enable_chan_rx_intr = mgbe_enable_chan_rx_intr;
|
|
||||||
ops->start_dma = mgbe_start_dma;
|
|
||||||
ops->stop_dma = mgbe_stop_dma;
|
|
||||||
ops->init_dma_channel = mgbe_init_dma_channel;
|
|
||||||
ops->set_rx_buf_len = mgbe_set_rx_buf_len;
|
|
||||||
ops->validate_regs = mgbe_validate_dma_regs;
|
|
||||||
ops->clear_vm_tx_intr = mgbe_clear_vm_tx_intr;
|
|
||||||
ops->clear_vm_rx_intr = mgbe_clear_vm_rx_intr;
|
|
||||||
ops->config_slot = mgbe_config_slot;
|
ops->config_slot = mgbe_config_slot;
|
||||||
|
#ifdef OSI_DEBUG
|
||||||
|
ops->debug_intr_config = mgbe_debug_intr_config;
|
||||||
|
#endif
|
||||||
};
|
};
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
|
|||||||
@@ -32,17 +32,6 @@
|
|||||||
#define MGBE_AXI_CLK_FREQ 480000000U
|
#define MGBE_AXI_CLK_FREQ 480000000U
|
||||||
/** @} */
|
/** @} */
|
||||||
|
|
||||||
/**
|
|
||||||
* @@addtogroup Timestamp Capture Register
|
|
||||||
* @brief MGBE MAC Timestamp Register offset
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
#define MGBE_MAC_TSS 0X0D20
|
|
||||||
#define MGBE_MAC_TS_NSEC 0x0D30
|
|
||||||
#define MGBE_MAC_TS_SEC 0x0D34
|
|
||||||
#define MGBE_MAC_TS_PID 0x0D38
|
|
||||||
/** @} */
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @addtogroup MGBE_DMA DMA Channel Register offsets
|
* @addtogroup MGBE_DMA DMA Channel Register offsets
|
||||||
*
|
*
|
||||||
@@ -51,7 +40,9 @@
|
|||||||
*/
|
*/
|
||||||
#define MGBE_DMA_CHX_TX_CTRL(x) ((0x0080U * (x)) + 0x3104U)
|
#define MGBE_DMA_CHX_TX_CTRL(x) ((0x0080U * (x)) + 0x3104U)
|
||||||
#define MGBE_DMA_CHX_RX_CTRL(x) ((0x0080U * (x)) + 0x3108U)
|
#define MGBE_DMA_CHX_RX_CTRL(x) ((0x0080U * (x)) + 0x3108U)
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
#define MGBE_DMA_CHX_SLOT_CTRL(x) ((0x0080U * (x)) + 0x310CU)
|
#define MGBE_DMA_CHX_SLOT_CTRL(x) ((0x0080U * (x)) + 0x310CU)
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
#define MGBE_DMA_CHX_INTR_ENA(x) ((0x0080U * (x)) + 0x3138U)
|
#define MGBE_DMA_CHX_INTR_ENA(x) ((0x0080U * (x)) + 0x3138U)
|
||||||
#define MGBE_DMA_CHX_CTRL(x) ((0x0080U * (x)) + 0x3100U)
|
#define MGBE_DMA_CHX_CTRL(x) ((0x0080U * (x)) + 0x3100U)
|
||||||
#define MGBE_DMA_CHX_RX_WDT(x) ((0x0080U * (x)) + 0x313CU)
|
#define MGBE_DMA_CHX_RX_WDT(x) ((0x0080U * (x)) + 0x313CU)
|
||||||
@@ -60,22 +51,11 @@
|
|||||||
#define MGBE_DMA_CHX_TDLH(x) ((0x0080U * (x)) + 0x3110U)
|
#define MGBE_DMA_CHX_TDLH(x) ((0x0080U * (x)) + 0x3110U)
|
||||||
#define MGBE_DMA_CHX_TDLA(x) ((0x0080U * (x)) + 0x3114U)
|
#define MGBE_DMA_CHX_TDLA(x) ((0x0080U * (x)) + 0x3114U)
|
||||||
#define MGBE_DMA_CHX_TDTLP(x) ((0x0080U * (x)) + 0x3124U)
|
#define MGBE_DMA_CHX_TDTLP(x) ((0x0080U * (x)) + 0x3124U)
|
||||||
#define MGBE_DMA_CHX_TDTHP(x) ((0x0080U * (x)) + 0x3120U)
|
|
||||||
#define MGBE_DMA_CHX_RDLH(x) ((0x0080U * (x)) + 0x3118U)
|
#define MGBE_DMA_CHX_RDLH(x) ((0x0080U * (x)) + 0x3118U)
|
||||||
#define MGBE_DMA_CHX_RDLA(x) ((0x0080U * (x)) + 0x311CU)
|
#define MGBE_DMA_CHX_RDLA(x) ((0x0080U * (x)) + 0x311CU)
|
||||||
#define MGBE_DMA_CHX_RDTHP(x) ((0x0080U * (x)) + 0x3128U)
|
|
||||||
#define MGBE_DMA_CHX_RDTLP(x) ((0x0080U * (x)) + 0x312CU)
|
#define MGBE_DMA_CHX_RDTLP(x) ((0x0080U * (x)) + 0x312CU)
|
||||||
/** @} */
|
/** @} */
|
||||||
|
|
||||||
/**
|
|
||||||
* @addtogroup MGBE_INTR INT Channel Register offsets
|
|
||||||
*
|
|
||||||
* @brief MGBE Virtual Interrupt Channel register offsets
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
#define MGBE_VIRT_INTR_CHX_STATUS(x) (0x8604U + ((x) * 8U))
|
|
||||||
#define MGBE_VIRT_INTR_CHX_CNTRL(x) (0x8600U + ((x) * 8U))
|
|
||||||
#define MGBE_VIRT_INTR_APB_CHX_CNTRL(x) (0x8200U + ((x) * 4U))
|
|
||||||
/** @} */
|
/** @} */
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -84,44 +64,25 @@
|
|||||||
* @brief Values defined for the MGBE registers
|
* @brief Values defined for the MGBE registers
|
||||||
* @{
|
* @{
|
||||||
*/
|
*/
|
||||||
#define MGBE_DMA_CHX_TX_CTRL_OSP OSI_BIT(4)
|
|
||||||
#define MGBE_DMA_CHX_TX_CTRL_TSE OSI_BIT(12)
|
|
||||||
#define MGBE_DMA_CHX_RX_WDT_RWT_MASK 0xFFU
|
#define MGBE_DMA_CHX_RX_WDT_RWT_MASK 0xFFU
|
||||||
#define MGBE_DMA_CHX_RX_WDT_RWTU 2048U
|
#define MGBE_DMA_CHX_RX_WDT_RWTU 2048U
|
||||||
#define MGBE_DMA_CHX_RX_WDT_RWTU_2048_CYCLE 3U
|
#define MGBE_DMA_CHX_RX_WDT_RWTU_2048_CYCLE 0x3000U
|
||||||
#define MGBE_DMA_CHX_RX_WDT_RWTU_MASK 3U
|
#define MGBE_DMA_CHX_RX_WDT_RWTU_MASK 0x3000U
|
||||||
#define MGBE_DMA_CHX_RX_WDT_RWTU_SHIFT 12U
|
#ifdef OSI_DEBUG
|
||||||
#define MGBE_DMA_CHX_RBSZ_MASK 0x7FFEU
|
|
||||||
#define MGBE_DMA_CHX_RBSZ_SHIFT 1U
|
|
||||||
#define MGBE_AXI_BUS_WIDTH 0x10U
|
|
||||||
#define MGBE_DMA_CHX_CTRL_PBLX8 OSI_BIT(16)
|
|
||||||
#define MGBE_DMA_CHX_INTR_TIE OSI_BIT(0)
|
|
||||||
#define MGBE_DMA_CHX_INTR_TBUE OSI_BIT(2)
|
#define MGBE_DMA_CHX_INTR_TBUE OSI_BIT(2)
|
||||||
#define MGBE_DMA_CHX_INTR_RIE OSI_BIT(6)
|
|
||||||
#define MGBE_DMA_CHX_INTR_RBUE OSI_BIT(7)
|
#define MGBE_DMA_CHX_INTR_RBUE OSI_BIT(7)
|
||||||
#define MGBE_DMA_CHX_INTR_FBEE OSI_BIT(12)
|
#define MGBE_DMA_CHX_INTR_FBEE OSI_BIT(12)
|
||||||
#define MGBE_DMA_CHX_INTR_AIE OSI_BIT(14)
|
#define MGBE_DMA_CHX_INTR_AIE OSI_BIT(14)
|
||||||
#define MGBE_DMA_CHX_INTR_NIE OSI_BIT(15)
|
#define MGBE_DMA_CHX_INTR_NIE OSI_BIT(15)
|
||||||
#define MGBE_DMA_CHX_STATUS_TI OSI_BIT(0)
|
#endif
|
||||||
#define MGBE_DMA_CHX_STATUS_RI OSI_BIT(6)
|
#ifndef OSI_STRIPPED_LIB
|
||||||
#define MGBE_DMA_CHX_STATUS_NIS OSI_BIT(15)
|
|
||||||
#define MGBE_DMA_CHX_SLOT_ESC OSI_BIT(0)
|
#define MGBE_DMA_CHX_SLOT_ESC OSI_BIT(0)
|
||||||
#define MGBE_DMA_CHX_STATUS_CLEAR_TX (MGBE_DMA_CHX_STATUS_TI | \
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
MGBE_DMA_CHX_STATUS_NIS)
|
|
||||||
#define MGBE_DMA_CHX_STATUS_CLEAR_RX (MGBE_DMA_CHX_STATUS_RI | \
|
|
||||||
MGBE_DMA_CHX_STATUS_NIS)
|
|
||||||
#define MGBE_VIRT_INTR_CHX_STATUS_TX OSI_BIT(0)
|
|
||||||
#define MGBE_VIRT_INTR_CHX_STATUS_RX OSI_BIT(1)
|
|
||||||
#define MGBE_VIRT_INTR_CHX_CNTRL_TX OSI_BIT(0)
|
|
||||||
#define MGBE_VIRT_INTR_CHX_CNTRL_RX OSI_BIT(1)
|
|
||||||
#define MGBE_DMA_CHX_TX_CNTRL2_ORRQ_RECOMMENDED 64U
|
#define MGBE_DMA_CHX_TX_CNTRL2_ORRQ_RECOMMENDED 64U
|
||||||
#define MGBE_DMA_CHX_TX_CNTRL2_ORRQ_SHIFT 24U
|
#define MGBE_DMA_CHX_TX_CNTRL2_ORRQ_SHIFT 24U
|
||||||
#define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN 32U
|
#define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN 32U
|
||||||
#define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_MCHAN 64U
|
#define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_MCHAN 64U
|
||||||
#define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SHIFT 24U
|
#define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SHIFT 24U
|
||||||
#define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN_PRESI 8U
|
|
||||||
#define MGBE_DMA_CHX_RX_CNTRL2_ORRQ_SCHAN_PRESI 16U
|
|
||||||
#define MGBE_DMA_RING_LENGTH_MASK 0xFFFFU
|
|
||||||
#define MGBE_DMA_CHX_CTRL_PBL_SHIFT 16U
|
#define MGBE_DMA_CHX_CTRL_PBL_SHIFT 16U
|
||||||
/** @} */
|
/** @} */
|
||||||
|
|
||||||
@@ -131,35 +92,14 @@
|
|||||||
* @brief Values defined for PBL settings
|
* @brief Values defined for PBL settings
|
||||||
* @{
|
* @{
|
||||||
*/
|
*/
|
||||||
/* Tx and Rx Qsize is 64KB */
|
|
||||||
#define MGBE_TXQ_RXQ_SIZE_FPGA 65536U
|
|
||||||
/* Tx Queue size is 128KB */
|
/* Tx Queue size is 128KB */
|
||||||
#define MGBE_TXQ_SIZE 131072U
|
#define MGBE_TXQ_SIZE 131072U
|
||||||
/* Rx Queue size is 192KB */
|
/* Rx Queue size is 192KB */
|
||||||
#define MGBE_RXQ_SIZE 196608U
|
#define MGBE_RXQ_SIZE 196608U
|
||||||
/* MAX PBL value */
|
/* MAX PBL value */
|
||||||
#define MGBE_DMA_CHX_MAX_PBL 256U
|
#define MGBE_DMA_CHX_MAX_PBL 256U
|
||||||
|
#define MGBE_DMA_CHX_MAX_PBL_VAL 0x200000U
|
||||||
/* AXI Data width */
|
/* AXI Data width */
|
||||||
#define MGBE_AXI_DATAWIDTH 128U
|
#define MGBE_AXI_DATAWIDTH 128U
|
||||||
/** @} */
|
/** @} */
|
||||||
|
|
||||||
/**
|
|
||||||
* @addtogroup MGBE MAC timestamp registers bit field.
|
|
||||||
*
|
|
||||||
* @brief Values defined for the MGBE timestamp registers
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
#define MGBE_MAC_TSS_TXTSC OSI_BIT(15)
|
|
||||||
#define MGBE_MAC_TS_PID_MASK 0x3FFU
|
|
||||||
#define MGBE_MAC_TS_NSEC_MASK 0x7FFFFFFFU
|
|
||||||
/** @} */
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief mgbe_get_dma_chan_ops - MGBE get DMA channel operations
|
|
||||||
*
|
|
||||||
* Algorithm: Returns pointer DMA channel operations structure.
|
|
||||||
*
|
|
||||||
* @returns Pointer to DMA channel operations structure
|
|
||||||
*/
|
|
||||||
struct osi_dma_chan_ops *mgbe_get_dma_chan_ops(void);
|
|
||||||
#endif
|
#endif
|
||||||
|
|||||||
File diff suppressed because it is too large
@@ -1,5 +1,5 @@
/*
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* copy of this software and associated documentation files (the "Software"),
@@ -32,45 +32,6 @@

static struct desc_ops d_ops[MAX_MAC_IP_TYPES];
static struct desc_ops d_ops[MAX_MAC_IP_TYPES];

/**
* @brief get_rx_err_stats - Detect Errors from Rx Descriptor
*
* @note
* Algorithm:
* - This routine will be invoked by OSI layer itself which
* checks for the Last Descriptor and updates the receive status errors
* accordingly.
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*
* @param[in] rx_desc: Rx Descriptor.
* @param[in, out] pkt_err_stats: Packet error stats which stores the errors
* reported
*/
static inline void get_rx_err_stats(struct osi_rx_desc *rx_desc,
struct osi_pkt_err_stats *pkt_err_stats)
{
/* increment rx crc if we see CE bit set */
if ((rx_desc->rdes3 & RDES3_ERR_CRC) == RDES3_ERR_CRC) {
pkt_err_stats->rx_crc_error =
osi_update_stats_counter(
pkt_err_stats->rx_crc_error,
1UL);
}

/* increment rx frame error if we see RE bit set */
if ((rx_desc->rdes3 & RDES3_ERR_RE) == RDES3_ERR_RE) {
pkt_err_stats->rx_frame_error =
osi_update_stats_counter(
pkt_err_stats->rx_frame_error,
1UL);
}
}
/**
/**
* @brief validate_rx_completions_arg- Validate input argument of rx_completions
* @brief validate_rx_completions_arg- Validate input argument of rx_completions
*
*
@@ -97,34 +58,39 @@ static inline void get_rx_err_stats(struct osi_rx_desc *rx_desc,
static inline nve32_t validate_rx_completions_arg(
static inline nve32_t validate_rx_completions_arg(
struct osi_dma_priv_data *osi_dma,
struct osi_dma_priv_data *osi_dma,
nveu32_t chan,
nveu32_t chan,
nveu32_t *more_data_avail,
const nveu32_t *const more_data_avail,
struct osi_rx_ring **rx_ring,
struct osi_rx_ring **rx_ring,
struct osi_rx_pkt_cx **rx_pkt_cx)
struct osi_rx_pkt_cx **rx_pkt_cx)
{
{
struct dma_local *l_dma = (struct dma_local *)osi_dma;
const struct dma_local *const l_dma = (struct dma_local *)(void *)osi_dma;
nve32_t ret = 0;

if (osi_unlikely((osi_dma == OSI_NULL) ||
if (osi_unlikely((osi_dma == OSI_NULL) ||
(more_data_avail == OSI_NULL) ||
(more_data_avail == OSI_NULL) ||
(chan >= l_dma->max_chans))) {
(chan >= l_dma->num_max_chans))) {
return -1;
ret = -1;
goto fail;
}
}

*rx_ring = osi_dma->rx_ring[chan];
*rx_ring = osi_dma->rx_ring[chan];
if (osi_unlikely(*rx_ring == OSI_NULL)) {
if (osi_unlikely(*rx_ring == OSI_NULL)) {
OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
"validate_input_rx_completions: Invalid pointers\n",
"validate_input_rx_completions: Invalid pointers\n",
0ULL);
0ULL);
return -1;
ret = -1;
goto fail;
}
}
*rx_pkt_cx = &(*rx_ring)->rx_pkt_cx;
*rx_pkt_cx = &(*rx_ring)->rx_pkt_cx;
if (osi_unlikely(*rx_pkt_cx == OSI_NULL)) {
if (osi_unlikely(*rx_pkt_cx == OSI_NULL)) {
OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
"validate_input_rx_completions: Invalid pointers\n",
"validate_input_rx_completions: Invalid pointers\n",
0ULL);
0ULL);
return -1;
ret = -1;
goto fail;
}
}

return 0;
fail:
return ret;
}
}
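The change above converts each early "return -1;" in the argument validators into "ret = -1; goto fail;" so every helper keeps a single exit point. A minimal sketch of that pattern, with illustrative argument names that are not taken from the library:

static nve32_t example_validate_two_pointers(const void *first, const void *second)
{
	nve32_t ret = 0;

	if (osi_unlikely((first == OSI_NULL) || (second == OSI_NULL))) {
		ret = -1;	/* record the error code ... */
		goto fail;	/* ... and leave through the single exit below */
	}

	/* further checks would follow here, each jumping to fail on error */

fail:
	return ret;
}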
nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma,
|
nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma,
|
||||||
@@ -139,34 +105,42 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma,
|
|||||||
struct osi_rx_desc *context_desc = OSI_NULL;
|
struct osi_rx_desc *context_desc = OSI_NULL;
|
||||||
nveu32_t ip_type = osi_dma->mac;
|
nveu32_t ip_type = osi_dma->mac;
|
||||||
nve32_t received = 0;
|
nve32_t received = 0;
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
nve32_t received_resv = 0;
|
nve32_t received_resv = 0;
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
nve32_t ret = 0;
|
nve32_t ret = 0;
|
||||||
|
|
||||||
ret = validate_rx_completions_arg(osi_dma, chan, more_data_avail,
|
ret = validate_rx_completions_arg(osi_dma, chan, more_data_avail,
|
||||||
&rx_ring, &rx_pkt_cx);
|
&rx_ring, &rx_pkt_cx);
|
||||||
if (osi_unlikely(ret < 0)) {
|
if (osi_unlikely(ret < 0)) {
|
||||||
return ret;
|
received = -1;
|
||||||
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (rx_ring->cur_rx_idx >= osi_dma->rx_ring_sz) {
|
if (rx_ring->cur_rx_idx >= osi_dma->rx_ring_sz) {
|
||||||
OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
|
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
|
||||||
"dma_txrx: Invalid cur_rx_idx\n", 0ULL);
|
"dma_txrx: Invalid cur_rx_idx\n", 0ULL);
|
||||||
return -1;
|
received = -1;
|
||||||
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Reset flag to indicate if more Rx frames available to OSD layer */
|
/* Reset flag to indicate if more Rx frames available to OSD layer */
|
||||||
*more_data_avail = OSI_NONE;
|
*more_data_avail = OSI_NONE;
|
||||||
|
|
||||||
while ((received < budget) && (received_resv < budget)) {
|
while ((received < budget)
|
||||||
osi_memset(rx_pkt_cx, 0U, sizeof(*rx_pkt_cx));
|
#ifndef OSI_STRIPPED_LIB
|
||||||
|
&& (received_resv < budget)
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
|
) {
|
||||||
rx_desc = rx_ring->rx_desc + rx_ring->cur_rx_idx;
|
rx_desc = rx_ring->rx_desc + rx_ring->cur_rx_idx;
|
||||||
rx_swcx = rx_ring->rx_swcx + rx_ring->cur_rx_idx;
|
|
||||||
|
|
||||||
/* check for data availability */
|
/* check for data availability */
|
||||||
if ((rx_desc->rdes3 & RDES3_OWN) == RDES3_OWN) {
|
if ((rx_desc->rdes3 & RDES3_OWN) == RDES3_OWN) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
#ifdef OSI_DEBUG
|
rx_swcx = rx_ring->rx_swcx + rx_ring->cur_rx_idx;
|
||||||
|
osi_memset(rx_pkt_cx, 0U, sizeof(*rx_pkt_cx));
|
||||||
|
#if defined OSI_DEBUG && !defined OSI_STRIPPED_LIB
|
||||||
if (osi_dma->enable_desc_dump == 1U) {
|
if (osi_dma->enable_desc_dump == 1U) {
|
||||||
desc_dump(osi_dma, rx_ring->cur_rx_idx,
|
desc_dump(osi_dma, rx_ring->cur_rx_idx,
|
||||||
rx_ring->cur_rx_idx, RX_DESC_DUMP, chan);
|
rx_ring->cur_rx_idx, RX_DESC_DUMP, chan);
|
||||||
@@ -175,6 +149,7 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma,
|
|||||||
|
|
||||||
INCR_RX_DESC_INDEX(rx_ring->cur_rx_idx, osi_dma->rx_ring_sz);
|
INCR_RX_DESC_INDEX(rx_ring->cur_rx_idx, osi_dma->rx_ring_sz);
|
||||||
|
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
if (osi_unlikely(rx_swcx->buf_virt_addr ==
|
if (osi_unlikely(rx_swcx->buf_virt_addr ==
|
||||||
osi_dma->resv_buf_virt_addr)) {
|
osi_dma->resv_buf_virt_addr)) {
|
||||||
rx_swcx->buf_virt_addr = OSI_NULL;
|
rx_swcx->buf_virt_addr = OSI_NULL;
|
||||||
@@ -187,6 +162,7 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma,
|
|||||||
}
|
}
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
|
|
||||||
/* packet already processed */
|
/* packet already processed */
|
||||||
if ((rx_swcx->flags & OSI_RX_SWCX_PROCESSED) ==
|
if ((rx_swcx->flags & OSI_RX_SWCX_PROCESSED) ==
|
||||||
@@ -227,19 +203,22 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma,
|
|||||||
* are set
|
* are set
|
||||||
*/
|
*/
|
||||||
rx_pkt_cx->flags &= ~OSI_PKT_CX_VALID;
|
rx_pkt_cx->flags &= ~OSI_PKT_CX_VALID;
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
d_ops[ip_type].update_rx_err_stats(rx_desc,
|
d_ops[ip_type].update_rx_err_stats(rx_desc,
|
||||||
&osi_dma->pkt_err_stats);
|
&osi_dma->pkt_err_stats);
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Check if COE Rx checksum is valid */
|
/* Check if COE Rx checksum is valid */
|
||||||
d_ops[ip_type].get_rx_csum(rx_desc, rx_pkt_cx);
|
d_ops[ip_type].get_rx_csum(rx_desc, rx_pkt_cx);
|
||||||
|
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
/* Get Rx VLAN from descriptor */
|
/* Get Rx VLAN from descriptor */
|
||||||
d_ops[ip_type].get_rx_vlan(rx_desc, rx_pkt_cx);
|
d_ops[ip_type].get_rx_vlan(rx_desc, rx_pkt_cx);
|
||||||
|
|
||||||
/* get_rx_hash for RSS */
|
/* get_rx_hash for RSS */
|
||||||
d_ops[ip_type].get_rx_hash(rx_desc, rx_pkt_cx);
|
d_ops[ip_type].get_rx_hash(rx_desc, rx_pkt_cx);
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
context_desc = rx_ring->rx_desc + rx_ring->cur_rx_idx;
|
context_desc = rx_ring->rx_desc + rx_ring->cur_rx_idx;
|
||||||
/* Get rx time stamp */
|
/* Get rx time stamp */
|
||||||
ret = d_ops[ip_type].get_rx_hwstamp(osi_dma, rx_desc,
|
ret = d_ops[ip_type].get_rx_hwstamp(osi_dma, rx_desc,
|
||||||
@@ -273,21 +252,25 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma,
|
|||||||
osi_dma->rx_buf_len,
|
osi_dma->rx_buf_len,
|
||||||
rx_pkt_cx, rx_swcx);
|
rx_pkt_cx, rx_swcx);
|
||||||
} else {
|
} else {
|
||||||
OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
|
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
|
||||||
"dma_txrx: Invalid function pointer\n",
|
"dma_txrx: Invalid function pointer\n",
|
||||||
0ULL);
|
0ULL);
|
||||||
return -1;
|
received = -1;
|
||||||
|
goto fail;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
osi_dma->dstats.q_rx_pkt_n[chan] =
|
osi_dma->dstats.q_rx_pkt_n[chan] =
|
||||||
osi_update_stats_counter(
|
osi_update_stats_counter(
|
||||||
osi_dma->dstats.q_rx_pkt_n[chan],
|
osi_dma->dstats.q_rx_pkt_n[chan],
|
||||||
1UL);
|
1UL);
|
||||||
osi_dma->dstats.rx_pkt_n =
|
osi_dma->dstats.rx_pkt_n =
|
||||||
osi_update_stats_counter(osi_dma->dstats.rx_pkt_n, 1UL);
|
osi_update_stats_counter(osi_dma->dstats.rx_pkt_n, 1UL);
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
received++;
|
received++;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
/* If budget is done, check if HW ring still has unprocessed
|
/* If budget is done, check if HW ring still has unprocessed
|
||||||
* Rx packets, so that the OSD layer can decide to schedule
|
* Rx packets, so that the OSD layer can decide to schedule
|
||||||
* this function again.
|
* this function again.
|
||||||
@@ -304,10 +287,13 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma,
|
|||||||
*more_data_avail = OSI_ENABLE;
|
*more_data_avail = OSI_ENABLE;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
|
|
||||||
|
fail:
|
||||||
return received;
|
return received;
|
||||||
}
|
}
|
||||||
|
|
||||||
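For context, a rough sketch of how an OSD-side poll handler might drive osi_process_rx_completions() against its budget. The parameter order shown and the osd_reschedule_poll() hook are assumptions for illustration only; also note that the more_data_avail hint is maintained only when OSI_STRIPPED_LIB is not defined:

static void example_osd_rx_poll(struct osi_dma_priv_data *osi_dma,
				nveu32_t chan, nve32_t budget)
{
	nveu32_t more_data_avail = OSI_NONE;
	nve32_t received;

	received = osi_process_rx_completions(osi_dma, chan, budget,
					      &more_data_avail);
	if (received < 0) {
		return;	/* invalid arguments or ring state */
	}

	/* HW ring still holds unprocessed descriptors: poll again later */
	if (more_data_avail == OSI_ENABLE) {
		osd_reschedule_poll(osi_dma, chan);	/* hypothetical OSD hook */
	}
}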
|
#ifndef OSI_STRIPPED_LIB
|
||||||
/**
|
/**
|
||||||
* @brief inc_tx_pkt_stats - Increment Tx packet count Stats
|
* @brief inc_tx_pkt_stats - Increment Tx packet count Stats
|
||||||
*
|
*
|
||||||
@@ -437,7 +423,6 @@ static inline void get_tx_err_stats(struct osi_tx_desc *tx_desc,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifndef OSI_STRIPPED_LIB
|
|
||||||
nve32_t osi_clear_tx_pkt_err_stats(struct osi_dma_priv_data *osi_dma)
|
nve32_t osi_clear_tx_pkt_err_stats(struct osi_dma_priv_data *osi_dma)
|
||||||
{
|
{
|
||||||
nve32_t ret = -1;
|
nve32_t ret = -1;
|
||||||
@@ -509,23 +494,26 @@ static inline nve32_t validate_tx_completions_arg(
|
|||||||
nveu32_t chan,
|
nveu32_t chan,
|
||||||
struct osi_tx_ring **tx_ring)
|
struct osi_tx_ring **tx_ring)
|
||||||
{
|
{
|
||||||
struct dma_local *l_dma = (struct dma_local *)osi_dma;
|
const struct dma_local *const l_dma = (struct dma_local *)(void *)osi_dma;
|
||||||
|
nve32_t ret = 0;
|
||||||
|
|
||||||
if (osi_unlikely((osi_dma == OSI_NULL) ||
|
if (osi_unlikely((osi_dma == OSI_NULL) ||
|
||||||
(chan >= l_dma->max_chans))) {
|
(chan >= l_dma->num_max_chans))) {
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
*tx_ring = osi_dma->tx_ring[chan];
|
*tx_ring = osi_dma->tx_ring[chan];
|
||||||
|
|
||||||
if (osi_unlikely(*tx_ring == OSI_NULL)) {
|
if (osi_unlikely(*tx_ring == OSI_NULL)) {
|
||||||
OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
|
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
|
||||||
"validate_tx_completions_arg: Invalid pointers\n",
|
"validate_tx_completions_arg: Invalid pointers\n",
|
||||||
0ULL);
|
0ULL);
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto fail;
|
||||||
}
|
}
|
||||||
|
fail:
|
||||||
return 0;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -538,15 +526,15 @@ static inline nve32_t validate_tx_completions_arg(
|
|||||||
* @retval 1 if condition is true
|
* @retval 1 if condition is true
|
||||||
* @retval 0 if condition is false.
|
* @retval 0 if condition is false.
|
||||||
*/
|
*/
|
||||||
static inline unsigned int is_ptp_twostep_or_slave_mode(unsigned int ptp_flag)
|
static inline nveu32_t is_ptp_twostep_or_slave_mode(nveu32_t ptp_flag)
|
||||||
{
|
{
|
||||||
return (((ptp_flag & OSI_PTP_SYNC_SLAVE) == OSI_PTP_SYNC_SLAVE) ||
|
return (((ptp_flag & OSI_PTP_SYNC_SLAVE) == OSI_PTP_SYNC_SLAVE) ||
|
||||||
((ptp_flag & OSI_PTP_SYNC_TWOSTEP) == OSI_PTP_SYNC_TWOSTEP)) ?
|
((ptp_flag & OSI_PTP_SYNC_TWOSTEP) == OSI_PTP_SYNC_TWOSTEP)) ?
|
||||||
OSI_ENABLE : OSI_DISABLE;
|
OSI_ENABLE : OSI_DISABLE;
|
||||||
}
|
}
|
||||||
|
|
||||||
int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma,
|
nve32_t osi_process_tx_completions(struct osi_dma_priv_data *osi_dma,
|
||||||
unsigned int chan, int budget)
|
nveu32_t chan, nve32_t budget)
|
||||||
{
|
{
|
||||||
struct osi_tx_ring *tx_ring = OSI_NULL;
|
struct osi_tx_ring *tx_ring = OSI_NULL;
|
||||||
struct osi_txdone_pkt_cx *txdone_pkt_cx = OSI_NULL;
|
struct osi_txdone_pkt_cx *txdone_pkt_cx = OSI_NULL;
|
||||||
@@ -560,15 +548,17 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma,
|
|||||||
|
|
||||||
ret = validate_tx_completions_arg(osi_dma, chan, &tx_ring);
|
ret = validate_tx_completions_arg(osi_dma, chan, &tx_ring);
|
||||||
if (osi_unlikely(ret < 0)) {
|
if (osi_unlikely(ret < 0)) {
|
||||||
return ret;
|
processed = -1;
|
||||||
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
txdone_pkt_cx = &tx_ring->txdone_pkt_cx;
|
txdone_pkt_cx = &tx_ring->txdone_pkt_cx;
|
||||||
entry = tx_ring->clean_idx;
|
entry = tx_ring->clean_idx;
|
||||||
|
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
osi_dma->dstats.tx_clean_n[chan] =
|
osi_dma->dstats.tx_clean_n[chan] =
|
||||||
osi_update_stats_counter(osi_dma->dstats.tx_clean_n[chan], 1U);
|
osi_update_stats_counter(osi_dma->dstats.tx_clean_n[chan], 1U);
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
while ((entry != tx_ring->cur_tx_idx) && (entry < osi_dma->tx_ring_sz) &&
|
while ((entry != tx_ring->cur_tx_idx) && (entry < osi_dma->tx_ring_sz) &&
|
||||||
(processed < budget)) {
|
(processed < budget)) {
|
||||||
osi_memset(txdone_pkt_cx, 0U, sizeof(*txdone_pkt_cx));
|
osi_memset(txdone_pkt_cx, 0U, sizeof(*txdone_pkt_cx));
|
||||||
@@ -592,11 +582,15 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma,
|
|||||||
if (((tx_desc->tdes3 & TDES3_ES_BITS) != 0U) &&
|
if (((tx_desc->tdes3 & TDES3_ES_BITS) != 0U) &&
|
||||||
(osi_dma->mac != OSI_MAC_HW_MGBE)) {
|
(osi_dma->mac != OSI_MAC_HW_MGBE)) {
|
||||||
txdone_pkt_cx->flags |= OSI_TXDONE_CX_ERROR;
|
txdone_pkt_cx->flags |= OSI_TXDONE_CX_ERROR;
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
/* fill packet error stats */
|
/* fill packet error stats */
|
||||||
get_tx_err_stats(tx_desc,
|
get_tx_err_stats(tx_desc,
|
||||||
&osi_dma->pkt_err_stats);
|
&osi_dma->pkt_err_stats);
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
} else {
|
} else {
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
inc_tx_pkt_stats(osi_dma, chan);
|
inc_tx_pkt_stats(osi_dma, chan);
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
}
|
}
|
||||||
|
|
||||||
if (processed < INT_MAX) {
|
if (processed < INT_MAX) {
|
||||||
@@ -659,10 +653,11 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma,
|
|||||||
tx_swcx,
|
tx_swcx,
|
||||||
txdone_pkt_cx);
|
txdone_pkt_cx);
|
||||||
} else {
|
} else {
|
||||||
OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
|
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
|
||||||
"dma_txrx: Invalid function pointer\n",
|
"dma_txrx: Invalid function pointer\n",
|
||||||
0ULL);
|
0ULL);
|
||||||
return -1;
|
processed = -1;
|
||||||
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
tx_desc->tdes3 = 0;
|
tx_desc->tdes3 = 0;
|
||||||
@@ -674,6 +669,7 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma,
|
|||||||
tx_swcx->buf_virt_addr = OSI_NULL;
|
tx_swcx->buf_virt_addr = OSI_NULL;
|
||||||
tx_swcx->buf_phy_addr = 0;
|
tx_swcx->buf_phy_addr = 0;
|
||||||
tx_swcx->flags = 0;
|
tx_swcx->flags = 0;
|
||||||
|
tx_swcx->data_idx = 0;
|
||||||
INCR_TX_DESC_INDEX(entry, osi_dma->tx_ring_sz);
|
INCR_TX_DESC_INDEX(entry, osi_dma->tx_ring_sz);
|
||||||
|
|
||||||
/* Don't wait to update tx_ring->clean-idx. It will
|
/* Don't wait to update tx_ring->clean-idx. It will
|
||||||
@@ -684,6 +680,7 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma,
|
|||||||
tx_ring->clean_idx = entry;
|
tx_ring->clean_idx = entry;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fail:
|
||||||
return processed;
|
return processed;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -712,18 +709,17 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma,
|
|||||||
* @retval 1 - cntx desc used.
|
* @retval 1 - cntx desc used.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
static inline nve32_t need_cntx_desc(struct osi_tx_pkt_cx *tx_pkt_cx,
|
static inline nve32_t need_cntx_desc(const struct osi_tx_pkt_cx *const tx_pkt_cx,
|
||||||
struct osi_tx_swcx *tx_swcx,
|
struct osi_tx_swcx *tx_swcx,
|
||||||
struct osi_tx_desc *tx_desc,
|
struct osi_tx_desc *tx_desc,
|
||||||
unsigned int ptp_sync_flag,
|
nveu32_t ptp_sync_flag,
|
||||||
unsigned int mac)
|
nveu32_t mac)
|
||||||
{
|
{
|
||||||
nve32_t ret = 0;
|
nve32_t ret = 0;
|
||||||
|
|
||||||
if (((tx_pkt_cx->flags & OSI_PKT_CX_VLAN) == OSI_PKT_CX_VLAN) ||
|
if (((tx_pkt_cx->flags & OSI_PKT_CX_VLAN) == OSI_PKT_CX_VLAN) ||
|
||||||
((tx_pkt_cx->flags & OSI_PKT_CX_TSO) == OSI_PKT_CX_TSO) ||
|
((tx_pkt_cx->flags & OSI_PKT_CX_TSO) == OSI_PKT_CX_TSO) ||
|
||||||
((tx_pkt_cx->flags & OSI_PKT_CX_PTP) == OSI_PKT_CX_PTP)) {
|
((tx_pkt_cx->flags & OSI_PKT_CX_PTP) == OSI_PKT_CX_PTP)) {
|
||||||
|
|
||||||
if ((tx_pkt_cx->flags & OSI_PKT_CX_VLAN) == OSI_PKT_CX_VLAN) {
|
if ((tx_pkt_cx->flags & OSI_PKT_CX_VLAN) == OSI_PKT_CX_VLAN) {
|
||||||
/* Set context type */
|
/* Set context type */
|
||||||
tx_desc->tdes3 |= TDES3_CTXT;
|
tx_desc->tdes3 |= TDES3_CTXT;
|
||||||
@@ -750,13 +746,10 @@ static inline nve32_t need_cntx_desc(struct osi_tx_pkt_cx *tx_pkt_cx,
|
|||||||
|
|
||||||
/* This part of code must be at the end of function */
|
/* This part of code must be at the end of function */
|
||||||
if ((tx_pkt_cx->flags & OSI_PKT_CX_PTP) == OSI_PKT_CX_PTP) {
|
if ((tx_pkt_cx->flags & OSI_PKT_CX_PTP) == OSI_PKT_CX_PTP) {
|
||||||
if ((mac == OSI_MAC_HW_EQOS) &&
|
if (((mac == OSI_MAC_HW_EQOS) &&
|
||||||
((ptp_sync_flag & OSI_PTP_SYNC_TWOSTEP) ==
|
((ptp_sync_flag & OSI_PTP_SYNC_TWOSTEP) == OSI_PTP_SYNC_TWOSTEP))) {
|
||||||
OSI_PTP_SYNC_TWOSTEP)){
|
/* Doing nothing */
|
||||||
/* return the current ret value */
|
} else {
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Set context type */
|
/* Set context type */
|
||||||
tx_desc->tdes3 |= TDES3_CTXT;
|
tx_desc->tdes3 |= TDES3_CTXT;
|
||||||
/* in case of One-step sync */
|
/* in case of One-step sync */
|
||||||
@@ -770,6 +763,7 @@ static inline nve32_t need_cntx_desc(struct osi_tx_pkt_cx *tx_pkt_cx,
|
|||||||
ret = 1;
|
ret = 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
@@ -784,7 +778,7 @@ static inline nve32_t need_cntx_desc(struct osi_tx_pkt_cx *tx_pkt_cx,
|
|||||||
* @retval 1 if condition is true
|
* @retval 1 if condition is true
|
||||||
* @retval 0 if condition is false.
|
* @retval 0 if condition is false.
|
||||||
*/
|
*/
|
||||||
static inline unsigned int is_ptp_onestep_and_master_mode(unsigned int ptp_flag)
|
static inline nveu32_t is_ptp_onestep_and_master_mode(nveu32_t ptp_flag)
|
||||||
{
|
{
|
||||||
return (((ptp_flag & OSI_PTP_SYNC_MASTER) == OSI_PTP_SYNC_MASTER) &&
|
return (((ptp_flag & OSI_PTP_SYNC_MASTER) == OSI_PTP_SYNC_MASTER) &&
|
||||||
((ptp_flag & OSI_PTP_SYNC_ONESTEP) == OSI_PTP_SYNC_ONESTEP)) ?
|
((ptp_flag & OSI_PTP_SYNC_ONESTEP) == OSI_PTP_SYNC_ONESTEP)) ?
|
||||||
@@ -813,11 +807,19 @@ static inline unsigned int is_ptp_onestep_and_master_mode(unsigned int ptp_flag)
|
|||||||
* @param[in, out] tx_desc: Pointer to transmit descriptor to be filled.
|
* @param[in, out] tx_desc: Pointer to transmit descriptor to be filled.
|
||||||
* @param[in] tx_swcx: Pointer to corresponding tx descriptor software context.
|
* @param[in] tx_swcx: Pointer to corresponding tx descriptor software context.
|
||||||
*/
|
*/
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
static inline void fill_first_desc(struct osi_tx_ring *tx_ring,
|
static inline void fill_first_desc(struct osi_tx_ring *tx_ring,
|
||||||
struct osi_tx_pkt_cx *tx_pkt_cx,
|
struct osi_tx_pkt_cx *tx_pkt_cx,
|
||||||
struct osi_tx_desc *tx_desc,
|
struct osi_tx_desc *tx_desc,
|
||||||
struct osi_tx_swcx *tx_swcx,
|
struct osi_tx_swcx *tx_swcx,
|
||||||
unsigned int ptp_flag)
|
nveu32_t ptp_flag)
|
||||||
|
#else
|
||||||
|
static inline void fill_first_desc(OSI_UNUSED struct osi_tx_ring *tx_ring,
|
||||||
|
struct osi_tx_pkt_cx *tx_pkt_cx,
|
||||||
|
struct osi_tx_desc *tx_desc,
|
||||||
|
struct osi_tx_swcx *tx_swcx,
|
||||||
|
nveu32_t ptp_flag)
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
{
|
{
|
||||||
tx_desc->tdes0 = L32(tx_swcx->buf_phy_addr);
|
tx_desc->tdes0 = L32(tx_swcx->buf_phy_addr);
|
||||||
tx_desc->tdes1 = H32(tx_swcx->buf_phy_addr);
|
tx_desc->tdes1 = H32(tx_swcx->buf_phy_addr);
|
||||||
@@ -876,6 +878,7 @@ static inline void fill_first_desc(struct osi_tx_ring *tx_ring,
|
|||||||
tx_desc->tdes3 &= ~TDES3_TPL_MASK;
|
tx_desc->tdes3 &= ~TDES3_TPL_MASK;
|
||||||
tx_desc->tdes3 |= tx_pkt_cx->payload_len;
|
tx_desc->tdes3 |= tx_pkt_cx->payload_len;
|
||||||
} else {
|
} else {
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
if ((tx_ring->slot_check == OSI_ENABLE) &&
|
if ((tx_ring->slot_check == OSI_ENABLE) &&
|
||||||
(tx_ring->slot_number < OSI_SLOT_NUM_MAX)) {
|
(tx_ring->slot_number < OSI_SLOT_NUM_MAX)) {
|
||||||
/* Fill Slot number */
|
/* Fill Slot number */
|
||||||
@@ -884,6 +887,7 @@ static inline void fill_first_desc(struct osi_tx_ring *tx_ring,
|
|||||||
tx_ring->slot_number = ((tx_ring->slot_number + 1U) %
|
tx_ring->slot_number = ((tx_ring->slot_number + 1U) %
|
||||||
OSI_SLOT_NUM_MAX);
|
OSI_SLOT_NUM_MAX);
|
||||||
}
|
}
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
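fill_first_desc() above now carries two prototypes so the stripped (safety) build can mark the slot-handling argument unused. A minimal sketch of that dual-prototype pattern; the helper name and body are placeholders, only OSI_UNUSED and the slot fields come from the library:

#ifndef OSI_STRIPPED_LIB
static void example_helper(struct osi_tx_ring *tx_ring)
{
	/* slot handling that exists only in the full library */
	tx_ring->slot_number = (tx_ring->slot_number + 1U) % OSI_SLOT_NUM_MAX;
}
#else
static void example_helper(OSI_UNUSED struct osi_tx_ring *tx_ring)
{
	/* tx_ring is intentionally unused in the stripped build */
}
#endif /* !OSI_STRIPPED_LIB */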
@@ -921,55 +925,64 @@ static inline void dmb_oshst(void)
|
|||||||
* @retval 0 on success
|
* @retval 0 on success
|
||||||
* @retval -1 on failure.
|
* @retval -1 on failure.
|
||||||
*/
|
*/
|
||||||
static inline nve32_t validate_ctx(struct osi_dma_priv_data *osi_dma,
|
static inline nve32_t validate_ctx(const struct osi_dma_priv_data *const osi_dma,
|
||||||
struct osi_tx_pkt_cx *tx_pkt_cx)
|
const struct osi_tx_pkt_cx *const tx_pkt_cx)
|
||||||
{
|
{
|
||||||
|
nve32_t ret = 0;
|
||||||
|
|
||||||
|
(void) osi_dma;
|
||||||
if ((tx_pkt_cx->flags & OSI_PKT_CX_TSO) == OSI_PKT_CX_TSO) {
|
if ((tx_pkt_cx->flags & OSI_PKT_CX_TSO) == OSI_PKT_CX_TSO) {
|
||||||
if (osi_unlikely((tx_pkt_cx->tcp_udp_hdrlen /
|
if (osi_unlikely((tx_pkt_cx->tcp_udp_hdrlen /
|
||||||
OSI_TSO_HDR_LEN_DIVISOR) > TDES3_THL_MASK)) {
|
OSI_TSO_HDR_LEN_DIVISOR) > TDES3_THL_MASK)) {
|
||||||
OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
|
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
|
||||||
"dma_txrx: Invalid TSO header len\n",
|
"dma_txrx: Invalid TSO header len\n",
|
||||||
(nveul64_t)tx_pkt_cx->tcp_udp_hdrlen);
|
(nveul64_t)tx_pkt_cx->tcp_udp_hdrlen);
|
||||||
|
ret = -1;
|
||||||
goto fail;
|
goto fail;
|
||||||
} else if (osi_unlikely(tx_pkt_cx->payload_len >
|
} else if (osi_unlikely(tx_pkt_cx->payload_len >
|
||||||
TDES3_TPL_MASK)) {
|
TDES3_TPL_MASK)) {
|
||||||
OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
|
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
|
||||||
"dma_txrx: Invalid TSO payload len\n",
|
"dma_txrx: Invalid TSO payload len\n",
|
||||||
(nveul64_t)tx_pkt_cx->payload_len);
|
(nveul64_t)tx_pkt_cx->payload_len);
|
||||||
|
ret = -1;
|
||||||
goto fail;
|
goto fail;
|
||||||
} else if (osi_unlikely(tx_pkt_cx->mss > TDES2_MSS_MASK)) {
|
} else if (osi_unlikely(tx_pkt_cx->mss > TDES2_MSS_MASK)) {
|
||||||
OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
|
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
|
||||||
"dma_txrx: Invalid MSS\n",
|
"dma_txrx: Invalid MSS\n",
|
||||||
(nveul64_t)tx_pkt_cx->mss);
|
(nveul64_t)tx_pkt_cx->mss);
|
||||||
|
ret = -1;
|
||||||
goto fail;
|
goto fail;
|
||||||
|
} else {
|
||||||
|
/* empty statement */
|
||||||
}
|
}
|
||||||
} else if ((tx_pkt_cx->flags & OSI_PKT_CX_LEN) == OSI_PKT_CX_LEN) {
|
} else if ((tx_pkt_cx->flags & OSI_PKT_CX_LEN) == OSI_PKT_CX_LEN) {
|
||||||
if (osi_unlikely(tx_pkt_cx->payload_len > TDES3_PL_MASK)) {
|
if (osi_unlikely(tx_pkt_cx->payload_len > TDES3_PL_MASK)) {
|
||||||
OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
|
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
|
||||||
"dma_txrx: Invalid frame len\n",
|
"dma_txrx: Invalid frame len\n",
|
||||||
(nveul64_t)tx_pkt_cx->payload_len);
|
(nveul64_t)tx_pkt_cx->payload_len);
|
||||||
|
ret = -1;
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
/* empty statement */
|
||||||
}
|
}
|
||||||
|
|
||||||
if (osi_unlikely(tx_pkt_cx->vtag_id > TDES3_VT_MASK)) {
|
if (osi_unlikely(tx_pkt_cx->vtag_id > TDES3_VT_MASK)) {
|
||||||
OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
|
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
|
||||||
"dma_txrx: Invalid VTAG_ID\n",
|
"dma_txrx: Invalid VTAG_ID\n",
|
||||||
(nveul64_t)tx_pkt_cx->vtag_id);
|
(nveul64_t)tx_pkt_cx->vtag_id);
|
||||||
goto fail;
|
ret = -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
|
||||||
fail:
|
fail:
|
||||||
return -1;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma,
|
nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma,
|
||||||
struct osi_tx_ring *tx_ring,
|
struct osi_tx_ring *tx_ring,
|
||||||
struct dma_chan_ops *ops,
|
nveu32_t dma_chan)
|
||||||
nveu32_t chan)
|
|
||||||
{
|
{
|
||||||
struct dma_local *l_dma = (struct dma_local *)osi_dma;
|
struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma;
|
||||||
struct osi_tx_pkt_cx *tx_pkt_cx = OSI_NULL;
|
struct osi_tx_pkt_cx *tx_pkt_cx = OSI_NULL;
|
||||||
struct osi_tx_desc *first_desc = OSI_NULL;
|
struct osi_tx_desc *first_desc = OSI_NULL;
|
||||||
struct osi_tx_desc *last_desc = OSI_NULL;
|
struct osi_tx_desc *last_desc = OSI_NULL;
|
||||||
@@ -980,18 +993,25 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma,
|
|||||||
nveu32_t f_idx = tx_ring->cur_tx_idx;
|
nveu32_t f_idx = tx_ring->cur_tx_idx;
|
||||||
nveu32_t l_idx = 0;
|
nveu32_t l_idx = 0;
|
||||||
#endif /* OSI_DEBUG */
|
#endif /* OSI_DEBUG */
|
||||||
|
nveu32_t chan = dma_chan & 0xFU;
|
||||||
|
const nveu32_t tail_ptr_reg[2] = {
|
||||||
|
EQOS_DMA_CHX_TDTP(chan),
|
||||||
|
MGBE_DMA_CHX_TDTLP(chan)
|
||||||
|
};
|
||||||
nve32_t cntx_desc_consumed;
|
nve32_t cntx_desc_consumed;
|
||||||
nveu32_t pkt_id = 0x0U;
|
nveu32_t pkt_id = 0x0U;
|
||||||
nveu32_t desc_cnt = 0U;
|
nveu32_t desc_cnt = 0U;
|
||||||
nveu64_t tailptr;
|
nveu64_t tailptr;
|
||||||
nveu32_t entry = 0U;
|
nveu32_t entry = 0U;
|
||||||
|
nve32_t ret = 0;
|
||||||
nveu32_t i;
|
nveu32_t i;
|
||||||
|
|
||||||
entry = tx_ring->cur_tx_idx;
|
entry = tx_ring->cur_tx_idx;
|
||||||
if (entry >= osi_dma->tx_ring_sz) {
|
if (entry >= osi_dma->tx_ring_sz) {
|
||||||
OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
|
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
|
||||||
"dma_txrx: Invalid cur_tx_idx\n", 0ULL);
|
"dma_txrx: Invalid cur_tx_idx\n", 0ULL);
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
tx_desc = tx_ring->tx_desc + entry;
|
tx_desc = tx_ring->tx_desc + entry;
|
||||||
@@ -1001,15 +1021,18 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma,
|
|||||||
desc_cnt = tx_pkt_cx->desc_cnt;
|
desc_cnt = tx_pkt_cx->desc_cnt;
|
||||||
if (osi_unlikely(desc_cnt == 0U)) {
|
if (osi_unlikely(desc_cnt == 0U)) {
|
||||||
/* Will not hit this case */
|
/* Will not hit this case */
|
||||||
OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
|
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
|
||||||
"dma_txrx: Invalid desc_cnt\n", 0ULL);
|
"dma_txrx: Invalid desc_cnt\n", 0ULL);
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (validate_ctx(osi_dma, tx_pkt_cx) < 0) {
|
if (validate_ctx(osi_dma, tx_pkt_cx) < 0) {
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
/* Context descriptor for VLAN/TSO */
|
/* Context descriptor for VLAN/TSO */
|
||||||
if ((tx_pkt_cx->flags & OSI_PKT_CX_VLAN) == OSI_PKT_CX_VLAN) {
|
if ((tx_pkt_cx->flags & OSI_PKT_CX_VLAN) == OSI_PKT_CX_VLAN) {
|
||||||
osi_dma->dstats.tx_vlan_pkt_n =
|
osi_dma->dstats.tx_vlan_pkt_n =
|
||||||
@@ -1022,6 +1045,7 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma,
|
|||||||
osi_update_stats_counter(osi_dma->dstats.tx_tso_pkt_n,
|
osi_update_stats_counter(osi_dma->dstats.tx_tso_pkt_n,
|
||||||
1UL);
|
1UL);
|
||||||
}
|
}
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
|
|
||||||
cntx_desc_consumed = need_cntx_desc(tx_pkt_cx, tx_swcx, tx_desc,
|
cntx_desc_consumed = need_cntx_desc(tx_pkt_cx, tx_swcx, tx_desc,
|
||||||
osi_dma->ptp_flag, osi_dma->mac);
|
osi_dma->ptp_flag, osi_dma->mac);
|
||||||
@@ -1124,7 +1148,9 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma,
|
|||||||
* We need to make sure Tx descriptor updated above is really updated
|
* We need to make sure Tx descriptor updated above is really updated
|
||||||
* before setting up the DMA, hence add memory write barrier here.
|
* before setting up the DMA, hence add memory write barrier here.
|
||||||
*/
|
*/
|
||||||
|
if (tx_ring->skip_dmb == 0U) {
|
||||||
dmb_oshst();
|
dmb_oshst();
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef OSI_DEBUG
|
#ifdef OSI_DEBUG
|
||||||
if (osi_dma->enable_desc_dump == 1U) {
|
if (osi_dma->enable_desc_dump == 1U) {
|
||||||
@@ -1138,9 +1164,10 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma,
|
|||||||
(entry * sizeof(struct osi_tx_desc));
|
(entry * sizeof(struct osi_tx_desc));
|
||||||
if (osi_unlikely(tailptr < tx_ring->tx_desc_phy_addr)) {
|
if (osi_unlikely(tailptr < tx_ring->tx_desc_phy_addr)) {
|
||||||
/* Will not hit this case */
|
/* Will not hit this case */
|
||||||
OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
|
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
|
||||||
"dma_txrx: Invalid tx_desc_phy_addr\n", 0ULL);
|
"dma_txrx: Invalid tx_desc_phy_addr\n", 0ULL);
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -1149,9 +1176,11 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma,
|
|||||||
*/
|
*/
|
||||||
tx_ring->cur_tx_idx = entry;
|
tx_ring->cur_tx_idx = entry;
|
||||||
|
|
||||||
ops->update_tx_tailptr(osi_dma->base, chan, tailptr);
|
/* Update the Tx tail pointer */
|
||||||
|
osi_writel(L32(tailptr), (nveu8_t *)osi_dma->base + tail_ptr_reg[osi_dma->mac]);
|
||||||
|
|
||||||
return 0;
|
fail:
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
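hw_transmit() above now resolves the Tx doorbell register through a small table indexed by osi_dma->mac instead of calling back through dma_chan_ops. A condensed sketch of that idiom; the wrapper name is illustrative, while the macros and the osi_writel() pattern are taken from the change itself:

static inline void example_write_tx_tail_ptr(struct osi_dma_priv_data *osi_dma,
					     nveu32_t chan, nveu64_t tailptr)
{
	/* index 0 = EQOS, index 1 = MGBE, matching osi_dma->mac */
	const nveu32_t tail_ptr_reg[2] = {
		EQOS_DMA_CHX_TDTP(chan),
		MGBE_DMA_CHX_TDTLP(chan)
	};

	osi_writel(L32(tailptr),
		   (nveu8_t *)osi_dma->base + tail_ptr_reg[osi_dma->mac]);
}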
/**
|
/**
|
||||||
@@ -1176,22 +1205,37 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma,
|
|||||||
* @retval 0 on success
|
* @retval 0 on success
|
||||||
* @retval -1 on failure.
|
* @retval -1 on failure.
|
||||||
*/
|
*/
|
||||||
static nve32_t rx_dma_desc_initialization(struct osi_dma_priv_data *osi_dma,
|
static nve32_t rx_dma_desc_initialization(const struct osi_dma_priv_data *const osi_dma,
|
||||||
nveu32_t chan,
|
nveu32_t dma_chan)
|
||||||
struct dma_chan_ops *ops)
|
|
||||||
{
|
{
|
||||||
|
nveu32_t chan = dma_chan & 0xFU;
|
||||||
|
const nveu32_t start_addr_high_reg[2] = {
|
||||||
|
EQOS_DMA_CHX_RDLH(chan),
|
||||||
|
MGBE_DMA_CHX_RDLH(chan)
|
||||||
|
};
|
||||||
|
const nveu32_t start_addr_low_reg[2] = {
|
||||||
|
EQOS_DMA_CHX_RDLA(chan),
|
||||||
|
MGBE_DMA_CHX_RDLA(chan)
|
||||||
|
};
|
||||||
|
const nveu32_t ring_len_reg[2] = {
|
||||||
|
EQOS_DMA_CHX_RDRL(chan),
|
||||||
|
MGBE_DMA_CHX_RX_CNTRL2(chan)
|
||||||
|
};
|
||||||
|
const nveu32_t mask[2] = { 0x3FFU, 0x3FFFU };
|
||||||
struct osi_rx_ring *rx_ring = OSI_NULL;
|
struct osi_rx_ring *rx_ring = OSI_NULL;
|
||||||
struct osi_rx_desc *rx_desc = OSI_NULL;
|
struct osi_rx_desc *rx_desc = OSI_NULL;
|
||||||
struct osi_rx_swcx *rx_swcx = OSI_NULL;
|
struct osi_rx_swcx *rx_swcx = OSI_NULL;
|
||||||
nveu64_t tailptr = 0;
|
nveu64_t tailptr = 0;
|
||||||
nveu32_t i;
|
|
||||||
nve32_t ret = 0;
|
nve32_t ret = 0;
|
||||||
|
nveu32_t val;
|
||||||
|
nveu32_t i;
|
||||||
|
|
||||||
rx_ring = osi_dma->rx_ring[chan];
|
rx_ring = osi_dma->rx_ring[chan];
|
||||||
if (osi_unlikely(rx_ring == OSI_NULL)) {
|
if (osi_unlikely(rx_ring == OSI_NULL)) {
|
||||||
OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
|
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
|
||||||
"dma_txrx: Invalid argument\n", 0ULL);
|
"dma_txrx: Invalid argument\n", 0ULL);
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto fail;
|
||||||
};
|
};
|
||||||
|
|
||||||
rx_ring->cur_rx_idx = 0;
|
rx_ring->cur_rx_idx = 0;
|
||||||
@@ -1239,16 +1283,26 @@ static nve32_t rx_dma_desc_initialization(struct osi_dma_priv_data *osi_dma,
|
|||||||
|
|
||||||
if (osi_unlikely((tailptr < rx_ring->rx_desc_phy_addr))) {
|
if (osi_unlikely((tailptr < rx_ring->rx_desc_phy_addr))) {
|
||||||
/* Will not hit this case */
|
/* Will not hit this case */
|
||||||
OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
|
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
|
||||||
"dma_txrx: Invalid phys address\n", 0ULL);
|
"dma_txrx: Invalid phys address\n", 0ULL);
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
ops->set_rx_ring_len(osi_dma, chan, (osi_dma->rx_ring_sz - 1U));
|
/* Update the HW DMA ring length */
|
||||||
ops->update_rx_tailptr(osi_dma->base, chan, tailptr);
|
val = osi_readl((nveu8_t *)osi_dma->base + ring_len_reg[osi_dma->mac]);
|
||||||
ops->set_rx_ring_start_addr(osi_dma->base, chan,
|
val |= (osi_dma->rx_ring_sz - 1U) & mask[osi_dma->mac];
|
||||||
rx_ring->rx_desc_phy_addr);
|
osi_writel(val, (nveu8_t *)osi_dma->base + ring_len_reg[osi_dma->mac]);
|
||||||
|
|
||||||
|
update_rx_tail_ptr(osi_dma, chan, tailptr);
|
||||||
|
|
||||||
|
/* Program Ring start address */
|
||||||
|
osi_writel(H32(rx_ring->rx_desc_phy_addr),
|
||||||
|
(nveu8_t *)osi_dma->base + start_addr_high_reg[osi_dma->mac]);
|
||||||
|
osi_writel(L32(rx_ring->rx_desc_phy_addr),
|
||||||
|
(nveu8_t *)osi_dma->base + start_addr_low_reg[osi_dma->mac]);
|
||||||
|
|
||||||
|
fail:
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1273,25 +1327,58 @@ static nve32_t rx_dma_desc_initialization(struct osi_dma_priv_data *osi_dma,
|
|||||||
* @retval 0 on success
|
* @retval 0 on success
|
||||||
* @retval -1 on failure.
|
* @retval -1 on failure.
|
||||||
*/
|
*/
|
||||||
static nve32_t rx_dma_desc_init(struct osi_dma_priv_data *osi_dma,
|
static nve32_t rx_dma_desc_init(struct osi_dma_priv_data *osi_dma)
|
||||||
struct dma_chan_ops *ops)
|
|
||||||
{
|
{
|
||||||
nveu32_t chan = 0;
|
nveu32_t chan = 0;
|
||||||
nveu32_t i;
|
|
||||||
nve32_t ret = 0;
|
nve32_t ret = 0;
|
||||||
|
nveu32_t i;
|
||||||
|
|
||||||
for (i = 0; i < osi_dma->num_dma_chans; i++) {
|
for (i = 0; i < osi_dma->num_dma_chans; i++) {
|
||||||
chan = osi_dma->dma_chans[i];
|
chan = osi_dma->dma_chans[i];
|
||||||
|
|
||||||
ret = rx_dma_desc_initialization(osi_dma, chan, ops);
|
ret = rx_dma_desc_initialization(osi_dma, chan);
|
||||||
if (ret != 0) {
|
if (ret != 0) {
|
||||||
return ret;
|
goto fail;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fail:
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline void set_tx_ring_len_and_start_addr(const struct osi_dma_priv_data *const osi_dma,
|
||||||
|
nveu64_t tx_desc_phy_addr,
|
||||||
|
nveu32_t dma_chan,
|
||||||
|
nveu32_t len)
|
||||||
|
{
|
||||||
|
nveu32_t chan = dma_chan & 0xFU;
|
||||||
|
const nveu32_t ring_len_reg[2] = {
|
||||||
|
EQOS_DMA_CHX_TDRL(chan),
|
||||||
|
MGBE_DMA_CHX_TX_CNTRL2(chan)
|
||||||
|
};
|
||||||
|
const nveu32_t start_addr_high_reg[2] = {
|
||||||
|
EQOS_DMA_CHX_TDLH(chan),
|
||||||
|
MGBE_DMA_CHX_TDLH(chan)
|
||||||
|
};
|
||||||
|
const nveu32_t start_addr_low_reg[2] = {
|
||||||
|
EQOS_DMA_CHX_TDLA(chan),
|
||||||
|
MGBE_DMA_CHX_TDLA(chan)
|
||||||
|
};
|
||||||
|
const nveu32_t mask[2] = { 0x3FFU, 0x3FFFU };
|
||||||
|
nveu32_t val;
|
||||||
|
|
||||||
|
/* Program ring length */
|
||||||
|
val = osi_readl((nveu8_t *)osi_dma->base + ring_len_reg[osi_dma->mac]);
|
||||||
|
val |= len & mask[osi_dma->mac];
|
||||||
|
osi_writel(val, (nveu8_t *)osi_dma->base + ring_len_reg[osi_dma->mac]);
|
||||||
|
|
||||||
|
/* Program tx ring start address */
|
||||||
|
osi_writel(H32(tx_desc_phy_addr),
|
||||||
|
(nveu8_t *)osi_dma->base + start_addr_high_reg[osi_dma->mac]);
|
||||||
|
osi_writel(L32(tx_desc_phy_addr),
|
||||||
|
(nveu8_t *)osi_dma->base + start_addr_low_reg[osi_dma->mac]);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief tx_dma_desc_init - Initialize DMA Transmit descriptors.
|
* @brief tx_dma_desc_init - Initialize DMA Transmit descriptors.
|
||||||
*
|
*
|
||||||
@@ -1312,13 +1399,13 @@ static nve32_t rx_dma_desc_init(struct osi_dma_priv_data *osi_dma,
|
|||||||
* @retval 0 on success
|
* @retval 0 on success
|
||||||
* @retval -1 on failure.
|
* @retval -1 on failure.
|
||||||
*/
|
*/
|
||||||
static nve32_t tx_dma_desc_init(struct osi_dma_priv_data *osi_dma,
|
static nve32_t tx_dma_desc_init(const struct osi_dma_priv_data *const osi_dma)
|
||||||
struct dma_chan_ops *ops)
|
|
||||||
{
|
{
|
||||||
struct osi_tx_ring *tx_ring = OSI_NULL;
|
struct osi_tx_ring *tx_ring = OSI_NULL;
|
||||||
struct osi_tx_desc *tx_desc = OSI_NULL;
|
struct osi_tx_desc *tx_desc = OSI_NULL;
|
||||||
struct osi_tx_swcx *tx_swcx = OSI_NULL;
|
struct osi_tx_swcx *tx_swcx = OSI_NULL;
|
||||||
nveu32_t chan = 0;
|
nveu32_t chan = 0;
|
||||||
|
nve32_t ret = 0;
|
||||||
nveu32_t i, j;
|
nveu32_t i, j;
|
||||||
|
|
||||||
for (i = 0; i < osi_dma->num_dma_chans; i++) {
|
for (i = 0; i < osi_dma->num_dma_chans; i++) {
|
||||||
@@ -1326,9 +1413,10 @@ static nve32_t tx_dma_desc_init(struct osi_dma_priv_data *osi_dma,
|
|||||||
|
|
||||||
tx_ring = osi_dma->tx_ring[chan];
|
tx_ring = osi_dma->tx_ring[chan];
|
||||||
if (osi_unlikely(tx_ring == OSI_NULL)) {
|
if (osi_unlikely(tx_ring == OSI_NULL)) {
|
||||||
OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
|
OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
|
||||||
"dma_txrx: Invalid pointers\n", 0ULL);
|
"dma_txrx: Invalid pointers\n", 0ULL);
|
||||||
return -1;
|
ret = -1;
|
||||||
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
for (j = 0; j < osi_dma->tx_ring_sz; j++) {
|
for (j = 0; j < osi_dma->tx_ring_sz; j++) {
|
||||||
@@ -1349,46 +1437,47 @@ static nve32_t tx_dma_desc_init(struct osi_dma_priv_data *osi_dma,
|
|||||||
tx_ring->cur_tx_idx = 0;
|
tx_ring->cur_tx_idx = 0;
|
||||||
tx_ring->clean_idx = 0;
|
tx_ring->clean_idx = 0;
|
||||||
|
|
||||||
|
#ifndef OSI_STRIPPED_LIB
|
||||||
/* Slot function parameter initialization */
|
/* Slot function parameter initialization */
|
||||||
tx_ring->slot_number = 0U;
|
tx_ring->slot_number = 0U;
|
||||||
tx_ring->slot_check = OSI_DISABLE;
|
tx_ring->slot_check = OSI_DISABLE;
|
||||||
|
#endif /* !OSI_STRIPPED_LIB */
|
||||||
|
|
||||||
ops->set_tx_ring_len(osi_dma, chan,
|
set_tx_ring_len_and_start_addr(osi_dma, tx_ring->tx_desc_phy_addr,
|
||||||
(osi_dma->tx_ring_sz - 1U));
|
chan, (osi_dma->tx_ring_sz - 1U));
|
||||||
ops->set_tx_ring_start_addr(osi_dma->base, chan,
|
|
||||||
tx_ring->tx_desc_phy_addr);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
fail:
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
nve32_t dma_desc_init(struct osi_dma_priv_data *osi_dma,
|
nve32_t dma_desc_init(struct osi_dma_priv_data *osi_dma)
|
||||||
struct dma_chan_ops *ops)
|
|
||||||
{
|
{
|
||||||
nve32_t ret = 0;
|
nve32_t ret = 0;
|
||||||
|
|
||||||
ret = tx_dma_desc_init(osi_dma, ops);
|
ret = tx_dma_desc_init(osi_dma);
|
||||||
if (ret != 0) {
|
if (ret != 0) {
|
||||||
return ret;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = rx_dma_desc_init(osi_dma, ops);
|
ret = rx_dma_desc_init(osi_dma);
|
||||||
if (ret != 0) {
|
if (ret != 0) {
|
||||||
return ret;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fail:
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
nve32_t init_desc_ops(struct osi_dma_priv_data *osi_dma)
|
nve32_t init_desc_ops(const struct osi_dma_priv_data *const osi_dma)
|
||||||
{
|
{
|
||||||
typedef void (*desc_ops_arr)(struct desc_ops *);
|
typedef void (*desc_ops_arr)(struct desc_ops *p_ops);
|
||||||
|
|
||||||
desc_ops_arr desc_ops[2] = {
|
const desc_ops_arr desc_ops_a[2] = {
|
||||||
eqos_init_desc_ops, mgbe_init_desc_ops
|
eqos_init_desc_ops, mgbe_init_desc_ops
|
||||||
};
|
};
|
||||||
|
|
||||||
desc_ops[osi_dma->mac](&d_ops[osi_dma->mac]);
|
desc_ops_a[osi_dma->mac](&d_ops[osi_dma->mac]);
|
||||||
|
|
||||||
/* TODO: validate function pointers */
|
/* TODO: validate function pointers */
|
||||||
return 0;
|
return 0;
|
||||||
|
|||||||
@@ -20,30 +20,19 @@
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# DEALINGS IN THE SOFTWARE.
#
#
# libnvethernetrm interface export
# libnvethernetcl interface makefile fragment
#
#
###############################################################################
###############################################################################
osi_init_core_ops
osi_write_phy_reg
ifdef NV_INTERFACE_FLAG_STATIC_LIBRARY_SECTION
osi_read_phy_reg
NV_INTERFACE_NAME := nvethernetcl
osi_hw_core_init
NV_INTERFACE_COMPONENT_DIR := .
osi_hw_core_deinit
NV_INTERFACE_PUBLIC_INCLUDES := \
osi_get_core
./include
osi_handle_ioctl
endif
#Below need to be enabled when MACSEC is enabled
#osi_macsec_en
# Local Variables:
#osi_macsec_deinit
# indent-tabs-mode: t
#osi_macsec_ns_isr
# tab-width: 8
#osi_macsec_s_isr
# End:
#osi_macsec_init
# vi: set tabstop=8 noexpandtab:
#osi_macsec_cipher_config
#osi_macsec_config
#osi_init_macsec_ops
#osi_macsec_config_lut
#osi_macsec_loopback
#osi_macsec_read_mmc
#osi_macsec_config_dbg_buf
#osi_macsec_dbg_events_config
#osi_macsec_config_kt
#osi_macsec_get_sc_lut_key_index
#osi_macsec_update_mtu
54
osi/dma/staticlib/Makefile.tmk
Normal file
54
osi/dma/staticlib/Makefile.tmk
Normal file
@@ -0,0 +1,54 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################

ifdef NV_COMPONENT_FLAG_STATIC_LIBRARY_SECTION
include $(NV_BUILD_START_COMPONENT)

NV_COMPONENT_STRICT_WARNINGS_qnx_64 := 1

NV_COMPONENT_NAME := nvethernetcl
NV_COMPONENT_OWN_INTERFACE_DIR := .
NV_COMPONENT_SOURCES := \
$(NV_SOURCE)/nvethernetrm/osi/dma/eqos_dma.c \
$(NV_SOURCE)/nvethernetrm/osi/dma/osi_dma.c \
$(NV_SOURCE)/nvethernetrm/osi/dma/osi_dma_txrx.c \
$(NV_SOURCE)/nvethernetrm/osi/dma/mgbe_dma.c \
$(NV_SOURCE)/nvethernetrm/osi/dma/eqos_desc.c \
$(NV_SOURCE)/nvethernetrm/osi/dma/mgbe_desc.c \
$(NV_SOURCE)/nvethernetrm/osi/dma/debug.c \
$(NV_SOURCE)/nvethernetrm/osi/common/osi_common.c \
$(NV_SOURCE)/nvethernetrm/osi/common/eqos_common.c \
$(NV_SOURCE)/nvethernetrm/osi/common/mgbe_common.c

NV_COMPONENT_INCLUDES := \
$(NV_SOURCE)/nvethernetrm/include \
$(NV_SOURCE)/nvethernetrm/osi/common/include

ifeq ($(NV_BUILD_CONFIGURATION_IS_SAFETY),0)
NV_COMPONENT_CFLAGS += -DOSI_DEBUG
else
NV_COMPONENT_CFLAGS += -DOSI_STRIPPED_LIB
endif
include $(NV_BUILD_STATIC_LIBRARY)
endif