Delete nvethernetrm to move the history from dev-main to rel-35

Bug 3918941

Change-Id: Iddf8b0f6d343490407dfafc8914dca01b4b84b4b
This commit is contained in:
Bhadram Varka
2023-02-13 19:42:06 +05:30
parent c7aff594fc
commit 413eeb0b04
64 changed files with 0 additions and 51595 deletions

View File

@@ -1,34 +0,0 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Repository umbrella makefile fragment for nvethernetrm
###############################################################################
# Component subdirectories of this repository picked up by the umbrella build.
NV_REPOSITORY_COMPONENTS := \
	osi/core \
	osi/dma
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -1,19 +0,0 @@
Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View File

@@ -1,199 +0,0 @@
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef IVC_CORE_H
#define IVC_CORE_H
#include <osi_macsec.h>
/**
 * @brief Maximum size in bytes of a single Ethernet IVC message buffer.
 */
#define ETHER_MAX_IVC_BUF 2048U
/**
 * @brief Maximum number of nveu32_t arguments carried in one IVC message.
 */
#define MAX_ARGS 10
/**
 * @brief IVC commands between OSD & OSI.
 *
 * @note These enumerator values are part of the IVC message protocol shared
 * by both ends (see ivc_msg_common::cmd); do not reorder or renumber the
 * existing entries — append new commands at the end only.
 */
typedef enum ivc_cmd {
	core_init = 1,
	core_deinit,
	write_phy_reg,
	read_phy_reg,
	handle_ioctl,
	init_macsec,
	deinit_macsec,
	handle_ns_irq_macsec,
	handle_s_irq_macsec,
	lut_config_macsec,
	kt_config_macsec,
	cipher_config,
	loopback_config_macsec,
	en_macsec,
	config_macsec,
	read_mmc_macsec,
	dbg_buf_config_macsec,
	dbg_events_config_macsec,
	macsec_get_sc_lut_key_index,
	macsec_update_mtu_size,
} ivc_cmd;
/**
 * @brief IVC arguments structure — raw word arguments for a command.
 */
typedef struct ivc_args {
	/** Number of valid entries in @c arguments (expected <= MAX_ARGS) */
	nveu32_t count;
	/** Argument values for the command */
	nveu32_t arguments[MAX_ARGS];
} ivc_args;
/**
 * @brief IVC core argument structure.
 *
 * Core initialization parameters passed over IVC (payload of core_init).
 */
typedef struct ivc_core_args {
	/** Number of MTL queues enabled in MAC */
	nveu32_t num_mtl_queues;
	/** Array of MTL queues */
	nveu32_t mtl_queues[OSI_EQOS_MAX_NUM_CHANS];
	/** List of MTL Rx queue modes that need to be enabled */
	nveu32_t rxq_ctrl[OSI_EQOS_MAX_NUM_CHANS];
	/** Rx MTL queue mapping based on User Priority field */
	nveu32_t rxq_prio[OSI_EQOS_MAX_NUM_CHANS];
	/** Ethernet MAC address */
	nveu8_t mac_addr[OSI_ETH_ALEN];
	/** Tegra pre-silicon platform info */
	nveu32_t pre_si;
	/** VLAN tag stripping enable(1) or disable(0) */
	nveu32_t strip_vlan_tag;
	/** Pause frame support */
	nveu32_t pause_frames;
	/** Current flow control settings */
	nveu32_t flow_ctrl;
	/** Rx FIFO size */
	nveu32_t rx_fifo_size;
	/** Tx FIFO size */
	nveu32_t tx_fifo_size;
} ivc_core_args;
/**
* @brief macsec config structure.
*/
#ifdef MACSEC_SUPPORT
typedef struct macsec_config {
/** MACsec secure channel basic information */
struct osi_macsec_sc_info sc_info;
/** MACsec enable or disable */
unsigned int enable;
/** MACsec controller */
unsigned short ctlr;
/** MACsec KT index */
unsigned short kt_idx;
/** MACsec KT index */
nveu32_t key_index;
/** MACsec SCI */
nveu8_t sci[OSI_SCI_LEN];
} macsec_config;
#endif
/**
 * @brief IVC message structure.
 *
 * Common envelope for every message exchanged over IVC. The active union
 * member of @c data is selected by @c cmd.
 */
typedef struct ivc_msg_common {
	/**
	 * Status code returned as part of response message of IVC messages.
	 * Status code value is "0" for success and "< 0" for failure.
	 */
	nve32_t status;
	/** ID of the CMD. */
	ivc_cmd cmd;
	/** Message count, used for debug */
	nveu32_t count;
	/** Command-specific payload; the valid member depends on @c cmd */
	union {
		/** IVC argument structure */
		ivc_args args;
#ifndef OSI_STRIPPED_LIB
		/** AVB algorithm structure */
		struct osi_core_avb_algorithm avb_algo;
#endif
		/** OSI filter structure */
		struct osi_filter filter;
		/** OSI HW features */
		struct osi_hw_features hw_feat;
		/** MMC counters */
		struct osi_mmc_counters mmc;
		/** Core argument structure (core_init payload) */
		ivc_core_args init_args;
		/** ioctl command structure */
		struct osi_ioctl ioctl_data;
#ifdef MACSEC_SUPPORT
		/** MACsec LUT config */
		struct osi_macsec_lut_config lut_config;
#ifdef MACSEC_KEY_PROGRAM
		/** MACsec key table (KT) config */
		struct osi_macsec_kt_config kt_config;
#endif
		/** MACsec debug buffer data structure */
		struct osi_macsec_dbg_buf_config dbg_buf_config;
		/** MACsec config */
		macsec_config macsec_cfg;
		/** MACsec MMC counters */
		struct osi_macsec_mmc_counters macsec_mmc;
		/** MACsec IRQ stats */
		struct osi_macsec_irq_stats macsec_irq_stats;
#endif
	} data;
} ivc_msg_common_t;
/**
 * @brief osd_ivc_send_cmd - OSD IVC send command
 *
 * Sends one ivc_msg_common_t message to the remote end.
 *
 * @param[in] priv: OSD private data
 * @param[in] ivc_buf: ivc_msg_common structure
 *            NOTE(review): response status appears to be returned in
 *            ivc_buf->status, which would make this [in,out] — confirm
 *            against the implementation.
 * @param[in] len: length of data
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: Yes
 * - De-initialization: Yes
 *
 * @retval ivc status
 * @retval -1 on failure
 */
nve32_t osd_ivc_send_cmd(void *priv, ivc_msg_common_t *ivc_buf,
			 nveu32_t len);
/**
 * @brief ivc_get_core_safety_config - Get core safety config
 *
 * API Group:
 * - Initialization: Yes
 * - Run time: Yes
 * - De-initialization: Yes
 *
 * @return Opaque pointer to the core safety configuration object
 *         (presumably defined by the implementation — verify at caller).
 */
void *ivc_get_core_safety_config(void);
#endif /* IVC_CORE_H */

View File

@@ -1,634 +0,0 @@
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_MMC_H
#define INCLUDED_MMC_H
#include "../osi/common/type.h"
#include "osi_common.h"
/**
 * @brief osi_mmc_counters - The structure to hold RMON counter values
 *
 * Each "xxx_h" member holds the upper 32 bits of the corresponding "xxx"
 * counter (several original comments said "upper 32 bytes"; corrected to
 * bits throughout).
 */
struct osi_mmc_counters {
	/** Bytes transmitted, exclusive of preamble and retried bytes,
	 * in good and bad packets */
	nveu64_t mmc_tx_octetcount_gb;
	/** Upper 32 bits of mmc_tx_octetcount_gb */
	nveu64_t mmc_tx_octetcount_gb_h;
	/** Good and bad packets transmitted, exclusive of retried packets */
	nveu64_t mmc_tx_framecount_gb;
	/** Upper 32 bits of mmc_tx_framecount_gb */
	nveu64_t mmc_tx_framecount_gb_h;
	/** Good broadcast packets transmitted */
	nveu64_t mmc_tx_broadcastframe_g;
	/** Upper 32 bits of mmc_tx_broadcastframe_g */
	nveu64_t mmc_tx_broadcastframe_g_h;
	/** Good multicast packets transmitted */
	nveu64_t mmc_tx_multicastframe_g;
	/** Upper 32 bits of mmc_tx_multicastframe_g */
	nveu64_t mmc_tx_multicastframe_g_h;
	/** Good and bad packets transmitted with length 64 bytes,
	 * exclusive of preamble and retried packets */
	nveu64_t mmc_tx_64_octets_gb;
	/** Upper 32 bits of mmc_tx_64_octets_gb */
	nveu64_t mmc_tx_64_octets_gb_h;
	/** Good and bad packets transmitted with length 65-127 bytes,
	 * exclusive of preamble and retried packets */
	nveu64_t mmc_tx_65_to_127_octets_gb;
	/** Upper 32 bits of mmc_tx_65_to_127_octets_gb */
	nveu64_t mmc_tx_65_to_127_octets_gb_h;
	/** Good and bad packets transmitted with length 128-255 bytes,
	 * exclusive of preamble and retried packets */
	nveu64_t mmc_tx_128_to_255_octets_gb;
	/** Upper 32 bits of mmc_tx_128_to_255_octets_gb */
	nveu64_t mmc_tx_128_to_255_octets_gb_h;
	/** Good and bad packets transmitted with length 256-511 bytes,
	 * exclusive of preamble and retried packets */
	nveu64_t mmc_tx_256_to_511_octets_gb;
	/** Upper 32 bits of mmc_tx_256_to_511_octets_gb */
	nveu64_t mmc_tx_256_to_511_octets_gb_h;
	/** Good and bad packets transmitted with length 512-1023 bytes,
	 * exclusive of preamble and retried packets */
	nveu64_t mmc_tx_512_to_1023_octets_gb;
	/** Upper 32 bits of mmc_tx_512_to_1023_octets_gb */
	nveu64_t mmc_tx_512_to_1023_octets_gb_h;
	/** Good and bad packets transmitted with length 1024-max bytes,
	 * exclusive of preamble and retried packets */
	nveu64_t mmc_tx_1024_to_max_octets_gb;
	/** Upper 32 bits of mmc_tx_1024_to_max_octets_gb */
	nveu64_t mmc_tx_1024_to_max_octets_gb_h;
	/** Good and bad unicast packets transmitted */
	nveu64_t mmc_tx_unicast_gb;
	/** Upper 32 bits of mmc_tx_unicast_gb */
	nveu64_t mmc_tx_unicast_gb_h;
	/** Good and bad multicast packets transmitted */
	nveu64_t mmc_tx_multicast_gb;
	/** Upper 32 bits of mmc_tx_multicast_gb */
	nveu64_t mmc_tx_multicast_gb_h;
	/** Good and bad broadcast packets transmitted */
	nveu64_t mmc_tx_broadcast_gb;
	/** Upper 32 bits of mmc_tx_broadcast_gb */
	nveu64_t mmc_tx_broadcast_gb_h;
	/** Packets aborted because of underflow error */
	nveu64_t mmc_tx_underflow_error;
	/** Upper 32 bits of mmc_tx_underflow_error */
	nveu64_t mmc_tx_underflow_error_h;
	/** Packets successfully transmitted after a single collision
	 * in the half-duplex mode */
	nveu64_t mmc_tx_singlecol_g;
	/** Packets successfully transmitted after multiple collisions
	 * in the half-duplex mode */
	nveu64_t mmc_tx_multicol_g;
	/** Packets successfully transmitted after a deferral in the
	 * half-duplex mode */
	nveu64_t mmc_tx_deferred;
	/** Packets aborted because of late collision error */
	nveu64_t mmc_tx_latecol;
	/** Packets aborted because of excessive (16) collision errors */
	nveu64_t mmc_tx_exesscol;
	/** Packets aborted because of carrier sense error (no carrier
	 * or loss of carrier) */
	nveu64_t mmc_tx_carrier_error;
	/** Bytes transmitted, exclusive of preamble, only in good packets */
	nveu64_t mmc_tx_octetcount_g;
	/** Upper 32 bits of mmc_tx_octetcount_g */
	nveu64_t mmc_tx_octetcount_g_h;
	/** Good packets transmitted */
	nveu64_t mmc_tx_framecount_g;
	/** Upper 32 bits of mmc_tx_framecount_g */
	nveu64_t mmc_tx_framecount_g_h;
	/** Packets aborted because of excessive deferral error
	 * (deferred for more than two max-sized packet times) */
	nveu64_t mmc_tx_excessdef;
	/** Good Pause packets transmitted */
	nveu64_t mmc_tx_pause_frame;
	/** Upper 32 bits of mmc_tx_pause_frame */
	nveu64_t mmc_tx_pause_frame_h;
	/** Good VLAN packets transmitted */
	nveu64_t mmc_tx_vlan_frame_g;
	/** Upper 32 bits of mmc_tx_vlan_frame_g */
	nveu64_t mmc_tx_vlan_frame_g_h;
	/** Packets transmitted without errors and with length greater
	 * than the maxsize (1,518 or 1,522 bytes for VLAN tagged
	 * packets; 2000 bytes) */
	nveu64_t mmc_tx_osize_frame_g;
	/** Good and bad packets received */
	nveu64_t mmc_rx_framecount_gb;
	/** Upper 32 bits of mmc_rx_framecount_gb */
	nveu64_t mmc_rx_framecount_gb_h;
	/** Bytes received by DWC_ether_qos, exclusive of preamble,
	 * in good and bad packets */
	nveu64_t mmc_rx_octetcount_gb;
	/** Upper 32 bits of mmc_rx_octetcount_gb */
	nveu64_t mmc_rx_octetcount_gb_h;
	/** Bytes received by DWC_ether_qos, exclusive of preamble,
	 * only in good packets (original comment said "good and bad";
	 * corrected to match the _g suffix) */
	nveu64_t mmc_rx_octetcount_g;
	/** Upper 32 bits of mmc_rx_octetcount_g */
	nveu64_t mmc_rx_octetcount_g_h;
	/** Good broadcast packets received */
	nveu64_t mmc_rx_broadcastframe_g;
	/** Upper 32 bits of mmc_rx_broadcastframe_g */
	nveu64_t mmc_rx_broadcastframe_g_h;
	/** Good multicast packets received */
	nveu64_t mmc_rx_multicastframe_g;
	/** Upper 32 bits of mmc_rx_multicastframe_g */
	nveu64_t mmc_rx_multicastframe_g_h;
	/** Packets received with CRC error */
	nveu64_t mmc_rx_crc_error;
	/** Upper 32 bits of mmc_rx_crc_error */
	nveu64_t mmc_rx_crc_error_h;
	/** Packets received with alignment (dribble) error; valid only
	 * in 10/100 mode */
	nveu64_t mmc_rx_align_error;
	/** Packets received with runt (length less than 64 bytes and
	 * CRC error) error */
	nveu64_t mmc_rx_runt_error;
	/** Giant packets received with length (including CRC) greater
	 * than 1,518 bytes (1,522 bytes for VLAN tagged) and with
	 * CRC error */
	nveu64_t mmc_rx_jabber_error;
	/** Packets received with length less than 64 bytes, without
	 * any errors */
	nveu64_t mmc_rx_undersize_g;
	/** Packets received without error, with length greater than
	 * the maxsize */
	nveu64_t mmc_rx_oversize_g;
	/** Good and bad packets received with length 64 bytes,
	 * exclusive of the preamble */
	nveu64_t mmc_rx_64_octets_gb;
	/** Upper 32 bits of mmc_rx_64_octets_gb */
	nveu64_t mmc_rx_64_octets_gb_h;
	/** Good and bad packets received with length 65-127 bytes,
	 * exclusive of the preamble */
	nveu64_t mmc_rx_65_to_127_octets_gb;
	/** Upper 32 bits of mmc_rx_65_to_127_octets_gb */
	nveu64_t mmc_rx_65_to_127_octets_gb_h;
	/** Good and bad packets received with length 128-255 bytes,
	 * exclusive of the preamble */
	nveu64_t mmc_rx_128_to_255_octets_gb;
	/** Upper 32 bits of mmc_rx_128_to_255_octets_gb */
	nveu64_t mmc_rx_128_to_255_octets_gb_h;
	/** Good and bad packets received with length 256-511 bytes,
	 * exclusive of the preamble */
	nveu64_t mmc_rx_256_to_511_octets_gb;
	/** Upper 32 bits of mmc_rx_256_to_511_octets_gb */
	nveu64_t mmc_rx_256_to_511_octets_gb_h;
	/** Good and bad packets received with length 512-1023 bytes,
	 * exclusive of the preamble */
	nveu64_t mmc_rx_512_to_1023_octets_gb;
	/** Upper 32 bits of mmc_rx_512_to_1023_octets_gb */
	nveu64_t mmc_rx_512_to_1023_octets_gb_h;
	/** Good and bad packets received with length 1024-maxbytes,
	 * exclusive of the preamble */
	nveu64_t mmc_rx_1024_to_max_octets_gb;
	/** Upper 32 bits of mmc_rx_1024_to_max_octets_gb */
	nveu64_t mmc_rx_1024_to_max_octets_gb_h;
	/** Good unicast packets received */
	nveu64_t mmc_rx_unicast_g;
	/** Upper 32 bits of mmc_rx_unicast_g */
	nveu64_t mmc_rx_unicast_g_h;
	/** Packets received with length error (Length Type field not
	 * equal to packet size), for all packets with valid length
	 * field */
	nveu64_t mmc_rx_length_error;
	/** Upper 32 bits of mmc_rx_length_error */
	nveu64_t mmc_rx_length_error_h;
	/** Packets received with length field not equal to the valid
	 * packet size (greater than 1,500 but less than 1,536) */
	nveu64_t mmc_rx_outofrangetype;
	/** Upper 32 bits of mmc_rx_outofrangetype */
	nveu64_t mmc_rx_outofrangetype_h;
	/** Good and valid Pause packets received */
	nveu64_t mmc_rx_pause_frames;
	/** Upper 32 bits of mmc_rx_pause_frames */
	nveu64_t mmc_rx_pause_frames_h;
	/** Missed received packets because of FIFO overflow in
	 * DWC_ether_qos */
	nveu64_t mmc_rx_fifo_overflow;
	/** Upper 32 bits of mmc_rx_fifo_overflow */
	nveu64_t mmc_rx_fifo_overflow_h;
	/** Good and bad VLAN packets received */
	nveu64_t mmc_rx_vlan_frames_gb;
	/** Upper 32 bits of mmc_rx_vlan_frames_gb */
	nveu64_t mmc_rx_vlan_frames_gb_h;
	/** Packets received with error because of watchdog timeout
	 * error */
	nveu64_t mmc_rx_watchdog_error;
	/** Packets received with Receive error or Packet Extension
	 * error on the GMII or MII interface */
	nveu64_t mmc_rx_receive_error;
	/** Good control packets received.
	 * NOTE(review): original comment duplicated the
	 * mmc_rx_receive_error description; confirm against databook */
	nveu64_t mmc_rx_ctrl_frames_g;
	/** Microseconds Tx LPI is asserted in the MAC controller */
	nveu64_t mmc_tx_lpi_usec_cntr;
	/** Number of times MAC controller has entered Tx LPI */
	nveu64_t mmc_tx_lpi_tran_cntr;
	/** Microseconds Rx LPI is asserted in the MAC controller */
	nveu64_t mmc_rx_lpi_usec_cntr;
	/** Number of times MAC controller has entered Rx LPI */
	nveu64_t mmc_rx_lpi_tran_cntr;
	/** Good IPv4 datagrams received with the TCP, UDP, or ICMP
	 * payload */
	nveu64_t mmc_rx_ipv4_gd;
	/** Upper 32 bits of mmc_rx_ipv4_gd */
	nveu64_t mmc_rx_ipv4_gd_h;
	/** RxIPv4 Header Error Packets */
	nveu64_t mmc_rx_ipv4_hderr;
	/** Upper 32 bits of mmc_rx_ipv4_hderr */
	nveu64_t mmc_rx_ipv4_hderr_h;
	/** IPv4 datagram packets received that did not have a TCP,
	 * UDP, or ICMP payload */
	nveu64_t mmc_rx_ipv4_nopay;
	/** Upper 32 bits of mmc_rx_ipv4_nopay */
	nveu64_t mmc_rx_ipv4_nopay_h;
	/** Good IPv4 datagrams received with fragmentation */
	nveu64_t mmc_rx_ipv4_frag;
	/** Upper 32 bits of mmc_rx_ipv4_frag */
	nveu64_t mmc_rx_ipv4_frag_h;
	/** Good IPv4 datagrams received that had a UDP payload with
	 * checksum disabled */
	nveu64_t mmc_rx_ipv4_udsbl;
	/** Upper 32 bits of mmc_rx_ipv4_udsbl */
	nveu64_t mmc_rx_ipv4_udsbl_h;
	/** Good IPv6 datagrams received with the TCP, UDP, or ICMP
	 * payload */
	nveu64_t mmc_rx_ipv6_gd_octets;
	/** Upper 32 bits of mmc_rx_ipv6_gd_octets */
	nveu64_t mmc_rx_ipv6_gd_octets_h;
	/** IPv6 datagrams received with header (length or version
	 * mismatch) errors */
	nveu64_t mmc_rx_ipv6_hderr_octets;
	/** Upper 32 bits of mmc_rx_ipv6_hderr_octets */
	nveu64_t mmc_rx_ipv6_hderr_octets_h;
	/** IPv6 datagram packets received that did not have a TCP,
	 * UDP, or ICMP payload */
	nveu64_t mmc_rx_ipv6_nopay_octets;
	/** Upper 32 bits of mmc_rx_ipv6_nopay_octets */
	nveu64_t mmc_rx_ipv6_nopay_octets_h;
	/* Protocols */
	/** Good IP datagrams received by DWC_ether_qos with a good
	 * UDP payload */
	nveu64_t mmc_rx_udp_gd;
	/** Upper 32 bits of mmc_rx_udp_gd */
	nveu64_t mmc_rx_udp_gd_h;
	/** Good IP datagrams whose UDP payload has a checksum error.
	 * Not updated when the RxIPv4_UDP_Checksum_Disabled_Packets
	 * counter is incremented.
	 * NOTE(review): original comment said "good UDP payload";
	 * reworded to match the _err suffix — confirm against
	 * databook */
	nveu64_t mmc_rx_udp_err;
	/** Upper 32 bits of mmc_rx_udp_err */
	nveu64_t mmc_rx_udp_err_h;
	/** Good IP datagrams received with a good TCP payload */
	nveu64_t mmc_rx_tcp_gd;
	/** Upper 32 bits of mmc_rx_tcp_gd */
	nveu64_t mmc_rx_tcp_gd_h;
	/** Good IP datagrams whose TCP payload has a checksum error.
	 * NOTE(review): original comment said "good TCP payload";
	 * reworded to match the _err suffix — confirm against
	 * databook */
	nveu64_t mmc_rx_tcp_err;
	/** Upper 32 bits of mmc_rx_tcp_err */
	nveu64_t mmc_rx_tcp_err_h;
	/** Good IP datagrams received with a good ICMP payload */
	nveu64_t mmc_rx_icmp_gd;
	/** Upper 32 bits of mmc_rx_icmp_gd */
	nveu64_t mmc_rx_icmp_gd_h;
	/** Good IP datagrams received whose ICMP payload has a
	 * checksum error */
	nveu64_t mmc_rx_icmp_err;
	/** Upper 32 bits of mmc_rx_icmp_err */
	nveu64_t mmc_rx_icmp_err_h;
	/** Bytes received by DWC_ether_qos in good IPv4 datagrams
	 * encapsulating TCP, UDP, or ICMP data (Ethernet header, FCS,
	 * pad, or IP pad bytes are not included) */
	nveu64_t mmc_rx_ipv4_gd_octets;
	/** Upper 32 bits of mmc_rx_ipv4_gd_octets */
	nveu64_t mmc_rx_ipv4_gd_octets_h;
	/** Bytes received in IPv4 datagrams with header errors
	 * (checksum, length, version mismatch). The value in the
	 * Length field of IPv4 header is used to update this counter
	 * (Ethernet header, FCS, pad, or IP pad bytes are not
	 * included) */
	nveu64_t mmc_rx_ipv4_hderr_octets;
	/** Upper 32 bits of mmc_rx_ipv4_hderr_octets */
	nveu64_t mmc_rx_ipv4_hderr_octets_h;
	/** Bytes received in IPv4 datagrams that did not have a TCP,
	 * UDP, or ICMP payload. The value in the Length field of IPv4
	 * header is used to update this counter (Ethernet header, FCS,
	 * pad, or IP pad bytes are not included) */
	nveu64_t mmc_rx_ipv4_nopay_octets;
	/** Upper 32 bits of mmc_rx_ipv4_nopay_octets */
	nveu64_t mmc_rx_ipv4_nopay_octets_h;
	/** Bytes received in fragmented IPv4 datagrams. The value in
	 * the Length field of IPv4 header is used to update this
	 * counter (Ethernet header, FCS, pad, or IP pad bytes are not
	 * included) */
	nveu64_t mmc_rx_ipv4_frag_octets;
	/** Upper 32 bits of mmc_rx_ipv4_frag_octets */
	nveu64_t mmc_rx_ipv4_frag_octets_h;
	/** Bytes received in a UDP segment that had the UDP checksum
	 * disabled. Does not count IP header bytes (Ethernet header,
	 * FCS, pad, or IP pad bytes are not included) */
	nveu64_t mmc_rx_ipv4_udsbl_octets;
	/** Upper 32 bits of mmc_rx_ipv4_udsbl_octets */
	nveu64_t mmc_rx_ipv4_udsbl_octets_h;
	/** Bytes received in good IPv6 datagrams encapsulating TCP,
	 * UDP, or ICMP data (Ethernet header, FCS, pad, or IP pad
	 * bytes are not included) */
	nveu64_t mmc_rx_ipv6_gd;
	/** Upper 32 bits of mmc_rx_ipv6_gd */
	nveu64_t mmc_rx_ipv6_gd_h;
	/** Bytes received in IPv6 datagrams with header errors
	 * (length, version mismatch). The value in the Length field of
	 * IPv6 header is used to update this counter (Ethernet header,
	 * FCS, pad, or IP pad bytes are not included) */
	nveu64_t mmc_rx_ipv6_hderr;
	/** Upper 32 bits of mmc_rx_ipv6_hderr */
	nveu64_t mmc_rx_ipv6_hderr_h;
	/** Bytes received in IPv6 datagrams that did not have a TCP,
	 * UDP, or ICMP payload. The value in the Length field of IPv6
	 * header is used to update this counter (Ethernet header, FCS,
	 * pad, or IP pad bytes are not included) */
	nveu64_t mmc_rx_ipv6_nopay;
	/** Upper 32 bits of mmc_rx_ipv6_nopay */
	nveu64_t mmc_rx_ipv6_nopay_h;
	/* Protocols */
	/** Bytes received in a good UDP segment. Does not count IP
	 * header bytes */
	nveu64_t mmc_rx_udp_gd_octets;
	/** Upper 32 bits of mmc_rx_udp_gd_octets */
	nveu64_t mmc_rx_udp_gd_octets_h;
	/** Bytes received in a UDP segment that had checksum errors.
	 * Does not count IP header bytes */
	nveu64_t mmc_rx_udp_err_octets;
	/** Upper 32 bits of mmc_rx_udp_err_octets */
	nveu64_t mmc_rx_udp_err_octets_h;
	/** Bytes received in a good TCP segment. Does not count IP
	 * header bytes */
	nveu64_t mmc_rx_tcp_gd_octets;
	/** Upper 32 bits of mmc_rx_tcp_gd_octets */
	nveu64_t mmc_rx_tcp_gd_octets_h;
	/** Bytes received in a TCP segment that had checksum errors.
	 * Does not count IP header bytes */
	nveu64_t mmc_rx_tcp_err_octets;
	/** Upper 32 bits of mmc_rx_tcp_err_octets */
	nveu64_t mmc_rx_tcp_err_octets_h;
	/** Bytes received in a good ICMP segment. Does not count IP
	 * header bytes */
	nveu64_t mmc_rx_icmp_gd_octets;
	/** Upper 32 bits of mmc_rx_icmp_gd_octets */
	nveu64_t mmc_rx_icmp_gd_octets_h;
	/** Bytes received in an ICMP segment that had checksum errors.
	 * Does not count IP header bytes */
	nveu64_t mmc_rx_icmp_err_octets;
	/** Upper 32 bits of mmc_rx_icmp_err_octets */
	nveu64_t mmc_rx_icmp_err_octets_h;
	/* NOTE(review): the FPE counters below use unsigned long while
	 * every other counter uses nveu64_t — on 32-bit builds these
	 * widths differ; consider nveu64_t for consistency (ABI
	 * change, so not done here) */
	/** Additional mPackets transmitted due to preemption */
	unsigned long mmc_tx_fpe_frag_cnt;
	/** Count of number of times a hold request is given to MAC */
	unsigned long mmc_tx_fpe_hold_req_cnt;
	/** MAC frames with reassembly errors on the Receiver, due to
	 * mismatch in the fragment count value */
	unsigned long mmc_rx_packet_reass_err_cnt;
	/** Received MAC frames rejected due to unknown SMD value and
	 * MAC frame fragments rejected due to arriving with an SMD-C
	 * when there was no preceding preempted frame */
	unsigned long mmc_rx_packet_smd_err_cnt;
	/** MAC frames that were successfully reassembled and delivered
	 * to MAC */
	unsigned long mmc_rx_packet_asm_ok_cnt;
	/** Additional mPackets received due to preemption */
	unsigned long mmc_rx_fpe_fragment_cnt;
};
/**
 * @brief osi_xtra_stat_counters - OSI core extra stat counters
 *
 * Software-maintained event counters kept by the driver (interrupt
 * occurrences, buffer-allocation failures, link transitions). These are
 * not hardware MMC registers. Per-queue/per-channel arrays are sized for
 * the largest supported controller (MGBE).
 */
struct osi_xtra_stat_counters {
	/** RX buffer unavailable irq count (one entry per RX queue) */
	nveu64_t rx_buf_unavail_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
	/** Transmit Process Stopped irq count (one entry per queue) */
	nveu64_t tx_proc_stopped_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
	/** Transmit Buffer Unavailable irq count (one entry per queue) */
	nveu64_t tx_buf_unavail_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
	/** Receive Process Stopped irq count (one entry per queue) */
	nveu64_t rx_proc_stopped_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
	/** Receive Watchdog Timeout irq count */
	nveu64_t rx_watchdog_irq_n;
	/** Fatal Bus Error irq count */
	nveu64_t fatal_bus_error_irq_n;
	/** RX buffer (skb) re-allocation failure count (per queue) */
	nveu64_t re_alloc_rxbuf_failed[OSI_MGBE_MAX_NUM_QUEUES];
	/** TX per-channel interrupt count */
	nveu64_t tx_normal_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
	/** TX per-channel SW timer callback count */
	nveu64_t tx_usecs_swtimer_n[OSI_MGBE_MAX_NUM_QUEUES];
	/** RX per-channel interrupt count */
	nveu64_t rx_normal_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
	/** link connect count */
	nveu64_t link_connect_count;
	/** link disconnect count */
	nveu64_t link_disconnect_count;
	/** lock acquire failure count during node addition
	 * (ts_ prefix presumably timestamp list -- confirm with caller) */
	nveu64_t ts_lock_add_fail;
	/** lock acquire failure count during node removal */
	nveu64_t ts_lock_del_fail;
};
#ifdef MACSEC_SUPPORT
/**
 * @brief The structure holds MACsec statistics counters.
 *
 * Per-SC arrays are indexed by SC index (OSI_MACSEC_SC_INDEX_MAX entries).
 */
struct osi_macsec_mmc_counters {
	/** This counter provides the number of controller port macsec
	 * untagged packets */
	nveul64_t rx_pkts_no_tag;
	/** This counter provides the number of controller port macsec
	 * untagged packets when validateFrame != strict */
	nveul64_t rx_pkts_untagged;
	/** This counter provides the number of invalid tag or ICV packets */
	nveul64_t rx_pkts_bad_tag;
	/** This counter provides the number of packets with no SC lookup
	 * hit or SC match */
	nveul64_t rx_pkts_no_sa_err;
	/** This counter provides the number of packets with no SC lookup
	 * hit or SC match when validateFrame != strict */
	nveul64_t rx_pkts_no_sa;
	/** This counter provides the number of late packets
	 * received with PN < lowest PN (per SC) */
	nveul64_t rx_pkts_late[OSI_MACSEC_SC_INDEX_MAX];
	/** This counter provides the number of overrun packets */
	nveul64_t rx_pkts_overrun;
	/** This counter provides the number of octets after ICV validation
	 * passed */
	nveul64_t rx_octets_validated;
	/** This counter provides the number of not-valid packets (per SC) */
	nveul64_t rx_pkts_not_valid[OSI_MACSEC_SC_INDEX_MAX];
	/** This counter provides the number of invalid packets (per SC) */
	nveul64_t in_pkts_invalid[OSI_MACSEC_SC_INDEX_MAX];
	/** This counter provides the number of ingress packets delayed
	 * (per SC) */
	nveul64_t rx_pkts_delayed[OSI_MACSEC_SC_INDEX_MAX];
	/** This counter provides the number of ingress packets unchecked
	 * (per SC) */
	nveul64_t rx_pkts_unchecked[OSI_MACSEC_SC_INDEX_MAX];
	/** This counter provides the number of ingress packets ok (per SC) */
	nveul64_t rx_pkts_ok[OSI_MACSEC_SC_INDEX_MAX];
	/** This counter provides the number of egress packets untagged */
	nveul64_t tx_pkts_untaged;
	/** This counter provides the number of egress packets too long */
	nveul64_t tx_pkts_too_long;
	/** This counter provides the number of egress packets protected
	 * (per SC) */
	nveul64_t tx_pkts_protected[OSI_MACSEC_SC_INDEX_MAX];
	/** This counter provides the number of egress octets protected */
	nveul64_t tx_octets_protected;
};
#endif /* MACSEC_SUPPORT */
#endif /* INCLUDED_MMC_H */

View File

@@ -1,328 +0,0 @@
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_OSI_COMMON_H
#define INCLUDED_OSI_COMMON_H
#include "../osi/common/type.h"
/**
* @addtogroup FC Flow Control Threshold Macros
*
* @brief These bits control the threshold (fill-level of Rx queue) at which
* the flow control is asserted or de-asserted
* @{
*/
#define FULL_MINUS_1_5K (unsigned int)1
#define FULL_MINUS_2_K (unsigned int)2
#define FULL_MINUS_2_5K (unsigned int)3
#define FULL_MINUS_3_K (unsigned int)4
#define FULL_MINUS_4_K (unsigned int)6
#define FULL_MINUS_6_K (unsigned int)10
#define FULL_MINUS_10_K (unsigned int)18
#define FULL_MINUS_13_K (unsigned int)24
#define FULL_MINUS_14_K (unsigned int)26
#define FULL_MINUS_16_K (unsigned int)30
#define FULL_MINUS_18_K (unsigned int)34
#define FULL_MINUS_21_K (unsigned int)40
#define FULL_MINUS_24_K (unsigned int)46
#define FULL_MINUS_29_K (unsigned int)56
#define FULL_MINUS_31_K (unsigned int)60
#define FULL_MINUS_32_K (unsigned int)62
/** @} */
/**
* @addtogroup OSI-Helper OSI Helper MACROS
* @{
*/
#define OSI_UNLOCKED 0x0U
#define OSI_LOCKED 0x1U
#define OSI_NSEC_PER_SEC 1000000000ULL
#ifndef OSI_STRIPPED_LIB
#define OSI_MAX_RX_COALESCE_USEC 1020U
#define OSI_EQOS_MIN_RX_COALESCE_USEC 5U
#define OSI_MGBE_MIN_RX_COALESCE_USEC 6U
#define OSI_MIN_RX_COALESCE_FRAMES 1U
#define OSI_MAX_TX_COALESCE_USEC 1020U
#define OSI_MIN_TX_COALESCE_USEC 32U
#define OSI_MIN_TX_COALESCE_FRAMES 1U
#endif /* !OSI_STRIPPED_LIB */
/* Compiler hints for branch prediction */
#define osi_unlikely(x) __builtin_expect(!!(x), 0)
/** @} */
#ifndef OSI_STRIPPED_LIB
/**
* @addtogroup - LPI-Timers LPI configuration macros
*
* @brief LPI timers and config register field masks.
* @{
*/
/* LPI LS timer - minimum time (in milliseconds) for which the link status from
* PHY should be up before the LPI pattern can be transmitted to the PHY.
* Default 1sec.
*/
#define OSI_DEFAULT_LPI_LS_TIMER (nveu32_t)1000
#define OSI_LPI_LS_TIMER_MASK 0x3FFU
#define OSI_LPI_LS_TIMER_SHIFT 16U
/* LPI TW timer - minimum time (in microseconds) for which MAC wait after it
* stops transmitting LPI pattern before resuming normal tx.
* Default 21us
*/
#define OSI_DEFAULT_LPI_TW_TIMER 0x15U
#define OSI_LPI_TW_TIMER_MASK 0xFFFFU
/* LPI entry timer - Time in microseconds that MAC will wait to enter LPI mode
* after all tx is complete.
* Default 1sec.
*/
#define OSI_LPI_ENTRY_TIMER_MASK 0xFFFF8U
/* LPI entry timer - Time in microseconds that MAC will wait to enter LPI mode
* after all tx is complete. Default 1sec.
*/
#define OSI_DEFAULT_TX_LPI_TIMER 0xF4240U
/* Max Tx LPI timer (in usecs) based on the timer value field length in HW
* MAC_LPI_ENTRY_TIMER register */
#define OSI_MAX_TX_LPI_TIMER 0xFFFF8U
/* Min Tx LPI timer (in usecs) based on the timer value field length in HW
* MAC_LPI_ENTRY_TIMER register */
#define OSI_MIN_TX_LPI_TIMER 0x8U
/* Time in 1 microseconds tic counter used as reference for all LPI timers.
* It is clock rate of CSR slave port (APB clock[eqos_pclk] in eqos) minus 1
* Current eqos_pclk is 204MHz
*/
#define OSI_LPI_1US_TIC_COUNTER_DEFAULT 0xCBU
#define OSI_LPI_1US_TIC_COUNTER_MASK 0xFFFU
/** @} */
#endif /* !OSI_STRIPPED_LIB */
/**
* @addtogroup Helper Helper MACROS
*
* @brief EQOS generic helper MACROS.
* @{
*/
#ifndef OSI_STRIPPED_LIB
#define OSI_PAUSE_FRAMES_ENABLE 1U
#define OSI_PTP_REQ_CLK_FREQ 250000000U
#define OSI_FLOW_CTRL_DISABLE 0U
#define OSI_MAX_24BITS 0xFFFFFFU
#define OSI_MAX_28BITS 0xFFFFFFFU
#define OSI_MAX_32BITS 0xFFFFFFFFU
#define OSI_MASK_16BITS 0xFFFFU
#define OSI_MASK_20BITS 0xFFFFFU
#define OSI_MASK_24BITS 0xFFFFFFU
#define OSI_GCL_SIZE_64 64U
#define OSI_GCL_SIZE_128 128U
#define OSI_GCL_SIZE_256 256U
#define OSI_GCL_SIZE_512 512U
#define OSI_GCL_SIZE_1024 1024U
#define OSI_POLL_COUNT 1000U
#define OSI_ADDRESS_32BIT 0
#define OSI_ADDRESS_40BIT 1
#define OSI_ADDRESS_48BIT 2
#endif /* !OSI_STRIPPED_LIB */
#ifndef UINT_MAX
#define UINT_MAX (~0U)
#endif
#ifndef INT_MAX
#define INT_MAX (0x7FFFFFFF)
#endif
/** @} */
/**
* @addtogroup Helper Helper MACROS
*
* @brief EQOS generic helper MACROS.
* @{
*/
#define OSI_UCHAR_MAX (0xFFU)
/* Logging defines */
/* log levels */
#define OSI_LOG_INFO 1U
#define OSI_LOG_WARN 2U
#define OSI_LOG_ERR 3U
/* Error types */
#define OSI_LOG_ARG_OUTOFBOUND 1U
#define OSI_LOG_ARG_INVALID 2U
#define OSI_LOG_ARG_HW_FAIL 4U
#ifndef OSI_STRIPPED_LIB
#define OSI_LOG_ARG_OPNOTSUPP 3U
#endif /* !OSI_STRIPPED_LIB */
/* Default maximum Giant Packet Size Limit is 16K */
#define OSI_MAX_MTU_SIZE 16383U
/* MAC Tx/Rx Idle retry and delay count */
#define OSI_TXRX_IDLE_RETRY 5000U
#define OSI_DELAY_COUNT 10U
#define EQOS_DMA_CHX_STATUS(x) ((0x0080U * (x)) + 0x1160U)
#define MGBE_DMA_CHX_STATUS(x) ((0x0080U * (x)) + 0x3160U)
#define EQOS_DMA_CHX_IER(x) ((0x0080U * (x)) + 0x1134U)
/* FIXME add logic based on HW version */
#define OSI_EQOS_MAX_NUM_CHANS 8U
#define OSI_EQOS_MAX_NUM_QUEUES 8U
#define OSI_MGBE_MAX_L3_L4_FILTER 8U
#define OSI_MGBE_MAX_NUM_CHANS 10U
#define OSI_MGBE_MAX_NUM_QUEUES 10U
#define OSI_EQOS_XP_MAX_CHANS 4U
/* MACSEC max SCs supported: 16 */
#define OSI_MACSEC_SC_INDEX_MAX 16
/* HW supports 8 Hash table regs, but eqos_validate_core_regs only checks 4 */
#define OSI_EQOS_MAX_HASH_REGS 4U
/* MAC_VERSION register offset and SNVER field mask */
#define MAC_VERSION 0x110
#define MAC_VERSION_SNVER_MASK 0x7FU
/* MAC HW type identifiers */
#define OSI_MAC_HW_EQOS 0U
#define OSI_MAC_HW_MGBE 1U
/* Ethernet MAC address length in bytes */
#define OSI_ETH_ALEN 6U
/* Maximum number of VM IRQs */
#define OSI_MAX_VM_IRQS 5U
#define OSI_NULL ((void *)0)
#define OSI_ENABLE 1U
#define OSI_NONE 0U
#define OSI_NONE_SIGNED 0
#define OSI_DISABLE 0U
/* Single-bit helper for nveu32_t values; nr must be < 32 */
#define OSI_BIT(nr) ((nveu32_t)1 << (nr))
/* MAC IP version identifiers */
#define OSI_EQOS_MAC_4_10 0x41U
#define OSI_EQOS_MAC_5_00 0x50U
#define OSI_EQOS_MAC_5_10 0x51U
#define OSI_EQOS_MAC_5_30 0x53U
#define OSI_MGBE_MAC_3_00 0x30U
#define OSI_MGBE_MAC_3_10 0x31U
#define OSI_MGBE_MAC_4_00 0x40U
/* L3 filter IP type selectors */
#define OSI_IP4_FILTER 0U
#define OSI_IP6_FILTER 1U
#ifndef OSI_STRIPPED_LIB
#define OSI_L2_FILTER_INDEX_ANY 127U
#define OSI_HASH_FILTER_MODE 1U
#define OSI_L4_FILTER_TCP 0U
#define OSI_L4_FILTER_UDP 1U
#define OSI_PERFECT_FILTER_MODE 0U
#define NV_ETH_FCS_LEN 0x4U
#define NV_ETH_FRAME_LEN 1514U
#define MAX_ETH_FRAME_LEN_DEFAULT \
(NV_ETH_FRAME_LEN + NV_ETH_FCS_LEN + NV_VLAN_HLEN)
#define OSI_MTU_SIZE_16K 16000U
#define OSI_MTU_SIZE_8K 8000U
#define OSI_MTU_SIZE_4K 4000U
#define OSI_MTU_SIZE_2K 2000U
#define OSI_INVALID_CHAN_NUM 0xFFU
#endif /* OSI_STRIPPED_LIB */
/** @} */
/**
* @addtogroup OSI-DEBUG helper macros
*
* @brief OSI debug type macros
* @{
*/
#ifdef OSI_DEBUG
#define OSI_DEBUG_TYPE_DESC 1U
#define OSI_DEBUG_TYPE_REG 2U
#define OSI_DEBUG_TYPE_STRUCTS 3U
#endif /* OSI_DEBUG */
#ifndef OSI_STRIPPED_LIB
/**
* @addtogroup MTL queue operation mode
*
* @brief MTL queue operation mode options
* @{
*/
#define OSI_MTL_QUEUE_DISABLED 0x0U
#define OSI_MTL_QUEUE_AVB 0x1U
#define OSI_MTL_QUEUE_ENABLE 0x2U
#define OSI_MTL_QUEUE_MODEMAX 0x3U
/** @} */
/**
* @addtogroup EQOS_MTL MTL queue AVB algorithm mode
*
* @brief MTL AVB queue algorithm type
* @{
*/
#define OSI_MTL_TXQ_AVALG_CBS 1U
#define OSI_MTL_TXQ_AVALG_SP 0U
/** @} */
#endif /* OSI_STRIPPED_LIB */
/**
* @brief unused function attribute
*/
#define OSI_UNUSED __attribute__((__unused__))
/**
 * @brief osi_update_stats_counter - update value by increment passed
 * as parameter
 * @note
 * Algorithm:
 * - Compute last_value + incr; if the 64-bit sum wraps around
 *   (overflow), reset the counter to zero, else return the sum.
 *
 * @param[in] last_value: last value of stat counter
 * @param[in] incr: increment value
 *
 * @note Input parameter should be only nveu64_t type
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: Yes
 * - De-initialization: No
 *
 * @retval (last_value + incr) on success
 * @retval 0 when the 64-bit counter overflows (wrap-around)
 */
static inline nveu64_t osi_update_stats_counter(nveu64_t last_value,
						nveu64_t incr)
{
	nveu64_t temp = last_value + incr;
	if (temp < last_value) {
		/* Stats overflow, so reset it to zero */
		return 0UL;
	}
	return temp;
}
#endif /* OSI_COMMON_H */

View File

File diff suppressed because it is too large Load Diff

View File

File diff suppressed because it is too large Load Diff

View File

@@ -1,59 +0,0 @@
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_OSI_DMA_TXRX_H
#define INCLUDED_OSI_DMA_TXRX_H
/**
* @addtogroup EQOS_Help Descriptor Helper MACROS
*
* @brief Helper macros for defining Tx/Rx descriptor count
* @{
*/
#define OSI_EQOS_TX_DESC_CNT 1024U
#define OSI_EQOS_RX_DESC_CNT 1024U
#define OSI_MGBE_TX_DESC_CNT 4096U
#define OSI_MGBE_RX_DESC_CNT 4096U
#define OSI_MGBE_MAX_RX_DESC_CNT 16384U
/** @} */
/** TSO Header length divisor */
#define OSI_TSO_HDR_LEN_DIVISOR 4U
/**
* @addtogroup EQOS_Help1 Helper MACROS for descriptor index operations
*
* @brief Helper macros for incrementing or decrementing Tx/Rx descriptor index
* @{
*/
/** Increment the tx descriptor index */
#define INCR_TX_DESC_INDEX(idx, x) ((idx) = ((idx) + (1U)) & ((x) - 1U))
/** Increment the rx descriptor index */
#define INCR_RX_DESC_INDEX(idx, x) ((idx) = ((idx) + (1U)) & ((x) - 1U))
#ifndef OSI_STRIPPED_LIB
/** Decrement the tx descriptor index */
#define DECR_TX_DESC_INDEX(idx, x) ((idx) = ((idx) - (1U)) & ((x) - 1U))
/** Decrement the rx descriptor index */
#define DECR_RX_DESC_INDEX(idx, x) ((idx) = ((idx) - (1U)) & ((x) - 1U))
#endif /* !OSI_STRIPPED_LIB */
/** @} */
#endif /* INCLUDED_OSI_DMA_TXRX_H */

View File

@@ -1,818 +0,0 @@
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_OSI_MACSEC_H
#define INCLUDED_OSI_MACSEC_H
#include <osi_core.h>
#ifdef MACSEC_SUPPORT
//////////////////////////////////////////////////////////////////////////
/* MACSEC OSI data structures */
//////////////////////////////////////////////////////////////////////////
/**
* @addtogroup TX/RX BYP/SCI LUT helpers macros
*
* @brief Helper macros for LUT programming
* @{
*/
#define OSI_AN0_VALID OSI_BIT(0)
#define OSI_AN1_VALID OSI_BIT(1)
#define OSI_AN2_VALID OSI_BIT(2)
#define OSI_AN3_VALID OSI_BIT(3)
#define OSI_MAX_NUM_SA 4U
#define OSI_CURR_AN_MAX 3
#define OSI_KEY_INDEX_MAX 31U
#define OSI_PN_MAX_DEFAULT 0xFFFFFFFFU
#define OSI_PN_THRESHOLD_DEFAULT 0xC0000000U
#define OSI_TCI_DEFAULT 0x1
#define OSI_VLAN_IN_CLEAR_DEFAULT 0x0
#define OSI_SC_INDEX_MAX 15U
#define OSI_ETHTYPE_LEN 2
#define OSI_LUT_BYTE_PATTERN_MAX 4U
/* LUT byte pattern offset range 0-63 */
#define OSI_LUT_BYTE_PATTERN_MAX_OFFSET 63U
/* VLAN PCP range 0-7 */
#define OSI_VLAN_PCP_MAX 7U
/* VLAN ID range 1-4095 */
#define OSI_VLAN_ID_MAX 4095U
#define OSI_LUT_SEL_BYPASS 0U
#define OSI_LUT_SEL_SCI 1U
#define OSI_LUT_SEL_SC_PARAM 2U
#define OSI_LUT_SEL_SC_STATE 3U
#define OSI_LUT_SEL_SA_STATE 4U
#define OSI_LUT_SEL_MAX 4U
/* LUT input fields flags bit offsets */
#define OSI_LUT_FLAGS_DA_BYTE0_VALID OSI_BIT(0)
#define OSI_LUT_FLAGS_DA_BYTE1_VALID OSI_BIT(1)
#define OSI_LUT_FLAGS_DA_BYTE2_VALID OSI_BIT(2)
#define OSI_LUT_FLAGS_DA_BYTE3_VALID OSI_BIT(3)
#define OSI_LUT_FLAGS_DA_BYTE4_VALID OSI_BIT(4)
#define OSI_LUT_FLAGS_DA_BYTE5_VALID OSI_BIT(5)
#define OSI_LUT_FLAGS_DA_VALID (OSI_BIT(0) | OSI_BIT(1) | OSI_BIT(2) |\
OSI_BIT(3) | OSI_BIT(4) | OSI_BIT(5))
#define OSI_LUT_FLAGS_SA_BYTE0_VALID OSI_BIT(6)
#define OSI_LUT_FLAGS_SA_BYTE1_VALID OSI_BIT(7)
#define OSI_LUT_FLAGS_SA_BYTE2_VALID OSI_BIT(8)
#define OSI_LUT_FLAGS_SA_BYTE3_VALID OSI_BIT(9)
#define OSI_LUT_FLAGS_SA_BYTE4_VALID OSI_BIT(10)
#define OSI_LUT_FLAGS_SA_BYTE5_VALID OSI_BIT(11)
#define OSI_LUT_FLAGS_SA_VALID (OSI_BIT(6) | OSI_BIT(7) | OSI_BIT(8) |\
OSI_BIT(9) | OSI_BIT(10) | OSI_BIT(11))
#define OSI_LUT_FLAGS_ETHTYPE_VALID OSI_BIT(12)
#define OSI_LUT_FLAGS_VLAN_PCP_VALID OSI_BIT(13)
#define OSI_LUT_FLAGS_VLAN_ID_VALID OSI_BIT(14)
#define OSI_LUT_FLAGS_VLAN_VALID OSI_BIT(15)
#define OSI_LUT_FLAGS_BYTE0_PATTERN_VALID OSI_BIT(16)
#define OSI_LUT_FLAGS_BYTE1_PATTERN_VALID OSI_BIT(17)
#define OSI_LUT_FLAGS_BYTE2_PATTERN_VALID OSI_BIT(18)
#define OSI_LUT_FLAGS_BYTE3_PATTERN_VALID OSI_BIT(19)
#define OSI_LUT_FLAGS_PREEMPT OSI_BIT(20)
#define OSI_LUT_FLAGS_PREEMPT_VALID OSI_BIT(21)
#define OSI_LUT_FLAGS_CONTROLLED_PORT OSI_BIT(22)
#define OSI_LUT_FLAGS_DVLAN_PKT OSI_BIT(23)
#define OSI_LUT_FLAGS_DVLAN_OUTER_INNER_TAG_SEL OSI_BIT(24)
#define OSI_LUT_FLAGS_ENTRY_VALID OSI_BIT(31)
/** @} */
/**
* @addtogroup Generic table CONFIG register helpers macros
*
* @brief Helper macros for generic table CONFIG register programming
* @{
*/
#define OSI_CTLR_SEL_TX 0U
#define OSI_CTLR_SEL_RX 1U
#define OSI_CTLR_SEL_MAX 1U
#define OSI_LUT_READ 0U
#define OSI_LUT_WRITE 1U
#define OSI_RW_MAX 1U
#define OSI_TABLE_INDEX_MAX 31U
#define OSI_BYP_LUT_MAX_INDEX OSI_TABLE_INDEX_MAX
#define OSI_SC_LUT_MAX_INDEX 15U
#define OSI_SA_LUT_MAX_INDEX OSI_TABLE_INDEX_MAX
/** @} */
/**
* @addtogroup Debug buffer table CONFIG register helpers macros
*
* @brief Helper macros for debug buffer table CONFIG register programming
* @{
*/
#define OSI_DBG_TBL_READ OSI_LUT_READ
#define OSI_DBG_TBL_WRITE OSI_LUT_WRITE
/* Num of Tx debug buffers */
#define OSI_TX_DBG_BUF_IDX_MAX 12U
/* Num of Rx debug buffers */
#define OSI_RX_DBG_BUF_IDX_MAX 13U
/** flag - encoding various debug event bits */
#define OSI_TX_DBG_LKUP_MISS_EVT OSI_BIT(0)
#define OSI_TX_DBG_AN_NOT_VALID_EVT OSI_BIT(1)
#define OSI_TX_DBG_KEY_NOT_VALID_EVT OSI_BIT(2)
#define OSI_TX_DBG_CRC_CORRUPT_EVT OSI_BIT(3)
#define OSI_TX_DBG_ICV_CORRUPT_EVT OSI_BIT(4)
#define OSI_TX_DBG_CAPTURE_EVT OSI_BIT(5)
#define OSI_RX_DBG_LKUP_MISS_EVT OSI_BIT(6)
#define OSI_RX_DBG_KEY_NOT_VALID_EVT OSI_BIT(7)
#define OSI_RX_DBG_REPLAY_ERR_EVT OSI_BIT(8)
#define OSI_RX_DBG_CRC_CORRUPT_EVT OSI_BIT(9)
#define OSI_RX_DBG_ICV_ERROR_EVT OSI_BIT(10)
#define OSI_RX_DBG_CAPTURE_EVT OSI_BIT(11)
/** @} */
/**
* @addtogroup AES ciphers
*
* @brief Helper macro's for AES ciphers
* @{
*/
#define OSI_MACSEC_CIPHER_AES128 0U
#define OSI_MACSEC_CIPHER_AES256 1U
/** @} */
/**
* @addtogroup MACSEC Misc helper macro's
*
* @brief MACSEC Helper macro's
* @{
*/
#define OSI_MACSEC_TX_EN OSI_BIT(0)
#define OSI_MACSEC_RX_EN OSI_BIT(1)
/* MACSEC SECTAG + ICV + 2B ethertype adds upto 34B */
#define MACSEC_TAG_ICV_LEN 34U
/* MACSEC TZ key config cmd */
#define OSI_MACSEC_CMD_TZ_CONFIG 0x1
/* MACSEC TZ key table entries reset cmd */
#define OSI_MACSEC_CMD_TZ_KT_RESET 0x2
/** @} */
/**
* @brief Indicates different operations on MACSEC SA
*/
#define OSI_CREATE_SA 1U
#define OSI_ENABLE_SA 2U
#define OSI_DISABLE_SA 3U
/**
 * @brief MACSEC SA State LUT entry outputs structure
 */
struct osi_sa_state_outputs {
	/** Indicates next PN (packet number) to use */
	nveu32_t next_pn;
	/** Indicates lowest PN to use */
	nveu32_t lowest_pn;
};
/**
 * @brief MACSEC SC State LUT entry outputs structure
 */
struct osi_sc_state_outputs {
	/** Indicates current AN (association number) to use */
	nveu32_t curr_an;
};
/**
 * @brief MACSEC SC Param LUT entry outputs structure
 */
struct osi_sc_param_outputs {
	/** Indicates Key index start */
	nveu32_t key_index_start;
	/** PN max for given AN, after which HW will roll over to next AN */
	nveu32_t pn_max;
	/** PN threshold to trigger irq when threshold is reached */
	nveu32_t pn_threshold;
	/** Indicates PN window for egress packets */
	nveu32_t pn_window;
	/** SC identifier */
	nveu8_t sci[OSI_SCI_LEN];
	/** Indicates SECTAG 3 TCI bits V, ES, SC
	 * Default TCI value V=1, ES=0, SC = 1
	 */
	nveu8_t tci;
	/** Indicates 1 bit VLAN IN CLEAR config */
	nveu8_t vlan_in_clear;
};
/**
 * @brief MACSEC SCI LUT entry outputs structure
 */
struct osi_sci_lut_outputs {
	/** Indicates SC index to use */
	nveu32_t sc_index;
	/** SC identifier */
	nveu8_t sci[OSI_SCI_LEN];
	/** Bitmap of valid ANs (see OSI_AN0_VALID..OSI_AN3_VALID) */
	nveu32_t an_valid;
};
/**
 * @brief MACSEC generic table config data structure, common to all
 * LUT/KT/debug-buffer accesses.
 */
struct osi_macsec_table_config {
	/** Indicates controller select, Tx=0 (OSI_CTLR_SEL_TX),
	 * Rx=1 (OSI_CTLR_SEL_RX) */
	nveu16_t ctlr_sel;
	/** Read or write operation select, Read=0 (OSI_LUT_READ),
	 * Write=1 (OSI_LUT_WRITE) */
	nveu16_t rw;
	/** LUT entry index */
	nveu16_t index;
};
/**
 * @brief MACSEC Key Table entry structure
 */
struct osi_kt_entry {
	/** Indicates SAK (secure association key) - max 256 bit */
	nveu8_t sak[OSI_KEY_LEN_256];
	/** Indicates Hash-key */
	nveu8_t h[OSI_KEY_LEN_128];
};
/**
 * @brief MACSEC BYP/SCI LUT entry inputs structure - match fields for a
 * lookup entry; which fields are compared is selected by the flags in
 * struct osi_macsec_lut_config.
 */
struct osi_lut_inputs {
	/** MAC DA to compare */
	nveu8_t da[OSI_ETH_ALEN];
	/** MAC SA to compare */
	nveu8_t sa[OSI_ETH_ALEN];
	/** Ethertype to compare */
	nveu8_t ethtype[OSI_ETHTYPE_LEN];
	/** 4-Byte pattern to compare */
	nveu8_t byte_pattern[OSI_LUT_BYTE_PATTERN_MAX];
	/** Offset for 4-Byte pattern to compare
	 * (range 0-OSI_LUT_BYTE_PATTERN_MAX_OFFSET) */
	nveu32_t byte_pattern_offset[OSI_LUT_BYTE_PATTERN_MAX];
	/** VLAN PCP to compare (range 0-OSI_VLAN_PCP_MAX) */
	nveu32_t vlan_pcp;
	/** VLAN ID to compare (range 1-OSI_VLAN_ID_MAX) */
	nveu32_t vlan_id;
};
/**
 * @brief MACSEC LUT config data structure - carries both the inputs and
 * the per-LUT outputs for a single LUT read/write operation.
 */
struct osi_macsec_lut_config {
	/** Generic table config (controller, read/write, index) */
	struct osi_macsec_table_config table_config;
	/** Indicates LUT to select
	 * 0: Bypass LUT
	 * 1: SCI LUT
	 * 2: SC PARAM LUT
	 * 3: SC STATE LUT
	 * 4: SA STATE LUT
	 */
	nveu16_t lut_sel;
	/** flag - encoding various valid LUT bits for above fields
	 * (OSI_LUT_FLAGS_*) */
	nveu32_t flags;
	/** LUT inputs to use */
	struct osi_lut_inputs lut_in;
	/** SCI LUT outputs */
	struct osi_sci_lut_outputs sci_lut_out;
	/** SC Param LUT outputs */
	struct osi_sc_param_outputs sc_param_out;
	/** SC State LUT outputs */
	struct osi_sc_state_outputs sc_state_out;
	/** SA State LUT outputs */
	struct osi_sa_state_outputs sa_state_out;
};
/**
 * @brief MACSEC Key Table config data structure
 */
struct osi_macsec_kt_config {
	/** Generic table config (controller, read/write, index) */
	struct osi_macsec_table_config table_config;
	/** Key Table entry config */
	struct osi_kt_entry entry;
	/** Indicates key table entry valid or not, bit 31
	 * (OSI_LUT_FLAGS_ENTRY_VALID) */
	nveu32_t flags;
};
/**
 * @brief MACSEC Debug buffer config data structure
 */
struct osi_macsec_dbg_buf_config {
	/** Indicates Controller select, Tx=0, Rx=1 */
	nveu16_t ctlr_sel;
	/** Read or write operation select, Read=0, Write=1 */
	nveu16_t rw;
	/** Indicates debug data buffer (4 x 32-bit words) */
	nveu32_t dbg_buf[4];
	/** flag - encoding various debug event bits
	 * (OSI_TX_DBG_* / OSI_RX_DBG_*) */
	nveu32_t flags;
	/** Indicates debug buffer index
	 * (max OSI_TX_DBG_BUF_IDX_MAX / OSI_RX_DBG_BUF_IDX_MAX) */
	nveu32_t index;
};
/**
 * @brief MACSEC core operations structure - function-pointer table bound
 * at init time (see osi_init_macsec_ops).
 */
struct osi_macsec_core_ops {
	/** macsec init */
	nve32_t (*init)(struct osi_core_priv_data *const osi_core,
			nveu32_t mtu);
	/** macsec de-init */
	nve32_t (*deinit)(struct osi_core_priv_data *const osi_core);
	/** Non Secure irq handler */
	void (*handle_ns_irq)(struct osi_core_priv_data *const osi_core);
	/** Secure irq handler */
	void (*handle_s_irq)(struct osi_core_priv_data *const osi_core);
	/** macsec lut config */
	nve32_t (*lut_config)(struct osi_core_priv_data *const osi_core,
			      struct osi_macsec_lut_config *const lut_config);
#ifdef MACSEC_KEY_PROGRAM
	/** macsec kt config */
	nve32_t (*kt_config)(struct osi_core_priv_data *const osi_core,
			     struct osi_macsec_kt_config *const kt_config);
#endif /* MACSEC_KEY_PROGRAM */
	/** macsec cipher config */
	nve32_t (*cipher_config)(struct osi_core_priv_data *const osi_core,
				 nveu32_t cipher);
	/** macsec loopback config */
	nve32_t (*loopback_config)(struct osi_core_priv_data *const osi_core,
				   nveu32_t enable);
	/** macsec enable */
	nve32_t (*macsec_en)(struct osi_core_priv_data *const osi_core,
			     nveu32_t enable);
	/** macsec config SA in HW LUT */
	nve32_t (*config)(struct osi_core_priv_data *const osi_core,
			  struct osi_macsec_sc_info *const sc,
			  nveu32_t enable, nveu16_t ctlr,
			  nveu16_t *kt_idx);
	/** macsec read mmc counters */
	void (*read_mmc)(struct osi_core_priv_data *const osi_core);
	/** macsec debug buffer config */
	nve32_t (*dbg_buf_config)(struct osi_core_priv_data *const osi_core,
		struct osi_macsec_dbg_buf_config *const dbg_buf_config);
	/** macsec debug events config */
	nve32_t (*dbg_events_config)(struct osi_core_priv_data *const osi_core,
		struct osi_macsec_dbg_buf_config *const dbg_buf_config);
	/** macsec get Key Index start for a given SCI */
	nve32_t (*get_sc_lut_key_index)(struct osi_core_priv_data *const osi_core,
		nveu8_t *sci, nveu32_t *key_index, nveu16_t ctlr);
	/** macsec set MTU size */
	nve32_t (*update_mtu)(struct osi_core_priv_data *const osi_core,
			      nveu32_t mtu);
};
//////////////////////////////////////////////////////////////////////////
/* MACSEC OSI interface API prototypes */
//////////////////////////////////////////////////////////////////////////
/**
* @brief osi_init_macsec_ops - macsec initialize operations
*
* @note
* Algorithm:
* - If virtualization is enabled initialize virt ops
* - Else
* - If macsec base is null return -1
* - initialize with macsec ops
* - Refer to MACSEC column of <<******, (sequence diagram)>> for API details.
* - TraceID: ***********
*
* @param[in] osi_core: OSI core private data structure. used param macsec_base
*
* @pre MACSEC needs to be out of reset and proper clock configured.
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*
* @retval 0 on success
* @retval -1 on failure
*/
nve32_t osi_init_macsec_ops(struct osi_core_priv_data *const osi_core);
/**
* @brief osi_macsec_init - Initialize the macsec controller
*
* @note
* Algorithm:
* - Return -1 if osi core or ops is null
* - Configure MTU, controller configs, interrupts, clear all LUT's and
* set BYP LUT entries for MKPDU and BC packets
* - Refer to MACSEC column of <<******, (sequence diagram)>> for API details.
* - TraceID: ***********
*
* @param[in] osi_core: OSI core private data structure.
* @param[in] mtu: mtu to be programmed
*
* @pre MACSEC needs to be out of reset and proper clock configured.
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*
* @retval 0 on success
* @retval -1 on failure
*/
nve32_t osi_macsec_init(struct osi_core_priv_data *const osi_core,
nveu32_t mtu);
/**
* @brief osi_macsec_deinit - De-Initialize the macsec controller
*
* @note
* Algorithm:
* - Return -1 if osi core or ops is null
 * - Resets macsec global data structures and restores the MAC configuration
* - Refer to MACSEC column of <<******, (sequence diagram)>> for API details.
* - TraceID: ***********
*
* @param[in] osi_core: OSI core private data structure
*
* @pre MACSEC needs to be out of reset and proper clock configured.
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*
* @retval 0 on success
* @retval -1 on failure
*/
nve32_t osi_macsec_deinit(struct osi_core_priv_data *const osi_core);
/**
* @brief osi_macsec_ns_isr - macsec non-secure irq handler
*
* @note
* Algorithm:
 * - Returns early (no action) if osi core or ops is null; the handler is
 *   void, so no error code is reported
* - handles non-secure macsec interrupts
* - Refer to MACSEC column of <<******, (sequence diagram)>> for API details.
* - TraceID: ***********
*
* @param[in] osi_core: OSI core private data structure
*
* @pre MACSEC needs to be out of reset and proper clock configured.
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*
* @retval none
*/
void osi_macsec_ns_isr(struct osi_core_priv_data *const osi_core);
/**
* @brief osi_macsec_s_isr - macsec secure irq handler
*
* @note
* Algorithm:
 * - Returns early (no action) if osi core or ops is null; the handler is
 *   void, so no error code is reported
* - handles secure macsec interrupts
* - Refer to MACSEC column of <<******, (sequence diagram)>> for API details.
* - TraceID: ***********
*
* @param[in] osi_core: OSI core private data structure
*
* @pre MACSEC needs to be out of reset and proper clock configured.
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*
* @retval none
*/
void osi_macsec_s_isr(struct osi_core_priv_data *const osi_core);
/**
* @brief osi_macsec_config_lut - Read or write to macsec LUTs
*
* @note
* Algorithm:
* - Return -1 if osi core or ops is null
* - Reads or writes to MACSEC LUTs
* - Refer to MACSEC column of <<******, (sequence diagram)>> for API details.
* - TraceID: ***********
*
* @param[in] osi_core: OSI core private data structure
* @param[out] lut_config: Pointer to the lut configuration
*
* @pre MACSEC needs to be out of reset and proper clock configured.
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*
* @retval 0 on success
* @retval -1 on failure
*/
nve32_t osi_macsec_config_lut(struct osi_core_priv_data *const osi_core,
struct osi_macsec_lut_config *const lut_config);
/**
* @brief osi_macsec_config_kt - API to read or update the keys
*
* @note
* Algorithm:
* - Return -1 if osi core or ops is null
* - Read or write the keys
* - Refer to MACSEC column of <<******, (sequence diagram)>> for API details.
* - TraceID: ***********
*
* @param[in] osi_core: OSI core private data structure
* @param[in] kt_config: Keys that needs to be programmed
*
* @pre MACSEC needs to be out of reset and proper clock configured.
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*
* @retval 0 on success
* @retval -1 on failure
*/
nve32_t osi_macsec_config_kt(struct osi_core_priv_data *const osi_core,
struct osi_macsec_kt_config *const kt_config);
/**
* @brief osi_macsec_cipher_config - API to update the cipher
*
* @note
* Algorithm:
* - Return -1 if osi core or ops is null
* - Updates cipher to use
* - Refer to MACSEC column of <<******, (sequence diagram)>> for API details.
* - TraceID: ***********
*
* @param[in] osi_core: OSI core private data structure
* @param[in] cipher: Cipher suit to be used
*
* @pre MACSEC needs to be out of reset and proper clock configured.
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*
* @retval 0 on success
* @retval -1 on failure
*/
nve32_t osi_macsec_cipher_config(struct osi_core_priv_data *const osi_core,
nveu32_t cipher);
/**
* @brief osi_macsec_loopback - API to enable/disable macsec loopback
*
* @note
* Algorithm:
* - Return -1 if osi core or ops is null
* - Enables/disables macsec loopback
* - Refer to MACSEC column of <<******, (sequence diagram)>> for API details.
* - TraceID: ***********
*
* @param[in] osi_core: OSI core private data structure
* @param[in] enable: parameter to enable or disable
*
* @pre MACSEC needs to be out of reset and proper clock configured.
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*
* @retval 0 on success
* @retval -1 on failure
*/
nve32_t osi_macsec_loopback(struct osi_core_priv_data *const osi_core,
nveu32_t enable);
/**
* @brief osi_macsec_en - API to enable/disable macsec
*
* @note
* Algorithm:
* - Return -1 if passed enable param is invalid
* - Return -1 if osi core or ops is null
* - Enables/disables macsec
* - Refer to MACSEC column of <<******, (sequence diagram)>> for API details.
* - TraceID: ***********
*
* @param[in] osi_core: OSI core private data structure
* @param[in] enable: parameter to enable or disable
*
* @pre MACSEC needs to be out of reset and proper clock configured.
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*
* @retval 0 on success
* @retval -1 on failure
*/
nve32_t osi_macsec_en(struct osi_core_priv_data *const osi_core,
nveu32_t enable);
/**
* @brief osi_macsec_config - Updates SC or SA in the macsec
*
* @note
* Algorithm:
* - Return -1 if passed params are invalid
* - Return -1 if osi core or ops is null
* - Update/add/delete SC/SA
* - Refer to MACSEC column of <<******, (sequence diagram)>> for API details.
* - TraceID: ***********
*
* @param[in] osi_core: OSI core private data structure
 * @param[in] sc: Pointer to the sc that needs to be added/deleted/updated
 * @param[in] enable: Parameter to enable (add/update) or disable (delete)
 *	      the SC/SA — NOTE(review): confirm semantics against implementation
 * @param[in] ctlr: Controller selected
 * @param[out] kt_idx: Pointer to the kt_index passed to OSD
*
* @pre MACSEC needs to be out of reset and proper clock configured.
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*
* @retval 0 on success
* @retval -1 on failure
*/
nve32_t osi_macsec_config(struct osi_core_priv_data *const osi_core,
struct osi_macsec_sc_info *const sc,
nveu32_t enable, nveu16_t ctlr,
nveu16_t *kt_idx);
/**
* @brief osi_macsec_read_mmc - Updates the mmc counters
*
* @note
* Algorithm:
* - Return -1 if osi core or ops is null
* - Updates the mcc counters in osi_core structure
* - Refer to MACSEC column of <<******, (sequence diagram)>> for API details.
* - TraceID: ***********
*
* @param[out] osi_core: OSI core private data structure
*
* @pre MACSEC needs to be out of reset and proper clock configured.
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*
* @retval 0 on success
* @retval -1 on failure
*/
nve32_t osi_macsec_read_mmc(struct osi_core_priv_data *const osi_core);
/**
* @brief osi_macsec_config_dbg_buf - Reads the debug buffer captured
*
* @note
* Algorithm:
* - Return -1 if osi core or ops is null
* - Reads the dbg buffers captured
* - Refer to MACSEC column of <<******, (sequence diagram)>> for API details.
* - TraceID: ***********
*
* @param[in] osi_core: OSI core private data structure
* @param[out] dbg_buf_config: dbg buffer data captured
*
* @pre MACSEC needs to be out of reset and proper clock configured.
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*
* @retval 0 on success
* @retval -1 on failure
*/
nve32_t osi_macsec_config_dbg_buf(
struct osi_core_priv_data *const osi_core,
struct osi_macsec_dbg_buf_config *const dbg_buf_config);
/**
* @brief osi_macsec_dbg_events_config - Enables debug buffer events
*
* @note
* Algorithm:
* - Return -1 if osi core or ops is null
* - Enables specific events to capture debug buffers
* - Refer to MACSEC column of <<******, (sequence diagram)>> for API details.
* - TraceID: ***********
*
* @param[in] osi_core: OSI core private data structure
* @param[in] dbg_buf_config: dbg buffer data captured
*
* @pre MACSEC needs to be out of reset and proper clock configured.
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*
* @retval 0 on success
* @retval -1 on failure
*/
nve32_t osi_macsec_dbg_events_config(
struct osi_core_priv_data *const osi_core,
struct osi_macsec_dbg_buf_config *const dbg_buf_config);
/**
* @brief osi_macsec_get_sc_lut_key_index - API to get key index for a given SCI
*
* @note
* Algorithm:
* - Return -1 if osi core or ops is null
* - gets the key index for the given sci
* - Refer to MACSEC column of <<******, (sequence diagram)>> for API details.
* - TraceID: ***********
*
* @param[in] osi_core: OSI core private data structure
* @param[in] sci: Pointer to sci that needs to be found
* @param[out] key_index: Pointer to key_index
* @param[in] ctlr: macsec controller selected
*
* @pre MACSEC needs to be out of reset and proper clock configured.
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*
* @retval 0 on success
* @retval -1 on failure
*/
nve32_t osi_macsec_get_sc_lut_key_index(
struct osi_core_priv_data *const osi_core,
nveu8_t *sci, nveu32_t *key_index, nveu16_t ctlr);
/**
* @brief osi_macsec_update_mtu - Update the macsec mtu in run-time
*
* @note
* Algorithm:
* - Return -1 if osi core or ops is null
* - Updates the macsec mtu
* - Refer to MACSEC column of <<******, (sequence diagram)>> for API details.
* - TraceID: ***********
*
* @param[in] osi_core: OSI core private data structure
* @param[in] mtu: mtu that needs to be programmed
*
* @pre MACSEC needs to be out of reset and proper clock configured.
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*
* @retval 0 on success
* @retval -1 on failure
*/
nve32_t osi_macsec_update_mtu(struct osi_core_priv_data *const osi_core,
nveu32_t mtu);
#endif /* MACSEC_SUPPORT */
#endif /* INCLUDED_OSI_MACSEC_H */

View File

@@ -1,351 +0,0 @@
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_COMMON_H
#define INCLUDED_COMMON_H
#include "../osi/common/type.h"
#include <osi_common.h>
/**
* @addtogroup Generic helper macros
*
* @brief These are Generic helper macros used at various places.
* @{
*/
#define RETRY_COUNT 1000U
#define COND_MET 0
#define COND_NOT_MET 1
#define RETRY_DELAY 1U
/** @} */
/**
* @brief Maximum number of supported MAC IP types (EQOS and MGBE)
*/
#define MAX_MAC_IP_TYPES 2U
/**
* @brief osi_readl_poll_timeout - Periodically poll an address until
* a condition is met or a timeout occurs
*
* @param[in] addr: Memory mapped address.
* @param[in] val: Variable to read the value.
* @param[in] cond: Break condition (usually involving @val).
* @param[in] delay_us: Maximum time to sleep between reads in us.
* @param[in] retry: Retry count.
* @note Physical address has to be memmory mapped.
*
* @retval 0 on success
* @retval -1 on failure.
*/
#define osi_readl_poll_timeout(addr, fn, val, cond, delay_us, retry) \
({ \
unsigned int count = 0; \
while (count++ < retry) { \
val = osi_readl((unsigned char *)addr); \
if ((cond)) { \
break; \
} \
fn(delay_us); \
} \
(cond) ? 0 : -1; \
})
struct osi_core_priv_data;
/**
 * @brief osi_lock_init - Initialize lock to unlocked state.
 *
 * @note
 * Algorithm:
 *  - Set lock to unlocked state.
 *
 * @param[in] lock - Pointer to lock to be initialized
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: No
 * - De-initialization: No
 */
static inline void osi_lock_init(nveu32_t *lock)
{
	/* Plain store is sufficient: the lock must not be in use yet. */
	*lock = OSI_UNLOCKED;
}
/**
 * @brief osi_lock_irq_enabled - Spin lock. Busy loop till lock is acquired.
 *
 * @note
 * Algorithm:
 *  - Atomic compare and swap operation till lock is held.
 *
 * @param[in] lock - Pointer to lock to be acquired.
 *
 * @note
 *  - Does not disable irq. Do not call this API to acquire any
 *    lock that is shared between top/bottom half. It will result in deadlock.
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: Yes
 * - De-initialization: No
 */
static inline void osi_lock_irq_enabled(nveu32_t *lock)
{
	nveu32_t prev;

	/* Attempt an atomic UNLOCKED -> LOCKED transition until the CAS
	 * reports the previous value was OSI_UNLOCKED (i.e. we took it).
	 * Busy-waits; deadlocks if an ISR tries to take the same lock.
	 */
	do {
		prev = __sync_val_compare_and_swap(lock, OSI_UNLOCKED,
						   OSI_LOCKED);
	} while (prev != OSI_UNLOCKED);
}
/**
 * @brief osi_unlock_irq_enabled - Release lock.
 *
 * @note
 * Algorithm:
 *  - Atomic compare and swap operation to release lock.
 *
 * @param[in] lock - Pointer to lock to be released.
 *
 * @note
 *  - Does not disable irq. Do not call this API to release any
 *    lock that is shared between top/bottom half.
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: Yes
 * - De-initialization: No
 */
static inline void osi_unlock_irq_enabled(nveu32_t *lock)
{
	/* Atomically flip LOCKED -> UNLOCKED. If the lock was already
	 * unlocked the CAS is a harmless no-op, so the result is ignored.
	 */
	(void)__sync_val_compare_and_swap(lock, OSI_LOCKED, OSI_UNLOCKED);
}
/**
 * @brief osi_readl - Read a memory mapped register.
 *
 * @param[in] addr: Memory mapped address.
 *
 * @pre Physical address has to be memory mapped.
 *
 * @return Data from memory mapped register - success.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: Yes
 * - De-initialization: Yes
 */
static inline nveu32_t osi_readl(void *addr)
{
	const volatile nveu32_t *reg = (volatile nveu32_t *)addr;

	return *reg;
}
/**
 * @brief osi_writel - Write to a memory mapped register.
 *
 * @param[in] val: Value to be written.
 * @param[in] addr: Memory mapped address.
 *
 * @pre Physical address has to be memory mapped.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: Yes
 * - De-initialization: Yes
 */
static inline void osi_writel(nveu32_t val, void *addr)
{
	volatile nveu32_t *reg = (volatile nveu32_t *)addr;

	*reg = val;
}
/**
 * @brief osi_readla - Read a memory mapped register.
 *
 * @note
 * The difference between osi_readla & osi_readl is the priv argument.
 * In case of ethernet server, priv is used to define the policy for each VM.
 * In case of non virtualization the priv argument is ignored.
 *
 * @param[in] priv: Priv address (unused in this implementation).
 * @param[in] addr: Memory mapped address.
 *
 * @note Physical address has to be memory mapped.
 *
 * @return Data from memory mapped register - success.
 */
static inline nveu32_t osi_readla(OSI_UNUSED void *priv, void *addr)
{
	const volatile nveu32_t *reg = (volatile nveu32_t *)addr;

	return *reg;
}
/**
 * @brief osi_writela - Write to a memory mapped register.
 *
 * @note
 * The difference between osi_writela & osi_writel is the priv argument.
 * In case of ethernet server, priv is used to define the policy for each VM.
 * In case of non virtualization the priv argument is ignored.
 *
 * @param[in] priv: Priv address (unused in this implementation).
 * @param[in] val: Value to be written.
 * @param[in] addr: Memory mapped address.
 *
 * @note Physical address has to be memory mapped.
 */
static inline void osi_writela(OSI_UNUSED void *priv, nveu32_t val, void *addr)
{
	volatile nveu32_t *reg = (volatile nveu32_t *)addr;

	*reg = val;
}
/**
 * @brief validate_mac_ver_update_chans - Validates mac version and update chan
 *
 * @param[in] mac_ver: MAC version read.
 * @param[out] max_chans: Maximum channel number (written only for valid MACs).
 *
 * @note MAC has to be out of reset.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: No
 * - De-initialization: No
 *
 * @retval 0 - for not Valid MAC
 * @retval 1 - for Valid MAC
 */
static inline nve32_t validate_mac_ver_update_chans(nveu32_t mac_ver,
						    nveu32_t *max_chans)
{
	nve32_t valid = 1;

	if ((mac_ver == OSI_EQOS_MAC_4_10) ||
	    (mac_ver == OSI_EQOS_MAC_5_00)) {
		*max_chans = OSI_EQOS_XP_MAX_CHANS;
	} else if (mac_ver == OSI_EQOS_MAC_5_30) {
		*max_chans = OSI_EQOS_MAX_NUM_CHANS;
	} else if ((mac_ver == OSI_MGBE_MAC_3_00) ||
		   (mac_ver == OSI_MGBE_MAC_3_10) ||
		   (mac_ver == OSI_MGBE_MAC_4_00)) {
		*max_chans = OSI_MGBE_MAX_NUM_CHANS;
	} else {
		/* Unknown MAC version: max_chans is left untouched. */
		valid = 0;
	}

	return valid;
}
/**
 * @brief osi_memset - osi memset
 *
 * @param[out] s: source that need to be set
 * @param[in] c: value to fill in source (only the low byte is used,
 *	      matching standard memset semantics)
 * @param[in] count: first n bytes of source
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: Yes
 * - De-initialization: No
 */
static inline void osi_memset(void *s, nveu32_t c, nveu64_t count)
{
	nveu8_t *xs = OSI_NULL;
	nveu64_t temp = count;
	nveu8_t byte;

	if (s == OSI_NULL) {
		return;
	}

	/* BUG FIX: the previous guard (c < OSI_UCHAR_MAX) skipped the store
	 * AND the pointer increment whenever c was 0xFF or larger, so e.g.
	 * osi_memset(buf, 0xFF, n) left the buffer completely untouched.
	 * Mask the fill value to a byte once, up front, as memset() does.
	 */
	byte = (nveu8_t)(c & 0xFFU);
	xs = (nveu8_t *)s;
	while (temp != 0UL) {
		*xs = byte;
		xs++;
		temp--;
	}
}
/**
 * @brief osi_memcpy - osi memcpy
 *
 * @param[out] dest: destination pointer
 * @param[in] src: source pointer
 * @param[in] n: number bytes of source
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: Yes
 * - De-initialization: No
 *
 * @retval 0 on success
 * @retval -1 when either pointer is NULL
 */
static inline nve32_t osi_memcpy(void *dest, void *src, nveu64_t n)
{
	nve8_t *to;
	nve8_t *from;
	nveu64_t remaining = n;

	if ((src == OSI_NULL) || (dest == OSI_NULL)) {
		return -1;
	}

	to = (nve8_t *)dest;
	from = (nve8_t *)src;
	/* Forward byte-wise copy (regions must not overlap). */
	while (remaining > 0UL) {
		*to = *from;
		to++;
		from++;
		remaining--;
	}

	return 0;
}
/**
 * @brief osi_memcmp - byte-wise comparison of two buffers.
 *
 * @note The result is relative to @src: -1 when the first differing byte
 * of @src is smaller than that of @dest, 1 when it is larger. This is the
 * reverse of standard memcmp(dest, src) sign convention and is kept as-is
 * for existing callers. A NULL input also yields -1, which is ambiguous
 * with "src compares lower" — callers must validate pointers themselves
 * if they need to distinguish the two.
 *
 * @param[in] dest: first buffer
 * @param[in] src: second buffer
 * @param[in] n: number of bytes to compare
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: Yes
 * - De-initialization: No
 *
 * @retval 0 when the first n bytes match
 * @retval -1 on NULL input or when src compares lower
 * @retval 1 when src compares higher
 */
static inline nve32_t osi_memcmp(void *dest, void *src, nve32_t n)
{
	nve32_t i;
	nve8_t *csrc = (nve8_t *)src;
	nve8_t *cdest = (nve8_t *)dest;

	/* Consistency fix: brace all control bodies like the rest of
	 * this file (behavior unchanged).
	 */
	if ((src == OSI_NULL) || (dest == OSI_NULL)) {
		return -1;
	}

	for (i = 0; i < n; i++) {
		if (csrc[i] < cdest[i]) {
			return -1;
		} else if (csrc[i] > cdest[i]) {
			return 1;
		} else {
			/* bytes equal - keep scanning */
		}
	}

	return 0;
}
#endif

View File

@@ -1,73 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "eqos_common.h"
#include "../osi/common/common.h"
/**
 * @brief eqos_get_systime_from_mac - Get PTP system time from the EQOS MAC.
 *
 * Reads the nanoseconds register, then the seconds register, then the
 * nanoseconds register again so a seconds rollover between the reads can
 * be detected; the read order is therefore load-bearing.
 *
 * @param[in] addr: MAC base address (memory mapped).
 *
 * @retval current system time in nanoseconds (stays 0 when the seconds
 * register reads back as UINT_MAX — overflow guard).
 */
nveul64_t eqos_get_systime_from_mac(void *addr)
{
	nveul64_t ns1, ns2, ns = 0;
	nveu32_t varmac_stnsr, temp1;
	nveu32_t varmac_stsr;

	/* First nanoseconds sample */
	varmac_stnsr = osi_readl((nveu8_t *)addr + EQOS_MAC_STNSR);
	temp1 = (varmac_stnsr & EQOS_MAC_STNSR_TSSS_MASK);
	ns1 = (nveul64_t)temp1;

	/* Seconds sample taken between the two nanoseconds samples */
	varmac_stsr = osi_readl((nveu8_t *)addr + EQOS_MAC_STSR);

	/* Second nanoseconds sample */
	varmac_stnsr = osi_readl((nveu8_t *)addr + EQOS_MAC_STNSR);
	temp1 = (varmac_stnsr & EQOS_MAC_STNSR_TSSS_MASK);
	ns2 = (nveul64_t)temp1;

	/* if ns1 is greater than ns2, it means nsec counter rollover
	 * happened. In that case read the updated sec counter again
	 */
	if (ns1 >= ns2) {
		varmac_stsr = osi_readl((nveu8_t *)addr + EQOS_MAC_STSR);
		/* convert sec/high time value to nanosecond */
		if (varmac_stsr < UINT_MAX) {
			ns = ns2 + (varmac_stsr * OSI_NSEC_PER_SEC);
		}
	} else {
		/* convert sec/high time value to nanosecond */
		if (varmac_stsr < UINT_MAX) {
			ns = ns1 + (varmac_stsr * OSI_NSEC_PER_SEC);
		}
	}

	return ns;
}
/**
 * @brief eqos_is_mac_enabled - Checks if the EQOS MAC is enabled.
 *
 * The MAC counts as enabled only when both the transmitter (TE) and
 * receiver (RE) bits are set in MCR.
 *
 * @param[in] addr: MAC base address (memory mapped).
 *
 * @retval OSI_ENABLE if MAC enabled.
 * @retval OSI_DISABLE otherwise.
 */
nveu32_t eqos_is_mac_enabled(void *addr)
{
	const nveu32_t mask = (EQOS_MCR_TE | EQOS_MCR_RE);
	nveu32_t mcr;

	mcr = osi_readl((nveu8_t *)addr + EQOS_MAC_MCR);

	return ((mcr & mask) == mask) ? OSI_ENABLE : OSI_DISABLE;
}

View File

@@ -1,83 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_EQOS_COMMON_H
#define INCLUDED_EQOS_COMMON_H
#include <local_common.h>
/**
* @brief PTP Time read registers
* @{
*/
#define EQOS_MAC_STSR 0x0B08
#define EQOS_MAC_STNSR 0x0B0C
#define EQOS_MAC_STNSR_TSSS_MASK 0x7FFFFFFFU
/** @} */
/**
* @brief Common MAC MCR register and its bits
* @{
*/
#define EQOS_MAC_MCR 0x0000
#define EQOS_MCR_TE OSI_BIT(0)
#define EQOS_MCR_RE OSI_BIT(1)
/** @} */
/**
 * @brief eqos_get_systime_from_mac - Get system time from MAC
*
* @param[in] addr: Base address indicating the start of
* memory mapped IO region of the MAC.
*
* @pre MAC should be init and started. see osi_start_mac()
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*
 * @retval current system time in nanoseconds.
*/
nveul64_t eqos_get_systime_from_mac(void *addr);
/**
* @brief eqos_is_mac_enabled - Checks if MAC is enabled or not.
*
* @param[in] addr: Base address indicating the start of
* memory mapped IO region of the MAC.
*
* @pre MAC should be init and started. see osi_start_mac()
*
* @note
* API Group:
* - Initialization: Yes
* - Run time: No
* - De-initialization: No
*
* @retval OSI_ENABLE if MAC enabled.
* @retval OSI_DISABLE otherwise.
*/
nveu32_t eqos_is_mac_enabled(void *addr);
#endif /* INCLUDED_EQOS_COMMON_H */

View File

@@ -1,105 +0,0 @@
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LOCAL_COMMON_H
#define LOCAL_COMMON_H
#include <osi_common.h>
/**
* @brief TX timestamp helper MACROS
* @{
*/
#define CHAN_START_POSITION 6U
#define PKT_ID_CNT ((nveu32_t)1 << CHAN_START_POSITION)
/* First 6 bytes of idx and last 4 bytes of chan(+1 to avoid pkt_id to be 0) */
#define GET_TX_TS_PKTID(idx, c) (((++(idx)) & (PKT_ID_CNT - 1U)) | \
(((c) + 1U) << CHAN_START_POSITION))
/** @} */
/**
 * @brief div_u64_rem - updates remainder and returns Quotient
*
* @note
* Algorithm:
* - Dividend will be divided by divisor and stores the
* remainder value and returns quotient
*
* @param[in] dividend: Dividend value
* @param[in] divisor: Divisor value
* @param[out] remain: Remainder
*
* @pre MAC IP should be out of reset and need to be initialized as the
* requirements
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*
* @retval Quotient
*/
nveu64_t div_u64_rem(nveu64_t dividend, nveu64_t divisor,
nveu64_t *remain);
/**
* @brief common_get_systime_from_mac - Get system time
*
* @param[in] addr: Address of base register.
* @param[in] mac: MAC HW type.
* @param[out] sec: Value read in Seconds.
* @param[out] nsec: Value read in Nano seconds.
*
* @pre MAC should be init and started. see osi_start_mac()
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*
 * @retval none
*/
void common_get_systime_from_mac(void *addr, nveu32_t mac,
nveu32_t *sec, nveu32_t *nsec);
/**
* @brief common_is_mac_enabled - Checks if MAC is enabled or not.
*
* @param[in] addr: Address of base register.
* @param[in] mac: MAC HW type.
*
* @pre MAC should be init and started. see osi_start_mac()
*
* @note
* API Group:
* - Initialization: Yes
* - Run time: No
* - De-initialization: No
*
* @retval OSI_ENABLE if MAC enabled.
* @retval OSI_DISABLE otherwise.
*/
nveu32_t common_is_mac_enabled(void *addr, nveu32_t mac);
#endif /* LOCAL_COMMON_H */

View File

@@ -1,85 +0,0 @@
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "../osi/common/common.h"
#include "mgbe_common.h"
/**
 * @brief mgbe_get_systime_from_mac - Get system time from MAC
 *
 * Algorithm: Get current system time. Reads nanoseconds, then seconds,
 * then nanoseconds again so a seconds rollover between the reads can be
 * detected; the read order is therefore load-bearing.
 *
 * @param[in] addr: Base address indicating the start of
 * memory mapped IO region of the MAC.
 *
 * @note MAC should be init and started. see osi_start_mac()
 *
 * @retval current system time in nanoseconds (stays 0 when the seconds
 * register reads back as UINT_MAX — overflow guard).
 */
nveul64_t mgbe_get_systime_from_mac(void *addr)
{
	nveul64_t ns1, ns2, ns = 0;
	nveu32_t varmac_stnsr, temp1;
	nveu32_t varmac_stsr;

	/* First nanoseconds sample */
	varmac_stnsr = osi_readl((nveu8_t *)addr + MGBE_MAC_STNSR);
	temp1 = (varmac_stnsr & MGBE_MAC_STNSR_TSSS_MASK);
	ns1 = (nveul64_t)temp1;

	/* Seconds sample taken between the two nanoseconds samples */
	varmac_stsr = osi_readl((nveu8_t *)addr + MGBE_MAC_STSR);

	/* Second nanoseconds sample */
	varmac_stnsr = osi_readl((nveu8_t *)addr + MGBE_MAC_STNSR);
	temp1 = (varmac_stnsr & MGBE_MAC_STNSR_TSSS_MASK);
	ns2 = (nveul64_t)temp1;

	/* if ns1 is greater than ns2, it means nsec counter rollover
	 * happened. In that case read the updated sec counter again
	 */
	if (ns1 >= ns2) {
		varmac_stsr = osi_readl((nveu8_t *)addr + MGBE_MAC_STSR);
		/* convert sec/high time value to nanosecond */
		if (varmac_stsr < UINT_MAX) {
			ns = ns2 + (varmac_stsr * OSI_NSEC_PER_SEC);
		}
	} else {
		/* convert sec/high time value to nanosecond */
		if (varmac_stsr < UINT_MAX) {
			ns = ns1 + (varmac_stsr * OSI_NSEC_PER_SEC);
		}
	}

	return ns;
}
/**
 * @brief mgbe_is_mac_enabled - Checks if the MGBE MAC is enabled.
 *
 * Only the transmit-enable (TE) bit is consulted for MGBE.
 *
 * @param[in] addr: MAC base address (memory mapped).
 *
 * @retval OSI_ENABLE if MAC enabled.
 * @retval OSI_DISABLE otherwise.
 */
nveu32_t mgbe_is_mac_enabled(void *addr)
{
	nveu32_t tx_ctl;

	tx_ctl = osi_readl((nveu8_t *)addr + MGBE_MAC_TX);

	return ((tx_ctl & MGBE_MCR_TE) == MGBE_MCR_TE) ?
		OSI_ENABLE : OSI_DISABLE;
}

View File

@@ -1,43 +0,0 @@
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_MGBE_COMMON_H
#define INCLUDED_MGBE_COMMON_H
/**
* @addtogroup MGBE-MAC MGBE MAC common HW feature registers
*
* @{
*/
#define MGBE_MAC_STSR 0x0D08
#define MGBE_MAC_STNSR 0x0D0C
#define MGBE_MAC_STNSR_TSSS_MASK 0x7FFFFFFFU
#define MGBE_MAC_TX 0x0000
#define MGBE_MCR_TE OSI_BIT(0)
/** @} */
nveul64_t mgbe_get_systime_from_mac(void *addr);
nveu32_t mgbe_is_mac_enabled(void *addr);
#endif /* INCLUDED_MGBE_COMMON_H */

View File

@@ -1,76 +0,0 @@
/*
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "eqos_common.h"
#include "mgbe_common.h"
#include "../osi/common/common.h"
/**
 * @brief common_get_systime_from_mac - Get system time for the given MAC type.
 *
 * Dispatches to the EQOS or MGBE time reader through a function table and
 * splits the nanosecond timestamp into seconds and nanoseconds.
 *
 * @param[in] addr: MAC base address (memory mapped).
 * @param[in] mac: MAC HW type, used as index into the dispatch table.
 * @param[out] sec: Seconds part of the time (untouched on invalid mac).
 * @param[out] nsec: Nanoseconds part of the time (untouched on invalid mac).
 */
void common_get_systime_from_mac(void *addr, nveu32_t mac, nveu32_t *sec,
				 nveu32_t *nsec)
{
	nveu64_t temp;
	nveu64_t remain;
	nveul64_t ns;
	typedef nveul64_t (*get_time)(void *addr);
	get_time i_ops[MAX_MAC_IP_TYPES] = {
		eqos_get_systime_from_mac, mgbe_get_systime_from_mac
	};

	/* BUG FIX: guard the function-table index. An out-of-range MAC
	 * type previously read past i_ops[] and called through garbage.
	 */
	if (mac >= MAX_MAC_IP_TYPES) {
		return;
	}

	ns = i_ops[mac](addr);
	temp = div_u64_rem((nveu64_t)ns, OSI_NSEC_PER_SEC, &remain);
	/* MISRA-style range guards before narrowing to 32 bits */
	if (temp < UINT_MAX) {
		*sec = (nveu32_t)temp;
	} else {
		/* do nothing here */
	}
	if (remain < UINT_MAX) {
		*nsec = (nveu32_t)remain;
	} else {
		/* do nothing here */
	}
}
/**
 * @brief common_is_mac_enabled - Checks if the given MAC type is enabled.
 *
 * @param[in] addr: MAC base address (memory mapped).
 * @param[in] mac: MAC HW type, used as index into the dispatch table.
 *
 * @retval OSI_ENABLE if MAC enabled.
 * @retval OSI_DISABLE otherwise (including an out-of-range mac value).
 */
nveu32_t common_is_mac_enabled(void *addr, nveu32_t mac)
{
	typedef nveu32_t (*mac_enable_arr)(void *addr);
	mac_enable_arr i_ops[MAX_MAC_IP_TYPES] = {
		eqos_is_mac_enabled, mgbe_is_mac_enabled
	};

	/* BUG FIX: guard the function-table index. An out-of-range MAC
	 * type previously read past i_ops[] and called through garbage.
	 */
	if (mac >= MAX_MAC_IP_TYPES) {
		return OSI_DISABLE;
	}

	return i_ops[mac](addr);
}
/**
 * @brief div_u64_rem - unsigned division returning quotient, storing remainder.
 *
 * @param[in] dividend: value to divide.
 * @param[in] divisor: value to divide by.
 * @param[out] remain: remainder of the division (0 when divisor is 0).
 *
 * @retval quotient of the division (0 when divisor is 0).
 */
nveu64_t div_u64_rem(nveu64_t dividend, nveu64_t divisor,
		     nveu64_t *remain)
{
	nveu64_t ret = 0;

	if (divisor != 0U) {
		*remain = dividend % divisor;
		ret = dividend / divisor;
	} else {
		/* BUG FIX: *remain was previously left unwritten on a
		 * divide-by-zero, so callers read an indeterminate value.
		 * Define both outputs as zero instead.
		 */
		*remain = 0U;
	}

	return ret;
}

View File

@@ -1,70 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_TYPE_H
#define INCLUDED_TYPE_H
/*
* @addtogroup typedef related info
*
* @brief typedefs that indicate size and signness
* @{
*/
/* Following added to avoid misraC 4.6
* Here we are defining intermediate type
*/
/** intermediate type for unsigned int */
typedef unsigned int my_uint32_t;
/** intermediate type for int */
typedef int my_int32_t;
/** intermediate type for unsigned short */
typedef unsigned short my_uint16_t;
/** intermediate type for short */
typedef short my_int16_t;
/** intermediate type for char */
typedef char my_int8_t;
/** intermediate type for unsigned char */
typedef unsigned char my_uint8_t;
/** intermediate type for unsigned long long */
typedef unsigned long long my_ulint_64;
/** intermediate type for unsigned long */
typedef unsigned long my_uint64_t;
/* Actual type used in code */
/** typedef equivalent to unsigned int */
typedef my_uint32_t nveu32_t;
/** typedef equivalent to int */
typedef my_int32_t nve32_t;
/** typedef equivalent to unsigned short */
typedef my_uint16_t nveu16_t;
/** typedef equivalent to short */
typedef my_int16_t nve16_t;
/** typedef equivalent to char */
typedef my_int8_t nve8_t;
/** typedef equivalent to unsigned char */
typedef my_uint8_t nveu8_t;
/** typedef equivalent to unsigned long long */
typedef my_ulint_64 nveul64_t;
/** typedef equivalent to unsigned long */
typedef my_uint64_t nveu64_t;
/** @} */
#endif /* INCLUDED_TYPE_H */

View File

@@ -1,38 +0,0 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# libnvethernetrm interface makefile fragment
#
###############################################################################
# Only meaningful during the build system's shared-library interface pass;
# the fragment is a no-op when included in any other pass.
ifdef NV_INTERFACE_FLAG_SHARED_LIBRARY_SECTION
NV_INTERFACE_NAME := nvethernetrm
# Exported library name: libnvethernetrm
NV_INTERFACE_EXPORTS := lib$(NV_INTERFACE_NAME)
# Public headers made visible to components depending on this interface.
NV_INTERFACE_PUBLIC_INCLUDES := \
./include
endif
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -1,75 +0,0 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################
# Shared-library component build for the nvethernetrm OSI core library.
ifdef NV_COMPONENT_FLAG_SHARED_LIBRARY_SECTION
include $(NV_BUILD_START_COMPONENT)
NV_COMPONENT_STRICT_WARNINGS_qnx_64 := 1
NV_COMPONENT_NAME := nvethernetrm
NV_COMPONENT_OWN_INTERFACE_DIR := .
# Core-side sources; common OSI sources are pulled in by absolute path so
# this fragment can live inside osi/core.
NV_COMPONENT_SOURCES := \
eqos_core.c \
eqos_mmc.c \
osi_core.c \
vlan_filter.c \
osi_hal.c \
ivc_core.c \
frp.c \
mgbe_core.c \
xpcs.c \
mgbe_mmc.c \
debug.c \
core_common.c \
$(NV_SOURCE)/nvethernetrm/osi/common/osi_common.c \
$(NV_SOURCE)/nvethernetrm/osi/common/eqos_common.c \
$(NV_SOURCE)/nvethernetrm/osi/common/mgbe_common.c \
$(NV_SOURCE)/nvethernetrm/osi/core/macsec.c
# MACsec support is compiled out by default; uncomment to enable.
#NV_COMPONENT_CFLAGS += -DMACSEC_SUPPORT
#NV_COMPONENT_CFLAGS += -DMACSEC_KEY_PROGRAM
#NV_COMPONENT_CFLAGS += -DDEBUG_MACSEC
# Target-OS selector consumed by OSI conditional code.
ifeq ($(NV_BUILD_CONFIGURATION_OS_IS_LINUX),1)
NV_COMPONENT_CFLAGS += -DLINUX_OS
else ifeq ($(NV_BUILD_CONFIGURATION_OS_IS_QNX),1)
NV_COMPONENT_CFLAGS += -DQNX_OS
endif
# Debug hooks are excluded from safety builds.
ifeq ($(NV_BUILD_CONFIGURATION_IS_SAFETY),0)
NV_COMPONENT_CFLAGS += -DOSI_DEBUG
endif
NV_COMPONENT_INCLUDES := \
$(NV_SOURCE)/nvethernetrm/include \
$(NV_SOURCE)/nvethernetrm/osi/common/include
include $(NV_BUILD_SHARED_LIBRARY)
endif
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -1,228 +0,0 @@
/*
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "../osi/common/common.h"
#include "core_common.h"
#include "mgbe_core.h"
#include "eqos_core.h"
/**
 * @brief hw_est_read - indirect read of the GCL / Software owned list
 * (SWOL)
 *
 * @param[in] osi_core: OSI core private data structure; osi_core->base is
 *            the MAC base IOVA address.
 * @param[in] addr_val: Address offset for the indirect read.
 * @param[out] data: Location where the value read from HW is stored.
 * @param[in] gcla: Gate Control List Address, 0 for ETS register,
 *            1 for GCL memory.
 * @param[in] bunk: Memory bank from which values will be read. Possible
 *            values: 0 or MTL_EST_DBGB.
 * @param[in] mac: MAC index
 *
 * @note MAC should be init and started. see osi_start_mac()
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
static inline nve32_t hw_est_read(struct osi_core_priv_data *osi_core,
				  nveu32_t addr_val, nveu32_t *data,
				  nveu32_t gcla, nveu32_t bunk,
				  nveu32_t mac)
{
	nve32_t retry = 1000;
	nveu32_t val = 0U;
	nve32_t ret;
	const nveu32_t MTL_EST_GCL_CONTROL[MAX_MAC_IP_TYPES] = {
		EQOS_MTL_EST_GCL_CONTROL, MGBE_MTL_EST_GCL_CONTROL};
	const nveu32_t MTL_EST_DATA[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_DATA,
							 MGBE_MTL_EST_DATA};

	*data = 0U;
	/* Build the control word: R1W0 selects a read, DBGM + bunk pick the
	 * debug bank, GCRR is set only for ETS register access (gcla == 0). */
	val &= ~MTL_EST_ADDR_MASK;
	val |= (gcla == 1U) ? 0x0U : MTL_EST_GCRR;
	val |= MTL_EST_SRWO | MTL_EST_R1W0 | MTL_EST_DBGM | bunk | addr_val;
	osi_writela(osi_core, val, (nveu8_t *)osi_core->base +
		    MTL_EST_GCL_CONTROL[mac]);

	/* Poll until HW clears the self-clearing SRWO busy bit.
	 * Fix: the delay belongs on the still-busy path; previously the code
	 * spun with no delay while busy and delayed once after completion. */
	while (--retry > 0) {
		val = osi_readla(osi_core, (nveu8_t *)osi_core->base +
				 MTL_EST_GCL_CONTROL[mac]);
		if ((val & MTL_EST_SRWO) != MTL_EST_SRWO) {
			break;
		}
		osi_core->osd_ops.udelay(OSI_DELAY_1US);
	}

	/* Fail on HW-reported error or on poll timeout. */
	if (((val & MTL_EST_ERR0) == MTL_EST_ERR0) ||
	    (retry <= 0)) {
		ret = -1;
		goto err;
	}

	*data = osi_readla(osi_core, (nveu8_t *)osi_core->base +
			   MTL_EST_DATA[mac]);
	ret = 0;
err:
	return ret;
}
/**
* @brief gcl_validate - validate GCL from user
*
* Algorithm: validate GCL size and width of time interval value
*
* @param[in] osi_core: OSI core private data structure.
* @param[in] est: Configuration input argument.
* @param[in] btr: New base time; btr[0] is nanoseconds, btr[1] is seconds
*            (see the btr_new computation below).
* @param[in] mac: MAC index
*
* @note MAC should be init and started. see osi_start_mac()
*
* @retval 0 on success
* @retval -1 on failure.
*/
nve32_t gcl_validate(struct osi_core_priv_data *const osi_core,
struct osi_est_config *const est,
const nveu32_t *btr, nveu32_t mac)
{
const struct core_local *l_core = (struct core_local *)osi_core;
/* Per-IP (EQOS vs MGBE) register offsets, indexed by 'mac'. */
const nveu32_t PTP_CYCLE_8[MAX_MAC_IP_TYPES] = {EQOS_8PTP_CYCLE,
MGBE_8PTP_CYCLE};
const nveu32_t MTL_EST_CONTROL[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL,
MGBE_MTL_EST_CONTROL};
const nveu32_t MTL_EST_STATUS[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_STATUS,
MGBE_MTL_EST_STATUS};
const nveu32_t MTL_EST_BTR_LOW[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_BTR_LOW,
MGBE_MTL_EST_BTR_LOW};
const nveu32_t MTL_EST_BTR_HIGH[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_BTR_HIGH,
MGBE_MTL_EST_BTR_HIGH};
const nveu32_t MTL_EST_CTR_LOW[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CTR_LOW,
MGBE_MTL_EST_CTR_LOW};
const nveu32_t MTL_EST_CTR_HIGH[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CTR_HIGH,
MGBE_MTL_EST_CTR_HIGH};
nveu32_t i;
nveu64_t sum_ti = 0U;
nveu64_t sum_tin = 0U;
nveu64_t ctr = 0U;
nveu64_t btr_new = 0U;
nveu32_t btr_l, btr_h, ctr_l, ctr_h;
nveu32_t bunk = 0U;
nveu32_t est_status;
nveu64_t old_btr, old_ctr;
nve32_t ret;
nveu32_t val = 0U;
nveu64_t rem = 0U;
/* Indirect reads of the currently-programmed BTR/CTR pair. */
const struct est_read hw_read_arr[4] = {
{&btr_l, MTL_EST_BTR_LOW[mac]},
{&btr_h, MTL_EST_BTR_HIGH[mac]},
{&ctr_l, MTL_EST_CTR_LOW[mac]},
{&ctr_h, MTL_EST_CTR_HIGH[mac]}};
/* List length must not exceed the HW-supported GCL depth. */
if (est->llr > l_core->gcl_dep) {
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
"input argument more than GCL depth\n",
(nveul64_t)est->llr);
return -1;
}
/* Cycle time and new base time in ns; index [1] is seconds, [0] is ns. */
ctr = ((nveu64_t)est->ctr[1] * OSI_NSEC_PER_SEC) + est->ctr[0];
btr_new = (((nveu64_t)btr[1] + est->btr_offset[1]) * OSI_NSEC_PER_SEC) +
(btr[0] + est->btr_offset[0]);
/* Validate each entry's width and the running interval sum vs CTR.
* NOTE(review): (ctr - sum_tin) and (ctr - sum_ti) are unsigned; if the
* running sums exceed ctr these subtractions wrap — confirm intended. */
for (i = 0U; i < est->llr; i++) {
if (est->gcl[i] <= l_core->gcl_width_val) {
sum_ti += ((nveu64_t)est->gcl[i] & l_core->ti_mask);
if ((sum_ti > ctr) &&
((ctr - sum_tin) >= PTP_CYCLE_8[mac])) {
continue;
} else if (((ctr - sum_ti) != 0U) &&
((ctr - sum_ti) < PTP_CYCLE_8[mac])) {
OSI_CORE_ERR(osi_core->osd,
OSI_LOG_ARG_INVALID,
"CTR issue due to trancate\n",
(nveul64_t)i);
return -1;
} else {
//do nothing
}
sum_tin = sum_ti;
continue;
}
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
"validation of GCL entry failed\n",
(nveul64_t)i);
return -1;
}
/* Check for BTR in case of new ETS while current GCL enabled */
val = osi_readla(osi_core,
(nveu8_t *)osi_core->base +
MTL_EST_CONTROL[mac]);
if ((val & MTL_EST_CONTROL_EEST) != MTL_EST_CONTROL_EEST) {
return 0;
}
/* Read EST_STATUS for bunk */
est_status = osi_readla(osi_core,
(nveu8_t *)osi_core->base +
MTL_EST_STATUS[mac]);
if ((est_status & MTL_EST_STATUS_SWOL) == 0U) {
bunk = MTL_EST_DBGB;
}
/* Read last BTR and CTR */
for (i = 0U; i < (sizeof(hw_read_arr) / sizeof(hw_read_arr[0])); i++) {
ret = hw_est_read(osi_core, hw_read_arr[i].addr,
hw_read_arr[i].var, OSI_DISABLE,
bunk, mac);
if (ret < 0) {
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
"Reading failed for index\n",
(nveul64_t)i);
return ret;
}
}
old_btr = btr_l + ((nveu64_t)btr_h * OSI_NSEC_PER_SEC);
old_ctr = ctr_l + ((nveu64_t)ctr_h * OSI_NSEC_PER_SEC);
/* New BTR must land a safe distance (>= 8 PTP cycles) from a multiple
* of the old cycle time, in either direction. */
if (old_btr > btr_new) {
rem = (old_btr - btr_new) % old_ctr;
if ((rem != OSI_NONE) && (rem < PTP_CYCLE_8[mac])) {
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
"invalid BTR", (nveul64_t)rem);
return -1;
}
} else if (btr_new > old_btr) {
rem = (btr_new - old_btr) % old_ctr;
if ((rem != OSI_NONE) && (rem < PTP_CYCLE_8[mac])) {
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
"invalid BTR", (nveul64_t)rem);
return -1;
}
} else {
// Nothing to do
}
return 0;
}

View File

@@ -1,61 +0,0 @@
/*
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_CORE_COMMON_H
#define INCLUDED_CORE_COMMON_H

#include "core_local.h"

/* Address field (bits 8-19) of MTL_EST_GCL_CONTROL used for indirect GCL
 * access.
 * Fix: the previous expression contained "OSI_BIT(16) | (17)", which ORs
 * the raw literal 17 (bits 0 and 4) into the mask instead of bit 17 —
 * those low bits collide with MTL_EST_SRWO and MTL_EST_DBGM below. */
#define MTL_EST_ADDR_MASK (OSI_BIT(8) | OSI_BIT(9) | \
			   OSI_BIT(10) | OSI_BIT(11) | \
			   OSI_BIT(12) | OSI_BIT(13) | \
			   OSI_BIT(14) | OSI_BIT(15) | \
			   OSI_BIT(16) | OSI_BIT(17) | \
			   OSI_BIT(18) | OSI_BIT(19))
/** Start Read/Write Op: remains set while the operation is in progress */
#define MTL_EST_SRWO OSI_BIT(0)
/** Read(1)/Write(0) select for the indirect operation */
#define MTL_EST_R1W0 OSI_BIT(1)
/** Gate Control Related Registers select (ETS registers vs GCL memory) */
#define MTL_EST_GCRR OSI_BIT(2)
/** Debug mode */
#define MTL_EST_DBGM OSI_BIT(4)
/** Debug bank select */
#define MTL_EST_DBGB OSI_BIT(5)
/** Error bit reported in MTL_EST_GCL_CONTROL after an operation */
#define MTL_EST_ERR0 OSI_BIT(20)
/** EST enable bit in MTL_EST_CONTROL */
#define MTL_EST_CONTROL_EEST OSI_BIT(0)
/** SW owned list bit in MTL_EST_STATUS */
#define MTL_EST_STATUS_SWOL OSI_BIT(7)
/**
 * @addtogroup typedef related info
 *
 * @brief typedefs that indicate variable address and memory addr
 * @{
 */
struct est_read {
	/** destination for the value read from HW */
	nveu32_t *var;
	/** memory register/address offset to read from */
	nveu32_t addr;
};
/** @} */

/**
 * @brief gcl_validate - validate a user-supplied Gate Control List
 * (depth, entry width, cycle time, and base-time consistency with the
 * currently-programmed list).
 *
 * @param[in] osi_core: OSI core private data structure.
 * @param[in] est: EST configuration input argument.
 * @param[in] btr: New base time (btr[0] = ns, btr[1] = s).
 * @param[in] mac: MAC index.
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
nve32_t gcl_validate(struct osi_core_priv_data *const osi_core,
		     struct osi_est_config *const est,
		     const nveu32_t *btr, nveu32_t mac);

#endif /* INCLUDED_CORE_COMMON_H */

View File

@@ -1,484 +0,0 @@
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_CORE_LOCAL_H
#define INCLUDED_CORE_LOCAL_H
#include <osi_core.h>
#include <local_common.h>
/**
* @brief Maximum number of OSI core instances.
*/
#ifndef MAX_CORE_INSTANCES
#define MAX_CORE_INSTANCES 10U
#endif
/**
* @brief Maximum number of interface operations.
*/
#define MAX_INTERFACE_OPS 2U
/**
* @brief Maximum number of timestamps stored in OSI from HW FIFO
* (PKT_ID_CNT packet IDs per channel, across all MGBE channels).
*/
#define MAX_TX_TS_CNT (PKT_ID_CNT * OSI_MGBE_MAX_NUM_CHANS)
/**
* @brief Interface core operations dispatch table; implemented once for
* direct HW access and once for IVC (see hw_interface_init_core_ops() /
* ivc_interface_init_core_ops() below).
*/
struct if_core_ops {
/** Interface function called to initialize MAC and MTL registers */
nve32_t (*if_core_init)(struct osi_core_priv_data *const osi_core,
nveu32_t tx_fifo_size, nveu32_t rx_fifo_size);
/** Interface function called to deinitialize MAC and MTL registers */
nve32_t (*if_core_deinit)(struct osi_core_priv_data *const osi_core);
/** Interface function called to write into a PHY reg over MDIO bus */
nve32_t (*if_write_phy_reg)(struct osi_core_priv_data *const osi_core,
const nveu32_t phyaddr,
const nveu32_t phyreg,
const nveu16_t phydata);
/** Interface function called to read a PHY reg over MDIO bus */
nve32_t (*if_read_phy_reg)(struct osi_core_priv_data *const osi_core,
const nveu32_t phyaddr,
const nveu32_t phyreg);
/** Initialize Interface core operations */
nve32_t (*if_init_core_ops)(struct osi_core_priv_data *const osi_core);
/** Interface function called to handle runtime commands */
nve32_t (*if_handle_ioctl)(struct osi_core_priv_data *osi_core,
struct osi_ioctl *data);
};
/**
* @brief Initialize MAC & MTL core operations.
*/
struct core_ops {
/** Called to poll for software reset bit */
nve32_t (*poll_for_swr)(struct osi_core_priv_data *const osi_core);
/** Called to initialize MAC and MTL registers */
nve32_t (*core_init)(struct osi_core_priv_data *const osi_core,
const nveu32_t tx_fifo_size,
const nveu32_t rx_fifo_size);
/** Called to deinitialize MAC and MTL registers */
void (*core_deinit)(struct osi_core_priv_data *const osi_core);
/** Called to start MAC Tx and Rx engine */
void (*start_mac)(struct osi_core_priv_data *const osi_core);
/** Called to stop MAC Tx and Rx engine */
void (*stop_mac)(struct osi_core_priv_data *const osi_core);
/** Called to handle common interrupt */
void (*handle_common_intr)(struct osi_core_priv_data *const osi_core);
/** Called to set the mode at MAC (full/duplex) */
nve32_t (*set_mode)(struct osi_core_priv_data *const osi_core,
const nve32_t mode);
/** Called to set the speed at MAC */
nve32_t (*set_speed)(struct osi_core_priv_data *const osi_core,
const nve32_t speed);
/** Called to do pad calibration */
nve32_t (*pad_calibrate)(struct osi_core_priv_data *const osi_core);
/** Called to configure MTL RxQ to forward the err pkt */
nve32_t (*config_fw_err_pkts)(struct osi_core_priv_data *const osi_core,
const nveu32_t qinx,
const nveu32_t fw_err);
/** Called to configure Rx Checksum offload engine */
nve32_t (*config_rxcsum_offload)(
struct osi_core_priv_data *const osi_core,
const nveu32_t enabled);
/** Called to config mac packet filter */
nve32_t (*config_mac_pkt_filter_reg)(
struct osi_core_priv_data *const osi_core,
const struct osi_filter *filter);
/** Called to update MAC address 1-127 */
nve32_t (*update_mac_addr_low_high_reg)(
struct osi_core_priv_data *const osi_core,
const struct osi_filter *filter);
/** Called to configure l3/L4 filter */
nve32_t (*config_l3_l4_filter_enable)(
struct osi_core_priv_data *const osi_core,
const nveu32_t enable);
/** Called to configure L3 filter */
nve32_t (*config_l3_filters)(struct osi_core_priv_data *const osi_core,
const nveu32_t filter_no,
const nveu32_t enb_dis,
const nveu32_t ipv4_ipv6_match,
const nveu32_t src_dst_addr_match,
const nveu32_t perfect_inverse_match,
const nveu32_t dma_routing_enable,
const nveu32_t dma_chan);
/** Called to update ip4 src or desc address */
nve32_t (*update_ip4_addr)(struct osi_core_priv_data *const osi_core,
const nveu32_t filter_no,
const nveu8_t addr[],
const nveu32_t src_dst_addr_match);
/** Called to update ip6 address */
nve32_t (*update_ip6_addr)(struct osi_core_priv_data *const osi_core,
const nveu32_t filter_no,
const nveu16_t addr[]);
/** Called to configure L4 filter */
nve32_t (*config_l4_filters)(struct osi_core_priv_data *const osi_core,
const nveu32_t filter_no,
const nveu32_t enb_dis,
const nveu32_t tcp_udp_match,
const nveu32_t src_dst_port_match,
const nveu32_t perfect_inverse_match,
const nveu32_t dma_routing_enable,
const nveu32_t dma_chan);
/** Called to update L4 Port for filter packet */
nve32_t (*update_l4_port_no)(struct osi_core_priv_data *const osi_core,
const nveu32_t filter_no,
const nveu16_t port_no,
const nveu32_t src_dst_port_match);
/** Called to set the addend value to adjust the time */
nve32_t (*config_addend)(struct osi_core_priv_data *const osi_core,
const nveu32_t addend);
/** Called to adjust the mac time */
nve32_t (*adjust_mactime)(struct osi_core_priv_data *const osi_core,
const nveu32_t sec,
const nveu32_t nsec,
const nveu32_t neg_adj,
const nveu32_t one_nsec_accuracy);
/** Called to set current system time to MAC */
nve32_t (*set_systime_to_mac)(struct osi_core_priv_data *const osi_core,
const nveu32_t sec,
const nveu32_t nsec);
/** Called to configure the TimeStampControl register */
void (*config_tscr)(struct osi_core_priv_data *const osi_core,
const nveu32_t ptp_filter);
/** Called to configure the sub second increment register */
void (*config_ssir)(struct osi_core_priv_data *const osi_core,
const nveu32_t ptp_clock);
/** Called to configure the PTP RX packets Queue */
nve32_t (*config_ptp_rxq)(struct osi_core_priv_data *const osi_core,
const unsigned int rxq_idx,
const unsigned int enable);
/** Called to update MMC counter from HW register */
void (*read_mmc)(struct osi_core_priv_data *const osi_core);
/** Called to write into a PHY reg over MDIO bus */
nve32_t (*write_phy_reg)(struct osi_core_priv_data *const osi_core,
const nveu32_t phyaddr,
const nveu32_t phyreg,
const nveu16_t phydata);
/** Called to read from a PHY reg over MDIO bus */
nve32_t (*read_phy_reg)(struct osi_core_priv_data *const osi_core,
const nveu32_t phyaddr,
const nveu32_t phyreg);
/** Called to read reg */
nveu32_t (*read_reg)(struct osi_core_priv_data *const osi_core,
const nve32_t reg);
/** Called to write reg */
nveu32_t (*write_reg)(struct osi_core_priv_data *const osi_core,
const nveu32_t val,
const nve32_t reg);
#ifdef MACSEC_SUPPORT
/** Called to read macsec reg */
nveu32_t (*read_macsec_reg)(struct osi_core_priv_data *const osi_core,
const nve32_t reg);
/** Called to write macsec reg */
nveu32_t (*write_macsec_reg)(struct osi_core_priv_data *const osi_core,
const nveu32_t val,
const nve32_t reg);
#endif /* MACSEC_SUPPORT */
#ifndef OSI_STRIPPED_LIB
/** Called periodically to read and validate safety critical
* registers against last written value */
nve32_t (*validate_regs)(struct osi_core_priv_data *const osi_core);
/** Called to flush MTL Tx queue */
nve32_t (*flush_mtl_tx_queue)(struct osi_core_priv_data *const osi_core,
const nveu32_t qinx);
/** Called to set av parameter */
nve32_t (*set_avb_algorithm)(struct osi_core_priv_data *const osi_core,
const struct osi_core_avb_algorithm *const avb);
/** Called to get av parameter */
nve32_t (*get_avb_algorithm)(struct osi_core_priv_data *const osi_core,
struct osi_core_avb_algorithm *const avb);
/** Called to configure the MTL to forward/drop tx status */
nve32_t (*config_tx_status)(struct osi_core_priv_data *const osi_core,
const nveu32_t tx_status);
/** Called to configure the MAC rx crc */
nve32_t (*config_rx_crc_check)(
struct osi_core_priv_data *const osi_core,
const nveu32_t crc_chk);
/** Called to configure the MAC flow control */
nve32_t (*config_flow_control)(
struct osi_core_priv_data *const osi_core,
const nveu32_t flw_ctrl);
/** Called to enable/disable HW ARP offload feature */
nve32_t (*config_arp_offload)(struct osi_core_priv_data *const osi_core,
const nveu32_t enable,
const nveu8_t *ip_addr);
/** Called to configure VLAN filtering */
nve32_t (*config_vlan_filtering)(
struct osi_core_priv_data *const osi_core,
const nveu32_t filter_enb_dis,
const nveu32_t perfect_hash_filtering,
const nveu32_t perfect_inverse_match);
/** Called to reset MMC HW counter structure */
void (*reset_mmc)(struct osi_core_priv_data *const osi_core);
/** Called to configure EEE Tx LPI */
void (*configure_eee)(struct osi_core_priv_data *const osi_core,
const nveu32_t tx_lpi_enabled,
const nveu32_t tx_lpi_timer);
/** Called to save MAC register space during SoC suspend */
nve32_t (*save_registers)(struct osi_core_priv_data *const osi_core);
/** Called to restore MAC control registers during SoC resume */
nve32_t (*restore_registers)(struct osi_core_priv_data *const osi_core);
/** Called to set MDC clock rate for MDIO operation */
void (*set_mdc_clk_rate)(struct osi_core_priv_data *const osi_core,
const nveu64_t csr_clk_rate);
/** Called to configure MAC in loopback mode */
nve32_t (*config_mac_loopback)(
struct osi_core_priv_data *const osi_core,
const nveu32_t lb_mode);
#endif /* !OSI_STRIPPED_LIB */
/** Called to get HW features */
nve32_t (*get_hw_features)(struct osi_core_priv_data *const osi_core,
struct osi_hw_features *hw_feat);
/** Called to configure RSS for MAC */
nve32_t (*config_rss)(struct osi_core_priv_data *osi_core);
/** Called to update GCL config */
int (*hw_config_est)(struct osi_core_priv_data *const osi_core,
struct osi_est_config *const est);
/** Called to update FPE config */
int (*hw_config_fpe)(struct osi_core_priv_data *const osi_core,
struct osi_fpe_config *const fpe);
/** Called to configure FRP engine */
int (*config_frp)(struct osi_core_priv_data *const osi_core,
const unsigned int enabled);
/** Called to update FRP Instruction Table entry */
int (*update_frp_entry)(struct osi_core_priv_data *const osi_core,
const unsigned int pos,
struct osi_core_frp_data *const data);
/** Called to update the FRP NVE count in HW */
int (*update_frp_nve)(struct osi_core_priv_data *const osi_core,
const unsigned int nve);
/** Called to configure HW PTP offload feature */
int (*config_ptp_offload)(struct osi_core_priv_data *const osi_core,
struct osi_pto_config *const pto_config);
#ifdef MACSEC_SUPPORT
/** Called to enable/disable MACsec-related MAC configuration */
void (*macsec_config_mac)(struct osi_core_priv_data *const osi_core,
const nveu32_t enable);
#endif /* MACSEC_SUPPORT */
/** Called to capture PTP and TSC time */
int (*ptp_tsc_capture)(struct osi_core_priv_data *const osi_core,
struct osi_core_ptp_tsc_data *data);
#ifdef HSI_SUPPORT
/** Interface function called to initialize HSI */
int (*core_hsi_configure)(struct osi_core_priv_data *const osi_core,
const nveu32_t enable);
#endif
};
/**
* @brief constant values for drift MAC to MAC sync.
*/
#ifndef DRIFT_CAL
#define DRIFT_CAL 1
/* PI servo coefficients, scaled by 10 (see struct core_ptp_servo) */
#define I_COMPONENT_BY_10 3
#define P_COMPONENT_BY_10 7
#define WEIGHT_BY_10 10
#define CONST_FACTOR 8 //(1sec/125ns)
#define MAX_FREQ 85000000LL
#endif
/* MAC system-time register offsets for EQOS and MGBE
* NOTE(review): confirm against the respective register maps */
#define EQOS_SEC_OFFSET 0xB08
#define EQOS_NSEC_OFFSET 0xB0C
#define MGBE_SEC_OFFSET 0xD08
#define MGBE_NSEC_OFFSET 0xD0C
/* Lower 31 bits of the nanoseconds register are the value */
#define ETHER_NSEC_MASK 0x7FFFFFFF
/* Servo state machine states (see core_ptp_servo.count) */
#define SERVO_STATS_0 0
#define SERVO_STATS_1 1
#define SERVO_STATS_2 2
/**
* @brief servo data structure.
*/
struct core_ptp_servo {
/** Offset/drift array to maintain current and last value */
nvel64_t offset[2];
/** Target MAC HW time counter array to maintain current and last
* value
*/
nvel64_t local[2];
/* Servo state. Initialized with 0. These states are used to monitor
* whether there is a sudden change in offset */
nveu32_t count;
/* Accumulated freq drift */
nvel64_t drift;
/* P component */
nvel64_t const_p;
/* I component */
nvel64_t const_i;
/* Last known ppb */
nvel64_t last_ppb;
/* MAC to MAC locking to access HW time register within OSI calls */
nveu32_t m2m_lock;
};
/**
* @brief Core local data structure. Must embed osi_core as the first
* member: code casts osi_core pointers to core_local.
*/
struct core_local {
/** OSI Core data variable */
struct osi_core_priv_data osi_core;
/** Core local operations variable */
struct core_ops *ops_p;
/** interface core local operations variable */
struct if_core_ops *if_ops_p;
/** structure to store tx time stamps */
struct osi_core_tx_ts ts[MAX_TX_TS_CNT];
/** Flag to represent initialization done or not */
nveu32_t init_done;
/** Flag to represent interface initialization done or not */
nveu32_t if_init_done;
/** Magic number to validate osi core pointer */
nveu64_t magic_num;
/** This is the head node for PTP packet ID queue */
struct osi_core_tx_ts tx_ts_head;
/** Maximum number of queues/channels */
nveu32_t max_chans;
/** GCL depth supported by HW */
nveu32_t gcl_dep;
/** Max GCL width (time + gate) value supported by HW */
nveu32_t gcl_width_val;
/** TS lock */
nveu32_t ts_lock;
/** Controller mac to mac role */
nveu32_t ether_m2m_role;
/** Servo structure */
struct core_ptp_servo serv;
/** OSI_ENABLE when HW came out of reset successfully, else OSI_DISABLE */
nveu32_t hw_init_successful;
/** Dynamic MAC to MAC time sync control for secondary interface */
nveu32_t m2m_tsync;
/** control pps output signal */
nveu32_t pps_freq;
/** Time interval mask for GCL entry */
nveu32_t ti_mask;
};
/**
* @brief eqos_init_core_ops - Initialize EQOS core operations.
*
* @param[in] ops: Core operations pointer.
*
* @note
* API Group:
* - Initialization: Yes
* - Run time: No
* - De-initialization: No
*/
void eqos_init_core_ops(struct core_ops *ops);
/**
* @brief ivc_init_core_ops - Initialize IVC core operations.
*
* @param[in] ops: Core operations pointer.
*
* @note
* API Group:
* - Initialization: Yes
* - Run time: No
* - De-initialization: No
*/
void ivc_init_core_ops(struct core_ops *ops);
/**
* @brief mgbe_init_core_ops - Initialize MGBE core operations.
*
* @param[in] ops: Core operations pointer.
*
* @note
* API Group:
* - Initialization: Yes
* - Run time: No
* - De-initialization: No
*/
void mgbe_init_core_ops(struct core_ops *ops);
/**
* @brief ivc_init_macsec_ops - Initialize macsec core operations.
*
* @param[in] macsecops: Macsec operations pointer.
*
* @note
* API Group:
* - Initialization: Yes
* - Run time: No
* - De-initialization: No
*/
void ivc_init_macsec_ops(void *macsecops);
/**
* @brief hw_interface_init_core_ops - Initialize HW interface functions.
*
* @param[in] if_ops_p: interface core operations pointer.
*
* @note
* API Group:
* - Initialization: Yes
* - Run time: No
* - De-initialization: No
*/
void hw_interface_init_core_ops(struct if_core_ops *if_ops_p);
/**
* @brief ivc_interface_init_core_ops - Initialize IVC interface functions
*
* @param[in] if_ops_p: interface core operations pointer.
*
* @note
* API Group:
* - Initialization: Yes
* - Run time: No
* - De-initialization: No
*/
void ivc_interface_init_core_ops(struct if_core_ops *if_ops_p);
/**
* @brief get osi pointer for PTP primary/sec interface
*
* @note
* Algorithm:
* - Returns OSI core data structure corresponding to mac-to-mac PTP
* role.
*
* @pre OSD layer should use this as first API to get osi_core pointer and
* use the same in remaining API invocation for mac-to-mac time sync.
*
* @note
* Traceability Details:
*
* @note
* Classification:
* - Interrupt: No
* - Signal handler: No
* - Thread safe: No
* - Required Privileges: None
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*
* @retval valid and unique osi_core pointer on success
* @retval NULL on failure.
*/
struct osi_core_priv_data *get_role_pointer(nveu32_t role);
#endif /* INCLUDED_CORE_LOCAL_H */

View File

@@ -1,148 +0,0 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifdef OSI_DEBUG
#include "debug.h"
/**
* @brief core_dump_struct - Dumps a given structure.
*
* @param[in] osi_core: OSI DMA private data structure.
* @param[in] ptr: Pointer to structure.
* @param[in] size: Size of structure to dump.
*
*/
static void core_dump_struct(struct osi_core_priv_data *osi_core,
unsigned char *ptr,
unsigned long size)
{
nveu32_t i = 0, rem, j;
unsigned long temp;
if (ptr == OSI_NULL) {
osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS,
"pointer is NULL\n");
return;
}
rem = i % 4;
temp = size - rem;
for (i = 0; i < temp; i += 4) {
osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS,
"%02x%02x%02x%02x", ptr[i], ptr[i + 1],
ptr[i + 2], ptr[i + 3]);
j = i;
}
for (i = j; i < size; i++) {
osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS, "%x",
ptr[i]);
}
}
/**
* @brief core_structs_dump - Dumps OSI CORE structure.
*
* @param[in] osi_core: OSI CORE private data structure.
*/
void core_structs_dump(struct osi_core_priv_data *osi_core)
{
struct core_local *l_core = (struct core_local *)osi_core;
osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS,
"CORE struct size = %lu",
sizeof(struct osi_core_priv_data));
core_dump_struct(osi_core, (unsigned char *)osi_core,
sizeof(struct osi_core_priv_data));
#ifdef MACSEC_SUPPORT
osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS,
"MACSEC ops size = %lu",
sizeof(struct osi_macsec_core_ops));
core_dump_struct(osi_core, (unsigned char *)osi_core->macsec_ops,
sizeof(struct osi_macsec_core_ops));
osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS,
"MACSEC LUT status size = %lu",
sizeof(struct osi_macsec_lut_status));
/* NOTE(review): this dumps macsec_ops with the size of
* osi_macsec_lut_status — likely the wrong source pointer; should
* presumably dump the LUT-status object itself. Confirm and fix. */
core_dump_struct(osi_core, (unsigned char *)osi_core->macsec_ops,
sizeof(struct osi_macsec_lut_status));
#endif
osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS,
"HW features size = %lu",
sizeof(struct osi_hw_features));
core_dump_struct(osi_core, (unsigned char *)osi_core->hw_feature,
sizeof(struct osi_hw_features));
osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS,
"core local size = %lu",
sizeof(struct core_local));
core_dump_struct(osi_core, (unsigned char *)l_core,
sizeof(struct core_local));
osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS,
"core ops size = %lu",
sizeof(struct core_ops));
core_dump_struct(osi_core, (unsigned char *)l_core->ops_p,
sizeof(struct core_ops));
osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS,
"if_ops_p struct size = %lu",
sizeof(struct if_core_ops));
core_dump_struct(osi_core, (unsigned char *)l_core->if_ops_p,
sizeof(struct if_core_ops));
}
/**
 * @brief core_reg_dump - Dumps MAC registers for debug.
 *
 * Walks the register file from offset 0x0 up to a MAC-version specific
 * last offset in 4-byte steps and prints each register as "offset: value".
 *
 * @param[in] osi_core: OSI core private data structure.
 */
void core_reg_dump(struct osi_core_priv_data *osi_core)
{
	unsigned int offset;
	unsigned int limit;
	unsigned int val;

	/* Select the last valid register offset for this MAC version */
	switch (osi_core->mac_ver) {
	case OSI_EQOS_MAC_5_00:
		limit = 0x12E4;
		break;
	case OSI_EQOS_MAC_5_30:
		limit = 0x14EC;
		break;
	case OSI_MGBE_MAC_3_10:
		limit = 0x35FC;
		break;
	default:
		/* Unknown MAC version: nothing to dump */
		return;
	}

	for (offset = 0x0U; offset <= limit; offset += 4U) {
		val = osi_readla(osi_core,
				 (nveu8_t *)osi_core->base + offset);
		osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_REG,
					 "%x: %x\n", offset, val);
	}
}
#endif

View File

@@ -1,34 +0,0 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_CORE_DEBUG_H
#define INCLUDED_CORE_DEBUG_H
#include <osi_core.h>
#include <osi_macsec.h>
#include "../osi/common/common.h"
#include "core_local.h"
void core_reg_dump(struct osi_core_priv_data *osi_core);
void core_structs_dump(struct osi_core_priv_data *osi_core);
#endif /* INCLUDED_CORE_DEBUG_H*/

View File

File diff suppressed because it is too large Load Diff

View File

File diff suppressed because it is too large Load Diff

View File

@@ -1,403 +0,0 @@
/*
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "../osi/common/common.h"
#include <osi_core.h>
#include "eqos_mmc.h"
#include "eqos_core.h"
/**
 * @brief update_mmc_val - function to read register and return value to callee
 *
 * @note
 * Algorithm:
 *  - Read the HW counter register at the given offset and add it to the
 *    running SW counter. If the SW counter would wrap, log the event,
 *    reset all MMC counters (HW and SW) and return 0.
 *
 * @param[in, out] osi_core: OSI core private data structure.
 * @param[in] last_value: previous value of stats variable.
 * @param[in] offset: HW register offset
 *
 * @pre
 *  - MAC should be init and started. see osi_start_mac()
 *  - osi_core->osd should be populated
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: Yes
 * - De-initialization: No
 *
 * @retval accumulated counter value (last_value + HW register) on success
 * @retval 0 when the accumulation overflowed and all counters were reset
 */
static inline nveu64_t update_mmc_val(struct osi_core_priv_data *const osi_core,
				      nveu64_t last_value,
				      nveu64_t offset)
{
	nveu32_t value = osi_readla(osi_core,
				    (nveu8_t *)osi_core->base + offset);
	nveu64_t temp = last_value + value;

	if (temp < last_value) {
		/* 64-bit accumulator wrapped: log and restart all counters */
		OSI_CORE_ERR(osi_core->osd,
			     OSI_LOG_ARG_OUTOFBOUND,
			     "Value overflow resetting all counters\n",
			     (nveul64_t)offset);
		eqos_reset_mmc(osi_core);
		return 0;
	}
	return temp;
}
/**
* @brief eqos_reset_mmc - To reset MMC registers and ether_mmc_counter
* structure variable
*
* @param[in, out] osi_core: OSI core private data structure.
*
* @pre
* - MAC should be init and started. see osi_start_mac()
* - osi_core->osd should be populated
*
* @note
* API Group:
* - Initialization: No
* - Run time: Yes
* - De-initialization: No
*/
void eqos_reset_mmc(struct osi_core_priv_data *const osi_core)
{
nveu32_t value;
value = osi_readla(osi_core,
(nveu8_t *)osi_core->base + EQOS_MMC_CNTRL);
/* self-clear bit in one clock cycle */
value |= EQOS_MMC_CNTRL_CNTRST;
osi_writela(osi_core, value,
(nveu8_t *)osi_core->base + EQOS_MMC_CNTRL);
osi_memset(&osi_core->mmc, 0U, sizeof(struct osi_mmc_counters));
}
/**
 * @brief eqos_read_mmc - To read MMC registers and ether_mmc_counter structure
 *	   variable
 *
 * @note
 * Algorithm:
 *  - Read corresponding register value of #osi_core_priv_data->mmc(#osi_mmc_counters)
 *    member and increment its value.
 *  - If any counter overflows, reset all Sw counters and reset HW counter register.
 *
 * @param[in, out] osi_core: OSI core private data structure.
 *
 * @pre
 *  - MAC should be init and started. see osi_start_mac()
 *  - osi_core->osd should be populated
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: Yes
 * - De-initialization: No
 */
void eqos_read_mmc(struct osi_core_priv_data *const osi_core)
{
	struct osi_mmc_counters *mmc = &osi_core->mmc;

/* Accumulate one HW MMC register into its SW counter. Stating each
 * counter/register pairing exactly once prevents the copy-paste slips
 * the fully expanded form suffered from (wrong-field accumulation and
 * duplicated register reads).
 */
#define EQOS_UPDATE_MMC(counter, reg) \
	(mmc->counter = update_mmc_val(osi_core, mmc->counter, (reg)))

	EQOS_UPDATE_MMC(mmc_tx_octetcount_gb, MMC_TXOCTETCOUNT_GB);
	EQOS_UPDATE_MMC(mmc_tx_framecount_gb, MMC_TXPACKETCOUNT_GB);
	EQOS_UPDATE_MMC(mmc_tx_broadcastframe_g, MMC_TXBROADCASTPACKETS_G);
	EQOS_UPDATE_MMC(mmc_tx_multicastframe_g, MMC_TXMULTICASTPACKETS_G);
	EQOS_UPDATE_MMC(mmc_tx_64_octets_gb, MMC_TX64OCTETS_GB);
	EQOS_UPDATE_MMC(mmc_tx_65_to_127_octets_gb, MMC_TX65TO127OCTETS_GB);
	EQOS_UPDATE_MMC(mmc_tx_128_to_255_octets_gb, MMC_TX128TO255OCTETS_GB);
	EQOS_UPDATE_MMC(mmc_tx_256_to_511_octets_gb, MMC_TX256TO511OCTETS_GB);
	EQOS_UPDATE_MMC(mmc_tx_512_to_1023_octets_gb, MMC_TX512TO1023OCTETS_GB);
	EQOS_UPDATE_MMC(mmc_tx_1024_to_max_octets_gb, MMC_TX1024TOMAXOCTETS_GB);
	EQOS_UPDATE_MMC(mmc_tx_unicast_gb, MMC_TXUNICASTPACKETS_GB);
	EQOS_UPDATE_MMC(mmc_tx_multicast_gb, MMC_TXMULTICASTPACKETS_GB);
	EQOS_UPDATE_MMC(mmc_tx_broadcast_gb, MMC_TXBROADCASTPACKETS_GB);
	EQOS_UPDATE_MMC(mmc_tx_underflow_error, MMC_TXUNDERFLOWERROR);
	EQOS_UPDATE_MMC(mmc_tx_singlecol_g, MMC_TXSINGLECOL_G);
	EQOS_UPDATE_MMC(mmc_tx_multicol_g, MMC_TXMULTICOL_G);
	EQOS_UPDATE_MMC(mmc_tx_deferred, MMC_TXDEFERRED);
	EQOS_UPDATE_MMC(mmc_tx_latecol, MMC_TXLATECOL);
	EQOS_UPDATE_MMC(mmc_tx_exesscol, MMC_TXEXESSCOL);
	/* Fix: the expanded code accumulated mmc_tx_exesscol into
	 * mmc_tx_carrier_error here (copy-paste defect). */
	EQOS_UPDATE_MMC(mmc_tx_carrier_error, MMC_TXCARRIERERROR);
	EQOS_UPDATE_MMC(mmc_tx_octetcount_g, MMC_TXOCTETCOUNT_G);
	EQOS_UPDATE_MMC(mmc_tx_framecount_g, MMC_TXPACKETSCOUNT_G);
	EQOS_UPDATE_MMC(mmc_tx_excessdef, MMC_TXEXCESSDEF);
	EQOS_UPDATE_MMC(mmc_tx_pause_frame, MMC_TXPAUSEPACKETS);
	EQOS_UPDATE_MMC(mmc_tx_vlan_frame_g, MMC_TXVLANPACKETS_G);
	EQOS_UPDATE_MMC(mmc_tx_osize_frame_g, MMC_TXOVERSIZE_G);
	EQOS_UPDATE_MMC(mmc_rx_framecount_gb, MMC_RXPACKETCOUNT_GB);
	EQOS_UPDATE_MMC(mmc_rx_octetcount_gb, MMC_RXOCTETCOUNT_GB);
	EQOS_UPDATE_MMC(mmc_rx_octetcount_g, MMC_RXOCTETCOUNT_G);
	EQOS_UPDATE_MMC(mmc_rx_broadcastframe_g, MMC_RXBROADCASTPACKETS_G);
	EQOS_UPDATE_MMC(mmc_rx_multicastframe_g, MMC_RXMULTICASTPACKETS_G);
	EQOS_UPDATE_MMC(mmc_rx_crc_error, MMC_RXCRCERROR);
	EQOS_UPDATE_MMC(mmc_rx_align_error, MMC_RXALIGNMENTERROR);
	EQOS_UPDATE_MMC(mmc_rx_runt_error, MMC_RXRUNTERROR);
	EQOS_UPDATE_MMC(mmc_rx_jabber_error, MMC_RXJABBERERROR);
	EQOS_UPDATE_MMC(mmc_rx_undersize_g, MMC_RXUNDERSIZE_G);
	EQOS_UPDATE_MMC(mmc_rx_oversize_g, MMC_RXOVERSIZE_G);
	EQOS_UPDATE_MMC(mmc_rx_64_octets_gb, MMC_RX64OCTETS_GB);
	EQOS_UPDATE_MMC(mmc_rx_65_to_127_octets_gb, MMC_RX65TO127OCTETS_GB);
	EQOS_UPDATE_MMC(mmc_rx_128_to_255_octets_gb, MMC_RX128TO255OCTETS_GB);
	EQOS_UPDATE_MMC(mmc_rx_256_to_511_octets_gb, MMC_RX256TO511OCTETS_GB);
	EQOS_UPDATE_MMC(mmc_rx_512_to_1023_octets_gb, MMC_RX512TO1023OCTETS_GB);
	EQOS_UPDATE_MMC(mmc_rx_1024_to_max_octets_gb, MMC_RX1024TOMAXOCTETS_GB);
	EQOS_UPDATE_MMC(mmc_rx_unicast_g, MMC_RXUNICASTPACKETS_G);
	EQOS_UPDATE_MMC(mmc_rx_length_error, MMC_RXLENGTHERROR);
	EQOS_UPDATE_MMC(mmc_rx_outofrangetype, MMC_RXOUTOFRANGETYPE);
	EQOS_UPDATE_MMC(mmc_rx_pause_frames, MMC_RXPAUSEPACKETS);
	EQOS_UPDATE_MMC(mmc_rx_fifo_overflow, MMC_RXFIFOOVERFLOW);
	EQOS_UPDATE_MMC(mmc_rx_vlan_frames_gb, MMC_RXVLANPACKETS_GB);
	EQOS_UPDATE_MMC(mmc_rx_watchdog_error, MMC_RXWATCHDOGERROR);
	EQOS_UPDATE_MMC(mmc_rx_receive_error, MMC_RXRCVERROR);
	EQOS_UPDATE_MMC(mmc_rx_ctrl_frames_g, MMC_RXCTRLPACKETS_G);
	EQOS_UPDATE_MMC(mmc_tx_lpi_usec_cntr, MMC_TXLPIUSECCNTR);
	EQOS_UPDATE_MMC(mmc_tx_lpi_tran_cntr, MMC_TXLPITRANCNTR);
	EQOS_UPDATE_MMC(mmc_rx_lpi_usec_cntr, MMC_RXLPIUSECCNTR);
	EQOS_UPDATE_MMC(mmc_rx_lpi_tran_cntr, MMC_RXLPITRANCNTR);
	EQOS_UPDATE_MMC(mmc_rx_ipv4_gd, MMC_RXIPV4_GD_PKTS);
	EQOS_UPDATE_MMC(mmc_rx_ipv4_hderr, MMC_RXIPV4_HDRERR_PKTS);
	EQOS_UPDATE_MMC(mmc_rx_ipv4_nopay, MMC_RXIPV4_NOPAY_PKTS);
	EQOS_UPDATE_MMC(mmc_rx_ipv4_frag, MMC_RXIPV4_FRAG_PKTS);
	EQOS_UPDATE_MMC(mmc_rx_ipv4_udsbl, MMC_RXIPV4_UBSBL_PKTS);
	EQOS_UPDATE_MMC(mmc_rx_ipv6_gd, MMC_RXIPV6_GD_PKTS);
	EQOS_UPDATE_MMC(mmc_rx_ipv6_hderr, MMC_RXIPV6_HDRERR_PKTS);
	EQOS_UPDATE_MMC(mmc_rx_ipv6_nopay, MMC_RXIPV6_NOPAY_PKTS);
	EQOS_UPDATE_MMC(mmc_rx_udp_gd, MMC_RXUDP_GD_PKTS);
	EQOS_UPDATE_MMC(mmc_rx_udp_err, MMC_RXUDP_ERR_PKTS);
	EQOS_UPDATE_MMC(mmc_rx_tcp_gd, MMC_RXTCP_GD_PKTS);
	EQOS_UPDATE_MMC(mmc_rx_tcp_err, MMC_RXTCP_ERR_PKTS);
	EQOS_UPDATE_MMC(mmc_rx_icmp_gd, MMC_RXICMP_GD_PKTS);
	EQOS_UPDATE_MMC(mmc_rx_icmp_err, MMC_RXICMP_ERR_PKTS);
	EQOS_UPDATE_MMC(mmc_rx_ipv4_gd_octets, MMC_RXIPV4_GD_OCTETS);
	EQOS_UPDATE_MMC(mmc_rx_ipv4_hderr_octets, MMC_RXIPV4_HDRERR_OCTETS);
	EQOS_UPDATE_MMC(mmc_rx_ipv4_nopay_octets, MMC_RXIPV4_NOPAY_OCTETS);
	EQOS_UPDATE_MMC(mmc_rx_ipv4_frag_octets, MMC_RXIPV4_FRAG_OCTETS);
	EQOS_UPDATE_MMC(mmc_rx_ipv4_udsbl_octets, MMC_RXIPV4_UDSBL_OCTETS);
	EQOS_UPDATE_MMC(mmc_rx_ipv6_gd_octets, MMC_RXIPV6_GD_OCTETS);
	EQOS_UPDATE_MMC(mmc_rx_ipv6_hderr_octets, MMC_RXIPV6_HDRERR_OCTETS);
	EQOS_UPDATE_MMC(mmc_rx_ipv6_nopay_octets, MMC_RXIPV6_NOPAY_OCTETS);
	/* Fix: the expanded code accumulated MMC_RXUDP_GD_OCTETS twice
	 * (before and after the IPv6 octet counters), double-counting the
	 * register; it is now read exactly once. */
	EQOS_UPDATE_MMC(mmc_rx_udp_gd_octets, MMC_RXUDP_GD_OCTETS);
	EQOS_UPDATE_MMC(mmc_rx_udp_err_octets, MMC_RXUDP_ERR_OCTETS);
	EQOS_UPDATE_MMC(mmc_rx_tcp_gd_octets, MMC_RXTCP_GD_OCTETS);
	EQOS_UPDATE_MMC(mmc_rx_tcp_err_octets, MMC_RXTCP_ERR_OCTETS);
	EQOS_UPDATE_MMC(mmc_rx_icmp_gd_octets, MMC_RXICMP_GD_OCTETS);
	EQOS_UPDATE_MMC(mmc_rx_icmp_err_octets, MMC_RXICMP_ERR_OCTETS);
	EQOS_UPDATE_MMC(mmc_tx_fpe_frag_cnt, MMC_TX_FPE_FRAG_COUNTER);
	EQOS_UPDATE_MMC(mmc_tx_fpe_hold_req_cnt, MMC_TX_HOLD_REQ_COUNTER);
	EQOS_UPDATE_MMC(mmc_rx_packet_reass_err_cnt,
			MMC_RX_PKT_ASSEMBLY_ERR_CNTR);
	EQOS_UPDATE_MMC(mmc_rx_packet_smd_err_cnt, MMC_RX_PKT_SMD_ERR_CNTR);
	EQOS_UPDATE_MMC(mmc_rx_packet_asm_ok_cnt, MMC_RX_PKT_ASSEMBLY_OK_CNTR);
	EQOS_UPDATE_MMC(mmc_rx_fpe_fragment_cnt, MMC_RX_FPE_FRAG_CNTR);

#undef EQOS_UPDATE_MMC
}

View File

@@ -1,126 +0,0 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_EQOS_MMC_H
#define INCLUDED_EQOS_MMC_H
/**
* @addtogroup EQOS-MMC MMC HW register offsets
*
* @brief MMC HW register offsets
* @{
*/
#define MMC_TXOCTETCOUNT_GB 0x00714U
#define MMC_TXPACKETCOUNT_GB 0x00718
#define MMC_TXBROADCASTPACKETS_G 0x0071c
#define MMC_TXMULTICASTPACKETS_G 0x00720
#define MMC_TX64OCTETS_GB 0x00724
#define MMC_TX65TO127OCTETS_GB 0x00728
#define MMC_TX128TO255OCTETS_GB 0x0072c
#define MMC_TX256TO511OCTETS_GB 0x00730
#define MMC_TX512TO1023OCTETS_GB 0x00734
#define MMC_TX1024TOMAXOCTETS_GB 0x00738
#define MMC_TXUNICASTPACKETS_GB 0x0073c
#define MMC_TXMULTICASTPACKETS_GB 0x00740
#define MMC_TXBROADCASTPACKETS_GB 0x00744
#define MMC_TXUNDERFLOWERROR 0x00748
#define MMC_TXSINGLECOL_G 0x0074c
#define MMC_TXMULTICOL_G 0x00750
#define MMC_TXDEFERRED 0x00754
#define MMC_TXLATECOL 0x00758
#define MMC_TXEXESSCOL 0x0075c
#define MMC_TXCARRIERERROR 0x00760
#define MMC_TXOCTETCOUNT_G 0x00764
#define MMC_TXPACKETSCOUNT_G 0x00768
#define MMC_TXEXCESSDEF 0x0076c
#define MMC_TXPAUSEPACKETS 0x00770
#define MMC_TXVLANPACKETS_G 0x00774
#define MMC_TXOVERSIZE_G 0x00778
#define MMC_RXPACKETCOUNT_GB 0x00780
#define MMC_RXOCTETCOUNT_GB 0x00784
#define MMC_RXOCTETCOUNT_G 0x00788
#define MMC_RXBROADCASTPACKETS_G 0x0078c
#define MMC_RXMULTICASTPACKETS_G 0x00790
#define MMC_RXCRCERROR 0x00794
#define MMC_RXALIGNMENTERROR 0x00798
#define MMC_RXRUNTERROR 0x0079c
#define MMC_RXJABBERERROR 0x007a0
#define MMC_RXUNDERSIZE_G 0x007a4
#define MMC_RXOVERSIZE_G 0x007a8
#define MMC_RX64OCTETS_GB 0x007ac
#define MMC_RX65TO127OCTETS_GB 0x007b0
#define MMC_RX128TO255OCTETS_GB 0x007b4
#define MMC_RX256TO511OCTETS_GB 0x007b8
#define MMC_RX512TO1023OCTETS_GB 0x007bc
#define MMC_RX1024TOMAXOCTETS_GB 0x007c0
#define MMC_RXUNICASTPACKETS_G 0x007c4
#define MMC_RXLENGTHERROR 0x007c8
#define MMC_RXOUTOFRANGETYPE 0x007cc
#define MMC_RXPAUSEPACKETS 0x007d0
#define MMC_RXFIFOOVERFLOW 0x007d4
#define MMC_RXVLANPACKETS_GB 0x007d8
#define MMC_RXWATCHDOGERROR 0x007dc
#define MMC_RXRCVERROR 0x007e0
#define MMC_RXCTRLPACKETS_G 0x007e4
#define MMC_TXLPIUSECCNTR 0x007ec
#define MMC_TXLPITRANCNTR 0x007f0
#define MMC_RXLPIUSECCNTR 0x007f4
#define MMC_RXLPITRANCNTR 0x007f8
#define MMC_RXIPV4_GD_PKTS 0x00810
#define MMC_RXIPV4_HDRERR_PKTS 0x00814
#define MMC_RXIPV4_NOPAY_PKTS 0x00818
#define MMC_RXIPV4_FRAG_PKTS 0x0081c
#define MMC_RXIPV4_UBSBL_PKTS 0x00820
#define MMC_RXIPV6_GD_PKTS 0x00824
#define MMC_RXIPV6_HDRERR_PKTS 0x00828
#define MMC_RXIPV6_NOPAY_PKTS 0x0082c
#define MMC_RXUDP_GD_PKTS 0x00830
#define MMC_RXUDP_ERR_PKTS 0x00834
#define MMC_RXTCP_GD_PKTS 0x00838
#define MMC_RXTCP_ERR_PKTS 0x0083c
#define MMC_RXICMP_GD_PKTS 0x00840
#define MMC_RXICMP_ERR_PKTS 0x00844
#define MMC_RXIPV4_GD_OCTETS 0x00850
#define MMC_RXIPV4_HDRERR_OCTETS 0x00854
#define MMC_RXIPV4_NOPAY_OCTETS 0x00858
#define MMC_RXIPV4_FRAG_OCTETS 0x0085c
#define MMC_RXIPV4_UDSBL_OCTETS 0x00860
#define MMC_RXIPV6_GD_OCTETS 0x00864
#define MMC_RXIPV6_HDRERR_OCTETS 0x00868
#define MMC_RXIPV6_NOPAY_OCTETS 0x0086c
#define MMC_RXUDP_GD_OCTETS 0x00870
#define MMC_RXUDP_ERR_OCTETS 0x00874
#define MMC_RXTCP_GD_OCTETS 0x00878
#define MMC_RXTCP_ERR_OCTETS 0x0087c
#define MMC_RXICMP_GD_OCTETS 0x00880
#define MMC_RXICMP_ERR_OCTETS 0x00884
#define MMC_TX_FPE_FRAG_COUNTER 0x008A8
#define MMC_TX_HOLD_REQ_COUNTER 0x008AC
#define MMC_RX_PKT_ASSEMBLY_ERR_CNTR 0x008C8
#define MMC_RX_PKT_SMD_ERR_CNTR 0x008CC
#define MMC_RX_PKT_ASSEMBLY_OK_CNTR 0x008D0
#define MMC_RX_FPE_FRAG_CNTR 0x008D4
/** @} */
void eqos_read_mmc(struct osi_core_priv_data *const osi_core);
void eqos_reset_mmc(struct osi_core_priv_data *const osi_core);
#endif /* INCLUDED_EQOS_MMC_H */

View File

@@ -1,836 +0,0 @@
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "../osi/common/common.h"
#include "frp.h"
/**
* @brief frp_entry_copy - Copy FRP entry
*
* Algorithm: Copy source FRP entry data into destination entry
*
* @param[in] dst: Destination FRP entry pointer.
* @param[in] src: source FRP entry pointer.
*
*/
static void frp_entry_copy(struct osi_core_frp_entry *dst,
struct osi_core_frp_entry *src)
{
dst->frp_id = src->frp_id;
dst->data.match_data = src->data.match_data;
dst->data.match_en = src->data.match_en;
dst->data.accept_frame = src->data.accept_frame;
dst->data.reject_frame = src->data.reject_frame;
dst->data.inverse_match = src->data.inverse_match;
dst->data.next_ins_ctrl = src->data.next_ins_ctrl;
dst->data.frame_offset = src->data.frame_offset;
dst->data.ok_index = src->data.ok_index;
dst->data.dma_chsel = src->data.dma_chsel;
}
/**
 * @brief frp_entry_find - Find FRP entry in table
 *
 * Algorithm: Scan the FRP table for the given ID, recording the index
 * of the first matching entry and the total number of entries that
 * carry that ID.
 *
 * @param[in] osi_core: OSI core private data structure.
 * @param[in] frp_id: FRP ID to find.
 * @param[out] start: Pointer to store start index for given frp_id.
 * @param[out] no_entries: No of FRP index's used for given frp_id.
 *
 * @retval 0 on success.
 * @retval -1 on failure.
 */
static int frp_entry_find(struct osi_core_priv_data *const osi_core,
			  int frp_id,
			  unsigned char *start,
			  unsigned char *no_entries)
{
	unsigned char idx;
	unsigned char matches = OSI_NONE;

	for (idx = 0U; idx < osi_core->frp_cnt; idx++) {
		if (osi_core->frp_table[idx].frp_id != frp_id) {
			continue;
		}
		if (matches == OSI_NONE) {
			/* First hit marks the start of this ID's run */
			*start = idx;
		}
		matches = (unsigned char)(matches + 1U);
		*no_entries = matches;
	}
	if (matches == OSI_NONE) {
		/* ID not present in the table */
		return -1;
	}
	return 0;
}
/**
 * @brief frp_req_entries - Calculates required FRP entries.
 *
 * Algorithm: Calculates how many FRP instruction entries are needed to
 * match @match_length bytes starting at byte @offset, given that the
 * first entry covers only the bytes remaining in its 4-byte word.
 *
 * @param[in] offset: Actual match data offset position.
 * @param[in] match_length: Match data length.
 *
 * @retval No of FRP entries required (0 when match_length is invalid).
 */
static unsigned char frp_req_entries(unsigned char offset,
				     unsigned char match_length)
{
	unsigned char entries = 1U;
	unsigned char first_bytes;
	unsigned char remain;

	/* Zero-length or oversized match data requires no entries */
	if ((match_length == OSI_NONE) ||
	    (match_length > OSI_FRP_MATCH_DATA_MAX)) {
		return 0U;
	}
	/* Bytes the first (offset-adjusted) entry can cover */
	first_bytes = (unsigned char)FRP_OFFSET_BYTES(offset);
	if (match_length <= first_bytes) {
		/* Everything fits in a single entry */
		return 1U;
	}
	remain = (unsigned char)(match_length - first_bytes);
	if ((remain / FRP_MD_SIZE) < OSI_FRP_MATCH_DATA_MAX) {
		entries = (unsigned char)(entries + (remain / FRP_MD_SIZE));
		if ((remain % FRP_MD_SIZE) != OSI_NONE) {
			/* Partial trailing word needs one more entry */
			entries = (unsigned char)(entries + 1U);
		}
	}
	return entries;
}
/**
 * @brief frp_entry_mode_parse - Filter mode parse function.
 *
 * Algorithm: Translates the FRP command filter mode into the
 * accept-frame (AF), reject-frame (RF) and inverse-match (IM) flags of
 * the FRP entry. Unknown modes leave the entry untouched.
 *
 * @param[in] filter_mode: Filter mode from FRP command.
 * @param[in] data: FRP entry data pointer.
 *
 */
static void frp_entry_mode_parse(unsigned char filter_mode,
				 struct osi_core_frp_data *data)
{
	unsigned char accept;
	unsigned char reject;
	unsigned char inverse;

	switch (filter_mode) {
	case OSI_FRP_MODE_ROUTE:
		accept = OSI_ENABLE;
		reject = OSI_DISABLE;
		inverse = OSI_DISABLE;
		break;
	case OSI_FRP_MODE_DROP:
		accept = OSI_DISABLE;
		reject = OSI_ENABLE;
		inverse = OSI_DISABLE;
		break;
	case OSI_FRP_MODE_BYPASS:
		accept = OSI_ENABLE;
		reject = OSI_ENABLE;
		inverse = OSI_DISABLE;
		break;
	case OSI_FRP_MODE_LINK:
		accept = OSI_DISABLE;
		reject = OSI_DISABLE;
		inverse = OSI_DISABLE;
		break;
	case OSI_FRP_MODE_IM_ROUTE:
		accept = OSI_ENABLE;
		reject = OSI_DISABLE;
		inverse = OSI_ENABLE;
		break;
	case OSI_FRP_MODE_IM_DROP:
		accept = OSI_DISABLE;
		reject = OSI_ENABLE;
		inverse = OSI_ENABLE;
		break;
	case OSI_FRP_MODE_IM_BYPASS:
		accept = OSI_ENABLE;
		reject = OSI_ENABLE;
		inverse = OSI_ENABLE;
		break;
	case OSI_FRP_MODE_IM_LINK:
		/* NOTE(review): IM_LINK sets inverse_match to OSI_DISABLE,
		 * identical to plain LINK — matches the original code, but
		 * confirm this is intentional for the inverse-match variant. */
		accept = OSI_DISABLE;
		reject = OSI_DISABLE;
		inverse = OSI_DISABLE;
		break;
	default:
		/* Unknown mode: do not modify the entry */
		return;
	}

	data->accept_frame = accept;
	data->reject_frame = reject;
	data->inverse_match = inverse;
}
/**
 * @brief frp_entry_add - Add new FRP entries in table.
 *
 * Algorithm: This function will prepare the FRP entries
 * for given inputs add or update them from a given
 * position into the FRP table.
 *
 * @param[in] osi_core: OSI core private data structure.
 * @param[in] frp_id: FRP ID to add.
 * @param[in] pos: Start index in the FRP table to fill from.
 * @param[in] match: Pointer to match data.
 * @param[in] length: Match data length.
 * @param[in] offset: Actual match data offset position.
 * @param[in] filter_mode: Filter mode from FRP command.
 * @param[in] next_frp_id: FRP ID to link this ID.
 * @param[in] dma_sel: Indicate the DMA Channel Number (1-bit for each).
 *
 * @retval 0 on success.
 * @retval -1 on failure.
 */
static int frp_entry_add(struct osi_core_priv_data *const osi_core,
			 int frp_id,
			 unsigned char pos,
			 unsigned char *const match,
			 unsigned char length,
			 unsigned char offset,
			 unsigned char filter_mode,
			 int next_frp_id,
			 unsigned int dma_sel)
{
	struct osi_core_frp_entry *entry = OSI_NULL;
	struct osi_core_frp_data *data = OSI_NULL;
	unsigned int req_entries = 0U;
	unsigned char ok_index = 0U;
	unsigned char fo_t = 0U;	/* frame offset (4-byte words) of current entry */
	unsigned char fp_t = 0U;	/* byte position within the current 4-byte word */
	unsigned char i = 0U, j = 0U, md_pos = 0U;
	/* Validate length */
	if (length > OSI_FRP_MATCH_DATA_MAX) {
		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND,
			     "Invalid match length\n",
			     length);
		return -1;
	}
	/* Validate filter_mode */
	if (filter_mode >= OSI_FRP_MODE_MAX) {
		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
			     "Invalid filter mode argment\n",
			     filter_mode);
		return -1;
	}
	/* Validate offset */
	if (offset >= OSI_FRP_OFFSET_MAX) {
		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
			     "Invalid offset value\n",
			     offset);
		return -1;
	}
	/* Check for available space.
	 * NOTE(review): length == 0 passes the check above and yields
	 * req_entries == 0; in LINK/IM_LINK mode the final-OKI block below
	 * would then dereference data while it is still OSI_NULL — confirm
	 * callers never pass zero length. */
	req_entries = frp_req_entries(offset, length);
	if ((req_entries >= OSI_FRP_MAX_ENTRY) ||
	    (req_entries + pos) >= OSI_FRP_MAX_ENTRY) {
		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
			     "No space to update FRP ID\n",
			     OSI_NONE);
		return -1;
	}
	/* Validate next_frp_id index ok_index */
	if (filter_mode == OSI_FRP_MODE_LINK ||
	    filter_mode == OSI_FRP_MODE_IM_LINK) {
		if (frp_entry_find(osi_core, next_frp_id, &i, &j) < 0) {
			/* Linked ID not in table yet: fall back to using the
			 * raw ID value as the OK index */
			OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
				     "No Link FRP ID index found\n",
				     OSI_NONE);
			i = (unsigned char) next_frp_id;
		}
		ok_index = i;
	}
	/* Start data fill from 0U ... (length - 1U) */
	fo_t = (offset / FRP_MD_SIZE);
	fp_t = (offset % FRP_MD_SIZE);
	md_pos = 0U;
	for (i = 0U; i < req_entries; i++) {
		/* Get FRP entry*/
		entry = &osi_core->frp_table[pos];
		data = &entry->data;
		/* Fill FRP ID */
		entry->frp_id = frp_id;
		/* Fill MD and ME: pack match bytes into the 32-bit match
		 * data word with a per-byte enable mask */
		data->match_data = OSI_NONE;
		data->match_en = OSI_NONE;
		for (j = fp_t; j < FRP_MD_SIZE; j++) {
			data->match_data |= ((unsigned int)match[md_pos])
					    << (j * FRP_ME_BYTE_SHIFT);
			data->match_en |= ((unsigned int)FRP_ME_BYTE <<
					   (j * FRP_ME_BYTE_SHIFT));
			md_pos++;
			if (md_pos >= length) {
				/* data fill completed */
				break;
			}
		}
		/* Fill FO */
		data->frame_offset = fo_t;
		/* Fill AF, RF, and IM flags */
		frp_entry_mode_parse(filter_mode, data);
		/* Fill DCH */
		data->dma_chsel = dma_sel;
		/* Check for the remain data and update FRP flags */
		if (md_pos < length) {
			/* Reset AF, RF and set NIC, OKI so this entry chains
			 * to the continuation entry that follows */
			data->accept_frame = OSI_DISABLE;
			data->reject_frame = OSI_DISABLE;
			data->next_ins_ctrl = OSI_ENABLE;
			/* Init next FRP entry */
			pos++;
			fo_t++;
			fp_t = OSI_NONE;
			data->ok_index = pos;
		} else {
			data->next_ins_ctrl = OSI_DISABLE;
			data->ok_index = OSI_DISABLE;
		}
	}
	/* Check and fill final OKI */
	if (filter_mode == OSI_FRP_MODE_LINK ||
	    filter_mode == OSI_FRP_MODE_IM_LINK) {
		/* Update NIC and OKI in final entry */
		data->next_ins_ctrl = OSI_ENABLE;
		data->ok_index = ok_index;
	}
	return 0;
}
/**
* @brief frp_hw_write - Update HW FRP table.
*
* Algorithm: Update FRP table into HW.
*
* @param[in] osi_core: OSI core private data structure.
*
* @retval 0 on success.
* @retval -1 on failure.
*/
static int frp_hw_write(struct osi_core_priv_data *const osi_core,
struct core_ops *ops_p)
{
int ret = -1, tmp = -1;
struct osi_core_frp_entry *entry;
unsigned int frp_cnt = osi_core->frp_cnt, i = OSI_NONE;
/* Disable the FRP in HW */
ret = ops_p->config_frp(osi_core, OSI_DISABLE);
if (ret < 0) {
/* Fail to disable try to enable it back */
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
"HW Fail on FRP update\n",
OSI_NONE);
goto hw_write_enable_frp;
}
/* Write FRP entries into HW */
for (i = 0; i < frp_cnt; i++) {
entry = &osi_core->frp_table[i];
ret = ops_p->update_frp_entry(osi_core, i, &entry->data);
if (ret < 0) {
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
"Fail to update FRP entry\n",
OSI_NONE);
goto hw_write_enable_frp;
}
}
/* Update the NVE */
ret = ops_p->update_frp_nve(osi_core, (frp_cnt - 1U));
if (ret < 0) {
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
"Fail to update FRP NVE\n",
OSI_NONE);
}
/* Enable the FRP in HW */
hw_write_enable_frp:
tmp = ops_p->config_frp(osi_core, OSI_ENABLE);
return (ret < 0) ? ret : tmp;
}
/**
 * @brief frp_add_proto - Process and update FRP Command Protocol Entry.
 *
 * Algorithm: For match types that require it (L4 port or VLAN matches),
 * prepend a linked FRP entry that matches the protocol field, chaining
 * to the match entry at the next table position.
 *
 * @param[in] osi_core: OSI core private data structure.
 * @param[in] cmd: OSI FRP command structure.
 * @param[in, out] pos: Pointer to the FRP entry position; advanced past
 *                      the protocol entry when one is added.
 *
 * @retval 0 on success.
 * @retval -1 on failure.
 */
static int frp_add_proto(struct osi_core_priv_data *const osi_core,
			 struct osi_core_frp_cmd *const cmd,
			 unsigned char *pos)
{
	int ret = -1, proto_oki = -1;
	unsigned char proto_entry = OSI_DISABLE;
	unsigned char req = 0U;
	/* Initialize to defined values so the OSI_FRP_MATCH_NORMAL/default
	 * path never leaves them indeterminate (the original relied on the
	 * proto_entry guard, which trips -Wmaybe-uninitialized and MISRA). */
	unsigned char proto_match[FRP_PROTO_LENGTH] = {0U};
	unsigned char proto_length = 0U;
	unsigned char proto_offset = 0U;
	unsigned char match_type = cmd->match_type;

	switch (match_type) {
	/* Identical bodies collapsed into grouped case labels */
	case OSI_FRP_MATCH_L4_S_UPORT:
	case OSI_FRP_MATCH_L4_D_UPORT:
		/* UDP port match needs a leading IP-protocol == UDP entry */
		proto_entry = OSI_ENABLE;
		proto_match[0] = FRP_L4_UDP_MD;
		proto_length = 1U;
		proto_offset = FRP_L4_IP4_PROTO_OFFSET;
		break;
	case OSI_FRP_MATCH_L4_S_TPORT:
	case OSI_FRP_MATCH_L4_D_TPORT:
		/* TCP port match needs a leading IP-protocol == TCP entry */
		proto_entry = OSI_ENABLE;
		proto_match[0] = FRP_L4_TCP_MD;
		proto_length = 1U;
		proto_offset = FRP_L4_IP4_PROTO_OFFSET;
		break;
	case OSI_FRP_MATCH_VLAN:
		/* VLAN match needs a leading EtherType == 802.1Q entry */
		proto_entry = OSI_ENABLE;
		proto_match[0] = FRP_L2_VLAN_MD0;
		proto_match[1] = FRP_L2_VLAN_MD1;
		proto_length = 2U;
		proto_offset = FRP_L2_VLAN_PROTO_OFFSET;
		break;
	case OSI_FRP_MATCH_NORMAL:
	default:
		proto_entry = OSI_DISABLE;
		break;
	}
	/* Check and Add protocol FRP entry */
	if (proto_entry == OSI_ENABLE) {
		/* Check for space: protocol entry plus the match entries */
		req = (unsigned char) (frp_req_entries(cmd->offset, cmd->match_length) + 1U);
		if (*pos > (OSI_FRP_MAX_ENTRY - req)) {
			OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
				     "Fail add FRP protocol entry\n",
				     OSI_NONE);
			return -1;
		}
		/* Add protocol FRP entry linked to the following match entry */
		proto_oki = *pos + 1;
		ret = frp_entry_add(osi_core, cmd->frp_id, *pos,
				    proto_match, proto_length,
				    proto_offset, OSI_FRP_MODE_LINK,
				    proto_oki, cmd->dma_sel);
		if (ret < 0) {
			OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
				     "Fail add FRP protocol entry\n",
				     OSI_NONE);
			return ret;
		}
		/* Increment pos value */
		*pos = (unsigned char) (*pos + 1U);
	}
	return 0;
}
/**
 * @brief frp_parse_mtype - Process and update FRP Command offset.
 *
 * Algorithm: Map the command's match type to its fixed frame offset and
 * store it in cmd->offset; OSI_FRP_MATCH_NORMAL keeps the caller's offset.
 *
 * @param[in] osi_core: OSI core private data structure (unused).
 * @param[in, out] cmd: OSI FRP command structure.
 */
static void frp_parse_mtype(OSI_UNUSED struct osi_core_priv_data *const osi_core,
			    struct osi_core_frp_cmd *const cmd)
{
	unsigned char off;

	switch (cmd->match_type) {
	case OSI_FRP_MATCH_L2_DA:
		off = FRP_L2_DA_OFFSET;
		break;
	case OSI_FRP_MATCH_L2_SA:
		off = FRP_L2_SA_OFFSET;
		break;
	case OSI_FRP_MATCH_L3_SIP:
		off = FRP_L3_IP4_SIP_OFFSET;
		break;
	case OSI_FRP_MATCH_L3_DIP:
		off = FRP_L3_IP4_DIP_OFFSET;
		break;
	case OSI_FRP_MATCH_L4_S_UPORT:
	case OSI_FRP_MATCH_L4_S_TPORT:
		/* UDP and TCP source ports share the same offset */
		off = FRP_L4_IP4_SPORT_OFFSET;
		break;
	case OSI_FRP_MATCH_L4_D_UPORT:
	case OSI_FRP_MATCH_L4_D_TPORT:
		/* UDP and TCP destination ports share the same offset */
		off = FRP_L4_IP4_DPORT_OFFSET;
		break;
	case OSI_FRP_MATCH_VLAN:
		off = FRP_L2_VLAN_TAG_OFFSET;
		break;
	case OSI_FRP_MATCH_NORMAL:
	default:
		/* User-supplied offset is kept as-is */
		off = cmd->offset;
		break;
	}

	cmd->offset = off;
}
/**
 * @brief frp_delete - Process FRP Delete Command.
 *
 * Algorithm: Locate the entries owned by the given FRP ID, compact the
 * table over them, clear the now-unused tail entries, and reprogram HW.
 *
 * @param[in] osi_core: OSI core private data structure.
 * @param[in] ops_p: Core HW operations.
 * @param[in] cmd: OSI FRP command structure.
 *
 * @retval 0 on success.
 * @retval -1 on failure.
 */
static int frp_delete(struct osi_core_priv_data *const osi_core,
		      struct core_ops *ops_p,
		      struct osi_core_frp_cmd *const cmd)
{
	int ret = -1;
	unsigned char i = 0U, pos = 0U, count = 0U;
	int frp_id = cmd->frp_id;
	unsigned int frp_cnt = osi_core->frp_cnt;

	/* Check for FRP entries */
	if (frp_cnt == 0U) {
		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
			     "No FRP entries in the table\n",
			     OSI_NONE);
		return -1;
	}
	/* Find the FRP entry */
	if (frp_entry_find(osi_core, frp_id, &pos, &count) < 0) {
		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
			     "No FRP entry found to delete\n",
			     OSI_NONE);
		return -1;
	}
	/* Validate pos and count */
	if (((unsigned int)pos + count) > frp_cnt) {
		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
			     "Invalid FRP entry index\n",
			     OSI_NONE);
		return -1;
	}
	/* Compact the table: shift every entry above the deleted range down.
	 * Bound is i < frp_cnt; the previous i <= frp_cnt dereferenced
	 * frp_table[frp_cnt], which is out of bounds when the table is full
	 * (frp_cnt == OSI_FRP_MAX_ENTRY). */
	for (i = (unsigned char)(pos + count); i < frp_cnt; i++) {
		frp_entry_copy(&osi_core->frp_table[pos],
			       &osi_core->frp_table[i]);
		pos++;
	}
	/* Clear the stale duplicates left at the tail by the compaction
	 * (also covers a deletion at the very end, where the loop above
	 * runs zero times). */
	osi_memset(&osi_core->frp_table[frp_cnt - count], 0U,
		   (sizeof(struct osi_core_frp_entry) * count));
	/* Write FRP Table into HW.
	 * NOTE(review): frp_hw_write reads osi_core->frp_cnt, which is only
	 * updated below, so the HW NVE is programmed from the pre-delete
	 * count — confirm whether the count should be updated before the
	 * HW write (guarding the frp_cnt == count case in frp_hw_write). */
	ret = frp_hw_write(osi_core, ops_p);
	if (ret < 0) {
		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
			     "Fail to update FRP NVE\n",
			     OSI_NONE);
	}
	/* Update the frp_cnt entry */
	osi_core->frp_cnt = (frp_cnt - count);
	return ret;
}
/**
 * @brief frp_update - Process FRP Update Command.
 *
 * Algorithm: Parse given FRP update command and update it on OSI data and
 * HW. The update is rejected if the new command needs a different number
 * of table entries than the existing one occupies.
 *
 * @param[in] osi_core: OSI core private data structure.
 * @param[in] ops_p: Core HW operations.
 * @param[in] cmd: OSI FRP command structure.
 *
 * @retval 0 on success.
 * @retval -1 on failure.
 */
static int frp_update(struct osi_core_priv_data *const osi_core,
		      struct core_ops *ops_p,
		      struct osi_core_frp_cmd *const cmd)
{
	unsigned char pos = 0U, count = 0U;
	unsigned char needed;
	unsigned char mt;
	int frp_id = cmd->frp_id;
	int ret;

	/* The ID must already exist for an update */
	if (frp_entry_find(osi_core, frp_id, &pos, &count) < 0) {
		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
			     "No FRP entry found\n",
			     OSI_NONE);
		return -1;
	}

	/* Parse match type and update command offset */
	frp_parse_mtype(osi_core, cmd);

	/* Entries required by the new command, plus one linked protocol
	 * entry for L4/VLAN match types */
	needed = frp_req_entries(cmd->offset, cmd->match_length);
	mt = cmd->match_type;
	if ((mt == OSI_FRP_MATCH_L4_S_UPORT) ||
	    (mt == OSI_FRP_MATCH_L4_D_UPORT) ||
	    (mt == OSI_FRP_MATCH_L4_S_TPORT) ||
	    (mt == OSI_FRP_MATCH_L4_D_TPORT) ||
	    (mt == OSI_FRP_MATCH_VLAN)) {
		needed++;
	}

	/* In-place update only works when the footprint is unchanged */
	if (count != needed) {
		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
			     "Old and New required FRP entries mismatch\n",
			     OSI_NONE);
		return -1;
	}

	/* Rewrite the protocol entry (if the match type needs one)... */
	ret = frp_add_proto(osi_core, cmd, &pos);
	if (ret < 0) {
		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
			     "Fail to parse match type\n",
			     OSI_NONE);
		return ret;
	}

	/* ...then rewrite the match-data entry itself */
	ret = frp_entry_add(osi_core, frp_id, pos,
			    cmd->match, cmd->match_length,
			    cmd->offset, cmd->filter_mode,
			    cmd->next_frp_id, cmd->dma_sel);
	if (ret < 0) {
		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
			     "Fail to update FRP entry\n",
			     OSI_NONE);
		return ret;
	}

	/* Write FRP Table into HW */
	ret = frp_hw_write(osi_core, ops_p);
	if (ret < 0) {
		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
			     "Fail to update FRP NVE\n",
			     OSI_NONE);
	}
	return ret;
}
/**
 * @brief frp_add - Process FRP Add Command.
 *
 * Algorithm: Parse given FRP Add command, insert the (optional) protocol
 * entry and the match-data entry at the end of the table, then program HW.
 *
 * @param[in] osi_core: OSI core private data structure.
 * @param[in] ops_p: Core HW operations.
 * @param[in] cmd: OSI FRP command structure.
 *
 * @retval 0 on success.
 * @retval -1 on failure.
 */
static int frp_add(struct osi_core_priv_data *const osi_core,
		   struct core_ops *ops_p,
		   struct osi_core_frp_cmd *const cmd)
{
	int ret = -1;
	unsigned char pos = 0U, count = 0U;
	int frp_id = cmd->frp_id;
	unsigned int nve = osi_core->frp_cnt;
	/* Insertion index kept in a real unsigned char; the previous code
	 * passed (unsigned char *)&nve to frp_add_proto, which accesses only
	 * the first byte of the wider unsigned int — an endian-dependent
	 * type-pun (wrong byte on big-endian targets). */
	unsigned char entry_pos;

	/* Check for MAX FRP entries */
	if (nve >= OSI_FRP_MAX_ENTRY) {
		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND,
			     "FRP entries are full\n",
			     nve);
		return -1;
	}
	entry_pos = (unsigned char)nve;

	/* Check the FRP entry already exists */
	ret = frp_entry_find(osi_core, frp_id, &pos, &count);
	if (ret >= 0) {
		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
			     "FRP entry already exists\n",
			     OSI_NONE);
		return -1;
	}

	/* Parse match type and update command offset */
	frp_parse_mtype(osi_core, cmd);

	/* Process and add FRP Command Protocol Entry; on success entry_pos
	 * is advanced past the inserted entry. */
	ret = frp_add_proto(osi_core, cmd, &entry_pos);
	if (ret < 0) {
		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
			     "Fail to parse match type\n",
			     OSI_NONE);
		return ret;
	}

	/* Add Match data FRP Entry */
	ret = frp_entry_add(osi_core, frp_id, entry_pos,
			    cmd->match, cmd->match_length,
			    cmd->offset, cmd->filter_mode,
			    cmd->next_frp_id, cmd->dma_sel);
	if (ret < 0) {
		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
			     "Fail to add FRP entry\n",
			     nve);
		return ret;
	}

	/* New total: entries consumed so far plus those used by the match
	 * data just added */
	osi_core->frp_cnt = (unsigned int)entry_pos +
			    frp_req_entries(cmd->offset, cmd->match_length);

	/* Write FRP Table into HW */
	ret = frp_hw_write(osi_core, ops_p);
	if (ret < 0) {
		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
			     "Fail to update FRP NVE\n",
			     OSI_NONE);
	}
	return ret;
}
/**
 * @brief setup_frp - Process OSD FRP Command.
 *
 * Algorithm: Dispatch the given FRP command (add/update/delete) to its
 * handler, log the resulting instruction count, and report failures.
 *
 * @param[in] osi_core: OSI core private data structure.
 * @param[in] ops_p: Core HW operations.
 * @param[in] cmd: OSI FRP command structure.
 *
 * @retval 0 on success.
 * @retval -1 on failure.
 */
int setup_frp(struct osi_core_priv_data *const osi_core,
	      struct core_ops *ops_p,
	      struct osi_core_frp_cmd *const cmd)
{
	int ret;

	switch (cmd->cmd) {
	case OSI_FRP_CMD_ADD:
		ret = frp_add(osi_core, ops_p, cmd);
		break;
	case OSI_FRP_CMD_UPDATE:
		ret = frp_update(osi_core, ops_p, cmd);
		break;
	case OSI_FRP_CMD_DEL:
		ret = frp_delete(osi_core, ops_p, cmd);
		break;
	default:
		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
			     "Invalid FRP command\n",
			     cmd->cmd);
		ret = -1;
		break;
	}

	OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
		      "FRP instrctions count\n",
		      osi_core->frp_cnt);
	if (ret < 0) {
		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
			     "FRP command fail\n",
			     cmd->cmd);
	}
	return ret;
}
/**
 * @brief init_frp - Initialize FRP.
 *
 * Algorithm: Clear the entire FRP instruction table and zero the valid
 * entry count.
 *
 * @param[in] osi_core: OSI core private data structure.
 */
void init_frp(struct osi_core_priv_data *const osi_core)
{
	/* No valid instructions yet */
	osi_core->frp_cnt = 0U;
	/* Wipe the full instruction table */
	osi_memset(osi_core->frp_table, 0U,
		   (sizeof(struct osi_core_frp_entry) * OSI_FRP_MAX_ENTRY));
}

View File

@@ -1,84 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef FRP_H
#define FRP_H
#include <osi_common.h>
#include <osi_core.h>
#include "core_local.h"
#define FRP_MD_SIZE (4U)
#define FRP_ME_BYTE (0xFFU)
#define FRP_ME_BYTE_SHIFT (8U)
/* Offset defines for Match data types */
#define FRP_L2_DA_OFFSET 0U
#define FRP_L2_SA_OFFSET 6U
#define FRP_L2_VLAN_TAG_OFFSET 14U
#define FRP_L3_IP4_SIP_OFFSET 26U
#define FRP_L3_IP4_DIP_OFFSET 30U
#define FRP_L4_IP4_SPORT_OFFSET 34U
#define FRP_L4_IP4_DPORT_OFFSET 36U
/* Protocols Match data define values  */
#define FRP_PROTO_LENGTH 2U
#define FRP_L2_VLAN_PROTO_OFFSET 12U
#define FRP_L2_VLAN_MD0 0x81U
#define FRP_L2_VLAN_MD1 0x00U
#define FRP_L4_IP4_PROTO_OFFSET 23U
#define FRP_L4_UDP_MD 17U
#define FRP_L4_TCP_MD 6U
/* Define for FRP Entries offsets and lengths */
#define FRP_OFFSET_BYTES(offset) \
(FRP_MD_SIZE - ((offset) % FRP_MD_SIZE))
/**
* @brief setup_frp - Process OSD FRP Command.
*
* Algorithm: Parse give FRP command and update it on OSI data and HW.
*
* @param[in] osi_core: OSI core private data structure.
* @param[in] cmd: OSI FRP command structure.
*
* @retval 0 on success.
* @retval -1 on failure.
*/
int setup_frp(struct osi_core_priv_data *const osi_core,
struct core_ops *ops_p,
struct osi_core_frp_cmd *const cmd);
/**
 * @brief init_frp - Init the FRP Instruction Table.
 *
 * Resets the FRP table contents and zeroes the valid entry count.
 *
 * @param[in] osi_core: OSI core private data structure.
 *
 * @note
 * 1) MAC and PHY should be init and started. see osi_start_mac()
 */
void init_frp(struct osi_core_priv_data *const osi_core);
#endif /* FRP_H */

View File

@@ -1,689 +0,0 @@
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <osi_common.h>
#include <osi_core.h>
#include <ivc_core.h>
#include <osi_macsec.h>
#include "eqos_core.h"
#include "eqos_mmc.h"
#include "core_local.h"
#include "../osi/common/common.h"
#include "macsec.h"
/**
* @brief ivc_safety_config - EQOS MAC core safety configuration
*/
static struct core_func_safety ivc_safety_config;
/**
 * @brief ivc_handle_ioctl - marshal a runtime command and forward it over IVC
 *
 * @param[in] osi_core: OSI core private data structure.
 * @param[in, out] data: OSI IOCTL data structure; overwritten with the reply.
 *
 * @note MAC should be init and started. see osi_start_mac()
 *
 * @retval data from PHY register on success
 * @retval -1 on failure
 */
static nve32_t ivc_handle_ioctl(struct osi_core_priv_data *osi_core,
				struct osi_ioctl *data)
{
	ivc_msg_common_t m;
	nve32_t ret;

	osi_memset(&m, 0, sizeof(m));
	m.cmd = handle_ioctl;
	/* Whole ioctl payload travels inside the IVC message */
	m.status = osi_memcpy((void *)&m.data.ioctl_data,
			      (void *)data,
			      sizeof(struct osi_ioctl));

	/* PTP configuration is shipped alongside a CONFIG_PTP command */
	if (data->cmd == OSI_CMD_CONFIG_PTP) {
		osi_memcpy((void *)&m.data.ioctl_data.ptp_config,
			   (void *)&osi_core->ptp_config,
			   sizeof(struct osi_ptp_config));
	}

	ret = osi_core->osd_ops.ivc_send(osi_core, &m, sizeof(m));

	/* Copy the server's reply back: MMC counters for READ_MMC,
	 * the ioctl payload for everything else */
	if (data->cmd == OSI_CMD_READ_MMC) {
		m.status = osi_memcpy((void *)&osi_core->mmc,
				      (void *)&m.data.mmc,
				      sizeof(struct osi_mmc_counters));
	} else {
		m.status = osi_memcpy((void *)data,
				      (void *)&m.data.ioctl_data,
				      sizeof(struct osi_ioctl));
	}

	return ret;
}
/**
 * @brief ivc_core_init - MAC, MTL and common DMA init via the IVC server
 *
 * @param[in] osi_core: OSI core private data structure.
 * @param[in] tx_fifo_size: MTL TX FIFO size (unused; server decides).
 * @param[in] rx_fifo_size: MTL RX FIFO size (unused; server decides).
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
static nve32_t ivc_core_init(struct osi_core_priv_data *const osi_core,
			     OSI_UNUSED nveu32_t tx_fifo_size,
			     OSI_UNUSED nveu32_t rx_fifo_size)
{
	ivc_msg_common_t m;

	osi_memset(&m, 0, sizeof(m));
	m.cmd = core_init;

	return osi_core->osd_ops.ivc_send(osi_core, &m, sizeof(m));
}
/**
 * @brief ivc_core_deinit - MAC core deinitialization via IVC
 *
 * Sends a STOP_MAC ioctl to the IVC server.
 *
 * @param[in] osi_core: OSI core private data structure.
 *
 * @note Required clks and resets has to be enabled
 */
static void ivc_core_deinit(struct osi_core_priv_data *const osi_core)
{
	ivc_msg_common_t m;
	nve32_t rc;

	osi_memset(&m, 0, sizeof(m));
	m.cmd = handle_ioctl;
	m.data.ioctl_data.cmd = OSI_CMD_STOP_MAC;

	rc = osi_core->osd_ops.ivc_send(osi_core, &m, sizeof(m));
	if (rc < 0) {
		/* no recovery possible during deinit */
	}
}
/**
 * @brief ivc_write_phy_reg - Write to a PHY register through MAC over MDIO bus
 *
 * @param[in] osi_core: OSI core private data structure.
 * @param[in] phyaddr: PHY address (PHY ID) associated with PHY
 * @param[in] phyreg: Register which needs to be write to PHY.
 * @param[in] phydata: Data to write to a PHY register.
 *
 * @note MAC should be init and started. see osi_start_mac()
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
static nve32_t ivc_write_phy_reg(struct osi_core_priv_data *const osi_core,
				 const nveu32_t phyaddr,
				 const nveu32_t phyreg,
				 const nveu16_t phydata)
{
	ivc_msg_common_t m;

	osi_memset(&m, 0, sizeof(m));
	m.cmd = write_phy_reg;
	/* Argument order is part of the IVC protocol: addr, reg, data */
	m.data.args.arguments[0] = phyaddr;
	m.data.args.arguments[1] = phyreg;
	m.data.args.arguments[2] = phydata;
	m.data.args.count = 3U;

	return osi_core->osd_ops.ivc_send(osi_core, &m, sizeof(m));
}
/**
 * @brief ivc_read_phy_reg - Read from a PHY register through MAC over MDIO bus
 *
 * @param[in] osi_core: OSI core private data structure.
 * @param[in] phyaddr: PHY address (PHY ID) associated with PHY
 * @param[in] phyreg: Register which needs to be read from PHY.
 *
 * @note MAC should be init and started. see osi_start_mac()
 *
 * @retval data from PHY register on success
 * @retval -1 on failure
 */
static nve32_t ivc_read_phy_reg(struct osi_core_priv_data *const osi_core,
				const nveu32_t phyaddr,
				const nveu32_t phyreg)
{
	ivc_msg_common_t m;

	osi_memset(&m, 0, sizeof(m));
	m.cmd = read_phy_reg;
	/* Argument order is part of the IVC protocol: addr, reg */
	m.data.args.arguments[0] = phyaddr;
	m.data.args.arguments[1] = phyreg;
	m.data.args.count = 2U;

	return osi_core->osd_ops.ivc_send(osi_core, &m, sizeof(m));
}
#ifdef MACSEC_SUPPORT
/**
 * @brief ivc_macsec_dbg_events_config - Configure Debug events
 *
 * Ships the debug-event configuration over IVC and mirrors the server's
 * reply back into the caller's buffer.
 *
 * @param[in] osi_core: OSI core private data structure.
 * @param[in, out] dbg_buf_config: Config Buffer
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
static int ivc_macsec_dbg_events_config(
		struct osi_core_priv_data *const osi_core,
		struct osi_macsec_dbg_buf_config *const dbg_buf_config)
{
	ivc_msg_common_t m;
	nve32_t rc;

	osi_memset(&m, 0, sizeof(m));
	m.cmd = dbg_events_config_macsec;
	m.status = osi_memcpy((void *)&m.data.dbg_buf_config,
			      (void *)dbg_buf_config,
			      sizeof(struct osi_macsec_dbg_buf_config));

	rc = osi_core->osd_ops.ivc_send(osi_core, &m, sizeof(m));
	if (rc != 0) {
		return rc;
	}

	/* Reflect the server's answer back to the caller */
	m.status = osi_memcpy((void *)dbg_buf_config,
			      (void *)&m.data.dbg_buf_config,
			      sizeof(struct osi_macsec_dbg_buf_config));
	return rc;
}
/**
 * @brief ivc_macsec_dbg_buf_config - Read/Write debug buffers.
 *
 * Ships the debug-buffer configuration over IVC and mirrors the server's
 * reply back into the caller's buffer.
 *
 * @param[in] osi_core: OSI Core private data structure.
 * @param[in, out] dbg_buf_config: Pointer to debug buffer config data structure.
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
static int ivc_macsec_dbg_buf_config(
		struct osi_core_priv_data *const osi_core,
		struct osi_macsec_dbg_buf_config *const dbg_buf_config)
{
	ivc_msg_common_t m;
	nve32_t rc;

	osi_memset(&m, 0, sizeof(m));
	m.cmd = dbg_buf_config_macsec;
	m.status = osi_memcpy((void *)&m.data.dbg_buf_config,
			      (void *)dbg_buf_config,
			      sizeof(struct osi_macsec_dbg_buf_config));

	rc = osi_core->osd_ops.ivc_send(osi_core, &m, sizeof(m));
	if (rc != 0) {
		return rc;
	}

	/* Reflect the server's answer back to the caller */
	m.status = osi_memcpy((void *)dbg_buf_config,
			      (void *)&m.data.dbg_buf_config,
			      sizeof(struct osi_macsec_dbg_buf_config));
	return rc;
}
/**
 * @brief ivc_macsec_read_mmc - Fetch MACsec statistics via IVC
 *
 * Requests the MACsec MMC counters and IRQ statistics from the server and
 * mirrors them into the OSI core private data.
 *
 * @param[in] osi_core: OSI core private data structure.
 *
 * @note
 * 1) MAC/MACSEC should be init and started.
 */
static void ivc_macsec_read_mmc(struct osi_core_priv_data *const osi_core)
{
	ivc_msg_common_t m;

	osi_memset(&m, 0, sizeof(m));
	m.cmd = read_mmc_macsec;
	m.status = osi_core->osd_ops.ivc_send(osi_core, &m, sizeof(m));

	/* Mirror counters and IRQ statistics into the OSI core copies */
	m.status = osi_memcpy((void *)&osi_core->macsec_mmc,
			      (void *)&m.data.macsec_mmc,
			      sizeof(struct osi_macsec_mmc_counters));
	m.status = osi_memcpy((void *)&osi_core->macsec_irq_stats,
			      (void *)&m.data.macsec_irq_stats,
			      sizeof(struct osi_macsec_irq_stats));
}
/**
 * @brief ivc_get_sc_lut_key_index - Macsec get Key_index
 *
 * Looks up the key-table index for a given SCI on the IVC server.
 *
 * @param[in] osi_core: OSI Core private data structure.
 * @param[in] sci: Secure Channel Identifier (OSI_SCI_LEN bytes).
 * @param[out] key_index: Key table index returned by the server.
 * @param[in] ctlr: Controller instance.
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
static int ivc_get_sc_lut_key_index(struct osi_core_priv_data *const osi_core,
				    nveu8_t *sci, nveu32_t *key_index,
				    nveu16_t ctlr)
{
	ivc_msg_common_t m;
	nve32_t rc;

	osi_memset(&m, 0, sizeof(m));
	m.cmd = macsec_get_sc_lut_key_index;
	m.status = osi_memcpy((void *)&m.data.macsec_cfg.sci,
			      (void *)sci,
			      OSI_SCI_LEN);
	m.data.macsec_cfg.ctlr = ctlr;

	rc = osi_core->osd_ops.ivc_send(osi_core, &m, sizeof(m));
	if (rc != 0) {
		return rc;
	}

	*key_index = m.data.macsec_cfg.key_index;
	return rc;
}
/**
 * @brief ivc_macsec_config - Mac sec config.
 *
 * Ships the secure-channel configuration over IVC; the server returns the
 * key-table index to program the SAK into.
 *
 * @param[in] osi_core: OSI Core private data structure.
 * @param[in] sc: Secure Channel info.
 * @param[in] enable: enable or disable.
 * @param[in] ctlr: Controller instance.
 * @param[out] kt_idx: Key table index to program SAK.
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
static int ivc_macsec_config(struct osi_core_priv_data *const osi_core,
			     struct osi_macsec_sc_info *const sc,
			     unsigned int enable, unsigned short ctlr,
			     unsigned short *kt_idx)
{
	ivc_msg_common_t m;
	nve32_t rc;

	osi_memset(&m, 0, sizeof(m));
	m.cmd = config_macsec;
	m.status = osi_memcpy((void *)&m.data.macsec_cfg.sc_info,
			      (void *)sc,
			      sizeof(struct osi_macsec_sc_info));
	m.data.macsec_cfg.enable = enable;
	m.data.macsec_cfg.ctlr = ctlr;
	m.data.macsec_cfg.kt_idx = *kt_idx;

	rc = osi_core->osd_ops.ivc_send(osi_core, &m, sizeof(m));
	if (rc != 0) {
		return rc;
	}

	*kt_idx = m.data.macsec_cfg.kt_idx;
	return rc;
}
/**
 * @brief ivc_macsec_update_mtu - Update MACSEC mtu.
 *
 * @param[in] osi_core: OSI Core private data structure.
 * @param[in] mtu: MACSEC MTU len.
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
static nve32_t ivc_macsec_update_mtu(struct osi_core_priv_data *const osi_core,
				     nveu32_t mtu)
{
	ivc_msg_common_t m;

	osi_memset(&m, 0, sizeof(m));
	m.cmd = macsec_update_mtu_size;
	m.data.args.arguments[0] = mtu;
	m.data.args.count = 1U;

	return osi_core->osd_ops.ivc_send(osi_core, &m, sizeof(m));
}
/**
 * @brief ivc_macsec_enable - Enable or disable Macsec.
 *
 * @param[in] osi_core: OSI Core private data structure.
 * @param[in] enable: Enable or Disable Macsec.
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
static int ivc_macsec_enable(struct osi_core_priv_data *const osi_core,
			     unsigned int enable)
{
	ivc_msg_common_t m;

	osi_memset(&m, 0, sizeof(m));
	m.cmd = en_macsec;
	m.data.args.arguments[0] = enable;
	m.data.args.count = 1U;

	return osi_core->osd_ops.ivc_send(osi_core, &m, sizeof(m));
}
/**
 * @brief ivc_macsec_loopback_config - Loopback configure.
 *
 * @param[in] osi_core: OSI Core private data structure.
 * @param[in] enable: Enable or disable loopback.
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
static int ivc_macsec_loopback_config(struct osi_core_priv_data *const osi_core,
				      unsigned int enable)
{
	ivc_msg_common_t m;

	osi_memset(&m, 0, sizeof(m));
	m.cmd = loopback_config_macsec;
	m.data.args.arguments[0] = enable;
	m.data.args.count = 1U;

	return osi_core->osd_ops.ivc_send(osi_core, &m, sizeof(m));
}
#ifdef MACSEC_KEY_PROGRAM
/**
 * @brief ivc_macsec_kt_config - MacSec KT configure.
 *
 * Ships the key-table configuration over IVC and mirrors the server's
 * reply back into the caller's buffer.
 *
 * @param[in] osi_core: OSI Core private data structure.
 * @param[in, out] kt_config: KT config structure.
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
static nve32_t ivc_macsec_kt_config(struct osi_core_priv_data *const osi_core,
				    struct osi_macsec_kt_config *const kt_config)
{
	ivc_msg_common_t m;
	nve32_t rc;

	osi_memset(&m, 0, sizeof(m));
	m.cmd = kt_config_macsec;
	m.status = osi_memcpy((void *)&m.data.kt_config,
			      (void *)kt_config,
			      sizeof(struct osi_macsec_kt_config));

	rc = osi_core->osd_ops.ivc_send(osi_core, &m, sizeof(m));
	if (rc != 0) {
		return rc;
	}

	/* Reflect the server's answer back to the caller */
	m.status = osi_memcpy((void *)kt_config,
			      (void *)&m.data.kt_config,
			      sizeof(struct osi_macsec_kt_config));
	return rc;
}
#endif /* MACSEC_KEY_PROGRAM */
/**
 * @brief ivc_macsec_cipher_config - cipher configure.
 *
 * @param[in] osi_core: OSI Core private data structure.
 * @param[in] cipher: value of cipher.
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
static int ivc_macsec_cipher_config(struct osi_core_priv_data *const osi_core,
				    unsigned int cipher)
{
	ivc_msg_common_t m;

	osi_memset(&m, 0, sizeof(m));
	m.cmd = cipher_config;
	m.data.args.arguments[0] = cipher;
	m.data.args.count = 1U;

	return osi_core->osd_ops.ivc_send(osi_core, &m, sizeof(m));
}
/**
 * @brief ivc_macsec_lut_config - LUT config.
 *
 * Ships the LUT configuration over IVC and mirrors the server's reply
 * back into the caller's buffer.
 *
 * @param[in] osi_core: OSI Core private data structure.
 * @param[in, out] lut_config: lut data structure.
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
static nve32_t ivc_macsec_lut_config(struct osi_core_priv_data *const osi_core,
				     struct osi_macsec_lut_config *const lut_config)
{
	ivc_msg_common_t m;
	nve32_t rc;

	osi_memset(&m, 0, sizeof(m));
	m.cmd = lut_config_macsec;
	m.status = osi_memcpy((void *)&m.data.lut_config,
			      (void *)lut_config,
			      sizeof(struct osi_macsec_lut_config));

	rc = osi_core->osd_ops.ivc_send(osi_core, &m, sizeof(m));
	if (rc != 0) {
		return rc;
	}

	/* Reflect the server's answer back to the caller */
	m.status = osi_memcpy((void *)lut_config,
			      (void *)&m.data.lut_config,
			      sizeof(struct osi_macsec_lut_config));
	return rc;
}
/**
 * @brief ivc_macsec_handle_s_irq - handle s irq.
 *
 * Secure-IRQ servicing lives on the IVC server side; this stub only logs.
 *
 * @param[in] osi_core: OSI Core private data structure.
 */
static void ivc_macsec_handle_s_irq(OSI_UNUSED
				    struct osi_core_priv_data *const osi_core)
{
	OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID,
		      "Nothing to handle \n", 0ULL);
}
/**
 * @brief ivc_macsec_handle_ns_irq - handle ns irq.
 *
 * Non-secure-IRQ servicing lives on the IVC server side; this stub only logs.
 *
 * @param[in] osi_core: OSI Core private data structure.
 */
static void ivc_macsec_handle_ns_irq(OSI_UNUSED
				     struct osi_core_priv_data *const osi_core)
{
	OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID,
		      "Nothing to handle \n", 0ULL);
}
/**
 * @brief ivc_macsec_deinit - De Initialize.
 *
 * @param[in] osi_core: OSI Core private data structure.
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
static int ivc_macsec_deinit(struct osi_core_priv_data *const osi_core)
{
	ivc_msg_common_t m;

	osi_memset(&m, 0, sizeof(m));
	m.cmd = deinit_macsec;

	return osi_core->osd_ops.ivc_send(osi_core, &m, sizeof(m));
}
/**
 * @brief ivc_macsec_init - Initialize MACsec via the IVC server.
 *
 * @param[in] osi_core: OSI Core private data structure.
 * @param[in] mtu: MTU to configure MACsec with.
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
static int ivc_macsec_init(struct osi_core_priv_data *const osi_core,
			   nveu32_t mtu)
{
	ivc_msg_common_t m;

	osi_memset(&m, 0, sizeof(m));
	m.cmd = init_macsec;
	m.data.args.arguments[0] = mtu;
	m.data.args.count = 1U;

	return osi_core->osd_ops.ivc_send(osi_core, &m, sizeof(m));
}
/**
 * @brief ivc_init_macsec_ops - Initialize IVC MACsec operations.
 *
 * Populates the osi_macsec_core_ops table with the IVC-backed handlers.
 *
 * @param[in, out] macsecops: pointer to a struct osi_macsec_core_ops.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: No
 * - De-initialization: No
 */
void ivc_init_macsec_ops(void *macsecops)
{
	struct osi_macsec_core_ops *ops =
		(struct osi_macsec_core_ops *)macsecops;

	ops->init = ivc_macsec_init;
	ops->deinit = ivc_macsec_deinit;
	ops->handle_ns_irq = ivc_macsec_handle_ns_irq;
	ops->handle_s_irq = ivc_macsec_handle_s_irq;
	ops->lut_config = ivc_macsec_lut_config;
#ifdef MACSEC_KEY_PROGRAM
	ops->kt_config = ivc_macsec_kt_config;
#endif /* MACSEC_KEY_PROGRAM */
	ops->cipher_config = ivc_macsec_cipher_config;
	ops->loopback_config = ivc_macsec_loopback_config;
	ops->macsec_en = ivc_macsec_enable;
	ops->config = ivc_macsec_config;
	ops->read_mmc = ivc_macsec_read_mmc;
	ops->dbg_buf_config = ivc_macsec_dbg_buf_config;
	ops->dbg_events_config = ivc_macsec_dbg_events_config;
	ops->get_sc_lut_key_index = ivc_get_sc_lut_key_index;
	ops->update_mtu = ivc_macsec_update_mtu;
}
#endif
/**
 * @brief ivc_get_core_safety_config - return the IVC safety configuration
 *
 * @retval pointer to the file-scope ivc_safety_config object
 */
void *ivc_get_core_safety_config(void)
{
	return &ivc_safety_config;
}
/**
 * @brief vir_ivc_core_deinit - MAC core deinitialization
 *
 * @param[in] osi_core: OSI core private data structure.
 *
 * @note Required clks and resets has to be enabled
 *
 * @retval Return 0
 */
static nve32_t vir_ivc_core_deinit(struct osi_core_priv_data *const osi_core)
{
	/* Delegate to the IVC deinit helper; it reports no failure */
	ivc_core_deinit(osi_core);
	return 0;
}
/**
 * @brief vir_ivc_init_core_ops - core ops initialization
 *
 * Intentionally a no-op: the ethernet server maintains the core ops
 * locally on its side of the IVC channel.
 *
 * @param[in] osi_core: OSI core private data structure (unused).
 *
 * @retval Return 0
 */
static nve32_t vir_ivc_init_core_ops(OSI_UNUSED
				     struct osi_core_priv_data *const osi_core)
{
	return 0;
}
/**
 * @brief ivc_interface_init_core_ops - wire the interface ops to IVC handlers
 *
 * @param[in, out] if_ops_p: interface core ops table to populate.
 */
void ivc_interface_init_core_ops(struct if_core_ops *if_ops_p)
{
	if_ops_p->if_core_init = ivc_core_init;
	if_ops_p->if_core_deinit = vir_ivc_core_deinit;
	if_ops_p->if_write_phy_reg = ivc_write_phy_reg;
	if_ops_p->if_read_phy_reg = ivc_read_phy_reg;
	if_ops_p->if_init_core_ops = vir_ivc_init_core_ops;
	if_ops_p->if_handle_ioctl = ivc_handle_ioctl;
}

View File

@@ -1,49 +0,0 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# libnvethernetrm interface export
#
###############################################################################
osi_init_core_ops
osi_write_phy_reg
osi_read_phy_reg
osi_hw_core_init
osi_hw_core_deinit
osi_get_core
osi_handle_ioctl
#Below need to be enabled when MACSEC is enabled
#osi_macsec_en
#osi_macsec_deinit
#osi_macsec_ns_isr
#osi_macsec_s_isr
#osi_macsec_init
#osi_macsec_cipher_config
#osi_macsec_config
#osi_init_macsec_ops
#osi_macsec_config_lut
#osi_macsec_loopback
#osi_macsec_read_mmc
#osi_macsec_config_dbg_buf
#osi_macsec_dbg_events_config
#osi_macsec_config_kt
#osi_macsec_get_sc_lut_key_index
#osi_macsec_update_mtu

View File

File diff suppressed because it is too large Load Diff

View File

@@ -1,446 +0,0 @@
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_MACSEC_H
#define INCLUDED_MACSEC_H
#ifdef DEBUG_MACSEC
#define HKEY2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5], (a)[6], (a)[7], (a)[8], (a)[9], (a)[10], (a)[11], (a)[12], (a)[13], (a)[14], (a)[15]
#define HKEYSTR "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"
#define KEY2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5], (a)[6], (a)[7], (a)[8], (a)[9], (a)[10], (a)[11], (a)[12], (a)[13], (a)[14], (a)[15]
#define KEYSTR "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"
#endif /* DEBUG_MACSEC */
#define MAX_U64_VAL 0xFFFFFFFFFFFFFFFFU
/*
 * Post-increment @a a with explicit wrap handling (CERT INT30-C):
 * increment while below MAX_U64_VAL, otherwise wrap back to 0.
 *
 * Wrapped in do { } while (0) (CERT PRE10-C) so the macro expands to a
 * single statement and is safe inside un-braced if/else.  This also
 * removes the dangling trailing line-continuation of the old definition,
 * which silently spliced the source line following the macro into the
 * #define directive.
 */
#define CERT_C__POST_INC__U64(a)\
do {\
	if ((a) < MAX_U64_VAL) {\
		(a)++;\
	} else {\
		(a) = 0;\
	}\
} while (0)
/**
* @addtogroup MACsec AMAP
*
* @brief MACsec controller register offsets
* @{
*/
#define MACSEC_GCM_KEYTABLE_CONFIG 0x0000
#define MACSEC_GCM_KEYTABLE_DATA(x) ((0x0004U) + ((x) * 4U))
#define MACSEC_RX_ICV_ERR_CNTRL 0x4000
#define MACSEC_INTERRUPT_COMMON_SR 0x4004
#define MACSEC_TX_IMR 0x4008
#define MACSEC_TX_ISR 0x400C
#define MACSEC_RX_IMR 0x4048
#define MACSEC_RX_ISR 0x404C
#define MACSEC_TX_SC_PN_THRESHOLD_STATUS0_0 0x4018
#define MACSEC_TX_SC_PN_THRESHOLD_STATUS1_0 0x401C
#define MACSEC_TX_SC_PN_EXHAUSTED_STATUS0_0 0x4024
#define MACSEC_TX_SC_PN_EXHAUSTED_STATUS1_0 0x4028
#define MACSEC_TX_SC_ERROR_INTERRUPT_STATUS_0 0x402C
#define MACSEC_RX_SC_PN_EXHAUSTED_STATUS0_0 0x405C
#define MACSEC_RX_SC_PN_EXHAUSTED_STATUS1_0 0x4060
#define MACSEC_RX_SC_REPLAY_ERROR_STATUS0_0 0x4090
#define MACSEC_RX_SC_REPLAY_ERROR_STATUS1_0 0x4094
#define MACSEC_STATS_CONTROL_0 0x900C
#define MACSEC_TX_PKTS_UNTG_LO_0 0x9010
#define MACSEC_TX_OCTETS_PRTCTD_LO_0 0x9018
#define MACSEC_TX_PKTS_TOO_LONG_LO_0 0x9020
#define MACSEC_TX_PKTS_PROTECTED_SCx_LO_0(x) ((0x9028UL) + ((x) * 8UL))
#define MACSEC_RX_PKTS_NOTG_LO_0 0x90B0
#define MACSEC_RX_PKTS_UNTG_LO_0 0x90A8
#define MACSEC_RX_PKTS_BADTAG_LO_0 0x90B8
#define MACSEC_RX_PKTS_NOSA_LO_0 0x90C0
#define MACSEC_RX_PKTS_NOSAERROR_LO_0 0x90C8
#define MACSEC_RX_PKTS_OVRRUN_LO_0 0x90D0
#define MACSEC_RX_OCTETS_VLDTD_LO_0 0x90D8
#define MACSEC_RX_PKTS_LATE_SCx_LO_0(x) ((0x90E0UL) + ((x) * 8UL))
#define MACSEC_RX_PKTS_NOTVALID_SCx_LO_0(x) ((0x9160UL) + ((x) * 8UL))
#define MACSEC_RX_PKTS_OK_SCx_LO_0(x) ((0x91E0UL) + ((x) * 8UL))
#define MACSEC_CONTROL0 0xD000
#define MACSEC_LUT_CONFIG 0xD004
#define MACSEC_LUT_DATA(x) ((0xD008U) + ((x) * 4U))
#define MACSEC_TX_BYP_LUT_VALID 0xD024
#define MACSEC_TX_SCI_LUT_VALID 0xD028
#define MACSEC_RX_BYP_LUT_VALID 0xD02C
#define MACSEC_RX_SCI_LUT_VALID 0xD030
#define MACSEC_COMMON_IMR 0xD054
#define MACSEC_COMMON_ISR 0xD058
#define MACSEC_TX_SC_KEY_INVALID_STS0_0 0xD064
#define MACSEC_TX_SC_KEY_INVALID_STS1_0 0xD068
#define MACSEC_RX_SC_KEY_INVALID_STS0_0 0xD080
#define MACSEC_RX_SC_KEY_INVALID_STS1_0 0xD084
#define MACSEC_TX_DEBUG_CONTROL_0 0xD098
#define MACSEC_TX_DEBUG_TRIGGER_EN_0 0xD09C
#define MACSEC_TX_DEBUG_STATUS_0 0xD0C4
#define MACSEC_DEBUG_BUF_CONFIG_0 0xD0C8
#define MACSEC_DEBUG_BUF_DATA_0(x) ((0xD0CCU) + ((x) * 4U))
#define MACSEC_RX_DEBUG_CONTROL_0 0xD0DC
#define MACSEC_RX_DEBUG_TRIGGER_EN_0 0xD0E0
#define MACSEC_RX_DEBUG_STATUS_0 0xD0F8
#define MACSEC_CONTROL1 0xE000
#define MACSEC_GCM_AES_CONTROL_0 0xE004
#define MACSEC_TX_MTU_LEN 0xE008
#define MACSEC_TX_SOT_DELAY 0xE010
#define MACSEC_RX_MTU_LEN 0xE014
#define MACSEC_RX_SOT_DELAY 0xE01C
/** @} */
/**
* @addtogroup MACSEC_GCM_KEYTABLE_CONFIG register
*
* @brief Bit definitions of MACSEC_GCM_KEYTABLE_CONFIG register
* @{
*/
#define MACSEC_KT_CONFIG_UPDATE OSI_BIT(31)
#define MACSEC_KT_CONFIG_CTLR_SEL OSI_BIT(25)
#define MACSEC_KT_CONFIG_RW OSI_BIT(24)
#define MACSEC_KT_CONFIG_INDEX_MASK (OSI_BIT(4) | OSI_BIT(3) | OSI_BIT(2) |\
OSI_BIT(1) | OSI_BIT(0))
#define MACSEC_KT_ENTRY_VALID OSI_BIT(0)
/** @} */
/**
* @addtogroup MACSEC_GCM_KEYTABLE_DATA registers
*
* @brief Bit definitions of MACSEC_GCM_KEYTABLE_DATA register & helpful macros
* @{
*/
#define MACSEC_KT_DATA_REG_CNT 13U
#define MACSEC_KT_DATA_REG_SAK_CNT 8U
#define MACSEC_KT_DATA_REG_H_CNT 4U
/** @} */
/**
* @addtogroup MACSEC_LUT_CONFIG register
*
* @brief Bit definitions of MACSEC_LUT_CONFIG register
* @{
*/
#define MACSEC_LUT_CONFIG_UPDATE OSI_BIT(31)
#define MACSEC_LUT_CONFIG_CTLR_SEL OSI_BIT(25)
#define MACSEC_LUT_CONFIG_RW OSI_BIT(24)
#define MACSEC_LUT_CONFIG_LUT_SEL_MASK (OSI_BIT(18) | OSI_BIT(17) |\
OSI_BIT(16))
#define MACSEC_LUT_CONFIG_LUT_SEL_SHIFT 16
#define MACSEC_LUT_CONFIG_INDEX_MASK (OSI_BIT(4) | OSI_BIT(3) | OSI_BIT(2) |\
OSI_BIT(1) | OSI_BIT(0))
/** @} */
/**
* @addtogroup INTERRUPT_COMMON_STATUS register
*
* @brief Bit definitions of MACSEC_INTERRUPT_COMMON_STATUS register
* @{
*/
#define MACSEC_COMMON_SR_SFTY_ERR OSI_BIT(2)
#define MACSEC_COMMON_SR_RX OSI_BIT(1)
#define MACSEC_COMMON_SR_TX OSI_BIT(0)
/** @} */
/**
* @addtogroup MACSEC_CONTROL0 register
*
* @brief Bit definitions of MACSEC_CONTROL0 register
* @{
*/
#define MACSEC_TX_LKUP_MISS_NS_INTR OSI_BIT(24)
#define MACSEC_RX_LKUP_MISS_NS_INTR OSI_BIT(23)
#define MACSEC_VALIDATE_FRAMES_MASK (OSI_BIT(22) | OSI_BIT(21))
#define MACSEC_VALIDATE_FRAMES_STRICT OSI_BIT(22)
#define MACSEC_RX_REPLAY_PROT_EN OSI_BIT(20)
#define MACSEC_RX_LKUP_MISS_BYPASS OSI_BIT(19)
#define MACSEC_RX_EN OSI_BIT(16)
#define MACSEC_TX_LKUP_MISS_BYPASS OSI_BIT(3)
#define MACSEC_TX_EN OSI_BIT(0)
/** @} */
/**
* @addtogroup MACSEC_CONTROL1 register
*
* @brief Bit definitions of MACSEC_CONTROL1 register
* @{
*/
#define MACSEC_LOOPBACK_MODE_EN OSI_BIT(31)
#define MACSEC_RX_MTU_CHECK_EN OSI_BIT(16)
#define MACSEC_TX_LUT_PRIO_BYP OSI_BIT(2)
#define MACSEC_TX_MTU_CHECK_EN OSI_BIT(0)
/** @} */
/**
* @addtogroup MACSEC_GCM_AES_CONTROL_0 register
*
* @brief Bit definitions of MACSEC_GCM_AES_CONTROL_0 register
* @{
*/
#define MACSEC_RX_AES_MODE_MASK (OSI_BIT(17) | OSI_BIT(16))
#define MACSEC_RX_AES_MODE_AES128 0x0U
#define MACSEC_RX_AES_MODE_AES256 OSI_BIT(17)
#define MACSEC_TX_AES_MODE_MASK (OSI_BIT(1) | OSI_BIT(0))
#define MACSEC_TX_AES_MODE_AES128 0x0U
#define MACSEC_TX_AES_MODE_AES256 OSI_BIT(1)
/** @} */
/**
* @addtogroup MACSEC_COMMON_IMR register
*
* @brief Bit definitions of MACSEC_INTERRUPT_MASK register
* @{
*/
#define MACSEC_SECURE_REG_VIOL_INT_EN OSI_BIT(31)
#define MACSEC_RX_UNINIT_KEY_SLOT_INT_EN OSI_BIT(17)
#define MACSEC_RX_LKUP_MISS_INT_EN OSI_BIT(16)
#define MACSEC_TX_UNINIT_KEY_SLOT_INT_EN OSI_BIT(1)
#define MACSEC_TX_LKUP_MISS_INT_EN OSI_BIT(0)
/** @} */
/**
* @addtogroup MACSEC_TX_IMR register
*
* @brief Bit definitions of TX_INTERRUPT_MASK register
* @{
*/
#define MACSEC_TX_DBG_BUF_CAPTURE_DONE_INT_EN OSI_BIT(22)
#define MACSEC_TX_MTU_CHECK_FAIL_INT_EN OSI_BIT(19)
#define MACSEC_TX_AES_GCM_BUF_OVF_INT_EN OSI_BIT(18)
#define MACSEC_TX_SC_AN_NOT_VALID_INT_EN OSI_BIT(17)
#define MACSEC_TX_MAC_CRC_ERROR_INT_EN OSI_BIT(16)
#define MACSEC_TX_PN_EXHAUSTED_INT_EN OSI_BIT(1)
#define MACSEC_TX_PN_THRSHLD_RCHD_INT_EN OSI_BIT(0)
/** @} */
/**
* @addtogroup MACSEC_RX_IMR register
*
* @brief Bit definitions of RX_INTERRUPT_MASK register
* @{
*/
#define MACSEC_RX_DBG_BUF_CAPTURE_DONE_INT_EN OSI_BIT(22)
#define MACSEC_RX_ICV_ERROR_INT_EN OSI_BIT(21)
#define RX_REPLAY_ERROR_INT_EN OSI_BIT(20)
#define MACSEC_RX_MTU_CHECK_FAIL_INT_EN OSI_BIT(19)
#define MACSEC_RX_AES_GCM_BUF_OVF_INT_EN OSI_BIT(18)
#define MACSEC_RX_MAC_CRC_ERROR_INT_EN OSI_BIT(16)
#define MACSEC_RX_PN_EXHAUSTED_INT_EN OSI_BIT(1)
/** @} */
/**
* @addtogroup MACSEC_COMMON_ISR register
*
* @brief Bit definitions of MACSEC_INTERRUPT_STATUS register
* @{
*/
#define MACSEC_SECURE_REG_VIOL OSI_BIT(31)
#define MACSEC_RX_UNINIT_KEY_SLOT OSI_BIT(17)
#define MACSEC_RX_LKUP_MISS OSI_BIT(16)
#define MACSEC_TX_UNINIT_KEY_SLOT OSI_BIT(1)
#define MACSEC_TX_LKUP_MISS OSI_BIT(0)
/** @} */
/**
* @addtogroup MACSEC_TX_ISR register
*
* @brief Bit definitions of TX_INTERRUPT_STATUS register
* @{
*/
#define MACSEC_TX_DBG_BUF_CAPTURE_DONE OSI_BIT(22)
#define MACSEC_TX_MTU_CHECK_FAIL OSI_BIT(19)
#define MACSEC_TX_AES_GCM_BUF_OVF OSI_BIT(18)
#define MACSEC_TX_SC_AN_NOT_VALID OSI_BIT(17)
#define MACSEC_TX_MAC_CRC_ERROR OSI_BIT(16)
#define MACSEC_TX_PN_EXHAUSTED OSI_BIT(1)
#define MACSEC_TX_PN_THRSHLD_RCHD OSI_BIT(0)
/** @} */
/**
* @addtogroup MACSEC_RX_ISR register
*
* @brief Bit definitions of RX_INTERRUPT_STATUS register
* @{
*/
#define MACSEC_RX_DBG_BUF_CAPTURE_DONE OSI_BIT(22)
#define MACSEC_RX_ICV_ERROR OSI_BIT(21)
#define MACSEC_RX_REPLAY_ERROR OSI_BIT(20)
#define MACSEC_RX_MTU_CHECK_FAIL OSI_BIT(19)
#define MACSEC_RX_AES_GCM_BUF_OVF OSI_BIT(18)
#define MACSEC_RX_MAC_CRC_ERROR OSI_BIT(16)
#define MACSEC_RX_PN_EXHAUSTED OSI_BIT(1)
/** @} */
/**
* @addtogroup MACSEC_STATS_CONTROL_0 register
*
* @brief Bit definitions of MACSEC_STATS_CONTROL_0 register
* @{
*/
#define MACSEC_STATS_CONTROL0_CNT_RL_OVR_CPY OSI_BIT(1)
/** @} */
/**
* @addtogroup MACSEC_DEBUG_BUF_CONFIG_0 register
*
* @brief Bit definitions of MACSEC_DEBUG_BUF_CONFIG_0 register
* @{
*/
#define MACSEC_DEBUG_BUF_CONFIG_0_UPDATE OSI_BIT(31)
#define MACSEC_DEBUG_BUF_CONFIG_0_CTLR_SEL OSI_BIT(25)
#define MACSEC_DEBUG_BUF_CONFIG_0_RW OSI_BIT(24)
#define MACSEC_DEBUG_BUF_CONFIG_0_IDX_MASK (OSI_BIT(0) | OSI_BIT(1) | \
OSI_BIT(2) | OSI_BIT(3))
/** @} */
/**
* @addtogroup MACSEC_TX_DEBUG_TRIGGER_EN_0 register
*
* @brief Bit definitions of MACSEC_TX_DEBUG_TRIGGER_EN_0 register
* @{
*/
#define MACSEC_TX_DBG_CAPTURE OSI_BIT(10)
#define MACSEC_TX_DBG_ICV_CORRUPT OSI_BIT(9)
#define MACSEC_TX_DBG_CRC_CORRUPT OSI_BIT(8)
#define MACSEC_TX_DBG_KEY_NOT_VALID OSI_BIT(2)
#define MACSEC_TX_DBG_AN_NOT_VALID OSI_BIT(1)
#define MACSEC_TX_DBG_LKUP_MISS OSI_BIT(0)
/** @} */
/**
* @addtogroup MACSEC_RX_DEBUG_TRIGGER_EN_0 register
*
* @brief Bit definitions of MACSEC_RX_DEBUG_TRIGGER_EN_0 register
* @{
*/
#define MACSEC_RX_DBG_CAPTURE OSI_BIT(10)
#define MACSEC_RX_DBG_ICV_ERROR OSI_BIT(9)
#define MACSEC_RX_DBG_CRC_CORRUPT OSI_BIT(8)
#define MACSEC_RX_DBG_REPLAY_ERR OSI_BIT(3)
#define MACSEC_RX_DBG_KEY_NOT_VALID OSI_BIT(2)
#define MACSEC_RX_DBG_LKUP_MISS OSI_BIT(0)
/** @} */
/**
* @addtogroup MACSEC_TX_DEBUG_CONTROL_0 register
*
* @brief Bit definitions of MACSEC_TX_DEBUG_CONTROL_0 register
* @{
*/
#define MACSEC_TX_DEBUG_CONTROL_0_START_CAP OSI_BIT(31)
/** @} */
/**
* @addtogroup MACSEC_RX_DEBUG_CONTROL_0 register
*
* @brief Bit definitions of MACSEC_RX_DEBUG_CONTROL_0 register
* @{
*/
#define MACSEC_RX_DEBUG_CONTROL_0_START_CAP OSI_BIT(31)
/** @} */
#define MTU_LENGTH_MASK 0xFFFFU
#define SOT_LENGTH_MASK 0xFFU
#define EQOS_MACSEC_SOT_DELAY 0x4EU
/**
* @addtogroup TX/RX_BYP/SCI_LUT_VALID register
*
* @brief Bit definitions of LUT_VALID registers
* @{
*/
/** @} */
/**
* @addtogroup TX/RX LUT bit fields in LUT_DATA registers
*
* @brief Helper macros for LUT data programming
* @{
*/
#define MACSEC_LUT_DATA_REG_CNT 7U
/* Bit Offsets for LUT DATA[x] registers for various lookup field masks */
/* DA mask bits in LUT_DATA[1] register */
#define MACSEC_LUT_DA_BYTE0_INACTIVE OSI_BIT(16)
#define MACSEC_LUT_DA_BYTE1_INACTIVE OSI_BIT(17)
#define MACSEC_LUT_DA_BYTE2_INACTIVE OSI_BIT(18)
#define MACSEC_LUT_DA_BYTE3_INACTIVE OSI_BIT(19)
#define MACSEC_LUT_DA_BYTE4_INACTIVE OSI_BIT(20)
#define MACSEC_LUT_DA_BYTE5_INACTIVE OSI_BIT(21)
/* SA mask bits in LUT_DATA[3] register */
#define MACSEC_LUT_SA_BYTE0_INACTIVE OSI_BIT(6)
#define MACSEC_LUT_SA_BYTE1_INACTIVE OSI_BIT(7)
#define MACSEC_LUT_SA_BYTE2_INACTIVE OSI_BIT(8)
#define MACSEC_LUT_SA_BYTE3_INACTIVE OSI_BIT(9)
#define MACSEC_LUT_SA_BYTE4_INACTIVE OSI_BIT(10)
#define MACSEC_LUT_SA_BYTE5_INACTIVE OSI_BIT(11)
/* Ether type mask in LUT_DATA[3] register */
#define MACSEC_LUT_ETHTYPE_INACTIVE OSI_BIT(28)
/* VLAN PCP mask in LUT_DATA[4] register */
#define MACSEC_LUT_VLAN_PCP_INACTIVE OSI_BIT(0)
/* VLAN ID mask in LUT_DATA[4] register */
#define MACSEC_LUT_VLAN_ID_INACTIVE OSI_BIT(13)
/* VLAN mask in LUT_DATA[4] register */
#define MACSEC_LUT_VLAN_ACTIVE OSI_BIT(14)
/* Byte pattern masks in LUT_DATA[4] register */
#define MACSEC_LUT_BYTE0_PATTERN_INACTIVE OSI_BIT(29)
/* Byte pattern masks in LUT_DATA[5] register */
#define MACSEC_LUT_BYTE1_PATTERN_INACTIVE OSI_BIT(12)
#define MACSEC_LUT_BYTE2_PATTERN_INACTIVE OSI_BIT(27)
/* Byte pattern masks in LUT_DATA[6] register */
#define MACSEC_LUT_BYTE3_PATTERN_INACTIVE OSI_BIT(10)
/* Preemptable packet in LUT_DATA[6] register */
#define MACSEC_LUT_PREEMPT OSI_BIT(11)
/* Preempt mask in LUT_DATA[6] register */
#define MACSEC_LUT_PREEMPT_INACTIVE OSI_BIT(12)
/* Controlled port mask in LUT_DATA[6] register */
#define MACSEC_LUT_CONTROLLED_PORT OSI_BIT(13)
/* DVLAN packet in LUT_DATA[6] register */
#define MACSEC_BYP_LUT_DVLAN_PKT OSI_BIT(14)
/* DVLAN outer/inner tag select in LUT_DATA[6] register */
#define BYP_LUT_DVLAN_OUTER_INNER_TAG_SEL OSI_BIT(15)
/* AN valid bits for SCI LUT in LUT_DATA[6] register */
#define MACSEC_LUT_AN0_VALID OSI_BIT(13)
#define MACSEC_LUT_AN1_VALID OSI_BIT(14)
#define MACSEC_LUT_AN2_VALID OSI_BIT(15)
#define MACSEC_LUT_AN3_VALID OSI_BIT(16)
/* DVLAN packet in LUT_DATA[6] register */
#define MACSEC_TX_SCI_LUT_DVLAN_PKT OSI_BIT(21)
/* DVLAN outer/inner tag select in LUT_DATA[6] register */
#define MACSEC_TX_SCI_LUT_DVLAN_OUTER_INNER_TAG_SEL OSI_BIT(22)
/* SA State LUT entry valid in LUT_DATA[0] register */
#define MACSEC_SA_STATE_LUT_ENTRY_VALID OSI_BIT(0)
/* Preemptable packet in LUT_DATA[2] register for Rx SCI */
#define MACSEC_RX_SCI_LUT_PREEMPT OSI_BIT(8)
/* Preempt mask in LUT_DATA[2] register for Rx SCI */
#define MACSEC_RX_SCI_LUT_PREEMPT_INACTIVE OSI_BIT(9)
/** @} */
/* debug buffer data read/write length */
#define DBG_BUF_LEN 4U
#define INTEGER_LEN 4U
#endif /* INCLUDED_MACSEC_H */

View File

File diff suppressed because it is too large Load Diff

View File

File diff suppressed because it is too large Load Diff

View File

@@ -1,559 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "../osi/common/common.h"
#include <osi_common.h>
#include <osi_core.h>
#include "mgbe_mmc.h"
#include "mgbe_core.h"
/**
 * @brief update_mmc_val - read one MMC HW counter and accumulate it
 *
 * Algorithm: Read the counter register at the given offset and add it to
 * the previously accumulated value. If the unsigned sum wraps around,
 * log the overflow, reset all MMC counters and report 0; otherwise
 * return the new running total to the caller.
 *
 * @param[in] osi_core: OSI core private data structure.
 * @param[in] last_value: previous value of stats variable.
 * @param[in] offset: HW register offset
 *
 * @note
 * 1) MAC should be init and started. see osi_start_mac()
 * 2) osi_core->osd should be populated
 *
 * @retval 0 on MMC counters overflow
 * @retval value on current MMC counter value.
 */
static inline unsigned long update_mmc_val(struct osi_core_priv_data *osi_core,
					   unsigned long last_value,
					   unsigned long offset)
{
	unsigned int hw_cnt = osi_readl((unsigned char *)osi_core->base +
					offset);
	unsigned long accumulated = last_value + hw_cnt;

	/* No wrap-around: hand the running total back to the caller */
	if (accumulated >= last_value) {
		return accumulated;
	}

	/* Unsigned sum wrapped: log and restart every counter from zero */
	OSI_CORE_ERR(osi_core->osd,
		     OSI_LOG_ARG_OUTOFBOUND,
		     "Value overflow resetting all counters\n",
		     (unsigned long long)offset);
	mgbe_reset_mmc(osi_core);

	return 0;
}
/**
 * @brief mgbe_reset_mmc - reset the HW MMC counters and the SW mirror
 *	  structure
 *
 * @param[in] osi_core: OSI core private data structure.
 *
 * @note
 * 1) MAC should be init and started. see osi_start_mac()
 * 2) osi_core->osd should be populated
 */
void mgbe_reset_mmc(struct osi_core_priv_data *osi_core)
{
	unsigned char *base = (unsigned char *)osi_core->base;
	unsigned int ctrl = osi_readl(base + MGBE_MMC_CNTRL);

	/* CNTRST is self-clearing in one clock cycle */
	osi_writel(ctrl | MGBE_MMC_CNTRL_CNTRST, base + MGBE_MMC_CNTRL);

	/* Clear the SW mirror so accumulation restarts from zero */
	osi_memset(&osi_core->mmc, 0U, sizeof(struct osi_mmc_counters));
}
/**
 * @brief mgbe_read_mmc - To read MMC registers and ether_mmc_counter structure
 *	  variable
 *
 * Algorithm: Pass register offset and old value to helper function and
 *	   update structure. Each counter is accumulated via
 *	   update_mmc_val(), which resets ALL counters to zero if any
 *	   accumulated value overflows.
 *
 * @param[in] osi_core: OSI core private data structure.
 *
 * @note
 * 1) MAC should be init and started. see osi_start_mac()
 * 2) osi_core->osd should be populated
 */
void mgbe_read_mmc(struct osi_core_priv_data *osi_core)
{
struct osi_mmc_counters *mmc = &osi_core->mmc;
/* MAC Tx statistics */
mmc->mmc_tx_octetcount_gb =
update_mmc_val(osi_core, mmc->mmc_tx_octetcount_gb,
MMC_TXOCTETCOUNT_GB_L);
mmc->mmc_tx_octetcount_gb_h =
update_mmc_val(osi_core, mmc->mmc_tx_octetcount_gb_h,
MMC_TXOCTETCOUNT_GB_H);
mmc->mmc_tx_framecount_gb =
update_mmc_val(osi_core, mmc->mmc_tx_framecount_gb,
MMC_TXPACKETCOUNT_GB_L);
mmc->mmc_tx_framecount_gb_h =
update_mmc_val(osi_core, mmc->mmc_tx_framecount_gb_h,
MMC_TXPACKETCOUNT_GB_H);
mmc->mmc_tx_broadcastframe_g =
update_mmc_val(osi_core, mmc->mmc_tx_broadcastframe_g,
MMC_TXBROADCASTPACKETS_G_L);
mmc->mmc_tx_broadcastframe_g_h =
update_mmc_val(osi_core, mmc->mmc_tx_broadcastframe_g_h,
MMC_TXBROADCASTPACKETS_G_H);
mmc->mmc_tx_multicastframe_g =
update_mmc_val(osi_core, mmc->mmc_tx_multicastframe_g,
MMC_TXMULTICASTPACKETS_G_L);
mmc->mmc_tx_multicastframe_g_h =
update_mmc_val(osi_core, mmc->mmc_tx_multicastframe_g_h,
MMC_TXMULTICASTPACKETS_G_H);
/* Tx size-bucket histograms (good + bad packets) */
mmc->mmc_tx_64_octets_gb =
update_mmc_val(osi_core, mmc->mmc_tx_64_octets_gb,
MMC_TX64OCTETS_GB_L);
mmc->mmc_tx_64_octets_gb_h =
update_mmc_val(osi_core, mmc->mmc_tx_64_octets_gb_h,
MMC_TX64OCTETS_GB_H);
mmc->mmc_tx_65_to_127_octets_gb =
update_mmc_val(osi_core, mmc->mmc_tx_65_to_127_octets_gb,
MMC_TX65TO127OCTETS_GB_L);
mmc->mmc_tx_65_to_127_octets_gb_h =
update_mmc_val(osi_core, mmc->mmc_tx_65_to_127_octets_gb_h,
MMC_TX65TO127OCTETS_GB_H);
mmc->mmc_tx_128_to_255_octets_gb =
update_mmc_val(osi_core, mmc->mmc_tx_128_to_255_octets_gb,
MMC_TX128TO255OCTETS_GB_L);
mmc->mmc_tx_128_to_255_octets_gb_h =
update_mmc_val(osi_core, mmc->mmc_tx_128_to_255_octets_gb_h,
MMC_TX128TO255OCTETS_GB_H);
mmc->mmc_tx_256_to_511_octets_gb =
update_mmc_val(osi_core, mmc->mmc_tx_256_to_511_octets_gb,
MMC_TX256TO511OCTETS_GB_L);
mmc->mmc_tx_256_to_511_octets_gb_h =
update_mmc_val(osi_core, mmc->mmc_tx_256_to_511_octets_gb_h,
MMC_TX256TO511OCTETS_GB_H);
mmc->mmc_tx_512_to_1023_octets_gb =
update_mmc_val(osi_core, mmc->mmc_tx_512_to_1023_octets_gb,
MMC_TX512TO1023OCTETS_GB_L);
mmc->mmc_tx_512_to_1023_octets_gb_h =
update_mmc_val(osi_core, mmc->mmc_tx_512_to_1023_octets_gb_h,
MMC_TX512TO1023OCTETS_GB_H);
mmc->mmc_tx_1024_to_max_octets_gb =
update_mmc_val(osi_core, mmc->mmc_tx_1024_to_max_octets_gb,
MMC_TX1024TOMAXOCTETS_GB_L);
mmc->mmc_tx_1024_to_max_octets_gb_h =
update_mmc_val(osi_core, mmc->mmc_tx_1024_to_max_octets_gb_h,
MMC_TX1024TOMAXOCTETS_GB_H);
mmc->mmc_tx_unicast_gb =
update_mmc_val(osi_core, mmc->mmc_tx_unicast_gb,
MMC_TXUNICASTPACKETS_GB_L);
mmc->mmc_tx_unicast_gb_h =
update_mmc_val(osi_core, mmc->mmc_tx_unicast_gb_h,
MMC_TXUNICASTPACKETS_GB_H);
mmc->mmc_tx_multicast_gb =
update_mmc_val(osi_core, mmc->mmc_tx_multicast_gb,
MMC_TXMULTICASTPACKETS_GB_L);
mmc->mmc_tx_multicast_gb_h =
update_mmc_val(osi_core, mmc->mmc_tx_multicast_gb_h,
MMC_TXMULTICASTPACKETS_GB_H);
mmc->mmc_tx_broadcast_gb =
update_mmc_val(osi_core, mmc->mmc_tx_broadcast_gb,
MMC_TXBROADCASTPACKETS_GB_L);
mmc->mmc_tx_broadcast_gb_h =
update_mmc_val(osi_core, mmc->mmc_tx_broadcast_gb_h,
MMC_TXBROADCASTPACKETS_GB_H);
/* Tx error counters */
mmc->mmc_tx_underflow_error =
update_mmc_val(osi_core, mmc->mmc_tx_underflow_error,
MMC_TXUNDERFLOWERROR_L);
mmc->mmc_tx_underflow_error_h =
update_mmc_val(osi_core, mmc->mmc_tx_underflow_error_h,
MMC_TXUNDERFLOWERROR_H);
mmc->mmc_tx_singlecol_g =
update_mmc_val(osi_core, mmc->mmc_tx_singlecol_g,
MMC_TXSINGLECOL_G);
mmc->mmc_tx_multicol_g =
update_mmc_val(osi_core, mmc->mmc_tx_multicol_g,
MMC_TXMULTICOL_G);
mmc->mmc_tx_deferred =
update_mmc_val(osi_core, mmc->mmc_tx_deferred,
MMC_TXDEFERRED);
mmc->mmc_tx_latecol =
update_mmc_val(osi_core, mmc->mmc_tx_latecol,
MMC_TXLATECOL);
mmc->mmc_tx_exesscol =
update_mmc_val(osi_core, mmc->mmc_tx_exesscol,
MMC_TXEXESSCOL);
mmc->mmc_tx_carrier_error =
update_mmc_val(osi_core, mmc->mmc_tx_carrier_error,
MMC_TXCARRIERERROR);
mmc->mmc_tx_octetcount_g =
update_mmc_val(osi_core, mmc->mmc_tx_octetcount_g,
MMC_TXOCTETCOUNT_G_L);
mmc->mmc_tx_octetcount_g_h =
update_mmc_val(osi_core, mmc->mmc_tx_octetcount_g_h,
MMC_TXOCTETCOUNT_G_H);
mmc->mmc_tx_framecount_g =
update_mmc_val(osi_core, mmc->mmc_tx_framecount_g,
MMC_TXPACKETSCOUNT_G_L);
mmc->mmc_tx_framecount_g_h =
update_mmc_val(osi_core, mmc->mmc_tx_framecount_g_h,
MMC_TXPACKETSCOUNT_G_H);
mmc->mmc_tx_excessdef =
update_mmc_val(osi_core, mmc->mmc_tx_excessdef,
MMC_TXEXECESS_DEFERRED);
mmc->mmc_tx_pause_frame =
update_mmc_val(osi_core, mmc->mmc_tx_pause_frame,
MMC_TXPAUSEPACKETS_L);
mmc->mmc_tx_pause_frame_h =
update_mmc_val(osi_core, mmc->mmc_tx_pause_frame_h,
MMC_TXPAUSEPACKETS_H);
mmc->mmc_tx_vlan_frame_g =
update_mmc_val(osi_core, mmc->mmc_tx_vlan_frame_g,
MMC_TXVLANPACKETS_G_L);
mmc->mmc_tx_vlan_frame_g_h =
update_mmc_val(osi_core, mmc->mmc_tx_vlan_frame_g_h,
MMC_TXVLANPACKETS_G_H);
/* MAC Rx statistics */
mmc->mmc_rx_framecount_gb =
update_mmc_val(osi_core, mmc->mmc_rx_framecount_gb,
MMC_RXPACKETCOUNT_GB_L);
mmc->mmc_rx_framecount_gb_h =
update_mmc_val(osi_core, mmc->mmc_rx_framecount_gb_h,
MMC_RXPACKETCOUNT_GB_H);
mmc->mmc_rx_octetcount_gb =
update_mmc_val(osi_core, mmc->mmc_rx_octetcount_gb,
MMC_RXOCTETCOUNT_GB_L);
mmc->mmc_rx_octetcount_gb_h =
update_mmc_val(osi_core, mmc->mmc_rx_octetcount_gb_h,
MMC_RXOCTETCOUNT_GB_H);
mmc->mmc_rx_octetcount_g =
update_mmc_val(osi_core, mmc->mmc_rx_octetcount_g,
MMC_RXOCTETCOUNT_G_L);
mmc->mmc_rx_octetcount_g_h =
update_mmc_val(osi_core, mmc->mmc_rx_octetcount_g_h,
MMC_RXOCTETCOUNT_G_H);
mmc->mmc_rx_broadcastframe_g =
update_mmc_val(osi_core, mmc->mmc_rx_broadcastframe_g,
MMC_RXBROADCASTPACKETS_G_L);
mmc->mmc_rx_broadcastframe_g_h =
update_mmc_val(osi_core, mmc->mmc_rx_broadcastframe_g_h,
MMC_RXBROADCASTPACKETS_G_H);
mmc->mmc_rx_multicastframe_g =
update_mmc_val(osi_core, mmc->mmc_rx_multicastframe_g,
MMC_RXMULTICASTPACKETS_G_L);
mmc->mmc_rx_multicastframe_g_h =
update_mmc_val(osi_core, mmc->mmc_rx_multicastframe_g_h,
MMC_RXMULTICASTPACKETS_G_H);
/* Rx error counters */
mmc->mmc_rx_crc_error =
update_mmc_val(osi_core, mmc->mmc_rx_crc_error,
MMC_RXCRCERROR_L);
mmc->mmc_rx_crc_error_h =
update_mmc_val(osi_core, mmc->mmc_rx_crc_error_h,
MMC_RXCRCERROR_H);
mmc->mmc_rx_align_error =
update_mmc_val(osi_core, mmc->mmc_rx_align_error,
MMC_RXALIGNMENTERROR);
mmc->mmc_rx_runt_error =
update_mmc_val(osi_core, mmc->mmc_rx_runt_error,
MMC_RXRUNTERROR);
mmc->mmc_rx_jabber_error =
update_mmc_val(osi_core, mmc->mmc_rx_jabber_error,
MMC_RXJABBERERROR);
mmc->mmc_rx_undersize_g =
update_mmc_val(osi_core, mmc->mmc_rx_undersize_g,
MMC_RXUNDERSIZE_G);
mmc->mmc_rx_oversize_g =
update_mmc_val(osi_core, mmc->mmc_rx_oversize_g,
MMC_RXOVERSIZE_G);
/* Rx size-bucket histograms (good + bad packets) */
mmc->mmc_rx_64_octets_gb =
update_mmc_val(osi_core, mmc->mmc_rx_64_octets_gb,
MMC_RX64OCTETS_GB_L);
mmc->mmc_rx_64_octets_gb_h =
update_mmc_val(osi_core, mmc->mmc_rx_64_octets_gb_h,
MMC_RX64OCTETS_GB_H);
mmc->mmc_rx_65_to_127_octets_gb =
update_mmc_val(osi_core, mmc->mmc_rx_65_to_127_octets_gb,
MMC_RX65TO127OCTETS_GB_L);
mmc->mmc_rx_65_to_127_octets_gb_h =
update_mmc_val(osi_core, mmc->mmc_rx_65_to_127_octets_gb_h,
MMC_RX65TO127OCTETS_GB_H);
mmc->mmc_rx_128_to_255_octets_gb =
update_mmc_val(osi_core, mmc->mmc_rx_128_to_255_octets_gb,
MMC_RX128TO255OCTETS_GB_L);
mmc->mmc_rx_128_to_255_octets_gb_h =
update_mmc_val(osi_core, mmc->mmc_rx_128_to_255_octets_gb_h,
MMC_RX128TO255OCTETS_GB_H);
mmc->mmc_rx_256_to_511_octets_gb =
update_mmc_val(osi_core, mmc->mmc_rx_256_to_511_octets_gb,
MMC_RX256TO511OCTETS_GB_L);
mmc->mmc_rx_256_to_511_octets_gb_h =
update_mmc_val(osi_core, mmc->mmc_rx_256_to_511_octets_gb_h,
MMC_RX256TO511OCTETS_GB_H);
mmc->mmc_rx_512_to_1023_octets_gb =
update_mmc_val(osi_core, mmc->mmc_rx_512_to_1023_octets_gb,
MMC_RX512TO1023OCTETS_GB_L);
mmc->mmc_rx_512_to_1023_octets_gb_h =
update_mmc_val(osi_core, mmc->mmc_rx_512_to_1023_octets_gb_h,
MMC_RX512TO1023OCTETS_GB_H);
mmc->mmc_rx_1024_to_max_octets_gb =
update_mmc_val(osi_core, mmc->mmc_rx_1024_to_max_octets_gb,
MMC_RX1024TOMAXOCTETS_GB_L);
mmc->mmc_rx_1024_to_max_octets_gb_h =
update_mmc_val(osi_core, mmc->mmc_rx_1024_to_max_octets_gb_h,
MMC_RX1024TOMAXOCTETS_GB_H);
mmc->mmc_rx_unicast_g =
update_mmc_val(osi_core, mmc->mmc_rx_unicast_g,
MMC_RXUNICASTPACKETS_G_L);
mmc->mmc_rx_unicast_g_h =
update_mmc_val(osi_core, mmc->mmc_rx_unicast_g_h,
MMC_RXUNICASTPACKETS_G_H);
mmc->mmc_rx_length_error =
update_mmc_val(osi_core, mmc->mmc_rx_length_error,
MMC_RXLENGTHERROR_L);
mmc->mmc_rx_length_error_h =
update_mmc_val(osi_core, mmc->mmc_rx_length_error_h,
MMC_RXLENGTHERROR_H);
mmc->mmc_rx_outofrangetype =
update_mmc_val(osi_core, mmc->mmc_rx_outofrangetype,
MMC_RXOUTOFRANGETYPE_L);
mmc->mmc_rx_outofrangetype_h =
update_mmc_val(osi_core, mmc->mmc_rx_outofrangetype_h,
MMC_RXOUTOFRANGETYPE_H);
mmc->mmc_rx_pause_frames =
update_mmc_val(osi_core, mmc->mmc_rx_pause_frames,
MMC_RXPAUSEPACKETS_L);
mmc->mmc_rx_pause_frames_h =
update_mmc_val(osi_core, mmc->mmc_rx_pause_frames_h,
MMC_RXPAUSEPACKETS_H);
mmc->mmc_rx_fifo_overflow =
update_mmc_val(osi_core, mmc->mmc_rx_fifo_overflow,
MMC_RXFIFOOVERFLOW_L);
mmc->mmc_rx_fifo_overflow_h =
update_mmc_val(osi_core, mmc->mmc_rx_fifo_overflow_h,
MMC_RXFIFOOVERFLOW_H);
mmc->mmc_rx_vlan_frames_gb =
update_mmc_val(osi_core, mmc->mmc_rx_vlan_frames_gb,
MMC_RXVLANPACKETS_GB_L);
mmc->mmc_rx_vlan_frames_gb_h =
update_mmc_val(osi_core, mmc->mmc_rx_vlan_frames_gb_h,
MMC_RXVLANPACKETS_GB_H);
mmc->mmc_rx_watchdog_error =
update_mmc_val(osi_core, mmc->mmc_rx_watchdog_error,
MMC_RXWATCHDOGERROR);
/* EEE LPI time/transition counters */
mmc->mmc_tx_lpi_usec_cntr =
update_mmc_val(osi_core, mmc->mmc_tx_lpi_usec_cntr,
MMC_TXLPIUSECCNTR);
mmc->mmc_tx_lpi_tran_cntr =
update_mmc_val(osi_core, mmc->mmc_tx_lpi_tran_cntr,
MMC_TXLPITRANCNTR);
mmc->mmc_rx_lpi_usec_cntr =
update_mmc_val(osi_core, mmc->mmc_rx_lpi_usec_cntr,
MMC_RXLPIUSECCNTR);
mmc->mmc_rx_lpi_tran_cntr =
update_mmc_val(osi_core, mmc->mmc_rx_lpi_tran_cntr,
MMC_RXLPITRANCNTR);
/* Rx IPv4/IPv6 and L4 (UDP/TCP/ICMP) packet counters */
mmc->mmc_rx_ipv4_gd =
update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd,
MMC_RXIPV4_GD_PKTS_L);
mmc->mmc_rx_ipv4_gd_h =
update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd_h,
MMC_RXIPV4_GD_PKTS_H);
mmc->mmc_rx_ipv4_hderr =
update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr,
MMC_RXIPV4_HDRERR_PKTS_L);
mmc->mmc_rx_ipv4_hderr_h =
update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr_h,
MMC_RXIPV4_HDRERR_PKTS_H);
mmc->mmc_rx_ipv4_nopay =
update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay,
MMC_RXIPV4_NOPAY_PKTS_L);
mmc->mmc_rx_ipv4_nopay_h =
update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay_h,
MMC_RXIPV4_NOPAY_PKTS_H);
mmc->mmc_rx_ipv4_frag =
update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag,
MMC_RXIPV4_FRAG_PKTS_L);
mmc->mmc_rx_ipv4_frag_h =
update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag_h,
MMC_RXIPV4_FRAG_PKTS_H);
mmc->mmc_rx_ipv4_udsbl =
update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl,
MMC_RXIPV4_UBSBL_PKTS_L);
mmc->mmc_rx_ipv4_udsbl_h =
update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl_h,
MMC_RXIPV4_UBSBL_PKTS_H);
mmc->mmc_rx_ipv6_gd =
update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd,
MMC_RXIPV6_GD_PKTS_L);
mmc->mmc_rx_ipv6_gd_h =
update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd_h,
MMC_RXIPV6_GD_PKTS_H);
mmc->mmc_rx_ipv6_hderr =
update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr,
MMC_RXIPV6_HDRERR_PKTS_L);
mmc->mmc_rx_ipv6_hderr_h =
update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr_h,
MMC_RXIPV6_HDRERR_PKTS_H);
mmc->mmc_rx_ipv6_nopay =
update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay,
MMC_RXIPV6_NOPAY_PKTS_L);
mmc->mmc_rx_ipv6_nopay_h =
update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay_h,
MMC_RXIPV6_NOPAY_PKTS_H);
mmc->mmc_rx_udp_gd =
update_mmc_val(osi_core, mmc->mmc_rx_udp_gd,
MMC_RXUDP_GD_PKTS_L);
mmc->mmc_rx_udp_gd_h =
update_mmc_val(osi_core, mmc->mmc_rx_udp_gd_h,
MMC_RXUDP_GD_PKTS_H);
mmc->mmc_rx_udp_err =
update_mmc_val(osi_core, mmc->mmc_rx_udp_err,
MMC_RXUDP_ERR_PKTS_L);
mmc->mmc_rx_udp_err_h =
update_mmc_val(osi_core, mmc->mmc_rx_udp_err_h,
MMC_RXUDP_ERR_PKTS_H);
mmc->mmc_rx_tcp_gd =
update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd,
MMC_RXTCP_GD_PKTS_L);
mmc->mmc_rx_tcp_gd_h =
update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd_h,
MMC_RXTCP_GD_PKTS_H);
mmc->mmc_rx_tcp_err =
update_mmc_val(osi_core, mmc->mmc_rx_tcp_err,
MMC_RXTCP_ERR_PKTS_L);
mmc->mmc_rx_tcp_err_h =
update_mmc_val(osi_core, mmc->mmc_rx_tcp_err_h,
MMC_RXTCP_ERR_PKTS_H);
mmc->mmc_rx_icmp_gd =
update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd,
MMC_RXICMP_GD_PKTS_L);
mmc->mmc_rx_icmp_gd_h =
update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd_h,
MMC_RXICMP_GD_PKTS_H);
mmc->mmc_rx_icmp_err =
update_mmc_val(osi_core, mmc->mmc_rx_icmp_err,
MMC_RXICMP_ERR_PKTS_L);
mmc->mmc_rx_icmp_err_h =
update_mmc_val(osi_core, mmc->mmc_rx_icmp_err_h,
MMC_RXICMP_ERR_PKTS_H);
/* Rx IPv4/IPv6 and L4 octet counters */
mmc->mmc_rx_ipv4_gd_octets =
update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd_octets,
MMC_RXIPV4_GD_OCTETS_L);
mmc->mmc_rx_ipv4_gd_octets_h =
update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd_octets_h,
MMC_RXIPV4_GD_OCTETS_H);
mmc->mmc_rx_ipv4_hderr_octets =
update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr_octets,
MMC_RXIPV4_HDRERR_OCTETS_L);
mmc->mmc_rx_ipv4_hderr_octets_h =
update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr_octets_h,
MMC_RXIPV4_HDRERR_OCTETS_H);
mmc->mmc_rx_ipv4_nopay_octets =
update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay_octets,
MMC_RXIPV4_NOPAY_OCTETS_L);
mmc->mmc_rx_ipv4_nopay_octets_h =
update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay_octets_h,
MMC_RXIPV4_NOPAY_OCTETS_H);
mmc->mmc_rx_ipv4_frag_octets =
update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag_octets,
MMC_RXIPV4_FRAG_OCTETS_L);
mmc->mmc_rx_ipv4_frag_octets_h =
update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag_octets_h,
MMC_RXIPV4_FRAG_OCTETS_H);
mmc->mmc_rx_ipv4_udsbl_octets =
update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl_octets,
MMC_RXIPV4_UDP_CHKSM_DIS_OCT_L);
mmc->mmc_rx_ipv4_udsbl_octets_h =
update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl_octets_h,
MMC_RXIPV4_UDP_CHKSM_DIS_OCT_H);
mmc->mmc_rx_udp_gd_octets =
update_mmc_val(osi_core, mmc->mmc_rx_udp_gd_octets,
MMC_RXUDP_GD_OCTETS_L);
mmc->mmc_rx_udp_gd_octets_h =
update_mmc_val(osi_core, mmc->mmc_rx_udp_gd_octets_h,
MMC_RXUDP_GD_OCTETS_H);
mmc->mmc_rx_ipv6_gd_octets =
update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd_octets,
MMC_RXIPV6_GD_OCTETS_L);
mmc->mmc_rx_ipv6_gd_octets_h =
update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd_octets_h,
MMC_RXIPV6_GD_OCTETS_H);
mmc->mmc_rx_ipv6_hderr_octets =
update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr_octets,
MMC_RXIPV6_HDRERR_OCTETS_L);
mmc->mmc_rx_ipv6_hderr_octets_h =
update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr_octets_h,
MMC_RXIPV6_HDRERR_OCTETS_H);
mmc->mmc_rx_ipv6_nopay_octets =
update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay_octets,
MMC_RXIPV6_NOPAY_OCTETS_L);
mmc->mmc_rx_ipv6_nopay_octets_h =
update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay_octets_h,
MMC_RXIPV6_NOPAY_OCTETS_H);
mmc->mmc_rx_udp_err_octets =
update_mmc_val(osi_core, mmc->mmc_rx_udp_err_octets,
MMC_RXUDP_ERR_OCTETS_L);
mmc->mmc_rx_udp_err_octets_h =
update_mmc_val(osi_core, mmc->mmc_rx_udp_err_octets_h,
MMC_RXUDP_ERR_OCTETS_H);
mmc->mmc_rx_tcp_gd_octets =
update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd_octets,
MMC_RXTCP_GD_OCTETS_L);
mmc->mmc_rx_tcp_gd_octets_h =
update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd_octets_h,
MMC_RXTCP_GD_OCTETS_H);
mmc->mmc_rx_tcp_err_octets =
update_mmc_val(osi_core, mmc->mmc_rx_tcp_err_octets,
MMC_RXTCP_ERR_OCTETS_L);
mmc->mmc_rx_tcp_err_octets_h =
update_mmc_val(osi_core, mmc->mmc_rx_tcp_err_octets_h,
MMC_RXTCP_ERR_OCTETS_H);
mmc->mmc_rx_icmp_gd_octets =
update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd_octets,
MMC_RXICMP_GD_OCTETS_L);
mmc->mmc_rx_icmp_gd_octets_h =
update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd_octets_h,
MMC_RXICMP_GD_OCTETS_H);
mmc->mmc_rx_icmp_err_octets =
update_mmc_val(osi_core, mmc->mmc_rx_icmp_err_octets,
MMC_RXICMP_ERR_OCTETS_L);
mmc->mmc_rx_icmp_err_octets_h =
update_mmc_val(osi_core, mmc->mmc_rx_icmp_err_octets_h,
MMC_RXICMP_ERR_OCTETS_H);
/* Frame-preemption (FPE) fragment/assembly counters */
mmc->mmc_tx_fpe_frag_cnt =
update_mmc_val(osi_core, mmc->mmc_tx_fpe_frag_cnt,
MMC_TX_FPE_FRAG_COUNTER);
mmc->mmc_tx_fpe_hold_req_cnt =
update_mmc_val(osi_core, mmc->mmc_tx_fpe_hold_req_cnt,
MMC_TX_HOLD_REQ_COUNTER);
mmc->mmc_rx_packet_reass_err_cnt =
update_mmc_val(osi_core, mmc->mmc_rx_packet_reass_err_cnt,
MMC_RX_PKT_ASSEMBLY_ERR_CNTR);
mmc->mmc_rx_packet_smd_err_cnt =
update_mmc_val(osi_core, mmc->mmc_rx_packet_smd_err_cnt,
MMC_RX_PKT_SMD_ERR_CNTR);
mmc->mmc_rx_packet_asm_ok_cnt =
update_mmc_val(osi_core, mmc->mmc_rx_packet_asm_ok_cnt,
MMC_RX_PKT_ASSEMBLY_OK_CNTR);
mmc->mmc_rx_fpe_fragment_cnt =
update_mmc_val(osi_core, mmc->mmc_rx_fpe_fragment_cnt,
MMC_RX_FPE_FRAG_CNTR);
}

View File

@@ -1,236 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef MGBE_MMC_H_
#define MGBE_MMC_H_
/**
 * @addtogroup MGBE-MMC MMC HW register offsets
 *
 * @brief MMC HW register offsets
 *
 * Offsets are relative to the MAC base address. Wide counters are split
 * into _L (low 32 bits) and _H (high 32 bits) register pairs.
 * @{
 */
/* Tx counters */
#define MMC_TXOCTETCOUNT_GB_L		0x00814
#define MMC_TXOCTETCOUNT_GB_H		0x00818
#define MMC_TXPACKETCOUNT_GB_L		0x0081C
#define MMC_TXPACKETCOUNT_GB_H		0x00820
#define MMC_TXBROADCASTPACKETS_G_L	0x00824
#define MMC_TXBROADCASTPACKETS_G_H	0x00828
#define MMC_TXMULTICASTPACKETS_G_L	0x0082C
#define MMC_TXMULTICASTPACKETS_G_H	0x00830
#define MMC_TX64OCTETS_GB_L		0x00834
#define MMC_TX64OCTETS_GB_H		0x00838
#define MMC_TX65TO127OCTETS_GB_L	0x0083C
#define MMC_TX65TO127OCTETS_GB_H	0x00840
#define MMC_TX128TO255OCTETS_GB_L	0x00844
#define MMC_TX128TO255OCTETS_GB_H	0x00848
#define MMC_TX256TO511OCTETS_GB_L	0x0084C
#define MMC_TX256TO511OCTETS_GB_H	0x00850
#define MMC_TX512TO1023OCTETS_GB_L	0x00854
#define MMC_TX512TO1023OCTETS_GB_H	0x00858
#define MMC_TX1024TOMAXOCTETS_GB_L	0x0085C
#define MMC_TX1024TOMAXOCTETS_GB_H	0x00860
#define MMC_TXUNICASTPACKETS_GB_L	0x00864
#define MMC_TXUNICASTPACKETS_GB_H	0x00868
#define MMC_TXMULTICASTPACKETS_GB_L	0x0086C
#define MMC_TXMULTICASTPACKETS_GB_H	0x00870
#define MMC_TXBROADCASTPACKETS_GB_L	0x00874
#define MMC_TXBROADCASTPACKETS_GB_H	0x00878
#define MMC_TXUNDERFLOWERROR_L		0x0087C
#define MMC_TXUNDERFLOWERROR_H		0x00880
#define MMC_TXOCTETCOUNT_G_L		0x00884
#define MMC_TXOCTETCOUNT_G_H		0x00888
#define MMC_TXPACKETSCOUNT_G_L		0x0088C
#define MMC_TXPACKETSCOUNT_G_H		0x00890
#define MMC_TXPAUSEPACKETS_L		0x00894
#define MMC_TXPAUSEPACKETS_H		0x00898
#define MMC_TXVLANPACKETS_G_L		0x0089C
#define MMC_TXVLANPACKETS_G_H		0x008A0
#define MMC_TXLPIUSECCNTR		0x008A4
#define MMC_TXLPITRANCNTR		0x008A8
/* Per-priority Tx counters */
#define MMC_PRIO_INT_STATUS		0x008CC
#define MMC_TX_PER_PRIO_STATUS		0x008D0
#define MMC_TX_PER_PRIO_PKT_GB		0x008D4
#define MMC_TX_PER_PRIO_PFC_PKT_GB	0x008D8
#define MMC_TX_PER_PRIO_GPFC_PKT_GB	0x008DC
#define MMC_TX_PER_PRIO_OCTET_GB_L	0x008E0
#define MMC_TX_PER_PRIO_OCTET_GB_H	0x008E4
/* Rx counters */
#define MMC_RXPACKETCOUNT_GB_L		0x00900
#define MMC_RXPACKETCOUNT_GB_H		0x00904
#define MMC_RXOCTETCOUNT_GB_L		0x00908
#define MMC_RXOCTETCOUNT_GB_H		0x0090C
#define MMC_RXOCTETCOUNT_G_L		0x00910
#define MMC_RXOCTETCOUNT_G_H		0x00914
#define MMC_RXBROADCASTPACKETS_G_L	0x00918
#define MMC_RXBROADCASTPACKETS_G_H	0x0091C
#define MMC_RXMULTICASTPACKETS_G_L	0x00920
#define MMC_RXMULTICASTPACKETS_G_H	0x00924
#define MMC_RXCRCERROR_L		0x00928
#define MMC_RXCRCERROR_H		0x0092C
#define MMC_RXRUNTERROR			0x00930
#define MMC_RXJABBERERROR		0x00934
#define MMC_RXUNDERSIZE_G		0x00938
#define MMC_RXOVERSIZE_G		0x0093C
#define MMC_RX64OCTETS_GB_L		0x00940
#define MMC_RX64OCTETS_GB_H		0x00944
#define MMC_RX65TO127OCTETS_GB_L	0x00948
#define MMC_RX65TO127OCTETS_GB_H	0x0094C
#define MMC_RX128TO255OCTETS_GB_L	0x00950
#define MMC_RX128TO255OCTETS_GB_H	0x00954
#define MMC_RX256TO511OCTETS_GB_L	0x00958
#define MMC_RX256TO511OCTETS_GB_H	0x0095C
#define MMC_RX512TO1023OCTETS_GB_L	0x00960
#define MMC_RX512TO1023OCTETS_GB_H	0x00964
#define MMC_RX1024TOMAXOCTETS_GB_L	0x00968
#define MMC_RX1024TOMAXOCTETS_GB_H	0x0096C
#define MMC_RXUNICASTPACKETS_G_L	0x00970
#define MMC_RXUNICASTPACKETS_G_H	0x00974
#define MMC_RXLENGTHERROR_L		0x00978
#define MMC_RXLENGTHERROR_H		0x0097C
#define MMC_RXOUTOFRANGETYPE_L		0x00980
#define MMC_RXOUTOFRANGETYPE_H		0x00984
#define MMC_RXPAUSEPACKETS_L		0x00988
#define MMC_RXPAUSEPACKETS_H		0x0098C
#define MMC_RXFIFOOVERFLOW_L		0x00990
#define MMC_RXFIFOOVERFLOW_H		0x00994
#define MMC_RXVLANPACKETS_GB_L		0x00998
#define MMC_RXVLANPACKETS_GB_H		0x0099C
#define MMC_RXWATCHDOGERROR		0x009A0
#define MMC_RXLPIUSECCNTR		0x009A4
#define MMC_RXLPITRANCNTR		0x009A8
#define MMC_RX_DISCARD_PKTS_GB_L	0x009AC
#define MMC_RX_DISCARD_PKTS_GB_H	0x009B0
#define MMC_RX_DISCARD_OCTET_GB_L	0x009B4
#define MMC_RX_DISCARD_OCTET_GB_H	0x009B8
#define MMC_RXALIGNMENTERROR		0x009BC
/* Per-priority Rx counters */
#define MMC_RX_PER_PRIO_STATUS		0x009D0
#define MMC_RX_PER_PRIO_PKT_GB		0x009D4
#define MMC_RX_PER_PRIO_PKT_B		0x009D8
#define MMC_RX_PER_PRIO_PFC_PKT_GB	0x009DC
#define MMC_RX_PER_PRIO_OCTET_GB_L	0x009E0
#define MMC_RX_PER_PRIO_OCTET_GB_H	0x009E4
#define MMC_RX_PER_PRIO_DISCARD_GB	0x009E8
/* Frame preemption (FPE) counters */
#define MMC_FPE_TX_INT			0x00A00
#define MMC_FPE_TX_INT_MASK		0x00A04
#define MMC_TX_FPE_FRAG_COUNTER		0x00A08
#define MMC_TX_HOLD_REQ_COUNTER		0x00A0C
#define MMC_FPE_RX_INT			0x00A20
#define MMC_FPE_RX_INT_MASK		0x00A24
#define MMC_RX_PKT_ASSEMBLY_ERR_CNTR	0x00A28
#define MMC_RX_PKT_SMD_ERR_CNTR		0x00A2C
#define MMC_RX_PKT_ASSEMBLY_OK_CNTR	0x00A30
#define MMC_RX_FPE_FRAG_CNTR		0x00A34
/* Half-duplex/collision counters */
#define MMC_TXSINGLECOL_G		0x00A40
#define MMC_TXMULTICOL_G		0x00A44
#define MMC_TXDEFERRED			0x00A48
#define MMC_TXLATECOL			0x00A4C
#define MMC_TXEXESSCOL			0x00A50
#define MMC_TXCARRIERERROR		0x00A54
#define MMC_TXEXECESS_DEFERRED		0x00A58
/* IPC (checksum offload) Rx counters */
#define MMC_IPC_RX_INT_MASK		0x00A5C
#define MMC_IPC_RX_INT			0x00A60
#define MMC_RXIPV4_GD_PKTS_L		0x00A64
#define MMC_RXIPV4_GD_PKTS_H		0x00A68
#define MMC_RXIPV4_HDRERR_PKTS_L	0x00A6C
#define MMC_RXIPV4_HDRERR_PKTS_H	0x00A70
#define MMC_RXIPV4_NOPAY_PKTS_L		0x00A74
#define MMC_RXIPV4_NOPAY_PKTS_H		0x00A78
#define MMC_RXIPV4_FRAG_PKTS_L		0x00A7C
#define MMC_RXIPV4_FRAG_PKTS_H		0x00A80
#define MMC_RXIPV4_UBSBL_PKTS_L		0x00A84
#define MMC_RXIPV4_UBSBL_PKTS_H		0x00A88
#define MMC_RXIPV6_GD_PKTS_L		0x00A8C
#define MMC_RXIPV6_GD_PKTS_H		0x00A90
#define MMC_RXIPV6_HDRERR_PKTS_L	0x00A94
#define MMC_RXIPV6_HDRERR_PKTS_H	0x00A98
#define MMC_RXIPV6_NOPAY_PKTS_L		0x00A9C
#define MMC_RXIPV6_NOPAY_PKTS_H		0x00AA0
#define MMC_RXUDP_GD_PKTS_L		0x00AA4
#define MMC_RXUDP_GD_PKTS_H		0x00AA8
#define MMC_RXUDP_ERR_PKTS_L		0x00AAC
#define MMC_RXUDP_ERR_PKTS_H		0x00AB0
#define MMC_RXTCP_GD_PKTS_L		0x00AB4
#define MMC_RXTCP_GD_PKTS_H		0x00AB8
#define MMC_RXTCP_ERR_PKTS_L		0x00ABC
#define MMC_RXTCP_ERR_PKTS_H		0x00AC0
#define MMC_RXICMP_GD_PKTS_L		0x00AC4
#define MMC_RXICMP_GD_PKTS_H		0x00AC8
#define MMC_RXICMP_ERR_PKTS_L		0x00ACC
#define MMC_RXICMP_ERR_PKTS_H		0x00AD0
#define MMC_RXIPV4_GD_OCTETS_L		0x00AD4
#define MMC_RXIPV4_GD_OCTETS_H		0x00AD8
#define MMC_RXIPV4_HDRERR_OCTETS_L	0x00ADC
#define MMC_RXIPV4_HDRERR_OCTETS_H	0x00AE0
#define MMC_RXIPV4_NOPAY_OCTETS_L	0x00AE4
#define MMC_RXIPV4_NOPAY_OCTETS_H	0x00AE8
#define MMC_RXIPV4_FRAG_OCTETS_L	0x00AEC
#define MMC_RXIPV4_FRAG_OCTETS_H	0x00AF0
#define MMC_RXIPV4_UDP_CHKSM_DIS_OCT_L	0x00AF4
#define MMC_RXIPV4_UDP_CHKSM_DIS_OCT_H	0x00AF8
#define MMC_RXIPV6_GD_OCTETS_L		0x00AFC
#define MMC_RXIPV6_GD_OCTETS_H		0x00B00
#define MMC_RXIPV6_HDRERR_OCTETS_L	0x00B04
#define MMC_RXIPV6_HDRERR_OCTETS_H	0x00B08
#define MMC_RXIPV6_NOPAY_OCTETS_L	0x00B0C
#define MMC_RXIPV6_NOPAY_OCTETS_H	0x00B10
#define MMC_RXUDP_GD_OCTETS_L		0x00B14
#define MMC_RXUDP_GD_OCTETS_H		0x00B18
#define MMC_RXUDP_ERR_OCTETS_L		0x00B1C
#define MMC_RXUDP_ERR_OCTETS_H		0x00B20
#define MMC_RXTCP_GD_OCTETS_L		0x00B24
#define MMC_RXTCP_GD_OCTETS_H		0x00B28
#define MMC_RXTCP_ERR_OCTETS_L		0x00B2C
#define MMC_RXTCP_ERR_OCTETS_H		0x00B30
#define MMC_RXICMP_GD_OCTETS_L		0x00B34
#define MMC_RXICMP_GD_OCTETS_H		0x00B38
#define MMC_RXICMP_ERR_OCTETS_L		0x00B3C
#define MMC_RXICMP_ERR_OCTETS_H		0x00B40
/** @} */
/**
 * @brief mgbe_read_mmc - To read MMC registers and ether_mmc_counter structure
 *	  variable
 *
 * Algorithm: Pass register offset and old value to helper function and
 *	  update structure.
 *
 * @param[in] osi_core: OSI core private data structure.
 *
 * @note
 *	1) MAC should be init and started. see osi_start_mac()
 *	2) osi_core->osd should be populated
 */
void mgbe_read_mmc(struct osi_core_priv_data *osi_core);
/**
 * @brief mgbe_reset_mmc - To reset MMC registers and ether_mmc_counter
 *	  structure variable
 *
 * @param[in] osi_core: OSI core private data structure.
 *
 * @note
 *	1) MAC should be init and started. see osi_start_mac()
 *	2) osi_core->osd should be populated
 */
void mgbe_reset_mmc(struct osi_core_priv_data *osi_core);
#endif /* MGBE_MMC_H_ */

View File

@@ -1,306 +0,0 @@
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <local_common.h>
#include <ivc_core.h>
#include "core_local.h"
#include "../osi/common/common.h"
#ifdef HSI_SUPPORT
/**
 * @brief hsi_err_code - Array of HSI error codes and reporter IDs, one row
 * per Ethernet controller instance.
 *
 * Row layout:
 *	uncorrectable_error_code, correctable_error_code, reporter ID
 * hsi_err_code[0] to hsi_err_code[3] are for the MGBE instances;
 * hsi_err_code[4] is for EQOS.
 */
nveu32_t hsi_err_code[][3] = {
	{0x2A00, 0x2E08, 0x8019},
	{0x2A01, 0x2E09, 0x801A},
	{0x2A02, 0x2E0A, 0x801B},
	{0x2A03, 0x2E0B, 0x801C},
	{0x28AD, 0x2DE6, 0x8009},
};
#endif
/**
 * @brief g_core - Static core local data array.
 *
 * One slot per controller instance; slots are handed out by osi_get_core().
 */
static struct core_local g_core[MAX_CORE_INSTANCES];
/**
 * @brief if_ops - Static core interface operations for the virtual and
 * non-virtual cases (indexed by osi_core->use_virtualization).
 */
static struct if_core_ops if_ops[MAX_INTERFACE_OPS];
/**
 * @brief Function to validate that every interface function pointer is set.
 *
 * Treats the ops structure as a flat array of pointer-sized words and
 * fails if any word is zero (i.e. an unpopulated callback).
 *
 * @param[in] osi_core: OSI Core private data structure (used for logging).
 * @param[in] if_ops_p: Interface operations structure to validate.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: No
 * - De-initialization: No
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
static nve32_t validate_if_func_ptrs(struct osi_core_priv_data *const osi_core,
				     struct if_core_ops *if_ops_p)
{
	nveu32_t i = 0;
	/* go through void* so the struct can be scanned word-by-word */
	void *temp_ops = (void *)if_ops_p;
#if __SIZEOF_POINTER__ == 8
	nveu64_t *l_ops = (nveu64_t *)temp_ops;
#elif __SIZEOF_POINTER__ == 4
	nveu32_t *l_ops = (nveu32_t *)temp_ops;
#else
	OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
		     "Undefined architecture\n", 0ULL);
	return -1;
#endif

	/* one pointer-sized word per callback in the ops struct */
	for (i = 0; i < (sizeof(*if_ops_p) / (nveu64_t)__SIZEOF_POINTER__);
	     i++) {
		if (*l_ops == 0U) {
			OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
				     "failed at index : ", i);
			return -1;
		}

		l_ops++;
	}

	return 0;
}
/**
 * @brief Sanity-check an OSI core handle before dispatching interface calls.
 *
 * The handle is valid only when it is non-NULL, the interface layer has
 * completed initialization, and the stored magic number matches the handle
 * address (proving it was obtained via osi_get_core()).
 *
 * @param[in] osi_core: OSI Core private data structure.
 * @param[in] l_core: Core local view of the same object.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: Yes
 * - De-initialization: Yes
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
static inline nve32_t validate_if_args(struct osi_core_priv_data *const osi_core,
				       struct core_local *l_core)
{
	nve32_t ret = 0;

	if (osi_core == OSI_NULL) {
		ret = -1;
	} else if (l_core->if_init_done == OSI_DISABLE) {
		ret = -1;
	} else if (l_core->magic_num != (nveu64_t)osi_core) {
		ret = -1;
	} else {
		/* handle is valid */
	}

	return ret;
}
struct osi_core_priv_data *osi_get_core(void)
{
nveu32_t i;
for (i = 0U; i < MAX_CORE_INSTANCES; i++) {
if (g_core[i].if_init_done == OSI_ENABLE) {
continue;
}
break;
}
if (i == MAX_CORE_INSTANCES) {
return OSI_NULL;
}
g_core[i].magic_num = (nveu64_t)&g_core[i].osi_core;
g_core[i].tx_ts_head.prev = &g_core[i].tx_ts_head;
g_core[i].tx_ts_head.next = &g_core[i].tx_ts_head;
g_core[i].pps_freq = OSI_DISABLE;
return &g_core[i].osi_core;
}
struct osi_core_priv_data *get_role_pointer(nveu32_t role)
{
	struct osi_core_priv_data *ptr = OSI_NULL;
	nveu32_t i;

	if ((role == OSI_PTP_M2M_PRIMARY) || (role == OSI_PTP_M2M_SECONDARY)) {
		/* Current approach: return pointer for the first matching
		 * initialized instance.
		 */
		for (i = 0U; i < MAX_CORE_INSTANCES; i++) {
			if ((g_core[i].if_init_done == OSI_ENABLE) &&
			    (g_core[i].ether_m2m_role == role)) {
				ptr = &g_core[i].osi_core;
				break;
			}
		}
	}

	return ptr;
}
/**
 * @brief osi_init_core_ops - Select and initialize the interface operations
 * for one core instance, then initialize its local bookkeeping state.
 *
 * Must be called exactly once per handle obtained from osi_get_core();
 * re-initialization and unallocated handles are rejected via the magic
 * number / if_init_done checks.
 *
 * @param[in] osi_core: OSI core private data structure.
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
nve32_t osi_init_core_ops(struct osi_core_priv_data *const osi_core)
{
	struct core_local *l_core = (struct core_local *)osi_core;
	nve32_t ret = -1;

	if (osi_core == OSI_NULL) {
		return -1;
	}

	/* use_virtualization doubles as the index into if_ops[] below,
	 * so it must be OSI_DISABLE(0) or OSI_ENABLE(1) */
	if (osi_core->use_virtualization > OSI_ENABLE) {
		return ret;
	}

	/* reject handles not from osi_get_core() or already initialized */
	if ((l_core->magic_num != (nveu64_t)osi_core) ||
	    (l_core->if_init_done == OSI_ENABLE)) {
		return -1;
	}

	l_core->if_ops_p = &if_ops[osi_core->use_virtualization];

	if (osi_core->use_virtualization == OSI_DISABLE) {
		hw_interface_init_core_ops(l_core->if_ops_p);
	} else {
		ivc_interface_init_core_ops(l_core->if_ops_p);
	}

	/* every callback must be populated before it can be dispatched */
	if (validate_if_func_ptrs(osi_core, l_core->if_ops_p) < 0) {
		OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
			     "Interface function validation failed\n", 0ULL);
		return -1;
	}

	ret = l_core->if_ops_p->if_init_core_ops(osi_core);
	if (ret < 0) {
		OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
			     "if_init_core_ops failed\n", 0ULL);
		return ret;
	}

	/* reset MAC-to-MAC time sync servo state */
	l_core->ts_lock = OSI_DISABLE;
	l_core->ether_m2m_role = osi_core->m2m_role;
	l_core->serv.count = SERVO_STATS_0;
	l_core->serv.drift = 0;
	l_core->serv.last_ppb = 0;
	osi_lock_init(&l_core->serv.m2m_lock);
#ifdef MACSEC_SUPPORT
	osi_lock_init(&osi_core->macsec_fpe_lock);
#endif /* MACSEC_SUPPORT */
	l_core->hw_init_successful = OSI_DISABLE;
	l_core->m2m_tsync = OSI_DISABLE;
	/* mark initialized only after the ops table is usable */
	l_core->if_init_done = OSI_ENABLE;

	/* m2m time sync is enabled only for primary/secondary roles */
	if ((osi_core->m2m_role == OSI_PTP_M2M_PRIMARY) ||
	    (osi_core->m2m_role == OSI_PTP_M2M_SECONDARY)) {
		l_core->m2m_tsync = OSI_ENABLE;
	} else {
		l_core->m2m_tsync = OSI_DISABLE;
	}

	if (osi_core->pps_frq <= OSI_ENABLE) {
		l_core->pps_freq = osi_core->pps_frq;
	} else {
		OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
			     "invalid pps_frq\n", (nveu64_t)osi_core->pps_frq);
		ret = -1;
	}

	return ret;
}
/**
 * @brief osi_write_phy_reg - Write a PHY register through the selected
 * interface layer (virtual or direct).
 *
 * @param[in] osi_core: OSI core private data structure.
 * @param[in] phyaddr: PHY address on the MDIO bus.
 * @param[in] phyreg: PHY register number.
 * @param[in] phydata: Value to write.
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
nve32_t osi_write_phy_reg(struct osi_core_priv_data *const osi_core,
			  const nveu32_t phyaddr, const nveu32_t phyreg,
			  const nveu16_t phydata)
{
	struct core_local *l_core = (struct core_local *)osi_core;
	nve32_t ret = -1;

	if (validate_if_args(osi_core, l_core) == 0) {
		ret = l_core->if_ops_p->if_write_phy_reg(osi_core, phyaddr,
							 phyreg, phydata);
	}

	return ret;
}
/**
 * @brief osi_read_phy_reg - Read a PHY register through the selected
 * interface layer (virtual or direct).
 *
 * @param[in] osi_core: OSI core private data structure.
 * @param[in] phyaddr: PHY address on the MDIO bus.
 * @param[in] phyreg: PHY register number.
 *
 * @retval register value on Success
 * @retval -1 on Failure
 */
nve32_t osi_read_phy_reg(struct osi_core_priv_data *const osi_core,
			 const nveu32_t phyaddr, const nveu32_t phyreg)
{
	struct core_local *l_core = (struct core_local *)osi_core;
	nve32_t ret = -1;

	if (validate_if_args(osi_core, l_core) == 0) {
		ret = l_core->if_ops_p->if_read_phy_reg(osi_core, phyaddr,
							phyreg);
	}

	return ret;
}
/**
 * @brief osi_hw_core_init - Initialize MAC core HW through the selected
 * interface layer.
 *
 * @param[in] osi_core: OSI core private data structure.
 * @param[in] tx_fifo_size: Tx FIFO size to configure.
 * @param[in] rx_fifo_size: Rx FIFO size to configure.
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
nve32_t osi_hw_core_init(struct osi_core_priv_data *const osi_core,
			 nveu32_t tx_fifo_size, nveu32_t rx_fifo_size)
{
	struct core_local *l_core = (struct core_local *)osi_core;
	nve32_t ret = -1;

	if (validate_if_args(osi_core, l_core) == 0) {
		ret = l_core->if_ops_p->if_core_init(osi_core, tx_fifo_size,
						     rx_fifo_size);
	}

	return ret;
}
/**
 * @brief osi_hw_core_deinit - De-initialize MAC core HW through the selected
 * interface layer.
 *
 * @param[in] osi_core: OSI core private data structure.
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
nve32_t osi_hw_core_deinit(struct osi_core_priv_data *const osi_core)
{
	struct core_local *l_core = (struct core_local *)osi_core;
	nve32_t ret = -1;

	if (validate_if_args(osi_core, l_core) == 0) {
		ret = l_core->if_ops_p->if_core_deinit(osi_core);
	}

	return ret;
}
/**
 * @brief osi_handle_ioctl - Dispatch an ioctl-style request through the
 * selected interface layer.
 *
 * @param[in] osi_core: OSI core private data structure.
 * @param[in] data: ioctl command/argument structure; must be non-NULL.
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
nve32_t osi_handle_ioctl(struct osi_core_priv_data *osi_core,
			 struct osi_ioctl *data)
{
	struct core_local *l_core = (struct core_local *)osi_core;
	nve32_t ret = -1;

	if (validate_if_args(osi_core, l_core) == 0) {
		if (data == OSI_NULL) {
			OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
				     "CORE: Invalid argument\n", 0ULL);
		} else {
			ret = l_core->if_ops_p->if_handle_ioctl(osi_core,
								data);
		}
	}

	return ret;
}

View File

File diff suppressed because it is too large Load Diff

View File

@@ -1,474 +0,0 @@
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "../osi/common/common.h"
#include "vlan_filter.h"
/**
 * @brief get_vlan_filter_idx - Get VLAN HW filter index matching vlan_id
 *
 * Algorithm: Walk the set bits of vf_bitmap (each set bit marks an occupied
 * HW filter slot) and compare the VID stored at that slot with the
 * requested VLAN ID.
 *
 * @param[in] osi_core: OSI core private data
 * @param[in] vlan_id: VLAN ID to be searched in HW filters
 *
 * @return Index from VID array if match found.
 * @return VLAN_HW_FILTER_FULL_IDX if not found.
 */
static inline unsigned int get_vlan_filter_idx(
				struct osi_core_priv_data *osi_core,
				unsigned short vlan_id)
{
	unsigned int idx = VLAN_HW_FILTER_FULL_IDX;
	unsigned long pending = osi_core->vf_bitmap;
	unsigned long slot;

	/* pending &= (pending - 1) clears the lowest set bit, so each
	 * iteration visits exactly one occupied slot */
	for (; pending != 0U; pending &= (pending - 1U)) {
		slot = (unsigned long)__builtin_ctzl(pending);
		if (osi_core->vid[slot] == vlan_id) {
			idx = (unsigned int)slot;
			break;
		}
	}

	return idx;
}
/**
 * @brief allow_all_vid_tags - Program MAC to pass (or stop passing) all VIDs
 *
 * Algorithm: To pass every VID, enable VLAN hash-table matching and set the
 * hash register to all ones. To revert, disable hash matching and clear
 * the hash.
 *
 * @param[in] base: MAC base address
 * @param[in] pass_all_vids: OSI_ENABLE to pass all VIDs, else revert.
 *
 * @return 0 on success
 */
static inline int allow_all_vid_tags(unsigned char *base,
				     unsigned int pass_all_vids)
{
	unsigned int tag_ctrl = osi_readl(base + MAC_VLAN_TAG_CTRL);
	unsigned int hash = osi_readl(base + MAC_VLAN_HASH_FILTER);

	if (pass_all_vids == OSI_ENABLE) {
		/* hash match mode + all-ones hash => every VID matches */
		tag_ctrl |= MAC_VLAN_TAG_CTRL_VHTM;
		hash |= VLAN_HASH_ALLOW_ALL;
	} else {
		tag_ctrl &= ~MAC_VLAN_TAG_CTRL_VHTM;
		hash &= (unsigned int) ~VLAN_HASH_ALLOW_ALL;
	}

	osi_writel(tag_ctrl, base + MAC_VLAN_TAG_CTRL);
	osi_writel(hash, base + MAC_VLAN_HASH_FILTER);

	return 0;
}
/**
 * @brief is_vlan_id_enqueued - Check whether a VID is already in the SW queue.
 *
 * Algorithm: The SW overflow queue occupies vid[] indices from
 * VLAN_HW_FILTER_FULL_IDX up to vlan_filter_cnt. Scan that range for a
 * match and report its index.
 *
 * @param[in] osi_core: OSI core private data
 * @param[in] vlan_id: VLAN ID to be searched in the VID array.
 * @param[out] idx: Index of the VID array on match.
 *
 * @return 0 on Success (match found).
 * @return negative value on failure (not queued / queue empty).
 */
static inline int is_vlan_id_enqueued(struct osi_core_priv_data *osi_core,
				      unsigned short vlan_id,
				      unsigned int *idx)
{
	int ret = -1;
	unsigned int i;

	if (osi_core->vlan_filter_cnt != VLAN_HW_FILTER_FULL_IDX) {
		for (i = VLAN_HW_FILTER_FULL_IDX;
		     i <= osi_core->vlan_filter_cnt; i++) {
			if (osi_core->vid[i] == vlan_id) {
				*idx = i;
				ret = 0;
				break;
			}
		}
	}

	return ret;
}
/**
 * @brief enqueue_vlan_id - Add vlan_id to the SW VID overflow queue.
 *
 * Algorithm: Reject when the queue is full or the VID is already queued;
 * otherwise store the VID at index vlan_filter_cnt and bump the count.
 *
 * @param[in] osi_core: OSI core private data
 * @param[in] vlan_id: VLAN ID to be added to the VID array.
 *
 * @return 0 on success.
 * @return negative value on failure.
 */
static inline int enqueue_vlan_id(struct osi_core_priv_data *osi_core,
				  unsigned short vlan_id)
{
	unsigned int idx;

	if (osi_core->vlan_filter_cnt == VLAN_NUM_VID) {
		/* entire SW queue is full */
		return -1;
	}

	/* reject a VID that is already queued */
	if (is_vlan_id_enqueued(osi_core, vlan_id, &idx) == 0) {
		OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
			     "VLAN ID already programmed\n",
			     0ULL);
		return -1;
	}

	osi_core->vid[osi_core->vlan_filter_cnt] = vlan_id;
	osi_core->vlan_filter_cnt++;

	return 0;
}
/**
 * @brief poll_for_vlan_filter_reg_rw - Wait for an indirect VLAN filter
 * register operation to finish.
 *
 * Algorithm: Poll MAC_VLAN_TAG_CTRL until the OB (operation busy) bit
 * clears, sleeping 10 usec between polls, for up to 11 attempts.
 *
 * @param[in] osi_core: OSI core private data
 *
 * @return 0 on success.
 * @return -1 on timeout.
 */
static inline int poll_for_vlan_filter_reg_rw(
				struct osi_core_priv_data *osi_core)
{
	const unsigned int max_polls = 10U;
	unsigned int polls;
	unsigned int ctrl;

	for (polls = 0U; polls <= max_polls; polls++) {
		ctrl = osi_readl((unsigned char *)osi_core->base +
				 MAC_VLAN_TAG_CTRL);
		if ((ctrl & MAC_VLAN_TAG_CTRL_OB) == OSI_NONE) {
			/* OB cleared - operation complete */
			return 0;
		}

		/* wait 10 usec before polling OB again */
		osi_core->osd_ops.udelay(10U);
	}

	OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL,
		     "VLAN filter update timedout\n", 0ULL);
	return -1;
}
/**
 * @brief update_vlan_filters - Update one HW VLAN filter slot.
 *
 * Algorithm: Stage the filter word in MAC_VLAN_TAG_DATA, then trigger an
 * indirect write to slot vid_idx via MAC_VLAN_TAG_CTRL and wait for the
 * operation-busy bit to clear.
 *
 * @param[in] osi_core: OSI core private data.
 * @param[in] vid_idx: HW filter slot index.
 * @param[in] val: Filter word (VID + control bits) to be programmed.
 *
 * @return 0 on success
 * @return -1 on failure.
 */
static inline int update_vlan_filters(struct osi_core_priv_data *osi_core,
				      unsigned int vid_idx,
				      unsigned int val)
{
	unsigned char *base = (unsigned char *)osi_core->base;
	unsigned int ctrl;

	/* stage the data word first */
	osi_writel(val, base + MAC_VLAN_TAG_DATA);

	ctrl = osi_readl(base + MAC_VLAN_TAG_CTRL);
	ctrl &= (unsigned int) ~MAC_VLAN_TAG_CTRL_OFS_MASK;
	ctrl |= vid_idx << MAC_VLAN_TAG_CTRL_OFS_SHIFT;
	ctrl &= ~MAC_VLAN_TAG_CTRL_CT;	/* CT cleared => write command */
	ctrl |= MAC_VLAN_TAG_CTRL_OB;	/* kick the indirect operation */
	osi_writel(ctrl, base + MAC_VLAN_TAG_CTRL);

	if (poll_for_vlan_filter_reg_rw(osi_core) < 0) {
		OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL,
			     "Failed to update VLAN filters\n", 0ULL);
		return -1;
	}

	return 0;
}
/**
 * @brief add_vlan_id - Add VLAN ID.
 *
 * Algorithm: Add VLAN ID to a free HW filter slot; when all HW slots are
 * occupied, enqueue it in the SW VID array and open the hash filter to
 * pass all VIDs.
 *
 * @param[in] osi_core: OSI core private data.
 * @param[in] ops_p: Core operations (used to toggle VLAN filtering).
 * @param[in] vlan_id: VLAN ID to be programmed.
 *
 * @return 0 on success
 * @return -1 on failure.
 */
static inline int add_vlan_id(struct osi_core_priv_data *osi_core,
			      struct core_ops *ops_p,
			      unsigned short vlan_id)
{
	unsigned int vid_idx = 0;
	unsigned int val = 0;
	int ret = 0;

	/* Check if VLAN ID already programmed */
	vid_idx = get_vlan_filter_idx(osi_core, vlan_id);
	if (vid_idx != VLAN_HW_FILTER_FULL_IDX) {
		OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID,
			     "VLAN ID already added\n",
			     0ULL);
		return -1;
	}

	/* Get free index to add the VID (first clear bit of vf_bitmap) */
	vid_idx = (unsigned int) __builtin_ctzl(~osi_core->vf_bitmap);

	/* If there is no free filter index add into SW VLAN filter queue */
	if (vid_idx == VLAN_HW_FILTER_FULL_IDX) {
		/* Add VLAN ID to SW queue */
		ret = enqueue_vlan_id(osi_core, vlan_id);
		if (ret < 0) {
			/* braced per file convention (was unbraced) */
			return ret;
		}

		/* Since VLAN HW filters full - program to allow all packets */
		return allow_all_vid_tags(osi_core->base, OSI_ENABLE);
	}

	osi_core->vf_bitmap |= OSI_BIT(vid_idx);
	osi_core->vid[vid_idx] = vlan_id;
	osi_core->vlan_filter_cnt++;

	/* first VID added - turn on VLAN perfect filtering */
	if (osi_core->vlan_filter_cnt > 0U) {
		ret = ops_p->config_vlan_filtering(osi_core,
						   OSI_ENABLE,
						   OSI_DISABLE,
						   OSI_DISABLE);
		if (ret < 0) {
			OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
				     "Failed to enable VLAN filtering\n", 0ULL);
			return -1;
		}
	}

	/* build the filter word: VID + enable-tag-compare + slot valid */
	val = osi_readl((unsigned char *)osi_core->base + MAC_VLAN_TAG_DATA);
	val &= (unsigned int) ~VLAN_VID_MASK;
	val |= (vlan_id | MAC_VLAN_TAG_DATA_ETV | MAC_VLAN_TAG_DATA_VEN);

	return update_vlan_filters(osi_core, vid_idx, val);
}
/**
 * @brief dequeue_vlan_id - Remove VLAN ID from VID array
 *
 * Algorithm: Do the left shift of array from index to
 * total filter count. Disable the allow-all-VID hash once the
 * count drops back to the HW capacity (32).
 *
 * @param[in] osi_core: OSI core private data.
 * @param[in] idx: Index at which VLAN ID to be deleted.
 *
 * @return 0 on success
 * @return -1 on failure.
 */
static inline int dequeue_vlan_id(struct osi_core_priv_data *osi_core,
				  unsigned int idx)
{
	unsigned int i;

	if (osi_core->vlan_filter_cnt == VLAN_HW_MAX_NRVF) {
		/* SW queue is empty - nothing to dequeue */
		return -1;
	}

	/* Left shift the array elements by one for the VID order.
	 * NOTE(review): the last iteration reads vid[vlan_filter_cnt + 1];
	 * when vlan_filter_cnt is at the vid[] array bound this looks like
	 * a one-element over-read - confirm vid[] size (VLAN_NUM_VID) has
	 * headroom for this.
	 */
	for (i = idx; i <= osi_core->vlan_filter_cnt; i++) {
		osi_core->vid[i] = osi_core->vid[i + 1];
	}

	osi_core->vid[i] = VLAN_ID_INVALID;
	osi_core->vlan_filter_cnt--;

	/* back at exactly 32 entries: HW filters cover everything again,
	 * so stop passing all VIDs */
	if (osi_core->vlan_filter_cnt == VLAN_HW_MAX_NRVF) {
		allow_all_vid_tags(osi_core->base, OSI_DISABLE);
	}

	return 0;
}
/**
 * @brief dequeue_vid_to_add_filter_reg - Get VID from SW queue and add to HW
 * filters.
 *
 * Algorithm: Take the oldest queued VID (index 32, i.e.
 * VLAN_HW_FILTER_FULL_IDX) and program it into the freed HW filter slot,
 * then shift the remaining SW queue entries down by one.
 *
 * @param[in] osi_core: OSI core private data.
 * @param[in] vid_idx: Freed HW filter slot to reuse.
 *
 * @return 0 on success
 * @return -1 on failure.
 */
static inline int dequeue_vid_to_add_filter_reg(
					struct osi_core_priv_data *osi_core,
					unsigned int vid_idx)
{
	unsigned int val = 0;
	unsigned short vlan_id = 0;
	unsigned int i = 0;
	int ret = 0;

	/* head of the SW overflow queue */
	vlan_id = osi_core->vid[VLAN_HW_FILTER_FULL_IDX];
	if (vlan_id == VLAN_ID_INVALID) {
		/* SW queue empty - nothing to promote to HW */
		return 0;
	}

	/* claim the freed HW slot for this VID */
	osi_core->vf_bitmap |= OSI_BIT(vid_idx);
	osi_core->vid[vid_idx] = vlan_id;

	val = osi_readl((unsigned char *)osi_core->base + MAC_VLAN_TAG_DATA);
	val &= (unsigned int) ~VLAN_VID_MASK;
	val |= (vlan_id | MAC_VLAN_TAG_DATA_ETV | MAC_VLAN_TAG_DATA_VEN);

	ret = update_vlan_filters(osi_core, vid_idx, val);
	if (ret < 0) {
		return -1;
	}

	/* shift the SW queue down by one.
	 * NOTE(review): like dequeue_vlan_id(), the final iteration reads
	 * vid[vlan_filter_cnt + 1] - confirm vid[] has headroom. */
	for (i = VLAN_HW_FILTER_FULL_IDX; i <= osi_core->vlan_filter_cnt; i++) {
		osi_core->vid[i] = osi_core->vid[i + 1];
	}

	osi_core->vid[i] = VLAN_ID_INVALID;

	return 0;
}
/**
 * @brief del_vlan_id - Delete VLAN ID.
 *
 * Algorithm: Delete VLAN ID from HW filters or SW VID array. When a HW
 * slot is freed, promote the oldest queued SW VID into it.
 *
 * @param[in] osi_core: OSI core private data.
 * @param[in] ops_p: Core operations (used to toggle VLAN filtering).
 * @param[in] vlan_id: VLAN ID to be deleted
 *
 * @return 0 on success
 * @return -1 on failure.
 */
static inline int del_vlan_id(struct osi_core_priv_data *osi_core,
			      struct core_ops *ops_p,
			      unsigned short vlan_id)
{
	unsigned int vid_idx = 0;
	unsigned int val = 0;
	unsigned int idx;
	int ret = 0;

	/* Search for vlan filter index to be deleted */
	vid_idx = get_vlan_filter_idx(osi_core, vlan_id);
	if (vid_idx == VLAN_HW_FILTER_FULL_IDX) {
		/* not in HW - maybe it is waiting in the SW queue */
		ret = is_vlan_id_enqueued(osi_core, vlan_id, &idx);
		if (ret != 0) {
			/* VID not found in HW/SW filter list */
			return -1;
		}

		return dequeue_vlan_id(osi_core, idx);
	}

	/* free the HW slot; val is 0 here, clearing the filter entry */
	osi_core->vf_bitmap &= ~OSI_BIT(vid_idx);
	osi_core->vid[vid_idx] = VLAN_ID_INVALID;

	ret = update_vlan_filters(osi_core, vid_idx, val);
	if (ret < 0) {
		return -1;
	}

	osi_core->vlan_filter_cnt--;

	/* last VID removed - turn VLAN filtering off entirely */
	if (osi_core->vlan_filter_cnt == 0U) {
		ret = ops_p->config_vlan_filtering(osi_core,
						   OSI_DISABLE,
						   OSI_DISABLE,
						   OSI_DISABLE);
		if (ret < 0) {
			OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
				     "Failed to disable VLAN filtering\n", 0ULL);
			return -1;
		}
	}

	/* count back at HW capacity: stop passing all VIDs via hash */
	if (osi_core->vlan_filter_cnt == VLAN_HW_MAX_NRVF) {
		allow_all_vid_tags(osi_core->base, OSI_DISABLE);
	}

	/* if SW queue is not empty dequeue from SW queue and update filter */
	return dequeue_vid_to_add_filter_reg(osi_core, vid_idx);
}
int update_vlan_id(struct osi_core_priv_data *osi_core,
		   struct core_ops *ops_p,
		   unsigned int vid)
{
	/* low 16 bits carry the VID, bit 31 selects add vs. delete */
	unsigned short vlan_id = (unsigned short)(vid & VLAN_VID_MASK);
	int ret;

	if ((vid & VLAN_ACTION_MASK) == OSI_VLAN_ACTION_ADD) {
		ret = add_vlan_id(osi_core, ops_p, vlan_id);
	} else {
		ret = del_vlan_id(osi_core, ops_p, vlan_id);
	}

	return ret;
}

View File

@@ -1,76 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef VLAN_FILTER_H
#define VLAN_FILTER_H
#include <osi_core.h>
#include "core_local.h"
/**
 * @addtogroup MAC-VLAN MAC VLAN configuration registers and bit fields
 *
 * @brief These are the macros for register offsets and bit fields
 * for VLAN configuration.
 * @{
 */
#define MAC_VLAN_TAG_CTRL		0x50
#define MAC_VLAN_TAG_DATA		0x54
#define MAC_VLAN_HASH_FILTER		0x58
#define MAC_VLAN_TAG_CTRL_OFS_MASK	0x7C
#define MAC_VLAN_TAG_CTRL_OFS_SHIFT	2U
#define MAC_VLAN_TAG_CTRL_CT		OSI_BIT(1)
#define MAC_VLAN_TAG_CTRL_OB		OSI_BIT(0)
#define MAC_VLAN_TAG_CTRL_VHTM		OSI_BIT(25)
#define MAC_VLAN_TAG_DATA_ETV		OSI_BIT(16)
#define MAC_VLAN_TAG_DATA_VEN		OSI_BIT(17)
/** @} */
/**
 * @addtogroup VLAN filter macros
 *
 * @brief VLAN filtering related macros
 * @{
 */
#define VLAN_HW_MAX_NRVF		32U
#define VLAN_HW_FILTER_FULL_IDX		VLAN_HW_MAX_NRVF
#define VLAN_VID_MASK			0xFFFF
#define VLAN_ID_INVALID			0xFFFF
#define VLAN_HASH_ALLOW_ALL		0xFFFF
#define VLAN_ACTION_MASK		OSI_BIT(31)
/** @} */
/**
 * @brief update_vlan_id - Add/Delete VLAN ID.
 *
 * Algorithm: Add/delete VLAN ID from HW filters or SW VID array.
 *
 * @param[in] osi_core: OSI core private data.
 * @param[in] ops_p: Core operations structure.
 * @param[in] vid: VLAN ID to be added/deleted (bit 31 selects the action).
 *
 * @return 0 on success
 * @return -1 on failure.
 */
int update_vlan_id(struct osi_core_priv_data *osi_core,
		   struct core_ops *ops_p,
		   unsigned int vid);
#endif /* VLAN_FILTER_H */

View File

@@ -1,638 +0,0 @@
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "xpcs.h"
/**
* @brief xpcs_poll_for_an_complete - Polling for AN complete.
*
* Algorithm: This routine poll for AN completion status from
* XPCS IP.
*
* @param[in] osi_core: OSI core data structure.
* @param[out] an_status: AN status from XPCS
*
* @retval 0 on success
* @retval -1 on failure.
*/
static inline int xpcs_poll_for_an_complete(struct osi_core_priv_data *osi_core,
					    unsigned int *an_status)
{
	void *xpcs_base = osi_core->xpcs_base;
	unsigned int intr_sts = 0;
	unsigned int attempt;
	int ret;

	/* 14. Poll for AN complete (up to 1001 reads, 1ms apart) */
	for (attempt = 0U; ; attempt++) {
		if (attempt > 1000U) {
			OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL,
				     "XPCS AN completion timed out\n", 0ULL);
#ifdef HSI_SUPPORT
			if (osi_core->hsi.enabled == OSI_ENABLE) {
				osi_core->hsi.err_code[AUTONEG_ERR_IDX] =
					OSI_PCS_AUTONEG_ERR;
				osi_core->hsi.report_err = OSI_ENABLE;
				osi_core->hsi.report_count_err[AUTONEG_ERR_IDX] = OSI_ENABLE;
			}
#endif
			return -1;
		}

		intr_sts = xpcs_read(xpcs_base, XPCS_VR_MII_AN_INTR_STS);
		if ((intr_sts & XPCS_VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR) != 0U) {
			/* 15. AN done - acknowledge the completion interrupt */
			intr_sts &= ~XPCS_VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR;
			ret = xpcs_write_safety(osi_core, XPCS_VR_MII_AN_INTR_STS,
						intr_sts);
			if (ret != 0) {
				return ret;
			}
			break;
		}

		/* autoneg not completed yet - wait and poll again */
		osi_core->osd_ops.udelay(1000U);
	}

	/* AN must have negotiated a non-zero speed to be usable */
	if ((intr_sts & XPCS_USXG_AN_STS_SPEED_MASK) == 0U) {
		OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL,
			     "XPCS AN completed with zero speed\n", 0ULL);
		return -1;
	}

	*an_status = intr_sts;
	return 0;
}
/**
* @brief xpcs_set_speed - Set speed at XPCS
*
* Algorithm: This routine program XPCS speed based on AN status.
*
* @param[in] osi_core: OSI core data structure.
 * @param[in] status: Auto-negotiation status.
*
* @retval 0 on success
* @retval -1 on failure
*/
static inline int xpcs_set_speed(struct osi_core_priv_data *osi_core,
				 unsigned int status)
{
	void *xpcs_base = osi_core->xpcs_base;
	unsigned int set_bits;
	unsigned int clr_bits;
	unsigned int ctrl;

	/* Translate the AN-reported speed into the SS5/SS6/SS13 encoding */
	switch (status & XPCS_USXG_AN_STS_SPEED_MASK) {
	case XPCS_USXG_AN_STS_SPEED_2500:
		/* 2.5Gbps: SS5=1, SS6=0, SS13=0 */
		set_bits = XPCS_SR_MII_CTRL_SS5;
		clr_bits = XPCS_SR_MII_CTRL_SS6 | XPCS_SR_MII_CTRL_SS13;
		break;
	case XPCS_USXG_AN_STS_SPEED_5000:
		/* 5Gbps: SS5=1, SS13=1, SS6=0 */
		set_bits = XPCS_SR_MII_CTRL_SS5 | XPCS_SR_MII_CTRL_SS13;
		clr_bits = XPCS_SR_MII_CTRL_SS6;
		break;
	case XPCS_USXG_AN_STS_SPEED_10000:
	default:
		/* 10Gbps: SS6=1, SS13=1, SS5=0 */
		set_bits = XPCS_SR_MII_CTRL_SS6 | XPCS_SR_MII_CTRL_SS13;
		clr_bits = XPCS_SR_MII_CTRL_SS5;
		break;
	}

	ctrl = xpcs_read(xpcs_base, XPCS_SR_MII_CTRL);
	ctrl = (ctrl & ~clr_bits) | set_bits;
	return xpcs_write_safety(osi_core, XPCS_SR_MII_CTRL, ctrl);
}
/**
* @brief xpcs_start - Start XPCS
*
* Algorithm: This routine enables AN and set speed based on AN status
*
* @param[in] osi_core: OSI core data structure.
*
* @retval 0 on success
* @retval -1 on failure.
*/
int xpcs_start(struct osi_core_priv_data *osi_core)
{
	void *xpcs_base = osi_core->xpcs_base;
	unsigned int an_status = 0;
	unsigned int regval;
	unsigned int i;
	int ret;

	if (xpcs_base == OSI_NULL) {
		OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL,
			     "XPCS base is NULL", 0ULL);
		/* TODO: Remove this once silicon arrives */
		return 0;
	}

	if ((osi_core->phy_iface_mode == OSI_USXGMII_MODE_10G) ||
	    (osi_core->phy_iface_mode == OSI_USXGMII_MODE_5G)) {
		/* Kick off CL37 auto-negotiation */
		regval = xpcs_read(xpcs_base, XPCS_SR_MII_CTRL);
		regval |= XPCS_SR_MII_CTRL_AN_ENABLE;
		ret = xpcs_write_safety(osi_core, XPCS_SR_MII_CTRL, regval);
		if (ret != 0) {
			return ret;
		}

		ret = xpcs_poll_for_an_complete(osi_core, &an_status);
		if (ret < 0) {
			return ret;
		}

		ret = xpcs_set_speed(osi_core, an_status);
		if (ret != 0) {
			return ret;
		}

		/* USXGMII Rate Adaptor Reset before data transfer */
		regval = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_DIG_CTRL1);
		regval |= XPCS_VR_XS_PCS_DIG_CTRL1_USRA_RST;
		xpcs_write(xpcs_base, XPCS_VR_XS_PCS_DIG_CTRL1, regval);

		/* HW self-clears USRA_RST once the reset is done */
		for (i = 0U; ; i++) {
			if (i > RETRY_COUNT) {
				return -1;
			}
			regval = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_DIG_CTRL1);
			if ((regval & XPCS_VR_XS_PCS_DIG_CTRL1_USRA_RST) == 0U) {
				break;
			}
			osi_core->osd_ops.udelay(1000U);
		}
	}

	/* poll for Rx link up */
	for (i = 0U; ; i++) {
		if (i > RETRY_COUNT) {
			return -1;
		}
		regval = xpcs_read(xpcs_base, XPCS_SR_XS_PCS_STS1);
		if ((regval & XPCS_SR_XS_PCS_STS1_RLU) ==
		    XPCS_SR_XS_PCS_STS1_RLU) {
			break;
		}
		osi_core->osd_ops.udelay(1000U);
	}

	return 0;
}
/**
* @brief xpcs_uphy_lane_bring_up - Bring up UPHY Tx/Rx lanes
*
* Algorithm: This routine bring up the UPHY Tx/Rx lanes
* through XPCS FSM wrapper.
*
* @param[in] osi_core: OSI core data structure.
* @param[in] lane_init_en: Tx/Rx lane init value.
*
* @retval 0 on success
* @retval -1 on failure.
*/
static nve32_t xpcs_uphy_lane_bring_up(struct osi_core_priv_data *osi_core,
unsigned int lane_init_en)
{
void *xpcs_base = osi_core->xpcs_base;
nveu32_t retry = XPCS_RETRY_COUNT;
nve32_t cond = COND_NOT_MET;
nveu32_t val = 0;
nveu32_t count;
val = osi_readla(osi_core,
(nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_STATUS);
if ((val & XPCS_WRAP_UPHY_STATUS_TX_P_UP_STATUS) ==
XPCS_WRAP_UPHY_STATUS_TX_P_UP_STATUS) {
/* return success if TX lane is already UP */
return 0;
}
val = osi_readla(osi_core,
(nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_HW_INIT_CTRL);
val |= lane_init_en;
osi_writela(osi_core, val,
(nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_HW_INIT_CTRL);
count = 0;
while (cond == COND_NOT_MET) {
if (count > retry) {
return -1;
}
count++;
val = osi_readla(osi_core,
(nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_HW_INIT_CTRL);
if ((val & lane_init_en) == OSI_NONE) {
/* exit loop */
cond = COND_MET;
} else {
osi_core->osd_ops.udelay(500U);
}
}
return 0;
}
/**
* @brief xpcs_check_pcs_lock_status - Checks whether PCS lock happened or not.
*
* Algorithm: This routine helps to check whether PCS lock happened or not.
*
* @param[in] osi_core: OSI core data structure.
*
* @retval 0 on success
* @retval -1 on failure.
*/
static nve32_t xpcs_check_pcs_lock_status(struct osi_core_priv_data *osi_core)
{
void *xpcs_base = osi_core->xpcs_base;
nveu32_t retry = XPCS_RETRY_COUNT;
nve32_t cond = COND_NOT_MET;
nveu32_t val = 0;
nveu32_t count;
count = 0;
while (cond == COND_NOT_MET) {
if (count > retry) {
return -1;
}
count++;
val = osi_readla(osi_core,
(nveu8_t *)xpcs_base + XPCS_WRAP_IRQ_STATUS);
if ((val & XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS) ==
XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS) {
/* exit loop */
cond = COND_MET;
} else {
osi_core->osd_ops.udelay(500U);
}
}
/* Clear the status */
osi_writela(osi_core, val, (nveu8_t *)xpcs_base + XPCS_WRAP_IRQ_STATUS);
return 0;
}
/**
* @brief xpcs_lane_bring_up - Bring up UPHY Tx/Rx lanes
*
* Algorithm: This routine bring up the UPHY Tx/Rx lanes
* through XPCS FSM wrapper.
*
* @param[in] osi_core: OSI core data structure.
*
* @retval 0 on success
* @retval -1 on failure.
*/
/**
 * @brief xpcs_rx_ctrl_0_rmw - read-modify-write of UPHY RX_CONTROL_0_0.
 *
 * Performs exactly one read and one write of the register: clears
 * @p clr_bits, then sets @p set_bits.
 *
 * @param[in] osi_core: OSI core data structure.
 * @param[in] set_bits: bit mask to set.
 * @param[in] clr_bits: bit mask to clear.
 */
static void xpcs_rx_ctrl_0_rmw(struct osi_core_priv_data *osi_core,
			       nveu32_t set_bits, nveu32_t clr_bits)
{
	nveu8_t *addr = (nveu8_t *)osi_core->xpcs_base +
			XPCS_WRAP_UPHY_RX_CONTROL_0_0;
	nveu32_t regval;

	regval = osi_readla(osi_core, addr);
	regval &= ~clr_bits;
	regval |= set_bits;
	osi_writela(osi_core, regval, addr);
}

static nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core)
{
	nveu32_t regval;
	nveu32_t i;

	if (xpcs_uphy_lane_bring_up(osi_core,
				    XPCS_WRAP_UPHY_HW_INIT_CTRL_TX_EN) < 0) {
		OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL,
			     "UPHY TX lane bring-up failed\n", 0ULL);
		return -1;
	}

	/* Step1 RX_SW_OVRD */
	xpcs_rx_ctrl_0_rmw(osi_core,
			   XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_SW_OVRD, 0U);
	/* Step2 RX_IDDQ */
	xpcs_rx_ctrl_0_rmw(osi_core, 0U,
			   XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_IDDQ);
	/* Step2 AUX_RX_IDDQ */
	xpcs_rx_ctrl_0_rmw(osi_core, 0U,
			   XPCS_WRAP_UPHY_RX_CONTROL_0_0_AUX_RX_IDDQ);
	/* Step3 RX_SLEEP */
	xpcs_rx_ctrl_0_rmw(osi_core, 0U,
			   XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_SLEEP);
	/* Step4 RX_CAL_EN */
	xpcs_rx_ctrl_0_rmw(osi_core,
			   XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_CAL_EN, 0U);

	/* Step5 poll until HW clears RX_CAL_EN (calibration finished) */
	for (i = 0U; ; i++) {
		if (i > 1000U) {
			return -1;
		}
		regval = osi_readla(osi_core,
				    (nveu8_t *)osi_core->xpcs_base +
				    XPCS_WRAP_UPHY_RX_CONTROL_0_0);
		if ((regval & XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_CAL_EN) == 0U) {
			break;
		}
		osi_core->osd_ops.udelay(1000U);
	}

	/* Step6 RX_DATA_EN */
	xpcs_rx_ctrl_0_rmw(osi_core,
			   XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_DATA_EN, 0U);
	/* Step7 assert RX_CDR_RESET */
	xpcs_rx_ctrl_0_rmw(osi_core,
			   XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_CDR_RESET, 0U);
	/* Step8 de-assert RX_CDR_RESET */
	xpcs_rx_ctrl_0_rmw(osi_core, 0U,
			   XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_CDR_RESET);
	/* Step9 RX_PCS_PHY_RDY */
	xpcs_rx_ctrl_0_rmw(osi_core,
			   XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_PCS_PHY_RDY, 0U);

	if (xpcs_check_pcs_lock_status(osi_core) < 0) {
		OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL,
			     "Failed to get PCS block lock\n", 0ULL);
		return -1;
	}

	return 0;
}
/**
* @brief xpcs_init - XPCS initialization
*
 * Algorithm: This routine initializes XPCS in USXGMII mode.
*
* @param[in] osi_core: OSI core data structure.
*
* @retval 0 on success
* @retval -1 on failure.
*/
int xpcs_init(struct osi_core_priv_data *osi_core)
{
	void *xpcs_base = osi_core->xpcs_base;
	unsigned int retry = 1000;
	unsigned int count;
	unsigned int ctrl = 0;
	int cond = 1;
	int ret = 0;

	if (osi_core->xpcs_base == OSI_NULL) {
		OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL,
			     "XPCS base is NULL", 0ULL);
		/* TODO: Remove this once silicon arrives */
		return 0;
	}

	/* UPHY lane bring-up is only applicable on silicon */
	if (osi_core->pre_si != OSI_ENABLE) {
		if (xpcs_lane_bring_up(osi_core) < 0) {
			OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL,
				     "TX/RX lane bring-up failed\n", 0ULL);
			return -1;
		}
	}

	/* Switching to USXGMII Mode based on
	 * XPCS programming guideline 7.6
	 */
	/* 1. switch DWC_xpcs to BASE-R mode */
	ctrl = xpcs_read(xpcs_base, XPCS_SR_XS_PCS_CTRL2);
	ctrl |= XPCS_SR_XS_PCS_CTRL2_PCS_TYPE_SEL_BASE_R;
	ret = xpcs_write_safety(osi_core, XPCS_SR_XS_PCS_CTRL2, ctrl);
	if (ret != 0) {
		return ret;
	}

	/* 2. enable USXGMII Mode inside DWC_xpcs */
	/* 3. USXG_MODE = 10G - default it will be 10G mode */
	if ((osi_core->phy_iface_mode == OSI_USXGMII_MODE_10G) ||
	    (osi_core->phy_iface_mode == OSI_USXGMII_MODE_5G)) {
		ctrl = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_KR_CTRL);
		ctrl &= ~(XPCS_VR_XS_PCS_KR_CTRL_USXG_MODE_MASK);
		if (osi_core->uphy_gbe_mode == OSI_DISABLE) {
			ctrl |= XPCS_VR_XS_PCS_KR_CTRL_USXG_MODE_5G;
		}
		/* BUG FIX: this write used to be outside the USXGMII check,
		 * which clobbered KR_CTRL with the stale CTRL2 value read in
		 * step 1 when phy_iface_mode is not a USXGMII mode. KR_CTRL
		 * must only be updated with the value read from KR_CTRL.
		 */
		ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_KR_CTRL, ctrl);
		if (ret != 0) {
			return ret;
		}
	}

	/* 4. Program PHY to operate at 10Gbps/5Gbps/2Gbps
	 * this step not required since PHY speed programming
	 * already done as part of phy INIT
	 */
	/* 5. Vendor specific software reset */
	ctrl = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_DIG_CTRL1);
	ctrl |= XPCS_VR_XS_PCS_DIG_CTRL1_USXG_EN;
	ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_DIG_CTRL1, ctrl);
	if (ret != 0) {
		return ret;
	}

	/* XPCS_VR_XS_PCS_DIG_CTRL1_VR_RST bit is self clearing,
	 * value readback verification is not needed
	 */
	ctrl |= XPCS_VR_XS_PCS_DIG_CTRL1_VR_RST;
	xpcs_write(xpcs_base, XPCS_VR_XS_PCS_DIG_CTRL1, ctrl);

	/* 6. Programming for Synopsys PHY - NA */
	/* 7. poll until vendor specific software reset completes */
	cond = 1;
	count = 0;
	while (cond == 1) {
		if (count > retry) {
			return -1;
		}
		count++;
		ctrl = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_DIG_CTRL1);
		if ((ctrl & XPCS_VR_XS_PCS_DIG_CTRL1_VR_RST) == 0U) {
			cond = 0;
		} else {
			osi_core->osd_ops.udelay(1000U);
		}
	}

	/* 8. Backplane Ethernet PCS configurations
	 * clear AN_EN in SR_AN_CTRL
	 * set CL37_BP in VR_XS_PCS_DIG_CTRL1
	 */
	if ((osi_core->phy_iface_mode == OSI_USXGMII_MODE_10G) ||
	    (osi_core->phy_iface_mode == OSI_USXGMII_MODE_5G)) {
		ctrl = xpcs_read(xpcs_base, XPCS_SR_AN_CTRL);
		ctrl &= ~XPCS_SR_AN_CTRL_AN_EN;
		ret = xpcs_write_safety(osi_core, XPCS_SR_AN_CTRL, ctrl);
		if (ret != 0) {
			return ret;
		}
		ctrl = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_DIG_CTRL1);
		ctrl |= XPCS_VR_XS_PCS_DIG_CTRL1_CL37_BP;
		ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_DIG_CTRL1, ctrl);
		if (ret != 0) {
			return ret;
		}
	}

	/* TODO: 9. MII_AN_INTR_EN to 1, to enable auto-negotiation
	 * complete interrupt */
	/* 10. (Optional step) Duration of link timer change */
	/* 11. XPCS configured as MAC-side USGMII - NA */
	/* 13. TODO: If there is interrupt enabled for AN interrupt */
	return 0;
}
/**
* @brief xpcs_eee - XPCS enable/disable EEE
*
* Algorithm: This routine update register related to EEE
* for XPCS.
*
* @param[in] osi_core: OSI core data structure.
* @param[in] en_dis: enable - 1 or disable - 0
*
* @retval 0 on success
* @retval -1 on failure.
*/
int xpcs_eee(struct osi_core_priv_data *osi_core, unsigned int en_dis)
{
	void *xpcs_base = osi_core->xpcs_base;
	unsigned int val = 0x0U;
	int ret = 0;

	/* Only OSI_ENABLE / OSI_DISABLE are valid requests.
	 * Fix: parenthesized sub-expressions and braced bodies for
	 * consistency with the rest of the file (the NULL check below was
	 * previously an unbraced if).
	 */
	if ((en_dis != OSI_ENABLE) && (en_dis != OSI_DISABLE)) {
		return -1;
	}

	if (xpcs_base == OSI_NULL) {
		return -1;
	}

	if (en_dis == OSI_DISABLE) {
		/* Disable LPI on both the Tx and Rx paths */
		val = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_EEE_MCTRL0);
		val &= ~XPCS_VR_XS_PCS_EEE_MCTRL0_LTX_EN;
		val &= ~XPCS_VR_XS_PCS_EEE_MCTRL0_LRX_EN;
		ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_EEE_MCTRL0, val);
		if (ret != 0) {
			return ret;
		}
		return 0;
	}

	/* 1. Check if DWC_xpcs supports the EEE feature by
	 * reading the SR_XS_PCS_EEE_ABL register
	 * 1000BASEX-Only is different config then else so can (skip) */
	/* 2. Program various timers used in the EEE mode depending on the
	 * clk_eee_i clock frequency. default times are same as IEEE std
	 * clk_eee_i() is 102MHz. MULT_FACT_100NS = 9 because 9.8ns*10 = 98
	 * which is between 80 and 120 this leads to default setting match */
	val = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_EEE_MCTRL0);
	/* 3. If FEC is enabled in the KR mode (skip in FPGA)*/
	/* 4. enable the EEE feature on the Tx path and Rx path */
	val |= (XPCS_VR_XS_PCS_EEE_MCTRL0_LTX_EN |
		XPCS_VR_XS_PCS_EEE_MCTRL0_LRX_EN);
	ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_EEE_MCTRL0, val);
	if (ret != 0) {
		return ret;
	}

	/* Transparent Tx LPI Mode Enable */
	val = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_EEE_MCTRL1);
	val |= XPCS_VR_XS_PCS_EEE_MCTRL1_TRN_LPI;
	ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_EEE_MCTRL1, val);
	if (ret != 0) {
		return ret;
	}

	return 0;
}

View File

@@ -1,203 +0,0 @@
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef XPCS_H_
#define XPCS_H_
#include "../osi/common/common.h"
#include <osi_core.h>
/**
* @addtogroup XPCS helper macros
*
* @brief XPCS helper macros.
* @{
*/
#define XPCS_RETRY_COUNT (RETRY_COUNT * (2U))
/** @} */
/**
* @addtogroup XPCS Register offsets
*
* @brief XPCS register offsets
* @{
*/
#define XPCS_ADDRESS 0x03FC
#define XPCS_SR_XS_PCS_CTRL1 0xC0000
#define XPCS_SR_XS_PCS_STS1 0xC0004
#define XPCS_SR_XS_PCS_CTRL2 0xC001C
#define XPCS_SR_XS_PCS_EEE_ABL 0xC0050
#define XPCS_SR_XS_PCS_EEE_ABL2 0xC0054
#define XPCS_VR_XS_PCS_DIG_CTRL1 0xE0000
#define XPCS_VR_XS_PCS_KR_CTRL 0xE001C
#define XPCS_SR_AN_CTRL 0x1C0000
#define XPCS_SR_MII_CTRL 0x7C0000
#define XPCS_VR_MII_AN_INTR_STS 0x7E0008
#define XPCS_VR_XS_PCS_EEE_MCTRL0 0xE0018
#define XPCS_VR_XS_PCS_EEE_MCTRL1 0xE002C
#define XPCS_WRAP_UPHY_HW_INIT_CTRL 0x8020
#define XPCS_WRAP_UPHY_STATUS 0x8044
#define XPCS_WRAP_IRQ_STATUS 0x8050
#define XPCS_WRAP_UPHY_RX_CONTROL_0_0 0x801C
/** @} */
/**
* @addtogroup XPCS-BIT Register bit fileds
*
* @brief XPCS register bit fields
* @{
*/
#define XPCS_SR_XS_PCS_CTRL1_RST OSI_BIT(15)
#define XPCS_SR_XS_PCS_CTRL2_PCS_TYPE_SEL_BASE_R 0x0U
#define XPCS_SR_XS_PCS_STS1_RLU OSI_BIT(2)
#define XPCS_VR_XS_PCS_DIG_CTRL1_USXG_EN OSI_BIT(9)
#define XPCS_VR_XS_PCS_DIG_CTRL1_VR_RST OSI_BIT(15)
#define XPCS_VR_XS_PCS_DIG_CTRL1_USRA_RST OSI_BIT(10)
#define XPCS_VR_XS_PCS_DIG_CTRL1_CL37_BP OSI_BIT(12)
#define XPCS_VR_XS_PCS_EEE_MCTRL1_TRN_LPI OSI_BIT(0)
#define XPCS_VR_XS_PCS_EEE_MCTRL0_LTX_EN OSI_BIT(0)
#define XPCS_VR_XS_PCS_EEE_MCTRL0_LRX_EN OSI_BIT(1)
#define XPCS_SR_AN_CTRL_AN_EN OSI_BIT(12)
#define XPCS_SR_MII_CTRL_AN_ENABLE OSI_BIT(12)
#define XPCS_VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR OSI_BIT(0)
#define XPCS_SR_MII_CTRL_SS5 OSI_BIT(5)
#define XPCS_SR_MII_CTRL_SS6 OSI_BIT(6)
#define XPCS_SR_MII_CTRL_SS13 OSI_BIT(13)
#define XPCS_USXG_AN_STS_SPEED_MASK 0x1C00U
#define XPCS_USXG_AN_STS_SPEED_2500 0x1000U
#define XPCS_USXG_AN_STS_SPEED_5000 0x1400U
#define XPCS_USXG_AN_STS_SPEED_10000 0xC00U
#define XPCS_REG_ADDR_SHIFT 10U
#define XPCS_REG_ADDR_MASK 0x1FFFU
#define XPCS_REG_VALUE_MASK 0x3FFU
#define XPCS_VR_XS_PCS_KR_CTRL_USXG_MODE_MASK (OSI_BIT(12) | \
OSI_BIT(11) | \
OSI_BIT(10))
#define XPCS_VR_XS_PCS_KR_CTRL_USXG_MODE_5G OSI_BIT(10)
#define XPCS_WRAP_UPHY_HW_INIT_CTRL_TX_EN OSI_BIT(0)
#define XPCS_WRAP_UPHY_HW_INIT_CTRL_RX_EN OSI_BIT(2)
#define XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS OSI_BIT(6)
#define XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_DATA_EN OSI_BIT(0)
#define XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_IDDQ OSI_BIT(4)
#define XPCS_WRAP_UPHY_RX_CONTROL_0_0_AUX_RX_IDDQ OSI_BIT(5)
#define XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_SLEEP (OSI_BIT(6) | \
OSI_BIT(7))
#define XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_CAL_EN OSI_BIT(8)
#define XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_CDR_RESET OSI_BIT(9)
#define XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_PCS_PHY_RDY OSI_BIT(10)
#define XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_SW_OVRD OSI_BIT(31)
#define XPCS_WRAP_UPHY_STATUS_TX_P_UP_STATUS OSI_BIT(0)
#ifdef HSI_SUPPORT
#define XPCS_WRAP_INTERRUPT_CONTROL 0x8048
#define XPCS_WRAP_INTERRUPT_STATUS 0x8050
#define XPCS_CORE_CORRECTABLE_ERR OSI_BIT(10)
#define XPCS_CORE_UNCORRECTABLE_ERR OSI_BIT(9)
#define XPCS_REGISTER_PARITY_ERR OSI_BIT(8)
#define XPCS_BASE_PMA_MMD_SR_PMA_KR_FEC_CTRL 0x402AC
#define EN_ERR_IND OSI_BIT(1)
#define FEC_EN OSI_BIT(0)
#define XPCS_VR_XS_PCS_SFTY_UE_INTR0 0xE03C0
#define XPCS_VR_XS_PCS_SFTY_CE_INTR 0xE03C8
#define XPCS_VR_XS_PCS_SFTY_TMR_CTRL 0xE03D4
#define XPCS_SFTY_1US_MULT_MASK 0xFF
#define XPCS_SFTY_1US_MULT_SHIFT 0U
#endif
/** @} */
int xpcs_init(struct osi_core_priv_data *osi_core);
int xpcs_start(struct osi_core_priv_data *osi_core);
int xpcs_eee(struct osi_core_priv_data *osi_core, unsigned int en_dis);
/**
* @brief xpcs_read - read from xpcs.
*
* Algorithm: This routine reads data from XPCS register.
*
* @param[in] xpcs_base: XPCS virtual base address
* @param[in] reg_addr: register address to be read
*
* @retval value read from xpcs register.
*/
static inline unsigned int xpcs_read(void *xpcs_base, unsigned int reg_addr)
{
	unsigned char *base = (unsigned char *)xpcs_base;

	/* Select the register page via the indirect address window */
	osi_writel(((reg_addr >> XPCS_REG_ADDR_SHIFT) & XPCS_REG_ADDR_MASK),
		   base + XPCS_ADDRESS);
	/* Read the offset within the currently selected page */
	return osi_readl(base + (reg_addr & XPCS_REG_VALUE_MASK));
}
/**
* @brief xpcs_write - write to xpcs.
*
* Algorithm: This routine writes data to XPCS register.
*
* @param[in] xpcs_base: XPCS virtual base address
* @param[in] reg_addr: register address for writing
* @param[in] val: write value to register address
*/
static inline void xpcs_write(void *xpcs_base, unsigned int reg_addr,
			      unsigned int val)
{
	unsigned char *base = (unsigned char *)xpcs_base;

	/* Select the register page via the indirect address window */
	osi_writel(((reg_addr >> XPCS_REG_ADDR_SHIFT) & XPCS_REG_ADDR_MASK),
		   base + XPCS_ADDRESS);
	/* Write the value at the offset within the selected page */
	osi_writel(val, base + (reg_addr & XPCS_REG_VALUE_MASK));
}
/**
* @brief xpcs_write_safety - write to xpcs.
*
* Algorithm: This routine writes data to XPCS register.
 * And verify by reading back the value
*
* @param[in] osi_core: OSI core data structure
* @param[in] reg_addr: register address for writing
* @param[in] val: write value to register address
*
* @retval 0 on success
* @retval -1 on failure.
*
*/
static inline int xpcs_write_safety(struct osi_core_priv_data *osi_core,
				    unsigned int reg_addr,
				    unsigned int val)
{
	void *xpcs_base = osi_core->xpcs_base;
	int tries;

	/* Same attempt budget as the original (9 write+readback cycles) */
	for (tries = 9; tries > 0; tries--) {
		xpcs_write(xpcs_base, reg_addr, val);
		if (xpcs_read(xpcs_base, reg_addr) == val) {
			return 0;
		}
		osi_core->osd_ops.udelay(OSI_DELAY_1US);
	}

	OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL,
		     "xpcs_write_safety failed", reg_addr);
	return -1;
}
#endif

View File

@@ -1,39 +0,0 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# libnvethernetcl interface makefile fragment
#
###############################################################################
ifdef NV_INTERFACE_FLAG_SHARED_LIBRARY_SECTION
# Interface name of the ethernet common library
NV_INTERFACE_NAME := nvethernetcl
# Exported target: libnvethernetcl
NV_INTERFACE_EXPORTS := lib$(NV_INTERFACE_NAME)
# Headers published to consumers of this interface
NV_INTERFACE_PUBLIC_INCLUDES := \
./include
endif
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -1,61 +0,0 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################
ifdef NV_COMPONENT_FLAG_SHARED_LIBRARY_SECTION
include $(NV_BUILD_START_COMPONENT)
# Enable strict compiler warnings on QNX 64-bit builds
NV_COMPONENT_STRICT_WARNINGS_qnx_64 := 1
NV_COMPONENT_NAME := nvethernetcl
NV_COMPONENT_OWN_INTERFACE_DIR := .
# DMA-side OSI sources plus shared common helpers pulled from nvethernetrm
NV_COMPONENT_SOURCES := \
eqos_dma.c \
osi_dma.c \
osi_dma_txrx.c \
mgbe_dma.c \
eqos_desc.c \
mgbe_desc.c \
debug.c \
$(NV_SOURCE)/nvethernetrm/osi/common/osi_common.c \
$(NV_SOURCE)/nvethernetrm/osi/common/eqos_common.c \
$(NV_SOURCE)/nvethernetrm/osi/common/mgbe_common.c
NV_COMPONENT_INCLUDES := \
$(NV_SOURCE)/nvethernetrm/include \
$(NV_SOURCE)/nvethernetrm/osi/common/include
# Debug-only code (OSI_DEBUG) is compiled out of safety builds
ifeq ($(NV_BUILD_CONFIGURATION_IS_SAFETY),0)
NV_COMPONENT_CFLAGS += -DOSI_DEBUG
endif
include $(NV_BUILD_SHARED_LIBRARY)
endif
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -1,256 +0,0 @@
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifdef OSI_DEBUG
#include "debug.h"
/**
* @brief dump_struct - Dumps a given structure.
*
* @param[in] osi_dma: OSI DMA private data structure.
* @param[in] ptr: Pointer to structure.
* @param[in] size: Size of structure to dump.
*
*/
static void dump_struct(struct osi_dma_priv_data *osi_dma,
unsigned char *ptr,
unsigned long size)
{
nveu32_t i = 0, rem, j;
unsigned long temp;
if (ptr == OSI_NULL) {
osi_dma->osd_ops.printf(osi_dma, OSI_DEBUG_TYPE_STRUCTS,
"Pointer is NULL\n");
return;
}
rem = i % 4;
temp = size - rem;
for (i = 0; i < temp; i += 4) {
osi_dma->osd_ops.printf(osi_dma, OSI_DEBUG_TYPE_STRUCTS,
"%02x%02x%02x%02x", ptr[i], ptr[i + 1],
ptr[i + 2], ptr[i + 3]);
j = i;
}
for (i = j; i < size; i++) {
osi_dma->osd_ops.printf(osi_dma, OSI_DEBUG_TYPE_STRUCTS, "%x",
ptr[i]);
}
}
/**
* @brief structs_dump - Dumps OSI DMA structure.
*
* @param[in] osi_dma: OSI DMA private data structure.
*/
void structs_dump(struct osi_dma_priv_data *osi_dma)
{
struct dma_local *l_dma = (struct dma_local *)osi_dma;
nveu32_t i = 0;
/* Top-level OSI DMA private data */
osi_dma->osd_ops.printf(osi_dma, OSI_DEBUG_TYPE_STRUCTS,
"OSI DMA struct size: %lu",
sizeof(struct osi_dma_priv_data));
dump_struct(osi_dma, (unsigned char *)osi_dma,
sizeof(struct osi_dma_priv_data));
/* Per-channel Tx/Rx ring structures */
osi_dma->osd_ops.printf(osi_dma, OSI_DEBUG_TYPE_STRUCTS,
"OSI DMA Tx/Rx Ring struct sizes: %lu %lu",
sizeof(struct osi_tx_ring),
sizeof(struct osi_rx_ring));
for (i = 0; i < osi_dma->num_dma_chans; i++) {
dump_struct(osi_dma, (unsigned char *)osi_dma->tx_ring[i],
sizeof(struct osi_tx_ring));
dump_struct(osi_dma, (unsigned char *)osi_dma->rx_ring[i],
sizeof(struct osi_rx_ring));
}
/* OSD callback ops registered with OSI */
osi_dma->osd_ops.printf(osi_dma, OSI_DEBUG_TYPE_STRUCTS,
"OSD DMA ops struct size: %lu",
sizeof(struct osd_dma_ops));
dump_struct(osi_dma, (unsigned char *)(&osi_dma->osd_ops),
sizeof(struct osd_dma_ops));
/* OSI-internal DMA bookkeeping; dma_local wraps osi_dma (see cast above) */
osi_dma->osd_ops.printf(osi_dma, OSI_DEBUG_TYPE_STRUCTS,
"OSI local DMA struct size: %lu",
sizeof(struct dma_local));
dump_struct(osi_dma, (unsigned char *)l_dma,
sizeof(struct dma_local));
/* Channel ops table referenced by the local DMA data */
osi_dma->osd_ops.printf(osi_dma, OSI_DEBUG_TYPE_STRUCTS,
"OSI local ops DMA struct size: %lu",
sizeof(struct dma_chan_ops));
dump_struct(osi_dma, (unsigned char *)l_dma->ops_p,
sizeof(struct dma_chan_ops));
}
/**
* @brief reg_dump - Dumps MAC DMA registers
*
* @param[in] osi_dma: OSI DMA private data structure.
*/
void reg_dump(struct osi_dma_priv_data *osi_dma)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;
	unsigned int offset;
	unsigned int last;
	unsigned int value;

	/* Pick the DMA register window for the detected MAC version */
	switch (l_dma->mac_ver) {
	case OSI_EQOS_MAC_5_00:
		offset = 0x1100;
		last = 0x12E4;
		break;
	case OSI_EQOS_MAC_5_30:
		offset = 0x116C;
		last = 0x14EC;
		break;
	case OSI_MGBE_MAC_3_10:
	case OSI_MGBE_MAC_4_00:
		offset = 0x3100;
		last = 0x35FC;
		break;
	default:
		/* Unknown MAC IP version - nothing to dump */
		return;
	}

	/* Dump every 32-bit register in [offset, last] */
	for (; offset <= last; offset += 4) {
		value = osi_readl((nveu8_t *)osi_dma->base + offset);
		osi_dma->osd_ops.printf(osi_dma, OSI_DEBUG_TYPE_REG,
					"%x: %x\n", offset, value);
	}
}
/**
* @brief rx_desc_dump - Function to dump Rx descriptors
*
* @param[in] osi_dma: OSI DMA private data structure.
* @param[in] idx: Index to be dumped in Rx ring.
* @param[in] chan: DMA channel number
*/
static void rx_desc_dump(struct osi_dma_priv_data *osi_dma, unsigned int idx,
unsigned int chan)
{
struct osi_rx_ring *rx_ring = osi_dma->rx_ring[chan];
struct osi_rx_desc *rx_desc = rx_ring->rx_desc + idx;
struct osd_dma_ops *ops = &osi_dma->osd_ops;
/* One line per descriptor: channel, virtual address, ring index,
 * physical address, then RDES3..RDES0 */
ops->printf(osi_dma, OSI_DEBUG_TYPE_DESC,
"N [%02d %4p %04d %lx R_D] = %#x:%#x:%#x:%#x\n",
chan, rx_desc, idx,
(rx_ring->rx_desc_phy_addr + (idx * sizeof(struct osi_rx_desc))),
rx_desc->rdes3, rx_desc->rdes2,
rx_desc->rdes1, rx_desc->rdes0);
}
/**
* @brief tx_desc_dump - Function to dump Tx descriptors
*
* @param[in] osi_dma: OSI DMA private data structure.
* @param[in] f_idx: First index to be dumped in Tx ring.
* @param[in] l_idx: Last index to be dumped in Tx ring.
* @param[in] tx: Represents whether packet queued for tx done.
* @param[in] chan: DMA channel number
*/
static void tx_desc_dump(struct osi_dma_priv_data *osi_dma, unsigned int f_idx,
unsigned int l_idx, unsigned int tx,
unsigned int chan)
{
struct osi_tx_ring *tx_ring = osi_dma->tx_ring[chan];
struct osi_tx_desc *tx_desc = OSI_NULL;
struct osd_dma_ops *ops = &osi_dma->osd_ops;
unsigned int ctxt = 0, i = 0;
/* Single-descriptor dump when first and last index are the same */
if (f_idx == l_idx) {
tx_desc = tx_ring->tx_desc + f_idx;
ctxt = tx_desc->tdes3 & TDES3_CTXT;
/* "C" marks a context descriptor, "N" a normal one;
 * T_Q = queued for transmit, T_D = transmit done */
ops->printf(osi_dma, OSI_DEBUG_TYPE_DESC,
"%s [%02d %4p %04d %lx %s] = %#x:%#x:%#x:%#x\n",
(ctxt == TDES3_CTXT) ? "C" : "N",
chan, tx_desc, f_idx,
(tx_ring->tx_desc_phy_addr + (f_idx * sizeof(struct osi_tx_desc))),
(tx == TX_DESC_DUMP_TX) ? "T_Q" : "T_D",
tx_desc->tdes3, tx_desc->tdes2,
tx_desc->tdes1, tx_desc->tdes0);
} else {
int cnt;
/* Descriptor count between f_idx and l_idx, accounting for ring
 * wrap-around when f_idx is past l_idx */
if (f_idx > l_idx) {
cnt = l_idx + osi_dma->tx_ring_sz - f_idx;
} else {
cnt = l_idx - f_idx;
}
/* Walk cnt+1 descriptors starting at f_idx, wrapping the index
 * with INCR_TX_DESC_INDEX */
for (i = f_idx; cnt >= 0; cnt--) {
tx_desc = tx_ring->tx_desc + i;
ctxt = tx_desc->tdes3 & TDES3_CTXT;
ops->printf(osi_dma, OSI_DEBUG_TYPE_DESC,
"%s [%02d %4p %04d %lx %s] = %#x:%#x:%#x:%#x\n",
(ctxt == TDES3_CTXT) ? "C" : "N",
chan, tx_desc, i,
(tx_ring->tx_desc_phy_addr + (i * sizeof(struct osi_tx_desc))),
(tx == TX_DESC_DUMP_TX) ? "T_Q" : "T_D",
tx_desc->tdes3, tx_desc->tdes2,
tx_desc->tdes1, tx_desc->tdes0);
INCR_TX_DESC_INDEX(i, osi_dma->tx_ring_sz);
}
}
}
/**
 * @brief desc_dump - Function to dump Tx/Rx descriptors
 *
 * Dispatches to the Tx or Rx descriptor dumper based on the direction
 * bits in @flag; unknown direction values are silently ignored.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] f_idx: First index to be dumped in Tx/Rx ring.
 * @param[in] l_idx: Last index to be dumped in Tx/Rx ring.
 * @param[in] flag: Flags to indicate Tx/Tx done or Rx.
 * @param[in] chan: DMA channel number
 */
void desc_dump(struct osi_dma_priv_data *osi_dma, unsigned int f_idx,
	       unsigned int l_idx, unsigned int flag, unsigned int chan)
{
	unsigned int dir = flag & TXRX_DESC_DUMP_MASK;

	if (dir == TX_DESC_DUMP) {
		tx_desc_dump(osi_dma, f_idx, l_idx,
			     (flag & TX_DESC_DUMP_MASK), chan);
	} else if (dir == RX_DESC_DUMP) {
		rx_desc_dump(osi_dma, f_idx, chan);
	} else {
		/* No valid direction bit set: nothing to dump */
	}
}
#endif /* OSI_DEBUG */

View File

@@ -1,50 +0,0 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_DMA_DEBUG_H
#define INCLUDED_DMA_DEBUG_H

#include <osi_common.h>
#include <osi_dma.h>
#include "hw_desc.h"
#include "../osi/common/common.h"
#include "dma_local.h"

/**
 * @addtogroup DESC-DUMP helper macros.
 *
 * @brief Helper macros used for debugging.
 * @{
 */
/** Select Tx descriptor dump in desc_dump() flag argument */
#define TX_DESC_DUMP OSI_BIT(0)
/** Select Rx descriptor dump in desc_dump() flag argument */
#define RX_DESC_DUMP OSI_BIT(1)
/** Mask covering the Tx/Rx direction selection bits */
#define TXRX_DESC_DUMP_MASK (OSI_BIT(0) | OSI_BIT(1))
/** Tx dump is for a packet queued for transmission (printed as "T_Q") */
#define TX_DESC_DUMP_TX OSI_BIT(2)
/** Tx dump is for a completed/tx-done packet (printed as "T_D") */
#define TX_DESC_DUMP_TX_DONE OSI_BIT(3)
/** Mask covering the Tx queued/done bits */
#define TX_DESC_DUMP_MASK (OSI_BIT(2) | OSI_BIT(3))
/** @} */

/* Dump Tx/Rx descriptors in [f_idx, l_idx] of the given DMA channel */
void desc_dump(struct osi_dma_priv_data *osi_dma, unsigned int f_idx,
	       unsigned int l_idx, unsigned int flag, unsigned int chan);
/* Dump the DMA register space via the OSD printf hook */
void reg_dump(struct osi_dma_priv_data *osi_dma);
/* Dump OSI DMA software structures (name-based; see implementation) */
void structs_dump(struct osi_dma_priv_data *osi_dma);
#endif /* INCLUDED_DMA_DEBUG_H*/

View File

@@ -1,272 +0,0 @@
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_DMA_LOCAL_H
#define INCLUDED_DMA_LOCAL_H
#include <osi_dma.h>
#include "eqos_dma.h"
/**
 * @brief Maximum number of OSI DMA instances.
 */
#ifndef MAX_DMA_INSTANCES
#define MAX_DMA_INSTANCES 10U
#endif

/**
 * @brief Default DMA Tx/Rx ring sizes for EQOS/MGBE.
 */
/** Default Tx/Rx ring size for EQOS MAC */
#define EQOS_DEFAULT_RING_SZ 1024U
/** Default Tx/Rx ring size for MGBE MAC */
#define MGBE_DEFAULT_RING_SZ 4096U
/** Upper limit on MGBE ring size */
#define MGBE_MAX_RING_SZ 16384U
/* Minimum ring size — NOTE(review): name implies a HW floor; confirm
 * against the controller databook. */
#define HW_MIN_RING_SZ 4U
/**
 * @brief MAC DMA Channel operations
 *
 * Function-pointer table abstracting per-MAC (EQOS vs MGBE) DMA channel
 * programming; populated via eqos_init_dma_chan_ops()/mgbe_init_dma_chan_ops().
 */
struct dma_chan_ops {
	/** Called to set Transmit Ring length */
	void (*set_tx_ring_len)(struct osi_dma_priv_data *osi_dma,
				nveu32_t chan,
				nveu32_t len);
	/** Called to set Transmit Ring Base address */
	void (*set_tx_ring_start_addr)(void *addr, nveu32_t chan,
				       nveu64_t base_addr);
	/** Called to update Tx Ring tail pointer */
	void (*update_tx_tailptr)(void *addr, nveu32_t chan,
				  nveu64_t tailptr);
	/** Called to set Receive channel ring length */
	void (*set_rx_ring_len)(struct osi_dma_priv_data *osi_dma,
				nveu32_t chan,
				nveu32_t len);
	/** Called to set receive channel ring base address */
	void (*set_rx_ring_start_addr)(void *addr, nveu32_t chan,
				       nveu64_t base_addr);
	/** Called to update Rx ring tail pointer */
	void (*update_rx_tailptr)(void *addr, nveu32_t chan,
				  nveu64_t tailptr);
	/** Called to disable DMA Tx channel interrupts at wrapper level */
	void (*disable_chan_tx_intr)(void *addr, nveu32_t chan);
	/** Called to enable DMA Tx channel interrupts at wrapper level */
	void (*enable_chan_tx_intr)(void *addr, nveu32_t chan);
	/** Called to disable DMA Rx channel interrupts at wrapper level */
	void (*disable_chan_rx_intr)(void *addr, nveu32_t chan);
	/** Called to enable DMA Rx channel interrupts at wrapper level */
	void (*enable_chan_rx_intr)(void *addr, nveu32_t chan);
	/** Called to start the Tx/Rx DMA */
	void (*start_dma)(struct osi_dma_priv_data *osi_dma, nveu32_t chan);
	/** Called to stop the Tx/Rx DMA */
	void (*stop_dma)(struct osi_dma_priv_data *osi_dma, nveu32_t chan);
	/** Called to initialize the DMA channel */
	nve32_t (*init_dma_channel)(struct osi_dma_priv_data *osi_dma);
	/** Called to set Rx buffer length */
	void (*set_rx_buf_len)(struct osi_dma_priv_data *osi_dma);
#ifndef OSI_STRIPPED_LIB
	/** Called periodically to read and validate safety critical
	 * registers against last written value */
	nve32_t (*validate_regs)(struct osi_dma_priv_data *osi_dma);
	/** Called to configure the DMA channel slot function */
	void (*config_slot)(struct osi_dma_priv_data *osi_dma,
			    nveu32_t chan,
			    nveu32_t set,
			    nveu32_t interval);
#endif /* !OSI_STRIPPED_LIB */
	/** Called to clear VM Tx interrupt */
	void (*clear_vm_tx_intr)(void *addr, nveu32_t chan);
	/** Called to clear VM Rx interrupt */
	void (*clear_vm_rx_intr)(void *addr, nveu32_t chan);
};
/**
 * @brief DMA descriptor operations
 *
 * Per-MAC Rx descriptor parsing hooks; populated via
 * eqos_init_desc_ops()/mgbe_init_desc_ops().
 */
struct desc_ops {
	/** Called to get receive checksum */
	void (*get_rx_csum)(struct osi_rx_desc *rx_desc,
			    struct osi_rx_pkt_cx *rx_pkt_cx);
	/** Called to get rx error stats */
	void (*update_rx_err_stats)(struct osi_rx_desc *rx_desc,
				    struct osi_pkt_err_stats *stats);
	/** Called to get rx VLAN from descriptor */
	void (*get_rx_vlan)(struct osi_rx_desc *rx_desc,
			    struct osi_rx_pkt_cx *rx_pkt_cx);
	/** Called to get rx HASH from descriptor */
	void (*get_rx_hash)(struct osi_rx_desc *rx_desc,
			    struct osi_rx_pkt_cx *rx_pkt_cx);
	/** Called to get RX hw timestamp; returns 0 on success, -1 when no
	 * valid timestamp is available */
	int (*get_rx_hwstamp)(struct osi_dma_priv_data *osi_dma,
			      struct osi_rx_desc *rx_desc,
			      struct osi_rx_desc *context_desc,
			      struct osi_rx_pkt_cx *rx_pkt_cx);
};
/**
 * @brief OSI DMA private data.
 *
 * Internal wrapper around the public struct osi_dma_priv_data; osi_dma is
 * the first member, presumably so the public handle can be converted back
 * to this wrapper — confirm against the osi_dma.c callers.
 */
struct dma_local {
	/** OSI DMA data variable (must stay the first member) */
	struct osi_dma_priv_data osi_dma;
	/** DMA channel operations */
	struct dma_chan_ops *ops_p;
	/**
	 * PacketID for PTP TS.
	 * MSB 4-bits of channel number and LSB 6-bits of local
	 * index(PKT_ID_CNT).
	 */
	nveu32_t pkt_id;
	/** Flag to represent OSI DMA software init done */
	nveu32_t init_done;
	/** Holds the MAC version of MAC controller */
	nveu32_t mac_ver;
	/** Represents whether DMA interrupts are VM or Non-VM */
	nveu32_t vm_intr;
	/** Magic number to validate osi_dma pointer */
	nveu64_t magic_num;
	/** Maximum number of DMA channels */
	nveu32_t max_chans;
};
/**
 * @brief eqos_init_dma_chan_ops - Initialize eqos DMA operations.
 *
 * @param[in] ops: DMA channel operations pointer.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: No
 * - De-initialization: No
 */
void eqos_init_dma_chan_ops(struct dma_chan_ops *ops);

/**
 * @brief mgbe_init_dma_chan_ops - Initialize MGBE DMA operations.
 *
 * @param[in] ops: DMA channel operations pointer.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: No
 * - De-initialization: No
 */
void mgbe_init_dma_chan_ops(struct dma_chan_ops *ops);

/**
 * @brief eqos_init_desc_ops - EQOS init DMA descriptor operations
 *
 * @param[in] d_ops: DMA descriptor operations pointer.
 */
void eqos_init_desc_ops(struct desc_ops *d_ops);

/**
 * @brief mgbe_init_desc_ops - MGBE init DMA descriptor operations
 *
 * @param[in] d_ops: DMA descriptor operations pointer.
 */
void mgbe_init_desc_ops(struct desc_ops *d_ops);

/* Selects and installs the MAC-specific desc_ops for this osi_dma */
nve32_t init_desc_ops(struct osi_dma_priv_data *osi_dma);

/**
 * @brief hw_transmit - Initialize Tx DMA descriptors for a channel
 *
 * @note
 * Algorithm:
 *  - Initialize Transmit descriptors with DMA mappable buffers,
 *    set OWN bit, Tx ring length and set starting address of Tx DMA channel
 *    Tx ring base address in Tx DMA registers.
 *
 * @param[in, out] osi_dma: OSI DMA private data.
 * @param[in] tx_ring: DMA Tx ring.
 * @param[in] ops: DMA channel operations.
 * @param[in] chan: DMA Tx channel number. Max OSI_EQOS_MAX_NUM_CHANS.
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: Yes
 * - De-initialization: No
 */
nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma,
		    struct osi_tx_ring *tx_ring,
		    struct dma_chan_ops *ops,
		    nveu32_t chan);

/* Function prototype needed for misra */

/**
 * @brief dma_desc_init - Initialize DMA Tx/Rx descriptors
 *
 * @note
 * Algorithm:
 *  - Transmit and Receive descriptors will be initialized with
 *    required values so that MAC DMA can understand and act accordingly.
 *
 * @param[in, out] osi_dma: OSI DMA private data structure.
 * @param[in] ops: DMA channel operations.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: No
 * - De-initialization: No
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
nve32_t dma_desc_init(struct osi_dma_priv_data *osi_dma,
		      struct dma_chan_ops *ops);
/* Returns OSI_ENABLE when num is a non-zero power of two, else OSI_DISABLE */
static inline nveu32_t is_power_of_two(nveu32_t num)
{
	nveu32_t ret = OSI_DISABLE;

	/* A power of two has exactly one bit set: num & (num - 1) == 0 */
	if ((num != 0U) && ((num & (num - 1U)) == 0U)) {
		ret = OSI_ENABLE;
	}

	return ret;
}
/**
 * @addtogroup Helper Helper MACROS
 *
 * @brief EQOS generic helper MACROS.
 * @{
 */
/** Bail out of a void function when an EQOS channel index is out of range */
#define CHECK_CHAN_BOUND(chan) \
{ \
	if ((chan) >= OSI_EQOS_MAX_NUM_CHANS) { \
		return; \
	} \
}

/** Bail out of a void function when an MGBE channel index is out of range.
 * Note: the stray line-continuation backslash that previously followed the
 * closing brace spliced the next #define into this macro's replacement list
 * (a constraint violation, and BOOLEAN_FALSE was never defined); removed.
 */
#define MGBE_CHECK_CHAN_BOUND(chan) \
{ \
	if ((chan) >= OSI_MGBE_MAX_NUM_CHANS) { \
		return; \
	} \
}

/** Compile-time false constant of unsigned comparison type */
#define BOOLEAN_FALSE (0U != 0U)

/** Low 32 bits of a 64-bit value */
#define L32(data) ((nveu32_t)((data) & 0xFFFFFFFFU))
/** High 32 bits of a 64-bit value */
#define H32(data) ((nveu32_t)(((data) & 0xFFFFFFFF00000000UL) >> 32UL))
/** @} */
#endif /* INCLUDED_DMA_LOCAL_H */

View File

@@ -1,245 +0,0 @@
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "dma_local.h"
#include "hw_desc.h"
/**
 * @brief eqos_get_rx_vlan - Get Rx VLAN from descriptor
 *
 * Algorithm:
 *  1) Require valid RDES0 status (RS0V) in the write-back descriptor.
 *  2) If the length/type field reports a VLAN (or double-VLAN) frame,
 *     flag the packet context and extract the outer VLAN tag.
 *
 * @param[in] rx_desc: Rx descriptor
 * @param[in] rx_pkt_cx: Per-Rx packet context structure
 */
static inline void eqos_get_rx_vlan(struct osi_rx_desc *rx_desc,
				    struct osi_rx_pkt_cx *rx_pkt_cx)
{
	unsigned int len_type;

	/* Receive Status rdes0 must be valid */
	if ((rx_desc->rdes3 & RDES3_RS0V) != RDES3_RS0V) {
		return;
	}

	len_type = rx_desc->rdes3 & RDES3_LT;
	if ((len_type == RDES3_LT_VT) || (len_type == RDES3_LT_DVT)) {
		rx_pkt_cx->flags |= OSI_PKT_CX_VLAN;
		rx_pkt_cx->vlan_tag = rx_desc->rdes0 & RDES0_OVT;
	}
}
/**
 * @brief eqos_update_rx_err_stats - Detect Errors from Rx Descriptor
 *
 * Algorithm: Invoked by the OSI layer on the Last Descriptor of a packet;
 * bumps the CRC and frame error counters when the corresponding RDES3
 * error bits are set.
 *
 * @param[in] rx_desc: Rx Descriptor.
 * @param[in] stats: Packet error stats which stores the errors reported
 */
static inline void eqos_update_rx_err_stats(struct osi_rx_desc *rx_desc,
					    struct osi_pkt_err_stats *stats)
{
	const unsigned int rdes3 = rx_desc->rdes3;

	/* CE bit set -> CRC error */
	if ((rdes3 & RDES3_ERR_CRC) == RDES3_ERR_CRC) {
		stats->rx_crc_error =
			osi_update_stats_counter(stats->rx_crc_error, 1UL);
	}

	/* RE bit set -> receive (frame) error */
	if ((rdes3 & RDES3_ERR_RE) == RDES3_ERR_RE) {
		stats->rx_frame_error =
			osi_update_stats_counter(stats->rx_frame_error, 1UL);
	}
}
/**
 * @brief eqos_get_rx_csum - Get the Rx checksum from descriptor if valid
 *
 * @note
 * Algorithm:
 *  - Check if the descriptor has any checksum validation errors.
 *  - If none, set a per packet context flag indicating no err in
 *    Rx checksum.
 *  - The OSD layer will mark the packet appropriately to skip
 *    IP/TCP/UDP checksum validation in software based on whether
 *    COE is enabled for the device.
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: Yes
 * - De-initialization: No
 *
 * @param[in, out] rx_desc: Rx descriptor
 * @param[in, out] rx_pkt_cx: Per-Rx packet context structure
 */
static void eqos_get_rx_csum(struct osi_rx_desc *rx_desc,
			     struct osi_rx_pkt_cx *rx_pkt_cx)
{
	nveu32_t proto;

	/* rxcsum flags mirror RDES1 with extra granularity (needed by QNX);
	 * none/unnecessary is also reported so other OSes can act on it.
	 * RDES1 content is only meaningful when RS1V is set.
	 */
	if ((rx_desc->rdes3 & RDES3_RS1V) != RDES3_RS1V) {
		return;
	}

	/* No IP payload/header error and not bypassed -> csum verified */
	if ((rx_desc->rdes1 &
	     (RDES1_IPCE | RDES1_IPCB | RDES1_IPHE)) == OSI_DISABLE) {
		rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UNNECESSARY;
	}

	/* COE bypassed this frame: nothing further to report */
	if ((rx_desc->rdes1 & RDES1_IPCB) != OSI_DISABLE) {
		return;
	}

	rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4;
	if ((rx_desc->rdes1 & RDES1_IPHE) == RDES1_IPHE) {
		rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4_BAD;
	}

	/* pkt_type is a single value, so at most one of the checks below
	 * matches per IP version.
	 */
	proto = rx_desc->rdes1 & RDES1_PT_MASK;
	if ((rx_desc->rdes1 & RDES1_IPV4) == RDES1_IPV4) {
		if (proto == RDES1_PT_UDP) {
			rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UDPv4;
		}
		if (proto == RDES1_PT_TCP) {
			rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCPv4;
		}
	} else if ((rx_desc->rdes1 & RDES1_IPV6) == RDES1_IPV6) {
		if (proto == RDES1_PT_UDP) {
			rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UDPv6;
		}
		if (proto == RDES1_PT_TCP) {
			rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCPv6;
		}
	} else {
		/* Neither IPv4 nor IPv6: nothing to add */
	}

	if ((rx_desc->rdes1 & RDES1_IPCE) == RDES1_IPCE) {
		rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCP_UDP_BAD;
	}
}
/**
 * @brief eqos_get_rx_hash - Get Rx packet hash from descriptor if valid
 *
 * Algorithm: This routine will be invoked by OSI layer itself to get received
 * packet Hash from descriptor if RSS hash is valid and it also sets the type
 * of RSS hash.
 *
 * Intentionally empty for EQOS: RSS hash extraction is not supported on
 * this MAC, but the desc_ops hook must still be populated.
 *
 * @param[in] rx_desc: Rx Descriptor (unused).
 * @param[in] rx_pkt_cx: Per-Rx packet context structure (unused).
 */
static void eqos_get_rx_hash(OSI_UNUSED struct osi_rx_desc *rx_desc,
			     OSI_UNUSED struct osi_rx_pkt_cx *rx_pkt_cx)
{
}
/**
 * @brief eqos_get_rx_hwstamp - Get Rx HW Time stamp
 *
 * Algorithm:
 *  1) Check for TS availability (RS1V set, TSA set, TD clear).
 *  2) Poll the context descriptor until the HW releases it (OWN clear,
 *     CTXT set), retrying up to 10 times with a 1us delay.
 *  3) If valid, flag the packet as PTP and compute nanoseconds from the
 *     context descriptor so the OSD layer can extract the time.
 *
 * @param[in] osi_dma: OSI DMA private data (used for the udelay hook).
 * @param[in] rx_desc: Rx descriptor
 * @param[in] context_desc: Rx context descriptor
 * @param[in] rx_pkt_cx: Rx packet context
 *
 * @retval -1 if TimeStamp is not available
 * @retval 0 if TimeStamp is available.
 */
static int eqos_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma,
			       struct osi_rx_desc *rx_desc,
			       struct osi_rx_desc *context_desc,
			       struct osi_rx_pkt_cx *rx_pkt_cx)
{
	int retry;

	/* Check for RS1V/TSA/TD valid */
	if (((rx_desc->rdes3 & RDES3_RS1V) == RDES3_RS1V) &&
	    ((rx_desc->rdes1 & RDES1_TSA) == RDES1_TSA) &&
	    ((rx_desc->rdes1 & RDES1_TD) == 0U)) {
		for (retry = 0; retry < 10; retry++) {
			/* Context descriptor is readable once HW cleared OWN
			 * and marked it as a context (CTXT) descriptor.
			 */
			if (((context_desc->rdes3 & RDES3_OWN) == 0U) &&
			    ((context_desc->rdes3 & RDES3_CTXT) ==
			    RDES3_CTXT)) {
				/* All-ones in both timestamp words means the
				 * HW reported an invalid timestamp.
				 */
				if ((context_desc->rdes0 ==
				    OSI_INVALID_VALUE) &&
				    (context_desc->rdes1 ==
				    OSI_INVALID_VALUE)) {
					return -1;
				}
				/* Update rx pkt context flags to indicate
				 * PTP */
				rx_pkt_cx->flags |= OSI_PKT_CX_PTP;
				/* Time Stamp can be read */
				break;
			} else {
				/* TS not available yet, so retrying */
				osi_dma->osd_ops.udelay(OSI_DELAY_1US);
			}
		}
		if (retry == 10) {
			/* Timed out waiting for Rx timestamp */
			return -1;
		}
		/* ns = rdes0 (nanoseconds) + rdes1 (seconds) * NSEC_PER_SEC */
		rx_pkt_cx->ns = context_desc->rdes0 +
				(OSI_NSEC_PER_SEC * context_desc->rdes1);
		/* Overflow guard on the addition above */
		if (rx_pkt_cx->ns < context_desc->rdes0) {
			/* Will not hit this case */
			return -1;
		}
	} else {
		return -1;
	}

	return 0;
}
/* Install the EQOS-specific Rx descriptor parsers into the generic table */
void eqos_init_desc_ops(struct desc_ops *d_ops)
{
	d_ops->update_rx_err_stats = eqos_update_rx_err_stats;
	d_ops->get_rx_csum = eqos_get_rx_csum;
	d_ops->get_rx_vlan = eqos_get_rx_vlan;
	d_ops->get_rx_hash = eqos_get_rx_hash;
	d_ops->get_rx_hwstamp = eqos_get_rx_hwstamp;
}

View File

@@ -1,988 +0,0 @@
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "../osi/common/common.h"
#include "dma_local.h"
#include "eqos_dma.h"
#include "../osi/common/type.h"
/**
 * @brief eqos_dma_safety_config - EQOS MAC DMA safety configuration
 *
 * Backing store for the addresses, masks and last-written values of the
 * safety critical DMA registers; writes are serialized through its
 * dma_safety_lock member (see eqos_dma_safety_writel).
 */
static struct dma_func_safety eqos_dma_safety_config;
/**
 * @brief Write to safety critical register.
 *
 * @note
 * Algorithm:
 *  - Acquire RW lock, so that eqos_validate_dma_regs does not run while
 *    updating the safety critical register.
 *  - call osi_writela() to actually update the memory mapped register.
 *  - Store the same value in eqos_dma_safety_config->reg_val[idx], so that
 *    this latest value will be compared when eqos_validate_dma_regs is
 *    scheduled.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] val: Value to be written.
 * @param[in] addr: memory mapped register address to be written to.
 * @param[in] idx: Index of register corresponding to enum func_safety_dma_regs.
 *
 * @pre MAC has to be out of reset, and clocks supplied.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: Yes
 * - De-initialization: Yes
 */
static inline void eqos_dma_safety_writel(struct osi_dma_priv_data *osi_dma,
					  nveu32_t val, void *addr,
					  nveu32_t idx)
{
	struct dma_func_safety *config = &eqos_dma_safety_config;

	/* The HW write and the shadow-value update must be atomic w.r.t.
	 * the validation path, hence the irq-enabled lock around both.
	 */
	osi_lock_irq_enabled(&config->dma_safety_lock);
	osi_writela(osi_dma->osd, val, addr);
	/* Only the maskable (non-reserved) bits are tracked for validation */
	config->reg_val[idx] = (val & config->reg_mask[idx]);
	osi_unlock_irq_enabled(&config->dma_safety_lock);
}
/**
 * @brief Initialize the eqos_dma_safety_config.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 *
 * @note
 * Algorithm:
 *  - Populate the list of safety critical registers and provide
 *    - the address of the register
 *    - Register mask (to ignore reserved/self-critical bits in the reg).
 *  See eqos_validate_dma_regs which can be invoked periodically to compare
 *  the last written value to this register vs the actual value read when
 *  eqos_validate_dma_regs is scheduled.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: No
 * - De-initialization: No
 */
static void eqos_dma_safety_init(struct osi_dma_priv_data *osi_dma)
{
	struct dma_func_safety *config = &eqos_dma_safety_config;
	nveu8_t *base = (nveu8_t *)osi_dma->base;
	nveu32_t val;
	nveu32_t i, idx;

	/* Initialize all reg address to NULL, since we may not use
	 * some regs depending on the number of DMA chans enabled.
	 */
	for (i = EQOS_DMA_CH0_CTRL_IDX; i < EQOS_MAX_DMA_SAFETY_REGS; i++) {
		config->reg_addr[i] = OSI_NULL;
	}

	for (i = 0U; i < osi_dma->num_dma_chans; i++) {
		idx = osi_dma->dma_chans[i];
		/* NOTE(review): idx is used unchecked below; the disabled
		 * "#if 0 CHECK_CHAN_BOUND" dead code was removed — callers
		 * are expected to pass validated channel numbers.
		 */
		config->reg_addr[EQOS_DMA_CH0_CTRL_IDX + idx] = base +
						EQOS_DMA_CHX_CTRL(idx);
		config->reg_addr[EQOS_DMA_CH0_TX_CTRL_IDX + idx] = base +
						EQOS_DMA_CHX_TX_CTRL(idx);
		config->reg_addr[EQOS_DMA_CH0_RX_CTRL_IDX + idx] = base +
						EQOS_DMA_CHX_RX_CTRL(idx);
		config->reg_addr[EQOS_DMA_CH0_TDRL_IDX + idx] = base +
						EQOS_DMA_CHX_TDRL(idx);
		config->reg_addr[EQOS_DMA_CH0_RDRL_IDX + idx] = base +
						EQOS_DMA_CHX_RDRL(idx);
		config->reg_addr[EQOS_DMA_CH0_INTR_ENA_IDX + idx] = base +
						EQOS_DMA_CHX_INTR_ENA(idx);

		config->reg_mask[EQOS_DMA_CH0_CTRL_IDX + idx] =
						EQOS_DMA_CHX_CTRL_MASK;
		config->reg_mask[EQOS_DMA_CH0_TX_CTRL_IDX + idx] =
						EQOS_DMA_CHX_TX_CTRL_MASK;
		config->reg_mask[EQOS_DMA_CH0_RX_CTRL_IDX + idx] =
						EQOS_DMA_CHX_RX_CTRL_MASK;
		config->reg_mask[EQOS_DMA_CH0_TDRL_IDX + idx] =
						EQOS_DMA_CHX_TDRL_MASK;
		config->reg_mask[EQOS_DMA_CH0_RDRL_IDX + idx] =
						EQOS_DMA_CHX_RDRL_MASK;
		config->reg_mask[EQOS_DMA_CH0_INTR_ENA_IDX + idx] =
						EQOS_DMA_CHX_INTR_ENA_MASK;
	}

	/* Initialize current power-on-reset values of these registers. */
	for (i = EQOS_DMA_CH0_CTRL_IDX; i < EQOS_MAX_DMA_SAFETY_REGS; i++) {
		if (config->reg_addr[i] == OSI_NULL) {
			continue;
		}
		val = osi_readl((nveu8_t *)config->reg_addr[i]);
		config->reg_val[i] = val & config->reg_mask[i];
	}

	osi_lock_init(&config->dma_safety_lock);
}
/**
 * @brief eqos_disable_chan_tx_intr - Disables DMA Tx channel interrupts.
 *
 * @param[in] addr: Base address indicating the start of
 *	      memory mapped IO region of the MAC.
 * @param[in] chan: DMA Tx channel number.
 *
 * @pre
 *  - MAC needs to be out of reset and proper clocks need to be configured
 *  - DMA HW init need to be completed successfully, see osi_hw_dma_init
 *  - Mapping of physical IRQ line to DMA channel need to be maintained at
 *    OSDependent layer and pass corresponding channel number.
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: Yes
 * - De-initialization: Yes
 */
static void eqos_disable_chan_tx_intr(void *addr, nveu32_t chan)
{
	nveu32_t cntrl, status;

	/* Clear any pending Tx irq before disabling so a stale status bit
	 * is not left behind. (Dead "#if 0 CHECK_CHAN_BOUND" removed.)
	 */
	status = osi_readl((nveu8_t *)addr +
			   EQOS_VIRT_INTR_CHX_STATUS(chan));
	if ((status & EQOS_VIRT_INTR_CHX_STATUS_TX) ==
	    EQOS_VIRT_INTR_CHX_STATUS_TX) {
		osi_writel(EQOS_DMA_CHX_STATUS_CLEAR_TX,
			   (nveu8_t *)addr + EQOS_DMA_CHX_STATUS(chan));
		osi_writel(EQOS_VIRT_INTR_CHX_STATUS_TX,
			   (nveu8_t *)addr +
			   EQOS_VIRT_INTR_CHX_STATUS(chan));
	}

	/* Disable the irq */
	cntrl = osi_readl((nveu8_t *)addr +
			  EQOS_VIRT_INTR_CHX_CNTRL(chan));
	cntrl &= ~EQOS_VIRT_INTR_CHX_CNTRL_TX;
	osi_writel(cntrl, (nveu8_t *)addr +
		   EQOS_VIRT_INTR_CHX_CNTRL(chan));
}
/**
 * @brief eqos_enable_chan_tx_intr - Enable Tx channel interrupts.
 *
 * @param[in] addr: Base address indicating the start of
 *	      memory mapped IO region of the MAC.
 * @param[in] chan: DMA Tx channel number.
 *
 * @pre
 *  - MAC needs to be out of reset and proper clocks need to be configured
 *  - DMA HW init need to be completed successfully, see osi_hw_dma_init
 *  - Mapping of physical IRQ line to DMA channel need to be maintained at
 *    OSDependent layer and pass corresponding channel number.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: Yes
 * - De-initialization: No
 */
static void eqos_enable_chan_tx_intr(void *addr, nveu32_t chan)
{
	nveu32_t cntrl;

	/* Set the Tx bit in the per-channel virtual interrupt control
	 * register. (Dead "#if 0 CHECK_CHAN_BOUND" removed.)
	 */
	cntrl = osi_readl((nveu8_t *)addr +
			  EQOS_VIRT_INTR_CHX_CNTRL(chan));
	cntrl |= EQOS_VIRT_INTR_CHX_CNTRL_TX;
	osi_writel(cntrl, (nveu8_t *)addr +
		   EQOS_VIRT_INTR_CHX_CNTRL(chan));
}
/**
 * @brief eqos_disable_chan_rx_intr - Disable Rx channel interrupts.
 *
 * @param[in] addr: Base address indicating the start of
 *	      memory mapped IO region of the MAC.
 * @param[in] chan: DMA Rx channel number.
 *
 * @pre
 *  - MAC needs to be out of reset and proper clocks need to be configured
 *  - DMA HW init need to be completed successfully, see osi_hw_dma_init
 *  - Mapping of physical IRQ line to DMA channel need to be maintained at
 *    OSDependent layer and pass corresponding channel number.
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: Yes
 * - De-initialization: Yes
 */
static void eqos_disable_chan_rx_intr(void *addr, nveu32_t chan)
{
	nveu32_t cntrl, status;

	/* Clear any pending Rx irq before disabling so a stale status bit
	 * is not left behind. (Dead "#if 0 CHECK_CHAN_BOUND" removed.)
	 */
	status = osi_readl((nveu8_t *)addr +
			   EQOS_VIRT_INTR_CHX_STATUS(chan));
	if ((status & EQOS_VIRT_INTR_CHX_STATUS_RX) ==
	    EQOS_VIRT_INTR_CHX_STATUS_RX) {
		osi_writel(EQOS_DMA_CHX_STATUS_CLEAR_RX,
			   (nveu8_t *)addr + EQOS_DMA_CHX_STATUS(chan));
		osi_writel(EQOS_VIRT_INTR_CHX_STATUS_RX,
			   (nveu8_t *)addr +
			   EQOS_VIRT_INTR_CHX_STATUS(chan));
	}

	/* Disable irq */
	cntrl = osi_readl((nveu8_t *)addr +
			  EQOS_VIRT_INTR_CHX_CNTRL(chan));
	cntrl &= ~EQOS_VIRT_INTR_CHX_CNTRL_RX;
	osi_writel(cntrl, (nveu8_t *)addr +
		   EQOS_VIRT_INTR_CHX_CNTRL(chan));
}
/**
 * @brief eqos_enable_chan_rx_intr - Enable Rx channel interrupts.
 *
 * @param[in] addr: Base address indicating the start of
 *	      memory mapped IO region of the MAC.
 * @param[in] chan: DMA Rx channel number.
 *
 * @pre
 *  - MAC needs to be out of reset and proper clocks need to be configured
 *  - DMA HW init need to be completed successfully, see osi_hw_dma_init
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: Yes
 * - De-initialization: No
 */
static void eqos_enable_chan_rx_intr(void *addr, nveu32_t chan)
{
	nveu32_t cntrl;

	/* Set the Rx bit in the per-channel virtual interrupt control
	 * register. (Dead "#if 0 CHECK_CHAN_BOUND" removed.)
	 */
	cntrl = osi_readl((nveu8_t *)addr +
			  EQOS_VIRT_INTR_CHX_CNTRL(chan));
	cntrl |= EQOS_VIRT_INTR_CHX_CNTRL_RX;
	osi_writel(cntrl, (nveu8_t *)addr +
		   EQOS_VIRT_INTR_CHX_CNTRL(chan));
}
/**
 * @brief eqos_set_tx_ring_len - Set DMA Tx ring length.
 *
 * @note
 * Algorithm:
 *  - Set DMA Tx channel ring length for specific channel.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] chan: DMA Tx channel number.
 * @param[in] len: Length.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: No
 * - De-initialization: No
 */
static void eqos_set_tx_ring_len(struct osi_dma_priv_data *osi_dma,
				 nveu32_t chan,
				 nveu32_t len)
{
	void *addr = osi_dma->base;

	/* Safety-tracked write so the validation path can verify it later.
	 * (Dead "#if 0 CHECK_CHAN_BOUND" removed.)
	 */
	eqos_dma_safety_writel(osi_dma, len, (nveu8_t *)addr +
			       EQOS_DMA_CHX_TDRL(chan),
			       EQOS_DMA_CH0_TDRL_IDX + chan);
}
/**
 * @brief eqos_set_tx_ring_start_addr - Set DMA Tx ring base address.
 *
 * @note
 * Algorithm:
 *  - Sets DMA Tx ring base address for specific channel.
 *
 * @param[in] addr: Base address indicating the start of
 *	      memory mapped IO region of the MAC.
 * @param[in] chan: DMA Tx channel number.
 * @param[in] tx_desc: Tx desc base address.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: No
 * - De-initialization: No
 */
static void eqos_set_tx_ring_start_addr(void *addr, nveu32_t chan,
					nveu64_t tx_desc)
{
	nveu64_t tmp;

	/* Program the high word; the "< UINT_MAX" guard skips an all-ones
	 * half-word — NOTE(review): presumably a defensive/MISRA-style
	 * check, confirm intent. (Dead "#if 0 CHECK_CHAN_BOUND" removed.)
	 */
	tmp = H32(tx_desc);
	if (tmp < UINT_MAX) {
		osi_writel((nveu32_t)tmp, (nveu8_t *)addr +
			   EQOS_DMA_CHX_TDLH(chan));
	}

	/* Program the low word */
	tmp = L32(tx_desc);
	if (tmp < UINT_MAX) {
		osi_writel((nveu32_t)tmp, (nveu8_t *)addr +
			   EQOS_DMA_CHX_TDLA(chan));
	}
}
/**
 * @brief eqos_update_tx_tailptr - Updates DMA Tx ring tail pointer.
 *
 * @note
 * Algorithm:
 *  - Updates DMA Tx ring tail pointer for specific channel.
 *
 * @param[in] addr: Base address indicating the start of
 *	      memory mapped IO region of the MAC.
 * @param[in] chan: DMA Tx channel number.
 * @param[in] tailptr: DMA Tx ring tail pointer.
 *
 * @pre
 *  - MAC needs to be out of reset and proper clocks need to be configured
 *  - DMA HW init need to be completed successfully, see osi_hw_dma_init
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: Yes
 * - De-initialization: No
 */
static void eqos_update_tx_tailptr(void *addr, nveu32_t chan,
				   nveu64_t tailptr)
{
	nveu64_t tmp;

	/* Only the low 32 bits are programmed into the tail pointer
	 * register. (Dead "#if 0 CHECK_CHAN_BOUND" removed.)
	 */
	tmp = L32(tailptr);
	if (tmp < UINT_MAX) {
		osi_writel((nveu32_t)tmp, (nveu8_t *)addr +
			   EQOS_DMA_CHX_TDTP(chan));
	}
}
/**
 * @brief eqos_set_rx_ring_len - Set Rx channel ring length.
 *
 * @note
 * Algorithm:
 *  - Sets DMA Rx channel ring length for specific DMA channel.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] chan: DMA Rx channel number.
 * @param[in] len: Length
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: No
 * - De-initialization: No
 */
static void eqos_set_rx_ring_len(struct osi_dma_priv_data *osi_dma,
				 nveu32_t chan,
				 nveu32_t len)
{
	void *addr = osi_dma->base;

	/* Safety-tracked write so the validation path can verify it later.
	 * (Dead "#if 0 CHECK_CHAN_BOUND" removed.)
	 */
	eqos_dma_safety_writel(osi_dma, len, (nveu8_t *)addr +
			       EQOS_DMA_CHX_RDRL(chan),
			       EQOS_DMA_CH0_RDRL_IDX + chan);
}
/**
 * @brief eqos_set_rx_ring_start_addr - Set DMA Rx ring base address.
 *
 * @note
 * Algorithm:
 *  - Sets DMA Rx channel ring base address.
 *
 * @param[in] addr: Base address indicating the start of
 *	      memory mapped IO region of the MAC.
 * @param[in] chan: DMA Rx channel number.
 * @param[in] rx_desc: DMA Rx desc base address.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: No
 * - De-initialization: No
 */
static void eqos_set_rx_ring_start_addr(void *addr, nveu32_t chan,
					nveu64_t rx_desc)
{
	nveu64_t tmp;

	/* Program the high word; the "< UINT_MAX" guard skips an all-ones
	 * half-word — NOTE(review): presumably a defensive/MISRA-style
	 * check, confirm intent. (Dead "#if 0 CHECK_CHAN_BOUND" removed.)
	 */
	tmp = H32(rx_desc);
	if (tmp < UINT_MAX) {
		osi_writel((nveu32_t)tmp, (nveu8_t *)addr +
			   EQOS_DMA_CHX_RDLH(chan));
	}

	/* Program the low word */
	tmp = L32(rx_desc);
	if (tmp < UINT_MAX) {
		osi_writel((nveu32_t)tmp, (nveu8_t *)addr +
			   EQOS_DMA_CHX_RDLA(chan));
	}
}
/**
 * @brief eqos_update_rx_tailptr - Update Rx ring tail pointer
 *
 * @note
 * Algorithm:
 *  - Updates DMA Rx channel tail pointer for specific channel.
 *
 * @param[in] addr: Base address indicating the start of
 *	      memory mapped IO region of the MAC.
 * @param[in] chan: DMA Rx channel number.
 * @param[in] tailptr: Tail pointer
 *
 * @pre
 *  - MAC needs to be out of reset and proper clocks need to be configured
 *  - DMA HW init need to be completed successfully, see osi_hw_dma_init
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: Yes
 * - De-initialization: No
 */
static void eqos_update_rx_tailptr(void *addr, nveu32_t chan,
				   nveu64_t tailptr)
{
	nveu64_t tmp;

	/* Only the low 32 bits are programmed into the tail pointer
	 * register. (Dead "#if 0 CHECK_CHAN_BOUND" removed.)
	 */
	tmp = L32(tailptr);
	if (tmp < UINT_MAX) {
		osi_writel((nveu32_t)tmp, (nveu8_t *)addr +
			   EQOS_DMA_CHX_RDTP(chan));
	}
}
/**
 * @brief eqos_start_dma - Start DMA.
 *
 * @note
 * Algorithm:
 *  - Start Tx and Rx DMA for specific channel.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] chan: DMA Tx/Rx channel number.
 *
 * @pre
 *  - MAC needs to be out of reset and proper clocks need to be configured
 *  - DMA HW init need to be completed successfully, see osi_hw_dma_init
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: No
 * - De-initialization: No
 */
static void eqos_start_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan)
{
	nveu32_t val;
	void *addr = osi_dma->base;
#if 0
	CHECK_CHAN_BOUND(chan);
#endif
	/* start Tx DMA: read-modify-write the start bit (OSI_BIT(0)) */
	val = osi_readla(osi_dma->osd,
			 (nveu8_t *)addr + EQOS_DMA_CHX_TX_CTRL(chan));
	val |= OSI_BIT(0);
	eqos_dma_safety_writel(osi_dma, val, (nveu8_t *)addr +
			       EQOS_DMA_CHX_TX_CTRL(chan),
			       EQOS_DMA_CH0_TX_CTRL_IDX + chan);
	/* start Rx DMA: set the start bit and clear bit 31.
	 * NOTE(review): bit 31 is set by eqos_stop_dma and cleared here;
	 * presumably an Rx flush/stop control — confirm against the EQOS
	 * databook before relying on this.
	 */
	val = osi_readla(osi_dma->osd,
			 (nveu8_t *)addr + EQOS_DMA_CHX_RX_CTRL(chan));
	val |= OSI_BIT(0);
	val &= ~OSI_BIT(31);
	eqos_dma_safety_writel(osi_dma, val, (nveu8_t *)addr +
			       EQOS_DMA_CHX_RX_CTRL(chan),
			       EQOS_DMA_CH0_RX_CTRL_IDX + chan);
}
/**
 * @brief eqos_stop_dma - Stop DMA.
 *
 * @note
 * Algorithm:
 *  - Stop Tx and Rx DMA for specific channel.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] chan: DMA Tx/Rx channel number.
 *
 * @pre
 *  - MAC needs to be out of reset and proper clocks need to be configured
 *  - DMA HW init need to be completed successfully, see osi_hw_dma_init
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: No
 * - De-initialization: Yes
 */
static void eqos_stop_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan)
{
	nveu32_t val;
	void *addr = osi_dma->base;
#if 0
	CHECK_CHAN_BOUND(chan);
#endif
	/* stop Tx DMA: clear the start bit (OSI_BIT(0)) */
	val = osi_readla(osi_dma->osd,
			 (nveu8_t *)addr + EQOS_DMA_CHX_TX_CTRL(chan));
	val &= ~OSI_BIT(0);
	eqos_dma_safety_writel(osi_dma, val, (nveu8_t *)addr +
			       EQOS_DMA_CHX_TX_CTRL(chan),
			       EQOS_DMA_CH0_TX_CTRL_IDX + chan);
	/* stop Rx DMA: clear the start bit and set bit 31.
	 * NOTE(review): bit 31 is cleared again by eqos_start_dma;
	 * presumably an Rx flush/stop control — confirm against the EQOS
	 * databook.
	 */
	val = osi_readla(osi_dma->osd,
			 (nveu8_t *)addr + EQOS_DMA_CHX_RX_CTRL(chan));
	val &= ~OSI_BIT(0);
	val |= OSI_BIT(31);
	eqos_dma_safety_writel(osi_dma, val, (nveu8_t *)addr +
			       EQOS_DMA_CHX_RX_CTRL(chan),
			       EQOS_DMA_CH0_RX_CTRL_IDX + chan);
}
/**
 * @brief eqos_configure_dma_channel - Configure DMA channel
 *
 * @note
 * Algorithm:
 *  - This takes care of configuring the below
 *    parameters for the DMA channel
 *    - Enabling DMA channel interrupts
 *    - Enable 8xPBL mode
 *    - Program Tx, Rx PBL
 *    - Enable TSO if HW supports
 *    - Program Rx Watchdog timer
 *
 * @param[in] chan: DMA channel number that need to be configured.
 * @param[in] osi_dma: OSI DMA private data structure.
 *
 * @pre MAC has to be out of reset.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: No
 * - De-initialization: No
 */
static void eqos_configure_dma_channel(nveu32_t chan,
				       struct osi_dma_priv_data *osi_dma)
{
	nveu32_t value;
#if 0
	CHECK_CHAN_BOUND(chan);
#endif
	/* enable DMA channel interrupts */
	/* Enable TIE and TBUE */
	/* TIE - Transmit Interrupt Enable */
	/* TBUE - Transmit Buffer Unavailable Enable */
	/* RIE - Receive Interrupt Enable */
	/* RBUE - Receive Buffer Unavailable Enable */
	/* AIE - Abnormal Interrupt Summary Enable */
	/* NIE - Normal Interrupt Summary Enable */
	/* FBE - Fatal Bus Error Enable */
	value = osi_readl((nveu8_t *)osi_dma->base +
			  EQOS_DMA_CHX_INTR_ENA(chan));
	/* Buffer-unavailable interrupts are enabled only when not running
	 * under virtualization */
	if (osi_dma->use_virtualization == OSI_DISABLE) {
		value |= EQOS_DMA_CHX_INTR_TBUE |
			 EQOS_DMA_CHX_INTR_RBUE;
	}
	value |= EQOS_DMA_CHX_INTR_TIE | EQOS_DMA_CHX_INTR_RIE |
		 EQOS_DMA_CHX_INTR_FBEE | EQOS_DMA_CHX_INTR_AIE |
		 EQOS_DMA_CHX_INTR_NIE;
	/* For multi-irqs to work nie needs to be disabled */
	/* NOTE(review): NIE is set just above and immediately cleared here;
	 * the set appears redundant — confirm before simplifying. */
	value &= ~(EQOS_DMA_CHX_INTR_NIE);
	eqos_dma_safety_writel(osi_dma, value, (nveu8_t *)osi_dma->base +
			       EQOS_DMA_CHX_INTR_ENA(chan),
			       EQOS_DMA_CH0_INTR_ENA_IDX + chan);
	/* Enable 8xPBL mode */
	value = osi_readl((nveu8_t *)osi_dma->base +
			  EQOS_DMA_CHX_CTRL(chan));
	value |= EQOS_DMA_CHX_CTRL_PBLX8;
	eqos_dma_safety_writel(osi_dma, value, (nveu8_t *)osi_dma->base +
			       EQOS_DMA_CHX_CTRL(chan),
			       EQOS_DMA_CH0_CTRL_IDX + chan);
	/* Configure DMA channel Transmit control register */
	value = osi_readl((nveu8_t *)osi_dma->base +
			  EQOS_DMA_CHX_TX_CTRL(chan));
	/* Enable OSF mode */
	value |= EQOS_DMA_CHX_TX_CTRL_OSF;
	/* TxPBL = 32*/
	value |= EQOS_DMA_CHX_TX_CTRL_TXPBL_RECOMMENDED;
	/* enable TSO by default if HW supports */
	value |= EQOS_DMA_CHX_TX_CTRL_TSE;
	eqos_dma_safety_writel(osi_dma, value, (nveu8_t *)osi_dma->base +
			       EQOS_DMA_CHX_TX_CTRL(chan),
			       EQOS_DMA_CH0_TX_CTRL_IDX + chan);
	/* Configure DMA channel Receive control register */
	/* Select Rx Buffer size. Needs to be rounded up to next multiple of
	 * bus width (rx_buf_len is already aligned by eqos_set_rx_buf_len)
	 */
	value = osi_readl((nveu8_t *)osi_dma->base +
			  EQOS_DMA_CHX_RX_CTRL(chan));
	/* clear previous Rx buffer size */
	value &= ~EQOS_DMA_CHX_RBSZ_MASK;
	value |= (osi_dma->rx_buf_len << EQOS_DMA_CHX_RBSZ_SHIFT);
	/* RXPBL = 12 */
	value |= EQOS_DMA_CHX_RX_CTRL_RXPBL_RECOMMENDED;
	eqos_dma_safety_writel(osi_dma, value, (nveu8_t *)osi_dma->base +
			       EQOS_DMA_CHX_RX_CTRL(chan),
			       EQOS_DMA_CH0_RX_CTRL_IDX + chan);
	/* Set Receive Interrupt Watchdog Timer Count */
	/* conversion of usec to RWIT value
	 * Eg: System clock is 125MHz, each clock cycle would then be 8ns
	 * For value 0x1 in RWT, device would wait for 512 clk cycles with
	 * RWTU as 0x1,
	 * ie, (8ns x 512) => 4.096us (rounding off to 4us)
	 * So formula with above values is,ret = usec/4
	 */
	if ((osi_dma->use_riwt == OSI_ENABLE) &&
	    (osi_dma->rx_riwt < UINT_MAX)) {
		value = osi_readl((nveu8_t *)osi_dma->base +
				  EQOS_DMA_CHX_RX_WDT(chan));
		/* Mask the RWT and RWTU value */
		value &= ~(EQOS_DMA_CHX_RX_WDT_RWT_MASK |
			   EQOS_DMA_CHX_RX_WDT_RWTU_MASK);
		/* Conversion of usec to Rx Interrupt Watchdog Timer Count */
		value |= ((osi_dma->rx_riwt *
			  (EQOS_AXI_CLK_FREQ / OSI_ONE_MEGA_HZ)) /
			  EQOS_DMA_CHX_RX_WDT_RWTU) &
			  EQOS_DMA_CHX_RX_WDT_RWT_MASK;
		value |= EQOS_DMA_CHX_RX_WDT_RWTU_512_CYCLE;
		osi_writel(value, (nveu8_t *)osi_dma->base +
			   EQOS_DMA_CHX_RX_WDT(chan));
	}
}
/**
 * @brief eqos_init_dma_channel - DMA channel INIT
 *
 * Initializes the functional-safety register mirror and then applies
 * the per-channel configuration to every channel listed in
 * osi_dma->dma_chans[].
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: No
 * - De-initialization: No
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
static nve32_t eqos_init_dma_channel(struct osi_dma_priv_data *osi_dma)
{
	nveu32_t i;

	eqos_dma_safety_init(osi_dma);

	/* configure EQOS DMA channels */
	for (i = 0U; i < osi_dma->num_dma_chans; i++) {
		eqos_configure_dma_channel(osi_dma->dma_chans[i], osi_dma);
	}

	return 0;
}
/**
 * @brief eqos_set_rx_buf_len - Set Rx buffer length
 *	  Sets the Rx buffer length based on the new MTU size set.
 *
 * @param[in, out] osi_dma: OSI DMA private data structure.
 *
 * @pre
 *  - MAC needs to be out of reset and proper clocks need to be configured
 *  - DMA HW init need to be completed successfully, see osi_hw_dma_init
 *  - osi_dma->mtu need to be filled with current MTU size <= 9K
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: Yes
 * - De-initialization: No
 */
static void eqos_set_rx_buf_len(struct osi_dma_priv_data *osi_dma)
{
	nveu32_t mtu = osi_dma->mtu;
	nveu32_t buf_len;

	/* Cap the MTU at the maximum the HW supports */
	if (mtu > OSI_MAX_MTU_SIZE) {
		mtu = OSI_MAX_MTU_SIZE;
	}
	/* Add Ethernet header + VLAN header + NET IP align size to MTU */
	buf_len = mtu + OSI_ETH_HLEN + NV_VLAN_HLEN + OSI_NET_IP_ALIGN;
	/* Round up to the next multiple of the AXI bus width */
	osi_dma->rx_buf_len = (buf_len + (EQOS_AXI_BUS_WIDTH - 1U)) &
			      ~(EQOS_AXI_BUS_WIDTH - 1U);
}
#ifndef OSI_STRIPPED_LIB
/**
 * @brief Read-validate HW registers for functional safety.
 *
 * @note
 * Algorithm:
 *  - Reads pre-configured list of MAC/MTL configuration registers
 *    and compares with last written value for any modifications.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 *
 * @pre
 *  - MAC has to be out of reset.
 *  - osi_hw_dma_init has to be called. Internally this would initialize
 *    the safety_config (see osi_dma_priv_data) based on MAC version and
 *    which specific registers needs to be validated periodically.
 *  - Invoke this call if (osi_dma_priv_data->safety_config != OSI_NULL)
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: Yes
 * - De-initialization: No
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
static nve32_t eqos_validate_dma_regs(struct osi_dma_priv_data *osi_dma)
{
	struct dma_func_safety *config =
		(struct dma_func_safety *)osi_dma->safety_config;
	nveu32_t cur_val;
	nveu32_t i;

	/* Hold the lock so concurrent safety-tracked writes cannot update
	 * reg_val[] while it is being compared against HW. */
	osi_lock_irq_enabled(&config->dma_safety_lock);
	for (i = EQOS_DMA_CH0_CTRL_IDX; i < EQOS_MAX_DMA_SAFETY_REGS; i++) {
		/* Unused slots are OSI_NULL — presumably channels not
		 * configured by eqos_dma_safety_init; confirm there. */
		if (config->reg_addr[i] == OSI_NULL) {
			continue;
		}
		/* Mask out reserved/self-clearing bits before comparing */
		cur_val = osi_readl((nveu8_t *)config->reg_addr[i]);
		cur_val &= config->reg_mask[i];
		if (cur_val == config->reg_val[i]) {
			continue;
		} else {
			/* Register content differs from what was written.
			 * Return error and let safety manager (NVGuard etc.)
			 * take care of corrective action.
			 */
			osi_unlock_irq_enabled(&config->dma_safety_lock);
			return -1;
		}
	}
	osi_unlock_irq_enabled(&config->dma_safety_lock);

	return 0;
}
/**
 * @brief eqos_config_slot - Configure slot Checking for DMA channel
 *
 * @note
 * Algorithm:
 *  - Set/Reset the slot function of DMA channel based on given inputs
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] chan: DMA channel number to enable slot function
 * @param[in] set: flag for set/reset with value OSI_ENABLE/OSI_DISABLE
 * @param[in] interval: slot interval from 0usec to 4095usec
 *
 * @pre
 *  - MAC should be init and started. see osi_start_mac()
 *  - OSD should be initialized
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: Yes
 * - De-initialization: No
 */
static void eqos_config_slot(struct osi_dma_priv_data *osi_dma,
			     nveu32_t chan,
			     nveu32_t set,
			     nveu32_t interval)
{
	nveu32_t value;
	nveu32_t intr;
#if 0
	CHECK_CHAN_BOUND(chan);
#endif
	if (set == OSI_ENABLE) {
		/* Program SLOT CTRL register SIV and set ESC bit */
		value = osi_readl((nveu8_t *)osi_dma->base +
				  EQOS_DMA_CHX_SLOT_CTRL(chan));
		/* BUGFIX: the SIV field is programmed at
		 * (interval << EQOS_DMA_CHX_SLOT_SIV_SHIFT) below, so the
		 * clear mask must be shifted the same way; clearing the
		 * unshifted mask left stale upper SIV bits behind and
		 * clobbered the low bits (including ESC) instead. */
		value &= ~(EQOS_DMA_CHX_SLOT_SIV_MASK <<
			   EQOS_DMA_CHX_SLOT_SIV_SHIFT);
		/* remove overflow bits of interval (SIV is a 12-bit field,
		 * matching the 0-4095 usec documented range) */
		intr = interval & EQOS_DMA_CHX_SLOT_SIV_MASK;
		value |= (intr << EQOS_DMA_CHX_SLOT_SIV_SHIFT);
		/* Set ESC bit */
		value |= EQOS_DMA_CHX_SLOT_ESC;
		osi_writel(value, (nveu8_t *)osi_dma->base +
			   EQOS_DMA_CHX_SLOT_CTRL(chan));
	} else {
		/* Clear ESC bit of SLOT CTRL register; SIV is left as-is */
		value = osi_readl((nveu8_t *)osi_dma->base +
				  EQOS_DMA_CHX_SLOT_CTRL(chan));
		value &= ~EQOS_DMA_CHX_SLOT_ESC;
		osi_writel(value, (nveu8_t *)osi_dma->base +
			   EQOS_DMA_CHX_SLOT_CTRL(chan));
	}
}
#endif /* !OSI_STRIPPED_LIB */
/**
 * @brief eqos_clear_vm_tx_intr - Handle VM Tx interrupt
 *
 * Algorithm: Clear the Tx interrupt source at the DMA channel status
 * register and at the wrapper (virtual interrupt) status register,
 * then disable further Tx interrupts for the channel.
 *
 * @param[in] addr: MAC base address.
 * @param[in] chan: DMA Tx channel number.
 *
 * @note
 * Dependencies: None.
 * Protection: None.
 * @retval None.
 */
static void eqos_clear_vm_tx_intr(void *addr, nveu32_t chan)
{
	nveu8_t *base = (nveu8_t *)addr;
#if 0
	CHECK_CHAN_BOUND(chan);
#endif
	/* Ack at DMA level first, then at the wrapper level */
	osi_writel(EQOS_DMA_CHX_STATUS_CLEAR_TX,
		   base + EQOS_DMA_CHX_STATUS(chan));
	osi_writel(EQOS_VIRT_INTR_CHX_STATUS_TX,
		   base + EQOS_VIRT_INTR_CHX_STATUS(chan));
	eqos_disable_chan_tx_intr(addr, chan);
}
/**
 * @brief eqos_clear_vm_rx_intr - Handle VM Rx interrupt
 *
 * Algorithm: Clear the Rx interrupt source at the DMA channel status
 * register and at the wrapper (virtual interrupt) status register,
 * then disable further Rx interrupts for the channel.
 *
 * @param[in] addr: MAC base address.
 * @param[in] chan: DMA Rx channel number.
 *
 * @note
 * Dependencies: None.
 * Protection: None.
 *
 * @retval None.
 */
static void eqos_clear_vm_rx_intr(void *addr, nveu32_t chan)
{
	nveu8_t *base = (nveu8_t *)addr;
#if 0
	CHECK_CHAN_BOUND(chan);
#endif
	/* Ack at DMA level first, then at the wrapper level */
	osi_writel(EQOS_DMA_CHX_STATUS_CLEAR_RX,
		   base + EQOS_DMA_CHX_STATUS(chan));
	osi_writel(EQOS_VIRT_INTR_CHX_STATUS_RX,
		   base + EQOS_VIRT_INTR_CHX_STATUS(chan));
	eqos_disable_chan_rx_intr(addr, chan);
}
/**
 * @brief eqos_get_dma_safety_config - EQOS get DMA safety configuration
 *
 * @returns Pointer to the file-scope EQOS DMA safety configuration
 * (struct dma_func_safety) used for periodic register validation.
 */
void *eqos_get_dma_safety_config(void)
{
	return &eqos_dma_safety_config;
}
/**
 * @brief eqos_init_dma_chan_ops - Initialize EQOS DMA operations.
 *
 * Populates the DMA channel operations table with the EQOS-specific
 * implementations.
 *
 * @param[in] ops: DMA channel operations pointer.
 */
void eqos_init_dma_chan_ops(struct dma_chan_ops *ops)
{
	/* descriptor ring geometry */
	ops->set_tx_ring_len = eqos_set_tx_ring_len;
	ops->set_tx_ring_start_addr = eqos_set_tx_ring_start_addr;
	ops->set_rx_ring_len = eqos_set_rx_ring_len;
	ops->set_rx_ring_start_addr = eqos_set_rx_ring_start_addr;
	/* tail pointer updates */
	ops->update_tx_tailptr = eqos_update_tx_tailptr;
	ops->update_rx_tailptr = eqos_update_rx_tailptr;
	/* per-channel interrupt control */
	ops->enable_chan_tx_intr = eqos_enable_chan_tx_intr;
	ops->disable_chan_tx_intr = eqos_disable_chan_tx_intr;
	ops->enable_chan_rx_intr = eqos_enable_chan_rx_intr;
	ops->disable_chan_rx_intr = eqos_disable_chan_rx_intr;
	/* DMA lifecycle */
	ops->init_dma_channel = eqos_init_dma_channel;
	ops->start_dma = eqos_start_dma;
	ops->stop_dma = eqos_stop_dma;
	ops->set_rx_buf_len = eqos_set_rx_buf_len;
#ifndef OSI_STRIPPED_LIB
	ops->validate_regs = eqos_validate_dma_regs;
	ops->config_slot = eqos_config_slot;
#endif /* !OSI_STRIPPED_LIB */
	/* VM interrupt handling */
	ops->clear_vm_tx_intr = eqos_clear_vm_tx_intr;
	ops->clear_vm_rx_intr = eqos_clear_vm_rx_intr;
}

View File

@@ -1,200 +0,0 @@
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_EQOS_DMA_H
#define INCLUDED_EQOS_DMA_H
/**
* @addtogroup EQOS AXI Clock defines
*
* @brief AXI Clock defines
* @{
*/
#define EQOS_AXI_CLK_FREQ 125000000U
/** @} */
/**
* @addtogroup EQOS1 DMA Channel Register offsets
*
* @brief EQOS DMA Channel register offsets
* @{
*/
#define EQOS_DMA_CHX_CTRL(x) ((0x0080U * (x)) + 0x1100U)
#define EQOS_DMA_CHX_TX_CTRL(x) ((0x0080U * (x)) + 0x1104U)
#define EQOS_DMA_CHX_RX_CTRL(x) ((0x0080U * (x)) + 0x1108U)
#define EQOS_DMA_CHX_INTR_ENA(x) ((0x0080U * (x)) + 0x1134U)
#define EQOS_DMA_CHX_RX_WDT(x) ((0x0080U * (x)) + 0x1138U)
#ifndef OSI_STRIPPED_LIB
#define EQOS_DMA_CHX_SLOT_CTRL(x) ((0x0080U * (x)) + 0x113CU)
#endif /* !OSI_STRIPPED_LIB */
#define EQOS_DMA_CHX_RDTP(x) ((0x0080U * (x)) + 0x1128U)
#define EQOS_DMA_CHX_RDLH(x) ((0x0080U * (x)) + 0x1118U)
#define EQOS_DMA_CHX_RDLA(x) ((0x0080U * (x)) + 0x111CU)
#define EQOS_DMA_CHX_RDRL(x) ((0x0080U * (x)) + 0x1130U)
#define EQOS_DMA_CHX_TDTP(x) ((0x0080U * (x)) + 0x1120U)
#define EQOS_DMA_CHX_TDLH(x) ((0x0080U * (x)) + 0x1110U)
#define EQOS_DMA_CHX_TDLA(x) ((0x0080U * (x)) + 0x1114U)
#define EQOS_DMA_CHX_TDRL(x) ((0x0080U * (x)) + 0x112CU)
#define EQOS_VIRT_INTR_APB_CHX_CNTRL(x) (0x8200U + ((x) * 4U))
#define EQOS_VIRT_INTR_CHX_STATUS(x) (0x8604U + ((x) * 8U))
#define EQOS_VIRT_INTR_CHX_CNTRL(x) (0x8600U + ((x) * 8U))
/** @} */
/**
* @addtogroup EQOS2 BIT fields for EQOS MAC HW DMA Channel Registers
*
* @brief Values defined for the DMA channel registers
* @{
*/
#define EQOS_VIRT_INTR_CHX_STATUS_TX OSI_BIT(0)
#define EQOS_VIRT_INTR_CHX_STATUS_RX OSI_BIT(1)
#define EQOS_DMA_CHX_STATUS_TI OSI_BIT(0)
#define EQOS_DMA_CHX_STATUS_RI OSI_BIT(6)
#define EQOS_DMA_CHX_STATUS_NIS OSI_BIT(15)
#define EQOS_DMA_CHX_STATUS_CLEAR_TX \
(EQOS_DMA_CHX_STATUS_TI | EQOS_DMA_CHX_STATUS_NIS)
#define EQOS_DMA_CHX_STATUS_CLEAR_RX \
(EQOS_DMA_CHX_STATUS_RI | EQOS_DMA_CHX_STATUS_NIS)
#define EQOS_VIRT_INTR_CHX_CNTRL_TX OSI_BIT(0)
#define EQOS_VIRT_INTR_CHX_CNTRL_RX OSI_BIT(1)
#define EQOS_DMA_CHX_INTR_TIE OSI_BIT(0)
#define EQOS_DMA_CHX_INTR_TBUE OSI_BIT(2)
#define EQOS_DMA_CHX_INTR_RIE OSI_BIT(6)
#define EQOS_DMA_CHX_INTR_RBUE OSI_BIT(7)
#define EQOS_DMA_CHX_INTR_FBEE OSI_BIT(12)
#define EQOS_DMA_CHX_INTR_AIE OSI_BIT(14)
#define EQOS_DMA_CHX_INTR_NIE OSI_BIT(15)
#define EQOS_DMA_CHX_TX_CTRL_OSF OSI_BIT(4)
#define EQOS_DMA_CHX_TX_CTRL_TSE OSI_BIT(12)
#define EQOS_DMA_CHX_CTRL_PBLX8 OSI_BIT(16)
#define EQOS_DMA_CHX_RBSZ_MASK 0x7FFEU
#define EQOS_DMA_CHX_RBSZ_SHIFT 1U
#define EQOS_DMA_CHX_TX_CTRL_TXPBL_RECOMMENDED 0x200000U
#define EQOS_DMA_CHX_RX_CTRL_RXPBL_RECOMMENDED 0xC0000U
#define EQOS_DMA_CHX_RX_WDT_RWT_MASK 0xFFU
#define EQOS_DMA_CHX_RX_WDT_RWTU_MASK 0x30000U
#define EQOS_DMA_CHX_RX_WDT_RWTU_512_CYCLE 0x10000U
#define EQOS_DMA_CHX_RX_WDT_RWTU 512U
/* Below macros are used for periodic reg validation for functional safety.
* HW register mask - to mask out reserved and self-clearing bits
*/
#define EQOS_DMA_CHX_CTRL_MASK 0x11D3FFFU
#define EQOS_DMA_CHX_TX_CTRL_MASK 0xF3F9010U
#define EQOS_DMA_CHX_RX_CTRL_MASK 0x8F3F7FE0U
#define EQOS_DMA_CHX_TDRL_MASK 0x3FFU
#define EQOS_DMA_CHX_RDRL_MASK 0x3FFU
#define EQOS_DMA_CHX_INTR_ENA_MASK 0xFFC7U
#ifndef OSI_STRIPPED_LIB
#define EQOS_DMA_CHX_SLOT_SIV_MASK 0xFFFU
#define EQOS_DMA_CHX_SLOT_SIV_SHIFT 4U
#define EQOS_DMA_CHX_SLOT_ESC 0x1U
#endif /* !OSI_STRIPPED_LIB */
/* To add new registers to validate,append at end of below macro list and
* increment EQOS_MAX_DMA_SAFETY_REGS.
* Using macros instead of enum due to misra error.
*/
#define EQOS_DMA_CH0_CTRL_IDX 0U
#define EQOS_DMA_CH1_CTRL_IDX 1U
#define EQOS_DMA_CH2_CTRL_IDX 2U
#define EQOS_DMA_CH3_CTRL_IDX 3U
#define EQOS_DMA_CH4_CTRL_IDX 4U
#define EQOS_DMA_CH5_CTRL_IDX 5U
#define EQOS_DMA_CH6_CTRL_IDX 6U
#define EQOS_DMA_CH7_CTRL_IDX 7U
#define EQOS_DMA_CH0_TX_CTRL_IDX 8U
#define EQOS_DMA_CH1_TX_CTRL_IDX 9U
#define EQOS_DMA_CH2_TX_CTRL_IDX 10U
#define EQOS_DMA_CH3_TX_CTRL_IDX 11U
#define EQOS_DMA_CH4_TX_CTRL_IDX 12U
#define EQOS_DMA_CH5_TX_CTRL_IDX 13U
#define EQOS_DMA_CH6_TX_CTRL_IDX 14U
#define EQOS_DMA_CH7_TX_CTRL_IDX 15U
#define EQOS_DMA_CH0_RX_CTRL_IDX 16U
#define EQOS_DMA_CH1_RX_CTRL_IDX 17U
#define EQOS_DMA_CH2_RX_CTRL_IDX 18U
#define EQOS_DMA_CH3_RX_CTRL_IDX 19U
#define EQOS_DMA_CH4_RX_CTRL_IDX 20U
#define EQOS_DMA_CH5_RX_CTRL_IDX 21U
#define EQOS_DMA_CH6_RX_CTRL_IDX 22U
#define EQOS_DMA_CH7_RX_CTRL_IDX 23U
#define EQOS_DMA_CH0_TDRL_IDX 24U
#define EQOS_DMA_CH1_TDRL_IDX 25U
#define EQOS_DMA_CH2_TDRL_IDX 26U
#define EQOS_DMA_CH3_TDRL_IDX 27U
#define EQOS_DMA_CH4_TDRL_IDX 28U
#define EQOS_DMA_CH5_TDRL_IDX 29U
#define EQOS_DMA_CH6_TDRL_IDX 30U
#define EQOS_DMA_CH7_TDRL_IDX 31U
#define EQOS_DMA_CH0_RDRL_IDX 32U
#define EQOS_DMA_CH1_RDRL_IDX 33U
#define EQOS_DMA_CH2_RDRL_IDX 34U
#define EQOS_DMA_CH3_RDRL_IDX 35U
#define EQOS_DMA_CH4_RDRL_IDX 36U
#define EQOS_DMA_CH5_RDRL_IDX 37U
#define EQOS_DMA_CH6_RDRL_IDX 38U
#define EQOS_DMA_CH7_RDRL_IDX 39U
#define EQOS_DMA_CH0_INTR_ENA_IDX 40U
#define EQOS_DMA_CH1_INTR_ENA_IDX 41U
#define EQOS_DMA_CH2_INTR_ENA_IDX 42U
#define EQOS_DMA_CH3_INTR_ENA_IDX 43U
#define EQOS_DMA_CH4_INTR_ENA_IDX 44U
#define EQOS_DMA_CH5_INTR_ENA_IDX 45U
#define EQOS_DMA_CH6_INTR_ENA_IDX 46U
#define EQOS_DMA_CH7_INTR_ENA_IDX 47U
#define EQOS_MAX_DMA_SAFETY_REGS 48U
#define EQOS_AXI_BUS_WIDTH 0x10U
/** @} */
/**
 * @brief dma_func_safety - Struct used to store last written values of
 * critical DMA HW registers, so a periodic validator can detect
 * unexpected register modification.
 */
struct dma_func_safety {
	/** Array of reg MMIO addresses (base EQoS + offset of reg);
	 * unused slots are OSI_NULL */
	void *reg_addr[EQOS_MAX_DMA_SAFETY_REGS];
	/** Array of bit-mask value of each corresponding reg
	 * (used to ignore self-clearing/reserved bits in reg) */
	nveu32_t reg_mask[EQOS_MAX_DMA_SAFETY_REGS];
	/** Array of value stored in each corresponding register
	 * (the last masked value written through the safety writel) */
	nveu32_t reg_val[EQOS_MAX_DMA_SAFETY_REGS];
	/** OSI lock variable used to protect writes to reg
	 * while validation is in-progress */
	nveu32_t dma_safety_lock;
};
/**
* @brief eqos_get_dma_safety_config - EQOS get DMA safety configuration
*
* @note
* API Group:
* - Initialization: Yes
* - Run time: No
* - De-initialization: No
*
* @returns Pointer to DMA safety configuration
*/
void *eqos_get_dma_safety_config(void);
#endif /* INCLUDED_EQOS_DMA_H */

View File

@@ -1,36 +0,0 @@
/*
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_HW_COMMON_H
#define INCLUDED_HW_COMMON_H
/**
* @addtogroup COMMON HW specific offset macros
*
* @brief Register offset values common for EQOS and MGBE
* @{
*/
#define HW_GLOBAL_DMA_STATUS 0x8700U
/** @} */
#endif /* INCLUDED_HW_COMMON_H */

View File

@@ -1,145 +0,0 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_HW_DESC_H
#define INCLUDED_HW_DESC_H
/**
* @addtogroup EQOS_RxDesc Receive Descriptors bit fields
*
* @brief These macros are used to check the value in specific bit fields of
* the descriptor. The fields in the descriptor are mapped as
* defined in the HW manual
* @{
*/
#define RDES3_OWN OSI_BIT(31)
#define RDES3_CTXT OSI_BIT(30)
#define RDES3_IOC OSI_BIT(30)
#define RDES3_B1V OSI_BIT(24)
#define RDES3_CDA OSI_BIT(27)
#define RDES3_LD OSI_BIT(28)
#define RDES3_FD OSI_BIT(29)
#define RDES3_ERR_CRC OSI_BIT(24)
#define RDES3_ERR_GP OSI_BIT(23)
#define RDES3_ERR_WD OSI_BIT(22)
#define RDES3_ERR_ORUN OSI_BIT(21)
#define RDES3_ERR_RE OSI_BIT(20)
#define RDES3_ERR_DRIB OSI_BIT(19)
#define RDES3_PKT_LEN 0x00007fffU
#define RDES3_LT (OSI_BIT(16) | OSI_BIT(17) | OSI_BIT(18))
#define RDES3_LT_VT OSI_BIT(18)
#define RDES3_LT_DVT (OSI_BIT(16) | OSI_BIT(18))
#define RDES3_RS0V OSI_BIT(25)
#define RDES3_RS1V OSI_BIT(26)
#define RDES3_RSV OSI_BIT(26)
#define RDES0_OVT 0x0000FFFFU
#define RDES3_TSD OSI_BIT(6)
#define RDES3_TSA OSI_BIT(4)
#define RDES1_TSA OSI_BIT(14)
#define RDES1_TD OSI_BIT(15)
#define RDES3_L34T 0x00F00000U
#define RDES3_L34T_IPV4_TCP OSI_BIT(20)
#define RDES3_L34T_IPV4_UDP OSI_BIT(21)
#define RDES3_L34T_IPV6_TCP (OSI_BIT(23) | OSI_BIT(20))
#define RDES3_L34T_IPV6_UDP (OSI_BIT(23) | OSI_BIT(21))
#define RDES1_IPCE OSI_BIT(7)
#define RDES1_IPCB OSI_BIT(6)
#define RDES1_IPV6 OSI_BIT(5)
#define RDES1_IPV4 OSI_BIT(4)
#define RDES1_IPHE OSI_BIT(3)
#define RDES1_PT_MASK (OSI_BIT(2) | OSI_BIT(1) | OSI_BIT(0))
#define RDES1_PT_TCP OSI_BIT(1)
#define RDES1_PT_UDP OSI_BIT(0)
#define RDES3_ELLT 0xF0000U
#define RDES3_ELLT_IPHE 0x50000U
#define RDES3_ELLT_CSUM_ERR 0x60000U
#define RDES3_ELLT_CVLAN 0x90000U
/** @} */
/** Error Summary bits for Received packet */
#define RDES3_ES_BITS \
(RDES3_ERR_CRC | RDES3_ERR_GP | RDES3_ERR_WD | \
RDES3_ERR_ORUN | RDES3_ERR_RE | RDES3_ERR_DRIB)
/** MGBE error summary bits for Received packet */
#define RDES3_ES_MGBE 0x8000U
#define RDES3_ERR_MGBE_CRC (OSI_BIT(16) | OSI_BIT(17))
/**
* @addtogroup EQOS_TxDesc Transmit Descriptors bit fields
*
* @brief These macros are used to check the value in specific bit fields of
* the descriptor. The fields in the descriptor are mapped as
* defined in the HW manual
* @{
*/
#define TDES2_IOC OSI_BIT(31)
#define TDES2_MSS_MASK 0x3FFFU
#define TDES3_OWN OSI_BIT(31)
#define TDES3_CTXT OSI_BIT(30)
#define TDES3_TCMSSV OSI_BIT(26)
#define TDES3_FD OSI_BIT(29)
#define TDES3_LD OSI_BIT(28)
#define TDES3_OSTC OSI_BIT(27)
#define TDES3_TSE OSI_BIT(18)
#define TDES3_HW_CIC_ALL (OSI_BIT(16) | OSI_BIT(17))
#define TDES3_HW_CIC_IP_ONLY (OSI_BIT(16))
#define TDES3_VT_MASK 0xFFFFU
#define TDES3_THL_MASK 0xFU
#define TDES3_TPL_MASK 0x3FFFFU
#define TDES3_PL_MASK 0x7FFFU
#define TDES3_THL_SHIFT 19U
#define TDES3_VLTV OSI_BIT(16)
#define TDES3_TTSS OSI_BIT(17)
#define TDES3_PIDV OSI_BIT(25)
/* Tx Errors */
#define TDES3_IP_HEADER_ERR OSI_BIT(0)
#define TDES3_UNDER_FLOW_ERR OSI_BIT(2)
#define TDES3_EXCESSIVE_DEF_ERR OSI_BIT(3)
#define TDES3_EXCESSIVE_COL_ERR OSI_BIT(8)
#define TDES3_LATE_COL_ERR OSI_BIT(9)
#define TDES3_NO_CARRIER_ERR OSI_BIT(10)
#define TDES3_LOSS_CARRIER_ERR OSI_BIT(11)
#define TDES3_PL_CHK_SUM_ERR OSI_BIT(12)
#define TDES3_PKT_FLUSH_ERR OSI_BIT(13)
#define TDES3_JABBER_TIMEO_ERR OSI_BIT(14)
/* VTIR = 0x2 (Insert a VLAN tag with the tag value programmed in the
* MAC_VLAN_Incl register or context descriptor.)
*/
#define TDES2_VTIR ((nveu32_t)0x2 << 14U)
#define TDES2_TTSE ((nveu32_t)0x1 << 30U)
/** @} */
/** Error Summary bits for Transmitted packet */
#define TDES3_ES_BITS (TDES3_IP_HEADER_ERR | \
TDES3_UNDER_FLOW_ERR | \
TDES3_EXCESSIVE_DEF_ERR | \
TDES3_EXCESSIVE_COL_ERR | \
TDES3_LATE_COL_ERR | \
TDES3_NO_CARRIER_ERR | \
TDES3_LOSS_CARRIER_ERR | \
TDES3_PL_CHK_SUM_ERR | \
TDES3_PKT_FLUSH_ERR | \
TDES3_JABBER_TIMEO_ERR)
#endif /* INCLUDED_HW_DESC_H */

View File

@@ -1,42 +0,0 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# libnvethernetcl interface export
#
###############################################################################
osi_start_dma
osi_stop_dma
osi_get_refill_rx_desc_cnt
osi_rx_dma_desc_init
osi_set_rx_buf_len
osi_hw_transmit
osi_process_tx_completions
osi_process_rx_completions
osi_hw_dma_init
osi_hw_dma_deinit
osi_init_dma_ops
osi_dma_get_systime_from_mac
osi_is_mac_enabled
osi_get_dma
osi_handle_dma_intr
osi_get_global_dma_status
osi_dma_ioctl

View File

@@ -1,230 +0,0 @@
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "dma_local.h"
#include "hw_desc.h"
#include "mgbe_desc.h"
/**
 * @brief mgbe_get_rx_vlan - Get Rx VLAN from descriptor
 *
 * Algorithm:
 *  - When the descriptor ELLT field reports a CVLAN packet, mark the
 *    per-packet context as VLAN tagged and extract the outer VLAN tag
 *    (OVT) from RDES0.
 *
 * @param[in] rx_desc: Rx descriptor
 * @param[in] rx_pkt_cx: Per-Rx packet context structure
 */
static inline void mgbe_get_rx_vlan(struct osi_rx_desc *rx_desc,
				    struct osi_rx_pkt_cx *rx_pkt_cx)
{
	unsigned int ellt_field = rx_desc->rdes3 & RDES3_ELLT;

	/* NOTE(review): this masked compare also matches any ELLT value
	 * that merely contains the CVLAN bits, not only ELLT == CVLAN —
	 * confirm intended against the HW manual. */
	if ((ellt_field & RDES3_ELLT_CVLAN) != RDES3_ELLT_CVLAN) {
		return;
	}
	rx_pkt_cx->flags |= OSI_PKT_CX_VLAN;
	rx_pkt_cx->vlan_tag = rx_desc->rdes0 & RDES0_OVT;
}
/**
 * @brief mgbe_update_rx_err_stats - Detect Errors from Rx Descriptor
 *
 * Algorithm: This routine will be invoked by OSI layer itself which
 * checks for the Last Descriptor and updates the receive status errors
 * accordingly.
 *
 * @param[in] rx_desc: Rx Descriptor.
 * @param[in] stats: Packet error stats which stores the errors reported
 */
static inline void mgbe_update_rx_err_stats(struct osi_rx_desc *rx_desc,
					    struct osi_pkt_err_stats *stats)
{
	unsigned int frpsm = 0;
	unsigned int frpsl = 0;

	/* increment rx crc if we see CE bit set */
	if ((rx_desc->rdes3 & RDES3_ERR_MGBE_CRC) == RDES3_ERR_MGBE_CRC) {
		stats->rx_crc_error =
			osi_update_stats_counter(stats->rx_crc_error, 1UL);
	}
	/* Update FRP Counters: the FRPSM/FRPSL bit combinations map to
	 * parsed / dropped / error / incomplete, one counter each below */
	frpsm = rx_desc->rdes2 & MGBE_RDES2_FRPSM;
	frpsl = rx_desc->rdes3 & MGBE_RDES3_FRPSL;
	/* Increment FRP parsed count */
	if ((frpsm == OSI_NONE) && (frpsl == OSI_NONE)) {
		stats->frp_parsed =
			osi_update_stats_counter(stats->frp_parsed, 1UL);
	}
	/* Increment FRP dropped count */
	if ((frpsm == OSI_NONE) && (frpsl == MGBE_RDES3_FRPSL)) {
		stats->frp_dropped =
			osi_update_stats_counter(stats->frp_dropped, 1UL);
	}
	/* Increment FRP Parsing Error count */
	if ((frpsm == MGBE_RDES2_FRPSM) && (frpsl == OSI_NONE)) {
		stats->frp_err =
			osi_update_stats_counter(stats->frp_err, 1UL);
	}
	/* Increment FRP Incomplete Parsing count */
	if ((frpsm == MGBE_RDES2_FRPSM) && (frpsl == MGBE_RDES3_FRPSL)) {
		stats->frp_incomplete =
			osi_update_stats_counter(stats->frp_incomplete, 1UL);
	}
}
/**
 * @brief mgbe_get_rx_csum - Get the Rx checksum from descriptor if valid
 *
 * Algorithm:
 *  - Inspect the ELLT field for checksum validation errors.
 *  - If neither an IP header error nor a payload checksum error is
 *    reported, mark the packet context so the OSD layer can skip
 *    IP/TCP/UDP checksum validation in software (when COE is enabled).
 *
 * @param[in] rx_desc: Rx descriptor
 * @param[in] rx_pkt_cx: Per-Rx packet context structure
 */
static void mgbe_get_rx_csum(struct osi_rx_desc *rx_desc,
			     struct osi_rx_pkt_cx *rx_pkt_cx)
{
	unsigned int ellt_field = rx_desc->rdes3 & RDES3_ELLT;

	/* Always include either checksum none/unnecessary depending on
	 * status fields in desc; no explicit OSI_PKT_CX_CSUM flag needed. */
	if ((ellt_field == RDES3_ELLT_IPHE) ||
	    (ellt_field == RDES3_ELLT_CSUM_ERR)) {
		return;
	}
	rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UNNECESSARY;
}
/**
 * @brief mgbe_get_rx_hash - Get Rx packet hash from descriptor if valid
 *
 * Algorithm: When the RSS Valid (RSV) bit is set in RDES3, classify the
 * hash as L4 (for TCP/UDP over IPv4/IPv6 per the L34T field) or L3
 * otherwise, copy the hash value from RDES1 and mark the packet context
 * as carrying an RSS hash.
 *
 * @param[in] rx_desc: Rx Descriptor.
 * @param[in] rx_pkt_cx: Per-Rx packet context structure
 */
static void mgbe_get_rx_hash(struct osi_rx_desc *rx_desc,
			     struct osi_rx_pkt_cx *rx_pkt_cx)
{
	unsigned int l34t;

	/* No valid RSS hash reported by HW for this packet */
	if ((rx_desc->rdes3 & RDES3_RSV) != RDES3_RSV) {
		return;
	}

	l34t = rx_desc->rdes3 & RDES3_L34T;
	if ((l34t == RDES3_L34T_IPV4_TCP) ||
	    (l34t == RDES3_L34T_IPV4_UDP) ||
	    (l34t == RDES3_L34T_IPV6_TCP) ||
	    (l34t == RDES3_L34T_IPV6_UDP)) {
		rx_pkt_cx->rx_hash_type = OSI_RX_PKT_HASH_TYPE_L4;
	} else {
		rx_pkt_cx->rx_hash_type = OSI_RX_PKT_HASH_TYPE_L3;
	}

	/* Rx hash value lives in RDES1 (RSSH) */
	rx_pkt_cx->rx_hash = rx_desc->rdes1;
	rx_pkt_cx->flags |= OSI_PKT_CX_RSS;
}
/**
 * @brief mgbe_get_rx_hwstamp - Get Rx HW Time stamp
 *
 * Algorithm:
 *	1) Check for TS availability (Context Descriptor Availability bit).
 *	2) Poll the context descriptor (up to a bounded number of retries,
 *	   1us apart) until HW releases it with a valid timestamp.
 *	3) If valid, set OSI_PKT_CX_PTP and store the nanosecond value in
 *	   rx_pkt_cx so the OSD layer can extract the time.
 *
 * @param[in] osi_dma: OSI DMA private data structure (for udelay op).
 * @param[in] rx_desc: Rx descriptor
 * @param[in] context_desc: Rx context descriptor
 * @param[in] rx_pkt_cx: Rx packet context
 *
 * @retval -1 if TimeStamp is not available
 * @retval 0 if TimeStamp is available.
 */
static int mgbe_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma,
			       struct osi_rx_desc *rx_desc,
			       struct osi_rx_desc *context_desc,
			       struct osi_rx_pkt_cx *rx_pkt_cx)
{
	/* Max 1us polls waiting for HW to release the context descriptor */
	const int max_retries = 10;
	int retry;

	if ((rx_desc->rdes3 & RDES3_CDA) != RDES3_CDA) {
		return -1;
	}

	for (retry = 0; retry < max_retries; retry++) {
		/* Context desc must be owned by SW, be a context type,
		 * have a timestamp available and not dropped.
		 */
		if (((context_desc->rdes3 & RDES3_OWN) == 0U) &&
		    ((context_desc->rdes3 & RDES3_CTXT) == RDES3_CTXT) &&
		    ((context_desc->rdes3 & RDES3_TSA) == RDES3_TSA) &&
		    ((context_desc->rdes3 & RDES3_TSD) != RDES3_TSD)) {
			if ((context_desc->rdes0 == OSI_INVALID_VALUE) &&
			    (context_desc->rdes1 == OSI_INVALID_VALUE)) {
				/* Invalid time stamp */
				return -1;
			}
			/* Update rx pkt context flags to indicate PTP */
			rx_pkt_cx->flags |= OSI_PKT_CX_PTP;
			/* Time Stamp can be read */
			break;
		} else {
			/* TS not available yet, so retrying */
			osi_dma->osd_ops.udelay(OSI_DELAY_1US);
		}
	}
	if (retry == max_retries) {
		/* Timed out waiting for Rx timestamp */
		return -1;
	}

	/* rdes1 holds seconds, rdes0 nanoseconds */
	rx_pkt_cx->ns = context_desc->rdes0 +
			(OSI_NSEC_PER_SEC * context_desc->rdes1);
	if (rx_pkt_cx->ns < context_desc->rdes0) {
		/* Overflow guard - will not hit this case */
		return -1;
	}

	return 0;
}
/**
 * @brief mgbe_init_desc_ops - Populate MGBE descriptor operations
 *
 * Algorithm: Fill the common descriptor operations structure with the
 * MGBE-specific Rx descriptor handlers defined in this file.
 *
 * @param[in] d_ops: Descriptor operations structure to initialize.
 */
void mgbe_init_desc_ops(struct desc_ops *d_ops)
{
	d_ops->update_rx_err_stats = mgbe_update_rx_err_stats;
	d_ops->get_rx_csum = mgbe_get_rx_csum;
	d_ops->get_rx_hash = mgbe_get_rx_hash;
	d_ops->get_rx_vlan = mgbe_get_rx_vlan;
	d_ops->get_rx_hwstamp = mgbe_get_rx_hwstamp;
}

View File

@@ -1,37 +0,0 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef MGBE_DESC_H_
#define MGBE_DESC_H_
/**
* @addtogroup MGBE MAC FRP Stats.
*
* @brief Values defined for the MGBE Flexible Receive Parser Receive Status
* @{
*/
#define MGBE_RDES2_FRPSM OSI_BIT(10)
#define MGBE_RDES3_FRPSL OSI_BIT(14)
/** @} */
#endif /* MGBE_DESC_H_ */

View File

@@ -1,743 +0,0 @@
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "../osi/common/common.h"
#include <osi_common.h>
#include "mgbe_dma.h"
#include "dma_local.h"
/**
 * @brief mgbe_disable_chan_tx_intr - Disables DMA Tx channel interrupts.
 *
 * Algorithm: Clear the Tx enable bit in the per-channel virtual
 * interrupt control register.
 *
 * @param[in] addr: Base address indicating the start of
 *	      memory mapped IO region of the MAC.
 * @param[in] chan: DMA Tx channel number. Bound validation
 *	      (MGBE_CHECK_CHAN_BOUND) is left to the caller.
 *
 * @note 1) MAC needs to be out of reset and proper clocks need to be configured
 *	 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
 *	 3) Mapping of physical IRQ line to DMA channel need to be maintained at
 *	    OSDependent layer and pass corresponding channel number.
 */
static void mgbe_disable_chan_tx_intr(void *addr, nveu32_t chan)
{
	nveu32_t cntrl;

	cntrl = osi_readl((nveu8_t *)addr +
			  MGBE_VIRT_INTR_CHX_CNTRL(chan));
	cntrl &= ~MGBE_VIRT_INTR_CHX_CNTRL_TX;
	osi_writel(cntrl, (nveu8_t *)addr +
		   MGBE_VIRT_INTR_CHX_CNTRL(chan));
}
/**
 * @brief mgbe_enable_chan_tx_intr - Enable Tx channel interrupts.
 *
 * Algorithm: Set the Tx enable bit in the per-channel virtual
 * interrupt control register.
 *
 * @param[in] addr: Base address indicating the start of
 *	      memory mapped IO region of the MAC.
 * @param[in] chan: DMA Tx channel number. Bound validation
 *	      (MGBE_CHECK_CHAN_BOUND) is left to the caller.
 *
 * @note 1) MAC needs to be out of reset and proper clocks need to be configured
 *	 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
 *	 3) Mapping of physical IRQ line to DMA channel need to be maintained at
 *	    OSDependent layer and pass corresponding channel number.
 */
static void mgbe_enable_chan_tx_intr(void *addr, nveu32_t chan)
{
	nveu32_t cntrl;

	cntrl = osi_readl((nveu8_t *)addr +
			  MGBE_VIRT_INTR_CHX_CNTRL(chan));
	cntrl |= MGBE_VIRT_INTR_CHX_CNTRL_TX;
	osi_writel(cntrl, (nveu8_t *)addr +
		   MGBE_VIRT_INTR_CHX_CNTRL(chan));
}
/**
 * @brief mgbe_disable_chan_rx_intr - Disable Rx channel interrupts.
 *
 * Algorithm: Clear the Rx enable bit in the per-channel virtual
 * interrupt control register.
 *
 * @param[in] addr: Base address indicating the start of
 *	      memory mapped IO region of the MAC.
 * @param[in] chan: DMA Rx channel number. Bound validation
 *	      (MGBE_CHECK_CHAN_BOUND) is left to the caller.
 *
 * @note 1) MAC needs to be out of reset and proper clocks need to be configured
 *	 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
 *	 3) Mapping of physical IRQ line to DMA channel need to be maintained at
 *	    OSDependent layer and pass corresponding channel number.
 */
static void mgbe_disable_chan_rx_intr(void *addr, nveu32_t chan)
{
	nveu32_t cntrl;

	cntrl = osi_readl((nveu8_t *)addr +
			  MGBE_VIRT_INTR_CHX_CNTRL(chan));
	cntrl &= ~MGBE_VIRT_INTR_CHX_CNTRL_RX;
	osi_writel(cntrl, (nveu8_t *)addr +
		   MGBE_VIRT_INTR_CHX_CNTRL(chan));
}
/**
 * @brief mgbe_enable_chan_rx_intr - Enable Rx channel interrupts.
 *
 * Algorithm: Set the Rx enable bit in the per-channel virtual
 * interrupt control register.
 *
 * @param[in] addr: Base address indicating the start of
 *	      memory mapped IO region of the MAC.
 * @param[in] chan: DMA Rx channel number. Bound validation
 *	      (MGBE_CHECK_CHAN_BOUND) is left to the caller.
 *
 * @note 1) MAC needs to be out of reset and proper clocks need to be configured
 *	 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
 */
static void mgbe_enable_chan_rx_intr(void *addr, nveu32_t chan)
{
	nveu32_t cntrl;

	cntrl = osi_readl((nveu8_t *)addr +
			  MGBE_VIRT_INTR_CHX_CNTRL(chan));
	cntrl |= MGBE_VIRT_INTR_CHX_CNTRL_RX;
	osi_writel(cntrl, (nveu8_t *)addr +
		   MGBE_VIRT_INTR_CHX_CNTRL(chan));
}
/**
 * @brief mgbe_set_tx_ring_len - Set DMA Tx ring length.
 *
 * Algorithm: Set DMA Tx channel ring length for specific channel.
 *
 * @param[in] osi_dma: OSI DMA data structure.
 * @param[in] chan: DMA Tx channel number. Bound validation
 *	      (MGBE_CHECK_CHAN_BOUND) is left to the caller.
 * @param[in] len: Ring length (masked to MGBE_DMA_RING_LENGTH_MASK).
 */
static void mgbe_set_tx_ring_len(struct osi_dma_priv_data *osi_dma,
				 nveu32_t chan,
				 nveu32_t len)
{
	void *addr = osi_dma->base;
	nveu32_t value;

	value = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_TX_CNTRL2(chan));
	/* NOTE(review): length bits are OR-ed without clearing the previous
	 * field value; relies on the field being zero at init - confirm.
	 */
	value |= (len & MGBE_DMA_RING_LENGTH_MASK);
	osi_writel(value, (nveu8_t *)addr + MGBE_DMA_CHX_TX_CNTRL2(chan));
}
/**
 * @brief mgbe_set_tx_ring_start_addr - Set DMA Tx ring base address.
 *
 * Algorithm: Program the high and low 32 bits of the Tx descriptor list
 * base address for a specific channel.
 *
 * @param[in] addr: Base address indicating the start of
 *	      memory mapped IO region of the MAC.
 * @param[in] chan: DMA Tx channel number. Bound validation
 *	      (MGBE_CHECK_CHAN_BOUND) is left to the caller.
 * @param[in] tx_desc: Tx desc base address (physical).
 */
static void mgbe_set_tx_ring_start_addr(void *addr, nveu32_t chan,
					nveu64_t tx_desc)
{
	nveu64_t temp;

	temp = H32(tx_desc);
	if (temp < UINT_MAX) {
		osi_writel((nveu32_t)temp, (nveu8_t *)addr +
			   MGBE_DMA_CHX_TDLH(chan));
	}
	temp = L32(tx_desc);
	if (temp < UINT_MAX) {
		osi_writel((nveu32_t)temp, (nveu8_t *)addr +
			   MGBE_DMA_CHX_TDLA(chan));
	}
}
/**
 * @brief mgbe_update_tx_tailptr - Updates DMA Tx ring tail pointer.
 *
 * Algorithm: Program the low 32 bits of the Tx ring tail pointer for a
 * specific channel. Only the low word register (TDTLP) is written here;
 * the high word is not updated by this helper.
 *
 * @param[in] addr: Base address indicating the start of
 *	      memory mapped IO region of the MAC.
 * @param[in] chan: DMA Tx channel number. Bound validation
 *	      (MGBE_CHECK_CHAN_BOUND) is left to the caller.
 * @param[in] tailptr: DMA Tx ring tail pointer (physical).
 *
 * @note 1) MAC needs to be out of reset and proper clocks need to be configured
 *	 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
 */
static void mgbe_update_tx_tailptr(void *addr, nveu32_t chan,
				   nveu64_t tailptr)
{
	nveu64_t temp;

	temp = L32(tailptr);
	if (temp < UINT_MAX) {
		osi_writel((nveu32_t)temp, (nveu8_t *)addr +
			   MGBE_DMA_CHX_TDTLP(chan));
	}
}
/**
 * @brief mgbe_set_rx_ring_len - Set Rx channel ring length.
 *
 * Algorithm: Sets DMA Rx channel ring length for specific DMA channel.
 *
 * @param[in] osi_dma: OSI DMA data structure.
 * @param[in] chan: DMA Rx channel number. Bound validation
 *	      (MGBE_CHECK_CHAN_BOUND) is left to the caller.
 * @param[in] len: Ring length (masked to MGBE_DMA_RING_LENGTH_MASK).
 */
static void mgbe_set_rx_ring_len(struct osi_dma_priv_data *osi_dma,
				 nveu32_t chan,
				 nveu32_t len)
{
	void *addr = osi_dma->base;
	nveu32_t value;

	value = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_RX_CNTRL2(chan));
	/* NOTE(review): length bits are OR-ed without clearing the previous
	 * field value; relies on the field being zero at init - confirm.
	 */
	value |= (len & MGBE_DMA_RING_LENGTH_MASK);
	osi_writel(value, (nveu8_t *)addr + MGBE_DMA_CHX_RX_CNTRL2(chan));
}
/**
 * @brief mgbe_set_rx_ring_start_addr - Set DMA Rx ring base address.
 *
 * Algorithm: Program the high and low 32 bits of the Rx descriptor list
 * base address for a specific channel.
 *
 * @param[in] addr: Base address indicating the start of
 *	      memory mapped IO region of the MAC.
 * @param[in] chan: DMA Rx channel number. Bound validation
 *	      (MGBE_CHECK_CHAN_BOUND) is left to the caller.
 * @param[in] rx_desc: DMA Rx desc base address (physical).
 */
static void mgbe_set_rx_ring_start_addr(void *addr, nveu32_t chan,
					nveu64_t rx_desc)
{
	nveu64_t temp;

	temp = H32(rx_desc);
	if (temp < UINT_MAX) {
		osi_writel((nveu32_t)temp, (nveu8_t *)addr +
			   MGBE_DMA_CHX_RDLH(chan));
	}
	temp = L32(rx_desc);
	if (temp < UINT_MAX) {
		osi_writel((nveu32_t)temp, (nveu8_t *)addr +
			   MGBE_DMA_CHX_RDLA(chan));
	}
}
/**
 * @brief mgbe_update_rx_tailptr - Update Rx ring tail pointer
 *
 * Algorithm: Program the high and low 32 bits of the Rx ring tail
 * pointer for a specific channel.
 *
 * @param[in] addr: Base address indicating the start of
 *	      memory mapped IO region of the MAC.
 * @param[in] chan: DMA Rx channel number. Bound validation
 *	      (MGBE_CHECK_CHAN_BOUND) is left to the caller.
 * @param[in] tailptr: Tail pointer (physical).
 *
 * @note 1) MAC needs to be out of reset and proper clocks need to be configured
 *	 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
 */
static void mgbe_update_rx_tailptr(void *addr, nveu32_t chan,
				   nveu64_t tailptr)
{
	nveu64_t temp;

	temp = H32(tailptr);
	if (temp < UINT_MAX) {
		osi_writel((nveu32_t)temp, (nveu8_t *)addr +
			   MGBE_DMA_CHX_RDTHP(chan));
	}
	temp = L32(tailptr);
	if (temp < UINT_MAX) {
		osi_writel((nveu32_t)temp, (nveu8_t *)addr +
			   MGBE_DMA_CHX_RDTLP(chan));
	}
}
/**
 * @brief mgbe_start_dma - Start DMA.
 *
 * Algorithm: Start Tx and Rx DMA for specific channel by setting the
 * start bit (bit 0) of the channel Tx/Rx control registers, and
 * clearing bit 31 of the Rx control register.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] chan: DMA Tx/Rx channel number. Bound validation
 *	      (MGBE_CHECK_CHAN_BOUND) is left to the caller.
 *
 * @note 1) MAC needs to be out of reset and proper clocks need to be configured
 *	 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
 */
static void mgbe_start_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan)
{
	nveu32_t val;
	void *addr = osi_dma->base;

	/* start Tx DMA: set bit 0 (ST) */
	val = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_TX_CTRL(chan));
	val |= OSI_BIT(0);
	osi_writel(val, (nveu8_t *)addr + MGBE_DMA_CHX_TX_CTRL(chan));

	/* start Rx DMA: set bit 0 (SR), clear bit 31 (set by stop path) */
	val = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_RX_CTRL(chan));
	val |= OSI_BIT(0);
	val &= ~OSI_BIT(31);
	osi_writel(val, (nveu8_t *)addr + MGBE_DMA_CHX_RX_CTRL(chan));
}
/**
 * @brief mgbe_stop_dma - Stop DMA.
 *
 * Algorithm: Stop Tx and Rx DMA for specific channel by clearing the
 * start bit (bit 0) of the channel Tx/Rx control registers, and setting
 * bit 31 of the Rx control register (cleared again by the start path).
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] chan: DMA Tx/Rx channel number. Bound validation
 *	      (MGBE_CHECK_CHAN_BOUND) is left to the caller.
 *
 * @note 1) MAC needs to be out of reset and proper clocks need to be configured
 *	 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
 */
static void mgbe_stop_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan)
{
	nveu32_t val;
	void *addr = osi_dma->base;

	/* stop Tx DMA: clear bit 0 (ST) */
	val = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_TX_CTRL(chan));
	val &= ~OSI_BIT(0);
	osi_writel(val, (nveu8_t *)addr + MGBE_DMA_CHX_TX_CTRL(chan));

	/* stop Rx DMA: clear bit 0 (SR), set bit 31 */
	val = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_RX_CTRL(chan));
	val &= ~OSI_BIT(0);
	val |= OSI_BIT(31);
	osi_writel(val, (nveu8_t *)addr + MGBE_DMA_CHX_RX_CTRL(chan));
}
/**
 * @brief mgbe_configure_dma_channel - Configure DMA channel
 *
 * Algorithm: This takes care of configuring the below
 * parameters for the DMA channel
 * 1) Enabling DMA channel interrupts
 * 2) Enable 8xPBL mode
 * 3) Program Tx, Rx PBL
 * 4) Enable TSO if HW supports
 * 5) Program Rx Watchdog timer
 * 6) Program Out Standing DMA Read Requests
 * 7) Program Out Standing DMA write Requests
 *
 * @param[in] chan: DMA channel number that need to be configured.
 * @param[in] owrq: out standing write dma requests
 * @param[in] orrq: out standing read dma requests
 * @param[in] osi_dma: OSI DMA private data structure.
 *
 * @note MAC has to be out of reset.
 */
static void mgbe_configure_dma_channel(nveu32_t chan,
				       nveu32_t owrq,
				       nveu32_t orrq,
				       struct osi_dma_priv_data *osi_dma)
{
	nveu32_t value;
	nveu32_t txpbl;
	nveu32_t rxpbl;
#if 0
	MGBE_CHECK_CHAN_BOUND(chan);
#endif
	/* enable DMA channel interrupts */
	/* Enable TIE and TBUE */
	/* TIE - Transmit Interrupt Enable */
	/* TBUE - Transmit Buffer Unavailable Enable */
	/* RIE - Receive Interrupt Enable */
	/* RBUE - Receive Buffer Unavailable Enable */
	/* AIE - Abnormal Interrupt Summary Enable */
	/* NIE - Normal Interrupt Summary Enable */
	/* FBE - Fatal Bus Error Enable */
	value = osi_readl((nveu8_t *)osi_dma->base +
			  MGBE_DMA_CHX_INTR_ENA(chan));
	value |= MGBE_DMA_CHX_INTR_TIE | MGBE_DMA_CHX_INTR_TBUE |
		 MGBE_DMA_CHX_INTR_RIE | MGBE_DMA_CHX_INTR_RBUE |
		 MGBE_DMA_CHX_INTR_FBEE | MGBE_DMA_CHX_INTR_AIE |
		 MGBE_DMA_CHX_INTR_NIE;
	/* For multi-irqs to work nie needs to be disabled
	 * (NIE set above, then cleared again here on purpose).
	 */
	/* TODO: do we need this ? */
	value &= ~(MGBE_DMA_CHX_INTR_NIE);
	osi_writel(value, (nveu8_t *)osi_dma->base +
		   MGBE_DMA_CHX_INTR_ENA(chan));
	/* Enable 8xPBL mode */
	value = osi_readl((nveu8_t *)osi_dma->base +
			  MGBE_DMA_CHX_CTRL(chan));
	value |= MGBE_DMA_CHX_CTRL_PBLX8;
	osi_writel(value, (nveu8_t *)osi_dma->base +
		   MGBE_DMA_CHX_CTRL(chan));
	/* Configure DMA channel Transmit control register */
	value = osi_readl((nveu8_t *)osi_dma->base +
			  MGBE_DMA_CHX_TX_CTRL(chan));
	/* Enable OSF mode */
	value |= MGBE_DMA_CHX_TX_CTRL_OSP;
	/*
	 * Formula for TxPBL calculation is
	 * (TxPBL) < ((TXQSize - MTU)/(DATAWIDTH/8)) - 5
	 * if TxPBL exceeds the value of 256 then we need to make use of 256
	 * as the TxPBL else we should be using the value whcih we get after
	 * calculation by using above formula
	 */
	if (osi_dma->pre_si == OSI_ENABLE) {
		txpbl = ((((MGBE_TXQ_RXQ_SIZE_FPGA / osi_dma->num_dma_chans) -
			osi_dma->mtu) / (MGBE_AXI_DATAWIDTH / 8U)) - 5U);
	} else {
		txpbl = ((((MGBE_TXQ_SIZE / osi_dma->num_dma_chans) -
			osi_dma->mtu) / (MGBE_AXI_DATAWIDTH / 8U)) - 5U);
	}
	/* Since PBLx8 is set, so txpbl/8 will be the value that
	 * need to be programmed
	 */
	if (txpbl >= MGBE_DMA_CHX_MAX_PBL) {
		value |= ((MGBE_DMA_CHX_MAX_PBL / 8U) <<
			 MGBE_DMA_CHX_CTRL_PBL_SHIFT);
	} else {
		value |= ((txpbl / 8U) << MGBE_DMA_CHX_CTRL_PBL_SHIFT);
	}
	/* enable TSO by default if HW supports */
	value |= MGBE_DMA_CHX_TX_CTRL_TSE;
	osi_writel(value, (nveu8_t *)osi_dma->base +
		   MGBE_DMA_CHX_TX_CTRL(chan));
	/* Configure DMA channel Receive control register */
	/* Select Rx Buffer size. Needs to be rounded up to next multiple of
	 * bus width
	 */
	value = osi_readl((nveu8_t *)osi_dma->base +
			  MGBE_DMA_CHX_RX_CTRL(chan));
	/* clear previous Rx buffer size */
	value &= ~MGBE_DMA_CHX_RBSZ_MASK;
	value |= (osi_dma->rx_buf_len << MGBE_DMA_CHX_RBSZ_SHIFT);
	/* RxPBL calculation is
	 * RxPBL <= Rx Queue Size/2
	 *
	 * NOTE(review): unlike txpbl, rxpbl is pre-shifted by
	 * MGBE_DMA_CHX_CTRL_PBL_SHIFT here, then compared against
	 * MGBE_DMA_CHX_MAX_PBL and shifted again below - the comparison
	 * is effectively always true, so the max-PBL branch is taken.
	 * Looks like an unintended double shift - confirm intent.
	 */
	if (osi_dma->pre_si == OSI_ENABLE) {
		rxpbl = (((MGBE_TXQ_RXQ_SIZE_FPGA / osi_dma->num_dma_chans) /
			 2U) << MGBE_DMA_CHX_CTRL_PBL_SHIFT);
	} else {
		rxpbl = (((MGBE_RXQ_SIZE / osi_dma->num_dma_chans) / 2U) <<
			 MGBE_DMA_CHX_CTRL_PBL_SHIFT);
	}
	/* Since PBLx8 is set, so rxpbl/8 will be the value that
	 * need to be programmed
	 */
	if (rxpbl >= MGBE_DMA_CHX_MAX_PBL) {
		value |= ((MGBE_DMA_CHX_MAX_PBL / 8) <<
			 MGBE_DMA_CHX_CTRL_PBL_SHIFT);
	} else {
		value |= ((rxpbl / 8) << MGBE_DMA_CHX_CTRL_PBL_SHIFT);
	}
	osi_writel(value, (nveu8_t *)osi_dma->base +
		   MGBE_DMA_CHX_RX_CTRL(chan));
	/* Set Receive Interrupt Watchdog Timer Count */
	/* conversion of usec to RWIT value
	 * Eg:System clock is 62.5MHz, each clock cycle would then be 16ns
	 * For value 0x1 in watchdog timer,device would wait for 256 clk cycles,
	 * ie, (16ns x 256) => 4.096us (rounding off to 4us)
	 * So formula with above values is,ret = usec/4
	 */
	/* NOTE: Bug 3287883: If RWTU value programmed then driver needs
	 * to follow below order -
	 * 1. First write RWT field with non-zero value.
	 * 2. Program RWTU field of register
	 *    DMA_CH(#i)_Rx_Interrupt_Watchdog_Time.
	 */
	if ((osi_dma->use_riwt == OSI_ENABLE) &&
	    (osi_dma->rx_riwt < UINT_MAX)) {
		value = osi_readl((nveu8_t *)osi_dma->base +
				  MGBE_DMA_CHX_RX_WDT(chan));
		/* Mask the RWT value */
		value &= ~MGBE_DMA_CHX_RX_WDT_RWT_MASK;
		/* Conversion of usec to Rx Interrupt Watchdog Timer Count */
		/* TODO: Need to fix AXI clock for silicon */
		value |= ((osi_dma->rx_riwt *
			 ((nveu32_t)MGBE_AXI_CLK_FREQ / OSI_ONE_MEGA_HZ)) /
			 MGBE_DMA_CHX_RX_WDT_RWTU) &
			 MGBE_DMA_CHX_RX_WDT_RWT_MASK;
		osi_writel(value, (nveu8_t *)osi_dma->base +
			   MGBE_DMA_CHX_RX_WDT(chan));
		value = osi_readl((nveu8_t *)osi_dma->base +
				  MGBE_DMA_CHX_RX_WDT(chan));
		value &= ~(MGBE_DMA_CHX_RX_WDT_RWTU_MASK <<
			   MGBE_DMA_CHX_RX_WDT_RWTU_SHIFT);
		value |= (MGBE_DMA_CHX_RX_WDT_RWTU_2048_CYCLE <<
			  MGBE_DMA_CHX_RX_WDT_RWTU_SHIFT);
		osi_writel(value, (nveu8_t *)osi_dma->base +
			   MGBE_DMA_CHX_RX_WDT(chan));
	}
	/* Update ORRQ in DMA_CH(#i)_Tx_Control2 register */
	value = osi_readl((nveu8_t *)osi_dma->base +
			  MGBE_DMA_CHX_TX_CNTRL2(chan));
	value |= (orrq << MGBE_DMA_CHX_TX_CNTRL2_ORRQ_SHIFT);
	osi_writel(value, (nveu8_t *)osi_dma->base +
		   MGBE_DMA_CHX_TX_CNTRL2(chan));
	/* Update OWRQ in DMA_CH(#i)_Rx_Control2 register */
	value = osi_readl((nveu8_t *)osi_dma->base +
			  MGBE_DMA_CHX_RX_CNTRL2(chan));
	value |= (owrq << MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SHIFT);
	osi_writel(value, (nveu8_t *)osi_dma->base +
		   MGBE_DMA_CHX_RX_CNTRL2(chan));
}
/**
 * @brief mgbe_init_dma_channel - DMA channel INIT
 *
 * Algorithm: Derive the outstanding read (ORRQ) / write (OWRQ) DMA
 * request budgets for this configuration and program every enabled DMA
 * channel with them.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 *
 * @retval 0 always.
 */
static nve32_t mgbe_init_dma_channel(struct osi_dma_priv_data *osi_dma)
{
	nveu32_t chans = osi_dma->num_dma_chans;
	nveu32_t orrq;
	nveu32_t owrq;
	nveu32_t i;

	/* DMA Read Out Standing Requests: 64 split across channels for
	 * silicon and pre-si multi channel; pre-si single channel uses 16.
	 */
	if ((chans == 1U) && (osi_dma->pre_si == OSI_ENABLE)) {
		orrq = MGBE_DMA_CHX_RX_CNTRL2_ORRQ_SCHAN_PRESI;
	} else {
		orrq = MGBE_DMA_CHX_TX_CNTRL2_ORRQ_RECOMMENDED / chans;
	}

	/* DMA Write Out Standing Requests: single channel is 32 on Si and
	 * 8 on pre-si; multi channel splits 64 across channels.
	 */
	if (chans == 1U) {
		owrq = (osi_dma->pre_si == OSI_ENABLE) ?
		       MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN_PRESI :
		       MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN;
	} else {
		owrq = MGBE_DMA_CHX_RX_CNTRL2_OWRQ_MCHAN / chans;
	}

	/* configure MGBE DMA channels */
	for (i = 0; i < chans; i++) {
		mgbe_configure_dma_channel(osi_dma->dma_chans[i], owrq, orrq,
					   osi_dma);
	}

	return 0;
}
/**
 * @brief mgbe_set_rx_buf_len - Set Rx buffer length
 * Sets the Rx buffer length based on the new MTU size set.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 *
 * @note 1) MAC needs to be out of reset and proper clocks need to be configured
 *	 2) DMA HW init need to be completed successfully, see osi_hw_dma_init
 *	 3) osi_dma->mtu need to be filled with current MTU size <= 9K
 */
static void mgbe_set_rx_buf_len(struct osi_dma_priv_data *osi_dma)
{
	/* MTU plus Ethernet header, VLAN and NET IP align overhead */
	nveu32_t len = osi_dma->mtu + OSI_ETH_HLEN + NV_VLAN_HLEN +
		       OSI_NET_IP_ALIGN;

	/* Round the buffer length up to the next AXI bus width multiple */
	osi_dma->rx_buf_len = (len + (MGBE_AXI_BUS_WIDTH - 1U)) &
			      ~(MGBE_AXI_BUS_WIDTH - 1U);
}
/**
 * @brief Read-validate HW registers for functional safety.
 *
 * @note
 * Algorithm:
 *  - Reads pre-configured list of MAC/MTL configuration registers
 *    and compares with last written value for any modifications.
 *  - Currently a stub for MGBE: performs no validation and always
 *    reports success.
 *
 * @param[in] osi_dma: OSI DMA private data structure (unused in stub).
 *
 * @pre
 *  - MAC has to be out of reset.
 *  - osi_hw_dma_init has to be called. Internally this would initialize
 *    the safety_config (see osi_dma_priv_data) based on MAC version and
 *    which specific registers needs to be validated periodically.
 *  - Invoke this call if (osi_dma_priv_data->safety_config != OSI_NULL)
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: Yes
 * - De-initialization: No
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
static nve32_t mgbe_validate_dma_regs(OSI_UNUSED
				      struct osi_dma_priv_data *osi_dma)
{
	/* TODO: for mgbe */
	return 0;
}
/**
 * @brief mgbe_clear_vm_tx_intr - Clear VM Tx interrupt
 *
 * Algorithm: Clear Tx interrupt source at DMA and wrapper level, then
 * mask further Tx interrupts for the channel.
 *
 * @param[in] addr: MAC base address.
 * @param[in] chan: DMA Tx channel number. Bound validation
 *	      (MGBE_CHECK_CHAN_BOUND) is left to the caller.
 */
static void mgbe_clear_vm_tx_intr(void *addr, nveu32_t chan)
{
	osi_writel(MGBE_DMA_CHX_STATUS_CLEAR_TX,
		   (nveu8_t *)addr + MGBE_DMA_CHX_STATUS(chan));
	osi_writel(MGBE_VIRT_INTR_CHX_STATUS_TX,
		   (nveu8_t *)addr + MGBE_VIRT_INTR_CHX_STATUS(chan));
	mgbe_disable_chan_tx_intr(addr, chan);
}
/**
 * @brief mgbe_clear_vm_rx_intr - Clear VM Rx interrupt
 *
 * Algorithm: Clear Rx interrupt source at DMA and wrapper level, then
 * mask further Rx interrupts for the channel.
 *
 * @param[in] addr: MAC base address.
 * @param[in] chan: DMA Rx channel number. Bound validation
 *	      (MGBE_CHECK_CHAN_BOUND) is left to the caller.
 */
static void mgbe_clear_vm_rx_intr(void *addr, nveu32_t chan)
{
	osi_writel(MGBE_DMA_CHX_STATUS_CLEAR_RX,
		   (nveu8_t *)addr + MGBE_DMA_CHX_STATUS(chan));
	osi_writel(MGBE_VIRT_INTR_CHX_STATUS_RX,
		   (nveu8_t *)addr + MGBE_VIRT_INTR_CHX_STATUS(chan));
	mgbe_disable_chan_rx_intr(addr, chan);
}
/**
 * @brief mgbe_config_slot - Configure slot Checking for DMA channel
 *
 * Algorithm: Set/Reset the ESC (Enable Slot Checking) bit of the
 * channel SLOT CTRL register based on the given input. Only ESC is
 * touched; the SIV field is not programmed here, and the interval
 * parameter is unused (slot interval is fixed for MGBE at 125usec).
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] chan: DMA channel number to enable slot function. Bound
 *	      validation (MGBE_CHECK_CHAN_BOUND) is left to the caller.
 * @param[in] set: flag for set/reset with value OSI_ENABLE/OSI_DISABLE
 * @param[in] interval: slot interval fixed for MGBE - its 125usec
 *
 * @note 1) MAC should be init and started. see osi_start_mac()
 *	 2) OSD should be initialized
 */
static void mgbe_config_slot(struct osi_dma_priv_data *osi_dma,
			     unsigned int chan,
			     unsigned int set,
			     OSI_UNUSED unsigned int interval)
{
	unsigned int value;

	/* Read-modify-write ESC bit of the SLOT CTRL register */
	value = osi_readl((unsigned char *)osi_dma->base +
			  MGBE_DMA_CHX_SLOT_CTRL(chan));
	if (set == OSI_ENABLE) {
		value |= MGBE_DMA_CHX_SLOT_ESC;
	} else {
		value &= ~MGBE_DMA_CHX_SLOT_ESC;
	}
	osi_writel(value, (unsigned char *)osi_dma->base +
		   MGBE_DMA_CHX_SLOT_CTRL(chan));
}
/**
 * @brief mgbe_init_dma_chan_ops - Populate MGBE DMA channel operations
 *
 * Algorithm: Fill the common DMA channel operations structure with the
 * MGBE-specific handlers defined in this file.
 *
 * @param[in] ops: DMA channel operations structure to initialize.
 */
void mgbe_init_dma_chan_ops(struct dma_chan_ops *ops)
{
	ops->set_tx_ring_len = mgbe_set_tx_ring_len;
	ops->set_rx_ring_len = mgbe_set_rx_ring_len;
	ops->set_tx_ring_start_addr = mgbe_set_tx_ring_start_addr;
	ops->set_rx_ring_start_addr = mgbe_set_rx_ring_start_addr;
	ops->update_tx_tailptr = mgbe_update_tx_tailptr;
	ops->update_rx_tailptr = mgbe_update_rx_tailptr;
	ops->disable_chan_tx_intr = mgbe_disable_chan_tx_intr;
	ops->enable_chan_tx_intr = mgbe_enable_chan_tx_intr;
	ops->disable_chan_rx_intr = mgbe_disable_chan_rx_intr;
	ops->enable_chan_rx_intr = mgbe_enable_chan_rx_intr;
	ops->start_dma = mgbe_start_dma;
	ops->stop_dma = mgbe_stop_dma;
	ops->init_dma_channel = mgbe_init_dma_channel;
	ops->set_rx_buf_len = mgbe_set_rx_buf_len;
	ops->validate_regs = mgbe_validate_dma_regs;
	ops->clear_vm_tx_intr = mgbe_clear_vm_tx_intr;
	ops->clear_vm_rx_intr = mgbe_clear_vm_rx_intr;
	ops->config_slot = mgbe_config_slot;
}

View File

@@ -1,165 +0,0 @@
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INCLUDED_MGBE_DMA_H
#define INCLUDED_MGBE_DMA_H

/**
 * @addtogroup MGBE AXI Clock defines
 *
 * @brief AXI Clock defines
 * @{
 */
#define MGBE_AXI_CLK_FREQ			480000000U
/** @} */

/**
 * @addtogroup Timestamp Capture Register
 * @brief MGBE MAC Timestamp Register offset
 * @{
 */
#define MGBE_MAC_TSS				0x0D20
#define MGBE_MAC_TS_NSEC			0x0D30
#define MGBE_MAC_TS_SEC				0x0D34
#define MGBE_MAC_TS_PID				0x0D38
/** @} */

/**
 * @addtogroup MGBE_DMA DMA Channel Register offsets
 *
 * @brief MGBE DMA Channel register offsets
 * @{
 */
#define MGBE_DMA_CHX_TX_CTRL(x)			((0x0080U * (x)) + 0x3104U)
#define MGBE_DMA_CHX_RX_CTRL(x)			((0x0080U * (x)) + 0x3108U)
#define MGBE_DMA_CHX_SLOT_CTRL(x)		((0x0080U * (x)) + 0x310CU)
#define MGBE_DMA_CHX_INTR_ENA(x)		((0x0080U * (x)) + 0x3138U)
#define MGBE_DMA_CHX_CTRL(x)			((0x0080U * (x)) + 0x3100U)
#define MGBE_DMA_CHX_RX_WDT(x)			((0x0080U * (x)) + 0x313CU)
#define MGBE_DMA_CHX_TX_CNTRL2(x)		((0x0080U * (x)) + 0x3130U)
#define MGBE_DMA_CHX_RX_CNTRL2(x)		((0x0080U * (x)) + 0x3134U)
#define MGBE_DMA_CHX_TDLH(x)			((0x0080U * (x)) + 0x3110U)
#define MGBE_DMA_CHX_TDLA(x)			((0x0080U * (x)) + 0x3114U)
#define MGBE_DMA_CHX_TDTLP(x)			((0x0080U * (x)) + 0x3124U)
#define MGBE_DMA_CHX_TDTHP(x)			((0x0080U * (x)) + 0x3120U)
#define MGBE_DMA_CHX_RDLH(x)			((0x0080U * (x)) + 0x3118U)
#define MGBE_DMA_CHX_RDLA(x)			((0x0080U * (x)) + 0x311CU)
#define MGBE_DMA_CHX_RDTHP(x)			((0x0080U * (x)) + 0x3128U)
#define MGBE_DMA_CHX_RDTLP(x)			((0x0080U * (x)) + 0x312CU)
/** @} */

/**
 * @addtogroup MGBE_INTR INT Channel Register offsets
 *
 * @brief MGBE Virtural Interrupt Channel register offsets
 * @{
 */
#define MGBE_VIRT_INTR_CHX_STATUS(x)		(0x8604U + ((x) * 8U))
#define MGBE_VIRT_INTR_CHX_CNTRL(x)		(0x8600U + ((x) * 8U))
#define MGBE_VIRT_INTR_APB_CHX_CNTRL(x)		(0x8200U + ((x) * 4U))
/** @} */

/**
 * @addtogroup MGBE_BIT BIT fields for MGBE channel registers
 *
 * @brief Values defined for the MGBE registers
 * @{
 */
#define MGBE_DMA_CHX_TX_CTRL_OSP		OSI_BIT(4)
#define MGBE_DMA_CHX_TX_CTRL_TSE		OSI_BIT(12)
#define MGBE_DMA_CHX_RX_WDT_RWT_MASK		0xFFU
#define MGBE_DMA_CHX_RX_WDT_RWTU		2048U
#define MGBE_DMA_CHX_RX_WDT_RWTU_2048_CYCLE	3U
#define MGBE_DMA_CHX_RX_WDT_RWTU_MASK		3U
#define MGBE_DMA_CHX_RX_WDT_RWTU_SHIFT		12U
#define MGBE_DMA_CHX_RBSZ_MASK			0x7FFEU
#define MGBE_DMA_CHX_RBSZ_SHIFT			1U
#define MGBE_AXI_BUS_WIDTH			0x10U
#define MGBE_DMA_CHX_CTRL_PBLX8			OSI_BIT(16)
#define MGBE_DMA_CHX_INTR_TIE			OSI_BIT(0)
#define MGBE_DMA_CHX_INTR_TBUE			OSI_BIT(2)
#define MGBE_DMA_CHX_INTR_RIE			OSI_BIT(6)
#define MGBE_DMA_CHX_INTR_RBUE			OSI_BIT(7)
#define MGBE_DMA_CHX_INTR_FBEE			OSI_BIT(12)
#define MGBE_DMA_CHX_INTR_AIE			OSI_BIT(14)
#define MGBE_DMA_CHX_INTR_NIE			OSI_BIT(15)
#define MGBE_DMA_CHX_STATUS_TI			OSI_BIT(0)
#define MGBE_DMA_CHX_STATUS_RI			OSI_BIT(6)
#define MGBE_DMA_CHX_STATUS_NIS			OSI_BIT(15)
#define MGBE_DMA_CHX_SLOT_ESC			OSI_BIT(0)
#define MGBE_DMA_CHX_STATUS_CLEAR_TX		(MGBE_DMA_CHX_STATUS_TI | \
						 MGBE_DMA_CHX_STATUS_NIS)
#define MGBE_DMA_CHX_STATUS_CLEAR_RX		(MGBE_DMA_CHX_STATUS_RI | \
						 MGBE_DMA_CHX_STATUS_NIS)
#define MGBE_VIRT_INTR_CHX_STATUS_TX		OSI_BIT(0)
#define MGBE_VIRT_INTR_CHX_STATUS_RX		OSI_BIT(1)
#define MGBE_VIRT_INTR_CHX_CNTRL_TX		OSI_BIT(0)
#define MGBE_VIRT_INTR_CHX_CNTRL_RX		OSI_BIT(1)
#define MGBE_DMA_CHX_TX_CNTRL2_ORRQ_RECOMMENDED	64U
#define MGBE_DMA_CHX_TX_CNTRL2_ORRQ_SHIFT	24U
#define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN	32U
#define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_MCHAN	64U
#define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SHIFT	24U
#define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN_PRESI	8U
#define MGBE_DMA_CHX_RX_CNTRL2_ORRQ_SCHAN_PRESI	16U
#define MGBE_DMA_RING_LENGTH_MASK		0xFFFFU
#define MGBE_DMA_CHX_CTRL_PBL_SHIFT		16U
/** @} */

/**
 * @addtogroup MGBE PBL settings.
 *
 * @brief Values defined for PBL settings
 * @{
 */
/* Tx and Rx Qsize is 64KB */
#define MGBE_TXQ_RXQ_SIZE_FPGA			65536U
/* Tx Queue size is 128KB */
#define MGBE_TXQ_SIZE				131072U
/* Rx Queue size is 192KB */
#define MGBE_RXQ_SIZE				196608U
/* MAX PBL value */
#define MGBE_DMA_CHX_MAX_PBL			256U
/* AXI Data width */
#define MGBE_AXI_DATAWIDTH			128U
/** @} */

/**
 * @addtogroup MGBE MAC timestamp registers bit field.
 *
 * @brief Values defined for the MGBE timestamp registers
 * @{
 */
#define MGBE_MAC_TSS_TXTSC			OSI_BIT(15)
#define MGBE_MAC_TS_PID_MASK			0x3FFU
#define MGBE_MAC_TS_NSEC_MASK			0x7FFFFFFFU
/** @} */

/**
 * @brief mgbe_get_dma_chan_ops - MGBE get DMA channel operations
 *
 * Algorithm: Returns pointer DMA channel operations structure.
 *
 * NOTE(review): the corresponding .c file exposes
 * mgbe_init_dma_chan_ops(struct dma_chan_ops *) instead - this
 * prototype looks stale; confirm it is still defined anywhere.
 *
 * @returns Pointer to DMA channel operations structure
 */
struct osi_dma_chan_ops *mgbe_get_dma_chan_ops(void);
#endif

View File

@@ -1,922 +0,0 @@
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "dma_local.h"
#include <local_common.h>
#include "hw_desc.h"
#include "../osi/common/common.h"
#ifdef OSI_DEBUG
#include "debug.h"
#endif /* OSI_DEBUG */
#include "hw_common.h"
/**
 * @brief g_dma - DMA local data array.
 *
 * File-scope pool of DMA instances; osi_get_dma() hands out the first
 * entry whose init_done flag is not set.
 */
static struct dma_local g_dma[MAX_DMA_INSTANCES];

/**
 * @brief g_ops - local DMA HW operations array.
 *
 * One channel-ops table per supported MAC IP type (EQOS/MGBE); populated
 * by osi_init_dma_ops() via the per-MAC init callbacks.
 */
static struct dma_chan_ops g_ops[MAX_MAC_IP_TYPES];
/**
 * @brief osi_get_dma - Hand out a free OSI DMA instance.
 *
 * Scans the static instance pool for the first slot that is not yet
 * initialized, stamps its magic number and returns its public handle.
 *
 * @retval pointer to an osi_dma_priv_data on success
 * @retval OSI_NULL when every instance is already in use.
 */
struct osi_dma_priv_data *osi_get_dma(void)
{
	nveu32_t idx = 0U;

	/* Find the first instance that has not completed init. */
	while (idx < MAX_DMA_INSTANCES) {
		if (g_dma[idx].init_done != OSI_ENABLE) {
			break;
		}
		idx++;
	}

	if (idx >= MAX_DMA_INSTANCES) {
		/* Pool exhausted. */
		return OSI_NULL;
	}

	/* Magic number later proves the handle came from this pool. */
	g_dma[idx].magic_num = (nveu64_t)&g_dma[idx].osi_dma;

	return &g_dma[idx].osi_dma;
}
/**
 * @brief Function to validate input arguments of API.
 *
 * Rejects a NULL handle, a NULL register base, or a handle whose local
 * state was never initialized. Short-circuits so l_dma is never
 * dereferenced when osi_dma (the same pointer) is NULL.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] l_dma: Local OSI DMA data structure.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: Yes
 * - De-initialization: Yes
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
static inline nve32_t validate_args(struct osi_dma_priv_data *osi_dma,
				    struct dma_local *l_dma)
{
	nve32_t ret = 0;

	if (osi_dma == OSI_NULL) {
		ret = -1;
	} else if (osi_dma->base == OSI_NULL) {
		ret = -1;
	} else if (l_dma->init_done == OSI_DISABLE) {
		ret = -1;
	} else {
		/* all checks passed */
	}

	return ret;
}
/**
 * @brief Function to validate input arguments of API.
 *
 * Checks that a DMA channel index is below the HW channel count stored
 * during init (channels are 0-based).
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] chan: DMA channel number.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: Yes
 * - De-initialization: Yes
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
static inline nve32_t validate_dma_chan_num(struct osi_dma_priv_data *osi_dma,
					    nveu32_t chan)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;
	nve32_t ret = 0;

	if (chan >= l_dma->max_chans) {
		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
			    "Invalid DMA channel number\n", chan);
		ret = -1;
	}

	return ret;
}
/**
 * @brief Function to validate array of DMA channels.
 *
 * Walks osi_dma->dma_chans[] and fails if any entry is out of range for
 * the detected HW.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: Yes
 * - De-initialization: Yes
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
static inline nve32_t validate_dma_chans(struct osi_dma_priv_data *osi_dma)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;
	nveu32_t i = 0;

	for (i = 0; i < osi_dma->num_dma_chans; i++) {
		/* BUG FIX: channels are 0-based, so max_chans itself is out
		 * of range. Use >= to match validate_dma_chan_num(); the
		 * original '>' let chan == max_chans slip through.
		 */
		if (osi_dma->dma_chans[i] >= l_dma->max_chans) {
			OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
				    "Invalid DMA channel number:\n",
				    osi_dma->dma_chans[i]);
			return -1;
		}
	}

	return 0;
}
/**
 * @brief Function to validate function pointers.
 *
 * Treats the ops structure as a flat array of pointer-sized words and
 * fails if any slot is still zero, i.e. a HW callback was never filled in.
 * Assumes every member of struct dma_chan_ops is a function pointer —
 * TODO confirm if members are ever added with other types.
 *
 * @param[in] osi_dma: OSI DMA private data structure (unused in the body;
 *                     kept for API symmetry).
 * @param[in] ops_p: Pointer to OSI DMA channel operations.
 *
 * @note
 * API Group:
 * - Initialization: Yes
 * - Run time: No
 * - De-initialization: No
 *
 * @retval 0 on Success
 * @retval -1 on Failure
 */
static nve32_t validate_func_ptrs(struct osi_dma_priv_data *osi_dma,
				  struct dma_chan_ops *ops_p)
{
	nveu32_t i = 0;
	/* Go through void* so the word-pointer cast below is explicit. */
	void *temp_ops = (void *)ops_p;
#if __SIZEOF_POINTER__ == 8
	nveu64_t *l_ops = (nveu64_t *)temp_ops;
#elif __SIZEOF_POINTER__ == 4
	nveu32_t *l_ops = (nveu32_t *)temp_ops;
#else
	/* Unsupported pointer width: cannot walk the struct word-by-word. */
	OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
		    "DMA: Undefined architecture\n", 0ULL);
	return -1;
#endif
	/* One pointer-sized slot per member of struct dma_chan_ops. */
	for (i = 0; i < (sizeof(*ops_p) / (nveu64_t)__SIZEOF_POINTER__); i++) {
		if (*l_ops == 0U) {
			OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
				    "dma: fn ptr validation failed at\n",
				    (nveu64_t)i);
			return -1;
		}
		l_ops++;
	}
	return 0;
}
/**
 * @brief osi_init_dma_ops - Initialize DMA operations for the detected MAC.
 *
 * Algorithm:
 *  - Validate the handle (magic number stamped by osi_get_dma(), not
 *    already initialized) and, unless running as ethernet server, the OSD
 *    callback pointers.
 *  - Clamp Tx/Rx ring sizes to the per-MAC defaults when zero, not a
 *    power of two, or outside [HW_MIN_RING_SZ, max].
 *  - Install the per-MAC channel ops table, optional safety config and
 *    descriptor ops, then mark the instance initialized.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
nve32_t osi_init_dma_ops(struct osi_dma_priv_data *osi_dma)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;
	nveu32_t default_rz[] = { EQOS_DEFAULT_RING_SZ, MGBE_DEFAULT_RING_SZ };
	nveu32_t max_rz[] = { EQOS_DEFAULT_RING_SZ, MGBE_MAX_RING_SZ };
	typedef void (*init_ops_arr)(struct dma_chan_ops *temp);
	typedef void *(*safety_init)(void);
	init_ops_arr i_ops[MAX_MAC_IP_TYPES] = {
		eqos_init_dma_chan_ops, mgbe_init_dma_chan_ops
	};
	safety_init s_init[MAX_MAC_IP_TYPES] = {
		eqos_get_dma_safety_config, OSI_NULL
	};

	if (osi_dma == OSI_NULL) {
		return -1;
	}

	/* Handle must carry the magic stamped by osi_get_dma() and must
	 * not have been initialized already.
	 */
	if ((l_dma->magic_num != (nveu64_t)osi_dma) ||
	    (l_dma->init_done == OSI_ENABLE)) {
		return -1;
	}

	if (osi_dma->is_ethernet_server != OSI_ENABLE) {
		if ((osi_dma->osd_ops.transmit_complete == OSI_NULL) ||
		    (osi_dma->osd_ops.receive_packet == OSI_NULL) ||
		    (osi_dma->osd_ops.ops_log == OSI_NULL) ||
#ifdef OSI_DEBUG
		    (osi_dma->osd_ops.printf == OSI_NULL) ||
#endif /* OSI_DEBUG */
		    (osi_dma->osd_ops.udelay == OSI_NULL)) {
			return -1;
		}
	}

	if (osi_dma->mac > OSI_MAC_HW_MGBE) {
		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
			    "DMA: Invalid MAC HW type\n", 0ULL);
		return -1;
	}

	if ((osi_dma->tx_ring_sz == 0U) ||
	    !(is_power_of_two(osi_dma->tx_ring_sz)) ||
	    (osi_dma->tx_ring_sz < HW_MIN_RING_SZ) ||
	    (osi_dma->tx_ring_sz > default_rz[osi_dma->mac])) {
		osi_dma->tx_ring_sz = default_rz[osi_dma->mac];
		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
			    "DMA: Using default Tx ring size: \n",
			    osi_dma->tx_ring_sz);
	}

	if ((osi_dma->rx_ring_sz == 0U) ||
	    !(is_power_of_two(osi_dma->rx_ring_sz)) ||
	    (osi_dma->rx_ring_sz < HW_MIN_RING_SZ) ||
	    (osi_dma->rx_ring_sz > max_rz[osi_dma->mac])) {
		osi_dma->rx_ring_sz = default_rz[osi_dma->mac];
		/* BUG FIX: log the Rx ring size here; the original logged
		 * tx_ring_sz in the Rx-size error message.
		 */
		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
			    "DMA: Using default rx ring size: \n",
			    osi_dma->rx_ring_sz);
	}

	/* Fill the per-MAC channel ops table. */
	i_ops[osi_dma->mac](&g_ops[osi_dma->mac]);

	if (s_init[osi_dma->mac] != OSI_NULL) {
		osi_dma->safety_config = s_init[osi_dma->mac]();
	}

	if (init_desc_ops(osi_dma) < 0) {
		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
			    "DMA desc ops init failed\n", 0ULL);
		return -1;
	}

	if (validate_func_ptrs(osi_dma, &g_ops[osi_dma->mac]) < 0) {
		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
			    "DMA ops validation failed\n", 0ULL);
		return -1;
	}

	l_dma->ops_p = &g_ops[osi_dma->mac];
	l_dma->init_done = OSI_ENABLE;

	return 0;
}
/**
 * @brief osi_hw_dma_init - Initialize DMA channels and descriptor rings.
 *
 * Reads the MAC version register, validates the configured channel list,
 * programs every DMA channel through the channel ops, initializes the
 * descriptor rings, then enables per-channel Tx/Rx interrupts and starts
 * DMA on each configured channel.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 *
 * @retval 0 on success
 * @retval negative value on failure.
 */
nve32_t osi_hw_dma_init(struct osi_dma_priv_data *osi_dma)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;
	nveu32_t i, chan;
	nve32_t ret;
	if (validate_args(osi_dma, l_dma) < 0) {
		return -1;
	}
	/* Snapshot the HW version (SNVER field) for later checks. */
	l_dma->mac_ver = osi_readl((nveu8_t *)osi_dma->base + MAC_VERSION) &
			 MAC_VERSION_SNVER_MASK;
	if (validate_mac_ver_update_chans(l_dma->mac_ver,
					  &l_dma->max_chans) == 0) {
		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
			    "Invalid MAC version\n", (nveu64_t)l_dma->mac_ver);
		return -1;
	}
	if (osi_dma->num_dma_chans > l_dma->max_chans) {
		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
			    "Invalid number of DMA channels\n", 0ULL);
		return -1;
	}
	if (validate_dma_chans(osi_dma) < 0) {
		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
			    "DMA channels validation failed\n", 0ULL);
		return -1;
	}
	/* Program the HW channels before touching descriptor memory. */
	ret = l_dma->ops_p->init_dma_channel(osi_dma);
	if (ret < 0) {
		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
			    "dma: init dma channel failed\n", 0ULL);
		return ret;
	}
	ret = dma_desc_init(osi_dma, l_dma->ops_p);
	if (ret != 0) {
		return ret;
	}
	/* VM interrupt mode is enabled for every MAC version other than
	 * EQOS 4.10 and EQOS 5.00.
	 */
	if ((l_dma->mac_ver != OSI_EQOS_MAC_4_10) &&
	    (l_dma->mac_ver != OSI_EQOS_MAC_5_00)) {
		l_dma->vm_intr = OSI_ENABLE;
	}
	/* Enable channel interrupts at wrapper level and start DMA */
	for (i = 0; i < osi_dma->num_dma_chans; i++) {
		chan = osi_dma->dma_chans[i];
		l_dma->ops_p->enable_chan_tx_intr(osi_dma->base, chan);
		l_dma->ops_p->enable_chan_rx_intr(osi_dma->base, chan);
		l_dma->ops_p->start_dma(osi_dma, chan);
	}
	/**
	 * OSD will update this if PTP needs to be run in different modes.
	 * Default configuration is PTP sync in two step sync with slave mode.
	 */
	if (osi_dma->ptp_flag == 0U) {
		osi_dma->ptp_flag = (OSI_PTP_SYNC_SLAVE | OSI_PTP_SYNC_TWOSTEP);
	}
	return 0;
}
/**
 * @brief osi_hw_dma_deinit - Stop DMA on every configured channel.
 *
 * Validates the handle and channel list, then stops DMA per channel.
 * The instance is intentionally NOT returned to the pool here (see FIXME).
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
nve32_t osi_hw_dma_deinit(struct osi_dma_priv_data *osi_dma)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;
	nveu32_t idx;

	if (validate_args(osi_dma, l_dma) < 0) {
		return -1;
	}

	if (osi_dma->num_dma_chans > l_dma->max_chans) {
		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
			    "Invalid number of DMA channels\n", 0ULL);
		return -1;
	}

	if (validate_dma_chans(osi_dma) < 0) {
		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
			    "DMA channels validation failed\n", 0ULL);
		return -1;
	}

	/* Halt DMA on each configured channel. */
	for (idx = 0U; idx < osi_dma->num_dma_chans; idx++) {
		l_dma->ops_p->stop_dma(osi_dma, osi_dma->dma_chans[idx]);
	}

	/* FIXME: Need to fix */
	// l_dma->magic_num = 0;
	// l_dma->init_done = OSI_DISABLE;

	return 0;
}
/**
 * @brief osi_disable_chan_tx_intr - Disable Tx interrupt on a DMA channel.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] chan: DMA channel number.
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
nve32_t osi_disable_chan_tx_intr(struct osi_dma_priv_data *osi_dma,
				 nveu32_t chan)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;

	if ((validate_args(osi_dma, l_dma) < 0) ||
	    (validate_dma_chan_num(osi_dma, chan) < 0)) {
		return -1;
	}

	l_dma->ops_p->disable_chan_tx_intr(osi_dma->base, chan);

	return 0;
}
/**
 * @brief osi_enable_chan_tx_intr - Enable Tx interrupt on a DMA channel.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] chan: DMA channel number.
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
nve32_t osi_enable_chan_tx_intr(struct osi_dma_priv_data *osi_dma,
				nveu32_t chan)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;

	if ((validate_args(osi_dma, l_dma) < 0) ||
	    (validate_dma_chan_num(osi_dma, chan) < 0)) {
		return -1;
	}

	l_dma->ops_p->enable_chan_tx_intr(osi_dma->base, chan);

	return 0;
}
/**
 * @brief osi_disable_chan_rx_intr - Disable Rx interrupt on a DMA channel.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] chan: DMA channel number.
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
nve32_t osi_disable_chan_rx_intr(struct osi_dma_priv_data *osi_dma,
				 nveu32_t chan)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;

	if ((validate_args(osi_dma, l_dma) < 0) ||
	    (validate_dma_chan_num(osi_dma, chan) < 0)) {
		return -1;
	}

	l_dma->ops_p->disable_chan_rx_intr(osi_dma->base, chan);

	return 0;
}
/**
 * @brief osi_enable_chan_rx_intr - Enable Rx interrupt on a DMA channel.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] chan: DMA channel number.
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
nve32_t osi_enable_chan_rx_intr(struct osi_dma_priv_data *osi_dma,
				nveu32_t chan)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;

	if ((validate_args(osi_dma, l_dma) < 0) ||
	    (validate_dma_chan_num(osi_dma, chan) < 0)) {
		return -1;
	}

	l_dma->ops_p->enable_chan_rx_intr(osi_dma->base, chan);

	return 0;
}
/**
 * @brief osi_clear_vm_tx_intr - Clear VM Tx interrupt on a DMA channel.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] chan: DMA channel number.
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
nve32_t osi_clear_vm_tx_intr(struct osi_dma_priv_data *osi_dma,
			     nveu32_t chan)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;

	if ((validate_args(osi_dma, l_dma) < 0) ||
	    (validate_dma_chan_num(osi_dma, chan) < 0)) {
		return -1;
	}

	l_dma->ops_p->clear_vm_tx_intr(osi_dma->base, chan);

	return 0;
}
/**
 * @brief osi_clear_vm_rx_intr - Clear VM Rx interrupt on a DMA channel.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] chan: DMA channel number.
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
nve32_t osi_clear_vm_rx_intr(struct osi_dma_priv_data *osi_dma,
			     nveu32_t chan)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;

	if ((validate_args(osi_dma, l_dma) < 0) ||
	    (validate_dma_chan_num(osi_dma, chan) < 0)) {
		return -1;
	}

	l_dma->ops_p->clear_vm_rx_intr(osi_dma->base, chan);

	return 0;
}
/**
 * @brief osi_get_global_dma_status - Read the global DMA status register.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 *
 * @retval register value on success, 0 when the handle is invalid.
 */
nveu32_t osi_get_global_dma_status(struct osi_dma_priv_data *osi_dma)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;
	nveu32_t status = 0U;

	if (validate_args(osi_dma, l_dma) == 0) {
		status = osi_readl((nveu8_t *)osi_dma->base +
				   HW_GLOBAL_DMA_STATUS);
	}

	return status;
}
/**
 * @brief osi_handle_dma_intr - Enable/disable/clear a channel Tx/Rx interrupt.
 *
 * Dispatches to the proper channel-ops callback based on VM interrupt
 * mode (l_dma->vm_intr), direction (tx_rx) and requested action (en_dis).
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] chan: DMA channel number.
 * @param[in] tx_rx: Direction selector (<= OSI_DMA_CH_RX_INTR).
 * @param[in] en_dis: Action selector (<= OSI_DMA_INTR_ENABLE).
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
nve32_t osi_handle_dma_intr(struct osi_dma_priv_data *osi_dma,
			    nveu32_t chan,
			    nveu32_t tx_rx,
			    nveu32_t en_dis)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;
	typedef void (*dma_intr_fn)(void *base, nveu32_t ch);

	/* BUG FIX: validate the handle BEFORE building the dispatch table.
	 * The original initialized the table first, dereferencing
	 * l_dma->ops_p eight times even when osi_dma was NULL or the
	 * instance was uninitialized.
	 */
	if (validate_args(osi_dma, l_dma) < 0) {
		return -1;
	}

	if (validate_dma_chan_num(osi_dma, chan) < 0) {
		return -1;
	}

	if ((tx_rx > OSI_DMA_CH_RX_INTR) ||
	    (en_dis > OSI_DMA_INTR_ENABLE)) {
		return -1;
	}

	/* [vm_intr][tx_rx][en_dis] -> callback */
	dma_intr_fn fn[2][2][2] = {
		{ { l_dma->ops_p->disable_chan_tx_intr, l_dma->ops_p->enable_chan_tx_intr },
		  { l_dma->ops_p->disable_chan_rx_intr, l_dma->ops_p->enable_chan_rx_intr } },
		{ { l_dma->ops_p->clear_vm_tx_intr, l_dma->ops_p->enable_chan_tx_intr },
		  { l_dma->ops_p->clear_vm_rx_intr, l_dma->ops_p->enable_chan_rx_intr } }
	};

	fn[l_dma->vm_intr][tx_rx][en_dis](osi_dma->base, chan);

	return 0;
}
/**
 * @brief osi_start_dma - Start DMA on a channel.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] chan: DMA channel number.
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
nve32_t osi_start_dma(struct osi_dma_priv_data *osi_dma,
		      nveu32_t chan)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;

	if ((validate_args(osi_dma, l_dma) < 0) ||
	    (validate_dma_chan_num(osi_dma, chan) < 0)) {
		return -1;
	}

	l_dma->ops_p->start_dma(osi_dma, chan);

	return 0;
}
/**
 * @brief osi_stop_dma - Stop DMA on a channel.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] chan: DMA channel number.
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
nve32_t osi_stop_dma(struct osi_dma_priv_data *osi_dma,
		     nveu32_t chan)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;

	if ((validate_args(osi_dma, l_dma) < 0) ||
	    (validate_dma_chan_num(osi_dma, chan) < 0)) {
		return -1;
	}

	l_dma->ops_p->stop_dma(osi_dma, chan);

	return 0;
}
/**
 * @brief osi_get_refill_rx_desc_cnt - Count Rx descriptors needing refill.
 *
 * Returns the ring distance from refill_idx to cur_rx_idx; the mask
 * arithmetic relies on rx_ring_sz being a power of two (enforced in
 * osi_init_dma_ops()).
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] chan: Rx DMA channel number.
 *
 * @retval number of descriptors to refill, 0 on any invalid argument.
 */
nveu32_t osi_get_refill_rx_desc_cnt(struct osi_dma_priv_data *osi_dma,
				    unsigned int chan)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;
	struct osi_rx_ring *rx_ring = OSI_NULL;

	/* BUG FIX: validate the handle and channel index like every sibling
	 * API; the original indexed osi_dma->rx_ring[chan] unchecked, so a
	 * NULL handle or out-of-range channel dereferenced wild memory.
	 */
	if ((validate_args(osi_dma, l_dma) < 0) ||
	    (validate_dma_chan_num(osi_dma, chan) < 0)) {
		return 0;
	}

	rx_ring = osi_dma->rx_ring[chan];
	if ((rx_ring == OSI_NULL) ||
	    (rx_ring->cur_rx_idx >= osi_dma->rx_ring_sz) ||
	    (rx_ring->refill_idx >= osi_dma->rx_ring_sz)) {
		return 0;
	}

	return (rx_ring->cur_rx_idx - rx_ring->refill_idx) &
	       (osi_dma->rx_ring_sz - 1U);
}
/**
 * @brief rx_dma_desc_validate_args - DMA Rx descriptor init args validation
 *
 * Algorithm: Validates DMA Rx descriptor init arguments.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] l_dma: Local OSI DMA data structure (passed to validate_args()).
 * @param[in] rx_ring: HW ring corresponding to Rx DMA channel.
 * @param[in] chan: Rx DMA channel number
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: Yes
 * - De-initialization: No
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
static inline nve32_t rx_dma_desc_validate_args(
	struct osi_dma_priv_data *osi_dma,
	struct dma_local *l_dma,
	struct osi_rx_ring *rx_ring,
	nveu32_t chan)
{
	if (validate_args(osi_dma, l_dma) < 0) {
		return -1;
	}
	/* Ring plus both software-context and descriptor arrays must exist. */
	if (!((rx_ring != OSI_NULL) && (rx_ring->rx_swcx != OSI_NULL) &&
	      (rx_ring->rx_desc != OSI_NULL))) {
		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
			    "dma: Invalid pointers\n", 0ULL);
		return -1;
	}
	if (validate_dma_chan_num(osi_dma, chan) < 0) {
		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
			    "dma: Invalid channel\n", 0ULL);
		return -1;
	}
	return 0;
}
/**
 * @brief rx_dma_handle_ioc - Adjust the IOC bit for RWIT / rx_frames modes.
 *
 * Algorithm:
 *  - With RWIT enabled, clear the descriptor's IOC bit.
 *  - With rx_frames also enabled (only valid together with RWIT), set IOC
 *    back on every rx_frames-th descriptor.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] rx_ring: HW ring corresponding to Rx DMA channel.
 * @param[in, out] rx_desc: Rx descriptor being re-armed.
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: Yes
 * - De-initialization: No
 */
static inline void rx_dma_handle_ioc(struct osi_dma_priv_data *osi_dma,
				     struct osi_rx_ring *rx_ring,
				     struct osi_rx_desc *rx_desc)
{
	if (osi_dma->use_riwt != OSI_ENABLE) {
		/* RWIT disabled: leave IOC exactly as the caller set it. */
		return;
	}

	rx_desc->rdes3 &= ~RDES3_IOC;

	if (osi_dma->use_rx_frames != OSI_ENABLE) {
		return;
	}

	/* Re-arm IOC once per rx_frames descriptors. */
	if ((rx_ring->refill_idx % osi_dma->rx_frames) == OSI_NONE) {
		rx_desc->rdes3 |= RDES3_IOC;
	}
}
/**
 * @brief osi_rx_dma_desc_init - Refill Rx descriptors and update tail pointer.
 *
 * Walks the ring from refill_idx towards cur_rx_idx, re-arming every
 * descriptor whose software context holds a valid buffer, then writes the
 * Rx tail pointer so the DMA resumes if it was suspended.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] rx_ring: HW ring corresponding to Rx DMA channel.
 * @param[in] chan: Rx DMA channel number.
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
nve32_t osi_rx_dma_desc_init(struct osi_dma_priv_data *osi_dma,
			     struct osi_rx_ring *rx_ring, nveu32_t chan)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;
	nveu64_t tailptr = 0;
	struct osi_rx_swcx *rx_swcx = OSI_NULL;
	struct osi_rx_desc *rx_desc = OSI_NULL;
	if (rx_dma_desc_validate_args(osi_dma, l_dma, rx_ring, chan) < 0) {
		/* Return on arguments validation failure */
		return -1;
	}
	/* Refill buffers */
	while ((rx_ring->refill_idx != rx_ring->cur_rx_idx) &&
	       (rx_ring->refill_idx < osi_dma->rx_ring_sz)) {
		rx_swcx = rx_ring->rx_swcx + rx_ring->refill_idx;
		rx_desc = rx_ring->rx_desc + rx_ring->refill_idx;
		/* Stop at the first slot OSD has not replenished yet. */
		if ((rx_swcx->flags & OSI_RX_SWCX_BUF_VALID) !=
		    OSI_RX_SWCX_BUF_VALID) {
			break;
		}
		rx_swcx->flags = 0;
		/* Populate the newly allocated buffer address */
		rx_desc->rdes0 = L32(rx_swcx->buf_phy_addr);
		rx_desc->rdes1 = H32(rx_swcx->buf_phy_addr);
		rx_desc->rdes2 = 0;
		rx_desc->rdes3 = RDES3_IOC;
		if (osi_dma->mac == OSI_MAC_HW_EQOS) {
			rx_desc->rdes3 |= RDES3_B1V;
		}
		/* Reset IOC bit if RWIT is enabled */
		rx_dma_handle_ioc(osi_dma, rx_ring, rx_desc);
		/* Hand ownership back to HW only after the descriptor is
		 * fully written. */
		rx_desc->rdes3 |= RDES3_OWN;
		INCR_RX_DESC_INDEX(rx_ring->refill_idx, osi_dma->rx_ring_sz);
	}
	/* Update the Rx tail ptr whenever buffer is replenished to
	 * kick the Rx DMA to resume if it is in suspend. Always set
	 * Rx tailptr to 1 greater than last descriptor in the ring since HW
	 * knows to loop over to start of ring.
	 */
	tailptr = rx_ring->rx_desc_phy_addr +
		  (sizeof(struct osi_rx_desc) * (osi_dma->rx_ring_sz));
	if (osi_unlikely(tailptr < rx_ring->rx_desc_phy_addr)) {
		/* Will not hit this case, used for CERT-C compliance */
		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
			    "dma: Invalid tailptr\n", 0ULL);
		return -1;
	}
	l_dma->ops_p->update_rx_tailptr(osi_dma->base, chan, tailptr);
	return 0;
}
/**
 * @brief osi_set_rx_buf_len - Program the Rx buffer length via channel ops.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
nve32_t osi_set_rx_buf_len(struct osi_dma_priv_data *osi_dma)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;
	nve32_t ret = -1;

	if (validate_args(osi_dma, l_dma) == 0) {
		l_dma->ops_p->set_rx_buf_len(osi_dma);
		ret = 0;
	}

	return ret;
}
/**
 * @brief osi_dma_get_systime_from_mac - Read MAC system time.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[out] sec: Seconds portion of the MAC time.
 * @param[out] nsec: Nanoseconds portion of the MAC time.
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
nve32_t osi_dma_get_systime_from_mac(struct osi_dma_priv_data *const osi_dma,
				     nveu32_t *sec, nveu32_t *nsec)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;
	nve32_t ret = -1;

	if (validate_args(osi_dma, l_dma) == 0) {
		common_get_systime_from_mac(osi_dma->base, osi_dma->mac,
					    sec, nsec);
		ret = 0;
	}

	return ret;
}
/**
 * @brief osi_is_mac_enabled - Query whether the MAC is enabled.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 *
 * @retval result of common_is_mac_enabled(), OSI_DISABLE on invalid handle.
 */
nveu32_t osi_is_mac_enabled(struct osi_dma_priv_data *const osi_dma)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;
	nveu32_t enabled = OSI_DISABLE;

	if (validate_args(osi_dma, l_dma) == 0) {
		enabled = common_is_mac_enabled(osi_dma->base, osi_dma->mac);
	}

	return enabled;
}
/**
 * @brief osi_hw_transmit - Kick a transmit on the given DMA channel.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] chan: Tx DMA channel number.
 *
 * @retval return value of hw_transmit() on success
 * @retval -1 on failure.
 */
nve32_t osi_hw_transmit(struct osi_dma_priv_data *osi_dma, nveu32_t chan)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;

	if (osi_unlikely((validate_args(osi_dma, l_dma) < 0) ||
			 (validate_dma_chan_num(osi_dma, chan) < 0))) {
		return -1;
	}

	if (osi_unlikely(osi_dma->tx_ring[chan] == OSI_NULL)) {
		OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
			    "DMA: Invalid Tx ring\n", 0ULL);
		return -1;
	}

	return hw_transmit(osi_dma, osi_dma->tx_ring[chan], l_dma->ops_p,
			   chan);
}
/**
 * @brief osi_dma_ioctl - Dispatch an OSI DMA IOCTL command.
 *
 * Only debug dump commands exist (and only with OSI_DEBUG builds); any
 * other command is rejected.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
nve32_t osi_dma_ioctl(struct osi_dma_priv_data *osi_dma)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;
	struct osi_dma_ioctl_data *data;
	nve32_t ret = 0;

	if (osi_unlikely(validate_args(osi_dma, l_dma) < 0)) {
		ret = -1;
	} else {
		data = &osi_dma->ioctl_data;
		switch (data->cmd) {
#ifdef OSI_DEBUG
		case OSI_DMA_IOCTL_CMD_REG_DUMP:
			reg_dump(osi_dma);
			break;
		case OSI_DMA_IOCTL_CMD_STRUCTS_DUMP:
			structs_dump(osi_dma);
			break;
#endif /* OSI_DEBUG */
		default:
			OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID,
				    "DMA: Invalid IOCTL command", 0ULL);
			ret = -1;
			break;
		}
	}

	return ret;
}
#ifndef OSI_STRIPPED_LIB
/**
 * @brief osi_slot_args_validate - Validate slot function arguments
 *
 * @note
 * Algorithm:
 * - Validate osi_dma structure pointers via validate_args().
 * - Reject any set value other than OSI_ENABLE/OSI_DISABLE.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] l_dma: Local OSI DMA data structure.
 * @param[in] set: Flag to set with OSI_ENABLE and reset with OSI_DISABLE
 *
 * @pre MAC should be init and started. see osi_start_mac()
 *
 * @note
 * API Group:
 * - Initialization: No
 * - Run time: Yes
 * - De-initialization: No
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
static inline nve32_t osi_slot_args_validate(struct osi_dma_priv_data *osi_dma,
					     struct dma_local *l_dma,
					     nveu32_t set)
{
	nve32_t ret = 0;

	if (validate_args(osi_dma, l_dma) < 0) {
		ret = -1;
	} else if ((set != OSI_ENABLE) && (set != OSI_DISABLE)) {
		/* reject anything but the two documented flag values */
		OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
			    "dma: Invalid set argument\n", set);
		ret = -1;
	} else {
		/* arguments are valid */
	}

	return ret;
}
/**
 * @brief osi_config_slot_function - Enable/disable slot function per channel.
 *
 * For every configured DMA channel with slot checking enabled, validates
 * the slot interval, marks the Tx ring for slot checking and programs the
 * slot function in HW via the channel ops.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] set: OSI_ENABLE to set, OSI_DISABLE to reset.
 *
 * @retval 0 on success
 * @retval -1 on failure.
 */
nve32_t osi_config_slot_function(struct osi_dma_priv_data *osi_dma,
				 nveu32_t set)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;
	nveu32_t i = 0U, chan = 0U, interval = 0U;
	struct osi_tx_ring *tx_ring = OSI_NULL;
	/* Validate arguments */
	if (osi_slot_args_validate(osi_dma, l_dma, set) < 0) {
		return -1;
	}
	for (i = 0; i < osi_dma->num_dma_chans; i++) {
		/* Get DMA channel and validate */
		chan = osi_dma->dma_chans[i];
		if ((chan == 0x0U) ||
		    (chan >= l_dma->max_chans)) {
			/* Ignore 0 and invalid channels */
			continue;
		}
		/* Check for slot enable */
		if (osi_dma->slot_enabled[chan] == OSI_ENABLE) {
			/* Get DMA slot interval and validate */
			interval = osi_dma->slot_interval[chan];
			if (interval > OSI_SLOT_INTVL_MAX) {
				OSI_DMA_ERR(osi_dma->osd,
					    OSI_LOG_ARG_INVALID,
					    "dma: Invalid interval arguments\n",
					    interval);
				return -1;
			}
			tx_ring = osi_dma->tx_ring[chan];
			if (tx_ring == OSI_NULL) {
				OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID,
					    "tx_ring is null\n", chan);
				return -1;
			}
			tx_ring->slot_check = set;
			/* Program the slot function in HW for this channel. */
			l_dma->ops_p->config_slot(osi_dma, chan, set, interval);
		}
	}
	return 0;
}
/**
 * @brief osi_validate_dma_regs - Validate DMA registers via channel ops.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 *
 * @retval return value of the validate_regs callback on success
 * @retval -1 on invalid handle.
 */
nve32_t osi_validate_dma_regs(struct osi_dma_priv_data *osi_dma)
{
	struct dma_local *l_dma = (struct dma_local *)osi_dma;
	nve32_t ret = -1;

	if (validate_args(osi_dma, l_dma) == 0) {
		ret = l_dma->ops_p->validate_regs(osi_dma);
	}

	return ret;
}
/**
 * @brief osi_txring_empty - Check whether a channel's Tx ring is empty.
 *
 * NOTE(review): assumes osi_dma and tx_ring[chan] are valid — the caller
 * must pass a validated channel; confirm against call sites.
 *
 * @param[in] osi_dma: OSI DMA private data structure.
 * @param[in] chan: Tx DMA channel number.
 *
 * @retval 1 when clean and current indices match (ring empty), 0 otherwise.
 */
nve32_t osi_txring_empty(struct osi_dma_priv_data *osi_dma, nveu32_t chan)
{
	struct osi_tx_ring *tx_ring = osi_dma->tx_ring[chan];
	nve32_t empty = 0;

	if (tx_ring->clean_idx == tx_ring->cur_tx_idx) {
		empty = 1;
	}

	return empty;
}
#endif /* !OSI_STRIPPED_LIB */

View File

File diff suppressed because it is too large Load Diff

View File

File diff suppressed because it is too large Load Diff

View File

@@ -1,68 +0,0 @@
acpi_dock_ops
address_space_operations
backlight_ops
block_device_operations
clk_ops
comedi_lrange
component_ops
dentry_operations
dev_pm_ops
dma_map_ops
driver_info
drm_connector_funcs
drm_encoder_funcs
drm_encoder_helper_funcs
ethtool_ops
extent_io_ops
file_lock_operations
file_operations
hv_ops
ide_dma_ops
ide_port_ops
inode_operations
intel_dvo_dev_ops
irq_domain_ops
item_operations
iwl_cfg
iwl_ops
kgdb_arch
kgdb_io
kset_uevent_ops
lock_manager_operations
machine_desc
microcode_ops
mlxsw_reg_info
mtrr_ops
neigh_ops
net_device_ops
nlmsvc_binding
nvkm_device_chip
of_device_id
pci_raw_ops
phy_ops
pinctrl_ops
pinmux_ops
pipe_buf_operations
platform_hibernation_ops
platform_suspend_ops
proto_ops
regmap_access_table
regulator_ops
rpc_pipe_ops
rtc_class_ops
sd_desc
seq_operations
sirfsoc_padmux
snd_ac97_build_ops
snd_soc_component_driver
soc_pcmcia_socket_ops
stacktrace_ops
sysfs_ops
tty_operations
uart_ops
usb_mon_operations
v4l2_ctrl_ops
v4l2_ioctl_ops
vm_operations_struct
wacom_features
wd_ops

View File

File diff suppressed because it is too large Load Diff