Merge remote-tracking branch 'origin/dev/t264-ethernet' into dev-t264

Bug 4687787

Change-Id: Ib6a8c7e6c6424f2b338efcd88f54fe4343f8036f
Signed-off-by: Bhadram Varka <vbhadram@nvidia.com>
This commit is contained in:
Bhadram Varka
2024-06-07 15:14:03 +00:00
10 changed files with 1656 additions and 443 deletions

View File

@@ -22,6 +22,9 @@ ccflags-y += -DLINUX_OS -DNET30 -DNVPKCS_MACSEC -DLINUX_IVC \
-I$(srctree.nvidia-oot)/drivers/net/ethernet/nvidia/nvethernet/nvethernetrm/include
endif
#ccflags-y += -DOSI_DEBUG -DMACSEC_SUPPORT -DDEBUG_MACSEC -DMACSEC_KEY_PROGRAM
ccflags-y += -DMACSEC_SUPPORT
nvethernet-objs:= ether_linux.o \
osd.o \
ethtool.o \

View File

@@ -39,6 +39,10 @@ struct ether_l2_filter {
nveu32_t index;
/** Ethernet MAC address to be added */
nveu8_t mac_addr[OSI_ETH_ALEN];
/** packet duplication */
nveu32_t pkt_dup;
/** dma channel */
nveu32_t dma_chan;
};
/**

View File

File diff suppressed because it is too large Load Diff

View File

@@ -112,6 +112,7 @@
* @brief Ethernet default PTP clock frequency
*/
#define ETHER_DFLT_PTP_CLK 312500000U
#define ETHER_DFLT_PTP_CLK_UFPGA 78125000U
/**
* @brief Ethernet default PTP default RxQ
@@ -127,22 +128,28 @@
* @brief Ethernet clk rates
*/
#define ETHER_RX_INPUT_CLK_RATE 125000000UL
#define ETHER_MGBE_MAC_DIV_RATE_25G 781250000UL
#define ETHER_MGBE_MAC_DIV_RATE_10G 312500000UL
#define ETHER_MGBE_MAC_DIV_RATE_5G 156250000UL
#define ETHER_MGBE_MAC_DIV_RATE_2_5G 78125000UL
// gbe_pll2_txclkref (644 MHz) --> programmable link TX_CLK divider
// --> link_Tx_clk --> fixed 1/2 gear box divider --> lane TX clk.
#define ETHER_MGBE_TXRX_CLK_XAUI_25G 805664000UL
#define ETHER_MGBE_TX_CLK_USXGMII_10G 644531250UL
#define ETHER_MGBE_TX_CLK_USXGMII_5G 322265625UL
#define ETHER_MGBE_RX_CLK_USXGMII_10G 644531250UL
#define ETHER_MGBE_RX_CLK_USXGMII_5G 322265625UL
#define ETHER_MGBE_TXRX_PCS_CLK_XAUI_25G 390625000UL
#define ETHER_MGBE_TX_PCS_CLK_USXGMII_10G 156250000UL
#define ETHER_MGBE_TX_PCS_CLK_USXGMII_5G 78125000UL
#define ETHER_MGBE_RX_PCS_CLK_USXGMII_10G 156250000UL
#define ETHER_MGBE_RX_PCS_CLK_USXGMII_5G 78125000UL
#define ETHER_EQOS_TX_CLK_2_5G 312500000UL
#define ETHER_EQOS_TX_CLK_1000M 125000000UL
#define ETHER_EQOS_TX_CLK_100M 25000000UL
#define ETHER_EQOS_TX_CLK_10M 2500000UL
#define ETHER_EQOS_UPHY_LX_TX_2_5G_CLK 195312500UL
#define ETHER_EQOS_UPHY_LX_TX_1G_CLK 78125000UL
/**
* @brief 1 Second in Nano Seconds
@@ -167,6 +174,7 @@
*/
#define ETHER_ADDR_REG_CNT_128 128
#define ETHER_ADDR_REG_CNT_64 64
#define ETHER_ADDR_REG_CNT_48 48
#define ETHER_ADDR_REG_CNT_32 32
#define ETHER_ADDR_REG_CNT_1 1
/** @} */
@@ -207,8 +215,10 @@
/**
* @brief Broadcast and MAC address macros
*/
#define ETHER_MAC_ADDRESS_INDEX 1U
#define ETHER_BC_ADDRESS_INDEX 0
#define ETHER_MAC_ADDRESS_INDEX 1U
#define ETHER_BC_ADDRESS_INDEX 0U
#define ETHER_MAC_ADDRESS_INDEX_T26X 0U
#define ETHER_BC_ADDRESS_INDEX_T26X 1U
#define ETHER_ADDRESS_MAC 1
#define ETHER_ADDRESS_BC 0
@@ -388,6 +398,8 @@ struct ether_tx_ts_skb_list {
struct sk_buff *skb;
/** packet id to identify timestamp */
unsigned int pktid;
/** vdmaid to identify timestamp */
unsigned int vdmaid;
/** SKB jiffies to find time */
unsigned long pkt_jiffies;
};
@@ -411,13 +423,13 @@ struct ether_timestamp_skb_list {
*/
struct ether_xtra_stat_counters {
/** rx skb allocation failure count */
nveu64_t re_alloc_rxbuf_failed[OSI_MGBE_MAX_NUM_QUEUES];
nveu64_t re_alloc_rxbuf_failed[OSI_MGBE_MAX_NUM_CHANS];
/** TX per channel interrupt count */
nveu64_t tx_normal_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
nveu64_t tx_normal_irq_n[OSI_MGBE_MAX_NUM_CHANS];
/** TX per channel SW timer callback count */
nveu64_t tx_usecs_swtimer_n[OSI_MGBE_MAX_NUM_QUEUES];
nveu64_t tx_usecs_swtimer_n[OSI_MGBE_MAX_NUM_CHANS];
/** RX per channel interrupt count */
nveu64_t rx_normal_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
nveu64_t rx_normal_irq_n[OSI_MGBE_MAX_NUM_CHANS];
/** link connect count */
nveu64_t link_connect_count;
/** link disconnect count */
@@ -466,6 +478,8 @@ struct ether_priv_data {
struct clk *tx_div_clk;
/** Receive Monitoring clock */
struct clk *rx_m_clk;
/** Transmit Monitoring clock */
struct clk *tx_m_clk;
/** RX PCS monitoring clock */
struct clk *rx_pcs_m_clk;
/** RX PCS input clock */
@@ -645,6 +659,8 @@ struct ether_priv_data {
unsigned int fixed_link;
/** Flag to represent rx_m clk enabled or not */
bool rx_m_enabled;
/** Flag to represent tx_m clk enabled or not */
bool tx_m_enabled;
/** Flag to represent rx_pcs_m clk enabled or not */
bool rx_pcs_m_enabled;
/* Timer value in msec for ether_stats_work thread */
@@ -673,6 +689,8 @@ struct ether_priv_data {
struct hwtstamp_config ptp_config;
/** Flag to hold DT config to disable Rx csum in HW */
uint32_t disable_rx_csum;
/** select Tx queue/dma channel for testing */
unsigned int tx_queue_select;
};
/**

View File

@@ -105,6 +105,16 @@ static const struct ether_stats ether_dstrings_stats[] = {
ETHER_DMA_EXTRA_STAT(tx_clean_n[7]),
ETHER_DMA_EXTRA_STAT(tx_clean_n[8]),
ETHER_DMA_EXTRA_STAT(tx_clean_n[9]),
ETHER_DMA_EXTRA_STAT(tx_clean_n[10]),
ETHER_DMA_EXTRA_STAT(tx_clean_n[11]),
ETHER_DMA_EXTRA_STAT(tx_clean_n[12]),
ETHER_DMA_EXTRA_STAT(tx_clean_n[13]),
ETHER_DMA_EXTRA_STAT(tx_clean_n[14]),
ETHER_DMA_EXTRA_STAT(tx_clean_n[15]),
ETHER_DMA_EXTRA_STAT(tx_clean_n[16]),
ETHER_DMA_EXTRA_STAT(tx_clean_n[17]),
ETHER_DMA_EXTRA_STAT(tx_clean_n[18]),
ETHER_DMA_EXTRA_STAT(tx_clean_n[19]),
/* Tx/Rx frames */
ETHER_DMA_EXTRA_STAT(tx_pkt_n),
@@ -114,26 +124,47 @@ static const struct ether_stats ether_dstrings_stats[] = {
ETHER_DMA_EXTRA_STAT(tx_tso_pkt_n),
/* Tx/Rx frames per channels/queues */
ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[0]),
ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[1]),
ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[2]),
ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[3]),
ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[4]),
ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[5]),
ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[6]),
ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[7]),
ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[8]),
ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[9]),
ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[0]),
ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[1]),
ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[2]),
ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[3]),
ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[4]),
ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[5]),
ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[6]),
ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[7]),
ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[8]),
ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[9]),
ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[0]),
ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[1]),
ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[2]),
ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[3]),
ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[4]),
ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[5]),
ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[6]),
ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[7]),
ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[8]),
ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[9]),
ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[10]),
ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[11]),
ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[12]),
ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[13]),
ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[14]),
ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[15]),
ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[16]),
ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[17]),
ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[18]),
ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[19]),
ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[0]),
ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[1]),
ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[2]),
ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[3]),
ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[4]),
ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[5]),
ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[6]),
ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[7]),
ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[8]),
ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[9]),
ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[10]),
ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[11]),
ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[12]),
ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[13]),
ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[14]),
ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[15]),
ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[16]),
ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[17]),
ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[18]),
ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[19]),
};
/**
@@ -1003,7 +1034,7 @@ static int ether_set_coalesce(struct net_device *dev,
ETHER_MAX_RX_COALESCE_USEC);
return -EINVAL;
} else if (osi_dma->mac == OSI_MAC_HW_MGBE &&
} else if (osi_dma->mac != OSI_MAC_HW_EQOS &&
(ec->rx_coalesce_usecs > ETHER_MAX_RX_COALESCE_USEC ||
ec->rx_coalesce_usecs < ETHER_MGBE_MIN_RX_COALESCE_USEC)) {
netdev_err(dev,

View File

@@ -562,9 +562,11 @@ static int ether_config_l2_filters(struct net_device *dev,
return ret;
}
if (osi_core->use_virtualization == OSI_DISABLE) {
dev_err(pdata->dev, "%s Ethernet virtualization is not enabled\n", __func__);
return ret;
if (osi_core->pre_sil != OSI_ENABLE) {
if (osi_core->use_virtualization == OSI_DISABLE) {
dev_err(pdata->dev, "%s Ethernet virtualization is not enabled\n", __func__);
return ret;
}
}
if (copy_from_user(&u_l2_filter, (void __user *)ifdata->ptr,
sizeof(struct ether_l2_filter)) != 0U) {
@@ -589,8 +591,13 @@ static int ether_config_l2_filters(struct net_device *dev,
u_l2_filter.mac_addr, ETH_ALEN);
ioctl_data.l2_filter.dma_routing = OSI_ENABLE;
ioctl_data.l2_filter.addr_mask = OSI_DISABLE;
ioctl_data.l2_filter.dma_chan = osi_dma->dma_chans[0];
ioctl_data.l2_filter.dma_chansel = OSI_BIT(osi_dma->dma_chans[0]);
ioctl_data.l2_filter.pkt_dup = u_l2_filter.pkt_dup;
if (ioctl_data.l2_filter.pkt_dup) {
ioctl_data.l2_filter.dma_chan = u_l2_filter.dma_chan;
} else {
ioctl_data.l2_filter.dma_chan = osi_dma->dma_chans[0];
}
ioctl_data.l2_filter.dma_chansel = OSI_BIT_64(ioctl_data.l2_filter.dma_chan);
ioctl_data.cmd = OSI_CMD_L2_FILTER;
return osi_handle_ioctl(osi_core, &ioctl_data);
}

View File

@@ -100,9 +100,9 @@ static int macsec_disable_car(struct macsec_priv_data *macsec_pdata)
struct ether_priv_data *pdata = macsec_pdata->ether_pdata;
PRINT_ENTRY();
if (pdata->osi_core->mac == OSI_MAC_HW_MGBE) {
if (!IS_ERR_OR_NULL(macsec_pdata->mgbe_clk)) {
clk_disable_unprepare(macsec_pdata->mgbe_clk);
if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) {
if (!IS_ERR_OR_NULL(macsec_pdata->macsec_clk)) {
clk_disable_unprepare(macsec_pdata->macsec_clk);
}
} else {
if (!IS_ERR_OR_NULL(macsec_pdata->eqos_tx_clk)) {
@@ -129,9 +129,9 @@ static int macsec_enable_car(struct macsec_priv_data *macsec_pdata)
int ret = 0;
PRINT_ENTRY();
if (pdata->osi_core->mac == OSI_MAC_HW_MGBE) {
if (!IS_ERR_OR_NULL(macsec_pdata->mgbe_clk)) {
ret = clk_prepare_enable(macsec_pdata->mgbe_clk);
if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) {
if (!IS_ERR_OR_NULL(macsec_pdata->macsec_clk)) {
ret = clk_prepare_enable(macsec_pdata->macsec_clk);
if (ret < 0) {
dev_err(dev, "failed to enable macsec clk\n");
goto exit;
@@ -166,9 +166,9 @@ static int macsec_enable_car(struct macsec_priv_data *macsec_pdata)
goto exit;
err_ns_rst:
if (pdata->osi_core->mac == OSI_MAC_HW_MGBE) {
if (!IS_ERR_OR_NULL(macsec_pdata->mgbe_clk)) {
clk_disable_unprepare(macsec_pdata->mgbe_clk);
if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) {
if (!IS_ERR_OR_NULL(macsec_pdata->macsec_clk)) {
clk_disable_unprepare(macsec_pdata->macsec_clk);
}
} else {
if (!IS_ERR_OR_NULL(macsec_pdata->eqos_rx_clk)) {
@@ -223,7 +223,7 @@ int macsec_open(struct macsec_priv_data *macsec_pdata,
IRQF_TRIGGER_NONE, macsec_pdata->irq_name[0],
macsec_pdata);
if (ret < 0) {
dev_err(dev, "failed to request irq %d\n", __LINE__);
dev_err(dev, "failed to request irq %d\n", ret);
goto exit;
}
@@ -246,7 +246,7 @@ int macsec_open(struct macsec_priv_data *macsec_pdata,
macsec_pdata);
#endif
if (ret < 0) {
dev_err(dev, "failed to request irq %d\n", __LINE__);
dev_err(dev, "failed to request irq %d\n", ret);
goto err_ns_irq;
}
@@ -297,7 +297,26 @@ static int macsec_get_platform_res(struct macsec_priv_data *macsec_pdata)
int ret = 0;
PRINT_ENTRY();
/* 1. Get resets */
/* Get irqs */
macsec_pdata->ns_irq = platform_get_irq_byname(pdev, "macsec-ns-irq");
if (macsec_pdata->ns_irq < 0) {
dev_err(dev, "failed to get macsec-ns-irq\n");
ret = macsec_pdata->ns_irq;
goto exit;
}
macsec_pdata->s_irq = platform_get_irq_byname(pdev, "macsec-s-irq");
if (macsec_pdata->s_irq < 0) {
dev_err(dev, "failed to get macsec-s-irq\n");
ret = macsec_pdata->s_irq;
goto exit;
}
if (pdata->osi_core->pre_sil == 0x1U) {
dev_warn(dev, "%s: Pre-silicon simulation, skipping reset/clk config\n", __func__);
goto exit;
}
/* Get resets */
macsec_pdata->ns_rst = devm_reset_control_get(dev, "macsec_ns_rst");
if (IS_ERR_OR_NULL(macsec_pdata->ns_rst)) {
dev_err(dev, "Failed to get macsec_ns_rst\n");
@@ -305,12 +324,16 @@ static int macsec_get_platform_res(struct macsec_priv_data *macsec_pdata)
goto exit;
}
/* 2. Get clks */
if (pdata->osi_core->mac == OSI_MAC_HW_MGBE) {
macsec_pdata->mgbe_clk = devm_clk_get(dev, "mgbe_macsec");
if (IS_ERR(macsec_pdata->mgbe_clk)) {
/* Get clks */
if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) {
if (pdata->osi_core->mac_ver == OSI_MGBE_MAC_3_10) {
macsec_pdata->macsec_clk = devm_clk_get(dev, "mgbe_macsec");
} else {
macsec_pdata->macsec_clk = devm_clk_get(dev, "macsec");
}
if (IS_ERR(macsec_pdata->macsec_clk)) {
dev_err(dev, "failed to get macsec clk\n");
ret = PTR_ERR(macsec_pdata->mgbe_clk);
ret = PTR_ERR(macsec_pdata->macsec_clk);
goto exit;
}
} else {
@@ -329,21 +352,6 @@ static int macsec_get_platform_res(struct macsec_priv_data *macsec_pdata)
}
}
/* 3. Get irqs */
macsec_pdata->ns_irq = platform_get_irq_byname(pdev, "macsec-ns-irq");
if (macsec_pdata->ns_irq < 0) {
dev_err(dev, "failed to get macsec-ns-irq\n");
ret = macsec_pdata->ns_irq;
goto exit;
}
macsec_pdata->s_irq = platform_get_irq_byname(pdev, "macsec-s-irq");
if (macsec_pdata->s_irq < 0) {
dev_err(dev, "failed to get macsec-s-irq\n");
ret = macsec_pdata->s_irq;
goto exit;
}
exit:
PRINT_EXIT();
return ret;
@@ -355,9 +363,9 @@ static void macsec_release_platform_res(struct macsec_priv_data *macsec_pdata)
struct device *dev = pdata->dev;
PRINT_ENTRY();
if (pdata->osi_core->mac == OSI_MAC_HW_MGBE) {
if (!IS_ERR_OR_NULL(macsec_pdata->mgbe_clk)) {
devm_clk_put(dev, macsec_pdata->mgbe_clk);
if (pdata->osi_core->mac != OSI_MAC_HW_EQOS) {
if (!IS_ERR_OR_NULL(macsec_pdata->macsec_clk)) {
devm_clk_put(dev, macsec_pdata->macsec_clk);
}
} else {
if (!IS_ERR_OR_NULL(macsec_pdata->eqos_tx_clk)) {
@@ -500,6 +508,12 @@ static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa,
if (tb_sa[NV_MACSEC_SA_ATTR_LOWEST_PN]) {
sc_info->lowest_pn = nla_get_u32(tb_sa[NV_MACSEC_SA_ATTR_LOWEST_PN]);
}
if (tb_sa[NV_MACSEC_SA_ATTR_CONF_OFFSET]) {
sc_info->conf_offset = nla_get_u8(tb_sa[NV_MACSEC_SA_ATTR_CONF_OFFSET]);
}
if (tb_sa[NV_MACSEC_SA_ATTR_ENCRYPT]) {
sc_info->encrypt = nla_get_u8(tb_sa[NV_MACSEC_SA_ATTR_ENCRYPT]);
}
#ifdef NVPKCS_MACSEC
if (pkcs) {
if (tb_sa[NV_MACSEC_SA_PKCS_KEY_WRAP]) {
@@ -1374,6 +1388,15 @@ int macsec_probe(struct ether_priv_data *pdata)
mutex_init(&pdata->macsec_pdata->lock);
/* Read MAC instance id and used in TZ api's */
ret = of_property_read_u32(np, "nvidia,instance_id", &macsec_pdata->id);
if (ret != 0) {
dev_info(dev,
"DT instance_id missing, setting default to MGBE0\n");
macsec_pdata->id = 0;
}
osi_core->instance_id = macsec_pdata->id;
/* Get OSI MACsec ops */
if (osi_init_macsec_ops(osi_core) != 0) {
dev_err(dev, "osi_init_macsec_ops failed\n");
@@ -1406,6 +1429,7 @@ int macsec_probe(struct ether_priv_data *pdata)
macsec_pdata->nv_macsec_fam.module = THIS_MODULE;
macsec_pdata->nv_macsec_fam.ops = nv_macsec_genl_ops;
macsec_pdata->nv_macsec_fam.n_ops = ARRAY_SIZE(nv_macsec_genl_ops);
macsec_pdata->nv_macsec_fam.policy = nv_macsec_genl_policy;
if (macsec_pdata->is_nv_macsec_fam_registered == OSI_DISABLE) {
if (strlen(netdev_name(pdata->ndev)) >= GENL_NAMSIZ) {
dev_err(dev, "Intf name %s of len %lu exceed nl_family name size\n",

View File

@@ -7,14 +7,19 @@
#include <osi_macsec.h>
#include <linux/random.h>
#include <net/genetlink.h>
#include <linux/crypto.h>
#include <crypto/internal/cipher.h>
/**
* @brief Expected number of inputs in BYP or SCI LUT sysfs config
*/
#define LUT_INPUTS_LEN 39
/**
* @brief Maximum entries per 1 sysfs node
*/
#define MAX_ENTRIES_PER_SYSFS_NODE 24
#define MAX_SA_ENTRIES_PER_SYSFS_NODE 33U
/**
* @brief Expected number of extra inputs in BYP LUT sysfs config
*/
@@ -61,6 +66,8 @@ enum nv_macsec_sa_attrs {
NV_MACSEC_SA_ATTR_AN,
NV_MACSEC_SA_ATTR_PN,
NV_MACSEC_SA_ATTR_LOWEST_PN,
NV_MACSEC_SA_ATTR_CONF_OFFSET,
NV_MACSEC_SA_ATTR_ENCRYPT,
#ifdef NVPKCS_MACSEC
NV_MACSEC_SA_PKCS_KEY_WRAP,
NV_MACSEC_SA_PKCS_KEK_HANDLE,
@@ -119,6 +126,8 @@ static const struct nla_policy nv_macsec_sa_genl_policy[NUM_NV_MACSEC_SA_ATTR] =
[NV_MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
[NV_MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
[NV_MACSEC_SA_ATTR_LOWEST_PN] = { .type = NLA_U32 },
[NV_MACSEC_SA_ATTR_CONF_OFFSET] = { .type = NLA_U8 },
[NV_MACSEC_SA_ATTR_ENCRYPT] = { .type = NLA_U8 },
#ifdef NVPKCS_MACSEC
[NV_MACSEC_SA_PKCS_KEY_WRAP] = { .type = NLA_BINARY,
.len = NV_SAK_WRAPPED_LEN,},
@@ -135,9 +144,9 @@ static const struct nla_policy nv_macsec_tz_genl_policy[NUM_NV_MACSEC_TZ_ATTR] =
[NV_MACSEC_TZ_ATTR_RW] = { .type = NLA_U8 },
[NV_MACSEC_TZ_ATTR_INDEX] = { .type = NLA_U8 },
#ifdef NVPKCS_MACSEC
[NV_MACSEC_SA_PKCS_KEY_WRAP] = { .type = NLA_BINARY,
[NV_MACSEC_TZ_PKCS_KEY_WRAP] = { .type = NLA_BINARY,
.len = NV_SAK_WRAPPED_LEN,},
[NV_MACSEC_SA_PKCS_KEK_HANDLE] = { .type = NLA_U64 },
[NV_MACSEC_TZ_PKCS_KEK_HANDLE] = { .type = NLA_U64 },
#else
[NV_MACSEC_TZ_ATTR_KEY] = { .type = NLA_BINARY,
.len = OSI_KEY_LEN_256 },
@@ -154,6 +163,7 @@ static const struct nla_policy nv_macsec_genl_policy[NUM_NV_MACSEC_ATTR] = {
[NV_MACSEC_ATTR_TXSC_PORT] = { .type = NLA_U16 },
[NV_MACSEC_ATTR_REPLAY_PROT_EN] = { .type = NLA_U32 },
[NV_MACSEC_ATTR_REPLAY_WINDOW] = { .type = NLA_U32 },
[NV_MACSEC_ATTR_CIPHER_SUITE] = { .type = NLA_U32 },
[NV_MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
[NV_MACSEC_ATTR_TZ_CONFIG] = { .type = NLA_NESTED },
[NV_MACSEC_ATTR_TZ_KT_RESET] = { .type = NLA_NESTED },
@@ -209,11 +219,11 @@ struct nvpkcs_data {
struct macsec_priv_data {
/** Non secure reset */
struct reset_control *ns_rst;
/** MGBE Macsec clock */
struct clk *mgbe_clk;
/** EQOS Macsec TX clock */
/** MGBE/EQOS Macsec clock */
struct clk *macsec_clk;
/** T23x EQOS Macsec TX clock */
struct clk *eqos_tx_clk;
/** EQOS Macsec RX clock */
/** T23x EQOS Macsec RX clock */
struct clk *eqos_rx_clk;
/** Secure irq */
int s_irq;
@@ -243,6 +253,8 @@ struct macsec_priv_data {
unsigned short next_supp_idx;
/** macsec mutex lock */
struct mutex lock;
/** macsec hw instance id */
unsigned int id;
/** Macsec enable flag in DT */
unsigned int is_macsec_enabled_in_dt;
/** Context family name */

View File

@@ -27,7 +27,7 @@ static inline unsigned int ether_get_free_tx_ts_node(struct ether_priv_data *pda
}
static inline void add_skb_node(struct ether_priv_data *pdata, struct sk_buff *skb,
unsigned int pktid) {
unsigned int pktid, unsigned int vdmaid) {
struct list_head *head_node, *temp_head_node;
struct ether_tx_ts_skb_list *pnode = NULL;
unsigned int idx;
@@ -72,6 +72,7 @@ empty:
pnode = &pdata->tx_ts_skb[idx];
pnode->skb = skb;
pnode->pktid = pktid;
pnode->vdmaid = vdmaid;
pnode->pkt_jiffies = now_jiffies;
dev_dbg(pdata->dev, "%s() SKB %p added for pktid = %x time=%lu\n",
@@ -790,7 +791,7 @@ static void osd_transmit_complete(void *priv, const struct osi_tx_swcx *swcx,
ndev->stats.tx_packets++;
if ((txdone_pkt_cx->flags & OSI_TXDONE_CX_TS_DELAYED) ==
OSI_TXDONE_CX_TS_DELAYED) {
add_skb_node(pdata, skb, txdone_pkt_cx->pktid);
add_skb_node(pdata, skb, txdone_pkt_cx->pktid, txdone_pkt_cx->vdmaid);
/* Consume the timestamp immediately if already available */
if (ether_get_tx_ts(pdata) < 0)
schedule_delayed_work(&pdata->tx_ts_work,

View File

File diff suppressed because it is too large Load Diff