nvethernet: T264 Enable 20 VDMA channel support

Ported from -
https://git-master.nvidia.com/r/c/nvethernet-docs/+/2940794

Bug 4043836

Change-Id: I763bfdaa78082de035219e10ef1c131bdbba5e35
Signed-off-by: Mahesh Patil <maheshp@nvidia.com>
Author:    Mahesh Patil
Date:      2023-07-21 16:49:59 -07:00
Committed: Bhadram Varka
Parent:    e44802987e
Commit:    5edac49a28

5 changed files with 208 additions and 58 deletions

--- File 1 of 5 ---

@@ -1829,8 +1829,13 @@ static void free_rx_dma_resources(struct osi_dma_priv_data *osi_dma,
 	unsigned long rx_desc_size = sizeof(struct osi_rx_desc) * osi_dma->rx_ring_sz;
 	struct osi_rx_ring *rx_ring = NULL;
 	unsigned int i, chan;
+	const nveu32_t max_dma_chan[OSI_MAX_MAC_IP_TYPES] = {
+		OSI_EQOS_MAX_NUM_CHANS,
+		OSI_MGBE_T23X_MAX_NUM_CHANS,
+		OSI_MGBE_MAX_NUM_CHANS
+	};
 
-	for (i = 0; i < OSI_MGBE_MAX_NUM_CHANS; i++) {
+	for (i = 0; i < max_dma_chan[osi_dma->mac]; i++) {
 		rx_ring = osi_dma->rx_ring[i];
 		chan = osi_dma->dma_chans[i];
@@ -2048,8 +2053,13 @@ static int ether_allocate_rx_dma_resources(struct osi_dma_priv_data *osi_dma,
 	unsigned int chan;
 	unsigned int i;
 	int ret = 0;
+	const nveu32_t max_dma_chan[OSI_MAX_MAC_IP_TYPES] = {
+		OSI_EQOS_MAX_NUM_CHANS,
+		OSI_MGBE_T23X_MAX_NUM_CHANS,
+		OSI_MGBE_MAX_NUM_CHANS
+	};
 
-	for (i = 0; i < OSI_MGBE_MAX_NUM_CHANS; i++) {
+	for (i = 0; i < max_dma_chan[osi_dma->mac]; i++) {
 		chan = osi_dma->dma_chans[i];
 
 		if (chan != ETHER_INVALID_CHAN_NUM) {
@@ -2097,8 +2107,13 @@ static void free_tx_dma_resources(struct osi_dma_priv_data *osi_dma,
 	unsigned long tx_desc_size = sizeof(struct osi_tx_desc) * osi_dma->tx_ring_sz;
 	struct osi_tx_ring *tx_ring = NULL;
 	unsigned int i;
+	const nveu32_t max_dma_chan[OSI_MAX_MAC_IP_TYPES] = {
+		OSI_EQOS_MAX_NUM_CHANS,
+		OSI_MGBE_T23X_MAX_NUM_CHANS,
+		OSI_MGBE_MAX_NUM_CHANS
+	};
 
-	for (i = 0; i < OSI_MGBE_MAX_NUM_CHANS; i++) {
+	for (i = 0; i < max_dma_chan[osi_dma->mac]; i++) {
 		tx_ring = osi_dma->tx_ring[i];
 
 		if (tx_ring != NULL) {
@@ -2200,8 +2215,13 @@ static int ether_allocate_tx_dma_resources(struct osi_dma_priv_data *osi_dma,
 	unsigned int chan;
 	unsigned int i;
 	int ret = 0;
+	const nveu32_t max_dma_chan[OSI_MAX_MAC_IP_TYPES] = {
+		OSI_EQOS_MAX_NUM_CHANS,
+		OSI_MGBE_T23X_MAX_NUM_CHANS,
+		OSI_MGBE_MAX_NUM_CHANS
+	};
 
-	for (i = 0; i < OSI_MGBE_MAX_NUM_CHANS; i++) {
+	for (i = 0; i < max_dma_chan[osi_dma->mac]; i++) {
 		chan = osi_dma->dma_chans[i];
 
 		if (chan != ETHER_INVALID_CHAN_NUM) {
@@ -2237,13 +2257,18 @@ exit:
 static void ether_init_invalid_chan_ring(struct osi_dma_priv_data *osi_dma)
 {
 	unsigned int i;
+	const nveu32_t max_dma_chan[OSI_MAX_MAC_IP_TYPES] = {
+		OSI_EQOS_MAX_NUM_CHANS,
+		OSI_MGBE_T23X_MAX_NUM_CHANS,
+		OSI_MGBE_MAX_NUM_CHANS
+	};
 
-	for (i = 0; i < OSI_MGBE_MAX_NUM_CHANS; i++) {
+	for (i = 0; i < max_dma_chan[osi_dma->mac]; i++) {
 		osi_dma->tx_ring[i] = NULL;
 		osi_dma->rx_ring[i] = NULL;
 	}
 
-	for (i = osi_dma->num_dma_chans; i < OSI_MGBE_MAX_NUM_CHANS; i++) {
+	for (i = osi_dma->num_dma_chans; i < max_dma_chan[osi_dma->mac]; i++) {
 		osi_dma->dma_chans[i] = ETHER_INVALID_CHAN_NUM;
 	}
 }
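The same five-line max_dma_chan[] table is open-coded in every function above: instead of looping to the compile-time OSI_MGBE_MAX_NUM_CHANS, each loop now stops at the limit of the MAC IP actually probed. A standalone sketch of the pattern follows; the channel counts are illustrative assumptions (the diff itself only establishes 10 entries for T23x MGBE and 20 for T264 via the ethtool tables), not the OSI header definitions. A shared helper returning the limit would avoid repeating the table, at the cost of a call per site.

/* Per-MAC channel-limit lookup, as used throughout this change.
 * Standalone sketch; enum names and counts are stand-ins, not the
 * driver's OSI definitions. */
#include <stdio.h>

enum mac_ip_type { MAC_EQOS, MAC_MGBE_T23X, MAC_MGBE_T26X, MAX_MAC_IP_TYPES };

static const unsigned int max_dma_chan[MAX_MAC_IP_TYPES] = {
	8,	/* stand-in for OSI_EQOS_MAX_NUM_CHANS (assumed value) */
	10,	/* stand-in for OSI_MGBE_T23X_MAX_NUM_CHANS */
	20,	/* stand-in for OSI_MGBE_MAX_NUM_CHANS (T264) */
};

int main(void)
{
	enum mac_ip_type mac = MAC_MGBE_T26X;
	unsigned int i;

	/* Touch only the channels that exist for this MAC. */
	for (i = 0; i < max_dma_chan[mac]; i++)
		printf("init channel %u\n", i);

	return 0;
}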
@@ -3404,11 +3429,15 @@ static unsigned short ether_select_queue(struct net_device *dev,
 		priority = skb_vlan_tag_get_prio(skb);
 	}
 
-	for (i = 0; i < osi_core->num_mtl_queues; i++) {
-		mtlq = osi_core->mtl_queues[i];
-		if (pdata->txq_prio[mtlq] == priority) {
-			txqueue_select = (unsigned short)i;
-			break;
-		}
-	}
+	if ((osi_core->pre_sil == OSI_ENABLE) && (pdata->tx_queue_select != 0U)) {
+		txqueue_select = pdata->tx_queue_select;
+	} else {
+		for (i = 0; i < osi_core->num_mtl_queues; i++) {
+			mtlq = osi_core->mtl_queues[i];
+			if (pdata->txq_prio[mtlq] == priority) {
+				txqueue_select = (unsigned short)i;
+				break;
+			}
+		}
+	}
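This is the hook for the pre-silicon Tx-queue override: when pre_sil is enabled and a non-zero queue was picked (via the mac_tx_q sysfs node added later in this commit), every packet goes to that queue; otherwise the existing priority-to-MTL-queue mapping applies. A minimal sketch of the resulting selection, with simplified stand-in parameters:

/* Sketch of the queue selection after this change; types and names
 * are simplified stand-ins for the driver's structures. */
static unsigned short select_txq(unsigned int pre_sil,
				 unsigned int tx_queue_select,
				 const unsigned int *mtl_queues,
				 const unsigned int *txq_prio,
				 unsigned int num_mtl_queues,
				 unsigned int priority)
{
	unsigned short txqueue_select = 0;
	unsigned int i, mtlq;

	if ((pre_sil == 1U) && (tx_queue_select != 0U)) {
		/* Pre-silicon debug override: force one queue. */
		txqueue_select = (unsigned short)tx_queue_select;
	} else {
		/* Normal path: find the queue configured with this
		 * skb priority. */
		for (i = 0; i < num_mtl_queues; i++) {
			mtlq = mtl_queues[i];
			if (txq_prio[mtlq] == priority) {
				txqueue_select = (unsigned short)i;
				break;
			}
		}
	}

	return txqueue_select;
}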
@@ -5987,9 +6016,28 @@ static int ether_parse_dt(struct ether_priv_data *pdata)
 		}
 	}
 
-	if (osi_dma->num_dma_chans != osi_core->num_mtl_queues) {
-		dev_err(dev, "mismatch in numbers of DMA channel and MTL Q\n");
-		return -EINVAL;
-	}
+	if (osi_core->mac != OSI_MAC_HW_MGBE_T26X) {
+		if (osi_dma->num_dma_chans != osi_core->num_mtl_queues) {
+			dev_err(dev, "mismatch in numbers of DMA channel and MTL Q\n");
+			return -EINVAL;
+		}
+
+		for (i = 0; i < osi_dma->num_dma_chans; i++) {
+			if (osi_dma->dma_chans[i] != osi_core->mtl_queues[i]) {
+				dev_err(dev,
+					"mismatch in DMA channel and MTL Q number at index %d\n",
+					i);
+				return -EINVAL;
+			}
+
+			if (osi_dma->dma_chans[i] == 0) {
+				ret = 0;
+			}
+		}
+
+		if (ret != 0) {
+			dev_err(dev, "Q0 Must be enabled for rx path\n");
+			return -EINVAL;
+		}
+	}
 
 	/* Allow to set non zero DMA channel for virtualization */
@@ -6000,23 +6048,6 @@ static int ether_parse_dt(struct ether_priv_data *pdata)
 		ret = -1;
 	}
 
-	for (i = 0; i < osi_dma->num_dma_chans; i++) {
-		if (osi_dma->dma_chans[i] != osi_core->mtl_queues[i]) {
-			dev_err(dev,
-				"mismatch in DMA channel and MTL Q number at index %d\n",
-				i);
-			return -EINVAL;
-		}
-		if (osi_dma->dma_chans[i] == 0) {
-			ret = 0;
-		}
-	}
-
-	if (ret != 0) {
-		dev_err(dev, "Q0 Must be enabled for rx path\n");
-		return -EINVAL;
-	}
-
 	ret = of_property_read_u32_array(np, "nvidia,rxq_enable_ctrl",
 					 tmp_value,
 					 osi_core->num_mtl_queues);
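Net effect of these two hunks: the DMA-channel/MTL-queue consistency checks move inside a non-T26x guard. On T264 the old rules cannot hold, since 20 VDMA channels are enabled while the per-queue ethtool tables (and hence MTL queues) stay at 10, so a 1:1 channel-to-queue mapping is no longer required there. The rules kept for earlier MACs, condensed into a hypothetical helper (simplified types, illustration only):

/* Sketch of the DT sanity rules retained for pre-T264 MACs.
 * Returns 0 on success, -1 on any violation. */
static int validate_chan_map(const unsigned int *dma_chans,
			     unsigned int num_dma_chans,
			     const unsigned int *mtl_queues,
			     unsigned int num_mtl_queues)
{
	unsigned int i;
	int have_q0 = -1;

	/* Channels and queues must pair 1:1... */
	if (num_dma_chans != num_mtl_queues)
		return -1;

	for (i = 0; i < num_dma_chans; i++) {
		/* ...and each channel must match its queue number. */
		if (dma_chans[i] != mtl_queues[i])
			return -1;
		if (dma_chans[i] == 0)
			have_q0 = 0;
	}

	/* Queue 0 carries the default Rx path, so it must be enabled. */
	return have_q0;
}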
@@ -6440,7 +6471,7 @@ static void ether_get_num_dma_chan_mtl_q(struct platform_device *pdev,
 	ret = of_device_is_compatible(np, "nvidia,nvmgbe");
 	if (ret != 0) {
 		*mac = OSI_MAC_HW_MGBE;
-		max_chans = OSI_MGBE_MAX_NUM_CHANS;
+		max_chans = OSI_MGBE_T23X_MAX_NUM_CHANS;
 	}
 
 	ret = of_device_is_compatible(np, "nvidia,tegra234-eqos");
@@ -6452,7 +6483,7 @@ static void ether_get_num_dma_chan_mtl_q(struct platform_device *pdev,
 	ret = of_device_is_compatible(np, "nvidia,tegra234-mgbe");
 	if (ret != 0) {
 		*mac = OSI_MAC_HW_MGBE;
-		max_chans = OSI_MGBE_MAX_NUM_PDMA_CHANS;
+		max_chans = OSI_MGBE_T23X_MAX_NUM_CHANS;
 	}
 
 	if (of_device_is_compatible(np, "nvidia,tegra264-mgbe")) {
@@ -6700,6 +6731,11 @@ static int ether_probe(struct platform_device *pdev)
 	struct net_device *ndev;
 	int ret = 0, i;
 	const char *if_name;
+	const nveu32_t max_dma_chan[OSI_MAX_MAC_IP_TYPES] = {
+		OSI_EQOS_MAX_NUM_CHANS,
+		OSI_MGBE_T23X_MAX_NUM_CHANS,
+		OSI_MGBE_MAX_NUM_CHANS
+	};
 
 	ether_get_num_dma_chan_mtl_q(pdev, &num_dma_chans,
 				     &mac, &num_mtl_queues);
@@ -6852,7 +6888,7 @@ static int ether_probe(struct platform_device *pdev)
 	/* store enabled dma channels into osi_core */
 	osi_core->num_dma_chans = osi_dma->num_dma_chans;
 	memcpy(osi_core->dma_chans, osi_dma->dma_chans,
-	       (sizeof(nveu32_t) * OSI_MGBE_MAX_NUM_CHANS));
+	       (sizeof(nveu32_t) * max_dma_chan[mac]));
 
 	ndev->netdev_ops = &ether_netdev_ops;
 	ether_set_ethtool_ops(ndev);

--- File 2 of 5 ---

@@ -673,6 +673,8 @@ struct ether_priv_data {
 	struct hwtstamp_config ptp_config;
 	/** Flag to hold DT config to disable Rx csum in HW */
 	uint32_t disable_rx_csum;
+	/** select Tx queue/dma channel for testing */
+	unsigned int tx_queue_select;
 };
 
 /**

--- File 3 of 5 ---

@@ -105,6 +105,16 @@ static const struct ether_stats ether_dstrings_stats[] = {
 	ETHER_DMA_EXTRA_STAT(tx_clean_n[7]),
 	ETHER_DMA_EXTRA_STAT(tx_clean_n[8]),
 	ETHER_DMA_EXTRA_STAT(tx_clean_n[9]),
+	ETHER_DMA_EXTRA_STAT(tx_clean_n[10]),
+	ETHER_DMA_EXTRA_STAT(tx_clean_n[11]),
+	ETHER_DMA_EXTRA_STAT(tx_clean_n[12]),
+	ETHER_DMA_EXTRA_STAT(tx_clean_n[13]),
+	ETHER_DMA_EXTRA_STAT(tx_clean_n[14]),
+	ETHER_DMA_EXTRA_STAT(tx_clean_n[15]),
+	ETHER_DMA_EXTRA_STAT(tx_clean_n[16]),
+	ETHER_DMA_EXTRA_STAT(tx_clean_n[17]),
+	ETHER_DMA_EXTRA_STAT(tx_clean_n[18]),
+	ETHER_DMA_EXTRA_STAT(tx_clean_n[19]),
 
 	/* Tx/Rx frames */
 	ETHER_DMA_EXTRA_STAT(tx_pkt_n),
@@ -114,26 +124,47 @@ static const struct ether_stats ether_dstrings_stats[] = {
 	ETHER_DMA_EXTRA_STAT(tx_tso_pkt_n),
 
 	/* Tx/Rx frames per channels/queues */
 	ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[0]),
 	ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[1]),
 	ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[2]),
 	ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[3]),
 	ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[4]),
 	ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[5]),
 	ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[6]),
 	ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[7]),
 	ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[8]),
 	ETHER_DMA_EXTRA_STAT(q_tx_pkt_n[9]),
 	ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[0]),
 	ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[1]),
 	ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[2]),
 	ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[3]),
 	ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[4]),
 	ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[5]),
 	ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[6]),
 	ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[7]),
 	ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[8]),
 	ETHER_DMA_EXTRA_STAT(q_rx_pkt_n[9]),
 	ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[0]),
 	ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[1]),
 	ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[2]),
 	ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[3]),
 	ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[4]),
 	ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[5]),
 	ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[6]),
 	ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[7]),
 	ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[8]),
 	ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[9]),
+	ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[10]),
+	ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[11]),
+	ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[12]),
+	ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[13]),
+	ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[14]),
+	ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[15]),
+	ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[16]),
+	ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[17]),
+	ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[18]),
+	ETHER_DMA_EXTRA_STAT(chan_tx_pkt_n[19]),
 	ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[0]),
 	ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[1]),
 	ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[2]),
 	ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[3]),
 	ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[4]),
 	ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[5]),
 	ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[6]),
 	ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[7]),
 	ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[8]),
 	ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[9]),
+	ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[10]),
+	ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[11]),
+	ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[12]),
+	ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[13]),
+	ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[14]),
+	ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[15]),
+	ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[16]),
+	ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[17]),
+	ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[18]),
+	ETHER_DMA_EXTRA_STAT(chan_rx_pkt_n[19]),
 };
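The per-channel entries (tx_clean_n, chan_tx_pkt_n, chan_rx_pkt_n) grow from 10 to 20 to cover the T264 channel count, while q_tx_pkt_n/q_rx_pkt_n stay at 10 MTL queues. For context, here is a sketch of how a name table like this is typically paired with stat storage in ethtool callbacks; ETHER_DMA_EXTRA_STAT's real expansion is not shown in this diff, so the macro and struct below are assumed equivalents:

#include <stddef.h>

/* Assumed stat storage, sized for 20 channels. */
struct dma_extra_stats {
	unsigned long long tx_clean_n[20];
	unsigned long long chan_tx_pkt_n[20];
	unsigned long long chan_rx_pkt_n[20];
};

struct stat_desc {
	const char *name;	/* reported via get_strings */
	size_t offset;		/* location within the stats struct */
};

#define DMA_EXTRA_STAT(s) { #s, offsetof(struct dma_extra_stats, s) }

static const struct stat_desc dstrings[] = {
	DMA_EXTRA_STAT(tx_clean_n[19]),
	DMA_EXTRA_STAT(chan_tx_pkt_n[19]),
	DMA_EXTRA_STAT(chan_rx_pkt_n[19]),
};

/* get_ethtool_stats-style copy: one u64 per table entry, fetched
 * through the recorded offset. */
static void fill_stats(const struct dma_extra_stats *src,
		       unsigned long long *data)
{
	size_t i;

	for (i = 0; i < sizeof(dstrings) / sizeof(dstrings[0]); i++)
		data[i] = *(const unsigned long long *)
			  ((const char *)src + dstrings[i].offset);
}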
/**
@@ -1003,7 +1034,7 @@ static int ether_set_coalesce(struct net_device *dev,
 			   ETHER_MAX_RX_COALESCE_USEC);
 		return -EINVAL;
-	} else if (osi_dma->mac == OSI_MAC_HW_MGBE &&
+	} else if (osi_dma->mac != OSI_MAC_HW_EQOS &&
 		   (ec->rx_coalesce_usecs > ETHER_MAX_RX_COALESCE_USEC ||
 		    ec->rx_coalesce_usecs < ETHER_MGBE_MIN_RX_COALESCE_USEC)) {
 		netdev_err(dev,
--- File 4 of 5 ---

@@ -562,9 +562,11 @@ static int ether_config_l2_filters(struct net_device *dev,
 		return ret;
 	}
 
-	if (osi_core->use_virtualization == OSI_DISABLE) {
-		dev_err(pdata->dev, "%s Ethernet virualization is not enabled\n", __func__);
-		return ret;
+	if (osi_core->pre_sil != OSI_ENABLE) {
+		if (osi_core->use_virtualization == OSI_DISABLE) {
+			dev_err(pdata->dev, "%s Ethernet virualization is not enabled\n", __func__);
+			return ret;
+		}
 	}
 
 	if (copy_from_user(&u_l2_filter, (void __user *)ifdata->ptr,
 			   sizeof(struct ether_l2_filter)) != 0U) {
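Pre-sil platforms skip the virtualization requirement for this ioctl; everything else is unchanged. The nested ifs are equivalent to one combined predicate, shown here only as a sketch of the control flow:

/* Equivalent gating as a single condition (sketch): on silicon the
 * ioctl still requires virtualization; pre-sil builds bypass the check. */
if ((osi_core->pre_sil != OSI_ENABLE) &&
    (osi_core->use_virtualization == OSI_DISABLE)) {
	dev_err(pdata->dev, "%s Ethernet virtualization is not enabled\n", __func__);
	return ret;
}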

--- File 5 of 5 ---

@@ -76,6 +76,84 @@ static ssize_t ether_desc_dump_store(struct device *dev,
 static DEVICE_ATTR(desc_dump_enable, (S_IRUGO | S_IWUSR),
 		   ether_desc_dump_show,
 		   ether_desc_dump_store);
+
+/**
+ * @brief Shows current configured tx queue
+ *
+ * @param[in] dev: Device data.
+ * @param[in] attr: Device attribute
+ * @param[in] buf: Buffer to print the current Tx Q configuration
+ */
+static ssize_t ether_mac_tx_q_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev);
+	struct ether_priv_data *pdata = netdev_priv(ndev);
+	struct osi_core_priv_data *osi_core = pdata->osi_core;
+	char *start = buf;
+
+	if (osi_core->pre_sil != OSI_ENABLE) {
+		dev_err(pdata->dev, "Not Allowed. Not pre-sil platform\n");
+		return 0;
+	}
+
+	if (!netif_running(ndev)) {
+		dev_err(pdata->dev, "Not Allowed. Ether interface is not up\n");
+		return 0;
+	}
+
+	buf += scnprintf(buf, PAGE_SIZE, "Current Tx queue: %d\n",
+			 pdata->tx_queue_select);
+
+	return (buf - start);
+}
+
+/**
+ * @brief Choose dma channel for Tx traffic or Tx queue select when non-zero
+ *
+ * @param[in] dev: Device data.
+ * @param[in] attr: Device attribute
+ * @param[in] buf: Buffer which contains dma channel number or Tx Q
+ * @param[in] size: size of buffer
+ *
+ * @return size of buffer.
+ */
+static ssize_t ether_mac_tx_q_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev);
+	struct ether_priv_data *pdata = netdev_priv(ndev);
+	struct osi_core_priv_data *osi_core = pdata->osi_core;
+	int ret, bufp = 0, dma_chanel = 0;
+
+	if (osi_core->pre_sil != OSI_ENABLE) {
+		dev_err(pdata->dev, "Not Allowed. Not pre-sil platform\n");
+		return 0;
+	}
+
+	if (!netif_running(ndev)) {
+		dev_err(pdata->dev, "Not Allowed. Ether interface is not up\n");
+		return size;
+	}
+
+	ret = sscanf(buf + bufp, "%d", &dma_chanel);
+	if (ret != 1 || dma_chanel >= OSI_MGBE_MAX_NUM_CHANS) {
+		dev_err(pdata->dev, "Failed to parse args or invalid dma chan");
+		goto exit;
+	}
+
+	pdata->tx_queue_select = dma_chanel;
+
+exit:
+	return size;
+}
+
+/**
+ * @brief Sysfs attribute for MAC Tx Q
+ *
+ */
+static DEVICE_ATTR(mac_tx_q, (S_IRUGO | S_IWUSR),
+		   ether_mac_tx_q_show,
+		   ether_mac_tx_q_store);
+
 #endif /* OSI_DEBUG */
 
 /**
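Usage, assuming a pre-sil build with the interface up: write a channel number to the mac_tx_q attribute and read it back (the exact sysfs path depends on where the driver registers its attribute group, e.g. under the netdev's parent device). One caveat in the committed store path: sscanf("%d") parses into a signed int and only the upper bound is checked explicitly, so whether a negative write is caught depends on the signedness of OSI_MGBE_MAX_NUM_CHANS in the comparison. A hedged alternative sketch (not part of this commit) using the kernel's kstrtouint(), which rejects negative and malformed input outright:

/* Sketch only: kstrtouint() fails on negative or non-numeric input,
 * removing any dependence on the macro's signedness. Kernel context
 * assumed; pdata is the driver private data as in the diff above. */
#include <linux/kernel.h>

static ssize_t mac_tx_q_store_sketch(struct ether_priv_data *pdata,
				     const char *buf, size_t size)
{
	unsigned int chan;

	if (kstrtouint(buf, 10, &chan) != 0 ||
	    chan >= OSI_MGBE_MAX_NUM_CHANS) {
		dev_err(pdata->dev, "invalid dma channel\n");
		return size;
	}

	pdata->tx_queue_select = chan;
	return size;
}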
@@ -2752,6 +2830,7 @@ static struct attribute *ether_sysfs_attrs_without_macsec[] = {
 #if defined HSI_SUPPORT && defined(NV_VLTEST_BUILD) && (IS_ENABLED(CONFIG_TEGRA_HSIERRRPTINJ))
 	&dev_attr_hsi_enable.attr,
 #endif
+	&dev_attr_mac_tx_q.attr,
 #endif /* OSI_STRIPPED_LIB */
 	NULL
 };