nvethernet: support filter-based DMA routing API

Updated the IOCTL path so it is not impacted by the new API change.
The OSI call must be made with the correct parameters to support the
new IOCTL:
1) dma_routing_enable
2) dma_chan
3) addr_mask (for L2 filtering)
4) src_dest (for L2 filtering)
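
For reference, a sketch of the updated call as used in the diff below;
idx and addr are placeholder names, and the exact prototype lives in
the OSI headers:

	/* Clear/program one L2 filter entry with DMA routing disabled,
	 * no address masking, and destination-address matching.
	 */
	if (osi_update_mac_addr_low_high_reg(osi_core,
					     (unsigned int)idx, /* filter index */
					     addr,              /* MAC address, NULL to clear */
					     OSI_DISABLE,       /* dma_routing_enable */
					     0x0,               /* dma_chan */
					     OSI_AMASK_DISABLE, /* addr_mask (L2 filtering) */
					     OSI_DA_MATCH)      /* src_dest (L2 filtering) */
	    != 0) {
		dev_err(pdata->dev, "MAC filter update failed\n");
	}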

Included logic for user-priority-based RxQ selection. The DT entry
"nvidia,rx-queue-prio" configures this mapping, i.e. for the following
valid queue mapping:

Priorities 6,3 to Queue 3
Priorities 4,5 to Queue 2
Priority 0 to Queue 1 and
Priority 1 to Queue 0.
the setting will be nvidia,rx-queue-prio = <0x2 0x1 0x30 0x48>;
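
The cells follow from treating each value as a bitmask of 802.1p
priorities, one cell per Rx queue (bit N set maps priority N to that
queue). A minimal sketch of the arithmetic under that encoding:

	unsigned int rxq_prio[4];

	rxq_prio[3] = (1U << 6) | (1U << 3); /* priorities 6,3 -> 0x48 */
	rxq_prio[2] = (1U << 4) | (1U << 5); /* priorities 4,5 -> 0x30 */
	rxq_prio[1] = (1U << 0);             /* priority 0     -> 0x1  */
	rxq_prio[0] = (1U << 1);             /* priority 1     -> 0x2  */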

If static channel selection is used, i.e. RXQtoDMA is set to 0x03020100:
	Priorities 6,3 to Queue 3 to chan 3
	Priorities 4,5 to Queue 2 to chan 2
	Priority 0 to Queue 1 to chan 1 and
	Priority 1 to Queue 0 to chan 0
else, if DCS (DMA channel selection) is enabled,
	the channel is selected based on the filter rules.
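
A minimal sketch of the static decode, assuming RXQtoDMA packs one
8-bit field per Rx queue with the DMA channel in the low nibble (the
layout is an assumption that matches the 0x03020100 example, not a
detail taken from this change):

	unsigned int rxq_to_dma = 0x03020100U;
	unsigned int q, chan;

	for (q = 0U; q < 4U; q++) {
		/* byte q holds the channel: queue 0->chan 0, ..., 3->3 */
		chan = (rxq_to_dma >> (q * 8U)) & 0xFU;
	}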

Included logic to take DCS enable/disable input from the user. The DT
entry "nvidia,dcs-enable" enables or disables DCS for all queues,
i.e. nvidia,dcs-enable = <0x1> enables DCS for all queues; anything
else, or a missing entry, leaves DCS disabled.
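
Putting both properties together, a hypothetical DT fragment (node
name and placement are illustrative; only the two property names come
from this change):

	ethernet {
		/* Rx queue <-> 802.1p priority bitmasks, one cell per queue */
		nvidia,rx-queue-prio = <0x2 0x1 0x30 0x48>;
		/* enable filter-rule based DMA channel selection */
		nvidia,dcs-enable = <0x1>;
	};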

Bug 200525721

Change-Id: I4ba820f178b03424d01bb4ddd1f1d6eadde572f7
Signed-off-by: Rakesh Goyal <rgoyal@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2133196
GVS: Gerrit_Virtual_Submit
Reviewed-by: Narayan Reddy <narayanr@nvidia.com>
Reviewed-by: Srinivas Ramachandran <srinivasra@nvidia.com>
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>

@@ -1320,7 +1320,7 @@ static unsigned short ether_select_queue(struct net_device *dev,
 	for (i = 0; i < OSI_EQOS_MAX_NUM_CHANS; i++) {
 		chan = osi_dma->dma_chans[i];
 
-		if (pdata->q_prio[chan] == skb->priority) {
+		if (pdata->txq_prio[chan] == skb->priority) {
 			txqueue_select = (unsigned short)chan;
 			break;
 		}
@@ -1409,7 +1409,15 @@ static int ether_prepare_mc_list(struct net_device *dev)
 			 pdata->num_mac_addr_regs);
 	/* Clear previously set filters */
 	for (cnt = 1; cnt <= pdata->last_uc_filter_index; cnt++) {
-		osi_update_mac_addr_low_high_reg(osi_core, cnt, NULL);
+		if (osi_update_mac_addr_low_high_reg(osi_core,
+						     (unsigned int)cnt,
+						     NULL,
+						     OSI_DISABLE, 0x0,
+						     OSI_AMASK_DISABLE,
+						     OSI_DA_MATCH) !=
+		    0) {
+			dev_err(pdata->dev, "issue in cleaning mc list\n");
+		}
 	}
 
 	netdev_for_each_mc_addr(ha, dev) {
@@ -1418,7 +1426,16 @@ static int ether_prepare_mc_list(struct net_device *dev)
 			  i,
 			  ha->addr[0], ha->addr[1], ha->addr[2],
 			  ha->addr[3], ha->addr[4], ha->addr[5]);
-		osi_update_mac_addr_low_high_reg(osi_core, i, ha->addr);
+		if (osi_update_mac_addr_low_high_reg(osi_core,
+						     (unsigned int)i,
+						     ha->addr,
+						     OSI_DISABLE, 0x0,
+						     OSI_AMASK_DISABLE,
+						     OSI_DA_MATCH) !=
+		    0) {
+			dev_err(pdata->dev, "issue in creating mc list\n");
+		}
+
 		if (i == EQOS_MAX_MAC_ADDRESS_FILTER - 1) {
 			dev_err(pdata->dev, "Configured max number of supported MAC, ignoring it\n");
 			break;
@@ -1469,7 +1486,15 @@ static int ether_prepare_uc_list(struct net_device *dev)
 	/* Clear previously set filters */
 	for (cnt = pdata->last_mc_filter_index + 1;
 	     cnt <= pdata->last_uc_filter_index; cnt++) {
-		osi_update_mac_addr_low_high_reg(osi_core, cnt, NULL);
+		if (osi_update_mac_addr_low_high_reg(osi_core,
+						     (unsigned int)cnt,
+						     NULL,
+						     OSI_DISABLE, 0x0,
+						     OSI_AMASK_DISABLE,
+						     OSI_DA_MATCH) !=
+		    0) {
+			dev_err(pdata->dev, "issue in cleaning uc list\n");
+		}
 	}
 
 	netdev_for_each_uc_addr(ha, dev) {
@@ -1477,7 +1502,16 @@ static int ether_prepare_uc_list(struct net_device *dev)
 			  "uc addr[%d] = %#x:%#x:%#x:%#x:%#x:%#x\n",
 			  i, ha->addr[0], ha->addr[1], ha->addr[2],
 			  ha->addr[3], ha->addr[4], ha->addr[5]);
-		osi_update_mac_addr_low_high_reg(osi_core, i, ha->addr);
+		if (osi_update_mac_addr_low_high_reg(osi_core,
+						     (unsigned int)i,
+						     ha->addr,
+						     OSI_DISABLE, 0x0,
+						     OSI_AMASK_DISABLE,
+						     OSI_DA_MATCH) !=
+		    0) {
+			dev_err(pdata->dev, "issue in creating uc list\n");
+		}
+
 		if (i == EQOS_MAX_MAC_ADDRESS_FILTER - 1) {
 			dev_err(pdata->dev, "Already MAX MAC added\n");
 			break;
@@ -2631,6 +2665,7 @@ static int ether_parse_dt(struct ether_priv_data *pdata)
 	struct osi_dma_priv_data *osi_dma = pdata->osi_dma;
 	struct device_node *np = dev->of_node;
 	int ret = -EINVAL;
+	unsigned int i;
 
 	/* read ptp clock */
 	ret = of_property_read_u32(np, "nvidia,ptp_ref_clock_speed",
@@ -2693,14 +2728,27 @@ static int ether_parse_dt(struct ether_priv_data *pdata)
 		return ret;
 	}
 
-	ether_parse_queue_prio(pdata, "nvidia,queue_prio", pdata->q_prio,
+	/* Read tx queue priority */
+	ether_parse_queue_prio(pdata, "nvidia,tx-queue-prio", pdata->txq_prio,
 			       ETHER_QUEUE_PRIO_DEFAULT, ETHER_QUEUE_PRIO_MAX,
 			       osi_core->num_mtl_queues);
 
-	ether_parse_queue_prio(pdata, "nvidia,rx_queue_prio",
-			       osi_core->rxq_prio,
-			       ETHER_QUEUE_PRIO_INVALID, ETHER_QUEUE_PRIO_MAX,
-			       osi_core->num_mtl_queues);
+	/* Read Rx Queue - User priority mapping for tagged packets */
+	ret = of_property_read_u32_array(np, "nvidia,rx-queue-prio",
+					 osi_core->rxq_prio,
+					 osi_core->num_mtl_queues);
+	if (ret < 0) {
+		dev_err(dev, "failed to read rx Queue priority mapping, Setting default 0x0\n");
+		for (i = 0; i < osi_core->num_mtl_queues; i++) {
+			osi_core->rxq_prio[i] = 0x0U;
+		}
+	}
+
+	/* Read DCS enable/disable input, default disable */
+	ret = of_property_read_u32(np, "nvidia,dcs-enable", &osi_core->dcs_en);
+	if (ret < 0 || osi_core->dcs_en != OSI_ENABLE) {
+		osi_core->dcs_en = OSI_DISABLE;
+	}
 
 	ret = ether_parse_phy_dt(pdata, np);
 	if (ret < 0) {