nvethernet: Add support for VM interrupts

Add VM interrupt handling for MAC controllers that
support VM-based interrupts.
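
Each VM IRQ is serviced by a common ISR (ether_vm_isr()) which reads
the global DMA status word, ANDs it with the per-IRQ channel mask and
walks the remaining set bits with ffs(): for channel n, bit (2 * n)
flags Tx and bit (2 * n + 1) flags Rx. Below is a minimal user-space
sketch of that decode; the status value is made up for illustration.

	#include <strings.h>	/* ffs() */
	#include <stdio.h>

	int main(void)
	{
		/* pretend RX1 (bit 3) and TX0 (bit 0) are pending after
		 * the per-VM channel mask has been applied
		 */
		unsigned int dma_status = (1U << 3) | (1U << 0);

		while (dma_status) {
			unsigned int temp = (unsigned int)ffs((int)dma_status) - 1U;
			unsigned int chan = temp >> 1U;	/* bit pair index = channel */
			unsigned int txrx = temp & 1U;	/* odd bit = Rx, even = Tx */

			printf("channel %u: %s interrupt\n",
			       chan, txrx ? "Rx" : "Tx");
			dma_status &= ~(1U << temp);
		}

		return 0;
	}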

The following parameters need to be passed from DT:
o Number of VM IRQs assigned per OS/VM.
o Number of VM channels assigned to a VM IRQ.
o List of DMA channels assigned to a VM IRQ.

Below is a sample DT representation:

vm_irq_config: vm-irq-config {
	nvidia,num-vm-irqs = <4>;
	vm_irq1 {
		nvidia,num-vm-channels = <2>;
		nvidia,vm-channels = <0 1>;
	};
	vm_irq2 {
		nvidia,num-vm-channels = <2>;
		nvidia,vm-channels = <2 3>;
	};
	vm_irq3 {
		nvidia,num-vm-channels = <2>;
		nvidia,vm-channels = <4 5>;
	};
	vm_irq4 {
		nvidia,num-vm-channels = <2>;
		nvidia,vm-channels = <6 7>;
	};
};

ethernet@<base_addr> {
	[...]
	nvidia,vm-irq-config = <&vm_irq_config>;
	[...]
};
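
For reference, the driver folds each "nvidia,vm-channels" list into a
per-IRQ bit mask (chan_mask) through the ETHER_VM_IRQ_TX_CHAN_MASK()
and ETHER_VM_IRQ_RX_CHAN_MASK() macros: Tx for channel n is bit
(2 * n) and Rx is bit (2 * n + 1). Below is a minimal sketch of that
mapping; the helper name is illustrative only (the driver's version
is ether_set_vm_irq_chan_mask()).

	#define BIT(nr)	(1U << (nr))

	/* illustrative stand-in for ether_set_vm_irq_chan_mask() */
	static unsigned int vm_chan_mask(const unsigned int *chans,
					 unsigned int num_chans)
	{
		unsigned int mask = 0U;
		unsigned int i;

		for (i = 0U; i < num_chans; i++)
			mask |= BIT(chans[i] * 2U) |		/* Tx bit */
				BIT((chans[i] * 2U) + 1U);	/* Rx bit */

		return mask;
	}

With the sample above, vm_irq1 (channels 0 and 1) yields a chan_mask
of 0x0F (TX0 | RX0 | TX1 | RX1) and vm_irq4 (channels 6 and 7) yields
0xF000.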

Bug 200548572

Change-Id: I802f247fa95ef6dcd769afbc7c13c6362d2f328e
Signed-off-by: Bhadram Varka <vbhadram@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2292602
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: Bitan Biswas <bbiswas@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author:    Bhadram Varka
Date:      2019-06-26 20:22:43 +05:30
Committer: Revanth Kumar Uppala
Parent:    3c81e255bd
Commit:    c4a75f03ac

2 changed files with 312 additions and 50 deletions


@@ -408,6 +408,82 @@ static int ether_phy_init(struct net_device *dev)
 	return 0;
 }
 
+/**
+ * @brief ether_vm_isr - VM based ISR routine.
+ *
+ * Algorithm:
+ * 1) Get the global DMA status (common for all VM IRQs). Tx/Rx status
+ *    bits are interleaved per channel:
+ *
+ *	+-----+-----+-----+-----+- - - - - - - -+-----+-----+-----+-----+
+ *	| RX7 | TX7 | RX6 | TX6 | . . . . . . . | RX1 | TX1 | RX0 | TX0 |
+ *	+-----+-----+-----+-----+- - - - - - - -+-----+-----+-----+-----+
+ *
+ * 2) Mask the global DMA status down to the channels assigned to this
+ *    VM.
+ * 3) Process all DMA channel interrupts which triggered the IRQ:
+ *    a) Find the first set bit from the LSB with ffs().
+ *    b) ffs() numbers the least significant bit as position 1, so
+ *       decrement by one to get a zero-based bit position.
+ *    c) Derive the channel number and Tx/Rx direction from the bit
+ *       position.
+ *    d) Invoke the OSI layer to clear the interrupt source for the
+ *       DMA Tx/Rx channel at DMA and wrapper level.
+ *    e) Get the NAPI instance for the channel and schedule it.
+ *
+ * @param[in] irq: IRQ number.
+ * @param[in] data: VM IRQ private data structure.
+ *
+ * @note MAC and PHY need to be initialized.
+ *
+ * @retval IRQ_HANDLED on success.
+ * @retval IRQ_NONE on failure.
+ */
+irqreturn_t ether_vm_isr(int irq, void *data)
+{
+	struct ether_vm_irq_data *vm_irq = (struct ether_vm_irq_data *)data;
+	struct ether_priv_data *pdata = vm_irq->pdata;
+	unsigned int temp = 0, chan = 0, txrx = 0;
+	struct osi_dma_priv_data *osi_dma = pdata->osi_dma;
+	struct ether_rx_napi *rx_napi = NULL;
+	struct ether_tx_napi *tx_napi = NULL;
+	unsigned int dma_status;
+
+	/* TODO: locking required since this is a shared register b/w VM IRQs */
+	dma_status = osi_get_global_dma_status(osi_dma);
+	dma_status &= vm_irq->chan_mask;
+
+	while (dma_status) {
+		temp = ffs(dma_status);
+		/* ffs() is one-based, so decrement to start from zero */
+		temp--;
+
+		/* divide by two to get the channel number */
+		chan = temp >> 1U;
+		/* bitwise AND with one to check whether it is Tx or Rx */
+		txrx = temp & 1U;
+
+		if (txrx) {
+			osi_clear_vm_rx_intr(osi_dma, chan);
+			rx_napi = pdata->rx_napi[chan];
+			if (likely(napi_schedule_prep(&rx_napi->napi))) {
+				osi_disable_chan_rx_intr(osi_dma, chan);
+				/* TODO: Schedule NAPI on a different CPU core */
+				__napi_schedule(&rx_napi->napi);
+			}
+		} else {
+			osi_clear_vm_tx_intr(osi_dma, chan);
+			tx_napi = pdata->tx_napi[chan];
+			if (likely(napi_schedule_prep(&tx_napi->napi))) {
+				osi_disable_chan_tx_intr(osi_dma, chan);
+				/* TODO: Schedule NAPI on a different CPU core */
+				__napi_schedule(&tx_napi->napi);
+			}
+		}
+
+		dma_status &= ~BIT(temp);
+	}
+
+	return IRQ_HANDLED;
+}
+
 /**
  * @brief Transmit done ISR Routine.
  *
@@ -547,18 +623,29 @@ static void ether_free_irqs(struct ether_priv_data *pdata)
 		devm_free_irq(pdata->dev, pdata->ivck->irq, pdata);
 	}
 
+	if (pdata->osi_core->mac_ver > OSI_EQOS_MAC_5_00) {
+		for (i = 0; i < pdata->osi_dma->num_vm_irqs; i++) {
+			if (pdata->rx_irq_alloc_mask & (OSI_ENABLE << i)) {
+				devm_free_irq(pdata->dev, pdata->vm_irqs[i],
+					      &pdata->vm_irq_data[i]);
+			}
+		}
+	} else {
 	for (i = 0; i < pdata->osi_dma->num_dma_chans; i++) {
 		chan = pdata->osi_dma->dma_chans[i];
 
-		if (pdata->rx_irq_alloc_mask & (1U << i)) {
+		if (pdata->rx_irq_alloc_mask & (OSI_ENABLE << i)) {
 			devm_free_irq(pdata->dev, pdata->rx_irqs[i],
 				      pdata->rx_napi[chan]);
-			pdata->rx_irq_alloc_mask &= (~(1U << i));
+			pdata->rx_irq_alloc_mask &=
+				(~(OSI_ENABLE << i));
 		}
 
-		if (pdata->tx_irq_alloc_mask & (1U << i)) {
+		if (pdata->tx_irq_alloc_mask & (OSI_ENABLE << i)) {
 			devm_free_irq(pdata->dev, pdata->tx_irqs[i],
 				      pdata->tx_napi[chan]);
-			pdata->tx_irq_alloc_mask &= (~(1U << i));
+			pdata->tx_irq_alloc_mask &=
+				(~(OSI_ENABLE << i));
 		}
 	}
+	}
 }
@@ -709,6 +796,7 @@ static int ether_init_ivc(struct ether_priv_data *pdata)
 static int ether_request_irqs(struct ether_priv_data *pdata)
 {
 	struct osi_dma_priv_data *osi_dma = pdata->osi_dma;
+	struct osi_core_priv_data *osi_core = pdata->osi_core;
 	static char irq_names[ETHER_IRQ_MAX_IDX][ETHER_IRQ_NAME_SZ] = {0};
 	int ret = 0, i, j = 0;
 	unsigned int chan;
@@ -723,14 +811,34 @@ static int ether_request_irqs(struct ether_priv_data *pdata)
 	}
 	pdata->common_irq_alloc_mask = 1;
 
+	if (osi_core->mac_ver > OSI_EQOS_MAC_5_00) {
+		for (i = 0; i < osi_dma->num_vm_irqs; i++) {
+			snprintf(irq_names[j], ETHER_IRQ_NAME_SZ, "%s.vm%d",
+				 netdev_name(pdata->ndev), i);
+			ret = devm_request_irq(pdata->dev, pdata->vm_irqs[i],
+					       ether_vm_isr, IRQF_TRIGGER_NONE,
+					       irq_names[j++],
+					       &pdata->vm_irq_data[i]);
+			if (unlikely(ret < 0)) {
+				dev_err(pdata->dev,
+					"failed to request VM IRQ (%d)\n",
+					pdata->vm_irqs[i]);
+				goto err_chan_irq;
+			}
+
+			pdata->rx_irq_alloc_mask |= (OSI_ENABLE << i);
+		}
+	} else {
 	for (i = 0; i < osi_dma->num_dma_chans; i++) {
 		chan = osi_dma->dma_chans[i];
 
 		snprintf(irq_names[j], ETHER_IRQ_NAME_SZ, "%s.rx%d",
 			 netdev_name(pdata->ndev), chan);
-		ret = devm_request_irq(pdata->dev, pdata->rx_irqs[i],
-				       ether_rx_chan_isr, IRQF_TRIGGER_NONE,
-				       irq_names[j++], pdata->rx_napi[chan]);
+		ret = devm_request_irq(pdata->dev, pdata->rx_irqs[i],
+				       ether_rx_chan_isr,
+				       IRQF_TRIGGER_NONE,
+				       irq_names[j++],
+				       pdata->rx_napi[chan]);
 		if (unlikely(ret < 0)) {
 			dev_err(pdata->dev,
 				"failed to register Rx chan interrupt: %d\n",
@@ -738,13 +846,16 @@ static int ether_request_irqs(struct ether_priv_data *pdata)
 			goto err_chan_irq;
 		}
 
-		pdata->rx_irq_alloc_mask |= (1U << i);
+		pdata->rx_irq_alloc_mask |= (OSI_ENABLE << i);
 
 		snprintf(irq_names[j], ETHER_IRQ_NAME_SZ, "%s.tx%d",
 			 netdev_name(pdata->ndev), chan);
-		ret = devm_request_irq(pdata->dev, (unsigned int)pdata->tx_irqs[i],
-				       ether_tx_chan_isr, IRQF_TRIGGER_NONE,
-				       irq_names[j++], pdata->tx_napi[chan]);
+		ret = devm_request_irq(pdata->dev,
+				       (unsigned int)pdata->tx_irqs[i],
+				       ether_tx_chan_isr,
+				       IRQF_TRIGGER_NONE,
+				       irq_names[j++],
+				       pdata->tx_napi[chan]);
 		if (unlikely(ret < 0)) {
 			dev_err(pdata->dev,
 				"failed to register Tx chan interrupt: %d\n",
@@ -752,7 +863,8 @@ static int ether_request_irqs(struct ether_priv_data *pdata)
 			goto err_chan_irq;
 		}
 
-		pdata->tx_irq_alloc_mask |= (1U << i);
+		pdata->tx_irq_alloc_mask |= (OSI_ENABLE << i);
 	}
+	}
 
 	return ret;
@@ -2836,6 +2948,130 @@ exit:
 	return ret;
 }
 
+/**
+ * @brief ether_set_vm_irq_chan_mask - Set VM DMA channel mask.
+ *
+ * Algorithm: Set the VM-specific DMA channel mask used by the ISR,
+ * based on the number of DMA channels and the list of DMA channels.
+ *
+ * @param[in] vm_irq_data: VM IRQ data
+ * @param[in] num_vm_chan: Number of VM DMA channels
+ * @param[in] vm_chans: Pointer to list of VM DMA channels
+ *
+ * @retval None.
+ */
+static void ether_set_vm_irq_chan_mask(struct ether_vm_irq_data *vm_irq_data,
+				       unsigned int num_vm_chan,
+				       unsigned int *vm_chans)
+{
+	unsigned int i;
+	unsigned int chan;
+
+	for (i = 0; i < num_vm_chan; i++) {
+		chan = vm_chans[i];
+		vm_irq_data->chan_mask |= ETHER_VM_IRQ_TX_CHAN_MASK(chan);
+		vm_irq_data->chan_mask |= ETHER_VM_IRQ_RX_CHAN_MASK(chan);
+	}
+}
+
+/**
+ * @brief ether_get_vm_irq_data - Get VM IRQ data from DT.
+ *
+ * Algorithm: Parse the VM IRQ configuration from DT and read the
+ * VM IRQ numbers.
+ *
+ * @param[in] pdev: Platform device instance.
+ * @param[in] pdata: OSD private data.
+ *
+ * @retval 0 on success
+ * @retval "negative value" on failure
+ */
+static int ether_get_vm_irq_data(struct platform_device *pdev,
+				 struct ether_priv_data *pdata)
+{
+	struct osi_dma_priv_data *osi_dma = pdata->osi_dma;
+	struct device_node *vm_node, *temp;
+	unsigned int i, j, node = 0;
+	int ret = 0;
+
+	vm_node = of_parse_phandle(pdev->dev.of_node,
+				   "nvidia,vm-irq-config", 0);
+	if (vm_node == NULL) {
+		dev_err(pdata->dev, "failed to find VM IRQ configuration\n");
+		return -ENOMEM;
+	}
+
+	/* parse the number of VM IRQs */
+	ret = of_property_read_u32(vm_node, "nvidia,num-vm-irqs",
+				   &osi_dma->num_vm_irqs);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "failed to get number of VM IRQs (%d)\n",
+			ret);
+		dev_info(&pdev->dev, "Using num_vm_irqs as one\n");
+		osi_dma->num_vm_irqs = 1;
+	}
+
+	if (osi_dma->num_vm_irqs > OSI_MAX_VM_IRQS) {
+		dev_err(&pdev->dev, "Invalid number of VM IRQs\n");
+		return -EINVAL;
+	}
+
+	pdata->vm_irq_data = devm_kzalloc(pdata->dev,
+					  sizeof(struct ether_vm_irq_data) *
+					  osi_dma->num_vm_irqs,
+					  GFP_KERNEL);
+	if (pdata->vm_irq_data == NULL) {
+		dev_err(&pdev->dev, "failed to allocate VM IRQ data\n");
+		return -ENOMEM;
+	}
+
+	ret = of_get_child_count(vm_node);
+	if (ret != osi_dma->num_vm_irqs) {
+		dev_err(&pdev->dev,
+			"Mismatch in num_vm_irqs and VM IRQ config DT nodes\n");
+		return -EINVAL;
+	}
+
+	for_each_child_of_node(vm_node, temp) {
+		if (node == osi_dma->num_vm_irqs)
+			break;
+
+		ret = of_property_read_u32(temp, "nvidia,num-vm-channels",
+					   &osi_dma->irq_data[node].num_vm_chans);
+		if (ret != 0) {
+			dev_err(&pdev->dev,
+				"failed to read number of VM channels\n");
+			return ret;
+		}
+
+		ret = of_property_read_u32_array(temp, "nvidia,vm-channels",
+						 osi_dma->irq_data[node].vm_chans,
+						 osi_dma->irq_data[node].num_vm_chans);
+		if (ret != 0) {
+			dev_err(&pdev->dev, "failed to get VM channels\n");
+			return ret;
+		}
+
+		ether_set_vm_irq_chan_mask(&pdata->vm_irq_data[node],
+					   osi_dma->irq_data[node].num_vm_chans,
+					   osi_dma->irq_data[node].vm_chans);
+		pdata->vm_irq_data[node].pdata = pdata;
+		node++;
+	}
+
+	for (i = 0, j = 1; i < osi_dma->num_vm_irqs; i++, j++) {
+		pdata->vm_irqs[i] = platform_get_irq(pdev, j);
+		if (pdata->vm_irqs[i] < 0) {
+			dev_err(&pdev->dev, "failed to get VM IRQ number\n");
+			return pdata->vm_irqs[i];
+		}
+	}
+
+	return ret;
+}
+
 /**
  * @brief Read IRQ numbers from DT.
  *
@@ -2852,7 +3088,9 @@ static int ether_get_irqs(struct platform_device *pdev,
 			  struct ether_priv_data *pdata,
 			  unsigned int num_chans)
 {
+	struct osi_core_priv_data *osi_core = pdata->osi_core;
 	unsigned int i, j;
+	int ret = -1;
 
 	/* get common IRQ*/
 	pdata->common_irq = platform_get_irq(pdev, 0);
@@ -2860,9 +3098,14 @@ static int ether_get_irqs(struct platform_device *pdev,
 		dev_err(&pdev->dev, "failed to get common IRQ number\n");
 		return pdata->common_irq;
 	}
 
+	if (osi_core->mac_ver > OSI_EQOS_MAC_5_00) {
+		ret = ether_get_vm_irq_data(pdev, pdata);
+		if (ret < 0) {
+			dev_err(pdata->dev, "failed to get VM IRQ info\n");
+			return ret;
+		}
+	} else {
 	/* get TX IRQ numbers */
-	/* TODO: Need to get VM based IRQ numbers based on MAC version */
 	for (i = 0, j = 1; i < num_chans; i++) {
 		pdata->tx_irqs[i] = platform_get_irq(pdev, j++);
 		if (pdata->tx_irqs[i] < 0) {
@@ -2878,6 +3121,7 @@ static int ether_get_irqs(struct platform_device *pdev,
 			return pdata->rx_irqs[i];
 		}
 	}
+	}
 
 	return 0;
 }


@@ -193,6 +193,10 @@ static inline int ether_avail_txdesc_cnt(struct osi_tx_ring *tx_ring)
  * 36 second for 1 G interface and 3.6 sec for 10 G interface.
  */
 #define ETHER_STATS_TIMER 3U
 
+#define ETHER_VM_IRQ_TX_CHAN_MASK(x)	BIT((x) * 2U)
+#define ETHER_VM_IRQ_RX_CHAN_MASK(x)	BIT(((x) * 2U) + 1U)
+
 /**
  * @brief DMA Transmit Channel NAPI
  */
@@ -221,6 +225,16 @@ struct ether_rx_napi {
 	struct napi_struct napi;
 };
 
+/**
+ * @brief VM based IRQ data
+ */
+struct ether_vm_irq_data {
+	/** Bit mask of the DMA Tx/Rx channels handled by this VM IRQ */
+	unsigned int chan_mask;
+	/** OSD private data */
+	struct ether_priv_data *pdata;
+};
+
 /**
  * @brief Ethernet driver private data
  */
@@ -284,6 +298,8 @@ struct ether_priv_data {
 	int tx_irqs[ETHER_MAX_IRQS];
 	/** Array of DMA Receive channel IRQ numbers */
 	int rx_irqs[ETHER_MAX_IRQS];
+	/** Array of VM IRQ numbers */
+	int vm_irqs[OSI_MAX_VM_IRQS];
 	/** memory allocation mask */
 	unsigned long long dma_mask;
 	/** Current state of features enabled in HW*/
@@ -349,6 +365,8 @@ struct ether_priv_data {
 	struct work_struct ivc_work;
 	/** Flag which decides stats is enabled(1) or disabled(0) */
 	unsigned int use_stats;
+	/** VM channel info data associated with VM IRQ */
+	struct ether_vm_irq_data *vm_irq_data;
 };
 
 /**