diff --git a/drivers/net/ethernet/nvidia/nvethernet/Makefile b/drivers/net/ethernet/nvidia/nvethernet/Makefile
index cee61472..3b20ec7d 100644
--- a/drivers/net/ethernet/nvidia/nvethernet/Makefile
+++ b/drivers/net/ethernet/nvidia/nvethernet/Makefile
@@ -18,7 +18,7 @@ OSI_DMA := nvethernetrm/osi/dma
 
 obj-$(CONFIG_NVETHERNET) += nvethernet.o
 
-ccflags-y += -DTHERMAL_CAL -I$(srctree.nvidia)/drivers/net/ethernet/nvidia/nvethernet/nvethernetrm/include \
+ccflags-y += -DTHERMAL_CAL -DLINUX_IVC -I$(srctree.nvidia)/drivers/net/ethernet/nvidia/nvethernet/nvethernetrm/include \
 	-I$(srctree.nvidia)/drivers/net/ethernet/nvidia/nvethernet/nvethernetrm/osi/common/include
 
 nvethernet-objs:= ether_linux.o \
@@ -33,6 +33,7 @@ nvethernet-objs:= ether_linux.o \
 	$(OSI_DMA)/osi_dma.o \
 	$(OSI_DMA)/osi_dma_txrx.o \
 	$(OSI_CORE)/eqos_core.o \
+	$(OSI_CORE)/ivc_core.o \
 	$(OSI_CORE)/eqos_mmc.o \
 	$(OSI_DMA)/eqos_dma.o
 
diff --git a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c
index fd054141..15713801 100644
--- a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c
+++ b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c
@@ -647,13 +647,6 @@ static void ether_free_irqs(struct ether_priv_data *pdata)
 		devm_free_irq(pdata->dev, pdata->common_irq, pdata);
 		pdata->common_irq_alloc_mask = 0U;
 	}
-#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE)
-	if (pdata->ivck != NULL) {
-		cancel_work_sync(&pdata->ivc_work);
-		tegra_hv_ivc_unreserve(pdata->ivck);
-		devm_free_irq(pdata->dev, pdata->ivck->irq, pdata);
-	}
-#endif
 
 	if (pdata->osi_core->mac_ver > OSI_EQOS_MAC_5_00) {
 		for (i = 0; i < pdata->osi_dma->num_vm_irqs; i++) {
@@ -682,7 +675,7 @@ static void ether_free_irqs(struct ether_priv_data *pdata)
 	}
 }
 
-#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE)
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 9, 0))
 /**
  * @brief IVC ISR Routine
  *
@@ -701,52 +694,60 @@ static void ether_free_irqs(struct ether_priv_data *pdata)
  */
 static irqreturn_t ether_ivc_irq(int irq, void *data)
 {
-	struct ether_priv_data *pdata = data;
-	int ret;
+	struct ether_priv_data *pdata = (struct ether_priv_data *)data;
+	struct ether_ivc_ctxt *ictxt = &pdata->ictxt;
 
-	if (tegra_hv_ivc_channel_notified(pdata->ivck) != 0) {
-		dev_err(pdata->dev, "ivc channel not usable\n");
-		return IRQ_HANDLED;
-	}
+	complete(&ictxt->msg_complete);
 
-	if (tegra_hv_ivc_can_read(pdata->ivck)) {
-		dev_info(pdata->dev, "ivc read done\n");
-		/* Read the current message for the ethernet server to be
-		 * able to send further messages on next interrupt
-		 */
-		ret = tegra_hv_ivc_read(pdata->ivck, pdata->ivc_rx,
-					ETHER_MAX_IVC_BUF);
-		if (ret < 0) {
-			dev_err(pdata->dev, "IVC read failed: %d\n", ret);
-		} else {
-			/* Schedule work to execute the common IRQ Function
-			 * which takes the appropriate action.
-			 */
-			schedule_work(&pdata->ivc_work);
-		}
-	} else {
-		dev_info(pdata->dev, "Can not read ivc channel: %d\n",
-			 pdata->ivck->irq);
-	}
 	return IRQ_HANDLED;
 }
 
 /**
- * @brief IVC work
- *
- * Algorithm: Invoke OSI layer to handle common interrupt.
- *
- * @param[in] work: work structure.
+ * @brief Start IVC, initializes the IVC channel.
+ *
+ * @param[in] pdata: OSD private data.
  *
  * @retval void
  */
-static void ether_ivc_work(struct work_struct *work)
+static void ether_start_ivc(struct ether_priv_data *pdata)
 {
-	struct ether_priv_data *pdata =
-		container_of(work, struct ether_priv_data, ivc_work);
-	osi_common_isr(pdata->osi_core);
+	int ret;
+	struct ether_ivc_ctxt *ictxt = &pdata->ictxt;
+	if (ictxt->ivck != NULL && !ictxt->ivc_state) {
+		tegra_hv_ivc_channel_reset(ictxt->ivck);
+
+		ret = devm_request_irq(pdata->dev, ictxt->ivck->irq,
+				       ether_ivc_irq,
+				       0, dev_name(pdata->dev), pdata);
+		if (ret) {
+			dev_err(pdata->dev,
+				"Unable to request irq(%d)\n", ictxt->ivck->irq);
+			tegra_hv_ivc_unreserve(ictxt->ivck);
+			return;
+		}
+		ictxt->ivc_state = 1;
+		/* initialize the lock used to serialize IVC transfers */
+		spin_lock_init(&ictxt->ivck_lock);
+	}
+}
+
+/**
+ * @brief Stop IVC, de-initializes the IVC channel.
+ *
+ * @param[in] pdata: OSD private data.
+ *
+ * @retval void
+ */
+
+static void ether_stop_ivc(struct ether_priv_data *pdata)
+{
+	struct ether_ivc_ctxt *ictxt = &pdata->ictxt;
+	if (ictxt->ivck != NULL) {
+		tegra_hv_ivc_unreserve(ictxt->ivck);
+		devm_free_irq(pdata->dev, ictxt->ivck->irq, pdata);
+		ictxt->ivc_state = 0;
+	}
 }
 
 /**
@@ -763,6 +764,8 @@ static void ether_ivc_work(struct work_struct *work)
  */
 static int ether_init_ivc(struct ether_priv_data *pdata)
 {
+	struct osi_core_priv_data *osi_core = pdata->osi_core;
+	struct ether_ivc_ctxt *ictxt = &pdata->ictxt;
 	struct device *dev = pdata->dev;
 	struct device_node *np, *hv_np;
 	uint32_t id;
@@ -770,6 +773,7 @@ static int ether_init_ivc(struct ether_priv_data *pdata)
 
 	np = dev->of_node;
 	if (!np) {
+		ictxt->ivck = NULL;
 		return -EINVAL;
 	}
 
@@ -785,29 +789,22 @@ static int ether_init_ivc(struct ether_priv_data *pdata)
 		return -EINVAL;
 	}
 
-	pdata->ivck = tegra_hv_ivc_reserve(hv_np, id, NULL);
+	ictxt->ivck = tegra_hv_ivc_reserve(hv_np, id, NULL);
 	of_node_put(hv_np);
-	if (IS_ERR_OR_NULL(pdata->ivck)) {
+	if (IS_ERR_OR_NULL(ictxt->ivck)) {
 		dev_err(dev, "Failed to reserve ivc channel:%u\n", id);
-		ret = PTR_ERR(pdata->ivck);
-		pdata->ivck = NULL;
+		ret = PTR_ERR(ictxt->ivck);
+		ictxt->ivck = NULL;
 		return ret;
 	}
 
 	dev_info(dev, "Reserved IVC channel #%u - frame_size=%d irq %d\n",
-		 id, pdata->ivck->frame_size, pdata->ivck->irq);
-
-	tegra_hv_ivc_channel_reset(pdata->ivck);
-
-	INIT_WORK(&pdata->ivc_work, ether_ivc_work);
-
-	ret = devm_request_irq(dev, pdata->ivck->irq, ether_ivc_irq,
-			       0, dev_name(dev), pdata);
-	if (ret) {
-		dev_err(dev, "Unable to request irq(%d)\n", pdata->ivck->irq);
-		tegra_hv_ivc_unreserve(pdata->ivck);
-		return ret;
-	}
+		 id, ictxt->ivck->frame_size, ictxt->ivck->irq);
+	osi_core->osd_ops.ivc_send = osd_ivc_send_cmd;
+	init_completion(&ictxt->msg_complete);
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 9, 0))
+	ether_start_ivc(pdata);
+#endif
 
 	return 0;
 }
 #endif
@@ -1628,6 +1625,10 @@ static int ether_open(struct net_device *dev)
 		gpio_set_value(pdata->phy_reset, 1);
 	}
 
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 9, 0))
+	ether_start_ivc(pdata);
+#endif
+
 	ret = ether_enable_clks(pdata);
 	if (ret < 0) {
 		dev_err(&dev->dev, "failed to enable clks\n");
@@ -1869,6 +1870,10 @@ static int ether_close(struct net_device *ndev)
 	/* MAC deinit which inturn stop MAC Tx,Rx */
 	osi_hw_core_deinit(pdata->osi_core);
 
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 9, 0))
+	ether_stop_ivc(pdata);
+#endif
+
 	/* Assert MAC RST gpio */
 	if (!pdata->osi_core->pre_si && pdata->mac_rst) {
 		reset_control_assert(pdata->mac_rst);
@@ -2317,7 +2322,7 @@ static int ether_prepare_mc_list(struct net_device *dev,
 			i, ha->addr[0], ha->addr[1], ha->addr[2],
 			ha->addr[3], ha->addr[4], ha->addr[5]);
 		filter->index = i;
-		filter->mac_address = ha->addr;
+		memcpy(filter->mac_address, ha->addr, ETH_ALEN);
 		filter->dma_routing = OSI_DISABLE;
 		filter->dma_chan = 0x0;
 		filter->addr_mask = OSI_AMASK_DISABLE;
@@ -2405,7 +2410,7 @@ static int ether_prepare_uc_list(struct net_device *dev,
 			i, ha->addr[0], ha->addr[1], ha->addr[2],
 			ha->addr[3], ha->addr[4], ha->addr[5]);
 		filter->index = i;
-		filter->mac_address = ha->addr;
+		memcpy(filter->mac_address, ha->addr, ETH_ALEN);
 		filter->dma_routing = OSI_DISABLE;
 		filter->dma_chan = 0x0;
 		filter->addr_mask = OSI_AMASK_DISABLE;
@@ -2498,7 +2503,6 @@ static void ether_set_rx_mode(struct net_device *dev)
 		for (i = pdata->last_filter_index + 1; i <= last_index; i++) {
 			filter.oper_mode = OSI_OPER_ADDR_UPDATE;
 			filter.index = i;
-			filter.mac_address = NULL;
 			filter.dma_routing = OSI_DISABLE;
 			filter.dma_chan = OSI_CHAN_ANY;
 			filter.addr_mask = OSI_AMASK_DISABLE;
@@ -3850,6 +3854,7 @@ static int ether_parse_dt(struct ether_priv_data *pdata)
 	/* Allow to set non zero DMA channel for virtualization */
 	if (!ether_init_ivc(pdata)) {
 		osi_dma->use_virtualization = OSI_ENABLE;
+		osi_core->use_virtualization = OSI_ENABLE;
 		/* read mac management flag and set use_stats */
 		of_property_read_u32(np, "nvidia,mmc_daemon",
 				     &pdata->use_stats);
@@ -4289,6 +4294,13 @@ static int ether_probe(struct platform_device *pdev)
 
 	tegra_pre_si_platform(osi_core);
 
+	/* Parse the ethernet DT node */
+	ret = ether_parse_dt(pdata);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to parse DT\n");
+		goto err_parse_dt;
+	}
+
 	/* Initialize core and DMA ops based on MAC type */
 	if (osi_init_core_ops(osi_core) != 0) {
 		dev_err(&pdev->dev, "failed to get osi_init_core_ops\n");
@@ -4303,13 +4315,6 @@ static int ether_probe(struct platform_device *pdev)
 		goto err_dma_ops;
 	}
 
-	/* Parse the ethernet DT node */
-	ret = ether_parse_dt(pdata);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "failed to parse DT\n");
-		goto err_parse_dt;
-	}
-
 	ndev->max_mtu = pdata->max_platform_mtu;
 
 	/* get base address, clks, reset ID's and MAC address*/
@@ -4319,18 +4324,18 @@ static int ether_probe(struct platform_device *pdev)
 		goto err_init_res;
 	}
 
-	osi_get_hw_features(osi_core->base, &pdata->hw_feat);
-
-	/* Set netdev features based on hw features */
-	ether_set_ndev_features(ndev, pdata);
-
-	ret = osi_get_mac_version(osi_core->base, &osi_core->mac_ver);
+	ret = osi_get_mac_version(osi_core, &osi_core->mac_ver);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to get MAC version (%u)\n",
 			osi_core->mac_ver);
 		goto err_dma_mask;
 	}
 
+	osi_get_hw_features(osi_core, &pdata->hw_feat);
+
+	/* Set netdev features based on hw features */
+	ether_set_ndev_features(ndev, pdata);
+
 	ret = ether_get_irqs(pdev, pdata, num_dma_chans);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to get IRQ's\n");
diff --git a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h
index 79784a99..448530a8 100644
--- a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h
+++ b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h
@@ -128,13 +128,6 @@
  */
 #define ETHER_TX_MAX_FRAME_SIZE GSO_MAX_SIZE
 
-#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE)
-/**
- * @brief Ethernet Maximum IVC BUF
- */
-#define ETHER_MAX_IVC_BUF 128
-#endif
-
 /**
  * @brief Check if Tx data buffer length is within bounds.
  *
@@ -241,6 +234,22 @@ struct ether_vm_irq_data {
 	struct ether_priv_data *pdata;
 };
 
+/**
+ * @brief Ethernet IVC context
+ */
+struct ether_ivc_ctxt {
+	/** ivc cookie */
+	struct tegra_hv_ivc_cookie *ivck;
+	/** ivc lock */
+	spinlock_t ivck_lock;
+	/** ivc work */
+	struct work_struct ivc_work;
+	/** wait for event */
+	struct completion msg_complete;
+	/** Flag to indicate ivc started or stopped */
+	unsigned int ivc_state;
+};
+
 /**
  * @brief Ethernet driver private data
  */
@@ -363,16 +372,10 @@ struct ether_priv_data {
 	unsigned int tx_lpi_enabled;
 	/** Time (usec) MAC waits to enter LPI after Tx complete */
 	unsigned int tx_lpi_timer;
-#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE)
-	/** ivc cookie */
-	struct tegra_hv_ivc_cookie *ivck;
-	/** Buffer to receive pad ivc message */
-	char ivc_rx[ETHER_MAX_IVC_BUF];
-	/** ivc work */
-	struct work_struct ivc_work;
 	/** Flag which decides stats is enabled(1) or disabled(0) */
 	unsigned int use_stats;
-#endif
+	/** ivc context */
+	struct ether_ivc_ctxt ictxt;
 	/** VM channel info data associated with VM IRQ */
 	struct ether_vm_irq_data *vm_irq_data;
 #ifdef CONFIG_DEBUG_FS
@@ -491,4 +494,18 @@ static inline int ether_selftest_get_count(struct ether_priv_data *pdata)
  *
  */
 void osd_realloc_buf(void *priv, void *rxring, unsigned int chan);
+
+/**
+ * @brief osd_ivc_send_cmd - OSD IVC send command
+ *
+ * @param[in] priv: OSD private data
+ * @param[in] data: IVC message buffer
+ * @param[in] len: length of the data
+ * @note
+ * API Group:
+ * - Initialization: Yes
+ * - Run time: Yes
+ * - De-initialization: Yes
+ */
+int osd_ivc_send_cmd(void *priv, void *data, unsigned int len);
 #endif /* ETHER_LINUX_H */
diff --git a/drivers/net/ethernet/nvidia/nvethernet/osd.c b/drivers/net/ethernet/nvidia/nvethernet/osd.c
index da8f4e1f..1d8f3cf2 100644
--- a/drivers/net/ethernet/nvidia/nvethernet/osd.c
+++ b/drivers/net/ethernet/nvidia/nvethernet/osd.c
@@ -16,6 +16,9 @@
 
 #include "ether_linux.h"
 #include 
+#include 
+
+#define IVC_WAIT_TIMEOUT (msecs_to_jiffies(1000))
 
 /**
  * @brief Adds delay in micro seconds.
@@ -398,3 +401,76 @@ void osd_transmit_complete(void *priv, void *buffer, unsigned long dmaaddr,
 		dev_consume_skb_any(skb);
 	}
 }
+
+/**
+ * @brief osd_ivc_send_cmd - OSD IVC send command
+ *
+ * @param[in] priv: OSD private data
+ * @param[in] data: IVC message buffer
+ * @param[in] len: length of the data
+ * @note
+ * API Group:
+ * - Initialization: Yes
+ * - Run time: Yes
+ * - De-initialization: Yes
+ */
+int osd_ivc_send_cmd(void *priv, void *data, unsigned int len)
+{
+	int ret = -1;
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 9, 0))
+	unsigned long flags = 0;
+	static int cnt = 0;
+	struct osi_core_priv_data *core = (struct osi_core_priv_data *)priv;
+	ivc_msg_common *ivc_buf = (ivc_msg_common *) data;
+	struct ether_priv_data *pdata = (struct ether_priv_data *)core->osd;
+	struct ether_ivc_ctxt *ictxt = &pdata->ictxt;
+	struct tegra_hv_ivc_cookie *ivck =
+			(struct tegra_hv_ivc_cookie *) ictxt->ivck;
+	int dcnt = 50;
+	int is_atomic = 0;
+	if (len > ETHER_MAX_IVC_BUF) {
+		dev_err(pdata->dev, "Invalid IVC len\n");
+		return -1;
+	}
+
+	ivc_buf->status = -1;
+	spin_lock_irqsave(&ictxt->ivck_lock, flags);
+	if (in_atomic()) {
+		preempt_enable();
+		is_atomic = 1;
+	}
+	ivc_buf->count = cnt++;
+	/* Waiting for the channel to be ready */
+	while (tegra_hv_ivc_channel_notified(ivck) != 0) {
+		osd_msleep(1);
+		dcnt--;
+		if (!dcnt) {
+			pr_err("IVC recv timeout\n");
+			goto fail;
+		}
+	}
+
+	/* Write the current message for the ethernet server */
+	ret = tegra_hv_ivc_write(ivck, ivc_buf, len);
+	if (ret != len) {
+		dev_err(pdata->dev, "IVC write len %d ret %d cmd %d failed\n",
+			len, ret, ivc_buf->cmd);
+		goto fail;
+	}
+	while (!tegra_hv_ivc_can_read(ictxt->ivck)) {
+		wait_for_completion_timeout(&ictxt->msg_complete, IVC_WAIT_TIMEOUT);
+	}
+
+	ret = tegra_hv_ivc_read(ivck, ivc_buf, len);
+	if (ret < 0) {
+		dev_err(pdata->dev, "IVC read failed: %d\n", ret);
+	}
+	if (is_atomic) {
+		preempt_disable();
+	}
+	ret = ivc_buf->status;
+fail:
+	spin_unlock_irqrestore(&ictxt->ivck_lock, flags);
+#endif
+	return ret;
+}
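
Note (reviewer addition, not part of the patch): this change turns the IVC path from the old interrupt-plus-workqueue model (ether_ivc_irq -> ivc_work -> osi_common_isr) into a synchronous request/response primitive. osd_ivc_send_cmd() waits for the channel to become ready, writes the command frame, sleeps on msg_complete until ether_ivc_irq() signals that the ethernet server has answered, then reads the reply back into the caller's buffer and returns its status field. The sketch below shows how a core-layer caller is expected to reach it through the osd_ops.ivc_send hook registered in ether_init_ivc(); the message layout and helper names are hypothetical stand-ins for the real ivc_msg_common type in nvethernetrm, of which this patch only shows the cmd, count and status fields.

/*
 * Illustrative sketch only, assuming the callback signature used above:
 * int (*ivc_send)(void *priv, void *data, unsigned int len), with priv
 * being the osi_core_priv_data pointer that osd_ivc_send_cmd() casts.
 */
struct example_ivc_msg {		/* hypothetical stand-in for ivc_msg_common */
	unsigned int cmd;		/* command ID for the ethernet server */
	unsigned int count;		/* sequence number, set by osd_ivc_send_cmd() */
	int status;			/* server return code, valid after the call */
	unsigned int args[4];		/* hypothetical payload words */
};

static int example_core_request(struct osi_core_priv_data *osi_core,
				unsigned int cmd, unsigned int arg0)
{
	struct example_ivc_msg msg = { .cmd = cmd, .args = { arg0 } };

	/*
	 * Blocks inside osd_ivc_send_cmd(): channel-ready poll, IVC write,
	 * wait on msg_complete (signalled from ether_ivc_irq()), IVC read.
	 * The return value is the status field of the server's reply.
	 */
	return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg));
}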