From cac53247c00df86f065eb88f7d0f078bb03d165b Mon Sep 17 00:00:00 2001
From: Ashish Mhetre
Date: Mon, 20 Dec 2021 10:11:35 +0530
Subject: [PATCH] Revert "nvethernet: remove setting of dma_mask"

Set an appropriate dma-mask for the ethernet device.

This reverts commit cc55897988610e4618860f5606d2e5b10a4a521f.

Bug 200776516
Bug 200681386

Change-Id: Ic0ce23c84fd043fd47fa21151a4ff0dd53091493
Signed-off-by: Ashish Mhetre
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2644527
Tested-by: mobile promotions
Reviewed-by: svc_kernel_abi
Reviewed-by: Sachin Nikam
Reviewed-by: mobile promotions
GVS: Gerrit_Virtual_Submit
---
 .../ethernet/nvidia/nvethernet/ether_linux.c  | 53 +++++++++++++++++++
 1 file changed, 53 insertions(+)

diff --git a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c
index b02cbfe4..2fda5926 100644
--- a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c
+++ b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c
@@ -5954,6 +5954,53 @@ static void ether_get_num_dma_chan_mtl_q(struct platform_device *pdev,
 	}
 }
 
+/**
+ * @brief Set DMA address mask.
+ *
+ * Algorithm:
+ * Based on the addressing capability (address bit length) supported in the HW,
+ * the DMA mask is set accordingly.
+ *
+ * @param[in] pdata: OS dependent private data structure.
+ *
+ * @note The MAC_HW_Feature1 register needs to be read and the value of ADDR64 stored.
+ *
+ * @retval 0 on success
+ * @retval "negative value" on failure.
+ */
+static int ether_set_dma_mask(struct ether_priv_data *pdata)
+{
+	int ret = 0;
+
+	/* Set DMA addressing limitations based on the value read from HW if
+	 * dma_mask is not defined in DT
+	 */
+	if (pdata->dma_mask == DMA_MASK_NONE) {
+		switch (pdata->hw_feat.addr_64) {
+		case OSI_ADDRESS_32BIT:
+			pdata->dma_mask = DMA_BIT_MASK(32);
+			break;
+		case OSI_ADDRESS_40BIT:
+			pdata->dma_mask = DMA_BIT_MASK(40);
+			break;
+		case OSI_ADDRESS_48BIT:
+			pdata->dma_mask = DMA_BIT_MASK(48);
+			break;
+		default:
+			pdata->dma_mask = DMA_BIT_MASK(40);
+			break;
+		}
+	}
+
+	ret = dma_set_mask_and_coherent(pdata->dev, pdata->dma_mask);
+	if (ret < 0) {
+		dev_err(pdata->dev, "dma_set_mask_and_coherent failed\n");
+		return ret;
+	}
+
+	return ret;
+}
+
 /**
  * @brief Set the network device feature flags
  *
@@ -6249,6 +6296,12 @@ static int ether_probe(struct platform_device *pdev)
 	memcpy(&pdata->hw_feat, &ioctl_data.hw_feat,
 	       sizeof(struct osi_hw_features));
 
+	ret = ether_set_dma_mask(pdata);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to set dma mask\n");
+		goto err_dma_mask;
+	}
+
 	if (pdata->hw_feat.fpe_sel) {
 		ret = ether_parse_residual_queue(pdata, "nvidia,residual-queue",
 						 &osi_core->residual_queue);
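
For reference, below is a minimal, self-contained sketch of the pattern ether_set_dma_mask() follows: pick a mask width from a hardware-reported addressing capability and apply it with dma_set_mask_and_coherent(). The struct, field, and function names (my_priv, hw_addr_width, my_set_dma_mask) are hypothetical and not taken from the nvethernet sources; dma_set_mask_and_coherent(), DMA_BIT_MASK(), and dev_err() are the real kernel APIs the patch relies on.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>

/* Hypothetical private data; a real driver would fill hw_addr_width from
 * its hardware feature register (ADDR64 in MAC_HW_Feature1 for this MAC).
 */
struct my_priv {
	struct device *dev;
	unsigned int hw_addr_width;
};

static int my_set_dma_mask(struct my_priv *priv)
{
	u64 mask;
	int ret;

	/* Map the reported address width to a DMA mask; fall back to a
	 * conservative 40-bit mask when the width is unrecognized.
	 */
	switch (priv->hw_addr_width) {
	case 32:
		mask = DMA_BIT_MASK(32);
		break;
	case 48:
		mask = DMA_BIT_MASK(48);
		break;
	case 40:
	default:
		mask = DMA_BIT_MASK(40);
		break;
	}

	/* Sets both the streaming and coherent DMA masks in one call. */
	ret = dma_set_mask_and_coherent(priv->dev, mask);
	if (ret)
		dev_err(priv->dev, "dma_set_mask_and_coherent() failed: %d\n", ret);

	return ret;
}

Doing this once at probe time, after the hardware feature registers have been read, and only when no dma-mask was supplied through the device tree, is the behavior this revert restores; a mask narrower than what the SoC supports is generally safe, which is why 40 bits serves as the default.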