From eec9650f5859450f7f4810298fd908650046df6f Mon Sep 17 00:00:00 2001
From: Suresh Venkatachalam
Date: Wed, 18 Jan 2023 02:01:37 +0000
Subject: [PATCH] nvidia: remove mempool support for UFS devices

Remove mempool support for UFS devices for which pass-through commands
are not supported.

Bug 3870621
Bug 3941252

Change-Id: Ifd1be5c4ebeb6f670f0b29f6f19e5ce458446ee7
Signed-off-by: Manish Bhardwaj
Signed-off-by: Suresh Venkatachalam
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2845274
GVS: Gerrit_Virtual_Submit
Reviewed-by: Bitan Biswas
Reviewed-by: Sanjith T D
Reviewed-by: Sandeep Trasi
Reviewed-by: svcacv
---
 .../block/tegra_virt_storage/tegra_hv_vblk.c  | 76 ++++++++++---------
 1 file changed, 42 insertions(+), 34 deletions(-)

diff --git a/drivers/block/tegra_virt_storage/tegra_hv_vblk.c b/drivers/block/tegra_virt_storage/tegra_hv_vblk.c
index 69f7740d..7fc82fbb 100644
--- a/drivers/block/tegra_virt_storage/tegra_hv_vblk.c
+++ b/drivers/block/tegra_virt_storage/tegra_hv_vblk.c
@@ -35,6 +35,8 @@
 #include
 #include "tegra_vblk.h"
 
+#define UFS_IOCTL_MAX_SIZE_SUPPORTED 0x80000
+
 static int vblk_major;
 
 /**
@@ -791,6 +793,7 @@ static void setup_device(struct vblk_dev *vblkdev)
 	uint32_t max_ioctl_requests;
 	struct vsc_request *req;
 	int ret;
+	struct tegra_hv_ivm_cookie *ivmk;
 
 	vblkdev->size = vblkdev->config.blk_config.num_blks *
@@ -847,18 +850,52 @@ static void setup_device(struct vblk_dev *vblkdev)
 		return;
 	}
 
+	/* reserve mempool for eMMC device and for ufs device
+	 * with pass through support
+	 */
+	if ((vblkdev->config.blk_config.use_vm_address == 1U
+		&& vblkdev->config.blk_config.req_ops_supported & VS_BLK_IOCTL_OP_F)
+		|| vblkdev->config.blk_config.use_vm_address == 0U) {
+		if (of_property_read_u32_index(vblkdev->device->of_node, "mempool", 0,
+				&(vblkdev->ivm_id))) {
+			dev_err(vblkdev->device, "Failed to read mempool property\n");
+			return;
+		}
+
+		ivmk = tegra_hv_mempool_reserve(vblkdev->ivm_id);
+		if (IS_ERR_OR_NULL(ivmk)) {
+			dev_err(vblkdev->device, "Failed to reserve IVM channel %d\n",
+				vblkdev->ivm_id);
+			ivmk = NULL;
+			return;
+		}
+		vblkdev->ivmk = ivmk;
+
+		vblkdev->shared_buffer = devm_memremap(vblkdev->device,
+				ivmk->ipa, ivmk->size, MEMREMAP_WB);
+		if (IS_ERR_OR_NULL(vblkdev->shared_buffer)) {
+			dev_err(vblkdev->device, "Failed to map mempool area %d\n",
+				vblkdev->ivm_id);
+			tegra_hv_mempool_unreserve(vblkdev->ivmk);
+			return;
+		}
+	}
+
 	/* If IOVA feature is enabled for virt partition, then set max_requests
 	 * to number of IVC frames. Since IOCTL's still use mempool, set
 	 * max_ioctl_requests based on mempool.
 	 */
 	if (vblkdev->config.blk_config.use_vm_address == 1U) {
 		max_requests = vblkdev->ivck->nframes;
-		max_ioctl_requests = ((vblkdev->ivmk->size) / max_io_bytes);
-		if (max_ioctl_requests > MAX_VSC_REQS)
-			max_ioctl_requests = MAX_VSC_REQS;
+		/* set max_ioctl_requests if pass through is supported */
+		if (vblkdev->config.blk_config.req_ops_supported & VS_BLK_IOCTL_OP_F) {
+			max_ioctl_requests = ((vblkdev->ivmk->size) / UFS_IOCTL_MAX_SIZE_SUPPORTED);
+			if (max_ioctl_requests > MAX_VSC_REQS)
+				max_ioctl_requests = MAX_VSC_REQS;
+		}
 	} else {
 		max_requests = ((vblkdev->ivmk->size) / max_io_bytes);
-		max_ioctl_requests = max_requests;
+		max_ioctl_requests = max_requests;
 	}
 
 	if (max_requests < MAX_VSC_REQS) {
@@ -1077,7 +1114,6 @@ static int tegra_hv_vblk_probe(struct platform_device *pdev)
 	struct vblk_dev *vblkdev;
 	struct device *dev = &pdev->dev;
 	int ret;
-	struct tegra_hv_ivm_cookie *ivmk;
 
 	if (!is_tegra_hypervisor_mode()) {
 		dev_err(dev, "Hypervisor is not present\n");
@@ -1115,12 +1151,6 @@ static int tegra_hv_vblk_probe(struct platform_device *pdev)
 			ret = -ENODEV;
 			goto fail;
 		}
-		if (of_property_read_u32_index(vblk_node, "mempool", 0,
-				&(vblkdev->ivm_id))) {
-			dev_err(dev, "Failed to read mempool property\n");
-			ret = -ENODEV;
-			goto fail;
-		}
 	}
 
 	vblkdev->ivck = tegra_hv_ivc_reserve(NULL, vblkdev->ivc_id, NULL);
@@ -1132,25 +1162,6 @@ static int tegra_hv_vblk_probe(struct platform_device *pdev)
 		goto fail;
 	}
 
-	ivmk = tegra_hv_mempool_reserve(vblkdev->ivm_id);
-	if (IS_ERR_OR_NULL(ivmk)) {
-		dev_err(dev, "Failed to reserve IVM channel %d\n",
-			vblkdev->ivm_id);
-		ivmk = NULL;
-		ret = -ENODEV;
-		goto free_ivc;
-	}
-	vblkdev->ivmk = ivmk;
-
-	vblkdev->shared_buffer = devm_memremap(vblkdev->device,
-			ivmk->ipa, ivmk->size, MEMREMAP_WB);
-	if (IS_ERR_OR_NULL(vblkdev->shared_buffer)) {
-		dev_err(dev, "Failed to map mempool area %d\n",
-			vblkdev->ivm_id);
-		ret = -ENOMEM;
-		goto free_mempool;
-	}
-
 	vblkdev->initialized = false;
 
 	vblkdev->wq = alloc_workqueue("vblk_req_wq%d",
@@ -1159,7 +1170,7 @@ static int tegra_hv_vblk_probe(struct platform_device *pdev)
 	if (vblkdev->wq == NULL) {
 		dev_err(dev, "Failed to allocate workqueue\n");
 		ret = -ENOMEM;
-		goto free_mempool;
+		goto free_ivc;
 	}
 
 	init_completion(&vblkdev->req_queue_empty);
@@ -1189,9 +1200,6 @@ static int tegra_hv_vblk_probe(struct platform_device *pdev)
 free_wq:
 	destroy_workqueue(vblkdev->wq);
 
-free_mempool:
-	tegra_hv_mempool_unreserve(vblkdev->ivmk);
-
 free_ivc:
 	tegra_hv_ivc_unreserve(vblkdev->ivck);
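
Reviewer note (not part of the patch): below is a minimal standalone sketch of the gating and sizing
logic this change introduces, for reasoning about it outside the driver. Only
UFS_IOCTL_MAX_SIZE_SUPPORTED (0x80000) comes from the patch itself; the stand-in struct, the helper
names vblk_needs_mempool() and vblk_compute_request_limits(), the bit value of VS_BLK_IOCTL_OP_F,
the value of MAX_VSC_REQS, and the example sizes in main() are illustrative assumptions. In the UFS
path, the IOCTL slot count is now derived from the fixed 0x80000 (512 KiB) per-request ceiling rather
than from max_io_bytes, and is only computed at all when VS_BLK_IOCTL_OP_F is advertised.

/* Standalone sketch; builds with any C99 compiler. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define UFS_IOCTL_MAX_SIZE_SUPPORTED	0x80000U	/* 512 KiB, from the patch */
#define VS_BLK_IOCTL_OP_F		(1U << 4)	/* assumed bit position, illustration only */
#define MAX_VSC_REQS			32U		/* assumed cap, illustration only */

/* Reduced stand-in for the relevant fields of the virtual-storage config. */
struct vblk_blk_config {
	uint32_t use_vm_address;	/* 1: UFS/IOVA partition, 0: eMMC/mempool partition */
	uint32_t req_ops_supported;	/* bitmask of operations the server supports */
};

/* Mempool is reserved for eMMC partitions, and for UFS partitions only when
 * pass-through (IOCTL) commands are supported; this is the condition the
 * patch adds in setup_device(). */
static bool vblk_needs_mempool(const struct vblk_blk_config *cfg)
{
	if (cfg->use_vm_address == 0U)
		return true;
	return (cfg->req_ops_supported & VS_BLK_IOCTL_OP_F) != 0U;
}

/* Mirrors the max_requests / max_ioctl_requests computation after the patch. */
static void vblk_compute_request_limits(const struct vblk_blk_config *cfg,
					uint64_t mempool_size, uint32_t ivc_nframes,
					uint32_t max_io_bytes,
					uint32_t *max_requests, uint32_t *max_ioctl_requests)
{
	*max_ioctl_requests = 0U;

	if (cfg->use_vm_address == 1U) {
		/* IOVA path: data requests are bounded by the number of IVC frames. */
		*max_requests = ivc_nframes;
		/* IOCTLs still use the mempool, so size them from it, but only
		 * when pass-through is supported (otherwise no mempool exists). */
		if (cfg->req_ops_supported & VS_BLK_IOCTL_OP_F) {
			*max_ioctl_requests =
				(uint32_t)(mempool_size / UFS_IOCTL_MAX_SIZE_SUPPORTED);
			if (*max_ioctl_requests > MAX_VSC_REQS)
				*max_ioctl_requests = MAX_VSC_REQS;
		}
	} else {
		/* eMMC path: everything is backed by the mempool. */
		*max_requests = (uint32_t)(mempool_size / max_io_bytes);
		*max_ioctl_requests = *max_requests;
	}
}

int main(void)
{
	const struct vblk_blk_config cfgs[] = {
		{ .use_vm_address = 1U, .req_ops_supported = 0U },		   /* UFS, no pass-through */
		{ .use_vm_address = 1U, .req_ops_supported = VS_BLK_IOCTL_OP_F }, /* UFS with pass-through */
		{ .use_vm_address = 0U, .req_ops_supported = 0U },		   /* eMMC */
	};
	const char *names[] = { "UFS w/o pass-through", "UFS w/ pass-through", "eMMC" };
	uint64_t mempool_size = 4U << 20;	/* example: 4 MiB mempool */
	uint32_t max_reqs, max_ioctl_reqs;

	for (unsigned int i = 0; i < 3; i++) {
		vblk_compute_request_limits(&cfgs[i], mempool_size, 16U, 0x20000U,
					    &max_reqs, &max_ioctl_reqs);
		printf("%-20s mempool=%d max_requests=%u max_ioctl_requests=%u\n",
		       names[i], vblk_needs_mempool(&cfgs[i]),
		       (unsigned int)max_reqs, (unsigned int)max_ioctl_reqs);
	}
	return 0;
}

With the example 4 MiB mempool, the UFS pass-through case yields 8 IOCTL slots (4 MiB / 512 KiB),
the UFS case without pass-through reserves no mempool and gets none, and the eMMC case keeps the
pre-existing max_io_bytes-based sizing.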