nvidia: remove mempool support for UFS devices

Remove mempool support for UFS devices that do not support
pass-through commands.

Bug 3870621
Bug 3941252

Change-Id: Ifd1be5c4ebeb6f670f0b29f6f19e5ce458446ee7
Signed-off-by: Manish Bhardwaj <mbhardwaj@nvidia.com>
Signed-off-by: Suresh Venkatachalam <skathirampat@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2845274
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Bitan Biswas <bbiswas@nvidia.com>
Reviewed-by: Sanjith T D <std@nvidia.com>
Reviewed-by: Sandeep Trasi <strasi@nvidia.com>
Reviewed-by: svcacv <svcacv@nvidia.com>
Author: Suresh Venkatachalam
Date:   2023-01-18 02:01:37 +00:00
Committed by: mobile promotions
Parent: 04ba246081
Commit: eec9650f58


@@ -35,6 +35,8 @@
 #include <linux/version.h>
 #include "tegra_vblk.h"
+#define UFS_IOCTL_MAX_SIZE_SUPPORTED 0x80000
 static int vblk_major;
 /**
@@ -791,6 +793,7 @@ static void setup_device(struct vblk_dev *vblkdev)
 	uint32_t max_ioctl_requests;
 	struct vsc_request *req;
 	int ret;
+	struct tegra_hv_ivm_cookie *ivmk;
 	vblkdev->size =
 		vblkdev->config.blk_config.num_blks *
@@ -847,15 +850,49 @@ static void setup_device(struct vblk_dev *vblkdev)
 		return;
 	}
+	/* reserve mempool for eMMC device and for ufs device
+	 * with pass through support
+	 */
+	if ((vblkdev->config.blk_config.use_vm_address == 1U
+		&& vblkdev->config.blk_config.req_ops_supported & VS_BLK_IOCTL_OP_F)
+		|| vblkdev->config.blk_config.use_vm_address == 0U) {
+		if (of_property_read_u32_index(vblkdev->device->of_node, "mempool", 0,
+			&(vblkdev->ivm_id))) {
+			dev_err(vblkdev->device, "Failed to read mempool property\n");
+			return;
+		}
+		ivmk = tegra_hv_mempool_reserve(vblkdev->ivm_id);
+		if (IS_ERR_OR_NULL(ivmk)) {
+			dev_err(vblkdev->device, "Failed to reserve IVM channel %d\n",
+				vblkdev->ivm_id);
+			ivmk = NULL;
+			return;
+		}
+		vblkdev->ivmk = ivmk;
+		vblkdev->shared_buffer = devm_memremap(vblkdev->device,
+			ivmk->ipa, ivmk->size, MEMREMAP_WB);
+		if (IS_ERR_OR_NULL(vblkdev->shared_buffer)) {
+			dev_err(vblkdev->device, "Failed to map mempool area %d\n",
+				vblkdev->ivm_id);
+			tegra_hv_mempool_unreserve(vblkdev->ivmk);
+			return;
+		}
+	}
 	/* If IOVA feature is enabled for virt partition, then set max_requests
 	 * to number of IVC frames. Since IOCTL's still use mempool, set
 	 * max_ioctl_requests based on mempool.
 	 */
 	if (vblkdev->config.blk_config.use_vm_address == 1U) {
 		max_requests = vblkdev->ivck->nframes;
-		max_ioctl_requests = ((vblkdev->ivmk->size) / max_io_bytes);
+		/* set max_ioctl_requests if pass through is supported */
+		if (vblkdev->config.blk_config.req_ops_supported & VS_BLK_IOCTL_OP_F) {
+			max_ioctl_requests = ((vblkdev->ivmk->size) / UFS_IOCTL_MAX_SIZE_SUPPORTED);
 			if (max_ioctl_requests > MAX_VSC_REQS)
 				max_ioctl_requests = MAX_VSC_REQS;
+		}
 	} else {
 		max_requests = ((vblkdev->ivmk->size) / max_io_bytes);
 		max_ioctl_requests = max_requests;
@@ -1077,7 +1114,6 @@ static int tegra_hv_vblk_probe(struct platform_device *pdev)
 	struct vblk_dev *vblkdev;
 	struct device *dev = &pdev->dev;
 	int ret;
-	struct tegra_hv_ivm_cookie *ivmk;
 	if (!is_tegra_hypervisor_mode()) {
 		dev_err(dev, "Hypervisor is not present\n");
@@ -1115,12 +1151,6 @@ static int tegra_hv_vblk_probe(struct platform_device *pdev)
 			ret = -ENODEV;
 			goto fail;
 		}
-		if (of_property_read_u32_index(vblk_node, "mempool", 0,
-			&(vblkdev->ivm_id))) {
-			dev_err(dev, "Failed to read mempool property\n");
-			ret = -ENODEV;
-			goto fail;
-		}
 	}
 	vblkdev->ivck = tegra_hv_ivc_reserve(NULL, vblkdev->ivc_id, NULL);
@@ -1132,25 +1162,6 @@ static int tegra_hv_vblk_probe(struct platform_device *pdev)
 		goto fail;
 	}
-	ivmk = tegra_hv_mempool_reserve(vblkdev->ivm_id);
-	if (IS_ERR_OR_NULL(ivmk)) {
-		dev_err(dev, "Failed to reserve IVM channel %d\n",
-			vblkdev->ivm_id);
-		ivmk = NULL;
-		ret = -ENODEV;
-		goto free_ivc;
-	}
-	vblkdev->ivmk = ivmk;
-	vblkdev->shared_buffer = devm_memremap(vblkdev->device,
-		ivmk->ipa, ivmk->size, MEMREMAP_WB);
-	if (IS_ERR_OR_NULL(vblkdev->shared_buffer)) {
-		dev_err(dev, "Failed to map mempool area %d\n",
-			vblkdev->ivm_id);
-		ret = -ENOMEM;
-		goto free_mempool;
-	}
 	vblkdev->initialized = false;
 	vblkdev->wq = alloc_workqueue("vblk_req_wq%d",
@@ -1159,7 +1170,7 @@ static int tegra_hv_vblk_probe(struct platform_device *pdev)
 	if (vblkdev->wq == NULL) {
 		dev_err(dev, "Failed to allocate workqueue\n");
 		ret = -ENOMEM;
-		goto free_mempool;
+		goto free_ivc;
 	}
 	init_completion(&vblkdev->req_queue_empty);
@@ -1189,9 +1200,6 @@ static int tegra_hv_vblk_probe(struct platform_device *pdev)
 free_wq:
 	destroy_workqueue(vblkdev->wq);
-free_mempool:
-	tegra_hv_mempool_unreserve(vblkdev->ivmk);
 free_ivc:
 	tegra_hv_ivc_unreserve(vblkdev->ivck);
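
For reference, the gating condition this patch moves into setup_device() can be summarized by the standalone sketch below. It models only the decision of when to reserve the mempool: eMMC devices always do, UFS (IOVA) devices only when pass-through commands are supported. The structure layout and the bit value chosen for VS_BLK_IOCTL_OP_F here are illustrative assumptions, not copied from the driver headers, and no kernel APIs are used.

/* Standalone sketch of the mempool gating logic introduced in setup_device().
 * VS_BLK_IOCTL_OP_F is a placeholder value; the real flag is defined in the
 * virtual-storage headers, which are not part of this diff.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VS_BLK_IOCTL_OP_F (1U << 5)   /* placeholder bit value (assumption) */

struct blk_cfg {
	uint32_t use_vm_address;      /* 1: UFS/IOVA path, 0: eMMC/mempool path */
	uint32_t req_ops_supported;   /* bitmask of supported request operations */
};

/* Reserve the mempool for eMMC devices, and for UFS devices only when
 * pass-through (IOCTL) commands are supported.
 */
static bool needs_mempool(const struct blk_cfg *cfg)
{
	return (cfg->use_vm_address == 1U &&
		(cfg->req_ops_supported & VS_BLK_IOCTL_OP_F)) ||
	       cfg->use_vm_address == 0U;
}

int main(void)
{
	struct blk_cfg ufs_no_pt = { .use_vm_address = 1U, .req_ops_supported = 0U };
	struct blk_cfg ufs_pt    = { .use_vm_address = 1U, .req_ops_supported = VS_BLK_IOCTL_OP_F };
	struct blk_cfg emmc      = { .use_vm_address = 0U, .req_ops_supported = 0U };

	printf("UFS without pass-through: %d\n", needs_mempool(&ufs_no_pt)); /* 0: mempool skipped */
	printf("UFS with pass-through:    %d\n", needs_mempool(&ufs_pt));    /* 1: mempool reserved */
	printf("eMMC:                     %d\n", needs_mempool(&emmc));      /* 1: mempool reserved */
	return 0;
}

When the mempool is reserved on the UFS pass-through path, the patch sizes the IOCTL request pool as ivmk->size / UFS_IOCTL_MAX_SIZE_SUPPORTED (0x80000), capped at MAX_VSC_REQS; devices that fail the condition above never touch vblkdev->ivmk, which is why the probe-time reservation and the free_mempool error label are removed.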