storage: Update VSC driver for handling erase

1. If vs_blk_dev_config.req_ops_supported has only DISCARD
   support enabled, then REQ_OP_DISCARD should be mapped to
   DISCARD for UFS.

2. If vs_blk_dev_config.req_ops_supported has only SECURE_ERASE
   support enabled, then REQ_OP_DISCARD and REQ_OP_SECURE_ERASE
   both should be mapped to SECURE_ERASE for UFS.

3. If vs_blk_dev_config.req_ops_supported has only ERASE
   support enabled, then REQ_OP_DISCARD and REQ_OP_SECURE_ERASE
   both should be mapped to ERASE for UFS.

Bug 4176555

Signed-off-by: Manish Bhardwaj <mbhardwaj@nvidia.com>
Change-Id: I01599df9ab93525120106dfabf2d345ab8b64770
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2920433
Reviewed-by: Sanjith T D <std@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Sandeep Trasi <strasi@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
This commit is contained in:
Manish Bhardwaj
2023-07-10 15:28:54 +05:30
committed by mobile promotions
parent 6fa354a3b2
commit 7e764afb5f
2 changed files with 49 additions and 9 deletions

View File

@@ -38,6 +38,9 @@
#endif
#include "tegra_vblk.h"
#define DISCARD_ERASE_SECERASE_MASK (VS_BLK_DISCARD_OP_F | \
VS_BLK_SECURE_ERASE_OP_F | \
VS_BLK_ERASE_OP_F)
#define UFS_IOCTL_MAX_SIZE_SUPPORTED 0x80000
#define READ_WRITE_OR_IOCTL_OP (req_op(bio_req) == REQ_OP_READ \
|| req_op(bio_req) == REQ_OP_WRITE \
@@ -504,6 +507,23 @@ static bool bio_req_sanity_check(struct vblk_dev *vblkdev,
return true;
}
/**
 * cleanup_op_supported - pick the erase-type command the VSC server accepts.
 * @vblkdev:       virtual block device (used only for error logging here)
 * @ops_supported: req_ops_supported bitmask from vs_blk_dev_config
 *
 * For UFS, a guest REQ_OP_DISCARD / REQ_OP_SECURE_ERASE must be mapped onto
 * whichever single cleanup operation the virtual storage server advertises:
 * exactly one of DISCARD, SECURE_ERASE or ERASE is expected to be set in
 * @ops_supported (the == comparisons below deliberately reject combinations,
 * per the supported-configuration contract).
 *
 * Return: the matching VS_BLK_* command, or VS_UNKNOWN_BLK_CMD if none (or
 * more than one) of the three cleanup flags is set; callers must check for
 * VS_UNKNOWN_BLK_CMD and fail the bio request.
 */
static enum blk_cmd_op cleanup_op_supported(struct vblk_dev *vblkdev, uint32_t ops_supported)
{
	enum blk_cmd_op cleanup_op = VS_UNKNOWN_BLK_CMD;

	/* Exactly-one-flag match: secure erase only */
	if ((ops_supported & DISCARD_ERASE_SECERASE_MASK) == VS_BLK_SECURE_ERASE_OP_F)
		cleanup_op = VS_BLK_SECURE_ERASE;
	/* Exactly-one-flag match: erase only */
	else if ((ops_supported & DISCARD_ERASE_SECERASE_MASK) == VS_BLK_ERASE_OP_F)
		cleanup_op = VS_BLK_ERASE;
	/* Exactly-one-flag match: discard only */
	else if ((ops_supported & DISCARD_ERASE_SECERASE_MASK) == VS_BLK_DISCARD_OP_F)
		cleanup_op = VS_BLK_DISCARD;
	else
		/* Zero or multiple cleanup flags set: unsupported configuration */
		dev_err(vblkdev->device,
			"None of discard/erase/secure-erase is supported!\n");

	return cleanup_op;
}
/**
* submit_bio_req: Fetch a bio request and submit it to
* server for processing.
@@ -521,6 +541,7 @@ static bool submit_bio_req(struct vblk_dev *vblkdev)
struct req_entry *entry = NULL;
size_t sz;
uint32_t sg_cnt, __data_len;
uint32_t ops_supported = vblkdev->config.blk_config.req_ops_supported;
dma_addr_t sg_dma_addr = 0;
/* Check if ivc queue is full */
@@ -603,9 +624,23 @@ static bool submit_bio_req(struct vblk_dev *vblkdev)
} else if (req_op(bio_req) == REQ_OP_FLUSH) {
vs_req->blkdev_req.req_op = VS_BLK_FLUSH;
} else if (req_op(bio_req) == REQ_OP_DISCARD) {
if (vblkdev->config.phys_dev == VSC_DEV_UFS) {
vs_req->blkdev_req.req_op =
cleanup_op_supported(vblkdev, ops_supported);
if (vs_req->blkdev_req.req_op == VS_UNKNOWN_BLK_CMD)
goto bio_exit;
} else {
vs_req->blkdev_req.req_op = VS_BLK_DISCARD;
}
} else if (req_op(bio_req) == REQ_OP_SECURE_ERASE) {
if (vblkdev->config.phys_dev == VSC_DEV_UFS) {
vs_req->blkdev_req.req_op =
cleanup_op_supported(vblkdev, ops_supported);
if (vs_req->blkdev_req.req_op == VS_UNKNOWN_BLK_CMD)
goto bio_exit;
} else {
vs_req->blkdev_req.req_op = VS_BLK_SECURE_ERASE;
}
} else {
dev_err(vblkdev->device,
"Request direction is not read/write!\n");
@@ -1173,8 +1208,16 @@ static void setup_device(struct vblk_dev *vblkdev)
blk_queue_max_hw_sectors(vblkdev->queue, max_io_bytes / SECTOR_SIZE);
blk_queue_flag_set(QUEUE_FLAG_NONROT, vblkdev->queue);
if (vblkdev->config.blk_config.req_ops_supported
& VS_BLK_DISCARD_OP_F) {
#if KERNEL_VERSION(5, 19, 0) > LINUX_VERSION_CODE
if ((vblkdev->config.blk_config.req_ops_supported & VS_BLK_SECURE_ERASE_OP_F)
|| (vblkdev->config.blk_config.req_ops_supported & VS_BLK_ERASE_OP_F))
blk_queue_flag_set(QUEUE_FLAG_SECERASE, vblkdev->queue);
#endif
if ((vblkdev->config.blk_config.req_ops_supported & VS_BLK_DISCARD_OP_F)
|| (((vblkdev->config.blk_config.req_ops_supported & VS_BLK_SECURE_ERASE_OP_F)
|| (vblkdev->config.blk_config.req_ops_supported & VS_BLK_ERASE_OP_F))
&& vblkdev->config.phys_dev == VSC_DEV_UFS)) {
#if KERNEL_VERSION(5, 19, 0) > LINUX_VERSION_CODE
blk_queue_flag_set(QUEUE_FLAG_DISCARD, vblkdev->queue);
#endif
@@ -1182,11 +1225,6 @@ static void setup_device(struct vblk_dev *vblkdev)
vblkdev->config.blk_config.max_erase_blks_per_io);
vblkdev->queue->limits.discard_granularity =
vblkdev->config.blk_config.hardblk_size;
#if KERNEL_VERSION(5, 19, 0) > LINUX_VERSION_CODE
if (vblkdev->config.blk_config.req_ops_supported &
VS_BLK_SECURE_ERASE_OP_F)
blk_queue_flag_set(QUEUE_FLAG_SECERASE, vblkdev->queue);
#endif
}
/* And the gendisk structure. */

View File

@@ -46,6 +46,7 @@ enum blk_cmd_op {
VS_BLK_DISCARD = 4,
VS_BLK_SECURE_ERASE = 5,
VS_BLK_IOCTL = 6,
VS_BLK_ERASE = 7,
VS_BLK_INVAL_REQ = 32,
VS_UNKNOWN_BLK_CMD = 0xffffffff,
};
@@ -57,6 +58,7 @@ enum blk_cmd_op {
#define VS_BLK_DISCARD_OP_F (1 << VS_BLK_DISCARD)
#define VS_BLK_SECURE_ERASE_OP_F (1 << VS_BLK_SECURE_ERASE)
#define VS_BLK_IOCTL_OP_F (1 << VS_BLK_IOCTL)
#define VS_BLK_ERASE_OP_F (1 << VS_BLK_ERASE)
#define VS_BLK_READ_ONLY_MASK ~(VS_BLK_READ_OP_F)
#pragma pack(push)