diff --git a/drivers/Makefile b/drivers/Makefile index c0b3e0e9..a65546b6 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -3,6 +3,7 @@ LINUXINCLUDE += -I$(srctree.nvidia-oot)/include +obj-m += block/tegra_virt_storage/ obj-m += crypto/ obj-m += devfreq/ obj-m += dma/ diff --git a/drivers/block/tegra_virt_storage/Makefile b/drivers/block/tegra_virt_storage/Makefile new file mode 100644 index 00000000..7cf17404 --- /dev/null +++ b/drivers/block/tegra_virt_storage/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +# +# Makefile for Virtual Storage Driver +# + +tegra_vblk-y += tegra_hv_vblk.o +tegra_vblk-y += tegra_hv_ioctl.o +tegra_vblk-y += tegra_hv_mmc.o +tegra_vblk-y += tegra_hv_scsi.o +tegra_vblk-y += tegra_hv_ufs.o +obj-m += tegra_vblk.o diff --git a/drivers/block/tegra_virt_storage/tegra_hv_ioctl.c b/drivers/block/tegra_virt_storage/tegra_hv_ioctl.c new file mode 100644 index 00000000..2c855bea --- /dev/null +++ b/drivers/block/tegra_virt_storage/tegra_hv_ioctl.c @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#include +#include +#include +#include +#include /* printk() */ +#include /* everything... */ +#include /* error codes */ +#include +#include /* kmalloc() */ +#include +#include +#include +#include +#include "tegra_vblk.h" + +int vblk_complete_ioctl_req(struct vblk_dev *vblkdev, + struct vsc_request *vsc_req, int status) +{ + struct vblk_ioctl_req *ioctl_req = vsc_req->ioctl_req; + int32_t ret = 0; + + if (ioctl_req == NULL) { + dev_err(vblkdev->device, + "Invalid ioctl request for completion!\n"); + ret = -EINVAL; + goto comp_exit; + } + + ioctl_req->status = status; + memcpy(ioctl_req->ioctl_buf, vsc_req->mempool_virt, + ioctl_req->ioctl_len); +comp_exit: + return ret; +} + +int vblk_prep_ioctl_req(struct vblk_dev *vblkdev, + struct vblk_ioctl_req *ioctl_req, + struct vsc_request *vsc_req) +{ + int32_t ret = 0; + struct vs_request *vs_req; + + if (ioctl_req == NULL) { + dev_err(vblkdev->device, + "Invalid ioctl request for preparation!\n"); + return -EINVAL; + } + + + if (ioctl_req->ioctl_len > vsc_req->mempool_len) { + dev_err(vblkdev->device, + "Ioctl length exceeding mempool length!\n"); + return -EINVAL; + } + + if (ioctl_req->ioctl_buf == NULL) { + dev_err(vblkdev->device, + "Ioctl buffer invalid!\n"); + return -EINVAL; + } + + vs_req = &vsc_req->vs_req; + vs_req->blkdev_req.req_op = VS_BLK_IOCTL; + memcpy(vsc_req->mempool_virt, ioctl_req->ioctl_buf, + ioctl_req->ioctl_len); + vs_req->blkdev_req.ioctl_req.ioctl_id = ioctl_req->ioctl_id; + vs_req->blkdev_req.ioctl_req.data_offset = vsc_req->mempool_offset; + vs_req->blkdev_req.ioctl_req.ioctl_len = ioctl_req->ioctl_len; + + vsc_req->ioctl_req = ioctl_req; + + return ret; +} + +int vblk_submit_ioctl_req(struct block_device *bdev, + unsigned int cmd, void __user *user) +{ + struct vblk_dev *vblkdev = bdev->bd_disk->private_data; + struct vblk_ioctl_req *ioctl_req = NULL; + struct request *rq; + int err; + + /* + * The caller must have CAP_SYS_RAWIO, and must be calling this on the + * whole block device, not on a partition. This prevents overspray + * between sibling partitions. 
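+	 * Supported commands (SG_IO, MMC_IOC_CMD/MMC_IOC_MULTI_CMD and
+	 * UFS_IOCTL_COMBO_QUERY) are repacked by the vblk_prep_*() helpers,
+	 * executed as a driver-private REQ_OP_DRV_IN request that reaches the
+	 * storage server over IVC, and copied back to user space by the
+	 * matching vblk_complete_*() helpers.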
+ */ + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + + ioctl_req = kmalloc(sizeof(struct vblk_ioctl_req), GFP_KERNEL); + if (!ioctl_req) { + dev_err(vblkdev->device, + "failed to alloc memory for ioctl req!\n"); + return -ENOMEM; + } + + switch (cmd) { + case SG_IO: + err = vblk_prep_sg_io(vblkdev, ioctl_req, + user); + break; + case MMC_IOC_MULTI_CMD: + case MMC_IOC_CMD: + err = vblk_prep_mmc_multi_ioc(vblkdev, ioctl_req, + user, cmd); + break; + case UFS_IOCTL_COMBO_QUERY: + err = vblk_prep_ufs_combo_ioc(vblkdev, ioctl_req, + user, cmd); + break; + default: + dev_err(vblkdev->device, "unsupported command %x!\n", cmd); + err = -EINVAL; + goto free_ioctl_req; + } + + if (err) + goto free_ioctl_req; + +#if KERNEL_VERSION(5, 16, 0) >= LINUX_VERSION_CODE + rq = blk_get_request(vblkdev->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT); +#else + rq = blk_mq_alloc_request(vblkdev->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT); +#endif + if (IS_ERR_OR_NULL(rq)) { + dev_err(vblkdev->device, + "Failed to get handle to a request!\n"); + err = PTR_ERR(rq); + goto free_ioctl_req; + } + + rq->completion_data = (void *)ioctl_req; + +#if KERNEL_VERSION(5, 16, 0) >= LINUX_VERSION_CODE + blk_execute_rq(vblkdev->gd, rq, 0); + blk_put_request(rq); +#else + blk_execute_rq(rq, 0); + blk_mq_free_request(rq); +#endif + + switch (cmd) { + case SG_IO: + err = vblk_complete_sg_io(vblkdev, ioctl_req, + user); + break; + case MMC_IOC_MULTI_CMD: + case MMC_IOC_CMD: + err = vblk_complete_mmc_multi_ioc(vblkdev, ioctl_req, + user, cmd); + break; + case UFS_IOCTL_COMBO_QUERY: + err = vblk_complete_ufs_combo_ioc(vblkdev, ioctl_req, + user, cmd); + break; + default: + dev_err(vblkdev->device, "unsupported command %x!\n", cmd); + err = -EINVAL; + goto free_ioctl_req; + } + +free_ioctl_req: + if (ioctl_req) + kfree(ioctl_req); + + return err; +} + +/* The ioctl() implementation */ +int vblk_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + int ret; + struct vblk_dev *vblkdev = bdev->bd_disk->private_data; + + mutex_lock(&vblkdev->ioctl_lock); + switch (cmd) { + case MMC_IOC_MULTI_CMD: + case MMC_IOC_CMD: + case SG_IO: + case UFS_IOCTL_COMBO_QUERY: + ret = vblk_submit_ioctl_req(bdev, cmd, + (void __user *)arg); + break; + default: /* unknown command */ + ret = -ENOTTY; + break; + } + mutex_unlock(&vblkdev->ioctl_lock); + + return ret; +} diff --git a/drivers/block/tegra_virt_storage/tegra_hv_mmc.c b/drivers/block/tegra_virt_storage/tegra_hv_mmc.c new file mode 100644 index 00000000..38582bf1 --- /dev/null +++ b/drivers/block/tegra_virt_storage/tegra_hv_mmc.c @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#include /* printk() */ +#include /* kmalloc() */ +#include /* everything... 
*/ +#include /* error codes */ +#include /* O_ACCMODE */ +#include +#include +#include +#include +#include "tegra_vblk.h" + +#define VBLK_MMC_MAX_IOC_SIZE (256 * 1024) + +int vblk_prep_mmc_multi_ioc(struct vblk_dev *vblkdev, + struct vblk_ioctl_req *ioctl_req, + void __user *user, + uint32_t cmd) +{ + int err = 0; + struct combo_info_t *combo_info; + struct combo_cmd_t *combo_cmd; + int i = 0; + uint64_t num_cmd; + struct mmc_ioc_cmd ic; + struct mmc_ioc_multi_cmd __user *user_cmd; + struct mmc_ioc_cmd __user *usr_ptr; + uint32_t combo_cmd_size; + uint32_t ioctl_bytes = VBLK_MMC_MAX_IOC_SIZE; + uint8_t *tmpaddr; + void *ioctl_buf; + + ioctl_buf = vmalloc(ioctl_bytes); + if (ioctl_buf == NULL) + return -ENOMEM; + + combo_info = (struct combo_info_t *)ioctl_buf; + combo_cmd_size = sizeof(uint32_t); + + if (cmd == MMC_IOC_MULTI_CMD) { + user_cmd = (struct mmc_ioc_multi_cmd __user *)user; + if (copy_from_user(&num_cmd, &user_cmd->num_of_cmds, + sizeof(num_cmd))) { + err = -EFAULT; + goto free_ioc_buf; + } + + if (num_cmd > MMC_IOC_MAX_CMDS) { + err = -EINVAL; + goto free_ioc_buf; + } + + usr_ptr = (void __user *)&user_cmd->cmds; + } else { + num_cmd = 1; + usr_ptr = (void __user *)user; + } + combo_info->count = num_cmd; + + combo_cmd = (struct combo_cmd_t *)(ioctl_buf + + sizeof(struct combo_info_t)); + + combo_cmd_size = sizeof(struct combo_info_t) + + sizeof(struct combo_cmd_t) * combo_info->count; + if (combo_cmd_size < sizeof(struct combo_info_t)) { + dev_err(vblkdev->device, + "combo_cmd_size is overflowing!\n"); + err = -EINVAL; + goto free_ioc_buf; + } + + if (combo_cmd_size > ioctl_bytes) { + dev_err(vblkdev->device, + " buffer has no enough space to serve ioctl\n"); + err = -EFAULT; + goto free_ioc_buf; + } + + tmpaddr = (uint8_t *)⁣ + for (i = 0; i < combo_info->count; i++) { + if (copy_from_user((void *)tmpaddr, usr_ptr, sizeof(ic))) { + err = -EFAULT; + goto free_ioc_buf; + } + combo_cmd->cmd = ic.opcode; + combo_cmd->arg = ic.arg; + combo_cmd->write_flag = (uint32_t)ic.write_flag; + combo_cmd->data_len = (uint32_t)(ic.blksz * ic.blocks); + combo_cmd->buf_offset = combo_cmd_size; + combo_cmd_size += combo_cmd->data_len; + if ((combo_cmd_size < combo_cmd->data_len) || + (combo_cmd_size > ioctl_bytes)) { + dev_err(vblkdev->device, + " buffer has no enough space to serve ioctl\n"); + err = -EFAULT; + goto free_ioc_buf; + } + + if (ic.write_flag && combo_cmd->data_len) { + if (copy_from_user(( + (void *)ioctl_buf + + combo_cmd->buf_offset), + (void __user *)(unsigned long)ic.data_ptr, + (u64)combo_cmd->data_len)) + { + dev_err(vblkdev->device, + "copy from user failed for data!\n"); + err = -EFAULT; + goto free_ioc_buf; + } + } + combo_cmd++; + usr_ptr++; + } + + ioctl_req->ioctl_id = VBLK_MMC_MULTI_IOC_ID; + ioctl_req->ioctl_buf = ioctl_buf; + ioctl_req->ioctl_len = ioctl_bytes; + +free_ioc_buf: + if (err && ioctl_buf) + vfree(ioctl_buf); + + return err; +} + +int vblk_complete_mmc_multi_ioc(struct vblk_dev *vblkdev, + struct vblk_ioctl_req *ioctl_req, + void __user *user, + uint32_t cmd) +{ + uint64_t num_cmd; + struct mmc_ioc_cmd ic; + struct mmc_ioc_cmd *ic_ptr = ⁣ + struct mmc_ioc_multi_cmd __user *user_cmd; + struct mmc_ioc_cmd __user *usr_ptr; + struct combo_cmd_t *combo_cmd; + uint32_t i; + int err = 0; + void *ioctl_buf = ioctl_req->ioctl_buf; + + if (ioctl_req->status) { + err = ioctl_req->status; + if (ioctl_req->ioctl_buf) + vfree(ioctl_req->ioctl_buf); + goto exit; + } + + if (cmd == MMC_IOC_MULTI_CMD) { + user_cmd = (struct mmc_ioc_multi_cmd __user *)user; + if 
(copy_from_user(&num_cmd, &user_cmd->num_of_cmds, + sizeof(num_cmd))) { + err = -EFAULT; + goto free_ioc_buf; + } + + if (num_cmd > MMC_IOC_MAX_CMDS) { + err = -EINVAL; + goto free_ioc_buf; + } + + usr_ptr = (void __user *)&user_cmd->cmds; + } else { + usr_ptr = (void __user *)user; + num_cmd = 1; + } + + combo_cmd = (struct combo_cmd_t *)(ioctl_buf + + sizeof(struct combo_info_t)); + + for (i = 0; i < num_cmd; i++) { + if (copy_from_user((void *)ic_ptr, usr_ptr, + sizeof(struct mmc_ioc_cmd))) { + err = -EFAULT; + goto free_ioc_buf; + } + + if (copy_to_user(&(usr_ptr->response), combo_cmd->response, + sizeof(combo_cmd->response))) { + err = -EFAULT; + goto free_ioc_buf; + } + + if (!ic.write_flag && combo_cmd->data_len) { + if (copy_to_user( + (void __user *)(unsigned long)ic.data_ptr, + (ioctl_buf + combo_cmd->buf_offset), + (u64)combo_cmd->data_len)) + { + dev_err(vblkdev->device, + "copy to user of ioctl data failed!\n"); + err = -EFAULT; + goto free_ioc_buf; + } + } + combo_cmd++; + usr_ptr++; + } + +free_ioc_buf: + if (ioctl_buf) + vfree(ioctl_buf); + +exit: + return err; +} diff --git a/drivers/block/tegra_virt_storage/tegra_hv_scsi.c b/drivers/block/tegra_virt_storage/tegra_hv_scsi.c new file mode 100644 index 00000000..13adaf68 --- /dev/null +++ b/drivers/block/tegra_virt_storage/tegra_hv_scsi.c @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#include +#include +#include /* printk() */ +#include /* kmalloc() */ +#include /* everything... */ +#include /* error codes */ +#include /* O_ACCMODE */ +#include +#include +#include +#include +#include "tegra_vblk.h" + +int vblk_prep_sg_io(struct vblk_dev *vblkdev, + struct vblk_ioctl_req *ioctl_req, + void __user *user) +{ + int err = 0; + sg_io_hdr_t *hp = NULL; + uint32_t header_len = sizeof(sg_io_hdr_t); + struct vblk_sg_io_hdr *vblk_hp; + uint32_t vblk_sg_header_len = sizeof(struct vblk_sg_io_hdr); + uint32_t cmnd_offset; + void *cmnd; + uint32_t sbp_offset; + void *sbp; + uint32_t data_buf_offset; + uint32_t data_buf_offset_aligned; + void *data_buf; + uint32_t data_buf_size_aligned; + uint32_t ioctl_len; + void *ioctl_buf = NULL; + + hp = kmalloc(header_len, GFP_KERNEL); + if (hp == NULL) { + return -ENOMEM; + } + + if (copy_from_user(hp, user, header_len)) { + err = -EFAULT; + goto free_hp; + } + + if ((!hp->cmdp) || (hp->cmd_len < 6) || + (hp->cmd_len > VBLK_SG_MAX_CMD_LEN)) { + err = -EMSGSIZE; + goto free_hp; + } + + cmnd_offset = vblk_sg_header_len; + + sbp_offset = (cmnd_offset + hp->cmd_len); + if (sbp_offset < cmnd_offset) { + err = - EMSGSIZE; + goto free_hp; + } + + data_buf_offset = (sbp_offset + hp->mx_sb_len); + if (data_buf_offset < sbp_offset) { + err = -EMSGSIZE; + goto free_hp; + } + + data_buf_offset_aligned = ALIGN(data_buf_offset, + vblkdev->config.blk_config.hardblk_size); + if (data_buf_offset_aligned < data_buf_offset) { + err = -EMSGSIZE; + goto free_hp; + } + + data_buf_size_aligned = ALIGN(hp->dxfer_len, + vblkdev->config.blk_config.hardblk_size); + if (data_buf_size_aligned < hp->dxfer_len) { + err = -EMSGSIZE; + goto free_hp; + } + + ioctl_len = data_buf_offset_aligned + data_buf_size_aligned; + if (ioctl_len < data_buf_offset_aligned) { + err = -EMSGSIZE; + goto free_hp; + } + + ioctl_buf = kmalloc(ioctl_len, GFP_KERNEL); + if (ioctl_buf == NULL) { + err = -ENOMEM; + goto free_hp; + } + + vblk_hp = (struct vblk_sg_io_hdr *)(ioctl_buf); + sbp = (ioctl_buf + sbp_offset); + cmnd = (ioctl_buf + 
cmnd_offset); + if (copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) { + err = -EFAULT; + goto free_ioctl_buf; + } + + data_buf = (ioctl_buf + data_buf_offset_aligned); + + switch (hp->dxfer_direction) { + case SG_DXFER_NONE: + vblk_hp->data_direction = SCSI_DATA_NONE; + break; + case SG_DXFER_TO_DEV: + vblk_hp->data_direction = SCSI_TO_DEVICE; + break; + case SG_DXFER_FROM_DEV: + vblk_hp->data_direction = SCSI_FROM_DEVICE; + break; + case SG_DXFER_TO_FROM_DEV: + vblk_hp->data_direction = SCSI_BIDIRECTIONAL; + break; + default: + err = -EBADMSG; + goto free_ioctl_buf; + } + + if ((vblk_hp->data_direction == SCSI_TO_DEVICE) || + (vblk_hp->data_direction == SCSI_BIDIRECTIONAL)) { + if (copy_from_user(data_buf, hp->dxferp, hp->dxfer_len)) { + err = -EFAULT; + goto free_ioctl_buf; + } + } + + vblk_hp->cmd_len = hp->cmd_len; + vblk_hp->mx_sb_len = hp->mx_sb_len; + /* This is actual data len on which storage server needs to act */ + vblk_hp->dxfer_len = hp->dxfer_len; + /* This is the data buffer len, data length is strictly dependent on the + * IOCTL being executed. data_buffer length is atleast cache aligned to + * make sure that cache operations can be done successfully without + * corruption. + * Since Block size is 4K, if it is aligned to blocksize, it will + * indirectly align to cache line. + */ + vblk_hp->dxfer_buf_len = data_buf_size_aligned; + vblk_hp->xfer_arg_offset = data_buf_offset_aligned; + vblk_hp->cmdp_arg_offset = cmnd_offset; + vblk_hp->sbp_arg_offset = sbp_offset; + ioctl_req->ioctl_id = VBLK_SG_IO_ID; + ioctl_req->ioctl_buf = ioctl_buf; + ioctl_req->ioctl_len = ioctl_len; + +free_ioctl_buf: + if (err && ioctl_buf) + kfree(ioctl_buf); + +free_hp: + if (hp) + kfree(hp); + + return err; +} + +int vblk_complete_sg_io(struct vblk_dev *vblkdev, + struct vblk_ioctl_req *ioctl_req, + void __user *user) +{ + sg_io_hdr_t *hp = NULL; + uint32_t header_len = sizeof(sg_io_hdr_t); + struct vblk_sg_io_hdr *vblk_hp; + void *sbp; + void *data_buf; + int err = 0; + + if (ioctl_req->status) { + err = ioctl_req->status; + if (ioctl_req->ioctl_buf) + kfree(ioctl_req->ioctl_buf); + goto exit; + } + + hp = kmalloc(header_len, GFP_KERNEL); + if (hp == NULL) { + return -ENOMEM; + } + + if (copy_from_user(hp, user, header_len)) { + err = -EFAULT; + goto free_hp; + } + + vblk_hp = (struct vblk_sg_io_hdr *)(ioctl_req->ioctl_buf); + hp->status = 0xff & vblk_hp->status; + hp->masked_status = status_byte(vblk_hp->status); + hp->host_status = host_byte(vblk_hp->status); + hp->driver_status = driver_byte(vblk_hp->status); + hp->sb_len_wr = vblk_hp->sb_len_wr; + /* TODO: Handle the residual length */ + hp->resid = 0; + + sbp = (ioctl_req->ioctl_buf + vblk_hp->sbp_arg_offset); + if ((hp->sb_len_wr != 0) && (hp->sbp != NULL)) { + if (copy_to_user(hp->sbp, sbp, hp->sb_len_wr)) { + err = -EFAULT; + goto free_hp; + } + } + + data_buf = (ioctl_req->ioctl_buf + vblk_hp->xfer_arg_offset); + + if ((vblk_hp->data_direction == SCSI_FROM_DEVICE) || + (vblk_hp->data_direction == SCSI_BIDIRECTIONAL)) { + if (copy_to_user(hp->dxferp, data_buf, vblk_hp->dxfer_len)) { + err = -EFAULT; + goto free_hp; + } + } + + if (copy_to_user(user, hp, header_len)) { + err = -EFAULT; + goto free_hp; + } + +free_hp: + if (ioctl_req->ioctl_buf) + kfree(ioctl_req->ioctl_buf); + + if (hp) + kfree(hp); +exit: + return err; +} diff --git a/drivers/block/tegra_virt_storage/tegra_hv_ufs.c b/drivers/block/tegra_virt_storage/tegra_hv_ufs.c new file mode 100644 index 00000000..75da097e --- /dev/null +++ b/drivers/block/tegra_virt_storage/tegra_hv_ufs.c 
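For reference, the SG_IO pass-through implemented above (vblk_ioctl() -> vblk_prep_sg_io()/vblk_complete_sg_io()) should be reachable from user space through the stock <scsi/sg.h> interface. The following is only a minimal sketch: the device node name /dev/vblkdev0 is an assumption (it matches the "vblkdev%d" naming used when the gendisk is registered), the TEST UNIT READY command is just an illustrative choice, and the caller must hold CAP_SYS_RAWIO and open the whole disk rather than a partition.

/* Illustrative only: issue a TEST UNIT READY via the vblk SG_IO pass-through. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x00, 0, 0, 0, 0, 0 };	/* TEST UNIT READY */
	unsigned char sense[32];
	sg_io_hdr_t io;
	int fd, ret;

	fd = open("/dev/vblkdev0", O_RDWR);	/* whole disk, not a partition */
	if (fd < 0)
		return 1;

	memset(&io, 0, sizeof(io));
	io.interface_id = 'S';
	io.cmd_len = sizeof(cdb);		/* driver requires 6..VBLK_SG_MAX_CMD_LEN */
	io.cmdp = cdb;
	io.mx_sb_len = sizeof(sense);
	io.sbp = sense;
	io.dxfer_direction = SG_DXFER_NONE;	/* maps to SCSI_DATA_NONE in vblk_prep_sg_io() */
	io.timeout = 5000;			/* ms */

	ret = ioctl(fd, SG_IO, &io);
	printf("SG_IO ret=%d status=0x%x host=0x%x driver=0x%x\n",
	       ret, io.status, io.host_status, io.driver_status);
	close(fd);
	return ret ? 1 : 0;
}

The driver copies the CDB, sense data and any payload into its per-request mempool window and re-aligns the data buffer to hardblk_size internally, so no special alignment is required of the caller.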
@@ -0,0 +1,348 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#include /* kmalloc() */ +#include /* error codes */ +#include /* For msleep and usleep_range */ +#include +#include "tegra_vblk.h" +#include "tegra_hv_ufs.h" + +#define VBLK_UFS_MAX_IOC_SIZE (256 * 1024) + +static int vblk_validate_single_query_io(struct vblk_dev *vblkdev, + struct ufs_ioc_query_req *query_req, + size_t *data_len, + bool *w_flag) +{ + int err = 0; + + switch (query_req->opcode) { + case UPIU_QUERY_OPCODE_READ_DESC: + if (query_req->idn >= QUERY_DESC_IDN_MAX) { + dev_err(vblkdev->device, + "Desc IDN out of range %d\n", + query_req->idn); + err = -EINVAL; + goto out; + } + + *data_len = min_t(size_t, QUERY_DESC_MAX_SIZE, + query_req->buf_size); + break; + + case UPIU_QUERY_OPCODE_WRITE_DESC: + if (query_req->idn >= QUERY_DESC_IDN_MAX) { + err = -EINVAL; + dev_err(vblkdev->device, + "Desc IDN out of range %d\n", + query_req->idn); + goto out; + } + + *data_len = min_t(size_t, QUERY_DESC_MAX_SIZE, + query_req->buf_size); + *w_flag = true; + break; + + case UPIU_QUERY_OPCODE_READ_ATTR: + if (query_req->idn >= QUERY_ATTR_IDN_MAX) { + err = -EINVAL; + dev_err(vblkdev->device, + "ATTR IDN out of range %d\n", + query_req->idn); + goto out; + } + + if (query_req->buf_size != sizeof(u32)) { + err = -EINVAL; + dev_err(vblkdev->device, + "Buf size out of range %d\n", + query_req->buf_size); + goto out; + } + *data_len = sizeof(u32); + break; + + case UPIU_QUERY_OPCODE_WRITE_ATTR: + if (query_req->idn > QUERY_ATTR_IDN_MAX) { + err = -EINVAL; + dev_err(vblkdev->device, + "ATTR IDN out of range %d\n", + query_req->idn); + goto out; + } + + if (query_req->buf_size != sizeof(u32)) { + err = -EINVAL; + dev_err(vblkdev->device, + "Buf size out of range %d\n", + query_req->buf_size); + goto out; + } + *data_len = sizeof(u32); + *w_flag = true; + break; + + case UPIU_QUERY_OPCODE_READ_FLAG: + if (query_req->idn > QUERY_FLAG_IDN_MAX) { + err = -EINVAL; + dev_err(vblkdev->device, + "Flag IDN out of range %d\n", + query_req->idn); + goto out; + } + + if (query_req->buf_size != sizeof(u8)) { + err = -EINVAL; + dev_err(vblkdev->device, + "Buf size out of range %d\n", + query_req->buf_size); + goto out; + } + *data_len = sizeof(u8); + break; + + case UPIU_QUERY_OPCODE_SET_FLAG: + case UPIU_QUERY_OPCODE_CLEAR_FLAG: + case UPIU_QUERY_OPCODE_TOGGLE_FLAG: + if (query_req->idn > QUERY_FLAG_IDN_MAX) { + err = -EINVAL; + dev_err(vblkdev->device, + "Flag IDN out of range %d\n", + query_req->idn); + goto out; + } + /* TODO: Create buffer to be attached */ + *data_len = 0; + break; + default: + err = -EINVAL; + dev_err(vblkdev->device, "Invalid opcode %d\n", + query_req->idn); + break; + } +out: + return err; +} + +int vblk_prep_ufs_combo_ioc(struct vblk_dev *vblkdev, + struct vblk_ioctl_req *ioctl_req, + void __user *user, uint32_t cmd) +{ + int err = 0; + struct vblk_ufs_combo_info *combo_info; + struct vblk_ufs_ioc_query_req *combo_cmd; + int i = 0; + uint8_t num_cmd; + struct ufs_ioc_query_req ic; + struct ufs_ioc_combo_query_req cc; + struct ufs_ioc_combo_query_req __user *user_cmd; + struct ufs_ioc_query_req __user *usr_ptr; + uint32_t combo_cmd_size; + uint32_t ioctl_bytes = VBLK_UFS_MAX_IOC_SIZE; + uint8_t *tmpaddr; + void *ioctl_buf; + size_t data_len = 0; + bool w_flag = false; + + ioctl_buf = vmalloc(ioctl_bytes); + if (ioctl_buf == NULL) + return -ENOMEM; + + combo_info = (struct vblk_ufs_combo_info *)ioctl_buf; + + user_cmd = (struct 
ufs_ioc_combo_query_req __user *)user; + if (copy_from_user(&cc, user_cmd, sizeof(cc))) { + err = -EFAULT; + goto free_ioc_buf; + } + num_cmd = cc.num_cmds; + if (num_cmd > MAX_QUERY_CMD_PER_COMBO) { + err = -EINVAL; + goto free_ioc_buf; + } + + usr_ptr = (void __user *)cc.query; + combo_info->count = num_cmd; + combo_info->need_cq_empty = cc.need_cq_empty; + combo_cmd = (struct vblk_ufs_ioc_query_req *)(ioctl_buf + + sizeof(struct vblk_ufs_combo_info)); + + combo_cmd_size = sizeof(struct vblk_ufs_combo_info) + + sizeof(struct vblk_ufs_ioc_query_req) * combo_info->count; + if (combo_cmd_size < sizeof(struct vblk_ufs_combo_info)) { + dev_err(vblkdev->device, + "combo_cmd_size is overflowing!\n"); + err = -EINVAL; + goto free_ioc_buf; + } + + if (combo_cmd_size > ioctl_bytes) { + dev_err(vblkdev->device, + " buffer has no enough space to serve ioctl\n"); + err = -EFAULT; + goto free_ioc_buf; + } + memset(&ic, 0, sizeof(ic)); + tmpaddr = (uint8_t *)⁣ + for (i = 0; i < combo_info->count; i++) { + if (copy_from_user((void *)tmpaddr, usr_ptr, sizeof(ic))) { + err = -EFAULT; + goto free_ioc_buf; + } + + err = vblk_validate_single_query_io(vblkdev, + (struct ufs_ioc_query_req*)tmpaddr, + &data_len, &w_flag); + if (err) { + dev_err(vblkdev->device, "Validating request failed\n"); + err = -EFAULT; + goto free_ioc_buf; + } + + combo_cmd->opcode = ic.opcode; + combo_cmd->idn = ic.idn; + combo_cmd->index = ic.index; + combo_cmd->selector = ic.selector; + combo_cmd->buf_size = ic.buf_size; + combo_cmd->delay = ic.delay; + combo_cmd->error_status = ic.error_status; + combo_cmd->buffer_offset = combo_cmd_size; + + combo_cmd_size += data_len; + if ((combo_cmd_size < data_len) || + (combo_cmd_size > ioctl_bytes)) { + dev_err(vblkdev->device, + " buffer has no enough space to serve ioctl\n"); + err = -EFAULT; + goto free_ioc_buf; + } + + if (w_flag && data_len) { + if (copy_from_user(( + (void *)ioctl_buf + + combo_cmd->buffer_offset), + (void __user *)(unsigned long)ic.buffer, + (u64)data_len)) + { + dev_err(vblkdev->device, + "copy from user failed for data!\n"); + err = -EFAULT; + goto free_ioc_buf; + } + } + combo_cmd++; + usr_ptr++; + } + + ioctl_req->ioctl_id = VBLK_UFS_COMBO_IO_ID; + ioctl_req->ioctl_buf = ioctl_buf; + ioctl_req->ioctl_len = ioctl_bytes; + +free_ioc_buf: + if (err && ioctl_buf) + vfree(ioctl_buf); + + return err; +} + +int vblk_complete_ufs_combo_ioc(struct vblk_dev *vblkdev, + struct vblk_ioctl_req *ioctl_req, + void __user *user, + uint32_t cmd) +{ + uint64_t num_cmd; + struct ufs_ioc_combo_query_req cc; + struct ufs_ioc_query_req ic; + struct ufs_ioc_query_req *ic_ptr = ⁣ + struct ufs_ioc_combo_query_req __user *user_cmd; + struct ufs_ioc_query_req __user *usr_ptr; + struct vblk_ufs_ioc_query_req *combo_cmd; + uint32_t i; + int err = 0; + size_t data_len; + bool w_flag = false; + + void *ioctl_buf = ioctl_req->ioctl_buf; + + if (ioctl_req->status) { + err = ioctl_req->status; + if (ioctl_req->ioctl_buf) + vfree(ioctl_req->ioctl_buf); + goto exit; + } + + user_cmd = (struct ufs_ioc_combo_query_req __user *)user; + if (copy_from_user(&cc, user_cmd, + sizeof(cc))) { + err = -EFAULT; + goto free_ioc_buf; + } + num_cmd = cc.num_cmds; + if (num_cmd > MAX_QUERY_CMD_PER_COMBO) { + err = -EINVAL; + goto free_ioc_buf; + } + + usr_ptr = (void __user *)cc.query; + + combo_cmd = (struct vblk_ufs_ioc_query_req *)(ioctl_buf + + sizeof(struct vblk_ufs_combo_info)); + + for (i = 0; i < num_cmd; i++) { + if (copy_from_user((void *)ic_ptr, usr_ptr, + sizeof(struct ufs_ioc_query_req))) { + err = 
-EFAULT; + goto free_ioc_buf; + } + + err = vblk_validate_single_query_io(vblkdev, ic_ptr, + &data_len, &w_flag); + if (err) { + dev_err(vblkdev->device, "Validating request failed\n"); + err = -EFAULT; + goto free_ioc_buf; + } + + err = copy_to_user(&usr_ptr->buf_size, &combo_cmd->buf_size, + sizeof(combo_cmd->buf_size)); + if (err) { + dev_err(vblkdev->device, "Failed copy_to_user query_req buf_size\n"); + err = -EFAULT; + goto free_ioc_buf; + } + + err = copy_to_user(&usr_ptr->error_status, &combo_cmd->error_status, + sizeof(combo_cmd->error_status)); + if (err) { + dev_err(vblkdev->device, "Failed copy_to_user query_req status\n"); + err = -EFAULT; + goto free_ioc_buf; + } + + if (!w_flag && data_len) { + if (copy_to_user( + (void __user *)(unsigned long)ic.buffer, + (ioctl_buf + combo_cmd->buffer_offset), + (u64)data_len)) + { + dev_err(vblkdev->device, + "copy to user of ioctl data failed!\n"); + err = -EFAULT; + goto free_ioc_buf; + } + } + combo_cmd++; + usr_ptr++; + } + +free_ioc_buf: + if (ioctl_buf) + vfree(ioctl_buf); + +exit: + return err; +} diff --git a/drivers/block/tegra_virt_storage/tegra_hv_ufs.h b/drivers/block/tegra_virt_storage/tegra_hv_ufs.h new file mode 100644 index 00000000..d2986498 --- /dev/null +++ b/drivers/block/tegra_virt_storage/tegra_hv_ufs.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#ifndef _TEGRA_HV_UFS_H_ +#define _TEGRA_HV_UFS_H_ + +#include + +#define QUERY_DESC_MAX_SIZE 255 +#define QUERY_DESC_MIN_SIZE 2 +#define QUERY_DESC_HDR_SIZE 2 + +/* Attribute idn for Query requests */ +enum attr_idn { + QUERY_ATTR_IDN_BOOTLUN_EN = 0x0, + QUERY_ATTR_IDN_PWR_MODE = 0x02, + QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03, + QUERY_ATTR_IDN_BKOPS_STATUS = 0x05, + QUERY_ATTR_IDN_REF_CLK_FREQ = 0x0A, + QUERY_ATTR_IDN_CONF_DESC_LCK = 0x0B, + QUERY_ATTR_IDN_EE_CONTROL = 0x0D, + QUERY_ATTR_IDN_EE_STATUS = 0x0E, + QUERY_ATTR_IDN_MAX = 0x30, +}; + +/* Query response result code */ +enum { + QUERY_RESULT_SUCCESS = 0x00, + QUERY_RESULT_NOT_READABLE = 0xF6, + QUERY_RESULT_NOT_WRITEABLE = 0xF7, + QUERY_RESULT_ALREADY_WRITTEN = 0xF8, + QUERY_RESULT_INVALID_LENGTH = 0xF9, + QUERY_RESULT_INVALID_VALUE = 0xFA, + QUERY_RESULT_INVALID_SELECTOR = 0xFB, + QUERY_RESULT_INVALID_INDEX = 0xFC, + QUERY_RESULT_INVALID_IDN = 0xFD, + QUERY_RESULT_INVALID_OPCODE = 0xFE, + QUERY_RESULT_GENERAL_FAILURE = 0xFF, +}; + +/* UTP QUERY Transaction Specific Fields OpCode */ +enum query_opcode { + UPIU_QUERY_OPCODE_NOP = 0x0, + UPIU_QUERY_OPCODE_READ_DESC = 0x1, + UPIU_QUERY_OPCODE_WRITE_DESC = 0x2, + UPIU_QUERY_OPCODE_READ_ATTR = 0x3, + UPIU_QUERY_OPCODE_WRITE_ATTR = 0x4, + UPIU_QUERY_OPCODE_READ_FLAG = 0x5, + UPIU_QUERY_OPCODE_SET_FLAG = 0x6, + UPIU_QUERY_OPCODE_CLEAR_FLAG = 0x7, + UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8, +}; + +/* Descriptor idn for Query requests */ +enum desc_idn { + QUERY_DESC_IDN_DEVICE = 0x0, + QUERY_DESC_IDN_CONFIGURATION = 0x1, + QUERY_DESC_IDN_UNIT = 0x2, + QUERY_DESC_IDN_RFU_0 = 0x3, + QUERY_DESC_IDN_INTERCONNECT = 0x4, + QUERY_DESC_IDN_STRING = 0x5, + QUERY_DESC_IDN_RFU_1 = 0x6, + QUERY_DESC_IDN_GEOMETRY = 0x7, + QUERY_DESC_IDN_POWER = 0x8, + QUERY_DESC_IDN_DEVICE_HEALTH = 0x9, + QUERY_DESC_IDN_MAX, +}; + +/* Flag idn for Query Requests*/ +enum flag_idn { + QUERY_FLAG_IDN_FDEVICEINIT = 0x01, + QUERY_FLAG_IDN_PWR_ON_WPE = 0x03, + QUERY_FLAG_IDN_BKOPS_EN = 0x04, + QUERY_FLAG_IDN_MAX = 0x0E, +}; + +#endif /* _TEGRA_HV_UFS_H_ */ diff --git 
a/drivers/block/tegra_virt_storage/tegra_hv_vblk.c b/drivers/block/tegra_virt_storage/tegra_hv_vblk.c new file mode 100644 index 00000000..481d0a38 --- /dev/null +++ b/drivers/block/tegra_virt_storage/tegra_hv_vblk.c @@ -0,0 +1,1286 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include /* printk() */ +#include +#include /* kmalloc() */ +#include /* everything... */ +#include /* error codes */ +#include /* O_ACCMODE */ +#include /* HDIO_GETGEO */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tegra_vblk.h" + +static int vblk_major; + +/** + * vblk_get_req: Get a handle to free vsc request. + */ +static struct vsc_request *vblk_get_req(struct vblk_dev *vblkdev) +{ + struct vsc_request *req = NULL; + unsigned long bit; + + mutex_lock(&vblkdev->req_lock); + + if (vblkdev->queue_state != VBLK_QUEUE_ACTIVE) + goto exit; + + bit = find_first_zero_bit(vblkdev->pending_reqs, vblkdev->max_requests); + if (bit < vblkdev->max_requests) { + req = &vblkdev->reqs[bit]; + req->vs_req.req_id = bit; + set_bit(bit, vblkdev->pending_reqs); + vblkdev->inflight_reqs++; + } + +exit: + mutex_unlock(&vblkdev->req_lock); + return req; +} + +static struct vsc_request *vblk_get_req_by_sr_num(struct vblk_dev *vblkdev, + uint32_t num) +{ + struct vsc_request *req; + + if (num >= vblkdev->max_requests) + return NULL; + + mutex_lock(&vblkdev->req_lock); + req = &vblkdev->reqs[num]; + if (test_bit(req->id, vblkdev->pending_reqs) == 0) { + dev_err(vblkdev->device, + "sr_num: Request index %d is not active!\n", + req->id); + req = NULL; + } + mutex_unlock(&vblkdev->req_lock); + + /* Assuming serial number is same as index into request array */ + return req; +} + +/** + * vblk_put_req: Free an active vsc request. + */ +static void vblk_put_req(struct vsc_request *req) +{ + struct vblk_dev *vblkdev; + + vblkdev = req->vblkdev; + if (vblkdev == NULL) { + pr_err("Request %d does not have valid vblkdev!\n", + req->id); + return; + } + + if (req->id >= vblkdev->max_requests) { + dev_err(vblkdev->device, "Request Index %d out of range!\n", + req->id); + return; + } + + mutex_lock(&vblkdev->req_lock); + if (req != &vblkdev->reqs[req->id]) { + dev_err(vblkdev->device, + "Request Index %d does not match with the request!\n", + req->id); + goto exit; + } + + if (test_bit(req->id, vblkdev->pending_reqs) == 0) { + dev_err(vblkdev->device, + "Request index %d is not active!\n", + req->id); + } else { + clear_bit(req->id, vblkdev->pending_reqs); + memset(&req->vs_req, 0, sizeof(struct vs_request)); + req->req = NULL; + memset(&req->iter, 0, sizeof(struct req_iterator)); + vblkdev->inflight_reqs--; + + if ((vblkdev->inflight_reqs == 0) && + (vblkdev->queue_state == VBLK_QUEUE_SUSPENDED)) { + complete(&vblkdev->req_queue_empty); + } + } +exit: + mutex_unlock(&vblkdev->req_lock); +} + +static int vblk_send_config_cmd(struct vblk_dev *vblkdev) +{ + struct vs_request *vs_req; + int i = 0; + + /* This while loop exits as long as the remote endpoint cooperates. 
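+	 * tegra_hv_ivc_channel_notified() stays non-zero until the IVC reset
+	 * handshake with the storage server has completed, so poll it for up
+	 * to IVC_RESET_RETRIES iterations and give up with -EIO if the server
+	 * never responds.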
*/ + if (tegra_hv_ivc_channel_notified(vblkdev->ivck) != 0) { + pr_notice("vblk: send_config wait for ivc channel reset\n"); + while (tegra_hv_ivc_channel_notified(vblkdev->ivck) != 0) { + if (i++ > IVC_RESET_RETRIES) { + dev_err(vblkdev->device, "ivc reset timeout\n"); + return -EIO; + } + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(usecs_to_jiffies(1)); + } + } + vs_req = (struct vs_request *) + tegra_hv_ivc_write_get_next_frame(vblkdev->ivck); + if (IS_ERR_OR_NULL(vs_req)) { + dev_err(vblkdev->device, "no empty frame for write\n"); + return -EIO; + } + + vs_req->type = VS_CONFIGINFO_REQ; + + dev_info(vblkdev->device, "send config cmd to ivc #%d\n", + vblkdev->ivc_id); + + if (tegra_hv_ivc_write_advance(vblkdev->ivck)) { + dev_err(vblkdev->device, "ivc write failed\n"); + return -EIO; + } + + return 0; +} + +static int vblk_get_configinfo(struct vblk_dev *vblkdev) +{ + struct vs_request *req; + int32_t status; + + dev_info(vblkdev->device, "get config data from ivc #%d\n", + vblkdev->ivc_id); + + req = (struct vs_request *) + tegra_hv_ivc_read_get_next_frame(vblkdev->ivck); + if (IS_ERR_OR_NULL(req)) { + dev_err(vblkdev->device, "no empty frame for read\n"); + return -EIO; + } + + status = req->status; + vblkdev->config = req->config_info; + + if (tegra_hv_ivc_read_advance(vblkdev->ivck)) { + dev_err(vblkdev->device, "ivc read failed\n"); + return -EIO; + } + + if (status != 0) + return -EINVAL; + + if (vblkdev->config.type != VS_BLK_DEV) { + dev_err(vblkdev->device, "Non Blk dev config not supported!\n"); + return -EINVAL; + } + + if (vblkdev->config.blk_config.num_blks == 0) { + dev_err(vblkdev->device, "controller init failed\n"); + return -EINVAL; + } + + return 0; +} + +static void req_error_handler(struct vblk_dev *vblkdev, struct request *breq) +{ + dev_err(vblkdev->device, + "Error for request pos %llx type %llx size %x\n", + (blk_rq_pos(breq) * (uint64_t)SECTOR_SIZE), + (uint64_t)req_op(breq), + blk_rq_bytes(breq)); + + blk_mq_end_request(breq, BLK_STS_IOERR); +} + +static void handle_non_ioctl_resp(struct vblk_dev *vblkdev, + struct vsc_request *vsc_req, + struct vs_blk_response *blk_resp) +{ + struct bio_vec bvec; + void *buffer; + size_t size; + size_t total_size = 0; + bool invoke_req_err_hand = false; + struct request *const bio_req = vsc_req->req; + struct vs_blk_request *const blk_req = + &(vsc_req->vs_req.blkdev_req.blk_req); + + if (blk_resp->status != 0) { + invoke_req_err_hand = true; + goto end; + } + + if (req_op(bio_req) != REQ_OP_FLUSH) { + if (blk_req->num_blks != + blk_resp->num_blks) { + invoke_req_err_hand = true; + goto end; + } + } + + if (req_op(bio_req) == REQ_OP_READ) { + rq_for_each_segment(bvec, bio_req, vsc_req->iter) { + size = bvec.bv_len; + buffer = page_address(bvec.bv_page) + + bvec.bv_offset; + + if ((total_size + size) > + (blk_req->num_blks * + vblkdev->config.blk_config.hardblk_size)) { + size = + (blk_req->num_blks * + vblkdev->config.blk_config.hardblk_size) - + total_size; + } + + if (!vblkdev->config.blk_config.use_vm_address) { + memcpy(buffer, + vsc_req->mempool_virt + + total_size, + size); + } + + total_size += size; + if (total_size == + (blk_req->num_blks * + vblkdev->config.blk_config.hardblk_size)) + break; + } + } + +end: + if (vblkdev->config.blk_config.use_vm_address) { + if ((req_op(bio_req) == REQ_OP_READ) || + (req_op(bio_req) == REQ_OP_WRITE)) { + dma_unmap_sg(vblkdev->device, + vsc_req->sg_lst, + vsc_req->sg_num_ents, + DMA_BIDIRECTIONAL); + devm_kfree(vblkdev->device, vsc_req->sg_lst); + } + } + + if 
(!invoke_req_err_hand) { + blk_mq_end_request(bio_req, BLK_STS_OK); + } else { + + req_error_handler(vblkdev, bio_req); + } +} + +/** + * complete_bio_req: Complete a bio request after server is + * done processing the request. + */ +static bool complete_bio_req(struct vblk_dev *vblkdev) +{ + int status = 0; + struct vsc_request *vsc_req = NULL; + struct vs_request *vs_req; + struct vs_request req_resp; + struct request *bio_req; + + /* First check if ivc read queue is empty */ + if (!tegra_hv_ivc_can_read(vblkdev->ivck)) + goto no_valid_io; + + /* Copy the data and advance to next frame */ + if ((tegra_hv_ivc_read(vblkdev->ivck, &req_resp, + sizeof(struct vs_request)) <= 0)) { + dev_err(vblkdev->device, + "Couldn't increment read frame pointer!\n"); + goto no_valid_io; + } + + status = req_resp.status; + if (status != 0) { + dev_err(vblkdev->device, "IO request error = %d\n", + status); + } + + vsc_req = vblk_get_req_by_sr_num(vblkdev, req_resp.req_id); + if (vsc_req == NULL) { + dev_err(vblkdev->device, "serial_number mismatch num %d!\n", + req_resp.req_id); + goto complete_bio_exit; + } + + bio_req = vsc_req->req; + vs_req = &vsc_req->vs_req; + + if ((bio_req != NULL) && (status == 0)) { + if (req_op(bio_req) == REQ_OP_DRV_IN) { + vblk_complete_ioctl_req(vblkdev, vsc_req, + req_resp.blkdev_resp. + ioctl_resp.status); + blk_mq_end_request(bio_req, BLK_STS_OK); + } else { + handle_non_ioctl_resp(vblkdev, vsc_req, + &(req_resp.blkdev_resp.blk_resp)); + } + + } else if ((bio_req != NULL) && (status != 0)) { + req_error_handler(vblkdev, bio_req); + } else { + dev_err(vblkdev->device, + "VSC request %d has null bio request!\n", + vsc_req->id); + } + + vblk_put_req(vsc_req); + +complete_bio_exit: + return true; + +no_valid_io: + return false; +} + +static bool bio_req_sanity_check(struct vblk_dev *vblkdev, + struct request *bio_req, + struct vsc_request *vsc_req) +{ + uint64_t start_offset = (blk_rq_pos(bio_req) * (uint64_t)SECTOR_SIZE); + uint64_t req_bytes = blk_rq_bytes(bio_req); + + if ((start_offset >= vblkdev->size) || (req_bytes > vblkdev->size) || + ((start_offset + req_bytes) > vblkdev->size)) + { + dev_err(vblkdev->device, + "Invalid I/O limit start 0x%llx size 0x%llx > 0x%llx\n", + start_offset, + req_bytes, vblkdev->size); + return false; + } + + if ((start_offset % vblkdev->config.blk_config.hardblk_size) != 0) { + dev_err(vblkdev->device, "Unaligned block offset (%lld %d)\n", + start_offset, vblkdev->config.blk_config.hardblk_size); + return false; + } + + if ((req_bytes % vblkdev->config.blk_config.hardblk_size) != 0) { + dev_err(vblkdev->device, "Unaligned io length (%lld %d)\n", + req_bytes, vblkdev->config.blk_config.hardblk_size); + return false; + } + + if (req_bytes > (uint64_t)vsc_req->mempool_len) { + dev_err(vblkdev->device, "Req bytes %llx greater than %x!\n", + req_bytes, vsc_req->mempool_len); + return false; + } + + return true; +} + +/** + * submit_bio_req: Fetch a bio request and submit it to + * server for processing. 
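+ * It pops the next request from vblkdev->req_list, pairs it with a free
+ * vsc_request (its mempool window, or a DMA-mapped scatterlist when
+ * use_vm_address is set), translates it into a struct vs_request and pushes
+ * it to the server over IVC. Returns true when a request was consumed so
+ * the caller keeps polling.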
+ */ +static bool submit_bio_req(struct vblk_dev *vblkdev) +{ + struct vsc_request *vsc_req = NULL; + struct request *bio_req = NULL; + struct vs_request *vs_req; + struct bio_vec bvec; + size_t size; + size_t total_size = 0; + void *buffer; + struct req_entry *entry = NULL; + size_t sz; + uint32_t sg_cnt; + dma_addr_t sg_dma_addr = 0; + + /* Check if ivc queue is full */ + if (!tegra_hv_ivc_can_write(vblkdev->ivck)) + goto bio_exit; + + if (vblkdev->queue == NULL) + goto bio_exit; + + vsc_req = vblk_get_req(vblkdev); + if (vsc_req == NULL) + goto bio_exit; + + spin_lock(&vblkdev->queue_lock); + if(!list_empty(&vblkdev->req_list)) { + entry = list_first_entry(&vblkdev->req_list, struct req_entry, + list_entry); + list_del(&entry->list_entry); + bio_req = entry->req; + kfree(entry); + } + spin_unlock(&vblkdev->queue_lock); + + if (bio_req == NULL) + goto bio_exit; + + if ((vblkdev->config.blk_config.use_vm_address) && + ((req_op(bio_req) == REQ_OP_READ) || + (req_op(bio_req) == REQ_OP_WRITE))) { + sz = (sizeof(struct scatterlist) + * bio_req->nr_phys_segments); + vsc_req->sg_lst = devm_kzalloc(vblkdev->device, sz, + GFP_KERNEL); + if (vsc_req->sg_lst == NULL) { + dev_err(vblkdev->device, + "SG mem allocation failed\n"); + goto bio_exit; + } + sg_init_table(vsc_req->sg_lst, + bio_req->nr_phys_segments); + sg_cnt = blk_rq_map_sg(vblkdev->queue, bio_req, + vsc_req->sg_lst); + vsc_req->sg_num_ents = sg_nents(vsc_req->sg_lst); + if (dma_map_sg(vblkdev->device, vsc_req->sg_lst, + vsc_req->sg_num_ents, DMA_BIDIRECTIONAL) == 0) { + dev_err(vblkdev->device, "dma_map_sg failed\n"); + goto bio_exit; + } + sg_dma_addr = sg_dma_address(vsc_req->sg_lst); + } + + vsc_req->req = bio_req; + vs_req = &vsc_req->vs_req; + + vs_req->type = VS_DATA_REQ; + if (req_op(bio_req) != REQ_OP_DRV_IN) { + if (req_op(bio_req) == REQ_OP_READ) { + vs_req->blkdev_req.req_op = VS_BLK_READ; + } else if (req_op(bio_req) == REQ_OP_WRITE) { + vs_req->blkdev_req.req_op = VS_BLK_WRITE; + } else if (req_op(bio_req) == REQ_OP_FLUSH) { + vs_req->blkdev_req.req_op = VS_BLK_FLUSH; + } else if (req_op(bio_req) == REQ_OP_DISCARD) { + vs_req->blkdev_req.req_op = VS_BLK_DISCARD; + } else if (req_op(bio_req) == REQ_OP_SECURE_ERASE) { + vs_req->blkdev_req.req_op = VS_BLK_SECURE_ERASE; + } else { + dev_err(vblkdev->device, + "Request direction is not read/write!\n"); + goto bio_exit; + } + + vsc_req->iter.bio = NULL; + if (req_op(bio_req) == REQ_OP_FLUSH) { + vs_req->blkdev_req.blk_req.blk_offset = 0; + vs_req->blkdev_req.blk_req.num_blks = + vblkdev->config.blk_config.num_blks; + } else { + if (!bio_req_sanity_check(vblkdev, bio_req, vsc_req)) { + goto bio_exit; + } + + vs_req->blkdev_req.blk_req.blk_offset = ((blk_rq_pos(bio_req) * + (uint64_t)SECTOR_SIZE) + / vblkdev->config.blk_config.hardblk_size); + vs_req->blkdev_req.blk_req.num_blks = ((blk_rq_sectors(bio_req) * + SECTOR_SIZE) / + vblkdev->config.blk_config.hardblk_size); + + if (!vblkdev->config.blk_config.use_vm_address) { + vs_req->blkdev_req.blk_req.data_offset = + vsc_req->mempool_offset; + } else { + vs_req->blkdev_req.blk_req.data_offset = 0; + /* Provide IOVA as part of request */ + vs_req->blkdev_req.blk_req.iova_addr = + (uint64_t)sg_dma_addr; + } + } + + if (req_op(bio_req) == REQ_OP_WRITE) { + rq_for_each_segment(bvec, bio_req, vsc_req->iter) { + size = bvec.bv_len; + buffer = page_address(bvec.bv_page) + + bvec.bv_offset; + + if ((total_size + size) > + (vs_req->blkdev_req.blk_req.num_blks * + vblkdev->config.blk_config.hardblk_size)) + { + size = 
(vs_req->blkdev_req.blk_req.num_blks * + vblkdev->config.blk_config.hardblk_size) - + total_size; + } + + /* memcpy to mempool not needed as VM IOVA is + * provided + */ + if (!vblkdev->config.blk_config.use_vm_address) { + memcpy( + vsc_req->mempool_virt + total_size, + buffer, size); + } + + total_size += size; + if (total_size == (vs_req->blkdev_req.blk_req.num_blks * + vblkdev->config.blk_config.hardblk_size)) { + break; + } + } + } + } else { + if (vblk_prep_ioctl_req(vblkdev, + (struct vblk_ioctl_req *)bio_req->completion_data, + vsc_req)) { + dev_err(vblkdev->device, + "Failed to prepare ioctl request!\n"); + goto bio_exit; + } + } + + if (!tegra_hv_ivc_write(vblkdev->ivck, vs_req, + sizeof(struct vs_request))) { + dev_err(vblkdev->device, + "Request Id %d IVC write failed!\n", + vsc_req->id); + goto bio_exit; + } + + return true; + +bio_exit: + if (vsc_req != NULL) { + vblk_put_req(vsc_req); + } + + if (bio_req != NULL) { + req_error_handler(vblkdev, bio_req); + return true; + } + + return false; +} + +static void vblk_request_work(struct work_struct *ws) +{ + struct vblk_dev *vblkdev = + container_of(ws, struct vblk_dev, work); + bool req_submitted, req_completed; + + /* Taking ivc lock before performing IVC read/write */ + mutex_lock(&vblkdev->ivc_lock); + if (tegra_hv_ivc_channel_notified(vblkdev->ivck) != 0) { + mutex_unlock(&vblkdev->ivc_lock); + return; + } + + req_submitted = true; + req_completed = true; + while (req_submitted || req_completed) { + req_completed = complete_bio_req(vblkdev); + + req_submitted = submit_bio_req(vblkdev); + } + mutex_unlock(&vblkdev->ivc_lock); +} + +/* The simple form of the request function. */ +static blk_status_t vblk_request(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd) +{ + struct req_entry *entry; + struct request *req = bd->rq; + struct vblk_dev *vblkdev = hctx->queue->queuedata; + + blk_mq_start_request(req); + + /* malloc for req list entry */ + entry = kmalloc(sizeof(struct req_entry), GFP_ATOMIC); + if (entry == NULL) { + dev_err(vblkdev->device, "Failed to allocate memory\n"); + return BLK_STS_IOERR; + } + + /* Initialise the entry */ + entry->req = req; + INIT_LIST_HEAD(&entry->list_entry); + + /* Insert the req to list */ + spin_lock(&vblkdev->queue_lock); + list_add_tail(&entry->list_entry, &vblkdev->req_list); + spin_unlock(&vblkdev->queue_lock); + + /* Now invoke the queue to handle data inserted in queue */ + queue_work_on(WORK_CPU_UNBOUND, vblkdev->wq, &vblkdev->work); + + return BLK_STS_OK; +} + +/* Open and release */ +static int vblk_open(struct block_device *device, fmode_t mode) +{ + struct vblk_dev *vblkdev = device->bd_disk->private_data; + + spin_lock(&vblkdev->lock); + if (!vblkdev->users) { + bdev_check_media_change(device); + } + vblkdev->users++; + + spin_unlock(&vblkdev->lock); + return 0; +} + +static void vblk_release(struct gendisk *disk, fmode_t mode) +{ + struct vblk_dev *vblkdev = disk->private_data; + + spin_lock(&vblkdev->lock); + + vblkdev->users--; + + spin_unlock(&vblkdev->lock); +} + +static int vblk_getgeo(struct block_device *device, struct hd_geometry *geo) +{ + geo->heads = VS_LOG_HEADS; + geo->sectors = VS_LOG_SECTS; + geo->cylinders = get_capacity(device->bd_disk) / + (geo->heads * geo->sectors); + + return 0; +} + +/* The device operations structure. 
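+ * open/release only track the user count, getgeo reports a synthetic CHS
+ * geometry derived from VS_LOG_HEADS/VS_LOG_SECTS, and ioctl forwards the
+ * supported pass-through commands to the storage server.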
*/ +static const struct block_device_operations vblk_ops = { + .owner = THIS_MODULE, + .open = vblk_open, + .release = vblk_release, + .getgeo = vblk_getgeo, + .ioctl = vblk_ioctl +}; + +static ssize_t +vblk_phys_dev_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + struct vblk_dev *vblk = disk->private_data; + + if (vblk->config.phys_dev == VSC_DEV_EMMC) + return snprintf(buf, 16, "EMMC\n"); + else if (vblk->config.phys_dev == VSC_DEV_UFS) + return snprintf(buf, 16, "UFS\n"); + else + return snprintf(buf, 16, "Unknown\n"); +} + +static ssize_t +vblk_phys_base_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + struct vblk_dev *vblk = disk->private_data; + + return snprintf(buf, 16, "0x%x\n", vblk->config.phys_base); +} + +static ssize_t +vblk_storage_type_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + struct vblk_dev *vblk = disk->private_data; + + switch (vblk->config.storage_type) { + case VSC_STORAGE_RPMB: + return snprintf(buf, 16, "RPMB\n"); + case VSC_STORAGE_BOOT: + return snprintf(buf, 16, "BOOT\n"); + case VSC_STORAGE_LUN0: + return snprintf(buf, 16, "LUN0\n"); + case VSC_STORAGE_LUN1: + return snprintf(buf, 16, "LUN1\n"); + case VSC_STORAGE_LUN2: + return snprintf(buf, 16, "LUN2\n"); + case VSC_STORAGE_LUN3: + return snprintf(buf, 16, "LUN3\n"); + case VSC_STORAGE_LUN4: + return snprintf(buf, 16, "LUN4\n"); + case VSC_STORAGE_LUN5: + return snprintf(buf, 16, "LUN5\n"); + case VSC_STORAGE_LUN6: + return snprintf(buf, 16, "LUN6\n"); + case VSC_STORAGE_LUN7: + return snprintf(buf, 16, "LUN7\n"); + default: + break; + } + + return snprintf(buf, 16, "Unknown\n"); +} + +static ssize_t +vblk_speed_mode_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + struct vblk_dev *vblk = disk->private_data; + + return snprintf(buf, 32, "%s\n", vblk->config.speed_mode); +} + +static const struct device_attribute dev_attr_phys_dev_ro = + __ATTR(phys_dev, 0444, + vblk_phys_dev_show, NULL); + +static const struct device_attribute dev_attr_phys_base_ro = + __ATTR(phys_base, 0444, + vblk_phys_base_show, NULL); + +static const struct device_attribute dev_attr_storage_type_ro = + __ATTR(storage_type, 0444, + vblk_storage_type_show, NULL); + +static const struct device_attribute dev_attr_speed_mode_ro = + __ATTR(speed_mode, 0444, + vblk_speed_mode_show, NULL); + +static const struct blk_mq_ops vblk_mq_ops = { + .queue_rq = vblk_request, +}; + +/* Set up virtual device. 
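+ * Runs once the config response has been parsed: it sizes the request pool
+ * from the mempool and the per-I/O limit (max_requests = mempool size /
+ * (hardblk_size * max_read_blks_per_io); e.g. a hypothetical 2 MiB mempool
+ * with 512 KiB per I/O yields 4 requests), sets up the blk-mq tag set and
+ * queue, applies the feature flags advertised by the server (flush,
+ * discard, secure erase, read-only) and registers the gendisk along with
+ * its sysfs attributes.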
*/ +static void setup_device(struct vblk_dev *vblkdev) +{ + uint32_t max_io_bytes; + uint32_t req_id; + uint32_t max_requests; + struct vsc_request *req; + int ret; + + vblkdev->size = + vblkdev->config.blk_config.num_blks * + vblkdev->config.blk_config.hardblk_size; + + spin_lock_init(&vblkdev->lock); + spin_lock_init(&vblkdev->queue_lock); + mutex_init(&vblkdev->ioctl_lock); + mutex_init(&vblkdev->ivc_lock); + + memset(&vblkdev->tag_set, 0, sizeof(vblkdev->tag_set)); + vblkdev->tag_set.ops = &vblk_mq_ops; + vblkdev->tag_set.nr_hw_queues = 1; + vblkdev->tag_set.nr_maps = 1; + vblkdev->tag_set.queue_depth = 16; + vblkdev->tag_set.numa_node = NUMA_NO_NODE; + vblkdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; + + ret = blk_mq_alloc_tag_set(&vblkdev->tag_set); + if (ret) + return; + + vblkdev->queue = blk_mq_init_queue(&vblkdev->tag_set); + if (IS_ERR(vblkdev->queue)) { + dev_err(vblkdev->device, "failed to init blk queue\n"); + blk_mq_free_tag_set(&vblkdev->tag_set); + return; + } + + vblkdev->queue->queuedata = vblkdev; + + blk_queue_logical_block_size(vblkdev->queue, + vblkdev->config.blk_config.hardblk_size); + blk_queue_physical_block_size(vblkdev->queue, + vblkdev->config.blk_config.hardblk_size); + + if (vblkdev->config.blk_config.req_ops_supported & VS_BLK_FLUSH_OP_F) { + blk_queue_write_cache(vblkdev->queue, true, false); + } + + if (vblkdev->config.blk_config.max_read_blks_per_io != + vblkdev->config.blk_config.max_write_blks_per_io) { + dev_err(vblkdev->device, + "Different read/write blks not supported!\n"); + return; + } + + /* Set the maximum number of requests possible using + * server returned information */ + max_io_bytes = (vblkdev->config.blk_config.hardblk_size * + vblkdev->config.blk_config.max_read_blks_per_io); + if (max_io_bytes == 0) { + dev_err(vblkdev->device, "Maximum io bytes value is 0!\n"); + return; + } + + max_requests = ((vblkdev->ivmk->size) / max_io_bytes); + + if (max_requests < MAX_VSC_REQS) { + /* Warn if the virtual storage device supports + * normal read write operations */ + if (vblkdev->config.blk_config.req_ops_supported & + (VS_BLK_READ_OP_F | + VS_BLK_WRITE_OP_F)) { + dev_warn(vblkdev->device, + "Setting Max requests to %d, consider " + "increasing mempool size !\n", + max_requests); + } + } else if (max_requests > MAX_VSC_REQS) { + max_requests = MAX_VSC_REQS; + dev_warn(vblkdev->device, + "Reducing the max requests to %d, consider" + " supporting more requests for the vblkdev!\n", + MAX_VSC_REQS); + } + + /* if the number of ivc frames is lesser than th maximum requests that + * can be supported(calculated based on mempool size above), treat this + * as critical error and panic. + * + *if (num_of_ivc_frames < max_supported_requests) + * PANIC + * Ideally, these 2 should be equal for below reasons + * 1. Each ivc frame is a request should have a backing data memory + * for transfers. So, number of requests supported by message + * request memory should be <= number of frames in + * IVC queue. The read/write logic depends on this. + * 2. If number of requests supported by message request memory is + * more than IVC frame count, then thats a wastage of memory space + * and it introduces a race condition in submit_bio_req(). + * The race condition happens when there is only one empty slot in + * IVC write queue and 2 threads enter submit_bio_req(). Both will + * compete for IVC write(After calling ivc_can_write) and one of + * the write will fail. 
But with vblk_get_req() this race can be + * avoided if num_of_ivc_frames >= max_supported_requests + * holds true. + * + * In short, the optimal setting is when both of these are equal + */ + if (vblkdev->ivck->nframes < max_requests) { + /* Error if the virtual storage device supports + * read, write and ioctl operations + */ + if (vblkdev->config.blk_config.req_ops_supported & + (VS_BLK_READ_OP_F | + VS_BLK_WRITE_OP_F | + VS_BLK_IOCTL_OP_F)) { + panic("hv_vblk: IVC Channel:%u IVC frames %d less than possible max requests %d!\n", + vblkdev->ivc_id, vblkdev->ivck->nframes, + max_requests); + return; + } + } + + for (req_id = 0; req_id < max_requests; req_id++){ + req = &vblkdev->reqs[req_id]; + req->mempool_virt = (void *)((uintptr_t)vblkdev->shared_buffer + + (uintptr_t)(req_id * max_io_bytes)); + req->mempool_offset = (req_id * max_io_bytes); + req->mempool_len = max_io_bytes; + req->id = req_id; + req->vblkdev = vblkdev; + } + + if (max_requests == 0) { + dev_err(vblkdev->device, + "maximum requests set to 0!\n"); + return; + } + mutex_init(&vblkdev->req_lock); + + vblkdev->max_requests = max_requests; + blk_queue_max_hw_sectors(vblkdev->queue, max_io_bytes / SECTOR_SIZE); + blk_queue_flag_set(QUEUE_FLAG_NONROT, vblkdev->queue); + + if (vblkdev->config.blk_config.req_ops_supported + & VS_BLK_DISCARD_OP_F) { + blk_queue_flag_set(QUEUE_FLAG_DISCARD, vblkdev->queue); + blk_queue_max_discard_sectors(vblkdev->queue, + vblkdev->config.blk_config.max_erase_blks_per_io); + vblkdev->queue->limits.discard_granularity = + vblkdev->config.blk_config.hardblk_size; + if (vblkdev->config.blk_config.req_ops_supported & + VS_BLK_SECURE_ERASE_OP_F) + blk_queue_flag_set(QUEUE_FLAG_SECERASE, vblkdev->queue); + } + + /* And the gendisk structure. */ + vblkdev->gd = __alloc_disk_node(vblkdev->queue, NUMA_NO_NODE, NULL); + if (!vblkdev->gd) { + dev_err(vblkdev->device, "alloc_disk failure\n"); + return; + } + vblkdev->gd->major = vblk_major; + vblkdev->gd->first_minor = vblkdev->devnum * VBLK_MINORS; + vblkdev->gd->minors = VBLK_MINORS; + vblkdev->gd->fops = &vblk_ops; + vblkdev->gd->queue = vblkdev->queue; + vblkdev->gd->private_data = vblkdev; +#if KERNEL_VERSION(5, 16, 0) >= LINUX_VERSION_CODE + vblkdev->gd->flags |= GENHD_FL_EXT_DEVT; +#endif + + /* Don't allow scanning of the device when block + * requests are not supported */ + if (!(vblkdev->config.blk_config.req_ops_supported & + VS_BLK_READ_OP_F)) { +#if KERNEL_VERSION(5, 16, 0) >= LINUX_VERSION_CODE + vblkdev->gd->flags |= GENHD_FL_NO_PART_SCAN; +#endif + } + + /* Set disk read-only if config response say so */ + if (!(vblkdev->config.blk_config.req_ops_supported & + VS_BLK_READ_ONLY_MASK)) { + dev_info(vblkdev->device, "setting device read-only\n"); + set_disk_ro(vblkdev->gd, 1); + } + + if (vblkdev->config.storage_type == VSC_STORAGE_RPMB) { + if (snprintf(vblkdev->gd->disk_name, 32, "vblkrpmb%d", + vblkdev->devnum) < 0) { + dev_err(vblkdev->device, "Error while updating disk_name!\n"); + return; + } + } else { + if (snprintf(vblkdev->gd->disk_name, 32, "vblkdev%d", + vblkdev->devnum) < 0) { + dev_err(vblkdev->device, "Error while updating disk_name!\n"); + return; + } + } + + set_capacity(vblkdev->gd, (vblkdev->size / SECTOR_SIZE)); + device_add_disk(vblkdev->device, vblkdev->gd, NULL); + + if (device_create_file(disk_to_dev(vblkdev->gd), + &dev_attr_phys_dev_ro)) { + dev_warn(vblkdev->device, "Error adding phys dev file!\n"); + return; + } + + if (device_create_file(disk_to_dev(vblkdev->gd), + &dev_attr_phys_base_ro)) { + 
dev_warn(vblkdev->device, "Error adding phys base file!\n"); + return; + } + + if (device_create_file(disk_to_dev(vblkdev->gd), + &dev_attr_storage_type_ro)) { + dev_warn(vblkdev->device, "Error adding storage type file!\n"); + return; + } + + if (device_create_file(disk_to_dev(vblkdev->gd), + &dev_attr_speed_mode_ro)) { + dev_warn(vblkdev->device, "Error adding speed_mode file!\n"); + return; + } +} + +static void vblk_init_device(struct work_struct *ws) +{ + struct vblk_dev *vblkdev = container_of(ws, struct vblk_dev, init); + + /* wait for ivc channel reset to finish */ + if (tegra_hv_ivc_channel_notified(vblkdev->ivck) != 0) + return; /* this will be rescheduled by irq handler */ + + if (tegra_hv_ivc_can_read(vblkdev->ivck) && !vblkdev->initialized) { + if (vblk_get_configinfo(vblkdev)) + return; + + vblkdev->initialized = true; + setup_device(vblkdev); + } +} + +static irqreturn_t ivc_irq_handler(int irq, void *data) +{ + struct vblk_dev *vblkdev = (struct vblk_dev *)data; + + if (vblkdev->initialized) + queue_work_on(WORK_CPU_UNBOUND, vblkdev->wq, &vblkdev->work); + else + schedule_work(&vblkdev->init); + + return IRQ_HANDLED; +} + +static int tegra_hv_vblk_probe(struct platform_device *pdev) +{ + static struct device_node *vblk_node; + struct vblk_dev *vblkdev; + struct device *dev = &pdev->dev; + int ret; + struct tegra_hv_ivm_cookie *ivmk; + + if (!is_tegra_hypervisor_mode()) { + dev_err(dev, "Hypervisor is not present\n"); + return -ENODEV; + } + + if (vblk_major == 0) { + dev_err(dev, "major number is invalid\n"); + return -ENODEV; + } + + vblk_node = dev->of_node; + if (vblk_node == NULL) { + dev_err(dev, "No of_node data\n"); + return -ENODEV; + } + + dev_info(dev, "allocate drvdata buffer\n"); + vblkdev = devm_kzalloc(dev, sizeof(struct vblk_dev), GFP_KERNEL); + if (vblkdev == NULL) + return -ENOMEM; + + platform_set_drvdata(pdev, vblkdev); + vblkdev->device = dev; + + /* Get properties of instance and ivc channel id */ + if (of_property_read_u32(vblk_node, "instance", &(vblkdev->devnum))) { + dev_err(dev, "Failed to read instance property\n"); + ret = -ENODEV; + goto fail; + } else { + if (of_property_read_u32_index(vblk_node, "ivc", 1, + &(vblkdev->ivc_id))) { + dev_err(dev, "Failed to read ivc property\n"); + ret = -ENODEV; + goto fail; + } + if (of_property_read_u32_index(vblk_node, "mempool", 0, + &(vblkdev->ivm_id))) { + dev_err(dev, "Failed to read mempool property\n"); + ret = -ENODEV; + goto fail; + } + } + + vblkdev->ivck = tegra_hv_ivc_reserve(NULL, vblkdev->ivc_id, NULL); + if (IS_ERR_OR_NULL(vblkdev->ivck)) { + dev_err(dev, "Failed to reserve IVC channel %d\n", + vblkdev->ivc_id); + vblkdev->ivck = NULL; + ret = -ENODEV; + goto fail; + } + + ivmk = tegra_hv_mempool_reserve(vblkdev->ivm_id); + if (IS_ERR_OR_NULL(ivmk)) { + dev_err(dev, "Failed to reserve IVM channel %d\n", + vblkdev->ivm_id); + ivmk = NULL; + ret = -ENODEV; + goto free_ivc; + } + vblkdev->ivmk = ivmk; + + vblkdev->shared_buffer = devm_memremap(vblkdev->device, + ivmk->ipa, ivmk->size, MEMREMAP_WB); + if (IS_ERR_OR_NULL(vblkdev->shared_buffer)) { + dev_err(dev, "Failed to map mempool area %d\n", + vblkdev->ivm_id); + ret = -ENOMEM; + goto free_mempool; + } + + vblkdev->initialized = false; + + vblkdev->wq = alloc_workqueue("vblk_req_wq%d", + WQ_UNBOUND | WQ_MEM_RECLAIM, + 1, vblkdev->devnum); + if (vblkdev->wq == NULL) { + dev_err(dev, "Failed to allocate workqueue\n"); + ret = -ENOMEM; + goto free_mempool; + } + + init_completion(&vblkdev->req_queue_empty); + vblkdev->queue_state = 
VBLK_QUEUE_ACTIVE; + + INIT_WORK(&vblkdev->init, vblk_init_device); + INIT_WORK(&vblkdev->work, vblk_request_work); + /* creating and initializing the an internal request list */ + INIT_LIST_HEAD(&vblkdev->req_list); + + if (devm_request_irq(vblkdev->device, vblkdev->ivck->irq, + ivc_irq_handler, 0, "vblk", vblkdev)) { + dev_err(dev, "Failed to request irq %d\n", vblkdev->ivck->irq); + ret = -EINVAL; + goto free_wq; + } + + tegra_hv_ivc_channel_reset(vblkdev->ivck); + if (vblk_send_config_cmd(vblkdev)) { + dev_err(dev, "Failed to send config cmd\n"); + ret = -EACCES; + goto free_wq; + } + + return 0; + +free_wq: + destroy_workqueue(vblkdev->wq); + +free_mempool: + tegra_hv_mempool_unreserve(vblkdev->ivmk); + +free_ivc: + tegra_hv_ivc_unreserve(vblkdev->ivck); + +fail: + return ret; +} + +static int tegra_hv_vblk_remove(struct platform_device *pdev) +{ + struct vblk_dev *vblkdev = platform_get_drvdata(pdev); + + if (vblkdev->gd) { + del_gendisk(vblkdev->gd); + put_disk(vblkdev->gd); + } + + if (vblkdev->queue) + blk_cleanup_queue(vblkdev->queue); + + destroy_workqueue(vblkdev->wq); + tegra_hv_ivc_unreserve(vblkdev->ivck); + tegra_hv_mempool_unreserve(vblkdev->ivmk); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int tegra_hv_vblk_suspend(struct device *dev) +{ + struct vblk_dev *vblkdev = dev_get_drvdata(dev); + unsigned long flags; + + if (vblkdev->queue) { + spin_lock_irqsave(&vblkdev->queue->queue_lock, flags); + blk_mq_stop_hw_queues(vblkdev->queue); + spin_unlock_irqrestore(&vblkdev->queue->queue_lock, flags); + + mutex_lock(&vblkdev->req_lock); + vblkdev->queue_state = VBLK_QUEUE_SUSPENDED; + + /* Mark the queue as empty if inflight requests are 0 */ + if (vblkdev->inflight_reqs == 0) + complete(&vblkdev->req_queue_empty); + mutex_unlock(&vblkdev->req_lock); + + wait_for_completion(&vblkdev->req_queue_empty); + disable_irq(vblkdev->ivck->irq); + + flush_workqueue(vblkdev->wq); + + /* Reset the channel */ + mutex_lock(&vblkdev->ivc_lock); + tegra_hv_ivc_channel_reset(vblkdev->ivck); + mutex_unlock(&vblkdev->ivc_lock); + } + + return 0; +} + +static int tegra_hv_vblk_resume(struct device *dev) +{ + struct vblk_dev *vblkdev = dev_get_drvdata(dev); + unsigned long flags; + + if (vblkdev->queue) { + mutex_lock(&vblkdev->req_lock); + vblkdev->queue_state = VBLK_QUEUE_ACTIVE; + reinit_completion(&vblkdev->req_queue_empty); + mutex_unlock(&vblkdev->req_lock); + + enable_irq(vblkdev->ivck->irq); + + spin_lock_irqsave(&vblkdev->queue->queue_lock, flags); + blk_mq_start_hw_queues(vblkdev->queue); + spin_unlock_irqrestore(&vblkdev->queue->queue_lock, flags); + + queue_work_on(WORK_CPU_UNBOUND, vblkdev->wq, &vblkdev->work); + } + + return 0; +} + +static const struct dev_pm_ops tegra_hv_vblk_pm_ops = { + .suspend = tegra_hv_vblk_suspend, + .resume = tegra_hv_vblk_resume, +}; +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_OF +static struct of_device_id tegra_hv_vblk_match[] = { + { .compatible = "nvidia,tegra-hv-storage", }, + {}, +}; +MODULE_DEVICE_TABLE(of, tegra_hv_vblk_match); +#endif /* CONFIG_OF */ + +static struct platform_driver tegra_hv_vblk_driver = { + .probe = tegra_hv_vblk_probe, + .remove = tegra_hv_vblk_remove, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(tegra_hv_vblk_match), +#ifdef CONFIG_PM_SLEEP + .pm = &tegra_hv_vblk_pm_ops, +#endif + }, +}; + +static int __init tegra_hv_vblk_driver_init(void) +{ + vblk_major = 0; + vblk_major = register_blkdev(vblk_major, "vblk"); + if (vblk_major <= 0) { + pr_err("vblk: unable to get major 
number\n"); + return -ENODEV; + } + + return platform_driver_register(&tegra_hv_vblk_driver); +} +module_init(tegra_hv_vblk_driver_init); + +static void __exit tegra_hv_vblk_driver_exit(void) +{ + unregister_blkdev(vblk_major, "vblk"); + platform_driver_unregister(&tegra_hv_vblk_driver); +} +module_exit(tegra_hv_vblk_driver_exit); + +MODULE_AUTHOR("Dilan Lee "); +MODULE_DESCRIPTION("Virtual storage device over Tegra Hypervisor IVC channel"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/block/tegra_virt_storage/tegra_vblk.h b/drivers/block/tegra_virt_storage/tegra_vblk.h new file mode 100644 index 00000000..ce983d33 --- /dev/null +++ b/drivers/block/tegra_virt_storage/tegra_vblk.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#ifndef _TEGRA_VBLK_H_ +#define _TEGRA_VBLK_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "tegra_hv_vblk" + +/* Minor number and partition management. */ +#define VBLK_MINORS 16 + +#define IVC_RESET_RETRIES 30 + +#define VS_LOG_HEADS 4 +#define VS_LOG_SECTS 16 + +#define MAX_VSC_REQS 32 + +struct vblk_ioctl_req { + uint32_t ioctl_id; + void *ioctl_buf; + uint32_t ioctl_len; + int32_t status; +}; + +struct req_entry { + struct list_head list_entry; + struct request *req; +}; + +struct vsc_request { + struct vs_request vs_req; + struct request *req; + struct req_iterator iter; + struct vblk_ioctl_req *ioctl_req; + void *mempool_virt; + uint32_t mempool_offset; + uint32_t mempool_len; + uint32_t id; + struct vblk_dev* vblkdev; + /* Scatter list for maping IOVA address */ + struct scatterlist *sg_lst; + int sg_num_ents; +}; + +enum vblk_queue_state { + VBLK_UNKNOWN, + VBLK_QUEUE_SUSPENDED, + VBLK_QUEUE_ACTIVE, +}; + +/* +* The drvdata of virtual device. +*/ +struct vblk_dev { + struct vs_config_info config; + uint64_t size; /* Device size in bytes */ + short users; /* How many users */ + short media_change; /* Flag a media change? 
*/ + spinlock_t lock; /* For mutual exclusion */ + struct request_queue *queue; /* The device request queue */ + struct gendisk *gd; /* The gendisk structure */ + struct blk_mq_tag_set tag_set; + struct list_head req_list; /* List containing req */ + uint32_t ivc_id; + uint32_t ivm_id; + struct tegra_hv_ivc_cookie *ivck; + struct tegra_hv_ivm_cookie *ivmk; + uint32_t devnum; + bool initialized; + struct work_struct init; + struct work_struct work; + struct workqueue_struct *wq; + struct device *device; + void *shared_buffer; + struct mutex ioctl_lock; + spinlock_t queue_lock; + struct vsc_request reqs[MAX_VSC_REQS]; + DECLARE_BITMAP(pending_reqs, MAX_VSC_REQS); + uint32_t inflight_reqs; + uint32_t max_requests; + struct mutex req_lock; + struct mutex ivc_lock; + enum vblk_queue_state queue_state; + struct completion req_queue_empty; +}; + +int vblk_complete_ioctl_req(struct vblk_dev *vblkdev, + struct vsc_request *vsc_req, int32_t status); + +int vblk_prep_ioctl_req(struct vblk_dev *vblkdev, + struct vblk_ioctl_req *ioctl_req, + struct vsc_request *vsc_req); + +int vblk_prep_sg_io(struct vblk_dev *vblkdev, + struct vblk_ioctl_req *ioctl_req, + void __user *user); + +int vblk_complete_sg_io(struct vblk_dev *vblkdev, + struct vblk_ioctl_req *ioctl_req, + void __user *user); + +int vblk_prep_mmc_multi_ioc(struct vblk_dev *vblkdev, + struct vblk_ioctl_req *ioctl_req, + void __user *user, + uint32_t cmd); + +int vblk_complete_mmc_multi_ioc(struct vblk_dev *vblkdev, + struct vblk_ioctl_req *ioctl_req, + void __user *user, + uint32_t cmd); + +int vblk_prep_ufs_combo_ioc(struct vblk_dev *vblkdev, + struct vblk_ioctl_req *ioctl_req, + void __user *user, uint32_t cmd); + +int vblk_complete_ufs_combo_ioc(struct vblk_dev *vblkdev, + struct vblk_ioctl_req *ioctl_req, + void __user *user, + uint32_t cmd); + +int vblk_submit_ioctl_req(struct block_device *bdev, + unsigned int cmd, void __user *user); + +int vblk_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg); +#endif diff --git a/include/tegra_virt_storage_spec.h b/include/tegra_virt_storage_spec.h new file mode 100644 index 00000000..b34eb16b --- /dev/null +++ b/include/tegra_virt_storage_spec.h @@ -0,0 +1,347 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ */ + +#ifndef _TEGRA_VIRT_STORAGE_SPEC_H_ +#define _TEGRA_VIRT_STORAGE_SPEC_H_ + +#include /* size_t */ + +#define VS_REQ_OP_F_NONE 0 + +enum vs_req_type { + VS_DATA_REQ = 1, + VS_CONFIGINFO_REQ = 2, + VS_UNKNOWN_CMD = 0xffffffff, +}; + +enum vs_dev_type { + VS_BLK_DEV = 1, + VS_MTD_DEV = 2, + VS_UNKNOWN_DEV = 0xffffffff, +}; + +enum mtd_cmd_op { + VS_MTD_READ = 1, + VS_MTD_WRITE = 2, + VS_MTD_ERASE = 3, + VS_MTD_IOCTL = 4, + VS_MTD_INVAL_REQ = 32, + VS_UNKNOWN_MTD_CMD = 0xffffffff, +}; + +/* MTD device request Operation type features supported */ +#define VS_MTD_READ_OP_F (1 << VS_MTD_READ) +#define VS_MTD_WRITE_OP_F (1 << VS_MTD_WRITE) +#define VS_MTD_ERASE_OP_F (1 << VS_MTD_ERASE) +#define VS_MTD_IOCTL_OP_F (1 << VS_MTD_IOCTL) +#define VS_MTD_READ_ONLY_MASK ~(VS_MTD_READ_OP_F) + +enum blk_cmd_op { + VS_BLK_READ = 1, + VS_BLK_WRITE = 2, + VS_BLK_FLUSH = 3, + VS_BLK_DISCARD = 4, + VS_BLK_SECURE_ERASE = 5, + VS_BLK_IOCTL = 6, + VS_BLK_INVAL_REQ = 32, + VS_UNKNOWN_BLK_CMD = 0xffffffff, +}; + +/* Blk device request Operation type features supported */ +#define VS_BLK_READ_OP_F (1 << VS_BLK_READ) +#define VS_BLK_WRITE_OP_F (1 << VS_BLK_WRITE) +#define VS_BLK_FLUSH_OP_F (1 << VS_BLK_FLUSH) +#define VS_BLK_DISCARD_OP_F (1 << VS_BLK_DISCARD) +#define VS_BLK_SECURE_ERASE_OP_F (1 << VS_BLK_SECURE_ERASE) +#define VS_BLK_IOCTL_OP_F (1 << VS_BLK_IOCTL) +#define VS_BLK_READ_ONLY_MASK ~(VS_BLK_READ_OP_F) + +#pragma pack(push) +#pragma pack(1) + +struct vs_blk_request { + uint64_t blk_offset; /* Offset into storage device in terms + of blocks for block device */ + uint32_t num_blks; /* Total Block number to transfer */ + uint32_t data_offset; /* Offset into mempool for data region + */ + /* IOVA address of the buffer. In case of read request, VSC will get + * the response to this address. In case of write request, VSC will + * get the data from this address. 
+ */ + uint64_t iova_addr; +}; + +struct vs_mtd_request { + uint64_t offset; /* Offset into storage device in terms + of bytes in case of mtd device */ + uint32_t size; /* Total number of bytes to transfer + to be used for MTD device */ + uint32_t data_offset; /* Offset into mempool for data region + */ +}; + +struct vs_ioctl_request { + uint32_t ioctl_id; /* Id of the ioctl */ + uint32_t ioctl_len; /* Length of the mempool area associated + with ioctl */ + uint32_t data_offset; /* Offset into mempool for data region + */ +}; + +struct vs_blkdev_request { + enum blk_cmd_op req_op; + union { + struct vs_blk_request blk_req; + struct vs_ioctl_request ioctl_req; + }; +}; + +struct vs_mtddev_request { + enum mtd_cmd_op req_op; + union { + struct vs_mtd_request mtd_req; + struct vs_ioctl_request ioctl_req; + }; +}; + +struct vs_blk_response { + int32_t status; /* 0 for success, < 0 for error */ + uint32_t num_blks; +}; + +struct vs_mtd_response { + int32_t status; /* 0 for success, < 0 for error */ + uint32_t size; /* Number of bytes processed in case of + of mtd device*/ +}; + +struct vs_ioctl_response { + int32_t status; /* 0 for success, < 0 for error */ +}; + +struct vs_blkdev_response { + union { + struct vs_blk_response blk_resp; + struct vs_ioctl_response ioctl_resp; + }; +}; + +struct vs_mtddev_response { + union { + struct vs_mtd_response mtd_resp; + struct vs_ioctl_response ioctl_resp; + }; +}; + +struct vs_blk_dev_config { + uint32_t hardblk_size; /* Block Size */ + uint32_t max_read_blks_per_io; /* Limit number of Blocks + per I/O*/ + uint32_t max_write_blks_per_io; /* Limit number of Blocks + per I/O*/ + uint32_t max_erase_blks_per_io; /* Limit number of Blocks per I/O */ + uint32_t req_ops_supported; /* Allowed operations by requests */ + uint64_t num_blks; /* Total number of blks */ + + /* + * If true, then VM need to provide local IOVA address for read and + * write requests. For IOCTL requests, mempool will be used + * irrespective of this flag. 
+ */ + uint32_t use_vm_address; +}; + +struct vs_mtd_dev_config { + uint32_t max_read_bytes_per_io; /* Limit number of bytes + per I/O */ + uint32_t max_write_bytes_per_io; /* Limit number of bytes + per I/O */ + uint32_t erase_size; /* Erase size for mtd + device*/ + uint32_t req_ops_supported; /* Allowed operations by requests */ + uint64_t size; /* Total number of bytes */ +}; + +/* Physical device types */ +#define VSC_DEV_EMMC 1U +#define VSC_DEV_UFS 2U +#define VSC_DEV_QSPI 3U + +/* Storage Types */ +#define VSC_STORAGE_RPMB 1U +#define VSC_STORAGE_BOOT 2U +#define VSC_STORAGE_LUN0 3U +#define VSC_STORAGE_LUN1 4U +#define VSC_STORAGE_LUN2 5U +#define VSC_STORAGE_LUN3 6U +#define VSC_STORAGE_LUN4 7U +#define VSC_STORAGE_LUN5 8U +#define VSC_STORAGE_LUN6 9U +#define VSC_STORAGE_LUN7 10U + +#define SPEED_MODE_MAX_LEN 32 + +struct vs_config_info { + uint32_t virtual_storage_ver; /* Version of virtual storage */ + enum vs_dev_type type; /* Type of underlying device */ + union { + struct vs_blk_dev_config blk_config; + struct vs_mtd_dev_config mtd_config; + }; + uint32_t phys_dev; + uint32_t phys_base; + uint32_t storage_type; + uint8_t speed_mode[SPEED_MODE_MAX_LEN]; +}; + +struct vs_request { + uint32_t req_id; + enum vs_req_type type; + union { + struct vs_blkdev_request blkdev_req; + struct vs_mtddev_request mtddev_req; + }; + int32_t status; + union { + struct vs_blkdev_response blkdev_resp; + struct vs_mtddev_response mtddev_resp; + struct vs_config_info config_info; + }; +}; + +/** + * @addtogroup MMC_RESP MMC Responses + * + * @brief Defines Command Responses of EMMC + */ +typedef enum { + /** @brief No Response */ + RESP_TYPE_NO_RESP = 0U, + /** @brief Response Type 1 */ + RESP_TYPE_R1 = 1U, + /** @brief Response Type 2 */ + RESP_TYPE_R2 = 2U, + /** @brief Response Type 3 */ + RESP_TYPE_R3 = 3U, + /** @brief Response Type 4 */ + RESP_TYPE_R4 = 4U, + /** @brief Response Type 5 */ + RESP_TYPE_R5 = 5U, + /** @brief Response Type 6 */ + RESP_TYPE_R6 = 6U, + /** @brief Response Type 7 */ + RESP_TYPE_R7 = 7U, + /** @brief Response Type 1B */ + RESP_TYPE_R1B = 8U, + /** @brief Number of Response Type */ + RESP_TYPE_NUM = 9U + /* @} */ +} sdmmc_resp_type; + +#define VBLK_MMC_MULTI_IOC_ID 0x1000 +struct combo_cmd_t { + uint32_t cmd; + uint32_t arg; + uint32_t write_flag; + uint32_t response[4]; + uint32_t buf_offset; + uint32_t data_len; + sdmmc_resp_type flags; +}; + +struct combo_info_t { + uint32_t count; + int32_t result; +}; + +/* SCSI bio layer needs to handle SCSI and UFS IOCTL separately + * This flag will be ORed with IO_IOCTL to find out difference + * between SCSI and UFS IOCTL + */ +#define SCSI_IOCTL_FLAG 0x10000000 +#define UFS_IOCTL_FLAG 0x20000000 +/* Mask for SCSI and UFS ioctl flags, 4 MSB (bits) reserved for it Two LSB + * bits are used for SCSI and UFS, 2 MSB bits reserved for future use. 
+ */
+#define SCSI_UFS_IOCTL_FLAG_MASK 0xF0000000
+
+#define VBLK_SG_IO_ID (0x1001 | SCSI_IOCTL_FLAG)
+#define VBLK_UFS_IO_ID (0x1002 | UFS_IOCTL_FLAG)
+#define VBLK_UFS_COMBO_IO_ID (0x1003 | UFS_IOCTL_FLAG)
+
+#define VBLK_SG_MAX_CMD_LEN 16
+
+enum scsi_data_direction {
+	SCSI_BIDIRECTIONAL = 0,
+	SCSI_TO_DEVICE = 1,
+	SCSI_FROM_DEVICE = 2,
+	SCSI_DATA_NONE = 3,
+	UNKNOWN_DIRECTION = 0xffffffff,
+};
+
+struct vblk_sg_io_hdr
+{
+	int32_t data_direction; /* [i] data transfer direction */
+	uint8_t cmd_len; /* [i] SCSI command length */
+	uint8_t mx_sb_len; /* [i] max length to write to sbp */
+	uint32_t dxfer_len; /* [i] byte count of data transfer */
+	uint32_t xfer_arg_offset; /* [i], [*io] offset to data transfer memory */
+	uint32_t cmdp_arg_offset; /* [i], [*i] offset to command to perform */
+	uint32_t sbp_arg_offset; /* [i], [*o] offset to sense_buffer memory */
+	uint32_t status; /* [o] scsi status */
+	uint8_t sb_len_wr; /* [o] byte count actually written to sbp */
+	uint32_t dxfer_buf_len; /* [i] Length of data transfer buffer */
+};
+
+struct vblk_ufs_ioc_query_req {
+	/* Query opcode to specify the type of Query operation */
+	uint8_t opcode;
+	/* idn to provide more info on specific operation. */
+	uint8_t idn;
+	/* index - optional in some cases */
+	uint8_t index;
+	/* selector - optional in some cases */
+	uint8_t selector;
+	/* buf_size - size in bytes of the query data at buffer_offset.
+	 * Note:
+	 * For Read/Write Attribute this should be 4 bytes
+	 * For Read Flag this should be 1 byte
+	 * For Descriptor Read/Write the size depends on the type of the descriptor
+	 */
+	uint16_t buf_size;
+	/*
+	 * User buffer offset for query data. The offset should be within the
+	 * bounds of the mempool memory region.
+	 */
+	uint32_t buffer_offset;
+	/* Delay after each query command completion, in microseconds. */
+	uint32_t delay;
+	/* Error status for the query operation */
+	int32_t error_status;
+
+};
+
+/** @brief Metadata of a UFS native ioctl combo command */
+typedef struct vblk_ufs_combo_info {
+	/** Count of commands in the combo command */
+	uint32_t count;
+	/** Status of the combo command */
+	int32_t result;
+	/** Flag to specify whether to empty the command queue before
+	 * processing the combo request.
+	 * If the user wants to ensure that there are no requests in the UFS
+	 * device command queue before executing a query command, this flag
+	 * has to be set to 1.
+	 * For example, in the case of refresh for a Samsung UFS device, the
+	 * command queue should be emptied before setting the attribute for
+	 * refresh.
+	 */
+	uint8_t need_cq_empty;
+} vblk_ufs_combo_info_t;
+
+#pragma pack(pop)
+
+#endif
diff --git a/include/uapi/scsi/ufs/ioctl.h b/include/uapi/scsi/ufs/ioctl.h
new file mode 100644
index 00000000..25bb68c6
--- /dev/null
+++ b/include/uapi/scsi/ufs/ioctl.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ */
+
+#ifndef UAPI_SCSI_UFS_UFS_IOCTL_H_
+#define UAPI_SCSI_UFS_UFS_IOCTL_H_
+
+#include
+
+/*
+ * IOCTL opcode for UFS combo queries; its value immediately follows
+ * SCSI_IOCTL_GET_PCI
+ */
+#define UFS_IOCTL_COMBO_QUERY 0x5388
+/*
+ * IOCTL opcode to set UFS power mode
+ */
+#define UFS_IOCTL_SET_POWER_MODE 0x5389
+
+/*
+ * Maximum number of Query requests per Combo Query Request
+ */
+#define MAX_QUERY_CMD_PER_COMBO 10
+
+/**
+ * struct ufs_ioc_query_req - used to transfer a UFS query command/data to and
+ * from user space via ioctl
+ */
+struct ufs_ioc_query_req {
+	/* Query opcode to specify the type of Query operation */
+	__u8 opcode;
+	/* idn to provide more info on specific operation. */
+	__u8 idn;
+	/* index - optional in some cases */
+	__u8 index;
+	/* selector - optional in some cases */
+	__u8 selector;
+	/* buf_size - buffer size in bytes pointed to by buffer. */
+	__u16 buf_size;
+	/*
+	 * User buffer pointer for query data.
+	 * Note:
+	 * For Read/Write Attribute this should be 4 bytes
+	 * For Read Flag this should be 1 byte
+	 * For Descriptor Read/Write the size depends on the type of the descriptor
+	 */
+	__u8 *buffer;
+	/* Delay after query command completion */
+	__u32 delay;
+	/* Error status for the query operation */
+	__s32 error_status;
+};
+
+struct ufs_ioc_combo_query_req {
+	/* Number of query commands in this combo */
+	__u8 num_cmds;
+	/* Flag to specify whether the command queue needs to be empty or not */
+	__u8 need_cq_empty;
+	/* Flag to specify whether to return on error or continue with the remaining requests */
+	__u8 return_on_error;
+	/* Pointer to the first query command request */
+	struct ufs_ioc_query_req *query;
+};
+
+#endif /* UAPI_SCSI_UFS_UFS_IOCTL_H_ */
\ No newline at end of file
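
Illustrative usage sketch (not part of the patch): the UAPI structures above are what a userspace caller fills in before issuing UFS_IOCTL_COMBO_QUERY against a vblk block device, which the driver services through vblk_prep_ufs_combo_ioc()/vblk_complete_ufs_combo_ioc(). The device node name (/dev/vblkdev0), the installed header path (<scsi/ufs/ioctl.h>), and the opcode/idn values used here (read of the device descriptor) are assumptions for the example, not values taken from this change.

/*
 * Minimal, unverified userspace sketch: one query command wrapped in a
 * combo request. Paths, node names, and opcode/idn values are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <scsi/ufs/ioctl.h>	/* assumed install path of the UAPI header */

int main(void)
{
	struct ufs_ioc_query_req query;
	struct ufs_ioc_combo_query_req combo;
	unsigned char desc[255];
	int fd, ret;

	/* Assumed vblk device node; raw ioctls typically need root. */
	fd = open("/dev/vblkdev0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&query, 0, sizeof(query));
	query.opcode = 0x01;		/* assumed: UFS "read descriptor" opcode */
	query.idn = 0x00;		/* assumed: device descriptor IDN */
	query.index = 0;
	query.selector = 0;
	query.buf_size = sizeof(desc);
	query.buffer = desc;

	memset(&combo, 0, sizeof(combo));
	combo.num_cmds = 1;		/* must not exceed MAX_QUERY_CMD_PER_COMBO */
	combo.need_cq_empty = 0;
	combo.return_on_error = 1;
	combo.query = &query;

	ret = ioctl(fd, UFS_IOCTL_COMBO_QUERY, &combo);
	if (ret < 0 || query.error_status != 0)
		fprintf(stderr, "combo query failed: ret=%d status=%d\n",
			ret, query.error_status);

	close(fd);
	return ret < 0 ? 1 : 0;
}

On success the query data should land in the caller-supplied buffer; each command's error_status should be checked individually, since a combo may carry up to MAX_QUERY_CMD_PER_COMBO requests.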