mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git (synced 2025-12-22 09:11:26 +03:00)
nvidia-oot: port tegra vblk driver
Add support for the tegra vblk driver in the OOT kernel.

JIRA ESLC-6885

Signed-off-by: Manish Bhardwaj <mbhardwaj@nvidia.com>
Change-Id: I914bb3337019412593e05e75fa0569dd1c7398d1
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2781122
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
parent 6cf5029afb
commit 95d8293e6c
committed by mobile promotions
@@ -3,6 +3,7 @@
 LINUXINCLUDE += -I$(srctree.nvidia-oot)/include
 
+obj-m += block/tegra_virt_storage/
 obj-m += crypto/
 obj-m += devfreq/
 obj-m += dma/
drivers/block/tegra_virt_storage/Makefile | 13 (new file)
@@ -0,0 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#
# Makefile for Virtual Storage Driver
#

tegra_vblk-y += tegra_hv_vblk.o
tegra_vblk-y += tegra_hv_ioctl.o
tegra_vblk-y += tegra_hv_mmc.o
tegra_vblk-y += tegra_hv_scsi.o
tegra_vblk-y += tegra_hv_ufs.o
obj-m += tegra_vblk.o
drivers/block/tegra_virt_storage/tegra_hv_ioctl.c | 198 (new file)
@@ -0,0 +1,198 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/kernel.h>	/* printk() */
#include <linux/fs.h>		/* everything... */
#include <linux/errno.h>	/* error codes */
#include <asm-generic/bug.h>
#include <linux/slab.h>		/* kmalloc() */
#include <scsi/scsi.h>
#include <uapi/scsi/ufs/ioctl.h>
#include <scsi/sg.h>
#include <linux/mmc/ioctl.h>
#include "tegra_vblk.h"

int vblk_complete_ioctl_req(struct vblk_dev *vblkdev,
		struct vsc_request *vsc_req, int status)
{
	struct vblk_ioctl_req *ioctl_req = vsc_req->ioctl_req;
	int32_t ret = 0;

	if (ioctl_req == NULL) {
		dev_err(vblkdev->device,
			"Invalid ioctl request for completion!\n");
		ret = -EINVAL;
		goto comp_exit;
	}

	ioctl_req->status = status;
	memcpy(ioctl_req->ioctl_buf, vsc_req->mempool_virt,
		ioctl_req->ioctl_len);
comp_exit:
	return ret;
}

int vblk_prep_ioctl_req(struct vblk_dev *vblkdev,
		struct vblk_ioctl_req *ioctl_req,
		struct vsc_request *vsc_req)
{
	int32_t ret = 0;
	struct vs_request *vs_req;

	if (ioctl_req == NULL) {
		dev_err(vblkdev->device,
			"Invalid ioctl request for preparation!\n");
		return -EINVAL;
	}

	if (ioctl_req->ioctl_len > vsc_req->mempool_len) {
		dev_err(vblkdev->device,
			"Ioctl length exceeding mempool length!\n");
		return -EINVAL;
	}

	if (ioctl_req->ioctl_buf == NULL) {
		dev_err(vblkdev->device,
			"Ioctl buffer invalid!\n");
		return -EINVAL;
	}

	vs_req = &vsc_req->vs_req;
	vs_req->blkdev_req.req_op = VS_BLK_IOCTL;
	memcpy(vsc_req->mempool_virt, ioctl_req->ioctl_buf,
		ioctl_req->ioctl_len);
	vs_req->blkdev_req.ioctl_req.ioctl_id = ioctl_req->ioctl_id;
	vs_req->blkdev_req.ioctl_req.data_offset = vsc_req->mempool_offset;
	vs_req->blkdev_req.ioctl_req.ioctl_len = ioctl_req->ioctl_len;

	vsc_req->ioctl_req = ioctl_req;

	return ret;
}

int vblk_submit_ioctl_req(struct block_device *bdev,
		unsigned int cmd, void __user *user)
{
	struct vblk_dev *vblkdev = bdev->bd_disk->private_data;
	struct vblk_ioctl_req *ioctl_req = NULL;
	struct request *rq;
	int err;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition. This prevents overspray
	 * between sibling partitions.
	 */
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	ioctl_req = kmalloc(sizeof(struct vblk_ioctl_req), GFP_KERNEL);
	if (!ioctl_req) {
		dev_err(vblkdev->device,
			"failed to alloc memory for ioctl req!\n");
		return -ENOMEM;
	}

	switch (cmd) {
	case SG_IO:
		err = vblk_prep_sg_io(vblkdev, ioctl_req, user);
		break;
	case MMC_IOC_MULTI_CMD:
	case MMC_IOC_CMD:
		err = vblk_prep_mmc_multi_ioc(vblkdev, ioctl_req, user, cmd);
		break;
	case UFS_IOCTL_COMBO_QUERY:
		err = vblk_prep_ufs_combo_ioc(vblkdev, ioctl_req, user, cmd);
		break;
	default:
		dev_err(vblkdev->device, "unsupported command %x!\n", cmd);
		err = -EINVAL;
		goto free_ioctl_req;
	}

	if (err)
		goto free_ioctl_req;

#if KERNEL_VERSION(5, 16, 0) >= LINUX_VERSION_CODE
	rq = blk_get_request(vblkdev->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
#else
	rq = blk_mq_alloc_request(vblkdev->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
#endif
	if (IS_ERR_OR_NULL(rq)) {
		dev_err(vblkdev->device,
			"Failed to get handle to a request!\n");
		err = PTR_ERR(rq);
		goto free_ioctl_req;
	}

	rq->completion_data = (void *)ioctl_req;

#if KERNEL_VERSION(5, 16, 0) >= LINUX_VERSION_CODE
	blk_execute_rq(vblkdev->gd, rq, 0);
	blk_put_request(rq);
#else
	blk_execute_rq(rq, 0);
	blk_mq_free_request(rq);
#endif

	switch (cmd) {
	case SG_IO:
		err = vblk_complete_sg_io(vblkdev, ioctl_req, user);
		break;
	case MMC_IOC_MULTI_CMD:
	case MMC_IOC_CMD:
		err = vblk_complete_mmc_multi_ioc(vblkdev, ioctl_req, user, cmd);
		break;
	case UFS_IOCTL_COMBO_QUERY:
		err = vblk_complete_ufs_combo_ioc(vblkdev, ioctl_req, user, cmd);
		break;
	default:
		dev_err(vblkdev->device, "unsupported command %x!\n", cmd);
		err = -EINVAL;
		goto free_ioctl_req;
	}

free_ioctl_req:
	if (ioctl_req)
		kfree(ioctl_req);

	return err;
}

/* The ioctl() implementation */
int vblk_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	int ret;
	struct vblk_dev *vblkdev = bdev->bd_disk->private_data;

	mutex_lock(&vblkdev->ioctl_lock);
	switch (cmd) {
	case MMC_IOC_MULTI_CMD:
	case MMC_IOC_CMD:
	case SG_IO:
	case UFS_IOCTL_COMBO_QUERY:
		ret = vblk_submit_ioctl_req(bdev, cmd, (void __user *)arg);
		break;
	default: /* unknown command */
		ret = -ENOTTY;
		break;
	}
	mutex_unlock(&vblkdev->ioctl_lock);

	return ret;
}
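For context on how this entry point is reached from user space, the sketch below drives the SG_IO path through the ioctl handler above. It is a minimal sketch, not part of the patch: the node name /dev/vblkdev0 is an assumption (device-node creation happens in tegra_hv_vblk.c, whose diff is suppressed below), and the caller needs CAP_SYS_RAWIO, as enforced in vblk_submit_ioctl_req().

/* Hedged user-space sketch: issue READ CAPACITY(10) via SG_IO.
 * "/dev/vblkdev0" is an assumed node name, not confirmed by this diff.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[10] = { 0x25 };	/* READ CAPACITY(10) */
	unsigned char data[8], sense[32];
	sg_io_hdr_t io;
	int fd = open("/dev/vblkdev0", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return 1;

	memset(&io, 0, sizeof(io));
	io.interface_id = 'S';
	io.dxfer_direction = SG_DXFER_FROM_DEV;	/* maps to SCSI_FROM_DEVICE */
	io.cmd_len = sizeof(cdb);	/* must be 6..VBLK_SG_MAX_CMD_LEN */
	io.cmdp = cdb;
	io.dxfer_len = sizeof(data);
	io.dxferp = data;
	io.mx_sb_len = sizeof(sense);
	io.sbp = sense;

	if (ioctl(fd, SG_IO, &io) == 0)
		printf("scsi status 0x%x, sense bytes %d\n",
			(unsigned int)io.status, (int)io.sb_len_wr);

	close(fd);
	return 0;
}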
drivers/block/tegra_virt_storage/tegra_hv_mmc.c | 210 (new file)
@@ -0,0 +1,210 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include <linux/kernel.h>	/* printk() */
#include <linux/slab.h>		/* kmalloc() */
#include <linux/fs.h>		/* everything... */
#include <linux/errno.h>	/* error codes */
#include <linux/fcntl.h>	/* O_ACCMODE */
#include <asm/uaccess.h>
#include <asm-generic/bug.h>
#include <linux/mmc/ioctl.h>
#include <linux/mmc/core.h>
#include "tegra_vblk.h"

#define VBLK_MMC_MAX_IOC_SIZE (256 * 1024)

int vblk_prep_mmc_multi_ioc(struct vblk_dev *vblkdev,
		struct vblk_ioctl_req *ioctl_req,
		void __user *user,
		uint32_t cmd)
{
	int err = 0;
	struct combo_info_t *combo_info;
	struct combo_cmd_t *combo_cmd;
	int i = 0;
	uint64_t num_cmd;
	struct mmc_ioc_cmd ic;
	struct mmc_ioc_multi_cmd __user *user_cmd;
	struct mmc_ioc_cmd __user *usr_ptr;
	uint32_t combo_cmd_size;
	uint32_t ioctl_bytes = VBLK_MMC_MAX_IOC_SIZE;
	uint8_t *tmpaddr;
	void *ioctl_buf;

	ioctl_buf = vmalloc(ioctl_bytes);
	if (ioctl_buf == NULL)
		return -ENOMEM;

	combo_info = (struct combo_info_t *)ioctl_buf;
	combo_cmd_size = sizeof(uint32_t);

	if (cmd == MMC_IOC_MULTI_CMD) {
		user_cmd = (struct mmc_ioc_multi_cmd __user *)user;
		if (copy_from_user(&num_cmd, &user_cmd->num_of_cmds,
				sizeof(num_cmd))) {
			err = -EFAULT;
			goto free_ioc_buf;
		}

		if (num_cmd > MMC_IOC_MAX_CMDS) {
			err = -EINVAL;
			goto free_ioc_buf;
		}

		usr_ptr = (void __user *)&user_cmd->cmds;
	} else {
		num_cmd = 1;
		usr_ptr = (void __user *)user;
	}
	combo_info->count = num_cmd;

	combo_cmd = (struct combo_cmd_t *)(ioctl_buf +
		sizeof(struct combo_info_t));

	combo_cmd_size = sizeof(struct combo_info_t) +
		sizeof(struct combo_cmd_t) * combo_info->count;
	if (combo_cmd_size < sizeof(struct combo_info_t)) {
		dev_err(vblkdev->device,
			"combo_cmd_size is overflowing!\n");
		err = -EINVAL;
		goto free_ioc_buf;
	}

	if (combo_cmd_size > ioctl_bytes) {
		dev_err(vblkdev->device,
			"buffer does not have enough space to serve ioctl\n");
		err = -EFAULT;
		goto free_ioc_buf;
	}

	tmpaddr = (uint8_t *)&ic;
	for (i = 0; i < combo_info->count; i++) {
		if (copy_from_user((void *)tmpaddr, usr_ptr, sizeof(ic))) {
			err = -EFAULT;
			goto free_ioc_buf;
		}
		combo_cmd->cmd = ic.opcode;
		combo_cmd->arg = ic.arg;
		combo_cmd->write_flag = (uint32_t)ic.write_flag;
		combo_cmd->data_len = (uint32_t)(ic.blksz * ic.blocks);
		combo_cmd->buf_offset = combo_cmd_size;
		combo_cmd_size += combo_cmd->data_len;
		if ((combo_cmd_size < combo_cmd->data_len) ||
			(combo_cmd_size > ioctl_bytes)) {
			dev_err(vblkdev->device,
				"buffer does not have enough space to serve ioctl\n");
			err = -EFAULT;
			goto free_ioc_buf;
		}

		if (ic.write_flag && combo_cmd->data_len) {
			if (copy_from_user(
				((void *)ioctl_buf + combo_cmd->buf_offset),
				(void __user *)(unsigned long)ic.data_ptr,
				(u64)combo_cmd->data_len)) {
				dev_err(vblkdev->device,
					"copy from user failed for data!\n");
				err = -EFAULT;
				goto free_ioc_buf;
			}
		}
		combo_cmd++;
		usr_ptr++;
	}

	ioctl_req->ioctl_id = VBLK_MMC_MULTI_IOC_ID;
	ioctl_req->ioctl_buf = ioctl_buf;
	ioctl_req->ioctl_len = ioctl_bytes;

free_ioc_buf:
	if (err && ioctl_buf)
		vfree(ioctl_buf);

	return err;
}

int vblk_complete_mmc_multi_ioc(struct vblk_dev *vblkdev,
		struct vblk_ioctl_req *ioctl_req,
		void __user *user,
		uint32_t cmd)
{
	uint64_t num_cmd;
	struct mmc_ioc_cmd ic;
	struct mmc_ioc_cmd *ic_ptr = &ic;
	struct mmc_ioc_multi_cmd __user *user_cmd;
	struct mmc_ioc_cmd __user *usr_ptr;
	struct combo_cmd_t *combo_cmd;
	uint32_t i;
	int err = 0;
	void *ioctl_buf = ioctl_req->ioctl_buf;

	if (ioctl_req->status) {
		err = ioctl_req->status;
		if (ioctl_req->ioctl_buf)
			vfree(ioctl_req->ioctl_buf);
		goto exit;
	}

	if (cmd == MMC_IOC_MULTI_CMD) {
		user_cmd = (struct mmc_ioc_multi_cmd __user *)user;
		if (copy_from_user(&num_cmd, &user_cmd->num_of_cmds,
				sizeof(num_cmd))) {
			err = -EFAULT;
			goto free_ioc_buf;
		}

		if (num_cmd > MMC_IOC_MAX_CMDS) {
			err = -EINVAL;
			goto free_ioc_buf;
		}

		usr_ptr = (void __user *)&user_cmd->cmds;
	} else {
		usr_ptr = (void __user *)user;
		num_cmd = 1;
	}

	combo_cmd = (struct combo_cmd_t *)(ioctl_buf +
		sizeof(struct combo_info_t));

	for (i = 0; i < num_cmd; i++) {
		if (copy_from_user((void *)ic_ptr, usr_ptr,
				sizeof(struct mmc_ioc_cmd))) {
			err = -EFAULT;
			goto free_ioc_buf;
		}

		if (copy_to_user(&(usr_ptr->response), combo_cmd->response,
				sizeof(combo_cmd->response))) {
			err = -EFAULT;
			goto free_ioc_buf;
		}

		if (!ic.write_flag && combo_cmd->data_len) {
			if (copy_to_user(
				(void __user *)(unsigned long)ic.data_ptr,
				(ioctl_buf + combo_cmd->buf_offset),
				(u64)combo_cmd->data_len)) {
				dev_err(vblkdev->device,
					"copy to user of ioctl data failed!\n");
				err = -EFAULT;
				goto free_ioc_buf;
			}
		}
		combo_cmd++;
		usr_ptr++;
	}

free_ioc_buf:
	if (ioctl_buf)
		vfree(ioctl_buf);

exit:
	return err;
}
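vblk_prep_mmc_multi_ioc() and vblk_complete_mmc_multi_ioc() accept the stock linux/mmc/ioctl.h structures, so the usual MMC tooling conventions apply. A minimal user-space sketch, assuming a node name of /dev/vblkdev0 and the response-flag encoding mmc-utils commonly uses for SEND_EXT_CSD:

/* Hedged user-space sketch: read EXT_CSD (CMD8) via MMC_IOC_CMD.
 * "/dev/vblkdev0" is an assumed node name, and 0xb5 is the
 * MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC encoding from mmc-utils.
 */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mmc/ioctl.h>

int main(void)
{
	uint8_t ext_csd[512];
	struct mmc_ioc_cmd ic;
	int ret;
	int fd = open("/dev/vblkdev0", O_RDONLY);	/* assumed node name */

	if (fd < 0)
		return 1;

	memset(&ic, 0, sizeof(ic));
	ic.opcode = 8;		/* MMC SEND_EXT_CSD */
	ic.flags = 0xb5;	/* R1/ADTC response flags (see comment above) */
	ic.blksz = 512;
	ic.blocks = 1;
	ic.write_flag = 0;	/* read: data is copied back on completion */
	mmc_ioc_cmd_set_data(ic, ext_csd);

	ret = ioctl(fd, MMC_IOC_CMD, &ic);

	close(fd);
	return ret ? 1 : 0;
}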
drivers/block/tegra_virt_storage/tegra_hv_scsi.c | 229 (new file)
@@ -0,0 +1,229 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>	/* printk() */
#include <linux/slab.h>		/* kmalloc() */
#include <linux/fs.h>		/* everything... */
#include <linux/errno.h>	/* error codes */
#include <linux/fcntl.h>	/* O_ACCMODE */
#include <asm/uaccess.h>
#include <asm-generic/bug.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include "tegra_vblk.h"

int vblk_prep_sg_io(struct vblk_dev *vblkdev,
		struct vblk_ioctl_req *ioctl_req,
		void __user *user)
{
	int err = 0;
	sg_io_hdr_t *hp = NULL;
	uint32_t header_len = sizeof(sg_io_hdr_t);
	struct vblk_sg_io_hdr *vblk_hp;
	uint32_t vblk_sg_header_len = sizeof(struct vblk_sg_io_hdr);
	uint32_t cmnd_offset;
	void *cmnd;
	uint32_t sbp_offset;
	void *sbp;
	uint32_t data_buf_offset;
	uint32_t data_buf_offset_aligned;
	void *data_buf;
	uint32_t data_buf_size_aligned;
	uint32_t ioctl_len;
	void *ioctl_buf = NULL;

	hp = kmalloc(header_len, GFP_KERNEL);
	if (hp == NULL)
		return -ENOMEM;

	if (copy_from_user(hp, user, header_len)) {
		err = -EFAULT;
		goto free_hp;
	}

	if ((!hp->cmdp) || (hp->cmd_len < 6) ||
		(hp->cmd_len > VBLK_SG_MAX_CMD_LEN)) {
		err = -EMSGSIZE;
		goto free_hp;
	}

	cmnd_offset = vblk_sg_header_len;

	sbp_offset = (cmnd_offset + hp->cmd_len);
	if (sbp_offset < cmnd_offset) {
		err = -EMSGSIZE;
		goto free_hp;
	}

	data_buf_offset = (sbp_offset + hp->mx_sb_len);
	if (data_buf_offset < sbp_offset) {
		err = -EMSGSIZE;
		goto free_hp;
	}

	data_buf_offset_aligned = ALIGN(data_buf_offset,
		vblkdev->config.blk_config.hardblk_size);
	if (data_buf_offset_aligned < data_buf_offset) {
		err = -EMSGSIZE;
		goto free_hp;
	}

	data_buf_size_aligned = ALIGN(hp->dxfer_len,
		vblkdev->config.blk_config.hardblk_size);
	if (data_buf_size_aligned < hp->dxfer_len) {
		err = -EMSGSIZE;
		goto free_hp;
	}

	ioctl_len = data_buf_offset_aligned + data_buf_size_aligned;
	if (ioctl_len < data_buf_offset_aligned) {
		err = -EMSGSIZE;
		goto free_hp;
	}

	ioctl_buf = kmalloc(ioctl_len, GFP_KERNEL);
	if (ioctl_buf == NULL) {
		err = -ENOMEM;
		goto free_hp;
	}

	vblk_hp = (struct vblk_sg_io_hdr *)(ioctl_buf);
	sbp = (ioctl_buf + sbp_offset);
	cmnd = (ioctl_buf + cmnd_offset);
	if (copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
		err = -EFAULT;
		goto free_ioctl_buf;
	}

	data_buf = (ioctl_buf + data_buf_offset_aligned);

	switch (hp->dxfer_direction) {
	case SG_DXFER_NONE:
		vblk_hp->data_direction = SCSI_DATA_NONE;
		break;
	case SG_DXFER_TO_DEV:
		vblk_hp->data_direction = SCSI_TO_DEVICE;
		break;
	case SG_DXFER_FROM_DEV:
		vblk_hp->data_direction = SCSI_FROM_DEVICE;
		break;
	case SG_DXFER_TO_FROM_DEV:
		vblk_hp->data_direction = SCSI_BIDIRECTIONAL;
		break;
	default:
		err = -EBADMSG;
		goto free_ioctl_buf;
	}

	if ((vblk_hp->data_direction == SCSI_TO_DEVICE) ||
		(vblk_hp->data_direction == SCSI_BIDIRECTIONAL)) {
		if (copy_from_user(data_buf, hp->dxferp, hp->dxfer_len)) {
			err = -EFAULT;
			goto free_ioctl_buf;
		}
	}

	vblk_hp->cmd_len = hp->cmd_len;
	vblk_hp->mx_sb_len = hp->mx_sb_len;
	/* This is the actual data length on which the storage server needs
	 * to act */
	vblk_hp->dxfer_len = hp->dxfer_len;
	/* This is the data buffer length; the data length is strictly
	 * dependent on the IOCTL being executed. The data buffer length is
	 * at least cache aligned to make sure that cache operations can be
	 * done successfully without corruption.
	 * Since the block size is 4K, aligning to the block size indirectly
	 * aligns to the cache line.
	 */
	vblk_hp->dxfer_buf_len = data_buf_size_aligned;
	vblk_hp->xfer_arg_offset = data_buf_offset_aligned;
	vblk_hp->cmdp_arg_offset = cmnd_offset;
	vblk_hp->sbp_arg_offset = sbp_offset;
	ioctl_req->ioctl_id = VBLK_SG_IO_ID;
	ioctl_req->ioctl_buf = ioctl_buf;
	ioctl_req->ioctl_len = ioctl_len;

free_ioctl_buf:
	if (err && ioctl_buf)
		kfree(ioctl_buf);

free_hp:
	if (hp)
		kfree(hp);

	return err;
}

int vblk_complete_sg_io(struct vblk_dev *vblkdev,
		struct vblk_ioctl_req *ioctl_req,
		void __user *user)
{
	sg_io_hdr_t *hp = NULL;
	uint32_t header_len = sizeof(sg_io_hdr_t);
	struct vblk_sg_io_hdr *vblk_hp;
	void *sbp;
	void *data_buf;
	int err = 0;

	if (ioctl_req->status) {
		err = ioctl_req->status;
		if (ioctl_req->ioctl_buf)
			kfree(ioctl_req->ioctl_buf);
		goto exit;
	}

	hp = kmalloc(header_len, GFP_KERNEL);
	if (hp == NULL)
		return -ENOMEM;

	if (copy_from_user(hp, user, header_len)) {
		err = -EFAULT;
		goto free_hp;
	}

	vblk_hp = (struct vblk_sg_io_hdr *)(ioctl_req->ioctl_buf);
	hp->status = 0xff & vblk_hp->status;
	hp->masked_status = status_byte(vblk_hp->status);
	hp->host_status = host_byte(vblk_hp->status);
	hp->driver_status = driver_byte(vblk_hp->status);
	hp->sb_len_wr = vblk_hp->sb_len_wr;
	/* TODO: Handle the residual length */
	hp->resid = 0;

	sbp = (ioctl_req->ioctl_buf + vblk_hp->sbp_arg_offset);
	if ((hp->sb_len_wr != 0) && (hp->sbp != NULL)) {
		if (copy_to_user(hp->sbp, sbp, hp->sb_len_wr)) {
			err = -EFAULT;
			goto free_hp;
		}
	}

	data_buf = (ioctl_req->ioctl_buf + vblk_hp->xfer_arg_offset);

	if ((vblk_hp->data_direction == SCSI_FROM_DEVICE) ||
		(vblk_hp->data_direction == SCSI_BIDIRECTIONAL)) {
		if (copy_to_user(hp->dxferp, data_buf, vblk_hp->dxfer_len)) {
			err = -EFAULT;
			goto free_hp;
		}
	}

	if (copy_to_user(user, hp, header_len)) {
		err = -EFAULT;
		goto free_hp;
	}

free_hp:
	if (ioctl_req->ioctl_buf)
		kfree(ioctl_req->ioctl_buf);

	if (hp)
		kfree(hp);
exit:
	return err;
}
drivers/block/tegra_virt_storage/tegra_hv_ufs.c | 348 (new file)
@@ -0,0 +1,348 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include <linux/slab.h>		/* kmalloc() */
#include <linux/errno.h>	/* error codes */
#include <linux/delay.h>	/* For msleep and usleep_range */
#include <uapi/scsi/ufs/ioctl.h>
#include "tegra_vblk.h"
#include "tegra_hv_ufs.h"

#define VBLK_UFS_MAX_IOC_SIZE (256 * 1024)

static int vblk_validate_single_query_io(struct vblk_dev *vblkdev,
		struct ufs_ioc_query_req *query_req,
		size_t *data_len,
		bool *w_flag)
{
	int err = 0;

	switch (query_req->opcode) {
	case UPIU_QUERY_OPCODE_READ_DESC:
		if (query_req->idn >= QUERY_DESC_IDN_MAX) {
			dev_err(vblkdev->device,
				"Desc IDN out of range %d\n",
				query_req->idn);
			err = -EINVAL;
			goto out;
		}

		*data_len = min_t(size_t, QUERY_DESC_MAX_SIZE,
				query_req->buf_size);
		break;

	case UPIU_QUERY_OPCODE_WRITE_DESC:
		if (query_req->idn >= QUERY_DESC_IDN_MAX) {
			err = -EINVAL;
			dev_err(vblkdev->device,
				"Desc IDN out of range %d\n",
				query_req->idn);
			goto out;
		}

		*data_len = min_t(size_t, QUERY_DESC_MAX_SIZE,
				query_req->buf_size);
		*w_flag = true;
		break;

	case UPIU_QUERY_OPCODE_READ_ATTR:
		if (query_req->idn >= QUERY_ATTR_IDN_MAX) {
			err = -EINVAL;
			dev_err(vblkdev->device,
				"ATTR IDN out of range %d\n",
				query_req->idn);
			goto out;
		}

		if (query_req->buf_size != sizeof(u32)) {
			err = -EINVAL;
			dev_err(vblkdev->device,
				"Buf size out of range %d\n",
				query_req->buf_size);
			goto out;
		}
		*data_len = sizeof(u32);
		break;

	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		if (query_req->idn > QUERY_ATTR_IDN_MAX) {
			err = -EINVAL;
			dev_err(vblkdev->device,
				"ATTR IDN out of range %d\n",
				query_req->idn);
			goto out;
		}

		if (query_req->buf_size != sizeof(u32)) {
			err = -EINVAL;
			dev_err(vblkdev->device,
				"Buf size out of range %d\n",
				query_req->buf_size);
			goto out;
		}
		*data_len = sizeof(u32);
		*w_flag = true;
		break;

	case UPIU_QUERY_OPCODE_READ_FLAG:
		if (query_req->idn > QUERY_FLAG_IDN_MAX) {
			err = -EINVAL;
			dev_err(vblkdev->device,
				"Flag IDN out of range %d\n",
				query_req->idn);
			goto out;
		}

		if (query_req->buf_size != sizeof(u8)) {
			err = -EINVAL;
			dev_err(vblkdev->device,
				"Buf size out of range %d\n",
				query_req->buf_size);
			goto out;
		}
		*data_len = sizeof(u8);
		break;

	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		if (query_req->idn > QUERY_FLAG_IDN_MAX) {
			err = -EINVAL;
			dev_err(vblkdev->device,
				"Flag IDN out of range %d\n",
				query_req->idn);
			goto out;
		}
		/* TODO: Create buffer to be attached */
		*data_len = 0;
		break;
	default:
		err = -EINVAL;
		dev_err(vblkdev->device, "Invalid opcode %d\n",
			query_req->opcode);
		break;
	}
out:
	return err;
}

int vblk_prep_ufs_combo_ioc(struct vblk_dev *vblkdev,
		struct vblk_ioctl_req *ioctl_req,
		void __user *user, uint32_t cmd)
{
	int err = 0;
	struct vblk_ufs_combo_info *combo_info;
	struct vblk_ufs_ioc_query_req *combo_cmd;
	int i = 0;
	uint8_t num_cmd;
	struct ufs_ioc_query_req ic;
	struct ufs_ioc_combo_query_req cc;
	struct ufs_ioc_combo_query_req __user *user_cmd;
	struct ufs_ioc_query_req __user *usr_ptr;
	uint32_t combo_cmd_size;
	uint32_t ioctl_bytes = VBLK_UFS_MAX_IOC_SIZE;
	uint8_t *tmpaddr;
	void *ioctl_buf;
	size_t data_len = 0;
	bool w_flag = false;

	ioctl_buf = vmalloc(ioctl_bytes);
	if (ioctl_buf == NULL)
		return -ENOMEM;

	combo_info = (struct vblk_ufs_combo_info *)ioctl_buf;

	user_cmd = (struct ufs_ioc_combo_query_req __user *)user;
	if (copy_from_user(&cc, user_cmd, sizeof(cc))) {
		err = -EFAULT;
		goto free_ioc_buf;
	}
	num_cmd = cc.num_cmds;
	if (num_cmd > MAX_QUERY_CMD_PER_COMBO) {
		err = -EINVAL;
		goto free_ioc_buf;
	}

	usr_ptr = (void __user *)cc.query;
	combo_info->count = num_cmd;
	combo_info->need_cq_empty = cc.need_cq_empty;
	combo_cmd = (struct vblk_ufs_ioc_query_req *)(ioctl_buf +
		sizeof(struct vblk_ufs_combo_info));

	combo_cmd_size = sizeof(struct vblk_ufs_combo_info) +
		sizeof(struct vblk_ufs_ioc_query_req) * combo_info->count;
	if (combo_cmd_size < sizeof(struct vblk_ufs_combo_info)) {
		dev_err(vblkdev->device,
			"combo_cmd_size is overflowing!\n");
		err = -EINVAL;
		goto free_ioc_buf;
	}

	if (combo_cmd_size > ioctl_bytes) {
		dev_err(vblkdev->device,
			"buffer does not have enough space to serve ioctl\n");
		err = -EFAULT;
		goto free_ioc_buf;
	}
	memset(&ic, 0, sizeof(ic));
	tmpaddr = (uint8_t *)&ic;
	for (i = 0; i < combo_info->count; i++) {
		if (copy_from_user((void *)tmpaddr, usr_ptr, sizeof(ic))) {
			err = -EFAULT;
			goto free_ioc_buf;
		}

		err = vblk_validate_single_query_io(vblkdev,
				(struct ufs_ioc_query_req *)tmpaddr,
				&data_len, &w_flag);
		if (err) {
			dev_err(vblkdev->device, "Validating request failed\n");
			err = -EFAULT;
			goto free_ioc_buf;
		}

		combo_cmd->opcode = ic.opcode;
		combo_cmd->idn = ic.idn;
		combo_cmd->index = ic.index;
		combo_cmd->selector = ic.selector;
		combo_cmd->buf_size = ic.buf_size;
		combo_cmd->delay = ic.delay;
		combo_cmd->error_status = ic.error_status;
		combo_cmd->buffer_offset = combo_cmd_size;

		combo_cmd_size += data_len;
		if ((combo_cmd_size < data_len) ||
			(combo_cmd_size > ioctl_bytes)) {
			dev_err(vblkdev->device,
				"buffer does not have enough space to serve ioctl\n");
			err = -EFAULT;
			goto free_ioc_buf;
		}

		if (w_flag && data_len) {
			if (copy_from_user(
				((void *)ioctl_buf + combo_cmd->buffer_offset),
				(void __user *)(unsigned long)ic.buffer,
				(u64)data_len)) {
				dev_err(vblkdev->device,
					"copy from user failed for data!\n");
				err = -EFAULT;
				goto free_ioc_buf;
			}
		}
		combo_cmd++;
		usr_ptr++;
	}

	ioctl_req->ioctl_id = VBLK_UFS_COMBO_IO_ID;
	ioctl_req->ioctl_buf = ioctl_buf;
	ioctl_req->ioctl_len = ioctl_bytes;

free_ioc_buf:
	if (err && ioctl_buf)
		vfree(ioctl_buf);

	return err;
}

int vblk_complete_ufs_combo_ioc(struct vblk_dev *vblkdev,
		struct vblk_ioctl_req *ioctl_req,
		void __user *user,
		uint32_t cmd)
{
	uint64_t num_cmd;
	struct ufs_ioc_combo_query_req cc;
	struct ufs_ioc_query_req ic;
	struct ufs_ioc_query_req *ic_ptr = &ic;
	struct ufs_ioc_combo_query_req __user *user_cmd;
	struct ufs_ioc_query_req __user *usr_ptr;
	struct vblk_ufs_ioc_query_req *combo_cmd;
	uint32_t i;
	int err = 0;
	size_t data_len;
	bool w_flag = false;

	void *ioctl_buf = ioctl_req->ioctl_buf;

	if (ioctl_req->status) {
		err = ioctl_req->status;
		if (ioctl_req->ioctl_buf)
			vfree(ioctl_req->ioctl_buf);
		goto exit;
	}

	user_cmd = (struct ufs_ioc_combo_query_req __user *)user;
	if (copy_from_user(&cc, user_cmd, sizeof(cc))) {
		err = -EFAULT;
		goto free_ioc_buf;
	}
	num_cmd = cc.num_cmds;
	if (num_cmd > MAX_QUERY_CMD_PER_COMBO) {
		err = -EINVAL;
		goto free_ioc_buf;
	}

	usr_ptr = (void __user *)cc.query;

	combo_cmd = (struct vblk_ufs_ioc_query_req *)(ioctl_buf +
		sizeof(struct vblk_ufs_combo_info));

	for (i = 0; i < num_cmd; i++) {
		if (copy_from_user((void *)ic_ptr, usr_ptr,
				sizeof(struct ufs_ioc_query_req))) {
			err = -EFAULT;
			goto free_ioc_buf;
		}

		err = vblk_validate_single_query_io(vblkdev, ic_ptr,
				&data_len, &w_flag);
		if (err) {
			dev_err(vblkdev->device, "Validating request failed\n");
			err = -EFAULT;
			goto free_ioc_buf;
		}

		err = copy_to_user(&usr_ptr->buf_size, &combo_cmd->buf_size,
				sizeof(combo_cmd->buf_size));
		if (err) {
			dev_err(vblkdev->device, "Failed copy_to_user query_req buf_size\n");
			err = -EFAULT;
			goto free_ioc_buf;
		}

		err = copy_to_user(&usr_ptr->error_status, &combo_cmd->error_status,
				sizeof(combo_cmd->error_status));
		if (err) {
			dev_err(vblkdev->device, "Failed copy_to_user query_req status\n");
			err = -EFAULT;
			goto free_ioc_buf;
		}

		if (!w_flag && data_len) {
			if (copy_to_user(
				(void __user *)(unsigned long)ic.buffer,
				(ioctl_buf + combo_cmd->buffer_offset),
				(u64)data_len)) {
				dev_err(vblkdev->device,
					"copy to user of ioctl data failed!\n");
				err = -EFAULT;
				goto free_ioc_buf;
			}
		}
		combo_cmd++;
		usr_ptr++;
	}

free_ioc_buf:
	if (ioctl_buf)
		vfree(ioctl_buf);

exit:
	return err;
}
drivers/block/tegra_virt_storage/tegra_hv_ufs.h | 79 (new file)
@@ -0,0 +1,79 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef _TEGRA_HV_UFS_H_
#define _TEGRA_HV_UFS_H_

#include <linux/types.h>

#define QUERY_DESC_MAX_SIZE 255
#define QUERY_DESC_MIN_SIZE 2
#define QUERY_DESC_HDR_SIZE 2

/* Attribute idn for Query requests */
enum attr_idn {
	QUERY_ATTR_IDN_BOOTLUN_EN = 0x0,
	QUERY_ATTR_IDN_PWR_MODE = 0x02,
	QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03,
	QUERY_ATTR_IDN_BKOPS_STATUS = 0x05,
	QUERY_ATTR_IDN_REF_CLK_FREQ = 0x0A,
	QUERY_ATTR_IDN_CONF_DESC_LCK = 0x0B,
	QUERY_ATTR_IDN_EE_CONTROL = 0x0D,
	QUERY_ATTR_IDN_EE_STATUS = 0x0E,
	QUERY_ATTR_IDN_MAX = 0x30,
};

/* Query response result code */
enum {
	QUERY_RESULT_SUCCESS = 0x00,
	QUERY_RESULT_NOT_READABLE = 0xF6,
	QUERY_RESULT_NOT_WRITEABLE = 0xF7,
	QUERY_RESULT_ALREADY_WRITTEN = 0xF8,
	QUERY_RESULT_INVALID_LENGTH = 0xF9,
	QUERY_RESULT_INVALID_VALUE = 0xFA,
	QUERY_RESULT_INVALID_SELECTOR = 0xFB,
	QUERY_RESULT_INVALID_INDEX = 0xFC,
	QUERY_RESULT_INVALID_IDN = 0xFD,
	QUERY_RESULT_INVALID_OPCODE = 0xFE,
	QUERY_RESULT_GENERAL_FAILURE = 0xFF,
};

/* UTP QUERY Transaction Specific Fields OpCode */
enum query_opcode {
	UPIU_QUERY_OPCODE_NOP = 0x0,
	UPIU_QUERY_OPCODE_READ_DESC = 0x1,
	UPIU_QUERY_OPCODE_WRITE_DESC = 0x2,
	UPIU_QUERY_OPCODE_READ_ATTR = 0x3,
	UPIU_QUERY_OPCODE_WRITE_ATTR = 0x4,
	UPIU_QUERY_OPCODE_READ_FLAG = 0x5,
	UPIU_QUERY_OPCODE_SET_FLAG = 0x6,
	UPIU_QUERY_OPCODE_CLEAR_FLAG = 0x7,
	UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8,
};

/* Descriptor idn for Query requests */
enum desc_idn {
	QUERY_DESC_IDN_DEVICE = 0x0,
	QUERY_DESC_IDN_CONFIGURATION = 0x1,
	QUERY_DESC_IDN_UNIT = 0x2,
	QUERY_DESC_IDN_RFU_0 = 0x3,
	QUERY_DESC_IDN_INTERCONNECT = 0x4,
	QUERY_DESC_IDN_STRING = 0x5,
	QUERY_DESC_IDN_RFU_1 = 0x6,
	QUERY_DESC_IDN_GEOMETRY = 0x7,
	QUERY_DESC_IDN_POWER = 0x8,
	QUERY_DESC_IDN_DEVICE_HEALTH = 0x9,
	QUERY_DESC_IDN_MAX,
};

/* Flag idn for Query Requests */
enum flag_idn {
	QUERY_FLAG_IDN_FDEVICEINIT = 0x01,
	QUERY_FLAG_IDN_PWR_ON_WPE = 0x03,
	QUERY_FLAG_IDN_BKOPS_EN = 0x04,
	QUERY_FLAG_IDN_MAX = 0x0E,
};

#endif /* _TEGRA_HV_UFS_H_ */
drivers/block/tegra_virt_storage/tegra_hv_vblk.c | 1286 (new file; diff suppressed because it is too large)
drivers/block/tegra_virt_storage/tegra_vblk.h | 140 (new file)
@@ -0,0 +1,140 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef _TEGRA_VBLK_H_
#define _TEGRA_VBLK_H_

#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <soc/tegra/ivc.h>
#include <soc/tegra/ivc_ext.h>
#include <soc/tegra/virt/hv-ivc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <tegra_virt_storage_spec.h>

#define DRV_NAME "tegra_hv_vblk"

/* Minor number and partition management. */
#define VBLK_MINORS 16

#define IVC_RESET_RETRIES 30

#define VS_LOG_HEADS 4
#define VS_LOG_SECTS 16

#define MAX_VSC_REQS 32

struct vblk_ioctl_req {
	uint32_t ioctl_id;
	void *ioctl_buf;
	uint32_t ioctl_len;
	int32_t status;
};

struct req_entry {
	struct list_head list_entry;
	struct request *req;
};

struct vsc_request {
	struct vs_request vs_req;
	struct request *req;
	struct req_iterator iter;
	struct vblk_ioctl_req *ioctl_req;
	void *mempool_virt;
	uint32_t mempool_offset;
	uint32_t mempool_len;
	uint32_t id;
	struct vblk_dev *vblkdev;
	/* Scatter list for mapping IOVA addresses */
	struct scatterlist *sg_lst;
	int sg_num_ents;
};

enum vblk_queue_state {
	VBLK_UNKNOWN,
	VBLK_QUEUE_SUSPENDED,
	VBLK_QUEUE_ACTIVE,
};

/*
 * The drvdata of the virtual device.
 */
struct vblk_dev {
	struct vs_config_info config;
	uint64_t size;			/* Device size in bytes */
	short users;			/* How many users */
	short media_change;		/* Flag a media change? */
	spinlock_t lock;		/* For mutual exclusion */
	struct request_queue *queue;	/* The device request queue */
	struct gendisk *gd;		/* The gendisk structure */
	struct blk_mq_tag_set tag_set;
	struct list_head req_list;	/* List containing reqs */
	uint32_t ivc_id;
	uint32_t ivm_id;
	struct tegra_hv_ivc_cookie *ivck;
	struct tegra_hv_ivm_cookie *ivmk;
	uint32_t devnum;
	bool initialized;
	struct work_struct init;
	struct work_struct work;
	struct workqueue_struct *wq;
	struct device *device;
	void *shared_buffer;
	struct mutex ioctl_lock;
	spinlock_t queue_lock;
	struct vsc_request reqs[MAX_VSC_REQS];
	DECLARE_BITMAP(pending_reqs, MAX_VSC_REQS);
	uint32_t inflight_reqs;
	uint32_t max_requests;
	struct mutex req_lock;
	struct mutex ivc_lock;
	enum vblk_queue_state queue_state;
	struct completion req_queue_empty;
};

int vblk_complete_ioctl_req(struct vblk_dev *vblkdev,
		struct vsc_request *vsc_req, int32_t status);

int vblk_prep_ioctl_req(struct vblk_dev *vblkdev,
		struct vblk_ioctl_req *ioctl_req,
		struct vsc_request *vsc_req);

int vblk_prep_sg_io(struct vblk_dev *vblkdev,
		struct vblk_ioctl_req *ioctl_req,
		void __user *user);

int vblk_complete_sg_io(struct vblk_dev *vblkdev,
		struct vblk_ioctl_req *ioctl_req,
		void __user *user);

int vblk_prep_mmc_multi_ioc(struct vblk_dev *vblkdev,
		struct vblk_ioctl_req *ioctl_req,
		void __user *user,
		uint32_t cmd);

int vblk_complete_mmc_multi_ioc(struct vblk_dev *vblkdev,
		struct vblk_ioctl_req *ioctl_req,
		void __user *user,
		uint32_t cmd);

int vblk_prep_ufs_combo_ioc(struct vblk_dev *vblkdev,
		struct vblk_ioctl_req *ioctl_req,
		void __user *user, uint32_t cmd);

int vblk_complete_ufs_combo_ioc(struct vblk_dev *vblkdev,
		struct vblk_ioctl_req *ioctl_req,
		void __user *user,
		uint32_t cmd);

int vblk_submit_ioctl_req(struct block_device *bdev,
		unsigned int cmd, void __user *user);

int vblk_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg);
#endif
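The reqs[] array, the pending_reqs bitmap, and MAX_VSC_REQS suggest a fixed pool of in-flight request slots. The actual allocator lives in tegra_hv_vblk.c, whose diff is suppressed above, so the following is only a generic sketch of that pattern under the assumption that vblkdev->lock guards the bitmap; it is not the author's implementation:

/* Hedged sketch of a typical slot allocator over pending_reqs; a
 * generic pattern only, since the real code in tegra_hv_vblk.c is
 * not shown in this diff.
 */
#include "tegra_vblk.h"

static struct vsc_request *vsc_request_get(struct vblk_dev *vblkdev)
{
	struct vsc_request *vsc_req = NULL;
	unsigned long bit;

	spin_lock(&vblkdev->lock);
	bit = find_first_zero_bit(vblkdev->pending_reqs, vblkdev->max_requests);
	if (bit < vblkdev->max_requests) {
		set_bit(bit, vblkdev->pending_reqs);
		vsc_req = &vblkdev->reqs[bit];
		vblkdev->inflight_reqs++;
	}
	spin_unlock(&vblkdev->lock);

	return vsc_req;	/* NULL when all MAX_VSC_REQS slots are busy */
}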
include/tegra_virt_storage_spec.h | 347 (new file)
@@ -0,0 +1,347 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef _TEGRA_VIRT_STORAGE_SPEC_H_
#define _TEGRA_VIRT_STORAGE_SPEC_H_

#include <linux/types.h> /* size_t */

#define VS_REQ_OP_F_NONE 0

enum vs_req_type {
	VS_DATA_REQ = 1,
	VS_CONFIGINFO_REQ = 2,
	VS_UNKNOWN_CMD = 0xffffffff,
};

enum vs_dev_type {
	VS_BLK_DEV = 1,
	VS_MTD_DEV = 2,
	VS_UNKNOWN_DEV = 0xffffffff,
};

enum mtd_cmd_op {
	VS_MTD_READ = 1,
	VS_MTD_WRITE = 2,
	VS_MTD_ERASE = 3,
	VS_MTD_IOCTL = 4,
	VS_MTD_INVAL_REQ = 32,
	VS_UNKNOWN_MTD_CMD = 0xffffffff,
};

/* MTD device request Operation type features supported */
#define VS_MTD_READ_OP_F	(1 << VS_MTD_READ)
#define VS_MTD_WRITE_OP_F	(1 << VS_MTD_WRITE)
#define VS_MTD_ERASE_OP_F	(1 << VS_MTD_ERASE)
#define VS_MTD_IOCTL_OP_F	(1 << VS_MTD_IOCTL)
#define VS_MTD_READ_ONLY_MASK	~(VS_MTD_READ_OP_F)

enum blk_cmd_op {
	VS_BLK_READ = 1,
	VS_BLK_WRITE = 2,
	VS_BLK_FLUSH = 3,
	VS_BLK_DISCARD = 4,
	VS_BLK_SECURE_ERASE = 5,
	VS_BLK_IOCTL = 6,
	VS_BLK_INVAL_REQ = 32,
	VS_UNKNOWN_BLK_CMD = 0xffffffff,
};

/* Blk device request Operation type features supported */
#define VS_BLK_READ_OP_F		(1 << VS_BLK_READ)
#define VS_BLK_WRITE_OP_F		(1 << VS_BLK_WRITE)
#define VS_BLK_FLUSH_OP_F		(1 << VS_BLK_FLUSH)
#define VS_BLK_DISCARD_OP_F		(1 << VS_BLK_DISCARD)
#define VS_BLK_SECURE_ERASE_OP_F	(1 << VS_BLK_SECURE_ERASE)
#define VS_BLK_IOCTL_OP_F		(1 << VS_BLK_IOCTL)
#define VS_BLK_READ_ONLY_MASK		~(VS_BLK_READ_OP_F)

#pragma pack(push)
#pragma pack(1)

struct vs_blk_request {
	uint64_t blk_offset;	/* Offset into storage device in terms of
				 * blocks for block device */
	uint32_t num_blks;	/* Total Block number to transfer */
	uint32_t data_offset;	/* Offset into mempool for data region */
	/* IOVA address of the buffer. In case of a read request, VSC will
	 * get the response at this address. In case of a write request, VSC
	 * will get the data from this address.
	 */
	uint64_t iova_addr;
};

struct vs_mtd_request {
	uint64_t offset;	/* Offset into storage device in terms of
				 * bytes in case of mtd device */
	uint32_t size;		/* Total number of bytes to transfer,
				 * to be used for MTD device */
	uint32_t data_offset;	/* Offset into mempool for data region */
};

struct vs_ioctl_request {
	uint32_t ioctl_id;	/* Id of the ioctl */
	uint32_t ioctl_len;	/* Length of the mempool area associated
				 * with the ioctl */
	uint32_t data_offset;	/* Offset into mempool for data region */
};

struct vs_blkdev_request {
	enum blk_cmd_op req_op;
	union {
		struct vs_blk_request blk_req;
		struct vs_ioctl_request ioctl_req;
	};
};

struct vs_mtddev_request {
	enum mtd_cmd_op req_op;
	union {
		struct vs_mtd_request mtd_req;
		struct vs_ioctl_request ioctl_req;
	};
};

struct vs_blk_response {
	int32_t status;		/* 0 for success, < 0 for error */
	uint32_t num_blks;
};

struct vs_mtd_response {
	int32_t status;		/* 0 for success, < 0 for error */
	uint32_t size;		/* Number of bytes processed in case
				 * of mtd device */
};

struct vs_ioctl_response {
	int32_t status;		/* 0 for success, < 0 for error */
};

struct vs_blkdev_response {
	union {
		struct vs_blk_response blk_resp;
		struct vs_ioctl_response ioctl_resp;
	};
};

struct vs_mtddev_response {
	union {
		struct vs_mtd_response mtd_resp;
		struct vs_ioctl_response ioctl_resp;
	};
};

struct vs_blk_dev_config {
	uint32_t hardblk_size;		/* Block Size */
	uint32_t max_read_blks_per_io;	/* Limit number of Blocks per I/O */
	uint32_t max_write_blks_per_io;	/* Limit number of Blocks per I/O */
	uint32_t max_erase_blks_per_io;	/* Limit number of Blocks per I/O */
	uint32_t req_ops_supported;	/* Allowed operations by requests */
	uint64_t num_blks;		/* Total number of blks */

	/*
	 * If true, then the VM needs to provide a local IOVA address for
	 * read and write requests. For IOCTL requests, the mempool will be
	 * used irrespective of this flag.
	 */
	uint32_t use_vm_address;
};

struct vs_mtd_dev_config {
	uint32_t max_read_bytes_per_io;	/* Limit number of bytes per I/O */
	uint32_t max_write_bytes_per_io;	/* Limit number of bytes per I/O */
	uint32_t erase_size;		/* Erase size for mtd device */
	uint32_t req_ops_supported;	/* Allowed operations by requests */
	uint64_t size;			/* Total number of bytes */
};

/* Physical device types */
#define VSC_DEV_EMMC 1U
#define VSC_DEV_UFS 2U
#define VSC_DEV_QSPI 3U

/* Storage Types */
#define VSC_STORAGE_RPMB 1U
#define VSC_STORAGE_BOOT 2U
#define VSC_STORAGE_LUN0 3U
#define VSC_STORAGE_LUN1 4U
#define VSC_STORAGE_LUN2 5U
#define VSC_STORAGE_LUN3 6U
#define VSC_STORAGE_LUN4 7U
#define VSC_STORAGE_LUN5 8U
#define VSC_STORAGE_LUN6 9U
#define VSC_STORAGE_LUN7 10U

#define SPEED_MODE_MAX_LEN 32

struct vs_config_info {
	uint32_t virtual_storage_ver;	/* Version of virtual storage */
	enum vs_dev_type type;		/* Type of underlying device */
	union {
		struct vs_blk_dev_config blk_config;
		struct vs_mtd_dev_config mtd_config;
	};
	uint32_t phys_dev;
	uint32_t phys_base;
	uint32_t storage_type;
	uint8_t speed_mode[SPEED_MODE_MAX_LEN];
};

struct vs_request {
	uint32_t req_id;
	enum vs_req_type type;
	union {
		struct vs_blkdev_request blkdev_req;
		struct vs_mtddev_request mtddev_req;
	};
	int32_t status;
	union {
		struct vs_blkdev_response blkdev_resp;
		struct vs_mtddev_response mtddev_resp;
		struct vs_config_info config_info;
	};
};

/**
 * @addtogroup MMC_RESP MMC Responses
 *
 * @brief Defines Command Responses of EMMC
 */
typedef enum {
	/** @brief No Response */
	RESP_TYPE_NO_RESP = 0U,
	/** @brief Response Type 1 */
	RESP_TYPE_R1 = 1U,
	/** @brief Response Type 2 */
	RESP_TYPE_R2 = 2U,
	/** @brief Response Type 3 */
	RESP_TYPE_R3 = 3U,
	/** @brief Response Type 4 */
	RESP_TYPE_R4 = 4U,
	/** @brief Response Type 5 */
	RESP_TYPE_R5 = 5U,
	/** @brief Response Type 6 */
	RESP_TYPE_R6 = 6U,
	/** @brief Response Type 7 */
	RESP_TYPE_R7 = 7U,
	/** @brief Response Type 1B */
	RESP_TYPE_R1B = 8U,
	/** @brief Number of Response Types */
	RESP_TYPE_NUM = 9U
	/* @} */
} sdmmc_resp_type;

#define VBLK_MMC_MULTI_IOC_ID 0x1000
struct combo_cmd_t {
	uint32_t cmd;
	uint32_t arg;
	uint32_t write_flag;
	uint32_t response[4];
	uint32_t buf_offset;
	uint32_t data_len;
	sdmmc_resp_type flags;
};

struct combo_info_t {
	uint32_t count;
	int32_t result;
};

/* The SCSI bio layer needs to handle SCSI and UFS IOCTLs separately.
 * This flag will be ORed with IO_IOCTL to tell SCSI and UFS IOCTLs
 * apart.
 */
#define SCSI_IOCTL_FLAG 0x10000000
#define UFS_IOCTL_FLAG 0x20000000
/* Mask for the SCSI and UFS ioctl flags; the 4 MSBs are reserved for
 * them. Two of those bits are used for SCSI and UFS, and the other two
 * are reserved for future use.
 */
#define SCSI_UFS_IOCTL_FLAG_MASK 0xF0000000

#define VBLK_SG_IO_ID (0x1001 | SCSI_IOCTL_FLAG)
#define VBLK_UFS_IO_ID (0x1002 | UFS_IOCTL_FLAG)
#define VBLK_UFS_COMBO_IO_ID (0x1003 | UFS_IOCTL_FLAG)

#define VBLK_SG_MAX_CMD_LEN 16

enum scsi_data_direction {
	SCSI_BIDIRECTIONAL = 0,
	SCSI_TO_DEVICE = 1,
	SCSI_FROM_DEVICE = 2,
	SCSI_DATA_NONE = 3,
	UNKNOWN_DIRECTION = 0xffffffff,
};

struct vblk_sg_io_hdr {
	int32_t data_direction;		/* [i] data transfer direction */
	uint8_t cmd_len;		/* [i] SCSI command length */
	uint8_t mx_sb_len;		/* [i] max length to write to sbp */
	uint32_t dxfer_len;		/* [i] byte count of data transfer */
	uint32_t xfer_arg_offset;	/* [i], [*io] offset to data transfer memory */
	uint32_t cmdp_arg_offset;	/* [i], [*i] offset to command to perform */
	uint32_t sbp_arg_offset;	/* [i], [*o] offset to sense_buffer memory */
	uint32_t status;		/* [o] scsi status */
	uint8_t sb_len_wr;		/* [o] byte count actually written to sbp */
	uint32_t dxfer_buf_len;		/* [i] Length of data transfer buffer */
};

struct vblk_ufs_ioc_query_req {
	/* Query opcode to specify the type of Query operation */
	uint8_t opcode;
	/* idn to provide more info on specific operation. */
	uint8_t idn;
	/* index - optional in some cases */
	uint8_t index;
	/* selector - optional in some cases */
	uint8_t selector;
	/* buf_size - buffer size in bytes pointed to by buffer.
	 * Note:
	 * For Read/Write Attribute this should be 4 bytes
	 * For Read Flag this should be 1 byte
	 * For Descriptor Read/Write the size depends on the type of the
	 * descriptor
	 */
	uint16_t buf_size;
	/*
	 * User buffer offset for query data. The offset should be within the
	 * bounds of the mempool memory region.
	 */
	uint32_t buffer_offset;
	/* Delay after each query command completion in microseconds. */
	uint32_t delay;
	/* error status for the query operation */
	int32_t error_status;
};

/** @brief Meta data of UFS Native ioctl Combo Command */
typedef struct vblk_ufs_combo_info {
	/** Count of commands in combo command */
	uint32_t count;
	/** Status of combo command */
	int32_t result;
	/** Flag to specify whether to empty the command queue before
	 * processing the combo request.
	 * If the user wants to ensure that there are no requests in the UFS
	 * device command queue before executing a query command, this flag
	 * has to be set to 1.
	 * For example, in case of refresh for a Samsung UFS device, the
	 * command queue should be emptied before setting the attribute for
	 * refresh.
	 */
	uint8_t need_cq_empty;
} vblk_ufs_combo_info_t;

#pragma pack(pop)

#endif
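This header is the wire contract between the vblk frontend and the storage server at the other end of the IVC channel. To illustrate how the layout is meant to be consumed, here is a hedged sketch of a server-side dispatcher; handle_read() and handle_ioctl() are hypothetical placeholders, and only the struct layout comes from this header:

/* Hedged server-side sketch; only the vs_request layout is from the
 * header above. handle_read()/handle_ioctl() are hypothetical.
 */
#include <stdint.h>
#include "tegra_virt_storage_spec.h"

extern int32_t handle_read(void *dst, uint64_t blk_offset, uint32_t num_blks);
extern int32_t handle_ioctl(uint32_t ioctl_id, void *buf, uint32_t len);

static void serve_blk_request(struct vs_request *req, void *mempool)
{
	if (req->type != VS_DATA_REQ)
		return;

	switch (req->blkdev_req.req_op) {
	case VS_BLK_READ: {
		struct vs_blk_request *b = &req->blkdev_req.blk_req;
		/* Data lands at data_offset inside the shared mempool
		 * (or at iova_addr when use_vm_address is set). */
		void *dst = (uint8_t *)mempool + b->data_offset;

		req->blkdev_resp.blk_resp.status =
			handle_read(dst, b->blk_offset, b->num_blks);
		req->blkdev_resp.blk_resp.num_blks = b->num_blks;
		break;
	}
	case VS_BLK_IOCTL: {
		struct vs_ioctl_request *ioc = &req->blkdev_req.ioctl_req;
		void *buf = (uint8_t *)mempool + ioc->data_offset;

		req->blkdev_resp.ioctl_resp.status =
			handle_ioctl(ioc->ioctl_id, buf, ioc->ioctl_len);
		break;
	}
	default:
		req->status = -1;	/* unsupported op */
		break;
	}
}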
include/uapi/scsi/ufs/ioctl.h | 66 (new file)
@@ -0,0 +1,66 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef UAPI_SCSI_UFS_UFS_IOCTL_H_
#define UAPI_SCSI_UFS_UFS_IOCTL_H_

#include <linux/types.h>

/*
 * IOCTL opcode for ufs combo queries; it has the opcode right after
 * SCSI_IOCTL_GET_PCI
 */
#define UFS_IOCTL_COMBO_QUERY 0x5388
/*
 * IOCTL opcode to set UFS power mode
 */
#define UFS_IOCTL_SET_POWER_MODE 0x5389

/*
 * Maximum number of Query requests per Combo Query Request
 */
#define MAX_QUERY_CMD_PER_COMBO 10

/**
 * struct ufs_ioc_query_req - used to transfer ufs query command/data to and
 * from user space via ioctl
 */
struct ufs_ioc_query_req {
	/* Query opcode to specify the type of Query operation */
	__u8 opcode;
	/* idn to provide more info on specific operation. */
	__u8 idn;
	/* index - optional in some cases */
	__u8 index;
	/* selector - optional in some cases */
	__u8 selector;
	/* buf_size - buffer size in bytes pointed to by buffer. */
	__u16 buf_size;
	/*
	 * user buffer pointer for query data.
	 * Note:
	 * For Read/Write Attribute this should be 4 bytes
	 * For Read Flag this should be 1 byte
	 * For Descriptor Read/Write the size depends on the type of the
	 * descriptor
	 */
	__u8 *buffer;
	/* delay after query command completion */
	__u32 delay;
	/* error status for the query operation */
	__s32 error_status;
};

struct ufs_ioc_combo_query_req {
	/* Number of Query Commands in this Combo */
	__u8 num_cmds;
	/* Flag to specify if the Command Queue needs to be empty or not */
	__u8 need_cq_empty;
	/* Flag to specify whether to return or continue with the remaining
	 * requests on error */
	__u8 return_on_error;
	/* pointer to the first query command request */
	struct ufs_ioc_query_req *query;
};

#endif /* UAPI_SCSI_UFS_UFS_IOCTL_H_ */
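Putting the pieces together, a user-space caller fills one or more ufs_ioc_query_req entries and submits them through UFS_IOCTL_COMBO_QUERY, which vblk_ioctl() accepts above. A minimal sketch, assuming a node name of /dev/vblkdev0 and that this header is installed at the include path below; the opcode and idn values mirror tegra_hv_ufs.h:

/* Hedged user-space sketch: read the bBootLunEn attribute with a
 * single-command combo query. Node name and include path are assumed.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <scsi/ufs/ioctl.h>	/* assumed install path of this header */

int main(void)
{
	__u8 attr[4] = { 0 };
	struct ufs_ioc_query_req q;
	struct ufs_ioc_combo_query_req cc;
	int fd = open("/dev/vblkdev0", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return 1;

	memset(&q, 0, sizeof(q));
	q.opcode = 0x3;			/* UPIU_QUERY_OPCODE_READ_ATTR */
	q.idn = 0x0;			/* QUERY_ATTR_IDN_BOOTLUN_EN */
	q.buf_size = sizeof(attr);	/* must be 4 bytes for attributes */
	q.buffer = attr;

	memset(&cc, 0, sizeof(cc));
	cc.num_cmds = 1;
	cc.query = &q;

	if (ioctl(fd, UFS_IOCTL_COMBO_QUERY, &cc) == 0)
		printf("bBootLunEn=%u error_status=%d\n",
			(unsigned int)attr[0], q.error_status);

	close(fd);
	return 0;
}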