nvdla: kmd: handle {RELEASE,ALLOC}_QUEUE ioctls

[1] This commit separates the allocation and deallocation
    of mission-critical resources out of open() and close().
[2] It is achieved through introduction of NVDLA_IOCTL_{ALLOC,RELEASE}
    IOCTLs.
[3] nvdla_buffer_* APIs introduced to facilitate easier split.

Bug 200628173

Change-Id: I3fb07ecaff69c62ec5eb9e5bea39b07ae1624240
Signed-off-by: Arvind M <am@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2403689
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: Anup Mahindre <amahindre@nvidia.com>
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
This commit is contained in:
Arvind M
2020-08-25 17:04:27 +05:30
committed by Laxman Dewangan
parent ca570376ea
commit 9eec5aea15
4 changed files with 142 additions and 16 deletions

View File

@@ -1,7 +1,7 @@
/*
* NVHOST buffer management for T194
*
* Copyright (c) 2016-2018, NVIDIA Corporation. All rights reserved.
* Copyright (c) 2016-2020, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -236,6 +236,18 @@ nvdla_buffer_init_err:
return ERR_PTR(err);
}
bool nvdla_buffer_is_valid(struct nvdla_buffers *nvdla_buffers)
{
/* Currently there is only one check */
return (nvdla_buffers->pdev != NULL);
}
/*
 * Bind (or rebind) the platform device backing this buffer context.
 * Called from the queue-allocation path with the queue's vm_pdev so
 * that subsequent pin/unpin operations see a valid device; until then
 * the context stays invalid per nvdla_buffer_is_valid().
 */
void nvdla_buffer_set_platform_device(struct nvdla_buffers *nvdla_buffers,
struct platform_device *pdev)
{
nvdla_buffers->pdev = pdev;
}
int nvdla_buffer_submit_pin(struct nvdla_buffers *nvdla_buffers,
struct dma_buf **dmabufs, u32 count,
dma_addr_t *paddr, size_t *psize,

View File

@@ -1,7 +1,7 @@
/*
* NVDLA Buffer Management Header
*
* Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -59,6 +59,28 @@ struct nvdla_buffers {
*/
struct nvdla_buffers *nvdla_buffer_init(struct platform_device *pdev);
/**
* @brief Checks for validity of nvdla_buffer
*
* This function checks the validity of buffer and is
* recommended to be called prior to any buffer operations
*
* @param nvdla_buffers Pointer to nvdla_buffers struct
* @return true on buffer being valid, and false otherwise
**/
bool nvdla_buffer_is_valid(struct nvdla_buffers *nvdla_buffers);
/**
* @brief Sets host1x platform device corresponding to nvdla_buffer
*
* This function sets (or resets) the platform_device pdev information of nvdla_buffer.
*
* @param nvdla_buffers Pointer to nvdla_buffers struct
* @param pdev Pointer to NvHost device
**/
void nvdla_buffer_set_platform_device(struct nvdla_buffers *nvdla_buffers,
struct platform_device *pdev);
/**
* @brief Pin the memhandle using dma_buf functions
*

View File

@@ -178,6 +178,12 @@ static int nvdla_pin(struct nvdla_private *priv, void *arg)
nvdla_dbg_fn(pdev, "");
if (!nvdla_buffer_is_valid(priv->buffers)) {
nvdla_dbg_err(pdev, "Invalid buffer\n");
err = -EINVAL;
goto fail_to_get_val_arg;
}
if (!buf_list) {
nvdla_dbg_err(pdev, "Invalid argument ptr in pin\n");
err = -EINVAL;
@@ -234,6 +240,12 @@ static int nvdla_unpin(struct nvdla_private *priv, void *arg)
nvdla_dbg_fn(pdev, "");
if (!nvdla_buffer_is_valid(priv->buffers)) {
nvdla_dbg_err(pdev, "Invalid buffer\n");
err = -EINVAL;
goto fail_to_get_val_arg;
}
if (!buf_list) {
nvdla_dbg_err(pdev, "Invalid argument for pointer\n");
err = -EINVAL;
@@ -938,6 +950,70 @@ exit:
return 0;
}
/**
 * NVDLA_IOCTL_ALLOC_QUEUE handler: allocate the per-fd queue.
 *
 * Fails with -EINVAL if this fd already owns a queue. On success the
 * queue's vm_pdev is bound to the fd's nvdla_buffers so later buffer
 * operations pass the nvdla_buffer_is_valid() check.
 *
 * @param priv  per-fd private state
 * @param arg   unused; present for handler-signature consistency
 * @return 0 on success, negative error code on failure
 */
static int nvdla_queue_alloc_handler(struct nvdla_private *priv, void *arg)
{
	struct platform_device *pdev = priv->pdev;
	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
	struct nvdla_device *nvdla_dev = pdata->private_data;

	(void) arg;

	/* A second allocation on the same fd is a caller error. */
	if (unlikely(priv->queue != NULL)) {
		nvdla_dbg_err(pdev, "Queue already allocated");
		return -EINVAL;
	}

	priv->queue = nvdla_queue_alloc(nvdla_dev->pool, MAX_NVDLA_TASK_COUNT,
		nvdla_dev->submit_mode == NVDLA_SUBMIT_MODE_CHANNEL);
	if (IS_ERR(priv->queue)) {
		int ret = PTR_ERR(priv->queue);

		priv->queue = NULL;
		return ret;
	}

	/* Bind the fd's buffers to the new queue's platform device. */
	nvdla_buffer_set_platform_device(priv->buffers, priv->queue->vm_pdev);

	return 0;
}
/**
 * NVDLA_IOCTL_RELEASE_QUEUE handler: abort and release the per-fd queue.
 *
 * @param priv  per-fd private state
 * @param arg   unused; present for handler-signature consistency
 * @return 0 on success, -EINVAL if no queue is currently allocated
 */
static int nvdla_queue_release_handler(struct nvdla_private *priv, void *arg)
{
int err = 0;
struct platform_device *pdev = priv->pdev;
/**
 * Note: This function is reached via either
 * [1] the NVDLA_IOCTL_RELEASE_QUEUE ioctl call, or
 * [2] nvdla_release(), when the fd is closed before the queue
 *     has been released.
 **/
/* Currently unused and kept to be consistent with other handlers. */
(void) arg;
/* If no queue is allocated, error out. */
if (unlikely(NULL == priv->queue)) {
nvdla_dbg_err(pdev, "No queue to be released.");
err = -EINVAL;
goto fail;
}
/* Abort outstanding work, then drop the reference. Abort's return
 * value is intentionally ignored: release must proceed regardless. */
(void) nvdla_queue_abort(priv->queue);
nvdla_queue_put(priv->queue);
/* Clear so nvdla_release() can tell the queue is already gone. */
priv->queue = NULL;
fail:
return err;
}
static int nvdla_submit(struct nvdla_private *priv, void *arg)
{
struct nvdla_submit_args *args =
@@ -1105,6 +1181,12 @@ static long nvdla_ioctl(struct file *file, unsigned int cmd,
case NVDLA_IOCTL_EMU_TASK_SUBMIT:
err = nvdla_emu_task_submit(priv, (void *)buf);
break;
case NVDLA_IOCTL_ALLOC_QUEUE:
err = nvdla_queue_alloc_handler(priv, (void*)buf);
break;
case NVDLA_IOCTL_RELEASE_QUEUE:
err = nvdla_queue_release_handler(priv, (void*)buf);
break;
default:
nvdla_dbg_err(pdev, "invalid IOCTL CMD");
err = -ENOIOCTLCMD;
@@ -1123,7 +1205,6 @@ static int nvdla_open(struct inode *inode, struct file *file)
struct nvhost_device_data *pdata = container_of(inode->i_cdev,
struct nvhost_device_data, ctrl_cdev);
struct platform_device *pdev = pdata->pdev;
struct nvdla_device *nvdla_dev = pdata->private_data;
struct nvdla_private *priv;
int err = 0, index;
@@ -1154,20 +1235,19 @@ static int nvdla_open(struct inode *inode, struct file *file)
err = nvhost_module_set_rate(pdev, priv, UINT_MAX,
index, clock->request_type);
if (err < 0)
goto err_alloc_queue;
goto err_set_emc_rate;
break;
}
}
priv->queue = nvdla_queue_alloc(nvdla_dev->pool,
MAX_NVDLA_TASK_COUNT,
nvdla_dev->submit_mode == NVDLA_SUBMIT_MODE_CHANNEL);
if (IS_ERR(priv->queue)) {
err = PTR_ERR(priv->queue);
goto err_alloc_queue;
}
/* Zero out explicitly */
priv->queue = NULL;
priv->buffers = nvdla_buffer_init(priv->queue->vm_pdev);
/**
* Platform device corresponding to buffers is deferred
* to queue allocation.
**/
priv->buffers = nvdla_buffer_init(NULL);
if (IS_ERR(priv->buffers)) {
err = PTR_ERR(priv->buffers);
goto err_alloc_buffer;
@@ -1177,7 +1257,7 @@ static int nvdla_open(struct inode *inode, struct file *file)
err_alloc_buffer:
kfree(priv->buffers);
err_alloc_queue:
err_set_emc_rate:
nvhost_module_remove_client(pdev, priv);
err_add_client:
kfree(priv);
@@ -1192,8 +1272,16 @@ static int nvdla_release(struct inode *inode, struct file *file)
nvdla_dbg_fn(pdev, "priv:%p", priv);
nvdla_queue_abort(priv->queue);
nvdla_queue_put(priv->queue);
/* If NVDLA_IOCTL_RELEASE_QUEUE is not called, free it explicitly. */
if (NULL != priv->queue) {
/**
* Error value is intentionally ignored to continue freeing
* other resources.
* arg is set to NULL, which is safe since it is unused.
**/
nvdla_queue_release_handler(priv, NULL);
}
nvdla_buffer_release(priv->buffers);
nvhost_module_remove_client(pdev, priv);

View File

@@ -254,8 +254,12 @@ struct nvdla_status_notify {
_IOWR(NVHOST_NVDLA_IOCTL_MAGIC, 7, struct nvdla_get_q_status_args)
#define NVDLA_IOCTL_EMU_TASK_SUBMIT \
_IOWR(NVHOST_NVDLA_IOCTL_MAGIC, 8, struct nvdla_submit_args)
#define NVDLA_IOCTL_ALLOC_QUEUE \
_IO(NVHOST_NVDLA_IOCTL_MAGIC, 9)
#define NVDLA_IOCTL_RELEASE_QUEUE \
_IO(NVHOST_NVDLA_IOCTL_MAGIC, 10)
#define NVDLA_IOCTL_LAST \
_IOC_NR(NVDLA_IOCTL_EMU_TASK_SUBMIT)
_IOC_NR(NVDLA_IOCTL_RELEASE_QUEUE)
#define NVDLA_IOCTL_MAX_ARG_SIZE \
sizeof(struct nvdla_pin_unpin_args)