video: tegra: host: dla: manage operation desc

- pin mapped operation descriptor buffers during task submission
- get operation descriptors handle from user and pass its IOVA to engine
- pin API returns IOVA for given mem handle
- unpin operation descriptors buffers in task cleanup

Jira DLA-93

Change-Id: I78fb22301ab472685c3bae7c424d75140b814887
Signed-off-by: Shridhar Rasal <srasal@nvidia.com>
Reviewed-on: http://git-master/r/1213761
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Prashant Gaikwad <pgaikwad@nvidia.com>
GVS: Gerrit_Virtual_Submit
This commit is contained in:
Shridhar Rasal
2016-09-02 18:08:06 +05:30
committed by Laxman Dewangan
parent a53758ab10
commit 7f3a13acfc
3 changed files with 51 additions and 1 deletions

View File

@@ -22,6 +22,7 @@
#define __NVHOST_NVDLA_H__
#include <linux/nvhost_nvdla_ioctl.h>
#include "nvhost_buffer.h"
/**
* Method ID and Method data THI registers
@@ -30,6 +31,7 @@
#define NV_DLA_THI_METHOD_DATA 0x00000044 /* RW-4R */
#define MAX_NUM_ACTION_LIST 1
#define MAX_HANDLE_PER_OP_DESC 1
/* TODO: Below should come from firmware interface */
#define ACTION_OPCODE_TERMINATE 0x00
@@ -78,6 +80,7 @@ struct nvdla_task_fence {
* struct nvdla_task: structure for task info
*
* @queue Queue in which task submitted
* @buffers nvhost buffers for priv/task
* @sp pointer to syncpt
* @prefences pointer to prefences
* @postfences pointer to post fences
@@ -88,9 +91,12 @@ struct nvdla_task_fence {
* @task_desc_pa DLA task desc PA
* @buf_size Total size of task dma alloc
* @timeout max timeout to wait for task completion
* @op_handle pointer to handle list of operation descriptor
*
*/
struct nvdla_task {
struct nvhost_queue *queue;
struct nvhost_buffers *buffers;
struct nvhost_syncpt *sp;
struct nvdla_task_fence *prefences;
struct nvdla_task_fence *postfences;
@@ -101,6 +107,7 @@ struct nvdla_task {
dma_addr_t task_desc_pa;
size_t buf_size;
int timeout;
u32 *op_handle;
};
extern const struct file_operations tegra_nvdla_ctrl_ops;
@@ -184,6 +191,7 @@ void nvdla_task_get(struct nvdla_task *task);
* nvdla_task_alloc() allocates a task for a given queue
*
* @queue Pointer to nvhost queue
* @buffers Pointer to nvhost buffers
* @user_task Pointer to user task passed from UMD
*
* Return allocated task in success, otherwise pointer to err
@@ -192,6 +200,7 @@ void nvdla_task_get(struct nvdla_task *task);
* parameter details
*/
struct nvdla_task *nvdla_task_alloc(struct nvhost_queue *queue,
struct nvhost_buffers *buffers,
struct nvdla_ctrl_ioctl_submit_task user_task);
/**

View File

@@ -184,6 +184,7 @@ static int nvdla_ctrl_submit(struct nvdla_private *priv, void *arg)
struct nvdla_ctrl_ioctl_submit_task *local_tasks;
struct platform_device *pdev;
struct nvhost_queue *queue;
struct nvhost_buffers *buffers;
u32 num_tasks;
struct nvdla_task *task;
int err = 0, i = 0;
@@ -196,6 +197,8 @@ static int nvdla_ctrl_submit(struct nvdla_private *priv, void *arg)
if (!queue)
return -EINVAL;
buffers = priv->buffers;
user_tasks = (struct nvdla_ctrl_ioctl_submit_task __user *)
(uintptr_t)args->tasks;
num_tasks = args->num_tasks;
@@ -222,7 +225,7 @@ static int nvdla_ctrl_submit(struct nvdla_private *priv, void *arg)
nvdla_dbg_info(pdev, "submit [%d]th task", i + 1);
/* allocate per task and update fields */
task = nvdla_task_alloc(queue, local_tasks[i]);
task = nvdla_task_alloc(queue, buffers, local_tasks[i]);
if (IS_ERR(task)) {
err = PTR_ERR(task);
goto fail_to_task_alloc;

View File

@@ -119,6 +119,9 @@ static void task_free(struct kref *ref)
task->task_desc = NULL;
}
/* free operation descriptor handle */
kfree(task->op_handle);
/* finally free task */
kfree(task);
}
@@ -153,6 +156,10 @@ static void nvdla_queue_update(void *priv, int nr_completed)
/* give syncpoint reference */
nvhost_syncpt_put_ref(task->sp, queue->syncpt_id);
/* unpin submit ref */
nvhost_buffer_submit_unpin(task->buffers,
task->op_handle, MAX_HANDLE_PER_OP_DESC);
/* update tasklist */
list_del(&task->list);
@@ -192,9 +199,11 @@ dma_addr_t get_semaphore_pa(struct platform_device *pdev)
#endif
struct nvdla_task *nvdla_task_alloc(struct nvhost_queue *queue,
struct nvhost_buffers *buffers,
struct nvdla_ctrl_ioctl_submit_task user_task)
{
struct platform_device *pdev = queue->pool->pdev;
u32 num_operations = user_task.num_operations;
u32 num_postfences = user_task.num_postfences;
u32 num_prefences = user_task.num_prefences;
struct dla_action_semaphore *postaction;
@@ -210,9 +219,11 @@ struct nvdla_task *nvdla_task_alloc(struct nvhost_queue *queue,
size_t preactionlist_size;
uint16_t postactionl_of;
uint16_t preactionl_of;
dma_addr_t op_dma_addr;
dma_addr_t buffer_pa;
size_t task_size;
size_t buf_size;
size_t op_size;
u32 *buffer_va;
void *mem;
int err;
@@ -400,10 +411,37 @@ struct nvdla_task *nvdla_task_alloc(struct nvhost_queue *queue,
postaction->address = get_semaphore_pa(pdev);
}
if (num_operations) {
task->buffers = buffers;
task->op_handle = kcalloc(MAX_HANDLE_PER_OP_DESC, sizeof(u32),
GFP_KERNEL);
if (!task->op_handle) {
err = -ENOMEM;
goto fail_to_alloc_opdesc;
}
if (copy_from_user(task->op_handle,
(void __user *)user_task.operation_desc,
(MAX_HANDLE_PER_OP_DESC * sizeof(u32)))) {
err = -EFAULT;
goto fail_to_cpy_buffer;
}
err = nvhost_buffer_submit_pin(buffers, task->op_handle,
MAX_HANDLE_PER_OP_DESC, &op_dma_addr, &op_size);
if (!err) {
task_desc->operation_desc = op_dma_addr;
task_desc->num_operations = num_operations;
}
}
nvdla_dbg_info(pdev, "task[%p] initialized", task);
return task;
fail_to_cpy_buffer:
kfree(task->op_handle);
fail_to_alloc_opdesc:
fail_to_dma_alloc:
kfree(task);
fail_to_alloc_task: