nvdla: Use import_id for creating dma buf memory

[1]: Add import_id field in `nvdla_mem_share_handle`.
[2]: Use import_id for creating the dma buf.
[3]: Use share_id/handle as key for search/insert the vm tree.
[4]: Optimize code and avoid unnecessary dma_buf_get/put calls,
     which becomes possible with [3]

Bug 3470815

Change-Id: Idb1df6ef04301e1ade4e39d3489502759e7d8462
Signed-off-by: Amit Sharma <amisharma@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2645563
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
Reviewed-by: Praveen K <kpraveen@nvidia.com>
Reviewed-by: Arvind M <am@nvidia.com>
GVS: Gerrit_Virtual_Submit
This commit is contained in:
Amit Sharma
2021-12-22 05:24:47 +00:00
committed by Laxman Dewangan
parent 90998456e4
commit 9de4a4df0a
5 changed files with 67 additions and 170 deletions

View File

@@ -1,7 +1,7 @@
/* /*
* NVHOST buffer management for T194 * NVHOST buffer management for T194
* *
* Copyright (c) 2016-2021, NVIDIA Corporation. All rights reserved. * Copyright (c) 2016-2022, NVIDIA Corporation. All rights reserved.
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License, * under the terms and conditions of the GNU General Public License,
@@ -35,6 +35,9 @@
* @size: Size of the buffer * @size: Size of the buffer
* @user_map_count: Buffer reference count from user space * @user_map_count: Buffer reference count from user space
* @submit_map_count: Buffer reference count from task submit * @submit_map_count: Buffer reference count from task submit
* @handle MemHandle of the buffer passed from user space
* @offset offset
* @access_flags access (rw/ro)
* @rb_node: pinned buffer node * @rb_node: pinned buffer node
* @list_head: List entry * @list_head: List entry
* *
@@ -51,12 +54,15 @@ struct nvdla_vm_buffer {
s32 user_map_count; s32 user_map_count;
s32 submit_map_count; s32 submit_map_count;
u32 handle;
u32 offset;
u32 access_flags;
struct rb_node rb_node; struct rb_node rb_node;
struct list_head list_head; struct list_head list_head;
}; };
static struct nvdla_vm_buffer *nvdla_find_map_buffer( static struct nvdla_vm_buffer *nvdla_find_map_buffer(
struct nvdla_buffers *nvdla_buffers, struct dma_buf *dmabuf) struct nvdla_buffers *nvdla_buffers, u32 handle)
{ {
struct rb_root *root = &nvdla_buffers->rb_root; struct rb_root *root = &nvdla_buffers->rb_root;
struct rb_node *node = root->rb_node; struct rb_node *node = root->rb_node;
@@ -67,9 +73,9 @@ static struct nvdla_vm_buffer *nvdla_find_map_buffer(
vm = rb_entry(node, struct nvdla_vm_buffer, vm = rb_entry(node, struct nvdla_vm_buffer,
rb_node); rb_node);
if (vm->dmabuf > dmabuf) if (vm->handle > handle)
node = node->rb_left; node = node->rb_left;
else if (vm->dmabuf != dmabuf) else if (vm->handle != handle)
node = node->rb_right; node = node->rb_right;
else else
return vm; return vm;
@@ -92,7 +98,7 @@ static void nvdla_buffer_insert_map_buffer(
rb_node); rb_node);
parent = *new_node; parent = *new_node;
if (vm->dmabuf > new_vm->dmabuf) if (vm->handle > new_vm->handle)
new_node = &((*new_node)->rb_left); new_node = &((*new_node)->rb_left);
else else
new_node = &((*new_node)->rb_right); new_node = &((*new_node)->rb_right);
@@ -106,39 +112,25 @@ static void nvdla_buffer_insert_map_buffer(
list_add_tail(&new_vm->list_head, &nvdla_buffers->list_head); list_add_tail(&new_vm->list_head, &nvdla_buffers->list_head);
} }
int nvdla_get_iova_addr(struct nvdla_buffers *nvdla_buffers,
struct dma_buf *dmabuf, dma_addr_t *addr)
{
struct nvdla_vm_buffer *vm;
int err = -EINVAL;
mutex_lock(&nvdla_buffers->mutex);
vm = nvdla_find_map_buffer(nvdla_buffers, dmabuf);
if (vm) {
*addr = vm->addr;
err = 0;
}
mutex_unlock(&nvdla_buffers->mutex);
return err;
}
static int nvdla_buffer_map(struct platform_device *pdev, static int nvdla_buffer_map(struct platform_device *pdev,
struct dma_buf *dmabuf, struct nvdla_mem_share_handle *desc,
struct nvdla_vm_buffer *vm) struct nvdla_vm_buffer *vm)
{ {
const dma_addr_t cvnas_begin = nvcvnas_get_cvsram_base(); const dma_addr_t cvnas_begin = nvcvnas_get_cvsram_base();
const dma_addr_t cvnas_end = cvnas_begin + nvcvnas_get_cvsram_size(); const dma_addr_t cvnas_end = cvnas_begin + nvcvnas_get_cvsram_size();
struct dma_buf_attachment *attach; struct dma_buf_attachment *attach;
struct dma_buf *dmabuf;
struct sg_table *sgt; struct sg_table *sgt;
dma_addr_t dma_addr; dma_addr_t dma_addr;
dma_addr_t phys_addr; dma_addr_t phys_addr;
int err = 0; int err = 0;
get_dma_buf(dmabuf); dmabuf = dma_buf_get(desc->import_id);
if (IS_ERR_OR_NULL(dmabuf)) {
err = -EFAULT;
goto fail_to_get_dma_buf;
}
attach = dma_buf_attach(dmabuf, &pdev->dev); attach = dma_buf_attach(dmabuf, &pdev->dev);
if (IS_ERR_OR_NULL(attach)) { if (IS_ERR_OR_NULL(attach)) {
@@ -171,6 +163,9 @@ static int nvdla_buffer_map(struct platform_device *pdev,
dma_addr = phys_addr; dma_addr = phys_addr;
vm->sgt = sgt; vm->sgt = sgt;
vm->handle = desc->share_id;
vm->offset = desc->offset;
vm->access_flags = desc->access_flags;
vm->attach = attach; vm->attach = attach;
vm->dmabuf = dmabuf; vm->dmabuf = dmabuf;
vm->size = dmabuf->size; vm->size = dmabuf->size;
@@ -183,6 +178,7 @@ buf_map_err:
dma_buf_detach(dmabuf, attach); dma_buf_detach(dmabuf, attach);
buf_attach_err: buf_attach_err:
dma_buf_put(dmabuf); dma_buf_put(dmabuf);
fail_to_get_dma_buf:
return err; return err;
} }
@@ -248,7 +244,7 @@ void nvdla_buffer_set_platform_device(struct nvdla_buffers *nvdla_buffers,
} }
int nvdla_buffer_submit_pin(struct nvdla_buffers *nvdla_buffers, int nvdla_buffer_submit_pin(struct nvdla_buffers *nvdla_buffers,
struct dma_buf **dmabufs, u32 count, u32 *handles, u32 count,
dma_addr_t *paddr, size_t *psize, dma_addr_t *paddr, size_t *psize,
enum nvdla_buffers_heap *heap) enum nvdla_buffers_heap *heap)
{ {
@@ -260,7 +256,7 @@ int nvdla_buffer_submit_pin(struct nvdla_buffers *nvdla_buffers,
mutex_lock(&nvdla_buffers->mutex); mutex_lock(&nvdla_buffers->mutex);
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
vm = nvdla_find_map_buffer(nvdla_buffers, dmabufs[i]); vm = nvdla_find_map_buffer(nvdla_buffers, handles[i]);
if (vm == NULL) if (vm == NULL)
goto submit_err; goto submit_err;
@@ -282,13 +278,13 @@ submit_err:
count = i; count = i;
nvdla_buffer_submit_unpin(nvdla_buffers, dmabufs, count); nvdla_buffer_submit_unpin(nvdla_buffers, handles, count);
return -EINVAL; return -EINVAL;
} }
int nvdla_buffer_pin(struct nvdla_buffers *nvdla_buffers, int nvdla_buffer_pin(struct nvdla_buffers *nvdla_buffers,
struct dma_buf **dmabufs, struct nvdla_mem_share_handle *descs,
u32 count) u32 count)
{ {
struct nvdla_vm_buffer *vm; struct nvdla_vm_buffer *vm;
@@ -298,7 +294,7 @@ int nvdla_buffer_pin(struct nvdla_buffers *nvdla_buffers,
mutex_lock(&nvdla_buffers->mutex); mutex_lock(&nvdla_buffers->mutex);
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
vm = nvdla_find_map_buffer(nvdla_buffers, dmabufs[i]); vm = nvdla_find_map_buffer(nvdla_buffers, descs[i].share_id);
if (vm) { if (vm) {
vm->user_map_count++; vm->user_map_count++;
continue; continue;
@@ -310,7 +306,7 @@ int nvdla_buffer_pin(struct nvdla_buffers *nvdla_buffers,
goto unpin; goto unpin;
} }
err = nvdla_buffer_map(nvdla_buffers->pdev, dmabufs[i], vm); err = nvdla_buffer_map(nvdla_buffers->pdev, &descs[i], vm);
if (err) if (err)
goto free_vm; goto free_vm;
@@ -328,13 +324,13 @@ unpin:
/* free pinned buffers */ /* free pinned buffers */
count = i; count = i;
nvdla_buffer_unpin(nvdla_buffers, dmabufs, count); nvdla_buffer_unpin(nvdla_buffers, descs, count);
return err; return err;
} }
void nvdla_buffer_submit_unpin(struct nvdla_buffers *nvdla_buffers, void nvdla_buffer_submit_unpin(struct nvdla_buffers *nvdla_buffers,
struct dma_buf **dmabufs, u32 count) u32 *handles, u32 count)
{ {
struct nvdla_vm_buffer *vm; struct nvdla_vm_buffer *vm;
int i = 0; int i = 0;
@@ -343,7 +339,7 @@ void nvdla_buffer_submit_unpin(struct nvdla_buffers *nvdla_buffers,
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
vm = nvdla_find_map_buffer(nvdla_buffers, dmabufs[i]); vm = nvdla_find_map_buffer(nvdla_buffers, handles[i]);
if (vm == NULL) if (vm == NULL)
continue; continue;
@@ -358,7 +354,7 @@ void nvdla_buffer_submit_unpin(struct nvdla_buffers *nvdla_buffers,
} }
void nvdla_buffer_unpin(struct nvdla_buffers *nvdla_buffers, void nvdla_buffer_unpin(struct nvdla_buffers *nvdla_buffers,
struct dma_buf **dmabufs, u32 count) struct nvdla_mem_share_handle *descs, u32 count)
{ {
int i = 0; int i = 0;
@@ -367,7 +363,7 @@ void nvdla_buffer_unpin(struct nvdla_buffers *nvdla_buffers,
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
struct nvdla_vm_buffer *vm = NULL; struct nvdla_vm_buffer *vm = NULL;
vm = nvdla_find_map_buffer(nvdla_buffers, dmabufs[i]); vm = nvdla_find_map_buffer(nvdla_buffers, descs[i].share_id);
if (vm == NULL) if (vm == NULL)
continue; continue;

View File

@@ -1,7 +1,7 @@
/* /*
* NVDLA Buffer Management Header * NVDLA Buffer Management Header
* *
* Copyright (c) 2019-2020, NVIDIA Corporation. All rights reserved. * Copyright (c) 2019-2022, NVIDIA Corporation. All rights reserved.
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License, * under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
#define __NVHOST_NVDLA_BUFFER_H__ #define __NVHOST_NVDLA_BUFFER_H__
#include <linux/dma-buf.h> #include <linux/dma-buf.h>
#include <uapi/linux/nvhost_nvdla_ioctl.h>
enum nvdla_buffers_heap { enum nvdla_buffers_heap {
NVDLA_BUFFERS_HEAP_DRAM = 0, NVDLA_BUFFERS_HEAP_DRAM = 0,
@@ -87,27 +88,27 @@ void nvdla_buffer_set_platform_device(struct nvdla_buffers *nvdla_buffers,
* This function maps the buffer memhandle list passed from user side * This function maps the buffer memhandle list passed from user side
* to device iova. * to device iova.
* *
* @param nvdla_buffers Pointer to nvdla_buffers struct * @param nvdla_buffers Pointer to nvdla_buffers struct
* @param dmabufs Pointer to dmabuffer list * @param descs Pointer to share descriptor list
* @param count Number of memhandles in the list * @param count Number of memhandles in the list
* @return 0 on success or negative on error * @return 0 on success or negative on error
* *
*/ */
int nvdla_buffer_pin(struct nvdla_buffers *nvdla_buffers, int nvdla_buffer_pin(struct nvdla_buffers *nvdla_buffers,
struct dma_buf **dmabufs, struct nvdla_mem_share_handle *descs,
u32 count); u32 count);
/** /**
* @brief UnPins the mapped address space. * @brief UnPins the mapped address space.
* *
* @param nvdla_buffers Pointer to nvdla_buffer struct * @param nvdla_buffers Pointer to nvdla_buffer struct
* @param dmabufs Pointer to dmabuffer list * @param descs Pointer to share descriptor list
* @param count Number of memhandles in the list * @param count Number of memhandles in the list
* @return None * @return None
* *
*/ */
void nvdla_buffer_unpin(struct nvdla_buffers *nvdla_buffers, void nvdla_buffer_unpin(struct nvdla_buffers *nvdla_buffers,
struct dma_buf **dmabufs, struct nvdla_mem_share_handle *descs,
u32 count); u32 count);
/** /**
@@ -116,8 +117,8 @@ void nvdla_buffer_unpin(struct nvdla_buffers *nvdla_buffers,
* This function increased the reference count for a mapped buffer during * This function increased the reference count for a mapped buffer during
* task submission. * task submission.
* *
* @param nvdla_buffers Pointer to nvdla_buffer struct * @param nvdla_buffers Pointer to nvdla_buffer struct
* @param dmabufs Pointer to dmabuffer list * @param handles Pointer to MemHandle list
* @param count Number of memhandles in the list * @param count Number of memhandles in the list
* @param paddr Pointer to IOVA list * @param paddr Pointer to IOVA list
* @param psize Pointer to size of buffer to return * @param psize Pointer to size of buffer to return
@@ -128,7 +129,7 @@ void nvdla_buffer_unpin(struct nvdla_buffers *nvdla_buffers,
* *
*/ */
int nvdla_buffer_submit_pin(struct nvdla_buffers *nvdla_buffers, int nvdla_buffer_submit_pin(struct nvdla_buffers *nvdla_buffers,
struct dma_buf **dmabufs, u32 count, u32 *handles, u32 count,
dma_addr_t *paddr, size_t *psize, dma_addr_t *paddr, size_t *psize,
enum nvdla_buffers_heap *heap); enum nvdla_buffers_heap *heap);
@@ -138,14 +139,14 @@ int nvdla_buffer_submit_pin(struct nvdla_buffers *nvdla_buffers,
* This function decrease the reference count for a mapped buffer when the * This function decrease the reference count for a mapped buffer when the
* task get completed or aborted. * task get completed or aborted.
* *
* @param nvdla_buffers Pointer to nvdla_buffer struct * @param nvdla_buffers Pointer to nvdla_buffer struct
* @param dmabufs Pointer to dmabuffer list * @param handles Pointer to MemHandle list
* @param count Number of memhandles in the list * @param count Number of memhandles in the list
* @return None * @return None
* *
*/ */
void nvdla_buffer_submit_unpin(struct nvdla_buffers *nvdla_buffers, void nvdla_buffer_submit_unpin(struct nvdla_buffers *nvdla_buffers,
struct dma_buf **dmabufs, u32 count); u32 *handles, u32 count);
/** /**
* @brief Drop a user reference to buffer structure * @brief Drop a user reference to buffer structure
@@ -156,16 +157,4 @@ void nvdla_buffer_submit_unpin(struct nvdla_buffers *nvdla_buffers,
*/ */
void nvdla_buffer_release(struct nvdla_buffers *nvdla_buffers); void nvdla_buffer_release(struct nvdla_buffers *nvdla_buffers);
/**
* @brief Returns dma buf and dma addr for a given handle
*
* @param nvdla_buffers Pointer to nvdla_buffer struct
* @param dmabuf dma buf pointer to search for
* @param addr dma_addr_t pointer to return
* @return 0 on success or negative on error
*
*/
int nvdla_get_iova_addr(struct nvdla_buffers *nvdla_buffers,
struct dma_buf *dmabuf, dma_addr_t *addr);
#endif /*__NVHOST_NVDLA_BUFFER_H__ */ #endif /*__NVHOST_NVDLA_BUFFER_H__ */

View File

@@ -161,9 +161,7 @@ fail_to_send_fence:
static int nvdla_pin(struct nvdla_private *priv, void *arg) static int nvdla_pin(struct nvdla_private *priv, void *arg)
{ {
struct nvdla_mem_share_handle handles[MAX_NVDLA_PIN_BUFFERS]; struct nvdla_mem_share_handle handles[MAX_NVDLA_PIN_BUFFERS];
struct dma_buf *dmabufs[MAX_NVDLA_PIN_BUFFERS];
int err = 0; int err = 0;
int i = 0;
struct nvdla_pin_unpin_args *buf_list = struct nvdla_pin_unpin_args *buf_list =
(struct nvdla_pin_unpin_args *)arg; (struct nvdla_pin_unpin_args *)arg;
u32 count; u32 count;
@@ -198,22 +196,10 @@ static int nvdla_pin(struct nvdla_private *priv, void *arg)
goto nvdla_buffer_cpy_err; goto nvdla_buffer_cpy_err;
} }
/* get the dmabuf pointer from the fd handle */
for (i = 0; i < count; i++) {
dmabufs[i] = dma_buf_get(handles[i].share_id);
if (IS_ERR_OR_NULL(dmabufs[i])) {
err = -EFAULT;
goto fail_to_get_dma_buf;
}
}
speculation_barrier(); /* break_spec_p#5_1 */ speculation_barrier(); /* break_spec_p#5_1 */
err = nvdla_buffer_pin(priv->buffers, dmabufs, count); err = nvdla_buffer_pin(priv->buffers, handles, count);
fail_to_get_dma_buf:
count = i;
for (i = 0; i < count; i++)
dma_buf_put(dmabufs[i]);
nvdla_buffer_cpy_err: nvdla_buffer_cpy_err:
fail_to_get_val_cnt: fail_to_get_val_cnt:
fail_to_get_val_arg: fail_to_get_val_arg:
@@ -223,9 +209,7 @@ fail_to_get_val_arg:
static int nvdla_unpin(struct nvdla_private *priv, void *arg) static int nvdla_unpin(struct nvdla_private *priv, void *arg)
{ {
struct nvdla_mem_share_handle handles[MAX_NVDLA_PIN_BUFFERS]; struct nvdla_mem_share_handle handles[MAX_NVDLA_PIN_BUFFERS];
struct dma_buf *dmabufs[MAX_NVDLA_PIN_BUFFERS];
int err = 0; int err = 0;
int i = 0;
struct nvdla_pin_unpin_args *buf_list = struct nvdla_pin_unpin_args *buf_list =
(struct nvdla_pin_unpin_args *)arg; (struct nvdla_pin_unpin_args *)arg;
u32 count; u32 count;
@@ -260,19 +244,9 @@ static int nvdla_unpin(struct nvdla_private *priv, void *arg)
goto nvdla_buffer_cpy_err; goto nvdla_buffer_cpy_err;
} }
/* get the dmabuf pointer and clean valid ones */
for (i = 0; i < count; i++) {
dmabufs[i] = dma_buf_get(handles[i].share_id);
if (IS_ERR_OR_NULL(dmabufs[i]))
continue;
}
speculation_barrier(); /* break_spec_p#5_1 */ speculation_barrier(); /* break_spec_p#5_1 */
nvdla_buffer_unpin(priv->buffers, dmabufs, count); nvdla_buffer_unpin(priv->buffers, handles, count);
count = i;
for (i = 0; i < count; i++)
dma_buf_put(dmabufs[i]);
nvdla_buffer_cpy_err: nvdla_buffer_cpy_err:
fail_to_get_val_cnt: fail_to_get_val_cnt:

View File

@@ -182,8 +182,7 @@ static int nvdla_unmap_task_memory(struct nvdla_task *task)
} }
if (task->memory_handles[ii].handle) { if (task->memory_handles[ii].handle) {
nvdla_buffer_submit_unpin(task->buffers, nvdla_buffer_submit_unpin(task->buffers,
&task->memory_dmabuf[ii], 1); &task->memory_handles[ii].handle, 1);
dma_buf_put(task->memory_dmabuf[ii]);
} }
} }
nvdla_dbg_fn(pdev, "all mem handles unmaped"); nvdla_dbg_fn(pdev, "all mem handles unmaped");
@@ -194,8 +193,7 @@ static int nvdla_unmap_task_memory(struct nvdla_task *task)
task->prefences[ii].type == NVDEV_FENCE_TYPE_SEMAPHORE_TS) && task->prefences[ii].type == NVDEV_FENCE_TYPE_SEMAPHORE_TS) &&
task->prefences[ii].semaphore_handle) { task->prefences[ii].semaphore_handle) {
nvdla_buffer_submit_unpin(task->buffers, nvdla_buffer_submit_unpin(task->buffers,
&task->prefences_sem_dmabuf[ii], 1); &task->prefences[ii].semaphore_handle, 1);
dma_buf_put(task->prefences_sem_dmabuf[ii]);
} }
} }
nvdla_dbg_fn(pdev, "all prefences unmaped"); nvdla_dbg_fn(pdev, "all prefences unmaped");
@@ -204,8 +202,7 @@ static int nvdla_unmap_task_memory(struct nvdla_task *task)
for (ii = 0; ii < task->num_in_task_status; ii++) { for (ii = 0; ii < task->num_in_task_status; ii++) {
if (task->in_task_status[ii].handle) { if (task->in_task_status[ii].handle) {
nvdla_buffer_submit_unpin(task->buffers, nvdla_buffer_submit_unpin(task->buffers,
&task->in_task_status_dmabuf[ii], 1); &task->in_task_status[ii].handle, 1);
dma_buf_put(task->in_task_status_dmabuf[ii]);
} }
} }
nvdla_dbg_fn(pdev, "all in task status unmaped"); nvdla_dbg_fn(pdev, "all in task status unmaped");
@@ -216,8 +213,7 @@ static int nvdla_unmap_task_memory(struct nvdla_task *task)
task->postfences[ii].type == NVDEV_FENCE_TYPE_SEMAPHORE_TS) && task->postfences[ii].type == NVDEV_FENCE_TYPE_SEMAPHORE_TS) &&
task->postfences[ii].semaphore_handle) { task->postfences[ii].semaphore_handle) {
nvdla_buffer_submit_unpin(task->buffers, nvdla_buffer_submit_unpin(task->buffers,
&task->postfences_sem_dmabuf[ii], 1); &task->postfences[ii].semaphore_handle, 1);
dma_buf_put(task->postfences_sem_dmabuf[ii]);
} }
} }
nvdla_dbg_fn(pdev, "all postfences unmaped"); nvdla_dbg_fn(pdev, "all postfences unmaped");
@@ -226,16 +222,14 @@ static int nvdla_unmap_task_memory(struct nvdla_task *task)
for (ii = 0; ii < task->num_sof_task_status; ii++) { for (ii = 0; ii < task->num_sof_task_status; ii++) {
if (task->sof_task_status[ii].handle) { if (task->sof_task_status[ii].handle) {
nvdla_buffer_submit_unpin(task->buffers, nvdla_buffer_submit_unpin(task->buffers,
&task->sof_task_status_dmabuf[ii], 1); &task->sof_task_status[ii].handle, 1);
dma_buf_put(task->sof_task_status_dmabuf[ii]);
} }
} }
for (ii = 0; ii < task->num_eof_task_status; ii++) { for (ii = 0; ii < task->num_eof_task_status; ii++) {
if (task->eof_task_status[ii].handle) { if (task->eof_task_status[ii].handle) {
nvdla_buffer_submit_unpin(task->buffers, nvdla_buffer_submit_unpin(task->buffers,
&task->eof_task_status_dmabuf[ii], 1); &task->eof_task_status[ii].handle, 1);
dma_buf_put(task->eof_task_status_dmabuf[ii]);
} }
} }
nvdla_dbg_fn(pdev, "all out task status unmaped"); nvdla_dbg_fn(pdev, "all out task status unmaped");
@@ -244,16 +238,14 @@ static int nvdla_unmap_task_memory(struct nvdla_task *task)
for (ii = 0; ii < task->num_sof_timestamps; ii++) { for (ii = 0; ii < task->num_sof_timestamps; ii++) {
if (task->sof_timestamps[ii].handle) { if (task->sof_timestamps[ii].handle) {
nvdla_buffer_submit_unpin(task->buffers, nvdla_buffer_submit_unpin(task->buffers,
&task->sof_timestamps_dmabuf[ii], 1); &task->sof_timestamps[ii].handle, 1);
dma_buf_put(task->sof_timestamps_dmabuf[ii]);
} }
} }
for (ii = 0; ii < task->num_eof_timestamps; ii++) { for (ii = 0; ii < task->num_eof_timestamps; ii++) {
if (task->eof_timestamps[ii].handle) { if (task->eof_timestamps[ii].handle) {
nvdla_buffer_submit_unpin(task->buffers, nvdla_buffer_submit_unpin(task->buffers,
&task->eof_timestamps_dmabuf[ii], 1); &task->eof_timestamps[ii].handle, 1);
dma_buf_put(task->eof_timestamps_dmabuf[ii]);
} }
} }
nvdla_dbg_fn(pdev, "all out timestamps unmaped"); nvdla_dbg_fn(pdev, "all out timestamps unmaped");
@@ -607,17 +599,8 @@ static int nvdla_map_task_memory(struct nvdla_task *task)
goto fail_to_pin_mem; goto fail_to_pin_mem;
} }
task->memory_dmabuf[jj] =
dma_buf_get(task->memory_handles[jj].handle);
if (IS_ERR_OR_NULL(task->memory_dmabuf[jj])) {
task->memory_dmabuf[jj] = NULL;
err = -EFAULT;
nvdla_dbg_err(pdev, "fail to get buf");
goto fail_to_pin_mem;
}
err = nvdla_buffer_submit_pin(buffers, err = nvdla_buffer_submit_pin(buffers,
&task->memory_dmabuf[jj], &task->memory_handles[jj].handle,
1, &dma_addr, &dma_size, NULL); 1, &dma_addr, &dma_size, NULL);
if (err) { if (err) {
nvdla_dbg_err(pdev, "fail to pin address list"); nvdla_dbg_err(pdev, "fail to pin address list");
@@ -745,15 +728,8 @@ static int nvdla_fill_wait_fence_action(struct nvdla_task *task,
fence->semaphore_offset, fence->semaphore_offset,
fence->semaphore_value); fence->semaphore_value);
*dma_buf = dma_buf_get(fence->semaphore_handle);
if (IS_ERR_OR_NULL(*dma_buf)) {
*dma_buf = NULL;
nvdla_dbg_err(pdev, "fail to get wait buf");
break;
}
if (nvdla_buffer_submit_pin(buffers, if (nvdla_buffer_submit_pin(buffers,
dma_buf, 1, &dma_addr, &dma_size, NULL)) { &fence->semaphore_handle, 1, &dma_addr, &dma_size, NULL)) {
nvdla_dbg_err(pdev, "fail to pin WAIT SEM"); nvdla_dbg_err(pdev, "fail to pin WAIT SEM");
break; break;
} }
@@ -831,15 +807,8 @@ static int nvdla_fill_signal_fence_action(struct nvdla_task *task,
fence->semaphore_offset, fence->semaphore_offset,
fence->semaphore_value); fence->semaphore_value);
*dma_buf = dma_buf_get(fence->semaphore_handle);
if (IS_ERR_OR_NULL(*dma_buf)) {
*dma_buf = NULL;
nvdla_dbg_err(pdev, "fail to get buf");
break;
}
if (nvdla_buffer_submit_pin(buffers, if (nvdla_buffer_submit_pin(buffers,
dma_buf, 1, &dma_addr, &dma_size, NULL)) { &fence->semaphore_handle, 1, &dma_addr, &dma_size, NULL)) {
nvdla_dbg_err(pdev, "fail to pin SIGNAL SEM"); nvdla_dbg_err(pdev, "fail to pin SIGNAL SEM");
break; break;
} }
@@ -864,15 +833,8 @@ static int nvdla_fill_signal_fence_action(struct nvdla_task *task,
fence->semaphore_offset, fence->semaphore_offset,
fence->semaphore_value); fence->semaphore_value);
*dma_buf = dma_buf_get(fence->semaphore_handle);
if (IS_ERR_OR_NULL(*dma_buf)) {
*dma_buf = NULL;
nvdla_dbg_err(pdev, "fail to get buf");
break;
}
if (nvdla_buffer_submit_pin(buffers, if (nvdla_buffer_submit_pin(buffers,
dma_buf, 1, &dma_addr, &dma_size, NULL)) { &fence->semaphore_handle, 1, &dma_addr, &dma_size, NULL)) {
nvdla_dbg_err(pdev, "fail to pin SIGNAL SEM"); nvdla_dbg_err(pdev, "fail to pin SIGNAL SEM");
break; break;
} }
@@ -916,16 +878,8 @@ static int nvdla_fill_taskstatus_read_action(struct nvdla_task *task,
task_status->offset, task_status->offset,
task_status->status); task_status->status);
*dma_buf = dma_buf_get(task_status->handle);
if (IS_ERR_OR_NULL(*dma_buf)) {
*dma_buf = NULL;
nvdla_dbg_err(pdev, "fail to get buf");
err = -EINVAL;
goto fail;
}
if (nvdla_buffer_submit_pin(buffers, if (nvdla_buffer_submit_pin(buffers,
dma_buf, 1, &dma_addr, &dma_size, NULL)) { &task_status->handle, 1, &dma_addr, &dma_size, NULL)) {
nvdla_dbg_err(pdev, "fail to pin in status"); nvdla_dbg_err(pdev, "fail to pin in status");
err = -EINVAL; err = -EINVAL;
goto fail; goto fail;
@@ -961,16 +915,8 @@ static int nvdla_fill_taskstatus_write_action(struct nvdla_task *task,
task_status->offset, task_status->offset,
task_status->status); task_status->status);
*dma_buf = dma_buf_get(task_status->handle);
if (IS_ERR_OR_NULL(*dma_buf)) {
*dma_buf = NULL;
nvdla_dbg_err(pdev, "fail to get buf");
err = -EINVAL;
goto fail;
}
if (nvdla_buffer_submit_pin(buffers, if (nvdla_buffer_submit_pin(buffers,
dma_buf, 1, &dma_addr, &dma_size, NULL)) { &task_status->handle, 1, &dma_addr, &dma_size, NULL)) {
nvdla_dbg_err(pdev, "fail to pin status"); nvdla_dbg_err(pdev, "fail to pin status");
err = -EINVAL; err = -EINVAL;
goto fail; goto fail;
@@ -1005,16 +951,8 @@ static int nvdla_fill_timestamp_write_action(struct nvdla_task *task,
timestamp->handle, timestamp->handle,
timestamp->offset); timestamp->offset);
*dma_buf = dma_buf_get(timestamp->handle);
if (IS_ERR_OR_NULL(*dma_buf)) {
*dma_buf = NULL;
nvdla_dbg_err(pdev, "fail to get buf");
err = -EINVAL;
goto fail;
}
if (nvdla_buffer_submit_pin(buffers, if (nvdla_buffer_submit_pin(buffers,
dma_buf, 1, &dma_addr, &dma_size, NULL)) { &timestamp->handle, 1, &dma_addr, &dma_size, NULL)) {
nvdla_dbg_err(pdev, "fail to pin timestamp"); nvdla_dbg_err(pdev, "fail to pin timestamp");
err = -EINVAL; err = -EINVAL;
goto fail; goto fail;

View File

@@ -3,7 +3,7 @@
* *
* Tegra NvDLA Driver * Tegra NvDLA Driver
* *
* Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License, * under the terms and conditions of the GNU General Public License,
@@ -70,13 +70,13 @@ struct nvdla_ping_args {
* @share_id identifier of handle to be shared * @share_id identifier of handle to be shared
* @offset offset within the shared memory * @offset offset within the shared memory
* @access_flags access with which memory is intended to be shared * @access_flags access with which memory is intended to be shared
* @reserved reserved for future use * @import_id memory import transaction identifier
**/ **/
struct nvdla_mem_share_handle { struct nvdla_mem_share_handle {
__u32 share_id; __u32 share_id;
__u32 offset; __u32 offset;
__u32 access_flags; __u32 access_flags;
__u32 reserved; __u32 import_id;
}; };
/** /**