video: tegra: nvmap: Add more checks to avoid races

Add more checks in the nvmap code to avoid possible races:
- Update the is_nvmap_id_ro and is_nvmap_dmabuf_fd_ro functions so that they
  return an error value on failure, and update their callers to handle those
  error values (sketched below).
- Move all trace statements from the end of each function to before the handle
  refcount or dup count is decremented; this makes sure we are not
  dereferencing a freed handle/reference/dmabuf (see the second sketch below).
- Increment the ref's dup count wherever a data race is possible, and decrement
  it accordingly towards the end of the function.
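
For illustration, a minimal sketch of the new error-returning contract and the
caller pattern, assuming the signatures shown in the diff below (the example
caller itself is hypothetical):

    int is_nvmap_id_ro(struct nvmap_client *client, int id, bool *is_ro);

    static int example_use_ro_check(struct nvmap_client *client, int id)
    {
            bool is_ro = false;

            /* The check now reports failure instead of silently returning false. */
            if (is_nvmap_id_ro(client, id, &is_ro) != 0) {
                    pr_err("Handle RO check failed\n");
                    return -EINVAL;
            }

            /* Use is_ro only after a successful check. */
            return 0;
    }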
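
And a sketch of the trace-ordering and dup-count guard pattern used in the
ioctl paths; the names follow the diff, while the surrounding flow is
simplified and hypothetical:

    static void example_trace_and_release(struct nvmap_handle_ref *ref,
                                          bool is_ro)
    {
            struct nvmap_handle *handle = ref->handle;
            struct dma_buf *dmabuf;

            /*
             * Guard against a concurrent free (e.g. another thread calling
             * NvRmMemHandleFree) while this path still uses the reference.
             */
            atomic_inc(&ref->dupes);

            dmabuf = is_ro ? handle->dmabuf_ro : handle->dmabuf;

            /* Trace while the handle and dmabuf are still guaranteed alive. */
            trace_refcount_alloc(handle, dmabuf,
                                 atomic_read(&handle->ref),
                                 atomic_long_read(&dmabuf->file->f_count),
                                 is_ro ? "RO" : "RW");

            /* Drop the guard and the handle reference only after tracing. */
            atomic_dec(&ref->dupes);
            nvmap_handle_put(handle);
    }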

Bug 4253911

Change-Id: I50fc7cc98ebbf3c50025bc2f9ca32882138fb272
Signed-off-by: Ketan Patil <ketanp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2972602
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Ketan Patil
2023-09-01 10:50:17 +00:00
committed by mobile promotions
parent 7a01b01e88
commit ebf51c43ae
6 changed files with 165 additions and 52 deletions


@@ -1142,13 +1142,13 @@ out:
nvmap_handle_put(h); nvmap_handle_put(h);
} }
bool is_nvmap_id_ro(struct nvmap_client *client, int id) int is_nvmap_id_ro(struct nvmap_client *client, int id, bool *is_ro)
{ {
struct nvmap_handle_info *info = NULL; struct nvmap_handle_info *info = NULL;
struct dma_buf *dmabuf = NULL; struct dma_buf *dmabuf = NULL;
if (WARN_ON(!client)) if (WARN_ON(!client))
return ERR_PTR(-EINVAL); goto fail;
if (client->ida) if (client->ida)
dmabuf = nvmap_id_array_get_dmabuf_from_id(client->ida, dmabuf = nvmap_id_array_get_dmabuf_from_id(client->ida,
@@ -1157,19 +1157,32 @@ bool is_nvmap_id_ro(struct nvmap_client *client, int id)
dmabuf = dma_buf_get(id); dmabuf = dma_buf_get(id);
if (IS_ERR_OR_NULL(dmabuf)) if (IS_ERR_OR_NULL(dmabuf))
return false; goto fail;
if (dmabuf_is_nvmap(dmabuf)) if (dmabuf_is_nvmap(dmabuf))
info = dmabuf->priv; info = dmabuf->priv;
if (!info) {
dma_buf_put(dmabuf); dma_buf_put(dmabuf);
/*
* Ideally, we should return error from here,
* but this is done intentionally to handle foreign buffers.
*/
return 0;
}
return (info != NULL) ? info->is_ro : false; *is_ro = info->is_ro;
dma_buf_put(dmabuf);
return 0;
fail:
pr_err("Handle RO check failed\n");
return -EINVAL;
} }
void nvmap_free_handle_from_fd(struct nvmap_client *client, void nvmap_free_handle_from_fd(struct nvmap_client *client,
int id) int id)
{ {
bool is_ro = is_nvmap_id_ro(client, id); bool is_ro = false;
struct nvmap_handle *handle; struct nvmap_handle *handle;
struct dma_buf *dmabuf = NULL; struct dma_buf *dmabuf = NULL;
int handle_ref = 0; int handle_ref = 0;
@@ -1179,12 +1192,15 @@ void nvmap_free_handle_from_fd(struct nvmap_client *client,
if (IS_ERR_OR_NULL(handle)) if (IS_ERR_OR_NULL(handle))
return; return;
if (is_nvmap_id_ro(client, id, &is_ro) != 0) {
nvmap_handle_put(handle);
return;
}
if (client->ida) if (client->ida)
nvmap_id_array_id_release(client->ida, id); nvmap_id_array_id_release(client->ida, id);
nvmap_free_handle(client, handle, is_ro); nvmap_free_handle(client, handle, is_ro);
nvmap_handle_put(handle);
if (handle) { if (handle) {
dmabuf = is_ro ? handle->dmabuf_ro : handle->dmabuf; dmabuf = is_ro ? handle->dmabuf_ro : handle->dmabuf;
handle_ref = atomic_read(&handle->ref); handle_ref = atomic_read(&handle->ref);
@@ -1193,6 +1209,7 @@ void nvmap_free_handle_from_fd(struct nvmap_client *client,
trace_refcount_free_handle(handle, dmabuf, handle_ref, dmabuf_ref, trace_refcount_free_handle(handle, dmabuf, handle_ref, dmabuf_ref,
is_ro ? "RO" : "RW"); is_ro ? "RO" : "RW");
nvmap_handle_put(handle);
} }
static int nvmap_assign_pages_per_handle(struct nvmap_handle *src_h, static int nvmap_assign_pages_per_handle(struct nvmap_handle *src_h,


@@ -738,21 +738,35 @@ struct nvmap_handle *nvmap_handle_get_from_dmabuf_fd(
return handle; return handle;
} }
bool is_nvmap_dmabuf_fd_ro(int fd) int is_nvmap_dmabuf_fd_ro(int fd, bool *is_ro)
{ {
struct dma_buf *dmabuf; struct dma_buf *dmabuf;
struct nvmap_handle_info *info = NULL; struct nvmap_handle_info *info = NULL;
dmabuf = dma_buf_get(fd); dmabuf = dma_buf_get(fd);
if (IS_ERR(dmabuf)) { if (IS_ERR(dmabuf)) {
return false; goto fail;
} }
if (dmabuf_is_nvmap(dmabuf)) {
info = dmabuf->priv;
}
dma_buf_put(dmabuf);
return (info != NULL) ? info->is_ro : false; if (dmabuf_is_nvmap(dmabuf))
info = dmabuf->priv;
if (!info) {
dma_buf_put(dmabuf);
/*
* Ideally, we should return error from here,
* but this is done intentionally to handle foreign buffers.
*/
return 0;
}
*is_ro = info->is_ro;
dma_buf_put(dmabuf);
return 0;
fail:
pr_err("Dmabuf fd RO check failed\n");
return -EINVAL;
} }
/* /*


@@ -420,11 +420,17 @@ struct nvmap_handle_ref *nvmap_create_handle_from_id(
{ {
struct nvmap_handle *handle; struct nvmap_handle *handle;
struct nvmap_handle_ref *ref; struct nvmap_handle_ref *ref;
bool is_ro = false;
if (WARN_ON(!client)) if (WARN_ON(!client))
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
if (is_nvmap_id_ro(client, id)) if (is_nvmap_id_ro(client, id, &is_ro) != 0) {
pr_err("Handle ID RO check failed\n");
return ERR_PTR(-EINVAL);
}
if (is_ro)
return nvmap_dup_handle_ro(client, id); return nvmap_dup_handle_ro(client, id);
handle = nvmap_handle_get_from_id(client, id); handle = nvmap_handle_get_from_id(client, id);
@@ -447,7 +453,7 @@ struct nvmap_handle_ref *nvmap_create_handle_from_fd(
{ {
struct nvmap_handle *handle; struct nvmap_handle *handle;
struct nvmap_handle_ref *ref; struct nvmap_handle_ref *ref;
bool is_ro; bool is_ro = false;
if (WARN_ON(!client)) if (WARN_ON(!client))
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
@@ -456,7 +462,12 @@ struct nvmap_handle_ref *nvmap_create_handle_from_fd(
if (IS_ERR(handle)) if (IS_ERR(handle))
return ERR_CAST(handle); return ERR_CAST(handle);
is_ro = is_nvmap_dmabuf_fd_ro(fd); if (is_nvmap_dmabuf_fd_ro(fd, &is_ro) != 0) {
pr_err("Dmabuf fd RO check failed\n");
nvmap_handle_put(handle);
return ERR_PTR(-EINVAL);
}
if (is_ro) if (is_ro)
ref = nvmap_duplicate_handle(client, handle, false, true); ref = nvmap_duplicate_handle(client, handle, false, true);
else else


@@ -143,17 +143,20 @@ int nvmap_ioctl_getfd(struct file *filp, void __user *arg)
struct nvmap_client *client = filp->private_data; struct nvmap_client *client = filp->private_data;
struct dma_buf *dmabuf; struct dma_buf *dmabuf;
int ret = 0; int ret = 0;
bool is_ro; bool is_ro = false;
if (copy_from_user(&op, arg, sizeof(op))) if (copy_from_user(&op, arg, sizeof(op)))
return -EFAULT; return -EFAULT;
is_ro = is_nvmap_id_ro(client, op.handle);
handle = nvmap_handle_get_from_id(client, op.handle); handle = nvmap_handle_get_from_id(client, op.handle);
if (!IS_ERR_OR_NULL(handle)) { if (!IS_ERR_OR_NULL(handle)) {
ret = is_nvmap_id_ro(client, op.handle, &is_ro);
if (ret != 0) {
pr_err("Handle ID RO check failed\n");
goto fail;
}
op.fd = nvmap_get_dmabuf_fd(client, handle, is_ro); op.fd = nvmap_get_dmabuf_fd(client, handle, is_ro);
nvmap_handle_put(handle);
dmabuf = IS_ERR_VALUE((uintptr_t)op.fd) ? dmabuf = IS_ERR_VALUE((uintptr_t)op.fd) ?
NULL : (is_ro ? handle->dmabuf_ro : handle->dmabuf); NULL : (is_ro ? handle->dmabuf_ro : handle->dmabuf);
} else { } else {
@@ -175,6 +178,11 @@ int nvmap_ioctl_getfd(struct file *filp, void __user *arg)
atomic_read(&handle->ref), atomic_read(&handle->ref),
atomic_long_read(&dmabuf->file->f_count), atomic_long_read(&dmabuf->file->f_count),
is_ro ? "RO" : "RW"); is_ro ? "RO" : "RW");
fail:
if (!IS_ERR_OR_NULL(handle))
nvmap_handle_put(handle);
return ret; return ret;
} }
@@ -184,7 +192,7 @@ int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
struct nvmap_client *client = filp->private_data; struct nvmap_client *client = filp->private_data;
struct nvmap_handle *handle; struct nvmap_handle *handle;
struct dma_buf *dmabuf = NULL; struct dma_buf *dmabuf = NULL;
bool is_ro; bool is_ro = false;
int err, i; int err, i;
unsigned int page_sz = PAGE_SIZE; unsigned int page_sz = PAGE_SIZE;
@@ -233,14 +241,14 @@ int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
0, /* no kind */ 0, /* no kind */
op.flags & (~NVMAP_HANDLE_KIND_SPECIFIED), op.flags & (~NVMAP_HANDLE_KIND_SPECIFIED),
NVMAP_IVM_INVALID_PEER); NVMAP_IVM_INVALID_PEER);
is_ro = is_nvmap_id_ro(client, op.handle);
dmabuf = is_ro ? handle->dmabuf_ro : handle->dmabuf;
if (!err) if (!err && !is_nvmap_id_ro(client, op.handle, &is_ro)) {
dmabuf = is_ro ? handle->dmabuf_ro : handle->dmabuf;
trace_refcount_alloc(handle, dmabuf, trace_refcount_alloc(handle, dmabuf,
atomic_read(&handle->ref), atomic_read(&handle->ref),
atomic_long_read(&dmabuf->file->f_count), atomic_long_read(&dmabuf->file->f_count),
is_ro ? "RO" : "RW"); is_ro ? "RO" : "RW");
}
nvmap_handle_put(handle); nvmap_handle_put(handle);
return err; return err;
} }
@@ -312,7 +320,6 @@ int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg)
if (!IS_ERR(ref)) if (!IS_ERR(ref))
ref->handle->orig_size = op.size64; ref->handle->orig_size = op.size64;
} else if (cmd == NVMAP_IOC_FROM_FD) { } else if (cmd == NVMAP_IOC_FROM_FD) {
is_ro = is_nvmap_dmabuf_fd_ro(op.fd);
ref = nvmap_create_handle_from_fd(client, op.fd); ref = nvmap_create_handle_from_fd(client, op.fd);
/* if we get an error, the fd might be non-nvmap dmabuf fd */ /* if we get an error, the fd might be non-nvmap dmabuf fd */
if (IS_ERR_OR_NULL(ref)) { if (IS_ERR_OR_NULL(ref)) {
@@ -328,13 +335,19 @@ int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg)
} }
if (!IS_ERR(ref)) { if (!IS_ERR(ref)) {
/*
* Increase reference dup count, so that handle is not freed accidentally
* due to other thread calling NvRmMemHandleFree
*/
atomic_inc(&ref->dupes);
is_ro = ref->is_ro;
handle = ref->handle; handle = ref->handle;
dmabuf = is_ro ? handle->dmabuf_ro : handle->dmabuf; dmabuf = is_ro ? handle->dmabuf_ro : handle->dmabuf;
if (client->ida) { if (client->ida) {
if (nvmap_id_array_id_alloc(client->ida, if (nvmap_id_array_id_alloc(client->ida,
&id, dmabuf) < 0) { &id, dmabuf) < 0) {
atomic_dec(&ref->dupes);
if (dmabuf) if (dmabuf)
dma_buf_put(dmabuf); dma_buf_put(dmabuf);
nvmap_free_handle(client, handle, is_ro); nvmap_free_handle(client, handle, is_ro);
@@ -346,6 +359,7 @@ int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg)
op.handle = id; op.handle = id;
if (copy_to_user(arg, &op, sizeof(op))) { if (copy_to_user(arg, &op, sizeof(op))) {
atomic_dec(&ref->dupes);
if (dmabuf) if (dmabuf)
dma_buf_put(dmabuf); dma_buf_put(dmabuf);
nvmap_free_handle(client, handle, is_ro); nvmap_free_handle(client, handle, is_ro);
@@ -383,6 +397,8 @@ out:
is_ro ? "RO" : "RW"); is_ro ? "RO" : "RW");
} }
if (!IS_ERR(ref))
atomic_dec(&ref->dupes);
return ret; return ret;
} }
@@ -419,12 +435,18 @@ int nvmap_ioctl_create_from_va(struct file *filp, void __user *arg)
return err; return err;
} }
/*
* Increase reference dup count, so that handle is not freed accidentally
* due to other thread calling NvRmMemHandleFree
*/
atomic_inc(&ref->dupes);
dmabuf = is_ro ? ref->handle->dmabuf_ro : ref->handle->dmabuf; dmabuf = is_ro ? ref->handle->dmabuf_ro : ref->handle->dmabuf;
if (client->ida) { if (client->ida) {
err = nvmap_id_array_id_alloc(client->ida, &id, err = nvmap_id_array_id_alloc(client->ida, &id,
dmabuf); dmabuf);
if (err < 0) { if (err < 0) {
atomic_dec(&ref->dupes);
if (dmabuf) if (dmabuf)
dma_buf_put(dmabuf); dma_buf_put(dmabuf);
nvmap_free_handle(client, ref->handle, is_ro); nvmap_free_handle(client, ref->handle, is_ro);
@@ -432,6 +454,7 @@ int nvmap_ioctl_create_from_va(struct file *filp, void __user *arg)
} }
op.handle = id; op.handle = id;
if (copy_to_user(arg, &op, sizeof(op))) { if (copy_to_user(arg, &op, sizeof(op))) {
atomic_dec(&ref->dupes);
if (dmabuf) if (dmabuf)
dma_buf_put(dmabuf); dma_buf_put(dmabuf);
nvmap_free_handle(client, ref->handle, is_ro); nvmap_free_handle(client, ref->handle, is_ro);
@@ -455,7 +478,7 @@ out:
atomic_read(&handle->ref), atomic_read(&handle->ref),
atomic_long_read(&dmabuf->file->f_count), atomic_long_read(&dmabuf->file->f_count),
is_ro ? "RO" : "RW"); is_ro ? "RO" : "RW");
atomic_dec(&ref->dupes);
return err; return err;
} }
@@ -475,6 +498,7 @@ int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg,
unsigned long addr, offset, elem_size, hmem_stride, user_stride; unsigned long addr, offset, elem_size, hmem_stride, user_stride;
unsigned long count; unsigned long count;
int handle; int handle;
bool is_ro = false;
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
if (op_size == sizeof(op32)) { if (op_size == sizeof(op32)) {
@@ -508,15 +532,23 @@ int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg,
if (IS_ERR_OR_NULL(h)) if (IS_ERR_OR_NULL(h))
return -EINVAL; return -EINVAL;
if (is_nvmap_id_ro(client, handle, &is_ro) != 0) {
pr_err("Handle ID RO check failed\n");
err = -EINVAL;
goto fail;
}
/* Don't allow write on RO handle */ /* Don't allow write on RO handle */
if (!is_read && is_nvmap_id_ro(client, handle)) { if (!is_read && is_ro) {
nvmap_handle_put(h); pr_err("Write operation is not allowed on RO handle\n");
return -EPERM; err = -EPERM;
goto fail;
} }
if (is_read && h->heap_type == NVMAP_HEAP_CARVEOUT_VPR) { if (is_read && h->heap_type == NVMAP_HEAP_CARVEOUT_VPR) {
nvmap_handle_put(h); pr_err("CPU read operation is not allowed on VPR carveout\n");
return -EPERM; err = -EPERM;
goto fail;
} }
/* /*
@@ -524,8 +556,8 @@ int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg,
* return error. * return error.
*/ */
if (h->is_ro && !is_read) { if (h->is_ro && !is_read) {
nvmap_handle_put(h); err = -EPERM;
return -EPERM; goto fail;
} }
nvmap_kmaps_inc(h); nvmap_kmaps_inc(h);
@@ -550,8 +582,8 @@ int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg,
#endif #endif
__put_user(copied, &uarg->count); __put_user(copied, &uarg->count);
fail:
nvmap_handle_put(h); nvmap_handle_put(h);
return err; return err;
} }
@@ -1163,20 +1195,27 @@ int nvmap_ioctl_get_handle_parameters(struct file *filp, void __user *arg)
} }
op.align = handle->align; op.align = handle->align;
op.offset = handle->offs; op.offset = handle->offs;
op.coherency = handle->flags; op.coherency = handle->flags;
is_ro = is_nvmap_id_ro(client, op.handle); if (is_nvmap_id_ro(client, op.handle, &is_ro) != 0) {
pr_err("Handle ID RO check failed\n");
nvmap_handle_put(handle);
goto exit;
}
if (is_ro) if (is_ro)
op.access_flags = NVMAP_HANDLE_RO; op.access_flags = NVMAP_HANDLE_RO;
op.serial_id = handle->serial_id; op.serial_id = handle->serial_id;
nvmap_handle_put(handle);
if (copy_to_user(arg, &op, sizeof(op))) if (copy_to_user(arg, &op, sizeof(op))) {
return -EFAULT; pr_err("Failed to copy to userspace\n");
nvmap_handle_put(handle);
goto exit;
}
nvmap_handle_put(handle);
return 0; return 0;
exit: exit:
@@ -1201,7 +1240,11 @@ int nvmap_ioctl_get_sci_ipc_id(struct file *filp, void __user *arg)
if (IS_ERR_OR_NULL(handle)) if (IS_ERR_OR_NULL(handle))
return -ENODEV; return -ENODEV;
is_ro = is_nvmap_id_ro(client, op.handle); if (is_nvmap_id_ro(client, op.handle, &is_ro) != 0) {
pr_err("Handle ID RO check failed\n");
ret = -EINVAL;
goto exit;
}
/* Cannot create RW handle from RO handle */ /* Cannot create RW handle from RO handle */
if (is_ro && (op.flags != PROT_READ)) { if (is_ro && (op.flags != PROT_READ)) {
@@ -1223,15 +1266,17 @@ int nvmap_ioctl_get_sci_ipc_id(struct file *filp, void __user *arg)
pr_err("copy_to_user failed\n"); pr_err("copy_to_user failed\n");
ret = -EINVAL; ret = -EINVAL;
} }
exit:
nvmap_handle_put(handle);
dmabuf = is_ro ? handle->dmabuf_ro : handle->dmabuf;
if (!ret) exit:
if (!ret) {
dmabuf = is_ro ? handle->dmabuf_ro : handle->dmabuf;
trace_refcount_get_sci_ipc_id(handle, dmabuf, trace_refcount_get_sci_ipc_id(handle, dmabuf,
atomic_read(&handle->ref), atomic_read(&handle->ref),
atomic_long_read(&dmabuf->file->f_count), atomic_long_read(&dmabuf->file->f_count),
is_ro ? "RO" : "RW"); is_ro ? "RO" : "RW");
}
nvmap_handle_put(handle);
return ret; return ret;
} }
@@ -1400,10 +1445,16 @@ int nvmap_ioctl_dup_handle(struct file *filp, void __user *arg)
if (!client) if (!client)
return -ENODEV; return -ENODEV;
if (is_nvmap_id_ro(client, op.handle, &is_ro) != 0) {
pr_err("Handle ID RO check failed\n");
return -EINVAL;
}
/* Don't allow duplicating RW handle from RO handle */ /* Don't allow duplicating RW handle from RO handle */
if (is_nvmap_id_ro(client, op.handle) && if (is_ro && op.access_flags != NVMAP_HANDLE_RO) {
op.access_flags != NVMAP_HANDLE_RO) pr_err("Duplicating RW handle from RO handle is not allowed\n");
return -EPERM; return -EPERM;
}
is_ro = (op.access_flags == NVMAP_HANDLE_RO); is_ro = (op.access_flags == NVMAP_HANDLE_RO);
if (!is_ro) if (!is_ro)
@@ -1412,11 +1463,17 @@ int nvmap_ioctl_dup_handle(struct file *filp, void __user *arg)
ref = nvmap_dup_handle_ro(client, op.handle); ref = nvmap_dup_handle_ro(client, op.handle);
if (!IS_ERR(ref)) { if (!IS_ERR(ref)) {
/*
* Increase reference dup count, so that handle is not freed accidentally
* due to other thread calling NvRmMemHandleFree
*/
atomic_inc(&ref->dupes);
dmabuf = is_ro ? ref->handle->dmabuf_ro : ref->handle->dmabuf; dmabuf = is_ro ? ref->handle->dmabuf_ro : ref->handle->dmabuf;
handle = ref->handle; handle = ref->handle;
if (client->ida) { if (client->ida) {
if (nvmap_id_array_id_alloc(client->ida, if (nvmap_id_array_id_alloc(client->ida,
&id, dmabuf) < 0) { &id, dmabuf) < 0) {
atomic_dec(&ref->dupes);
if (dmabuf) if (dmabuf)
dma_buf_put(dmabuf); dma_buf_put(dmabuf);
if (handle) if (handle)
@@ -1427,6 +1484,7 @@ int nvmap_ioctl_dup_handle(struct file *filp, void __user *arg)
op.dup_handle = id; op.dup_handle = id;
if (copy_to_user(arg, &op, sizeof(op))) { if (copy_to_user(arg, &op, sizeof(op))) {
atomic_dec(&ref->dupes);
if (dmabuf) if (dmabuf)
dma_buf_put(dmabuf); dma_buf_put(dmabuf);
if (handle) if (handle)
@@ -1459,6 +1517,9 @@ out:
atomic_read(&handle->ref), atomic_read(&handle->ref),
atomic_long_read(&dmabuf->file->f_count), atomic_long_read(&dmabuf->file->f_count),
is_ro ? "RO" : "RW"); is_ro ? "RO" : "RW");
if (!IS_ERR(ref))
atomic_dec(&ref->dupes);
return ret; return ret;
} }


@@ -530,9 +530,9 @@ struct nvmap_handle_ref *nvmap_create_handle_from_va(struct nvmap_client *client
struct nvmap_handle_ref *nvmap_dup_handle_ro(struct nvmap_client *client, struct nvmap_handle_ref *nvmap_dup_handle_ro(struct nvmap_client *client,
int fd); int fd);
bool is_nvmap_dmabuf_fd_ro(int fd); int is_nvmap_dmabuf_fd_ro(int fd, bool *is_ro);
bool is_nvmap_id_ro(struct nvmap_client *client, int id); int is_nvmap_id_ro(struct nvmap_client *client, int id, bool *is_ro);
struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client, struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,
struct nvmap_handle *h, bool skip_val, struct nvmap_handle *h, bool skip_val,


@@ -286,9 +286,15 @@ int nvmap_get_handle_from_sci_ipc_id(struct nvmap_client *client, u32 flags,
if (!IS_ERR(ref)) { if (!IS_ERR(ref)) {
u32 id = 0; u32 id = 0;
/*
* Increase reference dup count, so that handle is not freed accidentally
* due to other thread calling NvRmMemHandleFree
*/
atomic_inc(&ref->dupes);
dmabuf = is_ro ? h->dmabuf_ro : h->dmabuf; dmabuf = is_ro ? h->dmabuf_ro : h->dmabuf;
if (client->ida) { if (client->ida) {
if (nvmap_id_array_id_alloc(client->ida, &id, dmabuf) < 0) { if (nvmap_id_array_id_alloc(client->ida, &id, dmabuf) < 0) {
atomic_dec(&ref->dupes);
if (dmabuf) if (dmabuf)
dma_buf_put(dmabuf); dma_buf_put(dmabuf);
nvmap_free_handle(client, h, is_ro); nvmap_free_handle(client, h, is_ro);
@@ -302,6 +308,7 @@ int nvmap_get_handle_from_sci_ipc_id(struct nvmap_client *client, u32 flags,
} else { } else {
fd = nvmap_get_dmabuf_fd(client, h, is_ro); fd = nvmap_get_dmabuf_fd(client, h, is_ro);
if (IS_ERR_VALUE((uintptr_t)fd)) { if (IS_ERR_VALUE((uintptr_t)fd)) {
atomic_dec(&ref->dupes);
if (dmabuf) if (dmabuf)
dma_buf_put(dmabuf); dma_buf_put(dmabuf);
nvmap_free_handle(client, h, is_ro); nvmap_free_handle(client, h, is_ro);
@@ -340,6 +347,9 @@ unlock:
trace_refcount_get_handle_from_sci_ipc_id(h, dmabuf, trace_refcount_get_handle_from_sci_ipc_id(h, dmabuf,
atomic_read(&h->ref), atomic_read(&h->ref),
is_ro ? "RO" : "RW"); is_ro ? "RO" : "RW");
if (!IS_ERR(ref))
atomic_dec(&ref->dupes);
} }
return ret; return ret;