mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git
nvmap: Don't free pages while freeing subhandle
This patch fixes the following issues:

- When the handle associated with the sub-buffer is freed,
  nvmap_page_pool_fill_lots() is called, which would return the pages to
  the pool even while their refcount is > 1 and the main handle has not
  been freed. Add a check so a sub-handle does not release its pages.
- In the CPU unmap code, list_for_each_entry() is used to iterate over
  the vma list while list_del() is also called in the same loop, which
  can lead to undefined behavior. Use list_for_each_entry_safe() instead,
  which is safe against removal of the current list entry.
- The mutex of the sub-handle is not initialized; fix this by
  initializing it.
- Set the error value when handle creation fails.

Bug 3494980

Change-Id: I0659d7f70b44814e87e3081702352e891d9191f7
Signed-off-by: Ketan Patil <ketanp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2824668
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: Puneet Saxena <puneets@nvidia.com>
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
committed by Laxman Dewangan
parent abb244025f
commit 85f7def3a6
@@ -1037,7 +1037,7 @@ void _nvmap_handle_free(struct nvmap_handle *h)
 		h->pgalloc.pages[i] = nvmap_to_page(h->pgalloc.pages[i]);
 
 #ifdef NVMAP_CONFIG_PAGE_POOLS
-	if (!h->from_va)
+	if (!h->from_va && !h->is_subhandle)
 		page_index = nvmap_page_pool_fill_lots(&nvmap_dev->pool,
 				h->pgalloc.pages, nr_page);
 #endif
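Context for this hunk: a sub-handle shares its pages with a parent handle, so returning them to the page pool when only the sub-handle dies would recycle memory the parent still owns. Below is a minimal userspace sketch of that ownership rule; every name (struct handle, pool_fill, handle_free) is an illustrative stand-in, not the actual nvmap implementation.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for nvmap's types; not the real definitions. */
struct page { int refcount; };

struct handle {
	bool from_va;       /* pages came from a user VA, not the allocator */
	bool is_subhandle;  /* pages are shared with a parent handle */
	struct page **pages;
	size_t nr_page;
};

/* Stand-in for nvmap_page_pool_fill_lots(): recycles pages for reuse. */
static void pool_fill(struct page **pages, size_t nr_page)
{
	printf("recycled %zu page(s) into the pool\n", nr_page);
}

static void handle_free(struct handle *h)
{
	size_t i;

	for (i = 0; i < h->nr_page; i++)
		h->pages[i]->refcount--;

	/* The fix: only a handle that owns its pages outright may refill
	 * the pool. A sub-handle's pages are still referenced by the
	 * parent, so recycling them here would hand out live memory. */
	if (!h->from_va && !h->is_subhandle)
		pool_fill(h->pages, h->nr_page);
}

int main(void)
{
	struct page pg = { .refcount = 2 };	/* parent + sub-handle */
	struct page *pages[] = { &pg };
	struct handle sub = {
		.is_subhandle = true, .pages = pages, .nr_page = 1,
	};

	handle_free(&sub);	/* drops the ref; pool stays untouched */
	printf("refcount now %d\n", pg.refcount);	/* 1: parent still owns it */
	return 0;
}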
@@ -130,7 +130,7 @@ unlock:
 static void nvmap_vma_close(struct vm_area_struct *vma)
 {
 	struct nvmap_vma_priv *priv = vma->vm_private_data;
-	struct nvmap_vma_list *vma_list;
+	struct nvmap_vma_list *vma_list, *tmp_list;
 	struct nvmap_handle *h;
 	bool vma_found = false;
 	size_t nr_page, i;
@@ -144,7 +144,7 @@ static void nvmap_vma_close(struct vm_area_struct *vma)
 	nr_page = h->size >> PAGE_SHIFT;
 
 	mutex_lock(&h->lock);
-	list_for_each_entry(vma_list, &h->vmas, list) {
+	list_for_each_entry_safe(vma_list, tmp_list, &h->vmas, list) {
 		if (vma_list->vma != vma)
 			continue;
 		if (atomic_dec_return(&vma_list->ref) == 0) {
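These two hunks address deleting the current element while walking a list: list_for_each_entry() re-reads the current node to advance, so freeing that node in the loop body is a use-after-free on the next step, while the _safe variant caches the successor first. A self-contained userspace analog is sketched below; the mini list macros are hypothetical stand-ins for the kernel's <linux/list.h>, written only to make the pattern runnable here.

#include <stdio.h>
#include <stdlib.h>

/* Minimal circular doubly linked list, illustrative only. */
struct node {
	int val;
	struct node *next, *prev;
};

/* Unsafe walk: 'pos' is re-read to advance, so the body must not free it. */
#define for_each_node(pos, head) \
	for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

/* Safe walk: 'n' caches the successor before the body runs, so the body
 * may unlink and free 'pos'. This mirrors list_for_each_entry_safe(). */
#define for_each_node_safe(pos, n, head) \
	for ((pos) = (head)->next, (n) = (pos)->next; (pos) != (head); \
	     (pos) = (n), (n) = (pos)->next)

static void unlink_and_free(struct node *p)
{
	p->prev->next = p->next;
	p->next->prev = p->prev;
	free(p);
}

int main(void)
{
	struct node head = { .next = &head, .prev = &head };
	struct node *pos, *tmp;
	int i;

	for (i = 0; i < 3; i++) {	/* build list: 2, 1, 0 */
		struct node *p = malloc(sizeof(*p));
		p->val = i;
		p->next = head.next;
		p->prev = &head;
		head.next->prev = p;
		head.next = p;
	}

	/* Doing this with for_each_node() would dereference freed memory
	 * on the step after unlink_and_free(); the _safe form is required,
	 * exactly as in the nvmap_vma_close() fix above. */
	for_each_node_safe(pos, tmp, &head) {
		if (pos->val == 1)
			unlink_and_free(pos);
	}

	for_each_node(pos, &head)
		printf("%d\n", pos->val);	/* prints 2 then 0 */
	return 0;
}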
@@ -1580,11 +1580,14 @@ int nvmap_ioctl_get_fd_from_list(struct file *filp, void __user *arg)
 	}
 	/* Create new handle for the size */
 	ref = nvmap_create_handle(client, hrange.sz, false);
-	if (IS_ERR_OR_NULL(ref))
+	if (IS_ERR_OR_NULL(ref)) {
+		err = -EINVAL;
 		goto free_hs;
+	}
 
 	ref->handle->orig_size = hrange.sz;
 	h = ref->handle;
+	mutex_init(&h->pg_ref_h_lock);
 
 	/* Assign pages from the handles to newly created nvmap handle */
 	err = nvmap_assign_pages_to_handle(client, hs, h, &hrange);
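This hunk carries two independent fixes: initializing the sub-handle's pg_ref_h_lock mutex before anything can take it, and setting err before jumping to the cleanup label so a failed nvmap_create_handle() no longer reports success. The error-path half is sketched below with stub names (create_handle, get_fd_from_list, free_hs); the real ioctl is more involved.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins; the real code uses nvmap_create_handle() and
 * classifies the result with IS_ERR_OR_NULL(). */
struct handle_ref { int unused; };

static struct handle_ref *create_handle(size_t sz)
{
	(void)sz;
	return NULL;	/* simulate allocation failure */
}

static int get_fd_from_list(size_t sz)
{
	struct handle_ref *ref;
	int err = 0;

	ref = create_handle(sz);
	if (ref == NULL) {
		err = -EINVAL;	/* the fix: record failure before the jump */
		goto free_hs;
	}
	/* ... assign pages to the new handle, create the fd ... */
	free(ref);
free_hs:
	/* cleanup shared with the success path; without the assignment
	 * above, err would still be 0 here and the failure would be
	 * silently reported as success */
	return err;
}

int main(void)
{
	printf("%d\n", get_fd_from_list(4096));	/* -22 (-EINVAL), not 0 */
	return 0;
}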