Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git (synced 2025-12-22 17:25:35 +03:00)
nvmap: Fix coherency issues while creating subhandle

Make sure that all handles from which the sub-buffer handle is to be
created have the same cache coherency, are of type RW, and were not
created from VA. Set the cache coherency of the sub-buffer handle to
match that of the main handles, so that an explicit cache flush is not
needed during a VMA fault.

Bug 3494980

Change-Id: I878568e4cdc4529fb3d1a6682e38798769dad9c0
Signed-off-by: Ketan Patil <ketanp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2819129
Reviewed-by: Puneet Saxena <puneets@nvidia.com>
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
commit abb244025f
parent ec9ee786c4
committed by Laxman Dewangan
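The hunks below implement two things: a compatibility check on the source handles (system-heap pages, RW, not created from VA, identical cache-coherency flags) and propagation of those flags to the sub-handle, which is what lets the VMA fault path drop its explicit cache maintenance. As a minimal standalone sketch of that check and propagation, assuming a hypothetical simplified handle struct (the field names echo nvmap_handle, but the struct, handles_compatible(), and main() are illustrative only, not driver code):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical, simplified stand-in for struct nvmap_handle. */
struct fake_handle {
	unsigned int flags;   /* cache-coherency flags */
	bool heap_pgalloc;    /* backed by system (IOVMM) pages */
	bool from_va;         /* created from a user VA */
	bool is_ro;           /* read-only handle */
};

/*
 * Return true only if every source handle is a system-heap, RW,
 * non-VA allocation carrying the same coherency flags as the first
 * one, mirroring the check added to nvmap_ioctl_get_fd_from_list().
 */
static bool handles_compatible(struct fake_handle *const hs[], size_t n)
{
	unsigned int flags;
	size_t i;

	if (n == 0)
		return false;

	flags = hs[0]->flags;
	for (i = 0; i < n; i++)
		if (!hs[i]->heap_pgalloc || hs[i]->from_va ||
		    hs[i]->is_ro || hs[i]->flags != flags)
			return false;
	return true;
}

int main(void)
{
	struct fake_handle a = { .flags = 1, .heap_pgalloc = true };
	struct fake_handle b = { .flags = 1, .heap_pgalloc = true };
	struct fake_handle *const hs[] = { &a, &b };
	struct fake_handle sub = { 0 };

	if (!handles_compatible(hs, 2)) {
		fprintf(stderr, "incompatible source handles\n");
		return 1;
	}

	/* The sub-handle inherits the parents' coherency flags, so the
	 * fault path no longer needs an explicit cache flush. */
	sub.flags = hs[0]->flags;
	printf("sub-handle flags: %u\n", sub.flags);
	return 0;
}

The actual ioctl additionally rejects the request when only some of the handles pass this check, via the count % op.num_handles test visible in the last hunk.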
@@ -1176,16 +1176,12 @@ static int nvmap_assign_pages_per_handle(struct nvmap_handle *src_h,
 
 	while (src_h_start < src_h_end) {
 		unsigned long next;
-		struct page *src_page;
 		struct page *dest_page;
 
 		dest_h->pgalloc.pages[*pg_cnt] =
 			src_h->pgalloc.pages[src_h_start >> PAGE_SHIFT];
-		src_page = nvmap_to_page(src_h->pgalloc.pages
-					[src_h_start >> PAGE_SHIFT]);
 		dest_page = nvmap_to_page(dest_h->pgalloc.pages[*pg_cnt]);
 		get_page(dest_page);
-		nvmap_clean_cache_page(src_page);
 
 		next = min(((src_h_start + PAGE_SIZE) & PAGE_MASK),
 			   src_h_end);
@@ -1243,11 +1239,11 @@ int nvmap_assign_pages_to_handle(struct nvmap_client *client,
 		start = 0;
 	}
 
+	h->flags = hs[0]->flags;
 	h->heap_type = NVMAP_HEAP_IOVMM;
 	h->heap_pgalloc = true;
 	h->alloc = true;
 	h->is_subhandle = true;
-	atomic_set(&h->pgalloc.ndirty, 0);
 	mb();
 	return err;
 err_h:
@@ -229,13 +229,6 @@ static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (PageAnon(page) && (vma->vm_flags & VM_SHARED))
 		return VM_FAULT_SIGSEGV;
 
-	if (priv->handle->is_subhandle) {
-		pr_err("sub_handle page:%p\n", page);
-		kaddr = kmap(page);
-		BUG_ON(!kaddr);
-		inner_cache_maint(NVMAP_CACHE_OP_WB_INV, kaddr, PAGE_SIZE);
-		kunmap(page);
-	}
 	if (!nvmap_handle_track_dirty(priv->handle))
 		goto finish;
 	mutex_lock(&priv->handle->lock);
@@ -1487,7 +1487,7 @@ int nvmap_ioctl_get_fd_from_list(struct file *filp, void __user *arg)
 	struct nvmap_handle *h = NULL;
 	struct handles_range hrange = {0};
 	size_t tot_hs_size = 0;
-	u32 i, count = 0;
+	u32 i, count = 0, flags = 0;
 	size_t bytes;
 	int err = 0;
 	int fd = -1;
@@ -1552,12 +1552,18 @@ int nvmap_ioctl_get_fd_from_list(struct file *filp, void __user *arg)
 			goto free_hs;
 		}
 
-	/* Check all of the handles from system heap */
+	flags = hs[0]->flags;
+	/*
+	 * Check all of the handles from system heap, are RW, not from VA
+	 * and having same cache coherency
+	 */
 	for (i = 0; i < op.num_handles; i++)
-		if (hs[i]->heap_pgalloc)
+		if (hs[i]->heap_pgalloc && !hs[i]->from_va &&
+		    !hs[i]->is_ro && hs[i]->flags == flags)
 			count++;
 	if (!count || (op.num_handles && count % op.num_handles)) {
-		pr_err("all or none of the handles should be from heap\n");
+		pr_err("all of the handles should be from system heap, of type RW,"
+			" not from VA and having same cache coherency\n");
 		err = -EINVAL;
 		goto free_hs;
 	}