video: tegra: nvmap: Handle pages mapped using remap_pfn_range in alloc from VA

OpenRM maps the buffer with remap_pfn_range and then its user VA is
passed to libnvrm_mem to create a handle out of it. NvMap uses
get_user_pages to get user pages from the VA. It fails for the buffer
mapped with remap_pfn_range. Hence use the follow_pfn/follow_pfnmap_start
functions to obtain the pfn from the VA and then derive the page pointer
from it.

Bug 5007238

Change-Id: I45a6ede6b5f164e537f014d9db3475b4c6d89307
Signed-off-by: Ketan Patil <ketanp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3298720
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Lakshmanan Selvi Muthusamy <lm@nvidia.com>
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
This commit is contained in:
Ketan Patil
2025-02-07 16:40:10 +00:00
committed by Jon Hunter
parent db0d9da92f
commit d256920b00

View File

@@ -351,6 +351,56 @@ static const unsigned int heap_policy_excl[] = {
0, 0,
}; };
/*
 * nvmap_page_from_vma - resolve a user virtual address in a VM_PFNMAP vma
 * to its backing struct page.
 *
 * Used for buffers that were mapped with remap_pfn_range(), where
 * get_user_pages() fails.  Resolves @vaddr to a pfn, validates the pfn,
 * and stores the corresponding page pointer in @page with a reference
 * held (the caller is expected to drop it with put_page()).
 *
 * NOTE(review): follow_pfn()/follow_pfnmap_start() walk the vma's page
 * tables, so the caller presumably must hold the mmap lock — confirm
 * against callers (nvmap_get_user_pages documents this requirement).
 *
 * Returns 0 on success, -EINVAL on any failure.
 */
static int nvmap_page_from_vma(struct vm_area_struct *vma, ulong vaddr, struct page **page)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 12, 0)
/* follow_pfn() is gone in v6.12+; use the follow_pfnmap_start()/end()
 * API instead.  Every successful follow_pfnmap_start() below is paired
 * with a follow_pfnmap_end() on both the success and failure paths. */
unsigned long pfn;
struct follow_pfnmap_args args = {
.vma = vma,
.address = vaddr,
};
if (follow_pfnmap_start(&args)) {
pr_err("follow_pfnmap_start failed\n");
goto fail;
}
pfn = args.pfn;
/* Reject pfns that are not kernel-mapped memory: pfn_to_page() would
 * not yield a usable struct page for them.  Must end the pfnmap walk
 * before bailing out. */
if (!pfn_is_map_memory(pfn)) {
follow_pfnmap_end(&args);
pr_err("pfn_is_map_memory failed\n");
goto fail;
}
*page = pfn_to_page(pfn);
/* Take the reference before ending the walk — presumably so the page
 * cannot be freed between the lookup and get_page(); confirm against
 * follow_pfnmap_start()/end() locking rules. */
get_page(*page);
follow_pfnmap_end(&args);
return 0;
fail:
return -EINVAL;
#else
/* Pre-v6.12 kernels: follow_pfn() resolves @vaddr directly to a pfn. */
unsigned long pfn;
if (follow_pfn(vma, vaddr, &pfn)) {
pr_err("follow_pfn failed\n");
goto fail;
}
/* Same validity check as the v6.12+ path: only kernel-mapped memory
 * has a usable struct page. */
if (!pfn_is_map_memory(pfn)) {
pr_err("no-map memory not allowed\n");
goto fail;
}
*page = pfn_to_page(pfn);
/* Hold a reference for the caller; callers release with put_page(). */
get_page(*page);
return 0;
fail:
return -EINVAL;
#endif
}
/* must be called with mmap_sem held for read or write */ /* must be called with mmap_sem held for read or write */
int nvmap_get_user_pages(ulong vaddr, int nvmap_get_user_pages(ulong vaddr,
size_t nr_page, struct page **pages, size_t nr_page, struct page **pages,
@@ -393,6 +443,26 @@ int nvmap_get_user_pages(ulong vaddr,
user_pages); user_pages);
while (--user_pages >= 0) while (--user_pages >= 0)
put_page(pages[user_pages]); put_page(pages[user_pages]);
/*
* OpenRM case: When buffer is mapped using remap_pfn_range
*/
if (vma->vm_flags & VM_PFNMAP) {
user_pages = 0;
while (user_pages < nr_page) {
ret = nvmap_page_from_vma(vma, vaddr, &pages[user_pages]);
if (ret)
break;
vaddr += PAGE_SIZE;
user_pages++;
}
if (ret) {
while (--user_pages >= 0)
put_page(pages[user_pages]);
}
}
} }
return ret; return ret;