video: tegra: nvmap: Add CBC carveout support

- Add CBC carveout support in nvmap.
- The chunk size for CBC is 2MB, so each bit in the CBC carveout bitmap
represents one physically contiguous 2MB chunk.
- When allocating from CBC, first try to allocate the entire region as one
physically contiguous block; if that is not possible, fall back to
allocating in 2MB chunks. The page pointers to these chunks are stored in
the nvmap_handle struct (see the sketch after this list).
- Modify all other operations, such as vmap, kmap, and mmap, to honor these
restrictions.
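
The allocation policy above can be sketched as follows. This is a minimal
userspace model for illustration only, not the nvmap code: the carveout
size, and the names find_contiguous and cbc_alloc, are invented for the
sketch. The bitmap plays the role of the CBC carveout bitmap, and the
recorded chunk indices stand in for the page pointers kept in nvmap_handle.

/*
 * Illustrative sketch (not the driver code): a bitmap allocator where
 * each bit represents one 2MB physically contiguous chunk, with a
 * first-fit search for a fully contiguous run and a fallback to
 * individual 2MB chunks.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

#define CHUNK_SHIFT 21                    /* 2MB chunk size */
#define CHUNK_SIZE  (1UL << CHUNK_SHIFT)
#define NR_CHUNKS   64                    /* hypothetical 128MB carveout */

static bool bitmap[NR_CHUNKS];            /* true = chunk in use */

/* Find 'n' consecutive free chunks; return the first index, or -1. */
static int find_contiguous(int n)
{
	for (int i = 0; i + n <= NR_CHUNKS; i++) {
		int j;

		for (j = 0; j < n && !bitmap[i + j]; j++)
			;
		if (j == n)
			return i;
	}
	return -1;
}

/*
 * Allocate 'size' bytes: first try one contiguous run; if that fails,
 * fall back to individual 2MB chunks, recording each chunk index in
 * 'chunks' (the analogue of the page pointers kept in nvmap_handle).
 * Returns the number of chunks allocated, or -1 on failure.
 */
static int cbc_alloc(size_t size, int *chunks)
{
	int n = (int)((size + CHUNK_SIZE - 1) >> CHUNK_SHIFT);
	int start = find_contiguous(n);
	int got = 0;

	if (start >= 0) {                 /* fully contiguous allocation */
		for (int i = 0; i < n; i++) {
			bitmap[start + i] = true;
			chunks[i] = start + i;
		}
		return n;
	}

	/* Fallback: pick any free 2MB chunks, one at a time. */
	for (int i = 0; i < NR_CHUNKS && got < n; i++) {
		if (!bitmap[i]) {
			bitmap[i] = true;
			chunks[got++] = i;
		}
	}
	if (got < n) {                    /* not enough memory: roll back */
		for (int i = 0; i < got; i++)
			bitmap[chunks[i]] = false;
		return -1;
	}
	return n;
}

int main(void)
{
	int chunks[NR_CHUNKS];
	int n = cbc_alloc(6 * CHUNK_SIZE, chunks);

	printf("allocated %d chunks:", n);
	for (int i = 0; i < n; i++)
		printf(" %d", chunks[i]);
	printf("\n");
	return 0;
}

The real allocator operates on physical addresses with the kernel's bitmap
helpers; the sketch only captures the contiguous-first, 2MB-chunk-fallback
policy.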

Bug 3956637

Change-Id: I7c304b0127c8fef028e135a4662ab3ad3dc1d1f6
Signed-off-by: Ketan Patil <ketanp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2880662
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2885806
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Laxman Dewangan <ldewangan@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Tested-by: Laxman Dewangan <ldewangan@nvidia.com>
Author: Ketan Patil
Date: 2023-03-13 10:26:51 +00:00
Committed-by: mobile promotions
Commit: 6937db210f (parent 7e15a9bb58)
10 changed files with 303 additions and 108 deletions

drivers/video/tegra/nvmap/nvmap_fault.c

@@ -1,7 +1,7 @@
 /*
  * drivers/video/tegra/nvmap/nvmap_fault.c
  *
- * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -207,8 +207,9 @@ static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (offs >= priv->handle->size)
 		return VM_FAULT_SIGBUS;
 
-	if (!priv->handle->heap_pgalloc) {
+	if (!priv->handle->pgalloc.pages) {
 		unsigned long pfn;
+
 		BUG_ON(priv->handle->carveout->base & ~PAGE_MASK);
 		pfn = ((priv->handle->carveout->base + offs) >> PAGE_SHIFT);
 		if (!pfn_valid(pfn)) {
@@ -220,38 +221,51 @@ static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		page = pfn_to_page(pfn);
 	} else {
 		void *kaddr;
+		unsigned long pfn;
 
-		offs >>= PAGE_SHIFT;
-		if (atomic_read(&priv->handle->pgalloc.reserved))
-			return VM_FAULT_SIGBUS;
-		page = nvmap_to_page(priv->handle->pgalloc.pages[offs]);
+		if (priv->handle->heap_type != NVMAP_HEAP_IOVMM) {
+			offs >>= PAGE_SHIFT;
+			page = priv->handle->pgalloc.pages[offs];
+			pfn = page_to_pfn(page);
+			if (!pfn_valid(pfn)) {
+				vm_insert_pfn(vma,
+					(unsigned long)vmf_address, pfn);
+				return VM_FAULT_NOPAGE;
+			}
+		} else {
+			offs >>= PAGE_SHIFT;
+			if (atomic_read(&priv->handle->pgalloc.reserved))
+				return VM_FAULT_SIGBUS;
+			page = nvmap_to_page(priv->handle->pgalloc.pages[offs]);
 
-		if (PageAnon(page) && (vma->vm_flags & VM_SHARED))
-			return VM_FAULT_SIGSEGV;
+			if (PageAnon(page)) {
+				if (vma->vm_flags & VM_SHARED)
+					return VM_FAULT_SIGSEGV;
+			}
 
-		if (!nvmap_handle_track_dirty(priv->handle))
-			goto finish;
-		mutex_lock(&priv->handle->lock);
-		if (nvmap_page_dirty(priv->handle->pgalloc.pages[offs])) {
-			mutex_unlock(&priv->handle->lock);
-			goto finish;
-		}
+			if (!nvmap_handle_track_dirty(priv->handle))
+				goto finish;
+			mutex_lock(&priv->handle->lock);
+			if (nvmap_page_dirty(priv->handle->pgalloc.pages[offs])) {
+				mutex_unlock(&priv->handle->lock);
+				goto finish;
+			}
 
-		/* inner cache maint */
-		kaddr = kmap(page);
-		BUG_ON(!kaddr);
-		inner_cache_maint(NVMAP_CACHE_OP_WB_INV, kaddr, PAGE_SIZE);
-		kunmap(page);
+			/* inner cache maint */
+			kaddr = kmap(page);
+			BUG_ON(!kaddr);
+			inner_cache_maint(NVMAP_CACHE_OP_WB_INV, kaddr, PAGE_SIZE);
+			kunmap(page);
 
-		if (priv->handle->flags & NVMAP_HANDLE_INNER_CACHEABLE)
-			goto make_dirty;
+			if (priv->handle->flags & NVMAP_HANDLE_INNER_CACHEABLE)
+				goto make_dirty;
 
 make_dirty:
-		nvmap_page_mkdirty(&priv->handle->pgalloc.pages[offs]);
-		atomic_inc(&priv->handle->pgalloc.ndirty);
-		mutex_unlock(&priv->handle->lock);
+			nvmap_page_mkdirty(&priv->handle->pgalloc.pages[offs]);
+			atomic_inc(&priv->handle->pgalloc.ndirty);
+			mutex_unlock(&priv->handle->lock);
+		}
 	}
 
 finish:
 	if (page)
 		get_page(page);
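
The routing in the hunk above can be summarized with a small userspace
model (illustrative only; fault_route and its arguments are invented for
the sketch, not kernel API): handles without a pages array keep the
original carveout base + offset arithmetic; carveout handles that now
carry a pages array (the chunked CBC case) insert the raw PFN when no
valid struct page backs the memory; IOVMM handles keep the dirty-tracking
and cache-maintenance path.

/* Illustrative model of the updated fault routing; not kernel code. */
#include <stdio.h>
#include <stdbool.h>

enum heap_type { HEAP_IOVMM, HEAP_CARVEOUT };

/* Describe the path the updated handler would take for a fault. */
static const char *fault_route(enum heap_type type, bool has_pages,
			       bool pfn_valid)
{
	if (!has_pages)
		return "PFN from carveout base + offset"; /* original path */
	if (type != HEAP_IOVMM) {
		if (!pfn_valid)                           /* new CBC path */
			return "vm_insert_pfn(), VM_FAULT_NOPAGE";
		return "get_page() on the backing struct page";
	}
	return "IOVMM path: dirty tracking + cache maintenance";
}

int main(void)
{
	/* A chunked CBC allocation whose memory has no struct page. */
	printf("%s\n", fault_route(HEAP_CARVEOUT, true, false));
	return 0;
}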