Files
linux-nv-oot/drivers/video/tegra/nvmap/nvmap_fault.c
Laxman Dewangan 4cf8c80669 nvmap: Copy drivers and headers from kernel/nvidia
Copy the driver and header sources of the nvmap to
kernel/nvidia-oot from kernel/nvidia as part of
removing the dependency on kernel/nvidia for OOT drivers.

The latest (few) git history of the files copied are
	b7a355916 video: tegra: nvmap: Fix type casting issue
	2128c5433 video: tegra: nvmap: Fix type casting issues
	0cd082559 video: tegra: nvmap: Change peer vm id data type
	4bd7ece67 tegra: nvmap: mark ivm carveout pages occupied
	e86f3630a video: tegra: nvmap: Fix type casting issue
	c43a23e58 video: tegra: nvmap: Fix type casting issue
	ca1dda22e video: tegra: nvmap: Fix type casting issue
	1f567abfe video: tegra: nvmap: Fix wrap up condition
	29db4d31c video: tegra: nvmap: Remove unnecessary debugfs
	fe72f1413 video: tegra: nvmap: Remove get_drv_data() call
	3b0fc79e7 video: tegra: nvmap: Fix coverity defect
	3cc0ce41b video: tegra: nvmap: Fix coverity defect
	6da39e966 video: tegra: nvmap: Fix WARN_ON condition
	a16351ff1 video: tegra: nvmap: Remove dead code
	9993f2d2d video: tegra: nvmap: Update print level
	6066a2077 video: tegra: nvmap: Remove nvmap_debug_lru_allocations_show
	3cdf2b7ba video: tegra: nvmap: Add kernel version check
	716ded4fc video: tegra: nvmap: Initialize the return value
	9b6c1b4ab video: tegra: nvmap: Correct debugfs code
	33e70118b video: tegra: nvmap: Fix Cert-C error handling bug
	7b960ed79 video: tegra: nvmap: Fix Cert-C error handling bug
	945dc1471 video: tegra: nvmap: Fix Cert-C error handling bug
	31e572de2 video: tegra: nvmap: Fix Cert-C error handling bug
	1f25cbf68 video: tegra: nvmap: Fix Cert-C error handling bug
	fa5428107 video: tegra: nvmap: Remove nvmap_handle_get_from_fd
	df73f2208 video: tegra: nvmap: Protect kmap/kunmap code
	9842e7c6a video: tegra: nvmap: Remove t19x dma_buf map/unmap
	06dff1a8d video: tegra: nvmap: Remove unnecessary export symbols
	6f097f86b video: tegra: nvmap: Fix Cert-C error handling bug
	f14171608 video: tegra: nvmap: load nvmap for T23x compatible platforms
	266812814 video: tegra: nvmap: Get rid of NVMAP_CONFIG_KSTABLE_KERNEL
	1b38c0887 nvmap: Don't use NV_BUILD_KERNEL_OPTIONS
	0ab8dc032 video: tegra: nvmap: Reintroduce NVMAP_CONFIG_VPR_RESIZE
	cc8db9797 driver: platform: tegra: Separate out vpr code
	28955d95c video/tegra: nvmap: Enable build as OOT module
	876d1fbb8 video: tegra: nvmap: Remove IS_ENABLED check
	5ea30867a nvmap: Add support to build as module from OOT kernel
	a71ad020e video: tegra: nvmap: Protect tegra_vpr args under config
	e70061cc1 video: tegra: nvmap: Do not export cvnas_dev
	d2a26ff36 video: tegra: nvmap: Include missing header
	692e4f682 video: tegra: nvmap: Update page coloring algo
	2b9dbb911 video: tegra: nvmap: Check for return value
	de8de12b6 video: tegra: nvmap: Enable legacy init support
	65d478158 video: tegra: nvmap: Remove dependency of cvnas
	38bdd6f05 video: tegra: nvmap: Make nvmap as loadable module
	9668e410b video: tegra: nvmap: Enable handle as ID
	11c6cbd23 tegra: nvmap: Fix build for Linux v5.18
	fbd95c3ab linux: nvmap: change ivm_handle to u32
	eb1e2c302 video: tegra: nvmap: Fix NVSCIIPC support
	022689b29 tegra: nvmap: return error if handle as ID enabled but id is fd
	19e5106ed video: tegra: nvmap: Don't treat ivm as reserved mem carveouts

Bug 4038415

Change-Id: I7108aec3b8532fe79c9423c2835744b1213719e8
Signed-off-by: Laxman Dewangan <ldewangan@nvidia.com>
2023-04-11 05:47:21 +00:00

262 lines
7.0 KiB
C

/*
* drivers/video/tegra/nvmap/nvmap_fault.c
*
* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <trace/events/nvmap.h>
#include <linux/highmem.h>
#include "nvmap_priv.h"
static void nvmap_vma_close(struct vm_area_struct *vma);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
#define __atomic_add_unless atomic_fetch_add_unless
static vm_fault_t nvmap_vma_fault(struct vm_fault *vmf);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
static int nvmap_vma_fault(struct vm_fault *vmf);
#else
static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
#endif
/*
 * VM operations installed on every nvmap user-space mapping.
 * is_nvmap_vma() below identifies nvmap VMAs by comparing against this
 * table, so there must be exactly one instance of it.
 */
struct vm_operations_struct nvmap_vma_ops = {
	.open = nvmap_vma_open,
	.close = nvmap_vma_close,
	.fault = nvmap_vma_fault,
};
/*
 * Report whether @vma is an nvmap mapping: true exactly when its vm_ops
 * table is nvmap's own nvmap_vma_ops.
 */
int is_nvmap_vma(struct vm_area_struct *vma)
{
	const struct vm_operations_struct *ops = vma->vm_ops;

	return (ops == &nvmap_vma_ops) ? 1 : 0;
}
/* To ensure that the backing store for the VMA isn't freed while a fork'd
 * reference still exists, nvmap_vma_open increments the reference count on
 * the handle, and nvmap_vma_close decrements it. Alternatively, we could
 * disallow copying of the vma, or behave like pmem and zap the pages. FIXME.
 */
void nvmap_vma_open(struct vm_area_struct *vma)
{
	struct nvmap_vma_priv *priv;
	struct nvmap_handle *h;
	struct nvmap_vma_list *vma_list, *tmp;
	struct list_head *tmp_head = NULL;
	pid_t current_pid = task_tgid_nr(current);
	bool vma_pos_found = false;
	size_t nr_page, i;
	int vma_open_count;

	/* vm_private_data is set up at mmap time; a missing priv or handle
	 * here is a driver invariant violation, hence BUG_ON. */
	priv = vma->vm_private_data;
	BUG_ON(!priv);
	BUG_ON(!priv->handle);

	h = priv->handle;
	nvmap_umaps_inc(h);
	mutex_lock(&h->lock);
	vma_open_count = atomic_inc_return(&priv->count);
	/* First open of this mapping on a page-allocated handle: bump each
	 * page's _mapcount by hand (undone in nvmap_vma_close). */
	if (vma_open_count == 1 && h->heap_pgalloc) {
		nr_page = h->size >> PAGE_SHIFT;
		for (i = 0; i < nr_page; i++) {
			struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
			/* This is necessary to avoid page being accounted
			 * under NR_FILE_MAPPED. This way NR_FILE_MAPPED would
			 * be fully accounted under NR_FILE_PAGES. This allows
			 * Android low mem killer detect low memory condition
			 * precisely.
			 * This has a side effect of inaccurate pss accounting
			 * for NvMap memory mapped into user space. Android
			 * procrank and NvMap Procrank both would have same
			 * issue. Subtracting NvMap_Procrank pss from
			 * procrank pss would give non-NvMap pss held by process
			 * and adding NvMap memory used by process represents
			 * entire memory consumption by the process.
			 */
			atomic_inc(&page->_mapcount);
		}
	}
	mutex_unlock(&h->lock);

	/* Allocate the tracking node outside h->lock (GFP_KERNEL may sleep). */
	vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
	if (vma_list) {
		mutex_lock(&h->lock);
		tmp_head = &h->vmas;
		/* insert vma into handle's vmas list in the increasing order of
		 * handle offsets
		 */
		list_for_each_entry(tmp, &h->vmas, list) {
			/* if vma exists in list, just increment refcount */
			if (tmp->vma == vma) {
				atomic_inc(&tmp->ref);
				kfree(vma_list);
				goto unlock;
			}
			/* Only order against entries of the current process;
			 * once the slot is found, stop updating tmp_head. */
			if (!vma_pos_found && (current_pid == tmp->pid)) {
				if (vma->vm_pgoff < tmp->vma->vm_pgoff) {
					tmp_head = &tmp->list;
					vma_pos_found = true;
				} else {
					tmp_head = tmp->list.next;
				}
			}
		}
		vma_list->vma = vma;
		vma_list->pid = current_pid;
		vma_list->save_vm_flags = vma->vm_flags;
		atomic_set(&vma_list->ref, 1);
		list_add_tail(&vma_list->list, tmp_head);
unlock:
		mutex_unlock(&h->lock);
	} else {
		/* Allocation failure: the vma stays usable but untracked;
		 * warn loudly instead of failing the open. */
		WARN(1, "vma not tracked");
	}
}
/*
 * VMA close: undo nvmap_vma_open. Drops this vma's node from the handle's
 * tracking list, and on the last close of the mapping reverses the
 * per-page _mapcount accounting and releases the handle reference.
 */
static void nvmap_vma_close(struct vm_area_struct *vma)
{
	struct nvmap_vma_priv *priv = vma->vm_private_data;
	struct nvmap_vma_list *vma_list;
	struct nvmap_handle *h;
	bool vma_found = false;
	size_t nr_page, i;

	if (!priv)
		return;
	BUG_ON(!priv->handle);

	h = priv->handle;
	nr_page = h->size >> PAGE_SHIFT;
	mutex_lock(&h->lock);
	/* Find this vma's tracking node; free it when its last ref drops. */
	list_for_each_entry(vma_list, &h->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		if (atomic_dec_return(&vma_list->ref) == 0) {
			list_del(&vma_list->list);
			kfree(vma_list);
		}
		vma_found = true;
		break;
	}
	/* Every closed vma must have been registered by nvmap_vma_open. */
	BUG_ON(!vma_found);
	nvmap_umaps_dec(h);
	/* __atomic_add_unless returns the old value and never drops below 0,
	 * so old value 1 means this is the last user of the mapping. */
	if (__atomic_add_unless(&priv->count, -1, 0) == 1) {
		if (h->heap_pgalloc) {
			/* Reverse the _mapcount bumps done on first open. */
			for (i = 0; i < nr_page; i++) {
				struct page *page;
				page = nvmap_to_page(h->pgalloc.pages[i]);
				atomic_dec(&page->_mapcount);
			}
		}
		mutex_unlock(&h->lock);
		if (priv->handle)
			nvmap_handle_put(priv->handle);
		vma->vm_private_data = NULL;
		kfree(priv);
	} else {
		mutex_unlock(&h->lock);
	}
}
/*
 * Page-fault handler for nvmap mappings.
 *
 * Resolves the faulting address to a page of the backing handle:
 *  - carveout (non-pgalloc) handles: if the pfn has no struct page,
 *    insert the raw PFN and return VM_FAULT_NOPAGE; CMA-backed pfns
 *    fall through to the struct-page path instead.
 *  - pgalloc handles: look up the page; for dirty-tracked handles,
 *    perform inner cache maintenance the first time a page is faulted
 *    and mark it dirty.
 *
 * Returns 0 with vmf->page set (and a page reference taken) on success,
 * VM_FAULT_NOPAGE after a raw PFN insert, or
 * VM_FAULT_SIGBUS/VM_FAULT_SIGSEGV on error.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
static vm_fault_t nvmap_vma_fault(struct vm_fault *vmf)
#define vm_insert_pfn vmf_insert_pfn
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
static int nvmap_vma_fault(struct vm_fault *vmf)
#else
static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#endif
{
	struct page *page;
	struct nvmap_vma_priv *priv;
	unsigned long offs;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
	struct vm_area_struct *vma = vmf->vma;
	unsigned long vmf_address = vmf->address;
#else
	void __user *vmf_address = vmf->virtual_address;
#endif

	offs = (unsigned long)(vmf_address - vma->vm_start);
	priv = vma->vm_private_data;
	if (!priv || !priv->handle || !priv->handle->alloc)
		return VM_FAULT_SIGBUS;

	/* Byte offset into the handle: fault offset within the vma, plus the
	 * mapping's own offset into the handle. */
	offs += priv->offs;
	/* if the VMA was split for some reason, vm_pgoff will be the VMA's
	 * offset from the original VMA */
	offs += (vma->vm_pgoff << PAGE_SHIFT);
	if (offs >= priv->handle->size)
		return VM_FAULT_SIGBUS;

	if (!priv->handle->heap_pgalloc) {
		unsigned long pfn;

		BUG_ON(priv->handle->carveout->base & ~PAGE_MASK);
		pfn = ((priv->handle->carveout->base + offs) >> PAGE_SHIFT);
		if (!pfn_valid(pfn)) {
			/* NOTE(review): the insert's return value is ignored
			 * and NOPAGE is reported unconditionally; consider
			 * propagating vmf_insert_pfn()'s result on v5.4+. */
			vm_insert_pfn(vma,
				(unsigned long)vmf_address, pfn);
			return VM_FAULT_NOPAGE;
		}
		/* CMA memory would get here */
		page = pfn_to_page(pfn);
	} else {
		void *kaddr;

		offs >>= PAGE_SHIFT;
		if (atomic_read(&priv->handle->pgalloc.reserved))
			return VM_FAULT_SIGBUS;
		page = nvmap_to_page(priv->handle->pgalloc.pages[offs]);
		/* Anonymous page in a shared mapping is never valid here. */
		if (PageAnon(page) && (vma->vm_flags & VM_SHARED))
			return VM_FAULT_SIGSEGV;
		if (!nvmap_handle_track_dirty(priv->handle))
			goto finish;
		mutex_lock(&priv->handle->lock);
		if (nvmap_page_dirty(priv->handle->pgalloc.pages[offs])) {
			mutex_unlock(&priv->handle->lock);
			goto finish;
		}
		/* inner cache maint */
		kaddr = kmap(page);
		BUG_ON(!kaddr);
		inner_cache_maint(NVMAP_CACHE_OP_WB_INV, kaddr, PAGE_SIZE);
		kunmap(page);
		/* The page is marked dirty unconditionally. (A dead
		 * "if (INNER_CACHEABLE) goto make_dirty;" that jumped to the
		 * very next statement was removed here — both arms of that
		 * branch reached the same code.) */
		nvmap_page_mkdirty(&priv->handle->pgalloc.pages[offs]);
		atomic_inc(&priv->handle->pgalloc.ndirty);
		mutex_unlock(&priv->handle->lock);
	}
finish:
	if (page)
		get_page(page);
	vmf->page = page;
	return (page) ? 0 : VM_FAULT_SIGBUS;
}