video: tegra: nvmap: Refactor nvmap_dev unit

- Add nvmap_dev.h file, which includes declarations for functions and data
structs that the nvmap_dev unit exposes to other units.
- Also, add nvmap_dev_int.h file, which includes declarations for
functions internal to the nvmap_dev unit that may be called by
files within the nvmap_dev unit.
- Move definition of nvmap_handle_get_from_id, nvmap_install_fd,
find_range_of_handles to nvmap_handle.c as they belong to nvmap_handle
unit.
- Cleanup nvmap_priv.h by moving all relevant items for nvmap_dev unit
to nvmap_dev unit.
- Remove nvmap_mm.c file as nvmap_zap_handle is the only function
present; move it to nvmap_cache.c where it's being called.
- Remove function declarations whose definitions are not present.

JIRA TMM-5694
JIRA TMM-5730

Change-Id: Ifd45235076da2ef0c628f3179d828c0ccadf6df2
Signed-off-by: Ketan Patil <ketanp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3223994
Reviewed-by: Pritesh Raithatha <praithatha@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
This commit is contained in:
Ketan Patil
2024-10-04 11:07:09 +00:00
committed by Jon Hunter
parent da4fcf255a
commit f384316969
22 changed files with 340 additions and 357 deletions

View File

@@ -26,7 +26,6 @@ nvmap-y := nvmap_core.o \
nvmap_ioctl.o \
nvmap_init.o \
nvmap_tag.o \
nvmap_mm.o \
nvmap_stats.o \
nvmap_carveout.o \
nvmap_kasan_wrapper.o

View File

@@ -16,6 +16,7 @@
#include <linux/libnvdimm.h>
#include "nvmap_priv.h"
#include "nvmap_dev.h"
#include "nvmap_alloc.h"
#include "nvmap_alloc_int.h"
#include "nvmap_dmabuf.h"

View File

@@ -19,6 +19,7 @@ __weak struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
#include <trace/events/nvmap.h>
#include "nvmap_priv.h"
#include "nvmap_dev.h"
#include "nvmap_alloc.h"
#include "nvmap_alloc_int.h"
#include "nvmap_handle.h"
@@ -75,7 +76,6 @@ static void heap_page_cache_maint(
* will result in a fault and can be marked dirty
*/
nvmap_handle_mkclean(h, start, end-start);
nvmap_zap_handle(h, start, end - start);
}
if (inner) {

View File

@@ -10,6 +10,7 @@
#include <soc/tegra/fuse-helper.h>
#include "nvmap_priv.h"
#include "nvmap_dev.h"
#include "nvmap_alloc.h"
#include "nvmap_alloc_int.h"
#include "nvmap_handle.h"

View File

@@ -23,6 +23,7 @@
#include <trace/events/nvmap.h>
#include <linux/libnvdimm.h>
#include "nvmap_dev.h"
#include "nvmap_priv.h"
#include "nvmap_alloc.h"
#include "nvmap_handle.h"

View File

@@ -43,10 +43,11 @@
#include <trace/events/nvmap.h>
#include "nvmap_priv.h"
#include "nvmap_dev.h"
#include "nvmap_alloc.h"
#include "nvmap_ioctl.h"
#include "nvmap_dmabuf.h"
#include "nvmap_handle.h"
#include "nvmap_dev_int.h"
#include <linux/pagewalk.h>
#define NVMAP_CARVEOUT_KILLER_RETRY_TIME 100 /* msecs */
@@ -169,7 +170,7 @@ static void nvmap_pid_put_locked(struct nvmap_device *dev, pid_t pid)
kref_put(&p->refcount, nvmap_pid_release_locked);
}
struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
static struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
const char *name)
{
struct nvmap_client *client;

View File

@@ -0,0 +1,81 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: Copyright (c) 2009-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
/*
 * nvmap_dev.h - public interface of the nvmap_dev unit: heap/flag bit
 * definitions and the platform carveout description structures exposed
 * to the other nvmap units.
 */
#ifndef __NVMAP_DEV_H
#define __NVMAP_DEV_H
#define NVMAP_HEAP_IOVMM (1ul<<30)
/* common carveout heaps */
#define NVMAP_HEAP_CARVEOUT_VPR (1ul<<28)
#define NVMAP_HEAP_CARVEOUT_TSEC (1ul<<27)
#define NVMAP_HEAP_CARVEOUT_VIDMEM (1ul<<26)
#define NVMAP_HEAP_CARVEOUT_GPU (1ul << 3)
#define NVMAP_HEAP_CARVEOUT_FSI (1ul<<2)
#define NVMAP_HEAP_CARVEOUT_IVM (1ul<<1)
#define NVMAP_HEAP_CARVEOUT_GENERIC (1ul<<0)
/* All carveout heap bits, i.e. every bit below NVMAP_HEAP_IOVMM */
#define NVMAP_HEAP_CARVEOUT_MASK (NVMAP_HEAP_IOVMM - 1)
/* allocation flags */
#define NVMAP_HANDLE_UNCACHEABLE (0x0ul << 0)
#define NVMAP_HANDLE_WRITE_COMBINE (0x1ul << 0)
#define NVMAP_HANDLE_INNER_CACHEABLE (0x2ul << 0)
#define NVMAP_HANDLE_CACHEABLE (0x3ul << 0)
/* Mask covering the two cacheability bits above */
#define NVMAP_HANDLE_CACHE_FLAG (0x3ul << 0)
#define NVMAP_HANDLE_SECURE (0x1ul << 2)
#define NVMAP_HANDLE_KIND_SPECIFIED (0x1ul << 3)
#define NVMAP_HANDLE_COMPR_SPECIFIED (0x1ul << 4)
#define NVMAP_HANDLE_ZEROED_PAGES (0x1ul << 5)
#define NVMAP_HANDLE_PHYS_CONTIG (0x1ul << 6)
#define NVMAP_HANDLE_CACHE_SYNC (0x1ul << 7)
#define NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE (0x1ul << 8)
#define NVMAP_HANDLE_RO (0x1ul << 9)
/*
 * A heap can be mapped to memory other than DRAM.
 * The HW, controls the memory, can be power gated/ungated
 * based upon the clients using the memory.
 * if no client/alloc happens from the memory, the HW needs
 * to be power gated. Similarly it should power ungated if
 * alloc happens from the memory.
 * int (*busy)(void) - trigger runtime power ungate
 * int (*idle)(void) - trigger runtime power gate
 */
struct nvmap_pm_ops {
int (*busy)(void);
int (*idle)(void);
};
/* Static description of one platform carveout region */
struct nvmap_platform_carveout {
const char *name;
unsigned int usage_mask;
phys_addr_t base;
size_t size;
struct device *cma_dev;
bool resize;
struct device *dma_dev;
struct device dev;
bool is_ivm;
unsigned int peer;
unsigned int vmid;
int can_alloc;
bool enable_static_dma_map;
bool disable_dynamic_dma_map;
bool no_cpu_access; /* carveout can't be accessed from cpu at all */
bool init_done; /* FIXME: remove once all carveouts use reserved-memory */
struct nvmap_pm_ops pm_ops;
int numa_node_id; /* NUMA node id from which the carveout is allocated from */
};
/* Table of all carveouts handed to the driver by the platform */
struct nvmap_platform_data {
const struct nvmap_platform_carveout *carveouts;
unsigned int nr_carveouts;
};
/* Check whether @size bytes are available from @heap on NUMA node @numa_nid */
bool is_nvmap_memory_available(size_t size, uint32_t heap, int numa_nid);
/* KASAN-safe memcpy to I/O memory (defined in nvmap_kasan_wrapper.c) */
void kasan_memcpy_toio(void __iomem *to, const void *from,
size_t count);
#endif /* __NVMAP_DEV_H */

View File

@@ -1,18 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0-only
* SPDX-FileCopyrightText: Copyright (c) 2010-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* ioctl declarations for nvmap
*/
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: Copyright (c) 2010-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef __VIDEO_TEGRA_NVMAP_IOCTL_H
#define __VIDEO_TEGRA_NVMAP_IOCTL_H
#ifndef __NVMAP_DEV_INT_H
#define __NVMAP_DEV_INT_H
#include <linux/nvmap.h>
int nvmap_probe(struct platform_device *pdev);
int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg,
bool is32);
int nvmap_remove(struct platform_device *pdev);
int nvmap_ioctl_getid(struct file *filp, void __user *arg);
int nvmap_init(struct platform_device *pdev);
int nvmap_ioctl_get_ivcid(struct file *filp, void __user *arg);
@@ -20,8 +16,6 @@ int nvmap_ioctl_getfd(struct file *filp, void __user *arg);
int nvmap_ioctl_alloc(struct file *filp, void __user *arg);
int nvmap_ioctl_alloc_kind(struct file *filp, void __user *arg);
int nvmap_ioctl_alloc_ivm(struct file *filp, void __user *arg);
int nvmap_ioctl_free(struct file *filp, unsigned long arg);
@@ -34,8 +28,6 @@ int nvmap_ioctl_create_from_ivc(struct file *filp, void __user *arg);
int nvmap_ioctl_get_ivc_heap(struct file *filp, void __user *arg);
int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg, bool is32);
int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg, int size);
int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg,
@@ -47,9 +39,6 @@ int nvmap_ioctl_set_tag_label(struct file *filp, void __user *arg);
int nvmap_ioctl_get_available_heaps(struct file *filp, void __user *arg);
void kasan_memcpy_toio(void __iomem *to, const void *from,
size_t count);
int nvmap_ioctl_get_handle_parameters(struct file *filp, void __user *arg);
int nvmap_ioctl_get_sci_ipc_id(struct file *filp, void __user *arg);
@@ -63,4 +52,5 @@ int nvmap_ioctl_query_heap_params_numa(struct file *filp, void __user *arg);
int nvmap_ioctl_dup_handle(struct file *filp, void __user *arg);
int nvmap_ioctl_get_fd_from_list(struct file *filp, void __user *arg);
#endif /* __VIDEO_TEGRA_NVMAP_IOCTL_H */
#endif /* __NVMAP_DEV_INT_H */

View File

@@ -31,7 +31,7 @@
#include <trace/events/nvmap.h>
#include "nvmap_priv.h"
#include "nvmap_ioctl.h"
#include "nvmap_dev.h"
#include "nvmap_alloc.h"
#include "nvmap_dmabuf.h"
#include "nvmap_handle.h"

View File

@@ -9,6 +9,7 @@
#include <linux/highmem.h>
#include "nvmap_priv.h"
#include "nvmap_dev.h"
#include "nvmap_alloc.h"
#include "nvmap_handle.h"

View File

@@ -26,7 +26,7 @@
#include <trace/events/nvmap.h>
#include "nvmap_priv.h"
#include "nvmap_ioctl.h"
#include "nvmap_dev.h"
#include "nvmap_alloc.h"
#include "nvmap_dmabuf.h"
#include "nvmap_handle.h"
@@ -34,6 +34,130 @@
u32 nvmap_max_handle_count;
/*
 * Resolve a user-visible id to its backing nvmap_handle and take a
 * reference on it (via nvmap_handle_get()); the caller must drop the
 * reference when done.
 *
 * When the client uses an ID allocator (client->ida), @id is an
 * nvmap-assigned ID; a value that is also a live dmabuf fd denotes a
 * foreign buffer and yields NULL. Otherwise @id is treated directly as
 * a dmabuf fd.
 *
 * Returns the referenced handle on success; NULL when the buffer is not
 * an nvmap buffer or its refcount could not be raised; ERR_PTR when the
 * client is NULL or the dmabuf lookup itself fails.
 */
struct nvmap_handle *nvmap_handle_get_from_id(struct nvmap_client *client,
u32 id)
{
struct nvmap_handle *handle = ERR_PTR(-EINVAL);
struct nvmap_handle_info *info;
struct dma_buf *dmabuf;
if (WARN_ON(!client))
return ERR_PTR(-EINVAL);
if (client->ida) {
dmabuf = dma_buf_get((int)id);
/*
* id is dmabuf fd created from foreign dmabuf
* but handle as ID is enabled, hence it doesn't belong
* to nvmap_handle, bail out early.
*/
if (!IS_ERR_OR_NULL(dmabuf)) {
dma_buf_put(dmabuf);
return NULL;
}
dmabuf = nvmap_id_array_get_dmabuf_from_id(client->ida, id);
} else {
dmabuf = dma_buf_get((int)id);
}
if (IS_ERR_OR_NULL(dmabuf))
return ERR_CAST(dmabuf);
if (dmabuf_is_nvmap(dmabuf)) {
info = dmabuf->priv;
handle = info->handle;
/* nvmap_handle_get() fails for a handle that is being torn down */
if (!nvmap_handle_get(handle))
handle = ERR_PTR(-EINVAL);
}
/* Drop the dmabuf ref taken above; the handle ref (if any) is kept */
dma_buf_put(dmabuf);
if (!IS_ERR(handle))
return handle;
/* Non-nvmap buffers and dead handles yield NULL, not an error pointer */
return NULL;
}
/*
 * Publish @dmabuf to userspace at reserved descriptor @fd after copying
 * the @op_size-byte result struct @op back to user pointer @arg.
 *
 * On success the dmabuf's file reference is consumed by fd_install().
 * On failure the reserved fd is released (put_unused_fd), the dmabuf
 * reference is dropped, and — when @free is set — the handle is freed
 * through nvmap_free_handle() so the caller's ref is not leaked.
 *
 * Returns 0 on success or a negative errno.
 */
int nvmap_install_fd(struct nvmap_client *client,
struct nvmap_handle *handle, int fd, void __user *arg,
void *op, size_t op_size, bool free, struct dma_buf *dmabuf)
{
int err = 0;
struct nvmap_handle_info *info;
if (!dmabuf) {
err = -EFAULT;
goto dmabuf_fail;
}
info = dmabuf->priv;
/* @fd may itself carry a negative errno from the fd reservation */
if (IS_ERR_VALUE((uintptr_t)fd)) {
err = fd;
goto fd_fail;
}
if (copy_to_user(arg, op, op_size)) {
err = -EFAULT;
goto copy_fail;
}
/* Point of no return: the fd now owns the dmabuf's file reference */
fd_install(fd, dmabuf->file);
return err;
copy_fail:
put_unused_fd(fd);
fd_fail:
/* NOTE(review): dmabuf is always non-NULL on this path (checked above) */
if (dmabuf)
dma_buf_put(dmabuf);
if (free && handle)
nvmap_free_handle(client, handle, info->is_ro);
dmabuf_fail:
return err;
}
/*
 * find_range_of_handles - map a byte range onto a list of handles
 * @hs:     array of handles, treated as one logical concatenation
 * @nr:     number of entries in @hs
 * @hrange: in:  ->offs (byte offset into the concatenation), ->sz (length)
 *          out: ->start/->end (indices of the first/last handle covering
 *          the range) and ->offs_start (offset of the range within the
 *          start handle)
 *
 * Returns 0 on success, -1 when the range does not fit within @hs.
 */
int find_range_of_handles(struct nvmap_handle **hs, u32 nr,
struct handles_range *hrange)
{
u64 tot_sz = 0, rem_sz = 0;
u64 offs = hrange->offs;
u32 start = 0, end = 0;
u64 sz = hrange->sz;
u32 i;
hrange->offs_start = offs;
/* Find start handle */
for (i = 0; i < nr; i++) {
tot_sz += hs[i]->size;
if (offs > tot_sz) {
/*
 * Range starts past this handle: subtract this handle's
 * size from the start offset. (Subtracting the running
 * total tot_sz here would over-subtract and underflow
 * for ranges starting in the third or later handle.)
 */
hrange->offs_start -= hs[i]->size;
continue;
} else {
/*
 * NOTE(review): offs == tot_sz lands exactly on the
 * boundary after handle i and is treated as belonging
 * to handle i with zero bytes remaining — confirm this
 * matches caller expectations.
 */
rem_sz = tot_sz - offs;
start = i;
/* Check size in current handle */
if (rem_sz >= sz) {
end = i;
hrange->start = start;
hrange->end = end;
return 0;
}
/* Though start found but end lies in further handles */
i++;
break;
}
}
/* find end handle number */
for (; i < nr; i++) {
rem_sz += hs[i]->size;
if (rem_sz >= sz) {
end = i;
hrange->start = start;
hrange->end = end;
return 0;
}
}
return -1;
}
static inline void nvmap_lru_add(struct nvmap_handle *h)
{
spin_lock(&nvmap_dev->lru_lock);

View File

@@ -239,6 +239,13 @@ struct nvmap_handle_ref *nvmap_create_handle_from_id(
struct nvmap_handle_ref *nvmap_create_handle_from_fd(
struct nvmap_client *client, int fd);
int nvmap_install_fd(struct nvmap_client *client,
struct nvmap_handle *handle, int fd, void __user *arg,
void *op, size_t op_size, bool free, struct dma_buf *dmabuf);
int find_range_of_handles(struct nvmap_handle **hs, u32 nr,
struct handles_range *hrange);
void nvmap_free_handle(struct nvmap_client *c, struct nvmap_handle *h, bool is_ro);
void nvmap_free_handle_from_fd(struct nvmap_client *c, int fd);

View File

@@ -26,6 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include "nvmap_priv.h"
#include "nvmap_dev.h"
#include "nvmap_alloc.h"
#include "nvmap_alloc_int.h"
#include "nvmap_handle.h"

View File

@@ -6,6 +6,7 @@
#include <linux/xarray.h>
#include <linux/dma-buf.h>
#include "nvmap_priv.h"
#include "nvmap_dev.h"
#include "nvmap_handle.h"
/*

View File

@@ -23,7 +23,9 @@
#include "include/linux/nvmap_exports.h"
#include "nvmap_priv.h"
#include "nvmap_dev.h"
#include "nvmap_alloc.h"
#include "nvmap_dev_int.h"
#ifdef CONFIG_TEGRA_VIRTUALIZATION
#include <soc/tegra/virt/hv-ivc.h>
@@ -356,7 +358,7 @@ static const struct reserved_mem_ops nvmap_co_ops = {
.device_release = nvmap_co_device_release,
};
int __init nvmap_co_setup(struct reserved_mem *rmem)
static int __init nvmap_co_setup(struct reserved_mem *rmem)
{
struct nvmap_platform_carveout *co;
ulong start = sched_clock();
@@ -507,5 +509,7 @@ static void __exit nvmap_exit_driver(void)
module_exit(nvmap_exit_driver);
MODULE_IMPORT_NS(DMA_BUF);
MODULE_DESCRIPTION("NvMap: Nvidia Tegra Memory Management Driver");
MODULE_AUTHOR("Puneet Saxena <puneets@nvidia.com>");
MODULE_AUTHOR("Ketan Patil <ketanp@nvidia.com>");
MODULE_AUTHOR("Ashish Mhetre <amhetre@nvidia.com>");
MODULE_AUTHOR("Pritesh Raithatha <praithatha@nvidia.com>");
MODULE_LICENSE("GPL v2");

View File

@@ -32,11 +32,12 @@
#include <linux/nvsciipc_interface.h>
#endif
#include "nvmap_ioctl.h"
#include "nvmap_dev.h"
#include "nvmap_priv.h"
#include "nvmap_alloc.h"
#include "nvmap_dmabuf.h"
#include "nvmap_handle.h"
#include "nvmap_dev_int.h"
#include <linux/syscalls.h>
#include <linux/nodemask.h>
@@ -50,92 +51,6 @@ MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);
extern bool vpr_cpu_access;
static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
int is_read, unsigned long h_offs,
unsigned long sys_addr, unsigned long h_stride,
unsigned long sys_stride, unsigned long elem_size,
unsigned long count);
struct nvmap_handle *nvmap_handle_get_from_id(struct nvmap_client *client,
u32 id)
{
struct nvmap_handle *handle = ERR_PTR(-EINVAL);
struct nvmap_handle_info *info;
struct dma_buf *dmabuf;
if (WARN_ON(!client))
return ERR_PTR(-EINVAL);
if (client->ida) {
dmabuf = dma_buf_get((int)id);
/*
* id is dmabuf fd created from foreign dmabuf
* but handle as ID is enabled, hence it doesn't belong
* to nvmap_handle, bail out early.
*/
if (!IS_ERR_OR_NULL(dmabuf)) {
dma_buf_put(dmabuf);
return NULL;
}
dmabuf = nvmap_id_array_get_dmabuf_from_id(client->ida, id);
} else {
dmabuf = dma_buf_get((int)id);
}
if (IS_ERR_OR_NULL(dmabuf))
return ERR_CAST(dmabuf);
if (dmabuf_is_nvmap(dmabuf)) {
info = dmabuf->priv;
handle = info->handle;
if (!nvmap_handle_get(handle))
handle = ERR_PTR(-EINVAL);
}
dma_buf_put(dmabuf);
if (!IS_ERR(handle))
return handle;
return NULL;
}
static int nvmap_install_fd(struct nvmap_client *client,
struct nvmap_handle *handle, int fd, void __user *arg,
void *op, size_t op_size, bool free, struct dma_buf *dmabuf)
{
int err = 0;
struct nvmap_handle_info *info;
if (!dmabuf) {
err = -EFAULT;
goto dmabuf_fail;
}
info = dmabuf->priv;
if (IS_ERR_VALUE((uintptr_t)fd)) {
err = fd;
goto fd_fail;
}
if (copy_to_user(arg, op, op_size)) {
err = -EFAULT;
goto copy_fail;
}
fd_install(fd, dmabuf->file);
return err;
copy_fail:
put_unused_fd(fd);
fd_fail:
if (dmabuf)
dma_buf_put(dmabuf);
if (free && handle)
nvmap_free_handle(client, handle, info->is_ro);
dmabuf_fail:
return err;
}
int nvmap_ioctl_getfd(struct file *filp, void __user *arg)
{
struct nvmap_handle *handle = NULL;
@@ -505,6 +420,101 @@ out:
return err;
}
/*
 * Strided copy between user memory (@sys_addr/@sys_stride) and a handle
 * (@h_offs/@h_stride): @count elements of @elem_size bytes each, read
 * from or written to the handle depending on @is_read.
 *
 * Cache maintenance is performed around each element unless the handle
 * was created with NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE. Writes into VPR
 * carveout memory are staged through a vmalloc bounce buffer and pushed
 * with kasan_memcpy_toio().
 *
 * Returns the number of bytes copied, or a negative errno for invalid
 * parameters / inaccessible heaps.
 */
static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
int is_read, unsigned long h_offs,
unsigned long sys_addr, unsigned long h_stride,
unsigned long sys_stride, unsigned long elem_size,
unsigned long count)
{
ssize_t copied = 0;
void *tmp = NULL;
void *addr;
int ret = 0;
/* Reject heaps the CPU is not allowed to touch */
if ((h->heap_type & nvmap_dev->cpu_access_mask) == 0)
return -EPERM;
if (elem_size == 0 || count == 0)
return -EINVAL;
if (!h->alloc)
return -EFAULT;
/* Contiguous, aligned transfers collapse into one big element */
if (elem_size == h_stride && elem_size == sys_stride && (h_offs % 8 == 0)) {
elem_size *= count;
h_stride = elem_size;
sys_stride = elem_size;
count = 1;
}
/* Bounds/overflow checks: the whole strided range must fit in the handle */
if (elem_size > h->size ||
h_offs >= h->size ||
elem_size > sys_stride ||
elem_size > h_stride ||
sys_stride > (h->size - h_offs) / count ||
h_offs + h_stride * (count - 1) + elem_size > h->size)
return -EINVAL;
if (h->vaddr == NULL) {
/*
 * NOTE(review): presumably __nvmap_mmap() caches the mapping in
 * h->vaddr so it stays valid after the paired __nvmap_munmap()
 * below — confirm against the nvmap_handle implementation.
 */
if (!__nvmap_mmap(h))
return -ENOMEM;
__nvmap_munmap(h, h->vaddr);
}
addr = h->vaddr + h_offs;
/* Allocate buffer to cache data for VPR write */
if (!is_read && h->heap_type == NVMAP_HEAP_CARVEOUT_VPR) {
tmp = vmalloc(elem_size);
if (!tmp)
return -ENOMEM;
}
while (count--) {
if (h_offs + elem_size > h->size) {
pr_warn("read/write outside of handle\n");
ret = -EFAULT;
break;
}
/* Invalidate before reading so the CPU sees device-written data */
if (is_read &&
!(h->userflags & NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE))
__nvmap_do_cache_maint(client, h, h_offs,
h_offs + elem_size, NVMAP_CACHE_OP_INV, false);
if (is_read)
ret = copy_to_user((void __user *)sys_addr, addr, elem_size);
else {
if (h->heap_type == NVMAP_HEAP_CARVEOUT_VPR) {
ret = copy_from_user(tmp, (void __user *)sys_addr,
elem_size);
if (ret == 0)
kasan_memcpy_toio((void __iomem *)addr, tmp, elem_size);
} else
ret = copy_from_user(addr, (void __user *)sys_addr, elem_size);
}
/*
 * NOTE(review): copy_to_user/copy_from_user return the number of
 * bytes NOT copied; a partial copy therefore makes the function
 * return that positive remainder via "ret ?: copied" below rather
 * than -EFAULT — confirm callers expect this.
 */
if (ret)
break;
/* Write back after a CPU write so the device sees the new data */
if (!is_read &&
!(h->userflags & NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE))
__nvmap_do_cache_maint(client, h, h_offs,
h_offs + elem_size, NVMAP_CACHE_OP_WB_INV,
false);
copied += elem_size;
sys_addr += sys_stride;
h_offs += h_stride;
addr += h_stride;
}
/* Release the buffer used for VPR write */
if (!is_read && h->heap_type == NVMAP_HEAP_CARVEOUT_VPR && tmp)
vfree(tmp);
return ret ?: copied;
}
int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg,
size_t op_size)
{
@@ -631,101 +641,6 @@ close_fd:
return SYS_CLOSE(arg);
}
static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
int is_read, unsigned long h_offs,
unsigned long sys_addr, unsigned long h_stride,
unsigned long sys_stride, unsigned long elem_size,
unsigned long count)
{
ssize_t copied = 0;
void *tmp = NULL;
void *addr;
int ret = 0;
if ((h->heap_type & nvmap_dev->cpu_access_mask) == 0)
return -EPERM;
if (elem_size == 0 || count == 0)
return -EINVAL;
if (!h->alloc)
return -EFAULT;
if (elem_size == h_stride && elem_size == sys_stride && (h_offs % 8 == 0)) {
elem_size *= count;
h_stride = elem_size;
sys_stride = elem_size;
count = 1;
}
if (elem_size > h->size ||
h_offs >= h->size ||
elem_size > sys_stride ||
elem_size > h_stride ||
sys_stride > (h->size - h_offs) / count ||
h_offs + h_stride * (count - 1) + elem_size > h->size)
return -EINVAL;
if (h->vaddr == NULL) {
if (!__nvmap_mmap(h))
return -ENOMEM;
__nvmap_munmap(h, h->vaddr);
}
addr = h->vaddr + h_offs;
/* Allocate buffer to cache data for VPR write */
if (!is_read && h->heap_type == NVMAP_HEAP_CARVEOUT_VPR) {
tmp = vmalloc(elem_size);
if (!tmp)
return -ENOMEM;
}
while (count--) {
if (h_offs + elem_size > h->size) {
pr_warn("read/write outside of handle\n");
ret = -EFAULT;
break;
}
if (is_read &&
!(h->userflags & NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE))
__nvmap_do_cache_maint(client, h, h_offs,
h_offs + elem_size, NVMAP_CACHE_OP_INV, false);
if (is_read)
ret = copy_to_user((void __user *)sys_addr, addr, elem_size);
else {
if (h->heap_type == NVMAP_HEAP_CARVEOUT_VPR) {
ret = copy_from_user(tmp, (void __user *)sys_addr,
elem_size);
if (ret == 0)
kasan_memcpy_toio((void __iomem *)addr, tmp, elem_size);
} else
ret = copy_from_user(addr, (void __user *)sys_addr, elem_size);
}
if (ret)
break;
if (!is_read &&
!(h->userflags & NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE))
__nvmap_do_cache_maint(client, h, h_offs,
h_offs + elem_size, NVMAP_CACHE_OP_WB_INV,
false);
copied += elem_size;
sys_addr += sys_stride;
h_offs += h_stride;
addr += h_stride;
}
/* Release the buffer used for VPR write */
if (!is_read && h->heap_type == NVMAP_HEAP_CARVEOUT_VPR && tmp)
vfree(tmp);
return ret ?: copied;
}
int nvmap_ioctl_get_ivcid(struct file *filp, void __user *arg)
{
struct nvmap_client *client = filp->private_data;
@@ -1293,50 +1208,6 @@ out:
return ret;
}
static int find_range_of_handles(struct nvmap_handle **hs, u32 nr,
struct handles_range *hrange)
{
u64 tot_sz = 0, rem_sz = 0;
u64 offs = hrange->offs;
u32 start = 0, end = 0;
u64 sz = hrange->sz;
u32 i;
hrange->offs_start = offs;
/* Find start handle */
for (i = 0; i < nr; i++) {
tot_sz += hs[i]->size;
if (offs > tot_sz) {
hrange->offs_start -= tot_sz;
continue;
} else {
rem_sz = tot_sz - offs;
start = i;
/* Check size in current handle */
if (rem_sz >= sz) {
end = i;
hrange->start = start;
hrange->end = end;
return 0;
}
/* Though start found but end lies in further handles */
i++;
break;
}
}
/* find end handle number */
for (; i < nr; i++) {
rem_sz += hs[i]->size;
if (rem_sz >= sz) {
end = i;
hrange->start = start;
hrange->end = end;
return 0;
}
}
return -1;
}
int nvmap_ioctl_get_fd_from_list(struct file *filp, void __user *arg)
{
struct nvmap_client *client = filp->private_data;

View File

@@ -8,7 +8,8 @@
#include <linux/export.h>
#include <linux/types.h>
#include <linux/io.h>
#include "nvmap_ioctl.h"
#include <linux/device.h>
#include "nvmap_dev.h"
void kasan_memcpy_toio(void __iomem *to,
const void *from, size_t count)

View File

@@ -1,18 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Some MM related functionality specific to nvmap.
*/
#include <trace/events/nvmap.h>
#include <linux/version.h>
#include <asm/pgtable.h>
#include "nvmap_priv.h"
void nvmap_zap_handle(struct nvmap_handle *handle, u64 offset, u64 size)
{
pr_debug("%s is not supported!\n", __func__);
}

View File

@@ -24,6 +24,7 @@
#include <trace/events/nvmap.h>
#include "nvmap_priv.h"
#include "nvmap_dev.h"
#include "nvmap_alloc.h"
#include "nvmap_alloc_int.h"

View File

@@ -235,12 +235,6 @@ static inline void nvmap_release_mmap_read_lock(struct mm_struct *mm)
up_read(&mm->mmap_lock);
}
int nvmap_probe(struct platform_device *pdev);
int nvmap_remove(struct platform_device *pdev);
int nvmap_init(struct platform_device *pdev);
int nvmap_co_setup(struct reserved_mem *rmem);
struct nvmap_carveout_node;
struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h);
@@ -258,8 +252,6 @@ void __nvmap_free_sg_table(struct nvmap_client *client,
struct nvmap_handle *h, struct sg_table *sgt);
void *__nvmap_mmap(struct nvmap_handle *h);
void __nvmap_munmap(struct nvmap_handle *h, void *addr);
struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
const char *name);
static inline bool nvmap_page_dirty(struct page *page)
{
@@ -282,8 +274,6 @@ static inline bool nvmap_page_mkclean(struct page **page)
return true;
}
void nvmap_zap_handle(struct nvmap_handle *handle, u64 offset, u64 size);
void nvmap_vma_open(struct vm_area_struct *vma);
struct nvmap_tag_entry *nvmap_search_tag_entry(struct rb_root *root, u32 tag);
@@ -308,7 +298,6 @@ static inline pid_t nvmap_client_pid(struct nvmap_client *client)
void *nvmap_dmabuf_get_drv_data(struct dma_buf *dmabuf,
struct device *dev);
bool is_nvmap_memory_available(size_t size, uint32_t heap, int numa_nid);
#ifdef NVMAP_CONFIG_DEBUG_MAPS
struct nvmap_device_list *nvmap_is_device_present(char *device_name, u32 heap_type);

View File

@@ -21,6 +21,7 @@
#include <trace/events/nvmap.h>
#include "nvmap_priv.h"
#include "nvmap_dev.h"
#include "nvmap_dmabuf.h"
#include "nvmap_handle.h"
#include "nvmap_handle_int.h"

View File

@@ -14,81 +14,7 @@
#include <linux/version.h>
#include <uapi/linux/nvmap.h>
#define NVMAP_HEAP_IOVMM (1ul<<30)
/* common carveout heaps */
#define NVMAP_HEAP_CARVEOUT_VPR (1ul<<28)
#define NVMAP_HEAP_CARVEOUT_TSEC (1ul<<27)
#define NVMAP_HEAP_CARVEOUT_VIDMEM (1ul<<26)
#define NVMAP_HEAP_CARVEOUT_GPU (1ul << 3)
#define NVMAP_HEAP_CARVEOUT_FSI (1ul<<2)
#define NVMAP_HEAP_CARVEOUT_IVM (1ul<<1)
#define NVMAP_HEAP_CARVEOUT_GENERIC (1ul<<0)
#define NVMAP_HEAP_CARVEOUT_MASK (NVMAP_HEAP_IOVMM - 1)
/* allocation flags */
#define NVMAP_HANDLE_UNCACHEABLE (0x0ul << 0)
#define NVMAP_HANDLE_WRITE_COMBINE (0x1ul << 0)
#define NVMAP_HANDLE_INNER_CACHEABLE (0x2ul << 0)
#define NVMAP_HANDLE_CACHEABLE (0x3ul << 0)
#define NVMAP_HANDLE_CACHE_FLAG (0x3ul << 0)
#define NVMAP_HANDLE_SECURE (0x1ul << 2)
#define NVMAP_HANDLE_KIND_SPECIFIED (0x1ul << 3)
#define NVMAP_HANDLE_COMPR_SPECIFIED (0x1ul << 4)
#define NVMAP_HANDLE_ZEROED_PAGES (0x1ul << 5)
#define NVMAP_HANDLE_PHYS_CONTIG (0x1ul << 6)
#define NVMAP_HANDLE_CACHE_SYNC (0x1ul << 7)
#define NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE (0x1ul << 8)
#define NVMAP_HANDLE_RO (0x1ul << 9)
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
ulong nvmap_iovmm_get_used_pages(void);
#endif
int nvmap_register_vidmem_carveout(struct device *dma_dev,
phys_addr_t base, size_t size);
/*
* A heap can be mapped to memory other than DRAM.
* The HW, controls the memory, can be power gated/ungated
* based upon the clients using the memory.
* if no client/alloc happens from the memory, the HW needs
* to be power gated. Similarly it should power ungated if
* alloc happens from the memory.
* int (*busy)(void) - trigger runtime power ungate
* int (*idle)(void) - trigger runtime power gate
*/
struct nvmap_pm_ops {
int (*busy)(void);
int (*idle)(void);
};
struct nvmap_platform_carveout {
const char *name;
unsigned int usage_mask;
phys_addr_t base;
size_t size;
struct device *cma_dev;
bool resize;
struct device *dma_dev;
struct device dev;
bool is_ivm;
unsigned int peer;
unsigned int vmid;
int can_alloc;
bool enable_static_dma_map;
bool disable_dynamic_dma_map;
bool no_cpu_access; /* carveout can't be accessed from cpu at all */
bool init_done; /* FIXME: remove once all caveouts use reserved-memory */
struct nvmap_pm_ops pm_ops;
int numa_node_id; /* NUMA node id from which the carveout is allocated from */
};
struct nvmap_platform_data {
const struct nvmap_platform_carveout *carveouts;
unsigned int nr_carveouts;
};
#endif /* _LINUX_NVMAP_H */