video: tegra: nvmap: Add helper functions for nvmap_heap

- Move all data structures from the nvmap_heap.h header file into
nvmap_alloc_int.h, as they are owned by the nvmap_alloc unit.
- Provide getter and setter functions to access the members of these
data structures.
- Provide forward declarations of these data structures.
- Remove the nvmap_heap.h header file, since nvmap_heap is part of the
nvmap_alloc unit and nvmap_alloc exposes nvmap_alloc.h as its header
file to other units.

JIRA TMM-5621

Change-Id: I2c4dd95a1a1011e4a7c1b425aa7521c6f13202da
Signed-off-by: Ketan Patil <ketanp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3201354
Reviewed-by: Pritesh Raithatha <praithatha@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Authored by Ketan Patil on 2024-08-26 11:37:34 +00:00; committed by Jon Hunter
parent 98b0460f42
commit 8971a981c5
11 changed files with 196 additions and 135 deletions
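The change follows the usual information-hiding pattern: the struct definitions move into nvmap_alloc_int.h, other units see only forward declarations in nvmap_alloc.h, and direct member accesses such as h->carveout->base become calls like nvmap_get_heap_block_base(h->carveout). Below is a minimal standalone sketch of that pattern; the names heap, heap_get_free_size() and heap_get_nid() are simplified stand-ins, not the real nvmap types or helpers.

/* Public header (plays the role of nvmap_alloc.h): opaque type plus accessors. */
#include <stddef.h>
#include <stdio.h>

struct heap;                                    /* forward declaration only */
size_t heap_get_free_size(const struct heap *h);
int heap_get_nid(const struct heap *h);

/* Owning unit (plays the role of nvmap_alloc_int.h): the full definition stays private. */
struct heap {
        size_t free_size;
        int numa_node_id;
};

size_t heap_get_free_size(const struct heap *h)
{
        return h->free_size;
}

int heap_get_nid(const struct heap *h)
{
        return h->numa_node_id;
}

/* Consumer unit: goes through the getters instead of dereferencing members. */
int main(void)
{
        struct heap h = { .free_size = 4096, .numa_node_id = 0 };

        printf("free=%zu nid=%d\n", heap_get_free_size(&h), heap_get_nid(&h));
        return 0;
}

The hunks below apply the same pattern with the real types: nvmap_get_heap_free_size(), nvmap_get_heap_nid(), nvmap_get_heap_block_base() and the debugfs_info helpers replace direct member accesses at every call site.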


@@ -4,6 +4,9 @@
#ifndef __NVMAP_ALLOC_H
#define __NVMAP_ALLOC_H
struct nvmap_heap;
struct debugfs_info;
void *nvmap_altalloc(size_t len);
void nvmap_altfree(void *ptr, size_t len);
@@ -64,4 +67,26 @@ int nvmap_page_pool_init(struct nvmap_device *dev);
int nvmap_page_pool_fini(struct nvmap_device *dev);
#endif /* NVMAP_CONFIG_PAGE_POOLS */
/* helper functions for nvmap_heap struct */
size_t nvmap_get_heap_free_size(struct nvmap_heap *heap);
int nvmap_get_heap_nid(struct nvmap_heap *heap);
/* helper functions for nvmap_heap_block struct */
phys_addr_t nvmap_get_heap_block_base(struct nvmap_heap_block *block);
void nvmap_set_heap_block_handle(struct nvmap_heap_block *block, struct nvmap_handle *handle);
/* helper functions for debugfs_info struct */
unsigned int nvmap_get_debug_info_heap(struct debugfs_info *info);
int nvmap_get_debug_info_nid(struct debugfs_info *info);
struct debugfs_info *nvmap_create_debugfs_info(void);
void nvmap_set_debugfs_heap(struct debugfs_info *info, unsigned int heap_bit);
void nvmap_set_debugfs_numa(struct debugfs_info *info, int nid);
#endif /* __NVMAP_ALLOC_H */


@@ -4,6 +4,56 @@
#ifndef __NVMAP_ALLOC_INT_H
#define __NVMAP_ALLOC_INT_H
struct nvmap_heap_block {
phys_addr_t base;
unsigned int type;
struct nvmap_handle *handle;
};
/*
* Info to be passed to debugfs nodes, so as to provide heap type and
* numa node id.
*/
struct debugfs_info {
unsigned int heap_bit;
int numa_id;
};
struct nvmap_heap {
struct list_head all_list;
struct mutex lock;
const char *name;
void *arg;
/* heap base */
phys_addr_t base;
/* heap size */
size_t len;
size_t free_size;
struct device *cma_dev;
struct device *dma_dev;
bool is_ivm;
int numa_node_id;
bool can_alloc; /* Used only if is_ivm == true */
unsigned int peer; /* Used only if is_ivm == true */
unsigned int vm_id; /* Used only if is_ivm == true */
struct nvmap_pm_ops pm_ops;
#ifdef NVMAP_CONFIG_DEBUG_MAPS
struct rb_root device_names;
#endif /* NVMAP_CONFIG_DEBUG_MAPS */
struct debugfs_info *carevout_debugfs_info; /* Used for storing debugfs info */
};
struct list_block {
struct nvmap_heap_block block;
struct list_head all_list;
unsigned int mem_prot;
phys_addr_t orig_addr;
size_t size;
size_t align;
struct nvmap_heap *heap;
struct list_head free_list;
};
int nvmap_cache_maint_phys_range(unsigned int op, phys_addr_t pstart,
phys_addr_t pend, int inner, int outer);


@@ -30,7 +30,7 @@ static phys_addr_t handle_phys(struct nvmap_handle *h)
{
if (h->heap_pgalloc)
BUG();
return h->carveout->base;
return nvmap_get_heap_block_base(h->carveout);
}
void *__nvmap_mmap(struct nvmap_handle *h)
@@ -84,16 +84,16 @@ void *__nvmap_mmap(struct nvmap_handle *h)
}
/* carveout - explicitly map the pfns into a vmalloc area */
adj_size = h->carveout->base & ~PAGE_MASK;
adj_size = nvmap_get_heap_block_base(h->carveout) & ~PAGE_MASK;
adj_size += h->size;
adj_size = PAGE_ALIGN(adj_size);
if (pfn_valid(__phys_to_pfn(h->carveout->base & PAGE_MASK))) {
if (pfn_valid(__phys_to_pfn(nvmap_get_heap_block_base(h->carveout) & PAGE_MASK))) {
unsigned long pfn;
struct page *page;
int nr_pages;
pfn = ((h->carveout->base) >> PAGE_SHIFT);
pfn = ((nvmap_get_heap_block_base(h->carveout)) >> PAGE_SHIFT);
page = pfn_to_page(pfn);
nr_pages = h->size >> PAGE_SHIFT;
@@ -107,10 +107,11 @@ void *__nvmap_mmap(struct nvmap_handle *h)
vaddr = vmap(pages, nr_pages, VM_MAP, prot);
} else {
#if defined(CONFIG_GENERIC_IOREMAP)
vaddr = (__force void *)ioremap_prot(h->carveout->base, adj_size, pgprot_val(prot));
vaddr = (__force void *)ioremap_prot(nvmap_get_heap_block_base(h->carveout),
adj_size, pgprot_val(prot));
#else
vaddr = (__force void *)__ioremap(h->carveout->base, adj_size,
prot);
vaddr = (__force void *)__ioremap(nvmap_get_heap_block_base(h->carveout),
adj_size, prot);
#endif
}
if (vaddr == NULL)
@@ -118,7 +119,7 @@ void *__nvmap_mmap(struct nvmap_handle *h)
if (vaddr && atomic_long_cmpxchg((atomic_long_t *)&h->vaddr,
0, (long)vaddr)) {
vaddr -= (h->carveout->base & ~PAGE_MASK);
vaddr -= (nvmap_get_heap_block_base(h->carveout) & ~PAGE_MASK);
/*
* iounmap calls vunmap for vmalloced address, hence
* takes care of vmap/__ioremap freeing part.


@@ -44,7 +44,6 @@
#include "nvmap_priv.h"
#include "nvmap_alloc.h"
#include "nvmap_heap.h"
#include "nvmap_ioctl.h"
#include <linux/pagewalk.h>
@@ -58,7 +57,6 @@ static struct device_dma_parameters nvmap_dma_parameters = {
.max_segment_size = UINT_MAX,
};
static struct debugfs_info iovmm_debugfs_info;
static int nvmap_open(struct inode *inode, struct file *filp);
static int nvmap_release(struct inode *inode, struct file *filp);
static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
@@ -543,7 +541,7 @@ static void allocations_stringify(struct nvmap_client *client,
if (handle->alloc && handle->heap_type == heap_type) {
phys_addr_t base = heap_type == NVMAP_HEAP_IOVMM ? 0 :
handle->heap_pgalloc ? 0 :
(handle->carveout->base);
(nvmap_get_heap_block_base(handle->carveout));
size_t size = K(handle->size);
int i = 0;
@@ -631,14 +629,14 @@ bool is_nvmap_memory_available(size_t size, uint32_t heap, int numa_nid)
* on that numa node.
*/
if (numa_nid == NUMA_NO_NODE) {
if (size > (h->free_size & PAGE_MASK))
if (size > (nvmap_get_heap_free_size(h) & PAGE_MASK))
continue;
memory_available = true;
goto exit;
} else {
if (h->numa_node_id != numa_nid)
if (nvmap_get_heap_nid(h) != numa_nid)
continue;
else if (size > (h->free_size & PAGE_MASK))
else if (size > (nvmap_get_heap_free_size(h) & PAGE_MASK))
memory_available = false;
else
memory_available = true;
@@ -702,7 +700,7 @@ static void maps_stringify(struct nvmap_client *client,
if (handle->alloc && handle->heap_type == heap_type) {
phys_addr_t base = heap_type == NVMAP_HEAP_IOVMM ? 0 :
handle->heap_pgalloc ? 0 :
(handle->carveout->base);
(nvmap_get_heap_block_base(handle->carveout));
size_t size = K(handle->size);
int i = 0;
@@ -765,7 +763,7 @@ static void nvmap_get_client_mss(struct nvmap_client *client,
struct nvmap_handle *handle = ref->handle;
if (handle->alloc && handle->heap_type == heap_type) {
if (heap_type != NVMAP_HEAP_IOVMM &&
(nvmap_block_to_heap(handle->carveout)->numa_node_id !=
(nvmap_get_heap_nid(nvmap_block_to_heap(handle->carveout)) !=
numa_id))
continue;
@@ -815,7 +813,7 @@ static void nvmap_get_total_mss(u64 *pss, u64 *total, u32 heap_type, int numa_id
continue;
if (heap_type != NVMAP_HEAP_IOVMM &&
(nvmap_block_to_heap(h->carveout)->numa_node_id !=
(nvmap_get_heap_nid(nvmap_block_to_heap(h->carveout)) !=
numa_id))
continue;
@@ -838,8 +836,8 @@ static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
u64 total;
struct nvmap_client *client;
struct debugfs_info *debugfs_information = (struct debugfs_info *)s->private;
u32 heap_type = debugfs_information->heap_bit;
int numa_id = debugfs_information->numa_id;
u32 heap_type = nvmap_get_debug_info_heap(debugfs_information);
int numa_id = nvmap_get_debug_info_nid(debugfs_information);
mutex_lock(&nvmap_dev->clients_lock);
seq_printf(s, "%-18s %18s %8s %11s\n",
@@ -915,8 +913,8 @@ DEBUGFS_OPEN_FOPS(device_list);
static int nvmap_debug_all_allocations_show(struct seq_file *s, void *unused)
{
struct debugfs_info *debugfs_information = (struct debugfs_info *)s->private;
u32 heap_type = debugfs_information->heap_bit;
int numa_id = debugfs_information->numa_id;
u32 heap_type = nvmap_get_debug_info_heap(debugfs_information);
int numa_id = nvmap_get_debug_info_nid(debugfs_information);
struct rb_node *n;
spin_lock(&nvmap_dev->handle_lock);
@@ -931,14 +929,15 @@ static int nvmap_debug_all_allocations_show(struct seq_file *s, void *unused)
rb_entry(n, struct nvmap_handle, node);
int i = 0;
if (handle->alloc && handle->heap_type == debugfs_information->heap_bit) {
if (handle->alloc && handle->heap_type ==
nvmap_get_debug_info_heap(debugfs_information)) {
phys_addr_t base = heap_type == NVMAP_HEAP_IOVMM ? 0 :
handle->heap_pgalloc ? 0 :
(handle->carveout->base);
(nvmap_get_heap_block_base(handle->carveout));
size_t size = K(handle->size);
if (heap_type != NVMAP_HEAP_IOVMM &&
(nvmap_block_to_heap(handle->carveout)->numa_node_id != numa_id))
(nvmap_get_heap_nid(nvmap_block_to_heap(handle->carveout)) != numa_id))
continue;
next_page:
@@ -975,8 +974,8 @@ DEBUGFS_OPEN_FOPS(all_allocations);
static int nvmap_debug_orphan_handles_show(struct seq_file *s, void *unused)
{
struct debugfs_info *debugfs_information = (struct debugfs_info *)s->private;
u32 heap_type = debugfs_information->heap_bit;
int numa_id = debugfs_information->numa_id;
u32 heap_type = nvmap_get_debug_info_heap(debugfs_information);
int numa_id = nvmap_get_debug_info_nid(debugfs_information);
struct rb_node *n;
@@ -996,11 +995,11 @@ static int nvmap_debug_orphan_handles_show(struct seq_file *s, void *unused)
!atomic_read(&handle->share_count)) {
phys_addr_t base = heap_type == NVMAP_HEAP_IOVMM ? 0 :
handle->heap_pgalloc ? 0 :
(handle->carveout->base);
(nvmap_get_heap_block_base(handle->carveout));
size_t size = K(handle->size);
if (heap_type != NVMAP_HEAP_IOVMM &&
(nvmap_block_to_heap(handle->carveout)->numa_node_id !=
(nvmap_get_heap_nid(nvmap_block_to_heap(handle->carveout)) !=
numa_id))
continue;
@@ -1039,8 +1038,8 @@ static int nvmap_debug_maps_show(struct seq_file *s, void *unused)
u64 total;
struct nvmap_client *client;
struct debugfs_info *debugfs_information = (struct debugfs_info *)s->private;
u32 heap_type = debugfs_information->heap_bit;
int numa_id = debugfs_information->numa_id;
u32 heap_type = nvmap_get_debug_info_heap(debugfs_information);
int numa_id = nvmap_get_debug_info_nid(debugfs_information);
mutex_lock(&nvmap_dev->clients_lock);
seq_printf(s, "%-18s %18s %8s %11s\n",
@@ -1071,8 +1070,8 @@ static int nvmap_debug_clients_show(struct seq_file *s, void *unused)
u64 total;
struct nvmap_client *client;
struct debugfs_info *debugfs_information = (struct debugfs_info *)s->private;
u32 heap_type = debugfs_information->heap_bit;
int numa_id = debugfs_information->numa_id;
u32 heap_type = nvmap_get_debug_info_heap(debugfs_information);
int numa_id = nvmap_get_debug_info_nid(debugfs_information);
mutex_lock(&nvmap_dev->clients_lock);
seq_printf(s, "%-18s %18s %8s %11s\n",
@@ -1116,7 +1115,7 @@ static int nvmap_debug_handles_by_pid_show_client(struct seq_file *s,
entry.base = handle->heap_type == NVMAP_HEAP_IOVMM ? 0 :
handle->heap_pgalloc ? 0 :
(handle->carveout->base);
(nvmap_get_heap_block_base(handle->carveout));
entry.size = handle->size;
entry.flags = handle->userflags;
entry.share_count = atomic_read(&handle->share_count);
@@ -1303,34 +1302,36 @@ DEBUGFS_OPEN_FOPS(iovmm_procrank);
static void nvmap_iovmm_debugfs_init(void)
{
if (!IS_ERR_OR_NULL(nvmap_dev->debug_root)) {
struct debugfs_info *iovmm_debugfs_info = nvmap_create_debugfs_info();
if (iovmm_debugfs_info != NULL) {
struct dentry *iovmm_root =
debugfs_create_dir("iovmm", nvmap_dev->debug_root);
iovmm_debugfs_info.heap_bit = NVMAP_HEAP_IOVMM;
iovmm_debugfs_info.numa_id = NUMA_NO_NODE;
nvmap_set_debugfs_heap(iovmm_debugfs_info, NVMAP_HEAP_IOVMM);
nvmap_set_debugfs_numa(iovmm_debugfs_info, NUMA_NO_NODE);
if (!IS_ERR_OR_NULL(iovmm_root)) {
debugfs_create_file("clients", S_IRUGO, iovmm_root,
(void *)&iovmm_debugfs_info,
(void *)iovmm_debugfs_info,
&debug_clients_fops);
debugfs_create_file("allocations", S_IRUGO, iovmm_root,
(void *)&iovmm_debugfs_info,
(void *)iovmm_debugfs_info,
&debug_allocations_fops);
debugfs_create_file("all_allocations", S_IRUGO,
iovmm_root, (void *)&iovmm_debugfs_info,
iovmm_root, (void *)iovmm_debugfs_info,
&debug_all_allocations_fops);
debugfs_create_file("orphan_handles", S_IRUGO,
iovmm_root, (void *)&iovmm_debugfs_info,
iovmm_root, (void *)iovmm_debugfs_info,
&debug_orphan_handles_fops);
debugfs_create_file("maps", S_IRUGO, iovmm_root,
(void *)&iovmm_debugfs_info,
(void *)iovmm_debugfs_info,
&debug_maps_fops);
debugfs_create_file("free_size", S_IRUGO, iovmm_root,
(void *)&iovmm_debugfs_info,
(void *)iovmm_debugfs_info,
&debug_free_size_fops);
#ifdef NVMAP_CONFIG_DEBUG_MAPS
debugfs_create_file("device_list", S_IRUGO, iovmm_root,
(void *)&iovmm_debugfs_info,
(void *)iovmm_debugfs_info,
&debug_device_list_fops);
#endif /* NVMAP_CONFIG_DEBUG_MAPS */
@@ -1340,6 +1341,7 @@ static void nvmap_iovmm_debugfs_init(void)
#endif
}
}
}
}
static bool nvmap_is_iommu_present(void)


@@ -196,7 +196,7 @@ static struct sg_table *nvmap_dmabuf_map_dma_buf(struct dma_buf_attachment *atta
goto err_map;
} else if (!(nvmap_dev->dynamic_dma_map_mask &
info->handle->heap_type)) {
sg_dma_address(sgt->sgl) = info->handle->carveout->base;
sg_dma_address(sgt->sgl) = nvmap_get_heap_block_base(info->handle->carveout);
} else if (info->handle->heap_type == NVMAP_HEAP_CARVEOUT_VPR &&
access_vpr_phys(attach->dev)) {
sg_dma_address(sgt->sgl) = 0;


@@ -185,8 +185,8 @@ static vm_fault_t nvmap_vma_fault(struct vm_fault *vmf)
if (!priv->handle->heap_pgalloc) {
unsigned long pfn;
BUG_ON(priv->handle->carveout->base & ~PAGE_MASK);
pfn = ((priv->handle->carveout->base + offs) >> PAGE_SHIFT);
BUG_ON(nvmap_get_heap_block_base(priv->handle->carveout) & ~PAGE_MASK);
pfn = ((nvmap_get_heap_block_base(priv->handle->carveout) + offs) >> PAGE_SHIFT);
if (!pfn_is_map_memory(pfn)) {
vm_insert_pfn(vma,
(unsigned long)vmf_address, pfn);


@@ -27,6 +27,7 @@
#include "nvmap_priv.h"
#include "nvmap_ioctl.h"
#include "nvmap_alloc.h"
/*
* Verifies that the passed ID is a valid handle ID. Then the passed client's


@@ -28,7 +28,6 @@
#include "nvmap_priv.h"
#include "nvmap_alloc.h"
#include "nvmap_alloc_int.h"
#include "nvmap_heap.h"
#include "include/linux/nvmap_exports.h"
/*
@@ -502,3 +501,51 @@ out:
return ret;
}
#endif /* !NVMAP_CONFIG_CACHE_FLUSH_AT_ALLOC */
size_t nvmap_get_heap_free_size(struct nvmap_heap *heap)
{
return heap->free_size;
}
int nvmap_get_heap_nid(struct nvmap_heap *heap)
{
return heap->numa_node_id;
}
phys_addr_t nvmap_get_heap_block_base(struct nvmap_heap_block *block)
{
return block->base;
}
unsigned int nvmap_get_debug_info_heap(struct debugfs_info *info)
{
return info->heap_bit;
}
int nvmap_get_debug_info_nid(struct debugfs_info *info)
{
return info->numa_id;
}
void nvmap_set_heap_block_handle(struct nvmap_heap_block *block, struct nvmap_handle *handle)
{
block->handle = handle;
}
struct debugfs_info *nvmap_create_debugfs_info(void)
{
struct debugfs_info *info;
info = kzalloc(sizeof(*info), GFP_KERNEL);
return info;
}
void nvmap_set_debugfs_heap(struct debugfs_info *info, unsigned int heap_bit)
{
info->heap_bit = heap_bit;
}
void nvmap_set_debugfs_numa(struct debugfs_info *info, int nid)
{
info->numa_id = nid;
}


@@ -1,64 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only
* SPDX-FileCopyrightText: Copyright (c) 2010-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* GPU heap allocator.
*/
#ifndef __NVMAP_HEAP_H
#define __NVMAP_HEAP_H
struct device;
struct nvmap_heap;
struct nvmap_client;
struct nvmap_heap_block {
phys_addr_t base;
unsigned int type;
struct nvmap_handle *handle;
};
/*
* Info to be passed to debugfs nodes, so as to provide heap type and
* numa node id.
*/
struct debugfs_info {
unsigned int heap_bit;
int numa_id;
};
struct nvmap_heap {
struct list_head all_list;
struct mutex lock;
const char *name;
void *arg;
/* heap base */
phys_addr_t base;
/* heap size */
size_t len;
size_t free_size;
struct device *cma_dev;
struct device *dma_dev;
bool is_ivm;
int numa_node_id;
bool can_alloc; /* Used only if is_ivm == true */
unsigned int peer; /* Used only if is_ivm == true */
unsigned int vm_id; /* Used only if is_ivm == true */
struct nvmap_pm_ops pm_ops;
#ifdef NVMAP_CONFIG_DEBUG_MAPS
struct rb_root device_names;
#endif /* NVMAP_CONFIG_DEBUG_MAPS */
struct debugfs_info *carevout_debugfs_info; /* Used for storing debugfs info */
};
struct list_block {
struct nvmap_heap_block block;
struct list_head all_list;
unsigned int mem_prot;
phys_addr_t orig_addr;
size_t size;
size_t align;
struct nvmap_heap *heap;
struct list_head free_list;
};
#endif


@@ -36,7 +36,6 @@
#include "nvmap_ioctl.h"
#include "nvmap_priv.h"
#include "nvmap_alloc.h"
#include "nvmap_heap.h"
#include <linux/syscalls.h>
#include <linux/nodemask.h>
@@ -823,7 +822,7 @@ int nvmap_ioctl_create_from_ivc(struct file *filp, void __user *arg)
ref->handle->heap_pgalloc = false;
ref->handle->ivm_id = op.ivm_id;
ref->handle->carveout = block;
block->handle = ref->handle;
nvmap_set_heap_block_handle(block, ref->handle);
mb();
ref->handle->alloc = true;
NVMAP_TAG_TRACE(trace_nvmap_alloc_handle_done,
@@ -1377,10 +1376,11 @@ static int nvmap_query_heap_params(void __user *arg, bool is_numa_aware)
for (i = 0; i < nvmap_dev->nr_carveouts; i++) {
if ((type & nvmap_dev->heaps[i].heap_bit) &&
(is_numa_aware ?
(numa_id == nvmap_dev->heaps[i].carveout->numa_node_id) : true)) {
(numa_id == nvmap_get_heap_nid(nvmap_dev->heaps[i].carveout)) :
true)) {
heap = nvmap_dev->heaps[i].carveout;
op.total = nvmap_query_heap_size(heap);
op.free = heap->free_size;
op.free = nvmap_get_heap_free_size(heap);
break;
}
}


@@ -37,7 +37,6 @@
#ifndef CONFIG_ARM64
#include <asm/outercache.h>
#endif
#include "nvmap_heap.h"
#include "nvmap_stats.h"
#include <linux/fdtable.h>
@@ -70,7 +69,7 @@
handle, \
atomic_read(&handle->share_count), \
handle->heap_type == NVMAP_HEAP_IOVMM ? 0 : \
(handle->carveout ? handle->carveout->base : 0), \
(handle->carveout ? nvmap_get_heap_block_base(handle->carveout) : 0), \
handle->size, \
(handle->userflags & 0xFFFF), \
(handle->userflags >> 16), \