video: tegra: nvmap: Move items to correct nvmap unit
- Move macro definitions from nvmap_priv.h to nvmap_alloc unit wherever
  required.
- Cleanup unnecessary macros.
- Add function to cleanup the memory allocated for debugfs_info for iovmm.
  This was missed in the previous patch where the allocation for debugfs_info
  is moved to dynamic memory allocation.
- Move nvmap page pool related data structs from nvmap_priv to nvmap_alloc
  unit.

JIRA TMM-5621

Change-Id: I3b668b2d6182da1bf0d2034c66834efc02d3179f
Signed-off-by: Ketan Patil <ketanp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3203118
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
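The structural change behind most of the hunks below is that struct nvmap_device now holds a pointer to its page pool instead of an embedded struct nvmap_page_pool, so nvmap_page_pool_init() allocates the pool, nvmap_page_pool_fini() frees it, and every nvmap_dev->pool.field access becomes nvmap_dev->pool->field. A condensed, hypothetical userspace sketch of that pattern (simplified stand-in types and names, not the driver sources) follows.

#include <stdlib.h>

/* Simplified stand-ins for the driver types (illustration only). */
struct page_pool {
	unsigned int count;	/* pages currently held */
	unsigned int max;	/* pool size limit */
};

struct device_ctx {
	struct page_pool *pool;	/* was: struct page_pool pool; (embedded) */
};

/* Mirrors the shape of nvmap_page_pool_init(): allocate the pool dynamically. */
static int ctx_pool_init(struct device_ctx *dev)
{
	dev->pool = calloc(1, sizeof(*dev->pool));	/* kzalloc() in the kernel */
	if (dev->pool == NULL)
		return -1;				/* -ENOMEM in the kernel */
	dev->pool->max = 128;
	return 0;
}

/* Mirrors the shape of nvmap_page_pool_fini(): free and clear the pointer. */
static void ctx_pool_fini(struct device_ctx *dev)
{
	if (dev->pool != NULL) {
		free(dev->pool);
		dev->pool = NULL;
	}
}

The same indirection is why the debugfs hunks below pass &nvmap_dev->pool->count rather than &nvmap_dev->pool.count.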
@@ -23,7 +23,6 @@
 bool nvmap_convert_carveout_to_iovmm;
 bool nvmap_convert_iovmm_to_carveout;
 
-u32 nvmap_max_handle_count;
 u64 nvmap_big_page_allocs;
 u64 nvmap_total_page_allocs;
 
@@ -124,9 +123,9 @@ static int handle_page_alloc(struct nvmap_client *client,
 #ifdef CONFIG_ARM64_4K_PAGES
 #ifdef NVMAP_CONFIG_PAGE_POOLS
 	/* Get as many big pages from the pool as possible. */
-	page_index = nvmap_page_pool_alloc_lots_bp(&nvmap_dev->pool, pages,
+	page_index = nvmap_page_pool_alloc_lots_bp(nvmap_dev->pool, pages,
 						nr_page, true, h->numa_id);
-	pages_per_big_pg = nvmap_dev->pool.pages_per_big_pg;
+	pages_per_big_pg = nvmap_dev->pool->pages_per_big_pg;
 #endif
 	/* Try to allocate big pages from page allocator */
 	for (i = page_index;
@@ -154,7 +153,7 @@ static int handle_page_alloc(struct nvmap_client *client,
 #ifdef NVMAP_CONFIG_PAGE_POOLS
 	/* Get as many pages from the pool as possible. */
 	page_index += nvmap_page_pool_alloc_lots(
-				&nvmap_dev->pool, &pages[page_index],
+				nvmap_dev->pool, &pages[page_index],
 				nr_page - page_index, true, h->numa_id);
 #endif
 	allocated = page_index;
@@ -553,7 +552,7 @@ void _nvmap_handle_free(struct nvmap_handle *h)
 
 #ifdef NVMAP_CONFIG_PAGE_POOLS
 	if (!h->from_va && !h->is_subhandle)
-		page_index = nvmap_page_pool_fill_lots(&nvmap_dev->pool,
+		page_index = nvmap_page_pool_fill_lots(nvmap_dev->pool,
 					h->pgalloc.pages, nr_page);
 #endif
 
@@ -85,6 +85,8 @@ int nvmap_get_debug_info_nid(struct debugfs_info *info);
 
 struct debugfs_info *nvmap_create_debugfs_info(void);
 
+void nvmap_free_debugfs_info(struct debugfs_info *info);
+
 void nvmap_set_debugfs_heap(struct debugfs_info *info, unsigned int heap_bit);
 
 void nvmap_set_debugfs_numa(struct debugfs_info *info, int nid);
@@ -4,6 +4,14 @@
 #ifndef __NVMAP_ALLOC_INT_H
 #define __NVMAP_ALLOC_INT_H
 
+#define DMA_ERROR_CODE (~(dma_addr_t)0)
+
+#define GFP_NVMAP (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
+
+#ifdef CONFIG_ARM64_4K_PAGES
+#define NVMAP_PP_BIG_PAGE_SIZE (0x10000)
+#endif /* CONFIG_ARM64_4K_PAGES */
+
 struct nvmap_heap_block {
 	phys_addr_t base;
 	unsigned int type;
@@ -54,6 +62,32 @@ struct list_block {
 	struct list_head free_list;
 };
 
+
+struct nvmap_page_pool {
+	struct rt_mutex lock;
+	u32 count;  /* Number of pages in the page & dirty list. */
+	u32 max;    /* Max no. of pages in all lists. */
+	u32 to_zero; /* Number of pages on the zero list */
+	u32 under_zero; /* Number of pages getting zeroed */
+#ifdef CONFIG_ARM64_4K_PAGES
+	u32 big_pg_sz;  /* big page size supported(64k, etc.) */
+	u32 big_page_count; /* Number of zeroed big pages avaialble */
+	u32 pages_per_big_pg; /* Number of pages in big page */
+#endif /* CONFIG_ARM64_4K_PAGES */
+	struct list_head page_list;
+	struct list_head zero_list;
+#ifdef CONFIG_ARM64_4K_PAGES
+	struct list_head page_list_bp;
+#endif /* CONFIG_ARM64_4K_PAGES */
+
+#ifdef NVMAP_CONFIG_PAGE_POOL_DEBUG
+	u64 allocs;
+	u64 fills;
+	u64 hits;
+	u64 misses;
+#endif
+};
+
 int nvmap_cache_maint_phys_range(unsigned int op, phys_addr_t pstart,
 			phys_addr_t pend, int inner, int outer);
 
@@ -22,6 +22,8 @@ __weak struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
 #include "nvmap_alloc.h"
+#include "nvmap_alloc_int.h"
 
+extern void __clean_dcache_area_poc(void *addr, size_t len);
 
 /*
  * FIXME:
  *
@@ -54,6 +54,8 @@ struct nvmap_device *nvmap_dev;
 EXPORT_SYMBOL(nvmap_dev);
 ulong nvmap_init_time;
 
+struct debugfs_info *iovmm_debugfs_info;
+
 static struct device_dma_parameters nvmap_dma_parameters = {
 	.max_segment_size = UINT_MAX,
 };
@@ -1303,7 +1305,7 @@ DEBUGFS_OPEN_FOPS(iovmm_procrank);
 static void nvmap_iovmm_debugfs_init(void)
 {
 	if (!IS_ERR_OR_NULL(nvmap_dev->debug_root)) {
-		struct debugfs_info *iovmm_debugfs_info = nvmap_create_debugfs_info();
+		iovmm_debugfs_info = nvmap_create_debugfs_info();
 		if (iovmm_debugfs_info != NULL) {
 			struct dentry *iovmm_root =
 				debugfs_create_dir("iovmm", nvmap_dev->debug_root);
@@ -1345,6 +1347,11 @@ static void nvmap_iovmm_debugfs_init(void)
 	}
 }
 
+static void nvmap_iovmm_debugfs_free(void)
+{
+	nvmap_free_debugfs_info(iovmm_debugfs_info);
+}
+
 static bool nvmap_is_iommu_present(void)
 {
 	struct device_node *np;
@@ -1490,6 +1497,7 @@ fail_sci_ipc:
 	nvmap_sci_ipc_exit();
 fail_heaps:
 	debugfs_remove_recursive(nvmap_dev->debug_root);
+	nvmap_iovmm_debugfs_free();
 	for (i = 0; i < dev->nr_carveouts; i++) {
 		struct nvmap_carveout_node *node = &dev->heaps[i];
 		nvmap_heap_destroy(node->carveout);
@@ -1522,6 +1530,7 @@ int nvmap_remove(struct platform_device *pdev)
 #endif
 	nvmap_dmabuf_stash_deinit();
 	debugfs_remove_recursive(dev->debug_root);
+	nvmap_iovmm_debugfs_free();
 	misc_deregister(&dev->dev_user);
 #ifdef NVMAP_CONFIG_PAGE_POOLS
 	nvmap_page_pool_clear();
@@ -30,6 +30,7 @@
 #include "nvmap_alloc.h"
 #include "nvmap_dmabuf.h"
 
+u32 nvmap_max_handle_count;
 /*
  * Verifies that the passed ID is a valid handle ID. Then the passed client's
  * reference to the handle is returned.
@@ -540,6 +540,12 @@ struct debugfs_info *nvmap_create_debugfs_info(void)
 	return info;
 }
 
+void nvmap_free_debugfs_info(struct debugfs_info *info)
+{
+	if (info != NULL)
+		kfree(info);
+}
+
 void nvmap_set_debugfs_heap(struct debugfs_info *info, unsigned int heap_bit)
 {
 	info->heap_bit = heap_bit;
@@ -30,12 +30,23 @@
 #define NVMAP_TEST_PAGE_POOL_SHRINKER 1
 #define PENDING_PAGES_SIZE (SZ_1M / PAGE_SIZE)
 
+extern u64 nvmap_big_page_allocs;
+extern u64 nvmap_total_page_allocs;
+
 static bool enable_pp = 1;
 static u32 pool_size;
 
 static struct task_struct *background_allocator;
 static DECLARE_WAIT_QUEUE_HEAD(nvmap_bg_wait);
 
+/*
+ * This is the default ratio defining pool size. It can be thought of as pool
+ * size in either MB per GB or KB per MB. That means the max this number can
+ * be is 1024 (all physical memory - not a very good idea) or 0 (no page pool
+ * at all).
+ */
+#define NVMAP_PP_POOL_SIZE (128)
+
 #ifdef NVMAP_CONFIG_PAGE_POOL_DEBUG
 static inline void __pp_dbg_var_add(u64 *dbg_var, u32 nr)
 {
@@ -278,7 +289,7 @@ static void nvmap_pp_do_background_zero_pages(struct nvmap_page_pool *pool)
  */
 static int nvmap_background_zero_thread(void *arg)
 {
-	struct nvmap_page_pool *pool = &nvmap_dev->pool;
+	struct nvmap_page_pool *pool = nvmap_dev->pool;
 
 	pr_info("PP zeroing thread starting.\n");
 
@@ -500,7 +511,7 @@ static ulong nvmap_page_pool_get_unused_pages(void)
 	if (!nvmap_dev)
 		return 0;
 
-	total = nvmap_dev->pool.count + nvmap_dev->pool.to_zero;
+	total = nvmap_dev->pool->count + nvmap_dev->pool->to_zero;
 
 	return total;
 }
@@ -511,7 +522,7 @@ static ulong nvmap_page_pool_get_unused_pages(void)
  */
 int nvmap_page_pool_clear(void)
 {
-	struct nvmap_page_pool *pool = &nvmap_dev->pool;
+	struct nvmap_page_pool *pool = nvmap_dev->pool;
 
 	rt_mutex_lock(&pool->lock);
 
@@ -562,10 +573,10 @@ static unsigned long nvmap_page_pool_scan_objects(struct shrinker *shrinker,
 
 	pr_debug("sh_pages=%lu", sc->nr_to_scan);
 
-	rt_mutex_lock(&nvmap_dev->pool.lock);
+	rt_mutex_lock(&nvmap_dev->pool->lock);
 	remaining = nvmap_page_pool_free_pages_locked(
-				&nvmap_dev->pool, sc->nr_to_scan);
-	rt_mutex_unlock(&nvmap_dev->pool.lock);
+				nvmap_dev->pool, sc->nr_to_scan);
+	rt_mutex_unlock(&nvmap_dev->pool->lock);
 
 	return (remaining == sc->nr_to_scan) ? \
 		SHRINK_STOP : (sc->nr_to_scan - remaining);
@@ -660,8 +671,8 @@ static int pool_size_set(const char *arg, const struct kernel_param *kp)
 {
 	int ret = param_set_uint(arg, kp);
 
-	if (!ret && (pool_size != nvmap_dev->pool.max))
-		nvmap_page_pool_resize(&nvmap_dev->pool, pool_size);
+	if (!ret && (pool_size != nvmap_dev->pool->max))
+		nvmap_page_pool_resize(nvmap_dev->pool, pool_size);
 
 	return ret;
 }
@@ -691,17 +702,17 @@ int nvmap_page_pool_debugfs_init(struct dentry *nvmap_root)
 
 	debugfs_create_u32("page_pool_available_pages",
 			    S_IRUGO, pp_root,
-			    &nvmap_dev->pool.count);
+			    &nvmap_dev->pool->count);
 	debugfs_create_u32("page_pool_pages_to_zero",
 			    S_IRUGO, pp_root,
-			    &nvmap_dev->pool.to_zero);
+			    &nvmap_dev->pool->to_zero);
 #ifdef CONFIG_ARM64_4K_PAGES
 	debugfs_create_u32("page_pool_available_big_pages",
 			    S_IRUGO, pp_root,
-			    &nvmap_dev->pool.big_page_count);
+			    &nvmap_dev->pool->big_page_count);
 	debugfs_create_u32("page_pool_big_page_size",
 			    S_IRUGO, pp_root,
-			    &nvmap_dev->pool.big_pg_sz);
+			    &nvmap_dev->pool->big_pg_sz);
 	debugfs_create_u64("total_big_page_allocs",
 			    S_IRUGO, pp_root,
 			    &nvmap_big_page_allocs);
@@ -713,16 +724,16 @@ int nvmap_page_pool_debugfs_init(struct dentry *nvmap_root)
 #ifdef NVMAP_CONFIG_PAGE_POOL_DEBUG
 	debugfs_create_u64("page_pool_allocs",
 			    S_IRUGO, pp_root,
-			    &nvmap_dev->pool.allocs);
+			    &nvmap_dev->pool->allocs);
 	debugfs_create_u64("page_pool_fills",
 			    S_IRUGO, pp_root,
-			    &nvmap_dev->pool.fills);
+			    &nvmap_dev->pool->fills);
 	debugfs_create_u64("page_pool_hits",
 			    S_IRUGO, pp_root,
-			    &nvmap_dev->pool.hits);
+			    &nvmap_dev->pool->hits);
 	debugfs_create_u64("page_pool_misses",
 			    S_IRUGO, pp_root,
-			    &nvmap_dev->pool.misses);
+			    &nvmap_dev->pool->misses);
 #endif
 
 	return 0;
@@ -731,8 +742,13 @@ int nvmap_page_pool_debugfs_init(struct dentry *nvmap_root)
 int nvmap_page_pool_init(struct nvmap_device *dev)
 {
 	struct sysinfo info;
-	struct nvmap_page_pool *pool = &dev->pool;
+	struct nvmap_page_pool *pool;
 
+	dev->pool = kzalloc(sizeof(*dev->pool), GFP_KERNEL);
+	if (dev->pool == NULL)
+		goto fail_mem;
+
+	pool = dev->pool;
 	memset(pool, 0x0, sizeof(*pool));
 	rt_mutex_init(&pool->lock);
 	INIT_LIST_HEAD(&pool->page_list);
@@ -787,13 +803,12 @@ int nvmap_page_pool_init(struct nvmap_device *dev)
 	return 0;
 fail:
 	nvmap_page_pool_fini(dev);
+fail_mem:
 	return -ENOMEM;
 }
 
 int nvmap_page_pool_fini(struct nvmap_device *dev)
 {
-	struct nvmap_page_pool *pool = &dev->pool;
-
 	/*
 	 * if background allocator is not initialzed or not
 	 * properly initialized, then shrinker is also not
@@ -801,8 +816,10 @@ int nvmap_page_pool_fini(struct nvmap_device *dev)
 	 */
 	if (!IS_ERR_OR_NULL(background_allocator)) {
 #if defined(NV_SHRINKER_ALLOC_PRESENT) /* Linux 6.7 */
-		shrinker_free(nvmap_page_pool_shrinker);
-		nvmap_page_pool_shrinker = NULL;
+		if (nvmap_page_pool_shrinker != NULL) {
+			shrinker_free(nvmap_page_pool_shrinker);
+			nvmap_page_pool_shrinker = NULL;
+		}
 #else
 		unregister_shrinker(&nvmap_page_pool_shrinker);
 #endif
@@ -810,7 +827,10 @@ int nvmap_page_pool_fini(struct nvmap_device *dev)
 		background_allocator = NULL;
 	}
 
-	WARN_ON(!list_empty(&pool->page_list));
-
+	if (dev->pool != NULL) {
+		WARN_ON(!list_empty(&dev->pool->page_list));
+		kfree(dev->pool);
+		dev->pool = NULL;
+	}
 	return 0;
 }
@@ -44,8 +44,6 @@
 #define SIZE_2MB 0x200000
 #define ALIGN_2MB(size) ((size + SIZE_2MB - 1) & ~(SIZE_2MB - 1))
 
-#define DMA_ERROR_CODE (~(dma_addr_t)0)
-
 #define __DMA_ATTR(attrs) attrs
 #define DEFINE_DMA_ATTRS(attrs) unsigned long attrs = 0
 
@@ -90,8 +88,6 @@ do { \
 	} \
 } while (0)
 
-#define GFP_NVMAP (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
-
 /*
  * DMA_ATTR_ALLOC_EXACT_SIZE: This tells the DMA-mapping
  * subsystem to allocate the exact number of pages
@@ -100,19 +96,6 @@ do { \
 
 #define DMA_MEMORY_NOMAP 0x02
 
-/*
- * DMA_ATTR_READ_ONLY: for DMA memory allocations, attempt to map
- * memory as read-only for the device. CPU access will still be
- * read-write. This corresponds to the direction being DMA_TO_DEVICE
- * instead of DMA_BIDIRECTIONAL.
- */
-#define DMA_ATTR_READ_ONLY (DMA_ATTR_PRIVILEGED << 12)
-
-/* DMA_ATTR_WRITE_ONLY: This tells the DMA-mapping subsystem
- * to map as write-only
- */
-#define DMA_ATTR_WRITE_ONLY (DMA_ATTR_PRIVILEGED << 13)
-
 #define DMA_ALLOC_FREE_ATTR DMA_ATTR_ALLOC_SINGLE_PAGES
 #define ACCESS_OK(type, addr, size) access_ok(addr, size)
 #define SYS_CLOSE(arg) close_fd(arg)
@@ -122,8 +105,6 @@ struct nvmap_device;
 
 /* holds max number of handles allocted per process at any time */
 extern u32 nvmap_max_handle_count;
-extern u64 nvmap_big_page_allocs;
-extern u64 nvmap_total_page_allocs;
 
 extern bool nvmap_convert_iovmm_to_carveout;
 extern bool nvmap_convert_carveout_to_iovmm;
@@ -132,16 +113,8 @@ extern struct vm_operations_struct nvmap_vma_ops;
 
 #ifdef CONFIG_ARM64
 #define PG_PROT_KERNEL PAGE_KERNEL
-#define outer_flush_range(s, e)
-#define outer_inv_range(s, e)
-#define outer_clean_range(s, e)
-#define outer_flush_all()
-#define outer_clean_all()
-extern void __clean_dcache_page(struct page *);
-extern void __clean_dcache_area_poc(void *addr, size_t len);
 #else
 #define PG_PROT_KERNEL pgprot_kernel
-extern void __flush_dcache_page(struct address_space *, struct page *);
 #endif
 
 struct nvmap_vma_list {
@@ -276,44 +249,6 @@ struct nvmap_handle_ref {
 	bool is_ro;
 };
 
-#if defined(NVMAP_CONFIG_PAGE_POOLS)
-/*
- * This is the default ratio defining pool size. It can be thought of as pool
- * size in either MB per GB or KB per MB. That means the max this number can
- * be is 1024 (all physical memory - not a very good idea) or 0 (no page pool
- * at all).
- */
-#define NVMAP_PP_POOL_SIZE (128)
-
-#ifdef CONFIG_ARM64_4K_PAGES
-#define NVMAP_PP_BIG_PAGE_SIZE (0x10000)
-#endif /* CONFIG_ARM64_4K_PAGES */
-struct nvmap_page_pool {
-	struct rt_mutex lock;
-	u32 count;  /* Number of pages in the page & dirty list. */
-	u32 max;    /* Max no. of pages in all lists. */
-	u32 to_zero; /* Number of pages on the zero list */
-	u32 under_zero; /* Number of pages getting zeroed */
-#ifdef CONFIG_ARM64_4K_PAGES
-	u32 big_pg_sz;  /* big page size supported(64k, etc.) */
-	u32 big_page_count; /* Number of zeroed big pages avaialble */
-	u32 pages_per_big_pg; /* Number of pages in big page */
-#endif /* CONFIG_ARM64_4K_PAGES */
-	struct list_head page_list;
-	struct list_head zero_list;
-#ifdef CONFIG_ARM64_4K_PAGES
-	struct list_head page_list_bp;
-#endif /* CONFIG_ARM64_4K_PAGES */
-
-#ifdef NVMAP_CONFIG_PAGE_POOL_DEBUG
-	u64 allocs;
-	u64 fills;
-	u64 hits;
-	u64 misses;
-#endif
-};
-#endif
-
 #define NVMAP_IVM_INVALID_PEER (-1)
 
 struct nvmap_client {
@@ -346,7 +281,7 @@ struct nvmap_device {
 	int nr_heaps;
 	int nr_carveouts;
 #ifdef NVMAP_CONFIG_PAGE_POOLS
-	struct nvmap_page_pool pool;
+	struct nvmap_page_pool *pool;
 #endif
 	struct list_head clients;
 	struct rb_root pids;