video: tegra: nvmap: Add header files for nvmap_alloc unit
As part of the nvmap refactoring, add the nvmap_alloc.h file, which contains declarations for the functions that the nvmap_alloc unit exposes to other units. Also add the nvmap_alloc_int.h file, which contains declarations for the functions that are internal to the nvmap_alloc unit and may only be called from files within that unit.

JIRA TMM-5621

Change-Id: Ie30e5e8a4f87591eb9c49a0a349f837a22726fa5
Signed-off-by: Ketan Patil <ketanp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3198546
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
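For context, a minimal sketch of the include discipline this split is meant to enforce (the caller function below is an illustrative assumption, not part of this change): sources that belong to the nvmap_alloc unit include both headers, while sources in other nvmap units include only the public nvmap_alloc.h.

/* A source file inside the nvmap_alloc unit -- sketch only */
#include "nvmap_priv.h"
#include "nvmap_alloc.h"      /* declarations exposed to other nvmap units */
#include "nvmap_alloc_int.h"  /* declarations private to the nvmap_alloc unit */

/* A source file in any other nvmap unit -- sketch only */
#include "nvmap_priv.h"
#include "nvmap_alloc.h"

/* Hypothetical caller that needs only the public allocator API */
static struct page **copy_page_array(struct page **src, u32 nr)
{
        /* nvmap_pages() is declared in nvmap_alloc.h; the returned array
         * is released with nvmap_altfree(), also from nvmap_alloc.h. */
        return nvmap_pages(src, nr);
}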
@@ -16,6 +16,8 @@

#include <linux/libnvdimm.h>
#include "nvmap_priv.h"
#include "nvmap_alloc.h"
#include "nvmap_alloc_int.h"

bool nvmap_convert_carveout_to_iovmm;
bool nvmap_convert_iovmm_to_carveout;

@@ -49,6 +51,26 @@ void nvmap_altfree(void *ptr, size_t len)
        kfree(ptr);
}

struct page *nvmap_to_page(struct page *page)
{
        return (struct page *)((unsigned long)page & ~3UL);
}

struct page **nvmap_pages(struct page **pg_pages, u32 nr_pages)
{
        struct page **pages;
        int i;

        pages = nvmap_altalloc(sizeof(*pages) * nr_pages);
        if (pages == NULL)
                return NULL;

        for (i = 0; i < nr_pages; i++)
                pages[i] = nvmap_to_page(pg_pages[i]);

        return pages;
}

static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size, int numa_id)
{
        struct page *page, *p, *e;
drivers/video/tegra/nvmap/nvmap_alloc.h (new file, 67 lines)
@@ -0,0 +1,67 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef __NVMAP_ALLOC_H
#define __NVMAP_ALLOC_H

void *nvmap_altalloc(size_t len);

void nvmap_altfree(void *ptr, size_t len);

int nvmap_alloc_handle(struct nvmap_client *client,
                       struct nvmap_handle *h, unsigned int heap_mask,
                       size_t align, u8 kind,
                       unsigned int flags, unsigned int peer);

int nvmap_alloc_handle_from_va(struct nvmap_client *client,
                               struct nvmap_handle *h,
                               ulong addr,
                               unsigned int flags,
                               unsigned int heap_mask);

void _nvmap_handle_free(struct nvmap_handle *h);

int __nvmap_cache_maint(struct nvmap_client *client,
                        struct nvmap_cache_op_64 *op);

int __nvmap_do_cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
                           unsigned long start, unsigned long end,
                           unsigned int op, bool clean_only_dirty);

void inner_cache_maint(unsigned int op, void *vaddr, size_t size);

struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev,
                                              struct nvmap_handle *handle,
                                              unsigned long type,
                                              phys_addr_t *start);

int nvmap_create_carveout(const struct nvmap_platform_carveout *co);

int nvmap_query_heap_peer(struct nvmap_heap *heap, unsigned int *peer);

size_t nvmap_query_heap_size(struct nvmap_heap *heap);

struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b);

void nvmap_heap_free(struct nvmap_heap_block *block);

void nvmap_heap_destroy(struct nvmap_heap *heap);

int __init nvmap_heap_init(void);

void nvmap_heap_deinit(void);

struct page **nvmap_pages(struct page **pg_pages, u32 nr_pages);

struct page *nvmap_to_page(struct page *page);

#ifdef NVMAP_CONFIG_PAGE_POOLS
int nvmap_page_pool_clear(void);

int nvmap_page_pool_debugfs_init(struct dentry *nvmap_root);

int nvmap_page_pool_init(struct nvmap_device *dev);

int nvmap_page_pool_fini(struct nvmap_device *dev);
#endif /* NVMAP_CONFIG_PAGE_POOLS */
#endif /* __NVMAP_ALLOC_H */
drivers/video/tegra/nvmap/nvmap_alloc_int.h (new file, 38 lines)
@@ -0,0 +1,38 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef __NVMAP_ALLOC_INT_H
#define __NVMAP_ALLOC_INT_H

int nvmap_cache_maint_phys_range(unsigned int op, phys_addr_t pstart,
                                 phys_addr_t pend, int inner, int outer);

void nvmap_clean_cache(struct page **pages, int numpages);

void nvmap_clean_cache_page(struct page *page);

void __dma_map_area(const void *cpu_va, size_t size, int dir);

void nvmap_heap_debugfs_init(struct dentry *heap_root, struct nvmap_heap *heap);

struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *heap,
                                          struct nvmap_handle *handle,
                                          phys_addr_t *start);

struct nvmap_heap *nvmap_heap_create(struct device *parent,
                                     const struct nvmap_platform_carveout *co,
                                     phys_addr_t base, size_t len, void *arg);

#ifdef NVMAP_CONFIG_PAGE_POOLS
int nvmap_page_pool_alloc_lots(struct nvmap_page_pool *pool,
                               struct page **pages, u32 nr, bool use_numa, int numa_id);

#ifdef CONFIG_ARM64_4K_PAGES
int nvmap_page_pool_alloc_lots_bp(struct nvmap_page_pool *pool,
                                  struct page **pages, u32 nr, bool use_numa, int numa_id);
#endif /* CONFIG_ARM64_4K_PAGES */

u32 nvmap_page_pool_fill_lots(struct nvmap_page_pool *pool,
                              struct page **pages, u32 nr);
#endif /* NVMAP_CONFIG_PAGE_POOLS */
#endif /* __NVMAP_ALLOC_INT_H */
@@ -19,6 +19,8 @@ __weak struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
#include <trace/events/nvmap.h>

#include "nvmap_priv.h"
#include "nvmap_alloc.h"
#include "nvmap_alloc_int.h"

/*
 * FIXME:

@@ -10,6 +10,8 @@
#include <soc/tegra/fuse-helper.h>

#include "nvmap_priv.h"
#include "nvmap_alloc.h"
#include "nvmap_alloc_int.h"

bool vpr_cpu_access;

@@ -24,6 +24,7 @@
#include <linux/libnvdimm.h>

#include "nvmap_priv.h"
#include "nvmap_alloc.h"

static phys_addr_t handle_phys(struct nvmap_handle *h)
{

@@ -43,6 +43,7 @@
#include <trace/events/nvmap.h>

#include "nvmap_priv.h"
#include "nvmap_alloc.h"
#include "nvmap_heap.h"
#include "nvmap_ioctl.h"
#include <linux/pagewalk.h>

@@ -32,6 +32,7 @@

#include "nvmap_priv.h"
#include "nvmap_ioctl.h"
#include "nvmap_alloc.h"

#define NVMAP_DMABUF_ATTACH nvmap_dmabuf_attach

@@ -9,6 +9,7 @@
#include <linux/highmem.h>

#include "nvmap_priv.h"
#include "nvmap_alloc.h"

static void nvmap_vma_close(struct vm_area_struct *vma);

@@ -26,6 +26,8 @@
#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include "nvmap_priv.h"
#include "nvmap_alloc.h"
#include "nvmap_alloc_int.h"
#include "nvmap_heap.h"
#include "include/linux/nvmap_exports.h"
@@ -61,32 +61,4 @@ struct list_block {
        struct list_head free_list;
};

struct nvmap_heap *nvmap_heap_create(struct device *parent,
                                     const struct nvmap_platform_carveout *co,
                                     phys_addr_t base, size_t len, void *arg);

void nvmap_heap_destroy(struct nvmap_heap *heap);

struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *heap,
                                          struct nvmap_handle *handle,
                                          phys_addr_t *start);

struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b);

void nvmap_heap_free(struct nvmap_heap_block *block);

int __init nvmap_heap_init(void);

void nvmap_heap_deinit(void);

#ifndef NVMAP_CONFIG_CACHE_FLUSH_AT_ALLOC
int nvmap_flush_heap_block(struct nvmap_client *client,
                           struct nvmap_heap_block *block, size_t len, unsigned int prot);
#endif /* !NVMAP_CONFIG_CACHE_FLUSH_AT_ALLOC */

void nvmap_heap_debugfs_init(struct dentry *heap_root, struct nvmap_heap *heap);

int nvmap_query_heap_peer(struct nvmap_heap *heap, unsigned int *peer);
size_t nvmap_query_heap_size(struct nvmap_heap *heap);

#endif

@@ -23,6 +23,7 @@
#include "include/linux/nvmap_exports.h"

#include "nvmap_priv.h"
#include "nvmap_alloc.h"

#ifdef CONFIG_TEGRA_VIRTUALIZATION
#include <soc/tegra/virt/hv-ivc.h>

@@ -35,6 +35,7 @@

#include "nvmap_ioctl.h"
#include "nvmap_priv.h"
#include "nvmap_alloc.h"
#include "nvmap_heap.h"

#include <linux/syscalls.h>

@@ -24,6 +24,8 @@
#include <trace/events/nvmap.h>

#include "nvmap_priv.h"
#include "nvmap_alloc.h"
#include "nvmap_alloc_int.h"

#define NVMAP_TEST_PAGE_POOL_SHRINKER 1
#define PENDING_PAGES_SIZE (SZ_1M / PAGE_SIZE)
@@ -48,8 +50,88 @@ static inline void __pp_dbg_var_add(u64 *dbg_var, u32 nr)
#define pp_hit_add(pool, nr) __pp_dbg_var_add(&(pool)->hits, nr)
#define pp_miss_add(pool, nr) __pp_dbg_var_add(&(pool)->misses, nr)

#ifdef NVMAP_CONFIG_PAGE_POOL_DEBUG
static void nvmap_pgcount(struct page *page, bool incr)
{
        page_ref_add(page, incr ? 1 : -1);
}
#endif /* NVMAP_CONFIG_PAGE_POOL_DEBUG */

#ifdef CONFIG_ARM64_4K_PAGES
static bool nvmap_is_big_page(struct nvmap_page_pool *pool,
                              struct page **pages, int idx, int nr)
{
        int i;
        struct page *page = pages[idx];

        if (pool->pages_per_big_pg <= 1)
                return false;

        if (nr - idx < pool->pages_per_big_pg)
                return false;

        /* Allow coalescing pages at big page boundary only */
        if (page_to_phys(page) & (pool->big_pg_sz - 1))
                return false;

        for (i = 1; i < pool->pages_per_big_pg; i++)
                if (pages[idx + i] != nth_page(page, i))
                        break;

        return i == pool->pages_per_big_pg ? true : false;
}
#endif

/*
 * Fill a bunch of pages into the page pool. This will fill as many as it can
 * and return the number of pages filled. Pages are used from the start of the
 * passed page pointer array in a linear fashion.
 *
 * You must lock the page pool before using this.
 */
static int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
                                              struct page **pages, u32 nr);
                                              struct page **pages, u32 nr)
{
        int real_nr;
        int pages_to_fill;
        int ind = 0;

        if (!enable_pp)
                return 0;

        BUG_ON(pool->count > pool->max);
        real_nr = min_t(u32, pool->max - pool->count, nr);
        pages_to_fill = real_nr;
        if (real_nr == 0)
                return 0;

        while (real_nr > 0) {
#ifdef NVMAP_CONFIG_PAGE_POOL_DEBUG
                nvmap_pgcount(pages[ind], true);
                BUG_ON(page_count(pages[ind]) != 2);
#endif /* NVMAP_CONFIG_PAGE_POOL_DEBUG */

#ifdef CONFIG_ARM64_4K_PAGES
                if (nvmap_is_big_page(pool, pages, ind, pages_to_fill)) {
                        list_add_tail(&pages[ind]->lru, &pool->page_list_bp);
                        ind += pool->pages_per_big_pg;
                        real_nr -= pool->pages_per_big_pg;
                        pool->big_page_count += pool->pages_per_big_pg;
                } else {
#endif /* CONFIG_ARM64_4K_PAGES */
                        list_add_tail(&pages[ind++]->lru, &pool->page_list);
                        real_nr--;
#ifdef CONFIG_ARM64_4K_PAGES
                }
#endif /* CONFIG_ARM64_4K_PAGES */
        }

        pool->count += ind;
        BUG_ON(pool->count > pool->max);
        pp_fill_add(pool, ind);

        return ind;
}

static inline struct page *get_zero_list_page(struct nvmap_page_pool *pool, bool use_numa,
                                              int numa_id)
@@ -215,13 +297,6 @@ static int nvmap_background_zero_thread(void *arg)
        return 0;
}

#ifdef NVMAP_CONFIG_PAGE_POOL_DEBUG
static void nvmap_pgcount(struct page *page, bool incr)
{
        page_ref_add(page, incr ? 1 : -1);
}
#endif /* NVMAP_CONFIG_PAGE_POOL_DEBUG */

/*
 * Free the passed number of pages from the page pool. This happens regardless
 * of whether the page pools are enabled. This lets one disable the page pools

@@ -377,82 +452,8 @@ int nvmap_page_pool_alloc_lots_bp(struct nvmap_page_pool *pool,
        trace_nvmap_pp_alloc_lots_bp(ind, nr);
        return ind;
}

static bool nvmap_is_big_page(struct nvmap_page_pool *pool,
                              struct page **pages, int idx, int nr)
{
        int i;
        struct page *page = pages[idx];

        if (pool->pages_per_big_pg <= 1)
                return false;

        if (nr - idx < pool->pages_per_big_pg)
                return false;

        /* Allow coalescing pages at big page boundary only */
        if (page_to_phys(page) & (pool->big_pg_sz - 1))
                return false;

        for (i = 1; i < pool->pages_per_big_pg; i++)
                if (pages[idx + i] != nth_page(page, i))
                        break;

        return i == pool->pages_per_big_pg ? true: false;
}
#endif /* CONFIG_ARM64_4K_PAGES */

/*
 * Fill a bunch of pages into the page pool. This will fill as many as it can
 * and return the number of pages filled. Pages are used from the start of the
 * passed page pointer array in a linear fashion.
 *
 * You must lock the page pool before using this.
 */
static int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
                                              struct page **pages, u32 nr)
{
        int real_nr;
        int pages_to_fill;
        int ind = 0;

        if (!enable_pp)
                return 0;

        BUG_ON(pool->count > pool->max);
        real_nr = min_t(u32, pool->max - pool->count, nr);
        pages_to_fill = real_nr;
        if (real_nr == 0)
                return 0;

        while (real_nr > 0) {
#ifdef NVMAP_CONFIG_PAGE_POOL_DEBUG
                nvmap_pgcount(pages[ind], true);
                BUG_ON(page_count(pages[ind]) != 2);
#endif /* NVMAP_CONFIG_PAGE_POOL_DEBUG */

#ifdef CONFIG_ARM64_4K_PAGES
                if (nvmap_is_big_page(pool, pages, ind, pages_to_fill)) {
                        list_add_tail(&pages[ind]->lru, &pool->page_list_bp);
                        ind += pool->pages_per_big_pg;
                        real_nr -= pool->pages_per_big_pg;
                        pool->big_page_count += pool->pages_per_big_pg;
                } else {
#endif /* CONFIG_ARM64_4K_PAGES */
                        list_add_tail(&pages[ind++]->lru, &pool->page_list);
                        real_nr--;
#ifdef CONFIG_ARM64_4K_PAGES
                }
#endif /* CONFIG_ARM64_4K_PAGES */
        }

        pool->count += ind;
        BUG_ON(pool->count > pool->max);
        pp_fill_add(pool, ind);

        return ind;
}

u32 nvmap_page_pool_fill_lots(struct nvmap_page_pool *pool,
                              struct page **pages, u32 nr)
{

@@ -492,7 +493,7 @@ u32 nvmap_page_pool_fill_lots(struct nvmap_page_pool *pool,
        return ret;
}

ulong nvmap_page_pool_get_unused_pages(void)
static ulong nvmap_page_pool_get_unused_pages(void)
{
        unsigned long total = 0;
@@ -121,7 +121,6 @@ do { \
struct page;
struct nvmap_device;

void _nvmap_handle_free(struct nvmap_handle *h);
/* holds max number of handles allocted per process at any time */
extern u32 nvmap_max_handle_count;
extern u64 nvmap_big_page_allocs;

@@ -134,7 +133,6 @@ extern struct vm_operations_struct nvmap_vma_ops;

#ifdef CONFIG_ARM64
#define PG_PROT_KERNEL PAGE_KERNEL
#define FLUSH_DCACHE_AREA __flush_dcache_area
#define outer_flush_range(s, e)
#define outer_inv_range(s, e)
#define outer_clean_range(s, e)

@@ -144,7 +142,6 @@ extern void __clean_dcache_page(struct page *);
extern void __clean_dcache_area_poc(void *addr, size_t len);
#else
#define PG_PROT_KERNEL pgprot_kernel
#define FLUSH_DCACHE_AREA __cpuc_flush_dcache_area
extern void __flush_dcache_page(struct address_space *, struct page *);
#endif

@@ -316,20 +313,6 @@ struct nvmap_page_pool {
        u64 misses;
#endif
};

int nvmap_page_pool_init(struct nvmap_device *dev);
int nvmap_page_pool_fini(struct nvmap_device *dev);
struct page *nvmap_page_pool_alloc(struct nvmap_page_pool *pool);
int nvmap_page_pool_alloc_lots(struct nvmap_page_pool *pool,
                               struct page **pages, u32 nr, bool use_numa, int numa_id);
#ifdef CONFIG_ARM64_4K_PAGES
int nvmap_page_pool_alloc_lots_bp(struct nvmap_page_pool *pool,
                                  struct page **pages, u32 nr, bool use_numa, int numa_id);
#endif /* CONFIG_ARM64_4K_PAGES */
u32 nvmap_page_pool_fill_lots(struct nvmap_page_pool *pool,
                              struct page **pages, u32 nr);
int nvmap_page_pool_clear(void);
int nvmap_page_pool_debugfs_init(struct dentry *nvmap_root);
#endif

#define NVMAP_IVM_INVALID_PEER (-1)
@@ -457,15 +440,8 @@ int nvmap_probe(struct platform_device *pdev);
int nvmap_remove(struct platform_device *pdev);
int nvmap_init(struct platform_device *pdev);

int nvmap_create_carveout(const struct nvmap_platform_carveout *co);

int nvmap_co_setup(struct reserved_mem *rmem);

struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev,
                                              struct nvmap_handle *handle,
                                              unsigned long type,
                                              phys_addr_t *start);

struct nvmap_carveout_node;

struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h);

@@ -505,20 +481,8 @@ struct nvmap_handle_ref *nvmap_create_handle_from_id(
struct nvmap_handle_ref *nvmap_create_handle_from_fd(
                        struct nvmap_client *client, int fd);

void inner_cache_maint(unsigned int op, void *vaddr, size_t size);
void outer_cache_maint(unsigned int op, phys_addr_t paddr, size_t size);

int nvmap_alloc_handle(struct nvmap_client *client,
                       struct nvmap_handle *h, unsigned int heap_mask,
                       size_t align, u8 kind,
                       unsigned int flags, unsigned int peer);

int nvmap_alloc_handle_from_va(struct nvmap_client *client,
                               struct nvmap_handle *h,
                               ulong addr,
                               unsigned int flags,
                               unsigned int heap_mask);

void nvmap_free_handle(struct nvmap_client *c, struct nvmap_handle *h, bool is_ro);

void nvmap_free_handle_from_fd(struct nvmap_client *c, int fd);

@@ -545,14 +509,7 @@ struct nvmap_handle *nvmap_handle_get_from_fd(int fd);
extern void v7_flush_kern_cache_all(void);
extern void v7_clean_kern_cache_all(void *);

void nvmap_clean_cache(struct page **pages, int numpages);
void nvmap_clean_cache_page(struct page *page);
void nvmap_flush_cache(struct page **pages, int numpages);
int nvmap_cache_maint_phys_range(unsigned int op, phys_addr_t pstart,
                                 phys_addr_t pend, int inner, int outer);

int __nvmap_cache_maint(struct nvmap_client *client,
                        struct nvmap_cache_op_64 *op);

/* Internal API to support dmabuf */
struct dma_buf *__nvmap_make_dmabuf(struct nvmap_client *client,

@@ -564,9 +521,6 @@ void __nvmap_free_sg_table(struct nvmap_client *client,
void *__nvmap_mmap(struct nvmap_handle *h);
void __nvmap_munmap(struct nvmap_handle *h, void *addr);
int __nvmap_map(struct nvmap_handle *h, struct vm_area_struct *vma);
int __nvmap_do_cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
                           unsigned long start, unsigned long end,
                           unsigned int op, bool clean_only_dirty);
struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
                                           const char *name);
int __nvmap_dmabuf_fd(struct nvmap_client *client,

@@ -575,14 +529,6 @@ int __nvmap_dmabuf_fd(struct nvmap_client *client,
int nvmap_dmabuf_stash_init(void);
void nvmap_dmabuf_stash_deinit(void);

void *nvmap_altalloc(size_t len);
void nvmap_altfree(void *ptr, size_t len);

static inline struct page *nvmap_to_page(struct page *page)
{
        return (struct page *)((unsigned long)page & ~3UL);
}

static inline bool nvmap_page_dirty(struct page *page)
{
        return (unsigned long)page & 1UL;
@@ -660,21 +606,6 @@ static inline void _nvmap_handle_mkdirty(struct nvmap_handle *h,
        atomic_add(nchanged, &h->pgalloc.ndirty);
}

static inline struct page **nvmap_pages(struct page **pg_pages, u32 nr_pages)
{
        struct page **pages;
        int i;

        pages = nvmap_altalloc(sizeof(*pages) * nr_pages);
        if (pages == NULL)
                return NULL;

        for (i = 0; i < nr_pages; i++)
                pages[i] = nvmap_to_page(pg_pages[i]);

        return pages;
}

void nvmap_zap_handle(struct nvmap_handle *handle, u64 offset, u64 size);

void nvmap_vma_open(struct vm_area_struct *vma);

@@ -885,9 +816,6 @@ void nvmap_dma_mark_declared_memory_unoccupied(struct device *dev,
                        dma_addr_t device_addr, size_t size);
#endif /* CONFIG_TEGRA_VIRTUALIZATION */

extern void __dma_flush_area(const void *cpu_va, size_t size);
extern void __dma_map_area(const void *cpu_va, size_t size, int dir);

int nvmap_assign_pages_to_handle(struct nvmap_client *client,
                                 struct nvmap_handle **hs, struct nvmap_handle *h,
                                 struct handles_range *rng);

@@ -43,15 +43,6 @@
#define NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE (0x1ul << 8)
#define NVMAP_HANDLE_RO (0x1ul << 9)

#ifdef NVMAP_CONFIG_PAGE_POOLS
ulong nvmap_page_pool_get_unused_pages(void);
#else
static inline ulong nvmap_page_pool_get_unused_pages(void)
{
        return 0;
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
ulong nvmap_iovmm_get_used_pages(void);
#endif