Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git (synced 2025-12-22 17:25:35 +03:00)
video: tegra: nvmap: Add CBC carveout support
- Add CBC carveout support in nvmap.
- Chunk size for CBC is 2MB, hence each bit in the bitmap for the CBC carveout
  indicates a 2MB physically contiguous chunk.
- For an allocation from CBC, first try to allocate the entire buffer in a
  physically contiguous manner; if that is not possible, allocate it in chunks
  of 2MB. The page pointers to these chunks are stored in the nvmap_handle
  struct.
- Modify all other operations such as vmap, kmap and mmap as per the above
  restrictions.

Bug 3956637

Change-Id: I7c304b0127c8fef028e135a4662ab3ad3dc1d1f6
Signed-off-by: Ketan Patil <ketanp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2880662
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2885806
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Laxman Dewangan <ldewangan@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Tested-by: Laxman Dewangan <ldewangan@nvidia.com>
Committed by: mobile promotions
Parent: 7e15a9bb58
Commit: 6937db210f
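Before the diff itself, a minimal standalone sketch of the 2MB-chunk arithmetic the patch relies on may help when reading the hunks. The macro values mirror the SIZE_2MB, PAGE_SHIFT_2MB, PAGES_PER_2MB and ALIGN_2MB definitions this change adds to the nvmap private header; the program itself is illustrative only and is not part of the nvmap sources.

/* Sketch: how a CBC request maps to bitmap bits and page-array entries. */
#include <stdio.h>

#define PAGE_SIZE      4096UL
#define PAGE_SHIFT     12
#define SIZE_2MB       (2 * 1024 * 1024UL)
#define PAGE_SHIFT_2MB 21
#define PAGES_PER_2MB  (SIZE_2MB / PAGE_SIZE)
#define ALIGN_2MB(sz)  (((sz) + SIZE_2MB - 1) & ~(SIZE_2MB - 1))

int main(void)
{
	unsigned long req = 5 * 1024 * 1024UL;            /* requested handle size */
	unsigned long aligned = ALIGN_2MB(req);           /* size rounded up to 2MB */
	unsigned long chunks = aligned >> PAGE_SHIFT_2MB;  /* bitmap bits consumed */
	unsigned long pages = aligned >> PAGE_SHIFT;       /* entries in pgalloc.pages */

	printf("request %lu -> aligned %lu: %lu x 2MB chunks, %lu page pointers\n",
	       req, aligned, chunks, pages);
	/* Zeroing and cache maintenance in the patch then walk the page array
	 * PAGES_PER_2MB entries at a time, one memremap() per 2MB chunk. */
	return 0;
}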
@@ -3,7 +3,7 @@
  *
  * Handle allocation and freeing routines for nvmap
  *
- * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -718,16 +718,42 @@ static void alloc_handle(struct nvmap_client *client,
 if (nvmap_cpu_map_is_allowed(h)) {
 void *cpu_addr;

-cpu_addr = memremap(b->base, h->size,
-MEMREMAP_WB);
-if (cpu_addr != NULL) {
-memset(cpu_addr, 0, h->size);
+if (h->pgalloc.pages &&
+h->heap_type == NVMAP_HEAP_CARVEOUT_CBC) {
+unsigned long page_count;
+int i;
+
+page_count = h->size >> PAGE_SHIFT;
+/* Iterate over 2MB chunks */
+for (i = 0; i < page_count; i += PAGES_PER_2MB) {
+cpu_addr = memremap(page_to_phys(
+h->pgalloc.pages[i]),
+SIZE_2MB, MEMREMAP_WB);
+if (cpu_addr != NULL) {
+memset(cpu_addr, 0, SIZE_2MB);
 #ifdef NVMAP_UPSTREAM_KERNEL
-arch_invalidate_pmem(cpu_addr, h->size);
+arch_invalidate_pmem(cpu_addr,
+SIZE_2MB);
 #else
-__dma_flush_area(cpu_addr, h->size);
+__dma_flush_area(cpu_addr,
+SIZE_2MB);
 #endif
 memunmap(cpu_addr);
+}
+
+}
+} else {
+cpu_addr = memremap(b->base, h->size,
+MEMREMAP_WB);
+if (cpu_addr != NULL) {
+memset(cpu_addr, 0, h->size);
+#ifdef NVMAP_UPSTREAM_KERNEL
+arch_invalidate_pmem(cpu_addr, h->size);
+#else
+__dma_flush_area(cpu_addr, h->size);
+#endif
+memunmap(cpu_addr);
+}
 }
 }
 }
@@ -1006,14 +1032,19 @@ void _nvmap_handle_free(struct nvmap_handle *h)
 if (h->vaddr) {
 void *addr = h->vaddr;

-addr -= (h->carveout->base & ~PAGE_MASK);
-iounmap((void __iomem *)addr);
+if (h->pgalloc.pages) {
+vunmap(h->vaddr);
+} else {
+addr -= (h->carveout->base & ~PAGE_MASK);
+iounmap((void __iomem *)addr);
+}
 }

 nvmap_heap_free(h->carveout);
 nvmap_kmaps_dec(h);
 h->carveout = NULL;
 h->vaddr = NULL;
+h->pgalloc.pages = NULL;
 goto out;
 } else {
 int ret = nvmap_heap_pgfree(h);
@@ -1,7 +1,7 @@
 /*
  * drivers/video/tegra/nvmap/nvmap_cache.c
  *
- * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -197,7 +197,7 @@ static int do_cache_maint(struct cache_maint_op *cache_work)
 goto out;
 }

-if (h->heap_pgalloc) {
+if (h->pgalloc.pages) {
 heap_page_cache_maint(h, pstart, pend, op, true,
 (h->flags == NVMAP_HANDLE_INNER_CACHEABLE) ?
 false : true, cache_work->clean_only_dirty);
@@ -3,7 +3,7 @@
  *
  * Memory manager for Tegra GPU
  *
- * Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2009-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -170,7 +170,7 @@ void *__nvmap_mmap(struct nvmap_handle *h)
 nvmap_kmaps_inc(h);
 prot = nvmap_pgprot(h, PG_PROT_KERNEL);

-if (h->heap_pgalloc) {
+if (h->pgalloc.pages) {
 pages = nvmap_pages(h->pgalloc.pages, h->size >> PAGE_SHIFT);
 if (!pages)
 goto out;
@@ -327,7 +327,7 @@ struct sg_table *__nvmap_sg_table(struct nvmap_client *client,
 goto err;
 }

-if (!h->heap_pgalloc) {
+if (!h->pgalloc.pages) {
 phys_addr_t paddr = handle_phys(h);
 struct page *page = phys_to_page(paddr);

@@ -1,7 +1,7 @@
 /*
  * drivers/video/tegra/nvmap/nvmap_fault.c
  *
- * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -207,8 +207,9 @@ static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 if (offs >= priv->handle->size)
 return VM_FAULT_SIGBUS;

-if (!priv->handle->heap_pgalloc) {
+if (!priv->handle->pgalloc.pages) {
 unsigned long pfn;
+
 BUG_ON(priv->handle->carveout->base & ~PAGE_MASK);
 pfn = ((priv->handle->carveout->base + offs) >> PAGE_SHIFT);
 if (!pfn_valid(pfn)) {
@@ -220,38 +221,51 @@ static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 page = pfn_to_page(pfn);
 } else {
 void *kaddr;
+unsigned long pfn;

-offs >>= PAGE_SHIFT;
-if (atomic_read(&priv->handle->pgalloc.reserved))
-return VM_FAULT_SIGBUS;
-page = nvmap_to_page(priv->handle->pgalloc.pages[offs]);
+if (priv->handle->heap_type != NVMAP_HEAP_IOVMM) {
+offs >>= PAGE_SHIFT;
+page = priv->handle->pgalloc.pages[offs];
+pfn = page_to_pfn(page);
+if (!pfn_valid(pfn)) {
+vm_insert_pfn(vma,
+(unsigned long)vmf_address, pfn);
+return VM_FAULT_NOPAGE;
+}
+} else {
+offs >>= PAGE_SHIFT;
+if (atomic_read(&priv->handle->pgalloc.reserved))
+return VM_FAULT_SIGBUS;
+page = nvmap_to_page(priv->handle->pgalloc.pages[offs]);

-if (PageAnon(page) && (vma->vm_flags & VM_SHARED))
-return VM_FAULT_SIGSEGV;
+if (PageAnon(page)) {
+if (vma->vm_flags & VM_SHARED)
+return VM_FAULT_SIGSEGV;
+}

 if (!nvmap_handle_track_dirty(priv->handle))
 goto finish;
 mutex_lock(&priv->handle->lock);
 if (nvmap_page_dirty(priv->handle->pgalloc.pages[offs])) {
 mutex_unlock(&priv->handle->lock);
 goto finish;
 }

 /* inner cache maint */
 kaddr = kmap(page);
 BUG_ON(!kaddr);
 inner_cache_maint(NVMAP_CACHE_OP_WB_INV, kaddr, PAGE_SIZE);
 kunmap(page);

 if (priv->handle->flags & NVMAP_HANDLE_INNER_CACHEABLE)
 goto make_dirty;

 make_dirty:
 nvmap_page_mkdirty(&priv->handle->pgalloc.pages[offs]);
 atomic_inc(&priv->handle->pgalloc.ndirty);
 mutex_unlock(&priv->handle->lock);
+}
 }

 finish:
 if (page)
 get_page(page);
@@ -128,7 +128,7 @@ void nvmap_heap_debugfs_init(struct dentry *heap_root, struct nvmap_heap *heap)
 }

 static phys_addr_t nvmap_alloc_mem(struct nvmap_heap *h, size_t len,
-phys_addr_t *start)
+phys_addr_t *start, struct nvmap_handle *handle)
 {
 #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)
 phys_addr_t pa = DMA_ERROR_CODE;
@@ -136,6 +136,9 @@ static phys_addr_t nvmap_alloc_mem(struct nvmap_heap *h, size_t len,
 phys_addr_t pa = DMA_MAPPING_ERROR;
 #endif
 struct device *dev = h->dma_dev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
+void *err = NULL;
+#endif

 if (len > UINT_MAX) {
 dev_err(dev, "%s: %d alloc size is out of range\n",
@@ -165,8 +168,26 @@ static phys_addr_t nvmap_alloc_mem(struct nvmap_heap *h, size_t len,
 (void)dma_alloc_attrs(dev, len, &pa,
 GFP_KERNEL, DMA_ATTR_ALLOC_EXACT_SIZE);
 #else
-(void)nvmap_dma_alloc_attrs(dev, len, &pa,
+err = nvmap_dma_alloc_attrs(dev, len, &pa,
 GFP_KERNEL, DMA_ATTR_ALLOC_EXACT_SIZE);
+/*
+ * In case of CBC carveout, try to allocate the entire chunk in physically
+ * contiguous manner. If it returns error, then try to allocate the memory in
+ * 2MB chunks.
+ */
+if (h->is_cbc && IS_ERR(err)) {
+err = nvmap_dma_alloc_attrs(dev, len, &pa,
+GFP_KERNEL, DMA_ATTR_ALLOC_EXACT_SIZE |
+DMA_ATTR_ALLOC_SINGLE_PAGES);
+
+if (!IS_ERR_OR_NULL(err)) {
+/*
+ * Need to keep track of pages, so that only those pages
+ * can be freed while freeing the buffer.
+ */
+handle->pgalloc.pages = (struct page **)err;
+}
+}
 #endif
 if (!dma_mapping_error(dev, pa)) {
 #ifdef NVMAP_CONFIG_VPR_RESIZE
@@ -194,7 +215,7 @@ static phys_addr_t nvmap_alloc_mem(struct nvmap_heap *h, size_t len,
 }

 static void nvmap_free_mem(struct nvmap_heap *h, phys_addr_t base,
-size_t len)
+size_t len, struct nvmap_handle *handle)
 {
 struct device *dev = h->dma_dev;

@@ -222,10 +243,18 @@ static void nvmap_free_mem(struct nvmap_heap *h, phys_addr_t base,
 (void *)(uintptr_t)base,
 (dma_addr_t)base, DMA_ATTR_ALLOC_EXACT_SIZE);
 #else
-nvmap_dma_free_attrs(dev, len,
+if (h->is_cbc && handle->pgalloc.pages) {
+/* In case of pages, we need to pass pointer to array of pages */
+nvmap_dma_free_attrs(dev, len,
+(void *)handle->pgalloc.pages,
+(dma_addr_t)base,
+DMA_ATTR_ALLOC_EXACT_SIZE | DMA_ATTR_ALLOC_SINGLE_PAGES);
+} else {
+nvmap_dma_free_attrs(dev, len,
 (void *)(uintptr_t)base,
 (dma_addr_t)base,
 DMA_ATTR_ALLOC_EXACT_SIZE);
+}
 #endif
 }
 }
@@ -238,7 +267,8 @@ static struct nvmap_heap_block *do_heap_alloc(struct nvmap_heap *heap,
 size_t len, size_t align,
 unsigned int mem_prot,
 phys_addr_t base_max,
-phys_addr_t *start)
+phys_addr_t *start,
+struct nvmap_handle *handle)
 {
 struct list_block *heap_block = NULL;
 dma_addr_t dev_base;
@@ -265,7 +295,7 @@ static struct nvmap_heap_block *do_heap_alloc(struct nvmap_heap *heap,
 goto fail_heap_block_alloc;
 }

-dev_base = nvmap_alloc_mem(heap, len, start);
+dev_base = nvmap_alloc_mem(heap, len, start, handle);
 if (dma_mapping_error(dev, dev_base)) {
 dev_err(dev, "failed to alloc mem of size (%zu)\n",
 len);
@@ -305,8 +335,8 @@ static void do_heap_free(struct nvmap_heap_block *block)

 list_del(&b->all_list);

+nvmap_free_mem(heap, block->base, b->size, block->handle);
 heap->free_size += b->size;
-nvmap_free_mem(heap, block->base, b->size);
 kmem_cache_free(heap_block_cache, b);
 }

@@ -359,7 +389,7 @@ struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *h,
 }

 align = max_t(size_t, align, L1_CACHE_BYTES);
-b = do_heap_alloc(h, len, align, prot, 0, start);
+b = do_heap_alloc(h, len, align, prot, 0, start, handle);
 if (b) {
 b->handle = handle;
 handle->carveout = b;
@@ -473,7 +503,7 @@ struct nvmap_heap *nvmap_heap_create(struct device *parent,
 DMA_MEMORY_NOMAP);
 #else
 err = nvmap_dma_declare_coherent_memory(h->dma_dev, 0, base, len,
-DMA_MEMORY_NOMAP);
+DMA_MEMORY_NOMAP, co->is_cbc);
 #endif
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
 if (!err) {
@@ -496,6 +526,7 @@ struct nvmap_heap *nvmap_heap_create(struct device *parent,
 h->base = base;
 h->can_alloc = !!co->can_alloc;
 h->is_ivm = co->is_ivm;
+h->is_cbc = co->is_cbc;
 h->len = len;
 h->free_size = len;
 h->peer = co->peer;
@@ -616,12 +647,32 @@ int nvmap_flush_heap_block(struct nvmap_client *client,
 phys_addr_t phys = block->base;
 phys_addr_t end = block->base + len;
 int ret = 0;
+struct nvmap_handle *h;
+
 if (prot == NVMAP_HANDLE_UNCACHEABLE || prot == NVMAP_HANDLE_WRITE_COMBINE)
 goto out;

-ret = nvmap_cache_maint_phys_range(NVMAP_CACHE_OP_WB_INV, phys, end,
+h = block->handle;
+if (h->pgalloc.pages) {
+unsigned long page_count, i;
+
+/*
+ * For CBC carveout with physically discontiguous 2MB chunks,
+ * iterate over 2MB chunks and do cache maint for it.
+ */
+page_count = h->size >> PAGE_SHIFT;
+for (i = 0; i < page_count; i += PAGES_PER_2MB) {
+phys = page_to_phys(h->pgalloc.pages[i]);
+end = phys + SIZE_2MB;
+ret = nvmap_cache_maint_phys_range(NVMAP_CACHE_OP_WB_INV, phys, end,
+true, prot != NVMAP_HANDLE_INNER_CACHEABLE);
+if (ret)
+goto out;
+}
+} else
+ret = nvmap_cache_maint_phys_range(NVMAP_CACHE_OP_WB_INV, phys, end,
 true, prot != NVMAP_HANDLE_INNER_CACHEABLE);

 if (ret)
 goto out;
 out:
@@ -3,7 +3,7 @@
  *
  * GPU heap allocator.
  *
- * Copyright (c) 2010-2022, NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2010-2023, NVIDIA Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -41,6 +41,7 @@ struct nvmap_heap {
 struct device *cma_dev;
 struct device *dma_dev;
 bool is_ivm;
+bool is_cbc;
 bool can_alloc; /* Used only if is_ivm == true */
 unsigned int peer; /* Used only if is_ivm == true */
 unsigned int vm_id; /* Used only if is_ivm == true */
@@ -141,11 +141,13 @@ static struct nvmap_platform_carveout nvmap_carveouts[] = {
 .base = 0,
 .size = 0,
 },
-/* Need uninitialized entries for IVM carveouts */
 [4] = {
-.name = NULL,
-.usage_mask = NVMAP_HEAP_CARVEOUT_IVM,
+.name = "cbc",
+.usage_mask = NVMAP_HEAP_CARVEOUT_CBC,
+.base = 0,
+.size = 0,
 },
+/* Need uninitialized entries for IVM carveouts */
 [5] = {
 .name = NULL,
 .usage_mask = NVMAP_HEAP_CARVEOUT_IVM,
@@ -158,11 +160,15 @@ static struct nvmap_platform_carveout nvmap_carveouts[] = {
 .name = NULL,
 .usage_mask = NVMAP_HEAP_CARVEOUT_IVM,
 },
+[8] = {
+.name = NULL,
+.usage_mask = NVMAP_HEAP_CARVEOUT_IVM,
+},
 };

 static struct nvmap_platform_data nvmap_data = {
 .carveouts = nvmap_carveouts,
-.nr_carveouts = 4,
+.nr_carveouts = 5,
 };

 static struct nvmap_platform_carveout *nvmap_get_carveout_pdata(const char *name)
@@ -359,24 +365,39 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 {
 int order = get_order(size);
 unsigned long flags;
-unsigned int count, i = 0, j = 0;
+unsigned int count = 0, i = 0, j = 0, k = 0;
 unsigned int alloc_size;
-unsigned long align, pageno, page_count;
+unsigned long align, pageno, page_count, first_pageno;
 void *addr = NULL;
 struct page **pages = NULL;
 int do_memset = 0;
 int *bitmap_nos = NULL;
+const char *device_name;
+bool is_cbc = false;

-if (dma_get_attr(DMA_ATTR_ALLOC_EXACT_SIZE, attrs)) {
-page_count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-if (page_count > UINT_MAX) {
-dev_err(dev, "Page count more than max value\n");
-return NULL;
-}
-count = (unsigned int)page_count;
+device_name = dev_name(dev);
+if (!device_name) {
+pr_err("Could not get device_name\n");
+return NULL;
+}
+
+if (!strncmp(device_name, "cbc", 3))
+is_cbc = true;
+
+if (is_cbc) {
+/* Calculation for CBC should consider 2MB chunks */
+count = size >> PAGE_SHIFT_2MB;
+} else {
+if (dma_get_attr(DMA_ATTR_ALLOC_EXACT_SIZE, attrs)) {
+page_count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+if (page_count > UINT_MAX) {
+dev_err(dev, "Page count more than max value\n");
+return NULL;
+}
+count = (unsigned int)page_count;
+} else
+count = 1 << order;
 }
-else
-count = 1 << order;

 if (!count)
 return NULL;
@@ -389,20 +410,30 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 if ((mem->flags & DMA_MEMORY_NOMAP) &&
 dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) {
 alloc_size = 1;
-pages = nvmap_kvzalloc_pages(count);
-if (!pages)
+/* pages contain the array of pages of kernel PAGE_SIZE */
+if (!is_cbc)
+pages = nvmap_kvzalloc_pages(count);
+else
+pages = nvmap_kvzalloc_pages(count * PAGES_PER_2MB);
+
+if (!pages) {
+kvfree(bitmap_nos);
 return NULL;
+}
 } else {
 alloc_size = count;
 }

 spin_lock_irqsave(&mem->spinlock, flags);

-if (unlikely(size > ((u64)mem->size << PAGE_SHIFT)))
+if (!is_cbc && unlikely(size > ((u64)mem->size << PAGE_SHIFT)))
+goto err;
+else if (is_cbc && unlikely(size > ((u64)mem->size << PAGE_SHIFT_2MB)))
 goto err;

-if ((mem->flags & DMA_MEMORY_NOMAP) &&
-dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) {
+if (((mem->flags & DMA_MEMORY_NOMAP) &&
+dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) ||
+is_cbc) {
 align = 0;
 } else {
 if (order > DMA_BUF_ALIGNMENT)
@@ -418,9 +449,21 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 if (pageno >= mem->size)
 goto err;

+if (!i)
+first_pageno = pageno;
+
 count -= alloc_size;
-if (pages)
-pages[i++] = pfn_to_page(mem->pfn_base + pageno);
+if (pages) {
+if (!is_cbc)
+pages[i++] = pfn_to_page(mem->pfn_base + pageno);
+else {
+/* Handle 2MB chunks */
+for (k = 0; k < (alloc_size * PAGES_PER_2MB); k++)
+pages[i++] = pfn_to_page(mem->pfn_base +
+pageno * PAGES_PER_2MB + k);
+}
+}
+
 bitmap_set(mem->bitmap, pageno, alloc_size);
 bitmap_nos[j++] = pageno;
 }
@@ -428,9 +471,13 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 /*
  * Memory was found in the coherent area.
  */
-*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+if (!is_cbc)
+*dma_handle = mem->device_base + (first_pageno << PAGE_SHIFT);
+else
+*dma_handle = mem->device_base + (first_pageno << PAGE_SHIFT_2MB);
+
 if (!(mem->flags & DMA_MEMORY_NOMAP)) {
-addr = mem->virt_base + (pageno << PAGE_SHIFT);
+addr = mem->virt_base + (first_pageno << PAGE_SHIFT);
 do_memset = 1;
 } else if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) {
 addr = pages;
@@ -450,7 +497,7 @@ err:
 spin_unlock_irqrestore(&mem->spinlock, flags);
 kvfree(pages);
 kvfree(bitmap_nos);
-return NULL;
+return ERR_PTR(-ENOMEM);
 }

 void *nvmap_dma_alloc_attrs(struct device *dev, size_t size,
@@ -476,12 +523,23 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 {
 void *mem_addr;
 unsigned long flags;
-unsigned int pageno;
+unsigned int pageno, page_shift_val;
 struct dma_coherent_mem_replica *mem;
+bool is_cbc = false;
+const char *device_name;

 if (!dev || !dev->dma_mem)
 return;

+device_name = dev_name(dev);
+if (!device_name) {
+pr_err("Could not get device_name\n");
+return;
+}
+
+if (!strncmp(device_name, "cbc", 3))
+is_cbc = true;
+
 mem = (struct dma_coherent_mem_replica *)(dev->dma_mem);
 if ((mem->flags & DMA_MEMORY_NOMAP) &&
 dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) {
@@ -489,12 +547,22 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 int i;

 spin_lock_irqsave(&mem->spinlock, flags);
-for (i = 0; i < (size >> PAGE_SHIFT); i++) {
-pageno = page_to_pfn(pages[i]) - mem->pfn_base;
-if (WARN_ONCE(pageno > mem->size,
+if (!is_cbc) {
+for (i = 0; i < (size >> PAGE_SHIFT); i++) {
+pageno = page_to_pfn(pages[i]) - mem->pfn_base;
+if (WARN_ONCE(pageno > mem->size,
 "invalid pageno:%d\n", pageno))
 continue;
 bitmap_clear(mem->bitmap, pageno, 1);
+}
+} else {
+for (i = 0; i < (size >> PAGE_SHIFT); i += PAGES_PER_2MB) {
+pageno = (page_to_pfn(pages[i]) - mem->pfn_base) / PAGES_PER_2MB;
+if (WARN_ONCE(pageno > mem->size,
+"invalid pageno:%d\n", pageno))
+continue;
+bitmap_clear(mem->bitmap, pageno, 1);
+}
 }
 spin_unlock_irqrestore(&mem->spinlock, flags);
 kvfree(pages);
@@ -506,14 +574,19 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 else
 mem_addr = mem->virt_base;

+page_shift_val = is_cbc ? PAGE_SHIFT_2MB : PAGE_SHIFT;
 if (mem && cpu_addr >= mem_addr &&
-cpu_addr - mem_addr < (u64)mem->size << PAGE_SHIFT) {
-unsigned int page = (cpu_addr - mem_addr) >> PAGE_SHIFT;
+cpu_addr - mem_addr < (u64)mem->size << page_shift_val) {
+unsigned int page = (cpu_addr - mem_addr) >> page_shift_val;
 unsigned long flags;
 unsigned int count;

-if (DMA_ATTR_ALLOC_EXACT_SIZE & attrs)
-count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+if (DMA_ATTR_ALLOC_EXACT_SIZE & attrs) {
+if (is_cbc)
+count = ALIGN_2MB(size) >> page_shift_val;
+else
+count = PAGE_ALIGN(size) >> page_shift_val;
+}
 else
 count = 1 << get_order(size);

@@ -601,17 +674,24 @@ static int nvmap_dma_assign_coherent_memory(

 static int nvmap_dma_init_coherent_memory(
 phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
-struct dma_coherent_mem_replica **mem)
+struct dma_coherent_mem_replica **mem, bool is_cbc)
 {
 struct dma_coherent_mem_replica *dma_mem = NULL;
 void *mem_base = NULL;
-int pages = size >> PAGE_SHIFT;
-int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+int pages;
+int bitmap_size;
 int ret;

 if (!size)
 return -EINVAL;

+if (is_cbc)
+pages = size >> PAGE_SHIFT_2MB;
+else
+pages = size >> PAGE_SHIFT;
+
+bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+
 if (!(flags & DMA_MEMORY_NOMAP)) {
 mem_base = memremap(phys_addr, size, MEMREMAP_WC);
 if (!mem_base)
@@ -632,7 +712,7 @@ static int nvmap_dma_init_coherent_memory(

 dma_mem->virt_base = mem_base;
 dma_mem->device_base = device_addr;
-dma_mem->pfn_base = PFN_DOWN(phys_addr);
+dma_mem->pfn_base = PFN_DOWN(device_addr);
 dma_mem->size = pages;
 dma_mem->flags = flags;
 spin_lock_init(&dma_mem->spinlock);
@@ -649,12 +729,12 @@ err_memunmap:
 }

 int nvmap_dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-dma_addr_t device_addr, size_t size, int flags)
+dma_addr_t device_addr, size_t size, int flags, bool is_cbc)
 {
 struct dma_coherent_mem_replica *mem;
 int ret;

-ret = nvmap_dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem);
+ret = nvmap_dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem, is_cbc);
 if (ret)
 return ret;

@@ -686,7 +766,7 @@ static int __init nvmap_co_device_init(struct reserved_mem *rmem,
 #else
 err = nvmap_dma_declare_coherent_memory(co->dma_dev, 0,
 co->base, co->size,
-DMA_MEMORY_NOMAP);
+DMA_MEMORY_NOMAP, co->is_cbc);
 #endif
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
 if (!err) {
@@ -816,6 +896,9 @@ int __init nvmap_co_setup(struct reserved_mem *rmem)
 co->base = rmem->base;
 co->size = rmem->size;
 co->cma_dev = NULL;
+if (!strncmp(co->name, "cbc", 3))
+co->is_cbc = true;
+
 nvmap_init_time += sched_clock() - start;
 return ret;
 }
@@ -192,6 +192,7 @@ int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
 struct dma_buf *dmabuf = NULL;
 bool is_ro;
 int err;
+unsigned int page_sz = PAGE_SIZE;

 if (copy_from_user(&op, arg, sizeof(op)))
 return -EFAULT;
@@ -206,6 +207,14 @@ int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
 if (IS_ERR_OR_NULL(handle))
 return -EINVAL;

+/*
+ * In case of CBC carveout, the handle size needs to be aligned to 2MB.
+ */
+if (op.heap_mask & NVMAP_HEAP_CARVEOUT_CBC) {
+handle->size = ALIGN_2MB(handle->size);
+page_sz = SIZE_2MB;
+}
+
 if (!is_nvmap_memory_available(handle->size, op.heap_mask)) {
 nvmap_handle_put(handle);
 return -ENOMEM;
@@ -213,7 +222,7 @@ int nvmap_ioctl_alloc(struct file *filp, void __user *arg)

 /* user-space handles are aligned to page boundaries, to prevent
  * data leakage. */
-op.align = max_t(size_t, op.align, PAGE_SIZE);
+op.align = max_t(size_t, op.align, page_sz);

 err = nvmap_alloc_handle(client, handle, op.heap_mask, op.align,
 0, /* no kind */
@@ -1134,13 +1143,15 @@ int nvmap_ioctl_get_handle_parameters(struct file *filp, void __user *arg)
 /*
  * Check handle is allocated or not while setting contig.
  * If heap type is IOVMM, check if it has flag set for contiguous memory
- * allocation request. Otherwise, if handle belongs to any carveout then
- * all allocations are contiguous, hence set contig flag to true.
+ * allocation request. Otherwise, if handle belongs to any carveout except cbc
+ * then all allocations are contiguous, hence set contig flag to true.
+ * In case of cbc, if allocation is page based then set contig flag to false
+ * otherwise true.
  */
 if (handle->alloc &&
 ((handle->heap_type == NVMAP_HEAP_IOVMM &&
 handle->userflags & NVMAP_HANDLE_PHYS_CONTIG) ||
-handle->heap_type != NVMAP_HEAP_IOVMM)) {
+(handle->heap_type != NVMAP_HEAP_IOVMM && !handle->pgalloc.pages))) {
 op.contig = 1U;
 } else {
 op.contig = 0U;
@@ -51,6 +51,11 @@

 #include <linux/fdtable.h>

+#define SIZE_2MB (2*1024*1024)
+#define ALIGN_2MB(size) ((size + SIZE_2MB - 1) & ~(SIZE_2MB - 1))
+#define PAGE_SHIFT_2MB 21
+#define PAGES_PER_2MB (SIZE_2MB / PAGE_SIZE)
+
 #define DMA_ERROR_CODE (~(dma_addr_t)0)

 #define __DMA_ATTR(attrs) attrs
@@ -188,8 +193,7 @@ struct nvmap_carveout_node {
 size_t size;
 };

-/* handles allocated using shared system memory (either IOVMM- or high-order
- * page allocations */
+/* handles allocated as collection of pages */
 struct nvmap_pgalloc {
 struct page **pages;
 bool contig; /* contiguous system memory */
@@ -238,10 +242,8 @@ struct nvmap_handle {
 struct nvmap_client *owner;
 struct dma_buf *dmabuf;
 struct dma_buf *dmabuf_ro;
-union {
-struct nvmap_pgalloc pgalloc;
-struct nvmap_heap_block *carveout;
-};
+struct nvmap_pgalloc pgalloc;
+struct nvmap_heap_block *carveout;
 bool heap_pgalloc; /* handle is page allocated (sysmem / iovmm) */
 bool alloc; /* handle has memory allocated */
 bool from_va; /* handle memory is from VA */
@@ -494,7 +496,7 @@ struct dma_coherent_mem_replica {
 };

 int nvmap_dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-dma_addr_t device_addr, size_t size, int flags);
+dma_addr_t device_addr, size_t size, int flags, bool is_cbc);
 #endif
 int nvmap_probe(struct platform_device *pdev);
 int nvmap_remove(struct platform_device *pdev);
@@ -3,7 +3,7 @@
  *
  * structure declarations for nvmem and nvmap user-space ioctls
  *
- * Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2009-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -31,6 +31,7 @@
 #define NVMAP_HEAP_CARVEOUT_VPR (1ul<<28)
 #define NVMAP_HEAP_CARVEOUT_TSEC (1ul<<27)
 #define NVMAP_HEAP_CARVEOUT_VIDMEM (1ul<<26)
+#define NVMAP_HEAP_CARVEOUT_CBC (1ul << 3)
 #define NVMAP_HEAP_CARVEOUT_FSI (1ul<<2)
 #define NVMAP_HEAP_CARVEOUT_IVM (1ul<<1)
 #define NVMAP_HEAP_CARVEOUT_GENERIC (1ul<<0)
@@ -103,6 +104,7 @@ struct nvmap_platform_carveout {
 bool no_cpu_access; /* carveout can't be accessed from cpu at all */
 bool init_done; /* FIXME: remove once all caveouts use reserved-memory */
 struct nvmap_pm_ops pm_ops;
+bool is_cbc; /* cbc carveout is treated differently */
 };

 struct nvmap_platform_data {