video: tegra: nvmap: Update carveout name

CBC is not the correct name for this carveout; compression carveout is
the correct name. The compression carveout is used to store the GPU's
compressible buffers, while the comptags and other metadata related to
these buffers are stored in the CBC carveout, which is a GSC carveout
not created or maintained by nvmap. Hence, update the carveout naming.

Bug 3956637

Change-Id: I50e01a6a8d8bda66960bdee378a12fde176b682a
Signed-off-by: Ketan Patil <ketanp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2888397
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Author:    Ketan Patil <ketanp@nvidia.com>
Date:      2023-04-14 10:46:50 +00:00
Committer: mobile promotions
Parent:    0c2cb3e676
Commit:    30e3826bc4

7 changed files with 44 additions and 43 deletions

View File

@@ -719,7 +719,7 @@ static void alloc_handle(struct nvmap_client *client,
 	void *cpu_addr;

 	if (h->pgalloc.pages &&
-	    h->heap_type == NVMAP_HEAP_CARVEOUT_CBC) {
+	    h->heap_type == NVMAP_HEAP_CARVEOUT_COMPRESSION) {
 		unsigned long page_count;
 		int i;

View File

@@ -171,11 +171,11 @@ static phys_addr_t nvmap_alloc_mem(struct nvmap_heap *h, size_t len,
 	err = nvmap_dma_alloc_attrs(dev, len, &pa,
 			GFP_KERNEL, DMA_ATTR_ALLOC_EXACT_SIZE);
 	/*
-	 * In case of CBC carveout, try to allocate the entire chunk in physically
+	 * In case of Compression carveout, try to allocate the entire chunk in physically
	 * contiguous manner. If it returns error, then try to allocate the memory in
	 * 2MB chunks.
	 */
-	if (h->is_cbc && IS_ERR(err)) {
+	if (h->is_compression_co && IS_ERR(err)) {
 		err = nvmap_dma_alloc_attrs(dev, len, &pa,
 				GFP_KERNEL, DMA_ATTR_ALLOC_EXACT_SIZE |
 				DMA_ATTR_ALLOC_SINGLE_PAGES);
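
The comment in this hunk describes the strategy the rename preserves: attempt one physically contiguous allocation first, and only fall back to discontiguous 2MB chunks on failure. A minimal sketch of that pattern, assuming (as the IS_ERR() check above suggests) that nvmap_dma_alloc_attrs() returns an ERR_PTR-encoded value on failure; the helper name is hypothetical:

/* Hypothetical helper: contiguous-first allocation with a 2MB-chunk fallback. */
static void *compression_co_alloc(struct device *dev, size_t len, dma_addr_t *pa)
{
	/* First try: a single physically contiguous block of exactly len bytes. */
	void *va = nvmap_dma_alloc_attrs(dev, len, pa, GFP_KERNEL,
					 DMA_ATTR_ALLOC_EXACT_SIZE);

	if (!IS_ERR(va))
		return va;

	/* Fallback: back the buffer with physically discontiguous 2MB chunks. */
	return nvmap_dma_alloc_attrs(dev, len, pa, GFP_KERNEL,
				     DMA_ATTR_ALLOC_EXACT_SIZE |
				     DMA_ATTR_ALLOC_SINGLE_PAGES);
}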
@@ -243,7 +243,7 @@ static void nvmap_free_mem(struct nvmap_heap *h, phys_addr_t base,
 			(void *)(uintptr_t)base,
 			(dma_addr_t)base, DMA_ATTR_ALLOC_EXACT_SIZE);
 #else
-	if (h->is_cbc && handle->pgalloc.pages) {
+	if (h->is_compression_co && handle->pgalloc.pages) {
 		/* In case of pages, we need to pass pointer to array of pages */
 		nvmap_dma_free_attrs(dev, len,
 				(void *)handle->pgalloc.pages,
@@ -503,7 +503,7 @@ struct nvmap_heap *nvmap_heap_create(struct device *parent,
 			DMA_MEMORY_NOMAP);
 #else
 	err = nvmap_dma_declare_coherent_memory(h->dma_dev, 0, base, len,
-			DMA_MEMORY_NOMAP, co->is_cbc);
+			DMA_MEMORY_NOMAP, co->is_compression_co);
 #endif
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
 	if (!err) {
@@ -526,7 +526,7 @@ struct nvmap_heap *nvmap_heap_create(struct device *parent,
 	h->base = base;
 	h->can_alloc = !!co->can_alloc;
 	h->is_ivm = co->is_ivm;
-	h->is_cbc = co->is_cbc;
+	h->is_compression_co = co->is_compression_co;
 	h->len = len;
 	h->free_size = len;
 	h->peer = co->peer;
@@ -657,7 +657,7 @@ int nvmap_flush_heap_block(struct nvmap_client *client,
 	unsigned long page_count, i;

 	/*
-	 * For CBC carveout with physically discontiguous 2MB chunks,
+	 * For Compression carveout with physically discontiguous 2MB chunks,
	 * iterate over 2MB chunks and do cache maint for it.
	 */
 	page_count = h->size >> PAGE_SHIFT;

View File

@@ -41,7 +41,7 @@ struct nvmap_heap {
 	struct device *cma_dev;
 	struct device *dma_dev;
 	bool is_ivm;
-	bool is_cbc;
+	bool is_compression_co;
 	bool can_alloc;		/* Used only if is_ivm == true */
 	unsigned int peer;	/* Used only if is_ivm == true */
 	unsigned int vm_id;	/* Used only if is_ivm == true */

View File

@@ -142,8 +142,8 @@ static struct nvmap_platform_carveout nvmap_carveouts[] = {
 		.size = 0,
 	},
 	[4] = {
-		.name = "cbc",
-		.usage_mask = NVMAP_HEAP_CARVEOUT_CBC,
+		.name = "compression",
+		.usage_mask = NVMAP_HEAP_CARVEOUT_COMPRESSION,
 		.base = 0,
 		.size = 0,
 	},
@@ -373,7 +373,7 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 	int do_memset = 0;
 	int *bitmap_nos = NULL;
 	const char *device_name;
-	bool is_cbc = false;
+	bool is_compression = false;

 	device_name = dev_name(dev);
 	if (!device_name) {
@@ -381,11 +381,11 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 		return NULL;
 	}

-	if (!strncmp(device_name, "cbc", 3))
-		is_cbc = true;
+	if (!strncmp(device_name, "compression", 11))
+		is_compression = true;

-	if (is_cbc) {
-		/* Calculation for CBC should consider 2MB chunks */
+	if (is_compression) {
+		/* Calculation for Compression carveout should consider 2MB chunks */
 		count = size >> PAGE_SHIFT_2MB;
 	} else {
 		if (dma_get_attr(DMA_ATTR_ALLOC_EXACT_SIZE, attrs)) {
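
The renamed branch sizes compression-carveout requests in 2MB granules rather than kernel pages. A sketch of the accounting, assuming PAGE_SHIFT_2MB is 21 (2MB == 1UL << 21; the macro's real definition lives elsewhere in nvmap). The plain shift is exact only because nvmap_ioctl_alloc(), later in this commit, 2MB-aligns compression handle sizes first:

/* Assumption: PAGE_SHIFT_2MB == 21, so one granule is 1UL << 21 == 2MB. */
#define PAGE_SHIFT_2MB	21

/* Granules needed for an already 2MB-aligned size,
 * e.g. 4MB >> 21 == 2 granules.
 */
static unsigned long compression_granules(size_t size)
{
	return size >> PAGE_SHIFT_2MB;
}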
@@ -411,7 +411,7 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 			dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) {
 		alloc_size = 1;
 		/* pages contain the array of pages of kernel PAGE_SIZE */
-		if (!is_cbc)
+		if (!is_compression)
 			pages = nvmap_kvzalloc_pages(count);
 		else
 			pages = nvmap_kvzalloc_pages(count * PAGES_PER_2MB);
@@ -426,14 +426,14 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 	spin_lock_irqsave(&mem->spinlock, flags);

-	if (!is_cbc && unlikely(size > ((u64)mem->size << PAGE_SHIFT)))
+	if (!is_compression && unlikely(size > ((u64)mem->size << PAGE_SHIFT)))
 		goto err;
-	else if (is_cbc && unlikely(size > ((u64)mem->size << PAGE_SHIFT_2MB)))
+	else if (is_compression && unlikely(size > ((u64)mem->size << PAGE_SHIFT_2MB)))
 		goto err;

 	if (((mem->flags & DMA_MEMORY_NOMAP) &&
 			dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) ||
-			is_cbc) {
+			is_compression) {
 		align = 0;
 	} else {
 		if (order > DMA_BUF_ALIGNMENT)
@@ -454,7 +454,7 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 		count -= alloc_size;

 		if (pages) {
-			if (!is_cbc)
+			if (!is_compression)
 				pages[i++] = pfn_to_page(mem->pfn_base + pageno);
 			else {
 				/* Handle 2MB chunks */
@@ -471,7 +471,7 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 	/*
	 * Memory was found in the coherent area.
	 */
-	if (!is_cbc)
+	if (!is_compression)
 		*dma_handle = mem->device_base + (first_pageno << PAGE_SHIFT);
 	else
 		*dma_handle = mem->device_base + (first_pageno << PAGE_SHIFT_2MB);
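
The device address is recovered from the bitmap slot at the carveout's granule size, so the same slot number maps to very different offsets depending on the path taken. A worked example, assuming 4KB kernel pages (PAGE_SHIFT == 12) and PAGE_SHIFT_2MB == 21:

/* With mem->device_base == 0x80000000 and first_pageno == 3:
 *   page-granule path:       0x80000000 + (3 << 12) == 0x80003000
 *   compression (2MB) path:  0x80000000 + (3 << 21) == 0x80600000
 */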
@@ -525,7 +525,7 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	unsigned long flags;
 	unsigned int pageno, page_shift_val;
 	struct dma_coherent_mem_replica *mem;
-	bool is_cbc = false;
+	bool is_compression = false;
 	const char *device_name;

 	if (!dev || !dev->dma_mem)
@@ -537,8 +537,8 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		return;
 	}

-	if (!strncmp(device_name, "cbc", 3))
-		is_cbc = true;
+	if (!strncmp(device_name, "compression", 11))
+		is_compression = true;

 	mem = (struct dma_coherent_mem_replica *)(dev->dma_mem);
 	if ((mem->flags & DMA_MEMORY_NOMAP) &&
@@ -547,7 +547,7 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		int i;

 		spin_lock_irqsave(&mem->spinlock, flags);
-		if (!is_cbc) {
+		if (!is_compression) {
 			for (i = 0; i < (size >> PAGE_SHIFT); i++) {
 				pageno = page_to_pfn(pages[i]) - mem->pfn_base;
 				if (WARN_ONCE(pageno > mem->size,
@@ -574,7 +574,7 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	else
 		mem_addr = mem->virt_base;

-	page_shift_val = is_cbc ? PAGE_SHIFT_2MB : PAGE_SHIFT;
+	page_shift_val = is_compression ? PAGE_SHIFT_2MB : PAGE_SHIFT;
 	if (mem && cpu_addr >= mem_addr &&
 			cpu_addr - mem_addr < (u64)mem->size << page_shift_val) {
 		unsigned int page = (cpu_addr - mem_addr) >> page_shift_val;
@@ -582,7 +582,7 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		unsigned int count;

 		if (DMA_ATTR_ALLOC_EXACT_SIZE & attrs) {
-			if (is_cbc)
+			if (is_compression)
 				count = ALIGN_2MB(size) >> page_shift_val;
 			else
 				count = PAGE_ALIGN(size) >> page_shift_val;
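
The free path applies the same granule arithmetic, rounding the handle size up before the shift. Assuming ALIGN_2MB() rounds up to the next 2MB boundary and 4KB pages:

/* Freeing a 3MB handle:
 *   compression path: count = ALIGN_2MB(3MB) >> 21 == 4MB >> 21 == 2 granules
 *   page path:        count = PAGE_ALIGN(3MB) >> 12 == 768 pages
 */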
@@ -674,7 +674,7 @@ static int nvmap_dma_assign_coherent_memory(struct device *dev,

 static int nvmap_dma_init_coherent_memory(
 	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
-	struct dma_coherent_mem_replica **mem, bool is_cbc)
+	struct dma_coherent_mem_replica **mem, bool is_compression)
 {
 	struct dma_coherent_mem_replica *dma_mem = NULL;
 	void *mem_base = NULL;
@@ -685,7 +685,7 @@ static int nvmap_dma_init_coherent_memory(
 	if (!size)
 		return -EINVAL;

-	if (is_cbc)
+	if (is_compression)
 		pages = size >> PAGE_SHIFT_2MB;
 	else
 		pages = size >> PAGE_SHIFT;
@@ -729,12 +729,13 @@ err_memunmap:
 }

 int nvmap_dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-		dma_addr_t device_addr, size_t size, int flags, bool is_cbc)
+		dma_addr_t device_addr, size_t size, int flags, bool is_compression)
 {
 	struct dma_coherent_mem_replica *mem;
 	int ret;

-	ret = nvmap_dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem, is_cbc);
+	ret = nvmap_dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem,
+			is_compression);
 	if (ret)
 		return ret;
@@ -766,7 +767,7 @@ static int __init nvmap_co_device_init(struct reserved_mem *rmem,
 #else
 	err = nvmap_dma_declare_coherent_memory(co->dma_dev, 0,
 			co->base, co->size,
-			DMA_MEMORY_NOMAP, co->is_cbc);
+			DMA_MEMORY_NOMAP, co->is_compression_co);
 #endif
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
 	if (!err) {
@@ -896,8 +897,8 @@ int __init nvmap_co_setup(struct reserved_mem *rmem)
 	co->base = rmem->base;
 	co->size = rmem->size;
 	co->cma_dev = NULL;
-	if (!strncmp(co->name, "cbc", 3))
-		co->is_cbc = true;
+	if (!strncmp(co->name, "compression", 11))
+		co->is_compression_co = true;

 	nvmap_init_time += sched_clock() - start;
 	return ret;
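
Note that both detection sites use strncmp() with the length of the literal, so this is a prefix match rather than an exact comparison. A hedged illustration (the helper is hypothetical, not part of the patch):

#include <linux/string.h>
#include <linux/types.h>

/* Any reserved-memory/device name beginning with "compression"
 * selects the compression-carveout path.
 */
static bool is_compression_name(const char *name)
{
	return strncmp(name, "compression", 11) == 0;
}

/* is_compression_name("compression")     -> true
 * is_compression_name("compression-gpu") -> true  (prefix still matches)
 * is_compression_name("cbc")             -> false
 */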

View File

@@ -208,9 +208,9 @@ int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
 		return -EINVAL;

 	/*
-	 * In case of CBC carveout, the handle size needs to be aligned to 2MB.
+	 * In case of Compression carveout, the handle size needs to be aligned to 2MB.
	 */
-	if (op.heap_mask & NVMAP_HEAP_CARVEOUT_CBC) {
+	if (op.heap_mask & NVMAP_HEAP_CARVEOUT_COMPRESSION) {
 		handle->size = ALIGN_2MB(handle->size);
 		page_sz = SIZE_2MB;
 	}
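
This is where the 2MB alignment that the shift-based granule accounting relies on is established. A sketch of what SIZE_2MB and ALIGN_2MB() plausibly expand to; their real definitions live elsewhere in nvmap, so treat both as assumptions:

#include <linux/kernel.h>	/* ALIGN() */

#define SIZE_2MB	(1UL << 21)		/* assumed */
#define ALIGN_2MB(x)	ALIGN(x, SIZE_2MB)	/* assumed */

/* e.g. a 3MB request: handle->size becomes ALIGN_2MB(3MB) == 4MB,
 * i.e. exactly two 2MB granules.
 */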
@@ -1143,10 +1143,10 @@ int nvmap_ioctl_get_handle_parameters(struct file *filp, void __user *arg)
 	/*
	 * Check handle is allocated or not while setting contig.
	 * If heap type is IOVMM, check if it has flag set for contiguous memory
-	 * allocation request. Otherwise, if handle belongs to any carveout except cbc
-	 * then all allocations are contiguous, hence set contig flag to true.
-	 * In case of cbc, if allocation is page based then set contig flag to false
-	 * otherwise true.
+	 * allocation request. Otherwise, if handle belongs to any carveout except compression
+	 * carveout then all allocations are contiguous, hence set contig flag to true.
+	 * In case of compression carveout, if allocation is page based then set contig flag to
+	 * false otherwise true.
	 */
 	if (handle->alloc &&
 			((handle->heap_type == NVMAP_HEAP_IOVMM &&

View File

@@ -496,7 +496,7 @@ struct dma_coherent_mem_replica {
 };

 int nvmap_dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-		dma_addr_t device_addr, size_t size, int flags, bool is_cbc);
+		dma_addr_t device_addr, size_t size, int flags, bool is_compression);
 #endif
 int nvmap_probe(struct platform_device *pdev);
 int nvmap_remove(struct platform_device *pdev);

View File

@@ -31,7 +31,7 @@
 #define NVMAP_HEAP_CARVEOUT_VPR     (1ul<<28)
 #define NVMAP_HEAP_CARVEOUT_TSEC    (1ul<<27)
 #define NVMAP_HEAP_CARVEOUT_VIDMEM  (1ul<<26)
-#define NVMAP_HEAP_CARVEOUT_CBC     (1ul << 3)
+#define NVMAP_HEAP_CARVEOUT_COMPRESSION (1ul << 3)
 #define NVMAP_HEAP_CARVEOUT_FSI     (1ul<<2)
 #define NVMAP_HEAP_CARVEOUT_IVM     (1ul<<1)
 #define NVMAP_HEAP_CARVEOUT_GENERIC (1ul<<0)
@@ -104,7 +104,7 @@ struct nvmap_platform_carveout {
 	bool no_cpu_access; /* carveout can't be accessed from cpu at all */
 	bool init_done; /* FIXME: remove once all caveouts use reserved-memory */
 	struct nvmap_pm_ops pm_ops;
-	bool is_cbc; /* cbc carveout is treated differently */
+	bool is_compression_co; /* Compression carveout is treated differently */
 };

 struct nvmap_platform_data {