Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git
Synced 2025-12-22 09:11:26 +03:00
video: tegra: nvmap: Update carveout name
CBC carveout is not the correct carveout name; the correct name is compression carveout. The compression carveout is used to store the GPU compressible buffers, while the comptags and other metadata related to these buffers are stored in the CBC carveout, which is a GSC carveout not created or maintained by nvmap. Hence, update the carveout naming.

Bug 3956637

Change-Id: I50e01a6a8d8bda66960bdee378a12fde176b682a
Signed-off-by: Ketan Patil <ketanp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2888397
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
committed by mobile promotions
parent 0c2cb3e676
commit 30e3826bc4
@@ -719,7 +719,7 @@ static void alloc_handle(struct nvmap_client *client,
 	void *cpu_addr;
 
 	if (h->pgalloc.pages &&
-	    h->heap_type == NVMAP_HEAP_CARVEOUT_CBC) {
+	    h->heap_type == NVMAP_HEAP_CARVEOUT_COMPRESSION) {
 		unsigned long page_count;
 		int i;
 
@@ -171,11 +171,11 @@ static phys_addr_t nvmap_alloc_mem(struct nvmap_heap *h, size_t len,
 	err = nvmap_dma_alloc_attrs(dev, len, &pa,
 			GFP_KERNEL, DMA_ATTR_ALLOC_EXACT_SIZE);
 	/*
-	 * In case of CBC carveout, try to allocate the entire chunk in physically
+	 * In case of Compression carveout, try to allocate the entire chunk in physically
 	 * contiguous manner. If it returns error, then try to allocate the memory in
 	 * 2MB chunks.
 	 */
-	if (h->is_cbc && IS_ERR(err)) {
+	if (h->is_compression_co && IS_ERR(err)) {
 		err = nvmap_dma_alloc_attrs(dev, len, &pa,
 				GFP_KERNEL, DMA_ATTR_ALLOC_EXACT_SIZE |
 				DMA_ATTR_ALLOC_SINGLE_PAGES);
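To make the fallback in the hunk above concrete, here is a minimal user-space sketch of the same strategy; try_contiguous_alloc() is a hypothetical stand-in for the contiguous nvmap_dma_alloc_attrs() call with DMA_ATTR_ALLOC_EXACT_SIZE, and the chunked path models DMA_ATTR_ALLOC_SINGLE_PAGES:

    #include <stdio.h>
    #include <stdlib.h>

    #define SIZE_2MB (2UL * 1024 * 1024)

    /* Hypothetical stand-in for the contiguous allocation; NULL means
     * failure, like IS_ERR() in the patch. */
    static void *try_contiguous_alloc(size_t len)
    {
    	return malloc(len);
    }

    /* Fallback: carve the request into individual 2MB chunks. */
    static void **alloc_2mb_chunks(size_t len, size_t *nchunks)
    {
    	size_t i, n = (len + SIZE_2MB - 1) / SIZE_2MB;
    	void **chunks = calloc(n, sizeof(*chunks));

    	if (!chunks)
    		return NULL;
    	for (i = 0; i < n; i++) {
    		chunks[i] = malloc(SIZE_2MB);
    		if (!chunks[i]) {	/* unwind on partial failure */
    			while (i--)
    				free(chunks[i]);
    			free(chunks);
    			return NULL;
    		}
    	}
    	*nchunks = n;
    	return chunks;
    }

    int main(void)
    {
    	size_t i, len = 5 * SIZE_2MB, nchunks = 0;
    	void *buf = try_contiguous_alloc(len);

    	if (!buf) {
    		void **chunks = alloc_2mb_chunks(len, &nchunks);

    		printf("fell back to %zu 2MB chunks\n", nchunks);
    		for (i = 0; chunks && i < nchunks; i++)
    			free(chunks[i]);
    		free(chunks);
    	} else {
    		printf("contiguous allocation of %zu bytes succeeded\n", len);
    		free(buf);
    	}
    	return 0;
    }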
@@ -243,7 +243,7 @@ static void nvmap_free_mem(struct nvmap_heap *h, phys_addr_t base,
 			(void *)(uintptr_t)base,
 			(dma_addr_t)base, DMA_ATTR_ALLOC_EXACT_SIZE);
 #else
-	if (h->is_cbc && handle->pgalloc.pages) {
+	if (h->is_compression_co && handle->pgalloc.pages) {
 		/* In case of pages, we need to pass pointer to array of pages */
 		nvmap_dma_free_attrs(dev, len,
 				(void *)handle->pgalloc.pages,
@@ -503,7 +503,7 @@ struct nvmap_heap *nvmap_heap_create(struct device *parent,
 					DMA_MEMORY_NOMAP);
 #else
 	err = nvmap_dma_declare_coherent_memory(h->dma_dev, 0, base, len,
-			DMA_MEMORY_NOMAP, co->is_cbc);
+			DMA_MEMORY_NOMAP, co->is_compression_co);
 #endif
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
 	if (!err) {
@@ -526,7 +526,7 @@ struct nvmap_heap *nvmap_heap_create(struct device *parent,
 	h->base = base;
 	h->can_alloc = !!co->can_alloc;
 	h->is_ivm = co->is_ivm;
-	h->is_cbc = co->is_cbc;
+	h->is_compression_co = co->is_compression_co;
 	h->len = len;
 	h->free_size = len;
 	h->peer = co->peer;
@@ -657,7 +657,7 @@ int nvmap_flush_heap_block(struct nvmap_client *client,
 		unsigned long page_count, i;
 
 		/*
-		 * For CBC carveout with physically discontiguous 2MB chunks,
+		 * For Compression carveout with physically discontiguous 2MB chunks,
 		 * iterate over 2MB chunks and do cache maint for it.
 		 */
 		page_count = h->size >> PAGE_SHIFT;
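A sketch of the per-chunk iteration that comment describes (a user-space model; do_cache_maint() is a hypothetical stand-in for the driver's cache-maintenance call):

    #include <stdio.h>

    #define PAGE_SHIFT	12
    #define PAGES_PER_2MB	512	/* 2MB / 4KB */

    /* Hypothetical stand-in for the per-range cache maintenance op. */
    static void do_cache_maint(unsigned long start_pfn, unsigned long npages)
    {
    	printf("maint: pfn %lu..%lu\n", start_pfn, start_pfn + npages - 1);
    }

    int main(void)
    {
    	unsigned long size = 6UL * 1024 * 1024;		/* 6MB handle */
    	unsigned long page_count = size >> PAGE_SHIFT;	/* 1536 4KB pages */
    	unsigned long i;

    	/* walk the handle in 2MB strides, one maint call per chunk */
    	for (i = 0; i < page_count; i += PAGES_PER_2MB)
    		do_cache_maint(i, PAGES_PER_2MB);
    	return 0;
    }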
@@ -41,7 +41,7 @@ struct nvmap_heap {
 	struct device *cma_dev;
 	struct device *dma_dev;
 	bool is_ivm;
-	bool is_cbc;
+	bool is_compression_co;
 	bool can_alloc; /* Used only if is_ivm == true */
 	unsigned int peer; /* Used only if is_ivm == true */
 	unsigned int vm_id; /* Used only if is_ivm == true */
@@ -142,8 +142,8 @@ static struct nvmap_platform_carveout nvmap_carveouts[] = {
 		.size = 0,
 	},
 	[4] = {
-		.name = "cbc",
-		.usage_mask = NVMAP_HEAP_CARVEOUT_CBC,
+		.name = "compression",
+		.usage_mask = NVMAP_HEAP_CARVEOUT_COMPRESSION,
 		.base = 0,
 		.size = 0,
 	},
@@ -373,7 +373,7 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 	int do_memset = 0;
 	int *bitmap_nos = NULL;
 	const char *device_name;
-	bool is_cbc = false;
+	bool is_compression = false;
 
 	device_name = dev_name(dev);
 	if (!device_name) {
@@ -381,11 +381,11 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 		return NULL;
 	}
 
-	if (!strncmp(device_name, "cbc", 3))
-		is_cbc = true;
+	if (!strncmp(device_name, "compression", 11))
+		is_compression = true;
 
-	if (is_cbc) {
-		/* Calculation for CBC should consider 2MB chunks */
+	if (is_compression) {
+		/* Calculation for Compression carveout should consider 2MB chunks */
 		count = size >> PAGE_SHIFT_2MB;
 	} else {
 		if (dma_get_attr(DMA_ATTR_ALLOC_EXACT_SIZE, attrs)) {
@@ -411,7 +411,7 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 			dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) {
 		alloc_size = 1;
 		/* pages contain the array of pages of kernel PAGE_SIZE */
-		if (!is_cbc)
+		if (!is_compression)
 			pages = nvmap_kvzalloc_pages(count);
 		else
 			pages = nvmap_kvzalloc_pages(count * PAGES_PER_2MB);
@@ -426,14 +426,14 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 
 	spin_lock_irqsave(&mem->spinlock, flags);
 
-	if (!is_cbc && unlikely(size > ((u64)mem->size << PAGE_SHIFT)))
+	if (!is_compression && unlikely(size > ((u64)mem->size << PAGE_SHIFT)))
 		goto err;
-	else if (is_cbc && unlikely(size > ((u64)mem->size << PAGE_SHIFT_2MB)))
+	else if (is_compression && unlikely(size > ((u64)mem->size << PAGE_SHIFT_2MB)))
 		goto err;
 
 	if (((mem->flags & DMA_MEMORY_NOMAP) &&
 			dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) ||
-			is_cbc) {
+			is_compression) {
 		align = 0;
 	} else {
 		if (order > DMA_BUF_ALIGNMENT)
@@ -454,7 +454,7 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 
 		count -= alloc_size;
 		if (pages) {
-			if (!is_cbc)
+			if (!is_compression)
 				pages[i++] = pfn_to_page(mem->pfn_base + pageno);
 			else {
 				/* Handle 2MB chunks */
@@ -471,7 +471,7 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 	/*
 	 * Memory was found in the coherent area.
 	 */
-	if (!is_cbc)
+	if (!is_compression)
 		*dma_handle = mem->device_base + (first_pageno << PAGE_SHIFT);
 	else
 		*dma_handle = mem->device_base + (first_pageno << PAGE_SHIFT_2MB);
@@ -525,7 +525,7 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	unsigned long flags;
 	unsigned int pageno, page_shift_val;
 	struct dma_coherent_mem_replica *mem;
-	bool is_cbc = false;
+	bool is_compression = false;
 	const char *device_name;
 
 	if (!dev || !dev->dma_mem)
@@ -537,8 +537,8 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		return;
 	}
 
-	if (!strncmp(device_name, "cbc", 3))
-		is_cbc = true;
+	if (!strncmp(device_name, "compression", 11))
+		is_compression = true;
 
 	mem = (struct dma_coherent_mem_replica *)(dev->dma_mem);
 	if ((mem->flags & DMA_MEMORY_NOMAP) &&
@@ -547,7 +547,7 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		int i;
 
 		spin_lock_irqsave(&mem->spinlock, flags);
-		if (!is_cbc) {
+		if (!is_compression) {
 			for (i = 0; i < (size >> PAGE_SHIFT); i++) {
 				pageno = page_to_pfn(pages[i]) - mem->pfn_base;
 				if (WARN_ONCE(pageno > mem->size,
@@ -574,7 +574,7 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	else
 		mem_addr = mem->virt_base;
 
-	page_shift_val = is_cbc ? PAGE_SHIFT_2MB : PAGE_SHIFT;
+	page_shift_val = is_compression ? PAGE_SHIFT_2MB : PAGE_SHIFT;
 	if (mem && cpu_addr >= mem_addr &&
 			cpu_addr - mem_addr < (u64)mem->size << page_shift_val) {
 		unsigned int page = (cpu_addr - mem_addr) >> page_shift_val;
@@ -582,7 +582,7 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		unsigned int count;
 
 		if (DMA_ATTR_ALLOC_EXACT_SIZE & attrs) {
-			if (is_cbc)
+			if (is_compression)
 				count = ALIGN_2MB(size) >> page_shift_val;
 			else
 				count = PAGE_ALIGN(size) >> page_shift_val;
@@ -674,7 +674,7 @@ static int nvmap_dma_assign_coherent_memory(struct device *dev,
 
 static int nvmap_dma_init_coherent_memory(
 	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
-	struct dma_coherent_mem_replica **mem, bool is_cbc)
+	struct dma_coherent_mem_replica **mem, bool is_compression)
 {
 	struct dma_coherent_mem_replica *dma_mem = NULL;
 	void *mem_base = NULL;
@@ -685,7 +685,7 @@ static int nvmap_dma_init_coherent_memory(
 	if (!size)
 		return -EINVAL;
 
-	if (is_cbc)
+	if (is_compression)
 		pages = size >> PAGE_SHIFT_2MB;
 	else
 		pages = size >> PAGE_SHIFT;
@@ -729,12 +729,13 @@ err_memunmap:
 }
 
 int nvmap_dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-		dma_addr_t device_addr, size_t size, int flags, bool is_cbc)
+		dma_addr_t device_addr, size_t size, int flags, bool is_compression)
 {
 	struct dma_coherent_mem_replica *mem;
 	int ret;
 
-	ret = nvmap_dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem, is_cbc);
+	ret = nvmap_dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem,
+			is_compression);
 	if (ret)
 		return ret;
 
@@ -766,7 +767,7 @@ static int __init nvmap_co_device_init(struct reserved_mem *rmem,
 #else
 	err = nvmap_dma_declare_coherent_memory(co->dma_dev, 0,
 			co->base, co->size,
-			DMA_MEMORY_NOMAP, co->is_cbc);
+			DMA_MEMORY_NOMAP, co->is_compression_co);
 #endif
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
 	if (!err) {
@@ -896,8 +897,8 @@ int __init nvmap_co_setup(struct reserved_mem *rmem)
 	co->base = rmem->base;
 	co->size = rmem->size;
 	co->cma_dev = NULL;
-	if (!strncmp(co->name, "cbc", 3))
-		co->is_cbc = true;
+	if (!strncmp(co->name, "compression", 11))
+		co->is_compression_co = true;
 
 	nvmap_init_time += sched_clock() - start;
 	return ret;
@@ -208,9 +208,9 @@ int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
 		return -EINVAL;
 
 	/*
-	 * In case of CBC carveout, the handle size needs to be aligned to 2MB.
+	 * In case of Compression carveout, the handle size needs to be aligned to 2MB.
 	 */
-	if (op.heap_mask & NVMAP_HEAP_CARVEOUT_CBC) {
+	if (op.heap_mask & NVMAP_HEAP_CARVEOUT_COMPRESSION) {
 		handle->size = ALIGN_2MB(handle->size);
 		page_sz = SIZE_2MB;
 	}
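The 2MB alignment above is plain round-up arithmetic; this self-contained sketch reproduces it with local definitions of ALIGN_2MB and SIZE_2MB, which are assumed here to match the driver's macros:

    #include <stdio.h>

    #define SIZE_2MB	(2UL * 1024 * 1024)
    #define ALIGN_2MB(x)	(((x) + SIZE_2MB - 1) & ~(SIZE_2MB - 1))

    int main(void)
    {
    	unsigned long size = 5UL * 1024 * 1024;	/* 5MB request */

    	/* 5MB rounds up to 6MB, i.e. three 2MB chunks */
    	printf("aligned = %lu bytes (%lu chunks)\n",
    	       ALIGN_2MB(size), ALIGN_2MB(size) / SIZE_2MB);
    	return 0;
    }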
@@ -1143,10 +1143,10 @@ int nvmap_ioctl_get_handle_parameters(struct file *filp, void __user *arg)
 	/*
 	 * Check handle is allocated or not while setting contig.
 	 * If heap type is IOVMM, check if it has flag set for contiguous memory
-	 * allocation request. Otherwise, if handle belongs to any carveout except cbc
-	 * then all allocations are contiguous, hence set contig flag to true.
-	 * In case of cbc, if allocation is page based then set contig flag to false
-	 * otherwise true.
+	 * allocation request. Otherwise, if handle belongs to any carveout except compression
+	 * carveout then all allocations are contiguous, hence set contig flag to true.
+	 * In case of compression carveout, if allocation is page based then set contig flag to
+	 * false otherwise true.
 	 */
 	if (handle->alloc &&
 			((handle->heap_type == NVMAP_HEAP_IOVMM &&
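That comment's decision logic reduces to one small predicate; the following is a hypothetical model only (the NVMAP_HEAP_IOVMM value is assumed for the sketch, and page_based stands in for the handle's page-allocation state):

    #include <stdio.h>
    #include <stdbool.h>

    #define NVMAP_HEAP_IOVMM		(1ul << 30)	/* assumed value, sketch only */
    #define NVMAP_HEAP_CARVEOUT_COMPRESSION	(1ul << 3)	/* matches the header hunk below */

    /* Model of the contig decision described in the comment above. */
    static bool handle_is_contig(unsigned long heap_type, bool contig_requested,
    			     bool page_based)
    {
    	if (heap_type == NVMAP_HEAP_IOVMM)
    		return contig_requested;	/* contiguous only if requested */
    	if (heap_type == NVMAP_HEAP_CARVEOUT_COMPRESSION)
    		return !page_based;	/* 2MB-chunked allocs are discontiguous */
    	return true;			/* other carveouts are always contiguous */
    }

    int main(void)
    {
    	printf("%d %d\n",
    	       handle_is_contig(NVMAP_HEAP_CARVEOUT_COMPRESSION, false, true),
    	       handle_is_contig(NVMAP_HEAP_CARVEOUT_COMPRESSION, false, false));
    	return 0;
    }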
@@ -496,7 +496,7 @@ struct dma_coherent_mem_replica {
 };
 
 int nvmap_dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-		dma_addr_t device_addr, size_t size, int flags, bool is_cbc);
+		dma_addr_t device_addr, size_t size, int flags, bool is_compression);
 #endif
 int nvmap_probe(struct platform_device *pdev);
 int nvmap_remove(struct platform_device *pdev);
@@ -31,7 +31,7 @@
 #define NVMAP_HEAP_CARVEOUT_VPR (1ul<<28)
 #define NVMAP_HEAP_CARVEOUT_TSEC (1ul<<27)
 #define NVMAP_HEAP_CARVEOUT_VIDMEM (1ul<<26)
-#define NVMAP_HEAP_CARVEOUT_CBC (1ul << 3)
+#define NVMAP_HEAP_CARVEOUT_COMPRESSION (1ul << 3)
 #define NVMAP_HEAP_CARVEOUT_FSI (1ul<<2)
 #define NVMAP_HEAP_CARVEOUT_IVM (1ul<<1)
 #define NVMAP_HEAP_CARVEOUT_GENERIC (1ul<<0)
@@ -104,7 +104,7 @@ struct nvmap_platform_carveout {
 	bool no_cpu_access; /* carveout can't be accessed from cpu at all */
 	bool init_done; /* FIXME: remove once all caveouts use reserved-memory */
 	struct nvmap_pm_ops pm_ops;
-	bool is_cbc; /* cbc carveout is treated differently */
+	bool is_compression_co; /* Compression carveout is treated differently */
 };
 
 struct nvmap_platform_data {