video: tegra: nvmap: Switch to configurable granule size

In the compression carveout, the granule size should be configurable via
the device tree. The earlier code assumed a fixed granule size of 2MB;
update it to use the granule size read from the DT.
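
The hunks below rely on granule helpers, PAGE_SHIFT_GRANULE(),
PAGES_PER_GRANULE() and ALIGN_GRANULE_SIZE(), whose definitions are not part
of this excerpt. A minimal sketch of what they could look like, assuming a
power-of-two granule (the value read from the carveout's "granule-size" DT
property) and 4 KiB base pages; the real definitions in the nvmap headers may
differ:

/* Hypothetical sketch only; not the actual nvmap definitions. */
#include <linux/align.h>
#include <linux/log2.h>
#include <linux/mm.h>

#define PAGE_SHIFT_GRANULE(granule_size)	(ilog2(granule_size))
#define PAGES_PER_GRANULE(granule_size)		((granule_size) >> PAGE_SHIFT)
#define ALIGN_GRANULE_SIZE(size, granule_size)	ALIGN((size), (granule_size))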

Bug 3956637

Change-Id: Ib1e966117b2bd9511cbcde37a6011c17f38f22e2
Signed-off-by: Ketan Patil <ketanp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2899865
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Pritesh Raithatha <praithatha@nvidia.com>
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Ketan Patil authored on 2023-05-03 15:01:32 +00:00; committed by mobile promotions
parent 408704d52a
commit faa6040875
7 changed files with 105 additions and 57 deletions


@@ -364,6 +364,7 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
int *bitmap_nos = NULL;
const char *device_name;
bool is_compression = false;
+u32 granule_size = 0;
device_name = dev_name(dev);
if (!device_name) {
@@ -371,12 +372,17 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
return NULL;
}
-if (!strncmp(device_name, "compression", 11))
+if (!strncmp(device_name, "compression", 11)) {
+struct nvmap_platform_carveout *co;
is_compression = true;
+co = nvmap_get_carveout_pdata("compression");
+granule_size = co->granule_size;
+}
if (is_compression) {
-/* Calculation for Compression carveout should consider 2MB chunks */
-count = size >> PAGE_SHIFT_2MB;
+/* Calculation for Compression carveout should consider granule size */
+count = size >> PAGE_SHIFT_GRANULE(granule_size);
} else {
if (dma_get_attr(DMA_ATTR_ALLOC_EXACT_SIZE, attrs)) {
page_count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -404,7 +410,7 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
if (!is_compression)
pages = nvmap_kvzalloc_pages(count);
else
-pages = nvmap_kvzalloc_pages(count * PAGES_PER_2MB);
+pages = nvmap_kvzalloc_pages(count * PAGES_PER_GRANULE(granule_size));
if (!pages) {
kvfree(bitmap_nos);
@@ -418,7 +424,8 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
if (!is_compression && unlikely(size > ((u64)mem->size << PAGE_SHIFT)))
goto err;
-else if (is_compression && unlikely(size > ((u64)mem->size << PAGE_SHIFT_2MB)))
+else if (is_compression &&
+unlikely(size > ((u64)mem->size << PAGE_SHIFT_GRANULE(granule_size))))
goto err;
if (((mem->flags & DMA_MEMORY_NOMAP) &&
@@ -447,10 +454,10 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
if (!is_compression)
pages[i++] = pfn_to_page(mem->pfn_base + pageno);
else {
-/* Handle 2MB chunks */
-for (k = 0; k < (alloc_size * PAGES_PER_2MB); k++)
-pages[i++] = pfn_to_page(mem->pfn_base +
-pageno * PAGES_PER_2MB + k);
+/* Handle granules */
+for (k = 0; k < (alloc_size * PAGES_PER_GRANULE(granule_size)); k++)
+pages[i++] = pfn_to_page(mem->pfn_base + pageno *
+PAGES_PER_GRANULE(granule_size) + k);
}
}
@@ -464,7 +471,7 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
if (!is_compression)
*dma_handle = mem->device_base + (first_pageno << PAGE_SHIFT);
else
-*dma_handle = mem->device_base + (first_pageno << PAGE_SHIFT_2MB);
+*dma_handle = mem->device_base + (first_pageno << PAGE_SHIFT_GRANULE(granule_size));
if (!(mem->flags & DMA_MEMORY_NOMAP)) {
addr = mem->virt_base + (first_pageno << PAGE_SHIFT);
@@ -517,6 +524,7 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
struct dma_coherent_mem_replica *mem;
bool is_compression = false;
const char *device_name;
+u32 granule_size = 0;
if (!dev || !dev->dma_mem)
return;
@@ -527,8 +535,13 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
return;
}
-if (!strncmp(device_name, "compression", 11))
+if (!strncmp(device_name, "compression", 11)) {
+struct nvmap_platform_carveout *co;
is_compression = true;
+co = nvmap_get_carveout_pdata("compression");
+granule_size = co->granule_size;
+}
mem = (struct dma_coherent_mem_replica *)(dev->dma_mem);
if ((mem->flags & DMA_MEMORY_NOMAP) &&
@@ -546,8 +559,9 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
bitmap_clear(mem->bitmap, pageno, 1);
}
} else {
-for (i = 0; i < (size >> PAGE_SHIFT); i += PAGES_PER_2MB) {
-pageno = (page_to_pfn(pages[i]) - mem->pfn_base) / PAGES_PER_2MB;
+for (i = 0; i < (size >> PAGE_SHIFT); i += PAGES_PER_GRANULE(granule_size)) {
+pageno = (page_to_pfn(pages[i]) - mem->pfn_base) /
+PAGES_PER_GRANULE(granule_size);
if (WARN_ONCE(pageno > mem->size,
"invalid pageno:%d\n", pageno))
continue;
@@ -564,7 +578,7 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
else
mem_addr = mem->virt_base;
-page_shift_val = is_compression ? PAGE_SHIFT_2MB : PAGE_SHIFT;
+page_shift_val = is_compression ? PAGE_SHIFT_GRANULE(granule_size) : PAGE_SHIFT;
if (mem && cpu_addr >= mem_addr &&
cpu_addr - mem_addr < (u64)mem->size << page_shift_val) {
unsigned int page = (cpu_addr - mem_addr) >> page_shift_val;
@@ -573,7 +587,7 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
if (DMA_ATTR_ALLOC_EXACT_SIZE & attrs) {
if (is_compression)
-count = ALIGN_2MB(size) >> page_shift_val;
+count = ALIGN_GRANULE_SIZE(size, granule_size) >> page_shift_val;
else
count = PAGE_ALIGN(size) >> page_shift_val;
}
@@ -664,7 +678,7 @@ static int nvmap_dma_assign_coherent_memory(struct device *dev,
static int nvmap_dma_init_coherent_memory(
phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
-struct dma_coherent_mem_replica **mem, bool is_compression)
+struct dma_coherent_mem_replica **mem, bool is_compression, u32 granule_size)
{
struct dma_coherent_mem_replica *dma_mem = NULL;
void *mem_base = NULL;
@@ -676,7 +690,7 @@ static int nvmap_dma_init_coherent_memory(
return -EINVAL;
if (is_compression)
-pages = size >> PAGE_SHIFT_2MB;
+pages = size >> PAGE_SHIFT_GRANULE(granule_size);
else
pages = size >> PAGE_SHIFT;
@@ -719,13 +733,14 @@ err_memunmap:
}
int nvmap_dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-dma_addr_t device_addr, size_t size, int flags, bool is_compression)
+dma_addr_t device_addr, size_t size, int flags, bool is_compression,
+u32 granule_size)
{
struct dma_coherent_mem_replica *mem;
int ret;
ret = nvmap_dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem,
-is_compression);
+is_compression, granule_size);
if (ret)
return ret;
@@ -757,7 +772,8 @@ static int __init nvmap_co_device_init(struct reserved_mem *rmem,
#else
err = nvmap_dma_declare_coherent_memory(co->dma_dev, 0,
co->base, co->size,
-DMA_MEMORY_NOMAP, co->is_compression_co);
+DMA_MEMORY_NOMAP, co->is_compression_co,
+co->granule_size);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
if (!err) {
@@ -871,7 +887,7 @@ finish:
return ret;
}
#else
-int __init nvmap_co_setup(struct reserved_mem *rmem)
+int __init nvmap_co_setup(struct reserved_mem *rmem, u32 granule_size)
{
struct nvmap_platform_carveout *co;
ulong start = sched_clock();
@@ -887,16 +903,18 @@ int __init nvmap_co_setup(struct reserved_mem *rmem)
co->base = rmem->base;
co->size = rmem->size;
co->cma_dev = NULL;
-if (!strncmp(co->name, "compression", 11))
+if (!strncmp(co->name, "compression", 11)) {
co->is_compression_co = true;
+co->granule_size = granule_size;
+}
nvmap_init_time += sched_clock() - start;
return ret;
}
#endif /* !NVMAP_LOADABLE_MODULE */
-RESERVEDMEM_OF_DECLARE(nvmap_co, "nvidia,generic_carveout", nvmap_co_setup);
#ifndef NVMAP_LOADABLE_MODULE
+RESERVEDMEM_OF_DECLARE(nvmap_co, "nvidia,generic_carveout", nvmap_co_setup);
RESERVEDMEM_OF_DECLARE(nvmap_vpr_co, "nvidia,vpr-carveout", nvmap_co_setup);
RESERVEDMEM_OF_DECLARE(nvmap_fsi_co, "nvidia,fsi-carveout", nvmap_co_setup);
#endif /* !NVMAP_LOADABLE_MODULE */
@@ -909,7 +927,9 @@ int __init nvmap_init(struct platform_device *pdev)
{
int err;
struct reserved_mem rmem;
#ifdef NVMAP_LOADABLE_MODULE
+u32 granule_size = 0;
struct reserved_mem *rmem2;
struct device_node *np = pdev->dev.of_node;
struct of_phandle_iterator it;
@@ -919,6 +939,12 @@ int __init nvmap_init(struct platform_device *pdev)
while (!of_phandle_iterator_next(&it) && it.node) {
if (of_device_is_available(it.node) &&
!of_device_is_compatible(it.node, "nvidia,ivm_carveout")) {
+/* Read granule size in case of compression carveout */
+if (of_device_is_compatible(it.node, "nvidia,compression_carveout")
+&& of_property_read_u32(it.node, "granule-size", &granule_size)) {
+pr_err("granule-size property is missing\n");
+return -EINVAL;
+}
rmem2 = of_reserved_mem_lookup(it.node);
if (!rmem2) {
if (!of_property_read_string(it.node, "compatible", &compp))
@@ -926,7 +952,7 @@ int __init nvmap_init(struct platform_device *pdev)
compp);
return -EINVAL;
}
-nvmap_co_setup(rmem2);
+nvmap_co_setup(rmem2, granule_size);
}
}
}
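
As a quick sanity check, not part of the patch: with the sketched helpers
above, a 2 MiB granule and 4 KiB base pages (PAGE_SHIFT == 12) reproduce the
constants that the old code hard-coded.

/* Hypothetical self-check, assuming the sketch above; names are illustrative. */
#include <linux/build_bug.h>
#include <linux/sizes.h>

static inline void nvmap_granule_sketch_selfcheck(void)
{
	BUILD_BUG_ON(PAGE_SHIFT_GRANULE(SZ_2M) != 21);	/* old PAGE_SHIFT_2MB */
	BUILD_BUG_ON(PAGES_PER_GRANULE(SZ_2M) != 512);	/* old PAGES_PER_2MB */
	BUILD_BUG_ON(ALIGN_GRANULE_SIZE(SZ_2M + 1, SZ_2M) != 2 * SZ_2M);	/* old ALIGN_2MB */
}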