From 1c2599bcbae247b5576bc58d192e40fbb6352fc3 Mon Sep 17 00:00:00 2001
From: Ketan Patil
Date: Fri, 11 Aug 2023 15:27:43 +0000
Subject: [PATCH] video: tegra: nvmap: Update carveout name

"Compression" is not the correct name for this carveout, since the GPU
also uses it for non-compression use cases. Hence rename it to the GPU
carveout.

Bug 3956637

Change-Id: I802b91d58d9ca120e34655c21f56c0da8c8cf677
Signed-off-by: Ketan Patil
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2955536
Reviewed-by: Pritesh Raithatha
---
 drivers/video/tegra/nvmap/nvmap_alloc.c |  2 +-
 drivers/video/tegra/nvmap/nvmap_heap.c  | 12 ++---
 drivers/video/tegra/nvmap/nvmap_heap.h  |  2 +-
 drivers/video/tegra/nvmap/nvmap_init.c  | 60 ++++++++++++------------
 drivers/video/tegra/nvmap/nvmap_ioctl.c | 12 ++---
 drivers/video/tegra/nvmap/nvmap_priv.h  |  2 +-
 include/linux/nvmap.h                   |  6 +--
 7 files changed, 48 insertions(+), 48 deletions(-)

diff --git a/drivers/video/tegra/nvmap/nvmap_alloc.c b/drivers/video/tegra/nvmap/nvmap_alloc.c
index 2e07eeba..bbd82664 100644
--- a/drivers/video/tegra/nvmap/nvmap_alloc.c
+++ b/drivers/video/tegra/nvmap/nvmap_alloc.c
@@ -717,7 +717,7 @@ static void alloc_handle(struct nvmap_client *client,
 		void *cpu_addr;
 
 		if (h->pgalloc.pages &&
-				h->heap_type == NVMAP_HEAP_CARVEOUT_COMPRESSION) {
+				h->heap_type == NVMAP_HEAP_CARVEOUT_GPU) {
 			unsigned long page_count;
 			u32 granule_size = 0;
 			int i;
diff --git a/drivers/video/tegra/nvmap/nvmap_heap.c b/drivers/video/tegra/nvmap/nvmap_heap.c
index 27f92f03..485cc708 100644
--- a/drivers/video/tegra/nvmap/nvmap_heap.c
+++ b/drivers/video/tegra/nvmap/nvmap_heap.c
@@ -150,11 +150,11 @@ static phys_addr_t nvmap_alloc_mem(struct nvmap_heap *h, size_t len,
 	err = nvmap_dma_alloc_attrs(dev, len, &pa,
 			GFP_KERNEL, DMA_ATTR_ALLOC_EXACT_SIZE);
 	/*
-	 * In case of Compression carveout, try to allocate the entire granule in physically
+	 * In case of GPU carveout, try to allocate the entire granule in physically
 	 * contiguous manner. If it returns error, then try to allocate the memory in
 	 * granules of specified granule size.
 	 */
-	if (h->is_compression_co && IS_ERR(err)) {
+	if (h->is_gpu_co && IS_ERR(err)) {
 		err = nvmap_dma_alloc_attrs(dev, len, &pa, GFP_KERNEL,
 				DMA_ATTR_ALLOC_EXACT_SIZE |
 				DMA_ATTR_ALLOC_SINGLE_PAGES);
@@ -222,7 +222,7 @@ static void nvmap_free_mem(struct nvmap_heap *h, phys_addr_t base,
 			(void *)(uintptr_t)base,
 			(dma_addr_t)base, DMA_ATTR_ALLOC_EXACT_SIZE);
 #else
-	if (h->is_compression_co && handle->pgalloc.pages) {
+	if (h->is_gpu_co && handle->pgalloc.pages) {
 		/* In case of pages, we need to pass pointer to array of pages */
 		nvmap_dma_free_attrs(dev, len,
 				(void *)handle->pgalloc.pages,
@@ -482,7 +482,7 @@ struct nvmap_heap *nvmap_heap_create(struct device *parent,
 			DMA_MEMORY_NOMAP);
 #else
 	err = nvmap_dma_declare_coherent_memory(h->dma_dev, 0, base, len,
-			DMA_MEMORY_NOMAP, co->is_compression_co, co->granule_size);
+			DMA_MEMORY_NOMAP, co->is_gpu_co, co->granule_size);
 #endif
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
 	if (!err) {
@@ -505,7 +505,7 @@ struct nvmap_heap *nvmap_heap_create(struct device *parent,
 	h->base = base;
 	h->can_alloc = !!co->can_alloc;
 	h->is_ivm = co->is_ivm;
-	h->is_compression_co = co->is_compression_co;
+	h->is_gpu_co = co->is_gpu_co;
 	h->granule_size = co->granule_size;
 	h->len = len;
 	h->free_size = len;
@@ -639,7 +639,7 @@ int nvmap_flush_heap_block(struct nvmap_client *client,
 	struct list_block *b = container_of(block, struct list_block, block);
 
 	/*
-	 * For Compression carveout with physically discontiguous granules,
+	 * For GPU carveout with physically discontiguous granules,
 	 * iterate over granules and do cache maint for it.
 	 */
 	page_count = h->size >> PAGE_SHIFT;
diff --git a/drivers/video/tegra/nvmap/nvmap_heap.h b/drivers/video/tegra/nvmap/nvmap_heap.h
index c9d574d7..e4d2b446 100644
--- a/drivers/video/tegra/nvmap/nvmap_heap.h
+++ b/drivers/video/tegra/nvmap/nvmap_heap.h
@@ -31,7 +31,7 @@ struct nvmap_heap {
 	struct device *cma_dev;
 	struct device *dma_dev;
 	bool is_ivm;
-	bool is_compression_co;
+	bool is_gpu_co;
 	u32 granule_size;
 	bool can_alloc; /* Used only if is_ivm == true */
 	unsigned int peer; /* Used only if is_ivm == true */
diff --git a/drivers/video/tegra/nvmap/nvmap_init.c b/drivers/video/tegra/nvmap/nvmap_init.c
index 5a8f2f90..92285d4d 100644
--- a/drivers/video/tegra/nvmap/nvmap_init.c
+++ b/drivers/video/tegra/nvmap/nvmap_init.c
@@ -136,8 +136,8 @@ static struct nvmap_platform_carveout nvmap_carveouts[] = {
 		.size = 0,
 	},
 	[4] = {
-		.name = "compression",
-		.usage_mask = NVMAP_HEAP_CARVEOUT_COMPRESSION,
+		.name = "gpu",
+		.usage_mask = NVMAP_HEAP_CARVEOUT_GPU,
 		.base = 0,
 		.size = 0,
 	},
@@ -367,7 +367,7 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 	int do_memset = 0;
 	int *bitmap_nos = NULL;
 	const char *device_name;
-	bool is_compression = false;
+	bool is_gpu = false;
 	u32 granule_size = 0;
 
 	device_name = dev_name(dev);
@@ -376,16 +376,16 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 		return NULL;
 	}
 
-	if (!strncmp(device_name, "compression", 11)) {
+	if (!strncmp(device_name, "gpu", 3)) {
 		struct nvmap_platform_carveout *co;
 
-		is_compression = true;
-		co = nvmap_get_carveout_pdata("compression");
+		is_gpu = true;
+		co = nvmap_get_carveout_pdata("gpu");
 		granule_size = co->granule_size;
 	}
 
-	if (is_compression) {
-		/* Calculation for Compression carveout should consider granule size */
+	if (is_gpu) {
+		/* Calculation for GPU carveout should consider granule size */
 		count = size >> PAGE_SHIFT_GRANULE(granule_size);
 	} else {
 		if (dma_get_attr(DMA_ATTR_ALLOC_EXACT_SIZE, attrs)) {
@@ -411,7 +411,7 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 			dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) {
 		alloc_size = 1;
 		/* pages contain the array of pages of kernel PAGE_SIZE */
-		if (!is_compression)
+		if (!is_gpu)
 			pages = nvmap_kvzalloc_pages(count);
 		else
 			pages = nvmap_kvzalloc_pages(count * PAGES_PER_GRANULE(granule_size));
@@ -426,15 +426,15 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 
 	spin_lock_irqsave(&mem->spinlock, flags);
 
-	if (!is_compression && unlikely(size > ((u64)mem->size << PAGE_SHIFT)))
+	if (!is_gpu && unlikely(size > ((u64)mem->size << PAGE_SHIFT)))
 		goto err;
-	else if (is_compression &&
+	else if (is_gpu &&
 			unlikely(size > ((u64)mem->size << PAGE_SHIFT_GRANULE(granule_size))))
 		goto err;
 
 	if (((mem->flags & DMA_MEMORY_NOMAP) &&
 			dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) ||
-			is_compression) {
+			is_gpu) {
 		align = 0;
 	} else {
 		if (order > DMA_BUF_ALIGNMENT)
@@ -455,7 +455,7 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 		count -= alloc_size;
 
 		if (pages) {
-			if (!is_compression)
+			if (!is_gpu)
 				pages[i++] = pfn_to_page(mem->pfn_base + pageno);
 			else {
 				/* Handle granules */
@@ -472,7 +472,7 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 	/*
 	 * Memory was found in the coherent area.
 	 */
-	if (!is_compression)
+	if (!is_gpu)
 		*dma_handle = mem->device_base + (first_pageno << PAGE_SHIFT);
 	else
 		*dma_handle = mem->device_base + (first_pageno << PAGE_SHIFT_GRANULE(granule_size));
@@ -526,7 +526,7 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	unsigned long flags;
 	unsigned int pageno, page_shift_val;
 	struct dma_coherent_mem_replica *mem;
-	bool is_compression = false;
+	bool is_gpu = false;
 	const char *device_name;
 	u32 granule_size = 0;
 
@@ -539,11 +539,11 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		return;
 	}
 
-	if (!strncmp(device_name, "compression", 11)) {
+	if (!strncmp(device_name, "gpu", 3)) {
 		struct nvmap_platform_carveout *co;
 
-		is_compression = true;
-		co = nvmap_get_carveout_pdata("compression");
+		is_gpu = true;
+		co = nvmap_get_carveout_pdata("gpu");
 		granule_size = co->granule_size;
 	}
 
@@ -554,7 +554,7 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		int i;
 
 		spin_lock_irqsave(&mem->spinlock, flags);
-		if (!is_compression) {
+		if (!is_gpu) {
 			for (i = 0; i < (size >> PAGE_SHIFT); i++) {
 				pageno = page_to_pfn(pages[i]) - mem->pfn_base;
 				if (WARN_ONCE(pageno > mem->size,
@@ -582,7 +582,7 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	else
 		mem_addr = mem->virt_base;
 
-	page_shift_val = is_compression ? PAGE_SHIFT_GRANULE(granule_size) : PAGE_SHIFT;
+	page_shift_val = is_gpu ? PAGE_SHIFT_GRANULE(granule_size) : PAGE_SHIFT;
 
 	if (mem && cpu_addr >= mem_addr &&
 			cpu_addr - mem_addr < (u64)mem->size << page_shift_val) {
 		unsigned int page = (cpu_addr - mem_addr) >> page_shift_val;
@@ -590,7 +590,7 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		unsigned int count;
 
 		if (DMA_ATTR_ALLOC_EXACT_SIZE & attrs) {
-			if (is_compression)
+			if (is_gpu)
 				count = ALIGN_GRANULE_SIZE(size, granule_size) >> page_shift_val;
 			else
 				count = PAGE_ALIGN(size) >> page_shift_val;
@@ -682,7 +682,7 @@ static int nvmap_dma_assign_coherent_memory(struct device *dev,
 
 static int nvmap_dma_init_coherent_memory(
 	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
-	struct dma_coherent_mem_replica **mem, bool is_compression, u32 granule_size)
+	struct dma_coherent_mem_replica **mem, bool is_gpu, u32 granule_size)
 {
 	struct dma_coherent_mem_replica *dma_mem = NULL;
 	void *mem_base = NULL;
@@ -693,7 +693,7 @@ static int nvmap_dma_init_coherent_memory(
 	if (!size)
 		return -EINVAL;
 
-	if (is_compression)
+	if (is_gpu)
 		pages = size >> PAGE_SHIFT_GRANULE(granule_size);
 	else
 		pages = size >> PAGE_SHIFT;
@@ -737,14 +737,14 @@ err_memunmap:
 }
 
 int nvmap_dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-		dma_addr_t device_addr, size_t size, int flags, bool is_compression,
+		dma_addr_t device_addr, size_t size, int flags, bool is_gpu,
 		u32 granule_size)
 {
 	struct dma_coherent_mem_replica *mem;
 	int ret;
 
 	ret = nvmap_dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem,
-			is_compression, granule_size);
+			is_gpu, granule_size);
 	if (ret)
 		return ret;
 
@@ -776,7 +776,7 @@ static int __init nvmap_co_device_init(struct reserved_mem *rmem,
 #else
 		err = nvmap_dma_declare_coherent_memory(co->dma_dev, 0,
 				co->base, co->size,
-				DMA_MEMORY_NOMAP, co->is_compression_co,
+				DMA_MEMORY_NOMAP, co->is_gpu_co,
 				co->granule_size);
 #endif
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
@@ -907,8 +907,8 @@ int __init nvmap_co_setup(struct reserved_mem *rmem, u32 granule_size)
 	co->base = rmem->base;
 	co->size = rmem->size;
 	co->cma_dev = NULL;
-	if (!strncmp(co->name, "compression", 11)) {
-		co->is_compression_co = true;
+	if (!strncmp(co->name, "gpu", 3)) {
+		co->is_gpu_co = true;
 		co->granule_size = granule_size;
 	}
 
@@ -943,8 +943,8 @@ int __init nvmap_init(struct platform_device *pdev)
 	while (!of_phandle_iterator_next(&it) && it.node) {
 		if (of_device_is_available(it.node) &&
 			!of_device_is_compatible(it.node, "nvidia,ivm_carveout")) {
-			/* Read granule size in case of compression carveout */
-			if (of_device_is_compatible(it.node, "nvidia,compression_carveout")
+			/* Read granule size in case of GPU carveout */
+			if (of_device_is_compatible(it.node, "nvidia,gpu_carveout")
 				&& of_property_read_u32(it.node, "granule-size", &granule_size)) {
 				pr_err("granule-size property is missing\n");
 				return -EINVAL;
diff --git a/drivers/video/tegra/nvmap/nvmap_ioctl.c b/drivers/video/tegra/nvmap/nvmap_ioctl.c
index d5cd9331..fd8d9248 100644
--- a/drivers/video/tegra/nvmap/nvmap_ioctl.c
+++ b/drivers/video/tegra/nvmap/nvmap_ioctl.c
@@ -207,13 +207,13 @@ int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
 		return -EINVAL;
 
 	/*
-	 * In case of Compression carveout, the handle size needs to be aligned to granule.
+	 * In case of GPU carveout, the handle size needs to be aligned to granule.
 	 */
-	if (op.heap_mask & NVMAP_HEAP_CARVEOUT_COMPRESSION) {
+	if (op.heap_mask & NVMAP_HEAP_CARVEOUT_GPU) {
 		u32 granule_size = 0;
 
 		for (i = 0; i < nvmap_dev->nr_carveouts; i++)
-			if (nvmap_dev->heaps[i].heap_bit & NVMAP_HEAP_CARVEOUT_COMPRESSION)
+			if (nvmap_dev->heaps[i].heap_bit & NVMAP_HEAP_CARVEOUT_GPU)
 				granule_size = nvmap_dev->heaps[i].carveout->granule_size;
 		handle->size = ALIGN_GRANULE_SIZE(handle->size, granule_size);
 		page_sz = granule_size;
@@ -1148,9 +1148,9 @@ int nvmap_ioctl_get_handle_parameters(struct file *filp, void __user *arg)
 	/*
 	 * Check handle is allocated or not while setting contig.
 	 * If heap type is IOVMM, check if it has flag set for contiguous memory
-	 * allocation request. Otherwise, if handle belongs to any carveout except compression
+	 * allocation request. Otherwise, if handle belongs to any carveout except GPU
 	 * carveout then all allocations are contiguous, hence set contig flag to true.
-	 * In case of compression carveout, if allocation is page based then set contig flag to
+	 * In case of GPU carveout, if allocation is page based then set contig flag to
 	 * false otherwise true.
 	 */
 	if (handle->alloc &&
@@ -1359,7 +1359,7 @@ int nvmap_ioctl_query_heap_params(struct file *filp, void __user *arg)
 			heap = nvmap_dev->heaps[i].carveout;
 			op.total = nvmap_query_heap_size(heap);
 			op.free = heap->free_size;
-			if (nvmap_dev->heaps[i].carveout->is_compression_co)
+			if (nvmap_dev->heaps[i].carveout->is_gpu_co)
 				op.granule_size = nvmap_dev->heaps[i].carveout->granule_size;
 			break;
 		}
diff --git a/drivers/video/tegra/nvmap/nvmap_priv.h b/drivers/video/tegra/nvmap/nvmap_priv.h
index 7c577e1b..e01eae3d 100644
--- a/drivers/video/tegra/nvmap/nvmap_priv.h
+++ b/drivers/video/tegra/nvmap/nvmap_priv.h
@@ -488,7 +488,7 @@ struct dma_coherent_mem_replica {
 };
 
 int nvmap_dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-		dma_addr_t device_addr, size_t size, int flags, bool is_compression,
+		dma_addr_t device_addr, size_t size, int flags, bool is_gpu,
 		u32 granule_size);
 #endif
 int nvmap_probe(struct platform_device *pdev);
diff --git a/include/linux/nvmap.h b/include/linux/nvmap.h
index ad9ac6a6..fa02dcf6 100644
--- a/include/linux/nvmap.h
+++ b/include/linux/nvmap.h
@@ -21,7 +21,7 @@
 #define NVMAP_HEAP_CARVEOUT_VPR (1ul<<28)
 #define NVMAP_HEAP_CARVEOUT_TSEC (1ul<<27)
 #define NVMAP_HEAP_CARVEOUT_VIDMEM (1ul<<26)
-#define NVMAP_HEAP_CARVEOUT_COMPRESSION (1ul << 3)
+#define NVMAP_HEAP_CARVEOUT_GPU (1ul << 3)
 #define NVMAP_HEAP_CARVEOUT_FSI (1ul<<2)
 #define NVMAP_HEAP_CARVEOUT_IVM (1ul<<1)
 #define NVMAP_HEAP_CARVEOUT_GENERIC (1ul<<0)
@@ -94,8 +94,8 @@ struct nvmap_platform_carveout {
 	bool no_cpu_access; /* carveout can't be accessed from cpu at all */
 	bool init_done; /* FIXME: remove once all caveouts use reserved-memory */
 	struct nvmap_pm_ops pm_ops;
-	bool is_compression_co; /* Compression carveout is treated differently */
-	u32 granule_size; /* Granule size for compression carveout */
+	bool is_gpu_co; /* GPU carveout is treated differently */
+	u32 granule_size; /* Granule size for GPU carveout */
 };
 
 struct nvmap_platform_data {
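
Notes (illustrative sketches, not part of the change):

1) Allocation policy. For the GPU carveout, nvmap_alloc_mem() first tries
   one physically contiguous allocation of the exact size and only falls
   back to discontiguous granules when that fails. A condensed sketch of
   the control flow (locking and error paths omitted; nvmap_dma_alloc_attrs()
   is nvmap's own wrapper changed in the hunks above):

       /* Try a single physically contiguous block first. */
       err = nvmap_dma_alloc_attrs(dev, len, &pa, GFP_KERNEL,
                                   DMA_ATTR_ALLOC_EXACT_SIZE);
       /*
        * On failure, retry in granule-size chunks. The result may then be
        * physically discontiguous, which is why nvmap_flush_heap_block()
        * has to do cache maintenance granule by granule.
        */
       if (h->is_gpu_co && IS_ERR(err))
               err = nvmap_dma_alloc_attrs(dev, len, &pa, GFP_KERNEL,
                                           DMA_ATTR_ALLOC_EXACT_SIZE |
                                           DMA_ATTR_ALLOC_SINGLE_PAGES);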
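
2) Granule arithmetic. PAGE_SHIFT_GRANULE(), PAGES_PER_GRANULE() and
   ALIGN_GRANULE_SIZE() are defined elsewhere in the nvmap sources; the
   code above only works if they behave roughly as in this sketch, which
   assumes granule_size is a power-of-two byte count (the real definitions
   may differ):

       #include <linux/log2.h>   /* ilog2() */

       /* Shift converting a byte count to a granule count. */
       #define PAGE_SHIFT_GRANULE(granule_size)  (ilog2(granule_size))

       /* Kernel pages per granule, e.g. 16 for 64 KiB granules, 4 KiB pages. */
       #define PAGES_PER_GRANULE(granule_size)   ((granule_size) >> PAGE_SHIFT)

       /* Round a size up to a whole number of granules. */
       #define ALIGN_GRANULE_SIZE(size, granule_size)  ALIGN((size), (granule_size))

   With 64 KiB granules, for example, a 100 KiB NVMAP_HEAP_CARVEOUT_GPU
   request is rounded up by nvmap_ioctl_alloc() to 128 KiB, i.e. two
   granules.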
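
3) Device tree. nvmap_init() above rejects a "nvidia,gpu_carveout" node
   that lacks a "granule-size" property. Only the compatible string and
   the property name come from this patch; the node name, unit address,
   and values below are made-up placeholders:

       reserved-memory {
               #address-cells = <2>;
               #size-cells = <2>;
               ranges;

               gpu_co: gpu-carveout@f0000000 {
                       compatible = "nvidia,gpu_carveout";
                       reg = <0x0 0xf0000000 0x0 0x10000000>;
                       granule-size = <0x10000>; /* 64 KiB */
               };
       };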