tegra: nvmap: replace __dma_* and __ioremap

__dma_flush_area, __dma_map_area and __ioremap
functions are no longer supported from kernel version
6.0 onwards.

Replace them with the equivalent exported functions in kernel 6.0
and onwards.

Bug 3767126

Change-Id: I2b8d81271ed3696ae3d9306a311fe74622c2b611
Signed-off-by: Puneet Saxena <puneets@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2770928
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Jonathan Hunter <jonathanh@nvidia.com>
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
Reviewed-by: Ketan Patil <ketanp@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
This commit is contained in:
Puneet Saxena
2022-09-02 19:48:28 +05:30
committed by Laxman Dewangan
parent 4cf8c80669
commit 8ceb954814
3 changed files with 32 additions and 1 deletions

View File

@@ -718,7 +718,14 @@ static void alloc_handle(struct nvmap_client *client,
MEMREMAP_WB); MEMREMAP_WB);
if (cpu_addr != NULL) { if (cpu_addr != NULL) {
memset(cpu_addr, 0, h->size); memset(cpu_addr, 0, h->size);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0)
dcache_clean_inval_poc(
(unsigned long)cpu_addr,
(unsigned long)cpu_addr
+ h->size);
#else
__dma_flush_area(cpu_addr, h->size); __dma_flush_area(cpu_addr, h->size);
#endif
memunmap(cpu_addr); memunmap(cpu_addr);
} }
} }

View File

@@ -62,12 +62,21 @@ void nvmap_clean_cache(struct page **pages, int numpages)
void inner_cache_maint(unsigned int op, void *vaddr, size_t size) void inner_cache_maint(unsigned int op, void *vaddr, size_t size)
{ {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0)
if (op == NVMAP_CACHE_OP_WB_INV)
dcache_clean_inval_poc((unsigned long)vaddr, (unsigned long)vaddr + size);
else if (op == NVMAP_CACHE_OP_INV)
dcache_inval_poc((unsigned long)vaddr, (unsigned long)vaddr + size);
else
dcache_clean_poc((unsigned long)vaddr, (unsigned long)vaddr + size);
#else
if (op == NVMAP_CACHE_OP_WB_INV) if (op == NVMAP_CACHE_OP_WB_INV)
__dma_flush_area(vaddr, size); __dma_flush_area(vaddr, size);
else if (op == NVMAP_CACHE_OP_INV) else if (op == NVMAP_CACHE_OP_INV)
__dma_map_area(vaddr, size, DMA_FROM_DEVICE); __dma_map_area(vaddr, size, DMA_FROM_DEVICE);
else else
__dma_map_area(vaddr, size, DMA_TO_DEVICE); __dma_map_area(vaddr, size, DMA_TO_DEVICE);
#endif
} }
static void heap_page_cache_maint( static void heap_page_cache_maint(
@@ -149,7 +158,11 @@ int nvmap_cache_maint_phys_range(unsigned int op, phys_addr_t pstart,
phys_addr_t next = (loop + PAGE_SIZE) & PAGE_MASK; phys_addr_t next = (loop + PAGE_SIZE) & PAGE_MASK;
void *base; void *base;
next = min(next, pend); next = min(next, pend);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0)
io_addr = ioremap_prot(loop, PAGE_SIZE, (unsigned long)PAGE_KERNEL);
#else
io_addr = __ioremap(loop, PAGE_SIZE, PG_PROT_KERNEL); io_addr = __ioremap(loop, PAGE_SIZE, PG_PROT_KERNEL);
#endif
if (io_addr == NULL) if (io_addr == NULL)
return -ENOMEM; return -ENOMEM;
base = (__force void *)io_addr + (loop & ~PAGE_MASK); base = (__force void *)io_addr + (loop & ~PAGE_MASK);

View File

@@ -81,8 +81,11 @@ void *__nvmap_kmap(struct nvmap_handle *h, unsigned int pagenum)
h->pgalloc.pages[pagenum])); h->pgalloc.pages[pagenum]));
else else
paddr = h->carveout->base + pagenum * PAGE_SIZE; paddr = h->carveout->base + pagenum * PAGE_SIZE;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0)
addr = ioremap_prot(phys_addr, PAGE_SIZE, pgprot_val(prot));
#else
addr = __ioremap(paddr, PAGE_SIZE, prot); addr = __ioremap(paddr, PAGE_SIZE, prot);
#endif
if (addr == NULL) if (addr == NULL)
goto out; goto out;
kaddr = (unsigned long)addr; kaddr = (unsigned long)addr;
@@ -119,7 +122,11 @@ void __nvmap_kunmap(struct nvmap_handle *h, unsigned int pagenum,
if (h->flags != NVMAP_HANDLE_UNCACHEABLE && if (h->flags != NVMAP_HANDLE_UNCACHEABLE &&
h->flags != NVMAP_HANDLE_WRITE_COMBINE) { h->flags != NVMAP_HANDLE_WRITE_COMBINE) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0)
dcache_clean_inval_poc(addr, PAGE_SIZE);
#else
__dma_flush_area(addr, PAGE_SIZE); __dma_flush_area(addr, PAGE_SIZE);
#endif
outer_flush_range(paddr, paddr + PAGE_SIZE); /* FIXME */ outer_flush_range(paddr, paddr + PAGE_SIZE); /* FIXME */
} }
iounmap((void __iomem *)addr); iounmap((void __iomem *)addr);
@@ -203,8 +210,12 @@ void *__nvmap_mmap(struct nvmap_handle *h)
vaddr = vmap(pages, nr_pages, VM_MAP, prot); vaddr = vmap(pages, nr_pages, VM_MAP, prot);
} else { } else {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0)
vaddr = ioremap_prot(h->carveout->base, adj_size, pgprot_val(prot));
#else
vaddr = (__force void *)__ioremap(h->carveout->base, adj_size, vaddr = (__force void *)__ioremap(h->carveout->base, adj_size,
prot); prot);
#endif
} }
if (vaddr == NULL) if (vaddr == NULL)
goto out; goto out;