tegra: nvmap: Clean-up OOT NvMap

Remove macro-protected dead code that is no longer applicable.

Bug 4479135

Change-Id: I51408f04171e5dc2c34a47755c853aa705cc4cd8
Signed-off-by: Yash Bhatt <ybhatt@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3077840
Reviewed-by: Ashish Mhetre <amhetre@nvidia.com>
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
Reviewed-by: Ketan Patil <ketanp@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Author: Yash Bhatt
Date: 2024-02-15 14:57:02 +00:00
Committed-by: mobile promotions
Parent: e062e9d49c
Commit: 3516ab0f77

4 changed files with 2 additions and 168 deletions

View File

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2024, NVIDIA CORPORATION. All rights reserved.
  *
  * Handle allocation and freeing routines for nvmap
  */
@@ -11,17 +11,11 @@
 #include <linux/random.h>
 #include <linux/version.h>
 #include <linux/io.h>
-#if KERNEL_VERSION(4, 15, 0) > LINUX_VERSION_CODE
-#include <soc/tegra/chip-id.h>
-#else
 #include <soc/tegra/fuse.h>
-#endif
 #include <trace/events/nvmap.h>
 #ifndef NVMAP_LOADABLE_MODULE
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
 #include <linux/dma-map-ops.h>
-#endif
 #endif /* !NVMAP_LOADABLE_MODULE */
 #ifdef NVMAP_UPSTREAM_KERNEL
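
For readers new to the idiom being deleted: out-of-tree code keeps one source tree building against several kernels by guarding includes and calls with LINUX_VERSION_CODE checks from <linux/version.h>. A minimal sketch of the pattern, using the header names from the hunk above:

        #include <linux/version.h>

        #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
        #include <soc/tegra/chip-id.h>  /* pre-4.15 home of the Tegra chip-ID API */
        #else
        #include <soc/tegra/fuse.h>     /* 4.15+ moved the API here */
        #endif

Since the OOT module no longer targets pre-4.15 kernels, only the <soc/tegra/fuse.h> branch is kept.
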
@@ -489,19 +483,11 @@ static int handle_page_alloc(struct nvmap_client *client,
 	int pages_per_big_pg = 0;
 #endif
 #endif /* CONFIG_ARM64_4K_PAGES */
-#if KERNEL_VERSION(4, 15, 0) > LINUX_VERSION_CODE
-	static u32 chipid;
-#else
 	static u8 chipid;
-#endif

 	if (!chipid) {
 #ifdef NVMAP_CONFIG_COLOR_PAGES
-#if KERNEL_VERSION(4, 15, 0) > LINUX_VERSION_CODE
-		chipid = tegra_hidrev_get_chipid(tegra_read_chipid());
-#else
 		chipid = tegra_get_chip_id();
-#endif
 		if (chipid == TEGRA194)
 			s_nr_colors = 16;
 #endif
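
With the guards gone, the allocator reads the chip ID once through tegra_get_chip_id() (a u8 on the supported kernels) and caches it in a function-local static; page colouring is enabled only when it reports Tegra194. A condensed sketch of the surviving logic:

        static u8 chipid;       /* read once, cached across calls */

        if (!chipid) {
                chipid = tegra_get_chip_id();
                if (chipid == TEGRA194) /* 16 page colours on Tegra194 only */
                        s_nr_colors = 16;
        }
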
@@ -559,14 +545,12 @@ static int handle_page_alloc(struct nvmap_client *client,
 					nr_page - page_index, true, h->numa_id);
 #endif
 	allocated = page_index;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)
 	if (page_index < nr_page) {
 		int nid = h->numa_id == NUMA_NO_NODE ? numa_mem_id() : h->numa_id;

 		allocated = __alloc_pages_bulk(gfp, nid, NULL,
 					       nr_page, NULL, pages);
 	}
-#endif
 	for (i = allocated; i < nr_page; i++) {
 		pages[i] = nvmap_alloc_pages_exact(gfp, PAGE_SIZE,
 						   true, h->numa_id);
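
The surviving fast path relies on __alloc_pages_bulk(), available upstream since v5.13: it fills an array with order-0 pages in one call and returns how many entries it actually populated, and the loop after it tops up any shortfall one page at a time. A self-contained sketch of that fill-then-top-up pattern (fill_page_array is a hypothetical name; error handling is minimal):

        #include <linux/gfp.h>

        static int fill_page_array(gfp_t gfp, int nid, struct page **pages,
                                   unsigned int nr_page)
        {
                unsigned long i, got;

                /* Returns the number of array entries actually populated. */
                got = __alloc_pages_bulk(gfp, nid, NULL, nr_page, NULL, pages);

                /* Fall back to single-page allocations for the remainder. */
                for (i = got; i < nr_page; i++) {
                        pages[i] = alloc_page(gfp);
                        if (!pages[i])
                                return -ENOMEM;
                }
                return 0;
        }
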
@@ -632,11 +616,7 @@ static int nvmap_heap_pgalloc(struct nvmap_client *client,
 	size_t size = h->size;
 	struct page **pages;
 	struct device *dma_dev;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)
-	dma_addr_t pa = DMA_ERROR_CODE;
-#else
 	dma_addr_t pa = DMA_MAPPING_ERROR;
-#endif

 	dma_dev = nvmap_heap_pgalloc_dev(type);
 	if (IS_ERR(dma_dev))
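
DMA_ERROR_CODE was an arch-specific sentinel that mainline removed, while DMA_MAPPING_ERROR (an all-ones dma_addr_t) is its generic replacement, so with pre-5.4 kernels out of scope only the latter initializer remains. Callers are still expected to test map results with dma_mapping_error() rather than comparing the constant directly; a minimal sketch:

        dma_addr_t pa;

        pa = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pa)) /* don't compare against the raw value */
                return -ENOMEM;
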
@@ -1017,11 +997,7 @@ void _nvmap_handle_free(struct nvmap_handle *h)
 	list_for_each_entry_safe(curr, next, &h->dmabuf_priv, list) {
 		curr->priv_release(curr->priv);
 		list_del(&curr->list);
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
-		kzfree(curr);
-#else
 		kfree_sensitive(curr);
-#endif
 	}

 	if (nvmap_handle_remove(nvmap_dev, h) != 0)
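
kzfree() was renamed kfree_sensitive() in recent mainline kernels (the guard here keyed off v5.10); the behaviour is unchanged: the allocation is zeroed before being handed back to the allocator, so its contents cannot leak through freed memory. It is a drop-in kfree() replacement:

        /* Zeroes the buffer before freeing it; use for keys and other secrets. */
        kfree_sensitive(curr);
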

View File

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2009-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2009-2024, NVIDIA CORPORATION. All rights reserved.
  *
  * Memory manager for Tegra GPU
  */
@@ -34,98 +34,6 @@ static phys_addr_t handle_phys(struct nvmap_handle *h)
 	return h->carveout->base;
 }

-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)
-void *__nvmap_kmap(struct nvmap_handle *h, unsigned int pagenum)
-{
-	phys_addr_t paddr;
-	unsigned long kaddr;
-	void __iomem *addr;
-	pgprot_t prot;
-
-	if (!virt_addr_valid(h))
-		return NULL;
-
-	h = nvmap_handle_get(h);
-	if (!h)
-		return NULL;
-
-	/*
-	 * If the handle is RO and virtual mapping is requested in
-	 * kernel address space, return error.
-	 */
-	if (h->from_va && h->is_ro)
-		goto put_handle;
-
-	if (!h->alloc)
-		goto put_handle;
-
-	if (!(h->heap_type & nvmap_dev->cpu_access_mask))
-		goto put_handle;
-
-	nvmap_kmaps_inc(h);
-	if (pagenum >= h->size >> PAGE_SHIFT)
-		goto out;
-
-	if (h->vaddr) {
-		kaddr = (unsigned long)h->vaddr + pagenum * PAGE_SIZE;
-	} else {
-		prot = nvmap_pgprot(h, PG_PROT_KERNEL);
-		if (h->heap_pgalloc)
-			paddr = page_to_phys(nvmap_to_page(
-					h->pgalloc.pages[pagenum]));
-		else
-			paddr = h->carveout->base + pagenum * PAGE_SIZE;
-		addr = __ioremap(paddr, PAGE_SIZE, prot);
-		if (addr == NULL)
-			goto out;
-		kaddr = (unsigned long)addr;
-	}
-	return (void *)kaddr;
-out:
-	nvmap_kmaps_dec(h);
-put_handle:
-	nvmap_handle_put(h);
-	return NULL;
-}
-
-void __nvmap_kunmap(struct nvmap_handle *h, unsigned int pagenum,
-		    void *addr)
-{
-	phys_addr_t paddr;
-
-	if (!h || !h->alloc ||
-	    WARN_ON(!virt_addr_valid(h)) ||
-	    WARN_ON(!addr) ||
-	    !(h->heap_type & nvmap_dev->cpu_access_mask))
-		return;
-
-	if (WARN_ON(pagenum >= h->size >> PAGE_SHIFT))
-		return;
-
-	if (h->vaddr && (h->vaddr == (addr - pagenum * PAGE_SIZE)))
-		goto out;
-
-	if (h->heap_pgalloc)
-		paddr = page_to_phys(nvmap_to_page(h->pgalloc.pages[pagenum]));
-	else
-		paddr = h->carveout->base + pagenum * PAGE_SIZE;
-
-	if (h->flags != NVMAP_HANDLE_UNCACHEABLE &&
-	    h->flags != NVMAP_HANDLE_WRITE_COMBINE) {
-#ifdef NVMAP_UPSTREAM_KERNEL
-		arch_invalidate_pmem(addr, PAGE_SIZE);
-#else
-		__dma_flush_area(addr, PAGE_SIZE);
-#endif
-		outer_flush_range(paddr, paddr + PAGE_SIZE); /* FIXME */
-	}
-
-	iounmap((void __iomem *)addr);
-out:
-	nvmap_kmaps_dec(h);
-	nvmap_handle_put(h);
-}
-#endif

 void *__nvmap_mmap(struct nvmap_handle *h)
 {
 	pgprot_t prot;
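
The deleted __nvmap_kmap()/__nvmap_kunmap() pair only ever compiled on pre-5.4 kernels: it ioremap'd one page of a handle with nvmap_pgprot() protections and flushed caches on unmap. On the kernels the module now supports, a temporary CPU mapping of a single struct page is available upstream via kmap_local_page() (v5.11+); a minimal sketch, with a hypothetical helper name:

        #include <linux/highmem.h>

        /* Map one page, zero it, unmap; the mapping is short-lived and CPU-local. */
        static void zero_one_page(struct page *page)
        {
                void *va = kmap_local_page(page);

                memset(va, 0, PAGE_SIZE);
                kunmap_local(va);
        }
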

View File

@@ -20,12 +20,7 @@
 #include <linux/nvmap.h>
 #include <linux/version.h>
 #include <linux/wait.h>
-#if KERNEL_VERSION(4, 15, 0) > LINUX_VERSION_CODE
-#include <soc/tegra/chip-id.h>
-#else
 #include <soc/tegra/fuse.h>
-#endif
 #include <asm/pgtable.h>
 #include <trace/events/nvmap.h>

View File

@@ -17,21 +17,10 @@
 #include <linux/nvmap_t19x.h>
 #endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
 #include <linux/sched/clock.h>
-#endif
 #include <linux/cma.h>
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
 #include <linux/dma-map-ops.h>
-#else
-#include <linux/dma-contiguous.h>
-#include <asm/dma-contiguous.h>
-#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
 #include "include/linux/nvmap_exports.h"
-#endif
 #include "nvmap_priv.h"
@@ -40,13 +29,11 @@
 #include <soc/tegra/virt/syscalls.h>
 #endif

-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
 #ifdef CONFIG_ARM_DMA_IOMMU_ALIGNMENT
 #define DMA_BUF_ALIGNMENT CONFIG_ARM_DMA_IOMMU_ALIGNMENT
 #else
 #define DMA_BUF_ALIGNMENT 8
 #endif
-#endif /* LINUX_VERSION_CODE */

 #ifndef NVMAP_UPSTREAM_KERNEL
 #ifndef NVMAP_CONFIG_VPR_RESIZE
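
The DMA_BUF_ALIGNMENT definition itself survives; only its redundant v5.10 guard goes. The idiom is a standard Kconfig fallback: take the platform-tuned CONFIG_ARM_DMA_IOMMU_ALIGNMENT when it is set, otherwise default to 8. A hypothetical use (the real call sites live elsewhere in nvmap), capping an allocation's alignment order:

        /* Assumed usage, for illustration only. */
        align_order = min_t(unsigned int, align_order, DMA_BUF_ALIGNMENT);
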
@@ -362,7 +349,6 @@ static int __nvmap_init_dt(struct platform_device *pdev)
 	return 0;
 }

-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
 static inline struct page **nvmap_kvzalloc_pages(u32 count)
 {
 	if (count * sizeof(struct page *) <= PAGE_SIZE)
@@ -774,7 +760,6 @@ int nvmap_dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
 		nvmap_dma_release_coherent_memory(mem);
 	return ret;
 }
-#endif /* LINUX_VERSION_CODE */

 static int __init nvmap_co_device_init(struct reserved_mem *rmem,
 				       struct device *dev)
@@ -790,21 +775,11 @@ static int __init nvmap_co_device_init(struct reserved_mem *rmem,
 		return 0;

 	if (!co->cma_dev) {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
-		err = dma_declare_coherent_memory(co->dma_dev, 0,
-						  co->base, co->size,
-						  DMA_MEMORY_NOMAP);
-#else
 		err = nvmap_dma_declare_coherent_memory(co->dma_dev, 0,
 							co->base, co->size,
 							DMA_MEMORY_NOMAP, co->is_gpu_co,
 							co->granule_size);
-#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
 		if (!err) {
-#else
-		if (err & DMA_MEMORY_NOMAP) {
-#endif
 			pr_info("%s :dma coherent mem declare %pa,%zu\n",
 				co->name, &co->base, co->size);
 			co->init_done = true;
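
Only the modern declaration path is left: nvmap's own wrapper (which forwards the GPU-carveout flag and granule size) follows the current kernel convention of returning 0 on success, so the pre-4.14 `err & DMA_MEMORY_NOMAP` success test disappears along with the guard. The surviving shape, condensed from the hunk above:

        err = nvmap_dma_declare_coherent_memory(co->dma_dev, 0,
                                                co->base, co->size,
                                                DMA_MEMORY_NOMAP, co->is_gpu_co,
                                                co->granule_size);
        if (!err)       /* 0 == the coherent carveout was declared */
                co->init_done = true;
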
@@ -814,17 +789,6 @@ static int __init nvmap_co_device_init(struct reserved_mem *rmem,
 			co->name, &co->base, co->size, err);
 	} else {
 #ifdef NVMAP_CONFIG_VPR_RESIZE
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
-		/*
-		 * When vpr memory is reserved, kmemleak tries to scan vpr
-		 * memory for pointers. vpr memory should not be accessed
-		 * from cpu so avoid scanning it. When vpr memory is removed,
-		 * the memblock_remove() API ensures that kmemleak won't scan
-		 * a removed block.
-		 */
-		if (!strncmp(co->name, "vpr", 3))
-			kmemleak_no_scan(__va(co->base));
-#endif
 		co->dma_info->cma_dev = co->cma_dev;
 		err = dma_declare_coherent_resizable_cma_memory(
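
The deleted block was a pre-5.10 workaround: kmemleak would otherwise scan reserved VPR memory, which the CPU must not touch, so the code opted that range out. The call itself is simple; a standalone sketch (vpr_base is a hypothetical variable):

        #include <linux/kmemleak.h>

        /* VPR is CPU-inaccessible; keep kmemleak from scanning it for pointers. */
        kmemleak_no_scan(__va(vpr_base));
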
@@ -879,25 +843,16 @@ int __init nvmap_co_setup(struct reserved_mem *rmem)
 		pr_info("cma area initialed in legacy way already\n");
 		goto finish;
 	}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
 	ret = cma_init_reserved_mem(rmem->base, rmem->size, 0,
 				    rmem->name, &cma);
-#else
-	ret = cma_init_reserved_mem(rmem->base, rmem->size, 0, &cma);
-#endif
 	if (ret) {
 		pr_info("cma_init_reserved_mem fails for %s\n", rmem->name);
 		goto finish;
 	}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
 	dma_contiguous_early_fixup_vpr(rmem->base, rmem->size);
 	if (co->cma_dev)
 		co->cma_dev->cma_area = cma;
-#else
-	dma_contiguous_early_fixup(rmem->base, rmem->size);
-	dev_set_cma_area(co->cma_dev, cma);
-#endif
 	pr_debug("tegra-carveouts carveout=%s %pa@%pa\n",
 		 rmem->name, &rmem->size, &rmem->base);
 	goto finish;
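
Carveout setup now always uses the five-argument cma_init_reserved_mem(), which records the region name, plus the VPR-aware early fixup; the unnamed variant and the dev_set_cma_area() path are gone. The surviving flow, condensed from the hunk above:

        ret = cma_init_reserved_mem(rmem->base, rmem->size, 0,
                                    rmem->name, &cma);
        if (ret)
                goto finish;

        dma_contiguous_early_fixup_vpr(rmem->base, rmem->size);
        if (co->cma_dev)
                co->cma_dev->cma_area = cma;    /* attach the CMA area directly */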