tegra: nvmap: Clean-up OOT NvMap

Remove macro-protected dead code. NVMAP_LOADABLE_MODULE enables NvMap as
an out-of-tree (OOT) module, while NVMAP_UPSTREAM_KERNEL selects the
kstable/OOT kernel configuration. For kernel 5.10 and later both macros
are always defined, so the guards and the dead branches they protect can
be safely removed.

Bug 4479135

Change-Id: I792f1cb2c54fd21bcf0e73ffc52e46e4efd47862
Signed-off-by: Yash Bhatt <ybhatt@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3079420
Reviewed-by: Ashish Mhetre <amhetre@nvidia.com>
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Author:    Yash Bhatt
Date:      2024-02-19 07:06:02 +00:00
Committer: mobile promotions
Parent:    7393789444
Commit:    25bc2a3b96

8 changed files with 2 additions and 87 deletions
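
The same pattern repeats across all eight files: a macro that is now always
defined used to select between two branches, and the dead branch is dropped
together with its preprocessor scaffolding while the live branch is kept
unconditionally. A minimal sketch of the pattern (hypothetical helper names,
not code from the driver):

/* Before: NVMAP_LOADABLE_MODULE is always defined on 5.10+, so only
 * one branch can ever be compiled. */
#ifdef NVMAP_LOADABLE_MODULE
	use_loadable_module_path();	/* live branch */
#else
	use_builtin_path();		/* dead branch */
#endif

/* After: the guard and the dead branch are removed. */
	use_loadable_module_path();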


@@ -11,14 +11,10 @@
 #include <linux/version.h>
 #include <soc/tegra/fuse.h>
-#ifdef NVMAP_UPSTREAM_KERNEL
 #include <linux/libnvdimm.h>
-#endif /* NVMAP_UPSTREAM_KERNEL */
 #include <linux/sys_soc.h>
-#ifdef NVMAP_LOADABLE_MODULE
 __weak struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
-#endif /*NVMAP_LOADABLE_MODULE */
 #include <trace/events/nvmap.h>
@@ -53,11 +49,7 @@ void nvmap_clean_cache(struct page **pages, int numpages)
 void inner_cache_maint(unsigned int op, void *vaddr, size_t size)
 {
 	if (op == NVMAP_CACHE_OP_WB_INV)
-#ifdef NVMAP_UPSTREAM_KERNEL
 		arch_invalidate_pmem(vaddr, size);
-#else
-		__dma_flush_area(vaddr, size);
-#endif
 	else if (op == NVMAP_CACHE_OP_INV)
 		__dma_map_area(vaddr, size, DMA_FROM_DEVICE);
 	else
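
For reference, inner_cache_maint() as it reads after this hunk, reconstructed
from the context lines above (the final else branch is truncated in the hunk
and elided here):

void inner_cache_maint(unsigned int op, void *vaddr, size_t size)
{
	if (op == NVMAP_CACHE_OP_WB_INV)
		arch_invalidate_pmem(vaddr, size);
	else if (op == NVMAP_CACHE_OP_INV)
		__dma_map_area(vaddr, size, DMA_FROM_DEVICE);
	else
		; /* truncated in the hunk above */
}

Note that arch_invalidate_pmem() is declared in <linux/libnvdimm.h>, which is
why that include is kept unguarded in the first hunk.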


@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2024, NVIDIA CORPORATION. All rights reserved.
 * Derived from Linux kernel source file arch/arm64/mm/cache.S
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
@@ -54,9 +54,7 @@ SYM_FUNC_END(invalidate_icache_range)
 * - size - size in question
 */
 SYM_FUNC_START(__flush_dcache_area)
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0)
 	add	x1, x0, x1
-#endif
 	dcache_by_line_op civac, sy, x0, x1, x2, x3
 	ret
 SYM_FUNC_END(__flush_dcache_area)
@@ -75,9 +73,7 @@ alternative_if ARM64_HAS_CACHE_IDC
 	dsb	ishst
 	ret
 alternative_else_nop_endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0)
 	add	x1, x0, x1
-#endif
 	dcache_by_line_op cvau, ish, x0, x1, x2, x3
 	ret
 SYM_FUNC_END(__clean_dcache_area_pou)
@@ -140,9 +136,7 @@ SYM_FUNC_START(__clean_dcache_area_poc)
 * - start - virtual start address of region
 * - size - size in question
 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0)
 	add	x1, x0, x1
-#endif
 	dcache_by_line_op cvac, sy, x0, x1, x2, x3
 	ret
 SYM_FUNC_END(__clean_dcache_area_poc)
@@ -161,9 +155,7 @@ SYM_FUNC_START(__clean_dcache_area_pop)
 alternative_if_not ARM64_HAS_DCPOP
 	b	__clean_dcache_area_poc
 alternative_else_nop_endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0)
 	add	x1, x0, x1
-#endif
 	dcache_by_line_op cvap, sy, x0, x1, x2, x3
 	ret
 SYM_FUNC_END(__clean_dcache_area_pop)
@@ -177,9 +169,7 @@ SYM_FUNC_END(__clean_dcache_area_pop)
 * - size - size in question
 */
 SYM_FUNC_START(__dma_flush_area)
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0)
 	add	x1, x0, x1
-#endif
 	dcache_by_line_op civac, sy, x0, x1, x2, x3
 	ret
 SYM_FUNC_END(__dma_flush_area)
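
All five hunks above drop the same version guard. The retained
add x1, x0, x1 converts the incoming size in x1 into an end address: from
v5.14 on, the arm64 dcache_by_line_op macro takes a (start, end) pair rather
than (start, size), so removing the guard implies the module now always
builds against such kernels. In C terms, the retained instruction amounts to
the following (illustrative only, not part of the driver):

#include <stddef.h>
#include <stdint.h>

/* What "add x1, x0, x1" does before each dcache_by_line_op: turn the
 * (start, size) calling convention into the (start, end) one expected
 * by v5.14+ kernels. */
static inline uintptr_t size_to_end(uintptr_t start, size_t size)
{
	return start + size; /* x1 = x0 + x1 */
}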


@@ -1371,13 +1371,6 @@ int __init nvmap_probe(struct platform_device *pdev)
 	nvmap_init(pdev);
 	plat = pdev->dev.platform_data;
-#ifndef NVMAP_LOADABLE_MODULE
-	if (!plat) {
-		dev_err(&pdev->dev, "no platform data?\n");
-		e = -ENODEV;
-		goto finish;
-	}
-#endif /* !NVMAP_LOADABLE_MODULE */
 	nvmap_dev = dev;
 	nvmap_dev->plat = plat;
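
The removed check was only ever compiled for the built-in configuration
(#ifndef NVMAP_LOADABLE_MODULE); as a loadable module NvMap accepts a NULL
platform_data, so with the macro always defined the fatal !plat path was
dead code.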


@@ -548,9 +548,6 @@ err_nomem:
 int __nvmap_dmabuf_fd(struct nvmap_client *client,
 		      struct dma_buf *dmabuf, int flags)
 {
-#if !defined(NVMAP_CONFIG_HANDLE_AS_ID) && !defined(NVMAP_LOADABLE_MODULE)
-	int start_fd = NVMAP_CONFIG_FD_START;
-#endif
 	int ret;
 #ifdef NVMAP_CONFIG_DEFER_FD_RECYCLE
@@ -566,11 +563,7 @@ int __nvmap_dmabuf_fd(struct nvmap_client *client,
 	 * __FD_SETSIZE limitation issue for select(),
 	 * pselect() syscalls.
 	 */
-#if defined(NVMAP_LOADABLE_MODULE) || defined(NVMAP_CONFIG_HANDLE_AS_ID)
 	ret = get_unused_fd_flags(flags);
-#else
-	ret = __alloc_fd(current->files, start_fd, sysctl_nr_open, flags);
-#endif
 	if (ret == -EMFILE)
 		pr_err_ratelimited("NvMap: FD limit is crossed for uid %d\n",
 				   from_kuid(current_user_ns(), current_uid()));
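
After this hunk the descriptor always comes from the regular fd allocator.
A minimal sketch of that path, mirroring the upstream dma_buf_fd() helper
(assumed context, not the driver's exact code):

#include <linux/dma-buf.h>
#include <linux/file.h>

/* Sketch: export a dma_buf through get_unused_fd_flags(), the call the
 * driver now uses unconditionally. */
static int example_dmabuf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd = get_unused_fd_flags(flags); /* honors RLIMIT_NOFILE */

	if (fd < 0)
		return fd; /* -EMFILE when the fd table is exhausted */

	fd_install(fd, dmabuf->file); /* transfers the file reference */
	return fd;
}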


@@ -518,10 +518,8 @@ void nvmap_heap_destroy(struct nvmap_heap *heap)
 	kfree(heap->name);
 	kfree(heap->carevout_debugfs_info);
-#ifdef NVMAP_LOADABLE_MODULE
 	nvmap_dma_release_coherent_memory((struct dma_coherent_mem_replica *)
 					  heap->dma_dev->dma_mem);
-#endif /* NVMAP_LOADABLE_MODULE */
 	while (!list_empty(&heap->all_list)) {
 		struct list_block *l;


@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2010-2023, NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2010-2024, NVIDIA Corporation. All rights reserved.
 *
 * GPU heap allocator.
 */


@@ -38,9 +38,6 @@
 #include "nvmap_heap.h"
 #include <linux/syscalls.h>
-#ifndef NVMAP_LOADABLE_MODULE
-#include <linux/dma-map-ops.h>
-#endif /* !NVMAP_LOADABLE_MODULE */
 #if defined(CONFIG_TEGRA_SYSTEM_TYPE_ACK)
 MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);


@@ -12,55 +12,7 @@
 #include "nvmap_priv.h"
-#ifndef NVMAP_LOADABLE_MODULE
-void nvmap_zap_handle(struct nvmap_handle *handle, u64 offset, u64 size)
-{
-	struct list_head *vmas;
-	struct nvmap_vma_list *vma_list;
-	struct vm_area_struct *vma;
-
-	if (!handle->heap_pgalloc)
-		return;
-
-	/* if no dirty page is present, no need to zap */
-	if (nvmap_handle_track_dirty(handle) && !atomic_read(&handle->pgalloc.ndirty))
-		return;
-
-	if (!size) {
-		offset = 0;
-		size = handle->size;
-	}
-
-	size = PAGE_ALIGN((offset & ~PAGE_MASK) + size);
-
-	mutex_lock(&handle->lock);
-	vmas = &handle->vmas;
-	list_for_each_entry(vma_list, vmas, list) {
-		struct nvmap_vma_priv *priv;
-		size_t vm_size = size;
-
-		vma = vma_list->vma;
-		priv = vma->vm_private_data;
-		if ((offset + size) > (vma->vm_end - vma->vm_start))
-			vm_size = vma->vm_end - vma->vm_start - offset;
-
-		if (priv->offs || vma->vm_pgoff)
-			/* vma mapping starts in the middle of handle memory.
-			 * zapping needs special care. zap entire range for now.
-			 * FIXME: optimze zapping.
-			 */
-			zap_page_range(vma, vma->vm_start,
-				       vma->vm_end - vma->vm_start);
-		else
-			zap_page_range(vma, vma->vm_start + offset,
-				       vm_size);
-	}
-	mutex_unlock(&handle->lock);
-}
-#else
 void nvmap_zap_handle(struct nvmap_handle *handle, u64 offset, u64 size)
 {
 	pr_debug("%s is not supported!\n", __func__);
 }
-#endif /* !NVMAP_LOADABLE_MODULE */
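
Only the stub survives for the loadable-module build: the removed
implementation depends on zap_page_range(), which is presumably not
available to out-of-tree modules, so per-VMA zapping is skipped with a
debug message instead.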