From a3e0e896a045c9b5a9c2ec08d0ab28c53e24b464 Mon Sep 17 00:00:00 2001 From: N V S Abhishek Date: Fri, 24 Jan 2025 17:04:51 +0000 Subject: [PATCH] tegra: nvmap: Fix nvmap_query_heap for multi numa node platform In the current implementation, when NvRmMemQueryHeapParams is called and multiple numa nodes are online: 1. For iovmm carveout, numa_id is set to a garbage value, and we are calling compute_memory_stat with it. 2. For gpu carveout, we are returning values for numa_id 0. 3. For other carveouts, we are returning params for the first matching entry in nvmap_dev->heaps[i]. Correct this behavior as follows: Regardless of carveout type, return params for numa_id 0 when NvRmMemQueryHeapParams is called and multiple numa nodes are online. In the long term, we need to disable NvRmMemQueryHeapParams when multiple numa nodes are online. Clients should use NvRmMemQueryHeapParamsNuma instead. Jira TMM-5970 Change-Id: Id49289e51eda187b1d676e5192583f320835c2f4 Signed-off-by: N V S Abhishek Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3290730 Reviewed-by: Ketan Patil Reviewed-by: Pritesh Raithatha GVS: buildbot_gerritrpt --- drivers/video/tegra/nvmap/nvmap_heap.c | 30 +++++++++++++++++++------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/drivers/video/tegra/nvmap/nvmap_heap.c b/drivers/video/tegra/nvmap/nvmap_heap.c index 51f11bcb..13839c01 100644 --- a/drivers/video/tegra/nvmap/nvmap_heap.c +++ b/drivers/video/tegra/nvmap/nvmap_heap.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * SPDX-FileCopyrightText: Copyright (c) 2011-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2011-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * GPU heap allocator. */ @@ -222,7 +222,26 @@ int nvmap_query_heap(struct nvmap_query_heap_params *op, bool is_numa_aware) struct nvmap_heap *heap; unsigned int type; int i; - int numa_id; + /* + * Default value of numa_id will be 0. 
It can be overwritten by user's + * input only if NvRmMemQueryHeapParamsNuma is called (making + * is_numa_aware true). + * If NvRmMemQueryHeapParamsNuma is called: + * 1. When single numa node present + * a. Return params for only op->numa_id = 0. + * b. Return error if op->numa_id is not 0. + * 2. When multiple numa nodes present + * a. Return params for op->numa_id + * If NvRmMemQueryHeapParams is called: + * 1. When single numa node present + * a. Return params using system_heap_total_mem & + * system_heap_free_mem for iovmm carveout. + * b. Return params for numa id 0 for any other + * carveout heaps. + * 2. When multiple numa nodes present + * a. Return params for numa id 0. + */ + int numa_id = 0; unsigned long free_mem = 0; int ret = 0; @@ -255,9 +274,6 @@ int nvmap_query_heap(struct nvmap_query_heap_params *op, bool is_numa_aware) * hugetlbfs, so we need to return the HugePages_Total, HugePages_Free values */ if (type & NVMAP_HEAP_CARVEOUT_GPU) { - if (!is_numa_aware) - numa_id = 0; - ret = compute_hugetlbfs_stat(&op->total, &op->free, numa_id); if (ret) goto exit; @@ -267,10 +283,8 @@ int nvmap_query_heap(struct nvmap_query_heap_params *op, bool is_numa_aware) } else if (type & NVMAP_HEAP_CARVEOUT_MASK) { for (i = 0; i < nvmap_dev->nr_carveouts; i++) { if ((type & nvmap_get_heap_bit(nvmap_dev->heaps[i])) && - (is_numa_aware ? (numa_id == nvmap_get_heap_nid(nvmap_get_heap_ptr( - nvmap_dev->heaps[i]))) : - true)) { + nvmap_dev->heaps[i])))) { heap = nvmap_get_heap_ptr(nvmap_dev->heaps[i]); op->total = nvmap_query_heap_size(heap); op->free = nvmap_get_heap_free_size(heap);