tegra: nvmap: Reduce reported freemem from QueryHeapParams

Reduce the free memory reported by QueryHeapParams by ~0.1% to avoid
OOM issues for the IOVMM heap. Also round the overall free memory
reported by the query heap params API down to the previous page
boundary, so that AllocAttr does not fail when PAGE_ALIGN is applied
to a requested size equal to the reported free size. Update the
AllocAttr path as well so that it checks the available free memory
before allocating. (The resulting accounting is sketched below.)

Bug 4719292

Change-Id: I7ecd69216d58c36ee5d0750107546601400e722d
Signed-off-by: N V S Abhishek <nabhishek@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3165599
(cherry picked from commit 3fdb84d2c8)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3230712
Reviewed-by: Pritesh Raithatha <praithatha@nvidia.com>
Reviewed-by: Ketan Patil <ketanp@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
2 changed files with 22 additions and 11 deletions
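For reference, the accounting described in the message works out roughly as below. This is an illustrative sketch only, not the driver code: example_reported_free, available_bytes and cma_free_bytes are invented names standing in for the values the driver derives from the kernel's memory statistics, and EXAMPLE_PAGE_SIZE assumes 4 KiB pages.

/* Illustrative sketch only -- not the nvmap driver code. */
#define EXAMPLE_PAGE_SIZE 4096UL
#define EXAMPLE_PAGE_MASK (~(EXAMPLE_PAGE_SIZE - 1))

static unsigned long example_reported_free(unsigned long available_bytes,
					   unsigned long cma_free_bytes)
{
	/* CMA memory is not usable for regular NvMap allocations. */
	unsigned long free_mem = available_bytes - cma_free_bytes;

	/* Page coloring can consume up to 1/16 extra memory. */
	free_mem -= free_mem >> 4;

	/* Keep ~0.1% headroom to avoid IOVMM OOM corner cases. */
	free_mem -= free_mem / 1000;

	/* Round down to a page boundary before reporting, so that
	 * PAGE_ALIGN(reported value) can never exceed what is free.
	 */
	return free_mem & EXAMPLE_PAGE_MASK;
}

In the actual change, the subtraction steps live in system_heap_free_mem(), the final page rounding happens in nvmap_ioctl_query_heap_params(), and the size checks use the page-masked values in is_nvmap_memory_available(), as the diff below shows.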


@@ -597,12 +597,12 @@ next_page:
 bool is_nvmap_memory_available(size_t size, uint32_t heap)
 {
-	unsigned long total_num_pages;
 	unsigned int carveout_mask = NVMAP_HEAP_CARVEOUT_MASK;
 	unsigned int iovmm_mask = NVMAP_HEAP_IOVMM;
 	struct nvmap_device *dev = nvmap_dev;
 	bool heap_present = false;
 	int i;
+	unsigned long free_mem = 0;
 
 	if (!heap)
 		return false;
@@ -618,15 +618,15 @@ bool is_nvmap_memory_available(size_t size, uint32_t heap)
 	}
 	if (heap & iovmm_mask) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
-		total_num_pages = totalram_pages();
-#else
-		total_num_pages = totalram_pages;
-#endif
-		if ((size >> PAGE_SHIFT) > total_num_pages) {
+		if (system_heap_free_mem(&free_mem)) {
+			pr_debug("Call to system_heap_free_mem failed\n");
+			return false;
+		}
+		if (size > (free_mem & PAGE_MASK)) {
 			pr_debug("Requested size is more than available memory\n");
 			pr_debug("Requested size : %lu B, Available memory : %lu B\n", size,
-				 total_num_pages << PAGE_SHIFT);
+				 free_mem & PAGE_MASK);
 			return false;
 		}
 		return true;
@@ -642,10 +642,10 @@ bool is_nvmap_memory_available(size_t size, uint32_t heap)
 		heap_present = true;
 		h = co_heap->carveout;
-		if (size > h->free_size) {
+		if (size > (h->free_size & PAGE_MASK)) {
 			pr_debug("Requested size is more than available memory");
 			pr_debug("Requested size : %lu B, Available memory : %lu B\n", size,
-				 h->free_size);
+				 (h->free_size & PAGE_MASK));
 			return false;
 		}
 		break;


@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2024, NVIDIA CORPORATION. All rights reserved.
  *
  * User-space interface to nvmap
  */
@@ -1323,6 +1323,7 @@ int nvmap_ioctl_handle_from_sci_ipc_id(struct file *filp, void __user *arg)
 /*
  * This function calculates allocatable free memory using following formula:
  * free_mem = avail mem - cma free - (avail mem - cma free) / 16
+ * free_mem = free_mem - (free_mem / 1000);
  * The CMA memory is not allocatable by NvMap for regular allocations and it
  * is part of Available memory reported, so subtract it from available memory.
  * NvMap allocates 1/16 extra memory in page coloring, so subtract it as well.
@@ -1352,6 +1353,10 @@ int system_heap_free_mem(unsigned long *mem_val)
 #ifdef NVMAP_CONFIG_COLOR_PAGES
 	free_mem = free_mem - (free_mem >> 4);
 #endif /* NVMAP_CONFIG_COLOR_PAGES */
+
+	/* reduce free_mem by ~ 0.1% */
+	free_mem = free_mem - (free_mem / 1000);
+
 	*mem_val = free_mem;
 	return 0;
 }
@@ -1422,6 +1427,12 @@ int nvmap_ioctl_query_heap_params(struct file *filp, void __user *arg)
 		op.granule_size = PAGE_SIZE;
 	}
+	/*
+	 * Align free size reported to the previous page.
+	 * This avoids any AllocAttr failures due to using PAGE_ALIGN
+	 * for allocating exactly the free memory reported.
+	 */
+	op.free = op.free & PAGE_MASK;
 	if (copy_to_user(arg, &op, sizeof(op)))
 		ret = -EFAULT;
 exit:
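
To make the page-rounding rationale concrete, here is a worked example with hypothetical numbers; the byte values are invented for illustration and a 4 KiB page size is assumed.

/* Worked example with made-up numbers, assuming PAGE_SIZE == 4096:
 *
 *   computed free memory    = 1,000,000 B   (not page aligned)
 *   old reported value      = 1,000,000 B
 *   PAGE_ALIGN(1,000,000)   = 1,003,520 B   -> exceeds what is free,
 *                                              so AllocAttr can fail
 *   new reported value      = 1,000,000 & PAGE_MASK = 999,424 B
 *   PAGE_ALIGN(999,424)     =   999,424 B   -> still fits
 */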