tegra: nvmap: Reduce reported freemem from QueryHeapParams

Reduce the free memory reported by QueryHeapParams by 0.1% to avoid
OOM issues for Iovmm_heap. For the overall free memory reported
by the QueryHeapParams API, align it down to the previous page
boundary to avoid any failures in AllocAttr caused by page-aligning
the requested size when that size equals the reported free size.
Update the AllocAttr API as well so that it checks the available
free memory before allocating.

Bug 4719292

Change-Id: I7ecd69216d58c36ee5d0750107546601400e722d
Signed-off-by: N V S Abhishek <nabhishek@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3165599
Reviewed-by: Ketan Patil <ketanp@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
This commit is contained in:
N V S Abhishek
2024-07-01 08:06:12 +00:00
committed by mobile promotions
parent 4593cff331
commit 3fdb84d2c8
2 changed files with 21 additions and 6 deletions

View File

@@ -578,12 +578,12 @@ next_page:
bool is_nvmap_memory_available(size_t size, uint32_t heap, int numa_nid)
{
unsigned long total_num_pages;
unsigned int carveout_mask = NVMAP_HEAP_CARVEOUT_MASK;
unsigned int iovmm_mask = NVMAP_HEAP_IOVMM;
struct nvmap_device *dev = nvmap_dev;
bool memory_available = false;
int i;
unsigned long free_mem = 0;
if (!heap)
return false;
@@ -599,11 +599,15 @@ bool is_nvmap_memory_available(size_t size, uint32_t heap, int numa_nid)
}
if (heap & iovmm_mask) {
total_num_pages = totalram_pages();
if ((size >> PAGE_SHIFT) > total_num_pages) {
if (system_heap_free_mem(&free_mem)) {
pr_debug("Call to system_heap_free_mem failed\n");
return false;
}
if (size > (free_mem & PAGE_MASK)) {
pr_debug("Requested size is more than available memory\n");
pr_debug("Requested size : %lu B, Available memory : %lu B\n", size,
total_num_pages << PAGE_SHIFT);
free_mem & PAGE_MASK);
return false;
}
return true;
@@ -625,14 +629,14 @@ bool is_nvmap_memory_available(size_t size, uint32_t heap, int numa_nid)
* on that numa node.
*/
if (numa_nid == NUMA_NO_NODE) {
if (size > h->free_size)
if (size > (h->free_size & PAGE_MASK))
continue;
memory_available = true;
goto exit;
} else {
if (h->numa_node_id != numa_nid)
continue;
else if (size > h->free_size)
else if (size > (h->free_size & PAGE_MASK))
memory_available = false;
else
memory_available = true;

View File

@@ -1278,6 +1278,7 @@ static int compute_hugetlbfs_stat(u64 *total, u64 *free, int numa_id)
/*
* This function calculates allocatable free memory using following formula:
* free_mem = avail mem - cma free
* free_mem = free_mem - (free_mem / 1000);
* The CMA memory is not allocatable by NvMap for regular allocations and it
* is part of Available memory reported, so subtract it from available memory.
*/
@@ -1299,6 +1300,10 @@ int system_heap_free_mem(unsigned long *mem_val)
return 0;
}
free_mem = (available_mem << PAGE_SHIFT) - cma_free;
/* reduce free_mem by ~ 0.1% */
free_mem = free_mem - (free_mem / 1000);
*mem_val = free_mem;
return 0;
}
@@ -1405,6 +1410,12 @@ static int nvmap_query_heap_params(void __user *arg, bool is_numa_aware)
op.granule_size = PAGE_SIZE;
}
/*
* Align free size reported to the previous page.
* This avoids any AllocAttr failures due to using PAGE_ALIGN
* for allocating exactly the free memory reported.
*/
op.free = op.free & PAGE_MASK;
if (copy_to_user(arg, &op, sizeof(op)))
ret = -EFAULT;
exit: