video: tegra: nvmap: Account NvMap memory for OOM decisions

Account NvMap-allocated memory in both RSS and cgroup (memcg) tracking
so that the kernel can make better OOM-kill decisions under memory
pressure.

NvMap allocates memory via kernel APIs such as alloc_pages, so the
memory is not accounted to the process that requested the allocation.
Hence, on OOM, the OOM killer never kills a process that allocated its
memory via NvMap, even though that process may be holding most of the
memory.

Solve this issue using the following approach:
- Use the __GFP_ACCOUNT and __GFP_NORETRY flags (see the sketch after
  this list).
- __GFP_NORETRY keeps the allocation from entering the OOM path, so an
  NvMap allocation itself never triggers the OOM killer.
- __GFP_ACCOUNT causes the allocation to be accounted to kmemcg. Every
  allocation done by NvMap is therefore charged to kmemcg, and cgroups
  can be used to enforce memory limits.
- Add RSS accounting for the process that allocates via NvMap, so that
  the OOM score of that process is updated and the OOM killer can pick
  it based on that score.
- Every process holding a reference to an NvMap handle has the memory
  size accounted in its RSS; when the reference to the handle is
  released, the RSS is reduced again.
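
To illustrate the pattern, here is a minimal sketch (hypothetical
helper names, not the actual NvMap code; the real changes are in the
diff below). It combines the GFP flags with the RSS charge/uncharge
and the mm_struct pinning described above:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>

/*
 * Sketch only: sketch_alloc_accounted()/sketch_free_accounted() are
 * hypothetical names. __GFP_ACCOUNT charges the pages to the
 * allocating task's kmemcg; __GFP_NORETRY makes the allocator return
 * NULL instead of entering the OOM path.
 */
static struct page *sketch_alloc_accounted(unsigned int order)
{
        struct mm_struct *mm = current->mm;
        struct page *page;

        page = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN |
                           __GFP_ACCOUNT | __GFP_NORETRY, order);
        if (!page)
                return NULL;

        /* Pin the mm so it outlives the charge, then bump the RSS. */
        if (!mmget_not_zero(mm)) {
                __free_pages(page, order);
                return NULL;
        }
        add_mm_counter(mm, MM_ANONPAGES, 1L << order);
        return page;
}

static void sketch_free_accounted(struct mm_struct *mm, struct page *page,
                                  unsigned int order)
{
        /* Undo the RSS charge and drop the mm reference taken above. */
        add_mm_counter(mm, MM_ANONPAGES, -(1L << order));
        mmput(mm);
        __free_pages(page, order);
}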

Bug 5222690

Change-Id: I3fa9b76ec9fc8d7f805111cb96e11e2ab1db42ce
Signed-off-by: Ketan Patil <ketanp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3427871
(cherry picked from commit 2ca91098aa)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3453101
(cherry picked from commit cfe6242c8c)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3454642
Reviewed-by: Amulya Yarlagadda <ayarlagadda@nvidia.com>
Tested-by: Amulya Yarlagadda <ayarlagadda@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2025, NVIDIA CORPORATION. All rights reserved.
  *
  * Handle allocation and freeing routines for nvmap
  */
@@ -28,6 +28,7 @@
 #include <linux/libnvdimm.h>
 #endif /* NVMAP_UPSTREAM_KERNEL */
 #include "nvmap_priv.h"
+#include <linux/mm.h>
 
 bool nvmap_convert_carveout_to_iovmm;
 bool nvmap_convert_iovmm_to_carveout;
@@ -494,6 +495,8 @@ static int handle_page_alloc(struct nvmap_client *client,
 #else
        static u8 chipid;
 #endif
+       struct mm_struct *mm = current->mm;
+       struct nvmap_handle_ref *ref;
 
        if (!chipid) {
 #ifdef NVMAP_CONFIG_COLOR_PAGES
@@ -511,6 +514,13 @@ static int handle_page_alloc(struct nvmap_client *client,
        if (!pages)
                return -ENOMEM;
 
+       /*
+        * Get a refcount on the mm_struct, so that it won't be freed until
+        * nvmap drops the refcount after it reduces the RSS counter.
+        */
+       if (!mmget_not_zero(mm))
+               goto page_free;
+
        if (contiguous) {
                struct page *page;
                page = nvmap_alloc_pages_exact(gfp, size, true, h->numa_id);
@@ -582,6 +592,12 @@ static int handle_page_alloc(struct nvmap_client *client,
                nvmap_total_page_allocs += nr_page;
        }
 
+       /*
+        * Increment the RSS counter of the allocating process by the
+        * number of pages allocated.
+        */
+       h->anon_count = nr_page;
+       add_mm_counter(mm, MM_ANONPAGES, nr_page);
+
        /*
         * Make sure any data in the caches is cleaned out before
         * passing these pages to userspace. Many nvmap clients assume that
@@ -595,11 +611,28 @@ static int handle_page_alloc(struct nvmap_client *client,
        h->pgalloc.pages = pages;
        h->pgalloc.contig = contiguous;
        atomic_set(&h->pgalloc.ndirty, 0);
+
+       nvmap_ref_lock(client);
+       ref = __nvmap_validate_locked(client, h, false);
+       if (ref) {
+               ref->mm = mm;
+               ref->anon_count = h->anon_count;
+       } else {
+               add_mm_counter(mm, MM_ANONPAGES, -nr_page);
+               mmput(mm);
+       }
+       nvmap_ref_unlock(client);
+
        return 0;
 
 fail:
        while (i--)
                __free_page(pages[i]);
+
+       /* In case of failure, release the reference on the mm_struct. */
+       mmput(mm);
+
+page_free:
        nvmap_altfree(pages, nr_page * sizeof(*pages));
        wmb();
        return -ENOMEM;
@@ -1072,9 +1105,18 @@ void _nvmap_handle_free(struct nvmap_handle *h)
                h->pgalloc.pages[i] = nvmap_to_page(h->pgalloc.pages[i]);
 
 #ifdef NVMAP_CONFIG_PAGE_POOLS
-       if (!h->from_va && !h->is_subhandle)
-               page_index = nvmap_page_pool_fill_lots(&nvmap_dev->pool,
-                                       h->pgalloc.pages, nr_page);
+       if (!h->from_va && !h->is_subhandle) {
+               /*
+                * When the process is exiting with a kill signal pending,
+                * don't release the memory back into the page pool, so that
+                * it is returned to the kernel and the OOM killer can
+                * actually free it.
+                */
+               if (fatal_signal_pending(current) == 0 &&
+                   sigismember(&current->signal->shared_pending.signal, SIGKILL) == 0) {
+                       page_index = nvmap_page_pool_fill_lots(&nvmap_dev->pool,
+                                               h->pgalloc.pages, nr_page);
+               }
+       }
 #endif
 
        for (i = page_index; i < nr_page; i++) {
@@ -1129,6 +1171,17 @@ void nvmap_free_handle(struct nvmap_client *client,
        if (h->owner == client)
                h->owner = NULL;
 
+       /*
+        * When a reference is freed, decrement the RSS counter of the
+        * process corresponding to this ref and do mmput so that the
+        * mm_struct can be freed, if required.
+        */
+       if (ref->mm != NULL && ref->anon_count != 0) {
+               add_mm_counter(ref->mm, MM_ANONPAGES, -ref->anon_count);
+               mmput(ref->mm);
+               ref->mm = NULL;
+               ref->anon_count = 0;
+       }
+
        if (is_ro)
                dma_buf_put(ref->handle->dmabuf_ro);
        else


@@ -1,4 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2011-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 /*
  * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
  *
@@ -277,6 +278,17 @@ static void destroy_client(struct nvmap_client *client)
                if (ref->handle->owner == client)
                        ref->handle->owner = NULL;
 
+               /*
+                * When a reference is freed, decrement the RSS counter of the
+                * process corresponding to this ref and do mmput so that the
+                * mm_struct can be freed, if required.
+                */
+               if (ref->mm != NULL && ref->anon_count != 0) {
+                       add_mm_counter(ref->mm, MM_ANONPAGES, -ref->anon_count);
+                       mmput(ref->mm);
+                       ref->mm = NULL;
+                       ref->anon_count = 0;
+               }
+
                if (ref->is_ro)
                        dma_buf_put(ref->handle->dmabuf_ro);
                else


@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2009-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2009-2025, NVIDIA CORPORATION. All rights reserved.
  *
  * Handle allocation and freeing routines for nvmap
  */
@@ -401,17 +401,31 @@ struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,
        atomic_set(&ref->dupes, 1);
        ref->handle = h;
 
+       /*
+        * When a new reference is created to the handle, save mm and
+        * anon_count in the ref and increment the refcount of mm.
+        */
+       ref->mm = current->mm;
+       ref->anon_count = h->anon_count;
        add_handle_ref(client, ref);
+
+       if (ref->anon_count != 0 && ref->mm != NULL) {
+               if (!mmget_not_zero(ref->mm))
+                       goto exit;
+               add_mm_counter(ref->mm, MM_ANONPAGES, ref->anon_count);
+       }
+
        if (is_ro) {
                ref->is_ro = true;
                if (!h->dmabuf_ro)
-                       goto exit;
+                       goto exit_mm;
                get_dma_buf(h->dmabuf_ro);
        } else {
                ref->is_ro = false;
                if (!h->dmabuf)
-                       goto exit;
+                       goto exit_mm;
                get_dma_buf(h->dmabuf);
        }
@@ -420,6 +434,14 @@ out:
                        NVMAP_TP_ARGS_CHR(client, h, ref));
        return ref;
 
+exit_mm:
+       if (ref->anon_count != 0 && ref->mm != NULL) {
+               add_mm_counter(ref->mm, MM_ANONPAGES, -ref->anon_count);
+               mmput(ref->mm);
+               ref->mm = NULL;
+               ref->anon_count = 0;
+       }
 exit:
        pr_err("dmabuf is NULL\n");
        kfree(ref);


@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2009-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2009-2025, NVIDIA CORPORATION. All rights reserved.
  *
  * GPU memory management driver for Tegra
  */
@@ -93,7 +93,7 @@ do { \
        } \
 } while (0)
 
-#define GFP_NVMAP (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
+#define GFP_NVMAP (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN | __GFP_ACCOUNT | __GFP_NORETRY)
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
@@ -271,6 +271,7 @@ struct nvmap_handle {
        wait_queue_head_t waitq;
        int numa_id;
        u64 serial_id;
+       u64 anon_count;
 };
 
 struct nvmap_handle_info {
@@ -295,6 +296,8 @@ struct nvmap_handle_ref {
        struct rb_node node;
        atomic_t dupes; /* number of times to free on file close */
        bool is_ro;
+       struct mm_struct *mm;
+       u64 anon_count;
 };
 
 #if defined(NVMAP_CONFIG_PAGE_POOLS)