diff --git a/drivers/video/tegra/nvmap/nvmap_alloc.c b/drivers/video/tegra/nvmap/nvmap_alloc.c
index 4e42c6fc..19d62f5f 100644
--- a/drivers/video/tegra/nvmap/nvmap_alloc.c
+++ b/drivers/video/tegra/nvmap/nvmap_alloc.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2025, NVIDIA CORPORATION. All rights reserved.
  *
  * Handle allocation and freeing routines for nvmap
  */
@@ -28,6 +28,7 @@
 #include
 #endif /* NVMAP_UPSTREAM_KERNEL */
 #include "nvmap_priv.h"
+#include

 bool nvmap_convert_carveout_to_iovmm;
 bool nvmap_convert_iovmm_to_carveout;
@@ -494,6 +495,8 @@ static int handle_page_alloc(struct nvmap_client *client,
 #else
 	static u8 chipid;
 #endif
+	struct mm_struct *mm = current->mm;
+	struct nvmap_handle_ref *ref;

 	if (!chipid) {
 #ifdef NVMAP_CONFIG_COLOR_PAGES
@@ -511,6 +514,13 @@
 	if (!pages)
 		return -ENOMEM;

+	/*
+	 * Get a refcount on the mm_struct so that it won't be freed until
+	 * nvmap drops the refcount after reducing the RSS counter.
+	 */
+	if (!mmget_not_zero(mm))
+		goto page_free;
+
 	if (contiguous) {
 		struct page *page;
 		page = nvmap_alloc_pages_exact(gfp, size, true, h->numa_id);
@@ -582,6 +592,12 @@
 		nvmap_total_page_allocs += nr_page;
 	}

+	/*
+	 * Increment the RSS counter of the allocating process by the number of pages allocated.
+	 */
+	h->anon_count = nr_page;
+	add_mm_counter(mm, MM_ANONPAGES, nr_page);
+
 	/*
 	 * Make sure any data in the caches is cleaned out before
 	 * passing these pages to userspace. Many nvmap clients assume that
@@ -595,11 +611,28 @@
 	h->pgalloc.pages = pages;
 	h->pgalloc.contig = contiguous;
 	atomic_set(&h->pgalloc.ndirty, 0);
+
+	nvmap_ref_lock(client);
+	ref = __nvmap_validate_locked(client, h, false);
+	if (ref) {
+		ref->mm = mm;
+		ref->anon_count = h->anon_count;
+	} else {
+		add_mm_counter(mm, MM_ANONPAGES, -nr_page);
+		mmput(mm);
+	}
+
+	nvmap_ref_unlock(client);
 	return 0;

 fail:
 	while (i--)
 		__free_page(pages[i]);
+
+	/* In case of failure, release the reference on the mm_struct. */
+	mmput(mm);
+
+page_free:
 	nvmap_altfree(pages, nr_page * sizeof(*pages));
 	wmb();
 	return -ENOMEM;
@@ -1072,9 +1105,18 @@ void _nvmap_handle_free(struct nvmap_handle *h)
 			h->pgalloc.pages[i] = nvmap_to_page(h->pgalloc.pages[i]);

 #ifdef NVMAP_CONFIG_PAGE_POOLS
-	if (!h->from_va && !h->is_subhandle)
-		page_index = nvmap_page_pool_fill_lots(&nvmap_dev->pool,
-				h->pgalloc.pages, nr_page);
+	if (!h->from_va && !h->is_subhandle) {
+		/*
+		 * When the process is exiting with a kill signal pending, don't release the
+		 * memory back into the page pool, so that it is returned to the kernel and
+		 * the OOM killer can actually free it.
+		 */
+		if (fatal_signal_pending(current) == 0 &&
+		    sigismember(&current->signal->shared_pending.signal, SIGKILL) == 0) {
+			page_index = nvmap_page_pool_fill_lots(&nvmap_dev->pool,
+					h->pgalloc.pages, nr_page);
+		}
+	}
 #endif

 	for (i = page_index; i < nr_page; i++) {
@@ -1129,6 +1171,17 @@ void nvmap_free_handle(struct nvmap_client *client,
 	if (h->owner == client)
 		h->owner = NULL;

+	/*
+	 * When a reference is freed, decrement the RSS counter of the process corresponding
+	 * to this ref and do mmput() so that the mm_struct can be freed, if required.
+	 */
+	if (ref->mm != NULL && ref->anon_count != 0) {
+		add_mm_counter(ref->mm, MM_ANONPAGES, -ref->anon_count);
+		mmput(ref->mm);
+		ref->mm = NULL;
+		ref->anon_count = 0;
+	}
+
 	if (is_ro)
 		dma_buf_put(ref->handle->dmabuf_ro);
 	else
diff --git a/drivers/video/tegra/nvmap/nvmap_dev.c b/drivers/video/tegra/nvmap/nvmap_dev.c
index 57feeda4..02d6bccb 100644
--- a/drivers/video/tegra/nvmap/nvmap_dev.c
+++ b/drivers/video/tegra/nvmap/nvmap_dev.c
@@ -1,4 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2011-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 /*
  * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
  *
@@ -277,6 +278,17 @@ static void destroy_client(struct nvmap_client *client)
 		if (ref->handle->owner == client)
 			ref->handle->owner = NULL;

+		/*
+		 * When a reference is freed, decrement the RSS counter of the process corresponding
+		 * to this ref and do mmput() so that the mm_struct can be freed, if required.
+		 */
+		if (ref->mm != NULL && ref->anon_count != 0) {
+			add_mm_counter(ref->mm, MM_ANONPAGES, -ref->anon_count);
+			mmput(ref->mm);
+			ref->mm = NULL;
+			ref->anon_count = 0;
+		}
+
 		if (ref->is_ro)
 			dma_buf_put(ref->handle->dmabuf_ro);
 		else
diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c
index 7c9b548d..3be9c8bb 100644
--- a/drivers/video/tegra/nvmap/nvmap_handle.c
+++ b/drivers/video/tegra/nvmap/nvmap_handle.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2009-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2009-2025, NVIDIA CORPORATION. All rights reserved.
  *
  * Handle allocation and freeing routines for nvmap
  */
@@ -401,17 +401,31 @@
 	atomic_set(&ref->dupes, 1);
 	ref->handle = h;
+
+	/*
+	 * When a new reference to the handle is created, save mm and anon_count in the ref
+	 * and increment the refcount of mm.
+	 */
+	ref->mm = current->mm;
+	ref->anon_count = h->anon_count;
 	add_handle_ref(client, ref);

+	if (ref->anon_count != 0 && ref->mm != NULL) {
+		if (!mmget_not_zero(ref->mm))
+			goto exit;
+
+		add_mm_counter(ref->mm, MM_ANONPAGES, ref->anon_count);
+	}
+
 	if (is_ro) {
 		ref->is_ro = true;
 		if (!h->dmabuf_ro)
-			goto exit;
+			goto exit_mm;
 		get_dma_buf(h->dmabuf_ro);
 	} else {
 		ref->is_ro = false;
 		if (!h->dmabuf)
-			goto exit;
+			goto exit_mm;
 		get_dma_buf(h->dmabuf);
 	}
@@ -420,6 +434,14 @@ out:
 			NVMAP_TP_ARGS_CHR(client, h, ref));
 	return ref;

+exit_mm:
+	if (ref->anon_count != 0 && ref->mm != NULL) {
+		add_mm_counter(ref->mm, MM_ANONPAGES, -ref->anon_count);
+		mmput(ref->mm);
+		ref->mm = NULL;
+		ref->anon_count = 0;
+	}
+
 exit:
 	pr_err("dmabuf is NULL\n");
 	kfree(ref);
diff --git a/drivers/video/tegra/nvmap/nvmap_priv.h b/drivers/video/tegra/nvmap/nvmap_priv.h
index 232326cb..01328bfa 100644
--- a/drivers/video/tegra/nvmap/nvmap_priv.h
+++ b/drivers/video/tegra/nvmap/nvmap_priv.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2009-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2009-2025, NVIDIA CORPORATION. All rights reserved.
  *
  * GPU memory management driver for Tegra
  */
@@ -93,7 +93,7 @@ do { \
 	} \
 } while (0)

-#define GFP_NVMAP (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
+#define GFP_NVMAP (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN | __GFP_ACCOUNT | __GFP_NORETRY)

 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
@@ -271,6 +271,7 @@ struct nvmap_handle {
 	wait_queue_head_t waitq;
 	int numa_id;
 	u64 serial_id;
+	u64 anon_count;
 };

 struct nvmap_handle_info {
@@ -295,6 +296,8 @@ struct nvmap_handle_ref {
 	struct rb_node node;
 	atomic_t dupes;	/* number of times to free on file close */
 	bool is_ro;
+	struct mm_struct *mm;
+	u64 anon_count;
 };

 #if defined(NVMAP_CONFIG_PAGE_POOLS)
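
For context on the pattern the patch applies: the allocation and duplication paths pin the owning
mm_struct and charge the allocated pages to its anonymous RSS counter, and every free path undoes
both steps before the ref goes away. A minimal stand-alone sketch of that pattern is below; this is
not nvmap code, and charge_anon_pages()/uncharge_anon_pages() are hypothetical helper names used
only for illustration.

#include <linux/mm.h>
#include <linux/sched/mm.h>

/* Pin current->mm and charge 'nr' pages to its anonymous RSS counter. */
static struct mm_struct *charge_anon_pages(long nr)
{
	struct mm_struct *mm = current->mm;

	/* mmget_not_zero() fails if the mm is already being torn down. */
	if (!mm || !nr || !mmget_not_zero(mm))
		return NULL;

	add_mm_counter(mm, MM_ANONPAGES, nr);
	return mm;
}

/* Undo the charge and drop the reference taken by charge_anon_pages(). */
static void uncharge_anon_pages(struct mm_struct *mm, long nr)
{
	if (!mm || !nr)
		return;

	add_mm_counter(mm, MM_ANONPAGES, -nr);
	mmput(mm);
}

Holding the mm reference between the two calls is what keeps add_mm_counter() safe even after the
owning process has exited; the RSS charge is only dropped together with the mmput(), which mirrors
how the free paths in the patch pair the counter decrement with releasing the mm.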