video: tegra: nvmap: Avoid double update of RSS counter

The RSS counter is updated during both buffer allocation and mmap,
which leads to a double update. Fix this by decrementing the RSS
counter during the page fault and incrementing it back during the
unmap flow.

Bug 5222690

Change-Id: I77972185f20d9d710571cc07ae1c5188060bfa1f
Signed-off-by: Ketan Patil <ketanp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3447073
Reviewed-by: Ajay Nandakumar Mannargudi <anandakumarm@nvidia.com>
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Jon Hunter <jonathanh@nvidia.com>
This commit is contained in:
Ketan Patil
2025-09-05 06:58:04 +00:00
committed by mobile promotions
parent 858d73775a
commit 5ef68aa58f
3 changed files with 23 additions and 3 deletions

View File

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only // SPDX-License-Identifier: GPL-2.0-only
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2012-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2012-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* *
* dma_buf exporter for nvmap * dma_buf exporter for nvmap
*/ */
@@ -410,6 +410,7 @@ static int __nvmap_map(struct nvmap_handle *h, struct vm_area_struct *vma)
nvmap_handle_put(h); nvmap_handle_put(h);
return -ENOMEM; return -ENOMEM;
} }
mutex_init(&priv->vma_lock);
priv->handle = h; priv->handle = h;
#if defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS) /* Linux v6.3 */ #if defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS) /* Linux v6.3 */

View File

@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */ /* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ /* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef __NVMAP_DMABUF_H #ifndef __NVMAP_DMABUF_H
#define __NVMAP_DMABUF_H #define __NVMAP_DMABUF_H
@@ -54,6 +54,9 @@ struct nvmap_vma_priv {
struct nvmap_handle *handle; struct nvmap_handle *handle;
size_t offs; size_t offs;
atomic_t count; /* number of processes cloning the VMA */ atomic_t count; /* number of processes cloning the VMA */
u64 map_rss_count;
struct mm_struct *mm;
struct mutex vma_lock;
}; };
int is_nvmap_vma(struct vm_area_struct *vma); int is_nvmap_vma(struct vm_area_struct *vma);

View File

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only // SPDX-License-Identifier: GPL-2.0-only
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2011-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2011-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/ */
#define pr_fmt(fmt) "%s: " fmt, __func__ #define pr_fmt(fmt) "%s: " fmt, __func__
@@ -145,6 +145,14 @@ static void nvmap_vma_close(struct vm_area_struct *vma)
BUG_ON(!vma_found); BUG_ON(!vma_found);
nvmap_umaps_dec(h); nvmap_umaps_dec(h);
mutex_lock(&priv->vma_lock);
if (priv->mm != NULL && h->anon_count != 0) {
nvmap_add_mm_counter(priv->mm, MM_ANONPAGES, priv->map_rss_count);
priv->map_rss_count = 0;
priv->mm = NULL;
}
mutex_unlock(&priv->vma_lock);
if (__atomic_add_unless(&priv->count, -1, 0) == 1) { if (__atomic_add_unless(&priv->count, -1, 0) == 1) {
if (h->heap_pgalloc) { if (h->heap_pgalloc) {
for (i = 0; i < nr_page; i++) { for (i = 0; i < nr_page; i++) {
@@ -220,6 +228,14 @@ static vm_fault_t nvmap_vma_fault(struct vm_fault *vmf)
return VM_FAULT_SIGSEGV; return VM_FAULT_SIGSEGV;
} }
mutex_lock(&priv->vma_lock);
if (priv->handle->anon_count != 0 && current->mm != NULL) {
nvmap_add_mm_counter(current->mm, MM_ANONPAGES, -1);
priv->map_rss_count++;
priv->mm = current->mm;
}
mutex_unlock(&priv->vma_lock);
if (!nvmap_handle_track_dirty(priv->handle)) if (!nvmap_handle_track_dirty(priv->handle))
goto finish; goto finish;
mutex_lock(&priv->handle->lock); mutex_lock(&priv->handle->lock);