Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git
Synced 2025-12-24 10:11:26 +03:00

Compare commits: rel-36-lws ... jetson_36.
3 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 564ce2a709 | |
| | 0a0a84577b | |
| | eacae28123 | |
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2011-2025, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Handle allocation and freeing routines for nvmap
  */
@@ -28,7 +28,6 @@
 #include <linux/libnvdimm.h>
 #endif /* NVMAP_UPSTREAM_KERNEL */
 #include "nvmap_priv.h"
-#include <linux/mm.h>

 bool nvmap_convert_carveout_to_iovmm;
 bool nvmap_convert_iovmm_to_carveout;
@@ -495,8 +494,6 @@ static int handle_page_alloc(struct nvmap_client *client,
 #else
	static u8 chipid;
 #endif
-	struct mm_struct *mm = current->mm;
-	struct nvmap_handle_ref *ref;

	if (!chipid) {
 #ifdef NVMAP_CONFIG_COLOR_PAGES
@@ -514,13 +511,6 @@ static int handle_page_alloc(struct nvmap_client *client,
	if (!pages)
		return -ENOMEM;

-	/*
-	 * Get refcount on mm_struct, so that it won't be freed until
-	 * nvmap reduces refcount after it reduces the RSS counter.
-	 */
-	if (!mmget_not_zero(mm))
-		goto page_free;
-
	if (contiguous) {
		struct page *page;
		page = nvmap_alloc_pages_exact(gfp, size, true, h->numa_id);
@@ -592,12 +582,6 @@ static int handle_page_alloc(struct nvmap_client *client,
		nvmap_total_page_allocs += nr_page;
	}

-	/*
-	 * Increment the RSS counter of the allocating process by number of pages allocated.
-	 */
-	h->anon_count = nr_page;
-	nvmap_add_mm_counter(mm, MM_ANONPAGES, nr_page);
-
	/*
	 * Make sure any data in the caches is cleaned out before
	 * passing these pages to userspace. Many nvmap clients assume that
@@ -611,28 +595,11 @@ static int handle_page_alloc(struct nvmap_client *client,
	h->pgalloc.pages = pages;
	h->pgalloc.contig = contiguous;
	atomic_set(&h->pgalloc.ndirty, 0);
-
-	nvmap_ref_lock(client);
-	ref = __nvmap_validate_locked(client, h, false);
-	if (ref) {
-		ref->mm = mm;
-		ref->anon_count = h->anon_count;
-	} else {
-		nvmap_add_mm_counter(mm, MM_ANONPAGES, -nr_page);
-		mmput(mm);
-	}
-
-	nvmap_ref_unlock(client);
	return 0;

 fail:
	while (i--)
		__free_page(pages[i]);
-
-	/* Incase of failure, release the reference on mm_struct. */
-	mmput(mm);
-
-page_free:
	nvmap_altfree(pages, nr_page * sizeof(*pages));
	wmb();
	return -ENOMEM;
@@ -1105,18 +1072,9 @@ void _nvmap_handle_free(struct nvmap_handle *h)
		h->pgalloc.pages[i] = nvmap_to_page(h->pgalloc.pages[i]);

 #ifdef NVMAP_CONFIG_PAGE_POOLS
-	if (!h->from_va && !h->is_subhandle) {
-		/*
-		 * When the process is exiting with kill signal pending, don't release the memory
-		 * back into page pool. So that memory would be released back to the kernel and OOM
-		 * killer would be able to actually free the memory.
-		 */
-		if (fatal_signal_pending(current) == 0 &&
-		    sigismember(&current->signal->shared_pending.signal, SIGKILL) == 0) {
-			page_index = nvmap_page_pool_fill_lots(&nvmap_dev->pool,
-							h->pgalloc.pages, nr_page);
-		}
-	}
+	if (!h->from_va && !h->is_subhandle)
+		page_index = nvmap_page_pool_fill_lots(&nvmap_dev->pool,
+						h->pgalloc.pages, nr_page);
 #endif

	for (i = page_index; i < nr_page; i++) {
@@ -1171,17 +1129,6 @@ void nvmap_free_handle(struct nvmap_client *client,
	if (h->owner == client)
		h->owner = NULL;

-	/*
-	 * When a reference is freed, decrement rss counter of the process corresponding
-	 * to this ref and do mmput so that mm_struct can be freed, if required.
-	 */
-	if (ref->mm != NULL && ref->anon_count != 0) {
-		nvmap_add_mm_counter(ref->mm, MM_ANONPAGES, -ref->anon_count);
-		mmput(ref->mm);
-		ref->mm = NULL;
-		ref->anon_count = 0;
-	}
-
	if (is_ro)
		dma_buf_put(ref->handle->dmabuf_ro);
	else

@@ -1,5 +1,4 @@
 // SPDX-License-Identifier: GPL-2.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2011-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 /*
  * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
  *
@@ -278,17 +277,6 @@ static void destroy_client(struct nvmap_client *client)
		if (ref->handle->owner == client)
			ref->handle->owner = NULL;

-		/*
-		 * When a reference is freed, decrement rss counter of the process corresponding
-		 * to this ref and do mmput so that mm_struct can be freed, if required.
-		 */
-		if (ref->mm != NULL && ref->anon_count != 0) {
-			nvmap_add_mm_counter(ref->mm, MM_ANONPAGES, -ref->anon_count);
-			mmput(ref->mm);
-			ref->mm = NULL;
-			ref->anon_count = 0;
-		}
-
		if (ref->is_ro)
			dma_buf_put(ref->handle->dmabuf_ro);
		else

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2025, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2012-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * dma_buf exporter for nvmap
  */
@@ -450,7 +450,6 @@ int __nvmap_map(struct nvmap_handle *h, struct vm_area_struct *vma)
		nvmap_handle_put(h);
		return -ENOMEM;
	}
-	mutex_init(&priv->vma_lock);
	priv->handle = h;

 #if defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS) /* Linux v6.3 */

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2011-2025, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
  */

 #define pr_fmt(fmt) "%s: " fmt, __func__
@@ -147,14 +147,6 @@ static void nvmap_vma_close(struct vm_area_struct *vma)
	BUG_ON(!vma_found);
	nvmap_umaps_dec(h);

-	mutex_lock(&priv->vma_lock);
-	if (priv->mm != NULL && h->anon_count != 0) {
-		nvmap_add_mm_counter(priv->mm, MM_ANONPAGES, priv->map_rss_count);
-		priv->map_rss_count = 0;
-		priv->mm = NULL;
-	}
-	mutex_unlock(&priv->vma_lock);
-
	if (__atomic_add_unless(&priv->count, -1, 0) == 1) {
		if (h->heap_pgalloc) {
			for (i = 0; i < nr_page; i++) {
@@ -241,14 +233,6 @@ static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
		return VM_FAULT_SIGSEGV;
	}

-	mutex_lock(&priv->vma_lock);
-	if (priv->handle->anon_count != 0 && current->mm != NULL) {
-		nvmap_add_mm_counter(current->mm, MM_ANONPAGES, -1);
-		priv->map_rss_count++;
-		priv->mm = current->mm;
-	}
-	mutex_unlock(&priv->vma_lock);
-
	if (!nvmap_handle_track_dirty(priv->handle))
		goto finish;
	mutex_lock(&priv->handle->lock);

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2009-2025, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2009-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Handle allocation and freeing routines for nvmap
  */
@@ -401,31 +401,17 @@ struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,

	atomic_set(&ref->dupes, 1);
	ref->handle = h;

-	/*
-	 * When a new reference is created to the handle, save mm, anon_count in ref and
-	 * increment ref count of mm.
-	 */
-	ref->mm = current->mm;
-	ref->anon_count = h->anon_count;
	add_handle_ref(client, ref);
-
-	if (ref->anon_count != 0 && ref->mm != NULL) {
-		if (!mmget_not_zero(ref->mm))
-			goto exit;
-
-		nvmap_add_mm_counter(ref->mm, MM_ANONPAGES, ref->anon_count);
-	}
-
	if (is_ro) {
		ref->is_ro = true;
		if (!h->dmabuf_ro)
-			goto exit_mm;
+			goto exit;
		get_dma_buf(h->dmabuf_ro);
	} else {
		ref->is_ro = false;
		if (!h->dmabuf)
-			goto exit_mm;
+			goto exit;
		get_dma_buf(h->dmabuf);
	}

@@ -434,14 +420,6 @@ out:
			   NVMAP_TP_ARGS_CHR(client, h, ref));
	return ref;

-exit_mm:
-	if (ref->anon_count != 0 && ref->mm != NULL) {
-		nvmap_add_mm_counter(ref->mm, MM_ANONPAGES, -ref->anon_count);
-		mmput(ref->mm);
-		ref->mm = NULL;
-		ref->anon_count = 0;
-	}
-
 exit:
	pr_err("dmabuf is NULL\n");
	kfree(ref);

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2009-2025, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2009-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * GPU memory management driver for Tegra
  */
@@ -93,7 +93,7 @@ do { \
	} \
 } while (0)

-#define GFP_NVMAP (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN | __GFP_ACCOUNT | __GFP_NORETRY)
+#define GFP_NVMAP (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)

 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)

@@ -271,7 +271,6 @@ struct nvmap_handle {
	wait_queue_head_t waitq;
	int numa_id;
	u64 serial_id;
-	u64 anon_count;
 };

 struct nvmap_handle_info {
@@ -296,8 +295,6 @@ struct nvmap_handle_ref {
	struct rb_node node;
	atomic_t dupes; /* number of times to free on file close */
	bool is_ro;
-	struct mm_struct *mm;
-	u64 anon_count;
 };

 #if defined(NVMAP_CONFIG_PAGE_POOLS)
@@ -380,9 +377,6 @@ struct nvmap_vma_priv {
	struct nvmap_handle *handle;
	size_t offs;
	atomic_t count; /* number of processes cloning the VMA */
-	u64 map_rss_count;
-	struct mm_struct *mm;
-	struct mutex vma_lock;
 };

 struct nvmap_device {
@@ -919,16 +913,6 @@ static inline struct dma_buf *nvmap_id_array_id_release(struct xarray *xarr, u32
	return NULL;
 }
 #endif
-
-static inline void nvmap_add_mm_counter(struct mm_struct *mm, int member, long value)
-{
-#if defined(NV_MM_STRUCT_STRUCT_HAS_PERCPU_COUNTER_RSS_STAT) /* Linux v6.2 */
-	percpu_counter_add(&mm->rss_stat[member], value);
-#else
-	atomic_long_add_return(value, &mm->rss_stat.count[member]);
-#endif
-}
-
 void *nvmap_dmabuf_get_drv_data(struct dma_buf *dmabuf,
				struct device *dev);
 bool is_nvmap_memory_available(size_t size, uint32_t heap);

@@ -125,7 +125,6 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += i2c_driver_struct_remove_return_type_int
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += iio_dev_opaque_has_mlock
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += iommu_map_has_gfp_arg
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += kthread_complete_and_exit
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += mm_struct_struct_has_percpu_counter_rss_stat
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += mii_bus_struct_has_read_c45
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += mii_bus_struct_has_write_c45
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += netif_set_tso_max_size

@@ -7152,23 +7152,6 @@ compile_test() {
			compile_check_conftest "$CODE" "NV_MII_BUS_STRUCT_HAS_WRITE_C45" "" "types"
		;;

-		mm_struct_struct_has_percpu_counter_rss_stat)
-			#
-			# Determine if the 'rss_stat' member of the 'mm_struct' structure is
-			# defined with 'percpu_counter'.
-			#
-			# This change was made in Linux v6.2 by commit f1a7941243c1 ("mm:
-			# convert mm's rss stats into percpu_counter2").
-			#
-			CODE="
-			#include <linux/mm_types.h>
-			void conftest_mm_struct_struct_has_percpu_counter_rss_stat(struct mm_struct *mm) {
-				percpu_counter_add(&mm->rss_stat[0], 0);
-			}"
-
-			compile_check_conftest "$CODE" "NV_MM_STRUCT_STRUCT_HAS_PERCPU_COUNTER_RSS_STAT" "" "types"
-		;;
-
		pwm_chip_struct_has_base_arg)
			#
			# Determine if 'struct pwm_chip' has the 'base' field.
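The hunks above all revolve around one pattern: pin the caller's mm_struct for the lifetime of a handle reference and mirror the allocation in its MM_ANONPAGES RSS counter. The following is a minimal sketch of that pattern outside nvmap, for orientation only; the struct and function names (my_rss_ref, my_rss_charge, my_rss_uncharge) are hypothetical, and it uses the in-tree add_mm_counter() helper rather than the driver's nvmap_add_mm_counter() wrapper shown in the diff.

```c
#include <linux/errno.h>
#include <linux/mm.h>        /* add_mm_counter(), MM_ANONPAGES */
#include <linux/sched/mm.h>  /* mmget_not_zero(), mmput() */

/* Hypothetical per-reference bookkeeping, analogous to ref->mm / ref->anon_count. */
struct my_rss_ref {
	struct mm_struct *mm;   /* owner's address space, pinned while charged */
	long nr_pages;          /* pages charged to that mm */
};

/* Charge nr_pages of driver-owned memory to the current process's RSS. */
static int my_rss_charge(struct my_rss_ref *ref, long nr_pages)
{
	struct mm_struct *mm = current->mm;

	/* Pin mm so the counter can still be decremented at release time. */
	if (!mm || !mmget_not_zero(mm))
		return -EINVAL;

	add_mm_counter(mm, MM_ANONPAGES, nr_pages);
	ref->mm = mm;
	ref->nr_pages = nr_pages;
	return 0;
}

/* Undo the charge when the reference goes away. */
static void my_rss_uncharge(struct my_rss_ref *ref)
{
	if (!ref->mm || !ref->nr_pages)
		return;

	add_mm_counter(ref->mm, MM_ANONPAGES, -ref->nr_pages);
	mmput(ref->mm);         /* drop the pin taken in my_rss_charge() */
	ref->mm = NULL;
	ref->nr_pages = 0;
}
```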