From 838bb12196a3ccfda015977e8e2e92be807ee655 Mon Sep 17 00:00:00 2001
From: Ketan Patil
Date: Mon, 24 Mar 2025 13:59:32 +0000
Subject: [PATCH] video: tegra: nvmap: Gather pages and then flush cache

After deeper analysis, it was found that a multithreaded cache flush
is not required. Gathering the pages first and then flushing the cache
in one pass is enough to get the performance improvement: the per-page
cache flush time is almost halved compared with flushing each big page
as it is allocated, which indicates that continuous cache eviction was
happening in the old scheme.

So, while allocating big pages from the page allocator, instead of
allocating one big page and flushing it immediately, allocate all the
required big pages first and then flush them together at the end.

Bug 4628529

Change-Id: I4d8113010a8c82a1cab124e26b14e8a3f5791cd1
Signed-off-by: Ketan Patil
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3325002
Reviewed-by: Pritesh Raithatha
GVS: buildbot_gerritrpt
Reviewed-by: Ajay Nandakumar Mannargudi
---
 drivers/video/tegra/nvmap/nvmap_alloc.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/video/tegra/nvmap/nvmap_alloc.c b/drivers/video/tegra/nvmap/nvmap_alloc.c
index 8e97b334..8dde0a53 100644
--- a/drivers/video/tegra/nvmap/nvmap_alloc.c
+++ b/drivers/video/tegra/nvmap/nvmap_alloc.c
@@ -105,6 +105,7 @@ static int handle_page_alloc(struct nvmap_client *client,
 	u64 result;
 	size_t tot_size;
 #ifdef CONFIG_ARM64_4K_PAGES
+	int start_index = 0;
 #ifdef NVMAP_CONFIG_PAGE_POOLS
 	int pages_per_big_pg = NVMAP_PP_BIG_PAGE_SIZE >> PAGE_SHIFT;
 #else
@@ -137,6 +138,7 @@ static int handle_page_alloc(struct nvmap_client *client,
 		pages_per_big_pg = nvmap_dev->pool->pages_per_big_pg;
 #endif
 	/* Try to allocate big pages from page allocator */
+	start_index = page_index;
 	for (i = page_index; i < nr_page && pages_per_big_pg > 1 &&
 	     (nr_page - i) >= pages_per_big_pg;
 	     i += pages_per_big_pg, page_index += pages_per_big_pg) {
@@ -155,9 +157,10 @@ static int handle_page_alloc(struct nvmap_client *client,
 		for (idx = 0; idx < pages_per_big_pg; idx++)
 			pages[i + idx] = nth_page(page, idx);
-		nvmap_clean_cache(&pages[i], pages_per_big_pg);
 	}
 
+	/* Perform cache clean for the pages allocated from page allocator */
+	nvmap_clean_cache(&pages[start_index], page_index - start_index);
 	if (check_add_overflow(nvmap_big_page_allocs, (u64)page_index,
 			       &result))
 		goto fail;
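
As context for the change, below is a minimal standalone sketch of the
gather-then-flush pattern the patch adopts, not the driver's actual code.
nvmap_clean_cache() and nth_page() are used as they appear in the diff
above; alloc_one_big_page(), the function boundary, and the return value
are simplified, hypothetical stand-ins for the real handle_page_alloc()
state.

/*
 * Sketch (under the assumptions stated above): gather all big pages
 * first, then issue a single cache clean over the whole range instead
 * of one clean per big page.
 */
#include <linux/mm.h>

static int gather_then_flush(struct page **pages, int nr_page,
			     int pages_per_big_pg)
{
	int i, idx, page_index = 0;
	int start_index = page_index;	/* first slot filled below */
	struct page *page;

	for (i = page_index;
	     i < nr_page && pages_per_big_pg > 1 &&
	     (nr_page - i) >= pages_per_big_pg;
	     i += pages_per_big_pg, page_index += pages_per_big_pg) {
		/* hypothetical stand-in for the compound-page allocation */
		page = alloc_one_big_page(pages_per_big_pg);
		if (!page)
			break;
		for (idx = 0; idx < pages_per_big_pg; idx++)
			pages[i + idx] = nth_page(page, idx);
		/*
		 * The old code cleaned the cache here, once per big page:
		 *	nvmap_clean_cache(&pages[i], pages_per_big_pg);
		 * so each flush interleaved with the next allocation.
		 */
	}

	/* New scheme: one cache clean over everything gathered above. */
	nvmap_clean_cache(&pages[start_index], page_index - start_index);

	return page_index;	/* number of pages successfully gathered */
}

Batching the clean means the flush loop no longer interleaves with
allocation and so stops repeatedly evicting lines the allocator is about
to touch, which matches the roughly halved per-page flush time reported
in the commit message.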