mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git (synced 2025-12-22 09:11:26 +03:00)
nvmap: Keep cache flush at allocation time only
On TOT, in the case of carveouts, nvmap performs the cache flush
operation at carveout creation, buffer allocation, and buffer release.
Because the entire carveout is flushed at creation time, nvmap takes
~430 ms in probe, which hurts boot KPIs. Fix this by performing the
cache flush only at buffer allocation time, instead of at carveout
creation and buffer release. This reduces the nvmap probe time to
~0.69 ms.

Bug 3821631

Change-Id: I54da7dd179f8d30b8b038daf3eceafb355b2e789
Signed-off-by: Ketan Patil <ketanp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2802353
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
Reviewed-by: Ashish Mhetre <amhetre@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
committed by Laxman Dewangan
parent d1c06c1dce
commit 92f34279ba
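Taken together, the hunks below hang every carveout cache flush off a
single runtime flag. A condensed, compilable model of the resulting
control flow (userspace C so it can be built and run standalone;
co_cache_flush_at_alloc is the field the patch adds, while the three
functions and their puts() bodies are illustrative stand-ins for
nvmap_heap_create(), alloc_handle(), and nvmap_heap_free()):

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Condensed model of the patched control flow -- not the driver's
     * real code. co_cache_flush_at_alloc is the flag this patch adds to
     * struct nvmap_device; everything else here is an illustrative
     * stand-in.
     */
    static bool co_cache_flush_at_alloc = true; /* set in nvmap_probe() */

    static void create_carveout(void)
    {
        if (!co_cache_flush_at_alloc)
            puts("flush entire carveout");  /* old path: ~430 ms at probe */
    }

    static void alloc_buffer(void)
    {
        if (co_cache_flush_at_alloc)
            puts("zero + flush this block only");  /* new path */
    }

    static void free_buffer(void)
    {
        if (!co_cache_flush_at_alloc)
            puts("flush released block");  /* old path */
    }

    int main(void)
    {
        create_carveout();
        alloc_buffer();
        free_buffer();
        return 0;
    }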
@@ -119,6 +119,9 @@ NVMAP_CONFIG_HANDLE_AS_FD := n
 # Config for kstable/OOT kernel
 # This is useful when any kstable/OOT specific checks are needed
 NVMAP_CONFIG_UPSTREAM_KERNEL := n
+
+# Config for enabling the cache flush at buffer allocation time from carveout
+NVMAP_CONFIG_CACHE_FLUSH_AT_ALLOC := y
 ################################################################################
 # Section 3
 # Enable/Disable configs based upon the kernel version
@@ -217,5 +220,9 @@ ifeq ($(CONFIG_TEGRA_CVNAS),y)
 ccflags-y += -DCVNAS_BUILTIN
 endif #CONFIG_TEGRA_CVNAS
 
+ifeq ($(NVMAP_CONFIG_CACHE_FLUSH_AT_ALLOC),y)
+ccflags-y += -DNVMAP_CONFIG_CACHE_FLUSH_AT_ALLOC
+endif #NVMAP_CONFIG_CACHE_FLUSH_AT_ALLOC
+
 endif #NVMAP_CONFIG
 endif #CONFIG_ARCH_TEGRA
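For reference, the make variable only takes effect once it becomes a
compiler define: ccflags-y turns it into a -D flag on every compile of
the module, which C code can then test with the preprocessor. A minimal
sketch of that pattern (FLUSH_AT_ALLOC_DEFAULT is a hypothetical name,
not from the patch; the nvmap_probe() hunk further down does the real
equivalent):

    /*
     * Hypothetical illustration of consuming the -D flag; the real
     * driver tests the same symbol directly in nvmap_probe().
     */
    #ifdef NVMAP_CONFIG_CACHE_FLUSH_AT_ALLOC
    #define FLUSH_AT_ALLOC_DEFAULT 1 /* NVMAP_CONFIG_CACHE_FLUSH_AT_ALLOC := y */
    #else
    #define FLUSH_AT_ALLOC_DEFAULT 0 /* knob set to n, or flag absent */
    #endif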
@@ -710,19 +710,20 @@ static void alloc_handle(struct nvmap_client *client,
 		mb();
 		h->alloc = true;
 
-		/* Clear the allocated buffer */
-		if (nvmap_cpu_map_is_allowed(h)) {
-			void *cpu_addr;
-
-			cpu_addr = memremap(b->base, h->size,
-					MEMREMAP_WB);
-			if (cpu_addr != NULL) {
-				memset(cpu_addr, 0, h->size);
-				__dma_flush_area(cpu_addr, h->size);
-				memunmap(cpu_addr);
+		if (nvmap_dev->co_cache_flush_at_alloc) {
+			/* Clear the allocated buffer */
+			if (nvmap_cpu_map_is_allowed(h)) {
+				void *cpu_addr;
+
+				cpu_addr = memremap(b->base, h->size,
+						MEMREMAP_WB);
+				if (cpu_addr != NULL) {
+					memset(cpu_addr, 0, h->size);
+					__dma_flush_area(cpu_addr, h->size);
+					memunmap(cpu_addr);
+				}
 			}
 		}
 
 		return;
 	}
 	ret = nvmap_heap_pgalloc(client, h, type);
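The zero-and-flush sequence this hunk moves under the flag deserves a
note: memremap(..., MEMREMAP_WB) gives a cacheable CPU mapping of the
carveout block, memset() zeroes it through that mapping, and
__dma_flush_area() then cleans the lines out to memory so the zeroes are
visible to devices and uncached mappings before memunmap() drops the
alias. A minimal kernel-context sketch of the same dance
(zero_and_flush_phys is an illustrative name, not in the patch;
__dma_flush_area is the arm64-internal cache primitive the hunk itself
uses):

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /*
     * Illustrative helper, not part of the patch: zero a physical range
     * and push the zeroes out past the CPU caches, mirroring the hunk
     * above.
     */
    static int zero_and_flush_phys(phys_addr_t base, size_t size)
    {
        void *va = memremap(base, size, MEMREMAP_WB);

        if (!va)
            return -ENOMEM;
        memset(va, 0, size);
        __dma_flush_area(va, size); /* make the zeroes visible to DMA */
        memunmap(va);
        return 0;
    }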
@@ -1448,6 +1448,9 @@ int __init nvmap_probe(struct platform_device *pdev)
 	}
 	nvmap_dev->dynamic_dma_map_mask = ~0U;
 	nvmap_dev->cpu_access_mask = ~0U;
+#ifdef NVMAP_CONFIG_CACHE_FLUSH_AT_ALLOC
+	nvmap_dev->co_cache_flush_at_alloc = true;
+#endif /* NVMAP_CONFIG_CACHE_FLUSH_AT_ALLOC */
 	if (plat)
 		for (i = 0; i < plat->nr_carveouts; i++)
 			nvmap_create_carveout(&plat->carveouts[i]);
@@ -411,7 +411,13 @@ void nvmap_heap_free(struct nvmap_heap_block *b)
 	mutex_lock(&h->lock);
 
 	lb = container_of(b, struct list_block, block);
-	nvmap_flush_heap_block(NULL, b, lb->size, lb->mem_prot);
+	if (!nvmap_dev->co_cache_flush_at_alloc) {
+		/*
+		 * For carveouts, if cache flush is done at buffer allocation time
+		 * then no need to do it during buffer release time.
+		 */
+		nvmap_flush_heap_block(NULL, b, lb->size, lb->mem_prot);
+	}
 	do_heap_free(b);
 	/*
 	 * If this HEAP has pm_ops defined and powering off the
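The reasoning behind dropping the free-time flush: when
co_cache_flush_at_alloc is set, every block is zeroed and flushed at the
moment it is next allocated, so any dirty lines a releasing client
leaves behind can never be observed through the next owner's mapping;
flushing again on release would only repeat that work.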
@@ -507,11 +513,17 @@ struct nvmap_heap *nvmap_heap_create(struct device *parent,
 #ifdef NVMAP_CONFIG_DEBUG_MAPS
 	h->device_names = RB_ROOT;
 #endif /* NVMAP_CONFIG_DEBUG_MAPS */
-	if (!co->no_cpu_access && co->usage_mask != NVMAP_HEAP_CARVEOUT_VPR
-		&& nvmap_cache_maint_phys_range(NVMAP_CACHE_OP_WB_INV,
-			base, base + len, true, true)) {
-		dev_err(parent, "cache flush failed\n");
-		goto fail;
+	if (!nvmap_dev->co_cache_flush_at_alloc) {
+		/*
+		 * For carveouts, if cache flush is done at buffer allocation time
+		 * then no need to do it during carveout creation time.
+		 */
+		if (!co->no_cpu_access && co->usage_mask != NVMAP_HEAP_CARVEOUT_VPR
+			&& nvmap_cache_maint_phys_range(NVMAP_CACHE_OP_WB_INV,
+				base, base + len, true, true)) {
+			dev_err(parent, "cache flush failed\n");
+			goto fail;
+		}
 	}
 	wmb();
@@ -397,6 +397,8 @@ struct nvmap_device {
 #ifdef NVMAP_CONFIG_DEBUG_MAPS
 	struct rb_root device_names;
 #endif /* NVMAP_CONFIG_DEBUG_MAPS */
+	/* Perform cache flush at buffer allocation from carveout */
+	bool co_cache_flush_at_alloc;
 };
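One detail worth noting: assuming nvmap_dev is allocated zeroed (the
usual kzalloc pattern), the new bool reads as false whenever
NVMAP_CONFIG_CACHE_FLUSH_AT_ALLOC is not defined, so builds with the
knob off keep the legacy create-time and free-time flushes unchanged.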