gpu: nvgpu: skip mapping global ctx buffer if already mapped

Global context buffers were mapped on every ALLOC_OBJ_CTX call.
If many channels are created sharing an address space, they can
exhaust the VA space by mapping the same global context buffers
again and again.

Skip mapping a global context buffer if it is already mapped
into the address space.

Bug 3802863
Bug 3796293

Change-Id: I3844c211b3350aa06cabd92c415a34a83034dd43
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2789584
(cherry picked from commit 0611ec30c6a61b7e1b07d516b74d6eddb3c6b37e)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2789581
Reviewed-by: Scott Long <scottl@nvidia.com>
Reviewed-by: Ankur Kishore <ankkishore@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
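
For readers skimming the diff, here is a minimal, self-contained C sketch of the guard this patch adds: a per-VM table of already-assigned GPU VAs is checked first, so a repeated ALLOC_OBJ_CTX-style request for the same buffer becomes a no-op instead of consuming more VA space. Names such as struct ctx_mappings, map_global_ctx_buffer() and fake_gmmu_map() are simplified stand-ins for illustration only, not the nvgpu API; the real change is in nvgpu_gr_ctx_mappings_map_global_ctx_buffer() in the diff below.

#include <inttypes.h>
#include <stdio.h>

#define NR_GLOBAL_CTX_BUFFERS 8U

/* Simplified stand-in for the per-VM mapping state. */
struct ctx_mappings {
	uint64_t global_ctx_buffer_va[NR_GLOBAL_CTX_BUFFERS];
};

/* Pretend GMMU allocator: hands out a fake GPU VA for the demo. */
static uint64_t fake_gmmu_map(uint32_t va_type)
{
	return 0x100000000ULL + ((uint64_t)va_type << 16);
}

/*
 * Map one global context buffer into the VM, unless a VA for it
 * already exists -- the same early-return guard the patch adds.
 */
static int map_global_ctx_buffer(struct ctx_mappings *m, uint32_t va_type)
{
	if (m->global_ctx_buffer_va[va_type] != 0ULL) {
		printf("global buffer %" PRIu32 " already mapped\n", va_type);
		return 0;
	}

	m->global_ctx_buffer_va[va_type] = fake_gmmu_map(va_type);
	printf("mapped global buffer %" PRIu32 " at 0x%" PRIx64 "\n",
	       va_type, m->global_ctx_buffer_va[va_type]);
	return 0;
}

int main(void)
{
	struct ctx_mappings m = { 0 };

	/* Two channels sharing one VM issue two map requests;
	 * only the first actually consumes VA space. */
	map_global_ctx_buffer(&m, 0U);
	map_global_ctx_buffer(&m, 0U);
	return 0;
}

Running the sketch maps buffer 0 once and only logs on the repeated call, which is the per-address-space behaviour the early return in the diff provides.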

@@ -419,6 +419,7 @@ static int nvgpu_gr_ctx_mappings_map_global_ctx_buffer(
 	bool vpr, struct nvgpu_gr_ctx_mappings *mappings)
 {
 	struct vm_gk20a *vm = mappings->vm;
+	struct gk20a *g = mappings->tsg->g;
 	u64 *g_bfr_va;
 	u32 *g_bfr_index;
 	u64 gpu_va = 0ULL;
@@ -429,6 +430,11 @@ static int nvgpu_gr_ctx_mappings_map_global_ctx_buffer(
 	g_bfr_va = &mappings->global_ctx_buffer_va[0];
 	g_bfr_index = &mappings->global_ctx_buffer_index[0];
 
+	if (g_bfr_va[va_type] != 0ULL) {
+		nvgpu_log_info(g, "global buffer %u already mapped", va_type);
+		return 0;
+	}
+
 #ifdef CONFIG_NVGPU_VPR
 	if (vpr && nvgpu_gr_global_ctx_buffer_ready(global_ctx_buffer,
 			buffer_vpr_type)) {