gpu: nvgpu: Add ELPG_MS protected call for TLB invalidate

- If a TLB invalidate is issued while the ELPG_MS feature is engaged,
  some of the signals can go non-idle, which can cause an idle snap
  in ELPG_MS.
- To avoid the idle snap, wrap the TLB invalidate operation in an
  elpg_ms protected call (a sketch of the assumed pattern follows the
  change summary below).

Bug 200763448

Change-Id: I33435a70c3a4946cc157d5c9c001a17edb133573
Signed-off-by: Divya <dsinghatwari@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2576984
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author:    Divya
Date:      2021-08-16 05:52:21 +00:00
Committed: mobile promotions
Parent:    d538737ba1
Commit:    4331c5f121
3 changed files with 14 additions and 4 deletions
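
For context, here is a minimal sketch of the shape an elpg_ms protected call is assumed to take: ELPG_MS is disengaged before the wrapped expression runs and re-engaged afterwards, and the expression's return code is passed through, which is why the call sites in the diffs below assign the result to err and check it. This is not the actual nvgpu source; the helper names nvgpu_pg_elpg_ms_disable()/nvgpu_pg_elpg_ms_enable() are placeholders for illustration only, and the real definition lives in <nvgpu/power_features/pg.h>.

/*
 * Illustrative sketch only, not the actual nvgpu implementation.
 * A GCC statement-expression macro lets a full call such as
 * g->ops.fb.tlb_invalidate(g, vm->pdb.mem) be passed as "func".
 */
#define nvgpu_pg_elpg_ms_protected_call(g, func)			\
({									\
	int pg_err;							\
									\
	/* Keep ELPG_MS disengaged while the unit may go non-idle. */	\
	pg_err = nvgpu_pg_elpg_ms_disable(g); /* placeholder helper */	\
	if (pg_err == 0) {						\
		/* Run the wrapped expression (the TLB invalidate). */	\
		pg_err = (func);					\
		/* Re-engage ELPG_MS even if the wrapped call failed. */\
		(void)nvgpu_pg_elpg_ms_enable(g); /* placeholder */	\
	}								\
	pg_err;								\
})

Passing the whole expression, rather than a function pointer, keeps each call site a single statement, which matches how the three files below use it.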


@@ -37,6 +37,7 @@
 #include <nvgpu/gk20a.h>
 #include <nvgpu/static_analysis.h>
 #include <nvgpu/errata.h>
+#include <nvgpu/power_features/pg.h>
 
 #ifdef CONFIG_NVGPU_TRACE
 #define nvgpu_gmmu_dbg(g, attrs, fmt, args...) \
@@ -856,7 +857,8 @@ static int nvgpu_gmmu_cache_maint_map(struct gk20a *g, struct vm_gk20a *vm,
 	int err = 0;
 
 	if (batch == NULL) {
-		err = g->ops.fb.tlb_invalidate(g, vm->pdb.mem);
+		err = nvgpu_pg_elpg_ms_protected_call(g,
+				g->ops.fb.tlb_invalidate(g, vm->pdb.mem));
 		if (err != 0) {
 			nvgpu_err(g, "fb.tlb_invalidate() failed err=%d", err);
 		}
@@ -877,7 +879,8 @@ static int nvgpu_gmmu_cache_maint_unmap(struct gk20a *g, struct vm_gk20a *vm,
 			g->ops.mm.cache.l2_flush(g, true))) != 0) {
 		nvgpu_err(g, "gk20a_mm_l2_flush[1] failed");
 	}
-	err = g->ops.fb.tlb_invalidate(g, vm->pdb.mem);
+	err = nvgpu_pg_elpg_ms_protected_call(g,
+			g->ops.fb.tlb_invalidate(g, vm->pdb.mem));
 	if (err != 0) {
 		nvgpu_err(g, "fb.tlb_invalidate() failed err=%d", err);
 	}


@@ -39,6 +39,7 @@
 #include <nvgpu/vgpu/vm_vgpu.h>
 #include <nvgpu/cbc.h>
 #include <nvgpu/static_analysis.h>
+#include <nvgpu/power_features/pg.h>
 #include <nvgpu/nvhost.h>
 
 struct nvgpu_ctag_buffer_info {
@@ -297,7 +298,7 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 15_6))
 	if (mapping_batch->need_tlb_invalidate) {
 		struct gk20a *g = gk20a_from_vm(vm);
 
-		err = g->ops.fb.tlb_invalidate(g, vm->pdb.mem);
+		err = nvgpu_pg_elpg_ms_protected_call(g, g->ops.fb.tlb_invalidate(g, vm->pdb.mem));
 		if (err != 0) {
 			nvgpu_err(g, "fb.tlb_invalidate() failed err=%d", err);
 		}


@@ -41,6 +41,7 @@
 #include <nvgpu/rc.h>
 #include <nvgpu/mmu_fault.h>
 #include <nvgpu/nvgpu_init.h>
+#include <nvgpu/power_features/pg.h>
 
 #include <nvgpu/hw/gv11b/hw_gmmu_gv11b.h>
 
@@ -862,7 +863,12 @@ static int gv11b_fb_fix_page_fault(struct gk20a *g,
 		return err;
 	}
 
 	/* invalidate tlb so that GMMU does not use old cached translation */
-	g->ops.fb.tlb_invalidate(g, mmufault->refch->vm->pdb.mem);
+	err = nvgpu_pg_elpg_ms_protected_call(g,
+			g->ops.fb.tlb_invalidate(g, mmufault->refch->vm->pdb.mem));
+	if (err != 0) {
+		nvgpu_err(g, "tlb invalidate failed");
+		return err;
+	}
 	err = nvgpu_get_pte(g,
 			mmufault->refch->vm, mmufault->fault_addr, &pte[0]);