gpu: nvgpu: disable fb fault buffer in prepare poweroff

The FB fault buffer is enabled during finalize poweron. Disable the
buffer in prepare poweroff. This also removes the need to disable the
buffer in fault info mem destroy, which would otherwise access GPU
registers after they have been locked in prepare poweroff.

Bug 200427479

Change-Id: I1ca3e6ed4417847731c09b887134f215a2ba331c
Signed-off-by: Aparna Das <aparnad@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1776387
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Aparna Das <aparnad@nvidia.com>
Committer: mobile promotions
Date:      2018-07-10 14:48:27 -07:00
Commit:    3a5fd2399c (parent f39ec4f9a0)

6 changed files with 20 additions and 6 deletions
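
For orientation, below is a minimal standalone C sketch (not nvgpu code) of the pattern the hunks introduce: the per-chip HAL publishes an optional mmu_fault_disable_hw() op, MM suspend invokes it behind a NULL check while GPU registers are still accessible, and the later fault-info teardown is left with memory cleanup only. The struct layout, mm_suspend() and main() below are illustrative stand-ins, not the driver's real types or call chain.

#include <stdio.h>

struct gk20a;

/* Stand-in for the relevant slice of gpu_ops.mm in gk20a.h. */
struct mm_ops {
	void (*mmu_fault_disable_hw)(struct gk20a *g);   /* new, optional op */
	void (*fault_info_mem_destroy)(struct gk20a *g);
};

struct gk20a {
	struct mm_ops mm;
	int fault_buf_enabled;   /* models the HW fault buffer state */
};

/* gv11b-style op: put the fault buffer into the DISABLED state while
 * GPU registers are still writable. */
static void mmu_fault_disable_hw(struct gk20a *g)
{
	g->fault_buf_enabled = 0;
	printf("fault buffer -> DISABLED\n");
}

/* After this change, teardown only frees memory; it no longer has to
 * touch (by then locked) GPU registers. */
static void fault_info_mem_destroy(struct gk20a *g)
{
	(void)g;
	printf("fault buffer memory freed\n");
}

/* Mirrors the NULL-guarded call added to nvgpu_mm_suspend(). */
static int mm_suspend(struct gk20a *g)
{
	if (g->mm.mmu_fault_disable_hw != NULL)
		g->mm.mmu_fault_disable_hw(g);
	return 0;
}

int main(void)
{
	struct gk20a g = {
		.mm = {
			.mmu_fault_disable_hw = mmu_fault_disable_hw,
			.fault_info_mem_destroy = fault_info_mem_destroy,
		},
		.fault_buf_enabled = 1,
	};

	mm_suspend(&g);                   /* prepare poweroff path */
	g.mm.fault_info_mem_destroy(&g);  /* driver remove path */
	return 0;
}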

@@ -117,6 +117,10 @@ int nvgpu_mm_suspend(struct gk20a *g)
 		g->ops.fb.disable_hub_intr(g);
 	}
 
+	if (g->ops.mm.mmu_fault_disable_hw != NULL) {
+		g->ops.mm.mmu_fault_disable_hw(g);
+	}
+
 	nvgpu_log_info(g, "MM suspend done!");
 
 	return 0;

@@ -961,6 +961,7 @@ struct gpu_ops {
 				struct vm_gk20a *vm, u32 big_page_size);
 		bool (*mmu_fault_pending)(struct gk20a *g);
 		void (*fault_info_mem_destroy)(struct gk20a *g);
+		void (*mmu_fault_disable_hw)(struct gk20a *g);
 		u32 (*get_kind_invalid)(void);
 		u32 (*get_kind_pitch)(void);
 		u32 (*get_flush_retries)(struct gk20a *g,

@@ -689,6 +689,7 @@ static const struct gpu_ops gv100_ops = {
 		.init_bar2_vm = gp10b_init_bar2_vm,
 		.remove_bar2_vm = gp10b_remove_bar2_vm,
 		.fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy,
+		.mmu_fault_disable_hw = gv11b_mm_mmu_fault_disable_hw,
 		.get_flush_retries = gv100_mm_get_flush_retries,
 	},
 	.pramin = {

@@ -656,6 +656,7 @@ static const struct gpu_ops gv11b_ops = {
 		.init_bar2_vm = gp10b_init_bar2_vm,
 		.remove_bar2_vm = gp10b_remove_bar2_vm,
 		.fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy,
+		.mmu_fault_disable_hw = gv11b_mm_mmu_fault_disable_hw,
 	},
 	.therm = {
 		.init_therm_setup_hw = gv11b_init_therm_setup_hw,

@@ -67,15 +67,10 @@ bool gv11b_mm_mmu_fault_pending(struct gk20a *g)
 	return g->ops.fb.mmu_fault_pending(g);
 }
 
-void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
+void gv11b_mm_mmu_fault_disable_hw(struct gk20a *g)
 {
-	struct vm_gk20a *vm = g->mm.bar2.vm;
-
 	nvgpu_log_fn(g, " ");
 
 	nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);
 
 	if ((g->ops.fb.is_fault_buf_enabled(g,
 			NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX))) {
 		g->ops.fb.fault_buf_set_state_hw(g,
@@ -90,6 +85,17 @@ void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
 				NVGPU_FB_MMU_FAULT_BUF_DISABLED);
 	}
 
+	nvgpu_mutex_release(&g->mm.hub_isr_mutex);
+}
+
+void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
+{
+	struct vm_gk20a *vm = g->mm.bar2.vm;
+
+	nvgpu_log_fn(g, " ");
+
+	nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);
+
 	if (nvgpu_mem_is_valid(
 		&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]))
 		nvgpu_dma_unmap_free(vm,
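
The hunk above splits the old gv11b teardown in two: gv11b_mm_mmu_fault_disable_hw() now owns the register-touching half (disabling the HW fault buffers under the hub ISR mutex), while gv11b_mm_fault_info_mem_destroy() keeps only the memory cleanup. Below is a standalone sketch of that shape, with pthread_mutex and a heap allocation standing in for nvgpu_mutex and the DMA-backed fault buffers; the flag and buffer names are illustrative, not the driver's.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for driver state: flags model the HW fault buffer enable
 * state, a heap allocation models the DMA-backed fault buffer. */
static pthread_mutex_t hub_isr_mutex = PTHREAD_MUTEX_INITIALIZER;
static int nonreplay_buf_enabled = 1;
static int replay_buf_enabled = 1;
static void *hw_fault_buf;

/* Counterpart of gv11b_mm_mmu_fault_disable_hw(): called from MM
 * suspend, touches only the (still accessible) HW enable state. */
void mmu_fault_disable_hw(void)
{
	pthread_mutex_lock(&hub_isr_mutex);
	if (nonreplay_buf_enabled)
		nonreplay_buf_enabled = 0;
	if (replay_buf_enabled)
		replay_buf_enabled = 0;
	pthread_mutex_unlock(&hub_isr_mutex);
}

/* Counterpart of gv11b_mm_fault_info_mem_destroy(): called later on
 * driver teardown, now only frees memory and never touches registers. */
void fault_info_mem_destroy(void)
{
	pthread_mutex_lock(&hub_isr_mutex);
	free(hw_fault_buf);
	hw_fault_buf = NULL;
	pthread_mutex_unlock(&hub_isr_mutex);
}

int main(void)
{
	hw_fault_buf = malloc(4096);
	mmu_fault_disable_hw();     /* prepare poweroff */
	fault_info_mem_destroy();   /* driver remove */
	printf("nonreplay=%d replay=%d buf=%p\n",
	       nonreplay_buf_enabled, replay_buf_enabled, hw_fault_buf);
	return 0;
}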

@@ -37,5 +37,6 @@ void gv11b_mm_l2_flush(struct gk20a *g, bool invalidate);
 u64 gv11b_gpu_phys_addr(struct gk20a *g,
 			struct nvgpu_gmmu_attrs *attrs, u64 phys);
 void gv11b_mm_fault_info_mem_destroy(struct gk20a *g);
+void gv11b_mm_mmu_fault_disable_hw(struct gk20a *g);
 
 #endif