gpu: nvgpu: Combine the fault buffer deinit seqs

gv11b_mm_fault_info_mem_destroy() and gv11b_mm_mmu_hw_fault_buf_deinit()
serve a similar purpose: both disable hub interrupts and deinitialize
memory related to MMU fault handling.

Of the two, the latter was called from BAR2 deinitialization and the
former from nvgpu_remove_mm_support().

Combine the functions and keep only the call from nvgpu_remove_mm_support().
This way BAR2 deinitialization can be unified with the gp10b version.
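
For readability, a condensed sketch of the combined teardown sequence,
pieced together from the hunks below; it omits the nvgpu_log_fn() trace
and is not the verbatim result of applying the diff:

	void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
	{
		struct vm_gk20a *vm = g->mm.bar2.vm;

		nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);

		/* Mask fault-related hub interrupts before touching the buffers. */
		g->ops.fb.disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_OTHER |
				HUB_INTR_TYPE_NONREPLAY | HUB_INTR_TYPE_REPLAY);
		g->mm.hub_intr_types &= ~(HUB_INTR_TYPE_NONREPLAY |
				HUB_INTR_TYPE_REPLAY);

		/* Disable the HW fault buffers, then free their backing memory. */
		if (gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX))
			gv11b_fb_fault_buf_set_state_hw(g, NONREPLAY_REG_INDEX,
					FAULT_BUF_DISABLED);
		if (gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX))
			gv11b_fb_fault_buf_set_state_hw(g, REPLAY_REG_INDEX,
					FAULT_BUF_DISABLED);

		if (nvgpu_mem_is_valid(
				&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]))
			nvgpu_dma_unmap_free(vm,
				&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]);
		if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]))
			nvgpu_dma_unmap_free(vm,
					&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]);

		nvgpu_mutex_release(&g->mm.hub_isr_mutex);
		nvgpu_mutex_destroy(&g->mm.hub_isr_mutex);
	}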

JIRA NVGPU-714

Change-Id: I4050865eaba404b049c621ac2ce54c963e1aea44
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1769627
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:       Terje Bergstrom
Date:         2018-07-03 11:20:52 -07:00
Committed by: mobile promotions
Parent:       26783b85bf
Commit:       d7c78df466

3 changed files with 24 additions and 60 deletions

Changed file 1 of 3 (gv11b FB fault handling):

@@ -1019,7 +1019,7 @@ void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
 	nvgpu_log(g, gpu_dbg_intr, "get ptr = %d", get_indx);
 
 	mem = &g->mm.hw_fault_buf[index];
-	mmfault = g->mm.fault_info[index];
+	mmfault = &g->mm.fault_info[index];
 
 	entries = gv11b_fb_fault_buffer_size_val(g, index);
 	nvgpu_log(g, gpu_dbg_intr, "buffer num entries = %d", entries);
@@ -1251,7 +1251,7 @@ void gv11b_fb_handle_other_fault_notify(struct gk20a *g,
 	struct mmu_fault_info *mmfault;
 	u32 invalidate_replay_val = 0;
 
-	mmfault = g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY];
+	mmfault = &g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY];
 
 	gv11b_mm_copy_from_fault_snap_reg(g, fault_status, mmfault);

Changed file 2 of 3 (gv11b MM fault buffer init/deinit):

@@ -71,6 +71,8 @@ bool gv11b_mm_mmu_fault_pending(struct gk20a *g)
 void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
 {
+	struct vm_gk20a *vm = g->mm.bar2.vm;
+
 	nvgpu_log_fn(g, " ");
 
 	nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);
@@ -78,10 +80,26 @@ void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
 	g->ops.fb.disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_OTHER |
 			HUB_INTR_TYPE_NONREPLAY | HUB_INTR_TYPE_REPLAY);
 
-	nvgpu_kfree(g, g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY]);
+	g->mm.hub_intr_types &= (~(HUB_INTR_TYPE_NONREPLAY |
+			HUB_INTR_TYPE_REPLAY));
 
-	g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY] = NULL;
-	g->mm.fault_info[FAULT_TYPE_REPLAY] = NULL;
+	if ((gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX))) {
+		gv11b_fb_fault_buf_set_state_hw(g, NONREPLAY_REG_INDEX,
+				FAULT_BUF_DISABLED);
+	}
+
+	if ((gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX))) {
+		gv11b_fb_fault_buf_set_state_hw(g, REPLAY_REG_INDEX,
+				FAULT_BUF_DISABLED);
+	}
+
+	if (nvgpu_mem_is_valid(
+			&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]))
+		nvgpu_dma_unmap_free(vm,
+			&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]);
+
+	if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]))
+		nvgpu_dma_unmap_free(vm,
+				&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]);
 
 	nvgpu_mutex_release(&g->mm.hub_isr_mutex);
 	nvgpu_mutex_destroy(&g->mm.hub_isr_mutex);
@@ -90,27 +108,6 @@ void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
 static int gv11b_mm_mmu_fault_info_buf_init(struct gk20a *g,
 		u32 *hub_intr_types)
 {
-	struct mmu_fault_info *fault_info_mem;
-
-	if (g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY] != NULL &&
-			g->mm.fault_info[FAULT_TYPE_REPLAY] != NULL) {
-		*hub_intr_types |= HUB_INTR_TYPE_OTHER;
-		return 0;
-	}
-
-	fault_info_mem = nvgpu_kzalloc(g, sizeof(struct mmu_fault_info) *
-						FAULT_TYPE_NUM);
-	if (!fault_info_mem) {
-		nvgpu_log_info(g, "failed to alloc shadow fault info");
-		return -ENOMEM;
-	}
-
-	/* shadow buffer for copying mmu fault info */
-	g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY] =
-		&fault_info_mem[FAULT_TYPE_OTHER_AND_NONREPLAY];
-	g->mm.fault_info[FAULT_TYPE_REPLAY] =
-		&fault_info_mem[FAULT_TYPE_REPLAY];
-
 	*hub_intr_types |= HUB_INTR_TYPE_OTHER;
 	return 0;
 }
@@ -156,45 +153,12 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g,
 	*hub_intr_types |= HUB_INTR_TYPE_REPLAY;
 }
 
-static void gv11b_mm_mmu_hw_fault_buf_deinit(struct gk20a *g)
-{
-	struct vm_gk20a *vm = g->mm.bar2.vm;
-
-	nvgpu_log_fn(g, " ");
-
-	g->ops.fb.disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_NONREPLAY |
-			HUB_INTR_TYPE_REPLAY);
-
-	g->mm.hub_intr_types &= (~(HUB_INTR_TYPE_NONREPLAY |
-			HUB_INTR_TYPE_REPLAY));
-
-	if ((gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX))) {
-		gv11b_fb_fault_buf_set_state_hw(g, NONREPLAY_REG_INDEX,
-				FAULT_BUF_DISABLED);
-	}
-
-	if ((gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX))) {
-		gv11b_fb_fault_buf_set_state_hw(g, REPLAY_REG_INDEX,
-				FAULT_BUF_DISABLED);
-	}
-
-	if (nvgpu_mem_is_valid(
-			&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]))
-		nvgpu_dma_unmap_free(vm,
-			&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]);
-
-	if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]))
-		nvgpu_dma_unmap_free(vm,
-				&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]);
-}
-
 void gv11b_mm_remove_bar2_vm(struct gk20a *g)
 {
 	struct mm_gk20a *mm = &g->mm;
 
 	nvgpu_log_fn(g, " ");
 
-	gv11b_mm_mmu_hw_fault_buf_deinit(g);
-
 	nvgpu_free_inst_block(g, &mm->bar2.inst_block);
 	nvgpu_vm_put(mm->bar2.vm);
 }
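
After this change, gv11b_mm_remove_bar2_vm() is reduced to freeing the
instance block and putting the VM, the same shape as the gp10b version,
while all fault-buffer teardown happens in gv11b_mm_fault_info_mem_destroy().
A hypothetical caller-side sketch of the resulting order follows; the HAL
hook names (g->ops.mm.fault_info_mem_destroy, g->ops.mm.remove_bar2_vm) and
the helper gk20a_from_mm() are assumptions not shown in this diff, and the
real nvgpu_remove_mm_support() does more than this:

	/* Hypothetical sketch only: teardown order after this change. */
	static void nvgpu_remove_mm_support(struct mm_gk20a *mm)
	{
		struct gk20a *g = gk20a_from_mm(mm);	/* assumed helper */

		/* Hub interrupts, HW fault buffers and shadow info torn down here. */
		if (g->ops.mm.fault_info_mem_destroy)	/* assumed HAL hook name */
			g->ops.mm.fault_info_mem_destroy(g);

		/* BAR2 teardown no longer touches the fault buffers, so the
		 * gv11b and gp10b remove_bar2_vm() bodies can be unified. */
		if (g->ops.mm.remove_bar2_vm)		/* assumed HAL hook name */
			g->ops.mm.remove_bar2_vm(g);
	}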

Changed file 3 of 3 (struct mm_gk20a definition):

@@ -129,7 +129,7 @@ struct mm_gk20a {
 	struct nvgpu_mem bar2_desc;
 
 	struct nvgpu_mem hw_fault_buf[FAULT_TYPE_NUM];
-	struct mmu_fault_info *fault_info[FAULT_TYPE_NUM];
+	struct mmu_fault_info fault_info[FAULT_TYPE_NUM];
 	struct nvgpu_mutex hub_isr_mutex;
 	u32 hub_intr_types;
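
This header change drives the rest of the diff: the shadow fault info is
now embedded in struct mm_gk20a instead of being a separately allocated
array of pointers, so the nvgpu_kzalloc()/nvgpu_kfree() pair disappears and
the FB handlers take the address of the array element. A minimal sketch of
the access-pattern change, using names from the hunks above:

	struct mmu_fault_info *mmfault;

	/* Before: fault_info[] held pointers into a separate nvgpu_kzalloc() block. */
	mmfault = g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY];

	/* After: fault_info[] holds the structs themselves, so take the address. */
	mmfault = &g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY];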