gpu: nvgpu: move mmu_fault_pending ops out from mm

Moved
- mmu_fault_pending mm op to is_mmu_fault_pending mc op
- mmu_fault_pending fb op to is_mmu_fault_pending fb.intr op. This
  is needed to check whether an mmu fault interrupt is pending on
  Volta onwards.

Added
- is_mmu_fault_pending fifo op. This is needed to check whether an
  mmu fault interrupt is pending on chips prior to Volta.

JIRA NVGPU-1313

Change-Id: Ie8e778387cd486cb19b18c4aee734c581dcd9229
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2094895
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Seema Khowala <seemaj@nvidia.com>
Date:      2019-04-10 21:32:05 -07:00
Committer: mobile promotions
Parent:    63fb543f63
Commit:    66cb9495a5

31 changed files with 100 additions and 83 deletions
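Condensed from the per-file diffs below, the net effect of the rework is
the following dispatch chain. This is a minimal standalone sketch: the
real gpu_ops and gk20a structs carry many more members, and the
stdbool.h include plus the trimmed struct layout are only here so the
snippet compiles on its own. The two delegating bodies are taken
verbatim from the mc diffs below.

#include <stdbool.h>

struct gk20a;

struct gpu_ops {
    struct {
        /* new op: pre-Volta pending check, reads fifo_intr_0_r() */
        bool (*is_mmu_fault_pending)(struct gk20a *g);
    } fifo;
    struct {
        struct {
            /* new op: Volta+ pending check, reads the fb/HUB
             * interrupt status */
            bool (*is_mmu_fault_pending)(struct gk20a *g);
        } intr;
    } fb;
    struct {
        /* new op: the chip-agnostic entry point callers use */
        bool (*is_mmu_fault_pending)(struct gk20a *g);
    } mc;
    /* mm.mmu_fault_pending and fb.mmu_fault_pending are removed */
};

struct gk20a {
    struct gpu_ops ops;
};

/* Pre-Volta (gm20b/gp10b): the mc op delegates to the fifo op. */
bool gm20b_mc_is_mmu_fault_pending(struct gk20a *g)
{
    return g->ops.fifo.is_mmu_fault_pending(g);
}

/* Volta onwards (gv11b/gv100/tu104): the mc op delegates to fb.intr. */
bool gv11b_mc_is_mmu_fault_pending(struct gk20a *g)
{
    return g->ops.fb.intr.is_mmu_fault_pending(g);
}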

View File

@@ -434,6 +434,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
         .get_mmu_fault_client_desc =
             gp10b_fifo_get_mmu_fault_client_desc,
         .get_mmu_fault_gpc_desc = gm20b_fifo_get_mmu_fault_gpc_desc,
+        .is_mmu_fault_pending = NULL,
     },
     .engine = {
         .is_fault_engine_subid_gpc = gm20b_is_fault_engine_subid_gpc,
@@ -571,7 +572,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
         .init_mm_setup_hw = NULL,
         .is_bar1_supported = gm20b_mm_is_bar1_supported,
         .init_inst_block = gk20a_init_inst_block,
-        .mmu_fault_pending = NULL,
         .init_bar2_vm = gp10b_init_bar2_vm,
         .remove_bar2_vm = gp10b_remove_bar2_vm,
         .get_kind_invalid = gm20b_get_kind_invalid,
@@ -672,6 +672,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
         .reset_mask = NULL,
         .is_enabled = NULL,
         .fb_reset = NULL,
+        .is_mmu_fault_pending = NULL,
     },
     .debug = {
         .show_dump = NULL,

View File

@@ -463,6 +463,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
             .enable = gv11b_fb_intr_enable,
             .disable = gv11b_fb_intr_disable,
             .isr = gv11b_fb_intr_isr,
+            .is_mmu_fault_pending = NULL,
         },
     },
     .cg = {
@@ -656,7 +657,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
         .init_mm_setup_hw = NULL,
         .is_bar1_supported = gv11b_mm_is_bar1_supported,
         .init_inst_block = gv11b_init_inst_block,
-        .mmu_fault_pending = NULL,
         .get_kind_invalid = gm20b_get_kind_invalid,
         .get_kind_pitch = gm20b_get_kind_pitch,
         .init_bar2_vm = gp10b_init_bar2_vm,
@@ -760,6 +760,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
         .reset_mask = NULL,
         .is_enabled = NULL,
         .fb_reset = NULL,
+        .is_mmu_fault_pending = NULL,
     },
     .debug = {
         .show_dump = NULL,
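Both vgpu variants stub all three new ops with NULL. The change itself
does not say why, but on virtualized nvgpu the fault interrupt is
handled on the server side, so there is plausibly nothing for the guest
to poll.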

View File

@@ -929,16 +929,6 @@ int gk20a_fifo_suspend(struct gk20a *g)
     return 0;
 }
 
-bool gk20a_fifo_mmu_fault_pending(struct gk20a *g)
-{
-    if ((gk20a_readl(g, fifo_intr_0_r()) &
-            fifo_intr_0_mmu_fault_pending_f()) != 0U) {
-        return true;
-    } else {
-        return false;
-    }
-}
-
 static const char * const pbdma_chan_eng_ctx_status_str[] = {
     "invalid",
     "valid",

View File

@@ -226,8 +226,6 @@ u32 gk20a_fifo_engines_on_ch(struct gk20a *g, u32 chid);
 int gk20a_fifo_suspend(struct gk20a *g);
-bool gk20a_fifo_mmu_fault_pending(struct gk20a *g);
-
 void gk20a_fifo_recover(struct gk20a *g,
     u32 engine_ids, /* if zero, will be queried from HW */
     u32 hw_id, /* if ~0, will be queried from HW */

View File

@@ -2523,7 +2523,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
     /* if an mmu fault is pending and mmu debug mode is not
      * enabled, the sm will never lock down. */
     if (!mmu_debug_mode_enabled &&
-            (g->ops.mm.mmu_fault_pending(g))) {
+            (g->ops.mc.is_mmu_fault_pending(g))) {
         nvgpu_err(g,
             "GPC%d TPC%d: mmu fault pending,"
             " SM%d will never lock down!", gpc, tpc, sm);

View File

@@ -651,6 +651,7 @@ static const struct gpu_ops gm20b_ops = {
         .get_mmu_fault_client_desc =
             gk20a_fifo_get_mmu_fault_client_desc,
         .get_mmu_fault_gpc_desc = gm20b_fifo_get_mmu_fault_gpc_desc,
+        .is_mmu_fault_pending = gk20a_fifo_is_mmu_fault_pending,
     },
     .engine = {
         .is_fault_engine_subid_gpc = gm20b_is_fault_engine_subid_gpc,
@@ -794,7 +795,6 @@ static const struct gpu_ops gm20b_ops = {
         .is_bar1_supported = gm20b_mm_is_bar1_supported,
         .alloc_inst_block = gk20a_alloc_inst_block,
         .init_inst_block = gk20a_init_inst_block,
-        .mmu_fault_pending = gk20a_fifo_mmu_fault_pending,
         .get_kind_invalid = gm20b_get_kind_invalid,
         .get_kind_pitch = gm20b_get_kind_pitch,
         .bar1_map_userd = gk20a_mm_bar1_map_userd,
@@ -902,6 +902,7 @@ static const struct gpu_ops gm20b_ops = {
         .is_enabled = gm20b_mc_is_enabled,
         .fb_reset = gm20b_mc_fb_reset,
         .ltc_isr = gm20b_mc_ltc_isr,
+        .is_mmu_fault_pending = gm20b_mc_is_mmu_fault_pending,
     },
     .debug = {
         .show_dump = gk20a_debug_show_dump,

View File

@@ -739,6 +739,7 @@ static const struct gpu_ops gp10b_ops = {
         .get_mmu_fault_client_desc =
             gp10b_fifo_get_mmu_fault_client_desc,
         .get_mmu_fault_gpc_desc = gm20b_fifo_get_mmu_fault_gpc_desc,
+        .is_mmu_fault_pending = gk20a_fifo_is_mmu_fault_pending,
     },
     .engine = {
         .is_fault_engine_subid_gpc = gm20b_is_fault_engine_subid_gpc,
@@ -885,7 +886,6 @@ static const struct gpu_ops gp10b_ops = {
         .is_bar1_supported = gm20b_mm_is_bar1_supported,
         .alloc_inst_block = gk20a_alloc_inst_block,
         .init_inst_block = gk20a_init_inst_block,
-        .mmu_fault_pending = gk20a_fifo_mmu_fault_pending,
         .init_bar2_vm = gp10b_init_bar2_vm,
         .remove_bar2_vm = gp10b_remove_bar2_vm,
         .get_kind_invalid = gm20b_get_kind_invalid,
@@ -992,6 +992,7 @@ static const struct gpu_ops gp10b_ops = {
         .is_enabled = gm20b_mc_is_enabled,
         .fb_reset = gm20b_mc_fb_reset,
         .ltc_isr = mc_gp10b_ltc_isr,
+        .is_mmu_fault_pending = gm20b_mc_is_mmu_fault_pending,
     },
     .debug = {
         .show_dump = gk20a_debug_show_dump,

View File

@@ -826,7 +826,6 @@ static const struct gpu_ops gv100_ops = {
         .read_mmu_fault_info = fb_gv11b_read_mmu_fault_info,
         .read_mmu_fault_status = fb_gv11b_read_mmu_fault_status,
         .mmu_invalidate_replay = gv11b_fb_mmu_invalidate_replay,
-        .mmu_fault_pending = gv11b_fb_mmu_fault_pending,
         .is_fault_buf_enabled = gv11b_fb_is_fault_buf_enabled,
         .fault_buf_set_state_hw = gv11b_fb_fault_buf_set_state_hw,
         .fault_buf_configure_hw = gv11b_fb_fault_buf_configure_hw,
@@ -835,6 +834,8 @@ static const struct gpu_ops gv100_ops = {
             .enable = gv100_fb_intr_enable,
             .disable = gv100_fb_intr_disable,
             .isr = gv11b_fb_intr_isr,
+            .is_mmu_fault_pending =
+                gv11b_fb_intr_is_mmu_fault_pending,
         },
     },
     .nvdec = {
@@ -1073,7 +1074,6 @@ static const struct gpu_ops gv100_ops = {
         .is_bar1_supported = gv11b_mm_is_bar1_supported,
         .alloc_inst_block = gk20a_alloc_inst_block,
         .init_inst_block = gv11b_init_inst_block,
-        .mmu_fault_pending = gv11b_mm_mmu_fault_pending,
         .get_kind_invalid = gm20b_get_kind_invalid,
         .get_kind_pitch = gm20b_get_kind_pitch,
         .init_bar2_vm = gp10b_init_bar2_vm,
@@ -1198,6 +1198,7 @@ static const struct gpu_ops gv100_ops = {
         .is_enabled = gm20b_mc_is_enabled,
         .fb_reset = NULL,
         .ltc_isr = mc_gp10b_ltc_isr,
+        .is_mmu_fault_pending = gv11b_mc_is_mmu_fault_pending,
     },
     .debug = {
         .show_dump = gk20a_debug_show_dump,

View File

@@ -2428,7 +2428,7 @@ int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g,
     /* if an mmu fault is pending and mmu debug mode is not
      * enabled, the sm will never lock down.
      */
-    if (g->ops.mm.mmu_fault_pending(g)) {
+    if (g->ops.mc.is_mmu_fault_pending(g)) {
         nvgpu_err(g,
             "GPC%d TPC%d: mmu fault pending,"
             " SM%d will never lock down!",

View File

@@ -785,7 +785,6 @@ static const struct gpu_ops gv11b_ops = {
         .read_mmu_fault_info = fb_gv11b_read_mmu_fault_info,
         .read_mmu_fault_status = fb_gv11b_read_mmu_fault_status,
         .mmu_invalidate_replay = gv11b_fb_mmu_invalidate_replay,
-        .mmu_fault_pending = gv11b_fb_mmu_fault_pending,
         .is_fault_buf_enabled = gv11b_fb_is_fault_buf_enabled,
         .fault_buf_set_state_hw = gv11b_fb_fault_buf_set_state_hw,
         .fault_buf_configure_hw = gv11b_fb_fault_buf_configure_hw,
@@ -793,6 +792,8 @@ static const struct gpu_ops gv11b_ops = {
             .enable = gv11b_fb_intr_enable,
             .disable = gv11b_fb_intr_disable,
             .isr = gv11b_fb_intr_isr,
+            .is_mmu_fault_pending =
+                gv11b_fb_intr_is_mmu_fault_pending,
         },
     },
     .cg = {
@@ -1031,7 +1032,6 @@ static const struct gpu_ops gv11b_ops = {
         .is_bar1_supported = gv11b_mm_is_bar1_supported,
         .alloc_inst_block = gk20a_alloc_inst_block,
         .init_inst_block = gv11b_init_inst_block,
-        .mmu_fault_pending = gv11b_mm_mmu_fault_pending,
         .get_kind_invalid = gm20b_get_kind_invalid,
         .get_kind_pitch = gm20b_get_kind_pitch,
         .init_bar2_vm = gp10b_init_bar2_vm,
@@ -1160,6 +1160,7 @@ static const struct gpu_ops gv11b_ops = {
         .is_enabled = gm20b_mc_is_enabled,
         .fb_reset = NULL,
         .ltc_isr = mc_gp10b_ltc_isr,
+        .is_mmu_fault_pending = gv11b_mc_is_mmu_fault_pending,
     },
     .debug = {
         .show_dump = gk20a_debug_show_dump,

View File

@@ -65,11 +65,6 @@ void gv11b_init_inst_block(struct nvgpu_mem *inst_block,
     }
 }
 
-bool gv11b_mm_mmu_fault_pending(struct gk20a *g)
-{
-    return g->ops.fb.mmu_fault_pending(g);
-}
-
 void gv11b_mm_mmu_fault_disable_hw(struct gk20a *g)
 {
     nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);

View File

@@ -31,7 +31,6 @@ struct vm_gk20a;
 bool gv11b_mm_is_bar1_supported(struct gk20a *g);
 void gv11b_init_inst_block(struct nvgpu_mem *inst_block,
         struct vm_gk20a *vm, u32 big_page_size);
-bool gv11b_mm_mmu_fault_pending(struct gk20a *g);
 int gv11b_init_mm_setup_hw(struct gk20a *g);
 int gv11b_mm_l2_flush(struct gk20a *g, bool invalidate);
 u64 gv11b_gpu_phys_addr(struct gk20a *g,

View File

@@ -1173,21 +1173,6 @@ void gv11b_fb_handle_mmu_fault(struct gk20a *g, u32 niso_intr)
             fb_mmu_fault_status_valid_clear_f());
 }
 
-bool gv11b_fb_mmu_fault_pending(struct gk20a *g)
-{
-    if ((gk20a_readl(g, fb_niso_intr_r()) &
-        (fb_niso_intr_mmu_other_fault_notify_m() |
-         fb_niso_intr_mmu_ecc_uncorrected_error_notify_m() |
-         fb_niso_intr_mmu_replayable_fault_notify_m() |
-         fb_niso_intr_mmu_replayable_fault_overflow_m() |
-         fb_niso_intr_mmu_nonreplayable_fault_notify_m() |
-         fb_niso_intr_mmu_nonreplayable_fault_overflow_m())) != 0U) {
-        return true;
-    }
-
-    return false;
-}
-
 int gv11b_fb_mmu_invalidate_replay(struct gk20a *g,
         u32 invalidate_replay_val)
 {

View File

@@ -42,7 +42,6 @@ bool gv11b_fb_is_fault_buf_enabled(struct gk20a *g, u32 index);
 void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
         u32 index, u32 state);
 void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, u32 index);
-bool gv11b_fb_mmu_fault_pending(struct gk20a *g);
 void gv11b_fb_handle_dropped_mmu_fault(struct gk20a *g, u32 fault_status);
 void gv11b_fb_handle_other_fault_notify(struct gk20a *g,
         u32 fault_status);

View File

@@ -42,33 +42,6 @@
 #include "nvgpu/hw/tu104/hw_fb_tu104.h"
 #include "nvgpu/hw/tu104/hw_func_tu104.h"
 
-bool tu104_fb_mmu_fault_pending(struct gk20a *g)
-{
-    u32 info_fault = nvgpu_readl(g, fb_mmu_int_vector_info_fault_r());
-    u32 nonreplay_fault = nvgpu_readl(g,
-        fb_mmu_int_vector_fault_r(NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX));
-    u32 replay_fault = nvgpu_readl(g,
-        fb_mmu_int_vector_fault_r(NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX));
-    u32 ecc_error = nvgpu_readl(g, fb_mmu_int_vector_ecc_error_r());
-
-    if (intr_tu104_vector_intr_pending(g,
-            fb_mmu_int_vector_fault_notify_v(replay_fault)) ||
-        intr_tu104_vector_intr_pending(g,
-            fb_mmu_int_vector_fault_error_v(replay_fault)) ||
-        intr_tu104_vector_intr_pending(g,
-            fb_mmu_int_vector_fault_notify_v(nonreplay_fault)) ||
-        intr_tu104_vector_intr_pending(g,
-            fb_mmu_int_vector_fault_error_v(nonreplay_fault)) ||
-        intr_tu104_vector_intr_pending(g,
-            fb_mmu_int_vector_info_fault_vector_v(info_fault)) ||
-        intr_tu104_vector_intr_pending(g,
-            fb_mmu_int_vector_ecc_error_vector_v(ecc_error))) {
-        return true;
-    }
-
-    return false;
-}
-
 void tu104_fb_handle_mmu_fault(struct gk20a *g)
 {
     u32 info_fault = nvgpu_readl(g, fb_mmu_int_vector_info_fault_r());

View File

@@ -31,7 +31,6 @@ struct nvgpu_mem;
 struct nvgpu_cbc;
 
 void tu104_fb_handle_mmu_fault(struct gk20a *g);
-bool tu104_fb_mmu_fault_pending(struct gk20a *g);
 void fb_tu104_write_mmu_fault_buffer_lo_hi(struct gk20a *g, u32 index,
         u32 addr_lo, u32 addr_hi);

View File

@@ -96,3 +96,18 @@ void gv11b_fb_intr_isr(struct gk20a *g)
 
     nvgpu_mutex_release(&g->mm.hub_isr_mutex);
 }
+
+bool gv11b_fb_intr_is_mmu_fault_pending(struct gk20a *g)
+{
+    if ((gk20a_readl(g, fb_niso_intr_r()) &
+        (fb_niso_intr_mmu_other_fault_notify_m() |
+         fb_niso_intr_mmu_ecc_uncorrected_error_notify_m() |
+         fb_niso_intr_mmu_replayable_fault_notify_m() |
+         fb_niso_intr_mmu_replayable_fault_overflow_m() |
+         fb_niso_intr_mmu_nonreplayable_fault_notify_m() |
+         fb_niso_intr_mmu_nonreplayable_fault_overflow_m())) != 0U) {
+        return true;
+    }
+
+    return false;
+}

View File

@@ -30,5 +30,6 @@ struct gk20a;
 void gv11b_fb_intr_enable(struct gk20a *g);
 void gv11b_fb_intr_disable(struct gk20a *g);
 void gv11b_fb_intr_isr(struct gk20a *g);
+bool gv11b_fb_intr_is_mmu_fault_pending(struct gk20a *g);
 
 #endif /* NVGPU_FB_INTR_GV11B_H */

View File

@@ -113,3 +113,30 @@ void tu104_fb_intr_isr(struct gk20a *g)
 
     nvgpu_mutex_release(&g->mm.hub_isr_mutex);
 }
+
+bool tu104_fb_intr_is_mmu_fault_pending(struct gk20a *g)
+{
+    u32 info_fault = nvgpu_readl(g, fb_mmu_int_vector_info_fault_r());
+    u32 nonreplay_fault = nvgpu_readl(g,
+        fb_mmu_int_vector_fault_r(NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX));
+    u32 replay_fault = nvgpu_readl(g,
+        fb_mmu_int_vector_fault_r(NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX));
+    u32 ecc_error = nvgpu_readl(g, fb_mmu_int_vector_ecc_error_r());
+
+    if (intr_tu104_vector_intr_pending(g,
+            fb_mmu_int_vector_fault_notify_v(replay_fault)) ||
+        intr_tu104_vector_intr_pending(g,
+            fb_mmu_int_vector_fault_error_v(replay_fault)) ||
+        intr_tu104_vector_intr_pending(g,
+            fb_mmu_int_vector_fault_notify_v(nonreplay_fault)) ||
+        intr_tu104_vector_intr_pending(g,
+            fb_mmu_int_vector_fault_error_v(nonreplay_fault)) ||
+        intr_tu104_vector_intr_pending(g,
+            fb_mmu_int_vector_info_fault_vector_v(info_fault)) ||
+        intr_tu104_vector_intr_pending(g,
+            fb_mmu_int_vector_ecc_error_vector_v(ecc_error))) {
+        return true;
+    }
+
+    return false;
+}
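Unlike the gv11b variant, which answers the query from the single
fb_niso_intr_r() status word, the tu104 variant has to probe each
MMU-related interrupt vector (replayable and non-replayable fault
notify/error, info fault, ECC error) through
intr_tu104_vector_intr_pending(), since Turing reports these through
per-vector interrupt status rather than one NISO status register.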

View File

@@ -30,5 +30,6 @@ struct gk20a;
 void tu104_fb_intr_enable(struct gk20a *g);
 void tu104_fb_intr_disable(struct gk20a *g);
 void tu104_fb_intr_isr(struct gk20a *g);
+bool tu104_fb_intr_is_mmu_fault_pending(struct gk20a *g);
 
 #endif /* NVGPU_FB_INTR_TU104_H */

View File

@@ -279,3 +279,13 @@ void gk20a_fifo_intr_0_isr(struct gk20a *g)
 
     nvgpu_writel(g, fifo_intr_0_r(), clear_intr);
 }
+
+bool gk20a_fifo_is_mmu_fault_pending(struct gk20a *g)
+{
+    if ((nvgpu_readl(g, fifo_intr_0_r()) &
+            fifo_intr_0_mmu_fault_pending_f()) != 0U) {
+        return true;
+    } else {
+        return false;
+    }
+}
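This is the body deleted from the gk20a fifo code above, moved verbatim
apart from gk20a_readl() becoming nvgpu_readl(); pre-Volta behavior is
unchanged, only the function's home and name.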

View File

@@ -37,4 +37,6 @@ void gk20a_fifo_intr_handle_runlist_event(struct gk20a *g);
 u32 gk20a_fifo_pbdma_isr(struct gk20a *g);
 bool gk20a_fifo_handle_sched_error(struct gk20a *g);
+bool gk20a_fifo_is_mmu_fault_pending(struct gk20a *g);
+
 #endif /* NVGPU_FIFO_INTR_GK20A_H */

View File

@@ -367,3 +367,8 @@ void gm20b_mc_ltc_isr(struct gk20a *g)
         g->ops.ltc.intr.isr(g, ltc);
     }
 }
+
+bool gm20b_mc_is_mmu_fault_pending(struct gk20a *g)
+{
+    return g->ops.fifo.is_mmu_fault_pending(g);
+}
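With this in place, pre-Volta chips answer the mc-level query from the
FIFO interrupt status, while gv11b_mc_is_mmu_fault_pending() below
answers it from the fb.intr op; callers see the same
g->ops.mc.is_mmu_fault_pending() either way.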

View File

@@ -56,4 +56,6 @@ bool gm20b_mc_is_enabled(struct gk20a *g, enum nvgpu_unit unit);
 void gm20b_mc_fb_reset(struct gk20a *g);
 void gm20b_mc_ltc_isr(struct gk20a *g);
+bool gm20b_mc_is_mmu_fault_pending(struct gk20a *g);
+
 #endif /* NVGPU_MC_GM20B_H */

View File

@@ -88,3 +88,8 @@ bool gv11b_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 act_eng_id,
 
     return (mc_intr_0 & (eng_intr_mask | stall_intr)) != 0U;
 }
+
+bool gv11b_mc_is_mmu_fault_pending(struct gk20a *g)
+{
+    return g->ops.fb.intr.is_mmu_fault_pending(g);
+}

View File

@@ -31,4 +31,6 @@ void mc_gv11b_intr_enable(struct gk20a *g);
 bool gv11b_mc_is_intr_hub_pending(struct gk20a *g, u32 mc_intr_0);
 bool gv11b_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 act_eng_id,
     u32 *eng_intr_pending);
+bool gv11b_mc_is_mmu_fault_pending(struct gk20a *g);
+
 #endif

View File

@@ -341,7 +341,7 @@ u32 intr_tu104_stall(struct gk20a *g)
 /* Return true if HUB interrupt is pending */
 bool intr_tu104_is_intr_hub_pending(struct gk20a *g, u32 mc_intr_0)
 {
-    return g->ops.mm.mmu_fault_pending(g);
+    return g->ops.mc.is_mmu_fault_pending(g);
 }
 
 /* pause all stall interrupts */

View File

@@ -898,7 +898,6 @@ struct gpu_ops {
         u32 (*read_mmu_fault_status)(struct gk20a *g);
         int (*mmu_invalidate_replay)(struct gk20a *g,
             u32 invalidate_replay_val);
-        bool (*mmu_fault_pending)(struct gk20a *g);
         bool (*is_fault_buf_enabled)(struct gk20a *g, u32 index);
         void (*fault_buf_set_state_hw)(struct gk20a *g,
             u32 index, u32 state);
@@ -915,6 +914,7 @@ struct gpu_ops {
             void (*enable)(struct gk20a *g);
             void (*disable)(struct gk20a *g);
             void (*isr)(struct gk20a *g);
+            bool (*is_mmu_fault_pending)(struct gk20a *g);
         } intr;
     } fb;
     struct {
@@ -1000,6 +1000,7 @@ struct gpu_ops {
         void (*get_mmu_fault_client_desc)(
             struct mmu_fault_info *mmfault);
         void (*get_mmu_fault_gpc_desc)(struct mmu_fault_info *mmfault);
+        bool (*is_mmu_fault_pending)(struct gk20a *g);
     } fifo;
     struct {
@@ -1351,7 +1352,6 @@ struct gpu_ops {
             struct nvgpu_mem *inst_block);
         void (*init_inst_block)(struct nvgpu_mem *inst_block,
             struct vm_gk20a *vm, u32 big_page_size);
-        bool (*mmu_fault_pending)(struct gk20a *g);
         void (*fault_info_mem_destroy)(struct gk20a *g);
         void (*mmu_fault_disable_hw)(struct gk20a *g);
         u32 (*get_kind_invalid)(void);
@@ -1578,6 +1578,7 @@ struct gpu_ops {
         u32 (*reset_mask)(struct gk20a *g, enum nvgpu_unit unit);
         void (*fb_reset)(struct gk20a *g);
         void (*ltc_isr)(struct gk20a *g);
+        bool (*is_mmu_fault_pending)(struct gk20a *g);
     } mc;
     struct {
         void (*show_dump)(struct gk20a *g,

View File

@@ -47,7 +47,7 @@ gv11b_fb_fault_buf_configure_hw
 gv11b_fb_fault_buf_set_state_hw
 gv11b_fb_init_hw
 gv11b_fb_is_fault_buf_enabled
-gv11b_fb_mmu_fault_pending
+gv11b_fb_intr_is_mmu_fault_pending
 gv11b_gpu_phys_addr
 gv11b_init_inst_block
 gv11b_init_mm_setup_hw
@@ -55,7 +55,7 @@ gv11b_mm_fault_info_mem_destroy
 gv11b_mm_is_bar1_supported
 gv11b_mm_l2_flush
 gv11b_mm_mmu_fault_disable_hw
-gv11b_mm_mmu_fault_pending
+gv11b_mc_is_mmu_fault_pending
 nvgpu_addr_is_vidmem_page_alloc
 nvgpu_alloc
 nvgpu_alloc_base

View File

@@ -859,7 +859,6 @@ static const struct gpu_ops tu104_ops = {
         .read_mmu_fault_info = fb_tu104_read_mmu_fault_info,
         .read_mmu_fault_status = fb_tu104_read_mmu_fault_status,
         .mmu_invalidate_replay = fb_tu104_mmu_invalidate_replay,
-        .mmu_fault_pending = tu104_fb_mmu_fault_pending,
         .is_fault_buf_enabled = gv11b_fb_is_fault_buf_enabled,
         .fault_buf_set_state_hw = gv11b_fb_fault_buf_set_state_hw,
         .fault_buf_configure_hw = gv11b_fb_fault_buf_configure_hw,
@@ -869,6 +868,8 @@ static const struct gpu_ops tu104_ops = {
             .enable = tu104_fb_intr_enable,
             .disable = tu104_fb_intr_disable,
             .isr = tu104_fb_intr_isr,
+            .is_mmu_fault_pending =
+                tu104_fb_intr_is_mmu_fault_pending,
         }
     },
     .nvdec = {
@@ -1107,7 +1108,6 @@ static const struct gpu_ops tu104_ops = {
         .is_bar1_supported = gv11b_mm_is_bar1_supported,
         .alloc_inst_block = gk20a_alloc_inst_block,
         .init_inst_block = gv11b_init_inst_block,
-        .mmu_fault_pending = gv11b_mm_mmu_fault_pending,
         .get_kind_invalid = gm20b_get_kind_invalid,
         .get_kind_pitch = gm20b_get_kind_pitch,
         .init_bar2_vm = gp10b_init_bar2_vm,
@@ -1242,6 +1242,7 @@ static const struct gpu_ops tu104_ops = {
         .is_enabled = gm20b_mc_is_enabled,
         .fb_reset = NULL,
         .ltc_isr = mc_tu104_ltc_isr,
+        .is_mmu_fault_pending = gv11b_mc_is_mmu_fault_pending,
     },
     .debug = {
         .show_dump = gk20a_debug_show_dump,

View File

@@ -43,6 +43,7 @@
 #include "nvgpu/hw/gv11b/hw_gmmu_gv11b.h"
 #include "nvgpu/hw/gv11b/hw_fb_gv11b.h"
 
+#include "hal/mc/mc_gv11b.h"
 #include "hal/fb/fb_gp10b.h"
 #include "hal/fb/fb_gm20b.h"
 #include "hal/fb/fb_gv11b.h"
@@ -128,7 +129,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
     g->ops.ramin.alloc_size = gk20a_ramin_alloc_size;
 
     /* New HALs for fault testing */
-    g->ops.mm.mmu_fault_pending = gv11b_mm_mmu_fault_pending;
+    g->ops.mc.is_mmu_fault_pending = gv11b_mc_is_mmu_fault_pending;
     g->ops.mm.fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy;
     g->ops.mm.mmu_fault_disable_hw = gv11b_mm_mmu_fault_disable_hw;
     g->ops.mm.init_mm_setup_hw = gv11b_init_mm_setup_hw;
@@ -143,7 +144,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
     g->ops.fb.read_mmu_fault_status = fb_gv11b_read_mmu_fault_status;
     g->ops.fb.write_mmu_fault_buffer_lo_hi =
         fb_gv11b_write_mmu_fault_buffer_lo_hi;
-    g->ops.fb.mmu_fault_pending = gv11b_fb_mmu_fault_pending;
+    g->ops.fb.intr.is_mmu_fault_pending = gv11b_fb_intr_is_mmu_fault_pending;
     g->ops.fb.is_fault_buf_enabled = gv11b_fb_is_fault_buf_enabled;
     g->ops.fb.fault_buf_set_state_hw = gv11b_fb_fault_buf_set_state_hw;
     g->ops.ramin.set_big_page_size = gm20b_ramin_set_big_page_size;
@@ -264,14 +265,14 @@ static void write_error(struct unit_module *m, struct gk20a *g, u32 error)
 static int test_page_faults_pending(struct unit_module *m, struct gk20a *g,
     void *args)
 {
-    if (g->ops.mm.mmu_fault_pending(g)) {
+    if (g->ops.mc.is_mmu_fault_pending(g)) {
         unit_return_fail(m, "MMU fault already pending at init.\n");
     }
 
     /* Write a fault in the pending register */
     write_error(m, g, fb_niso_intr_mmu_other_fault_notify_m());
 
-    if (!g->ops.mm.mmu_fault_pending(g)) {
+    if (!g->ops.mc.is_mmu_fault_pending(g)) {
         unit_return_fail(m, "MMU fault not pending as expected.\n");
     }