gpu: nvgpu: gv11b: enable and handle mpc exception

Implement gr ops to handle MPC exception triggered per TPC

JIRA GPUT19X-69

Change-Id: Ia92b1d51ad896116b25d71e07ed26f1539475be8
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master/r/1515915
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
This commit is contained in:
Seema Khowala
2017-07-09 14:00:24 -07:00
committed by mobile promotions
parent d9ee7aff04
commit cc940da42f
2 changed files with 69 additions and 3 deletions

View File

@@ -885,7 +885,8 @@ static void gr_gv11b_enable_gpc_exceptions(struct gk20a *g)
u32 tpc_mask;
gk20a_writel(g, gr_gpcs_tpcs_tpccs_tpc_exception_en_r(),
gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f());
gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f() |
gr_gpcs_tpcs_tpccs_tpc_exception_en_mpc_enabled_f());
tpc_mask =
gr_gpcs_gpccs_gpc_exception_en_tpc_f((1 << gr->tpc_count) - 1);
@@ -2973,13 +2974,16 @@ static void gv11b_gr_resume_all_sms(struct gk20a *g)
static int gv11b_gr_resume_from_pause(struct gk20a *g)
{
int err = 0;
u32 reg_val;
/* Clear the pause mask to tell the GPU we want to resume everyone */
gk20a_writel(g, gr_gpcs_tpcs_sms_dbgr_bpt_pause_mask_0_r(), 0);
/* explicitly re-enable forwarding of SM interrupts upon any resume */
gk20a_writel(g, gr_gpcs_tpcs_tpccs_tpc_exception_en_r(),
gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f());
reg_val = gk20a_readl(g, gr_gpc0_tpc0_tpccs_tpc_exception_en_r());
reg_val |= gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_enabled_f();
gk20a_writel(g, gr_gpcs_tpcs_tpccs_tpc_exception_en_r(), reg_val);
g->ops.gr.resume_all_sms(g);
@@ -3198,6 +3202,34 @@ static void gv11b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
offset));
}
/*
 * Service a pending MPC exception for one TPC.
 *
 * Checks the per-TPC exception status register; if the MPC bit is not
 * pending there is nothing to do. Otherwise it logs the MPC HWW ESR and
 * the ESR info register (VEID of the faulting subcontext), then clears
 * the latched exception by writing the reset trigger back to the ESR.
 * Always returns 0; *post_event is left untouched.
 */
static int gr_gv11b_handle_tpc_mpc_exception(struct gk20a *g,
			u32 gpc, u32 tpc, bool *post_event)
{
	u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc);
	u32 pending;
	u32 hww_esr;
	u32 esr_info;

	pending = gk20a_readl(g,
			gr_gpc0_tpc0_tpccs_tpc_exception_r() + offset);
	if ((pending & gr_gpc0_tpc0_tpccs_tpc_exception_mpc_m()) == 0)
		return 0;

	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
			"GPC%d TPC%d MPC exception", gpc, tpc);

	hww_esr = gk20a_readl(g, gr_gpc0_tpc0_mpc_hww_esr_r() + offset);
	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "mpc hww esr 0x%08x",
			hww_esr);

	esr_info = gk20a_readl(g, gr_gpc0_tpc0_mpc_hww_esr_info_r() + offset);
	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
			"mpc hww esr info: veid 0x%08x",
			gr_gpc0_tpc0_mpc_hww_esr_info_veid_v(esr_info));

	/* Writing the reset trigger clears the exception in hardware. */
	gk20a_writel(g, gr_gpc0_tpc0_mpc_hww_esr_r() + offset,
			gr_gpc0_tpc0_mpc_hww_esr_reset_trigger_f());

	return 0;
}
void gv11b_init_gr(struct gpu_ops *gops)
{
gp10b_init_gr(gops);
@@ -3280,4 +3312,6 @@ void gv11b_init_gr(struct gpu_ops *gops)
gops->gr.clear_sm_hww = gv11b_gr_clear_sm_hww;
gops->gr.handle_tpc_sm_ecc_exception =
gr_gv11b_handle_tpc_sm_ecc_exception;
gops->gr.handle_tpc_mpc_exception =
gr_gv11b_handle_tpc_mpc_exception;
}

View File

@@ -902,6 +902,22 @@ static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_pipe1_f(void)
{
return 0x2;
}
/* Offset of the GPC0/TPC0 MPC HWW ESR register (read for logging,
 * written with the reset trigger to clear the exception). */
static inline u32 gr_gpc0_tpc0_mpc_hww_esr_r(void)
{
	return 0x00504430U;
}
/* Field value (bit 30 set) written to the MPC HWW ESR register to
 * reset/clear the latched exception. */
static inline u32 gr_gpc0_tpc0_mpc_hww_esr_reset_trigger_f(void)
{
	return 0x1U << 30;
}
/* Offset of the GPC0/TPC0 MPC HWW ESR info register; its VEID field is
 * decoded with gr_gpc0_tpc0_mpc_hww_esr_info_veid_v(). */
static inline u32 gr_gpc0_tpc0_mpc_hww_esr_info_r(void)
{
	return 0x00504434U;
}
/* Extract the VEID field (low 6 bits) from an MPC HWW ESR info value. */
static inline u32 gr_gpc0_tpc0_mpc_hww_esr_info_veid_v(u32 r)
{
	return r & 0x3fU;
}
static inline u32 gr_pri_be0_crop_status1_r(void)
{
return 0x00410134;
@@ -3470,6 +3486,10 @@ static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_tex_enabled_f(void)
{
return 0x1;
}
/* Enable bit (bit 4) for MPC exceptions in the broadcast
 * gpcs_tpcs TPC exception-enable register. */
static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_mpc_enabled_f(void)
{
	return 0x1U << 4;
}
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_r(void)
{
return 0x0050450c;
@@ -3482,6 +3502,10 @@ static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_enabled_f(void)
{
return 0x2;
}
/* Enable bit (bit 4) for MPC exceptions in the per-unit GPC0/TPC0
 * exception-enable register. */
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_mpc_enabled_f(void)
{
	return 0x1U << 4;
}
static inline u32 gr_gpcs_gpccs_gpc_exception_en_r(void)
{
return 0x0041ac94;
@@ -3618,6 +3642,14 @@ static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v(void)
{
return 0x00000001;
}
/* Mask (bit 4) used to test the MPC-pending bit in the TPC
 * exception status register. */
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_mpc_m(void)
{
	return 0x10U;
}
/* Field value (bit 4 set) indicating an MPC exception is pending in
 * the TPC exception status register. */
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_mpc_pending_f(void)
{
	return 0x1U << 4;
}
static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_r(void)
{
return 0x00504704;