gpu: nvgpu: SM/TEX exception handling support

Add TEX exception handling support. Also turn the SM exception handler into
a function pointer, which allows different chips to implement
their own SM exception handling routines.

Bug 1635727
Bug 1637486

Change-Id: I429905726c1840c11e83780843d82729495dc6a5
Signed-off-by: Adeel Raza <araza@nvidia.com>
Reviewed-on: http://git-master/r/935329
This commit is contained in:
Adeel Raza
2015-06-25 15:40:12 -07:00
parent 9e02111a76
commit f0a9ce0469
6 changed files with 85 additions and 3 deletions

View File

@@ -202,6 +202,10 @@ struct gpu_ops {
struct channel_gk20a *fault_ch,
bool *early_exit, bool *ignore_debugger);
u32 (*mask_hww_warp_esr)(u32 hww_warp_esr);
int (*handle_sm_exception)(struct gk20a *g, u32 gpc, u32 tpc,
bool *post_event, struct channel_gk20a *fault_ch);
int (*handle_tex_exception)(struct gk20a *g, u32 gpc, u32 tpc,
bool *post_event);
} gr;
const char *name;
struct {

View File

@@ -3996,6 +3996,7 @@ static void gk20a_gr_enable_gpc_exceptions(struct gk20a *g)
u32 tpc_mask;
gk20a_writel(g, gr_gpcs_tpcs_tpccs_tpc_exception_en_r(),
gr_gpcs_tpcs_tpccs_tpc_exception_en_tex_enabled_f() |
gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f());
tpc_mask =
@@ -5241,7 +5242,7 @@ u32 gk20a_mask_hww_warp_esr(u32 hww_warp_esr)
return hww_warp_esr;
}
static int gk20a_gr_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
bool *post_event, struct channel_gk20a *fault_ch)
{
int ret = 0;
@@ -5322,6 +5323,27 @@ static int gk20a_gr_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
return ret;
}
/*
 * Service a pending TEX HWW exception for the given GPC/TPC pair.
 *
 * Reads the TEX HWW error-status register (ESR), logs its raw value,
 * and writes the same value back to acknowledge/clear the condition.
 * @post_event is currently left untouched. Always returns 0.
 */
int gr_gk20a_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc,
		bool *post_event)
{
	u32 tpc_offset = proj_gpc_stride_v() * gpc +
			 proj_tpc_in_gpc_stride_v() * tpc;
	u32 esr_reg = gr_gpc0_tpc0_tex_m_hww_esr_r() + tpc_offset;
	u32 hww_esr;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");

	hww_esr = gk20a_readl(g, esr_reg);
	gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", hww_esr);

	/* write the value back to clear the reported exception source */
	gk20a_writel(g, esr_reg, hww_esr);

	return 0;
}
static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc,
bool *post_event, struct channel_gk20a *fault_ch)
{
@@ -5338,8 +5360,16 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc,
gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v()) {
gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
"GPC%d TPC%d: SM exception pending", gpc, tpc);
ret = gk20a_gr_handle_sm_exception(g, gpc, tpc,
post_event, fault_ch);
ret = g->ops.gr.handle_sm_exception(g, gpc, tpc,
post_event, fault_ch);
}
/* check if a tex exception is pending */
if (gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(tpc_exception) ==
gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v()) {
gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
"GPC%d TPC%d: TEX exception pending", gpc, tpc);
ret = g->ops.gr.handle_tex_exception(g, gpc, tpc, post_event);
}
return ret;
@@ -7595,4 +7625,6 @@ void gk20a_init_gr_ops(struct gpu_ops *gops)
gops->gr.get_access_map = gr_gk20a_get_access_map;
gops->gr.handle_fecs_error = gk20a_gr_handle_fecs_error;
gops->gr.mask_hww_warp_esr = gk20a_mask_hww_warp_esr;
gops->gr.handle_sm_exception = gr_gk20a_handle_sm_exception;
gops->gr.handle_tex_exception = gr_gk20a_handle_tex_exception;
}

View File

@@ -528,6 +528,10 @@ int gr_gk20a_add_zbc_depth(struct gk20a *g, struct gr_gk20a *gr,
struct zbc_entry *depth_val, u32 index);
int gr_gk20a_wait_idle(struct gk20a *g, unsigned long end_jiffies,
u32 expect_delay);
int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
bool *post_event, struct channel_gk20a *fault_ch);
int gr_gk20a_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc,
bool *post_event);
int gr_gk20a_init_ctx_state(struct gk20a *g);
int gr_gk20a_submit_fecs_method_op(struct gk20a *g,
struct fecs_method_op_gk20a op,

View File

@@ -2990,6 +2990,10 @@ static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f(void)
{
return 0x2;
}
/* Field value (bit 0) that enables TEX exceptions in the broadcast
 * GPCS_TPCS_TPCCS_TPC_EXCEPTION_EN register. */
static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_tex_enabled_f(void)
{
	return 0x1;
}
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_r(void)
{
return 0x0050450c;
@@ -3026,6 +3030,14 @@ static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_r(void)
{
return 0x00504508;
}
/* Extract the TEX exception field (bit 0) from a TPC_EXCEPTION register
 * value @r. */
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(u32 r)
{
	return (r >> 0) & 0x1;
}
/* TEX field value indicating a TEX exception is pending; compare against
 * gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(). */
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v(void)
{
	return 0x00000001;
}
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_v(u32 r)
{
return (r >> 1) & 0x1;
@@ -3170,6 +3182,14 @@ static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_single_step_complete_pending_f(
{
return 0x40;
}
/* Offset of the GPC0/TPC0 TEX HWW error-status register (ESR); add the
 * per-GPC/TPC stride offset to address other GPC/TPC instances. */
static inline u32 gr_gpc0_tpc0_tex_m_hww_esr_r(void)
{
	return 0x00504224;
}
/* Field value (bit 0) of the TEX HWW ESR marking an interrupt as pending. */
static inline u32 gr_gpc0_tpc0_tex_m_hww_esr_intr_pending_f(void)
{
	return 0x1;
}
static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_r(void)
{
return 0x00504648;

View File

@@ -1229,4 +1229,6 @@ void gm20b_init_gr(struct gpu_ops *gops)
gops->gr.get_access_map = gr_gm20b_get_access_map;
gops->gr.handle_fecs_error = gk20a_gr_handle_fecs_error;
gops->gr.mask_hww_warp_esr = gk20a_mask_hww_warp_esr;
gops->gr.handle_sm_exception = gr_gk20a_handle_sm_exception;
gops->gr.handle_tex_exception = gr_gk20a_handle_tex_exception;
}

View File

@@ -3022,6 +3022,10 @@ static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f(void)
{
return 0x2;
}
/* Field value (bit 0) that enables TEX exceptions in the broadcast
 * GPCS_TPCS_TPCCS_TPC_EXCEPTION_EN register. */
static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_tex_enabled_f(void)
{
	return 0x1;
}
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_r(void)
{
return 0x0050450c;
@@ -3058,6 +3062,14 @@ static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_r(void)
{
return 0x00504508;
}
/* Extract the TEX exception field (bit 0) from a TPC_EXCEPTION register
 * value @r. */
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(u32 r)
{
	return (r >> 0) & 0x1;
}
/* TEX field value indicating a TEX exception is pending; compare against
 * gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(). */
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v(void)
{
	return 0x00000001;
}
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_v(u32 r)
{
return (r >> 1) & 0x1;
@@ -3214,6 +3226,14 @@ static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_single_step_complete_pending_f(
{
return 0x40;
}
/* Offset of the GPC0/TPC0 TEX HWW error-status register (ESR); add the
 * per-GPC/TPC stride offset to address other GPC/TPC instances. */
static inline u32 gr_gpc0_tpc0_tex_m_hww_esr_r(void)
{
	return 0x00504224;
}
/* Field value (bit 0) of the TEX HWW ESR marking an interrupt as pending. */
static inline u32 gr_gpc0_tpc0_tex_m_hww_esr_intr_pending_f(void)
{
	return 0x1;
}
static inline u32 gr_gpc0_tpc0_sm_hww_warp_esr_r(void)
{
return 0x00504648;