mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-24 02:22:34 +03:00
gpu: nvgpu: add get_sm_hww_warp_esr gr ops
The mask_hww_warp_esr gr op is removed and replaced with the
get_sm_hww_warp_esr gr op.

JIRA GPUT19X-75

Change-Id: I8c7194ca1b0e4fe740a6f8998a02fba846234e9e
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master/r/1512218
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
This commit is contained in:
committed by
mobile promotions
parent
5e17dc9419
commit
8b36c45b39
@@ -291,7 +291,8 @@ struct gpu_ops {
|
||||
bool sm_debugger_attached,
|
||||
struct channel_gk20a *fault_ch,
|
||||
bool *early_exit, bool *ignore_debugger);
|
||||
u32 (*mask_hww_warp_esr)(u32 hww_warp_esr);
|
||||
u32 (*get_sm_hww_warp_esr)(struct gk20a *g,
|
||||
u32 gpc, u32 tpc, u32 sm);
|
||||
void (*get_esr_sm_sel)(struct gk20a *g, u32 gpc, u32 tpc,
|
||||
u32 *esr_sm_sel);
|
||||
int (*handle_sm_exception)(struct gk20a *g,
|
||||
|
||||
@@ -5518,11 +5518,6 @@ void gk20a_gr_clear_sm_hww(struct gk20a *g,
|
||||
gr_gpc0_tpc0_sm_hww_warp_esr_error_none_f());
|
||||
}
|
||||
|
||||
/*
 * Chip-specific mask hook for the SM HWW warp error-status register.
 * On gk20a no warp ESR bits need filtering, so the raw value is
 * returned unchanged.
 */
u32 gk20a_mask_hww_warp_esr(u32 hww_warp_esr)
{
	/* identity: gk20a keeps every ESR bit as reported by hardware */
	return hww_warp_esr;
}
|
||||
|
||||
int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
|
||||
bool *post_event, struct channel_gk20a *fault_ch,
|
||||
u32 *hww_global_esr)
|
||||
@@ -5549,8 +5544,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
|
||||
|
||||
global_esr = gk20a_readl(g,
|
||||
gr_gpc0_tpc0_sm_hww_global_esr_r() + offset);
|
||||
warp_esr = gk20a_readl(g, gr_gpc0_tpc0_sm_hww_warp_esr_r() + offset);
|
||||
warp_esr = g->ops.gr.mask_hww_warp_esr(warp_esr);
|
||||
warp_esr = g->ops.gr.get_sm_hww_warp_esr(g, gpc, tpc, sm);
|
||||
|
||||
if (!sm_debugger_attached) {
|
||||
nvgpu_err(g, "sm hww global %08x warp %08x",
|
||||
@@ -7941,6 +7935,8 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc,
|
||||
u32 dbgr_status0 = 0, dbgr_control0 = 0;
|
||||
u64 warps_valid = 0, warps_paused = 0, warps_trapped = 0;
|
||||
struct nvgpu_timeout timeout;
|
||||
u32 warp_esr;
|
||||
u32 sm = 0;
|
||||
|
||||
gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
|
||||
"GPC%d TPC%d: locking down SM", gpc, tpc);
|
||||
@@ -7952,12 +7948,10 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc,
|
||||
do {
|
||||
u32 global_esr = gk20a_readl(g,
|
||||
gr_gpc0_tpc0_sm_hww_global_esr_r() + offset);
|
||||
u32 warp_esr = gk20a_readl(g,
|
||||
gr_gpc0_tpc0_sm_hww_warp_esr_r() + offset);
|
||||
dbgr_status0 = gk20a_readl(g,
|
||||
gr_gpc0_tpc0_sm_dbgr_status0_r() + offset);
|
||||
|
||||
warp_esr = g->ops.gr.mask_hww_warp_esr(warp_esr);
|
||||
warp_esr = g->ops.gr.get_sm_hww_warp_esr(g, gpc, tpc, sm);
|
||||
|
||||
locked_down =
|
||||
(gr_gpc0_tpc0_sm_dbgr_status0_locked_down_v(dbgr_status0) ==
|
||||
@@ -8496,3 +8490,11 @@ u32 gr_gk20a_tpc_enabled_exceptions(struct gk20a *g)
|
||||
|
||||
return tpc_exception_en;
|
||||
}
|
||||
|
||||
/*
 * Read the HWW (hardware warning) warp error-status register for the
 * SM addressed by (gpc, tpc).  The per-GPC and per-TPC register strides
 * are applied on top of the broadcast gpc0/tpc0 register offset.
 *
 * NOTE(review): the sm argument is not used in this gk20a variant —
 * presumably there is a single SM per TPC on this chip; confirm against
 * per-chip overrides before relying on it.
 */
u32 gk20a_gr_get_sm_hww_warp_esr(struct gk20a *g, u32 gpc, u32 tpc, u32 sm)
{
	u32 reg_offset = gk20a_gr_gpc_offset(g, gpc) +
			 gk20a_gr_tpc_offset(g, tpc);

	return gk20a_readl(g, gr_gpc0_tpc0_sm_hww_warp_esr_r() + reg_offset);
}
|
||||
|
||||
@@ -693,7 +693,7 @@ int gr_gk20a_get_ctx_id(struct gk20a *g,
|
||||
struct channel_gk20a *c,
|
||||
u32 *ctx_id);
|
||||
|
||||
u32 gk20a_mask_hww_warp_esr(u32 hww_warp_esr);
|
||||
u32 gk20a_gr_get_sm_hww_warp_esr(struct gk20a *g, u32 gpc, u32 tpc, u32 sm);
|
||||
|
||||
int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long duration_ms,
|
||||
u32 expect_delay);
|
||||
|
||||
@@ -1593,7 +1593,6 @@ void gm20b_init_gr(struct gpu_ops *gops)
|
||||
gops->gr.bpt_reg_info = gr_gm20b_bpt_reg_info;
|
||||
gops->gr.get_access_map = gr_gm20b_get_access_map;
|
||||
gops->gr.handle_fecs_error = gk20a_gr_handle_fecs_error;
|
||||
gops->gr.mask_hww_warp_esr = gk20a_mask_hww_warp_esr;
|
||||
gops->gr.handle_sm_exception = gr_gk20a_handle_sm_exception;
|
||||
gops->gr.handle_tex_exception = gr_gk20a_handle_tex_exception;
|
||||
gops->gr.enable_gpc_exceptions = gk20a_gr_enable_gpc_exceptions;
|
||||
@@ -1635,4 +1634,5 @@ void gm20b_init_gr(struct gpu_ops *gops)
|
||||
gops->gr.suspend_all_sms = gk20a_gr_suspend_all_sms;
|
||||
gops->gr.resume_single_sm = gk20a_gr_resume_single_sm;
|
||||
gops->gr.resume_all_sms = gk20a_gr_resume_all_sms;
|
||||
gops->gr.get_sm_hww_warp_esr = gk20a_gr_get_sm_hww_warp_esr;
|
||||
}
|
||||
|
||||
@@ -1958,8 +1958,13 @@ clean_up:
|
||||
return gk20a_gr_handle_fecs_error(g, __ch, isr_data);
|
||||
}
|
||||
|
||||
static u32 gp10b_mask_hww_warp_esr(u32 hww_warp_esr)
|
||||
static u32 gp10b_gr_get_sm_hww_warp_esr(struct gk20a *g,
|
||||
u32 gpc, u32 tpc, u32 sm)
|
||||
{
|
||||
u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc);
|
||||
u32 hww_warp_esr = gk20a_readl(g,
|
||||
gr_gpc0_tpc0_sm_hww_warp_esr_r() + offset);
|
||||
|
||||
if (!(hww_warp_esr & gr_gpc0_tpc0_sm_hww_warp_esr_addr_valid_m()))
|
||||
hww_warp_esr = set_field(hww_warp_esr,
|
||||
gr_gpc0_tpc0_sm_hww_warp_esr_addr_error_type_m(),
|
||||
@@ -2369,7 +2374,6 @@ void gp10b_init_gr(struct gpu_ops *gops)
|
||||
gops->gr.get_access_map = gr_gp10b_get_access_map;
|
||||
gops->gr.handle_sm_exception = gr_gp10b_handle_sm_exception;
|
||||
gops->gr.handle_tex_exception = gr_gp10b_handle_tex_exception;
|
||||
gops->gr.mask_hww_warp_esr = gp10b_mask_hww_warp_esr;
|
||||
gops->gr.pre_process_sm_exception =
|
||||
gr_gp10b_pre_process_sm_exception;
|
||||
gops->gr.handle_fecs_error = gr_gp10b_handle_fecs_error;
|
||||
@@ -2384,4 +2388,5 @@ void gp10b_init_gr(struct gpu_ops *gops)
|
||||
gops->gr.set_boosted_ctx = gr_gp10b_set_boosted_ctx;
|
||||
gops->gr.update_boosted_ctx = gr_gp10b_update_boosted_ctx;
|
||||
gops->gr.set_czf_bypass = gr_gp10b_set_czf_bypass;
|
||||
gops->gr.get_sm_hww_warp_esr = gp10b_gr_get_sm_hww_warp_esr;
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user