diff --git a/drivers/gpu/nvgpu/hal/gr/gr/gr_gv11b.c b/drivers/gpu/nvgpu/hal/gr/gr/gr_gv11b.c
index bfbfad407..bf826382e 100644
--- a/drivers/gpu/nvgpu/hal/gr/gr/gr_gv11b.c
+++ b/drivers/gpu/nvgpu/hal/gr/gr/gr_gv11b.c
@@ -789,19 +789,6 @@ static void gv11b_gr_sm_stop_trigger_enable(struct gk20a *g)
 	}
 }
 
-int gv11b_gr_sm_trigger_suspend(struct gk20a *g)
-{
-	if (!g->ops.gr.sm_debugger_attached(g)) {
-		nvgpu_err(g,
-			"SM debugger not attached, do not trigger suspend!");
-		return -EINVAL;
-	}
-
-	gv11b_gr_sm_stop_trigger_enable(g);
-
-	return 0;
-}
-
 void gv11b_gr_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state)
 {
 	/* Check if we have at least one valid warp
@@ -1211,31 +1198,6 @@ void gv11b_gr_resume_all_sms(struct gk20a *g)
 	}
 }
 
-int gv11b_gr_resume_from_pause(struct gk20a *g)
-{
-	int err = 0;
-	u32 reg_val;
-
-	if (!g->ops.gr.sm_debugger_attached(g)) {
-		nvgpu_err(g,
-			"SM debugger not attached, do not resume for pause!");
-		return -EINVAL;
-	}
-
-	/* Clear the pause mask to tell the GPU we want to resume everyone */
-	gk20a_writel(g, gr_gpcs_tpcs_sms_dbgr_bpt_pause_mask_0_r(), 0);
-
-	/* explicitly re-enable forwarding of SM interrupts upon any resume */
-	reg_val = gk20a_readl(g, gr_gpc0_tpc0_tpccs_tpc_exception_en_r());
-	reg_val |= gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_enabled_f();
-
-	gk20a_writel(g, gr_gpcs_tpcs_tpccs_tpc_exception_en_r(), reg_val);
-
-	g->ops.gr.resume_all_sms(g);
-
-	return err;
-}
-
 static void gv11b_gr_sm_dump_warp_bpt_pause_trap_mask_regs(struct gk20a *g,
 		u32 offset, bool timeout)
 {
diff --git a/drivers/gpu/nvgpu/hal/gr/gr/gr_gv11b.h b/drivers/gpu/nvgpu/hal/gr/gr/gr_gv11b.h
index e335de03e..15d8bc04b 100644
--- a/drivers/gpu/nvgpu/hal/gr/gr/gr_gv11b.h
+++ b/drivers/gpu/nvgpu/hal/gr/gr/gr_gv11b.h
@@ -42,7 +42,6 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
 		u32 gpc, u32 tpc, u32 sm, u32 global_esr, u32 warp_esr,
 		bool sm_debugger_attached, struct nvgpu_channel *fault_ch,
 		bool *early_exit, bool *ignore_debugger);
-int gv11b_gr_sm_trigger_suspend(struct gk20a *g);
 void gv11b_gr_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state);
 int gv11b_gr_set_sm_debug_mode(struct gk20a *g,
 	struct nvgpu_channel *ch, u64 sms, bool enable);
@@ -58,7 +57,6 @@ void gv11b_gr_suspend_all_sms(struct gk20a *g,
 		u32 global_esr_mask, bool check_errors);
 void gv11b_gr_resume_single_sm(struct gk20a *g,
 	u32 gpc, u32 tpc, u32 sm);
 void gv11b_gr_resume_all_sms(struct gk20a *g);
-int gv11b_gr_resume_from_pause(struct gk20a *g);
 int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc,
 		u32 sm, u32 global_esr_mask, bool check_errors);
diff --git a/drivers/gpu/nvgpu/hal/init/hal_gp10b.c b/drivers/gpu/nvgpu/hal/init/hal_gp10b.c
index af1bf8327..29bac0e64 100644
--- a/drivers/gpu/nvgpu/hal/init/hal_gp10b.c
+++ b/drivers/gpu/nvgpu/hal/init/hal_gp10b.c
@@ -264,9 +264,9 @@ static const struct gpu_ops gp10b_ops = {
 		.clear_sm_error_state = gm20b_gr_clear_sm_error_state,
 		.suspend_contexts = gr_gp10b_suspend_contexts,
 		.resume_contexts = gr_gk20a_resume_contexts,
-		.trigger_suspend = gr_gk20a_trigger_suspend,
-		.wait_for_pause = gr_gk20a_wait_for_pause,
-		.resume_from_pause = gr_gk20a_resume_from_pause,
+		.trigger_suspend = NULL,
+		.wait_for_pause = NULL,
+		.resume_from_pause = NULL,
 		.clear_sm_errors = gr_gk20a_clear_sm_errors,
 		.sm_debugger_attached = gk20a_gr_sm_debugger_attached,
 		.suspend_single_sm = gk20a_gr_suspend_single_sm,
diff --git a/drivers/gpu/nvgpu/hal/init/hal_gv11b.c b/drivers/gpu/nvgpu/hal/init/hal_gv11b.c
index 942bbf5f6..2b0267a64 100644
--- a/drivers/gpu/nvgpu/hal/init/hal_gv11b.c
+++ b/drivers/gpu/nvgpu/hal/init/hal_gv11b.c
@@ -333,9 +333,9 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 8_7))
 		.clear_sm_error_state = gv11b_gr_clear_sm_error_state,
 		.suspend_contexts = gr_gp10b_suspend_contexts,
 		.resume_contexts = gr_gk20a_resume_contexts,
-		.trigger_suspend = gv11b_gr_sm_trigger_suspend,
-		.wait_for_pause = gr_gk20a_wait_for_pause,
-		.resume_from_pause = gv11b_gr_resume_from_pause,
+		.trigger_suspend = NULL,
+		.wait_for_pause = NULL,
+		.resume_from_pause = NULL,
 		.clear_sm_errors = gr_gk20a_clear_sm_errors,
 		.sm_debugger_attached = gv11b_gr_sm_debugger_attached,
 		.suspend_single_sm = gv11b_gr_suspend_single_sm,
diff --git a/drivers/gpu/nvgpu/hal/init/hal_tu104.c b/drivers/gpu/nvgpu/hal/init/hal_tu104.c
index b983d635f..18c1d24d6 100644
--- a/drivers/gpu/nvgpu/hal/init/hal_tu104.c
+++ b/drivers/gpu/nvgpu/hal/init/hal_tu104.c
@@ -380,9 +380,9 @@ static const struct gpu_ops tu104_ops = {
 		.clear_sm_error_state = gv11b_gr_clear_sm_error_state,
 		.suspend_contexts = gr_gp10b_suspend_contexts,
 		.resume_contexts = gr_gk20a_resume_contexts,
-		.trigger_suspend = gv11b_gr_sm_trigger_suspend,
-		.wait_for_pause = gr_gk20a_wait_for_pause,
-		.resume_from_pause = gv11b_gr_resume_from_pause,
+		.trigger_suspend = NULL,
+		.wait_for_pause = NULL,
+		.resume_from_pause = NULL,
 		.clear_sm_errors = gr_gk20a_clear_sm_errors,
 		.sm_debugger_attached = gv11b_gr_sm_debugger_attached,
 		.suspend_single_sm = gv11b_gr_suspend_single_sm,
diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_ctrl.c b/drivers/gpu/nvgpu/os/linux/ioctl_ctrl.c
index 2069678fb..a445acd6c 100644
--- a/drivers/gpu/nvgpu/os/linux/ioctl_ctrl.c
+++ b/drivers/gpu/nvgpu/os/linux/ioctl_ctrl.c
@@ -768,8 +768,12 @@ static int nvgpu_gpu_ioctl_trigger_suspend(struct gk20a *g)
 		return err;
 
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-	err = nvgpu_pg_elpg_protected_call(g,
+	if (g->ops.gr.trigger_suspend != NULL) {
+		err = nvgpu_pg_elpg_protected_call(g,
 			g->ops.gr.trigger_suspend(g));
+	} else {
+		err = -ENOSYS;
+	}
 	nvgpu_mutex_release(&g->dbg_sessions_lock);
 
 	gk20a_idle(g);
@@ -806,28 +810,32 @@ static int nvgpu_gpu_ioctl_wait_for_pause(struct gk20a *g,
 	}
 
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-	(void)nvgpu_pg_elpg_protected_call(g,
+	if (g->ops.gr.wait_for_pause != NULL) {
+		err = nvgpu_pg_elpg_protected_call(g,
 			g->ops.gr.wait_for_pause(g, w_state));
 
-	for (sm_id = 0; sm_id < no_of_sm; sm_id++) {
-		ioctl_w_state[sm_id].valid_warps[0] =
-			w_state[sm_id].valid_warps[0];
-		ioctl_w_state[sm_id].valid_warps[1] =
-			w_state[sm_id].valid_warps[1];
-		ioctl_w_state[sm_id].trapped_warps[0] =
-			w_state[sm_id].trapped_warps[0];
-		ioctl_w_state[sm_id].trapped_warps[1] =
-			w_state[sm_id].trapped_warps[1];
-		ioctl_w_state[sm_id].paused_warps[0] =
-			w_state[sm_id].paused_warps[0];
-		ioctl_w_state[sm_id].paused_warps[1] =
-			w_state[sm_id].paused_warps[1];
-	}
-	/* Copy to user space - pointed by "args->pwarpstate" */
-	if (copy_to_user((void __user *)(uintptr_t)args->pwarpstate,
-		w_state, ioctl_size)) {
-		nvgpu_log_fn(g, "copy_to_user failed!");
-		err = -EFAULT;
+		for (sm_id = 0; sm_id < no_of_sm; sm_id++) {
+			ioctl_w_state[sm_id].valid_warps[0] =
+				w_state[sm_id].valid_warps[0];
+			ioctl_w_state[sm_id].valid_warps[1] =
+				w_state[sm_id].valid_warps[1];
+			ioctl_w_state[sm_id].trapped_warps[0] =
+				w_state[sm_id].trapped_warps[0];
+			ioctl_w_state[sm_id].trapped_warps[1] =
+				w_state[sm_id].trapped_warps[1];
+			ioctl_w_state[sm_id].paused_warps[0] =
+				w_state[sm_id].paused_warps[0];
+			ioctl_w_state[sm_id].paused_warps[1] =
+				w_state[sm_id].paused_warps[1];
+		}
+		/* Copy to user space - pointed by "args->pwarpstate" */
+		if (copy_to_user((void __user *)(uintptr_t)args->pwarpstate,
+			w_state, ioctl_size)) {
+			nvgpu_log_fn(g, "copy_to_user failed!");
+			err = -EFAULT;
+		}
+	} else {
+		err = -ENOSYS;
 	}
 
 	nvgpu_mutex_release(&g->dbg_sessions_lock);
@@ -850,8 +858,12 @@ static int nvgpu_gpu_ioctl_resume_from_pause(struct gk20a *g)
 		return err;
 
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-	err = nvgpu_pg_elpg_protected_call(g,
+	if (g->ops.gr.resume_from_pause != NULL) {
+		err = nvgpu_pg_elpg_protected_call(g,
 			g->ops.gr.resume_from_pause(g));
+	} else {
+		err = -ENOSYS;
+	}
 	nvgpu_mutex_release(&g->dbg_sessions_lock);
 
 	gk20a_idle(g);
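
The ioctl-layer hunks above all apply the same pattern for an optional HAL operation: check the function pointer under dbg_sessions_lock and fall back to -ENOSYS (the conventional errno for an unimplemented call) when the chip does not wire up the op. Below is a minimal, self-contained C sketch of that pattern; every identifier in it (hypothetical_gpu, hypothetical_gr_ops, do_trigger_suspend) is illustrative only and is not an nvgpu symbol.

/*
 * Sketch of the "optional HAL op" guard used by the ioctl changes above.
 * All identifiers here are hypothetical; they are not nvgpu symbols.
 */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct hypothetical_gpu;

struct hypothetical_gr_ops {
	/* May be NULL on chips that do not implement SM suspend. */
	int (*trigger_suspend)(struct hypothetical_gpu *g);
};

static int do_trigger_suspend(struct hypothetical_gpu *g,
			      const struct hypothetical_gr_ops *ops)
{
	/* Mirror the ioctl-layer guard: report "not implemented"
	 * rather than calling through a NULL pointer. */
	if (ops->trigger_suspend == NULL)
		return -ENOSYS;

	return ops->trigger_suspend(g);
}

int main(void)
{
	struct hypothetical_gr_ops ops = { .trigger_suspend = NULL };

	/* Prints a negative errno (-ENOSYS) because no op is wired up. */
	printf("trigger_suspend: %d\n", do_trigger_suspend(NULL, &ops));
	return 0;
}

Returning -ENOSYS from the ioctl handlers, rather than keeping stub implementations in each per-chip ops table, lets userspace distinguish "unsupported on this GPU" from a genuine failure of the suspend/resume sequence.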