diff --git a/drivers/gpu/nvgpu/common/fifo/tsg.c b/drivers/gpu/nvgpu/common/fifo/tsg.c
index ebcf23b5b..654e14614 100644
--- a/drivers/gpu/nvgpu/common/fifo/tsg.c
+++ b/drivers/gpu/nvgpu/common/fifo/tsg.c
@@ -824,6 +824,11 @@ void nvgpu_tsg_abort(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt)
 	g->ops.tsg.disable(tsg);
 
 	if (preempt) {
+		/*
+		 * Ignore the return value below. If preempt fails, preempt_tsg
+		 * operation will print the error and ctxsw timeout may trigger
+		 * a recovery if needed.
+		 */
 		(void)g->ops.fifo.preempt_tsg(g, tsg);
 	}
 
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 1f0a9bed6..7f90f8880 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -148,10 +148,16 @@ int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 {
 	struct nvgpu_timeout timeout;
 	u32 delay = POLL_DELAY_MIN_US;
-	int ret = -EBUSY;
+	int ret = 0;
 
-	nvgpu_timeout_init(g, &timeout, gk20a_fifo_get_preempt_timeout(g),
+	ret = nvgpu_timeout_init(g, &timeout, gk20a_fifo_get_preempt_timeout(g),
 			   NVGPU_TIMER_CPU_TIMER);
+	if (ret != 0) {
+		nvgpu_err(g, "nvgpu_timeout_init failed err=%d ", ret);
+		return ret;
+	}
+
+	ret = -EBUSY;
 	do {
 		if ((gk20a_readl(g, fifo_preempt_r()) &
 			fifo_preempt_pending_true_f()) == 0U) {
@@ -193,6 +199,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
 	int ret = 0;
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 	int mutex_ret = 0;
+	int err = 0;
 
 	nvgpu_log_fn(g, "chid: %d", ch->chid);
 
@@ -205,7 +212,12 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
 	ret = __locked_fifo_preempt(g, ch->chid, false);
 
 	if (mutex_ret == 0) {
-		nvgpu_pmu_lock_release(g, &g->pmu, PMU_MUTEX_ID_FIFO, &token);
+		err = nvgpu_pmu_lock_release(g, &g->pmu, PMU_MUTEX_ID_FIFO,
+				&token);
+		if (err != 0) {
+			nvgpu_err(g, "nvgpu_pmu_lock_release failed err=%d",
+				err);
+		}
 	}
 
 	nvgpu_fifo_unlock_active_runlists(g);
@@ -238,6 +250,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 	int ret = 0;
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 	int mutex_ret = 0;
+	int err = 0;
 
 	nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
 
@@ -250,7 +263,12 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 	ret = __locked_fifo_preempt(g, tsg->tsgid, true);
 
 	if (mutex_ret == 0) {
-		nvgpu_pmu_lock_release(g, &g->pmu, PMU_MUTEX_ID_FIFO, &token);
+		err = nvgpu_pmu_lock_release(g, &g->pmu, PMU_MUTEX_ID_FIFO,
+				&token);
+		if (err != 0) {
+			nvgpu_err(g, "nvgpu_pmu_lock_release failed err=%d",
+				err);
+		}
 	}
 
 	nvgpu_fifo_unlock_active_runlists(g);
diff --git a/drivers/gpu/nvgpu/hal/fifo/runlist_gk20a.c b/drivers/gpu/nvgpu/hal/fifo/runlist_gk20a.c
index a327452e0..ea1f6126a 100644
--- a/drivers/gpu/nvgpu/hal/fifo/runlist_gk20a.c
+++ b/drivers/gpu/nvgpu/hal/fifo/runlist_gk20a.c
@@ -95,7 +95,16 @@ int gk20a_fifo_reschedule_preempt_next(struct channel_gk20a *ch,
 		nvgpu_readl(g, fifo_preempt_r()));
 #endif
 	if (wait_preempt) {
-		g->ops.fifo.is_preempt_pending(g, preempt_id, preempt_type);
+		if (g->ops.fifo.is_preempt_pending(g, preempt_id,
+				preempt_type) != 0) {
+			nvgpu_err(g, "fifo preempt timed out");
+			/*
+			 * This function does not care if preempt
+			 * times out since it is here only to improve
+			 * latency. If a timeout happens, it will be
+			 * handled by other fifo handling code.
+			 */
+		}
 	}
 #ifdef TRACEPOINTS_ENABLED
 	trace_gk20a_reschedule_preempted_next(ch->chid);
diff --git a/drivers/gpu/nvgpu/hal/rc/rc_gk20a.c b/drivers/gpu/nvgpu/hal/rc/rc_gk20a.c
index 74a60d289..c4d131b20 100644
--- a/drivers/gpu/nvgpu/hal/rc/rc_gk20a.c
+++ b/drivers/gpu/nvgpu/hal/rc/rc_gk20a.c
@@ -118,8 +118,13 @@ void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 eng_bitmask,
 		g->ops.fifo.intr_set_recover_mask(g);
 
 		g->ops.fifo.trigger_mmu_fault(g, engine_ids);
-		gk20a_fifo_handle_mmu_fault_locked(g, mmu_fault_engines, ref_id,
-				ref_id_is_tsg);
+		/*
+		 * Ignore the "Verbose" flag from
+		 * gk20a_fifo_handle_mmu_fault_locked since it is not needed
+		 * here
+		 */
+		(void) gk20a_fifo_handle_mmu_fault_locked(g, mmu_fault_engines,
+				ref_id, ref_id_is_tsg);
 
 		g->ops.fifo.intr_unset_recover_mask(g);
 	}
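
The hunks above all apply the same error-handling pattern: a callee's return value is either checked and logged (nvgpu_timeout_init, nvgpu_pmu_lock_release, is_preempt_pending) or explicitly discarded with a (void) cast plus a comment when ignoring it is intentional (preempt_tsg, the mmu-fault "verbose" flag). Below is a minimal standalone C sketch of that pattern; it is not nvgpu code, and the demo_* helpers are hypothetical stand-ins for the driver APIs named in the patch.

/*
 * Sketch of the return-value handling pattern used in the patch:
 * check-and-propagate, check-and-log, or cast to (void) on purpose.
 * All demo_* names are illustrative stand-ins, not nvgpu symbols.
 */
#include <stdio.h>

#define DEMO_EBUSY 16

/* Stand-in for nvgpu_timeout_init: fails on an obviously bad duration. */
static int demo_timeout_init(unsigned int duration_ms)
{
	return (duration_ms == 0U) ? -1 : 0;
}

/* Stand-in for nvgpu_pmu_lock_release: fails on an invalid token. */
static int demo_lock_release(int token)
{
	return (token < 0) ? -1 : 0;
}

/* Stand-in for nvgpu_err: logs a message with an error code. */
static void demo_log_err(const char *msg, int err)
{
	(void) fprintf(stderr, "%s err=%d\n", msg, err);
}

/* Mirrors gk20a_fifo_is_preempt_pending: init the timer first, bail on error. */
static int demo_wait_preempt_done(unsigned int timeout_ms)
{
	int ret = demo_timeout_init(timeout_ms);

	/* Pattern 1: propagate an init failure instead of silently polling. */
	if (ret != 0) {
		demo_log_err("demo_timeout_init failed", ret);
		return ret;
	}

	ret = -DEMO_EBUSY;
	/* ...hardware polling would go here; clear ret once preempt completes... */
	ret = 0;
	return ret;
}

int main(void)
{
	int err;

	/* Pattern 1 in action: the caller sees the failure. */
	err = demo_wait_preempt_done(0U);
	if (err != 0) {
		demo_log_err("demo_wait_preempt_done failed", err);
	}

	/* Pattern 2: check and log, but do not disturb the main return path. */
	err = demo_lock_release(-1);
	if (err != 0) {
		demo_log_err("demo_lock_release failed", err);
	}

	/* Pattern 3: intentionally ignored return value, made explicit. */
	(void) demo_wait_preempt_done(10U);

	return 0;
}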