diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index f5d5e4677..9492d646c 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -406,17 +406,36 @@ static int channel_gk20a_update_runlist(struct channel_gk20a *c, bool add)
 	return gk20a_fifo_update_runlist(c->g, 0, c->hw_chid, add, true);
 }
 
-void gk20a_disable_channel_no_update(struct channel_gk20a *ch)
+void gk20a_channel_abort(struct channel_gk20a *ch)
 {
+	struct channel_gk20a_job *job, *n;
+	bool released_job_semaphore = false;
+
 	/* ensure no fences are pending */
 	if (ch->sync)
 		ch->sync->set_min_eq_max(ch->sync);
 
+	/* release all job semaphores (applies only to jobs that use
+	   semaphore synchronization) */
+	mutex_lock(&ch->jobs_lock);
+	list_for_each_entry_safe(job, n, &ch->jobs, list) {
+		if (job->post_fence.semaphore) {
+			gk20a_semaphore_release(job->post_fence.semaphore);
+			released_job_semaphore = true;
+		}
+	}
+	mutex_unlock(&ch->jobs_lock);
+
 	/* disable channel */
 	gk20a_writel(ch->g, ccsr_channel_r(ch->hw_chid),
 		gk20a_readl(ch->g,
 			ccsr_channel_r(ch->hw_chid)) |
 		ccsr_channel_enable_clr_true_f());
+
+	if (released_job_semaphore) {
+		wake_up_interruptible_all(&ch->semaphore_wq);
+		gk20a_channel_update(ch, 0);
+	}
 }
 
 int gk20a_wait_channel_idle(struct channel_gk20a *ch)
@@ -455,7 +474,7 @@ void gk20a_disable_channel(struct channel_gk20a *ch,
 	}
 
 	/* disable the channel from hw and increment syncpoints */
-	gk20a_disable_channel_no_update(ch);
+	gk20a_channel_abort(ch);
 
 	gk20a_wait_channel_idle(ch);
 
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index 84983cc65..60437e669 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -153,7 +153,7 @@ bool gk20a_channel_update_and_check_timeout(struct channel_gk20a *ch,
 void gk20a_disable_channel(struct channel_gk20a *ch,
 			   bool wait_for_finish,
 			   unsigned long finish_timeout);
-void gk20a_disable_channel_no_update(struct channel_gk20a *ch);
+void gk20a_channel_abort(struct channel_gk20a *ch);
 int gk20a_channel_finish(struct channel_gk20a *ch, unsigned long timeout);
 void gk20a_set_error_notifier(struct channel_gk20a *ch, __u32 error);
 void gk20a_channel_semaphore_wakeup(struct gk20a *g);
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 2d09a8406..f246c73e4 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1043,7 +1043,7 @@ static bool gk20a_fifo_handle_mmu_fault(struct gk20a *g)
 			if (ch->in_use) {
 				/* disable the channel from hw and increment
 				 * syncpoints */
-				gk20a_disable_channel_no_update(ch);
+				gk20a_channel_abort(ch);
 
 				/* remove the channel from runlist */
 				clear_bit(ch->hw_chid,
@@ -1180,7 +1180,7 @@ void gk20a_fifo_recover_ch(struct gk20a *g, u32 hw_chid, bool verbose)
 	struct channel_gk20a *ch =
 		g->fifo.channel + hw_chid;
 
-	gk20a_disable_channel_no_update(ch);
+	gk20a_channel_abort(ch);
 
 	for (i = 0; i < g->fifo.max_runlists; i++)
 		gk20a_fifo_update_runlist(g, i, hw_chid, false, false);