gpu: nvgpu: move abort_tsg from fifo to tsg

Moved TSG abort to common code:
- gk20a_fifo_abort_tsg -> nvgpu_tsg_abort

Removed gk20a_disable_channel, which was unused.

Jira NVGPU-2979

Change-Id: Ie368b162dd775b4651e647d53f7e78261bdf5d84
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2093480
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit e91fdab442
parent 9f619cfbaa
Author: Thomas Fleury <tfleury@nvidia.com>
Commit: mobile promotions <svcmobile_promotions@nvidia.com>
Date:   2019-04-08 13:38:51 -07:00

7 files changed, 35 insertions(+), 41 deletions(-)

@@ -240,7 +240,7 @@ void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt)
 	nvgpu_log_fn(ch->g, " ");
 
 	if (tsg != NULL) {
-		return gk20a_fifo_abort_tsg(ch->g, tsg, channel_preempt);
+		return nvgpu_tsg_abort(ch->g, tsg, channel_preempt);
 	} else {
 		nvgpu_err(ch->g, "chid: %d is not bound to tsg", ch->chid);
 	}
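The rename is purely mechanical: the signature is unchanged, so every call site in this commit swaps only the function name. A minimal before/after sketch (assuming the caller already holds valid g and tsg pointers):

	/* before: the abort helper lived in the gk20a FIFO code */
	gk20a_fifo_abort_tsg(g, tsg, true);

	/* after: same arguments, now declared in the common TSG header */
	nvgpu_tsg_abort(g, tsg, true);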
@@ -274,12 +274,6 @@ int gk20a_wait_channel_idle(struct channel_gk20a *ch)
 	return 0;
 }
 
-void gk20a_disable_channel(struct channel_gk20a *ch)
-{
-	gk20a_channel_abort(ch, true);
-	channel_gk20a_update_runlist(ch, false);
-}
-
 void gk20a_wait_until_counter_is_N(
 	struct channel_gk20a *ch, nvgpu_atomic_t *counter, int wait_value,
 	struct nvgpu_cond *c, const char *caller, const char *counter_name)
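gk20a_disable_channel had no remaining callers, so it is deleted outright. Should equivalent behavior ever be needed again, its two-line body can simply be open-coded; a sketch using the same calls the deleted helper made:

	/* abort the channel (with TSG preemption), then take it
	 * off the runlist -- exactly what gk20a_disable_channel did */
	gk20a_channel_abort(ch, true);
	channel_gk20a_update_runlist(ch, false);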

@@ -126,7 +126,7 @@ int nvgpu_tsg_unbind_channel(struct tsg_gk20a *tsg, struct channel_gk20a *ch)
 		nvgpu_err(g, "Channel %d unbind failed, tearing down TSG %d",
 			ch->chid, tsg->tsgid);
-		gk20a_fifo_abort_tsg(g, tsg, true);
+		nvgpu_tsg_abort(g, tsg, true);
 
 		/* If channel unbind fails, channel is still part of runlist */
 		channel_gk20a_update_runlist(ch, false);
@@ -322,7 +322,7 @@ void nvgpu_tsg_recover(struct gk20a *g, struct tsg_gk20a *tsg,
 		gk20a_debug_dump(g);
 	}
 
-		gk20a_fifo_abort_tsg(g, tsg, false);
+		nvgpu_tsg_abort(g, tsg, false);
 	}
 
 	nvgpu_mutex_release(&g->dbg_sessions_lock);
@@ -875,3 +875,31 @@ int gk20a_tsg_set_sm_exception_type_mask(struct channel_gk20a *ch,
 
 	return 0;
 }
+
+void nvgpu_tsg_abort(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt)
+{
+	struct channel_gk20a *ch = NULL;
+
+	nvgpu_log_fn(g, " ");
+
+	WARN_ON(tsg->abortable == false);
+
+	g->ops.tsg.disable(tsg);
+
+	if (preempt) {
+		(void)g->ops.fifo.preempt_tsg(g, tsg);
+	}
+
+	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list,
+			channel_gk20a, ch_entry) {
+		if (gk20a_channel_get(ch) != NULL) {
+			gk20a_channel_set_unserviceable(ch);
+			if (g->ops.channel.abort_clean_up != NULL) {
+				g->ops.channel.abort_clean_up(ch);
+			}
+			gk20a_channel_put(ch);
+		}
+	}
+	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
+}
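The moved function is a near-verbatim copy of the one deleted from the gk20a FIFO code below; the only textual deltas are cosmetic, sketched here side by side:

	/* old: preempt return value silently dropped */
	g->ops.fifo.preempt_tsg(g, tsg);
	/* new: the drop is made explicit with a (void) cast */
	(void)g->ops.fifo.preempt_tsg(g, tsg);

	/* old: HAL reached through the channel back-pointer */
	ch->g->ops.channel.abort_clean_up(ch);
	/* new: same object, reached through the g argument */
	g->ops.channel.abort_clean_up(ch);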

@@ -157,33 +157,6 @@ bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id,
 	return g->ops.engine.is_fault_engine_subid_gpc(g, engine_subid);
 }
 
-void gk20a_fifo_abort_tsg(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt)
-{
-	struct channel_gk20a *ch = NULL;
-
-	nvgpu_log_fn(g, " ");
-
-	WARN_ON(tsg->abortable == false);
-
-	g->ops.tsg.disable(tsg);
-
-	if (preempt) {
-		g->ops.fifo.preempt_tsg(g, tsg);
-	}
-
-	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
-		if (gk20a_channel_get(ch) != NULL) {
-			gk20a_channel_set_unserviceable(ch);
-			if (ch->g->ops.channel.abort_clean_up != NULL) {
-				ch->g->ops.channel.abort_clean_up(ch);
-			}
-			gk20a_channel_put(ch);
-		}
-	}
-	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
-}
-
 int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
 {
 	unsigned long engine_id, engines = 0U;
@@ -388,7 +361,7 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
 			nvgpu_tsg_set_ctx_mmu_error(g, tsg);
 		}
 		verbose = nvgpu_tsg_mark_error(g, tsg);
-		gk20a_fifo_abort_tsg(g, tsg, false);
+		nvgpu_tsg_abort(g, tsg, false);
 	}
 
 	/* put back the ref taken early above */

@@ -237,7 +237,6 @@ void fifo_gk20a_finish_mmu_fault_handling(struct gk20a *g,
 	unsigned long fault_id);
 u32 gk20a_fifo_get_failing_engine_data(struct gk20a *g,
 	u32 *__id, bool *__is_tsg);
-void gk20a_fifo_abort_tsg(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt);
 void gk20a_fifo_issue_preempt(struct gk20a *g, u32 id, bool is_tsg);
 int gk20a_fifo_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice);

@@ -645,7 +645,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
 			runlist->runlist_id);
 	}
 
-	gk20a_fifo_abort_tsg(g, tsg, false);
+	nvgpu_tsg_abort(g, tsg, false);
 
 	nvgpu_log(g, gpu_dbg_info, "aborted tsg id %lu", tsgid);
 }
@@ -874,7 +874,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 				nvgpu_tsg_set_ctx_mmu_error(g, tsg);
 			}
 			(void)nvgpu_tsg_mark_error(g, tsg);
-			gk20a_fifo_abort_tsg(g, tsg, false);
+			nvgpu_tsg_abort(g, tsg, false);
 		}
 	} else {
 		gv11b_fifo_locked_abort_runlist_active_tsgs(g, rc_type,

@@ -422,7 +422,6 @@ bool nvgpu_channel_update_and_check_ctxsw_timeout(struct channel_gk20a *ch,
 void nvgpu_channel_recover(struct gk20a *g, struct channel_gk20a *ch,
 	bool verbose, u32 rc_type);
-void gk20a_disable_channel(struct channel_gk20a *ch);
 void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt);
 void nvgpu_channel_abort_clean_up(struct channel_gk20a *ch);
 void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events);

@@ -158,5 +158,6 @@ void nvgpu_tsg_set_error_notifier(struct gk20a *g, struct tsg_gk20a *tsg,
 	u32 error_notifier);
 bool nvgpu_tsg_ctxsw_timeout_debug_dump_state(struct tsg_gk20a *tsg);
 void nvgpu_tsg_set_ctxsw_timeout_accumulated_ms(struct tsg_gk20a *tsg, u32 ms);
+void nvgpu_tsg_abort(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt);
 
 #endif /* NVGPU_TSG_H */