gpu: nvgpu: move some fifo-based HAL ops to the hal.channel unit

a) free_channel_ctx_header frees the channel's underlying subctx header
and therefore belongs in the hal.channel unit rather than hal.fifo. The
HAL op is moved there and renamed to free_ctx_header. The function
gv11b_free_subctx_header is moved to the channel_gv11b.* files and
renamed to gv11b_channel_free_subctx_header.
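
A minimal sketch of the resulting per-chip wiring, assuming the usual
nvgpu HAL init pattern; the init function name below is hypothetical,
only the op and function names are taken from this change:

    /* Hypothetical gv11b HAL init hook; the function name and the
     * surrounding wiring are assumed, only the assignment is from
     * this change. */
    static void gv11b_init_channel_hal(struct gpu_ops *gops)
    {
            /* was: gops->fifo.free_channel_ctx_header =
             *              gv11b_free_subctx_header; */
            gops->channel.free_ctx_header =
                    gv11b_channel_free_subctx_header;
    }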

b) ch_abort_clean_up is moved to the hal.channel unit and renamed to
abort_clean_up.
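
Call sites now reach the op through g->ops.channel and keep the NULL
guard, as in the hunks below:

    if (ch->g->ops.channel.abort_clean_up != NULL) {
            ch->g->ops.channel.abort_clean_up(ch);
    }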

c) channel_resume and channel_suspend resume and suspend all
serviceable channels. These ops belong in the hal.channel unit and are
moved out of the hal.fifo unit.

The HAL ops channel_resume and channel_suspend are renamed to
resume_all_serviceable_ch and suspend_all_serviceable_ch respectively.

gk20a_channel_resume and gk20a_channel_suspend are also renamed to
nvgpu_channel_resume_all_serviceable_ch and
nvgpu_channel_suspend_all_serviceable_ch respectively.
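
A hedged sketch of the new dispatch; the helper name
nvgpu_suspend_all_channels is hypothetical, the guard-and-call pattern
mirrors existing nvgpu HAL usage, and the int return is assumed from
the gk20a variants:

    /* Common-code wiring (op and function names are from this change;
     * the surrounding init code is assumed): */
    gops->channel.suspend_all_serviceable_ch =
            nvgpu_channel_suspend_all_serviceable_ch;
    gops->channel.resume_all_serviceable_ch =
            nvgpu_channel_resume_all_serviceable_ch;

    /* Hypothetical caller, e.g. in a suspend path: */
    static int nvgpu_suspend_all_channels(struct gk20a *g)
    {
            if (g->ops.channel.suspend_all_serviceable_ch != NULL) {
                    return g->ops.channel.suspend_all_serviceable_ch(g);
            }
            return 0;
    }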

d) The set_error_notifier HAL op belongs to the hal.channel unit and is
moved there accordingly.
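
The op keeps its shape at the call sites (see the hunks below); a
sketch of the expected prototype, inferred only from those call sites:

    /* Expected op prototype, inferred from the call sites below: */
    void (*set_error_notifier)(struct channel_gk20a *ch,
                    u32 error_notifier);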

Jira NVGPU-2978

Change-Id: Icb52245cacba3004e2fd32519029a1acff60c23c
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2083593
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>

@@ -204,8 +204,8 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt)
 	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
 		if (gk20a_channel_get(ch) != NULL) {
 			gk20a_channel_set_unserviceable(ch);
-			if (ch->g->ops.fifo.ch_abort_clean_up != NULL) {
-				ch->g->ops.fifo.ch_abort_clean_up(ch);
+			if (ch->g->ops.channel.abort_clean_up != NULL) {
+				ch->g->ops.channel.abort_clean_up(ch);
 			}
 			gk20a_channel_put(ch);
 		}
@@ -649,7 +649,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
 	nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
 			channel_gk20a, ch_entry) {
 		if (gk20a_channel_get(ch_tsg) != NULL) {
-			g->ops.fifo.set_error_notifier(ch_tsg,
+			g->ops.channel.set_error_notifier(ch_tsg,
 				err_code);
 			gk20a_channel_put(ch_tsg);
 		}
@@ -747,8 +747,8 @@ int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch)
 		g->ops.tsg.enable(tsg);
 	}
-	if (ch->g->ops.fifo.ch_abort_clean_up != NULL) {
-		ch->g->ops.fifo.ch_abort_clean_up(ch);
+	if (ch->g->ops.channel.abort_clean_up != NULL) {
+		ch->g->ops.channel.abort_clean_up(ch);
 	}
 
 	return 0;
@@ -881,7 +881,7 @@ void gk20a_fifo_preempt_timeout_rc_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 		if (gk20a_channel_get(ch) == NULL) {
 			continue;
 		}
-		g->ops.fifo.set_error_notifier(ch,
+		g->ops.channel.set_error_notifier(ch,
 			NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
 		gk20a_channel_put(ch);
 	}
@@ -893,7 +893,7 @@ void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, struct channel_gk20a *ch)
 {
 	nvgpu_err(g, "preempt channel %d timeout", ch->chid);
 
-	g->ops.fifo.set_error_notifier(ch,
+	g->ops.channel.set_error_notifier(ch,
 		NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
 	nvgpu_channel_recover(g, ch, true, RC_TYPE_PREEMPT_TIMEOUT);
 }