gpu: nvgpu: move some fifo based HAL ops to hal.channel unit

a) free_channel_ctx_header is used to free the channel's underlying subctx
and belongs to the hal.channel unit instead of fifo. It is moved
accordingly and the HAL op is renamed to free_ctx_header. The function
gv11b_free_subctx_header is moved to the channel_gv11b.* files and
renamed to gv11b_channel_free_subctx_header.

b) ch_abort_clean_up is moved to the hal.channel unit

c) channel_resume and channel_suspend are used to resume and suspend all
the serviceable channels. These belong to the hal.channel unit and are
moved from the hal.fifo unit.

The HAL ops channel_resume and channel_suspend are renamed to
resume_all_serviceable_ch and suspend_all_serviceable_ch respectively.

gk20a_channel_resume and gk20a_channel_suspend are also renamed to
nvgpu_channel_resume_all_serviceable_ch and
nvgpu_channel_suspend_all_serviceable_ch respectively.

d) set_error_notifier HAL ops belongs to hal.channel and is moved
accordingly.

Jira NVGPU-2978

Change-Id: Icb52245cacba3004e2fd32519029a1acff60c23c
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2083593
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Debarshi Dutta
2019-03-27 12:40:16 +05:30
committed by mobile promotions
parent c0b65e8b05
commit eaab8ad1f2
22 changed files with 127 additions and 107 deletions

View File

@@ -191,7 +191,7 @@ int gk20a_disable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch)
} }
} }
void gk20a_channel_abort_clean_up(struct channel_gk20a *ch) void nvgpu_channel_abort_clean_up(struct channel_gk20a *ch)
{ {
/* synchronize with actual job cleanup */ /* synchronize with actual job cleanup */
nvgpu_mutex_acquire(&ch->joblist.cleanup_lock); nvgpu_mutex_acquire(&ch->joblist.cleanup_lock);
@@ -409,8 +409,8 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
g->ops.gr.fecs_trace.unbind_channel(g, &ch->inst_block); g->ops.gr.fecs_trace.unbind_channel(g, &ch->inst_block);
#endif #endif
if (g->ops.fifo.free_channel_ctx_header != NULL) { if (g->ops.channel.free_ctx_header != NULL) {
g->ops.fifo.free_channel_ctx_header(ch); g->ops.channel.free_ctx_header(ch);
ch->subctx = NULL; ch->subctx = NULL;
} }
@@ -1437,7 +1437,7 @@ bool nvgpu_channel_mark_error(struct gk20a *g, struct channel_gk20a *ch)
void nvgpu_channel_set_error_notifier(struct gk20a *g, struct channel_gk20a *ch, void nvgpu_channel_set_error_notifier(struct gk20a *g, struct channel_gk20a *ch,
u32 error_notifier) u32 error_notifier)
{ {
g->ops.fifo.set_error_notifier(ch, error_notifier); g->ops.channel.set_error_notifier(ch, error_notifier);
} }
void nvgpu_channel_set_ctx_mmu_error(struct gk20a *g, void nvgpu_channel_set_ctx_mmu_error(struct gk20a *g,
@@ -2520,7 +2520,7 @@ clean_up_mutex:
/* in this context the "channel" is the host1x channel which /* in this context the "channel" is the host1x channel which
* maps to *all* gk20a channels */ * maps to *all* gk20a channels */
int gk20a_channel_suspend(struct gk20a *g) int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g)
{ {
struct fifo_gk20a *f = &g->fifo; struct fifo_gk20a *f = &g->fifo;
u32 chid; u32 chid;
@@ -2580,7 +2580,7 @@ int gk20a_channel_suspend(struct gk20a *g)
return 0; return 0;
} }
int gk20a_channel_resume(struct gk20a *g) int nvgpu_channel_resume_all_serviceable_ch(struct gk20a *g)
{ {
struct fifo_gk20a *f = &g->fifo; struct fifo_gk20a *f = &g->fifo;
u32 chid; u32 chid;

View File

@@ -26,6 +26,7 @@
#include <nvgpu/atomic.h> #include <nvgpu/atomic.h>
#include <nvgpu/io.h> #include <nvgpu/io.h>
#include <nvgpu/gk20a.h> #include <nvgpu/gk20a.h>
#include <nvgpu/gr/subctx.h>
#include "channel_gk20a.h" #include "channel_gk20a.h"
#include "channel_gv11b.h" #include "channel_gv11b.h"
@@ -80,3 +81,10 @@ void gv11b_channel_reset_faulted(struct gk20a *g, struct channel_gk20a *ch,
gk20a_writel(g, ccsr_channel_r(ch->chid), reg); gk20a_writel(g, ccsr_channel_r(ch->chid), reg);
} }
/*
 * Free the channel's subcontext (ctx) header, if one was allocated.
 *
 * @ch: channel whose subctx header is to be released.
 *
 * No-op when ch->subctx is NULL, so it is safe to call during channel
 * teardown regardless of whether the subctx was ever set up. On free,
 * the subctx memory is unmapped from the channel's VM (ch->vm).
 * NOTE(review): callers appear responsible for clearing ch->subctx
 * afterwards (see gk20a_free_channel, which sets ch->subctx = NULL) --
 * this function does not do it itself.
 */
void gv11b_channel_free_subctx_header(struct channel_gk20a *ch)
{
if (ch->subctx != NULL) {
nvgpu_gr_subctx_free(ch->g, ch->subctx, ch->vm);
}
}

View File

@@ -35,5 +35,6 @@ void gv11b_channel_read_state(struct gk20a *g, struct channel_gk20a *ch,
struct nvgpu_channel_hw_state *state); struct nvgpu_channel_hw_state *state);
void gv11b_channel_reset_faulted(struct gk20a *g, struct channel_gk20a *ch, void gv11b_channel_reset_faulted(struct gk20a *g, struct channel_gk20a *ch,
bool eng, bool pbdma); bool eng, bool pbdma);
void gv11b_channel_free_subctx_header(struct channel_gk20a *ch);
#endif /* FIFO_CHANNEL_GV11B_H */ #endif /* FIFO_CHANNEL_GV11B_H */

View File

@@ -82,9 +82,9 @@ int gk20a_prepare_poweroff(struct gk20a *g)
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
if (g->ops.fifo.channel_suspend != NULL) { if (g->ops.channel.suspend_all_serviceable_ch != NULL) {
ret = g->ops.fifo.channel_suspend(g); ret = g->ops.channel.suspend_all_serviceable_ch(g);
if (ret != 0) { if (ret != 0U) {
return ret; return ret;
} }
} }
@@ -438,8 +438,8 @@ int gk20a_finalize_poweron(struct gk20a *g)
} }
#endif #endif
if (g->ops.fifo.channel_resume != NULL) { if (g->ops.channel.resume_all_serviceable_ch != NULL) {
g->ops.fifo.channel_resume(g); g->ops.channel.resume_all_serviceable_ch(g);
} }
goto exit; goto exit;

View File

@@ -373,7 +373,7 @@ int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list, nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
channel_gk20a, ch_entry) { channel_gk20a, ch_entry) {
if (gk20a_channel_get(ch_tsg)) { if (gk20a_channel_get(ch_tsg)) {
g->ops.fifo.set_error_notifier(ch_tsg, g->ops.channel.set_error_notifier(ch_tsg,
err_code); err_code);
gk20a_channel_set_unserviceable(ch_tsg); gk20a_channel_set_unserviceable(ch_tsg);
gk20a_channel_put(ch_tsg); gk20a_channel_put(ch_tsg);
@@ -456,11 +456,11 @@ int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
switch (info->type) { switch (info->type) {
case TEGRA_VGPU_FIFO_INTR_PBDMA: case TEGRA_VGPU_FIFO_INTR_PBDMA:
g->ops.fifo.set_error_notifier(ch, g->ops.channel.set_error_notifier(ch,
NVGPU_ERR_NOTIFIER_PBDMA_ERROR); NVGPU_ERR_NOTIFIER_PBDMA_ERROR);
break; break;
case TEGRA_VGPU_FIFO_INTR_CTXSW_TIMEOUT: case TEGRA_VGPU_FIFO_INTR_CTXSW_TIMEOUT:
g->ops.fifo.set_error_notifier(ch, g->ops.channel.set_error_notifier(ch,
NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT); NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
break; break;
case TEGRA_VGPU_FIFO_INTR_MMU_FAULT: case TEGRA_VGPU_FIFO_INTR_MMU_FAULT:
@@ -526,7 +526,7 @@ void vgpu_channel_abort_cleanup(struct gk20a *g, u32 chid)
} }
gk20a_channel_set_unserviceable(ch); gk20a_channel_set_unserviceable(ch);
g->ops.fifo.ch_abort_clean_up(ch); g->ops.channel.abort_clean_up(ch);
gk20a_channel_put(ch); gk20a_channel_put(ch);
} }
@@ -541,5 +541,5 @@ void vgpu_set_error_notifier(struct gk20a *g,
} }
ch = &g->fifo.channel[p->chid]; ch = &g->fifo.channel[p->chid];
g->ops.fifo.set_error_notifier(ch, p->error); g->ops.channel.set_error_notifier(ch, p->error);
} }

View File

@@ -429,10 +429,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.tsg_bind_channel = vgpu_tsg_bind_channel, .tsg_bind_channel = vgpu_tsg_bind_channel,
.tsg_unbind_channel = vgpu_tsg_unbind_channel, .tsg_unbind_channel = vgpu_tsg_unbind_channel,
.post_event_id = gk20a_tsg_event_id_post_event, .post_event_id = gk20a_tsg_event_id_post_event,
.ch_abort_clean_up = gk20a_channel_abort_clean_up,
.channel_suspend = gk20a_channel_suspend,
.channel_resume = gk20a_channel_resume,
.set_error_notifier = nvgpu_set_error_notifier,
.setup_sw = vgpu_fifo_setup_sw, .setup_sw = vgpu_fifo_setup_sw,
.cleanup_sw = vgpu_fifo_cleanup_sw, .cleanup_sw = vgpu_fifo_cleanup_sw,
.set_sm_exception_type_mask = vgpu_set_sm_exception_type_mask, .set_sm_exception_type_mask = vgpu_set_sm_exception_type_mask,
@@ -541,6 +537,12 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.enable = vgpu_channel_enable, .enable = vgpu_channel_enable,
.disable = vgpu_channel_disable, .disable = vgpu_channel_disable,
.count = vgpu_channel_count, .count = vgpu_channel_count,
.abort_clean_up = nvgpu_channel_abort_clean_up,
.suspend_all_serviceable_ch =
nvgpu_channel_suspend_all_serviceable_ch,
.resume_all_serviceable_ch =
nvgpu_channel_resume_all_serviceable_ch,
.set_error_notifier = nvgpu_set_error_notifier,
}, },
.tsg = { .tsg = {
.enable = vgpu_tsg_enable, .enable = vgpu_tsg_enable,

View File

@@ -778,30 +778,30 @@ int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info)
nvgpu_cond_broadcast_interruptible(&ch->semaphore_wq); nvgpu_cond_broadcast_interruptible(&ch->semaphore_wq);
break; break;
case TEGRA_VGPU_GR_INTR_SEMAPHORE_TIMEOUT: case TEGRA_VGPU_GR_INTR_SEMAPHORE_TIMEOUT:
g->ops.fifo.set_error_notifier(ch, g->ops.channel.set_error_notifier(ch,
NVGPU_ERR_NOTIFIER_GR_SEMAPHORE_TIMEOUT); NVGPU_ERR_NOTIFIER_GR_SEMAPHORE_TIMEOUT);
break; break;
case TEGRA_VGPU_GR_INTR_ILLEGAL_NOTIFY: case TEGRA_VGPU_GR_INTR_ILLEGAL_NOTIFY:
g->ops.fifo.set_error_notifier(ch, g->ops.channel.set_error_notifier(ch,
NVGPU_ERR_NOTIFIER_GR_ILLEGAL_NOTIFY); NVGPU_ERR_NOTIFIER_GR_ILLEGAL_NOTIFY);
case TEGRA_VGPU_GR_INTR_ILLEGAL_METHOD: case TEGRA_VGPU_GR_INTR_ILLEGAL_METHOD:
break; break;
case TEGRA_VGPU_GR_INTR_ILLEGAL_CLASS: case TEGRA_VGPU_GR_INTR_ILLEGAL_CLASS:
g->ops.fifo.set_error_notifier(ch, g->ops.channel.set_error_notifier(ch,
NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY);
break; break;
case TEGRA_VGPU_GR_INTR_FECS_ERROR: case TEGRA_VGPU_GR_INTR_FECS_ERROR:
break; break;
case TEGRA_VGPU_GR_INTR_CLASS_ERROR: case TEGRA_VGPU_GR_INTR_CLASS_ERROR:
g->ops.fifo.set_error_notifier(ch, g->ops.channel.set_error_notifier(ch,
NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY);
break; break;
case TEGRA_VGPU_GR_INTR_FIRMWARE_METHOD: case TEGRA_VGPU_GR_INTR_FIRMWARE_METHOD:
g->ops.fifo.set_error_notifier(ch, g->ops.channel.set_error_notifier(ch,
NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY);
break; break;
case TEGRA_VGPU_GR_INTR_EXCEPTION: case TEGRA_VGPU_GR_INTR_EXCEPTION:
g->ops.fifo.set_error_notifier(ch, g->ops.channel.set_error_notifier(ch,
NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY);
break; break;
#ifdef NVGPU_DEBUGGER #ifdef NVGPU_DEBUGGER

View File

@@ -514,13 +514,8 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.tsg_bind_channel = vgpu_gv11b_tsg_bind_channel, .tsg_bind_channel = vgpu_gv11b_tsg_bind_channel,
.tsg_unbind_channel = vgpu_tsg_unbind_channel, .tsg_unbind_channel = vgpu_tsg_unbind_channel,
.post_event_id = gk20a_tsg_event_id_post_event, .post_event_id = gk20a_tsg_event_id_post_event,
.ch_abort_clean_up = gk20a_channel_abort_clean_up,
.channel_suspend = gk20a_channel_suspend,
.channel_resume = gk20a_channel_resume,
.set_error_notifier = nvgpu_set_error_notifier,
.setup_sw = vgpu_fifo_setup_sw, .setup_sw = vgpu_fifo_setup_sw,
.cleanup_sw = vgpu_fifo_cleanup_sw, .cleanup_sw = vgpu_fifo_cleanup_sw,
.free_channel_ctx_header = vgpu_channel_free_ctx_header,
.ring_channel_doorbell = gv11b_ring_channel_doorbell, .ring_channel_doorbell = gv11b_ring_channel_doorbell,
.set_sm_exception_type_mask = vgpu_set_sm_exception_type_mask, .set_sm_exception_type_mask = vgpu_set_sm_exception_type_mask,
.usermode_base = gv11b_fifo_usermode_base, .usermode_base = gv11b_fifo_usermode_base,
@@ -628,6 +623,13 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.enable = vgpu_channel_enable, .enable = vgpu_channel_enable,
.disable = vgpu_channel_disable, .disable = vgpu_channel_disable,
.count = vgpu_channel_count, .count = vgpu_channel_count,
.free_ctx_header = vgpu_channel_free_ctx_header,
.abort_clean_up = nvgpu_channel_abort_clean_up,
.suspend_all_serviceable_ch =
nvgpu_channel_suspend_all_serviceable_ch,
.resume_all_serviceable_ch =
nvgpu_channel_resume_all_serviceable_ch,
.set_error_notifier = nvgpu_set_error_notifier,
}, },
.tsg = { .tsg = {
.enable = gv11b_tsg_enable, .enable = gv11b_tsg_enable,

View File

@@ -188,7 +188,7 @@ int vgpu_finalize_poweron_common(struct gk20a *g)
g->ops.chip_init_gpu_characteristics(g); g->ops.chip_init_gpu_characteristics(g);
g->ops.fifo.channel_resume(g); g->ops.channel.resume_all_serviceable_ch(g);
return 0; return 0;
} }

View File

@@ -204,8 +204,8 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt)
nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) { nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
if (gk20a_channel_get(ch) != NULL) { if (gk20a_channel_get(ch) != NULL) {
gk20a_channel_set_unserviceable(ch); gk20a_channel_set_unserviceable(ch);
if (ch->g->ops.fifo.ch_abort_clean_up != NULL) { if (ch->g->ops.channel.abort_clean_up != NULL) {
ch->g->ops.fifo.ch_abort_clean_up(ch); ch->g->ops.channel.abort_clean_up(ch);
} }
gk20a_channel_put(ch); gk20a_channel_put(ch);
} }
@@ -649,7 +649,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list, nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
channel_gk20a, ch_entry) { channel_gk20a, ch_entry) {
if (gk20a_channel_get(ch_tsg) != NULL) { if (gk20a_channel_get(ch_tsg) != NULL) {
g->ops.fifo.set_error_notifier(ch_tsg, g->ops.channel.set_error_notifier(ch_tsg,
err_code); err_code);
gk20a_channel_put(ch_tsg); gk20a_channel_put(ch_tsg);
} }
@@ -747,8 +747,8 @@ int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch)
g->ops.tsg.enable(tsg); g->ops.tsg.enable(tsg);
} }
if (ch->g->ops.fifo.ch_abort_clean_up != NULL) { if (ch->g->ops.channel.abort_clean_up != NULL) {
ch->g->ops.fifo.ch_abort_clean_up(ch); ch->g->ops.channel.abort_clean_up(ch);
} }
return 0; return 0;
@@ -881,7 +881,7 @@ void gk20a_fifo_preempt_timeout_rc_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
if (gk20a_channel_get(ch) == NULL) { if (gk20a_channel_get(ch) == NULL) {
continue; continue;
} }
g->ops.fifo.set_error_notifier(ch, g->ops.channel.set_error_notifier(ch,
NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT); NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
gk20a_channel_put(ch); gk20a_channel_put(ch);
} }
@@ -893,7 +893,7 @@ void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, struct channel_gk20a *ch)
{ {
nvgpu_err(g, "preempt channel %d timeout", ch->chid); nvgpu_err(g, "preempt channel %d timeout", ch->chid);
g->ops.fifo.set_error_notifier(ch, g->ops.channel.set_error_notifier(ch,
NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT); NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
nvgpu_channel_recover(g, ch, true, RC_TYPE_PREEMPT_TIMEOUT); nvgpu_channel_recover(g, ch, true, RC_TYPE_PREEMPT_TIMEOUT);
} }

View File

@@ -638,10 +638,6 @@ static const struct gpu_ops gm20b_ops = {
.tsg_bind_channel = gk20a_tsg_bind_channel, .tsg_bind_channel = gk20a_tsg_bind_channel,
.tsg_unbind_channel = gk20a_fifo_tsg_unbind_channel, .tsg_unbind_channel = gk20a_fifo_tsg_unbind_channel,
.post_event_id = gk20a_tsg_event_id_post_event, .post_event_id = gk20a_tsg_event_id_post_event,
.ch_abort_clean_up = gk20a_channel_abort_clean_up,
.channel_suspend = gk20a_channel_suspend,
.channel_resume = gk20a_channel_resume,
.set_error_notifier = nvgpu_set_error_notifier,
.setup_sw = nvgpu_fifo_setup_sw, .setup_sw = nvgpu_fifo_setup_sw,
.cleanup_sw = nvgpu_fifo_cleanup_sw, .cleanup_sw = nvgpu_fifo_cleanup_sw,
.set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask, .set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,
@@ -759,6 +755,12 @@ static const struct gpu_ops gm20b_ops = {
.count = gm20b_channel_count, .count = gm20b_channel_count,
.read_state = gk20a_channel_read_state, .read_state = gk20a_channel_read_state,
.force_ctx_reload = gm20b_channel_force_ctx_reload, .force_ctx_reload = gm20b_channel_force_ctx_reload,
.abort_clean_up = nvgpu_channel_abort_clean_up,
.suspend_all_serviceable_ch =
nvgpu_channel_suspend_all_serviceable_ch,
.resume_all_serviceable_ch =
nvgpu_channel_resume_all_serviceable_ch,
.set_error_notifier = nvgpu_set_error_notifier,
}, },
.tsg = { .tsg = {
.enable = gk20a_tsg_enable, .enable = gk20a_tsg_enable,

View File

@@ -726,10 +726,6 @@ static const struct gpu_ops gp10b_ops = {
.tsg_bind_channel = gk20a_tsg_bind_channel, .tsg_bind_channel = gk20a_tsg_bind_channel,
.tsg_unbind_channel = gk20a_fifo_tsg_unbind_channel, .tsg_unbind_channel = gk20a_fifo_tsg_unbind_channel,
.post_event_id = gk20a_tsg_event_id_post_event, .post_event_id = gk20a_tsg_event_id_post_event,
.ch_abort_clean_up = gk20a_channel_abort_clean_up,
.channel_suspend = gk20a_channel_suspend,
.channel_resume = gk20a_channel_resume,
.set_error_notifier = nvgpu_set_error_notifier,
.setup_sw = nvgpu_fifo_setup_sw, .setup_sw = nvgpu_fifo_setup_sw,
.cleanup_sw = nvgpu_fifo_cleanup_sw, .cleanup_sw = nvgpu_fifo_cleanup_sw,
.set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask, .set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,
@@ -850,6 +846,12 @@ static const struct gpu_ops gp10b_ops = {
.read_state = gk20a_channel_read_state, .read_state = gk20a_channel_read_state,
.force_ctx_reload = gm20b_channel_force_ctx_reload, .force_ctx_reload = gm20b_channel_force_ctx_reload,
.set_syncpt = nvgpu_channel_set_syncpt, .set_syncpt = nvgpu_channel_set_syncpt,
.abort_clean_up = nvgpu_channel_abort_clean_up,
.suspend_all_serviceable_ch =
nvgpu_channel_suspend_all_serviceable_ch,
.resume_all_serviceable_ch =
nvgpu_channel_resume_all_serviceable_ch,
.set_error_notifier = nvgpu_set_error_notifier,
}, },
.tsg = { .tsg = {
.enable = gk20a_tsg_enable, .enable = gk20a_tsg_enable,

View File

@@ -907,13 +907,8 @@ static const struct gpu_ops gv100_ops = {
.tsg_bind_channel = gk20a_tsg_bind_channel, .tsg_bind_channel = gk20a_tsg_bind_channel,
.tsg_unbind_channel = gk20a_fifo_tsg_unbind_channel, .tsg_unbind_channel = gk20a_fifo_tsg_unbind_channel,
.post_event_id = gk20a_tsg_event_id_post_event, .post_event_id = gk20a_tsg_event_id_post_event,
.ch_abort_clean_up = gk20a_channel_abort_clean_up,
.channel_suspend = gk20a_channel_suspend,
.channel_resume = gk20a_channel_resume,
.set_error_notifier = nvgpu_set_error_notifier_if_empty,
.setup_sw = nvgpu_fifo_setup_sw, .setup_sw = nvgpu_fifo_setup_sw,
.cleanup_sw = nvgpu_fifo_cleanup_sw, .cleanup_sw = nvgpu_fifo_cleanup_sw,
.free_channel_ctx_header = gv11b_free_subctx_header,
.ring_channel_doorbell = gv11b_ring_channel_doorbell, .ring_channel_doorbell = gv11b_ring_channel_doorbell,
.set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask, .set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,
.usermode_base = gv11b_fifo_usermode_base, .usermode_base = gv11b_fifo_usermode_base,
@@ -1031,6 +1026,13 @@ static const struct gpu_ops gv100_ops = {
.count = gv100_channel_count, .count = gv100_channel_count,
.read_state = gv11b_channel_read_state, .read_state = gv11b_channel_read_state,
.force_ctx_reload = gm20b_channel_force_ctx_reload, .force_ctx_reload = gm20b_channel_force_ctx_reload,
.free_ctx_header = gv11b_channel_free_subctx_header,
.abort_clean_up = nvgpu_channel_abort_clean_up,
.suspend_all_serviceable_ch =
nvgpu_channel_suspend_all_serviceable_ch,
.resume_all_serviceable_ch =
nvgpu_channel_resume_all_serviceable_ch,
.set_error_notifier = nvgpu_set_error_notifier_if_empty,
.reset_faulted = gv11b_channel_reset_faulted, .reset_faulted = gv11b_channel_reset_faulted,
}, },
.tsg = { .tsg = {

View File

@@ -1375,7 +1375,6 @@ static int gr_gv11b_handle_all_warp_esr_errors(struct gk20a *g,
struct channel_gk20a *fault_ch) struct channel_gk20a *fault_ch)
{ {
struct tsg_gk20a *tsg; struct tsg_gk20a *tsg;
struct channel_gk20a *ch_tsg;
u32 offset = 0U; u32 offset = 0U;
bool is_esr_error = false; bool is_esr_error = false;
@@ -1407,16 +1406,8 @@ static int gr_gv11b_handle_all_warp_esr_errors(struct gk20a *g,
return 0; return 0;
} }
nvgpu_rwsem_down_read(&tsg->ch_list_lock); nvgpu_tsg_set_error_notifier(g, tsg,
nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
channel_gk20a, ch_entry) {
if (gk20a_channel_get(ch_tsg) != NULL) {
g->ops.fifo.set_error_notifier(ch_tsg,
NVGPU_ERR_NOTIFIER_GR_EXCEPTION); NVGPU_ERR_NOTIFIER_GR_EXCEPTION);
gk20a_channel_put(ch_tsg);
}
}
nvgpu_rwsem_up_read(&tsg->ch_list_lock);
} }
/* clear interrupt */ /* clear interrupt */

View File

@@ -864,13 +864,8 @@ static const struct gpu_ops gv11b_ops = {
.tsg_bind_channel = gk20a_tsg_bind_channel, .tsg_bind_channel = gk20a_tsg_bind_channel,
.tsg_unbind_channel = gk20a_fifo_tsg_unbind_channel, .tsg_unbind_channel = gk20a_fifo_tsg_unbind_channel,
.post_event_id = gk20a_tsg_event_id_post_event, .post_event_id = gk20a_tsg_event_id_post_event,
.ch_abort_clean_up = gk20a_channel_abort_clean_up,
.channel_suspend = gk20a_channel_suspend,
.channel_resume = gk20a_channel_resume,
.set_error_notifier = nvgpu_set_error_notifier_if_empty,
.setup_sw = nvgpu_fifo_setup_sw, .setup_sw = nvgpu_fifo_setup_sw,
.cleanup_sw = nvgpu_fifo_cleanup_sw, .cleanup_sw = nvgpu_fifo_cleanup_sw,
.free_channel_ctx_header = gv11b_free_subctx_header,
.ring_channel_doorbell = gv11b_ring_channel_doorbell, .ring_channel_doorbell = gv11b_ring_channel_doorbell,
.set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask, .set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,
.usermode_base = gv11b_fifo_usermode_base, .usermode_base = gv11b_fifo_usermode_base,
@@ -990,6 +985,13 @@ static const struct gpu_ops gv11b_ops = {
.count = gv11b_channel_count, .count = gv11b_channel_count,
.read_state = gv11b_channel_read_state, .read_state = gv11b_channel_read_state,
.force_ctx_reload = gm20b_channel_force_ctx_reload, .force_ctx_reload = gm20b_channel_force_ctx_reload,
.free_ctx_header = gv11b_channel_free_subctx_header,
.abort_clean_up = nvgpu_channel_abort_clean_up,
.suspend_all_serviceable_ch =
nvgpu_channel_suspend_all_serviceable_ch,
.resume_all_serviceable_ch =
nvgpu_channel_resume_all_serviceable_ch,
.set_error_notifier = nvgpu_set_error_notifier_if_empty,
.reset_faulted = gv11b_channel_reset_faulted, .reset_faulted = gv11b_channel_reset_faulted,
}, },
.tsg = { .tsg = {

View File

@@ -31,18 +31,18 @@
#include "userd_gv11b.h" #include "userd_gv11b.h"
u32 gv11b_userd_gp_get(struct gk20a *g, struct channel_gk20a *c) u32 gv11b_userd_gp_get(struct gk20a *g, struct channel_gk20a *ch)
{ {
struct nvgpu_mem *mem = c->userd_mem; struct nvgpu_mem *mem = ch->userd_mem;
u32 offset = c->userd_offset / U32(sizeof(u32)); u32 offset = ch->userd_offset / U32(sizeof(u32));
return nvgpu_mem_rd32(g, mem, offset + ram_userd_gp_get_w()); return nvgpu_mem_rd32(g, mem, offset + ram_userd_gp_get_w());
} }
u64 gv11b_userd_pb_get(struct gk20a *g, struct channel_gk20a *c) u64 gv11b_userd_pb_get(struct gk20a *g, struct channel_gk20a *ch)
{ {
struct nvgpu_mem *mem = c->userd_mem; struct nvgpu_mem *mem = ch->userd_mem;
u32 offset = c->userd_offset / U32(sizeof(u32)); u32 offset = ch->userd_offset / U32(sizeof(u32));
u32 lo, hi; u32 lo, hi;
lo = nvgpu_mem_rd32(g, mem, offset + ram_userd_get_w()); lo = nvgpu_mem_rd32(g, mem, offset + ram_userd_get_w());
@@ -51,14 +51,14 @@ u64 gv11b_userd_pb_get(struct gk20a *g, struct channel_gk20a *c)
return ((u64)hi << 32) | lo; return ((u64)hi << 32) | lo;
} }
void gv11b_userd_gp_put(struct gk20a *g, struct channel_gk20a *c) void gv11b_userd_gp_put(struct gk20a *g, struct channel_gk20a *ch)
{ {
struct nvgpu_mem *mem = c->userd_mem; struct nvgpu_mem *mem = ch->userd_mem;
u32 offset = c->userd_offset / U32(sizeof(u32)); u32 offset = ch->userd_offset / U32(sizeof(u32));
nvgpu_mem_wr32(g, mem, offset + ram_userd_gp_put_w(), c->gpfifo.put); nvgpu_mem_wr32(g, mem, offset + ram_userd_gp_put_w(), ch->gpfifo.put);
/* Commit everything to GPU. */ /* Commit everything to GPU. */
nvgpu_mb(); nvgpu_mb();
g->ops.fifo.ring_channel_doorbell(c); g->ops.fifo.ring_channel_doorbell(ch);
} }

View File

@@ -26,8 +26,8 @@
struct gk20a; struct gk20a;
struct channel_gk20a; struct channel_gk20a;
u32 gv11b_userd_gp_get(struct gk20a *g, struct channel_gk20a *c); u32 gv11b_userd_gp_get(struct gk20a *g, struct channel_gk20a *ch);
u64 gv11b_userd_pb_get(struct gk20a *g, struct channel_gk20a *c); u64 gv11b_userd_pb_get(struct gk20a *g, struct channel_gk20a *ch);
void gv11b_userd_gp_put(struct gk20a *g, struct channel_gk20a *c); void gv11b_userd_gp_put(struct gk20a *g, struct channel_gk20a *ch);
#endif /* USERD_GV11B_H */ #endif /* USERD_GV11B_H */

View File

@@ -381,7 +381,7 @@ void nvgpu_channel_recover(struct gk20a *g, struct channel_gk20a *ch,
void gk20a_disable_channel(struct channel_gk20a *ch); void gk20a_disable_channel(struct channel_gk20a *ch);
void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt); void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt);
void gk20a_channel_abort_clean_up(struct channel_gk20a *ch); void nvgpu_channel_abort_clean_up(struct channel_gk20a *ch);
void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events); void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events);
int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size, int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
struct priv_cmd_entry *e); struct priv_cmd_entry *e);
@@ -390,8 +390,8 @@ int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e);
int gk20a_enable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch); int gk20a_enable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch);
int gk20a_disable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch); int gk20a_disable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch);
int gk20a_channel_suspend(struct gk20a *g); int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g);
int gk20a_channel_resume(struct gk20a *g); int nvgpu_channel_resume_all_serviceable_ch(struct gk20a *g);
void gk20a_channel_deterministic_idle(struct gk20a *g); void gk20a_channel_deterministic_idle(struct gk20a *g);
void gk20a_channel_deterministic_unidle(struct gk20a *g); void gk20a_channel_deterministic_unidle(struct gk20a *g);

View File

@@ -960,7 +960,9 @@ struct gpu_ops {
int (*tsg_open)(struct tsg_gk20a *tsg); int (*tsg_open)(struct tsg_gk20a *tsg);
void (*tsg_release)(struct tsg_gk20a *tsg); void (*tsg_release)(struct tsg_gk20a *tsg);
int (*init_pbdma_info)(struct fifo_gk20a *f); int (*init_pbdma_info)(struct fifo_gk20a *f);
void (*free_channel_ctx_header)(struct channel_gk20a *ch); int (*init_engine_info)(struct fifo_gk20a *f);
u32 (*get_engines_mask_on_id)(struct gk20a *g,
u32 id, bool is_tsg);
void (*dump_channel_status_ramfc)(struct gk20a *g, void (*dump_channel_status_ramfc)(struct gk20a *g,
struct gk20a_debug_output *o, struct gk20a_debug_output *o,
struct nvgpu_channel_dump_info *info); struct nvgpu_channel_dump_info *info);
@@ -978,10 +980,6 @@ struct gpu_ops {
struct tsg_gk20a *tsg); struct tsg_gk20a *tsg);
u32 (*get_preempt_timeout)(struct gk20a *g); u32 (*get_preempt_timeout)(struct gk20a *g);
void (*post_event_id)(struct tsg_gk20a *tsg, int event_id); void (*post_event_id)(struct tsg_gk20a *tsg, int event_id);
void (*ch_abort_clean_up)(struct channel_gk20a *ch);
int (*channel_suspend)(struct gk20a *g);
int (*channel_resume)(struct gk20a *g);
void (*set_error_notifier)(struct channel_gk20a *ch, u32 error);
void (*ring_channel_doorbell)(struct channel_gk20a *c); void (*ring_channel_doorbell)(struct channel_gk20a *c);
u64 (*usermode_base)(struct gk20a *g); u64 (*usermode_base)(struct gk20a *g);
u32 (*doorbell_token)(struct channel_gk20a *c); u32 (*doorbell_token)(struct channel_gk20a *c);
@@ -1142,6 +1140,11 @@ struct gpu_ops {
void (*read_state)(struct gk20a *g, struct channel_gk20a *ch, void (*read_state)(struct gk20a *g, struct channel_gk20a *ch,
struct nvgpu_channel_hw_state *state); struct nvgpu_channel_hw_state *state);
void (*force_ctx_reload)(struct channel_gk20a *ch); void (*force_ctx_reload)(struct channel_gk20a *ch);
void (*free_ctx_header)(struct channel_gk20a *ch);
void (*abort_clean_up)(struct channel_gk20a *ch);
int (*suspend_all_serviceable_ch)(struct gk20a *g);
int (*resume_all_serviceable_ch)(struct gk20a *g);
void (*set_error_notifier)(struct channel_gk20a *ch, u32 error);
void (*reset_faulted)(struct gk20a *g, struct channel_gk20a *ch, void (*reset_faulted)(struct gk20a *g, struct channel_gk20a *ch,
bool eng, bool pbdma); bool eng, bool pbdma);
int (*set_syncpt)(struct channel_gk20a *ch); int (*set_syncpt)(struct channel_gk20a *ch);

View File

@@ -166,10 +166,13 @@ int vgpu_pm_prepare_poweroff(struct device *dev)
if (!g->power_on) if (!g->power_on)
goto done; goto done;
if (g->ops.fifo.channel_suspend) if (g->ops.channel.suspend_all_serviceable_ch != NULL) {
ret = g->ops.fifo.channel_suspend(g); ret = g->ops.channel.suspend_all_serviceable_ch(g);
if (ret) }
if (ret != 0) {
goto done; goto done;
}
g->power_on = false; g->power_on = false;
done: done:

View File

@@ -29,7 +29,7 @@ struct gk20a;
struct channel_gk20a; struct channel_gk20a;
int tu104_init_fifo_setup_hw(struct gk20a *g); int tu104_init_fifo_setup_hw(struct gk20a *g);
void tu104_ring_channel_doorbell(struct channel_gk20a *c); void tu104_ring_channel_doorbell(struct channel_gk20a *ch);
u64 tu104_fifo_usermode_base(struct gk20a *g); u64 tu104_fifo_usermode_base(struct gk20a *g);
u32 tu104_fifo_doorbell_token(struct channel_gk20a *c); u32 tu104_fifo_doorbell_token(struct channel_gk20a *c);

View File

@@ -942,13 +942,8 @@ static const struct gpu_ops tu104_ops = {
.tsg_bind_channel = gk20a_tsg_bind_channel, .tsg_bind_channel = gk20a_tsg_bind_channel,
.tsg_unbind_channel = gk20a_fifo_tsg_unbind_channel, .tsg_unbind_channel = gk20a_fifo_tsg_unbind_channel,
.post_event_id = gk20a_tsg_event_id_post_event, .post_event_id = gk20a_tsg_event_id_post_event,
.ch_abort_clean_up = gk20a_channel_abort_clean_up,
.channel_suspend = gk20a_channel_suspend,
.channel_resume = gk20a_channel_resume,
.set_error_notifier = nvgpu_set_error_notifier_if_empty,
.setup_sw = nvgpu_fifo_setup_sw, .setup_sw = nvgpu_fifo_setup_sw,
.cleanup_sw = nvgpu_fifo_cleanup_sw, .cleanup_sw = nvgpu_fifo_cleanup_sw,
.free_channel_ctx_header = gv11b_free_subctx_header,
.ring_channel_doorbell = tu104_ring_channel_doorbell, .ring_channel_doorbell = tu104_ring_channel_doorbell,
.usermode_base = tu104_fifo_usermode_base, .usermode_base = tu104_fifo_usermode_base,
.doorbell_token = tu104_fifo_doorbell_token, .doorbell_token = tu104_fifo_doorbell_token,
@@ -1068,6 +1063,13 @@ static const struct gpu_ops tu104_ops = {
.count = gv100_channel_count, .count = gv100_channel_count,
.read_state = gv11b_channel_read_state, .read_state = gv11b_channel_read_state,
.force_ctx_reload = gm20b_channel_force_ctx_reload, .force_ctx_reload = gm20b_channel_force_ctx_reload,
.free_ctx_header = gv11b_channel_free_subctx_header,
.abort_clean_up = nvgpu_channel_abort_clean_up,
.suspend_all_serviceable_ch =
nvgpu_channel_suspend_all_serviceable_ch,
.resume_all_serviceable_ch =
nvgpu_channel_resume_all_serviceable_ch,
.set_error_notifier = nvgpu_set_error_notifier_if_empty,
.reset_faulted = gv11b_channel_reset_faulted, .reset_faulted = gv11b_channel_reset_faulted,
}, },
.tsg = { .tsg = {