gpu: nvgpu: verify channel status while closing per-platform

We currently call gk20a_fifo_tsg_unbind_channel_verify_status() to verify
channel status when unbinding a channel from its TSG during channel close.

Add support to perform this verification per-platform, and keep it
disabled for vgpu platforms.

Bug 200327095

Change-Id: I19fab41c74d10d528d22bd9b3982a4ed73c3b4ca
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1572368
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Deepak Nibade
Date: 2017-10-03 00:51:07 -07:00
Committed by: mobile promotions
Parent: e400475a91
Commit: 3cd0603c42
6 changed files with 11 additions and 4 deletions
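
Before the diffs, a minimal self-contained sketch of the pattern this change introduces: an optional per-platform hook in the HAL ops table that common code invokes only when the platform assigned it. The names and types below are simplified stand-ins for illustration, not the real nvgpu definitions.

    #include <stdio.h>

    struct channel {
            int chid;
    };

    struct fifo_ops {
            /* left NULL on platforms (e.g. vgpu) that skip the check */
            int (*tsg_verify_channel_status)(struct channel *ch);
    };

    static int native_tsg_verify_channel_status(struct channel *ch)
    {
            printf("verifying status of channel %d\n", ch->chid);
            return 0; /* 0 == status is sane, unbind may proceed */
    }

    static int tsg_unbind_channel(const struct fifo_ops *ops,
                                  struct channel *ch)
    {
            /* only verify where the platform wired up the hook */
            if (ops->tsg_verify_channel_status) {
                    int err = ops->tsg_verify_channel_status(ch);
                    if (err)
                            return err; /* analogous to goto fail_enable_tsg */
            }
            /* ... continue with the runlist update ... */
            return 0;
    }

    int main(void)
    {
            struct channel ch = { .chid = 3 };
            const struct fifo_ops native = {
                    .tsg_verify_channel_status = native_tsg_verify_channel_status,
            };
            const struct fifo_ops vgpu = { 0 }; /* hook left NULL: disabled */

            tsg_unbind_channel(&native, &ch); /* runs the verification */
            tsg_unbind_channel(&vgpu, &ch);   /* skips it entirely */
            return 0;
    }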


@@ -1908,7 +1908,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
 	return 0;
 }
 
-static int gk20a_fifo_tsg_unbind_channel_verify_status(struct channel_gk20a *ch)
+int gk20a_fifo_tsg_unbind_channel_verify_status(struct channel_gk20a *ch)
 {
 	struct gk20a *g = ch->g;
@@ -1939,9 +1939,11 @@ int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch)
 	if (err)
 		goto fail_enable_tsg;
 
-	err = gk20a_fifo_tsg_unbind_channel_verify_status(ch);
-	if (err)
-		goto fail_enable_tsg;
+	if (g->ops.fifo.tsg_verify_channel_status) {
+		err = g->ops.fifo.tsg_verify_channel_status(ch);
+		if (err)
+			goto fail_enable_tsg;
+	}
 
 	/* Channel should be seen as TSG channel while updating runlist */
 	err = channel_gk20a_update_runlist(ch, false);
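
The fallthrough above is the crux of the per-platform behaviour: a platform that leaves g->ops.fifo.tsg_verify_channel_status unset (per the commit message, the vgpu platforms) skips the verification entirely and proceeds straight to the runlist update, so no stub implementation is needed on its side.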


@@ -373,6 +373,7 @@ void gk20a_fifo_disable_channel(struct channel_gk20a *ch);
 bool gk20a_fifo_channel_status_is_next(struct gk20a *g, u32 chid);
 bool gk20a_fifo_channel_status_is_ctx_reload(struct gk20a *g, u32 chid);
+int gk20a_fifo_tsg_unbind_channel_verify_status(struct channel_gk20a *ch);
 
 struct channel_gk20a *gk20a_refch_from_inst_ptr(struct gk20a *g, u64 inst_ptr);
 void gk20a_fifo_channel_unbind(struct channel_gk20a *ch_gk20a);


@@ -491,6 +491,7 @@ struct gpu_ops {
 		int (*preempt_tsg)(struct gk20a *g, u32 tsgid);
 		int (*enable_tsg)(struct tsg_gk20a *tsg);
 		int (*disable_tsg)(struct tsg_gk20a *tsg);
+		int (*tsg_verify_channel_status)(struct channel_gk20a *ch);
 		void (*tsg_verify_status_ctx_reload)(struct channel_gk20a *ch);
 		void (*tsg_verify_status_faulted)(struct channel_gk20a *ch);
 		int (*reschedule_runlist)(struct gk20a *g, u32 runlist_id);
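
A small design note: the new op is placed beside the existing tsg_verify_status_ctx_reload and tsg_verify_status_faulted hooks, keeping the TSG status-verification callbacks grouped together in struct gpu_ops.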


@@ -379,6 +379,7 @@ static const struct gpu_ops gm20b_ops = {
 		.preempt_tsg = gk20a_fifo_preempt_tsg,
 		.enable_tsg = gk20a_enable_tsg,
 		.disable_tsg = gk20a_disable_tsg,
+		.tsg_verify_channel_status = gk20a_fifo_tsg_unbind_channel_verify_status,
 		.tsg_verify_status_ctx_reload = gm20b_fifo_tsg_verify_status_ctx_reload,
 		.update_runlist = gk20a_fifo_update_runlist,
 		.trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault,


@@ -438,6 +438,7 @@ static const struct gpu_ops gp106_ops = {
 		.preempt_tsg = gk20a_fifo_preempt_tsg,
 		.enable_tsg = gk20a_enable_tsg,
 		.disable_tsg = gk20a_disable_tsg,
+		.tsg_verify_channel_status = gk20a_fifo_tsg_unbind_channel_verify_status,
 		.tsg_verify_status_ctx_reload = gm20b_fifo_tsg_verify_status_ctx_reload,
 		.update_runlist = gk20a_fifo_update_runlist,
 		.trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault,


@@ -400,6 +400,7 @@ static const struct gpu_ops gp10b_ops = {
 		.preempt_tsg = gk20a_fifo_preempt_tsg,
 		.enable_tsg = gk20a_enable_tsg,
 		.disable_tsg = gk20a_disable_tsg,
+		.tsg_verify_channel_status = gk20a_fifo_tsg_unbind_channel_verify_status,
 		.tsg_verify_status_ctx_reload = gm20b_fifo_tsg_verify_status_ctx_reload,
 		.reschedule_runlist = gk20a_fifo_reschedule_runlist,
 		.update_runlist = gk20a_fifo_update_runlist,