gpu: nvgpu: channel MISRA fix for Rule 17.7

Check the return value of the following functions:
  gk20a_enable_channel_tsg
  gk20a_disable_channel_tsg

Rename:
  gk20a_disable_channel_tsg -> nvgpu_channel_disable_tsg
  gk20a_enable_channel_tsg -> nvgpu_channel_enable_tsg

JIRA NVGPU-3388

Change-Id: I0c18c4a14a872cecb12ae3089da886be9da43914
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2115211
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Seema Khowala authored 2019-05-08 21:59:45 -07:00; committed by mobile promotions
parent 54e179ddad
commit 6f5cd4027c
5 changed files with 153 additions and 51 deletions
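
MISRA C:2012 Rule 17.7 requires that the value returned by a non-void function be used (or explicitly discarded). The diffs below apply one recurring shape: check the result of the disable call, bail out early when it fails, and route later failures through a common out: label that re-enables the channel/TSG before propagating the original error. Below is a minimal compilable sketch of that shape; the gpu/channel types and the channel_* helpers are illustrative stand-ins, not the actual nvgpu API:

#include <stdio.h>

struct gpu { int id; };
struct channel { int chid; };

/* Stand-ins for nvgpu_channel_disable_tsg()/nvgpu_channel_enable_tsg()/
 * nvgpu_preempt_channel(); each returns 0 on success, negative on error. */
static int channel_disable_tsg(struct gpu *g, struct channel *ch) { (void)g; (void)ch; return 0; }
static int channel_enable_tsg(struct gpu *g, struct channel *ch) { (void)g; (void)ch; return 0; }
static int preempt_channel(struct gpu *g, struct channel *ch) { (void)g; (void)ch; return 0; }
static void update_channel_state(struct gpu *g, struct channel *ch) { (void)g; (void)ch; }

static int update_with_channel_idle(struct gpu *g, struct channel *ch)
{
        int err;

        /* Rule 17.7: the result of disable must be consumed */
        err = channel_disable_tsg(g, ch);
        if (err != 0) {
                fprintf(stderr, "failed to disable channel/TSG\n");
                return err; /* nothing to undo yet */
        }

        err = preempt_channel(g, ch);
        if (err != 0) {
                goto out; /* re-enable before propagating the error */
        }

        update_channel_state(g, ch);

        /* no error at this point */
        err = channel_enable_tsg(g, ch);
        if (err != 0) {
                fprintf(stderr, "failed to enable channel/TSG\n");
        }
        return err;

out:
        /* propagate the original err, but still report enable failures */
        if (channel_enable_tsg(g, ch) != 0) {
                fprintf(stderr, "failed to enable channel/TSG\n");
        }
        return err;
}

int main(void)
{
        struct gpu g = { 0 };
        struct channel ch = { 0 };

        return update_with_channel_idle(&g, &ch);
}

Note that the out: path reports a failed re-enable but still returns the first error, matching the "Propagate preempt failure err" comments in the hunks below.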


@@ -141,7 +141,7 @@ int channel_gk20a_update_runlist(struct nvgpu_channel *c, bool add)
 		c, add, true);
 }
 
-int gk20a_enable_channel_tsg(struct gk20a *g, struct nvgpu_channel *ch)
+int nvgpu_channel_enable_tsg(struct gk20a *g, struct nvgpu_channel *ch)
 {
 	struct nvgpu_tsg *tsg;
@@ -150,11 +150,12 @@ int gk20a_enable_channel_tsg(struct gk20a *g, struct nvgpu_channel *ch)
 		g->ops.tsg.enable(tsg);
 		return 0;
 	} else {
+		nvgpu_err(ch->g, "chid: %d is not bound to tsg", ch->chid);
 		return -EINVAL;
 	}
 }
 
-int gk20a_disable_channel_tsg(struct gk20a *g, struct nvgpu_channel *ch)
+int nvgpu_channel_disable_tsg(struct gk20a *g, struct nvgpu_channel *ch)
 {
 	struct nvgpu_tsg *tsg;
@@ -163,6 +164,7 @@ int gk20a_disable_channel_tsg(struct gk20a *g, struct nvgpu_channel *ch)
 		g->ops.tsg.disable(tsg);
 		return 0;
 	} else {
+		nvgpu_err(ch->g, "chid: %d is not bound to tsg", ch->chid);
 		return -EINVAL;
 	}
 }
@@ -1162,6 +1164,7 @@ int nvgpu_channel_set_syncpt(struct nvgpu_channel *ch)
 	struct nvgpu_channel_sync_syncpt *sync_syncpt;
 	u32 new_syncpt = 0U;
 	u32 old_syncpt = g->ops.ramfc.get_syncpt(ch);
+	int err = 0;
 
 	if (ch->sync != NULL) {
 		sync_syncpt = nvgpu_channel_sync_to_syncpt(ch->sync);
@@ -1170,25 +1173,43 @@ int nvgpu_channel_set_syncpt(struct nvgpu_channel *ch)
 			nvgpu_channel_sync_get_syncpt_id(sync_syncpt);
 		} else {
 			new_syncpt = NVGPU_INVALID_SYNCPT_ID;
-			/* ??? */
+			return -EINVAL;
 		}
 	} else {
 		return -EINVAL;
 	}
 
 	if ((new_syncpt != 0U) && (new_syncpt != old_syncpt)) {
 		/* disable channel */
-		gk20a_disable_channel_tsg(g, ch);
+		err = nvgpu_channel_disable_tsg(g, ch);
+		if (err != 0) {
+			nvgpu_err(g, "failed to disable channel/TSG");
+			return err;
+		}
 
 		/* preempt the channel */
-		nvgpu_assert(nvgpu_preempt_channel(g, ch) == 0);
+		err = nvgpu_preempt_channel(g, ch);
+		nvgpu_assert(err == 0);
+		if (err != 0) {
+			goto out;
+		}
+		/* no error at this point */
 		g->ops.ramfc.set_syncpt(ch, new_syncpt);
+
+		err = nvgpu_channel_enable_tsg(g, ch);
+		if (err != 0) {
+			nvgpu_err(g, "failed to enable channel/TSG");
+		}
 	}
 
-	/* enable channel */
-	gk20a_enable_channel_tsg(g, ch);
-
 	nvgpu_log_fn(g, "done");
-	return 0;
+	return err;
+
+out:
+	if (nvgpu_channel_enable_tsg(g, ch) != 0) {
+		nvgpu_err(g, "failed to enable channel/TSG");
+	}
+	return err;
 }
 
 int nvgpu_channel_setup_bind(struct nvgpu_channel *c,
@@ -2405,7 +2426,9 @@ int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g)
 		} else {
 			nvgpu_log_info(g, "suspend channel %d", chid);
 			/* disable channel */
-			gk20a_disable_channel_tsg(g, ch);
+			if (nvgpu_channel_disable_tsg(g, ch) != 0) {
+				nvgpu_err(g, "failed to disable channel/TSG");
+			}
 			/* preempt the channel */
 			nvgpu_assert(nvgpu_preempt_channel(g, ch) == 0);
 			/* wait for channel update notifiers */


@@ -39,7 +39,7 @@ static int nvgpu_gr_setup_zcull(struct gk20a *g, struct nvgpu_channel *c,
 	nvgpu_log_fn(g, " ");
 
-	ret = gk20a_disable_channel_tsg(g, c);
+	ret = nvgpu_channel_disable_tsg(g, c);
 	if (ret != 0) {
 		nvgpu_err(g, "failed to disable channel/TSG");
 		return ret;
@@ -47,20 +47,31 @@ static int nvgpu_gr_setup_zcull(struct gk20a *g, struct nvgpu_channel *c,
 	ret = nvgpu_preempt_channel(g, c);
 	if (ret != 0) {
-		if (gk20a_enable_channel_tsg(g, c) != 0) {
-			nvgpu_err(g, "failed to re-enable channel/TSG");
-		}
 		nvgpu_err(g, "failed to preempt channel/TSG");
-		return ret;
+		goto out;
 	}
 
 	ret = nvgpu_gr_zcull_ctx_setup(g, c->subctx, gr_ctx);
 	if (ret != 0) {
 		nvgpu_err(g, "failed to setup zcull");
+		goto out;
+	}
+
+	/* no error at this point */
+	ret = nvgpu_channel_enable_tsg(g, c);
+	if (ret != 0) {
+		nvgpu_err(g, "failed to re-enable channel/TSG");
 	}
 
-	ret = gk20a_enable_channel_tsg(g, c);
-	if (ret != 0) {
-		nvgpu_err(g, "failed to re-enable channel/TSG");
-	}
-
 	return ret;
 
+out:
+	/*
+	 * control reaches here if preempt failed or nvgpu_gr_zcull_ctx_setup
+	 * failed. Propagate preempt failure err or err for
+	 * nvgpu_gr_zcull_ctx_setup
+	 */
+	if (nvgpu_channel_enable_tsg(g, c) != 0) {
+		/* ch might not be bound to tsg */
+		nvgpu_err(g, "failed to enable channel/TSG");
+	}
@@ -263,7 +274,7 @@ int nvgpu_gr_setup_set_preemption_mode(struct nvgpu_channel *ch,
 		return err;
 	}
 
-	err = gk20a_disable_channel_tsg(g, ch);
+	err = nvgpu_channel_disable_tsg(g, ch);
 	if (err != 0) {
 		nvgpu_err(g, "failed to disable channel/TSG");
 		return err;
@@ -287,7 +298,7 @@ int nvgpu_gr_setup_set_preemption_mode(struct nvgpu_channel *ch,
 			true);
 	nvgpu_gr_ctx_patch_write_end(g, gr_ctx, true);
 
-	err = gk20a_enable_channel_tsg(g, ch);
+	err = nvgpu_channel_enable_tsg(g, ch);
 	if (err != 0) {
 		nvgpu_err(g, "failed to re-enable channel/TSG");
 	}
@@ -295,7 +306,7 @@ int nvgpu_gr_setup_set_preemption_mode(struct nvgpu_channel *ch,
 	return err;
 
 enable_ch:
-	if (gk20a_enable_channel_tsg(g, ch) != 0) {
+	if (nvgpu_channel_enable_tsg(g, ch) != 0) {
 		nvgpu_err(g, "failed to re-enable channel/TSG");
 	}
 	return err;


@@ -66,22 +66,39 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
 		return -EINVAL;
 	}
 
-	ret = gk20a_disable_channel_tsg(g, c);
+	ret = nvgpu_channel_disable_tsg(g, c);
 	if (ret != 0) {
+		/* ch might not be bound to tsg anymore */
 		nvgpu_err(g, "failed to disable channel/TSG");
-		goto out;
+		return ret;
 	}
 
 	ret = nvgpu_preempt_channel(g, c);
 	if (ret != 0) {
-		gk20a_enable_channel_tsg(g, c);
 		nvgpu_err(g, "failed to preempt channel/TSG");
 		goto out;
 	}
 
 	ret = nvgpu_gr_ctx_set_smpc_mode(g, tsg->gr_ctx, enable_smpc_ctxsw);
+	if (ret != 0) {
+		goto out;
+	}
+
+	/* no error at this point */
+	ret = nvgpu_channel_enable_tsg(g, c);
+	if (ret != 0) {
+		nvgpu_err(g, "failed to enable channel/TSG");
+	}
+
+	return ret;
 
 out:
-	gk20a_enable_channel_tsg(g, c);
+	/*
+	 * control reaches here if preempt failed or nvgpu_gr_ctx_set_smpc_mode
+	 * failed. Propagate preempt failure err or err for
+	 * nvgpu_gr_ctx_set_smpc_mode
+	 */
+	if (nvgpu_channel_enable_tsg(g, c) != 0) {
+		/* ch might not be bound to tsg anymore */
+		nvgpu_err(g, "failed to enable channel/TSG");
+	}
 	return ret;
 }
@@ -132,37 +149,65 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 		return 0;
 	}
 
-	ret = gk20a_disable_channel_tsg(g, c);
+	ret = nvgpu_channel_disable_tsg(g, c);
 	if (ret != 0) {
+		/* ch might not be bound to tsg anymore */
 		nvgpu_err(g, "failed to disable channel/TSG");
 		return ret;
 	}
 
 	ret = nvgpu_preempt_channel(g, c);
 	if (ret != 0) {
-		gk20a_enable_channel_tsg(g, c);
 		nvgpu_err(g, "failed to preempt channel/TSG");
-		return ret;
+		goto out;
 	}
 
 	if (c->subctx != NULL) {
 		struct nvgpu_channel *ch;
+		int err;
 
 		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 		nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
-			ret = nvgpu_gr_ctx_set_hwpm_mode(g, gr_ctx, false);
-			if (ret == 0) {
-				nvgpu_gr_subctx_set_hwpm_mode(g, ch->subctx,
-					gr_ctx);
+			err = nvgpu_gr_ctx_set_hwpm_mode(g, gr_ctx, false);
+			if (err != 0) {
+				nvgpu_err(g, "chid: %d set_hwpm_mode failed",
+					ch->chid);
+				ret = err;
+				continue;
 			}
+			nvgpu_gr_subctx_set_hwpm_mode(g, ch->subctx, gr_ctx);
 		}
 		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
+		if (ret != 0) {
+			goto out;
+		}
 	} else {
 		ret = nvgpu_gr_ctx_set_hwpm_mode(g, gr_ctx, true);
+		if (ret != 0) {
+			goto out;
+		}
 	}
 
+	/* no error at this point */
+	ret = nvgpu_channel_enable_tsg(g, c);
+	if (ret != 0) {
+		nvgpu_err(g, "failed to enable channel/TSG");
+	}
+
+	return ret;
+
-	/* enable channel */
-	gk20a_enable_channel_tsg(g, c);
+out:
+	/*
+	 * control reaches here if preempt failed or
+	 * set_hwpm_mode failed. Propagate preempt failure err or err for
+	 * set_hwpm_mode
+	 */
+	if (nvgpu_channel_enable_tsg(g, c) != 0) {
+		/* ch might not be bound to tsg anymore */
+		nvgpu_err(g, "failed to enable channel/TSG");
+	}
 	return ret;
 }
@@ -1979,7 +2024,10 @@ bool gr_gk20a_suspend_context(struct nvgpu_channel *ch)
 		g->ops.gr.suspend_all_sms(g, 0, false);
 		ctx_resident = true;
 	} else {
-		gk20a_disable_channel_tsg(g, ch);
+		if (nvgpu_channel_disable_tsg(g, ch) != 0) {
+			/* ch might not be bound to tsg anymore */
+			nvgpu_err(g, "failed to disable channel/TSG");
+		}
 	}
 
 	return ctx_resident;
@@ -1994,7 +2042,10 @@ bool gr_gk20a_resume_context(struct nvgpu_channel *ch)
 		g->ops.gr.resume_all_sms(g);
 		ctx_resident = true;
 	} else {
-		gk20a_enable_channel_tsg(g, ch);
+		if (nvgpu_channel_enable_tsg(g, ch) != 0) {
+			/* ch might not be bound to tsg anymore */
+			nvgpu_err(g, "failed to enable channel/TSG");
+		}
 	}
 
 	return ctx_resident;


@@ -394,21 +394,20 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct nvgpu_channel
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
 
-	ret = gk20a_disable_channel_tsg(g, fault_ch);
+	ret = nvgpu_channel_disable_tsg(g, fault_ch);
 	if (ret != 0) {
-		nvgpu_err(g,
-			"CILP: failed to disable channel/TSG!");
+		nvgpu_err(g, "CILP: failed to disable channel/TSG!");
 		return ret;
 	}
 
 	ret = g->ops.runlist.reload(g, fault_ch->runlist_id, true, false);
 	if (ret != 0) {
-		nvgpu_err(g,
-			"CILP: failed to restart runlist 0!");
+		nvgpu_err(g, "CILP: failed to restart runlist 0!");
 		return ret;
 	}
 
-	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: restarted runlist");
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
+		"CILP: restarted runlist");
 
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 		"CILP: tsgid: 0x%x", tsg->tsgid);
@@ -659,7 +658,10 @@ bool gr_gp10b_suspend_context(struct nvgpu_channel *ch,
 		ctx_resident = true;
 	} else {
-		gk20a_disable_channel_tsg(g, ch);
+		if (nvgpu_channel_disable_tsg(g, ch) != 0) {
+			/* ch might not be bound to tsg anymore */
+			nvgpu_err(g, "failed to disable channel/TSG");
+		}
 	}
 
 	return ctx_resident;
@@ -771,14 +773,14 @@ int gr_gp10b_set_boosted_ctx(struct nvgpu_channel *ch,
 	nvgpu_gr_ctx_set_boosted_ctx(gr_ctx, boost);
 	mem = nvgpu_gr_ctx_get_ctx_mem(gr_ctx);
 
-	err = gk20a_disable_channel_tsg(g, ch);
+	err = nvgpu_channel_disable_tsg(g, ch);
 	if (err != 0) {
 		return err;
 	}
 
 	err = nvgpu_preempt_channel(g, ch);
 	if (err != 0) {
-		goto enable_ch;
+		goto out;
 	}
 
 	if (g->ops.gr.ctxsw_prog.set_pmu_options_boost_clock_frequencies !=
@@ -787,11 +789,26 @@ int gr_gp10b_set_boosted_ctx(struct nvgpu_channel *ch,
 			mem, nvgpu_gr_ctx_get_boosted_ctx(gr_ctx));
 	} else {
 		err = -ENOSYS;
+		goto out;
 	}
 
+	/* no error at this point */
+	err = nvgpu_channel_enable_tsg(g, ch);
+	if (err != 0) {
+		nvgpu_err(g, "failed to enable channel/TSG");
+	}
+
+	return err;
+
-enable_ch:
-	gk20a_enable_channel_tsg(g, ch);
+out:
+	/*
+	 * control reaches here if preempt failed or
+	 * set_pmu_options_boost_clock_frequencies fn pointer is NULL.
+	 * Propagate preempt failure err or err for
+	 * set_pmu_options_boost_clock_frequencies fn pointer being NULL
+	 */
+	if (nvgpu_channel_enable_tsg(g, ch) != 0) {
+		/* ch might not be bound to tsg anymore */
+		nvgpu_err(g, "failed to enable channel/TSG");
+	}
 	return err;
 }


@@ -430,8 +430,8 @@ int gk20a_channel_alloc_priv_cmdbuf(struct nvgpu_channel *c, u32 orig_size,
 void nvgpu_channel_update_priv_cmd_q_and_free_entry(
 		struct nvgpu_channel *ch, struct priv_cmd_entry *e);
 
-int gk20a_enable_channel_tsg(struct gk20a *g, struct nvgpu_channel *ch);
-int gk20a_disable_channel_tsg(struct gk20a *g, struct nvgpu_channel *ch);
+int nvgpu_channel_enable_tsg(struct gk20a *g, struct nvgpu_channel *ch);
+int nvgpu_channel_disable_tsg(struct gk20a *g, struct nvgpu_channel *ch);
 
 int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g);
 void nvgpu_channel_resume_all_serviceable_ch(struct gk20a *g);
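
With the rename in place, every caller of the two renamed declarations above must consume the return value to stay within Rule 17.7. A hypothetical call site follows, assuming only the declarations above and nvgpu's nvgpu_err() logging helper are in scope; example_quiesce is illustrative and not part of this commit:

/* Hypothetical caller; assumes the declarations above plus nvgpu_err()
 * are visible via the nvgpu headers. */
static int example_quiesce(struct gk20a *g, struct nvgpu_channel *ch)
{
        int err;

        err = nvgpu_channel_disable_tsg(g, ch);   /* checked, per Rule 17.7 */
        if (err != 0) {
                nvgpu_err(g, "failed to disable channel/TSG");
                return err;
        }

        err = nvgpu_channel_enable_tsg(g, ch);    /* checked, per Rule 17.7 */
        if (err != 0) {
                nvgpu_err(g, "failed to enable channel/TSG");
        }
        return err;
}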