gpu: nvgpu: fix MISRA 17.7 in nvgpu.common.hal.fifo.*

MISRA Rule 17.7 requires that the value returned by a non-void
function be used. The fix is either to use the return value or to
change the function to return void. This patch contains fixes for all 17.7 violations
in the following units:
- nvgpu.common.hal.fifo.runlist
- nvgpu.common.hal.fifo.fifo

JIRA NVGPU-3039

Change-Id: I9483f5cb623cfe36d6b26e41c33f124c24710c08
Signed-off-by: Nicolas Benech <nbenech@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2098765
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Nicolas Benech
2019-04-16 10:53:29 -04:00
committed by mobile promotions
parent 9449396ffc
commit 0435ca4eb3
4 changed files with 44 additions and 7 deletions

View File

@@ -824,6 +824,11 @@ void nvgpu_tsg_abort(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt)
g->ops.tsg.disable(tsg);
if (preempt) {
/*
* Ignore the return value below. If preempt fails, preempt_tsg
* operation will print the error and ctxsw timeout may trigger
* a recovery if needed.
*/
(void)g->ops.fifo.preempt_tsg(g, tsg);
}

View File

@@ -148,10 +148,16 @@ int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
{
struct nvgpu_timeout timeout;
u32 delay = POLL_DELAY_MIN_US;
int ret = -EBUSY;
int ret = 0;
nvgpu_timeout_init(g, &timeout, gk20a_fifo_get_preempt_timeout(g),
ret = nvgpu_timeout_init(g, &timeout, gk20a_fifo_get_preempt_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (ret != 0) {
nvgpu_err(g, "nvgpu_timeout_init failed err=%d ", ret);
return ret;
}
ret = -EBUSY;
do {
if ((gk20a_readl(g, fifo_preempt_r()) &
fifo_preempt_pending_true_f()) == 0U) {
@@ -193,6 +199,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
int ret = 0;
u32 token = PMU_INVALID_MUTEX_OWNER_ID;
int mutex_ret = 0;
int err = 0;
nvgpu_log_fn(g, "chid: %d", ch->chid);
@@ -205,7 +212,12 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
ret = __locked_fifo_preempt(g, ch->chid, false);
if (mutex_ret == 0) {
nvgpu_pmu_lock_release(g, &g->pmu, PMU_MUTEX_ID_FIFO, &token);
err = nvgpu_pmu_lock_release(g, &g->pmu, PMU_MUTEX_ID_FIFO,
&token);
if (err != 0) {
nvgpu_err(g, "nvgpu_pmu_lock_release failed err=%d",
err);
}
}
nvgpu_fifo_unlock_active_runlists(g);
@@ -238,6 +250,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
int ret = 0;
u32 token = PMU_INVALID_MUTEX_OWNER_ID;
int mutex_ret = 0;
int err = 0;
nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
@@ -250,7 +263,12 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
ret = __locked_fifo_preempt(g, tsg->tsgid, true);
if (mutex_ret == 0) {
nvgpu_pmu_lock_release(g, &g->pmu, PMU_MUTEX_ID_FIFO, &token);
err = nvgpu_pmu_lock_release(g, &g->pmu, PMU_MUTEX_ID_FIFO,
&token);
if (err != 0) {
nvgpu_err(g, "nvgpu_pmu_lock_release failed err=%d",
err);
}
}
nvgpu_fifo_unlock_active_runlists(g);

View File

@@ -95,7 +95,16 @@ int gk20a_fifo_reschedule_preempt_next(struct channel_gk20a *ch,
nvgpu_readl(g, fifo_preempt_r()));
#endif
if (wait_preempt) {
g->ops.fifo.is_preempt_pending(g, preempt_id, preempt_type);
if (g->ops.fifo.is_preempt_pending(g, preempt_id,
preempt_type) != 0) {
nvgpu_err(g, "fifo preempt timed out");
/*
* This function does not care if preempt
* times out since it is here only to improve
* latency. If a timeout happens, it will be
* handled by other fifo handling code.
*/
}
}
#ifdef TRACEPOINTS_ENABLED
trace_gk20a_reschedule_preempted_next(ch->chid);

View File

@@ -118,8 +118,13 @@ void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 eng_bitmask,
g->ops.fifo.intr_set_recover_mask(g);
g->ops.fifo.trigger_mmu_fault(g, engine_ids);
gk20a_fifo_handle_mmu_fault_locked(g, mmu_fault_engines, ref_id,
ref_id_is_tsg);
/*
* Ignore the "Verbose" flag from
* gk20a_fifo_handle_mmu_fault_locked since it is not needed
* here
*/
(void) gk20a_fifo_handle_mmu_fault_locked(g, mmu_fault_engines,
ref_id, ref_id_is_tsg);
g->ops.fifo.intr_unset_recover_mask(g);
}