diff --git a/drivers/gpu/nvgpu/common/fifo/runlist.c b/drivers/gpu/nvgpu/common/fifo/runlist.c
index 1b994e5fb..ccb1ee11e 100644
--- a/drivers/gpu/nvgpu/common/fifo/runlist.c
+++ b/drivers/gpu/nvgpu/common/fifo/runlist.c
@@ -378,7 +378,7 @@ int nvgpu_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next,
 	struct gk20a *g = ch->g;
 	struct fifo_runlist_info_gk20a *runlist;
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-	int mutex_ret;
+	int mutex_ret = -EINVAL;
 	int ret = 0;
 
 	runlist = &g->fifo.runlist_info[ch->runlist_id];
@@ -386,8 +386,10 @@ int nvgpu_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next,
 		return -EBUSY;
 	}
 
-	mutex_ret = nvgpu_pmu_mutex_acquire(
-		&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	if (g->ops.pmu.is_pmu_supported(g)) {
+		mutex_ret = nvgpu_pmu_mutex_acquire(
+			&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	}
 
 	g->ops.fifo.runlist_hw_submit(
 		g, ch->runlist_id, runlist->count, runlist->cur_buffer);
@@ -427,7 +429,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
 	struct fifo_runlist_info_gk20a *runlist = NULL;
 	struct fifo_gk20a *f = &g->fifo;
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-	int mutex_ret;
+	int mutex_ret = -EINVAL;
 	int ret = 0;
 
 	nvgpu_log_fn(g, " ");
@@ -436,7 +438,10 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
 
 	nvgpu_mutex_acquire(&runlist->runlist_lock);
 
-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	if (g->ops.pmu.is_pmu_supported(g)) {
+		mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+			PMU_MUTEX_ID_FIFO, &token);
+	}
 
 	ret = gk20a_fifo_update_runlist_locked(g, runlist_id, chid, add,
 					       wait_for_finish);
@@ -511,12 +516,15 @@ void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
 		u32 runlist_state)
 {
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-	int mutex_ret;
+	int mutex_ret = -EINVAL;
 
 	nvgpu_log(g, gpu_dbg_info, "runlist mask = 0x%08x state = 0x%08x",
 			runlists_mask, runlist_state);
 
-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	if (g->ops.pmu.is_pmu_supported(g)) {
+		mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+			PMU_MUTEX_ID_FIFO, &token);
+	}
 
 	g->ops.fifo.runlist_write_state(g, runlists_mask, runlist_state);
 
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index ae8318b52..30fc9930a 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -2461,7 +2461,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
 	struct fifo_gk20a *f = &g->fifo;
 	int ret = 0;
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-	int mutex_ret = 0;
+	int mutex_ret = -EINVAL;
 	u32 i;
 
 	nvgpu_log_fn(g, "chid: %d", ch->chid);
@@ -2471,7 +2471,10 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
 		nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock);
 	}
 
-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	if (g->ops.pmu.is_pmu_supported(g)) {
+		mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+			PMU_MUTEX_ID_FIFO, &token);
+	}
 
 	ret = __locked_fifo_preempt(g, ch->chid, false);
 
@@ -2503,7 +2506,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 	struct fifo_gk20a *f = &g->fifo;
 	int ret = 0;
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-	int mutex_ret = 0;
+	int mutex_ret = -EINVAL;
 	u32 i;
 
 	nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
@@ -2513,7 +2516,10 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 		nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock);
 	}
 
-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	if (g->ops.pmu.is_pmu_supported(g)) {
+		mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+			PMU_MUTEX_ID_FIFO, &token);
+	}
 
 	ret = __locked_fifo_preempt(g, tsg->tsgid, true);
 
@@ -2616,7 +2622,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
 	u32 pbdma_chid = FIFO_INVAL_CHANNEL_ID;
 	u32 engine_chid = FIFO_INVAL_CHANNEL_ID;
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-	int mutex_ret;
+	int mutex_ret = -EINVAL;
 	struct channel_gk20a *ch = NULL;
 	int err = 0;
 
@@ -2629,7 +2635,10 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
 		return -EBUSY;
 	}
 
-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	if (g->ops.pmu.is_pmu_supported(g)) {
+		mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+			PMU_MUTEX_ID_FIFO, &token);
+	}
 
 	gk20a_fifo_set_runlist_state(g, BIT32(eng_info->runlist_id),
 			RUNLIST_DISABLED);
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index 72cbf69b5..4b4f7b63c 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -878,7 +878,7 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 	struct fifo_gk20a *f = &g->fifo;
 	int ret = 0;
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-	u32 mutex_ret = 0;
+	int mutex_ret = -EINVAL;
 	u32 runlist_id;
 
 	nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
@@ -894,11 +894,14 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 	/* WAR for Bug 2065990 */
 	gk20a_tsg_disable_sched(g, tsg);
 
-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	if (g->ops.pmu.is_pmu_supported(g)) {
+		mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+			PMU_MUTEX_ID_FIFO, &token);
+	}
 
 	ret = __locked_fifo_preempt(g, tsg->tsgid, true);
 
-	if (mutex_ret == 0U) {
+	if (mutex_ret == 0) {
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	}
 
@@ -923,13 +926,16 @@ static void gv11b_fifo_locked_preempt_runlists_rc(struct gk20a *g,
 		u32 runlists_mask)
 {
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-	u32 mutex_ret = 0;
+	int mutex_ret = -EINVAL;
 	u32 rlid;
 
 	/* runlist_lock are locked by teardown and sched are disabled too */
 	nvgpu_log_fn(g, "preempt runlists_mask:0x%08x", runlists_mask);
 
-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	if (g->ops.pmu.is_pmu_supported(g)) {
+		mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+			PMU_MUTEX_ID_FIFO, &token);
+	}
 
 	/* issue runlist preempt */
 	gv11b_fifo_issue_runlist_preempt(g, runlists_mask);
@@ -947,7 +953,7 @@ static void gv11b_fifo_locked_preempt_runlists_rc(struct gk20a *g,
 		}
 	}
 
-	if (mutex_ret == 0U) {
+	if (mutex_ret == 0) {
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	}
 }
@@ -961,14 +967,17 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
 	u32 rlid;
 	struct fifo_runlist_info_gk20a *runlist = NULL;
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-	u32 mutex_ret = 0;
+	int mutex_ret = -EINVAL;
 	bool add = false, wait_for_finish = false;
 	int err;
 
 	nvgpu_err(g, "runlist id unknown, abort active tsgs in runlists");
 
 	/* runlist_lock are locked by teardown */
-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	if (g->ops.pmu.is_pmu_supported(g)) {
+		mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+			PMU_MUTEX_ID_FIFO, &token);
+	}
 
 	for (rlid = 0; rlid < g->fifo.max_runlists;
 						 rlid++) {
@@ -1013,7 +1022,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
 			nvgpu_log(g, gpu_dbg_info, "aborted tsg id %lu", tsgid);
 		}
 	}
-	if (mutex_ret == 0U) {
+	if (mutex_ret == 0) {
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	}
 }
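
Every hunk above applies the same pattern: only attempt the PMU FIFO mutex when a PMU is present, and key the release off a successful acquire. Below is a minimal self-contained sketch of that pattern for reference; the types and helpers in it (struct gpu, pmu_supported(), pmu_mutex_acquire(), pmu_mutex_release(), update_runlist()) are hypothetical stand-ins for the nvgpu equivalents (struct gk20a, g->ops.pmu.is_pmu_supported(), nvgpu_pmu_mutex_acquire(), nvgpu_pmu_mutex_release()), not real driver API.

/* Illustrative sketch only -- stub types/helpers, not the nvgpu API. */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

struct gpu { bool has_pmu; };	/* stand-in for struct gk20a */

static bool pmu_supported(const struct gpu *g)
{
	return g != NULL && g->has_pmu;
}

static int pmu_mutex_acquire(struct gpu *g)
{
	(void)g;
	return 0;	/* pretend the HW mutex was taken */
}

static void pmu_mutex_release(struct gpu *g)
{
	(void)g;
}

static void update_runlist(struct gpu *g)
{
	/*
	 * Start from a sentinel error so the release path is skipped both
	 * when the acquire was never attempted (no PMU) and when it failed.
	 */
	int mutex_ret = -EINVAL;

	if (pmu_supported(g)) {
		mutex_ret = pmu_mutex_acquire(g);
	}

	/* ... program the runlist hardware here ... */

	/* Release only if the acquire above actually succeeded. */
	if (mutex_ret == 0) {
		pmu_mutex_release(g);
	}
}

int main(void)
{
	struct gpu pmu_less = { .has_pmu = false };

	update_runlist(&pmu_less);	/* acquire is skipped, and so is release */
	return 0;
}

Initializing mutex_ret to -EINVAL rather than 0 is what makes the conditional release safe: if the acquire is skipped because is_pmu_supported() returns false, or if it fails, mutex_ret stays nonzero and the release is never issued for a token that was never obtained. The gv11b changes from u32 to int (and from "== 0U" to "== 0") follow from the same fix, since a negative sentinel needs a signed type.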