gpu: nvgpu: pmu code fix for VDK

The dGPU VDK does not have PMU support, so the PMU variables do not get
initialized in fmodel.

Add an is_pmu_supported check before each nvgpu_pmu_mutex_acquire call,
and initialize mutex_ret to -EINVAL so the PMU mutex is only released
when it was actually acquired.

JIRA NVGPU-1564

Change-Id: Ieb683d3092b5289a9959c8811c25782074d19804
Signed-off-by: Vinod G <vinodg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1992193
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Vinod G, 2019-01-10 08:39:22 -08:00
Committed by: mobile promotions
Parent: fc503da086
Commit: c0a2f356c4
3 changed files with 48 additions and 22 deletions
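Every hunk below applies the same guard around the PMU FIFO mutex. A condensed
sketch of the pattern, using only the names that appear in this change, looks
like this (the critical-section comment stands in for the per-function work):

	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
	int mutex_ret = -EINVAL;	/* stays non-zero when no PMU is present */

	/* Skip the PMU mutex entirely on fmodel/VDK, where there is no PMU. */
	if (g->ops.pmu.is_pmu_supported(g)) {
		mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
				PMU_MUTEX_ID_FIFO, &token);
	}

	/* ... runlist submit / preempt critical section ... */

	/* Release only when the acquire above actually succeeded. */
	if (mutex_ret == 0) {
		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
	}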


@@ -378,7 +378,7 @@ int nvgpu_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next,
 	struct gk20a *g = ch->g;
 	struct fifo_runlist_info_gk20a *runlist;
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-	int mutex_ret;
+	int mutex_ret = -EINVAL;
 	int ret = 0;
 	runlist = &g->fifo.runlist_info[ch->runlist_id];
@@ -386,8 +386,10 @@ int nvgpu_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next,
 		return -EBUSY;
 	}
-	mutex_ret = nvgpu_pmu_mutex_acquire(
-			&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	if (g->ops.pmu.is_pmu_supported(g)) {
+		mutex_ret = nvgpu_pmu_mutex_acquire(
+				&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	}
 	g->ops.fifo.runlist_hw_submit(
 		g, ch->runlist_id, runlist->count, runlist->cur_buffer);
@@ -427,7 +429,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
 	struct fifo_runlist_info_gk20a *runlist = NULL;
 	struct fifo_gk20a *f = &g->fifo;
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-	int mutex_ret;
+	int mutex_ret = -EINVAL;
 	int ret = 0;
 	nvgpu_log_fn(g, " ");
@@ -436,7 +438,10 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
 	nvgpu_mutex_acquire(&runlist->runlist_lock);
-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	if (g->ops.pmu.is_pmu_supported(g)) {
+		mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+				PMU_MUTEX_ID_FIFO, &token);
+	}
 	ret = gk20a_fifo_update_runlist_locked(g, runlist_id, chid, add,
 			wait_for_finish);
@@ -511,12 +516,15 @@ void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
 				u32 runlist_state)
 {
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-	int mutex_ret;
+	int mutex_ret = -EINVAL;
 	nvgpu_log(g, gpu_dbg_info, "runlist mask = 0x%08x state = 0x%08x",
 		runlists_mask, runlist_state);
-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	if (g->ops.pmu.is_pmu_supported(g)) {
+		mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+				PMU_MUTEX_ID_FIFO, &token);
+	}
 	g->ops.fifo.runlist_write_state(g, runlists_mask, runlist_state);


@@ -2461,7 +2461,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
 	struct fifo_gk20a *f = &g->fifo;
 	int ret = 0;
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-	int mutex_ret = 0;
+	int mutex_ret = -EINVAL;
 	u32 i;
 	nvgpu_log_fn(g, "chid: %d", ch->chid);
@@ -2471,7 +2471,10 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
 		nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock);
 	}
-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	if (g->ops.pmu.is_pmu_supported(g)) {
+		mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+				PMU_MUTEX_ID_FIFO, &token);
+	}
 	ret = __locked_fifo_preempt(g, ch->chid, false);
@@ -2503,7 +2506,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 	struct fifo_gk20a *f = &g->fifo;
 	int ret = 0;
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-	int mutex_ret = 0;
+	int mutex_ret = -EINVAL;
 	u32 i;
 	nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
@@ -2513,7 +2516,10 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 		nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock);
 	}
-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	if (g->ops.pmu.is_pmu_supported(g)) {
+		mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+				PMU_MUTEX_ID_FIFO, &token);
+	}
 	ret = __locked_fifo_preempt(g, tsg->tsgid, true);
@@ -2616,7 +2622,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
 	u32 pbdma_chid = FIFO_INVAL_CHANNEL_ID;
 	u32 engine_chid = FIFO_INVAL_CHANNEL_ID;
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-	int mutex_ret;
+	int mutex_ret = -EINVAL;
 	struct channel_gk20a *ch = NULL;
 	int err = 0;
@@ -2629,7 +2635,10 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
 		return -EBUSY;
 	}
-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	if (g->ops.pmu.is_pmu_supported(g)) {
+		mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+				PMU_MUTEX_ID_FIFO, &token);
+	}
 	gk20a_fifo_set_runlist_state(g, BIT32(eng_info->runlist_id),
 			RUNLIST_DISABLED);


@@ -878,7 +878,7 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 	struct fifo_gk20a *f = &g->fifo;
 	int ret = 0;
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-	u32 mutex_ret = 0;
+	int mutex_ret = -EINVAL;
 	u32 runlist_id;
 	nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
@@ -894,11 +894,14 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 	/* WAR for Bug 2065990 */
 	gk20a_tsg_disable_sched(g, tsg);
-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	if (g->ops.pmu.is_pmu_supported(g)) {
+		mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+				PMU_MUTEX_ID_FIFO, &token);
+	}
 	ret = __locked_fifo_preempt(g, tsg->tsgid, true);
-	if (mutex_ret == 0U) {
+	if (mutex_ret == 0) {
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	}
@@ -923,13 +926,16 @@ static void gv11b_fifo_locked_preempt_runlists_rc(struct gk20a *g,
 				u32 runlists_mask)
 {
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-	u32 mutex_ret = 0;
+	int mutex_ret = -EINVAL;
 	u32 rlid;
 	/* runlist_lock are locked by teardown and sched are disabled too */
 	nvgpu_log_fn(g, "preempt runlists_mask:0x%08x", runlists_mask);
-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	if (g->ops.pmu.is_pmu_supported(g)) {
+		mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+				PMU_MUTEX_ID_FIFO, &token);
+	}
 	/* issue runlist preempt */
 	gv11b_fifo_issue_runlist_preempt(g, runlists_mask);
@@ -947,7 +953,7 @@ static void gv11b_fifo_locked_preempt_runlists_rc(struct gk20a *g,
 		}
 	}
-	if (mutex_ret == 0U) {
+	if (mutex_ret == 0) {
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	}
 }
@@ -961,14 +967,17 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
 	u32 rlid;
 	struct fifo_runlist_info_gk20a *runlist = NULL;
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-	u32 mutex_ret = 0;
+	int mutex_ret = -EINVAL;
 	bool add = false, wait_for_finish = false;
 	int err;
 	nvgpu_err(g, "runlist id unknown, abort active tsgs in runlists");
 	/* runlist_lock are locked by teardown */
-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	if (g->ops.pmu.is_pmu_supported(g)) {
+		mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+				PMU_MUTEX_ID_FIFO, &token);
+	}
 	for (rlid = 0; rlid < g->fifo.max_runlists;
 			rlid++) {
@@ -1013,7 +1022,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
 			nvgpu_log(g, gpu_dbg_info, "aborted tsg id %lu", tsgid);
 		}
 	}
-	if (mutex_ret == 0U) {
+	if (mutex_ret == 0) {
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	}
 }