diff --git a/drivers/gpu/nvgpu/common/fifo/engines.c b/drivers/gpu/nvgpu/common/fifo/engines.c
index 9fc69f240..10416fd8c 100644
--- a/drivers/gpu/nvgpu/common/fifo/engines.c
+++ b/drivers/gpu/nvgpu/common/fifo/engines.c
@@ -282,8 +282,10 @@ int nvgpu_engine_disable_activity(struct gk20a *g,
 {
 	u32 pbdma_chid = NVGPU_INVALID_CHANNEL_ID;
 	u32 engine_chid = NVGPU_INVALID_CHANNEL_ID;
+#ifdef NVGPU_LS_PMU
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 	int mutex_ret = -EINVAL;
+#endif
 	struct nvgpu_channel *ch = NULL;
 	int err = 0;
 	struct nvgpu_engine_status_info engine_status;
@@ -297,10 +299,12 @@ int nvgpu_engine_disable_activity(struct gk20a *g,
 		return -EBUSY;
 	}
 
+#ifdef NVGPU_LS_PMU
 	if (g->ops.pmu.is_pmu_supported(g)) {
 		mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
 					PMU_MUTEX_ID_FIFO, &token);
 	}
+#endif
 
 	nvgpu_fifo_runlist_set_state(g, BIT32(eng_info->runlist_id),
 			RUNLIST_DISABLED);
@@ -350,13 +354,14 @@ int nvgpu_engine_disable_activity(struct gk20a *g,
 	}
 
 clean_up:
+#ifdef NVGPU_LS_PMU
 	if (mutex_ret == 0) {
 		if (nvgpu_pmu_lock_release(g, g->pmu,
 				PMU_MUTEX_ID_FIFO, &token) != 0){
 			nvgpu_err(g, "failed to release PMU lock");
 		}
 	}
-
+#endif
 	if (err != 0) {
 		nvgpu_log_fn(g, "failed");
 		if (nvgpu_engine_enable_activity(g, eng_info) != 0) {
diff --git a/drivers/gpu/nvgpu/common/fifo/runlist.c b/drivers/gpu/nvgpu/common/fifo/runlist.c
index d1d322298..7501dac9a 100644
--- a/drivers/gpu/nvgpu/common/fifo/runlist.c
+++ b/drivers/gpu/nvgpu/common/fifo/runlist.c
@@ -452,18 +452,20 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
 {
 	struct gk20a *g = ch->g;
 	struct nvgpu_runlist_info *runlist;
+#ifdef NVGPU_LS_PMU
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 	int mutex_ret = 0;
+#endif
 	int ret = 0;
 
 	runlist = g->fifo.runlist_info[ch->runlist_id];
 	if (nvgpu_mutex_tryacquire(&runlist->runlist_lock) == 0) {
 		return -EBUSY;
 	}
-
+#ifdef NVGPU_LS_PMU
 	mutex_ret = nvgpu_pmu_lock_acquire(
 		g, g->pmu, PMU_MUTEX_ID_FIFO, &token);
-
+#endif
 	g->ops.runlist.hw_submit(
 		g, ch->runlist_id, runlist->count,
 		runlist->cur_buffer);
@@ -479,13 +481,14 @@
 		nvgpu_err(g, "wait pending failed for runlist %u",
 			ch->runlist_id);
 	}
-
+#ifdef NVGPU_LS_PMU
 	if (mutex_ret == 0) {
 		if (nvgpu_pmu_lock_release(g, g->pmu,
 				PMU_MUTEX_ID_FIFO, &token) != 0) {
 			nvgpu_err(g, "failed to release PMU lock");
 		}
 	}
+#endif
 	nvgpu_mutex_release(&runlist->runlist_lock);
 
 	return ret;
@@ -502,8 +505,10 @@ static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id,
 {
 	struct nvgpu_runlist_info *runlist = NULL;
 	struct nvgpu_fifo *f = &g->fifo;
+#ifdef NVGPU_LS_PMU
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 	int mutex_ret = 0;
+#endif
 	int ret = 0;
 
 	nvgpu_log_fn(g, " ");
@@ -511,20 +516,20 @@ static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id,
 	runlist = f->runlist_info[runlist_id];
 
 	nvgpu_mutex_acquire(&runlist->runlist_lock);
-
+#ifdef NVGPU_LS_PMU
 	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
 			PMU_MUTEX_ID_FIFO, &token);
-
+#endif
 	ret = nvgpu_runlist_update_locked(g, runlist_id, ch, add,
 			wait_for_finish);
-
+#ifdef NVGPU_LS_PMU
 	if (mutex_ret == 0) {
 		if (nvgpu_pmu_lock_release(g, g->pmu,
 				PMU_MUTEX_ID_FIFO, &token) != 0) {
 			nvgpu_err(g, "failed to release PMU lock");
 		}
 	}
-
+#endif
 	nvgpu_mutex_release(&runlist->runlist_lock);
 
 	if (ret == -ETIMEDOUT) {
@@ -603,24 +608,26 @@ const char *nvgpu_runlist_interleave_level_name(u32 interleave_level)
 
 void nvgpu_fifo_runlist_set_state(struct gk20a *g, u32 runlists_mask,
 		u32 runlist_state)
 {
+#ifdef NVGPU_LS_PMU
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 	int mutex_ret = 0;
-
+#endif
 	nvgpu_log(g, gpu_dbg_info, "runlist mask = 0x%08x state = 0x%08x",
 			runlists_mask, runlist_state);
-
+#ifdef NVGPU_LS_PMU
 	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
 			PMU_MUTEX_ID_FIFO, &token);
-
+#endif
 	g->ops.runlist.write_state(g, runlists_mask, runlist_state);
-
+#ifdef NVGPU_LS_PMU
 	if (mutex_ret == 0) {
 		if (nvgpu_pmu_lock_release(g, g->pmu,
 				PMU_MUTEX_ID_FIFO, &token) != 0) {
 			nvgpu_err(g, "failed to release PMU lock");
 		}
 	}
+#endif
 }
 
 void nvgpu_runlist_cleanup_sw(struct gk20a *g)
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c
index 5aa7e394e..5d2a74334 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu.c
@@ -45,6 +45,7 @@
 #include
 #include
 
+#ifdef NVGPU_LS_PMU
 /* PMU locks used to sync with PMU-RTOS */
 int nvgpu_pmu_lock_acquire(struct gk20a *g, struct nvgpu_pmu *pmu,
 			u32 id, u32 *token)
@@ -81,6 +82,7 @@ int nvgpu_pmu_lock_release(struct gk20a *g, struct nvgpu_pmu *pmu,
 
 	return nvgpu_pmu_mutex_release(g, pmu->mutexes, id, token);
 }
+#endif
 
 /* PMU RTOS init/setup functions */
 int nvgpu_pmu_destroy(struct gk20a *g, struct nvgpu_pmu *pmu)
diff --git a/drivers/gpu/nvgpu/hal/fifo/preempt_gk20a.c b/drivers/gpu/nvgpu/hal/fifo/preempt_gk20a.c
index 52262f9d5..c173b8daf 100644
--- a/drivers/gpu/nvgpu/hal/fifo/preempt_gk20a.c
+++ b/drivers/gpu/nvgpu/hal/fifo/preempt_gk20a.c
@@ -99,26 +99,27 @@ int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 int gk20a_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
 {
 	int ret = 0;
+#ifdef NVGPU_LS_PMU
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 	int mutex_ret = 0;
-
+#endif
 	nvgpu_log_fn(g, "preempt chid: %d", ch->chid);
 
 	/* we have no idea which runlist we are using. lock all */
 	nvgpu_runlist_lock_active_runlists(g);
-
+#ifdef NVGPU_LS_PMU
 	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
 			PMU_MUTEX_ID_FIFO, &token);
-
+#endif
 	ret = gk20a_fifo_preempt_locked(g, ch->chid, ID_TYPE_CHANNEL);
-
+#ifdef NVGPU_LS_PMU
 	if (mutex_ret == 0) {
 		if (nvgpu_pmu_lock_release(g, g->pmu,
 				PMU_MUTEX_ID_FIFO, &token) != 0) {
 			nvgpu_err(g, "failed to release PMU lock");
 		}
 	}
-
+#endif
 	nvgpu_runlist_unlock_active_runlists(g);
 
 	if (ret != 0) {
@@ -147,26 +148,27 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
 int gk20a_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
 {
 	int ret = 0;
+#ifdef NVGPU_LS_PMU
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 	int mutex_ret = 0;
-
+#endif
 	nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
 
 	/* we have no idea which runlist we are using. lock all */
 	nvgpu_runlist_lock_active_runlists(g);
-
+#ifdef NVGPU_LS_PMU
 	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
 			PMU_MUTEX_ID_FIFO, &token);
-
+#endif
 	ret = gk20a_fifo_preempt_locked(g, tsg->tsgid, ID_TYPE_TSG);
-
+#ifdef NVGPU_LS_PMU
 	if (mutex_ret == 0) {
 		if (nvgpu_pmu_lock_release(g, g->pmu,
 				PMU_MUTEX_ID_FIFO, &token) != 0) {
 			nvgpu_err(g, "failed to release PMU lock");
 		}
 	}
-
+#endif
 	nvgpu_runlist_unlock_active_runlists(g);
 
 	if (ret != 0) {
diff --git a/drivers/gpu/nvgpu/hal/fifo/preempt_gv11b.c b/drivers/gpu/nvgpu/hal/fifo/preempt_gv11b.c
index 624f7df6a..1e9947bbc 100644
--- a/drivers/gpu/nvgpu/hal/fifo/preempt_gv11b.c
+++ b/drivers/gpu/nvgpu/hal/fifo/preempt_gv11b.c
@@ -86,16 +86,18 @@ void gv11b_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_mask)
 {
 	struct nvgpu_fifo *f = &g->fifo;
 	struct nvgpu_runlist_info *runlist;
+#ifdef NVGPU_LS_PMU
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 	int mutex_ret = 0;
+#endif
 	u32 i;
 
 	/* runlist_lock are locked by teardown and sched are disabled too */
 	nvgpu_log_fn(g, "preempt runlists_mask:0x%08x", runlists_mask);
-
+#ifdef NVGPU_LS_PMU
 	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
 			PMU_MUTEX_ID_FIFO, &token);
-
+#endif
 	/* issue runlist preempt */
 	gv11b_fifo_issue_runlist_preempt(g, runlists_mask);
@@ -112,7 +114,7 @@ void gv11b_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_mask)
 			runlist->reset_eng_bitmask = runlist->eng_bitmask;
 		}
 	}
-
+#ifdef NVGPU_LS_PMU
 	if (mutex_ret == 0) {
 		int err = nvgpu_pmu_lock_release(g, g->pmu,
 				PMU_MUTEX_ID_FIFO, &token);
@@ -121,6 +123,7 @@
 				err);
 		}
 	}
+#endif
 }
 
 int gv11b_fifo_preempt_poll_pbdma(struct gk20a *g, u32 tsgid,
@@ -425,8 +428,10 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
 {
 	struct nvgpu_fifo *f = &g->fifo;
 	int ret = 0;
+#ifdef NVGPU_LS_PMU
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 	int mutex_ret = 0;
+#endif
 	u32 runlist_id;
 
 	nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
@@ -441,12 +446,12 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
 
 	/* WAR for Bug 2065990 */
 	nvgpu_tsg_disable_sched(g, tsg);
-
+#ifdef NVGPU_LS_PMU
 	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
 			PMU_MUTEX_ID_FIFO, &token);
-
+#endif
 	ret = gv11b_fifo_preempt_locked(g, tsg->tsgid, ID_TYPE_TSG);
-
+#ifdef NVGPU_LS_PMU
 	if (mutex_ret == 0) {
 		int err = nvgpu_pmu_lock_release(g, g->pmu,
 				PMU_MUTEX_ID_FIFO, &token);
@@ -455,7 +460,7 @@
 				err);
 		}
 	}
-
+#endif
 	/* WAR for Bug 2065990 */
 	nvgpu_tsg_enable_sched(g, tsg);
 
diff --git a/drivers/gpu/nvgpu/hal/rc/rc_gv11b.c b/drivers/gpu/nvgpu/hal/rc/rc_gv11b.c
index aa3fda3e6..511ea684e 100644
--- a/drivers/gpu/nvgpu/hal/rc/rc_gv11b.c
+++ b/drivers/gpu/nvgpu/hal/rc/rc_gv11b.c
@@ -52,18 +52,20 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
 	struct nvgpu_tsg *tsg = NULL;
 	unsigned long tsgid;
 	struct nvgpu_runlist_info *runlist = NULL;
+#ifdef NVGPU_LS_PMU
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 	int mutex_ret = 0;
+#endif
 	int err;
 	u32 i;
 
 	nvgpu_err(g, "abort active tsgs of runlists set in "
		"runlists_mask: 0x%08x", runlists_mask);
-
+#ifdef NVGPU_LS_PMU
 	/* runlist_lock are locked by teardown */
 	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
 			PMU_MUTEX_ID_FIFO, &token);
-
+#endif
 	for (i = 0U; i < f->num_runlists; i++) {
 		runlist = &f->active_runlist_info[i];
 
@@ -118,6 +120,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
 			nvgpu_log(g, gpu_dbg_info, "aborted tsg id %lu", tsgid);
 		}
 	}
+#ifdef NVGPU_LS_PMU
 	if (mutex_ret == 0) {
 		err = nvgpu_pmu_lock_release(g, g->pmu,
 				PMU_MUTEX_ID_FIFO, &token);
@@ -126,6 +129,7 @@
 				err);
 		}
 	}
+#endif
 }
 
 void gv11b_fifo_recover(struct gk20a *g, u32 act_eng_bitmask,
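Note: every hunk above makes the same shape of change, so it is worth stating once. The token/mutex_ret locals, the nvgpu_pmu_lock_acquire() call, and the paired nvgpu_pmu_lock_release() are all wrapped in #ifdef NVGPU_LS_PMU together, so a build without the LS PMU neither declares now-unused locals nor references symbols that are compiled out, while the guarded FIFO work itself still runs unconditionally. The standalone C sketch below reduces that pattern to something compilable either way; HAVE_OPTIONAL_LOCK, optional_lock_acquire(), and optional_lock_release() are illustrative placeholders, not nvgpu APIs.

#include <stdio.h>

#ifdef HAVE_OPTIONAL_LOCK
/* Placeholder acquire: returns 0 on success and fills *token for release. */
static int optional_lock_acquire(unsigned int *token)
{
	*token = 1U;
	return 0;
}

/* Placeholder release: returns 0 on success. */
static int optional_lock_release(unsigned int token)
{
	(void)token;
	return 0;
}
#endif

/* Stands in for the work the lock protects (runlist writes, preemption). */
static void do_guarded_work(void)
{
	printf("guarded work runs with or without the optional lock\n");
}

int main(void)
{
#ifdef HAVE_OPTIONAL_LOCK
	unsigned int token = 0U;
	int mutex_ret;

	mutex_ret = optional_lock_acquire(&token);
#endif
	do_guarded_work();
#ifdef HAVE_OPTIONAL_LOCK
	/* Release only after a successful acquire, mirroring the hunks above. */
	if (mutex_ret == 0) {
		if (optional_lock_release(token) != 0)
			fprintf(stderr, "failed to release lock\n");
	}
#endif
	return 0;
}

Both "cc demo.c" and "cc -DHAVE_OPTIONAL_LOCK demo.c" build cleanly: the locals disappear together with the calls, which is presumably why the patch moves the declarations inside the guard instead of stubbing out the lock functions and leaving -Wunused-variable warnings behind.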