gpu: nvgpu: compile out PMU mutex code for safety

Compile out the PMU mutex calls made from other units when
PMU RTOS support is disabled for the safety build, i.e. when
the NVGPU_LS_PMU build flag is set to 0.

JIRA NVGPU-3418

Change-Id: I040a744d5102f7fd889d4e8ad6e94129eadb73dd
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2124698
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:     Mahantesh Kumbar
Date:       2019-05-27 11:34:45 +05:30
Committed:  mobile promotions
Parent:     b6dfba15fa
Commit:     120defb7cb

6 changed files with 56 additions and 31 deletions
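
The pattern applied in every hunk below is the same: the token/mutex_ret
locals and the nvgpu_pmu_lock_acquire()/nvgpu_pmu_lock_release() call sites
are fenced with #ifdef NVGPU_LS_PMU, so a safety build contains no reference
to the PMU mutex API at all. A minimal, self-contained sketch of that
pattern follows; all stub bodies, the printf placeholders, and the file name
are illustrative assumptions, not nvgpu code.

/* pmu_guard_demo.c - sketch of the compile-out pattern in this change.
 * Build with and without -DNVGPU_LS_PMU to see both configurations. */
#include <stdio.h>

#define PMU_INVALID_MUTEX_OWNER_ID 0xFFFFFFFFu
#define PMU_MUTEX_ID_FIFO 0u

struct nvgpu_pmu { int unused; };
struct gk20a { struct nvgpu_pmu *pmu; };

#ifdef NVGPU_LS_PMU
/* Stub: pretend the PMU-RTOS granted the hardware mutex. */
static int nvgpu_pmu_lock_acquire(struct gk20a *g, struct nvgpu_pmu *pmu,
		unsigned int id, unsigned int *token)
{
	(void)g; (void)pmu; (void)id;
	*token = 42u;
	return 0;
}

/* Stub: pretend the release succeeded. */
static int nvgpu_pmu_lock_release(struct gk20a *g, struct nvgpu_pmu *pmu,
		unsigned int id, unsigned int *token)
{
	(void)g; (void)pmu; (void)id; (void)token;
	return 0;
}
#endif

/* Caller shaped like nvgpu_fifo_runlist_set_state() in the diff: the
 * token/mutex_ret locals and both PMU calls vanish when NVGPU_LS_PMU
 * is not defined, so no PMU mutex code is reachable in a safety build. */
static void set_runlist_state(struct gk20a *g)
{
#ifdef NVGPU_LS_PMU
	unsigned int token = PMU_INVALID_MUTEX_OWNER_ID;
	int mutex_ret;

	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
			PMU_MUTEX_ID_FIFO, &token);
#endif
	printf("runlist state register write\n");
#ifdef NVGPU_LS_PMU
	/* Release only if the acquire actually succeeded. */
	if (mutex_ret == 0) {
		if (nvgpu_pmu_lock_release(g, g->pmu,
				PMU_MUTEX_ID_FIFO, &token) != 0) {
			fprintf(stderr, "failed to release PMU lock\n");
		}
	}
#endif
}

int main(void)
{
	struct nvgpu_pmu pmu = { 0 };
	struct gk20a g = { &pmu };

	set_runlist_state(&g);
	return 0;
}

"cc -DNVGPU_LS_PMU pmu_guard_demo.c" exercises the guarded path, while a
plain "cc pmu_guard_demo.c" shows the safety configuration with the PMU
calls compiled out.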


@@ -282,8 +282,10 @@ int nvgpu_engine_disable_activity(struct gk20a *g,
{
	u32 pbdma_chid = NVGPU_INVALID_CHANNEL_ID;
	u32 engine_chid = NVGPU_INVALID_CHANNEL_ID;
#ifdef NVGPU_LS_PMU
	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
	int mutex_ret = -EINVAL;
#endif
	struct nvgpu_channel *ch = NULL;
	int err = 0;
	struct nvgpu_engine_status_info engine_status;
@@ -297,10 +299,12 @@ int nvgpu_engine_disable_activity(struct gk20a *g,
		return -EBUSY;
	}

#ifdef NVGPU_LS_PMU
	if (g->ops.pmu.is_pmu_supported(g)) {
		mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
				PMU_MUTEX_ID_FIFO, &token);
	}
#endif

	nvgpu_fifo_runlist_set_state(g, BIT32(eng_info->runlist_id),
			RUNLIST_DISABLED);
@@ -350,13 +354,14 @@ int nvgpu_engine_disable_activity(struct gk20a *g,
	}
clean_up:
#ifdef NVGPU_LS_PMU
	if (mutex_ret == 0) {
		if (nvgpu_pmu_lock_release(g, g->pmu,
				PMU_MUTEX_ID_FIFO, &token) != 0) {
			nvgpu_err(g, "failed to release PMU lock");
		}
	}
#endif

	if (err != 0) {
		nvgpu_log_fn(g, "failed");
		if (nvgpu_engine_enable_activity(g, eng_info) != 0) {


@@ -452,18 +452,20 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
{
	struct gk20a *g = ch->g;
	struct nvgpu_runlist_info *runlist;
#ifdef NVGPU_LS_PMU
	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
	int mutex_ret = 0;
#endif
	int ret = 0;

	runlist = g->fifo.runlist_info[ch->runlist_id];

	if (nvgpu_mutex_tryacquire(&runlist->runlist_lock) == 0) {
		return -EBUSY;
	}
#ifdef NVGPU_LS_PMU
	mutex_ret = nvgpu_pmu_lock_acquire(
			g, g->pmu, PMU_MUTEX_ID_FIFO, &token);
#endif
	g->ops.runlist.hw_submit(
			g, ch->runlist_id, runlist->count, runlist->cur_buffer);
@@ -479,13 +481,14 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
nvgpu_err(g, "wait pending failed for runlist %u",
ch->runlist_id);
}
#ifdef NVGPU_LS_PMU
if (mutex_ret == 0) {
if (nvgpu_pmu_lock_release(g, g->pmu,
PMU_MUTEX_ID_FIFO, &token) != 0) {
nvgpu_err(g, "failed to release PMU lock");
}
}
#endif
nvgpu_mutex_release(&runlist->runlist_lock);
return ret;
@@ -502,8 +505,10 @@ static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id,
{
	struct nvgpu_runlist_info *runlist = NULL;
	struct nvgpu_fifo *f = &g->fifo;
#ifdef NVGPU_LS_PMU
	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
	int mutex_ret = 0;
#endif
	int ret = 0;

	nvgpu_log_fn(g, " ");
@@ -511,20 +516,20 @@ static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id,
	runlist = f->runlist_info[runlist_id];

	nvgpu_mutex_acquire(&runlist->runlist_lock);
#ifdef NVGPU_LS_PMU
	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
			PMU_MUTEX_ID_FIFO, &token);
#endif
	ret = nvgpu_runlist_update_locked(g, runlist_id, ch, add,
			wait_for_finish);
#ifdef NVGPU_LS_PMU
	if (mutex_ret == 0) {
		if (nvgpu_pmu_lock_release(g, g->pmu,
				PMU_MUTEX_ID_FIFO, &token) != 0) {
			nvgpu_err(g, "failed to release PMU lock");
		}
	}
#endif
	nvgpu_mutex_release(&runlist->runlist_lock);

	if (ret == -ETIMEDOUT) {
if (ret == -ETIMEDOUT) {
@@ -603,24 +608,26 @@ const char *nvgpu_runlist_interleave_level_name(u32 interleave_level)
void nvgpu_fifo_runlist_set_state(struct gk20a *g, u32 runlists_mask,
		u32 runlist_state)
{
#ifdef NVGPU_LS_PMU
	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
	int mutex_ret = 0;
#endif

	nvgpu_log(g, gpu_dbg_info, "runlist mask = 0x%08x state = 0x%08x",
			runlists_mask, runlist_state);
#ifdef NVGPU_LS_PMU
	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
			PMU_MUTEX_ID_FIFO, &token);
#endif
	g->ops.runlist.write_state(g, runlists_mask, runlist_state);
#ifdef NVGPU_LS_PMU
	if (mutex_ret == 0) {
		if (nvgpu_pmu_lock_release(g, g->pmu,
				PMU_MUTEX_ID_FIFO, &token) != 0) {
			nvgpu_err(g, "failed to release PMU lock");
		}
	}
#endif
}

void nvgpu_runlist_cleanup_sw(struct gk20a *g)


@@ -45,6 +45,7 @@
#include <nvgpu/pmu/pmu_pstate.h>
#include <nvgpu/nvgpu_err.h>

#ifdef NVGPU_LS_PMU
/* PMU locks used to sync with PMU-RTOS */
int nvgpu_pmu_lock_acquire(struct gk20a *g, struct nvgpu_pmu *pmu,
		u32 id, u32 *token)
@@ -81,6 +82,7 @@ int nvgpu_pmu_lock_release(struct gk20a *g, struct nvgpu_pmu *pmu,
	return nvgpu_pmu_mutex_release(g, pmu->mutexes, id, token);
}
#endif

/* PMU RTOS init/setup functions */
int nvgpu_pmu_destroy(struct gk20a *g, struct nvgpu_pmu *pmu)
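
For reference, the guarded wrapper pair plausibly reads as below after this
change. The release body is taken verbatim from the hunk above; the acquire
body is an assumption, written symmetric to it, and the real wrappers may
also validate PMU state before touching the mutex.

#ifdef NVGPU_LS_PMU
/* PMU locks used to sync with PMU-RTOS */
int nvgpu_pmu_lock_acquire(struct gk20a *g, struct nvgpu_pmu *pmu,
		u32 id, u32 *token)
{
	/* assumed body, symmetric to the release wrapper below */
	return nvgpu_pmu_mutex_acquire(g, pmu->mutexes, id, token);
}

int nvgpu_pmu_lock_release(struct gk20a *g, struct nvgpu_pmu *pmu,
		u32 id, u32 *token)
{
	return nvgpu_pmu_mutex_release(g, pmu->mutexes, id, token);
}
#endif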


@@ -99,26 +99,27 @@ int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
int gk20a_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
{
	int ret = 0;
#ifdef NVGPU_LS_PMU
	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
	int mutex_ret = 0;
#endif

	nvgpu_log_fn(g, "preempt chid: %d", ch->chid);

	/* we have no idea which runlist we are using. lock all */
	nvgpu_runlist_lock_active_runlists(g);
#ifdef NVGPU_LS_PMU
	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
			PMU_MUTEX_ID_FIFO, &token);
#endif
	ret = gk20a_fifo_preempt_locked(g, ch->chid, ID_TYPE_CHANNEL);
#ifdef NVGPU_LS_PMU
	if (mutex_ret == 0) {
		if (nvgpu_pmu_lock_release(g, g->pmu,
				PMU_MUTEX_ID_FIFO, &token) != 0) {
			nvgpu_err(g, "failed to release PMU lock");
		}
	}
#endif
	nvgpu_runlist_unlock_active_runlists(g);

	if (ret != 0) {
@@ -147,26 +148,27 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
int gk20a_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
{
	int ret = 0;
#ifdef NVGPU_LS_PMU
	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
	int mutex_ret = 0;
#endif

	nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);

	/* we have no idea which runlist we are using. lock all */
	nvgpu_runlist_lock_active_runlists(g);
#ifdef NVGPU_LS_PMU
	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
			PMU_MUTEX_ID_FIFO, &token);
#endif
	ret = gk20a_fifo_preempt_locked(g, tsg->tsgid, ID_TYPE_TSG);
#ifdef NVGPU_LS_PMU
	if (mutex_ret == 0) {
		if (nvgpu_pmu_lock_release(g, g->pmu,
				PMU_MUTEX_ID_FIFO, &token) != 0) {
			nvgpu_err(g, "failed to release PMU lock");
		}
	}
#endif
	nvgpu_runlist_unlock_active_runlists(g);

	if (ret != 0) {


@@ -86,16 +86,18 @@ void gv11b_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_mask)
{
	struct nvgpu_fifo *f = &g->fifo;
	struct nvgpu_runlist_info *runlist;
#ifdef NVGPU_LS_PMU
	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
	int mutex_ret = 0;
#endif
	u32 i;

	/* runlist_lock are locked by teardown and sched are disabled too */
	nvgpu_log_fn(g, "preempt runlists_mask:0x%08x", runlists_mask);
#ifdef NVGPU_LS_PMU
	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
			PMU_MUTEX_ID_FIFO, &token);
#endif

	/* issue runlist preempt */
	gv11b_fifo_issue_runlist_preempt(g, runlists_mask);
@@ -112,7 +114,7 @@ void gv11b_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_mask)
			runlist->reset_eng_bitmask = runlist->eng_bitmask;
		}
	}
#ifdef NVGPU_LS_PMU
	if (mutex_ret == 0) {
		int err = nvgpu_pmu_lock_release(g, g->pmu, PMU_MUTEX_ID_FIFO,
				&token);
@@ -121,6 +123,7 @@ void gv11b_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_mask)
				err);
		}
	}
#endif
}

int gv11b_fifo_preempt_poll_pbdma(struct gk20a *g, u32 tsgid,
@@ -425,8 +428,10 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
{
	struct nvgpu_fifo *f = &g->fifo;
	int ret = 0;
#ifdef NVGPU_LS_PMU
	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
	int mutex_ret = 0;
#endif
	u32 runlist_id;

	nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
@@ -441,12 +446,12 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
	/* WAR for Bug 2065990 */
	nvgpu_tsg_disable_sched(g, tsg);
#ifdef NVGPU_LS_PMU
	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
			PMU_MUTEX_ID_FIFO, &token);
#endif
	ret = gv11b_fifo_preempt_locked(g, tsg->tsgid, ID_TYPE_TSG);
#ifdef NVGPU_LS_PMU
	if (mutex_ret == 0) {
		int err = nvgpu_pmu_lock_release(g, g->pmu, PMU_MUTEX_ID_FIFO,
				&token);
@@ -455,7 +460,7 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
				err);
		}
	}
#endif

	/* WAR for Bug 2065990 */
	nvgpu_tsg_enable_sched(g, tsg);


@@ -52,18 +52,20 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
	struct nvgpu_tsg *tsg = NULL;
	unsigned long tsgid;
	struct nvgpu_runlist_info *runlist = NULL;
#ifdef NVGPU_LS_PMU
	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
	int mutex_ret = 0;
#endif
	int err;
	u32 i;

	nvgpu_err(g, "abort active tsgs of runlists set in "
			"runlists_mask: 0x%08x", runlists_mask);
#ifdef NVGPU_LS_PMU
	/* runlist_lock are locked by teardown */
	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
			PMU_MUTEX_ID_FIFO, &token);
#endif

	for (i = 0U; i < f->num_runlists; i++) {
		runlist = &f->active_runlist_info[i];
@@ -118,6 +120,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
			nvgpu_log(g, gpu_dbg_info, "aborted tsg id %lu", tsgid);
		}
	}
#ifdef NVGPU_LS_PMU
	if (mutex_ret == 0) {
		err = nvgpu_pmu_lock_release(g, g->pmu, PMU_MUTEX_ID_FIFO,
				&token);
@@ -126,6 +129,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
				err);
		}
	}
#endif
}

void gv11b_fifo_recover(struct gk20a *g, u32 act_eng_bitmask,