mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git, synced 2025-12-25 11:04:51 +03:00
gpu: nvgpu: use support_ls_pmu flag to check LS PMU support
Currently the PMU support check is done through multiple methods, which makes it hard to determine the status of PMU support. Replace those methods with a single support_ls_pmu flag; the flag is set at init stage based on platform/chip specific settings. Clean up the support flag checks against platform-specific PMU members in multiple places and move the check into the public functions.

JIRA NVGPU-173

Change-Id: Ief2c64250d1f78e3b054203be56499e4d1d9b046
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2024024
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 4bb9b0b987 (parent e7c9a5309c), committed by mobile promotions
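The heart of the change is the guard relocation: call sites stop wrapping PMU calls in if (g->ops.pmu.is_pmu_supported(g)) { ... } and instead call the public function unconditionally, while the public function checks the support_ls_pmu flag itself and degrades to a no-op. A condensed sketch of the pattern, assembled from the nvgpu_pmu_mutex_acquire hunks in this diff (simplified, not the full file):

/* Public wrapper: the LS PMU support check lives here now. */
int nvgpu_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
{
    struct gk20a *g = gk20a_from_pmu(pmu);

    /* No LS PMU on this chip: nothing to lock, report success. */
    if (!g->support_ls_pmu) {
        return 0;
    }

    return g->ops.pmu.pmu_mutex_acquire(pmu, id, token);
}

/* Call sites collapse from a guarded block to a single call: */
mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

This is also why the call sites change the mutex_ret initializer from -EINVAL to 0: the acquire is now called unconditionally and returns 0 when LS PMU is absent, so the matching release path stays balanced.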
@@ -409,7 +409,7 @@ int nvgpu_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next,
     struct gk20a *g = ch->g;
     struct fifo_runlist_info_gk20a *runlist;
     u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-    int mutex_ret = -EINVAL;
+    int mutex_ret = 0;
     int ret = 0;
 
     runlist = &g->fifo.runlist_info[ch->runlist_id];
@@ -417,10 +417,9 @@ int nvgpu_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next,
         return -EBUSY;
     }
 
-    if (g->ops.pmu.is_pmu_supported(g)) {
-        mutex_ret = nvgpu_pmu_mutex_acquire(
-            &g->pmu, PMU_MUTEX_ID_FIFO, &token);
-    }
+    mutex_ret = nvgpu_pmu_mutex_acquire(
+        &g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
     g->ops.runlist.hw_submit(
         g, ch->runlist_id, runlist->count, runlist->cur_buffer);
@@ -461,7 +460,7 @@ static int gk20a_runlist_update(struct gk20a *g, u32 runlist_id,
     struct fifo_runlist_info_gk20a *runlist = NULL;
     struct fifo_gk20a *f = &g->fifo;
     u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-    int mutex_ret = -EINVAL;
+    int mutex_ret = 0;
     int ret = 0;
 
     nvgpu_log_fn(g, " ");
@@ -470,10 +469,8 @@ static int gk20a_runlist_update(struct gk20a *g, u32 runlist_id,
 
     nvgpu_mutex_acquire(&runlist->runlist_lock);
 
-    if (g->ops.pmu.is_pmu_supported(g)) {
-        mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
-            PMU_MUTEX_ID_FIFO, &token);
-    }
+    mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+        PMU_MUTEX_ID_FIFO, &token);
 
     ret = gk20a_runlist_update_locked(g, runlist_id, ch, add,
             wait_for_finish);
@@ -561,15 +558,14 @@ void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
         u32 runlist_state)
 {
     u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-    int mutex_ret = -EINVAL;
+    int mutex_ret = 0;
 
     nvgpu_log(g, gpu_dbg_info, "runlist mask = 0x%08x state = 0x%08x",
             runlists_mask, runlist_state);
 
-    if (g->ops.pmu.is_pmu_supported(g)) {
-        mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
-            PMU_MUTEX_ID_FIFO, &token);
-    }
+    mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+        PMU_MUTEX_ID_FIFO, &token);
 
     g->ops.runlist.write_state(g, runlists_mask, runlist_state);
 
@@ -88,7 +88,7 @@ int gk20a_prepare_poweroff(struct gk20a *g)
     }
 
     /* disable elpg before gr or fifo suspend */
-    if (g->ops.pmu.is_pmu_supported(g)) {
+    if (g->support_ls_pmu) {
         ret |= nvgpu_pmu_destroy(g);
     }
 
@@ -179,12 +179,10 @@ int gk20a_finalize_poweron(struct gk20a *g)
         goto done_gsp;
     }
 
-    if (g->ops.pmu.is_pmu_supported(g)) {
-        err = nvgpu_early_init_pmu_sw(g, &g->pmu);
-        if (err != 0) {
-            nvgpu_err(g, "failed to early init pmu sw");
-            goto done;
-        }
-    }
+    err = nvgpu_early_init_pmu_sw(g, &g->pmu);
+    if (err != 0) {
+        nvgpu_err(g, "failed to early init pmu sw");
+        goto done;
+    }
 
     if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SEC2_RTOS)) {
@@ -336,13 +334,11 @@ int gk20a_finalize_poweron(struct gk20a *g)
         }
     }
 
-    if (g->ops.pmu.is_pmu_supported(g)) {
-        err = nvgpu_init_pmu_support(g);
-        if (err != 0) {
-            nvgpu_err(g, "failed to init gk20a pmu");
-            nvgpu_mutex_release(&g->tpc_pg_lock);
-            goto done;
-        }
-    }
+    err = nvgpu_init_pmu_support(g);
+    if (err != 0) {
+        nvgpu_err(g, "failed to init gk20a pmu");
+        nvgpu_mutex_release(&g->tpc_pg_lock);
+        goto done;
+    }
 
     err = gk20a_init_gr_support(g);
@@ -395,7 +395,7 @@ int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock)
     is_rppg_supported = nvgpu_lpwr_is_rppg_supported(g,
             present_pstate);
     if (is_rppg_supported) {
-        if (g->support_pmu && g->can_elpg) {
+        if (g->can_elpg) {
             status = nvgpu_pmu_enable_elpg(g);
         }
     }
@@ -428,7 +428,7 @@ int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock)
     is_rppg_supported = nvgpu_lpwr_is_rppg_supported(g,
             present_pstate);
     if (is_rppg_supported) {
-        if (g->support_pmu && g->elpg_enabled) {
+        if (g->elpg_enabled) {
             status = nvgpu_pmu_disable_elpg(g);
             if (status != 0) {
                 goto exit_unlock;
@@ -298,69 +298,71 @@ int nvgpu_init_pmu_support(struct gk20a *g)
         return 0;
     }
 
-    if (g->support_pmu) {
-        err = nvgpu_init_pmu_setup_sw(g);
-        if (err != 0) {
-            goto exit;
-        }
-
-        if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
-
-            if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SEC2_RTOS)) {
-                /* Reset PMU engine */
-                err = nvgpu_falcon_reset(g->pmu.flcn);
-
-                /* Bootstrap PMU from SEC2 RTOS*/
-                err = nvgpu_sec2_bootstrap_ls_falcons(g, &g->sec2,
-                    FALCON_ID_PMU);
-                if (err != 0) {
-                    goto exit;
-                }
-            }
-
-            /*
-             * clear halt interrupt to avoid PMU-RTOS ucode
-             * hitting breakpoint due to PMU halt
-             */
-            err = nvgpu_falcon_clear_halt_intr_status(g->pmu.flcn,
-                gk20a_get_gr_idle_timeout(g));
-            if (err != 0) {
-                goto exit;
-            }
-
-            if (g->ops.pmu.setup_apertures != NULL) {
-                g->ops.pmu.setup_apertures(g);
-            }
-
-            if (g->ops.pmu.update_lspmu_cmdline_args != NULL) {
-                g->ops.pmu.update_lspmu_cmdline_args(g);
-            }
-
-            if (g->ops.pmu.pmu_enable_irq != NULL) {
-                nvgpu_mutex_acquire(&g->pmu.isr_mutex);
-                g->ops.pmu.pmu_enable_irq(&g->pmu, true);
-                g->pmu.isr_enabled = true;
-                nvgpu_mutex_release(&g->pmu.isr_mutex);
-            }
-
-            /*Once in LS mode, cpuctl_alias is only accessible*/
-            if (g->ops.pmu.secured_pmu_start != NULL) {
-                g->ops.pmu.secured_pmu_start(g);
-            }
-        } else {
-            /* prepare blob for non-secure PMU boot */
-            err = nvgpu_pmu_prepare_ns_ucode_blob(g);
-
-            /* Do non-secure PMU boot */
-            err = g->ops.pmu.pmu_setup_hw_and_bootstrap(g);
-            if (err != 0) {
-                goto exit;
-            }
-        }
-
-        nvgpu_pmu_state_change(g, PMU_STATE_STARTING, false);
-    }
+    if (!g->support_ls_pmu) {
+        goto exit;
+    }
+
+    err = nvgpu_init_pmu_setup_sw(g);
+    if (err != 0) {
+        goto exit;
+    }
+
+    if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
+
+        if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SEC2_RTOS)) {
+            /* Reset PMU engine */
+            err = nvgpu_falcon_reset(g->pmu.flcn);
+
+            /* Bootstrap PMU from SEC2 RTOS*/
+            err = nvgpu_sec2_bootstrap_ls_falcons(g, &g->sec2,
+                FALCON_ID_PMU);
+            if (err != 0) {
+                goto exit;
+            }
+        }
+
+        /*
+         * clear halt interrupt to avoid PMU-RTOS ucode
+         * hitting breakpoint due to PMU halt
+         */
+        err = nvgpu_falcon_clear_halt_intr_status(g->pmu.flcn,
+            gk20a_get_gr_idle_timeout(g));
+        if (err != 0) {
+            goto exit;
+        }
+
+        if (g->ops.pmu.setup_apertures != NULL) {
+            g->ops.pmu.setup_apertures(g);
+        }
+
+        if (g->ops.pmu.update_lspmu_cmdline_args != NULL) {
+            g->ops.pmu.update_lspmu_cmdline_args(g);
+        }
+
+        if (g->ops.pmu.pmu_enable_irq != NULL) {
+            nvgpu_mutex_acquire(&g->pmu.isr_mutex);
+            g->ops.pmu.pmu_enable_irq(&g->pmu, true);
+            g->pmu.isr_enabled = true;
+            nvgpu_mutex_release(&g->pmu.isr_mutex);
+        }
+
+        /*Once in LS mode, cpuctl_alias is only accessible*/
+        if (g->ops.pmu.secured_pmu_start != NULL) {
+            g->ops.pmu.secured_pmu_start(g);
+        }
+    } else {
+        /* prepare blob for non-secure PMU boot */
+        err = nvgpu_pmu_prepare_ns_ucode_blob(g);
+
+        /* Do non-secure PMU boot */
+        err = g->ops.pmu.pmu_setup_hw_and_bootstrap(g);
+        if (err != 0) {
+            goto exit;
+        }
+    }
+
+    nvgpu_pmu_state_change(g, PMU_STATE_STARTING, false);
 
 exit:
     return err;
 }
@@ -681,7 +683,7 @@ int nvgpu_pmu_destroy(struct gk20a *g)
 
     nvgpu_log_fn(g, " ");
 
-    if (!g->support_pmu) {
+    if (!g->support_ls_pmu) {
         return 0;
     }
 
@@ -1755,6 +1755,15 @@ int nvgpu_early_init_pmu_sw(struct gk20a *g, struct nvgpu_pmu *pmu)
 
     pmu->g = g;
 
+    if (!g->support_ls_pmu) {
+        goto exit;
+    }
+
+    if (!g->ops.pmu.is_pmu_supported(g)) {
+        g->support_ls_pmu = false;
+        goto exit;
+    }
+
     err = nvgpu_mutex_init(&pmu->elpg_mutex);
     if (err != 0) {
         return err;
@@ -96,6 +96,10 @@ int nvgpu_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
 {
     struct gk20a *g = gk20a_from_pmu(pmu);
 
+    if (!g->support_ls_pmu) {
+        return 0;
+    }
+
     return g->ops.pmu.pmu_mutex_acquire(pmu, id, token);
 }
 
@@ -103,6 +107,10 @@ int nvgpu_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
 {
     struct gk20a *g = gk20a_from_pmu(pmu);
 
+    if (!g->support_ls_pmu) {
+        return 0;
+    }
+
     return g->ops.pmu.pmu_mutex_release(pmu, id, token);
 }
 
@@ -118,6 +118,10 @@ int nvgpu_pmu_pg_global_enable(struct gk20a *g, bool enable_pg)
 {
     int status = 0;
 
+    if (!g->support_ls_pmu) {
+        return status;
+    }
+
     if (enable_pg) {
         if (g->ops.pmu.pmu_pg_engines_feature_list != NULL &&
             g->ops.pmu.pmu_pg_engines_feature_list(g,
@@ -127,7 +131,7 @@ int nvgpu_pmu_pg_global_enable(struct gk20a *g, bool enable_pg)
                 status = g->ops.pmu.pmu_lpwr_enable_pg(g,
                         true);
             }
-        } else if (g->support_pmu && g->can_elpg) {
+        } else if (g->can_elpg) {
             status = nvgpu_pmu_enable_elpg(g);
         }
     } else {
@@ -139,7 +143,7 @@ int nvgpu_pmu_pg_global_enable(struct gk20a *g, bool enable_pg)
                 status = g->ops.pmu.pmu_lpwr_disable_pg(g,
                         true);
             }
-        } else if (g->support_pmu && g->can_elpg) {
+        } else if (g->can_elpg) {
             status = nvgpu_pmu_disable_elpg(g);
         }
     }
@@ -201,7 +205,7 @@ int nvgpu_pmu_enable_elpg(struct gk20a *g)
 
     nvgpu_log_fn(g, " ");
 
-    if (!g->support_pmu) {
+    if (!g->support_ls_pmu) {
         return ret;
     }
 
@@ -269,12 +273,12 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
 
     nvgpu_log_fn(g, " ");
 
-    if (g->ops.pmu.pmu_pg_supported_engines_list != NULL) {
-        pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
+    if (!g->support_ls_pmu) {
+        return ret;
     }
 
-    if (!g->support_pmu) {
-        return ret;
+    if (g->ops.pmu.pmu_pg_supported_engines_list != NULL) {
+        pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
     }
 
     nvgpu_mutex_acquire(&pmu->elpg_mutex);
@@ -931,7 +931,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
     }
 
     if (engine_enum == NVGPU_ENGINE_GR_GK20A) {
-        if (g->support_pmu && g->can_elpg) {
+        if (g->can_elpg) {
             if (nvgpu_pmu_disable_elpg(g) != 0) {
                 nvgpu_err(g, "failed to set disable elpg");
             }
@@ -961,7 +961,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
                 "HALT gr pipe not supported and "
                 "gr cannot be reset without halting gr pipe");
         }
-        if (g->support_pmu && g->can_elpg) {
+        if (g->can_elpg) {
             nvgpu_pmu_enable_elpg(g);
         }
     }
@@ -1123,7 +1123,7 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
     g->fifo.deferred_reset_pending = false;
 
     /* Disable power management */
-    if (g->support_pmu && g->can_elpg) {
+    if (g->can_elpg) {
         if (nvgpu_pmu_disable_elpg(g) != 0) {
             nvgpu_err(g, "failed to set disable elpg");
         }
@@ -1327,7 +1327,7 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
             gr_gpfifo_ctl_semaphore_access_enabled_f());
 
     /* It is safe to enable ELPG again. */
-    if (g->support_pmu && g->can_elpg) {
+    if (g->can_elpg) {
         nvgpu_pmu_enable_elpg(g);
     }
 
@@ -2265,7 +2265,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
     struct fifo_gk20a *f = &g->fifo;
     int ret = 0;
     u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-    int mutex_ret = -EINVAL;
+    int mutex_ret = 0;
     u32 i;
 
     nvgpu_log_fn(g, "chid: %d", ch->chid);
@@ -2275,10 +2275,8 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
         nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock);
     }
 
-    if (g->ops.pmu.is_pmu_supported(g)) {
-        mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
-            PMU_MUTEX_ID_FIFO, &token);
-    }
+    mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+        PMU_MUTEX_ID_FIFO, &token);
 
     ret = __locked_fifo_preempt(g, ch->chid, false);
 
@@ -2310,7 +2308,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
     struct fifo_gk20a *f = &g->fifo;
     int ret = 0;
     u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-    int mutex_ret = -EINVAL;
+    int mutex_ret = 0;
     u32 i;
 
     nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
@@ -2320,10 +2318,8 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
         nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock);
     }
 
-    if (g->ops.pmu.is_pmu_supported(g)) {
-        mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
-            PMU_MUTEX_ID_FIFO, &token);
-    }
+    mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+        PMU_MUTEX_ID_FIFO, &token);
 
     ret = __locked_fifo_preempt(g, tsg->tsgid, true);
 
@@ -2398,7 +2394,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
     u32 pbdma_chid = FIFO_INVAL_CHANNEL_ID;
     u32 engine_chid = FIFO_INVAL_CHANNEL_ID;
     u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-    int mutex_ret = -EINVAL;
+    int mutex_ret = 0;
     struct channel_gk20a *ch = NULL;
     int err = 0;
     struct nvgpu_engine_status_info engine_status;
@@ -2412,10 +2408,8 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
         return -EBUSY;
     }
 
-    if (g->ops.pmu.is_pmu_supported(g)) {
-        mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
-            PMU_MUTEX_ID_FIFO, &token);
-    }
+    mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+        PMU_MUTEX_ID_FIFO, &token);
 
     gk20a_fifo_set_runlist_state(g, BIT32(eng_info->runlist_id),
             RUNLIST_DISABLED);
 
@@ -402,7 +402,7 @@ u32 gk20a_gr_get_sm_no_lock_down_hww_global_esr_mask(struct gk20a *g);
 #define gr_gk20a_elpg_protected_call(g, func) \
     ({ \
         int err = 0; \
-        if (((g)->support_pmu) && ((g)->elpg_enabled)) {\
+        if ((g)->elpg_enabled) {\
             err = nvgpu_pmu_disable_elpg(g); \
             if (err != 0) {\
                 nvgpu_pmu_enable_elpg(g); \
@@ -410,7 +410,7 @@ u32 gk20a_gr_get_sm_no_lock_down_hww_global_esr_mask(struct gk20a *g);
         } \
         if (err == 0) { \
             err = (func); \
-            if (((g)->support_pmu) && ((g)->elpg_enabled)) {\
+            if ((g)->elpg_enabled) {\
                 nvgpu_pmu_enable_elpg(g); \
             } \
         } \
@@ -637,6 +637,7 @@ static const struct gpu_ops gm20b_ops = {
         .idle_slowdown_disable = gm20b_therm_idle_slowdown_disable,
     },
     .pmu = {
+        .is_pmu_supported = gm20b_is_pmu_supported,
         .falcon_base_addr = gk20a_pmu_falcon_base_addr,
         .pmu_setup_elpg = gm20b_pmu_setup_elpg,
         .pmu_get_queue_head = pwr_pmu_queue_head_r,
@@ -895,7 +896,6 @@ int gm20b_init_hal(struct gk20a *g)
     /* priv security dependent ops */
     if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
         /* Add in ops from gm20b acr */
-        gops->pmu.is_pmu_supported = gm20b_is_pmu_supported;
         gops->pmu.update_lspmu_cmdline_args =
             gm20b_update_lspmu_cmdline_args;
         gops->pmu.setup_apertures = gm20b_pmu_setup_apertures;
@@ -907,7 +907,6 @@ int gm20b_init_hal(struct gk20a *g)
         gops->gr.load_ctxsw_ucode = gr_gm20b_load_ctxsw_ucode;
     } else {
         /* Inherit from gk20a */
-        gops->pmu.is_pmu_supported = gk20a_is_pmu_supported;
         gops->pmu.pmu_setup_hw_and_bootstrap =
             gm20b_ns_pmu_setup_hw_and_bootstrap;
         gops->pmu.pmu_nsbootstrap = pmu_bootstrap;
@@ -713,6 +713,7 @@ static const struct gpu_ops gp10b_ops = {
         .elcg_init_idle_filters = gp10b_elcg_init_idle_filters,
     },
     .pmu = {
+        .is_pmu_supported = gp10b_is_pmu_supported,
         .falcon_base_addr = gk20a_pmu_falcon_base_addr,
         .pmu_setup_elpg = gp10b_pmu_setup_elpg,
         .pmu_get_queue_head = pwr_pmu_queue_head_r,
@@ -968,7 +969,7 @@ int gp10b_init_hal(struct gk20a *g)
     /* priv security dependent ops */
     if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
         /* Add in ops from gm20b acr */
-        gops->pmu.is_pmu_supported = gm20b_is_pmu_supported,
+
         gops->pmu.update_lspmu_cmdline_args =
             gm20b_update_lspmu_cmdline_args;
         gops->pmu.setup_apertures = gm20b_pmu_setup_apertures;
@@ -980,7 +981,6 @@ int gp10b_init_hal(struct gk20a *g)
         gops->gr.load_ctxsw_ucode = gr_gm20b_load_ctxsw_ucode;
     } else {
         /* Inherit from gk20a */
-        gops->pmu.is_pmu_supported = gk20a_is_pmu_supported,
         gops->pmu.pmu_setup_hw_and_bootstrap =
             gm20b_ns_pmu_setup_hw_and_bootstrap;
         gops->pmu.pmu_nsbootstrap = pmu_bootstrap,
@@ -761,7 +761,7 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
     struct fifo_gk20a *f = &g->fifo;
     int ret = 0;
     u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-    int mutex_ret = -EINVAL;
+    int mutex_ret = 0;
     u32 runlist_id;
 
     nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
@@ -777,10 +777,8 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
     /* WAR for Bug 2065990 */
     gk20a_tsg_disable_sched(g, tsg);
 
-    if (g->ops.pmu.is_pmu_supported(g)) {
-        mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
-            PMU_MUTEX_ID_FIFO, &token);
-    }
+    mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+        PMU_MUTEX_ID_FIFO, &token);
 
     ret = __locked_fifo_preempt(g, tsg->tsgid, true);
 
@@ -814,16 +812,14 @@ static void gv11b_fifo_locked_preempt_runlists_rc(struct gk20a *g,
         u32 runlists_mask)
 {
     u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-    int mutex_ret = -EINVAL;
+    int mutex_ret = 0;
     u32 rlid;
 
     /* runlist_lock are locked by teardown and sched are disabled too */
     nvgpu_log_fn(g, "preempt runlists_mask:0x%08x", runlists_mask);
 
-    if (g->ops.pmu.is_pmu_supported(g)) {
-        mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
-            PMU_MUTEX_ID_FIFO, &token);
-    }
+    mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+        PMU_MUTEX_ID_FIFO, &token);
 
     /* issue runlist preempt */
     gv11b_fifo_issue_runlist_preempt(g, runlists_mask);
@@ -860,16 +856,14 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
     u32 rlid;
     struct fifo_runlist_info_gk20a *runlist = NULL;
     u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-    int mutex_ret = -EINVAL;
+    int mutex_ret = 0;
     int err;
 
     nvgpu_err(g, "runlist id unknown, abort active tsgs in runlists");
 
     /* runlist_lock are locked by teardown */
-    if (g->ops.pmu.is_pmu_supported(g)) {
-        mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
-            PMU_MUTEX_ID_FIFO, &token);
-    }
+    mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+        PMU_MUTEX_ID_FIFO, &token);
 
     for (rlid = 0; rlid < g->fifo.max_runlists;
             rlid++) {
@@ -1053,7 +1047,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
     g->fifo.deferred_reset_pending = false;
 
     /* Disable power management */
-    if (g->support_pmu && g->elpg_enabled) {
+    if (g->elpg_enabled) {
         if (nvgpu_pmu_disable_elpg(g) != 0) {
             nvgpu_err(g, "failed to set disable elpg");
         }
@@ -1184,7 +1178,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
     gk20a_fifo_set_runlist_state(g, runlists_mask, RUNLIST_ENABLED);
 
     /* It is safe to enable ELPG again. */
-    if (g->support_pmu && g->elpg_enabled) {
+    if (g->elpg_enabled) {
         if (nvgpu_pmu_enable_elpg(g) != 0) {
             nvgpu_err(g, "ELPG enable failed");
         }
@@ -1779,7 +1779,7 @@ struct gk20a {
     /* Debugfs knob for forcing syncpt support off in runtime. */
     u32 disable_syncpoints;
 
-    bool support_pmu;
+    bool support_ls_pmu;
 
     bool is_virtual;
 
@@ -152,14 +152,6 @@ static void nvgpu_init_pm_vars(struct gk20a *g)
         nvgpu_platform_is_silicon(g) ? platform->enable_blcg : false;
     g->elcg_enabled =
         nvgpu_platform_is_silicon(g) ? platform->enable_elcg : false;
-    g->elpg_enabled =
-        nvgpu_platform_is_silicon(g) ? platform->enable_elpg : false;
-    g->aelpg_enabled =
-        nvgpu_platform_is_silicon(g) ? platform->enable_aelpg : false;
-    g->mscg_enabled =
-        nvgpu_platform_is_silicon(g) ? platform->enable_mscg : false;
-    g->can_elpg =
-        nvgpu_platform_is_silicon(g) ? platform->can_elpg_init : false;
 
     nvgpu_set_enabled(g, NVGPU_GPU_CAN_ELCG,
         nvgpu_platform_is_silicon(g) ? platform->can_elcg : false);
@@ -174,7 +166,7 @@ static void nvgpu_init_pm_vars(struct gk20a *g)
     g->has_cde = platform->has_cde;
 #endif
     g->ptimer_src_freq = platform->ptimer_src_freq;
-    g->support_pmu = support_gk20a_pmu(dev_from_gk20a(g));
 
     nvgpu_set_enabled(g, NVGPU_CAN_RAILGATE, platform->can_railgate_init);
     g->can_tpc_powergate = platform->can_tpc_powergate;
@@ -187,14 +179,28 @@ static void nvgpu_init_pm_vars(struct gk20a *g)
         g->railgate_delay = platform->railgate_delay_init;
     else
         g->railgate_delay = NVGPU_DEFAULT_RAILGATE_IDLE_TIMEOUT;
-    nvgpu_set_enabled(g, NVGPU_PMU_PERFMON, platform->enable_perfmon);
 
-    /* set default values to aelpg parameters */
-    g->pmu.aelpg_param[0] = APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US;
-    g->pmu.aelpg_param[1] = APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US;
-    g->pmu.aelpg_param[2] = APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US;
-    g->pmu.aelpg_param[3] = APCTRL_POWER_BREAKEVEN_DEFAULT_US;
-    g->pmu.aelpg_param[4] = APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT;
+    g->support_ls_pmu = support_gk20a_pmu(dev_from_gk20a(g));
+
+    if (g->support_ls_pmu) {
+        g->elpg_enabled =
+            nvgpu_platform_is_silicon(g) ? platform->enable_elpg : false;
+        g->aelpg_enabled =
+            nvgpu_platform_is_silicon(g) ? platform->enable_aelpg : false;
+        g->mscg_enabled =
+            nvgpu_platform_is_silicon(g) ? platform->enable_mscg : false;
+        g->can_elpg =
+            nvgpu_platform_is_silicon(g) ? platform->can_elpg_init : false;
+
+        nvgpu_set_enabled(g, NVGPU_PMU_PERFMON, platform->enable_perfmon);
+
+        /* set default values to aelpg parameters */
+        g->pmu.aelpg_param[0] = APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US;
+        g->pmu.aelpg_param[1] = APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US;
+        g->pmu.aelpg_param[2] = APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US;
+        g->pmu.aelpg_param[3] = APCTRL_POWER_BREAKEVEN_DEFAULT_US;
+        g->pmu.aelpg_param[4] = APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT;
+    }
 
     nvgpu_set_enabled(g, NVGPU_SUPPORT_ASPM, !platform->disable_aspm);
 }