Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: Compile out PMU PG/LSFM code for safety
Compile out PMU PG and LSFM calls made from other units when PMU RTOS
support is disabled for the safety build, by setting the NVGPU_LS_PMU
build flag to 0.

NVGPU JIRA-3418

Change-Id: I6a5089b37344697ffb0cc9ad301f4e7cf03f9f55
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2117770
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: cd6f926c1b
Commit: b6dfba15fa
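The diff below applies one pattern across the ACR, GR falcon, ZBC and ELPG units: every call into the LS-PMU unit is wrapped in #ifdef NVGPU_LS_PMU, and the caller either skips the call or falls back to a "return 0" path when the flag is not defined for the safety build. A minimal stand-alone sketch of that pattern follows; the function names in it are hypothetical placeholders, not nvgpu APIs.

/*
 * Minimal sketch of the compile-out pattern used by this change.
 * ls_pmu_bootstrap_falcon() and bind_fecs_falcon() are made-up names
 * for illustration only.
 */
#include <stdio.h>

#ifdef NVGPU_LS_PMU
/* Stand-in for a PMU PG/LSFM service that only exists in full builds. */
static int ls_pmu_bootstrap_falcon(unsigned int falcon_mask)
{
	printf("LS PMU bootstraps falcon mask 0x%x\n", falcon_mask);
	return 0;
}
#endif

static int bind_fecs_falcon(void)
{
#ifdef NVGPU_LS_PMU
	/* Full build: route the request through the LS PMU. */
	return ls_pmu_bootstrap_falcon(0x1U);
#else
	/* Safety build: PMU RTOS support is compiled out, succeed as a no-op. */
	return 0;
#endif
}

int main(void)
{
	return bind_fecs_falcon();
}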
@@ -35,6 +35,7 @@
#include "acr_wpr.h"
#include "acr_priv.h"

#ifdef NVGPU_LS_PMU
int nvgpu_acr_lsf_pmu_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)
{
	struct lsf_ucode_desc *lsf_desc;
@@ -69,6 +70,7 @@ int nvgpu_acr_lsf_pmu_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)
exit:
	return err;
}
#endif

int nvgpu_acr_lsf_fecs_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)
{
@@ -555,8 +557,9 @@ static int gm20b_pmu_populate_loader_cfg(struct gk20a *g,

	/* Update the argc/argv members*/
	ldr_cfg->argc = 1;
#ifdef NVGPU_LS_PMU
	nvgpu_pmu_fw_get_cmd_line_args_offset(g, &ldr_cfg->argv);

#endif
	*p_bl_gen_desc_size = (u32)sizeof(struct loader_config);
	return 0;
}

@@ -116,9 +116,10 @@ static u32 gm20b_acr_lsf_pmu(struct gk20a *g,
	lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
	lsf->is_lazy_bootstrap = false;
	lsf->is_priv_load = false;
#ifdef NVGPU_LS_PMU
	lsf->get_lsf_ucode_details = nvgpu_acr_lsf_pmu_ucode_details_v0;
	lsf->get_cmd_line_args_offset = nvgpu_pmu_fw_get_cmd_line_args_offset;

#endif
	return BIT32(lsf->falcon_id);
}

@@ -96,9 +96,8 @@ static u32 gv100_acr_lsf_pmu(struct gk20a *g,
	lsf->is_priv_load = false;
#ifdef NVGPU_LS_PMU
	lsf->get_lsf_ucode_details = nvgpu_acr_lsf_pmu_ucode_details_v1;
#endif
	lsf->get_cmd_line_args_offset = nvgpu_pmu_fw_get_cmd_line_args_offset;

#endif
	return BIT32(lsf->falcon_id);
}

@@ -82,6 +82,7 @@ void nvgpu_gr_falcon_remove_support(struct gk20a *g,

int nvgpu_gr_falcon_bind_fecs_elpg(struct gk20a *g)
{
#ifdef NVGPU_LS_PMU
	struct nvgpu_pmu *pmu = g->pmu;
	struct mm_gk20a *mm = &g->mm;
	struct vm_gk20a *vm = mm->pmu.vm;
@@ -127,8 +128,10 @@ int nvgpu_gr_falcon_bind_fecs_elpg(struct gk20a *g)
			"fail to set pg buffer pmu va");
		return err;
	}

	return err;
#else
	return 0;
#endif
}

int nvgpu_gr_falcon_init_ctxsw(struct gk20a *g, struct nvgpu_gr_falcon *falcon)
@@ -551,8 +554,10 @@ int nvgpu_gr_falcon_load_secure_ctxsw_ucode(struct gk20a *g,
		/* this must be recovery so bootstrap fecs and gpccs */
		if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) {
			nvgpu_gr_falcon_load_gpccs_with_bootloader(g, falcon);
#ifdef NVGPU_LS_PMU
			err = nvgpu_pmu_lsfm_bootstrap_ls_falcon(g, g->pmu,
				g->pmu->lsfm, BIT32(FALCON_ID_FECS));
#endif
		} else {
			/* bind WPR VA inst block */
			nvgpu_gr_falcon_bind_instblk(g, falcon);
@@ -564,12 +569,15 @@ int nvgpu_gr_falcon_load_secure_ctxsw_ucode(struct gk20a *g,
				&g->sec2, FALCON_ID_GPCCS);
		} else
#endif
#ifdef NVGPU_LS_PMU
		if (g->support_ls_pmu) {
			err = nvgpu_pmu_lsfm_bootstrap_ls_falcon(g,
				g->pmu, g->pmu->lsfm,
				BIT32(FALCON_ID_FECS) |
				BIT32(FALCON_ID_GPCCS));
		} else {
		} else
#endif
		{
			err = nvgpu_acr_bootstrap_hs_acr(g, g->acr);
			if (err != 0) {
				nvgpu_err(g,
@@ -607,11 +615,14 @@ int nvgpu_gr_falcon_load_secure_ctxsw_ucode(struct gk20a *g,
				&g->sec2, FALCON_ID_GPCCS);
		} else
#endif
#ifdef NVGPU_LS_PMU
		if (g->support_ls_pmu) {
			err = nvgpu_pmu_lsfm_bootstrap_ls_falcon(g,
				g->pmu, g->pmu->lsfm,
				falcon_id_mask);
		} else {
		} else
#endif
		{
			/* GR falcons bootstrapped by ACR */
			err = 0;
		}

@@ -527,8 +527,9 @@ restore_fe_go_idle:
	}

	golden_image->ready = true;

#ifdef NVGPU_LS_PMU
	nvgpu_pmu_set_golden_image_initialized(g, true);
#endif
	g->ops.gr.falcon.set_current_ctx_invalid(g);

clean_up:
@@ -726,8 +727,9 @@ void nvgpu_gr_obj_ctx_deinit(struct gk20a *g,
			golden_image->local_golden_image);
		golden_image->local_golden_image = NULL;
	}

#ifdef NVGPU_LS_PMU
	nvgpu_pmu_set_golden_image_initialized(g, false);
#endif
	golden_image->ready = false;
	nvgpu_kfree(g, golden_image);
}

@@ -37,7 +37,9 @@ static int nvgpu_gr_zbc_add(struct gk20a *g, struct nvgpu_gr_zbc *zbc,
	u32 i;
	int ret = -ENOSPC;
	bool added = false;
#ifdef NVGPU_LS_PMU
	u32 entries;
#endif

	/* no endian swap ? */

@@ -130,6 +132,7 @@ static int nvgpu_gr_zbc_add(struct gk20a *g, struct nvgpu_gr_zbc *zbc,
		goto err_mutex;
	}

#ifdef NVGPU_LS_PMU
	if (!added && ret == 0) {
		/* update zbc for elpg only when new entry is added */
		entries = max(zbc->max_used_color_index,
@@ -138,6 +141,7 @@ static int nvgpu_gr_zbc_add(struct gk20a *g, struct nvgpu_gr_zbc *zbc,
			nvgpu_pmu_save_zbc(g, entries);
		}
	}
#endif

err_mutex:
	nvgpu_mutex_release(&zbc->zbc_lock);

@@ -40,7 +40,7 @@ bool nvgpu_pg_elpg_is_enabled(struct gk20a *g)
int nvgpu_pg_elpg_enable(struct gk20a *g)
{
	int err = 0;

#ifdef NVGPU_LS_PMU
	nvgpu_log_fn(g, " ");

	if (!g->can_elpg) {
@@ -54,13 +54,14 @@ int nvgpu_pg_elpg_enable(struct gk20a *g)
		err = nvgpu_pmu_pg_global_enable(g, true);
	}
	nvgpu_mutex_release(&g->cg_pg_lock);
#endif
	return err;
}

int nvgpu_pg_elpg_disable(struct gk20a *g)
{
	int err = 0;

#ifdef NVGPU_LS_PMU
	nvgpu_log_fn(g, " ");

	if (!g->can_elpg) {
@@ -74,6 +75,7 @@ int nvgpu_pg_elpg_disable(struct gk20a *g)
		err = nvgpu_pmu_pg_global_enable(g, false);
	}
	nvgpu_mutex_release(&g->cg_pg_lock);
#endif
	return err;
}

@@ -105,8 +107,9 @@ int nvgpu_pg_elpg_set_elpg_enabled(struct gk20a *g, bool enable)
	if (!change_mode) {
		goto done;
	}

#ifdef NVGPU_LS_PMU
	err = nvgpu_pmu_pg_global_enable(g, enable);
#endif
done:
	nvgpu_mutex_release(&g->cg_pg_lock);
	return err;