diff --git a/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v0.c b/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v0.c index e550536b4..2e01c433a 100644 --- a/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v0.c +++ b/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v0.c @@ -50,9 +50,9 @@ int nvgpu_acr_lsf_pmu_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img) goto exit; } - fw_sig = nvgpu_pmu_fw_sig_desc(g, &g->pmu); - fw_desc = nvgpu_pmu_fw_desc_desc(g, &g->pmu); - fw_image = nvgpu_pmu_fw_image_desc(g, &g->pmu); + fw_sig = nvgpu_pmu_fw_sig_desc(g, g->pmu); + fw_desc = nvgpu_pmu_fw_desc_desc(g, g->pmu); + fw_image = nvgpu_pmu_fw_image_desc(g, g->pmu); nvgpu_memcpy((u8 *)lsf_desc, (u8 *)fw_sig->data, min_t(size_t, sizeof(*lsf_desc), fw_sig->size)); diff --git a/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v1.c b/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v1.c index a0878d94c..9f6d88325 100644 --- a/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v1.c +++ b/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v1.c @@ -55,9 +55,9 @@ int nvgpu_acr_lsf_pmu_ucode_details_v1(struct gk20a *g, void *lsf_ucode_img) goto exit; } - fw_sig = nvgpu_pmu_fw_sig_desc(g, &g->pmu); - fw_desc = nvgpu_pmu_fw_desc_desc(g, &g->pmu); - fw_image = nvgpu_pmu_fw_image_desc(g, &g->pmu); + fw_sig = nvgpu_pmu_fw_sig_desc(g, g->pmu); + fw_desc = nvgpu_pmu_fw_desc_desc(g, g->pmu); + fw_image = nvgpu_pmu_fw_image_desc(g, g->pmu); nvgpu_memcpy((u8 *)lsf_desc, (u8 *)fw_sig->data, min_t(size_t, sizeof(*lsf_desc), fw_sig->size)); diff --git a/drivers/gpu/nvgpu/common/acr/acr_sw_gm20b.c b/drivers/gpu/nvgpu/common/acr/acr_sw_gm20b.c index e9e745aa3..27bd1eeca 100644 --- a/drivers/gpu/nvgpu/common/acr/acr_sw_gm20b.c +++ b/drivers/gpu/nvgpu/common/acr/acr_sw_gm20b.c @@ -165,7 +165,7 @@ static void gm20b_acr_default_sw_init(struct gk20a *g, struct hs_acr *hs_acr) hs_acr->bl_dmem_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc); /* set on which falcon ACR need to execute*/ - hs_acr->acr_flcn = 
&g->pmu.flcn; + hs_acr->acr_flcn = g->pmu->flcn; hs_acr->acr_flcn_setup_boot_config = g->ops.pmu.flcn_setup_boot_config; hs_acr->acr_engine_bus_err_status = diff --git a/drivers/gpu/nvgpu/common/acr/acr_sw_gv11b.c b/drivers/gpu/nvgpu/common/acr/acr_sw_gv11b.c index 28988506c..4f425337f 100644 --- a/drivers/gpu/nvgpu/common/acr/acr_sw_gv11b.c +++ b/drivers/gpu/nvgpu/common/acr/acr_sw_gv11b.c @@ -161,7 +161,7 @@ static void gv11b_acr_default_sw_init(struct gk20a *g, struct hs_acr *hs_acr) hs_acr->ptr_bl_dmem_desc = &hs_acr->bl_dmem_desc_v1; hs_acr->bl_dmem_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc_v1); - hs_acr->acr_flcn = &g->pmu.flcn; + hs_acr->acr_flcn = g->pmu->flcn; hs_acr->acr_flcn_setup_boot_config = g->ops.pmu.flcn_setup_boot_config; hs_acr->report_acr_engine_bus_err_status = diff --git a/drivers/gpu/nvgpu/common/clk_arb/clk_arb.c b/drivers/gpu/nvgpu/common/clk_arb/clk_arb.c index 0bfd94969..760af2ee0 100644 --- a/drivers/gpu/nvgpu/common/clk_arb/clk_arb.c +++ b/drivers/gpu/nvgpu/common/clk_arb/clk_arb.c @@ -233,7 +233,7 @@ static void nvgpu_clk_arb_run_vf_table_cb(struct nvgpu_clk_arb *arb) int err; /* get latest vf curve from pmu */ - err = g->pmu.clk_pmu->nvgpu_clk_vf_point_cache(g); + err = g->pmu->clk_pmu->nvgpu_clk_vf_point_cache(g); if (err != 0) { nvgpu_err(g, "failed to cache VF table"); nvgpu_clk_arb_set_global_alarm(g, diff --git a/drivers/gpu/nvgpu/common/clk_arb/clk_arb_gv100.c b/drivers/gpu/nvgpu/common/clk_arb/clk_arb_gv100.c index 843e39c9b..720829313 100644 --- a/drivers/gpu/nvgpu/common/clk_arb/clk_arb_gv100.c +++ b/drivers/gpu/nvgpu/common/clk_arb/clk_arb_gv100.c @@ -59,7 +59,7 @@ int gv100_get_arbiter_clk_range(struct gk20a *g, u32 api_domain, { u32 clkwhich; struct clk_set_info *p0_info; - struct nvgpu_avfsfllobjs *pfllobjs = g->pmu.clk_pmu->avfs_fllobjs; + struct nvgpu_avfsfllobjs *pfllobjs = g->pmu->clk_pmu->avfs_fllobjs; u16 limit_min_mhz; bool error_status = false; diff --git a/drivers/gpu/nvgpu/common/falcon/falcon.c 
b/drivers/gpu/nvgpu/common/falcon/falcon.c index f4f178d8c..f9b9df606 100644 --- a/drivers/gpu/nvgpu/common/falcon/falcon.c +++ b/drivers/gpu/nvgpu/common/falcon/falcon.c @@ -646,7 +646,7 @@ static struct nvgpu_falcon *falcon_get_instance(struct gk20a *g, u32 flcn_id) switch (flcn_id) { case FALCON_ID_PMU: - flcn = &g->pmu.flcn; + flcn = &g->pmu_flcn; break; case FALCON_ID_SEC2: flcn = &g->sec2.flcn; diff --git a/drivers/gpu/nvgpu/common/fifo/engines.c b/drivers/gpu/nvgpu/common/fifo/engines.c index 2da9747ac..9fc69f240 100644 --- a/drivers/gpu/nvgpu/common/fifo/engines.c +++ b/drivers/gpu/nvgpu/common/fifo/engines.c @@ -298,7 +298,7 @@ int nvgpu_engine_disable_activity(struct gk20a *g, } if (g->ops.pmu.is_pmu_supported(g)) { - mutex_ret = nvgpu_pmu_lock_acquire(g, &g->pmu, + mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu, PMU_MUTEX_ID_FIFO, &token); } @@ -351,7 +351,10 @@ int nvgpu_engine_disable_activity(struct gk20a *g, clean_up: if (mutex_ret == 0) { - nvgpu_pmu_lock_release(g, &g->pmu, PMU_MUTEX_ID_FIFO, &token); + if (nvgpu_pmu_lock_release(g, g->pmu, + PMU_MUTEX_ID_FIFO, &token) != 0){ + nvgpu_err(g, "failed to release PMU lock"); + } } if (err != 0) { diff --git a/drivers/gpu/nvgpu/common/fifo/runlist.c b/drivers/gpu/nvgpu/common/fifo/runlist.c index 4eac0c8b6..56b217c75 100644 --- a/drivers/gpu/nvgpu/common/fifo/runlist.c +++ b/drivers/gpu/nvgpu/common/fifo/runlist.c @@ -461,7 +461,7 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next, } mutex_ret = nvgpu_pmu_lock_acquire( - g, &g->pmu, PMU_MUTEX_ID_FIFO, &token); + g, g->pmu, PMU_MUTEX_ID_FIFO, &token); g->ops.runlist.hw_submit( @@ -480,7 +480,7 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next, } if (mutex_ret == 0) { - if (nvgpu_pmu_lock_release(g, &g->pmu, + if (nvgpu_pmu_lock_release(g, g->pmu, PMU_MUTEX_ID_FIFO, &token) != 0) { nvgpu_err(g, "failed to release PMU lock"); } @@ -510,14 +510,14 @@ static int nvgpu_runlist_update(struct gk20a *g, u32 
runlist_id, nvgpu_mutex_acquire(&runlist->runlist_lock); - mutex_ret = nvgpu_pmu_lock_acquire(g, &g->pmu, + mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu, PMU_MUTEX_ID_FIFO, &token); ret = nvgpu_runlist_update_locked(g, runlist_id, ch, add, wait_for_finish); if (mutex_ret == 0) { - if (nvgpu_pmu_lock_release(g, &g->pmu, + if (nvgpu_pmu_lock_release(g, g->pmu, PMU_MUTEX_ID_FIFO, &token) != 0) { nvgpu_err(g, "failed to release PMU lock"); } @@ -608,13 +608,13 @@ void nvgpu_fifo_runlist_set_state(struct gk20a *g, u32 runlists_mask, runlists_mask, runlist_state); - mutex_ret = nvgpu_pmu_lock_acquire(g, &g->pmu, + mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu, PMU_MUTEX_ID_FIFO, &token); g->ops.runlist.write_state(g, runlists_mask, runlist_state); if (mutex_ret == 0) { - if (nvgpu_pmu_lock_release(g, &g->pmu, + if (nvgpu_pmu_lock_release(g, g->pmu, PMU_MUTEX_ID_FIFO, &token) != 0) { nvgpu_err(g, "failed to release PMU lock"); } diff --git a/drivers/gpu/nvgpu/common/gr/gr_falcon.c b/drivers/gpu/nvgpu/common/gr/gr_falcon.c index 2c1e60fb1..55ed515cf 100644 --- a/drivers/gpu/nvgpu/common/gr/gr_falcon.c +++ b/drivers/gpu/nvgpu/common/gr/gr_falcon.c @@ -80,7 +80,7 @@ void nvgpu_gr_falcon_remove_support(struct gk20a *g, int nvgpu_gr_falcon_bind_fecs_elpg(struct gk20a *g) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct mm_gk20a *mm = &g->mm; struct vm_gk20a *vm = mm->pmu.vm; int err = 0; @@ -549,8 +549,8 @@ int nvgpu_gr_falcon_load_secure_ctxsw_ucode(struct gk20a *g, /* this must be recovery so bootstrap fecs and gpccs */ if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) { nvgpu_gr_falcon_load_gpccs_with_bootloader(g, falcon); - err = nvgpu_pmu_lsfm_bootstrap_ls_falcon(g, &g->pmu, - g->pmu.lsfm, BIT32(FALCON_ID_FECS)); + err = nvgpu_pmu_lsfm_bootstrap_ls_falcon(g, g->pmu, + g->pmu->lsfm, BIT32(FALCON_ID_FECS)); } else { /* bind WPR VA inst block */ nvgpu_gr_falcon_bind_instblk(g, falcon); @@ -561,7 +561,7 @@ int 
nvgpu_gr_falcon_load_secure_ctxsw_ucode(struct gk20a *g, &g->sec2, FALCON_ID_GPCCS); } else if (g->support_ls_pmu) { err = nvgpu_pmu_lsfm_bootstrap_ls_falcon(g, - &g->pmu, g->pmu.lsfm, + g->pmu, g->pmu->lsfm, BIT32(FALCON_ID_FECS) | BIT32(FALCON_ID_GPCCS)); } else { @@ -601,7 +601,7 @@ int nvgpu_gr_falcon_load_secure_ctxsw_ucode(struct gk20a *g, &g->sec2, FALCON_ID_GPCCS); } else if (g->support_ls_pmu) { err = nvgpu_pmu_lsfm_bootstrap_ls_falcon(g, - &g->pmu, g->pmu.lsfm, + g->pmu, g->pmu->lsfm, falcon_id_mask); } else { /* GR falcons bootstrapped by ACR */ diff --git a/drivers/gpu/nvgpu/common/gr/zbc.c b/drivers/gpu/nvgpu/common/gr/zbc.c index a05235323..1f6b81edd 100644 --- a/drivers/gpu/nvgpu/common/gr/zbc.c +++ b/drivers/gpu/nvgpu/common/gr/zbc.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "zbc_priv.h" diff --git a/drivers/gpu/nvgpu/common/init/nvgpu_init.c b/drivers/gpu/nvgpu/common/init/nvgpu_init.c index 5c960d99c..86d2dde08 100644 --- a/drivers/gpu/nvgpu/common/init/nvgpu_init.c +++ b/drivers/gpu/nvgpu/common/init/nvgpu_init.c @@ -100,7 +100,7 @@ int gk20a_prepare_poweroff(struct gk20a *g) /* disable elpg before gr or fifo suspend */ if (g->support_ls_pmu) { - ret = nvgpu_pmu_destroy(g, &g->pmu); + ret = nvgpu_pmu_destroy(g, g->pmu); } if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SEC2_RTOS)) { @@ -368,7 +368,7 @@ int gk20a_finalize_poweron(struct gk20a *g) } } - err = nvgpu_pmu_init(g, &g->pmu); + err = nvgpu_pmu_init(g, g->pmu); if (err != 0) { nvgpu_err(g, "failed to init gk20a pmu"); nvgpu_mutex_release(&g->tpc_pg_lock); @@ -414,8 +414,8 @@ int gk20a_finalize_poweron(struct gk20a *g) } if (nvgpu_is_enabled(g, NVGPU_PMU_PSTATE) && - (g->pmu.fw->ops.clk.clk_set_boot_clk != NULL)) { - err = g->pmu.fw->ops.clk.clk_set_boot_clk(g); + (g->pmu->fw->ops.clk.clk_set_boot_clk != NULL)) { + err = g->pmu->fw->ops.clk.clk_set_boot_clk(g); if (err != 0) { nvgpu_err(g, "failed to set boot clk"); goto done; diff --git 
a/drivers/gpu/nvgpu/common/pmu/boardobj/boardobjgrp.c b/drivers/gpu/nvgpu/common/pmu/boardobj/boardobjgrp.c index a532950dd..e767055ea 100644 --- a/drivers/gpu/nvgpu/common/pmu/boardobj/boardobjgrp.c +++ b/drivers/gpu/nvgpu/common/pmu/boardobj/boardobjgrp.c @@ -318,7 +318,6 @@ static int pmu_cmd_pmu_init_handle_impl(struct gk20a *g, nvgpu_log_info(g, " "); - if (is_pmu_cmd_id_valid(g, pboardobjgrp, pcmd) != 0) { goto pmu_cmd_pmu_init_handle_impl_exit; @@ -366,7 +365,6 @@ static int pmu_init_handle_impl(struct gk20a *g, } /* If the GRP_SET CMD has not been allocated, nothing left to do. */ - if ((is_pmu_cmd_id_valid(g, pboardobjgrp, &pboardobjgrp->pmu.set) != 0)|| (BOARDOBJGRP_IS_EMPTY(pboardobjgrp))) { @@ -388,7 +386,7 @@ static int pmu_cmd_send_rpc(struct gk20a *g, struct boardobjgrp_pmu_cmd *pcmd, bool copy_out) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct nv_pmu_rpc_struct_board_obj_grp_cmd rpc; int status = 0; @@ -426,7 +424,7 @@ static int pmu_cmd_send_rpc(struct gk20a *g, static int pmu_set_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; int status = 0; struct boardobjgrp_pmu_cmd *pcmd = (struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.set); @@ -489,7 +487,7 @@ static int pmu_get_status_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp, struct boardobjgrpmask *mask) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; int status = 0; struct boardobjgrp_pmu_cmd *pcmd = (struct boardobjgrp_pmu_cmd *)(&pboardobjgrp->pmu.getstatus); diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk.c b/drivers/gpu/nvgpu/common/pmu/clk/clk.c index 94f6f217f..907d91d68 100644 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk.c +++ b/drivers/gpu/nvgpu/common/pmu/clk/clk.c @@ -57,7 +57,7 @@ int nvgpu_clk_domain_freq_to_volt(struct gk20a *g, u8 clkdomain_idx, u32 *pclkmhz, u32 *pvoltuv, u8 railidx) { struct nv_pmu_rpc_clk_domain_35_prog_freq_to_volt rpc; - 
struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; int status = -EINVAL; (void)memset(&rpc, 0, @@ -168,7 +168,7 @@ static int clk_pmu_vf_inject(struct gk20a *g, goto done; } - pmu_wait_message_cond(&g->pmu, + pmu_wait_message_cond(g->pmu, nvgpu_get_poll_timeout(g), &handler.success, 1); @@ -186,31 +186,31 @@ int nvgpu_clk_set_fll_clks(struct gk20a *g, int status = -EINVAL; /*set regime ids */ - status = g->pmu.clk_pmu->get_regime_id(g, CTRL_CLK_DOMAIN_GPCCLK, + status = g->pmu->clk_pmu->get_regime_id(g, CTRL_CLK_DOMAIN_GPCCLK, &setfllclk->current_regime_id_gpc); if (status != 0) { goto done; } - setfllclk->target_regime_id_gpc = g->pmu.clk_pmu->find_regime_id(g, + setfllclk->target_regime_id_gpc = g->pmu->clk_pmu->find_regime_id(g, CTRL_CLK_DOMAIN_GPCCLK, setfllclk->gpc2clkmhz); - status = g->pmu.clk_pmu->get_regime_id(g, CTRL_CLK_DOMAIN_SYSCLK, + status = g->pmu->clk_pmu->get_regime_id(g, CTRL_CLK_DOMAIN_SYSCLK, &setfllclk->current_regime_id_sys); if (status != 0) { goto done; } - setfllclk->target_regime_id_sys = g->pmu.clk_pmu->find_regime_id(g, + setfllclk->target_regime_id_sys = g->pmu->clk_pmu->find_regime_id(g, CTRL_CLK_DOMAIN_SYSCLK, setfllclk->sys2clkmhz); - status = g->pmu.clk_pmu->get_regime_id(g, CTRL_CLK_DOMAIN_XBARCLK, + status = g->pmu->clk_pmu->get_regime_id(g, CTRL_CLK_DOMAIN_XBARCLK, &setfllclk->current_regime_id_xbar); if (status != 0) { goto done; } - setfllclk->target_regime_id_xbar = g->pmu.clk_pmu->find_regime_id(g, + setfllclk->target_regime_id_xbar = g->pmu->clk_pmu->find_regime_id(g, CTRL_CLK_DOMAIN_XBARCLK, setfllclk->xbar2clkmhz); status = clk_pmu_vf_inject(g, setfllclk); @@ -220,19 +220,19 @@ int nvgpu_clk_set_fll_clks(struct gk20a *g, } /* save regime ids */ - status = g->pmu.clk_pmu->set_regime_id(g, CTRL_CLK_DOMAIN_XBARCLK, + status = g->pmu->clk_pmu->set_regime_id(g, CTRL_CLK_DOMAIN_XBARCLK, setfllclk->target_regime_id_xbar); if (status != 0) { goto done; } - status = g->pmu.clk_pmu->set_regime_id(g, 
CTRL_CLK_DOMAIN_GPCCLK, + status = g->pmu->clk_pmu->set_regime_id(g, CTRL_CLK_DOMAIN_GPCCLK, setfllclk->target_regime_id_gpc); if (status != 0) { goto done; } - status = g->pmu.clk_pmu->set_regime_id(g, CTRL_CLK_DOMAIN_SYSCLK, + status = g->pmu->clk_pmu->set_regime_id(g, CTRL_CLK_DOMAIN_SYSCLK, setfllclk->target_regime_id_sys); if (status != 0) { goto done; @@ -244,23 +244,23 @@ done: int nvgpu_clk_get_fll_clks(struct gk20a *g, struct nvgpu_set_fll_clk *setfllclk) { - return g->pmu.clk_pmu->get_fll(g, setfllclk); + return g->pmu->clk_pmu->get_fll(g, setfllclk); } int nvgpu_clk_set_boot_fll_clk_tu10x(struct gk20a *g) { - return g->pmu.clk_pmu->set_boot_fll(g); + return g->pmu->clk_pmu->set_boot_fll(g); } int nvgpu_clk_init_pmupstate(struct gk20a *g) { /* If already allocated, do not re-allocate */ - if (g->pmu.clk_pmu != NULL) { + if (g->pmu->clk_pmu != NULL) { return 0; } - g->pmu.clk_pmu = nvgpu_kzalloc(g, sizeof(*g->pmu.clk_pmu)); - if (g->pmu.clk_pmu == NULL) { + g->pmu->clk_pmu = nvgpu_kzalloc(g, sizeof(*g->pmu->clk_pmu)); + if (g->pmu->clk_pmu == NULL) { return -ENOMEM; } @@ -269,14 +269,14 @@ int nvgpu_clk_init_pmupstate(struct gk20a *g) void nvgpu_clk_free_pmupstate(struct gk20a *g) { - nvgpu_kfree(g, g->pmu.clk_pmu); - g->pmu.clk_pmu = NULL; + nvgpu_kfree(g, g->pmu->clk_pmu); + g->pmu->clk_pmu = NULL; } int nvgpu_clk_set_req_fll_clk_ps35(struct gk20a *g, struct nvgpu_clk_slave_freq *vf_point) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct nv_pmu_rpc_perf_change_seq_queue_change rpc; struct ctrl_perf_change_seq_change_input change_input; int status = 0; @@ -287,7 +287,7 @@ int nvgpu_clk_set_req_fll_clk_ps35(struct gk20a *g, (void) memset(&change_input, 0, sizeof(struct ctrl_perf_change_seq_change_input)); - g->pmu.clk_pmu->set_p0_clks(g, &gpcclk_domain, &gpcclk_clkmhz, + g->pmu->clk_pmu->set_p0_clks(g, &gpcclk_domain, &gpcclk_clkmhz, vf_point, &change_input); change_input.pstate_index = 0U; diff --git 
a/drivers/gpu/nvgpu/common/pmu/clk/clk_domain.c b/drivers/gpu/nvgpu/common/pmu/clk/clk_domain.c index a3170eac4..9d16f3364 100644 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk_domain.c +++ b/drivers/gpu/nvgpu/common/pmu/clk/clk_domain.c @@ -232,7 +232,7 @@ int nvgpu_clk_domain_sw_setup(struct gk20a *g) status = nvgpu_boardobjgrp_construct_e32(g, - &g->pmu.clk_pmu->clk_domainobjs->super); + &g->pmu->clk_pmu->clk_domainobjs->super); if (status != 0) { nvgpu_err(g, "error creating boardobjgrp for clk domain, status - 0x%x", @@ -240,8 +240,8 @@ int nvgpu_clk_domain_sw_setup(struct gk20a *g) goto done; } - pboardobjgrp = &g->pmu.clk_pmu->clk_domainobjs->super.super; - pclkdomainobjs = g->pmu.clk_pmu->clk_domainobjs; + pboardobjgrp = &g->pmu->clk_pmu->clk_domainobjs->super.super; + pclkdomainobjs = g->pmu->clk_pmu->clk_domainobjs; BOARDOBJGRP_PMU_CONSTRUCT(pboardobjgrp, CLK, CLK_DOMAIN); @@ -325,8 +325,8 @@ int nvgpu_clk_domain_sw_setup(struct gk20a *g) (struct clk_domain_35_slave *)(void *)pdomain; pdomain_master_35 = (struct clk_domain_35_master *) (void *) - (g->pmu.clk_pmu->clk_get_clk_domain( - (g->pmu.clk_pmu), + (g->pmu->clk_pmu->clk_get_clk_domain( + (g->pmu->clk_pmu), pdomain_slave_35->slave.master_idx)); pdomain_master_35->master.slave_idxs_mask |= BIT32(i); pdomain_slave_35->super.clk_pos = @@ -355,7 +355,7 @@ int nvgpu_clk_domain_pmu_setup(struct gk20a *g) nvgpu_log_info(g, " "); - pboardobjgrp = &g->pmu.clk_pmu->clk_domainobjs->super.super; + pboardobjgrp = &g->pmu->clk_pmu->clk_domainobjs->super.super; if (!pboardobjgrp->bconstructed) { return -EINVAL; @@ -732,7 +732,7 @@ static int clkdomaingetslaveclk(struct gk20a *g, } slaveidx = BOARDOBJ_GET_IDX(pdomain); p35master = (struct clk_domain_35_master *)(void *) - g->pmu.clk_pmu->clk_get_clk_domain(pclk, + g->pmu->clk_pmu->clk_get_clk_domain(pclk, ((struct clk_domain_35_slave *) (void *)pdomain)->slave.master_idx); pprog = CLK_CLK_PROG_GET(pclk, p35master-> @@ -782,7 +782,7 @@ static int clkdomainvfsearch(struct 
gk20a *g, slaveidx = BOARDOBJ_GET_IDX(pdomain); pslaveidx = &slaveidx; p3xmaster = (struct clk_domain_3x_master *)(void *) - g->pmu.clk_pmu->clk_get_clk_domain(pclk, + g->pmu->clk_pmu->clk_get_clk_domain(pclk, ((struct clk_domain_3x_slave *)(void *) pdomain)->master_idx); } @@ -908,7 +908,7 @@ static int clk_domain_pmudatainit_35_prog(struct gk20a *g, struct clk_domain_35_prog *pclk_domain_35_prog; struct clk_domain_3x_prog *pclk_domain_3x_prog; struct nv_pmu_clk_clk_domain_35_prog_boardobj_set *pset; - struct nvgpu_clk_domains *pdomains = g->pmu.clk_pmu->clk_domainobjs; + struct nvgpu_clk_domains *pdomains = g->pmu->clk_pmu->clk_domainobjs; nvgpu_log_info(g, " "); @@ -1377,7 +1377,7 @@ int nvgpu_clk_pmu_clk_domains_load(struct gk20a *g) goto done; } - (void) pmu_wait_message_cond(&g->pmu, nvgpu_get_poll_timeout(g), + (void) pmu_wait_message_cond(g->pmu, nvgpu_get_poll_timeout(g), &handler.success, 1); if (handler.success == 0U) { @@ -1395,7 +1395,7 @@ static int clk_get_fll_clks_per_clk_domain(struct gk20a *g, int status = -EINVAL; struct nvgpu_clk_domain *pdomain; u8 i; - struct nvgpu_clk_pmupstate *pclk = g->pmu.clk_pmu; + struct nvgpu_clk_pmupstate *pclk = g->pmu->clk_pmu; unsigned long bit; u16 clkmhz = 0; struct clk_domain_35_master *p35master; @@ -1422,7 +1422,7 @@ static int clk_get_fll_clks_per_clk_domain(struct gk20a *g, i = (u8)bit; p35slave = (struct clk_domain_35_slave *) (void *) - g->pmu.clk_pmu->clk_get_clk_domain(pclk, i); + g->pmu->clk_pmu->clk_get_clk_domain(pclk, i); clkmhz = 0; status = p35slave-> @@ -1459,7 +1459,7 @@ done: static int clk_set_boot_fll_clks_per_clk_domain(struct gk20a *g) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct nv_pmu_rpc_perf_change_seq_queue_change rpc; struct ctrl_perf_change_seq_change_input change_input; struct clk_set_info *p0_clk_set_info; @@ -1472,7 +1472,7 @@ static int clk_set_boot_fll_clks_per_clk_domain(struct gk20a *g) (void) memset(&change_input, 0, sizeof(struct 
ctrl_perf_change_seq_change_input)); - BOARDOBJGRP_FOR_EACH(&(g->pmu.clk_pmu->clk_domainobjs->super.super), + BOARDOBJGRP_FOR_EACH(&(g->pmu->clk_pmu->clk_domainobjs->super.super), struct nvgpu_clk_domain *, pclk_domain, i) { p0_clk_set_info = nvgpu_pmu_perf_pstate_get_clk_set_info(g, @@ -1554,7 +1554,7 @@ static void clk_set_p0_clk_per_domain(struct gk20a *g, u8 *gpcclk_domain, u16 max_ratio; u8 i = 0; - BOARDOBJGRP_FOR_EACH(&(g->pmu.clk_pmu->clk_domainobjs->super.super), + BOARDOBJGRP_FOR_EACH(&(g->pmu->clk_pmu->clk_domainobjs->super.super), struct nvgpu_clk_domain *, pclk_domain, i) { switch (pclk_domain->api_domain) { @@ -1693,25 +1693,25 @@ static void clk_set_p0_clk_per_domain(struct gk20a *g, u8 *gpcclk_domain, int nvgpu_clk_domain_init_pmupstate(struct gk20a *g) { /* If already allocated, do not re-allocate */ - if (g->pmu.clk_pmu->clk_domainobjs != NULL) { + if (g->pmu->clk_pmu->clk_domainobjs != NULL) { return 0; } - g->pmu.clk_pmu->clk_domainobjs = nvgpu_kzalloc(g, - sizeof(*g->pmu.clk_pmu->clk_domainobjs)); - if (g->pmu.clk_pmu->clk_domainobjs == NULL) { + g->pmu->clk_pmu->clk_domainobjs = nvgpu_kzalloc(g, + sizeof(*g->pmu->clk_pmu->clk_domainobjs)); + if (g->pmu->clk_pmu->clk_domainobjs == NULL) { return -ENOMEM; } - g->pmu.clk_pmu->get_fll = + g->pmu->clk_pmu->get_fll = clk_get_fll_clks_per_clk_domain; - g->pmu.clk_pmu->set_boot_fll = + g->pmu->clk_pmu->set_boot_fll = clk_set_boot_fll_clks_per_clk_domain; - g->pmu.clk_pmu->set_p0_clks = + g->pmu->clk_pmu->set_p0_clks = clk_set_p0_clk_per_domain; - g->pmu.clk_pmu->clk_get_clk_domain = + g->pmu->clk_pmu->clk_get_clk_domain = clk_get_clk_domain_from_index; - g->pmu.clk_pmu->clk_domain_clk_prog_link = + g->pmu->clk_pmu->clk_domain_clk_prog_link = clk_domain_clk_prog_link; return 0; @@ -1719,7 +1719,7 @@ int nvgpu_clk_domain_init_pmupstate(struct gk20a *g) void nvgpu_clk_domain_free_pmupstate(struct gk20a *g) { - nvgpu_kfree(g, g->pmu.clk_pmu->clk_domainobjs); - g->pmu.clk_pmu->clk_domainobjs = NULL; + 
nvgpu_kfree(g, g->pmu->clk_pmu->clk_domainobjs); + g->pmu->clk_pmu->clk_domainobjs = NULL; } diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk_fll.c b/drivers/gpu/nvgpu/common/pmu/clk/clk_fll.c index f50fb311e..3060b71f8 100644 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk_fll.c +++ b/drivers/gpu/nvgpu/common/pmu/clk/clk_fll.c @@ -149,14 +149,14 @@ int nvgpu_clk_fll_sw_setup(struct gk20a *g) nvgpu_log_info(g, " "); status = nvgpu_boardobjgrp_construct_e32(g, - &g->pmu.clk_pmu->avfs_fllobjs->super); + &g->pmu->clk_pmu->avfs_fllobjs->super); if (status != 0) { nvgpu_err(g, "error creating boardobjgrp for fll, status - 0x%x", status); goto done; } - pfllobjs = g->pmu.clk_pmu->avfs_fllobjs; - pboardobjgrp = &(g->pmu.clk_pmu->avfs_fllobjs->super.super); + pfllobjs = g->pmu->clk_pmu->avfs_fllobjs; + pboardobjgrp = &(g->pmu->clk_pmu->avfs_fllobjs->super.super); BOARDOBJGRP_PMU_CONSTRUCT(pboardobjgrp, CLK, FLL_DEVICE); @@ -191,7 +191,7 @@ int nvgpu_clk_fll_sw_setup(struct gk20a *g) } status = BOARDOBJGRP_PMU_CMD_GRP_GET_STATUS_CONSTRUCT(g, - &g->pmu.clk_pmu->avfs_fllobjs->super.super, + &g->pmu->clk_pmu->avfs_fllobjs->super.super, clk, CLK, clk_fll_device, CLK_FLL_DEVICE); if (status != 0) { nvgpu_err(g, @@ -243,7 +243,7 @@ int nvgpu_clk_fll_pmu_setup(struct gk20a *g) nvgpu_log_info(g, " "); - pboardobjgrp = &g->pmu.clk_pmu->avfs_fllobjs->super.super; + pboardobjgrp = &g->pmu->clk_pmu->avfs_fllobjs->super.super; if (!pboardobjgrp->bconstructed) { return -EINVAL; @@ -270,7 +270,7 @@ static int devinit_get_fll_device_table(struct gk20a *g, struct nvgpu_vin_device *pvin_dev; u32 desctablesize; u32 vbios_domain = NV_PERF_DOMAIN_4X_CLOCK_DOMAIN_SKIP; - struct nvgpu_avfsvinobjs *pvinobjs = g->pmu.clk_pmu->avfs_vinobjs; + struct nvgpu_avfsvinobjs *pvinobjs = g->pmu->clk_pmu->avfs_vinobjs; nvgpu_log_info(g, " "); @@ -320,7 +320,7 @@ static int devinit_get_fll_device_table(struct gk20a *g, if ((u8)fll_desc_table_entry.vin_idx_logic != CTRL_CLK_VIN_ID_UNDEFINED) { - pvin_dev = 
g->pmu.clk_pmu->clk_get_vin(pvinobjs, + pvin_dev = g->pmu->clk_pmu->clk_get_vin(pvinobjs, (u8)fll_desc_table_entry.vin_idx_logic); if (pvin_dev == NULL) { return -EINVAL; @@ -339,7 +339,7 @@ static int devinit_get_fll_device_table(struct gk20a *g, if ((u8)fll_desc_table_entry.vin_idx_sram != CTRL_CLK_VIN_ID_UNDEFINED) { - pvin_dev = g->pmu.clk_pmu->clk_get_vin(pvinobjs, + pvin_dev = g->pmu->clk_pmu->clk_get_vin(pvinobjs, (u8)fll_desc_table_entry.vin_idx_sram); if (pvin_dev == NULL) { return -EINVAL; @@ -547,7 +547,7 @@ static u8 find_regime_id(struct gk20a *g, u32 domain, u16 clkmhz) { struct fll_device *pflldev; u8 j; - struct nvgpu_clk_pmupstate *pclk = g->pmu.clk_pmu; + struct nvgpu_clk_pmupstate *pclk = g->pmu->clk_pmu; BOARDOBJGRP_FOR_EACH(&(pclk->avfs_fllobjs->super.super), struct fll_device *, pflldev, j) { @@ -567,7 +567,7 @@ static int set_regime_id(struct gk20a *g, u32 domain, u8 regimeid) { struct fll_device *pflldev; u8 j; - struct nvgpu_clk_pmupstate *pclk = g->pmu.clk_pmu; + struct nvgpu_clk_pmupstate *pclk = g->pmu->clk_pmu; BOARDOBJGRP_FOR_EACH(&(pclk->avfs_fllobjs->super.super), struct fll_device *, pflldev, j) { @@ -583,7 +583,7 @@ static int get_regime_id(struct gk20a *g, u32 domain, u8 *regimeid) { struct fll_device *pflldev; u8 j; - struct nvgpu_clk_pmupstate *pclk = g->pmu.clk_pmu; + struct nvgpu_clk_pmupstate *pclk = g->pmu->clk_pmu; BOARDOBJGRP_FOR_EACH(&(pclk->avfs_fllobjs->super.super), struct fll_device *, pflldev, j) { @@ -598,24 +598,24 @@ static int get_regime_id(struct gk20a *g, u32 domain, u8 *regimeid) int nvgpu_clk_fll_init_pmupstate(struct gk20a *g) { /* If already allocated, do not re-allocate */ - if (g->pmu.clk_pmu->avfs_fllobjs != NULL) { + if (g->pmu->clk_pmu->avfs_fllobjs != NULL) { return 0; } - g->pmu.clk_pmu->avfs_fllobjs = nvgpu_kzalloc(g, - sizeof(*g->pmu.clk_pmu->avfs_fllobjs)); - if (g->pmu.clk_pmu->avfs_fllobjs == NULL) { + g->pmu->clk_pmu->avfs_fllobjs = nvgpu_kzalloc(g, + sizeof(*g->pmu->clk_pmu->avfs_fllobjs)); + 
if (g->pmu->clk_pmu->avfs_fllobjs == NULL) { return -ENOMEM; } - g->pmu.clk_pmu->find_regime_id = find_regime_id; - g->pmu.clk_pmu->get_regime_id = get_regime_id; - g->pmu.clk_pmu->set_regime_id = set_regime_id; - g->pmu.clk_pmu->get_fll_lut_vf_num_entries = + g->pmu->clk_pmu->find_regime_id = find_regime_id; + g->pmu->clk_pmu->get_regime_id = get_regime_id; + g->pmu->clk_pmu->set_regime_id = set_regime_id; + g->pmu->clk_pmu->get_fll_lut_vf_num_entries = clk_get_fll_lut_vf_num_entries; - g->pmu.clk_pmu->get_fll_lut_min_volt = + g->pmu->clk_pmu->get_fll_lut_min_volt = clk_get_fll_lut_min_volt; - g->pmu.clk_pmu->get_fll_lut_step_size = + g->pmu->clk_pmu->get_fll_lut_step_size = clk_get_fll_lut_step_size; return 0; @@ -623,6 +623,6 @@ int nvgpu_clk_fll_init_pmupstate(struct gk20a *g) void nvgpu_clk_fll_free_pmupstate(struct gk20a *g) { - nvgpu_kfree(g, g->pmu.clk_pmu->avfs_fllobjs); - g->pmu.clk_pmu->avfs_fllobjs = NULL; + nvgpu_kfree(g, g->pmu->clk_pmu->avfs_fllobjs); + g->pmu->clk_pmu->avfs_fllobjs = NULL; } diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_controller.c b/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_controller.c index 6a8ad97e3..908968dd5 100644 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_controller.c +++ b/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_controller.c @@ -269,8 +269,8 @@ static int clk_get_freq_controller_table(struct gk20a *g, BIOS_GET_FIELD(u8, entry.param0, NV_VBIOS_FCT_1X_ENTRY_PARAM0_ID); - pclk_domain = g->pmu.clk_pmu->clk_get_clk_domain( - (g->pmu.clk_pmu), + pclk_domain = g->pmu->clk_pmu->clk_get_clk_domain( + (g->pmu->clk_pmu), (u32)entry.clk_domain_idx); freq_controller_data.freq_controller.clk_domain = pclk_domain->api_domain; @@ -362,7 +362,7 @@ int nvgpu_clk_freq_controller_pmu_setup(struct gk20a *g) nvgpu_log_info(g, " "); - pboardobjgrp = &g->pmu.clk_pmu->clk_freq_controllers->super.super; + pboardobjgrp = &g->pmu->clk_pmu->clk_freq_controllers->super.super; if (!pboardobjgrp->bconstructed) { return -EINVAL; @@ -426,7 
+426,7 @@ int nvgpu_clk_freq_controller_sw_setup(struct gk20a *g) int status = 0; struct boardobjgrp *pboardobjgrp = NULL; struct nvgpu_clk_freq_controllers *pclk_freq_controllers; - struct nvgpu_avfsfllobjs *pfllobjs = g->pmu.clk_pmu->avfs_fllobjs; + struct nvgpu_avfsfllobjs *pfllobjs = g->pmu->clk_pmu->avfs_fllobjs; struct fll_device *pfll; struct clk_freq_controller *pclkfreqctrl; u8 i; @@ -435,7 +435,7 @@ int nvgpu_clk_freq_controller_sw_setup(struct gk20a *g) nvgpu_log_info(g, " "); - pclk_freq_controllers = g->pmu.clk_pmu->clk_freq_controllers; + pclk_freq_controllers = g->pmu->clk_pmu->clk_freq_controllers; status = nvgpu_boardobjgrp_construct_e32(g, &pclk_freq_controllers->super); if (status != 0) { @@ -445,7 +445,7 @@ int nvgpu_clk_freq_controller_sw_setup(struct gk20a *g) goto done; } - pboardobjgrp = &g->pmu.clk_pmu->clk_freq_controllers->super.super; + pboardobjgrp = &g->pmu->clk_pmu->clk_freq_controllers->super.super; pboardobjgrp->pmudatainit = _clk_freq_controllers_pmudatainit; pboardobjgrp->pmudatainstget = @@ -519,7 +519,7 @@ int nvgpu_clk_pmu_freq_controller_load(struct gk20a *g, bool bload, u8 bit_idx) (void) memset(&handler, 0, sizeof( struct clk_freq_ctlr_rpc_pmucmdhandler_params)); - pclk_freq_controllers = g->pmu.clk_pmu->clk_freq_controllers; + pclk_freq_controllers = g->pmu->clk_pmu->clk_freq_controllers; rpccall.function = NV_PMU_CLK_RPC_ID_LOAD; clkload = &rpccall.params.clk_load; clkload->feature = NV_PMU_CLK_LOAD_FEATURE_FREQ_CONTROLLER; @@ -594,7 +594,7 @@ int nvgpu_clk_pmu_freq_controller_load(struct gk20a *g, bool bload, u8 bit_idx) goto done; } - pmu_wait_message_cond(&g->pmu, nvgpu_get_poll_timeout(g), + pmu_wait_message_cond(g->pmu, nvgpu_get_poll_timeout(g), &handler.success, 1); if (handler.success == 0U) { @@ -609,13 +609,13 @@ done: int nvgpu_clk_freq_controller_init_pmupstate(struct gk20a *g) { /* If already allocated, do not re-allocate */ - if (g->pmu.clk_pmu->clk_freq_controllers != NULL) { + if 
(g->pmu->clk_pmu->clk_freq_controllers != NULL) { return 0; } - g->pmu.clk_pmu->clk_freq_controllers = nvgpu_kzalloc(g, - sizeof(*g->pmu.clk_pmu->clk_freq_controllers)); - if (g->pmu.clk_pmu->clk_freq_controllers == NULL) { + g->pmu->clk_pmu->clk_freq_controllers = nvgpu_kzalloc(g, + sizeof(*g->pmu->clk_pmu->clk_freq_controllers)); + if (g->pmu->clk_pmu->clk_freq_controllers == NULL) { return -ENOMEM; } @@ -624,6 +624,6 @@ int nvgpu_clk_freq_controller_init_pmupstate(struct gk20a *g) void nvgpu_clk_freq_controller_free_pmupstate(struct gk20a *g) { - nvgpu_kfree(g, g->pmu.clk_pmu->clk_freq_controllers); - g->pmu.clk_pmu->clk_freq_controllers = NULL; + nvgpu_kfree(g, g->pmu->clk_pmu->clk_freq_controllers); + g->pmu->clk_pmu->clk_freq_controllers = NULL; } diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_domain.c b/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_domain.c index da790ee7a..1a86773a1 100644 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_domain.c +++ b/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_domain.c @@ -154,11 +154,11 @@ int nvgpu_clk_freq_domain_sw_setup(struct gk20a *g) nvgpu_assert(tmp_num_of_domains <= U8_MAX); num_of_domains = (u8)tmp_num_of_domains; - pboardobjgrp = &g->pmu.clk_pmu->freq_domain_grp_objs->super.super; - pfreq_domain_grp = g->pmu.clk_pmu->freq_domain_grp_objs; + pboardobjgrp = &g->pmu->clk_pmu->freq_domain_grp_objs->super.super; + pfreq_domain_grp = g->pmu->clk_pmu->freq_domain_grp_objs; status = nvgpu_boardobjgrp_construct_e32(g, - &g->pmu.clk_pmu->freq_domain_grp_objs->super); + &g->pmu->clk_pmu->freq_domain_grp_objs->super); if (status != 0) { nvgpu_err(g, "error creating boardobjgrp for clk freq domain, " @@ -228,7 +228,7 @@ int nvgpu_clk_freq_domain_pmu_setup(struct gk20a *g) nvgpu_log_fn(g, " "); - pboardobjgrp = &g->pmu.clk_pmu->freq_domain_grp_objs->super.super; + pboardobjgrp = &g->pmu->clk_pmu->freq_domain_grp_objs->super.super; if (!pboardobjgrp->bconstructed) { return -EINVAL; @@ -243,13 +243,13 @@ int 
nvgpu_clk_freq_domain_pmu_setup(struct gk20a *g) int nvgpu_clk_freq_domain_init_pmupstate(struct gk20a *g) { /* If already allocated, do not re-allocate */ - if (g->pmu.clk_pmu->freq_domain_grp_objs != NULL) { + if (g->pmu->clk_pmu->freq_domain_grp_objs != NULL) { return 0; } - g->pmu.clk_pmu->freq_domain_grp_objs = nvgpu_kzalloc(g, - sizeof(*g->pmu.clk_pmu->freq_domain_grp_objs)); - if (g->pmu.clk_pmu->freq_domain_grp_objs == NULL) { + g->pmu->clk_pmu->freq_domain_grp_objs = nvgpu_kzalloc(g, + sizeof(*g->pmu->clk_pmu->freq_domain_grp_objs)); + if (g->pmu->clk_pmu->freq_domain_grp_objs == NULL) { return -ENOMEM; } @@ -258,6 +258,6 @@ int nvgpu_clk_freq_domain_init_pmupstate(struct gk20a *g) void nvgpu_clk_freq_domain_free_pmupstate(struct gk20a *g) { - nvgpu_kfree(g, g->pmu.clk_pmu->freq_domain_grp_objs); - g->pmu.clk_pmu->freq_domain_grp_objs = NULL; + nvgpu_kfree(g, g->pmu->clk_pmu->freq_domain_grp_objs); + g->pmu->clk_pmu->freq_domain_grp_objs = NULL; } diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk_prog.c b/drivers/gpu/nvgpu/common/pmu/clk/clk_prog.c index 205e68a27..f503ae960 100644 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk_prog.c +++ b/drivers/gpu/nvgpu/common/pmu/clk/clk_prog.c @@ -118,7 +118,7 @@ int nvgpu_clk_prog_sw_setup(struct gk20a *g) nvgpu_log_info(g, " "); status = nvgpu_boardobjgrp_construct_e255(g, - &g->pmu.clk_pmu->clk_progobjs->super); + &g->pmu->clk_pmu->clk_progobjs->super); if (status != 0) { nvgpu_err(g, "error creating boardobjgrp for clk prog, status- 0x%x", @@ -126,8 +126,8 @@ int nvgpu_clk_prog_sw_setup(struct gk20a *g) goto done; } - pboardobjgrp = &g->pmu.clk_pmu->clk_progobjs->super.super; - pclkprogobjs = g->pmu.clk_pmu->clk_progobjs; + pboardobjgrp = &g->pmu->clk_pmu->clk_progobjs->super.super; + pclkprogobjs = g->pmu->clk_pmu->clk_progobjs; BOARDOBJGRP_PMU_CONSTRUCT(pboardobjgrp, CLK, CLK_PROG); @@ -149,7 +149,7 @@ int nvgpu_clk_prog_sw_setup(struct gk20a *g) goto done; } - status = g->pmu.clk_pmu->clk_domain_clk_prog_link(g, 
g->pmu.clk_pmu); + status = g->pmu->clk_pmu->clk_domain_clk_prog_link(g, g->pmu->clk_pmu); if (status != 0) { nvgpu_err(g, "error constructing VF point board objects"); goto done; @@ -167,7 +167,7 @@ int nvgpu_clk_prog_pmu_setup(struct gk20a *g) nvgpu_log_info(g, " "); - pboardobjgrp = &g->pmu.clk_pmu->clk_progobjs->super.super; + pboardobjgrp = &g->pmu->clk_pmu->clk_progobjs->super.super; if (!pboardobjgrp->bconstructed) { return -EINVAL; @@ -523,7 +523,7 @@ static int clk_prog_pmudatainit_1x_master(struct gk20a *g, struct clk_prog_1x_master *pclk_prog_1x_master; struct nv_pmu_clk_clk_prog_1x_master_boardobj_set *pset; size_t vfsize = sizeof(struct ctrl_clk_clk_prog_1x_master_vf_entry) * - g->pmu.clk_pmu->clk_progobjs->vf_entry_count; + g->pmu->clk_pmu->clk_progobjs->vf_entry_count; nvgpu_log_info(g, " "); @@ -556,7 +556,7 @@ static int clk_prog_pmudatainit_35_master(struct gk20a *g, struct nv_pmu_clk_clk_prog_35_master_boardobj_set *pset; size_t voltrail_sec_vfsize = sizeof(struct ctrl_clk_clk_prog_35_master_sec_vf_entry_voltrail) - * g->pmu.clk_pmu->clk_progobjs->vf_sec_entry_count; + * g->pmu->clk_pmu->clk_progobjs->vf_sec_entry_count; nvgpu_log_info(g, " "); @@ -583,7 +583,7 @@ static int clk_prog_pmudatainit_35_master_ratio(struct gk20a *g, struct clk_prog_35_master_ratio *pclk_prog_35_master_ratio; struct nv_pmu_clk_clk_prog_35_master_ratio_boardobj_set *pset; size_t slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) * - g->pmu.clk_pmu->clk_progobjs->slave_entry_count; + g->pmu->clk_pmu->clk_progobjs->slave_entry_count; nvgpu_log_info(g, " "); @@ -614,7 +614,7 @@ static int clk_prog_pmudatainit_35_master_table(struct gk20a *g, struct nv_pmu_clk_clk_prog_35_master_table_boardobj_set *pset; size_t slavesize = sizeof( struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) * - g->pmu.clk_pmu->clk_progobjs->slave_entry_count; + g->pmu->clk_pmu->clk_progobjs->slave_entry_count; nvgpu_log_info(g, " "); @@ -751,7 +751,7 @@ static int 
clk_prog_construct_1x_master(struct gk20a *g, (struct clk_prog_1x_master *)pargs; int status = 0; size_t vfsize = sizeof(struct ctrl_clk_clk_prog_1x_master_vf_entry) * - g->pmu.clk_pmu->clk_progobjs->vf_entry_count; + g->pmu->clk_pmu->clk_progobjs->vf_entry_count; u8 railidx; nvgpu_log_info(g, " type - %x", BOARDOBJ_GET_TYPE(pargs)); @@ -788,7 +788,7 @@ static int clk_prog_construct_1x_master(struct gk20a *g, pclkprog->b_o_c_o_v_enabled = ptmpprog->b_o_c_o_v_enabled; for (railidx = 0; - railidx < g->pmu.clk_pmu->clk_progobjs->vf_entry_count; + railidx < g->pmu->clk_pmu->clk_progobjs->vf_entry_count; railidx++) { pclkprog->p_vf_entries[railidx].vf_point_idx_first = CTRL_CLK_CLK_VF_POINT_IDX_INVALID; @@ -847,7 +847,7 @@ static int clk_prog_construct_35_master_ratio(struct gk20a *g, int status = 0; size_t slavesize = sizeof( struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) * - g->pmu.clk_pmu->clk_progobjs->slave_entry_count; + g->pmu->clk_pmu->clk_progobjs->slave_entry_count; if (BOARDOBJ_GET_TYPE(pargs) != CTRL_CLK_CLK_PROG_TYPE_35_MASTER_RATIO) { return -EINVAL; @@ -890,7 +890,7 @@ static int clk_prog_construct_35_master_table(struct gk20a *g, int status = 0; size_t slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_table_slave_entry) * - g->pmu.clk_pmu->clk_progobjs->slave_entry_count; + g->pmu->clk_pmu->clk_progobjs->slave_entry_count; nvgpu_log_info(g, "type - %x", BOARDOBJ_GET_TYPE(pargs)); @@ -1048,10 +1048,10 @@ static int vfflatten_prog_1x_master(struct gk20a *g, case CTRL_CLK_PROG_1X_SOURCE_FLL: voltage_min_uv = - g->pmu.clk_pmu->get_fll_lut_min_volt(pclk); + g->pmu->clk_pmu->get_fll_lut_min_volt(pclk); voltage_step_size_uv = - g->pmu.clk_pmu->get_fll_lut_step_size(pclk); - step_count = g->pmu.clk_pmu-> + g->pmu->clk_pmu->get_fll_lut_step_size(pclk); + step_count = g->pmu->clk_pmu-> get_fll_lut_vf_num_entries(pclk); /* FLL sources use a voltage-based VF_POINT.*/ @@ -1371,13 +1371,13 @@ static int getslaveclk_prog_1x_master(struct gk20a *g, int 
nvgpu_clk_prog_init_pmupstate(struct gk20a *g) { /* If already allocated, do not re-allocate */ - if (g->pmu.clk_pmu->clk_progobjs != NULL) { + if (g->pmu->clk_pmu->clk_progobjs != NULL) { return 0; } - g->pmu.clk_pmu->clk_progobjs = nvgpu_kzalloc(g, - sizeof(*g->pmu.clk_pmu->clk_progobjs)); - if (g->pmu.clk_pmu->clk_progobjs == NULL) { + g->pmu->clk_pmu->clk_progobjs = nvgpu_kzalloc(g, + sizeof(*g->pmu->clk_pmu->clk_progobjs)); + if (g->pmu->clk_pmu->clk_progobjs == NULL) { return -ENOMEM; } @@ -1386,6 +1386,6 @@ int nvgpu_clk_prog_init_pmupstate(struct gk20a *g) void nvgpu_clk_prog_free_pmupstate(struct gk20a *g) { - nvgpu_kfree(g, g->pmu.clk_pmu->clk_progobjs); - g->pmu.clk_pmu->clk_progobjs = NULL; + nvgpu_kfree(g, g->pmu->clk_pmu->clk_progobjs); + g->pmu->clk_pmu->clk_progobjs = NULL; } diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk_vf_point.c b/drivers/gpu/nvgpu/common/pmu/clk/clk_vf_point.c index c349eaf54..9f519ecc6 100644 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk_vf_point.c +++ b/drivers/gpu/nvgpu/common/pmu/clk/clk_vf_point.c @@ -40,7 +40,7 @@ int nvgpu_clk_domain_volt_to_freq(struct gk20a *g, u8 clkdomain_idx, u32 *pclkmhz, u32 *pvoltuv, u8 railidx) { struct nv_pmu_rpc_clk_domain_35_prog_freq_to_volt rpc; - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; int status = -EINVAL; (void)memset(&rpc, 0, @@ -129,7 +129,7 @@ int nvgpu_clk_vf_point_sw_setup(struct gk20a *g) nvgpu_log_info(g, " "); status = nvgpu_boardobjgrp_construct_e255(g, - &g->pmu.clk_pmu->clk_vf_pointobjs->super); + &g->pmu->clk_pmu->clk_vf_pointobjs->super); if (status != 0) { nvgpu_err(g, "error creating boardobjgrp for clk vfpoint, status - 0x%x", @@ -137,7 +137,7 @@ int nvgpu_clk_vf_point_sw_setup(struct gk20a *g) goto done; } - pboardobjgrp = &g->pmu.clk_pmu->clk_vf_pointobjs->super.super; + pboardobjgrp = &g->pmu->clk_pmu->clk_vf_pointobjs->super.super; BOARDOBJGRP_PMU_CONSTRUCT(pboardobjgrp, CLK, CLK_VF_POINT); @@ -151,7 +151,7 @@ int 
nvgpu_clk_vf_point_sw_setup(struct gk20a *g) } status = BOARDOBJGRP_PMU_CMD_GRP_GET_STATUS_CONSTRUCT(g, - &g->pmu.clk_pmu->clk_vf_pointobjs->super.super, + &g->pmu->clk_pmu->clk_vf_pointobjs->super.super, clk, CLK, clk_vf_point, CLK_VF_POINT); if (status != 0) { nvgpu_err(g, @@ -176,7 +176,7 @@ int nvgpu_clk_vf_point_pmu_setup(struct gk20a *g) nvgpu_log_info(g, " "); - pboardobjgrp = &g->pmu.clk_pmu->clk_vf_pointobjs->super.super; + pboardobjgrp = &g->pmu->clk_pmu->clk_vf_pointobjs->super.super; if (!pboardobjgrp->bconstructed) { return -EINVAL; @@ -456,12 +456,12 @@ int nvgpu_clk_vf_point_cache(struct gk20a *g) u32 gpcclk_clkmhz=0, gpcclk_voltuv=0; nvgpu_log_info(g, " "); - pclk_vf_points = g->pmu.clk_pmu->clk_vf_pointobjs; + pclk_vf_points = g->pmu->clk_pmu->clk_vf_pointobjs; pboardobjgrp = &pclk_vf_points->super.super; - voltage_min_uv = g->pmu.clk_pmu->get_fll_lut_min_volt(g->pmu.clk_pmu); + voltage_min_uv = g->pmu->clk_pmu->get_fll_lut_min_volt(g->pmu->clk_pmu); voltage_step_size_uv = - g->pmu.clk_pmu->get_fll_lut_step_size(g->pmu.clk_pmu); + g->pmu->clk_pmu->get_fll_lut_step_size(g->pmu->clk_pmu); BOARDOBJGRP_FOR_EACH(pboardobjgrp, struct boardobj*, pboardobj, index) { pclk_vf_point = (struct clk_vf_point *)(void *)pboardobj; gpcclk_voltuv = @@ -483,23 +483,23 @@ int nvgpu_clk_vf_point_cache(struct gk20a *g) int nvgpu_clk_vf_point_init_pmupstate(struct gk20a *g) { /* If already allocated, do not re-allocate */ - if (g->pmu.clk_pmu->clk_vf_pointobjs != NULL) { + if (g->pmu->clk_pmu->clk_vf_pointobjs != NULL) { return 0; } - g->pmu.clk_pmu->clk_vf_pointobjs = nvgpu_kzalloc(g, - sizeof(*g->pmu.clk_pmu->clk_vf_pointobjs)); - if (g->pmu.clk_pmu->clk_vf_pointobjs == NULL) { + g->pmu->clk_pmu->clk_vf_pointobjs = nvgpu_kzalloc(g, + sizeof(*g->pmu->clk_pmu->clk_vf_pointobjs)); + if (g->pmu->clk_pmu->clk_vf_pointobjs == NULL) { return -ENOMEM; } - g->pmu.clk_pmu->nvgpu_clk_vf_point_cache = nvgpu_clk_vf_point_cache; + g->pmu->clk_pmu->nvgpu_clk_vf_point_cache = 
nvgpu_clk_vf_point_cache; return 0; } void nvgpu_clk_vf_point_free_pmupstate(struct gk20a *g) { - nvgpu_kfree(g, g->pmu.clk_pmu->clk_vf_pointobjs); - g->pmu.clk_pmu->clk_vf_pointobjs = NULL; + nvgpu_kfree(g, g->pmu->clk_pmu->clk_vf_pointobjs); + g->pmu->clk_pmu->clk_vf_pointobjs = NULL; } diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk_vin.c b/drivers/gpu/nvgpu/common/pmu/clk/clk_vin.c index ae749ae34..ff24e99c3 100644 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk_vin.c +++ b/drivers/gpu/nvgpu/common/pmu/clk/clk_vin.c @@ -97,7 +97,7 @@ static int nvgpu_clk_avfs_get_vin_cal_fuse_v20(struct gk20a *g, gain = 0; offset = 0; pvindev = (struct vin_device_v20 *)(void *) - g->pmu.clk_pmu->clk_get_vin(pvinobjs, i); + g->pmu->clk_pmu->clk_get_vin(pvinobjs, i); status = g->ops.fuse.read_vin_cal_gain_offset_fuse(g, pvindev->super.id, &gain, &offset); if (status != 0) { @@ -186,7 +186,7 @@ int nvgpu_clk_vin_sw_setup(struct gk20a *g) nvgpu_log_info(g, " "); status = nvgpu_boardobjgrp_construct_e32(g, - &g->pmu.clk_pmu->avfs_vinobjs->super); + &g->pmu->clk_pmu->avfs_vinobjs->super); if (status != 0) { nvgpu_err(g, "error creating boardobjgrp for clk vin, statu - 0x%x", @@ -194,8 +194,8 @@ int nvgpu_clk_vin_sw_setup(struct gk20a *g) goto done; } - pboardobjgrp = &g->pmu.clk_pmu->avfs_vinobjs->super.super; - pvinobjs = g->pmu.clk_pmu->avfs_vinobjs; + pboardobjgrp = &g->pmu->clk_pmu->avfs_vinobjs->super.super; + pvinobjs = g->pmu->clk_pmu->avfs_vinobjs; BOARDOBJGRP_PMU_CONSTRUCT(pboardobjgrp, CLK, VIN_DEVICE); @@ -212,7 +212,7 @@ int nvgpu_clk_vin_sw_setup(struct gk20a *g) pboardobjgrp->pmudatainstget = _clk_vin_devgrp_pmudata_instget; pboardobjgrp->pmustatusinstget = _clk_vin_devgrp_pmustatus_instget; - status = devinit_get_vin_device_table(g, g->pmu.clk_pmu->avfs_vinobjs); + status = devinit_get_vin_device_table(g, g->pmu->clk_pmu->avfs_vinobjs); if (status != 0) { goto done; } @@ -226,7 +226,7 @@ int nvgpu_clk_vin_sw_setup(struct gk20a *g) } status = 
BOARDOBJGRP_PMU_CMD_GRP_GET_STATUS_CONSTRUCT(g, - &g->pmu.clk_pmu->avfs_vinobjs->super.super, + &g->pmu->clk_pmu->avfs_vinobjs->super.super, clk, CLK, clk_vin_device, CLK_VIN_DEVICE); if (status != 0) { nvgpu_err(g, @@ -247,7 +247,7 @@ int nvgpu_clk_vin_pmu_setup(struct gk20a *g) nvgpu_log_info(g, " "); - pboardobjgrp = &g->pmu.clk_pmu->avfs_vinobjs->super.super; + pboardobjgrp = &g->pmu->clk_pmu->avfs_vinobjs->super.super; if (!pboardobjgrp->bconstructed) { return -EINVAL; @@ -537,7 +537,7 @@ int nvgpu_clk_pmu_vin_load(struct gk20a *g) goto done; } - pmu_wait_message_cond(&g->pmu, nvgpu_get_poll_timeout(g), + pmu_wait_message_cond(g->pmu, nvgpu_get_poll_timeout(g), &handler.success, 1); if (handler.success == 0U) { @@ -552,23 +552,23 @@ done: int nvgpu_clk_vin_init_pmupstate(struct gk20a *g) { /* If already allocated, do not re-allocate */ - if (g->pmu.clk_pmu->avfs_vinobjs != NULL) { + if (g->pmu->clk_pmu->avfs_vinobjs != NULL) { return 0; } - g->pmu.clk_pmu->avfs_vinobjs = nvgpu_kzalloc(g, - sizeof(*g->pmu.clk_pmu->avfs_vinobjs)); - if (g->pmu.clk_pmu->avfs_vinobjs == NULL) { + g->pmu->clk_pmu->avfs_vinobjs = nvgpu_kzalloc(g, + sizeof(*g->pmu->clk_pmu->avfs_vinobjs)); + if (g->pmu->clk_pmu->avfs_vinobjs == NULL) { return -ENOMEM; } - g->pmu.clk_pmu->clk_get_vin = clk_get_vin_from_index; + g->pmu->clk_pmu->clk_get_vin = clk_get_vin_from_index; return 0; } void nvgpu_clk_vin_free_pmupstate(struct gk20a *g) { - nvgpu_kfree(g, g->pmu.clk_pmu->avfs_vinobjs); - g->pmu.clk_pmu->avfs_vinobjs = NULL; + nvgpu_kfree(g, g->pmu->clk_pmu->avfs_vinobjs); + g->pmu->clk_pmu->avfs_vinobjs = NULL; } diff --git a/drivers/gpu/nvgpu/common/pmu/fw/fw.c b/drivers/gpu/nvgpu/common/pmu/fw/fw.c index 79f0d5b43..861c0312a 100644 --- a/drivers/gpu/nvgpu/common/pmu/fw/fw.c +++ b/drivers/gpu/nvgpu/common/pmu/fw/fw.c @@ -42,11 +42,11 @@ void nvgpu_pmu_fw_get_cmd_line_args_offset(struct gk20a *g, u32 *args_offset) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; u32 
dmem_size = 0; int err = 0; - err = nvgpu_falcon_get_mem_size(&pmu->flcn, MEM_DMEM, &dmem_size); + err = nvgpu_falcon_get_mem_size(pmu->flcn, MEM_DMEM, &dmem_size); if (err != 0) { nvgpu_err(g, "dmem size request failed"); *args_offset = 0; diff --git a/drivers/gpu/nvgpu/common/pmu/fw/fw_ns_bootstrap.c b/drivers/gpu/nvgpu/common/pmu/fw/fw_ns_bootstrap.c index 3ccdcb25c..a41b087d0 100644 --- a/drivers/gpu/nvgpu/common/pmu/fw/fw_ns_bootstrap.c +++ b/drivers/gpu/nvgpu/common/pmu/fw/fw_ns_bootstrap.c @@ -28,7 +28,7 @@ static int pmu_prepare_ns_ucode_blob(struct gk20a *g) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct mm_gk20a *mm = &g->mm; struct vm_gk20a *vm = mm->pmu.vm; struct pmu_ucode_desc *desc; @@ -41,10 +41,12 @@ static int pmu_prepare_ns_ucode_blob(struct gk20a *g) desc = (struct pmu_ucode_desc *)(void *)rtos_fw->fw_image->data; ucode_image = (u32 *)(void *)((u8 *)desc + desc->descriptor_size); - err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_UCODE_SIZE_MAX, - &pmu->fw->ucode); - if (err != 0) { - goto exit; + if (!nvgpu_mem_is_valid(&rtos_fw->ucode)) { + err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_UCODE_SIZE_MAX, + &rtos_fw->ucode); + if (err != 0) { + goto exit; + } } nvgpu_mem_wr_n(g, &pmu->fw->ucode, 0, ucode_image, @@ -68,7 +70,7 @@ int nvgpu_pmu_ns_fw_bootstrap(struct gk20a *g, struct nvgpu_pmu *pmu) /* Do non-secure PMU boot */ nvgpu_mutex_acquire(&pmu->isr_mutex); - nvgpu_falcon_reset(&pmu->flcn); + nvgpu_falcon_reset(pmu->flcn); pmu->isr_enabled = true; nvgpu_mutex_release(&pmu->isr_mutex); @@ -85,9 +87,13 @@ int nvgpu_pmu_ns_fw_bootstrap(struct gk20a *g, struct nvgpu_pmu *pmu) nvgpu_pmu_fw_get_cmd_line_args_offset(g, &args_offset); - nvgpu_falcon_copy_to_dmem(&pmu->flcn, args_offset, + err = nvgpu_falcon_copy_to_dmem(pmu->flcn, args_offset, (u8 *)(pmu->fw->ops.get_cmd_line_args_ptr(pmu)), pmu->fw->ops.get_cmd_line_args_size(pmu), 0); + if (err != 0) { + nvgpu_err(g, "cmd line args copy failed"); + return err; + } return 
g->ops.pmu.pmu_ns_bootstrap(g, pmu, args_offset); } diff --git a/drivers/gpu/nvgpu/common/pmu/ipc/pmu_cmd.c b/drivers/gpu/nvgpu/common/pmu/ipc/pmu_cmd.c index 9d00345aa..0ff2b17aa 100644 --- a/drivers/gpu/nvgpu/common/pmu/ipc/pmu_cmd.c +++ b/drivers/gpu/nvgpu/common/pmu/ipc/pmu_cmd.c @@ -164,7 +164,7 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd, } do { - err = nvgpu_pmu_queue_push(&pmu->queues, &pmu->flcn, + err = nvgpu_pmu_queue_push(&pmu->queues, pmu->flcn, queue_id, cmd); if (nvgpu_timeout_expired(&timeout) == 0 && err == -EAGAIN) { nvgpu_usleep_range(1000, 2000); @@ -185,7 +185,7 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd, static void pmu_payload_deallocate(struct gk20a *g, struct falcon_payload_alloc *alloc) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; if (alloc->fb_surface != NULL) { nvgpu_pmu_surface_free(g, alloc->fb_surface); @@ -200,7 +200,7 @@ static void pmu_payload_deallocate(struct gk20a *g, static int pmu_payload_allocate(struct gk20a *g, struct pmu_sequence *seq, struct falcon_payload_alloc *alloc) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; u16 buffer_size; int err = 0; u64 tmp; @@ -248,8 +248,8 @@ clean_up: static int pmu_cmd_payload_setup_rpc(struct gk20a *g, struct pmu_cmd *cmd, struct pmu_payload *payload, struct pmu_sequence *seq) { - struct nvgpu_pmu *pmu = &g->pmu; - struct pmu_fw_ver_ops *fw_ops = &g->pmu.fw->ops; + struct nvgpu_pmu *pmu = g->pmu; + struct pmu_fw_ver_ops *fw_ops = &g->pmu->fw->ops; struct nvgpu_engine_fb_queue *queue = nvgpu_pmu_seq_get_cmd_queue(seq); struct falcon_payload_alloc alloc; int err = 0; @@ -280,7 +280,7 @@ static int pmu_cmd_payload_setup_rpc(struct gk20a *g, struct pmu_cmd *cmd, nvgpu_pmu_seq_set_in_payload_fb_queue(seq, true); nvgpu_pmu_seq_set_out_payload_fb_queue(seq, true); } else { - err = nvgpu_falcon_copy_to_dmem(&pmu->flcn, alloc.dmem_offset, + err = nvgpu_falcon_copy_to_dmem(pmu->flcn, 
alloc.dmem_offset, payload->rpc.prpc, payload->rpc.size_rpc, 0); if (err != 0) { pmu_payload_deallocate(g, &alloc); @@ -292,10 +292,10 @@ static int pmu_cmd_payload_setup_rpc(struct gk20a *g, struct pmu_cmd *cmd, cmd->cmd.rpc.rpc_dmem_ptr = alloc.dmem_offset; nvgpu_pmu_seq_set_out_payload(seq, payload->rpc.prpc); - g->pmu.fw->ops.allocation_set_dmem_size(pmu, + g->pmu->fw->ops.allocation_set_dmem_size(pmu, fw_ops->get_seq_out_alloc_ptr(seq), payload->rpc.size_rpc); - g->pmu.fw->ops.allocation_set_dmem_offset(pmu, + g->pmu->fw->ops.allocation_set_dmem_offset(pmu, fw_ops->get_seq_out_alloc_ptr(seq), alloc.dmem_offset); @@ -314,9 +314,9 @@ static int pmu_cmd_in_payload_setup(struct gk20a *g, struct pmu_cmd *cmd, { struct nvgpu_engine_fb_queue *fb_queue = nvgpu_pmu_seq_get_cmd_queue(seq); - struct pmu_fw_ver_ops *fw_ops = &g->pmu.fw->ops; + struct pmu_fw_ver_ops *fw_ops = &g->pmu->fw->ops; struct falcon_payload_alloc alloc; - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; void *in = NULL; int err = 0; u32 offset; @@ -385,7 +385,7 @@ static int pmu_cmd_in_payload_setup(struct gk20a *g, struct pmu_cmd *cmd, offset = fw_ops->allocation_get_dmem_offset(pmu, in); - err = nvgpu_falcon_copy_to_dmem(&pmu->flcn, + err = nvgpu_falcon_copy_to_dmem(pmu->flcn, offset, payload->in.buf, payload->in.size, 0); if (err != 0) { @@ -408,9 +408,9 @@ static int pmu_cmd_in_payload_setup(struct gk20a *g, struct pmu_cmd *cmd, static int pmu_cmd_out_payload_setup(struct gk20a *g, struct pmu_cmd *cmd, struct pmu_payload *payload, struct pmu_sequence *seq) { - struct pmu_fw_ver_ops *fw_ops = &g->pmu.fw->ops; + struct pmu_fw_ver_ops *fw_ops = &g->pmu->fw->ops; struct falcon_payload_alloc alloc; - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; void *in = NULL, *out = NULL; int err = 0; @@ -482,8 +482,8 @@ static int pmu_cmd_out_payload_setup(struct gk20a *g, struct pmu_cmd *cmd, static int pmu_cmd_payload_setup(struct gk20a *g, struct pmu_cmd *cmd, struct 
pmu_payload *payload, struct pmu_sequence *seq) { - struct pmu_fw_ver_ops *fw_ops = &g->pmu.fw->ops; - struct nvgpu_pmu *pmu = &g->pmu; + struct pmu_fw_ver_ops *fw_ops = &g->pmu->fw->ops; + struct nvgpu_pmu *pmu = g->pmu; void *in = NULL; int err = 0; @@ -529,7 +529,7 @@ static int pmu_fbq_cmd_setup(struct gk20a *g, struct pmu_cmd *cmd, struct nvgpu_engine_fb_queue *queue, struct pmu_payload *payload, struct pmu_sequence *seq) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct nv_falcon_fbq_hdr *fbq_hdr = NULL; struct pmu_cmd *flcn_cmd = NULL; u32 fbq_size_needed = 0; @@ -619,7 +619,7 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd, struct pmu_payload *payload, u32 queue_id, pmu_callback callback, void *cb_param) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct pmu_sequence *seq = NULL; struct nvgpu_engine_fb_queue *fb_queue = NULL; int err; diff --git a/drivers/gpu/nvgpu/common/pmu/ipc/pmu_msg.c b/drivers/gpu/nvgpu/common/pmu/ipc/pmu_msg.c index 9b074d90e..f139ee3cf 100644 --- a/drivers/gpu/nvgpu/common/pmu/ipc/pmu_msg.c +++ b/drivers/gpu/nvgpu/common/pmu/ipc/pmu_msg.c @@ -37,7 +37,7 @@ static int pmu_payload_extract(struct nvgpu_pmu *pmu, struct pmu_sequence *seq) struct nvgpu_engine_fb_queue *fb_queue = nvgpu_pmu_seq_get_cmd_queue(seq); struct gk20a *g = pmu->g; - struct pmu_fw_ver_ops *fw_ops = &g->pmu.fw->ops; + struct pmu_fw_ver_ops *fw_ops = &g->pmu->fw->ops; u32 fbq_payload_offset = 0U; int err = 0; @@ -59,7 +59,7 @@ static int pmu_payload_extract(struct nvgpu_pmu *pmu, struct pmu_sequence *seq) } else { if (fw_ops->allocation_get_dmem_size(pmu, fw_ops->get_seq_out_alloc_ptr(seq)) != 0U) { - err = nvgpu_falcon_copy_from_dmem(&pmu->flcn, + err = nvgpu_falcon_copy_from_dmem(pmu->flcn, fw_ops->allocation_get_dmem_offset(pmu, fw_ops->get_seq_out_alloc_ptr(seq)), nvgpu_pmu_seq_get_out_payload(seq), @@ -81,7 +81,7 @@ static void pmu_payload_free(struct nvgpu_pmu *pmu, struct pmu_sequence *seq) 
struct nvgpu_engine_fb_queue *fb_queue = nvgpu_pmu_seq_get_cmd_queue(seq); struct gk20a *g = pmu->g; - struct pmu_fw_ver_ops *fw_ops = &g->pmu.fw->ops; + struct pmu_fw_ver_ops *fw_ops = &g->pmu->fw->ops; struct nvgpu_mem *in_mem = nvgpu_pmu_seq_get_in_mem(seq); struct nvgpu_mem *out_mem = nvgpu_pmu_seq_get_out_mem(seq); void *seq_in_ptr = fw_ops->get_seq_in_alloc_ptr(seq); @@ -240,7 +240,7 @@ static bool pmu_engine_mem_queue_read(struct nvgpu_pmu *pmu, u32 bytes_read; int err; - err = nvgpu_pmu_queue_pop(&pmu->queues, &pmu->flcn, queue_id, data, + err = nvgpu_pmu_queue_pop(&pmu->queues, pmu->flcn, queue_id, data, bytes_to_read, &bytes_read); if (err != 0) { nvgpu_err(g, "fail to read msg: err %d", err); @@ -279,7 +279,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, u32 queue_id, if (msg->hdr.unit_id == PMU_UNIT_REWIND) { if (!nvgpu_pmu_fb_queue_enabled(&pmu->queues)) { err = nvgpu_pmu_queue_rewind(&pmu->queues, queue_id, - &pmu->flcn); + pmu->flcn); if (err != 0) { nvgpu_err(g, "fail to rewind queue %d", queue_id); @@ -384,7 +384,7 @@ static int pmu_process_init_msg_dmem(struct gk20a *g, struct nvgpu_pmu *pmu, g->ops.pmu.pmu_msgq_tail(pmu, &tail, QUEUE_GET); - err = nvgpu_falcon_copy_from_dmem(&pmu->flcn, tail, + err = nvgpu_falcon_copy_from_dmem(pmu->flcn, tail, (u8 *)&msg->hdr, PMU_MSG_HDR_SIZE, 0); if (err != 0) { nvgpu_err(g, "PMU falcon DMEM copy failed"); @@ -396,7 +396,7 @@ static int pmu_process_init_msg_dmem(struct gk20a *g, struct nvgpu_pmu *pmu, goto exit; } - err = nvgpu_falcon_copy_from_dmem(&pmu->flcn, tail + PMU_MSG_HDR_SIZE, + err = nvgpu_falcon_copy_from_dmem(pmu->flcn, tail + PMU_MSG_HDR_SIZE, (u8 *)&msg->msg, (u32)msg->hdr.size - PMU_MSG_HDR_SIZE, 0); if (err != 0) { nvgpu_err(g, "PMU falcon DMEM copy failed"); @@ -420,7 +420,7 @@ static int pmu_process_init_msg(struct nvgpu_pmu *pmu, struct pmu_msg *msg) { struct gk20a *g = pmu->g; - struct pmu_fw_ver_ops *fw_ops = &g->pmu.fw->ops; + struct pmu_fw_ver_ops *fw_ops = &g->pmu->fw->ops; 
union pmu_init_msg_pmu *init; struct pmu_sha1_gid_data gid_data; int err = 0; @@ -444,7 +444,7 @@ static int pmu_process_init_msg(struct nvgpu_pmu *pmu, if (!pmu->gid_info.valid) { u32 *gid_hdr_data = &gid_data.signature; - err = nvgpu_falcon_copy_from_dmem(&pmu->flcn, + err = nvgpu_falcon_copy_from_dmem(pmu->flcn, fw_ops->get_init_msg_sw_mngd_area_off(init), gid_data.sign_bytes, (u32)sizeof(struct pmu_sha1_gid_data), 0); @@ -554,7 +554,7 @@ static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg, struct nv_pmu_rpc_header rpc, struct rpc_handler_payload *rpc_payload) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; switch (msg->hdr.unit_id) { case PMU_UNIT_ACR: diff --git a/drivers/gpu/nvgpu/common/pmu/ipc/pmu_queue.c b/drivers/gpu/nvgpu/common/pmu/ipc/pmu_queue.c index 60cb088c4..b1be99bc7 100644 --- a/drivers/gpu/nvgpu/common/pmu/ipc/pmu_queue.c +++ b/drivers/gpu/nvgpu/common/pmu/ipc/pmu_queue.c @@ -33,7 +33,7 @@ static int pmu_fb_queue_init(struct gk20a *g, struct pmu_queues *queues, u32 id, union pmu_init_msg_pmu *init, struct nvgpu_mem *super_surface_buf) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct nvgpu_engine_fb_queue_params params = {0}; u32 oflag = 0; int err = 0; @@ -144,7 +144,7 @@ static int pmu_dmem_queue_init(struct gk20a *g, struct pmu_queues *queues, params.queue_head = g->ops.pmu.pmu_queue_head; params.queue_tail = g->ops.pmu.pmu_queue_tail; params.queue_type = QUEUE_TYPE_DMEM; - g->pmu.fw->ops.get_init_msg_queue_params(id, init, + g->pmu->fw->ops.get_init_msg_queue_params(id, init, &params.index, &params.offset, &params.size); diff --git a/drivers/gpu/nvgpu/common/pmu/lpwr/lpwr.c b/drivers/gpu/nvgpu/common/pmu/lpwr/lpwr.c index c0b1a18b7..d6392e5e6 100644 --- a/drivers/gpu/nvgpu/common/pmu/lpwr/lpwr.c +++ b/drivers/gpu/nvgpu/common/pmu/lpwr/lpwr.c @@ -275,7 +275,7 @@ int nvgpu_lwpr_mclk_change(struct gk20a *g, u32 pstate) PMU_COMMAND_QUEUE_HPQ, nvgpu_pmu_handle_param_lpwr_msg, &ack_status); - 
pmu_wait_message_cond(&g->pmu, nvgpu_get_poll_timeout(g), + pmu_wait_message_cond(g->pmu, nvgpu_get_poll_timeout(g), &ack_status, 1); if (ack_status == 0U) { status = -EINVAL; @@ -310,7 +310,7 @@ int nvgpu_lpwr_post_init(struct gk20a *g) PMU_COMMAND_QUEUE_LPQ, nvgpu_pmu_handle_param_lpwr_msg, &ack_status); - pmu_wait_message_cond(&g->pmu, nvgpu_get_poll_timeout(g), + pmu_wait_message_cond(g->pmu, nvgpu_get_poll_timeout(g), &ack_status, 1); if (ack_status == 0U) { status = -EINVAL; @@ -369,7 +369,7 @@ bool nvgpu_lpwr_is_rppg_supported(struct gk20a *g, u32 pstate_num) int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; int status = 0; bool is_mscg_supported = false; bool is_rppg_supported = false; @@ -410,7 +410,7 @@ int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock) int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; int status = 0; bool is_mscg_supported = false; bool is_rppg_supported = false; diff --git a/drivers/gpu/nvgpu/common/pmu/lpwr/rppg.c b/drivers/gpu/nvgpu/common/pmu/lpwr/rppg.c index 777fe421b..f8ea5ef26 100644 --- a/drivers/gpu/nvgpu/common/pmu/lpwr/rppg.c +++ b/drivers/gpu/nvgpu/common/pmu/lpwr/rppg.c @@ -98,7 +98,7 @@ static int rppg_send_cmd(struct gk20a *g, struct nv_pmu_rppg_cmd *prppg_cmd) } if (prppg_cmd->cmn.cmd_id == NV_PMU_RPPG_CMD_ID_INIT_CTRL) { - pmu_wait_message_cond(&g->pmu, nvgpu_get_poll_timeout(g), + pmu_wait_message_cond(g->pmu, nvgpu_get_poll_timeout(g), &success, 1); if (success == 0U) { status = -EINVAL; diff --git a/drivers/gpu/nvgpu/common/pmu/lsfm/lsfm.c b/drivers/gpu/nvgpu/common/pmu/lsfm/lsfm.c index 8464c3f54..852bd146d 100644 --- a/drivers/gpu/nvgpu/common/pmu/lsfm/lsfm.c +++ b/drivers/gpu/nvgpu/common/pmu/lsfm/lsfm.c @@ -32,40 +32,59 @@ #include "lsfm_sw_gv100.h" #include "lsfm_sw_tu104.h" +static bool is_lsfm_supported(struct gk20a *g, + struct nvgpu_pmu 
*pmu, struct nvgpu_pmu_lsfm *lsfm) +{ + if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY) && + !nvgpu_is_enabled(g, NVGPU_IS_FMODEL) && + (lsfm != NULL)) { + return true; + } + + return false; +} + int nvgpu_pmu_lsfm_int_wpr_region(struct gk20a *g, struct nvgpu_pmu *pmu, struct nvgpu_pmu_lsfm *lsfm) { - if (lsfm == NULL || lsfm->init_wpr_region == NULL) { - return 0; + if (is_lsfm_supported(g, pmu, lsfm)) { + if (lsfm->init_wpr_region != NULL) { + return lsfm->init_wpr_region(g, pmu); + } } - return lsfm->init_wpr_region(g, pmu); + return 0; } int nvgpu_pmu_lsfm_bootstrap_ls_falcon(struct gk20a *g, struct nvgpu_pmu *pmu, struct nvgpu_pmu_lsfm *lsfm, u32 falcon_id_mask) { - if (lsfm == NULL || lsfm->bootstrap_ls_falcon == NULL) { - return 0; + if (is_lsfm_supported(g, pmu, lsfm)) { + if (lsfm->bootstrap_ls_falcon != NULL) { + return lsfm->bootstrap_ls_falcon(g, pmu, lsfm, + falcon_id_mask); + } } - return lsfm->bootstrap_ls_falcon(g, pmu, lsfm, falcon_id_mask); + return 0; } int nvgpu_pmu_lsfm_ls_pmu_cmdline_args_copy(struct gk20a *g, struct nvgpu_pmu *pmu, struct nvgpu_pmu_lsfm *lsfm) { - if (lsfm == NULL || lsfm->ls_pmu_cmdline_args_copy == NULL) { - return 0; + if (is_lsfm_supported(g, pmu, lsfm)) { + if (lsfm->ls_pmu_cmdline_args_copy != NULL) { + return lsfm->ls_pmu_cmdline_args_copy(g, pmu); + } } - return lsfm->ls_pmu_cmdline_args_copy(g, pmu); + return 0; } void nvgpu_pmu_lsfm_rpc_handler(struct gk20a *g, struct rpc_handler_payload *rpc_payload) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct nv_pmu_rpc_struct_acr_bootstrap_gr_falcons acr_rpc; (void) memset(&acr_rpc, 0, sizeof(struct nv_pmu_rpc_header)); @@ -89,20 +108,30 @@ void nvgpu_pmu_lsfm_rpc_handler(struct gk20a *g, } } +void nvgpu_pmu_lsfm_clean(struct gk20a *g, struct nvgpu_pmu *pmu, + struct nvgpu_pmu_lsfm *lsfm) +{ + nvgpu_log_fn(g, " "); + + if (is_lsfm_supported(g, pmu, lsfm)) { + lsfm->is_wpr_init_done = false; + lsfm->loaded_falcon_id = 0U; + } +} + int 
nvgpu_pmu_lsfm_init(struct gk20a *g, struct nvgpu_pmu_lsfm **lsfm) { u32 ver = g->params.gpu_arch + g->params.gpu_impl; int err = 0; - if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { - goto done; + if (!nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY) || + nvgpu_is_enabled(g, NVGPU_IS_FMODEL)){ + return 0; } if (*lsfm != NULL) { /* skip alloc/reinit for unrailgate sequence */ nvgpu_pmu_dbg(g, "skip lsfm init for unrailgate sequence"); - (*lsfm)->is_wpr_init_done = false; - (*lsfm)->loaded_falcon_id = 0U; goto done; } @@ -142,7 +171,8 @@ done: void nvgpu_pmu_lsfm_deinit(struct gk20a *g, struct nvgpu_pmu *pmu, struct nvgpu_pmu_lsfm *lsfm) { - if (lsfm != NULL) { + if (is_lsfm_supported(g, pmu, lsfm)) { nvgpu_kfree(g, lsfm); } + pmu->lsfm = NULL; } diff --git a/drivers/gpu/nvgpu/common/pmu/lsfm/lsfm_sw_gm20b.c b/drivers/gpu/nvgpu/common/pmu/lsfm/lsfm_sw_gm20b.c index 6373a4a7f..cd346e306 100644 --- a/drivers/gpu/nvgpu/common/pmu/lsfm/lsfm_sw_gm20b.c +++ b/drivers/gpu/nvgpu/common/pmu/lsfm/lsfm_sw_gm20b.c @@ -32,7 +32,7 @@ static void lsfm_handle_acr_init_wpr_region_msg(struct gk20a *g, struct pmu_msg *msg, void *param, u32 status) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; nvgpu_log_fn(g, " "); @@ -72,7 +72,7 @@ int gm20b_pmu_lsfm_init_acr_wpr_region(struct gk20a *g, struct nvgpu_pmu *pmu) void gm20b_pmu_lsfm_handle_bootstrap_falcon_msg(struct gk20a *g, struct pmu_msg *msg, void *param, u32 status) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; nvgpu_log_fn(g, " "); @@ -130,7 +130,7 @@ static int gm20b_pmu_lsfm_bootstrap_ls_falcon(struct gk20a *g, /* check whether pmu is ready to bootstrap lsf if not wait for it */ if (!lsfm->is_wpr_init_done) { - pmu_wait_message_cond(&g->pmu, + pmu_wait_message_cond(g->pmu, nvgpu_get_poll_timeout(g), &lsfm->is_wpr_init_done, 1U); /* check again if it still not ready indicate an error */ @@ -150,7 +150,7 @@ static int gm20b_pmu_lsfm_bootstrap_ls_falcon(struct gk20a *g, } 
nvgpu_assert(falcon_id_mask <= U8_MAX); - pmu_wait_message_cond(&g->pmu, nvgpu_get_poll_timeout(g), + pmu_wait_message_cond(g->pmu, nvgpu_get_poll_timeout(g), &lsfm->loaded_falcon_id, (u8)FALCON_ID_FECS); if (lsfm->loaded_falcon_id != FALCON_ID_FECS) { err = -ETIMEDOUT; @@ -166,7 +166,7 @@ int gm20b_pmu_lsfm_pmu_cmd_line_args_copy(struct gk20a *g, u32 dmem_size = 0U; int err = 0; - err = nvgpu_falcon_get_mem_size(&pmu->flcn, MEM_DMEM, &dmem_size); + err = nvgpu_falcon_get_mem_size(pmu->flcn, MEM_DMEM, &dmem_size); if (err != 0) { nvgpu_err(g, "dmem size request failed"); return -EINVAL; @@ -185,7 +185,7 @@ int gm20b_pmu_lsfm_pmu_cmd_line_args_copy(struct gk20a *g, pmu->fw->ops.set_cmd_line_args_trace_dma_idx( pmu, GK20A_PMU_DMAIDX_VIRT); - return nvgpu_falcon_copy_to_dmem(&pmu->flcn, cmd_line_args_offset, + return nvgpu_falcon_copy_to_dmem(pmu->flcn, cmd_line_args_offset, (u8 *)(pmu->fw->ops.get_cmd_line_args_ptr(pmu)), pmu->fw->ops.get_cmd_line_args_size(pmu), 0U); } diff --git a/drivers/gpu/nvgpu/common/pmu/lsfm/lsfm_sw_gp10b.c b/drivers/gpu/nvgpu/common/pmu/lsfm/lsfm_sw_gp10b.c index fd878bb1c..749386f03 100644 --- a/drivers/gpu/nvgpu/common/pmu/lsfm/lsfm_sw_gp10b.c +++ b/drivers/gpu/nvgpu/common/pmu/lsfm/lsfm_sw_gp10b.c @@ -91,7 +91,7 @@ static int gp10b_pmu_lsfm_bootstrap_ls_falcon(struct gk20a *g, lsfm->loaded_falcon_id = 0U; /* check whether pmu is ready to bootstrap lsf if not wait for it */ if (!lsfm->is_wpr_init_done) { - pmu_wait_message_cond(&g->pmu, + pmu_wait_message_cond(g->pmu, nvgpu_get_poll_timeout(g), &lsfm->is_wpr_init_done, 1U); /* check again if it still not ready indicate an error */ @@ -111,7 +111,7 @@ static int gp10b_pmu_lsfm_bootstrap_ls_falcon(struct gk20a *g, } nvgpu_assert(falcon_id_mask <= U8_MAX); - pmu_wait_message_cond(&g->pmu, nvgpu_get_poll_timeout(g), + pmu_wait_message_cond(g->pmu, nvgpu_get_poll_timeout(g), &lsfm->loaded_falcon_id, (u8)falcon_id_mask); if (lsfm->loaded_falcon_id != falcon_id_mask) { err = -ETIMEDOUT; diff 
--git a/drivers/gpu/nvgpu/common/pmu/lsfm/lsfm_sw_gv100.c b/drivers/gpu/nvgpu/common/pmu/lsfm/lsfm_sw_gv100.c index 2c93e5e91..da738fd60 100644 --- a/drivers/gpu/nvgpu/common/pmu/lsfm/lsfm_sw_gv100.c +++ b/drivers/gpu/nvgpu/common/pmu/lsfm/lsfm_sw_gv100.c @@ -69,7 +69,7 @@ static int gv100_pmu_lsfm_bootstrap_ls_falcon(struct gk20a *g, lsfm->loaded_falcon_id = 0U; /* check whether pmu is ready to bootstrap lsf if not wait for it */ if (!lsfm->is_wpr_init_done) { - pmu_wait_message_cond(&g->pmu, + pmu_wait_message_cond(g->pmu, nvgpu_get_poll_timeout(g), &lsfm->is_wpr_init_done, 1U); /* check again if it still not ready indicate an error */ @@ -90,7 +90,7 @@ static int gv100_pmu_lsfm_bootstrap_ls_falcon(struct gk20a *g, goto exit; } - pmu_wait_message_cond(&g->pmu, nvgpu_get_poll_timeout(g), + pmu_wait_message_cond(g->pmu, nvgpu_get_poll_timeout(g), &lsfm->loaded_falcon_id, 1U); if (lsfm->loaded_falcon_id != 1U) { @@ -108,7 +108,7 @@ int gv100_update_lspmu_cmdline_args_copy(struct gk20a *g, u32 dmem_size = 0U; int err = 0; - err = nvgpu_falcon_get_mem_size(&pmu->flcn, MEM_DMEM, &dmem_size); + err = nvgpu_falcon_get_mem_size(pmu->flcn, MEM_DMEM, &dmem_size); if (err != 0) { nvgpu_err(g, "dmem size request failed"); return -EINVAL; @@ -129,7 +129,7 @@ int gv100_update_lspmu_cmdline_args_copy(struct gk20a *g, pmu->fw->ops.config_cmd_line_args_super_surface(pmu); } - return nvgpu_falcon_copy_to_dmem(&pmu->flcn, cmd_line_args_offset, + return nvgpu_falcon_copy_to_dmem(pmu->flcn, cmd_line_args_offset, (u8 *)(pmu->fw->ops.get_cmd_line_args_ptr(pmu)), pmu->fw->ops.get_cmd_line_args_size(pmu), 0U); } diff --git a/drivers/gpu/nvgpu/common/pmu/perf/change_seq.c b/drivers/gpu/nvgpu/common/pmu/perf/change_seq.c index 41ede9929..93bd7f8e5 100644 --- a/drivers/gpu/nvgpu/common/pmu/perf/change_seq.c +++ b/drivers/gpu/nvgpu/common/pmu/perf/change_seq.c @@ -106,7 +106,7 @@ exit: static void build_change_seq_boot (struct gk20a *g) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu 
*pmu = g->pmu; struct change_seq_pmu *perf_change_seq_pmu = &(g->perf_pmu->changeseq_pmu); struct nvgpu_clk_domain *pdomain; @@ -131,7 +131,7 @@ static void build_change_seq_boot (struct gk20a *g) script_last->buf.change.data.flags = CTRL_PERF_CHANGE_SEQ_CHANGE_NONE; - BOARDOBJGRP_FOR_EACH(&(g->pmu.clk_pmu->clk_domainobjs->super.super), + BOARDOBJGRP_FOR_EACH(&(g->pmu->clk_pmu->clk_domainobjs->super.super), struct nvgpu_clk_domain *, pdomain, i) { p0_info = nvgpu_pmu_perf_pstate_get_clk_set_info(g, @@ -173,7 +173,7 @@ int nvgpu_perf_change_seq_pmu_setup(struct gk20a *g) { struct nv_pmu_rpc_perf_change_seq_info_get info_get; struct nv_pmu_rpc_perf_change_seq_info_set info_set; - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct change_seq_pmu *perf_change_seq_pmu = &(g->perf_pmu->changeseq_pmu); int status; diff --git a/drivers/gpu/nvgpu/common/pmu/perf/perf_gv100.c b/drivers/gpu/nvgpu/common/pmu/perf/perf_gv100.c index 766aedb07..f6b7c27b5 100644 --- a/drivers/gpu/nvgpu/common/pmu/perf/perf_gv100.c +++ b/drivers/gpu/nvgpu/common/pmu/perf/perf_gv100.c @@ -37,7 +37,7 @@ static int pmu_set_boot_clk_runcb_fn(void *arg) { struct gk20a *g = (struct gk20a *)arg; - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct nv_pmu_rpc_struct_perf_load rpc; struct perf_pmupstate *perf_pmu = g->perf_pmu; struct nvgpu_vfe_invalidate *vfe_init = &perf_pmu->vfe_init; @@ -113,7 +113,7 @@ static int perf_pmu_init_vfe_perf_event(struct gk20a *g) int gv100_perf_pmu_vfe_load(struct gk20a *g) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct nv_pmu_rpc_struct_perf_load rpc; int status = 0; diff --git a/drivers/gpu/nvgpu/common/pmu/perf/perf_ps35.c b/drivers/gpu/nvgpu/common/pmu/perf/perf_ps35.c index 328a6f5dc..b8a533cb5 100644 --- a/drivers/gpu/nvgpu/common/pmu/perf/perf_ps35.c +++ b/drivers/gpu/nvgpu/common/pmu/perf/perf_ps35.c @@ -100,7 +100,7 @@ static int perf_pmu_init_vfe_perf_event(struct gk20a *g) int 
nvgpu_perf_pmu_vfe_load_ps35(struct gk20a *g) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct nv_pmu_rpc_struct_perf_load rpc; int status = 0; diff --git a/drivers/gpu/nvgpu/common/pmu/perf/perf_pstate.c b/drivers/gpu/nvgpu/common/pmu/perf/perf_pstate.c index d77e39697..ac29af42b 100644 --- a/drivers/gpu/nvgpu/common/pmu/perf/perf_pstate.c +++ b/drivers/gpu/nvgpu/common/pmu/perf/perf_pstate.c @@ -124,7 +124,7 @@ static int parse_pstate_entry_6x(struct gk20a *g, clk_domain = (struct nvgpu_clk_domain *) BOARDOBJGRP_OBJ_GET_BY_IDX( - &g->pmu.clk_pmu->clk_domainobjs->super.super, clkidx); + &g->pmu->clk_pmu->clk_domainobjs->super.super, clkidx); pclksetinfo = &pstate->clklist.clksetinfo[clkidx]; clk_entry = (struct vbios_pstate_entry_clock_6x *)p; diff --git a/drivers/gpu/nvgpu/common/pmu/perf/vfe_equ.c b/drivers/gpu/nvgpu/common/pmu/perf/vfe_equ.c index 19c8e4a6a..d17f98428 100644 --- a/drivers/gpu/nvgpu/common/pmu/perf/vfe_equ.c +++ b/drivers/gpu/nvgpu/common/pmu/perf/vfe_equ.c @@ -746,7 +746,7 @@ static struct vfe_equ *construct_vfe_equ(struct gk20a *g, void *pargs) int nvgpu_vfe_get_volt_margin_limit(struct gk20a *g, u32 *vmargin_uv) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct nv_pmu_rpc_struct_perf_vfe_eval rpc; int status = 0; u8 vmargin_idx; @@ -773,11 +773,11 @@ int nvgpu_vfe_get_volt_margin_limit(struct gk20a *g, u32 *vmargin_uv) int nvgpu_vfe_get_freq_margin_limit(struct gk20a *g, u32 *fmargin_mhz) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct nv_pmu_rpc_struct_perf_vfe_eval rpc; int status = 0; u8 fmargin_idx; - struct nvgpu_avfsfllobjs *pfllobjs = g->pmu.clk_pmu->avfs_fllobjs; + struct nvgpu_avfsfllobjs *pfllobjs = g->pmu->clk_pmu->avfs_fllobjs; fmargin_idx = pfllobjs->freq_margin_vfe_idx; if (fmargin_idx == 255U) { diff --git a/drivers/gpu/nvgpu/common/pmu/perfmon/pmu_perfmon.c b/drivers/gpu/nvgpu/common/pmu/perfmon/pmu_perfmon.c index d4affb687..29b07b958 
100644 --- a/drivers/gpu/nvgpu/common/pmu/perfmon/pmu_perfmon.c +++ b/drivers/gpu/nvgpu/common/pmu/perfmon/pmu_perfmon.c @@ -343,13 +343,13 @@ int nvgpu_pmu_perfmon_stop_sampling(struct nvgpu_pmu *pmu) int nvgpu_pmu_load_norm(struct gk20a *g, u32 *load) { - *load = g->pmu.pmu_perfmon->load_shadow; + *load = g->pmu->pmu_perfmon->load_shadow; return 0; } int nvgpu_pmu_load_update(struct gk20a *g) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; u32 load = 0; int err = 0; if (!pmu->pmu_perfmon->perfmon_ready) { @@ -362,7 +362,7 @@ int nvgpu_pmu_load_update(struct gk20a *g) nvgpu_pmu_perfmon_get_sample(g, pmu, pmu->pmu_perfmon); load = pmu->pmu_perfmon->load; } else { - err = nvgpu_falcon_copy_from_dmem(&pmu->flcn, + err = nvgpu_falcon_copy_from_dmem(pmu->flcn, pmu->pmu_perfmon->sample_buffer, (u8 *)&load, 2 * 1, 0); if (err != 0) { nvgpu_err(g, "PMU falcon DMEM copy failed"); diff --git a/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gm20b.c b/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gm20b.c index 4e943b47e..fc19fcd45 100644 --- a/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gm20b.c +++ b/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gm20b.c @@ -53,7 +53,7 @@ static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg, void gm20b_pmu_save_zbc(struct gk20a *g, u32 entries) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct pmu_cmd cmd; size_t tmp_size; int err = 0; @@ -90,11 +90,11 @@ void gm20b_pmu_save_zbc(struct gk20a *g, u32 entries) int gm20b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, struct pmu_pg_stats_data *pg_stat_data) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct pmu_pg_stats stats; int err; - err = nvgpu_falcon_copy_from_dmem(&pmu->flcn, + err = nvgpu_falcon_copy_from_dmem(pmu->flcn, pmu->pg->stat_dmem_offset[pg_engine_id], (u8 *)&stats, (u32)sizeof(struct pmu_pg_stats), 0); if (err != 0) { diff --git a/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp106.c 
b/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp106.c index 42d601790..4ef31353d 100644 --- a/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp106.c +++ b/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp106.c @@ -46,7 +46,7 @@ static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg, int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct pmu_cmd cmd; int status; u64 tmp_size; @@ -102,11 +102,11 @@ int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id) int gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, struct pmu_pg_stats_data *pg_stat_data) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct pmu_pg_stats_v2 stats; int err; - err = nvgpu_falcon_copy_from_dmem(&pmu->flcn, + err = nvgpu_falcon_copy_from_dmem(pmu->flcn, pmu->pg->stat_dmem_offset[pg_engine_id], (u8 *)&stats, (u32)sizeof(struct pmu_pg_stats_v2), 0); if (err != 0) { diff --git a/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp10b.c b/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp10b.c index d9e0540ee..94d4062fe 100644 --- a/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp10b.c +++ b/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp10b.c @@ -48,7 +48,7 @@ static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg, int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct pmu_cmd cmd; size_t tmp_size; @@ -82,11 +82,11 @@ int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id) int gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, struct pmu_pg_stats_data *pg_stat_data) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct pmu_pg_stats_v1 stats; int err; - err = nvgpu_falcon_copy_from_dmem(&pmu->flcn, + err = nvgpu_falcon_copy_from_dmem(pmu->flcn, pmu->pg->stat_dmem_offset[pg_engine_id], (u8 *)&stats, (u32)sizeof(struct pmu_pg_stats_v1), 0); if (err != 0) { diff --git 
a/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gv11b.c b/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gv11b.c index 358279c01..b50774941 100644 --- a/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gv11b.c +++ b/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gv11b.c @@ -60,7 +60,7 @@ static void pmu_handle_pg_param_msg(struct gk20a *g, struct pmu_msg *msg, int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct pmu_cmd cmd; size_t tmp_size; @@ -91,7 +91,7 @@ int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id) int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct pmu_cmd cmd; size_t tmp_size; diff --git a/drivers/gpu/nvgpu/common/pmu/pg/pmu_aelpg.c b/drivers/gpu/nvgpu/common/pmu/pg/pmu_aelpg.c index 34e0be95f..1bfbe8db5 100644 --- a/drivers/gpu/nvgpu/common/pmu/pg/pmu_aelpg.c +++ b/drivers/gpu/nvgpu/common/pmu/pg/pmu_aelpg.c @@ -35,7 +35,7 @@ int nvgpu_aelpg_init(struct gk20a *g) union pmu_ap_cmd ap_cmd; ap_cmd.init.cmd_id = PMU_AP_CMD_ID_INIT; - ap_cmd.init.pg_sampling_period_us = g->pmu.pg->aelpg_param[0]; + ap_cmd.init.pg_sampling_period_us = g->pmu->pg->aelpg_param[0]; status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false); return status; @@ -43,7 +43,7 @@ int nvgpu_aelpg_init(struct gk20a *g) int nvgpu_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; int status = 0; union pmu_ap_cmd ap_cmd; @@ -95,7 +95,7 @@ static void ap_callback_init_and_enable_ctrl( int nvgpu_pmu_ap_send_command(struct gk20a *g, union pmu_ap_cmd *p_ap_cmd, bool b_block) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; int status = 0; struct pmu_cmd cmd; pmu_callback p_callback = NULL; diff --git a/drivers/gpu/nvgpu/common/pmu/pg/pmu_pg.c b/drivers/gpu/nvgpu/common/pmu/pg/pmu_pg.c index c0db115e9..f40f2ad5b 100644 --- a/drivers/gpu/nvgpu/common/pmu/pg/pmu_pg.c 
+++ b/drivers/gpu/nvgpu/common/pmu/pg/pmu_pg.c @@ -184,7 +184,7 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg, /* PG enable/disable */ int nvgpu_pmu_pg_global_enable(struct gk20a *g, bool enable_pg) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; int status = 0; if (!is_pg_supported(g, pmu->pg)) { @@ -222,7 +222,7 @@ int nvgpu_pmu_pg_global_enable(struct gk20a *g, bool enable_pg) static int pmu_enable_elpg_locked(struct gk20a *g, u8 pg_engine_id) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct pmu_cmd cmd; int status; u64 tmp; @@ -264,7 +264,7 @@ static int pmu_enable_elpg_locked(struct gk20a *g, u8 pg_engine_id) int nvgpu_pmu_enable_elpg(struct gk20a *g) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; u8 pg_engine_id; u32 pg_engine_id_list = 0; @@ -272,7 +272,7 @@ int nvgpu_pmu_enable_elpg(struct gk20a *g) nvgpu_log_fn(g, " "); - if (!is_pg_supported(g, g->pmu.pg)) { + if (!is_pg_supported(g, g->pmu->pg)) { return ret; } @@ -337,7 +337,7 @@ static void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu) /* Print PG stats */ nvgpu_err(g, "Print PG stats"); - nvgpu_falcon_print_dmem(&pmu->flcn, + nvgpu_falcon_print_dmem(pmu->flcn, pmu->pg->stat_dmem_offset[PMU_PG_ELPG_ENGINE_ID_GRAPHICS], (u32)sizeof(struct pmu_pg_stats_v2)); @@ -347,7 +347,7 @@ static void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu) int nvgpu_pmu_disable_elpg(struct gk20a *g) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct pmu_cmd cmd; int ret = 0; u8 pg_engine_id; @@ -494,7 +494,7 @@ static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg, static int pmu_pg_init_send(struct gk20a *g, u8 pg_engine_id) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct pmu_cmd cmd; int err = 0; u64 tmp; @@ -744,7 +744,7 @@ static void pmu_pg_setup_hw_load_zbc(struct gk20a *g, struct nvgpu_pmu *pmu, int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 
pg_engine_id, struct pmu_pg_stats_data *pg_stat_data) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; u32 pg_engine_id_list = 0; int err = 0; @@ -804,7 +804,7 @@ static void pmu_pg_kill_task(struct gk20a *g, struct nvgpu_pmu *pmu, static int pmu_pg_task(void *arg) { struct gk20a *g = (struct gk20a *)arg; - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct nvgpu_pg_init *pg_init = &pmu->pg->pg_init; u32 pmu_state = 0; int err = 0; @@ -1053,7 +1053,7 @@ void nvgpu_pmu_pg_deinit(struct gk20a *g, struct nvgpu_pmu *pmu, void nvgpu_pmu_set_golden_image_initialized(struct gk20a *g, bool initialized) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; if (!is_pg_supported(g, pmu->pg)) { return; @@ -1065,7 +1065,7 @@ void nvgpu_pmu_set_golden_image_initialized(struct gk20a *g, bool initialized) int nvgpu_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, struct pmu_pg_stats_data *pg_stat_data) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; if (!is_pg_supported(g, pmu->pg)) { return 0; @@ -1076,7 +1076,7 @@ int nvgpu_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, void nvgpu_pmu_save_zbc(struct gk20a *g, u32 entries) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; if (!is_pg_supported(g, pmu->pg)) { return; @@ -1087,7 +1087,7 @@ void nvgpu_pmu_save_zbc(struct gk20a *g, u32 entries) bool nvgpu_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; if (!is_pg_supported(g, pmu->pg)) { return false; diff --git a/drivers/gpu/nvgpu/common/pmu/pmgr/pmgrpmu.c b/drivers/gpu/nvgpu/common/pmu/pmgr/pmgrpmu.c index ec44f19a6..aec49078e 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmgr/pmgrpmu.c +++ b/drivers/gpu/nvgpu/common/pmu/pmgr/pmgrpmu.c @@ -134,7 +134,7 @@ static int pmgr_pmu_set_object(struct gk20a *g, goto exit; } - pmu_wait_message_cond(&g->pmu, + pmu_wait_message_cond(g->pmu, 
nvgpu_get_poll_timeout(g), &handlerparams.success, 1); @@ -444,7 +444,7 @@ int pmgr_pmu_pwr_devices_query_blocking( goto exit; } - pmu_wait_message_cond(&g->pmu, + pmu_wait_message_cond(g->pmu, nvgpu_get_poll_timeout(g), &handlerparams.success, 1); @@ -486,7 +486,7 @@ static int pmgr_pmu_load_blocking(struct gk20a *g) goto exit; } - pmu_wait_message_cond(&g->pmu, + pmu_wait_message_cond(g->pmu, nvgpu_get_poll_timeout(g), &handlerparams.success, 1); diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c index 970297024..ad1e49468 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu.c @@ -40,6 +40,7 @@ #include #include #include +#include /* PMU locks used to sync with PMU-RTOS */ int nvgpu_pmu_lock_acquire(struct gk20a *g, struct nvgpu_pmu *pmu, @@ -100,6 +101,7 @@ int nvgpu_pmu_destroy(struct gk20a *g, struct nvgpu_pmu *pmu) nvgpu_pmu_fw_state_change(g, pmu, PMU_FW_STATE_OFF, false); nvgpu_pmu_set_fw_ready(g, pmu, false); + nvgpu_pmu_lsfm_clean(g, pmu, pmu->lsfm); pmu->pmu_perfmon->perfmon_ready = false; nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false); @@ -141,6 +143,10 @@ static void remove_pmu_support(struct nvgpu_pmu *pmu) nvgpu_pmu_super_surface_deinit(g, pmu, pmu->super_surface); } + if (nvgpu_is_enabled(g, NVGPU_PMU_PSTATE)) { + nvgpu_pmu_pstate_deinit(g); + } + nvgpu_pmu_debug_deinit(g, pmu); nvgpu_pmu_lsfm_deinit(g, pmu, pmu->lsfm); nvgpu_pmu_pg_deinit(g, pmu, pmu->pg); @@ -218,7 +224,7 @@ int nvgpu_pmu_init(struct gk20a *g, struct nvgpu_pmu *pmu) if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SEC2_RTOS)) { /* Reset PMU engine */ - err = nvgpu_falcon_reset(&g->pmu.flcn); + err = nvgpu_falcon_reset(g->pmu->flcn); /* Bootstrap PMU from SEC2 RTOS*/ err = nvgpu_sec2_bootstrap_ls_falcons(g, &g->sec2, @@ -232,7 +238,7 @@ int nvgpu_pmu_init(struct gk20a *g, struct nvgpu_pmu *pmu) * clear halt interrupt to avoid PMU-RTOS ucode * hitting breakpoint due to PMU halt */ - err = 
nvgpu_falcon_clear_halt_intr_status(&g->pmu.flcn, + err = nvgpu_falcon_clear_halt_intr_status(g->pmu->flcn, nvgpu_get_poll_timeout(g)); if (err != 0) { goto exit; @@ -249,10 +255,10 @@ int nvgpu_pmu_init(struct gk20a *g, struct nvgpu_pmu *pmu) } if (g->ops.pmu.pmu_enable_irq != NULL) { - nvgpu_mutex_acquire(&g->pmu.isr_mutex); - g->ops.pmu.pmu_enable_irq(&g->pmu, true); - g->pmu.isr_enabled = true; - nvgpu_mutex_release(&g->pmu.isr_mutex); + nvgpu_mutex_acquire(&g->pmu->isr_mutex); + g->ops.pmu.pmu_enable_irq(g->pmu, true); + g->pmu->isr_enabled = true; + nvgpu_mutex_release(&g->pmu->isr_mutex); } /*Once in LS mode, cpuctl_alias is only accessible*/ @@ -273,13 +279,28 @@ exit: return err; } -int nvgpu_pmu_early_init(struct gk20a *g, struct nvgpu_pmu *pmu) +int nvgpu_pmu_early_init(struct gk20a *g, struct nvgpu_pmu **pmu_p) { int err = 0; + struct nvgpu_pmu *pmu; nvgpu_log_fn(g, " "); + if (*pmu_p != NULL) { + /* skip alloc/reinit for unrailgate sequence */ + nvgpu_pmu_dbg(g, "skip pmu init for unrailgate sequence"); + goto exit; + } + + pmu = (struct nvgpu_pmu *) nvgpu_kzalloc(g, sizeof(struct nvgpu_pmu)); + if (pmu == NULL) { + err = -ENOMEM; + goto exit; + } + + *pmu_p = pmu; pmu->g = g; + pmu->flcn = &g->pmu_flcn; if (!g->support_ls_pmu) { goto exit; @@ -343,7 +364,6 @@ int nvgpu_pmu_early_init(struct gk20a *g, struct nvgpu_pmu *pmu) } pmu->remove_support = remove_pmu_support; - goto exit; init_failed: @@ -352,6 +372,18 @@ exit: return err; } +void nvgpu_pmu_remove_support(struct gk20a *g, struct nvgpu_pmu *pmu) +{ + if(pmu != NULL) { + if (pmu->remove_support != NULL) { + pmu->remove_support(g->pmu); + } + + nvgpu_kfree(g, g->pmu); + g->pmu = NULL; + } +} + /* PMU H/W error functions */ static void pmu_report_error(struct gk20a *g, u32 err_type, u32 status, u32 pmu_err_type) @@ -392,7 +424,7 @@ static int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable) nvgpu_cg_blcg_pmu_load_enable(g); - if (nvgpu_falcon_mem_scrub_wait(&pmu->flcn) != 0) { + if 
(nvgpu_falcon_mem_scrub_wait(pmu->flcn) != 0) { /* keep PMU falcon/engine in reset * if IMEM/DMEM scrubbing fails */ @@ -427,7 +459,7 @@ static int pmu_enable(struct nvgpu_pmu *pmu, bool enable) goto exit; } - err = nvgpu_falcon_wait_idle(&pmu->flcn); + err = nvgpu_falcon_wait_idle(pmu->flcn); if (err != 0) { goto exit; } @@ -440,12 +472,12 @@ exit: int nvgpu_pmu_reset(struct gk20a *g) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; int err = 0; nvgpu_log_fn(g, " %s ", g->name); - err = nvgpu_falcon_wait_idle(&pmu->flcn); + err = nvgpu_falcon_wait_idle(pmu->flcn); if (err != 0) { goto exit; } diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_debug.c b/drivers/gpu/nvgpu/common/pmu/pmu_debug.c index 7711a4753..e67d639a1 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_debug.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_debug.c @@ -102,7 +102,7 @@ void nvgpu_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu) { struct gk20a *g = pmu->g; - nvgpu_falcon_dump_stats(&pmu->flcn); + nvgpu_falcon_dump_stats(pmu->flcn); g->ops.pmu.pmu_dump_falcon_stats(pmu); /* Print PMU F/W debug prints */ diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_pstate.c b/drivers/gpu/nvgpu/common/pmu/pmu_pstate.c index c4ae496aa..94e5be72a 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_pstate.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_pstate.c @@ -47,7 +47,7 @@ void nvgpu_pmu_pstate_deinit(struct gk20a *g) { pmgr_pmu_free_pmupstate(g); - nvgpu_therm_pmu_free_pmupstate(g, &g->pmu); + nvgpu_therm_pmu_free_pmupstate(g, g->pmu); nvgpu_perf_pmu_free_pmupstate(g); nvgpu_clk_domain_free_pmupstate(g); nvgpu_clk_prog_free_pmupstate(g); @@ -124,9 +124,9 @@ static int pmu_pstate_init(struct gk20a *g) int err; nvgpu_log_fn(g, " "); - err = nvgpu_therm_pmu_init_pmupstate(g, &g->pmu); + err = nvgpu_therm_pmu_init_pmupstate(g, g->pmu); if (err != 0) { - nvgpu_therm_pmu_free_pmupstate(g, &g->pmu); + nvgpu_therm_pmu_free_pmupstate(g, g->pmu); return err; } @@ -268,7 +268,7 @@ int nvgpu_pmu_pstate_sw_setup(struct 
gk20a *g) int err; nvgpu_log_fn(g, " "); - err = nvgpu_pmu_wait_fw_ready(g, &g->pmu); + err = nvgpu_pmu_wait_fw_ready(g, g->pmu); if (err != 0) { nvgpu_err(g, "PMU not ready to process pstate requests"); return err; @@ -286,7 +286,7 @@ int nvgpu_pmu_pstate_sw_setup(struct gk20a *g) return err; } - err = nvgpu_therm_domain_sw_setup(g, &g->pmu); + err = nvgpu_therm_domain_sw_setup(g, g->pmu); if (err != 0) { goto err_therm_pmu_init_pmupstate; } @@ -322,7 +322,7 @@ int nvgpu_pmu_pstate_sw_setup(struct gk20a *g) err_pmgr_pmu_init_pmupstate: pmgr_pmu_free_pmupstate(g); err_therm_pmu_init_pmupstate: - nvgpu_therm_pmu_free_pmupstate(g, &g->pmu); + nvgpu_therm_pmu_free_pmupstate(g, g->pmu); err_perf_pmu_init_pmupstate: nvgpu_perf_pmu_free_pmupstate(g); @@ -469,7 +469,7 @@ int nvgpu_pmu_pstate_pmu_setup(struct gk20a *g) return err; } - err = nvgpu_therm_domain_pmu_setup(g, &g->pmu); + err = nvgpu_therm_domain_pmu_setup(g, g->pmu); if (err != 0) { return err; } diff --git a/drivers/gpu/nvgpu/common/pmu/therm/thrmchannel.c b/drivers/gpu/nvgpu/common/pmu/therm/thrmchannel.c index 0b7c4ef61..541f7c4d3 100644 --- a/drivers/gpu/nvgpu/common/pmu/therm/thrmchannel.c +++ b/drivers/gpu/nvgpu/common/pmu/therm/thrmchannel.c @@ -222,7 +222,7 @@ int therm_channel_sw_setup(struct gk20a *g) /* Construct the Super Class and override the Interfaces */ status = nvgpu_boardobjgrp_construct_e32(g, - &g->pmu.therm_pmu->therm_channelobjs.super); + &g->pmu->therm_pmu->therm_channelobjs.super); if (status != 0) { nvgpu_err(g, "error creating boardobjgrp for therm devices, " @@ -230,8 +230,8 @@ int therm_channel_sw_setup(struct gk20a *g) goto done; } - pboardobjgrp = &g->pmu.therm_pmu->therm_channelobjs.super.super; - pthermchannelobjs = &(g->pmu.therm_pmu->therm_channelobjs); + pboardobjgrp = &g->pmu->therm_pmu->therm_channelobjs.super.super; + pthermchannelobjs = &(g->pmu->therm_pmu->therm_channelobjs); /* Override the Interfaces */ pboardobjgrp->pmudatainstget = _therm_channel_pmudata_instget; 
diff --git a/drivers/gpu/nvgpu/common/pmu/therm/thrmdev.c b/drivers/gpu/nvgpu/common/pmu/therm/thrmdev.c index 8d0eeda49..794e15a35 100644 --- a/drivers/gpu/nvgpu/common/pmu/therm/thrmdev.c +++ b/drivers/gpu/nvgpu/common/pmu/therm/thrmdev.c @@ -341,7 +341,7 @@ int therm_device_sw_setup(struct gk20a *g) /* Construct the Super Class and override the Interfaces */ status = nvgpu_boardobjgrp_construct_e32(g, - &g->pmu.therm_pmu->therm_deviceobjs.super); + &g->pmu->therm_pmu->therm_deviceobjs.super); if (status != 0) { nvgpu_err(g, "error creating boardobjgrp for therm devices," @@ -349,8 +349,8 @@ int therm_device_sw_setup(struct gk20a *g) goto done; } - pboardobjgrp = &g->pmu.therm_pmu->therm_deviceobjs.super.super; - pthermdeviceobjs = &(g->pmu.therm_pmu->therm_deviceobjs); + pboardobjgrp = &g->pmu->therm_pmu->therm_deviceobjs.super.super; + pthermdeviceobjs = &(g->pmu->therm_pmu->therm_deviceobjs); /* Override the Interfaces */ pboardobjgrp->pmudatainstget = _therm_device_pmudata_instget; diff --git a/drivers/gpu/nvgpu/common/pmu/therm/thrmpmu.c b/drivers/gpu/nvgpu/common/pmu/therm/thrmpmu.c index 813562468..48b8ebfc2 100644 --- a/drivers/gpu/nvgpu/common/pmu/therm/thrmpmu.c +++ b/drivers/gpu/nvgpu/common/pmu/therm/thrmpmu.c @@ -59,8 +59,8 @@ int therm_send_pmgr_tables_to_pmu(struct gk20a *g) int status = 0; struct boardobjgrp *pboardobjgrp = NULL; - if (!BOARDOBJGRP_IS_EMPTY(&g->pmu.therm_pmu->therm_deviceobjs.super.super)) { - pboardobjgrp = &g->pmu.therm_pmu->therm_deviceobjs.super.super; + if (!BOARDOBJGRP_IS_EMPTY(&g->pmu->therm_pmu->therm_deviceobjs.super.super)) { + pboardobjgrp = &g->pmu->therm_pmu->therm_deviceobjs.super.super; status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); if (status != 0) { nvgpu_err(g, @@ -71,8 +71,8 @@ int therm_send_pmgr_tables_to_pmu(struct gk20a *g) } if (!BOARDOBJGRP_IS_EMPTY( - &g->pmu.therm_pmu->therm_channelobjs.super.super)) { - pboardobjgrp = &g->pmu.therm_pmu->therm_channelobjs.super.super; + 
&g->pmu->therm_pmu->therm_channelobjs.super.super)) { + pboardobjgrp = &g->pmu->therm_pmu->therm_channelobjs.super.super; status = pboardobjgrp->pmuinithandle(g, pboardobjgrp); if (status != 0) { nvgpu_err(g, @@ -107,7 +107,7 @@ static int therm_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd, if (cb_param != NULL) { handlerparams = (struct therm_pmucmdhandler_params*)cb_param; - pmu_wait_message_cond(&g->pmu, + pmu_wait_message_cond(g->pmu, nvgpu_get_poll_timeout(g), &handlerparams->success, 1); diff --git a/drivers/gpu/nvgpu/common/pmu/volt/volt_dev.c b/drivers/gpu/nvgpu/common/pmu/volt/volt_dev.c index 449d07baa..825ea308d 100644 --- a/drivers/gpu/nvgpu/common/pmu/volt/volt_dev.c +++ b/drivers/gpu/nvgpu/common/pmu/volt/volt_dev.c @@ -592,7 +592,7 @@ int nvgpu_volt_dev_sw_setup(struct gk20a *g) } } - g->pmu.volt_rpc_handler = nvgpu_pmu_volt_rpc_handler; + g->pmu->volt_rpc_handler = nvgpu_pmu_volt_rpc_handler; done: nvgpu_log_info(g, " done status %x", status); diff --git a/drivers/gpu/nvgpu/common/pmu/volt/volt_pmu.c b/drivers/gpu/nvgpu/common/pmu/volt/volt_pmu.c index a5a3dcc7e..0bbbd9c29 100644 --- a/drivers/gpu/nvgpu/common/pmu/volt/volt_pmu.c +++ b/drivers/gpu/nvgpu/common/pmu/volt/volt_pmu.c @@ -42,7 +42,7 @@ struct volt_rpc_pmucmdhandler_params { static int volt_set_voltage_rpc(struct gk20a *g, u8 client_id, struct ctrl_volt_volt_rail_list_v1 *prail_list) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct nv_pmu_rpc_struct_volt_volt_set_voltage rpc; int status = 0; @@ -63,7 +63,7 @@ static int volt_set_voltage_rpc(struct gk20a *g, u8 client_id, static int volt_rail_get_voltage(struct gk20a *g, u8 volt_domain, u32 *pvoltage_uv) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct nv_pmu_rpc_struct_volt_volt_rail_get_voltage rpc; int status = 0; u8 rail_idx; @@ -113,7 +113,7 @@ static int volt_set_voltage(struct gk20a *g, u32 logic_voltage_uv, int nvgpu_volt_send_load_cmd_to_pmu(struct gk20a *g) { - struct 
nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; struct nv_pmu_rpc_struct_volt_load rpc; int status = 0; diff --git a/drivers/gpu/nvgpu/common/vbios/bios_sw_gp106.c b/drivers/gpu/nvgpu/common/vbios/bios_sw_gp106.c index 6c8dc4ccf..574bebc7d 100644 --- a/drivers/gpu/nvgpu/common/vbios/bios_sw_gp106.c +++ b/drivers/gpu/nvgpu/common/vbios/bios_sw_gp106.c @@ -49,12 +49,12 @@ int gp106_bios_devinit(struct gk20a *g) nvgpu_log_fn(g, " "); - if (nvgpu_falcon_reset(&g->pmu.flcn) != 0) { + if (nvgpu_falcon_reset(g->pmu->flcn) != 0) { err = -ETIMEDOUT; goto out; } - err = nvgpu_falcon_copy_to_imem(&g->pmu.flcn, + err = nvgpu_falcon_copy_to_imem(g->pmu->flcn, g->bios.devinit.bootloader_phys_base, g->bios.devinit.bootloader, g->bios.devinit.bootloader_size, @@ -64,7 +64,7 @@ int gp106_bios_devinit(struct gk20a *g) goto out; } - err = nvgpu_falcon_copy_to_imem(&g->pmu.flcn, g->bios.devinit.phys_base, + err = nvgpu_falcon_copy_to_imem(g->pmu->flcn, g->bios.devinit.phys_base, g->bios.devinit.ucode, g->bios.devinit.size, 0, 1, g->bios.devinit.phys_base >> 8); @@ -73,7 +73,7 @@ int gp106_bios_devinit(struct gk20a *g) goto out; } - err = nvgpu_falcon_copy_to_dmem(&g->pmu.flcn, g->bios.devinit.dmem_phys_base, + err = nvgpu_falcon_copy_to_dmem(g->pmu->flcn, g->bios.devinit.dmem_phys_base, g->bios.devinit.dmem, g->bios.devinit.dmem_size, 0); @@ -82,7 +82,7 @@ int gp106_bios_devinit(struct gk20a *g) goto out; } - err = nvgpu_falcon_copy_to_dmem(&g->pmu.flcn, g->bios.devinit_tables_phys_base, + err = nvgpu_falcon_copy_to_dmem(g->pmu->flcn, g->bios.devinit_tables_phys_base, g->bios.devinit_tables, g->bios.devinit_tables_size, 0); @@ -91,7 +91,7 @@ int gp106_bios_devinit(struct gk20a *g) goto out; } - err = nvgpu_falcon_copy_to_dmem(&g->pmu.flcn, g->bios.devinit_script_phys_base, + err = nvgpu_falcon_copy_to_dmem(g->pmu->flcn, g->bios.devinit_script_phys_base, g->bios.bootscripts, g->bios.bootscripts_size, 0); @@ -100,7 +100,7 @@ int gp106_bios_devinit(struct gk20a *g) goto out; 
} - err = nvgpu_falcon_bootstrap(&g->pmu.flcn, + err = nvgpu_falcon_bootstrap(g->pmu->flcn, g->bios.devinit.code_entry_point); if (err != 0) { nvgpu_err(g, "falcon bootstrap failed %d", err); @@ -114,7 +114,7 @@ int gp106_bios_devinit(struct gk20a *g) do { top_scratch1_reg = g->ops.top.read_top_scratch1_reg(g); devinit_completed = ((g->ops.falcon.is_falcon_cpu_halted( - &g->pmu.flcn) != 0U) && + g->pmu->flcn) != 0U) && (g->ops.top.top_scratch1_devinit_completed(g, top_scratch1_reg)) != 0U); @@ -126,7 +126,7 @@ int gp106_bios_devinit(struct gk20a *g) goto out; } - err = nvgpu_falcon_clear_halt_intr_status(&g->pmu.flcn, + err = nvgpu_falcon_clear_halt_intr_status(g->pmu->flcn, nvgpu_get_poll_timeout(g)); if (err != 0) { nvgpu_err(g, "falcon_clear_halt_intr_status failed %d", err); @@ -140,7 +140,7 @@ out: int gp106_bios_preos_wait_for_halt(struct gk20a *g) { - return nvgpu_falcon_wait_for_halt(&g->pmu.flcn, + return nvgpu_falcon_wait_for_halt(g->pmu->flcn, PMU_BOOT_TIMEOUT_MAX / 1000); } @@ -150,7 +150,7 @@ int gp106_bios_preos(struct gk20a *g) nvgpu_log_fn(g, " "); - if (nvgpu_falcon_reset(&g->pmu.flcn) != 0) { + if (nvgpu_falcon_reset(g->pmu->flcn) != 0) { err = -ETIMEDOUT; goto out; } @@ -159,7 +159,7 @@ int gp106_bios_preos(struct gk20a *g) g->ops.bios.preos_reload_check(g); } - err = nvgpu_falcon_copy_to_imem(&g->pmu.flcn, + err = nvgpu_falcon_copy_to_imem(g->pmu->flcn, g->bios.preos.bootloader_phys_base, g->bios.preos.bootloader, g->bios.preos.bootloader_size, @@ -169,7 +169,7 @@ int gp106_bios_preos(struct gk20a *g) goto out; } - err = nvgpu_falcon_copy_to_imem(&g->pmu.flcn, g->bios.preos.phys_base, + err = nvgpu_falcon_copy_to_imem(g->pmu->flcn, g->bios.preos.phys_base, g->bios.preos.ucode, g->bios.preos.size, 0, 1, g->bios.preos.phys_base >> 8); @@ -178,7 +178,7 @@ int gp106_bios_preos(struct gk20a *g) goto out; } - err = nvgpu_falcon_copy_to_dmem(&g->pmu.flcn, g->bios.preos.dmem_phys_base, + err = nvgpu_falcon_copy_to_dmem(g->pmu->flcn, 
g->bios.preos.dmem_phys_base, g->bios.preos.dmem, g->bios.preos.dmem_size, 0); @@ -187,7 +187,7 @@ int gp106_bios_preos(struct gk20a *g) goto out; } - err = nvgpu_falcon_bootstrap(&g->pmu.flcn, + err = nvgpu_falcon_bootstrap(g->pmu->flcn, g->bios.preos.code_entry_point); if (err != 0) { nvgpu_err(g, "falcon bootstrap failed %d", err); @@ -200,7 +200,7 @@ int gp106_bios_preos(struct gk20a *g) goto out; } - err = nvgpu_falcon_clear_halt_intr_status(&g->pmu.flcn, + err = nvgpu_falcon_clear_halt_intr_status(g->pmu->flcn, nvgpu_get_poll_timeout(g)); if (err != 0) { nvgpu_err(g, "falcon_clear_halt_intr_status failed %d", err); diff --git a/drivers/gpu/nvgpu/common/vbios/bios_sw_gv100.c b/drivers/gpu/nvgpu/common/vbios/bios_sw_gv100.c index 6ed1c0908..7bcdeb32e 100644 --- a/drivers/gpu/nvgpu/common/vbios/bios_sw_gv100.c +++ b/drivers/gpu/nvgpu/common/vbios/bios_sw_gv100.c @@ -93,7 +93,7 @@ int gv100_bios_preos_wait_for_halt(struct gk20a *g) progress = g->ops.bus.read_sw_scratch(g, SCRATCH_PREOS_PROGRESS); preos_completed = (g->ops.falcon.is_falcon_cpu_halted( - &g->pmu.flcn) != 0U) && + g->pmu->flcn) != 0U) && (PREOS_PROGRESS_MASK(progress) == PREOS_PROGRESS_EXIT); diff --git a/drivers/gpu/nvgpu/common/vgpu/init/init_vgpu.c b/drivers/gpu/nvgpu/common/vgpu/init/init_vgpu.c index e2175cd5d..27b3913a5 100644 --- a/drivers/gpu/nvgpu/common/vgpu/init/init_vgpu.c +++ b/drivers/gpu/nvgpu/common/vgpu/init/init_vgpu.c @@ -68,16 +68,14 @@ void vgpu_remove_support_common(struct gk20a *g) nvgpu_kfree(g, g->dbg_regops_tmp_buf); } - if (g->pmu.remove_support) { - g->pmu.remove_support(&g->pmu); - } - nvgpu_gr_remove_support(g); if (g->fifo.remove_support) { g->fifo.remove_support(&g->fifo); } + nvgpu_pmu_remove_support(g, g->pmu); + if (g->mm.remove_support) { g->mm.remove_support(&g->mm); } diff --git a/drivers/gpu/nvgpu/common/vgpu/vgpu.c b/drivers/gpu/nvgpu/common/vgpu/vgpu.c index d1fb3eb8c..e6912dd6a 100644 --- a/drivers/gpu/nvgpu/common/vgpu/vgpu.c +++ 
b/drivers/gpu/nvgpu/common/vgpu/vgpu.c @@ -234,10 +234,6 @@ void vgpu_remove_support_common(struct gk20a *g) nvgpu_kfree(g, g->dbg_regops_tmp_buf); } - if (g->pmu.remove_support) { - g->pmu.remove_support(&g->pmu); - } - if (g->gr->remove_support) { g->gr->remove_support(g); } @@ -246,6 +242,8 @@ void vgpu_remove_support_common(struct gk20a *g) g->fifo.remove_support(&g->fifo); } + nvgpu_pmu_remove_support(g, g->pmu); + if (g->mm.remove_support) { g->mm.remove_support(&g->mm); } diff --git a/drivers/gpu/nvgpu/hal/clk/clk_gv100.c b/drivers/gpu/nvgpu/hal/clk/clk_gv100.c index 0fd265b27..eea1bc037 100644 --- a/drivers/gpu/nvgpu/hal/clk/clk_gv100.c +++ b/drivers/gpu/nvgpu/hal/clk/clk_gv100.c @@ -214,7 +214,7 @@ int gv100_clk_domain_get_f_points( int status = -EINVAL; struct nvgpu_clk_domain *pdomain; u8 i; - struct nvgpu_clk_pmupstate *pclk = g->pmu.clk_pmu; + struct nvgpu_clk_pmupstate *pclk = g->pmu->clk_pmu; if (pfpointscount == NULL) { return -EINVAL; } diff --git a/drivers/gpu/nvgpu/hal/fifo/preempt_gk20a.c b/drivers/gpu/nvgpu/hal/fifo/preempt_gk20a.c index 1e4d3bd50..52262f9d5 100644 --- a/drivers/gpu/nvgpu/hal/fifo/preempt_gk20a.c +++ b/drivers/gpu/nvgpu/hal/fifo/preempt_gk20a.c @@ -107,13 +107,13 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch) /* we have no idea which runlist we are using. lock all */ nvgpu_runlist_lock_active_runlists(g); - mutex_ret = nvgpu_pmu_lock_acquire(g, &g->pmu, + mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu, PMU_MUTEX_ID_FIFO, &token); ret = gk20a_fifo_preempt_locked(g, ch->chid, ID_TYPE_CHANNEL); if (mutex_ret == 0) { - if (nvgpu_pmu_lock_release(g, &g->pmu, + if (nvgpu_pmu_lock_release(g, g->pmu, PMU_MUTEX_ID_FIFO, &token) != 0) { nvgpu_err(g, "failed to release PMU lock"); } @@ -155,13 +155,13 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg) /* we have no idea which runlist we are using. 
lock all */ nvgpu_runlist_lock_active_runlists(g); - mutex_ret = nvgpu_pmu_lock_acquire(g, &g->pmu, + mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu, PMU_MUTEX_ID_FIFO, &token); ret = gk20a_fifo_preempt_locked(g, tsg->tsgid, ID_TYPE_TSG); if (mutex_ret == 0) { - if (nvgpu_pmu_lock_release(g, &g->pmu, + if (nvgpu_pmu_lock_release(g, g->pmu, PMU_MUTEX_ID_FIFO, &token) != 0) { nvgpu_err(g, "failed to release PMU lock"); } diff --git a/drivers/gpu/nvgpu/hal/fifo/preempt_gv11b.c b/drivers/gpu/nvgpu/hal/fifo/preempt_gv11b.c index 8162c3c8a..624f7df6a 100644 --- a/drivers/gpu/nvgpu/hal/fifo/preempt_gv11b.c +++ b/drivers/gpu/nvgpu/hal/fifo/preempt_gv11b.c @@ -93,7 +93,7 @@ void gv11b_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_mask) /* runlist_lock are locked by teardown and sched are disabled too */ nvgpu_log_fn(g, "preempt runlists_mask:0x%08x", runlists_mask); - mutex_ret = nvgpu_pmu_lock_acquire(g, &g->pmu, + mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu, PMU_MUTEX_ID_FIFO, &token); /* issue runlist preempt */ @@ -114,7 +114,7 @@ void gv11b_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_mask) } if (mutex_ret == 0) { - int err = nvgpu_pmu_lock_release(g, &g->pmu, PMU_MUTEX_ID_FIFO, + int err = nvgpu_pmu_lock_release(g, g->pmu, PMU_MUTEX_ID_FIFO, &token); if (err != 0) { nvgpu_err(g, "PMU_MUTEX_ID_FIFO not released err=%d", @@ -442,13 +442,13 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg) /* WAR for Bug 2065990 */ nvgpu_tsg_disable_sched(g, tsg); - mutex_ret = nvgpu_pmu_lock_acquire(g, &g->pmu, + mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu, PMU_MUTEX_ID_FIFO, &token); ret = gv11b_fifo_preempt_locked(g, tsg->tsgid, ID_TYPE_TSG); if (mutex_ret == 0) { - int err = nvgpu_pmu_lock_release(g, &g->pmu, PMU_MUTEX_ID_FIFO, + int err = nvgpu_pmu_lock_release(g, g->pmu, PMU_MUTEX_ID_FIFO, &token); if (err != 0) { nvgpu_err(g, "PMU_MUTEX_ID_FIFO not released err=%d", diff --git a/drivers/gpu/nvgpu/hal/pmu/pmu_gk20a.c 
b/drivers/gpu/nvgpu/hal/pmu/pmu_gk20a.c index 74ca49979..9627b4ce5 100644 --- a/drivers/gpu/nvgpu/hal/pmu/pmu_gk20a.c +++ b/drivers/gpu/nvgpu/hal/pmu/pmu_gk20a.c @@ -450,7 +450,7 @@ void gk20a_pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable) g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_DISABLE, false, mc_intr_mask_1_pmu_enabled_f()); - nvgpu_falcon_set_irq(&pmu->flcn, false, 0x0, 0x0); + nvgpu_falcon_set_irq(pmu->flcn, false, 0x0, 0x0); if (enable) { intr_dest = g->ops.pmu.get_irqdest(g); @@ -464,7 +464,7 @@ void gk20a_pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable) pwr_falcon_irqmset_swgen0_f(1) | pwr_falcon_irqmset_swgen1_f(1); - nvgpu_falcon_set_irq(&pmu->flcn, true, intr_mask, intr_dest); + nvgpu_falcon_set_irq(pmu->flcn, true, intr_mask, intr_dest); g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_ENABLE, true, mc_intr_mask_0_pmu_enabled_f()); @@ -491,7 +491,7 @@ bool gk20a_pmu_is_interrupted(struct nvgpu_pmu *pmu) static void gk20a_pmu_handle_interrupts(struct gk20a *g, u32 intr) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; bool recheck = false; int err = 0; @@ -542,7 +542,7 @@ static void gk20a_pmu_handle_interrupts(struct gk20a *g, u32 intr) void gk20a_pmu_isr(struct gk20a *g) { - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; u32 intr, mask; nvgpu_log_fn(g, " "); @@ -722,7 +722,7 @@ int gk20a_pmu_ns_bootstrap(struct gk20a *g, struct nvgpu_pmu *pmu, pwr_falcon_dmatrfcmd_ctxdma_f(GK20A_PMU_DMAIDX_UCODE)); } - err = nvgpu_falcon_bootstrap(&g->pmu.flcn, + err = nvgpu_falcon_bootstrap(g->pmu->flcn, desc->bootloader_entry_point); gk20a_writel(g, pwr_falcon_os_r(), desc->app_version); diff --git a/drivers/gpu/nvgpu/hal/pmu/pmu_gv11b.c b/drivers/gpu/nvgpu/hal/pmu/pmu_gv11b.c index af77f4394..3e9892104 100644 --- a/drivers/gpu/nvgpu/hal/pmu/pmu_gv11b.c +++ b/drivers/gpu/nvgpu/hal/pmu/pmu_gv11b.c @@ -229,7 +229,7 @@ int gv11b_pmu_bootstrap(struct gk20a *g, struct nvgpu_pmu *pmu, 
pwr_falcon_dmatrfcmd_ctxdma_f(GK20A_PMU_DMAIDX_UCODE)); } - err = nvgpu_falcon_bootstrap(&pmu->flcn, desc->bootloader_entry_point); + err = nvgpu_falcon_bootstrap(pmu->flcn, desc->bootloader_entry_point); gk20a_writel(g, pwr_falcon_os_r(), desc->app_version); diff --git a/drivers/gpu/nvgpu/hal/rc/rc_gv11b.c b/drivers/gpu/nvgpu/hal/rc/rc_gv11b.c index 682e57976..aa3fda3e6 100644 --- a/drivers/gpu/nvgpu/hal/rc/rc_gv11b.c +++ b/drivers/gpu/nvgpu/hal/rc/rc_gv11b.c @@ -61,7 +61,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g, "runlists_mask: 0x%08x", runlists_mask); /* runlist_lock are locked by teardown */ - mutex_ret = nvgpu_pmu_lock_acquire(g, &g->pmu, + mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu, PMU_MUTEX_ID_FIFO, &token); for (i = 0U; i < f->num_runlists; i++) { @@ -119,7 +119,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g, } } if (mutex_ret == 0) { - err = nvgpu_pmu_lock_release(g, &g->pmu, PMU_MUTEX_ID_FIFO, + err = nvgpu_pmu_lock_release(g, g->pmu, PMU_MUTEX_ID_FIFO, &token); if (err != 0) { nvgpu_err(g, "PMU_MUTEX_ID_FIFO not released err=%d", diff --git a/drivers/gpu/nvgpu/hal/therm/therm_gp106.c b/drivers/gpu/nvgpu/hal/therm/therm_gp106.c index 87e58900f..140ab6b8b 100644 --- a/drivers/gpu/nvgpu/hal/therm/therm_gp106.c +++ b/drivers/gpu/nvgpu/hal/therm/therm_gp106.c @@ -107,7 +107,7 @@ int gp106_configure_therm_alert(struct gk20a *g, s32 curr_warn_temp) if (g->curr_warn_temp != curr_warn_temp) { g->curr_warn_temp = curr_warn_temp; - err = nvgpu_therm_configure_therm_alert(g, &g->pmu); + err = nvgpu_therm_configure_therm_alert(g, g->pmu); } return err; diff --git a/drivers/gpu/nvgpu/include/nvgpu/boardobjgrp.h b/drivers/gpu/nvgpu/include/nvgpu/boardobjgrp.h index 2990f19f5..12063f3ab 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/boardobjgrp.h +++ b/drivers/gpu/nvgpu/include/nvgpu/boardobjgrp.h @@ -286,9 +286,9 @@ do { \ _boardobjgrp_set_header_aligned), \ (u32)sizeof(union nv_pmu_##eng##_##class## \ 
_boardobj_set_union_aligned), \ - (u32)nvgpu_pmu_get_ss_member_set_size(g, &g->pmu, \ + (u32)nvgpu_pmu_get_ss_member_set_size(g, g->pmu, \ NV_PMU_SUPER_SURFACE_MEMBER_##CLASS##_GRP), \ - (u32)nvgpu_pmu_get_ss_member_set_offset(g, &g->pmu, \ + (u32)nvgpu_pmu_get_ss_member_set_offset(g, g->pmu, \ NV_PMU_SUPER_SURFACE_MEMBER_##CLASS##_GRP), \ NV_PMU_RPC_ID_##ENG##_BOARD_OBJ_GRP_CMD)) @@ -304,9 +304,9 @@ do { \ _boardobjgrp_get_status_header_aligned), \ (u32)sizeof(union nv_pmu_##eng##_##class## \ _boardobj_get_status_union_aligned), \ - (u32)nvgpu_pmu_get_ss_member_get_status_size(g, &g->pmu, \ + (u32)nvgpu_pmu_get_ss_member_get_status_size(g, g->pmu, \ NV_PMU_SUPER_SURFACE_MEMBER_##CLASS##_GRP), \ - (u32)nvgpu_pmu_get_ss_member_get_status_offset(g, &g->pmu, \ + (u32)nvgpu_pmu_get_ss_member_get_status_offset(g, g->pmu, \ NV_PMU_SUPER_SURFACE_MEMBER_##CLASS##_GRP), \ NV_PMU_RPC_ID_##ENG##_BOARD_OBJ_GRP_CMD)) diff --git a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h index 6453b9a1c..823473d10 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h +++ b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h @@ -1931,6 +1931,7 @@ struct gk20a { struct nvgpu_netlist_vars *netlist_vars; bool netlist_valid; + struct nvgpu_falcon pmu_flcn; struct nvgpu_falcon fecs_flcn; struct nvgpu_falcon gpccs_flcn; struct nvgpu_falcon nvdec_flcn; @@ -1943,7 +1944,7 @@ struct gk20a { struct nvgpu_fbp *fbp; struct sim_nvgpu *sim; struct mm_gk20a mm; - struct nvgpu_pmu pmu; + struct nvgpu_pmu *pmu; struct nvgpu_acr *acr; struct nvgpu_ecc ecc; struct perf_pmupstate *perf_pmu; diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu.h b/drivers/gpu/nvgpu/include/nvgpu/pmu.h index e80377b49..69e8afdf8 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/pmu.h +++ b/drivers/gpu/nvgpu/include/nvgpu/pmu.h @@ -149,7 +149,7 @@ struct nvgpu_pmu { bool sw_ready; bool isr_enabled; struct nvgpu_mutex isr_mutex; - struct nvgpu_falcon flcn; + struct nvgpu_falcon *flcn; struct nvgpu_allocator dmem; 
struct nvgpu_mem trace_buf; struct pmu_sha1_gid gid_info; @@ -189,9 +189,10 @@ int nvgpu_pmu_lock_release(struct gk20a *g, struct nvgpu_pmu *pmu, u32 id, u32 *token); /* PMU RTOS init/setup functions */ -int nvgpu_pmu_early_init(struct gk20a *g, struct nvgpu_pmu *pmu); +int nvgpu_pmu_early_init(struct gk20a *g, struct nvgpu_pmu **pmu_p); int nvgpu_pmu_init(struct gk20a *g, struct nvgpu_pmu *pmu); int nvgpu_pmu_destroy(struct gk20a *g, struct nvgpu_pmu *pmu); +void nvgpu_pmu_remove_support(struct gk20a *g, struct nvgpu_pmu *pmu); /* PMU H/W error functions */ void nvgpu_pmu_report_bar0_pri_err_status(struct gk20a *g, u32 bar0_status, diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu/lsfm.h b/drivers/gpu/nvgpu/include/nvgpu/pmu/lsfm.h index 0f44258ee..a13aed734 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/pmu/lsfm.h +++ b/drivers/gpu/nvgpu/include/nvgpu/pmu/lsfm.h @@ -45,6 +45,8 @@ int nvgpu_pmu_lsfm_ls_pmu_cmdline_args_copy(struct gk20a *g, void nvgpu_pmu_lsfm_rpc_handler(struct gk20a *g, struct rpc_handler_payload *rpc_payload); int nvgpu_pmu_lsfm_init(struct gk20a *g, struct nvgpu_pmu_lsfm **lsfm); +void nvgpu_pmu_lsfm_clean(struct gk20a *g, struct nvgpu_pmu *pmu, + struct nvgpu_pmu_lsfm *lsfm); void nvgpu_pmu_lsfm_deinit(struct gk20a *g, struct nvgpu_pmu *pmu, struct nvgpu_pmu_lsfm *lsfm); diff --git a/drivers/gpu/nvgpu/os/linux/debug_clk_gv100.c b/drivers/gpu/nvgpu/os/linux/debug_clk_gv100.c index 1bb3048e8..5248a5fcd 100644 --- a/drivers/gpu/nvgpu/os/linux/debug_clk_gv100.c +++ b/drivers/gpu/nvgpu/os/linux/debug_clk_gv100.c @@ -53,7 +53,7 @@ static int sys_cfc_read(void *data , u64 *val) struct gk20a *g = (struct gk20a *)data; bool bload = nvgpu_boardobjgrpmask_bit_get( - &g->pmu.clk_pmu->clk_freq_controllers-> + &g->pmu->clk_pmu->clk_freq_controllers-> freq_ctrl_load_mask.super, CTRL_CLK_CLK_FREQ_CONTROLLER_ID_SYS); @@ -82,7 +82,7 @@ static int ltc_cfc_read(void *data , u64 *val) struct gk20a *g = (struct gk20a *)data; bool bload = 
nvgpu_boardobjgrpmask_bit_get( - &g->pmu.clk_pmu->clk_freq_controllers-> + &g->pmu->clk_pmu->clk_freq_controllers-> freq_ctrl_load_mask.super, CTRL_CLK_CLK_FREQ_CONTROLLER_ID_LTC); @@ -111,7 +111,7 @@ static int xbar_cfc_read(void *data , u64 *val) struct gk20a *g = (struct gk20a *)data; bool bload = nvgpu_boardobjgrpmask_bit_get( - &g->pmu.clk_pmu->clk_freq_controllers-> + &g->pmu->clk_pmu->clk_freq_controllers-> freq_ctrl_load_mask.super, CTRL_CLK_CLK_FREQ_CONTROLLER_ID_XBAR); @@ -141,7 +141,7 @@ static int gpc_cfc_read(void *data , u64 *val) struct gk20a *g = (struct gk20a *)data; bool bload = nvgpu_boardobjgrpmask_bit_get( - &g->pmu.clk_pmu->clk_freq_controllers-> + &g->pmu->clk_pmu->clk_freq_controllers-> freq_ctrl_load_mask.super, CTRL_CLK_CLK_FREQ_CONTROLLER_ID_GPC0); @@ -173,8 +173,8 @@ static int vftable_show(struct seq_file *s, void *unused) u32 voltage_min_uv, voltage_step_size_uv; u32 gpcclk_clkmhz = 0, gpcclk_voltuv = 0; - voltage_min_uv = g->pmu.clk_pmu->avfs_fllobjs->lut_min_voltage_uv; - voltage_step_size_uv = g->pmu.clk_pmu->avfs_fllobjs->lut_step_size_uv; + voltage_min_uv = g->pmu->clk_pmu->avfs_fllobjs->lut_min_voltage_uv; + voltage_step_size_uv = g->pmu->clk_pmu->avfs_fllobjs->lut_step_size_uv; for (index = 0; index < CTRL_CLK_LUT_NUM_ENTRIES_GV10x; index++) { gpcclk_voltuv = voltage_min_uv + index * voltage_step_size_uv; diff --git a/drivers/gpu/nvgpu/os/linux/debug_pmu.c b/drivers/gpu/nvgpu/os/linux/debug_pmu.c index 4b55389bc..6017bd859 100644 --- a/drivers/gpu/nvgpu/os/linux/debug_pmu.c +++ b/drivers/gpu/nvgpu/os/linux/debug_pmu.c @@ -27,7 +27,7 @@ static int lpwr_debug_show(struct seq_file *s, void *data) { struct gk20a *g = s->private; - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; if (pmu->pg->engines_feature_list && pmu->pg->engines_feature_list(g, @@ -41,16 +41,16 @@ static int lpwr_debug_show(struct seq_file *s, void *data) "MSCG pstate state: %u\n" "MSCG transition state: %u\n", 
g->ops.clk_arb.get_current_pstate(g), - g->elpg_enabled, g->pmu.pg->elpg_refcnt, - g->pmu.pg->elpg_stat, g->mscg_enabled, - g->pmu.pg->mscg_stat, g->pmu.pg->mscg_transition_state); + g->elpg_enabled, g->pmu->pg->elpg_refcnt, + g->pmu->pg->elpg_stat, g->mscg_enabled, + g->pmu->pg->mscg_stat, g->pmu->pg->mscg_transition_state); } else seq_printf(s, "ELPG Enabled: %u\n" "ELPG ref count: %u\n" "ELPG state: %u\n", - g->elpg_enabled, g->pmu.pg->elpg_refcnt, - g->pmu.pg->elpg_stat); + g->elpg_enabled, g->pmu->pg->elpg_refcnt, + g->pmu->pg->elpg_stat); return 0; @@ -258,7 +258,7 @@ static const struct file_operations elpg_transitions_fops = { static int falc_trace_show(struct seq_file *s, void *data) { struct gk20a *g = s->private; - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; u32 i = 0, j = 0, k, l, m; char part_str[40]; void *tracebuffer; @@ -320,7 +320,7 @@ static int perfmon_events_enable_show(struct seq_file *s, void *data) struct gk20a *g = s->private; seq_printf(s, "%u\n", - nvgpu_pmu_perfmon_get_sampling_enable_status(&(g->pmu)) ? 1 : 0); + nvgpu_pmu_perfmon_get_sampling_enable_status(g->pmu) ? 
1 : 0); return 0; } @@ -356,24 +356,24 @@ static ssize_t perfmon_events_enable_write(struct file *file, if (err) return err; - if (val && !nvgpu_pmu_perfmon_get_sampling_enable_status(&(g->pmu)) + if (val && !nvgpu_pmu_perfmon_get_sampling_enable_status(g->pmu) && nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) { - nvgpu_pmu_perfmon_set_sampling_enable_status(&(g->pmu), + nvgpu_pmu_perfmon_set_sampling_enable_status(g->pmu, true); - nvgpu_pmu_perfmon_start_sample(g, &(g->pmu), - g->pmu.pmu_perfmon); + nvgpu_pmu_perfmon_start_sample(g, g->pmu, + g->pmu->pmu_perfmon); } else if (!val - && nvgpu_pmu_perfmon_get_sampling_enable_status(&(g->pmu)) + && nvgpu_pmu_perfmon_get_sampling_enable_status(g->pmu) && nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) { - nvgpu_pmu_perfmon_set_sampling_enable_status(&(g->pmu), + nvgpu_pmu_perfmon_set_sampling_enable_status(g->pmu, false); - nvgpu_pmu_perfmon_stop_sample(g, &(g->pmu), - g->pmu.pmu_perfmon); + nvgpu_pmu_perfmon_stop_sample(g, g->pmu, + g->pmu->pmu_perfmon); } gk20a_idle(g); } else { status = val ? 
true : false; - nvgpu_pmu_perfmon_set_sampling_enable_status(&(g->pmu), status); + nvgpu_pmu_perfmon_set_sampling_enable_status(g->pmu, status); } return count; @@ -391,7 +391,7 @@ static int perfmon_events_count_show(struct seq_file *s, void *data) { struct gk20a *g = s->private; - seq_printf(s, "%llu\n", nvgpu_pmu_perfmon_get_events_count(&(g->pmu))); + seq_printf(s, "%llu\n", nvgpu_pmu_perfmon_get_events_count(g->pmu)); return 0; } diff --git a/drivers/gpu/nvgpu/os/linux/module.c b/drivers/gpu/nvgpu/os/linux/module.c index 2283fa8d6..1897e1682 100644 --- a/drivers/gpu/nvgpu/os/linux/module.c +++ b/drivers/gpu/nvgpu/os/linux/module.c @@ -752,13 +752,6 @@ void gk20a_remove_support(struct gk20a *g) nvgpu_channel_remove_support_linux(l); - if (g->pmu.remove_support) - g->pmu.remove_support(&g->pmu); - - if (nvgpu_is_enabled(g, NVGPU_PMU_PSTATE)) { - nvgpu_pmu_pstate_deinit(g); - } - if (g->sec2.remove_support != NULL) { g->sec2.remove_support(&g->sec2); } @@ -771,6 +764,8 @@ void gk20a_remove_support(struct gk20a *g) if (g->fifo.remove_support) g->fifo.remove_support(&g->fifo); + nvgpu_pmu_remove_support(g, g->pmu); + if (g->mm.remove_support) g->mm.remove_support(&g->mm); diff --git a/drivers/gpu/nvgpu/os/linux/platform_gk20a_tegra.c b/drivers/gpu/nvgpu/os/linux/platform_gk20a_tegra.c index 3446fb8ac..e5edffa9f 100644 --- a/drivers/gpu/nvgpu/os/linux/platform_gk20a_tegra.c +++ b/drivers/gpu/nvgpu/os/linux/platform_gk20a_tegra.c @@ -172,7 +172,7 @@ static unsigned long gk20a_tegra_get_emc_rate(struct gk20a *g, /* When scaling emc, account for the gpu load when the * gpu frequency is less than or equal to fmax@vmin. 
*/ if (gpu_freq <= gpu_fmax_at_vmin) - emc_scale = min(nvgpu_pmu_perfmon_get_load_avg(&(g->pmu)), + emc_scale = min(nvgpu_pmu_perfmon_get_load_avg(g->pmu), g->emc3d_ratio); else emc_scale = g->emc3d_ratio; diff --git a/drivers/gpu/nvgpu/os/linux/sysfs.c b/drivers/gpu/nvgpu/os/linux/sysfs.c index 8a7004728..2b07b9958 100644 --- a/drivers/gpu/nvgpu/os/linux/sysfs.c +++ b/drivers/gpu/nvgpu/os/linux/sysfs.c @@ -463,7 +463,7 @@ static ssize_t ldiv_slowdown_factor_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct gk20a *g = get_gk20a(dev); - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; unsigned long val = 0; int err; @@ -516,7 +516,7 @@ static ssize_t mscg_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct gk20a *g = get_gk20a(dev); - struct nvgpu_pmu *pmu = &g->pmu; + struct nvgpu_pmu *pmu = g->pmu; unsigned long val = 0; int err; @@ -584,7 +584,7 @@ static ssize_t aelpg_param_store(struct device *dev, struct gk20a *g = get_gk20a(dev); int status = 0; union pmu_ap_cmd ap_cmd; - int *paramlist = (int *)g->pmu.pg->aelpg_param; + int *paramlist = (int *)g->pmu->pg->aelpg_param; u32 defaultparam[5] = { APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US, APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US, @@ -607,7 +607,7 @@ static ssize_t aelpg_param_store(struct device *dev, /* If aelpg is enabled & pmu is ready then post values to * PMU else store then post later */ - if (g->aelpg_enabled && nvgpu_pmu_get_fw_ready(g, &g->pmu)) { + if (g->aelpg_enabled && nvgpu_pmu_get_fw_ready(g, g->pmu)) { /* Disable AELPG */ ap_cmd.disable_ctrl.cmd_id = PMU_AP_CMD_ID_DISABLE_CTRL; ap_cmd.disable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS; @@ -627,9 +627,9 @@ static ssize_t aelpg_param_read(struct device *dev, struct gk20a *g = get_gk20a(dev); return snprintf(buf, PAGE_SIZE, - "%d %d %d %d %d\n", g->pmu.pg->aelpg_param[0], - g->pmu.pg->aelpg_param[1], g->pmu.pg->aelpg_param[2], - 
g->pmu.pg->aelpg_param[3], g->pmu.pg->aelpg_param[4]); + "%d %d %d %d %d\n", g->pmu->pg->aelpg_param[0], + g->pmu->pg->aelpg_param[1], g->pmu->pg->aelpg_param[2], + g->pmu->pg->aelpg_param[3], g->pmu->pg->aelpg_param[4]); } static DEVICE_ATTR(aelpg_param, ROOTRW, @@ -652,7 +652,7 @@ static ssize_t aelpg_enable_store(struct device *dev, return err; } - if (nvgpu_pmu_get_fw_ready(g, &g->pmu)) { + if (nvgpu_pmu_get_fw_ready(g, g->pmu)) { if (val && !g->aelpg_enabled) { g->aelpg_enabled = true; /* Enable AELPG */