gpu: nvgpu: pmu: update init APIs

Remove the second parameter from the pmu_early_init() and pmu_rtos_init()
functions so that they take only the gk20a object; callers always passed
g->pmu for that parameter anyway. This also makes the API signatures
consistent with the other init functions in the driver.
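
For illustration, the call-site change looks like this (a minimal sketch
distilled from the diff below, not additional driver code):

	/* before: the caller passes the PMU pointer explicitly */
	err = g->ops.pmu.pmu_early_init(g, &g->pmu);

	/* after: the function reads and writes the PMU object via g->pmu */
	err = g->ops.pmu.pmu_early_init(g);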

JIRA NVGPU-3980

Change-Id: Iae9361a5f14bc5c1d02f4ddb6583f30b71b22d59
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2202968
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vinod Gopalakrishnakurup <vinodg@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:     Philip Elcan <pelcan@nvidia.com>
Date:       2019-09-19 15:17:33 -04:00
Committed:  Alex Waterman
Parent:     78c1f328bb
Commit:     b53ec4731e

6 changed files with 21 additions and 23 deletions

View File

@@ -346,7 +346,7 @@ int nvgpu_finalize_poweron(struct gk20a *g)
 		return err;
 	}
-	err = g->ops.pmu.pmu_early_init(g, &g->pmu);
+	err = g->ops.pmu.pmu_early_init(g);
 	if (err != 0) {
 		nvgpu_err(g, "failed to early init pmu sw");
 		goto done;
@@ -523,7 +523,7 @@ int nvgpu_finalize_poweron(struct gk20a *g)
 #endif
 #ifdef CONFIG_NVGPU_LS_PMU
-	err = nvgpu_pmu_rtos_init(g, g->pmu);
+	err = nvgpu_pmu_rtos_init(g);
 	if (err != 0) {
 		nvgpu_err(g, "failed to init gk20a pmu");
 		nvgpu_mutex_release(&g->tpc_pg_lock);

View File

@@ -143,14 +143,14 @@ void nvgpu_pmu_remove_support(struct gk20a *g, struct nvgpu_pmu *pmu)
 }
 /* PMU unit init */
-int nvgpu_pmu_early_init(struct gk20a *g, struct nvgpu_pmu **pmu_p)
+int nvgpu_pmu_early_init(struct gk20a *g)
 {
 	int err = 0;
 	struct nvgpu_pmu *pmu;
 	nvgpu_log_fn(g, " ");
-	if (*pmu_p != NULL) {
+	if (g->pmu != NULL) {
 		/* skip alloc/reinit for unrailgate sequence */
 		nvgpu_pmu_dbg(g, "skip pmu init for unrailgate sequence");
 		goto exit;
@@ -162,7 +162,7 @@ int nvgpu_pmu_early_init(struct gk20a *g, struct nvgpu_pmu **pmu_p)
 		goto exit;
 	}
-	*pmu_p = pmu;
+	g->pmu = pmu;
 	pmu->g = g;
 	pmu->flcn = &g->pmu_flcn;

View File

@@ -209,17 +209,17 @@ exit:
 	return err;
 }
-int nvgpu_pmu_rtos_init(struct gk20a *g, struct nvgpu_pmu *pmu)
+int nvgpu_pmu_rtos_init(struct gk20a *g)
 {
 	int err = 0;
 	nvgpu_log_fn(g, " ");
-	if (!g->support_ls_pmu) {
+	if (!g->support_ls_pmu || (g->pmu == NULL)) {
 		goto exit;
 	}
-	err = pmu_sw_setup(g, pmu);
+	err = pmu_sw_setup(g, g->pmu);
 	if (err != 0) {
 		goto exit;
 	}
@@ -252,8 +252,8 @@ int nvgpu_pmu_rtos_init(struct gk20a *g, struct nvgpu_pmu *pmu)
 		g->ops.pmu.setup_apertures(g);
 	}
-	err = nvgpu_pmu_lsfm_ls_pmu_cmdline_args_copy(g, pmu,
-		pmu->lsfm);
+	err = nvgpu_pmu_lsfm_ls_pmu_cmdline_args_copy(g, g->pmu,
+		g->pmu->lsfm);
 	if (err != 0) {
 		goto exit;
 	}
@@ -271,13 +271,13 @@ int nvgpu_pmu_rtos_init(struct gk20a *g, struct nvgpu_pmu *pmu)
 		}
 	} else {
 		/* non-secure boot */
-		err = nvgpu_pmu_ns_fw_bootstrap(g, pmu);
+		err = nvgpu_pmu_ns_fw_bootstrap(g, g->pmu);
 		if (err != 0) {
 			goto exit;
 		}
 	}
-	nvgpu_pmu_fw_state_change(g, pmu, PMU_FW_STATE_STARTING, false);
+	nvgpu_pmu_fw_state_change(g, g->pmu, PMU_FW_STATE_STARTING, false);
 exit:
 	return err;

View File

@@ -1207,8 +1207,7 @@ struct gpu_ops {
 		u32 (*idle_slowdown_disable)(struct gk20a *g);
 	} therm;
 	struct {
-		int (*pmu_early_init)(struct gk20a *g,
-				struct nvgpu_pmu **pmu_p);
+		int (*pmu_early_init)(struct gk20a *g);
 		int (*pmu_destroy)(struct gk20a *g, struct nvgpu_pmu *pmu);
 		int (*pmu_pstate_sw_setup)(struct gk20a *g);
 		int (*pmu_pstate_pmu_setup)(struct gk20a *g);
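
As a side note, each chip HAL that populates this ops table now wires a
one-argument hook. A hedged sketch of what such an assignment might look like
(the init function below is hypothetical; only the field and function
signatures come from this change):

	/* hypothetical per-chip HAL setup; pairing of fields is illustrative */
	static void example_init_pmu_hal(struct gpu_ops *gops)
	{
		gops->pmu.pmu_early_init = nvgpu_pmu_early_init;
		gops->pmu.pmu_destroy = nvgpu_pmu_destroy;
	}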

View File

@@ -329,7 +329,7 @@ int nvgpu_pmu_lock_release(struct gk20a *g, struct nvgpu_pmu *pmu,
 /* PMU RTOS init/setup functions*/
 int nvgpu_pmu_rtos_early_init(struct gk20a *g, struct nvgpu_pmu *pmu);
-int nvgpu_pmu_rtos_init(struct gk20a *g, struct nvgpu_pmu *pmu);
+int nvgpu_pmu_rtos_init(struct gk20a *g);
 int nvgpu_pmu_destroy(struct gk20a *g, struct nvgpu_pmu *pmu);
 #endif
@@ -366,7 +366,6 @@ int nvgpu_pmu_reset(struct gk20a *g);
  * detected chip,
  *
  * @param g [in] The GPU driver struct.
- * @param nvgpu_pmu [in] The PMU unit.
  *
  * Initializes PMU unit data struct in the GPU driver based on detected chip.
  * Allocate memory for #nvgpu_pmu data struct & set PMU Engine h/w properties,
@@ -375,7 +374,7 @@ int nvgpu_pmu_reset(struct gk20a *g);
  *
  * @return 0 in case of success, < 0 in case of failure.
  */
-int nvgpu_pmu_early_init(struct gk20a *g, struct nvgpu_pmu **pmu_p);
+int nvgpu_pmu_early_init(struct gk20a *g);
 /**
  * @brief PMU remove to free space allocted for PMU unit
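
Read together with the hunks above, the documented bring-up contract is now
keyed entirely off the gk20a pointer. A minimal, hedged sketch of a caller
following that contract (the wrapper below is hypothetical, not part of
nvgpu; in the driver the RTOS-init call is additionally guarded by
CONFIG_NVGPU_LS_PMU):

	/* hypothetical helper: early init first, then RTOS init, both via g */
	static int example_pmu_bringup(struct gk20a *g)
	{
		int err = nvgpu_pmu_early_init(g);

		if (err != 0) {
			return err;
		}
		return nvgpu_pmu_rtos_init(g);
	}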

View File

@@ -154,7 +154,7 @@ static int test_pmu_early_init(struct unit_module *m,
 	 * allocation failure
 	 */
 	nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
-	err = nvgpu_pmu_early_init(g, &g->pmu);
+	err = nvgpu_pmu_early_init(g);
 	if (err != -ENOMEM) {
 		unit_return_fail(m,
@@ -165,7 +165,7 @@ static int test_pmu_early_init(struct unit_module *m,
 	nvgpu_pmu_remove_support(g, g->pmu);
 	/* Case 2: nvgpu_pmu_early_init() passes */
-	err = nvgpu_pmu_early_init(g, &g->pmu);
+	err = nvgpu_pmu_early_init(g);
 	if (err != 0) {
 		unit_return_fail(m, "nvgpu_pmu_early_init failed\n");
 	}
@@ -175,7 +175,7 @@ static int test_pmu_early_init(struct unit_module *m,
 	/* case 3: */
 	g->support_ls_pmu = false;
-	err = nvgpu_pmu_early_init(g, &g->pmu);
+	err = nvgpu_pmu_early_init(g);
 	if (err != 0) {
 		unit_return_fail(m, "support_ls_pmu failed\n");
 	}
@@ -185,7 +185,7 @@ static int test_pmu_early_init(struct unit_module *m,
 	/* case 4: */
 	g->support_ls_pmu = true;
 	g->ops.pmu.is_pmu_supported = stub_gv11b_is_pmu_supported;
-	err = nvgpu_pmu_early_init(g, &g->pmu);
+	err = nvgpu_pmu_early_init(g);
 	if (g->support_ls_pmu != false || g->can_elpg != false ||
 		g->elpg_enabled != false || g->aelpg_enabled != false) {
@@ -203,7 +203,7 @@ static int test_pmu_remove_support(struct unit_module *m,
 {
 	int err;
-	err = nvgpu_pmu_early_init(g, &g->pmu);
+	err = nvgpu_pmu_early_init(g);
 	if (err != 0) {
 		unit_return_fail(m, "support_ls_pmu failed\n");
 	}
@@ -228,7 +228,7 @@ static int test_pmu_reset(struct unit_module *m,
 	}
 	/* initialize PMU */
-	err = nvgpu_pmu_early_init(g, &g->pmu);
+	err = nvgpu_pmu_early_init(g);
 	if (err != 0) {
 		unit_return_fail(m, "nvgpu_pmu_early_init failed\n");
 	}