gpu: nvgpu: enable PMU ECC interrupt early

PMU IRQs were enabled only as part of LS PMU initialization, on the
assumption that all PMU interrupt handling belongs to the LS PMU.
Debugging early-init issues such as PMU falcon ECC errors triggered
during nvgpu power-on is cumbersome if interrupts are not enabled
early, and FMEA analysis of the nvgpu init path also requires this
interrupt to be enabled earlier.

Hence, enable the PMU ECC IRQ early, during nvgpu_finalize_poweron.
pmu_enable_irq is updated to enable interrupts differently for safety
and non-safety builds, and disabling of PMU interrupts is moved out of
nvgpu_pmu_destroy into nvgpu_prepare_poweroff. A new wrapper API,
nvgpu_pmu_enable_irq, is introduced; a sketch follows.
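
For reference, the wrapper plausibly factors out the open-coded
sequence that the hunks below delete from nvgpu_pmu_rtos_init and
nvgpu_pmu_destroy. The void return type and the body are assumptions
reconstructed from that removed code; only the call site
nvgpu_pmu_enable_irq(g, true) is visible in this diff.

	void nvgpu_pmu_enable_irq(struct gk20a *g, bool enable)
	{
		struct nvgpu_pmu *pmu = g->pmu;

		/* Same pattern as the open-coded block removed below. */
		if (g->ops.pmu.pmu_enable_irq != NULL) {
			nvgpu_mutex_acquire(&pmu->isr_mutex);
			g->ops.pmu.pmu_enable_irq(pmu, enable);
			pmu->isr_enabled = enable;
			nvgpu_mutex_release(&pmu->isr_mutex);
		}
	}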

PMU ECC init and isr_mutex init are moved to the beginning of
nvgpu_pmu_early_init, since the LS PMU code path is disabled for
safety builds; a sketch of the resulting ordering follows. The unit
tests that depend on pmu_early_init and those covering MC interrupts
are fixed accordingly.
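
The reordering might look roughly like the sketch below. This is a
reconstruction under stated assumptions: the nvgpu_pmu_early_init
signature is simplified, and pmu_ecc_init is an illustrative hook
name, not the actual nvgpu interface; only the isr_mutex move is
visible in the hunks below.

	int nvgpu_pmu_early_init(struct gk20a *g)
	{
		struct nvgpu_pmu *pmu = g->pmu;
		int err;

		/*
		 * Done first: the ISR mutex and ECC state must be ready
		 * before the ECC interrupt is enabled in
		 * nvgpu_finalize_poweron, even when the LS PMU code path
		 * is compiled out for safety builds.
		 */
		nvgpu_mutex_init(&pmu->isr_mutex);

		/* "pmu_ecc_init" is an illustrative name, not the real hook. */
		if (g->ops.pmu.pmu_ecc_init != NULL) {
			err = g->ops.pmu.pmu_ecc_init(g);
			if (err != 0) {
				nvgpu_mutex_destroy(&pmu->isr_mutex);
				return err;
			}
		}

		/* ... remaining early init, including LS PMU setup when enabled ... */
		return 0;
	}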

Update the doxygen documentation for the changed functions.

JIRA NVGPU-4439

Change-Id: I1a1e792d2ad2cc7a926c8c1456d4d0d6d1f14d1a
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2251732
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Sagar Kamble
Date: 2019-11-29 15:18:17 +05:30
Committed by: Alex Waterman
Parent: 359fc35fa8
Commit: fba516ffae
29 changed files with 448 additions and 263 deletions

@@ -88,19 +88,10 @@ int nvgpu_pmu_destroy(struct gk20a *g, struct nvgpu_pmu *pmu)
 {
 	nvgpu_log_fn(g, " ");
 	if (!g->support_ls_pmu) {
 		return 0;
 	}
 	if (g->can_elpg) {
 		nvgpu_pmu_pg_destroy(g, pmu, pmu->pg);
 	}
-	nvgpu_mutex_acquire(&pmu->isr_mutex);
-	g->ops.pmu.pmu_enable_irq(pmu, false);
-	pmu->isr_enabled = false;
-	nvgpu_mutex_release(&pmu->isr_mutex);
 	nvgpu_pmu_queues_free(g, &pmu->queues);
 	nvgpu_pmu_fw_state_change(g, pmu, PMU_FW_STATE_OFF, false);
@@ -158,7 +149,6 @@ static void remove_pmu_support(struct nvgpu_pmu *pmu)
 	nvgpu_pmu_mutexe_deinit(g, pmu, pmu->mutexes);
 	nvgpu_pmu_fw_deinit(g, pmu, pmu->fw);
 	nvgpu_pmu_deinitialize_perfmon(g, pmu);
-	nvgpu_mutex_destroy(&pmu->isr_mutex);
 }

 static int pmu_sw_setup(struct gk20a *g, struct nvgpu_pmu *pmu)
@@ -258,12 +248,7 @@ int nvgpu_pmu_rtos_init(struct gk20a *g)
 		goto exit;
 	}
-	if (g->ops.pmu.pmu_enable_irq != NULL) {
-		nvgpu_mutex_acquire(&g->pmu->isr_mutex);
-		g->ops.pmu.pmu_enable_irq(g->pmu, true);
-		g->pmu->isr_enabled = true;
-		nvgpu_mutex_release(&g->pmu->isr_mutex);
-	}
+	nvgpu_pmu_enable_irq(g, true);

 	/*Once in LS mode, cpuctl_alias is only accessible*/
 	if (g->ops.pmu.secured_pmu_start != NULL) {
@@ -289,8 +274,6 @@ int nvgpu_pmu_rtos_early_init(struct gk20a *g, struct nvgpu_pmu *pmu)
 	nvgpu_log_fn(g, " ");
-	nvgpu_mutex_init(&pmu->isr_mutex);
 	/* Allocate memory for pmu_perfmon */
 	err = nvgpu_pmu_initialize_perfmon(g, pmu, &pmu->pmu_perfmon);
 	if (err != 0) {