gpu: nvgpu: Remove cyclic dependency PMU<->GR.

- Created and wired up a HAL operation (g->ops.gr.dump_gr_falcon_stats)
  for dumping GR falcon stats.
- Trimmed gk20a_fecs_dump_falcon_stats() to reuse code from the
  generic falcon debug dump (nvgpu_flcn_dump_stats).

JIRA NVGPU-621

Change-Id: Ia008726915112b33f0aca68a48cb98b8ed2c3475
Signed-off-by: Deepak <dgoyal@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1923353
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Deepak
2018-10-10 13:00:49 +05:30
committed by mobile promotions
parent 435892a784
commit 7e8ca5f5e7
11 changed files with 19 additions and 80 deletions

View File

@@ -52,5 +52,5 @@ void nvgpu_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu)
nvgpu_err(g, "elpg state: %d", pmu->elpg_stat);
/* PMU may crash due to FECS crash. Dump FECS status */
gk20a_fecs_dump_falcon_stats(g);
g->ops.gr.dump_gr_falcon_stats(g);
}

View File

@@ -1409,7 +1409,7 @@ static void gk20a_fifo_handle_chsw_fault(struct gk20a *g)
intr = gk20a_readl(g, fifo_intr_chsw_error_r());
nvgpu_err(g, "chsw: %08x", intr);
gk20a_fecs_dump_falcon_stats(g);
g->ops.gr.dump_gr_falcon_stats(g);
gk20a_writel(g, fifo_intr_chsw_error_r(), intr);
}
@@ -1722,7 +1722,7 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
mmfault_info.access_type, mmfault_info.inst_ptr);
if (ctxsw) {
gk20a_fecs_dump_falcon_stats(g);
g->ops.gr.dump_gr_falcon_stats(g);
nvgpu_err(g, "gr_status_r : 0x%x",
gk20a_readl(g, gr_status_r()));
}

View File

@@ -174,6 +174,11 @@ int gk20a_finalize_poweron(struct gk20a *g)
nvgpu_err(g, "failed to sw init FALCON_ID_GSPLITE");
goto done;
}
err = nvgpu_flcn_sw_init(g, FALCON_ID_FECS);
if (err != 0) {
nvgpu_err(g, "failed to sw init FALCON_ID_FECS");
goto done;
}
if (g->ops.acr.acr_sw_init != NULL &&
nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {

View File

@@ -125,86 +125,13 @@ void gk20a_fecs_dump_falcon_stats(struct gk20a *g)
{
unsigned int i;
nvgpu_err(g, "gr_fecs_os_r : %d",
gk20a_readl(g, gr_fecs_os_r()));
nvgpu_err(g, "gr_fecs_cpuctl_r : 0x%x",
gk20a_readl(g, gr_fecs_cpuctl_r()));
nvgpu_err(g, "gr_fecs_idlestate_r : 0x%x",
gk20a_readl(g, gr_fecs_idlestate_r()));
nvgpu_err(g, "gr_fecs_mailbox0_r : 0x%x",
gk20a_readl(g, gr_fecs_mailbox0_r()));
nvgpu_err(g, "gr_fecs_mailbox1_r : 0x%x",
gk20a_readl(g, gr_fecs_mailbox1_r()));
nvgpu_err(g, "gr_fecs_irqstat_r : 0x%x",
gk20a_readl(g, gr_fecs_irqstat_r()));
nvgpu_err(g, "gr_fecs_irqmode_r : 0x%x",
gk20a_readl(g, gr_fecs_irqmode_r()));
nvgpu_err(g, "gr_fecs_irqmask_r : 0x%x",
gk20a_readl(g, gr_fecs_irqmask_r()));
nvgpu_err(g, "gr_fecs_irqdest_r : 0x%x",
gk20a_readl(g, gr_fecs_irqdest_r()));
nvgpu_err(g, "gr_fecs_debug1_r : 0x%x",
gk20a_readl(g, gr_fecs_debug1_r()));
nvgpu_err(g, "gr_fecs_debuginfo_r : 0x%x",
gk20a_readl(g, gr_fecs_debuginfo_r()));
nvgpu_err(g, "gr_fecs_ctxsw_status_1_r : 0x%x",
gk20a_readl(g, gr_fecs_ctxsw_status_1_r()));
nvgpu_flcn_dump_stats(&g->fecs_flcn);
for (i = 0; i < g->ops.gr.fecs_ctxsw_mailbox_size(); i++) {
nvgpu_err(g, "gr_fecs_ctxsw_mailbox_r(%d) : 0x%x",
i, gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(i)));
}
nvgpu_err(g, "gr_fecs_engctl_r : 0x%x",
gk20a_readl(g, gr_fecs_engctl_r()));
nvgpu_err(g, "gr_fecs_curctx_r : 0x%x",
gk20a_readl(g, gr_fecs_curctx_r()));
nvgpu_err(g, "gr_fecs_nxtctx_r : 0x%x",
gk20a_readl(g, gr_fecs_nxtctx_r()));
gk20a_writel(g, gr_fecs_icd_cmd_r(),
gr_fecs_icd_cmd_opc_rreg_f() |
gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_IMB));
nvgpu_err(g, "FECS_FALCON_REG_IMB : 0x%x",
gk20a_readl(g, gr_fecs_icd_rdata_r()));
gk20a_writel(g, gr_fecs_icd_cmd_r(),
gr_fecs_icd_cmd_opc_rreg_f() |
gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_DMB));
nvgpu_err(g, "FECS_FALCON_REG_DMB : 0x%x",
gk20a_readl(g, gr_fecs_icd_rdata_r()));
gk20a_writel(g, gr_fecs_icd_cmd_r(),
gr_fecs_icd_cmd_opc_rreg_f() |
gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_CSW));
nvgpu_err(g, "FECS_FALCON_REG_CSW : 0x%x",
gk20a_readl(g, gr_fecs_icd_rdata_r()));
gk20a_writel(g, gr_fecs_icd_cmd_r(),
gr_fecs_icd_cmd_opc_rreg_f() |
gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_CTX));
nvgpu_err(g, "FECS_FALCON_REG_CTX : 0x%x",
gk20a_readl(g, gr_fecs_icd_rdata_r()));
gk20a_writel(g, gr_fecs_icd_cmd_r(),
gr_fecs_icd_cmd_opc_rreg_f() |
gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_EXCI));
nvgpu_err(g, "FECS_FALCON_REG_EXCI : 0x%x",
gk20a_readl(g, gr_fecs_icd_rdata_r()));
for (i = 0; i < 4; i++) {
gk20a_writel(g, gr_fecs_icd_cmd_r(),
gr_fecs_icd_cmd_opc_rreg_f() |
gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_PC));
nvgpu_err(g, "FECS_FALCON_REG_PC : 0x%x",
gk20a_readl(g, gr_fecs_icd_rdata_r()));
gk20a_writel(g, gr_fecs_icd_cmd_r(),
gr_fecs_icd_cmd_opc_rreg_f() |
gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_SP));
nvgpu_err(g, "FECS_FALCON_REG_SP : 0x%x",
gk20a_readl(g, gr_fecs_icd_rdata_r()));
}
}
static void gr_gk20a_load_falcon_dmem(struct gk20a *g)
@@ -526,14 +453,14 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
nvgpu_err(g,
"timeout waiting on mailbox=%d value=0x%08x",
mailbox_id, reg);
gk20a_fecs_dump_falcon_stats(g);
g->ops.gr.dump_gr_falcon_stats(g);
gk20a_gr_debug_dump(g);
return -1;
} else if (check == WAIT_UCODE_ERROR) {
nvgpu_err(g,
"ucode method failed on mailbox=%d value=0x%08x",
mailbox_id, reg);
gk20a_fecs_dump_falcon_stats(g);
g->ops.gr.dump_gr_falcon_stats(g);
return -1;
}
@@ -5303,7 +5230,7 @@ int gk20a_gr_handle_fecs_error(struct gk20a *g, struct channel_gk20a *ch,
/* currently, recovery is not initiated */
nvgpu_err(g, "fecs watchdog triggered for channel %u, "
"cannot ctxsw anymore !!", isr_data->chid);
gk20a_fecs_dump_falcon_stats(g);
g->ops.gr.dump_gr_falcon_stats(g);
} else {
nvgpu_err(g,
"fecs error interrupt 0x%08x for channel %u",

View File

@@ -330,6 +330,7 @@ static const struct gpu_ops gm20b_ops = {
.get_offset_in_gpccs_segment =
gr_gk20a_get_offset_in_gpccs_segment,
.set_debug_mode = gm20b_gr_set_debug_mode,
.dump_gr_falcon_stats = gk20a_fecs_dump_falcon_stats,
},
.fb = {
.init_hw = gm20b_fb_init_hw,

View File

@@ -406,6 +406,7 @@ static const struct gpu_ops gp106_ops = {
.get_offset_in_gpccs_segment =
gr_gk20a_get_offset_in_gpccs_segment,
.set_debug_mode = gm20b_gr_set_debug_mode,
.dump_gr_falcon_stats = gk20a_fecs_dump_falcon_stats,
},
.fb = {
.init_hw = gm20b_fb_init_hw,

View File

@@ -367,6 +367,7 @@ static const struct gpu_ops gp10b_ops = {
.get_offset_in_gpccs_segment =
gr_gk20a_get_offset_in_gpccs_segment,
.set_debug_mode = gm20b_gr_set_debug_mode,
.dump_gr_falcon_stats = gk20a_fecs_dump_falcon_stats,
},
.fb = {
.init_hw = gm20b_fb_init_hw,

View File

@@ -487,6 +487,7 @@ static const struct gpu_ops gv100_ops = {
.get_offset_in_gpccs_segment =
gr_gk20a_get_offset_in_gpccs_segment,
.set_debug_mode = gm20b_gr_set_debug_mode,
.dump_gr_falcon_stats = gk20a_fecs_dump_falcon_stats,
},
.fb = {
.init_hw = gv11b_fb_init_hw,

View File

@@ -449,6 +449,7 @@ static const struct gpu_ops gv11b_ops = {
.get_offset_in_gpccs_segment =
gr_gk20a_get_offset_in_gpccs_segment,
.set_debug_mode = gm20b_gr_set_debug_mode,
.dump_gr_falcon_stats = gk20a_fecs_dump_falcon_stats,
},
.fb = {
.init_hw = gv11b_fb_init_hw,

View File

@@ -524,6 +524,7 @@ struct gpu_ops {
u32 num_ppcs, u32 reg_list_ppc_count,
u32 *__offset_in_segment);
void (*set_debug_mode)(struct gk20a *g, bool enable);
void (*dump_gr_falcon_stats)(struct gk20a *g);
} gr;
struct {
void (*init_hw)(struct gk20a *g);

View File

@@ -506,6 +506,7 @@ static const struct gpu_ops tu104_ops = {
.get_offset_in_gpccs_segment =
gr_tu104_get_offset_in_gpccs_segment,
.set_debug_mode = gm20b_gr_set_debug_mode,
.dump_gr_falcon_stats = gk20a_fecs_dump_falcon_stats,
},
.fb = {
.init_hw = gv11b_fb_init_hw,