gpu: nvgpu: use nvgpu_flcn_* interfaces

- set nvgpu_flcn_reset() to point to gk20a_pmu_reset() for the PMU falcon
  via flcn_engine_dep_ops->reset_eng
- set up the PMU interrupt mask/destination using nvgpu_flcn_set_irq()
- replace pmu_idle() with nvgpu_flcn_wait_idle()
- replace the open-coded scrub polling with
  nvgpu_flcn_get_mem_scrubbing_status()
- drop pmu_reset() and fold its enable/disable sequence into
  gk20a_pmu_reset()
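
For reference, the resulting call flow is sketched below. This is a
condensed outline pieced together from the hunks in this change, not
literal code from any single function; g, pmu and err stand for the
locals used in the surrounding gk20a code, and pmu_enable() is the
existing static helper in pmu_gk20a.c.

    /* gk20a_finalize_poweron(): bind the PMU falcon interface layer
     * before any other nvgpu_flcn_*() call touches pmu->flcn.
     */
    nvgpu_flcn_sw_init(g, FALCON_ID_PMU);

    /* *_init_pmu_setup_hw1(): callers use the generic reset entry point,
     * which dispatches to gk20a_pmu_reset() via
     * flcn_engine_dep_ops->reset_eng.
     */
    nvgpu_flcn_reset(pmu->flcn);

    /* gk20a_pmu_reset(): wait for the falcon to idle, then cycle the
     * engine off and on again.
     */
    err = nvgpu_flcn_wait_idle(pmu->flcn);
    if (!err)
        err = pmu_enable(pmu, false);
    if (!err)
        err = pmu_enable(pmu, true);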

JIRA NVGPU-57

Change-Id: I50d0310ae78ad266da3c1e662f1598d61ff7abb6
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: http://git-master/r/1469478
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>

@@ -107,6 +107,25 @@ static bool gk20a_is_falcon_scrubbing_done(struct nvgpu_falcon *flcn)
 	return status;
 }
 
+static void gk20a_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
+{
+	struct nvgpu_falcon_engine_dependency_ops *flcn_eng_dep_ops =
+		&flcn->flcn_engine_dep_ops;
+
+	switch (flcn->flcn_id) {
+	case FALCON_ID_PMU:
+		flcn_eng_dep_ops->reset_eng = gk20a_pmu_reset;
+		break;
+	default:
+		/* NULL assignment make sure
+		 * CPU hard reset in gk20a_flcn_reset() gets execute
+		 * if falcon doesn't need specific reset implementation
+		 */
+		flcn_eng_dep_ops->reset_eng = NULL;
+		break;
+	}
+}
+
 static void gk20a_falcon_ops(struct nvgpu_falcon *flcn)
 {
 	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
@@ -116,6 +135,8 @@ static void gk20a_falcon_ops(struct nvgpu_falcon *flcn)
 	flcn_ops->is_falcon_cpu_halted = gk20a_is_falcon_cpu_halted;
 	flcn_ops->is_falcon_idle = gk20a_is_falcon_idle;
 	flcn_ops->is_falcon_scrubbing_done = gk20a_is_falcon_scrubbing_done;
+
+	gk20a_falcon_engine_dependency_ops(flcn);
 }
 
 static void gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn)

@@ -178,6 +178,9 @@ int gk20a_finalize_poweron(struct gk20a *g)
 		g->gpu_reset_done = true;
 	}
 
+	/* init interface layer support for PMU falcon */
+	nvgpu_flcn_sw_init(g, FALCON_ID_PMU);
+
 	if (g->ops.bios_init)
 		err = g->ops.bios_init(g);
 	if (err)
@@ -237,9 +240,6 @@ int gk20a_finalize_poweron(struct gk20a *g)
 		goto done;
 	}
 
-	/* init interface layer support for PMU falcon */
-	nvgpu_flcn_sw_init(g, FALCON_ID_PMU);
-
 	if (g->ops.pmu.is_pmu_supported(g)) {
 		if (g->ops.pmu.prepare_ucode)
 			err = g->ops.pmu.prepare_ucode(g);

@@ -200,38 +200,11 @@ void pmu_copy_to_dmem(struct nvgpu_pmu *pmu,
 	return;
 }
 
-int pmu_idle(struct nvgpu_pmu *pmu)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	struct nvgpu_timeout timeout;
-	u32 idle_stat;
-
-	nvgpu_timeout_init(g, &timeout, 2000, NVGPU_TIMER_RETRY_TIMER);
-
-	/* wait for pmu idle */
-	do {
-		idle_stat = gk20a_readl(g, pwr_falcon_idlestate_r());
-
-		if (pwr_falcon_idlestate_falcon_busy_v(idle_stat) == 0 &&
-			pwr_falcon_idlestate_ext_busy_v(idle_stat) == 0) {
-			break;
-		}
-
-		if (nvgpu_timeout_expired_msg(&timeout,
-					"waiting for pmu idle: 0x%08x",
-					idle_stat))
-			return -EBUSY;
-
-		nvgpu_usleep_range(100, 200);
-	} while (1);
-
-	gk20a_dbg_fn("done");
-	return 0;
-}
-
 void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
+	u32 intr_mask;
+	u32 intr_dest;
 
 	gk20a_dbg_fn("");
@@ -240,21 +213,11 @@ void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
 	g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_DISABLE, false,
 			mc_intr_mask_1_pmu_enabled_f());
 
-	gk20a_writel(g, pwr_falcon_irqmclr_r(),
-		pwr_falcon_irqmclr_gptmr_f(1) |
-		pwr_falcon_irqmclr_wdtmr_f(1) |
-		pwr_falcon_irqmclr_mthd_f(1) |
-		pwr_falcon_irqmclr_ctxsw_f(1) |
-		pwr_falcon_irqmclr_halt_f(1) |
-		pwr_falcon_irqmclr_exterr_f(1) |
-		pwr_falcon_irqmclr_swgen0_f(1) |
-		pwr_falcon_irqmclr_swgen1_f(1) |
-		pwr_falcon_irqmclr_ext_f(0xff));
+	nvgpu_flcn_set_irq(pmu->flcn, false, 0x0, 0x0);
 
 	if (enable) {
 		/* dest 0=falcon, 1=host; level 0=irq0, 1=irq1 */
-		gk20a_writel(g, pwr_falcon_irqdest_r(),
-			pwr_falcon_irqdest_host_gptmr_f(0) |
+		intr_dest = pwr_falcon_irqdest_host_gptmr_f(0) |
 			pwr_falcon_irqdest_host_wdtmr_f(1) |
 			pwr_falcon_irqdest_host_mthd_f(0) |
 			pwr_falcon_irqdest_host_ctxsw_f(0) |
@@ -271,18 +234,19 @@ void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
 			pwr_falcon_irqdest_target_exterr_f(0) |
 			pwr_falcon_irqdest_target_swgen0_f(0) |
 			pwr_falcon_irqdest_target_swgen1_f(0) |
-			pwr_falcon_irqdest_target_ext_f(0xff));
+			pwr_falcon_irqdest_target_ext_f(0xff);
 
 		/* 0=disable, 1=enable */
-		gk20a_writel(g, pwr_falcon_irqmset_r(),
-			pwr_falcon_irqmset_gptmr_f(1) |
+		intr_mask = pwr_falcon_irqmset_gptmr_f(1) |
 			pwr_falcon_irqmset_wdtmr_f(1) |
 			pwr_falcon_irqmset_mthd_f(0) |
 			pwr_falcon_irqmset_ctxsw_f(0) |
 			pwr_falcon_irqmset_halt_f(1) |
 			pwr_falcon_irqmset_exterr_f(1) |
 			pwr_falcon_irqmset_swgen0_f(1) |
-			pwr_falcon_irqmset_swgen1_f(1));
+			pwr_falcon_irqmset_swgen1_f(1);
+
+		nvgpu_flcn_set_irq(pmu->flcn, true, intr_mask, intr_dest);
 
 		g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_ENABLE, true,
 			mc_intr_mask_0_pmu_enabled_f());
@@ -295,6 +259,7 @@ int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	struct nvgpu_timeout timeout;
+	int err = 0;
 
 	gk20a_dbg_fn("");
@@ -313,13 +278,9 @@ int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable)
 				PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT,
 			NVGPU_TIMER_RETRY_TIMER);
 		do {
-			u32 w = gk20a_readl(g, pwr_falcon_dmactl_r()) &
-				(pwr_falcon_dmactl_dmem_scrubbing_m() |
-				 pwr_falcon_dmactl_imem_scrubbing_m());
-
-			if (!w) {
+			if (nvgpu_flcn_get_mem_scrubbing_status(pmu->flcn)) {
 				gk20a_dbg_fn("done");
-				return 0;
+				goto exit;
 			}
 			nvgpu_udelay(PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT);
 		} while (!nvgpu_timeout_expired(&timeout));
@@ -327,11 +288,12 @@ int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable)
 		g->ops.mc.disable(g, mc_enable_pwr_enabled_f());
 		nvgpu_err(g, "Falcon mem scrubbing timeout");
-		return -ETIMEDOUT;
-	} else {
+		err = -ETIMEDOUT;
+	} else
 		g->ops.mc.disable(g, mc_enable_pwr_enabled_f());
-		return 0;
-	}
+
+exit:
+	return err;
 }
 
 static int pmu_enable(struct nvgpu_pmu *pmu, bool enable)
@@ -357,7 +319,7 @@ static int pmu_enable(struct nvgpu_pmu *pmu, bool enable)
 
 		/* TBD: post reset */
 
-		err = pmu_idle(pmu);
+		err = nvgpu_flcn_wait_idle(pmu->flcn);
 		if (err)
 			return err;
@@ -368,31 +330,6 @@ static int pmu_enable(struct nvgpu_pmu *pmu, bool enable)
 	return 0;
 }
 
-int pmu_reset(struct nvgpu_pmu *pmu)
-{
-	int err;
-
-	err = pmu_idle(pmu);
-	if (err)
-		return err;
-
-	/* TBD: release pmu hw mutex */
-
-	err = pmu_enable(pmu, false);
-	if (err)
-		return err;
-
-	/* TBD: cancel all sequences */
-	/* TBD: init all sequences and state tables */
-	/* TBD: restore pre-init message handler */
-
-	err = pmu_enable(pmu, true);
-	if (err)
-		return err;
-
-	return 0;
-}
-
 int pmu_bootstrap(struct nvgpu_pmu *pmu)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
@@ -704,7 +641,7 @@ static int gk20a_init_pmu_setup_hw1(struct gk20a *g)
 	gk20a_dbg_fn("");
 
 	nvgpu_mutex_acquire(&pmu->isr_mutex);
-	g->ops.pmu.reset(g);
+	nvgpu_flcn_reset(pmu->flcn);
 	pmu->isr_enabled = true;
 	nvgpu_mutex_release(&pmu->isr_mutex);
@@ -737,11 +674,22 @@ static void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr)
 int gk20a_pmu_reset(struct gk20a *g)
 {
-	int err;
 	struct nvgpu_pmu *pmu = &g->pmu;
+	int err;
 
-	err = pmu_reset(pmu);
+	err = nvgpu_flcn_wait_idle(pmu->flcn);
+	if (err)
+		goto exit;
+
+	err = pmu_enable(pmu, false);
+	if (err)
+		goto exit;
+
+	err = pmu_enable(pmu, true);
+	if (err)
+		goto exit;
+
+exit:
 	return err;
 }
@@ -799,7 +747,7 @@ void gk20a_init_pmu_ops(struct gpu_ops *gops)
 	gops->pmu.alloc_blob_space = NULL;
 	gops->pmu.pmu_populate_loader_cfg = NULL;
 	gops->pmu.flcn_populate_bl_dmem_desc = NULL;
-	gops->pmu.reset = gk20a_pmu_reset;
+	gops->pmu.reset = NULL;
 }
 
 static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg,

@@ -60,7 +60,6 @@ void pmu_copy_to_dmem(struct nvgpu_pmu *pmu,
 		u32 dst, u8 *src, u32 size, u8 port);
 void pmu_copy_from_dmem(struct nvgpu_pmu *pmu,
 		u32 src, u8 *dst, u32 size, u8 port);
-int pmu_reset(struct nvgpu_pmu *pmu);
 int pmu_bootstrap(struct nvgpu_pmu *pmu);
 void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu);

@@ -1291,7 +1291,7 @@ int gm20b_init_nspmu_setup_hw1(struct gk20a *g)
 	gk20a_dbg_fn("");
 
 	nvgpu_mutex_acquire(&pmu->isr_mutex);
-	g->ops.pmu.reset(g);
+	nvgpu_flcn_reset(pmu->flcn);
 	pmu->isr_enabled = true;
 	nvgpu_mutex_release(&pmu->isr_mutex);
@@ -1326,7 +1326,7 @@ static int gm20b_init_pmu_setup_hw1(struct gk20a *g,
 	gk20a_dbg_fn("");
 
 	nvgpu_mutex_acquire(&pmu->isr_mutex);
-	g->ops.pmu.reset(g);
+	nvgpu_flcn_reset(pmu->flcn);
 	pmu->isr_enabled = true;
 	nvgpu_mutex_release(&pmu->isr_mutex);

@@ -310,5 +310,5 @@ void gm20b_init_pmu_ops(struct gpu_ops *gops)
 	gops->pmu.pmu_pg_param_post_init = NULL;
 	gops->pmu.send_lrf_tex_ltc_dram_overide_en_dis_cmd = NULL;
 	gops->pmu.dump_secure_fuses = pmu_dump_security_fuses_gm20b;
-	gops->pmu.reset = gk20a_pmu_reset;
+	gops->pmu.reset = NULL;
 }

@@ -64,11 +64,7 @@ static int gp106_pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable)
 		/* wait for Scrubbing to complete */
 		do {
-			u32 w = gk20a_readl(g, pwr_falcon_dmactl_r()) &
-				(pwr_falcon_dmactl_dmem_scrubbing_m() |
-				 pwr_falcon_dmactl_imem_scrubbing_m());
-
-			if (!w) {
+			if (nvgpu_flcn_get_mem_scrubbing_status(pmu->flcn)) {
 				gk20a_dbg_fn("done");
 				return 0;
 			}
@@ -112,7 +108,7 @@ static int pmu_enable(struct nvgpu_pmu *pmu, bool enable)
 		/* TBD: post reset */
 
 		/*idle the PMU and enable interrupts on the Falcon*/
-		err = pmu_idle(pmu);
+		err = nvgpu_flcn_wait_idle(pmu->flcn);
 		if (err)
 			return err;
 		nvgpu_udelay(5);
@@ -130,7 +126,7 @@ int gp106_pmu_reset(struct gk20a *g)
 	gk20a_dbg_fn("");
 
-	err = pmu_idle(pmu);
+	err = nvgpu_flcn_wait_idle(pmu->flcn);
 	if (err)
 		return err;

@@ -307,7 +307,7 @@ static int gp10b_init_pmu_setup_hw1(struct gk20a *g)
 	gk20a_dbg_fn("");
 
 	nvgpu_mutex_acquire(&pmu->isr_mutex);
-	g->ops.pmu.reset(g);
+	nvgpu_flcn_reset(pmu->flcn);
 	pmu->isr_enabled = true;
 	nvgpu_mutex_release(&pmu->isr_mutex);
@@ -430,6 +430,6 @@ void gp10b_init_pmu_ops(struct gpu_ops *gops)
 	gops->pmu.pmu_lpwr_disable_pg = NULL;
 	gops->pmu.pmu_pg_param_post_init = NULL;
 	gops->pmu.send_lrf_tex_ltc_dram_overide_en_dis_cmd = NULL;
-	gops->pmu.reset = gk20a_pmu_reset;
+	gops->pmu.reset = NULL;
 	gops->pmu.dump_secure_fuses = pmu_dump_security_fuses_gp10b;
 }