gpu: nvgpu: Falcon controller halt interrupt status clear

- Added nvgpu_flcn_clear_halt_intr_status() to wait, within a
timeout, for the halt interrupt status to clear via the
clear_halt_interrupt_status() HAL

- Added gk20a_flcn_clear_halt_interrupt_status()
to clear falcon controller halt interrupt status

- Replaced the open-coded falcon halt interrupt clear sequences with
the nvgpu_flcn_clear_halt_intr_status() method

NVGPU JIRA-99

Change-Id: I762a3c01cd1d02028eb6aaa9898a50be94376619
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master/r/1511333
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
This commit is contained in:
Mahantesh Kumbar
2017-06-30 11:42:17 +05:30
committed by mobile promotions
parent fbeca4a841
commit 2cf964d175
6 changed files with 66 additions and 39 deletions

View File

@@ -122,6 +122,34 @@ int nvgpu_flcn_wait_for_halt(struct nvgpu_falcon *flcn, unsigned int timeout)
return status;
}
/*
 * Poll the falcon's clear_halt_interrupt_status HAL until the halt
 * interrupt status reads back as cleared, or the timeout (in ms, CPU
 * timer) expires.
 *
 * Returns 0 on success, -EINVAL if the HAL op is not wired up for this
 * falcon, -EBUSY if the status did not clear within the timeout.
 */
int nvgpu_flcn_clear_halt_intr_status(struct nvgpu_falcon *flcn,
	unsigned int timeout)
{
	struct gk20a *g = flcn->g;
	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
	struct nvgpu_timeout to;
	int status = 0;

	if (!flcn_ops->clear_halt_interrupt_status) {
		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
			flcn->flcn_id);
		return -EINVAL;
	}

	nvgpu_timeout_init(g, &to, timeout, NVGPU_TIMER_CPU_TIMER);
	do {
		if (flcn_ops->clear_halt_interrupt_status(flcn))
			/*
			 * Status cleared: return success immediately so a
			 * late-but-successful clear is not misreported as
			 * -EBUSY by the post-loop timeout check.
			 */
			return 0;

		nvgpu_udelay(1);
	} while (!nvgpu_timeout_expired(&to));

	if (nvgpu_timeout_peek_expired(&to))
		status = -EBUSY;

	return status;
}
bool nvgpu_flcn_get_idle_status(struct nvgpu_falcon *flcn)
{
struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;

View File

@@ -38,6 +38,26 @@ static int gk20a_flcn_reset(struct nvgpu_falcon *flcn)
return status;
}
/*
 * Clear the falcon halt interrupt by setting bit 4 in IRQSCLR, then
 * read back IRQSTAT to confirm. Returns true when the halt pending
 * bit is no longer set.
 */
static bool gk20a_flcn_clear_halt_interrupt_status(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	u32 base_addr = flcn->flcn_base;
	u32 irqstat;

	/* request the clear: OR the halt bit (0x10) into IRQSCLR */
	gk20a_writel(g, base_addr + falcon_falcon_irqsclr_r(),
		gk20a_readl(g, base_addr + falcon_falcon_irqsclr_r()) |
		(0x10));

	irqstat = gk20a_readl(g, (base_addr + falcon_falcon_irqstat_r()));

	/* cleared when the halt bit no longer reads back as asserted */
	return (irqstat & falcon_falcon_irqstat_halt_true_f()) !=
		falcon_falcon_irqstat_halt_true_f();
}
static void gk20a_flcn_set_irq(struct nvgpu_falcon *flcn, bool enable)
{
struct gk20a *g = flcn->g;
@@ -275,6 +295,8 @@ void gk20a_falcon_ops(struct nvgpu_falcon *flcn)
flcn_ops->reset = gk20a_flcn_reset;
flcn_ops->set_irq = gk20a_flcn_set_irq;
flcn_ops->clear_halt_interrupt_status =
gk20a_flcn_clear_halt_interrupt_status;
flcn_ops->is_falcon_cpu_halted = gk20a_is_falcon_cpu_halted;
flcn_ops->is_falcon_idle = gk20a_is_falcon_idle;
flcn_ops->is_falcon_scrubbing_done = gk20a_is_falcon_scrubbing_done;

View File

@@ -154,9 +154,8 @@ static int gm206_bios_devinit(struct gk20a *g)
if (nvgpu_timeout_peek_expired(&timeout))
err = -ETIMEDOUT;
gk20a_writel(g, pwr_falcon_irqsclr_r(),
pwr_falcon_irqstat_halt_true_f());
gk20a_readl(g, pwr_falcon_irqsclr_r());
nvgpu_flcn_clear_halt_intr_status(g->pmu.flcn,
gk20a_get_gr_idle_timeout(g));
out:
gk20a_dbg_fn("done");
@@ -200,9 +199,8 @@ static int gm206_bios_preos(struct gk20a *g)
goto out;
}
gk20a_writel(g, pwr_falcon_irqsclr_r(),
pwr_falcon_irqstat_halt_true_f());
gk20a_readl(g, pwr_falcon_irqsclr_r());
nvgpu_flcn_clear_halt_intr_status(g->pmu.flcn,
gk20a_get_gr_idle_timeout(g));
out:
gk20a_dbg_fn("done");

View File

@@ -1517,23 +1517,11 @@ static int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms)
*/
/*
 * Clear the PMU falcon halt interrupt status within timeout_ms.
 *
 * Delegates to the common nvgpu_flcn_clear_halt_intr_status() helper;
 * the legacy open-coded register-poll loop that followed the delegation
 * was unreachable dead code (the first return always fired) and has
 * been removed.
 *
 * Returns 0 on success, -EBUSY if the status did not clear in time.
 */
static int clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout_ms)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	int status = 0;

	if (nvgpu_flcn_clear_halt_intr_status(pmu->flcn, timeout_ms))
		status = -EBUSY;

	return status;
}

View File

@@ -34,24 +34,12 @@
/*
 * Clear the SEC2 falcon halt interrupt status within the given timeout.
 *
 * Delegates to the common nvgpu_flcn_clear_halt_intr_status() helper;
 * the stale open-coded psec register-poll loop and the duplicate
 * timeout check/returns that were interleaved with the delegation were
 * dead code and have been removed.
 *
 * Returns 0 on success, -EBUSY if the status did not clear in time.
 */
int sec2_clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout)
{
	int status = 0;

	if (nvgpu_flcn_clear_halt_intr_status(&g->sec2_flcn, timeout))
		status = -EBUSY;

	return status;
}
int sec2_wait_for_halt(struct gk20a *g, unsigned int timeout)

View File

@@ -129,6 +129,7 @@ struct nvgpu_falcon_engine_dependency_ops {
struct nvgpu_falcon_ops {
int (*reset)(struct nvgpu_falcon *flcn);
void (*set_irq)(struct nvgpu_falcon *flcn, bool enable);
bool (*clear_halt_interrupt_status)(struct nvgpu_falcon *flcn);
bool (*is_falcon_cpu_halted)(struct nvgpu_falcon *flcn);
bool (*is_falcon_idle)(struct nvgpu_falcon *flcn);
bool (*is_falcon_scrubbing_done)(struct nvgpu_falcon *flcn);
@@ -167,6 +168,8 @@ struct nvgpu_falcon {
int nvgpu_flcn_wait_idle(struct nvgpu_falcon *flcn);
int nvgpu_flcn_wait_for_halt(struct nvgpu_falcon *flcn, unsigned int timeout);
int nvgpu_flcn_clear_halt_intr_status(struct nvgpu_falcon *flcn,
unsigned int timeout);
int nvgpu_flcn_reset(struct nvgpu_falcon *flcn);
void nvgpu_flcn_set_irq(struct nvgpu_falcon *flcn, bool enable,
u32 intr_mask, u32 intr_dest);