gpu: nvgpu: get PMU NEXT core irqmask

-Add a new PMU op to get the NEXT core irq mask.
-Add support for handling NEXT core interrupt requests.

Bug 200659053

Change-Id: I8b1c9b9d74ed59b4130fea712f970b4a31a8b4fe
Signed-off-by: mkumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2429042
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author: mkumbar
Authored: 2020-10-15 20:14:03 +05:30
Committed by: Alex Waterman
Parent: 5deb5d1164
Commit: 8284832300
7 changed files with 30 additions and 16 deletions
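
The diffs below add a get_irqmask op to gops_pmu and point every current chip at the existing Falcon register read, gk20a_pmu_get_irqmask(). The value of the indirection is that a chip whose PMU runs on the NEXT core can install its own mask read without the shared ISR changing. A minimal sketch of what such an override could look like; the hypothetical_next_*_r() accessors are invented placeholders, not real nvgpu register headers:

/*
 * Sketch only: the shape a NEXT core override of gops_pmu.get_irqmask
 * could take. The hypothetical_next_*_r() accessors are placeholders;
 * the actual NEXT core register layout is not part of this change.
 */
static u32 hypothetical_next_pmu_get_irqmask(struct gk20a *g)
{
	u32 mask = 0U;

	mask = nvgpu_readl(g, hypothetical_next_irqmask_r());
	mask &= nvgpu_readl(g, hypothetical_next_irqdest_r());

	return mask;
}

A chip HAL would then set .get_irqmask to such a function in its gops_pmu table while keeping .pmu_isr = gk20a_pmu_isr, exactly as the gm20b/gp10b/gv11b/tu104 tables below do with the gk20a implementation.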


@@ -828,6 +828,7 @@ static const struct gops_pmu gm20b_ops_pmu = {
 	.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
 	.pmu_mutex_release = gk20a_pmu_mutex_release,
 	.pmu_is_interrupted = gk20a_pmu_is_interrupted,
+	.get_irqmask = gk20a_pmu_get_irqmask,
 	.pmu_isr = gk20a_pmu_isr,
 	.pmu_init_perfmon_counter = gk20a_pmu_init_perfmon_counter,
 	.pmu_pg_idle_counter_config = gk20a_pmu_pg_idle_counter_config,


@@ -910,6 +910,7 @@ static const struct gops_pmu gp10b_ops_pmu = {
 	.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
 	.pmu_mutex_release = gk20a_pmu_mutex_release,
 	.pmu_is_interrupted = gk20a_pmu_is_interrupted,
+	.get_irqmask = gk20a_pmu_get_irqmask,
 	.pmu_isr = gk20a_pmu_isr,
 	.pmu_init_perfmon_counter = gk20a_pmu_init_perfmon_counter,
 	.pmu_pg_idle_counter_config = gk20a_pmu_pg_idle_counter_config,


@@ -1100,6 +1100,7 @@ static const struct gops_pmu gv11b_ops_pmu = {
 	.validate_mem_integrity = gv11b_pmu_validate_mem_integrity,
 	.pmu_enable_irq = gv11b_pmu_enable_irq,
 	.get_irqdest = gv11b_pmu_get_irqdest,
+	.get_irqmask = gk20a_pmu_get_irqmask,
 	.pmu_isr = gk20a_pmu_isr,
 	.handle_ext_irq = gv11b_pmu_handle_ext_irq,
 #ifdef CONFIG_NVGPU_LS_PMU


@@ -1140,6 +1140,7 @@ static const struct gops_pmu tu104_ops_pmu = {
 	.pmu_get_queue_head = tu104_pmu_queue_head_r,
 	.pmu_mutex_release = gk20a_pmu_mutex_release,
 	.pmu_is_interrupted = gk20a_pmu_is_interrupted,
+	.get_irqmask = gk20a_pmu_get_irqmask,
 	.pmu_isr = gk20a_pmu_isr,
 	.pmu_init_perfmon_counter = gk20a_pmu_init_perfmon_counter,
 	.pmu_pg_idle_counter_config = gk20a_pmu_pg_idle_counter_config,


@@ -33,6 +33,7 @@ struct pmu_mutexes;
 #define PMU_MODE_MISMATCH_STATUS_VAL	0xDEADDEADU
 
 void gk20a_pmu_isr(struct gk20a *g);
+u32 gk20a_pmu_get_irqmask(struct gk20a *g);
 
 #ifdef CONFIG_NVGPU_LS_PMU
 void gk20a_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu);


@@ -28,33 +28,41 @@
 #include "pmu_gk20a.h"
 
-void gk20a_pmu_isr(struct gk20a *g)
+u32 gk20a_pmu_get_irqmask(struct gk20a *g)
 {
-	struct nvgpu_pmu *pmu = g->pmu;
-	u32 intr, mask;
-
-	nvgpu_log_fn(g, " ");
-
-	nvgpu_mutex_acquire(&pmu->isr_mutex);
-	if (!pmu->isr_enabled) {
-		nvgpu_mutex_release(&pmu->isr_mutex);
-		return;
-	}
+	u32 mask = 0U;
 
 	mask = nvgpu_readl(g, pwr_falcon_irqmask_r());
 	mask &= nvgpu_readl(g, pwr_falcon_irqdest_r());
 
+	return mask;
+}
+
+void gk20a_pmu_isr(struct gk20a *g)
+{
+	struct nvgpu_pmu *pmu = g->pmu;
+	u32 intr = 0U;
+	u32 mask = 0U;
+
+	nvgpu_log_fn(g, " ");
+
 	intr = nvgpu_readl(g, pwr_falcon_irqstat_r());
+	mask = g->ops.pmu.get_irqmask(g);
 
-	nvgpu_pmu_dbg(g, "received falcon interrupt: 0x%08x", intr);
+	nvgpu_pmu_dbg(g, "received PMU interrupt: stat:0x%08x mask:0x%08x",
+		intr, mask);
 
-	intr = nvgpu_readl(g, pwr_falcon_irqstat_r()) & mask;
-	if (intr == 0U) {
+	nvgpu_mutex_acquire(&pmu->isr_mutex);
+	if (!pmu->isr_enabled || !(intr & mask)) {
+		nvgpu_log_info(g,
+			"clearing unhandled interrupt: stat:0x%08x mask:0x%08x",
+			intr, mask);
+		nvgpu_writel(g, pwr_falcon_irqsclr_r(), intr);
 		nvgpu_mutex_release(&pmu->isr_mutex);
 		return;
 	}
 
+	intr = intr & mask;
+
 	if (g->ops.pmu.handle_ext_irq != NULL) {
 		g->ops.pmu.handle_ext_irq(g, intr);
 	}
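
Three behavioral points worth noting in the reworked gk20a_pmu_isr(): the interrupt status and mask are now sampled before isr_mutex is taken; the mask comes through g->ops.pmu.get_irqmask() instead of a hard-coded Falcon register read, which is what lets a NEXT core implementation slot in; and interrupts that fire while the ISR is disabled, or that fall outside the mask, are now acknowledged via pwr_falcon_irqsclr_r() rather than left pending to re-fire.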


@@ -311,6 +311,7 @@ struct gops_pmu {
 	void (*pmu_enable_irq)(struct nvgpu_pmu *pmu, bool enable);
 	u32 (*get_irqdest)(struct gk20a *g);
+	u32 (*get_irqmask)(struct gk20a *g);
 #ifdef CONFIG_NVGPU_LS_PMU
 	u32 (*get_inst_block_config)(struct gk20a *g);