Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-24 10:34:43 +03:00)
gpu: nvgpu: add pmu hals
Add the following HALs for the PMU registers:
- get_irqstat
- set_irqsclr
- set_irqsset
- get_exterrstat
- set_exterrstat
- get_exterraddr

JIRA NVGPU-9758

Change-Id: Ib153d3189ff493fdb726ec2d1e81b863476fc667
Signed-off-by: Divya <dsinghatwari@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2886108
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
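For readers new to nvgpu's HAL layer, the entries added below wire chip-agnostic code to per-chip function pointers in g->ops.pmu, so common code never touches the pwr_falcon_* registers directly. The following is a minimal, self-contained sketch of that indirection only; struct gpu, fake_regs, and the chip_* functions are invented stand-ins, not the real nvgpu types or accessors.

/* Sketch of the HAL indirection; register I/O is faked with an
 * array so the example compiles and runs on its own. */
#include <stdint.h>
#include <stdio.h>

#define FAKE_IRQSTAT 0  /* stand-in for pwr_falcon_irqstat_r() */
#define FAKE_IRQSCLR 1  /* stand-in for pwr_falcon_irqsclr_r() */

static uint32_t fake_regs[2] = { 0x11u, 0x0u };

struct gpu;  /* forward declaration so the ops can take the device */

struct pmu_ops {
        /* mirrors the shape of gops_pmu: getters return the register
         * value, setters write the caller-supplied bits */
        uint32_t (*get_irqstat)(struct gpu *g);
        void     (*set_irqsclr)(struct gpu *g, uint32_t intr);
};

struct gpu {
        struct pmu_ops pmu;
};

static uint32_t chip_get_irqstat(struct gpu *g)
{
        (void)g;
        return fake_regs[FAKE_IRQSTAT];
}

static void chip_set_irqsclr(struct gpu *g, uint32_t intr)
{
        (void)g;
        fake_regs[FAKE_IRQSCLR] = intr;
}

int main(void)
{
        /* per-chip wiring, like the ga100/ga10b/gm20b/gv11b/tu104
         * ops tables in this change */
        struct gpu g = { .pmu = { chip_get_irqstat, chip_set_irqsclr } };

        /* common code names only the HAL, never a register */
        uint32_t intr = g.pmu.get_irqstat(&g);
        g.pmu.set_irqsclr(&g, intr);
        printf("stat 0x%x cleared via HAL\n", (unsigned)intr);
        return 0;
}

This is the same shape as the gk20a_pmu_isr() change further down, where the direct nvgpu_readl()/nvgpu_writel() calls are replaced by g->ops.pmu.get_irqstat() and g->ops.pmu.set_irqsclr().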
@@ -1359,6 +1359,12 @@ static const struct gops_pmu ga100_ops_pmu = {
         .pmu_get_queue_head = tu104_pmu_queue_head_r,
         .pmu_mutex_release = gk20a_pmu_mutex_release,
         .pmu_is_interrupted = gk20a_pmu_is_interrupted,
+        .get_irqstat = gk20a_pmu_get_irqstat,
+        .set_irqsclr = gk20a_pmu_set_irqsclr,
+        .set_irqsset = gk20a_pmu_set_irqsset,
+        .get_exterrstat = gk20a_pmu_get_exterrstat,
+        .set_exterrstat = gk20a_pmu_set_exterrstat,
+        .get_exterraddr = gk20a_pmu_get_exterraddr,
         .pmu_isr = gk20a_pmu_isr,
         .pmu_init_perfmon_counter = ga10b_pmu_init_perfmon_counter,
         .pmu_pg_idle_counter_config = gk20a_pmu_pg_idle_counter_config,
@@ -1413,6 +1413,12 @@ static const struct gops_pmu ga10b_ops_pmu = {
         .get_irqmask = ga10b_pmu_get_irqmask,
         .pmu_isr = gk20a_pmu_isr,
         .handle_ext_irq = ga10b_pmu_handle_ext_irq,
+        .get_irqstat = gk20a_pmu_get_irqstat,
+        .set_irqsclr = gk20a_pmu_set_irqsclr,
+        .set_irqsset = gk20a_pmu_set_irqsset,
+        .get_exterrstat = gk20a_pmu_get_exterrstat,
+        .set_exterrstat = gk20a_pmu_set_exterrstat,
+        .get_exterraddr = gk20a_pmu_get_exterraddr,
 #ifdef CONFIG_NVGPU_LS_PMU
         .pmu_seq_cleanup = nvgpu_pmu_seq_free_release,
         .get_inst_block_config = ga10b_pmu_get_inst_block_config,
@@ -860,6 +860,12 @@ static const struct gops_pmu gm20b_ops_pmu = {
         .pmu_mutex_release = gk20a_pmu_mutex_release,
         .pmu_is_interrupted = gk20a_pmu_is_interrupted,
         .get_irqmask = gk20a_pmu_get_irqmask,
+        .get_irqstat = gk20a_pmu_get_irqstat,
+        .set_irqsclr = gk20a_pmu_set_irqsclr,
+        .set_irqsset = gk20a_pmu_set_irqsset,
+        .get_exterrstat = gk20a_pmu_get_exterrstat,
+        .set_exterrstat = gk20a_pmu_set_exterrstat,
+        .get_exterraddr = gk20a_pmu_get_exterraddr,
         .pmu_isr = gk20a_pmu_isr,
         .pmu_init_perfmon_counter = gk20a_pmu_init_perfmon_counter,
         .pmu_pg_idle_counter_config = gk20a_pmu_pg_idle_counter_config,
@@ -1173,6 +1173,12 @@ static const struct gops_pmu gv11b_ops_pmu = {
         .pmu_enable_irq = gv11b_pmu_enable_irq,
         .get_irqdest = gv11b_pmu_get_irqdest,
         .get_irqmask = gk20a_pmu_get_irqmask,
+        .get_irqstat = gk20a_pmu_get_irqstat,
+        .set_irqsclr = gk20a_pmu_set_irqsclr,
+        .set_irqsset = gk20a_pmu_set_irqsset,
+        .get_exterrstat = gk20a_pmu_get_exterrstat,
+        .set_exterrstat = gk20a_pmu_set_exterrstat,
+        .get_exterraddr = gk20a_pmu_get_exterraddr,
         .pmu_isr = gk20a_pmu_isr,
         .handle_ext_irq = gv11b_pmu_handle_ext_irq,
 #ifdef CONFIG_NVGPU_LS_PMU
@@ -1236,6 +1236,12 @@ static const struct gops_pmu tu104_ops_pmu = {
         .pmu_mutex_release = gk20a_pmu_mutex_release,
         .pmu_is_interrupted = gk20a_pmu_is_interrupted,
         .get_irqmask = gk20a_pmu_get_irqmask,
+        .get_irqstat = gk20a_pmu_get_irqstat,
+        .set_irqsclr = gk20a_pmu_set_irqsclr,
+        .set_irqsset = gk20a_pmu_set_irqsset,
+        .get_exterrstat = gk20a_pmu_get_exterrstat,
+        .set_exterrstat = gk20a_pmu_set_exterrstat,
+        .get_exterraddr = gk20a_pmu_get_exterraddr,
         .pmu_isr = gk20a_pmu_isr,
         .pmu_init_perfmon_counter = gk20a_pmu_init_perfmon_counter,
         .pmu_pg_idle_counter_config = gk20a_pmu_pg_idle_counter_config,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -67,12 +67,12 @@ void gk20a_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu)
         i = gk20a_readl(g, pwr_pmu_bar0_fecs_error_r());
         nvgpu_err(g, "pwr_pmu_bar0_fecs_error_r : 0x%x", i);

-        i = gk20a_readl(g, pwr_falcon_exterrstat_r());
+        i = g->ops.pmu.get_exterrstat(g);
         nvgpu_err(g, "pwr_falcon_exterrstat_r : 0x%x", i);
         if (pwr_falcon_exterrstat_valid_v(i) ==
                         pwr_falcon_exterrstat_valid_true_v()) {
                 nvgpu_err(g, "pwr_falcon_exterraddr_r : 0x%x",
-                        gk20a_readl(g, pwr_falcon_exterraddr_r()));
+                        g->ops.pmu.get_exterraddr(g));
         }
 }
@@ -497,6 +497,7 @@ void gk20a_pmu_handle_interrupts(struct gk20a *g, u32 intr)
         struct nvgpu_pmu *pmu = g->pmu;
         bool recheck = false;
         int err = 0;
+        u32 reg_val = 0U;

         if ((intr & pwr_falcon_irqstat_halt_true_f()) != 0U) {
                 nvgpu_err(g, "pmu halt intr not implemented");
@@ -514,9 +515,10 @@ void gk20a_pmu_handle_interrupts(struct gk20a *g, u32 intr)
                         "pmu exterr intr not implemented. Clearing interrupt.");
                 nvgpu_pmu_dump_falcon_stats(pmu);

-                nvgpu_writel(g, pwr_falcon_exterrstat_r(),
-                        nvgpu_readl(g, pwr_falcon_exterrstat_r()) &
-                                ~pwr_falcon_exterrstat_valid_m());
+                reg_val = g->ops.pmu.get_exterrstat(g) &
+                                ~pwr_falcon_exterrstat_valid_m();
+
+                g->ops.pmu.set_exterrstat(g, reg_val);
         }

         if (g->ops.pmu.handle_swgen1_irq != NULL) {
@@ -535,8 +537,8 @@ void gk20a_pmu_handle_interrupts(struct gk20a *g, u32 intr)
         if (recheck) {
                 if (!nvgpu_pmu_queue_is_empty(&pmu->queues,
                                 PMU_MESSAGE_QUEUE)) {
-                        nvgpu_writel(g, pwr_falcon_irqsset_r(),
-                                pwr_falcon_irqsset_swgen0_set_f());
+                        g->ops.pmu.set_irqsset(g,
+                                pwr_falcon_irqsset_swgen0_set_f());
                 }
         }
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -34,6 +34,12 @@ struct pmu_mutexes;

 void gk20a_pmu_isr(struct gk20a *g);
 u32 gk20a_pmu_get_irqmask(struct gk20a *g);
+u32 gk20a_pmu_get_irqstat(struct gk20a *g);
+void gk20a_pmu_set_irqsclr(struct gk20a *g, u32 intr);
+void gk20a_pmu_set_irqsset(struct gk20a *g, u32 intr);
+u32 gk20a_pmu_get_exterrstat(struct gk20a *g);
+void gk20a_pmu_set_exterrstat(struct gk20a *g, u32 intr);
+u32 gk20a_pmu_get_exterraddr(struct gk20a *g);

 #ifdef CONFIG_NVGPU_LS_PMU
 void gk20a_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -38,6 +38,36 @@ u32 gk20a_pmu_get_irqmask(struct gk20a *g)
         return mask;
 }

+u32 gk20a_pmu_get_irqstat(struct gk20a *g)
+{
+        return nvgpu_readl(g, pwr_falcon_irqstat_r());
+}
+
+void gk20a_pmu_set_irqsclr(struct gk20a *g, u32 intr)
+{
+        nvgpu_writel(g, pwr_falcon_irqsclr_r(), intr);
+}
+
+void gk20a_pmu_set_irqsset(struct gk20a *g, u32 intr)
+{
+        nvgpu_writel(g, pwr_falcon_irqsset_r(), intr);
+}
+
+u32 gk20a_pmu_get_exterrstat(struct gk20a *g)
+{
+        return nvgpu_readl(g, pwr_falcon_exterrstat_r());
+}
+
+void gk20a_pmu_set_exterrstat(struct gk20a *g, u32 intr)
+{
+        nvgpu_writel(g, pwr_falcon_exterrstat_r(), intr);
+}
+
+u32 gk20a_pmu_get_exterraddr(struct gk20a *g)
+{
+        return nvgpu_readl(g, pwr_falcon_exterraddr_r());
+}
+
 void gk20a_pmu_isr(struct gk20a *g)
 {
         struct nvgpu_pmu *pmu = g->pmu;
@@ -48,7 +78,7 @@ void gk20a_pmu_isr(struct gk20a *g)

         nvgpu_mutex_acquire(&pmu->isr_mutex);

-        intr = nvgpu_readl(g, pwr_falcon_irqstat_r());
+        intr = g->ops.pmu.get_irqstat(g);
         mask = g->ops.pmu.get_irqmask(g);
         nvgpu_pmu_dbg(g, "received PMU interrupt: stat:0x%08x mask:0x%08x",
                 intr, mask);
@@ -57,7 +87,7 @@ void gk20a_pmu_isr(struct gk20a *g)
                 nvgpu_log_info(g,
                         "clearing unhandled interrupt: stat:0x%08x mask:0x%08x",
                         intr, mask);
-                nvgpu_writel(g, pwr_falcon_irqsclr_r(), intr);
+                g->ops.pmu.set_irqsclr(g, intr);
                 nvgpu_mutex_release(&pmu->isr_mutex);
                 return;
         }
@@ -68,7 +98,7 @@ void gk20a_pmu_isr(struct gk20a *g)
                 g->ops.pmu.handle_ext_irq(g, intr);
         }

-        nvgpu_writel(g, pwr_falcon_irqsclr_r(), intr);
+        g->ops.pmu.set_irqsclr(g, intr);

 #ifdef CONFIG_NVGPU_LS_PMU
         if (nvgpu_pmu_get_fw_state(g, pmu) == PMU_FW_STATE_OFF) {
@@ -473,6 +473,12 @@ struct gops_pmu {
         /** @cond DOXYGEN_SHOULD_SKIP_THIS */
         void (*handle_swgen1_irq)(struct gk20a *g, u32 intr);
         u32 (*get_irqmask)(struct gk20a *g);
+        u32 (*get_irqstat)(struct gk20a *g);
+        void (*set_irqsclr)(struct gk20a *g, u32 intr);
+        void (*set_irqsset)(struct gk20a *g, u32 intr);
+        u32 (*get_exterrstat)(struct gk20a *g);
+        void (*set_exterrstat)(struct gk20a *g, u32 intr);
+        u32 (*get_exterraddr)(struct gk20a *g);

 #ifdef CONFIG_NVGPU_LS_PMU
         u32 (*get_inst_block_config)(struct gk20a *g);
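A usage note on the exterr pair: as the gk20a_pmu_handle_interrupts() hunk above shows, the valid bit is cleared by reading EXTERRSTAT through get_exterrstat(), masking off the valid field, and writing the result back through set_exterrstat(). Below is a self-contained sketch of that read-modify-write; VALID_MASK and the fake register variable are invented for illustration and do not reflect the real pwr_falcon_exterrstat field layout, which comes from the generated register headers.

/* Sketch of the exterr valid-bit clear; VALID_MASK is a made-up
 * stand-in for pwr_falcon_exterrstat_valid_m(). */
#include <stdint.h>
#include <stdio.h>

#define VALID_MASK 0x80000000u            /* hypothetical valid field */

static uint32_t exterrstat = 0x8000beefu; /* fake register backing   */

static uint32_t get_exterrstat(void) { return exterrstat; }
static void set_exterrstat(uint32_t v) { exterrstat = v; }

int main(void)
{
        /* read-modify-write: keep the error information, drop only
         * the valid bit, matching the reg_val logic in the hunk above */
        uint32_t reg_val = get_exterrstat() & ~VALID_MASK;
        set_exterrstat(reg_val);
        printf("exterrstat now 0x%x\n", (unsigned)exterrstat);
        return 0;
}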