gpu: nvgpu: add pmu hals to resolve mismatch

Add the following HALs to avoid duplicating code for future
chips (a minimal sketch of the dispatch pattern follows the
commit metadata below):
- set_mailbox1
- get_ecc_address
- get_ecc_status
- set_ecc_status

JIRA NVGPU-9758

Change-Id: I54ce3dfaae2873dbcd88edabbd877eca9f3d1fdb
Signed-off-by: Divya <dsinghatwari@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2898016
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:       Divya
Date:         2023-05-03 15:36:08 +00:00
Committed by: mobile promotions
Parent:       24a533c9dc
Commit:       ef1fb41e54
12 changed files with 54 additions and 15 deletions
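For context, a minimal, self-contained sketch of the function-pointer HAL
dispatch pattern these ops extend. This is not nvgpu code: the types, the fake
register file, and all demo_* names are simplified stand-ins invented for this
note (g.pmu stands in for the real g->ops.pmu).

#include <stdint.h>
#include <stdio.h>

struct gk20a;

/* Per-chip PMU ops, mirroring the gops_pmu additions in this commit. */
struct gops_pmu {
	void (*set_mailbox1)(struct gk20a *g, uint32_t val);
	uint32_t (*get_ecc_status)(struct gk20a *g);
	void (*set_ecc_status)(struct gk20a *g, uint32_t val);
};

struct gk20a {
	struct gops_pmu pmu;	/* stand-in for g->ops.pmu */
	uint32_t regs[2];	/* fake register file, demo only */
};

/* Shared "gk20a-style" implementations that any chip may reuse. */
static void demo_set_mailbox1(struct gk20a *g, uint32_t val)
{
	g->regs[0] = val;	/* stands in for nvgpu_writel() of mailbox1 */
}

static uint32_t demo_get_ecc_status(struct gk20a *g)
{
	return g->regs[1];	/* stands in for nvgpu_readl() of ECC status */
}

static void demo_set_ecc_status(struct gk20a *g, uint32_t val)
{
	g->regs[1] = val;
}

int main(void)
{
	/* Per-chip HAL init fills the table with shared or chip-specific ops. */
	struct gk20a g = {
		.pmu = {
			.set_mailbox1   = demo_set_mailbox1,
			.get_ecc_status = demo_get_ecc_status,
			.set_ecc_status = demo_set_ecc_status,
		},
	};

	/* Common code calls through the table, never a raw register access. */
	g.pmu.set_mailbox1(&g, 0);
	g.pmu.set_ecc_status(&g, 0x1u);
	printf("ecc status: 0x%x\n", (unsigned int)g.pmu.get_ecc_status(&g));
	return 0;
}

The point of the commit is the calling convention at the end: once common code
goes through the table, supporting a new chip means filling in table entries,
not copying the callers.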


@@ -1359,6 +1359,7 @@ static const struct gops_pmu ga100_ops_pmu = {
 	.pmu_get_queue_head = tu104_pmu_queue_head_r,
 	.pmu_mutex_release = gk20a_pmu_mutex_release,
 	.pmu_is_interrupted = gk20a_pmu_is_interrupted,
+	.set_mailbox1 = gk20a_pmu_set_mailbox1,
 	.get_irqstat = gk20a_pmu_get_irqstat,
 	.set_irqsclr = gk20a_pmu_set_irqsclr,
 	.set_irqsset = gk20a_pmu_set_irqsset,


@@ -1414,6 +1414,10 @@ static const struct gops_pmu ga10b_ops_pmu = {
 	.get_irqmask = ga10b_pmu_get_irqmask,
 	.pmu_isr = gk20a_pmu_isr,
 	.handle_ext_irq = ga10b_pmu_handle_ext_irq,
+	.set_mailbox1 = gk20a_pmu_set_mailbox1,
+	.get_ecc_address = gv11b_pmu_get_ecc_address,
+	.get_ecc_status = gv11b_pmu_get_ecc_status,
+	.set_ecc_status = gv11b_pmu_set_ecc_status,
 	.get_irqstat = gk20a_pmu_get_irqstat,
 	.set_irqsclr = gk20a_pmu_set_irqsclr,
 	.set_irqsset = gk20a_pmu_set_irqsset,


@@ -860,6 +860,7 @@ static const struct gops_pmu gm20b_ops_pmu = {
 	.pmu_mutex_release = gk20a_pmu_mutex_release,
 	.pmu_is_interrupted = gk20a_pmu_is_interrupted,
 	.get_irqmask = gk20a_pmu_get_irqmask,
+	.set_mailbox1 = gk20a_pmu_set_mailbox1,
 	.get_irqstat = gk20a_pmu_get_irqstat,
 	.set_irqsclr = gk20a_pmu_set_irqsclr,
 	.set_irqsset = gk20a_pmu_set_irqsset,


@@ -1173,6 +1173,10 @@ static const struct gops_pmu gv11b_ops_pmu = {
 	.pmu_enable_irq = gv11b_pmu_enable_irq,
 	.get_irqdest = gv11b_pmu_get_irqdest,
 	.get_irqmask = gk20a_pmu_get_irqmask,
+	.set_mailbox1 = gk20a_pmu_set_mailbox1,
+	.get_ecc_address = gv11b_pmu_get_ecc_address,
+	.get_ecc_status = gv11b_pmu_get_ecc_status,
+	.set_ecc_status = gv11b_pmu_set_ecc_status,
 	.get_irqstat = gk20a_pmu_get_irqstat,
 	.set_irqsclr = gk20a_pmu_set_irqsclr,
 	.set_irqsset = gk20a_pmu_set_irqsset,


@@ -1236,6 +1236,10 @@ static const struct gops_pmu tu104_ops_pmu = {
 	.pmu_mutex_release = gk20a_pmu_mutex_release,
 	.pmu_is_interrupted = gk20a_pmu_is_interrupted,
 	.get_irqmask = gk20a_pmu_get_irqmask,
+	.set_mailbox1 = gk20a_pmu_set_mailbox1,
+	.get_ecc_address = gv11b_pmu_get_ecc_address,
+	.get_ecc_status = gv11b_pmu_get_ecc_status,
+	.set_ecc_status = gv11b_pmu_set_ecc_status,
 	.get_irqstat = gk20a_pmu_get_irqstat,
 	.set_irqsclr = gk20a_pmu_set_irqsclr,
 	.set_irqsset = gk20a_pmu_set_irqsset,


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -437,7 +437,7 @@ static int ga10b_pmu_handle_ecc(struct gk20a *g)
 	int ret = 0;
 	u32 ecc_status = 0;
 
-	ecc_status = nvgpu_readl(g, pwr_pmu_falcon_ecc_status_r());
+	ecc_status = g->ops.pmu.get_ecc_status(g);
 
 	if ((ecc_status &
 		pwr_pmu_falcon_ecc_status_uncorrected_err_imem_m()) != 0U) {
@@ -481,7 +481,7 @@ static int ga10b_pmu_handle_ecc(struct gk20a *g)
 	if (ret != 0) {
 		nvgpu_err(g, "ecc_addr(0x%x)",
-			nvgpu_readl(g, pwr_pmu_falcon_ecc_address_r()));
+			g->ops.pmu.get_ecc_address(g));
 	}
 
 	return ret;


@@ -34,6 +34,7 @@ struct pmu_mutexes;
 void gk20a_pmu_isr(struct gk20a *g);
 u32 gk20a_pmu_get_irqmask(struct gk20a *g);
+void gk20a_pmu_set_mailbox1(struct gk20a *g, u32 val);
 u32 gk20a_pmu_get_irqstat(struct gk20a *g);
 void gk20a_pmu_set_irqsclr(struct gk20a *g, u32 intr);
 void gk20a_pmu_set_irqsset(struct gk20a *g, u32 intr);


@@ -38,6 +38,11 @@ u32 gk20a_pmu_get_irqmask(struct gk20a *g)
 	return mask;
 }
 
+void gk20a_pmu_set_mailbox1(struct gk20a *g, u32 val)
+{
+	nvgpu_writel(g, pwr_falcon_mailbox1_r(), val);
+}
+
 u32 gk20a_pmu_get_irqstat(struct gk20a *g)
 {
 	return nvgpu_readl(g, pwr_falcon_irqstat_r());


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -194,7 +194,7 @@ void gm20b_pmu_flcn_setup_boot_config(struct gk20a *g)
 	}
 
 	/* Clearing mailbox register used to reflect capabilities */
-	gk20a_writel(g, pwr_falcon_mailbox1_r(), 0);
+	g->ops.pmu.set_mailbox1(g, 0);
 
 	/* enable the context interface */
 	gk20a_writel(g, pwr_falcon_itfen_r(),


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -38,6 +38,9 @@ void gv11b_pmu_engine_reset(struct gk20a *g, bool do_reset);
 u32 gv11b_pmu_falcon_base_addr(void);
 bool gv11b_is_pmu_supported(struct gk20a *g);
 void gv11b_pmu_handle_ext_irq(struct gk20a *g, u32 intr0);
+u32 gv11b_pmu_get_ecc_address(struct gk20a *g);
+u32 gv11b_pmu_get_ecc_status(struct gk20a *g);
+void gv11b_pmu_set_ecc_status(struct gk20a *g, u32 val);
 
 #ifdef CONFIG_NVGPU_LS_PMU
 int gv11b_pmu_bootstrap(struct gk20a *g, struct nvgpu_pmu *pmu,


@@ -81,6 +81,21 @@ static u32 pmu_bar0_hosterr_etype(u32 val)
 		PMU_BAR0_WRITE_HOSTERR : PMU_BAR0_READ_HOSTERR;
 }
 
+u32 gv11b_pmu_get_ecc_address(struct gk20a *g)
+{
+	return nvgpu_readl(g, pwr_pmu_falcon_ecc_address_r());
+}
+
+u32 gv11b_pmu_get_ecc_status(struct gk20a *g)
+{
+	return nvgpu_readl(g, pwr_pmu_falcon_ecc_status_r());
+}
+
+void gv11b_pmu_set_ecc_status(struct gk20a *g, u32 val)
+{
+	nvgpu_writel(g, pwr_pmu_falcon_ecc_status_r(), val);
+}
+
 int gv11b_pmu_bar0_error_status(struct gk20a *g, u32 *bar0_status,
 	u32 *etype)
 {
@@ -168,8 +183,8 @@ bool gv11b_pmu_validate_mem_integrity(struct gk20a *g)
 {
 	u32 ecc_status, ecc_addr;
 
-	ecc_status = nvgpu_readl(g, pwr_pmu_falcon_ecc_status_r());
-	ecc_addr = nvgpu_readl(g, pwr_pmu_falcon_ecc_address_r());
+	ecc_status = g->ops.pmu.get_ecc_status(g);
+	ecc_addr = g->ops.pmu.get_ecc_address(g);
 
 	return ((gv11b_pmu_correct_ecc(g, ecc_status, ecc_addr) == 0) ? true :
 		false);
@@ -195,7 +210,7 @@ void gv11b_pmu_flcn_setup_boot_config(struct gk20a *g)
 	}
 
 	/* Clearing mailbox register used to reflect capabilities */
-	nvgpu_writel(g, pwr_falcon_mailbox1_r(), PWR_FALCON_MAILBOX1_DATA_INIT);
+	g->ops.pmu.set_mailbox1(g, PWR_FALCON_MAILBOX1_DATA_INIT);
 
 	/* enable the context interface */
 	nvgpu_writel(g, pwr_falcon_itfen_r(),
@@ -342,10 +357,8 @@ static void gv11b_pmu_handle_ecc_irq(struct gk20a *g)
 		return;
 	}
 
-	ecc_status = nvgpu_readl(g,
-			pwr_pmu_falcon_ecc_status_r());
-	ecc_addr = nvgpu_readl(g,
-			pwr_pmu_falcon_ecc_address_r());
+	ecc_status = g->ops.pmu.get_ecc_status(g);
+	ecc_addr = g->ops.pmu.get_ecc_address(g);
 	corrected_cnt = nvgpu_readl(g,
 			pwr_pmu_falcon_ecc_corrected_err_count_r());
 	uncorrected_cnt = nvgpu_readl(g,
@@ -374,8 +387,7 @@ static void gv11b_pmu_handle_ecc_irq(struct gk20a *g)
 			pwr_pmu_falcon_ecc_uncorrected_err_count_r(), 0);
 	}
 
-	nvgpu_writel(g, pwr_pmu_falcon_ecc_status_r(),
-		pwr_pmu_falcon_ecc_status_reset_task_f());
+	g->ops.pmu.set_ecc_status(g, pwr_pmu_falcon_ecc_status_reset_task_f());
 
 	/* update counters per slice */
 	if (corrected_overflow != 0U) {


@@ -473,6 +473,10 @@ struct gops_pmu {
 	/** @cond DOXYGEN_SHOULD_SKIP_THIS */
 	void (*handle_swgen1_irq)(struct gk20a *g, u32 intr);
 	u32 (*get_irqmask)(struct gk20a *g);
+	void (*set_mailbox1)(struct gk20a *g, u32 val);
+	u32 (*get_ecc_address)(struct gk20a *g);
+	u32 (*get_ecc_status)(struct gk20a *g);
+	void (*set_ecc_status)(struct gk20a *g, u32 val);
 	u32 (*get_irqstat)(struct gk20a *g);
 	void (*set_irqsclr)(struct gk20a *g, u32 intr);
 	void (*set_irqsset)(struct gk20a *g, u32 intr);
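
Purely as an illustration of the stated goal (no code duplication for future
chips), and reusing the hypothetical demo types from the sketch near the top
of this page: a new chip only supplies its own table entries, while shared
handlers keep calling through the ops table unchanged. The futurechip_* names
are invented for this note.

/* Hypothetical future chip: override one accessor, reuse the rest. */
static uint32_t futurechip_get_ecc_status(struct gk20a *g)
{
	/* Pretend this chip reports ECC status behind a different register. */
	return g->regs[1] & 0x0000ffffu;
}

static void futurechip_init_pmu_ops(struct gk20a *g)
{
	g->pmu.set_mailbox1   = demo_set_mailbox1;		/* shared, reused */
	g->pmu.get_ecc_status = futurechip_get_ecc_status;	/* chip-specific */
	g->pmu.set_ecc_status = demo_set_ecc_status;		/* shared, reused */
}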