gpu: nvgpu: fix the hw header accessors

Various gv11b register accessors are passed as function pointers to
NVGPU_ECC_ERR. The PMU logic likewise needs the queue head, tail, and
mutex register accessors as function pointers. Wrap the hardware-header
accessors in addressable functions to fix this.

JIRA NVGPU-3733

Change-Id: I5668fedaac187fab052ee5d68a10f7e2d6d35413
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2150880
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Sagar Kamble
2019-07-09 15:58:09 +05:30
committed by mobile promotions
parent 8a3f7a4496
commit 9bb347edec
16 changed files with 344 additions and 58 deletions

View File

@@ -31,19 +31,49 @@
#include <nvgpu/hw/gv11b/hw_fb_gv11b.h> #include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
/*
 * Addressable wrappers around the generated hw_fb_gv11b.h HUBMMU ECC
 * accessors, so they can be stored as function pointers in the
 * NVGPU_ECC_ERR descriptors below. NOTE(review): presumably the generated
 * accessors cannot be referenced by address directly — confirm against the
 * hw header definitions.
 */
static inline u32 l2tlb_ecc_control_r(void)
{
return fb_mmu_l2tlb_ecc_control_r();
}
static inline u32 l2tlb_ecc_control_inject_uncorrected_err_f(u32 v)
{
return fb_mmu_l2tlb_ecc_control_inject_uncorrected_err_f(v);
}
static inline u32 hubtlb_ecc_control_r(void)
{
return fb_mmu_hubtlb_ecc_control_r();
}
static inline u32 hubtlb_ecc_control_inject_uncorrected_err_f(u32 v)
{
return fb_mmu_hubtlb_ecc_control_inject_uncorrected_err_f(v);
}
static inline u32 fillunit_ecc_control_r(void)
{
return fb_mmu_fillunit_ecc_control_r();
}
static inline u32 fillunit_ecc_control_inject_uncorrected_err_f(u32 v)
{
return fb_mmu_fillunit_ecc_control_inject_uncorrected_err_f(v);
}
static struct nvgpu_hw_err_inject_info hubmmu_ecc_err_desc[] = { static struct nvgpu_hw_err_inject_info hubmmu_ecc_err_desc[] = {
NVGPU_ECC_ERR("hubmmu_l2tlb_sa_data_ecc_uncorrected", NVGPU_ECC_ERR("hubmmu_l2tlb_sa_data_ecc_uncorrected",
gv11b_fb_intr_inject_hubmmu_ecc_error, gv11b_fb_intr_inject_hubmmu_ecc_error,
fb_mmu_l2tlb_ecc_control_r, l2tlb_ecc_control_r,
fb_mmu_l2tlb_ecc_control_inject_uncorrected_err_f), l2tlb_ecc_control_inject_uncorrected_err_f),
NVGPU_ECC_ERR("hubmmu_tlb_sa_data_ecc_uncorrected", NVGPU_ECC_ERR("hubmmu_tlb_sa_data_ecc_uncorrected",
gv11b_fb_intr_inject_hubmmu_ecc_error, gv11b_fb_intr_inject_hubmmu_ecc_error,
fb_mmu_hubtlb_ecc_control_r, hubtlb_ecc_control_r,
fb_mmu_hubtlb_ecc_control_inject_uncorrected_err_f), hubtlb_ecc_control_inject_uncorrected_err_f),
NVGPU_ECC_ERR("hubmmu_pte_data_ecc_uncorrected", NVGPU_ECC_ERR("hubmmu_pte_data_ecc_uncorrected",
gv11b_fb_intr_inject_hubmmu_ecc_error, gv11b_fb_intr_inject_hubmmu_ecc_error,
fb_mmu_fillunit_ecc_control_r, fillunit_ecc_control_r,
fb_mmu_fillunit_ecc_control_inject_uncorrected_err_f), fillunit_ecc_control_inject_uncorrected_err_f),
}; };
static struct nvgpu_hw_err_inject_info_desc hubmmu_err_desc; static struct nvgpu_hw_err_inject_info_desc hubmmu_err_desc;

View File

@@ -28,15 +28,30 @@
#include "ecc_gv11b.h" #include "ecc_gv11b.h"
/*
 * Addressable wrappers around the gr_fecs_* hw-header ECC accessors, used
 * as function pointers in the fecs_ecc_err_desc NVGPU_ECC_ERR entries
 * below.
 */
static inline u32 fecs_falcon_ecc_control_r(void)
{
return gr_fecs_falcon_ecc_control_r();
}
static inline u32 fecs_falcon_ecc_control_inject_corrected_err_f(u32 v)
{
return gr_fecs_falcon_ecc_control_inject_corrected_err_f(v);
}
static inline u32 fecs_falcon_ecc_control_inject_uncorrected_err_f(u32 v)
{
return gr_fecs_falcon_ecc_control_inject_uncorrected_err_f(v);
}
static struct nvgpu_hw_err_inject_info fecs_ecc_err_desc[] = { static struct nvgpu_hw_err_inject_info fecs_ecc_err_desc[] = {
NVGPU_ECC_ERR("falcon_imem_ecc_corrected", NVGPU_ECC_ERR("falcon_imem_ecc_corrected",
gv11b_gr_intr_inject_fecs_ecc_error, gv11b_gr_intr_inject_fecs_ecc_error,
gr_fecs_falcon_ecc_control_r, fecs_falcon_ecc_control_r,
gr_fecs_falcon_ecc_control_inject_corrected_err_f), fecs_falcon_ecc_control_inject_corrected_err_f),
NVGPU_ECC_ERR("falcon_imem_ecc_uncorrected", NVGPU_ECC_ERR("falcon_imem_ecc_uncorrected",
gv11b_gr_intr_inject_fecs_ecc_error, gv11b_gr_intr_inject_fecs_ecc_error,
gr_fecs_falcon_ecc_control_r, fecs_falcon_ecc_control_r,
gr_fecs_falcon_ecc_control_inject_uncorrected_err_f), fecs_falcon_ecc_control_inject_uncorrected_err_f),
}; };
static struct nvgpu_hw_err_inject_info_desc fecs_err_desc; static struct nvgpu_hw_err_inject_info_desc fecs_err_desc;
@@ -52,15 +67,30 @@ gv11b_gr_intr_get_fecs_err_desc(struct gk20a *g)
return &fecs_err_desc; return &fecs_err_desc;
} }
/*
 * Addressable wrappers around the gr_gpccs_* hw-header ECC accessors, used
 * as function pointers in the gpccs_ecc_err_desc NVGPU_ECC_ERR entries
 * below.
 */
static inline u32 gpccs_falcon_ecc_control_r(void)
{
return gr_gpccs_falcon_ecc_control_r();
}
static inline u32 gpccs_falcon_ecc_control_inject_corrected_err_f(u32 v)
{
return gr_gpccs_falcon_ecc_control_inject_corrected_err_f(v);
}
static inline u32 gpccs_falcon_ecc_control_inject_uncorrected_err_f(u32 v)
{
return gr_gpccs_falcon_ecc_control_inject_uncorrected_err_f(v);
}
static struct nvgpu_hw_err_inject_info gpccs_ecc_err_desc[] = { static struct nvgpu_hw_err_inject_info gpccs_ecc_err_desc[] = {
NVGPU_ECC_ERR("falcon_imem_ecc_corrected", NVGPU_ECC_ERR("falcon_imem_ecc_corrected",
gv11b_gr_intr_inject_gpccs_ecc_error, gv11b_gr_intr_inject_gpccs_ecc_error,
gr_gpccs_falcon_ecc_control_r, gpccs_falcon_ecc_control_r,
gr_gpccs_falcon_ecc_control_inject_corrected_err_f), gpccs_falcon_ecc_control_inject_corrected_err_f),
NVGPU_ECC_ERR("falcon_imem_ecc_uncorrected", NVGPU_ECC_ERR("falcon_imem_ecc_uncorrected",
gv11b_gr_intr_inject_gpccs_ecc_error, gv11b_gr_intr_inject_gpccs_ecc_error,
gr_gpccs_falcon_ecc_control_r, gpccs_falcon_ecc_control_r,
gr_gpccs_falcon_ecc_control_inject_uncorrected_err_f), gpccs_falcon_ecc_control_inject_uncorrected_err_f),
}; };
static struct nvgpu_hw_err_inject_info_desc gpccs_err_desc; static struct nvgpu_hw_err_inject_info_desc gpccs_err_desc;
@@ -76,31 +106,106 @@ gv11b_gr_intr_get_gpccs_err_desc(struct gk20a *g)
return &gpccs_err_desc; return &gpccs_err_desc;
} }
/*
 * Addressable wrappers around the SM, GPC MMU L1 TLB, and GCC L15
 * hw-header ECC accessors. These are stored as function pointers in the
 * sm_ecc_err_desc, mmu_ecc_err_desc, and gcc_ecc_err_desc NVGPU_ECC_ERR
 * tables below.
 */
static inline u32 pri_gpc0_tpc0_sm_l1_tag_ecc_control_r(void)
{
return gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_r();
}
static inline u32 pri_gpc0_tpc0_sm_l1_tag_ecc_control_inject_corrected_err_f(u32 v)
{
return gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_inject_corrected_err_f(v);
}
static inline u32 pri_gpc0_tpc0_sm_l1_tag_ecc_control_inject_uncorrected_err_f(u32 v)
{
return gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_inject_uncorrected_err_f(v);
}
static inline u32 pri_gpc0_tpc0_sm_cbu_ecc_control_r(void)
{
return gr_pri_gpc0_tpc0_sm_cbu_ecc_control_r();
}
static inline u32 pri_gpc0_tpc0_sm_cbu_ecc_control_inject_uncorrected_err_f(u32 v)
{
return gr_pri_gpc0_tpc0_sm_cbu_ecc_control_inject_uncorrected_err_f(v);
}
static inline u32 pri_gpc0_tpc0_sm_lrf_ecc_control_r(void)
{
return gr_pri_gpc0_tpc0_sm_lrf_ecc_control_r();
}
static inline u32 pri_gpc0_tpc0_sm_lrf_ecc_control_inject_uncorrected_err_f(u32 v)
{
return gr_pri_gpc0_tpc0_sm_lrf_ecc_control_inject_uncorrected_err_f(v);
}
static inline u32 pri_gpc0_tpc0_sm_l1_data_ecc_control_r(void)
{
return gr_pri_gpc0_tpc0_sm_l1_data_ecc_control_r();
}
static inline u32 pri_gpc0_tpc0_sm_l1_data_ecc_control_inject_uncorrected_err_f(u32 v)
{
return gr_pri_gpc0_tpc0_sm_l1_data_ecc_control_inject_uncorrected_err_f(v);
}
static inline u32 pri_gpc0_tpc0_sm_icache_ecc_control_r(void)
{
return gr_pri_gpc0_tpc0_sm_icache_ecc_control_r();
}
static inline u32 pri_gpc0_tpc0_sm_icache_ecc_control_inject_uncorrected_err_f(u32 v)
{
return gr_pri_gpc0_tpc0_sm_icache_ecc_control_inject_uncorrected_err_f(v);
}
/* Note the wrapped accessor is gr_gpc0_mmu_* (no "pri" in the hw name). */
static inline u32 pri_gpc0_mmu_l1tlb_ecc_control_r(void)
{
return gr_gpc0_mmu_l1tlb_ecc_control_r();
}
static inline u32 pri_gpc0_mmu_l1tlb_ecc_control_inject_uncorrected_err_f(u32 v)
{
return gr_gpc0_mmu_l1tlb_ecc_control_inject_uncorrected_err_f(v);
}
static inline u32 pri_gpc0_gcc_l15_ecc_control_r(void)
{
return gr_pri_gpc0_gcc_l15_ecc_control_r();
}
static inline u32 pri_gpc0_gcc_l15_ecc_control_inject_uncorrected_err_f(u32 v)
{
return gr_pri_gpc0_gcc_l15_ecc_control_inject_uncorrected_err_f(v);
}
static struct nvgpu_hw_err_inject_info sm_ecc_err_desc[] = { static struct nvgpu_hw_err_inject_info sm_ecc_err_desc[] = {
NVGPU_ECC_ERR("l1_tag_ecc_corrected", NVGPU_ECC_ERR("l1_tag_ecc_corrected",
gv11b_gr_intr_inject_sm_ecc_error, gv11b_gr_intr_inject_sm_ecc_error,
gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_r, pri_gpc0_tpc0_sm_l1_tag_ecc_control_r,
gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_inject_corrected_err_f), pri_gpc0_tpc0_sm_l1_tag_ecc_control_inject_corrected_err_f),
NVGPU_ECC_ERR("l1_tag_ecc_uncorrected", NVGPU_ECC_ERR("l1_tag_ecc_uncorrected",
gv11b_gr_intr_inject_sm_ecc_error, gv11b_gr_intr_inject_sm_ecc_error,
gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_r, pri_gpc0_tpc0_sm_l1_tag_ecc_control_r,
gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_inject_uncorrected_err_f), pri_gpc0_tpc0_sm_l1_tag_ecc_control_inject_uncorrected_err_f),
NVGPU_ECC_ERR("cbu_ecc_uncorrected", NVGPU_ECC_ERR("cbu_ecc_uncorrected",
gv11b_gr_intr_inject_sm_ecc_error, gv11b_gr_intr_inject_sm_ecc_error,
gr_pri_gpc0_tpc0_sm_cbu_ecc_control_r, pri_gpc0_tpc0_sm_cbu_ecc_control_r,
gr_pri_gpc0_tpc0_sm_cbu_ecc_control_inject_uncorrected_err_f), pri_gpc0_tpc0_sm_cbu_ecc_control_inject_uncorrected_err_f),
NVGPU_ECC_ERR("lrf_ecc_uncorrected", NVGPU_ECC_ERR("lrf_ecc_uncorrected",
gv11b_gr_intr_inject_sm_ecc_error, gv11b_gr_intr_inject_sm_ecc_error,
gr_pri_gpc0_tpc0_sm_lrf_ecc_control_r, pri_gpc0_tpc0_sm_lrf_ecc_control_r,
gr_pri_gpc0_tpc0_sm_lrf_ecc_control_inject_uncorrected_err_f), pri_gpc0_tpc0_sm_lrf_ecc_control_inject_uncorrected_err_f),
NVGPU_ECC_ERR("l1_data_ecc_uncorrected", NVGPU_ECC_ERR("l1_data_ecc_uncorrected",
gv11b_gr_intr_inject_sm_ecc_error, gv11b_gr_intr_inject_sm_ecc_error,
gr_pri_gpc0_tpc0_sm_l1_data_ecc_control_r, pri_gpc0_tpc0_sm_l1_data_ecc_control_r,
gr_pri_gpc0_tpc0_sm_l1_data_ecc_control_inject_uncorrected_err_f), pri_gpc0_tpc0_sm_l1_data_ecc_control_inject_uncorrected_err_f),
NVGPU_ECC_ERR("icache_l0_data_ecc_uncorrected", NVGPU_ECC_ERR("icache_l0_data_ecc_uncorrected",
gv11b_gr_intr_inject_sm_ecc_error, gv11b_gr_intr_inject_sm_ecc_error,
gr_pri_gpc0_tpc0_sm_icache_ecc_control_r, pri_gpc0_tpc0_sm_icache_ecc_control_r,
gr_pri_gpc0_tpc0_sm_icache_ecc_control_inject_uncorrected_err_f), pri_gpc0_tpc0_sm_icache_ecc_control_inject_uncorrected_err_f),
}; };
static struct nvgpu_hw_err_inject_info_desc sm_err_desc; static struct nvgpu_hw_err_inject_info_desc sm_err_desc;
@@ -119,8 +224,8 @@ gv11b_gr_intr_get_sm_err_desc(struct gk20a *g)
static struct nvgpu_hw_err_inject_info mmu_ecc_err_desc[] = { static struct nvgpu_hw_err_inject_info mmu_ecc_err_desc[] = {
NVGPU_ECC_ERR("l1tlb_sa_data_ecc_uncorrected", NVGPU_ECC_ERR("l1tlb_sa_data_ecc_uncorrected",
gv11b_gr_intr_inject_mmu_ecc_error, gv11b_gr_intr_inject_mmu_ecc_error,
gr_gpc0_mmu_l1tlb_ecc_control_r, pri_gpc0_mmu_l1tlb_ecc_control_r,
gr_gpc0_mmu_l1tlb_ecc_control_inject_uncorrected_err_f), pri_gpc0_mmu_l1tlb_ecc_control_inject_uncorrected_err_f),
}; };
static struct nvgpu_hw_err_inject_info_desc mmu_err_desc; static struct nvgpu_hw_err_inject_info_desc mmu_err_desc;
@@ -139,8 +244,8 @@ gv11b_gr_intr_get_mmu_err_desc(struct gk20a *g)
static struct nvgpu_hw_err_inject_info gcc_ecc_err_desc[] = { static struct nvgpu_hw_err_inject_info gcc_ecc_err_desc[] = {
NVGPU_ECC_ERR("l15_ecc_uncorrected", NVGPU_ECC_ERR("l15_ecc_uncorrected",
gv11b_gr_intr_inject_gcc_ecc_error, gv11b_gr_intr_inject_gcc_ecc_error,
gr_pri_gpc0_gcc_l15_ecc_control_r, pri_gpc0_gcc_l15_ecc_control_r,
gr_pri_gpc0_gcc_l15_ecc_control_inject_uncorrected_err_f), pri_gpc0_gcc_l15_ecc_control_inject_uncorrected_err_f),
}; };
static struct nvgpu_hw_err_inject_info_desc gcc_err_desc; static struct nvgpu_hw_err_inject_info_desc gcc_err_desc;

View File

@@ -833,14 +833,14 @@ static const struct gpu_ops gm20b_ops = {
.flcn_setup_boot_config = gm20b_pmu_flcn_setup_boot_config, .flcn_setup_boot_config = gm20b_pmu_flcn_setup_boot_config,
.pmu_enable_irq = gk20a_pmu_enable_irq, .pmu_enable_irq = gk20a_pmu_enable_irq,
.pmu_setup_elpg = gm20b_pmu_setup_elpg, .pmu_setup_elpg = gm20b_pmu_setup_elpg,
.pmu_get_queue_head = pwr_pmu_queue_head_r, .pmu_get_queue_head = gm20b_pmu_queue_head_r,
.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v, .pmu_get_queue_head_size = gm20b_pmu_queue_head__size_1_v,
.pmu_get_queue_tail = pwr_pmu_queue_tail_r, .pmu_get_queue_tail = gm20b_pmu_queue_tail_r,
.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v, .pmu_get_queue_tail_size = gm20b_pmu_queue_tail__size_1_v,
.pmu_queue_head = gk20a_pmu_queue_head, .pmu_queue_head = gk20a_pmu_queue_head,
.pmu_queue_tail = gk20a_pmu_queue_tail, .pmu_queue_tail = gk20a_pmu_queue_tail,
.pmu_msgq_tail = gk20a_pmu_msgq_tail, .pmu_msgq_tail = gk20a_pmu_msgq_tail,
.pmu_mutex_size = pwr_pmu_mutex__size_1_v, .pmu_mutex_size = gm20b_pmu_mutex__size_1_v,
.pmu_mutex_owner = gk20a_pmu_mutex_owner, .pmu_mutex_owner = gk20a_pmu_mutex_owner,
.pmu_mutex_acquire = gk20a_pmu_mutex_acquire, .pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
.pmu_mutex_release = gk20a_pmu_mutex_release, .pmu_mutex_release = gk20a_pmu_mutex_release,

View File

@@ -914,15 +914,15 @@ static const struct gpu_ops gp10b_ops = {
.setup_apertures = gm20b_pmu_setup_apertures, .setup_apertures = gm20b_pmu_setup_apertures,
.secured_pmu_start = gm20b_secured_pmu_start, .secured_pmu_start = gm20b_secured_pmu_start,
.pmu_setup_elpg = gp10b_pmu_setup_elpg, .pmu_setup_elpg = gp10b_pmu_setup_elpg,
.pmu_get_queue_head = pwr_pmu_queue_head_r, .pmu_get_queue_head = gp10b_pmu_queue_head_r,
.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v, .pmu_get_queue_head_size = gp10b_pmu_queue_head__size_1_v,
.pmu_get_queue_tail = pwr_pmu_queue_tail_r, .pmu_get_queue_tail = gp10b_pmu_queue_tail_r,
.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v, .pmu_get_queue_tail_size = gp10b_pmu_queue_tail__size_1_v,
.pmu_reset = nvgpu_pmu_reset, .pmu_reset = nvgpu_pmu_reset,
.pmu_queue_head = gk20a_pmu_queue_head, .pmu_queue_head = gk20a_pmu_queue_head,
.pmu_queue_tail = gk20a_pmu_queue_tail, .pmu_queue_tail = gk20a_pmu_queue_tail,
.pmu_msgq_tail = gk20a_pmu_msgq_tail, .pmu_msgq_tail = gk20a_pmu_msgq_tail,
.pmu_mutex_size = pwr_pmu_mutex__size_1_v, .pmu_mutex_size = gp10b_pmu_mutex__size_1_v,
.pmu_mutex_owner = gk20a_pmu_mutex_owner, .pmu_mutex_owner = gk20a_pmu_mutex_owner,
.pmu_mutex_acquire = gk20a_pmu_mutex_acquire, .pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
.pmu_mutex_release = gk20a_pmu_mutex_release, .pmu_mutex_release = gk20a_pmu_mutex_release,

View File

@@ -1088,15 +1088,15 @@ static const struct gpu_ops gv11b_ops = {
.pmu_is_interrupted = gk20a_pmu_is_interrupted, .pmu_is_interrupted = gk20a_pmu_is_interrupted,
.pmu_isr = gk20a_pmu_isr, .pmu_isr = gk20a_pmu_isr,
/* queue */ /* queue */
.pmu_get_queue_head = pwr_pmu_queue_head_r, .pmu_get_queue_head = gv11b_pmu_queue_head_r,
.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v, .pmu_get_queue_head_size = gv11b_pmu_queue_head__size_1_v,
.pmu_get_queue_tail = pwr_pmu_queue_tail_r, .pmu_get_queue_tail = gv11b_pmu_queue_tail_r,
.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v, .pmu_get_queue_tail_size = gv11b_pmu_queue_tail__size_1_v,
.pmu_queue_head = gk20a_pmu_queue_head, .pmu_queue_head = gk20a_pmu_queue_head,
.pmu_queue_tail = gk20a_pmu_queue_tail, .pmu_queue_tail = gk20a_pmu_queue_tail,
.pmu_msgq_tail = gk20a_pmu_msgq_tail, .pmu_msgq_tail = gk20a_pmu_msgq_tail,
/* mutex */ /* mutex */
.pmu_mutex_size = pwr_pmu_mutex__size_1_v, .pmu_mutex_size = gv11b_pmu_mutex__size_1_v,
.pmu_mutex_owner = gk20a_pmu_mutex_owner, .pmu_mutex_owner = gk20a_pmu_mutex_owner,
.pmu_mutex_acquire = gk20a_pmu_mutex_acquire, .pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
.pmu_mutex_release = gk20a_pmu_mutex_release, .pmu_mutex_release = gk20a_pmu_mutex_release,

View File

@@ -1109,7 +1109,7 @@ static const struct gpu_ops tu104_ops = {
.pmu = { .pmu = {
.falcon_base_addr = gp106_pmu_falcon_base_addr, .falcon_base_addr = gp106_pmu_falcon_base_addr,
.pmu_queue_tail = gk20a_pmu_queue_tail, .pmu_queue_tail = gk20a_pmu_queue_tail,
.pmu_get_queue_head = pwr_pmu_queue_head_r, .pmu_get_queue_head = tu104_pmu_queue_head_r,
.pmu_mutex_release = gk20a_pmu_mutex_release, .pmu_mutex_release = gk20a_pmu_mutex_release,
.pmu_is_interrupted = gk20a_pmu_is_interrupted, .pmu_is_interrupted = gk20a_pmu_is_interrupted,
.pmu_isr = gk20a_pmu_isr, .pmu_isr = gk20a_pmu_isr,
@@ -1127,15 +1127,15 @@ static const struct gpu_ops tu104_ops = {
.pmu_mutex_owner = gk20a_pmu_mutex_owner, .pmu_mutex_owner = gk20a_pmu_mutex_owner,
.pmu_mutex_acquire = gk20a_pmu_mutex_acquire, .pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
.pmu_msgq_tail = gk20a_pmu_msgq_tail, .pmu_msgq_tail = gk20a_pmu_msgq_tail,
.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v, .pmu_get_queue_head_size = tu104_pmu_queue_head__size_1_v,
.pmu_reset = nvgpu_pmu_reset, .pmu_reset = nvgpu_pmu_reset,
.pmu_queue_head = gk20a_pmu_queue_head, .pmu_queue_head = gk20a_pmu_queue_head,
.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v, .pmu_get_queue_tail_size = tu104_pmu_queue_tail__size_1_v,
.reset_engine = gp106_pmu_engine_reset, .reset_engine = gp106_pmu_engine_reset,
.write_dmatrfbase = gp10b_write_dmatrfbase, .write_dmatrfbase = gp10b_write_dmatrfbase,
.pmu_mutex_size = pwr_pmu_mutex__size_1_v, .pmu_mutex_size = tu104_pmu_mutex__size_1_v,
.is_engine_in_reset = gp106_pmu_is_engine_in_reset, .is_engine_in_reset = gp106_pmu_is_engine_in_reset,
.pmu_get_queue_tail = pwr_pmu_queue_tail_r, .pmu_get_queue_tail = tu104_pmu_queue_tail_r,
.get_irqdest = gk20a_pmu_get_irqdest, .get_irqdest = gk20a_pmu_get_irqdest,
.handle_ext_irq = gv11b_pmu_handle_ext_irq, .handle_ext_irq = gv11b_pmu_handle_ext_irq,
.is_debug_mode_enabled = gm20b_pmu_is_debug_mode_en, .is_debug_mode_enabled = gm20b_pmu_is_debug_mode_en,

View File

@@ -34,15 +34,30 @@
#include <nvgpu/utils.h> #include <nvgpu/utils.h>
/*
 * Addressable wrappers around the ltc_ltc0_lts0_* hw-header ECC accessors,
 * used as function pointers in the ltc_ecc_err_desc NVGPU_ECC_ERR entries
 * below.
 */
static inline u32 ltc0_lts0_l1_cache_ecc_control_r(void)
{
return ltc_ltc0_lts0_l1_cache_ecc_control_r();
}
static inline u32 ltc0_lts0_l1_cache_ecc_control_inject_corrected_err_f(u32 v)
{
return ltc_ltc0_lts0_l1_cache_ecc_control_inject_corrected_err_f(v);
}
static inline u32 ltc0_lts0_l1_cache_ecc_control_inject_uncorrected_err_f(u32 v)
{
return ltc_ltc0_lts0_l1_cache_ecc_control_inject_uncorrected_err_f(v);
}
static struct nvgpu_hw_err_inject_info ltc_ecc_err_desc[] = { static struct nvgpu_hw_err_inject_info ltc_ecc_err_desc[] = {
NVGPU_ECC_ERR("cache_rstg_ecc_corrected", NVGPU_ECC_ERR("cache_rstg_ecc_corrected",
gv11b_ltc_inject_ecc_error, gv11b_ltc_inject_ecc_error,
ltc_ltc0_lts0_l1_cache_ecc_control_r, ltc0_lts0_l1_cache_ecc_control_r,
ltc_ltc0_lts0_l1_cache_ecc_control_inject_corrected_err_f), ltc0_lts0_l1_cache_ecc_control_inject_corrected_err_f),
NVGPU_ECC_ERR("cache_rstg_ecc_uncorrected", NVGPU_ECC_ERR("cache_rstg_ecc_uncorrected",
gv11b_ltc_inject_ecc_error, gv11b_ltc_inject_ecc_error,
ltc_ltc0_lts0_l1_cache_ecc_control_r, ltc0_lts0_l1_cache_ecc_control_r,
ltc_ltc0_lts0_l1_cache_ecc_control_inject_uncorrected_err_f), ltc0_lts0_l1_cache_ecc_control_inject_uncorrected_err_f),
}; };
static struct nvgpu_hw_err_inject_info_desc ltc_err_desc; static struct nvgpu_hw_err_inject_info_desc ltc_err_desc;

View File

@@ -233,3 +233,28 @@ void gm20b_clear_pmu_bar0_host_err_status(struct gk20a *g)
status = gk20a_readl(g, pwr_pmu_bar0_host_error_r()); status = gk20a_readl(g, pwr_pmu_bar0_host_error_r());
gk20a_writel(g, pwr_pmu_bar0_host_error_r(), status); gk20a_writel(g, pwr_pmu_bar0_host_error_r(), status);
} }
/*
 * Exported wrappers around the pwr_pmu_* hw-header accessors for the PMU
 * queue head/tail registers and mutex count. Installed as function
 * pointers in the gm20b gpu_ops.pmu HAL (pmu_get_queue_head,
 * pmu_get_queue_tail, etc.).
 */
/* Register offset of PMU queue head register @i. */
u32 gm20b_pmu_queue_head_r(u32 i)
{
return pwr_pmu_queue_head_r(i);
}
/* Number of PMU queue head registers. */
u32 gm20b_pmu_queue_head__size_1_v(void)
{
return pwr_pmu_queue_head__size_1_v();
}
/* Register offset of PMU queue tail register @i. */
u32 gm20b_pmu_queue_tail_r(u32 i)
{
return pwr_pmu_queue_tail_r(i);
}
/* Number of PMU queue tail registers. */
u32 gm20b_pmu_queue_tail__size_1_v(void)
{
return pwr_pmu_queue_tail__size_1_v();
}
/* Number of PMU HW mutexes. */
u32 gm20b_pmu_mutex__size_1_v(void)
{
return pwr_pmu_mutex__size_1_v();
}

View File

@@ -37,5 +37,10 @@ void gm20b_pmu_flcn_setup_boot_config(struct gk20a *g);
void gm20b_secured_pmu_start(struct gk20a *g); void gm20b_secured_pmu_start(struct gk20a *g);
bool gm20b_is_pmu_supported(struct gk20a *g); bool gm20b_is_pmu_supported(struct gk20a *g);
void gm20b_clear_pmu_bar0_host_err_status(struct gk20a *g); void gm20b_clear_pmu_bar0_host_err_status(struct gk20a *g);
/* Addressable wrappers around the pwr_pmu_* queue/mutex hw accessors. */
u32 gm20b_pmu_queue_head_r(u32 i);
u32 gm20b_pmu_queue_head__size_1_v(void);
u32 gm20b_pmu_queue_tail_r(u32 i);
u32 gm20b_pmu_queue_tail__size_1_v(void);
u32 gm20b_pmu_mutex__size_1_v(void);
#endif /* PMU_GM20B_H */ #endif /* PMU_GM20B_H */

View File

@@ -163,3 +163,28 @@ bool gp10b_is_pmu_supported(struct gk20a *g)
{ {
return true; return true;
} }
/*
 * Exported wrappers around the pwr_pmu_* hw-header accessors for the PMU
 * queue head/tail registers and mutex count. Installed as function
 * pointers in the gp10b gpu_ops.pmu HAL.
 */
/* Register offset of PMU queue head register @i. */
u32 gp10b_pmu_queue_head_r(u32 i)
{
return pwr_pmu_queue_head_r(i);
}
/* Number of PMU queue head registers. */
u32 gp10b_pmu_queue_head__size_1_v(void)
{
return pwr_pmu_queue_head__size_1_v();
}
/* Register offset of PMU queue tail register @i. */
u32 gp10b_pmu_queue_tail_r(u32 i)
{
return pwr_pmu_queue_tail_r(i);
}
/* Number of PMU queue tail registers. */
u32 gp10b_pmu_queue_tail__size_1_v(void)
{
return pwr_pmu_queue_tail__size_1_v();
}
/* Number of PMU HW mutexes. */
u32 gp10b_pmu_mutex__size_1_v(void)
{
return pwr_pmu_mutex__size_1_v();
}

View File

@@ -30,5 +30,10 @@ struct gk20a;
bool gp10b_is_pmu_supported(struct gk20a *g); bool gp10b_is_pmu_supported(struct gk20a *g);
void gp10b_pmu_setup_elpg(struct gk20a *g); void gp10b_pmu_setup_elpg(struct gk20a *g);
void gp10b_write_dmatrfbase(struct gk20a *g, u32 addr); void gp10b_write_dmatrfbase(struct gk20a *g, u32 addr);
/* Addressable wrappers around the pwr_pmu_* queue/mutex hw accessors. */
u32 gp10b_pmu_queue_head_r(u32 i);
u32 gp10b_pmu_queue_head__size_1_v(void);
u32 gp10b_pmu_queue_tail_r(u32 i);
u32 gp10b_pmu_queue_tail__size_1_v(void);
u32 gp10b_pmu_mutex__size_1_v(void);
#endif /* PMU_GP10B_H */ #endif /* PMU_GP10B_H */

View File

@@ -356,6 +356,31 @@ u32 gv11b_pmu_get_irqdest(struct gk20a *g)
return intr_dest; return intr_dest;
} }
/*
 * Exported wrappers around the pwr_pmu_* hw-header accessors for the PMU
 * queue head/tail registers and mutex count. Installed as function
 * pointers in the gv11b gpu_ops.pmu HAL.
 */
/* Register offset of PMU queue head register @i. */
u32 gv11b_pmu_queue_head_r(u32 i)
{
return pwr_pmu_queue_head_r(i);
}
/* Number of PMU queue head registers. */
u32 gv11b_pmu_queue_head__size_1_v(void)
{
return pwr_pmu_queue_head__size_1_v();
}
/* Register offset of PMU queue tail register @i. */
u32 gv11b_pmu_queue_tail_r(u32 i)
{
return pwr_pmu_queue_tail_r(i);
}
/* Number of PMU queue tail registers. */
u32 gv11b_pmu_queue_tail__size_1_v(void)
{
return pwr_pmu_queue_tail__size_1_v();
}
/* Number of PMU HW mutexes. */
u32 gv11b_pmu_mutex__size_1_v(void)
{
return pwr_pmu_mutex__size_1_v();
}
#endif #endif
/* error handler */ /* error handler */

View File

@@ -45,6 +45,11 @@ int gv11b_pmu_bootstrap(struct gk20a *g, struct nvgpu_pmu *pmu,
void gv11b_pmu_setup_elpg(struct gk20a *g); void gv11b_pmu_setup_elpg(struct gk20a *g);
u32 gv11b_pmu_get_irqdest(struct gk20a *g); u32 gv11b_pmu_get_irqdest(struct gk20a *g);
void gv11b_pmu_handle_ext_irq(struct gk20a *g, u32 intr0); void gv11b_pmu_handle_ext_irq(struct gk20a *g, u32 intr0);
/* Addressable wrappers around the pwr_pmu_* queue/mutex hw accessors. */
u32 gv11b_pmu_queue_head_r(u32 i);
u32 gv11b_pmu_queue_head__size_1_v(void);
u32 gv11b_pmu_queue_tail_r(u32 i);
u32 gv11b_pmu_queue_tail__size_1_v(void);
u32 gv11b_pmu_mutex__size_1_v(void);
#endif #endif
void gv11b_clear_pmu_bar0_host_err_status(struct gk20a *g); void gv11b_clear_pmu_bar0_host_err_status(struct gk20a *g);

View File

@@ -39,15 +39,30 @@
#define ALIGN_4KB 12 #define ALIGN_4KB 12
/*
 * Addressable wrappers around the pwr_pmu_falcon_* hw-header ECC
 * accessors, used as function pointers in the pmu_ecc_err_desc
 * NVGPU_ECC_ERR entries below.
 */
static inline u32 pmu_falcon_ecc_control_r(void)
{
return pwr_pmu_falcon_ecc_control_r();
}
static inline u32 pmu_falcon_ecc_control_inject_corrected_err_f(u32 v)
{
return pwr_pmu_falcon_ecc_control_inject_corrected_err_f(v);
}
static inline u32 pmu_falcon_ecc_control_inject_uncorrected_err_f(u32 v)
{
return pwr_pmu_falcon_ecc_control_inject_uncorrected_err_f(v);
}
static struct nvgpu_hw_err_inject_info pmu_ecc_err_desc[] = { static struct nvgpu_hw_err_inject_info pmu_ecc_err_desc[] = {
NVGPU_ECC_ERR("falcon_imem_ecc_corrected", NVGPU_ECC_ERR("falcon_imem_ecc_corrected",
gv11b_pmu_inject_ecc_error, gv11b_pmu_inject_ecc_error,
pwr_pmu_falcon_ecc_control_r, pmu_falcon_ecc_control_r,
pwr_pmu_falcon_ecc_control_inject_corrected_err_f), pmu_falcon_ecc_control_inject_corrected_err_f),
NVGPU_ECC_ERR("falcon_imem_ecc_uncorrected", NVGPU_ECC_ERR("falcon_imem_ecc_uncorrected",
gv11b_pmu_inject_ecc_error, gv11b_pmu_inject_ecc_error,
pwr_pmu_falcon_ecc_control_r, pmu_falcon_ecc_control_r,
pwr_pmu_falcon_ecc_control_inject_uncorrected_err_f), pmu_falcon_ecc_control_inject_uncorrected_err_f),
}; };
static struct nvgpu_hw_err_inject_info_desc pmu_err_desc; static struct nvgpu_hw_err_inject_info_desc pmu_err_desc;

View File

@@ -26,6 +26,8 @@
#include "pmu_tu104.h" #include "pmu_tu104.h"
#include <nvgpu/hw/tu104/hw_pwr_tu104.h>
bool tu104_is_pmu_supported(struct gk20a *g) bool tu104_is_pmu_supported(struct gk20a *g)
{ {
#ifdef CONFIG_NVGPU_SIM #ifdef CONFIG_NVGPU_SIM
@@ -39,3 +41,27 @@ bool tu104_is_pmu_supported(struct gk20a *g)
} }
} }
/*
 * Exported wrappers around the pwr_pmu_* hw-header accessors for the PMU
 * queue head/tail registers and mutex count. Installed as function
 * pointers in the tu104 gpu_ops.pmu HAL.
 */
/* Register offset of PMU queue head register @i. */
u32 tu104_pmu_queue_head_r(u32 i)
{
return pwr_pmu_queue_head_r(i);
}
/* Number of PMU queue head registers. */
u32 tu104_pmu_queue_head__size_1_v(void)
{
return pwr_pmu_queue_head__size_1_v();
}
/* Register offset of PMU queue tail register @i. */
u32 tu104_pmu_queue_tail_r(u32 i)
{
return pwr_pmu_queue_tail_r(i);
}
/* Number of PMU queue tail registers. */
u32 tu104_pmu_queue_tail__size_1_v(void)
{
return pwr_pmu_queue_tail__size_1_v();
}
/* Number of PMU HW mutexes. */
u32 tu104_pmu_mutex__size_1_v(void)
{
return pwr_pmu_mutex__size_1_v();
}

View File

@@ -26,5 +26,10 @@
struct gk20a; struct gk20a;
bool tu104_is_pmu_supported(struct gk20a *g); bool tu104_is_pmu_supported(struct gk20a *g);
/* Addressable wrappers around the pwr_pmu_* queue/mutex hw accessors. */
u32 tu104_pmu_queue_head_r(u32 i);
u32 tu104_pmu_queue_head__size_1_v(void);
u32 tu104_pmu_queue_tail_r(u32 i);
u32 tu104_pmu_queue_tail__size_1_v(void);
u32 tu104_pmu_mutex__size_1_v(void);
#endif /* PMU_TU104_H */ #endif /* PMU_TU104_H */