gpu: nvgpu: reduce code complexity in ltc intr unit

Reduced the code complexity of the gv11b_ltc_intr_handle_lts_interrupts
function from 19 to 7 by factoring its logic into the following helper
functions:
gv11b_ltc_intr_init_counters: code complexity 5
gv11b_ltc_intr_handle_rstg_ecc_interrupts: code complexity 3
gv11b_ltc_intr_handle_tstg_ecc_interrupts: code complexity 3
gv11b_ltc_intr_handle_dstg_ecc_interrupts: code complexity 5

JIRA NVGPU-3976

Change-Id: Iad3aad58c28255629087ecba943118f040cdbbd5
Signed-off-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2192091
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Commit: 07b86032ef (parent: 6766a7c09f)
Author: Seshendra Gadagottu <sgadagottu@nvidia.com>, 2019-09-06 16:04:09 -07:00
Committed by: Alex Waterman

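The idea behind the change is to split one monolithic interrupt handler, whose
cyclomatic complexity grows with every independent "if" branch, into small
per-subunit helpers so that each function keeps its own branch count low.
Below is a minimal, self-contained sketch of that pattern; every identifier in
it (handle_a_errors, ERR_A_CORRECTED, and so on) is invented for illustration
and is not part of nvgpu. The real change follows in the diff.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical status bits for two error-reporting subunits. */
#define ERR_A_CORRECTED   (1U << 0)
#define ERR_A_UNCORRECTED (1U << 1)
#define ERR_B_CORRECTED   (1U << 2)
#define ERR_B_UNCORRECTED (1U << 3)

/* Each helper owns the two branches for one subunit (complexity 3). */
static void handle_a_errors(uint32_t status)
{
	if ((status & ERR_A_CORRECTED) != 0U) {
		printf("A: corrected error\n");
	}
	if ((status & ERR_A_UNCORRECTED) != 0U) {
		printf("A: uncorrected error\n");
	}
}

static void handle_b_errors(uint32_t status)
{
	if ((status & ERR_B_CORRECTED) != 0U) {
		printf("B: corrected error\n");
	}
	if ((status & ERR_B_UNCORRECTED) != 0U) {
		printf("B: uncorrected error\n");
	}
}

/*
 * The top-level handler is now straight-line code: its cyclomatic
 * complexity no longer grows with the number of subunits it services.
 */
static void handle_interrupt(uint32_t status)
{
	handle_a_errors(status);
	handle_b_errors(status);
}

int main(void)
{
	handle_interrupt(ERR_A_CORRECTED | ERR_B_UNCORRECTED);
	return 0;
}

Because the decision points move into the helpers, the caller's complexity
drops to the number of helper calls plus one, which is exactly the shape of
the refactoring in the diff below.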

@@ -78,8 +78,124 @@ void gv11b_ltc_intr_en_illegal_compstat(struct gk20a *g, bool enable)
 	nvgpu_writel(g, ltc_ltcs_ltss_intr_r(), val);
 }
 
+static void gv11b_ltc_intr_init_counters(struct gk20a *g,
+			u32 corrected_delta, u32 corrected_overflow,
+			u32 uncorrected_delta, u32 uncorrected_overflow,
+			u32 offset)
+{
+	if ((corrected_delta > 0U) || (corrected_overflow != 0U)) {
+		nvgpu_writel_check(g,
+			nvgpu_safe_add_u32(
+			ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r(),
+			offset), 0);
+	}
+	if ((uncorrected_delta > 0U) || (uncorrected_overflow != 0U)) {
+		nvgpu_writel_check(g,
+			nvgpu_safe_add_u32(
+			ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r(),
+			offset), 0);
+	}
+}
+
+static void gv11b_ltc_intr_handle_rstg_ecc_interrupts(struct gk20a *g,
+			u32 ltc, u32 slice, u32 ecc_status, u32 ecc_addr)
+{
+	if ((ecc_status &
+		ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_m())
+			!= 0U) {
+		(void) nvgpu_report_ecc_err(g,
+			NVGPU_ERR_MODULE_LTC,
+			(ltc << 8U) | slice,
+			GPU_LTC_CACHE_RSTG_ECC_CORRECTED, ecc_addr,
+			g->ecc.ltc.ecc_sec_count[ltc][slice].counter);
+		nvgpu_log(g, gpu_dbg_intr, "rstg ecc error corrected");
+	}
+	if ((ecc_status &
+		ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m())
+			!= 0U) {
+		(void) nvgpu_report_ecc_err(g,
+			NVGPU_ERR_MODULE_LTC,
+			(ltc << 8U) | slice,
+			GPU_LTC_CACHE_RSTG_ECC_UNCORRECTED, ecc_addr,
+			g->ecc.ltc.ecc_ded_count[ltc][slice].counter);
+		nvgpu_log(g, gpu_dbg_intr, "rstg ecc error uncorrected");
+	}
+}
+
+static void gv11b_ltc_intr_handle_tstg_ecc_interrupts(struct gk20a *g,
+			u32 ltc, u32 slice, u32 ecc_status, u32 ecc_addr)
+{
+	if ((ecc_status &
+		ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_m())
+			!= 0U) {
+		(void) nvgpu_report_ecc_err(g,
+			NVGPU_ERR_MODULE_LTC,
+			(ltc << 8U) | slice,
+			GPU_LTC_CACHE_TSTG_ECC_CORRECTED, ecc_addr,
+			g->ecc.ltc.ecc_sec_count[ltc][slice].counter);
+		nvgpu_log(g, gpu_dbg_intr, "tstg ecc error corrected");
+	}
+	if ((ecc_status &
+		ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_m())
+			!= 0U) {
+		(void) nvgpu_report_ecc_err(g,
+			NVGPU_ERR_MODULE_LTC,
+			(ltc << 8U) | slice,
+			GPU_LTC_CACHE_TSTG_ECC_UNCORRECTED, ecc_addr,
+			g->ecc.ltc.ecc_ded_count[ltc][slice].counter);
+		nvgpu_log(g, gpu_dbg_intr, "tstg ecc error uncorrected");
+	}
+}
+
+static void gv11b_ltc_intr_handle_dstg_ecc_interrupts(struct gk20a *g,
+			u32 ltc, u32 slice, u32 ecc_status, u32 dstg_ecc_addr,
+			u32 ecc_addr)
+{
+	if ((ecc_status &
+		ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_m())
+			!= 0U) {
+		if ((dstg_ecc_addr &
+			ltc_ltc0_lts0_dstg_ecc_address_info_ram_m())
+				== 0U) {
+			(void) nvgpu_report_ecc_err(g,
+				NVGPU_ERR_MODULE_LTC,
+				(ltc << 8U) | slice,
+				GPU_LTC_CACHE_DSTG_ECC_CORRECTED, ecc_addr,
+				g->ecc.ltc.ecc_sec_count[ltc][slice].counter);
+		} else {
+			(void) nvgpu_report_ecc_err(g,
+				NVGPU_ERR_MODULE_LTC,
+				(ltc << 8U) | slice,
+				GPU_LTC_CACHE_DSTG_BE_ECC_CORRECTED, ecc_addr,
+				g->ecc.ltc.ecc_sec_count[ltc][slice].counter);
+		}
+		nvgpu_log(g, gpu_dbg_intr, "dstg ecc error corrected");
+	}
+	if ((ecc_status &
+		ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_m())
+			!= 0U) {
+		if ((dstg_ecc_addr &
+			ltc_ltc0_lts0_dstg_ecc_address_info_ram_m()) == 0U) {
+			(void) nvgpu_report_ecc_err(g,
+				NVGPU_ERR_MODULE_LTC,
+				(ltc << 8U) | slice,
+				GPU_LTC_CACHE_DSTG_ECC_UNCORRECTED, ecc_addr,
+				g->ecc.ltc.ecc_ded_count[ltc][slice].counter);
+		} else {
+			(void) nvgpu_report_ecc_err(g,
+				NVGPU_ERR_MODULE_LTC,
+				(ltc << 8U) | slice,
+				GPU_LTC_CACHE_DSTG_BE_ECC_UNCORRECTED, ecc_addr,
+				g->ecc.ltc.ecc_ded_count[ltc][slice].counter);
+		}
+		nvgpu_log(g, gpu_dbg_intr, "dstg ecc error uncorrected");
+	}
+}
+
 static void gv11b_ltc_intr_handle_lts_interrupts(struct gk20a *g,
-			u32 ltc,u32 slice)
+			u32 ltc, u32 slice)
 {
 	u32 offset;
 	u32 ltc_intr3;
@@ -125,19 +241,9 @@ static void gv11b_ltc_intr_handle_lts_interrupts(struct gk20a *g,
 	uncorrected_overflow = ecc_status &
 		ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_total_counter_overflow_m();
 
 	/* clear the interrupt */
-	if ((corrected_delta > 0U) || (corrected_overflow != 0U)) {
-		nvgpu_writel_check(g,
-			nvgpu_safe_add_u32(
-			ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r(),
-			offset), 0);
-	}
-	if ((uncorrected_delta > 0U) || (uncorrected_overflow != 0U)) {
-		nvgpu_writel_check(g,
-			nvgpu_safe_add_u32(
-			ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r(),
-			offset), 0);
-	}
+	gv11b_ltc_intr_init_counters(g,
+		corrected_delta, corrected_overflow,
+		uncorrected_delta, uncorrected_overflow, offset);
 
 	nvgpu_writel_check(g,
 		nvgpu_safe_add_u32(
@@ -176,78 +282,15 @@ static void gv11b_ltc_intr_handle_lts_interrupts(struct gk20a *g,
 		slice = slice & 0xFFU;
 	}
 
-	if ((ecc_status &
-		ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_m()) != 0U) {
-		(void) nvgpu_report_ecc_err(g,
-			NVGPU_ERR_MODULE_LTC,
-			(ltc << 8U) | slice,
-			GPU_LTC_CACHE_RSTG_ECC_CORRECTED, ecc_addr,
-			g->ecc.ltc.ecc_sec_count[ltc][slice].counter);
-		nvgpu_log(g, gpu_dbg_intr, "rstg ecc error corrected");
-	}
-	if ((ecc_status &
-		ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m()) != 0U) {
-		(void) nvgpu_report_ecc_err(g,
-			NVGPU_ERR_MODULE_LTC,
-			(ltc << 8U) | slice,
-			GPU_LTC_CACHE_RSTG_ECC_UNCORRECTED, ecc_addr,
-			g->ecc.ltc.ecc_ded_count[ltc][slice].counter);
-		nvgpu_log(g, gpu_dbg_intr, "rstg ecc error uncorrected");
-	}
-	if ((ecc_status &
-		ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_m()) != 0U) {
-		(void) nvgpu_report_ecc_err(g,
-			NVGPU_ERR_MODULE_LTC,
-			(ltc << 8U) | slice,
-			GPU_LTC_CACHE_TSTG_ECC_CORRECTED, ecc_addr,
-			g->ecc.ltc.ecc_sec_count[ltc][slice].counter);
-		nvgpu_log(g, gpu_dbg_intr, "tstg ecc error corrected");
-	}
-	if ((ecc_status &
-		ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_m()) != 0U) {
-		(void) nvgpu_report_ecc_err(g,
-			NVGPU_ERR_MODULE_LTC,
-			(ltc << 8U) | slice,
-			GPU_LTC_CACHE_TSTG_ECC_UNCORRECTED, ecc_addr,
-			g->ecc.ltc.ecc_ded_count[ltc][slice].counter);
-		nvgpu_log(g, gpu_dbg_intr,
-			"tstg ecc error uncorrected");
-	}
-	if ((ecc_status &
-		ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_m()) != 0U) {
-		if ((dstg_ecc_addr &
-			ltc_ltc0_lts0_dstg_ecc_address_info_ram_m()) == 0U) {
-			(void) nvgpu_report_ecc_err(g,
-				NVGPU_ERR_MODULE_LTC,
-				(ltc << 8U) | slice,
-				GPU_LTC_CACHE_DSTG_ECC_CORRECTED, ecc_addr,
-				g->ecc.ltc.ecc_sec_count[ltc][slice].counter);
-		} else {
-			(void) nvgpu_report_ecc_err(g,
-				NVGPU_ERR_MODULE_LTC,
-				(ltc << 8U) | slice,
-				GPU_LTC_CACHE_DSTG_BE_ECC_CORRECTED, ecc_addr,
-				g->ecc.ltc.ecc_sec_count[ltc][slice].counter);
-		}
-		nvgpu_log(g, gpu_dbg_intr, "dstg ecc error corrected");
-	}
-	if ((ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_m()) != 0U) {
-		if ((dstg_ecc_addr & ltc_ltc0_lts0_dstg_ecc_address_info_ram_m()) == 0U) {
-			(void) nvgpu_report_ecc_err(g,
-				NVGPU_ERR_MODULE_LTC,
-				(ltc << 8U) | slice,
-				GPU_LTC_CACHE_DSTG_ECC_UNCORRECTED, ecc_addr,
-				g->ecc.ltc.ecc_ded_count[ltc][slice].counter);
-		} else {
-			(void) nvgpu_report_ecc_err(g,
-				NVGPU_ERR_MODULE_LTC,
-				(ltc << 8U) | slice,
-				GPU_LTC_CACHE_DSTG_BE_ECC_UNCORRECTED, ecc_addr,
-				g->ecc.ltc.ecc_ded_count[ltc][slice].counter);
-		}
-		nvgpu_log(g, gpu_dbg_intr,
-			"dstg ecc error uncorrected");
-	}
+	gv11b_ltc_intr_handle_rstg_ecc_interrupts(g, ltc, slice,
+				ecc_status, ecc_addr);
+	gv11b_ltc_intr_handle_tstg_ecc_interrupts(g, ltc, slice,
+				ecc_status, ecc_addr);
+	gv11b_ltc_intr_handle_dstg_ecc_interrupts(g, ltc, slice,
+				ecc_status, dstg_ecc_addr,
+				ecc_addr);
 
 	if ((corrected_overflow != 0U) ||
 		(uncorrected_overflow != 0U)) {