gpu: nvgpu: fix CERT-C errors in hal.ltc.intr driver

Fixed CERT-C issues in the hal.ltc.intr driver by replacing raw u32
arithmetic (register-offset add/multiply and loop-counter increments)
with the nvgpu safe-ops helpers nvgpu_safe_add_u32 / nvgpu_safe_mult_u32.

JIRA NVGPU-3623

Change-Id: I80c3dd9e42dd20bb853db6e60d6a1fd36415ab36
Signed-off-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2134686
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Seshendra Gadagottu
2019-06-09 22:14:54 -07:00
committed by mobile promotions
parent 330bf3ac2a
commit bf68ff68f0
3 changed files with 61 additions and 36 deletions

View File

@@ -25,6 +25,7 @@
#include <nvgpu/ltc.h>
#include <nvgpu/io.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/safe_ops.h>
#include "ltc_intr_gm20b.h"
@@ -50,18 +51,21 @@ static void gm20b_ltc_intr_handle_lts_interrupts(struct gk20a *g,
u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
ltc_intr = nvgpu_readl(g, ltc_ltc0_lts0_intr_r() +
ltc_stride * ltc + lts_stride * slice);
ltc_intr = nvgpu_readl(g, nvgpu_safe_add_u32(ltc_ltc0_lts0_intr_r(),
nvgpu_safe_add_u32(nvgpu_safe_mult_u32(ltc_stride, ltc),
nvgpu_safe_mult_u32(lts_stride, slice))));
nvgpu_err(g, "ltc%d, slice %d: %08x", ltc, slice, ltc_intr);
nvgpu_writel(g, ltc_ltc0_lts0_intr_r() + ltc_stride * ltc +
lts_stride * slice, ltc_intr);
nvgpu_writel(g, nvgpu_safe_add_u32(ltc_ltc0_lts0_intr_r(),
nvgpu_safe_add_u32(nvgpu_safe_mult_u32(ltc_stride, ltc),
nvgpu_safe_mult_u32(lts_stride, slice))), ltc_intr);
}
void gm20b_ltc_intr_isr(struct gk20a *g, u32 ltc)
{
u32 slice;
for (slice = 0U; slice < g->ltc->slices_per_ltc; slice++) {
for (slice = 0U; slice < g->ltc->slices_per_ltc; slice =
nvgpu_safe_add_u32(slice, 1U)) {
gm20b_ltc_intr_handle_lts_interrupts(g, ltc, slice);
}
}

View File

@@ -26,6 +26,7 @@
#include <nvgpu/log.h>
#include <nvgpu/io.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/safe_ops.h>
#include <nvgpu/hw/gp10b/hw_ltc_gp10b.h>
@@ -39,8 +40,10 @@ void gp10b_ltc_intr_handle_lts_interrupts(struct gk20a *g, u32 ltc, u32 slice)
u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
offset = ltc_stride * ltc + lts_stride * slice;
ltc_intr = nvgpu_readl(g, ltc_ltc0_lts0_intr_r() + offset);
offset = nvgpu_safe_add_u32(nvgpu_safe_mult_u32(ltc_stride, ltc),
nvgpu_safe_mult_u32(lts_stride, slice));
ltc_intr = nvgpu_readl(g, nvgpu_safe_add_u32(
ltc_ltc0_lts0_intr_r(), offset));
/* Detect and handle ECC errors */
if ((ltc_intr &
@@ -51,15 +54,18 @@ void gp10b_ltc_intr_handle_lts_interrupts(struct gk20a *g, u32 ltc, u32 slice)
"Single bit error detected in GPU L2!");
ecc_stats_reg_val =
nvgpu_readl(g,
ltc_ltc0_lts0_dstg_ecc_report_r() + offset);
g->ecc.ltc.ecc_sec_count[ltc][slice].counter +=
ltc_ltc0_lts0_dstg_ecc_report_sec_count_v(
ecc_stats_reg_val);
nvgpu_readl(g, nvgpu_safe_add_u32(
ltc_ltc0_lts0_dstg_ecc_report_r(), offset));
g->ecc.ltc.ecc_sec_count[ltc][slice].counter =
nvgpu_safe_add_u32(
g->ecc.ltc.ecc_sec_count[ltc][slice].counter,
ltc_ltc0_lts0_dstg_ecc_report_sec_count_v(
ecc_stats_reg_val));
ecc_stats_reg_val &=
~(ltc_ltc0_lts0_dstg_ecc_report_sec_count_m());
nvgpu_writel_check(g,
ltc_ltc0_lts0_dstg_ecc_report_r() + offset,
nvgpu_safe_add_u32(
ltc_ltc0_lts0_dstg_ecc_report_r(), offset),
ecc_stats_reg_val);
if (g->ops.mm.cache.l2_flush(g, true) != 0) {
nvgpu_err(g, "l2_flush failed");
@@ -75,26 +81,32 @@ void gp10b_ltc_intr_handle_lts_interrupts(struct gk20a *g, u32 ltc, u32 slice)
ecc_stats_reg_val =
nvgpu_readl(g,
ltc_ltc0_lts0_dstg_ecc_report_r() + offset);
g->ecc.ltc.ecc_ded_count[ltc][slice].counter +=
ltc_ltc0_lts0_dstg_ecc_report_ded_count_v(
ecc_stats_reg_val);
g->ecc.ltc.ecc_ded_count[ltc][slice].counter =
nvgpu_safe_add_u32(
g->ecc.ltc.ecc_ded_count[ltc][slice].counter,
ltc_ltc0_lts0_dstg_ecc_report_ded_count_v(
ecc_stats_reg_val));
ecc_stats_reg_val &=
~(ltc_ltc0_lts0_dstg_ecc_report_ded_count_m());
nvgpu_writel_check(g,
ltc_ltc0_lts0_dstg_ecc_report_r() + offset,
nvgpu_safe_add_u32(
ltc_ltc0_lts0_dstg_ecc_report_r(), offset),
ecc_stats_reg_val);
}
nvgpu_err(g, "ltc%d, slice %d: %08x", ltc, slice, ltc_intr);
nvgpu_writel_check(g, ltc_ltc0_lts0_intr_r() +
ltc_stride * ltc + lts_stride * slice, ltc_intr);
nvgpu_writel_check(g, nvgpu_safe_add_u32(ltc_ltc0_lts0_intr_r(),
nvgpu_safe_add_u32(nvgpu_safe_mult_u32(ltc_stride, ltc),
nvgpu_safe_mult_u32(lts_stride, slice))),
ltc_intr);
}
void gp10b_ltc_intr_isr(struct gk20a *g, u32 ltc)
{
u32 slice;
for (slice = 0U; slice < g->ltc->slices_per_ltc; slice++) {
for (slice = 0U; slice < g->ltc->slices_per_ltc; slice =
nvgpu_safe_add_u32(slice, 1U)) {
gp10b_ltc_intr_handle_lts_interrupts(g, ltc, slice);
}
}

View File

@@ -24,6 +24,7 @@
#include <nvgpu/io.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/safe_ops.h>
#include <nvgpu/nvgpu_err.h>
#include "ltc_intr_gp10b.h"
@@ -88,9 +89,10 @@ static void gv11b_ltc_intr_handle_lts_interrupts(struct gk20a *g,
u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
offset = ltc_stride * ltc + lts_stride * slice;
ltc_intr3 = nvgpu_readl(g, ltc_ltc0_lts0_intr3_r() +
offset);
offset = nvgpu_safe_add_u32(nvgpu_safe_mult_u32(ltc_stride, ltc),
nvgpu_safe_mult_u32(lts_stride, slice));
ltc_intr3 = nvgpu_readl(g, nvgpu_safe_add_u32(
ltc_ltc0_lts0_intr3_r(), offset));
/* Detect and handle ECC PARITY errors */
if ((ltc_intr3 &
@@ -98,17 +100,19 @@ static void gv11b_ltc_intr_handle_lts_interrupts(struct gk20a *g,
ltc_ltcs_ltss_intr3_ecc_corrected_m())) != 0U) {
ecc_status = nvgpu_readl(g,
ltc_ltc0_lts0_l2_cache_ecc_status_r() + offset);
ecc_addr = nvgpu_readl(g,
ltc_ltc0_lts0_l2_cache_ecc_address_r() + offset);
nvgpu_safe_add_u32(
ltc_ltc0_lts0_l2_cache_ecc_status_r(), offset));
ecc_addr = nvgpu_readl(g, nvgpu_safe_add_u32(
ltc_ltc0_lts0_l2_cache_ecc_address_r(), offset));
dstg_ecc_addr = nvgpu_readl(g,
ltc_ltc0_lts0_dstg_ecc_address_r() + offset);
corrected_cnt = nvgpu_readl(g,
ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r() +
offset);
uncorrected_cnt = nvgpu_readl(g,
ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r() +
offset);
nvgpu_safe_add_u32(
ltc_ltc0_lts0_dstg_ecc_address_r(), offset));
corrected_cnt = nvgpu_readl(g, nvgpu_safe_add_u32(
ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r(),
offset));
uncorrected_cnt = nvgpu_readl(g, nvgpu_safe_add_u32(
ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r(),
offset));
corrected_delta =
ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_v(
@@ -124,15 +128,20 @@ static void gv11b_ltc_intr_handle_lts_interrupts(struct gk20a *g,
/* clear the interrupt */
if ((corrected_delta > 0U) || (corrected_overflow != 0U)) {
nvgpu_writel_check(g,
ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r() + offset, 0);
nvgpu_safe_add_u32(
ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r(),
offset), 0);
}
if ((uncorrected_delta > 0U) || (uncorrected_overflow != 0U)) {
nvgpu_writel_check(g,
ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r() + offset, 0);
nvgpu_safe_add_u32(
ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r(),
offset), 0);
}
nvgpu_writel_check(g,
ltc_ltc0_lts0_l2_cache_ecc_status_r() + offset,
nvgpu_safe_add_u32(
ltc_ltc0_lts0_l2_cache_ecc_status_r(), offset),
ltc_ltc0_lts0_l2_cache_ecc_status_reset_task_f());
/* update counters per slice */