gpu: nvgpu: fix ecc counter free

ECC counter structures are freed without removing the node from the
stats_list. This can lead to invalid access due to dangling pointers.

Update the ecc counter free logic to set the counter pointers to NULL
upon free, to remove the counters from stats_list, and to validate
pointers before freeing.

Also updated some of the ecc init paths where errors were not
propagated to callers and full ecc counter deallocation was not done.

Now, calling a unit's ecc_free from any context (with counters
allocated or not) is harmless, as the requisite checks are in place.

bug 3326612
bug 3345977

Change-Id: I05eb6ed226cff9197ad37776912da9dcb7e0716d
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2565264
Tested-by: Ashish Mhetre <amhetre@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
This commit is contained in:
Sagar Kamble
2021-06-17 11:34:36 +05:30
committed by mobile promotions
parent 2887d06e3b
commit 40064ef1ec
33 changed files with 546 additions and 218 deletions

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -30,26 +30,58 @@ void nvgpu_ecc_stat_add(struct gk20a *g, struct nvgpu_ecc_stat *stat)
nvgpu_init_list_node(&stat->node);
nvgpu_mutex_acquire(&ecc->stats_lock);
nvgpu_list_add_tail(&stat->node, &ecc->stats_list);
ecc->stats_count = nvgpu_safe_add_s32(ecc->stats_count, 1);
nvgpu_mutex_release(&ecc->stats_lock);
}
/*
 * Unlink one ECC stat node from the device-wide stats_list.
 *
 * Counterpart of nvgpu_ecc_stat_add(): removes @stat's node under
 * stats_lock and decrements stats_count, so a counter that is about to
 * be freed leaves no dangling list entry behind.
 */
void nvgpu_ecc_stat_del(struct gk20a *g, struct nvgpu_ecc_stat *stat)
{
struct nvgpu_ecc *ecc = &g->ecc;
/* Serialize list/count updates against concurrent add/del. */
nvgpu_mutex_acquire(&ecc->stats_lock);
nvgpu_list_del(&stat->node);
ecc->stats_count = nvgpu_safe_sub_s32(ecc->stats_count, 1);
nvgpu_mutex_release(&ecc->stats_lock);
}
int nvgpu_ecc_counter_init(struct gk20a *g,
struct nvgpu_ecc_stat **stat, const char *name)
struct nvgpu_ecc_stat **statp, const char *name)
{
struct nvgpu_ecc_stat *stats;
struct nvgpu_ecc_stat *stat;
stats = nvgpu_kzalloc(g, sizeof(*stats));
if (stats == NULL) {
stat = nvgpu_kzalloc(g, sizeof(*stat));
if (stat == NULL) {
nvgpu_err(g, "ecc counter alloc failed");
return -ENOMEM;
}
(void)strncpy(stats->name, name, NVGPU_ECC_STAT_NAME_MAX_SIZE - 1U);
nvgpu_ecc_stat_add(g, stats);
*stat = stats;
(void)strncpy(stat->name, name, NVGPU_ECC_STAT_NAME_MAX_SIZE - 1U);
nvgpu_ecc_stat_add(g, stat);
*statp = stat;
return 0;
}
/*
 * Free a single dynamically allocated ECC counter.
 *
 * Safe to call when *statp is NULL (no-op), so callers may invoke it
 * unconditionally. The counter is unlinked from stats_list before being
 * freed, and the caller's pointer is set to NULL to guard against
 * double free and use-after-free.
 */
void nvgpu_ecc_counter_deinit(struct gk20a *g, struct nvgpu_ecc_stat **statp)
{
struct nvgpu_ecc_stat *stat;
/* Counter was never allocated (or already freed): nothing to do. */
if (*statp == NULL) {
return;
}
stat = *statp;
/* Remove from stats_list first so no dangling node remains. */
nvgpu_ecc_stat_del(g, stat);
nvgpu_kfree(g, stat);
*statp = NULL;
}
/* release all ecc_stat */
void nvgpu_ecc_free(struct gk20a *g)
{
@@ -72,6 +104,10 @@ void nvgpu_ecc_free(struct gk20a *g)
g->ops.pmu.ecc_free(g);
}
nvgpu_mutex_acquire(&ecc->stats_lock);
WARN_ON(!nvgpu_list_empty(&ecc->stats_list));
nvgpu_mutex_release(&ecc->stats_lock);
(void)memset(ecc, 0, sizeof(*ecc));
}
@@ -83,6 +119,7 @@ int nvgpu_ecc_init_support(struct gk20a *g)
return 0;
}
nvgpu_mutex_init(&ecc->stats_lock);
nvgpu_init_list_node(&ecc->stats_list);
return 0;
@@ -125,4 +162,6 @@ void nvgpu_ecc_remove_support(struct gk20a *g)
nvgpu_ecc_sysfs_remove(g);
#endif
nvgpu_ecc_free(g);
nvgpu_mutex_destroy(&g->ecc.stats_lock);
}

View File

@@ -996,7 +996,7 @@ int nvgpu_gr_alloc(struct gk20a *g)
* FECS ECC errors during FECS load need to be handled and reported
* using the ECC counters.
*/
if (g->ops.gr.ecc.fecs_ecc_init != NULL) {
if ((g->ops.gr.ecc.fecs_ecc_init != NULL) && !g->ecc.initialized) {
err = g->ops.gr.ecc.fecs_ecc_init(g);
if (err != 0) {
nvgpu_err(g, "failed to init gr fecs ecc");

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -80,6 +80,7 @@ int nvgpu_ecc_counter_init_per_tpc(struct gk20a *g,
if (stats == NULL) {
return -ENOMEM;
}
for (gpc = 0; gpc < gpc_count; gpc++) {
stats[gpc] = nvgpu_kzalloc(g,
nvgpu_safe_mult_u64(sizeof(*stats[gpc]),
@@ -128,11 +129,10 @@ int nvgpu_ecc_counter_init_per_tpc(struct gk20a *g,
fail:
if (err != 0) {
#ifdef CONFIG_NVGPU_DGPU
while (gpc-- != 0u) {
nvgpu_kfree(g, stats[gpc]);
}
#endif
nvgpu_kfree(g, stats);
}
@@ -178,85 +178,92 @@ int nvgpu_ecc_counter_init_per_gpc(struct gk20a *g,
return 0;
}
/* helper function that frees the count array if non-NULL. */
static void free_ecc_stat_count_array(struct gk20a *g,
struct nvgpu_ecc_stat **stat,
u32 gpc_count)
void nvgpu_ecc_counter_deinit_per_gr(struct gk20a *g,
struct nvgpu_ecc_stat **stats_p)
{
struct nvgpu_ecc_stat *stats = NULL;
u32 i;
if (stat != NULL) {
for (i = 0; i < gpc_count; i++) {
nvgpu_kfree(g, stat[i]);
if (*stats_p != NULL) {
stats = *stats_p;
for (i = 0; i < g->num_gr_instances; i++) {
nvgpu_ecc_stat_del(g, &stats[i]);
}
nvgpu_kfree(g, stat);
nvgpu_kfree(g, stats);
*stats_p = NULL;
}
}
/*
 * Free a per-TPC ECC counter array: a per-GPC array of per-TPC stat
 * arrays, as allocated by nvgpu_ecc_counter_init_per_tpc().
 *
 * Safe to call when *stats_p is NULL (no-op). Each individual stat is
 * unlinked from stats_list before its containing array is freed; inner
 * per-GPC pointers and finally the caller's pointer are set to NULL so
 * a repeated call is harmless.
 */
void nvgpu_ecc_counter_deinit_per_tpc(struct gk20a *g,
struct nvgpu_ecc_stat ***stats_p)
{
struct nvgpu_gr_config *gr_config = nvgpu_gr_get_config_ptr(g);
struct nvgpu_ecc_stat **stats = NULL;
u32 gpc_count;
u32 gpc, tpc;
if (*stats_p != NULL) {
gpc_count = nvgpu_gr_config_get_gpc_count(gr_config);
stats = *stats_p;
for (gpc = 0; gpc < gpc_count; gpc++) {
/* Partially initialized array: this GPC's slot may be empty. */
if (stats[gpc] == NULL) {
continue;
}
for (tpc = 0;
tpc < nvgpu_gr_config_get_gpc_tpc_count(gr_config, gpc);
tpc++) {
/* Unlink each TPC stat before freeing the backing array. */
nvgpu_ecc_stat_del(g, &stats[gpc][tpc]);
}
nvgpu_kfree(g, stats[gpc]);
stats[gpc] = NULL;
}
nvgpu_kfree(g, stats);
*stats_p = NULL;
}
}
/*
 * Free a per-GPC ECC counter array, as allocated by
 * nvgpu_ecc_counter_init_per_gpc().
 *
 * Safe to call when *stats_p is NULL (no-op). Every per-GPC stat is
 * unlinked from stats_list before the array is freed, and the caller's
 * pointer is set to NULL so a repeated call is harmless.
 */
void nvgpu_ecc_counter_deinit_per_gpc(struct gk20a *g,
struct nvgpu_ecc_stat **stats_p)
{
struct nvgpu_gr_config *gr_config = nvgpu_gr_get_config_ptr(g);
struct nvgpu_ecc_stat *stats = NULL;
u32 gpc_count;
u32 gpc;
if (*stats_p != NULL) {
gpc_count = nvgpu_gr_config_get_gpc_count(gr_config);
stats = *stats_p;
for (gpc = 0; gpc < gpc_count; gpc++) {
/* Unlink each stat before freeing the backing array. */
nvgpu_ecc_stat_del(g, &stats[gpc]);
}
nvgpu_kfree(g, stats);
*stats_p = NULL;
}
}
void nvgpu_gr_ecc_free(struct gk20a *g)
{
struct nvgpu_ecc *ecc = &g->ecc;
struct nvgpu_gr_config *gr_config = nvgpu_gr_get_config_ptr(g);
u32 gpc_count;
nvgpu_log(g, gpu_dbg_gr, " ");
if (gr_config == NULL) {
return;
}
gpc_count = nvgpu_gr_config_get_gpc_count(gr_config);
if (g->ops.gr.ecc.fecs_ecc_deinit != NULL) {
g->ops.gr.ecc.fecs_ecc_deinit(g);
}
free_ecc_stat_count_array(g, ecc->gr.sm_lrf_ecc_single_err_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.sm_lrf_ecc_double_err_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.sm_shm_ecc_sec_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.sm_shm_ecc_sed_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.sm_shm_ecc_ded_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.tex_ecc_total_sec_pipe0_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.tex_ecc_total_ded_pipe0_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.tex_unique_ecc_sec_pipe0_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.tex_unique_ecc_ded_pipe0_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.tex_ecc_total_sec_pipe1_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.tex_ecc_total_ded_pipe1_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.tex_unique_ecc_sec_pipe1_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.tex_unique_ecc_ded_pipe1_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.sm_l1_tag_ecc_corrected_err_count,
gpc_count);
free_ecc_stat_count_array(g,
ecc->gr.sm_l1_tag_ecc_uncorrected_err_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.sm_cbu_ecc_corrected_err_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.sm_cbu_ecc_uncorrected_err_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.sm_l1_data_ecc_corrected_err_count,
gpc_count);
free_ecc_stat_count_array(g,
ecc->gr.sm_l1_data_ecc_uncorrected_err_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.sm_icache_ecc_corrected_err_count,
gpc_count);
free_ecc_stat_count_array(g,
ecc->gr.sm_icache_ecc_uncorrected_err_count,
gpc_count);
nvgpu_kfree(g, ecc->gr.gcc_l15_ecc_corrected_err_count);
nvgpu_kfree(g, ecc->gr.gcc_l15_ecc_uncorrected_err_count);
nvgpu_kfree(g, ecc->gr.gpccs_ecc_corrected_err_count);
nvgpu_kfree(g, ecc->gr.gpccs_ecc_uncorrected_err_count);
nvgpu_kfree(g, ecc->gr.mmu_l1tlb_ecc_corrected_err_count);
nvgpu_kfree(g, ecc->gr.mmu_l1tlb_ecc_uncorrected_err_count);
nvgpu_kfree(g, ecc->gr.fecs_ecc_corrected_err_count);
nvgpu_kfree(g, ecc->gr.fecs_ecc_uncorrected_err_count);
if (g->ops.gr.ecc.gpc_tpc_ecc_deinit != NULL) {
g->ops.gr.ecc.gpc_tpc_ecc_deinit(g);
}
}

View File

@@ -126,6 +126,7 @@ int nvgpu_ecc_counter_init_per_lts(struct gk20a *g,
if (stats == NULL) {
return -ENOMEM;
}
for (ltc = 0; ltc < ltc_count; ltc++) {
stats[ltc] = nvgpu_kzalloc(g,
nvgpu_safe_mult_u64(sizeof(*stats[ltc]),
@@ -184,17 +185,45 @@ fail:
void nvgpu_ltc_ecc_free(struct gk20a *g)
{
struct nvgpu_ecc *ecc = &g->ecc;
u32 i;
struct nvgpu_ecc_stat *stat;
u32 slices_per_ltc;
u32 ltc_count;
u32 ltc, lts;
for (i = 0; i < nvgpu_ltc_get_ltc_count(g); i++) {
if (ecc->ltc.ecc_sec_count != NULL) {
nvgpu_kfree(g, ecc->ltc.ecc_sec_count[i]);
if (g->ltc == NULL) {
return;
}
ltc_count = nvgpu_ltc_get_ltc_count(g);
slices_per_ltc = nvgpu_ltc_get_slices_per_ltc(g);
for (ltc = 0; ltc < ltc_count; ltc++) {
if (ecc->ltc.ecc_sec_count != NULL &&
ecc->ltc.ecc_sec_count[ltc] != NULL) {
for (lts = 0; lts < slices_per_ltc; lts++) {
stat = &ecc->ltc.ecc_sec_count[ltc][lts];
nvgpu_ecc_stat_del(g, stat);
}
nvgpu_kfree(g, ecc->ltc.ecc_sec_count[ltc]);
ecc->ltc.ecc_sec_count[ltc] = NULL;
}
if (ecc->ltc.ecc_ded_count != NULL) {
nvgpu_kfree(g, ecc->ltc.ecc_ded_count[i]);
if (ecc->ltc.ecc_ded_count != NULL &&
ecc->ltc.ecc_ded_count[ltc] != NULL) {
for (lts = 0; lts < slices_per_ltc; lts++) {
stat = &ecc->ltc.ecc_ded_count[ltc][lts];
nvgpu_ecc_stat_del(g, stat);
}
nvgpu_kfree(g, ecc->ltc.ecc_ded_count[ltc]);
ecc->ltc.ecc_ded_count[ltc] = NULL;
}
}
nvgpu_kfree(g, ecc->ltc.ecc_sec_count);
ecc->ltc.ecc_sec_count = NULL;
nvgpu_kfree(g, ecc->ltc.ecc_ded_count);
ecc->ltc.ecc_ded_count = NULL;
}