gpu: nvgpu: fix ecc counter free

ECC counter structures are freed without removing the node from the
stats_list. This can lead to invalid access due to dangling pointers.

Update the ECC counter free logic to remove counters from stats_list,
validate them before freeing, and set the pointers to NULL upon free.

Also updated some of the ECC init paths where errors were not
propagated to callers and full ECC counter deallocation was not done.

Now, calling a unit's ecc_free from any context (with counters
allocated or not) is harmless, as the requisite checks are in place.

bug 3326612
bug 3345977

Change-Id: I05eb6ed226cff9197ad37776912da9dcb7e0716d
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2565264
Tested-by: Ashish Mhetre <amhetre@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
This commit is contained in:
Sagar Kamble
2021-06-17 11:34:36 +05:30
committed by mobile promotions
parent 2887d06e3b
commit 40064ef1ec
33 changed files with 546 additions and 218 deletions

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -30,26 +30,58 @@ void nvgpu_ecc_stat_add(struct gk20a *g, struct nvgpu_ecc_stat *stat)
nvgpu_init_list_node(&stat->node); nvgpu_init_list_node(&stat->node);
nvgpu_mutex_acquire(&ecc->stats_lock);
nvgpu_list_add_tail(&stat->node, &ecc->stats_list); nvgpu_list_add_tail(&stat->node, &ecc->stats_list);
ecc->stats_count = nvgpu_safe_add_s32(ecc->stats_count, 1); ecc->stats_count = nvgpu_safe_add_s32(ecc->stats_count, 1);
nvgpu_mutex_release(&ecc->stats_lock);
}
void nvgpu_ecc_stat_del(struct gk20a *g, struct nvgpu_ecc_stat *stat)
{
struct nvgpu_ecc *ecc = &g->ecc;
nvgpu_mutex_acquire(&ecc->stats_lock);
nvgpu_list_del(&stat->node);
ecc->stats_count = nvgpu_safe_sub_s32(ecc->stats_count, 1);
nvgpu_mutex_release(&ecc->stats_lock);
} }
int nvgpu_ecc_counter_init(struct gk20a *g, int nvgpu_ecc_counter_init(struct gk20a *g,
struct nvgpu_ecc_stat **stat, const char *name) struct nvgpu_ecc_stat **statp, const char *name)
{ {
struct nvgpu_ecc_stat *stats; struct nvgpu_ecc_stat *stat;
stats = nvgpu_kzalloc(g, sizeof(*stats)); stat = nvgpu_kzalloc(g, sizeof(*stat));
if (stats == NULL) { if (stat == NULL) {
nvgpu_err(g, "ecc counter alloc failed");
return -ENOMEM; return -ENOMEM;
} }
(void)strncpy(stats->name, name, NVGPU_ECC_STAT_NAME_MAX_SIZE - 1U); (void)strncpy(stat->name, name, NVGPU_ECC_STAT_NAME_MAX_SIZE - 1U);
nvgpu_ecc_stat_add(g, stats); nvgpu_ecc_stat_add(g, stat);
*stat = stats; *statp = stat;
return 0; return 0;
} }
void nvgpu_ecc_counter_deinit(struct gk20a *g, struct nvgpu_ecc_stat **statp)
{
struct nvgpu_ecc_stat *stat;
if (*statp == NULL) {
return;
}
stat = *statp;
nvgpu_ecc_stat_del(g, stat);
nvgpu_kfree(g, stat);
*statp = NULL;
}
/* release all ecc_stat */ /* release all ecc_stat */
void nvgpu_ecc_free(struct gk20a *g) void nvgpu_ecc_free(struct gk20a *g)
{ {
@@ -72,6 +104,10 @@ void nvgpu_ecc_free(struct gk20a *g)
g->ops.pmu.ecc_free(g); g->ops.pmu.ecc_free(g);
} }
nvgpu_mutex_acquire(&ecc->stats_lock);
WARN_ON(!nvgpu_list_empty(&ecc->stats_list));
nvgpu_mutex_release(&ecc->stats_lock);
(void)memset(ecc, 0, sizeof(*ecc)); (void)memset(ecc, 0, sizeof(*ecc));
} }
@@ -83,6 +119,7 @@ int nvgpu_ecc_init_support(struct gk20a *g)
return 0; return 0;
} }
nvgpu_mutex_init(&ecc->stats_lock);
nvgpu_init_list_node(&ecc->stats_list); nvgpu_init_list_node(&ecc->stats_list);
return 0; return 0;
@@ -125,4 +162,6 @@ void nvgpu_ecc_remove_support(struct gk20a *g)
nvgpu_ecc_sysfs_remove(g); nvgpu_ecc_sysfs_remove(g);
#endif #endif
nvgpu_ecc_free(g); nvgpu_ecc_free(g);
nvgpu_mutex_destroy(&g->ecc.stats_lock);
} }

View File

@@ -996,7 +996,7 @@ int nvgpu_gr_alloc(struct gk20a *g)
* FECS ECC errors during FECS load need to be handled and reported * FECS ECC errors during FECS load need to be handled and reported
* using the ECC counters. * using the ECC counters.
*/ */
if (g->ops.gr.ecc.fecs_ecc_init != NULL) { if ((g->ops.gr.ecc.fecs_ecc_init != NULL) && !g->ecc.initialized) {
err = g->ops.gr.ecc.fecs_ecc_init(g); err = g->ops.gr.ecc.fecs_ecc_init(g);
if (err != 0) { if (err != 0) {
nvgpu_err(g, "failed to init gr fecs ecc"); nvgpu_err(g, "failed to init gr fecs ecc");

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -80,6 +80,7 @@ int nvgpu_ecc_counter_init_per_tpc(struct gk20a *g,
if (stats == NULL) { if (stats == NULL) {
return -ENOMEM; return -ENOMEM;
} }
for (gpc = 0; gpc < gpc_count; gpc++) { for (gpc = 0; gpc < gpc_count; gpc++) {
stats[gpc] = nvgpu_kzalloc(g, stats[gpc] = nvgpu_kzalloc(g,
nvgpu_safe_mult_u64(sizeof(*stats[gpc]), nvgpu_safe_mult_u64(sizeof(*stats[gpc]),
@@ -128,11 +129,10 @@ int nvgpu_ecc_counter_init_per_tpc(struct gk20a *g,
fail: fail:
if (err != 0) { if (err != 0) {
#ifdef CONFIG_NVGPU_DGPU
while (gpc-- != 0u) { while (gpc-- != 0u) {
nvgpu_kfree(g, stats[gpc]); nvgpu_kfree(g, stats[gpc]);
} }
#endif
nvgpu_kfree(g, stats); nvgpu_kfree(g, stats);
} }
@@ -178,85 +178,92 @@ int nvgpu_ecc_counter_init_per_gpc(struct gk20a *g,
return 0; return 0;
} }
/* helper function that frees the count array if non-NULL. */ void nvgpu_ecc_counter_deinit_per_gr(struct gk20a *g,
static void free_ecc_stat_count_array(struct gk20a *g, struct nvgpu_ecc_stat **stats_p)
struct nvgpu_ecc_stat **stat,
u32 gpc_count)
{ {
struct nvgpu_ecc_stat *stats = NULL;
u32 i; u32 i;
if (stat != NULL) { if (*stats_p != NULL) {
for (i = 0; i < gpc_count; i++) { stats = *stats_p;
nvgpu_kfree(g, stat[i]);
for (i = 0; i < g->num_gr_instances; i++) {
nvgpu_ecc_stat_del(g, &stats[i]);
} }
nvgpu_kfree(g, stat);
nvgpu_kfree(g, stats);
*stats_p = NULL;
}
}
void nvgpu_ecc_counter_deinit_per_tpc(struct gk20a *g,
struct nvgpu_ecc_stat ***stats_p)
{
struct nvgpu_gr_config *gr_config = nvgpu_gr_get_config_ptr(g);
struct nvgpu_ecc_stat **stats = NULL;
u32 gpc_count;
u32 gpc, tpc;
if (*stats_p != NULL) {
gpc_count = nvgpu_gr_config_get_gpc_count(gr_config);
stats = *stats_p;
for (gpc = 0; gpc < gpc_count; gpc++) {
if (stats[gpc] == NULL) {
continue;
}
for (tpc = 0;
tpc < nvgpu_gr_config_get_gpc_tpc_count(gr_config, gpc);
tpc++) {
nvgpu_ecc_stat_del(g, &stats[gpc][tpc]);
}
nvgpu_kfree(g, stats[gpc]);
stats[gpc] = NULL;
}
nvgpu_kfree(g, stats);
*stats_p = NULL;
}
}
void nvgpu_ecc_counter_deinit_per_gpc(struct gk20a *g,
struct nvgpu_ecc_stat **stats_p)
{
struct nvgpu_gr_config *gr_config = nvgpu_gr_get_config_ptr(g);
struct nvgpu_ecc_stat *stats = NULL;
u32 gpc_count;
u32 gpc;
if (*stats_p != NULL) {
gpc_count = nvgpu_gr_config_get_gpc_count(gr_config);
stats = *stats_p;
for (gpc = 0; gpc < gpc_count; gpc++) {
nvgpu_ecc_stat_del(g, &stats[gpc]);
}
nvgpu_kfree(g, stats);
*stats_p = NULL;
} }
} }
void nvgpu_gr_ecc_free(struct gk20a *g) void nvgpu_gr_ecc_free(struct gk20a *g)
{ {
struct nvgpu_ecc *ecc = &g->ecc;
struct nvgpu_gr_config *gr_config = nvgpu_gr_get_config_ptr(g); struct nvgpu_gr_config *gr_config = nvgpu_gr_get_config_ptr(g);
u32 gpc_count;
nvgpu_log(g, gpu_dbg_gr, " ");
if (gr_config == NULL) { if (gr_config == NULL) {
return; return;
} }
gpc_count = nvgpu_gr_config_get_gpc_count(gr_config); if (g->ops.gr.ecc.fecs_ecc_deinit != NULL) {
g->ops.gr.ecc.fecs_ecc_deinit(g);
}
free_ecc_stat_count_array(g, ecc->gr.sm_lrf_ecc_single_err_count, if (g->ops.gr.ecc.gpc_tpc_ecc_deinit != NULL) {
gpc_count); g->ops.gr.ecc.gpc_tpc_ecc_deinit(g);
free_ecc_stat_count_array(g, ecc->gr.sm_lrf_ecc_double_err_count, }
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.sm_shm_ecc_sec_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.sm_shm_ecc_sed_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.sm_shm_ecc_ded_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.tex_ecc_total_sec_pipe0_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.tex_ecc_total_ded_pipe0_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.tex_unique_ecc_sec_pipe0_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.tex_unique_ecc_ded_pipe0_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.tex_ecc_total_sec_pipe1_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.tex_ecc_total_ded_pipe1_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.tex_unique_ecc_sec_pipe1_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.tex_unique_ecc_ded_pipe1_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.sm_l1_tag_ecc_corrected_err_count,
gpc_count);
free_ecc_stat_count_array(g,
ecc->gr.sm_l1_tag_ecc_uncorrected_err_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.sm_cbu_ecc_corrected_err_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.sm_cbu_ecc_uncorrected_err_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.sm_l1_data_ecc_corrected_err_count,
gpc_count);
free_ecc_stat_count_array(g,
ecc->gr.sm_l1_data_ecc_uncorrected_err_count,
gpc_count);
free_ecc_stat_count_array(g, ecc->gr.sm_icache_ecc_corrected_err_count,
gpc_count);
free_ecc_stat_count_array(g,
ecc->gr.sm_icache_ecc_uncorrected_err_count,
gpc_count);
nvgpu_kfree(g, ecc->gr.gcc_l15_ecc_corrected_err_count);
nvgpu_kfree(g, ecc->gr.gcc_l15_ecc_uncorrected_err_count);
nvgpu_kfree(g, ecc->gr.gpccs_ecc_corrected_err_count);
nvgpu_kfree(g, ecc->gr.gpccs_ecc_uncorrected_err_count);
nvgpu_kfree(g, ecc->gr.mmu_l1tlb_ecc_corrected_err_count);
nvgpu_kfree(g, ecc->gr.mmu_l1tlb_ecc_uncorrected_err_count);
nvgpu_kfree(g, ecc->gr.fecs_ecc_corrected_err_count);
nvgpu_kfree(g, ecc->gr.fecs_ecc_uncorrected_err_count);
} }

View File

@@ -126,6 +126,7 @@ int nvgpu_ecc_counter_init_per_lts(struct gk20a *g,
if (stats == NULL) { if (stats == NULL) {
return -ENOMEM; return -ENOMEM;
} }
for (ltc = 0; ltc < ltc_count; ltc++) { for (ltc = 0; ltc < ltc_count; ltc++) {
stats[ltc] = nvgpu_kzalloc(g, stats[ltc] = nvgpu_kzalloc(g,
nvgpu_safe_mult_u64(sizeof(*stats[ltc]), nvgpu_safe_mult_u64(sizeof(*stats[ltc]),
@@ -184,17 +185,45 @@ fail:
void nvgpu_ltc_ecc_free(struct gk20a *g) void nvgpu_ltc_ecc_free(struct gk20a *g)
{ {
struct nvgpu_ecc *ecc = &g->ecc; struct nvgpu_ecc *ecc = &g->ecc;
u32 i; struct nvgpu_ecc_stat *stat;
u32 slices_per_ltc;
u32 ltc_count;
u32 ltc, lts;
for (i = 0; i < nvgpu_ltc_get_ltc_count(g); i++) { if (g->ltc == NULL) {
if (ecc->ltc.ecc_sec_count != NULL) { return;
nvgpu_kfree(g, ecc->ltc.ecc_sec_count[i]);
} }
if (ecc->ltc.ecc_ded_count != NULL) { ltc_count = nvgpu_ltc_get_ltc_count(g);
nvgpu_kfree(g, ecc->ltc.ecc_ded_count[i]); slices_per_ltc = nvgpu_ltc_get_slices_per_ltc(g);
for (ltc = 0; ltc < ltc_count; ltc++) {
if (ecc->ltc.ecc_sec_count != NULL &&
ecc->ltc.ecc_sec_count[ltc] != NULL) {
for (lts = 0; lts < slices_per_ltc; lts++) {
stat = &ecc->ltc.ecc_sec_count[ltc][lts];
nvgpu_ecc_stat_del(g, stat);
}
nvgpu_kfree(g, ecc->ltc.ecc_sec_count[ltc]);
ecc->ltc.ecc_sec_count[ltc] = NULL;
}
if (ecc->ltc.ecc_ded_count != NULL &&
ecc->ltc.ecc_ded_count[ltc] != NULL) {
for (lts = 0; lts < slices_per_ltc; lts++) {
stat = &ecc->ltc.ecc_ded_count[ltc][lts];
nvgpu_ecc_stat_del(g, stat);
}
nvgpu_kfree(g, ecc->ltc.ecc_ded_count[ltc]);
ecc->ltc.ecc_ded_count[ltc] = NULL;
} }
} }
nvgpu_kfree(g, ecc->ltc.ecc_sec_count); nvgpu_kfree(g, ecc->ltc.ecc_sec_count);
ecc->ltc.ecc_sec_count = NULL;
nvgpu_kfree(g, ecc->ltc.ecc_ded_count); nvgpu_kfree(g, ecc->ltc.ecc_ded_count);
ecc->ltc.ecc_ded_count = NULL;
} }

View File

@@ -1,7 +1,7 @@
/* /*
* GA10B FB ECC * GA10B FB ECC
* *
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -50,61 +50,52 @@ int ga10b_fb_ecc_init(struct gk20a *g)
err = gv11b_fb_ecc_init(g); err = gv11b_fb_ecc_init(g);
if (err != 0) { if (err != 0) {
goto init_fb_gv11b_counters_fail; goto init_fb_ecc_err;
} }
err = NVGPU_ECC_COUNTER_INIT_FB(mmu_l2tlb_ecc_uncorrected_unique_err_count); err = NVGPU_ECC_COUNTER_INIT_FB(mmu_l2tlb_ecc_uncorrected_unique_err_count);
if (err != 0) { if (err != 0) {
goto init_l2tlb_ecc_uncorrected_unique_fail; goto init_fb_ecc_err;
} }
err = NVGPU_ECC_COUNTER_INIT_FB(mmu_l2tlb_ecc_corrected_unique_err_count); err = NVGPU_ECC_COUNTER_INIT_FB(mmu_l2tlb_ecc_corrected_unique_err_count);
if (err != 0) { if (err != 0) {
goto init_l2tlb_ecc_corrected_unique_fail; goto init_fb_ecc_err;
} }
err = NVGPU_ECC_COUNTER_INIT_FB(mmu_hubtlb_ecc_uncorrected_unique_err_count); err = NVGPU_ECC_COUNTER_INIT_FB(mmu_hubtlb_ecc_uncorrected_unique_err_count);
if (err != 0) { if (err != 0) {
goto init_hubtlb_ecc_uncorrected_unique_fail; goto init_fb_ecc_err;
} }
err = NVGPU_ECC_COUNTER_INIT_FB(mmu_hubtlb_ecc_corrected_unique_err_count); err = NVGPU_ECC_COUNTER_INIT_FB(mmu_hubtlb_ecc_corrected_unique_err_count);
if (err != 0) { if (err != 0) {
goto init_hubtlb_ecc_corrected_unique_fail; goto init_fb_ecc_err;
} }
err = NVGPU_ECC_COUNTER_INIT_FB(mmu_fillunit_ecc_uncorrected_unique_err_count); err = NVGPU_ECC_COUNTER_INIT_FB(mmu_fillunit_ecc_uncorrected_unique_err_count);
if (err != 0) { if (err != 0) {
goto init_fillunit_ecc_uncorrected_unique_fail; goto init_fb_ecc_err;
} }
err = NVGPU_ECC_COUNTER_INIT_FB(mmu_fillunit_ecc_corrected_unique_err_count); err = NVGPU_ECC_COUNTER_INIT_FB(mmu_fillunit_ecc_corrected_unique_err_count);
if (err != 0) { if (err != 0) {
goto init_fillunit_ecc_corrected_unique_fail; goto init_fb_ecc_err;
} }
return 0; init_fb_ecc_err:
if (err != 0) {
nvgpu_err(g, "ecc counter allocate failed, err=%d", err);
ga10b_fb_ecc_free(g);
}
init_fillunit_ecc_corrected_unique_fail:
NVGPU_ECC_COUNTER_FREE_FB(mmu_fillunit_ecc_uncorrected_unique_err_count);
init_fillunit_ecc_uncorrected_unique_fail:
NVGPU_ECC_COUNTER_FREE_FB(mmu_hubtlb_ecc_corrected_unique_err_count);
init_hubtlb_ecc_corrected_unique_fail:
NVGPU_ECC_COUNTER_FREE_FB(mmu_hubtlb_ecc_uncorrected_unique_err_count);
init_hubtlb_ecc_uncorrected_unique_fail:
NVGPU_ECC_COUNTER_FREE_FB(mmu_l2tlb_ecc_corrected_unique_err_count);
init_l2tlb_ecc_corrected_unique_fail:
NVGPU_ECC_COUNTER_FREE_FB(mmu_l2tlb_ecc_uncorrected_unique_err_count);
init_l2tlb_ecc_uncorrected_unique_fail:
gv11b_fb_ecc_free(g);
init_fb_gv11b_counters_fail:
return err; return err;
} }
void ga10b_fb_ecc_free(struct gk20a *g) void ga10b_fb_ecc_free(struct gk20a *g)
{ {
struct nvgpu_ecc *ecc = &g->ecc; NVGPU_ECC_COUNTER_FREE_FB(mmu_l2tlb_ecc_corrected_unique_err_count);
NVGPU_ECC_COUNTER_FREE_FB(mmu_l2tlb_ecc_uncorrected_unique_err_count);
NVGPU_ECC_COUNTER_FREE_FB(mmu_hubtlb_ecc_corrected_unique_err_count);
NVGPU_ECC_COUNTER_FREE_FB(mmu_hubtlb_ecc_uncorrected_unique_err_count);
NVGPU_ECC_COUNTER_FREE_FB(mmu_fillunit_ecc_corrected_unique_err_count);
NVGPU_ECC_COUNTER_FREE_FB(mmu_fillunit_ecc_uncorrected_unique_err_count);
nvgpu_kfree(g, ecc->fb.mmu_l2tlb_ecc_corrected_unique_err_count);
nvgpu_kfree(g, ecc->fb.mmu_l2tlb_ecc_uncorrected_unique_err_count);
nvgpu_kfree(g, ecc->fb.mmu_hubtlb_ecc_corrected_unique_err_count);
nvgpu_kfree(g, ecc->fb.mmu_hubtlb_ecc_uncorrected_unique_err_count);
nvgpu_kfree(g, ecc->fb.mmu_fillunit_ecc_corrected_unique_err_count);
nvgpu_kfree(g, ecc->fb.mmu_fillunit_ecc_uncorrected_unique_err_count);
gv11b_fb_ecc_free(g); gv11b_fb_ecc_free(g);
} }

View File

@@ -1,7 +1,7 @@
/* /*
* GV11B FB ECC * GV11B FB ECC
* *
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -37,7 +37,7 @@ struct gk20a;
nvgpu_ecc_counter_init(g, &g->ecc.fb.stat, #stat) nvgpu_ecc_counter_init(g, &g->ecc.fb.stat, #stat)
#define NVGPU_ECC_COUNTER_FREE_FB(stat) \ #define NVGPU_ECC_COUNTER_FREE_FB(stat) \
nvgpu_kfree(g, g->ecc.fb.stat) nvgpu_ecc_counter_deinit(g, &g->ecc.fb.stat)
int gv11b_fb_ecc_init(struct gk20a *g); int gv11b_fb_ecc_init(struct gk20a *g);
void gv11b_fb_ecc_free(struct gk20a *g); void gv11b_fb_ecc_free(struct gk20a *g);

View File

@@ -1,7 +1,7 @@
/* /*
* GV11B FB ECC * GV11B FB ECC
* *
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -36,57 +36,49 @@ int gv11b_fb_ecc_init(struct gk20a *g)
err = NVGPU_ECC_COUNTER_INIT_FB(mmu_l2tlb_ecc_uncorrected_err_count); err = NVGPU_ECC_COUNTER_INIT_FB(mmu_l2tlb_ecc_uncorrected_err_count);
if (err != 0) { if (err != 0) {
goto init_l2tlb_ecc_uncorrected_fail; goto init_fb_ecc_err;
} }
err = NVGPU_ECC_COUNTER_INIT_FB(mmu_l2tlb_ecc_corrected_err_count); err = NVGPU_ECC_COUNTER_INIT_FB(mmu_l2tlb_ecc_corrected_err_count);
if (err != 0) { if (err != 0) {
goto init_l2tlb_ecc_corrected_fail; goto init_fb_ecc_err;
} }
err = NVGPU_ECC_COUNTER_INIT_FB(mmu_hubtlb_ecc_uncorrected_err_count); err = NVGPU_ECC_COUNTER_INIT_FB(mmu_hubtlb_ecc_uncorrected_err_count);
if (err != 0) { if (err != 0) {
goto init_hubtlb_ecc_uncorrected_fail; goto init_fb_ecc_err;
} }
err = NVGPU_ECC_COUNTER_INIT_FB(mmu_hubtlb_ecc_corrected_err_count); err = NVGPU_ECC_COUNTER_INIT_FB(mmu_hubtlb_ecc_corrected_err_count);
if (err != 0) { if (err != 0) {
goto init_hubtlb_ecc_corrected_fail; goto init_fb_ecc_err;
} }
err = NVGPU_ECC_COUNTER_INIT_FB( err = NVGPU_ECC_COUNTER_INIT_FB(
mmu_fillunit_ecc_uncorrected_err_count); mmu_fillunit_ecc_uncorrected_err_count);
if (err != 0) { if (err != 0) {
goto init_fillunit_ecc_uncorrected_fail; goto init_fb_ecc_err;
} }
err = NVGPU_ECC_COUNTER_INIT_FB( err = NVGPU_ECC_COUNTER_INIT_FB(
mmu_fillunit_ecc_corrected_err_count); mmu_fillunit_ecc_corrected_err_count);
if (err != 0) { if (err != 0) {
goto init_fillunit_ecc_corrected_fail; goto init_fb_ecc_err;
} }
return 0; init_fb_ecc_err:
if (err != 0) {
nvgpu_err(g, "ecc counter allocate failed, err=%d", err);
gv11b_fb_ecc_free(g);
}
init_fillunit_ecc_corrected_fail:
NVGPU_ECC_COUNTER_FREE_FB(mmu_fillunit_ecc_uncorrected_err_count);
init_fillunit_ecc_uncorrected_fail:
NVGPU_ECC_COUNTER_FREE_FB(mmu_hubtlb_ecc_corrected_err_count);
init_hubtlb_ecc_corrected_fail:
NVGPU_ECC_COUNTER_FREE_FB(mmu_hubtlb_ecc_uncorrected_err_count);
init_hubtlb_ecc_uncorrected_fail:
NVGPU_ECC_COUNTER_FREE_FB(mmu_l2tlb_ecc_corrected_err_count);
init_l2tlb_ecc_corrected_fail:
NVGPU_ECC_COUNTER_FREE_FB(mmu_l2tlb_ecc_uncorrected_err_count);
init_l2tlb_ecc_uncorrected_fail:
return err; return err;
} }
void gv11b_fb_ecc_free(struct gk20a *g) void gv11b_fb_ecc_free(struct gk20a *g)
{ {
struct nvgpu_ecc *ecc = &g->ecc; NVGPU_ECC_COUNTER_FREE_FB(mmu_l2tlb_ecc_corrected_err_count);
NVGPU_ECC_COUNTER_FREE_FB(mmu_l2tlb_ecc_uncorrected_err_count);
nvgpu_kfree(g, ecc->fb.mmu_l2tlb_ecc_corrected_err_count); NVGPU_ECC_COUNTER_FREE_FB(mmu_hubtlb_ecc_corrected_err_count);
nvgpu_kfree(g, ecc->fb.mmu_l2tlb_ecc_uncorrected_err_count); NVGPU_ECC_COUNTER_FREE_FB(mmu_hubtlb_ecc_uncorrected_err_count);
nvgpu_kfree(g, ecc->fb.mmu_hubtlb_ecc_corrected_err_count); NVGPU_ECC_COUNTER_FREE_FB(mmu_fillunit_ecc_corrected_err_count);
nvgpu_kfree(g, ecc->fb.mmu_hubtlb_ecc_uncorrected_err_count); NVGPU_ECC_COUNTER_FREE_FB(mmu_fillunit_ecc_uncorrected_err_count);
nvgpu_kfree(g, ecc->fb.mmu_fillunit_ecc_corrected_err_count);
nvgpu_kfree(g, ecc->fb.mmu_fillunit_ecc_uncorrected_err_count);
} }
void gv11b_fb_ecc_l2tlb_error_mask(u32 *corrected_error_mask, void gv11b_fb_ecc_l2tlb_error_mask(u32 *corrected_error_mask,

View File

@@ -152,6 +152,25 @@ int nvgpu_ecc_counter_init_per_fbpa(struct gk20a *g,
return 0; return 0;
} }
static void free_fbpa_ecc_stat_count_array(struct gk20a *g,
struct nvgpu_ecc_stat **stats_p)
{
u32 num_fbpa = nvgpu_get_litter_value(g, GPU_LIT_NUM_FBPAS);
struct nvgpu_ecc_stat *stats;
u32 i;
if (*stats_p != NULL) {
stats = *stats_p;
for (i = 0; i < num_fbpa; i++) {
nvgpu_ecc_stat_del(g, &stats[i]);
}
nvgpu_kfree(g, stats);
*stats_p = NULL;
}
}
int tu104_fbpa_ecc_init(struct gk20a *g) int tu104_fbpa_ecc_init(struct gk20a *g)
{ {
int err; int err;
@@ -168,7 +187,7 @@ int tu104_fbpa_ecc_init(struct gk20a *g)
done: done:
if (err != 0) { if (err != 0) {
nvgpu_err(g, "ecc counter allocate failed, err=%d", err); nvgpu_err(g, "ecc counter allocate failed, err=%d", err);
nvgpu_ecc_free(g); tu104_fbpa_ecc_free(g);
} }
return err; return err;
@@ -178,6 +197,6 @@ void tu104_fbpa_ecc_free(struct gk20a *g)
{ {
struct nvgpu_ecc *ecc = &g->ecc; struct nvgpu_ecc *ecc = &g->ecc;
nvgpu_kfree(g, ecc->fbpa.fbpa_ecc_sec_err_count); free_fbpa_ecc_stat_count_array(g, &ecc->fbpa.fbpa_ecc_sec_err_count);
nvgpu_kfree(g, ecc->fbpa.fbpa_ecc_ded_err_count); free_fbpa_ecc_stat_count_array(g, &ecc->fbpa.fbpa_ecc_ded_err_count);
} }

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -36,5 +36,6 @@ struct nvgpu_hw_err_inject_info_desc *
void ga10b_ecc_detect_enabled_units(struct gk20a *g); void ga10b_ecc_detect_enabled_units(struct gk20a *g);
int ga10b_gr_gpc_tpc_ecc_init(struct gk20a *g); int ga10b_gr_gpc_tpc_ecc_init(struct gk20a *g);
void ga10b_gr_gpc_tpc_ecc_deinit(struct gk20a *g);
#endif /* NVGPU_ECC_GA10B_H */ #endif /* NVGPU_ECC_GA10B_H */

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -158,11 +158,40 @@ void ga10b_ecc_detect_enabled_units(struct gk20a *g)
} }
} }
int ga10b_gr_gpc_tpc_ecc_init(struct gk20a *g) static int _ga10b_gr_gpc_tpc_ecc_init(struct gk20a *g)
{ {
gv11b_gr_gpc_tpc_ecc_init(g); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(sm_rams_ecc_corrected_err_count);
NVGPU_ECC_COUNTER_INIT_PER_TPC(sm_rams_ecc_corrected_err_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(sm_rams_ecc_uncorrected_err_count);
NVGPU_ECC_COUNTER_INIT_PER_TPC(sm_rams_ecc_uncorrected_err_count);
return 0; return 0;
} }
int ga10b_gr_gpc_tpc_ecc_init(struct gk20a *g)
{
int err;
err = gv11b_gr_gpc_tpc_ecc_init(g);
if (err != 0) {
goto done;
}
err = _ga10b_gr_gpc_tpc_ecc_init(g);
if (err != 0) {
goto done;
}
done:
if (err != 0) {
nvgpu_err(g, "ecc counter allocate failed, err=%d", err);
ga10b_gr_gpc_tpc_ecc_deinit(g);
}
return 0;
}
void ga10b_gr_gpc_tpc_ecc_deinit(struct gk20a *g)
{
gv11b_gr_gpc_tpc_ecc_deinit(g);
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(sm_rams_ecc_corrected_err_count);
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(sm_rams_ecc_uncorrected_err_count);
}

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -126,25 +126,25 @@ void gp10b_ecc_detect_enabled_units(struct gk20a *g)
static int gp10b_ecc_init_tpc_sm(struct gk20a *g) static int gp10b_ecc_init_tpc_sm(struct gk20a *g)
{ {
NVGPU_ECC_COUNTER_INIT_PER_TPC(sm_lrf_ecc_single_err_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(sm_lrf_ecc_single_err_count);
NVGPU_ECC_COUNTER_INIT_PER_TPC(sm_lrf_ecc_double_err_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(sm_lrf_ecc_double_err_count);
NVGPU_ECC_COUNTER_INIT_PER_TPC(sm_shm_ecc_sec_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(sm_shm_ecc_sec_count);
NVGPU_ECC_COUNTER_INIT_PER_TPC(sm_shm_ecc_sed_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(sm_shm_ecc_sed_count);
NVGPU_ECC_COUNTER_INIT_PER_TPC(sm_shm_ecc_ded_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(sm_shm_ecc_ded_count);
return 0; return 0;
} }
static int gp10b_ecc_init_tpc_tex(struct gk20a *g) static int gp10b_ecc_init_tpc_tex(struct gk20a *g)
{ {
NVGPU_ECC_COUNTER_INIT_PER_TPC(tex_ecc_total_sec_pipe0_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(tex_ecc_total_sec_pipe0_count);
NVGPU_ECC_COUNTER_INIT_PER_TPC(tex_ecc_total_ded_pipe0_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(tex_ecc_total_ded_pipe0_count);
NVGPU_ECC_COUNTER_INIT_PER_TPC(tex_unique_ecc_sec_pipe0_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(tex_unique_ecc_sec_pipe0_count);
NVGPU_ECC_COUNTER_INIT_PER_TPC(tex_unique_ecc_ded_pipe0_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(tex_unique_ecc_ded_pipe0_count);
NVGPU_ECC_COUNTER_INIT_PER_TPC(tex_ecc_total_sec_pipe1_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(tex_ecc_total_sec_pipe1_count);
NVGPU_ECC_COUNTER_INIT_PER_TPC(tex_ecc_total_ded_pipe1_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(tex_ecc_total_ded_pipe1_count);
NVGPU_ECC_COUNTER_INIT_PER_TPC(tex_unique_ecc_sec_pipe1_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(tex_unique_ecc_sec_pipe1_count);
NVGPU_ECC_COUNTER_INIT_PER_TPC(tex_unique_ecc_ded_pipe1_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(tex_unique_ecc_ded_pipe1_count);
return 0; return 0;
} }
@@ -170,8 +170,36 @@ int gp10b_gr_ecc_init(struct gk20a *g)
err = gp10b_ecc_init_tpc(g); err = gp10b_ecc_init_tpc(g);
if (err != 0) { if (err != 0) {
nvgpu_err(g, "ecc counter allocate failed, err=%d", err); nvgpu_err(g, "ecc counter allocate failed, err=%d", err);
nvgpu_ecc_free(g); gp10b_gr_ecc_deinit(g);
} }
return err; return err;
} }
static void gp10b_ecc_deinit_tpc_sm(struct gk20a *g)
{
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(sm_lrf_ecc_single_err_count);
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(sm_lrf_ecc_double_err_count);
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(sm_shm_ecc_sec_count);
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(sm_shm_ecc_sed_count);
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(sm_shm_ecc_ded_count);
}
static void gp10b_ecc_deinit_tpc_tex(struct gk20a *g)
{
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(tex_ecc_total_sec_pipe0_count);
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(tex_ecc_total_ded_pipe0_count);
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(tex_unique_ecc_sec_pipe0_count);
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(tex_unique_ecc_ded_pipe0_count);
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(tex_ecc_total_sec_pipe1_count);
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(tex_ecc_total_ded_pipe1_count);
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(tex_unique_ecc_sec_pipe1_count);
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(tex_unique_ecc_ded_pipe1_count);
}
void gp10b_gr_ecc_deinit(struct gk20a *g)
{
gp10b_ecc_deinit_tpc_sm(g);
gp10b_ecc_deinit_tpc_tex(g);
}

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -27,5 +27,6 @@ struct gk20a;
void gp10b_ecc_detect_enabled_units(struct gk20a *g); void gp10b_ecc_detect_enabled_units(struct gk20a *g);
int gp10b_gr_ecc_init(struct gk20a *g); int gp10b_gr_ecc_init(struct gk20a *g);
void gp10b_gr_ecc_deinit(struct gk20a *g);
#endif /* NVGPU_ECC_GP10B_H */ #endif /* NVGPU_ECC_GP10B_H */

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -35,6 +35,8 @@ void gv11b_ecc_detect_enabled_units(struct gk20a *g);
int gv11b_gr_gpc_tpc_ecc_init(struct gk20a *g); int gv11b_gr_gpc_tpc_ecc_init(struct gk20a *g);
int gv11b_gr_fecs_ecc_init(struct gk20a *g); int gv11b_gr_fecs_ecc_init(struct gk20a *g);
void gv11b_gr_gpc_tpc_ecc_deinit(struct gk20a *g);
void gv11b_gr_fecs_ecc_deinit(struct gk20a *g);
#ifdef CONFIG_NVGPU_INJECT_HWERR #ifdef CONFIG_NVGPU_INJECT_HWERR
void gv11b_gr_intr_inject_fecs_ecc_error(struct gk20a *g, void gv11b_gr_intr_inject_fecs_ecc_error(struct gk20a *g,

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -177,20 +177,20 @@ void gv11b_ecc_detect_enabled_units(struct gk20a *g)
static int gv11b_ecc_init_sm_corrected_err_count(struct gk20a *g) static int gv11b_ecc_init_sm_corrected_err_count(struct gk20a *g)
{ {
NVGPU_ECC_COUNTER_INIT_PER_TPC(sm_l1_tag_ecc_corrected_err_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(sm_l1_tag_ecc_corrected_err_count);
NVGPU_ECC_COUNTER_INIT_PER_TPC(sm_cbu_ecc_corrected_err_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(sm_cbu_ecc_corrected_err_count);
NVGPU_ECC_COUNTER_INIT_PER_TPC(sm_l1_data_ecc_corrected_err_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(sm_l1_data_ecc_corrected_err_count);
NVGPU_ECC_COUNTER_INIT_PER_TPC(sm_icache_ecc_corrected_err_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(sm_icache_ecc_corrected_err_count);
return 0; return 0;
} }
static int gv11b_ecc_init_sm_uncorrected_err_count(struct gk20a *g) static int gv11b_ecc_init_sm_uncorrected_err_count(struct gk20a *g)
{ {
NVGPU_ECC_COUNTER_INIT_PER_TPC(sm_l1_tag_ecc_uncorrected_err_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(sm_l1_tag_ecc_uncorrected_err_count);
NVGPU_ECC_COUNTER_INIT_PER_TPC(sm_cbu_ecc_uncorrected_err_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(sm_cbu_ecc_uncorrected_err_count);
NVGPU_ECC_COUNTER_INIT_PER_TPC(sm_l1_data_ecc_uncorrected_err_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(sm_l1_data_ecc_uncorrected_err_count);
NVGPU_ECC_COUNTER_INIT_PER_TPC(sm_icache_ecc_uncorrected_err_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(sm_icache_ecc_uncorrected_err_count);
return 0; return 0;
} }
@@ -199,8 +199,8 @@ static int gv11b_ecc_init_tpc(struct gk20a *g)
{ {
int ret; int ret;
NVGPU_ECC_COUNTER_INIT_PER_TPC(sm_lrf_ecc_single_err_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(sm_lrf_ecc_single_err_count);
NVGPU_ECC_COUNTER_INIT_PER_TPC(sm_lrf_ecc_double_err_count); NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(sm_lrf_ecc_double_err_count);
ret = gv11b_ecc_init_sm_corrected_err_count(g); ret = gv11b_ecc_init_sm_corrected_err_count(g);
if (ret != 0) { if (ret != 0) {
@@ -268,22 +268,23 @@ int gv11b_gr_gpc_tpc_ecc_init(struct gk20a *g)
done: done:
if (err != 0) { if (err != 0) {
nvgpu_err(g, "ecc counter allocate failed, err=%d", err); nvgpu_err(g, "ecc counter allocate failed, err=%d", err);
nvgpu_ecc_free(g); gv11b_gr_gpc_tpc_ecc_deinit(g);
} }
return err; return err;
} }
int gv11b_gr_fecs_ecc_init(struct gk20a *g) int gv11b_gr_fecs_ecc_init(struct gk20a *g)
{ {
int err; int err;
nvgpu_log(g, gpu_dbg_gr, " "); nvgpu_log(g, gpu_dbg_gr, " ");
err = NVGPU_ECC_COUNTER_INIT_GR(fecs_ecc_uncorrected_err_count); err = NVGPU_ECC_COUNTER_INIT_PER_GR(fecs_ecc_uncorrected_err_count);
if (err != 0) { if (err != 0) {
goto done; goto done;
} }
err = NVGPU_ECC_COUNTER_INIT_GR(fecs_ecc_corrected_err_count); err = NVGPU_ECC_COUNTER_INIT_PER_GR(fecs_ecc_corrected_err_count);
if (err != 0) { if (err != 0) {
goto done; goto done;
} }
@@ -291,8 +292,60 @@ int gv11b_gr_fecs_ecc_init(struct gk20a *g)
done: done:
if (err != 0) { if (err != 0) {
nvgpu_err(g, "ecc counter allocate failed, err=%d", err); nvgpu_err(g, "ecc counter allocate failed, err=%d", err);
nvgpu_ecc_free(g); gv11b_gr_fecs_ecc_deinit(g);
} }
return err; return err;
} }
static void gv11b_ecc_deinit_sm_corrected_err_count(struct gk20a *g)
{
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(sm_l1_tag_ecc_corrected_err_count);
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(sm_cbu_ecc_corrected_err_count);
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(sm_l1_data_ecc_corrected_err_count);
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(sm_icache_ecc_corrected_err_count);
}
static void gv11b_ecc_deinit_sm_uncorrected_err_count(struct gk20a *g)
{
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(sm_l1_tag_ecc_uncorrected_err_count);
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(sm_cbu_ecc_uncorrected_err_count);
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(sm_l1_data_ecc_uncorrected_err_count);
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(sm_icache_ecc_uncorrected_err_count);
}
static void gv11b_ecc_deinit_tpc(struct gk20a *g)
{
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(sm_lrf_ecc_single_err_count);
NVGPU_ECC_COUNTER_DEINIT_PER_TPC(sm_lrf_ecc_double_err_count);
gv11b_ecc_deinit_sm_corrected_err_count(g);
gv11b_ecc_deinit_sm_uncorrected_err_count(g);
}
static void gv11b_ecc_deinit_gpc(struct gk20a *g)
{
NVGPU_ECC_COUNTER_DEINIT_PER_GPC(gcc_l15_ecc_corrected_err_count);
NVGPU_ECC_COUNTER_DEINIT_PER_GPC(gcc_l15_ecc_uncorrected_err_count);
NVGPU_ECC_COUNTER_DEINIT_PER_GPC(gpccs_ecc_uncorrected_err_count);
NVGPU_ECC_COUNTER_DEINIT_PER_GPC(gpccs_ecc_corrected_err_count);
NVGPU_ECC_COUNTER_DEINIT_PER_GPC(mmu_l1tlb_ecc_uncorrected_err_count);
NVGPU_ECC_COUNTER_DEINIT_PER_GPC(mmu_l1tlb_ecc_corrected_err_count);
}
void gv11b_gr_gpc_tpc_ecc_deinit(struct gk20a *g)
{
nvgpu_log(g, gpu_dbg_gr, " ");
gv11b_ecc_deinit_tpc(g);
gv11b_ecc_deinit_gpc(g);
}
void gv11b_gr_fecs_ecc_deinit(struct gk20a *g)
{
nvgpu_log(g, gpu_dbg_gr, " ");
NVGPU_ECC_COUNTER_DEINIT_PER_GR(fecs_ecc_uncorrected_err_count);
NVGPU_ECC_COUNTER_DEINIT_PER_GR(fecs_ecc_corrected_err_count);
}

View File

@@ -430,6 +430,8 @@ static const struct gops_gr_ecc ga100_ops_gr_ecc = {
.detect = ga10b_ecc_detect_enabled_units, .detect = ga10b_ecc_detect_enabled_units,
.gpc_tpc_ecc_init = ga10b_gr_gpc_tpc_ecc_init, .gpc_tpc_ecc_init = ga10b_gr_gpc_tpc_ecc_init,
.fecs_ecc_init = gv11b_gr_fecs_ecc_init, .fecs_ecc_init = gv11b_gr_fecs_ecc_init,
.gpc_tpc_ecc_deinit = ga10b_gr_gpc_tpc_ecc_deinit,
.fecs_ecc_deinit = gv11b_gr_fecs_ecc_deinit,
#ifdef CONFIG_NVGPU_INJECT_HWERR #ifdef CONFIG_NVGPU_INJECT_HWERR
.get_mmu_err_desc = ga10b_gr_ecc_get_mmu_err_desc, .get_mmu_err_desc = ga10b_gr_ecc_get_mmu_err_desc,
.get_gcc_err_desc = gv11b_gr_intr_get_gcc_err_desc, .get_gcc_err_desc = gv11b_gr_intr_get_gcc_err_desc,

View File

@@ -394,6 +394,8 @@ static const struct gops_gr_ecc ga10b_ops_gr_ecc = {
.detect = ga10b_ecc_detect_enabled_units, .detect = ga10b_ecc_detect_enabled_units,
.gpc_tpc_ecc_init = ga10b_gr_gpc_tpc_ecc_init, .gpc_tpc_ecc_init = ga10b_gr_gpc_tpc_ecc_init,
.fecs_ecc_init = gv11b_gr_fecs_ecc_init, .fecs_ecc_init = gv11b_gr_fecs_ecc_init,
.gpc_tpc_ecc_deinit = ga10b_gr_gpc_tpc_ecc_deinit,
.fecs_ecc_deinit = gv11b_gr_fecs_ecc_deinit,
#ifdef CONFIG_NVGPU_INJECT_HWERR #ifdef CONFIG_NVGPU_INJECT_HWERR
.get_mmu_err_desc = ga10b_gr_ecc_get_mmu_err_desc, .get_mmu_err_desc = ga10b_gr_ecc_get_mmu_err_desc,
.get_gcc_err_desc = gv11b_gr_intr_get_gcc_err_desc, .get_gcc_err_desc = gv11b_gr_intr_get_gcc_err_desc,

View File

@@ -247,6 +247,7 @@ static const struct gops_ce gp10b_ops_ce = {
static const struct gops_gr_ecc gp10b_ops_gr_ecc = { static const struct gops_gr_ecc gp10b_ops_gr_ecc = {
.detect = gp10b_ecc_detect_enabled_units, .detect = gp10b_ecc_detect_enabled_units,
.gpc_tpc_ecc_init = gp10b_gr_ecc_init, .gpc_tpc_ecc_init = gp10b_gr_ecc_init,
.gpc_tpc_ecc_deinit = gp10b_gr_ecc_deinit,
}; };
static const struct gops_gr_ctxsw_prog gp10b_ops_gr_ctxsw_prog = { static const struct gops_gr_ctxsw_prog gp10b_ops_gr_ctxsw_prog = {

View File

@@ -316,6 +316,8 @@ static const struct gops_gr_ecc gv11b_ops_gr_ecc = {
.detect = gv11b_ecc_detect_enabled_units, .detect = gv11b_ecc_detect_enabled_units,
.gpc_tpc_ecc_init = gv11b_gr_gpc_tpc_ecc_init, .gpc_tpc_ecc_init = gv11b_gr_gpc_tpc_ecc_init,
.fecs_ecc_init = gv11b_gr_fecs_ecc_init, .fecs_ecc_init = gv11b_gr_fecs_ecc_init,
.gpc_tpc_ecc_deinit = gv11b_gr_gpc_tpc_ecc_deinit,
.fecs_ecc_deinit = gv11b_gr_fecs_ecc_deinit,
#ifdef CONFIG_NVGPU_INJECT_HWERR #ifdef CONFIG_NVGPU_INJECT_HWERR
.get_mmu_err_desc = gv11b_gr_intr_get_mmu_err_desc, .get_mmu_err_desc = gv11b_gr_intr_get_mmu_err_desc,
.get_gcc_err_desc = gv11b_gr_intr_get_gcc_err_desc, .get_gcc_err_desc = gv11b_gr_intr_get_gcc_err_desc,

View File

@@ -368,6 +368,8 @@ static const struct gops_gr_ecc tu104_ops_gr_ecc = {
.detect = NULL, .detect = NULL,
.gpc_tpc_ecc_init = gv11b_gr_gpc_tpc_ecc_init, .gpc_tpc_ecc_init = gv11b_gr_gpc_tpc_ecc_init,
.fecs_ecc_init = gv11b_gr_fecs_ecc_init, .fecs_ecc_init = gv11b_gr_fecs_ecc_init,
.gpc_tpc_ecc_deinit = gv11b_gr_gpc_tpc_ecc_deinit,
.fecs_ecc_deinit = gv11b_gr_fecs_ecc_deinit,
}; };
static const struct gops_gr_ctxsw_prog tu104_ops_gr_ctxsw_prog = { static const struct gops_gr_ctxsw_prog tu104_ops_gr_ctxsw_prog = {

View File

@@ -1,7 +1,7 @@
/* /*
* GP10B L2 * GP10B L2
* *
* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -56,7 +56,7 @@ int gp10b_lts_ecc_init(struct gk20a *g)
init_lts_err: init_lts_err:
if (err != 0) { if (err != 0) {
nvgpu_err(g, "ecc counter allocate failed, err=%d", err); nvgpu_err(g, "ecc counter allocate failed, err=%d", err);
nvgpu_ecc_free(g); nvgpu_ltc_ecc_free(g);
} }
return err; return err;

View File

@@ -73,7 +73,7 @@ int gv11b_lts_ecc_init(struct gk20a *g)
done: done:
if (err != 0) { if (err != 0) {
nvgpu_err(g, "ecc counter allocate failed, err=%d", err); nvgpu_err(g, "ecc counter allocate failed, err=%d", err);
nvgpu_ecc_free(g); nvgpu_ltc_ecc_free(g);
} }
return err; return err;

View File

@@ -328,7 +328,7 @@ int gv11b_pmu_ecc_init(struct gk20a *g)
done: done:
if (err != 0) { if (err != 0) {
nvgpu_err(g, "ecc counter allocate failed, err=%d", err); nvgpu_err(g, "ecc counter allocate failed, err=%d", err);
nvgpu_ecc_free(g); gv11b_pmu_ecc_free(g);
} }
return err; return err;
@@ -336,10 +336,8 @@ done:
void gv11b_pmu_ecc_free(struct gk20a *g) void gv11b_pmu_ecc_free(struct gk20a *g)
{ {
struct nvgpu_ecc *ecc = &g->ecc; NVGPU_ECC_COUNTER_FREE_PMU(pmu_ecc_corrected_err_count);
NVGPU_ECC_COUNTER_FREE_PMU(pmu_ecc_uncorrected_err_count);
nvgpu_kfree(g, ecc->pmu.pmu_ecc_corrected_err_count);
nvgpu_kfree(g, ecc->pmu.pmu_ecc_uncorrected_err_count);
} }
static void gv11b_pmu_handle_ecc_irq(struct gk20a *g) static void gv11b_pmu_handle_ecc_irq(struct gk20a *g)

View File

@@ -77,6 +77,7 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
#include <nvgpu/list.h> #include <nvgpu/list.h>
#include <nvgpu/lock.h>
#define NVGPU_ECC_STAT_NAME_MAX_SIZE 100UL #define NVGPU_ECC_STAT_NAME_MAX_SIZE 100UL
@@ -268,6 +269,8 @@ struct nvgpu_ecc {
/** Contains the head to the list of error statistics. */ /** Contains the head to the list of error statistics. */
struct nvgpu_list_node stats_list; struct nvgpu_list_node stats_list;
/** Lock to protect the stats_list updates. */
struct nvgpu_mutex stats_lock;
/** Contains the number of error statistics. */ /** Contains the number of error statistics. */
int stats_count; int stats_count;
/** /**
@@ -281,29 +284,50 @@ struct nvgpu_ecc {
* @brief Allocates, initializes an error counter with specified name. * @brief Allocates, initializes an error counter with specified name.
* *
* @param g [in] The GPU driver struct. * @param g [in] The GPU driver struct.
* @param stat [out] Pointer to array of tpc error counters. * @param statp [out] Pointer to error counter pointer.
* @param name [in] Unique name for error counter. * @param name [in] Unique name for error counter.
* *
* Allocate memory for one error counter, initializes the counter with 0 and the * Allocate memory for one error counter, initializes the counter with 0 and the
* specified string identifier. Finally the counter is added to the status_list * specified string identifier. Finally the counter is added to the stats_list
* of struct nvgpu_ecc. * of struct nvgpu_ecc.
* *
* @return 0 in case of success, less than 0 for failure. * @return 0 in case of success, less than 0 for failure.
* @return -ENOMEM if there is not enough memory to allocate ecc stats. * @return -ENOMEM if there is not enough memory to allocate ecc stats.
*/ */
int nvgpu_ecc_counter_init(struct gk20a *g, int nvgpu_ecc_counter_init(struct gk20a *g,
struct nvgpu_ecc_stat **stat, const char *name); struct nvgpu_ecc_stat **statp, const char *name);
/** /**
* @brief Concatenates the error counter to status list. * @brief Deallocates an error counter.
*
* @param g [in] The GPU driver struct.
* @param statp [in] Pointer to error counter pointer.
*
* Delete the counter from the nvgpu_ecc stats_list. Deallocate memory for the
* error counter.
*/
void nvgpu_ecc_counter_deinit(struct gk20a *g, struct nvgpu_ecc_stat **statp);
/**
* @brief Concatenates the error counter to stats list.
* *
* @param g [in] The GPU driver struct. * @param g [in] The GPU driver struct.
* @param stat [in] Pointer to error counter. * @param stat [in] Pointer to error counter.
* *
* The counter is added to the status_list of struct nvgpu_ecc. * The counter is added to the stats_list of struct nvgpu_ecc.
*/ */
void nvgpu_ecc_stat_add(struct gk20a *g, struct nvgpu_ecc_stat *stat); void nvgpu_ecc_stat_add(struct gk20a *g, struct nvgpu_ecc_stat *stat);
/**
* @brief Deletes the error counter from the stats list.
*
* @param g [in] The GPU driver struct.
* @param stat [in] Pointer to error counter.
*
* The counter is removed from the stats_list of struct nvgpu_ecc.
*/
void nvgpu_ecc_stat_del(struct gk20a *g, struct nvgpu_ecc_stat *stat);
/** /**
* @brief Release memory associated with all error counters. * @brief Release memory associated with all error counters.
* *

View File

@@ -105,6 +105,27 @@ struct gops_gr_ecc {
*/ */
int (*fecs_ecc_init)(struct gk20a *g); int (*fecs_ecc_init)(struct gk20a *g);
/**
* @brief Deinitialize GR unit ECC support.
*
* @param g [in] Pointer to GPU driver struct.
*
* This function deallocates memory to track the ecc error counts
* for GR unit and subunits of GR (like GPCs, TPCs etc) and removes
* it from global list.
*/
void (*gpc_tpc_ecc_deinit)(struct gk20a *g);
/**
* @brief Deinitialize GR unit ECC support.
*
* @param g [in] Pointer to GPU driver struct.
*
* This function deallocates memory to track the ecc error counts
* for FECS in GR and removes it from global list.
*/
void (*fecs_ecc_deinit)(struct gk20a *g);
/** /**
* @brief Detect ECC enabled units in GR engine. * @brief Detect ECC enabled units in GR engine.
* *

View File

@@ -49,7 +49,7 @@ int nvgpu_ecc_counter_init_per_tpc(struct gk20a *g,
* @param stat [in] Address of pointer to struct nvgpu_ecc_stat. * @param stat [in] Address of pointer to struct nvgpu_ecc_stat.
* *
*/ */
#define NVGPU_ECC_COUNTER_INIT_PER_TPC(stat) \ #define NVGPU_ECC_COUNTER_INIT_PER_TPC_OR_RETURN(stat) \
do { \ do { \
int err = 0; \ int err = 0; \
err = nvgpu_ecc_counter_init_per_tpc(g, \ err = nvgpu_ecc_counter_init_per_tpc(g, \
@@ -59,6 +59,27 @@ int nvgpu_ecc_counter_init_per_tpc(struct gk20a *g,
} \ } \
} while (false) } while (false)
/**
* @brief Free error counter of all tpc instances in all gpc instances.
*
* @param g [in] The GPU driver struct.
* @param stats_p [out] Pointer to 2D array of error counters in tpcs in gpcs.
*
* Removes the error counter of all gpc instances from stats_list in struct
* nvgpu_ecc and frees the memory allocated for it.
*/
void nvgpu_ecc_counter_deinit_per_tpc(struct gk20a *g,
struct nvgpu_ecc_stat ***stats_p);
/*
* @brief Frees counters for memories shared across a TPCs in GPCs.
*
* @param stat [in] error counter member from g->ecc.gr.
*
*/
#define NVGPU_ECC_COUNTER_DEINIT_PER_TPC(stat) \
nvgpu_ecc_counter_deinit_per_tpc(g, &g->ecc.gr.stat)
/** /**
* @brief Allocate and initialize error counter specified by name for all gpc * @brief Allocate and initialize error counter specified by name for all gpc
* instances. * instances.
@@ -79,12 +100,33 @@ int nvgpu_ecc_counter_init_per_gpc(struct gk20a *g,
/* /*
* @brief Allocate and initialize counters for memories shared across a GPC. * @brief Allocate and initialize counters for memories shared across a GPC.
* *
* @param stat [in] Address of pointer to struct nvgpu_ecc_stat. * @param stat [in] error counter member from g->ecc.gr.
* *
*/ */
#define NVGPU_ECC_COUNTER_INIT_PER_GPC(stat) \ #define NVGPU_ECC_COUNTER_INIT_PER_GPC(stat) \
nvgpu_ecc_counter_init_per_gpc(g, &g->ecc.gr.stat, #stat) nvgpu_ecc_counter_init_per_gpc(g, &g->ecc.gr.stat, #stat)
/**
* @brief Free error counter of all gpc instances.
*
* @param g [in] The GPU driver struct.
* @param stats_p [out] Pointer to array of gpc error counters.
*
* Removes the error counter of all gpc instances from stats_list in struct
* nvgpu_ecc and frees the memory allocated for it.
*/
void nvgpu_ecc_counter_deinit_per_gpc(struct gk20a *g,
struct nvgpu_ecc_stat **stats_p);
/*
* @brief Frees counters for memories shared across a GPC.
*
* @param stat [in] error counter member from g->ecc.gr.
*
*/
#define NVGPU_ECC_COUNTER_DEINIT_PER_GPC(stat) \
nvgpu_ecc_counter_deinit_per_gpc(g, &g->ecc.gr.stat)
/** /**
* @brief Allocate and initialize error counter specified by name for all gr * @brief Allocate and initialize error counter specified by name for all gr
* instances. * instances.
@@ -107,9 +149,30 @@ int nvgpu_ecc_counter_init_per_gr(struct gk20a *g,
* @param stat [in] Address of pointer to struct nvgpu_ecc_stat. * @param stat [in] Address of pointer to struct nvgpu_ecc_stat.
* *
*/ */
#define NVGPU_ECC_COUNTER_INIT_GR(stat) \ #define NVGPU_ECC_COUNTER_INIT_PER_GR(stat) \
nvgpu_ecc_counter_init_per_gr(g, &g->ecc.gr.stat, #stat) nvgpu_ecc_counter_init_per_gr(g, &g->ecc.gr.stat, #stat)
/**
* @brief Free error counter of all gr instances.
*
* @param g [in] The GPU driver struct.
* @param stats_p [out] Pointer to array of gr error counters.
*
* Removes the error counter of all gr instances from stats_list in struct
* nvgpu_ecc and frees the memory allocated for it.
*/
void nvgpu_ecc_counter_deinit_per_gr(struct gk20a *g,
struct nvgpu_ecc_stat **stats_p);
/*
* @brief Frees counters for memories shared across a GR instances.
*
* @param stat [in] error counter member from g->ecc.gr.
*
*/
#define NVGPU_ECC_COUNTER_DEINIT_PER_GR(stat) \
nvgpu_ecc_counter_deinit_per_gr(g, &g->ecc.gr.stat)
/** /**
* @brief Release all GR ECC stats counters. * @brief Release all GR ECC stats counters.
* *

View File

@@ -528,7 +528,7 @@ int nvgpu_pmu_early_init(struct gk20a *g);
void nvgpu_pmu_remove_support(struct gk20a *g, struct nvgpu_pmu *pmu); void nvgpu_pmu_remove_support(struct gk20a *g, struct nvgpu_pmu *pmu);
/* /*
* @brief Allocate and initialize counter for memories within PMU. * @brief Allocate and initialize ECC counter for memories within PMU.
* *
* @param stat [in] Address of pointer to struct nvgpu_ecc_stat. * @param stat [in] Address of pointer to struct nvgpu_ecc_stat.
* *
@@ -536,5 +536,14 @@ void nvgpu_pmu_remove_support(struct gk20a *g, struct nvgpu_pmu *pmu);
#define NVGPU_ECC_COUNTER_INIT_PMU(stat) \ #define NVGPU_ECC_COUNTER_INIT_PMU(stat) \
nvgpu_ecc_counter_init(g, &g->ecc.pmu.stat, #stat) nvgpu_ecc_counter_init(g, &g->ecc.pmu.stat, #stat)
/*
* @brief Remove ECC counter from the list and free the counter.
*
* @param stat [in] Address of pointer to struct nvgpu_ecc_stat.
*
*/
#define NVGPU_ECC_COUNTER_FREE_PMU(stat) \
nvgpu_ecc_counter_deinit(g, &g->ecc.pmu.stat)
#endif /* NVGPU_PMU_H */ #endif /* NVGPU_PMU_H */

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License, * under the terms and conditions of the GNU General Public License,
@@ -28,9 +28,13 @@ int nvgpu_ecc_sysfs_init(struct gk20a *g)
struct nvgpu_ecc_stat *stat; struct nvgpu_ecc_stat *stat;
int i = 0, err; int i = 0, err;
nvgpu_mutex_acquire(&ecc->stats_lock);
attr = nvgpu_kzalloc(g, sizeof(*attr) * ecc->stats_count); attr = nvgpu_kzalloc(g, sizeof(*attr) * ecc->stats_count);
if (!attr) if (!attr) {
nvgpu_mutex_release(&ecc->stats_lock);
return -ENOMEM; return -ENOMEM;
}
nvgpu_list_for_each_entry(stat, nvgpu_list_for_each_entry(stat,
&ecc->stats_list, nvgpu_ecc_stat, node) { &ecc->stats_list, nvgpu_ecc_stat, node) {
@@ -54,6 +58,8 @@ int nvgpu_ecc_sysfs_init(struct gk20a *g)
i++; i++;
} }
nvgpu_mutex_release(&ecc->stats_lock);
if (err) { if (err) {
while (i-- > 0) while (i-- > 0)
device_remove_file(dev, &attr[i].attr); device_remove_file(dev, &attr[i].attr);
@@ -73,8 +79,13 @@ void nvgpu_ecc_sysfs_remove(struct gk20a *g)
struct nvgpu_ecc *ecc = &g->ecc; struct nvgpu_ecc *ecc = &g->ecc;
int i; int i;
nvgpu_mutex_acquire(&ecc->stats_lock);
for (i = 0; i < ecc->stats_count; i++) for (i = 0; i < ecc->stats_count; i++)
device_remove_file(dev, &l->ecc_attrs[i].attr); device_remove_file(dev, &l->ecc_attrs[i].attr);
nvgpu_mutex_release(&ecc->stats_lock);
nvgpu_kfree(g, l->ecc_attrs); nvgpu_kfree(g, l->ecc_attrs);
l->ecc_attrs = NULL; l->ecc_attrs = NULL;
} }

View File

@@ -779,6 +779,7 @@ nvgpu_get_nvhost_dev
nvgpu_free_nvhost_dev nvgpu_free_nvhost_dev
nvgpu_ecc_free nvgpu_ecc_free
nvgpu_ecc_counter_init nvgpu_ecc_counter_init
nvgpu_ecc_counter_deinit
nvgpu_ecc_finalize_support nvgpu_ecc_finalize_support
nvgpu_rc_fifo_recover nvgpu_rc_fifo_recover
nvgpu_rc_ctxsw_timeout nvgpu_rc_ctxsw_timeout

View File

@@ -796,6 +796,7 @@ nvgpu_get_nvhost_dev
nvgpu_free_nvhost_dev nvgpu_free_nvhost_dev
nvgpu_ecc_free nvgpu_ecc_free
nvgpu_ecc_counter_init nvgpu_ecc_counter_init
nvgpu_ecc_counter_deinit
nvgpu_ecc_finalize_support nvgpu_ecc_finalize_support
nvgpu_rc_fifo_recover nvgpu_rc_fifo_recover
nvgpu_rc_ctxsw_timeout nvgpu_rc_ctxsw_timeout

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -121,7 +121,7 @@ int test_ecc_counter_init(struct unit_module *m, struct gk20a *g,
ret = UNIT_FAIL; ret = UNIT_FAIL;
goto cleanup; goto cleanup;
} }
nvgpu_kfree(g, stat); nvgpu_ecc_counter_deinit(g, &stat);
/* /*
* Case #2: * Case #2:
@@ -147,12 +147,17 @@ int test_ecc_counter_init(struct unit_module *m, struct gk20a *g,
ret = UNIT_FAIL; ret = UNIT_FAIL;
goto cleanup; goto cleanup;
} }
nvgpu_kfree(g, stat);
stat = NULL; nvgpu_ecc_counter_deinit(g, &stat);
if (!nvgpu_list_empty(&g->ecc.stats_list)) {
ret = UNIT_FAIL;
goto cleanup;
}
cleanup: cleanup:
if (stat != NULL) { if (stat != NULL) {
nvgpu_kfree(g, stat); nvgpu_ecc_counter_deinit(g, &stat);
} }
nvgpu_kfree(g, name); nvgpu_kfree(g, name);

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -110,6 +110,8 @@ int test_ecc_finalize_support(struct unit_module *m,
* - Set counter name to string with invalid length equal to * - Set counter name to string with invalid length equal to
* NVGPU_ECC_STAT_NAME_MAX_SIZE. * NVGPU_ECC_STAT_NAME_MAX_SIZE.
* - "nvgpu_ecc_counter_init" will truncate the counter name and return 0. * - "nvgpu_ecc_counter_init" will truncate the counter name and return 0.
* - Test case #4
* - Verify that the g->ecc.stats_list is empty.
* *
* Output: * Output:
* - UNIT_FAIL under the following conditions: * - UNIT_FAIL under the following conditions:

View File

@@ -120,6 +120,8 @@ int fb_gv11b_init_test(struct unit_module *m, struct gk20a *g, void *args)
if (err != -ENOMEM) { if (err != -ENOMEM) {
unit_return_fail(m, "gv11b_fb_ecc_init did not fail as expected (%d)\n", i); unit_return_fail(m, "gv11b_fb_ecc_init did not fail as expected (%d)\n", i);
} }
g->ops.ecc.ecc_init_support(g);
} }
err = g->ops.fb.ecc.init(g); err = g->ops.fb.ecc.init(g);

View File

@@ -440,7 +440,6 @@ static int mock_l2_flush(struct gk20a *g, bool inv)
int test_ltc_intr(struct unit_module *m, struct gk20a *g, void *args) int test_ltc_intr(struct unit_module *m, struct gk20a *g, void *args)
{ {
int err = UNIT_SUCCESS; int err = UNIT_SUCCESS;
u32 i;
const u32 offset1 = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE) * const u32 offset1 = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE) *
nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE); nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
int (*save_func)(struct gk20a *g, bool inv); int (*save_func)(struct gk20a *g, bool inv);
@@ -560,15 +559,7 @@ int test_ltc_intr(struct unit_module *m, struct gk20a *g, void *args)
g->ops.mm.cache.l2_flush = save_func; g->ops.mm.cache.l2_flush = save_func;
done: done:
for (i = 0; i < nvgpu_ltc_get_ltc_count(g); i++) { nvgpu_ltc_ecc_free(g);
if (g->ecc.ltc.ecc_sec_count != NULL) {
nvgpu_kfree(g, g->ecc.ltc.ecc_sec_count[i]);
}
if (g->ecc.ltc.ecc_ded_count != NULL) {
nvgpu_kfree(g, g->ecc.ltc.ecc_ded_count[i]);
}
}
return err; return err;
} }