gpu: nvgpu: add errata NVGPU_ERRATA_3524791

Update PES, ROP exception handling for NVGPU_ERRATA_3524791. Enable the
errata for all Volta+ chips.

ROP and PES exceptions are being reported using the physical-id,
where the logical-id should have been used. All ESR status registers are
reported using the logical-id, so this matches the SW expectation.
To address this, update the ROP and PES exception handlers to translate
from physical-id to logical-id before reading the status registers.

Bug 3524791

Change-Id: Ieacbfb306bb0e69cf0113dc92f18e401573722e3
Signed-off-by: Antony Clince Alex <aalex@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2680029
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Antony Clince Alex
2022-03-11 07:08:53 +00:00
committed by mobile promotions
parent 32a9c6923c
commit 83fe3fd35e
6 changed files with 59 additions and 10 deletions

View File

@@ -25,11 +25,14 @@
#include <nvgpu/class.h>
#include <nvgpu/engines.h>
#include <nvgpu/nvgpu_err.h>
#include <nvgpu/errata.h>
#include <nvgpu/gr/config.h>
#include <nvgpu/gr/gr.h>
#include <nvgpu/gr/gr_instances.h>
#include <nvgpu/gr/gr_intr.h>
#include "common/gr/gr_priv.h"
#include "common/gr/gr_intr_priv.h"
#include "hal/gr/intr/gr_intr_gm20b.h"
#include "hal/gr/intr/gr_intr_gp10b.h"
@@ -925,6 +928,9 @@ void ga10b_gr_intr_handle_gpc_crop_hww(struct gk20a *g, u32 gpc,
u32 num_crop_pending_masks =
sizeof(crop_pending_masks)/sizeof(*crop_pending_masks);
u32 i = 0U;
struct nvgpu_gr *gr = nvgpu_gr_get_cur_instance_ptr(g);
struct nvgpu_gr_config *config = gr->config;
u32 rop_id;
if ((gpc_exception & (gr_gpc0_gpccs_gpc_exception_crop0_pending_f() |
gr_gpc0_gpccs_gpc_exception_crop1_pending_f())) == 0U) {
@@ -933,17 +939,24 @@ void ga10b_gr_intr_handle_gpc_crop_hww(struct gk20a *g, u32 gpc,
gpc_offset = nvgpu_gr_gpc_offset(g, gpc);
for (i = 0U; i < num_crop_pending_masks; i++) {
rop_id = i;
if ((gpc_exception & crop_pending_masks[i]) == 0U) {
continue;
}
if (nvgpu_is_errata_present(g, NVGPU_ERRATA_3524791)) {
rop_id = gr_config_get_gpc_rop_logical_id_map(
config, gpc)[i];
nvgpu_assert(rop_id != UINT_MAX);
}
reg_offset = nvgpu_safe_add_u32(gpc_offset,
nvgpu_gr_rop_offset(g, i));
nvgpu_gr_rop_offset(g, rop_id));
reg_offset = nvgpu_safe_add_u32(
gr_gpc0_rop0_crop_hww_esr_r(),
reg_offset);
hww_esr = nvgpu_readl(g, reg_offset);
nvgpu_err(g, "gpc(%u) rop(%u) crop_hww_esr(0x%08x)", gpc, i,
nvgpu_err(g,
"gpc(%u) rop(%u) crop_hww_esr(0x%08x)", gpc, rop_id,
hww_esr);
nvgpu_writel(g, reg_offset,
gr_gpc0_rop0_crop_hww_esr_reset_active_f() |
@@ -964,6 +977,9 @@ void ga10b_gr_intr_handle_gpc_zrop_hww(struct gk20a *g, u32 gpc,
u32 num_zrop_pending_masks =
sizeof(zrop_pending_masks)/sizeof(*zrop_pending_masks);
u32 i = 0U;
struct nvgpu_gr *gr = nvgpu_gr_get_cur_instance_ptr(g);
struct nvgpu_gr_config *config = gr->config;
u32 rop_id;
if ((gpc_exception & (gr_gpc0_gpccs_gpc_exception_zrop0_pending_f() |
gr_gpc0_gpccs_gpc_exception_zrop1_pending_f())) == 0U) {
@@ -972,18 +988,24 @@ void ga10b_gr_intr_handle_gpc_zrop_hww(struct gk20a *g, u32 gpc,
gpc_offset = nvgpu_gr_gpc_offset(g, gpc);
for (i = 0U; i < num_zrop_pending_masks; i++) {
rop_id = i;
if ((gpc_exception & zrop_pending_masks[i]) == 0U) {
continue;
}
if (nvgpu_is_errata_present(g, NVGPU_ERRATA_3524791)) {
rop_id = gr_config_get_gpc_rop_logical_id_map(
config, gpc)[i];
nvgpu_assert(rop_id != UINT_MAX);
}
reg_offset = nvgpu_safe_add_u32(gpc_offset,
nvgpu_gr_rop_offset(g, i));
nvgpu_gr_rop_offset(g, rop_id));
reg_offset = nvgpu_safe_add_u32(
gr_gpc0_rop0_zrop_hww_esr_r(),
reg_offset);
hww_esr = nvgpu_readl(g, reg_offset);
nvgpu_err(g,
"gpc(%u) rop(%u) zrop_hww_esr(0x%08x)", gpc, i,
"gpc(%u) rop(%u) zrop_hww_esr(0x%08x)", gpc, rop_id,
hww_esr);
nvgpu_writel(g, reg_offset,
@@ -1006,27 +1028,35 @@ void ga10b_gr_intr_handle_gpc_rrh_hww(struct gk20a *g, u32 gpc,
u32 num_rrh_pending_masks =
sizeof(rrh_pending_masks)/sizeof(*rrh_pending_masks);
u32 i = 0U;
struct nvgpu_gr *gr = nvgpu_gr_get_cur_instance_ptr(g);
struct nvgpu_gr_config *config = gr->config;
u32 rop_id;
if ((gpc_exception & (gr_gpc0_gpccs_gpc_exception_rrh0_pending_f() |
gr_gpc0_gpccs_gpc_exception_rrh1_pending_f())) == 0U) {
return;
}
gpc_offset = nvgpu_gr_gpc_offset(g, gpc);
for (i = 0U; i < num_rrh_pending_masks; i++) {
rop_id = i;
if ((gpc_exception & rrh_pending_masks[i]) == 0U) {
continue;
}
if (nvgpu_is_errata_present(g, NVGPU_ERRATA_3524791)) {
rop_id = gr_config_get_gpc_rop_logical_id_map(
config, gpc)[i];
nvgpu_assert(rop_id != UINT_MAX);
}
reg_offset = nvgpu_safe_add_u32(gpc_offset,
nvgpu_gr_rop_offset(g, i));
nvgpu_gr_rop_offset(g, rop_id));
reg_offset = nvgpu_safe_add_u32(
gr_gpc0_rop0_rrh_status_r(),
reg_offset);
status = nvgpu_readl(g, reg_offset);
nvgpu_err(g, "gpc(%u) rop(%u) rrh exception status(0x%08x)",
gpc, i, status);
gpc, rop_id, status);
}
}

View File

@@ -26,12 +26,17 @@
#include <nvgpu/static_analysis.h>
#include <nvgpu/nvgpu_err.h>
#include <nvgpu/string.h>
#include <nvgpu/errata.h>
#include <nvgpu/gr/config.h>
#include <nvgpu/gr/config.h>
#include <nvgpu/gr/gr.h>
#include <nvgpu/gr/gr_instances.h>
#include <nvgpu/gr/gr_intr.h>
#include <nvgpu/gr/gr_falcon.h>
#include "common/gr/gr_priv.h"
#include "gr_intr_gp10b.h"
#include "gr_intr_gv11b.h"
@@ -615,6 +620,9 @@ void gv11b_gr_intr_handle_gpc_pes_exception(struct gk20a *g, u32 gpc,
u32 num_pes_pending_masks =
sizeof(pes_pending_masks)/sizeof(*pes_pending_masks);
u32 i = 0U;
struct nvgpu_gr *gr = nvgpu_gr_get_cur_instance_ptr(g);
struct nvgpu_gr_config *config = gr->config;
u32 pes_id;
if (((gpc_exception & gr_gpc0_gpccs_gpc_exception_pes0_m()) == 0U) &&
((gpc_exception & gr_gpc0_gpccs_gpc_exception_pes1_m())
@@ -623,13 +631,19 @@ void gv11b_gr_intr_handle_gpc_pes_exception(struct gk20a *g, u32 gpc,
}
for (i = 0U; i < num_pes_pending_masks; i++) {
pes_id = i;
if ((gpc_exception & pes_pending_masks[i]) == 0U) {
continue;
}
if (nvgpu_is_errata_present(g, NVGPU_ERRATA_3524791)) {
pes_id = gr_config_get_gpc_pes_logical_id_map(
config, gpc)[i];
nvgpu_assert(pes_id != UINT_MAX);
}
reg_offset = nvgpu_safe_add_u32(gr_gpc0_ppc0_pes_hww_esr_r(),
gpc_offset);
reg_offset = nvgpu_safe_add_u32(reg_offset, nvgpu_safe_mult_u32(
ppc_in_gpc_stride, i));
ppc_in_gpc_stride, pes_id));
hww_esr = nvgpu_readl(g, reg_offset);
nvgpu_report_err_to_sdl(g, NVGPU_ERR_MODULE_PGRAPH,

View File

@@ -1861,6 +1861,7 @@ int ga10b_init_hal(struct gk20a *g)
nvgpu_set_errata(g, NVGPU_ERRATA_3288192, true);
nvgpu_set_errata(g, NVGPU_ERRATA_SYNCPT_INVALID_ID_0, true);
nvgpu_set_errata(g, NVGPU_ERRATA_2557724, true);
nvgpu_set_errata(g, NVGPU_ERRATA_3524791, true);
nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, false);

View File

@@ -1594,6 +1594,7 @@ int gv11b_init_hal(struct gk20a *g)
nvgpu_set_errata(g, NVGPU_ERRATA_2016608, true);
nvgpu_set_errata(g, NVGPU_ERRATA_200391931, true);
nvgpu_set_errata(g, NVGPU_ERRATA_SYNCPT_INVALID_ID_0, true);
nvgpu_set_errata(g, NVGPU_ERRATA_3524791, true);
nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, false);

View File

@@ -1782,6 +1782,7 @@ int tu104_init_hal(struct gk20a *g)
nvgpu_set_errata(g, NVGPU_ERRATA_VBIOS_NVLINK_MASK, true);
nvgpu_set_errata(g, NVGPU_ERRATA_200391931, true);
nvgpu_set_errata(g, NVGPU_ERRATA_SYNCPT_INVALID_ID_0, true);
nvgpu_set_errata(g, NVGPU_ERRATA_3524791, true);
nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -52,6 +52,8 @@ struct gk20a;
DEFINE_ERRATA(NVGPU_ERRATA_200391931, "GP10B", "GR Perf"), \
/* GV11B */ \
DEFINE_ERRATA(NVGPU_ERRATA_2016608, "GV11B", "FIFO Runlist preempt"), \
DEFINE_ERRATA(NVGPU_ERRATA_3524791, "GV11B", \
"Non Virtualized GPC Exceptions"), \
/* GV100 */ \
DEFINE_ERRATA(NVGPU_ERRATA_1888034, "GV100", "Nvlink"), \
/* TU104 */ \