mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: move non-safe functions from fusa hal to non-fusa hal
Multiple non-safe functions under NVGPU_DEBUGGER, NVGPU_CILP and other
config flags were moved to fusa files. Although they are guarded by the
C flags, it makes sense to keep those functions in non-fusa files. Make
this change for all hals.

JIRA NVGPU-3853

Change-Id: I8151b55a60cb50c5058af48bab9e8068f929ac3b
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2204352
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit ec293030c1
parent e5259f5819
committed by Alex Waterman
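The same mechanical pattern repeats in every hunk below; a minimal sketch of it
follows, where foo_gm20b*.c and gm20b_foo_set_debug_mode() are made-up
placeholder names, not part of this diff:

/* Hypothetical sketch of the per-HAL move this commit performs. */

/* Before: the non-safe function lived in hal/foo/foo_gm20b_fusa.c, a
 * source built into safety (FuSa) images, relying only on its guard: */
#ifdef CONFIG_NVGPU_DEBUGGER
void gm20b_foo_set_debug_mode(struct gk20a *g, bool enable);
#endif

/* After: the identical guarded definition moves to hal/foo/foo_gm20b.c,
 * and the build lists pick up the object only for non-safety profiles:
 *
 *   nvgpu-$(CONFIG_NVGPU_HAL_NON_FUSA) += hal/foo/foo_gm20b.o
 *
 * The ifdef stays, so debugger-less configs still compile the code out,
 * but fusa sources no longer host non-safe code at all. */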
@@ -364,12 +364,15 @@ mm:
     safe: no
     sources: [ hal/mm/gmmu/gmmu_gk20a.c,
                hal/mm/gmmu/gmmu_gm20b.c]
-  cache:
+  cache_fusa:
     safe: yes
     sources: [ hal/mm/cache/flush_gk20a_fusa.c,
                hal/mm/cache/flush_gk20a.h,
                hal/mm/cache/flush_gv11b_fusa.c,
                hal/mm/cache/flush_gv11b.h ]
+  cache:
+    safe: no
+    sources: [ hal/mm/cache/flush_gk20a.c ]
   mmu_fault:
     safe: yes
     sources: [ hal/mm/mmu_fault/mmu_fault_gv11b_fusa.c,
@@ -459,7 +462,8 @@ gr:
                hal/gr/ecc/ecc_gv11b.h ]
   ecc:
     safe: no
-    sources: [hal/gr/ecc/ecc_gp10b.c,
+    sources: [hal/gr/ecc/ecc_gv11b.c,
+              hal/gr/ecc/ecc_gp10b.c,
               hal/gr/ecc/ecc_tu104.c,
               hal/gr/ecc/ecc_gp10b.h,
               hal/gr/ecc/ecc_tu104.h ]
@@ -634,7 +638,8 @@ fb:
               hal/fb/fb_tu104.c, hal/fb/fb_tu104.h,
               hal/fb/intr/fb_intr_gv100.h, hal/fb/intr/fb_intr_gv100.c,
               hal/fb/fb_mmu_fault_tu104.h, hal/fb/fb_mmu_fault_tu104.c,
-              hal/fb/intr/fb_intr_tu104.c, hal/fb/intr/fb_intr_tu104.h ]
+              hal/fb/intr/fb_intr_tu104.c, hal/fb/intr/fb_intr_tu104.h,
+              hal/fb/intr/fb_intr_ecc_gv11b.c ]
 
   pmu_fusa:
     safe: yes
@@ -209,6 +209,7 @@ nvgpu-y += \
 	hal/clk/clk_gm20b.o \
 	hal/clk/clk_tu104.o \
 	hal/gr/ecc/ecc_gp10b.o \
+	hal/gr/ecc/ecc_gv11b.o \
 	hal/gr/ecc/ecc_tu104.o \
 	hal/gr/zcull/zcull_gm20b.o \
 	hal/gr/zcull/zcull_gv11b.o \
@@ -257,6 +258,7 @@ nvgpu-y += \
 	hal/fb/fb_gv100.o \
 	hal/fb/fb_tu104.o \
 	hal/fb/fb_mmu_fault_tu104.o \
+	hal/fb/intr/fb_intr_ecc_gv11b.o \
 	hal/fb/intr/fb_intr_gv100.o \
 	hal/fb/intr/fb_intr_tu104.o \
 	hal/fuse/fuse_gm20b.o \
@@ -685,6 +687,7 @@ nvgpu-$(CONFIG_NVGPU_HAL_NON_FUSA) += \
 	hal/ltc/ltc_gm20b.o \
 	hal/ltc/ltc_gm20b_dbg.o \
 	hal/mc/mc_gm20b.o \
+	hal/mm/cache/flush_gk20a.o \
 	hal/mm/mm_gm20b.o \
 	hal/mm/mm_gk20a.o \
 	hal/mm/mm_gv100.o \
@@ -248,6 +248,7 @@ srcs += hal/init/hal_gp10b.c \
 	hal/init/hal_gp10b_litter.c \
 	hal/init/hal_gm20b.c \
 	hal/init/hal_gm20b_litter.c \
+	hal/mm/cache/flush_gk20a.c \
 	hal/mm/mm_gm20b.c \
 	hal/mm/mm_gk20a.c \
 	hal/mm/gmmu/gmmu_gk20a.c \
@@ -260,6 +261,7 @@ srcs += hal/init/hal_gp10b.c \
 	hal/falcon/falcon_gk20a.c \
 	hal/gr/config/gr_config_gm20b.c \
 	hal/gr/ecc/ecc_gp10b.c \
+	hal/gr/ecc/ecc_gv11b.c \
 	hal/gr/init/gr_init_gm20b.c \
 	hal/gr/init/gr_init_gp10b.c \
 	hal/gr/init/gr_init_gv11b.c \
@@ -281,6 +283,7 @@ srcs += hal/init/hal_gp10b.c \
 	hal/fb/fb_gp106.c \
 	hal/fb/fb_gm20b.c \
 	hal/fb/fb_gv11b.c \
+	hal/fb/intr/fb_intr_ecc_gv11b.c \
 	hal/fuse/fuse_gm20b.c \
 	hal/fifo/fifo_gk20a.c \
 	hal/fifo/preempt_gk20a.c \
@@ -91,3 +91,37 @@ u64 gm20b_fb_compression_align_mask(struct gk20a *g)
 	return SZ_64K - 1UL;
 }
 #endif
+
+#ifdef CONFIG_NVGPU_DEBUGGER
+bool gm20b_fb_debug_mode_enabled(struct gk20a *g)
+{
+	u32 debug_ctrl = gk20a_readl(g, fb_mmu_debug_ctrl_r());
+
+	return fb_mmu_debug_ctrl_debug_v(debug_ctrl) ==
+			fb_mmu_debug_ctrl_debug_enabled_v();
+}
+
+void gm20b_fb_set_mmu_debug_mode(struct gk20a *g, bool enable)
+{
+	u32 reg_val, fb_debug_ctrl;
+
+	if (enable) {
+		fb_debug_ctrl = fb_mmu_debug_ctrl_debug_enabled_f();
+		g->mmu_debug_ctrl = true;
+	} else {
+		fb_debug_ctrl = fb_mmu_debug_ctrl_debug_disabled_f();
+		g->mmu_debug_ctrl = false;
+	}
+
+	reg_val = nvgpu_readl(g, fb_mmu_debug_ctrl_r());
+	reg_val = set_field(reg_val,
+			fb_mmu_debug_ctrl_debug_m(), fb_debug_ctrl);
+	nvgpu_writel(g, fb_mmu_debug_ctrl_r(), reg_val);
+}
+
+void gm20b_fb_set_debug_mode(struct gk20a *g, bool enable)
+{
+	gm20b_fb_set_mmu_debug_mode(g, enable);
+	g->ops.gr.set_debug_mode(g, enable);
+}
+#endif
@@ -42,39 +42,6 @@
 #define VPR_INFO_FETCH_WAIT (5)
 #define WPR_INFO_ADDR_ALIGNMENT 0x0000000c
 
-#ifdef CONFIG_NVGPU_DEBUGGER
-bool gm20b_fb_debug_mode_enabled(struct gk20a *g)
-{
-	u32 debug_ctrl = gk20a_readl(g, fb_mmu_debug_ctrl_r());
-	return fb_mmu_debug_ctrl_debug_v(debug_ctrl) ==
-			fb_mmu_debug_ctrl_debug_enabled_v();
-}
-
-void gm20b_fb_set_mmu_debug_mode(struct gk20a *g, bool enable)
-{
-	u32 reg_val, fb_debug_ctrl;
-
-	if (enable) {
-		fb_debug_ctrl = fb_mmu_debug_ctrl_debug_enabled_f();
-		g->mmu_debug_ctrl = true;
-	} else {
-		fb_debug_ctrl = fb_mmu_debug_ctrl_debug_disabled_f();
-		g->mmu_debug_ctrl = false;
-	}
-
-	reg_val = nvgpu_readl(g, fb_mmu_debug_ctrl_r());
-	reg_val = set_field(reg_val,
-			fb_mmu_debug_ctrl_debug_m(), fb_debug_ctrl);
-	nvgpu_writel(g, fb_mmu_debug_ctrl_r(), reg_val);
-}
-
-void gm20b_fb_set_debug_mode(struct gk20a *g, bool enable)
-{
-	gm20b_fb_set_mmu_debug_mode(g, enable);
-	g->ops.gr.set_debug_mode(g, enable);
-}
-#endif
-
 void gm20b_fb_init_hw(struct gk20a *g)
 {
 	u64 addr = nvgpu_mem_get_addr(g, &g->mm.sysmem_flush) >> 8;
drivers/gpu/nvgpu/hal/fb/intr/fb_intr_ecc_gv11b.c (new file, 102 lines)
@@ -0,0 +1,102 @@
+/*
+ * GV11B ECC INTR
+ *
+ * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <nvgpu/log.h>
+#include <nvgpu/io.h>
+#include <nvgpu/gk20a.h>
+#include <nvgpu/nvgpu_err.h>
+
+#include "fb_intr_ecc_gv11b.h"
+
+#include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
+
+#ifdef CONFIG_NVGPU_INJECT_HWERR
+void gv11b_fb_intr_inject_hubmmu_ecc_error(struct gk20a *g,
+		struct nvgpu_hw_err_inject_info *err,
+		u32 error_info)
+{
+	unsigned int reg_addr = err->get_reg_addr();
+
+	nvgpu_info(g, "Injecting HUBMMU fault %s", err->name);
+	nvgpu_writel(g, reg_addr, err->get_reg_val(1U));
+}
+
+static inline u32 l2tlb_ecc_control_r(void)
+{
+	return fb_mmu_l2tlb_ecc_control_r();
+}
+
+static inline u32 l2tlb_ecc_control_inject_uncorrected_err_f(u32 v)
+{
+	return fb_mmu_l2tlb_ecc_control_inject_uncorrected_err_f(v);
+}
+
+static inline u32 hubtlb_ecc_control_r(void)
+{
+	return fb_mmu_hubtlb_ecc_control_r();
+}
+
+static inline u32 hubtlb_ecc_control_inject_uncorrected_err_f(u32 v)
+{
+	return fb_mmu_hubtlb_ecc_control_inject_uncorrected_err_f(v);
+}
+
+static inline u32 fillunit_ecc_control_r(void)
+{
+	return fb_mmu_fillunit_ecc_control_r();
+}
+
+static inline u32 fillunit_ecc_control_inject_uncorrected_err_f(u32 v)
+{
+	return fb_mmu_fillunit_ecc_control_inject_uncorrected_err_f(v);
+}
+
+static struct nvgpu_hw_err_inject_info hubmmu_ecc_err_desc[] = {
+	NVGPU_ECC_ERR("hubmmu_l2tlb_sa_data_ecc_uncorrected",
+			gv11b_fb_intr_inject_hubmmu_ecc_error,
+			l2tlb_ecc_control_r,
+			l2tlb_ecc_control_inject_uncorrected_err_f),
+	NVGPU_ECC_ERR("hubmmu_tlb_sa_data_ecc_uncorrected",
+			gv11b_fb_intr_inject_hubmmu_ecc_error,
+			hubtlb_ecc_control_r,
+			hubtlb_ecc_control_inject_uncorrected_err_f),
+	NVGPU_ECC_ERR("hubmmu_pte_data_ecc_uncorrected",
+			gv11b_fb_intr_inject_hubmmu_ecc_error,
+			fillunit_ecc_control_r,
+			fillunit_ecc_control_inject_uncorrected_err_f),
+};
+
+static struct nvgpu_hw_err_inject_info_desc hubmmu_err_desc;
+
+struct nvgpu_hw_err_inject_info_desc *
+gv11b_fb_intr_get_hubmmu_err_desc(struct gk20a *g)
+{
+	hubmmu_err_desc.info_ptr = hubmmu_ecc_err_desc;
+	hubmmu_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
+			sizeof(hubmmu_ecc_err_desc) /
+			sizeof(struct nvgpu_hw_err_inject_info));
+
+	return &hubmmu_err_desc;
+}
+#endif /* CONFIG_NVGPU_INJECT_HWERR */
@@ -31,76 +31,6 @@
 
 #include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
 
-#ifdef CONFIG_NVGPU_INJECT_HWERR
-void gv11b_fb_intr_inject_hubmmu_ecc_error(struct gk20a *g,
-		struct nvgpu_hw_err_inject_info *err,
-		u32 error_info)
-{
-	unsigned int reg_addr = err->get_reg_addr();
-
-	nvgpu_info(g, "Injecting HUBMMU fault %s", err->name);
-	nvgpu_writel(g, reg_addr, err->get_reg_val(1U));
-}
-
-static inline u32 l2tlb_ecc_control_r(void)
-{
-	return fb_mmu_l2tlb_ecc_control_r();
-}
-
-static inline u32 l2tlb_ecc_control_inject_uncorrected_err_f(u32 v)
-{
-	return fb_mmu_l2tlb_ecc_control_inject_uncorrected_err_f(v);
-}
-
-static inline u32 hubtlb_ecc_control_r(void)
-{
-	return fb_mmu_hubtlb_ecc_control_r();
-}
-
-static inline u32 hubtlb_ecc_control_inject_uncorrected_err_f(u32 v)
-{
-	return fb_mmu_hubtlb_ecc_control_inject_uncorrected_err_f(v);
-}
-
-static inline u32 fillunit_ecc_control_r(void)
-{
-	return fb_mmu_fillunit_ecc_control_r();
-}
-
-static inline u32 fillunit_ecc_control_inject_uncorrected_err_f(u32 v)
-{
-	return fb_mmu_fillunit_ecc_control_inject_uncorrected_err_f(v);
-}
-
-static struct nvgpu_hw_err_inject_info hubmmu_ecc_err_desc[] = {
-	NVGPU_ECC_ERR("hubmmu_l2tlb_sa_data_ecc_uncorrected",
-			gv11b_fb_intr_inject_hubmmu_ecc_error,
-			l2tlb_ecc_control_r,
-			l2tlb_ecc_control_inject_uncorrected_err_f),
-	NVGPU_ECC_ERR("hubmmu_tlb_sa_data_ecc_uncorrected",
-			gv11b_fb_intr_inject_hubmmu_ecc_error,
-			hubtlb_ecc_control_r,
-			hubtlb_ecc_control_inject_uncorrected_err_f),
-	NVGPU_ECC_ERR("hubmmu_pte_data_ecc_uncorrected",
-			gv11b_fb_intr_inject_hubmmu_ecc_error,
-			fillunit_ecc_control_r,
-			fillunit_ecc_control_inject_uncorrected_err_f),
-};
-
-static struct nvgpu_hw_err_inject_info_desc hubmmu_err_desc;
-
-struct nvgpu_hw_err_inject_info_desc *
-gv11b_fb_intr_get_hubmmu_err_desc(struct gk20a *g)
-{
-	hubmmu_err_desc.info_ptr = hubmmu_ecc_err_desc;
-	hubmmu_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
-			sizeof(hubmmu_ecc_err_desc) /
-			sizeof(struct nvgpu_hw_err_inject_info));
-
-	return &hubmmu_err_desc;
-}
-#endif /* CONFIG_NVGPU_INJECT_HWERR */
-
 static void gv11b_fb_intr_handle_ecc_l2tlb(struct gk20a *g, u32 ecc_status)
 {
 	u32 ecc_addr, corrected_cnt, uncorrected_cnt;
@@ -46,3 +46,82 @@ u32 gk20a_runlist_count_max(void)
 {
 	return fifo_eng_runlist_base__size_1_v();
 }
+
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
+/* trigger host preempt of GR pending load ctx if that ctx is not for ch */
+int gk20a_fifo_reschedule_preempt_next(struct nvgpu_channel *ch,
+		bool wait_preempt)
+{
+	struct gk20a *g = ch->g;
+	struct nvgpu_runlist_info *runlist =
+			g->fifo.runlist_info[ch->runlist_id];
+	int ret = 0;
+	u32 gr_eng_id = 0;
+	u32 fecsstat0 = 0, fecsstat1 = 0;
+	u32 preempt_id;
+	u32 preempt_type = 0;
+	struct nvgpu_engine_status_info engine_status;
+
+	if (nvgpu_engine_get_ids(
+			g, &gr_eng_id, 1, NVGPU_ENGINE_GR) != 1U) {
+		return ret;
+	}
+	if ((runlist->eng_bitmask & BIT32(gr_eng_id)) == 0U) {
+		return ret;
+	}
+
+	if (wait_preempt) {
+		u32 val = nvgpu_readl(g, fifo_preempt_r());
+
+		if ((val & fifo_preempt_pending_true_f()) != 0U) {
+			return ret;
+		}
+	}
+
+	fecsstat0 = g->ops.gr.falcon.read_fecs_ctxsw_mailbox(g,
+			NVGPU_GR_FALCON_FECS_CTXSW_MAILBOX0);
+	g->ops.engine_status.read_engine_status_info(g, gr_eng_id,
+			&engine_status);
+	if (nvgpu_engine_status_is_ctxsw_switch(&engine_status)) {
+		nvgpu_engine_status_get_next_ctx_id_type(&engine_status,
+			&preempt_id, &preempt_type);
+	} else {
+		return ret;
+	}
+	if ((preempt_id == ch->tsgid) && (preempt_type != 0U)) {
+		return ret;
+	}
+	fecsstat1 = g->ops.gr.falcon.read_fecs_ctxsw_mailbox(g,
+			NVGPU_GR_FALCON_FECS_CTXSW_MAILBOX0);
+	if (fecsstat0 != FECS_MAILBOX_0_ACK_RESTORE ||
+			fecsstat1 != FECS_MAILBOX_0_ACK_RESTORE) {
+		/* preempt useless if FECS acked save and started restore */
+		return ret;
+	}
+
+	g->ops.fifo.preempt_trigger(g, preempt_id, preempt_type != 0U);
+#ifdef TRACEPOINTS_ENABLED
+	trace_gk20a_reschedule_preempt_next(ch->chid, fecsstat0,
+		engine_status.reg_data, fecsstat1,
+		g->ops.gr.falcon.read_fecs_ctxsw_mailbox(g,
+			NVGPU_GR_FALCON_FECS_CTXSW_MAILBOX0),
+		nvgpu_readl(g, fifo_preempt_r()));
+#endif
+	if (wait_preempt) {
+		if (g->ops.fifo.is_preempt_pending(g, preempt_id,
+				preempt_type) != 0) {
+			nvgpu_err(g, "fifo preempt timed out");
+			/*
+			 * This function does not care if preempt
+			 * times out since it is here only to improve
+			 * latency. If a timeout happens, it will be
+			 * handled by other fifo handling code.
+			 */
+		}
+	}
+#ifdef TRACEPOINTS_ENABLED
+	trace_gk20a_reschedule_preempted_next(ch->chid);
+#endif
+	return ret;
+}
+#endif
@@ -125,82 +125,3 @@ void gk20a_runlist_write_state(struct gk20a *g, u32 runlists_mask,
 
 	nvgpu_writel(g, fifo_sched_disable_r(), reg_val);
 }
-
-#ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
-/* trigger host preempt of GR pending load ctx if that ctx is not for ch */
-int gk20a_fifo_reschedule_preempt_next(struct nvgpu_channel *ch,
-		bool wait_preempt)
-{
-	struct gk20a *g = ch->g;
-	struct nvgpu_runlist_info *runlist =
-			g->fifo.runlist_info[ch->runlist_id];
-	int ret = 0;
-	u32 gr_eng_id = 0;
-	u32 fecsstat0 = 0, fecsstat1 = 0;
-	u32 preempt_id;
-	u32 preempt_type = 0;
-	struct nvgpu_engine_status_info engine_status;
-
-	if (1U != nvgpu_engine_get_ids(
-			g, &gr_eng_id, 1, NVGPU_ENGINE_GR)) {
-		return ret;
-	}
-	if ((runlist->eng_bitmask & BIT32(gr_eng_id)) == 0U) {
-		return ret;
-	}
-
-	if (wait_preempt) {
-		u32 val = nvgpu_readl(g, fifo_preempt_r());
-
-		if ((val & fifo_preempt_pending_true_f()) != 0U) {
-			return ret;
-		}
-	}
-
-	fecsstat0 = g->ops.gr.falcon.read_fecs_ctxsw_mailbox(g,
-			NVGPU_GR_FALCON_FECS_CTXSW_MAILBOX0);
-	g->ops.engine_status.read_engine_status_info(g, gr_eng_id,
-			&engine_status);
-	if (nvgpu_engine_status_is_ctxsw_switch(&engine_status)) {
-		nvgpu_engine_status_get_next_ctx_id_type(&engine_status,
-			&preempt_id, &preempt_type);
-	} else {
-		return ret;
-	}
-	if ((preempt_id == ch->tsgid) && (preempt_type != 0U)) {
-		return ret;
-	}
-	fecsstat1 = g->ops.gr.falcon.read_fecs_ctxsw_mailbox(g,
-			NVGPU_GR_FALCON_FECS_CTXSW_MAILBOX0);
-	if (fecsstat0 != FECS_MAILBOX_0_ACK_RESTORE ||
-			fecsstat1 != FECS_MAILBOX_0_ACK_RESTORE) {
-		/* preempt useless if FECS acked save and started restore */
-		return ret;
-	}
-
-	g->ops.fifo.preempt_trigger(g, preempt_id, preempt_type != 0U);
-#ifdef TRACEPOINTS_ENABLED
-	trace_gk20a_reschedule_preempt_next(ch->chid, fecsstat0,
-		engine_status.reg_data, fecsstat1,
-		g->ops.gr.falcon.read_fecs_ctxsw_mailbox(g,
-			NVGPU_GR_FALCON_FECS_CTXSW_MAILBOX0),
-		nvgpu_readl(g, fifo_preempt_r()));
-#endif
-	if (wait_preempt) {
-		if (g->ops.fifo.is_preempt_pending(g, preempt_id,
-				preempt_type) != 0) {
-			nvgpu_err(g, "fifo preempt timed out");
-			/*
-			 * This function does not care if preempt
-			 * times out since it is here only to improve
-			 * latency. If a timeout happens, it will be
-			 * handled by other fifo handling code.
-			 */
-		}
-	}
-#ifdef TRACEPOINTS_ENABLED
-	trace_gk20a_reschedule_preempted_next(ch->chid);
-#endif
-	return ret;
-}
-#endif
@@ -46,3 +46,70 @@ void gp10b_ctxsw_prog_set_full_preemption_ptr(struct gk20a *g,
 		ctxsw_prog_main_image_full_preemption_ptr_o(), u64_lo32(addr));
 }
 #endif /* CONFIG_NVGPU_GRAPHICS */
+
+#ifdef CONFIG_NVGPU_CILP
+void gp10b_ctxsw_prog_set_compute_preemption_mode_cilp(struct gk20a *g,
+	struct nvgpu_mem *ctx_mem)
+{
+	nvgpu_mem_wr(g, ctx_mem,
+		ctxsw_prog_main_image_compute_preemption_options_o(),
+		ctxsw_prog_main_image_compute_preemption_options_control_cilp_f());
+}
+#endif
+
+#ifdef CONFIG_NVGPU_DEBUGGER
+void gp10b_ctxsw_prog_set_pmu_options_boost_clock_frequencies(struct gk20a *g,
+	struct nvgpu_mem *ctx_mem, u32 boosted_ctx)
+{
+	u32 data = ctxsw_prog_main_image_pmu_options_boost_clock_frequencies_f(boosted_ctx);
+
+	nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pmu_options_o(), data);
+}
+#endif /* CONFIG_NVGPU_DEBUGGER */
+
+#ifdef CONFIG_DEBUG_FS
+void gp10b_ctxsw_prog_dump_ctxsw_stats(struct gk20a *g,
+	struct nvgpu_mem *ctx_mem)
+{
+	nvgpu_err(g, "ctxsw_prog_main_image_magic_value_o : %x (expect %x)",
+		nvgpu_mem_rd(g, ctx_mem,
+				ctxsw_prog_main_image_magic_value_o()),
+		ctxsw_prog_main_image_magic_value_v_value_v());
+
+	nvgpu_err(g, "ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi : %x",
+		nvgpu_mem_rd(g, ctx_mem,
+			ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_o()));
+
+	nvgpu_err(g, "ctxsw_prog_main_image_context_timestamp_buffer_ptr : %x",
+		nvgpu_mem_rd(g, ctx_mem,
+			ctxsw_prog_main_image_context_timestamp_buffer_ptr_o()));
+
+	nvgpu_err(g, "ctxsw_prog_main_image_context_timestamp_buffer_control : %x",
+		nvgpu_mem_rd(g, ctx_mem,
+			ctxsw_prog_main_image_context_timestamp_buffer_control_o()));
+
+	nvgpu_err(g, "NUM_SAVE_OPERATIONS : %d",
+		nvgpu_mem_rd(g, ctx_mem,
+			ctxsw_prog_main_image_num_save_ops_o()));
+	nvgpu_err(g, "WFI_SAVE_OPERATIONS : %d",
+		nvgpu_mem_rd(g, ctx_mem,
+			ctxsw_prog_main_image_num_wfi_save_ops_o()));
+	nvgpu_err(g, "CTA_SAVE_OPERATIONS : %d",
+		nvgpu_mem_rd(g, ctx_mem,
+			ctxsw_prog_main_image_num_cta_save_ops_o()));
+	nvgpu_err(g, "GFXP_SAVE_OPERATIONS : %d",
+		nvgpu_mem_rd(g, ctx_mem,
+			ctxsw_prog_main_image_num_gfxp_save_ops_o()));
+	nvgpu_err(g, "CILP_SAVE_OPERATIONS : %d",
+		nvgpu_mem_rd(g, ctx_mem,
+			ctxsw_prog_main_image_num_cilp_save_ops_o()));
+	nvgpu_err(g,
+		"image gfx preemption option (GFXP is 1) %x",
+		nvgpu_mem_rd(g, ctx_mem,
+			ctxsw_prog_main_image_graphics_preemption_options_o()));
+	nvgpu_err(g,
+		"image compute preemption option (CTA is 1) %x",
+		nvgpu_mem_rd(g, ctx_mem,
+			ctxsw_prog_main_image_compute_preemption_options_o()));
+}
+#endif
@@ -29,72 +29,6 @@
 
 #include <nvgpu/hw/gp10b/hw_ctxsw_prog_gp10b.h>
 
-#ifdef CONFIG_NVGPU_CILP
-void gp10b_ctxsw_prog_set_compute_preemption_mode_cilp(struct gk20a *g,
-	struct nvgpu_mem *ctx_mem)
-{
-	nvgpu_mem_wr(g, ctx_mem,
-		ctxsw_prog_main_image_compute_preemption_options_o(),
-		ctxsw_prog_main_image_compute_preemption_options_control_cilp_f());
-}
-#endif
-
-#ifdef CONFIG_NVGPU_DEBUGGER
-void gp10b_ctxsw_prog_set_pmu_options_boost_clock_frequencies(struct gk20a *g,
-	struct nvgpu_mem *ctx_mem, u32 boosted_ctx)
-{
-	u32 data = ctxsw_prog_main_image_pmu_options_boost_clock_frequencies_f(boosted_ctx);
-	nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pmu_options_o(), data);
-}
-#endif /* CONFIG_NVGPU_DEBUGGER */
-
-#ifdef CONFIG_DEBUG_FS
-void gp10b_ctxsw_prog_dump_ctxsw_stats(struct gk20a *g,
-	struct nvgpu_mem *ctx_mem)
-{
-	nvgpu_err(g, "ctxsw_prog_main_image_magic_value_o : %x (expect %x)",
-		nvgpu_mem_rd(g, ctx_mem,
-				ctxsw_prog_main_image_magic_value_o()),
-		ctxsw_prog_main_image_magic_value_v_value_v());
-
-	nvgpu_err(g, "ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi : %x",
-		nvgpu_mem_rd(g, ctx_mem,
-			ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_o()));
-
-	nvgpu_err(g, "ctxsw_prog_main_image_context_timestamp_buffer_ptr : %x",
-		nvgpu_mem_rd(g, ctx_mem,
-			ctxsw_prog_main_image_context_timestamp_buffer_ptr_o()));
-
-	nvgpu_err(g, "ctxsw_prog_main_image_context_timestamp_buffer_control : %x",
-		nvgpu_mem_rd(g, ctx_mem,
-			ctxsw_prog_main_image_context_timestamp_buffer_control_o()));
-
-	nvgpu_err(g, "NUM_SAVE_OPERATIONS : %d",
-		nvgpu_mem_rd(g, ctx_mem,
-			ctxsw_prog_main_image_num_save_ops_o()));
-	nvgpu_err(g, "WFI_SAVE_OPERATIONS : %d",
-		nvgpu_mem_rd(g, ctx_mem,
-			ctxsw_prog_main_image_num_wfi_save_ops_o()));
-	nvgpu_err(g, "CTA_SAVE_OPERATIONS : %d",
-		nvgpu_mem_rd(g, ctx_mem,
-			ctxsw_prog_main_image_num_cta_save_ops_o()));
-	nvgpu_err(g, "GFXP_SAVE_OPERATIONS : %d",
-		nvgpu_mem_rd(g, ctx_mem,
-			ctxsw_prog_main_image_num_gfxp_save_ops_o()));
-	nvgpu_err(g, "CILP_SAVE_OPERATIONS : %d",
-		nvgpu_mem_rd(g, ctx_mem,
-			ctxsw_prog_main_image_num_cilp_save_ops_o()));
-	nvgpu_err(g,
-		"image gfx preemption option (GFXP is 1) %x",
-		nvgpu_mem_rd(g, ctx_mem,
-			ctxsw_prog_main_image_graphics_preemption_options_o()));
-	nvgpu_err(g,
-		"image compute preemption option (CTA is 1) %x",
-		nvgpu_mem_rd(g, ctx_mem,
-			ctxsw_prog_main_image_compute_preemption_options_o()));
-}
-#endif
-
 void gp10b_ctxsw_prog_set_compute_preemption_mode_cta(struct gk20a *g,
 	struct nvgpu_mem *ctx_mem)
 {
@@ -64,3 +64,25 @@ void gv11b_ctxsw_prog_set_full_preemption_ptr_veid0(struct gk20a *g,
 		u64_hi32(addr));
 }
 #endif /* CONFIG_NVGPU_GRAPHICS */
+
+#ifdef CONFIG_NVGPU_DEBUGGER
+void gv11b_ctxsw_prog_set_pm_ptr(struct gk20a *g, struct nvgpu_mem *ctx_mem,
+	u64 addr)
+{
+	addr = addr >> 8;
+	nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pm_ptr_o(),
+		u64_lo32(addr));
+	nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pm_ptr_hi_o(),
+		u64_hi32(addr));
+}
+
+u32 gv11b_ctxsw_prog_hw_get_pm_mode_stream_out_ctxsw(void)
+{
+	return ctxsw_prog_main_image_pm_mode_stream_out_ctxsw_f();
+}
+
+u32 gv11b_ctxsw_prog_hw_get_perf_counter_register_stride(void)
+{
+	return ctxsw_prog_extended_sm_dsm_perf_counter_register_stride_v();
+}
+#endif /* CONFIG_NVGPU_DEBUGGER */
@@ -29,28 +29,6 @@
 
 #include <nvgpu/hw/gv11b/hw_ctxsw_prog_gv11b.h>
 
-#ifdef CONFIG_NVGPU_DEBUGGER
-void gv11b_ctxsw_prog_set_pm_ptr(struct gk20a *g, struct nvgpu_mem *ctx_mem,
-	u64 addr)
-{
-	addr = addr >> 8;
-	nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pm_ptr_o(),
-		u64_lo32(addr));
-	nvgpu_mem_wr(g, ctx_mem, ctxsw_prog_main_image_pm_ptr_hi_o(),
-		u64_hi32(addr));
-}
-
-u32 gv11b_ctxsw_prog_hw_get_pm_mode_stream_out_ctxsw(void)
-{
-	return ctxsw_prog_main_image_pm_mode_stream_out_ctxsw_f();
-}
-
-u32 gv11b_ctxsw_prog_hw_get_perf_counter_register_stride(void)
-{
-	return ctxsw_prog_extended_sm_dsm_perf_counter_register_stride_v();
-}
-#endif /* CONFIG_NVGPU_DEBUGGER */
-
 void gv11b_ctxsw_prog_set_context_buffer_ptr(struct gk20a *g,
 	struct nvgpu_mem *ctx_mem, u64 addr)
 {
drivers/gpu/nvgpu/hal/gr/ecc/ecc_gv11b.c (new file, 327 lines)
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <nvgpu/io.h>
+#include <nvgpu/ecc.h>
+#include <nvgpu/gk20a.h>
+
+#include <nvgpu/hw/gv11b/hw_gr_gv11b.h>
+
+#include "ecc_gv11b.h"
+
+#ifdef CONFIG_NVGPU_INJECT_HWERR
+void gv11b_gr_intr_inject_fecs_ecc_error(struct gk20a *g,
+		struct nvgpu_hw_err_inject_info *err, u32 error_info)
+{
+	nvgpu_info(g, "Injecting FECS fault %s", err->name);
+	nvgpu_writel(g, err->get_reg_addr(), err->get_reg_val(1U));
+}
+
+void gv11b_gr_intr_inject_gpccs_ecc_error(struct gk20a *g,
+		struct nvgpu_hw_err_inject_info *err, u32 error_info)
+{
+	unsigned int gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
+	unsigned int gpc = (error_info & 0xFFU);
+	unsigned int reg_addr = nvgpu_safe_add_u32(err->get_reg_addr(),
+			nvgpu_safe_mult_u32(gpc, gpc_stride));
+
+	nvgpu_info(g, "Injecting GPCCS fault %s for gpc: %d", err->name, gpc);
+	nvgpu_writel(g, reg_addr, err->get_reg_val(1U));
+}
+
+void gv11b_gr_intr_inject_sm_ecc_error(struct gk20a *g,
+		struct nvgpu_hw_err_inject_info *err,
+		u32 error_info)
+{
+	unsigned int gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
+	unsigned int tpc_stride =
+		nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
+	unsigned int gpc = (error_info & 0xFF00U) >> 8U;
+	unsigned int tpc = (error_info & 0xFFU);
+	unsigned int reg_addr = nvgpu_safe_add_u32(err->get_reg_addr(),
+			nvgpu_safe_add_u32(
+				nvgpu_safe_mult_u32(gpc, gpc_stride),
+				nvgpu_safe_mult_u32(tpc, tpc_stride)));
+
+	nvgpu_info(g, "Injecting SM fault %s for gpc: %d, tpc: %d",
+			err->name, gpc, tpc);
+	nvgpu_writel(g, reg_addr, err->get_reg_val(1U));
+}
+
+void gv11b_gr_intr_inject_mmu_ecc_error(struct gk20a *g,
+		struct nvgpu_hw_err_inject_info *err, u32 error_info)
+{
+	unsigned int gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
+	unsigned int gpc = (error_info & 0xFFU);
+	unsigned int reg_addr = nvgpu_safe_add_u32(err->get_reg_addr(),
+			nvgpu_safe_mult_u32(gpc, gpc_stride));
+
+	nvgpu_info(g, "Injecting MMU fault %s for gpc: %d", err->name, gpc);
+	nvgpu_writel(g, reg_addr, err->get_reg_val(1U));
+}
+
+void gv11b_gr_intr_inject_gcc_ecc_error(struct gk20a *g,
+		struct nvgpu_hw_err_inject_info *err, u32 error_info)
+{
+	unsigned int gpc_stride = nvgpu_get_litter_value(g,
+			GPU_LIT_GPC_STRIDE);
+	unsigned int gpc = (error_info & 0xFFU);
+	unsigned int reg_addr = nvgpu_safe_add_u32(err->get_reg_addr(),
+			nvgpu_safe_mult_u32(gpc, gpc_stride));
+
+	nvgpu_info(g, "Injecting GCC fault %s for gpc: %d", err->name, gpc);
+	nvgpu_writel(g, reg_addr, err->get_reg_val(1U));
+}
+
+static inline u32 fecs_falcon_ecc_control_r(void)
+{
+	return gr_fecs_falcon_ecc_control_r();
+}
+
+static inline u32 fecs_falcon_ecc_control_inject_corrected_err_f(u32 v)
+{
+	return gr_fecs_falcon_ecc_control_inject_corrected_err_f(v);
+}
+
+static inline u32 fecs_falcon_ecc_control_inject_uncorrected_err_f(u32 v)
+{
+	return gr_fecs_falcon_ecc_control_inject_uncorrected_err_f(v);
+}
+
+static struct nvgpu_hw_err_inject_info fecs_ecc_err_desc[] = {
+	NVGPU_ECC_ERR("falcon_imem_ecc_corrected",
+			gv11b_gr_intr_inject_fecs_ecc_error,
+			fecs_falcon_ecc_control_r,
+			fecs_falcon_ecc_control_inject_corrected_err_f),
+	NVGPU_ECC_ERR("falcon_imem_ecc_uncorrected",
+			gv11b_gr_intr_inject_fecs_ecc_error,
+			fecs_falcon_ecc_control_r,
+			fecs_falcon_ecc_control_inject_uncorrected_err_f),
+};
+
+static struct nvgpu_hw_err_inject_info_desc fecs_err_desc;
+
+struct nvgpu_hw_err_inject_info_desc *
+gv11b_gr_intr_get_fecs_err_desc(struct gk20a *g)
+{
+	fecs_err_desc.info_ptr = fecs_ecc_err_desc;
+	fecs_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
+			sizeof(fecs_ecc_err_desc) /
+			sizeof(struct nvgpu_hw_err_inject_info));
+
+	return &fecs_err_desc;
+}
+
+static inline u32 gpccs_falcon_ecc_control_r(void)
+{
+	return gr_gpccs_falcon_ecc_control_r();
+}
+
+static inline u32 gpccs_falcon_ecc_control_inject_corrected_err_f(u32 v)
+{
+	return gr_gpccs_falcon_ecc_control_inject_corrected_err_f(v);
+}
+
+static inline u32 gpccs_falcon_ecc_control_inject_uncorrected_err_f(u32 v)
+{
+	return gr_gpccs_falcon_ecc_control_inject_uncorrected_err_f(v);
+}
+
+static struct nvgpu_hw_err_inject_info gpccs_ecc_err_desc[] = {
+	NVGPU_ECC_ERR("falcon_imem_ecc_corrected",
+			gv11b_gr_intr_inject_gpccs_ecc_error,
+			gpccs_falcon_ecc_control_r,
+			gpccs_falcon_ecc_control_inject_corrected_err_f),
+	NVGPU_ECC_ERR("falcon_imem_ecc_uncorrected",
+			gv11b_gr_intr_inject_gpccs_ecc_error,
+			gpccs_falcon_ecc_control_r,
+			gpccs_falcon_ecc_control_inject_uncorrected_err_f),
+};
+
+static struct nvgpu_hw_err_inject_info_desc gpccs_err_desc;
+
+struct nvgpu_hw_err_inject_info_desc *
+gv11b_gr_intr_get_gpccs_err_desc(struct gk20a *g)
+{
+	gpccs_err_desc.info_ptr = gpccs_ecc_err_desc;
+	gpccs_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
+			sizeof(gpccs_ecc_err_desc) /
+			sizeof(struct nvgpu_hw_err_inject_info));
+
+	return &gpccs_err_desc;
+}
+
+static inline u32 pri_gpc0_tpc0_sm_l1_tag_ecc_control_r(void)
+{
+	return gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_r();
+}
+
+static inline u32 pri_gpc0_tpc0_sm_l1_tag_ecc_control_inject_corrected_err_f(u32 v)
+{
+	return gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_inject_corrected_err_f(v);
+}
+
+static inline u32 pri_gpc0_tpc0_sm_l1_tag_ecc_control_inject_uncorrected_err_f(u32 v)
+{
+	return gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_inject_uncorrected_err_f(v);
+}
+
+static inline u32 pri_gpc0_tpc0_sm_cbu_ecc_control_r(void)
+{
+	return gr_pri_gpc0_tpc0_sm_cbu_ecc_control_r();
+}
+
+static inline u32 pri_gpc0_tpc0_sm_cbu_ecc_control_inject_uncorrected_err_f(u32 v)
+{
+	return gr_pri_gpc0_tpc0_sm_cbu_ecc_control_inject_uncorrected_err_f(v);
+}
+
+static inline u32 pri_gpc0_tpc0_sm_lrf_ecc_control_r(void)
+{
+	return gr_pri_gpc0_tpc0_sm_lrf_ecc_control_r();
+}
+
+static inline u32 pri_gpc0_tpc0_sm_lrf_ecc_control_inject_uncorrected_err_f(u32 v)
+{
+	return gr_pri_gpc0_tpc0_sm_lrf_ecc_control_inject_uncorrected_err_f(v);
+}
+
+static inline u32 pri_gpc0_tpc0_sm_l1_data_ecc_control_r(void)
+{
+	return gr_pri_gpc0_tpc0_sm_l1_data_ecc_control_r();
+}
+
+static inline u32 pri_gpc0_tpc0_sm_l1_data_ecc_control_inject_uncorrected_err_f(u32 v)
+{
+	return gr_pri_gpc0_tpc0_sm_l1_data_ecc_control_inject_uncorrected_err_f(v);
+}
+
+static inline u32 pri_gpc0_tpc0_sm_icache_ecc_control_r(void)
+{
+	return gr_pri_gpc0_tpc0_sm_icache_ecc_control_r();
+}
+
+static inline u32 pri_gpc0_tpc0_sm_icache_ecc_control_inject_uncorrected_err_f(u32 v)
+{
+	return gr_pri_gpc0_tpc0_sm_icache_ecc_control_inject_uncorrected_err_f(v);
+}
+
+static inline u32 pri_gpc0_mmu_l1tlb_ecc_control_r(void)
+{
+	return gr_gpc0_mmu_l1tlb_ecc_control_r();
+}
+
+static inline u32 pri_gpc0_mmu_l1tlb_ecc_control_inject_uncorrected_err_f(u32 v)
+{
+	return gr_gpc0_mmu_l1tlb_ecc_control_inject_uncorrected_err_f(v);
+}
+
+static inline u32 pri_gpc0_gcc_l15_ecc_control_r(void)
+{
+	return gr_pri_gpc0_gcc_l15_ecc_control_r();
+}
+
+static inline u32 pri_gpc0_gcc_l15_ecc_control_inject_uncorrected_err_f(u32 v)
+{
+	return gr_pri_gpc0_gcc_l15_ecc_control_inject_uncorrected_err_f(v);
+}
+
+static struct nvgpu_hw_err_inject_info sm_ecc_err_desc[] = {
+	NVGPU_ECC_ERR("l1_tag_ecc_corrected",
+			gv11b_gr_intr_inject_sm_ecc_error,
+			pri_gpc0_tpc0_sm_l1_tag_ecc_control_r,
+			pri_gpc0_tpc0_sm_l1_tag_ecc_control_inject_corrected_err_f),
+	NVGPU_ECC_ERR("l1_tag_ecc_uncorrected",
+			gv11b_gr_intr_inject_sm_ecc_error,
+			pri_gpc0_tpc0_sm_l1_tag_ecc_control_r,
+			pri_gpc0_tpc0_sm_l1_tag_ecc_control_inject_uncorrected_err_f),
+	NVGPU_ECC_ERR("cbu_ecc_uncorrected",
+			gv11b_gr_intr_inject_sm_ecc_error,
+			pri_gpc0_tpc0_sm_cbu_ecc_control_r,
+			pri_gpc0_tpc0_sm_cbu_ecc_control_inject_uncorrected_err_f),
+	NVGPU_ECC_ERR("lrf_ecc_uncorrected",
+			gv11b_gr_intr_inject_sm_ecc_error,
+			pri_gpc0_tpc0_sm_lrf_ecc_control_r,
+			pri_gpc0_tpc0_sm_lrf_ecc_control_inject_uncorrected_err_f),
+	NVGPU_ECC_ERR("l1_data_ecc_uncorrected",
+			gv11b_gr_intr_inject_sm_ecc_error,
+			pri_gpc0_tpc0_sm_l1_data_ecc_control_r,
+			pri_gpc0_tpc0_sm_l1_data_ecc_control_inject_uncorrected_err_f),
+	NVGPU_ECC_ERR("icache_l0_data_ecc_uncorrected",
+			gv11b_gr_intr_inject_sm_ecc_error,
+			pri_gpc0_tpc0_sm_icache_ecc_control_r,
+			pri_gpc0_tpc0_sm_icache_ecc_control_inject_uncorrected_err_f),
+};
+
+static struct nvgpu_hw_err_inject_info_desc sm_err_desc;
+
+struct nvgpu_hw_err_inject_info_desc *
+gv11b_gr_intr_get_sm_err_desc(struct gk20a *g)
+{
+	sm_err_desc.info_ptr = sm_ecc_err_desc;
+	sm_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
+			sizeof(sm_ecc_err_desc) /
+			sizeof(struct nvgpu_hw_err_inject_info));
+
+	return &sm_err_desc;
+}
+
+static struct nvgpu_hw_err_inject_info mmu_ecc_err_desc[] = {
+	NVGPU_ECC_ERR("l1tlb_sa_data_ecc_uncorrected",
+			gv11b_gr_intr_inject_mmu_ecc_error,
+			pri_gpc0_mmu_l1tlb_ecc_control_r,
+			pri_gpc0_mmu_l1tlb_ecc_control_inject_uncorrected_err_f),
+};
+
+static struct nvgpu_hw_err_inject_info_desc mmu_err_desc;
+
+struct nvgpu_hw_err_inject_info_desc *
+gv11b_gr_intr_get_mmu_err_desc(struct gk20a *g)
+{
+	mmu_err_desc.info_ptr = mmu_ecc_err_desc;
+	mmu_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
+			sizeof(mmu_ecc_err_desc) /
+			sizeof(struct nvgpu_hw_err_inject_info));
+
+	return &mmu_err_desc;
+}
+
+static struct nvgpu_hw_err_inject_info gcc_ecc_err_desc[] = {
+	NVGPU_ECC_ERR("l15_ecc_uncorrected",
+			gv11b_gr_intr_inject_gcc_ecc_error,
+			pri_gpc0_gcc_l15_ecc_control_r,
+			pri_gpc0_gcc_l15_ecc_control_inject_uncorrected_err_f),
+};
+
+static struct nvgpu_hw_err_inject_info_desc gcc_err_desc;
+
+struct nvgpu_hw_err_inject_info_desc *
+gv11b_gr_intr_get_gcc_err_desc(struct gk20a *g)
+{
+	gcc_err_desc.info_ptr = gcc_ecc_err_desc;
+	gcc_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
+			sizeof(gcc_ecc_err_desc) /
+			sizeof(struct nvgpu_hw_err_inject_info));
+
+	return &gcc_err_desc;
+}
+#endif /* CONFIG_NVGPU_INJECT_HWERR */
@@ -28,304 +28,6 @@
|
|||||||
|
|
||||||
#include "ecc_gv11b.h"
|
#include "ecc_gv11b.h"
|
||||||
|
|
||||||
#ifdef CONFIG_NVGPU_INJECT_HWERR
|
|
||||||
void gv11b_gr_intr_inject_fecs_ecc_error(struct gk20a *g,
|
|
||||||
struct nvgpu_hw_err_inject_info *err, u32 error_info)
|
|
||||||
{
|
|
||||||
nvgpu_info(g, "Injecting FECS fault %s", err->name);
|
|
||||||
nvgpu_writel(g, err->get_reg_addr(), err->get_reg_val(1U));
|
|
||||||
}
|
|
||||||
|
|
||||||
void gv11b_gr_intr_inject_gpccs_ecc_error(struct gk20a *g,
|
|
||||||
struct nvgpu_hw_err_inject_info *err, u32 error_info)
|
|
||||||
{
|
|
||||||
unsigned int gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
|
|
||||||
unsigned int gpc = (error_info & 0xFFU);
|
|
||||||
unsigned int reg_addr = nvgpu_safe_add_u32(err->get_reg_addr(),
|
|
||||||
nvgpu_safe_mult_u32(gpc , gpc_stride));
|
|
||||||
|
|
||||||
nvgpu_info(g, "Injecting GPCCS fault %s for gpc: %d", err->name, gpc);
|
|
||||||
nvgpu_writel(g, reg_addr, err->get_reg_val(1U));
|
|
||||||
}
|
|
||||||
|
|
||||||
void gv11b_gr_intr_inject_sm_ecc_error(struct gk20a *g,
|
|
||||||
struct nvgpu_hw_err_inject_info *err,
|
|
||||||
u32 error_info)
|
|
||||||
{
|
|
||||||
unsigned int gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
|
|
||||||
unsigned int tpc_stride =
|
|
||||||
nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
|
|
||||||
unsigned int gpc = (error_info & 0xFF00U) >> 8U;
|
|
||||||
unsigned int tpc = (error_info & 0xFFU);
|
|
||||||
unsigned int reg_addr = nvgpu_safe_add_u32(err->get_reg_addr(),
|
|
||||||
nvgpu_safe_add_u32(
|
|
||||||
nvgpu_safe_mult_u32(gpc , gpc_stride),
|
|
||||||
nvgpu_safe_mult_u32(tpc , tpc_stride)));
|
|
||||||
|
|
||||||
nvgpu_info(g, "Injecting SM fault %s for gpc: %d, tpc: %d",
|
|
||||||
err->name, gpc, tpc);
|
|
||||||
nvgpu_writel(g, reg_addr, err->get_reg_val(1U));
|
|
||||||
}
|
|
||||||
|
|
||||||
void gv11b_gr_intr_inject_mmu_ecc_error(struct gk20a *g,
|
|
||||||
struct nvgpu_hw_err_inject_info *err, u32 error_info)
|
|
||||||
{
|
|
||||||
unsigned int gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
|
|
||||||
unsigned int gpc = (error_info & 0xFFU);
|
|
||||||
unsigned int reg_addr = nvgpu_safe_add_u32(err->get_reg_addr(),
|
|
||||||
nvgpu_safe_mult_u32(gpc , gpc_stride));
|
|
||||||
|
|
||||||
nvgpu_info(g, "Injecting MMU fault %s for gpc: %d", err->name, gpc);
|
|
||||||
nvgpu_writel(g, reg_addr, err->get_reg_val(1U));
|
|
||||||
}
|
|
||||||
|
|
||||||
void gv11b_gr_intr_inject_gcc_ecc_error(struct gk20a *g,
|
|
||||||
struct nvgpu_hw_err_inject_info *err, u32 error_info)
|
|
||||||
{
|
|
||||||
unsigned int gpc_stride = nvgpu_get_litter_value(g,
|
|
||||||
GPU_LIT_GPC_STRIDE);
|
|
||||||
unsigned int gpc = (error_info & 0xFFU);
|
|
||||||
unsigned int reg_addr = nvgpu_safe_add_u32(err->get_reg_addr(),
|
|
||||||
nvgpu_safe_mult_u32(gpc , gpc_stride));
|
|
||||||
|
|
||||||
nvgpu_info(g, "Injecting GCC fault %s for gpc: %d", err->name, gpc);
|
|
||||||
nvgpu_writel(g, reg_addr, err->get_reg_val(1U));
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u32 fecs_falcon_ecc_control_r(void)
|
|
||||||
{
|
|
||||||
return gr_fecs_falcon_ecc_control_r();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u32 fecs_falcon_ecc_control_inject_corrected_err_f(u32 v)
|
|
||||||
{
|
|
||||||
return gr_fecs_falcon_ecc_control_inject_corrected_err_f(v);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u32 fecs_falcon_ecc_control_inject_uncorrected_err_f(u32 v)
|
|
||||||
{
|
|
||||||
return gr_fecs_falcon_ecc_control_inject_uncorrected_err_f(v);
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct nvgpu_hw_err_inject_info fecs_ecc_err_desc[] = {
|
|
||||||
NVGPU_ECC_ERR("falcon_imem_ecc_corrected",
|
|
||||||
gv11b_gr_intr_inject_fecs_ecc_error,
|
|
||||||
fecs_falcon_ecc_control_r,
|
|
||||||
fecs_falcon_ecc_control_inject_corrected_err_f),
|
|
||||||
NVGPU_ECC_ERR("falcon_imem_ecc_uncorrected",
|
|
||||||
gv11b_gr_intr_inject_fecs_ecc_error,
|
|
||||||
fecs_falcon_ecc_control_r,
|
|
||||||
fecs_falcon_ecc_control_inject_uncorrected_err_f),
|
|
||||||
};
|
|
||||||
|
|
||||||
static struct nvgpu_hw_err_inject_info_desc fecs_err_desc;
|
|
||||||
|
|
||||||
struct nvgpu_hw_err_inject_info_desc *
|
|
||||||
gv11b_gr_intr_get_fecs_err_desc(struct gk20a *g)
|
|
||||||
{
|
|
||||||
fecs_err_desc.info_ptr = fecs_ecc_err_desc;
|
|
||||||
fecs_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
|
|
||||||
sizeof(fecs_ecc_err_desc) /
|
|
||||||
sizeof(struct nvgpu_hw_err_inject_info));
|
|
||||||
|
|
||||||
return &fecs_err_desc;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u32 gpccs_falcon_ecc_control_r(void)
|
|
||||||
{
|
|
||||||
return gr_gpccs_falcon_ecc_control_r();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u32 gpccs_falcon_ecc_control_inject_corrected_err_f(u32 v)
|
|
||||||
{
|
|
||||||
return gr_gpccs_falcon_ecc_control_inject_corrected_err_f(v);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u32 gpccs_falcon_ecc_control_inject_uncorrected_err_f(u32 v)
|
|
||||||
{
|
|
||||||
return gr_gpccs_falcon_ecc_control_inject_uncorrected_err_f(v);
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct nvgpu_hw_err_inject_info gpccs_ecc_err_desc[] = {
|
|
||||||
NVGPU_ECC_ERR("falcon_imem_ecc_corrected",
|
|
||||||
gv11b_gr_intr_inject_gpccs_ecc_error,
|
|
||||||
gpccs_falcon_ecc_control_r,
|
|
||||||
gpccs_falcon_ecc_control_inject_corrected_err_f),
|
|
||||||
NVGPU_ECC_ERR("falcon_imem_ecc_uncorrected",
|
|
||||||
gv11b_gr_intr_inject_gpccs_ecc_error,
|
|
||||||
gpccs_falcon_ecc_control_r,
|
|
||||||
gpccs_falcon_ecc_control_inject_uncorrected_err_f),
|
|
||||||
};
|
|
||||||
|
|
||||||
static struct nvgpu_hw_err_inject_info_desc gpccs_err_desc;
|
|
||||||
|
|
||||||
struct nvgpu_hw_err_inject_info_desc *
|
|
||||||
gv11b_gr_intr_get_gpccs_err_desc(struct gk20a *g)
|
|
||||||
{
|
|
||||||
gpccs_err_desc.info_ptr = gpccs_ecc_err_desc;
|
|
||||||
gpccs_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
|
|
||||||
sizeof(gpccs_ecc_err_desc) /
|
|
||||||
sizeof(struct nvgpu_hw_err_inject_info));
|
|
||||||
|
|
||||||
return &gpccs_err_desc;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u32 pri_gpc0_tpc0_sm_l1_tag_ecc_control_r(void)
|
|
||||||
{
|
|
||||||
return gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_r();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u32 pri_gpc0_tpc0_sm_l1_tag_ecc_control_inject_corrected_err_f(u32 v)
|
|
||||||
{
|
|
||||||
return gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_inject_corrected_err_f(v);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u32 pri_gpc0_tpc0_sm_l1_tag_ecc_control_inject_uncorrected_err_f(u32 v)
|
|
||||||
{
|
|
||||||
return gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_inject_uncorrected_err_f(v);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u32 pri_gpc0_tpc0_sm_cbu_ecc_control_r(void)
|
|
||||||
{
|
|
||||||
return gr_pri_gpc0_tpc0_sm_cbu_ecc_control_r();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u32 pri_gpc0_tpc0_sm_cbu_ecc_control_inject_uncorrected_err_f(u32 v)
|
|
||||||
{
|
|
||||||
return gr_pri_gpc0_tpc0_sm_cbu_ecc_control_inject_uncorrected_err_f(v);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u32 pri_gpc0_tpc0_sm_lrf_ecc_control_r(void)
|
|
||||||
{
|
|
||||||
return gr_pri_gpc0_tpc0_sm_lrf_ecc_control_r();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u32 pri_gpc0_tpc0_sm_lrf_ecc_control_inject_uncorrected_err_f(u32 v)
|
|
||||||
{
|
|
||||||
	return gr_pri_gpc0_tpc0_sm_lrf_ecc_control_inject_uncorrected_err_f(v);
}

static inline u32 pri_gpc0_tpc0_sm_l1_data_ecc_control_r(void)
{
	return gr_pri_gpc0_tpc0_sm_l1_data_ecc_control_r();
}

static inline u32 pri_gpc0_tpc0_sm_l1_data_ecc_control_inject_uncorrected_err_f(u32 v)
{
	return gr_pri_gpc0_tpc0_sm_l1_data_ecc_control_inject_uncorrected_err_f(v);
}

static inline u32 pri_gpc0_tpc0_sm_icache_ecc_control_r(void)
{
	return gr_pri_gpc0_tpc0_sm_icache_ecc_control_r();
}

static inline u32 pri_gpc0_tpc0_sm_icache_ecc_control_inject_uncorrected_err_f(u32 v)
{
	return gr_pri_gpc0_tpc0_sm_icache_ecc_control_inject_uncorrected_err_f(v);
}

static inline u32 pri_gpc0_mmu_l1tlb_ecc_control_r(void)
{
	return gr_gpc0_mmu_l1tlb_ecc_control_r();
}

static inline u32 pri_gpc0_mmu_l1tlb_ecc_control_inject_uncorrected_err_f(u32 v)
{
	return gr_gpc0_mmu_l1tlb_ecc_control_inject_uncorrected_err_f(v);
}

static inline u32 pri_gpc0_gcc_l15_ecc_control_r(void)
{
	return gr_pri_gpc0_gcc_l15_ecc_control_r();
}

static inline u32 pri_gpc0_gcc_l15_ecc_control_inject_uncorrected_err_f(u32 v)
{
	return gr_pri_gpc0_gcc_l15_ecc_control_inject_uncorrected_err_f(v);
}

static struct nvgpu_hw_err_inject_info sm_ecc_err_desc[] = {
	NVGPU_ECC_ERR("l1_tag_ecc_corrected",
			gv11b_gr_intr_inject_sm_ecc_error,
			pri_gpc0_tpc0_sm_l1_tag_ecc_control_r,
			pri_gpc0_tpc0_sm_l1_tag_ecc_control_inject_corrected_err_f),
	NVGPU_ECC_ERR("l1_tag_ecc_uncorrected",
			gv11b_gr_intr_inject_sm_ecc_error,
			pri_gpc0_tpc0_sm_l1_tag_ecc_control_r,
			pri_gpc0_tpc0_sm_l1_tag_ecc_control_inject_uncorrected_err_f),
	NVGPU_ECC_ERR("cbu_ecc_uncorrected",
			gv11b_gr_intr_inject_sm_ecc_error,
			pri_gpc0_tpc0_sm_cbu_ecc_control_r,
			pri_gpc0_tpc0_sm_cbu_ecc_control_inject_uncorrected_err_f),
	NVGPU_ECC_ERR("lrf_ecc_uncorrected",
			gv11b_gr_intr_inject_sm_ecc_error,
			pri_gpc0_tpc0_sm_lrf_ecc_control_r,
			pri_gpc0_tpc0_sm_lrf_ecc_control_inject_uncorrected_err_f),
	NVGPU_ECC_ERR("l1_data_ecc_uncorrected",
			gv11b_gr_intr_inject_sm_ecc_error,
			pri_gpc0_tpc0_sm_l1_data_ecc_control_r,
			pri_gpc0_tpc0_sm_l1_data_ecc_control_inject_uncorrected_err_f),
	NVGPU_ECC_ERR("icache_l0_data_ecc_uncorrected",
			gv11b_gr_intr_inject_sm_ecc_error,
			pri_gpc0_tpc0_sm_icache_ecc_control_r,
			pri_gpc0_tpc0_sm_icache_ecc_control_inject_uncorrected_err_f),
};

static struct nvgpu_hw_err_inject_info_desc sm_err_desc;

struct nvgpu_hw_err_inject_info_desc *
gv11b_gr_intr_get_sm_err_desc(struct gk20a *g)
{
	sm_err_desc.info_ptr = sm_ecc_err_desc;
	sm_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
			sizeof(sm_ecc_err_desc) /
			sizeof(struct nvgpu_hw_err_inject_info));

	return &sm_err_desc;
}

static struct nvgpu_hw_err_inject_info mmu_ecc_err_desc[] = {
	NVGPU_ECC_ERR("l1tlb_sa_data_ecc_uncorrected",
			gv11b_gr_intr_inject_mmu_ecc_error,
			pri_gpc0_mmu_l1tlb_ecc_control_r,
			pri_gpc0_mmu_l1tlb_ecc_control_inject_uncorrected_err_f),
};

static struct nvgpu_hw_err_inject_info_desc mmu_err_desc;

struct nvgpu_hw_err_inject_info_desc *
gv11b_gr_intr_get_mmu_err_desc(struct gk20a *g)
{
	mmu_err_desc.info_ptr = mmu_ecc_err_desc;
	mmu_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
			sizeof(mmu_ecc_err_desc) /
			sizeof(struct nvgpu_hw_err_inject_info));

	return &mmu_err_desc;
}

static struct nvgpu_hw_err_inject_info gcc_ecc_err_desc[] = {
	NVGPU_ECC_ERR("l15_ecc_uncorrected",
			gv11b_gr_intr_inject_gcc_ecc_error,
			pri_gpc0_gcc_l15_ecc_control_r,
			pri_gpc0_gcc_l15_ecc_control_inject_uncorrected_err_f),
};

static struct nvgpu_hw_err_inject_info_desc gcc_err_desc;

struct nvgpu_hw_err_inject_info_desc *
gv11b_gr_intr_get_gcc_err_desc(struct gk20a *g)
{
	gcc_err_desc.info_ptr = gcc_ecc_err_desc;
	gcc_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
			sizeof(gcc_ecc_err_desc) /
			sizeof(struct nvgpu_hw_err_inject_info));

	return &gcc_err_desc;
}
#endif /* CONFIG_NVGPU_INJECT_HWERR */
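Each NVGPU_ECC_ERR entry above bundles an error name with an injection callback and the register accessors it needs. As a rough stand-alone model of how such a table is consumed (a lookup by name, then a write of get_reg_val(1) to get_reg_addr()); all names below are illustrative, not the actual nvgpu error-injection path:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct err_desc {
		const char *name;
		uint32_t (*get_reg_addr)(void);
		uint32_t (*get_reg_val)(uint32_t v);
	};

	static uint32_t demo_ctrl_addr(void) { return 0x00419ea8u; } /* made-up offset */
	static uint32_t demo_inject_f(uint32_t v) { return v << 4; } /* made-up field */

	static struct err_desc table[] = {
		{ "lrf_ecc_uncorrected", demo_ctrl_addr, demo_inject_f },
	};

	static int inject_by_name(const char *name)
	{
		size_t i;

		for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
			if (strcmp(table[i].name, name) == 0) {
				/* a real driver would do a register write here */
				printf("write 0x%08x -> reg 0x%08x\n",
				       (unsigned)table[i].get_reg_val(1u),
				       (unsigned)table[i].get_reg_addr());
				return 0;
			}
		}
		return -1; /* unknown error id */
	}

	int main(void)
	{
		return inject_by_name("lrf_ecc_uncorrected");
	}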
static void gv11b_ecc_enable_smlrf(struct gk20a *g,
				u32 fecs_feature_override_ecc, bool opt_ecc_en)
{
@@ -190,3 +190,48 @@ void gv11b_gr_init_commit_gfxp_wfi_timeout(struct gk20a *g,
		GFXP_WFI_TIMEOUT_COUNT_IN_USEC_DEFAULT, patch);
}
#endif /* CONFIG_NVGPU_GRAPHICS */

#ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
void gv11b_gr_init_get_access_map(struct gk20a *g,
				   u32 **whitelist, u32 *num_entries)
{
	static u32 wl_addr_gv11b[] = {
		/* this list must be sorted (low to high) */
		0x404468, /* gr_pri_mme_max_instructions */
		0x418300, /* gr_pri_gpcs_rasterarb_line_class */
		0x418800, /* gr_pri_gpcs_setup_debug */
		0x418e00, /* gr_pri_gpcs_swdx_config */
		0x418e40, /* gr_pri_gpcs_swdx_tc_bundle_ctrl */
		0x418e44, /* gr_pri_gpcs_swdx_tc_bundle_ctrl */
		0x418e48, /* gr_pri_gpcs_swdx_tc_bundle_ctrl */
		0x418e4c, /* gr_pri_gpcs_swdx_tc_bundle_ctrl */
		0x418e50, /* gr_pri_gpcs_swdx_tc_bundle_ctrl */
		0x418e58, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e5c, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e60, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e64, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e68, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e6c, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e70, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e74, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e78, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e7c, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e80, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e84, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e88, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e8c, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e90, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e94, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x419864, /* gr_pri_gpcs_tpcs_pe_l2_evict_policy */
		0x419a04, /* gr_pri_gpcs_tpcs_tex_lod_dbg */
		0x419a08, /* gr_pri_gpcs_tpcs_tex_samp_dbg */
		0x419e84, /* gr_pri_gpcs_tpcs_sms_dbgr_control0 */
		0x419ba4, /* gr_pri_gpcs_tpcs_sm_disp_ctrl */
	};
	size_t array_size;

	*whitelist = wl_addr_gv11b;
	array_size = ARRAY_SIZE(wl_addr_gv11b);
	*num_entries = nvgpu_safe_cast_u64_to_u32(array_size);
}
#endif
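The whitelist comment requires the array to stay sorted low-to-high, which lets a consumer test register addresses with a binary search rather than a linear scan. A minimal stand-alone sketch of that membership test (illustrative names only; the actual nvgpu lookup code is not part of this diff):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	/* Binary search over a sorted register whitelist. */
	static bool addr_in_whitelist(const uint32_t *wl, size_t n, uint32_t addr)
	{
		size_t lo = 0, hi = n;

		while (lo < hi) {
			size_t mid = lo + (hi - lo) / 2;

			if (wl[mid] == addr)
				return true;
			if (wl[mid] < addr)
				lo = mid + 1;
			else
				hi = mid;
		}
		return false;
	}

	int main(void)
	{
		static const uint32_t wl[] = { 0x404468, 0x418300, 0x418800 };

		return addr_in_whitelist(wl, 3, 0x418300) ? 0 : 1;
	}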
@@ -357,51 +357,6 @@ void gv11b_gr_init_gpc_mmu(struct gk20a *g)
		g->ops.fb.mmu_debug_rd(g));
}

#ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
void gv11b_gr_init_get_access_map(struct gk20a *g,
				   u32 **whitelist, u32 *num_entries)
{
	static u32 wl_addr_gv11b[] = {
		/* this list must be sorted (low to high) */
		0x404468, /* gr_pri_mme_max_instructions */
		0x418300, /* gr_pri_gpcs_rasterarb_line_class */
		0x418800, /* gr_pri_gpcs_setup_debug */
		0x418e00, /* gr_pri_gpcs_swdx_config */
		0x418e40, /* gr_pri_gpcs_swdx_tc_bundle_ctrl */
		0x418e44, /* gr_pri_gpcs_swdx_tc_bundle_ctrl */
		0x418e48, /* gr_pri_gpcs_swdx_tc_bundle_ctrl */
		0x418e4c, /* gr_pri_gpcs_swdx_tc_bundle_ctrl */
		0x418e50, /* gr_pri_gpcs_swdx_tc_bundle_ctrl */
		0x418e58, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e5c, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e60, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e64, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e68, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e6c, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e70, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e74, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e78, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e7c, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e80, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e84, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e88, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e8c, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e90, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x418e94, /* gr_pri_gpcs_swdx_tc_bundle_addr */
		0x419864, /* gr_pri_gpcs_tpcs_pe_l2_evict_policy */
		0x419a04, /* gr_pri_gpcs_tpcs_tex_lod_dbg */
		0x419a08, /* gr_pri_gpcs_tpcs_tex_samp_dbg */
		0x419e84, /* gr_pri_gpcs_tpcs_sms_dbgr_control0 */
		0x419ba4, /* gr_pri_gpcs_tpcs_sm_disp_ctrl */
	};
	size_t array_size;

	*whitelist = wl_addr_gv11b;
	array_size = ARRAY_SIZE(wl_addr_gv11b);
	*num_entries = nvgpu_safe_cast_u64_to_u32(array_size);
}
#endif

void gv11b_gr_init_sm_id_numbering(struct gk20a *g, u32 gpc, u32 tpc, u32 smid,
			struct nvgpu_gr_config *gr_config)
{
@@ -345,3 +345,39 @@ u32 gm20b_gr_intr_get_sm_no_lock_down_hww_global_esr_mask(struct gk20a *g)

	return global_esr_mask;
}

#ifdef CONFIG_NVGPU_DEBUGGER
u64 gm20b_gr_intr_tpc_enabled_exceptions(struct gk20a *g)
{
	u32 sm_id;
	u64 tpc_exception_en = 0;
	u32 sm_bit_in_tpc = 0U;
	u32 offset, regval, tpc_offset, gpc_offset;
	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
	u32 no_of_sm = g->ops.gr.init.get_no_of_sm(g);
	struct nvgpu_gr_config *config = nvgpu_gr_get_config_ptr(g);

	for (sm_id = 0; sm_id < no_of_sm; sm_id++) {
		struct nvgpu_sm_info *sm_info =
			nvgpu_gr_config_get_sm_info(config, sm_id);
		tpc_offset = nvgpu_safe_mult_u32(tpc_in_gpc_stride,
			nvgpu_gr_config_get_sm_info_tpc_index(sm_info));
		gpc_offset = nvgpu_safe_mult_u32(gpc_stride,
			nvgpu_gr_config_get_sm_info_gpc_index(sm_info));
		offset = nvgpu_safe_add_u32(tpc_offset, gpc_offset);

		regval = gk20a_readl(g, nvgpu_safe_add_u32(
			gr_gpc0_tpc0_tpccs_tpc_exception_en_r(), offset));
		/*
		 * Each bit represents corresponding enablement state, bit 0
		 * corresponds to SM0.
		 */
		sm_bit_in_tpc =
			gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_v(regval);
		tpc_exception_en |= (u64)sm_bit_in_tpc << sm_id;
	}

	return tpc_exception_en;
}
#endif
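gm20b_gr_intr_tpc_enabled_exceptions() packs one enable bit per SM into a u64, with bit sm_id holding that SM's TPC-exception-enable state. A stand-alone sketch of decoding the returned mask (print_enabled_sms is illustrative, not an nvgpu function):

	#include <stdint.h>
	#include <stdio.h>

	/* Walk the per-SM bitmask; bit sm corresponds to SM sm. */
	static void print_enabled_sms(uint64_t mask, uint32_t no_of_sm)
	{
		for (uint32_t sm = 0; sm < no_of_sm; sm++) {
			if ((mask >> sm) & 1ull)
				printf("SM %u: TPC exceptions enabled\n",
				       (unsigned)sm);
		}
	}

	int main(void)
	{
		print_enabled_sms(0x5ull, 4); /* SMs 0 and 2 enabled */
		return 0;
	}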
@@ -450,36 +450,3 @@ u32 gm20b_gr_intr_nonstall_isr(struct gk20a *g)
	}
	return ops;
}

#ifdef CONFIG_NVGPU_DEBUGGER
u64 gm20b_gr_intr_tpc_enabled_exceptions(struct gk20a *g)
{
	u32 sm_id;
	u64 tpc_exception_en = 0;
	u32 sm_bit_in_tpc = 0U;
	u32 offset, regval, tpc_offset, gpc_offset;
	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
	u32 no_of_sm = g->ops.gr.init.get_no_of_sm(g);
	struct nvgpu_gr_config *config = nvgpu_gr_get_config_ptr(g);

	for (sm_id = 0; sm_id < no_of_sm; sm_id++) {
		struct nvgpu_sm_info *sm_info =
			nvgpu_gr_config_get_sm_info(config, sm_id);
		tpc_offset = nvgpu_safe_mult_u32(tpc_in_gpc_stride,
			nvgpu_gr_config_get_sm_info_tpc_index(sm_info));
		gpc_offset = nvgpu_safe_mult_u32(gpc_stride,
			nvgpu_gr_config_get_sm_info_gpc_index(sm_info));
		offset = nvgpu_safe_add_u32(tpc_offset, gpc_offset);

		regval = gk20a_readl(g, nvgpu_safe_add_u32(
			gr_gpc0_tpc0_tpccs_tpc_exception_en_r(), offset));
		/* Each bit represents corresponding enablement state, bit 0 corrsponds to SM0 */
		sm_bit_in_tpc =
			gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_v(regval);
		tpc_exception_en |= (u64)sm_bit_in_tpc << sm_id;
	}

	return tpc_exception_en;
}
#endif
@@ -53,8 +53,10 @@ static int gp10b_gr_intr_clear_cilp_preempt_pending(struct gk20a *g,

	gr_ctx = tsg->gr_ctx;

	/*
	 * The ucode is self-clearing, so all we need to do here is
	 * to clear cilp_preempt_pending.
	 */
	if (!nvgpu_gr_ctx_get_cilp_preempt_pending(gr_ctx)) {
		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
			"CILP is already cleared for chid %d\n",
@@ -49,3 +49,59 @@ void gv11b_ltc_set_zbc_stencil_entry(struct gk20a *g,
			stencil_depth);
}
#endif /* CONFIG_NVGPU_GRAPHICS */

#ifdef CONFIG_NVGPU_INJECT_HWERR
void gv11b_ltc_inject_ecc_error(struct gk20a *g,
		struct nvgpu_hw_err_inject_info *err, u32 error_info)
{
	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
	u32 ltc = (error_info & 0xFF00U) >> 8U;
	u32 lts = (error_info & 0xFFU);
	u32 reg_addr = nvgpu_safe_add_u32(err->get_reg_addr(),
			nvgpu_safe_add_u32(nvgpu_safe_mult_u32(ltc, ltc_stride),
				nvgpu_safe_mult_u32(lts, lts_stride)));

	nvgpu_info(g, "Injecting LTC fault %s for ltc: %d, lts: %d",
			err->name, ltc, lts);
	nvgpu_writel(g, reg_addr, err->get_reg_val(1U));
}

static inline u32 ltc0_lts0_l1_cache_ecc_control_r(void)
{
	return ltc_ltc0_lts0_l1_cache_ecc_control_r();
}

static inline u32 ltc0_lts0_l1_cache_ecc_control_inject_corrected_err_f(u32 v)
{
	return ltc_ltc0_lts0_l1_cache_ecc_control_inject_corrected_err_f(v);
}

static inline u32 ltc0_lts0_l1_cache_ecc_control_inject_uncorrected_err_f(u32 v)
{
	return ltc_ltc0_lts0_l1_cache_ecc_control_inject_uncorrected_err_f(v);
}

static struct nvgpu_hw_err_inject_info ltc_ecc_err_desc[] = {
	NVGPU_ECC_ERR("cache_rstg_ecc_corrected",
			gv11b_ltc_inject_ecc_error,
			ltc0_lts0_l1_cache_ecc_control_r,
			ltc0_lts0_l1_cache_ecc_control_inject_corrected_err_f),
	NVGPU_ECC_ERR("cache_rstg_ecc_uncorrected",
			gv11b_ltc_inject_ecc_error,
			ltc0_lts0_l1_cache_ecc_control_r,
			ltc0_lts0_l1_cache_ecc_control_inject_uncorrected_err_f),
};

static struct nvgpu_hw_err_inject_info_desc ltc_err_desc;

struct nvgpu_hw_err_inject_info_desc *gv11b_ltc_get_err_desc(struct gk20a *g)
{
	ltc_err_desc.info_ptr = ltc_ecc_err_desc;
	ltc_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
			sizeof(ltc_ecc_err_desc) /
			sizeof(struct nvgpu_hw_err_inject_info));

	return &ltc_err_desc;
}
#endif /* CONFIG_NVGPU_INJECT_HWERR */
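gv11b_ltc_inject_ecc_error() decodes its error_info argument as the LTC index in bits 15:8 and the LTS index in bits 7:0, then offsets the per-unit control register by the LTC/LTS strides. A stand-alone helper showing how a caller would pack that word (make_ltc_error_info is illustrative, not part of nvgpu):

	#include <stdint.h>
	#include <stdio.h>

	/* Pack an LTC/LTS pair the way gv11b_ltc_inject_ecc_error() unpacks it. */
	static uint32_t make_ltc_error_info(uint32_t ltc, uint32_t lts)
	{
		return ((ltc & 0xFFu) << 8) | (lts & 0xFFu);
	}

	int main(void)
	{
		/* 0x0102: LTC 1 in bits 15:8, LTS 2 in bits 7:0 */
		printf("error_info = 0x%04x\n",
		       (unsigned)make_ltc_error_info(1u, 2u));
		return 0;
	}

Feeding 0x0102 back through the injector's `(error_info & 0xFF00U) >> 8U` and `(error_info & 0xFFU)` recovers ltc 1 and lts 2.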
@@ -33,62 +33,6 @@

#include <nvgpu/utils.h>

#ifdef CONFIG_NVGPU_INJECT_HWERR
void gv11b_ltc_inject_ecc_error(struct gk20a *g,
		struct nvgpu_hw_err_inject_info *err, u32 error_info)
{
	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
	u32 ltc = (error_info & 0xFF00U) >> 8U;
	u32 lts = (error_info & 0xFFU);
	u32 reg_addr = nvgpu_safe_add_u32(err->get_reg_addr(),
			nvgpu_safe_add_u32(nvgpu_safe_mult_u32(ltc, ltc_stride),
				nvgpu_safe_mult_u32(lts, lts_stride)));

	nvgpu_info(g, "Injecting LTC fault %s for ltc: %d, lts: %d",
			err->name, ltc, lts);
	nvgpu_writel(g, reg_addr, err->get_reg_val(1U));
}

static inline u32 ltc0_lts0_l1_cache_ecc_control_r(void)
{
	return ltc_ltc0_lts0_l1_cache_ecc_control_r();
}

static inline u32 ltc0_lts0_l1_cache_ecc_control_inject_corrected_err_f(u32 v)
{
	return ltc_ltc0_lts0_l1_cache_ecc_control_inject_corrected_err_f(v);
}

static inline u32 ltc0_lts0_l1_cache_ecc_control_inject_uncorrected_err_f(u32 v)
{
	return ltc_ltc0_lts0_l1_cache_ecc_control_inject_uncorrected_err_f(v);
}

static struct nvgpu_hw_err_inject_info ltc_ecc_err_desc[] = {
	NVGPU_ECC_ERR("cache_rstg_ecc_corrected",
			gv11b_ltc_inject_ecc_error,
			ltc0_lts0_l1_cache_ecc_control_r,
			ltc0_lts0_l1_cache_ecc_control_inject_corrected_err_f),
	NVGPU_ECC_ERR("cache_rstg_ecc_uncorrected",
			gv11b_ltc_inject_ecc_error,
			ltc0_lts0_l1_cache_ecc_control_r,
			ltc0_lts0_l1_cache_ecc_control_inject_uncorrected_err_f),
};

static struct nvgpu_hw_err_inject_info_desc ltc_err_desc;

struct nvgpu_hw_err_inject_info_desc * gv11b_ltc_get_err_desc(struct gk20a *g)
{
	ltc_err_desc.info_ptr = ltc_ecc_err_desc;
	ltc_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
			sizeof(ltc_ecc_err_desc) /
			sizeof(struct nvgpu_hw_err_inject_info));

	return &ltc_err_desc;
}
#endif /* CONFIG_NVGPU_INJECT_HWERR */

/*
 * Sets the ZBC stencil for the passed index.
 */
drivers/gpu/nvgpu/hal/mm/cache/flush_gk20a.c (new file)
@@ -0,0 +1,84 @@
/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifdef CONFIG_NVGPU_TRACE
#include <trace/events/gk20a.h>
#endif

#include <nvgpu/mm.h>
#include <nvgpu/io.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/timers.h>

#include <nvgpu/hw/gk20a/hw_flush_gk20a.h>

#include "flush_gk20a.h"

#ifdef CONFIG_NVGPU_COMPRESSION
void gk20a_mm_cbc_clean(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;
	u32 data;
	struct nvgpu_timeout timeout;
	u32 retries = 200;

	nvgpu_log_fn(g, " ");

	gk20a_busy_noresume(g);
	if (!g->power_on) {
		goto hw_was_off;
	}

	if (g->ops.mm.get_flush_retries != NULL) {
		retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_CBC_CLEAN);
	}

	nvgpu_assert(nvgpu_timeout_init(g, &timeout, retries,
			NVGPU_TIMER_RETRY_TIMER) == 0);

	nvgpu_mutex_acquire(&mm->l2_op_lock);

	/* Flush all dirty lines from the CBC to L2 */
	nvgpu_writel(g, flush_l2_clean_comptags_r(),
		flush_l2_clean_comptags_pending_busy_f());

	do {
		data = nvgpu_readl(g, flush_l2_clean_comptags_r());

		if (flush_l2_clean_comptags_outstanding_v(data) ==
			flush_l2_clean_comptags_outstanding_true_v() ||
		    flush_l2_clean_comptags_pending_v(data) ==
			flush_l2_clean_comptags_pending_busy_v()) {
			nvgpu_log_info(g, "l2_clean_comptags 0x%x", data);
			nvgpu_udelay(5);
		} else {
			break;
		}
	} while (nvgpu_timeout_expired_msg(&timeout,
			"l2_clean_comptags too many retries") == 0);

	nvgpu_mutex_release(&mm->l2_op_lock);

hw_was_off:
	gk20a_idle_nosuspend(g);
}
#endif
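gk20a_mm_cbc_clean() follows the driver's usual kick-and-poll shape: write the trigger, then re-read the status until neither outstanding nor pending is set, bounded by an nvgpu_timeout retry budget with a short delay between polls. A stand-alone sketch of that loop shape with the hardware stubbed out (every name below is illustrative, not the nvgpu API):

	#include <stdint.h>
	#include <stdio.h>

	static unsigned fake_busy = 3; /* pretend hardware: busy for 3 polls */

	static uint32_t read_status(void) /* stand-in for nvgpu_readl() */
	{
		return fake_busy > 0 ? (fake_busy--, 1u) : 0u;
	}

	/* Poll a busy bit with a bounded retry count, as the flush loop does. */
	static int poll_until_idle(uint32_t busy_mask, unsigned retries)
	{
		while (retries-- > 0) {
			if ((read_status() & busy_mask) == 0)
				return 0; /* flush finished */
			/* a real driver would nvgpu_udelay(5) here */
		}
		return -1; /* too many retries, like timeout_expired_msg */
	}

	int main(void)
	{
		printf("poll result: %d\n", poll_until_idle(1u, 200));
		return 0;
	}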
@@ -228,53 +228,3 @@ hw_was_off:

	return err;
}

#ifdef CONFIG_NVGPU_COMPRESSION
void gk20a_mm_cbc_clean(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;
	u32 data;
	struct nvgpu_timeout timeout;
	u32 retries = 200;

	nvgpu_log_fn(g, " ");

	gk20a_busy_noresume(g);
	if (!g->power_on) {
		goto hw_was_off;
	}

	if (g->ops.mm.get_flush_retries != NULL) {
		retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_CBC_CLEAN);
	}

	nvgpu_assert(nvgpu_timeout_init(g, &timeout, retries,
			NVGPU_TIMER_RETRY_TIMER) == 0);

	nvgpu_mutex_acquire(&mm->l2_op_lock);

	/* Flush all dirty lines from the CBC to L2 */
	nvgpu_writel(g, flush_l2_clean_comptags_r(),
		flush_l2_clean_comptags_pending_busy_f());

	do {
		data = nvgpu_readl(g, flush_l2_clean_comptags_r());

		if (flush_l2_clean_comptags_outstanding_v(data) ==
			flush_l2_clean_comptags_outstanding_true_v() ||
		    flush_l2_clean_comptags_pending_v(data) ==
			flush_l2_clean_comptags_pending_busy_v()) {
			nvgpu_log_info(g, "l2_clean_comptags 0x%x", data);
			nvgpu_udelay(5);
		} else {
			break;
		}
	} while (nvgpu_timeout_expired_msg(&timeout,
			"l2_clean_comptags too many retries") == 0);

	nvgpu_mutex_release(&mm->l2_op_lock);

hw_was_off:
	gk20a_idle_nosuspend(g);
}
#endif
@@ -374,4 +374,50 @@ u32 gv11b_pmu_mutex__size_1_v(void)
}
#endif

#ifdef CONFIG_NVGPU_INJECT_HWERR
void gv11b_pmu_inject_ecc_error(struct gk20a *g,
		struct nvgpu_hw_err_inject_info *err, u32 error_info)
{
	nvgpu_info(g, "Injecting PMU fault %s", err->name);
	nvgpu_writel(g, err->get_reg_addr(), err->get_reg_val(1U));
}

static inline u32 pmu_falcon_ecc_control_r(void)
{
	return pwr_pmu_falcon_ecc_control_r();
}

static inline u32 pmu_falcon_ecc_control_inject_corrected_err_f(u32 v)
{
	return pwr_pmu_falcon_ecc_control_inject_corrected_err_f(v);
}

static inline u32 pmu_falcon_ecc_control_inject_uncorrected_err_f(u32 v)
{
	return pwr_pmu_falcon_ecc_control_inject_uncorrected_err_f(v);
}

static struct nvgpu_hw_err_inject_info pmu_ecc_err_desc[] = {
	NVGPU_ECC_ERR("falcon_imem_ecc_corrected",
			gv11b_pmu_inject_ecc_error,
			pmu_falcon_ecc_control_r,
			pmu_falcon_ecc_control_inject_corrected_err_f),
	NVGPU_ECC_ERR("falcon_imem_ecc_uncorrected",
			gv11b_pmu_inject_ecc_error,
			pmu_falcon_ecc_control_r,
			pmu_falcon_ecc_control_inject_uncorrected_err_f),
};

static struct nvgpu_hw_err_inject_info_desc pmu_err_desc;

struct nvgpu_hw_err_inject_info_desc *
gv11b_pmu_intr_get_err_desc(struct gk20a *g)
{
	pmu_err_desc.info_ptr = pmu_ecc_err_desc;
	pmu_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
			sizeof(pmu_ecc_err_desc) /
			sizeof(struct nvgpu_hw_err_inject_info));

	return &pmu_err_desc;
}
#endif /* CONFIG_NVGPU_INJECT_HWERR */

/* error handler */
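The info_size assignments in these get-err-desc helpers are the classic ARRAY_SIZE idiom: total array bytes divided by element size, with nvgpu_safe_cast_u64_to_u32() guarding the narrowing cast. A stand-alone illustration of the same computation in plain C (names are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	struct desc { const char *name; };

	static struct desc table[] = { { "a" }, { "b" } };

	int main(void)
	{
		/* element count = total bytes / bytes per element */
		uint32_t n = (uint32_t)(sizeof(table) / sizeof(struct desc));

		printf("%u entries\n", (unsigned)n); /* prints 2 */
		return 0;
	}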
@@ -39,54 +39,6 @@

#define ALIGN_4KB 12

#ifdef CONFIG_NVGPU_INJECT_HWERR
void gv11b_pmu_inject_ecc_error(struct gk20a *g,
		struct nvgpu_hw_err_inject_info *err, u32 error_info)
{
	nvgpu_info(g, "Injecting PMU fault %s", err->name);
	nvgpu_writel(g, err->get_reg_addr(), err->get_reg_val(1U));
}

static inline u32 pmu_falcon_ecc_control_r(void)
{
	return pwr_pmu_falcon_ecc_control_r();
}

static inline u32 pmu_falcon_ecc_control_inject_corrected_err_f(u32 v)
{
	return pwr_pmu_falcon_ecc_control_inject_corrected_err_f(v);
}

static inline u32 pmu_falcon_ecc_control_inject_uncorrected_err_f(u32 v)
{
	return pwr_pmu_falcon_ecc_control_inject_uncorrected_err_f(v);
}

static struct nvgpu_hw_err_inject_info pmu_ecc_err_desc[] = {
	NVGPU_ECC_ERR("falcon_imem_ecc_corrected",
			gv11b_pmu_inject_ecc_error,
			pmu_falcon_ecc_control_r,
			pmu_falcon_ecc_control_inject_corrected_err_f),
	NVGPU_ECC_ERR("falcon_imem_ecc_uncorrected",
			gv11b_pmu_inject_ecc_error,
			pmu_falcon_ecc_control_r,
			pmu_falcon_ecc_control_inject_uncorrected_err_f),
};

static struct nvgpu_hw_err_inject_info_desc pmu_err_desc;

struct nvgpu_hw_err_inject_info_desc *
gv11b_pmu_intr_get_err_desc(struct gk20a *g)
{
	pmu_err_desc.info_ptr = pmu_ecc_err_desc;
	pmu_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
			sizeof(pmu_ecc_err_desc) /
			sizeof(struct nvgpu_hw_err_inject_info));

	return &pmu_err_desc;
}
#endif /* CONFIG_NVGPU_INJECT_HWERR */

/* error handler */
void gv11b_clear_pmu_bar0_host_err_status(struct gk20a *g)
{