mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: remove SW methods from safety build
The improved SDL heartbeat mechanism detects the interrupts triggered by SW methods and treats them as errors. Hence, remove SW method support completely from the safety build. Registers previously set by SW methods are now set by default for all contexts: implement a new HAL, gops.gr.init.set_default_compute_regs(), to set the registers through the patch context, and call this HAL while creating each context.

Update gv11b_gr_intr_handle_sw_method() to treat all compute SW methods as invalid.

Update unit test test_gr_intr_sw_exceptions() so that it now expects failure for any method/data.

Bug 200748548

Change-Id: I614f6411bbe7000c22f1891bbaf06982e8bd7f0b
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2527249
(cherry picked from commit bb6e0f9aa1404f79bcfbdd308b8c174a4fc83250)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2602638
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: Rajesh Devaraj <rdevaraj@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 19fa7004aa
commit d1f3f81553
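The change is easiest to read from the dispatch side: a method the hardware traps to software is offered to the per-chip handler, and a nonzero return now means "no such SW method", which the error path reports. A minimal sketch of that flow, assuming a hypothetical wrapper name (dispatch_trapped_method); the real driver's illegal-method path differs in detail:

static int dispatch_trapped_method(struct gk20a *g, u32 addr,
                u32 class_num, u32 offset, u32 data)
{
        int err;

        /* per-chip handler; returns 0 only for a recognized SW method */
        err = g->ops.gr.intr.handle_sw_method(g, addr, class_num,
                        offset, data);
        if (err != 0) {
                /* safety build: every compute SW method takes this path */
                nvgpu_err(g, "invalid method: class 0x%x offset 0x%x",
                                class_num, offset);
        }
        return err;
}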
@@ -906,6 +906,10 @@ int nvgpu_gr_obj_ctx_alloc(struct gk20a *g,
 	nvgpu_gr_obj_ctx_update_ctxsw_preemption_mode(g, config, gr_ctx,
 		subctx);
 
+#ifndef CONFIG_NVGPU_NON_FUSA
+	g->ops.gr.init.set_default_compute_regs(g, gr_ctx);
+#endif
+
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, "done");
 	return 0;
 out:
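The call above is compiled in only for the safety build and is made unconditionally, since every FUSA chip wires the op in its gr.init table (see the gv11b HAL hunk further down). A defensive variant of the call site, purely as a sketch, would guard against an unset op:

#ifndef CONFIG_NVGPU_NON_FUSA
        if (g->ops.gr.init.set_default_compute_regs != NULL) {
                g->ops.gr.init.set_default_compute_regs(g, gr_ctx);
        }
#endif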
@@ -87,6 +87,11 @@ u32 gv11b_gr_init_get_patch_slots(struct gk20a *g,
 				struct nvgpu_gr_config *config);
 void gv11b_gr_init_detect_sm_arch(struct gk20a *g);
 
+#ifndef CONFIG_NVGPU_NON_FUSA
+void gv11b_gr_init_set_default_compute_regs(struct gk20a *g,
+		struct nvgpu_gr_ctx *gr_ctx);
+#endif
+
 #ifdef CONFIG_NVGPU_GRAPHICS
 void gv11b_gr_init_rop_mapping(struct gk20a *g,
 		struct nvgpu_gr_config *gr_config);
@@ -937,6 +937,32 @@ void gv11b_gr_init_detect_sm_arch(struct gk20a *g)
 		gr_gpc0_tpc0_sm_arch_warp_count_v(v);
 }
 
+#ifndef CONFIG_NVGPU_NON_FUSA
+void gv11b_gr_init_set_default_compute_regs(struct gk20a *g,
+		struct nvgpu_gr_ctx *gr_ctx)
+{
+	u32 reg_val;
+
+	nvgpu_gr_ctx_patch_write_begin(g, gr_ctx, true);
+
+	reg_val = nvgpu_readl(g, gr_sked_hww_esr_en_r());
+	reg_val = set_field(reg_val,
+		gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_m(),
+		gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_disabled_f());
+	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_sked_hww_esr_en_r(),
+		reg_val, true);
+
+	reg_val = nvgpu_readl(g, gr_gpcs_tpcs_sm_l1tag_ctrl_r());
+	reg_val = set_field(reg_val,
+		gr_gpcs_tpcs_sm_l1tag_ctrl_always_cut_collector_m(),
+		gr_gpcs_tpcs_sm_l1tag_ctrl_always_cut_collector_enable_f());
+	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_gpcs_tpcs_sm_l1tag_ctrl_r(),
+		reg_val, true);
+
+	nvgpu_gr_ctx_patch_write_end(g, gr_ctx, true);
+}
+#endif
+
 #ifdef CONFIG_NVGPU_GR_GOLDEN_CTX_VERIFICATION
 int gv11b_gr_init_load_sw_bundle_init(struct gk20a *g,
 		struct netlist_av_list *sw_bundle_init)
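The function above performs the same read-modify-write sequence twice. A hypothetical helper (patch_rmw is not part of the driver) makes the pattern explicit; patch writes are buffered and replayed into the context image by the context-switch firmware, which is how the defaults reach every context without a SW method:

static void patch_rmw(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
                u32 reg, u32 mask, u32 field)
{
        /* current HW value as the starting point for the patched field */
        u32 val = nvgpu_readl(g, reg);

        val = set_field(val, mask, field);
        nvgpu_gr_ctx_patch_write(g, gr_ctx, reg, val, true);
}

With it, the body above reduces to two patch_rmw() calls bracketed by nvgpu_gr_ctx_patch_write_begin()/nvgpu_gr_ctx_patch_write_end().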
@@ -118,16 +118,15 @@ int ga100_gr_intr_handle_sw_method(struct gk20a *g, u32 addr,
 {
 	nvgpu_log_fn(g, " ");
 
+#ifdef CONFIG_NVGPU_HAL_NON_FUSA
 	if (class_num == AMPERE_COMPUTE_A) {
 		switch (offset << NVGPU_GA100_SW_METHOD_SHIFT) {
 		case NVC6C0_SET_BES_CROP_DEBUG4:
 			g->ops.gr.set_bes_crop_debug4(g, data);
 			return 0;
-#ifdef CONFIG_NVGPU_HAL_NON_FUSA
 		case NVC6C0_SET_SHADER_EXCEPTIONS:
 			g->ops.gr.intr.set_shader_exceptions(g, data);
 			return 0;
-#endif
 		case NVC6C0_SET_TEX_IN_DBG:
 			gv11b_gr_intr_set_tex_in_dbg(g, data);
 			return 0;
@@ -136,6 +135,8 @@ int ga100_gr_intr_handle_sw_method(struct gk20a *g, u32 addr,
 			return 0;
 		}
 	}
+#endif
+
 #if defined(CONFIG_NVGPU_DEBUGGER) && defined(CONFIG_NVGPU_GRAPHICS)
 	if (class_num == AMPERE_A) {
 		switch (offset << NVGPU_GA100_SW_METHOD_SHIFT) {
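The shift in the switch differs per chip only in spelling (NVGPU_GA100_SW_METHOD_SHIFT here, left_shift_by_2 on ga10b, a literal 2 on gv11b and tu104). Assuming all of them are the usual dword-to-byte conversion, the trapped offset is a dword index while the NVC*C0_* method macros are byte addresses, which is also why the unit test shifts the other way (NVC3C0_SET_SKEDCHECK >> 2). As a sketch:

/* sketch: convert a trapped dword method offset to a byte address */
static inline u32 method_byte_addr(u32 dword_offset)
{
        return dword_offset << 2U;
}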
@@ -97,13 +97,12 @@ int ga10b_gr_intr_handle_sw_method(struct gk20a *g, u32 addr,
 
 	nvgpu_log_fn(g, " ");
 
+#ifdef CONFIG_NVGPU_HAL_NON_FUSA
 	if (class_num == AMPERE_COMPUTE_B) {
 		switch (offset << left_shift_by_2) {
-#ifdef CONFIG_NVGPU_HAL_NON_FUSA
 		case NVC7C0_SET_SHADER_EXCEPTIONS:
 			g->ops.gr.intr.set_shader_exceptions(g, data);
 			return 0;
-#endif
 		case NVC7C0_SET_CB_BASE:
 		/*
 		 * This method is only implemented for gm107 in resman
@@ -123,6 +122,7 @@ int ga10b_gr_intr_handle_sw_method(struct gk20a *g, u32 addr,
 			return 0;
 		}
 	}
+#endif
 
 #if defined(CONFIG_NVGPU_DEBUGGER) && defined(CONFIG_NVGPU_GRAPHICS)
 	if (class_num == AMPERE_B) {
@@ -93,8 +93,10 @@ struct nvgpu_gr_sm_ecc_status {
 int gv11b_gr_intr_handle_fecs_error(struct gk20a *g,
 			struct nvgpu_channel *ch_ptr,
 			struct nvgpu_gr_isr_data *isr_data);
+#ifdef CONFIG_NVGPU_HAL_NON_FUSA
 void gv11b_gr_intr_set_shader_cut_collector(struct gk20a *g, u32 data);
 void gv11b_gr_intr_set_skedcheck(struct gk20a *g, u32 data);
+#endif
 int gv11b_gr_intr_handle_sw_method(struct gk20a *g, u32 addr,
 	u32 class_num, u32 offset, u32 data);
 void gv11b_gr_intr_handle_gcc_exception(struct gk20a *g, u32 gpc,
@@ -128,6 +128,7 @@ int gv11b_gr_intr_handle_fecs_error(struct gk20a *g,
 	return gp10b_gr_intr_handle_fecs_error(g, ch_ptr, isr_data);
 }
 
+#ifdef CONFIG_NVGPU_HAL_NON_FUSA
 void gv11b_gr_intr_set_skedcheck(struct gk20a *g, u32 data)
 {
 	u32 reg_val;
@@ -172,6 +173,7 @@ void gv11b_gr_intr_set_shader_cut_collector(struct gk20a *g, u32 data)
 	}
 	nvgpu_writel(g, gr_gpcs_tpcs_sm_l1tag_ctrl_r(), val);
 }
+#endif
 
 int gv11b_gr_intr_handle_sw_method(struct gk20a *g, u32 addr,
 	u32 class_num, u32 offset, u32 data)
@@ -180,14 +182,13 @@ int gv11b_gr_intr_handle_sw_method(struct gk20a *g, u32 addr,
 
 	nvgpu_log_fn(g, " ");
 
+#ifdef CONFIG_NVGPU_HAL_NON_FUSA
 	if (class_num == VOLTA_COMPUTE_A) {
 		switch (offset << 2) {
-#ifdef CONFIG_NVGPU_HAL_NON_FUSA
 		case NVC0C0_SET_SHADER_EXCEPTIONS:
 			g->ops.gr.intr.set_shader_exceptions(g, data);
 			err = 0;
 			break;
-#endif
 		case NVC3C0_SET_SKEDCHECK:
 			gv11b_gr_intr_set_skedcheck(g, data);
 			err = 0;
@@ -201,6 +202,7 @@ int gv11b_gr_intr_handle_sw_method(struct gk20a *g, u32 addr,
 			break;
 		}
 	}
+#endif
 
 #if defined(CONFIG_NVGPU_DEBUGGER) && defined(CONFIG_NVGPU_GRAPHICS)
 	if (class_num == VOLTA_A) {
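With CONFIG_NVGPU_HAL_NON_FUSA undefined, the preprocessor removes the whole VOLTA_COMPUTE_A branch above, so the function keeps only its debugger/graphics cases and the failure fall-through. Roughly, as a sketch (the error value and the absence of other branches are assumptions about the generated code, not a quote of it):

int gv11b_handle_sw_method_safety_shape(struct gk20a *g, u32 addr,
                u32 class_num, u32 offset, u32 data)
{
        nvgpu_log_fn(g, " ");
        (void)addr; (void)class_num; (void)offset; (void)data;
        /* no compute SW methods exist: report every method as invalid */
        return -EINVAL;
}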
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -33,6 +33,7 @@
 
 #include <nvgpu/hw/tu104/hw_gr_tu104.h>
 
+#ifdef CONFIG_NVGPU_HAL_NON_FUSA
 static void gr_tu104_set_sm_disp_ctrl(struct gk20a *g, u32 data)
 {
 	u32 reg_val;
@@ -59,19 +60,19 @@ static void gr_tu104_set_sm_disp_ctrl(struct gk20a *g, u32 data)
 
 	nvgpu_writel(g, gr_gpcs_tpcs_sm_disp_ctrl_r(), reg_val);
 }
+#endif
 
 int tu104_gr_intr_handle_sw_method(struct gk20a *g, u32 addr,
 	u32 class_num, u32 offset, u32 data)
 {
 	nvgpu_log_fn(g, " ");
 
+#ifdef CONFIG_NVGPU_HAL_NON_FUSA
 	if (class_num == TURING_COMPUTE_A) {
 		switch (offset << 2) {
-#ifdef CONFIG_NVGPU_HAL_NON_FUSA
 		case NVC5C0_SET_SHADER_EXCEPTIONS:
 			g->ops.gr.intr.set_shader_exceptions(g, data);
 			return 0;
-#endif
 		case NVC5C0_SET_SKEDCHECK:
 			gv11b_gr_intr_set_skedcheck(g, data);
 			return 0;
@@ -83,7 +84,7 @@ int tu104_gr_intr_handle_sw_method(struct gk20a *g, u32 addr,
 			return 0;
 		}
 	}
+#endif
 
 #if defined(CONFIG_NVGPU_DEBUGGER) && defined(CONFIG_NVGPU_GRAPHICS)
 	if (class_num == TURING_A) {
@@ -526,6 +526,9 @@ static const struct gops_gr_init gv11b_ops_gr_init = {
 	.get_max_subctx_count = gv11b_gr_init_get_max_subctx_count,
 	.get_patch_slots = gv11b_gr_init_get_patch_slots,
 	.detect_sm_arch = gv11b_gr_init_detect_sm_arch,
+#ifndef CONFIG_NVGPU_NON_FUSA
+	.set_default_compute_regs = gv11b_gr_init_set_default_compute_regs,
+#endif
 	.get_supported__preemption_modes = gp10b_gr_init_get_supported_preemption_modes,
 	.get_default_preemption_modes = gp10b_gr_init_get_default_preemption_modes,
 	.is_allowed_sw_bundle = gm20b_gr_init_is_allowed_sw_bundle,
@@ -655,6 +655,24 @@ struct gops_gr_init {
 	 */
 	void (*detect_sm_arch)(struct gk20a *g);
 
+#ifndef CONFIG_NVGPU_NON_FUSA
+	/**
+	 * @brief Set compute specific register values.
+	 *
+	 * @param g [in]		Pointer to GPU driver struct.
+	 * @param gr_ctx [in]	Pointer to GR engine context image.
+	 *
+	 * This function sets below compute specific bits in given registers
+	 * using patch context in safety build :
+	 * Register : gr_sked_hww_esr_en_r()
+	 * Value : gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_disabled_f()
+	 *
+	 * Register : gr_gpcs_tpcs_sm_l1tag_ctrl_r()
+	 * Value : gr_gpcs_tpcs_sm_l1tag_ctrl_always_cut_collector_enable_f()
+	 */
+	void (*set_default_compute_regs)(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx);
+#endif
+
 	/**
 	 * @brief Get supported preemption mode flags.
 	 *
@@ -571,13 +571,6 @@ int test_gr_intr_sw_exceptions(struct unit_module *m,
 		unit_return_fail(m, "sw_method passed for invalid class\n");
 	}
 
-	/* Fault injection - sw_method with null data */
-	err = g->ops.gr.intr.handle_sw_method(g, 0, VOLTA_COMPUTE_A,
-			(NVC3C0_SET_SKEDCHECK >> 2), 0);
-	if (err != 0) {
-		unit_return_fail(m, "sw_method failed for invalid data\n");
-	}
-
 	return UNIT_SUCCESS;
 }
 
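Per the commit message, the test now expects failure for any method/data, so the deleted positive check presumably inverts. A sketch (the exact assertion text is assumed; the constants are the ones from the deleted block above):

        /* safety build: a formerly-valid SW method must now be rejected */
        err = g->ops.gr.intr.handle_sw_method(g, 0, VOLTA_COMPUTE_A,
                        (NVC3C0_SET_SKEDCHECK >> 2), 0);
        if (err == 0) {
                unit_return_fail(m, "sw_method passed in safety build\n");
        }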
@@ -124,8 +124,6 @@ int test_gr_intr_setup_channel(struct unit_module *m,
 * Test specification for: test_gr_intr_sw_exceptions.
 *
 * Description: Helps to verify pending interrupts for illegal method.
-* Helps to verify exceptions for SET_SHADER_EXCEPTIONS,
-* SET_SKEDCHECK and SET_SHADER_CUT_COLLECTOR.
 *
 * Test Type: Feature, Error guessing
 *
@@ -133,8 +131,6 @@ int test_gr_intr_setup_channel(struct unit_module *m,
 * gops_gr_intr.flush_channel_tlb, nvgpu_gr_intr_flush_channel_tlb,
 * gops_gr_intr.handle_sw_method,
 * gv11b_gr_intr_handle_sw_method,
-* gv11b_gr_intr_set_skedcheck,
-* gv11b_gr_intr_set_shader_cut_collector,
 * gops_gr_intr.trapped_method_info,
 * gm20b_gr_intr_get_trapped_method_info,
 * nvgpu_gr_intr_set_error_notifier,
@@ -65,7 +65,9 @@ struct unit_module;
 * gp10b_gr_init_get_ctx_attrib_cb_size,
 * gops_gr_falcon.ctrl_ctxsw,
 * gp10b_gr_falcon_ctrl_ctxsw,
-* gm20b_gr_falcon_ctrl_ctxsw
+* gm20b_gr_falcon_ctrl_ctxsw,
+* gops_gr_init.set_default_compute_regs,
+* gv11b_gr_init_set_default_compute_regs
 *
 * Input: gr_obj_ctx_setup must have been executed successfully.
 *