gpu: nvgpu: implement scg, pbdma and cilp rules

Only certain combinations of channels with GFX/Compute object classes
can be assigned to a particular PBDMA and/or VEID, and CILP can be
enabled only in certain configurations. Implement checks for these
rules; they are verified during alloc_obj_ctx and/or while setting the
preemption mode (an illustrative sketch of the rules follows the commit
metadata below).

Bug 3677982

Change-Id: Ie7026cbb240819c1727b3736ed34044d7138d3cd
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2719995
Reviewed-by: Ankur Kishore <ankkishore@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Author:    Sagar Kamble
Date:      2022-04-27 22:14:31 +05:30
Committer: mobile promotions
Parent:    06410ba862
Commit:    ef99d9f010
11 changed files with 305 additions and 26 deletions
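Before the diff, a minimal standalone sketch of the class/VEID/PBDMA rules
that the new nvgpu_tsg_validate_class_veid_pbdma() check enforces. Everything
below (the file name, struct chan_cfg, validate_class_veid_pbdma()) is
hypothetical and for illustration only; the real check operates on
struct nvgpu_channel and derives the existing state from the TSG's
subcontext list, as shown in the diff.

/*
 * scg_rules_sketch.c -- illustration only, not part of the change.
 * Models the class/VEID/PBDMA rules enforced by
 * nvgpu_tsg_validate_class_veid_pbdma() in the diff below.
 * Build with: cc -Wall scg_rules_sketch.c
 */
#include <stdbool.h>
#include <stdio.h>

struct chan_cfg {
    bool is_gfx;        /* graphics object class, otherwise compute */
    unsigned int veid;  /* subcontext id; 0 == VEID0 */
    unsigned int pbdma; /* runqueue selector; 0 == PBDMA0 */
};

/* Return 0 if new_ch may join a TSG that already holds existing[0..n-1]. */
static int validate_class_veid_pbdma(const struct chan_cfg *new_ch,
                                     const struct chan_cfg *existing, int n)
{
    int i;

    /* Graphics is only allowed on VEID0 and PBDMA0. */
    if (new_ch->is_gfx && (new_ch->veid != 0U || new_ch->pbdma != 0U))
        return -1;

    for (i = 0; i < n; i++) {
        const struct chan_cfg *c = &existing[i];

        if (c->is_gfx && !new_ch->is_gfx) {
            /* Graphics active on VEID0/PBDMA0: compute on VEID0 must
             * stay on PBDMA0, compute on other VEIDs must avoid PBDMA0. */
            if (new_ch->veid == 0U && new_ch->pbdma != 0U)
                return -1;
            if (new_ch->veid != 0U && new_ch->pbdma == 0U)
                return -1;
        }

        if (!c->is_gfx && new_ch->is_gfx) {
            /* Mirror cases: async compute on VEID0, or compute on PBDMA0
             * from a non-zero VEID, blocks adding graphics. */
            if (c->veid == 0U && c->pbdma != 0U)
                return -1;
            if (c->veid != 0U && c->pbdma == 0U)
                return -1;
        }
    }
    return 0;
}

int main(void)
{
    struct chan_cfg gfx   = { true,  0U, 0U };
    struct chan_cfg async = { false, 1U, 1U }; /* compute, VEID1, PBDMA1 */
    struct chan_cfg bad   = { false, 1U, 0U }; /* compute, VEID1, PBDMA0 */

    printf("gfx + async compute (VEID1/PBDMA1): %s\n",
           validate_class_veid_pbdma(&async, &gfx, 1) == 0 ? "ok" : "rejected");
    printf("gfx + compute on VEID1/PBDMA0:      %s\n",
           validate_class_veid_pbdma(&bad, &gfx, 1) == 0 ? "ok" : "rejected");
    return 0;
}

The pairwise reasoning mirrors the driver code: graphics is confined to
VEID0/PBDMA0, and graphics and async compute may not share VEID0 or PBDMA0.
In the real implementation the existing state is queried with
nvgpu_tsg_channel_type_active() under the TSG channel list lock.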


@@ -43,6 +43,154 @@
#include <nvgpu/profiler.h>
#endif
#define MATCH_VEID true
#define MATCH_PBDMA_ID true
#define DONT_MATCH_VEID false
#define DONT_MATCH_PBDMA_ID false
/**
* Refer section "SCG, PBDMA, and CILP Rules" from https://p4viewer.nvidia.com/
* /get//hw/doc/gpu/volta/volta/design/Functional_Descriptions/
* /Volta_Subcontexts_Functional_Description.docx
*/
int nvgpu_tsg_validate_class_veid_pbdma(struct nvgpu_channel *ch)
{
struct gk20a *g = ch->g;
struct nvgpu_tsg *tsg = nvgpu_tsg_from_ch(ch);
bool veidnz_pbdma0_compute_active = false;
bool veid0_pbdmanz_compute_active = false;
bool veid0_gfx_active = false;
if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS)) {
return 0;
}
if (g->ops.gpu_class.is_valid_gfx(ch->obj_class)) {
if (ch->runqueue_sel != 0) {
nvgpu_err(g, "Can't have Graphics in non-zero'th PBDMA");
return -EINVAL;
}
if (ch->subctx_id != CHANNEL_INFO_VEID0) {
nvgpu_err(g, "Can't have Graphics in non-zero'th VEID");
return -EINVAL;
}
}
veid0_gfx_active = nvgpu_tsg_channel_type_active(tsg,
MATCH_VEID, CHANNEL_INFO_VEID0,
MATCH_PBDMA_ID, CHANNEL_INFO_PBDMA0,
g->ops.gpu_class.is_valid_gfx);
if (veid0_gfx_active) {
if (g->ops.gpu_class.is_valid_compute(ch->obj_class)) {
if (ch->subctx_id == CHANNEL_INFO_VEID0) {
if (ch->runqueue_sel != 0) {
nvgpu_err(g, "VEID0 can't do Graphics and Async Compute");
return -EINVAL;
}
} else {
if (ch->runqueue_sel == 0) {
nvgpu_err(g, "Async Compute can't be mixed with Graphics on PBDMA0");
return -EINVAL;
}
}
}
}
veid0_pbdmanz_compute_active = nvgpu_tsg_channel_type_active(tsg,
MATCH_VEID, CHANNEL_INFO_VEID0,
DONT_MATCH_PBDMA_ID, CHANNEL_INFO_PBDMA0,
g->ops.gpu_class.is_valid_compute);
if (veid0_pbdmanz_compute_active) {
if (g->ops.gpu_class.is_valid_gfx(ch->obj_class)) {
nvgpu_err(g, "VEID0 can't do Graphics and Async Compute");
return -EINVAL;
}
}
veidnz_pbdma0_compute_active = nvgpu_tsg_channel_type_active(tsg,
DONT_MATCH_VEID, CHANNEL_INFO_VEID0,
MATCH_PBDMA_ID, CHANNEL_INFO_PBDMA0,
g->ops.gpu_class.is_valid_compute);
if (veidnz_pbdma0_compute_active) {
if (g->ops.gpu_class.is_valid_gfx(ch->obj_class)) {
nvgpu_err(g, "Async Compute can't be mixed with Graphics on PBDMA0");
return -EINVAL;
}
}
return 0;
}
int nvgpu_tsg_validate_cilp_config(struct nvgpu_channel *ch)
{
#ifdef CONFIG_NVGPU_CILP
struct gk20a *g = ch->g;
struct nvgpu_tsg *tsg = nvgpu_tsg_from_ch(ch);
bool veidnz_compute_active;
bool veid0_compute_active;
bool veid0_gfx_active;
bool cilp_enabled;
if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS)) {
return 0;
}
veid0_gfx_active = nvgpu_tsg_channel_type_active(tsg,
MATCH_VEID, CHANNEL_INFO_VEID0,
MATCH_PBDMA_ID, CHANNEL_INFO_PBDMA0,
g->ops.gpu_class.is_valid_gfx);
veidnz_compute_active = nvgpu_tsg_channel_type_active(tsg,
DONT_MATCH_VEID, CHANNEL_INFO_VEID0,
DONT_MATCH_PBDMA_ID, CHANNEL_INFO_PBDMA0,
g->ops.gpu_class.is_valid_compute);
if (veid0_gfx_active) {
if (g->ops.gpu_class.is_valid_compute(ch->obj_class)) {
if (ch->subctx_id != CHANNEL_INFO_VEID0) {
nvgpu_assert(ch->runqueue_sel != CHANNEL_INFO_PBDMA0);
nvgpu_err(g, "SCG, with or without Sync Compute - CILP not allowed");
return -EINVAL;
} else if (veidnz_compute_active) {
nvgpu_err(g, "SCG, with or without Sync Compute - CILP not allowed");
return -EINVAL;
}
}
}
cilp_enabled = (nvgpu_gr_ctx_get_compute_preemption_mode(tsg->gr_ctx) ==
NVGPU_PREEMPTION_MODE_COMPUTE_CILP);
if (!cilp_enabled) {
nvgpu_log(g, gpu_dbg_gr, "CILP not enabled currently.");
return 0;
}
if (veidnz_compute_active) {
if (g->ops.gpu_class.is_valid_gfx(ch->obj_class)) {
nvgpu_err(g, "SCG without Sync Compute - CILP not allowed");
return -EINVAL;
}
}
veid0_compute_active = nvgpu_tsg_channel_type_active(tsg,
MATCH_VEID, CHANNEL_INFO_VEID0,
MATCH_PBDMA_ID, CHANNEL_INFO_PBDMA0,
g->ops.gpu_class.is_valid_compute);
if (veid0_compute_active && veidnz_compute_active) {
if (g->ops.gpu_class.is_valid_gfx(ch->obj_class)) {
nvgpu_err(g, "SCG with Sync Compute - CILP not allowed");
return -EINVAL;
}
}
#else
(void)ch;
#endif
return 0;
}
void nvgpu_tsg_disable(struct nvgpu_tsg *tsg)
{
struct gk20a *g = tsg->g;
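
A hedged summary of what nvgpu_tsg_validate_cilp_config() above rejects:
per its error messages, CILP is refused whenever graphics is combined with
async (subcontext) compute, i.e. any SCG configuration. The table below is
an illustration only, assuming a hypothetical file cilp_rules_sketch.c;
configurations marked as permitted only pass this particular check and may
still be constrained by other code paths.

/* cilp_rules_sketch.c -- illustration only, not part of the change. */
#include <stdbool.h>
#include <stdio.h>

struct cilp_case {
    const char *config;
    bool rejected; /* true if nvgpu_tsg_validate_cilp_config() errors out */
};

static const struct cilp_case cases[] = {
    { "Compute only (any VEIDs/PBDMAs)",                  false },
    { "Graphics only (VEID0/PBDMA0)",                     false },
    { "Graphics + Sync Compute, both on VEID0/PBDMA0",    false },
    { "Graphics + Async Compute on non-zero VEIDs (SCG)", true  },
    { "Graphics + Sync Compute + Async Compute (SCG)",    true  },
};

int main(void)
{
    unsigned int i;

    for (i = 0U; i < sizeof(cases) / sizeof(cases[0]); i++)
        printf("%-50s CILP %s\n", cases[i].config,
               cases[i].rejected ? "rejected" : "permitted by this check");
    return 0;
}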


@@ -356,3 +356,58 @@ void nvgpu_tsg_subctxs_set_pm_buffer_va(struct nvgpu_tsg *tsg,
nvgpu_log(g, gpu_dbg_gr, "done");
}
#endif /* CONFIG_NVGPU_DEBUGGER */
static inline struct nvgpu_channel *
nvgpu_channel_from_subctx_entry(struct nvgpu_list_node *node)
{
return (struct nvgpu_channel *)
((uintptr_t)node - offsetof(struct nvgpu_channel, subctx_entry));
};
bool nvgpu_tsg_channel_type_active(struct nvgpu_tsg *tsg,
bool match_subctx, u32 subctx_id,
bool match_pbdma, u32 pbdma_id,
bool (*is_valid_class)(u32 class_num))
{
struct nvgpu_tsg_subctx *subctx = NULL;
bool channel_active = false;
struct gk20a *g = tsg->g;
struct nvgpu_channel *ch;
nvgpu_log(g, gpu_dbg_gr, " ");
if (is_valid_class == NULL) {
return false;
}
nvgpu_rwsem_down_write(&tsg->ch_list_lock);
nvgpu_list_for_each_entry(subctx, &tsg->subctx_list,
nvgpu_tsg_subctx, tsg_entry) {
if ((match_subctx && (subctx->subctx_id == subctx_id)) ||
(!match_subctx && (subctx->subctx_id != subctx_id))) {
nvgpu_list_for_each_entry(ch, &subctx->ch_list,
nvgpu_channel, subctx_entry) {
if ((*is_valid_class)(ch->obj_class)) {
if ((match_pbdma && (ch->runqueue_sel == pbdma_id)) ||
(!match_pbdma && (ch->runqueue_sel != pbdma_id))) {
channel_active = true;
break;
}
}
}
if (channel_active == true) {
break;
}
}
}
nvgpu_rwsem_up_write(&tsg->ch_list_lock);
nvgpu_log(g, gpu_dbg_gr, "done");
return channel_active;
}


@@ -516,11 +516,17 @@ u32 nvgpu_gr_ctx_get_compute_preemption_mode(struct nvgpu_gr_ctx *gr_ctx)
}
bool nvgpu_gr_ctx_check_valid_preemption_mode(struct gk20a *g,
struct nvgpu_channel *ch,
struct nvgpu_gr_ctx *gr_ctx,
u32 graphics_preempt_mode, u32 compute_preempt_mode)
{
u32 supported_graphics_preempt_mode = 0U;
u32 supported_compute_preempt_mode = 0U;
#if defined(CONFIG_NVGPU_CILP) && defined(CONFIG_NVGPU_GFXP)
int err;
#endif
(void)ch;
if ((graphics_preempt_mode == 0U) && (compute_preempt_mode == 0U)) {
return false;
@@ -558,6 +564,23 @@ bool nvgpu_gr_ctx_check_valid_preemption_mode(struct gk20a *g,
(compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP)) {
return false;
}
if (g->ops.gpu_class.is_valid_compute(ch->obj_class) &&
compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP) {
err = nvgpu_tsg_validate_cilp_config(ch);
if (err != 0) {
nvgpu_err(g, "Invalid class/veid/pbdma config. CILP not allowed.");
return false;
}
}
if (g->ops.gpu_class.is_valid_gfx(ch->obj_class)) {
err = nvgpu_tsg_validate_cilp_config(ch);
if (err != 0) {
nvgpu_err(g, "Invalid class/veid/pbdma config. CILP not allowed.");
return false;
}
}
#endif
return true;


@@ -185,6 +185,13 @@ int nvgpu_gr_setup_alloc_obj_ctx(struct nvgpu_channel *c, u32 class_num,
nvgpu_mutex_acquire(&tsg->ctx_init_lock);
err = nvgpu_tsg_validate_class_veid_pbdma(c);
if (err != 0) {
nvgpu_err(g, "Invalid class/veid/pbdma config");
nvgpu_mutex_release(&tsg->ctx_init_lock);
goto out;
}
err = nvgpu_tsg_subctx_alloc_gr_subctx(g, c);
if (err != 0) {
nvgpu_err(g, "failed to alloc gr subctx");
@@ -210,7 +217,7 @@ int nvgpu_gr_setup_alloc_obj_ctx(struct nvgpu_channel *c, u32 class_num,
err = nvgpu_gr_obj_ctx_alloc(g, gr->golden_image,
gr->global_ctx_buffer, gr->gr_ctx_desc,
gr->config, gr_ctx, c->subctx,
gr->config, gr_ctx, c, c->subctx,
mappings, &c->inst_block, class_num, flags,
c->cde, c->vpr);
if (err != 0) {
@@ -384,7 +391,7 @@ int nvgpu_gr_setup_set_preemption_mode(struct nvgpu_channel *ch,
ch->chid, ch->tsgid, ch->tgid,
graphics_preempt_mode, compute_preempt_mode);
err = nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode(g, gr->config,
err = nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode(g, ch, gr->config,
gr->gr_ctx_desc, gr_ctx, class_num,
graphics_preempt_mode, compute_preempt_mode);
if (err != 0) {


@@ -155,6 +155,7 @@ static void nvgpu_gr_obj_ctx_init_ctxsw_preemption_mode(struct gk20a *g,
}
static int nvgpu_gr_obj_ctx_init_ctxsw_preemption(struct gk20a *g,
struct nvgpu_channel *ch,
struct nvgpu_gr_config *config, struct nvgpu_gr_ctx_desc *gr_ctx_desc,
struct nvgpu_gr_ctx *gr_ctx,
u32 class_num, u32 flags)
@@ -193,7 +194,7 @@ static int nvgpu_gr_obj_ctx_init_ctxsw_preemption(struct gk20a *g,
&compute_preempt_mode);
}
err = nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode(g, config,
err = nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode(g, ch, config,
gr_ctx_desc, gr_ctx, class_num, graphics_preempt_mode,
compute_preempt_mode);
if (err != 0) {
@@ -296,6 +297,7 @@ static int nvgpu_gr_obj_ctx_set_compute_preemption_mode(struct gk20a *g,
}
int nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode(struct gk20a *g,
struct nvgpu_channel *ch,
struct nvgpu_gr_config *config, struct nvgpu_gr_ctx_desc *gr_ctx_desc,
struct nvgpu_gr_ctx *gr_ctx, u32 class_num,
u32 graphics_preempt_mode, u32 compute_preempt_mode)
@@ -303,7 +305,7 @@ int nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode(struct gk20a *g,
int err = 0;
/* check for invalid combinations */
if (nvgpu_gr_ctx_check_valid_preemption_mode(g, gr_ctx,
if (nvgpu_gr_ctx_check_valid_preemption_mode(g, ch, gr_ctx,
graphics_preempt_mode, compute_preempt_mode) == false) {
err = -EINVAL;
goto fail;
@@ -939,6 +941,7 @@ static void nvgpu_gr_obj_ctx_patch_ctx_set_size(struct gk20a *g,
}
static int nvgpu_gr_obj_ctx_alloc_buffers(struct gk20a *g,
struct nvgpu_channel *ch,
struct nvgpu_gr_obj_ctx_golden_image *golden_image,
struct nvgpu_gr_ctx_desc *gr_ctx_desc,
struct nvgpu_gr_config *config,
@@ -947,6 +950,7 @@ static int nvgpu_gr_obj_ctx_alloc_buffers(struct gk20a *g,
{
int err;
(void)ch;
(void)class_num;
(void)flags;
@@ -959,7 +963,7 @@ static int nvgpu_gr_obj_ctx_alloc_buffers(struct gk20a *g,
}
#if defined(CONFIG_NVGPU_GFXP) || defined(CONFIG_NVGPU_CILP)
err = nvgpu_gr_obj_ctx_init_ctxsw_preemption(g, config,
err = nvgpu_gr_obj_ctx_init_ctxsw_preemption(g, ch, config,
gr_ctx_desc, gr_ctx, class_num, flags);
if (err != 0) {
nvgpu_err(g, "fail to init preemption mode");
@@ -1149,6 +1153,7 @@ int nvgpu_gr_obj_ctx_alloc(struct gk20a *g,
struct nvgpu_gr_ctx_desc *gr_ctx_desc,
struct nvgpu_gr_config *config,
struct nvgpu_gr_ctx *gr_ctx,
struct nvgpu_channel *c,
struct nvgpu_tsg_subctx *subctx,
struct nvgpu_gr_ctx_mappings *mappings,
struct nvgpu_mem *inst_block,
@@ -1159,7 +1164,7 @@ int nvgpu_gr_obj_ctx_alloc(struct gk20a *g,
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, " ");
err = nvgpu_gr_obj_ctx_alloc_buffers(g, golden_image, gr_ctx_desc,
err = nvgpu_gr_obj_ctx_alloc_buffers(g, c, golden_image, gr_ctx_desc,
config, gr_ctx, class_num, flags);
if (err != 0) {
nvgpu_err(g, "failed to alloc ctx buffers");


@@ -1,7 +1,7 @@
/*
* FIFO common definitions.
*
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -224,6 +224,9 @@
/** Subctx id 0 */
#define CHANNEL_INFO_VEID0 0U
/** Pbdma id 0 */
#define CHANNEL_INFO_PBDMA0 0U
struct gk20a;
struct nvgpu_runlist;
struct nvgpu_channel;


@@ -356,6 +356,7 @@ u32 nvgpu_gr_ctx_get_compute_preemption_mode(struct nvgpu_gr_ctx *gr_ctx);
* @brief Check if given preemption modes are valid.
*
* @param g [in] Pointer to GPU driver struct.
* @param ch [in] Pointer to Channel struct.
* @param gr_ctx [in] Pointer to graphics context struct.
* @param graphics_preempt_mode Requested graphics preemption mode.
* @param compute_preempt_mode Requested compute preemption mode.
@@ -377,6 +378,7 @@ u32 nvgpu_gr_ctx_get_compute_preemption_mode(struct nvgpu_gr_ctx *gr_ctx);
* @return true if requested preemption modes are valid, false otherwise.
*/
bool nvgpu_gr_ctx_check_valid_preemption_mode(struct gk20a *g,
struct nvgpu_channel *ch,
struct nvgpu_gr_ctx *gr_ctx,
u32 graphics_preempt_mode, u32 compute_preempt_mode);


@@ -103,6 +103,7 @@ bool nvgpu_gr_obj_ctx_is_gfx_engine(struct gk20a *g,
* brief Initialize preemption mode in context struct.
*
* @param g [in] Pointer to GPU driver struct.
* @param ch [in] Pointer to Channel struct.
* @param config [in] Pointer to GR configuration struct.
* @param gr_ctx_desc [in] Pointer to GR context descriptor struct.
* @param gr_ctx [in] Pointer to graphics context.
@@ -124,6 +125,7 @@ bool nvgpu_gr_obj_ctx_is_gfx_engine(struct gk20a *g,
* @see nvgpu_gr_setup_set_preemption_mode.
*/
int nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode(struct gk20a *g,
struct nvgpu_channel *ch,
struct nvgpu_gr_config *config, struct nvgpu_gr_ctx_desc *gr_ctx_desc,
struct nvgpu_gr_ctx *gr_ctx, u32 class_num,
u32 graphics_preempt_mode, u32 compute_preempt_mode);
@@ -252,6 +254,7 @@ int nvgpu_gr_obj_ctx_alloc_golden_ctx_image(struct gk20a *g,
* @param gr_ctx_desc [in] Pointer to GR context descriptor struct.
* @param config [in] Pointer to GR configuration struct.
* @param gr_ctx [in] Pointer to graphics context.
* @param c [in] Pointer to Channel struct.
* @param subctx [in] Pointer to TSG subcontext struct.
* @param mappings [in] Pointer to mappings of the GR context buffers.
* @param inst_block [in] Pointer to channel instance block.
@@ -292,6 +295,7 @@ int nvgpu_gr_obj_ctx_alloc(struct gk20a *g,
struct nvgpu_gr_ctx_desc *gr_ctx_desc,
struct nvgpu_gr_config *config,
struct nvgpu_gr_ctx *gr_ctx,
struct nvgpu_channel *c,
struct nvgpu_tsg_subctx *subctx,
struct nvgpu_gr_ctx_mappings *mappings,
struct nvgpu_mem *inst_block,


@@ -486,6 +486,32 @@ struct nvgpu_tsg *nvgpu_tsg_check_and_get_from_id(struct gk20a *g, u32 tsgid);
int nvgpu_tsg_unbind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch,
bool force);
/**
* @brief Validate the channel class and VEID/PBDMA assignment.
*
* @param ch [in] Pointer to the Channel struct.
*
* Refer section "SCG, PBDMA, and CILP Rules" from https://p4viewer.nvidia.com/
* /get//hw/doc/gpu/volta/volta/design/Functional_Descriptions/
* /Volta_Subcontexts_Functional_Description.docx.
*
* @return 0 in case of success, < 0 in case of failure.
*/
int nvgpu_tsg_validate_class_veid_pbdma(struct nvgpu_channel *ch);
/**
* @brief Validate the CILP configuration with subcontexts.
*
* @param ch [in] Pointer to the Channel struct.
*
* Refer section "SCG, PBDMA, and CILP Rules" from https://p4viewer.nvidia.com/
* /get//hw/doc/gpu/volta/volta/design/Functional_Descriptions/
* /Volta_Subcontexts_Functional_Description.docx.
*
* @return 0 in case of success, < 0 in case of failure.
*/
int nvgpu_tsg_validate_cilp_config(struct nvgpu_channel *ch);
/**
* @brief Check h/w channel status before unbinding Channel.
*


@@ -211,4 +211,9 @@ void nvgpu_tsg_subctxs_set_pm_buffer_va(struct nvgpu_tsg *tsg,
bool set_pm_ctx_gpu_va);
#endif /* CONFIG_NVGPU_DEBUGGER */
bool nvgpu_tsg_channel_type_active(struct nvgpu_tsg *tsg,
bool match_subctx, u32 subctx_id,
bool match_pbdma, u32 pbdma_id,
bool (*is_valid_class)(u32 class_num));
#endif /* NVGPU_TSG_SUBCTX_H */


@@ -227,7 +227,7 @@ int test_gr_obj_ctx_error_injection(struct unit_module *m,
/* Fail gr_ctx allocation */
nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
err = nvgpu_gr_obj_ctx_alloc(g, golden_image, global_desc, desc,
config, gr_ctx, subctx, mappings, &inst_block,
config, gr_ctx, channel, subctx, mappings, &inst_block,
VOLTA_COMPUTE_A, 0, false, false);
if (err == 0) {
unit_return_fail(m, "unexpected success");
@@ -236,7 +236,7 @@ int test_gr_obj_ctx_error_injection(struct unit_module *m,
/* Fail patch_ctx allocation */
nvgpu_posix_enable_fault_injection(kmem_fi, true, 3);
err = nvgpu_gr_obj_ctx_alloc(g, golden_image, global_desc, desc,
config, gr_ctx, subctx, mappings, &inst_block,
config, gr_ctx, channel, subctx, mappings, &inst_block,
VOLTA_COMPUTE_A, 0, false, false);
if (err == 0) {
unit_return_fail(m, "unexpected success");
@@ -245,7 +245,7 @@ int test_gr_obj_ctx_error_injection(struct unit_module *m,
/* Fail circular buffer mapping */
nvgpu_posix_enable_fault_injection(kmem_fi, true, 8);
err = nvgpu_gr_obj_ctx_alloc(g, golden_image, global_desc, desc,
config, gr_ctx, subctx, mappings, &inst_block,
config, gr_ctx, channel, subctx, mappings, &inst_block,
VOLTA_COMPUTE_A, 0, false, false);
if (err == 0) {
unit_return_fail(m, "unexpected success");
@@ -257,7 +257,7 @@ int test_gr_obj_ctx_error_injection(struct unit_module *m,
g->ops.gr.init.fe_pwr_mode_force_on = test_fe_pwr_mode_force_on;
fe_pwr_mode_count = 0;
err = nvgpu_gr_obj_ctx_alloc(g, golden_image, global_desc, desc,
config, gr_ctx, subctx, mappings, &inst_block,
config, gr_ctx, channel, subctx, mappings, &inst_block,
VOLTA_COMPUTE_A, 0, false, false);
if (err == 0) {
unit_return_fail(m, "unexpected success");
@@ -266,7 +266,7 @@ int test_gr_obj_ctx_error_injection(struct unit_module *m,
/* Fail second call to gops.gr.init.fe_pwr_mode_force_on */
fe_pwr_mode_count = 1;
err = nvgpu_gr_obj_ctx_alloc(g, golden_image, global_desc, desc,
config, gr_ctx, subctx, mappings, &inst_block,
config, gr_ctx, channel, subctx, mappings, &inst_block,
VOLTA_COMPUTE_A, 0, false, false);
if (err == 0) {
unit_return_fail(m, "unexpected success");
@@ -281,7 +281,7 @@ int test_gr_obj_ctx_error_injection(struct unit_module *m,
g->ops.gr.falcon.ctrl_ctxsw = test_falcon_ctrl_ctxsw;
ctrl_ctxsw_count = -1;
err = nvgpu_gr_obj_ctx_alloc(g, golden_image, global_desc, desc,
config, gr_ctx, subctx, mappings, &inst_block,
config, gr_ctx, channel, subctx, mappings, &inst_block,
VOLTA_COMPUTE_A, 0, false, false);
if (err == 0) {
unit_return_fail(m, "unexpected success");
@@ -294,7 +294,7 @@ int test_gr_obj_ctx_error_injection(struct unit_module *m,
g->ops.gr.init.wait_idle = test_gr_wait_idle;
gr_wait_idle_count = 2;
err = nvgpu_gr_obj_ctx_alloc(g, golden_image, global_desc, desc,
config, gr_ctx, subctx, mappings, &inst_block,
config, gr_ctx, channel, subctx, mappings, &inst_block,
VOLTA_COMPUTE_A, 0, false, false);
if (err == 0) {
unit_return_fail(m, "unexpected success");
@@ -307,7 +307,7 @@ int test_gr_obj_ctx_error_injection(struct unit_module *m,
g->ops.gr.init.load_sw_bundle_init = test_load_sw_bundle;
load_sw_bundle_count = 0;
err = nvgpu_gr_obj_ctx_alloc(g, golden_image, global_desc, desc,
config, gr_ctx, subctx, mappings, &inst_block,
config, gr_ctx, channel, subctx, mappings, &inst_block,
VOLTA_COMPUTE_A, 0, false, false);
if (err == 0) {
unit_return_fail(m, "unexpected success");
@@ -317,7 +317,7 @@ int test_gr_obj_ctx_error_injection(struct unit_module *m,
g->ops.gr.init.load_sw_veid_bundle = test_load_sw_bundle;
load_sw_bundle_count = 1;
err = nvgpu_gr_obj_ctx_alloc(g, golden_image, global_desc, desc,
config, gr_ctx, subctx, mappings, &inst_block,
config, gr_ctx, channel, subctx, mappings, &inst_block,
VOLTA_COMPUTE_A, 0, false, false);
if (err == 0) {
unit_return_fail(m, "unexpected success");
@@ -337,7 +337,7 @@ int test_gr_obj_ctx_error_injection(struct unit_module *m,
g->ops.gr.init.wait_idle = test_gr_wait_idle;
gr_wait_idle_count = 4;
err = nvgpu_gr_obj_ctx_alloc(g, golden_image, global_desc, desc,
config, gr_ctx, subctx, mappings, &inst_block,
config, gr_ctx, channel, subctx, mappings, &inst_block,
VOLTA_COMPUTE_A, 0, false, false);
if (err == 0) {
unit_return_fail(m, "unexpected success");
@@ -352,7 +352,7 @@ int test_gr_obj_ctx_error_injection(struct unit_module *m,
*/
ctrl_ctxsw_count = 1;
err = nvgpu_gr_obj_ctx_alloc(g, golden_image, global_desc, desc,
config, gr_ctx, subctx, mappings, &inst_block,
config, gr_ctx, channel, subctx, mappings, &inst_block,
VOLTA_COMPUTE_A, 0, false, false);
if (err == 0) {
unit_return_fail(m, "unexpected success");
@@ -364,7 +364,7 @@ int test_gr_obj_ctx_error_injection(struct unit_module *m,
*/
ctrl_ctxsw_count = 2;
err = nvgpu_gr_obj_ctx_alloc(g, golden_image, global_desc, desc,
config, gr_ctx, subctx, mappings, &inst_block,
config, gr_ctx, channel, subctx, mappings, &inst_block,
VOLTA_COMPUTE_A, 0, false, false);
if (err == 0) {
unit_return_fail(m, "unexpected success");
@@ -376,7 +376,7 @@ int test_gr_obj_ctx_error_injection(struct unit_module *m,
/* Fail golden context verification */
nvgpu_posix_enable_fault_injection(golden_ctx_verif_fi, true, 0);
err = nvgpu_gr_obj_ctx_alloc(g, golden_image, global_desc, desc,
config, gr_ctx, subctx, mappings, &inst_block,
config, gr_ctx, channel, subctx, mappings, &inst_block,
VOLTA_COMPUTE_A, 0, false, false);
if (err == 0) {
unit_return_fail(m, "unexpected success");
@@ -387,7 +387,7 @@ int test_gr_obj_ctx_error_injection(struct unit_module *m,
/* Finally, successful obj_ctx allocation */
err = nvgpu_gr_obj_ctx_alloc(g, golden_image, global_desc, desc,
config, gr_ctx, subctx, mappings, &inst_block,
config, gr_ctx, channel, subctx, mappings, &inst_block,
VOLTA_COMPUTE_A, 0, false, false);
if (err != 0) {
unit_return_fail(m, "failed to allocate obj_ctx");
@@ -400,15 +400,16 @@ int test_gr_obj_ctx_error_injection(struct unit_module *m,
/* Reallocation with golden image already created */
err = nvgpu_gr_obj_ctx_alloc(g, golden_image, global_desc, desc,
config, gr_ctx, subctx, mappings, &inst_block,
config, gr_ctx, channel, subctx, mappings, &inst_block,
VOLTA_COMPUTE_A, 0, false, false);
if (err != 0) {
unit_return_fail(m, "failed to re-allocate obj_ctx");
}
/* Set preemption mode with invalid compute class */
err = nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode(g, config, desc, gr_ctx,
VOLTA_DMA_COPY_A, 0, NVGPU_PREEMPTION_MODE_COMPUTE_CTA);
err = nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode(g, channel, config,
desc, gr_ctx, VOLTA_DMA_COPY_A, 0,
NVGPU_PREEMPTION_MODE_COMPUTE_CTA);
if (err == 0) {
unit_return_fail(m, "unexpected success");
}