gpu: nvgpu: fix issues identified by common.gr.obj_ctx negative testing

- nvgpu_gr_ctx_load_golden_ctx_image() never returns an error, so change
  its return type to void
- Reject compute preemption modes greater than CILP in
  nvgpu_gr_ctx_check_valid_preemption_mode() (greater than CTA when CILP
  is not compiled in); see the first sketch after this list
- Validate the received class in nvgpu_gr_setup_set_preemption_mode()
  and return -EINVAL for an invalid class
- Compile out nvgpu_gr_obj_ctx_init_ctxsw_preemption_mode() entirely,
  since it does nothing in the safety build
- Remove the switch statement in nvgpu_gr_obj_ctx_set_compute_preemption_mode(),
  since earlier calls already validate the input and no unsupported
  value can reach this point
- nvgpu_gr_obj_ctx_commit_global_ctx_buffers() never returns an error,
  so change its return type to void
- The gops.gr.init.preemption_state HAL is not needed in the safety
  build, since it only configures the gfxp-related timeout
- Remove the redundant call to gops.gr.init.wait_idle() in
  nvgpu_gr_obj_ctx_commit_hw_state(); it triggered a wait even after an
  earlier failure in the same function (see the second sketch after
  this list)
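
For quick reference, here is a standalone sketch of the tightened compute
preemption mode bound (the same comparison the
nvgpu_gr_ctx_check_valid_preemption_mode() hunk below introduces; the flag
values, helper name and test harness are illustrative only, not driver code):

/* Sketch: bound the requested compute preemption mode by the highest
 * mode compiled in, mirroring the check added in
 * nvgpu_gr_ctx_check_valid_preemption_mode(). Illustrative code only. */
#include <stdbool.h>
#include <stdio.h>

/* single-bit mode flags, modeled on NVGPU_PREEMPTION_MODE_COMPUTE_* */
#define MODE_COMPUTE_WFI   (1u << 0)
#define MODE_COMPUTE_CTA   (1u << 1)
#define MODE_COMPUTE_CILP  (1u << 2)

/* flip to 0 to mimic a build without CONFIG_NVGPU_CILP */
#define CILP_SUPPORTED 1

static bool compute_preempt_mode_is_valid(unsigned int mode)
{
#if CILP_SUPPORTED
        if (mode > MODE_COMPUTE_CILP) { /* reject anything beyond CILP */
                return false;
        }
#else
        if (mode > MODE_COMPUTE_CTA) {  /* CTA is the max without CILP */
                return false;
        }
#endif
        return true;
}

int main(void)
{
        /* negative-testing style probes: valid modes pass, bogus fail */
        printf("CTA:  %d\n", compute_preempt_mode_is_valid(MODE_COMPUTE_CTA));
        printf("CILP: %d\n", compute_preempt_mode_is_valid(MODE_COMPUTE_CILP));
        printf("bad:  %d\n", compute_preempt_mode_is_valid(1u << 3));
        return 0;
}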
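
Similarly, the reworked error handling in nvgpu_gr_obj_ctx_commit_hw_state()
is easiest to see as control flow: the success path now returns before the
restore label, and failures that occur while fe_go_idle is disabled jump to
restore_fe_go_idle so the timeout is re-enabled without the old unconditional
wait. A standalone sketch with stubbed-out HAL calls (names simplified; not
driver code):

#include <stdio.h>

/* stubs standing in for the gops.gr.init HAL calls */
static void fe_go_idle_timeout(int enable) { printf("fe_go_idle_timeout(%d)\n", enable); }
static int fs_state_init(void)   { return 0; }
static int alloc_sw_bundle(void) { return -1; } /* force the error path */
static int wait_idle(void)       { return 0; }

static int commit_hw_state(void)
{
        int err;

        fe_go_idle_timeout(0);          /* disable fe_go_idle */

        err = fs_state_init();
        if (err != 0) {
                goto restore_fe_go_idle;        /* was: goto clean_up */
        }

        err = alloc_sw_bundle();
        if (err != 0) {
                goto restore_fe_go_idle;        /* was: goto clean_up */
        }

        fe_go_idle_timeout(1);          /* restore on the success path */

        err = wait_idle();
        if (err != 0) {
                goto clean_up;
        }

        return 0;       /* success never reaches the restore label */

restore_fe_go_idle:
        fe_go_idle_timeout(1);  /* re-enable exactly once, then fall through */
clean_up:
        return err;
}

int main(void)
{
        return (commit_hw_state() != 0) ? 1 : 0;
}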

Jira NVGPU-4457

Change-Id: I06a474ef7cc1b16fbc3846e0cad1cda6bb2bf2af
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2260938
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Deepak Nibade, 2019-12-09 20:39:23 +05:30 (committed by Alex Waterman)
parent 71040ef04f
commit 34020a5999
8 changed files with 57 additions and 68 deletions


@@ -448,7 +448,7 @@ struct nvgpu_mem *nvgpu_gr_ctx_get_ctx_mem(struct nvgpu_gr_ctx *gr_ctx)
 }
 
 /* load saved fresh copy of gloden image into channel gr_ctx */
-int nvgpu_gr_ctx_load_golden_ctx_image(struct gk20a *g,
+void nvgpu_gr_ctx_load_golden_ctx_image(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx,
 	struct nvgpu_gr_global_ctx_local_golden_image *local_golden_image,
 	bool cde)
@@ -507,8 +507,6 @@ int nvgpu_gr_ctx_load_golden_ctx_image(struct gk20a *g,
 	g->ops.gr.ctxsw_prog.set_pm_mode(g, mem, gr_ctx->pm_ctx.pm_mode);
 	g->ops.gr.ctxsw_prog.set_pm_ptr(g, mem, virt_addr);
 #endif
-
-	return 0;
 }
 
 /*
@@ -601,7 +599,11 @@ bool nvgpu_gr_ctx_check_valid_preemption_mode(struct nvgpu_gr_ctx *gr_ctx,
 	}
 #endif
 
-#ifndef CONFIG_NVGPU_CILP
+#ifdef CONFIG_NVGPU_CILP
+	if (compute_preempt_mode > NVGPU_PREEMPTION_MODE_COMPUTE_CILP) {
+		return false;
+	}
+#else
 	if (compute_preempt_mode > NVGPU_PREEMPTION_MODE_COMPUTE_CTA) {
 		return false;
 	}


@@ -296,6 +296,11 @@ int nvgpu_gr_setup_set_preemption_mode(struct nvgpu_channel *ch,
 		return -EINVAL;
 	}
 
+	if (!g->ops.gpu_class.is_valid(class_num)) {
+		nvgpu_err(g, "invalid obj class 0x%x", class_num);
+		return -EINVAL;
+	}
+
 	tsg = nvgpu_tsg_from_ch(ch);
 	if (tsg == NULL) {
 		return -EINVAL;


@@ -65,16 +65,15 @@ void nvgpu_gr_obj_ctx_commit_inst(struct gk20a *g, struct nvgpu_mem *inst_block,
 	}
 }
 
+#if defined(CONFIG_NVGPU_GRAPHICS) || defined(CONFIG_NVGPU_CILP)
 static int nvgpu_gr_obj_ctx_init_ctxsw_preemption_mode(struct gk20a *g,
 	struct nvgpu_gr_config *config, struct nvgpu_gr_ctx_desc *gr_ctx_desc,
 	struct nvgpu_gr_ctx *gr_ctx, struct vm_gk20a *vm,
 	u32 class_num, u32 flags)
 {
-#if defined(CONFIG_NVGPU_GRAPHICS) || defined(CONFIG_NVGPU_CILP)
 	int err;
 	u32 graphics_preempt_mode = 0U;
 	u32 compute_preempt_mode = 0U;
-#endif
 
 	nvgpu_log_fn(g, " ");
@@ -98,7 +97,6 @@ static int nvgpu_gr_obj_ctx_init_ctxsw_preemption_mode(struct gk20a *g,
 	}
 #endif
 
-#if defined(CONFIG_NVGPU_GRAPHICS) || defined(CONFIG_NVGPU_CILP)
 	if ((graphics_preempt_mode != 0U) || (compute_preempt_mode != 0U)) {
 		err = nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode(g, config,
 			gr_ctx_desc, gr_ctx, vm, class_num, graphics_preempt_mode,
@@ -108,12 +106,12 @@ static int nvgpu_gr_obj_ctx_init_ctxsw_preemption_mode(struct gk20a *g,
 			return err;
 		}
 	}
-#endif
 
 	nvgpu_log_fn(g, "done");
 	return 0;
 }
+#endif
 
 #ifdef CONFIG_NVGPU_GRAPHICS
 static int nvgpu_gr_obj_ctx_set_graphics_preemption_mode(struct gk20a *g,
@@ -190,23 +188,13 @@ static int nvgpu_gr_obj_ctx_set_compute_preemption_mode(struct gk20a *g,
 		|| g->ops.gpu_class.is_valid_gfx(class_num)
 #endif
 	   ) {
-		switch (compute_preempt_mode) {
-		case NVGPU_PREEMPTION_MODE_COMPUTE_WFI:
-		case NVGPU_PREEMPTION_MODE_COMPUTE_CTA:
-#ifdef CONFIG_NVGPU_CILP
-		case NVGPU_PREEMPTION_MODE_COMPUTE_CILP:
-#endif
 		nvgpu_gr_ctx_init_compute_preemption_mode(gr_ctx,
 			compute_preempt_mode);
-			break;
-		default:
-			nvgpu_log_info(g, "compute_preempt_mode=%u",
-				compute_preempt_mode);
-			break;
-		}
-		return 0;
 	} else {
 		return -EINVAL;
 	}
+
+	return 0;
 }
 
 int nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode(struct gk20a *g,
@@ -327,7 +315,7 @@ void nvgpu_gr_obj_ctx_update_ctxsw_preemption_mode(struct gk20a *g,
 	nvgpu_log_fn(g, "done");
 }
 
-int nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
+void nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
 	struct nvgpu_gr_global_ctx_buffer_desc *global_ctx_buffer,
 	struct nvgpu_gr_config *config, struct nvgpu_gr_ctx *gr_ctx, bool patch)
 {
@@ -378,8 +366,6 @@ int nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
 	if (patch) {
 		nvgpu_gr_ctx_patch_write_end(g, gr_ctx, false);
 	}
-
-	return 0;
 }
 
 static int nvgpu_gr_obj_ctx_alloc_sw_bundle(struct gk20a *g)
@@ -470,12 +456,15 @@ static int nvgpu_gr_obj_ctx_init_hw_state(struct gk20a *g,
 			sw_ctx_load->l[i].value);
 	}
 
+#ifdef CONFIG_NVGPU_GRAPHICS
 	if (g->ops.gr.init.preemption_state != NULL) {
 		err = g->ops.gr.init.preemption_state(g);
 		if (err != 0) {
 			goto clean_up;
 		}
 	}
+#endif
 
 	nvgpu_cg_blcg_gr_load_enable(g);
 
 	err = g->ops.gr.init.wait_idle(g);
@@ -499,11 +488,8 @@ static int nvgpu_gr_obj_ctx_commit_hw_state(struct gk20a *g,
 	/* disable fe_go_idle */
 	g->ops.gr.init.fe_go_idle_timeout(g, false);
 
-	err = nvgpu_gr_obj_ctx_commit_global_ctx_buffers(g, global_ctx_buffer,
+	nvgpu_gr_obj_ctx_commit_global_ctx_buffers(g, global_ctx_buffer,
 		config, gr_ctx, false);
-	if (err != 0) {
-		goto clean_up;
-	}
 
 	/* override a few ctx state registers */
 	g->ops.gr.init.commit_global_timeslice(g);
@@ -511,7 +497,7 @@ static int nvgpu_gr_obj_ctx_commit_hw_state(struct gk20a *g,
 	/* floorsweep anything left */
 	err = nvgpu_gr_fs_state_init(g, config);
 	if (err != 0) {
-		goto clean_up;
+		goto restore_fe_go_idle;
 	}
 
 	err = g->ops.gr.init.wait_idle(g);
@@ -521,17 +507,12 @@ static int nvgpu_gr_obj_ctx_commit_hw_state(struct gk20a *g,
 	err = nvgpu_gr_obj_ctx_alloc_sw_bundle(g);
 	if (err != 0) {
-		goto clean_up;
+		goto restore_fe_go_idle;
 	}
 
-restore_fe_go_idle:
 	/* restore fe_go_idle */
 	g->ops.gr.init.fe_go_idle_timeout(g, true);
 
-	if ((err != 0) || (g->ops.gr.init.wait_idle(g) != 0)) {
-		goto clean_up;
-	}
-
 	/* load method init */
 	g->ops.gr.init.load_method_init(g, sw_method_init);
@@ -544,6 +525,15 @@ restore_fe_go_idle:
 #endif
 
 	err = g->ops.gr.init.wait_idle(g);
 	if (err != 0) {
 		goto clean_up;
 	}
 
+	return 0;
+
+restore_fe_go_idle:
+	/* restore fe_go_idle */
+	g->ops.gr.init.fe_go_idle_timeout(g, true);
+
 clean_up:
 	return err;
@@ -773,12 +763,14 @@ int nvgpu_gr_obj_ctx_alloc(struct gk20a *g,
 		}
 	}
 
+#if defined(CONFIG_NVGPU_GRAPHICS) || defined(CONFIG_NVGPU_CILP)
 	err = nvgpu_gr_obj_ctx_init_ctxsw_preemption_mode(g, config,
 		gr_ctx_desc, gr_ctx, vm, class_num, flags);
 	if (err != 0) {
 		nvgpu_err(g, "fail to init preemption mode");
 		goto out;
 	}
+#endif
 
 	/* map global buffer to channel gpu_va and commit */
 	err = nvgpu_gr_ctx_map_global_ctx_buffers(g, gr_ctx,
@@ -788,12 +780,8 @@ int nvgpu_gr_obj_ctx_alloc(struct gk20a *g,
 		goto out;
 	}
 
-	err = nvgpu_gr_obj_ctx_commit_global_ctx_buffers(g, global_ctx_buffer,
+	nvgpu_gr_obj_ctx_commit_global_ctx_buffers(g, global_ctx_buffer,
 		config, gr_ctx, true);
-	if (err != 0) {
-		nvgpu_err(g, "fail to commit global ctx buffer");
-		goto out;
-	}
 
 	/* commit gr ctx buffer */
 	nvgpu_gr_obj_ctx_commit_inst(g, inst_block, gr_ctx, subctx,
@@ -820,12 +808,8 @@ int nvgpu_gr_obj_ctx_alloc(struct gk20a *g,
 #endif
 
 	/* load golden image */
-	err = nvgpu_gr_ctx_load_golden_ctx_image(g, gr_ctx,
+	nvgpu_gr_ctx_load_golden_ctx_image(g, gr_ctx,
 		golden_image->local_golden_image, cde);
-	if (err != 0) {
-		nvgpu_err(g, "fail to load golden ctx image");
-		goto out;
-	}
 
 	nvgpu_gr_obj_ctx_update_ctxsw_preemption_mode(g, config, gr_ctx,
 		subctx);


@@ -189,6 +189,21 @@ void gv11b_gr_init_commit_gfxp_wfi_timeout(struct gk20a *g,
 	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_fe_gfxp_wfi_timeout_r(),
 		GFXP_WFI_TIMEOUT_COUNT_IN_USEC_DEFAULT, patch);
 }
+
+int gv11b_gr_init_preemption_state(struct gk20a *g)
+{
+	u32 debug_2;
+
+	nvgpu_log_fn(g, " ");
+
+	debug_2 = nvgpu_readl(g, gr_debug_2_r());
+	debug_2 = set_field(debug_2,
+		gr_debug_2_gfxp_wfi_timeout_unit_m(),
+		gr_debug_2_gfxp_wfi_timeout_unit_usec_f());
+	nvgpu_writel(g, gr_debug_2_r(), debug_2);
+
+	return 0;
+}
 #endif /* CONFIG_NVGPU_GRAPHICS */
 
 #ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP


@@ -526,21 +526,6 @@ void gv11b_gr_init_fs_state(struct gk20a *g)
 		nvgpu_ltc_get_ltc_count(g)));
 }
 
-int gv11b_gr_init_preemption_state(struct gk20a *g)
-{
-	u32 debug_2;
-
-	nvgpu_log_fn(g, " ");
-
-	debug_2 = nvgpu_readl(g, gr_debug_2_r());
-	debug_2 = set_field(debug_2,
-		gr_debug_2_gfxp_wfi_timeout_unit_m(),
-		gr_debug_2_gfxp_wfi_timeout_unit_usec_f());
-	nvgpu_writel(g, gr_debug_2_r(), debug_2);
-
-	return 0;
-}
-
 void gv11b_gr_init_commit_global_timeslice(struct gk20a *g)
 {
 	u32 pd_ab_dist_cfg0;


@@ -553,7 +553,9 @@ static const struct gpu_ops gv11b_ops = {
 				gm20b_gr_init_fe_pwr_mode_force_on,
 			.override_context_reset =
 				gm20b_gr_init_override_context_reset,
+#ifdef CONFIG_NVGPU_GRAPHICS
 			.preemption_state = gv11b_gr_init_preemption_state,
+#endif
 			.fe_go_idle_timeout = gm20b_gr_init_fe_go_idle_timeout,
 			.load_method_init = gm20b_gr_init_load_method_init,
 			.commit_global_timeslice =


@@ -316,10 +316,8 @@ struct nvgpu_mem *nvgpu_gr_ctx_get_ctx_mem(struct nvgpu_gr_ctx *gr_ctx);
  * Local golden image copy is saved while creating first graphics context
  * buffer. Subsequent graphics contexts can be initialized by loading
  * golden image into new context with this function.
- *
- * @return 0 in case of success, < 0 in case of failure.
  */
-int nvgpu_gr_ctx_load_golden_ctx_image(struct gk20a *g,
+void nvgpu_gr_ctx_load_golden_ctx_image(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx,
 	struct nvgpu_gr_global_ctx_local_golden_image *local_golden_image,
 	bool cde);


@@ -145,10 +145,8 @@ void nvgpu_gr_obj_ctx_update_ctxsw_preemption_mode(struct gk20a *g,
  *
  * If flag #patch is set, patch context image is used to update the
  * graphics context, otherwise updates are done with register writes.
- *
- * @return 0 in case of success, < 0 in case of failure.
  */
-int nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
+void nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
 	struct nvgpu_gr_global_ctx_buffer_desc *global_ctx_buffer,
 	struct nvgpu_gr_config *config, struct nvgpu_gr_ctx *gr_ctx, bool patch);