diff --git a/drivers/gpu/nvgpu/common/gr/gr_falcon.c b/drivers/gpu/nvgpu/common/gr/gr_falcon.c
index 4c204d496..1c13ad6fc 100644
--- a/drivers/gpu/nvgpu/common/gr/gr_falcon.c
+++ b/drivers/gpu/nvgpu/common/gr/gr_falcon.c
@@ -600,7 +600,7 @@ static int gr_falcon_recovery_bootstrap(struct gk20a *g,
 	err = gr_falcon_sec2_or_ls_pmu_bootstrap(g, &bootstrap,
 						falcon_idmask);
 
-	if (!bootstrap) {
+	if ((err == 0) && (!bootstrap)) {
 		err = nvgpu_acr_bootstrap_hs_acr(g, g->acr);
 		if (err != 0) {
 			nvgpu_err(g,
@@ -636,7 +636,7 @@ static int gr_falcon_coldboot_bootstrap(struct gk20a *g,
 	err = gr_falcon_sec2_or_ls_pmu_bootstrap(g, &bootstrap,
 					(u32)falcon_id_mask);
 
-	if (!bootstrap) {
+	if ((err == 0) && (!bootstrap)) {
 		/* GR falcons bootstrapped by ACR */
 		err = 0;
 	}
diff --git a/drivers/gpu/nvgpu/common/gr/obj_ctx.c b/drivers/gpu/nvgpu/common/gr/obj_ctx.c
index 6a69d3c59..2d9fcc06e 100644
--- a/drivers/gpu/nvgpu/common/gr/obj_ctx.c
+++ b/drivers/gpu/nvgpu/common/gr/obj_ctx.c
@@ -69,9 +69,11 @@ static int nvgpu_gr_obj_ctx_init_ctxsw_preemption_mode(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx, struct vm_gk20a *vm, u32 class_num,
 	u32 flags)
 {
+#if defined(CONFIG_NVGPU_GRAPHICS) || defined(CONFIG_NVGPU_CILP)
 	int err;
-	u32 graphics_preempt_mode = 0;
-	u32 compute_preempt_mode = 0;
+	u32 graphics_preempt_mode = 0U;
+	u32 compute_preempt_mode = 0U;
+#endif
 
 	nvgpu_log_fn(g, " ");
 
@@ -81,7 +83,6 @@ static int nvgpu_gr_obj_ctx_init_ctxsw_preemption_mode(struct gk20a *g,
 		nvgpu_gr_ctx_init_compute_preemption_mode(gr_ctx,
 			NVGPU_PREEMPTION_MODE_COMPUTE_CTA);
 	}
-
 	return 0;
 }
 
@@ -96,6 +97,7 @@ static int nvgpu_gr_obj_ctx_init_ctxsw_preemption_mode(struct gk20a *g,
 	}
 #endif
 
+#if defined(CONFIG_NVGPU_GRAPHICS) || defined(CONFIG_NVGPU_CILP)
 	if ((graphics_preempt_mode != 0U) || (compute_preempt_mode != 0U)) {
 		err = nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode(g, config,
 			gr_ctx_desc, gr_ctx, vm, class_num, graphics_preempt_mode,
@@ -105,6 +107,7 @@ static int nvgpu_gr_obj_ctx_init_ctxsw_preemption_mode(struct gk20a *g,
 			return err;
 		}
 	}
+#endif
 
 	nvgpu_log_fn(g, "done");
 
@@ -204,9 +207,6 @@ static int nvgpu_gr_obj_ctx_set_compute_preemption_mode(struct gk20a *g,
 
 	return 0;
 }
-
-
-
 int nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode(struct gk20a *g,
 	struct nvgpu_gr_config *config, struct nvgpu_gr_ctx_desc *gr_ctx_desc,
 	struct nvgpu_gr_ctx *gr_ctx, struct vm_gk20a *vm, u32 class_num,
@@ -537,7 +537,7 @@ restore_fe_go_idle:
 
#ifdef CONFIG_NVGPU_GR_GOLDEN_CTX_VERIFICATION
 	/* restore stats bundle data through mme shadow methods */
-	if (g->ops.gr.init.restore_stats_counter_bundle_data != 0) {
+	if (g->ops.gr.init.restore_stats_counter_bundle_data != NULL) {
 		g->ops.gr.init.restore_stats_counter_bundle_data(g,
 			sw_bundle_init);
 	}
diff --git a/drivers/gpu/nvgpu/hal/gr/falcon/gr_falcon_gp10b_fusa.c b/drivers/gpu/nvgpu/hal/gr/falcon/gr_falcon_gp10b_fusa.c
index 991bfe84c..390a17eaf 100644
--- a/drivers/gpu/nvgpu/hal/gr/falcon/gr_falcon_gp10b_fusa.c
+++ b/drivers/gpu/nvgpu/hal/gr/falcon/gr_falcon_gp10b_fusa.c
@@ -72,8 +72,8 @@ int gp10b_gr_falcon_ctrl_ctxsw(struct gk20a *g, u32 fecs_method,
 	nvgpu_log_info(g, "fecs method %d data 0x%x ret_val %p",
 		fecs_method, data, ret_val);
 
-	switch (fecs_method) {
 #ifdef CONFIG_NVGPU_GRAPHICS
+	switch (fecs_method) {
 	case NVGPU_GR_FALCON_METHOD_PREEMPT_IMAGE_SIZE:
 		op.method.addr =
 		gr_fecs_method_push_adr_discover_preemption_image_size_v();
@@ -90,11 +90,14 @@ int gp10b_gr_falcon_ctrl_ctxsw(struct gk20a *g, u32 fecs_method,
 		op.cond.ok = GR_IS_UCODE_OP_EQUAL;
 		ret = gm20b_gr_falcon_submit_fecs_sideband_method_op(g, op);
 		break;
-#endif
+	default:
+#endif
 		ret = gm20b_gr_falcon_ctrl_ctxsw(g, fecs_method,
 					data, ret_val);
+#ifdef CONFIG_NVGPU_GRAPHICS
 		break;
 	}
+#endif
 
 	return ret;
 }
diff --git a/drivers/gpu/nvgpu/hal/gr/intr/gr_intr_gm20b_fusa.c b/drivers/gpu/nvgpu/hal/gr/intr/gr_intr_gm20b_fusa.c
index 79e46f893..59fc2a601 100644
--- a/drivers/gpu/nvgpu/hal/gr/intr/gr_intr_gm20b_fusa.c
+++ b/drivers/gpu/nvgpu/hal/gr/intr/gr_intr_gm20b_fusa.c
@@ -456,6 +456,7 @@ u64 gm20b_gr_intr_tpc_enabled_exceptions(struct gk20a *g)
 {
 	u32 sm_id;
 	u64 tpc_exception_en = 0;
+	u32 sm_bit_in_tpc = 0U;
 	u32 offset, regval, tpc_offset, gpc_offset;
 	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
 	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
@@ -474,9 +475,9 @@ u64 gm20b_gr_intr_tpc_enabled_exceptions(struct gk20a *g)
 		regval = gk20a_readl(g, nvgpu_safe_add_u32(
 			gr_gpc0_tpc0_tpccs_tpc_exception_en_r(), offset));
 		/* Each bit represents corresponding enablement state, bit 0 corrsponds to SM0 */
-		tpc_exception_en |=
-			(u64)gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_v(regval) <<
-			(u64)sm_id;
+		sm_bit_in_tpc =
+			gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_v(regval);
+		tpc_exception_en |= (u64)sm_bit_in_tpc << sm_id;
 	}
 
 	return tpc_exception_en;
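
Note (illustrative, not part of the patch): the gr_falcon.c hunks only consult the bootstrap out-parameter once err == 0, presumably because gr_falcon_sec2_or_ls_pmu_bootstrap() may fail before writing it. A minimal standalone sketch of that hazard; try_bootstrap() and its values are hypothetical stand-ins, not nvgpu code:

#include <stdio.h>

/* Hypothetical stand-in for gr_falcon_sec2_or_ls_pmu_bootstrap():
 * on failure it returns early without writing the out-parameter. */
static int try_bootstrap(int fail, int *bootstrapped)
{
	if (fail != 0) {
		return -1;	/* *bootstrapped left indeterminate */
	}
	*bootstrapped = 0;	/* falcons not bootstrapped by SEC2/LS-PMU */
	return 0;
}

int main(void)
{
	int bootstrapped;	/* not initialized, as the caller may not do it */
	int err = try_bootstrap(1, &bootstrapped);

	/* Old form: "if (!bootstrapped)" could read an indeterminate
	 * value on the error path; the new form gates on err first. */
	if ((err == 0) && (bootstrapped == 0)) {
		puts("fall back to ACR bootstrap");
	} else if (err != 0) {
		puts("propagate error");
	}
	return 0;
}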
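Note (illustrative, not part of the patch): the gr_falcon_gp10b_fusa.c hunks move the switch itself under CONFIG_NVGPU_GRAPHICS and place a new default: label ahead of the #endif, so the graphics build routes unhandled methods through the default case while the non-graphics build reduces to the bare fallback call. A compilable sketch of the same preprocessor shape; FEATURE_GRAPHICS and the handlers are hypothetical:

#include <stdio.h>

#define FEATURE_GRAPHICS 1	/* flip to 0 to see the reduced shape */
#define METHOD_PREEMPT_IMAGE_SIZE 1

static int handle_preempt_image_size(void) { return 100; }
static int handle_fallback(void) { return 42; }

static int ctrl_ctxsw(int method)
{
	int ret;

#if FEATURE_GRAPHICS
	switch (method) {
	case METHOD_PREEMPT_IMAGE_SIZE:
		ret = handle_preempt_image_size();
		break;
	default:
#endif
		/* With FEATURE_GRAPHICS == 0 this call is the whole body. */
		ret = handle_fallback();
#if FEATURE_GRAPHICS
		break;
	}
#endif
	return ret;
}

int main(void)
{
	printf("%d %d\n", ctrl_ctxsw(METHOD_PREEMPT_IMAGE_SIZE), ctrl_ctxsw(0));
	return 0;
}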
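Note (illustrative, not part of the patch): the gr_intr_gm20b_fusa.c hunk splits the compound OR-shift into a named temporary; the register field is still widened to u64 before the shift, which is what keeps the shift defined once sm_id can reach 32 or more. Standalone sketch with hypothetical values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t sm_bit_in_tpc = 1U;	/* per-SM enable bit from the register */
	uint64_t tpc_exception_en = 0U;
	uint32_t sm_id;

	for (sm_id = 0U; sm_id < 40U; sm_id++) {
		/* Without the (uint64_t) cast the shift would be performed
		 * in 32 bits and be undefined for sm_id >= 32. */
		tpc_exception_en |= (uint64_t)sm_bit_in_tpc << sm_id;
	}
	printf("0x%016llx\n", (unsigned long long)tpc_exception_en);
	return 0;
}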