Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: fix misra errors in gr units
Fix MISRA errors in gr units.

MISRA rule 14.3: there shall be no dead code.
misra_c_2012_rule_14_3_violation: The condition "graphics_preempt_mode != 0U" cannot be true.
misra_c_2012_rule_16_1_violation: The switch statement is not well formed.
misra_c_2012_rule_10_8_violation: Cast from 32 bit width expression "(regval >> 1U) & 1U" to a wider 64 bit type.

Jira NVGPU-3872
Change-Id: Ibb53d0756d464d2ae3279d1b841b3c91a16df9be
Signed-off-by: Vinod G <vinodg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2182562
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 70a2a1bfcb
parent 0a9f633fc3
committed by mobile promotions
@@ -600,7 +600,7 @@ static int gr_falcon_recovery_bootstrap(struct gk20a *g,
 	err = gr_falcon_sec2_or_ls_pmu_bootstrap(g,
 			&bootstrap,
 			falcon_idmask);
-	if (!bootstrap) {
+	if ((err == 0) && (!bootstrap)) {
 		err = nvgpu_acr_bootstrap_hs_acr(g, g->acr);
 		if (err != 0) {
 			nvgpu_err(g,
@@ -636,7 +636,7 @@ static int gr_falcon_coldboot_bootstrap(struct gk20a *g,
 	err = gr_falcon_sec2_or_ls_pmu_bootstrap(g,
 			&bootstrap,
 			(u32)falcon_id_mask);
-	if (!bootstrap) {
+	if ((err == 0) && (!bootstrap)) {
 		/* GR falcons bootstrapped by ACR */
 		err = 0;
 	}
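Both hunks above apply the same fix: bootstrap is an out-parameter that is only meaningful when gr_falcon_sec2_or_ls_pmu_bootstrap() succeeds, so testing it without checking err could take the ACR fallback path after a failed call and overwrite the original error code. A minimal standalone sketch of the pattern, with hypothetical helper names standing in for the nvgpu functions:

    #include <stdbool.h>

    /* Hypothetical stand-in for gr_falcon_sec2_or_ls_pmu_bootstrap():
     * reports via an out-parameter, but only meaningfully on success. */
    static int try_ls_bootstrap(bool *bootstrapped)
    {
        *bootstrapped = false;
        return -1;              /* pretend the LS path failed */
    }

    static int acr_fallback(void)
    {
        return 0;               /* pretend the HS fallback would succeed */
    }

    int recovery_bootstrap(void)
    {
        bool bootstrapped = false;
        int err = try_ls_bootstrap(&bootstrapped);

        /* Before the fix, "if (!bootstrapped)" ran even when err != 0
         * and clobbered the error. Gating on err keeps the failure. */
        if ((err == 0) && (!bootstrapped)) {
            err = acr_fallback();
        }
        return err;
    }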
@@ -69,9 +69,11 @@ static int nvgpu_gr_obj_ctx_init_ctxsw_preemption_mode(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx, struct vm_gk20a *vm,
 	u32 class_num, u32 flags)
 {
+#if defined(CONFIG_NVGPU_GRAPHICS) || defined(CONFIG_NVGPU_CILP)
 	int err;
-	u32 graphics_preempt_mode = 0;
-	u32 compute_preempt_mode = 0;
+	u32 graphics_preempt_mode = 0U;
+	u32 compute_preempt_mode = 0U;
+#endif
 
 	nvgpu_log_fn(g, " ");
 
@@ -81,7 +83,6 @@ static int nvgpu_gr_obj_ctx_init_ctxsw_preemption_mode(struct gk20a *g,
 			nvgpu_gr_ctx_init_compute_preemption_mode(gr_ctx,
 				NVGPU_PREEMPTION_MODE_COMPUTE_CTA);
 		}
-
 		return 0;
 	}
 
@@ -96,6 +97,7 @@ static int nvgpu_gr_obj_ctx_init_ctxsw_preemption_mode(struct gk20a *g,
 	}
 #endif
 
+#if defined(CONFIG_NVGPU_GRAPHICS) || defined(CONFIG_NVGPU_CILP)
 	if ((graphics_preempt_mode != 0U) || (compute_preempt_mode != 0U)) {
 		err = nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode(g, config,
 			gr_ctx_desc, gr_ctx, vm, class_num, graphics_preempt_mode,
@@ -105,6 +107,7 @@ static int nvgpu_gr_obj_ctx_init_ctxsw_preemption_mode(struct gk20a *g,
 			return err;
 		}
 	}
+#endif
 
 	nvgpu_log_fn(g, "done");
 
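The rule 14.3 violation named in the commit message comes from this function: with neither CONFIG_NVGPU_GRAPHICS nor CONFIG_NVGPU_CILP set, both mode variables keep their 0U initializers, so the "!= 0U" test could never be true. Fencing the declarations and their use under the same #if removes the dead branch. A compilable sketch of the shape, with simplified names:

    /* Stand-in for CONFIG_NVGPU_GRAPHICS / CONFIG_NVGPU_CILP; remove it
     * to see the configuration where the branch would have been dead. */
    #define FEATURE_PREEMPTION 1

    static void apply_mode(unsigned int mode) { (void)mode; }

    int init_ctxsw_preemption_mode(unsigned int flags)
    {
    #if defined(FEATURE_PREEMPTION)
        unsigned int mode = 0U;

        if ((flags & 1U) != 0U) {
            mode = 2U;          /* feature code is the only writer */
        }
    #endif

        /* ...feature-independent setup runs unconditionally here... */

    #if defined(FEATURE_PREEMPTION)
        if (mode != 0U) {       /* now reachable in every compiled config */
            apply_mode(mode);
        }
    #endif
        return 0;
    }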
@@ -204,9 +207,6 @@ static int nvgpu_gr_obj_ctx_set_compute_preemption_mode(struct gk20a *g,
 	return 0;
 }
 
-
-
-
 int nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode(struct gk20a *g,
 	struct nvgpu_gr_config *config, struct nvgpu_gr_ctx_desc *gr_ctx_desc,
 	struct nvgpu_gr_ctx *gr_ctx, struct vm_gk20a *vm, u32 class_num,
@@ -537,7 +537,7 @@ restore_fe_go_idle:
 
 #ifdef CONFIG_NVGPU_GR_GOLDEN_CTX_VERIFICATION
 	/* restore stats bundle data through mme shadow methods */
-	if (g->ops.gr.init.restore_stats_counter_bundle_data != 0) {
+	if (g->ops.gr.init.restore_stats_counter_bundle_data != NULL) {
 		g->ops.gr.init.restore_stats_counter_bundle_data(g,
 			sw_bundle_init);
 	}
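The "!= 0" to "!= NULL" change above keeps the null check on the optional restore_stats_counter_bundle_data HAL hook pointer-typed instead of comparing a function pointer against an integer constant. A minimal sketch of the idiom, with hypothetical struct and hook names:

    #include <stddef.h>

    struct gr_init_ops {
        void (*restore_bundle)(int data);   /* optional hook; may be unset */
    };

    static void call_restore_bundle(const struct gr_init_ops *ops)
    {
        /* NULL keeps both operands of the comparison pointer-typed;
         * "!= 0" mixed a function pointer with an integer constant. */
        if (ops->restore_bundle != NULL) {
            ops->restore_bundle(42);
        }
    }

    int main(void)
    {
        struct gr_init_ops ops = { NULL };
        call_restore_bundle(&ops);          /* hook unset: safely skipped */
        return 0;
    }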
@@ -72,8 +72,8 @@ int gp10b_gr_falcon_ctrl_ctxsw(struct gk20a *g, u32 fecs_method,
 	nvgpu_log_info(g, "fecs method %d data 0x%x ret_val %p",
 		fecs_method, data, ret_val);
 
-	switch (fecs_method) {
 #ifdef CONFIG_NVGPU_GRAPHICS
+	switch (fecs_method) {
 	case NVGPU_GR_FALCON_METHOD_PREEMPT_IMAGE_SIZE:
 		op.method.addr =
 			gr_fecs_method_push_adr_discover_preemption_image_size_v();
@@ -90,11 +90,14 @@ int gp10b_gr_falcon_ctrl_ctxsw(struct gk20a *g, u32 fecs_method,
 		op.cond.ok = GR_IS_UCODE_OP_EQUAL;
 		ret = gm20b_gr_falcon_submit_fecs_sideband_method_op(g, op);
 		break;
-#endif
 	default:
+#endif
 		ret = gm20b_gr_falcon_ctrl_ctxsw(g, fecs_method, data, ret_val);
+#ifdef CONFIG_NVGPU_GRAPHICS
 		break;
 	}
+#endif
 	return ret;
 
 }
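The two hunks above resolve the rule 16.1 complaint by moving the switch itself under CONFIG_NVGPU_GRAPHICS: previously only the graphics case labels were fenced, so the non-graphics build assembled its switch out of conditionally compiled fragments. After the change, each configuration compiles one self-contained statement: the full switch, or a bare call to the gm20b fallback. A compilable sketch of the same shape, with simplified names:

    #define FEATURE_GRAPHICS 1  /* stand-in for CONFIG_NVGPU_GRAPHICS */

    static int generic_ctrl(unsigned int method)
    {
        return (int)method;     /* stand-in for the gm20b fallback */
    }

    int ctrl_ctxsw(unsigned int method)
    {
        int ret;

    #ifdef FEATURE_GRAPHICS
        switch (method) {
        case 1U:
            ret = 100;          /* feature-specific handling */
            break;
        default:
    #endif
            ret = generic_ctrl(method);     /* shared in both configs */
    #ifdef FEATURE_GRAPHICS
            break;
        }
    #endif
        return ret;
    }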
@@ -456,6 +456,7 @@ u64 gm20b_gr_intr_tpc_enabled_exceptions(struct gk20a *g)
 {
 	u32 sm_id;
 	u64 tpc_exception_en = 0;
+	u32 sm_bit_in_tpc = 0U;
 	u32 offset, regval, tpc_offset, gpc_offset;
 	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
 	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
@@ -474,9 +475,9 @@ u64 gm20b_gr_intr_tpc_enabled_exceptions(struct gk20a *g)
 		regval = gk20a_readl(g, nvgpu_safe_add_u32(
 			gr_gpc0_tpc0_tpccs_tpc_exception_en_r(), offset));
 		/* Each bit represents corresponding enablement state, bit 0 corrsponds to SM0 */
-		tpc_exception_en |=
-			(u64)gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_v(regval) <<
-			(u64)sm_id;
+		sm_bit_in_tpc =
+			gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_v(regval);
+		tpc_exception_en |= (u64)sm_bit_in_tpc << sm_id;
 	}
 
 	return tpc_exception_en;
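This last hunk is the rule 10.8 fix: the cast to u64 now applies to a simple u32 object rather than to the composite expression "(regval >> 1U) & 1U" flagged in the commit message, and the shift by sm_id happens at 64-bit width, where shift counts of 32 or more are still defined. A self-contained sketch with made-up register values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t regval = 0x2U;     /* pretend readback: the SM enable bit is set */
        uint32_t sm_id = 33U;       /* a shift count a u32 could not hold */
        uint64_t exception_en = 0U;

        /* Non-compliant form (rule 10.8):
         *     exception_en |= (uint64_t)((regval >> 1U) & 1U) << sm_id;
         * casts a composite expression to a wider type. */
        uint32_t sm_bit = (regval >> 1U) & 1U;      /* extraction stays 32-bit */
        exception_en |= (uint64_t)sm_bit << sm_id;  /* widen a simple object, then shift */

        printf("exception_en = 0x%llx\n", (unsigned long long)exception_en);
        return 0;
    }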