gpu: nvgpu: fix misra errors in gr unit

Fix a few MISRA 4.7 and MISRA 14.3 violations in the GR units.

misra_c_2012_rule_14_3_violation:
The condition "compute_preempt_mode != 0U" must be true.

Fix misra_c_2012_directive_4_7_violation by handling the return values of the following functions:
nvgpu_gr_global_ctx_buffer_sys_alloc
nvgpu_gr_setup_validate_channel_and_class
gr_gv11b_ecc_scrub_is_done

Jira NVGPU-4054

Change-Id: I64ba6fb29d202abbe12a38b94f6080f63c070db9
Signed-off-by: Vinod G <vinodg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2196596
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Vinod G
2019-09-12 15:53:36 -07:00
committed by Alex Waterman
parent 9d6e774f20
commit 8a7e76b8a2
4 changed files with 57 additions and 35 deletions

View File

@@ -624,7 +624,10 @@ bool nvgpu_gr_ctx_check_valid_preemption_mode(struct nvgpu_gr_ctx *gr_ctx,
}
#endif
if ((compute_preempt_mode != 0U) &&
if (
#ifdef CONFIG_NVGPU_GRAPHICS
(compute_preempt_mode != 0U) &&
#endif
(compute_preempt_mode < gr_ctx->compute_preempt_mode)) {
return false;
}

View File

@@ -221,8 +221,9 @@ int nvgpu_gr_global_ctx_buffer_alloc(struct gk20a *g,
return -EINVAL;
}
if (nvgpu_gr_global_ctx_buffer_sys_alloc(g, desc) != 0) {
goto clean_up;
err = nvgpu_gr_global_ctx_buffer_sys_alloc(g, desc);
if (err != 0) {
goto clean_up;
}
#ifdef CONFIG_NVGPU_FECS_TRACE

View File

@@ -147,7 +147,8 @@ int nvgpu_gr_setup_alloc_obj_ctx(struct nvgpu_channel *c, u32 class_num,
nvgpu_log_fn(g, " ");
if (nvgpu_gr_setup_validate_channel_and_class(g, c, class_num) != 0) {
err = nvgpu_gr_setup_validate_channel_and_class(g, c, class_num);
if (err != 0) {
goto out;
}

View File

@@ -113,14 +113,15 @@ static int gr_gv11b_ecc_scrub_is_done(struct gk20a *g,
return 0;
}
static int gr_gv11b_ecc_scrub_sm_lrf(struct gk20a *g,
static void gr_gv11b_ecc_scrub_sm_lrf(struct gk20a *g,
struct nvgpu_gr_config *gr_config)
{
u32 scrub_mask, scrub_done;
int err = 0;
if (!nvgpu_is_enabled(g, NVGPU_ECC_ENABLED_SM_LRF)) {
nvgpu_log_info(g, "ECC SM LRF is disabled");
return 0;
return;
}
nvgpu_log_info(g, "gr_gv11b_ecc_scrub_sm_lrf");
@@ -147,19 +148,23 @@ static int gr_gv11b_ecc_scrub_sm_lrf(struct gk20a *g,
gr_pri_gpc0_tpc0_sm_lrf_ecc_control_scrub_qrfdp6_init_f() |
gr_pri_gpc0_tpc0_sm_lrf_ecc_control_scrub_qrfdp7_init_f());
return gr_gv11b_ecc_scrub_is_done(g, gr_config,
err = gr_gv11b_ecc_scrub_is_done(g, gr_config,
gr_pri_gpc0_tpc0_sm_lrf_ecc_control_r(),
scrub_mask, scrub_done);
if (err != 0) {
nvgpu_warn(g, "ECC SCRUB SM LRF Failed");
}
}
static int gr_gv11b_ecc_scrub_sm_l1_data(struct gk20a *g,
static void gr_gv11b_ecc_scrub_sm_l1_data(struct gk20a *g,
struct nvgpu_gr_config *gr_config)
{
u32 scrub_mask, scrub_done;
int err = 0;
if (!nvgpu_is_enabled(g, NVGPU_ECC_ENABLED_SM_L1_DATA)) {
nvgpu_log_info(g, "ECC L1DATA is disabled");
return 0;
return;
}
nvgpu_log_info(g, "gr_gv11b_ecc_scrub_sm_l1_data");
scrub_mask =
@@ -172,19 +177,24 @@ static int gr_gv11b_ecc_scrub_sm_l1_data(struct gk20a *g,
scrub_done =
(gr_pri_gpc0_tpc0_sm_l1_data_ecc_control_scrub_el1_0_init_f() |
gr_pri_gpc0_tpc0_sm_l1_data_ecc_control_scrub_el1_1_init_f());
return gr_gv11b_ecc_scrub_is_done(g, gr_config,
err = gr_gv11b_ecc_scrub_is_done(g, gr_config,
gr_pri_gpc0_tpc0_sm_l1_data_ecc_control_r(),
scrub_mask, scrub_done);
if (err != 0) {
nvgpu_warn(g, "ECC SCRUB SM L1 DATA Failed");
}
}
static int gr_gv11b_ecc_scrub_sm_l1_tag(struct gk20a *g,
static void gr_gv11b_ecc_scrub_sm_l1_tag(struct gk20a *g,
struct nvgpu_gr_config *gr_config)
{
u32 scrub_mask, scrub_done;
int err = 0;
if (!nvgpu_is_enabled(g, NVGPU_ECC_ENABLED_SM_L1_TAG)) {
nvgpu_log_info(g, "ECC L1TAG is disabled");
return 0;
return;
}
nvgpu_log_info(g, "gr_gv11b_ecc_scrub_sm_l1_tag");
scrub_mask =
@@ -200,19 +210,24 @@ static int gr_gv11b_ecc_scrub_sm_l1_tag(struct gk20a *g,
gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_scrub_el1_1_init_f() |
gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_scrub_pixprf_init_f() |
gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_scrub_miss_fifo_init_f());
return gr_gv11b_ecc_scrub_is_done(g, gr_config,
err = gr_gv11b_ecc_scrub_is_done(g, gr_config,
gr_pri_gpc0_tpc0_sm_l1_tag_ecc_control_r(),
scrub_mask, scrub_done);
if (err != 0) {
nvgpu_warn(g, "ECC SCRUB SM L1 TAG Failed");
}
}
static int gr_gv11b_ecc_scrub_sm_cbu(struct gk20a *g,
static void gr_gv11b_ecc_scrub_sm_cbu(struct gk20a *g,
struct nvgpu_gr_config *gr_config)
{
u32 scrub_mask, scrub_done;
int err = 0;
if (!nvgpu_is_enabled(g, NVGPU_ECC_ENABLED_SM_CBU)) {
nvgpu_log_info(g, "ECC CBU is disabled");
return 0;
return;
}
nvgpu_log_info(g, "gr_gv11b_ecc_scrub_sm_cbu");
scrub_mask =
@@ -227,19 +242,24 @@ static int gr_gv11b_ecc_scrub_sm_cbu(struct gk20a *g,
gr_pri_gpc0_tpc0_sm_cbu_ecc_control_scrub_warp_sm1_init_f() |
gr_pri_gpc0_tpc0_sm_cbu_ecc_control_scrub_barrier_sm0_init_f() |
gr_pri_gpc0_tpc0_sm_cbu_ecc_control_scrub_barrier_sm1_init_f());
return gr_gv11b_ecc_scrub_is_done(g, gr_config,
err = gr_gv11b_ecc_scrub_is_done(g, gr_config,
gr_pri_gpc0_tpc0_sm_cbu_ecc_control_r(),
scrub_mask, scrub_done);
if (err != 0) {
nvgpu_warn(g, "ECC SCRUB SM CBU Failed");
}
}
static int gr_gv11b_ecc_scrub_sm_icahe(struct gk20a *g,
static void gr_gv11b_ecc_scrub_sm_icahe(struct gk20a *g,
struct nvgpu_gr_config *gr_config)
{
u32 scrub_mask, scrub_done;
int err = 0;
if (!nvgpu_is_enabled(g, NVGPU_ECC_ENABLED_SM_ICACHE)) {
nvgpu_log_info(g, "ECC ICAHE is disabled");
return 0;
return;
}
nvgpu_log_info(g, "gr_gv11b_ecc_scrub_sm_icahe");
scrub_mask =
@@ -255,32 +275,29 @@ static int gr_gv11b_ecc_scrub_sm_icahe(struct gk20a *g,
gr_pri_gpc0_tpc0_sm_icache_ecc_control_scrub_l0_predecode_init_f() |
gr_pri_gpc0_tpc0_sm_icache_ecc_control_scrub_l1_data_init_f() |
gr_pri_gpc0_tpc0_sm_icache_ecc_control_scrub_l1_predecode_init_f());
return gr_gv11b_ecc_scrub_is_done(g, gr_config,
err = gr_gv11b_ecc_scrub_is_done(g, gr_config,
gr_pri_gpc0_tpc0_sm_icache_ecc_control_r(),
scrub_mask, scrub_done);
if (err != 0) {
nvgpu_warn(g, "ECC SCRUB SM ICACHE Failed");
}
}
void gv11b_gr_init_ecc_scrub_reg(struct gk20a *g,
struct nvgpu_gr_config *gr_config)
{
nvgpu_log_fn(g, "ecc srub start ");
nvgpu_log_fn(g, "ecc srub start");
if (gr_gv11b_ecc_scrub_sm_lrf(g, gr_config) != 0) {
nvgpu_warn(g, "ECC SCRUB SM LRF Failed");
}
if (gr_gv11b_ecc_scrub_sm_l1_data(g, gr_config) != 0) {
nvgpu_warn(g, "ECC SCRUB SM L1 DATA Failed");
}
if (gr_gv11b_ecc_scrub_sm_l1_tag(g, gr_config) != 0) {
nvgpu_warn(g, "ECC SCRUB SM L1 TAG Failed");
}
if (gr_gv11b_ecc_scrub_sm_cbu(g, gr_config) != 0) {
nvgpu_warn(g, "ECC SCRUB SM CBU Failed");
}
if (gr_gv11b_ecc_scrub_sm_icahe(g, gr_config) != 0) {
nvgpu_warn(g, "ECC SCRUB SM ICACHE Failed");
}
gr_gv11b_ecc_scrub_sm_lrf(g, gr_config);
gr_gv11b_ecc_scrub_sm_l1_data(g, gr_config);
gr_gv11b_ecc_scrub_sm_l1_tag(g, gr_config);
gr_gv11b_ecc_scrub_sm_cbu(g, gr_config);
gr_gv11b_ecc_scrub_sm_icahe(g, gr_config);
}
u32 gv11b_gr_init_get_nonpes_aware_tpc(struct gk20a *g, u32 gpc, u32 tpc,