gpu: nvgpu: gv11b: MISRA Rule 10.1 fixes

MISRA Rule 10.1 mandates that operands of operators have appropriate data
types; for example, only unsigned integers may be used as operands of
bitwise operators.

This patch fixes Rule 10.1 violations for gv11b.
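
The pattern being fixed can be illustrated with a small, self-contained
sketch (not part of the patch itself). The U32() and BIT32() helpers below
are local stand-ins modelled on the nvgpu helpers of the same name that the
diff uses:

#include <stdint.h>

typedef uint32_t u32;

/* Assumed local stand-ins for nvgpu's U32()/BIT32() helpers. */
#define U32(x)   ((u32)(x))
#define BIT32(i) (U32(1) << U32(i))

static u32 all_ones_mask(void)
{
	/*
	 * Non-compliant: "~0" applies bitwise ~ to a signed int literal.
	 * Compliant form applies it to an unsigned 32-bit value.
	 */
	return ~U32(0U);
}

static u32 low_bits_mask(u32 count)
{
	/*
	 * Non-compliant: "(1 << count) - 1" shifts a signed int.
	 * Compliant form shifts an unsigned value (count must be < 32).
	 */
	return BIT32(count) - 1U;
}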

JIRA NVGPU-777
JIRA NVGPU-1006

Change-Id: Idda45b86c95b535154b4d4632fdc23a18950e380
Signed-off-by: Sai Nikhil <snikhil@nvidia.com>
Signed-off-by: Adeel Raza <araza@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1971168
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>

@@ -1026,10 +1026,10 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 	unsigned long pbdma_id;
 	struct fifo_runlist_info_gk20a *runlist = NULL;
 	unsigned long engine_id;
-	u32 client_type = ~0;
+	u32 client_type = ~U32(0U);
 	struct fifo_gk20a *f = &g->fifo;
 	u32 runlist_id = FIFO_INVAL_RUNLIST_ID;
-	u32 num_runlists = 0;
+	u32 num_runlists = 0U;
 	unsigned long runlist_served_pbdmas;
 
 	nvgpu_log_fn(g, "acquire runlist_lock for all runlists");
@@ -1358,8 +1358,8 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
 
 	/* clear and enable pbdma interrupt */
 	for (i = 0; i < host_num_pbdma; i++) {
-		gk20a_writel(g, pbdma_intr_0_r(i), 0xFFFFFFFF);
-		gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFF);
+		gk20a_writel(g, pbdma_intr_0_r(i), 0xFFFFFFFFU);
+		gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFFU);
 
 		intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i));
 		nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", i, intr_stall);
@@ -1376,7 +1376,7 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
 	}
 
 	/* clear ctxsw timeout interrupts */
-	gk20a_writel(g, fifo_intr_ctxsw_timeout_r(), ~0);
+	gk20a_writel(g, fifo_intr_ctxsw_timeout_r(), ~U32(0U));
 
 	if (nvgpu_platform_is_silicon(g)) {
 		/* enable ctxsw timeout */
@@ -1400,15 +1400,15 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
 	}
 
 	/* clear runlist interrupts */
-	gk20a_writel(g, fifo_intr_runlist_r(), ~0);
+	gk20a_writel(g, fifo_intr_runlist_r(), ~U32(0U));
 
 	/* clear and enable pfifo interrupt */
-	gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF);
+	gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFFU);
 	mask = gv11b_fifo_intr_0_en_mask(g);
 	nvgpu_log_info(g, "fifo_intr_en_0 0x%08x", mask);
 	gk20a_writel(g, fifo_intr_en_0_r(), mask);
 	nvgpu_log_info(g, "fifo_intr_en_1 = 0x80000000");
-	gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000);
+	gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000U);
 
 	nvgpu_log_fn(g, "done");

@@ -1,7 +1,7 @@
 /*
  * GV11b GPU GR
  *
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -441,8 +441,8 @@ void gr_gv11b_enable_exceptions(struct gk20a *g)
 	 */
 
 	/* enable exceptions */
-	gk20a_writel(g, gr_exception2_en_r(), 0x0); /* BE not enabled */
-	gk20a_writel(g, gr_exception1_en_r(), (1 << gr->gpc_count) - 1);
+	gk20a_writel(g, gr_exception2_en_r(), 0x0U); /* BE not enabled */
+	gk20a_writel(g, gr_exception1_en_r(), BIT32(gr->gpc_count) - 1U);
 
 	reg_val = gr_exception_en_fe_enabled_f() |
 		gr_exception_en_memfmt_enabled_f() |
@@ -1133,12 +1133,12 @@ void gr_gv11b_enable_gpc_exceptions(struct gk20a *g)
 
 	tpc_mask =
 		gr_gpcs_gpccs_gpc_exception_en_tpc_f(
-			(1 << gr->max_tpc_per_gpc_count) - 1);
+			BIT32(gr->max_tpc_per_gpc_count) - 1U);
 
 	gk20a_writel(g, gr_gpcs_gpccs_gpc_exception_en_r(),
-		(tpc_mask | gr_gpcs_gpccs_gpc_exception_en_gcc_f(1) |
-			    gr_gpcs_gpccs_gpc_exception_en_gpccs_f(1) |
-			    gr_gpcs_gpccs_gpc_exception_en_gpcmmu_f(1)));
+		(tpc_mask | gr_gpcs_gpccs_gpc_exception_en_gcc_f(1U) |
+			    gr_gpcs_gpccs_gpc_exception_en_gpccs_f(1U) |
+			    gr_gpcs_gpccs_gpc_exception_en_gpcmmu_f(1U)));
 }
 
 int gr_gv11b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc,
@@ -1302,9 +1302,9 @@ u32 gr_gv11b_calc_global_ctx_buffer_size(struct gk20a *g)
 	gr->alpha_cb_size = gr->alpha_cb_default_size;
 
 	gr->attrib_cb_size = min(gr->attrib_cb_size,
-		gr_gpc0_ppc0_cbm_beta_cb_size_v_f(~0) / g->gr.tpc_count);
+		gr_gpc0_ppc0_cbm_beta_cb_size_v_f(~U32(0U)) / g->gr.tpc_count);
 	gr->alpha_cb_size = min(gr->alpha_cb_size,
-		gr_gpc0_ppc0_cbm_alpha_cb_size_v_f(~0) / g->gr.tpc_count);
+		gr_gpc0_ppc0_cbm_alpha_cb_size_v_f(~U32(0U)) / g->gr.tpc_count);
 
 	size = gr->attrib_cb_size *
 		gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v() *
@@ -1527,7 +1527,7 @@ void gr_gv11b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
 
 	gk20a_writel(g, gr_ds_tga_constraintlogic_alpha_r(),
 		(gk20a_readl(g, gr_ds_tga_constraintlogic_alpha_r()) &
-		 ~gr_ds_tga_constraintlogic_alpha_cbsize_f(~0)) |
+		 ~gr_ds_tga_constraintlogic_alpha_cbsize_f(~U32(0U))) |
 		 gr_ds_tga_constraintlogic_alpha_cbsize_f(alpha_cb_size));
 
 	pd_ab_max_output = alpha_cb_size *
@@ -1582,7 +1582,7 @@ void gr_gv11b_set_circular_buffer_size(struct gk20a *g, u32 data)
 
 	gk20a_writel(g, gr_ds_tga_constraintlogic_beta_r(),
 		(gk20a_readl(g, gr_ds_tga_constraintlogic_beta_r()) &
-		 ~gr_ds_tga_constraintlogic_beta_cbsize_f(~0)) |
+		 ~gr_ds_tga_constraintlogic_beta_cbsize_f(~U32(0U))) |
 		 gr_ds_tga_constraintlogic_beta_cbsize_f(cb_size_steady));
 
 	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {