gpu: nvgpu: MISRA 14.4 bitwise operation as boolean

MISRA C:2012 Rule 14.4 does not allow an expression of integer type to
be used as a boolean in the controlling expression of an if statement
or an iteration statement.

Fix violations where the result of a bitwise operation is used as a
boolean in the controlling expression of an if or loop statement, by
explicitly comparing the result against 0U.
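
The typical transformation is illustrated below (a minimal sketch;
"intr", "pending_mask", and "handle_pending" are placeholder names used
for illustration only, not identifiers taken from this change):

    /* Non-compliant: the u32 result of the bitwise AND is used
     * directly as the controlling expression (Rule 14.4). */
    if (intr & pending_mask) {
        handle_pending(g);
    }

    /* Compliant form applied in this change: compare the bitwise
     * result against 0U so the controlling expression is boolean. */
    if ((intr & pending_mask) != 0U) {
        handle_pending(g);
    }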

JIRA NVGPU-1020

Change-Id: If910150072c3dd67c31fe9819c3a9e738fd3c1c6
Signed-off-by: Amurthyreddy <amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1932389
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Amurthyreddy
Date: 2018-10-22 16:35:48 +05:30
Committed by: mobile promotions
Parent: 0d065df144
Commit: 89660dbd62
21 changed files with 226 additions and 206 deletions

View File

@@ -80,11 +80,11 @@ void gk20a_ce2_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
nvgpu_log(g, gpu_dbg_intr, "ce2 isr %08x\n", ce2_intr);
/* clear blocking interrupts: they exibit broken behavior */
-if (ce2_intr & ce2_intr_status_blockpipe_pending_f()) {
+if ((ce2_intr & ce2_intr_status_blockpipe_pending_f()) != 0U) {
clear_intr |= ce2_blockpipe_isr(g, ce2_intr);
}
-if (ce2_intr & ce2_intr_status_launcherr_pending_f()) {
+if ((ce2_intr & ce2_intr_status_launcherr_pending_f()) != 0U) {
clear_intr |= ce2_launcherr_isr(g, ce2_intr);
}
@@ -99,7 +99,7 @@ u32 gk20a_ce2_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
nvgpu_log(g, gpu_dbg_intr, "ce2 nonstall isr %08x\n", ce2_intr);
-if (ce2_intr & ce2_intr_status_nonblockpipe_pending_f()) {
+if ((ce2_intr & ce2_intr_status_nonblockpipe_pending_f()) != 0U) {
gk20a_writel(g, ce2_intr_status_r(),
ce2_nonblockpipe_isr(g, ce2_intr));
ops |= (GK20A_NONSTALL_OPS_WAKEUP_SEMAPHORE |

View File

@@ -2461,38 +2461,38 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
nvgpu_log_fn(g, "fifo_intr=0x%08x", fifo_intr);
-if (fifo_intr & fifo_intr_0_pio_error_pending_f()) {
+if ((fifo_intr & fifo_intr_0_pio_error_pending_f()) != 0U) {
/* pio mode is unused. this shouldn't happen, ever. */
/* should we clear it or just leave it pending? */
nvgpu_err(g, "fifo pio error!");
BUG_ON(1);
}
-if (fifo_intr & fifo_intr_0_bind_error_pending_f()) {
+if ((fifo_intr & fifo_intr_0_bind_error_pending_f()) != 0U) {
u32 bind_error = gk20a_readl(g, fifo_intr_bind_error_r());
nvgpu_err(g, "fifo bind error: 0x%08x", bind_error);
print_channel_reset_log = true;
handled |= fifo_intr_0_bind_error_pending_f();
}
-if (fifo_intr & fifo_intr_0_sched_error_pending_f()) {
+if ((fifo_intr & fifo_intr_0_sched_error_pending_f()) != 0U) {
print_channel_reset_log = g->ops.fifo.handle_sched_error(g);
handled |= fifo_intr_0_sched_error_pending_f();
}
-if (fifo_intr & fifo_intr_0_chsw_error_pending_f()) {
+if ((fifo_intr & fifo_intr_0_chsw_error_pending_f()) != 0U) {
gk20a_fifo_handle_chsw_fault(g);
handled |= fifo_intr_0_chsw_error_pending_f();
}
-if (fifo_intr & fifo_intr_0_mmu_fault_pending_f()) {
+if ((fifo_intr & fifo_intr_0_mmu_fault_pending_f()) != 0U) {
if (gk20a_fifo_handle_mmu_fault(g, 0, ~(u32)0, false)) {
print_channel_reset_log = true;
}
handled |= fifo_intr_0_mmu_fault_pending_f();
}
-if (fifo_intr & fifo_intr_0_dropped_mmu_fault_pending_f()) {
+if ((fifo_intr & fifo_intr_0_dropped_mmu_fault_pending_f()) != 0U) {
gk20a_fifo_handle_dropped_mmu_fault(g);
handled |= fifo_intr_0_dropped_mmu_fault_pending_f();
}
@@ -2616,7 +2616,7 @@ unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id,
pbdma_intr_0);
}
-if (pbdma_intr_0 & pbdma_intr_0_acquire_pending_f()) {
+if ((pbdma_intr_0 & pbdma_intr_0_acquire_pending_f()) != 0U) {
u32 val = gk20a_readl(g, pbdma_acquire_r(pbdma_id));
val &= ~pbdma_acquire_timeout_en_enable_f();
@@ -2630,24 +2630,24 @@ unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id,
*handled |= pbdma_intr_0_acquire_pending_f();
}
-if (pbdma_intr_0 & pbdma_intr_0_pbentry_pending_f()) {
+if ((pbdma_intr_0 & pbdma_intr_0_pbentry_pending_f()) != 0U) {
gk20a_fifo_reset_pbdma_header(g, pbdma_id);
gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0);
rc_type = RC_TYPE_PBDMA_FAULT;
}
-if (pbdma_intr_0 & pbdma_intr_0_method_pending_f()) {
+if ((pbdma_intr_0 & pbdma_intr_0_method_pending_f()) != 0U) {
gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0);
rc_type = RC_TYPE_PBDMA_FAULT;
}
-if (pbdma_intr_0 & pbdma_intr_0_pbcrc_pending_f()) {
+if ((pbdma_intr_0 & pbdma_intr_0_pbcrc_pending_f()) != 0U) {
*error_notifier =
NVGPU_ERR_NOTIFIER_PBDMA_PUSHBUFFER_CRC_MISMATCH;
rc_type = RC_TYPE_PBDMA_FAULT;
}
-if (pbdma_intr_0 & pbdma_intr_0_device_pending_f()) {
+if ((pbdma_intr_0 & pbdma_intr_0_device_pending_f()) != 0U) {
gk20a_fifo_reset_pbdma_header(g, pbdma_id);
for (i = 0; i < 4; i++) {
@@ -2795,11 +2795,11 @@ void gk20a_fifo_isr(struct gk20a *g)
nvgpu_log(g, gpu_dbg_intr, "fifo isr %08x\n", fifo_intr);
/* handle runlist update */
-if (fifo_intr & fifo_intr_0_runlist_event_pending_f()) {
+if ((fifo_intr & fifo_intr_0_runlist_event_pending_f()) != 0U) {
gk20a_fifo_handle_runlist_event(g);
clear_intr |= fifo_intr_0_runlist_event_pending_f();
}
-if (fifo_intr & fifo_intr_0_pbdma_intr_pending_f()) {
+if ((fifo_intr & fifo_intr_0_pbdma_intr_pending_f()) != 0U) {
clear_intr |= fifo_pbdma_isr(g, fifo_intr);
}
@@ -2825,7 +2825,7 @@ u32 gk20a_fifo_nonstall_isr(struct gk20a *g)
nvgpu_log(g, gpu_dbg_intr, "fifo nonstall isr %08x\n", fifo_intr);
-if (fifo_intr & fifo_intr_0_channel_intr_pending_f()) {
+if ((fifo_intr & fifo_intr_0_channel_intr_pending_f()) != 0U) {
clear_intr = fifo_intr_0_channel_intr_pending_f();
}
@@ -3784,8 +3784,8 @@ int gk20a_fifo_suspend(struct gk20a *g)
bool gk20a_fifo_mmu_fault_pending(struct gk20a *g)
{
-if (gk20a_readl(g, fifo_intr_0_r()) &
-fifo_intr_0_mmu_fault_pending_f()) {
+if ((gk20a_readl(g, fifo_intr_0_r()) &
+fifo_intr_0_mmu_fault_pending_f()) != 0U) {
return true;
} else {
return false;

View File

@@ -132,8 +132,9 @@ static bool gk20a_is_falcon_scrubbing_done(struct nvgpu_falcon *flcn)
unit_status = gk20a_readl(g,
base_addr + falcon_falcon_dmactl_r());
-if (unit_status & (falcon_falcon_dmactl_dmem_scrubbing_m() |
-falcon_falcon_dmactl_imem_scrubbing_m())) {
+if ((unit_status &
+(falcon_falcon_dmactl_dmem_scrubbing_m() |
+falcon_falcon_dmactl_imem_scrubbing_m())) != 0U) {
status = false;
} else {
status = true;

View File

@@ -5850,11 +5850,13 @@ static int gk20a_gr_handle_gpc_exception(struct gk20a *g, bool *post_event,
static int gk20a_gr_post_bpt_events(struct gk20a *g, struct tsg_gk20a *tsg,
u32 global_esr)
{
-if (global_esr & gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f()) {
+if ((global_esr &
+gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f()) != 0U) {
g->ops.fifo.post_event_id(tsg, NVGPU_EVENT_ID_BPT_INT);
}
-if (global_esr & gr_gpc0_tpc0_sm_hww_global_esr_bpt_pause_pending_f()) {
+if ((global_esr &
+gr_gpc0_tpc0_sm_hww_global_esr_bpt_pause_pending_f()) != 0U) {
g->ops.fifo.post_event_id(tsg, NVGPU_EVENT_ID_BPT_PAUSE);
}
@@ -5927,21 +5929,21 @@ int gk20a_gr_isr(struct gk20a *g)
isr_data.curr_ctx, isr_data.offset,
isr_data.sub_chan, isr_data.class_num);
-if (gr_intr & gr_intr_notify_pending_f()) {
+if ((gr_intr & gr_intr_notify_pending_f()) != 0U) {
g->ops.gr.handle_notify_pending(g, &isr_data);
gk20a_writel(g, gr_intr_r(),
gr_intr_notify_reset_f());
gr_intr &= ~gr_intr_notify_pending_f();
}
-if (gr_intr & gr_intr_semaphore_pending_f()) {
+if ((gr_intr & gr_intr_semaphore_pending_f()) != 0U) {
g->ops.gr.handle_semaphore_pending(g, &isr_data);
gk20a_writel(g, gr_intr_r(),
gr_intr_semaphore_reset_f());
gr_intr &= ~gr_intr_semaphore_pending_f();
}
-if (gr_intr & gr_intr_semaphore_timeout_pending_f()) {
+if ((gr_intr & gr_intr_semaphore_timeout_pending_f()) != 0U) {
if (gk20a_gr_handle_semaphore_timeout_pending(g,
&isr_data) != 0) {
need_reset = true;
@@ -5951,7 +5953,7 @@ int gk20a_gr_isr(struct gk20a *g)
gr_intr &= ~gr_intr_semaphore_pending_f();
}
-if (gr_intr & gr_intr_illegal_notify_pending_f()) {
+if ((gr_intr & gr_intr_illegal_notify_pending_f()) != 0U) {
if (gk20a_gr_intr_illegal_notify_pending(g,
&isr_data) != 0) {
need_reset = true;
@@ -5961,7 +5963,7 @@ int gk20a_gr_isr(struct gk20a *g)
gr_intr &= ~gr_intr_illegal_notify_pending_f();
}
-if (gr_intr & gr_intr_illegal_method_pending_f()) {
+if ((gr_intr & gr_intr_illegal_method_pending_f()) != 0U) {
if (gk20a_gr_handle_illegal_method(g, &isr_data) != 0) {
need_reset = true;
}
@@ -5970,7 +5972,7 @@ int gk20a_gr_isr(struct gk20a *g)
gr_intr &= ~gr_intr_illegal_method_pending_f();
}
-if (gr_intr & gr_intr_illegal_class_pending_f()) {
+if ((gr_intr & gr_intr_illegal_class_pending_f()) != 0U) {
if (gk20a_gr_handle_illegal_class(g, &isr_data) != 0) {
need_reset = true;
}
@@ -5979,7 +5981,7 @@ int gk20a_gr_isr(struct gk20a *g)
gr_intr &= ~gr_intr_illegal_class_pending_f();
}
-if (gr_intr & gr_intr_fecs_error_pending_f()) {
+if ((gr_intr & gr_intr_fecs_error_pending_f()) != 0U) {
if (g->ops.gr.handle_fecs_error(g, ch, &isr_data) != 0) {
need_reset = true;
}
@@ -5988,7 +5990,7 @@ int gk20a_gr_isr(struct gk20a *g)
gr_intr &= ~gr_intr_fecs_error_pending_f();
}
-if (gr_intr & gr_intr_class_error_pending_f()) {
+if ((gr_intr & gr_intr_class_error_pending_f()) != 0U) {
if (gk20a_gr_handle_class_error(g, &isr_data) != 0) {
need_reset = true;
}
@@ -5999,7 +6001,7 @@ int gk20a_gr_isr(struct gk20a *g)
/* this one happens if someone tries to hit a non-whitelisted
* register using set_falcon[4] */
-if (gr_intr & gr_intr_firmware_method_pending_f()) {
+if ((gr_intr & gr_intr_firmware_method_pending_f()) != 0U) {
if (gk20a_gr_handle_firmware_method(g, &isr_data) != 0) {
need_reset = true;
}
@@ -6009,12 +6011,12 @@ int gk20a_gr_isr(struct gk20a *g)
gr_intr &= ~gr_intr_firmware_method_pending_f();
}
-if (gr_intr & gr_intr_exception_pending_f()) {
+if ((gr_intr & gr_intr_exception_pending_f()) != 0U) {
u32 exception = gk20a_readl(g, gr_exception_r());
nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "exception %08x\n", exception);
-if (exception & gr_exception_fe_m()) {
+if ((exception & gr_exception_fe_m()) != 0U) {
u32 fe = gk20a_readl(g, gr_fe_hww_esr_r());
u32 info = gk20a_readl(g, gr_fe_hww_esr_info_r());
@@ -6025,7 +6027,7 @@ int gk20a_gr_isr(struct gk20a *g)
need_reset = true;
}
-if (exception & gr_exception_memfmt_m()) {
+if ((exception & gr_exception_memfmt_m()) != 0U) {
u32 memfmt = gk20a_readl(g, gr_memfmt_hww_esr_r());
nvgpu_err(g, "memfmt exception: esr %08x", memfmt);
@@ -6034,7 +6036,7 @@ int gk20a_gr_isr(struct gk20a *g)
need_reset = true;
}
-if (exception & gr_exception_pd_m()) {
+if ((exception & gr_exception_pd_m()) != 0U) {
u32 pd = gk20a_readl(g, gr_pd_hww_esr_r());
nvgpu_err(g, "pd exception: esr 0x%08x", pd);
@@ -6043,7 +6045,7 @@ int gk20a_gr_isr(struct gk20a *g)
need_reset = true;
}
-if (exception & gr_exception_scc_m()) {
+if ((exception & gr_exception_scc_m()) != 0U) {
u32 scc = gk20a_readl(g, gr_scc_hww_esr_r());
nvgpu_err(g, "scc exception: esr 0x%08x", scc);
@@ -6052,7 +6054,7 @@ int gk20a_gr_isr(struct gk20a *g)
need_reset = true;
}
-if (exception & gr_exception_ds_m()) {
+if ((exception & gr_exception_ds_m()) != 0U) {
u32 ds = gk20a_readl(g, gr_ds_hww_esr_r());
nvgpu_err(g, "ds exception: esr: 0x%08x", ds);
@@ -6061,7 +6063,7 @@ int gk20a_gr_isr(struct gk20a *g)
need_reset = true;
}
-if (exception & gr_exception_ssync_m()) {
+if ((exception & gr_exception_ssync_m()) != 0U) {
if (g->ops.gr.handle_ssync_hww != NULL) {
if (g->ops.gr.handle_ssync_hww(g) != 0) {
need_reset = true;
@@ -6071,7 +6073,7 @@ int gk20a_gr_isr(struct gk20a *g)
}
}
-if (exception & gr_exception_mme_m()) {
+if ((exception & gr_exception_mme_m()) != 0U) {
u32 mme = gk20a_readl(g, gr_mme_hww_esr_r());
u32 info = gk20a_readl(g, gr_mme_hww_esr_info_r());
@@ -6082,7 +6084,7 @@ int gk20a_gr_isr(struct gk20a *g)
need_reset = true;
}
-if (exception & gr_exception_sked_m()) {
+if ((exception & gr_exception_sked_m()) != 0U) {
u32 sked = gk20a_readl(g, gr_sked_hww_esr_r());
nvgpu_err(g, "sked exception: esr 0x%08x", sked);
@@ -6449,10 +6451,10 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g,
/* The GPC/TPC unicast registers are included in the compressed PRI
* tables. Convert a GPC/TPC broadcast address to unicast addresses so
* that we can look up the offsets. */
-if (broadcast_flags & PRI_BROADCAST_FLAGS_GPC) {
+if ((broadcast_flags & PRI_BROADCAST_FLAGS_GPC) != 0U) {
for (gpc_num = 0; gpc_num < g->gr.gpc_count; gpc_num++) {
-if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC) {
+if ((broadcast_flags & PRI_BROADCAST_FLAGS_TPC) != 0U) {
for (tpc_num = 0;
tpc_num < g->gr.gpc_tpc_count[gpc_num];
tpc_num++) {
@@ -6461,7 +6463,7 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g,
gpc_num, tpc_num);
}
-} else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC) {
+} else if ((broadcast_flags & PRI_BROADCAST_FLAGS_PPC) != 0U) {
err = gr_gk20a_split_ppc_broadcast_addr(g, addr, gpc_num,
priv_addr_table, &t);
if (err != 0) {
@@ -6487,18 +6489,18 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g,
nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC");
g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num,
broadcast_flags, priv_addr_table, &t);
-} else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) {
+} else if ((broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) != 0U) {
g->ops.ltc.split_lts_broadcast_addr(g, addr,
priv_addr_table, &t);
-} else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTCS) {
+} else if ((broadcast_flags & PRI_BROADCAST_FLAGS_LTCS) != 0U) {
g->ops.ltc.split_ltc_broadcast_addr(g, addr,
priv_addr_table, &t);
-} else if (broadcast_flags & PRI_BROADCAST_FLAGS_FBPA) {
+} else if ((broadcast_flags & PRI_BROADCAST_FLAGS_FBPA) != 0U) {
g->ops.gr.split_fbpa_broadcast_addr(g, addr,
nvgpu_get_litter_value(g, GPU_LIT_NUM_FBPAS),
priv_addr_table, &t);
} else if ((broadcast_flags & PRI_BROADCAST_FLAGS_GPC) == 0U) {
-if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC) {
+if ((broadcast_flags & PRI_BROADCAST_FLAGS_TPC) != 0U) {
for (tpc_num = 0;
tpc_num < g->gr.gpc_tpc_count[gpc_num];
tpc_num++) {
@@ -6506,7 +6508,7 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g,
pri_tpc_addr(g, pri_tpccs_addr_mask(addr),
gpc_num, tpc_num);
}
-} else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC) {
+} else if ((broadcast_flags & PRI_BROADCAST_FLAGS_PPC) != 0U) {
err = gr_gk20a_split_ppc_broadcast_addr(g,
addr, gpc_num, priv_addr_table, &t);
} else {

View File

@@ -708,7 +708,7 @@ void gk20a_pmu_isr(struct gk20a *g)
return;
}
-if (intr & pwr_falcon_irqstat_halt_true_f()) {
+if ((intr & pwr_falcon_irqstat_halt_true_f()) != 0U) {
nvgpu_err(g, "pmu halt intr not implemented");
nvgpu_pmu_dump_falcon_stats(pmu);
if (gk20a_readl(g, pwr_pmu_mailbox_r
@@ -719,7 +719,7 @@ void gk20a_pmu_isr(struct gk20a *g)
}
}
}
-if (intr & pwr_falcon_irqstat_exterr_true_f()) {
+if ((intr & pwr_falcon_irqstat_exterr_true_f()) != 0U) {
nvgpu_err(g,
"pmu exterr intr not implemented. Clearing interrupt.");
nvgpu_pmu_dump_falcon_stats(pmu);
@@ -733,7 +733,7 @@ void gk20a_pmu_isr(struct gk20a *g)
g->ops.pmu.handle_ext_irq(g, intr);
}
-if (intr & pwr_falcon_irqstat_swgen0_true_f()) {
+if ((intr & pwr_falcon_irqstat_swgen0_true_f()) != 0U) {
nvgpu_pmu_process_message(pmu);
recheck = true;
}