diff --git a/drivers/gpu/nvgpu/common/bus/bus_gk20a.c b/drivers/gpu/nvgpu/common/bus/bus_gk20a.c index 61fd12230..adbcac7d5 100644 --- a/drivers/gpu/nvgpu/common/bus/bus_gk20a.c +++ b/drivers/gpu/nvgpu/common/bus/bus_gk20a.c @@ -50,9 +50,9 @@ void gk20a_bus_isr(struct gk20a *g) val = gk20a_readl(g, bus_intr_0_r()); - if (val & (bus_intr_0_pri_squash_m() | + if ((val & (bus_intr_0_pri_squash_m() | bus_intr_0_pri_fecserr_m() | - bus_intr_0_pri_timeout_m())) { + bus_intr_0_pri_timeout_m())) != 0U) { g->ops.ptimer.isr(g); } else { diff --git a/drivers/gpu/nvgpu/common/fb/fb_gv11b.c b/drivers/gpu/nvgpu/common/fb/fb_gv11b.c index 778d44e54..243d74652 100644 --- a/drivers/gpu/nvgpu/common/fb/fb_gv11b.c +++ b/drivers/gpu/nvgpu/common/fb/fb_gv11b.c @@ -250,7 +250,7 @@ static void gv11b_fb_fault_buffer_get_ptr_update(struct gk20a *g, /* while the fault is being handled it is possible for overflow * to happen, */ - if (reg_val & fb_mmu_fault_buffer_get_overflow_m()) { + if ((reg_val & fb_mmu_fault_buffer_get_overflow_m()) != 0U) { reg_val |= fb_mmu_fault_buffer_get_overflow_clear_f(); } @@ -462,12 +462,12 @@ void gv11b_handle_l2tlb_ecc_isr(struct gk20a *g, u32 ecc_status) g->ecc.fb.mmu_l2tlb_ecc_uncorrected_err_count[0].counter += uncorrected_delta; - if (ecc_status & - fb_mmu_l2tlb_ecc_status_corrected_err_l2tlb_sa_data_m()) { + if ((ecc_status & + fb_mmu_l2tlb_ecc_status_corrected_err_l2tlb_sa_data_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "corrected ecc sa data error"); } - if (ecc_status & - fb_mmu_l2tlb_ecc_status_uncorrected_err_l2tlb_sa_data_m()) { + if ((ecc_status & + fb_mmu_l2tlb_ecc_status_uncorrected_err_l2tlb_sa_data_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc sa data error"); } if (corrected_overflow || uncorrected_overflow) { @@ -529,10 +529,12 @@ void gv11b_handle_hubtlb_ecc_isr(struct gk20a *g, u32 ecc_status) g->ecc.fb.mmu_hubtlb_ecc_uncorrected_err_count[0].counter += uncorrected_delta; - if (ecc_status & fb_mmu_hubtlb_ecc_status_corrected_err_sa_data_m()) { + if ((ecc_status & + fb_mmu_hubtlb_ecc_status_corrected_err_sa_data_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "corrected ecc sa data error"); } - if (ecc_status & fb_mmu_hubtlb_ecc_status_uncorrected_err_sa_data_m()) { + if ((ecc_status & + fb_mmu_hubtlb_ecc_status_uncorrected_err_sa_data_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc sa data error"); } if (corrected_overflow || uncorrected_overflow) { @@ -594,20 +596,20 @@ void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status) g->ecc.fb.mmu_fillunit_ecc_uncorrected_err_count[0].counter += uncorrected_delta; - if (ecc_status & - fb_mmu_fillunit_ecc_status_corrected_err_pte_data_m()) { + if ((ecc_status & + fb_mmu_fillunit_ecc_status_corrected_err_pte_data_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "corrected ecc pte data error"); } - if (ecc_status & - fb_mmu_fillunit_ecc_status_uncorrected_err_pte_data_m()) { + if ((ecc_status & + fb_mmu_fillunit_ecc_status_uncorrected_err_pte_data_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc pte data error"); } - if (ecc_status & - fb_mmu_fillunit_ecc_status_corrected_err_pde0_data_m()) { + if ((ecc_status & + fb_mmu_fillunit_ecc_status_corrected_err_pde0_data_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "corrected ecc pde0 data error"); } - if (ecc_status & - fb_mmu_fillunit_ecc_status_uncorrected_err_pde0_data_m()) { + if ((ecc_status & + fb_mmu_fillunit_ecc_status_uncorrected_err_pde0_data_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc pde0 data error"); } @@ -974,16 +976,16 @@ static 
int gv11b_fb_replay_or_cancel_faults(struct gk20a *g, nvgpu_log_fn(g, " "); - if (invalidate_replay_val & - fb_mmu_invalidate_replay_cancel_global_f()) { + if ((invalidate_replay_val & + fb_mmu_invalidate_replay_cancel_global_f()) != 0U) { /* * cancel faults so that next time it faults as * replayable faults and channel recovery can be done */ err = g->ops.fb.mmu_invalidate_replay(g, fb_mmu_invalidate_replay_cancel_global_f()); - } else if (invalidate_replay_val & - fb_mmu_invalidate_replay_start_ack_all_f()) { + } else if ((invalidate_replay_val & + fb_mmu_invalidate_replay_start_ack_all_f()) != 0U) { /* pte valid is fixed. replay faulting request */ err = g->ops.fb.mmu_invalidate_replay(g, fb_mmu_invalidate_replay_start_ack_all_f()); @@ -1026,7 +1028,7 @@ void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g, offset + gmmu_fault_buf_entry_valid_w()); nvgpu_log(g, gpu_dbg_intr, "entry valid offset val = 0x%x", rd32_val); - while ((rd32_val & gmmu_fault_buf_entry_valid_m())) { + while ((rd32_val & gmmu_fault_buf_entry_valid_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "entry valid = 0x%x", rd32_val); @@ -1156,8 +1158,8 @@ void gv11b_fb_handle_replay_fault_overflow(struct gk20a *g, reg_val = g->ops.fb.read_mmu_fault_buffer_get(g, index); - if (fault_status & - fb_mmu_fault_status_replayable_getptr_corrupted_m()) { + if ((fault_status & + fb_mmu_fault_status_replayable_getptr_corrupted_m()) != 0U) { nvgpu_err(g, "replayable getptr corrupted set"); @@ -1168,8 +1170,8 @@ void gv11b_fb_handle_replay_fault_overflow(struct gk20a *g, fb_mmu_fault_buffer_get_getptr_corrupted_clear_f()); } - if (fault_status & - fb_mmu_fault_status_replayable_overflow_m()) { + if ((fault_status & + fb_mmu_fault_status_replayable_overflow_m()) != 0U) { bool buffer_full = gv11b_fb_is_fault_buffer_full(g, index); nvgpu_err(g, "replayable overflow: buffer full:%s", @@ -1191,8 +1193,8 @@ void gv11b_fb_handle_nonreplay_fault_overflow(struct gk20a *g, reg_val = g->ops.fb.read_mmu_fault_buffer_get(g, index); - if (fault_status & - fb_mmu_fault_status_non_replayable_getptr_corrupted_m()) { + if ((fault_status & + fb_mmu_fault_status_non_replayable_getptr_corrupted_m()) != 0U) { nvgpu_err(g, "non replayable getptr corrupted set"); @@ -1203,8 +1205,8 @@ void gv11b_fb_handle_nonreplay_fault_overflow(struct gk20a *g, fb_mmu_fault_buffer_get_getptr_corrupted_clear_f()); } - if (fault_status & - fb_mmu_fault_status_non_replayable_overflow_m()) { + if ((fault_status & + fb_mmu_fault_status_non_replayable_overflow_m()) != 0U) { bool buffer_full = gv11b_fb_is_fault_buffer_full(g, index); @@ -1222,14 +1224,15 @@ void gv11b_fb_handle_nonreplay_fault_overflow(struct gk20a *g, static void gv11b_fb_handle_bar2_fault(struct gk20a *g, struct mmu_fault_info *mmfault, u32 fault_status) { - if (fault_status & fb_mmu_fault_status_non_replayable_error_m()) { + if ((fault_status & + fb_mmu_fault_status_non_replayable_error_m()) != 0U) { if (gv11b_fb_is_fault_buf_enabled(g, NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX)) { gv11b_fb_fault_buf_configure_hw(g, NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX); } } - if (fault_status & fb_mmu_fault_status_replayable_error_m()) { + if ((fault_status & fb_mmu_fault_status_replayable_error_m()) != 0U) { if (gv11b_fb_is_fault_buf_enabled(g, NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX)) { gv11b_fb_fault_buf_configure_hw(g, @@ -1319,8 +1322,8 @@ static void gv11b_fb_handle_mmu_fault(struct gk20a *g, u32 niso_intr) nvgpu_log(g, gpu_dbg_intr, "mmu_fault_status = 0x%08x", fault_status); - if (niso_intr & - 
fb_niso_intr_mmu_other_fault_notify_m()) { + if ((niso_intr & + fb_niso_intr_mmu_other_fault_notify_m()) != 0U) { gv11b_fb_handle_dropped_mmu_fault(g, fault_status); @@ -1329,8 +1332,8 @@ static void gv11b_fb_handle_mmu_fault(struct gk20a *g, u32 niso_intr) if (gv11b_fb_is_fault_buf_enabled(g, NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX)) { - if (niso_intr & - fb_niso_intr_mmu_nonreplayable_fault_notify_m()) { + if ((niso_intr & + fb_niso_intr_mmu_nonreplayable_fault_notify_m()) != 0U) { gv11b_fb_handle_mmu_nonreplay_replay_fault(g, fault_status, @@ -1342,8 +1345,8 @@ static void gv11b_fb_handle_mmu_fault(struct gk20a *g, u32 niso_intr) * bit will be reset by HW */ } - if (niso_intr & - fb_niso_intr_mmu_nonreplayable_fault_overflow_m()) { + if ((niso_intr & + fb_niso_intr_mmu_nonreplayable_fault_overflow_m()) != 0U) { gv11b_fb_handle_nonreplay_fault_overflow(g, fault_status); @@ -1353,15 +1356,15 @@ static void gv11b_fb_handle_mmu_fault(struct gk20a *g, u32 niso_intr) if (gv11b_fb_is_fault_buf_enabled(g, NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX)) { - if (niso_intr & - fb_niso_intr_mmu_replayable_fault_notify_m()) { + if ((niso_intr & + fb_niso_intr_mmu_replayable_fault_notify_m()) != 0U) { gv11b_fb_handle_mmu_nonreplay_replay_fault(g, fault_status, NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX); } - if (niso_intr & - fb_niso_intr_mmu_replayable_fault_overflow_m()) { + if ((niso_intr & + fb_niso_intr_mmu_replayable_fault_overflow_m()) != 0U) { gv11b_fb_handle_replay_fault_overflow(g, fault_status); @@ -1385,14 +1388,14 @@ void gv11b_fb_hub_isr(struct gk20a *g) nvgpu_log(g, gpu_dbg_intr, "enter hub isr, niso_intr = 0x%08x", niso_intr); - if (niso_intr & + if ((niso_intr & (fb_niso_intr_hub_access_counter_notify_m() | - fb_niso_intr_hub_access_counter_error_m())) { + fb_niso_intr_hub_access_counter_error_m())) != 0U) { nvgpu_info(g, "hub access counter notify/error"); } - if (niso_intr & - fb_niso_intr_mmu_ecc_uncorrected_error_notify_pending_f()) { + if ((niso_intr & + fb_niso_intr_mmu_ecc_uncorrected_error_notify_pending_f()) != 0U) { nvgpu_info(g, "ecc uncorrected error notify"); @@ -1411,12 +1414,12 @@ void gv11b_fb_hub_isr(struct gk20a *g) gv11b_handle_fillunit_ecc_isr(g, status); } } - if (niso_intr & + if ((niso_intr & (fb_niso_intr_mmu_other_fault_notify_m() | fb_niso_intr_mmu_replayable_fault_notify_m() | fb_niso_intr_mmu_replayable_fault_overflow_m() | fb_niso_intr_mmu_nonreplayable_fault_notify_m() | - fb_niso_intr_mmu_nonreplayable_fault_overflow_m())) { + fb_niso_intr_mmu_nonreplayable_fault_overflow_m())) != 0U) { nvgpu_log(g, gpu_dbg_intr, "MMU Fault"); gv11b_fb_handle_mmu_fault(g, niso_intr); @@ -1427,13 +1430,13 @@ void gv11b_fb_hub_isr(struct gk20a *g) bool gv11b_fb_mmu_fault_pending(struct gk20a *g) { - if (gk20a_readl(g, fb_niso_intr_r()) & + if ((gk20a_readl(g, fb_niso_intr_r()) & (fb_niso_intr_mmu_other_fault_notify_m() | fb_niso_intr_mmu_ecc_uncorrected_error_notify_m() | fb_niso_intr_mmu_replayable_fault_notify_m() | fb_niso_intr_mmu_replayable_fault_overflow_m() | fb_niso_intr_mmu_nonreplayable_fault_notify_m() | - fb_niso_intr_mmu_nonreplayable_fault_overflow_m())) { + fb_niso_intr_mmu_nonreplayable_fault_overflow_m())) != 0U) { return true; } @@ -1506,14 +1509,14 @@ static int gv11b_fb_fix_page_fault(struct gk20a *g, "pte all zeros, do not set valid"); return -1; } - if (pte[0] & gmmu_new_pte_valid_true_f()) { + if ((pte[0] & gmmu_new_pte_valid_true_f()) != 0U) { nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte, "pte valid already set"); return -1; } pte[0] |= gmmu_new_pte_valid_true_f(); - if 
(pte[0] & gmmu_new_pte_read_only_true_f()) { + if ((pte[0] & gmmu_new_pte_read_only_true_f()) != 0U) { pte[0] &= ~(gmmu_new_pte_read_only_true_f()); } nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte, diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c b/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c index 3653ceaeb..519620220 100644 --- a/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c +++ b/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c @@ -243,8 +243,8 @@ void gp10b_ltc_lts_isr(struct gk20a *g, unsigned int ltc, unsigned int slice) ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() + offset); /* Detect and handle ECC errors */ - if (ltc_intr & - ltc_ltcs_ltss_intr_ecc_sec_error_pending_f()) { + if ((ltc_intr & + ltc_ltcs_ltss_intr_ecc_sec_error_pending_f()) != 0U) { u32 ecc_stats_reg_val; nvgpu_err(g, @@ -262,8 +262,8 @@ void gp10b_ltc_lts_isr(struct gk20a *g, unsigned int ltc, unsigned int slice) ecc_stats_reg_val); g->ops.mm.l2_flush(g, true); } - if (ltc_intr & - ltc_ltcs_ltss_intr_ecc_ded_error_pending_f()) { + if ((ltc_intr & + ltc_ltcs_ltss_intr_ecc_ded_error_pending_f()) != 0U) { u32 ecc_stats_reg_val; nvgpu_err(g, diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c b/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c index 4ddc7ac5b..fbfe3f226 100644 --- a/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c +++ b/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c @@ -121,9 +121,9 @@ void gv11b_ltc_lts_isr(struct gk20a *g, unsigned int ltc, unsigned int slice) offset); /* Detect and handle ECC PARITY errors */ - if (ltc_intr3 & + if ((ltc_intr3 & (ltc_ltcs_ltss_intr3_ecc_uncorrected_m() | - ltc_ltcs_ltss_intr3_ecc_corrected_m())) { + ltc_ltcs_ltss_intr3_ecc_corrected_m())) != 0U) { ecc_status = gk20a_readl(g, ltc_ltc0_lts0_l2_cache_ecc_status_r() + @@ -173,22 +173,22 @@ void gv11b_ltc_lts_isr(struct gk20a *g, unsigned int ltc, unsigned int slice) nvgpu_log(g, gpu_dbg_intr, "ltc:%d lts: %d cache ecc interrupt intr: 0x%x", ltc, slice, ltc_intr3); - if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_m()) { + if ((ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "rstg ecc error corrected"); } - if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m()) { + if ((ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "rstg ecc error uncorrected"); } - if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_m()) { + if ((ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "tstg ecc error corrected"); } - if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_m()) { + if ((ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "tstg ecc error uncorrected"); } - if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_m()) { + if ((ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "dstg ecc error corrected"); } - if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_m()) { + if ((ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "dstg ecc error uncorrected"); } diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c index 5bbdf8181..e0890c50e 100644 --- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c @@ -80,11 +80,11 @@ void gk20a_ce2_isr(struct gk20a *g, 
u32 inst_id, u32 pri_base) nvgpu_log(g, gpu_dbg_intr, "ce2 isr %08x\n", ce2_intr); /* clear blocking interrupts: they exibit broken behavior */ - if (ce2_intr & ce2_intr_status_blockpipe_pending_f()) { + if ((ce2_intr & ce2_intr_status_blockpipe_pending_f()) != 0U) { clear_intr |= ce2_blockpipe_isr(g, ce2_intr); } - if (ce2_intr & ce2_intr_status_launcherr_pending_f()) { + if ((ce2_intr & ce2_intr_status_launcherr_pending_f()) != 0U) { clear_intr |= ce2_launcherr_isr(g, ce2_intr); } @@ -99,7 +99,7 @@ u32 gk20a_ce2_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base) nvgpu_log(g, gpu_dbg_intr, "ce2 nonstall isr %08x\n", ce2_intr); - if (ce2_intr & ce2_intr_status_nonblockpipe_pending_f()) { + if ((ce2_intr & ce2_intr_status_nonblockpipe_pending_f()) != 0U) { gk20a_writel(g, ce2_intr_status_r(), ce2_nonblockpipe_isr(g, ce2_intr)); ops |= (GK20A_NONSTALL_OPS_WAKEUP_SEMAPHORE | diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c index 6f3b7de9e..5a8d48475 100644 --- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c @@ -2461,38 +2461,38 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr) nvgpu_log_fn(g, "fifo_intr=0x%08x", fifo_intr); - if (fifo_intr & fifo_intr_0_pio_error_pending_f()) { + if ((fifo_intr & fifo_intr_0_pio_error_pending_f()) != 0U) { /* pio mode is unused. this shouldn't happen, ever. */ /* should we clear it or just leave it pending? */ nvgpu_err(g, "fifo pio error!"); BUG_ON(1); } - if (fifo_intr & fifo_intr_0_bind_error_pending_f()) { + if ((fifo_intr & fifo_intr_0_bind_error_pending_f()) != 0U) { u32 bind_error = gk20a_readl(g, fifo_intr_bind_error_r()); nvgpu_err(g, "fifo bind error: 0x%08x", bind_error); print_channel_reset_log = true; handled |= fifo_intr_0_bind_error_pending_f(); } - if (fifo_intr & fifo_intr_0_sched_error_pending_f()) { + if ((fifo_intr & fifo_intr_0_sched_error_pending_f()) != 0U) { print_channel_reset_log = g->ops.fifo.handle_sched_error(g); handled |= fifo_intr_0_sched_error_pending_f(); } - if (fifo_intr & fifo_intr_0_chsw_error_pending_f()) { + if ((fifo_intr & fifo_intr_0_chsw_error_pending_f()) != 0U) { gk20a_fifo_handle_chsw_fault(g); handled |= fifo_intr_0_chsw_error_pending_f(); } - if (fifo_intr & fifo_intr_0_mmu_fault_pending_f()) { + if ((fifo_intr & fifo_intr_0_mmu_fault_pending_f()) != 0U) { if (gk20a_fifo_handle_mmu_fault(g, 0, ~(u32)0, false)) { print_channel_reset_log = true; } handled |= fifo_intr_0_mmu_fault_pending_f(); } - if (fifo_intr & fifo_intr_0_dropped_mmu_fault_pending_f()) { + if ((fifo_intr & fifo_intr_0_dropped_mmu_fault_pending_f()) != 0U) { gk20a_fifo_handle_dropped_mmu_fault(g); handled |= fifo_intr_0_dropped_mmu_fault_pending_f(); } @@ -2616,7 +2616,7 @@ unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id, pbdma_intr_0); } - if (pbdma_intr_0 & pbdma_intr_0_acquire_pending_f()) { + if ((pbdma_intr_0 & pbdma_intr_0_acquire_pending_f()) != 0U) { u32 val = gk20a_readl(g, pbdma_acquire_r(pbdma_id)); val &= ~pbdma_acquire_timeout_en_enable_f(); @@ -2630,24 +2630,24 @@ unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id, *handled |= pbdma_intr_0_acquire_pending_f(); } - if (pbdma_intr_0 & pbdma_intr_0_pbentry_pending_f()) { + if ((pbdma_intr_0 & pbdma_intr_0_pbentry_pending_f()) != 0U) { gk20a_fifo_reset_pbdma_header(g, pbdma_id); gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0); rc_type = RC_TYPE_PBDMA_FAULT; } - if (pbdma_intr_0 & pbdma_intr_0_method_pending_f()) { + if ((pbdma_intr_0 & 
pbdma_intr_0_method_pending_f()) != 0U) { gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0); rc_type = RC_TYPE_PBDMA_FAULT; } - if (pbdma_intr_0 & pbdma_intr_0_pbcrc_pending_f()) { + if ((pbdma_intr_0 & pbdma_intr_0_pbcrc_pending_f()) != 0U) { *error_notifier = NVGPU_ERR_NOTIFIER_PBDMA_PUSHBUFFER_CRC_MISMATCH; rc_type = RC_TYPE_PBDMA_FAULT; } - if (pbdma_intr_0 & pbdma_intr_0_device_pending_f()) { + if ((pbdma_intr_0 & pbdma_intr_0_device_pending_f()) != 0U) { gk20a_fifo_reset_pbdma_header(g, pbdma_id); for (i = 0; i < 4; i++) { @@ -2795,11 +2795,11 @@ void gk20a_fifo_isr(struct gk20a *g) nvgpu_log(g, gpu_dbg_intr, "fifo isr %08x\n", fifo_intr); /* handle runlist update */ - if (fifo_intr & fifo_intr_0_runlist_event_pending_f()) { + if ((fifo_intr & fifo_intr_0_runlist_event_pending_f()) != 0U) { gk20a_fifo_handle_runlist_event(g); clear_intr |= fifo_intr_0_runlist_event_pending_f(); } - if (fifo_intr & fifo_intr_0_pbdma_intr_pending_f()) { + if ((fifo_intr & fifo_intr_0_pbdma_intr_pending_f()) != 0U) { clear_intr |= fifo_pbdma_isr(g, fifo_intr); } @@ -2825,7 +2825,7 @@ u32 gk20a_fifo_nonstall_isr(struct gk20a *g) nvgpu_log(g, gpu_dbg_intr, "fifo nonstall isr %08x\n", fifo_intr); - if (fifo_intr & fifo_intr_0_channel_intr_pending_f()) { + if ((fifo_intr & fifo_intr_0_channel_intr_pending_f()) != 0U) { clear_intr = fifo_intr_0_channel_intr_pending_f(); } @@ -3784,8 +3784,8 @@ int gk20a_fifo_suspend(struct gk20a *g) bool gk20a_fifo_mmu_fault_pending(struct gk20a *g) { - if (gk20a_readl(g, fifo_intr_0_r()) & - fifo_intr_0_mmu_fault_pending_f()) { + if ((gk20a_readl(g, fifo_intr_0_r()) & + fifo_intr_0_mmu_fault_pending_f()) != 0U) { return true; } else { return false; diff --git a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c index 0801dc49e..85ff7d31b 100644 --- a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c @@ -132,8 +132,9 @@ static bool gk20a_is_falcon_scrubbing_done(struct nvgpu_falcon *flcn) unit_status = gk20a_readl(g, base_addr + falcon_falcon_dmactl_r()); - if (unit_status & (falcon_falcon_dmactl_dmem_scrubbing_m() | - falcon_falcon_dmactl_imem_scrubbing_m())) { + if ((unit_status & + (falcon_falcon_dmactl_dmem_scrubbing_m() | + falcon_falcon_dmactl_imem_scrubbing_m())) != 0U) { status = false; } else { status = true; diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c index 17aa188c7..ee29fd183 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c @@ -5850,11 +5850,13 @@ static int gk20a_gr_handle_gpc_exception(struct gk20a *g, bool *post_event, static int gk20a_gr_post_bpt_events(struct gk20a *g, struct tsg_gk20a *tsg, u32 global_esr) { - if (global_esr & gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f()) { + if ((global_esr & + gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f()) != 0U) { g->ops.fifo.post_event_id(tsg, NVGPU_EVENT_ID_BPT_INT); } - if (global_esr & gr_gpc0_tpc0_sm_hww_global_esr_bpt_pause_pending_f()) { + if ((global_esr & + gr_gpc0_tpc0_sm_hww_global_esr_bpt_pause_pending_f()) != 0U) { g->ops.fifo.post_event_id(tsg, NVGPU_EVENT_ID_BPT_PAUSE); } @@ -5927,21 +5929,21 @@ int gk20a_gr_isr(struct gk20a *g) isr_data.curr_ctx, isr_data.offset, isr_data.sub_chan, isr_data.class_num); - if (gr_intr & gr_intr_notify_pending_f()) { + if ((gr_intr & gr_intr_notify_pending_f()) != 0U) { g->ops.gr.handle_notify_pending(g, &isr_data); gk20a_writel(g, gr_intr_r(), gr_intr_notify_reset_f()); gr_intr &= ~gr_intr_notify_pending_f(); } - if (gr_intr & 
gr_intr_semaphore_pending_f()) { + if ((gr_intr & gr_intr_semaphore_pending_f()) != 0U) { g->ops.gr.handle_semaphore_pending(g, &isr_data); gk20a_writel(g, gr_intr_r(), gr_intr_semaphore_reset_f()); gr_intr &= ~gr_intr_semaphore_pending_f(); } - if (gr_intr & gr_intr_semaphore_timeout_pending_f()) { + if ((gr_intr & gr_intr_semaphore_timeout_pending_f()) != 0U) { if (gk20a_gr_handle_semaphore_timeout_pending(g, &isr_data) != 0) { need_reset = true; @@ -5951,7 +5953,7 @@ int gk20a_gr_isr(struct gk20a *g) gr_intr &= ~gr_intr_semaphore_pending_f(); } - if (gr_intr & gr_intr_illegal_notify_pending_f()) { + if ((gr_intr & gr_intr_illegal_notify_pending_f()) != 0U) { if (gk20a_gr_intr_illegal_notify_pending(g, &isr_data) != 0) { need_reset = true; @@ -5961,7 +5963,7 @@ int gk20a_gr_isr(struct gk20a *g) gr_intr &= ~gr_intr_illegal_notify_pending_f(); } - if (gr_intr & gr_intr_illegal_method_pending_f()) { + if ((gr_intr & gr_intr_illegal_method_pending_f()) != 0U) { if (gk20a_gr_handle_illegal_method(g, &isr_data) != 0) { need_reset = true; } @@ -5970,7 +5972,7 @@ int gk20a_gr_isr(struct gk20a *g) gr_intr &= ~gr_intr_illegal_method_pending_f(); } - if (gr_intr & gr_intr_illegal_class_pending_f()) { + if ((gr_intr & gr_intr_illegal_class_pending_f()) != 0U) { if (gk20a_gr_handle_illegal_class(g, &isr_data) != 0) { need_reset = true; } @@ -5979,7 +5981,7 @@ int gk20a_gr_isr(struct gk20a *g) gr_intr &= ~gr_intr_illegal_class_pending_f(); } - if (gr_intr & gr_intr_fecs_error_pending_f()) { + if ((gr_intr & gr_intr_fecs_error_pending_f()) != 0U) { if (g->ops.gr.handle_fecs_error(g, ch, &isr_data) != 0) { need_reset = true; } @@ -5988,7 +5990,7 @@ int gk20a_gr_isr(struct gk20a *g) gr_intr &= ~gr_intr_fecs_error_pending_f(); } - if (gr_intr & gr_intr_class_error_pending_f()) { + if ((gr_intr & gr_intr_class_error_pending_f()) != 0U) { if (gk20a_gr_handle_class_error(g, &isr_data) != 0) { need_reset = true; } @@ -5999,7 +6001,7 @@ int gk20a_gr_isr(struct gk20a *g) /* this one happens if someone tries to hit a non-whitelisted * register using set_falcon[4] */ - if (gr_intr & gr_intr_firmware_method_pending_f()) { + if ((gr_intr & gr_intr_firmware_method_pending_f()) != 0U) { if (gk20a_gr_handle_firmware_method(g, &isr_data) != 0) { need_reset = true; } @@ -6009,12 +6011,12 @@ int gk20a_gr_isr(struct gk20a *g) gr_intr &= ~gr_intr_firmware_method_pending_f(); } - if (gr_intr & gr_intr_exception_pending_f()) { + if ((gr_intr & gr_intr_exception_pending_f()) != 0U) { u32 exception = gk20a_readl(g, gr_exception_r()); nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "exception %08x\n", exception); - if (exception & gr_exception_fe_m()) { + if ((exception & gr_exception_fe_m()) != 0U) { u32 fe = gk20a_readl(g, gr_fe_hww_esr_r()); u32 info = gk20a_readl(g, gr_fe_hww_esr_info_r()); @@ -6025,7 +6027,7 @@ int gk20a_gr_isr(struct gk20a *g) need_reset = true; } - if (exception & gr_exception_memfmt_m()) { + if ((exception & gr_exception_memfmt_m()) != 0U) { u32 memfmt = gk20a_readl(g, gr_memfmt_hww_esr_r()); nvgpu_err(g, "memfmt exception: esr %08x", memfmt); @@ -6034,7 +6036,7 @@ int gk20a_gr_isr(struct gk20a *g) need_reset = true; } - if (exception & gr_exception_pd_m()) { + if ((exception & gr_exception_pd_m()) != 0U) { u32 pd = gk20a_readl(g, gr_pd_hww_esr_r()); nvgpu_err(g, "pd exception: esr 0x%08x", pd); @@ -6043,7 +6045,7 @@ int gk20a_gr_isr(struct gk20a *g) need_reset = true; } - if (exception & gr_exception_scc_m()) { + if ((exception & gr_exception_scc_m()) != 0U) { u32 scc = gk20a_readl(g, gr_scc_hww_esr_r()); 
nvgpu_err(g, "scc exception: esr 0x%08x", scc); @@ -6052,7 +6054,7 @@ int gk20a_gr_isr(struct gk20a *g) need_reset = true; } - if (exception & gr_exception_ds_m()) { + if ((exception & gr_exception_ds_m()) != 0U) { u32 ds = gk20a_readl(g, gr_ds_hww_esr_r()); nvgpu_err(g, "ds exception: esr: 0x%08x", ds); @@ -6061,7 +6063,7 @@ int gk20a_gr_isr(struct gk20a *g) need_reset = true; } - if (exception & gr_exception_ssync_m()) { + if ((exception & gr_exception_ssync_m()) != 0U) { if (g->ops.gr.handle_ssync_hww != NULL) { if (g->ops.gr.handle_ssync_hww(g) != 0) { need_reset = true; @@ -6071,7 +6073,7 @@ int gk20a_gr_isr(struct gk20a *g) } } - if (exception & gr_exception_mme_m()) { + if ((exception & gr_exception_mme_m()) != 0U) { u32 mme = gk20a_readl(g, gr_mme_hww_esr_r()); u32 info = gk20a_readl(g, gr_mme_hww_esr_info_r()); @@ -6082,7 +6084,7 @@ int gk20a_gr_isr(struct gk20a *g) need_reset = true; } - if (exception & gr_exception_sked_m()) { + if ((exception & gr_exception_sked_m()) != 0U) { u32 sked = gk20a_readl(g, gr_sked_hww_esr_r()); nvgpu_err(g, "sked exception: esr 0x%08x", sked); @@ -6449,10 +6451,10 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g, /* The GPC/TPC unicast registers are included in the compressed PRI * tables. Convert a GPC/TPC broadcast address to unicast addresses so * that we can look up the offsets. */ - if (broadcast_flags & PRI_BROADCAST_FLAGS_GPC) { + if ((broadcast_flags & PRI_BROADCAST_FLAGS_GPC) != 0U) { for (gpc_num = 0; gpc_num < g->gr.gpc_count; gpc_num++) { - if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC) { + if ((broadcast_flags & PRI_BROADCAST_FLAGS_TPC) != 0U) { for (tpc_num = 0; tpc_num < g->gr.gpc_tpc_count[gpc_num]; tpc_num++) { @@ -6461,7 +6463,7 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g, gpc_num, tpc_num); } - } else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC) { + } else if ((broadcast_flags & PRI_BROADCAST_FLAGS_PPC) != 0U) { err = gr_gk20a_split_ppc_broadcast_addr(g, addr, gpc_num, priv_addr_table, &t); if (err != 0) { @@ -6487,18 +6489,18 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g, nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC"); g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num, broadcast_flags, priv_addr_table, &t); - } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) { + } else if ((broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) != 0U) { g->ops.ltc.split_lts_broadcast_addr(g, addr, priv_addr_table, &t); - } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTCS) { + } else if ((broadcast_flags & PRI_BROADCAST_FLAGS_LTCS) != 0U) { g->ops.ltc.split_ltc_broadcast_addr(g, addr, priv_addr_table, &t); - } else if (broadcast_flags & PRI_BROADCAST_FLAGS_FBPA) { + } else if ((broadcast_flags & PRI_BROADCAST_FLAGS_FBPA) != 0U) { g->ops.gr.split_fbpa_broadcast_addr(g, addr, nvgpu_get_litter_value(g, GPU_LIT_NUM_FBPAS), priv_addr_table, &t); } else if ((broadcast_flags & PRI_BROADCAST_FLAGS_GPC) == 0U) { - if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC) { + if ((broadcast_flags & PRI_BROADCAST_FLAGS_TPC) != 0U) { for (tpc_num = 0; tpc_num < g->gr.gpc_tpc_count[gpc_num]; tpc_num++) { @@ -6506,7 +6508,7 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g, pri_tpc_addr(g, pri_tpccs_addr_mask(addr), gpc_num, tpc_num); } - } else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC) { + } else if ((broadcast_flags & PRI_BROADCAST_FLAGS_PPC) != 0U) { err = gr_gk20a_split_ppc_broadcast_addr(g, addr, gpc_num, priv_addr_table, &t); } else { diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c 
b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c index fd08eef55..51bbe8273 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c @@ -708,7 +708,7 @@ void gk20a_pmu_isr(struct gk20a *g) return; } - if (intr & pwr_falcon_irqstat_halt_true_f()) { + if ((intr & pwr_falcon_irqstat_halt_true_f()) != 0U) { nvgpu_err(g, "pmu halt intr not implemented"); nvgpu_pmu_dump_falcon_stats(pmu); if (gk20a_readl(g, pwr_pmu_mailbox_r @@ -719,7 +719,7 @@ void gk20a_pmu_isr(struct gk20a *g) } } } - if (intr & pwr_falcon_irqstat_exterr_true_f()) { + if ((intr & pwr_falcon_irqstat_exterr_true_f()) != 0U) { nvgpu_err(g, "pmu exterr intr not implemented. Clearing interrupt."); nvgpu_pmu_dump_falcon_stats(pmu); @@ -733,7 +733,7 @@ void gk20a_pmu_isr(struct gk20a *g) g->ops.pmu.handle_ext_irq(g, intr); } - if (intr & pwr_falcon_irqstat_swgen0_true_f()) { + if ((intr & pwr_falcon_irqstat_swgen0_true_f()) != 0U) { nvgpu_pmu_process_message(pmu); recheck = true; } diff --git a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c index 16e659fb7..07c1f6acc 100644 --- a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c @@ -841,7 +841,7 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll) /* lock pll */ cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r()); - if (cfg & trim_sys_gpcpll_cfg_enb_lckdet_power_off_f()){ + if ((cfg & trim_sys_gpcpll_cfg_enb_lckdet_power_off_f()) != 0U) { cfg = set_field(cfg, trim_sys_gpcpll_cfg_enb_lckdet_m(), trim_sys_gpcpll_cfg_enb_lckdet_power_on_f()); gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg); @@ -853,7 +853,7 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll) do { nvgpu_udelay(1); cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r()); - if (cfg & trim_sys_gpcpll_cfg_pll_lock_true_f()) { + if ((cfg & trim_sys_gpcpll_cfg_pll_lock_true_f()) != 0U) { goto pll_locked; } } while (--timeout > 0); diff --git a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c index e9df8038d..b3ff4e89c 100644 --- a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c @@ -112,8 +112,8 @@ void gm20b_fifo_trigger_mmu_fault(struct gk20a *g, /* Wait for MMU fault to trigger */ do { - if (gk20a_readl(g, fifo_intr_0_r()) & - fifo_intr_0_mmu_fault_pending_f()) { + if ((gk20a_readl(g, fifo_intr_0_r()) & + fifo_intr_0_mmu_fault_pending_f()) != 0U) { ret = 0; break; } diff --git a/drivers/gpu/nvgpu/gp10b/ce_gp10b.c b/drivers/gpu/nvgpu/gp10b/ce_gp10b.c index 1800ac818..14c9eb2dd 100644 --- a/drivers/gpu/nvgpu/gp10b/ce_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/ce_gp10b.c @@ -51,11 +51,11 @@ void gp10b_ce_isr(struct gk20a *g, u32 inst_id, u32 pri_base) nvgpu_log(g, gpu_dbg_intr, "ce isr %08x %08x\n", ce_intr, inst_id); /* clear blocking interrupts: they exibit broken behavior */ - if (ce_intr & ce_intr_status_blockpipe_pending_f()) { + if ((ce_intr & ce_intr_status_blockpipe_pending_f()) != 0U) { clear_intr |= ce_blockpipe_isr(g, ce_intr); } - if (ce_intr & ce_intr_status_launcherr_pending_f()) { + if ((ce_intr & ce_intr_status_launcherr_pending_f()) != 0U) { clear_intr |= ce_launcherr_isr(g, ce_intr); } @@ -70,7 +70,7 @@ u32 gp10b_ce_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base) nvgpu_log(g, gpu_dbg_intr, "ce nonstall isr %08x %08x\n", ce_intr, inst_id); - if (ce_intr & ce_intr_status_nonblockpipe_pending_f()) { + if ((ce_intr & ce_intr_status_nonblockpipe_pending_f()) != 0U) { gk20a_writel(g, ce_intr_status_r(inst_id), 
ce_intr_status_nonblockpipe_pending_f()); ops |= (GK20A_NONSTALL_OPS_WAKEUP_SEMAPHORE | diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c index b203be82f..a6b9ba435 100644 --- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c @@ -270,7 +270,7 @@ int gr_gp10b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc, gr_gpc0_tpc0_tex_m_hww_esr_r() + offset); nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr); - if (esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_sec_pending_f()) { + if ((esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_sec_pending_f()) != 0U) { nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Single bit error detected in TEX!"); @@ -326,7 +326,7 @@ int gr_gp10b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc, gr_pri_gpc0_tpc0_tex_m_routing_r() + offset, gr_pri_gpc0_tpc0_tex_m_routing_sel_default_f()); } - if (esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_ded_pending_f()) { + if ((esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_ded_pending_f()) != 0U) { nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "Double bit error detected in TEX!"); @@ -1869,12 +1869,12 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g, gpc, tpc, global_esr); if (cilp_enabled && sm_debugger_attached) { - if (global_esr & gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f()) { + if ((global_esr & gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f()) != 0U) { gk20a_writel(g, gr_gpc0_tpc0_sm_hww_global_esr_r() + offset, gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f()); } - if (global_esr & gr_gpc0_tpc0_sm_hww_global_esr_single_step_complete_pending_f()) { + if ((global_esr & gr_gpc0_tpc0_sm_hww_global_esr_single_step_complete_pending_f()) != 0U) { gk20a_writel(g, gr_gpc0_tpc0_sm_hww_global_esr_r() + offset, gr_gpc0_tpc0_sm_hww_global_esr_single_step_complete_pending_f()); } @@ -1924,7 +1924,7 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g, } dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset); - if (dbgr_control0 & gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_enable_f()) { + if ((dbgr_control0 & gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_enable_f()) != 0U) { nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: clearing SINGLE_STEP_MODE before resume for gpc %d tpc %d\n", gpc, tpc); diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c index e925c8c30..8746e0452 100644 --- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c @@ -303,9 +303,10 @@ static u32 gp10b_get_pde0_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l, /* * Check if the aperture AND address are set */ - if (pde_v[2] & (gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f() | - gmmu_new_dual_pde_aperture_small_sys_mem_coh_f() | - gmmu_new_dual_pde_aperture_small_video_memory_f())) { + if ((pde_v[2] & + (gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f() | + gmmu_new_dual_pde_aperture_small_sys_mem_coh_f() | + gmmu_new_dual_pde_aperture_small_video_memory_f())) != 0U) { u64 addr = ((U64(pde_v[3]) << U64(32)) | (U64(pde_v[2]) & U64(gmmu_new_dual_pde_address_small_sys_f(~0)))) << U64(gmmu_new_dual_pde_address_shift_v()); @@ -315,9 +316,10 @@ static u32 gp10b_get_pde0_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l, } } - if (pde_v[0] & (gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f() | - gmmu_new_dual_pde_aperture_big_sys_mem_coh_f() | - gmmu_new_dual_pde_aperture_big_video_memory_f())) { + if ((pde_v[0] & + (gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f() | + gmmu_new_dual_pde_aperture_big_sys_mem_coh_f() | + 
gmmu_new_dual_pde_aperture_big_video_memory_f())) != 0U) { u64 addr = ((U64(pde_v[1]) << U64(32)) | (U64(pde_v[0]) & U64(gmmu_new_dual_pde_address_big_sys_f(~0)))) << U64(gmmu_new_dual_pde_address_big_shift_v()); diff --git a/drivers/gpu/nvgpu/gv11b/ce_gv11b.c b/drivers/gpu/nvgpu/gv11b/ce_gv11b.c index 2eea11dfa..a960beea2 100644 --- a/drivers/gpu/nvgpu/gv11b/ce_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/ce_gv11b.c @@ -59,7 +59,7 @@ void gv11b_ce_isr(struct gk20a *g, u32 inst_id, u32 pri_base) * registers. This is a fatal error and the LCE will have to be * reset to get back to a working state. */ - if (ce_intr & ce_intr_status_invalid_config_pending_f()) { + if ((ce_intr & ce_intr_status_invalid_config_pending_f()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "ce: inst %d: invalid config", inst_id); clear_intr |= ce_intr_status_invalid_config_reset_f(); @@ -70,7 +70,7 @@ void gv11b_ce_isr(struct gk20a *g, u32 inst_id, u32 pri_base) * This is a fatal interrupt and will require at least the LCE to be * reset before operations can start again, if not the entire GPU. */ - if (ce_intr & ce_intr_status_mthd_buffer_fault_pending_f()) { + if ((ce_intr & ce_intr_status_mthd_buffer_fault_pending_f()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "ce: inst %d: mthd buffer fault", inst_id); clear_intr |= ce_intr_status_mthd_buffer_fault_reset_f(); @@ -100,7 +100,7 @@ void gv11b_ce_mthd_buffer_fault_in_bar2_fault(struct gk20a *g) for (lce = 0; lce < num_lce; lce++) { reg_val = gk20a_readl(g, ce_intr_status_r(lce)); - if (reg_val & ce_intr_status_mthd_buffer_fault_pending_f()) { + if ((reg_val & ce_intr_status_mthd_buffer_fault_pending_f()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "ce: lce %d: mthd buffer fault", lce); clear_intr = ce_intr_status_mthd_buffer_fault_reset_f(); diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c index 5eae521a5..44d7dbd8c 100644 --- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c @@ -1621,7 +1621,7 @@ unsigned int gv11b_fifo_handle_pbdma_intr_0(struct gk20a *g, rc_type = gk20a_fifo_handle_pbdma_intr_0(g, pbdma_id, pbdma_intr_0, handled, error_notifier); - if (pbdma_intr_0 & pbdma_intr_0_clear_faulted_error_pending_f()) { + if ((pbdma_intr_0 & pbdma_intr_0_clear_faulted_error_pending_f()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "clear faulted error on pbdma id %d", pbdma_id); gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0); @@ -1629,7 +1629,7 @@ unsigned int gv11b_fifo_handle_pbdma_intr_0(struct gk20a *g, rc_type = RC_TYPE_PBDMA_FAULT; } - if (pbdma_intr_0 & pbdma_intr_0_eng_reset_pending_f()) { + if ((pbdma_intr_0 & pbdma_intr_0_eng_reset_pending_f()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "eng reset intr on pbdma id %d", pbdma_id); *handled |= pbdma_intr_0_eng_reset_pending_f(); @@ -1678,7 +1678,7 @@ unsigned int gv11b_fifo_handle_pbdma_intr_1(struct gk20a *g, return RC_TYPE_NO_RC; } - if (pbdma_intr_1 & pbdma_intr_1_ctxnotvalid_pending_f()) { + if ((pbdma_intr_1 & pbdma_intr_1_ctxnotvalid_pending_f()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "ctxnotvalid intr on pbdma id %d", pbdma_id); nvgpu_err(g, "pbdma_intr_1(%d)= 0x%08x ", diff --git a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c index 914f10bfd..aa500115c 100644 --- a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c @@ -851,16 +851,20 @@ static int gr_gv11b_handle_gpcmmu_ecc_exception(struct gk20a *g, u32 gpc, nvgpu_log(g, gpu_dbg_intr, "mmu l1tlb gpc:%d ecc interrupt intr: 0x%x", gpc, hww_esr); - if (ecc_status & 
gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_l1tlb_sa_data_m()) { + if ((ecc_status & + gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_l1tlb_sa_data_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "corrected ecc sa data error"); } - if (ecc_status & gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_l1tlb_sa_data_m()) { + if ((ecc_status & + gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_l1tlb_sa_data_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc sa data error"); } - if (ecc_status & gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_l1tlb_fa_data_m()) { + if ((ecc_status & + gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_l1tlb_fa_data_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "corrected ecc fa data error"); } - if (ecc_status & gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_l1tlb_fa_data_m()) { + if ((ecc_status & + gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_l1tlb_fa_data_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc fa data error"); } if (corrected_overflow || uncorrected_overflow) { @@ -937,19 +941,20 @@ static int gr_gv11b_handle_gpccs_ecc_exception(struct gk20a *g, u32 gpc, nvgpu_log(g, gpu_dbg_intr, "gppcs gpc:%d ecc interrupt intr: 0x%x", gpc, hww_esr); - if (ecc_status & gr_gpc0_gpccs_falcon_ecc_status_corrected_err_imem_m()) { + if ((ecc_status & + gr_gpc0_gpccs_falcon_ecc_status_corrected_err_imem_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "imem ecc error corrected"); } - if (ecc_status & - gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_imem_m()) { + if ((ecc_status & + gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_imem_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "imem ecc error uncorrected"); } - if (ecc_status & - gr_gpc0_gpccs_falcon_ecc_status_corrected_err_dmem_m()) { + if ((ecc_status & + gr_gpc0_gpccs_falcon_ecc_status_corrected_err_dmem_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "dmem ecc error corrected"); } - if (ecc_status & - gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_dmem_m()) { + if ((ecc_status & + gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_dmem_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "dmem ecc error uncorrected"); } if (corrected_overflow || uncorrected_overflow) { @@ -971,7 +976,7 @@ static int gr_gv11b_handle_gpccs_ecc_exception(struct gk20a *g, u32 gpc, int gr_gv11b_handle_gpc_gpcmmu_exception(struct gk20a *g, u32 gpc, u32 gpc_exception) { - if (gpc_exception & gr_gpc0_gpccs_gpc_exception_gpcmmu_m()) { + if ((gpc_exception & gr_gpc0_gpccs_gpc_exception_gpcmmu_m()) != 0U) { return gr_gv11b_handle_gpcmmu_ecc_exception(g, gpc, gpc_exception); } @@ -981,7 +986,7 @@ int gr_gv11b_handle_gpc_gpcmmu_exception(struct gk20a *g, u32 gpc, int gr_gv11b_handle_gpc_gpccs_exception(struct gk20a *g, u32 gpc, u32 gpc_exception) { - if (gpc_exception & gr_gpc0_gpccs_gpc_exception_gpccs_m()) { + if ((gpc_exception & gr_gpc0_gpccs_gpc_exception_gpccs_m()) != 0U) { return gr_gv11b_handle_gpccs_ecc_exception(g, gpc, gpc_exception); } @@ -2388,12 +2393,14 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g, gpc, tpc, sm, global_esr); if (cilp_enabled && sm_debugger_attached) { - if (global_esr & gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_pending_f()) { + if ((global_esr & + gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_pending_f()) != 0U) { gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset, gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_pending_f()); } - if (global_esr & gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_pending_f()) { + if ((global_esr & + gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_pending_f()) != 0U) { gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + 
offset, gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_pending_f()); } @@ -2445,7 +2452,8 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g, } dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset); - if (dbgr_control0 & gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_enable_f()) { + if ((dbgr_control0 & + gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_enable_f()) != 0U) { nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: clearing SINGLE_STEP_MODE " "before resume for gpc %d tpc %d sm %d", @@ -2478,8 +2486,8 @@ static void gr_gv11b_handle_fecs_ecc_error(struct gk20a *g, u32 intr) u32 corrected_delta, uncorrected_delta; u32 corrected_overflow, uncorrected_overflow; - if (intr & (gr_fecs_host_int_status_ecc_uncorrected_m() | - gr_fecs_host_int_status_ecc_corrected_m())) { + if ((intr & (gr_fecs_host_int_status_ecc_uncorrected_m() | + gr_fecs_host_int_status_ecc_corrected_m())) != 0U) { ecc_status = gk20a_readl(g, gr_fecs_falcon_ecc_status_r()); ecc_addr = gk20a_readl(g, gr_fecs_falcon_ecc_address_r()); @@ -2529,21 +2537,21 @@ static void gr_gv11b_handle_fecs_ecc_error(struct gk20a *g, u32 intr) nvgpu_log(g, gpu_dbg_intr, "fecs ecc interrupt intr: 0x%x", intr); - if (ecc_status & - gr_fecs_falcon_ecc_status_corrected_err_imem_m()) { + if ((ecc_status & + gr_fecs_falcon_ecc_status_corrected_err_imem_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "imem ecc error corrected"); } - if (ecc_status & - gr_fecs_falcon_ecc_status_uncorrected_err_imem_m()) { + if ((ecc_status & + gr_fecs_falcon_ecc_status_uncorrected_err_imem_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "imem ecc error uncorrected"); } - if (ecc_status & - gr_fecs_falcon_ecc_status_corrected_err_dmem_m()) { + if ((ecc_status & + gr_fecs_falcon_ecc_status_corrected_err_dmem_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "dmem ecc error corrected"); } - if (ecc_status & - gr_fecs_falcon_ecc_status_uncorrected_err_dmem_m()) { + if ((ecc_status & + gr_fecs_falcon_ecc_status_uncorrected_err_dmem_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "dmem ecc error uncorrected"); } @@ -4193,17 +4201,17 @@ void gv11b_gr_egpc_etpc_priv_addr_table(struct gk20a *g, u32 addr, * tables. Convert a GPC/TPC broadcast address to unicast addresses so * that we can look up the offsets. 
*/ - if (broadcast_flags & PRI_BROADCAST_FLAGS_EGPC) { + if ((broadcast_flags & PRI_BROADCAST_FLAGS_EGPC) != 0U) { nvgpu_log_info(g, "broadcast flags egpc"); for (gpc_num = 0; gpc_num < g->gr.gpc_count; gpc_num++) { - if (broadcast_flags & PRI_BROADCAST_FLAGS_ETPC) { + if ((broadcast_flags & PRI_BROADCAST_FLAGS_ETPC) != 0U) { nvgpu_log_info(g, "broadcast flags etpc"); for (tpc_num = 0; tpc_num < g->gr.gpc_tpc_count[gpc_num]; tpc_num++) { - if (broadcast_flags & - PRI_BROADCAST_FLAGS_SMPC) { + if ((broadcast_flags & + PRI_BROADCAST_FLAGS_SMPC) != 0U) { gv11b_gr_update_priv_addr_table_smpc( g, gpc_num, tpc_num, addr, priv_addr_table, t); @@ -4218,7 +4226,8 @@ void gv11b_gr_egpc_etpc_priv_addr_table(struct gk20a *g, u32 addr, (*t)++; } } - } else if (broadcast_flags & PRI_BROADCAST_FLAGS_SMPC) { + } else if ((broadcast_flags & + PRI_BROADCAST_FLAGS_SMPC) != 0U) { gv11b_gr_update_priv_addr_table_smpc( g, gpc_num, tpc_num, addr, priv_addr_table, t); @@ -4239,14 +4248,14 @@ void gv11b_gr_egpc_etpc_priv_addr_table(struct gk20a *g, u32 addr, (*t)++; } } - } else if (!(broadcast_flags & PRI_BROADCAST_FLAGS_EGPC)) { - if (broadcast_flags & PRI_BROADCAST_FLAGS_ETPC) { + } else if ((broadcast_flags & PRI_BROADCAST_FLAGS_EGPC) == 0U) { + if ((broadcast_flags & PRI_BROADCAST_FLAGS_ETPC) != 0U) { nvgpu_log_info(g, "broadcast flags etpc but not egpc"); for (tpc_num = 0; tpc_num < g->gr.gpc_tpc_count[gpc_num]; tpc_num++) { - if (broadcast_flags & - PRI_BROADCAST_FLAGS_SMPC) { + if ((broadcast_flags & + PRI_BROADCAST_FLAGS_SMPC) != 0U) { gv11b_gr_update_priv_addr_table_smpc( g, gpc_num, tpc_num, addr, priv_addr_table, t); @@ -4261,7 +4270,7 @@ void gv11b_gr_egpc_etpc_priv_addr_table(struct gk20a *g, u32 addr, (*t)++; } } - } else if (broadcast_flags & PRI_BROADCAST_FLAGS_SMPC) { + } else if ((broadcast_flags & PRI_BROADCAST_FLAGS_SMPC) != 0U) { gv11b_gr_update_priv_addr_table_smpc( g, gpc_num, tpc_num, addr, priv_addr_table, t); @@ -4864,7 +4873,7 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g, * so that we can look up the offset */ if ((addr_type == CTXSW_ADDR_TYPE_BE) && - !(broadcast_flags & PRI_BROADCAST_FLAGS_BE)) { + (broadcast_flags & PRI_BROADCAST_FLAGS_BE) == 0U) { priv_addr_table[t++] = pri_be_shared_addr(g, addr); } else { priv_addr_table[t++] = addr; @@ -4879,10 +4888,10 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g, * tables. 
Convert a GPC/TPC broadcast address to unicast addresses so * that we can look up the offsets */ - if (broadcast_flags & PRI_BROADCAST_FLAGS_GPC) { + if ((broadcast_flags & PRI_BROADCAST_FLAGS_GPC) != 0U) { for (gpc_num = 0; gpc_num < g->gr.gpc_count; gpc_num++) { - if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC) { + if ((broadcast_flags & PRI_BROADCAST_FLAGS_TPC) != 0U) { for (tpc_num = 0; tpc_num < g->gr.gpc_tpc_count[gpc_num]; tpc_num++) { @@ -4893,7 +4902,7 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g, } } - else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC) { + else if ((broadcast_flags & PRI_BROADCAST_FLAGS_PPC) != 0U) { err = gr_gk20a_split_ppc_broadcast_addr(g, addr, gpc_num, priv_addr_table, &t); if (err != 0) { @@ -4913,26 +4922,28 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g, priv_addr_table[t++] = priv_addr; } } - } else if (broadcast_flags & PRI_BROADCAST_FLAGS_PMMGPC) { + } else if ((broadcast_flags & PRI_BROADCAST_FLAGS_PMMGPC) != 0U) { u32 pmm_domain_start = 0; u32 domain_idx = 0; u32 num_domains = 0; u32 offset = 0; - if (broadcast_flags & PRI_BROADCAST_FLAGS_PMM_GPCGS_GPCTPCA) { + if ((broadcast_flags & + PRI_BROADCAST_FLAGS_PMM_GPCGS_GPCTPCA) != 0U) { pmm_domain_start = nvgpu_get_litter_value(g, GPU_LIT_PERFMON_PMMGPCTPCA_DOMAIN_START); num_domains = nvgpu_get_litter_value(g, GPU_LIT_PERFMON_PMMGPCTPC_DOMAIN_COUNT); offset = PRI_PMMGS_OFFSET_MASK(addr); - } else if (broadcast_flags & - PRI_BROADCAST_FLAGS_PMM_GPCGS_GPCTPCB) { + } else if ((broadcast_flags & + PRI_BROADCAST_FLAGS_PMM_GPCGS_GPCTPCB) != 0U) { pmm_domain_start = nvgpu_get_litter_value(g, GPU_LIT_PERFMON_PMMGPCTPCB_DOMAIN_START); num_domains = nvgpu_get_litter_value(g, GPU_LIT_PERFMON_PMMGPCTPC_DOMAIN_COUNT); offset = PRI_PMMGS_OFFSET_MASK(addr); - } else if (broadcast_flags & PRI_BROADCAST_FLAGS_PMM_GPCS) { + } else if ((broadcast_flags & + PRI_BROADCAST_FLAGS_PMM_GPCS) != 0U) { pmm_domain_start = (addr - (NV_PERF_PMMGPC_GPCS + PRI_PMMS_ADDR_MASK(addr)))/ perf_pmmgpc_perdomain_offset_v(); @@ -4957,13 +4968,13 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g, nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC"); g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num, broadcast_flags, priv_addr_table, &t); - } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) { + } else if ((broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) != 0U) { g->ops.ltc.split_lts_broadcast_addr(g, addr, priv_addr_table, &t); - } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTCS) { + } else if ((broadcast_flags & PRI_BROADCAST_FLAGS_LTCS) != 0U) { g->ops.ltc.split_ltc_broadcast_addr(g, addr, priv_addr_table, &t); - } else if (broadcast_flags & PRI_BROADCAST_FLAGS_FBPA) { + } else if ((broadcast_flags & PRI_BROADCAST_FLAGS_FBPA) != 0U) { g->ops.gr.split_fbpa_broadcast_addr(g, addr, nvgpu_get_litter_value(g, GPU_LIT_NUM_FBPAS), priv_addr_table, &t); @@ -4992,8 +5003,8 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g, PRI_PMMS_ADDR_MASK(addr), priv_addr_table, &t, domain_start, 1); - } else if (!(broadcast_flags & PRI_BROADCAST_FLAGS_GPC)) { - if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC) { + } else if ((broadcast_flags & PRI_BROADCAST_FLAGS_GPC) == 0U) { + if ((broadcast_flags & PRI_BROADCAST_FLAGS_TPC) != 0U) { for (tpc_num = 0; tpc_num < g->gr.gpc_tpc_count[gpc_num]; tpc_num++) { @@ -5002,7 +5013,7 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g, pri_tpccs_addr_mask(addr), gpc_num, tpc_num); } - } else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC) { + } else if ((broadcast_flags & 
PRI_BROADCAST_FLAGS_PPC) != 0U) { err = gr_gk20a_split_ppc_broadcast_addr(g, addr, gpc_num, priv_addr_table, &t); } else { diff --git a/drivers/gpu/nvgpu/gv11b/gr_gv11b.h b/drivers/gpu/nvgpu/gv11b/gr_gv11b.h index 2f7653361..d0ff576bf 100644 --- a/drivers/gpu/nvgpu/gv11b/gr_gv11b.h +++ b/drivers/gpu/nvgpu/gv11b/gr_gv11b.h @@ -28,7 +28,7 @@ #define EGPC_PRI_BASE 0x580000 #define EGPC_PRI_SHARED_BASE 0x480000 -#define PRI_BROADCAST_FLAGS_SMPC BIT(17) +#define PRI_BROADCAST_FLAGS_SMPC BIT32(17) #define GV11B_ZBC_TYPE_STENCIL T19X_ZBC #define ZBC_STENCIL_CLEAR_FMT_INVAILD 0 diff --git a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c index 34a688609..93260113c 100644 --- a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c @@ -299,10 +299,11 @@ void gv11b_pmu_handle_ext_irq(struct gk20a *g, u32 intr0) /* * handle the ECC interrupt */ - if (intr0 & pwr_falcon_irqstat_ext_ecc_parity_true_f()) { + if ((intr0 & pwr_falcon_irqstat_ext_ecc_parity_true_f()) != 0U) { intr1 = gk20a_readl(g, pwr_pmu_ecc_intr_status_r()); - if (intr1 & (pwr_pmu_ecc_intr_status_corrected_m() | - pwr_pmu_ecc_intr_status_uncorrected_m())) { + if ((intr1 & + (pwr_pmu_ecc_intr_status_corrected_m() | + pwr_pmu_ecc_intr_status_uncorrected_m())) != 0U) { ecc_status = gk20a_readl(g, pwr_pmu_falcon_ecc_status_r()); @@ -353,19 +354,19 @@ void gv11b_pmu_handle_ext_irq(struct gk20a *g, u32 intr0) nvgpu_log(g, gpu_dbg_intr, "pmu ecc interrupt intr1: 0x%x", intr1); - if (ecc_status & pwr_pmu_falcon_ecc_status_corrected_err_imem_m()) { + if ((ecc_status & pwr_pmu_falcon_ecc_status_corrected_err_imem_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "imem ecc error corrected"); } - if (ecc_status & pwr_pmu_falcon_ecc_status_uncorrected_err_imem_m()) { + if ((ecc_status & pwr_pmu_falcon_ecc_status_uncorrected_err_imem_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "imem ecc error uncorrected"); } - if (ecc_status & pwr_pmu_falcon_ecc_status_corrected_err_dmem_m()) { + if ((ecc_status & pwr_pmu_falcon_ecc_status_corrected_err_dmem_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "dmem ecc error corrected"); } - if (ecc_status & pwr_pmu_falcon_ecc_status_uncorrected_err_dmem_m()) { + if ((ecc_status & pwr_pmu_falcon_ecc_status_uncorrected_err_dmem_m()) != 0U) { nvgpu_log(g, gpu_dbg_intr, "dmem ecc error uncorrected"); } diff --git a/drivers/gpu/nvgpu/tu104/fbpa_tu104.c b/drivers/gpu/nvgpu/tu104/fbpa_tu104.c index 582430802..1d19fd1a1 100644 --- a/drivers/gpu/nvgpu/tu104/fbpa_tu104.c +++ b/drivers/gpu/nvgpu/tu104/fbpa_tu104.c @@ -52,24 +52,24 @@ static void tu104_fbpa_handle_ecc_intr(struct gk20a *g, status = gk20a_readl(g, offset + fbpa_0_ecc_status_r(subp_id)); - if (status & fbpa_0_ecc_status_sec_counter_overflow_pending_f()) { + if ((status & fbpa_0_ecc_status_sec_counter_overflow_pending_f()) != 0U) { nvgpu_err(g, "fbpa %u subp %u ecc sec counter overflow", fbpa_id, subp_id); } - if (status & fbpa_0_ecc_status_ded_counter_overflow_pending_f()) { + if ((status & fbpa_0_ecc_status_ded_counter_overflow_pending_f()) != 0U) { nvgpu_err(g, "fbpa %u subp %u ecc ded counter overflow", fbpa_id, subp_id); } - if (status & fbpa_0_ecc_status_sec_intr_pending_f()) { + if ((status & fbpa_0_ecc_status_sec_intr_pending_f()) != 0U) { sec_cnt = gk20a_readl(g, offset + fbpa_0_ecc_sec_count_r(subp_id)); gk20a_writel(g, offset + fbpa_0_ecc_sec_count_r(subp_id), 0u); g->ecc.fbpa.fbpa_ecc_sec_err_count[cnt_idx].counter += sec_cnt; } - if (status & fbpa_0_ecc_status_ded_intr_pending_f()) { + if ((status & 
fbpa_0_ecc_status_ded_intr_pending_f()) != 0U) { ded_cnt = gk20a_readl(g, offset + fbpa_0_ecc_ded_count_r(subp_id)); gk20a_writel(g, offset + fbpa_0_ecc_ded_count_r(subp_id), 0u); diff --git a/drivers/gpu/nvgpu/tu104/sec2_tu104.c b/drivers/gpu/nvgpu/tu104/sec2_tu104.c index d5182dc45..9d10e67c1 100644 --- a/drivers/gpu/nvgpu/tu104/sec2_tu104.c +++ b/drivers/gpu/nvgpu/tu104/sec2_tu104.c @@ -393,11 +393,11 @@ void tu104_sec2_isr(struct gk20a *g) return; } - if (intr & psec_falcon_irqstat_halt_true_f()) { + if ((intr & psec_falcon_irqstat_halt_true_f()) != 0U) { nvgpu_err(g, "sec2 halt intr not implemented"); nvgpu_flcn_dump_stats(&g->sec2_flcn); } - if (intr & psec_falcon_irqstat_exterr_true_f()) { + if ((intr & psec_falcon_irqstat_exterr_true_f()) != 0U) { nvgpu_err(g, "sec2 exterr intr not implemented. Clearing interrupt."); @@ -406,7 +406,7 @@ void tu104_sec2_isr(struct gk20a *g) ~psec_falcon_exterrstat_valid_m()); } - if (intr & psec_falcon_irqstat_swgen0_true_f()) { + if ((intr & psec_falcon_irqstat_swgen0_true_f()) != 0U) { if (nvgpu_sec2_process_message(sec2)) { gk20a_writel(g, psec_falcon_irqsclr_r(), intr); goto exit;
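
Every hunk in this series applies the same mechanical transform: an implicit truth test of a bitwise AND, `if (reg & mask_m())`, becomes an explicit unsigned comparison, `if ((reg & mask_m()) != 0U)`. The series itself does not state a motivation, but this is the usual shape of MISRA C:2012 Rule 14.4 work (the controlling expression of an if statement shall have essentially Boolean type), with the `0U` literal keeping both operands of the comparison unsigned. Note that the added outer parentheses are not optional: `!=` binds tighter than `&`, so `reg & mask != 0U` would parse as `reg & (mask != 0U)`. Below is a minimal, self-contained sketch of the idiom; `example_intr_fault_pending_m()` is a hypothetical stand-in in the nvgpu `_m()` naming style, not a real nvgpu accessor.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* Hypothetical mask accessor in the nvgpu _m() style. */
static inline u32 example_intr_fault_pending_m(void)
{
	return (u32)1 << 8;
}

static void example_isr(u32 intr)
{
	/*
	 * Before: if (intr & example_intr_fault_pending_m()) { ... }
	 * After: the bitwise result is compared against 0U, so the
	 * controlling expression is essentially Boolean.
	 */
	if ((intr & example_intr_fault_pending_m()) != 0U) {
		printf("fault pending\n");
	}

	/*
	 * The negated form changes the same way, as in the
	 * PRI_BROADCAST_FLAGS_EGPC and PRI_BROADCAST_FLAGS_BE hunks:
	 * !(intr & mask) becomes (intr & mask) == 0U.
	 */
	if ((intr & example_intr_fault_pending_m()) == 0U) {
		printf("fault not pending\n");
	}
}

int main(void)
{
	example_isr((u32)1 << 8);
	return 0;
}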
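
Where several masks are tested at once (for example `gv11b_fb_mmu_fault_pending()` and the `fb_niso_intr` hunks), the series keeps the single OR-of-masks expression and appends one `!= 0U` to the whole group rather than splitting it into per-mask tests. The two forms are equivalent, and the grouped form preserves the original single register read; a trivial sketch of the equivalence:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t reg = 1u << 3;
	uint32_t a = 1u << 3, b = 1u << 5;

	/* A grouped test over OR'd masks equals the OR of per-mask tests. */
	assert(((reg & (a | b)) != 0u) ==
	       (((reg & a) != 0u) || ((reg & b) != 0u)));
	return 0;
}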
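
One hunk is different in kind: in gr_gv11b.h, `PRI_BROADCAST_FLAGS_SMPC` moves from `BIT(17)` to `BIT32(17)`. The kernel's `BIT(n)` expands to `1UL << (n)`, an unsigned long that is 64 bits wide on LP64 builds, so masking a `u32` flags word with it silently mixes widths; a 32-bit bit macro keeps both operands in the same essential type. A sketch of the distinction, assuming `BIT32()` expands to a 32-bit unsigned constant (the exact nvgpu definition is not shown in this series):

#include <stdint.h>

/* Kernel-style BIT(): unsigned long, 64 bits wide on LP64 targets. */
#define BIT(nr)   (1UL << (nr))
/* Assumed 32-bit variant in the spirit of nvgpu's BIT32(). */
#define BIT32(nr) (UINT32_C(1) << (nr))

int main(void)
{
	uint32_t broadcast_flags = BIT32(17);

	/* BIT(17) is as wide as unsigned long; BIT32(17) stays 32-bit,
	 * matching the u32 flags words used throughout the driver. */
	_Static_assert(sizeof(BIT(17)) == sizeof(unsigned long), "BIT width");
	_Static_assert(sizeof(BIT32(17)) == sizeof(uint32_t), "BIT32 width");

	return ((broadcast_flags & BIT32(17)) != 0U) ? 0 : 1;
}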