gpu: nvgpu: MISRA 14.4 boolean fixes

MISRA rule 14.4 does not allow a non-boolean variable to be used as
a boolean in the controlling expression of an if statement or an
iteration statement.

Fix violations where a non-boolean variable is used as a boolean in the
controlling expression of if and loop statements.
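
For illustration, a minimal sketch of the pattern applied throughout this
change (the names below are hypothetical, not taken from the patch): the
controlling expression is made explicitly boolean by comparing against
0/0U/0ULL or NULL to match the operand's type, and side effects such as a
--retries decrement are moved out of the condition.

#include <stddef.h>

typedef unsigned int u32;

/*
 * Illustrative sketch only -- not code from this patch. The parameter
 * and function names are hypothetical; the patch applies the same
 * transformations to the driver's own variables.
 */
static int misra_14_4_example(const u32 *pcount, const char *name,
			      u32 retries)
{
	/* Before: if (*pcount)		After: compare against 0U */
	if (*pcount != 0U) {
		return 1;
	}

	/* Before: if (name)		After: compare against NULL */
	if (name != NULL) {
		return 2;
	}

	/*
	 * Before: do { ... } while (--retries);
	 * After: the decrement moves out of the controlling expression
	 * and the loop condition becomes an explicit comparison.
	 */
	do {
		--retries;
	} while (retries != 0U);

	return 0;
}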

JIRA NVGPU-1022

Change-Id: I61a2d24830428ffc2655bd9c45bb5403c7f22c09
Signed-off-by: Amurthyreddy <amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1943058
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Amurthyreddy <amurthyreddy@nvidia.com>
Date:      2018-11-05 12:23:08 +05:30
Committed: mobile promotions
Commit:    1023c6af14 (parent 710aab6ba4)

43 changed files with 179 additions and 176 deletions


@@ -1047,7 +1047,7 @@ static u32 clkdomaingetfpoints
goto done;
}
totalcount += fpointscount;
if (*pfpointscount) {
if (*pfpointscount != 0U) {
remainingcount -= fpointscount;
fpointscount = remainingcount;
} else {


@@ -1330,7 +1330,7 @@ static struct clk_prog *construct_clk_prog(struct gk20a *g, void *pargs)
}
if (status) {
if (board_obj_ptr) {
if (board_obj_ptr != NULL) {
board_obj_ptr->destruct(board_obj_ptr);
}
return NULL;


@@ -484,7 +484,7 @@ int boardobjgrp_pmuset_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp)
nvgpu_log_info(g, " ");
if (check_boardobjgrp_param(g, pboardobjgrp)) {
if (check_boardobjgrp_param(g, pboardobjgrp) != 0) {
return -EINVAL;
}
@@ -547,7 +547,7 @@ int boardobjgrp_pmuset_impl_v1(struct gk20a *g,
nvgpu_log_info(g, " ");
if (check_boardobjgrp_param(g, pboardobjgrp)) {
if (check_boardobjgrp_param(g, pboardobjgrp) != 0) {
return -EINVAL;
}
@@ -606,7 +606,7 @@ boardobjgrp_pmugetstatus_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp,
nvgpu_log_info(g, " ");
if (check_boardobjgrp_param(g, pboardobjgrp)) {
if (check_boardobjgrp_param(g, pboardobjgrp) != 0) {
return -EINVAL;
}
@@ -677,7 +677,7 @@ boardobjgrp_pmugetstatus_impl_v1(struct gk20a *g, struct boardobjgrp *pboardobjg
nvgpu_log_info(g, " ");
if (check_boardobjgrp_param(g, pboardobjgrp)) {
if (check_boardobjgrp_param(g, pboardobjgrp) != 0) {
return -EINVAL;
}
@@ -862,7 +862,7 @@ static int boardobjgrp_objremoveanddestroy_final(
pboardobjgrp->objmask &= ~BIT(index);
stat = boardobjgrpmask_bitclr(pboardobjgrp->mask, index);
if (stat) {
if (stat != 0) {
if (status == 0) {
status = stat;
}


@@ -60,7 +60,7 @@ int gm20b_bus_bar1_bind(struct gk20a *g, struct nvgpu_mem *bar1_inst)
nvgpu_udelay(5);
} while (nvgpu_timeout_expired(&timeout) == 0);
if (nvgpu_timeout_peek_expired(&timeout)) {
if (nvgpu_timeout_peek_expired(&timeout) != 0) {
err = -EINVAL;
}


@@ -57,7 +57,7 @@ int gp10b_bus_bar2_bind(struct gk20a *g, struct nvgpu_mem *bar2_inst)
nvgpu_udelay(5);
} while (nvgpu_timeout_expired(&timeout) == 0);
if (nvgpu_timeout_peek_expired(&timeout)) {
if (nvgpu_timeout_peek_expired(&timeout) != 0) {
err = -EINVAL;
}


@@ -65,7 +65,7 @@ int bus_tu104_bar2_bind(struct gk20a *g, struct nvgpu_mem *bar2_inst)
nvgpu_udelay(5);
} while (!nvgpu_timeout_expired(&timeout));
if (nvgpu_timeout_peek_expired(&timeout)) {
if (nvgpu_timeout_peek_expired(&timeout) != 0) {
err = -EINVAL;
}


@@ -49,12 +49,12 @@ int nvgpu_flcn_wait_idle(struct nvgpu_falcon *flcn)
do {
idle_stat = flcn_ops->is_falcon_idle(flcn);
if (idle_stat) {
if (idle_stat != 0U) {
break;
}
if (nvgpu_timeout_expired_msg(&timeout,
"waiting for falcon idle: 0x%08x", idle_stat)) {
"waiting for falcon idle: 0x%08x", idle_stat) != 0) {
return -EBUSY;
}
@@ -81,7 +81,7 @@ int nvgpu_flcn_mem_scrub_wait(struct nvgpu_falcon *flcn)
nvgpu_udelay(MEM_SCRUBBING_TIMEOUT_DEFAULT);
} while (nvgpu_timeout_expired(&timeout) == 0);
if (nvgpu_timeout_peek_expired(&timeout)) {
if (nvgpu_timeout_peek_expired(&timeout) != 0) {
status = -ETIMEDOUT;
}
@@ -167,7 +167,7 @@ int nvgpu_flcn_wait_for_halt(struct nvgpu_falcon *flcn, unsigned int timeout)
nvgpu_udelay(10);
} while (nvgpu_timeout_expired(&to) == 0);
if (nvgpu_timeout_peek_expired(&to)) {
if (nvgpu_timeout_peek_expired(&to) != 0) {
status = -EBUSY;
}
@@ -197,7 +197,7 @@ int nvgpu_flcn_clear_halt_intr_status(struct nvgpu_falcon *flcn,
nvgpu_udelay(1);
} while (nvgpu_timeout_expired(&to) == 0);
if (nvgpu_timeout_peek_expired(&to)) {
if (nvgpu_timeout_peek_expired(&to) != 0) {
status = -EBUSY;
}
@@ -337,7 +337,7 @@ static void nvgpu_flcn_print_mem(struct nvgpu_falcon *flcn, u32 src,
src += byte_read_count;
size -= byte_read_count;
} while (total_block_read--);
} while (total_block_read-- != 0U);
}
void nvgpu_flcn_print_dmem(struct nvgpu_falcon *flcn, u32 src, u32 size)


@@ -104,7 +104,7 @@ int gm20b_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
} while (!nvgpu_timeout_expired_msg(&timeout,
"wait mmu fifo space"));
if (nvgpu_timeout_peek_expired(&timeout)) {
if (nvgpu_timeout_peek_expired(&timeout) != 0) {
err = -ETIMEDOUT;
goto out;
}
@@ -263,7 +263,7 @@ static int gm20b_fb_vpr_info_fetch_wait(struct gk20a *g,
int gm20b_fb_vpr_info_fetch(struct gk20a *g)
{
if (gm20b_fb_vpr_info_fetch_wait(g, VPR_INFO_FETCH_WAIT)) {
if (gm20b_fb_vpr_info_fetch_wait(g, VPR_INFO_FETCH_WAIT) != 0) {
return -ETIMEDOUT;
}


@@ -40,12 +40,13 @@ void gp106_fb_init_fs_state(struct gk20a *g)
/* wait for memory to be accessible */
do {
u32 w = gk20a_readl(g, fb_niso_scrub_status_r());
if (fb_niso_scrub_status_flag_v(w)) {
if (fb_niso_scrub_status_flag_v(w) != 0U) {
nvgpu_log_fn(g, "done");
break;
}
nvgpu_udelay(HW_SCRUB_TIMEOUT_DEFAULT);
} while (--retries);
--retries;
} while (retries != 0);
val = gk20a_readl(g, fb_mmu_priv_level_mask_r());
val &= ~fb_mmu_priv_level_mask_write_violation_m();
@@ -60,7 +61,7 @@ size_t gp106_fb_get_vidmem_size(struct gk20a *g)
u32 ecc = fb_mmu_local_memory_range_ecc_mode_v(range);
size_t bytes = ((size_t)mag << scale) * SZ_1M;
if (ecc) {
if (ecc != 0U) {
bytes = bytes / 16U * 15U;
}


@@ -62,12 +62,13 @@ void gv100_fb_reset(struct gk20a *g)
/* wait for memory to be accessible */
do {
u32 w = gk20a_readl(g, fb_niso_scrub_status_r());
if (fb_niso_scrub_status_flag_v(w)) {
if (fb_niso_scrub_status_flag_v(w) != 0U) {
nvgpu_log_info(g, "done");
break;
}
nvgpu_udelay(HW_SCRUB_TIMEOUT_DEFAULT);
} while (--retries);
--retries;
} while (retries != 0);
val = gk20a_readl(g, fb_mmu_priv_level_mask_r());
val &= ~fb_mmu_priv_level_mask_write_violation_m();
@@ -219,7 +220,7 @@ int gv100_fb_memory_unlock(struct gk20a *g)
/* check mem unlock status */
val = nvgpu_flcn_mailbox_read(&g->nvdec_flcn, 0);
if (val) {
if (val != 0U) {
nvgpu_err(g, "memory unlock failed, err %x", val);
nvgpu_flcn_dump_stats(&g->nvdec_flcn);
err = -1;
@@ -227,7 +228,7 @@ int gv100_fb_memory_unlock(struct gk20a *g)
}
exit:
if (mem_unlock_fw) {
if (mem_unlock_fw != NULL) {
nvgpu_release_firmware(g, mem_unlock_fw);
}
@@ -294,7 +295,7 @@ size_t gv100_fb_get_vidmem_size(struct gk20a *g)
u32 ecc = fb_mmu_local_memory_range_ecc_mode_v(range);
size_t bytes = ((size_t)mag << scale) * SZ_1M;
if (ecc) {
if (ecc != 0U) {
bytes = bytes / 16U * 15U;
}


@@ -449,10 +449,10 @@ void gv11b_handle_l2tlb_ecc_isr(struct gk20a *g, u32 ecc_status)
fb_mmu_l2tlb_ecc_status_reset_clear_f());
/* Handle overflow */
if (corrected_overflow) {
if (corrected_overflow != 0U) {
corrected_delta += (0x1UL << fb_mmu_l2tlb_ecc_corrected_err_count_total_s());
}
if (uncorrected_overflow) {
if (uncorrected_overflow != 0U) {
uncorrected_delta += (0x1UL << fb_mmu_l2tlb_ecc_uncorrected_err_count_total_s());
}
@@ -516,10 +516,10 @@ void gv11b_handle_hubtlb_ecc_isr(struct gk20a *g, u32 ecc_status)
fb_mmu_hubtlb_ecc_status_reset_clear_f());
/* Handle overflow */
if (corrected_overflow) {
if (corrected_overflow != 0U) {
corrected_delta += (0x1UL << fb_mmu_hubtlb_ecc_corrected_err_count_total_s());
}
if (uncorrected_overflow) {
if (uncorrected_overflow != 0U) {
uncorrected_delta += (0x1UL << fb_mmu_hubtlb_ecc_uncorrected_err_count_total_s());
}
@@ -583,10 +583,10 @@ void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status)
fb_mmu_fillunit_ecc_status_reset_clear_f());
/* Handle overflow */
if (corrected_overflow) {
if (corrected_overflow != 0U) {
corrected_delta += (0x1UL << fb_mmu_fillunit_ecc_corrected_err_count_total_s());
}
if (uncorrected_overflow) {
if (uncorrected_overflow != 0U) {
uncorrected_delta += (0x1UL << fb_mmu_fillunit_ecc_uncorrected_err_count_total_s());
}
@@ -757,7 +757,7 @@ static void gv11b_fb_copy_from_hw_fault_buf(struct gk20a *g,
/* refch will be put back after fault is handled */
refch = gk20a_refch_from_inst_ptr(g, inst_ptr);
if (refch) {
if (refch != NULL) {
chid = refch->chid;
}
@@ -868,7 +868,7 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
if (err == 0) {
nvgpu_log(g, gpu_dbg_intr, "CE Page Fault Fixed");
*invalidate_replay_val = 0;
if (mmfault->refch) {
if (mmfault->refch != NULL) {
gk20a_channel_put(mmfault->refch);
mmfault->refch = NULL;
}
@@ -888,7 +888,7 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
*/
rc_type = RC_TYPE_MMU_FAULT;
} else if (mmfault->refch) {
} else if (mmfault->refch != NULL) {
if (mmfault->refch->mmu_nack_handled) {
/* We have already recovered for the same
* context, skip doing another recovery.
@@ -936,7 +936,7 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
/* refch in mmfault is assigned at the time of copying
* fault info from snap reg or bar2 fault buf
*/
if (mmfault->refch) {
if (mmfault->refch != NULL) {
gk20a_channel_put(mmfault->refch);
mmfault->refch = NULL;
}
@@ -964,7 +964,7 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
/* refch in mmfault is assigned at the time of copying
* fault info from snap reg or bar2 fault buf
*/
if (mmfault->refch) {
if (mmfault->refch != NULL) {
gk20a_channel_put(mmfault->refch);
mmfault->refch = NULL;
}
@@ -1059,7 +1059,7 @@ void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
next_fault_addr = mmfault->fault_addr;
if (prev_fault_addr == next_fault_addr) {
nvgpu_log(g, gpu_dbg_intr, "pte already scanned");
if (mmfault->refch) {
if (mmfault->refch != NULL) {
gk20a_channel_put(mmfault->refch);
mmfault->refch = NULL;
}
@@ -1104,7 +1104,7 @@ static void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g,
/* refch will be put back after fault is handled */
refch = gk20a_refch_from_inst_ptr(g, inst_ptr);
if (refch) {
if (refch != NULL) {
chid = refch->chid;
}
@@ -1245,7 +1245,7 @@ static void gv11b_fb_handle_bar2_fault(struct gk20a *g,
g->ops.bus.bar2_bind(g, &g->mm.bar2.inst_block);
if (mmfault->refch) {
if (mmfault->refch != NULL) {
gk20a_channel_put(mmfault->refch);
mmfault->refch = NULL;
}
@@ -1275,7 +1275,7 @@ void gv11b_fb_handle_other_fault_notify(struct gk20a *g,
gv11b_fb_handle_mmu_fault_common(g, mmfault,
&invalidate_replay_val);
if (invalidate_replay_val) {
if (invalidate_replay_val != 0U) {
gv11b_fb_replay_or_cancel_faults(g,
invalidate_replay_val);
}


@@ -195,10 +195,10 @@ void gk20a_channel_abort_clean_up(struct channel_gk20a *ch)
/* ensure no fences are pending */
nvgpu_mutex_acquire(&ch->sync_lock);
if (ch->sync) {
if (ch->sync != NULL) {
nvgpu_channel_sync_set_min_eq_max(ch->sync);
}
if (ch->user_sync) {
if (ch->user_sync != NULL) {
nvgpu_channel_sync_set_safe_state(ch->user_sync);
}
nvgpu_mutex_release(&ch->sync_lock);
@@ -308,7 +308,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
trace_gk20a_free_channel(ch->chid);
if (g->os_channel.close) {
if (g->os_channel.close != NULL) {
g->os_channel.close(ch);
}
@@ -374,7 +374,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
" deferred, running now");
/* if lock is already taken, a reset is taking place
so no need to repeat */
if (nvgpu_mutex_tryacquire(&g->fifo.gr_reset_mutex)) {
if (nvgpu_mutex_tryacquire(&g->fifo.gr_reset_mutex) != 0) {
gk20a_fifo_deferred_reset(g, ch);
nvgpu_mutex_release(&g->fifo.gr_reset_mutex);
}
@@ -414,11 +414,11 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
/* sync must be destroyed before releasing channel vm */
nvgpu_mutex_acquire(&ch->sync_lock);
if (ch->sync) {
if (ch->sync != NULL) {
nvgpu_channel_sync_destroy(ch->sync, false);
ch->sync = NULL;
}
if (ch->user_sync) {
if (ch->user_sync != NULL) {
/*
* Set user managed syncpoint to safe state
* But it's already done if channel has timedout
@@ -437,7 +437,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
* we need to do this before releasing the address space,
* as the semaphore pool might get freed after that point.
*/
if (ch->hw_sema) {
if (ch->hw_sema != NULL) {
nvgpu_semaphore_free_hw_sema(ch);
}
@@ -665,7 +665,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
trace_gk20a_open_new_channel(ch->chid);
BUG_ON(ch->g);
BUG_ON(ch->g != NULL);
ch->g = g;
/* Runlist for the channel */
@@ -716,7 +716,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
nvgpu_cond_init(&ch->notifier_wq);
nvgpu_cond_init(&ch->semaphore_wq);
if (g->os_channel.open) {
if (g->os_channel.open != NULL) {
g->os_channel.open(ch);
}
@@ -1163,7 +1163,7 @@ int nvgpu_channel_setup_bind(struct channel_gk20a *c,
}
if (args->flags & NVGPU_SETUP_BIND_FLAGS_USERMODE_SUPPORT) {
if (g->os_channel.alloc_usermode_buffers) {
if (g->os_channel.alloc_usermode_buffers != NULL) {
err = g->os_channel.alloc_usermode_buffers(c, args);
if (err != 0) {
nvgpu_err(g, "Usermode buffer alloc failed");
@@ -1272,7 +1272,7 @@ clean_up_prealloc:
channel_gk20a_free_prealloc_resources(c);
}
clean_up_sync:
if (c->sync) {
if (c->sync != NULL) {
nvgpu_channel_sync_destroy(c->sync, false);
c->sync = NULL;
}
@@ -1560,7 +1560,7 @@ static void gk20a_channel_poll_timeouts(struct gk20a *g)
for (chid = 0; chid < g->fifo.num_channels; chid++) {
struct channel_gk20a *ch = &g->fifo.channel[chid];
if (gk20a_channel_get(ch)) {
if (gk20a_channel_get(ch) != NULL) {
gk20a_channel_timeout_check(ch);
gk20a_channel_put(ch);
}
@@ -1705,7 +1705,7 @@ static int gk20a_channel_poll_worker(void *arg)
gk20a_channel_worker_process(g, &get);
}
if (nvgpu_timeout_peek_expired(&timeout)) {
if (nvgpu_timeout_peek_expired(&timeout) != 0) {
gk20a_channel_poll_timeouts(g);
nvgpu_timeout_init(g, &timeout, watchdog_interval,
NVGPU_TIMER_CPU_TIMER);
@@ -1880,7 +1880,7 @@ int gk20a_channel_add_job(struct channel_gk20a *c,
*/
c = gk20a_channel_get(c);
if (c) {
if (c != NULL) {
job->num_mapped_buffers = num_mapped_buffers;
job->mapped_buffers = mapped_buffers;
@@ -2002,7 +2002,7 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
g->os_channel.signal_os_fence_framework(c);
}
if (g->aggressive_sync_destroy_thresh) {
if (g->aggressive_sync_destroy_thresh != 0U) {
nvgpu_mutex_acquire(&c->sync_lock);
if (nvgpu_channel_sync_put_ref_and_check(c->sync)
&& g->aggressive_sync_destroy) {
@@ -2014,7 +2014,7 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
}
}
if (job->num_mapped_buffers) {
if (job->num_mapped_buffers != 0) {
nvgpu_vm_put_buffers(vm, job->mapped_buffers,
job->num_mapped_buffers);
}
@@ -2160,7 +2160,7 @@ void gk20a_channel_deterministic_unidle(struct gk20a *g)
* which we took in deterministic_idle.
*/
if (ch->deterministic && !ch->deterministic_railgate_allowed) {
if (gk20a_busy(g)) {
if (gk20a_busy(g) != 0) {
nvgpu_err(g, "cannot busy() again!");
}
/* Took this in idle() */
@@ -2268,7 +2268,7 @@ int gk20a_channel_suspend(struct gk20a *g)
/* preempt the channel */
gk20a_fifo_preempt(g, ch);
/* wait for channel update notifiers */
if (g->os_channel.work_completion_cancel_sync) {
if (g->os_channel.work_completion_cancel_sync != NULL) {
g->os_channel.work_completion_cancel_sync(ch);
}
@@ -2284,7 +2284,7 @@ int gk20a_channel_suspend(struct gk20a *g)
gk20a_fifo_update_runlist_ids(g, active_runlist_ids, ~0, false, true);
for (chid = 0; chid < f->num_channels; chid++) {
if (gk20a_channel_get(&f->channel[chid])) {
if (gk20a_channel_get(&f->channel[chid]) != NULL) {
g->ops.fifo.unbind_channel(&f->channel[chid]);
gk20a_channel_put(&f->channel[chid]);
}
@@ -2305,7 +2305,7 @@ int gk20a_channel_resume(struct gk20a *g)
nvgpu_log_fn(g, " ");
for (chid = 0; chid < f->num_channels; chid++) {
if (gk20a_channel_get(&f->channel[chid])) {
if (gk20a_channel_get(&f->channel[chid]) != NULL) {
nvgpu_log_info(g, "resume channel %d", chid);
g->ops.fifo.bind_channel(&f->channel[chid]);
channels_in_use = true;
@@ -2337,8 +2337,8 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
for (chid = 0; chid < f->num_channels; chid++) {
struct channel_gk20a *c = g->fifo.channel+chid;
if (gk20a_channel_get(c)) {
if (nvgpu_atomic_read(&c->bound)) {
if (gk20a_channel_get(c) != NULL) {
if (nvgpu_atomic_read(&c->bound) != 0) {
nvgpu_cond_broadcast_interruptible(
&c->semaphore_wq);
if (post_events) {


@@ -58,7 +58,7 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
bool sync_fence = (flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE) != 0U;
bool fence_wait = (flags & NVGPU_SUBMIT_FLAGS_FENCE_WAIT) != 0U;
if (g->aggressive_sync_destroy_thresh) {
if (g->aggressive_sync_destroy_thresh != 0U) {
nvgpu_mutex_acquire(&c->sync_lock);
if (c->sync == NULL) {
c->sync = nvgpu_channel_sync_create(c, false);
@@ -172,7 +172,7 @@ clean_up_post_fence:
gk20a_fence_put(job->post_fence);
job->post_fence = NULL;
clean_up_wait_cmd:
if (job->wait_cmd) {
if (job->wait_cmd != NULL) {
free_priv_cmdbuf(c, job->wait_cmd);
}
if (!pre_alloc_enabled) {
@@ -536,7 +536,7 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
gk20a_fifo_profile_snapshot(profile, PROFILE_JOB_TRACKING);
if (wait_cmd) {
if (wait_cmd != NULL) {
nvgpu_submit_append_priv_cmdbuf(c, wait_cmd);
}
@@ -550,11 +550,11 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
* And here's where we add the incr_cmd we generated earlier. It should
* always run!
*/
if (incr_cmd) {
if (incr_cmd != NULL) {
nvgpu_submit_append_priv_cmdbuf(c, incr_cmd);
}
if (fence_out) {
if (fence_out != NULL) {
*fence_out = gk20a_fence_get(post_fence);
}


@@ -45,14 +45,14 @@ int gm20b_fuse_check_priv_security(struct gk20a *g)
return 0;
}
if (nvgpu_tegra_fuse_read_gcplex_config_fuse(g, &gcplex_config)) {
if (nvgpu_tegra_fuse_read_gcplex_config_fuse(g, &gcplex_config) != 0) {
nvgpu_err(g, "err reading gcplex config fuse, check fuse clk");
return -EINVAL;
}
__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
if (gk20a_readl(g, fuse_opt_priv_sec_en_r())) {
if (gk20a_readl(g, fuse_opt_priv_sec_en_r()) != 0U) {
/*
* all falcons have to boot in LS mode and this needs
* wpr_enabled set to 1 and vpr_auto_fetch_disable
@@ -65,7 +65,7 @@ int gm20b_fuse_check_priv_security(struct gk20a *g)
is_auto_fetch_disable =
(gcplex_config & GCPLEX_CONFIG_VPR_AUTO_FETCH_DISABLE_MASK) != 0U;
if (is_wpr_enabled && !is_auto_fetch_disable) {
if (gk20a_readl(g, fuse_opt_sec_debug_en_r())) {
if (gk20a_readl(g, fuse_opt_sec_debug_en_r()) != 0U) {
nvgpu_log(g, gpu_dbg_info,
"gcplex_config = 0x%08x, "
"secure mode: ACR debug",


@@ -139,7 +139,7 @@ u32 gp106_fuse_read_vin_cal_slope_intercept_fuse(struct gk20a *g,
return -EINVAL;
}
if (fuse_vin_cal_gpc1_delta_icpt_sign_data_v(data)) {
if (fuse_vin_cal_gpc1_delta_icpt_sign_data_v(data) != 0U) {
*intercept = gpc0interceptdata - interceptdata;
} else {
*intercept = gpc0interceptdata + interceptdata;
@@ -172,7 +172,7 @@ u32 gp106_fuse_read_vin_cal_slope_intercept_fuse(struct gk20a *g,
return -EINVAL;
}
if (fuse_vin_cal_gpc1_delta_slope_sign_data_v(data)) {
if (fuse_vin_cal_gpc1_delta_slope_sign_data_v(data) != 0U) {
*slope = gpc0slopedata - slopedata;
} else {
*slope = gpc0slopedata + slopedata;


@@ -46,12 +46,12 @@ int gp10b_fuse_check_priv_security(struct gk20a *g)
return 0;
}
if (nvgpu_tegra_fuse_read_gcplex_config_fuse(g, &gcplex_config)) {
if (nvgpu_tegra_fuse_read_gcplex_config_fuse(g, &gcplex_config) != 0) {
nvgpu_err(g, "err reading gcplex config fuse, check fuse clk");
return -EINVAL;
}
if (gk20a_readl(g, fuse_opt_priv_sec_en_r())) {
if (gk20a_readl(g, fuse_opt_priv_sec_en_r()) != 0U) {
/*
* all falcons have to boot in LS mode and this needs
* wpr_enabled set to 1 and vpr_auto_fetch_disable
@@ -65,7 +65,7 @@ int gp10b_fuse_check_priv_security(struct gk20a *g)
is_auto_fetch_disable =
(gcplex_config & GCPLEX_CONFIG_VPR_AUTO_FETCH_DISABLE_MASK) != 0U;
if (is_wpr_enabled && !is_auto_fetch_disable) {
if (gk20a_readl(g, fuse_opt_sec_debug_en_r())) {
if (gk20a_readl(g, fuse_opt_sec_debug_en_r()) != 0U) {
nvgpu_log(g, gpu_dbg_info,
"gcplex_config = 0x%08x, "
"secure mode: ACR debug",


@@ -180,7 +180,7 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
nvgpu_udelay(5);
} while (nvgpu_timeout_expired(&timeout) == 0);
if (nvgpu_timeout_peek_expired(&timeout)) {
if (nvgpu_timeout_peek_expired(&timeout) != 0) {
nvgpu_err(g, "comp tag clear timeout");
err = -EBUSY;
goto out;


@@ -86,7 +86,7 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
}
/* Already initialized */
if (gr->max_comptag_lines) {
if (gr->max_comptag_lines != 0U) {
return 0;
}
@@ -207,7 +207,7 @@ int gp10b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
nvgpu_udelay(5);
} while (nvgpu_timeout_expired(&timeout) == 0);
if (nvgpu_timeout_peek_expired(&timeout)) {
if (nvgpu_timeout_peek_expired(&timeout) != 0) {
nvgpu_err(g, "comp tag clear timeout");
err = -EBUSY;
goto out;


@@ -161,10 +161,10 @@ void gv11b_ltc_lts_isr(struct gk20a *g, unsigned int ltc, unsigned int slice)
ltc_ltc0_lts0_l2_cache_ecc_status_reset_task_f());
/* update counters per slice */
if (corrected_overflow) {
if (corrected_overflow != 0U) {
corrected_delta += BIT32(ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_s());
}
if (uncorrected_overflow) {
if (uncorrected_overflow != 0U) {
uncorrected_delta += BIT32(ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_s());
}


@@ -76,7 +76,7 @@ int ltc_tu104_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
}
/* Already initialized */
if (gr->max_comptag_lines) {
if (gr->max_comptag_lines != 0U) {
return 0;
}
@@ -205,7 +205,7 @@ int ltc_tu104_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
nvgpu_udelay(5);
} while (!nvgpu_timeout_expired(&timeout));
if (nvgpu_timeout_peek_expired(&timeout)) {
if (nvgpu_timeout_peek_expired(&timeout) != 0) {
nvgpu_err(g, "comp tag clear timeout");
err = -EBUSY;
goto out;


@@ -327,7 +327,7 @@ u32 intr_tu104_stall(struct gk20a *g)
u32 mc_intr_0;
mc_intr_0 = mc_gp10b_intr_stall(g);
if (mc_intr_0) {
if (mc_intr_0 != 0U) {
return mc_intr_0;
}
@@ -368,12 +368,12 @@ void intr_tu104_log_pending_intrs(struct gk20a *g)
u32 intr, i;
intr = intr_tu104_nonstall(g);
if (intr) {
if (intr != 0U) {
nvgpu_info(g, "Pending nonstall intr=0x%08x", intr);
}
intr = mc_gp10b_intr_stall(g);
if (intr) {
if (intr != 0U) {
nvgpu_info(g, "Pending stall intr=0x%08x", intr);
}


@@ -54,7 +54,7 @@ static int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable)
g->blcg_enabled);
}
if (nvgpu_flcn_mem_scrub_wait(pmu->flcn)) {
if (nvgpu_flcn_mem_scrub_wait(pmu->flcn) != 0) {
/* keep PMU falcon/engine in reset
* if IMEM/DMEM scrubbing fails
*/
@@ -567,7 +567,7 @@ int nvgpu_pmu_destroy(struct gk20a *g)
nvgpu_pmu_get_pg_stats(g,
PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &pg_stat_data);
if (nvgpu_pmu_disable_elpg(g)) {
if (nvgpu_pmu_disable_elpg(g) != 0) {
nvgpu_err(g, "failed to set disable elpg");
}
pmu->initialized = false;


@@ -1634,15 +1634,15 @@ static void nvgpu_remove_pmu_support(struct nvgpu_pmu *pmu)
pboardobj->destruct(pboardobj);
}
if (pmu->fw) {
if (pmu->fw != NULL) {
nvgpu_release_firmware(g, pmu->fw);
}
if (g->acr.pmu_fw) {
if (g->acr.pmu_fw != NULL) {
nvgpu_release_firmware(g, g->acr.pmu_fw);
}
if (g->acr.pmu_desc) {
if (g->acr.pmu_desc != NULL) {
nvgpu_release_firmware(g, g->acr.pmu_desc);
}


@@ -192,13 +192,13 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
}
in_size = PMU_CMD_HDR_SIZE;
if (payload->in.buf) {
if (payload->in.buf != NULL) {
in_size += payload->in.offset;
in_size += g->ops.pmu_ver.get_pmu_allocation_struct_size(pmu);
}
out_size = PMU_CMD_HDR_SIZE;
if (payload->out.buf) {
if (payload->out.buf != NULL) {
out_size += payload->out.offset;
out_size += g->ops.pmu_ver.get_pmu_allocation_struct_size(pmu);
}
@@ -414,11 +414,11 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
clean_up:
if (err != 0) {
nvgpu_log_fn(g, "fail");
if (in) {
if (in != NULL) {
nvgpu_free(&pmu->dmem,
pv->pmu_allocation_get_dmem_offset(pmu, in));
}
if (out) {
if (out != NULL) {
nvgpu_free(&pmu->dmem,
pv->pmu_allocation_get_dmem_offset(pmu, out));
}
@@ -526,7 +526,7 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
msg->msg.rc.msg_type == PMU_RC_MSG_TYPE_UNHANDLED_CMD) {
nvgpu_err(g, "unhandled cmd: seq %d", seq->id);
} else if (seq->state != PMU_SEQ_STATE_CANCELLED) {
if (seq->msg) {
if (seq->msg != NULL) {
if (seq->msg->hdr.size >= msg->hdr.size) {
(void) memcpy(seq->msg, msg, msg->hdr.size);
} else {
@@ -584,7 +584,7 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
seq->in_mem = NULL;
}
if (seq->callback) {
if (seq->callback != NULL) {
seq->callback(g, msg, seq->cb_params, seq->desc, ret);
}
@@ -771,7 +771,7 @@ static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
nvgpu_memcpy((u8 *)&rpc, (u8 *)rpc_payload->rpc_buff,
sizeof(struct nv_pmu_rpc_header));
if (rpc.flcn_status) {
if (rpc.flcn_status != 0U) {
nvgpu_err(g, " failed RPC response, status=0x%x, func=0x%x",
rpc.flcn_status, rpc.function);
goto exit;
@@ -963,7 +963,7 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
exit:
if (status != 0) {
if (rpc_payload) {
if (rpc_payload != NULL) {
nvgpu_kfree(g, rpc_payload);
}
}


@@ -31,7 +31,7 @@ int nvgpu_get_timestamps_zipper(struct gk20a *g,
int err = 0;
unsigned int i = 0;
if (gk20a_busy(g)) {
if (gk20a_busy(g) != 0) {
nvgpu_err(g, "GPU not powered on\n");
err = -EINVAL;
goto end;


@@ -33,7 +33,7 @@ void gk20a_ptimer_isr(struct gk20a *g)
u32 save0, save1, fecs_errcode = 0;
save0 = gk20a_readl(g, timer_pri_timeout_save_0_r());
if (timer_pri_timeout_save_0_fecs_tgt_v(save0)) {
if (timer_pri_timeout_save_0_fecs_tgt_v(save0) != 0U) {
/*
* write & addr fields in timeout_save0
* might not be reliable
@@ -52,7 +52,7 @@ void gk20a_ptimer_isr(struct gk20a *g)
gk20a_writel(g, timer_pri_timeout_save_0_r(), 0);
gk20a_writel(g, timer_pri_timeout_save_1_r(), 0);
if (fecs_errcode) {
if (fecs_errcode != 0U) {
nvgpu_err(g, "FECS_ERRCODE 0x%08x", fecs_errcode);
if (g->ops.priv_ring.decode_error_code != NULL) {
g->ops.priv_ring.decode_error_code(g,


@@ -248,7 +248,7 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
}
} while (nvgpu_timeout_expired(&timeout) == 0);
if (nvgpu_timeout_peek_expired(&timeout)) {
if (nvgpu_timeout_peek_expired(&timeout) != 0) {
err_status = -ETIMEDOUT;
goto done;
}
@@ -321,7 +321,7 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
}
} while (nvgpu_timeout_expired(&timeout) == 0);
if (nvgpu_timeout_peek_expired(&timeout)) {
if (nvgpu_timeout_peek_expired(&timeout) != 0) {
err_status = -ETIMEDOUT;
goto done;
}
@@ -357,7 +357,7 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
}
} while (nvgpu_timeout_expired(&timeout) == 0);
if (nvgpu_timeout_peek_expired(&timeout)) {
if (nvgpu_timeout_peek_expired(&timeout) != 0) {
err_status = -ETIMEDOUT;
xv_sc_dbg(g, EXEC_CHANGE, " timeout; pl_link_config = 0x%x",
pl_link_config);


@@ -345,7 +345,7 @@ int prepare_ucode_blob(struct gk20a *g)
struct nvgpu_pmu *pmu = &g->pmu;
struct wpr_carveout_info wpr_inf;
if (g->acr.ucode_blob.cpu_va) {
if (g->acr.ucode_blob.cpu_va != NULL) {
/*Recovery case, we do not need to form
non WPR blob of ucodes*/
err = nvgpu_init_pmu_fw_support(pmu);
@@ -672,7 +672,7 @@ static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm,
* flush any bl args to the storage area relative to the
* ucode image (appended on the end as a DMEM area).
*/
while (pnode) {
while (pnode != NULL) {
/* Flush WPR header to memory*/
nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header),
&pnode->wpr_header, sizeof(pnode->wpr_header));
@@ -798,7 +798,7 @@ static void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
u32 full_app_size = 0;
u32 data = 0;
if (pnode->ucode_img.lsf_desc) {
if (pnode->ucode_img.lsf_desc != NULL) {
(void) memcpy(&pnode->lsb_header.signature,
pnode->ucode_img.lsf_desc,
sizeof(struct lsf_ucode_desc));
@@ -806,7 +806,7 @@ static void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
pnode->lsb_header.ucode_size = pnode->ucode_img.data_size;
/* The remainder of the LSB depends on the loader usage */
if (pnode->ucode_img.header) {
if (pnode->ucode_img.header != NULL) {
/* Does not use a loader */
pnode->lsb_header.data_size = 0;
pnode->lsb_header.bl_code_size = 0;
@@ -923,7 +923,7 @@ static void free_acr_resources(struct gk20a *g, struct ls_flcn_mgr *plsfm)
{
u32 cnt = plsfm->managed_flcn_cnt;
struct lsfm_managed_ucode_img *mg_ucode_img;
while (cnt) {
while (cnt != 0U) {
mg_ucode_img = plsfm->ucode_img_list;
if (mg_ucode_img->ucode_img.lsf_desc->falcon_id ==
LSF_FALCON_ID_PMU) {
@@ -955,7 +955,7 @@ static int lsf_gen_wpr_requirements(struct gk20a *g, struct ls_flcn_mgr *plsfm)
/* Walk the managed falcons, accounting for the LSB structs
as well as the ucode images. */
while (pnode) {
while (pnode != NULL) {
/* Align, save off, and include an LSB header size */
wpr_offset = ALIGN(wpr_offset,
LSF_LSB_HEADER_ALIGNMENT);


@@ -493,7 +493,7 @@ static int clk_enbale_pll_dvfs(struct gk20a *g)
gk20a_writel(g, trim_sys_gpcpll_dvfs1_r(), data);
/* Set VCO_CTRL */
if (p->vco_ctrl) {
if (p->vco_ctrl != 0U) {
data = gk20a_readl(g, trim_sys_gpcpll_cfg3_r());
data = set_field(data, trim_sys_gpcpll_cfg3_vco_ctrl_m(),
trim_sys_gpcpll_cfg3_vco_ctrl_f(p->vco_ctrl));
@@ -501,7 +501,7 @@ static int clk_enbale_pll_dvfs(struct gk20a *g)
}
/* Set NA mode DFS control */
if (p->dfs_ctrl) {
if (p->dfs_ctrl != 0U) {
data = gk20a_readl(g, trim_sys_gpcpll_dvfs1_r());
data = set_field(data, trim_sys_gpcpll_dvfs1_dfs_ctrl_m(),
trim_sys_gpcpll_dvfs1_dfs_ctrl_f(p->dfs_ctrl));
@@ -557,7 +557,7 @@ static int clk_enbale_pll_dvfs(struct gk20a *g)
/* Wait for internal calibration done (spec < 2us). */
do {
data = gk20a_readl(g, trim_sys_gpcpll_dvfs1_r());
if (trim_sys_gpcpll_dvfs1_dfs_cal_done_v(data)) {
if (trim_sys_gpcpll_dvfs1_dfs_cal_done_v(data) != 0U) {
break;
}
nvgpu_udelay(1);
@@ -690,7 +690,7 @@ static int clk_slide_gpc_pll(struct gk20a *g, struct pll *gpll)
ramp_timeout--;
data = gk20a_readl(
g, trim_gpc_bcast_gpcpll_ndiv_slowdown_debug_r());
if (trim_gpc_bcast_gpcpll_ndiv_slowdown_debug_pll_dynramp_done_synced_v(data)) {
if (trim_gpc_bcast_gpcpll_ndiv_slowdown_debug_pll_dynramp_done_synced_v(data) != 0U) {
break;
}
} while (ramp_timeout > 0);
@@ -779,7 +779,7 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
nvgpu_udelay(1);
if (trim_sys_gpcpll_cfg_iddq_v(cfg)) {
if (trim_sys_gpcpll_cfg_iddq_v(cfg) != 0U) {
/* get out from IDDQ (1st power up) */
cfg = set_field(cfg, trim_sys_gpcpll_cfg_iddq_m(),
trim_sys_gpcpll_cfg_iddq_power_on_v());
@@ -961,7 +961,7 @@ static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new,
if (pldiv_only) {
/* Insert interim PLDIV state if necessary */
u32 interim_pl = get_interim_pldiv(g, gpll_new->PL, gpll.PL);
if (interim_pl) {
if (interim_pl != 0U) {
coeff = set_field(coeff,
trim_sys_gpcpll_coeff_pldiv_m(),
trim_sys_gpcpll_coeff_pldiv_f(interim_pl));
@@ -1421,7 +1421,7 @@ static int set_pll_target(struct gk20a *g, u32 freq, u32 old_freq)
if (freq != old_freq) {
/* gpc_pll.freq is changed to new value here */
if (clk_config_pll(clk, &clk->gpc_pll, &gpc_pll_params,
&freq, true)) {
&freq, true) != 0) {
nvgpu_err(g, "failed to set pll target for %d", freq);
return -EINVAL;
}


@@ -77,7 +77,7 @@ static inline u32 gm20b_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id)
engine_info = gk20a_fifo_get_engine_info(g, engine_id);
if (engine_info) {
if (engine_info != NULL) {
fault_id = engine_info->fault_id;
} else {
nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id);
@@ -143,7 +143,7 @@ void gm20b_device_info_data_parse(struct gk20a *g,
{
if (top_device_info_data_type_v(table_entry) ==
top_device_info_data_type_enum2_v()) {
if (pri_base) {
if (pri_base != NULL) {
*pri_base =
(top_device_info_data_pri_base_v(table_entry)
<< top_device_info_data_pri_base_align_v());


@@ -91,7 +91,7 @@ int gp106_alloc_blob_space(struct gk20a *g,
struct wpr_carveout_info wpr_inf;
int err;
if (mem->size) {
if (mem->size != 0ULL) {
return 0;
}
@@ -484,7 +484,7 @@ static u32 lsfm_discover_and_add_sub_wprs(struct gk20a *g,
break;
}
if (size_4K) {
if (size_4K != 0U) {
pnode = nvgpu_kzalloc(g, sizeof(struct lsfm_sub_wpr));
if (pnode == NULL) {
return -ENOMEM;
@@ -511,7 +511,7 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
struct nvgpu_pmu *pmu = &g->pmu;
struct wpr_carveout_info wpr_inf;
if (g->acr.ucode_blob.cpu_va) {
if (g->acr.ucode_blob.cpu_va != NULL) {
/*Recovery case, we do not need to form
non WPR blob of ucodes*/
err = nvgpu_init_pmu_fw_support(pmu);
@@ -851,7 +851,7 @@ static u32 lsfm_init_sub_wpr_contents(struct gk20a *g,
*/
psub_wpr_node = plsfm->psub_wpr_list;
i = 0;
while (psub_wpr_node) {
while (psub_wpr_node != NULL) {
nvgpu_mem_wr_n(g, ucode,
sub_wpr_header_offset + (i * temp_size),
&psub_wpr_node->sub_wpr_header, temp_size);
@@ -890,7 +890,7 @@ void lsfm_init_wpr_contents(struct gk20a *g,
* flush any bl args to the storage area relative to the
* ucode image (appended on the end as a DMEM area).
*/
while (pnode) {
while (pnode != NULL) {
/* Flush WPR header to memory*/
nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header),
&pnode->wpr_header, sizeof(pnode->wpr_header));
@@ -1016,7 +1016,7 @@ void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
u32 full_app_size = 0;
u32 data = 0;
if (pnode->ucode_img.lsf_desc) {
if (pnode->ucode_img.lsf_desc != NULL) {
(void) memcpy(&pnode->lsb_header.signature,
pnode->ucode_img.lsf_desc,
sizeof(struct lsf_ucode_desc_v1));
@@ -1024,7 +1024,7 @@ void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
pnode->lsb_header.ucode_size = pnode->ucode_img.data_size;
/* The remainder of the LSB depends on the loader usage */
if (pnode->ucode_img.header) {
if (pnode->ucode_img.header != NULL) {
/* Does not use a loader */
pnode->lsb_header.data_size = 0;
pnode->lsb_header.bl_code_size = 0;
@@ -1144,7 +1144,7 @@ void free_acr_resources(struct gk20a *g, struct ls_flcn_mgr_v1 *plsfm)
u32 cnt = plsfm->managed_flcn_cnt;
struct lsfm_managed_ucode_img_v2 *mg_ucode_img;
while (cnt) {
while (cnt != 0U) {
mg_ucode_img = plsfm->ucode_img_list;
if (mg_ucode_img->ucode_img.lsf_desc->falcon_id ==
LSF_FALCON_ID_PMU) {
@@ -1194,7 +1194,7 @@ int lsf_gen_wpr_requirements(struct gk20a *g,
/* Walk the managed falcons, accounting for the LSB structs
as well as the ucode images. */
while (pnode) {
while (pnode != NULL) {
/* Align, save off, and include an LSB header size */
wpr_offset = ALIGN(wpr_offset,
LSF_LSB_HEADER_ALIGNMENT);
@@ -1258,7 +1258,7 @@ int lsf_gen_wpr_requirements(struct gk20a *g,
/* Walk through the sub wpr headers to accommodate
* sub wprs in WPR request
*/
while (pnode_sub_wpr) {
while (pnode_sub_wpr != NULL) {
wpr_offset = ALIGN_UP(wpr_offset,
SUB_WPR_SIZE_ALIGNMENT);
pnode_sub_wpr->sub_wpr_header.start_addr = wpr_offset;


@@ -82,7 +82,7 @@ int gp106_bios_devinit(struct gk20a *g)
nvgpu_log_fn(g, " ");
if (nvgpu_flcn_reset(g->pmu.flcn)) {
if (nvgpu_flcn_reset(g->pmu.flcn) != 0) {
err = -ETIMEDOUT;
goto out;
}
@@ -122,7 +122,7 @@ int gp106_bios_devinit(struct gk20a *g)
nvgpu_udelay(PMU_BOOT_TIMEOUT_DEFAULT);
} while (!devinit_completed && (nvgpu_timeout_expired(&timeout) == 0));
if (nvgpu_timeout_peek_expired(&timeout)) {
if (nvgpu_timeout_peek_expired(&timeout) != 0) {
err = -ETIMEDOUT;
}
@@ -138,7 +138,8 @@ int gp106_bios_preos_wait_for_halt(struct gk20a *g)
{
int err = 0;
if (nvgpu_flcn_wait_for_halt(g->pmu.flcn, PMU_BOOT_TIMEOUT_MAX / 1000)) {
if (nvgpu_flcn_wait_for_halt(g->pmu.flcn,
PMU_BOOT_TIMEOUT_MAX / 1000) != 0) {
err = -ETIMEDOUT;
}
@@ -151,7 +152,7 @@ int gp106_bios_preos(struct gk20a *g)
nvgpu_log_fn(g, " ");
if (nvgpu_flcn_reset(g->pmu.flcn)) {
if (nvgpu_flcn_reset(g->pmu.flcn) != 0) {
err = -ETIMEDOUT;
goto out;
}
@@ -263,7 +264,7 @@ int gp106_bios_init(struct gk20a *g)
return 0;
free_firmware:
if (g->bios.data) {
if (g->bios.data != NULL) {
nvgpu_vfree(g, g->bios.data);
}
return err;


@@ -3038,7 +3038,7 @@ static void mclk_seq_pmucmdhandler(struct gk20a *g, struct pmu_msg *_msg,
goto status_update;
}
if (seq_msg->error_code) {
if (seq_msg->error_code != 0U) {
msg_status = -ENOENT;
goto status_update;
}


@@ -153,7 +153,7 @@ int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c)
v = nvgpu_mem_rd32(c->g, &c->inst_block,
ram_fc_allowed_syncpoints_w());
old_syncpt = pbdma_allowed_syncpoints_0_index_v(v);
if (c->sync) {
if (c->sync != NULL) {
sync_syncpt = nvgpu_channel_sync_to_syncpt(c->sync);
if (sync_syncpt != NULL) {
new_syncpt = nvgpu_channel_sync_get_syncpt_id(sync_syncpt);
@@ -208,10 +208,10 @@ void gp10b_device_info_data_parse(struct gk20a *g, u32 table_entry,
{
if (top_device_info_data_type_v(table_entry) ==
top_device_info_data_type_enum2_v()) {
if (inst_id) {
if (inst_id != NULL) {
*inst_id = top_device_info_data_inst_id_v(table_entry);
}
if (pri_base) {
if (pri_base != NULL) {
*pri_base =
(top_device_info_data_pri_base_v(table_entry)
<< top_device_info_data_pri_base_align_v());


@@ -93,7 +93,7 @@ bool gr_gp10b_is_valid_compute_class(struct gk20a *g, u32 class_num)
}
static void gr_gp10b_sm_lrf_ecc_overcount_war(int single_err,
static void gr_gp10b_sm_lrf_ecc_overcount_war(bool single_err,
u32 sed_status,
u32 ded_status,
u32 *count_to_adjust,
@@ -172,11 +172,11 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g,
gk20a_writel(g,
gr_pri_gpc0_tpc0_sm_lrf_ecc_double_err_count_r() + offset,
0);
if (lrf_ecc_sed_status) {
if (lrf_ecc_sed_status != 0U) {
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
"Single bit error detected in SM LRF!");
gr_gp10b_sm_lrf_ecc_overcount_war(1,
gr_gp10b_sm_lrf_ecc_overcount_war(true,
lrf_ecc_sed_status,
lrf_ecc_ded_status,
&lrf_single_count_delta,
@@ -184,11 +184,11 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g,
g->ecc.gr.sm_lrf_ecc_single_err_count[gpc][tpc].counter +=
lrf_single_count_delta;
}
if (lrf_ecc_ded_status) {
if (lrf_ecc_ded_status != 0U) {
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
"Double bit error detected in SM LRF!");
gr_gp10b_sm_lrf_ecc_overcount_war(0,
gr_gp10b_sm_lrf_ecc_overcount_war(false,
lrf_ecc_sed_status,
lrf_ecc_ded_status,
&lrf_double_count_delta,
@@ -433,7 +433,7 @@ int gr_gp10b_commit_global_cb_manager(struct gk20a *g,
gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v()) /
gr_pd_ab_dist_cfg1_max_output_granularity_v();
if (g->gr.pd_max_batches) {
if (g->gr.pd_max_batches != 0U) {
gr_gk20a_ctx_patch_write(g, gr_ctx, gr_pd_ab_dist_cfg1_r(),
gr_pd_ab_dist_cfg1_max_output_f(pd_ab_max_output) |
gr_pd_ab_dist_cfg1_max_batches_f(g->gr.pd_max_batches), patch);
@@ -783,7 +783,7 @@ void gr_gp10b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
gr_gpc0_ppc0_cbm_alpha_cb_size_v_granularity_v() /
gr_pd_ab_dist_cfg1_max_output_granularity_v();
if (g->gr.pd_max_batches) {
if (g->gr.pd_max_batches != 0U) {
gk20a_writel(g, gr_pd_ab_dist_cfg1_r(),
gr_pd_ab_dist_cfg1_max_output_f(pd_ab_max_output) |
gr_pd_ab_dist_cfg1_max_batches_f(g->gr.pd_max_batches));
@@ -1222,13 +1222,13 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
cta_preempt_option);
}
if (gr_ctx->preempt_ctxsw_buffer.gpu_va) {
if (gr_ctx->preempt_ctxsw_buffer.gpu_va != 0ULL) {
u32 addr;
u32 size;
u32 cbes_reserve;
if (g->ops.gr.set_preemption_buffer_va != NULL) {
if (ctxheader->gpu_va) {
if (ctxheader->gpu_va != 0ULL) {
g->ops.gr.set_preemption_buffer_va(g, ctxheader,
gr_ctx->preempt_ctxsw_buffer.gpu_va);
} else {
@@ -1435,7 +1435,7 @@ int gr_gp10b_dump_gr_status_regs(struct gk20a *g,
static bool gr_activity_empty_or_preempted(u32 val)
{
while(val) {
while(val != 0U) {
u32 v = val & 7;
if (v != gr_activity_4_gpc0_empty_v() &&
v != gr_activity_4_gpc0_preempted_v()) {
@@ -1500,7 +1500,7 @@ void gr_gp10b_commit_global_attrib_cb(struct gk20a *g,
{
int attrBufferSize;
if (gr_ctx->preempt_ctxsw_buffer.gpu_va) {
if (gr_ctx->preempt_ctxsw_buffer.gpu_va != 0ULL) {
attrBufferSize = gr_ctx->betacb_ctxsw_buffer.size;
} else {
attrBufferSize = g->ops.gr.calc_global_ctx_buffer_size(g);
@@ -1855,7 +1855,7 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
*early_exit = false;
*ignore_debugger = false;
if (fault_ch) {
if (fault_ch != NULL) {
tsg = tsg_gk20a_from_ch(fault_ch);
if (!tsg) {
return -EINVAL;
@@ -2158,7 +2158,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
nvgpu_mutex_release(&g->dbg_sessions_lock);
if (cilp_preempt_pending_ch) {
if (cilp_preempt_pending_ch != NULL) {
struct tsg_gk20a *tsg;
struct nvgpu_gr_ctx *gr_ctx;
struct nvgpu_timeout timeout;


@@ -218,7 +218,7 @@ static void __update_pte(struct vm_gk20a *vm,
pte_w[0] |= gmmu_new_pte_vol_true_f();
}
if (attrs->ctag) {
if (attrs->ctag != 0ULL) {
attrs->ctag += page_size;
}
@@ -243,7 +243,7 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm,
u32 pd_offset = pd_offset_from_index(l, pd_idx);
u32 pte_w[2] = {0, 0};
if (phys_addr) {
if (phys_addr != 0ULL) {
__update_pte(vm, pte_w, phys_addr, attrs);
} else if (attrs->sparse) {
__update_pte_sparse(pte_w);
@@ -311,7 +311,7 @@ static u32 gp10b_get_pde0_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
U64(gmmu_new_dual_pde_address_small_sys_f(~0)))) <<
U64(gmmu_new_dual_pde_address_shift_v());
if (addr) {
if (addr != 0ULL) {
pgsz = GMMU_PAGE_SIZE_SMALL;
}
}
@@ -324,7 +324,7 @@ static u32 gp10b_get_pde0_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
U64(gmmu_new_dual_pde_address_big_sys_f(~0)))) <<
U64(gmmu_new_dual_pde_address_big_shift_v());
if (addr) {
if (addr != 0ULL) {
/*
* If small is set that means that somehow MM allowed
* both small and big to be set, the PDE is not valid


@@ -73,7 +73,7 @@ struct boardobj {
u8 type; /*type of the device*/
u8 idx; /*index of boardobj within in its group*/
/* true if allocated in constructor. destructor should free */
u8 allocated;
bool allocated;
u32 type_mask; /*mask of types this boardobjimplements*/
boardobj_implements *implements;
boardobj_destruct *destruct;


@@ -381,7 +381,7 @@ void boardobjgrpe32hdrset(struct nv_pmu_boardobjgrp *hdr, u32 objmask);
#define HIGHESTBITIDX_32(n32) \
{ \
u32 count = 0U; \
while ((n32) >>= 1U) { \
while (((n32) >>= 1U) != 0U) { \
count++; \
} \
(n32) = count; \


@@ -314,7 +314,7 @@ u32 nvgpu_lpwr_post_init(struct gk20a *g)
return status;
}
u32 nvgpu_lpwr_is_mscg_supported(struct gk20a *g, u32 pstate_num)
bool nvgpu_lpwr_is_mscg_supported(struct gk20a *g, u32 pstate_num)
{
struct nvgpu_lpwr_bios_ms_data *pms_data =
&g->perf_pmu.lpwr.lwpr_bios_data.ms;
@@ -326,18 +326,18 @@ u32 nvgpu_lpwr_is_mscg_supported(struct gk20a *g, u32 pstate_num)
nvgpu_log_fn(g, " ");
if (!pstate) {
return 0;
return false;
}
ms_idx = pidx_data->entry[pstate->lpwr_entry_idx].ms_idx;
if (pms_data->entry[ms_idx].ms_enabled) {
return 1;
return true;
} else {
return 0;
return false;
}
}
u32 nvgpu_lpwr_is_rppg_supported(struct gk20a *g, u32 pstate_num)
bool nvgpu_lpwr_is_rppg_supported(struct gk20a *g, u32 pstate_num)
{
struct nvgpu_lpwr_bios_gr_data *pgr_data =
&g->perf_pmu.lpwr.lwpr_bios_data.gr;
@@ -349,14 +349,14 @@ u32 nvgpu_lpwr_is_rppg_supported(struct gk20a *g, u32 pstate_num)
nvgpu_log_fn(g, " ");
if (!pstate) {
return 0;
return false;
}
idx = pidx_data->entry[pstate->lpwr_entry_idx].gr_idx;
if (pgr_data->entry[idx].gr_enabled) {
return 1;
return true;
} else {
return 0;
return false;
}
}
@@ -365,8 +365,8 @@ int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock)
{
struct nvgpu_pmu *pmu = &g->pmu;
u32 status = 0;
u32 is_mscg_supported = 0;
u32 is_rppg_supported = 0;
bool is_mscg_supported = false;
bool is_rppg_supported = false;
u32 present_pstate = 0;
nvgpu_log_fn(g, " ");
@@ -406,8 +406,8 @@ int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock)
{
struct nvgpu_pmu *pmu = &g->pmu;
int status = 0;
u32 is_mscg_supported = 0;
u32 is_rppg_supported = 0;
bool is_mscg_supported = false;
bool is_rppg_supported = false;
u32 present_pstate = 0;
nvgpu_log_fn(g, " ");
@@ -433,7 +433,7 @@ int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock)
is_mscg_supported = nvgpu_lpwr_is_mscg_supported(g,
present_pstate);
if (is_mscg_supported && g->mscg_enabled) {
if (pmu->mscg_stat) {
if (pmu->mscg_stat != 0U) {
pmu->mscg_stat = PMU_MSCG_DISABLED;
}
}


@@ -94,8 +94,8 @@ int nvgpu_lpwr_pg_setup(struct gk20a *g);
int nvgpu_lwpr_mclk_change(struct gk20a *g, u32 pstate);
int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock);
int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock);
u32 nvgpu_lpwr_is_mscg_supported(struct gk20a *g, u32 pstate_num);
u32 nvgpu_lpwr_is_rppg_supported(struct gk20a *g, u32 pstate_num);
bool nvgpu_lpwr_is_mscg_supported(struct gk20a *g, u32 pstate_num);
bool nvgpu_lpwr_is_rppg_supported(struct gk20a *g, u32 pstate_num);
u32 nvgpu_lpwr_post_init(struct gk20a *g);
#endif /* NVGPU_LPWR_H */


@@ -376,7 +376,7 @@ static int pmgr_send_pwr_policy_to_pmu(struct gk20a *g)
}
exit:
if (ppwrpack) {
if (ppwrpack != NULL) {
nvgpu_kfree(g, ppwrpack);
}


@@ -104,7 +104,7 @@ static int therm_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
goto exit;
}
if (cb_param) {
if (cb_param != NULL) {
handlerparams = (struct therm_pmucmdhandler_params*)cb_param;
pmu_wait_message_cond(&g->pmu,


@@ -407,7 +407,7 @@ void tu104_sec2_isr(struct gk20a *g)
}
if ((intr & psec_falcon_irqstat_swgen0_true_f()) != 0U) {
if (nvgpu_sec2_process_message(sec2)) {
if (nvgpu_sec2_process_message(sec2) != 0) {
gk20a_writel(g, psec_falcon_irqsclr_r(), intr);
goto exit;
}