gpu: nvgpu: MISRA 10.1 boolean fixes

MISRA rule 10.1 does not allow non-boolean variables to be used as
booleans.

Fix violations where a variable of a non-boolean type is used as a
boolean, and change a few instances of BIT() to BIT32() or BIT64().
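
The rewrite is mechanical. A minimal sketch of the patterns involved
(names are illustrative, not taken from the patch; BIT() is assumed to
be the usual 1UL-based helper):

    u32 val = 0U;
    u32 mask = 0U;
    int err = 0;
    unsigned int i = 0U;

    if (!val) { }                       /* before: u32 tested as boolean */
    if (val == 0U) { }                  /* after: explicit comparison */

    if (err || (val & mask)) { }                  /* before */
    if ((err != 0) || ((val & mask) != 0U)) { }   /* after */

    mask |= BIT(i);                     /* before: BIT() is unsigned long */
    mask |= BIT32(i);                   /* after: BIT32() matches the u32 mask */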

JIRA NVGPU-646

Change-Id: I100606a69717c12839aa9c35e7bf6c18749db56e
Signed-off-by: Amulya <Amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1809836
GVS: Gerrit_Virtual_Submit
Tested-by: Amulya Murthyreddy <amurthyreddy@nvidia.com>
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Authored by Amulya on 2018-08-30 14:01:57 +05:30,
committed by mobile promotions
parent 25ef9f869b
commit 999eabbcd7
34 changed files with 332 additions and 219 deletions


@@ -256,7 +256,7 @@ int boardobjgrp_pmucmd_pmuinithandle_impl(struct gk20a *g,
goto boardobjgrp_pmucmd_pmuinithandle_exit;
}
if (!pcmd->fbsize) {
if (pcmd->fbsize == 0U) {
goto boardobjgrp_pmucmd_pmuinithandle_exit;
}
@@ -294,7 +294,7 @@ int boardobjgrp_pmuinithandle_impl(struct gk20a *g,
/* If the GRP_SET CMD has not been allocated, nothing left to do. */
if ((g->ops.pmu_ver.boardobj.is_boardobjgrp_pmucmd_id_valid(g,
pboardobjgrp, &pboardobjgrp->pmu.set))||
pboardobjgrp, &pboardobjgrp->pmu.set) != 0)||
(BOARDOBJGRP_IS_EMPTY(pboardobjgrp))) {
goto boardobjgrp_pmuinithandle_exit;
}


@@ -56,14 +56,16 @@ int bus_tu104_bar2_bind(struct gk20a *g, struct nvgpu_mem *bar2_inst)
do {
u32 val = nvgpu_func_readl(g,
func_priv_bind_status_r());
u32 pending = bus_bind_status_bar2_pending_v(val);
u32 outstanding = bus_bind_status_bar2_outstanding_v(val);
bool pending = (bus_bind_status_bar2_pending_v(val) ==
bus_bind_status_bar2_pending_busy_v());
bool outstanding = (bus_bind_status_bar2_outstanding_v(val) ==
bus_bind_status_bar2_outstanding_true_v());
if (!pending && !outstanding) {
break;
}
nvgpu_udelay(5);
} while (!nvgpu_timeout_expired(&timeout));
} while (nvgpu_timeout_expired(&timeout) == 0);
if (nvgpu_timeout_peek_expired(&timeout) != 0) {
err = -EINVAL;


@@ -101,8 +101,8 @@ int gm20b_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
break;
}
nvgpu_udelay(2);
} while (!nvgpu_timeout_expired_msg(&timeout,
"wait mmu fifo space"));
} while (nvgpu_timeout_expired_msg(&timeout,
"wait mmu fifo space") == 0);
if (nvgpu_timeout_peek_expired(&timeout) != 0) {
err = -ETIMEDOUT;
@@ -129,8 +129,8 @@ int gm20b_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
break;
}
nvgpu_udelay(2);
} while (!nvgpu_timeout_expired_msg(&timeout,
"wait mmu invalidate"));
} while (nvgpu_timeout_expired_msg(&timeout,
"wait mmu invalidate") == 0);
trace_gk20a_mm_tlb_invalidate_done(g->name);


@@ -369,8 +369,8 @@ int fb_tu104_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
break;
}
nvgpu_udelay(2);
} while (!nvgpu_timeout_expired_msg(&timeout,
"wait mmu invalidate"));
} while (nvgpu_timeout_expired_msg(&timeout,
"wait mmu invalidate") == 0);
trace_gk20a_mm_tlb_invalidate_done(g->name);
@@ -413,8 +413,8 @@ int fb_tu104_mmu_invalidate_replay(struct gk20a *g,
break;
}
nvgpu_udelay(5);
} while (!nvgpu_timeout_expired_msg(&timeout,
"invalidate replay failed on 0x%llx"));
} while (nvgpu_timeout_expired_msg(&timeout,
"invalidate replay failed on 0x%llx") == 0);
if (err != 0) {
nvgpu_err(g, "invalidate replay timedout");
}
@@ -487,7 +487,7 @@ static int tu104_fb_wait_mmu_bind(struct gk20a *g)
return 0;
}
nvgpu_udelay(2);
} while (!nvgpu_timeout_expired_msg(&timeout, "mmu bind timedout"));
} while (nvgpu_timeout_expired_msg(&timeout, "mmu bind timedout") == 0);
return -ETIMEDOUT;
}


@@ -147,11 +147,11 @@ void gv11b_ltc_lts_isr(struct gk20a *g, unsigned int ltc, unsigned int slice)
ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_total_counter_overflow_m();
/* clear the interrupt */
if ((corrected_delta > 0U) || corrected_overflow) {
if ((corrected_delta > 0U) || (corrected_overflow != 0U)) {
nvgpu_writel_check(g,
ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r() + offset, 0);
}
if ((uncorrected_delta > 0U) || uncorrected_overflow) {
if ((uncorrected_delta > 0U) || (uncorrected_overflow != 0U)) {
nvgpu_writel_check(g,
ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r() + offset, 0);
}
@@ -192,7 +192,7 @@ void gv11b_ltc_lts_isr(struct gk20a *g, unsigned int ltc, unsigned int slice)
nvgpu_log(g, gpu_dbg_intr, "dstg ecc error uncorrected");
}
if (corrected_overflow || uncorrected_overflow) {
if ((corrected_overflow != 0U) || (uncorrected_overflow != 0U)) {
nvgpu_info(g, "ecc counter overflow!");
}


@@ -199,11 +199,11 @@ int ltc_tu104_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
NVGPU_TIMER_RETRY_TIMER);
do {
val = nvgpu_readl(g, ctrl1);
if (!(val & hw_op)) {
if ((val & hw_op) == 0U) {
break;
}
nvgpu_udelay(5);
} while (!nvgpu_timeout_expired(&timeout));
} while (nvgpu_timeout_expired(&timeout) == 0);
if (nvgpu_timeout_peek_expired(&timeout) != 0) {
nvgpu_err(g, "comp tag clear timeout");


@@ -144,7 +144,8 @@ void mc_gp10b_isr_stall(struct gk20a *g)
g->ops.mc.is_intr_nvlink_pending(g, mc_intr_0)) {
g->ops.nvlink.isr(g);
}
if (mc_intr_0 & mc_intr_pfb_pending_f() && g->ops.mc.fbpa_isr) {
if ((mc_intr_0 & mc_intr_pfb_pending_f()) != 0U &&
(g->ops.mc.fbpa_isr != NULL)) {
g->ops.mc.fbpa_isr(g);
}


@@ -404,7 +404,7 @@ void mc_tu104_fbpa_isr(struct gk20a *g)
num_fbpas = nvgpu_get_litter_value(g, GPU_LIT_NUM_FBPAS);
for (i = 0u; i < num_fbpas; i++) {
if (!(fbpas & (1 << i))) {
if ((fbpas & BIT32(i)) == 0U) {
continue;
}
g->ops.fb.handle_fbpa_intr(g, i);
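
BIT32() and BIT64() return a mask of the matching essential type, so the
(... == 0U) comparison above is well-typed. Assuming nvgpu's usual cast
helpers from <nvgpu/types.h> (a sketch, not copied from this tree), they
amount to:

    #define U32(v)   ((u32)(v))
    #define U64(v)   ((u64)(v))
    #define BIT32(i) (U32(1) << (i))
    #define BIT64(i) (U64(1) << (i))

The same U32() wrapper shows up below in the hw_bus_*.h headers, where
0x1U << n becomes U32(0x1U) << n.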


@@ -77,9 +77,9 @@ nvgpu_buddy_from_rbtree_node(struct nvgpu_rbtree_node *node)
};
#define __buddy_flag_ops(flag, flag_up) \
static inline int buddy_is_ ## flag(struct nvgpu_buddy *b) \
static inline bool buddy_is_ ## flag(struct nvgpu_buddy *b) \
{ \
return b->flags & BALLOC_BUDDY_ ## flag_up; \
return (b->flags & BALLOC_BUDDY_ ## flag_up) != 0U; \
} \
static inline void buddy_set_ ## flag(struct nvgpu_buddy *b) \
{ \
@@ -91,15 +91,15 @@ nvgpu_buddy_from_rbtree_node(struct nvgpu_rbtree_node *node)
}
/*
* int buddy_is_alloced(struct nvgpu_buddy *b);
* bool buddy_is_alloced(struct nvgpu_buddy *b);
* void buddy_set_alloced(struct nvgpu_buddy *b);
* void buddy_clr_alloced(struct nvgpu_buddy *b);
*
* int buddy_is_split(struct nvgpu_buddy *b);
* bool buddy_is_split(struct nvgpu_buddy *b);
* void buddy_set_split(struct nvgpu_buddy *b);
* void buddy_clr_split(struct nvgpu_buddy *b);
*
* int buddy_is_in_list(struct nvgpu_buddy *b);
* bool buddy_is_in_list(struct nvgpu_buddy *b);
* void buddy_set_in_list(struct nvgpu_buddy *b);
* void buddy_clr_in_list(struct nvgpu_buddy *b);
*/
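
For reference, after this change an invocation such as
__buddy_flag_ops(alloced, ALLOCED) expands to a bool predicate, roughly:

    static inline bool buddy_is_alloced(struct nvgpu_buddy *b)
    {
        return (b->flags & BALLOC_BUDDY_ALLOCED) != 0U;
    }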


@@ -138,7 +138,7 @@ int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
mem->gpu_va = nvgpu_gmmu_map(vm, mem, size, 0,
gk20a_mem_flag_none, false,
mem->aperture);
if (!mem->gpu_va) {
if (mem->gpu_va == 0ULL) {
err = -ENOMEM;
goto fail_free;
}
@@ -169,7 +169,7 @@ int nvgpu_dma_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
mem->gpu_va = nvgpu_gmmu_map(vm, mem, size, 0,
gk20a_mem_flag_none, false,
mem->aperture);
if (!mem->gpu_va) {
if (mem->gpu_va == 0ULL) {
err = -ENOMEM;
goto fail_free;
}


@@ -326,12 +326,12 @@ void gv11b_pmu_handle_ext_irq(struct gk20a *g, u32 intr0)
pwr_pmu_falcon_ecc_status_corrected_err_total_counter_overflow_m();
/* clear the interrupt */
if ((intr1 & pwr_pmu_ecc_intr_status_corrected_m()) ||
corrected_overflow) {
if (((intr1 & pwr_pmu_ecc_intr_status_corrected_m()) != 0U) ||
(corrected_overflow != 0U)) {
gk20a_writel(g, pwr_pmu_falcon_ecc_corrected_err_count_r(), 0);
}
if ((intr1 & pwr_pmu_ecc_intr_status_uncorrected_m()) ||
uncorrected_overflow) {
if (((intr1 & pwr_pmu_ecc_intr_status_uncorrected_m()) != 0U) ||
(uncorrected_overflow != 0U)) {
gk20a_writel(g,
pwr_pmu_falcon_ecc_uncorrected_err_count_r(), 0);
}
@@ -370,7 +370,7 @@ void gv11b_pmu_handle_ext_irq(struct gk20a *g, u32 intr0)
"dmem ecc error uncorrected");
}
if (corrected_overflow || uncorrected_overflow) {
if ((corrected_overflow != 0U) || (uncorrected_overflow != 0U)) {
nvgpu_info(g, "ecc counter overflow!");
}


@@ -127,7 +127,7 @@ static int sec2_write_cmd(struct nvgpu_sec2 *sec2,
do {
err = nvgpu_flcn_queue_push(&g->sec2_flcn, queue, cmd,
cmd->hdr.size);
if (err == -EAGAIN && !nvgpu_timeout_expired(&timeout)) {
if ((err == -EAGAIN) && (nvgpu_timeout_expired(&timeout) == 0)) {
nvgpu_usleep_range(1000U, 2000U);
} else {
break;
@@ -255,7 +255,7 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
err = nvgpu_flcn_queue_pop(sec2->flcn, queue, &msg->hdr,
PMU_MSG_HDR_SIZE, &bytes_read);
if (err || bytes_read != PMU_MSG_HDR_SIZE) {
if ((err != 0) || (bytes_read != PMU_MSG_HDR_SIZE)) {
nvgpu_err(g, "fail to read msg from queue %d", queue->id);
*status = err | -EINVAL;
goto clean_up;
@@ -272,7 +272,7 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
/* read again after rewind */
err = nvgpu_flcn_queue_pop(sec2->flcn, queue, &msg->hdr,
PMU_MSG_HDR_SIZE, &bytes_read);
if (err || bytes_read != PMU_MSG_HDR_SIZE) {
if ((err != 0) || (bytes_read != PMU_MSG_HDR_SIZE)) {
nvgpu_err(g,
"fail to read msg from queue %d", queue->id);
*status = err | -EINVAL;
@@ -291,7 +291,7 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
read_size = msg->hdr.size - PMU_MSG_HDR_SIZE;
err = nvgpu_flcn_queue_pop(sec2->flcn, queue, &msg->msg,
read_size, &bytes_read);
if (err || bytes_read != read_size) {
if ((err != 0) || (bytes_read != read_size)) {
nvgpu_err(g,
"fail to read msg from queue %d", queue->id);
*status = err;
@@ -421,7 +421,7 @@ int nvgpu_sec2_wait_message_cond(struct nvgpu_sec2 *sec2, u32 timeout_ms,
nvgpu_usleep_range(delay, delay * 2U);
delay = min_t(u32, delay << 1U, GR_IDLE_CHECK_MAX);
} while (!nvgpu_timeout_expired(&timeout));
} while (nvgpu_timeout_expired(&timeout) == 0);
return -ETIMEDOUT;
}


@@ -161,12 +161,12 @@ int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c)
new_syncpt = -EINVAL;
}
}
if (new_syncpt && new_syncpt != old_syncpt) {
if ((new_syncpt != 0U) && (new_syncpt != old_syncpt)) {
/* disable channel */
gk20a_disable_channel_tsg(c->g, c);
/* preempt the channel */
WARN_ON(gk20a_fifo_preempt(c->g, c));
WARN_ON(gk20a_fifo_preempt(c->g, c) != 0);
v = pbdma_allowed_syncpoints_0_valid_f(1);
@@ -217,8 +217,9 @@ void gp10b_device_info_data_parse(struct gk20a *g, u32 table_entry,
<< top_device_info_data_pri_base_align_v());
nvgpu_log_info(g, "device info: pri_base: %d", *pri_base);
}
if (fault_id && (top_device_info_data_fault_id_v(table_entry) ==
top_device_info_data_fault_id_valid_v())) {
if ((fault_id != NULL) &&
(top_device_info_data_fault_id_v(table_entry) ==
top_device_info_data_fault_id_valid_v())) {
*fault_id =
g->ops.fifo.device_info_fault_id(table_entry);
nvgpu_log_info(g, "device info: fault_id: %d", *fault_id);


@@ -117,7 +117,7 @@ static void gr_gp10b_sm_lrf_ecc_overcount_war(bool single_err,
/* If both a SBE and a DBE occur on the same partition, then we have an
overcount for the subpartition if the opposite error counts are
zero. */
if ((sed_status & ded_status) && (opposite_count == 0)) {
if (((sed_status & ded_status) != 0U) && (opposite_count == 0U)) {
over_count +=
hweight32(sed_status & ded_status);
}
@@ -203,13 +203,13 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g,
shm_ecc_status = gk20a_readl(g,
gr_pri_gpc0_tpc0_sm_shm_ecc_status_r() + offset);
if ((shm_ecc_status &
gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_corrected_shm0_pending_f()) ||
gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_corrected_shm0_pending_f()) != 0U ||
(shm_ecc_status &
gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_corrected_shm1_pending_f()) ||
gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_corrected_shm1_pending_f()) != 0U ||
(shm_ecc_status &
gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_detected_shm0_pending_f()) ||
gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_detected_shm0_pending_f()) != 0U ||
(shm_ecc_status &
gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_detected_shm1_pending_f()) ) {
gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_detected_shm1_pending_f()) != 0U) {
u32 ecc_stats_reg_val;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
@@ -228,10 +228,10 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g,
gr_pri_gpc0_tpc0_sm_shm_ecc_err_count_r() + offset,
ecc_stats_reg_val);
}
if ( (shm_ecc_status &
gr_pri_gpc0_tpc0_sm_shm_ecc_status_double_err_detected_shm0_pending_f()) ||
if ((shm_ecc_status &
gr_pri_gpc0_tpc0_sm_shm_ecc_status_double_err_detected_shm0_pending_f()) != 0U ||
(shm_ecc_status &
gr_pri_gpc0_tpc0_sm_shm_ecc_status_double_err_detected_shm1_pending_f()) ) {
gr_pri_gpc0_tpc0_sm_shm_ecc_status_double_err_detected_shm1_pending_f()) != 0U) {
u32 ecc_stats_reg_val;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
@@ -410,7 +410,7 @@ int gr_gp10b_commit_global_cb_manager(struct gk20a *g,
nvgpu_log_fn(g, " ");
tsg = tsg_gk20a_from_ch(c);
if (!tsg) {
if (tsg == NULL) {
return -EINVAL;
}
@@ -744,7 +744,7 @@ void gr_gp10b_cb_size_default(struct gk20a *g)
{
struct gr_gk20a *gr = &g->gr;
if (!gr->attrib_cb_default_size) {
if (gr->attrib_cb_default_size == 0U) {
gr->attrib_cb_default_size = 0x800;
}
gr->alpha_cb_default_size =
@@ -900,7 +900,7 @@ int gr_gp10b_init_ctx_state(struct gk20a *g)
return err;
}
if (!g->gr.ctx_vars.preempt_image_size) {
if (g->gr.ctx_vars.preempt_image_size == 0U) {
op.method.addr =
gr_fecs_method_push_adr_discover_preemption_image_size_v();
op.mailbox.ret = &g->gr.ctx_vars.preempt_image_size;
@@ -940,7 +940,7 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
false,
mem->aperture);
if (!mem->gpu_va) {
if (mem->gpu_va == 0ULL) {
err = -ENOMEM;
goto fail_free;
}
@@ -971,7 +971,7 @@ int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
}
/* check for invalid combinations */
if ((graphics_preempt_mode == 0) && (compute_preempt_mode == 0)) {
if ((graphics_preempt_mode == 0U) && (compute_preempt_mode == 0U)) {
return -EINVAL;
}
@@ -981,13 +981,13 @@ int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
}
/* Do not allow lower preemption modes than current ones */
if (graphics_preempt_mode &&
(graphics_preempt_mode < gr_ctx->graphics_preempt_mode)) {
if ((graphics_preempt_mode != 0U) &&
(graphics_preempt_mode < gr_ctx->graphics_preempt_mode)) {
return -EINVAL;
}
if (compute_preempt_mode &&
(compute_preempt_mode < gr_ctx->compute_preempt_mode)) {
if ((compute_preempt_mode != 0U) &&
(compute_preempt_mode < gr_ctx->compute_preempt_mode)) {
return -EINVAL;
}
@@ -1107,7 +1107,7 @@ int gr_gp10b_alloc_gr_ctx(struct gk20a *g,
compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
}
if (graphics_preempt_mode || compute_preempt_mode) {
if ((graphics_preempt_mode != 0U) || (compute_preempt_mode != 0U)) {
if (g->ops.gr.set_ctxsw_preemption_mode != NULL) {
err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm,
class, graphics_preempt_mode, compute_preempt_mode);
@@ -1195,7 +1195,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
nvgpu_log_fn(g, " ");
tsg = tsg_gk20a_from_ch(c);
if (!tsg) {
if (tsg == NULL) {
return;
}
@@ -1346,7 +1346,7 @@ int gr_gp10b_dump_gr_status_regs(struct gk20a *g,
gk20a_readl(g, gr_pri_gpc0_gpccs_gpc_activity3_r()));
gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC0_TPCCS_TPC_ACTIVITY0: 0x%x\n",
gk20a_readl(g, gr_pri_gpc0_tpc0_tpccs_tpc_activity_0_r()));
if (gr->gpc_tpc_count && gr->gpc_tpc_count[0] == 2) {
if ((gr->gpc_tpc_count != NULL) && (gr->gpc_tpc_count[0] == 2U)) {
gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC1_TPCCS_TPC_ACTIVITY0: 0x%x\n",
gk20a_readl(g, gr_pri_gpc0_tpc1_tpccs_tpc_activity_0_r()));
}
@@ -1362,7 +1362,7 @@ int gr_gp10b_dump_gr_status_regs(struct gk20a *g,
gk20a_readl(g, gr_pri_gpcs_gpccs_gpc_activity_3_r()));
gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPC0_TPCCS_TPC_ACTIVITY0: 0x%x\n",
gk20a_readl(g, gr_pri_gpcs_tpc0_tpccs_tpc_activity_0_r()));
if (gr->gpc_tpc_count && gr->gpc_tpc_count[0] == 2) {
if ((gr->gpc_tpc_count != NULL) && (gr->gpc_tpc_count[0] == 2U)) {
gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPC1_TPCCS_TPC_ACTIVITY0: 0x%x\n",
gk20a_readl(g, gr_pri_gpcs_tpc1_tpccs_tpc_activity_0_r()));
}
@@ -1485,7 +1485,7 @@ int gr_gp10b_wait_empty(struct gk20a *g, unsigned long duration_ms,
nvgpu_usleep_range(delay, delay * 2);
delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
} while (!nvgpu_timeout_expired(&timeout));
} while (nvgpu_timeout_expired(&timeout) == 0);
nvgpu_err(g,
"timeout, ctxsw busy : %d, gr busy : %d, %08x, %08x, %08x, %08x",
@@ -1565,7 +1565,7 @@ int gr_gp10b_load_smid_config(struct gk20a *g)
u32 max_gpcs = nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS);
tpc_sm_id = nvgpu_kcalloc(g, gr_cwd_sm_id__size_1_v(), sizeof(u32));
if (!tpc_sm_id) {
if (tpc_sm_id == NULL) {
return -ENOMEM;
}
@@ -1733,7 +1733,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
tsg = tsg_gk20a_from_ch(fault_ch);
if (!tsg) {
if (tsg == NULL) {
return -EINVAL;
}
@@ -1814,7 +1814,7 @@ static int gr_gp10b_clear_cilp_preempt_pending(struct gk20a *g,
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
tsg = tsg_gk20a_from_ch(fault_ch);
if (!tsg) {
if (tsg == NULL) {
return -EINVAL;
}
@@ -1857,7 +1857,7 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
if (fault_ch != NULL) {
tsg = tsg_gk20a_from_ch(fault_ch);
if (!tsg) {
if (tsg == NULL) {
return -EINVAL;
}
@@ -1959,12 +1959,12 @@ static int gr_gp10b_get_cilp_preempt_pending_chid(struct gk20a *g, int *__chid)
chid = g->gr.cilp_preempt_pending_chid;
ch = gk20a_channel_get(gk20a_fifo_channel_from_chid(g, chid));
if (!ch) {
if (ch == NULL) {
return ret;
}
tsg = tsg_gk20a_from_ch(ch);
if (!tsg) {
if (tsg == NULL) {
return -EINVAL;
}
@@ -2011,7 +2011,7 @@ int gr_gp10b_handle_fecs_error(struct gk20a *g,
ch = gk20a_channel_get(
gk20a_fifo_channel_from_chid(g, chid));
if (!ch) {
if (ch == NULL) {
goto clean_up;
}
@@ -2047,7 +2047,7 @@ u32 gp10b_gr_get_sm_hww_warp_esr(struct gk20a *g,
u32 hww_warp_esr = gk20a_readl(g,
gr_gpc0_tpc0_sm_hww_warp_esr_r() + offset);
if (!(hww_warp_esr & gr_gpc0_tpc0_sm_hww_warp_esr_addr_valid_m())) {
if ((hww_warp_esr & gr_gpc0_tpc0_sm_hww_warp_esr_addr_valid_m()) == 0U) {
hww_warp_esr = set_field(hww_warp_esr,
gr_gpc0_tpc0_sm_hww_warp_esr_addr_error_type_m(),
gr_gpc0_tpc0_sm_hww_warp_esr_addr_error_type_none_f());
@@ -2080,7 +2080,7 @@ bool gr_gp10b_suspend_context(struct channel_gk20a *ch,
int err = 0;
tsg = tsg_gk20a_from_ch(ch);
if (!tsg) {
if (tsg == NULL) {
return -EINVAL;
}
@@ -2168,7 +2168,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
gk20a_get_gr_idle_timeout(g));
tsg = tsg_gk20a_from_ch(cilp_preempt_pending_ch);
if (!tsg) {
if (tsg == NULL) {
err = -EINVAL;
goto clean_up;
}
@@ -2184,7 +2184,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
nvgpu_usleep_range(delay, delay * 2);
delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
} while (!nvgpu_timeout_expired(&timeout));
} while (nvgpu_timeout_expired(&timeout) == 0);
/* If cilp is still pending at this point, timeout */
if (gr_ctx->cilp_preempt_pending) {
@@ -2208,7 +2208,7 @@ int gr_gp10b_set_boosted_ctx(struct channel_gk20a *ch,
int err = 0;
tsg = tsg_gk20a_from_ch(ch);
if (!tsg) {
if (tsg == NULL) {
return -EINVAL;
}
@@ -2260,12 +2260,12 @@ int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
int err = 0;
class = ch->obj_class;
if (!class) {
if (class == 0U) {
return -EINVAL;
}
tsg = tsg_gk20a_from_ch(ch);
if (!tsg) {
if (tsg == NULL) {
return -EINVAL;
}
@@ -2274,17 +2274,17 @@ int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
mem = &gr_ctx->mem;
/* skip setting anything if both modes are already set */
if (graphics_preempt_mode &&
(graphics_preempt_mode == gr_ctx->graphics_preempt_mode)) {
if ((graphics_preempt_mode != 0U) &&
(graphics_preempt_mode == gr_ctx->graphics_preempt_mode)) {
graphics_preempt_mode = 0;
}
if (compute_preempt_mode &&
(compute_preempt_mode == gr_ctx->compute_preempt_mode)) {
if ((compute_preempt_mode != 0U) &&
(compute_preempt_mode == gr_ctx->compute_preempt_mode)) {
compute_preempt_mode = 0;
}
if (graphics_preempt_mode == 0 && compute_preempt_mode == 0) {
if ((graphics_preempt_mode == 0U) && (compute_preempt_mode == 0U)) {
return 0;
}


@@ -59,7 +59,7 @@ int gp10b_init_bar2_vm(struct gk20a *g)
mm->bar2.vm = nvgpu_vm_init(g, big_page_size, SZ_4K,
mm->bar2.aperture_size - SZ_4K,
mm->bar2.aperture_size, false, false, "bar2");
if (!mm->bar2.vm) {
if (mm->bar2.vm == NULL) {
return -ENOMEM;
}
@@ -292,7 +292,7 @@ static u32 gp10b_get_pde0_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
u32 i;
u32 pgsz = GMMU_NR_PAGE_SIZES;
if (!pd->mem) {
if (pd->mem == NULL) {
return pgsz;
}


@@ -70,7 +70,7 @@ static int gv100_pmu_handle_perf_event(struct gk20a *g, void *pmumsg)
nvgpu_cond_signal(&perf_pmu->vfe_init.wq);
break;
default:
WARN_ON(1);
WARN_ON(true);
break;
}
return 0;


@@ -352,10 +352,11 @@ void gv11b_dump_channel_status_ramfc(struct gk20a *g,
info->refs,
info->deterministic ? ", deterministic" : "");
gk20a_debug_output(o, "channel status: %s in use %s %s\n",
ccsr_channel_enable_v(info->channel_reg) ? "" : "not",
(ccsr_channel_enable_v(info->channel_reg) ==
ccsr_channel_enable_in_use_v()) ? "" : "not",
gk20a_decode_ccsr_chan_status(status),
ccsr_channel_busy_v(info->channel_reg) ?
"busy" : "not busy");
(ccsr_channel_busy_v(info->channel_reg) ==
ccsr_channel_busy_true_v()) ? "busy" : "not busy");
gk20a_debug_output(o,
"RAMFC : TOP: %016llx PUT: %016llx GET: %016llx "
"FETCH: %016llx\n"
@@ -398,11 +399,11 @@ void gv11b_dump_eng_status(struct gk20a *g,
gk20a_debug_output(o,
"id: %d (%s), next_id: %d (%s), ctx status: %s ",
fifo_engine_status_id_v(status),
fifo_engine_status_id_type_v(status) ?
"tsg" : "channel",
(fifo_engine_status_id_type_v(status) ==
fifo_engine_status_id_type_tsgid_v()) ? "tsg" : "channel",
fifo_engine_status_next_id_v(status),
fifo_engine_status_next_id_type_v(status) ?
"tsg" : "channel",
(fifo_engine_status_next_id_type_v(status) ==
fifo_engine_status_next_id_type_tsgid_v()) ? "tsg" : "channel",
gk20a_decode_pbdma_chan_eng_ctx_status(ctx_status));
if (fifo_engine_status_eng_reload_v(status) != 0U) {
@@ -518,7 +519,7 @@ static int gv11b_fifo_poll_pbdma_chan_status(struct gk20a *g, u32 id,
nvgpu_usleep_range(delay, delay * 2);
delay = min_t(unsigned long,
delay << 1, GR_IDLE_CHECK_MAX);
} while (!nvgpu_timeout_expired(&timeout));
} while (nvgpu_timeout_expired(&timeout) == 0);
if (ret != 0) {
nvgpu_err(g, "preempt timeout pbdma: %u pbdma_stat: %u "
@@ -596,7 +597,7 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
/* Eng save hasn't started yet. Continue polling */
if (eng_intr_pending != 0U) {
/* if eng intr, stop polling */
*reset_eng_bitmask |= BIT(act_eng_id);
*reset_eng_bitmask |= BIT32(act_eng_id);
ret = 0;
break;
}
@@ -609,7 +610,7 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
if (id == fifo_engine_status_id_v(eng_stat)) {
if (eng_intr_pending != 0U) {
/* preemption will not finish */
*reset_eng_bitmask |= BIT(act_eng_id);
*reset_eng_bitmask |= BIT32(act_eng_id);
ret = 0;
break;
}
@@ -625,7 +626,7 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
if (id == fifo_engine_status_next_id_v(eng_stat)) {
if (eng_intr_pending != 0U) {
/* preemption will not finish */
*reset_eng_bitmask |= BIT(act_eng_id);
*reset_eng_bitmask |= BIT32(act_eng_id);
ret = 0;
break;
}
@@ -643,7 +644,7 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
nvgpu_usleep_range(delay, delay * 2);
delay = min_t(unsigned long,
delay << 1, GR_IDLE_CHECK_MAX);
} while (!nvgpu_timeout_expired(&timeout));
} while (nvgpu_timeout_expired(&timeout) == 0);
if (ret != 0) {
/*
@@ -655,7 +656,7 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
*/
nvgpu_err(g, "preempt timeout eng: %u ctx_stat: %u tsgid: %u",
act_eng_id, ctx_stat, id);
*reset_eng_bitmask |= BIT(act_eng_id);
*reset_eng_bitmask |= BIT32(act_eng_id);
}
return ret;
@@ -707,7 +708,7 @@ void gv11b_fifo_reset_pbdma_and_eng_faulted(struct gk20a *g,
struct tsg_gk20a *tsg,
u32 faulted_pbdma, u32 faulted_engine)
{
if (!tsg) {
if (tsg == NULL) {
return;
}
@@ -741,9 +742,9 @@ static u32 gv11b_fifo_get_runlists_mask(struct gk20a *g, u32 act_eng_bitmask,
}
}
if (rc_type == RC_TYPE_MMU_FAULT && mmfault) {
if ((rc_type == RC_TYPE_MMU_FAULT) && (mmfault != NULL)) {
if (mmfault->faulted_pbdma != FIFO_INVAL_PBDMA_ID) {
pbdma_bitmask = BIT(mmfault->faulted_pbdma);
pbdma_bitmask = BIT32(mmfault->faulted_pbdma);
}
for (rlid = 0; rlid < f->max_runlists; rlid++) {
@@ -902,7 +903,7 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
ret = __locked_fifo_preempt(g, tsgid, true);
if (!mutex_ret) {
if (mutex_ret == 0U) {
nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
}
@@ -976,7 +977,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
for (rlid = 0; rlid < g->fifo.max_runlists;
rlid++) {
if (!(runlists_mask & BIT(rlid))) {
if ((runlists_mask & BIT32(rlid)) == 0U) {
continue;
}
nvgpu_log(g, gpu_dbg_info, "abort runlist id %d",
@@ -1017,7 +1018,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
nvgpu_log(g, gpu_dbg_info, "aborted tsg id %lu", tsgid);
}
}
if (!mutex_ret) {
if (mutex_ret == 0U) {
nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
}
}
@@ -1172,15 +1173,15 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
runlist = &g->fifo.runlist_info[rlid];
if ((runlists_mask & BIT(rlid)) &&
runlist->reset_eng_bitmask) {
if (((runlists_mask & BIT32(rlid)) != 0U) &&
(runlist->reset_eng_bitmask != 0U)) {
unsigned long __reset_eng_bitmask =
runlist->reset_eng_bitmask;
for_each_set_bit(engine_id, &__reset_eng_bitmask,
g->fifo.max_engines) {
if (tsg &&
if ((tsg != NULL) &&
gk20a_fifo_should_defer_engine_reset(g,
engine_id, client_type, false)) {
@@ -1581,7 +1582,7 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr)
const char *info_status_str;
if (!(fifo_intr & fifo_intr_0_ctxsw_timeout_pending_f())) {
if ((fifo_intr & fifo_intr_0_ctxsw_timeout_pending_f()) == 0U) {
return ret;
}
@@ -1633,8 +1634,8 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr)
/* Cancel all channels' timeout */
gk20a_channel_timeout_restart_all_channels(g);
gk20a_fifo_recover(g, BIT(active_eng_id), tsgid,
true, true, verbose,
gk20a_fifo_recover(g, BIT32(active_eng_id),
tsgid, true, true, verbose,
RC_TYPE_CTXSW_TIMEOUT);
} else {
nvgpu_log_info(g,
@@ -1705,8 +1706,8 @@ unsigned int gv11b_fifo_handle_pbdma_intr_1(struct gk20a *g,
u32 pbdma_intr_1_current = gk20a_readl(g, pbdma_intr_1_r(pbdma_id));
/* minimize race with the gpu clearing the pending interrupt */
if (!(pbdma_intr_1_current &
pbdma_intr_1_ctxnotvalid_pending_f())) {
if ((pbdma_intr_1_current &
pbdma_intr_1_ctxnotvalid_pending_f()) == 0U) {
pbdma_intr_1 &= ~pbdma_intr_1_ctxnotvalid_pending_f();
}


@@ -790,12 +790,12 @@ static int gr_gv11b_handle_gpcmmu_ecc_exception(struct gk20a *g, u32 gpc,
u32 ecc_status, ecc_addr, corrected_cnt, uncorrected_cnt;
u32 corrected_delta, uncorrected_delta;
u32 corrected_overflow, uncorrected_overflow;
int hww_esr;
u32 hww_esr;
hww_esr = gk20a_readl(g, gr_gpc0_mmu_gpcmmu_global_esr_r() + offset);
if (!(hww_esr & (gr_gpc0_mmu_gpcmmu_global_esr_ecc_corrected_m() |
gr_gpc0_mmu_gpcmmu_global_esr_ecc_uncorrected_m()))) {
if ((hww_esr & (gr_gpc0_mmu_gpcmmu_global_esr_ecc_corrected_m() |
gr_gpc0_mmu_gpcmmu_global_esr_ecc_uncorrected_m())) == 0U) {
return ret;
}
@@ -820,12 +820,12 @@ static int gr_gv11b_handle_gpcmmu_ecc_exception(struct gk20a *g, u32 gpc,
/* clear the interrupt */
if ((corrected_delta > 0) || corrected_overflow) {
if ((corrected_delta > 0U) || (corrected_overflow != 0U)) {
gk20a_writel(g,
gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_r() +
offset, 0);
}
if ((uncorrected_delta > 0) || uncorrected_overflow) {
if ((uncorrected_delta > 0U) || (uncorrected_overflow != 0U)) {
gk20a_writel(g,
gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_r() +
offset, 0);
@@ -866,7 +866,7 @@ static int gr_gv11b_handle_gpcmmu_ecc_exception(struct gk20a *g, u32 gpc,
gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_l1tlb_fa_data_m()) != 0U) {
nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc fa data error");
}
if (corrected_overflow || uncorrected_overflow) {
if ((corrected_overflow != 0U) || (uncorrected_overflow != 0U)) {
nvgpu_info(g, "mmu l1tlb ecc counter overflow!");
}
@@ -889,12 +889,12 @@ static int gr_gv11b_handle_gpccs_ecc_exception(struct gk20a *g, u32 gpc,
u32 ecc_status, ecc_addr, corrected_cnt, uncorrected_cnt;
u32 corrected_delta, uncorrected_delta;
u32 corrected_overflow, uncorrected_overflow;
int hww_esr;
u32 hww_esr;
hww_esr = gk20a_readl(g, gr_gpc0_gpccs_hww_esr_r() + offset);
if (!(hww_esr & (gr_gpc0_gpccs_hww_esr_ecc_uncorrected_m() |
gr_gpc0_gpccs_hww_esr_ecc_corrected_m()))) {
if ((hww_esr & (gr_gpc0_gpccs_hww_esr_ecc_uncorrected_m() |
gr_gpc0_gpccs_hww_esr_ecc_corrected_m())) == 0U) {
return ret;
}
@@ -919,12 +919,12 @@ static int gr_gv11b_handle_gpccs_ecc_exception(struct gk20a *g, u32 gpc,
/* clear the interrupt */
if ((corrected_delta > 0) || corrected_overflow) {
if ((corrected_delta > 0U) || (corrected_overflow != 0U)) {
gk20a_writel(g,
gr_gpc0_gpccs_falcon_ecc_corrected_err_count_r() +
offset, 0);
}
if ((uncorrected_delta > 0) || uncorrected_overflow) {
if ((uncorrected_delta > 0U) || (uncorrected_overflow != 0U)) {
gk20a_writel(g,
gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_r() +
offset, 0);
@@ -956,7 +956,7 @@ static int gr_gv11b_handle_gpccs_ecc_exception(struct gk20a *g, u32 gpc,
gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_dmem_m()) != 0U) {
nvgpu_log(g, gpu_dbg_intr, "dmem ecc error uncorrected");
}
if (corrected_overflow || uncorrected_overflow) {
if ((corrected_overflow != 0U) || (uncorrected_overflow != 0U)) {
nvgpu_info(g, "gpccs ecc counter overflow!");
}
@@ -1050,9 +1050,9 @@ bool gr_gv11b_add_zbc_type_s(struct gk20a *g, struct gr_gk20a *gr,
s_tbl = &gr->zbc_s_tbl[i];
if (s_tbl->ref_cnt &&
s_tbl->stencil == zbc_val->depth &&
s_tbl->format == zbc_val->format) {
if ((s_tbl->ref_cnt != 0U) &&
(s_tbl->stencil == zbc_val->depth) &&
(s_tbl->format == zbc_val->format)) {
added = true;
s_tbl->ref_cnt++;
*ret_val = 0;
@@ -1069,7 +1069,7 @@ bool gr_gv11b_add_zbc_type_s(struct gk20a *g, struct gr_gk20a *gr,
*ret_val = g->ops.gr.add_zbc_s(g, gr,
zbc_val, gr->max_used_s_index);
if (!(*ret_val)) {
if ((*ret_val) == 0) {
gr->max_used_s_index++;
}
}
@@ -1217,18 +1217,18 @@ void gr_gv11b_set_tex_in_dbg(struct gk20a *g, u32 data)
nvgpu_log_fn(g, " ");
val = gk20a_readl(g, gr_gpcs_tpcs_tex_in_dbg_r());
flag = (data & NVC397_SET_TEX_IN_DBG_TSL1_RVCH_INVALIDATE) ? 1 : 0;
flag = (data & NVC397_SET_TEX_IN_DBG_TSL1_RVCH_INVALIDATE) != 0U;
val = set_field(val, gr_gpcs_tpcs_tex_in_dbg_tsl1_rvch_invalidate_m(),
gr_gpcs_tpcs_tex_in_dbg_tsl1_rvch_invalidate_f(flag));
gk20a_writel(g, gr_gpcs_tpcs_tex_in_dbg_r(), val);
val = gk20a_readl(g, gr_gpcs_tpcs_sm_l1tag_ctrl_r());
flag = (data &
NVC397_SET_TEX_IN_DBG_SM_L1TAG_CTRL_CACHE_SURFACE_LD) ? 1 : 0;
NVC397_SET_TEX_IN_DBG_SM_L1TAG_CTRL_CACHE_SURFACE_LD) != 0U;
val = set_field(val, gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_ld_m(),
gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_ld_f(flag));
flag = (data &
NVC397_SET_TEX_IN_DBG_SM_L1TAG_CTRL_CACHE_SURFACE_ST) ? 1 : 0;
NVC397_SET_TEX_IN_DBG_SM_L1TAG_CTRL_CACHE_SURFACE_ST) != 0U;
val = set_field(val, gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_st_m(),
gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_st_f(flag));
gk20a_writel(g, gr_gpcs_tpcs_sm_l1tag_ctrl_r(), val);
@@ -1371,7 +1371,7 @@ void gr_gv11b_cb_size_default(struct gk20a *g)
{
struct gr_gk20a *gr = &g->gr;
if (!gr->attrib_cb_default_size) {
if (gr->attrib_cb_default_size == 0U) {
gr->attrib_cb_default_size =
gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v();
}
@@ -1517,7 +1517,7 @@ int gr_gv11b_alloc_buffer(struct vm_gk20a *vm, size_t size,
false,
mem->aperture);
if (!mem->gpu_va) {
if (mem->gpu_va == 0ULL) {
err = -ENOMEM;
goto fail_free;
}
@@ -1558,13 +1558,13 @@ int gr_gv11b_set_ctxsw_preemption_mode(struct gk20a *g,
}
/* Do not allow lower preemption modes than current ones */
if (graphics_preempt_mode &&
(graphics_preempt_mode < gr_ctx->graphics_preempt_mode)) {
if ((graphics_preempt_mode != 0U) &&
(graphics_preempt_mode < gr_ctx->graphics_preempt_mode)) {
return -EINVAL;
}
if (compute_preempt_mode &&
(compute_preempt_mode < gr_ctx->compute_preempt_mode)) {
if ((compute_preempt_mode != 0U) &&
(compute_preempt_mode < gr_ctx->compute_preempt_mode)) {
return -EINVAL;
}
@@ -1677,7 +1677,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
nvgpu_log_fn(g, " ");
tsg = tsg_gk20a_from_ch(c);
if (!tsg) {
if (tsg == NULL) {
return;
}
@@ -1920,7 +1920,7 @@ int gr_gv11b_dump_gr_status_regs(struct gk20a *g,
gk20a_readl(g, gr_pri_gpc0_gpccs_gpc_activity3_r()));
gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC0_TPCCS_TPC_ACTIVITY0: 0x%x\n",
gk20a_readl(g, gr_pri_gpc0_tpc0_tpccs_tpc_activity_0_r()));
if (gr->gpc_tpc_count && gr->gpc_tpc_count[0] == 2) {
if ((gr->gpc_tpc_count != NULL) && (gr->gpc_tpc_count[0] == 2U)) {
gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC1_TPCCS_TPC_ACTIVITY0: 0x%x\n",
gk20a_readl(g, gr_pri_gpc0_tpc1_tpccs_tpc_activity_0_r()));
}
@@ -1936,7 +1936,7 @@ int gr_gv11b_dump_gr_status_regs(struct gk20a *g,
gk20a_readl(g, gr_pri_gpcs_gpccs_gpc_activity_3_r()));
gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPC0_TPCCS_TPC_ACTIVITY0: 0x%x\n",
gk20a_readl(g, gr_pri_gpcs_tpc0_tpccs_tpc_activity_0_r()));
if (gr->gpc_tpc_count && gr->gpc_tpc_count[0] == 2) {
if ((gr->gpc_tpc_count != NULL) && (gr->gpc_tpc_count[0] == 2U)) {
gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPC1_TPCCS_TPC_ACTIVITY0: 0x%x\n",
gk20a_readl(g, gr_pri_gpcs_tpc1_tpccs_tpc_activity_0_r()));
}
@@ -2067,7 +2067,7 @@ int gr_gv11b_wait_empty(struct gk20a *g, unsigned long duration_ms,
nvgpu_usleep_range(delay, delay * 2);
delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
} while (!nvgpu_timeout_expired(&timeout));
} while (nvgpu_timeout_expired(&timeout) == 0);
nvgpu_err(g,
"timeout, ctxsw busy : %d, gr busy : %d, %08x, %08x, %08x, %08x",
@@ -2108,7 +2108,7 @@ void gr_gv11b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
{
u32 fuse_val;
if (!g->gr.gpc_tpc_mask[gpc_index]) {
if (g->gr.gpc_tpc_mask[gpc_index] == 0U) {
return;
}
@@ -2379,7 +2379,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
if (fault_ch != NULL) {
tsg = tsg_gk20a_from_ch(fault_ch);
if (!tsg) {
if (tsg == NULL) {
return -EINVAL;
}
@@ -2508,11 +2508,11 @@ static void gr_gv11b_handle_fecs_ecc_error(struct gk20a *g, u32 intr)
gr_fecs_falcon_ecc_status_uncorrected_err_total_counter_overflow_m();
/* clear the interrupt */
if ((corrected_delta > 0) || corrected_overflow) {
if ((corrected_delta > 0U) || (corrected_overflow != 0U)) {
gk20a_writel(g,
gr_fecs_falcon_ecc_corrected_err_count_r(), 0);
}
if ((uncorrected_delta > 0) || uncorrected_overflow) {
if ((uncorrected_delta > 0U) || (uncorrected_overflow != 0U)) {
gk20a_writel(g,
gr_fecs_falcon_ecc_uncorrected_err_count_r(),
0);
@@ -2554,7 +2554,7 @@ static void gr_gv11b_handle_fecs_ecc_error(struct gk20a *g, u32 intr)
nvgpu_log(g, gpu_dbg_intr,
"dmem ecc error uncorrected");
}
if (corrected_overflow || uncorrected_overflow) {
if ((corrected_overflow != 0U) || (uncorrected_overflow != 0U)) {
nvgpu_info(g, "fecs ecc counter overflow!");
}
@@ -2598,7 +2598,7 @@ int gr_gv11b_setup_rop_mapping(struct gk20a *g, struct gr_gk20a *gr)
nvgpu_log_fn(g, " ");
if (!gr->map_tiles) {
if (gr->map_tiles == NULL) {
return -1;
}
@@ -2790,12 +2790,12 @@ u32 gr_gv11b_get_nonpes_aware_tpc(struct gk20a *g, u32 gpc, u32 tpc)
struct gr_gk20a *gr = &g->gr;
for (pes = 0; pes < gr->gpc_ppc_count[gpc]; pes++) {
if (gr->pes_tpc_mask[pes][gpc] & BIT(tpc)) {
if ((gr->pes_tpc_mask[pes][gpc] & BIT32(tpc)) != 0U) {
break;
}
tpc_new += gr->pes_tpc_count[pes][gpc];
}
temp = (BIT(tpc) - 1) & gr->pes_tpc_mask[pes][gpc];
temp = (BIT32(tpc) - 1U) & gr->pes_tpc_mask[pes][gpc];
temp = hweight32(temp);
tpc_new += temp;
@@ -2833,7 +2833,7 @@ int gr_gv11b_load_smid_config(struct gk20a *g)
int num_gpcs = nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS);
tpc_sm_id = nvgpu_kcalloc(g, gr_cwd_sm_id__size_1_v(), sizeof(u32));
if (!tpc_sm_id) {
if (tpc_sm_id == NULL) {
return -ENOMEM;
}
@@ -2999,9 +2999,9 @@ void gr_gv11b_load_tpc_mask(struct gk20a *g)
nvgpu_log_info(g, "pes_tpc_mask %u\n", pes_tpc_mask);
fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, gpc);
if (g->tpc_fs_mask_user &&
g->tpc_fs_mask_user != fuse_tpc_mask &&
fuse_tpc_mask == BIT32(g->gr.max_tpc_count) - U32(1)) {
if ((g->tpc_fs_mask_user != 0U) &&
(g->tpc_fs_mask_user != fuse_tpc_mask) &&
(fuse_tpc_mask == BIT32(g->gr.max_tpc_count) - U32(1))) {
val = g->tpc_fs_mask_user;
val &= BIT32(g->gr.max_tpc_count) - U32(1);
val = BIT32(hweight32(val)) - U32(1);
@@ -3228,14 +3228,14 @@ int gv11b_gr_set_sm_debug_mode(struct gk20a *g,
int err;
ops = nvgpu_kcalloc(g, g->gr.no_of_sm, sizeof(*ops));
if (!ops) {
if (ops == NULL) {
return -ENOMEM;
}
for (sm_id = 0; sm_id < g->gr.no_of_sm; sm_id++) {
u32 gpc, tpc, sm;
u32 reg_offset, reg_mask, reg_val;
if (!(sms & BIT64(sm_id))) {
if ((sms & BIT64(sm_id)) == 0ULL) {
continue;
}
@@ -3800,7 +3800,7 @@ int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g,
nvgpu_usleep_range(delay, delay * 2);
delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
} while (!nvgpu_timeout_expired(&timeout));
} while (nvgpu_timeout_expired(&timeout) == 0);
nvgpu_err(g, "GPC%d TPC%d: timed out while trying to "
"lock down SM%d", gpc, tpc, sm);
@@ -3859,7 +3859,7 @@ int gr_gv11b_handle_tpc_mpc_exception(struct gk20a *g,
u32 tpc_exception = gk20a_readl(g, gr_gpc0_tpc0_tpccs_tpc_exception_r()
+ offset);
if (!(tpc_exception & gr_gpc0_tpc0_tpccs_tpc_exception_mpc_m())) {
if ((tpc_exception & gr_gpc0_tpc0_tpccs_tpc_exception_mpc_m()) == 0U) {
return 0;
}
@@ -4766,7 +4766,8 @@ int gr_gv11b_decode_priv_addr(struct gk20a *g, u32 addr,
return 0;
}
return 0;
} else if (g->ops.gr.is_egpc_addr && g->ops.gr.is_egpc_addr(g, addr)) {
} else if ((g->ops.gr.is_egpc_addr != NULL) &&
g->ops.gr.is_egpc_addr(g, addr)) {
return g->ops.gr.decode_egpc_addr(g,
addr, addr_type, gpc_num,
tpc_num, broadcast_flags);
@@ -4968,8 +4969,8 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g,
}
}
} else if (((addr_type == CTXSW_ADDR_TYPE_EGPC) ||
(addr_type == CTXSW_ADDR_TYPE_ETPC)) &&
g->ops.gr.egpc_etpc_priv_addr_table) {
(addr_type == CTXSW_ADDR_TYPE_ETPC)) &&
(g->ops.gr.egpc_etpc_priv_addr_table != NULL)) {
nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC");
g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num,
broadcast_flags, priv_addr_table, &t);
@@ -4984,21 +4985,21 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g,
nvgpu_get_litter_value(g, GPU_LIT_NUM_FBPAS),
priv_addr_table, &t);
} else if ((addr_type == CTXSW_ADDR_TYPE_LTCS) &&
(broadcast_flags & PRI_BROADCAST_FLAGS_PMM_FBPGS_LTC)) {
((broadcast_flags & PRI_BROADCAST_FLAGS_PMM_FBPGS_LTC) != 0U)) {
gr_gv11b_split_pmm_fbp_broadcast_address(g,
PRI_PMMGS_OFFSET_MASK(addr),
priv_addr_table, &t,
nvgpu_get_litter_value(g, GPU_LIT_PERFMON_PMMFBP_LTC_DOMAIN_START),
nvgpu_get_litter_value(g, GPU_LIT_PERFMON_PMMFBP_LTC_DOMAIN_COUNT));
} else if ((addr_type == CTXSW_ADDR_TYPE_ROP) &&
(broadcast_flags & PRI_BROADCAST_FLAGS_PMM_FBPGS_ROP)) {
((broadcast_flags & PRI_BROADCAST_FLAGS_PMM_FBPGS_ROP) != 0U)) {
gr_gv11b_split_pmm_fbp_broadcast_address(g,
PRI_PMMGS_OFFSET_MASK(addr),
priv_addr_table, &t,
nvgpu_get_litter_value(g, GPU_LIT_PERFMON_PMMFBP_ROP_DOMAIN_START),
nvgpu_get_litter_value(g, GPU_LIT_PERFMON_PMMFBP_ROP_DOMAIN_COUNT));
} else if ((addr_type == CTXSW_ADDR_TYPE_FBP) &&
(broadcast_flags & PRI_BROADCAST_FLAGS_PMM_FBPS)) {
((broadcast_flags & PRI_BROADCAST_FLAGS_PMM_FBPS) != 0U)) {
u32 domain_start;
domain_start = (addr -


@@ -65,9 +65,9 @@ enum {
#define NVC397_SET_BES_CROP_DEBUG4 0x10b0
#define NVC397_SET_SHADER_CUT_COLLECTOR 0x10c8
#define NVC397_SET_TEX_IN_DBG_TSL1_RVCH_INVALIDATE 0x1
#define NVC397_SET_TEX_IN_DBG_SM_L1TAG_CTRL_CACHE_SURFACE_LD 0x2
#define NVC397_SET_TEX_IN_DBG_SM_L1TAG_CTRL_CACHE_SURFACE_ST 0x4
#define NVC397_SET_TEX_IN_DBG_TSL1_RVCH_INVALIDATE BIT32(0)
#define NVC397_SET_TEX_IN_DBG_SM_L1TAG_CTRL_CACHE_SURFACE_LD BIT32(1)
#define NVC397_SET_TEX_IN_DBG_SM_L1TAG_CTRL_CACHE_SURFACE_ST BIT32(2)
#define NVC397_SET_SKEDCHECK_18_MASK 0x3
#define NVC397_SET_SKEDCHECK_18_DEFAULT 0x0


@@ -55,7 +55,7 @@ void gv11b_init_inst_block(struct nvgpu_mem *inst_block,
g->ops.mm.init_pdb(g, inst_block, vm);
if (big_page_size && g->ops.mm.set_big_page_size) {
if ((big_page_size != 0U) && (g->ops.mm.set_big_page_size != NULL)) {
g->ops.mm.set_big_page_size(g, inst_block, big_page_size);
}
@@ -225,7 +225,7 @@ void gv11b_mm_l2_flush(struct gk20a *g, bool invalidate)
u64 gv11b_gpu_phys_addr(struct gk20a *g,
struct nvgpu_gmmu_attrs *attrs, u64 phys)
{
if (attrs && attrs->l3_alloc) {
if ((attrs != NULL) && attrs->l3_alloc) {
return phys | NVGPU_L3_ALLOC_BIT;
}


@@ -77,7 +77,7 @@ int gv11b_alloc_subctx_header(struct channel_gk20a *c)
0, /* not GPU-cacheable */
gk20a_mem_flag_none, true,
ctxheader->aperture);
if (!ctxheader->gpu_va) {
if (ctxheader->gpu_va == 0ULL) {
nvgpu_err(g, "failed to map ctx header");
nvgpu_dma_free(g, ctxheader);
return -ENOMEM;
@@ -105,7 +105,7 @@ int gv11b_update_subctx_header(struct channel_gk20a *c, u64 gpu_va)
struct nvgpu_gr_ctx *gr_ctx;
tsg = tsg_gk20a_from_ch(c);
if (!tsg) {
if (tsg == NULL) {
return -EINVAL;
}


@@ -352,7 +352,7 @@ channel_gk20a_from_worker_item(struct nvgpu_list_node *node)
static inline bool gk20a_channel_as_bound(struct channel_gk20a *ch)
{
return !!ch->vm;
return (ch->vm != NULL);
}
int channel_gk20a_commit_va(struct channel_gk20a *c);
int gk20a_init_channel_support(struct gk20a *g, u32 chid);


@@ -56,6 +56,8 @@
#ifndef NVGPU_HW_BUS_GM20B_H
#define NVGPU_HW_BUS_GM20B_H
#include <nvgpu/types.h>
static inline u32 bus_bar0_window_r(void)
{
return 0x00001700U;
@@ -168,10 +170,18 @@ static inline u32 bus_bind_status_bar2_pending_v(u32 r)
{
return (r >> 2U) & 0x1U;
}
static inline u32 bus_bind_status_bar2_pending_empty_v(void)
{
return 0x00000000U;
}
static inline u32 bus_bind_status_bar2_pending_empty_f(void)
{
return 0x0U;
}
static inline u32 bus_bind_status_bar2_pending_busy_v(void)
{
return 0x00000001U;
}
static inline u32 bus_bind_status_bar2_pending_busy_f(void)
{
return 0x4U;
@@ -180,10 +190,18 @@ static inline u32 bus_bind_status_bar2_outstanding_v(u32 r)
{
return (r >> 3U) & 0x1U;
}
static inline u32 bus_bind_status_bar2_outstanding_false_v(void)
{
return 0x00000000U;
}
static inline u32 bus_bind_status_bar2_outstanding_false_f(void)
{
return 0x0U;
}
static inline u32 bus_bind_status_bar2_outstanding_true_v(void)
{
return 0x00000001U;
}
static inline u32 bus_bind_status_bar2_outstanding_true_f(void)
{
return 0x8U;
@@ -194,15 +212,15 @@ static inline u32 bus_intr_0_r(void)
}
static inline u32 bus_intr_0_pri_squash_m(void)
{
return 0x1U << 1U;
return U32(0x1U) << 1U;
}
static inline u32 bus_intr_0_pri_fecserr_m(void)
{
return 0x1U << 2U;
return U32(0x1U) << 2U;
}
static inline u32 bus_intr_0_pri_timeout_m(void)
{
return 0x1U << 3U;
return U32(0x1U) << 3U;
}
static inline u32 bus_intr_en_0_r(void)
{
@@ -210,14 +228,14 @@ static inline u32 bus_intr_en_0_r(void)
}
static inline u32 bus_intr_en_0_pri_squash_m(void)
{
return 0x1U << 1U;
return U32(0x1U) << 1U;
}
static inline u32 bus_intr_en_0_pri_fecserr_m(void)
{
return 0x1U << 2U;
return U32(0x1U) << 2U;
}
static inline u32 bus_intr_en_0_pri_timeout_m(void)
{
return 0x1U << 3U;
return U32(0x1U) << 3U;
}
#endif


@@ -56,6 +56,8 @@
#ifndef NVGPU_HW_BUS_GP106_H
#define NVGPU_HW_BUS_GP106_H
#include <nvgpu/types.h>
static inline u32 bus_bar0_window_r(void)
{
return 0x00001700U;
@@ -168,10 +170,18 @@ static inline u32 bus_bind_status_bar2_pending_v(u32 r)
{
return (r >> 2U) & 0x1U;
}
static inline u32 bus_bind_status_bar2_pending_empty_v(void)
{
return 0x00000000U;
}
static inline u32 bus_bind_status_bar2_pending_empty_f(void)
{
return 0x0U;
}
static inline u32 bus_bind_status_bar2_pending_busy_v(void)
{
return 0x00000001U;
}
static inline u32 bus_bind_status_bar2_pending_busy_f(void)
{
return 0x4U;
@@ -180,10 +190,18 @@ static inline u32 bus_bind_status_bar2_outstanding_v(u32 r)
{
return (r >> 3U) & 0x1U;
}
static inline u32 bus_bind_status_bar2_outstanding_false_v(void)
{
return 0x00000000U;
}
static inline u32 bus_bind_status_bar2_outstanding_false_f(void)
{
return 0x0U;
}
static inline u32 bus_bind_status_bar2_outstanding_true_v(void)
{
return 0x00000001U;
}
static inline u32 bus_bind_status_bar2_outstanding_true_f(void)
{
return 0x8U;
@@ -194,15 +212,15 @@ static inline u32 bus_intr_0_r(void)
}
static inline u32 bus_intr_0_pri_squash_m(void)
{
return 0x1U << 1U;
return U32(0x1U) << 1U;
}
static inline u32 bus_intr_0_pri_fecserr_m(void)
{
return 0x1U << 2U;
return U32(0x1U) << 2U;
}
static inline u32 bus_intr_0_pri_timeout_m(void)
{
return 0x1U << 3U;
return U32(0x1U) << 3U;
}
static inline u32 bus_intr_en_0_r(void)
{
@@ -210,14 +228,14 @@ static inline u32 bus_intr_en_0_r(void)
}
static inline u32 bus_intr_en_0_pri_squash_m(void)
{
return 0x1U << 1U;
return U32(0x1U) << 1U;
}
static inline u32 bus_intr_en_0_pri_fecserr_m(void)
{
return 0x1U << 2U;
return U32(0x1U) << 2U;
}
static inline u32 bus_intr_en_0_pri_timeout_m(void)
{
return 0x1U << 3U;
return U32(0x1U) << 3U;
}
#endif


@@ -56,6 +56,8 @@
#ifndef NVGPU_HW_BUS_GP10B_H
#define NVGPU_HW_BUS_GP10B_H
#include <nvgpu/types.h>
static inline u32 bus_bar0_window_r(void)
{
return 0x00001700U;
@@ -168,10 +170,18 @@ static inline u32 bus_bind_status_bar2_pending_v(u32 r)
{
return (r >> 2U) & 0x1U;
}
static inline u32 bus_bind_status_bar2_pending_empty_v(void)
{
return 0x00000000U;
}
static inline u32 bus_bind_status_bar2_pending_empty_f(void)
{
return 0x0U;
}
static inline u32 bus_bind_status_bar2_pending_busy_v(void)
{
return 0x00000001U;
}
static inline u32 bus_bind_status_bar2_pending_busy_f(void)
{
return 0x4U;
@@ -180,10 +190,18 @@ static inline u32 bus_bind_status_bar2_outstanding_v(u32 r)
{
return (r >> 3U) & 0x1U;
}
static inline u32 bus_bind_status_bar2_outstanding_false_v(void)
{
return 0x00000000U;
}
static inline u32 bus_bind_status_bar2_outstanding_false_f(void)
{
return 0x0U;
}
static inline u32 bus_bind_status_bar2_outstanding_true_v(void)
{
return 0x00000001U;
}
static inline u32 bus_bind_status_bar2_outstanding_true_f(void)
{
return 0x8U;
@@ -194,15 +212,15 @@ static inline u32 bus_intr_0_r(void)
}
static inline u32 bus_intr_0_pri_squash_m(void)
{
return 0x1U << 1U;
return U32(0x1U) << 1U;
}
static inline u32 bus_intr_0_pri_fecserr_m(void)
{
return 0x1U << 2U;
return U32(0x1U) << 2U;
}
static inline u32 bus_intr_0_pri_timeout_m(void)
{
return 0x1U << 3U;
return U32(0x1U) << 3U;
}
static inline u32 bus_intr_en_0_r(void)
{
@@ -210,14 +228,14 @@ static inline u32 bus_intr_en_0_r(void)
}
static inline u32 bus_intr_en_0_pri_squash_m(void)
{
return 0x1U << 1U;
return U32(0x1U) << 1U;
}
static inline u32 bus_intr_en_0_pri_fecserr_m(void)
{
return 0x1U << 2U;
return U32(0x1U) << 2U;
}
static inline u32 bus_intr_en_0_pri_timeout_m(void)
{
return 0x1U << 3U;
return U32(0x1U) << 3U;
}
#endif


@@ -56,6 +56,8 @@
#ifndef NVGPU_HW_BUS_GV100_H
#define NVGPU_HW_BUS_GV100_H
#include <nvgpu/types.h>
static inline u32 bus_sw_scratch_r(u32 i)
{
return 0x00001580U + i*4U;
@@ -172,10 +174,18 @@ static inline u32 bus_bind_status_bar2_pending_v(u32 r)
{
return (r >> 2U) & 0x1U;
}
static inline u32 bus_bind_status_bar2_pending_empty_v(void)
{
return 0x00000000U;
}
static inline u32 bus_bind_status_bar2_pending_empty_f(void)
{
return 0x0U;
}
static inline u32 bus_bind_status_bar2_pending_busy_v(void)
{
return 0x00000001U;
}
static inline u32 bus_bind_status_bar2_pending_busy_f(void)
{
return 0x4U;
@@ -184,10 +194,18 @@ static inline u32 bus_bind_status_bar2_outstanding_v(u32 r)
{
return (r >> 3U) & 0x1U;
}
static inline u32 bus_bind_status_bar2_outstanding_false_v(void)
{
return 0x00000000U;
}
static inline u32 bus_bind_status_bar2_outstanding_false_f(void)
{
return 0x0U;
}
static inline u32 bus_bind_status_bar2_outstanding_true_v(void)
{
return 0x00000001U;
}
static inline u32 bus_bind_status_bar2_outstanding_true_f(void)
{
return 0x8U;
@@ -198,15 +216,15 @@ static inline u32 bus_intr_0_r(void)
}
static inline u32 bus_intr_0_pri_squash_m(void)
{
return 0x1U << 1U;
return U32(0x1U) << 1U;
}
static inline u32 bus_intr_0_pri_fecserr_m(void)
{
return 0x1U << 2U;
return U32(0x1U) << 2U;
}
static inline u32 bus_intr_0_pri_timeout_m(void)
{
return 0x1U << 3U;
return U32(0x1U) << 3U;
}
static inline u32 bus_intr_en_0_r(void)
{
@@ -214,14 +232,14 @@ static inline u32 bus_intr_en_0_r(void)
}
static inline u32 bus_intr_en_0_pri_squash_m(void)
{
return 0x1U << 1U;
return U32(0x1U) << 1U;
}
static inline u32 bus_intr_en_0_pri_fecserr_m(void)
{
return 0x1U << 2U;
return U32(0x1U) << 2U;
}
static inline u32 bus_intr_en_0_pri_timeout_m(void)
{
return 0x1U << 3U;
return U32(0x1U) << 3U;
}
#endif


@@ -56,6 +56,8 @@
#ifndef NVGPU_HW_BUS_GV11B_H
#define NVGPU_HW_BUS_GV11B_H
#include <nvgpu/types.h>
static inline u32 bus_bar0_window_r(void)
{
return 0x00001700U;
@@ -168,10 +170,18 @@ static inline u32 bus_bind_status_bar2_pending_v(u32 r)
{
return (r >> 2U) & 0x1U;
}
static inline u32 bus_bind_status_bar2_pending_empty_v(void)
{
return 0x00000000U;
}
static inline u32 bus_bind_status_bar2_pending_empty_f(void)
{
return 0x0U;
}
static inline u32 bus_bind_status_bar2_pending_busy_v(void)
{
return 0x00000001U;
}
static inline u32 bus_bind_status_bar2_pending_busy_f(void)
{
return 0x4U;
@@ -180,10 +190,18 @@ static inline u32 bus_bind_status_bar2_outstanding_v(u32 r)
{
return (r >> 3U) & 0x1U;
}
static inline u32 bus_bind_status_bar2_outstanding_false_v(void)
{
return 0x00000000U;
}
static inline u32 bus_bind_status_bar2_outstanding_false_f(void)
{
return 0x0U;
}
static inline u32 bus_bind_status_bar2_outstanding_true_v(void)
{
return 0x00000001U;
}
static inline u32 bus_bind_status_bar2_outstanding_true_f(void)
{
return 0x8U;
@@ -194,15 +212,15 @@ static inline u32 bus_intr_0_r(void)
}
static inline u32 bus_intr_0_pri_squash_m(void)
{
return 0x1U << 1U;
return U32(0x1U) << 1U;
}
static inline u32 bus_intr_0_pri_fecserr_m(void)
{
return 0x1U << 2U;
return U32(0x1U) << 2U;
}
static inline u32 bus_intr_0_pri_timeout_m(void)
{
return 0x1U << 3U;
return U32(0x1U) << 3U;
}
static inline u32 bus_intr_en_0_r(void)
{
@@ -210,14 +228,14 @@ static inline u32 bus_intr_en_0_r(void)
}
static inline u32 bus_intr_en_0_pri_squash_m(void)
{
return 0x1U << 1U;
return U32(0x1U) << 1U;
}
static inline u32 bus_intr_en_0_pri_fecserr_m(void)
{
return 0x1U << 2U;
return U32(0x1U) << 2U;
}
static inline u32 bus_intr_en_0_pri_timeout_m(void)
{
return 0x1U << 3U;
return U32(0x1U) << 3U;
}
#endif


@@ -172,10 +172,18 @@ static inline u32 bus_bind_status_bar2_pending_v(u32 r)
{
return (r >> 2U) & 0x1U;
}
static inline u32 bus_bind_status_bar2_pending_empty_v(void)
{
return 0x00000000U;
}
static inline u32 bus_bind_status_bar2_pending_empty_f(void)
{
return 0x0U;
}
static inline u32 bus_bind_status_bar2_pending_busy_v(void)
{
return 0x00000001U;
}
static inline u32 bus_bind_status_bar2_pending_busy_f(void)
{
return 0x4U;
@@ -184,10 +192,18 @@ static inline u32 bus_bind_status_bar2_outstanding_v(u32 r)
{
return (r >> 3U) & 0x1U;
}
static inline u32 bus_bind_status_bar2_outstanding_false_v(void)
{
return 0x00000000U;
}
static inline u32 bus_bind_status_bar2_outstanding_false_f(void)
{
return 0x0U;
}
static inline u32 bus_bind_status_bar2_outstanding_true_v(void)
{
return 0x00000001U;
}
static inline u32 bus_bind_status_bar2_outstanding_true_f(void)
{
return 0x8U;


@@ -24,14 +24,15 @@
#include <nvgpu/types.h>
#include <nvgpu/log2.h>
#include <nvgpu/bitops.h>
/*
* For the available speeds bitmap.
*/
#define GPU_XVE_SPEED_2P5 (1 << 0)
#define GPU_XVE_SPEED_5P0 (1 << 1)
#define GPU_XVE_SPEED_8P0 (1 << 2)
#define GPU_XVE_NR_SPEEDS 3
#define GPU_XVE_SPEED_2P5 BIT32(0)
#define GPU_XVE_SPEED_5P0 BIT32(1)
#define GPU_XVE_SPEED_8P0 BIT32(2)
#define GPU_XVE_NR_SPEEDS 3U
#define GPU_XVE_SPEED_MASK (GPU_XVE_SPEED_2P5 | \
GPU_XVE_SPEED_5P0 | \
@@ -54,14 +55,14 @@
*/
static inline const char *xve_speed_to_str(u32 speed)
{
if (!speed || !is_power_of_2(speed) ||
!(speed & GPU_XVE_SPEED_MASK)) {
if ((speed == 0U) || !is_power_of_2(speed) ||
(speed & GPU_XVE_SPEED_MASK) == 0U) {
return "Unknown ???";
}
return speed & GPU_XVE_SPEED_2P5 ? "Gen1" :
speed & GPU_XVE_SPEED_5P0 ? "Gen2" :
speed & GPU_XVE_SPEED_8P0 ? "Gen3" :
return (speed & GPU_XVE_SPEED_2P5) != 0U ? "Gen1" :
(speed & GPU_XVE_SPEED_5P0) != 0U ? "Gen2" :
(speed & GPU_XVE_SPEED_8P0) != 0U ? "Gen3" :
"Unknown ???";
}
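
With the explicit guards the mapping is unchanged; for example:

    xve_speed_to_str(GPU_XVE_SPEED_5P0);   /* -> "Gen2" */
    xve_speed_to_str(0U);                  /* -> "Unknown ???", via (speed == 0U) */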


@@ -59,7 +59,7 @@ int tu104_bios_verify_devinit(struct gk20a *g)
}
nvgpu_udelay(NV_DEVINIT_VERIFY_TIMEOUT_DELAY_US);
} while (!nvgpu_timeout_expired(&timeout));
} while (nvgpu_timeout_expired(&timeout) == 0);
return -ETIMEDOUT;
}


@@ -90,7 +90,7 @@ void tu104_fbpa_handle_intr(struct gk20a *g, u32 fbpa_id)
offset = nvgpu_get_litter_value(g, GPU_LIT_FBPA_STRIDE) * fbpa_id;
status = gk20a_readl(g, offset + fbpa_0_intr_status_r());
if (!(status & (ecc_subp0_mask | ecc_subp1_mask))) {
if ((status & (ecc_subp0_mask | ecc_subp1_mask)) == 0U) {
nvgpu_err(g, "unknown interrupt fbpa %u status %08x",
fbpa_id, status);
return;


@@ -165,7 +165,7 @@ int tu104_fifo_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
nvgpu_usleep_range(delay, delay * 2);
delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
} while (!nvgpu_timeout_expired(&timeout));
} while (nvgpu_timeout_expired(&timeout) == 0);
return ret;
}


@@ -171,7 +171,7 @@ int gr_tu104_map_global_ctx_buffers(struct gk20a *g,
nvgpu_log_fn(g, " ");
tsg = tsg_gk20a_from_ch(ch);
if (!tsg) {
if (tsg == NULL) {
return -EINVAL;
}
@@ -183,7 +183,7 @@ int gr_tu104_map_global_ctx_buffers(struct gk20a *g,
mem = &gr->global_ctx_buffer[RTV_CIRCULAR_BUFFER].mem;
gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size, 0,
gk20a_mem_flag_none, true, mem->aperture);
if (!gpu_va) {
if (gpu_va == 0ULL) {
return -ENOMEM;
}
@@ -234,7 +234,7 @@ int gr_tu104_commit_global_ctx_buffers(struct gk20a *g,
}
tsg = tsg_gk20a_from_ch(ch);
if (!tsg) {
if (tsg == NULL) {
return -EINVAL;
}
@@ -279,7 +279,7 @@ void gr_tu104_cb_size_default(struct gk20a *g)
{
struct gr_gk20a *gr = &g->gr;
if (!gr->attrib_cb_default_size) {
if (gr->attrib_cb_default_size == 0U) {
gr->attrib_cb_default_size =
gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v();
}


@@ -387,7 +387,7 @@ void tu104_sec2_isr(struct gk20a *g)
intr = gk20a_readl(g, psec_falcon_irqstat_r());
intr = gk20a_readl(g, psec_falcon_irqstat_r()) & mask;
if (!intr) {
if (intr == 0U) {
gk20a_writel(g, psec_falcon_irqsclr_r(), intr);
nvgpu_mutex_release(&sec2->isr_mutex);
return;