gpu: nvgpu: hal: misra 12.1 fixes

MISRA Advisory Rule 12.1 states that the precedence of operators within
expressions should be made explicit.

This change removes the Advisory Rule 12.1 violations from hal code.
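
As a hedged illustration of the pattern being applied (the function and
variable names below are hypothetical and are not taken from the nvgpu
tree), a Rule 12.1 fix looks like this:

    /*
     * Minimal sketch, assuming a MISRA C:2012 Advisory Rule 12.1
     * cleanup; the identifiers here are illustrative only.
     */
    #include <stdbool.h>
    #include <stdint.h>

    static bool engine_idle(uint32_t pending, uint32_t outstanding)
    {
            /* Before: relied on '==' binding tighter than '&&':     */
            /*     return pending == 0U && outstanding == 0U;        */
            /* After: precedence is made explicit with parentheses.  */
            return (pending == 0U) && (outstanding == 0U);
    }

    static uint32_t pick_mailbox_reg(uint32_t index, uint32_t reg0,
                                     uint32_t reg1)
    {
            /* Before:  return index != 0U ? reg1 : reg0;            */
            /* After: the relational operand of '?:' is parenthesized. */
            return (index != 0U) ? reg1 : reg0;
    }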

Jira NVGPU-3178

Change-Id: If903544e1aa7264dc07f959a65ff666dfe89a230
Signed-off-by: Scott Long <scottl@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2277478
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Scott Long
Authored: 2020-01-10 10:42:44 -08:00
Committed by: Alex Waterman
Parent: 318bd53f1d
Commit: a54c207c37
17 changed files with 86 additions and 79 deletions

View File

@@ -1,7 +1,7 @@
/*
* GM20B MMU
*
-* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -60,7 +60,7 @@ int gm20b_bus_bar1_bind(struct gk20a *g, struct nvgpu_mem *bar1_inst)
u32 val = gk20a_readl(g, bus_bind_status_r());
u32 pending = bus_bind_status_bar1_pending_v(val);
u32 outstanding = bus_bind_status_bar1_outstanding_v(val);
-if (pending == 0U && outstanding == 0U) {
+if ((pending == 0U) && (outstanding == 0U)) {
break;
}

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,7 +57,7 @@ int gp10b_bus_bar2_bind(struct gk20a *g, struct nvgpu_mem *bar2_inst)
u32 val = gk20a_readl(g, bus_bind_status_r());
u32 pending = bus_bind_status_bar2_pending_v(val);
u32 outstanding = bus_bind_status_bar2_outstanding_v(val);
-if (pending == 0U && outstanding == 0U) {
+if ((pending == 0U) && (outstanding == 0U)) {
break;
}

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -64,8 +64,8 @@ bool gk20a_is_falcon_idle(struct nvgpu_falcon *flcn)
unit_status = gk20a_falcon_readl(flcn, falcon_falcon_idlestate_r());
-if (falcon_falcon_idlestate_falcon_busy_v(unit_status) == 0U &&
-falcon_falcon_idlestate_ext_busy_v(unit_status) == 0U) {
+if ((falcon_falcon_idlestate_falcon_busy_v(unit_status) == 0U) &&
+(falcon_falcon_idlestate_ext_busy_v(unit_status) == 0U)) {
status = true;
} else {
status = false;
@@ -233,7 +233,7 @@ static void falcon_copy_to_imem_unaligned_src(struct nvgpu_falcon *flcn,
nvgpu_memcpy((u8 *)&src_tmp[0], &src[offset],
sizeof(src_tmp));
for (i = 0; i < ARRAY_SIZE(src_tmp); i++) {
-if (j++ % 64U == 0U) {
+if ((j++ % 64U) == 0U) {
/* tag is always 256B aligned */
gk20a_falcon_writel(flcn,
falcon_falcon_imemt_r(port), tag);
@@ -256,7 +256,7 @@ static void falcon_copy_to_imem_unaligned_src(struct nvgpu_falcon *flcn,
nvgpu_memcpy((u8 *)&src_tmp[0], &src[offset],
(u64)elems * elem_size);
for (i = 0; i < elems; i++) {
-if (j++ % 64U == 0U) {
+if ((j++ % 64U) == 0U) {
/* tag is always 256B aligned */
gk20a_falcon_writel(flcn,
falcon_falcon_imemt_r(port), tag);
@@ -270,7 +270,7 @@ static void falcon_copy_to_imem_unaligned_src(struct nvgpu_falcon *flcn,
}
/* WARNING : setting remaining bytes in block to 0x0 */
-while (j % 64U != 0U) {
+while ((j % 64U) != 0U) {
gk20a_falcon_writel(flcn,
falcon_falcon_imemd_r(port), 0);
j++;
@@ -305,7 +305,7 @@ NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 11_3), "TID-415")
src_u32 = (u32 *)src;
for (i = 0U; i < words; i++) {
-if (i % 64U == 0U) {
+if ((i % 64U) == 0U) {
/* tag is always 256B aligned */
gk20a_falcon_writel(flcn,
falcon_falcon_imemt_r(port), tag);
@@ -317,7 +317,7 @@ NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 11_3), "TID-415")
}
/* WARNING : setting remaining bytes in block to 0x0 */
-while (i % 64U != 0U) {
+while ((i % 64U) != 0U) {
gk20a_falcon_writel(flcn,
falcon_falcon_imemd_r(port), 0);
i++;
@@ -348,7 +348,7 @@ void gk20a_falcon_bootstrap(struct nvgpu_falcon *flcn,
u32 gk20a_falcon_mailbox_read(struct nvgpu_falcon *flcn,
u32 mailbox_index)
{
-return gk20a_falcon_readl(flcn, mailbox_index != 0U ?
+return gk20a_falcon_readl(flcn, (mailbox_index != 0U) ?
falcon_falcon_mailbox1_r() :
falcon_falcon_mailbox0_r());
}
@@ -356,7 +356,7 @@ u32 gk20a_falcon_mailbox_read(struct nvgpu_falcon *flcn,
void gk20a_falcon_mailbox_write(struct nvgpu_falcon *flcn,
u32 mailbox_index, u32 data)
{
-gk20a_falcon_writel(flcn, mailbox_index != 0U ?
+gk20a_falcon_writel(flcn, (mailbox_index != 0U) ?
falcon_falcon_mailbox1_r() :
falcon_falcon_mailbox0_r(), data);
}

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -295,7 +295,7 @@ void gv11b_fb_write_mmu_fault_status(struct gk20a *g, u32 reg_val)
void gv11b_fb_mmu_fault_info_dump(struct gk20a *g,
struct mmu_fault_info *mmufault)
{
-if (mmufault != NULL && mmufault->valid) {
+if ((mmufault != NULL) && mmufault->valid) {
nvgpu_err(g, "[MMU FAULT] "
"mmu engine id: %d, "
"ch id: %d, "

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -79,16 +79,23 @@ void gk20a_channel_read_state(struct gk20a *g, struct nvgpu_channel *ch,
state->enabled = ccsr_channel_enable_v(reg) ==
ccsr_channel_enable_in_use_v();
state->ctx_reload =
-status_v == ccsr_channel_status_pending_ctx_reload_v() ||
-status_v == ccsr_channel_status_pending_acq_ctx_reload_v() ||
-status_v == ccsr_channel_status_on_pbdma_ctx_reload_v() ||
-status_v == ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v() ||
-status_v == ccsr_channel_status_on_eng_ctx_reload_v() ||
-status_v == ccsr_channel_status_on_eng_pending_ctx_reload_v() ||
-status_v == ccsr_channel_status_on_eng_pending_acq_ctx_reload_v();
+(status_v ==
+ccsr_channel_status_pending_ctx_reload_v()) ||
+(status_v ==
+ccsr_channel_status_pending_acq_ctx_reload_v()) ||
+(status_v ==
+ccsr_channel_status_on_pbdma_ctx_reload_v()) ||
+(status_v ==
+ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v()) ||
+(status_v ==
+ccsr_channel_status_on_eng_ctx_reload_v()) ||
+(status_v ==
+ccsr_channel_status_on_eng_pending_ctx_reload_v()) ||
+(status_v ==
+ccsr_channel_status_on_eng_pending_acq_ctx_reload_v());
state->busy = ccsr_channel_busy_v(reg) == ccsr_channel_busy_true_v();
state->pending_acquire =
-status_v == ccsr_channel_status_pending_acquire_v() ||
-status_v == ccsr_channel_status_on_eng_pending_acquire_v();
+(status_v == ccsr_channel_status_pending_acquire_v()) ||
+(status_v == ccsr_channel_status_on_eng_pending_acquire_v());
state->status_string = ccsr_chan_status_str[status_v];
}

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -100,10 +100,10 @@ static u32 gv11b_fifo_ctxsw_timeout_info(struct gk20a *g, u32 active_eng_id,
tsgid = fifo_intr_ctxsw_timeout_info_next_tsgid_v(timeout_info);
-} else if (ctx_status ==
-fifo_intr_ctxsw_timeout_info_ctxsw_state_switch_v() ||
-ctx_status ==
-fifo_intr_ctxsw_timeout_info_ctxsw_state_save_v()) {
+} else if ((ctx_status ==
+fifo_intr_ctxsw_timeout_info_ctxsw_state_switch_v()) ||
+(ctx_status ==
+fifo_intr_ctxsw_timeout_info_ctxsw_state_save_v())) {
tsgid = fifo_intr_ctxsw_timeout_info_prev_tsgid_v(timeout_info);
} else {

View File

@@ -1,7 +1,7 @@
/*
* GV11B fifo
*
-* Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -105,10 +105,10 @@ u32 gv11b_fifo_mmu_fault_id_to_pbdma_id(struct gk20a *g, u32 mmu_fault_id)
num_pbdma = fifo_cfg0_num_pbdma_v(reg_val);
fault_id_pbdma0 = fifo_cfg0_pbdma_fault_id_v(reg_val);
-if (mmu_fault_id >= fault_id_pbdma0 &&
-mmu_fault_id <= nvgpu_safe_sub_u32(
+if ((mmu_fault_id >= fault_id_pbdma0) &&
+(mmu_fault_id <= nvgpu_safe_sub_u32(
nvgpu_safe_add_u32(fault_id_pbdma0,
-num_pbdma), 1U)) {
+num_pbdma), 1U))) {
return mmu_fault_id - fault_id_pbdma0;
}

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -71,9 +71,9 @@ static bool gm20b_pbdma_is_sw_method_subch(struct gk20a *g, u32 pbdma_id,
pbdma_method_subch = pbdma_method0_subch_v(
nvgpu_readl(g, pbdma_method_reg));
-if (pbdma_method_subch == 5U ||
-pbdma_method_subch == 6U ||
-pbdma_method_subch == 7U) {
+if ((pbdma_method_subch == 5U) ||
+(pbdma_method_subch == 6U) ||
+(pbdma_method_subch == 7U)) {
return true;
}
@@ -251,7 +251,7 @@ u32 gm20b_pbdma_acquire_val(u64 timeout)
do_div(timeout, 1024U); /* in unit of 1024ns */
exponent = 0;
-while (timeout > pbdma_acquire_timeout_man_max_v() &&
+while ((timeout > pbdma_acquire_timeout_man_max_v()) &&
(exponent <= pbdma_acquire_timeout_exp_max_v())) {
timeout >>= 1;
exponent++;

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -243,9 +243,8 @@ static int fifo_check_eng_intr_pending(struct gk20a *g, u32 id,
ret = 0;
}
-} else if (ctx_stat == fifo_engine_status_ctx_status_valid_v() ||
-ctx_stat ==
-fifo_engine_status_ctx_status_ctxsw_save_v()) {
+} else if ((ctx_stat == fifo_engine_status_ctx_status_valid_v()) ||
+(ctx_stat == fifo_engine_status_ctx_status_ctxsw_save_v())) {
if (id == fifo_engine_status_id_v(eng_stat)) {
if (eng_intr_pending != 0U) {
@@ -421,7 +420,7 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
err = gv11b_fifo_preempt_poll_eng(g,
tsgid, engine_id,
&f->runlist_info[runlist_id]->reset_eng_bitmask);
-if (err != 0 && ret == 0) {
+if ((err != 0) && (ret == 0)) {
ret = err;
}
}

View File

@@ -154,7 +154,7 @@ remove_tpc_err:
static u32 gr_gv100_find_max_gpc(u32 *num_tpc_gpc, u32 gpc_id, u32 max_tpc_gpc)
{
-return num_tpc_gpc[gpc_id] > max_tpc_gpc ?
+return (num_tpc_gpc[gpc_id] > max_tpc_gpc) ?
num_tpc_gpc[gpc_id] : max_tpc_gpc;
}

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -448,8 +448,8 @@ static int gm20b_gr_falcon_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
* Exit with success if opcode status is set to skip for both
* success and failure.
*/
-if (opc_success == GR_IS_UCODE_OP_SKIP &&
-opc_fail == GR_IS_UCODE_OP_SKIP) {
+if ((opc_success == GR_IS_UCODE_OP_SKIP) &&
+(opc_fail == GR_IS_UCODE_OP_SKIP)) {
check = WAIT_UCODE_OK;
break;
}

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -147,7 +147,7 @@ void gm20b_gr_init_pd_skip_table_gpc(struct gk20a *g,
u32 skip_mask = 0;
for (gpc_index = 0;
-gpc_index < gr_pd_dist_skip_table__size_1_v() * 4U;
+gpc_index < (gr_pd_dist_skip_table__size_1_v() * 4U);
gpc_index += 4U) {
if ((gr_pd_dist_skip_table_gpc_4n0_mask_f(
nvgpu_gr_config_get_gpc_skip_mask(gr_config,

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -47,8 +47,8 @@ static bool gr_activity_empty_or_preempted(u32 val)
while (val != 0U) {
u32 v = val & 7U;
-if (v != gr_activity_4_gpc0_empty_v() &&
-v != gr_activity_4_gpc0_preempted_v()) {
+if ((v != gr_activity_4_gpc0_empty_v()) &&
+(v != gr_activity_4_gpc0_preempted_v())) {
return false;
}
val >>= 3;
@@ -89,9 +89,9 @@ int gp10b_gr_init_wait_empty(struct gk20a *g)
activity4 = nvgpu_readl(g, gr_activity_4_r());
gr_busy = !(gr_activity_empty_or_preempted(activity0) &&
-gr_activity_empty_or_preempted(activity1) &&
-activity2 == 0U &&
-gr_activity_empty_or_preempted(activity4));
+gr_activity_empty_or_preempted(activity1) &&
+(activity2 == 0U) &&
+gr_activity_empty_or_preempted(activity4));
if (!gr_busy && !ctxsw_active) {
nvgpu_log_fn(g, "done");

View File

@@ -537,7 +537,7 @@ void gv11b_gr_init_fs_state(struct gk20a *g)
gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_arm_63_48_match_f());
nvgpu_writel(g, gr_gpcs_tpcs_sm_texio_control_r(), data);
-if (ver == NVGPU_GPUID_GV11B && nvgpu_is_soc_t194_a01(g)) {
+if ((ver == NVGPU_GPUID_GV11B) && nvgpu_is_soc_t194_a01(g)) {
/*
* For t194 A01, Disable CBM alpha and beta invalidations
* Disable SCC pagepool invalidates
@@ -810,8 +810,8 @@ int gv11b_gr_init_load_sw_veid_bundle(struct gk20a *g,
for (i = 0U; i < sw_veid_bundle_init->count; i++) {
nvgpu_log_fn(g, "veid bundle count: %d", i);
-if (i == 0U || last_bundle_data !=
-sw_veid_bundle_init->l[i].value) {
+if ((i == 0U) || (last_bundle_data !=
+sw_veid_bundle_init->l[i].value)) {
nvgpu_writel(g, gr_pipe_bundle_data_r(),
sw_veid_bundle_init->l[i].value);
last_bundle_data = sw_veid_bundle_init->l[i].value;
@@ -940,7 +940,8 @@ int gv11b_gr_init_load_sw_bundle_init(struct gk20a *g,
u32 bundle_data = 0;
for (i = 0U; i < sw_bundle_init->count; i++) {
-if (i == 0U || last_bundle_data != sw_bundle_init->l[i].value) {
+if ((i == 0U) || (last_bundle_data !=
+sw_bundle_init->l[i].value)) {
bundle_data = sw_bundle_init->l[i].value;
/*
* For safety golden context comparison,

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -75,10 +75,10 @@ int gk20a_mm_fb_flush(struct gk20a *g)
do {
data = nvgpu_readl(g, flush_fb_flush_r());
-if (flush_fb_flush_outstanding_v(data) ==
-flush_fb_flush_outstanding_true_v() ||
-flush_fb_flush_pending_v(data) ==
-flush_fb_flush_pending_busy_v()) {
+if ((flush_fb_flush_outstanding_v(data) ==
+flush_fb_flush_outstanding_true_v()) ||
+(flush_fb_flush_pending_v(data) ==
+flush_fb_flush_pending_busy_v())) {
nvgpu_log_info(g, "fb_flush 0x%x", data);
nvgpu_udelay(5);
} else {
@@ -132,10 +132,10 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
do {
data = nvgpu_readl(g, flush_l2_system_invalidate_r());
-if (flush_l2_system_invalidate_outstanding_v(data) ==
-flush_l2_system_invalidate_outstanding_true_v() ||
-flush_l2_system_invalidate_pending_v(data) ==
-flush_l2_system_invalidate_pending_busy_v()) {
+if ((flush_l2_system_invalidate_outstanding_v(data) ==
+flush_l2_system_invalidate_outstanding_true_v()) ||
+(flush_l2_system_invalidate_pending_v(data) ==
+flush_l2_system_invalidate_pending_busy_v())) {
nvgpu_log_info(g, "l2_system_invalidate 0x%x",
data);
nvgpu_udelay(5);
@@ -201,10 +201,10 @@ int gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
do {
data = nvgpu_readl(g, flush_l2_flush_dirty_r());
-if (flush_l2_flush_dirty_outstanding_v(data) ==
-flush_l2_flush_dirty_outstanding_true_v() ||
-flush_l2_flush_dirty_pending_v(data) ==
-flush_l2_flush_dirty_pending_busy_v()) {
+if ((flush_l2_flush_dirty_outstanding_v(data) ==
+flush_l2_flush_dirty_outstanding_true_v()) ||
+(flush_l2_flush_dirty_pending_v(data) ==
+flush_l2_flush_dirty_pending_busy_v())) {
nvgpu_log_info(g, "l2_flush_dirty 0x%x", data);
nvgpu_udelay(5);
} else {

View File

@@ -185,7 +185,7 @@ static void update_pte(struct vm_gk20a *vm,
gmmu_new_pte_valid_true_f() :
gmmu_new_pte_valid_false_f();
u64 phys_shifted = phys_addr >> gmmu_new_pte_address_shift_v();
-u32 pte_addr = attrs->aperture == APERTURE_SYSMEM ?
+u32 pte_addr = (attrs->aperture == APERTURE_SYSMEM) ?
gmmu_new_pte_address_sys_f(u64_lo32(phys_shifted)) :
gmmu_new_pte_address_vid_f(u64_lo32(phys_shifted));
u32 pte_tgt = gmmu_aperture_mask(g,
@@ -333,7 +333,7 @@ static u32 gp10b_get_pde0_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
return pgsz;
}
-for (idx = 0; idx < GP10B_PDE0_ENTRY_SIZE >> 2; idx++) {
+for (idx = 0; idx < (GP10B_PDE0_ENTRY_SIZE >> 2); idx++) {
pde_v[idx] =
nvgpu_mem_rd32(g, pd->mem, (u64)pde_offset + (u64)idx);
}

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -573,8 +573,8 @@ void gv11b_mm_mmu_fault_handle_nonreplay_replay_fault(struct gk20a *g,
return;
}
nvgpu_log(g, gpu_dbg_intr, "%s MMU FAULT",
-index == NVGPU_MMU_FAULT_REPLAY_REG_INDX ?
-"REPLAY" : "NON-REPLAY");
+(index == NVGPU_MMU_FAULT_REPLAY_REG_INDX) ?
+"REPLAY" : "NON-REPLAY");
nvgpu_log(g, gpu_dbg_intr, "get ptr = %d", get_indx);