gpu: nvgpu: gk20a: Fix MISRA 15.6 violations

This fixes violations due to single-statement loop bodies
without braces, which are required by Rule 15.6 of MISRA C:2012.
This patch covers files in gpu/nvgpu/gk20a/.
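
For illustration, the pattern being fixed looks like this (a minimal
sketch with made-up names, not code taken from this patch):

    #include <stddef.h>

    /* Non-compliant with MISRA C:2012 Rule 15.6; the loop body is a
     * single statement instead of a compound statement:
     *
     *     for (i = 0; i < n; i++)
     *         buf[i] = 0;
     */

    /* Compliant: the loop body is enclosed in braces. */
    static void zero_buf(int *buf, size_t n)
    {
            size_t i;

            for (i = 0; i < n; i++) {
                    buf[i] = 0;
            }
    }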

JIRA NVGPU-989

Change-Id: I2f422e9bc2b03229f4d2c3198613169ce5e7f3ee
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1791019
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Srirangan <smadhavan@nvidia.com>
Committed: mobile promotions, 2018-08-02 14:15:54 +05:30
Commit:    17aeea4a2f (parent 6c9daf7626)
7 changed files with 84 additions and 42 deletions


@@ -907,8 +907,9 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g)
 	memset(f->active_engines_list, 0xff, (f->max_engines * sizeof(u32)));
 	/* pbdma map needs to be in place before calling engine info init */
-	for (i = 0; i < f->num_pbdma; ++i)
+	for (i = 0; i < f->num_pbdma; ++i) {
 		f->pbdma_map[i] = gk20a_readl(g, fifo_pbdma_map_r(i));
+	}
 	g->ops.fifo.init_engine_info(f);
@@ -2496,9 +2497,10 @@ unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id,
 			f->intr.pbdma.restartable_0) & pbdma_intr_0) {
 		pbdma_intr_err = (unsigned long)pbdma_intr_0;
-		for_each_set_bit(bit, &pbdma_intr_err, 32)
+		for_each_set_bit(bit, &pbdma_intr_err, 32) {
 			nvgpu_err(g, "PBDMA intr %s Error",
 				pbdma_intr_fault_type_desc[bit]);
+		}
 		nvgpu_err(g,
 			"pbdma_intr_0(%d):0x%08x PBH: %08x "
@@ -2851,8 +2853,9 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
 		return 0;
 	/* we have no idea which runlist we are using. lock all */
-	for (i = 0; i < g->fifo.max_runlists; i++)
+	for (i = 0; i < g->fifo.max_runlists; i++) {
 		nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock);
+	}
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -2861,8 +2864,9 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
 	if (!mutex_ret)
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
-	for (i = 0; i < g->fifo.max_runlists; i++)
+	for (i = 0; i < g->fifo.max_runlists; i++) {
 		nvgpu_mutex_release(&f->runlist_info[i].runlist_lock);
+	}
 	if (ret) {
 		if (nvgpu_platform_is_silicon(g)) {
@@ -2891,8 +2895,9 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 		return 0;
 	/* we have no idea which runlist we are using. lock all */
-	for (i = 0; i < g->fifo.max_runlists; i++)
+	for (i = 0; i < g->fifo.max_runlists; i++) {
 		nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock);
+	}
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -2901,8 +2906,9 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	if (!mutex_ret)
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
-	for (i = 0; i < g->fifo.max_runlists; i++)
+	for (i = 0; i < g->fifo.max_runlists; i++) {
 		nvgpu_mutex_release(&f->runlist_info[i].runlist_lock);
+	}
 	if (ret) {
 		if (nvgpu_platform_is_silicon(g)) {


@@ -213,14 +213,16 @@ static int gk20a_flcn_copy_from_dmem(struct nvgpu_falcon *flcn,
 	gk20a_writel(g, base_addr + falcon_falcon_dmemc_r(port),
 		src | falcon_falcon_dmemc_aincr_f(1));
-	for (i = 0; i < words; i++)
+	for (i = 0; i < words; i++) {
 		dst_u32[i] = gk20a_readl(g,
 			base_addr + falcon_falcon_dmemd_r(port));
+	}
 	if (bytes > 0) {
 		data = gk20a_readl(g, base_addr + falcon_falcon_dmemd_r(port));
-		for (i = 0; i < bytes; i++)
+		for (i = 0; i < bytes; i++) {
 			dst[(words << 2) + i] = ((u8 *)&data)[i];
+		}
 	}
 	nvgpu_mutex_release(&flcn->copy_lock);
@@ -256,14 +258,16 @@ static int gk20a_flcn_copy_to_dmem(struct nvgpu_falcon *flcn,
 	gk20a_writel(g, base_addr + falcon_falcon_dmemc_r(port),
 		dst | falcon_falcon_dmemc_aincw_f(1));
-	for (i = 0; i < words; i++)
+	for (i = 0; i < words; i++) {
 		gk20a_writel(g,
 			base_addr + falcon_falcon_dmemd_r(port), src_u32[i]);
+	}
 	if (bytes > 0) {
 		data = 0;
-		for (i = 0; i < bytes; i++)
+		for (i = 0; i < bytes; i++) {
 			((u8 *)&data)[i] = src[(words << 2) + i];
+		}
 		gk20a_writel(g, base_addr + falcon_falcon_dmemd_r(port), data);
 	}
@@ -313,14 +317,16 @@ static int gk20a_flcn_copy_from_imem(struct nvgpu_falcon *flcn, u32 src,
 		falcon_falcon_imemc_blk_f(blk) |
 		falcon_falcon_dmemc_aincr_f(1));
-	for (i = 0; i < words; i++)
+	for (i = 0; i < words; i++) {
 		dst_u32[i] = gk20a_readl(g,
 			base_addr + falcon_falcon_imemd_r(port));
+	}
 	if (bytes > 0) {
 		data = gk20a_readl(g, base_addr + falcon_falcon_imemd_r(port));
-		for (i = 0; i < bytes; i++)
+		for (i = 0; i < bytes; i++) {
 			dst[(words << 2) + i] = ((u8 *)&data)[i];
+		}
 	}
 	nvgpu_mutex_release(&flcn->copy_lock);


@@ -402,8 +402,9 @@ int gk20a_wait_for_idle(struct gk20a *g)
 		return -ENODEV;
 	while ((nvgpu_atomic_read(&g->usage_count) != target_usage_count)
-		&& (wait_length-- >= 0))
+		&& (wait_length-- >= 0)) {
 		nvgpu_msleep(20);
+	}
 	if (wait_length < 0) {
 		nvgpu_warn(g, "Timed out waiting for idle (%d)!\n",


@@ -154,21 +154,25 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
 		goto fail;
 	}
-	for (i = 0; i < g->gr.ctx_vars.ucode.fecs.inst.count; i++)
+	for (i = 0; i < g->gr.ctx_vars.ucode.fecs.inst.count; i++) {
 		g->sim->esc_readl(g, "GRCTX_UCODE_INST_FECS",
 			i, &g->gr.ctx_vars.ucode.fecs.inst.l[i]);
+	}
-	for (i = 0; i < g->gr.ctx_vars.ucode.fecs.data.count; i++)
+	for (i = 0; i < g->gr.ctx_vars.ucode.fecs.data.count; i++) {
 		g->sim->esc_readl(g, "GRCTX_UCODE_DATA_FECS",
 			i, &g->gr.ctx_vars.ucode.fecs.data.l[i]);
+	}
-	for (i = 0; i < g->gr.ctx_vars.ucode.gpccs.inst.count; i++)
+	for (i = 0; i < g->gr.ctx_vars.ucode.gpccs.inst.count; i++) {
 		g->sim->esc_readl(g, "GRCTX_UCODE_INST_GPCCS",
 			i, &g->gr.ctx_vars.ucode.gpccs.inst.l[i]);
+	}
-	for (i = 0; i < g->gr.ctx_vars.ucode.gpccs.data.count; i++)
+	for (i = 0; i < g->gr.ctx_vars.ucode.gpccs.data.count; i++) {
 		g->sim->esc_readl(g, "GRCTX_UCODE_DATA_GPCCS",
 			i, &g->gr.ctx_vars.ucode.gpccs.data.l[i]);
+	}
 	for (i = 0; i < g->gr.ctx_vars.sw_bundle_init.count; i++) {
 		struct av_gk20a *l = g->gr.ctx_vars.sw_bundle_init.l;


@@ -149,9 +149,10 @@ void gk20a_fecs_dump_falcon_stats(struct gk20a *g)
 	nvgpu_err(g, "gr_fecs_ctxsw_status_1_r : 0x%x",
 		gk20a_readl(g, gr_fecs_ctxsw_status_1_r()));
-	for (i = 0; i < g->ops.gr.fecs_ctxsw_mailbox_size(); i++)
+	for (i = 0; i < g->ops.gr.fecs_ctxsw_mailbox_size(); i++) {
 		nvgpu_err(g, "gr_fecs_ctxsw_mailbox_r(%d) : 0x%x",
 			i, gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(i)));
+	}
 	nvgpu_err(g, "gr_fecs_engctl_r : 0x%x",
 		gk20a_readl(g, gr_fecs_engctl_r()));
@@ -1144,8 +1145,9 @@ static inline u32 count_bits(u32 mask)
 {
 	u32 temp = mask;
 	u32 count;
-	for (count = 0; temp != 0; count++)
+	for (count = 0; temp != 0; count++) {
 		temp &= temp - 1;
+	}
 	return count;
 }
@@ -1485,9 +1487,10 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 		GR_IDLE_CHECK_DEFAULT);
 	/* load ctx init */
-	for (i = 0; i < sw_ctx_load->count; i++)
+	for (i = 0; i < sw_ctx_load->count; i++) {
 		gk20a_writel(g, sw_ctx_load->l[i].addr,
 			sw_ctx_load->l[i].value);
+	}
 	if (g->ops.gr.init_preemption_state)
 		g->ops.gr.init_preemption_state(g);
@@ -2029,8 +2032,9 @@ static int gr_gk20a_copy_ctxsw_ucode_segments(
 	/* compute a "checksum" for the boot binary to detect its version */
 	segments->boot_signature = 0;
-	for (i = 0; i < segments->boot.size / sizeof(u32); i++)
+	for (i = 0; i < segments->boot.size / sizeof(u32); i++) {
 		segments->boot_signature += bootimage[i];
+	}
 	return 0;
 }
@@ -3335,33 +3339,41 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 	nvgpu_log_info(g, "tpc_count: %d", gr->tpc_count);
 	nvgpu_log_info(g, "ppc_count: %d", gr->ppc_count);
-	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
+	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
 		nvgpu_log_info(g, "gpc_tpc_count[%d] : %d",
 			gpc_index, gr->gpc_tpc_count[gpc_index]);
-	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
+	}
+	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
 		nvgpu_log_info(g, "gpc_zcb_count[%d] : %d",
 			gpc_index, gr->gpc_zcb_count[gpc_index]);
-	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
+	}
+	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
 		nvgpu_log_info(g, "gpc_ppc_count[%d] : %d",
 			gpc_index, gr->gpc_ppc_count[gpc_index]);
-	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
+	}
+	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
 		nvgpu_log_info(g, "gpc_skip_mask[%d] : %d",
 			gpc_index, gr->gpc_skip_mask[gpc_index]);
-	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
+	}
+	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
 		for (pes_index = 0;
 		     pes_index < gr->pe_count_per_gpc;
-		     pes_index++)
+		     pes_index++) {
 			nvgpu_log_info(g, "pes_tpc_count[%d][%d] : %d",
 				pes_index, gpc_index,
 				gr->pes_tpc_count[pes_index][gpc_index]);
+		}
+	}
-	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
+	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
 		for (pes_index = 0;
 		     pes_index < gr->pe_count_per_gpc;
-		     pes_index++)
+		     pes_index++) {
 			nvgpu_log_info(g, "pes_tpc_mask[%d][%d] : %d",
 				pes_index, gpc_index,
 				gr->pes_tpc_mask[pes_index][gpc_index]);
+		}
+	}
 	g->ops.gr.bundle_cb_defaults(g);
 	g->ops.gr.cb_size_default(g);
@@ -3537,9 +3549,11 @@ static int gr_gk20a_init_map_tiles(struct gk20a *g, struct gr_gk20a *gr)
 		}
 	}
-	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
-		if (gr->gpc_tpc_count[gpc_index] > max_tpc_count)
+	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
+		if (gr->gpc_tpc_count[gpc_index] > max_tpc_count) {
 			max_tpc_count = gr->gpc_tpc_count[gpc_index];
+		}
+	}
 	mul_factor = gr->gpc_count * max_tpc_count;
 	if (mul_factor & 0x1)
@@ -4534,9 +4548,10 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
 		g->ops.gr.disable_rd_coalesce(g);
 	/* load ctx init */
-	for (i = 0; i < sw_ctx_load->count; i++)
+	for (i = 0; i < sw_ctx_load->count; i++) {
 		gk20a_writel(g, sw_ctx_load->l[i].addr,
 			sw_ctx_load->l[i].value);
+	}
 	err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
 		GR_IDLE_CHECK_DEFAULT);
@@ -4764,9 +4779,10 @@ static int gk20a_init_gr_reset_enable_hw(struct gk20a *g)
 	gk20a_writel(g, gr_intr_en_r(), ~0);
 	/* load non_ctx init */
-	for (i = 0; i < sw_non_ctx_load->count; i++)
+	for (i = 0; i < sw_non_ctx_load->count; i++) {
 		gk20a_writel(g, sw_non_ctx_load->l[i].addr,
 			sw_non_ctx_load->l[i].value);
+	}
 	err = gr_gk20a_wait_mem_scrubbing(g);
 	if (err)
@@ -6321,9 +6337,10 @@ void gr_gk20a_split_fbpa_broadcast_addr(struct gk20a *g, u32 addr,
 {
 	u32 fbpa_id;
-	for (fbpa_id = 0; fbpa_id < num_fbpas; fbpa_id++)
+	for (fbpa_id = 0; fbpa_id < num_fbpas; fbpa_id++) {
 		priv_addr_table[(*t)++] = pri_fbpa_addr(g,
 			pri_fbpa_addr_mask(g, addr), fbpa_id);
+	}
 }
 
 int gr_gk20a_split_ppc_broadcast_addr(struct gk20a *g, u32 addr,
@@ -6334,9 +6351,10 @@ int gr_gk20a_split_ppc_broadcast_addr(struct gk20a *g, u32 addr,
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
-	for (ppc_num = 0; ppc_num < g->gr.gpc_ppc_count[gpc_num]; ppc_num++)
+	for (ppc_num = 0; ppc_num < g->gr.gpc_ppc_count[gpc_num]; ppc_num++) {
 		priv_addr_table[(*t)++] = pri_ppc_addr(g, pri_ppccs_addr_mask(addr),
 			gpc_num, ppc_num);
+	}
 	return 0;
 }
@@ -6396,10 +6414,11 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g,
 			if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC)
 				for (tpc_num = 0;
 				     tpc_num < g->gr.gpc_tpc_count[gpc_num];
-				     tpc_num++)
+				     tpc_num++) {
 					priv_addr_table[t++] =
 						pri_tpc_addr(g, pri_tpccs_addr_mask(addr),
 							gpc_num, tpc_num);
+				}
 			else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC) {
 				err = gr_gk20a_split_ppc_broadcast_addr(g, addr, gpc_num,
@@ -6439,10 +6458,11 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g,
 			if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC)
 				for (tpc_num = 0;
 				     tpc_num < g->gr.gpc_tpc_count[gpc_num];
-				     tpc_num++)
+				     tpc_num++) {
 					priv_addr_table[t++] =
 						pri_tpc_addr(g, pri_tpccs_addr_mask(addr),
 							gpc_num, tpc_num);
+				}
 			else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC)
 				err = gr_gk20a_split_ppc_broadcast_addr(g,
 					addr, gpc_num, priv_addr_table, &t);
@@ -7793,8 +7813,9 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g)
 	nvgpu_log_info(g, "Reg Addr => HWPM Ctxt switch buffer offset");
-	for (i = 0; i < count; i++)
+	for (i = 0; i < count; i++) {
 		nvgpu_log_info(g, "%08x => %08x", map[i].addr, map[i].offset);
+	}
 	return 0;
 
 cleanup:


@@ -81,9 +81,10 @@ static void print_pmu_trace(struct nvgpu_pmu *pmu)
 	nvgpu_err(g, "dump PMU trace buffer");
 	for (i = 0; i < GK20A_PMU_TRACE_BUFSIZE; i += 0x40) {
-		for (j = 0; j < 0x40; j++)
+		for (j = 0; j < 0x40; j++) {
 			if (trace1[(i / 4) + j])
 				break;
+		}
 		if (j == 0x40)
 			break;
 		count = scnprintf(buf, 0x40, "Index %x: ", trace1[(i / 4)]);
@@ -634,13 +635,15 @@ void gk20a_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu)
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	unsigned int i;
-	for (i = 0; i < pwr_pmu_mailbox__size_1_v(); i++)
+	for (i = 0; i < pwr_pmu_mailbox__size_1_v(); i++) {
 		nvgpu_err(g, "pwr_pmu_mailbox_r(%d) : 0x%x",
 			i, gk20a_readl(g, pwr_pmu_mailbox_r(i)));
+	}
-	for (i = 0; i < pwr_pmu_debug__size_1_v(); i++)
+	for (i = 0; i < pwr_pmu_debug__size_1_v(); i++) {
 		nvgpu_err(g, "pwr_pmu_debug_r(%d) : 0x%x",
 			i, gk20a_readl(g, pwr_pmu_debug_r(i)));
+	}
 	i = gk20a_readl(g, pwr_pmu_bar0_error_status_r());
 	nvgpu_err(g, "pwr_pmu_bar0_error_status_r : 0x%x", i);


@@ -47,9 +47,10 @@ static int regop_bsearch_range_cmp(const void *pkey, const void *pelem)
 static inline bool linear_search(u32 offset, const u32 *list, int size)
 {
 	int i;
-	for (i = 0; i < size; i++)
+	for (i = 0; i < size; i++) {
 		if (list[i] == offset)
 			return true;
+	}
 	return false;
 }