gpu: nvgpu: common: Fix MISRA 15.6 violations

This fixes violations caused by single-statement loop bodies without braces,
which are covered by Rule 15.6 of MISRA. This patch covers files under
gpu/nvgpu/common/.
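
A minimal, hypothetical sketch of the pattern being fixed (example code only,
not taken from the changed files); Rule 15.6 requires each loop body to be a
compound statement:

    /* Non-compliant: the loop body is a single statement without braces. */
    static void clear_buf_before(unsigned int *buf, unsigned int count)
    {
        unsigned int i;

        for (i = 0; i < count; i++)
            buf[i] = 0U;
    }

    /* Compliant: the loop body is wrapped in braces. */
    static void clear_buf_after(unsigned int *buf, unsigned int count)
    {
        unsigned int i;

        for (i = 0; i < count; i++) {
            buf[i] = 0U;
        }
    }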

JIRA NVGPU-989

Change-Id: Ic6a98a1cd04e4524dabf650e2f6e73c6b5a1db9d
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1786207
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Adeel Raza <araza@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:       Srirangan
Date:         2018-08-01 13:02:14 +05:30
Committed by: mobile promotions
Parent:       e6c135ecb7
Commit:       63e6e8ee3e

10 changed files with 44 additions and 25 deletions

View File

@@ -284,10 +284,11 @@ static void nvgpu_flcn_print_mem(struct nvgpu_falcon *flcn, u32 src,
break;
}
- for (i = 0; i < (byte_read_count >> 2); i += 4)
+ for (i = 0; i < (byte_read_count >> 2); i += 4) {
nvgpu_info(flcn->g, "%#06x: %#010x %#010x %#010x %#010x",
- src + (i << 2), buff[i], buff[i+1],
- buff[i+2], buff[i+3]);
+ src + (i << 2), buff[i], buff[i+1],
+ buff[i+2], buff[i+3]);
+ }
src += byte_read_count;
size -= byte_read_count;

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -214,8 +214,9 @@ static int balloc_init_lists(struct nvgpu_buddy_allocator *a)
bend = a->end;
/* First make sure the LLs are valid. */
- for (i = 0; i < GPU_BALLOC_ORDER_LIST_LEN; i++)
+ for (i = 0; i < GPU_BALLOC_ORDER_LIST_LEN; i++) {
nvgpu_init_list_node(balloc_get_order_list(a, i));
+ }
while (bstart < bend) {
order = __balloc_max_order_in(a, bstart, bend);
@@ -505,8 +506,9 @@ static u64 __balloc_do_alloc(struct nvgpu_buddy_allocator *a,
split_order = order;
while (split_order <= a->max_order &&
- !(bud = __balloc_find_buddy(a, split_order, pte_size)))
+ !(bud = __balloc_find_buddy(a, split_order, pte_size))) {
split_order++;
+ }
/* Out of memory! */
if (!bud)
@@ -885,8 +887,9 @@ static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *__a,
balloc_alloc_fixed(a, falloc);
nvgpu_list_for_each_entry(bud, &falloc->buddies,
- nvgpu_buddy, buddy_entry)
+ nvgpu_buddy, buddy_entry) {
real_bytes += (bud->end - bud->start);
+ }
a->bytes_alloced += len;
a->bytes_alloced_real += real_bytes;

View File

@@ -861,8 +861,9 @@ static int __nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
pte_size = (u32)(l->entry_size / sizeof(u32));
if (data) {
- for (i = 0; i < pte_size; i++)
+ for (i = 0; i < pte_size; i++) {
data[i] = nvgpu_mem_rd32(g, pd->mem, pte_base + i);
+ }
}
if (pd_out)

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -194,8 +194,9 @@ int nvgpu_lockless_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
/* chain the elements together to form the initial free list */
nr_nodes = (int)count;
- for (i = 0; i < nr_nodes; i++)
+ for (i = 0; i < nr_nodes; i++) {
a->next[i] = i + 1;
+ }
a->next[nr_nodes - 1] = -1;
a->base = base;

View File

@@ -75,8 +75,9 @@ int nvgpu_vm_pde_coverage_bit_count(struct vm_gk20a *vm)
* heirarchy: the last level is PTEs so we really want the level
* before that which is the last level of PDEs.
*/
- while (vm->mmu_levels[final_pde_level + 2].update_entry)
+ while (vm->mmu_levels[final_pde_level + 2].update_entry) {
final_pde_level++;
+ }
return vm->mmu_levels[final_pde_level].lo_bit[0];
}
@@ -93,9 +94,10 @@ static void __nvgpu_vm_free_entries(struct vm_gk20a *vm,
}
if (pd->entries) {
- for (i = 0; i < pd->num_entries; i++)
+ for (i = 0; i < pd->num_entries; i++) {
__nvgpu_vm_free_entries(vm, &pd->entries[i],
level + 1);
+ }
nvgpu_vfree(vm->mm->g, pd->entries);
pd->entries = NULL;
}
@@ -112,8 +114,9 @@ static void nvgpu_vm_free_entries(struct vm_gk20a *vm,
if (!pdb->entries)
return;
- for (i = 0; i < pdb->num_entries; i++)
+ for (i = 0; i < pdb->num_entries; i++) {
__nvgpu_vm_free_entries(vm, &pdb->entries[i], 1);
+ }
nvgpu_vfree(g, pdb->entries);
pdb->entries = NULL;
@@ -750,8 +753,9 @@ void nvgpu_vm_put_buffers(struct vm_gk20a *vm,
nvgpu_vm_mapping_batch_start(&batch);
vm->kref_put_batch = &batch;
- for (i = 0; i < num_buffers; ++i)
+ for (i = 0; i < num_buffers; ++i) {
nvgpu_ref_put(&mapped_buffers[i]->ref, __nvgpu_vm_unmap_ref);
+ }
vm->kref_put_batch = NULL;
nvgpu_vm_mapping_batch_finish_locked(vm, &batch);

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -358,8 +358,9 @@ int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu,
}
}
- for (i = 0; i < PMU_QUEUE_COUNT; i++)
+ for (i = 0; i < PMU_QUEUE_COUNT; i++) {
nvgpu_pmu_queue_init(pmu, i, init);
+ }
if (!nvgpu_alloc_initialized(&pmu->dmem)) {
/* Align start and end addresses */
@@ -480,8 +481,9 @@ static int nvgpu_pg_init_task(void *arg)
}
- while (!nvgpu_thread_should_stop(&pg_init->state_task))
+ while (!nvgpu_thread_should_stop(&pg_init->state_task)) {
nvgpu_usleep_range(5000, 5100);
+ }
nvgpu_log_fn(g, "thread exit");

View File

@@ -881,8 +881,9 @@ static void get_pmu_init_msg_pmu_queue_params_v4(
queue->index = init->queue_index[tmp_id];
queue->size = init->queue_size[tmp_id];
if (tmp_id != 0) {
- for (i = 0 ; i < tmp_id; i++)
+ for (i = 0 ; i < tmp_id; i++) {
current_ptr += init->queue_size[i];
+ }
}
queue->offset = init->queue_offset + current_ptr;
}
@@ -908,8 +909,9 @@ static void get_pmu_init_msg_pmu_queue_params_v5(
queue->index = init->queue_index[tmp_id];
queue->size = init->queue_size[tmp_id];
if (tmp_id != 0) {
- for (i = 0 ; i < tmp_id; i++)
+ for (i = 0 ; i < tmp_id; i++) {
current_ptr += init->queue_size[i];
+ }
}
queue->offset = init->queue_offset + current_ptr;
}
@@ -935,8 +937,9 @@ static void get_pmu_init_msg_pmu_queue_params_v3(
queue->index = init->queue_index[tmp_id];
queue->size = init->queue_size[tmp_id];
if (tmp_id != 0) {
- for (i = 0 ; i < tmp_id; i++)
+ for (i = 0 ; i < tmp_id; i++) {
current_ptr += init->queue_size[i];
+ }
}
queue->offset = init->queue_offset + current_ptr;
}

View File

@@ -39,8 +39,9 @@ void nvgpu_pmu_seq_init(struct nvgpu_pmu *pmu)
memset(pmu->pmu_seq_tbl, 0,
sizeof(pmu->pmu_seq_tbl));
- for (i = 0; i < PMU_MAX_NUM_SEQUENCES; i++)
+ for (i = 0; i < PMU_MAX_NUM_SEQUENCES; i++) {
pmu->seq[i].id = i;
+ }
}
static int pmu_seq_acquire(struct nvgpu_pmu *pmu,

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -273,8 +273,9 @@ void nvgpu_rbtree_unlink(struct nvgpu_rbtree_node *node,
} else {
/* find tree successor */
y = z->right;
- while (y->left)
+ while (y->left) {
y = y->left;
+ }
}
/* x is y's only child */
@@ -420,8 +421,9 @@ void nvgpu_rbtree_enum_next(struct nvgpu_rbtree_node **node,
/* pick the leftmost node of the right subtree ? */
if (curr->right) {
curr = curr->right;
- for (; curr->left;)
+ for (; curr->left;) {
curr = curr->left;
+ }
} else {
/* go up until we find the right inorder node */
for (curr = curr->parent; curr; curr = curr->parent) {

View File

@@ -77,8 +77,9 @@ static int __nvgpu_semaphore_sea_grow(struct nvgpu_semaphore_sea *sea)
* integer range. This way any buggy comparisons would start to fail
* sooner rather than later.
*/
- for (i = 0; i < PAGE_SIZE * SEMAPHORE_POOL_COUNT; i += 4)
+ for (i = 0; i < PAGE_SIZE * SEMAPHORE_POOL_COUNT; i += 4) {
nvgpu_mem_wr(gk20a, &sea->sea_mem, i, 0xfffffff0);
+ }
out:
__unlock_sema_sea(sea);