mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: fix MISRA rule 14.2 for loops
MISRA Rule 14.2 requires that for loops be well-formed. A well-formed for loop meets the following requirements:

1. The first clause may be empty or must assign a value to a single loop counter.
2. The second clause must exist and must use the loop counter or a loop control flag. It must not use any variable modified in the loop body.
3. The third clause must only update the loop counter and must not use objects modified in the loop body.

This change rewrites the affected for loops to use a single loop counter: additional initializations are moved before the loop, extra conditions are checked at the start of the loop body, and extra variable updates are moved to the end of the loop body.

Jira NVGPU-855

Change-Id: I93ccf1ac0677ff355364a718d2d953467f1d9d95
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2108188
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 4b1d58e3f9
commit e9e9ae9b92
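To make the rule concrete before the hunks, here is a minimal standalone sketch of the rewrite the commit message describes. Every name in it (sum_strided, buf, stride) is hypothetical; none of this code is from the commit.

#include <stddef.h>

/*
 * Hypothetical example of the MISRA 14.2 rewrite. The non-compliant
 * form would be:
 *
 *     for (i = 0, total = 0, p = buf; i < n; i++, p += stride)
 *         total += *p;
 *
 * Clause 1 initializes three objects and clause 3 modifies 'p' in
 * addition to the counter. The well-formed version below leaves only
 * the counter 'i' in all three clauses. The caller must guarantee
 * that 'buf' covers offsets 0, stride, ..., (n - 1) * stride.
 */
static unsigned int sum_strided(const unsigned char *buf, size_t n,
				size_t stride)
{
	unsigned int total = 0;		/* hoisted out of clause 1 */
	const unsigned char *p = buf;	/* hoisted out of clause 1 */
	size_t i;

	for (i = 0; i < n; i++) {
		total += *p;
		p += stride;		/* moved out of clause 3 */
	}
	return total;
}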
@@ -168,8 +168,8 @@ int nvgpu_clk_arb_update_vf_table(struct nvgpu_clk_arb *arb)
 	 * second verifies that the clocks minimum is satisfied and sets
 	 * the voltages,the later part is done in nvgpu_clk_set_req_fll_clk_ps35
 	 */
-	for (i = 0, j = 0, num_points = 0, clk_cur = 0;
-			i < table->gpc2clk_num_points; i++) {
+	j = 0; num_points = 0; clk_cur = 0;
+	for (i = 0; i < table->gpc2clk_num_points; i++) {
 		struct nvgpu_set_fll_clk setfllclk;
 
 		if ((arb->gpc2clk_f_points[i] >= arb->gpc2clk_min) &&
@@ -174,10 +174,11 @@ static int parse_pstate_table_6x(struct gk20a *g,
 			U32(hdr->clock_entry_count) *
 			U32(hdr->clock_entry_size);
 
-	for (i = 0; i < hdr->base_entry_count; i++, p += entry_size) {
+	for (i = 0; i < hdr->base_entry_count; i++) {
 		entry = (struct vbios_pstate_entry_6x *)p;
 
 		if (entry->pstate_level == VBIOS_PERFLEVEL_SKIP_ENTRY) {
+			p += entry_size;
 			continue;
 		}
 
@@ -195,6 +196,7 @@ static int parse_pstate_table_6x(struct gk20a *g,
 		if (err != 0) {
 			goto done;
 		}
+		p += entry_size;
 	}
 
 done:
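A point worth noting about the two hunks above: once the p += entry_size advance leaves the for clause, every path through the body, including the continue taken for skipped entries, has to advance the pointer itself, or the loop parses the same entry on every iteration. The same pattern restated as a standalone sketch with hypothetical names (count_valid, table, entry_size; not the driver code):

#include <stddef.h>
#include <stdint.h>

/*
 * Hypothetical table walk: count entries whose first byte is not a
 * skip marker. With the pointer advance moved out of the for clause,
 * the skip path must advance 'p' before 'continue' as well.
 */
static size_t count_valid(const uint8_t *table, size_t entries,
			  size_t entry_size)
{
	const uint8_t *p = table;
	size_t valid = 0;
	size_t i;

	for (i = 0; i < entries; i++) {
		if (p[0] == 0xffU) {		/* skip marker */
			p += entry_size;	/* advance before continue */
			continue;
		}
		valid++;
		p += entry_size;		/* advance at end of body */
	}
	return valid;
}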
@@ -567,8 +567,7 @@ static int devinit_get_pwr_policy_table(struct gk20a *g,
 
 	ptr += (u32)hdr.header_size;
 
-	for (index = 0; index < hdr.num_table_entries;
-			index++, ptr += (u32)hdr.table_entry_size) {
+	for (index = 0; index < hdr.num_table_entries; index++) {
 
 		struct pwr_policy_3x_entry_struct *packed_entry;
 		struct pwr_policy_3x_entry_unpacked entry;
@@ -581,6 +580,7 @@ static int devinit_get_pwr_policy_table(struct gk20a *g,
 			NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_CLASS);
 
 		if (class_type != NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_CLASS_HW_THRESHOLD) {
+			ptr += (u32)hdr.table_entry_size;
 			continue;
 		}
 
@@ -691,8 +691,9 @@ static int devinit_get_pwr_policy_table(struct gk20a *g,
 			status = -EINVAL;
 			goto done;
 		}
 
 		++obj_index;
+		ptr += (u32)hdr.table_entry_size;
 	}
 
 	if (g->hardcode_sw_threshold) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -438,7 +438,7 @@ void nvgpu_rbtree_enum_next(struct nvgpu_rbtree_node **node,
 	/* pick the leftmost node of the right subtree ? */
 	if (curr->right != NULL) {
 		curr = curr->right;
-		for (; curr->left != NULL;) {
+		while (curr->left != NULL) {
 			curr = curr->left;
 		}
 	} else {
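The rbtree hunk addresses a different clause of the rule: for (; cond;) has an empty first and third clause, and its idiomatic well-formed equivalent is a plain while loop. A minimal standalone sketch of the same shape, with a hypothetical node type (not nvgpu_rbtree_node itself):

#include <stddef.h>

/* Hypothetical node type mirroring the shape of the rbtree node. */
struct node {
	struct node *left;
};

/*
 * Descend to the leftmost node of a subtree. 'curr' must be non-NULL,
 * as in the original caller. The empty-clause for loop becomes while.
 */
static struct node *leftmost(struct node *curr)
{
	while (curr->left != NULL) {
		curr = curr->left;
	}
	return curr;
}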
@@ -245,7 +245,8 @@ int gv100_gr_config_init_sm_id_table(struct gk20a *g,
 		gpc_tpc_mask[gpc_table[gtpc]] &= ~(BIT64(tpc_table[gtpc]));
 	}
 
-	for (tpc = 0, sm_id = 0; sm_id < num_sm; tpc++, sm_id += sm_per_tpc) {
+	tpc = 0;
+	for (sm_id = 0; sm_id < num_sm; sm_id += sm_per_tpc) {
 		for (sm = 0; sm < sm_per_tpc; sm++) {
 			u32 index = sm_id + sm;
 			struct nvgpu_sm_info *sm_info =
@@ -265,6 +266,7 @@ int gv100_gr_config_init_sm_id_table(struct gk20a *g,
 			nvgpu_gr_config_get_sm_info_global_tpc_index(sm_info));
 
 		}
+		tpc++;
 	}
 
 	nvgpu_gr_config_set_no_of_sm(gr_config, num_sm);
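The two gv100 hunks show the dual-counter split: the secondary counter tpc is initialized before the loop and incremented at the end of the body, leaving only sm_id in the for clauses. The same shape in a standalone sketch with hypothetical names (tag_rows, per_row; not the gr code):

/*
 * Hypothetical dual-counter split: 'row' is initialized before the
 * loop and bumped at the end of the body, so the for clauses mention
 * only 'base'. Assumes 'per_row' is non-zero and 'total' is a
 * multiple of 'per_row', as the real caller guarantees for num_sm
 * and sm_per_tpc.
 */
static void tag_rows(unsigned int *tags, unsigned int total,
		     unsigned int per_row)
{
	unsigned int row = 0;	/* was part of clause 1 */
	unsigned int base;
	unsigned int i;

	for (base = 0; base < total; base += per_row) {
		for (i = 0; i < per_row; i++) {
			tags[base + i] = row;
		}
		row++;		/* was part of clause 3 */
	}
}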
@@ -1412,7 +1412,10 @@ static int gr_exec_ctx_ops(struct nvgpu_channel *ch,
 	if (ch_is_curr_ctx) {
 		for (pass = 0; pass < 2; pass++) {
 			ctx_op_nr = 0;
-			for (i = 0; (ctx_op_nr < num_ctx_ops[pass]) && (i < num_ops); ++i) {
+			for (i = 0; i < num_ops; ++i) {
+				if (ctx_op_nr >= num_ctx_ops[pass]) {
+					break;
+				}
 				/* only do ctx ops and only on the right pass */
 				if ((ctx_ops[i].type == REGOP(TYPE_GLOBAL)) ||
 				    (((pass == 0) && reg_op_is_read(ctx_ops[i].op)) ||
@@ -1500,9 +1503,13 @@ static int gr_exec_ctx_ops(struct nvgpu_channel *ch,
 	/* first pass is writes, second reads */
 	for (pass = 0; pass < 2; pass++) {
 		ctx_op_nr = 0;
-		for (i = 0; (ctx_op_nr < num_ctx_ops[pass]) && (i < num_ops); ++i) {
+		for (i = 0; i < num_ops; ++i) {
 			u32 num_offsets;
 
+			if (ctx_op_nr >= num_ctx_ops[pass]) {
+				break;
+			}
+
 			/* only do ctx ops and only on the right pass */
 			if ((ctx_ops[i].type == REGOP(TYPE_GLOBAL)) ||
 			    (((pass == 0) && reg_op_is_read(ctx_ops[i].op)) ||
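Finally, the gr_exec_ctx_ops hunks split a compound loop condition: the ctx_op_nr < num_ctx_ops[pass] test depends on a variable modified inside the body, so it cannot stay in the second clause and becomes an explicit break at the top of the body. Restated as a standalone sketch with hypothetical names (copy_first, limit; not the gr code):

/*
 * Hypothetical condition split: copy at most 'limit' elements from
 * 'src' to 'dst'. The extra termination test moves from the for
 * clause to a break, leaving only the counter 'i' in the clause.
 */
static unsigned int copy_first(const int *src, unsigned int n,
			       int *dst, unsigned int limit)
{
	unsigned int copied = 0;
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (copied >= limit) {
			break;	/* was '(copied < limit) && (i < n)' */
		}
		dst[copied] = src[i];
		copied++;
	}
	return copied;
}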