gpu: nvgpu: MISRA Rule 10.1 fixes

MISRA Rule 10.1 requires that the operands of an operator have an
appropriate essential type. For example, only unsigned integers may be
used as operands of bitwise operators.

This patch fixes miscellaneous Rule 10.1 violations by replacing signed
shift expressions such as (1 << n) with BIT32(n) or by making the shift
operands unsigned.
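
To illustrate the pattern applied in the changes below, here is a
minimal, hypothetical sketch. The BIT32() definition and the
build_event_mask() helper shown here are assumptions for illustration
only; nvgpu provides its own BIT32() macro.

    #include <stdint.h>

    /* Assumed stand-in for nvgpu's BIT32() helper; the real definition
     * lives in the nvgpu headers. The point is that the value being
     * shifted is an unsigned 32-bit operand. */
    #define BIT32(i) ((uint32_t)1U << (i))

    /* Hypothetical example, not taken from the driver. */
    static uint32_t build_event_mask(void)
    {
            /* Rule 10.1 violation: '1' is a signed int operand of '<<'. */
            /* uint32_t mask = (1 << 0); */

            /* Compliant: the shifted value is unsigned. */
            uint32_t mask = BIT32(0);

            return mask;
    }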

JIRA NVGPU-777
JIRA NVGPU-1006

Change-Id: Iec24a6736e60873382901210e60b1f68d07c3e77
Signed-off-by: Sai Nikhil <snikhil@nvidia.com>
Signed-off-by: Adeel Raza <araza@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1971222
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:       Sai Nikhil
Date:         2018-12-12 15:20:34 +05:30
Committed by: mobile promotions
Commit:       eddf9b3505 (parent e824ea0963)

5 changed files with 14 additions and 14 deletions

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -79,7 +79,7 @@ static int _pwr_domains_pmudatainit_ina3221(struct gk20a *g,
 	ina3221_desc->configuration = ina3221->configuration;
 	ina3221_desc->mask_enable = ina3221->mask_enable;
 	/* configure NV_PMU_THERM_EVENT_EXT_OVERT */
-	ina3221_desc->event_mask = (1 << 0);
+	ina3221_desc->event_mask = BIT32(0);
 	ina3221_desc->curr_correct_m = ina3221->curr_correct_m;
 	ina3221_desc->curr_correct_b = ina3221->curr_correct_b;

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -267,7 +267,7 @@ static int devinit_get_pwr_topology_table(struct gk20a *g,
 	pwr_topology_data.boardobj.type = CTRL_PMGR_PWR_CHANNEL_TYPE_SENSOR;
 	pwr_topology_data.pwrchannel.pwr_rail = (u8)pwr_topology_table_entry.pwr_rail;
 	pwr_topology_data.pwrchannel.volt_fixed_uv = pwr_topology_table_entry.param0;
-	pwr_topology_data.pwrchannel.pwr_corr_slope = (1 << 12);
+	pwr_topology_data.pwrchannel.pwr_corr_slope = BIT32(12);
 	pwr_topology_data.pwrchannel.pwr_corr_offset_mw = 0;
 	pwr_topology_data.pwrchannel.curr_corr_slope =
 		(u32)pwr_topology_table_entry.curr_corr_slope;

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -1113,7 +1113,7 @@ static int devinit_get_vfe_var_table(struct gk20a *g,
 				var.param3;
 		} else {
 			var_data.single_sensed_fuse.vfield_info.hw_correction_scale =
-				1 << 12;
+				BIT32(12);
 			var_data.single_sensed_fuse.vfield_info.hw_correction_offset =
 				0;
 			if ((var_data.single_sensed_fuse.vfield_info.v_field_id ==
@@ -1121,7 +1121,7 @@ static int devinit_get_vfe_var_table(struct gk20a *g,
 			    (var_data.single_sensed_fuse.vfield_info.v_field_id ==
 			    VFIELD_ID_STRAP_IDDQ_1)) {
 				var_data.single_sensed_fuse.vfield_info.hw_correction_scale =
-					50 << 12;
+					50U << 12U;
 			}
 		}
 		break;

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -210,7 +210,7 @@ static int therm_send_slct_configuration_to_pmu(struct gk20a *g)
 	rpccall.function = NV_PMU_THERM_RPC_ID_SLCT;
 	rpccall.params.slct.mask_enabled =
-		(1 << NV_PMU_THERM_EVENT_THERMAL_1);
+		BIT32(NV_PMU_THERM_EVENT_THERMAL_1);
 	rpccall.b_supported = 0;
 	cmd.hdr.unit_id = PMU_UNIT_THERM;

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -347,12 +347,12 @@ void gr_tu104_enable_gpc_exceptions(struct gk20a *g)
 			gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f());
 	tpc_mask =
-		gr_gpcs_gpccs_gpc_exception_en_tpc_f((1 << gr->max_tpc_per_gpc_count) - 1);
+		gr_gpcs_gpccs_gpc_exception_en_tpc_f(BIT32(gr->max_tpc_per_gpc_count) - 1U);
 	gk20a_writel(g, gr_gpcs_gpccs_gpc_exception_en_r(),
-		(tpc_mask | gr_gpcs_gpccs_gpc_exception_en_gcc_f(1) |
-		gr_gpcs_gpccs_gpc_exception_en_gpccs_f(1) |
-		gr_gpcs_gpccs_gpc_exception_en_gpcmmu_f(1)));
+		(tpc_mask | gr_gpcs_gpccs_gpc_exception_en_gcc_f(1U) |
+		gr_gpcs_gpccs_gpc_exception_en_gpccs_f(1U) |
+		gr_gpcs_gpccs_gpc_exception_en_gpcmmu_f(1U)));
 }
 
 int gr_tu104_get_offset_in_gpccs_segment(struct gk20a *g,