gpu: nvgpu: common: MISRA Rule 10.1 fixes

MISRA rule 10.1 mandates that the correct data types are used as
operands of operators. For example, only unsigned integers can be used
as operands of bitwise operators.

This patch fixes rule 10.1 violations for drivers/gpu/nvgpu/common.

JIRA NVGPU-777
JIRA NVGPU-1006

Change-Id: I53fe750f1b41816a183c595e5beb7bd263c27725
Signed-off-by: Sai Nikhil <snikhil@nvidia.com>
Signed-off-by: Adeel Raza <araza@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1971221
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Sai Nikhil
2018-12-12 15:15:37 +05:30
committed by mobile promotions
parent 67aec1f12a
commit e824ea0963
7 changed files with 22 additions and 22 deletions

View File

@@ -466,14 +466,14 @@ int gk20a_tsg_open_common(struct gk20a *g, struct tsg_gk20a *tsg)
}
tsg->g = g;
tsg->num_active_channels = 0;
tsg->num_active_channels = 0U;
nvgpu_ref_init(&tsg->refcount);
tsg->vm = NULL;
tsg->interleave_level = NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW;
tsg->timeslice_us = 0;
tsg->timeslice_timeout = 0;
tsg->timeslice_scale = 0;
tsg->timeslice_us = 0U;
tsg->timeslice_timeout = 0U;
tsg->timeslice_scale = 0U;
tsg->runlist_id = FIFO_INVAL_TSG_ID;
tsg->sm_exception_mask_type = NVGPU_SM_EXCEPTION_TYPE_MASK_NONE;
tsg->gr_ctx = nvgpu_kzalloc(g, sizeof(*tsg->gr_ctx));

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -102,9 +102,9 @@ int ltc_tu104_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
ltc_ltcs_ltss_cbc_param_bytes_per_comptagline_per_slice_v(cbc_param);
ctags_per_cacheline = gr->cacheline_size / ctags_size;
amap_divide_rounding = (2 * 1024) <<
amap_divide_rounding = (U32(2U) * U32(1024U)) <<
ltc_ltcs_ltss_cbc_param_amap_divide_rounding_v(cbc_param);
amap_swizzle_rounding = (64 * 1024) <<
amap_swizzle_rounding = (U32(64U) * U32(1024U)) <<
ltc_ltcs_ltss_cbc_param_amap_swizzle_rounding_v(cbc_param);
compbit_backing_size =

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -113,7 +113,7 @@ static int nvgpu_netlist_init_ctx_vars_fw(struct gk20a *g)
struct nvgpu_firmware *netlist_fw;
struct netlist_image *netlist = NULL;
char name[MAX_NETLIST_NAME];
u32 i, major_v = ~0, major_v_hw, netlist_num;
u32 i, major_v = ~U32(0U), major_v_hw, netlist_num;
int net, max, err = -ENOENT;
nvgpu_log_fn(g, " ");
@@ -121,7 +121,7 @@ static int nvgpu_netlist_init_ctx_vars_fw(struct gk20a *g)
if (g->ops.netlist.is_fw_defined()) {
net = NETLIST_FINAL;
max = 0;
major_v_hw = ~0;
major_v_hw = ~U32(0U);
netlist_vars->dynamic = false;
} else {
net = NETLIST_SLOT_A;

View File

@@ -246,7 +246,7 @@ int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
}
/* load FECS */
gk20a_writel(g,
gr_fecs_ctxsw_mailbox_clear_r(0), ~0x0);
gr_fecs_ctxsw_mailbox_clear_r(0), ~U32(0x0U));
gm20b_pmu_load_lsf(g, FALCON_ID_FECS, flags);
err = pmu_gm20b_ctx_wait_lsf_ready(g, timeout,
0x55AA55AA);

View File

@@ -248,7 +248,7 @@ static void sec2_load_ls_falcons(struct gk20a *g, struct nvgpu_sec2 *sec2,
command_ack = false;
err = nvgpu_sec2_cmd_post(g, &cmd, NULL, PMU_COMMAND_QUEUE_HPQ,
sec2_handle_lsfm_boot_acr_msg, &command_ack, &seq, ~0);
sec2_handle_lsfm_boot_acr_msg, &command_ack, &seq, ~0UL);
if (err != 0) {
nvgpu_err(g, "command post failed");
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -184,9 +184,9 @@ struct mm_gk20a {
#define gk20a_from_mm(mm) ((mm)->g)
#define gk20a_from_vm(vm) ((vm)->mm->g)
static inline int bar1_aperture_size_mb_gk20a(void)
static inline u32 bar1_aperture_size_mb_gk20a(void)
{
return 16; /* 16MB is more than enough atm. */
return 16U; /* 16MB is more than enough atm. */
}
/* The maximum GPU VA range supported */

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -24,13 +24,13 @@
/*perfmon task defines*/
#define PMU_DOMAIN_GROUP_PSTATE 0
#define PMU_DOMAIN_GROUP_GPC2CLK 1
#define PMU_DOMAIN_GROUP_NUM 2
#define PMU_DOMAIN_GROUP_PSTATE 0U
#define PMU_DOMAIN_GROUP_GPC2CLK 1U
#define PMU_DOMAIN_GROUP_NUM 2U
#define PMU_PERFMON_FLAG_ENABLE_INCREASE (0x00000001)
#define PMU_PERFMON_FLAG_ENABLE_DECREASE (0x00000002)
#define PMU_PERFMON_FLAG_CLEAR_PREV (0x00000004)
#define PMU_PERFMON_FLAG_ENABLE_INCREASE BIT8(0)
#define PMU_PERFMON_FLAG_ENABLE_DECREASE BIT8(1)
#define PMU_PERFMON_FLAG_CLEAR_PREV BIT8(2)
#define NV_PMU_PERFMON_MAX_COUNTERS 10U