gpu: nvgpu: MISRA Rule 7.2 misc fixes

MISRA Rule 7.2 Definition: A "u" or "U" suffix shall be applied to all
integer constants that are represented in an unsigned type.

This patch adds a "U" suffix to integer literals which are being
assigned to unsigned integer variables. In most cases the integer
literal is a hexadecimal value.
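
For illustration only (a minimal sketch using a hypothetical variable and
nvgpu's u32 typedef, not code taken from this patch), the pattern being
fixed looks like:

    u32 mask;

    /*
     * Non-compliant: on a 32-bit-int platform 0xFFFFFFFF does not fit in a
     * signed int, so the constant is already represented in an unsigned
     * type; the missing "U" suffix merely leaves that implicit (Rule 7.2).
     */
    mask = 0xFFFFFFFF;

    /* Compliant: the "U" suffix makes the unsigned representation explicit. */
    mask = 0xFFFFFFFFU;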

JIRA NVGPU-844

Change-Id: I8a68c4120681605261b11e5de00f7fc0773454e8
Signed-off-by: Sai Nikhil <snikhil@nvidia.com>
Signed-off-by: Adeel Raza <araza@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1959189
Reviewed-by: Scott Long <scottl@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Sai Nikhil
Date: 2018-11-27 14:49:03 +05:30
Committed by: mobile promotions
Parent: b5d787083c
Commit: 7ffbbdae6e
23 changed files with 100 additions and 100 deletions

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -121,7 +121,7 @@ int boardobjgrpmask_set(struct boardobjgrpmask *mask)
return -EINVAL;
}
for (index = 0; index < mask->maskdatacount; index++) {
- mask->data[index] = 0xFFFFFFFF;
+ mask->data[index] = 0xFFFFFFFFU;
}
BOARDOBJGRPMASK_NORMALIZE(mask);
return 0;

View File

@@ -89,7 +89,7 @@ static void gk20a_falcon_set_irq(struct nvgpu_falcon *flcn, bool enable)
flcn->intr_dest);
} else {
gk20a_writel(g, base_addr + falcon_falcon_irqmclr_r(),
- 0xffffffff);
+ 0xffffffffU);
}
}

View File

@@ -211,7 +211,7 @@ int gv100_fb_memory_unlock(struct gk20a *g)
* mem_unlock bin to denote its return status.
*/
nvgpu_falcon_mailbox_write(g->nvdec_flcn,
- FALCON_MAILBOX_0, 0xdeadbeef);
+ FALCON_MAILBOX_0, 0xdeadbeefU);
/* set BOOTVEC to start of non-secure code */
nvgpu_falcon_bootstrap(g->nvdec_flcn, 0);

View File

@@ -1,7 +1,7 @@
/*
* GV11B FB
*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -134,7 +134,7 @@ void gv11b_fb_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
nvgpu_log(g, gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte,
"compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n",
(u32)(compbit_store_iova >> 32),
- (u32)(compbit_store_iova & 0xffffffff),
+ (u32)(compbit_store_iova & 0xffffffffU),
compbit_base_post_divide);
nvgpu_log(g, gpu_dbg_fn, "cbc base %x",
gk20a_readl(g, fb_mmu_cbc_base_r()));

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -461,7 +461,7 @@ void fb_tu104_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
nvgpu_log(g, gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte,
"compbit base.pa: 0x%x,%08x cbc_base:0x%llx\n",
(u32)(compbit_store_pa >> 32),
- (u32)(compbit_store_pa & 0xffffffff),
+ (u32)(compbit_store_pa & 0xffffffffU),
compbit_store_base);
gr->compbit_store.base_hw = compbit_store_base;

View File

@@ -1,7 +1,7 @@
/*
* GP106 FUSE
*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -49,7 +49,7 @@ int gp106_fuse_read_vin_cal_slope_intercept_fuse(struct gk20a *g,
/* read gpc0 irrespective of vin id */
gpc0data = gk20a_readl(g, fuse_vin_cal_gpc0_r());
- if (gpc0data == 0xFFFFFFFF) {
+ if (gpc0data == 0xFFFFFFFFU) {
return -EINVAL;
}
@@ -86,7 +86,7 @@ int gp106_fuse_read_vin_cal_slope_intercept_fuse(struct gk20a *g,
default:
return -EINVAL;
}
- if (data == 0xFFFFFFFF) {
+ if (data == 0xFFFFFFFFU) {
return -EINVAL;
}
@@ -200,7 +200,7 @@ int gp106_fuse_read_vin_cal_gain_offset_fuse(struct gk20a *g,
default:
return -EINVAL;
}
- if (reg_val == 0xFFFFFFFF) {
+ if (reg_val == 0xFFFFFFFFU) {
return -EINVAL;
}
data = (reg_val >> 16U) & 0x1fU;

View File

@@ -1,7 +1,7 @@
/*
* GM20B L2
*
- * Copyright (c) 2014-2018 NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2019 NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -478,7 +478,7 @@ void gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
nvgpu_log(g, gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte,
"compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n",
(u32)(compbit_store_iova >> 32),
- (u32)(compbit_store_iova & 0xffffffff),
+ (u32)(compbit_store_iova & 0xffffffffU),
compbit_base_post_divide);
gr->compbit_store.base_hw = compbit_base_post_divide;

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -137,7 +137,7 @@ static void intr_tu104_stall_enable(struct gk20a *g)
u32 eng_intr_mask = gk20a_fifo_engine_interrupt_mask(g);
nvgpu_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
- 0xffffffff);
+ 0xffffffffU);
g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING] =
mc_intr_pfifo_pending_f() |
@@ -161,7 +161,7 @@ static void intr_tu104_nonstall_enable(struct gk20a *g)
/* Keep NV_PMC_INTR(1) disabled */
nvgpu_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
- 0xffffffff);
+ 0xffffffffU);
/*
* Enable nonstall interrupts in TOP
@@ -205,15 +205,15 @@ void intr_tu104_mask(struct gk20a *g)
u32 size, reg, i;
nvgpu_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
- 0xffffffff);
+ 0xffffffffU);
nvgpu_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
- 0xffffffff);
+ 0xffffffffU);
size = func_priv_cpu_intr_top_en_clear__size_1_v();
for (i = 0; i < size; i++) {
reg = func_priv_cpu_intr_top_en_clear_r(i);
- nvgpu_func_writel(g, reg, 0xffffffff);
+ nvgpu_func_writel(g, reg, 0xffffffffU);
}
}

View File

@@ -1050,8 +1050,8 @@ static void gv100_nvlink_dlpl_intr_enable(struct gk20a *g, u32 link_id,
}
/* Clear interrupt register to get rid of stale state (W1C) */
- DLPL_REG_WR32(g, link_id, nvl_intr_r(), 0xffffffff);
- DLPL_REG_WR32(g, link_id, nvl_intr_sw2_r(), 0xffffffff);
+ DLPL_REG_WR32(g, link_id, nvl_intr_r(), 0xffffffffU);
+ DLPL_REG_WR32(g, link_id, nvl_intr_sw2_r(), 0xffffffffU);
reg = nvl_intr_stall_en_tx_recovery_long_enable_f() |
nvl_intr_stall_en_tx_fault_ram_enable_f() |
@@ -1128,7 +1128,7 @@ static void gv100_nvlink_dlpl_isr(struct gk20a *g, u32 link_id)
/* Clear interrupts */
DLPL_REG_WR32(g, link_id, nvl_intr_r(), (non_fatal_mask | fatal_mask));
- DLPL_REG_WR32(g, link_id, nvl_intr_sw2_r(), 0xffffffff);
+ DLPL_REG_WR32(g, link_id, nvl_intr_sw2_r(), 0xffffffffU);
}
/*

View File

@@ -147,8 +147,8 @@ int gm20b_pmu_init_acr(struct gk20a *g)
cmd.hdr.size = PMU_CMD_HDR_SIZE +
sizeof(struct pmu_acr_cmd_init_wpr_details);
cmd.cmd.acr.init_wpr.cmd_type = PMU_ACR_CMD_ID_INIT_WPR_REGION;
- cmd.cmd.acr.init_wpr.regionid = 0x01;
- cmd.cmd.acr.init_wpr.wproffset = 0x00;
+ cmd.cmd.acr.init_wpr.regionid = 0x01U;
+ cmd.cmd.acr.init_wpr.wproffset = 0x00U;
nvgpu_pmu_dbg(g, "cmd post PMU_ACR_CMD_ID_INIT_WPR_REGION");
nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
pmu_handle_acr_init_wpr_msg, pmu, &seq);
@@ -249,7 +249,7 @@ int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
gr_fecs_ctxsw_mailbox_clear_r(0), ~U32(0x0U));
gm20b_pmu_load_lsf(g, FALCON_ID_FECS, flags);
err = pmu_gm20b_ctx_wait_lsf_ready(g, timeout,
- 0x55AA55AA);
+ 0x55AA55AAU);
return err;
}

View File

@@ -1,7 +1,7 @@
/*
* GP10B PMU
*
- * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -161,8 +161,8 @@ static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
cmd.cmd.acr.boot_falcons.falconidmask =
falconidmask;
cmd.cmd.acr.boot_falcons.usevamask = 0;
- cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0x0;
- cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0x0;
+ cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0x0U;
+ cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0x0U;
gp10b_dbg_pmu(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n",
falconidmask);
nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
@@ -301,7 +301,7 @@ void gp10b_write_dmatrfbase(struct gk20a *g, u32 addr)
gk20a_writel(g, pwr_falcon_dmatrfbase_r(),
addr);
gk20a_writel(g, pwr_falcon_dmatrfbase1_r(),
- 0x0);
+ 0x0U);
}
bool gp10b_is_lazy_bootstrap(u32 falcon_id)

View File

@@ -1,7 +1,7 @@
/*
* GV11B PMU
*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -260,26 +260,26 @@ int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu)
addr_load_lo = u64_lo32((pmu->ucode.gpu_va +
desc->bootloader_start_offset) >> 8);
- gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
- gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
- gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
- gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
- gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
- gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
- gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
- gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
+ gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0U);
+ gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0U);
+ gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0U);
+ gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0U);
+ gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0U);
+ gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0U);
+ gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0U);
+ gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0U);
gk20a_writel(g, pwr_falcon_dmemd_r(0), GK20A_PMU_DMAIDX_UCODE);
gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_code_lo << 8);
gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_code_hi);
gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_resident_code_offset);
gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_resident_code_size);
- gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
- gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
+ gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0U);
+ gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0U);
gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_imem_entry);
gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_data_lo << 8);
gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_data_hi);
gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_resident_data_size);
- gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x1);
+ gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x1U);
gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_args);
g->ops.pmu.write_dmatrfbase(g,

View File

@@ -1,7 +1,7 @@
/*
* Nvgpu Semaphores
*
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -80,7 +80,7 @@ static int __nvgpu_semaphore_sea_grow(struct nvgpu_semaphore_sea *sea)
* sooner rather than later.
*/
for (i = 0U; i < PAGE_SIZE * SEMAPHORE_POOL_COUNT; i += 4U) {
- nvgpu_mem_wr(gk20a, &sea->sea_mem, i, 0xfffffff0);
+ nvgpu_mem_wr(gk20a, &sea->sea_mem, i, 0xfffffff0U);
}
out:
@@ -552,7 +552,7 @@ static bool __nvgpu_semaphore_value_released(u32 goal, u32 racer)
* effectively the same as: signed_racer - signed_goal > 0.
*/
- return racer - goal < 0x80000000;
+ return racer - goal < 0x80000000U;
}
u32 nvgpu_semaphore_get_value(struct nvgpu_semaphore *s)

View File

@@ -526,8 +526,8 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
/* clear and enable pbdma interrupt */
for (i = 0; i < host_num_pbdma; i++) {
- gk20a_writel(g, pbdma_intr_0_r(i), 0xFFFFFFFF);
- gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFF);
+ gk20a_writel(g, pbdma_intr_0_r(i), 0xFFFFFFFFU);
+ gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFFU);
intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i));
intr_stall &= ~pbdma_intr_stall_lbreq_enabled_f();
@@ -548,12 +548,12 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
gk20a_writel(g, fifo_intr_runlist_r(), U32_MAX);
/* clear and enable pfifo interrupt */
- gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF);
+ gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFFU);
mask = gk20a_fifo_intr_0_en_mask(g);
nvgpu_log_info(g, "fifo_intr_en_0 0x%08x", mask);
gk20a_writel(g, fifo_intr_en_0_r(), mask);
nvgpu_log_info(g, "fifo_intr_en_1 = 0x80000000");
- gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000);
+ gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000U);
nvgpu_log_fn(g, "done");
@@ -3653,7 +3653,7 @@ void gk20a_fifo_add_sema_cmd(struct gk20a *g,
/* semaphore_b */
nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010005U);
/* offset */
- nvgpu_mem_wr32(g, cmd->mem, off++, (u32)sema_va & 0xffffffff);
+ nvgpu_mem_wr32(g, cmd->mem, off++, (u32)sema_va & 0xffffffffU);
if (acquire) {
/* semaphore_c */

View File

@@ -2164,9 +2164,9 @@ int gr_gk20a_load_ctxsw_ucode(struct gk20a *g)
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7),
- gr_fecs_ctxsw_mailbox_value_f(0xc0de7777));
+ gr_fecs_ctxsw_mailbox_value_f(0xc0de7777U));
gk20a_writel(g, gr_gpccs_ctxsw_mailbox_r(7),
- gr_gpccs_ctxsw_mailbox_value_f(0xc0de7777));
+ gr_gpccs_ctxsw_mailbox_value_f(0xc0de7777U));
}
/*
@@ -2213,7 +2213,7 @@ static int gr_gk20a_wait_ctxsw_ready(struct gk20a *g)
gr_fecs_current_ctx_valid_false_f());
}
- gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(0), 0xffffffff);
+ gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(0), 0xffffffffU);
gk20a_writel(g, gr_fecs_method_data_r(), 0x7fffffff);
gk20a_writel(g, gr_fecs_method_push_r(),
gr_fecs_method_push_adr_set_watchdog_timeout_f());
@@ -3478,11 +3478,11 @@ int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr)
/* Opaque black (i.e. solid black, fmt 0x28 = A8B8G8R8) */
zbc_val.format = gr_ds_zbc_color_fmt_val_a8_b8_g8_r8_v();
for (i = 0; i < GK20A_ZBC_COLOR_VALUE_SIZE; i++) {
- zbc_val.color_ds[i] = 0;
- zbc_val.color_l2[i] = 0;
+ zbc_val.color_ds[i] = 0U;
+ zbc_val.color_l2[i] = 0U;
}
- zbc_val.color_l2[0] = 0xff000000;
- zbc_val.color_ds[3] = 0x3f800000;
+ zbc_val.color_l2[0] = 0xff000000U;
+ zbc_val.color_ds[3] = 0x3f800000U;
err = gr_gk20a_add_zbc(g, gr, &zbc_val);
if (err != 0) {
goto color_fail;
@@ -3491,8 +3491,8 @@ int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr)
/* Transparent black = (fmt 1 = zero) */
zbc_val.format = gr_ds_zbc_color_fmt_val_zero_v();
for (i = 0; i < GK20A_ZBC_COLOR_VALUE_SIZE; i++) {
- zbc_val.color_ds[i] = 0;
- zbc_val.color_l2[i] = 0;
+ zbc_val.color_ds[i] = 0U;
+ zbc_val.color_l2[i] = 0U;
}
err = gr_gk20a_add_zbc(g, gr, &zbc_val);
if (err != 0) {
@@ -3502,8 +3502,8 @@ int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr)
/* Opaque white (i.e. solid white) = (fmt 2 = uniform 1) */
zbc_val.format = gr_ds_zbc_color_fmt_val_unorm_one_v();
for (i = 0; i < GK20A_ZBC_COLOR_VALUE_SIZE; i++) {
- zbc_val.color_ds[i] = 0x3f800000;
- zbc_val.color_l2[i] = 0xffffffff;
+ zbc_val.color_ds[i] = 0x3f800000U;
+ zbc_val.color_l2[i] = 0xffffffffU;
}
err = gr_gk20a_add_zbc(g, gr, &zbc_val);
if (err != 0) {
@@ -3829,12 +3829,12 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr)
void gk20a_gr_enable_exceptions(struct gk20a *g)
{
- gk20a_writel(g, gr_exception_r(), 0xFFFFFFFF);
- gk20a_writel(g, gr_exception_en_r(), 0xFFFFFFFF);
- gk20a_writel(g, gr_exception1_r(), 0xFFFFFFFF);
- gk20a_writel(g, gr_exception1_en_r(), 0xFFFFFFFF);
- gk20a_writel(g, gr_exception2_r(), 0xFFFFFFFF);
- gk20a_writel(g, gr_exception2_en_r(), 0xFFFFFFFF);
+ gk20a_writel(g, gr_exception_r(), 0xFFFFFFFFU);
+ gk20a_writel(g, gr_exception_en_r(), 0xFFFFFFFFU);
+ gk20a_writel(g, gr_exception1_r(), 0xFFFFFFFFU);
+ gk20a_writel(g, gr_exception1_en_r(), 0xFFFFFFFFU);
+ gk20a_writel(g, gr_exception2_r(), 0xFFFFFFFFU);
+ gk20a_writel(g, gr_exception2_en_r(), 0xFFFFFFFFU);
}
void gk20a_gr_enable_gpc_exceptions(struct gk20a *g)
@@ -3909,8 +3909,8 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
/* TBD: reload gr ucode when needed */
/* enable interrupts */
- gk20a_writel(g, gr_intr_r(), 0xFFFFFFFF);
- gk20a_writel(g, gr_intr_en_r(), 0xFFFFFFFF);
+ gk20a_writel(g, gr_intr_r(), 0xFFFFFFFFU);
+ gk20a_writel(g, gr_intr_en_r(), 0xFFFFFFFFU);
/* enable fecs error interrupts */
g->ops.gr.fecs_host_int_enable(g);
@@ -4794,7 +4794,7 @@ static inline bool is_valid_cyclestats_bar0_offset_gk20a(struct gk20a *g,
u32 offset)
{
/* support only 24-bit 4-byte aligned offsets */
- bool valid = !(offset & 0xFF000003);
+ bool valid = !(offset & 0xFF000003U);
if (g->allow_all)
return true;

View File

@@ -781,9 +781,9 @@ int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7),
- gr_fecs_ctxsw_mailbox_value_f(0xc0de7777));
+ gr_fecs_ctxsw_mailbox_value_f(0xc0de7777U));
gk20a_writel(g, gr_gpccs_ctxsw_mailbox_r(7),
- gr_gpccs_ctxsw_mailbox_value_f(0xc0de7777));
+ gr_gpccs_ctxsw_mailbox_value_f(0xc0de7777U));
}
g->pmu_lsf_loaded_falcon_id = 0;
@@ -1150,7 +1150,7 @@ void gr_gm20b_init_cyclestats(struct gk20a *g)
#if defined(CONFIG_GK20A_CYCLE_STATS)
__nvgpu_set_enabled(g, NVGPU_SUPPORT_CYCLE_STATS, true);
__nvgpu_set_enabled(g, NVGPU_SUPPORT_CYCLE_STATS_SNAPSHOT, true);
- g->gr.max_css_buffer_size = 0xffffffff;
+ g->gr.max_css_buffer_size = 0xffffffffU;
#else
(void)g;
#endif

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -3261,7 +3261,7 @@ int gp106_mclk_init(struct gk20a *g)
}
/* FBPA gain WAR */
- gk20a_writel(g, fb_fbpa_fbio_iref_byte_rx_ctrl_r(), 0x22222222);
+ gk20a_writel(g, fb_fbpa_fbio_iref_byte_rx_ctrl_r(), 0x22222222U);
mclk->speed = GP106_MCLK_LOW_SPEED; /* Value from Devinit */
@@ -3401,13 +3401,13 @@ int gp106_mclk_change(struct gk20a *g, u16 val)
#endif
if (speed == GP106_MCLK_HIGH_SPEED) {
- gk20a_writel(g, 0x132000, 0x98010000);
+ gk20a_writel(g, 0x132000U, 0x98010000U);
/* Introduce delay */
- (void) gk20a_readl(g, 0x132000);
- (void) gk20a_readl(g, 0x132000);
+ (void) gk20a_readl(g, 0x132000U);
+ (void) gk20a_readl(g, 0x132000U);
}
- gk20a_writel(g, 0x137300, 0x20000103);
+ gk20a_writel(g, 0x137300U, 0x20000103U);
/* Read sequencer binary*/
payload.in.buf = seq_script_ptr;

View File

@@ -1979,7 +1979,7 @@ void gv11b_fifo_add_syncpt_wait_cmd(struct gk20a *g,
/* sema_addr_lo */
nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010017);
- nvgpu_mem_wr32(g, cmd->mem, off++, gpu_va & 0xffffffff);
+ nvgpu_mem_wr32(g, cmd->mem, off++, gpu_va & 0xffffffffU);
/* sema_addr_hi */
nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010018);
@@ -2018,7 +2018,7 @@ void gv11b_fifo_add_syncpt_incr_cmd(struct gk20a *g,
/* sema_addr_lo */
nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010017);
- nvgpu_mem_wr32(g, cmd->mem, off++, gpu_va & 0xffffffff);
+ nvgpu_mem_wr32(g, cmd->mem, off++, gpu_va & 0xffffffffU);
/* sema_addr_hi */
nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010018);

View File

@@ -3414,8 +3414,8 @@ void gv11b_gr_set_hww_esr_report_mask(struct gk20a *g)
{
/* clear hww */
- gk20a_writel(g, gr_gpcs_tpcs_sms_hww_global_esr_r(), 0xffffffff);
- gk20a_writel(g, gr_gpcs_tpcs_sms_hww_global_esr_r(), 0xffffffff);
+ gk20a_writel(g, gr_gpcs_tpcs_sms_hww_global_esr_r(), 0xffffffffU);
+ gk20a_writel(g, gr_gpcs_tpcs_sms_hww_global_esr_r(), 0xffffffffU);
/* setup sm warp esr report masks */
gk20a_writel(g, gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r(),

View File

@@ -138,8 +138,8 @@ static void gv11b_subctx_commit_valid_mask(struct vm_gk20a *vm,
struct gk20a *g = gk20a_from_vm(vm);
/* Make all subctx pdbs valid */
- nvgpu_mem_wr32(g, inst_block, 166, 0xffffffff);
- nvgpu_mem_wr32(g, inst_block, 167, 0xffffffff);
+ nvgpu_mem_wr32(g, inst_block, 166, 0xffffffffU);
+ nvgpu_mem_wr32(g, inst_block, 167, 0xffffffffU);
}
static void gv11b_subctx_commit_pdb(struct vm_gk20a *vm,

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -71,10 +71,10 @@ struct pmu_cmdline_args_v6 {
};
/* GPU ID */
- #define PMU_SHA1_GID_SIGNATURE 0xA7C66AD2
- #define PMU_SHA1_GID_SIGNATURE_SIZE 4
+ #define PMU_SHA1_GID_SIGNATURE 0xA7C66AD2U
+ #define PMU_SHA1_GID_SIGNATURE_SIZE 4U
- #define PMU_SHA1_GID_SIZE 16
+ #define PMU_SHA1_GID_SIZE 16U
struct pmu_sha1_gid {
bool valid;
@@ -107,13 +107,13 @@ struct pmu_init_msg_pmu_v1 {
u16 sw_managed_area_size;
};
- #define PMU_QUEUE_COUNT_FOR_V5 4
- #define PMU_QUEUE_COUNT_FOR_V4 5
- #define PMU_QUEUE_COUNT_FOR_V3 3
- #define PMU_QUEUE_HPQ_IDX_FOR_V3 0
- #define PMU_QUEUE_LPQ_IDX_FOR_V3 1
- #define PMU_QUEUE_MSG_IDX_FOR_V3 2
- #define PMU_QUEUE_MSG_IDX_FOR_V5 3
+ #define PMU_QUEUE_COUNT_FOR_V5 4U
+ #define PMU_QUEUE_COUNT_FOR_V4 5U
+ #define PMU_QUEUE_COUNT_FOR_V3 3U
+ #define PMU_QUEUE_HPQ_IDX_FOR_V3 0U
+ #define PMU_QUEUE_LPQ_IDX_FOR_V3 1U
+ #define PMU_QUEUE_MSG_IDX_FOR_V3 2U
+ #define PMU_QUEUE_MSG_IDX_FOR_V5 3U
struct pmu_init_msg_pmu_v3 {
u8 msg_type;
u8 queue_index[PMU_QUEUE_COUNT_FOR_V3];

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -161,13 +161,13 @@ static inline unsigned long __hweight16(uint16_t x)
static inline unsigned long __hweight32(uint32_t x)
{
return __hweight16((uint16_t)x) +
- __hweight16((uint16_t)((x & 0xffff0000) >> 16));
+ __hweight16((uint16_t)((x & 0xffff0000U) >> 16));
}
static inline unsigned long __hweight64(uint64_t x)
{
return __hweight32((uint32_t)x) +
- __hweight32((uint32_t)((x & 0xffffffff00000000) >> 32));
+ __hweight32((uint32_t)((x & 0xffffffff00000000U) >> 32));
}
#define hweight32 __hweight32

View File

@@ -183,7 +183,7 @@ static int dev_init_get_vfield_info(struct gk20a *g,
u8 *vfieldregtableptr = NULL;
u32 vfieldregheadersize = VFIELD_REG_HEADER_SIZE;
u32 i;
- u32 oldindex = 0xFFFFFFFF;
+ u32 oldindex = 0xFFFFFFFFU;
u32 currindex;
struct vfield_reg_header vregheader;
struct vfield_reg_entry vregentry;