gpu: nvgpu: MISRA fixes for composite expressions

MISRA rules 10.6, 10.7, and 10.8 restrict the mixing and widening of types in
composite expressions. Resolve these violations by casting the individual
variables/constants to the appropriate types.

Jira NVGPU-850
Jira NVGPU-853
Jira NVGPU-851

Change-Id: If6db312187211bc428cf465929082118565dacf4
Signed-off-by: Adeel Raza <araza@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1931156
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:        Adeel Raza
Date:          2018-10-23 13:24:39 -07:00
Committed by:  mobile promotions
Parent:        a733659f19
Commit:        dc37ca4559

49 changed files with 184 additions and 150 deletions
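
For readers skimming the hunks below: the fixes consistently replace a bare C cast on a
whole composite expression with per-operand widening via the nvgpu cast helpers. The
sketch below is a minimal illustration of that pattern, not the driver's actual header;
the U16()/U32() definitions are assumptions modeled on how the macros are used in this
commit (the BITxx() helpers appear verbatim in the bitops.h hunk further down).

#include <stdint.h>

typedef uint8_t  u8;
typedef uint16_t u16;
typedef uint32_t u32;

/* Assumed shape of the nvgpu cast helpers. */
#define U16(x)   ((u16)(x))
#define U32(x)   ((u32)(x))
#define BIT32(i) (U32(1) << U32(i))

/*
 * MISRA 10.6/10.7/10.8: do not build a composite expression out of mixed or
 * narrower essential types and then assign/cast the result; widen each
 * operand first instead.
 */
static u32 pack_dma_params(u8 value)
{
        /* Before (flagged): params |= (value << 24);  'value' promotes to int. */
        /* After (this commit's style): widen each operand to u32 before shifting. */
        return U32(value) << U32(24);
}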


@@ -330,8 +330,8 @@ static int devinit_get_fll_device_table(struct gk20a *g,
(u8)fll_desc_table_entry.min_freq_vfe_idx;
fll_dev_data.freq_ctrl_idx = CTRL_BOARDOBJ_IDX_INVALID;
vbios_domain = (u32)(fll_desc_table_entry.clk_domain &
NV_PERF_DOMAIN_4X_CLOCK_DOMAIN_MASK);
vbios_domain = U32(fll_desc_table_entry.clk_domain) &
U32(NV_PERF_DOMAIN_4X_CLOCK_DOMAIN_MASK);
fll_dev_data.clk_domain =
g->ops.pmu_ver.clk.get_vbios_clk_domain(vbios_domain);


@@ -1391,7 +1391,8 @@ static u32 vfflatten_prog_1x_master(struct gk20a *g,
do {
clkvfpointfreqmhzset(g, &vf_point_data.vf_point,
p1xmaster->super.freq_max_mhz -
step_count * freq_step_size_mhz);
U16(step_count) *
U16(freq_step_size_mhz));
status = _clk_prog_1x_master_rail_construct_vf_point(g, pclk,
p1xmaster, p_vf_rail,


@@ -76,7 +76,7 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
snprintf(name, sizeof(name), "as_%d", as_share->id);
vm = nvgpu_vm_init(g, big_page_size,
big_page_size << 10,
U64(big_page_size) << U64(10),
mm->channel.kernel_size,
mm->channel.user_size + mm->channel.kernel_size,
!mm->disable_bigpage, userspace_managed, name);


@@ -1019,7 +1019,7 @@ void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
entries = gv11b_fb_fault_buffer_size_val(g, index);
nvgpu_log(g, gpu_dbg_intr, "buffer num entries = %d", entries);
offset = (get_indx * gmmu_fault_buf_size_v()) / sizeof(u32);
offset = (get_indx * gmmu_fault_buf_size_v()) / U32(sizeof(u32));
nvgpu_log(g, gpu_dbg_intr, "starting word offset = 0x%x", offset);
rd32_val = nvgpu_mem_rd32(g, mem,
@@ -1037,7 +1037,8 @@ void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
gv11b_fb_fault_buffer_get_ptr_update(g, index, get_indx);
offset = (get_indx * gmmu_fault_buf_size_v()) / sizeof(u32);
offset = (get_indx * gmmu_fault_buf_size_v()) /
U32(sizeof(u32));
nvgpu_log(g, gpu_dbg_intr, "next word offset = 0x%x", offset);
rd32_val = nvgpu_mem_rd32(g, mem,


@@ -507,7 +507,8 @@ int tu104_fb_apply_pdb_cache_war(struct gk20a *g)
/* Bind 256 instance blocks to unused engine ID 0x0 */
for (i = 0U; i < 256U; i++) {
inst_blk_addr = u64_lo32((inst_blk_base_addr + (i * PAGE_SIZE))
inst_blk_addr = u64_lo32((inst_blk_base_addr +
(U64(i) * U64(PAGE_SIZE)))
>> fb_mmu_bind_imb_addr_alignment_v());
nvgpu_writel(g, fb_mmu_bind_imb_r(),


@@ -401,7 +401,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
if (ch->usermode_submit_enabled) {
gk20a_channel_free_usermode_buffers(ch);
ch->userd_iova = nvgpu_mem_get_addr(g, &f->userd) +
ch->chid * f->userd_entry_size;
U64(ch->chid) * U64(f->userd_entry_size);
ch->usermode_submit_enabled = false;
}
@@ -1031,7 +1031,9 @@ static int channel_gk20a_prealloc_resources(struct channel_gk20a *c,
*/
size = sizeof(struct priv_cmd_entry);
if (num_jobs <= ULONG_MAX / (size << 1)) {
entries = nvgpu_vzalloc(c->g, (num_jobs << 1) * size);
entries = nvgpu_vzalloc(c->g,
((unsigned long)num_jobs << 1UL) *
(unsigned long)size);
}
if (entries == NULL) {
err = -ENOMEM;
@@ -1161,7 +1163,7 @@ int nvgpu_channel_setup_bind(struct channel_gk20a *c,
gpfifo_gpu_va = c->usermode_gpfifo.gpu_va;
} else {
err = nvgpu_dma_alloc_map_sys(ch_vm,
gpfifo_size * gpfifo_entry_size,
(size_t)gpfifo_size * (size_t)gpfifo_entry_size,
&c->gpfifo.mem);
if (err != 0) {
nvgpu_err(g, "memory allocation failed");
@@ -1170,7 +1172,8 @@ int nvgpu_channel_setup_bind(struct channel_gk20a *c,
if (c->gpfifo.mem.aperture == APERTURE_VIDMEM) {
c->gpfifo.pipe = nvgpu_big_malloc(g,
gpfifo_size * gpfifo_entry_size);
(size_t)gpfifo_size *
(size_t)gpfifo_entry_size);
if (c->gpfifo.pipe == NULL) {
err = -ENOMEM;
goto clean_up_unmap;
@@ -1260,7 +1263,7 @@ clean_up_unmap:
if (c->usermode_submit_enabled) {
gk20a_channel_free_usermode_buffers(c);
c->userd_iova = nvgpu_mem_get_addr(g, &g->fifo.userd) +
c->chid * g->fifo.userd_entry_size;
U64(c->chid) * U64(g->fifo.userd_entry_size);
c->usermode_submit_enabled = false;
}
clean_up:
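
A number of the channel hunks above share one idea: cast each factor to the wide type
(size_t or u64) before the multiply, so the product is computed in that type rather than
being computed in u32 and only then widened. A minimal sketch with a hypothetical helper
name, not the real channel code:

#include <stddef.h>
#include <stdint.h>

typedef uint32_t u32;

/* Hypothetical helper: bytes needed for a GPFIFO with 'num_entries' entries. */
static size_t gpfifo_bytes(u32 num_entries, u32 entry_size)
{
        /*
         * (size_t)(num_entries * entry_size) would still multiply in u32 and
         * can wrap before the widening cast; casting each operand, as the
         * hunks do, keeps the whole expression in size_t.
         */
        return (size_t)num_entries * (size_t)entry_size;
}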


@@ -25,9 +25,9 @@
#ifndef NVGPU_FUSE_GM20B_H
#define NVGPU_FUSE_GM20B_H
#define GCPLEX_CONFIG_VPR_AUTO_FETCH_DISABLE_MASK ((u32)(1 << 0))
#define GCPLEX_CONFIG_VPR_ENABLED_MASK ((u32)(1 << 1))
#define GCPLEX_CONFIG_WPR_ENABLED_MASK ((u32)(1 << 2))
#define GCPLEX_CONFIG_VPR_AUTO_FETCH_DISABLE_MASK BIT32(0)
#define GCPLEX_CONFIG_VPR_ENABLED_MASK BIT32(1)
#define GCPLEX_CONFIG_WPR_ENABLED_MASK BIT32(2)
struct gk20a;


@@ -76,7 +76,8 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
g->ltc_count << ltc_ltcs_ltss_cbc_base_alignment_shift_v();
/* must be a multiple of 64KB */
compbit_backing_size = roundup(compbit_backing_size, 64*1024);
compbit_backing_size = roundup(compbit_backing_size,
U32(64) * U32(1024));
max_comptag_lines =
(compbit_backing_size * comptags_per_cacheline) /
@@ -218,7 +219,7 @@ void gm20b_ltc_init_fs_state(struct gk20a *g)
reg = gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r());
gr->slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(reg);;
gr->cacheline_size =
512U << ltc_ltcs_ltss_cbc_param_cache_line_size_v(reg);
U32(512) << ltc_ltcs_ltss_cbc_param_cache_line_size_v(reg);
gk20a_writel(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r(),
g->ltc_count);


@@ -106,7 +106,8 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
g->ltc_count << ltc_ltcs_ltss_cbc_base_alignment_shift_v();
/* must be a multiple of 64KB */
compbit_backing_size = roundup(compbit_backing_size, 64*1024);
compbit_backing_size = roundup(compbit_backing_size,
U32(64) * U32(1024));
nvgpu_log_info(g, "compbit backing store size : %d",
compbit_backing_size);


@@ -67,7 +67,7 @@ void gv11b_ltc_init_fs_state(struct gk20a *g)
reg = gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r());
gr->slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(reg);;
gr->cacheline_size =
512U << ltc_ltcs_ltss_cbc_param_cache_line_size_v(reg);
U32(512) << ltc_ltcs_ltss_cbc_param_cache_line_size_v(reg);
/* Disable LTC interrupts */
reg = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
@@ -162,10 +162,10 @@ void gv11b_ltc_lts_isr(struct gk20a *g, unsigned int ltc, unsigned int slice)
/* update counters per slice */
if (corrected_overflow) {
corrected_delta += (0x1U << ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_s());
corrected_delta += BIT32(ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_s());
}
if (uncorrected_overflow) {
uncorrected_delta += (0x1U << ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_s());
uncorrected_delta += BIT32(ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_s());
}
g->ecc.ltc.ecc_sec_count[ltc][slice].counter += corrected_delta;


@@ -46,7 +46,7 @@ void ltc_tu104_init_fs_state(struct gk20a *g)
gr->slices_per_ltc =
ltc_ltcs_ltss_cbc_param2_slices_per_ltc_v(reg);
gr->cacheline_size =
512U << ltc_ltcs_ltss_cbc_param2_cache_line_size_v(reg);
U32(512) << ltc_ltcs_ltss_cbc_param2_cache_line_size_v(reg);
}
u64 ltc_tu104_get_cbc_base_divisor(struct gk20a *g)
@@ -106,7 +106,8 @@ int ltc_tu104_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
compbit_backing_size += amap_swizzle_rounding;
/* must be a multiple of 64KB */
compbit_backing_size = roundup(compbit_backing_size, 64 * 1024);
compbit_backing_size = roundup(compbit_backing_size,
U32(64) * U32(1024));
err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size, true);
if (err != 0) {


@@ -128,7 +128,8 @@ bool intr_tu104_vector_intr_pending(struct gk20a *g, u32 intr_vector)
func_priv_cpu_intr_leaf_r(
NV_CPU_INTR_GPU_VECTOR_TO_LEAF_REG(intr_vector)));
return leaf_val & BIT(NV_CPU_INTR_GPU_VECTOR_TO_LEAF_BIT(intr_vector));
return leaf_val &
BIT32(NV_CPU_INTR_GPU_VECTOR_TO_LEAF_BIT(intr_vector));
}
static void intr_tu104_stall_enable(struct gk20a *g)
@@ -177,14 +178,14 @@ static void intr_tu104_nonstall_enable(struct gk20a *g)
active_engine_id = g->fifo.active_engines_list[i];
intr_mask = g->fifo.engine_info[active_engine_id].intr_mask;
nonstall_intr_mask |= intr_mask << nonstall_intr_base;
nonstall_intr_mask |= U64(intr_mask) << U64(nonstall_intr_base);
}
nvgpu_func_writel(g,
func_priv_cpu_intr_top_en_set_r(
NV_CPU_INTR_SUBTREE_TO_TOP_IDX(
NV_CPU_INTR_TOP_NONSTALL_SUBTREE)),
BIT(NV_CPU_INTR_SUBTREE_TO_TOP_BIT(
BIT32(NV_CPU_INTR_SUBTREE_TO_TOP_BIT(
NV_CPU_INTR_TOP_NONSTALL_SUBTREE)));
nvgpu_func_writel(g,
@@ -234,7 +235,7 @@ u32 intr_tu104_nonstall(struct gk20a *g)
NV_CPU_INTR_SUBTREE_TO_TOP_IDX(
NV_CPU_INTR_TOP_NONSTALL_SUBTREE)));
nonstall_intr_set_mask = BIT(
nonstall_intr_set_mask = BIT32(
NV_CPU_INTR_SUBTREE_TO_TOP_BIT(
NV_CPU_INTR_TOP_NONSTALL_SUBTREE));
@@ -248,7 +249,7 @@ void intr_tu104_nonstall_pause(struct gk20a *g)
func_priv_cpu_intr_top_en_clear_r(
NV_CPU_INTR_SUBTREE_TO_TOP_IDX(
NV_CPU_INTR_TOP_NONSTALL_SUBTREE)),
BIT(NV_CPU_INTR_SUBTREE_TO_TOP_BIT(
BIT32(NV_CPU_INTR_SUBTREE_TO_TOP_BIT(
NV_CPU_INTR_TOP_NONSTALL_SUBTREE)));
}
@@ -259,7 +260,7 @@ void intr_tu104_nonstall_resume(struct gk20a *g)
func_priv_cpu_intr_top_en_set_r(
NV_CPU_INTR_SUBTREE_TO_TOP_IDX(
NV_CPU_INTR_TOP_NONSTALL_SUBTREE)),
BIT(NV_CPU_INTR_SUBTREE_TO_TOP_BIT(
BIT32(NV_CPU_INTR_SUBTREE_TO_TOP_BIT(
NV_CPU_INTR_TOP_NONSTALL_SUBTREE)));
}
@@ -291,7 +292,7 @@ u32 intr_tu104_isr_nonstall(struct gk20a *g)
active_engine_id = g->fifo.active_engines_list[i];
intr_mask = g->fifo.engine_info[active_engine_id].intr_mask;
nonstall_intr_mask = intr_mask << nonstall_intr_base;
nonstall_intr_mask = U64(intr_mask) << U64(nonstall_intr_base);
nonstall_intr_mask_lo = u64_lo32(nonstall_intr_mask);
nonstall_intr_mask_hi = u64_hi32(nonstall_intr_mask);


@@ -34,7 +34,7 @@
#define NV_CPU_INTR_GPU_VECTOR_TO_LEAF_BIT(i) ((i) % 32)
#define NV_CPU_INTR_GPU_VECTOR_TO_SUBTREE(i) ((NV_CPU_INTR_GPU_VECTOR_TO_LEAF_REG(i)) / 2)
#define NV_CPU_INTR_TOP_NONSTALL_SUBTREE 0
#define NV_CPU_INTR_TOP_NONSTALL_SUBTREE 0U
struct gk20a;


@@ -1351,8 +1351,10 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
* requirement is not necessary.
*/
if (is_gva_space) {
base_big_page = base & ((vm->big_page_size << 10U) - 1U);
size_big_page = size & ((vm->big_page_size << 10U) - 1U);
base_big_page = base &
((U64(vm->big_page_size) << U64(10)) - U64(1));
size_big_page = size &
((U64(vm->big_page_size) << U64(10)) - U64(1));
if (vm->big_pages &&
(base_big_page != 0ULL || size_big_page != 0ULL)) {
return -EINVAL;


@@ -200,7 +200,7 @@ static inline struct nvgpu_list_node *balloc_get_order_list(
static inline u64 balloc_order_to_len(struct nvgpu_buddy_allocator *a,
int order)
{
return (1 << order) * a->blk_size;
return BIT64(order) * a->blk_size;
}
static inline u64 balloc_base_shift(struct nvgpu_buddy_allocator *a,
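
The balloc_order_to_len() change above is more than stylistic: (1 << order) shifts a
plain int, so the result is computed in 32 bits (and is undefined for order >= 31)
before it is ever widened for the multiply. BIT64() performs the shift in u64. A small
sketch under that assumption, with a stand-in blk_size parameter:

#include <stdint.h>

typedef uint64_t u64;

#define U64(x)   ((u64)(x))
#define BIT64(i) (U64(1) << U64(i))   /* matches the bitops.h hunk further down */

static u64 order_to_len(unsigned int order, u64 blk_size)
{
        /* Old form: (1 << order) * blk_size  -- shift evaluated as int.   */
        /* New form: shift evaluated in u64, then multiplied in u64.       */
        return BIT64(order) * blk_size;
}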


@@ -62,7 +62,7 @@ void gk20a_comptaglines_free(struct gk20a_comptag_allocator *allocator,
WARN_ON(offset == 0U);
WARN_ON(addr > allocator->size);
WARN_ON(addr + len > allocator->size);
WARN_ON((unsigned long)addr + (unsigned long)len > allocator->size);
nvgpu_mutex_acquire(&allocator->lock);
bitmap_clear(allocator->bitmap, addr, len);


@@ -249,7 +249,7 @@ static u32 pd_entries(const struct gk20a_mmu_level *l,
* used to index the page directory. That is simply 2 raised to the
* number of bits.
*/
return 1UL << (l->hi_bit[attrs->pgsz] - l->lo_bit[attrs->pgsz] + 1UL);
return BIT32(l->hi_bit[attrs->pgsz] - l->lo_bit[attrs->pgsz] + 1);
}
/*
@@ -679,7 +679,7 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
page_size = vm->gmmu_page_sizes[attrs->pgsz];
if (space_to_skip & (page_size - 1U)) {
if (space_to_skip & (U64(page_size) - U64(1))) {
return -EINVAL;
}
@@ -775,7 +775,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
* boundaries.
*/
if (attrs.ctag) {
attrs.ctag += buffer_offset & (ctag_granularity - 1U);
attrs.ctag += buffer_offset & (U64(ctag_granularity) - U64(1));
}
attrs.l3_alloc = (bool)(flags & NVGPU_VM_MAP_L3_ALLOC);
@@ -1000,7 +1000,7 @@ int __nvgpu_set_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte)
pte_size = __nvgpu_pte_words(g);
for (i = 0; i < pte_size; i++) {
pd_write(g, pd, pd_offs + i, pte[i]);
pd_write(g, pd, (size_t)pd_offs + (size_t)i, pte[i]);
pte_dbg(g, attrs_ptr,
"PTE: idx=%-4u (%d) 0x%08x", pd_idx, i, pte[i]);
}


@@ -276,7 +276,7 @@ static int nvgpu_init_cde_vm(struct mm_gk20a *mm)
u32 big_page_size = g->ops.mm.get_default_big_page_size();
mm->cde.vm = nvgpu_vm_init(g, big_page_size,
big_page_size << 10,
U64(big_page_size) << U64(10),
NV_MM_DEFAULT_KERNEL_SIZE,
NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
false, false, "cde");
@@ -292,7 +292,7 @@ static int nvgpu_init_ce_vm(struct mm_gk20a *mm)
u32 big_page_size = g->ops.mm.get_default_big_page_size();
mm->ce.vm = nvgpu_vm_init(g, big_page_size,
big_page_size << 10,
U64(big_page_size) << U64(10),
NV_MM_DEFAULT_KERNEL_SIZE,
NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
false, false, "ce");
@@ -430,9 +430,9 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
NV_MM_DEFAULT_KERNEL_SIZE;
mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE;
nvgpu_log_info(g, "channel vm size: user %dMB kernel %dMB",
(int)(mm->channel.user_size >> 20),
(int)(mm->channel.kernel_size >> 20));
nvgpu_log_info(g, "channel vm size: user %uMB kernel %uMB",
U32(mm->channel.user_size >> U64(20)),
U32(mm->channel.kernel_size >> U64(20)));
nvgpu_init_pramin(mm);


@@ -1001,7 +1001,7 @@ static int nvgpu_page_alloc_init_slabs(struct nvgpu_page_allocator *a)
for (i = 0; i < nr_slabs; i++) {
struct page_alloc_slab *slab = &a->slabs[i];
slab->slab_size = SZ_4K * (1 << i);
slab->slab_size = U32(SZ_4K) * BIT32(i);
nvgpu_init_list_node(&slab->empty);
nvgpu_init_list_node(&slab->partial);
nvgpu_init_list_node(&slab->full);


@@ -76,7 +76,8 @@
static u32 nvgpu_pd_cache_nr(u32 bytes)
{
return ilog2(bytes >> (NVGPU_PD_CACHE_MIN_SHIFT - 1U));
return ilog2((unsigned long)bytes >>
((unsigned long)NVGPU_PD_CACHE_MIN_SHIFT - 1UL));
}
static u32 nvgpu_pd_cache_get_mask(struct nvgpu_pd_mem_entry *pentry)
@@ -259,7 +260,7 @@ static int nvgpu_pd_cache_alloc_from_partial(struct gk20a *g,
return -ENOMEM;
}
pentry->alloc_map |= 1 << bit_offs;
pentry->alloc_map |= BIT64(bit_offs);
pd_dbg(g, "PD-Alloc [C] Partial: offs=%lu", bit_offs);


@@ -57,7 +57,7 @@ int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm,
return -EINVAL;
}
if (map_addr & (vm->gmmu_page_sizes[pgsz_idx] - 1U)) {
if (map_addr & (U64(vm->gmmu_page_sizes[pgsz_idx]) - U64(1))) {
nvgpu_err(g, "map offset must be buffer page size aligned 0x%llx",
map_addr);
return -EINVAL;


@@ -1112,7 +1112,7 @@ static void pg_cmd_eng_buf_load_set_dma_idx_v1(struct pmu_pg_cmd *pg,
static void pg_cmd_eng_buf_load_set_dma_idx_v2(struct pmu_pg_cmd *pg,
u8 value)
{
pg->eng_buf_load_v2.dma_desc.params |= (value << 24);
pg->eng_buf_load_v2.dma_desc.params |= (U32(value) << U32(24));
}
static int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)


@@ -43,16 +43,17 @@ struct condition_entry {
static u16 nvgpu_bios_rdu16(struct gk20a *g, int offset)
{
u16 val = (g->bios.data[offset+1] << 8) + g->bios.data[offset];
u16 val = (U16(g->bios.data[offset+1]) << U16(8)) +
U16(g->bios.data[offset]);
return val;
}
static u32 nvgpu_bios_rdu32(struct gk20a *g, int offset)
{
u32 val = (g->bios.data[offset+3] << 24) +
(g->bios.data[offset+2] << 16) +
(g->bios.data[offset+1] << 8) +
g->bios.data[offset];
u32 val = (U32(g->bios.data[offset+3]) << U32(24)) +
(U32(g->bios.data[offset+2]) << U32(16)) +
(U32(g->bios.data[offset+1]) << U32(8)) +
U32(g->bios.data[offset]);
return val;
}
@@ -693,7 +694,7 @@ void *nvgpu_bios_get_perf_table_ptrs(struct gk20a *g,
nvgpu_log_info(g, "Perf_Tbl_ID-offset 0x%x Tbl_ID_Ptr-offset- 0x%x",
(ptoken->data_ptr +
(table_id * data_size)),
(U16(table_id) * U16(data_size))),
perf_table_id_offset);
if (perf_table_id_offset != 0U) {
@@ -831,9 +832,9 @@ static void nvgpu_bios_init_xmemsel_zm_nv_reg_array(struct gk20a *g, bool *condi
strap) : strap;
for (i = 0; i < count; i++) {
data = nvgpu_bios_read_u32(g, data_table_offset + ((i *
g->bios.mem_strap_data_count + index) *
sizeof(u32)));
data = nvgpu_bios_read_u32(g, data_table_offset +
((U32(i) * U32(g->bios.mem_strap_data_count) +
index) * U32(sizeof(u32))));
gk20a_writel(g, reg, data);
reg += stride;
}
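
The rdu16/rdu32 rewrite above follows from the integer promotions: g->bios.data[] holds
u8 values, which promote to signed int before the shift, so byte << 24 mixes essential
types and can even overflow into the sign bit. Widening each byte to the target width
first keeps every operand in one essential type. A standalone sketch under that
assumption:

#include <stdint.h>

typedef uint8_t  u8;
typedef uint32_t u32;

#define U32(x) ((u32)(x))

/* Assemble a little-endian u32 the way the patched nvgpu_bios_rdu32() does:
 * each byte is widened to u32 before it is shifted or added. */
static u32 rd_le32(const u8 *data, int offset)
{
        return (U32(data[offset + 3]) << U32(24)) +
               (U32(data[offset + 2]) << U32(16)) +
               (U32(data[offset + 1]) << U32(8)) +
                U32(data[offset]);
}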


@@ -177,9 +177,11 @@ static inline unsigned int gk20a_ce_get_method_size(int request_operation,
}
if (request_operation & NVGPU_CE_PHYS_MODE_TRANSFER) {
methodsize = (2 + (16 * iterations)) * sizeof(u32);
methodsize = (2U + (16U * iterations)) *
(unsigned int)sizeof(u32);
} else if (request_operation & NVGPU_CE_MEMSET) {
methodsize = (2 + (15 * iterations)) * sizeof(u32);
methodsize = (2U + (15U * iterations)) *
(unsigned int)sizeof(u32);
}
return methodsize;


@@ -736,7 +736,8 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
goto clean_up_runlist;
}
runlist_size = f->runlist_entry_size * f->num_runlist_entries;
runlist_size = (size_t)f->runlist_entry_size *
(size_t)f->num_runlist_entries;
nvgpu_log(g, gpu_dbg_info,
"runlist_entries %d runlist size %zu",
f->num_runlist_entries, runlist_size);
@@ -1040,11 +1041,12 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g)
if (g->ops.mm.is_bar1_supported(g)) {
err = nvgpu_dma_alloc_map_sys(g->mm.bar1.vm,
f->userd_entry_size * f->num_channels,
(size_t)f->userd_entry_size *
(size_t)f->num_channels,
&f->userd);
} else {
err = nvgpu_dma_alloc_sys(g, f->userd_entry_size *
f->num_channels, &f->userd);
err = nvgpu_dma_alloc_sys(g, (size_t)f->userd_entry_size *
(size_t)f->num_channels, &f->userd);
}
if (err != 0) {
nvgpu_err(g, "userd memory allocation failed");
@@ -1055,9 +1057,9 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g)
userd_base = nvgpu_mem_get_addr(g, &f->userd);
for (chid = 0; chid < f->num_channels; chid++) {
f->channel[chid].userd_iova = userd_base +
chid * f->userd_entry_size;
U64(chid) * U64(f->userd_entry_size);
f->channel[chid].userd_gpu_va =
f->userd.gpu_va + chid * f->userd_entry_size;
f->userd.gpu_va + U64(chid) * U64(f->userd_entry_size);
}
err = nvgpu_channel_worker_init(g);
@@ -1556,7 +1558,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt)
int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
{
u32 engine_id, engines;
unsigned long engine_id, engines;
nvgpu_mutex_acquire(&g->dbg_sessions_lock);
gr_gk20a_disable_ctxsw(g);
@@ -1578,8 +1580,8 @@ int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
* If deferred reset is set for an engine, and channel is running
* on that engine, reset it
*/
for_each_set_bit(engine_id, &g->fifo.deferred_fault_engines, 32) {
if (BIT(engine_id) & engines) {
for_each_set_bit(engine_id, &g->fifo.deferred_fault_engines, 32UL) {
if (BIT64(engine_id) & engines) {
gk20a_fifo_reset_engine(g, engine_id);
}
}
@@ -3600,7 +3602,7 @@ int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 chid,
bool add, bool wait_for_finish)
{
int ret = -EINVAL;
u32 runlist_id = 0;
unsigned long runlist_id = 0;
int errcode;
unsigned long ulong_runlist_ids = (unsigned long)runlist_ids;
@@ -3614,7 +3616,8 @@ int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 chid,
errcode = g->ops.fifo.update_runlist(g, runlist_id, chid, add, wait_for_finish);
if (errcode) {
nvgpu_err(g,
"failed to update_runlist %d %d", runlist_id, errcode);
"failed to update_runlist %lu %d",
runlist_id, errcode);
ret = errcode;
}
}
@@ -4285,7 +4288,8 @@ int gk20a_fifo_setup_userd(struct channel_gk20a *c)
offset = 0;
} else {
mem = &g->fifo.userd;
offset = c->chid * g->fifo.userd_entry_size / (u32)sizeof(u32);
offset = U32(c->chid) * g->fifo.userd_entry_size /
U32(sizeof(u32));
}
nvgpu_mem_wr32(g, mem, offset + ram_userd_put_w(), 0);


@@ -1214,7 +1214,7 @@ int gr_gk20a_init_fs_state(struct gk20a *g)
if ((g->tpc_fs_mask_user != 0U) &&
(fuse_tpc_mask == BIT32(gr->max_tpc_count) - 1U)) {
u32 val = g->tpc_fs_mask_user;
val &= (0x1U << gr->max_tpc_count) - 1U;
val &= BIT32(gr->max_tpc_count) - U32(1);
gk20a_writel(g, gr_cwd_fs_r(),
gr_cwd_fs_num_gpcs_f(gr->gpc_count) |
gr_cwd_fs_num_tpcs_f(hweight32(val)));
@@ -3246,11 +3246,13 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
if (gr->gpc_skip_mask == NULL) {
gr->gpc_skip_mask =
nvgpu_kzalloc(g, gr_pd_dist_skip_table__size_1_v() *
4 * sizeof(u32));
nvgpu_kzalloc(g,
(size_t)gr_pd_dist_skip_table__size_1_v() *
(size_t)4 * sizeof(u32));
} else {
memset(gr->gpc_skip_mask, 0, gr_pd_dist_skip_table__size_1_v() *
4 * sizeof(u32));
memset(gr->gpc_skip_mask, 0,
(size_t)gr_pd_dist_skip_table__size_1_v() *
(size_t)4 * sizeof(u32));
}
if ((gr->gpc_tpc_count == NULL) || (gr->gpc_tpc_mask == NULL) ||
@@ -3347,13 +3349,15 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
/* allocate for max tpc per gpc */
if (gr->sm_to_cluster == NULL) {
gr->sm_to_cluster = nvgpu_kzalloc(g, gr->gpc_count *
gr->max_tpc_per_gpc_count *
sm_per_tpc * sizeof(struct sm_info));
gr->sm_to_cluster = nvgpu_kzalloc(g, (size_t)gr->gpc_count *
(size_t)gr->max_tpc_per_gpc_count *
(size_t)sm_per_tpc *
sizeof(struct sm_info));
} else {
memset(gr->sm_to_cluster, 0, gr->gpc_count *
gr->max_tpc_per_gpc_count *
sm_per_tpc * sizeof(struct sm_info));
memset(gr->sm_to_cluster, 0, (size_t)gr->gpc_count *
(size_t)gr->max_tpc_per_gpc_count *
(size_t)sm_per_tpc *
sizeof(struct sm_info));
}
gr->no_of_sm = 0;
@@ -3457,9 +3461,11 @@ static int gr_gk20a_init_map_tiles(struct gk20a *g, struct gr_gk20a *gr)
init_err = nvgpu_kzalloc(g, num_gpcs * sizeof(s32));
run_err = nvgpu_kzalloc(g, num_gpcs * sizeof(s32));
sorted_num_tpcs =
nvgpu_kzalloc(g, num_gpcs * num_tpc_per_gpc * sizeof(s32));
nvgpu_kzalloc(g, (size_t)num_gpcs *
(size_t)num_tpc_per_gpc *
sizeof(s32));
sorted_to_unsorted_gpc_map =
nvgpu_kzalloc(g, num_gpcs * sizeof(s32));
nvgpu_kzalloc(g, (size_t)num_gpcs * sizeof(s32));
if (!((init_frac != NULL) &&
(init_err != NULL) &&


@@ -190,8 +190,8 @@ static void update_gmmu_pde_locked(struct vm_gk20a *vm,
virt_addr, phys_addr,
pde_v[1], pde_v[0]);
pd_write(g, &vm->pdb, pd_offset + 0, pde_v[0]);
pd_write(g, &vm->pdb, pd_offset + 1, pde_v[1]);
pd_write(g, &vm->pdb, (size_t)pd_offset + (size_t)0, pde_v[0]);
pd_write(g, &vm->pdb, (size_t)pd_offset + (size_t)1, pde_v[1]);
}
static void __update_pte_sparse(u32 *pte_w)
@@ -292,8 +292,8 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm,
(u32)attrs->ctag >> ctag_shift,
pte_w[1], pte_w[0]);
pd_write(g, pd, pd_offset + 0, pte_w[0]);
pd_write(g, pd, pd_offset + 1, pte_w[1]);
pd_write(g, pd, (size_t)pd_offset + (size_t)0, pte_w[0]);
pd_write(g, pd, (size_t)pd_offset + (size_t)1, pte_w[1]);
}
u32 gk20a_get_pde_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,


@@ -246,7 +246,7 @@ int pmu_bootstrap(struct nvgpu_pmu *pmu)
gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_args);
g->ops.pmu.write_dmatrfbase(g,
addr_load - (desc->bootloader_imem_offset >> 8));
U32(addr_load) - (desc->bootloader_imem_offset >> U32(8)));
blocks = ((desc->bootloader_size + 0xFF) & ~0xFF) >> 8;


@@ -38,8 +38,8 @@ static int regop_bsearch_range_cmp(const void *pkey, const void *pelem)
struct regop_offset_range *prange = (struct regop_offset_range *)pelem;
if (key < prange->base) {
return -1;
} else if (prange->base <= key && key < (prange->base +
(prange->count * 4U))) {
} else if (prange->base <= key && key < (U32(prange->base) +
(U32(prange->count) * U32(4)))) {
return 0;
}
return 1;


@@ -947,8 +947,8 @@ static int lsf_gen_wpr_requirements(struct gk20a *g, struct ls_flcn_mgr *plsfm)
The expectation here is that the secure falcon will do a single DMA
read of this array and cache it internally so it's OK to pack these.
Also, we add 1 to the falcon count to indicate the end of the array.*/
wpr_offset = sizeof(struct lsf_wpr_header) *
(plsfm->managed_flcn_cnt+1);
wpr_offset = U32(sizeof(struct lsf_wpr_header)) *
(U32(plsfm->managed_flcn_cnt) + U32(1));
/* Walk the managed falcons, accounting for the LSB structs
as well as the ucode images. */


@@ -214,7 +214,8 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
if (vco_f >= min_vco_f && vco_f <= max_vco_f) {
lwv = (vco_f + (nvgpu_pl_to_div(pl) / 2))
/ nvgpu_pl_to_div(pl);
delta = abs((s32)(lwv - target_clk_f));
delta = abs(S32(lwv) -
S32(target_clk_f));
if (delta < best_delta) {
best_delta = delta;
@@ -378,8 +379,8 @@ static void clk_config_dvfs_ndiv(int mv, u32 n_eff, struct na_dvfs *d)
det_delta = min(det_delta, d->dfs_det_max);
det_delta = det_delta * d->dfs_coeff;
n = (int)(n_eff << DFS_DET_RANGE) - det_delta;
BUG_ON((n < 0) || (n > (int)(p->max_N << DFS_DET_RANGE)));
n = ((int)n_eff << DFS_DET_RANGE) - det_delta;
BUG_ON((n < 0) || (n > (int)p->max_N << DFS_DET_RANGE));
d->n_int = ((u32)n) >> DFS_DET_RANGE;
rem = ((u32)n) & ((1 << DFS_DET_RANGE) - 1);


@@ -611,11 +611,11 @@ void gr_gm20b_load_tpc_mask(struct gk20a *g)
fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, 0);
if ((g->tpc_fs_mask_user != 0U) &&
(g->tpc_fs_mask_user != fuse_tpc_mask) &&
(fuse_tpc_mask == BIT32(g->gr.max_tpc_count) - 1U)) {
(fuse_tpc_mask == BIT32(g->gr.max_tpc_count) - U32(1))) {
u32 val = g->tpc_fs_mask_user;
val &= BIT32(g->gr.max_tpc_count) - 1U;
val &= BIT32(g->gr.max_tpc_count) - U32(1);
/* skip tpc to disable the other tpc cause channel timeout */
val = BIT32(hweight32(val)) - 1U;
val = BIT32(hweight32(val)) - U32(1);
gk20a_writel(g, gr_fe_tpc_fs_r(), val);
} else {
gk20a_writel(g, gr_fe_tpc_fs_r(), pes_tpc_mask);
@@ -1154,7 +1154,8 @@ u32 gr_gm20b_get_max_lts_per_ltc(struct gk20a *g)
u32 *gr_gm20b_rop_l2_en_mask(struct gk20a *g)
{
struct gr_gk20a *gr = &g->gr;
u32 i, tmp, max_fbps_count, max_ltc_per_fbp;
unsigned long i;
u32 tmp, max_fbps_count, max_ltc_per_fbp;
unsigned long fbp_en_mask;
u32 rop_l2_all_en;


@@ -1168,8 +1168,8 @@ int lsf_gen_wpr_requirements(struct gk20a *g,
The expectation here is that the secure falcon will do a single DMA
read of this array and cache it internally so it's OK to pack these.
Also, we add 1 to the falcon count to indicate the end of the array.*/
wpr_offset = sizeof(struct lsf_wpr_header_v1) *
(plsfm->managed_flcn_cnt+1);
wpr_offset = U32(sizeof(struct lsf_wpr_header_v1)) *
(U32(plsfm->managed_flcn_cnt) + U32(1));
if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR)) {
wpr_offset = ALIGN_UP(wpr_offset,
@@ -1183,8 +1183,8 @@ int lsf_gen_wpr_requirements(struct gk20a *g,
wpr_offset = ALIGN_UP(wpr_offset,
LSF_SUB_WPR_HEADER_ALIGNMENT);
wpr_offset = wpr_offset +
(sizeof(struct lsf_shared_sub_wpr_header) *
(plsfm->managed_sub_wpr_count + 1));
(U32(sizeof(struct lsf_shared_sub_wpr_header)) *
(U32(plsfm->managed_sub_wpr_count) + U32(1)));
}
/* Walk the managed falcons, accounting for the LSB structs


@@ -331,7 +331,7 @@ void gp10b_clk_arb_run_arbiter_cb(struct nvgpu_clk_arb *arb)
* pass (gpcclk) freq = (gpc2clk) freq / 2
*/
status = g->ops.clk.clk_get_round_rate(g,
CTRL_CLK_DOMAIN_GPCCLK, (gpc2clk_session_target/2) * 1000000UL, &rounded_rate);
CTRL_CLK_DOMAIN_GPCCLK, ((unsigned long)gpc2clk_session_target / 2UL) * 1000000UL, &rounded_rate);
clk_arb_dbg(g, "rounded_rate: %lu\n",
rounded_rate);


@@ -101,8 +101,8 @@ static void update_gmmu_pde3_locked(struct vm_gk20a *vm,
pde_v[0] |= gmmu_new_pde_vol_true_f();
pde_v[1] |= phys_addr >> 24;
pd_write(g, pd, pd_offset + 0, pde_v[0]);
pd_write(g, pd, pd_offset + 1, pde_v[1]);
pd_write(g, pd, (size_t)pd_offset + (size_t)0, pde_v[0]);
pd_write(g, pd, (size_t)pd_offset + (size_t)1, pde_v[1]);
pte_dbg(g, attrs,
"PDE: i=%-4u size=%-2u offs=%-4u pgsz: -- | "
@@ -160,10 +160,10 @@ static void update_gmmu_pde0_locked(struct vm_gk20a *vm,
pde_v[1] |= big_addr >> 28;
}
pd_write(g, pd, pd_offset + 0, pde_v[0]);
pd_write(g, pd, pd_offset + 1, pde_v[1]);
pd_write(g, pd, pd_offset + 2, pde_v[2]);
pd_write(g, pd, pd_offset + 3, pde_v[3]);
pd_write(g, pd, (size_t)pd_offset + (size_t)0, pde_v[0]);
pd_write(g, pd, (size_t)pd_offset + (size_t)1, pde_v[1]);
pd_write(g, pd, (size_t)pd_offset + (size_t)2, pde_v[2]);
pd_write(g, pd, (size_t)pd_offset + (size_t)3, pde_v[3]);
pte_dbg(g, attrs,
"PDE: i=%-4u size=%-2u offs=%-4u pgsz: %c%c | "
@@ -271,8 +271,8 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm,
(u32)attrs->ctag / g->ops.fb.compression_page_size(g),
pte_w[1], pte_w[0]);
pd_write(g, pd, pd_offset + 0, pte_w[0]);
pd_write(g, pd, pd_offset + 1, pte_w[1]);
pd_write(g, pd, (size_t)pd_offset + (size_t)0, pte_w[0]);
pd_write(g, pd, (size_t)pd_offset + (size_t)1, pte_w[1]);
}
#define GP10B_PDE0_ENTRY_SIZE 16
@@ -306,9 +306,9 @@ static u32 gp10b_get_pde0_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
if (pde_v[2] & (gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f() |
gmmu_new_dual_pde_aperture_small_sys_mem_coh_f() |
gmmu_new_dual_pde_aperture_small_video_memory_f())) {
u64 addr = (((u64) pde_v[3] << 32) | (u64) (pde_v[2] &
gmmu_new_dual_pde_address_small_sys_f(~0))) <<
gmmu_new_dual_pde_address_shift_v();
u64 addr = ((U64(pde_v[3]) << U64(32)) | (U64(pde_v[2]) &
U64(gmmu_new_dual_pde_address_small_sys_f(~0)))) <<
U64(gmmu_new_dual_pde_address_shift_v());
if (addr) {
pgsz = GMMU_PAGE_SIZE_SMALL;
@@ -318,9 +318,9 @@ static u32 gp10b_get_pde0_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
if (pde_v[0] & (gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f() |
gmmu_new_dual_pde_aperture_big_sys_mem_coh_f() |
gmmu_new_dual_pde_aperture_big_video_memory_f())) {
u64 addr = (((u64) pde_v[1] << 32) | (u64) (pde_v[0] &
gmmu_new_dual_pde_address_big_sys_f(~0))) <<
gmmu_new_dual_pde_address_big_shift_v();
u64 addr = ((U64(pde_v[1]) << U64(32)) | (U64(pde_v[0]) &
U64(gmmu_new_dual_pde_address_big_sys_f(~0)))) <<
U64(gmmu_new_dual_pde_address_big_shift_v());
if (addr) {
/*


@@ -207,7 +207,8 @@ void gr_gv100_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
int gr_gv100_init_sm_id_table(struct gk20a *g)
{
u32 gpc, tpc, sm, pes, gtpc;
unsigned long tpc;
u32 gpc, sm, pes, gtpc;
u32 sm_id = 0;
u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC);
u32 num_sm = sm_per_tpc * g->gr.tpc_count;


@@ -769,8 +769,8 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
struct fifo_gk20a *f = &g->fifo;
unsigned long runlist_served_pbdmas;
unsigned long runlist_served_engines;
u32 pbdma_id;
u32 act_eng_id;
unsigned long pbdma_id;
unsigned long act_eng_id;
u32 runlist_id;
int ret = 0;
u32 tsgid;
@@ -925,7 +925,8 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
u32 runlists_mask)
{
struct tsg_gk20a *tsg = NULL;
u32 rlid, tsgid;
unsigned long tsgid;
u32 rlid;
struct fifo_runlist_info_gk20a *runlist = NULL;
u32 token = PMU_INVALID_MUTEX_OWNER_ID;
u32 mutex_ret = 0;
@@ -948,7 +949,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
for_each_set_bit(tsgid, runlist->active_tsgs,
g->fifo.num_channels) {
nvgpu_log(g, gpu_dbg_info, "abort tsg id %d", tsgid);
nvgpu_log(g, gpu_dbg_info, "abort tsg id %lu", tsgid);
tsg = &g->fifo.tsg[tsgid];
gk20a_disable_tsg(tsg);
@@ -977,7 +978,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
gk20a_fifo_abort_tsg(g, tsg->tsgid, false);
nvgpu_log(g, gpu_dbg_info, "aborted tsg id %d", tsgid);
nvgpu_log(g, gpu_dbg_info, "aborted tsg id %lu", tsgid);
}
}
if (!mutex_ret) {
@@ -990,9 +991,11 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
struct mmu_fault_info *mmfault)
{
struct tsg_gk20a *tsg = NULL;
u32 runlists_mask, rlid, pbdma_id;
u32 runlists_mask, rlid;
unsigned long pbdma_id;
struct fifo_runlist_info_gk20a *runlist = NULL;
u32 engine_id, client_type = ~0;
unsigned long engine_id;
u32 client_type = ~0;
struct fifo_gk20a *f = &g->fifo;
u32 runlist_id = FIFO_INVAL_RUNLIST_ID;
u32 num_runlists = 0;
@@ -1146,7 +1149,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
engine_id, client_type, false)) {
g->fifo.deferred_fault_engines |=
BIT(engine_id);
BIT64(engine_id);
/* handled during channel free */
g->fifo.deferred_reset_pending = true;


@@ -2994,10 +2994,10 @@ void gr_gv11b_load_tpc_mask(struct gk20a *g)
fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, gpc);
if (g->tpc_fs_mask_user &&
g->tpc_fs_mask_user != fuse_tpc_mask &&
fuse_tpc_mask == (0x1U << g->gr.max_tpc_count) - 1U) {
fuse_tpc_mask == BIT32(g->gr.max_tpc_count) - U32(1)) {
val = g->tpc_fs_mask_user;
val &= (0x1U << g->gr.max_tpc_count) - 1U;
val = (0x1U << hweight32(val)) - 1U;
val &= BIT32(g->gr.max_tpc_count) - U32(1);
val = BIT32(hweight32(val)) - U32(1);
gk20a_writel(g, gr_fe_tpc_fs_r(0), val);
} else {
gk20a_writel(g, gr_fe_tpc_fs_r(0), pes_tpc_mask);


@@ -122,8 +122,8 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g)
size_t fb_size;
/* Max entries take care of 1 entry used for full detection */
fb_size = (g->ops.fifo.get_num_fifos(g) + 1) *
gmmu_fault_buf_size_v();
fb_size = ((size_t)g->ops.fifo.get_num_fifos(g) + (size_t)1) *
(size_t)gmmu_fault_buf_size_v();
if (!nvgpu_mem_is_valid(
&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY])) {


@@ -265,7 +265,8 @@ int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu)
gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_args);
g->ops.pmu.write_dmatrfbase(g,
addr_load_lo - (desc->bootloader_imem_offset >> 8));
U32(addr_load_lo) -
(desc->bootloader_imem_offset >> U32(8)));
blocks = ((desc->bootloader_size + 0xFF) & ~0xFF) >> 8;


@@ -27,10 +27,10 @@
/*
* Explicit sizes for bit definitions. Please use these instead of BIT().
*/
#define BIT8(i) (U8(1) << (i))
#define BIT16(i) (U16(1) << (i))
#define BIT32(i) (U32(1) << (i))
#define BIT64(i) (U64(1) << (i))
#define BIT8(i) (U8(1) << U8(i))
#define BIT16(i) (U16(1) << U16(i))
#define BIT32(i) (U32(1) << U32(i))
#define BIT64(i) (U64(1) << U64(i))
#ifdef __KERNEL__
#include <linux/bitops.h>
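
With the change above the BITxx() helpers cast the shift count as well as the shifted
constant, so both operands of the shift end up essentially unsigned and of the stated
width, in line with the essential-type rules this commit targets. A hedged usage sketch
(the leaf-register math is illustrative only, not the real tu104 interrupt header):

#include <stdint.h>
#include <stdbool.h>

typedef uint32_t u32;

#define U32(x)   ((u32)(x))
#define BIT32(i) (U32(1) << U32(i))

/* Illustrative only: test one interrupt bit within a 32-bit leaf register. */
static bool leaf_bit_pending(u32 leaf_val, u32 intr_vector)
{
        return (leaf_val & BIT32(intr_vector % 32U)) != 0U;
}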


@@ -55,7 +55,7 @@ struct nvgpu_clk_session;
__fls((a)->pstates) :\
VF_POINT_INVALID_PSTATE)
#define VF_POINT_COMMON_PSTATE(a, b) (((a)->pstates & (b)->pstates) != 0U ?\
__fls((a)->pstates & (b)->pstates) :\
__fls((unsigned long)((a)->pstates) & (unsigned long)((b)->pstates)) :\
VF_POINT_INVALID_PSTATE)
/*


@@ -265,7 +265,7 @@ void nvgpu_pd_cache_fini(struct gk20a *g);
static inline u32 pd_offset_from_index(const struct gk20a_mmu_level *l,
u32 pd_idx)
{
return (pd_idx * l->entry_size) / sizeof(u32);
return (pd_idx * l->entry_size) / U32(sizeof(u32));
}
static inline void pd_write(struct gk20a *g, struct nvgpu_gmmu_pd *pd,


@@ -37,7 +37,7 @@ struct nvgpu_cpu_time_correlation_sample {
static inline u32 ptimer_scalingfactor10x(u32 ptimer_src_freq)
{
return (u32)(((u64)(PTIMER_REF_FREQ_HZ * 10)) / ptimer_src_freq);
return U32((U64(PTIMER_REF_FREQ_HZ) * U64(10)) / U64(ptimer_src_freq));
}
static inline u32 scale_ptimer(u32 timeout , u32 scale10x)


@@ -145,7 +145,7 @@ static int get_lpwr_ms_table(struct gk20a *g)
pms_data->default_entry_idx = (u8)header.default_entry_idx;
pms_data->idle_threshold_us = (u32)(header.idle_threshold_us * 10);
pms_data->idle_threshold_us = U32(header.idle_threshold_us) * U32(10);
/* Parse the LPWR MS Table entries.*/
for (idx = 0; idx < header.entry_count; idx++) {


@@ -31,8 +31,8 @@
unsigned long __nvgpu_posix_ffs(unsigned long word)
{
return (__builtin_ffsl(word) - 1) &
((sizeof(unsigned long) * 8UL) - 1UL);
return ((unsigned long)__builtin_ffsl(word) - 1UL) &
(((unsigned long)sizeof(unsigned long) * 8UL) - 1UL);
}
unsigned long __nvgpu_posix_fls(unsigned long word)


@@ -139,8 +139,8 @@ int nvgpu_mem_create_from_mem(struct gk20a *g,
struct nvgpu_mem *dest, struct nvgpu_mem *src,
u64 start_page, int nr_pages)
{
u64 start = start_page * PAGE_SIZE;
u64 size = nr_pages * PAGE_SIZE;
u64 start = start_page * U64(PAGE_SIZE);
u64 size = U64(nr_pages) * U64(PAGE_SIZE);
if (src->aperture != APERTURE_SYSMEM)
return -EINVAL;


@@ -227,7 +227,7 @@ static u32 dev_init_get_vfield_info(struct gk20a *g,
(i * vheader.entry_size),
vheader.entry_size);
currindex = VFIELD_BIT_REG(ventry);
currindex = U32(VFIELD_BIT_REG(ventry));
if (currindex != oldindex) {
memcpy(&vregentry, vfieldregtableptr +


@@ -369,8 +369,9 @@ static int parse_pstate_table_5x(struct gk20a *g,
p += hdr->header_size;
entry_size = hdr->base_entry_size +
hdr->clock_entry_count * hdr->clock_entry_size;
entry_size = U32(hdr->base_entry_size) +
U32(hdr->clock_entry_count) *
U32(hdr->clock_entry_size);
for (i = 0; i < hdr->base_entry_count; i++, p += entry_size) {
entry = (struct vbios_pstate_entry_5x *)p;