gpu: nvgpu: common: fix MISRA Rule 10.4 violations

MISRA C:2012 Rule 10.4 only allows arithmetic operations on operands of the same essential type category. This change appends a "U" suffix to integer literals, or casts one operand to the type of the other, wherever an arithmetic operation mixes signed and unsigned int operands, and thereby fixes the resulting violations.

JIRA NVGPU-992

Change-Id: I27e3e59c3559c377b4bd3cbcfced90fdf90350f2
Signed-off-by: Sai Nikhil <snikhil@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1921459
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
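For reference, MISRA C:2012 Rule 10.4 requires both operands of an arithmetic or comparison operator to belong to the same essential type category (signed vs. unsigned). Below is a minimal sketch of the two remediation patterns used throughout this patch; the identifiers are purely illustrative stand-ins, not nvgpu code:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_FLAG 0x1U	/* hypothetical flag bit; note the "U" suffix */

static size_t example_alloc_size(int count, size_t elem_size)
{
	/* 'count' is signed and 'elem_size' is unsigned: cast the signed
	 * operand so both sides of '*' share the unsigned category. */
	return (size_t)count * elem_size;
}

static bool example_flag_set(uint32_t flags)
{
	/* '(flags & EXAMPLE_FLAG)' is essentially unsigned, so compare it
	 * against the unsigned literal 0U rather than the signed 0. */
	return (flags & EXAMPLE_FLAG) != 0U;
}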
committed by mobile promotions
parent 2bded93b28
commit 303fc7496c
@@ -56,10 +56,10 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
 struct vm_gk20a *vm;
 char name[32];
 const bool userspace_managed =
-(flags & NVGPU_AS_ALLOC_USERSPACE_MANAGED) != 0;
+(flags & NVGPU_AS_ALLOC_USERSPACE_MANAGED) != 0U;
 const bool unified_va =
 nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES) ||
-(flags & NVGPU_AS_ALLOC_UNIFIED_VA) != 0;
+(flags & NVGPU_AS_ALLOC_UNIFIED_VA) != 0U;

 nvgpu_log_fn(g, " ");

@@ -69,7 +69,7 @@ u32 gk20a_bus_set_bar0_window(struct gk20a *g, struct nvgpu_mem *mem,
 u64 addr = bufbase + w * sizeof(u32);
 u32 hi = (u32)((addr & ~(u64)0xfffff)
 >> bus_bar0_window_target_bar0_window_base_shift_v());
-u32 lo = (u32)(addr & 0xfffff);
+u32 lo = U32(addr & 0xfffffULL);
 u32 win = nvgpu_aperture_mask(g, mem,
 bus_bar0_window_target_sys_mem_noncoherent_f(),
 bus_bar0_window_target_sys_mem_coherent_f(),

@@ -169,7 +169,7 @@ int nvgpu_ecc_counter_init_per_fbpa(struct gk20a *g,
 int num_fbpa = nvgpu_get_litter_value(g, GPU_LIT_NUM_FBPAS);
 struct nvgpu_ecc_stat *stats;

-stats = nvgpu_kzalloc(g, sizeof(*stats) * num_fbpa);
+stats = nvgpu_kzalloc(g, sizeof(*stats) * (size_t)num_fbpa);
 if (stats == NULL) {
 return -ENOMEM;
 }

@@ -314,9 +314,9 @@ static void falcon_print_mem(struct nvgpu_falcon *flcn, u32 src,
 break;
 }

-for (i = 0; i < (byte_read_count >> 2); i += 4U) {
+for (i = 0U; i < (byte_read_count >> 2U); i += 4U) {
 nvgpu_info(flcn->g, "%#06x: %#010x %#010x %#010x %#010x",
-src + (i << 2), buff[i], buff[i+1U],
+src + (i << 2U), buff[i], buff[i+1U],
 buff[i+2U], buff[i+3U]);
 }

@@ -97,7 +97,7 @@ int gm20b_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)

 do {
 data = gk20a_readl(g, fb_mmu_ctrl_r());
-if (fb_mmu_ctrl_pri_fifo_space_v(data) != 0) {
+if (fb_mmu_ctrl_pri_fifo_space_v(data) != 0U) {
 break;
 }
 nvgpu_udelay(2);
@@ -203,7 +203,7 @@ unsigned int gm20b_fb_compressible_page_size(struct gk20a *g)

 u32 gm20b_fb_compression_align_mask(struct gk20a *g)
 {
-return SZ_64K - 1;
+return SZ_64K - 1U;
 }

 void gm20b_fb_dump_vpr_info(struct gk20a *g)
@@ -212,7 +212,7 @@ void gm20b_fb_dump_vpr_info(struct gk20a *g)

 /* print vpr info */
 val = gk20a_readl(g, fb_mmu_vpr_info_r());
-val &= ~0x3;
+val &= ~0x3U;
 val |= fb_mmu_vpr_info_index_addr_lo_v();
 gk20a_writel(g, fb_mmu_vpr_info_r(), val);
 nvgpu_err(g, "VPR: %08x %08x %08x %08x",
@@ -228,7 +228,7 @@ void gm20b_fb_dump_wpr_info(struct gk20a *g)

 /* print wpr info */
 val = gk20a_readl(g, fb_mmu_wpr_info_r());
-val &= ~0xf;
+val &= ~0xfU;
 val |= (fb_mmu_wpr_info_index_allow_read_v());
 gk20a_writel(g, fb_mmu_wpr_info_r(), val);
 nvgpu_err(g, "WPR: %08x %08x %08x %08x %08x %08x",
@@ -280,7 +280,7 @@ void gm20b_fb_read_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf)
 u64 wpr_end = 0;

 val = gk20a_readl(g, fb_mmu_wpr_info_r());
-val &= ~0xF;
+val &= ~0xFU;
 val |= fb_mmu_wpr_info_index_wpr1_addr_lo_v();
 gk20a_writel(g, fb_mmu_wpr_info_r(), val);

@@ -290,7 +290,7 @@ void gm20b_fb_read_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf)
 (val << WPR_INFO_ADDR_ALIGNMENT));

 val = gk20a_readl(g, fb_mmu_wpr_info_r());
-val &= ~0xF;
+val &= ~0xFU;
 val |= fb_mmu_wpr_info_index_wpr1_addr_hi_v();
 gk20a_writel(g, fb_mmu_wpr_info_r(), val);

@@ -141,7 +141,7 @@ void gv11b_fb_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
 gr->compbit_store.base_hw = compbit_base_post_divide;

 g->ops.ltc.cbc_ctrl(g, gk20a_cbc_op_invalidate,
-0, max_comptag_lines - 1);
+0, max_comptag_lines - 1U);

 }

@@ -308,7 +308,7 @@ static bool gv11b_fb_is_fault_buffer_full(struct gk20a *g, u32 index)

 entries = gv11b_fb_fault_buffer_size_val(g, index);

-return get_idx == ((put_idx + 1) % entries);
+return get_idx == ((put_idx + 1U) % entries);
 }

 void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
@@ -354,7 +354,7 @@ void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
 nvgpu_log_info(g, "fault status busy set, check again");
 fault_status = g->ops.fb.read_mmu_fault_status(g);

-nvgpu_usleep_range(delay, delay * 2);
+nvgpu_usleep_range(delay, delay * 2U);
 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
 } while (nvgpu_timeout_expired_msg(&timeout,
 "fault status busy set") == 0);
@@ -1036,7 +1036,7 @@ void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,

 gv11b_fb_copy_from_hw_fault_buf(g, mem, offset, mmfault);

-get_indx = (get_indx + 1) % entries;
+get_indx = (get_indx + 1U) % entries;
 nvgpu_log(g, gpu_dbg_intr, "new get index = %d", get_indx);

 gv11b_fb_fault_buffer_get_ptr_update(g, index, get_indx);
@@ -1506,7 +1506,7 @@ static int gv11b_fb_fix_page_fault(struct gk20a *g,
 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte,
 "pte: %#08x %#08x", pte[1], pte[0]);

-if (pte[0] == 0x0 && pte[1] == 0x0) {
+if (pte[0] == 0x0U && pte[1] == 0x0U) {
 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte,
 "pte all zeros, do not set valid");
 return -1;

@@ -466,7 +466,7 @@ void fb_tu104_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
 gr->compbit_store.base_hw = compbit_store_base;

 g->ops.ltc.cbc_ctrl(g, gk20a_cbc_op_invalidate,
-0, gr->max_comptag_lines - 1);
+0, gr->max_comptag_lines - 1U);
 }

 static int tu104_fb_wait_mmu_bind(struct gk20a *g)
@@ -553,8 +553,8 @@ int tu104_fb_apply_pdb_cache_war(struct gk20a *g)
 }

 /* Bind 257th (last) instance block that reserves PDB cache entry 255 */
-inst_blk_addr = u64_lo32((inst_blk_base_addr + (256 * PAGE_SIZE))
->> fb_mmu_bind_imb_addr_alignment_v());
+inst_blk_addr = u64_lo32((inst_blk_base_addr + (256ULL * U64(PAGE_SIZE)))
+>> U64(fb_mmu_bind_imb_addr_alignment_v()));

 nvgpu_writel(g, fb_mmu_bind_imb_r(),
 fb_mmu_bind_imb_addr_f(inst_blk_addr) |

@@ -138,7 +138,7 @@ int gk20a_channel_get_timescale_from_timeslice(struct gk20a *g,

 /* value field is 8 bits long */
 while (value >= BIT32(8)) {
-value >>= 1;
+value >>= 1U;
 shift++;
 }

@@ -830,7 +830,7 @@ static void channel_gk20a_free_priv_cmdbuf(struct channel_gk20a *c)
 struct vm_gk20a *ch_vm = c->vm;
 struct priv_cmd_queue *q = &c->priv_cmd_q;

-if (q->size == 0) {
+if (q->size == 0U) {
 return;
 }

@@ -865,7 +865,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
 nvgpu_log_info(c->g, "ch %d: priv cmd queue get:put %d:%d",
 c->chid, q->get, q->put);

-free_count = (q->size - (q->put - q->get) - 1) % q->size;
+free_count = (q->size - (q->put - q->get) - 1U) % q->size;

 if (size > free_count) {
 return -EAGAIN;
@@ -883,7 +883,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
 } else {
 e->off = q->put;
 e->gva = q->mem.gpu_va + q->put * sizeof(u32);
-q->put = (q->put + orig_size) & (q->size - 1);
+q->put = (q->put + orig_size) & (q->size - 1U);
 }

 /* we already handled q->put + size > q->size so BUG_ON this */
@@ -1009,7 +1009,7 @@ static void channel_gk20a_joblist_add(struct channel_gk20a *c,
 struct channel_gk20a_job *job)
 {
 if (channel_gk20a_is_prealloc_enabled(c)) {
-c->joblist.pre_alloc.put = (c->joblist.pre_alloc.put + 1) %
+c->joblist.pre_alloc.put = (c->joblist.pre_alloc.put + 1U) %
 (c->joblist.pre_alloc.length);
 } else {
 nvgpu_list_add_tail(&job->list, &c->joblist.dynamic.jobs);
@@ -1020,7 +1020,7 @@ static void channel_gk20a_joblist_delete(struct channel_gk20a *c,
 struct channel_gk20a_job *job)
 {
 if (channel_gk20a_is_prealloc_enabled(c)) {
-c->joblist.pre_alloc.get = (c->joblist.pre_alloc.get + 1) %
+c->joblist.pre_alloc.get = (c->joblist.pre_alloc.get + 1U) %
 (c->joblist.pre_alloc.length);
 } else {
 nvgpu_list_del(&job->list);
@@ -1358,7 +1358,7 @@ static inline u32 update_gp_get(struct gk20a *g,

 u32 nvgpu_gp_free_count(struct channel_gk20a *c)
 {
-return (c->gpfifo.entry_num - (c->gpfifo.put - c->gpfifo.get) - 1) %
+return (c->gpfifo.entry_num - (c->gpfifo.put - c->gpfifo.get) - 1U) %
 c->gpfifo.entry_num;
 }

@@ -1889,7 +1889,7 @@ int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e)
 if (e->valid) {
 /* read the entry's valid flag before reading its contents */
 nvgpu_smp_rmb();
-if ((q->get != e->off) && e->off != 0) {
+if ((q->get != e->off) && e->off != 0U) {
 nvgpu_err(g, "requests out-of-order, ch=%d",
 c->chid);
 }

@@ -219,8 +219,8 @@ static int nvgpu_submit_append_gpfifo_user_direct(struct channel_gk20a *c,

 if (end > gpfifo_size) {
 /* wrap-around */
-int length0 = gpfifo_size - start;
-int length1 = len - length0;
+u32 length0 = gpfifo_size - start;
+u32 length1 = len - length0;

 err = g->os_channel.copy_user_gpfifo(
 gpfifo_cpu + start, userdata,
@@ -261,8 +261,8 @@ static void nvgpu_submit_append_gpfifo_common(struct channel_gk20a *c,

 if (end > gpfifo_size) {
 /* wrap-around */
-int length0 = gpfifo_size - start;
-int length1 = len - length0;
+u32 length0 = gpfifo_size - start;
+u32 length1 = len - length0;
 struct nvgpu_gpfifo_entry *src2 = src + length0;

 nvgpu_mem_wr_n(g, gpfifo_mem, start, src, length0);

@@ -165,7 +165,7 @@ int gp106_fuse_read_vin_cal_slope_intercept_fuse(struct gk20a *g,
 case CTRL_CLK_VIN_ID_LTC:
 case CTRL_CLK_VIN_ID_SRAM:
 slopedata =
-(fuse_vin_cal_gpc1_delta_slope_int_data_v(data)) * 1000;
+(fuse_vin_cal_gpc1_delta_slope_int_data_v(data)) * 1000U;
 break;

 default:

@@ -83,7 +83,7 @@ int gk20a_prepare_poweroff(struct gk20a *g)

 if (g->ops.fifo.channel_suspend != NULL) {
 ret = g->ops.fifo.channel_suspend(g);
-if (ret != 0) {
+if (ret != 0U) {
 return ret;
 }
 }

@@ -126,12 +126,12 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,

 trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max);

-if (gr->compbit_store.mem.size == 0) {
+if (gr->compbit_store.mem.size == 0ULL) {
 return 0;
 }

 while (true) {
-const u32 iter_max = min(min + max_lines - 1, max);
+const u32 iter_max = min(min + max_lines - 1U, max);
 bool full_cache_op = true;

 nvgpu_mutex_acquire(&g->mm.l2_op_lock);
@@ -194,7 +194,7 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 }

 /* note: iter_max is inclusive upper bound */
-min = iter_max + 1;
+min = iter_max + 1U;

 /* give a chance for higher-priority threads to progress */
 nvgpu_mutex_release(&g->mm.l2_op_lock);
@@ -259,7 +259,7 @@ void gm20b_ltc_isr(struct gk20a *g, unsigned int ltc)
 {
 unsigned int slice;

-for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
+for (slice = 0U; slice < g->gr.slices_per_ltc; slice++) {
 gm20b_ltc_lts_isr(g, ltc, slice);
 }
 }
@@ -269,7 +269,7 @@ u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base)
 u32 val = gk20a_readl(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r());
 if (val == 2U) {
 return base * 2;
-} else if (val != 1) {
+} else if (val != 1U) {
 nvgpu_err(g, "Invalid number of active ltcs: %08x", val);
 }

@@ -484,7 +484,7 @@ void gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
 gr->compbit_store.base_hw = compbit_base_post_divide;

 g->ops.ltc.cbc_ctrl(g, gk20a_cbc_op_invalidate,
-0, max_comptag_lines - 1);
+0, max_comptag_lines - 1U);

 }

@@ -525,7 +525,7 @@ bool gm20b_ltc_is_ltcn_ltss_addr(struct gk20a *g, u32 addr)
 {
 u32 lts_shared_base = ltc_ltc0_ltss_v();
 u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
-u32 addr_mask = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE) - 1;
+u32 addr_mask = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE) - 1U;
 u32 base_offset = lts_shared_base & addr_mask;
 u32 end_offset = base_offset + lts_stride;

@@ -548,7 +548,7 @@ static void gm20b_ltc_update_ltc_lts_addr(struct gk20a *g, u32 addr, u32 ltc_num
 priv_addr_table[index++] = ltc_ltc0_lts0_v() +
 ltc_num * ltc_stride +
 lts_num * lts_stride +
-(addr & (lts_stride - 1));
+(addr & (lts_stride - 1U));
 }

 *priv_addr_table_index = index;

@@ -46,7 +46,7 @@ int gp10b_determine_L2_size_bytes(struct gk20a *g)
 tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_info_1_r());

 ret = g->ltc_count *
-ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(tmp)*1024 *
+ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(tmp) * 1024U *
 ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(tmp);

 nvgpu_log(g, gpu_dbg_info, "L2 size: %d\n", ret);
@@ -156,7 +156,7 @@ int gp10b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 }

 while (true) {
-const u32 iter_max = min(min + max_lines - 1, max);
+const u32 iter_max = min(min + max_lines - 1U, max);
 bool full_cache_op = true;

 nvgpu_mutex_acquire(&g->mm.l2_op_lock);
@@ -221,7 +221,7 @@ int gp10b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 }

 /* note: iter_max is inclusive upper bound */
-min = iter_max + 1;
+min = iter_max + 1U;

 /* give a chance for higher-priority threads to progress */
 nvgpu_mutex_release(&g->mm.l2_op_lock);
@@ -292,7 +292,7 @@ void gp10b_ltc_isr(struct gk20a *g, unsigned int ltc)
 {
 unsigned int slice;

-for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
+for (slice = 0U; slice < g->gr.slices_per_ltc; slice++) {
 gp10b_ltc_lts_isr(g, ltc, slice);
 }
 }

@@ -207,7 +207,7 @@ void gv11b_ltc_isr(struct gk20a *g, unsigned int ltc)
 {
 unsigned int slice;

-for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
+for (slice = 0U; slice < g->gr.slices_per_ltc; slice++) {
 gv11b_ltc_lts_isr(g, ltc, slice);
 }
 }

@@ -155,7 +155,7 @@ int ltc_tu104_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 }

 while (true) {
-const u32 iter_max = min(min + max_lines - 1, max);
+const u32 iter_max = min(min + max_lines - 1U, max);
 bool full_cache_op = true;

 nvgpu_mutex_acquire(&g->mm.l2_op_lock);
@@ -219,7 +219,7 @@ int ltc_tu104_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 }

 /* note: iter_max is inclusive upper bound */
-min = iter_max + 1;
+min = iter_max + 1U;

 /* give a chance for higher-priority threads to progress */
 nvgpu_mutex_release(&g->mm.l2_op_lock);

@@ -350,7 +350,7 @@ void gm20b_mc_ltc_isr(struct gk20a *g)
 mc_intr = gk20a_readl(g, mc_intr_ltc_r());
 nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr);
 for (ltc = 0; ltc < g->ltc_count; ltc++) {
-if ((mc_intr & 1U << ltc) == 0) {
+if ((mc_intr & BIT32(ltc)) == 0U) {
 continue;
 }
 g->ops.ltc.isr(g, ltc);

@@ -232,7 +232,7 @@ void mc_gp10b_ltc_isr(struct gk20a *g)
 mc_intr = gk20a_readl(g, mc_intr_ltc_r());
 nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr);
 for (ltc = 0; ltc < g->ltc_count; ltc++) {
-if ((mc_intr & 1U << ltc) == 0) {
+if ((mc_intr & BIT32(ltc)) == 0U) {
 continue;
 }
 g->ops.ltc.isr(g, ltc);

@@ -25,14 +25,14 @@

 #include <nvgpu/types.h>

-#define NV_CPU_INTR_SUBTREE_TO_TOP_IDX(i) ((i) / 32)
-#define NV_CPU_INTR_SUBTREE_TO_TOP_BIT(i) ((i) % 32)
-#define NV_CPU_INTR_SUBTREE_TO_LEAF_REG0(i) ((i)*2)
-#define NV_CPU_INTR_SUBTREE_TO_LEAF_REG1(i) (((i)*2) + 1)
+#define NV_CPU_INTR_SUBTREE_TO_TOP_IDX(i) ((i) / 32U)
+#define NV_CPU_INTR_SUBTREE_TO_TOP_BIT(i) ((i) % 32U)
+#define NV_CPU_INTR_SUBTREE_TO_LEAF_REG0(i) ((i)*2U)
+#define NV_CPU_INTR_SUBTREE_TO_LEAF_REG1(i) (((i)*2U) + 1U)

-#define NV_CPU_INTR_GPU_VECTOR_TO_LEAF_REG(i) ((i) / 32)
-#define NV_CPU_INTR_GPU_VECTOR_TO_LEAF_BIT(i) ((i) % 32)
-#define NV_CPU_INTR_GPU_VECTOR_TO_SUBTREE(i) ((NV_CPU_INTR_GPU_VECTOR_TO_LEAF_REG(i)) / 2)
+#define NV_CPU_INTR_GPU_VECTOR_TO_LEAF_REG(i) ((i) / 32U)
+#define NV_CPU_INTR_GPU_VECTOR_TO_LEAF_BIT(i) ((i) % 32U)
+#define NV_CPU_INTR_GPU_VECTOR_TO_SUBTREE(i) ((NV_CPU_INTR_GPU_VECTOR_TO_LEAF_REG(i)) / 2U)

 #define NV_CPU_INTR_TOP_NONSTALL_SUBTREE 0U

@@ -72,7 +72,7 @@ static u64 nvgpu_lockless_alloc(struct nvgpu_allocator *a, u64 len)
 new_head = NV_ACCESS_ONCE(pa->next[head]);
 ret = cmpxchg(&pa->head, head, new_head);
 if (ret == head) {
-addr = pa->base + head * pa->blk_size;
+addr = pa->base + U64(head) * pa->blk_size;
 nvgpu_atomic_inc(&pa->nr_allocs);
 alloc_dbg(a, "Alloc node # %d @ addr 0x%llx", head,
 addr);
@@ -178,7 +178,7 @@ int nvgpu_lockless_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 * In order to control memory footprint, we require count < INT_MAX
 */
 count = length / blk_size;
-if (base == 0ULL || count == 0ULL || count > INT_MAX) {
+if (base == 0ULL || count == 0ULL || count > U64(INT_MAX)) {
 return -EINVAL;
 }

@@ -48,9 +48,9 @@
 /*
 * VIDMEM page size is 4k.
 */
-#define PAGE_SIZE 0x1000
-#define PAGE_ALIGN(addr) ((addr + (PAGE_SIZE - 1)) & \
-((typeof(addr)) ~(PAGE_SIZE - 1)))
+#define PAGE_SIZE 0x1000U
+#define PAGE_ALIGN(addr) ((addr + (PAGE_SIZE - 1U)) & \
+((typeof(addr)) ~(PAGE_SIZE - 1U)))

 /*
 * Handle the book-keeping for these operations.

@@ -351,7 +351,7 @@ static int pd_allocate_children(struct vm_gk20a *vm,

 pd->num_entries = pd_entries(l, attrs);
 pd->entries = nvgpu_vzalloc(g, sizeof(struct nvgpu_gmmu_pd) *
-pd->num_entries);
+(unsigned long)pd->num_entries);
 if (pd->entries == NULL) {
 return -ENOMEM;
 }
@@ -725,7 +725,7 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
 (sgt != NULL) ? "MAP" : "UNMAP",
 virt_addr,
 length,
-(sgt != NULL) ? nvgpu_sgt_get_phys(g, sgt, sgt->sgl) : 0,
+(sgt != NULL) ? nvgpu_sgt_get_phys(g, sgt, sgt->sgl) : 0ULL,
 space_to_skip,
 page_size >> 10,
 nvgpu_gmmu_perm_str(attrs->rw_flag),

@@ -761,7 +761,7 @@ int nvgpu_vm_get_buffers(struct vm_gk20a *vm,
 nvgpu_mutex_acquire(&vm->update_gmmu_lock);

 buffer_list = nvgpu_big_zalloc(vm->mm->g, sizeof(*buffer_list) *
-vm->num_user_mapped_buffers);
+(size_t)vm->num_user_mapped_buffers);
 if (buffer_list == NULL) {
 nvgpu_mutex_release(&vm->update_gmmu_lock);
 return -ENOMEM;

@@ -58,7 +58,7 @@ u32 *nvgpu_netlist_alloc_u32_list(struct gk20a *g, struct netlist_u32_list *u32l
 static int nvgpu_netlist_alloc_load_u32_list(struct gk20a *g, u8 *src, u32 len,
 struct netlist_u32_list *u32_list)
 {
-u32_list->count = (len + sizeof(u32) - 1) / sizeof(u32);
+u32_list->count = (len + U32(sizeof(u32)) - 1U) / U32(sizeof(u32));
 if (nvgpu_netlist_alloc_u32_list(g, u32_list) == NULL) {
 return -ENOMEM;
 }

@@ -1057,11 +1057,11 @@ int acr_ucode_patch_sig(struct gk20a *g,
 }

 /* Patching logic:*/
-for (i = 0; i < sizeof(*p_patch_loc)>>2; i++) {
-p_img[(p_patch_loc[i]>>2)] = p_sig[(p_patch_ind[i]<<2)];
-p_img[(p_patch_loc[i]>>2)+1] = p_sig[(p_patch_ind[i]<<2)+1];
-p_img[(p_patch_loc[i]>>2)+2] = p_sig[(p_patch_ind[i]<<2)+2];
-p_img[(p_patch_loc[i]>>2)+3] = p_sig[(p_patch_ind[i]<<2)+3];
+for (i = 0U; i < sizeof(*p_patch_loc)>>2U; i++) {
+p_img[(p_patch_loc[i]>>2U)] = p_sig[(p_patch_ind[i]<<2U)];
+p_img[(p_patch_loc[i]>>2U)+1U] = p_sig[(p_patch_ind[i]<<2U)+1U];
+p_img[(p_patch_loc[i]>>2U)+2U] = p_sig[(p_patch_ind[i]<<2U)+2U];
+p_img[(p_patch_loc[i]>>2U)+3U] = p_sig[(p_patch_ind[i]<<2U)+3U];
 }
 return 0;
 }
@@ -1077,7 +1077,7 @@ static int nvgpu_gm20b_acr_wait_for_completion(struct gk20a *g,
 nvgpu_log_fn(g, " ");

 completion = nvgpu_falcon_wait_for_halt(flcn, timeout);
-if (completion != 0U) {
+if (completion != 0) {
 nvgpu_err(g, "flcn-%d: ACR boot timed out", flcn_id);
 goto exit;
 }
@@ -1181,7 +1181,7 @@ static int gm20b_acr_hs_bl_exec(struct gk20a *g, struct nvgpu_acr *acr,
 /* wait for ACR halt*/
 err = nvgpu_gm20b_acr_wait_for_completion(g, acr_desc->acr_flcn,
 ACR_COMPLETION_TIMEOUT_MS);
-if (err != 0U) {
+if (err != 0) {
 goto err_unmap_bl;
 }
 }

@@ -54,10 +54,10 @@ typedef int (*gp106_get_ucode_details)(struct gk20a *g,
 struct flcn_ucode_img_v1 *udata);

 /* Both size and address of WPR need to be 128K-aligned */
-#define WPR_ALIGNMENT 0x20000
+#define WPR_ALIGNMENT 0x20000U
 #define GP106_DGPU_NONWPR NVGPU_VIDMEM_BOOTSTRAP_ALLOCATOR_BASE
-#define GP106_DGPU_WPR_OFFSET 0x400000
-#define DGPU_WPR_SIZE 0x100000
+#define GP106_DGPU_WPR_OFFSET 0x400000U
+#define DGPU_WPR_SIZE 0x100000U

 /*Externs*/

@@ -428,9 +428,9 @@ int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu,
 /* Align start and end addresses */
 u32 start = ALIGN(pv->get_pmu_init_msg_pmu_sw_mg_off(init),
 PMU_DMEM_ALLOC_ALIGNMENT);
-u32 end = (pv->get_pmu_init_msg_pmu_sw_mg_off(init) +
-pv->get_pmu_init_msg_pmu_sw_mg_size(init)) &
-~(PMU_DMEM_ALLOC_ALIGNMENT - 1);
+u32 end = (U32(pv->get_pmu_init_msg_pmu_sw_mg_off(init)) +
+U32(pv->get_pmu_init_msg_pmu_sw_mg_size(init))) &
+~(PMU_DMEM_ALLOC_ALIGNMENT - 1U);
 u32 size = end - start;

 nvgpu_bitmap_allocator_init(g, &pmu->dmem, "gk20a_pmu_dmem",

@@ -230,7 +230,7 @@ int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 unsigned long timeout = gk20a_get_gr_idle_timeout(g);

 /* GM20B PMU supports loading FECS only */
-if (!(falconidmask == (1 << FALCON_ID_FECS))) {
+if (!(falconidmask == BIT32(FALCON_ID_FECS))) {
 return -EINVAL;
 }
 /* check whether pmu is ready to bootstrap lsf if not wait for it */

@@ -103,7 +103,7 @@ static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg,
 {
 nvgpu_log_fn(g, " ");

-if (status != 0) {
+if (status != 0U) {
 nvgpu_err(g, "PG PARAM cmd aborted");
 return;
 }
@@ -123,7 +123,7 @@ int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id)
 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {

 status = init_rppg(g);
-if (status != 0) {
+if (status != 0U) {
 nvgpu_err(g, "RPPG init Failed");
 return -1;
 }
@@ -275,7 +275,7 @@ int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;

 /* GM20B PMU supports loading FECS and GPCCS only */
-if (falconidmask == 0) {
+if (falconidmask == 0U) {
 return -EINVAL;
 }
 if ((falconidmask &

@@ -178,7 +178,7 @@ int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;

 /* GM20B PMU supports loading FECS and GPCCS only */
-if (falconidmask == 0) {
+if (falconidmask == 0U) {
 return -EINVAL;
 }
 if ((falconidmask &
@@ -214,7 +214,7 @@ static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg,
 {
 nvgpu_log_fn(g, " ");

-if (status != 0) {
+if (status != 0U) {
 nvgpu_err(g, "GR PARAM cmd aborted");
 /* TBD: disable ELPG */
 return;

@@ -53,7 +53,7 @@ int gv100_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
 int status = 0;

-if (falconidmask == 0) {
+if (falconidmask == 0U) {
 return -EINVAL;
 }

@@ -93,7 +93,7 @@ int gv100_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 pmu_wait_message_cond(&g->pmu, gk20a_get_gr_idle_timeout(g),
 &g->pmu_lsf_loaded_falcon_id, 1);

-if (g->pmu_lsf_loaded_falcon_id != 1) {
+if (g->pmu_lsf_loaded_falcon_id != 1U) {
 status = -ETIMEDOUT;
 }

@@ -267,7 +267,7 @@ int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu)
 U32(addr_load_lo) -
 (desc->bootloader_imem_offset >> U32(8)));

-blocks = ((desc->bootloader_size + 0xFF) & ~0xFF) >> 8;
+blocks = ((desc->bootloader_size + 0xFFU) & ~0xFFU) >> 8U;

 for (i = 0; i < blocks; i++) {
 gk20a_writel(g, pwr_falcon_dmatrfmoffs_r(),
@@ -418,7 +418,7 @@ static void pmu_handle_pg_sub_feature_msg(struct gk20a *g, struct pmu_msg *msg,
 {
 nvgpu_log_fn(g, " ");

-if (status != 0) {
+if (status != 0U) {
 nvgpu_err(g, "Sub-feature mask update cmd aborted\n");
 return;
 }
@@ -432,7 +432,7 @@ static void pmu_handle_pg_param_msg(struct gk20a *g, struct pmu_msg *msg,
 {
 nvgpu_log_fn(g, " ");

-if (status != 0) {
+if (status != 0U) {
 nvgpu_err(g, "GR PARAM cmd aborted\n");
 return;
 }

@@ -222,11 +222,11 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 invalid_cmd:
 nvgpu_err(g, "invalid pmu cmd :\n"
 "queue_id=%d,\n"
-"cmd_size=%d, cmd_unit_id=%d, msg=%p, msg_size=%d,\n"
+"cmd_size=%d, cmd_unit_id=%d, msg=%p, msg_size=%u,\n"
 "payload in=%p, in_size=%d, in_offset=%d,\n"
 "payload out=%p, out_size=%d, out_offset=%d",
 queue_id, cmd->hdr.size, cmd->hdr.unit_id,
-msg, (msg != NULL) ? msg->hdr.unit_id : ~0,
+msg, (msg != NULL) ? msg->hdr.unit_id : ~0U,
 &payload->in, payload->in.size, payload->in.offset,
 &payload->out, payload->out.size, payload->out.offset);

@@ -78,7 +78,7 @@ static void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
 byteoff = g->ops.bus.set_bar0_window(g, mem, sgt, sgl,
 offset / sizeof(u32));
 start_reg = g->ops.pramin.data032_r(byteoff / sizeof(u32));
-until_end = SZ_1M - (byteoff & (SZ_1M - 1));
+until_end = U32(SZ_1M) - (byteoff & (U32(SZ_1M) - 1U));

 n = min3(size, until_end, (u32)(sgl_len - offset));

@@ -72,7 +72,7 @@ void gm20b_priv_ring_isr(struct gk20a *g)
 nvgpu_log(g, gpu_dbg_intr, "ringmaster intr status0: 0x%08x,"
 "status1: 0x%08x", status0, status1);

-if (pri_ringmaster_intr_status0_gbl_write_error_sys_v(status0) != 0) {
+if (pri_ringmaster_intr_status0_gbl_write_error_sys_v(status0) != 0U) {
 nvgpu_log(g, gpu_dbg_intr, "SYS write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x",
 gk20a_readl(g, pri_ringstation_sys_priv_error_adr_r()),
 gk20a_readl(g, pri_ringstation_sys_priv_error_wrdat_r()),

@@ -69,28 +69,28 @@ void gp10b_priv_ring_decode_error_code(struct gk20a *g,
 {
 u32 error_type_index;

-error_type_index = (error_code & 0x00000f00) >> 16;
-error_code = error_code & 0xBADFf000;
+error_type_index = (error_code & 0x00000f00U) >> 16U;
+error_code = error_code & 0xBADFf000U;

-if (error_code == 0xBADF1000) {
+if (error_code == 0xBADF1000U) {
 if (error_type_index <
 ARRAY_SIZE(error_type_badf1xyy)) {
 nvgpu_err(g, "%s",
 error_type_badf1xyy[error_type_index]);
 }
-} else if (error_code == 0xBADF2000) {
+} else if (error_code == 0xBADF2000U) {
 if (error_type_index <
 ARRAY_SIZE(error_type_badf2xyy)) {
 nvgpu_err(g, "%s",
 error_type_badf2xyy[error_type_index]);
 }
-} else if (error_code == 0xBADF3000) {
+} else if (error_code == 0xBADF3000U) {
 if (error_type_index <
 ARRAY_SIZE(error_type_badf3xyy)) {
 nvgpu_err(g, "%s",
 error_type_badf3xyy[error_type_index]);
 }
-} else if (error_code == 0xBADF5000) {
+} else if (error_code == 0xBADF5000U) {
 if (error_type_index <
 ARRAY_SIZE(error_type_badf5xyy)) {
 nvgpu_err(g, "%s",
@@ -123,20 +123,20 @@ void gp10b_priv_ring_isr(struct gk20a *g)
 nvgpu_err(g, "ringmaster intr status0: 0x%08x,"
 "status1: 0x%08x", status0, status1);

-if (pri_ringmaster_intr_status0_ring_start_conn_fault_v(status0) != 0) {
+if (pri_ringmaster_intr_status0_ring_start_conn_fault_v(status0) != 0U) {
 nvgpu_err(g,
 "BUG: connectivity problem on the startup sequence");
 }

-if (pri_ringmaster_intr_status0_disconnect_fault_v(status0) != 0) {
+if (pri_ringmaster_intr_status0_disconnect_fault_v(status0) != 0U) {
 nvgpu_err(g, "ring disconnected");
 }

-if (pri_ringmaster_intr_status0_overflow_fault_v(status0) != 0) {
+if (pri_ringmaster_intr_status0_overflow_fault_v(status0) != 0U) {
 nvgpu_err(g, "ring overflowed");
 }

-if (pri_ringmaster_intr_status0_gbl_write_error_sys_v(status0) != 0) {
+if (pri_ringmaster_intr_status0_gbl_write_error_sys_v(status0) != 0U) {
 error_info =
 gk20a_readl(g, pri_ringstation_sys_priv_error_info_r());
 error_code =

@@ -360,7 +360,7 @@ static int sec2_process_init_msg(struct nvgpu_sec2 *sec2,

 u32 end = (sec2_init->nv_managed_area_offset +
 sec2_init->nv_managed_area_size) &
-~(PMU_DMEM_ALLOC_ALIGNMENT - 1);
+~(PMU_DMEM_ALLOC_ALIGNMENT - 1U);
 u32 size = end - start;

 nvgpu_bitmap_allocator_init(g, &sec2->dmem, "sec2_dmem",

@@ -46,18 +46,18 @@ struct condition_entry {
 u32 cond_compare;
 } __packed;

-static u16 nvgpu_bios_rdu16(struct gk20a *g, int offset)
+static u16 nvgpu_bios_rdu16(struct gk20a *g, u32 offset)
 {
-u16 val = (U16(g->bios.data[offset+1]) << U16(8)) +
+u16 val = (U16(g->bios.data[offset+1U]) << U16(8)) +
 U16(g->bios.data[offset]);
 return val;
 }

-static u32 nvgpu_bios_rdu32(struct gk20a *g, int offset)
+static u32 nvgpu_bios_rdu32(struct gk20a *g, u32 offset)
 {
-u32 val = (U32(g->bios.data[offset+3]) << U32(24)) +
-(U32(g->bios.data[offset+2]) << U32(16)) +
-(U32(g->bios.data[offset+1]) << U32(8)) +
+u32 val = (U32(g->bios.data[offset+3U]) << U32(24)) +
+(U32(g->bios.data[offset+2U]) << U32(16)) +
+(U32(g->bios.data[offset+1U]) << U32(8)) +
 U32(g->bios.data[offset]);
 return val;
 }
@@ -299,7 +299,7 @@ struct pci_ext_data_struct {
 u8 flags;
 } __packed;

-static void nvgpu_bios_parse_bit(struct gk20a *g, int offset);
+static void nvgpu_bios_parse_bit(struct gk20a *g, u32 offset);

 int nvgpu_bios_parse_rom(struct gk20a *g)
 {
@@ -357,8 +357,8 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
 &g->bios.data[(offset +
 pci_rom->pci_data_struct_ptr +
 pci_data->pci_data_struct_len +
-0xf)
-& ~0xf];
+0xfU)
+& ~0xfU];
 nvgpu_log_fn(g, "pci ext data sig %08x rev %x len %x sub_image_len %x priv_last %d flags %x",
 pci_ext_data->sig,
 pci_ext_data->nv_pci_data_ext_rev,
@@ -399,7 +399,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
 }
 }

-static void nvgpu_bios_parse_biosdata(struct gk20a *g, int offset)
+static void nvgpu_bios_parse_biosdata(struct gk20a *g, u32 offset)
 {
 struct biosdata biosdata;

@@ -412,7 +412,7 @@ static void nvgpu_bios_parse_biosdata(struct gk20a *g, int offset)
 g->bios.vbios_oem_version = biosdata.oem_version;
 }

-static void nvgpu_bios_parse_nvinit_ptrs(struct gk20a *g, int offset)
+static void nvgpu_bios_parse_nvinit_ptrs(struct gk20a *g, u32 offset)
 {
 struct nvinit_ptrs nvinit_ptrs;

@@ -533,7 +533,7 @@ static void nvgpu_bios_parse_memory_ptrs(struct gk20a *g, int offset, u8 version
 return;
 }

-static void nvgpu_bios_parse_devinit_appinfo(struct gk20a *g, int dmem_offset)
+static void nvgpu_bios_parse_devinit_appinfo(struct gk20a *g, u32 dmem_offset)
 {
 struct devinit_engine_interface interface;

@@ -552,10 +552,10 @@ static void nvgpu_bios_parse_devinit_appinfo(struct gk20a *g, int dmem_offset)
 g->bios.devinit_script_phys_base = interface.script_phys_base;
 }

-static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, int offset)
+static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, u32 offset)
 {
 struct application_interface_table_hdr_v1 hdr;
-int i;
+u32 i;

 nvgpu_memcpy((u8 *)&hdr, &g->bios.data[offset], sizeof(hdr));

@@ -567,8 +567,8 @@ static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, int offset)
 return 0;
 }

-offset += sizeof(hdr);
-for (i = 0; i < hdr.entry_count; i++) {
+offset += U32(sizeof(hdr));
+for (i = 0U; i < hdr.entry_count; i++) {
 struct application_interface_entry_v1 entry;

 nvgpu_memcpy((u8 *)&entry, &g->bios.data[offset],
@@ -588,7 +588,7 @@ static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, int offset)
 }

 static int nvgpu_bios_parse_falcon_ucode_desc(struct gk20a *g,
-struct nvgpu_bios_ucode *ucode, int offset)
+struct nvgpu_bios_ucode *ucode, u32 offset)
 {
 union falcon_ucode_desc udesc;
 struct falcon_ucode_desc_v2 desc;
@@ -664,16 +664,16 @@ static int nvgpu_bios_parse_falcon_ucode_desc(struct gk20a *g,
 ucode->dmem_size = desc.dmem_load_size;

 ret = nvgpu_bios_parse_appinfo_table(g,
-offset + desc_size +
+offset + U32(desc_size) +
 desc.dmem_offset + desc.interface_offset);

 return ret;
 }

-static int nvgpu_bios_parse_falcon_ucode_table(struct gk20a *g, int offset)
+static int nvgpu_bios_parse_falcon_ucode_table(struct gk20a *g, u32 offset)
 {
 struct falcon_ucode_table_hdr_v1 hdr;
-int i;
+u32 i;

 nvgpu_memcpy((u8 *)&hdr, &g->bios.data[offset], sizeof(hdr));
 nvgpu_log_fn(g, "falcon ucode table ver %d size %d entrySize %d entryCount %d descVer %d descSize %d",
@@ -687,7 +687,7 @@ static int nvgpu_bios_parse_falcon_ucode_table(struct gk20a *g, int offset)

 offset += hdr.header_size;

-for (i = 0; i < hdr.entry_count; i++) {
+for (i = 0U; i < hdr.entry_count; i++) {
 struct falcon_ucode_table_entry_v1 entry;

 nvgpu_memcpy((u8 *)&entry, &g->bios.data[offset],
@@ -739,7 +739,7 @@ static int nvgpu_bios_parse_falcon_ucode_table(struct gk20a *g, int offset)
 return 0;
 }

-static void nvgpu_bios_parse_falcon_data_v2(struct gk20a *g, int offset)
+static void nvgpu_bios_parse_falcon_data_v2(struct gk20a *g, u32 offset)
 {
 struct falcon_data_v2 falcon_data;
 int err;
@@ -773,12 +773,12 @@ void *nvgpu_bios_get_perf_table_ptrs(struct gk20a *g,
 if (ptoken->token_id == TOKEN_ID_VIRT_PTRS) {
 perf_table_id_offset = *((u16 *)&g->bios.data[
 ptoken->data_ptr +
-(table_id * PERF_PTRS_WIDTH_16)]);
+(U16(table_id) * U16(PERF_PTRS_WIDTH_16))]);
 data_size = PERF_PTRS_WIDTH_16;
 } else {
 perf_table_id_offset = *((u32 *)&g->bios.data[
 ptoken->data_ptr +
-(table_id * PERF_PTRS_WIDTH)]);
+(U16(table_id) * U16(PERF_PTRS_WIDTH))]);
 data_size = PERF_PTRS_WIDTH;
 }
 } else {
@@ -813,11 +813,11 @@ void *nvgpu_bios_get_perf_table_ptrs(struct gk20a *g,
 return (void *)perf_table_ptr;
 }

-static void nvgpu_bios_parse_bit(struct gk20a *g, int offset)
+static void nvgpu_bios_parse_bit(struct gk20a *g, u32 offset)
 {
 struct bios_bit bit;
 struct bit_token bit_token;
-int i;
+u32 i;

 nvgpu_log_fn(g, " ");
 nvgpu_memcpy((u8 *)&bit, &g->bios.data[offset], sizeof(bit));
@@ -827,7 +827,7 @@ static void nvgpu_bios_parse_bit(struct gk20a *g, int offset)
 bit.token_entries, bit.token_size);

 offset += bit.header_size;
-for (i = 0; i < bit.token_entries; i++) {
+for (i = 0U; i < bit.token_entries; i++) {
 nvgpu_memcpy((u8 *)&bit_token, &g->bios.data[offset],
 sizeof(bit_token));

@@ -33,7 +33,7 @@
 #include <nvgpu/hw/gp106/hw_xp_gp106.h>
 #include <nvgpu/hw/gp106/hw_xve_gp106.h>

-#define NV_PCFG 0x88000
+#define NV_PCFG 0x88000U

 void xve_xve_writel_gp106(struct gk20a *g, u32 reg, u32 val)
 {
@@ -350,7 +350,7 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
 NVGPU_TIMER_CPU_TIMER);
 do {
 pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0));
-if (pl_link_config != 0xfffffff &&
+if (pl_link_config != 0xfffffffU &&
 (xp_pl_link_config_ltssm_status_f(pl_link_config) ==
 xp_pl_link_config_ltssm_status_idle_v()) &&
 (xp_pl_link_config_ltssm_directive_f(pl_link_config) ==
@@ -367,7 +367,7 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)

 xv_sc_dbg(g, EXEC_CHANGE, " Change done... Checking status");

-if (pl_link_config == 0xffffffff) {
+if (pl_link_config == 0xffffffffU) {
 WARN(1, "GPU fell of PCI bus!?");

 /*
@@ -462,7 +462,7 @@ int xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
 u32 current_link_speed;
 int err;

-if ((next_link_speed & GPU_XVE_SPEED_MASK) == 0) {
+if ((next_link_speed & GPU_XVE_SPEED_MASK) == 0U) {
 return -EINVAL;
 }

@@ -27,8 +27,8 @@

 struct gk20a;

-#define PERF_PTRS_WIDTH 0x4
-#define PERF_PTRS_WIDTH_16 0x2
+#define PERF_PTRS_WIDTH U8(0x4)
+#define PERF_PTRS_WIDTH_16 U8(0x2)

 enum {
 CLOCKS_TABLE = 2,

@@ -176,7 +176,7 @@ struct gk20a;
 /*
 * Must be greater than the largest bit offset in the above list.
 */
-#define NVGPU_MAX_ENABLED_BITS 70
+#define NVGPU_MAX_ENABLED_BITS 70U

 /**
 * nvgpu_is_enabled - Check if the passed flag is enabled.

@@ -23,7 +23,7 @@
 #ifndef NVGPU_FLCNIF_CMN_H
 #define NVGPU_FLCNIF_CMN_H

-#define PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED 0
+#define PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED 0U

 struct falc_u64 {
 u32 lo;
@@ -104,18 +104,18 @@ struct pmu_hdr {
 #define nv_pmu_hdr pmu_hdr
 typedef u8 flcn_status;

-#define PMU_DMEM_ALLOC_ALIGNMENT (32)
-#define PMU_DMEM_ALIGNMENT (4)
+#define PMU_DMEM_ALLOC_ALIGNMENT 32U
+#define PMU_DMEM_ALIGNMENT 4U

-#define PMU_CMD_FLAGS_PMU_MASK (0xF0)
+#define PMU_CMD_FLAGS_PMU_MASK U8(0xF0U)

 #define PMU_CMD_FLAGS_STATUS BIT8(0)
 #define PMU_CMD_FLAGS_INTR BIT8(1)
 #define PMU_CMD_FLAGS_EVENT BIT8(2)
 #define PMU_CMD_FLAGS_WATERMARK BIT8(3)

-#define ALIGN_UP(v, gran) (((v) + ((gran) - 1)) & ~((gran)-1))
+#define ALIGN_UP(v, gran) (((v) + ((gran) - 1U)) & ~((gran)-1U))

-#define NV_UNSIGNED_ROUNDED_DIV(a, b) (((a) + ((b) / 2)) / (b))
+#define NV_UNSIGNED_ROUNDED_DIV(a, b) (((a) + ((b) / 2U)) / (b))

 #endif /* NVGPU_FLCNIF_CMN_H */

@@ -25,13 +25,13 @@

 #define ilog2(x) (fls(x) - 1U)

-#define roundup_pow_of_two(x) (1UL << fls((x) - 1))
-#define rounddown_pow_of_two(x) (1UL << (fls(x) - 1))
+#define roundup_pow_of_two(x) (1UL << fls((x) - 1UL))
+#define rounddown_pow_of_two(x) (1UL << (fls(x) - 1UL))

 #define is_power_of_2(x) \
 ({ \
 typeof(x) __x__ = (x); \
-(__x__ != 0 && ((__x__ & (__x__ - 1)) == 0)); \
+(__x__ != 0U && ((__x__ & (__x__ - 1U)) == 0U)); \
 })

 #endif