diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
index 8f0cb51a7..3740fa922 100644
--- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
@@ -46,7 +46,7 @@
 /*
  * Copy engine defines line size in pixels
  */
-#define MAX_CE_SHIFT	31	/* 4Gpixels -1 */
+#define MAX_CE_SHIFT	31U	/* 4Gpixels -1 */
 #define MAX_CE_MASK	((u32) (~(~0U << MAX_CE_SHIFT)))
 #define MAX_CE_ALIGN(a)	((a) & MAX_CE_MASK)
 
@@ -270,7 +270,7 @@ int gk20a_ce_prepare_submit(u64 src_buf,
 			cmd_buf_cpu_va[methodSize++] = 0x00000001;
 		}
 
-		launch |= 0x00001000;
+		launch |= 0x00001000U;
 	} else if (request_operation & NVGPU_CE_MEMSET) {
 		/* Remap from component A on 1 byte wide pixels */
 		cmd_buf_cpu_va[methodSize++] = 0x200181c2;
@@ -279,7 +279,7 @@ int gk20a_ce_prepare_submit(u64 src_buf,
 		cmd_buf_cpu_va[methodSize++] = 0x200181c0;
 		cmd_buf_cpu_va[methodSize++] = payload;
 
-		launch |= 0x00000400;
+		launch |= 0x00000400U;
 	} else {
 		/* Illegal size */
 		return 0;
@@ -308,18 +308,18 @@ int gk20a_ce_prepare_submit(u64 src_buf,
 		cmd_buf_cpu_va[methodSize++] = 0x00000001;
 	}
 
-	launch |= 0x00002005;
+	launch |= 0x00002005U;
 
 	if (launch_flags & NVGPU_CE_SRC_MEMORY_LAYOUT_BLOCKLINEAR) {
-		launch |= 0x00000000;
+		launch |= 0x00000000U;
 	} else {
-		launch |= 0x00000080;
+		launch |= 0x00000080U;
 	}
 
 	if (launch_flags & NVGPU_CE_DST_MEMORY_LAYOUT_BLOCKLINEAR) {
-		launch |= 0x00000000;
+		launch |= 0x00000000U;
 	} else {
-		launch |= 0x00000100;
+		launch |= 0x00000100U;
 	}
 
 	cmd_buf_cpu_va[methodSize++] = 0x200180c0;
diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.h b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.h
index e16a0e6ab..971906698 100644
--- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.h
@@ -34,11 +34,11 @@ void gk20a_ce2_isr(struct gk20a *g, u32 inst_id, u32 pri_base);
 u32 gk20a_ce2_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base);
 
 /* CE command utility macros */
-#define NVGPU_CE_LOWER_ADDRESS_OFFSET_MASK 0xffffffff
-#define NVGPU_CE_UPPER_ADDRESS_OFFSET_MASK 0xff
+#define NVGPU_CE_LOWER_ADDRESS_OFFSET_MASK 0xffffffffU
+#define NVGPU_CE_UPPER_ADDRESS_OFFSET_MASK 0xffU
 
-#define NVGPU_CE_MAX_INFLIGHT_JOBS 32
-#define NVGPU_CE_MAX_COMMAND_BUFF_BYTES_PER_KICKOFF 256
+#define NVGPU_CE_MAX_INFLIGHT_JOBS 32U
+#define NVGPU_CE_MAX_COMMAND_BUFF_BYTES_PER_KICKOFF 256U
 
 /* dma launch_flags */
 enum {
diff --git a/drivers/gpu/nvgpu/gk20a/clk_gk20a.h b/drivers/gpu/nvgpu/gk20a/clk_gk20a.h
index d84e59692..9a9cb5407 100644
--- a/drivers/gpu/nvgpu/gk20a/clk_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/clk_gk20a.h
@@ -119,18 +119,18 @@ struct clk_gk20a {
 
 struct gpu_ops;
 
-#define KHZ 1000
-#define MHZ 1000000
+#define KHZ 1000U
+#define MHZ 1000000U
 
 static inline unsigned long rate_gpc2clk_to_gpu(unsigned long rate)
 {
 	/* convert the kHz gpc2clk frequency to Hz gpcpll frequency */
-	return (rate * KHZ) / 2;
+	return (rate * KHZ) / 2U;
 }
 static inline unsigned long rate_gpu_to_gpc2clk(unsigned long rate)
 {
 	/* convert the Hz gpcpll frequency to kHz gpc2clk frequency */
-	return (rate * 2) / KHZ;
+	return (rate * 2U) / KHZ;
 }
 
 #endif /* CLK_GK20A_H */
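
The hunks above are the classic MISRA-style unsigned-suffix cleanup: a bare literal such as 31 has type signed int, so masking or shifting with it silently mixes signed and unsigned arithmetic. A minimal user-space sketch (stand-alone, not driver code) of why MAX_CE_MASK relies on the U suffix:

    #include <stdint.h>
    typedef uint32_t u32;

    /* Without the U suffix, ~0 is the signed int -1 and left-shifting it is
     * undefined/implementation-defined in C. With ~0U the whole expression
     * stays in unsigned arithmetic, which MAX_CE_MASK depends on. */
    #define MAX_CE_SHIFT 31U
    #define MAX_CE_MASK  ((u32)(~(~0U << MAX_CE_SHIFT)))
    #define MAX_CE_ALIGN(a) ((a) & MAX_CE_MASK)

    int main(void)
    {
    	u32 line = 0xdeadbeefU;
    	/* 0xdeadbeef & 0x7fffffff == 0x5eadbeef */
    	return MAX_CE_ALIGN(line) == 0x5eadbeefU ? 0 : 1;
    }
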
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 6762a4d34..669a775ff 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -57,8 +57,8 @@
 #include
 #include
 
-#define FECS_METHOD_WFI_RESTORE	0x80000
-#define FECS_MAILBOX_0_ACK_RESTORE 0x4
+#define FECS_METHOD_WFI_RESTORE	0x80000U
+#define FECS_MAILBOX_0_ACK_RESTORE 0x4U
 
 static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg);
@@ -624,7 +624,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 	size_t runlist_size;
 	u32 active_engine_id, pbdma_id, engine_id;
 	u32 flags = nvgpu_is_enabled(g, NVGPU_MM_USE_PHYSICAL_SG) ?
-			NVGPU_DMA_FORCE_CONTIGUOUS : 0;
+			NVGPU_DMA_FORCE_CONTIGUOUS : 0U;
 	int err = 0;
 
 	nvgpu_log_fn(g, " ");
@@ -1575,7 +1575,7 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
 
 	/* go through all faulted engines */
-	for_each_set_bit(engine_mmu_fault_id, &fault_id, 32) {
+	for_each_set_bit(engine_mmu_fault_id, &fault_id, 32U) {
 		/* bits in fifo_intr_mmu_fault_id_r do not correspond 1:1 to
 		 * engines. Convert engine_mmu_id to engine_id */
 		u32 engine_id = gk20a_mmu_id_to_engine_id(g,
@@ -1847,7 +1847,7 @@ static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg)
 	return engines;
 }
 
-void gk20a_fifo_recover_ch(struct gk20a *g, u32 chid, bool verbose, int rc_type)
+void gk20a_fifo_recover_ch(struct gk20a *g, u32 chid, bool verbose, u32 rc_type)
 {
 	u32 engines;
 
@@ -1880,7 +1880,7 @@ void gk20a_fifo_recover_ch(struct gk20a *g, u32 chid, bool verbose, int rc_type)
 }
 
 void gk20a_fifo_recover_tsg(struct gk20a *g, u32 tsgid, bool verbose,
-		int rc_type)
+		u32 rc_type)
 {
 	u32 engines;
 
@@ -1938,7 +1938,7 @@ void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
 		ref_id_is_tsg = id_is_tsg;
 		/* atleast one engine will get passed during sched err*/
 		engine_ids |= __engine_ids;
-		for_each_set_bit(engine_id, &engine_ids, 32) {
+		for_each_set_bit(engine_id, &engine_ids, 32U) {
 			u32 mmu_id = gk20a_engine_id_to_mmu_id(g,
 							(u32)engine_id);
 
@@ -1948,7 +1948,7 @@ void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
 		}
 	} else {
 		/* store faulted engines in advance */
-		for_each_set_bit(engine_id, &_engine_ids, 32) {
+		for_each_set_bit(engine_id, &_engine_ids, 32U) {
 			gk20a_fifo_get_faulty_id_type(g, (u32)engine_id,
 					&ref_id, &ref_type);
 			if (ref_type == fifo_engine_status_id_type_tsgid_v()) {
@@ -2006,7 +2006,7 @@ void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
 
 void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
 			u32 hw_id, bool id_is_tsg,
-			bool id_is_known, bool verbose, int rc_type)
+			bool id_is_known, bool verbose, u32 rc_type)
 {
 	unsigned int id_type;
 
@@ -2221,7 +2221,7 @@ bool gk20a_fifo_check_ch_ctxsw_timeout(struct channel_gk20a *ch,
 
 	if (gk20a_channel_get(ch) != NULL) {
 		recover = gk20a_channel_update_and_check_timeout(ch,
-				g->fifo_eng_timeout_us / 1000,
+				g->fifo_eng_timeout_us / 1000U,
 				&progress);
 		*verbose = ch->timeout_debug_dump;
 		*ms = ch->timeout_accumulated_ms;
@@ -2244,7 +2244,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
 	struct gk20a *g = tsg->g;
 
 	*verbose = false;
-	*ms = g->fifo_eng_timeout_us / 1000;
+	*ms = g->fifo_eng_timeout_us / 1000U;
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 
@@ -2295,7 +2295,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
 			nvgpu_log_info(g, "progress on tsg=%d ch=%d",
 					tsg->tsgid, ch->chid);
 			gk20a_channel_put(ch);
-			*ms = g->fifo_eng_timeout_us / 1000;
+			*ms = g->fifo_eng_timeout_us / 1000U;
 			nvgpu_list_for_each_entry(ch, &tsg->ch_list,
 					channel_gk20a, ch_entry) {
 				if (gk20a_channel_get(ch) != NULL) {
@@ -2433,7 +2433,7 @@ static inline void gk20a_fifo_reset_pbdma_header(struct gk20a *g, u32 pbdma_id)
 }
 
 void gk20a_fifo_reset_pbdma_method(struct gk20a *g, u32 pbdma_id,
-					int pbdma_method_index)
+					u32 pbdma_method_index)
 {
 	u32 pbdma_method_stride;
 	u32 pbdma_method_reg;
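
The for_each_set_bit() changes above pass the bit-field width as 32U. As a hedged sketch of the pattern these hunks use (the fault mask must live in an unsigned long because the Linux bitops macro walks word arrays):

    #include <linux/bitops.h>
    #include <linux/printk.h>

    /* Illustrative only: visit each faulted engine bit in a 32-bit HW
     * fault register, as gk20a_fifo_handle_mmu_fault_locked() does. */
    static void walk_fault_bits(u32 fault_reg)
    {
    	unsigned long fault_id = fault_reg;
    	unsigned long bit;

    	for_each_set_bit(bit, &fault_id, 32U) {
    		pr_info("engine mmu fault id %lu\n", bit);
    	}
    }
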
@@ -2452,7 +2452,7 @@ void gk20a_fifo_reset_pbdma_method(struct gk20a *g, u32 pbdma_id,
 }
 
 static bool gk20a_fifo_is_sw_method_subch(struct gk20a *g, u32 pbdma_id,
-						int pbdma_method_index)
+						u32 pbdma_method_index)
 {
 	u32 pbdma_method_stride;
 	u32 pbdma_method_reg, pbdma_method_subch;
@@ -2466,9 +2466,9 @@ static bool gk20a_fifo_is_sw_method_subch(struct gk20a *g, u32 pbdma_id,
 	pbdma_method_subch = pbdma_method0_subch_v(
 			gk20a_readl(g, pbdma_method_reg));
 
-	if (pbdma_method_subch == 5 ||
-	    pbdma_method_subch == 6 ||
-	    pbdma_method_subch == 7) {
+	if (pbdma_method_subch == 5U ||
+	    pbdma_method_subch == 6U ||
+	    pbdma_method_subch == 7U) {
 		return true;
 	}
 
@@ -2480,7 +2480,7 @@ unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id,
 {
 	struct fifo_gk20a *f = &g->fifo;
 	unsigned int rc_type = RC_TYPE_NO_RC;
-	int i;
+	u32 i;
 	unsigned long pbdma_intr_err;
 	unsigned long bit;
 
@@ -2489,7 +2489,7 @@ unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id,
 			f->intr.pbdma.restartable_0) & pbdma_intr_0) {
 
 		pbdma_intr_err = (unsigned long)pbdma_intr_0;
-		for_each_set_bit(bit, &pbdma_intr_err, 32) {
+		for_each_set_bit(bit, &pbdma_intr_err, 32U) {
 			nvgpu_err(g, "PBDMA intr %s Error",
 				pbdma_intr_fault_type_desc[bit]);
 		}
@@ -2550,7 +2550,7 @@ unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id,
 	if ((pbdma_intr_0 & pbdma_intr_0_device_pending_f()) != 0U) {
 		gk20a_fifo_reset_pbdma_header(g, pbdma_id);
 
-		for (i = 0; i < 4; i++) {
+		for (i = 0U; i < 4U; i++) {
 			if (gk20a_fifo_is_sw_method_subch(g,
					pbdma_id, i)) {
 				gk20a_fifo_reset_pbdma_method(g,
@@ -2755,7 +2755,7 @@ static u32 gk20a_fifo_get_preempt_timeout(struct gk20a *g)
 	 * triggered every 100 ms and context switch recovery
 	 * happens every 3000 ms */
 
-	return g->fifo_eng_timeout_us / 1000;
+	return g->fifo_eng_timeout_us / 1000U;
 }
 
 int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
@@ -2774,7 +2774,7 @@ int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 			break;
 		}
 
-		nvgpu_usleep_range(delay, delay * 2);
+		nvgpu_usleep_range(delay, delay * 2U);
 		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
 	} while (nvgpu_timeout_expired(&timeout) == 0);
@@ -2864,7 +2864,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
 
 	ret = __locked_fifo_preempt(g, chid, false);
 
-	if (mutex_ret == 0U) {
+	if (mutex_ret == 0) {
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	}
@@ -2908,7 +2908,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 
 	ret = __locked_fifo_preempt(g, tsgid, true);
 
-	if (mutex_ret == 0U) {
+	if (mutex_ret == 0) {
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	}
@@ -2971,7 +2971,7 @@ void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
 
 	gk20a_fifo_sched_disable_rw(g, runlists_mask, runlist_state);
 
-	if (mutex_ret == 0U) {
+	if (mutex_ret == 0) {
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	}
 }
@@ -3080,7 +3080,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
 	}
 
 clean_up:
-	if (mutex_ret == 0U) {
+	if (mutex_ret == 0) {
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	}
 
@@ -3117,7 +3117,7 @@ int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
 	}
 
 	if (err != 0) {
-		while (i-- != 0) {
+		while (i-- != 0U) {
 			active_engine_id = g->fifo.active_engines_list[i];
 			err = gk20a_fifo_enable_engine_activity(g,
 					&g->fifo.engine_info[active_engine_id]);
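
Several hunks here touch the same poll-with-exponential-backoff idiom. A sketch of that loop in isolation, assuming nvgpu's GR_IDLE_CHECK_DEFAULT/GR_IDLE_CHECK_MAX bounds; the hw_condition_met() predicate is a hypothetical stand-in for the register check:

    /* Poll a HW condition, doubling the sleep interval each round so a
     * fast completion is caught quickly but a slow one is not spammed. */
    static int poll_until_idle(struct gk20a *g, struct nvgpu_timeout *timeout)
    {
    	u32 delay = GR_IDLE_CHECK_DEFAULT;

    	do {
    		if (hw_condition_met(g)) {	/* stand-in predicate */
    			return 0;
    		}
    		/* sleep between delay and 2*delay microseconds */
    		nvgpu_usleep_range(delay, delay * 2U);
    		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
    	} while (nvgpu_timeout_expired(timeout) == 0);

    	return -ETIMEDOUT;
    }
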
@@ -3167,12 +3167,12 @@ int gk20a_fifo_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
 
 	do {
 		if ((gk20a_readl(g, fifo_eng_runlist_r(runlist_id)) &
-				fifo_eng_runlist_pending_true_f()) == 0) {
+				fifo_eng_runlist_pending_true_f()) == 0U) {
 			ret = 0;
 			break;
 		}
 
-		nvgpu_usleep_range(delay, delay * 2);
+		nvgpu_usleep_range(delay, delay * 2U);
 		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
 	} while (nvgpu_timeout_expired(&timeout) == 0);
@@ -3521,7 +3521,7 @@ void gk20a_fifo_runlist_hw_submit(struct gk20a *g, u32 runlist_id,
 
 	nvgpu_spinlock_acquire(&g->fifo.runlist_submit_lock);
 
-	if (count != 0) {
+	if (count != 0U) {
 		gk20a_writel(g, fifo_runlist_base_r(),
 			fifo_runlist_base_ptr_f(u64_lo32(runlist_iova >> 12)) |
 			nvgpu_aperture_mask(g, &runlist->mem[buffer_index],
@@ -3564,7 +3564,7 @@ int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 				runlist->active_channels)) {
 			return 0;
 		}
-		if ((tsg != NULL) && (++tsg->num_active_channels != 0)) {
+		if ((tsg != NULL) && (++tsg->num_active_channels != 0U)) {
 			set_bit((int)f->channel[chid].tsgid,
 				runlist->active_tsgs);
 		}
@@ -3647,7 +3647,7 @@ int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 chid,
 	}
 
 	ret = 0;
-	for_each_set_bit(runlist_id, &ulong_runlist_ids, 32) {
+	for_each_set_bit(runlist_id, &ulong_runlist_ids, 32U) {
 		/* Capture the last failure error code */
 		errcode = g->ops.fifo.update_runlist(g, (u32)runlist_id, chid,
				add, wait_for_finish);
@@ -3675,7 +3675,7 @@ static int __locked_fifo_reschedule_preempt_next(struct channel_gk20a *ch,
 	u32 preempt_id;
 	u32 preempt_type = 0;
 
-	if (1 != gk20a_fifo_get_engine_ids(
+	if (1U != gk20a_fifo_get_engine_ids(
 			g, &gr_eng_id, 1, ENGINE_GR_GK20A)) {
 		return ret;
 	}
@@ -3755,7 +3755,7 @@ int nvgpu_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next,
 
 	gk20a_fifo_runlist_wait_pending(g, ch->runlist_id);
 
-	if (mutex_ret == 0U) {
+	if (mutex_ret == 0) {
 		nvgpu_pmu_mutex_release(
			&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	}
@@ -3788,7 +3788,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
 	ret = gk20a_fifo_update_runlist_locked(g, runlist_id, chid, add,
					wait_for_finish);
 
-	if (mutex_ret == 0U) {
+	if (mutex_ret == 0) {
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	}
@@ -3869,7 +3869,7 @@ int gk20a_fifo_wait_engine_idle(struct gk20a *g)
 			break;
 		}
 
-		nvgpu_usleep_range(delay, delay * 2);
+		nvgpu_usleep_range(delay, delay * 2U);
 		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
 	} while (nvgpu_timeout_expired(&timeout) == 0);
@@ -4476,20 +4476,20 @@ u32 gk20a_fifo_pbdma_acquire_val(u64 timeout)
 	}
 
 	timeout *= 80UL;
-	do_div(timeout, 100); /* set acquire timeout to 80% of channel wdt */
+	do_div(timeout, 100U); /* set acquire timeout to 80% of channel wdt */
 	timeout *= 1000000UL; /* ms -> ns */
-	do_div(timeout, 1024); /* in unit of 1024ns */
-	tmp = fls(timeout >> 32);
+	do_div(timeout, 1024U); /* in unit of 1024ns */
+	tmp = fls(timeout >> 32U);
 	BUG_ON(tmp > U64(U32_MAX));
 	val_len = (u32)tmp + 32U;
-	if (val_len == 32) {
+	if (val_len == 32U) {
 		val_len = (u32)fls(timeout);
 	}
 	if (val_len > 16U + pbdma_acquire_timeout_exp_max_v()) { /* man: 16bits */
 		exponent = pbdma_acquire_timeout_exp_max_v();
 		mantissa = pbdma_acquire_timeout_man_max_v();
-	} else if (val_len > 16) {
-		exponent = val_len - 16;
+	} else if (val_len > 16U) {
+		exponent = val_len - 16U;
 		BUG_ON((timeout >> exponent) > U64(U32_MAX));
 		mantissa = (u32)(timeout >> exponent);
 	} else {
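
gk20a_fifo_pbdma_acquire_val() above packs a 64-bit tick count into a 16-bit mantissa plus a small exponent (value ~= mantissa << exponent). A user-space sketch of the same encoding; EXP_MAX and the helper name are stand-ins for the pbdma_acquire_timeout_*_v() accessors:

    #include <stdint.h>

    #define EXP_MAX 15U	/* stand-in for pbdma_acquire_timeout_exp_max_v() */

    static void encode_timeout(uint64_t ticks, uint32_t *mantissa,
    			   uint32_t *exponent)
    {
    	/* bit length of ticks; "| 1" avoids clz(0) being undefined */
    	uint32_t val_len = 64U - (uint32_t)__builtin_clzll(ticks | 1U);

    	if (val_len > 16U + EXP_MAX) {		/* too big: saturate */
    		*exponent = EXP_MAX;
    		*mantissa = 0xffffU;
    	} else if (val_len > 16U) {		/* shift down into 16 bits */
    		*exponent = val_len - 16U;
    		*mantissa = (uint32_t)(ticks >> *exponent);
    	} else {				/* already fits */
    		*exponent = 0U;
    		*mantissa = (uint32_t)ticks;
    	}
    }
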
@@ -4540,39 +4540,39 @@ void gk20a_fifo_add_sema_cmd(struct gk20a *g,
 	nvgpu_log_fn(g, " ");
 
 	/* semaphore_a */
-	nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010004);
+	nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010004U);
 	/* offset_upper */
 	nvgpu_mem_wr32(g, cmd->mem, off++, (u32)(sema_va >> 32) & 0xffU);
 	/* semaphore_b */
-	nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010005);
+	nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010005U);
 	/* offset */
 	nvgpu_mem_wr32(g, cmd->mem, off++, (u32)sema_va & 0xffffffff);
 
 	if (acquire) {
 		/* semaphore_c */
-		nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010006);
+		nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010006U);
 		/* payload */
 		nvgpu_mem_wr32(g, cmd->mem, off++, nvgpu_semaphore_get_value(s));
 		/* semaphore_d */
-		nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010007);
+		nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010007U);
 		/* operation: acq_geq, switch_en */
-		nvgpu_mem_wr32(g, cmd->mem, off++, 0x4 | (0x1 << 12));
+		nvgpu_mem_wr32(g, cmd->mem, off++, 0x4U | BIT32(12));
 	} else {
 		/* semaphore_c */
-		nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010006);
+		nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010006U);
 		/* payload */
 		nvgpu_mem_wr32(g, cmd->mem, off++, nvgpu_semaphore_get_value(s));
 		/* semaphore_d */
-		nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010007);
+		nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010007U);
 		/* operation: release, wfi */
 		nvgpu_mem_wr32(g, cmd->mem, off++,
			0x2UL | ((wfi ? 0x0UL : 0x1UL) << 20));
 		/* non_stall_int */
-		nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010008);
+		nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010008U);
 		/* ignored */
-		nvgpu_mem_wr32(g, cmd->mem, off++, 0);
+		nvgpu_mem_wr32(g, cmd->mem, off++, 0U);
 	}
 }
@@ -4585,13 +4585,13 @@ void gk20a_fifo_add_syncpt_wait_cmd(struct gk20a *g,
 	off = cmd->off + off;
 
 	/* syncpoint_a */
-	nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001C);
+	nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001CU);
 	/* payload */
 	nvgpu_mem_wr32(g, cmd->mem, off++, thresh);
 	/* syncpoint_b */
-	nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001D);
+	nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001DU);
 	/* syncpt_id, switch_en, wait */
-	nvgpu_mem_wr32(g, cmd->mem, off++, (id << 8) | 0x10);
+	nvgpu_mem_wr32(g, cmd->mem, off++, (id << 8U) | 0x10U);
 }
 
 u32 gk20a_fifo_get_syncpt_wait_cmd_size(void)
@@ -4613,22 +4613,22 @@ void gk20a_fifo_add_syncpt_incr_cmd(struct gk20a *g,
 	nvgpu_log_fn(g, " ");
 
 	if (wfi_cmd) {
 		/* wfi */
-		nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001E);
+		nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001EU);
 		/* handle, ignored */
-		nvgpu_mem_wr32(g, cmd->mem, off++, 0x00000000);
+		nvgpu_mem_wr32(g, cmd->mem, off++, 0x00000000U);
 	}
 	/* syncpoint_a */
-	nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001C);
+	nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001CU);
 	/* payload, ignored */
-	nvgpu_mem_wr32(g, cmd->mem, off++, 0);
+	nvgpu_mem_wr32(g, cmd->mem, off++, 0U);
 	/* syncpoint_b */
-	nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001D);
+	nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001DU);
 	/* syncpt_id, incr */
-	nvgpu_mem_wr32(g, cmd->mem, off++, (id << 8) | 0x1);
+	nvgpu_mem_wr32(g, cmd->mem, off++, (id << 8U) | 0x1U);
 	/* syncpoint_b */
-	nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001D);
+	nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001DU);
 	/* syncpt_id, incr */
-	nvgpu_mem_wr32(g, cmd->mem, off++, (id << 8) | 0x1);
+	nvgpu_mem_wr32(g, cmd->mem, off++, (id << 8U) | 0x1U);
 }
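
Each pair of writes above is a Host method: a header word (opcode/count/method address, e.g. 0x2001001C) followed by its operand, with fields like the syncpoint id packed at bit 8 of the operand. A sketch of the wait sequence built into a plain array; 'buf' is a stand-in for the driver's cmd->mem:

    #include <stdint.h>

    /* Emit the same 4-word syncpoint wait that
     * gk20a_fifo_add_syncpt_wait_cmd() writes above; returns new offset. */
    static uint32_t emit_syncpt_wait(uint32_t *buf, uint32_t off,
    				 uint32_t id, uint32_t thresh)
    {
    	buf[off++] = 0x2001001CU;		/* syncpoint_a */
    	buf[off++] = thresh;			/* payload */
    	buf[off++] = 0x2001001DU;		/* syncpoint_b */
    	buf[off++] = (id << 8U) | 0x10U;	/* syncpt_id, switch_en, wait */
    	return off;
    }
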
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index 69d757ead..986dae738 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -37,7 +37,7 @@ struct tsg_gk20a;
 #define NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH	2U
 #define NVGPU_FIFO_RUNLIST_INTERLEAVE_NUM_LEVELS	3U
 
-#define MAX_RUNLIST_BUFFERS		2
+#define MAX_RUNLIST_BUFFERS		2U
 
 #define FIFO_INVAL_ENGINE_ID		((u32)~0)
 #define FIFO_INVAL_CHANNEL_ID		((u32)~0)
@@ -48,20 +48,20 @@ struct tsg_gk20a;
 #define ID_TYPE_TSG		1U
 #define ID_TYPE_UNKNOWN		((u32)~0)
 
-#define RC_YES				1
-#define RC_NO				0
+#define RC_YES				1U
+#define RC_NO				0U
 
-#define GRFIFO_TIMEOUT_CHECK_PERIOD_US	100000
+#define GRFIFO_TIMEOUT_CHECK_PERIOD_US	100000U
 
-#define RC_TYPE_NO_RC			0
-#define RC_TYPE_MMU_FAULT		1
-#define RC_TYPE_PBDMA_FAULT		2
-#define RC_TYPE_GR_FAULT		3
-#define RC_TYPE_PREEMPT_TIMEOUT		4
-#define RC_TYPE_CTXSW_TIMEOUT		5
-#define RC_TYPE_RUNLIST_UPDATE_TIMEOUT	6
-#define RC_TYPE_FORCE_RESET		7
-#define RC_TYPE_SCHED_ERR		8
+#define RC_TYPE_NO_RC			0U
+#define RC_TYPE_MMU_FAULT		1U
+#define RC_TYPE_PBDMA_FAULT		2U
+#define RC_TYPE_GR_FAULT		3U
+#define RC_TYPE_PREEMPT_TIMEOUT		4U
+#define RC_TYPE_CTXSW_TIMEOUT		5U
+#define RC_TYPE_RUNLIST_UPDATE_TIMEOUT	6U
+#define RC_TYPE_FORCE_RESET		7U
+#define RC_TYPE_SCHED_ERR		8U
 
 #define NVGPU_FIFO_DEFAULT_TIMESLICE_TIMEOUT	128UL
 #define NVGPU_FIFO_DEFAULT_TIMESLICE_SCALE	3UL
@@ -72,11 +72,11 @@ struct tsg_gk20a;
 * significative on a histogram on a 5% step */
 #ifdef CONFIG_DEBUG_FS
-#define FIFO_PROFILING_ENTRIES 16384
+#define FIFO_PROFILING_ENTRIES 16384U
 #endif
 
-#define	RUNLIST_DISABLED		0
-#define	RUNLIST_ENABLED			1
+#define	RUNLIST_DISABLED		0U
+#define	RUNLIST_ENABLED			1U
 
 /* generally corresponds to the "pbdma" engine */
 
@@ -288,11 +288,11 @@ void gk20a_fifo_recover(struct gk20a *g,
		u32 engine_ids, /* if zero, will be queried from HW */
		u32 hw_id, /* if ~0, will be queried from HW */
		bool hw_id_is_tsg, /* ignored if hw_id == ~0 */
-		bool id_is_known, bool verbose, int rc_type);
+		bool id_is_known, bool verbose, u32 rc_type);
 void gk20a_fifo_recover_ch(struct gk20a *g, u32 chid, bool verbose,
-		int rc_type);
+		u32 rc_type);
 void gk20a_fifo_recover_tsg(struct gk20a *g, u32 tsgid, bool verbose,
-		int rc_type);
+		u32 rc_type);
 int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
		u32 err_code, bool verbose);
 void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id);
@@ -455,7 +455,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
 bool gk20a_fifo_handle_sched_error(struct gk20a *g);
 
 void gk20a_fifo_reset_pbdma_method(struct gk20a *g, u32 pbdma_id,
-			int pbdma_method_index);
+			u32 pbdma_method_index);
 unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id,
			u32 pbdma_intr_0, u32 *handled, u32 *error_notifier);
 unsigned int gk20a_fifo_handle_pbdma_intr_1(struct gk20a *g, u32 pbdma_id,
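
The rc_type parameters flip from int to u32 so they match the now U-suffixed RC_TYPE_* values; otherwise every comparison or pass-through would be an implicit signed/unsigned conversion. A minimal sketch, assuming the patched signature above:

    /* With RC_TYPE_* unsigned and rc_type a u32, the recovery path stays
     * in unsigned arithmetic end to end. */
    static void trigger_recovery(struct gk20a *g, u32 chid, u32 rc_type)
    {
    	if (rc_type != RC_TYPE_NO_RC) {
    		gk20a_fifo_recover_ch(g, chid, true, rc_type);
    	}
    }
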
diff --git a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
index e651f7dcc..45c12a1e0 100644
--- a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
@@ -58,7 +58,7 @@ static bool gk20a_flcn_clear_halt_interrupt_status(struct nvgpu_falcon *flcn)
 
 	gk20a_writel(g, base_addr + falcon_falcon_irqsclr_r(),
		gk20a_readl(g, base_addr + falcon_falcon_irqsclr_r()) |
-		(0x10));
+		0x10U);
 
 	data = gk20a_readl(g, (base_addr + falcon_falcon_irqstat_r()));
 
 	if ((data & falcon_falcon_irqstat_halt_true_f()) !=
@@ -112,8 +112,8 @@ static bool gk20a_is_falcon_idle(struct nvgpu_falcon *flcn)
 	unit_status = gk20a_readl(g,
		base_addr + falcon_falcon_idlestate_r());
 
-	if (falcon_falcon_idlestate_falcon_busy_v(unit_status) == 0 &&
-	    falcon_falcon_idlestate_ext_busy_v(unit_status) == 0) {
+	if (falcon_falcon_idlestate_falcon_busy_v(unit_status) == 0U &&
+	    falcon_falcon_idlestate_ext_busy_v(unit_status) == 0U) {
 		status = true;
 	} else {
 		status = false;
@@ -168,12 +168,12 @@ static int flcn_mem_overflow_check(struct nvgpu_falcon *flcn,
 	struct gk20a *g = flcn->g;
 	u32 mem_size = 0;
 
-	if (size == 0) {
+	if (size == 0U) {
 		nvgpu_err(g, "size is zero");
 		return -EINVAL;
 	}
 
-	if (offset & 0x3) {
+	if (offset & 0x3U) {
 		nvgpu_err(g, "offset (0x%08x) not 4-byte aligned", offset);
 		return -EINVAL;
 	}
@@ -209,7 +209,7 @@ static int gk20a_flcn_copy_from_dmem(struct nvgpu_falcon *flcn,
 	nvgpu_mutex_acquire(&flcn->copy_lock);
 
 	words = size >> 2;
-	bytes = size & 0x3;
+	bytes = size & 0x3U;
 
 	addr_mask = falcon_falcon_dmemc_offs_m() |
		falcon_falcon_dmemc_blk_m();
@@ -224,7 +224,7 @@ static int gk20a_flcn_copy_from_dmem(struct nvgpu_falcon *flcn,
			base_addr + falcon_falcon_dmemd_r(port));
 	}
 
-	if (bytes > 0) {
+	if (bytes > 0U) {
 		data = gk20a_readl(g, base_addr + falcon_falcon_dmemd_r(port));
 		for (i = 0; i < bytes; i++) {
 			dst[(words << 2) + i] = ((u8 *)&data)[i];
@@ -254,7 +254,7 @@ static int gk20a_flcn_copy_to_dmem(struct nvgpu_falcon *flcn,
 	nvgpu_mutex_acquire(&flcn->copy_lock);
 
 	words = size >> 2;
-	bytes = size & 0x3;
+	bytes = size & 0x3U;
 
 	addr_mask = falcon_falcon_dmemc_offs_m() |
		falcon_falcon_dmemc_blk_m();
@@ -269,7 +269,7 @@ static int gk20a_flcn_copy_to_dmem(struct nvgpu_falcon *flcn,
			base_addr + falcon_falcon_dmemd_r(port), src_u32[i]);
 	}
 
-	if (bytes > 0) {
+	if (bytes > 0U) {
 		data = 0;
 		for (i = 0; i < bytes; i++) {
 			((u8 *)&data)[i] = src[(words << 2) + i];
@@ -312,7 +312,7 @@ static int gk20a_flcn_copy_from_imem(struct nvgpu_falcon *flcn, u32 src,
 	nvgpu_mutex_acquire(&flcn->copy_lock);
 
 	words = size >> 2;
-	bytes = size & 0x3;
+	bytes = size & 0x3U;
 	blk = src >> 8;
 
 	nvgpu_log_info(g, "download %d words from 0x%x block %d",
		words, src, blk);
@@ -328,7 +328,7 @@ static int gk20a_flcn_copy_from_imem(struct nvgpu_falcon *flcn, u32 src,
			base_addr + falcon_falcon_imemd_r(port));
 	}
 
-	if (bytes > 0) {
+	if (bytes > 0U) {
 		data = gk20a_readl(g, base_addr + falcon_falcon_imemd_r(port));
 		for (i = 0; i < bytes; i++) {
 			dst[(words << 2) + i] = ((u8 *)&data)[i];
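
The DMEM/IMEM copies above all use the same split: move size/4 full 32-bit words through the data port, then pull the remaining size & 0x3U bytes out of one last word. A self-contained sketch of that pattern; read_word() is a hypothetical stand-in for the register read:

    #include <stdint.h>
    #include <string.h>

    extern uint32_t read_word(void);	/* hypothetical data-port read */

    static void copy_out(uint8_t *dst, uint32_t size)
    {
    	uint32_t words = size >> 2;	/* whole 32-bit words */
    	uint32_t bytes = size & 0x3U;	/* trailing bytes */
    	uint32_t i, data;

    	for (i = 0U; i < words; i++) {
    		data = read_word();
    		memcpy(dst + (i << 2), &data, sizeof(data));
    	}
    	if (bytes > 0U) {
    		data = read_word();	/* one extra word, partially used */
    		for (i = 0U; i < bytes; i++)
    			dst[(words << 2) + i] = ((uint8_t *)&data)[i];
    	}
    }
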
@@ -372,7 +372,7 @@ static int gk20a_flcn_copy_to_imem(struct nvgpu_falcon *flcn, u32 dst,
		falcon_falcon_imemc_aincw_f(1) |
		falcon_falcon_imemc_secure_f(sec ? 1U : 0U));
 
-	for (i = 0; i < words; i++) {
+	for (i = 0U; i < words; i++) {
 		if (i % 64U == 0U) {
 			/* tag is always 256B aligned */
 			gk20a_writel(g, base_addr + falcon_falcon_imemt_r(0),
@@ -500,12 +500,12 @@ static void gk20a_falcon_dump_imblk(struct nvgpu_falcon *flcn)
		flcn->flcn_base + falcon_falcon_hwcfg_r()));
 
 	/* block_count must be multiple of 8 */
-	block_count &= ~0x7;
+	block_count &= ~0x7U;
 	nvgpu_err(g, "FALCON IMEM BLK MAPPING (PA->VA) (%d TOTAL):",
		block_count);
 
-	for (i = 0; i < block_count; i += 8) {
-		for (j = 0; j < 8; j++) {
+	for (i = 0U; i < block_count; i += 8U) {
+		for (j = 0U; j < 8U; j++) {
 			gk20a_writel(g, flcn->flcn_base +
			falcon_falcon_imctl_debug_r(),
			falcon_falcon_imctl_debug_cmd_f(0x2) |
@@ -518,7 +518,7 @@ static void gk20a_falcon_dump_imblk(struct nvgpu_falcon *flcn)
 		nvgpu_err(g, " %#04x: %#010x %#010x %#010x %#010x",
			i, data[0], data[1], data[2], data[3]);
 		nvgpu_err(g, " %#04x: %#010x %#010x %#010x %#010x",
-			i + 4, data[4], data[5], data[6], data[7]);
+			i + 4U, data[4], data[5], data[6], data[7]);
 	}
 }
@@ -530,7 +530,7 @@ static void gk20a_falcon_dump_pc_trace(struct nvgpu_falcon *flcn)
 	u32 pc = 0;
 	u32 i = 0;
 
-	if (gk20a_readl(g, base_addr + falcon_falcon_sctl_r()) & 0x02) {
+	if (gk20a_readl(g, base_addr + falcon_falcon_sctl_r()) & 0x02U) {
 		nvgpu_err(g, " falcon is in HS mode, PC TRACE dump not supported");
 		return;
 	}
@@ -567,7 +567,7 @@ void gk20a_falcon_dump_stats(struct nvgpu_falcon *flcn)
 
 	nvgpu_err(g, "FALCON ICD REGISTERS DUMP");
 
-	for (i = 0; i < 4; i++) {
+	for (i = 0U; i < 4U; i++) {
 		gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
			falcon_falcon_icd_cmd_opc_rreg_f() |
			falcon_falcon_icd_cmd_idx_f(FALCON_REG_PC));
@@ -613,7 +613,7 @@ void gk20a_falcon_dump_stats(struct nvgpu_falcon *flcn)
 	nvgpu_err(g, "FALCON_REG_EXCI : 0x%x",
		gk20a_readl(g, base_addr + falcon_falcon_icd_rdata_r()));
 
-	for (i = 0; i < 6; i++) {
+	for (i = 0U; i < 6U; i++) {
 		gk20a_writel(g, base_addr + falcon_falcon_icd_cmd_r(),
			falcon_falcon_icd_cmd_opc_rreg_f() |
			falcon_falcon_icd_cmd_idx_f(
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index d880e952b..56f41b4a1 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -233,7 +233,7 @@ static void __update_pte(struct vm_gk20a *vm,
			vm->mm->use_full_comp_tag_line &&
			((phys_addr & 0x10000ULL) != 0ULL)) {
 		pte_w[1] |= gmmu_pte_comptagline_f(
-				1 << (gmmu_pte_comptagline_s() - 1));
+				1 << (gmmu_pte_comptagline_s() - 1U));
 	}
 
 	if (attrs->rw_flag == gk20a_mem_flag_read_only) {
@@ -393,10 +393,10 @@ void gk20a_init_inst_block(struct nvgpu_mem *inst_block, struct vm_gk20a *vm,
 	g->ops.mm.init_pdb(g, inst_block, vm);
 
 	nvgpu_mem_wr32(g, inst_block, ram_in_adr_limit_lo_w(),
-		u64_lo32(vm->va_limit - 1) & ~0xfff);
+		u64_lo32(vm->va_limit - 1U) & ~0xfffU);
 
 	nvgpu_mem_wr32(g, inst_block, ram_in_adr_limit_hi_w(),
-		ram_in_adr_limit_hi_f(u64_hi32(vm->va_limit - 1)));
+		ram_in_adr_limit_hi_f(u64_hi32(vm->va_limit - 1U)));
 
 	if ((big_page_size != 0U) && (g->ops.mm.set_big_page_size != NULL)) {
 		g->ops.mm.set_big_page_size(g, inst_block, big_page_size);
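
gk20a_init_inst_block() above writes the 64-bit (va_limit - 1) into two 32-bit instance-block words, with the low word masked to 4 KB alignment. A stand-alone sketch of that split (u64_lo32/u64_hi32 spelled out):

    #include <stdint.h>

    /* Break (va_limit - 1) into the lo/hi words written to
     * ram_in_adr_limit_lo_w()/hi_w(), all in unsigned arithmetic. */
    static void split_va_limit(uint64_t va_limit, uint32_t *lo, uint32_t *hi)
    {
    	uint64_t limit = va_limit - 1U;

    	*lo = (uint32_t)(limit & 0xffffffffU) & ~0xfffU; /* 4K-aligned low word */
    	*hi = (uint32_t)(limit >> 32);                   /* high word */
    }
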
diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
index b754da0da..2eafd4975 100644
--- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
@@ -155,7 +155,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
 		case REGOP(READ_64):
 			ops[i].value_lo = gk20a_readl(g, ops[i].offset);
 			ops[i].value_hi =
-				gk20a_readl(g, ops[i].offset + 4);
+				gk20a_readl(g, ops[i].offset + 4U);
 
 			nvgpu_log(g, gpu_dbg_gpu_dbg, "read_64 0x%08x:%08x from 0x%08x",
				ops[i].value_hi, ops[i].value_lo,
				ops[i].offset);
@@ -189,7 +189,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
 
 			/* if desired, read second 32bits */
 			if ((ops[i].op == REGOP(WRITE_64)) && !skip_read_hi) {
-				data32_hi = gk20a_readl(g, ops[i].offset + 4);
+				data32_hi = gk20a_readl(g, ops[i].offset + 4U);
 				data32_hi &= ~ops[i].and_n_mask_hi;
 				data32_hi |= ops[i].value_hi;
 			}
@@ -200,9 +200,9 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
				data32_lo, ops[i].offset);
 
 			/* if desired, update second 32bits */
 			if (ops[i].op == REGOP(WRITE_64)) {
-				gk20a_writel(g, ops[i].offset + 4, data32_hi);
+				gk20a_writel(g, ops[i].offset + 4U, data32_hi);
 				nvgpu_log(g, gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ",
-					data32_hi, ops[i].offset + 4);
+					data32_hi, ops[i].offset + 4U);
 			}
@@ -365,7 +365,7 @@ static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s,
 	valid = check_whitelists(dbg_s, op, offset);
 	if ((op->op == REGOP(READ_64) || op->op == REGOP(WRITE_64)) && valid) {
-		valid = check_whitelists(dbg_s, op, offset + 4);
+		valid = check_whitelists(dbg_s, op, offset + 4U);
 	}
 
 	if (valid && (op->type != REGOP(TYPE_GLOBAL))) {
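
The READ_64/WRITE_64 paths above show that GPU BAR registers are 32 bits wide: a 64-bit access is always two 32-bit accesses at offset and offset + 4U. A sketch with readl32() standing in for gk20a_readl():

    #include <stdint.h>

    extern uint32_t readl32(uint32_t offset);	/* stand-in register read */

    /* Assemble a 64-bit register value from a lo/hi pair of 32-bit reads,
     * mirroring the READ_64 case in exec_regops_gk20a(). */
    static uint64_t read64(uint32_t offset)
    {
    	uint32_t lo = readl32(offset);
    	uint32_t hi = readl32(offset + 4U);

    	return ((uint64_t)hi << 32) | lo;
    }
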
diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.h b/drivers/gpu/nvgpu/gk20a/regops_gk20a.h
index 967058798..47d350d08 100644
--- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.h
@@ -30,31 +30,31 @@
 * attached to debug session */
 
 /* valid op values */
-#define NVGPU_DBG_REG_OP_READ_32	(0x00000000)
-#define NVGPU_DBG_REG_OP_WRITE_32	(0x00000001)
-#define NVGPU_DBG_REG_OP_READ_64	(0x00000002)
-#define NVGPU_DBG_REG_OP_WRITE_64	(0x00000003)
+#define NVGPU_DBG_REG_OP_READ_32	0x00000000U
+#define NVGPU_DBG_REG_OP_WRITE_32	0x00000001U
+#define NVGPU_DBG_REG_OP_READ_64	0x00000002U
+#define NVGPU_DBG_REG_OP_WRITE_64	0x00000003U
 /* note: 8b ops are unsupported */
-#define NVGPU_DBG_REG_OP_READ_08	(0x00000004)
-#define NVGPU_DBG_REG_OP_WRITE_08	(0x00000005)
+#define NVGPU_DBG_REG_OP_READ_08	0x00000004U
+#define NVGPU_DBG_REG_OP_WRITE_08	0x00000005U
 
 /* valid type values */
-#define NVGPU_DBG_REG_OP_TYPE_GLOBAL	(0x00000000)
-#define NVGPU_DBG_REG_OP_TYPE_GR_CTX	(0x00000001)
-#define NVGPU_DBG_REG_OP_TYPE_GR_CTX_TPC (0x00000002)
-#define NVGPU_DBG_REG_OP_TYPE_GR_CTX_SM	(0x00000004)
-#define NVGPU_DBG_REG_OP_TYPE_GR_CTX_CROP (0x00000008)
-#define NVGPU_DBG_REG_OP_TYPE_GR_CTX_ZROP (0x00000010)
+#define NVGPU_DBG_REG_OP_TYPE_GLOBAL	0x00000000U
+#define NVGPU_DBG_REG_OP_TYPE_GR_CTX	0x00000001U
+#define NVGPU_DBG_REG_OP_TYPE_GR_CTX_TPC 0x00000002U
+#define NVGPU_DBG_REG_OP_TYPE_GR_CTX_SM	0x00000004U
+#define NVGPU_DBG_REG_OP_TYPE_GR_CTX_CROP 0x00000008U
+#define NVGPU_DBG_REG_OP_TYPE_GR_CTX_ZROP 0x00000010U
 /*#define NVGPU_DBG_REG_OP_TYPE_FB	(0x00000020)*/
-#define NVGPU_DBG_REG_OP_TYPE_GR_CTX_QUAD (0x00000040)
+#define NVGPU_DBG_REG_OP_TYPE_GR_CTX_QUAD 0x00000040U
 
 /* valid status values */
-#define NVGPU_DBG_REG_OP_STATUS_SUCCESS	(0x00000000)
-#define NVGPU_DBG_REG_OP_STATUS_INVALID_OP (0x00000001)
-#define NVGPU_DBG_REG_OP_STATUS_INVALID_TYPE (0x00000002)
-#define NVGPU_DBG_REG_OP_STATUS_INVALID_OFFSET (0x00000004)
-#define NVGPU_DBG_REG_OP_STATUS_UNSUPPORTED_OP (0x00000008)
-#define NVGPU_DBG_REG_OP_STATUS_INVALID_MASK (0x00000010)
+#define NVGPU_DBG_REG_OP_STATUS_SUCCESS	0x00000000U
+#define NVGPU_DBG_REG_OP_STATUS_INVALID_OP 0x00000001U
+#define NVGPU_DBG_REG_OP_STATUS_INVALID_TYPE 0x00000002U
+#define NVGPU_DBG_REG_OP_STATUS_INVALID_OFFSET 0x00000004U
+#define NVGPU_DBG_REG_OP_STATUS_UNSUPPORTED_OP 0x00000008U
+#define NVGPU_DBG_REG_OP_STATUS_INVALID_MASK 0x00000010U
 
 struct nvgpu_dbg_reg_op {
 	u8    op;
diff --git a/drivers/gpu/nvgpu/include/nvgpu/debugger.h b/drivers/gpu/nvgpu/include/nvgpu/debugger.h
index 76c6667ff..199b385ff 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/debugger.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/debugger.h
@@ -136,10 +136,10 @@ u32 nvgpu_set_powergate_locked(struct dbg_session_gk20a *dbg_s,
 
 /* PM Context Switch Mode */
 /*This mode says that the pms are not to be context switched. */
-#define NVGPU_DBG_HWPM_CTXSW_MODE_NO_CTXSW		(0x00000000)
+#define NVGPU_DBG_HWPM_CTXSW_MODE_NO_CTXSW		(0x00000000U)
 /* This mode says that the pms in Mode-B are to be context switched */
-#define NVGPU_DBG_HWPM_CTXSW_MODE_CTXSW			(0x00000001)
+#define NVGPU_DBG_HWPM_CTXSW_MODE_CTXSW			(0x00000001U)
 /* This mode says that the pms in Mode-E (stream out) are to be context switched. */
-#define NVGPU_DBG_HWPM_CTXSW_MODE_STREAM_OUT_CTXSW	(0x00000002)
+#define NVGPU_DBG_HWPM_CTXSW_MODE_STREAM_OUT_CTXSW	(0x00000002U)
 
 #endif /* NVGPU_DEBUGGER_H */
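
The STATUS_* values above are single bits, so a validator can accumulate multiple failure causes into one status word. A minimal sketch, assuming the op and offset fields of struct nvgpu_dbg_reg_op shown/used above:

    /* Return 0 (SUCCESS) or an OR of failure bits for one reg op. */
    static u32 check_reg_op(const struct nvgpu_dbg_reg_op *op)
    {
    	u32 status = NVGPU_DBG_REG_OP_STATUS_SUCCESS;

    	if (op->op > NVGPU_DBG_REG_OP_WRITE_64)
    		status |= NVGPU_DBG_REG_OP_STATUS_UNSUPPORTED_OP;
    	if ((op->offset & 0x3U) != 0U)
    		status |= NVGPU_DBG_REG_OP_STATUS_INVALID_OFFSET;

    	return status;
    }
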
diff --git a/drivers/gpu/nvgpu/include/nvgpu/falcon.h b/drivers/gpu/nvgpu/include/nvgpu/falcon.h
index dd422fb6c..770c93ee7 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/falcon.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/falcon.h
@@ -40,57 +40,57 @@
 /*
 * Falcon Base address Defines
 */
-#define FALCON_NVDEC_BASE	0x00084000
-#define FALCON_PWR_BASE		0x0010a000
-#define FALCON_SEC_BASE		0x00087000
-#define FALCON_FECS_BASE	0x00409000
-#define FALCON_GPCCS_BASE	0x0041a000
+#define FALCON_NVDEC_BASE	0x00084000U
+#define FALCON_PWR_BASE		0x0010a000U
+#define FALCON_SEC_BASE		0x00087000U
+#define FALCON_FECS_BASE	0x00409000U
+#define FALCON_GPCCS_BASE	0x0041a000U
 
 /* Falcon Register index */
-#define FALCON_REG_R0		(0)
-#define FALCON_REG_R1		(1)
-#define FALCON_REG_R2		(2)
-#define FALCON_REG_R3		(3)
-#define FALCON_REG_R4		(4)
-#define FALCON_REG_R5		(5)
-#define FALCON_REG_R6		(6)
-#define FALCON_REG_R7		(7)
-#define FALCON_REG_R8		(8)
-#define FALCON_REG_R9		(9)
-#define FALCON_REG_R10		(10)
-#define FALCON_REG_R11		(11)
-#define FALCON_REG_R12		(12)
-#define FALCON_REG_R13		(13)
-#define FALCON_REG_R14		(14)
-#define FALCON_REG_R15		(15)
-#define FALCON_REG_IV0		(16)
-#define FALCON_REG_IV1		(17)
-#define FALCON_REG_UNDEFINED	(18)
-#define FALCON_REG_EV		(19)
-#define FALCON_REG_SP		(20)
-#define FALCON_REG_PC		(21)
-#define FALCON_REG_IMB		(22)
-#define FALCON_REG_DMB		(23)
-#define FALCON_REG_CSW		(24)
-#define FALCON_REG_CCR		(25)
-#define FALCON_REG_SEC		(26)
-#define FALCON_REG_CTX		(27)
-#define FALCON_REG_EXCI		(28)
-#define FALCON_REG_RSVD0	(29)
-#define FALCON_REG_RSVD1	(30)
-#define FALCON_REG_RSVD2	(31)
-#define FALCON_REG_SIZE		(32)
+#define FALCON_REG_R0		(0U)
+#define FALCON_REG_R1		(1U)
+#define FALCON_REG_R2		(2U)
+#define FALCON_REG_R3		(3U)
+#define FALCON_REG_R4		(4U)
+#define FALCON_REG_R5		(5U)
+#define FALCON_REG_R6		(6U)
+#define FALCON_REG_R7		(7U)
+#define FALCON_REG_R8		(8U)
+#define FALCON_REG_R9		(9U)
+#define FALCON_REG_R10		(10U)
+#define FALCON_REG_R11		(11U)
+#define FALCON_REG_R12		(12U)
+#define FALCON_REG_R13		(13U)
+#define FALCON_REG_R14		(14U)
+#define FALCON_REG_R15		(15U)
+#define FALCON_REG_IV0		(16U)
+#define FALCON_REG_IV1		(17U)
+#define FALCON_REG_UNDEFINED	(18U)
+#define FALCON_REG_EV		(19U)
+#define FALCON_REG_SP		(20U)
+#define FALCON_REG_PC		(21U)
+#define FALCON_REG_IMB		(22U)
+#define FALCON_REG_DMB		(23U)
+#define FALCON_REG_CSW		(24U)
+#define FALCON_REG_CCR		(25U)
+#define FALCON_REG_SEC		(26U)
+#define FALCON_REG_CTX		(27U)
+#define FALCON_REG_EXCI		(28U)
+#define FALCON_REG_RSVD0	(29U)
+#define FALCON_REG_RSVD1	(30U)
+#define FALCON_REG_RSVD2	(31U)
+#define FALCON_REG_SIZE		(32U)
 
-#define FALCON_MAILBOX_0	0x0
-#define FALCON_MAILBOX_1	0x1
-#define FALCON_MAILBOX_COUNT	0x02
+#define FALCON_MAILBOX_0	0x0U
+#define FALCON_MAILBOX_1	0x1U
+#define FALCON_MAILBOX_COUNT	0x02U
 #define FALCON_BLOCK_SIZE	0x100U
 
-#define GET_IMEM_TAG(IMEM_ADDR) ((IMEM_ADDR) >> 8)
+#define GET_IMEM_TAG(IMEM_ADDR) ((IMEM_ADDR) >> 8U)
 
 #define GET_NEXT_BLOCK(ADDR) \
-	(((((ADDR) + (FALCON_BLOCK_SIZE - 1)) & ~(FALCON_BLOCK_SIZE-1)) \
-		/ FALCON_BLOCK_SIZE) << 8)
+	(((((ADDR) + (FALCON_BLOCK_SIZE - 1U)) & ~(FALCON_BLOCK_SIZE-1U)) \
+		/ FALCON_BLOCK_SIZE) << 8U)
 
 /*
 * Falcon HWCFG request read types defines
@@ -113,8 +113,8 @@ enum flcn_hwcfg_write {
 	FALCON_ITF_EN
 };
 
-#define FALCON_MEM_SCRUBBING_TIMEOUT_MAX 1000
-#define FALCON_MEM_SCRUBBING_TIMEOUT_DEFAULT 10
+#define FALCON_MEM_SCRUBBING_TIMEOUT_MAX 1000U
+#define FALCON_MEM_SCRUBBING_TIMEOUT_DEFAULT 10U
 
 enum flcn_dma_dir {
 	DMA_TO_FB = 0,
@@ -145,13 +145,13 @@ enum flcn_mem_type {
 * OS Ovl Offset
 * OS Ovl Size
 */
-#define OS_CODE_OFFSET	0x0
-#define OS_CODE_SIZE	0x1
-#define OS_DATA_OFFSET	0x2
-#define OS_DATA_SIZE	0x3
-#define NUM_APPS	0x4
-#define APP_0_CODE_OFFSET 0x5
-#define APP_0_CODE_SIZE	0x6
+#define OS_CODE_OFFSET	0x0U
+#define OS_CODE_SIZE	0x1U
+#define OS_DATA_OFFSET	0x2U
+#define OS_DATA_SIZE	0x3U
+#define NUM_APPS	0x4U
+#define APP_0_CODE_OFFSET 0x5U
+#define APP_0_CODE_SIZE	0x6U
 
 struct nvgpu_falcon_dma_info {
 	u32 fb_base;
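
GET_IMEM_TAG() extracts the 256-byte-block tag of an IMEM address, and GET_NEXT_BLOCK() rounds an address up to the next block boundary. A stand-alone check of both macros as patched:

    #include <stdint.h>

    #define FALCON_BLOCK_SIZE 0x100U
    #define GET_IMEM_TAG(IMEM_ADDR) ((IMEM_ADDR) >> 8U)
    #define GET_NEXT_BLOCK(ADDR) \
    	(((((ADDR) + (FALCON_BLOCK_SIZE - 1U)) & ~(FALCON_BLOCK_SIZE-1U)) \
    		/ FALCON_BLOCK_SIZE) << 8U)

    int main(void)
    {
    	/* 0x1234 lies in block 0x12; rounding up gives boundary 0x1300. */
    	uint32_t tag  = GET_IMEM_TAG(0x1234U);   /* 0x12 */
    	uint32_t next = GET_NEXT_BLOCK(0x1234U); /* 0x1300 */

    	return (tag == 0x12U && next == 0x1300U) ? 0 : 1;
    }
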
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu.h b/drivers/gpu/nvgpu/include/nvgpu/pmu.h
index 2cde5f37f..f9725ab9c 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmu.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmu.h
@@ -38,50 +38,50 @@
 	nvgpu_log(g, gpu_dbg_pmu, fmt, ##args)
 
 /* defined by pmu hw spec */
-#define GK20A_PMU_VA_SIZE		(512 * 1024 * 1024)
-#define GK20A_PMU_UCODE_SIZE_MAX	(256 * 1024)
-#define GK20A_PMU_SEQ_BUF_SIZE		4096
+#define GK20A_PMU_VA_SIZE		(512U * 1024U * 1024U)
+#define GK20A_PMU_UCODE_SIZE_MAX	(256U * 1024U)
+#define GK20A_PMU_SEQ_BUF_SIZE		4096U
 
-#define GK20A_PMU_TRACE_BUFSIZE		0x4000   /* 4K */
-#define GK20A_PMU_DMEM_BLKSIZE2		8
+#define GK20A_PMU_TRACE_BUFSIZE		0x4000U   /* 4K */
+#define GK20A_PMU_DMEM_BLKSIZE2		8U
 
-#define PMU_MODE_MISMATCH_STATUS_MAILBOX_R	6
-#define PMU_MODE_MISMATCH_STATUS_VAL		0xDEADDEAD
+#define PMU_MODE_MISMATCH_STATUS_MAILBOX_R	6U
+#define PMU_MODE_MISMATCH_STATUS_VAL		0xDEADDEADU
 
 /* Falcon Register index */
-#define PMU_FALCON_REG_R0		(0)
-#define PMU_FALCON_REG_R1		(1)
-#define PMU_FALCON_REG_R2		(2)
-#define PMU_FALCON_REG_R3		(3)
-#define PMU_FALCON_REG_R4		(4)
-#define PMU_FALCON_REG_R5		(5)
-#define PMU_FALCON_REG_R6		(6)
-#define PMU_FALCON_REG_R7		(7)
-#define PMU_FALCON_REG_R8		(8)
-#define PMU_FALCON_REG_R9		(9)
-#define PMU_FALCON_REG_R10		(10)
-#define PMU_FALCON_REG_R11		(11)
-#define PMU_FALCON_REG_R12		(12)
-#define PMU_FALCON_REG_R13		(13)
-#define PMU_FALCON_REG_R14		(14)
-#define PMU_FALCON_REG_R15		(15)
-#define PMU_FALCON_REG_IV0		(16)
-#define PMU_FALCON_REG_IV1		(17)
-#define PMU_FALCON_REG_UNDEFINED	(18)
-#define PMU_FALCON_REG_EV		(19)
-#define PMU_FALCON_REG_SP		(20)
-#define PMU_FALCON_REG_PC		(21)
-#define PMU_FALCON_REG_IMB		(22)
-#define PMU_FALCON_REG_DMB		(23)
-#define PMU_FALCON_REG_CSW		(24)
-#define PMU_FALCON_REG_CCR		(25)
-#define PMU_FALCON_REG_SEC		(26)
-#define PMU_FALCON_REG_CTX		(27)
-#define PMU_FALCON_REG_EXCI		(28)
-#define PMU_FALCON_REG_RSVD0		(29)
-#define PMU_FALCON_REG_RSVD1		(30)
-#define PMU_FALCON_REG_RSVD2		(31)
-#define PMU_FALCON_REG_SIZE		(32)
+#define PMU_FALCON_REG_R0		(0U)
+#define PMU_FALCON_REG_R1		(1U)
+#define PMU_FALCON_REG_R2		(2U)
+#define PMU_FALCON_REG_R3		(3U)
+#define PMU_FALCON_REG_R4		(4U)
+#define PMU_FALCON_REG_R5		(5U)
+#define PMU_FALCON_REG_R6		(6U)
+#define PMU_FALCON_REG_R7		(7U)
+#define PMU_FALCON_REG_R8		(8U)
+#define PMU_FALCON_REG_R9		(9U)
+#define PMU_FALCON_REG_R10		(10U)
+#define PMU_FALCON_REG_R11		(11U)
+#define PMU_FALCON_REG_R12		(12U)
+#define PMU_FALCON_REG_R13		(13U)
+#define PMU_FALCON_REG_R14		(14U)
+#define PMU_FALCON_REG_R15		(15U)
+#define PMU_FALCON_REG_IV0		(16U)
+#define PMU_FALCON_REG_IV1		(17U)
+#define PMU_FALCON_REG_UNDEFINED	(18U)
+#define PMU_FALCON_REG_EV		(19U)
+#define PMU_FALCON_REG_SP		(20U)
+#define PMU_FALCON_REG_PC		(21U)
+#define PMU_FALCON_REG_IMB		(22U)
+#define PMU_FALCON_REG_DMB		(23U)
+#define PMU_FALCON_REG_CSW		(24U)
+#define PMU_FALCON_REG_CCR		(25U)
+#define PMU_FALCON_REG_SEC		(26U)
+#define PMU_FALCON_REG_CTX		(27U)
+#define PMU_FALCON_REG_EXCI		(28U)
+#define PMU_FALCON_REG_RSVD0		(29U)
+#define PMU_FALCON_REG_RSVD1		(30U)
+#define PMU_FALCON_REG_RSVD2		(31U)
+#define PMU_FALCON_REG_SIZE		(32U)
 
 /* Choices for pmu_state */
 #define PMU_STATE_OFF			0U /* PMU is off */
@@ -102,7 +102,7 @@
 #define PMU_SEQ_TBL_SIZE \
 	(PMU_MAX_NUM_SEQUENCES >> PMU_SEQ_BIT_SHIFT)
 
-#define PMU_INVALID_SEQ_DESC		(~0)
+#define PMU_INVALID_SEQ_DESC		(~0U)
 
 enum {
 	GK20A_PMU_DMAIDX_UCODE = 0,
@@ -123,26 +123,26 @@ enum {
 };
 
 /*PG defines used by nvpgu-pmu*/
-#define PMU_PG_IDLE_THRESHOLD_SIM		1000
-#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD_SIM	4000000
+#define PMU_PG_IDLE_THRESHOLD_SIM		1000U
+#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD_SIM	4000000U
 /* TBD: QT or else ? */
-#define PMU_PG_IDLE_THRESHOLD			15000
-#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD	1000000
+#define PMU_PG_IDLE_THRESHOLD			15000U
+#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD	1000000U
 
-#define PMU_PG_LPWR_FEATURE_RPPG 0x0
-#define PMU_PG_LPWR_FEATURE_MSCG 0x1
+#define PMU_PG_LPWR_FEATURE_RPPG 0x0U
+#define PMU_PG_LPWR_FEATURE_MSCG 0x1U
 
 #define PMU_MSCG_DISABLED 0U
 #define PMU_MSCG_ENABLED 1U
 
 /* Default Sampling Period of AELPG */
-#define APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US	(1000000)
+#define APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US	(1000000U)
 
 /* Default values of APCTRL parameters */
-#define APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US	(100)
-#define APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US	(10000)
-#define APCTRL_POWER_BREAKEVEN_DEFAULT_US	(2000)
-#define APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT	(200)
+#define APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US	(100U)
+#define APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US	(10000U)
+#define APCTRL_POWER_BREAKEVEN_DEFAULT_US	(2000U)
+#define APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT	(200U)
 
 /* RPC */
 #define PMU_RPC_EXECUTE(_stat, _pmu, _unit, _func, _prpc, _size)\
@@ -353,7 +353,7 @@ struct nvgpu_pmu {
 
 	u32 pmu_state;
 
-#define PMU_ELPG_ENABLE_ALLOW_DELAY_MSEC	1 /* msec */
+#define PMU_ELPG_ENABLE_ALLOW_DELAY_MSEC	1U /* msec */
 	struct nvgpu_pg_init pg_init;
 	struct nvgpu_mutex pg_mutex; /* protect pg-RPPG/MSCG enable/disable */
 	struct nvgpu_mutex elpg_mutex; /* protect elpg enable/disable */
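
The PMU_INVALID_SEQ_DESC change is the subtlest one in this patch: with a plain (~0) the sentinel is the signed int -1, so comparing it against a u32 descriptor forces an implicit conversion; with (~0U) both sides are already 0xffffffff (the same reasoning behind FIFO_INVAL_ENGINE_ID's (u32)~0). A stand-alone sketch:

    #include <stdint.h>
    typedef uint32_t u32;

    #define PMU_INVALID_SEQ_DESC (~0U)	/* all-ones u32 sentinel */

    static int seq_desc_is_valid(u32 seq_desc)
    {
    	/* unsigned == unsigned: no implicit conversion involved */
    	return seq_desc != PMU_INVALID_SEQ_DESC;
    }
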