diff --git a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
index 491a6275f..4489c4e43 100644
--- a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
@@ -41,8 +41,8 @@
 #define gk20a_dbg_clk(g, fmt, arg...) \
     nvgpu_log(g, gpu_dbg_clk, fmt, ##arg)
 
-#define DFS_DET_RANGE 6  /* -2^6 ... 2^6-1 */
-#define SDM_DIN_RANGE 12 /* -2^12 ... 2^12-1 */
+#define DFS_DET_RANGE 6U  /* -2^6 ... 2^6-1 */
+#define SDM_DIN_RANGE 12U /* -2^12 ... 2^12-1 */
 #define DFS_TESTOUT_DET BIT32(0)
 #define DFS_EXT_CAL_EN BIT32(9)
 #define DFS_EXT_STROBE BIT32(16)
@@ -51,7 +51,7 @@
 #define BOOT_GPU_UV_C1 800000 /* gpu rail boot voltage 0.8V */
 #define ADC_SLOPE_UV 10000    /* default ADC detection slope 10mV */
 
-#define DVFS_SAFE_MARGIN 10 /* 10% */
+#define DVFS_SAFE_MARGIN 10U /* 10% */
 
 static struct pll_parms gpc_pll_params_b1 = {
     128000,  2600000, /* freq */
@@ -126,8 +126,8 @@ static u32 get_interim_pldiv(struct gk20a *g, u32 old_pl, u32 new_pl)
         return 0;
     }
 
-    pl = old_pl | BIT(ffs(new_pl) - 1); /* pl never 0 */
-    new_pl |= BIT(ffs(old_pl) - 1);
+    pl = old_pl | BIT32(ffs(new_pl) - 1U); /* pl never 0 */
+    new_pl |= BIT32(ffs(old_pl) - 1U);
 
     return min(pl, new_pl);
 }
@@ -164,13 +164,13 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
     best_N = pll_params->min_N;
     best_PL = pll_params->min_PL;
 
-    target_vco_f = target_clk_f + target_clk_f / 50;
+    target_vco_f = target_clk_f + target_clk_f / 50U;
     if (max_vco_f < target_vco_f) {
         max_vco_f = target_vco_f;
     }
 
     /* Set PL search boundaries. */
-    high_PL = nvgpu_div_to_pl((max_vco_f + target_vco_f - 1) / target_vco_f);
+    high_PL = nvgpu_div_to_pl((max_vco_f + target_vco_f - 1U) / target_vco_f);
     high_PL = min(high_PL, pll_params->max_PL);
     high_PL = max(high_PL, pll_params->min_PL);
@@ -195,7 +195,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
         }
 
         n = (target_vco_f * m) / ref_clk_f;
-        n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;
+        n2 = ((target_vco_f * m) + (ref_clk_f - 1U)) / ref_clk_f;
 
         if (n > pll_params->max_N) {
             break;
@@ -212,7 +212,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
             vco_f = ref_clk_f * n / m;
 
             if (vco_f >= min_vco_f && vco_f <= max_vco_f) {
-                lwv = (vco_f + (nvgpu_pl_to_div(pl) / 2))
+                lwv = (vco_f + (nvgpu_pl_to_div(pl) / 2U))
                     / nvgpu_pl_to_div(pl);
                 delta = abs(S32(lwv) - S32(target_clk_f));
@@ -223,9 +223,9 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
                 best_N = n;
                 best_PL = pl;
 
-                if (best_delta == 0 ||
+                if (best_delta == 0U ||
                     /* 0.45% for non best fit */
-                    (!best_fit && (vco_f / best_delta > 218))) {
+                    (!best_fit && (vco_f / best_delta > 218U))) {
                     goto found_match;
                 }
@@ -240,7 +240,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
 
 found_match:
     BUG_ON(best_delta == ~0U);
 
-    if (best_fit && best_delta != 0) {
+    if (best_fit && best_delta != 0U) {
         gk20a_dbg_clk(g, "no best match for target @ %dMHz on gpc_pll",
             target_clk_f);
     }
@@ -264,21 +264,21 @@ found_match:
 
 /* GPCPLL NA/DVFS mode methods */
 
-static inline int fuse_get_gpcpll_adc_rev(u32 val)
+static inline u32 fuse_get_gpcpll_adc_rev(u32 val)
 {
-    return (val >> 30) & 0x3;
+    return (val >> 30) & 0x3U;
 }
 
 static inline int fuse_get_gpcpll_adc_slope_uv(u32 val)
 {
     /* Integer part in mV * 1000 + fractional part in uV */
-    return ((val >> 24) & 0x3f) * 1000 + ((val >> 14) & 0x3ff);
+    return ((val >> 24) & 0x3fU) * 1000U + ((val >> 14) & 0x3ffU);
 }
 
 static inline int fuse_get_gpcpll_adc_intercept_uv(u32 val)
 {
     /* Integer part in mV * 1000 + fractional part in 100uV */
-    return ((val >> 4) & 0x3ff) * 1000 + ((val >> 0) & 0xf) * 100;
+    return ((val >> 4) & 0x3ffU) * 1000U + ((val >> 0) & 0xfU) * 100U;
 }
 
@@ -292,7 +292,7 @@ static int nvgpu_fuse_calib_gpcpll_get_adc(struct gk20a *g,
         return ret;
     }
 
-    if (fuse_get_gpcpll_adc_rev(val) == 0) {
+    if (fuse_get_gpcpll_adc_rev(val) == 0U) {
         return -EINVAL;
     }
 
@@ -383,10 +383,10 @@ static void clk_config_dvfs_ndiv(int mv, u32 n_eff, struct na_dvfs *d)
     BUG_ON((n < 0) || (n > (int)p->max_N << DFS_DET_RANGE));
     d->n_int = ((u32)n) >> DFS_DET_RANGE;
 
-    rem = ((u32)n) & ((1 << DFS_DET_RANGE) - 1);
-    rem_range = SDM_DIN_RANGE + 1 - DFS_DET_RANGE;
-    d->sdm_din = (rem << rem_range) - (1 << SDM_DIN_RANGE);
-    d->sdm_din = (d->sdm_din >> BITS_PER_BYTE) & 0xff;
+    rem = ((u32)n) & (BIT32(DFS_DET_RANGE) - 1U);
+    rem_range = SDM_DIN_RANGE + 1U - DFS_DET_RANGE;
+    d->sdm_din = (rem << rem_range) - BIT32(SDM_DIN_RANGE);
+    d->sdm_din = (d->sdm_din >> BITS_PER_BYTE) & 0xffU;
 }
 
 /* Voltage dependent configuration */
@@ -441,8 +441,8 @@ static void clk_set_dfs_ext_cal(struct gk20a *g, u32 dfs_det_cal)
     u32 data, ctrl;
 
     data = gk20a_readl(g, trim_gpc_bcast_gpcpll_dvfs2_r());
-    data &= ~(BIT(DFS_DET_RANGE + 1) - 1);
-    data |= dfs_det_cal & (BIT(DFS_DET_RANGE + 1) - 1);
+    data &= ~(BIT32(DFS_DET_RANGE + 1U) - 1U);
+    data |= dfs_det_cal & (BIT32(DFS_DET_RANGE + 1U) - 1U);
     gk20a_writel(g, trim_gpc_bcast_gpcpll_dvfs2_r(), data);
 
     data = gk20a_readl(g, trim_sys_gpcpll_dvfs1_r());
@@ -580,7 +580,7 @@ static int clk_enbale_pll_dvfs(struct gk20a *g)
         return -ETIMEDOUT;
     }
 
-    p->uvdet_offs = g->clk.pll_poweron_uv - data * ADC_SLOPE_UV;
+    p->uvdet_offs = g->clk.pll_poweron_uv - (int)data * ADC_SLOPE_UV;
     p->uvdet_slope = ADC_SLOPE_UV;
     return 0;
 }
@@ -831,8 +831,8 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
         (void) gk20a_readl(g, trim_sys_gpcpll_cfg_r());
         nvgpu_udelay(gpc_pll_params.na_lock_delay);
         gk20a_dbg_clk(g, "NA config_pll under bypass: %u (%u) kHz %d mV",
-            gpll->freq, gpll->freq / 2,
-            (trim_sys_gpcpll_cfg3_dfs_testout_v(
+            gpll->freq, gpll->freq / 2U,
+            ((int)trim_sys_gpcpll_cfg3_dfs_testout_v(
                 gk20a_readl(g, trim_sys_gpcpll_cfg3_r()))
             * gpc_pll_params.uvdet_slope
             + gpc_pll_params.uvdet_offs) / 1000);
@@ -849,14 +849,15 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
     }
 
     /* wait pll lock */
-    timeout = gpc_pll_params.lock_timeout + 1;
+    timeout = gpc_pll_params.lock_timeout + 1U;
     do {
         nvgpu_udelay(1);
         cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
         if ((cfg & trim_sys_gpcpll_cfg_pll_lock_true_f()) != 0U) {
             goto pll_locked;
         }
-    } while (--timeout > 0);
+        timeout--;
+    } while (timeout > 0U);
 
     /* PLL is messed up. What can we do here? */
     dump_gpc_pll(g, gpll, cfg);
@@ -945,7 +946,7 @@ static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new,
      * transition is not really glitch-less - see get_interim_pldiv
      * function header).
      */
-    if ((gpll_new->PL < 2) || (gpll.PL < 2)) {
+    if ((gpll_new->PL < 2U) || (gpll.PL < 2U)) {
         data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
         data = set_field(data, trim_sys_gpc2clk_out_vcodiv_m(),
             trim_sys_gpc2clk_out_vcodiv_f(2));
@@ -1028,7 +1029,7 @@ static void clk_config_pll_safe_dvfs(struct gk20a *g, struct pll *gpll)
     u32 nsafe, nmin;
 
     if (gpll->freq > g->clk.dvfs_safe_max_freq) {
-        gpll->freq = gpll->freq * (100 - DVFS_SAFE_MARGIN) / 100;
+        gpll->freq = gpll->freq * (100U - DVFS_SAFE_MARGIN) / 100U;
     }
 
     nmin = DIV_ROUND_UP(gpll->M * gpc_pll_params.min_vco, gpll->clk_in);
@@ -1069,7 +1070,7 @@ static int clk_program_na_gpc_pll(struct gk20a *g, struct pll *gpll_new,
     struct pll gpll_safe;
     struct pll *gpll_old = &g->clk.gpc_pll_last;
 
-    BUG_ON(gpll_new->M != 1); /* the only MDIV in NA mode */
+    BUG_ON(gpll_new->M != 1U); /* the only MDIV in NA mode */
 
     clk_config_dvfs(g, gpll_new);
 
     /*
@@ -1223,16 +1224,16 @@ int gm20b_init_clk_setup_sw(struct gk20a *g)
     }
 
     clk->gpc_pll.clk_in = g->ops.clk.get_ref_clock_rate(g) / KHZ;
-    if (clk->gpc_pll.clk_in == 0) {
+    if (clk->gpc_pll.clk_in == 0U) {
         nvgpu_err(g, "GPCPLL reference clock is zero");
         err = -EINVAL;
         goto fail;
     }
 
     safe_rate = g->ops.clk.get_fmax_at_vmin_safe(g);
-    safe_rate = safe_rate * (100 - DVFS_SAFE_MARGIN) / 100;
+    safe_rate = safe_rate * (100UL - (unsigned long)DVFS_SAFE_MARGIN) / 100UL;
     clk->dvfs_safe_max_freq = rate_gpu_to_gpc2clk(safe_rate);
-    clk->gpc_pll.PL = (clk->dvfs_safe_max_freq == 0) ? 0 :
+    clk->gpc_pll.PL = (clk->dvfs_safe_max_freq == 0UL) ? 0U :
         DIV_ROUND_UP(gpc_pll_params.min_vco, clk->dvfs_safe_max_freq);
 
     /* Initial freq: low enough to be safe at Vmin (default 1/3 VCO min) */
@@ -1535,8 +1536,8 @@ int gm20b_clk_get_voltage(struct clk_gk20a *clk, u64 *val)
     det_out = gk20a_readl(g, trim_sys_gpcpll_cfg3_r());
     det_out = trim_sys_gpcpll_cfg3_dfs_testout_v(det_out);
 
-    *val = div64_u64((u64)det_out * gpc_pll_params->uvdet_slope +
-            gpc_pll_params->uvdet_offs, 1000ULL);
+    *val = div64_u64((u64)det_out * (u64)gpc_pll_params->uvdet_slope +
+            (u64)gpc_pll_params->uvdet_offs, 1000ULL);
 
     nvgpu_mutex_release(&g->clk.clk_mutex);
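
The clk_gm20b.c hunks above follow one theme: keep u32 arithmetic in a single essential type by suffixing literals with U (or UL where the operand is unsigned long), replace open-coded (1 << n) with BIT32(), make sign and width conversions explicit with casts, and move the side-effecting --timeout out of the loop condition. A minimal standalone sketch of the derating and timeout patterns, assuming nothing beyond the DVFS_SAFE_MARGIN value from this file:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t u32;

    #define DVFS_SAFE_MARGIN 10U /* 10%, as in the hunk above */

    int main(void)
    {
        u32 freq = 998U;
        u32 timeout = 3U;

        /*
         * With a plain 10, "100 - DVFS_SAFE_MARGIN" is a signed int that
         * then mixes into u32 arithmetic; with 10U the whole expression
         * stays unsigned and no implicit conversion occurs.
         */
        freq = freq * (100U - DVFS_SAFE_MARGIN) / 100U;
        printf("derated freq: %u kHz\n", freq); /* 898 */

        /*
         * The lock-wait loop: "while (--timeout > 0)" hides a side effect
         * in the controlling expression, which MISRA C flags; splitting
         * the decrement out is equivalent and reads more clearly.
         */
        do {
            /* poll the lock bit here */
            timeout--;
        } while (timeout > 0U);

        return 0;
    }

The same reasoning appears to drive the (int) and (u64) casts in clk_enbale_pll_dvfs(), clk_lock_gpc_pll_under_bypass() and gm20b_clk_get_voltage(): uvdet_slope and uvdet_offs are signed quantities, so conversions are spelled out rather than left implicit.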
diff --git a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
index 2327589f8..2d4d81b62 100644
--- a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
@@ -94,7 +94,7 @@ void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
     struct nvgpu_timeout timeout;
 
     /* trigger faults for all bad engines */
-    for_each_set_bit(engine_id, &engine_ids, 32) {
+    for_each_set_bit(engine_id, &engine_ids, 32UL) {
         if (!gk20a_fifo_is_valid_engine_id(g, engine_id)) {
             nvgpu_err(g, "faulting unknown engine %ld", engine_id);
         } else {
@@ -118,7 +118,7 @@ void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
             break;
         }
 
-        nvgpu_usleep_range(delay, delay * 2);
+        nvgpu_usleep_range(delay, delay * 2UL);
         delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
     } while (nvgpu_timeout_expired(&timeout) == 0);
 
@@ -127,7 +127,7 @@ void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
     }
 
     /* release mmu fault trigger */
-    for_each_set_bit(engine_id, &engine_ids, 32) {
+    for_each_set_bit(engine_id, &engine_ids, 32UL) {
         gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id), 0);
     }
 }
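
The fifo_gm20b.c change sizes the for_each_set_bit() walk with 32UL rather than 32, presumably because the helper takes the bitmap length in the same unsigned long type as the bitmap word itself. Open-coded, the loop is roughly equivalent to the sketch below; engine_ids and the printf stand in for the real bitmap and the fault-trigger register write:

    #include <stdio.h>

    int main(void)
    {
        unsigned long engine_ids = 0x5UL; /* engines 0 and 2 faulted */

        /* rough equivalent of for_each_set_bit(engine_id, &engine_ids, 32UL) */
        for (unsigned long engine_id = 0UL; engine_id < 32UL; engine_id++) {
            if ((engine_ids & (1UL << engine_id)) != 0UL) {
                printf("trigger mmu fault for engine %lu\n", engine_id);
            }
        }
        return 0;
    }

The delay * 2UL in the poll loop is the same idea: the product feeds nvgpu_usleep_range(), which presumably takes unsigned long microsecond bounds, so the multiplier is kept in that type.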
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
index fb85a253f..94906d723 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
@@ -251,7 +251,7 @@ int gr_gm20b_commit_global_cb_manager(struct gk20a *g,
             gr_gk20a_ctx_patch_write(g, ch_ctx,
                 gr_gpcs_swdx_tc_beta_cb_size_r(ppc_index + temp2),
                 gr_gpcs_swdx_tc_beta_cb_size_v_f(cbm_cfg_size1) |
-                gr_gpcs_swdx_tc_beta_cb_size_div3_f(cbm_cfg_size1/3),
+                gr_gpcs_swdx_tc_beta_cb_size_div3_f(cbm_cfg_size1/3U),
                 patch);
         }
     }
@@ -332,7 +332,7 @@ void gr_gm20b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
     struct gr_gk20a *gr = &g->gr;
     u32 gpc_index, ppc_index, stride, val;
     u32 pd_ab_max_output;
-    u32 alpha_cb_size = data * 4;
+    u32 alpha_cb_size = data * 4U;
     u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
     u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
 
@@ -382,7 +382,7 @@ void gr_gm20b_set_circular_buffer_size(struct gk20a *g, u32 data)
 {
     struct gr_gk20a *gr = &g->gr;
     u32 gpc_index, ppc_index, stride, val;
-    u32 cb_size = data * 4;
+    u32 cb_size = data * 4U;
     u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
     u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
 
@@ -426,7 +426,7 @@ void gr_gm20b_set_circular_buffer_size(struct gk20a *g, u32 data)
                 val = set_field(val,
                     gr_gpcs_swdx_tc_beta_cb_size_div3_m(),
                     gr_gpcs_swdx_tc_beta_cb_size_div3_f((cb_size *
-                        gr->gpc_ppc_count[gpc_index])/3));
+                        gr->gpc_ppc_count[gpc_index])/3U));
 
                 gk20a_writel(g, gr_gpcs_swdx_tc_beta_cb_size_r(
                     ppc_index + gpc_index), val);
@@ -517,7 +517,7 @@ static u32 _sm_dsm_perf_ctrl_regs[2];
 
 void gr_gm20b_init_sm_dsm_reg_info(void)
 {
-    if (_sm_dsm_perf_ctrl_regs[0] != 0) {
+    if (_sm_dsm_perf_ctrl_regs[0] != 0U) {
         return;
     }
 
@@ -574,7 +574,7 @@ u32 gr_gm20b_get_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
 
     /* Toggle the bits of NV_FUSE_STATUS_OPT_TPC_GPC */
     val = g->ops.fuse.fuse_status_opt_tpc_gpc(g, gpc_index);
 
-    return (~val) & ((0x1 << gr->max_tpc_per_gpc_count) - 1);
+    return (~val) & (BIT32(gr->max_tpc_per_gpc_count) - 1U);
 }
 
 void gr_gm20b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
@@ -582,10 +582,10 @@ void gr_gm20b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
     nvgpu_tegra_fuse_write_bypass(g, 0x1);
     nvgpu_tegra_fuse_write_access_sw(g, 0x0);
 
-    if (g->gr.gpc_tpc_mask[gpc_index] == 0x1) {
+    if (g->gr.gpc_tpc_mask[gpc_index] == 0x1U) {
         nvgpu_tegra_fuse_write_opt_gpu_tpc0_disable(g, 0x0);
         nvgpu_tegra_fuse_write_opt_gpu_tpc1_disable(g, 0x1);
-    } else if (g->gr.gpc_tpc_mask[gpc_index] == 0x2) {
+    } else if (g->gr.gpc_tpc_mask[gpc_index] == 0x2U) {
         nvgpu_tegra_fuse_write_opt_gpu_tpc0_disable(g, 0x1);
         nvgpu_tegra_fuse_write_opt_gpu_tpc1_disable(g, 0x0);
     } else {
@@ -649,13 +649,13 @@ int gr_gm20b_load_smid_config(struct gk20a *g)
     }
 
     /* Each NV_PGRAPH_PRI_CWD_GPC_TPC_ID can store 4 TPCs.*/
-    for (i = 0; i <= ((g->gr.tpc_count-1) / 4); i++) {
+    for (i = 0U; i <= ((g->gr.tpc_count-1U) / 4U); i++) {
         u32 reg = 0;
         u32 bit_stride = gr_cwd_gpc_tpc_id_gpc0_s() +
                  gr_cwd_gpc_tpc_id_tpc0_s();
 
-        for (j = 0; j < 4; j++) {
-            u32 sm_id = (i * 4) + j;
+        for (j = 0U; j < 4U; j++) {
+            u32 sm_id = (i * 4U) + j;
             u32 bits;
 
             if (sm_id >= g->gr.tpc_count) {
@@ -830,10 +830,10 @@ int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
         /* bind WPR VA inst block */
         gr_gk20a_load_falcon_bind_instblk(g);
         if (g->ops.pmu.is_lazy_bootstrap(LSF_FALCON_ID_FECS)) {
-            falcon_id_mask |= (1 << LSF_FALCON_ID_FECS);
+            falcon_id_mask |= BIT8(LSF_FALCON_ID_FECS);
         }
         if (g->ops.pmu.is_lazy_bootstrap(LSF_FALCON_ID_GPCCS)) {
-            falcon_id_mask |= (1 << LSF_FALCON_ID_GPCCS);
+            falcon_id_mask |= BIT8(LSF_FALCON_ID_GPCCS);
         }
 
         if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SEC2_RTOS)) {
@@ -989,7 +989,7 @@ int gr_gm20b_dump_gr_status_regs(struct gk20a *g,
         gk20a_readl(g, gr_pri_gpc0_gpccs_gpc_activity3_r()));
     gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC0_TPCCS_TPC_ACTIVITY0: 0x%x\n",
         gk20a_readl(g, gr_pri_gpc0_tpc0_tpccs_tpc_activity_0_r()));
-    if ((gr->gpc_tpc_count != NULL) && (gr->gpc_tpc_count[0] == 2)) {
+    if ((gr->gpc_tpc_count != NULL) && (gr->gpc_tpc_count[0] == 2U)) {
         gk20a_debug_output(o,
             "NV_PGRAPH_PRI_GPC0_TPC1_TPCCS_TPC_ACTIVITY0: 0x%x\n",
             gk20a_readl(g, gr_pri_gpc0_tpc1_tpccs_tpc_activity_0_r()));
     }
@@ -1005,7 +1005,7 @@ int gr_gm20b_dump_gr_status_regs(struct gk20a *g,
         gk20a_readl(g, gr_pri_gpcs_gpccs_gpc_activity_3_r()));
     gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPC0_TPCCS_TPC_ACTIVITY0: 0x%x\n",
         gk20a_readl(g, gr_pri_gpcs_tpc0_tpccs_tpc_activity_0_r()));
-    if ((gr->gpc_tpc_count != NULL) && (gr->gpc_tpc_count[0] == 2)) {
+    if ((gr->gpc_tpc_count != NULL) && (gr->gpc_tpc_count[0] == 2U)) {
         gk20a_debug_output(o,
             "NV_PGRAPH_PRI_GPCS_TPC1_TPCCS_TPC_ACTIVITY0: 0x%x\n",
             gk20a_readl(g, gr_pri_gpcs_tpc1_tpccs_tpc_activity_0_r()));
     }
@@ -1129,7 +1129,7 @@ u32 gr_gm20b_get_fbp_en_mask(struct gk20a *g)
     */
    fbp_en_mask = g->ops.fuse.fuse_status_opt_fbp(g);
    fbp_en_mask = ~fbp_en_mask;
-   fbp_en_mask = fbp_en_mask & ((1 << max_fbps_count) - 1);
+   fbp_en_mask = fbp_en_mask & (BIT32(max_fbps_count) - 1U);
 
    return fbp_en_mask;
 }
@@ -1225,15 +1225,15 @@ void gr_gm20b_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state)
         reg_offset = tpc_offset + gpc_offset;
 
         /* 64 bit read */
-        warps_valid = (u64)gk20a_readl(g, gr_gpc0_tpc0_sm_warp_valid_mask_r() + reg_offset + 4) << 32;
+        warps_valid = (u64)gk20a_readl(g, gr_gpc0_tpc0_sm_warp_valid_mask_r() + reg_offset + 4U) << 32;
         warps_valid |= gk20a_readl(g, gr_gpc0_tpc0_sm_warp_valid_mask_r() + reg_offset);
 
         /* 64 bit read */
-        warps_paused = (u64)gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_pause_mask_r() + reg_offset + 4) << 32;
+        warps_paused = (u64)gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_pause_mask_r() + reg_offset + 4U) << 32;
         warps_paused |= gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_pause_mask_r() + reg_offset);
 
         /* 64 bit read */
-        warps_trapped = (u64)gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_r() + reg_offset + 4) << 32;
+        warps_trapped = (u64)gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_r() + reg_offset + 4U) << 32;
         warps_trapped |= gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_r() + reg_offset);
 
         w_state[sm_id].valid_warps[0] = warps_valid;
@@ -1241,17 +1241,17 @@ void gr_gm20b_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state)
         w_state[sm_id].paused_warps[0] = warps_paused;
 
-        if (numWarpPerTpc > 64) {
+        if (numWarpPerTpc > 64U) {
             /* 64 bit read */
-            warps_valid = (u64)gk20a_readl(g, gr_gpc0_tpc0_sm_warp_valid_mask_2_r() + reg_offset + 4) << 32;
+            warps_valid = (u64)gk20a_readl(g, gr_gpc0_tpc0_sm_warp_valid_mask_2_r() + reg_offset + 4U) << 32;
             warps_valid |= gk20a_readl(g, gr_gpc0_tpc0_sm_warp_valid_mask_2_r() + reg_offset);
 
             /* 64 bit read */
-            warps_paused = (u64)gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_pause_mask_2_r() + reg_offset + 4) << 32;
+            warps_paused = (u64)gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_pause_mask_2_r() + reg_offset + 4U) << 32;
             warps_paused |= gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_pause_mask_2_r() + reg_offset);
 
             /* 64 bit read */
-            warps_trapped = (u64)gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_2_r() + reg_offset + 4) << 32;
+            warps_trapped = (u64)gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_2_r() + reg_offset + 4U) << 32;
             warps_trapped |= gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_bpt_trap_mask_2_r() + reg_offset);
 
             w_state[sm_id].valid_warps[1] = warps_valid;
@@ -1468,7 +1468,7 @@ void gm20a_gr_disable_rd_coalesce(struct gk20a *g)
 
 u32 gr_gm20b_get_pmm_per_chiplet_offset(void)
 {
-    return (perf_pmmsys_extent_v() - perf_pmmsys_base_v() + 1);
+    return (perf_pmmsys_extent_v() - perf_pmmsys_base_v() + 1U);
 }
 
 void gm20b_gr_set_debug_mode(struct gk20a *g, bool enable)
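
Two patterns recur in gr_gm20b.c. First, masks are now built as BIT32(n) - 1U instead of (1 << n) - 1: shifting a plain int 1 left by 31 is undefined behavior, while an unsigned 32-bit shift is well defined for the full 0..31 range. Second, the 64-bit warp masks are assembled from two 32-bit reads, high word at offset + 4U. A self-contained sketch of both; BIT32() is assumed here to expand to an unsigned shift, and read32() is a stand-in for gk20a_readl():

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t u32;
    typedef uint64_t u64;

    #define BIT32(i) (1U << (i)) /* assumption: nvgpu's BIT32 is equivalent */

    /* stand-in for gk20a_readl(); returns fake register contents */
    static u32 read32(u32 offset)
    {
        return ((offset & 4U) != 0U) ? 0xdeadbeefU : 0xcafef00dU;
    }

    int main(void)
    {
        u32 fuse_val = 0x1U;        /* TPC0 marked disabled in the fuse */
        u32 max_tpc_per_gpc = 2U;

        /* enabled-TPC mask: invert the disable fuse, keep the low n bits */
        u32 tpc_mask = (~fuse_val) & (BIT32(max_tpc_per_gpc) - 1U);
        printf("tpc mask: 0x%x\n", tpc_mask); /* 0x2 */

        /* 64-bit warp mask spliced from two 32-bit registers */
        u32 base = 0x100U;
        u64 warps = (u64)read32(base + 4U) << 32;
        warps |= read32(base);
        printf("warps: 0x%llx\n", (unsigned long long)warps);
        return 0;
    }

The BIT8() used for falcon_id_mask in gr_gm20b_load_ctxsw_ucode() follows the same scheme at u8 width, since that mask only ever holds the FECS and GPCCS bits.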
diff --git a/drivers/gpu/nvgpu/include/nvgpu/acr/acr_lsfm.h b/drivers/gpu/nvgpu/include/nvgpu/acr/acr_lsfm.h
index 939aa2d17..fd82e5706 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/acr/acr_lsfm.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/acr/acr_lsfm.h
@@ -31,17 +31,17 @@
 * READ/WRITE masks for WPR region
 */
 /* Readable only from level 2 and 3 client */
-#define LSF_WPR_REGION_RMASK (0xC)
+#define LSF_WPR_REGION_RMASK (0xCU)
 /* Writable only from level 2 and 3 client */
-#define LSF_WPR_REGION_WMASK (0xC)
+#define LSF_WPR_REGION_WMASK (0xCU)
 /* Readable only from level 3 client */
-#define LSF_WPR_REGION_RMASK_SUB_WPR_ENABLED (0x8)
+#define LSF_WPR_REGION_RMASK_SUB_WPR_ENABLED (0x8U)
 /* Writable only from level 3 client */
-#define LSF_WPR_REGION_WMASK_SUB_WPR_ENABLED (0x8)
+#define LSF_WPR_REGION_WMASK_SUB_WPR_ENABLED (0x8U)
 /* Disallow read mis-match for all clients */
-#define LSF_WPR_REGION_ALLOW_READ_MISMATCH_NO (0x0)
+#define LSF_WPR_REGION_ALLOW_READ_MISMATCH_NO (0x0U)
 /* Disallow write mis-match for all clients */
-#define LSF_WPR_REGION_ALLOW_WRITE_MISMATCH_NO (0x0)
+#define LSF_WPR_REGION_ALLOW_WRITE_MISMATCH_NO (0x0U)
 
 /*
 * Falcon Id Defines
@@ -124,7 +124,7 @@ enum {
 #define LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_MAX \
     LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_PLAYREADY_SHARED_DATA
 
-#define LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_INVALID (0xFFFFFFFF)
+#define LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_INVALID (0xFFFFFFFFU)
 
 #define MAX_SUPPORTED_SHARED_SUB_WPR_USE_CASES \
     LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_MAX
 
@@ -132,9 +132,9 @@ enum {
 /* Static sizes of shared subWPRs */
 /* Minimum granularity supported is 4K */
 /* 1MB in 4K */
-#define LSF_SHARED_DATA_SUB_WPR_FRTS_VBIOS_TABLES_SIZE_IN_4K (0x100)
+#define LSF_SHARED_DATA_SUB_WPR_FRTS_VBIOS_TABLES_SIZE_IN_4K (0x100U)
 /* 4K */
-#define LSF_SHARED_DATA_SUB_WPR_PLAYREADY_SHARED_DATA_SIZE_IN_4K (0x1)
+#define LSF_SHARED_DATA_SUB_WPR_PLAYREADY_SHARED_DATA_SIZE_IN_4K (0x1U)
 
 /*
 * Bootstrap Owner Defines
@@ -144,13 +144,13 @@ enum {
 /*
 * Image Status Defines
 */
-#define LSF_IMAGE_STATUS_NONE (0)
-#define LSF_IMAGE_STATUS_COPY (1)
-#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED (2)
-#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED (3)
-#define LSF_IMAGE_STATUS_VALIDATION_DONE (4)
-#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED (5)
-#define LSF_IMAGE_STATUS_BOOTSTRAP_READY (6)
+#define LSF_IMAGE_STATUS_NONE (0U)
+#define LSF_IMAGE_STATUS_COPY (1U)
+#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED (2U)
+#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED (3U)
+#define LSF_IMAGE_STATUS_VALIDATION_DONE (4U)
+#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED (5U)
+#define LSF_IMAGE_STATUS_BOOTSTRAP_READY (6U)
 
 /*Light Secure Bootstrap header related defines*/
 #define NV_FLCN_ACR_LSF_FLAG_LOAD_CODE_AT_0_FALSE 0U
@@ -227,18 +227,18 @@ struct lsf_lsb_header_v1 {
             LSF_SUB_WPR_HEADER_ALIGNMENT))
 
-#define LSF_UCODE_DATA_ALIGNMENT 4096
+#define LSF_UCODE_DATA_ALIGNMENT 4096U
 
 /* Defined for 1MB alignment */
-#define SHIFT_1MB (20)
-#define SHIFT_4KB (12)
+#define SHIFT_1MB (20U)
+#define SHIFT_4KB (12U)
 
 /*
 * Supporting maximum of 2 regions.
 * This is needed to pre-allocate space in DMEM
 */
-#define NVGPU_FLCN_ACR_MAX_REGIONS (2)
-#define LSF_BOOTSTRAP_OWNER_RESERVED_DMEM_SIZE (0x200)
+#define NVGPU_FLCN_ACR_MAX_REGIONS (2U)
+#define LSF_BOOTSTRAP_OWNER_RESERVED_DMEM_SIZE (0x200U)
 
 /*
 * start_addr - Starting address of region
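
The subWPR size constants in acr_lsfm.h are counts of 4 KiB units, and SHIFT_4KB/SHIFT_1MB convert between bytes and those units. A quick arithmetic check of the "1MB in 4K" comment, using only values from this header:

    #include <stdint.h>
    #include <stdio.h>

    #define SHIFT_1MB (20U)
    #define SHIFT_4KB (12U)
    #define LSF_SHARED_DATA_SUB_WPR_FRTS_VBIOS_TABLES_SIZE_IN_4K (0x100U)

    int main(void)
    {
        /* 0x100 pages * 4 KiB/page = 0x100000 bytes = 1 MiB */
        uint32_t bytes =
            LSF_SHARED_DATA_SUB_WPR_FRTS_VBIOS_TABLES_SIZE_IN_4K << SHIFT_4KB;
        printf("FRTS subWPR: 0x%x bytes (%u MiB)\n", bytes, bytes >> SHIFT_1MB);
        return 0;
    }

Making these defines unsigned keeps the shift and mask arithmetic built on top of them in u32, consistent with the rest of the series.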
diff --git a/drivers/gpu/nvgpu/include/nvgpu/acr/acr_objflcn.h b/drivers/gpu/nvgpu/include/nvgpu/acr/acr_objflcn.h
index 6164f90d2..1d6ef5674 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/acr/acr_objflcn.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/acr/acr_objflcn.h
@@ -56,36 +56,36 @@ struct flcn_ucode_img_v1 {
 /*
 * Falcon UCODE header index.
 */
-#define FLCN_NL_UCODE_HDR_OS_CODE_OFF_IND (0)
-#define FLCN_NL_UCODE_HDR_OS_CODE_SIZE_IND (1)
-#define FLCN_NL_UCODE_HDR_OS_DATA_OFF_IND (2)
-#define FLCN_NL_UCODE_HDR_OS_DATA_SIZE_IND (3)
-#define FLCN_NL_UCODE_HDR_NUM_APPS_IND (4)
+#define FLCN_NL_UCODE_HDR_OS_CODE_OFF_IND (0U)
+#define FLCN_NL_UCODE_HDR_OS_CODE_SIZE_IND (1U)
+#define FLCN_NL_UCODE_HDR_OS_DATA_OFF_IND (2U)
+#define FLCN_NL_UCODE_HDR_OS_DATA_SIZE_IND (3U)
+#define FLCN_NL_UCODE_HDR_NUM_APPS_IND (4U)
 
 /*
 * There are total N number of Apps with code and offset defined in UCODE header
 * This macro provides the CODE and DATA offset and size of Ath application.
 */
-#define FLCN_NL_UCODE_HDR_APP_CODE_START_IND (5)
+#define FLCN_NL_UCODE_HDR_APP_CODE_START_IND (5U)
 #define FLCN_NL_UCODE_HDR_APP_CODE_OFF_IND(N, A) \
-    (FLCN_NL_UCODE_HDR_APP_CODE_START_IND + ((A)*2))
+    (FLCN_NL_UCODE_HDR_APP_CODE_START_IND + ((A)*2U))
 #define FLCN_NL_UCODE_HDR_APP_CODE_SIZE_IND(N, A) \
-    (FLCN_NL_UCODE_HDR_APP_CODE_START_IND + ((A)*2) + 1)
+    (FLCN_NL_UCODE_HDR_APP_CODE_START_IND + ((A)*2U) + 1U)
 #define FLCN_NL_UCODE_HDR_APP_CODE_END_IND(N) \
-    (FLCN_NL_UCODE_HDR_APP_CODE_START_IND + ((N)*2) - 1)
+    (FLCN_NL_UCODE_HDR_APP_CODE_START_IND + ((N)*2U) - 1U)
 
 #define FLCN_NL_UCODE_HDR_APP_DATA_START_IND(N) \
-    (FLCN_NL_UCODE_HDR_APP_CODE_END_IND(N) + 1)
+    (FLCN_NL_UCODE_HDR_APP_CODE_END_IND(N) + 1U)
 #define FLCN_NL_UCODE_HDR_APP_DATA_OFF_IND(N, A) \
-    (FLCN_NL_UCODE_HDR_APP_DATA_START_IND(N) + ((A)*2))
+    (FLCN_NL_UCODE_HDR_APP_DATA_START_IND(N) + ((A)*2U))
 #define FLCN_NL_UCODE_HDR_APP_DATA_SIZE_IND(N, A) \
-    (FLCN_NL_UCODE_HDR_APP_DATA_START_IND(N) + ((A)*2) + 1)
+    (FLCN_NL_UCODE_HDR_APP_DATA_START_IND(N) + ((A)*2U) + 1U)
 #define FLCN_NL_UCODE_HDR_APP_DATA_END_IND(N) \
-    (FLCN_NL_UCODE_HDR_APP_DATA_START_IND(N) + ((N)*2) - 1)
+    (FLCN_NL_UCODE_HDR_APP_DATA_START_IND(N) + ((N)*2U) - 1U)
 
 #define FLCN_NL_UCODE_HDR_OS_OVL_OFF_IND(N) \
-    (FLCN_NL_UCODE_HDR_APP_DATA_END_IND(N) + 1)
+    (FLCN_NL_UCODE_HDR_APP_DATA_END_IND(N) + 1U)
 #define FLCN_NL_UCODE_HDR_OS_OVL_SIZE_IND(N) \
-    (FLCN_NL_UCODE_HDR_APP_DATA_END_IND(N) + 2)
+    (FLCN_NL_UCODE_HDR_APP_DATA_END_IND(N) + 2U)
 
 #endif /* NVGPU_ACR_OBJFLCN_H */
diff --git a/drivers/gpu/nvgpu/include/nvgpu/acr/nvgpu_acr.h b/drivers/gpu/nvgpu/include/nvgpu/acr/nvgpu_acr.h
index 0c3561b51..96ea7bdb0 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/acr/nvgpu_acr.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/acr/nvgpu_acr.h
@@ -49,9 +49,9 @@ struct nvgpu_acr;
 #define LSF_SEC2_UCODE_DESC_BIN "sec2_ucode_desc.bin"
 #define LSF_SEC2_UCODE_SIG_BIN "sec2_sig.bin"
 
-#define MAX_SUPPORTED_LSFM 3 /*PMU, FECS, GPCCS*/
+#define MAX_SUPPORTED_LSFM 3U /*PMU, FECS, GPCCS*/
 
-#define ACR_COMPLETION_TIMEOUT_MS 10000 /*in msec */
+#define ACR_COMPLETION_TIMEOUT_MS 10000U /*in msec */
 
 #define PMU_SECURE_MODE BIT8(0)
 #define PMU_LSFM_MANAGED BIT8(1)
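
The FLCN_NL_UCODE_HDR_* indices in acr_objflcn.h describe a flat u32 header: OS code/data offset and size words, an app count N, N (offset, size) pairs for app code, N pairs for app data, and finally the OS overlay pair. The U suffixes keep index arithmetic like ((A)*2U) + 1U unsigned throughout. A sketch of walking such a header; the array contents below are invented purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define FLCN_NL_UCODE_HDR_NUM_APPS_IND       (4U)
    #define FLCN_NL_UCODE_HDR_APP_CODE_START_IND (5U)
    #define FLCN_NL_UCODE_HDR_APP_CODE_OFF_IND(N, A) \
        (FLCN_NL_UCODE_HDR_APP_CODE_START_IND + ((A)*2U))
    #define FLCN_NL_UCODE_HDR_APP_CODE_SIZE_IND(N, A) \
        (FLCN_NL_UCODE_HDR_APP_CODE_START_IND + ((A)*2U) + 1U)

    int main(void)
    {
        /* hypothetical header: one app, code at 0x400, size 0x200 */
        uint32_t hdr[16] = { 0x0U, 0x100U, 0x100U, 0x80U, 1U, 0x400U, 0x200U };
        uint32_t num_apps = hdr[FLCN_NL_UCODE_HDR_NUM_APPS_IND];

        for (uint32_t a = 0U; a < num_apps; a++) {
            printf("app %u: code off 0x%x, size 0x%x\n", a,
                   hdr[FLCN_NL_UCODE_HDR_APP_CODE_OFF_IND(num_apps, a)],
                   hdr[FLCN_NL_UCODE_HDR_APP_CODE_SIZE_IND(num_apps, a)]);
        }
        return 0;
    }

MAX_SUPPORTED_LSFM and ACR_COMPLETION_TIMEOUT_MS in nvgpu_acr.h get the same U treatment, presumably because they are compared against unsigned counters and timeout values.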
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_acr.h b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_acr.h
index c305589c5..f3230c7df 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_acr.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_acr.h
@@ -61,8 +61,8 @@ struct pmu_acr_cmd_bootstrap_multiple_falcons {
     struct falc_u64 wprvirtualbase;
 };
 
-#define PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO 1
-#define PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES 0
+#define PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO 1U
+#define PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES 0U
 
 struct pmu_acr_cmd {
@@ -79,18 +79,18 @@ struct pmu_acr_cmd {
 /*
 * returns the WPR region init information
 */
-#define PMU_ACR_MSG_ID_INIT_WPR_REGION 0
+#define PMU_ACR_MSG_ID_INIT_WPR_REGION 0U
 
 /*
 * Returns the Bootstrapped falcon ID to RM
 */
-#define PMU_ACR_MSG_ID_BOOTSTRAP_FALCON 1
+#define PMU_ACR_MSG_ID_BOOTSTRAP_FALCON 1U
 
 /*
 * Returns the WPR init status
 */
-#define PMU_ACR_SUCCESS 0
-#define PMU_ACR_ERROR 1
+#define PMU_ACR_SUCCESS 0U
+#define PMU_ACR_ERROR 1U
 
 /*
 * PMU notifies about bootstrap status of falcon
@@ -111,11 +111,11 @@ struct pmu_acr_msg {
 };
 
 /* ACR RPC */
-#define NV_PMU_RPC_ID_ACR_INIT_WPR_REGION 0x00
-#define NV_PMU_RPC_ID_ACR_WRITE_CBC_BASE 0x01
-#define NV_PMU_RPC_ID_ACR_BOOTSTRAP_FALCON 0x02
-#define NV_PMU_RPC_ID_ACR_BOOTSTRAP_GR_FALCONS 0x03
-#define NV_PMU_RPC_ID_ACR__COUNT 0x04
+#define NV_PMU_RPC_ID_ACR_INIT_WPR_REGION 0x00U
+#define NV_PMU_RPC_ID_ACR_WRITE_CBC_BASE 0x01U
+#define NV_PMU_RPC_ID_ACR_BOOTSTRAP_FALCON 0x02U
+#define NV_PMU_RPC_ID_ACR_BOOTSTRAP_GR_FALCONS 0x03U
+#define NV_PMU_RPC_ID_ACR__COUNT 0x04U
 
 /*
 * structure that holds data used