mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-24 10:34:43 +03:00
gpu: nvgpu: fix MISRA 17.7 in gm20b
MISRA Rule-17.7 requires the return value of all functions to be used. Fix is either to use the return value or change the function to return void. This patch contains fix for all 17.7 violations in gm20b files. JIRA NVGPU-677 Change-Id: I63182d52213494f871c187b5efc1637bc36bdf3d Signed-off-by: Nicolas Benech <nbenech@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/2003230 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
committed by
mobile promotions
parent
3c55163713
commit
6573828d01
@@ -139,7 +139,7 @@ static u32 get_interim_pldiv(struct gk20a *g, u32 old_pl, u32 new_pl)
|
||||
vco_f = u_f * N = ref_clk_f * N / M;
|
||||
PLL output = gpc2clk = target clock frequency = vco_f / pl_to_pdiv(PL);
|
||||
gpcclk = gpc2clk / 2; */
|
||||
static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
|
||||
static void clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
|
||||
struct pll_parms *pll_params, u32 *target_freq, bool best_fit)
|
||||
{
|
||||
struct gk20a *g = clk->g;
|
||||
@@ -258,8 +258,6 @@ found_match:
|
||||
*target_freq, pll->M, pll->N, pll->PL, nvgpu_pl_to_div(pll->PL));
|
||||
|
||||
nvgpu_log_fn(g, "done");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* GPCPLL NA/DVFS mode methods */
|
||||
@@ -312,7 +310,7 @@ static bool nvgpu_fuse_can_use_na_gpcpll(struct gk20a *g)
|
||||
* Read ADC characteristic parameters from fuses.
|
||||
* Determine calibration settings.
|
||||
*/
|
||||
static int clk_config_calibration_params(struct gk20a *g)
|
||||
static void clk_config_calibration_params(struct gk20a *g)
|
||||
{
|
||||
int slope, offs;
|
||||
struct pll_parms *p = &gpc_pll_params;
|
||||
@@ -329,9 +327,7 @@ static int clk_config_calibration_params(struct gk20a *g)
|
||||
* boot internal calibration with default slope.
|
||||
*/
|
||||
nvgpu_err(g, "ADC coeff are not fused");
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -735,7 +731,7 @@ static u32 throttle_disable(struct gk20a *g)
|
||||
}
|
||||
|
||||
/* GPCPLL bypass methods */
|
||||
static int clk_change_pldiv_under_bypass(struct gk20a *g, struct pll *gpll)
|
||||
static void clk_change_pldiv_under_bypass(struct gk20a *g, struct pll *gpll)
|
||||
{
|
||||
u32 data, coeff, throt;
|
||||
|
||||
@@ -762,11 +758,9 @@ static int clk_change_pldiv_under_bypass(struct gk20a *g, struct pll *gpll)
|
||||
trim_sys_sel_vco_gpc2clk_out_vco_f());
|
||||
gk20a_writel(g, trim_sys_sel_vco_r(), data);
|
||||
throttle_enable(g, throt);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
|
||||
static void clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
|
||||
{
|
||||
u32 data, cfg, coeff, timeout, throt;
|
||||
|
||||
@@ -863,7 +857,6 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
|
||||
/* PLL is messed up. What can we do here? */
|
||||
dump_gpc_pll(g, gpll, cfg);
|
||||
BUG();
|
||||
return -EBUSY;
|
||||
|
||||
pll_locked:
|
||||
gk20a_dbg_clk(g, "locked config_pll under bypass r=0x%x v=0x%x",
|
||||
@@ -882,8 +875,6 @@ pll_locked:
|
||||
trim_sys_sel_vco_gpc2clk_out_vco_f());
|
||||
gk20a_writel(g, trim_sys_sel_vco_r(), data);
|
||||
throttle_enable(g, throt);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1144,11 +1135,12 @@ static int clk_program_na_gpc_pll(struct gk20a *g, struct pll *gpll_new,
|
||||
return clk_program_gpc_pll(g, gpll_new, true);
|
||||
}
|
||||
|
||||
static int clk_disable_gpcpll(struct gk20a *g, bool allow_slide)
|
||||
static void clk_disable_gpcpll(struct gk20a *g, bool allow_slide)
|
||||
{
|
||||
u32 cfg, coeff, throt;
|
||||
struct clk_gk20a *clk = &g->clk;
|
||||
struct pll gpll = clk->gpc_pll;
|
||||
int err = 0;
|
||||
|
||||
/* slide to VCO min */
|
||||
cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
|
||||
@@ -1160,7 +1152,10 @@ static int clk_disable_gpcpll(struct gk20a *g, bool allow_slide)
|
||||
if (gpll.mode == GPC_PLL_MODE_DVFS) {
|
||||
clk_config_dvfs_ndiv(gpll.dvfs.mv, gpll.N, &gpll.dvfs);
|
||||
}
|
||||
clk_slide_gpc_pll(g, &gpll);
|
||||
err = clk_slide_gpc_pll(g, &gpll);
|
||||
if (err != 0) {
|
||||
nvgpu_err(g, "slide_gpc failed, err=%d", err);
|
||||
}
|
||||
}
|
||||
|
||||
/* put PLL in bypass before disabling it */
|
||||
@@ -1186,7 +1181,6 @@ static int clk_disable_gpcpll(struct gk20a *g, bool allow_slide)
|
||||
|
||||
clk->gpc_pll.enabled = false;
|
||||
clk->gpc_pll_last.enabled = false;
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct pll_parms *gm20b_get_gpc_pll_parms(void)
|
||||
@@ -1426,11 +1420,8 @@ static int set_pll_target(struct gk20a *g, u32 freq, u32 old_freq)
|
||||
|
||||
if (freq != old_freq) {
|
||||
/* gpc_pll.freq is changed to new value here */
|
||||
if (clk_config_pll(clk, &clk->gpc_pll, &gpc_pll_params,
|
||||
&freq, true) != 0) {
|
||||
nvgpu_err(g, "failed to set pll target for %d", freq);
|
||||
return -EINVAL;
|
||||
}
|
||||
clk_config_pll(clk, &clk->gpc_pll, &gpc_pll_params, &freq,
|
||||
true);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@@ -1511,7 +1502,7 @@ int gm20b_suspend_clk_support(struct gk20a *g)
|
||||
/* The prev call may not disable PLL if gbus is unbalanced - force it */
|
||||
nvgpu_mutex_acquire(&g->clk.clk_mutex);
|
||||
if (g->clk.gpc_pll.enabled) {
|
||||
ret = clk_disable_gpcpll(g, true);
|
||||
clk_disable_gpcpll(g, true);
|
||||
}
|
||||
g->clk.clk_hw_on = false;
|
||||
nvgpu_mutex_release(&g->clk.clk_mutex);
|
||||
|
||||
@@ -89,7 +89,7 @@ void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
|
||||
{
|
||||
unsigned long delay = GR_IDLE_CHECK_DEFAULT;
|
||||
unsigned long engine_id;
|
||||
int ret = -EBUSY;
|
||||
int ret;
|
||||
struct nvgpu_timeout timeout;
|
||||
|
||||
/* trigger faults for all bad engines */
|
||||
@@ -106,10 +106,14 @@ void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
|
||||
}
|
||||
}
|
||||
|
||||
nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
|
||||
ret = nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
|
||||
NVGPU_TIMER_CPU_TIMER);
|
||||
if (ret != 0) {
|
||||
nvgpu_err(g, "timeout init failed err=%d", ret);
|
||||
}
|
||||
|
||||
/* Wait for MMU fault to trigger */
|
||||
ret = -EBUSY;
|
||||
do {
|
||||
if ((gk20a_readl(g, fifo_intr_0_r()) &
|
||||
fifo_intr_0_mmu_fault_pending_f()) != 0U) {
|
||||
|
||||
@@ -696,12 +696,15 @@ int gr_gm20b_init_fs_state(struct gk20a *g)
|
||||
gk20a_readl(g, gr_be0_crop_debug3_r()) |
|
||||
gr_bes_crop_debug3_comp_vdc_4to2_disable_m());
|
||||
|
||||
g->ops.gr.load_smid_config(g);
|
||||
err = g->ops.gr.load_smid_config(g);
|
||||
if (err != 0) {
|
||||
nvgpu_err(g, "load_smid_config failed err=%d", err);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int gr_gm20b_load_ctxsw_ucode_segments(struct gk20a *g, u64 addr_base,
|
||||
void gr_gm20b_load_ctxsw_ucode_segments(struct gk20a *g, u64 addr_base,
|
||||
struct gk20a_ctxsw_ucode_segments *segments, u32 reg_offset)
|
||||
{
|
||||
gk20a_writel(g, reg_offset + gr_fecs_dmactl_r(),
|
||||
@@ -716,8 +719,6 @@ int gr_gm20b_load_ctxsw_ucode_segments(struct gk20a *g, u64 addr_base,
|
||||
gk20a_writel(g, reg_offset + gr_fecs_cpuctl_r(),
|
||||
gr_fecs_cpuctl_startcpu_f(0x01));
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool gr_gm20b_is_tpc_addr_shared(struct gk20a *g, u32 addr)
|
||||
|
||||
@@ -86,7 +86,7 @@ void gr_gm20b_load_tpc_mask(struct gk20a *g);
|
||||
void gr_gm20b_program_sm_id_numbering(struct gk20a *g,
|
||||
u32 gpc, u32 tpc, u32 smid);
|
||||
int gr_gm20b_load_smid_config(struct gk20a *g);
|
||||
int gr_gm20b_load_ctxsw_ucode_segments(struct gk20a *g, u64 addr_base,
|
||||
void gr_gm20b_load_ctxsw_ucode_segments(struct gk20a *g, u64 addr_base,
|
||||
struct gk20a_ctxsw_ucode_segments *segments, u32 reg_offset);
|
||||
bool gr_gm20b_is_tpc_addr(struct gk20a *g, u32 addr);
|
||||
u32 gr_gm20b_get_tpc_num(struct gk20a *g, u32 addr);
|
||||
|
||||
@@ -284,7 +284,7 @@ struct gpu_ops {
|
||||
void (*set_hww_esr_report_mask)(struct gk20a *g);
|
||||
int (*setup_alpha_beta_tables)(struct gk20a *g,
|
||||
struct gr_gk20a *gr);
|
||||
int (*falcon_load_ucode)(struct gk20a *g,
|
||||
void (*falcon_load_ucode)(struct gk20a *g,
|
||||
u64 addr_base,
|
||||
struct gk20a_ctxsw_ucode_segments *segments,
|
||||
u32 reg_offset);
|
||||
|
||||
Reference in New Issue
Block a user