gpu: nvgpu: MISRA 14.4 err/ret/status as boolean

MISRA Rule 14.4 does not allow integer types to be used as booleans
in the controlling expression of an if statement or an iteration
statement.

Fix violations where the integer variables err, ret, and status are
used as booleans in the controlling expressions of if and loop
statements.
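
For illustration, a minimal sketch of the pattern being corrected; the
helper function below is hypothetical and not taken from the nvgpu
sources:

static int do_work(void)
{
	return 0; /* hypothetical helper: 0 on success, negative on failure */
}

/* Non-compliant: the int 'err' is used directly as the controlling
 * expression, which Rule 14.4 flags. */
static int caller_before(void)
{
	int err = do_work();
	if (err) {
		return err;
	}
	return 0;
}

/* Compliant: an explicit comparison makes the controlling expression
 * essentially Boolean while keeping the same behaviour. */
static int caller_after(void)
{
	int err = do_work();
	if (err != 0) {
		return err;
	}
	return 0;
}

The same transformation applies to loop controlling expressions and to
the negated form (if (!err) becoming if (err == 0)), as seen in the
gv11b mm changes below.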

JIRA NVGPU-1019

Change-Id: I8c9ad786a741b78293d0ebc4e1c33d4d0fc8f9b4
Signed-off-by: Amurthyreddy <amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1921260
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Amurthyreddy
2018-10-08 10:54:08 +05:30
committed by mobile promotions
parent 745e720089
commit c94643155e
27 changed files with 203 additions and 203 deletions


@@ -103,7 +103,7 @@ int gp106_alloc_blob_space(struct gk20a *g,
err = nvgpu_dma_alloc_vid_at(g,
wpr_inf.size,
&g->acr.wpr_dummy, wpr_inf.wpr_base);
if (err) {
if (err != 0) {
return err;
}
@@ -150,7 +150,7 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
g->acr.pmu_desc = pmu_desc;
err = nvgpu_init_pmu_fw_support(pmu);
if (err) {
if (err != 0) {
nvgpu_err(g, "failed to set function pointers");
goto release_sig;
}
@@ -514,7 +514,7 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
/*Recovery case, we do not need to form
non WPR blob of ucodes*/
err = nvgpu_init_pmu_fw_support(pmu);
if (err) {
if (err != 0) {
gp106_dbg_pmu(g, "failed to set function pointers\n");
return err;
}
@@ -531,7 +531,7 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
/* Discover all managed falcons*/
err = lsfm_discover_ucode_images(g, plsfm);
gp106_dbg_pmu(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
if (err) {
if (err != 0) {
goto exit_err;
}
@@ -543,14 +543,14 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
(g->acr.ucode_blob.cpu_va == NULL)) {
/* Generate WPR requirements*/
err = lsf_gen_wpr_requirements(g, plsfm);
if (err) {
if (err != 0) {
goto exit_err;
}
/*Alloc memory to hold ucode blob contents*/
err = g->acr.alloc_blob_space(g, plsfm->wpr_size
,&g->acr.ucode_blob);
if (err) {
if (err != 0) {
goto exit_err;
}
@@ -588,7 +588,7 @@ int lsfm_discover_ucode_images(struct gk20a *g,
/* Obtain the PMU ucode image and add it to the list if required*/
memset(&ucode_img, 0, sizeof(ucode_img));
status = pmu_ucode_details(g, &ucode_img);
if (status) {
if (status != 0) {
return status;
}


@@ -219,7 +219,7 @@ int gp106_bios_init(struct gk20a *g)
}
err = nvgpu_bios_parse_rom(g);
if (err) {
if (err != 0) {
goto free_firmware;
}
@@ -236,7 +236,7 @@ int gp106_bios_init(struct gk20a *g)
if (g->ops.bios.devinit) {
err = g->ops.bios.devinit(g);
if (err) {
if (err != 0) {
nvgpu_err(g, "devinit failed");
goto free_firmware;
}
@@ -245,7 +245,7 @@ int gp106_bios_init(struct gk20a *g)
if (nvgpu_is_enabled(g, NVGPU_PMU_RUN_PREOS) &&
(g->ops.bios.preos != NULL)) {
err = g->ops.bios.preos(g);
if (err) {
if (err != 0) {
nvgpu_err(g, "pre-os failed");
goto free_firmware;
}
@@ -253,7 +253,7 @@ int gp106_bios_init(struct gk20a *g)
if (g->ops.bios.verify_devinit) {
err = g->ops.bios.verify_devinit(g);
if (err) {
if (err != 0) {
nvgpu_err(g, "devinit status verification failed");
goto free_firmware;
}


@@ -139,7 +139,7 @@ int gp106_init_clk_arbiter(struct gk20a *g)
arb->clk_arb_events_supported = true;
err = nvgpu_mutex_init(&arb->pstate_lock);
if (err)
if (err != 0)
goto mutex_fail;
nvgpu_spinlock_init(&arb->sessions_lock);
nvgpu_spinlock_init(&arb->users_lock);


@@ -77,12 +77,12 @@ unsigned long gp106_clk_measure_freq(struct gk20a *g, u32 api_domain)
int gp106_init_clk_support(struct gk20a *g)
{
struct clk_gk20a *clk = &g->clk;
u32 err = 0;
int err = 0;
nvgpu_log_fn(g, " ");
err = nvgpu_mutex_init(&clk->clk_mutex);
if (err) {
if (err != 0) {
return err;
}


@@ -192,7 +192,7 @@ int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g,
err = gr_gp10b_alloc_buffer(vm,
g->gr.ctx_vars.preempt_image_size,
&gr_ctx->preempt_ctxsw_buffer);
if (err) {
if (err != 0) {
nvgpu_err(g, "cannot allocate preempt buffer");
goto fail;
}
@@ -200,7 +200,7 @@ int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g,
err = gr_gp10b_alloc_buffer(vm,
spill_size,
&gr_ctx->spill_ctxsw_buffer);
if (err) {
if (err != 0) {
nvgpu_err(g, "cannot allocate spill buffer");
goto fail_free_preempt;
}
@@ -208,7 +208,7 @@ int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g,
err = gr_gp10b_alloc_buffer(vm,
attrib_cb_size,
&gr_ctx->betacb_ctxsw_buffer);
if (err) {
if (err != 0) {
nvgpu_err(g, "cannot allocate beta buffer");
goto fail_free_spill;
}
@@ -216,7 +216,7 @@ int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g,
err = gr_gp10b_alloc_buffer(vm,
pagepool_size,
&gr_ctx->pagepool_ctxsw_buffer);
if (err) {
if (err != 0) {
nvgpu_err(g, "cannot allocate page pool");
goto fail_free_betacb;
}


@@ -226,7 +226,7 @@ static int gp106_init_gpu_characteristics(struct gk20a *g)
int err;
err = gk20a_init_gpu_characteristics(g);
if (err) {
if (err != 0) {
return err;
}


@@ -3222,12 +3222,12 @@ int gp106_mclk_init(struct gk20a *g)
mclk = &g->clk_pmu.clk_mclk;
err = nvgpu_mutex_init(&mclk->mclk_lock);
if (err) {
if (err != 0) {
return err;
}
err = nvgpu_mutex_init(&mclk->data_lock);
if (err) {
if (err != 0) {
goto fail_mclk_mutex;
}
@@ -3400,7 +3400,7 @@ int gp106_mclk_change(struct gk20a *g, u16 val)
PMU_COMMAND_QUEUE_LPQ,
mclk_seq_pmucmdhandler,
&seq_running, &seqdesc, ~0UL);
if (status) {
if (status != 0) {
nvgpu_err(g, "unable to post seq script exec cmd for unit %x",
cmd.hdr.unit_id);
goto exit_status;


@@ -48,7 +48,7 @@ int gp10b_fecs_trace_flush(struct gk20a *g)
err = gr_gk20a_elpg_protected_call(g,
gr_gk20a_submit_fecs_method_op(g, op, false));
if (err)
if (err != 0)
nvgpu_err(g, "write timestamp record failed");
return err;


@@ -896,7 +896,7 @@ int gr_gp10b_init_ctx_state(struct gk20a *g)
nvgpu_log_fn(g, " ");
err = gr_gk20a_init_ctx_state(g);
if (err) {
if (err != 0) {
return err;
}
@@ -905,7 +905,7 @@ int gr_gp10b_init_ctx_state(struct gk20a *g)
gr_fecs_method_push_adr_discover_preemption_image_size_v();
op.mailbox.ret = &g->gr.ctx_vars.preempt_image_size;
err = gr_gk20a_submit_fecs_method_op(g, op, false);
if (err) {
if (err != 0) {
nvgpu_err(g, "query preempt image size failed");
return err;
}
@@ -928,7 +928,7 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
nvgpu_log_fn(g, " ");
err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem);
if (err) {
if (err != 0) {
return err;
}
@@ -1016,7 +1016,7 @@ int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
err = gr_gp10b_alloc_buffer(vm,
g->gr.ctx_vars.preempt_image_size,
&gr_ctx->preempt_ctxsw_buffer);
if (err) {
if (err != 0) {
nvgpu_err(g, "cannot allocate preempt buffer");
goto fail;
}
@@ -1024,7 +1024,7 @@ int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
err = gr_gp10b_alloc_buffer(vm,
spill_size,
&gr_ctx->spill_ctxsw_buffer);
if (err) {
if (err != 0) {
nvgpu_err(g, "cannot allocate spill buffer");
goto fail_free_preempt;
}
@@ -1032,7 +1032,7 @@ int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
err = gr_gp10b_alloc_buffer(vm,
attrib_cb_size,
&gr_ctx->betacb_ctxsw_buffer);
if (err) {
if (err != 0) {
nvgpu_err(g, "cannot allocate beta buffer");
goto fail_free_spill;
}
@@ -1040,7 +1040,7 @@ int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
err = gr_gp10b_alloc_buffer(vm,
pagepool_size,
&gr_ctx->pagepool_ctxsw_buffer);
if (err) {
if (err != 0) {
nvgpu_err(g, "cannot allocate page pool");
goto fail_free_betacb;
}
@@ -1094,7 +1094,7 @@ int gr_gp10b_alloc_gr_ctx(struct gk20a *g,
nvgpu_log_fn(g, " ");
err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags);
if (err) {
if (err != 0) {
return err;
}
@@ -1111,7 +1111,7 @@ int gr_gp10b_alloc_gr_ctx(struct gk20a *g,
if (g->ops.gr.set_ctxsw_preemption_mode) {
err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm,
class, graphics_preempt_mode, compute_preempt_mode);
if (err) {
if (err != 0) {
nvgpu_err(g, "set_ctxsw_preemption_mode failed");
goto fail_free_gk20a_ctx;
}
@@ -1238,7 +1238,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
}
err = gr_gk20a_ctx_patch_write_begin(g, gr_ctx, true);
if (err) {
if (err != 0) {
nvgpu_err(g, "can't map patch context");
goto out;
}
@@ -1692,14 +1692,14 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
ret = gk20a_disable_channel_tsg(g, fault_ch);
if (ret) {
if (ret != 0) {
nvgpu_err(g,
"CILP: failed to disable channel/TSG!");
return ret;
}
ret = g->ops.fifo.update_runlist(g, fault_ch->runlist_id, ~0, true, false);
if (ret) {
if (ret != 0) {
nvgpu_err(g,
"CILP: failed to restart runlist 0!");
return ret;
@@ -1751,7 +1751,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
"CILP: looking up ctx id");
ret = gr_gk20a_get_ctx_id(g, fault_ch, &gr_ctx->ctx_id);
if (ret) {
if (ret != 0) {
nvgpu_err(g, "CILP: error looking up ctx id!");
return ret;
}
@@ -1775,7 +1775,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
.cond.ok = GR_IS_UCODE_OP_EQUAL,
.cond.fail = GR_IS_UCODE_OP_SKIP});
if (ret) {
if (ret != 0) {
nvgpu_err(g, "CILP: failed to enable ctxsw interrupt!");
return ret;
}
@@ -1788,7 +1788,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
fault_ch->chid);
ret = gr_gp10b_disable_channel_or_tsg(g, fault_ch);
if (ret) {
if (ret != 0) {
nvgpu_err(g, "CILP: failed to disable channel!!");
return ret;
}
@@ -1918,7 +1918,7 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n");
ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch);
if (ret) {
if (ret != 0) {
nvgpu_err(g, "CILP: error while setting CILP preempt pending!");
return ret;
}
@@ -2005,7 +2005,7 @@ int gr_gp10b_handle_fecs_error(struct gk20a *g,
gr_fecs_host_int_clear_ctxsw_intr1_clear_f());
ret = gr_gp10b_get_cilp_preempt_pending_chid(g, &chid);
if (ret) {
if (ret != 0) {
goto clean_up;
}
@@ -2018,7 +2018,7 @@ int gr_gp10b_handle_fecs_error(struct gk20a *g,
/* set preempt_pending to false */
ret = gr_gp10b_clear_cilp_preempt_pending(g, ch);
if (ret) {
if (ret != 0) {
nvgpu_err(g, "CILP: error while unsetting CILP preempt pending!");
gk20a_channel_put(ch);
goto clean_up;
@@ -2093,7 +2093,7 @@ bool gr_gp10b_suspend_context(struct channel_gk20a *ch,
if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP) {
err = gr_gp10b_set_cilp_preempt_pending(g, ch);
if (err) {
if (err != 0) {
nvgpu_err(g, "unable to set CILP preempt pending");
} else {
*cilp_preempt_pending = true;
@@ -2126,7 +2126,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
nvgpu_mutex_acquire(&g->dbg_sessions_lock);
err = gr_gk20a_disable_ctxsw(g);
if (err) {
if (err != 0) {
nvgpu_err(g, "unable to stop gr ctxsw");
nvgpu_mutex_release(&g->dbg_sessions_lock);
goto clean_up;
@@ -2151,7 +2151,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
nvgpu_mutex_release(&dbg_s->ch_list_lock);
err = gr_gk20a_enable_ctxsw(g);
if (err) {
if (err != 0) {
nvgpu_mutex_release(&g->dbg_sessions_lock);
goto clean_up;
}
@@ -2217,12 +2217,12 @@ int gr_gp10b_set_boosted_ctx(struct channel_gk20a *ch,
mem = &gr_ctx->mem;
err = gk20a_disable_channel_tsg(g, ch);
if (err) {
if (err != 0) {
return err;
}
err = gk20a_fifo_preempt(g, ch);
if (err) {
if (err != 0) {
goto enable_ch;
}
@@ -2299,19 +2299,19 @@ int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
compute_preempt_mode);
err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm, class,
graphics_preempt_mode, compute_preempt_mode);
if (err) {
if (err != 0) {
nvgpu_err(g, "set_ctxsw_preemption_mode failed");
return err;
}
}
err = gk20a_disable_channel_tsg(g, ch);
if (err) {
if (err != 0) {
return err;
}
err = gk20a_fifo_preempt(g, ch);
if (err) {
if (err != 0) {
goto enable_ch;
}
@@ -2320,7 +2320,7 @@ int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
ch, mem);
err = gr_gk20a_ctx_patch_write_begin(g, gr_ctx, true);
if (err) {
if (err != 0) {
nvgpu_err(g, "can't map patch context");
goto enable_ch;
}


@@ -65,7 +65,7 @@ int gp10b_init_bar2_vm(struct gk20a *g)
/* allocate instance mem for bar2 */
err = g->ops.mm.alloc_inst_block(g, inst_block);
if (err) {
if (err != 0) {
goto clean_up_va;
}


@@ -242,7 +242,7 @@ int gr_gv100_init_sm_id_table(struct gk20a *g)
err = gr_gv100_scg_estimate_perf(g,
gpc_tpc_mask, gpc, tpc, &perf);
if (err) {
if (err != 0) {
nvgpu_err(g,
"Error while estimating perf");
goto exit_build_table;


@@ -278,7 +278,7 @@ int gv100_init_gpu_characteristics(struct gk20a *g)
int err;
err = gk20a_init_gpu_characteristics(g);
if (err)
if (err != 0)
return err;
__nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS, true);


@@ -219,7 +219,7 @@ static const char *__gv100_device_type_to_str(u32 type)
* Function prototypes
*/
static u32 __gv100_nvlink_get_link_reset_mask(struct gk20a *g);
static u32 gv100_nvlink_rxcal_en(struct gk20a *g, unsigned long mask);
static int gv100_nvlink_rxcal_en(struct gk20a *g, unsigned long mask);
/*
@@ -738,7 +738,7 @@ int gv100_nvlink_minion_send_command(struct gk20a *g, u32 link_id,
/* Check last command succeded */
err = gv100_nvlink_minion_command_complete(g, link_id);
if (err)
if (err != 0)
return -EINVAL;
nvgpu_log(g, gpu_dbg_nvlink,
@@ -763,10 +763,10 @@ int gv100_nvlink_minion_send_command(struct gk20a *g, u32 link_id,
/*
* Init UPHY
*/
static u32 gv100_nvlink_minion_init_uphy(struct gk20a *g, unsigned long mask,
static int gv100_nvlink_minion_init_uphy(struct gk20a *g, unsigned long mask,
bool sync)
{
u32 err = 0;
int err = 0;
u32 init_pll_cmd;
u32 link_id, master_pll, slave_pll;
u32 master_state, slave_state;
@@ -813,7 +813,7 @@ static u32 gv100_nvlink_minion_init_uphy(struct gk20a *g, unsigned long mask,
if (!(BIT(master_pll) & g->nvlink.init_pll_done)) {
err = gv100_nvlink_minion_send_command(g, master_pll,
init_pll_cmd, 0, sync);
if (err) {
if (err != 0) {
nvgpu_err(g, " Error sending INITPLL to minion");
return err;
}
@@ -823,7 +823,7 @@ static u32 gv100_nvlink_minion_init_uphy(struct gk20a *g, unsigned long mask,
}
err = g->ops.nvlink.setup_pll(g, mask);
if (err) {
if (err != 0) {
nvgpu_err(g, "Error setting up PLL");
return err;
}
@@ -832,7 +832,7 @@ static u32 gv100_nvlink_minion_init_uphy(struct gk20a *g, unsigned long mask,
for_each_set_bit(link_id, &mask, 32) {
err = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_initphy_v(), 0, sync);
if (err) {
if (err != 0) {
nvgpu_err(g, "Error on INITPHY minion DL command %u",
link_id);
return err;
@@ -845,10 +845,10 @@ static u32 gv100_nvlink_minion_init_uphy(struct gk20a *g, unsigned long mask,
/*
* Configure AC coupling
*/
static u32 gv100_nvlink_minion_configure_ac_coupling(struct gk20a *g,
static int gv100_nvlink_minion_configure_ac_coupling(struct gk20a *g,
unsigned long mask, bool sync)
{
u32 err = 0;
int err = 0;
u32 i;
u32 temp;
@@ -863,7 +863,7 @@ static u32 gv100_nvlink_minion_configure_ac_coupling(struct gk20a *g,
err = gv100_nvlink_minion_send_command(g, i,
minion_nvlink_dl_cmd_command_setacmode_v(), 0, sync);
if (err)
if (err != 0)
return err;
}
@@ -883,7 +883,7 @@ int gv100_nvlink_minion_data_ready_en(struct gk20a *g,
ret = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_initlaneenable_v(), 0,
sync);
if (ret) {
if (ret != 0) {
nvgpu_err(g, "Failed initlaneenable on link %u",
link_id);
return ret;
@@ -893,7 +893,7 @@ int gv100_nvlink_minion_data_ready_en(struct gk20a *g,
for_each_set_bit(link_id, &link_mask, 32) {
ret = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_initdlpl_v(), 0, sync);
if (ret) {
if (ret != 0) {
nvgpu_err(g, "Failed initdlpl on link %u", link_id);
return ret;
}
@@ -904,15 +904,15 @@ int gv100_nvlink_minion_data_ready_en(struct gk20a *g,
/*
* Request that minion disable the lane
*/
static u32 gv100_nvlink_minion_lane_disable(struct gk20a *g, u32 link_id,
static int gv100_nvlink_minion_lane_disable(struct gk20a *g, u32 link_id,
bool sync)
{
u32 err = 0;
int err = 0;
err = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_lanedisable_v(), 0, sync);
if (err)
if (err != 0)
nvgpu_err(g, " failed to disable lane on %d", link_id);
return err;
@@ -921,15 +921,15 @@ static u32 gv100_nvlink_minion_lane_disable(struct gk20a *g, u32 link_id,
/*
* Request that minion shutdown the lane
*/
static u32 gv100_nvlink_minion_lane_shutdown(struct gk20a *g, u32 link_id,
static int gv100_nvlink_minion_lane_shutdown(struct gk20a *g, u32 link_id,
bool sync)
{
u32 err = 0;
int err = 0;
err = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_laneshutdown_v(), 0, sync);
if (err)
if (err != 0)
nvgpu_err(g, " failed to shutdown lane on %d", link_id);
return err;
@@ -1110,7 +1110,7 @@ static void gv100_nvlink_dlpl_isr(struct gk20a *g, u32 link_id)
u32 fatal_mask = 0;
u32 intr = 0;
bool retrain = false;
u32 err;
int err;
intr = DLPL_REG_RD32(g, link_id, nvl_intr_r()) &
DLPL_REG_RD32(g, link_id, nvl_intr_stall_en_r());
@@ -1135,7 +1135,7 @@ static void gv100_nvlink_dlpl_isr(struct gk20a *g, u32 link_id)
if (retrain) {
err = nvgpu_nvlink_train(g, link_id, false);
if (err)
if (err != 0)
nvgpu_err(g, "failed to retrain link %d", link_id);
}
@@ -1539,7 +1539,7 @@ static int gv100_nvlink_enable_links_pre_top(struct gk20a *g, u32 links)
u32 tmp;
u32 reg;
u32 delay = ioctrl_reset_sw_post_reset_delay_microseconds_v();
u32 err;
int err;
nvgpu_log(g, gpu_dbg_nvlink, " enabling 0x%lx links", enabled_links);
/* Take links out of reset */
@@ -1571,7 +1571,7 @@ static int gv100_nvlink_enable_links_pre_top(struct gk20a *g, u32 links)
*/
if (g->ops.nvlink.rxdet) {
err = g->ops.nvlink.rxdet(g, link_id);
if (err)
if (err != 0)
return err;
}
@@ -1583,19 +1583,19 @@ static int gv100_nvlink_enable_links_pre_top(struct gk20a *g, u32 links)
/* This should be done by the NVLINK API */
err = gv100_nvlink_minion_init_uphy(g, BIT(link_id), true);
if (err) {
if (err != 0) {
nvgpu_err(g, "Failed to init phy of link: %u", link_id);
return err;
}
err = gv100_nvlink_rxcal_en(g, BIT(link_id));
if (err) {
if (err != 0) {
nvgpu_err(g, "Failed to RXcal on link: %u", link_id);
return err;
}
err = gv100_nvlink_minion_data_ready_en(g, BIT(link_id), true);
if (err) {
if (err != 0) {
nvgpu_err(g, "Failed to set data ready link:%u",
link_id);
return err;
@@ -1669,7 +1669,7 @@ static u32 gv100_nvlink_prbs_gen_en(struct gk20a *g, unsigned long mask)
return 0;
}
static u32 gv100_nvlink_rxcal_en(struct gk20a *g, unsigned long mask)
static int gv100_nvlink_rxcal_en(struct gk20a *g, unsigned long mask)
{
u32 link_id;
struct nvgpu_timeout timeout;
@@ -1719,7 +1719,7 @@ int gv100_nvlink_init(struct gk20a *g)
return -ENODEV;
err = nvgpu_nvlink_enumerate(g);
if (err) {
if (err != 0) {
nvgpu_err(g, "failed to enumerate nvlink");
goto fail;
}
@@ -1728,7 +1728,7 @@ int gv100_nvlink_init(struct gk20a *g)
__nvgpu_set_enabled(g, NVGPU_MM_USE_PHYSICAL_SG, true);
err = g->ops.fb.enable_nvlink(g);
if (err) {
if (err != 0) {
nvgpu_err(g, "failed switch to nvlink sysmem");
goto fail;
}
@@ -2202,7 +2202,7 @@ int gv100_nvlink_link_early_init(struct gk20a *g, unsigned long mask)
int err;
err = gv100_nvlink_enable_links_pre_top(g, mask);
if (err) {
if (err != 0) {
nvgpu_err(g, "Pre topology failed for links %lx", mask);
return err;
}
@@ -2230,7 +2230,7 @@ int gv100_nvlink_interface_init(struct gk20a *g)
}
err = g->ops.fb.init_nvlink(g);
if (err) {
if (err != 0) {
nvgpu_err(g, "failed to setup nvlinks for sysmem");
return err;
}
@@ -2263,7 +2263,7 @@ int gv100_nvlink_reg_init(struct gk20a *g)
endp = link->remote_info.device_type;
err = gv100_nvlink_get_tlc_reginit(endp, &reg, &count);
if (err) {
if (err != 0) {
nvgpu_err(g, "no reginit for endp=%u", endp);
continue;
}
@@ -2329,7 +2329,7 @@ int gv100_nvlink_link_set_mode(struct gk20a *g, u32 link_id, u32 mode)
{
u32 state;
u32 reg;
u32 err = 0;
int err = 0;
nvgpu_log(g, gpu_dbg_nvlink, "link :%d, mode:%u", link_id, mode);
@@ -2468,7 +2468,7 @@ int gv100_nvlink_link_set_sublink_mode(struct gk20a *g, u32 link_id,
return -EINVAL;
err = gv100_nvlink_link_sublink_check_change(g, link_id);
if (err)
if (err != 0)
return err;
if (is_rx_sublink)
@@ -2500,7 +2500,7 @@ int gv100_nvlink_link_set_sublink_mode(struct gk20a *g, u32 link_id,
DLPL_REG_WR32(g, link_id, nvl_sublink_change_r(), reg);
err = gv100_nvlink_link_sublink_check_change(g, link_id);
if (err) {
if (err != 0) {
nvgpu_err(g, "Error in TX to HS");
return err;
}
@@ -2534,7 +2534,7 @@ int gv100_nvlink_link_set_sublink_mode(struct gk20a *g, u32 link_id,
DLPL_REG_WR32(g, link_id, nvl_sublink_change_r(), reg);
err = gv100_nvlink_link_sublink_check_change(g, link_id);
if (err) {
if (err != 0) {
nvgpu_err(g, "Error in TX to SAFE");
return err;
}
@@ -2560,7 +2560,7 @@ int gv100_nvlink_link_set_sublink_mode(struct gk20a *g, u32 link_id,
DLPL_REG_WR32(g, link_id, nvl_sublink_change_r(), reg);
err = gv100_nvlink_link_sublink_check_change(g, link_id);
if (err) {
if (err != 0) {
nvgpu_err(g, "Error in TX to OFF");
return err;
}
@@ -2591,7 +2591,7 @@ int gv100_nvlink_link_set_sublink_mode(struct gk20a *g, u32 link_id,
DLPL_REG_WR32(g, link_id, nvl_sublink_change_r(), reg);
err = gv100_nvlink_link_sublink_check_change(g, link_id);
if (err) {
if (err != 0) {
nvgpu_err(g, "Error in RX to OFF");
return err;
}
@@ -2613,7 +2613,7 @@ int gv100_nvlink_link_set_sublink_mode(struct gk20a *g, u32 link_id,
nvgpu_err(g, "MODE %u", mode);
}
if (err)
if (err != 0)
nvgpu_err(g, " failed on set_sublink_mode");
return err;
}
@@ -2695,13 +2695,13 @@ int gv100_nvlink_early_init(struct gk20a *g)
return -EINVAL;
err = nvgpu_bios_get_nvlink_config_data(g);
if (err) {
if (err != 0) {
nvgpu_err(g, "failed to read nvlink vbios data");
goto nvlink_init_exit;
}
err = g->ops.nvlink.discover_ioctrl(g);
if (err)
if (err != 0)
goto nvlink_init_exit;
/* Enable NVLINK in MC */
@@ -2711,7 +2711,7 @@ int gv100_nvlink_early_init(struct gk20a *g)
g->ops.mc.reset(g, mc_reset_nvlink_mask);
err = g->ops.nvlink.discover_link(g);
if (err || g->nvlink.discovered_links == 0) {
if ((err != 0) || (g->nvlink.discovered_links == 0)) {
nvgpu_err(g, "No links available");
goto nvlink_init_exit;
}
@@ -2757,13 +2757,13 @@ int gv100_nvlink_early_init(struct gk20a *g)
g->nvlink.speed = nvgpu_nvlink_speed_20G;
err = __gv100_nvlink_state_load_hal(g);
if (err) {
if (err != 0) {
nvgpu_err(g, " failed Nvlink state load");
goto nvlink_init_exit;
}
err = gv100_nvlink_minion_configure_ac_coupling(g,
g->nvlink.ac_coupling_mask, true);
if (err) {
if (err != 0) {
nvgpu_err(g, " failed Nvlink state load");
goto nvlink_init_exit;
}


@@ -37,7 +37,7 @@ int gv100_pmu_init_acr(struct gk20a *g)
rpc.wpr_regionId = 0x1;
rpc.wpr_offset = 0x0;
PMU_RPC_EXECUTE(status, pmu, ACR, INIT_WPR_REGION, &rpc, 0);
if (status) {
if (status != 0) {
nvgpu_err(g, "Failed to execute RPC status=0x%x",
status);
}
@@ -80,7 +80,7 @@ int gv100_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
rpc.wpr_base_virtual.lo = 0;
rpc.wpr_base_virtual.hi = 0;
PMU_RPC_EXECUTE(status, pmu, ACR, BOOTSTRAP_GR_FALCONS, &rpc, 0);
if (status) {
if (status != 0) {
nvgpu_err(g, "Failed to execute RPC, status=0x%x", status);
goto exit;
}


@@ -484,7 +484,7 @@ static int gv11b_fifo_poll_pbdma_chan_status(struct gk20a *g, u32 id,
delay << 1, GR_IDLE_CHECK_MAX);
} while (!nvgpu_timeout_expired(&timeout));
if (ret) {
if (ret != 0) {
nvgpu_err(g, "preempt timeout pbdma: %u pbdma_stat: %u "
"tsgid: %u", pbdma_id, pbdma_stat, id);
}
@@ -609,7 +609,7 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
delay << 1, GR_IDLE_CHECK_MAX);
} while (!nvgpu_timeout_expired(&timeout));
if (ret) {
if (ret != 0) {
/*
* The reasons a preempt can fail are:
* 1.Some other stalling interrupt is asserted preventing
@@ -841,7 +841,7 @@ int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg)
int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
{
struct fifo_gk20a *f = &g->fifo;
u32 ret = 0;
int ret = 0;
u32 token = PMU_INVALID_MUTEX_OWNER_ID;
u32 mutex_ret = 0;
u32 runlist_id;
@@ -875,7 +875,7 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);
if (ret) {
if (ret != 0) {
if (nvgpu_platform_is_silicon(g)) {
nvgpu_err(g, "preempt timed out for tsgid: %u, "
"ctxsw timeout will trigger recovery if needed", tsgid);
@@ -970,7 +970,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
/* (chid == ~0 && !add) remove all act ch from runlist*/
err = gk20a_fifo_update_runlist_locked(g, rlid,
FIFO_INVAL_CHANNEL_ID, add, wait_for_finish);
if (err) {
if (err != 0) {
nvgpu_err(g, "runlist id %d is not cleaned up",
rlid);
}
@@ -1763,11 +1763,11 @@ void gv11b_fifo_init_eng_method_buffers(struct gk20a *g,
for (runque = 0; runque < num_pbdma; runque++) {
err = nvgpu_dma_alloc_map_sys(vm, method_buffer_size,
&tsg->eng_method_buffers[runque]);
if (err) {
if (err != 0) {
break;
}
}
if (err) {
if (err != 0) {
for (i = (runque - 1); i >= 0; i--) {
nvgpu_dma_unmap_free(vm,
&tsg->eng_method_buffers[i]);
@@ -1887,7 +1887,7 @@ int gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
nvgpu_mutex_acquire(&c->vm->syncpt_ro_map_lock);
err = set_syncpt_ro_map_gpu_va_locked(c->vm);
nvgpu_mutex_release(&c->vm->syncpt_ro_map_lock);
if (err)
if (err != 0)
return err;
nr_pages = DIV_ROUND_UP(g->syncpt_size, PAGE_SIZE);
@@ -1923,7 +1923,7 @@ int gv11b_fifo_get_sync_ro_map(struct vm_gk20a *vm,
nvgpu_mutex_acquire(&vm->syncpt_ro_map_lock);
err = set_syncpt_ro_map_gpu_va_locked(vm);
nvgpu_mutex_release(&vm->syncpt_ro_map_lock);
if (err)
if (err != 0)
return err;
*base_gpuva = vm->syncpt_ro_map_gpu_va;


@@ -1148,7 +1148,7 @@ int gr_gv11b_load_stencil_tbl(struct gk20a *g, struct gr_gk20a *gr)
zbc_val.format = s_tbl->format;
ret = g->ops.gr.add_zbc_s(g, gr, &zbc_val, i);
if (ret) {
if (ret != 0) {
return ret;
}
}
@@ -1501,7 +1501,7 @@ int gr_gv11b_alloc_buffer(struct vm_gk20a *vm, size_t size,
nvgpu_log_fn(g, " ");
err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem);
if (err) {
if (err != 0) {
return err;
}
@@ -1589,7 +1589,7 @@ int gr_gv11b_set_ctxsw_preemption_mode(struct gk20a *g,
err = gr_gp10b_alloc_buffer(vm,
g->gr.ctx_vars.preempt_image_size,
&gr_ctx->preempt_ctxsw_buffer);
if (err) {
if (err != 0) {
nvgpu_err(g, "cannot allocate preempt buffer");
goto fail;
}
@@ -1597,7 +1597,7 @@ int gr_gv11b_set_ctxsw_preemption_mode(struct gk20a *g,
err = gr_gp10b_alloc_buffer(vm,
spill_size,
&gr_ctx->spill_ctxsw_buffer);
if (err) {
if (err != 0) {
nvgpu_err(g, "cannot allocate spill buffer");
goto fail_free_preempt;
}
@@ -1605,7 +1605,7 @@ int gr_gv11b_set_ctxsw_preemption_mode(struct gk20a *g,
err = gr_gp10b_alloc_buffer(vm,
attrib_cb_size,
&gr_ctx->betacb_ctxsw_buffer);
if (err) {
if (err != 0) {
nvgpu_err(g, "cannot allocate beta buffer");
goto fail_free_spill;
}
@@ -1613,7 +1613,7 @@ int gr_gv11b_set_ctxsw_preemption_mode(struct gk20a *g,
err = gr_gp10b_alloc_buffer(vm,
pagepool_size,
&gr_ctx->pagepool_ctxsw_buffer);
if (err) {
if (err != 0) {
nvgpu_err(g, "cannot allocate page pool");
goto fail_free_betacb;
}
@@ -1719,7 +1719,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
}
err = gr_gk20a_ctx_patch_write_begin(g, gr_ctx, true);
if (err) {
if (err != 0) {
nvgpu_err(g, "can't map patch context");
goto out;
}
@@ -2193,7 +2193,7 @@ static int gr_gv11b_handle_warp_esr_error_mmu_nack(struct gk20a *g,
* recovery path even if channel is invalid. We want to explicitly check
* for teardown value in mmu fault handler.
*/
if (!err) {
if (err == 0) {
gk20a_channel_put(fault_ch);
}
@@ -2369,7 +2369,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
*/
ret = gr_gv11b_handle_all_warp_esr_errors(g, gpc, tpc, sm,
warp_esr_error, fault_ch);
if (ret) {
if (ret != 0) {
return ret;
}
@@ -2439,7 +2439,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n");
ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch);
if (ret) {
if (ret != 0) {
nvgpu_err(g, "CILP: error while setting CILP preempt pending!");
return ret;
}
@@ -2699,7 +2699,7 @@ int gr_gv11b_init_sw_veid_bundle(struct gk20a *g)
&g->gr.ctx_vars.sw_veid_bundle_init;
u32 i;
u32 last_bundle_data = 0;
u32 err = 0;
int err = 0;
for (i = 0; i < sw_veid_bundle_init->count; i++) {
nvgpu_log_fn(g, "veid bundle count: %d", i);
@@ -2718,14 +2718,14 @@ int gr_gv11b_init_sw_veid_bundle(struct gk20a *g)
nvgpu_log_fn(g, "go idle bundle");
gk20a_writel(g, gr_pipe_bundle_address_r(),
sw_veid_bundle_init->l[i].addr);
err |= gr_gk20a_wait_idle(g,
err = gr_gk20a_wait_idle(g,
gk20a_get_gr_idle_timeout(g),
GR_IDLE_CHECK_DEFAULT);
} else {
err = gv11b_write_bundle_veid_state(g, i);
}
if (err) {
if (err != 0) {
nvgpu_err(g, "failed to init sw veid bundle");
break;
}
@@ -2879,12 +2879,12 @@ int gr_gv11b_commit_inst(struct channel_gk20a *c, u64 gpu_va)
nvgpu_log_fn(g, " ");
err = gv11b_alloc_subctx_header(c);
if (err) {
if (err != 0) {
return err;
}
err = gv11b_update_subctx_header(c, gpu_va);
if (err) {
if (err != 0) {
return err;
}
@@ -3081,7 +3081,7 @@ int gr_gv11b_init_fs_state(struct gk20a *g)
}
err = gr_gk20a_init_fs_state(g);
if (err) {
if (err != 0) {
return err;
}
@@ -3268,7 +3268,7 @@ int gv11b_gr_set_sm_debug_mode(struct gk20a *g,
}
err = gr_gk20a_exec_ctx_ops(ch, ops, i, i, 0, NULL);
if (err) {
if (err != 0) {
nvgpu_err(g, "Failed to access register\n");
}
nvgpu_kfree(g, ops);
@@ -3416,7 +3416,7 @@ void gv11b_gr_suspend_single_sm(struct gk20a *g,
err = g->ops.gr.wait_for_sm_lock_down(g, gpc, tpc, sm,
global_esr_mask, check_errors);
if (err) {
if (err != 0) {
nvgpu_err(g,
"SuspendSm failed");
return;
@@ -3458,7 +3458,7 @@ void gv11b_gr_suspend_all_sms(struct gk20a *g,
err = g->ops.gr.wait_for_sm_lock_down(g,
gpc, tpc, sm,
global_esr_mask, check_errors);
if (err) {
if (err != 0) {
nvgpu_err(g,
"SuspendAllSms failed");
return;
@@ -4852,7 +4852,7 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g,
&gpc_num, &tpc_num, &ppc_num, &be_num,
&broadcast_flags);
nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type = %d", addr_type);
if (err) {
if (err != 0) {
return err;
}
@@ -4896,7 +4896,7 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g,
else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC) {
err = gr_gk20a_split_ppc_broadcast_addr(g,
addr, gpc_num, priv_addr_table, &t);
if (err) {
if (err != 0) {
return err;
}
} else {


@@ -130,7 +130,7 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g)
err = nvgpu_dma_alloc_map_sys(vm, fb_size,
&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]);
if (err) {
if (err != 0) {
nvgpu_err(g,
"Error in hw mmu fault buf [0] alloc in bar2 vm ");
/* Fault will be snapped in pri reg but not in buffer */
@@ -142,7 +142,7 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g)
&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY])) {
err = nvgpu_dma_alloc_map_sys(vm, fb_size,
&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY]);
if (err) {
if (err != 0) {
nvgpu_err(g,
"Error in hw mmu fault buf [1] alloc in bar2 vm ");
/* Fault will be snapped in pri reg but not in buffer */
@@ -178,7 +178,7 @@ static int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g)
err = gv11b_mm_mmu_fault_info_buf_init(g);
if (!err) {
if (err == 0) {
gv11b_mm_mmu_hw_fault_buf_init(g);
}
@@ -194,7 +194,7 @@ int gv11b_init_mm_setup_hw(struct gk20a *g)
err = gk20a_init_mm_setup_hw(g);
err = gv11b_mm_mmu_fault_setup_sw(g);
if (!err) {
if (err == 0) {
gv11b_mm_mmu_fault_setup_hw(g);
}


@@ -67,7 +67,7 @@ int gv11b_alloc_subctx_header(struct channel_gk20a *c)
if (!nvgpu_mem_is_valid(ctxheader)) {
ret = nvgpu_dma_alloc_sys(g, ctxsw_prog_fecs_header_v(),
ctxheader);
if (ret) {
if (ret != 0) {
nvgpu_err(g, "failed to allocate sub ctx header");
return ret;
}


@@ -191,19 +191,19 @@ static int get_lpwr_ms_table(struct gk20a *g)
return 0;
}
u32 nvgpu_lpwr_pg_setup(struct gk20a *g)
int nvgpu_lpwr_pg_setup(struct gk20a *g)
{
u32 err = 0;
int err = 0;
nvgpu_log_fn(g, " ");
err = get_lpwr_gr_table(g);
if (err) {
if (err != 0) {
return err;
}
err = get_lpwr_ms_table(g);
if (err) {
if (err != 0) {
return err;
}
@@ -220,7 +220,7 @@ static void nvgpu_pmu_handle_param_lpwr_msg(struct gk20a *g,
nvgpu_log_fn(g, " ");
if (status != 0) {
if (status != 0U) {
nvgpu_err(g, "LWPR PARAM cmd aborted");
return;
}
@@ -423,7 +423,7 @@ int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock)
if (is_rppg_supported) {
if (g->support_pmu && g->elpg_enabled) {
status = nvgpu_pmu_disable_elpg(g);
if (status) {
if (status != 0) {
goto exit_unlock;
}
}


@@ -90,7 +90,7 @@ struct obj_lwpr {
u32 mclk_change_cache;
};
u32 nvgpu_lpwr_pg_setup(struct gk20a *g);
int nvgpu_lpwr_pg_setup(struct gk20a *g);
int nvgpu_lwpr_mclk_change(struct gk20a *g, u32 pstate);
int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock);
int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock);


@@ -32,7 +32,7 @@ static void pmu_handle_rppg_init_msg(struct gk20a *g, struct pmu_msg *msg,
{
u32 *success = param;
if (status == 0) {
if (status == 0U) {
switch (msg->msg.pg.rppg_msg.cmn.msg_id) {
case NV_PMU_RPPG_MSG_ID_INIT_CTRL_ACK:
*success = 1;
@@ -46,11 +46,11 @@ static void pmu_handle_rppg_init_msg(struct gk20a *g, struct pmu_msg *msg,
msg->msg.pg.msg_type);
}
static u32 rppg_send_cmd(struct gk20a *g, struct nv_pmu_rppg_cmd *prppg_cmd)
static int rppg_send_cmd(struct gk20a *g, struct nv_pmu_rppg_cmd *prppg_cmd)
{
struct pmu_cmd cmd;
u32 seq;
u32 status = 0;
int status = 0;
u32 success = 0;
memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -82,7 +82,7 @@ static u32 rppg_send_cmd(struct gk20a *g, struct nv_pmu_rppg_cmd *prppg_cmd)
status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
pmu_handle_rppg_init_msg, &success, &seq, ~0);
if (status) {
if (status != 0) {
nvgpu_err(g, "Unable to submit parameter command %d",
prppg_cmd->cmn.cmd_id);
goto exit;
@@ -102,7 +102,7 @@ exit:
return status;
}
static u32 rppg_init(struct gk20a *g)
static int rppg_init(struct gk20a *g)
{
struct nv_pmu_rppg_cmd rppg_cmd;
@@ -111,7 +111,7 @@ static u32 rppg_init(struct gk20a *g)
return rppg_send_cmd(g, &rppg_cmd);
}
static u32 rppg_ctrl_init(struct gk20a *g, u8 ctrl_id)
static int rppg_ctrl_init(struct gk20a *g, u8 ctrl_id)
{
struct nv_pmu_rppg_cmd rppg_cmd;
@@ -128,9 +128,9 @@ static u32 rppg_ctrl_init(struct gk20a *g, u8 ctrl_id)
return rppg_send_cmd(g, &rppg_cmd);
}
u32 init_rppg(struct gk20a *g)
int init_rppg(struct gk20a *g)
{
u32 status;
int status;
status = rppg_init(g);
if (status != 0) {


@@ -22,5 +22,5 @@
#ifndef NVGPU_LPWR_RPPG_H
#define NVGPU_LPWR_RPPG_H
u32 init_rppg(struct gk20a *g);
int init_rppg(struct gk20a *g);
#endif /* NVGPU_LPWR_RPPG_H */


@@ -50,82 +50,82 @@ int gk20a_init_pstate_support(struct gk20a *g)
nvgpu_log_fn(g, " ");
err = volt_rail_sw_setup(g);
if (err) {
if (err != 0) {
return err;
}
err = volt_dev_sw_setup(g);
if (err) {
if (err != 0) {
return err;
}
err = volt_policy_sw_setup(g);
if (err) {
if (err != 0) {
return err;
}
err = clk_vin_sw_setup(g);
if (err) {
if (err != 0) {
return err;
}
err = clk_fll_sw_setup(g);
if (err) {
if (err != 0) {
return err;
}
err = therm_domain_sw_setup(g);
if (err) {
if (err != 0) {
return err;
}
err = vfe_var_sw_setup(g);
if (err) {
if (err != 0) {
return err;
}
err = vfe_equ_sw_setup(g);
if (err) {
if (err != 0) {
return err;
}
err = clk_domain_sw_setup(g);
if (err) {
if (err != 0) {
return err;
}
err = clk_vf_point_sw_setup(g);
if (err) {
if (err != 0) {
return err;
}
err = clk_prog_sw_setup(g);
if (err) {
if (err != 0) {
return err;
}
err = pstate_sw_setup(g);
if (err) {
if (err != 0) {
return err;
}
if(g->ops.clk.support_pmgr_domain) {
err = pmgr_domain_sw_setup(g);
if (err) {
if (err != 0) {
return err;
}
}
if (g->ops.clk.support_clk_freq_controller) {
err = clk_freq_controller_sw_setup(g);
if (err) {
if (err != 0) {
return err;
}
}
if(g->ops.clk.support_lpwr_pg) {
err = nvgpu_lpwr_pg_setup(g);
if (err) {
if (err != 0) {
return err;
}
}
@@ -142,29 +142,29 @@ int gk20a_init_pstate_pmu_support(struct gk20a *g)
if (g->ops.clk.mclk_init) {
err = g->ops.clk.mclk_init(g);
if (err) {
if (err != 0U) {
nvgpu_err(g, "failed to set mclk");
/* Indicate error and continue */
}
}
err = volt_rail_pmu_setup(g);
if (err) {
if (err != 0U) {
return err;
}
err = volt_dev_pmu_setup(g);
if (err) {
if (err != 0U) {
return err;
}
err = volt_policy_pmu_setup(g);
if (err) {
if (err != 0U) {
return err;
}
err = g->ops.pmu_ver.volt.volt_send_load_cmd_to_pmu(g);
if (err) {
if (err != 0U) {
nvgpu_err(g,
"Failed to send VOLT LOAD CMD to PMU: status = 0x%08x.",
err);
@@ -172,58 +172,58 @@ int gk20a_init_pstate_pmu_support(struct gk20a *g)
}
err = therm_domain_pmu_setup(g);
if (err) {
if (err != 0U) {
return err;
}
err = vfe_var_pmu_setup(g);
if (err) {
if (err != 0U) {
return err;
}
err = vfe_equ_pmu_setup(g);
if (err) {
if (err != 0U) {
return err;
}
err = clk_domain_pmu_setup(g);
if (err) {
if (err != 0U) {
return err;
}
err = clk_prog_pmu_setup(g);
if (err) {
if (err != 0U) {
return err;
}
err = clk_vin_pmu_setup(g);
if (err) {
if (err != 0U) {
return err;
}
err = clk_fll_pmu_setup(g);
if (err) {
if (err != 0U) {
return err;
}
err = clk_vf_point_pmu_setup(g);
if (err) {
if (err != 0U) {
return err;
}
if (g->ops.clk.support_clk_freq_controller) {
err = clk_freq_controller_pmu_setup(g);
if (err) {
if (err != 0U) {
return err;
}
}
err = clk_pmu_vin_load(g);
if (err) {
if (err != 0U) {
return err;
}
err = g->ops.clk.perf_pmu_vfe_load(g);
if (err) {
if (err != 0U) {
return err;
}
@@ -242,7 +242,7 @@ static int pstate_construct_super(struct gk20a *g, struct boardobj **ppboardobj,
int err;
err = boardobj_construct_super(g, ppboardobj, size, args);
if (err) {
if (err != 0) {
return err;
}
@@ -286,7 +286,7 @@ static int pstate_insert(struct gk20a *g, struct pstate *pstate, int index)
err = boardobjgrp_objinsert(&pstates->super.super,
(struct boardobj *)pstate, index);
if (err) {
if (err != 0) {
nvgpu_err(g,
"error adding pstate boardobj %d", index);
return err;
@@ -380,7 +380,7 @@ static int parse_pstate_table_5x(struct gk20a *g,
}
err = parse_pstate_entry_5x(g, hdr, entry, &_pstate);
if (err) {
if (err != 0) {
goto done;
}
@@ -390,7 +390,7 @@ static int parse_pstate_table_5x(struct gk20a *g,
}
err = pstate_insert(g, pstate, i);
if (err) {
if (err != 0) {
goto done;
}
}
@@ -409,12 +409,12 @@ static int pstate_sw_setup(struct gk20a *g)
nvgpu_cond_init(&g->perf_pmu.pstatesobjs.pstate_notifier_wq);
err = nvgpu_mutex_init(&g->perf_pmu.pstatesobjs.pstate_mutex);
if (err) {
if (err != 0) {
return err;
}
err = boardobjgrpconstruct_e32(g, &g->perf_pmu.pstatesobjs.super);
if (err) {
if (err != 0) {
nvgpu_err(g,
"error creating boardobjgrp for pstates, err=%d",
err);
@@ -440,7 +440,7 @@ static int pstate_sw_setup(struct gk20a *g)
err = parse_pstate_table_5x(g, hdr);
done:
if (err) {
if (err != 0) {
nvgpu_mutex_destroy(&g->perf_pmu.pstatesobjs.pstate_mutex);
}
return err;


@@ -217,7 +217,7 @@ int tu104_init_pdb_cache_war(struct gk20a *g)
* PDB bound to 257th instance block
*/
err = nvgpu_dma_alloc_sys(g, size, &g->pdb_cache_war_mem);
if (err) {
if (err != 0) {
return err;
}


@@ -136,12 +136,12 @@ int gr_tu104_alloc_global_ctx_buffers(struct gk20a *g)
err = gk20a_gr_alloc_ctx_buffer(g,
&gr->global_ctx_buffer[RTV_CIRCULAR_BUFFER],
rtv_circular_buffer_size);
if (err) {
if (err != 0) {
return err;
}
err = gr_gk20a_alloc_global_ctx_buffers(g);
if (err) {
if (err != 0) {
goto clean_up;
}
@@ -192,7 +192,7 @@ int gr_tu104_map_global_ctx_buffers(struct gk20a *g,
g_bfr_index[RTV_CIRCULAR_BUFFER_VA] = RTV_CIRCULAR_BUFFER;
err = gr_gk20a_map_global_ctx_buffers(g, ch);
if (err) {
if (err != 0) {
goto clean_up;
}
@@ -229,7 +229,7 @@ int gr_tu104_commit_global_ctx_buffers(struct gk20a *g,
u32 size;
err = gr_gk20a_commit_global_ctx_buffers(g, ch, patch);
if (err) {
if (err != 0) {
return err;
}
@@ -242,7 +242,7 @@ int gr_tu104_commit_global_ctx_buffers(struct gk20a *g,
if (patch) {
int err;
err = gr_gk20a_ctx_patch_write_begin(g, gr_ctx, false);
if (err) {
if (err != 0) {
return err;
}
}


@@ -291,7 +291,7 @@ static int tu104_init_gpu_characteristics(struct gk20a *g)
int err;
err = gk20a_init_gpu_characteristics(g);
if (err) {
if (err != 0) {
return err;
}


@@ -38,13 +38,13 @@
int tu104_nvlink_rxdet(struct gk20a *g, u32 link_id)
{
u32 ret = 0;
int ret = 0;
u32 reg;
struct nvgpu_timeout timeout;
ret = gv100_nvlink_minion_send_command(g, link_id,
0x00000005U, 0, true);
if (ret) {
if (ret != 0) {
nvgpu_err(g, "Error during INITRXTERM minion DLCMD on link %u",
link_id);
return ret;
@@ -52,7 +52,7 @@ int tu104_nvlink_rxdet(struct gk20a *g, u32 link_id)
ret = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_turing_rxdet_v(), 0, true);
if (ret) {
if (ret != 0) {
nvgpu_err(g, "Error during RXDET minion DLCMD on link %u",
link_id);
return ret;
@@ -98,7 +98,7 @@ int tu104_nvlink_setup_pll(struct gk20a *g, unsigned long link_mask)
ret = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_txclkswitch_pll_v(),
0, true);
if (ret) {
if (ret != 0) {
nvgpu_err(g, "Error: TXCLKSWITCH_PLL dlcmd on link %u",
link_id);
return ret;
@@ -106,7 +106,7 @@ int tu104_nvlink_setup_pll(struct gk20a *g, unsigned long link_mask)
ret = nvgpu_timeout_init(g, &timeout,
NV_NVLINK_REG_POLL_TIMEOUT_MS, NVGPU_TIMER_CPU_TIMER);
if (ret) {
if (ret != 0) {
nvgpu_err(g, "Error during timeout init");
return ret;
}
@@ -210,7 +210,7 @@ int tu104_nvlink_minion_data_ready_en(struct gk20a *g,
ret = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_initdlpl_v(), 0,
sync);
if (ret) {
if (ret != 0) {
nvgpu_err(g, "Minion initdlpl failed on link %u",
link_id);
return ret;
@@ -220,7 +220,7 @@ int tu104_nvlink_minion_data_ready_en(struct gk20a *g,
ret = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_turing_initdlpl_to_chipa_v(),
0, sync);
if (ret) {
if (ret != 0) {
nvgpu_err(g, "Minion initdlpl_to_chipA failed on link\
%u", link_id);
return ret;
@@ -230,7 +230,7 @@ int tu104_nvlink_minion_data_ready_en(struct gk20a *g,
ret = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_inittl_v(), 0,
sync);
if (ret) {
if (ret != 0) {
nvgpu_err(g, "Minion inittl failed on link %u",
link_id);
return ret;
@@ -240,7 +240,7 @@ int tu104_nvlink_minion_data_ready_en(struct gk20a *g,
ret = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_initlaneenable_v(), 0,
sync);
if (ret) {
if (ret != 0) {
nvgpu_err(g, "Minion initlaneenable failed on link %u",
link_id);
return ret;