diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.c b/drivers/gpu/nvgpu/gp106/acr_gp106.c index 7b0a9d005..2929f5449 100644 --- a/drivers/gpu/nvgpu/gp106/acr_gp106.c +++ b/drivers/gpu/nvgpu/gp106/acr_gp106.c @@ -103,7 +103,7 @@ int gp106_alloc_blob_space(struct gk20a *g, err = nvgpu_dma_alloc_vid_at(g, wpr_inf.size, &g->acr.wpr_dummy, wpr_inf.wpr_base); - if (err) { + if (err != 0) { return err; } @@ -150,7 +150,7 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img) g->acr.pmu_desc = pmu_desc; err = nvgpu_init_pmu_fw_support(pmu); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to set function pointers"); goto release_sig; } @@ -514,7 +514,7 @@ int gp106_prepare_ucode_blob(struct gk20a *g) /*Recovery case, we do not need to form non WPR blob of ucodes*/ err = nvgpu_init_pmu_fw_support(pmu); - if (err) { + if (err != 0) { gp106_dbg_pmu(g, "failed to set function pointers\n"); return err; } @@ -531,7 +531,7 @@ int gp106_prepare_ucode_blob(struct gk20a *g) /* Discover all managed falcons*/ err = lsfm_discover_ucode_images(g, plsfm); gp106_dbg_pmu(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt); - if (err) { + if (err != 0) { goto exit_err; } @@ -543,14 +543,14 @@ int gp106_prepare_ucode_blob(struct gk20a *g) (g->acr.ucode_blob.cpu_va == NULL)) { /* Generate WPR requirements*/ err = lsf_gen_wpr_requirements(g, plsfm); - if (err) { + if (err != 0) { goto exit_err; } /*Alloc memory to hold ucode blob contents*/ err = g->acr.alloc_blob_space(g, plsfm->wpr_size ,&g->acr.ucode_blob); - if (err) { + if (err != 0) { goto exit_err; } @@ -588,7 +588,7 @@ int lsfm_discover_ucode_images(struct gk20a *g, /* Obtain the PMU ucode image and add it to the list if required*/ memset(&ucode_img, 0, sizeof(ucode_img)); status = pmu_ucode_details(g, &ucode_img); - if (status) { + if (status != 0) { return status; } diff --git a/drivers/gpu/nvgpu/gp106/bios_gp106.c b/drivers/gpu/nvgpu/gp106/bios_gp106.c index 6a8400bc4..c26b42025 100644 --- a/drivers/gpu/nvgpu/gp106/bios_gp106.c +++ b/drivers/gpu/nvgpu/gp106/bios_gp106.c @@ -219,7 +219,7 @@ int gp106_bios_init(struct gk20a *g) } err = nvgpu_bios_parse_rom(g); - if (err) { + if (err != 0) { goto free_firmware; } @@ -236,7 +236,7 @@ int gp106_bios_init(struct gk20a *g) if (g->ops.bios.devinit) { err = g->ops.bios.devinit(g); - if (err) { + if (err != 0) { nvgpu_err(g, "devinit failed"); goto free_firmware; } @@ -245,7 +245,7 @@ int gp106_bios_init(struct gk20a *g) if (nvgpu_is_enabled(g, NVGPU_PMU_RUN_PREOS) && (g->ops.bios.preos != NULL)) { err = g->ops.bios.preos(g); - if (err) { + if (err != 0) { nvgpu_err(g, "pre-os failed"); goto free_firmware; } @@ -253,7 +253,7 @@ int gp106_bios_init(struct gk20a *g) if (g->ops.bios.verify_devinit) { err = g->ops.bios.verify_devinit(g); - if (err) { + if (err != 0) { nvgpu_err(g, "devinit status verification failed"); goto free_firmware; } diff --git a/drivers/gpu/nvgpu/gp106/clk_arb_gp106.c b/drivers/gpu/nvgpu/gp106/clk_arb_gp106.c index e3d3abdba..04c4b80b1 100644 --- a/drivers/gpu/nvgpu/gp106/clk_arb_gp106.c +++ b/drivers/gpu/nvgpu/gp106/clk_arb_gp106.c @@ -139,7 +139,7 @@ int gp106_init_clk_arbiter(struct gk20a *g) arb->clk_arb_events_supported = true; err = nvgpu_mutex_init(&arb->pstate_lock); - if (err) + if (err != 0) goto mutex_fail; nvgpu_spinlock_init(&arb->sessions_lock); nvgpu_spinlock_init(&arb->users_lock); diff --git a/drivers/gpu/nvgpu/gp106/clk_gp106.c b/drivers/gpu/nvgpu/gp106/clk_gp106.c index 09d8728e7..2567f6921 100644 --- a/drivers/gpu/nvgpu/gp106/clk_gp106.c +++ 
b/drivers/gpu/nvgpu/gp106/clk_gp106.c @@ -77,12 +77,12 @@ unsigned long gp106_clk_measure_freq(struct gk20a *g, u32 api_domain) int gp106_init_clk_support(struct gk20a *g) { struct clk_gk20a *clk = &g->clk; - u32 err = 0; + int err = 0; nvgpu_log_fn(g, " "); err = nvgpu_mutex_init(&clk->clk_mutex); - if (err) { + if (err != 0) { return err; } diff --git a/drivers/gpu/nvgpu/gp106/gr_gp106.c b/drivers/gpu/nvgpu/gp106/gr_gp106.c index 299cccd80..143bcdb7f 100644 --- a/drivers/gpu/nvgpu/gp106/gr_gp106.c +++ b/drivers/gpu/nvgpu/gp106/gr_gp106.c @@ -192,7 +192,7 @@ int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g, err = gr_gp10b_alloc_buffer(vm, g->gr.ctx_vars.preempt_image_size, &gr_ctx->preempt_ctxsw_buffer); - if (err) { + if (err != 0) { nvgpu_err(g, "cannot allocate preempt buffer"); goto fail; } @@ -200,7 +200,7 @@ int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g, err = gr_gp10b_alloc_buffer(vm, spill_size, &gr_ctx->spill_ctxsw_buffer); - if (err) { + if (err != 0) { nvgpu_err(g, "cannot allocate spill buffer"); goto fail_free_preempt; } @@ -208,7 +208,7 @@ int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g, err = gr_gp10b_alloc_buffer(vm, attrib_cb_size, &gr_ctx->betacb_ctxsw_buffer); - if (err) { + if (err != 0) { nvgpu_err(g, "cannot allocate beta buffer"); goto fail_free_spill; } @@ -216,7 +216,7 @@ int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g, err = gr_gp10b_alloc_buffer(vm, pagepool_size, &gr_ctx->pagepool_ctxsw_buffer); - if (err) { + if (err != 0) { nvgpu_err(g, "cannot allocate page pool"); goto fail_free_betacb; } diff --git a/drivers/gpu/nvgpu/gp106/hal_gp106.c b/drivers/gpu/nvgpu/gp106/hal_gp106.c index 7a289565b..46af94368 100644 --- a/drivers/gpu/nvgpu/gp106/hal_gp106.c +++ b/drivers/gpu/nvgpu/gp106/hal_gp106.c @@ -226,7 +226,7 @@ static int gp106_init_gpu_characteristics(struct gk20a *g) int err; err = gk20a_init_gpu_characteristics(g); - if (err) { + if (err != 0) { return err; } diff --git a/drivers/gpu/nvgpu/gp106/mclk_gp106.c b/drivers/gpu/nvgpu/gp106/mclk_gp106.c index ea3d8d9b0..704006dac 100644 --- a/drivers/gpu/nvgpu/gp106/mclk_gp106.c +++ b/drivers/gpu/nvgpu/gp106/mclk_gp106.c @@ -3222,12 +3222,12 @@ int gp106_mclk_init(struct gk20a *g) mclk = &g->clk_pmu.clk_mclk; err = nvgpu_mutex_init(&mclk->mclk_lock); - if (err) { + if (err != 0) { return err; } err = nvgpu_mutex_init(&mclk->data_lock); - if (err) { + if (err != 0) { goto fail_mclk_mutex; } @@ -3400,7 +3400,7 @@ int gp106_mclk_change(struct gk20a *g, u16 val) PMU_COMMAND_QUEUE_LPQ, mclk_seq_pmucmdhandler, &seq_running, &seqdesc, ~0UL); - if (status) { + if (status != 0) { nvgpu_err(g, "unable to post seq script exec cmd for unit %x", cmd.hdr.unit_id); goto exit_status; diff --git a/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c b/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c index 3dc672ff2..567411041 100644 --- a/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c @@ -48,7 +48,7 @@ int gp10b_fecs_trace_flush(struct gk20a *g) err = gr_gk20a_elpg_protected_call(g, gr_gk20a_submit_fecs_method_op(g, op, false)); - if (err) + if (err != 0) nvgpu_err(g, "write timestamp record failed"); return err; diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c index 913434abc..9bc439b7c 100644 --- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c @@ -896,7 +896,7 @@ int gr_gp10b_init_ctx_state(struct gk20a *g) nvgpu_log_fn(g, " "); err = gr_gk20a_init_ctx_state(g); - if (err) { + if (err != 0) { return err; } 
@@ -905,7 +905,7 @@ int gr_gp10b_init_ctx_state(struct gk20a *g) gr_fecs_method_push_adr_discover_preemption_image_size_v(); op.mailbox.ret = &g->gr.ctx_vars.preempt_image_size; err = gr_gk20a_submit_fecs_method_op(g, op, false); - if (err) { + if (err != 0) { nvgpu_err(g, "query preempt image size failed"); return err; } @@ -928,7 +928,7 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size, nvgpu_log_fn(g, " "); err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem); - if (err) { + if (err != 0) { return err; } @@ -1016,7 +1016,7 @@ int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g, err = gr_gp10b_alloc_buffer(vm, g->gr.ctx_vars.preempt_image_size, &gr_ctx->preempt_ctxsw_buffer); - if (err) { + if (err != 0) { nvgpu_err(g, "cannot allocate preempt buffer"); goto fail; } @@ -1024,7 +1024,7 @@ int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g, err = gr_gp10b_alloc_buffer(vm, spill_size, &gr_ctx->spill_ctxsw_buffer); - if (err) { + if (err != 0) { nvgpu_err(g, "cannot allocate spill buffer"); goto fail_free_preempt; } @@ -1032,7 +1032,7 @@ int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g, err = gr_gp10b_alloc_buffer(vm, attrib_cb_size, &gr_ctx->betacb_ctxsw_buffer); - if (err) { + if (err != 0) { nvgpu_err(g, "cannot allocate beta buffer"); goto fail_free_spill; } @@ -1040,7 +1040,7 @@ int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g, err = gr_gp10b_alloc_buffer(vm, pagepool_size, &gr_ctx->pagepool_ctxsw_buffer); - if (err) { + if (err != 0) { nvgpu_err(g, "cannot allocate page pool"); goto fail_free_betacb; } @@ -1094,7 +1094,7 @@ int gr_gp10b_alloc_gr_ctx(struct gk20a *g, nvgpu_log_fn(g, " "); err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags); - if (err) { + if (err != 0) { return err; } @@ -1111,7 +1111,7 @@ int gr_gp10b_alloc_gr_ctx(struct gk20a *g, if (g->ops.gr.set_ctxsw_preemption_mode) { err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm, class, graphics_preempt_mode, compute_preempt_mode); - if (err) { + if (err != 0) { nvgpu_err(g, "set_ctxsw_preemption_mode failed"); goto fail_free_gk20a_ctx; } @@ -1238,7 +1238,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g, } err = gr_gk20a_ctx_patch_write_begin(g, gr_ctx, true); - if (err) { + if (err != 0) { nvgpu_err(g, "can't map patch context"); goto out; } @@ -1692,14 +1692,14 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); ret = gk20a_disable_channel_tsg(g, fault_ch); - if (ret) { + if (ret != 0) { nvgpu_err(g, "CILP: failed to disable channel/TSG!"); return ret; } ret = g->ops.fifo.update_runlist(g, fault_ch->runlist_id, ~0, true, false); - if (ret) { + if (ret != 0) { nvgpu_err(g, "CILP: failed to restart runlist 0!"); return ret; @@ -1751,7 +1751,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: looking up ctx id"); ret = gr_gk20a_get_ctx_id(g, fault_ch, &gr_ctx->ctx_id); - if (ret) { + if (ret != 0) { nvgpu_err(g, "CILP: error looking up ctx id!"); return ret; } @@ -1775,7 +1775,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, .cond.ok = GR_IS_UCODE_OP_EQUAL, .cond.fail = GR_IS_UCODE_OP_SKIP}); - if (ret) { + if (ret != 0) { nvgpu_err(g, "CILP: failed to enable ctxsw interrupt!"); return ret; } @@ -1788,7 +1788,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, fault_ch->chid); ret = gr_gp10b_disable_channel_or_tsg(g, fault_ch); - if (ret) { + if (ret != 0) { nvgpu_err(g, "CILP: failed to 
disable channel!!"); return ret; } @@ -1918,7 +1918,7 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n"); ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch); - if (ret) { + if (ret != 0) { nvgpu_err(g, "CILP: error while setting CILP preempt pending!"); return ret; } @@ -2005,7 +2005,7 @@ int gr_gp10b_handle_fecs_error(struct gk20a *g, gr_fecs_host_int_clear_ctxsw_intr1_clear_f()); ret = gr_gp10b_get_cilp_preempt_pending_chid(g, &chid); - if (ret) { + if (ret != 0) { goto clean_up; } @@ -2018,7 +2018,7 @@ int gr_gp10b_handle_fecs_error(struct gk20a *g, /* set preempt_pending to false */ ret = gr_gp10b_clear_cilp_preempt_pending(g, ch); - if (ret) { + if (ret != 0) { nvgpu_err(g, "CILP: error while unsetting CILP preempt pending!"); gk20a_channel_put(ch); goto clean_up; @@ -2093,7 +2093,7 @@ bool gr_gp10b_suspend_context(struct channel_gk20a *ch, if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP) { err = gr_gp10b_set_cilp_preempt_pending(g, ch); - if (err) { + if (err != 0) { nvgpu_err(g, "unable to set CILP preempt pending"); } else { *cilp_preempt_pending = true; @@ -2126,7 +2126,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g, nvgpu_mutex_acquire(&g->dbg_sessions_lock); err = gr_gk20a_disable_ctxsw(g); - if (err) { + if (err != 0) { nvgpu_err(g, "unable to stop gr ctxsw"); nvgpu_mutex_release(&g->dbg_sessions_lock); goto clean_up; @@ -2151,7 +2151,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g, nvgpu_mutex_release(&dbg_s->ch_list_lock); err = gr_gk20a_enable_ctxsw(g); - if (err) { + if (err != 0) { nvgpu_mutex_release(&g->dbg_sessions_lock); goto clean_up; } @@ -2217,12 +2217,12 @@ int gr_gp10b_set_boosted_ctx(struct channel_gk20a *ch, mem = &gr_ctx->mem; err = gk20a_disable_channel_tsg(g, ch); - if (err) { + if (err != 0) { return err; } err = gk20a_fifo_preempt(g, ch); - if (err) { + if (err != 0) { goto enable_ch; } @@ -2299,19 +2299,19 @@ int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch, compute_preempt_mode); err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm, class, graphics_preempt_mode, compute_preempt_mode); - if (err) { + if (err != 0) { nvgpu_err(g, "set_ctxsw_preemption_mode failed"); return err; } } err = gk20a_disable_channel_tsg(g, ch); - if (err) { + if (err != 0) { return err; } err = gk20a_fifo_preempt(g, ch); - if (err) { + if (err != 0) { goto enable_ch; } @@ -2320,7 +2320,7 @@ int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch, ch, mem); err = gr_gk20a_ctx_patch_write_begin(g, gr_ctx, true); - if (err) { + if (err != 0) { nvgpu_err(g, "can't map patch context"); goto enable_ch; } diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c index 2c0056e1f..d3db528b2 100644 --- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c @@ -65,7 +65,7 @@ int gp10b_init_bar2_vm(struct gk20a *g) /* allocate instance mem for bar2 */ err = g->ops.mm.alloc_inst_block(g, inst_block); - if (err) { + if (err != 0) { goto clean_up_va; } diff --git a/drivers/gpu/nvgpu/gv100/gr_gv100.c b/drivers/gpu/nvgpu/gv100/gr_gv100.c index 5e8a99dfb..3df7b580f 100644 --- a/drivers/gpu/nvgpu/gv100/gr_gv100.c +++ b/drivers/gpu/nvgpu/gv100/gr_gv100.c @@ -242,7 +242,7 @@ int gr_gv100_init_sm_id_table(struct gk20a *g) err = gr_gv100_scg_estimate_perf(g, gpc_tpc_mask, gpc, tpc, &perf); - if (err) { + if (err != 0) { nvgpu_err(g, "Error while estimating perf"); goto exit_build_table; diff --git 
a/drivers/gpu/nvgpu/gv100/hal_gv100.c b/drivers/gpu/nvgpu/gv100/hal_gv100.c index aa86cdfff..8b996e490 100644 --- a/drivers/gpu/nvgpu/gv100/hal_gv100.c +++ b/drivers/gpu/nvgpu/gv100/hal_gv100.c @@ -278,7 +278,7 @@ int gv100_init_gpu_characteristics(struct gk20a *g) int err; err = gk20a_init_gpu_characteristics(g); - if (err) + if (err != 0) return err; __nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS, true); diff --git a/drivers/gpu/nvgpu/gv100/nvlink_gv100.c b/drivers/gpu/nvgpu/gv100/nvlink_gv100.c index 822ca6b4b..4ec993c64 100644 --- a/drivers/gpu/nvgpu/gv100/nvlink_gv100.c +++ b/drivers/gpu/nvgpu/gv100/nvlink_gv100.c @@ -219,7 +219,7 @@ static const char *__gv100_device_type_to_str(u32 type) * Function prototypes */ static u32 __gv100_nvlink_get_link_reset_mask(struct gk20a *g); -static u32 gv100_nvlink_rxcal_en(struct gk20a *g, unsigned long mask); +static int gv100_nvlink_rxcal_en(struct gk20a *g, unsigned long mask); /* @@ -738,7 +738,7 @@ int gv100_nvlink_minion_send_command(struct gk20a *g, u32 link_id, /* Check last command succeded */ err = gv100_nvlink_minion_command_complete(g, link_id); - if (err) + if (err != 0) return -EINVAL; nvgpu_log(g, gpu_dbg_nvlink, @@ -763,10 +763,10 @@ int gv100_nvlink_minion_send_command(struct gk20a *g, u32 link_id, /* * Init UPHY */ -static u32 gv100_nvlink_minion_init_uphy(struct gk20a *g, unsigned long mask, +static int gv100_nvlink_minion_init_uphy(struct gk20a *g, unsigned long mask, bool sync) { - u32 err = 0; + int err = 0; u32 init_pll_cmd; u32 link_id, master_pll, slave_pll; u32 master_state, slave_state; @@ -813,7 +813,7 @@ static u32 gv100_nvlink_minion_init_uphy(struct gk20a *g, unsigned long mask, if (!(BIT(master_pll) & g->nvlink.init_pll_done)) { err = gv100_nvlink_minion_send_command(g, master_pll, init_pll_cmd, 0, sync); - if (err) { + if (err != 0) { nvgpu_err(g, " Error sending INITPLL to minion"); return err; } @@ -823,7 +823,7 @@ static u32 gv100_nvlink_minion_init_uphy(struct gk20a *g, unsigned long mask, } err = g->ops.nvlink.setup_pll(g, mask); - if (err) { + if (err != 0) { nvgpu_err(g, "Error setting up PLL"); return err; } @@ -832,7 +832,7 @@ static u32 gv100_nvlink_minion_init_uphy(struct gk20a *g, unsigned long mask, for_each_set_bit(link_id, &mask, 32) { err = gv100_nvlink_minion_send_command(g, link_id, minion_nvlink_dl_cmd_command_initphy_v(), 0, sync); - if (err) { + if (err != 0) { nvgpu_err(g, "Error on INITPHY minion DL command %u", link_id); return err; @@ -845,10 +845,10 @@ static u32 gv100_nvlink_minion_init_uphy(struct gk20a *g, unsigned long mask, /* * Configure AC coupling */ -static u32 gv100_nvlink_minion_configure_ac_coupling(struct gk20a *g, +static int gv100_nvlink_minion_configure_ac_coupling(struct gk20a *g, unsigned long mask, bool sync) { - u32 err = 0; + int err = 0; u32 i; u32 temp; @@ -863,7 +863,7 @@ static u32 gv100_nvlink_minion_configure_ac_coupling(struct gk20a *g, err = gv100_nvlink_minion_send_command(g, i, minion_nvlink_dl_cmd_command_setacmode_v(), 0, sync); - if (err) + if (err != 0) return err; } @@ -883,7 +883,7 @@ int gv100_nvlink_minion_data_ready_en(struct gk20a *g, ret = gv100_nvlink_minion_send_command(g, link_id, minion_nvlink_dl_cmd_command_initlaneenable_v(), 0, sync); - if (ret) { + if (ret != 0) { nvgpu_err(g, "Failed initlaneenable on link %u", link_id); return ret; @@ -893,7 +893,7 @@ int gv100_nvlink_minion_data_ready_en(struct gk20a *g, for_each_set_bit(link_id, &link_mask, 32) { ret = gv100_nvlink_minion_send_command(g, link_id, 
minion_nvlink_dl_cmd_command_initdlpl_v(), 0, sync); - if (ret) { + if (ret != 0) { nvgpu_err(g, "Failed initdlpl on link %u", link_id); return ret; } @@ -904,15 +904,15 @@ int gv100_nvlink_minion_data_ready_en(struct gk20a *g, /* * Request that minion disable the lane */ -static u32 gv100_nvlink_minion_lane_disable(struct gk20a *g, u32 link_id, +static int gv100_nvlink_minion_lane_disable(struct gk20a *g, u32 link_id, bool sync) { - u32 err = 0; + int err = 0; err = gv100_nvlink_minion_send_command(g, link_id, minion_nvlink_dl_cmd_command_lanedisable_v(), 0, sync); - if (err) + if (err != 0) nvgpu_err(g, " failed to disable lane on %d", link_id); return err; @@ -921,15 +921,15 @@ static u32 gv100_nvlink_minion_lane_disable(struct gk20a *g, u32 link_id, /* * Request that minion shutdown the lane */ -static u32 gv100_nvlink_minion_lane_shutdown(struct gk20a *g, u32 link_id, +static int gv100_nvlink_minion_lane_shutdown(struct gk20a *g, u32 link_id, bool sync) { - u32 err = 0; + int err = 0; err = gv100_nvlink_minion_send_command(g, link_id, minion_nvlink_dl_cmd_command_laneshutdown_v(), 0, sync); - if (err) + if (err != 0) nvgpu_err(g, " failed to shutdown lane on %d", link_id); return err; @@ -1110,7 +1110,7 @@ static void gv100_nvlink_dlpl_isr(struct gk20a *g, u32 link_id) u32 fatal_mask = 0; u32 intr = 0; bool retrain = false; - u32 err; + int err; intr = DLPL_REG_RD32(g, link_id, nvl_intr_r()) & DLPL_REG_RD32(g, link_id, nvl_intr_stall_en_r()); @@ -1135,7 +1135,7 @@ static void gv100_nvlink_dlpl_isr(struct gk20a *g, u32 link_id) if (retrain) { err = nvgpu_nvlink_train(g, link_id, false); - if (err) + if (err != 0) nvgpu_err(g, "failed to retrain link %d", link_id); } @@ -1539,7 +1539,7 @@ static int gv100_nvlink_enable_links_pre_top(struct gk20a *g, u32 links) u32 tmp; u32 reg; u32 delay = ioctrl_reset_sw_post_reset_delay_microseconds_v(); - u32 err; + int err; nvgpu_log(g, gpu_dbg_nvlink, " enabling 0x%lx links", enabled_links); /* Take links out of reset */ @@ -1571,7 +1571,7 @@ static int gv100_nvlink_enable_links_pre_top(struct gk20a *g, u32 links) */ if (g->ops.nvlink.rxdet) { err = g->ops.nvlink.rxdet(g, link_id); - if (err) + if (err != 0) return err; } @@ -1583,19 +1583,19 @@ static int gv100_nvlink_enable_links_pre_top(struct gk20a *g, u32 links) /* This should be done by the NVLINK API */ err = gv100_nvlink_minion_init_uphy(g, BIT(link_id), true); - if (err) { + if (err != 0) { nvgpu_err(g, "Failed to init phy of link: %u", link_id); return err; } err = gv100_nvlink_rxcal_en(g, BIT(link_id)); - if (err) { + if (err != 0) { nvgpu_err(g, "Failed to RXcal on link: %u", link_id); return err; } err = gv100_nvlink_minion_data_ready_en(g, BIT(link_id), true); - if (err) { + if (err != 0) { nvgpu_err(g, "Failed to set data ready link:%u", link_id); return err; @@ -1669,7 +1669,7 @@ static u32 gv100_nvlink_prbs_gen_en(struct gk20a *g, unsigned long mask) return 0; } -static u32 gv100_nvlink_rxcal_en(struct gk20a *g, unsigned long mask) +static int gv100_nvlink_rxcal_en(struct gk20a *g, unsigned long mask) { u32 link_id; struct nvgpu_timeout timeout; @@ -1719,7 +1719,7 @@ int gv100_nvlink_init(struct gk20a *g) return -ENODEV; err = nvgpu_nvlink_enumerate(g); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to enumerate nvlink"); goto fail; } @@ -1728,7 +1728,7 @@ int gv100_nvlink_init(struct gk20a *g) __nvgpu_set_enabled(g, NVGPU_MM_USE_PHYSICAL_SG, true); err = g->ops.fb.enable_nvlink(g); - if (err) { + if (err != 0) { nvgpu_err(g, "failed switch to nvlink sysmem"); goto fail; } @@ 
-2202,7 +2202,7 @@ int gv100_nvlink_link_early_init(struct gk20a *g, unsigned long mask) int err; err = gv100_nvlink_enable_links_pre_top(g, mask); - if (err) { + if (err != 0) { nvgpu_err(g, "Pre topology failed for links %lx", mask); return err; } @@ -2230,7 +2230,7 @@ int gv100_nvlink_interface_init(struct gk20a *g) } err = g->ops.fb.init_nvlink(g); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to setup nvlinks for sysmem"); return err; } @@ -2263,7 +2263,7 @@ int gv100_nvlink_reg_init(struct gk20a *g) endp = link->remote_info.device_type; err = gv100_nvlink_get_tlc_reginit(endp, &reg, &count); - if (err) { + if (err != 0) { nvgpu_err(g, "no reginit for endp=%u", endp); continue; } @@ -2329,7 +2329,7 @@ int gv100_nvlink_link_set_mode(struct gk20a *g, u32 link_id, u32 mode) { u32 state; u32 reg; - u32 err = 0; + int err = 0; nvgpu_log(g, gpu_dbg_nvlink, "link :%d, mode:%u", link_id, mode); @@ -2468,7 +2468,7 @@ int gv100_nvlink_link_set_sublink_mode(struct gk20a *g, u32 link_id, return -EINVAL; err = gv100_nvlink_link_sublink_check_change(g, link_id); - if (err) + if (err != 0) return err; if (is_rx_sublink) @@ -2500,7 +2500,7 @@ int gv100_nvlink_link_set_sublink_mode(struct gk20a *g, u32 link_id, DLPL_REG_WR32(g, link_id, nvl_sublink_change_r(), reg); err = gv100_nvlink_link_sublink_check_change(g, link_id); - if (err) { + if (err != 0) { nvgpu_err(g, "Error in TX to HS"); return err; } @@ -2534,7 +2534,7 @@ int gv100_nvlink_link_set_sublink_mode(struct gk20a *g, u32 link_id, DLPL_REG_WR32(g, link_id, nvl_sublink_change_r(), reg); err = gv100_nvlink_link_sublink_check_change(g, link_id); - if (err) { + if (err != 0) { nvgpu_err(g, "Error in TX to SAFE"); return err; } @@ -2560,7 +2560,7 @@ int gv100_nvlink_link_set_sublink_mode(struct gk20a *g, u32 link_id, DLPL_REG_WR32(g, link_id, nvl_sublink_change_r(), reg); err = gv100_nvlink_link_sublink_check_change(g, link_id); - if (err) { + if (err != 0) { nvgpu_err(g, "Error in TX to OFF"); return err; } @@ -2591,7 +2591,7 @@ int gv100_nvlink_link_set_sublink_mode(struct gk20a *g, u32 link_id, DLPL_REG_WR32(g, link_id, nvl_sublink_change_r(), reg); err = gv100_nvlink_link_sublink_check_change(g, link_id); - if (err) { + if (err != 0) { nvgpu_err(g, "Error in RX to OFF"); return err; } @@ -2613,7 +2613,7 @@ int gv100_nvlink_link_set_sublink_mode(struct gk20a *g, u32 link_id, nvgpu_err(g, "MODE %u", mode); } - if (err) + if (err != 0) nvgpu_err(g, " failed on set_sublink_mode"); return err; } @@ -2695,13 +2695,13 @@ int gv100_nvlink_early_init(struct gk20a *g) return -EINVAL; err = nvgpu_bios_get_nvlink_config_data(g); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to read nvlink vbios data"); goto nvlink_init_exit; } err = g->ops.nvlink.discover_ioctrl(g); - if (err) + if (err != 0) goto nvlink_init_exit; /* Enable NVLINK in MC */ @@ -2711,7 +2711,7 @@ int gv100_nvlink_early_init(struct gk20a *g) g->ops.mc.reset(g, mc_reset_nvlink_mask); err = g->ops.nvlink.discover_link(g); - if (err || g->nvlink.discovered_links == 0) { + if ((err != 0) || (g->nvlink.discovered_links == 0)) { nvgpu_err(g, "No links available"); goto nvlink_init_exit; } @@ -2757,13 +2757,13 @@ int gv100_nvlink_early_init(struct gk20a *g) g->nvlink.speed = nvgpu_nvlink_speed_20G; err = __gv100_nvlink_state_load_hal(g); - if (err) { + if (err != 0) { nvgpu_err(g, " failed Nvlink state load"); goto nvlink_init_exit; } err = gv100_nvlink_minion_configure_ac_coupling(g, g->nvlink.ac_coupling_mask, true); - if (err) { + if (err != 0) { nvgpu_err(g, " failed Nvlink state 
load"); goto nvlink_init_exit; } diff --git a/drivers/gpu/nvgpu/gv100/pmu_gv100.c b/drivers/gpu/nvgpu/gv100/pmu_gv100.c index 22f50f579..9b765fdcb 100644 --- a/drivers/gpu/nvgpu/gv100/pmu_gv100.c +++ b/drivers/gpu/nvgpu/gv100/pmu_gv100.c @@ -37,7 +37,7 @@ int gv100_pmu_init_acr(struct gk20a *g) rpc.wpr_regionId = 0x1; rpc.wpr_offset = 0x0; PMU_RPC_EXECUTE(status, pmu, ACR, INIT_WPR_REGION, &rpc, 0); - if (status) { + if (status != 0) { nvgpu_err(g, "Failed to execute RPC status=0x%x", status); } @@ -80,7 +80,7 @@ int gv100_load_falcon_ucode(struct gk20a *g, u32 falconidmask) rpc.wpr_base_virtual.lo = 0; rpc.wpr_base_virtual.hi = 0; PMU_RPC_EXECUTE(status, pmu, ACR, BOOTSTRAP_GR_FALCONS, &rpc, 0); - if (status) { + if (status != 0) { nvgpu_err(g, "Failed to execute RPC, status=0x%x", status); goto exit; } diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c index 76b14d923..64777e18f 100644 --- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c @@ -484,7 +484,7 @@ static int gv11b_fifo_poll_pbdma_chan_status(struct gk20a *g, u32 id, delay << 1, GR_IDLE_CHECK_MAX); } while (!nvgpu_timeout_expired(&timeout)); - if (ret) { + if (ret != 0) { nvgpu_err(g, "preempt timeout pbdma: %u pbdma_stat: %u " "tsgid: %u", pbdma_id, pbdma_stat, id); } @@ -609,7 +609,7 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id, delay << 1, GR_IDLE_CHECK_MAX); } while (!nvgpu_timeout_expired(&timeout)); - if (ret) { + if (ret != 0) { /* * The reasons a preempt can fail are: * 1.Some other stalling interrupt is asserted preventing @@ -841,7 +841,7 @@ int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg) int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid) { struct fifo_gk20a *f = &g->fifo; - u32 ret = 0; + int ret = 0; u32 token = PMU_INVALID_MUTEX_OWNER_ID; u32 mutex_ret = 0; u32 runlist_id; @@ -875,7 +875,7 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid) nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock); - if (ret) { + if (ret != 0) { if (nvgpu_platform_is_silicon(g)) { nvgpu_err(g, "preempt timed out for tsgid: %u, " "ctxsw timeout will trigger recovery if needed", tsgid); @@ -970,7 +970,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g, /* (chid == ~0 && !add) remove all act ch from runlist*/ err = gk20a_fifo_update_runlist_locked(g, rlid, FIFO_INVAL_CHANNEL_ID, add, wait_for_finish); - if (err) { + if (err != 0) { nvgpu_err(g, "runlist id %d is not cleaned up", rlid); } @@ -1763,11 +1763,11 @@ void gv11b_fifo_init_eng_method_buffers(struct gk20a *g, for (runque = 0; runque < num_pbdma; runque++) { err = nvgpu_dma_alloc_map_sys(vm, method_buffer_size, &tsg->eng_method_buffers[runque]); - if (err) { + if (err != 0) { break; } } - if (err) { + if (err != 0) { for (i = (runque - 1); i >= 0; i--) { nvgpu_dma_unmap_free(vm, &tsg->eng_method_buffers[i]); @@ -1887,7 +1887,7 @@ int gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c, nvgpu_mutex_acquire(&c->vm->syncpt_ro_map_lock); err = set_syncpt_ro_map_gpu_va_locked(c->vm); nvgpu_mutex_release(&c->vm->syncpt_ro_map_lock); - if (err) + if (err != 0) return err; nr_pages = DIV_ROUND_UP(g->syncpt_size, PAGE_SIZE); @@ -1923,7 +1923,7 @@ int gv11b_fifo_get_sync_ro_map(struct vm_gk20a *vm, nvgpu_mutex_acquire(&vm->syncpt_ro_map_lock); err = set_syncpt_ro_map_gpu_va_locked(vm); nvgpu_mutex_release(&vm->syncpt_ro_map_lock); - if (err) + if (err != 0) return err; *base_gpuva = vm->syncpt_ro_map_gpu_va; diff --git 
a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c index c46845aaf..ba0cc4611 100644 --- a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c @@ -1148,7 +1148,7 @@ int gr_gv11b_load_stencil_tbl(struct gk20a *g, struct gr_gk20a *gr) zbc_val.format = s_tbl->format; ret = g->ops.gr.add_zbc_s(g, gr, &zbc_val, i); - if (ret) { + if (ret != 0) { return ret; } } @@ -1501,7 +1501,7 @@ int gr_gv11b_alloc_buffer(struct vm_gk20a *vm, size_t size, nvgpu_log_fn(g, " "); err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem); - if (err) { + if (err != 0) { return err; } @@ -1589,7 +1589,7 @@ int gr_gv11b_set_ctxsw_preemption_mode(struct gk20a *g, err = gr_gp10b_alloc_buffer(vm, g->gr.ctx_vars.preempt_image_size, &gr_ctx->preempt_ctxsw_buffer); - if (err) { + if (err != 0) { nvgpu_err(g, "cannot allocate preempt buffer"); goto fail; } @@ -1597,7 +1597,7 @@ int gr_gv11b_set_ctxsw_preemption_mode(struct gk20a *g, err = gr_gp10b_alloc_buffer(vm, spill_size, &gr_ctx->spill_ctxsw_buffer); - if (err) { + if (err != 0) { nvgpu_err(g, "cannot allocate spill buffer"); goto fail_free_preempt; } @@ -1605,7 +1605,7 @@ int gr_gv11b_set_ctxsw_preemption_mode(struct gk20a *g, err = gr_gp10b_alloc_buffer(vm, attrib_cb_size, &gr_ctx->betacb_ctxsw_buffer); - if (err) { + if (err != 0) { nvgpu_err(g, "cannot allocate beta buffer"); goto fail_free_spill; } @@ -1613,7 +1613,7 @@ int gr_gv11b_set_ctxsw_preemption_mode(struct gk20a *g, err = gr_gp10b_alloc_buffer(vm, pagepool_size, &gr_ctx->pagepool_ctxsw_buffer); - if (err) { + if (err != 0) { nvgpu_err(g, "cannot allocate page pool"); goto fail_free_betacb; } @@ -1719,7 +1719,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g, } err = gr_gk20a_ctx_patch_write_begin(g, gr_ctx, true); - if (err) { + if (err != 0) { nvgpu_err(g, "can't map patch context"); goto out; } @@ -2193,7 +2193,7 @@ static int gr_gv11b_handle_warp_esr_error_mmu_nack(struct gk20a *g, * recovery path even if channel is invalid. We want to explicitly check * for teardown value in mmu fault handler. 
*/ - if (!err) { + if (err == 0) { gk20a_channel_put(fault_ch); } @@ -2369,7 +2369,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g, */ ret = gr_gv11b_handle_all_warp_esr_errors(g, gpc, tpc, sm, warp_esr_error, fault_ch); - if (ret) { + if (ret != 0) { return ret; } @@ -2439,7 +2439,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g, nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n"); ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch); - if (ret) { + if (ret != 0) { nvgpu_err(g, "CILP: error while setting CILP preempt pending!"); return ret; } @@ -2699,7 +2699,7 @@ int gr_gv11b_init_sw_veid_bundle(struct gk20a *g) &g->gr.ctx_vars.sw_veid_bundle_init; u32 i; u32 last_bundle_data = 0; - u32 err = 0; + int err = 0; for (i = 0; i < sw_veid_bundle_init->count; i++) { nvgpu_log_fn(g, "veid bundle count: %d", i); @@ -2718,14 +2718,14 @@ int gr_gv11b_init_sw_veid_bundle(struct gk20a *g) nvgpu_log_fn(g, "go idle bundle"); gk20a_writel(g, gr_pipe_bundle_address_r(), sw_veid_bundle_init->l[i].addr); - err |= gr_gk20a_wait_idle(g, + err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g), GR_IDLE_CHECK_DEFAULT); } else { err = gv11b_write_bundle_veid_state(g, i); } - if (err) { + if (err != 0) { nvgpu_err(g, "failed to init sw veid bundle"); break; } @@ -2879,12 +2879,12 @@ int gr_gv11b_commit_inst(struct channel_gk20a *c, u64 gpu_va) nvgpu_log_fn(g, " "); err = gv11b_alloc_subctx_header(c); - if (err) { + if (err != 0) { return err; } err = gv11b_update_subctx_header(c, gpu_va); - if (err) { + if (err != 0) { return err; } @@ -3081,7 +3081,7 @@ int gr_gv11b_init_fs_state(struct gk20a *g) } err = gr_gk20a_init_fs_state(g); - if (err) { + if (err != 0) { return err; } @@ -3268,7 +3268,7 @@ int gv11b_gr_set_sm_debug_mode(struct gk20a *g, } err = gr_gk20a_exec_ctx_ops(ch, ops, i, i, 0, NULL); - if (err) { + if (err != 0) { nvgpu_err(g, "Failed to access register\n"); } nvgpu_kfree(g, ops); @@ -3416,7 +3416,7 @@ void gv11b_gr_suspend_single_sm(struct gk20a *g, err = g->ops.gr.wait_for_sm_lock_down(g, gpc, tpc, sm, global_esr_mask, check_errors); - if (err) { + if (err != 0) { nvgpu_err(g, "SuspendSm failed"); return; @@ -3458,7 +3458,7 @@ void gv11b_gr_suspend_all_sms(struct gk20a *g, err = g->ops.gr.wait_for_sm_lock_down(g, gpc, tpc, sm, global_esr_mask, check_errors); - if (err) { + if (err != 0) { nvgpu_err(g, "SuspendAllSms failed"); return; @@ -4852,7 +4852,7 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g, &gpc_num, &tpc_num, &ppc_num, &be_num, &broadcast_flags); nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type = %d", addr_type); - if (err) { + if (err != 0) { return err; } @@ -4896,7 +4896,7 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g, else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC) { err = gr_gk20a_split_ppc_broadcast_addr(g, addr, gpc_num, priv_addr_table, &t); - if (err) { + if (err != 0) { return err; } } else { diff --git a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c index 8027d1657..090d99ea1 100644 --- a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c @@ -130,7 +130,7 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g) err = nvgpu_dma_alloc_map_sys(vm, fb_size, &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]); - if (err) { + if (err != 0) { nvgpu_err(g, "Error in hw mmu fault buf [0] alloc in bar2 vm "); /* Fault will be snapped in pri reg but not in buffer */ @@ -142,7 +142,7 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g) 
&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY])) { err = nvgpu_dma_alloc_map_sys(vm, fb_size, &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY]); - if (err) { + if (err != 0) { nvgpu_err(g, "Error in hw mmu fault buf [1] alloc in bar2 vm "); /* Fault will be snapped in pri reg but not in buffer */ @@ -178,7 +178,7 @@ static int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g) err = gv11b_mm_mmu_fault_info_buf_init(g); - if (!err) { + if (err == 0) { gv11b_mm_mmu_hw_fault_buf_init(g); } @@ -194,7 +194,7 @@ int gv11b_init_mm_setup_hw(struct gk20a *g) err = gk20a_init_mm_setup_hw(g); err = gv11b_mm_mmu_fault_setup_sw(g); - if (!err) { + if (err == 0) { gv11b_mm_mmu_fault_setup_hw(g); } diff --git a/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c b/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c index 499a00a88..661b06f20 100644 --- a/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c @@ -67,7 +67,7 @@ int gv11b_alloc_subctx_header(struct channel_gk20a *c) if (!nvgpu_mem_is_valid(ctxheader)) { ret = nvgpu_dma_alloc_sys(g, ctxsw_prog_fecs_header_v(), ctxheader); - if (ret) { + if (ret != 0) { nvgpu_err(g, "failed to allocate sub ctx header"); return ret; } diff --git a/drivers/gpu/nvgpu/lpwr/lpwr.c b/drivers/gpu/nvgpu/lpwr/lpwr.c index c8cfb8403..104944cae 100644 --- a/drivers/gpu/nvgpu/lpwr/lpwr.c +++ b/drivers/gpu/nvgpu/lpwr/lpwr.c @@ -191,19 +191,19 @@ static int get_lpwr_ms_table(struct gk20a *g) return 0; } -u32 nvgpu_lpwr_pg_setup(struct gk20a *g) +int nvgpu_lpwr_pg_setup(struct gk20a *g) { - u32 err = 0; + int err = 0; nvgpu_log_fn(g, " "); err = get_lpwr_gr_table(g); - if (err) { + if (err != 0) { return err; } err = get_lpwr_ms_table(g); - if (err) { + if (err != 0) { return err; } @@ -220,7 +220,7 @@ static void nvgpu_pmu_handle_param_lpwr_msg(struct gk20a *g, nvgpu_log_fn(g, " "); - if (status != 0) { + if (status != 0U) { nvgpu_err(g, "LWPR PARAM cmd aborted"); return; } @@ -423,7 +423,7 @@ int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock) if (is_rppg_supported) { if (g->support_pmu && g->elpg_enabled) { status = nvgpu_pmu_disable_elpg(g); - if (status) { + if (status != 0) { goto exit_unlock; } } diff --git a/drivers/gpu/nvgpu/lpwr/lpwr.h b/drivers/gpu/nvgpu/lpwr/lpwr.h index c38ba6296..038010677 100644 --- a/drivers/gpu/nvgpu/lpwr/lpwr.h +++ b/drivers/gpu/nvgpu/lpwr/lpwr.h @@ -90,7 +90,7 @@ struct obj_lwpr { u32 mclk_change_cache; }; -u32 nvgpu_lpwr_pg_setup(struct gk20a *g); +int nvgpu_lpwr_pg_setup(struct gk20a *g); int nvgpu_lwpr_mclk_change(struct gk20a *g, u32 pstate); int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock); int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock); diff --git a/drivers/gpu/nvgpu/lpwr/rppg.c b/drivers/gpu/nvgpu/lpwr/rppg.c index 13e81264c..786738ad6 100644 --- a/drivers/gpu/nvgpu/lpwr/rppg.c +++ b/drivers/gpu/nvgpu/lpwr/rppg.c @@ -32,7 +32,7 @@ static void pmu_handle_rppg_init_msg(struct gk20a *g, struct pmu_msg *msg, { u32 *success = param; - if (status == 0) { + if (status == 0U) { switch (msg->msg.pg.rppg_msg.cmn.msg_id) { case NV_PMU_RPPG_MSG_ID_INIT_CTRL_ACK: *success = 1; @@ -46,11 +46,11 @@ static void pmu_handle_rppg_init_msg(struct gk20a *g, struct pmu_msg *msg, msg->msg.pg.msg_type); } -static u32 rppg_send_cmd(struct gk20a *g, struct nv_pmu_rppg_cmd *prppg_cmd) +static int rppg_send_cmd(struct gk20a *g, struct nv_pmu_rppg_cmd *prppg_cmd) { struct pmu_cmd cmd; u32 seq; - u32 status = 0; + int status = 0; u32 success = 0; memset(&cmd, 0, sizeof(struct pmu_cmd)); @@ -82,7 +82,7 @@ static u32 
rppg_send_cmd(struct gk20a *g, struct nv_pmu_rppg_cmd *prppg_cmd) status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_rppg_init_msg, &success, &seq, ~0); - if (status) { + if (status != 0) { nvgpu_err(g, "Unable to submit parameter command %d", prppg_cmd->cmn.cmd_id); goto exit; @@ -102,7 +102,7 @@ exit: return status; } -static u32 rppg_init(struct gk20a *g) +static int rppg_init(struct gk20a *g) { struct nv_pmu_rppg_cmd rppg_cmd; @@ -111,7 +111,7 @@ static u32 rppg_init(struct gk20a *g) return rppg_send_cmd(g, &rppg_cmd); } -static u32 rppg_ctrl_init(struct gk20a *g, u8 ctrl_id) +static int rppg_ctrl_init(struct gk20a *g, u8 ctrl_id) { struct nv_pmu_rppg_cmd rppg_cmd; @@ -128,9 +128,9 @@ static u32 rppg_ctrl_init(struct gk20a *g, u8 ctrl_id) return rppg_send_cmd(g, &rppg_cmd); } -u32 init_rppg(struct gk20a *g) +int init_rppg(struct gk20a *g) { - u32 status; + int status; status = rppg_init(g); if (status != 0) { diff --git a/drivers/gpu/nvgpu/lpwr/rppg.h b/drivers/gpu/nvgpu/lpwr/rppg.h index d66600a07..0a0d41518 100644 --- a/drivers/gpu/nvgpu/lpwr/rppg.h +++ b/drivers/gpu/nvgpu/lpwr/rppg.h @@ -22,5 +22,5 @@ #ifndef NVGPU_LPWR_RPPG_H #define NVGPU_LPWR_RPPG_H -u32 init_rppg(struct gk20a *g); +int init_rppg(struct gk20a *g); #endif /* NVGPU_LPWR_RPPG_H */ diff --git a/drivers/gpu/nvgpu/pstate/pstate.c b/drivers/gpu/nvgpu/pstate/pstate.c index c1f696aa0..967a9f15e 100644 --- a/drivers/gpu/nvgpu/pstate/pstate.c +++ b/drivers/gpu/nvgpu/pstate/pstate.c @@ -50,82 +50,82 @@ int gk20a_init_pstate_support(struct gk20a *g) nvgpu_log_fn(g, " "); err = volt_rail_sw_setup(g); - if (err) { + if (err != 0) { return err; } err = volt_dev_sw_setup(g); - if (err) { + if (err != 0) { return err; } err = volt_policy_sw_setup(g); - if (err) { + if (err != 0) { return err; } err = clk_vin_sw_setup(g); - if (err) { + if (err != 0) { return err; } err = clk_fll_sw_setup(g); - if (err) { + if (err != 0) { return err; } err = therm_domain_sw_setup(g); - if (err) { + if (err != 0) { return err; } err = vfe_var_sw_setup(g); - if (err) { + if (err != 0) { return err; } err = vfe_equ_sw_setup(g); - if (err) { + if (err != 0) { return err; } err = clk_domain_sw_setup(g); - if (err) { + if (err != 0) { return err; } err = clk_vf_point_sw_setup(g); - if (err) { + if (err != 0) { return err; } err = clk_prog_sw_setup(g); - if (err) { + if (err != 0) { return err; } err = pstate_sw_setup(g); - if (err) { + if (err != 0) { return err; } if(g->ops.clk.support_pmgr_domain) { err = pmgr_domain_sw_setup(g); - if (err) { + if (err != 0) { return err; } } if (g->ops.clk.support_clk_freq_controller) { err = clk_freq_controller_sw_setup(g); - if (err) { + if (err != 0) { return err; } } if(g->ops.clk.support_lpwr_pg) { err = nvgpu_lpwr_pg_setup(g); - if (err) { + if (err != 0) { return err; } } @@ -142,29 +142,29 @@ int gk20a_init_pstate_pmu_support(struct gk20a *g) if (g->ops.clk.mclk_init) { err = g->ops.clk.mclk_init(g); - if (err) { + if (err != 0U) { nvgpu_err(g, "failed to set mclk"); /* Indicate error and continue */ } } err = volt_rail_pmu_setup(g); - if (err) { + if (err != 0U) { return err; } err = volt_dev_pmu_setup(g); - if (err) { + if (err != 0U) { return err; } err = volt_policy_pmu_setup(g); - if (err) { + if (err != 0U) { return err; } err = g->ops.pmu_ver.volt.volt_send_load_cmd_to_pmu(g); - if (err) { + if (err != 0U) { nvgpu_err(g, "Failed to send VOLT LOAD CMD to PMU: status = 0x%08x.", err); @@ -172,58 +172,58 @@ int gk20a_init_pstate_pmu_support(struct gk20a *g) } err = 
therm_domain_pmu_setup(g); - if (err) { + if (err != 0U) { return err; } err = vfe_var_pmu_setup(g); - if (err) { + if (err != 0U) { return err; } err = vfe_equ_pmu_setup(g); - if (err) { + if (err != 0U) { return err; } err = clk_domain_pmu_setup(g); - if (err) { + if (err != 0U) { return err; } err = clk_prog_pmu_setup(g); - if (err) { + if (err != 0U) { return err; } err = clk_vin_pmu_setup(g); - if (err) { + if (err != 0U) { return err; } err = clk_fll_pmu_setup(g); - if (err) { + if (err != 0U) { return err; } err = clk_vf_point_pmu_setup(g); - if (err) { + if (err != 0U) { return err; } if (g->ops.clk.support_clk_freq_controller) { err = clk_freq_controller_pmu_setup(g); - if (err) { + if (err != 0U) { return err; } } err = clk_pmu_vin_load(g); - if (err) { + if (err != 0U) { return err; } err = g->ops.clk.perf_pmu_vfe_load(g); - if (err) { + if (err != 0U) { return err; } @@ -242,7 +242,7 @@ static int pstate_construct_super(struct gk20a *g, struct boardobj **ppboardobj, int err; err = boardobj_construct_super(g, ppboardobj, size, args); - if (err) { + if (err != 0) { return err; } @@ -286,7 +286,7 @@ static int pstate_insert(struct gk20a *g, struct pstate *pstate, int index) err = boardobjgrp_objinsert(&pstates->super.super, (struct boardobj *)pstate, index); - if (err) { + if (err != 0) { nvgpu_err(g, "error adding pstate boardobj %d", index); return err; @@ -380,7 +380,7 @@ static int parse_pstate_table_5x(struct gk20a *g, } err = parse_pstate_entry_5x(g, hdr, entry, &_pstate); - if (err) { + if (err != 0) { goto done; } @@ -390,7 +390,7 @@ static int parse_pstate_table_5x(struct gk20a *g, } err = pstate_insert(g, pstate, i); - if (err) { + if (err != 0) { goto done; } } @@ -409,12 +409,12 @@ static int pstate_sw_setup(struct gk20a *g) nvgpu_cond_init(&g->perf_pmu.pstatesobjs.pstate_notifier_wq); err = nvgpu_mutex_init(&g->perf_pmu.pstatesobjs.pstate_mutex); - if (err) { + if (err != 0) { return err; } err = boardobjgrpconstruct_e32(g, &g->perf_pmu.pstatesobjs.super); - if (err) { + if (err != 0) { nvgpu_err(g, "error creating boardobjgrp for pstates, err=%d", err); @@ -440,7 +440,7 @@ static int pstate_sw_setup(struct gk20a *g) err = parse_pstate_table_5x(g, hdr); done: - if (err) { + if (err != 0) { nvgpu_mutex_destroy(&g->perf_pmu.pstatesobjs.pstate_mutex); } return err; diff --git a/drivers/gpu/nvgpu/tu104/fifo_tu104.c b/drivers/gpu/nvgpu/tu104/fifo_tu104.c index 75f832faf..b5d585420 100644 --- a/drivers/gpu/nvgpu/tu104/fifo_tu104.c +++ b/drivers/gpu/nvgpu/tu104/fifo_tu104.c @@ -217,7 +217,7 @@ int tu104_init_pdb_cache_war(struct gk20a *g) * PDB bound to 257th instance block */ err = nvgpu_dma_alloc_sys(g, size, &g->pdb_cache_war_mem); - if (err) { + if (err != 0) { return err; } diff --git a/drivers/gpu/nvgpu/tu104/gr_tu104.c b/drivers/gpu/nvgpu/tu104/gr_tu104.c index e7a33de7b..2e71d0469 100644 --- a/drivers/gpu/nvgpu/tu104/gr_tu104.c +++ b/drivers/gpu/nvgpu/tu104/gr_tu104.c @@ -136,12 +136,12 @@ int gr_tu104_alloc_global_ctx_buffers(struct gk20a *g) err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[RTV_CIRCULAR_BUFFER], rtv_circular_buffer_size); - if (err) { + if (err != 0) { return err; } err = gr_gk20a_alloc_global_ctx_buffers(g); - if (err) { + if (err != 0) { goto clean_up; } @@ -192,7 +192,7 @@ int gr_tu104_map_global_ctx_buffers(struct gk20a *g, g_bfr_index[RTV_CIRCULAR_BUFFER_VA] = RTV_CIRCULAR_BUFFER; err = gr_gk20a_map_global_ctx_buffers(g, ch); - if (err) { + if (err != 0) { goto clean_up; } @@ -229,7 +229,7 @@ int 
gr_tu104_commit_global_ctx_buffers(struct gk20a *g, u32 size; err = gr_gk20a_commit_global_ctx_buffers(g, ch, patch); - if (err) { + if (err != 0) { return err; } @@ -242,7 +242,7 @@ int gr_tu104_commit_global_ctx_buffers(struct gk20a *g, if (patch) { int err; err = gr_gk20a_ctx_patch_write_begin(g, gr_ctx, false); - if (err) { + if (err != 0) { return err; } } diff --git a/drivers/gpu/nvgpu/tu104/hal_tu104.c b/drivers/gpu/nvgpu/tu104/hal_tu104.c index 14775bf6d..d9ef78950 100644 --- a/drivers/gpu/nvgpu/tu104/hal_tu104.c +++ b/drivers/gpu/nvgpu/tu104/hal_tu104.c @@ -291,7 +291,7 @@ static int tu104_init_gpu_characteristics(struct gk20a *g) int err; err = gk20a_init_gpu_characteristics(g); - if (err) { + if (err != 0) { return err; } diff --git a/drivers/gpu/nvgpu/tu104/nvlink_tu104.c b/drivers/gpu/nvgpu/tu104/nvlink_tu104.c index 807805282..b4fc6ce23 100644 --- a/drivers/gpu/nvgpu/tu104/nvlink_tu104.c +++ b/drivers/gpu/nvgpu/tu104/nvlink_tu104.c @@ -38,13 +38,13 @@ int tu104_nvlink_rxdet(struct gk20a *g, u32 link_id) { - u32 ret = 0; + int ret = 0; u32 reg; struct nvgpu_timeout timeout; ret = gv100_nvlink_minion_send_command(g, link_id, 0x00000005U, 0, true); - if (ret) { + if (ret != 0) { nvgpu_err(g, "Error during INITRXTERM minion DLCMD on link %u", link_id); return ret; @@ -52,7 +52,7 @@ int tu104_nvlink_rxdet(struct gk20a *g, u32 link_id) ret = gv100_nvlink_minion_send_command(g, link_id, minion_nvlink_dl_cmd_command_turing_rxdet_v(), 0, true); - if (ret) { + if (ret != 0) { nvgpu_err(g, "Error during RXDET minion DLCMD on link %u", link_id); return ret; @@ -98,7 +98,7 @@ int tu104_nvlink_setup_pll(struct gk20a *g, unsigned long link_mask) ret = gv100_nvlink_minion_send_command(g, link_id, minion_nvlink_dl_cmd_command_txclkswitch_pll_v(), 0, true); - if (ret) { + if (ret != 0) { nvgpu_err(g, "Error: TXCLKSWITCH_PLL dlcmd on link %u", link_id); return ret; @@ -106,7 +106,7 @@ int tu104_nvlink_setup_pll(struct gk20a *g, unsigned long link_mask) ret = nvgpu_timeout_init(g, &timeout, NV_NVLINK_REG_POLL_TIMEOUT_MS, NVGPU_TIMER_CPU_TIMER); - if (ret) { + if (ret != 0) { nvgpu_err(g, "Error during timeout init"); return ret; } @@ -210,7 +210,7 @@ int tu104_nvlink_minion_data_ready_en(struct gk20a *g, ret = gv100_nvlink_minion_send_command(g, link_id, minion_nvlink_dl_cmd_command_initdlpl_v(), 0, sync); - if (ret) { + if (ret != 0) { nvgpu_err(g, "Minion initdlpl failed on link %u", link_id); return ret; @@ -220,7 +220,7 @@ int tu104_nvlink_minion_data_ready_en(struct gk20a *g, ret = gv100_nvlink_minion_send_command(g, link_id, minion_nvlink_dl_cmd_command_turing_initdlpl_to_chipa_v(), 0, sync); - if (ret) { + if (ret != 0) { nvgpu_err(g, "Minion initdlpl_to_chipA failed on link\ %u", link_id); return ret; @@ -230,7 +230,7 @@ int tu104_nvlink_minion_data_ready_en(struct gk20a *g, ret = gv100_nvlink_minion_send_command(g, link_id, minion_nvlink_dl_cmd_command_inittl_v(), 0, sync); - if (ret) { + if (ret != 0) { nvgpu_err(g, "Minion inittl failed on link %u", link_id); return ret; @@ -240,7 +240,7 @@ int tu104_nvlink_minion_data_ready_en(struct gk20a *g, ret = gv100_nvlink_minion_send_command(g, link_id, minion_nvlink_dl_cmd_command_initlaneenable_v(), 0, sync); - if (ret) { + if (ret != 0) { nvgpu_err(g, "Minion initlaneenable failed on link %u", link_id); return ret;
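
The hunks above all apply the same two mechanical fixes: every `if` that tested a bare integer error code now spells out the comparison (`if (err != 0)`), and functions or locals that carry error codes move from `u32` to `int`, since these paths return negative errno values that an unsigned type cannot represent without implicit conversion. This matches the shape required by MISRA C:2012 Rule 14.4 (controlling expressions shall have essentially Boolean type); the diff itself does not state its motivation, so take that reading as an inference. Below is a minimal standalone sketch of the pattern; sample_alloc() and sample_init() are hypothetical stand-ins for illustration, not nvgpu functions.

    #include <stdio.h>

    /* Hypothetical stand-in for an nvgpu allocation call: returns 0 on
     * success, a negative errno value on failure (here -ENOMEM = -12). */
    static int sample_alloc(void)
    {
            return -12;
    }

    static int sample_init(void)
    {
            int err;        /* signed: must hold negative errno values;
                             * a u32 would store -12 as 4294967284 and
                             * recover it only via implementation-defined
                             * conversion */

            err = sample_alloc();
            if (err != 0) { /* explicit comparison, not "if (err)" */
                    return err;
            }

            return 0;
    }

    int main(void)
    {
            /* prints "sample_init() = -12" */
            printf("sample_init() = %d\n", sample_init());
            return 0;
    }

The same reasoning explains the hunks that use `0U` rather than `0` (the lpwr/rppg message handlers and the pstate PMU setup path): there the tested variable stays unsigned, so the literal is made unsigned to keep both operands of the comparison the same signedness instead of narrowing the variable.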