diff --git a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c index 396f04724..2b4a17859 100644 --- a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c +++ b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c @@ -405,7 +405,7 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, } err = nvgpu_alloc_common_init(na, g, name, a, false, &bitmap_ops); - if (err) { + if (err != 0) { goto fail; } diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c index a7121c97a..e3bd5e304 100644 --- a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c +++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c @@ -1320,7 +1320,7 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, } err = nvgpu_alloc_common_init(na, g, name, a, false, &buddy_ops); - if (err) { + if (err != 0) { goto fail; } @@ -1375,7 +1375,7 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, a->fixed_allocs = NULL; nvgpu_init_list_node(&a->co_list); err = balloc_init_lists(a); - if (err) { + if (err != 0) { goto fail; } diff --git a/drivers/gpu/nvgpu/common/mm/dma.c b/drivers/gpu/nvgpu/common/mm/dma.c index 60fe36589..6f648c696 100644 --- a/drivers/gpu/nvgpu/common/mm/dma.c +++ b/drivers/gpu/nvgpu/common/mm/dma.c @@ -50,7 +50,7 @@ int nvgpu_dma_alloc_flags(struct gk20a *g, unsigned long flags, size_t size, NVGPU_DMA_NO_KERNEL_MAPPING, size, mem); - if (!err) { + if (err == 0) { return 0; } @@ -107,7 +107,7 @@ int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags, flags | NVGPU_DMA_NO_KERNEL_MAPPING, size, mem); - if (!err) { + if (err == 0) { return 0; } @@ -131,7 +131,7 @@ int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags, { int err = nvgpu_dma_alloc_flags_sys(vm->mm->g, flags, size, mem); - if (err) { + if (err != 0) { return err; } @@ -162,7 +162,7 @@ int nvgpu_dma_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags, { int err = nvgpu_dma_alloc_flags_vid(vm->mm->g, flags, size, mem); - if (err) { + if (err != 0) { return err; } diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c index a25dd4bb3..cc65c68cd 100644 --- a/drivers/gpu/nvgpu/common/mm/gmmu.c +++ b/drivers/gpu/nvgpu/common/mm/gmmu.c @@ -278,7 +278,7 @@ static int pd_allocate(struct vm_gk20a *vm, } err = nvgpu_pd_alloc(vm, pd, pd_size(l, attrs)); - if (err) { + if (err != 0) { nvgpu_info(vm->mm->g, "error allocating page directory!"); return err; } @@ -451,7 +451,7 @@ static int __set_pd_level(struct vm_gk20a *vm, chunk_size, attrs); - if (err) { + if (err != 0) { return err; } } @@ -560,7 +560,7 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm, virt_addr, chunk_length, attrs); - if (err) { + if (err != 0) { break; } @@ -740,7 +740,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm, err = __nvgpu_gmmu_update_page_table(vm, sgt, buffer_offset, vaddr, size, &attrs); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to update ptes on map"); goto fail_validate; } @@ -793,7 +793,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm, /* unmap here needs to know the page size we assigned at mapping */ err = __nvgpu_gmmu_update_page_table(vm, NULL, 0, vaddr, size, &attrs); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to update gmmu ptes on unmap"); } @@ -929,7 +929,7 @@ int __nvgpu_set_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte) err = __nvgpu_locate_pte(g, vm, &vm->pdb, vaddr, 0, &attrs, NULL, &pd, 
&pd_idx, &pd_offs); - if (err) { + if (err != 0) { return err; } diff --git a/drivers/gpu/nvgpu/common/mm/lockless_allocator.c b/drivers/gpu/nvgpu/common/mm/lockless_allocator.c index 59fae76db..0f89373ab 100644 --- a/drivers/gpu/nvgpu/common/mm/lockless_allocator.c +++ b/drivers/gpu/nvgpu/common/mm/lockless_allocator.c @@ -188,7 +188,7 @@ int nvgpu_lockless_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, } err = nvgpu_alloc_common_init(na, g, name, a, false, &pool_ops); - if (err) { + if (err != 0) { goto fail; } diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c index c9aac4aff..04fd6c43d 100644 --- a/drivers/gpu/nvgpu/common/mm/mm.c +++ b/drivers/gpu/nvgpu/common/mm/mm.c @@ -241,7 +241,7 @@ static int nvgpu_init_system_vm(struct mm_gk20a *mm) } err = g->ops.mm.alloc_inst_block(g, inst_block); - if (err) { + if (err != 0) { goto clean_up_vm; } g->ops.mm.init_inst_block(inst_block, mm->pmu.vm, big_page_size); @@ -260,7 +260,7 @@ static int nvgpu_init_hwpm(struct mm_gk20a *mm) struct nvgpu_mem *inst_block = &mm->hwpm.inst_block; err = g->ops.mm.alloc_inst_block(g, inst_block); - if (err) { + if (err != 0) { return err; } g->ops.mm.init_inst_block(inst_block, mm->pmu.vm, 0); @@ -307,14 +307,14 @@ static int nvgpu_init_mmu_debug(struct mm_gk20a *mm) if (!nvgpu_mem_is_valid(&mm->mmu_wr_mem)) { err = nvgpu_dma_alloc_sys(g, SZ_4K, &mm->mmu_wr_mem); - if (err) { + if (err != 0) { goto err; } } if (!nvgpu_mem_is_valid(&mm->mmu_rd_mem)) { err = nvgpu_dma_alloc_sys(g, SZ_4K, &mm->mmu_rd_mem); - if (err) { + if (err != 0) { goto err_free_wr_mem; } } @@ -394,7 +394,7 @@ static int nvgpu_init_bar1_vm(struct mm_gk20a *mm) } err = g->ops.mm.alloc_inst_block(g, inst_block); - if (err) { + if (err != 0) { goto clean_up_vm; } g->ops.mm.init_inst_block(inst_block, mm->bar1.vm, big_page_size); @@ -437,7 +437,7 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g) mm->vidmem.ce_ctx_id = (u32)~0; err = nvgpu_vidmem_init(mm); - if (err) { + if (err != 0) { return err; } @@ -449,51 +449,51 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g) if (g->acr.alloc_blob_space != NULL && !nvgpu_is_enabled(g, NVGPU_MM_UNIFIED_MEMORY)) { err = g->acr.alloc_blob_space(g, 0, &g->acr.ucode_blob); - if (err) { + if (err != 0) { return err; } } err = nvgpu_alloc_sysmem_flush(g); - if (err) { + if (err != 0) { return err; } err = nvgpu_init_bar1_vm(mm); - if (err) { + if (err != 0) { return err; } if (g->ops.mm.init_bar2_vm) { err = g->ops.mm.init_bar2_vm(g); - if (err) { + if (err != 0) { return err; } } err = nvgpu_init_system_vm(mm); - if (err) { + if (err != 0) { return err; } err = nvgpu_init_hwpm(mm); - if (err) { + if (err != 0) { return err; } if (g->has_cde) { err = nvgpu_init_cde_vm(mm); - if (err) { + if (err != 0) { return err; } } err = nvgpu_init_ce_vm(mm); - if (err) { + if (err != 0) { return err; } err = nvgpu_init_mmu_debug(mm); - if (err) + if (err != 0) return err; mm->remove_support = nvgpu_remove_mm_support; @@ -510,14 +510,14 @@ static int nvgpu_init_mm_pdb_cache_war(struct gk20a *g) if (g->ops.fifo.init_pdb_cache_war) { err = g->ops.fifo.init_pdb_cache_war(g); - if (err) { + if (err != 0) { return err; } } if (g->ops.fb.apply_pdb_cache_war) { err = g->ops.fb.apply_pdb_cache_war(g); - if (err) { + if (err != 0) { return err; } } @@ -527,20 +527,20 @@ static int nvgpu_init_mm_pdb_cache_war(struct gk20a *g) int nvgpu_init_mm_support(struct gk20a *g) { - u32 err; + int err; err = nvgpu_init_mm_reset_enable_hw(g); - if (err) { + if (err != 0) { return err; } err = 
nvgpu_init_mm_pdb_cache_war(g); - if (err) { + if (err != 0) { return err; } err = nvgpu_init_mm_setup_sw(g); - if (err) { + if (err != 0) { return err; } diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c index c49dcfd28..e4b773400 100644 --- a/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c +++ b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c @@ -164,7 +164,7 @@ int nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g, } err = nvgpu_mutex_init(&a->lock); - if (err) { + if (err != 0) { return err; } diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c index 951aefa8a..21feadec1 100644 --- a/drivers/gpu/nvgpu/common/mm/page_allocator.c +++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c @@ -437,7 +437,7 @@ static struct nvgpu_page_alloc *nvgpu_alloc_slab( alloc->sgt.sgl = (struct nvgpu_sgl *)sgl; err = do_slab_alloc(a, slab, alloc); - if (err) { + if (err != 0) { goto fail; } @@ -1023,7 +1023,7 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, } err = nvgpu_alloc_common_init(na, g, name, a, false, &page_ops); - if (err) { + if (err != 0) { goto fail; } @@ -1047,7 +1047,7 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, if ((flags & GPU_ALLOC_4K_VIDMEM_PAGES) != 0ULL && blk_size > SZ_4K) { err = nvgpu_page_alloc_init_slabs(a); - if (err) { + if (err != 0) { goto fail; } } @@ -1057,7 +1057,7 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, err = nvgpu_buddy_allocator_init(g, &a->source_allocator, NULL, buddy_name, base, length, blk_size, 0ULL, 0ULL); - if (err) { + if (err != 0) { goto fail; } diff --git a/drivers/gpu/nvgpu/common/mm/pd_cache.c b/drivers/gpu/nvgpu/common/mm/pd_cache.c index 4fba7d990..ed596b3f7 100644 --- a/drivers/gpu/nvgpu/common/mm/pd_cache.c +++ b/drivers/gpu/nvgpu/common/mm/pd_cache.c @@ -179,7 +179,7 @@ int nvgpu_pd_cache_alloc_direct(struct gk20a *g, } err = nvgpu_dma_alloc_flags(g, flags, bytes, pd->mem); - if (err) { + if (err != 0) { nvgpu_err(g, "OOM allocating page directory!"); nvgpu_kfree(g, pd->mem); return -ENOMEM; @@ -327,7 +327,7 @@ static int nvgpu_pd_cache_alloc(struct gk20a *g, struct nvgpu_pd_cache *cache, err = nvgpu_pd_cache_alloc_from_partial(g, cache, pentry, pd); } - if (err) { + if (err != 0) { nvgpu_err(g, "PD-Alloc [C] Failed!"); } @@ -350,7 +350,7 @@ int nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes) */ if (bytes >= PAGE_SIZE) { err = nvgpu_pd_cache_alloc_direct(g, pd, bytes); - if (err) { + if (err != 0) { return err; } diff --git a/drivers/gpu/nvgpu/common/mm/vidmem.c b/drivers/gpu/nvgpu/common/mm/vidmem.c index d7df2ef4f..8eefaf4ec 100644 --- a/drivers/gpu/nvgpu/common/mm/vidmem.c +++ b/drivers/gpu/nvgpu/common/mm/vidmem.c @@ -103,7 +103,7 @@ static int __nvgpu_vidmem_do_clear_all(struct gk20a *g) NVGPU_CE_MEMSET, 0, &gk20a_fence_out); - if (err) { + if (err != 0) { nvgpu_err(g, "Failed to clear vidmem : %d", err); return err; @@ -123,7 +123,7 @@ static int __nvgpu_vidmem_do_clear_all(struct gk20a *g) !nvgpu_timeout_expired(&timeout)); gk20a_fence_put(gk20a_fence_out); - if (err) { + if (err != 0) { nvgpu_err(g, "fence wait failed for CE execute ops"); return err; @@ -321,7 +321,7 @@ int nvgpu_vidmem_init(struct mm_gk20a *mm) base, size - base, default_page_size, GPU_ALLOC_4K_VIDMEM_PAGES); - if (err) { + if (err != 0) { nvgpu_err(g, "Failed to register vidmem for size %zu: %d", size, err); return err; @@ -336,7 
+336,7 @@ int nvgpu_vidmem_init(struct mm_gk20a *mm) mm->vidmem.bootstrap_size = bootstrap_size; err = nvgpu_cond_init(&mm->vidmem.clearing_thread_cond); - if (err) + if (err != 0) goto fail; nvgpu_atomic64_set(&mm->vidmem.bytes_pending, 0); @@ -358,7 +358,7 @@ int nvgpu_vidmem_init(struct mm_gk20a *mm) err = nvgpu_thread_create(&mm->vidmem.clearing_thread, mm, nvgpu_vidmem_clear_pending_allocs_thr, "vidmem-clear"); - if (err) + if (err != 0) goto fail; vidmem_dbg(g, "VIDMEM Total: %zu MB", size >> 20); @@ -427,7 +427,7 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem) 0, &gk20a_fence_out); - if (err) { + if (err != 0) { nvgpu_err(g, "Failed gk20a_ce_execute_ops[%d]", err); return err; @@ -454,7 +454,7 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem) !nvgpu_timeout_expired(&timeout)); gk20a_fence_put(gk20a_last_fence); - if (err) + if (err != 0) nvgpu_err(g, "fence wait failed for CE execute ops"); } @@ -474,7 +474,7 @@ static int nvgpu_vidmem_clear_all(struct gk20a *g) nvgpu_mutex_acquire(&g->mm.vidmem.first_clear_mutex); if (!g->mm.vidmem.cleared) { err = __nvgpu_vidmem_do_clear_all(g); - if (err) { + if (err != 0) { nvgpu_mutex_release(&g->mm.vidmem.first_clear_mutex); nvgpu_err(g, "failed to clear whole vidmem"); return err; @@ -491,7 +491,7 @@ struct nvgpu_vidmem_buf *nvgpu_vidmem_user_alloc(struct gk20a *g, size_t bytes) int err; err = nvgpu_vidmem_clear_all(g); - if (err) + if (err != 0) return ERR_PTR(-ENOMEM); buf = nvgpu_kzalloc(g, sizeof(*buf)); @@ -506,7 +506,7 @@ struct nvgpu_vidmem_buf *nvgpu_vidmem_user_alloc(struct gk20a *g, size_t bytes) } err = nvgpu_dma_alloc_vid(g, bytes, buf->mem); - if (err) + if (err != 0) goto fail; /* diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c index 517f27ae7..128f9ebdc 100644 --- a/drivers/gpu/nvgpu/common/mm/vm.c +++ b/drivers/gpu/nvgpu/common/mm/vm.c @@ -260,7 +260,7 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm) } err = nvgpu_semaphore_pool_map(vm->sema_pool, vm); - if (err) { + if (err != 0) { nvgpu_semaphore_pool_unmap(vm->sema_pool, vm); nvgpu_free(vm->vma[GMMU_PAGE_SIZE_SMALL], vm->sema_pool->gpu_va); @@ -339,7 +339,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm, /* Initialize the page table data structures. 
*/ strncpy(vm->name, name, min(strlen(name), sizeof(vm->name))); err = nvgpu_gmmu_init_page_table(vm); - if (err) { + if (err != 0) { goto clean_up_vgpu_vm; } @@ -427,7 +427,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm, SZ_4K, GPU_BALLOC_MAX_ORDER, GPU_ALLOC_GVA_SPACE); - if (err) { + if (err != 0) { goto clean_up_page_tables; } } else { @@ -453,7 +453,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm, vm->big_page_size, GPU_BALLOC_MAX_ORDER, GPU_ALLOC_GVA_SPACE); - if (err) { + if (err != 0) { goto clean_up_allocators; } } @@ -469,7 +469,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm, SZ_4K, GPU_BALLOC_MAX_ORDER, kernel_vma_flags); - if (err) { + if (err != 0) { goto clean_up_allocators; } @@ -498,7 +498,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm, */ if (vm->va_limit > 4ULL * SZ_1G) { err = nvgpu_init_sema_pool(vm); - if (err) { + if (err != 0) { goto clean_up_gmmu_lock; } } @@ -917,7 +917,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, map_size, binfo.pgsz_idx, &vm_area); - if (err) { + if (err != 0) { goto clean_up; } @@ -925,7 +925,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, } err = nvgpu_vm_compute_compression(vm, &binfo); - if (err) { + if (err != 0) { nvgpu_err(g, "failure setting up compression"); goto clean_up; } @@ -962,7 +962,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, err = gk20a_alloc_or_get_comptags(g, os_buf, &g->gr.comp_tags, &comptags); - if (err) { + if (err != 0) { /* * This is an irrecoverable failure and we need to * abort. In particular, it is not safe to proceed with @@ -987,7 +987,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, comptags.lines - 1U)); gk20a_comptags_finish_clear( os_buf, err == 0); - if (err) { + if (err != 0) { goto clean_up; } } @@ -1073,7 +1073,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, mapped_buffer->vm_area = vm_area; err = nvgpu_insert_mapped_buf(vm, mapped_buffer); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to insert into mapped buffer tree"); goto clean_up; } diff --git a/drivers/gpu/nvgpu/common/perf/cyclestats_snapshot.c b/drivers/gpu/nvgpu/common/perf/cyclestats_snapshot.c index eccc2f7f6..5ec18311a 100644 --- a/drivers/gpu/nvgpu/common/perf/cyclestats_snapshot.c +++ b/drivers/gpu/nvgpu/common/perf/cyclestats_snapshot.c @@ -118,7 +118,7 @@ int nvgpu_css_enable_snapshot(struct channel_gk20a *ch, ret = nvgpu_dma_alloc_map_sys(g->mm.pmu.vm, snapshot_size, &data->hw_memdesc); - if (ret) + if (ret != 0) return ret; /* perf output buffer may not cross a 4GB boundary - with a separate */ @@ -233,7 +233,7 @@ static int css_gr_flush_snapshots(struct channel_gk20a *ch) /* check data available */ err = g->ops.css.check_data_available(ch, &pending, &hw_overflow); - if (err) + if (err != 0) return err; if (!pending) @@ -460,17 +460,17 @@ int gr_gk20a_css_attach(struct channel_gk20a *ch, nvgpu_mutex_acquire(&gr->cs_lock); ret = css_gr_create_shared_data(gr); - if (ret) + if (ret != 0) goto failed; ret = css_gr_create_client_data(g, gr->cs_data, perfmon_count, cs_client); - if (ret) + if (ret != 0) goto failed; ret = g->ops.css.enable_snapshot(ch, cs_client); - if (ret) + if (ret != 0) goto failed; if (perfmon_start) diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c index aa55fb6a0..6840de1a8 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu.c @@ -85,12 +85,12 @@ static int pmu_enable(struct nvgpu_pmu *pmu, bool enable) } } else { err = pmu_enable_hw(pmu, true); - if (err) { + if (err != 
0) { goto exit; } err = nvgpu_flcn_wait_idle(pmu->flcn); - if (err) { + if (err != 0) { goto exit; } @@ -110,12 +110,12 @@ int nvgpu_pmu_reset(struct gk20a *g) nvgpu_log_fn(g, " %s ", g->name); err = nvgpu_flcn_wait_idle(pmu->flcn); - if (err) { + if (err != 0) { goto exit; } err = pmu_enable(pmu, false); - if (err) { + if (err != 0) { goto exit; } @@ -141,7 +141,7 @@ static int nvgpu_init_task_pg_init(struct gk20a *g) err = nvgpu_thread_create(&pmu->pg_init.state_task, g, nvgpu_pg_init_task, thread_name); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to start nvgpu_pg_init thread"); } @@ -234,7 +234,7 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g) err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE, &pmu->seq_buf); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to allocate memory"); goto err_free_seq; } @@ -254,14 +254,14 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g) err = g->ops.pmu.alloc_super_surface(g, &pmu->super_surface_buf, sizeof(struct nv_pmu_super_surface)); - if (err) { + if (err != 0) { goto err_free_seq_buf; } } err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE, &pmu->trace_buf); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to allocate pmu trace buffer\n"); goto err_free_super_surface; } @@ -613,7 +613,7 @@ int nvgpu_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem, int err; err = nvgpu_dma_alloc_map_vid(vm, size, mem); - if (err) { + if (err != 0) { nvgpu_err(g, "memory allocation failed"); return -ENOMEM; } @@ -629,7 +629,7 @@ int nvgpu_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem, int err; err = nvgpu_dma_alloc_map_sys(vm, size, mem); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to allocate memory\n"); return -ENOMEM; } @@ -646,7 +646,7 @@ int nvgpu_pmu_super_surface_alloc(struct gk20a *g, nvgpu_log_fn(g, " "); err = nvgpu_dma_alloc_map(vm, size, mem_surface); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to allocate pmu suffer surface\n"); err = -ENOMEM; } diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c index 56e79f5a4..3441cdbe1 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c @@ -1666,34 +1666,34 @@ int nvgpu_init_pmu_fw_support(struct nvgpu_pmu *pmu) nvgpu_log_fn(g, " "); err = nvgpu_mutex_init(&pmu->elpg_mutex); - if (err) { + if (err != 0) { return err; } err = nvgpu_mutex_init(&pmu->pg_mutex); - if (err) { + if (err != 0) { goto fail_elpg; } err = nvgpu_mutex_init(&pmu->isr_mutex); - if (err) { + if (err != 0) { goto fail_pg; } err = nvgpu_mutex_init(&pmu->pmu_copy_lock); - if (err) { + if (err != 0) { goto fail_isr; } err = nvgpu_mutex_init(&pmu->pmu_seq_lock); - if (err) { + if (err != 0) { goto fail_pmu_copy; } pmu->remove_support = nvgpu_remove_pmu_support; err = nvgpu_init_pmu_fw_ver_ops(pmu); - if (err) { + if (err != 0) { goto fail_pmu_seq; } @@ -1740,7 +1740,7 @@ int nvgpu_pmu_prepare_ns_ucode_blob(struct gk20a *g) err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_UCODE_SIZE_MAX, &pmu->ucode); - if (err) { + if (err != 0) { goto err_release_fw; } diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c index 16f9ba57f..55a7a6351 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c @@ -250,7 +250,7 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd, } } while (1); - if (err) { + if (err != 0) { nvgpu_err(g, "fail to write cmd to queue %d", queue_id); } else { nvgpu_log_fn(g, "done"); @@ 
-293,7 +293,7 @@ static int pmu_cmd_payload_extract_rpc(struct gk20a *g, struct pmu_cmd *cmd, dmem_alloc_offset); clean_up: - if (err) { + if (err != 0) { nvgpu_log_fn(g, "fail"); } else { nvgpu_log_fn(g, "done"); @@ -411,7 +411,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd, } clean_up: - if (err) { + if (err != 0) { nvgpu_log_fn(g, "fail"); if (in) { nvgpu_free(&pmu->dmem, @@ -457,7 +457,7 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd, } err = pmu_seq_acquire(pmu, &seq); - if (err) { + if (err != 0) { return err; } @@ -481,14 +481,14 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd, err = pmu_cmd_payload_extract(g, cmd, payload, seq); } - if (err) { + if (err != 0) { goto clean_up; } seq->state = PMU_SEQ_STATE_USED; err = pmu_write_cmd(pmu, cmd, queue_id, timeout); - if (err) { + if (err != 0) { seq->state = PMU_SEQ_STATE_PENDING; } @@ -938,7 +938,7 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc, status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload, PMU_COMMAND_QUEUE_LPQ, callback, rpc_payload, &seq, ~0); - if (status) { + if (status != 0) { nvgpu_err(g, "Failed to execute RPC status=0x%x, func=0x%x", status, rpc->function); goto exit; @@ -960,7 +960,7 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc, } exit: - if (status) { + if (status != 0) { if (rpc_payload) { nvgpu_kfree(g, rpc_payload); } diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c index 57a4ea400..b1bfd3293 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c @@ -339,7 +339,7 @@ int nvgpu_pmu_init_perfmon_rpc(struct nvgpu_pmu *pmu) nvgpu_pmu_dbg(g, "RPC post NV_PMU_RPC_ID_PERFMON_INIT"); PMU_RPC_EXECUTE(status, pmu, PERFMON_T18X, INIT, &rpc, 0); - if (status) { + if (status != 0) { nvgpu_err(g, "Failed to execute RPC, status=0x%x", status); goto exit; } @@ -372,7 +372,7 @@ int nvgpu_pmu_perfmon_start_sampling_rpc(struct nvgpu_pmu *pmu) nvgpu_pmu_dbg(g, "RPC post NV_PMU_RPC_ID_PERFMON_START\n"); PMU_RPC_EXECUTE(status, pmu, PERFMON_T18X, START, &rpc, 0); - if (status) { + if (status != 0) { nvgpu_err(g, "Failed to execute RPC, status=0x%x", status); } @@ -395,7 +395,7 @@ int nvgpu_pmu_perfmon_stop_sampling_rpc(struct nvgpu_pmu *pmu) /* PERFMON Stop */ nvgpu_pmu_dbg(g, "RPC post NV_PMU_RPC_ID_PERFMON_STOP\n"); PMU_RPC_EXECUTE(status, pmu, PERFMON_T18X, STOP, &rpc, 0); - if (status) { + if (status != 0) { nvgpu_err(g, "Failed to execute RPC, status=0x%x", status); } @@ -418,7 +418,7 @@ int nvgpu_pmu_perfmon_get_samples_rpc(struct nvgpu_pmu *pmu) /* PERFMON QUERY */ nvgpu_pmu_dbg(g, "RPC post NV_PMU_RPC_ID_PERFMON_QUERY\n"); PMU_RPC_EXECUTE(status, pmu, PERFMON_T18X, QUERY, &rpc, 0); - if (status) { + if (status != 0) { nvgpu_err(g, "Failed to execute RPC, status=0x%x", status); } diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c index 3d46298dc..a5767d168 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c @@ -410,7 +410,7 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id) nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_INIT"); err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg, pmu, &seq, ~0); - if (err) { + if (err != 0) { nvgpu_err(g, "PMU_PG_ELPG_CMD_INIT cmd failed\n"); } @@ -427,7 +427,7 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id) 
nvgpu_pmu_dbg(g, "cmd post PMU_PG_STAT_CMD_ALLOC_DMEM"); err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ, pmu_handle_pg_stat_msg, pmu, &seq, ~0); - if (err) { + if (err != 0) { nvgpu_err(g, "PMU_PG_STAT_CMD_ALLOC_DMEM cmd failed\n"); } @@ -450,7 +450,7 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id) nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_DISALLOW"); err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg, pmu, &seq, ~0); - if (err) { + if (err != 0) { nvgpu_err(g, "PMU_PG_ELPG_CMD_DISALLOW cmd failed\n"); } @@ -558,7 +558,7 @@ int nvgpu_pmu_init_bind_fecs(struct gk20a *g) nvgpu_pmu_state_change(g, PMU_STATE_LOADING_PG_BUF, false); err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ, pmu_handle_pg_buf_config_msg, pmu, &desc, ~0); - if (err) { + if (err != 0) { nvgpu_err(g, "cmd LOAD PMU_PGENG_GR_BUFFER_IDX_FECS failed\n"); } @@ -599,7 +599,7 @@ void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g) nvgpu_pmu_state_change(g, PMU_STATE_LOADING_ZBC, false); err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ, pmu_handle_pg_buf_config_msg, pmu, &desc, ~0); - if (err) { + if (err != 0) { nvgpu_err(g, "CMD LOAD PMU_PGENG_GR_BUFFER_IDX_ZBC failed\n"); } } @@ -659,7 +659,7 @@ int nvgpu_pmu_ap_send_command(struct gk20a *g, { struct nvgpu_pmu *pmu = &g->pmu; /* FIXME: where is the PG structure defined?? */ - u32 status = 0; + int status = 0; struct pmu_cmd cmd; u32 seq; pmu_callback p_callback = NULL; @@ -722,7 +722,7 @@ int nvgpu_pmu_ap_send_command(struct gk20a *g, status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, p_callback, pmu, &seq, ~0); - if (status) { + if (status != 0) { nvgpu_pmu_dbg(g, "%s: Unable to submit Adaptive Power Command %d\n", __func__, p_ap_cmd->cmn.cmd_id); diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c index 6df8f6e43..a80962802 100644 --- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c @@ -357,7 +357,7 @@ int gk20a_init_ce_support(struct gk20a *g) nvgpu_log(g, gpu_dbg_fn, "ce: init"); err = nvgpu_mutex_init(&ce_app->app_mutex); - if (err) { + if (err != 0) { return err; } @@ -438,7 +438,7 @@ u32 gk20a_ce_create_context(struct gk20a *g, } err = nvgpu_mutex_init(&ce_ctx->gpu_ctx_mutex); - if (err) { + if (err != 0) { nvgpu_kfree(g, ce_ctx); return ctx_id; } @@ -469,13 +469,13 @@ u32 gk20a_ce_create_context(struct gk20a *g, /* bind the channel to the vm */ err = g->ops.mm.vm_bind_channel(g->mm.ce.vm, ce_ctx->ch); - if (err) { + if (err != 0) { nvgpu_err(g, "ce: could not bind vm"); goto end; } err = gk20a_tsg_bind_channel(ce_ctx->tsg, ce_ctx->ch); - if (err) { + if (err != 0) { nvgpu_err(g, "ce: unable to bind to tsg"); goto end; } @@ -485,7 +485,7 @@ u32 gk20a_ce_create_context(struct gk20a *g, gpfifo_args.flags = 0; /* allocate gpfifo (1024 should be more than enough) */ err = gk20a_channel_alloc_gpfifo(ce_ctx->ch, &gpfifo_args); - if (err) { + if (err != 0) { nvgpu_err(g, "ce: unable to allocate gpfifo"); goto end; } @@ -495,7 +495,7 @@ u32 gk20a_ce_create_context(struct gk20a *g, NVGPU_CE_MAX_INFLIGHT_JOBS * NVGPU_CE_MAX_COMMAND_BUFF_BYTES_PER_KICKOFF, &ce_ctx->cmd_buf_mem); - if (err) { + if (err != 0) { nvgpu_err(g, "ce: could not allocate command buffer for CE context"); goto end; @@ -506,7 +506,7 @@ u32 gk20a_ce_create_context(struct gk20a *g, /* -1 means default channel timeslice value */ if (timeslice != -1) { err = gk20a_fifo_tsg_set_timeslice(ce_ctx->tsg, timeslice); - if 
(err) { + if (err != 0) { nvgpu_err(g, "ce: could not set the channel timeslice value for CE context"); goto end; @@ -517,7 +517,7 @@ u32 gk20a_ce_create_context(struct gk20a *g, if (runlist_level != -1) { err = gk20a_tsg_set_runlist_interleave(ce_ctx->tsg, runlist_level); - if (err) { + if (err != 0) { nvgpu_err(g, "ce: could not set the runlist interleave for CE context"); goto end; diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c index 8ca9cbdf7..3e09df61b 100644 --- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c @@ -207,7 +207,7 @@ int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, bool disable_powergate) nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn, "module busy"); err = gk20a_busy(g); - if (err) { + if (err != 0) { return err; } diff --git a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c index b30d17433..ed6b3b2af 100644 --- a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c @@ -418,10 +418,10 @@ int gk20a_fecs_trace_init(struct gk20a *g) g->fecs_trace = trace; err = nvgpu_mutex_init(&trace->poll_lock); - if (err) + if (err != 0) goto clean; err = nvgpu_mutex_init(&trace->hash_lock); - if (err) + if (err != 0) goto clean_poll_lock; BUG_ON(!is_power_of_2(GK20A_FECS_TRACE_NUM_RECORDS)); @@ -609,7 +609,7 @@ int gk20a_fecs_trace_enable(struct gk20a *g) err = nvgpu_thread_create(&trace->poll_task, g, gk20a_fecs_trace_periodic_polling, __func__); - if (err) { + if (err != 0) { nvgpu_warn(g, "failed to create FECS polling task"); return err; diff --git a/drivers/gpu/nvgpu/gk20a/fence_gk20a.c b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c index af4213043..80611cd04 100644 --- a/drivers/gpu/nvgpu/gk20a/fence_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c @@ -136,7 +136,7 @@ int gk20a_alloc_fence_pool(struct channel_gk20a *c, unsigned int count) err = nvgpu_lockless_allocator_init(c->g, &c->fence_allocator, "fence_pool", (size_t)fence_pool, size, sizeof(struct gk20a_fence), 0); - if (err) { + if (err != 0) { goto fail; } diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c index 918f29a4d..c65874ec1 100644 --- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c @@ -738,7 +738,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f) err = nvgpu_dma_alloc_flags_sys(g, flags, runlist_size, &runlist->mem[i]); - if (err) { + if (err != 0) { nvgpu_err(g, "memory allocation failed"); goto clean_up_runlist; } @@ -908,13 +908,13 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g) f->g = g; err = nvgpu_mutex_init(&f->intr.isr.mutex); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to init isr.mutex"); return err; } err = nvgpu_mutex_init(&f->gr_reset_mutex); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to init gr_reset_mutex"); return err; } @@ -953,7 +953,7 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g) g->ops.fifo.init_engine_info(f); err = init_runlist(g, f); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to init runlist"); goto clean_up; } @@ -961,7 +961,7 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g) nvgpu_init_list_node(&f->free_chs); err = nvgpu_mutex_init(&f->free_chs_mutex); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to init free_chs_mutex"); goto clean_up; } @@ -972,7 +972,7 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g) } err = nvgpu_mutex_init(&f->tsg_inuse_mutex); - if (err) { + if (err != 0) 
{ nvgpu_err(g, "failed to init tsg_inuse_mutex"); goto clean_up; } @@ -982,7 +982,7 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g) f->deferred_reset_pending = false; err = nvgpu_mutex_init(&f->deferred_reset_mutex); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to init deferred_reset_mutex"); goto clean_up; } @@ -1022,7 +1022,7 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g) } err = gk20a_init_fifo_setup_sw_common(g); - if (err) { + if (err != 0) { nvgpu_err(g, "fail: err: %d", err); return err; } @@ -1035,7 +1035,7 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g) err = nvgpu_dma_alloc_sys(g, f->userd_entry_size * f->num_channels, &f->userd); } - if (err) { + if (err != 0) { nvgpu_err(g, "userd memory allocation failed"); goto clean_up; } @@ -1050,7 +1050,7 @@ int gk20a_init_fifo_setup_sw(struct gk20a *g) } err = nvgpu_channel_worker_init(g); - if (err) { + if (err != 0) { goto clean_up; } @@ -1144,17 +1144,17 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g) int gk20a_init_fifo_support(struct gk20a *g) { - u32 err; + int err; err = g->ops.fifo.setup_sw(g); - if (err) { + if (err != 0) { return err; } if (g->ops.fifo.init_fifo_setup_hw) { err = g->ops.fifo.init_fifo_setup_hw(g); } - if (err) { + if (err != 0) { return err; } @@ -2198,20 +2198,20 @@ int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch) g->ops.fifo.disable_tsg(tsg); err = g->ops.fifo.preempt_tsg(g, tsg->tsgid); - if (err) { + if (err != 0) { goto fail_enable_tsg; } if (g->ops.fifo.tsg_verify_channel_status && !tsg_timedout) { err = g->ops.fifo.tsg_verify_channel_status(ch); - if (err) { + if (err != 0) { goto fail_enable_tsg; } } /* Channel should be seen as TSG channel while updating runlist */ err = channel_gk20a_update_runlist(ch, false); - if (err) { + if (err != 0) { goto fail_enable_tsg; } @@ -2893,7 +2893,7 @@ int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id, delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); } while (!nvgpu_timeout_expired(&timeout)); - if (ret) { + if (ret != 0) { nvgpu_err(g, "preempt timeout: id: %u id_type: %d ", id, id_type); } @@ -2960,7 +2960,7 @@ int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg) int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid) { struct fifo_gk20a *f = &g->fifo; - u32 ret = 0; + int ret = 0; u32 token = PMU_INVALID_MUTEX_OWNER_ID; u32 mutex_ret = 0; u32 i; @@ -2987,7 +2987,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid) nvgpu_mutex_release(&f->runlist_info[i].runlist_lock); } - if (ret) { + if (ret != 0) { if (nvgpu_platform_is_silicon(g)) { nvgpu_err(g, "preempt timed out for chid: %u, " "ctxsw timeout will trigger recovery if needed", chid); @@ -3004,7 +3004,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid) int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid) { struct fifo_gk20a *f = &g->fifo; - u32 ret = 0; + int ret = 0; u32 token = PMU_INVALID_MUTEX_OWNER_ID; u32 mutex_ret = 0; u32 i; @@ -3031,7 +3031,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid) nvgpu_mutex_release(&f->runlist_info[i].runlist_lock); } - if (ret) { + if (ret != 0) { if (nvgpu_platform_is_silicon(g)) { nvgpu_err(g, "preempt timed out for tsgid: %u, " "ctxsw timeout will trigger recovery if needed", tsgid); @@ -3123,7 +3123,7 @@ int gk20a_fifo_enable_all_engine_activity(struct gk20a *g) u32 active_engine_id = g->fifo.active_engines_list[i]; err = gk20a_fifo_enable_engine_activity(g, &g->fifo.engine_info[active_engine_id]); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to enable engine %d activity", 
active_engine_id); ret = err; @@ -3142,7 +3142,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g, u32 engine_chid = FIFO_INVAL_CHANNEL_ID; u32 token = PMU_INVALID_MUTEX_OWNER_ID; u32 mutex_ret; - u32 err = 0; + int err = 0; nvgpu_log_fn(g, " "); @@ -3171,7 +3171,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g, if (pbdma_chid != FIFO_INVAL_CHANNEL_ID) { err = g->ops.fifo.preempt_channel(g, pbdma_chid); - if (err) { + if (err != 0) { goto clean_up; } } @@ -3189,7 +3189,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g, if (engine_chid != FIFO_INVAL_ENGINE_ID && engine_chid != pbdma_chid) { err = g->ops.fifo.preempt_channel(g, engine_chid); - if (err) { + if (err != 0) { goto clean_up; } } @@ -3199,7 +3199,7 @@ clean_up: nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); } - if (err) { + if (err != 0) { nvgpu_log_fn(g, "failed"); if (gk20a_fifo_enable_engine_activity(g, eng_info)) { nvgpu_err(g, @@ -3223,7 +3223,7 @@ int gk20a_fifo_disable_all_engine_activity(struct gk20a *g, err = gk20a_fifo_disable_engine_activity(g, &g->fifo.engine_info[active_engine_id], wait_for_idle); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to disable engine %d activity", active_engine_id); ret = err; @@ -3231,12 +3231,12 @@ int gk20a_fifo_disable_all_engine_activity(struct gk20a *g, } } - if (err) { + if (err != 0) { while (i-- != 0) { active_engine_id = g->fifo.active_engines_list[i]; err = gk20a_fifo_enable_engine_activity(g, &g->fifo.engine_info[active_engine_id]); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to re-enable engine %d activity", active_engine_id); @@ -3291,7 +3291,7 @@ int gk20a_fifo_runlist_wait_pending(struct gk20a *g, u32 runlist_id) delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); } while (!nvgpu_timeout_expired(&timeout)); - if (ret) { + if (ret != 0) { nvgpu_err(g, "runlist wait timeout: runlist id: %u", runlist_id); } @@ -3847,7 +3847,7 @@ int gk20a_fifo_wait_engine_idle(struct gk20a *g) delay << 1, GR_IDLE_CHECK_MAX); } while (!nvgpu_timeout_expired(&timeout)); - if (ret) { + if (ret != 0) { nvgpu_log_info(g, "cannot idle engine %u", i); break; } @@ -4315,7 +4315,7 @@ int gk20a_fifo_alloc_inst(struct gk20a *g, struct channel_gk20a *ch) nvgpu_log_fn(g, " "); err = g->ops.mm.alloc_inst_block(g, &ch->inst_block); - if (err) { + if (err != 0) { return err; } diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c index 7855493d0..477c0325f 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gk20a.c @@ -83,7 +83,7 @@ int gk20a_prepare_poweroff(struct gk20a *g) if (g->ops.fifo.channel_suspend) { ret = g->ops.fifo.channel_suspend(g); - if (ret) { + if (ret != 0) { return ret; } } @@ -148,7 +148,7 @@ int gk20a_finalize_poweron(struct gk20a *g) * buffers. */ err = nvgpu_pd_cache_init(g); - if (err) { + if (err != 0) { return err; } @@ -182,7 +182,7 @@ int gk20a_finalize_poweron(struct gk20a *g) if (g->ops.bios.init) { err = g->ops.bios.init(g); } - if (err) { + if (err != 0) { goto done; } @@ -202,7 +202,7 @@ int gk20a_finalize_poweron(struct gk20a *g) saving features (blcg/slcg) are enabled. For now, do it here. 
*/ if (g->ops.clk.init_clk_support) { err = g->ops.clk.init_clk_support(g); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to init gk20a clk"); goto done; } @@ -210,7 +210,7 @@ int gk20a_finalize_poweron(struct gk20a *g) if (nvgpu_is_enabled(g, NVGPU_SUPPORT_NVLINK)) { err = g->ops.nvlink.init(g); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to init nvlink"); goto done; } @@ -218,7 +218,7 @@ int gk20a_finalize_poweron(struct gk20a *g) if (g->ops.fb.init_fbpa) { err = g->ops.fb.init_fbpa(g); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to init fbpa"); goto done; } @@ -226,7 +226,7 @@ int gk20a_finalize_poweron(struct gk20a *g) if (g->ops.fb.mem_unlock) { err = g->ops.fb.mem_unlock(g); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to unlock memory"); goto done; } @@ -234,25 +234,25 @@ int gk20a_finalize_poweron(struct gk20a *g) err = g->ops.fifo.reset_enable_hw(g); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to reset gk20a fifo"); goto done; } err = nvgpu_init_ltc_support(g); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to init ltc"); goto done; } err = nvgpu_init_mm_support(g); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to init gk20a mm"); goto done; } err = gk20a_init_fifo_support(g); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to init gk20a fifo"); goto done; } @@ -282,7 +282,7 @@ int gk20a_finalize_poweron(struct gk20a *g) } err = gk20a_enable_gr_hw(g); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to enable gr"); nvgpu_mutex_release(&g->tpc_pg_lock); goto done; @@ -292,7 +292,7 @@ int gk20a_finalize_poweron(struct gk20a *g) if (g->ops.pmu.prepare_ucode) { err = g->ops.pmu.prepare_ucode(g); } - if (err) { + if (err != 0) { nvgpu_err(g, "failed to init pmu ucode"); nvgpu_mutex_release(&g->tpc_pg_lock); goto done; @@ -301,7 +301,7 @@ int gk20a_finalize_poweron(struct gk20a *g) if (nvgpu_is_enabled(g, NVGPU_PMU_PSTATE)) { err = gk20a_init_pstate_support(g); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to init pstates"); nvgpu_mutex_release(&g->tpc_pg_lock); goto done; @@ -327,7 +327,7 @@ int gk20a_finalize_poweron(struct gk20a *g) if (g->ops.pmu.is_pmu_supported(g)) { err = nvgpu_init_pmu_support(g); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to init gk20a pmu"); nvgpu_mutex_release(&g->tpc_pg_lock); goto done; @@ -335,7 +335,7 @@ int gk20a_finalize_poweron(struct gk20a *g) } err = gk20a_init_gr_support(g); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to init gk20a gr"); nvgpu_mutex_release(&g->tpc_pg_lock); goto done; @@ -345,7 +345,7 @@ int gk20a_finalize_poweron(struct gk20a *g) if (nvgpu_is_enabled(g, NVGPU_PMU_PSTATE)) { err = gk20a_init_pstate_pmu_support(g); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to init pstates"); goto done; } @@ -355,27 +355,27 @@ int gk20a_finalize_poweron(struct gk20a *g) g->ops.pmu_ver.clk.clk_set_boot_clk(g); } else { err = nvgpu_clk_arb_init_arbiter(g); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to init clk arb"); goto done; } } err = nvgpu_init_therm_support(g); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to init gk20a therm"); goto done; } err = g->ops.chip_init_gpu_characteristics(g); - if (err) { + if (err != 0) { nvgpu_err(g, "failed to init gk20a gpu characteristics"); goto done; } #ifdef CONFIG_GK20A_CTXSW_TRACE err = gk20a_ctxsw_trace_init(g); - if (err) + if (err != 0) nvgpu_warn(g, "could not initialize ctxsw tracing"); #endif @@ -396,7 +396,7 @@ int gk20a_finalize_poweron(struct gk20a *g) /* Set to max speed */ speed = 1 << (fls(speed) 
- 1); err = g->ops.xve.set_speed(g, speed); - if (err) { + if (err != 0) { nvgpu_err(g, "Failed to set PCIe bus speed!"); goto done; } @@ -417,7 +417,7 @@ int gk20a_finalize_poweron(struct gk20a *g) } done: - if (err) { + if (err != 0) { g->power_on = false; } diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c index 8b9ac3264..17529fb42 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c @@ -132,7 +132,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_FECS_UCODE_DATA"); err = gr_gk20a_alloc_load_netlist_u32(g, src, size, &g->gr.ctx_vars.ucode.fecs.data); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -140,7 +140,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_FECS_UCODE_INST"); err = gr_gk20a_alloc_load_netlist_u32(g, src, size, &g->gr.ctx_vars.ucode.fecs.inst); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -148,7 +148,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_GPCCS_UCODE_DATA"); err = gr_gk20a_alloc_load_netlist_u32(g, src, size, &g->gr.ctx_vars.ucode.gpccs.data); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -156,7 +156,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_GPCCS_UCODE_INST"); err = gr_gk20a_alloc_load_netlist_u32(g, src, size, &g->gr.ctx_vars.ucode.gpccs.inst); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -164,7 +164,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_SW_BUNDLE_INIT"); err = gr_gk20a_alloc_load_netlist_av(g, src, size, &g->gr.ctx_vars.sw_bundle_init); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -172,7 +172,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_SW_METHOD_INIT"); err = gr_gk20a_alloc_load_netlist_av(g, src, size, &g->gr.ctx_vars.sw_method_init); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -180,7 +180,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_SW_CTX_LOAD"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.sw_ctx_load); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -188,7 +188,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_SW_NON_CTX_LOAD"); err = gr_gk20a_alloc_load_netlist_av(g, src, size, &g->gr.ctx_vars.sw_non_ctx_load); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -198,7 +198,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) err = gr_gk20a_alloc_load_netlist_av(g, src, size, &g->gr.ctx_vars.sw_veid_bundle_init); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -206,7 +206,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_SYS"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.sys); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -214,7 +214,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_GPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.gpc); - if 
(err) { + if (err != 0) { goto clean_up; } break; @@ -222,7 +222,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_TPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.tpc); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -230,7 +230,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_ZCULL_GPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.zcull_gpc); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -238,7 +238,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.ppc); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -246,7 +246,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_SYS"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_sys); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -254,7 +254,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_GPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_gpc); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -262,7 +262,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PM_TPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_tpc); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -290,7 +290,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMPPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ppc); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -298,7 +298,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_CTXREG_SYS"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -306,7 +306,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_FBP_CTXREGS"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.fbp); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -314,7 +314,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_CTXREG_GPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.perf_gpc); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -322,7 +322,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_FBP_ROUTER"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.fbp_router); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -330,7 +330,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_GPC_ROUTER"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.gpc_router); - if (err) { + if (err != 0) { goto 
clean_up; } break; @@ -338,7 +338,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMLTC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ltc); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -346,7 +346,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMFBPA"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_fbpa); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -354,7 +354,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_SYS_ROUTER"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys_router); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -362,7 +362,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_NVPERF_PMA"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.perf_pma); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -370,7 +370,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMROP"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_rop); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -378,7 +378,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PMUCGPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ucgpc); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -386,7 +386,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_ETPC"); err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.etpc); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -395,7 +395,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) err = gr_gk20a_alloc_load_netlist_av64(g, src, size, &g->gr.ctx_vars.sw_bundle64_init); - if (err) { + if (err != 0) { goto clean_up; } break; @@ -404,7 +404,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr) err = gr_gk20a_alloc_load_netlist_aiv(g, src, size, &g->gr.ctx_vars.ctxsw_regs.pm_cau); - if (err) { + if (err != 0) { goto clean_up; } break; diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c index 3a166e063..881779529 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c @@ -574,7 +574,7 @@ int gr_gk20a_submit_fecs_method_op(struct gk20a *g, op.cond.ok, op.mailbox.ok, op.cond.fail, op.mailbox.fail, sleepduringwait); - if (ret) { + if (ret != 0) { nvgpu_err(g,"fecs method: data=0x%08x push adr=0x%08x", op.method.data, op.method.addr); } @@ -604,7 +604,7 @@ int gr_gk20a_submit_fecs_sideband_method_op(struct gk20a *g, op.cond.ok, op.mailbox.ok, op.cond.fail, op.mailbox.fail, false); - if (ret) { + if (ret != 0) { nvgpu_err(g,"fecs method: data=0x%08x push adr=0x%08x", op.method.data, op.method.addr); } @@ -782,7 +782,7 @@ int gr_gk20a_fecs_ctx_bind_channel(struct gk20a *g, u32 inst_base_ptr = u64_lo32(nvgpu_inst_block_addr(g, &c->inst_block) >> ram_in_base_shift_v()); u32 data = fecs_current_ctx_data(g, &c->inst_block); - u32 ret; + int ret; nvgpu_log_info(g, "bind 
channel %d inst ptr 0x%08x", c->chid, inst_base_ptr); @@ -798,7 +798,7 @@ int gr_gk20a_fecs_ctx_bind_channel(struct gk20a *g, .fail = 0x20, }, .cond.ok = GR_IS_UCODE_OP_AND, .cond.fail = GR_IS_UCODE_OP_AND}, true); - if (ret) { + if (ret != 0) { nvgpu_err(g, "bind channel instance failed"); } @@ -849,12 +849,12 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c) } ret = gk20a_disable_channel_tsg(g, c); - if (ret) { + if (ret != 0) { nvgpu_err(g, "failed to disable channel/TSG"); return ret; } ret = gk20a_fifo_preempt(g, c); - if (ret) { + if (ret != 0) { gk20a_enable_channel_tsg(g, c); nvgpu_err(g, "failed to preempt channel/TSG"); return ret; @@ -1324,7 +1324,7 @@ int gr_gk20a_fecs_ctx_image_save(struct channel_gk20a *c, u32 save_type) .cond.fail = GR_IS_UCODE_OP_AND, }, true); - if (ret) { + if (ret != 0) { nvgpu_err(g, "save context image failed"); } @@ -1696,12 +1696,12 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g, } ret = gk20a_disable_channel_tsg(g, c); - if (ret) { + if (ret != 0) { nvgpu_err(g, "failed to disable channel/TSG"); goto out; } ret = gk20a_fifo_preempt(g, c); - if (ret) { + if (ret != 0) { gk20a_enable_channel_tsg(g, c); nvgpu_err(g, "failed to preempt channel/TSG"); goto out; @@ -1784,13 +1784,13 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g, } ret = gk20a_disable_channel_tsg(g, c); - if (ret) { + if (ret != 0) { nvgpu_err(g, "failed to disable channel/TSG"); return ret; } ret = gk20a_fifo_preempt(g, c); - if (ret) { + if (ret != 0) { gk20a_enable_channel_tsg(g, c); nvgpu_err(g, "failed to preempt channel/TSG"); return ret; @@ -1806,7 +1806,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g, ret = nvgpu_dma_alloc_sys(g, g->gr.ctx_vars.pm_ctxsw_image_size, &pm_ctx->mem); - if (ret) { + if (ret != 0) { c->g->ops.fifo.enable_channel(c); nvgpu_err(g, "failed to allocate pm ctxt buffer"); @@ -2449,7 +2449,7 @@ int gr_gk20a_load_ctxsw_ucode(struct gk20a *g) static int gr_gk20a_wait_ctxsw_ready(struct gk20a *g) { - u32 ret; + int ret; nvgpu_log_fn(g, " "); @@ -2457,7 +2457,7 @@ static int gr_gk20a_wait_ctxsw_ready(struct gk20a *g) GR_IS_UCODE_OP_EQUAL, eUcodeHandshakeInitComplete, GR_IS_UCODE_OP_SKIP, 0, false); - if (ret) { + if (ret != 0) { nvgpu_err(g, "falcon ucode init timeout"); return ret; } @@ -2479,7 +2479,7 @@ static int gr_gk20a_wait_ctxsw_ready(struct gk20a *g) int gr_gk20a_init_ctx_state(struct gk20a *g) { - u32 ret; + int ret; struct fecs_method_op_gk20a op = { .mailbox = { .id = 0, .data = 0, .clr = ~0, .ok = 0, .fail = 0}, @@ -2495,7 +2495,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g) gr_fecs_method_push_adr_discover_image_size_v(); op.mailbox.ret = &g->gr.ctx_vars.golden_image_size; ret = gr_gk20a_submit_fecs_method_op(g, op, false); - if (ret) { + if (ret != 0) { nvgpu_err(g, "query golden image size failed"); return ret; @@ -2504,7 +2504,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g) gr_fecs_method_push_adr_discover_zcull_image_size_v(); op.mailbox.ret = &g->gr.ctx_vars.zcull_ctxsw_image_size; ret = gr_gk20a_submit_fecs_method_op(g, op, false); - if (ret) { + if (ret != 0) { nvgpu_err(g, "query zcull ctx image size failed"); return ret; @@ -2513,7 +2513,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g) gr_fecs_method_push_adr_discover_pm_image_size_v(); op.mailbox.ret = &g->gr.ctx_vars.pm_ctxsw_image_size; ret = gr_gk20a_submit_fecs_method_op(g, op, false); - if (ret) { + if (ret != 0) { nvgpu_err(g, "query pm ctx image size failed"); return ret; @@ -3683,7 +3683,7 @@ clean_up: nvgpu_kfree(g, 
sorted_num_tpcs); nvgpu_kfree(g, sorted_to_unsorted_gpc_map); - if (ret) { + if (ret != 0) { nvgpu_err(g, "fail"); } else { nvgpu_log_fn(g, "done"); @@ -3847,14 +3847,14 @@ void gr_gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) { struct fifo_gk20a *f = &g->fifo; struct fifo_engine_info_gk20a *gr_info = NULL; - u32 ret; + int ret; u32 engine_id; engine_id = gk20a_fifo_get_gr_engine_id(g); gr_info = (f->engine_info + engine_id); ret = gk20a_fifo_disable_engine_activity(g, gr_info, true); - if (ret) { + if (ret != 0) { nvgpu_err(g, "failed to disable gr engine activity"); return; @@ -3862,7 +3862,7 @@ void gr_gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g), GR_IDLE_CHECK_DEFAULT); - if (ret) { + if (ret != 0) { nvgpu_err(g, "failed to idle graphics"); goto clean_up; @@ -3873,7 +3873,7 @@ void gr_gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) clean_up: ret = gk20a_fifo_enable_engine_activity(g, gr_info); - if (ret) { + if (ret != 0) { nvgpu_err(g, "failed to enable gr engine activity"); } @@ -4066,7 +4066,7 @@ static int gr_gk20a_load_zbc_table(struct gk20a *g, struct gr_gk20a *gr) ret = g->ops.gr.add_zbc_color(g, gr, &zbc_val, i); - if (ret) { + if (ret != 0) { return ret; } } @@ -4079,14 +4079,14 @@ static int gr_gk20a_load_zbc_table(struct gk20a *g, struct gr_gk20a *gr) zbc_val.format = d_tbl->format; ret = g->ops.gr.add_zbc_depth(g, gr, &zbc_val, i); - if (ret) { + if (ret != 0) { return ret; } } if (g->ops.gr.load_zbc_s_tbl) { ret = g->ops.gr.load_zbc_s_tbl(g, gr); - if (ret) { + if (ret != 0) { return ret; } } @@ -4194,7 +4194,7 @@ int _gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr, gr_info = (f->engine_info + engine_id); ret = gk20a_fifo_disable_engine_activity(g, gr_info, true); - if (ret) { + if (ret != 0) { nvgpu_err(g, "failed to disable gr engine activity"); return ret; @@ -4202,7 +4202,7 @@ int _gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr, ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g), GR_IDLE_CHECK_DEFAULT); - if (ret) { + if (ret != 0) { nvgpu_err(g, "failed to idle graphics"); goto clean_up; @@ -5258,7 +5258,7 @@ static int gk20a_gr_handle_illegal_method(struct gk20a *g, int ret = g->ops.gr.handle_sw_method(g, isr_data->addr, isr_data->class_num, isr_data->offset, isr_data->data_lo); - if (ret) { + if (ret != 0) { gk20a_gr_set_error_notifier(g, isr_data, NVGPU_ERR_NOTIFIER_GR_ILLEGAL_NOTIFY); nvgpu_err(g, "invalid method class 0x%08x" @@ -5691,7 +5691,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, fault_ch, &early_exit, &ignore_debugger); - if (ret) { + if (ret != 0) { nvgpu_err(g, "could not pre-process sm error!"); return ret; } @@ -5735,7 +5735,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, if (do_warp_sync) { ret = g->ops.gr.lock_down_sm(g, gpc, tpc, sm, global_mask, true); - if (ret) { + if (ret != 0) { nvgpu_err(g, "sm did not lock down!"); return ret; } @@ -6321,7 +6321,7 @@ int gk20a_gr_suspend(struct gk20a *g) ret = g->ops.gr.wait_empty(g, gk20a_get_gr_idle_timeout(g), GR_IDLE_CHECK_DEFAULT); - if (ret) { + if (ret != 0U) { return ret; } diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c index 644531f11..66d656451 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c @@ -109,7 +109,7 @@ int gk20a_init_mm_setup_hw(struct gk20a *g) if (g->ops.bus.bar2_bind) { err = g->ops.bus.bar2_bind(g, &mm->bar2.inst_block); - if (err) { + if 
(err != 0) { return err; } } @@ -350,7 +350,7 @@ int gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch) nvgpu_vm_get(vm); ch->vm = vm; err = channel_gk20a_commit_va(ch); - if (err) { + if (err != 0) { ch->vm = NULL; } @@ -409,7 +409,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block) nvgpu_log_fn(g, " "); err = nvgpu_dma_alloc(g, ram_in_alloc_size_v(), inst_block); - if (err) { + if (err != 0) { nvgpu_err(g, "%s: memory allocation failed", __func__); return err; } diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c index 0aec4f868..512edf31f 100644 --- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c @@ -73,7 +73,7 @@ static bool gr_context_info_available(struct gr_gk20a *gr) nvgpu_mutex_acquire(&gr->ctx_mutex); err = !gr->ctx_vars.golden_image_initialized; nvgpu_mutex_release(&gr->ctx_mutex); - if (err) { + if (err != 0) { return false; } @@ -222,7 +222,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s, err = gr_gk20a_exec_ctx_ops(ch, ops, num_ops, ctx_wr_count, ctx_rd_count, is_current_ctx); - if (err) { + if (err != 0) { nvgpu_warn(g, "failed to perform ctx ops\n"); goto clean_up; } @@ -377,7 +377,7 @@ static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s, &num_offsets, op->type == REGOP(TYPE_GR_CTX_QUAD), op->quad); - if (err) { + if (err != 0) { err = gr_gk20a_get_pm_ctx_buffer_offsets(dbg_s->g, op->offset, 1, @@ -385,7 +385,7 @@ static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s, &buf_offset_addr, &num_offsets); - if (err) { + if (err != 0) { op->status |= REGOP(STATUS_INVALID_OFFSET); return -EINVAL; }
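
The hunks above are mechanical, but two conventions run through the whole series and are worth stating once. First, integer error codes are tested with an explicit comparison (`if (err != 0)` / `if (err == 0)`) instead of being used directly as a boolean condition. Second, a few error variables are switched from `u32` to `int` (e.g. in `nvgpu_init_mm_support()`, `gk20a_init_fifo_support()`, `gk20a_fifo_preempt_channel()`), because these paths follow the Linux convention of returning 0 on success and a negative errno on failure, and an unsigned variable silently discards the sign when the value is compared against or returned as a signed error code.

Below is a minimal standalone sketch of the same pattern, not part of the patch; the helpers `demo_alloc()` and `demo_init()` are hypothetical names used only for illustration.

```c
/*
 * Sketch of the two conventions enforced by the patch series:
 *   1. explicit "err != 0" comparisons instead of "if (err)",
 *   2. signed "int" error variables so negative errno values survive.
 * demo_alloc()/demo_init() are illustrative stand-ins, not nvgpu APIs.
 */
#include <errno.h>
#include <stdio.h>

static int demo_alloc(int fail)
{
	/* Linux-style contract: 0 on success, negative errno on failure. */
	return fail ? -ENOMEM : 0;
}

static int demo_init(int fail)
{
	int err;	/* signed: a negative errno must survive the assignment */

	err = demo_alloc(fail);
	if (err != 0) {	/* explicit comparison, as in the hunks above */
		return err;
	}

	return 0;
}

int main(void)
{
	printf("success path: %d\n", demo_init(0));	/* prints 0 */
	printf("failure path: %d\n", demo_init(1));	/* prints -12 (-ENOMEM) */
	return 0;
}
```

The behavior of each touched function is unchanged; only the form of the condition (and, where noted, the signedness of the error variable) differs, which is why the hunks carry no functional risk beyond the `u32`-to-`int` declaration changes.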