diff --git a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
index e5b9b3785..396f04724 100644
--- a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
@@ -275,7 +275,8 @@ static void nvgpu_bitmap_free(struct nvgpu_allocator *na, u64 addr)
 	alloc_lock(na);
 
 	if (a->flags & GPU_ALLOC_NO_ALLOC_PAGE) {
-		WARN(1, "Using wrong free for NO_ALLOC_PAGE bitmap allocator");
+		(void) WARN(1,
+			"Using wrong free for NO_ALLOC_PAGE bitmap allocator");
 		goto done;
 	}
 
diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
index 0ce9ae8a9..a7121c97a 100644
--- a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
@@ -308,7 +308,7 @@ static void nvgpu_buddy_allocator_destroy(struct nvgpu_allocator *na)
 	while (node) {
 		bud = nvgpu_buddy_from_rbtree_node(node);
 
-		balloc_free_buddy(a, bud->start);
+		(void) balloc_free_buddy(a, bud->start);
 		balloc_blist_add(a, bud);
 		balloc_coalesce(a, bud);
 
@@ -821,7 +821,7 @@ err_and_cleanup:
					      nvgpu_buddy, buddy_entry);
 
 		balloc_buddy_list_do_rem(a, bud);
-		balloc_free_buddy(a, bud->start);
+		(void) balloc_free_buddy(a, bud->start);
 		nvgpu_kmem_cache_free(a->buddy_cache, bud);
 	}
 
@@ -839,7 +839,7 @@ static void balloc_do_free_fixed(struct nvgpu_buddy_allocator *a,
					      buddy_entry);
 
 		balloc_buddy_list_do_rem(a, bud);
-		balloc_free_buddy(a, bud->start);
+		(void) balloc_free_buddy(a, bud->start);
 		balloc_blist_add(a, bud);
 
 		a->bytes_freed += balloc_order_to_len(a, bud->order);
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index afbad75c6..a25dd4bb3 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -787,11 +787,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
 	};
 
 	if (va_allocated) {
-		err = __nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
-		if (err) {
-			nvgpu_err(g, "failed to free va");
-			return;
-		}
+		__nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
 	}
 
 	/* unmap here needs to know the page size we assigned at mapping */
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index 35c7e1201..951aefa8a 100644
--- a/drivers/gpu/nvgpu/common/mm/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -246,14 +246,13 @@ static void nvgpu_page_alloc_free_pages(struct nvgpu_page_allocator *a,
 	nvgpu_kmem_cache_free(a->alloc_cache, alloc);
 }
 
-static int insert_page_alloc(struct nvgpu_page_allocator *a,
+static void insert_page_alloc(struct nvgpu_page_allocator *a,
			     struct nvgpu_page_alloc *alloc)
 {
 	alloc->tree_entry.key_start = alloc->base;
 	alloc->tree_entry.key_end = alloc->base + alloc->length;
 
 	nvgpu_rbtree_insert(&alloc->tree_entry, &a->allocs);
-	return 0;
 }
 
 static struct nvgpu_page_alloc *find_page_alloc(
@@ -368,7 +367,7 @@ static int do_slab_alloc(struct nvgpu_page_allocator *a,
				     slab_page->nr_objects, 0, 1, 0);
 	if (offs >= slab_page->nr_objects) {
-		WARN(1, "Empty/partial slab with no free objects?");
+		(void) WARN(1, "Empty/partial slab with no free objects?");
 
 		/* Add the buggy page to the full list... This isn't ideal.
 		 */
 		add_slab_page_to_full(slab, slab_page);
@@ -767,7 +766,8 @@ static struct nvgpu_page_alloc *nvgpu_alloc_pages_fixed(
 	alloc->sgt.ops = &page_alloc_sgl_ops;
 	alloc->base = nvgpu_alloc_fixed(&a->source_allocator, base, length, 0);
 	if (alloc->base == 0ULL) {
-		WARN(1, "nvgpu: failed to fixed alloc pages @ 0x%010llx", base);
+		(void) WARN(1, "nvgpu: failed to fixed alloc pages @ 0x%010llx",
+			base);
 		goto fail;
 	}
 
diff --git a/drivers/gpu/nvgpu/common/mm/pd_cache.c b/drivers/gpu/nvgpu/common/mm/pd_cache.c
index d9dc3db06..4fba7d990 100644
--- a/drivers/gpu/nvgpu/common/mm/pd_cache.c
+++ b/drivers/gpu/nvgpu/common/mm/pd_cache.c
@@ -441,7 +441,7 @@ static void nvgpu_pd_cache_free(struct gk20a *g, struct nvgpu_pd_cache *cache,
 
 	pentry = nvgpu_pd_cache_look_up(g, cache, pd);
 	if (pentry == NULL) {
-		WARN(1, "Attempting to free non-existent pd");
+		(void) WARN(1, "Attempting to free non-existent pd");
 		return;
 	}
 
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 7f0b3d104..8888d6f3f 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -160,13 +160,11 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
 	return addr;
 }
 
-int __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr, u32 pgsz_idx)
+void __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr, u32 pgsz_idx)
 {
 	struct nvgpu_allocator *vma = vm->vma[pgsz_idx];
 
 	nvgpu_free(vma, addr);
-
-	return 0;
 }
 
 void nvgpu_vm_mapping_batch_start(struct vm_gk20a_mapping_batch *mapping_batch)
@@ -1175,12 +1173,17 @@ static int nvgpu_vm_unmap_sync_buffer(struct vm_gk20a *vm,
 	struct nvgpu_timeout timeout;
 	int ret = 0;
 
-	nvgpu_mutex_release(&vm->update_gmmu_lock);
-
 	/*
 	 * 100ms timer.
 	 */
-	nvgpu_timeout_init(vm->mm->g, &timeout, 100, NVGPU_TIMER_CPU_TIMER);
+	ret = nvgpu_timeout_init(vm->mm->g, &timeout, 100,
+				 NVGPU_TIMER_CPU_TIMER);
+	if (ret != 0) {
+		nvgpu_err(vm->mm->g, "timeout_init failed (%d)", ret);
+		return ret;
+	}
+
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
 
 	do {
 		if (nvgpu_atomic_read(&mapped_buffer->ref.refcount) == 1) {
diff --git a/drivers/gpu/nvgpu/include/nvgpu/kref.h b/drivers/gpu/nvgpu/include/nvgpu/kref.h
index 486040e81..460eb4b38 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/kref.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/kref.h
@@ -57,12 +57,30 @@ static inline void nvgpu_ref_get(struct nvgpu_ref *ref)
  * @release: pointer to the function that would be invoked to clean up the
  *	     object when the reference count becomes zero, i.e. the last
  *	     reference corresponding to this object is removed.
+ */
+static inline void nvgpu_ref_put(struct nvgpu_ref *ref,
+		void (*release)(struct nvgpu_ref *r))
+{
+	if (nvgpu_atomic_sub_and_test(1, &ref->refcount)) {
+		if (release != NULL) {
+			release(ref);
+		}
+	}
+}
+
+/*
+ * Decrement reference count for the object, call release() if it becomes
+ * zero and return the status of the removal.
+ * @ref: the nvgpu_ref object
+ * @release: pointer to the function that would be invoked to clean up the
+ *	     object when the reference count becomes zero, i.e. the last
+ *	     reference corresponding to this object is removed.
  * Return 1 if object was removed, otherwise return 0. The user should not
  * make any assumptions about the status of the object in the memory when
  * the function returns 0 and should only use it to know that there are no
  * further references to this object.
  */
-static inline int nvgpu_ref_put(struct nvgpu_ref *ref,
+static inline int nvgpu_ref_put_return(struct nvgpu_ref *ref,
		void (*release)(struct nvgpu_ref *r))
 {
 	if (nvgpu_atomic_sub_and_test(1, &ref->refcount)) {
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm.h b/drivers/gpu/nvgpu/include/nvgpu/vm.h
index 667497174..cb9f06d5a 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/vm.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/vm.h
@@ -323,7 +323,7 @@ struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
  */
 u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx);
 
-int __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr,
+void __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr,
		u32 pgsz_idx);
 
 #endif /* NVGPU_VM_H */
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index ae35cafc5..cae54475a 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -107,9 +107,7 @@ void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
		nvgpu_err(g, "failed to update gmmu ptes on unmap");
 
 	if (va_allocated) {
-		err = __nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
-		if (err)
-			nvgpu_err(g, "failed to free va");
+		__nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
 	}
 	/* TLB invalidate handled on server side */
 }
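
Note on the kref.h hunk above: the old int-returning nvgpu_ref_put() is split into a void nvgpu_ref_put() for the common case where the result is ignored, and nvgpu_ref_put_return() for callers that need to know whether the final reference was dropped. The sketch below is illustrative only and is not part of the patch; the object type, release callback, and wrapper functions (demo_obj, demo_obj_release, demo_put_*) are hypothetical names used purely to show the intended caller-side usage of the two variants.

#include <nvgpu/kref.h>

/* Hypothetical refcounted object -- for illustration only. */
struct demo_obj {
	struct nvgpu_ref ref;
};

/* Hypothetical release callback invoked when the last reference drops. */
static void demo_obj_release(struct nvgpu_ref *r)
{
	/* Free or tear down the object containing 'r' here. */
}

static void demo_put_ignore_result(struct demo_obj *obj)
{
	/* Most call sites do not care whether this put freed the object. */
	nvgpu_ref_put(&obj->ref, demo_obj_release);
}

static int demo_put_check_removed(struct demo_obj *obj)
{
	/*
	 * Call sites that need the removal status use the *_return variant:
	 * 1 means the object was released, 0 means references remain.
	 */
	return nvgpu_ref_put_return(&obj->ref, demo_obj_release);
}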