diff --git a/drivers/gpu/nvgpu/common/mm/allocators/nvgpu_allocator.c b/drivers/gpu/nvgpu/common/mm/allocators/nvgpu_allocator.c
index 3ce6e07fa..4f8323219 100644
--- a/drivers/gpu/nvgpu/common/mm/allocators/nvgpu_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/allocators/nvgpu_allocator.c
@@ -1,7 +1,7 @@
 /*
  * gk20a allocator
  *
- * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -204,5 +204,9 @@ int nvgpu_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 		nvgpu_err(g, "Incorrect allocator type, couldn't initialize");
 		break;
 	}
+
+	if (err < 0) {
+		nvgpu_err(g, "Failed!");
+	}
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/common/mm/dma.c b/drivers/gpu/nvgpu/common/mm/dma.c
index e99242b32..3a07b0574 100644
--- a/drivers/gpu/nvgpu/common/mm/dma.c
+++ b/drivers/gpu/nvgpu/common/mm/dma.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -94,12 +94,19 @@ int nvgpu_dma_alloc_vid_at(struct gk20a *g,
 int nvgpu_dma_alloc_map(struct vm_gk20a *vm, size_t size,
 		struct nvgpu_mem *mem)
 {
-	return nvgpu_dma_alloc_map_flags(vm, 0, size, mem);
+	int err = nvgpu_dma_alloc_map_flags(vm, 0, size, mem);
+
+	if (err < 0) {
+		nvgpu_err(vm->mm->g, "Failed!");
+	}
+	return err;
 }
 
 int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem)
 {
+	int err = 0;
+
 #ifdef CONFIG_NVGPU_DGPU
 	if (!nvgpu_is_enabled(gk20a_from_vm(vm), NVGPU_MM_UNIFIED_MEMORY)) {
 		/*
@@ -108,7 +115,7 @@ int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
 		 * using nvgpu_dma_alloc_map and it's vidmem, or if there's a
 		 * difference, the user should use the flag explicitly anyway.
 		 */
-		int err = nvgpu_dma_alloc_map_flags_vid(vm,
+		err = nvgpu_dma_alloc_map_flags_vid(vm,
 				flags | NVGPU_DMA_NO_KERNEL_MAPPING,
 				size, mem);
 
@@ -123,13 +130,23 @@ int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
 	}
 #endif
 
-	return nvgpu_dma_alloc_map_flags_sys(vm, flags, size, mem);
+	err = nvgpu_dma_alloc_map_flags_sys(vm, flags, size, mem);
+	if (err < 0) {
+		nvgpu_err(vm->mm->g, "Failed!");
+	}
+	return err;
 }
 
 int nvgpu_dma_alloc_map_sys(struct vm_gk20a *vm, size_t size,
 		struct nvgpu_mem *mem)
 {
-	return nvgpu_dma_alloc_map_flags_sys(vm, 0, size, mem);
+	int err = 0;
+
+	err = nvgpu_dma_alloc_map_flags_sys(vm, 0, size, mem);
+	if (err < 0) {
+		nvgpu_err(vm->mm->g, "Failed!");
+	}
+	return err;
 }
 
 int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c b/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
index 8ac23fe2a..6bc3152f6 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
@@ -385,6 +385,8 @@ static int nvgpu_set_pd_level_is_next_level_pde(struct vm_gk20a *vm,
 	struct nvgpu_gmmu_pd *next_pd = *next_pd_ptr;
 
 	if (next_l->update_entry != NULL) {
+		int err = 0;
+
 		if (pd_allocate_children(vm, l, pd, attrs) != 0) {
 			return -ENOMEM;
 		}
@@ -400,8 +402,9 @@ static int nvgpu_set_pd_level_is_next_level_pde(struct vm_gk20a *vm,
 		/*
 		 * Allocate the backing memory for next_pd.
 		 */
-		if (pd_allocate(vm, next_pd, next_l, attrs) != 0) {
-			return -ENOMEM;
+		err = pd_allocate(vm, next_pd, next_l, attrs);
+		if (err != 0) {
+			return err;
 		}
 	}
 	*next_pd_ptr = next_pd;
@@ -728,6 +731,9 @@ static int nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
 				  0,
 				  virt_addr,
 				  length,
 				  attrs);
+	if (err != 0) {
+		nvgpu_err(g, "Failed!");
+	}
 	return err;
 }
@@ -772,6 +778,9 @@ static int nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
 	err = nvgpu_gmmu_do_update_page_table_no_iommu(vm,
 			sgt, space_to_skip,
 			virt_addr, length,
 			attrs);
+	if (err < 0) {
+		nvgpu_err(g, "Failed!");
+	}
 	return err;
 }
@@ -1158,13 +1167,18 @@ static int nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
 
 int nvgpu_get_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte)
 {
+	int err = 0;
 	struct nvgpu_gmmu_attrs attrs = {
 		.pgsz = 0,
 	};
 
-	return nvgpu_locate_pte(g, vm, &vm->pdb,
+	err = nvgpu_locate_pte(g, vm, &vm->pdb,
 			vaddr, 0U, &attrs,
 			pte, NULL, NULL, NULL);
+	if (err < 0) {
+		nvgpu_err(g, "Failed!");
+	}
+	return err;
 }
 
 int nvgpu_set_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte)