Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: fix MISRA Directive 4.7 errors in MM
Directive 4.7 requires that error information returned by a function be
tested before the error is returned. This patch prints an error message
if the returned value indicates an error.

Jira NVGPU-4780

Change-Id: I9e461b94369a72fb695d05a9b6482c9b66ede55d
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2271509
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: Alex Waterman
Parent: 399a8f3125
Commit: 79c64d64be
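The change applies one pattern throughout the MM code: instead of returning a callee's result directly, the result is captured in a local variable, tested, and an error is logged before the value is propagated. The following is a minimal standalone sketch of that pattern; do_alloc() and log_error() are hypothetical stand-ins, not nvgpu APIs, and only the capture-test-log-return shape is meant to match the diff below.

#include <stdio.h>
#include <errno.h>

/* Hypothetical callee that reports failure via a negative errno value. */
static int do_alloc(void)
{
	return -ENOMEM;		/* pretend the allocation failed */
}

/* Hypothetical logger standing in for nvgpu_err(). */
static void log_error(const char *msg)
{
	fprintf(stderr, "%s\n", msg);
}

/* Before: the callee's error is returned without being tested. */
static int alloc_untested(void)
{
	return do_alloc();
}

/* After: capture the result, test it, log on failure, then propagate it. */
static int alloc_tested(void)
{
	int err = do_alloc();

	if (err < 0) {
		log_error("Failed!");
	}
	return err;
}

int main(void)
{
	int before = alloc_untested();
	int after = alloc_tested();

	return (before < 0 || after < 0) ? 1 : 0;
}

In the actual patch the log call is nvgpu_err() and the tested values come from the allocator, DMA, and GMMU paths shown in the hunks below; one GMMU hunk additionally propagates the callee's error code instead of a hard-coded -ENOMEM.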
@@ -1,7 +1,7 @@
 /*
  * gk20a allocator
  *
- * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -204,5 +204,9 @@ int nvgpu_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 		nvgpu_err(g, "Incorrect allocator type, couldn't initialize");
 		break;
 	}
+
+	if (err < 0) {
+		nvgpu_err(g, "Failed!");
+	}
 	return err;
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -94,12 +94,19 @@ int nvgpu_dma_alloc_vid_at(struct gk20a *g,
 int nvgpu_dma_alloc_map(struct vm_gk20a *vm, size_t size,
 		struct nvgpu_mem *mem)
 {
-	return nvgpu_dma_alloc_map_flags(vm, 0, size, mem);
+	int err = nvgpu_dma_alloc_map_flags(vm, 0, size, mem);
+
+	if (err < 0) {
+		nvgpu_err(vm->mm->g, "Failed!");
+	}
+	return err;
 }
 
 int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem)
 {
+	int err = 0;
+
 #ifdef CONFIG_NVGPU_DGPU
 	if (!nvgpu_is_enabled(gk20a_from_vm(vm), NVGPU_MM_UNIFIED_MEMORY)) {
 		/*
@@ -108,7 +115,7 @@ int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
 		 * using nvgpu_dma_alloc_map and it's vidmem, or if there's a
 		 * difference, the user should use the flag explicitly anyway.
 		 */
-		int err = nvgpu_dma_alloc_map_flags_vid(vm,
+		err = nvgpu_dma_alloc_map_flags_vid(vm,
 				flags | NVGPU_DMA_NO_KERNEL_MAPPING,
 				size, mem);
 
@@ -123,13 +130,23 @@ int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
 	}
 #endif
 
-	return nvgpu_dma_alloc_map_flags_sys(vm, flags, size, mem);
+	err = nvgpu_dma_alloc_map_flags_sys(vm, flags, size, mem);
+	if (err < 0) {
+		nvgpu_err(vm->mm->g, "Failed!");
+	}
+	return err;
 }
 
 int nvgpu_dma_alloc_map_sys(struct vm_gk20a *vm, size_t size,
 		struct nvgpu_mem *mem)
 {
-	return nvgpu_dma_alloc_map_flags_sys(vm, 0, size, mem);
+	int err = 0;
+
+	err = nvgpu_dma_alloc_map_flags_sys(vm, 0, size, mem);
+	if (err < 0) {
+		nvgpu_err(vm->mm->g, "Failed!");
+	}
+	return err;
 }
 
 int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
@@ -385,6 +385,8 @@ static int nvgpu_set_pd_level_is_next_level_pde(struct vm_gk20a *vm,
 	struct nvgpu_gmmu_pd *next_pd = *next_pd_ptr;
 
 	if (next_l->update_entry != NULL) {
+		int err = 0;
+
 		if (pd_allocate_children(vm, l, pd, attrs) != 0) {
 			return -ENOMEM;
 		}
@@ -400,8 +402,9 @@ static int nvgpu_set_pd_level_is_next_level_pde(struct vm_gk20a *vm,
 		/*
 		 * Allocate the backing memory for next_pd.
 		 */
-		if (pd_allocate(vm, next_pd, next_l, attrs) != 0) {
-			return -ENOMEM;
+		err = pd_allocate(vm, next_pd, next_l, attrs);
+		if (err != 0) {
+			return err;
 		}
 	}
 	*next_pd_ptr = next_pd;
@@ -728,6 +731,9 @@ static int nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
 			0,
 			virt_addr, length,
 			attrs);
+	if (err != 0) {
+		nvgpu_err(g, "Failed!");
+	}
 	return err;
 }
 
@@ -772,6 +778,9 @@ static int nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
 	err = nvgpu_gmmu_do_update_page_table_no_iommu(vm, sgt, space_to_skip,
 			virt_addr, length, attrs);
 
+	if (err < 0) {
+		nvgpu_err(g, "Failed!");
+	}
 	return err;
 }
 
@@ -1158,13 +1167,18 @@ static int nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
 
 int nvgpu_get_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte)
 {
+	int err = 0;
 	struct nvgpu_gmmu_attrs attrs = {
 		.pgsz = 0,
 	};
 
-	return nvgpu_locate_pte(g, vm, &vm->pdb,
+	err = nvgpu_locate_pte(g, vm, &vm->pdb,
 			vaddr, 0U, &attrs,
 			pte, NULL, NULL, NULL);
+	if (err < 0) {
+		nvgpu_err(g, "Failed!");
+	}
+	return err;
 }
 
 int nvgpu_set_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte)