gpu: nvgpu: fix Cert-C errors in vm.c

INT30-C requires that unsigned integer operations do not wrap. This
patch replaces the raw u64 additions and subtractions in vm.c with the
wrap-checking helpers nvgpu_safe_add_u64() and nvgpu_safe_sub_u64() to
resolve the reported Cert-C errors.
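
For reference, these helpers are thin wrap-checking wrappers around u64
arithmetic. Their actual nvgpu implementations are not part of this
diff; the following is only a minimal sketch of the INT30-C checks
involved, using assert() in place of whatever error handling nvgpu
really performs:

    #include <assert.h>
    #include <stdint.h>

    static inline uint64_t nvgpu_safe_add_u64(uint64_t a, uint64_t b)
    {
        /* a + b wraps exactly when b > UINT64_MAX - a. */
        assert(b <= UINT64_MAX - a);
        return a + b;
    }

    static inline uint64_t nvgpu_safe_sub_u64(uint64_t a, uint64_t b)
    {
        /* a - b wraps exactly when b > a. */
        assert(b <= a);
        return a - b;
    }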

Jira NVGPU-4677

Change-Id: I7dad28e8de9fe8ea1bdc0ca33b8cebe103cac5a7
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2264218
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Vedashree Vidwans
Date:      2019-12-17 09:26:14 -08:00
Committer: Alex Waterman
Parent:    a50802510f
Commit:    1ec4a4f8ec


@@ -504,11 +504,13 @@ static void nvgpu_vm_init_check_big_pages(struct vm_gk20a *vm,
 		if (unified_va) {
 			vm->big_pages = nvgpu_big_pages_possible(vm,
 					user_vma_start,
-					user_vma_limit - user_vma_start);
+					nvgpu_safe_sub_u64(user_vma_limit,
+						user_vma_start));
 		} else {
 			vm->big_pages = nvgpu_big_pages_possible(vm,
 					user_lp_vma_start,
-					user_lp_vma_limit - user_lp_vma_start);
+					nvgpu_safe_sub_u64(user_lp_vma_limit,
+						user_lp_vma_start));
 		}
 	}
 }
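
The hunk above computes the size of the user VMA. With a raw u64
subtraction, any layout where the limit ended up below the start would
wrap silently to a near-2^64 "size". A standalone demonstration with
hypothetical values (not taken from nvgpu):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t user_vma_start = 0x2000;  /* hypothetical */
        uint64_t user_vma_limit = 0x1000;  /* hypothetical: limit < start */

        /* Raw subtraction wraps to 0xfffffffffffff000. */
        printf("size = 0x%llx\n",
               (unsigned long long)(user_vma_limit - user_vma_start));
        return 0;
    }

With nvgpu_safe_sub_u64() the wrap is trapped at the call site instead
of feeding a bogus size into nvgpu_big_pages_possible(). The same
substitution covers the va_limit - kernel_reserved computations in the
hunks below.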
@@ -555,7 +557,7 @@ static int nvgpu_vm_init_vma(struct gk20a *g, struct vm_gk20a *vm,
 	u64 kernel_vma_flags;
 
 	/* Setup vma limits. */
-	if (kernel_reserved + low_hole < aperture_size) {
+	if (nvgpu_safe_add_u64(kernel_reserved, low_hole) < aperture_size) {
 		/*
 		 * If big_pages are disabled for this VM then it only makes
 		 * sense to make one VM, same as if the unified address flag
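
This guard sums the kernel carveout and the low hole before comparing
against the aperture size. If the raw sum wrapped, the comparison could
spuriously pass. A sketch of the failure mode with pathological,
hypothetical values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t kernel_reserved = UINT64_MAX - 0xfffULL; /* hypothetical */
        uint64_t low_hole = 0x2000ULL;                    /* hypothetical */
        uint64_t aperture_size = 1ULL << 40;              /* hypothetical */

        /* The raw sum wraps to 0x1000, so the check wrongly passes. */
        if (kernel_reserved + low_hole < aperture_size) {
            printf("guard passed despite wrap\n");
        }
        return 0;
    }

nvgpu_safe_add_u64() traps the wrap instead of letting the wrapped sum
reach the comparison.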
@@ -563,14 +565,16 @@ static int nvgpu_vm_init_vma(struct gk20a *g, struct vm_gk20a *vm,
 		 */
 		if (!big_pages || unified_va) {
 			user_vma_start = low_hole;
-			user_vma_limit = vm->va_limit - kernel_reserved;
+			user_vma_limit = nvgpu_safe_sub_u64(vm->va_limit,
+					kernel_reserved);
 			user_lp_vma_start = user_vma_limit;
 			user_lp_vma_limit = user_vma_limit;
 		} else {
 			user_vma_start = low_hole;
 			user_vma_limit = nvgpu_gmmu_va_small_page_limit();
 			user_lp_vma_start = nvgpu_gmmu_va_small_page_limit();
-			user_lp_vma_limit = vm->va_limit - kernel_reserved;
+			user_lp_vma_limit = nvgpu_safe_sub_u64(vm->va_limit,
+					kernel_reserved);
 		}
 	} else {
 		user_vma_start = 0;
@@ -578,7 +582,7 @@ static int nvgpu_vm_init_vma(struct gk20a *g, struct vm_gk20a *vm,
 		user_lp_vma_start = 0;
 		user_lp_vma_limit = 0;
 	}
-	kernel_vma_start = vm->va_limit - kernel_reserved;
+	kernel_vma_start = nvgpu_safe_sub_u64(vm->va_limit, kernel_reserved);
 	kernel_vma_limit = vm->va_limit;
 
 	nvgpu_log_info(g, "user_vma [0x%llx,0x%llx)",
@@ -598,8 +602,9 @@ static int nvgpu_vm_init_vma(struct gk20a *g, struct vm_gk20a *vm,
 		goto clean_up_page_tables;
 	}
 
-	kernel_vma_flags = (kernel_reserved + low_hole) == aperture_size ?
-		0ULL : GPU_ALLOC_GVA_SPACE;
+	kernel_vma_flags = nvgpu_safe_add_u64(kernel_reserved, low_hole) ==
+		aperture_size ?
+		0ULL : GPU_ALLOC_GVA_SPACE;
 
 	nvgpu_vm_init_check_big_pages(vm, user_vma_start, user_vma_limit,
 			user_lp_vma_start, user_lp_vma_limit,
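
Expanded for readability, the reworked kernel_vma_flags assignment above
is equivalent to the following sketch (the local variable "total" is
introduced here for illustration only):

    u64 total = nvgpu_safe_add_u64(kernel_reserved, low_hole);

    if (total == aperture_size) {
        /* Kernel carveout plus low hole consume the entire aperture. */
        kernel_vma_flags = 0ULL;
    } else {
        kernel_vma_flags = GPU_ALLOC_GVA_SPACE;
    }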