gpu: nvgpu: fix assertion/nvgpu_safe* API calls in devctl path

Fix the following issues in the devctl processing path:
- Remove the assertion for kind >= 0; it is already checked in
  nvgpu_vm_do_map().
- Check for possible overflow of map_addr and the mapping size without
  using the nvgpu_safe* APIs for the NVGPU_AS_DEVCTL_MAP_BUFFER_EX and
  NVGPU_AS_DEVCTL_ALLOC_SPACE devctls.

Jira NVGPU-6496

Change-Id: I569c89d50900100f57bc9727fd032d6cd2c331e4
Signed-off-by: shashank singh <shashsingh@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2487550
(cherry picked from commit 6d340d7e73ba8e031f50679991d259daa682a006)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2492291
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
shashank singh authored on 2021-02-22 12:10:39 +05:30; committed by mobile promotions
commit 1d86da257b (parent b91f57d933)
3 changed files with 10 additions and 6 deletions

@@ -1,7 +1,7 @@
 /*
  * gk20a allocator
  *
- * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -89,6 +89,10 @@ void nvgpu_free(struct nvgpu_allocator *a, u64 addr)
 u64 nvgpu_alloc_fixed(struct nvgpu_allocator *a, u64 base, u64 len,
 		      u32 page_size)
 {
+	if ((U64_MAX - base) < len) {
+		return 0ULL;
+	}
+
 	if (a->ops->alloc_fixed != NULL) {
 		return a->ops->alloc_fixed(a, base, len, page_size);
 	}
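
For context, a minimal standalone sketch of the overflow pre-check added to nvgpu_alloc_fixed() above. The names alloc_fixed_checked and the backend callback are hypothetical stand-ins (the real function dispatches through a->ops->alloc_fixed()); the point is only that a base/len pair whose sum would wrap u64 is rejected with 0 (allocation failure) before reaching the backend, rather than being run through the nvgpu_safe* arithmetic helpers.

/*
 * Sketch only: hypothetical wrapper mirroring the new pre-check.
 */
#include <stdint.h>

uint64_t alloc_fixed_checked(uint64_t base, uint64_t len,
			     uint64_t (*backend)(uint64_t base, uint64_t len))
{
	if ((UINT64_MAX - base) < len) {	/* base + len would overflow u64 */
		return 0;			/* treat as allocation failure */
	}
	return backend(base, len);
}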

@@ -1536,7 +1536,6 @@ int nvgpu_vm_map(struct vm_gk20a *vm,
 	mapped_buffer->pgsz_idx = binfo.pgsz_idx;
 	mapped_buffer->vm = vm;
 	mapped_buffer->flags = binfo.flags;
-	nvgpu_assert(map_key_kind >= 0);
 	mapped_buffer->kind = map_key_kind;
 	mapped_buffer->va_allocated = va_allocated;
 	mapped_buffer->vm_area = vm_area;
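
The removed nvgpu_assert() duplicates a check made earlier in the map path; per the commit message, kind is already validated in nvgpu_vm_do_map(). A minimal sketch of that validate-once pattern, with hypothetical names (mapped_buf_sketch, do_map_sketch) standing in for the driver's structures:

/*
 * Sketch only: kind is range-checked once where it enters the path,
 * so storing it later needs no second assertion.
 */
#include <errno.h>

struct mapped_buf_sketch {
	int kind;
};

int do_map_sketch(struct mapped_buf_sketch *buf, int kind)
{
	if (kind < 0) {			/* single validation point */
		return -EINVAL;
	}
	buf->kind = kind;		/* no second "kind >= 0" assert needed here */
	return 0;
}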

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -50,13 +50,14 @@ int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm,
 	struct gk20a *g = vm->mm->g;
 	struct nvgpu_vm_area *vm_area;
 	struct nvgpu_mapped_buf *buffer;
-	u64 map_end = nvgpu_safe_add_u64(map_addr, map_size);
+	u64 map_end;
 
 	/* can wrap around with insane map_size; zero is disallowed too */
-	if (map_end <= map_addr) {
+	if (((U64_MAX - map_size) < map_addr) || (map_size == 0ULL)) {
 		nvgpu_warn(g, "fixed offset mapping with invalid map_size");
 		return -EINVAL;
 	}
+	map_end = map_addr + map_size;
 
 	if ((map_addr &
 	     nvgpu_safe_sub_u64(U64(vm->gmmu_page_sizes[pgsz_idx]), U64(1)))
@@ -88,7 +89,7 @@ int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm,
 	 * mappings by checking the buffer with the highest GPU VA
 	 * that is less than our buffer end */
 	buffer = nvgpu_vm_find_mapped_buf_less_than(
-		vm, nvgpu_safe_add_u64(map_addr, map_size));
+		vm, map_end);
 	if (buffer != NULL) {
 		if (nvgpu_safe_add_u64(buffer->addr, buffer->size) > map_addr) {
 			nvgpu_warn(g, "overlapping buffer map requested");