mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-22 17:36:20 +03:00
gpu: nvgpu: mm: fix CERT-C violations in page_allocator
CERT-C Rule INT30 requires checking for values wrapping when doing arithmetic operations on unsigned values. Use the safe ops or asserts to ensure unsigned arithmetic operations will not wrap. JIRA NVGPU-3586 Change-Id: Ia1fc05711520135e788023e0614c70778c076f6a Signed-off-by: Philip Elcan <pelcan@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/2131158 Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com> Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com> Reviewed-by: Alex Waterman <alexw@nvidia.com> GVS: Gerrit_Virtual_Submit Reviewed-by: Nitin Kumbhar <nkumbhar@nvidia.com> Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
committed by
mobile promotions
parent
be7c7aa040
commit
0a645e66ee
@@ -27,6 +27,7 @@
|
|||||||
#include <nvgpu/bug.h>
|
#include <nvgpu/bug.h>
|
||||||
#include <nvgpu/log2.h>
|
#include <nvgpu/log2.h>
|
||||||
#include <nvgpu/sizes.h>
|
#include <nvgpu/sizes.h>
|
||||||
|
#include <nvgpu/safe_ops.h>
|
||||||
|
|
||||||
#include "buddy_allocator_priv.h"
|
#include "buddy_allocator_priv.h"
|
||||||
|
|
||||||
@@ -49,7 +50,7 @@
|
|||||||
* VIDMEM page size is 4k.
|
* VIDMEM page size is 4k.
|
||||||
*/
|
*/
|
||||||
#define PAGE_SIZE 0x1000U
|
#define PAGE_SIZE 0x1000U
|
||||||
#define PAGE_ALIGN(addr) ((addr + (PAGE_SIZE - 1U)) & \
|
#define PAGE_ALIGN(addr) (nvgpu_safe_add_u64(addr, (PAGE_SIZE - 1U)) & \
|
||||||
((typeof(addr)) ~(PAGE_SIZE - 1U)))
|
((typeof(addr)) ~(PAGE_SIZE - 1U)))
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -257,7 +258,8 @@ static void insert_page_alloc(struct nvgpu_page_allocator *a,
|
|||||||
struct nvgpu_page_alloc *alloc)
|
struct nvgpu_page_alloc *alloc)
|
||||||
{
|
{
|
||||||
alloc->tree_entry.key_start = alloc->base;
|
alloc->tree_entry.key_start = alloc->base;
|
||||||
alloc->tree_entry.key_end = alloc->base + alloc->length;
|
alloc->tree_entry.key_end = nvgpu_safe_add_u64(alloc->base,
|
||||||
|
alloc->length);
|
||||||
|
|
||||||
nvgpu_rbtree_insert(&alloc->tree_entry, &a->allocs);
|
nvgpu_rbtree_insert(&alloc->tree_entry, &a->allocs);
|
||||||
}
|
}
|
||||||
@@ -309,6 +311,7 @@ static struct page_alloc_slab_page *alloc_slab_page(
|
|||||||
slab_page->owner = slab;
|
slab_page->owner = slab;
|
||||||
slab_page->state = SP_NONE;
|
slab_page->state = SP_NONE;
|
||||||
|
|
||||||
|
nvgpu_assert(a->pages_alloced < U64_MAX);
|
||||||
a->pages_alloced++;
|
a->pages_alloced++;
|
||||||
|
|
||||||
palloc_dbg(a, "Allocated new slab page @ 0x%012llx size=%u",
|
palloc_dbg(a, "Allocated new slab page @ 0x%012llx size=%u",
|
||||||
@@ -328,6 +331,7 @@ static void free_slab_page(struct nvgpu_page_allocator *a,
|
|||||||
slab_page->bitmap != 0U);
|
slab_page->bitmap != 0U);
|
||||||
|
|
||||||
nvgpu_free(&a->source_allocator, slab_page->page_addr);
|
nvgpu_free(&a->source_allocator, slab_page->page_addr);
|
||||||
|
nvgpu_assert(a->pages_freed < U64_MAX);
|
||||||
a->pages_freed++;
|
a->pages_freed++;
|
||||||
|
|
||||||
nvgpu_kmem_cache_free(a->slab_page_cache, slab_page);
|
nvgpu_kmem_cache_free(a->slab_page_cache, slab_page);
|
||||||
@@ -386,6 +390,7 @@ static int do_slab_alloc(struct nvgpu_page_allocator *a,
|
|||||||
|
|
||||||
nvgpu_assert(offs <= U64(U32_MAX));
|
nvgpu_assert(offs <= U64(U32_MAX));
|
||||||
nvgpu_bitmap_set(&slab_page->bitmap, U32(offs), 1U);
|
nvgpu_bitmap_set(&slab_page->bitmap, U32(offs), 1U);
|
||||||
|
nvgpu_assert(slab_page->nr_objects_alloced < U32_MAX);
|
||||||
slab_page->nr_objects_alloced++;
|
slab_page->nr_objects_alloced++;
|
||||||
|
|
||||||
if (slab_page->nr_objects_alloced < slab_page->nr_objects) {
|
if (slab_page->nr_objects_alloced < slab_page->nr_objects) {
|
||||||
@@ -403,7 +408,8 @@ static int do_slab_alloc(struct nvgpu_page_allocator *a,
|
|||||||
alloc->slab_page = slab_page;
|
alloc->slab_page = slab_page;
|
||||||
alloc->nr_chunks = 1;
|
alloc->nr_chunks = 1;
|
||||||
alloc->length = slab_page->slab_size;
|
alloc->length = slab_page->slab_size;
|
||||||
alloc->base = slab_page->page_addr + (offs * slab_page->slab_size);
|
alloc->base = nvgpu_safe_add_u64(slab_page->page_addr,
|
||||||
|
nvgpu_safe_mult_u64(offs, slab_page->slab_size));
|
||||||
|
|
||||||
sgl = (struct nvgpu_mem_sgl *)alloc->sgt.sgl;
|
sgl = (struct nvgpu_mem_sgl *)alloc->sgt.sgl;
|
||||||
sgl->phys = alloc->base;
|
sgl->phys = alloc->base;
|
||||||
@@ -455,6 +461,7 @@ static struct nvgpu_page_alloc *nvgpu_alloc_slab(
|
|||||||
|
|
||||||
palloc_dbg(a, "Alloc 0x%04llx sr=%llu id=0x%010llx [slab]",
|
palloc_dbg(a, "Alloc 0x%04llx sr=%llu id=0x%010llx [slab]",
|
||||||
len, slab_nr, alloc->base);
|
len, slab_nr, alloc->base);
|
||||||
|
nvgpu_assert(a->nr_slab_allocs < U64_MAX);
|
||||||
a->nr_slab_allocs++;
|
a->nr_slab_allocs++;
|
||||||
|
|
||||||
return alloc;
|
return alloc;
|
||||||
@@ -477,9 +484,11 @@ static void nvgpu_free_slab(struct nvgpu_page_allocator *a,
|
|||||||
enum slab_page_state new_state;
|
enum slab_page_state new_state;
|
||||||
u32 offs;
|
u32 offs;
|
||||||
|
|
||||||
offs = (u32)(alloc->base - slab_page->page_addr) / slab_page->slab_size;
|
offs = (u32)nvgpu_safe_sub_u64(alloc->base, slab_page->page_addr) /
|
||||||
|
slab_page->slab_size;
|
||||||
nvgpu_bitmap_clear(&slab_page->bitmap, offs, 1U);
|
nvgpu_bitmap_clear(&slab_page->bitmap, offs, 1U);
|
||||||
|
|
||||||
|
nvgpu_assert(slab_page->nr_objects_alloced < U32_MAX);
|
||||||
slab_page->nr_objects_alloced--;
|
slab_page->nr_objects_alloced--;
|
||||||
|
|
||||||
if (slab_page->nr_objects_alloced == 0U) {
|
if (slab_page->nr_objects_alloced == 0U) {
|
||||||
@@ -515,6 +524,7 @@ static void nvgpu_free_slab(struct nvgpu_page_allocator *a,
|
|||||||
* Now handle the page_alloc.
|
* Now handle the page_alloc.
|
||||||
*/
|
*/
|
||||||
nvgpu_page_alloc_free_pages(a, alloc, false);
|
nvgpu_page_alloc_free_pages(a, alloc, false);
|
||||||
|
nvgpu_assert(a->nr_slab_frees < U64_MAX);
|
||||||
a->nr_slab_frees++;
|
a->nr_slab_frees++;
|
||||||
|
|
||||||
return;
|
return;
|
||||||
@@ -595,7 +605,7 @@ static struct nvgpu_page_alloc *do_nvgpu_alloc_pages(
|
|||||||
goto fail_cleanup;
|
goto fail_cleanup;
|
||||||
}
|
}
|
||||||
|
|
||||||
pages -= chunk_pages;
|
pages = nvgpu_safe_sub_u64(pages, chunk_pages);
|
||||||
|
|
||||||
sgl->phys = chunk_addr;
|
sgl->phys = chunk_addr;
|
||||||
sgl->dma = chunk_addr;
|
sgl->dma = chunk_addr;
|
||||||
@@ -708,6 +718,7 @@ static u64 nvgpu_page_palloc(struct nvgpu_allocator *na, u64 len)
|
|||||||
|
|
||||||
insert_page_alloc(a, alloc);
|
insert_page_alloc(a, alloc);
|
||||||
|
|
||||||
|
nvgpu_assert(a->nr_allocs < U64_MAX);
|
||||||
a->nr_allocs++;
|
a->nr_allocs++;
|
||||||
if (real_len > a->page_size / 2U) {
|
if (real_len > a->page_size / 2U) {
|
||||||
a->pages_alloced += alloc->length >> a->page_shift;
|
a->pages_alloced += alloc->length >> a->page_shift;
|
||||||
@@ -744,6 +755,7 @@ static void nvgpu_page_free(struct nvgpu_allocator *na, u64 base)
|
|||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
nvgpu_assert(a->nr_frees < U64_MAX);
|
||||||
a->nr_frees++;
|
a->nr_frees++;
|
||||||
|
|
||||||
palloc_dbg(a, "Free 0x%llx id=0x%010llx",
|
palloc_dbg(a, "Free 0x%llx id=0x%010llx",
|
||||||
@@ -755,7 +767,8 @@ static void nvgpu_page_free(struct nvgpu_allocator *na, u64 base)
|
|||||||
if (alloc->slab_page != NULL) {
|
if (alloc->slab_page != NULL) {
|
||||||
nvgpu_free_slab(a, alloc);
|
nvgpu_free_slab(a, alloc);
|
||||||
} else {
|
} else {
|
||||||
a->pages_freed += (alloc->length >> a->page_shift);
|
a->pages_freed = nvgpu_safe_add_u64(a->pages_freed,
|
||||||
|
alloc->length >> a->page_shift);
|
||||||
nvgpu_page_alloc_free_pages(a, alloc, true);
|
nvgpu_page_alloc_free_pages(a, alloc, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -842,8 +855,9 @@ static u64 nvgpu_page_palloc_fixed(struct nvgpu_allocator *na,
|
|||||||
sgl = nvgpu_sgt_get_next(&alloc->sgt, sgl);
|
sgl = nvgpu_sgt_get_next(&alloc->sgt, sgl);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
nvgpu_assert(a->nr_fixed_allocs < U64_MAX);
|
||||||
a->nr_fixed_allocs++;
|
a->nr_fixed_allocs++;
|
||||||
a->pages_alloced += pages;
|
a->pages_alloced = nvgpu_safe_add_u64(a->pages_alloced, pages);
|
||||||
|
|
||||||
if ((a->flags & GPU_ALLOC_NO_SCATTER_GATHER) != 0ULL) {
|
if ((a->flags & GPU_ALLOC_NO_SCATTER_GATHER) != 0ULL) {
|
||||||
return alloc->base;
|
return alloc->base;
|
||||||
@@ -872,8 +886,10 @@ static void nvgpu_page_free_fixed(struct nvgpu_allocator *na,
|
|||||||
palloc_dbg(a, "Free [fixed] 0x%010llx + 0x%llx",
|
palloc_dbg(a, "Free [fixed] 0x%010llx + 0x%llx",
|
||||||
alloc->base, alloc->length);
|
alloc->base, alloc->length);
|
||||||
|
|
||||||
|
nvgpu_assert(a->nr_fixed_frees < U64_MAX);
|
||||||
a->nr_fixed_frees++;
|
a->nr_fixed_frees++;
|
||||||
a->pages_freed += (alloc->length >> a->page_shift);
|
a->pages_freed = nvgpu_safe_add_u64(a->pages_freed,
|
||||||
|
alloc->length >> a->page_shift);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* This works for the time being since the buddy allocator
|
* This works for the time being since the buddy allocator
|
||||||
|
|||||||
Reference in New Issue
Block a user