From 5e75f90f1c38a65c2588f0b6ebb57ab8d5709af3 Mon Sep 17 00:00:00 2001
From: Philip Elcan
Date: Tue, 2 Jul 2019 10:32:52 -0400
Subject: [PATCH] gpu: nvgpu: mm: fix CERT-C bugs in pd_cache

Fix CERT-C INT-30 and INT-31 violations in
nvgpu.common.mm.gmmu.pd_cache by using safe ops.

JIRA NVGPU-3637

Change-Id: Iecc7769e46c5c9c7dabaa852067e8f4052a73ac5
Signed-off-by: Philip Elcan
Reviewed-on: https://git-master.nvidia.com/r/2146987
Reviewed-by: svc-mobile-coverity
Reviewed-by: svc-mobile-misra
GVS: Gerrit_Virtual_Submit
Reviewed-by: Nitin Kumbhar
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/common/mm/gmmu/pd_cache.c | 34 +++++++++++++--------
 1 file changed, 21 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/nvgpu/common/mm/gmmu/pd_cache.c b/drivers/gpu/nvgpu/common/mm/gmmu/pd_cache.c
index 8dbd87674..cf93d8bdd 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu/pd_cache.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu/pd_cache.c
@@ -29,6 +29,7 @@
 #include
 #include
 #include
+#include
 
 #include "pd_cache_priv.h"
@@ -75,19 +76,22 @@ u64 nvgpu_pd_gpu_addr(struct gk20a *g, struct nvgpu_gmmu_pd *pd)
 		page_addr = nvgpu_mem_get_addr(g, pd->mem);
 	}
 
-	return page_addr + pd->mem_offs;
+	return nvgpu_safe_add_u64(page_addr, U64(pd->mem_offs));
 }
 
 u32 nvgpu_pd_offset_from_index(const struct gk20a_mmu_level *l, u32 pd_idx)
 {
-	return (pd_idx * l->entry_size) / U32(sizeof(u32));
+	return nvgpu_safe_mult_u32(pd_idx, l->entry_size) / U32(sizeof(u32));
 }
 
 void nvgpu_pd_write(struct gk20a *g, struct nvgpu_gmmu_pd *pd,
 		    size_t w, u32 data)
 {
+	u64 tmp_offset = nvgpu_safe_add_u64((pd->mem_offs / sizeof(u32)), w);
+
 	nvgpu_mem_wr32(g, pd->mem,
-		       (u32)((pd->mem_offs / sizeof(u32)) + w), data);
+		       nvgpu_safe_cast_u64_to_u32(tmp_offset),
+		       data);
 }
 
 int nvgpu_pd_cache_init(struct gk20a *g)
@@ -244,25 +248,25 @@ static int nvgpu_pd_cache_alloc_from_partial(struct gk20a *g,
 					     struct nvgpu_pd_mem_entry *pentry,
 					     struct nvgpu_gmmu_pd *pd)
 {
-	unsigned long bit_offs;
+	u32 bit_offs;
 	u32 mem_offs;
 	u32 nr_bits = nvgpu_pd_cache_get_nr_entries(pentry);
 
 	/*
 	 * Find and allocate an open PD.
 	 */
-	bit_offs = find_first_zero_bit(pentry->alloc_map, nr_bits);
-	nvgpu_assert(bit_offs <= U32_MAX);
-	mem_offs = (u32)bit_offs * pentry->pd_size;
+	bit_offs = nvgpu_safe_cast_u64_to_u32(
+		find_first_zero_bit(pentry->alloc_map, nr_bits));
+	mem_offs = nvgpu_safe_mult_u32(bit_offs, pentry->pd_size);
 
-	pd_dbg(g, "PD-Alloc [C] Partial: offs=%lu nr_bits=%d src=0x%p",
+	pd_dbg(g, "PD-Alloc [C] Partial: offs=%u nr_bits=%d src=0x%p",
 	       bit_offs, nr_bits, pentry);
 
 	/* Bit map full. Somethings wrong. */
 	nvgpu_assert(bit_offs < nr_bits);
 
-	nvgpu_set_bit((u32)bit_offs, pentry->alloc_map);
-	pentry->allocs += 1U;
+	nvgpu_set_bit(bit_offs, pentry->alloc_map);
+	pentry->allocs = nvgpu_safe_add_u32(pentry->allocs, 1U);
 
 	/*
 	 * First update the pd.
@@ -311,11 +315,15 @@ static int nvgpu_pd_cache_alloc(struct gk20a *g, struct nvgpu_pd_cache *cache,
 {
 	struct nvgpu_pd_mem_entry *pentry;
 	int err;
+	bool bytes_valid;
 
 	pd_dbg(g, "PD-Alloc [C] %u bytes", bytes);
 
-	if ((bytes & (bytes - 1U)) != 0U ||
-	    bytes < NVGPU_PD_CACHE_MIN) {
+	bytes_valid = bytes >= NVGPU_PD_CACHE_MIN;
+	if (bytes_valid) {
+		bytes_valid = (bytes & nvgpu_safe_sub_u32(bytes, 1U)) == 0U;
+	}
+	if (!bytes_valid) {
 		pd_dbg(g, "PD-Alloc [C] Invalid (bytes=%u)!", bytes);
 		return -EINVAL;
 	}
@@ -408,7 +416,7 @@ static void nvgpu_pd_cache_do_free(struct gk20a *g,
 
 	/* Mark entry as free. */
 	nvgpu_clear_bit(bit, pentry->alloc_map);
-	pentry->allocs -= 1U;
+	pentry->allocs = nvgpu_safe_sub_u32(pentry->allocs, 1U);
 
 	if (pentry->allocs > 0U) {
 		/*
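
Reviewer note: the hunks above only show the call sites of the safe-ops
helpers (nvgpu_safe_add_u64(), nvgpu_safe_add_u32(), nvgpu_safe_sub_u32(),
nvgpu_safe_mult_u32(), nvgpu_safe_cast_u64_to_u32()); their definitions are
not part of this diff. The standalone sketch below illustrates the semantics
this change assumes they provide: trap instead of wrapping (CERT-C INT30-C)
and trap instead of silently truncating on a narrowing cast (CERT-C INT31-C).
The safe_* names, the assert()-based trapping, and the example values are
illustrative only and are not taken from the nvgpu sources, which likely trap
through nvgpu_assert()/BUG() instead.

/*
 * Sketch of overflow-checked helpers, not actual nvgpu code.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

static u64 safe_add_u64(u64 a, u64 b)
{
	assert(a <= UINT64_MAX - b);		/* addition must not wrap */
	return a + b;
}

static u32 safe_sub_u32(u32 a, u32 b)
{
	assert(a >= b);				/* subtraction must not wrap */
	return a - b;
}

static u32 safe_mult_u32(u32 a, u32 b)
{
	assert(b == 0U || a <= UINT32_MAX / b);	/* product must fit in u32 */
	return a * b;
}

static u32 safe_cast_u64_to_u32(u64 v)
{
	assert(v <= UINT32_MAX);		/* narrowing must be lossless */
	return (u32)v;
}

int main(void)
{
	/* Same shape as nvgpu_pd_offset_from_index(): idx * entry_size / 4. */
	u32 offs = safe_mult_u32(512U, 8U) / (u32)sizeof(u32);

	/* Same shape as nvgpu_pd_gpu_addr(): page address + byte offset. */
	u64 addr = safe_add_u64(0x100000000ULL, 0x1000ULL);

	/* Same shape as the pd_cache alloc/free bookkeeping. */
	u32 allocs = 0U;
	allocs = safe_sub_u32(allocs + 1U, 1U);

	printf("offs=%u addr=0x%llx allocs=%u\n",
	       offs, (unsigned long long)addr, allocs);
	return (int)safe_cast_u64_to_u32(0ULL);
}

The point of the pattern is that each precondition is checked before the
arithmetic or cast is performed, so the expressions at the call sites touched
above can no longer wrap or truncate silently.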