From a3ba265dd62b1fc574213137d98f702abb1c5d2f Mon Sep 17 00:00:00 2001
From: Philip Elcan
Date: Fri, 8 Mar 2019 17:10:39 -0500
Subject: [PATCH] gpu: nvgpu: mm: fix MISRA 10.3 violations

MISRA Rule 10.3 prohibits implicit assignment of objects of different
size or essential type. This resolves a number of 10.3 violations in
the nvgpu/common/mm unit.

JIRA NVGPU-2935

Change-Id: Ic9d826bf67417962aab433d08d3e922de26e3edc
Signed-off-by: Philip Elcan
Reviewed-on: https://git-master.nvidia.com/r/2036117
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 .../mm/allocators/buddy_allocator_priv.h      |  2 +-
 .../common/mm/allocators/lockless_allocator.c |  4 +++-
 .../common/mm/allocators/page_allocator.c     | 17 ++++++++-------
 drivers/gpu/nvgpu/common/mm/comptags.c        |  7 ++++---
 drivers/gpu/nvgpu/common/mm/gmmu/page_table.c | 21 +++++++++++--------
 drivers/gpu/nvgpu/common/mm/mm.c              |  4 ++--
 drivers/gpu/nvgpu/common/mm/vidmem.c          |  4 ++--
 drivers/gpu/nvgpu/common/mm/vm.c              |  6 +++---
 drivers/gpu/nvgpu/common/vgpu/mm/mm_vgpu.c    |  3 ++-
 drivers/gpu/nvgpu/include/nvgpu/gmmu.h        |  4 ++--
 .../gpu/nvgpu/include/nvgpu/page_allocator.h  |  2 +-
 drivers/gpu/nvgpu/include/nvgpu/pd_cache.h    |  4 ++--
 .../gpu/nvgpu/include/nvgpu/vgpu/tegra_vgpu.h |  4 ++--
 drivers/gpu/nvgpu/include/nvgpu/vm.h          |  2 +-
 14 files changed, 47 insertions(+), 37 deletions(-)

diff --git a/drivers/gpu/nvgpu/common/mm/allocators/buddy_allocator_priv.h b/drivers/gpu/nvgpu/common/mm/allocators/buddy_allocator_priv.h
index d3453cfc0..2c57862e9 100644
--- a/drivers/gpu/nvgpu/common/mm/allocators/buddy_allocator_priv.h
+++ b/drivers/gpu/nvgpu/common/mm/allocators/buddy_allocator_priv.h
@@ -198,7 +198,7 @@ static inline struct nvgpu_list_node *balloc_get_order_list(
 }
 
 static inline u64 balloc_order_to_len(struct nvgpu_buddy_allocator *a,
-				      int order)
+				      u64 order)
 {
 	return BIT64(order) * a->blk_size;
 }
diff --git a/drivers/gpu/nvgpu/common/mm/allocators/lockless_allocator.c b/drivers/gpu/nvgpu/common/mm/allocators/lockless_allocator.c
index cd48a418d..a7efaf047 100644
--- a/drivers/gpu/nvgpu/common/mm/allocators/lockless_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/allocators/lockless_allocator.c
@@ -24,6 +24,7 @@
 #include
 #include
 #include
+#include
 
 #include "lockless_allocator_priv.h"
 
@@ -103,7 +104,8 @@ static void nvgpu_lockless_free(struct nvgpu_allocator *a, u64 addr)
 	while (true) {
 		head = NV_ACCESS_ONCE(pa->head);
 		NV_ACCESS_ONCE(pa->next[cur_idx]) = head;
-		ret = cmpxchg(&pa->head, head, cur_idx);
+		nvgpu_assert(cur_idx <= U64(INT_MAX));
+		ret = cmpxchg(&pa->head, head, (int)cur_idx);
 		if (ret == head) {
 			nvgpu_atomic_dec(&pa->nr_allocs);
 			alloc_dbg(a, "Free node # %llu", cur_idx);
diff --git a/drivers/gpu/nvgpu/common/mm/allocators/page_allocator.c b/drivers/gpu/nvgpu/common/mm/allocators/page_allocator.c
index feb74c5df..67c48566e 100644
--- a/drivers/gpu/nvgpu/common/mm/allocators/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/allocators/page_allocator.c
@@ -382,7 +382,8 @@ static int do_slab_alloc(struct nvgpu_page_allocator *a,
 		return -ENOMEM;
 	}
 
-	bitmap_set(&slab_page->bitmap, offs, 1);
+	nvgpu_assert(offs <= U64(U32_MAX));
+	bitmap_set(&slab_page->bitmap, U32(offs), 1);
 	slab_page->nr_objects_alloced++;
 
 	if (slab_page->nr_objects_alloced < slab_page->nr_objects) {
@@ -471,7 +472,7 @@ static void nvgpu_free_slab(struct nvgpu_page_allocator *a,
 	struct page_alloc_slab_page *slab_page = alloc->slab_page;
 	struct page_alloc_slab *slab = slab_page->owner;
 	enum slab_page_state new_state;
-	int offs;
+	u32 offs;
 
 	offs = (u32)(alloc->base - slab_page->page_addr) / slab_page->slab_size;
 	bitmap_clear(&slab_page->bitmap, offs, 1);
@@ -898,7 +899,7 @@ static void nvgpu_page_print_stats(struct nvgpu_allocator *na,
 				   struct seq_file *s, int lock)
 {
 	struct nvgpu_page_allocator *a = page_allocator(na);
-	int i;
+	u32 i;
 
 	if (lock)
 		alloc_lock(na);
@@ -987,11 +988,13 @@ static const struct nvgpu_allocator_ops page_ops = {
  */
 static int nvgpu_page_alloc_init_slabs(struct nvgpu_page_allocator *a)
 {
-	size_t nr_slabs = ilog2(a->page_size >> 12);
-	unsigned int i;
+	/* Use temp var for MISRA 10.8 */
+	unsigned long tmp_nr_slabs = ilog2(a->page_size >> 12);
+	u32 nr_slabs = U32(tmp_nr_slabs);
+	u32 i;
 	a->slabs = nvgpu_kcalloc(nvgpu_alloc_to_gpu(a->owner),
-				 nr_slabs,
+				 (size_t)nr_slabs,
 				 sizeof(struct page_alloc_slab));
 	if (a->slabs == NULL) {
 		return -ENOMEM;
 	}
@@ -1047,7 +1050,7 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 	a->base = base;
 	a->length = length;
 	a->page_size = blk_size;
-	a->page_shift = __ffs(blk_size);
+	a->page_shift = U32(__ffs(blk_size));
 	a->allocs = NULL;
 	a->owner = na;
 	a->flags = flags;
diff --git a/drivers/gpu/nvgpu/common/mm/comptags.c b/drivers/gpu/nvgpu/common/mm/comptags.c
index fe4c3016d..8ba34aefb 100644
--- a/drivers/gpu/nvgpu/common/mm/comptags.c
+++ b/drivers/gpu/nvgpu/common/mm/comptags.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -40,8 +40,9 @@ int gk20a_comptaglines_alloc(struct gk20a_comptag_allocator *allocator,
 					  0, len, 0);
 	if (addr < allocator->size) {
 		/* number zero is reserved; bitmap base is 1 */
-		*offset = 1U + addr;
-		bitmap_set(allocator->bitmap, addr, len);
+		nvgpu_assert(addr < U64(U32_MAX));
+		*offset = 1U + U32(addr);
+		bitmap_set(allocator->bitmap, U32(addr), len);
 	} else {
 		err = -ENOMEM;
 	}
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c b/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
index f03a4a245..7f3334e3d 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
@@ -231,7 +231,7 @@ static u32 pd_entries(const struct gk20a_mmu_level *l,
 	 * used to index the page directory. That is simply 2 raised to the
 	 * number of bits.
 	 */
-	return BIT32(l->hi_bit[attrs->pgsz] - l->lo_bit[attrs->pgsz] + 1);
+	return BIT32(l->hi_bit[attrs->pgsz] - l->lo_bit[attrs->pgsz] + 1U);
 }
 
 /*
@@ -292,13 +292,16 @@ static u32 pd_index(const struct gk20a_mmu_level *l, u64 virt,
 		    struct nvgpu_gmmu_attrs *attrs)
 {
 	u64 pd_mask = (1ULL << ((u64)l->hi_bit[attrs->pgsz] + 1U)) - 1ULL;
-	u32 pd_shift = (u64)l->lo_bit[attrs->pgsz];
+	u32 pd_shift = l->lo_bit[attrs->pgsz];
+	u64 tmp_index;
 
 	/*
 	 * For convenience we don't bother computing the lower bound of the
 	 * mask; it's easier to just shift it off.
 	 */
-	return (virt & pd_mask) >> pd_shift;
+	tmp_index = (virt & pd_mask) >> pd_shift;
+	nvgpu_assert(tmp_index <= U64(U32_MAX));
+	return U32(tmp_index);
 }
 
 static int pd_allocate_children(struct vm_gk20a *vm,
@@ -334,7 +337,7 @@ static int pd_allocate_children(struct vm_gk20a *vm,
 	 * there would be mixing (which, remember, is prevented by the buddy
 	 * allocator).
 	 */
-	if (pd->num_entries >= (int)pd_entries(l, attrs)) {
+	if (pd->num_entries >= pd_entries(l, attrs)) {
 		return 0;
 	}
 
@@ -777,12 +780,12 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 	struct gk20a *g = gk20a_from_vm(vm);
 	int err = 0;
 	bool allocated = false;
-	int ctag_granularity = g->ops.fb.compression_page_size(g);
+	u32 ctag_granularity = g->ops.fb.compression_page_size(g);
 	struct nvgpu_gmmu_attrs attrs = {
 		.pgsz = pgsz_idx,
 		.kind_v = kind_v,
 		.ctag = (u64)ctag_offset * (u64)ctag_granularity,
-		.cacheable = flags & NVGPU_VM_MAP_CACHEABLE,
+		.cacheable = ((flags & NVGPU_VM_MAP_CACHEABLE) != 0U),
 		.rw_flag = rw_flag,
 		.sparse = sparse,
 		.priv = priv,
@@ -855,11 +858,11 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
 		.pgsz = pgsz_idx,
 		.kind_v = 0,
 		.ctag = 0,
-		.cacheable = 0,
+		.cacheable = false,
 		.rw_flag = rw_flag,
 		.sparse = sparse,
-		.priv = 0,
-		.valid = 0,
+		.priv = false,
+		.valid = false,
 		.aperture = APERTURE_INVALID,
 	};
 
diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c
index 5ea3964e3..fff41f92b 100644
--- a/drivers/gpu/nvgpu/common/mm/mm.c
+++ b/drivers/gpu/nvgpu/common/mm/mm.c
@@ -226,7 +226,7 @@ static int nvgpu_init_system_vm(struct mm_gk20a *mm)
 	 * size. No reason AFAICT for this. Probably a bug somewhere.
 	 */
 	if (nvgpu_is_enabled(g, NVGPU_MM_FORCE_128K_PMU_VM)) {
-		big_page_size = SZ_128K;
+		big_page_size = U32(SZ_128K);
 	}
 
@@ -343,7 +343,7 @@ void nvgpu_init_mm_ce_context(struct gk20a *g)
 	    (g->mm.vidmem.ce_ctx_id == NVGPU_CE_INVAL_CTX_ID)) {
 		g->mm.vidmem.ce_ctx_id =
 			gk20a_ce_create_context(g,
-				gk20a_fifo_get_fast_ce_runlist_id(g),
+				(int)gk20a_fifo_get_fast_ce_runlist_id(g),
 				-1,
 				-1);
diff --git a/drivers/gpu/nvgpu/common/mm/vidmem.c b/drivers/gpu/nvgpu/common/mm/vidmem.c
index 05b544168..7aec7c850 100644
--- a/drivers/gpu/nvgpu/common/mm/vidmem.c
+++ b/drivers/gpu/nvgpu/common/mm/vidmem.c
@@ -201,7 +201,7 @@ int nvgpu_vidmem_clear_list_enqueue(struct gk20a *g, struct nvgpu_mem *mem)
 	nvgpu_mutex_acquire(&mm->vidmem.clear_list_mutex);
 	nvgpu_list_add_tail(&mem->clear_list_entry,
 			    &mm->vidmem.clear_list_head);
-	nvgpu_atomic64_add(mem->aligned_size, &mm->vidmem.bytes_pending);
+	nvgpu_atomic64_add((long)mem->aligned_size, &mm->vidmem.bytes_pending);
 	nvgpu_mutex_release(&mm->vidmem.clear_list_mutex);
 
 	nvgpu_cond_signal_interruptible(&mm->vidmem.clearing_thread_cond);
@@ -234,7 +234,7 @@ static void nvgpu_vidmem_clear_pending_allocs(struct mm_gk20a *mm)
 	while ((mem = nvgpu_vidmem_clear_list_dequeue(mm)) != NULL) {
 		nvgpu_vidmem_clear(g, mem);
 
-		WARN_ON(nvgpu_atomic64_sub_return(mem->aligned_size,
+		WARN_ON(nvgpu_atomic64_sub_return((long)mem->aligned_size,
 					&g->mm.vidmem.bytes_pending) < 0);
 		mem->size = 0;
 		mem->aperture = APERTURE_INVALID;
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 7565518d4..7632396b0 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -67,7 +67,7 @@ int vm_aspace_id(struct vm_gk20a *vm)
  * example, for gp10b, with a last level address bit PDE range of 28 to 21 the
  * amount of memory each last level PDE addresses is 21 bits - i.e 2MB.
  */
-int nvgpu_vm_pde_coverage_bit_count(struct vm_gk20a *vm)
+u32 nvgpu_vm_pde_coverage_bit_count(struct vm_gk20a *vm)
 {
 	int final_pde_level = 0;
 
@@ -87,7 +87,7 @@ static void nvgpu_vm_do_free_entries(struct vm_gk20a *vm,
 					 struct nvgpu_gmmu_pd *pd,
 					 int level)
 {
-	int i;
+	u32 i;
 
 	if (pd->mem != NULL) {
 		nvgpu_pd_free(vm, pd);
@@ -108,7 +108,7 @@ static void nvgpu_vm_free_entries(struct vm_gk20a *vm,
 				  struct nvgpu_gmmu_pd *pdb)
 {
 	struct gk20a *g = vm->mm->g;
-	int i;
+	u32 i;
 
 	nvgpu_pd_free(vm, pdb);
diff --git a/drivers/gpu/nvgpu/common/vgpu/mm/mm_vgpu.c b/drivers/gpu/nvgpu/common/vgpu/mm/mm_vgpu.c
index 4c9d9cf70..1e0ff57b8 100644
--- a/drivers/gpu/nvgpu/common/vgpu/mm/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/common/vgpu/mm/mm_vgpu.c
@@ -397,7 +397,8 @@ u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 	p->gpu_va = map_offset;
 	p->size = buffer_size;
 	p->mem_desc_count = mem_desc_count;
-	p->pgsz_idx = pgsz_idx;
+	nvgpu_assert(pgsz_idx <= U32(U8_MAX));
+	p->pgsz_idx = U8(pgsz_idx);
 	p->iova = 0;
 	p->kind = kind_v;
 	if (flags & NVGPU_VM_MAP_CACHEABLE) {
diff --git a/drivers/gpu/nvgpu/include/nvgpu/gmmu.h b/drivers/gpu/nvgpu/include/nvgpu/gmmu.h
index 762d1d6e8..28241b450 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/gmmu.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/gmmu.h
@@ -90,8 +90,8 @@ struct nvgpu_gmmu_attrs {
 };
 
 struct gk20a_mmu_level {
-	int hi_bit[2];
-	int lo_bit[2];
+	u32 hi_bit[2];
+	u32 lo_bit[2];
 
 	/*
 	 * Build map from virt_addr -> phys_addr.
diff --git a/drivers/gpu/nvgpu/include/nvgpu/page_allocator.h b/drivers/gpu/nvgpu/include/nvgpu/page_allocator.h
index cf60349cb..fabff0be4 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/page_allocator.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/page_allocator.h
@@ -151,7 +151,7 @@ struct nvgpu_page_allocator {
 	struct nvgpu_rbtree_node *allocs;	/* Outstanding allocations. */
 
 	struct page_alloc_slab *slabs;
-	int nr_slabs;
+	u32 nr_slabs;
 
 	struct nvgpu_kmem_cache *alloc_cache;
 	struct nvgpu_kmem_cache *slab_page_cache;
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pd_cache.h b/drivers/gpu/nvgpu/include/nvgpu/pd_cache.h
index 557c808d9..9b453a93c 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pd_cache.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pd_cache.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -50,7 +50,7 @@ struct nvgpu_gmmu_pd {
 	 * need to be populated when this PD is pointing to PTEs.
 	 */
 	struct nvgpu_gmmu_pd *entries;
-	int num_entries;
+	u32 num_entries;
 };
 
 int nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes);
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vgpu/tegra_vgpu.h b/drivers/gpu/nvgpu/include/nvgpu/vgpu/tegra_vgpu.h
index dfb76b76f..011555e0b 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/vgpu/tegra_vgpu.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/vgpu/tegra_vgpu.h
@@ -166,7 +166,7 @@ struct tegra_vgpu_as_map_params {
 	u8 iova;
 	u8 kind;
 	u8 cacheable;
-	u8 clear_ctags;
+	bool clear_ctags;
 	u8 prot;
 	u32 offset;
 };
@@ -185,7 +185,7 @@ struct tegra_vgpu_as_map_ex_params {
 	u8 iova;
 	u8 kind;
 	u32 flags;
-	u8 clear_ctags;
+	bool clear_ctags;
 	u8 prot;
 	u32 ctag_offset;
 };
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm.h b/drivers/gpu/nvgpu/include/nvgpu/vm.h
index cd3a2fd37..6d13aa1f3 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/vm.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/vm.h
@@ -226,7 +226,7 @@ void nvgpu_vm_put(struct vm_gk20a *vm);
 int vm_aspace_id(struct vm_gk20a *vm);
 bool nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size);
 
-int nvgpu_vm_pde_coverage_bit_count(struct vm_gk20a *vm);
+u32 nvgpu_vm_pde_coverage_bit_count(struct vm_gk20a *vm);
 
 /* batching eliminates redundant cache flushes and invalidates */
 void nvgpu_vm_mapping_batch_start(struct vm_gk20a_mapping_batch *mapping_batch);
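
Reviewer note: the recurring fix pattern in this change is to replace an implicit
narrowing assignment (a MISRA C:2012 Rule 10.3 concern) with a range assertion
followed by an explicit cast, as in the nvgpu_assert()/U32()/U8() hunks above.
Below is a minimal, self-contained sketch of that pattern using only standard C;
the function name and helpers are illustrative and are not part of the nvgpu
sources, which use nvgpu_assert() and the U32()/U8() conversion macros instead.

/* Illustrative sketch only -- not nvgpu code. */
#include <assert.h>
#include <stdint.h>

/*
 * Rule 10.3 flags an implicit u64 -> u32 assignment such as:
 *
 *     uint32_t offs = base_offset;   (implicit narrowing)
 *
 * The pattern applied throughout this patch instead asserts that the
 * value fits in the narrower type and then converts with an explicit cast.
 */
static uint32_t narrow_u64_to_u32(uint64_t base_offset)
{
	/* Range check first, so the cast below cannot silently truncate. */
	assert(base_offset <= (uint64_t)UINT32_MAX);
	return (uint32_t)base_offset;
}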