mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: mm: fix MISRA 10.3 violations

MISRA Rule 10.3 prohibits implicit assignment of objects of different
size or essential type. This resolves a number of 10.3 violations in
the nvgpu/common/mm unit.

JIRA NVGPU-2935

Change-Id: Ic9d826bf67417962aab433d08d3e922de26e3edc
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2036117
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit a3ba265dd6
parent 2f5e5596d3
committed by mobile promotions
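The recurring pattern in the hunks below is the same two-step fix: assert that a
wider value actually fits the narrower destination and then convert with an
explicit cast (nvgpu's U32()/U8() helpers plus nvgpu_assert()), and turn masked
flag tests into real booleans by comparing against 0U before assigning to a bool
field. A minimal standalone sketch of that pattern, assuming only standard C
headers (narrow_u64_to_u32, offs and CACHEABLE_FLAG are hypothetical stand-ins
for the nvgpu helpers and flag macros, not nvgpu code):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CACHEABLE_FLAG 0x2U     /* hypothetical stand-in for NVGPU_VM_MAP_CACHEABLE */

/* Range-check, then cast explicitly: mirrors the
 * "nvgpu_assert(x <= U64(U32_MAX)); y = U32(x);" sequences in the diff. */
static uint32_t narrow_u64_to_u32(uint64_t val)
{
        assert(val <= (uint64_t)UINT32_MAX);
        return (uint32_t)val;
}

int main(void)
{
        uint64_t offs = 42U;    /* e.g. a bitmap offset computed in 64 bits */
        uint32_t flags = CACHEABLE_FLAG;

        /* Implicit u64 -> u32 narrowing would violate Rule 10.3. */
        uint32_t offs32 = narrow_u64_to_u32(offs);

        /* Assigning a masked integer to a bool mixes essential types;
         * compare against 0 so the right-hand side is genuinely boolean. */
        bool cacheable = ((flags & CACHEABLE_FLAG) != 0U);

        printf("offs32=%u cacheable=%d\n", offs32, (int)cacheable);
        return 0;
}

The assert makes the range assumption explicit so the cast that follows cannot
silently truncate; that is what the nvgpu_assert()/U32() pairs in the hunks below
do for bitmap offsets, page-size indices and comptag addresses.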
@@ -198,7 +198,7 @@ static inline struct nvgpu_list_node *balloc_get_order_list(
 }
 
 static inline u64 balloc_order_to_len(struct nvgpu_buddy_allocator *a,
-                                      int order)
+                                      u64 order)
 {
         return BIT64(order) * a->blk_size;
 }
 
@@ -24,6 +24,7 @@
 #include <nvgpu/allocator.h>
 #include <nvgpu/kmem.h>
 #include <nvgpu/barrier.h>
+#include <nvgpu/bug.h>
 
 #include "lockless_allocator_priv.h"
 
@@ -103,7 +104,8 @@ static void nvgpu_lockless_free(struct nvgpu_allocator *a, u64 addr)
         while (true) {
                 head = NV_ACCESS_ONCE(pa->head);
                 NV_ACCESS_ONCE(pa->next[cur_idx]) = head;
-                ret = cmpxchg(&pa->head, head, cur_idx);
+                nvgpu_assert(cur_idx <= U64(INT_MAX));
+                ret = cmpxchg(&pa->head, head, (int)cur_idx);
                 if (ret == head) {
                         nvgpu_atomic_dec(&pa->nr_allocs);
                         alloc_dbg(a, "Free node # %llu", cur_idx);
@@ -382,7 +382,8 @@ static int do_slab_alloc(struct nvgpu_page_allocator *a,
                 return -ENOMEM;
         }
 
-        bitmap_set(&slab_page->bitmap, offs, 1);
+        nvgpu_assert(offs <= U64(U32_MAX));
+        bitmap_set(&slab_page->bitmap, U32(offs), 1);
         slab_page->nr_objects_alloced++;
 
         if (slab_page->nr_objects_alloced < slab_page->nr_objects) {
@@ -471,7 +472,7 @@ static void nvgpu_free_slab(struct nvgpu_page_allocator *a,
         struct page_alloc_slab_page *slab_page = alloc->slab_page;
         struct page_alloc_slab *slab = slab_page->owner;
         enum slab_page_state new_state;
-        int offs;
+        u32 offs;
 
         offs = (u32)(alloc->base - slab_page->page_addr) / slab_page->slab_size;
         bitmap_clear(&slab_page->bitmap, offs, 1);
@@ -898,7 +899,7 @@ static void nvgpu_page_print_stats(struct nvgpu_allocator *na,
                                    struct seq_file *s, int lock)
 {
         struct nvgpu_page_allocator *a = page_allocator(na);
-        int i;
+        u32 i;
 
         if (lock)
                 alloc_lock(na);
@@ -987,11 +988,13 @@ static const struct nvgpu_allocator_ops page_ops = {
  */
 static int nvgpu_page_alloc_init_slabs(struct nvgpu_page_allocator *a)
 {
-        size_t nr_slabs = ilog2(a->page_size >> 12);
-        unsigned int i;
+        /* Use temp var for MISRA 10.8 */
+        unsigned long tmp_nr_slabs = ilog2(a->page_size >> 12);
+        u32 nr_slabs = U32(tmp_nr_slabs);
+        u32 i;
 
         a->slabs = nvgpu_kcalloc(nvgpu_alloc_to_gpu(a->owner),
-                                 nr_slabs,
+                                 (size_t)nr_slabs,
                                  sizeof(struct page_alloc_slab));
         if (a->slabs == NULL) {
                 return -ENOMEM;
@@ -1047,7 +1050,7 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
         a->base = base;
         a->length = length;
         a->page_size = blk_size;
-        a->page_shift = __ffs(blk_size);
+        a->page_shift = U32(__ffs(blk_size));
         a->allocs = NULL;
         a->owner = na;
         a->flags = flags;
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -40,8 +40,9 @@ int gk20a_comptaglines_alloc(struct gk20a_comptag_allocator *allocator,
                         0, len, 0);
         if (addr < allocator->size) {
                 /* number zero is reserved; bitmap base is 1 */
-                *offset = 1U + addr;
-                bitmap_set(allocator->bitmap, addr, len);
+                nvgpu_assert(addr < U64(U32_MAX));
+                *offset = 1U + U32(addr);
+                bitmap_set(allocator->bitmap, U32(addr), len);
         } else {
                 err = -ENOMEM;
         }
 
@@ -231,7 +231,7 @@ static u32 pd_entries(const struct gk20a_mmu_level *l,
          * used to index the page directory. That is simply 2 raised to the
          * number of bits.
          */
-        return BIT32(l->hi_bit[attrs->pgsz] - l->lo_bit[attrs->pgsz] + 1);
+        return BIT32(l->hi_bit[attrs->pgsz] - l->lo_bit[attrs->pgsz] + 1U);
 }
 
 /*
@@ -292,13 +292,16 @@ static u32 pd_index(const struct gk20a_mmu_level *l, u64 virt,
                     struct nvgpu_gmmu_attrs *attrs)
 {
         u64 pd_mask = (1ULL << ((u64)l->hi_bit[attrs->pgsz] + 1U)) - 1ULL;
-        u32 pd_shift = (u64)l->lo_bit[attrs->pgsz];
+        u32 pd_shift = l->lo_bit[attrs->pgsz];
+        u64 tmp_index;
 
         /*
          * For convenience we don't bother computing the lower bound of the
          * mask; it's easier to just shift it off.
          */
-        return (virt & pd_mask) >> pd_shift;
+        tmp_index = (virt & pd_mask) >> pd_shift;
+        nvgpu_assert(tmp_index <= U64(U32_MAX));
+        return U32(tmp_index);
 }
 
 static int pd_allocate_children(struct vm_gk20a *vm,
@@ -334,7 +337,7 @@ static int pd_allocate_children(struct vm_gk20a *vm,
          * there would be mixing (which, remember, is prevented by the buddy
          * allocator).
          */
-        if (pd->num_entries >= (int)pd_entries(l, attrs)) {
+        if (pd->num_entries >= pd_entries(l, attrs)) {
                 return 0;
         }
 
@@ -777,12 +780,12 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
         struct gk20a *g = gk20a_from_vm(vm);
         int err = 0;
         bool allocated = false;
-        int ctag_granularity = g->ops.fb.compression_page_size(g);
+        u32 ctag_granularity = g->ops.fb.compression_page_size(g);
         struct nvgpu_gmmu_attrs attrs = {
                 .pgsz = pgsz_idx,
                 .kind_v = kind_v,
                 .ctag = (u64)ctag_offset * (u64)ctag_granularity,
-                .cacheable = flags & NVGPU_VM_MAP_CACHEABLE,
+                .cacheable = ((flags & NVGPU_VM_MAP_CACHEABLE) != 0U),
                 .rw_flag = rw_flag,
                 .sparse = sparse,
                 .priv = priv,
@@ -855,11 +858,11 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
                 .pgsz = pgsz_idx,
                 .kind_v = 0,
                 .ctag = 0,
-                .cacheable = 0,
+                .cacheable = false,
                 .rw_flag = rw_flag,
                 .sparse = sparse,
-                .priv = 0,
-                .valid = 0,
+                .priv = false,
+                .valid = false,
                 .aperture = APERTURE_INVALID,
         };
 
@@ -226,7 +226,7 @@ static int nvgpu_init_system_vm(struct mm_gk20a *mm)
          * size. No reason AFAICT for this. Probably a bug somewhere.
          */
         if (nvgpu_is_enabled(g, NVGPU_MM_FORCE_128K_PMU_VM)) {
-                big_page_size = SZ_128K;
+                big_page_size = U32(SZ_128K);
         }
 
         /*
@@ -343,7 +343,7 @@ void nvgpu_init_mm_ce_context(struct gk20a *g)
             (g->mm.vidmem.ce_ctx_id == NVGPU_CE_INVAL_CTX_ID)) {
                 g->mm.vidmem.ce_ctx_id =
                         gk20a_ce_create_context(g,
-                                gk20a_fifo_get_fast_ce_runlist_id(g),
+                                (int)gk20a_fifo_get_fast_ce_runlist_id(g),
                                 -1,
                                 -1);
 
@@ -201,7 +201,7 @@ int nvgpu_vidmem_clear_list_enqueue(struct gk20a *g, struct nvgpu_mem *mem)
         nvgpu_mutex_acquire(&mm->vidmem.clear_list_mutex);
         nvgpu_list_add_tail(&mem->clear_list_entry,
                             &mm->vidmem.clear_list_head);
-        nvgpu_atomic64_add(mem->aligned_size, &mm->vidmem.bytes_pending);
+        nvgpu_atomic64_add((long)mem->aligned_size, &mm->vidmem.bytes_pending);
         nvgpu_mutex_release(&mm->vidmem.clear_list_mutex);
 
         nvgpu_cond_signal_interruptible(&mm->vidmem.clearing_thread_cond);
@@ -234,7 +234,7 @@ static void nvgpu_vidmem_clear_pending_allocs(struct mm_gk20a *mm)
         while ((mem = nvgpu_vidmem_clear_list_dequeue(mm)) != NULL) {
                 nvgpu_vidmem_clear(g, mem);
 
-                WARN_ON(nvgpu_atomic64_sub_return(mem->aligned_size,
+                WARN_ON(nvgpu_atomic64_sub_return((long)mem->aligned_size,
                                 &g->mm.vidmem.bytes_pending) < 0);
                 mem->size = 0;
                 mem->aperture = APERTURE_INVALID;
@@ -67,7 +67,7 @@ int vm_aspace_id(struct vm_gk20a *vm)
  * example, for gp10b, with a last level address bit PDE range of 28 to 21 the
  * amount of memory each last level PDE addresses is 21 bits - i.e 2MB.
  */
-int nvgpu_vm_pde_coverage_bit_count(struct vm_gk20a *vm)
+u32 nvgpu_vm_pde_coverage_bit_count(struct vm_gk20a *vm)
 {
         int final_pde_level = 0;
 
@@ -87,7 +87,7 @@ static void nvgpu_vm_do_free_entries(struct vm_gk20a *vm,
                                      struct nvgpu_gmmu_pd *pd,
                                      int level)
 {
-        int i;
+        u32 i;
 
         if (pd->mem != NULL) {
                 nvgpu_pd_free(vm, pd);
@@ -108,7 +108,7 @@ static void nvgpu_vm_free_entries(struct vm_gk20a *vm,
                                   struct nvgpu_gmmu_pd *pdb)
 {
         struct gk20a *g = vm->mm->g;
-        int i;
+        u32 i;
 
         nvgpu_pd_free(vm, pdb);
 
@@ -397,7 +398,8 @@ u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
         p->gpu_va = map_offset;
         p->size = buffer_size;
         p->mem_desc_count = mem_desc_count;
-        p->pgsz_idx = pgsz_idx;
+        nvgpu_assert(pgsz_idx <= U32(U8_MAX));
+        p->pgsz_idx = U8(pgsz_idx);
         p->iova = 0;
         p->kind = kind_v;
         if (flags & NVGPU_VM_MAP_CACHEABLE) {
@@ -90,8 +90,8 @@ struct nvgpu_gmmu_attrs {
 };
 
 struct gk20a_mmu_level {
-        int hi_bit[2];
-        int lo_bit[2];
+        u32 hi_bit[2];
+        u32 lo_bit[2];
 
         /*
          * Build map from virt_addr -> phys_addr.
@@ -151,7 +151,7 @@ struct nvgpu_page_allocator {
         struct nvgpu_rbtree_node *allocs;       /* Outstanding allocations. */
 
         struct page_alloc_slab *slabs;
-        int nr_slabs;
+        u32 nr_slabs;
 
         struct nvgpu_kmem_cache *alloc_cache;
         struct nvgpu_kmem_cache *slab_page_cache;
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -50,7 +50,7 @@ struct nvgpu_gmmu_pd {
          * need to be populated when this PD is pointing to PTEs.
          */
         struct nvgpu_gmmu_pd *entries;
-        int num_entries;
+        u32 num_entries;
 };
 
 int nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes);
 
@@ -166,7 +166,7 @@ struct tegra_vgpu_as_map_params {
         u8 iova;
         u8 kind;
         u8 cacheable;
-        u8 clear_ctags;
+        bool clear_ctags;
         u8 prot;
         u32 offset;
 };
@@ -185,7 +185,7 @@ struct tegra_vgpu_as_map_ex_params {
         u8 iova;
         u8 kind;
         u32 flags;
-        u8 clear_ctags;
+        bool clear_ctags;
         u8 prot;
         u32 ctag_offset;
 };
 
@@ -226,7 +226,7 @@ void nvgpu_vm_put(struct vm_gk20a *vm);
 int vm_aspace_id(struct vm_gk20a *vm);
 bool nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size);
 
-int nvgpu_vm_pde_coverage_bit_count(struct vm_gk20a *vm);
+u32 nvgpu_vm_pde_coverage_bit_count(struct vm_gk20a *vm);
 
 /* batching eliminates redundant cache flushes and invalidates */
 void nvgpu_vm_mapping_batch_start(struct vm_gk20a_mapping_batch *mapping_batch);