gpu: nvgpu: fix MISRA errors in nvgpu.common.mm

Rule 2.2 doesn't allow unused variable assignments. The reason is that
the presence of unused variable assignments may indicate an error in the
program's logic.
Rule 21.x doesn't allow reserved identifiers or macro names starting
with '_' to be reused or defined.

Jira NVGPU-3864

Change-Id: I8ee31c0ee522cd4de00b317b0b4463868ac958ef
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2163723
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Vedashree Vidwans
2019-07-29 12:51:15 -07:00
committed by mobile promotions
parent 2d93706fe1
commit 19c80f89be
13 changed files with 72 additions and 71 deletions

View File

@@ -363,18 +363,18 @@ static void nvgpu_bitmap_print_stats(struct nvgpu_allocator *na,
{
struct nvgpu_bitmap_allocator *a = bitmap_allocator(na);
__alloc_pstat(s, na, "Bitmap allocator params:");
__alloc_pstat(s, na, " start = 0x%llx", a->base);
__alloc_pstat(s, na, " end = 0x%llx", a->base + a->length);
__alloc_pstat(s, na, " blks = 0x%llx", a->num_bits);
alloc_pstat(s, na, "Bitmap allocator params:");
alloc_pstat(s, na, " start = 0x%llx", a->base);
alloc_pstat(s, na, " end = 0x%llx", a->base + a->length);
alloc_pstat(s, na, " blks = 0x%llx", a->num_bits);
/* Actual stats. */
__alloc_pstat(s, na, "Stats:");
__alloc_pstat(s, na, " Number allocs = 0x%llx", a->nr_allocs);
__alloc_pstat(s, na, " Number fixed = 0x%llx", a->nr_fixed_allocs);
__alloc_pstat(s, na, " Bytes alloced = 0x%llx", a->bytes_alloced);
__alloc_pstat(s, na, " Bytes freed = 0x%llx", a->bytes_freed);
__alloc_pstat(s, na, " Outstanding = 0x%llx",
alloc_pstat(s, na, "Stats:");
alloc_pstat(s, na, " Number allocs = 0x%llx", a->nr_allocs);
alloc_pstat(s, na, " Number fixed = 0x%llx", a->nr_fixed_allocs);
alloc_pstat(s, na, " Bytes alloced = 0x%llx", a->bytes_alloced);
alloc_pstat(s, na, " Bytes freed = 0x%llx", a->bytes_freed);
alloc_pstat(s, na, " Outstanding = 0x%llx",
a->bytes_alloced - a->bytes_freed);
}
#endif

View File

@@ -1236,32 +1236,32 @@ static void nvgpu_buddy_print_stats(struct nvgpu_allocator *na,
struct nvgpu_alloc_carveout *tmp;
struct nvgpu_buddy_allocator *a = na->priv;
__alloc_pstat(s, na, "base = %llu, limit = %llu, blk_size = %llu",
alloc_pstat(s, na, "base = %llu, limit = %llu, blk_size = %llu",
a->base, a->length, a->blk_size);
__alloc_pstat(s, na, "Internal params:");
__alloc_pstat(s, na, " start = 0x%llx", a->start);
__alloc_pstat(s, na, " end = 0x%llx", a->end);
__alloc_pstat(s, na, " count = 0x%llx", a->count);
__alloc_pstat(s, na, " blks = 0x%llx", a->blks);
__alloc_pstat(s, na, " max_order = %llu", a->max_order);
alloc_pstat(s, na, "Internal params:");
alloc_pstat(s, na, " start = 0x%llx", a->start);
alloc_pstat(s, na, " end = 0x%llx", a->end);
alloc_pstat(s, na, " count = 0x%llx", a->count);
alloc_pstat(s, na, " blks = 0x%llx", a->blks);
alloc_pstat(s, na, " max_order = %llu", a->max_order);
if (lock)
alloc_lock(na);
if (!nvgpu_list_empty(&a->co_list)) {
__alloc_pstat(s, na, "");
__alloc_pstat(s, na, "Carveouts:");
alloc_pstat(s, na, "");
alloc_pstat(s, na, "Carveouts:");
nvgpu_list_for_each_entry(tmp, &a->co_list,
nvgpu_alloc_carveout, co_entry)
__alloc_pstat(s, na,
alloc_pstat(s, na,
" CO %2d: %-20s 0x%010llx + 0x%llx",
i++, tmp->name, tmp->base, tmp->length);
}
__alloc_pstat(s, na, "");
__alloc_pstat(s, na, "Buddy blocks:");
__alloc_pstat(s, na, " Order Free Alloced Split");
__alloc_pstat(s, na, " ----- ---- ------- -----");
alloc_pstat(s, na, "");
alloc_pstat(s, na, "Buddy blocks:");
alloc_pstat(s, na, " Order Free Alloced Split");
alloc_pstat(s, na, " ----- ---- ------- -----");
for (i = a->max_order; i >= 0; i--) {
if (a->buddy_list_len[i] == 0 &&
@@ -1269,31 +1269,31 @@ static void nvgpu_buddy_print_stats(struct nvgpu_allocator *na,
a->buddy_list_split[i] == 0)
continue;
__alloc_pstat(s, na, " %3d %-7llu %-9llu %llu", i,
alloc_pstat(s, na, " %3d %-7llu %-9llu %llu", i,
a->buddy_list_len[i],
a->buddy_list_alloced[i],
a->buddy_list_split[i]);
}
__alloc_pstat(s, na, "");
alloc_pstat(s, na, "");
nvgpu_rbtree_enum_start(0, &node, a->fixed_allocs);
i = 1;
while (node) {
falloc = nvgpu_fixed_alloc_from_rbtree_node(node);
__alloc_pstat(s, na, "Fixed alloc (%d): [0x%llx -> 0x%llx]",
alloc_pstat(s, na, "Fixed alloc (%d): [0x%llx -> 0x%llx]",
i, falloc->start, falloc->end);
nvgpu_rbtree_enum_next(&node, a->fixed_allocs);
}
__alloc_pstat(s, na, "");
__alloc_pstat(s, na, "Bytes allocated: %llu",
alloc_pstat(s, na, "");
alloc_pstat(s, na, "Bytes allocated: %llu",
a->bytes_alloced);
__alloc_pstat(s, na, "Bytes allocated (real): %llu",
alloc_pstat(s, na, "Bytes allocated (real): %llu",
a->bytes_alloced_real);
__alloc_pstat(s, na, "Bytes freed: %llu",
alloc_pstat(s, na, "Bytes freed: %llu",
a->bytes_freed);
if (lock)

View File

@@ -132,15 +132,15 @@ static void nvgpu_lockless_print_stats(struct nvgpu_allocator *a,
{
struct nvgpu_lockless_allocator *pa = a->priv;
__alloc_pstat(s, a, "Lockless allocator params:");
__alloc_pstat(s, a, " start = 0x%llx", pa->base);
__alloc_pstat(s, a, " end = 0x%llx", pa->base + pa->length);
alloc_pstat(s, a, "Lockless allocator params:");
alloc_pstat(s, a, " start = 0x%llx", pa->base);
alloc_pstat(s, a, " end = 0x%llx", pa->base + pa->length);
/* Actual stats. */
__alloc_pstat(s, a, "Stats:");
__alloc_pstat(s, a, " Number allocs = %d",
alloc_pstat(s, a, "Stats:");
alloc_pstat(s, a, " Number allocs = %d",
nvgpu_atomic_read(&pa->nr_allocs));
__alloc_pstat(s, a, " Number free = %d",
alloc_pstat(s, a, " Number free = %d",
pa->nr_nodes - nvgpu_atomic_read(&pa->nr_allocs));
}
#endif

View File

@@ -933,47 +933,47 @@ static void nvgpu_page_print_stats(struct nvgpu_allocator *na,
if (lock)
alloc_lock(na);
__alloc_pstat(s, na, "Page allocator:");
__alloc_pstat(s, na, " allocs %lld", a->nr_allocs);
__alloc_pstat(s, na, " frees %lld", a->nr_frees);
__alloc_pstat(s, na, " fixed_allocs %lld", a->nr_fixed_allocs);
__alloc_pstat(s, na, " fixed_frees %lld", a->nr_fixed_frees);
__alloc_pstat(s, na, " slab_allocs %lld", a->nr_slab_allocs);
__alloc_pstat(s, na, " slab_frees %lld", a->nr_slab_frees);
__alloc_pstat(s, na, " pages alloced %lld", a->pages_alloced);
__alloc_pstat(s, na, " pages freed %lld", a->pages_freed);
__alloc_pstat(s, na, "");
alloc_pstat(s, na, "Page allocator:");
alloc_pstat(s, na, " allocs %lld", a->nr_allocs);
alloc_pstat(s, na, " frees %lld", a->nr_frees);
alloc_pstat(s, na, " fixed_allocs %lld", a->nr_fixed_allocs);
alloc_pstat(s, na, " fixed_frees %lld", a->nr_fixed_frees);
alloc_pstat(s, na, " slab_allocs %lld", a->nr_slab_allocs);
alloc_pstat(s, na, " slab_frees %lld", a->nr_slab_frees);
alloc_pstat(s, na, " pages alloced %lld", a->pages_alloced);
alloc_pstat(s, na, " pages freed %lld", a->pages_freed);
alloc_pstat(s, na, "");
__alloc_pstat(s, na, "Page size: %lld KB",
alloc_pstat(s, na, "Page size: %lld KB",
a->page_size >> 10);
__alloc_pstat(s, na, "Total pages: %lld (%lld MB)",
alloc_pstat(s, na, "Total pages: %lld (%lld MB)",
a->length / a->page_size,
a->length >> 20);
__alloc_pstat(s, na, "Available pages: %lld (%lld MB)",
alloc_pstat(s, na, "Available pages: %lld (%lld MB)",
nvgpu_alloc_space(&a->source_allocator) / a->page_size,
nvgpu_alloc_space(&a->source_allocator) >> 20);
__alloc_pstat(s, na, "");
alloc_pstat(s, na, "");
/*
* Slab info.
*/
if (a->flags & GPU_ALLOC_4K_VIDMEM_PAGES) {
__alloc_pstat(s, na, "Slabs:");
__alloc_pstat(s, na, " size empty partial full");
__alloc_pstat(s, na, " ---- ----- ------- ----");
alloc_pstat(s, na, "Slabs:");
alloc_pstat(s, na, " size empty partial full");
alloc_pstat(s, na, " ---- ----- ------- ----");
for (i = 0; i < a->nr_slabs; i++) {
struct page_alloc_slab *slab = &a->slabs[i];
__alloc_pstat(s, na, " %-9u %-9d %-9u %u",
alloc_pstat(s, na, " %-9u %-9d %-9u %u",
slab->slab_size,
slab->nr_empty, slab->nr_partial,
slab->nr_full);
}
__alloc_pstat(s, na, "");
alloc_pstat(s, na, "");
}
__alloc_pstat(s, na, "Source alloc: %s",
alloc_pstat(s, na, "Source alloc: %s",
a->source_allocator.name);
nvgpu_alloc_print_stats(&a->source_allocator, s, lock);

View File

@@ -343,7 +343,7 @@ int nvgpu_mem_create_from_phys(struct gk20a *g, struct nvgpu_mem *dest,
dest->aperture = APERTURE_SYSMEM;
dest->size = nr_pages * SZ_4K;
dest->aligned_size = dest->size;
dest->mem_flags = __NVGPU_MEM_FLAG_NO_DMA;
dest->mem_flags = NVGPU_MEM_FLAG_NO_DMA;
dest->phys_sgt = sgt;
sgl->next = NULL;

View File

@@ -131,7 +131,7 @@ u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt)
struct nvgpu_sgt *nvgpu_sgt_create_from_mem(struct gk20a *g,
struct nvgpu_mem *mem)
{
if ((mem->mem_flags & __NVGPU_MEM_FLAG_NO_DMA) != 0U) {
if ((mem->mem_flags & NVGPU_MEM_FLAG_NO_DMA) != 0U) {
return mem->phys_sgt;
}

View File

@@ -259,7 +259,7 @@ static void nvgpu_vidmem_clear_pending_allocs(struct mm_gk20a *mm)
mem->size = 0;
mem->aperture = APERTURE_INVALID;
__nvgpu_mem_free_vidmem_alloc(g, mem);
nvgpu_mem_free_vidmem_alloc(g, mem);
nvgpu_kfree(g, mem);
}

View File

@@ -1041,7 +1041,7 @@ int nvgpu_vm_map(struct vm_gk20a *vm,
mapped_buffer = nvgpu_vm_find_mapping(vm,
os_buf,
map_addr,
flags,
binfo.flags,
map_key_kind);
if (mapped_buffer != NULL) {
@@ -1226,7 +1226,7 @@ int nvgpu_vm_map(struct vm_gk20a *vm,
binfo.pgsz_idx,
pte_kind,
ctag_offset,
flags,
binfo.flags,
rw,
clear_ctags,
false,
@@ -1251,7 +1251,7 @@ int nvgpu_vm_map(struct vm_gk20a *vm,
mapped_buffer->size = map_size;
mapped_buffer->pgsz_idx = binfo.pgsz_idx;
mapped_buffer->vm = vm;
mapped_buffer->flags = flags;
mapped_buffer->flags = binfo.flags;
nvgpu_assert(map_key_kind >= 0);
mapped_buffer->kind = (u32)map_key_kind;
mapped_buffer->va_allocated = va_allocated;

View File

@@ -330,7 +330,7 @@ static inline void nvgpu_alloc_disable_dbg(struct nvgpu_allocator *a)
* Debug stuff.
*/
#ifdef __KERNEL__
#define __alloc_pstat(seq, allocator, fmt, arg...) \
#define alloc_pstat(seq, allocator, fmt, arg...) \
do { \
if (seq) \
seq_printf(seq, fmt "\n", ##arg); \

View File

@@ -103,7 +103,7 @@ struct nvgpu_mem {
* However, this will not stop the DMA API from freeing other parts of
* nvgpu_mem in a system specific way.
*/
#define __NVGPU_MEM_FLAG_NO_DMA BIT64(3)
#define NVGPU_MEM_FLAG_NO_DMA BIT64(3)
/*
* Some nvgpu_mem objects act as facades to memory buffers owned by
* someone else. This internal flag specifies that the sgt field is
@@ -256,8 +256,9 @@ int nvgpu_mem_create_from_phys(struct gk20a *g, struct nvgpu_mem *dest,
* This is implemented in the OS specific code. If it's not necessary it can
* be a noop. But the symbol must at least be present.
*/
void __nvgpu_mem_free_vidmem_alloc(struct gk20a *g, struct nvgpu_mem *vidmem);
#ifdef CONFIG_NVGPU_DGPU
void nvgpu_mem_free_vidmem_alloc(struct gk20a *g, struct nvgpu_mem *vidmem);
#endif
/*
* Buffer accessors. Sysmem buffers always have a CPU mapping and vidmem
* buffers are accessed via PRAMIN.

View File

@@ -260,7 +260,7 @@ int nvgpu_vidmem_buf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
return err;
}
void __nvgpu_mem_free_vidmem_alloc(struct gk20a *g, struct nvgpu_mem *vidmem)
void nvgpu_mem_free_vidmem_alloc(struct gk20a *g, struct nvgpu_mem *vidmem)
{
nvgpu_free(vidmem->allocator,
(u64)nvgpu_vidmem_get_page_alloc(vidmem->priv.sgt->sgl));

View File

@@ -436,7 +436,7 @@ void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
dma_dbg_free(g, mem->size, mem->priv.flags, "sysmem");
if (!(mem->mem_flags & NVGPU_MEM_FLAG_SHADOW_COPY) &&
!(mem->mem_flags & __NVGPU_MEM_FLAG_NO_DMA) &&
!(mem->mem_flags & NVGPU_MEM_FLAG_NO_DMA) &&
(mem->cpu_va || mem->priv.pages)) {
void *cpu_addr = mem->cpu_va;
@@ -464,7 +464,7 @@ void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
* When this flag is set this means we are freeing a "phys" nvgpu_mem.
* To handle this just nvgpu_kfree() the nvgpu_sgt and nvgpu_sgl.
*/
if (mem->mem_flags & __NVGPU_MEM_FLAG_NO_DMA) {
if (mem->mem_flags & NVGPU_MEM_FLAG_NO_DMA) {
nvgpu_kfree(g, mem->phys_sgt->sgl);
nvgpu_kfree(g, mem->phys_sgt);
}

View File

@@ -54,7 +54,7 @@ struct nvgpu_page_alloc *nvgpu_vidmem_get_page_alloc(struct nvgpu_mem_sgl *sgl)
}
void __nvgpu_mem_free_vidmem_alloc(struct gk20a *g, struct nvgpu_mem *vidmem)
void nvgpu_mem_free_vidmem_alloc(struct gk20a *g, struct nvgpu_mem *vidmem)
{
BUG();
}