gpu: nvgpu: fix MISRA violations in bitops unit

MISRA rule 21.1 states that #define and #undef shall not be used on
a reserved identifier or reserved macro name. Fix violations of
rule 21.1 in the bitops unit.
MISRA rule 21.2 states that a reserved identifier or reserved macro
name shall not be declared. Fix violations of rule 21.2 in the
bitops unit.

Jira NVGPU-3545

Change-Id: Ie551d7ce5e19287107403f2c991bcc55bd11a4e8
Signed-off-by: ajesh <akv@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2125842
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
ajesh
2019-05-27 14:17:08 +05:30
committed by mobile promotions
parent 795940faee
commit 8901faae57
8 changed files with 20 additions and 21 deletions

View File

@@ -233,7 +233,7 @@ static inline unsigned int nvgpu_ce_get_method_size(u32 request_operation,
iterations++;
shift = (MAX_CE_ALIGN(chunk) != 0ULL) ?
__ffs(MAX_CE_ALIGN(chunk)) : MAX_CE_SHIFT;
(ffs(MAX_CE_ALIGN(chunk)) - 1UL) : MAX_CE_SHIFT;
width = chunk >> shift;
height = BIT32(shift);
width = MAX_CE_ALIGN(width);
@@ -307,7 +307,7 @@ u32 nvgpu_ce_prepare_submit(u64 src_buf,
*/
shift = (MAX_CE_ALIGN(chunk) != 0ULL) ?
__ffs(MAX_CE_ALIGN(chunk)) : MAX_CE_SHIFT;
(ffs(MAX_CE_ALIGN(chunk)) - 1UL) : MAX_CE_SHIFT;
height = chunk >> shift;
width = BIT32(shift);
height = MAX_CE_ALIGN(height);

View File

@@ -441,7 +441,7 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
a->base = base;
a->length = length;
a->blk_size = blk_size;
a->blk_shift = __ffs(a->blk_size);
a->blk_shift = (ffs(a->blk_size) - 1UL);
a->num_bits = length >> a->blk_shift;
a->bit_offs = a->base >> a->blk_shift;
a->flags = flags;

View File

@@ -759,11 +759,11 @@ static u64 balloc_do_alloc_fixed(struct nvgpu_buddy_allocator *a,
shifted_base = balloc_base_shift(a, base);
if (shifted_base == 0U) {
align_order = __ffs(len >> a->blk_shift);
align_order = (ffs(len >> a->blk_shift) - 1UL);
} else {
align_order = min_t(u64,
__ffs(shifted_base >> a->blk_shift),
__ffs(len >> a->blk_shift));
(ffs(shifted_base >> a->blk_shift) - 1UL),
(ffs(len >> a->blk_shift) - 1UL));
}
if (align_order > a->max_order) {
@@ -800,7 +800,7 @@ static u64 balloc_do_alloc_fixed(struct nvgpu_buddy_allocator *a,
/* Book keeping. */
inc_base += order_len;
remaining = (shifted_base + len) - inc_base;
align_order = __ffs(inc_base >> a->blk_shift);
align_order = (ffs(inc_base >> a->blk_shift) - 1UL);
/* If we don't have much left - trim down align_order. */
if (balloc_order_to_len(a, align_order) > remaining) {
@@ -1347,7 +1347,7 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
a->base = base;
a->length = size;
a->blk_size = blk_size;
a->blk_shift = __ffs(blk_size);
a->blk_shift = (ffs(blk_size) - 1UL);
a->owner = na;
/*

View File

@@ -546,7 +546,7 @@ static struct nvgpu_page_alloc *do_nvgpu_alloc_pages(
while (pages != 0ULL) {
u64 chunk_addr = 0;
u64 chunk_pages = (u64)1 << __fls(pages);
u64 chunk_pages = (u64)1 << (fls(pages) - 1UL);
u64 chunk_len = chunk_pages << a->page_shift;
/*
@@ -1064,7 +1064,7 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
a->base = base;
a->length = length;
a->page_size = blk_size;
a->page_shift = U32(__ffs(blk_size));
a->page_shift = U32((ffs(blk_size) - 1UL));
a->allocs = NULL;
a->owner = na;
a->flags = flags;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -103,7 +103,7 @@ u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt)
if (nvgpu_iommuable(g) &&
nvgpu_sgt_iommuable(g, sgt) &&
nvgpu_sgt_get_dma(sgt, sgt->sgl) != 0ULL) {
return 1ULL << __ffs(nvgpu_sgt_get_dma(sgt, sgt->sgl));
return 1ULL << (ffs(nvgpu_sgt_get_dma(sgt, sgt->sgl)) - 1UL);
}
/*
@@ -112,8 +112,9 @@ u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt)
* of the SGT.
*/
nvgpu_sgt_for_each_sgl(sgl, sgt) {
chunk_align = 1ULL << __ffs(nvgpu_sgt_get_phys(g, sgt, sgl) |
nvgpu_sgt_get_length(sgt, sgl));
chunk_align = 1ULL << (ffs(nvgpu_sgt_get_phys(g, sgt, sgl) |
nvgpu_sgt_get_length(sgt, sgl)) -
1UL);
if (align != 0ULL) {
align = min(align, chunk_align);

View File

@@ -51,10 +51,11 @@ struct nvgpu_clk_session;
#define VF_POINT_INVALID_PSTATE ~0U
#define VF_POINT_SET_PSTATE_SUPPORTED(a, b) ((a)->pstates |= (BIT16(b)))
#define VF_POINT_GET_PSTATE(a) (((a)->pstates) ?\
__fls((a)->pstates) :\
(fls((a)->pstates) - 1UL) :\
VF_POINT_INVALID_PSTATE)
#define VF_POINT_COMMON_PSTATE(a, b) (((a)->pstates & (b)->pstates) != 0U ?\
__fls((unsigned long)((a)->pstates) & (unsigned long)((b)->pstates)) :\
(fls((unsigned long)((a)->pstates) & \
(unsigned long)((b)->pstates)) - 1UL) :\
VF_POINT_INVALID_PSTATE)
/*

View File

@@ -56,10 +56,7 @@ unsigned long nvgpu_posix_fls(unsigned long word);
#define ffs(word) nvgpu_posix_ffs(word)
#define fls(word) nvgpu_posix_fls(word)
#define __ffs(word) ((ffs(word)) - 1UL)
#define __fls(word) ((fls(word)) - 1UL)
#define ffz(word) __ffs(~(word))
#define ffz(word) (ffs(~(word)) - 1UL)
unsigned long find_first_bit(const unsigned long *addr, unsigned long size);
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,

View File

@@ -103,7 +103,7 @@ static unsigned long __find_next_bit(const unsigned long *addr,
w = addr[idx] ^ invert_mask;
}
return min(n, __ffs(w) + idx * BITS_PER_LONG);
return min(n, (ffs(w) - 1UL) + idx * BITS_PER_LONG);
}
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)