Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: modify the ffs and fls interface
Modify the ffs/fls interface function names to nvgpu_ffs and nvgpu_fls.
The returned bit positions are numbered from 1 to 64; a return value of
0 indicates that the input was 0.

Jira NVGPU-3601

Change-Id: I1c151eeac1f94fe3b5b85bd5daf0488f75c5efa0
Signed-off-by: ajesh <akv@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2146119
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-by: Nitin Kumbhar <nkumbhar@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
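As a quick illustration of that convention, here is a minimal, hypothetical usage sketch (not part of the commit; the function name and example values are invented, while nvgpu_ffs()/nvgpu_fls() and <nvgpu/bitops.h> come from this change):

    #include <nvgpu/bitops.h>

    /* Illustration only: returned bit positions are 1-based, 0 means no bit set. */
    static void show_nvgpu_bitops_convention(void)
    {
            unsigned long word = 0x50UL;           /* bits 4 and 6 set (0-based)  */
            unsigned long lo   = nvgpu_ffs(word);  /* 5: lowest set bit, 1-based  */
            unsigned long hi   = nvgpu_fls(word);  /* 7: highest set bit, 1-based */
            unsigned long none = nvgpu_ffs(0UL);   /* 0: input word was 0         */

            (void)lo;
            (void)hi;
            (void)none;
    }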
@@ -235,7 +235,8 @@ static inline unsigned int nvgpu_ce_get_method_size(u32 request_operation,
         iterations++;
 
         shift = (MAX_CE_ALIGN(chunk) != 0ULL) ?
-                (ffs(MAX_CE_ALIGN(chunk)) - 1UL) : MAX_CE_SHIFT;
+                (nvgpu_ffs(MAX_CE_ALIGN(chunk)) - 1UL) :
+                        MAX_CE_SHIFT;
         width = chunk >> shift;
         height = BIT32(shift);
         width = MAX_CE_ALIGN(width);
@@ -309,7 +310,8 @@ u32 nvgpu_ce_prepare_submit(u64 src_buf,
          */
 
         shift = (MAX_CE_ALIGN(chunk) != 0ULL) ?
-                (ffs(MAX_CE_ALIGN(chunk)) - 1UL) : MAX_CE_SHIFT;
+                (nvgpu_ffs(MAX_CE_ALIGN(chunk)) - 1UL) :
+                        MAX_CE_SHIFT;
         height = chunk >> shift;
         width = BIT32(shift);
         height = MAX_CE_ALIGN(height);
@@ -508,7 +508,14 @@ int gk20a_finalize_poweron(struct gk20a *g)
         g->ops.xve.available_speeds(g, &speed);
 
         /* Set to max speed */
-        speed = BIT32(fls(speed) - 1U);
+        speed = (u32)nvgpu_fls(speed);
+
+        if (speed > 0U) {
+                speed = BIT32((speed - 1U));
+        } else {
+                speed = BIT32(speed);
+        }
+
         err = g->ops.xve.set_speed(g, speed);
         if (err != 0) {
                 nvgpu_err(g, "Failed to set PCIe bus speed!");
@@ -454,7 +454,7 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
         a->base = base;
         a->length = length;
         a->blk_size = blk_size;
-        a->blk_shift = nvgpu_safe_sub_u64(ffs(a->blk_size), 1UL);
+        a->blk_shift = nvgpu_safe_sub_u64(nvgpu_ffs(a->blk_size), 1UL);
         a->num_bits = length >> a->blk_shift;
         a->bit_offs = a->base >> a->blk_shift;
         a->flags = flags;
@@ -218,7 +218,7 @@ static u64 balloc_get_order(struct nvgpu_buddy_allocator *a, u64 len)
         len--;
         len >>= a->blk_shift;
 
-        return fls(len);
+        return nvgpu_fls(len);
 }
 
 static u64 balloc_max_order_in(struct nvgpu_buddy_allocator *a,
@@ -774,13 +774,15 @@ static u64 balloc_do_alloc_fixed(struct nvgpu_buddy_allocator *a,
 
         shifted_base = balloc_base_shift(a, base);
         if (shifted_base == 0U) {
-                align_order = nvgpu_safe_sub_u64(ffs(len >> a->blk_shift), 1UL);
+                align_order = nvgpu_safe_sub_u64(
+                                nvgpu_ffs(len >> a->blk_shift), 1UL);
         } else {
                 u64 shifted_base_order =
                         nvgpu_safe_sub_u64(
-                                ffs(shifted_base >> a->blk_shift), 1UL);
+                                nvgpu_ffs(shifted_base >> a->blk_shift), 1UL);
                 u64 len_order =
-                        nvgpu_safe_sub_u64(ffs(len >> a->blk_shift), 1UL);
+                        nvgpu_safe_sub_u64(
+                                nvgpu_ffs(len >> a->blk_shift), 1UL);
                 align_order = min_t(u64, shifted_base_order, len_order);
         }
 
@@ -818,8 +820,8 @@ static u64 balloc_do_alloc_fixed(struct nvgpu_buddy_allocator *a,
         /* Book keeping. */
         inc_base = nvgpu_safe_add_u64(inc_base, order_len);
         remaining = (shifted_base + len) - inc_base;
-        align_order = nvgpu_safe_sub_u64(ffs(inc_base >> a->blk_shift),
-                        1UL);
+        align_order = nvgpu_safe_sub_u64(
+                        nvgpu_ffs(inc_base >> a->blk_shift), 1UL);
 
         /* If we don't have much left - trim down align_order. */
         if (balloc_order_to_len(a, align_order) > remaining) {
@@ -1389,7 +1391,7 @@ int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
         a->base = base;
         a->length = size;
         a->blk_size = blk_size;
-        a->blk_shift = (ffs(blk_size) - 1UL);
+        a->blk_shift = (nvgpu_ffs(blk_size) - 1UL);
         a->owner = na;
 
         /*
@@ -564,7 +564,7 @@ static struct nvgpu_page_alloc *do_nvgpu_alloc_pages(
 
         while (pages != 0ULL) {
                 u64 chunk_addr = 0;
-                u64 chunk_pages = (u64)1 << (fls(pages) - 1UL);
+                u64 chunk_pages = (u64)1 << (nvgpu_fls(pages) - 1UL);
                 u64 chunk_len = chunk_pages << a->page_shift;
 
                 /*
@@ -1090,7 +1090,7 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
         a->base = base;
         a->length = length;
         a->page_size = blk_size;
-        a->page_shift = nvgpu_safe_cast_u64_to_u32((ffs(blk_size) - 1UL));
+        a->page_shift = nvgpu_safe_cast_u64_to_u32((nvgpu_ffs(blk_size) - 1UL));
         a->allocs = NULL;
         a->owner = na;
         a->flags = flags;
@@ -103,7 +103,8 @@ u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt)
         if (nvgpu_iommuable(g) &&
             nvgpu_sgt_iommuable(g, sgt) &&
             nvgpu_sgt_get_dma(sgt, sgt->sgl) != 0ULL) {
-                return 1ULL << (ffs(nvgpu_sgt_get_dma(sgt, sgt->sgl)) - 1UL);
+                return 1ULL << (nvgpu_ffs(nvgpu_sgt_get_dma(sgt, sgt->sgl))
+                                - 1UL);
         }
 
         /*
@@ -112,7 +113,8 @@ u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt)
          * of the SGT.
          */
         nvgpu_sgt_for_each_sgl(sgl, sgt) {
-                chunk_align = 1ULL << (ffs(nvgpu_sgt_get_phys(g, sgt, sgl) |
+                chunk_align = 1ULL << (nvgpu_ffs(
+                                        nvgpu_sgt_get_phys(g, sgt, sgl) |
                                         nvgpu_sgt_get_length(sgt, sgl)) -
                                         1UL);
 
@@ -40,7 +40,7 @@ static u32 nvgpu_nvlink_get_link(struct gk20a *g)
 
         /* Lets find the detected link */
         if (g->nvlink.initialized_links != 0U) {
-                link_id = (u32)(ffs(g->nvlink.initialized_links) - 1UL);
+                link_id = (u32)(nvgpu_ffs(g->nvlink.initialized_links) - 1UL);
         } else {
                 return NVLINK_MAX_LINKS_SW;
         }
@@ -45,7 +45,7 @@ int nvgpu_nvlink_link_early_init(struct gk20a *g)
          * First check the topology and setup connectivity
          * HACK: we are only enabling one link for now!!!
          */
-        link_id = (u32)(ffs(g->nvlink.discovered_links) - 1UL);
+        link_id = (u32)(nvgpu_ffs(g->nvlink.discovered_links) - 1UL);
         g->nvlink.links[link_id].remote_info.is_connected = true;
         g->nvlink.links[link_id].remote_info.device_type =
                 nvgpu_nvlink_endp_tegra;
@@ -127,8 +127,8 @@ static u32 get_interim_pldiv(struct gk20a *g, u32 old_pl, u32 new_pl)
                 return 0;
         }
 
-        pl = old_pl | BIT32(ffs(new_pl) - 1U); /* pl never 0 */
-        new_pl |= BIT32(ffs(old_pl) - 1U);
+        pl = old_pl | BIT32(nvgpu_ffs(new_pl) - 1U); /* pl never 0 */
+        new_pl |= BIT32(nvgpu_ffs(old_pl) - 1U);
 
         return min(pl, new_pl);
 }
@@ -233,11 +233,11 @@ u32 gm20b_pbdma_acquire_val(u64 timeout)
         do_div(timeout, 100U); /* set acquire timeout to 80% of channel wdt */
         timeout *= 1000000UL; /* ms -> ns */
         do_div(timeout, 1024U); /* in unit of 1024ns */
-        tmp = fls(timeout >> 32U);
+        tmp = nvgpu_fls(timeout >> 32U);
         BUG_ON(tmp > U64(U32_MAX));
         val_len = (u32)tmp + 32U;
         if (val_len == 32U) {
-                val_len = (u32)fls(timeout);
+                val_len = (u32)nvgpu_fls(timeout);
         }
         if (val_len > 16U + pbdma_acquire_timeout_exp_max_v()) { /* man: 16bits */
                 exponent = pbdma_acquire_timeout_exp_max_v();
@@ -51,10 +51,10 @@ struct nvgpu_clk_session;
 #define VF_POINT_INVALID_PSTATE ~0U
 #define VF_POINT_SET_PSTATE_SUPPORTED(a, b) ((a)->pstates |= (BIT16(b)))
 #define VF_POINT_GET_PSTATE(a) (((a)->pstates) ?\
-        (fls((a)->pstates) - 1UL) :\
+        (nvgpu_fls((a)->pstates) - 1UL) :\
         VF_POINT_INVALID_PSTATE)
 #define VF_POINT_COMMON_PSTATE(a, b) (((a)->pstates & (b)->pstates) != 0U ?\
-        (fls((unsigned long)((a)->pstates) & \
+        (nvgpu_fls((unsigned long)((a)->pstates) & \
         (unsigned long)((b)->pstates)) - 1UL) :\
         VF_POINT_INVALID_PSTATE)
 
@@ -67,4 +67,30 @@ static inline void nvgpu_clear_bit(unsigned int nr,
         BUG_ON(nr > U32(INT_MAX));
         clear_bit((int)nr, addr);
 }
 
+static inline unsigned long nvgpu_ffs(unsigned long word)
+{
+        unsigned long ret = 0UL;
+
+        if (word == 0UL) {
+                return ret;
+        }
+
+        ret = __ffs(word) + 1UL;
+
+        return ret;
+}
+
+static inline unsigned long nvgpu_fls(unsigned long word)
+{
+        unsigned long ret = 0UL;
+
+        if (word == 0UL) {
+                return ret;
+        }
+
+        ret = __fls(word) + 1UL;
+
+        return ret;
+}
+
 #endif /* NVGPU_LOCK_LINUX_H */
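Since the helpers return a 1-based bit number (0 for a zero input), call sites that need a power-of-two shift subtract 1 from the result, which is the pattern used by the allocator hunks earlier in this change. A minimal sketch of that pattern (the helper name and the 4 KiB example value are illustrative, not from the commit):

    /* Illustration only: derive a block shift from a power-of-two block size. */
    static inline unsigned long example_blk_shift(unsigned long blk_size)
    {
            return nvgpu_ffs(blk_size) - 1UL;     /* e.g. 4096UL -> 12 */
    }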
@@ -60,10 +60,10 @@
 unsigned long nvgpu_posix_ffs(unsigned long word);
 unsigned long nvgpu_posix_fls(unsigned long word);
 
-#define ffs(word) nvgpu_posix_ffs(word)
-#define fls(word) nvgpu_posix_fls(word)
+#define nvgpu_ffs(word) nvgpu_posix_ffs(word)
+#define nvgpu_fls(word) nvgpu_posix_fls(word)
 
-#define ffz(word) (ffs(~(word)) - 1UL)
+#define ffz(word) (nvgpu_ffs(~(word)) - 1UL)
 
 unsigned long find_first_bit(const unsigned long *addr, unsigned long size);
 unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
@@ -26,7 +26,7 @@
 #include <nvgpu/bitops.h>
 
 #define ilog2(x) ({ \
-        unsigned long fls_val = fls(x); \
+        unsigned long fls_val = nvgpu_fls(x); \
         \
         nvgpu_assert(fls_val > 0ULL); \
         fls_val = fls_val - 1U; \
@@ -40,7 +40,8 @@
         if ((x) == 0UL) { \
                 BUG(); \
         } else { \
-                ret = 1UL << fls((x) - 1UL); \
+                ret = 1UL << \
+                        nvgpu_fls((x) - 1UL); \
         } \
         ret; \
 })
@@ -52,7 +53,8 @@
         if ((x) == 0UL) { \
                 BUG(); \
         } else { \
-                ret = 1UL << (fls(x) - 1UL); \
+                ret = 1UL << \
+                        (nvgpu_fls(x) - 1UL); \
         } \
         ret; \
 })
@@ -117,7 +117,7 @@ static unsigned long nvgpu_posix_find_next_bit(const unsigned long *addr,
                 w = addr[idx] ^ invert_mask;
         }
 
-        return min(n, (nvgpu_safe_add_u64(((ffs(w)) - 1UL),
+        return min(n, (nvgpu_safe_add_u64(((nvgpu_ffs(w)) - 1UL),
                 (nvgpu_safe_mult_u64(idx, BITS_PER_LONG)))));
 }
 
@@ -47,7 +47,7 @@ bool test_fifo_subtest_pruned(u32 branches, u32 final_branches)
         if (match == 0U) {
                 return false;
         }
-        bit = ffs(match) - 1;
+        bit = nvgpu_ffs(match) - 1;
 
         return (branches > BIT(bit));
 }
@@ -143,7 +143,7 @@ static bool pruned(u32 branches, u32 final_branches)
         if (match == 0U) {
                 return false;
         }
-        bit = ffs(match) - 1;
+        bit = nvgpu_ffs(match) - 1;
 
         /*
          * Skip the test if it attempts to test some branches
@@ -57,7 +57,7 @@ static int test_ffs(struct unit_module *m, struct gk20a *g, void *args)
 {
 #define CHECK_FFS_WORD(w, answer) \
         do { \
-                unsigned long ret = ffs(w); \
+                unsigned long ret = nvgpu_ffs(w); \
                 \
                 if (ret != (answer)) \
                         unit_return_fail(m, \
@@ -86,9 +86,9 @@ static int test_ffs(struct unit_module *m, struct gk20a *g, void *args)
          * possible return values of the function.
          */
         for (i = 0; i < BITS_PER_LONG; i++) {
-                if (ffs(BIT(i)) != (i + 1))
+                if (nvgpu_ffs(BIT(i)) != (i + 1))
                         unit_return_fail(m, "ffs(1 << %lu) != %lu [%lu]!\n",
-                                i, i, ffs(BIT(i)));
+                                i, i, nvgpu_ffs(BIT(i)));
         }
 
         return UNIT_SUCCESS;
@@ -98,7 +98,7 @@ static int test_fls(struct unit_module *m, struct gk20a *g, void *args)
 {
 #define CHECK_FLS_WORD(w, answer) \
         do { \
-                unsigned long ret = fls(w); \
+                unsigned long ret = nvgpu_fls(w); \
                 \
                 if (ret != (answer)) \
                         unit_return_fail(m, \
@@ -123,9 +123,9 @@ static int test_fls(struct unit_module *m, struct gk20a *g, void *args)
 #undef CHECK_FLS_WORD
 
         for (i = 0; i < BITS_PER_LONG; i++) {
-                if (fls(BIT(i)) != (i+1))
+                if (nvgpu_fls(BIT(i)) != (i+1))
                         unit_return_fail(m, "fls(1 << %lu) != %lu! [%lu]\n",
-                                i, i, fls(BIT(i)));
+                                i, i, nvgpu_fls(BIT(i)));
         }
 
         return UNIT_SUCCESS;