mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-24 02:22:34 +03:00
gpu: nvgpu: Add BIT32() and BIT64() macros
Provide both a BIT32() and BIT64() macro so that bit fields can be sized appropriately. The existing BIT() macro is now deprecated and should not be used. Instead use the explicitly sized macros. JIRA NVGPU-781 Change-Id: I9309bd0cda8f811934b7388990e12d0e02436eb0 Signed-off-by: Alex Waterman <alexw@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1797197 Reviewed-by: Scott Long <scottl@nvidia.com> Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com> Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
committed by
mobile promotions
parent
bf8a1e0019
commit
14cf2edac7
@@ -22,6 +22,14 @@
|
||||
#ifndef __NVGPU_BITOPS_H__
|
||||
#define __NVGPU_BITOPS_H__
|
||||
|
||||
#include <nvgpu/types.h>
|
||||
|
||||
/*
 * Explicit sizes for bit definitions. Please use these instead of BIT().
 *
 * BIT32(i): a u32 with only bit @i set. Requires 0 <= i < 32, otherwise
 * the shift is undefined behavior.
 * BIT64(i): a u64 with only bit @i set. Requires 0 <= i < 64.
 *
 * U32()/U64() come from <nvgpu/types.h> and cast the literal 1 to the
 * fixed-width type before shifting, so the result is correctly sized.
 */
#define BIT32(i) (U32(1) << (i))
#define BIT64(i) (U64(1) << (i))
|
||||
|
||||
#ifdef __KERNEL__
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/bitmap.h>
|
||||
|
||||
@@ -33,7 +33,10 @@
|
||||
/*
 * Number of unsigned longs needed to hold @bits bits, i.e.
 * ceil(bits / BITS_PER_LONG).
 *
 * NOTE: the previous form, (bits + (BITS_PER_LONG - 1) / BITS_PER_LONG),
 * had a precedence bug: the division bound tighter than the addition, so
 * it evaluated to bits + 0 instead of the intended ceiling division.
 */
#define BITS_TO_LONGS(bits) \
	(((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
|
||||
|
||||
/*
 * Deprecated; use the explicit BITxx() macros instead.
 *
 * BIT() is kept only for legacy callers and simply aliases BIT64(),
 * matching the old (1ULL << (i)) behavior: the result is a 64-bit
 * value with only bit @i set. Requires 0 <= i < 64.
 *
 * (The scrape showed both the old definition and this alias; a single
 * definition is kept to avoid a macro redefinition.)
 */
#define BIT(i) BIT64(i)
|
||||
|
||||
/*
 * GENMASK(h, l): contiguous bitmask of type unsigned long with bits
 * [h:l] set, inclusive. Example: GENMASK(7, 4) == 0xF0UL.
 *
 * Left term sets every bit from l upward; right term clears bits above
 * h. Requires 0 <= l <= h < BITS_PER_LONG, otherwise the shifts are
 * undefined behavior.
 */
#define GENMASK(h, l) \
	(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
|
||||
|
||||
Reference in New Issue
Block a user