mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: add U*_MAX macros
Linux prefers U8_MAX, U16_MAX, etc. to UCHAR_MAX, UINT_MAX, etc., so define them for building the nvgpu driver on non-Linux OSes.

JIRA NVGPU-647
Change-Id: I141f87d19a561de71762f7edfe0b41dff6ad31ec
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1918214
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by: mobile promotions
parent: 80b5e2b8d6
commit: ac2e423af8
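The commit message above describes a naming convention rather than a behavior change, so a short caller-side sketch may help before the hunk itself. The function and typedefs below are hypothetical stand-ins, not code from the nvgpu tree; they only illustrate why typed U8_MAX-style macros read more naturally than <limits.h> constants in code built around fixed-width types.

/* Hypothetical caller, not taken from the nvgpu sources; the typedefs are
 * assumptions standing in for the driver's own fixed-width types.
 */
#include <limits.h>   /* UCHAR_MAX: the C library spelling */
#include <stdbool.h>
#include <stdint.h>

typedef uint8_t  u8;
typedef uint32_t u32;

#define U8_MAX ((u8)~0U)  /* kernel-style spelling this patch provides */

/* With UCHAR_MAX the bound is tied to the platform's char width; with
 * U8_MAX it is tied to the u8 type the value will actually be stored in.
 */
static bool fits_in_u8(u32 value)
{
	return value <= U8_MAX;   /* rather than: value <= UCHAR_MAX */
}

int main(void)
{
	return fits_in_u8(200U) ? 0 : 1;
}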
@@ -60,11 +60,14 @@
 #define U32(x) ((u32)(x))
 #define U64(x) ((u64)(x))
 
-/* Linux uses U8_MAX instead of UCHAR_MAX. We define it here for non-Linux
- * OSes
+/* Linux uses U8_MAX, U32_MAX, etc instead of UCHAR_MAX, UINT32_MAX. We define
+ * them here for non-Linux OSes
  */
 #if !defined(__KERNEL__) && !defined(U8_MAX)
-#define U8_MAX ((u8)255)
+#define U8_MAX ((u8)~0U)
+#define U16_MAX ((u16)~0U)
+#define U32_MAX ((u32)~0U)
+#define U64_MAX ((u64)~0ULL)
 #endif
 
 #endif /* NVGPU_TYPES_H */
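The new definitions rely on ~0 (all bits set) being truncated by the cast to each type's width. Below is a minimal standalone check of that arithmetic; the uintN_t typedefs are assumptions standing in for the driver's u8/u16/u32/u64, the program is not part of the patch, and it assumes unsigned int is at least 32 bits wide.

/* Standalone sketch verifying the ~0-cast pattern used by the new macros.
 * The typedefs are stand-ins for nvgpu's fixed-width types (an assumption
 * for this example); assumes unsigned int is at least 32 bits wide.
 */
#include <assert.h>
#include <stdint.h>

typedef uint8_t  u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

#define U8_MAX  ((u8)~0U)
#define U16_MAX ((u16)~0U)
#define U32_MAX ((u32)~0U)
#define U64_MAX ((u64)~0ULL)

int main(void)
{
	/* Truncating an all-ones value to each width yields that type's max. */
	assert(U8_MAX  == 255U);
	assert(U16_MAX == 65535U);
	assert(U32_MAX == 4294967295U);
	assert(U64_MAX == 18446744073709551615ULL);
	return 0;
}

The cast pattern is what lets each macro track its underlying typedef instead of hard-coding a literal such as 255, which is why the hunk above replaces ((u8)255) and extends the same form to U16/U32/U64.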