gpu: nvgpu: Add safe ops for s64

Add overflow-checked addition and multiplication functions for s64,
matching the existing safe ops for the unsigned types.
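
Worked example of the condition being guarded (illustrative, not part of
the change itself): signed overflow is undefined behaviour in C, so the
helpers check the operands first and call BUG() rather than letting the
result wrap. Assuming a 64-bit long (LONG_MAX = 9223372036854775807),
nvgpu_safe_add_s64(LONG_MAX - 5, 10) would trigger BUG() because the sum
is not representable in an s64, while nvgpu_safe_add_s64(LONG_MAX - 5, 5)
returns exactly LONG_MAX.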

Jira NVGPU-3607

Change-Id: I8078679ee906dfcfcdab24ca221ec4e6b27e58db
Signed-off-by: ajesh <akv@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2133656
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: Nitin Kumbhar <nkumbhar@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: ajesh, 2019-06-10 19:41:08 +05:30
Committed-by: mobile promotions
Commit: b05a529219 (parent: 6f37ac5de2)

@@ -54,6 +54,16 @@ static inline u64 nvgpu_safe_add_u64(u64 ul_a, u64 ul_b)
	}
}
static inline s64 nvgpu_safe_add_s64(s64 sl_a, s64 sl_b)
{
	if (((sl_b > 0) && (sl_a > (LONG_MAX - sl_b))) ||
			((sl_b < 0) && (sl_a < (LONG_MIN - sl_b)))) {
		BUG();
	} else {
		return sl_a + sl_b;
	}
}
static inline u32 nvgpu_safe_sub_u32(u32 ui_a, u32 ui_b)
{
	if (ui_a < ui_b) {
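
For context between the two hunks: nvgpu_safe_add_s64() adds only when the
result is representable; a positive sl_b can only push the sum above
LONG_MAX and a negative sl_b can only push it below LONG_MIN, so each bound
is checked against the sign of sl_b and BUG() fires on violation. A minimal
caller sketch, assuming the nvgpu safe-ops header and the s64/u32 typedefs
are available; the function and variable names below are hypothetical and
not part of this change:

/*
 * Hypothetical caller: accumulate signed adjustments into a byte offset.
 * Any step whose sum cannot be represented in an s64 calls BUG() instead
 * of silently wrapping.
 */
static s64 apply_adjustments(s64 base_off, const s64 *adj, u32 n)
{
	s64 off = base_off;
	u32 i;

	for (i = 0U; i < n; i++) {
		off = nvgpu_safe_add_s64(off, adj[i]);
	}
	return off;
}
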
@@ -104,6 +114,33 @@ static inline u64 nvgpu_safe_mult_u64(u64 ul_a, u64 ul_b)
	}
}
static inline s64 nvgpu_safe_mult_s64(s64 sl_a, s64 sl_b)
{
	if (sl_a > 0) {
		if (sl_b > 0) {
			if (sl_a > (LONG_MAX / sl_b)) {
				BUG();
			}
		} else {
			if (sl_b < (LONG_MIN / sl_a)) {
				BUG();
			}
		}
	} else {
		if (sl_b > 0) {
			if (sl_a < (LONG_MIN / sl_b)) {
				BUG();
			}
		} else {
			if ((sl_a != 0) && (sl_b < (LONG_MAX / sl_a))) {
				BUG();
			}
		}
	}
	return sl_a * sl_b;
}
static inline u16 nvgpu_safe_cast_u64_to_u16(u64 ul_a)
{
	if (ul_a > USHRT_MAX) {
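
The multiplication helper follows the standard four-quadrant pre-check
(the same shape as CERT C's INT32-C recommendation): the operand signs
determine whether the product can only exceed LONG_MAX or only fall below
LONG_MIN, each case is tested with a division so the multiply itself never
overflows, and the (sl_a != 0) guard in the last branch avoids dividing by
zero. Below is a self-contained userspace sketch of the same pattern so it
can be compiled and exercised outside the kernel; the stdint types, the
abort() fallback, and the sample values are assumptions made for this
illustration, not part of the nvgpu change:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Same four-quadrant pre-check as nvgpu_safe_mult_s64(), but standalone. */
static int64_t safe_mult_s64(int64_t a, int64_t b)
{
	if (a > 0) {
		if (b > 0) {
			if (a > INT64_MAX / b)
				abort();	/* both positive: product too large */
		} else {
			if (b < INT64_MIN / a)
				abort();	/* a > 0, b <= 0: product too small */
		}
	} else {
		if (b > 0) {
			if (a < INT64_MIN / b)
				abort();	/* a <= 0, b > 0: product too small */
		} else {
			if (a != 0 && b < INT64_MAX / a)
				abort();	/* both non-positive: product too large */
		}
	}
	return a * b;
}

int main(void)
{
	/* Prints 12: both operands negative, product representable. */
	printf("%lld\n", (long long)safe_mult_s64(-3, -4));
	/* safe_mult_s64(INT64_MAX, 2) would abort() instead of wrapping. */
	return 0;
}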