gpu: nvgpu: fix MISRA violations in atomic unit

MISRA rule 21.2 forbids the use of identifier names that start with
an underscore. Fix the violations of MISRA rule 21.2 in the atomic unit.
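
As an illustration of the rename pattern, here is a minimal sketch based on
the nvgpu_atomic_read() wrapper from the diff below (the sketch itself is not
part of the change):

	/* Before: leading-underscore name is reserved, violating MISRA C:2012 rule 21.2 */
	static inline int __nvgpu_atomic_read(nvgpu_atomic_t *v);

	/* After: implementation helper renamed with an _impl suffix */
	static inline int nvgpu_atomic_read_impl(nvgpu_atomic_t *v);

	/* Public wrapper keeps its name, so callers are unaffected */
	static inline int nvgpu_atomic_read(nvgpu_atomic_t *v)
	{
		return nvgpu_atomic_read_impl(v);
	}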

Jira NVGPU-3139

Change-Id: I4fbed30542bdd2a2444a5619b5bb2bb5c7736472
Signed-off-by: ajesh <akv@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2111441
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 67b3cb8a54 (parent eaf6aa07f9)
Author: ajesh <akv@nvidia.com>, 2019-05-03 23:55:56 +05:30
Committer: mobile promotions <svcmobile_promotions@nvidia.com>

3 changed files with 103 additions and 103 deletions

@@ -28,138 +28,138 @@
 #include <nvgpu/posix/atomic.h>
 #endif
-#define NVGPU_ATOMIC_INIT(i) __nvgpu_atomic_init(i)
-#define NVGPU_ATOMIC64_INIT(i) __nvgpu_atomic64_init(i)
+#define NVGPU_ATOMIC_INIT(i) nvgpu_atomic_init_impl(i)
+#define NVGPU_ATOMIC64_INIT(i) nvgpu_atomic64_init_impl(i)
 static inline void nvgpu_atomic_set(nvgpu_atomic_t *v, int i)
 {
-	__nvgpu_atomic_set(v, i);
+	nvgpu_atomic_set_impl(v, i);
 }
 static inline int nvgpu_atomic_read(nvgpu_atomic_t *v)
 {
-	return __nvgpu_atomic_read(v);
+	return nvgpu_atomic_read_impl(v);
 }
 static inline void nvgpu_atomic_inc(nvgpu_atomic_t *v)
 {
-	__nvgpu_atomic_inc(v);
+	nvgpu_atomic_inc_impl(v);
 }
 static inline int nvgpu_atomic_inc_return(nvgpu_atomic_t *v)
 {
-	return __nvgpu_atomic_inc_return(v);
+	return nvgpu_atomic_inc_return_impl(v);
 }
 static inline void nvgpu_atomic_dec(nvgpu_atomic_t *v)
 {
-	__nvgpu_atomic_dec(v);
+	nvgpu_atomic_dec_impl(v);
 }
 static inline int nvgpu_atomic_dec_return(nvgpu_atomic_t *v)
 {
-	return __nvgpu_atomic_dec_return(v);
+	return nvgpu_atomic_dec_return_impl(v);
 }
 static inline int nvgpu_atomic_cmpxchg(nvgpu_atomic_t *v, int old, int new)
 {
-	return __nvgpu_atomic_cmpxchg(v, old, new);
+	return nvgpu_atomic_cmpxchg_impl(v, old, new);
 }
 static inline int nvgpu_atomic_xchg(nvgpu_atomic_t *v, int new)
 {
-	return __nvgpu_atomic_xchg(v, new);
+	return nvgpu_atomic_xchg_impl(v, new);
 }
 static inline bool nvgpu_atomic_inc_and_test(nvgpu_atomic_t *v)
 {
-	return __nvgpu_atomic_inc_and_test(v);
+	return nvgpu_atomic_inc_and_test_impl(v);
 }
 static inline bool nvgpu_atomic_dec_and_test(nvgpu_atomic_t *v)
 {
-	return __nvgpu_atomic_dec_and_test(v);
+	return nvgpu_atomic_dec_and_test_impl(v);
 }
 static inline bool nvgpu_atomic_sub_and_test(int i, nvgpu_atomic_t *v)
 {
-	return __nvgpu_atomic_sub_and_test(i, v);
+	return nvgpu_atomic_sub_and_test_impl(i, v);
 }
 static inline void nvgpu_atomic_add(int i, nvgpu_atomic_t *v)
 {
-	__nvgpu_atomic_add(i, v);
+	nvgpu_atomic_add_impl(i, v);
 }
 static inline int nvgpu_atomic_sub_return(int i, nvgpu_atomic_t *v)
 {
-	return __nvgpu_atomic_sub_return(i, v);
+	return nvgpu_atomic_sub_return_impl(i, v);
 }
 static inline void nvgpu_atomic_sub(int i, nvgpu_atomic_t *v)
 {
-	__nvgpu_atomic_sub(i, v);
+	nvgpu_atomic_sub_impl(i, v);
 }
 static inline int nvgpu_atomic_add_return(int i, nvgpu_atomic_t *v)
 {
-	return __nvgpu_atomic_add_return(i, v);
+	return nvgpu_atomic_add_return_impl(i, v);
 }
 static inline int nvgpu_atomic_add_unless(nvgpu_atomic_t *v, int a, int u)
 {
-	return __nvgpu_atomic_add_unless(v, a, u);
+	return nvgpu_atomic_add_unless_impl(v, a, u);
 }
 static inline void nvgpu_atomic64_set(nvgpu_atomic64_t *v, long x)
 {
-	return __nvgpu_atomic64_set(v, x);
+	return nvgpu_atomic64_set_impl(v, x);
 }
 static inline long nvgpu_atomic64_read(nvgpu_atomic64_t *v)
 {
-	return __nvgpu_atomic64_read(v);
+	return nvgpu_atomic64_read_impl(v);
 }
 static inline void nvgpu_atomic64_add(long x, nvgpu_atomic64_t *v)
 {
-	__nvgpu_atomic64_add(x, v);
+	nvgpu_atomic64_add_impl(x, v);
 }
 static inline void nvgpu_atomic64_inc(nvgpu_atomic64_t *v)
 {
-	__nvgpu_atomic64_inc(v);
+	nvgpu_atomic64_inc_impl(v);
 }
 static inline long nvgpu_atomic64_inc_return(nvgpu_atomic64_t *v)
 {
-	return __nvgpu_atomic64_inc_return(v);
+	return nvgpu_atomic64_inc_return_impl(v);
 }
 static inline void nvgpu_atomic64_dec(nvgpu_atomic64_t *v)
 {
-	__nvgpu_atomic64_dec(v);
+	nvgpu_atomic64_dec_impl(v);
 }
 static inline long nvgpu_atomic64_dec_return(nvgpu_atomic64_t *v)
 {
-	return __nvgpu_atomic64_dec_return(v);
+	return nvgpu_atomic64_dec_return_impl(v);
 }
 static inline long nvgpu_atomic64_xchg(nvgpu_atomic64_t *v, long new)
 {
-	return __nvgpu_atomic64_xchg(v, new);
+	return nvgpu_atomic64_xchg_impl(v, new);
 }
 static inline long nvgpu_atomic64_cmpxchg(nvgpu_atomic64_t *v, long old,
 					long new)
 {
-	return __nvgpu_atomic64_cmpxchg(v, old, new);
+	return nvgpu_atomic64_cmpxchg_impl(v, old, new);
 }
 static inline long nvgpu_atomic64_add_return(long x, nvgpu_atomic64_t *v)
 {
-	return __nvgpu_atomic64_add_return(x, v);
+	return nvgpu_atomic64_add_return_impl(x, v);
 }
 static inline long nvgpu_atomic64_add_unless(nvgpu_atomic64_t *v, long a,
 					long u)
 {
-	return __nvgpu_atomic64_add_unless(v, a, u);
+	return nvgpu_atomic64_add_unless_impl(v, a, u);
 }
 static inline void nvgpu_atomic64_sub(long x, nvgpu_atomic64_t *v)
 {
-	__nvgpu_atomic64_sub(x, v);
+	nvgpu_atomic64_sub_impl(x, v);
 }
 static inline bool nvgpu_atomic64_inc_and_test(nvgpu_atomic64_t *v)
 {
-	return __nvgpu_atomic64_inc_and_test(v);
+	return nvgpu_atomic64_inc_and_test_impl(v);
 }
 static inline bool nvgpu_atomic64_dec_and_test(nvgpu_atomic64_t *v)
 {
-	return __nvgpu_atomic64_dec_and_test(v);
+	return nvgpu_atomic64_dec_and_test_impl(v);
 }
 static inline bool nvgpu_atomic64_sub_and_test(long x, nvgpu_atomic64_t *v)
 {
-	return __nvgpu_atomic64_sub_and_test(x, v);
+	return nvgpu_atomic64_sub_and_test_impl(x, v);
 }
 static inline long nvgpu_atomic64_sub_return(long x, nvgpu_atomic64_t *v)
 {
-	return __nvgpu_atomic64_sub_return(x, v);
+	return nvgpu_atomic64_sub_return_impl(x, v);
 }
 #endif /* NVGPU_ATOMIC_H */


@@ -28,167 +28,167 @@ typedef struct nvgpu_atomic64 {
 	atomic64_t atomic_var;
 } nvgpu_atomic64_t;
-#define __nvgpu_atomic_init(i) { ATOMIC_INIT(i) }
-#define __nvgpu_atomic64_init(i) { ATOMIC64_INIT(i) }
+#define nvgpu_atomic_init_impl(i) { ATOMIC_INIT(i) }
+#define nvgpu_atomic64_init_impl(i) { ATOMIC64_INIT(i) }
-static inline void __nvgpu_atomic_set(nvgpu_atomic_t *v, int i)
+static inline void nvgpu_atomic_set_impl(nvgpu_atomic_t *v, int i)
 {
 	atomic_set(&v->atomic_var, i);
 }
-static inline int __nvgpu_atomic_read(nvgpu_atomic_t *v)
+static inline int nvgpu_atomic_read_impl(nvgpu_atomic_t *v)
 {
 	return atomic_read(&v->atomic_var);
 }
-static inline void __nvgpu_atomic_inc(nvgpu_atomic_t *v)
+static inline void nvgpu_atomic_inc_impl(nvgpu_atomic_t *v)
 {
 	atomic_inc(&v->atomic_var);
 }
-static inline int __nvgpu_atomic_inc_return(nvgpu_atomic_t *v)
+static inline int nvgpu_atomic_inc_return_impl(nvgpu_atomic_t *v)
 {
 	return atomic_inc_return(&v->atomic_var);
 }
-static inline void __nvgpu_atomic_dec(nvgpu_atomic_t *v)
+static inline void nvgpu_atomic_dec_impl(nvgpu_atomic_t *v)
 {
 	atomic_dec(&v->atomic_var);
 }
-static inline int __nvgpu_atomic_dec_return(nvgpu_atomic_t *v)
+static inline int nvgpu_atomic_dec_return_impl(nvgpu_atomic_t *v)
 {
 	return atomic_dec_return(&v->atomic_var);
 }
-static inline int __nvgpu_atomic_cmpxchg(nvgpu_atomic_t *v, int old, int new)
+static inline int nvgpu_atomic_cmpxchg_impl(nvgpu_atomic_t *v, int old, int new)
 {
 	return atomic_cmpxchg(&v->atomic_var, old, new);
 }
-static inline int __nvgpu_atomic_xchg(nvgpu_atomic_t *v, int new)
+static inline int nvgpu_atomic_xchg_impl(nvgpu_atomic_t *v, int new)
 {
 	return atomic_xchg(&v->atomic_var, new);
 }
-static inline bool __nvgpu_atomic_inc_and_test(nvgpu_atomic_t *v)
+static inline bool nvgpu_atomic_inc_and_test_impl(nvgpu_atomic_t *v)
 {
 	return atomic_inc_and_test(&v->atomic_var);
 }
-static inline bool __nvgpu_atomic_dec_and_test(nvgpu_atomic_t *v)
+static inline bool nvgpu_atomic_dec_and_test_impl(nvgpu_atomic_t *v)
 {
 	return atomic_dec_and_test(&v->atomic_var);
 }
-static inline void __nvgpu_atomic_sub(int i, nvgpu_atomic_t *v)
+static inline void nvgpu_atomic_sub_impl(int i, nvgpu_atomic_t *v)
 {
 	atomic_sub(i, &v->atomic_var);
 }
-static inline int __nvgpu_atomic_sub_return(int i, nvgpu_atomic_t *v)
+static inline int nvgpu_atomic_sub_return_impl(int i, nvgpu_atomic_t *v)
 {
 	return atomic_sub_return(i, &v->atomic_var);
 }
-static inline bool __nvgpu_atomic_sub_and_test(int i, nvgpu_atomic_t *v)
+static inline bool nvgpu_atomic_sub_and_test_impl(int i, nvgpu_atomic_t *v)
 {
 	return atomic_sub_and_test(i, &v->atomic_var);
 }
-static inline void __nvgpu_atomic_add(int i, nvgpu_atomic_t *v)
+static inline void nvgpu_atomic_add_impl(int i, nvgpu_atomic_t *v)
 {
 	atomic_add(i, &v->atomic_var);
 }
-static inline int __nvgpu_atomic_add_return(int i, nvgpu_atomic_t *v)
+static inline int nvgpu_atomic_add_return_impl(int i, nvgpu_atomic_t *v)
 {
 	return atomic_add_return(i, &v->atomic_var);
 }
-static inline int __nvgpu_atomic_add_unless(nvgpu_atomic_t *v, int a, int u)
+static inline int nvgpu_atomic_add_unless_impl(nvgpu_atomic_t *v, int a, int u)
 {
 	return atomic_add_unless(&v->atomic_var, a, u);
 }
-static inline void __nvgpu_atomic64_set(nvgpu_atomic64_t *v, long x)
+static inline void nvgpu_atomic64_set_impl(nvgpu_atomic64_t *v, long x)
 {
 	atomic64_set(&v->atomic_var, x);
 }
-static inline long __nvgpu_atomic64_read(nvgpu_atomic64_t *v)
+static inline long nvgpu_atomic64_read_impl(nvgpu_atomic64_t *v)
 {
 	return atomic64_read(&v->atomic_var);
 }
-static inline void __nvgpu_atomic64_add(long x, nvgpu_atomic64_t *v)
+static inline void nvgpu_atomic64_add_impl(long x, nvgpu_atomic64_t *v)
 {
 	atomic64_add(x, &v->atomic_var);
 }
-static inline long __nvgpu_atomic64_add_return(long x, nvgpu_atomic64_t *v)
+static inline long nvgpu_atomic64_add_return_impl(long x, nvgpu_atomic64_t *v)
 {
 	return atomic64_add_return(x, &v->atomic_var);
 }
-static inline long __nvgpu_atomic64_add_unless(nvgpu_atomic64_t *v, long a,
+static inline long nvgpu_atomic64_add_unless_impl(nvgpu_atomic64_t *v, long a,
 					long u)
 {
 	return atomic64_add_unless(&v->atomic_var, a, u);
 }
-static inline void __nvgpu_atomic64_inc(nvgpu_atomic64_t *v)
+static inline void nvgpu_atomic64_inc_impl(nvgpu_atomic64_t *v)
 {
 	atomic64_inc(&v->atomic_var);
 }
-static inline long __nvgpu_atomic64_inc_return(nvgpu_atomic64_t *v)
+static inline long nvgpu_atomic64_inc_return_impl(nvgpu_atomic64_t *v)
 {
 	return atomic64_inc_return(&v->atomic_var);
 }
-static inline bool __nvgpu_atomic64_inc_and_test(nvgpu_atomic64_t *v)
+static inline bool nvgpu_atomic64_inc_and_test_impl(nvgpu_atomic64_t *v)
 {
 	return atomic64_inc_and_test(&v->atomic_var);
 }
-static inline void __nvgpu_atomic64_dec(nvgpu_atomic64_t *v)
+static inline void nvgpu_atomic64_dec_impl(nvgpu_atomic64_t *v)
 {
 	atomic64_dec(&v->atomic_var);
 }
-static inline long __nvgpu_atomic64_dec_return(nvgpu_atomic64_t *v)
+static inline long nvgpu_atomic64_dec_return_impl(nvgpu_atomic64_t *v)
 {
 	return atomic64_dec_return(&v->atomic_var);
 }
-static inline bool __nvgpu_atomic64_dec_and_test(nvgpu_atomic64_t *v)
+static inline bool nvgpu_atomic64_dec_and_test_impl(nvgpu_atomic64_t *v)
 {
 	return atomic64_dec_and_test(&v->atomic_var);
 }
-static inline long __nvgpu_atomic64_xchg(nvgpu_atomic64_t *v, long new)
+static inline long nvgpu_atomic64_xchg_impl(nvgpu_atomic64_t *v, long new)
 {
 	return atomic64_xchg(&v->atomic_var, new);
 }
-static inline long __nvgpu_atomic64_cmpxchg(nvgpu_atomic64_t *v,
+static inline long nvgpu_atomic64_cmpxchg_impl(nvgpu_atomic64_t *v,
 					long old, long new)
 {
 	return atomic64_cmpxchg(&v->atomic_var, old, new);
 }
-static inline void __nvgpu_atomic64_sub(long x, nvgpu_atomic64_t *v)
+static inline void nvgpu_atomic64_sub_impl(long x, nvgpu_atomic64_t *v)
 {
 	atomic64_sub(x, &v->atomic_var);
 }
-static inline long __nvgpu_atomic64_sub_return(long x, nvgpu_atomic64_t *v)
+static inline long nvgpu_atomic64_sub_return_impl(long x, nvgpu_atomic64_t *v)
 {
 	return atomic64_sub_return(x, &v->atomic_var);
 }
-static inline bool __nvgpu_atomic64_sub_and_test(long x, nvgpu_atomic64_t *v)
+static inline bool nvgpu_atomic64_sub_and_test_impl(long x, nvgpu_atomic64_t *v)
 {
 	return atomic64_sub_and_test(x, &v->atomic_var);
 }


@@ -38,9 +38,9 @@ typedef struct __nvgpu_posix_atomic64 {
 	atomic_long v;
 } nvgpu_atomic64_t;
-#define __nvgpu_atomic_init(i) { i }
+#define nvgpu_atomic_init_impl(i) { i }
-#define __nvgpu_atomic64_init(i) { i }
+#define nvgpu_atomic64_init_impl(i) { i }
 /*
  * These macros define the common cases to maximize code reuse, especially
@@ -53,7 +53,7 @@ typedef struct __nvgpu_posix_atomic64 {
 #define NVGPU_POSIX_ATOMIC_ADD_RETURN(v, i) \
 ({ \
-	typeof(v->v) tmp; \
+	typeof(v->v) tmp; \
 \
 	tmp = atomic_fetch_add(&(v->v), i); \
 	tmp += i; \
@@ -93,164 +93,164 @@ typedef struct __nvgpu_posix_atomic64 {
 	old; \
 })
-static inline void __nvgpu_atomic_set(nvgpu_atomic_t *v, int i)
+static inline void nvgpu_atomic_set_impl(nvgpu_atomic_t *v, int i)
 {
 	NVGPU_POSIX_ATOMIC_SET(v, i);
 }
-static inline int __nvgpu_atomic_read(nvgpu_atomic_t *v)
+static inline int nvgpu_atomic_read_impl(nvgpu_atomic_t *v)
 {
 	return NVGPU_POSIX_ATOMIC_READ(v);
 }
-static inline void __nvgpu_atomic_inc(nvgpu_atomic_t *v)
+static inline void nvgpu_atomic_inc_impl(nvgpu_atomic_t *v)
 {
 	(void)NVGPU_POSIX_ATOMIC_ADD_RETURN(v, 1);
 }
-static inline int __nvgpu_atomic_inc_return(nvgpu_atomic_t *v)
+static inline int nvgpu_atomic_inc_return_impl(nvgpu_atomic_t *v)
 {
 	return NVGPU_POSIX_ATOMIC_ADD_RETURN(v, 1);
 }
-static inline void __nvgpu_atomic_dec(nvgpu_atomic_t *v)
+static inline void nvgpu_atomic_dec_impl(nvgpu_atomic_t *v)
 {
 	(void)NVGPU_POSIX_ATOMIC_SUB_RETURN(v, 1);
 }
-static inline int __nvgpu_atomic_dec_return(nvgpu_atomic_t *v)
+static inline int nvgpu_atomic_dec_return_impl(nvgpu_atomic_t *v)
 {
 	return NVGPU_POSIX_ATOMIC_SUB_RETURN(v, 1);
 }
-static inline int __nvgpu_atomic_cmpxchg(nvgpu_atomic_t *v, int old, int new)
+static inline int nvgpu_atomic_cmpxchg_impl(nvgpu_atomic_t *v, int old, int new)
 {
 	return NVGPU_POSIX_ATOMIC_CMPXCHG(v, old, new);
 }
-static inline int __nvgpu_atomic_xchg(nvgpu_atomic_t *v, int new)
+static inline int nvgpu_atomic_xchg_impl(nvgpu_atomic_t *v, int new)
 {
 	return NVGPU_POSIX_ATOMIC_XCHG(v, new);
 }
-static inline bool __nvgpu_atomic_inc_and_test(nvgpu_atomic_t *v)
+static inline bool nvgpu_atomic_inc_and_test_impl(nvgpu_atomic_t *v)
 {
 	return NVGPU_POSIX_ATOMIC_ADD_RETURN(v, 1) == 0;
 }
-static inline bool __nvgpu_atomic_dec_and_test(nvgpu_atomic_t *v)
+static inline bool nvgpu_atomic_dec_and_test_impl(nvgpu_atomic_t *v)
 {
 	return NVGPU_POSIX_ATOMIC_SUB_RETURN(v, 1) == 0;
 }
-static inline void __nvgpu_atomic_sub(int i, nvgpu_atomic_t *v)
+static inline void nvgpu_atomic_sub_impl(int i, nvgpu_atomic_t *v)
 {
 	(void)NVGPU_POSIX_ATOMIC_SUB_RETURN(v, i);
 }
-static inline int __nvgpu_atomic_sub_return(int i, nvgpu_atomic_t *v)
+static inline int nvgpu_atomic_sub_return_impl(int i, nvgpu_atomic_t *v)
 {
 	return NVGPU_POSIX_ATOMIC_SUB_RETURN(v, i);
 }
-static inline bool __nvgpu_atomic_sub_and_test(int i, nvgpu_atomic_t *v)
+static inline bool nvgpu_atomic_sub_and_test_impl(int i, nvgpu_atomic_t *v)
 {
 	return NVGPU_POSIX_ATOMIC_SUB_RETURN(v, i) == 0;
 }
-static inline void __nvgpu_atomic_add(int i, nvgpu_atomic_t *v)
+static inline void nvgpu_atomic_add_impl(int i, nvgpu_atomic_t *v)
 {
 	(void)NVGPU_POSIX_ATOMIC_ADD_RETURN(v, i);
 }
-static inline int __nvgpu_atomic_add_return(int i, nvgpu_atomic_t *v)
+static inline int nvgpu_atomic_add_return_impl(int i, nvgpu_atomic_t *v)
 {
 	return NVGPU_POSIX_ATOMIC_ADD_RETURN(v, i);
 }
-static inline int __nvgpu_atomic_add_unless(nvgpu_atomic_t *v, int a, int u)
+static inline int nvgpu_atomic_add_unless_impl(nvgpu_atomic_t *v, int a, int u)
 {
 	return NVGPU_POSIX_ATOMIC_ADD_UNLESS(v, a, u);
 }
-static inline void __nvgpu_atomic64_set(nvgpu_atomic64_t *v, long i)
+static inline void nvgpu_atomic64_set_impl(nvgpu_atomic64_t *v, long i)
 {
 	NVGPU_POSIX_ATOMIC_SET(v, i);
 }
-static inline long __nvgpu_atomic64_read(nvgpu_atomic64_t *v)
+static inline long nvgpu_atomic64_read_impl(nvgpu_atomic64_t *v)
 {
 	return NVGPU_POSIX_ATOMIC_READ(v);
 }
-static inline void __nvgpu_atomic64_add(long x, nvgpu_atomic64_t *v)
+static inline void nvgpu_atomic64_add_impl(long x, nvgpu_atomic64_t *v)
 {
 	(void)NVGPU_POSIX_ATOMIC_ADD_RETURN(v, x);
 }
-static inline long __nvgpu_atomic64_add_return(long x, nvgpu_atomic64_t *v)
+static inline long nvgpu_atomic64_add_return_impl(long x, nvgpu_atomic64_t *v)
 {
 	return NVGPU_POSIX_ATOMIC_ADD_RETURN(v, x);
 }
-static inline long __nvgpu_atomic64_add_unless(nvgpu_atomic64_t *v, long a,
+static inline long nvgpu_atomic64_add_unless_impl(nvgpu_atomic64_t *v, long a,
 					long u)
 {
 	return NVGPU_POSIX_ATOMIC_ADD_UNLESS(v, a, u);
 }
-static inline void __nvgpu_atomic64_inc(nvgpu_atomic64_t *v)
+static inline void nvgpu_atomic64_inc_impl(nvgpu_atomic64_t *v)
 {
 	(void)NVGPU_POSIX_ATOMIC_ADD_RETURN(v, 1);
 }
-static inline long __nvgpu_atomic64_inc_return(nvgpu_atomic64_t *v)
+static inline long nvgpu_atomic64_inc_return_impl(nvgpu_atomic64_t *v)
 {
 	return NVGPU_POSIX_ATOMIC_ADD_RETURN(v, 1);
 }
-static inline bool __nvgpu_atomic64_inc_and_test(nvgpu_atomic64_t *v)
+static inline bool nvgpu_atomic64_inc_and_test_impl(nvgpu_atomic64_t *v)
 {
 	return NVGPU_POSIX_ATOMIC_ADD_RETURN(v, 1) == 0;
 }
-static inline void __nvgpu_atomic64_dec(nvgpu_atomic64_t *v)
+static inline void nvgpu_atomic64_dec_impl(nvgpu_atomic64_t *v)
 {
 	(void)NVGPU_POSIX_ATOMIC_SUB_RETURN(v, 1);
 }
-static inline long __nvgpu_atomic64_dec_return(nvgpu_atomic64_t *v)
+static inline long nvgpu_atomic64_dec_return_impl(nvgpu_atomic64_t *v)
 {
 	return NVGPU_POSIX_ATOMIC_SUB_RETURN(v, 1);
 }
-static inline bool __nvgpu_atomic64_dec_and_test(nvgpu_atomic64_t *v)
+static inline bool nvgpu_atomic64_dec_and_test_impl(nvgpu_atomic64_t *v)
 {
 	return NVGPU_POSIX_ATOMIC_SUB_RETURN(v, 1) == 0;
 }
-static inline long __nvgpu_atomic64_xchg(nvgpu_atomic64_t *v, long new)
+static inline long nvgpu_atomic64_xchg_impl(nvgpu_atomic64_t *v, long new)
 {
 	return NVGPU_POSIX_ATOMIC_XCHG(v, new);
 }
-static inline long __nvgpu_atomic64_cmpxchg(nvgpu_atomic64_t *v,
+static inline long nvgpu_atomic64_cmpxchg_impl(nvgpu_atomic64_t *v,
 					long old, long new)
 {
 	return NVGPU_POSIX_ATOMIC_CMPXCHG(v, old, new);
 }
-static inline void __nvgpu_atomic64_sub(long x, nvgpu_atomic64_t *v)
+static inline void nvgpu_atomic64_sub_impl(long x, nvgpu_atomic64_t *v)
 {
 	(void)NVGPU_POSIX_ATOMIC_SUB_RETURN(v, x);
 }
-static inline long __nvgpu_atomic64_sub_return(long x, nvgpu_atomic64_t *v)
+static inline long nvgpu_atomic64_sub_return_impl(long x, nvgpu_atomic64_t *v)
 {
 	return NVGPU_POSIX_ATOMIC_SUB_RETURN(v, x);
 }
-static inline bool __nvgpu_atomic64_sub_and_test(long x, nvgpu_atomic64_t *v)
+static inline bool nvgpu_atomic64_sub_and_test_impl(long x, nvgpu_atomic64_t *v)
 {
 	return NVGPU_POSIX_ATOMIC_SUB_RETURN(v, x) == 0;
 }