gpu: nvgpu: include: fix compile errors with new compile flags

This prepares for adding the following CFLAGS:
    -Werror -Wall -Wextra \
    -Wmissing-braces -Wpointer-arith -Wundef \
    -Wconversion -Wsign-conversion \
    -Wformat-security \
    -Wmissing-declarations -Wredundant-decls -Wimplicit-fallthrough

Jira GVSCI-11640

Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Change-Id: I7a7afff85231aed52a20f77854c30fe5c755cae5
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2555058
Reviewed-by: Shashank Singh <shashsingh@nvidia.com>
Reviewed-by: Aparna Das <aparnad@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Richard Zhao
2021-07-06 21:32:15 -07:00
committed by mobile promotions
parent e81a36e56a
commit 9e5c88c1ef
21 changed files with 345 additions and 90 deletions
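
Most of the changes below follow one pattern: empty inline stubs gain an
explicit (void) reference to each parameter so that -Wall -Wextra under
-Werror no longer fails on unused parameters, and narrowing or
sign-changing integer conversions become explicit casts to satisfy
-Wconversion and -Wsign-conversion. A minimal sketch of the stub pattern
(hypothetical code, not part of the nvgpu headers):

    struct dev;

    /* Before: fails under -Werror=unused-parameter.
     * static inline void dev_debug_dump(struct dev *d) {}
     */

    /* After: the (void) cast marks the parameter as intentionally unused
     * and generates no code. */
    static inline void dev_debug_dump(struct dev *d)
    {
            (void)d;
    }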


@@ -791,8 +791,15 @@ void nvgpu_init_alloc_debug(struct gk20a *g, struct nvgpu_allocator *a);
void nvgpu_fini_alloc_debug(struct nvgpu_allocator *a);
#else
static inline void nvgpu_init_alloc_debug(struct gk20a *g,
struct nvgpu_allocator *a) {}
static inline void nvgpu_fini_alloc_debug(struct nvgpu_allocator *a) {}
struct nvgpu_allocator *a)
{
(void)g;
(void)a;
}
static inline void nvgpu_fini_alloc_debug(struct nvgpu_allocator *a)
{
(void)a;
}
#endif /* CONFIG_DEBUG_FS */
/**


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -591,6 +591,7 @@ static inline bool nvgpu_channel_is_deterministic(struct nvgpu_channel *c)
#ifdef CONFIG_NVGPU_DETERMINISTIC_CHANNELS
return c->deterministic;
#else
(void)c;
return false;
#endif
}
@@ -1040,6 +1041,8 @@ void trace_write_pushbuffers(struct nvgpu_channel *c, u32 count);
#else
static inline void trace_write_pushbuffers(struct nvgpu_channel *c, u32 count)
{
(void)c;
(void)count;
}
#endif
@@ -1191,9 +1194,16 @@ void nvgpu_channel_restart_all_wdts(struct gk20a *g);
*/
void nvgpu_channel_set_wdt_debug_dump(struct nvgpu_channel *ch, bool dump);
#else
static inline void nvgpu_channel_restart_all_wdts(struct gk20a *g) {}
static inline void nvgpu_channel_restart_all_wdts(struct gk20a *g)
{
(void)g;
}
static inline void nvgpu_channel_set_wdt_debug_dump(struct nvgpu_channel *ch,
bool dump) {}
bool dump)
{
(void)ch;
(void)dump;
}
#endif
/**


@@ -1,7 +1,7 @@
/*
* GK20A Debug functionality
*
* Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -55,14 +55,39 @@ void gk20a_debug_deinit(struct gk20a *g);
#else
__attribute__((format (printf, 2, 3)))
static inline void gk20a_debug_output(struct nvgpu_debug_context *o,
const char *fmt, ...) {}
const char *fmt, ...)
{
(void)o;
(void)fmt;
}
static inline void gk20a_debug_dump(struct gk20a *g)
{
(void)g;
}
static inline void gk20a_debug_dump(struct gk20a *g) {}
static inline void gk20a_debug_show_dump(struct gk20a *g,
struct nvgpu_debug_context *o) {}
static inline void gk20a_gr_debug_dump(struct gk20a *g) {}
static inline void gk20a_debug_init(struct gk20a *g, const char *debugfs_symlink) {}
static inline void gk20a_debug_deinit(struct gk20a *g) {}
struct nvgpu_debug_context *o)
{
(void)g;
(void)o;
}
static inline void gk20a_gr_debug_dump(struct gk20a *g)
{
(void)g;
}
static inline void gk20a_debug_init(struct gk20a *g, const char *debugfs_symlink)
{
(void)g;
(void)debugfs_symlink;
}
static inline void gk20a_debug_deinit(struct gk20a *g)
{
(void)g;
}
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
*
* GK20A Graphics
*
@@ -877,6 +877,7 @@ static inline bool nvgpu_is_timeouts_enabled(struct gk20a *g)
#ifdef CONFIG_NVGPU_DEBUGGER
return nvgpu_atomic_read(&g->timeouts_disabled_refcount) == 0;
#else
(void)g;
return true;
#endif
}


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -54,7 +54,7 @@
NVGPU_GPU_CTXSW_TAG_INVALID_TIMESTAMP
#define NVGPU_GPU_CTXSW_FILTER_ISSET(n, p) \
((p)->tag_bits[(n) / 64] & (1 << ((n) & 63)))
((p)->tag_bits[(n) / 64] & (1U << ((n) & 63)))
#define NVGPU_GPU_CTXSW_FILTER_SIZE (NVGPU_GPU_CTXSW_TAG_LAST + 1)
#define NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT 31
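
The 1U literal keeps the shifted constant unsigned, so AND-ing it against the
unsigned tag_bits word no longer mixes signed and unsigned operands under
-Wsign-conversion (and, for bit 31, avoids shifting into the sign bit of a
plain int). A standalone sketch of the same class of fix, illustrative only
and not nvgpu code:

    #include <stdint.h>

    /* With -Wsign-conversion, folding a signed (1 << bit) into an unsigned
     * mask is flagged; an unsigned constant avoids the warning and keeps
     * 1 << 31 from overflowing a signed int. */
    static inline uint32_t set_bit(uint32_t mask, unsigned int bit)
    {
            return mask | (UINT32_C(1) << (bit & 31U));
    }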


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -23,6 +23,8 @@
#ifndef NVGPU_LOG_COMMON_H
#define NVGPU_LOG_COMMON_H
#include <nvgpu/bitops.h>
enum nvgpu_log_type {
NVGPU_ERROR = 0,
NVGPU_WARNING,


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -105,21 +105,27 @@ void nvgpu_nvs_domain_put(struct gk20a *g, struct nvgpu_nvs_domain *dom);
#else
static inline int nvgpu_nvs_init(struct gk20a *g)
{
(void)g;
return 0;
}
static inline void nvgpu_nvs_remove_support(struct gk20a *g)
{
(void)g;
}
static inline struct nvgpu_nvs_domain *
nvgpu_nvs_domain_by_name(struct gk20a *g, const char *name)
{
(void)g;
(void)name;
return NULL;
}
static inline void nvgpu_nvs_domain_put(struct gk20a *g, struct nvgpu_nvs_domain *dom)
{
(void)g;
(void)dom;
}
#endif


@@ -1,7 +1,7 @@
/*
* nvgpu os fence
*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -100,6 +100,7 @@ struct nvgpu_os_fence {
static inline bool nvgpu_os_fence_is_initialized(struct nvgpu_os_fence *fence)
{
(void)fence;
return false;
}


@@ -1,7 +1,7 @@
/*
* nvgpu os fence semas
*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -73,6 +73,9 @@ static inline int nvgpu_os_fence_sema_create(
struct nvgpu_channel *c,
struct nvgpu_semaphore *sema)
{
(void)fence_out;
(void)c;
(void)sema;
return -ENOSYS;
}
@@ -80,6 +83,8 @@ static inline int nvgpu_os_fence_get_semas(
struct nvgpu_os_fence_sema *fence_sema_out,
struct nvgpu_os_fence *fence_in)
{
(void)fence_sema_out;
(void)fence_in;
return -EINVAL;
}
@@ -87,11 +92,15 @@ static inline void nvgpu_os_fence_sema_extract_nth_semaphore(
struct nvgpu_os_fence_sema *fence, u32 n,
struct nvgpu_semaphore **semaphore_out)
{
(void)fence;
(void)n;
(void)semaphore_out;
}
static inline u32 nvgpu_os_fence_sema_get_num_semaphores(
struct nvgpu_os_fence_sema *fence)
{
(void)fence;
return 0;
}


@@ -1,7 +1,7 @@
/*
* nvgpu os fence syncpts
*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -86,6 +86,11 @@ static inline int nvgpu_os_fence_syncpt_create(
struct nvgpu_nvhost_dev *nvhost_device,
u32 id, u32 thresh)
{
(void)fence_out;
(void)c;
(void)nvhost_device;
(void)id;
(void)thresh;
return -ENOSYS;
}
@@ -93,12 +98,15 @@ static inline int nvgpu_os_fence_get_syncpts(
struct nvgpu_os_fence_syncpt *fence_syncpt_out,
struct nvgpu_os_fence *fence_in)
{
(void)fence_syncpt_out;
(void)fence_in;
return -EINVAL;
}
static inline u32 nvgpu_os_fence_syncpt_get_num_syncpoints(
struct nvgpu_os_fence_syncpt *fence)
{
(void)fence;
return 0;
}


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -23,12 +23,13 @@
#ifndef NVGPU_POSIX_ATOMIC_H
#define NVGPU_POSIX_ATOMIC_H
#include <stdatomic.h>
#include <nvgpu/types.h>
#include <nvgpu/static_analysis.h>
#include <nvgpu/utils.h>
#include <nvgpu/cov_whitelist.h>
#include <stdatomic.h>
/*
* Note: this code uses the GCC builtins to implement atomics.
*/
@@ -117,8 +118,8 @@ typedef struct nvgpu_posix_atomic64 {
NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 10_3)) \
tmp = __builtin_choose_expr( \
IS_SIGNED_LONG_TYPE(i), \
(nvgpu_safe_add_s64((tmp), (i))), \
(nvgpu_safe_add_s32((tmp), (i)))); \
(nvgpu_safe_add_s64((s64)(tmp), (s64)(i))), \
(nvgpu_safe_add_s32((s32)(tmp), (s32)(i)))); \
tmp; \
})
@@ -149,8 +150,8 @@ typedef struct nvgpu_posix_atomic64 {
NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 10_3)) \
tmp = __builtin_choose_expr( \
IS_SIGNED_LONG_TYPE(i), \
(nvgpu_safe_sub_s64((tmp), (i))), \
(nvgpu_safe_sub_s32((tmp), (i)))); \
(nvgpu_safe_sub_s64((s64)(tmp), (s64)(i))), \
(nvgpu_safe_sub_s32((s32)(tmp), (s32)(i)))); \
tmp; \
})


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -38,7 +38,7 @@
*
* @return Count of elements in the buffer.
*/
#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size)-1U))
#define CIRC_CNT(head, tail, size) ((__typeof(head))(((head) - (tail))) & ((size)-1U))
/**
* @brief Return space in buffer.


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -152,6 +152,7 @@ struct nvgpu_raw_spinlock {
static inline void nvgpu_spinlock_irqsave(struct nvgpu_spinlock *mutex,
unsigned long flags)
{
(void)flags;
nvgpu_posix_lock_acquire(&mutex->lock);
}
@@ -171,6 +172,7 @@ static inline void nvgpu_spinlock_irqsave(struct nvgpu_spinlock *mutex,
static inline void nvgpu_spinunlock_irqrestore(struct nvgpu_spinlock *mutex,
unsigned long flags)
{
(void)flags;
nvgpu_posix_lock_release(&mutex->lock);
}


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -56,7 +56,7 @@
*/
#define roundup_pow_of_two(x) \
({ \
unsigned long ret; \
unsigned long ret = 0U; \
\
if ((x) == 0UL) { \
BUG(); \


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -41,6 +41,7 @@ static void sort(void *base, size_t num, size_t size,
int (*cmp)(const void *a, const void *b),
void (*swap)(void *a, void *b, int n))
{
(void)swap;
qsort(base, num, size, cmp);
}


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -23,9 +23,6 @@
#ifndef NVGPU_POSIX_TIMERS_H
#define NVGPU_POSIX_TIMERS_H
#include <sys/time.h>
#include <time.h>
#include <nvgpu/types.h>
#include <nvgpu/log.h>


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -25,24 +25,62 @@
#include <nvgpu/types.h>
static inline void trace_gk20a_mm_fb_flush(const char *name){}
static inline void trace_gk20a_mm_fb_flush(const char *name)
{
(void)name;
}
static inline void trace_gk20a_mm_fb_flush_done(const char *name){}
static inline void trace_gk20a_mm_fb_flush_done(const char *name)
{
(void)name;
}
static inline void trace_gk20a_mm_l2_invalidate(const char *name){}
static inline void trace_gk20a_mm_l2_invalidate(const char *name)
{
(void)name;
}
static inline void trace_gk20a_mm_l2_invalidate_done(const char *name){}
static inline void trace_gk20a_mm_l2_invalidate_done(const char *name)
{
(void)name;
}
static inline void trace_gk20a_mm_l2_flush(const char *name){}
static inline void trace_gk20a_mm_l2_flush(const char *name)
{
(void)name;
}
static inline void trace_gk20a_mm_l2_flush_done(const char *name){}
static inline void trace_gk20a_mm_l2_flush_done(const char *name)
{
(void)name;
}
static inline void trace_nvgpu_channel_open_new(int chid){}
static inline void trace_gk20a_release_used_channel(int chid){}
static inline void trace_nvgpu_channel_get(u32 chid, const char *caller){}
static inline void trace_nvgpu_channel_put(u32 chid, const char *caller){}
static inline void trace_gk20a_free_channel(int chid){}
static inline void trace_nvgpu_channel_update(int chid){}
static inline void trace_nvgpu_channel_open_new(u32 chid)
{
(void)chid;
}
static inline void trace_gk20a_release_used_channel(u32 chid)
{
(void)chid;
}
static inline void trace_nvgpu_channel_get(u32 chid, const char *caller)
{
(void)chid;
(void)caller;
}
static inline void trace_nvgpu_channel_put(u32 chid, const char *caller)
{
(void)chid;
(void)caller;
}
static inline void trace_gk20a_free_channel(u32 chid)
{
(void)chid;
}
static inline void trace_nvgpu_channel_update(u32 chid)
{
(void)chid;
}
static inline void trace_gk20a_mmu_fault(u64 fault_addr,
u32 fault_type,
u32 access_type,
@@ -50,31 +88,83 @@ static inline void trace_gk20a_mmu_fault(u64 fault_addr,
u32 engine_id,
const char *client_type_desc,
const char *client_id_desc,
const char *fault_type_desc){}
const char *fault_type_desc)
{
(void)fault_addr;
(void)fault_type;
(void)access_type;
(void)inst_ptr;
(void)engine_id;
(void)client_type_desc;
(void)client_id_desc;
(void)fault_type_desc;
}
#ifdef CONFIG_NVGPU_COMPRESSION
static inline void trace_gk20a_ltc_cbc_ctrl_start(const char *name,
u32 cbc_ctrl, u32 min_value, u32 max_value) {}
static inline void trace_gk20a_ltc_cbc_ctrl_done(const char *name) {}
u32 cbc_ctrl, u32 min_value, u32 max_value)
{
(void)name;
(void)cbc_ctrl;
(void)min_value;
(void)max_value;
}
static inline void trace_gk20a_ltc_cbc_ctrl_done(const char *name)
{
(void)name;
}
#endif
static inline void trace_gk20a_mm_tlb_invalidate(const char *name) {}
static inline void trace_gk20a_mm_tlb_invalidate_done(const char *name) {}
static inline void trace_gk20a_channel_reset(u32 chid, u32 tsgid) {}
static inline void trace_gk20a_mm_tlb_invalidate(const char *name)
{
(void)name;
}
static inline void trace_gk20a_mm_tlb_invalidate_done(const char *name)
{
(void)name;
}
static inline void trace_gk20a_channel_reset(u32 chid, u32 tsgid)
{
(void)chid;
(void)tsgid;
}
static inline void trace_gk20a_channel_submit_gpfifo(const char *name,
u32 chid,
u32 num_entries,
u32 flags,
u32 wait_id,
u32 wait_value) {}
u32 wait_value)
{
(void)name;
(void)chid;
(void)num_entries;
(void)flags;
(void)wait_id;
(void)wait_value;
}
static inline void trace_gk20a_channel_submitted_gpfifo(const char *name,
u32 chid,
u32 num_entries,
u32 flags,
u32 incr_id,
u32 incr_value) {}
u32 incr_value)
{
(void)name;
(void)chid;
(void)num_entries;
(void)flags;
(void)incr_id;
(void)incr_value;
}
static inline void trace_gk20a_push_cmdbuf(const char *name,
u32 mem_id,
u32 words,
u32 offset,
void *cmdbuf) {}
void *cmdbuf)
{
(void)name;
(void)mem_id;
(void)words;
(void)offset;
(void)cmdbuf;
}
#endif


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -306,10 +306,8 @@
#define ALIGN_MASK(x, mask) \
__builtin_choose_expr( \
(IS_UNSIGNED_TYPE(x) && IS_UNSIGNED_TYPE(mask)), \
__builtin_choose_expr( \
IS_UNSIGNED_LONG_TYPE(x), \
(nvgpu_safe_add_u64((x), (mask)) & ~(mask)), \
(nvgpu_safe_add_u32((x), (mask)) & ~(mask))), \
(NVGPU_SAFE_ADD_UNSIGNED((x), (mask)) & \
~(typeof(x))(mask)), \
/* Results in build error. Make x/mask type unsigned */ \
(void)0)
@@ -326,17 +324,12 @@
*
* @return Returns \a x aligned with the value mentioned in \a a.
*/
#define NVGPU_ALIGN(x, a) \
#define NVGPU_ALIGN(x, a) \
__builtin_choose_expr( \
(IS_UNSIGNED_TYPE(x) && IS_UNSIGNED_TYPE(a)), \
__builtin_choose_expr( \
IS_UNSIGNED_LONG_TYPE(x), \
ALIGN_MASK((x), \
(nvgpu_safe_sub_u64((typeof(x))(a), 1))), \
ALIGN_MASK((x), \
(nvgpu_safe_sub_u32((typeof(x))(a), 1)))), \
/* Results in build error. Make x/a type unsigned */ \
(void)0)
ALIGN_MASK((x), NVGPU_SAFE_SUB_UNSIGNED(a, 1)), \
/* Results in build error. Make x/a type unsigned */ \
(void)0)
/**
* @brief Align with #PAGE_SIZE.
@@ -479,7 +472,7 @@ static inline unsigned int nvgpu_posix_hweight8(uint8_t x)
result = nvgpu_safe_sub_u8(x, result);
result = (result & mask2) + ((result >> shift2) & mask2);
result = (u8)((result & mask2) + ((result >> shift2) & mask2));
result = (result + (result >> shift4)) & mask3;
ret = (unsigned int)result;


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -63,6 +63,7 @@ static inline u32 nvgpu_safe_add_u32(u32 ui_a, u32 ui_b)
{
if ((UINT_MAX - ui_a) < ui_b) {
BUG();
return 0U;
} else {
return ui_a + ui_b;
}
@@ -84,6 +85,7 @@ static inline s32 nvgpu_safe_add_s32(s32 si_a, s32 si_b)
if (((si_b > 0) && (si_a > (INT_MAX - si_b))) ||
((si_b < 0) && (si_a < (INT_MIN - si_b)))) {
BUG();
return 0U;
} else {
return si_a + si_b;
}
@@ -105,6 +107,7 @@ static inline u64 nvgpu_safe_add_u64(u64 ul_a, u64 ul_b)
NVGPU_COV_WHITELIST(false_positive, NVGPU_CERT(INT30_C), "Bug 2643092")
if ((ULONG_MAX - ul_a) < ul_b) {
BUG();
return 0U;
} else {
return ul_a + ul_b;
}
@@ -126,11 +129,25 @@ static inline s64 nvgpu_safe_add_s64(s64 sl_a, s64 sl_b)
if (((sl_b > 0) && (sl_a > (LONG_MAX - sl_b))) ||
((sl_b < 0) && (sl_a < (LONG_MIN - sl_b)))) {
BUG();
return 0;
} else {
return sl_a + sl_b;
}
}
#define NVGPU_SAFE_ADD_UNSIGNED(a, b) \
({ \
typeof(a) _a = (a), _b = (typeof(a))(b), ret = 0U; \
typeof(_a) max = (typeof(_a))(-1LL); \
\
if ((max - _a) < _b) { \
BUG(); \
} else { \
ret = _a + _b; \
} \
ret; \
})
/**
* @brief Add two u32 values with wraparound arithmetic
*
@@ -216,8 +233,9 @@ static inline u8 nvgpu_safe_sub_u8(u8 uc_a, u8 uc_b)
{
if (uc_a < uc_b) {
BUG();
return 0U;
} else {
return uc_a - uc_b;
return (u8)(uc_a - uc_b);
}
}
@@ -236,6 +254,7 @@ static inline u32 nvgpu_safe_sub_u32(u32 ui_a, u32 ui_b)
{
if (ui_a < ui_b) {
BUG();
return 0U;
} else {
return ui_a - ui_b;
}
@@ -257,6 +276,7 @@ static inline s32 nvgpu_safe_sub_s32(s32 si_a, s32 si_b)
if (((si_b > 0) && (si_a < (INT_MIN + si_b))) ||
((si_b < 0) && (si_a > (INT_MAX + si_b)))) {
BUG();
return 0;
} else {
return si_a - si_b;
}
@@ -277,11 +297,23 @@ static inline u64 nvgpu_safe_sub_u64(u64 ul_a, u64 ul_b)
{
if (ul_a < ul_b) {
BUG();
return 0U;
} else {
return ul_a - ul_b;
}
}
#define NVGPU_SAFE_SUB_UNSIGNED(a, b) \
({ \
typeof(a) _a = (a), _b = (typeof(a))(b), ret = 0U; \
if (_a < _b) { \
BUG(); \
} else { \
ret = (typeof(_a))(_a - _b); \
} \
ret; \
})
/**
* @brief Subtract two s64 values and check for underflow.
*
@@ -298,6 +330,7 @@ static inline s64 nvgpu_safe_sub_s64(s64 si_a, s64 si_b)
if (((si_b > 0) && (si_a < (LONG_MIN + si_b))) ||
((si_b < 0) && (si_a > (LONG_MAX + si_b)))) {
BUG();
return 0;
} else {
return si_a - si_b;
}
@@ -320,6 +353,7 @@ static inline u32 nvgpu_safe_mult_u32(u32 ui_a, u32 ui_b)
return 0U;
} else if (ui_a > (UINT_MAX / ui_b)) {
BUG();
return 0U;
} else {
return ui_a * ui_b;
}
@@ -342,6 +376,7 @@ static inline u64 nvgpu_safe_mult_u64(u64 ul_a, u64 ul_b)
return 0UL;
} else if (ul_a > (ULONG_MAX / ul_b)) {
BUG();
return 0U;
} else {
return ul_a * ul_b;
}
@@ -365,20 +400,24 @@ static inline s64 nvgpu_safe_mult_s64(s64 sl_a, s64 sl_b)
if (sl_b > 0) {
if (sl_a > (LONG_MAX / sl_b)) {
BUG();
return 0;
}
} else {
if (sl_b < (LONG_MIN / sl_a)) {
BUG();
return 0;
}
}
} else {
if (sl_b > 0) {
if (sl_a < (LONG_MIN / sl_b)) {
BUG();
return 0;
}
} else {
if ((sl_a != 0) && (sl_b < (LONG_MAX / sl_a))) {
BUG();
return 0;
}
}
}
@@ -400,6 +439,7 @@ static inline u16 nvgpu_safe_cast_u64_to_u16(u64 ul_a)
{
if (ul_a > USHRT_MAX) {
BUG();
return 0U;
} else {
return (u16)ul_a;
}
@@ -419,6 +459,7 @@ static inline u32 nvgpu_safe_cast_u64_to_u32(u64 ul_a)
{
if (ul_a > UINT_MAX) {
BUG();
return 0U;
} else {
return (u32)ul_a;
}
@@ -438,6 +479,7 @@ static inline u8 nvgpu_safe_cast_u64_to_u8(u64 ul_a)
{
if (ul_a > nvgpu_safe_cast_s32_to_u64(UCHAR_MAX)) {
BUG();
return 0U;
} else {
return (u8)ul_a;
}
@@ -457,6 +499,7 @@ static inline u32 nvgpu_safe_cast_s64_to_u32(s64 l_a)
{
if ((l_a < 0) || (l_a > nvgpu_safe_cast_u64_to_s64(U64(UINT_MAX)))) {
BUG();
return 0U;
} else {
return (u32)l_a;
}
@@ -476,6 +519,7 @@ static inline u64 nvgpu_safe_cast_s64_to_u64(s64 l_a)
{
if (l_a < 0) {
BUG();
return 0U;
} else {
return (u64)l_a;
}
@@ -508,6 +552,7 @@ static inline u8 nvgpu_safe_cast_s8_to_u8(s8 sc_a)
NVGPU_COV_WHITELIST(false_positive, NVGPU_CERT(STR34_C), "Bug 2673832")
if (sc_a < 0) {
BUG();
return 0U;
} else {
return (u8)sc_a;
}
@@ -527,6 +572,7 @@ static inline u32 nvgpu_safe_cast_s32_to_u32(s32 si_a)
{
if (si_a < 0) {
BUG();
return 0U;
} else {
return (u32)si_a;
}
@@ -546,6 +592,7 @@ static inline u64 nvgpu_safe_cast_s32_to_u64(s32 si_a)
{
if (si_a < 0) {
BUG();
return 0U;
} else {
return (u64)si_a;
}
@@ -565,6 +612,7 @@ static inline u16 nvgpu_safe_cast_u32_to_u16(u32 ui_a)
{
if (ui_a > USHRT_MAX) {
BUG();
return 0U;
} else {
return (u16)ui_a;
}
@@ -584,6 +632,7 @@ static inline u8 nvgpu_safe_cast_u32_to_u8(u32 ui_a)
{
if (ui_a > nvgpu_safe_cast_s32_to_u32(UCHAR_MAX)) {
BUG();
return 0U;
} else {
return (u8)ui_a;
}
@@ -603,6 +652,7 @@ static inline s8 nvgpu_safe_cast_u32_to_s8(u32 ui_a)
{
if (ui_a > nvgpu_safe_cast_s32_to_u32(SCHAR_MAX)) {
BUG();
return 0;
} else {
return (s8)ui_a;
}
@@ -622,6 +672,7 @@ static inline s32 nvgpu_safe_cast_u32_to_s32(u32 ui_a)
{
if (ui_a > nvgpu_safe_cast_s32_to_u32(INT_MAX)) {
BUG();
return 0;
} else {
return (s32)ui_a;
}
@@ -641,6 +692,7 @@ static inline s32 nvgpu_safe_cast_u64_to_s32(u64 ul_a)
{
if (ul_a > nvgpu_safe_cast_s32_to_u64(INT_MAX)) {
BUG();
return 0;
} else {
return (s32)ul_a;
}
@@ -661,6 +713,7 @@ static inline s64 nvgpu_safe_cast_u64_to_s64(u64 ul_a)
NVGPU_COV_WHITELIST(false_positive, NVGPU_MISRA(Rule, 14_3), "Bug 2615925")
if (ul_a > nvgpu_safe_cast_s64_to_u64(LONG_MAX)) {
BUG();
return 0;
} else {
return (s64)ul_a;
}
@@ -680,6 +733,7 @@ static inline s32 nvgpu_safe_cast_s64_to_s32(s64 sl_a)
{
if ((sl_a > INT_MAX) || (sl_a < INT_MIN)) {
BUG();
return 0;
} else {
return (s32)sl_a;
}


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -40,18 +40,36 @@ void nvgpu_trace_intr_thread_stall_done(struct gk20a *g);
#ifdef CONFIG_NVGPU_TRACE
#include <nvgpu/posix/trace_gk20a.h>
#endif /* CONFIG_NVGPU_TRACE */
static inline void nvgpu_trace_intr_stall_start(struct gk20a *g) {}
static inline void nvgpu_trace_intr_stall_done(struct gk20a *g) {}
static inline void nvgpu_trace_intr_thread_stall_start(struct gk20a *g) {}
static inline void nvgpu_trace_intr_thread_stall_done(struct gk20a *g) {}
static inline void nvgpu_trace_intr_stall_start(struct gk20a *g)
{
(void)g;
}
static inline void nvgpu_trace_intr_stall_done(struct gk20a *g)
{
(void)g;
}
static inline void nvgpu_trace_intr_thread_stall_start(struct gk20a *g)
{
(void)g;
}
static inline void nvgpu_trace_intr_thread_stall_done(struct gk20a *g)
{
(void)g;
}
#else
#ifdef CONFIG_NVGPU_TRACE
#include <nvgpu/posix/trace_gk20a.h>
#endif
static inline void nvgpu_trace_intr_stall_start(struct gk20a *g) {}
static inline void nvgpu_trace_intr_stall_done(struct gk20a *g) {}
static inline void nvgpu_trace_intr_stall_start(struct gk20a *g)
{
(void)g;
}
static inline void nvgpu_trace_intr_stall_done(struct gk20a *g)
{
(void)g;
}
void nvgpu_trace_intr_thread_stall_start(struct gk20a *g);
void nvgpu_trace_intr_thread_stall_done(struct gk20a *g);


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -60,33 +60,63 @@ bool nvgpu_channel_wdt_check(struct nvgpu_channel_wdt *wdt,
static inline struct nvgpu_channel_wdt *nvgpu_channel_wdt_alloc(
struct gk20a *g)
{
(void)g;
return NULL;
}
static inline void nvgpu_channel_wdt_destroy(struct nvgpu_channel_wdt *wdt) {}
static inline void nvgpu_channel_wdt_enable(struct nvgpu_channel_wdt *wdt) {}
static inline void nvgpu_channel_wdt_disable(struct nvgpu_channel_wdt *wdt) {}
static inline void nvgpu_channel_wdt_destroy(struct nvgpu_channel_wdt *wdt)
{
(void)wdt;
}
static inline void nvgpu_channel_wdt_enable(struct nvgpu_channel_wdt *wdt)
{
(void)wdt;
}
static inline void nvgpu_channel_wdt_disable(struct nvgpu_channel_wdt *wdt)
{
(void)wdt;
}
static inline bool nvgpu_channel_wdt_enabled(struct nvgpu_channel_wdt *wdt)
{
(void)wdt;
return false;
}
static inline void nvgpu_channel_wdt_set_limit(struct nvgpu_channel_wdt *wdt,
u32 limit_ms) {}
u32 limit_ms)
{
(void)wdt;
(void)limit_ms;
}
static inline u32 nvgpu_channel_wdt_limit(struct nvgpu_channel_wdt *wdt)
{
(void)wdt;
return 0U;
}
static inline void nvgpu_channel_wdt_start(struct nvgpu_channel_wdt *wdt,
struct nvgpu_channel_wdt_state *state) {}
struct nvgpu_channel_wdt_state *state)
{
(void)wdt;
(void)state;
}
static inline bool nvgpu_channel_wdt_stop(struct nvgpu_channel_wdt *wdt)
{
(void)wdt;
return false;
}
static inline void nvgpu_channel_wdt_continue(struct nvgpu_channel_wdt *wdt) {}
static inline void nvgpu_channel_wdt_continue(struct nvgpu_channel_wdt *wdt)
{
(void)wdt;
}
static inline void nvgpu_channel_wdt_rewind(struct nvgpu_channel_wdt *wdt,
struct nvgpu_channel_wdt_state *state) {}
struct nvgpu_channel_wdt_state *state)
{
(void)wdt;
(void)state;
}
static inline bool nvgpu_channel_wdt_check(struct nvgpu_channel_wdt *wdt,
struct nvgpu_channel_wdt_state *state) {
(void)wdt;
(void)state;
return false;
}