gpu: nvgpu: add more compile flags to posix iGPU

Added the following CFLAGS:
    -Werror -Wall -Wextra \
    -Wmissing-braces -Wpointer-arith -Wundef \
    -Wconversion -Wsign-conversion \
    -Wformat-security \
    -Wmissing-declarations -Wredundant-decls -Wimplicit-fallthrough

Also fixed all compile errors for POSIX.

This is in preparation for porting the GPU server.

Jira GVSCI-11640

Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Change-Id: I30b09a62a57396abd642922e22f2e550a96f42c2
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2555059
Reviewed-by: Shashank Singh <shashsingh@nvidia.com>
Reviewed-by: Aparna Das <aparnad@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
This commit is contained in:
Richard Zhao
2021-07-06 21:32:50 -07:00
committed by mobile promotions
parent 9e5c88c1ef
commit 09cf3642ef
21 changed files with 169 additions and 27 deletions

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -79,6 +79,8 @@ static void nvgpu_posix_dump_stack(int skip_frames)
} }
free(trace_syms); free(trace_syms);
#else
(void)skip_frames;
#endif #endif
return; return;
} }
@@ -101,6 +103,7 @@ static void nvgpu_bug_init(void)
void nvgpu_bug_exit(int status) void nvgpu_bug_exit(int status)
{ {
(void)status;
#ifndef __NVGPU_UNIT_TEST__ #ifndef __NVGPU_UNIT_TEST__
nvgpu_err(NULL, "SW quiesce done. Exiting."); nvgpu_err(NULL, "SW quiesce done. Exiting.");
exit(status); exit(status);
@@ -209,6 +212,7 @@ done:
bool nvgpu_posix_warn(const char *func, int line_no, bool cond, const char *fmt, ...) bool nvgpu_posix_warn(const char *func, int line_no, bool cond, const char *fmt, ...)
{ {
(void)fmt;
if (!cond) { if (!cond) {
goto done; goto done;
} }

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -116,6 +116,8 @@ struct nvgpu_firmware *nvgpu_request_firmware(struct gk20a *g,
int ret; int ret;
size_t full_path_len; size_t full_path_len;
(void)flags;
if (fw_name == NULL) { if (fw_name == NULL) {
return NULL; return NULL;
} }

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -66,6 +66,7 @@ int nvgpu_tegra_fuse_read_gcplex_config_fuse(struct gk20a *g, u32 *val)
int nvgpu_tegra_fuse_read_per_device_identifier(struct gk20a *g, u64 *pdi) int nvgpu_tegra_fuse_read_per_device_identifier(struct gk20a *g, u64 *pdi)
{ {
(void)g;
*pdi = 0; *pdi = 0;
return 0; return 0;

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -118,6 +118,7 @@ void *nvgpu_kmem_cache_alloc(struct nvgpu_kmem_cache *cache)
void nvgpu_kmem_cache_free(struct nvgpu_kmem_cache *cache, void *ptr) void nvgpu_kmem_cache_free(struct nvgpu_kmem_cache *cache, void *ptr)
{ {
(void)cache;
free(ptr); free(ptr);
} }
@@ -125,6 +126,9 @@ void *nvgpu_kmalloc_impl(struct gk20a *g, size_t size, void *ip)
{ {
void *ptr; void *ptr;
(void)g;
(void)ip;
#ifdef NVGPU_UNITTEST_FAULT_INJECTION_ENABLEMENT #ifdef NVGPU_UNITTEST_FAULT_INJECTION_ENABLEMENT
if (nvgpu_posix_fault_injection_handle_call( if (nvgpu_posix_fault_injection_handle_call(
nvgpu_kmem_get_fault_injection())) { nvgpu_kmem_get_fault_injection())) {
@@ -153,6 +157,9 @@ void *nvgpu_kzalloc_impl(struct gk20a *g, size_t size, void *ip)
void *ptr; void *ptr;
const size_t num = 1; const size_t num = 1;
(void)g;
(void)ip;
#ifdef NVGPU_UNITTEST_FAULT_INJECTION_ENABLEMENT #ifdef NVGPU_UNITTEST_FAULT_INJECTION_ENABLEMENT
if (nvgpu_posix_fault_injection_handle_call( if (nvgpu_posix_fault_injection_handle_call(
nvgpu_kmem_get_fault_injection())) { nvgpu_kmem_get_fault_injection())) {
@@ -174,6 +181,9 @@ void *nvgpu_kcalloc_impl(struct gk20a *g, size_t n, size_t size, void *ip)
void *ptr; void *ptr;
const size_t num = 1; const size_t num = 1;
(void)g;
(void)ip;
#ifdef NVGPU_UNITTEST_FAULT_INJECTION_ENABLEMENT #ifdef NVGPU_UNITTEST_FAULT_INJECTION_ENABLEMENT
if (nvgpu_posix_fault_injection_handle_call( if (nvgpu_posix_fault_injection_handle_call(
nvgpu_kmem_get_fault_injection())) { nvgpu_kmem_get_fault_injection())) {
@@ -202,6 +212,7 @@ void *nvgpu_vzalloc_impl(struct gk20a *g, unsigned long size, void *ip)
void nvgpu_kfree_impl(struct gk20a *g, void *addr) void nvgpu_kfree_impl(struct gk20a *g, void *addr)
{ {
(void)g;
free(addr); free(addr);
} }
@@ -226,6 +237,7 @@ void nvgpu_big_free(struct gk20a *g, void *p)
int nvgpu_kmem_init(struct gk20a *g) int nvgpu_kmem_init(struct gk20a *g)
{ {
(void)g;
#ifdef NVGPU_UNITTEST_FAULT_INJECTION_ENABLEMENT #ifdef NVGPU_UNITTEST_FAULT_INJECTION_ENABLEMENT
if (nvgpu_posix_fault_injection_handle_call( if (nvgpu_posix_fault_injection_handle_call(
nvgpu_kmem_get_fault_injection())) { nvgpu_kmem_get_fault_injection())) {
@@ -238,4 +250,6 @@ int nvgpu_kmem_init(struct gk20a *g)
void nvgpu_kmem_fini(struct gk20a *g, int flags) void nvgpu_kmem_fini(struct gk20a *g, int flags)
{ {
(void)g;
(void)flags;
} }

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -45,6 +45,7 @@ static const char *log_types[] = {
static inline const char *nvgpu_log_name(struct gk20a *g) static inline const char *nvgpu_log_name(struct gk20a *g)
{ {
(void)g;
return "gpu.USS"; return "gpu.USS";
} }

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -144569,6 +144569,8 @@ static const uint32_t nvgpu_gv11b_ccsr_regs[] = {
int nvgpu_get_mock_reglist(struct gk20a *g, u32 reg_idx, int nvgpu_get_mock_reglist(struct gk20a *g, u32 reg_idx,
struct nvgpu_mock_iospace *iospace) struct nvgpu_mock_iospace *iospace)
{ {
(void)g;
switch (reg_idx) { switch (reg_idx) {
case MOCK_REGS_GR: case MOCK_REGS_GR:
iospace->data = nvgpu_gv11b_gr_regs; iospace->data = nvgpu_gv11b_gr_regs;

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -91,6 +91,7 @@ static struct nvgpu_posix_io_callbacks default_posix_reg_callbacks = {
*/ */
void nvgpu_kernel_restart(void *cmd) void nvgpu_kernel_restart(void *cmd)
{ {
(void)cmd;
BUG(); BUG();
} }
@@ -101,11 +102,13 @@ void nvgpu_start_gpu_idle(struct gk20a *g)
int nvgpu_enable_irqs(struct gk20a *g) int nvgpu_enable_irqs(struct gk20a *g)
{ {
(void)g;
return 0; return 0;
} }
void nvgpu_disable_irqs(struct gk20a *g) void nvgpu_disable_irqs(struct gk20a *g)
{ {
(void)g;
} }
/* /*
@@ -113,10 +116,12 @@ void nvgpu_disable_irqs(struct gk20a *g)
*/ */
void gk20a_busy_noresume(struct gk20a *g) void gk20a_busy_noresume(struct gk20a *g)
{ {
(void)g;
} }
void gk20a_idle_nosuspend(struct gk20a *g) void gk20a_idle_nosuspend(struct gk20a *g)
{ {
(void)g;
} }
int gk20a_busy(struct gk20a *g) int gk20a_busy(struct gk20a *g)
@@ -151,7 +156,7 @@ static void nvgpu_posix_load_regs(struct gk20a *g)
continue; continue;
} }
err = nvgpu_posix_io_add_reg_space(g, space.base, space.size); err = nvgpu_posix_io_add_reg_space(g, space.base, (u32)space.size);
nvgpu_assert(err == 0); nvgpu_assert(err == 0);
regs = nvgpu_posix_io_get_reg_space(g, space.base); regs = nvgpu_posix_io_get_reg_space(g, space.base);

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -29,6 +29,7 @@
int nvgpu_current_pid(struct gk20a *g) int nvgpu_current_pid(struct gk20a *g)
{ {
(void)g;
/* /*
* In the kernel this gets us the PID of the calling process for IOCTLs. * In the kernel this gets us the PID of the calling process for IOCTLs.
* But since we are in userspace this doesn't quite mean the same thing. * But since we are in userspace this doesn't quite mean the same thing.
@@ -39,6 +40,7 @@ int nvgpu_current_pid(struct gk20a *g)
int nvgpu_current_tid(struct gk20a *g) int nvgpu_current_tid(struct gk20a *g)
{ {
(void)g;
/* /*
* In POSIX thread ID is not the same as a process ID. In Linux threads * In POSIX thread ID is not the same as a process ID. In Linux threads
* and processes are represented by the same thing, but userspace can't * and processes are represented by the same thing, but userspace can't
@@ -54,6 +56,9 @@ int nvgpu_current_tid(struct gk20a *g)
void nvgpu_print_current_impl(struct gk20a *g, const char *func_name, int line, void nvgpu_print_current_impl(struct gk20a *g, const char *func_name, int line,
void *ctx, enum nvgpu_log_type type) void *ctx, enum nvgpu_log_type type)
{ {
(void)func_name;
(void)line;
(void)ctx;
const char *log_message = "(unknown process)"; const char *log_message = "(unknown process)";
#if defined(__NVGPU_POSIX__) #if defined(__NVGPU_POSIX__)

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -24,5 +24,6 @@
u32 nvgpu_channel_get_max_subctx_count(struct nvgpu_channel *ch) u32 nvgpu_channel_get_max_subctx_count(struct nvgpu_channel *ch)
{ {
(void)ch;
return 64; return 64;
} }

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -30,5 +30,6 @@
void nvgpu_clk_arb_event_post_event(struct nvgpu_clk_dev *dev) void nvgpu_clk_arb_event_post_event(struct nvgpu_clk_dev *dev)
{ {
(void)dev;
BUG(); BUG();
} }

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -54,6 +54,10 @@ static int __nvgpu_do_dma_alloc(struct gk20a *g, unsigned long flags,
enum nvgpu_aperture ap) enum nvgpu_aperture ap)
{ {
void *memory; void *memory;
(void)g;
(void)flags;
#ifdef NVGPU_UNITTEST_FAULT_INJECTION_ENABLEMENT #ifdef NVGPU_UNITTEST_FAULT_INJECTION_ENABLEMENT
if (nvgpu_posix_fault_injection_handle_call( if (nvgpu_posix_fault_injection_handle_call(
nvgpu_dma_alloc_get_fault_injection())) { nvgpu_dma_alloc_get_fault_injection())) {
@@ -111,6 +115,7 @@ static u64 __nvgpu_dma_alloc(struct nvgpu_allocator *allocator, u64 at,
static size_t mock_fb_get_vidmem_size(struct gk20a *g) static size_t mock_fb_get_vidmem_size(struct gk20a *g)
{ {
(void)g;
return SZ_4G; return SZ_4G;
} }
@@ -252,6 +257,8 @@ void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem) void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
{ {
(void)g;
if (!(mem->mem_flags & NVGPU_MEM_FLAG_SHADOW_COPY)) { if (!(mem->mem_flags & NVGPU_MEM_FLAG_SHADOW_COPY)) {
free(mem->cpu_va); free(mem->cpu_va);
} }

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -26,6 +26,10 @@
int nvgpu_dt_read_u32_index(struct gk20a *g, const char *name, int nvgpu_dt_read_u32_index(struct gk20a *g, const char *name,
u32 index, u32 *value) u32 index, u32 *value)
{ {
(void)g;
(void)name;
(void)index;
(void)value;
BUG(); BUG();
return 0; return 0;
} }

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -245,6 +245,7 @@ int nvgpu_posix_io_register_reg_space(struct gk20a *g,
void nvgpu_posix_io_unregister_reg_space(struct gk20a *g, void nvgpu_posix_io_unregister_reg_space(struct gk20a *g,
struct nvgpu_posix_io_reg_space *reg_space) struct nvgpu_posix_io_reg_space *reg_space)
{ {
(void)g;
nvgpu_list_del(&reg_space->link); nvgpu_list_del(&reg_space->link);
} }

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -103,11 +103,14 @@ u64 nvgpu_mem_sgl_phys(struct gk20a *g, void *sgl)
{ {
struct nvgpu_mem_sgl *mem = (struct nvgpu_mem_sgl *)sgl; struct nvgpu_mem_sgl *mem = (struct nvgpu_mem_sgl *)sgl;
(void)g;
return (u64)(uintptr_t)mem->phys; return (u64)(uintptr_t)mem->phys;
} }
u64 nvgpu_mem_sgl_ipa_to_pa(struct gk20a *g, void *sgl, u64 ipa, u64 *pa_len) u64 nvgpu_mem_sgl_ipa_to_pa(struct gk20a *g, void *sgl, u64 ipa, u64 *pa_len)
{ {
(void)ipa;
(void)pa_len;
return nvgpu_mem_sgl_phys(g, sgl); return nvgpu_mem_sgl_phys(g, sgl);
} }
@@ -145,6 +148,7 @@ bool nvgpu_mem_sgt_iommuable(struct gk20a *g, struct nvgpu_sgt *sgt)
{ {
struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g); struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g);
(void)sgt;
return p->mm_sgt_is_iommuable; return p->mm_sgt_is_iommuable;
} }
@@ -301,6 +305,8 @@ int nvgpu_mem_create_from_mem(struct gk20a *g,
u64 start = start_page * U64(NVGPU_CPU_PAGE_SIZE); u64 start = start_page * U64(NVGPU_CPU_PAGE_SIZE);
u64 size = U64(nr_pages) * U64(NVGPU_CPU_PAGE_SIZE); u64 size = U64(nr_pages) * U64(NVGPU_CPU_PAGE_SIZE);
(void)g;
if (src->aperture != APERTURE_SYSMEM) { if (src->aperture != APERTURE_SYSMEM) {
return -EINVAL; return -EINVAL;
} }
@@ -327,6 +333,10 @@ int nvgpu_mem_create_from_mem(struct gk20a *g,
int __nvgpu_mem_create_from_phys(struct gk20a *g, struct nvgpu_mem *dest, int __nvgpu_mem_create_from_phys(struct gk20a *g, struct nvgpu_mem *dest,
u64 src_phys, int nr_pages) u64 src_phys, int nr_pages)
{ {
(void)g;
(void)dest;
(void)src_phys;
(void)nr_pages;
BUG(); BUG();
return 0; return 0;
} }

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -41,12 +41,12 @@ static void allocate_new_syncpt(struct nvgpu_nvhost_dev *nvgpu_syncpt_dev)
{ {
u32 syncpt_id, syncpt_val; u32 syncpt_id, syncpt_val;
srand(time(NULL)); srand((unsigned int)time(NULL));
/* Limit the range between {1, NUM_HW_PTS} */ /* Limit the range between {1, NUM_HW_PTS} */
syncpt_id = (rand() % NUM_HW_PTS) + 1; syncpt_id = ((unsigned int)rand() % NUM_HW_PTS) + 1U;
/* Limit the range between {1, UINT_MAX - SYNCPT_SAFE_STATE_INCR - 1} */ /* Limit the range between {1, UINT_MAX - SYNCPT_SAFE_STATE_INCR - 1} */
syncpt_val = (rand() % (UINT_MAX - SYNCPT_SAFE_STATE_INCR - 1)); syncpt_val = ((unsigned int)rand() % (UINT_MAX - SYNCPT_SAFE_STATE_INCR - 1));
nvgpu_syncpt_dev->syncpt_id = syncpt_id; nvgpu_syncpt_dev->syncpt_id = syncpt_id;
nvgpu_syncpt_dev->syncpt_value = syncpt_val; nvgpu_syncpt_dev->syncpt_value = syncpt_val;
@@ -99,23 +99,31 @@ int nvgpu_nvhost_get_syncpt_aperture(
const char *nvgpu_nvhost_syncpt_get_name( const char *nvgpu_nvhost_syncpt_get_name(
struct nvgpu_nvhost_dev *nvgpu_syncpt_dev, int id) struct nvgpu_nvhost_dev *nvgpu_syncpt_dev, int id)
{ {
(void)nvgpu_syncpt_dev;
(void)id;
return NULL; return NULL;
} }
u32 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(struct gk20a *g, u32 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(struct gk20a *g,
u32 syncpt_id) u32 syncpt_id)
{ {
(void)g;
(void)syncpt_id;
return nvgpu_safe_mult_u32(syncpt_id, 0x1000U); return nvgpu_safe_mult_u32(syncpt_id, 0x1000U);
} }
void nvgpu_nvhost_syncpt_set_minval(struct nvgpu_nvhost_dev *nvgpu_syncpt_dev, void nvgpu_nvhost_syncpt_set_minval(struct nvgpu_nvhost_dev *nvgpu_syncpt_dev,
u32 id, u32 val) u32 id, u32 val)
{ {
(void)nvgpu_syncpt_dev;
(void)id;
(void)val;
} }
void nvgpu_nvhost_syncpt_put_ref_ext( void nvgpu_nvhost_syncpt_put_ref_ext(
struct nvgpu_nvhost_dev *nvhost_dev, u32 id) struct nvgpu_nvhost_dev *nvhost_dev, u32 id)
{ {
(void)id;
nvhost_dev->syncpt_id = 0U; nvhost_dev->syncpt_id = 0U;
nvhost_dev->syncpt_value = 0U; nvhost_dev->syncpt_value = 0U;
} }
@@ -124,6 +132,8 @@ u32 nvgpu_nvhost_get_syncpt_client_managed(
struct nvgpu_nvhost_dev *nvhost_dev, struct nvgpu_nvhost_dev *nvhost_dev,
const char *syncpt_name) const char *syncpt_name)
{ {
(void)syncpt_name;
/* Only allocate new syncpt if nothing exists already */ /* Only allocate new syncpt if nothing exists already */
if (nvhost_dev->syncpt_id == 0U) { if (nvhost_dev->syncpt_id == 0U) {
allocate_new_syncpt(nvhost_dev); allocate_new_syncpt(nvhost_dev);
@@ -150,12 +160,17 @@ void nvgpu_nvhost_syncpt_set_safe_state(
bool nvgpu_nvhost_syncpt_is_expired_ext( bool nvgpu_nvhost_syncpt_is_expired_ext(
struct nvgpu_nvhost_dev *nvhost_dev, u32 id, u32 thresh) struct nvgpu_nvhost_dev *nvhost_dev, u32 id, u32 thresh)
{ {
(void)nvhost_dev;
(void)id;
(void)thresh;
return true; return true;
} }
bool nvgpu_nvhost_syncpt_is_valid_pt_ext( bool nvgpu_nvhost_syncpt_is_valid_pt_ext(
struct nvgpu_nvhost_dev *nvhost_dev, u32 id) struct nvgpu_nvhost_dev *nvhost_dev, u32 id)
{ {
(void)nvhost_dev;
(void)id;
return true; return true;
} }
@@ -163,12 +178,20 @@ int nvgpu_nvhost_intr_register_notifier(
struct nvgpu_nvhost_dev *nvhost_dev, u32 id, u32 thresh, struct nvgpu_nvhost_dev *nvhost_dev, u32 id, u32 thresh,
void (*callback)(void *, int), void *private_data) void (*callback)(void *, int), void *private_data)
{ {
(void)nvhost_dev;
(void)id;
(void)thresh;
(void)callback;
(void)private_data;
return -ENOSYS; return -ENOSYS;
} }
int nvgpu_nvhost_syncpt_read_ext_check( int nvgpu_nvhost_syncpt_read_ext_check(
struct nvgpu_nvhost_dev *nvhost_dev, u32 id, u32 *val) struct nvgpu_nvhost_dev *nvhost_dev, u32 id, u32 *val)
{ {
(void)nvhost_dev;
(void)id;
(void)val;
return -ENOSYS; return -ENOSYS;
} }
@@ -176,5 +199,10 @@ int nvgpu_nvhost_syncpt_wait_timeout_ext(
struct nvgpu_nvhost_dev *nvhost_dev, u32 id, struct nvgpu_nvhost_dev *nvhost_dev, u32 id,
u32 thresh, u32 timeout, u32 waiter_index) u32 thresh, u32 timeout, u32 waiter_index)
{ {
(void)nvhost_dev;
(void)id;
(void)thresh;
(void)timeout;
(void)waiter_index;
return -ENOSYS; return -ENOSYS;
} }

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -29,6 +29,7 @@
struct vgpu_priv_data *vgpu_get_priv_data(struct gk20a *g) struct vgpu_priv_data *vgpu_get_priv_data(struct gk20a *g)
{ {
(void)g;
BUG(); BUG();
return NULL; return NULL;
} }
@@ -36,17 +37,25 @@ struct vgpu_priv_data *vgpu_get_priv_data(struct gk20a *g)
int vgpu_ivc_init(struct gk20a *g, u32 elems, int vgpu_ivc_init(struct gk20a *g, u32 elems,
const size_t *queue_sizes, u32 queue_start, u32 num_queues) const size_t *queue_sizes, u32 queue_start, u32 num_queues)
{ {
(void)g;
(void)elems;
(void)queue_sizes;
(void)queue_start;
(void)num_queues;
BUG(); BUG();
return 0; return 0;
} }
void vgpu_ivc_deinit(u32 queue_start, u32 num_queues) void vgpu_ivc_deinit(u32 queue_start, u32 num_queues)
{ {
(void)queue_start;
(void)num_queues;
BUG(); BUG();
} }
void vgpu_ivc_release(void *handle) void vgpu_ivc_release(void *handle)
{ {
(void)handle;
BUG(); BUG();
} }
@@ -59,12 +68,21 @@ u32 vgpu_ivc_get_server_vmid(void)
int vgpu_ivc_recv(u32 index, void **handle, void **data, int vgpu_ivc_recv(u32 index, void **handle, void **data,
size_t *size, u32 *sender) size_t *size, u32 *sender)
{ {
(void)index;
(void)handle;
(void)data;
(void)size;
(void)sender;
BUG(); BUG();
return 0; return 0;
} }
int vgpu_ivc_send(u32 peer, u32 index, void *data, size_t size) int vgpu_ivc_send(u32 peer, u32 index, void *data, size_t size)
{ {
(void)peer;
(void)index;
(void)data;
(void)size;
BUG(); BUG();
return 0; return 0;
} }
@@ -72,6 +90,11 @@ int vgpu_ivc_send(u32 peer, u32 index, void *data, size_t size)
int vgpu_ivc_sendrecv(u32 peer, u32 index, void **handle, int vgpu_ivc_sendrecv(u32 peer, u32 index, void **handle,
void **data, size_t *size) void **data, size_t *size)
{ {
(void)peer;
(void)index;
(void)handle;
(void)data;
(void)size;
BUG(); BUG();
return 0; return 0;
} }
@@ -85,42 +108,52 @@ u32 vgpu_ivc_get_peer_self(void)
void *vgpu_ivc_oob_get_ptr(u32 peer, u32 index, void **ptr, void *vgpu_ivc_oob_get_ptr(u32 peer, u32 index, void **ptr,
size_t *size) size_t *size)
{ {
(void)peer;
(void)index;
(void)ptr;
(void)size;
BUG(); BUG();
return NULL; return NULL;
} }
void vgpu_ivc_oob_put_ptr(void *handle) void vgpu_ivc_oob_put_ptr(void *handle)
{ {
(void)handle;
BUG(); BUG();
} }
struct tegra_hv_ivm_cookie *vgpu_ivm_mempool_reserve(unsigned int id) struct tegra_hv_ivm_cookie *vgpu_ivm_mempool_reserve(unsigned int id)
{ {
(void)id;
BUG(); BUG();
return NULL; return NULL;
} }
int vgpu_ivm_mempool_unreserve(struct tegra_hv_ivm_cookie *cookie) int vgpu_ivm_mempool_unreserve(struct tegra_hv_ivm_cookie *cookie)
{ {
(void)cookie;
BUG(); BUG();
return 0; return 0;
} }
u64 vgpu_ivm_get_ipa(struct tegra_hv_ivm_cookie *cookie) u64 vgpu_ivm_get_ipa(struct tegra_hv_ivm_cookie *cookie)
{ {
(void)cookie;
BUG(); BUG();
return 0ULL; return 0ULL;
} }
u64 vgpu_ivm_get_size(struct tegra_hv_ivm_cookie *cookie) u64 vgpu_ivm_get_size(struct tegra_hv_ivm_cookie *cookie)
{ {
(void)cookie;
BUG(); BUG();
return 0ULL; return 0ULL;
} }
void *vgpu_ivm_mempool_map(struct tegra_hv_ivm_cookie *cookie) void *vgpu_ivm_mempool_map(struct tegra_hv_ivm_cookie *cookie)
{ {
(void)cookie;
BUG(); BUG();
return NULL; return NULL;
} }
@@ -128,10 +161,13 @@ void *vgpu_ivm_mempool_map(struct tegra_hv_ivm_cookie *cookie)
void vgpu_ivm_mempool_unmap(struct tegra_hv_ivm_cookie *cookie, void vgpu_ivm_mempool_unmap(struct tegra_hv_ivm_cookie *cookie,
void *addr) void *addr)
{ {
(void)cookie;
(void)addr;
BUG(); BUG();
} }
int vgpu_init_hal_os(struct gk20a *g) int vgpu_init_hal_os(struct gk20a *g)
{ {
(void)g;
BUG(); BUG();
return -ENOSYS; return -ENOSYS;
} }

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -40,6 +40,9 @@ struct nvgpu_mapped_buf *nvgpu_vm_find_mapping(struct vm_gk20a *vm,
{ {
struct nvgpu_mapped_buf *mapped_buffer = NULL; struct nvgpu_mapped_buf *mapped_buffer = NULL;
(void)os_buf;
(void)kind;
mapped_buffer = nvgpu_vm_find_mapped_buf(vm, map_addr); mapped_buffer = nvgpu_vm_find_mapped_buf(vm, map_addr);
if (mapped_buffer == NULL) { if (mapped_buffer == NULL) {
return NULL; return NULL;

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -47,6 +47,7 @@ bool nvgpu_platform_is_fpga(struct gk20a *g)
bool nvgpu_is_hypervisor_mode(struct gk20a *g) bool nvgpu_is_hypervisor_mode(struct gk20a *g)
{ {
(void)g;
return false; return false;
} }
@@ -59,5 +60,6 @@ bool nvgpu_is_soc_t194_a01(struct gk20a *g)
int nvgpu_init_soc_vars(struct gk20a *g) int nvgpu_init_soc_vars(struct gk20a *g)
{ {
(void)g;
return 0; return 0;
} }

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -52,5 +52,9 @@ void nvgpu_ecc_sysfs_remove(struct gk20a *g)
int nvgpu_cic_mon_report_err_safety_services(struct gk20a *g, int nvgpu_cic_mon_report_err_safety_services(struct gk20a *g,
void *err_info, size_t err_size, bool is_critical) void *err_info, size_t err_size, bool is_critical)
{ {
(void)g;
(void)err_info;
(void)err_size;
(void)is_critical;
return 0; return 0;
} }

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -82,6 +82,7 @@ s64 nvgpu_current_time_us(void)
#ifdef __NVGPU_POSIX__ #ifdef __NVGPU_POSIX__
void nvgpu_delay_usecs(unsigned int usecs) void nvgpu_delay_usecs(unsigned int usecs)
{ {
(void)usecs;
} }
#ifdef CONFIG_NVGPU_NON_FUSA #ifdef CONFIG_NVGPU_NON_FUSA
@@ -199,7 +200,7 @@ static void nvgpu_usleep(unsigned int usecs)
NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 10_3), "SWE-NVGPU-204-SWSADR.docx") NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 10_3), "SWE-NVGPU-204-SWSADR.docx")
NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_CERT(INT31_C), "SWE-NVGPU-209-SWSADR.docx") NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_CERT(INT31_C), "SWE-NVGPU-209-SWSADR.docx")
ret = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &rqtp, NULL); ret = clock_nanosleep(CLOCK_MONOTONIC, (int)TIMER_ABSTIME, &rqtp, NULL);
NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_CERT(INT31_C)) NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_CERT(INT31_C))
NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 10_3)) NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 10_3))
if (ret != 0) { if (ret != 0) {
@@ -218,6 +219,7 @@ void nvgpu_udelay(unsigned int usecs)
void nvgpu_usleep_range(unsigned int min_us, unsigned int max_us) void nvgpu_usleep_range(unsigned int min_us, unsigned int max_us)
{ {
(void)max_us;
nvgpu_udelay(min_us); nvgpu_udelay(min_us);
} }
@@ -238,7 +240,7 @@ void nvgpu_msleep(unsigned int msecs)
NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 10_3), "SWE-NVGPU-204-SWSADR.docx") NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 10_3), "SWE-NVGPU-204-SWSADR.docx")
NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_CERT(INT31_C), "SWE-NVGPU-209-SWSADR.docx") NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_CERT(INT31_C), "SWE-NVGPU-209-SWSADR.docx")
ret = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &rqtp, NULL); ret = clock_nanosleep(CLOCK_MONOTONIC, (int)TIMER_ABSTIME, &rqtp, NULL);
NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 10_3)) NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 10_3))
NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_CERT(INT31_C)) NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_CERT(INT31_C))
if (ret != 0) { if (ret != 0) {

View File

@@ -1,6 +1,6 @@
################################### tell Emacs this is a -*- makefile-gmake -*- ################################### tell Emacs this is a -*- makefile-gmake -*-
# #
# Copyright (c) 2020, NVIDIA CORPORATION. All Rights Reserved. # Copyright (c) 2020-2022, NVIDIA CORPORATION. All Rights Reserved.
# #
# NVIDIA CORPORATION and its licensors retain all intellectual property # NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation # and proprietary rights in and to this software, related documentation
@@ -64,6 +64,15 @@ NV_COMPONENT_CFLAGS += -DNVGPU_UNITTEST_FAULT_INJECTION_ENABLEMENT
_NV_TOOLCHAIN_CFLAGS += -rdynamic -g _NV_TOOLCHAIN_CFLAGS += -rdynamic -g
ifneq ($(CONFIG_NVGPU_DGPU),1)
_NV_TOOLCHAIN_CFLAGS += \
-Werror -Wall -Wextra \
-Wmissing-braces -Wpointer-arith -Wundef \
-Wconversion -Wsign-conversion \
-Wformat-security \
-Wmissing-declarations -Wredundant-decls -Wimplicit-fallthrough
endif
-include $(NVGPU_SOURCE)/Makefile.sources -include $(NVGPU_SOURCE)/Makefile.sources
-include $(NVGPU_NEXT_SOURCE)/Makefile.sources -include $(NVGPU_NEXT_SOURCE)/Makefile.sources