diff --git a/drivers/gpu/nvgpu/common/acr/acr_blob_construct.c b/drivers/gpu/nvgpu/common/acr/acr_blob_construct.c
index b46e45630..89f7c9d1c 100644
--- a/drivers/gpu/nvgpu/common/acr/acr_blob_construct.c
+++ b/drivers/gpu/nvgpu/common/acr/acr_blob_construct.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -977,6 +977,8 @@ static int lsf_gen_wpr_requirements(struct gk20a *g,
 	u32 wpr_offset;
 	u32 flcn_cnt;
 
+	(void)g;
+
 	/*
 	 * Start with an array of WPR headers at the base of the WPR.
 	 * The expectation here is that the secure falcon will do a single DMA
diff --git a/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v0.c b/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v0.c
index 848d4c0ca..a08c8dbea 100644
--- a/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v0.c
+++ b/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v0.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -348,6 +348,8 @@ static int lsf_gen_wpr_requirements(struct gk20a *g, struct ls_flcn_mgr_v0 *plsf
 	struct lsfm_managed_ucode_img_v0 *pnode = plsfm->ucode_img_list;
 	u32 wpr_offset;
 
+	(void)g;
+
 	/*
 	 * Start with an array of WPR headers at the base of the WPR.
 	 * The expectation here is that the secure falcon will do a single DMA
diff --git a/drivers/gpu/nvgpu/common/acr/acr_bootstrap.c b/drivers/gpu/nvgpu/common/acr/acr_bootstrap.c
index d247ec897..01becd067 100644
--- a/drivers/gpu/nvgpu/common/acr/acr_bootstrap.c
+++ b/drivers/gpu/nvgpu/common/acr/acr_bootstrap.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -386,7 +386,7 @@ int nvgpu_acr_bootstrap_hs_ucode_riscv(struct gk20a *g, struct nvgpu_acr *acr)
 		timeout = RISCV_BR_COMPLETION_TIMEOUT_NON_SILICON_MS;
 	}
 
-	err = nvgpu_acr_wait_for_riscv_brom_completion(flcn, timeout);
+	err = nvgpu_acr_wait_for_riscv_brom_completion(flcn, (int)timeout);
 
 	if (err == 0x0) {
 		nvgpu_acr_dbg(g, "RISCV BROM passed");
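A note on the recurring (void)g; additions in this series: casting an argument to void is a side-effect-free statement whose only purpose is to mark the parameter as intentionally unused, so -Wunused-parameter and MISRA C:2012 Rule 2.7 checkers stay quiet while the HAL-style function signatures stay uniform. A minimal sketch of the idiom (the function and types below are hypothetical, not nvgpu code):

struct gpu;	/* stand-in for struct gk20a */

static int query_wpr_size(struct gpu *g, unsigned int *size_out)
{
	(void)g;	/* kept for interface symmetry; unused on this path */

	*size_out = 0x100000U;
	return 0;
}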
diff --git a/drivers/gpu/nvgpu/common/acr/acr_sw_ga10b.c b/drivers/gpu/nvgpu/common/acr/acr_sw_ga10b.c
index 88d19c396..c95778e00 100644
--- a/drivers/gpu/nvgpu/common/acr/acr_sw_ga10b.c
+++ b/drivers/gpu/nvgpu/common/acr/acr_sw_ga10b.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -53,6 +53,8 @@ static int ga10b_bootstrap_hs_acr(struct gk20a *g, struct nvgpu_acr *acr)
 {
 	int err = 0;
 
+	(void)acr;
+
 	nvgpu_log_fn(g, " ");
 
 	err = nvgpu_acr_bootstrap_hs_ucode_riscv(g, g->acr);
@@ -75,6 +77,9 @@ static int ga10b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
 		&acr_desc->acr_falcon2_sysmem_desc;
 	struct flcn2_acr_desc *acr_sysmem_desc = &acr_desc->acr_sysmem_desc;
 
+	(void)acr;
+	(void)is_recovery;
+
 	nvgpu_log_fn(g, " ");
 
 #ifdef CONFIG_NVGPU_NON_FUSA
@@ -160,7 +165,7 @@ static int ga10b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
 		if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
 			acr_sysmem_desc->gpu_mode |= MIG_MODE;
 		} else {
-			acr_sysmem_desc->gpu_mode &= ~MIG_MODE;
+			acr_sysmem_desc->gpu_mode &= (u32)(~MIG_MODE);
 		}
 	}
 
diff --git a/drivers/gpu/nvgpu/common/acr/acr_sw_gm20b.c b/drivers/gpu/nvgpu/common/acr/acr_sw_gm20b.c
index 22cc471fc..2e873b64a 100644
--- a/drivers/gpu/nvgpu/common/acr/acr_sw_gm20b.c
+++ b/drivers/gpu/nvgpu/common/acr/acr_sw_gm20b.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -38,6 +38,8 @@ static int gm20b_bootstrap_hs_acr(struct gk20a *g, struct nvgpu_acr *acr)
 {
 	int err = 0;
 
+	(void)acr;
+
 	nvgpu_log_fn(g, " ");
 
 	err = nvgpu_acr_bootstrap_hs_ucode(g, g->acr, &g->acr->acr);
@@ -58,6 +60,8 @@ static int gm20b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
 	u32 *acr_ucode_header = NULL;
 	u32 *acr_ucode_data = NULL;
 
+	(void)acr;
+
 	nvgpu_log_fn(g, " ");
 
 	if (is_recovery) {
@@ -95,6 +99,7 @@ static int gm20b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
 static u32 gm20b_acr_lsf_pmu(struct gk20a *g,
 		struct acr_lsf_config *lsf)
 {
+	(void)g;
 	/* PMU LS falcon info */
 	lsf->falcon_id = FALCON_ID_PMU;
 	lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
@@ -110,6 +115,7 @@ static u32 gm20b_acr_lsf_pmu(struct gk20a *g,
 static u32 gm20b_acr_lsf_fecs(struct gk20a *g,
 		struct acr_lsf_config *lsf)
 {
+	(void)g;
 	/* FECS LS falcon info */
 	lsf->falcon_id = FALCON_ID_FECS;
 	lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
diff --git a/drivers/gpu/nvgpu/common/acr/acr_sw_gp10b.c b/drivers/gpu/nvgpu/common/acr/acr_sw_gp10b.c
index 315f37fb0..824ee3128 100644
--- a/drivers/gpu/nvgpu/common/acr/acr_sw_gp10b.c
+++ b/drivers/gpu/nvgpu/common/acr/acr_sw_gp10b.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -36,6 +36,7 @@ static u32 gp10b_acr_lsf_gpccs(struct gk20a *g,
 		struct acr_lsf_config *lsf)
 {
+	(void)g;
 	/* GPCCS LS falcon info */
 	lsf->falcon_id = FALCON_ID_GPCCS;
 	lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
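The (u32)(~MIG_MODE) change above is an essential-type fix rather than a behavioral one: if MIG_MODE is defined from a plain integer literal, integer promotion makes ~MIG_MODE a signed int, and masking an unsigned field with it mixes signedness (MISRA C:2012 essential-type rules, -Wsign-conversion). A hedged sketch with an assumed definition of MIG_MODE:

#include <stdint.h>

typedef uint32_t u32;

#define MIG_MODE (1 << 14)	/* assumed definition: a plain int literal */

static void clear_mig_mode(u32 *gpu_mode)
{
	/* ~ is computed in int, then converted back to u32 explicitly */
	*gpu_mode &= (u32)(~MIG_MODE);
}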
diff --git a/drivers/gpu/nvgpu/common/acr/acr_sw_gv11b.c b/drivers/gpu/nvgpu/common/acr/acr_sw_gv11b.c
index 0e258c3ac..62e24b09b 100644
--- a/drivers/gpu/nvgpu/common/acr/acr_sw_gv11b.c
+++ b/drivers/gpu/nvgpu/common/acr/acr_sw_gv11b.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -43,6 +43,8 @@ static int gv11b_bootstrap_hs_acr(struct gk20a *g, struct nvgpu_acr *acr)
 {
 	int err = 0;
 
+	(void)acr;
+
 	nvgpu_log_fn(g, " ");
 
 	err = nvgpu_acr_bootstrap_hs_ucode(g, g->acr, &g->acr->acr);
@@ -64,6 +66,9 @@ static int gv11b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
 	u32 *acr_ucode_data = NULL;
 	const u32 acr_desc_offset = 2U;
 
+	(void)acr;
+	(void)is_recovery;
+
 	nvgpu_log_fn(g, " ");
 
 #ifdef CONFIG_NVGPU_NON_FUSA
 	if (is_recovery) {
diff --git a/drivers/gpu/nvgpu/common/cbc/cbc.c b/drivers/gpu/nvgpu/common/cbc/cbc.c
index 272c41f93..8871edb9f 100644
--- a/drivers/gpu/nvgpu/common/cbc/cbc.c
+++ b/drivers/gpu/nvgpu/common/cbc/cbc.c
@@ -1,7 +1,7 @@
 /*
  * CBC
  *
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -94,6 +95,8 @@ int nvgpu_cbc_alloc(struct gk20a *g, size_t compbit_backing_size,
 {
 	struct nvgpu_cbc *cbc = g->cbc;
 
+	(void)vidmem_alloc;
+
 	if (nvgpu_mem_is_valid(&cbc->compbit_store.mem) != 0) {
 		return 0;
 	}
diff --git a/drivers/gpu/nvgpu/common/cic/mon/mon_ctxsw.c b/drivers/gpu/nvgpu/common/cic/mon/mon_ctxsw.c
index ca591e4d8..6895fa7cf 100644
--- a/drivers/gpu/nvgpu/common/cic/mon/mon_ctxsw.c
+++ b/drivers/gpu/nvgpu/common/cic/mon/mon_ctxsw.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include
 
 #include "cic_mon_priv.h"
 
@@ -91,6 +92,8 @@ void nvgpu_inject_ctxsw_swerror(struct gk20a *g, u32 hw_unit,
 {
 	struct ctxsw_err_info err_info;
 
+	(void)inst;
+
 	(void)memset(&err_info, ERR_INJECT_TEST_PATTERN, sizeof(err_info));
 
 	nvgpu_report_ctxsw_err(g, hw_unit, err_index, (void *)&err_info);
diff --git a/drivers/gpu/nvgpu/common/cic/mon/mon_gr.c b/drivers/gpu/nvgpu/common/cic/mon/mon_gr.c
index c2fe3c055..79b309258 100644
--- a/drivers/gpu/nvgpu/common/cic/mon/mon_gr.c
+++ b/drivers/gpu/nvgpu/common/cic/mon/mon_gr.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include
 
 #include "cic_mon_priv.h"
diff --git a/drivers/gpu/nvgpu/common/cic/mon/mon_lut.c b/drivers/gpu/nvgpu/common/cic/mon/mon_lut.c
index 4d5f55ad7..7196a0b04 100644
--- a/drivers/gpu/nvgpu/common/cic/mon/mon_lut.c
+++ b/drivers/gpu/nvgpu/common/cic/mon/mon_lut.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -96,5 +96,5 @@ int nvgpu_cic_mon_get_num_hw_modules(struct gk20a *g)
 		return -EINVAL;
 	}
 
-	return g->cic_mon->num_hw_modules;
+	return (int)g->cic_mon->num_hw_modules;
 }
diff --git a/drivers/gpu/nvgpu/common/cic/mon/mon_mmu.c b/drivers/gpu/nvgpu/common/cic/mon/mon_mmu.c
index 0c7e0c0f8..b4b523689 100644
--- a/drivers/gpu/nvgpu/common/cic/mon/mon_mmu.c
+++ b/drivers/gpu/nvgpu/common/cic/mon/mon_mmu.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include
 
 #include "cic_mon_priv.h"
diff --git a/drivers/gpu/nvgpu/common/cic/rm/rm_intr.c b/drivers/gpu/nvgpu/common/cic/rm/rm_intr.c
index 85136e8df..4ec4f708c 100644
--- a/drivers/gpu/nvgpu/common/cic/rm/rm_intr.c
+++ b/drivers/gpu/nvgpu/common/cic/rm/rm_intr.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -27,12 +27,12 @@
 void nvgpu_cic_rm_set_irq_stall(struct gk20a *g, u32 value)
 {
-	nvgpu_atomic_set(&g->cic_rm->sw_irq_stall_pending, value);
+	nvgpu_atomic_set(&g->cic_rm->sw_irq_stall_pending, (int)value);
 }
 
 void nvgpu_cic_rm_set_irq_nonstall(struct gk20a *g, u32 value)
 {
-	nvgpu_atomic_set(&g->cic_rm->sw_irq_nonstall_pending, value);
+	nvgpu_atomic_set(&g->cic_rm->sw_irq_nonstall_pending, (int)value);
 }
 
 int nvgpu_cic_rm_broadcast_last_irq_stall(struct gk20a *g)
diff --git a/drivers/gpu/nvgpu/common/debugger.c b/drivers/gpu/nvgpu/common/debugger.c
index 18fa4bacd..ed316f1dd 100644
--- a/drivers/gpu/nvgpu/common/debugger.c
+++ b/drivers/gpu/nvgpu/common/debugger.c
@@ -1,7 +1,7 @@
 /*
  * Tegra GK20A GPU Debugger/Profiler Driver
  *
- * Copyright (c) 2013-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2013-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -161,7 +161,7 @@ u32 nvgpu_set_powergate_locked(struct dbg_session_gk20a *dbg_s,
 	 * the global pg disabled refcount is zero
 	 */
 	if (g->dbg_powergating_disabled_refcount == 0) {
-		err = g->ops.debugger.dbg_set_powergate(dbg_s,
+		err = (u32)g->ops.debugger.dbg_set_powergate(dbg_s,
 				mode);
 	}
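The casts in rm_intr.c and mon_lut.c above all cross the same boundary: the atomics and return types are signed int while the stored counters are u32, so the narrowing has to be spelled out. A raw (int) cast is correct only while the value stays at or below INT_MAX; where that cannot be guaranteed, a checked helper is the safer shape. Sketch (hypothetical helper, not an nvgpu API):

#include <limits.h>
#include <stdint.h>

typedef uint32_t u32;

static int u32_to_int_checked(u32 v)
{
	/* clamp instead of letting the conversion wrap negative */
	return (v <= (u32)INT_MAX) ? (int)v : INT_MAX;
}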
diff --git a/drivers/gpu/nvgpu/common/device.c b/drivers/gpu/nvgpu/common/device.c
index ba2f3de42..96fe7189f 100644
--- a/drivers/gpu/nvgpu/common/device.c
+++ b/drivers/gpu/nvgpu/common/device.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -319,6 +319,8 @@ u32 nvgpu_device_get_copies(struct gk20a *g,
  */
 bool nvgpu_device_is_ce(struct gk20a *g, const struct nvgpu_device *dev)
 {
+	(void)g;
+
 	if (dev->type == NVGPU_DEVTYPE_COPY0 ||
 	    dev->type == NVGPU_DEVTYPE_COPY1 ||
 	    dev->type == NVGPU_DEVTYPE_COPY2 ||
@@ -331,5 +333,7 @@ bool nvgpu_device_is_ce(struct gk20a *g, const struct nvgpu_device *dev)
 
 bool nvgpu_device_is_graphics(struct gk20a *g, const struct nvgpu_device *dev)
 {
+	(void)g;
+
 	return dev->type == NVGPU_DEVTYPE_GRAPHICS;
 }
diff --git a/drivers/gpu/nvgpu/common/ecc.c b/drivers/gpu/nvgpu/common/ecc.c
index 830e5a05a..c284474f3 100644
--- a/drivers/gpu/nvgpu/common/ecc.c
+++ b/drivers/gpu/nvgpu/common/ecc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -23,6 +23,7 @@
 #include
 #include
 #include
+#include
 
 void nvgpu_ecc_stat_add(struct gk20a *g, struct nvgpu_ecc_stat *stat)
 {
diff --git a/drivers/gpu/nvgpu/common/engine_queues/engine_fb_queue.c b/drivers/gpu/nvgpu/common/engine_queues/engine_fb_queue.c
index 894d90f61..b6194e5e3 100644
--- a/drivers/gpu/nvgpu/common/engine_queues/engine_fb_queue.c
+++ b/drivers/gpu/nvgpu/common/engine_queues/engine_fb_queue.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -70,6 +70,8 @@ static bool engine_fb_queue_has_room(struct nvgpu_engine_fb_queue *queue,
 	u32 next_head = 0;
 	int err = 0;
 
+	(void)size;
+
 	err = queue->head(queue, &head, QUEUE_GET);
 	if (err != 0) {
 		nvgpu_err(queue->g, "queue head GET failed");
@@ -97,6 +99,9 @@ static int engine_fb_queue_write(struct nvgpu_engine_fb_queue *queue,
 	u32 entry_offset = 0U;
 	int err = 0;
 
+	(void)src;
+	(void)size;
+
 	if (queue->fbq.work_buffer == NULL) {
 		nvgpu_err(g, "Invalid/Unallocated work buffer");
 		err = -EINVAL;
diff --git a/drivers/gpu/nvgpu/common/fbp/fbp.c b/drivers/gpu/nvgpu/common/fbp/fbp.c
index 9c013d1a6..892d9b56d 100644
--- a/drivers/gpu/nvgpu/common/fbp/fbp.c
+++ b/drivers/gpu/nvgpu/common/fbp/fbp.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -84,7 +84,7 @@ int nvgpu_fbp_init_support(struct gk20a *g)
 
 	/* get active L2 mask per FBP */
 	for_each_set_bit(i, &fbp_en_mask_tmp, fbp->max_fbps_count) {
-		tmp = g->ops.fuse.fuse_status_opt_l2_fbp(g, i);
+		tmp = g->ops.fuse.fuse_status_opt_l2_fbp(g, (u32)i);
 		fbp->fbp_l2_en_mask[i] = l2_all_en_mask ^ tmp;
 	}
 #endif
diff --git a/drivers/gpu/nvgpu/common/fence/fence_syncpt.c b/drivers/gpu/nvgpu/common/fence/fence_syncpt.c
index f57d1e54d..2329002d6 100644
--- a/drivers/gpu/nvgpu/common/fence/fence_syncpt.c
+++ b/drivers/gpu/nvgpu/common/fence/fence_syncpt.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -65,6 +65,7 @@ static bool nvgpu_fence_syncpt_is_expired(struct nvgpu_fence_type *f)
 
 static void nvgpu_fence_syncpt_release(struct nvgpu_fence_type *f)
 {
+	(void)f;
 }
 
 static const struct nvgpu_fence_ops nvgpu_fence_syncpt_ops = {
diff --git a/drivers/gpu/nvgpu/common/fifo/channel.c b/drivers/gpu/nvgpu/common/fifo/channel.c
index 9d500cc9d..2cb4ef515 100644
--- a/drivers/gpu/nvgpu/common/fifo/channel.c
+++ b/drivers/gpu/nvgpu/common/fifo/channel.c
@@ -1,7 +1,7 @@
 /*
  * GK20A Graphics channel
  *
- * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -65,6 +65,7 @@
 #endif
 #include
 #include
+#include
 
 #include "channel_wdt.h"
 #include "channel_worker.h"
@@ -829,6 +830,8 @@ static void channel_free_invoke_deferred_engine_reset(struct nvgpu_channel *ch)
 		nvgpu_mutex_release(&g->fifo.engines_reset_mutex);
 	}
 
+#else
+	(void)ch;
 #endif
 }
 
@@ -848,6 +851,8 @@ static void channel_free_invoke_sync_destroy(struct nvgpu_channel *ch)
 		ch->user_sync = NULL;
 	}
 	nvgpu_mutex_release(&ch->sync_lock);
+#else
+	(void)ch;
 #endif
 }
 
@@ -881,6 +886,8 @@ static void channel_free_unlink_debug_session(struct nvgpu_channel *ch)
 	}
 
 	nvgpu_mutex_release(&g->dbg_sessions_lock);
+#else
+	(void)ch;
 #endif
 }
 
@@ -1097,6 +1104,8 @@ static void channel_dump_ref_actions(struct nvgpu_channel *ch)
 	}
 
 	nvgpu_spinlock_release(&ch->ref_actions_lock);
+#else
+	(void)ch;
 #endif
 }
 
@@ -1158,6 +1167,8 @@ struct nvgpu_channel *nvgpu_channel_get__func(struct nvgpu_channel *ch,
 	if (ret != NULL) {
 		trace_nvgpu_channel_get(ch->chid, caller);
 	}
+#else
+	(void)caller;
 #endif
 
 	return ret;
@@ -1170,6 +1181,8 @@ void nvgpu_channel_put__func(struct nvgpu_channel *ch, const char *caller)
 #endif
 #ifdef CONFIG_NVGPU_TRACE
 	trace_nvgpu_channel_put(ch->chid, caller);
+#else
+	(void)caller;
 #endif
 	nvgpu_atomic_dec(&ch->ref_count);
 	if (nvgpu_cond_broadcast(&ch->ref_count_dec_wq) != 0) {
@@ -1962,6 +1975,8 @@ static void nvgpu_channel_semaphore_signal(struct nvgpu_channel *c,
 {
 	struct gk20a *g = c->g;
 
+	(void)post_events;
+
 	if (nvgpu_cond_broadcast_interruptible(
 			&c->semaphore_wq) != 0) {
 		nvgpu_warn(g, "failed to broadcast");
 	}
@@ -2091,6 +2106,10 @@ static void nvgpu_channel_sync_debug_dump(struct gk20a *g,
 			info->inst.semaphored);
 
 	g->ops.pbdma.syncpt_debug_dump(g, o, info);
+#else
+	(void)g;
+	(void)o;
+	(void)info;
 #endif
 }
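The channel.c hunks above repeat one pattern worth naming: when a parameter is consumed only inside an #ifdef block, a matching #else branch with (void)param; keeps the parameter referenced in every build configuration, so unused-parameter diagnostics cannot fire in the feature-disabled build. Reduced illustration (the types and trace function are hypothetical):

struct ch { unsigned int chid; };

#ifdef CONFIG_NVGPU_TRACE
void trace_channel_get(unsigned int chid, const char *caller);
#endif

static void channel_get_note(struct ch *c, const char *caller)
{
#ifdef CONFIG_NVGPU_TRACE
	trace_channel_get(c->chid, caller);
#else
	(void)c;		/* unused when tracing is compiled out */
	(void)caller;
#endif
}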
diff --git a/drivers/gpu/nvgpu/common/fifo/channel_wdt.h b/drivers/gpu/nvgpu/common/fifo/channel_wdt.h
index d262ae073..8f6380d3d 100644
--- a/drivers/gpu/nvgpu/common/fifo/channel_wdt.h
+++ b/drivers/gpu/nvgpu/common/fifo/channel_wdt.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -36,7 +36,10 @@ void nvgpu_channel_worker_poll_wakeup_post_process_item(
 u32 nvgpu_channel_worker_poll_wakeup_condition_get_timeout(
 	struct nvgpu_worker *worker);
 #else
-static inline void nvgpu_channel_launch_wdt(struct nvgpu_channel *ch) {}
+static inline void nvgpu_channel_launch_wdt(struct nvgpu_channel *ch)
+{
+	(void)ch;
+}
 #endif /* CONFIG_NVGPU_CHANNEL_WDT */
 
 #endif /* NVGPU_COMMON_FIFO_CHANNEL_WDT_H */
diff --git a/drivers/gpu/nvgpu/common/fifo/job.c b/drivers/gpu/nvgpu/common/fifo/job.c
index 635957acb..564984fda 100644
--- a/drivers/gpu/nvgpu/common/fifo/job.c
+++ b/drivers/gpu/nvgpu/common/fifo/job.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -29,13 +29,7 @@
 #include
 #include
 #include
-
-static inline struct nvgpu_channel_job *
-channel_gk20a_job_from_list(struct nvgpu_list_node *node)
-{
-	return (struct nvgpu_channel_job *)
-		((uintptr_t)node - offsetof(struct nvgpu_channel_job, list));
-};
+#include
 
 int nvgpu_channel_alloc_job(struct nvgpu_channel *c,
 		struct nvgpu_channel_job **job_out)
@@ -58,6 +52,8 @@ int nvgpu_channel_alloc_job(struct nvgpu_channel *c,
 void nvgpu_channel_free_job(struct nvgpu_channel *c,
 		struct nvgpu_channel_job *job)
 {
+	(void)c;
+	(void)job;
 	/*
 	 * Nothing needed for now. The job contents are preallocated. The
 	 * completion fence may briefly outlive the job, but the job memory is
@@ -88,6 +84,7 @@ struct nvgpu_channel_job *nvgpu_channel_joblist_peek(struct nvgpu_channel *c)
 
 void nvgpu_channel_joblist_add(struct nvgpu_channel *c,
 		struct nvgpu_channel_job *job)
 {
+	(void)job;
 	c->joblist.pre_alloc.put = (c->joblist.pre_alloc.put + 1U) %
 		(c->joblist.pre_alloc.length);
 }
@@ -95,6 +92,7 @@ void nvgpu_channel_joblist_delete(struct nvgpu_channel *c,
 		struct nvgpu_channel_job *job)
 {
+	(void)job;
 	c->joblist.pre_alloc.get = (c->joblist.pre_alloc.get + 1U) %
 		(c->joblist.pre_alloc.length);
 }
diff --git a/drivers/gpu/nvgpu/common/fifo/pbdma.c b/drivers/gpu/nvgpu/common/fifo/pbdma.c
index ffbe25d5c..2572c1581 100644
--- a/drivers/gpu/nvgpu/common/fifo/pbdma.c
+++ b/drivers/gpu/nvgpu/common/fifo/pbdma.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -51,5 +51,6 @@ int nvgpu_pbdma_setup_sw(struct gk20a *g)
 
 void nvgpu_pbdma_cleanup_sw(struct gk20a *g)
 {
+	(void)g;
 	return;
 }
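The helper deleted from job.c above is the classic container-of construction: recover the enclosing struct from a pointer to its embedded list node via offsetof. The deletion pairs with the new (name-elided) include, so the helper presumably now comes from a shared header rather than being open-coded per file. A reconstruction of the pattern, mirroring the removed code with stand-in types:

#include <stddef.h>
#include <stdint.h>

struct list_node { struct list_node *prev, *next; };

struct job {
	int id;
	struct list_node list;		/* node embedded in the payload */
};

static inline struct job *job_from_list(struct list_node *node)
{
	/* step back from the member address to the struct base */
	return (struct job *)((uintptr_t)node - offsetof(struct job, list));
}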
diff --git a/drivers/gpu/nvgpu/common/fifo/priv_cmdbuf.c b/drivers/gpu/nvgpu/common/fifo/priv_cmdbuf.c
index 638807461..44a0552f8 100644
--- a/drivers/gpu/nvgpu/common/fifo/priv_cmdbuf.c
+++ b/drivers/gpu/nvgpu/common/fifo/priv_cmdbuf.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -32,6 +32,7 @@
 #include
 #include
 #include
+#include
 
 struct priv_cmd_entry {
 	struct nvgpu_mem *mem;
@@ -313,6 +314,8 @@ void nvgpu_priv_cmdbuf_append_zeros(struct gk20a *g, struct priv_cmd_entry *e,
 void nvgpu_priv_cmdbuf_finish(struct gk20a *g, struct priv_cmd_entry *e,
 		u64 *gva, u32 *size)
 {
+	(void)g;
+
 	/*
 	 * The size is written to the pushbuf entry, so make sure this buffer
 	 * is complete at this point. The responsibility of the channel sync is
diff --git a/drivers/gpu/nvgpu/common/fifo/runlist.c b/drivers/gpu/nvgpu/common/fifo/runlist.c
index 43fadf5e4..77b16f343 100644
--- a/drivers/gpu/nvgpu/common/fifo/runlist.c
+++ b/drivers/gpu/nvgpu/common/fifo/runlist.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -919,6 +919,8 @@ static u32 nvgpu_runlist_get_pbdma_mask(struct gk20a *g,
 	u32 i;
 	u32 pbdma_id;
 
+	(void)g;
+
 	nvgpu_assert(runlist != NULL);
 
 	for ( i = 0U; i < PBDMA_PER_RUNLIST_SIZE; i++) {
@@ -1019,12 +1021,12 @@ static struct nvgpu_runlist_domain *nvgpu_runlist_domain_alloc(struct gk20a *g,
 
 	(void)strncpy(domain->name, name, sizeof(domain->name) - 1U);
 
-	domain->mem = init_rl_mem(g, runlist_size);
+	domain->mem = init_rl_mem(g, (u32)runlist_size);
 	if (domain->mem == NULL) {
 		goto free_domain;
 	}
 
-	domain->mem_hw = init_rl_mem(g, runlist_size);
+	domain->mem_hw = init_rl_mem(g, (u32)runlist_size);
 	if (domain->mem_hw == NULL) {
 		goto free_mem;
 	}
diff --git a/drivers/gpu/nvgpu/common/fifo/submit.c b/drivers/gpu/nvgpu/common/fifo/submit.c
index 4c4ab77b7..ee16f4777 100644
--- a/drivers/gpu/nvgpu/common/fifo/submit.c
+++ b/drivers/gpu/nvgpu/common/fifo/submit.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -284,11 +284,11 @@ static int nvgpu_submit_append_gpfifo(struct nvgpu_channel *c,
 {
 	int err;
 
-	if ((kern_gpfifo == NULL)
 #ifdef CONFIG_NVGPU_DGPU
-	    && (c->gpfifo.pipe == NULL)
+	if ((kern_gpfifo == NULL) && (c->gpfifo.pipe == NULL)) {
+#else
+	if (kern_gpfifo == NULL) {
 #endif
-	    ) {
 		/*
 		 * This path (from userspace to sysmem) is special in order to
 		 * avoid two copies unnecessarily (from user to pipe, then from
diff --git a/drivers/gpu/nvgpu/common/fifo/tsg.c b/drivers/gpu/nvgpu/common/fifo/tsg.c
index 62c8d0531..ad3405c63 100644
--- a/drivers/gpu/nvgpu/common/fifo/tsg.c
+++ b/drivers/gpu/nvgpu/common/fifo/tsg.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -536,6 +536,8 @@ void nvgpu_tsg_set_unserviceable(struct gk20a *g,
 {
 	struct nvgpu_channel *ch = NULL;
 
+	(void)g;
+
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		if (nvgpu_channel_get(ch) != NULL) {
@@ -798,6 +800,7 @@ int nvgpu_tsg_set_long_timeslice(struct nvgpu_tsg *tsg, u32 timeslice_us)
 
 u32 nvgpu_tsg_default_timeslice_us(struct gk20a *g)
 {
+	(void)g;
 	return NVGPU_TSG_TIMESLICE_DEFAULT_US;
 }
diff --git a/drivers/gpu/nvgpu/common/gr/ctx.c b/drivers/gpu/nvgpu/common/gr/ctx.c
index a50601566..57b7dc82d 100644
--- a/drivers/gpu/nvgpu/common/gr/ctx.c
+++ b/drivers/gpu/nvgpu/common/gr/ctx.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -28,6 +28,7 @@
 #include
 #include
 #include
+#include
 #include
 
 #include "common/gr/ctx_priv.h"
@@ -162,6 +163,8 @@ void nvgpu_gr_ctx_free_patch_ctx(struct gk20a *g, struct vm_gk20a *vm,
 {
 	struct patch_desc *patch_ctx = &gr_ctx->patch_ctx;
 
+	(void)g;
+
 	if (nvgpu_mem_is_valid(&patch_ctx->mem)) {
 		nvgpu_dma_unmap_free(vm, &patch_ctx->mem);
 		patch_ctx->data_count = 0;
@@ -201,6 +204,9 @@ static int nvgpu_gr_ctx_map_ctx_circular_buffer(struct gk20a *g,
 	u32 *g_bfr_index;
 	u64 gpu_va = 0ULL;
 
+	(void)g;
+	(void)vpr;
+
 	g_bfr_va = &gr_ctx->global_ctx_buffer_va[0];
 	g_bfr_index = &gr_ctx->global_ctx_buffer_index[0];
 
@@ -242,6 +248,9 @@ static int nvgpu_gr_ctx_map_ctx_attribute_buffer(struct gk20a *g,
 	u32 *g_bfr_index;
 	u64 gpu_va = 0ULL;
 
+	(void)g;
+	(void)vpr;
+
 	g_bfr_va = &gr_ctx->global_ctx_buffer_va[0];
 	g_bfr_index = &gr_ctx->global_ctx_buffer_index[0];
 
@@ -284,6 +293,9 @@ static int nvgpu_gr_ctx_map_ctx_pagepool_buffer(struct gk20a *g,
 	u32 *g_bfr_index;
 	u64 gpu_va = 0ULL;
 
+	(void)g;
+	(void)vpr;
+
 	g_bfr_va = &gr_ctx->global_ctx_buffer_va[0];
 	g_bfr_index = &gr_ctx->global_ctx_buffer_index[0];
 
@@ -326,6 +338,8 @@ static int nvgpu_gr_ctx_map_ctx_buffer(struct gk20a *g,
 	u32 *g_bfr_index;
 	u64 gpu_va = 0ULL;
 
+	(void)g;
+
 	g_bfr_va = &gr_ctx->global_ctx_buffer_va[0];
 	g_bfr_index = &gr_ctx->global_ctx_buffer_index[0];
 
@@ -479,6 +493,8 @@ void nvgpu_gr_ctx_load_golden_ctx_image(struct gk20a *g,
 	u64 virt_addr = 0;
 #endif
 
+	(void)cde;
+
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, " ");
 
 	mem = &gr_ctx->mem;
@@ -724,6 +740,8 @@ void nvgpu_gr_ctx_set_zcull_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
 {
 	struct zcull_ctx_desc *zcull_ctx = &gr_ctx->zcull_ctx;
 
+	(void)g;
+
 	zcull_ctx->ctx_sw_mode = mode;
 	zcull_ctx->gpu_va = gpu_va;
 }
diff --git a/drivers/gpu/nvgpu/common/gr/fecs_trace.c b/drivers/gpu/nvgpu/common/gr/fecs_trace.c
index 0deae927e..241a3cc2c 100644
--- a/drivers/gpu/nvgpu/common/gr/fecs_trace.c
+++ b/drivers/gpu/nvgpu/common/gr/fecs_trace.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -135,7 +135,7 @@ int nvgpu_gr_fecs_trace_init(struct gk20a *g)
 {
 	struct nvgpu_gr_fecs_trace *trace;
 
-	if (!is_power_of_2(GK20A_FECS_TRACE_NUM_RECORDS)) {
+	if (!is_power_of_2((u32)GK20A_FECS_TRACE_NUM_RECORDS)) {
 		nvgpu_err(g, "invalid NUM_RECORDS chosen");
 		nvgpu_set_enabled(g, NVGPU_SUPPORT_FECS_CTXSW_TRACE, false);
 		return -EINVAL;
 	}
@@ -189,8 +189,8 @@ int nvgpu_gr_fecs_trace_deinit(struct gk20a *g)
 
 int nvgpu_gr_fecs_trace_num_ts(struct gk20a *g)
 {
-	return (g->ops.gr.ctxsw_prog.hw_get_ts_record_size_in_bytes()
-		- sizeof(struct nvgpu_fecs_trace_record)) / sizeof(u64);
+	return (int)((g->ops.gr.ctxsw_prog.hw_get_ts_record_size_in_bytes()
+		- sizeof(struct nvgpu_fecs_trace_record)) / sizeof(u64));
 }
 
 struct nvgpu_fecs_trace_record *nvgpu_gr_fecs_trace_get_record(
@@ -207,7 +207,7 @@ struct nvgpu_fecs_trace_record *nvgpu_gr_fecs_trace_get_record(
 
 	return (struct nvgpu_fecs_trace_record *)
 		((u8 *) mem->cpu_va +
-		(idx * g->ops.gr.ctxsw_prog.hw_get_ts_record_size_in_bytes()));
+		((u32)idx * g->ops.gr.ctxsw_prog.hw_get_ts_record_size_in_bytes()));
 }
 
 bool nvgpu_gr_fecs_trace_is_valid_record(struct gk20a *g,
@@ -262,8 +262,8 @@ int nvgpu_gr_fecs_trace_enable(struct gk20a *g)
 		 * (Bit 31:31) should be set to 1. Bits 30:0 represents
 		 * actual pointer value.
 		 */
-		write = write |
-			(BIT32(NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT));
+		write = (int)((u32)write |
+			(BIT32(NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT)));
 	}
 	g->ops.gr.fecs_trace.set_read_index(g, write);
@@ -315,8 +315,8 @@ int nvgpu_gr_fecs_trace_disable(struct gk20a *g)
 		 * For disabling FECS trace support, MAILBOX1's MSB
 		 * (Bit 31:31) should be set to 0.
 		 */
-		read = g->ops.gr.fecs_trace.get_read_index(g) &
-			(~(BIT32(NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT)));
+		read = (int)((u32)(g->ops.gr.fecs_trace.get_read_index(g)) &
+			(~(BIT32(NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT))));
 
 		g->ops.gr.fecs_trace.set_read_index(g, read);
@@ -420,7 +420,7 @@ int nvgpu_gr_fecs_trace_ring_read(struct gk20a *g, int index,
 
 	/* break out FECS record into trace events */
 	for (i = 0; i < nvgpu_gr_fecs_trace_num_ts(g); i++) {
-		entry.tag = g->ops.gr.ctxsw_prog.hw_get_ts_tag(r->ts[i]);
+		entry.tag = (u8)g->ops.gr.ctxsw_prog.hw_get_ts_tag(r->ts[i]);
 		entry.timestamp = g->ops.gr.ctxsw_prog.hw_record_ts_timestamp(r->ts[i]);
 		entry.timestamp <<= GK20A_FECS_TRACE_PTIMER_SHIFT;
@@ -434,8 +434,8 @@
 		case NVGPU_GPU_CTXSW_TAG_RESTORE_START:
 		case NVGPU_GPU_CTXSW_TAG_CONTEXT_START:
 			entry.context_id = r->new_context_id;
-			entry.pid = new_pid;
-			entry.vmid = new_vmid;
+			entry.pid = (u64)new_pid;
+			entry.vmid = (u8)new_vmid;
 			break;
 
 		case NVGPU_GPU_CTXSW_TAG_CTXSW_REQ_BY_HOST:
@@ -446,8 +446,8 @@
 		case NVGPU_GPU_CTXSW_TAG_FE_ACK_CILP:
 		case NVGPU_GPU_CTXSW_TAG_SAVE_END:
 			entry.context_id = r->context_id;
-			entry.pid = cur_pid;
-			entry.vmid = cur_vmid;
+			entry.pid = (u64)cur_pid;
+			entry.vmid = (u8)cur_vmid;
 			break;
 
 		default:
@@ -474,7 +474,7 @@
 		count++;
 	}
 
-	nvgpu_gr_fecs_trace_wake_up(g, vmid);
+	nvgpu_gr_fecs_trace_wake_up(g, (int)vmid);
 	return count;
 }
@@ -524,7 +524,7 @@ int nvgpu_gr_fecs_trace_poll(struct gk20a *g)
 
 	if (nvgpu_is_enabled(g, NVGPU_FECS_TRACE_FEATURE_CONTROL)) {
 		/* Bits 30:0 of MAILBOX1 represents actual read pointer value */
-		read = read & (~(BIT32(NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT)));
+		read = ((u32)read) & (~(BIT32(NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT)));
 	}
 
 	while (read != write) {
@@ -543,7 +543,7 @@ int nvgpu_gr_fecs_trace_poll(struct gk20a *g)
 		 * So, MSB of read pointer should be set back to 1. This will
 		 * keep FECS trace enabled.
 		 */
-		read = read | (BIT32(NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT));
+		read = (int)(((u32)read) | (BIT32(NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT)));
 	}
 
 	/* ensure FECS records has been updated before incrementing read index */
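Context for the fecs_trace.c pointer handling above: when NVGPU_FECS_TRACE_FEATURE_CONTROL is enabled, bit 31 of the MAILBOX1 read index doubles as the trace-enable flag, so bits 30:0 carry the real pointer, every read must mask the flag off, and every write-back must set it again. The casts appear because the index travels as a signed int through the get/set ops. A hedged restatement with local helpers (BIT32 mirrors the driver macro; the helper names are illustrative):

#include <stdint.h>

typedef uint32_t u32;

#define BIT32(b)		((u32)1U << (b))
#define FEATURE_CONTROL_BIT	31U	/* NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT */

static u32 mailbox_ptr_bits(u32 raw)	/* strip the enable flag: bits 30:0 */
{
	return raw & ~BIT32(FEATURE_CONTROL_BIT);
}

static u32 mailbox_with_enable(u32 ptr)	/* re-assert the flag on write-back */
{
	return ptr | BIT32(FEATURE_CONTROL_BIT);
}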
diff --git a/drivers/gpu/nvgpu/common/gr/global_ctx.c b/drivers/gpu/nvgpu/common/gr/global_ctx.c
index 6963a2f59..f434cede8 100644
--- a/drivers/gpu/nvgpu/common/gr/global_ctx.c
+++ b/drivers/gpu/nvgpu/common/gr/global_ctx.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -390,6 +390,7 @@ void nvgpu_gr_global_ctx_init_local_golden_image(struct gk20a *g,
 	struct nvgpu_gr_global_ctx_local_golden_image *local_golden_image,
 	struct nvgpu_mem *source_mem, size_t size)
 {
+	(void)size;
 	nvgpu_mem_rd_n(g, source_mem, 0, local_golden_image->context,
 		nvgpu_safe_cast_u64_to_u32(local_golden_image->size));
 }
diff --git a/drivers/gpu/nvgpu/common/gr/gr.c b/drivers/gpu/nvgpu/common/gr/gr.c
index 90230e664..c33f4a376 100644
--- a/drivers/gpu/nvgpu/common/gr/gr.c
+++ b/drivers/gpu/nvgpu/common/gr/gr.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -119,7 +119,7 @@ static int gr_alloc_global_ctx_buffers(struct gk20a *g, struct nvgpu_gr *gr)
 		NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP, size);
 
 #ifdef CONFIG_NVGPU_FECS_TRACE
-	size = nvgpu_gr_fecs_trace_buffer_size(g);
+	size = (u32)nvgpu_gr_fecs_trace_buffer_size(g);
 	nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr, "fecs_trace_buffer_size : %d", size);
 
 	nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
diff --git a/drivers/gpu/nvgpu/common/gr/gr_config.c b/drivers/gpu/nvgpu/common/gr/gr_config.c
index 503b211f4..184fc47c0 100644
--- a/drivers/gpu/nvgpu/common/gr/gr_config.c
+++ b/drivers/gpu/nvgpu/common/gr/gr_config.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -159,6 +159,8 @@ static void gr_config_set_gpc_mask(struct gk20a *g,
 	if (g->ops.gr.config.get_gpc_mask != NULL) {
 		config->gpc_mask = g->ops.gr.config.get_gpc_mask(g);
 	} else
+#else
+	(void)g;
 #endif
 	{
 		config->gpc_mask = nvgpu_safe_sub_u32(BIT32(config->gpc_count),
diff --git a/drivers/gpu/nvgpu/common/gr/gr_intr.c b/drivers/gpu/nvgpu/common/gr/gr_intr.c
index 3aa4b1486..94c3a99ab 100644
--- a/drivers/gpu/nvgpu/common/gr/gr_intr.c
+++ b/drivers/gpu/nvgpu/common/gr/gr_intr.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -31,6 +31,7 @@
 #if defined(CONFIG_NVGPU_CYCLESTATS)
 #include
 #endif
+#include
 
 #include
 #include
@@ -437,6 +438,8 @@ int nvgpu_gr_intr_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
 	bool disable_sm_exceptions = true;
 #endif
 
+	(void)post_event;
+
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
 
 	global_esr = g->ops.gr.intr.get_sm_hww_global_esr(g, gpc, tpc, sm);
@@ -525,6 +528,8 @@ int nvgpu_gr_intr_handle_fecs_error(struct gk20a *g, struct nvgpu_channel *ch,
 	u32 mailbox_id = NVGPU_GR_FALCON_FECS_CTXSW_MAILBOX6;
 	struct nvgpu_fecs_host_intr_status *fecs_host_intr;
 
+	(void)ch;
+
 	gr_fecs_intr = isr_data->fecs_intr;
 	if (gr_fecs_intr == 0U) {
 		return 0;
 	}
diff --git a/drivers/gpu/nvgpu/common/gr/obj_ctx.c b/drivers/gpu/nvgpu/common/gr/obj_ctx.c
index 5e5d1ad6c..32e90be59 100644
--- a/drivers/gpu/nvgpu/common/gr/obj_ctx.c
+++ b/drivers/gpu/nvgpu/common/gr/obj_ctx.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -143,6 +143,10 @@ static int nvgpu_gr_obj_ctx_set_graphics_preemption_mode(struct gk20a *g,
 {
 	int err = 0;
 
+	(void)config;
+	(void)gr_ctx_desc;
+	(void)vm;
+
 	/* set preemption modes */
 	switch (graphics_preempt_mode) {
 #ifdef CONFIG_NVGPU_GFXP
@@ -263,6 +267,9 @@ void nvgpu_gr_obj_ctx_update_ctxsw_preemption_mode(struct gk20a *g,
 	struct nvgpu_mem *mem;
 #endif
 
+	(void)config;
+	(void)subctx;
+
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, " ");
 
 	nvgpu_gr_ctx_set_preemption_modes(g, gr_ctx);
@@ -802,6 +809,9 @@ int nvgpu_gr_obj_ctx_alloc(struct gk20a *g,
 {
 	int err = 0;
 
+	(void)class_num;
+	(void)flags;
+
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, " ");
 
 	err = nvgpu_gr_obj_ctx_gr_ctx_alloc(g, golden_image, gr_ctx_desc,
diff --git a/drivers/gpu/nvgpu/common/gr/zbc.c b/drivers/gpu/nvgpu/common/gr/zbc.c
index 8299dfdc0..d95f05795 100644
--- a/drivers/gpu/nvgpu/common/gr/zbc.c
+++ b/drivers/gpu/nvgpu/common/gr/zbc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -412,6 +412,8 @@ static void nvgpu_gr_zbc_load_default_sw_stencil_table(struct gk20a *g,
 {
 	u32 index = zbc->min_stencil_index;
 
+	(void)g;
+
 	zbc->zbc_s_tbl[index].stencil = 0x0;
 	zbc->zbc_s_tbl[index].format = GR_ZBC_STENCIL_CLEAR_FMT_U8;
 	zbc->zbc_s_tbl[index].ref_cnt =
@@ -437,6 +439,8 @@ static void nvgpu_gr_zbc_load_default_sw_depth_table(struct gk20a *g,
 {
 	u32 index = zbc->min_depth_index;
 
+	(void)g;
+
 	zbc->zbc_dep_tbl[index].format = GR_ZBC_Z_FMT_VAL_FP32;
 	zbc->zbc_dep_tbl[index].depth = 0x3f800000;
 	zbc->zbc_dep_tbl[index].ref_cnt =
@@ -457,6 +461,8 @@ static void nvgpu_gr_zbc_load_default_sw_color_table(struct gk20a *g,
 	u32 i;
 	u32 index = zbc->min_color_index;
 
+	(void)g;
+
 	/* Opaque black (i.e. solid black, fmt 0x28 = A8B8G8R8) */
 	zbc->zbc_col_tbl[index].format = GR_ZBC_SOLID_BLACK_COLOR_FMT;
 	for (i = 0U; i < NVGPU_GR_ZBC_COLOR_VALUE_SIZE; i++) {
diff --git a/drivers/gpu/nvgpu/common/gr/zcull.c b/drivers/gpu/nvgpu/common/gr/zcull.c
index e86cf9562..f745ea9dd 100644
--- a/drivers/gpu/nvgpu/common/gr/zcull.c
+++ b/drivers/gpu/nvgpu/common/gr/zcull.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -82,6 +82,8 @@ void nvgpu_gr_zcull_deinit(struct gk20a *g, struct nvgpu_gr_zcull *gr_zcull)
 u32 nvgpu_gr_get_ctxsw_zcull_size(struct gk20a *g,
 	struct nvgpu_gr_zcull *gr_zcull)
 {
+	(void)g;
+
 	/* assuming zcull has already been initialized */
 	return gr_zcull->zcull_ctxsw_image_size;
 }
diff --git a/drivers/gpu/nvgpu/common/grmgr/grmgr.c b/drivers/gpu/nvgpu/common/grmgr/grmgr.c
index 12ce62bd3..033d3fcdc 100644
--- a/drivers/gpu/nvgpu/common/grmgr/grmgr.c
+++ b/drivers/gpu/nvgpu/common/grmgr/grmgr.c
@@ -1,7 +1,7 @@
 /*
  * GR MANAGER
  *
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -93,7 +93,7 @@ int nvgpu_init_gr_manager(struct gk20a *g)
 	for (gpc_id = 0U; gpc_id < gr_syspipe->num_gpc; gpc_id++) {
 		gr_syspipe->gpcs[gpc_id].logical_id = gpc_id;
 		nvgpu_assert(local_gpc_mask != 0U);
-		ffs_bit = nvgpu_ffs(local_gpc_mask) - 1U;
+		ffs_bit = (u32)(nvgpu_ffs(local_gpc_mask) - 1U);
 		local_gpc_mask &= ~(1U << ffs_bit);
 		gr_syspipe->gpcs[gpc_id].physical_id = ffs_bit;
 		gr_syspipe->gpcs[gpc_id].gpcgrp_id = 0U;
@@ -391,6 +391,10 @@ int nvgpu_grmgr_config_gr_remap_window(struct gk20a *g,
 			g->mig.cur_tid, g->mig.current_gr_syspipe_id,
 			gr_syspipe_id, enable, g->mig.recursive_ref_count);
 	}
+#else
+	(void)g;
+	(void)gr_syspipe_id;
+	(void)enable;
 #endif
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/common/init/nvgpu_init.c b/drivers/gpu/nvgpu/common/init/nvgpu_init.c
index 61068908b..eb3e61745 100644
@@ -529,6 +529,7 @@ static int nvgpu_init_boot_clk_or_clk_arb(struct gk20a *g)
 {
 	int err = 0;
 
+	(void)g;
 #ifdef CONFIG_NVGPU_LS_PMU
 	if (nvgpu_is_enabled(g, NVGPU_PMU_PSTATE) &&
 		(g->pmu->fw->ops.clk.clk_set_boot_clk != NULL)) {
@@ -566,6 +567,7 @@ static int nvgpu_init_per_device_identifier(struct gk20a *g)
 
 static int nvgpu_init_set_debugger_mode(struct gk20a *g)
 {
+	(void)g;
 #ifdef CONFIG_NVGPU_DEBUGGER
 	/* Restore the debug setting */
 	g->ops.fb.set_debug_mode(g, g->mmu_debug_ctrl);
@@ -603,6 +605,8 @@ static int nvgpu_init_xve_set_speed(struct gk20a *g)
 			return err;
 		}
 	}
+#else
+	(void)g;
 #endif
 	return 0;
 }
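On ffs_bit = (u32)(nvgpu_ffs(local_gpc_mask) - 1U) in grmgr.c above: find-first-set helpers conventionally return a 1-based bit position as an unsigned long, so subtracting one yields the 0-based index, and the cast narrows it to u32 for use as a shift count. A portable sketch of the same computation (not the nvgpu implementation); the loop assumes the caller has already asserted the mask is non-zero, as nvgpu_init_gr_manager does:

#include <stdint.h>

typedef uint32_t u32;

static u32 first_set_bit_index(unsigned long mask)
{
	u32 idx = 0U;

	while ((mask & 1UL) == 0UL) {	/* mask != 0 guaranteed by caller */
		mask >>= 1;
		idx++;
	}
	return idx;
}

Consuming the mask one GPC at a time then looks like idx = first_set_bit_index(m); m &= ~(1UL << idx);, which is the shape of the loop in the hunk.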
diff --git a/drivers/gpu/nvgpu/common/mm/allocators/bitmap_allocator.c b/drivers/gpu/nvgpu/common/mm/allocators/bitmap_allocator.c
index 1697160ba..62582243e 100644
@@ -67,6 +67,8 @@ static u64 nvgpu_bitmap_balloc_fixed(struct nvgpu_allocator *na,
 	struct nvgpu_bitmap_allocator *a = bitmap_allocator(na);
 	u64 blks, offs, ret;
 
+	(void)page_size;
+
 	/* Compute the bit offset and make sure it's aligned to a block. */
 	offs = base >> a->blk_shift;
 	if (nvgpu_safe_mult_u64(offs, a->blk_size) != base) {
diff --git a/drivers/gpu/nvgpu/common/mm/allocators/buddy_allocator.c b/drivers/gpu/nvgpu/common/mm/allocators/buddy_allocator.c
index 868e7ac13..82164698b 100644
@@ -28,6 +28,7 @@
 #include
 #include
 #include
+#include
 
 #include "buddy_allocator_priv.h"
diff --git a/drivers/gpu/nvgpu/common/mm/allocators/nvgpu_allocator.c b/drivers/gpu/nvgpu/common/mm/allocators/nvgpu_allocator.c
index fbcb08afb..c19dc6a21 100644
--- a/drivers/gpu/nvgpu/common/mm/allocators/nvgpu_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/allocators/nvgpu_allocator.c
@@ -1,7 +1,7 @@
 /*
  * gk20a allocator
  *
- * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -25,6 +25,7 @@
 #include
 #include
+#include
 
 u64 nvgpu_alloc_length(struct nvgpu_allocator *a)
 {
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c b/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
index 9b215962d..f3728295e 100644
@@ -940,6 +940,15 @@ static void nvgpu_gmmu_update_page_table_dbg_print(struct gk20a *g,
 		attrs->priv ? 'P' : '-',
 		attrs->valid ? 'V' : '-',
 		attrs->platform_atomic ? 'A' : '-');
+#else
+	(void)g;
+	(void)attrs;
+	(void)vm;
+	(void)sgt;
+	(void)space_to_skip;
+	(void)virt_addr;
+	(void)length;
+	(void)page_size;
 #endif /* CONFIG_NVGPU_TRACE */
 }
 
@@ -1077,6 +1086,8 @@ u64 nvgpu_gmmu_map_locked(struct vm_gk20a *vm,
 		attrs.l3_alloc = false;
 	}
 #endif
+	(void)clear_ctags;
+	(void)ctag_offset;
 
 	/*
 	 * Only allocate a new GPU VA range if we haven't already been passed a
@@ -1171,6 +1182,8 @@ void nvgpu_gmmu_unmap_locked(struct vm_gk20a *vm,
 	struct gk20a *g = gk20a_from_vm(vm);
 	struct nvgpu_gmmu_attrs attrs = gmmu_unmap_attrs(pgsz_idx);
 
+	(void)rw_flag;
+
 	attrs.sparse = sparse;
 
 	if (va_allocated) {
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu/pd_cache.c b/drivers/gpu/nvgpu/common/mm/gmmu/pd_cache.c
index 4a7b15eaa..8815b9471 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu/pd_cache.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu/pd_cache.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -30,6 +30,7 @@
 #include
 #include
 #include
+#include
 
 #include "pd_cache_priv.h"
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
index 8a2df3f7a..6fb423356 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -332,12 +332,16 @@ static u64 nvgpu_mem_phys_sgl_phys(struct gk20a *g, void *sgl)
 {
 	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
+	(void)g;
 	return sgl_impl->phys;
 }
 
 static u64 nvgpu_mem_phys_sgl_ipa_to_pa(struct gk20a *g, void *sgl,
 		u64 ipa, u64 *pa_len)
 {
+	(void)g;
+	(void)sgl;
+	(void)pa_len;
 	return ipa;
 }
 
@@ -353,11 +357,15 @@ static u64 nvgpu_mem_phys_sgl_gpu_addr(struct gk20a *g, void *sgl,
 {
 	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
+	(void)g;
+	(void)attrs;
 	return sgl_impl->phys;
 }
 
 static void nvgpu_mem_phys_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt)
 {
+	(void)g;
+	(void)sgt;
 	/*
 	 * No-op here. The free is handled by freeing the nvgpu_mem itself.
 	 */
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 01d380fd4..f879e2910 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -41,6 +41,7 @@
 #include
 #include
 #include
+#include
 
 struct nvgpu_ctag_buffer_info {
 	u64 size;
@@ -593,6 +594,7 @@ static int nvgpu_vm_init_check_vma_limits(struct gk20a *g, struct vm_gk20a *vm,
 	u64 user_lp_vma_start, u64 user_lp_vma_limit,
 	u64 kernel_vma_start, u64 kernel_vma_limit)
 {
+	(void)vm;
 	if ((user_vma_start > user_vma_limit) ||
 	    (user_lp_vma_start > user_lp_vma_limit) ||
 	    (kernel_vma_start >= kernel_vma_limit)) {
@@ -723,6 +725,8 @@ static int nvgpu_vm_init_attributes(struct mm_gk20a *mm,
 	u64 aperture_size;
 	u64 default_aperture_size;
 
+	(void)big_pages;
+
 	g->ops.mm.get_default_va_sizes(&default_aperture_size, NULL, NULL);
 
 	aperture_size = nvgpu_safe_add_u64(kernel_reserved,
@@ -1185,6 +1189,8 @@ static int nvgpu_vm_do_map(struct vm_gk20a *vm,
 	 */
 	u8 pte_kind;
 
+	(void)os_buf;
+	(void)flags;
 #ifdef CONFIG_NVGPU_COMPRESSION
 	err = nvgpu_vm_compute_compression(vm, binfo_ptr);
 	if (err != 0) {
@@ -1216,7 +1222,7 @@ static int nvgpu_vm_do_map(struct vm_gk20a *vm,
 	}
 
 	if (binfo_ptr->compr_kind != NVGPU_KIND_INVALID) {
-		struct gk20a_comptags comptags = { 0 };
+		struct gk20a_comptags comptags = { };
 
 		/*
 		 * Get the comptags state
@@ -1410,6 +1416,8 @@ static int nvgpu_vm_map_check_attributes(struct vm_gk20a *vm,
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 
+	(void)compr_kind;
+
 	if (vm->userspace_managed &&
 	    ((flags & NVGPU_VM_MAP_FIXED_OFFSET) == 0U)) {
 		nvgpu_err(g,
@@ -1461,7 +1469,7 @@ int nvgpu_vm_map(struct vm_gk20a *vm,
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	struct nvgpu_mapped_buf *mapped_buffer = NULL;
-	struct nvgpu_ctag_buffer_info binfo = { 0 };
+	struct nvgpu_ctag_buffer_info binfo = { };
 	enum gk20a_mem_rw_flag rw = buffer_rw_mode;
 	struct nvgpu_vm_area *vm_area = NULL;
 	int err = 0;
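On the vm.c initializer change from { 0 } to { }: both zero the entire object, but { 0 } aims the literal 0 at the first member specifically, which can draw missing-braces or essential-type warnings when that member is itself an aggregate. The empty brace list sidesteps that; note it is a GNU extension (standard C only as of C23), which this codebase evidently builds with. Hypothetical types:

struct range { unsigned long lo, hi; };

struct buffer_info {
	struct range span;	/* first member is an aggregate */
	int flags;
};

static void zero_init_example(void)
{
	struct buffer_info a = { };	/* whole object zeroed */
	struct buffer_info b = { 0 };	/* same effect; 0 lands on b.span.lo via brace elision */

	(void)a;
	(void)b;
}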
diff --git a/drivers/gpu/nvgpu/common/netlist/netlist.c b/drivers/gpu/nvgpu/common/netlist/netlist.c
index eaa195b3c..7349dcc63 100644
--- a/drivers/gpu/nvgpu/common/netlist/netlist.c
+++ b/drivers/gpu/nvgpu/common/netlist/netlist.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -263,6 +263,8 @@ static bool nvgpu_netlist_handle_generic_region_id(struct gk20a *g,
 {
 	bool handled = true;
 
+	(void)size;
+
 	switch (region_id) {
 	case NETLIST_REGIONID_BUFFER_SIZE:
 		nvgpu_memcpy((u8 *)&netlist_vars->buffer_size,
diff --git a/drivers/gpu/nvgpu/common/perf/cyclestats_snapshot.c b/drivers/gpu/nvgpu/common/perf/cyclestats_snapshot.c
index 0b084186b..ecf7a94a9 100644
--- a/drivers/gpu/nvgpu/common/perf/cyclestats_snapshot.c
+++ b/drivers/gpu/nvgpu/common/perf/cyclestats_snapshot.c
@@ -1,7 +1,7 @@
 /*
  * Cycle stats snapshots support
  *
- * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -36,6 +36,7 @@
 #include
 #include
 #include
+#include
 
 /* check client for pointed perfmon ownership */
 #define CONTAINS_PERFMON(cl, pm) \
@@ -336,12 +337,12 @@ next_hw_fifo_entry:
 	/* re-set HW buffer after processing taking wrapping into account */
 	if (css->hw_get < src) {
 		(void) memset(css->hw_get, 0xff,
-			(src - css->hw_get) * sizeof(*src));
+			(size_t)(src - css->hw_get) * sizeof(*src));
 	} else {
 		(void) memset(css->hw_snapshot, 0xff,
-			(src - css->hw_snapshot) * sizeof(*src));
+			(size_t)(src - css->hw_snapshot) * sizeof(*src));
 		(void) memset(css->hw_get, 0xff,
-			(css->hw_end - css->hw_get) * sizeof(*src));
+			(size_t)(css->hw_end - css->hw_get) * sizeof(*src));
 	}
 	g->cs_data->hw_get = src;
@@ -602,5 +603,6 @@ int nvgpu_css_check_data_available(struct nvgpu_channel *ch, u32 *pending,
 
 u32 nvgpu_css_get_max_buffer_size(struct gk20a *g)
 {
+	(void)g;
 	return 0xffffffffU;
 }
diff --git a/drivers/gpu/nvgpu/common/profiler/pm_reservation.c b/drivers/gpu/nvgpu/common/profiler/pm_reservation.c
index 380d81eb9..2b5cc5c9e 100644
--- a/drivers/gpu/nvgpu/common/profiler/pm_reservation.c
+++ b/drivers/gpu/nvgpu/common/profiler/pm_reservation.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -245,7 +245,9 @@ void nvgpu_pm_reservation_release_all_per_vmid(struct gk20a *g, u32 vmid)
 			nvgpu_list_del(&reservation_entry->entry);
 			reservations->count--;
 			nvgpu_kfree(g, reservation_entry);
-			prepare_resource_reservation(g, i, false);
+			prepare_resource_reservation(g,
+				(enum nvgpu_profiler_pm_resource_type)i,
+				false);
 		}
 	}
 	nvgpu_mutex_release(&reservations->lock);
diff --git a/drivers/gpu/nvgpu/common/profiler/profiler.c b/drivers/gpu/nvgpu/common/profiler/profiler.c
index c2eecf95b..62bf30d83 100644
--- a/drivers/gpu/nvgpu/common/profiler/profiler.c
+++ b/drivers/gpu/nvgpu/common/profiler/profiler.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -63,7 +63,7 @@ int nvgpu_profiler_alloc(struct gk20a *g,
 		return -ENOMEM;
 	}
 
-	prof->prof_handle = generate_unique_id();
+	prof->prof_handle = (u32)generate_unique_id();
 	prof->scope = scope;
 	prof->gpu_instance_id = gpu_instance_id;
 	prof->g = g;
@@ -138,7 +138,8 @@ int nvgpu_profiler_unbind_context(struct nvgpu_profiler_object *prof)
 		if (prof->reserved[i]) {
 			nvgpu_warn(g, "Releasing reserved resource %u for handle %u",
 				i, prof->prof_handle);
-			nvgpu_profiler_pm_resource_release(prof, i);
+			nvgpu_profiler_pm_resource_release(prof,
+				(enum nvgpu_profiler_pm_resource_type)i);
 		}
 	}
diff --git a/drivers/gpu/nvgpu/common/ptimer/ptimer.c b/drivers/gpu/nvgpu/common/ptimer/ptimer.c
index bf53fec10..e53eb3cb9 100644
--- a/drivers/gpu/nvgpu/common/ptimer/ptimer.c
+++ b/drivers/gpu/nvgpu/common/ptimer/ptimer.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -83,6 +83,8 @@ int nvgpu_get_timestamps_zipper(struct gk20a *g,
 	int err = 0;
 	unsigned int i = 0;
 
+	(void)source_id;
+
 	if (gk20a_busy(g) != 0) {
 		nvgpu_err(g, "GPU not powered on\n");
 		err = -EINVAL;
diff --git a/drivers/gpu/nvgpu/common/rc/rc.c b/drivers/gpu/nvgpu/common/rc/rc.c
index 9dd868e23..450419ef5 100644
--- a/drivers/gpu/nvgpu/common/rc/rc.c
+++ b/drivers/gpu/nvgpu/common/rc/rc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -61,6 +61,12 @@ void nvgpu_rc_fifo_recover(struct gk20a *g, u32 eng_bitmask,
 			rc_type, NULL);
 #else
 	WARN_ON(!g->sw_quiesce_pending);
+	(void)eng_bitmask;
+	(void)hw_id;
+	(void)id_is_tsg;
+	(void)id_is_known;
+	(void)debug_dump;
+	(void)rc_type;
 #endif
 }
 
@@ -83,6 +89,8 @@ void nvgpu_rc_ctxsw_timeout(struct gk20a *g, u32 eng_bitmask,
 			RC_TYPE_CTXSW_TIMEOUT);
 #else
 	WARN_ON(!g->sw_quiesce_pending);
+	(void)eng_bitmask;
+	(void)debug_dump;
 #endif
 }
 
@@ -162,6 +170,7 @@ void nvgpu_rc_runlist_update(struct gk20a *g, u32 runlist_id)
 	 * on time.
 	 */
 	WARN_ON(!g->sw_quiesce_pending);
+	(void)runlist_id;
 #endif
 }
 
@@ -209,6 +218,8 @@ void nvgpu_rc_gr_fault(struct gk20a *g, struct nvgpu_tsg *tsg,
 	}
 #else
 	WARN_ON(!g->sw_quiesce_pending);
+	(void)tsg;
+	(void)ch;
 #endif
 	nvgpu_log(g, gpu_dbg_gr, "done");
 }
@@ -292,6 +303,9 @@ void nvgpu_rc_tsg_and_related_engines(struct gk20a *g, struct nvgpu_tsg *tsg,
 #endif
 #else
 	WARN_ON(!g->sw_quiesce_pending);
+	(void)tsg;
+	(void)debug_dump;
+	(void)rc_type;
 #endif
 }
 
@@ -313,5 +327,7 @@ void nvgpu_rc_mmu_fault(struct gk20a *g, u32 act_eng_bitmask,
 	}
 
 	WARN_ON(!g->sw_quiesce_pending);
+	(void)rc_type;
+	(void)mmufault;
 #endif
 }
diff --git a/drivers/gpu/nvgpu/common/regops/regops.c b/drivers/gpu/nvgpu/common/regops/regops.c
index 5502a2d2a..eafbf8a05 100644
--- a/drivers/gpu/nvgpu/common/regops/regops.c
+++ b/drivers/gpu/nvgpu/common/regops/regops.c
@@ -1,7 +1,7 @@
 /*
  * Tegra GK20A GPU Debugger Driver Register Ops
  *
- * Copyright (c) 2013-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2013-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -368,7 +368,7 @@ static int profiler_obj_validate_reg_op_offset(struct nvgpu_profiler_object *pro
 		nvgpu_assert(type == type64);
 	}
 
-	op->type = prof->reg_op_type[type];
+	op->type = (u8)prof->reg_op_type[type];
 
 	return 0;
 }
diff --git a/drivers/gpu/nvgpu/common/riscv/riscv.c b/drivers/gpu/nvgpu/common/riscv/riscv.c
index 10820c096..1a15fad51 100644
@@ -84,7 +84,7 @@ int nvgpu_riscv_hs_ucode_load_bootstrap(struct nvgpu_falcon *flcn,
 	g->ops.falcon.set_bcr(flcn);
 	err = nvgpu_falcon_get_mem_size(flcn, MEM_DMEM, &dmem_size);
 	err = nvgpu_falcon_copy_to_imem(flcn, 0x0, code_fw->data,
-			code_fw->size, 0, true, 0x0);
+			(u32)code_fw->size, 0, true, 0x0);
 
 	if (err != 0) {
 		nvgpu_err(g, "RISCV code copy to IMEM failed");
@@ -92,14 +92,14 @@
 	}
 
 	err = nvgpu_falcon_copy_to_dmem(flcn, 0x0, data_fw->data,
-			data_fw->size, 0x0);
+			(u32)data_fw->size, 0x0);
 	if (err != 0) {
 		nvgpu_err(g, "RISCV data copy to DMEM failed");
 		goto exit;
 	}
 
-	err = nvgpu_falcon_copy_to_dmem(flcn, dmem_size - manifest_fw->size,
-			manifest_fw->data, manifest_fw->size, 0x0);
+	err = nvgpu_falcon_copy_to_dmem(flcn, (u32)(dmem_size - manifest_fw->size),
+			manifest_fw->data, (u32)manifest_fw->size, 0x0);
 	if (err != 0) {
 		nvgpu_err(g, "RISCV manifest copy to DMEM failed");
 		goto exit;
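The (u32) casts on code_fw->size and friends in riscv.c narrow a wider firmware-size type to the 32-bit lengths the falcon copy routines accept. Elsewhere in this same series (global_ctx.c) the checked helper nvgpu_safe_cast_u64_to_u32() does the same job with a runtime guard; a sketch of such a checked narrowing under assumed semantics (the error-reporting shape here is illustrative and may differ from the real helper's behavior):

#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

static u32 checked_u64_to_u32(u64 v, int *err)
{
	if (v > (u64)UINT32_MAX) {
		*err = -1;	/* would truncate: report rather than wrap */
		return 0U;
	}
	return (u32)v;
}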
diff --git a/drivers/gpu/nvgpu/common/sim/sim.c b/drivers/gpu/nvgpu/common/sim/sim.c
index 34e9b5d20..a371713de 100644
--- a/drivers/gpu/nvgpu/common/sim/sim.c
+++ b/drivers/gpu/nvgpu/common/sim/sim.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -98,17 +98,17 @@ static int rpc_send_message(struct gk20a *g)
 {
 	/* calculations done in units of u32s */
 	u32 send_base = sim_send_put_pointer_v(g->sim->send_ring_put) * 2;
-	u32 dma_offset = send_base + sim_dma_r()/sizeof(u32);
-	u32 dma_hi_offset = send_base + sim_dma_hi_r()/sizeof(u32);
+	u32 dma_offset = (u32)(send_base + sim_dma_r()/sizeof(u32));
+	u32 dma_hi_offset = (u32)(send_base + sim_dma_hi_r()/sizeof(u32));
 
-	*sim_send_ring_bfr(g, dma_offset*sizeof(u32)) =
+	*sim_send_ring_bfr(g, (u32)(dma_offset*sizeof(u32))) =
 		sim_dma_target_phys_pci_coherent_f() |
 		sim_dma_status_valid_f() |
 		sim_dma_size_4kb_f() |
-		sim_dma_addr_lo_f(nvgpu_mem_get_addr(g, &g->sim->msg_bfr)
-				>> sim_dma_addr_lo_b());
+		sim_dma_addr_lo_f((u32)(nvgpu_mem_get_addr(g, &g->sim->msg_bfr)
+				>> sim_dma_addr_lo_b()));
 
-	*sim_send_ring_bfr(g, dma_hi_offset*sizeof(u32)) =
+	*sim_send_ring_bfr(g, (u32)(dma_hi_offset*sizeof(u32))) =
 		u64_hi32(nvgpu_mem_get_addr(g, &g->sim->msg_bfr));
 
 	*sim_msg_hdr(g, sim_msg_sequence_r()) = g->sim->sequence_base++;
@@ -198,7 +198,7 @@ int issue_rpc_and_wait(struct gk20a *g)
 	if (*sim_msg_hdr(g, sim_msg_result_r()) != sim_msg_result_success_v()) {
 		nvgpu_err(g, "%s received failed status!", __func__);
-		return -(*sim_msg_hdr(g, sim_msg_result_r()));
+		return -(int)(*sim_msg_hdr(g, sim_msg_result_r()));
 	}
 	return 0;
 }
@@ -214,7 +214,7 @@ static void nvgpu_sim_esc_readl(struct gk20a *g,
 			sim_escape_read_hdr_size());
 	*sim_msg_param(g, 0) = index;
 	*sim_msg_param(g, 4) = sizeof(u32);
-	data_offset = round_up(0xc + pathlen + 1, sizeof(u32));
+	data_offset = (u32)round_up(0xc + pathlen + 1, sizeof(u32));
 	*sim_msg_param(g, 8) = data_offset;
 	strcpy((char *)sim_msg_param(g, 0xc), path);
@@ -264,7 +264,7 @@ static int nvgpu_sim_init_late(struct gk20a *g)
 		 sim_send_ring_status_valid_f() |
 		 sim_send_ring_target_phys_pci_coherent_f() |
 		 sim_send_ring_size_4kb_f() |
-		 sim_send_ring_addr_lo_f(phys >> sim_send_ring_addr_lo_b()));
+		 sim_send_ring_addr_lo_f((u32)(phys >> sim_send_ring_addr_lo_b())));
 
 	/*repeat for recv ring (but swap put,get as roles are opposite) */
 	sim_writel(g->sim, sim_recv_ring_r(), sim_recv_ring_status_invalid_f());
@@ -281,7 +281,7 @@ static int nvgpu_sim_init_late(struct gk20a *g)
 		 sim_recv_ring_status_valid_f() |
 		 sim_recv_ring_target_phys_pci_coherent_f() |
 		 sim_recv_ring_size_4kb_f() |
-		 sim_recv_ring_addr_lo_f(phys >> sim_recv_ring_addr_lo_b()));
+		 sim_recv_ring_addr_lo_f((u32)(phys >> sim_recv_ring_addr_lo_b())));
 
 	return 0;
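Review note: the sim.c changes truncate 64-bit addresses explicitly when programming the 32-bit ring registers: the low word is the address shifted right by sim_dma_addr_lo_b(), the high word comes from u64_hi32(). A standalone sketch of that lo/hi split; the 12-bit shift is assumed here purely to mirror the 4 KiB ring size, the real value comes from the generated register headers:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ADDR_LO_SHIFT 12U       /* assumed 4 KiB-alignment shift */

    static uint32_t addr_lo(uint64_t pa) { return (uint32_t)(pa >> ADDR_LO_SHIFT); }
    static uint32_t addr_hi(uint64_t pa) { return (uint32_t)(pa >> 32); }

    int main(void)
    {
            uint64_t pa = 0x123456000ULL;   /* 33-bit physical address */

            /* Without the explicit (u32) casts the shifted value stays
             * 64-bit; the hunks above truncate it before handing it to
             * the 32-bit register field helpers. */
            printf("lo=0x%" PRIx32 " hi=0x%" PRIx32 "\n",
                   addr_lo(pa), addr_hi(pa));
            return 0;
    }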
diff --git a/drivers/gpu/nvgpu/common/sim/sim_pci.c b/drivers/gpu/nvgpu/common/sim/sim_pci.c
index 8b5d518f0..2afce4ee9 100644
--- a/drivers/gpu/nvgpu/common/sim/sim_pci.c
+++ b/drivers/gpu/nvgpu/common/sim/sim_pci.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -67,22 +67,22 @@ static int rpc_send_message(struct gk20a *g)
 {
 	/* calculations done in units of u32s */
 	u32 send_base = sim_send_put_pointer_v(g->sim->send_ring_put) * 2;
-	u32 dma_offset = send_base + sim_dma_r()/sizeof(u32);
-	u32 dma_hi_offset = send_base + sim_dma_hi_r()/sizeof(u32);
+	u32 dma_offset = send_base + sim_dma_r()/(u32)sizeof(u32);
+	u32 dma_hi_offset = send_base + sim_dma_hi_r()/4U;
 
-	*sim_send_ring_bfr(g, dma_offset*sizeof(u32)) =
+	*sim_send_ring_bfr(g, dma_offset*4U) =
 		sim_dma_target_phys_pci_coherent_f() |
 		sim_dma_status_valid_f() |
 		sim_dma_size_4kb_f() |
-		sim_dma_addr_lo_f(nvgpu_mem_get_phys_addr(g, &g->sim->msg_bfr)
-				>> sim_dma_addr_lo_b());
+		sim_dma_addr_lo_f((u32)(nvgpu_mem_get_phys_addr(g, &g->sim->msg_bfr)
+				>> sim_dma_addr_lo_b()));
 
-	*sim_send_ring_bfr(g, dma_hi_offset*sizeof(u32)) =
+	*sim_send_ring_bfr(g, dma_hi_offset*4U) =
 		u64_hi32(nvgpu_mem_get_phys_addr(g, &g->sim->msg_bfr));
 
 	*sim_msg_hdr(g, sim_msg_sequence_r()) = g->sim->sequence_base++;
 
-	g->sim->send_ring_put = (g->sim->send_ring_put + 2 * sizeof(u32)) %
+	g->sim->send_ring_put = (g->sim->send_ring_put + 2 * 4U) %
 		SIM_BFR_SIZE;
 
 	/* Update the put pointer. This will trap into the host. */
@@ -130,7 +130,7 @@ static int rpc_recv_poll(struct gk20a *g)
 	}
 
 	/* Update GET pointer */
-	g->sim->recv_ring_get = (g->sim->recv_ring_get + 2*sizeof(u32))
+	g->sim->recv_ring_get = (g->sim->recv_ring_get + 2*4U)
 		% SIM_BFR_SIZE;
 
 	sim_writel(g->sim, sim_recv_get_r(), g->sim->recv_ring_get);
@@ -175,8 +175,8 @@ static void nvgpu_sim_esc_readl(struct gk20a *g,
 	pci_sim_write_hdr(g, sim_msg_function_sim_escape_read_v(),
 			sim_escape_read_hdr_size());
 	*pci_sim_msg_param(g, 0) = index;
-	*pci_sim_msg_param(g, 4) = sizeof(u32);
-	data_offset = round_up(pathlen + 1, sizeof(u32));
+	*pci_sim_msg_param(g, 4) = 4U;
+	data_offset = (u32)(round_up(pathlen + 1, 4U));
 	*pci_sim_msg_param(g, 8) = data_offset;
 	strcpy((char *)pci_sim_msg_param(g, sim_escape_read_hdr_size()), path);
@@ -187,7 +187,7 @@ static void nvgpu_sim_esc_readl(struct gk20a *g,
 			(u8 *)pci_sim_msg_param(g,
 				nvgpu_safe_add_u32(data_offset,
 					sim_escape_read_hdr_size())),
-			sizeof(u32));
+			4U);
 	} else {
 		*data = 0xffffffff;
 		WARN(1, "pci_issue_rpc_and_wait failed err=%d", err);
@@ -229,7 +229,7 @@ static int nvgpu_sim_init_late(struct gk20a *g)
 		 sim_send_ring_status_valid_f() |
 		 sim_send_ring_target_phys_pci_coherent_f() |
 		 sim_send_ring_size_4kb_f() |
-		 sim_send_ring_addr_lo_f(phys >> sim_send_ring_addr_lo_b()));
+		 sim_send_ring_addr_lo_f((u32)(phys >> sim_send_ring_addr_lo_b())));
 
 	/* repeat for recv ring (but swap put,get as roles are opposite) */
 	sim_writel(g->sim, sim_recv_ring_r(), sim_recv_ring_status_invalid_f());
@@ -246,7 +246,7 @@ static int nvgpu_sim_init_late(struct gk20a *g)
 		 sim_recv_ring_status_valid_f() |
 		 sim_recv_ring_target_phys_pci_coherent_f() |
 		 sim_recv_ring_size_4kb_f() |
-		 sim_recv_ring_addr_lo_f(phys >> sim_recv_ring_addr_lo_b()));
+		 sim_recv_ring_addr_lo_f((u32)(phys >> sim_recv_ring_addr_lo_b())));
 
 	return 0;
 fail:
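Review note: sim_pci.c swaps `sizeof(u32)` for `4U` (or casts it to u32) to change the type, not the value: sizeof yields size_t, so the surrounding arithmetic is otherwise performed in a wider unsigned type on 64-bit builds and then narrowed on assignment. A standalone sketch of the difference; SIM_BFR_SIZE here is a stand-in value:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SIM_BFR_SIZE 4096U      /* assumed ring size for illustration */

    int main(void)
    {
            uint32_t put = 8U;

            /* sizeof() is size_t, so this arithmetic happens in a 64-bit
             * unsigned type on LP64 and must be narrowed back down ... */
            uint32_t a = (uint32_t)((put + 2U * sizeof(uint32_t)) % SIM_BFR_SIZE);

            /* ... whereas a 4U literal keeps every operand a u32, which
             * is the form the hunks above switch to. */
            uint32_t b = (put + 2U * 4U) % SIM_BFR_SIZE;

            printf("%" PRIu32 " %" PRIu32 "\n", a, b);  /* same value */
            return 0;
    }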
diff --git a/drivers/gpu/nvgpu/common/swdebug/profile.c b/drivers/gpu/nvgpu/common/swdebug/profile.c
index 53c632214..4dcd1c8b4 100644
--- a/drivers/gpu/nvgpu/common/swdebug/profile.c
+++ b/drivers/gpu/nvgpu/common/swdebug/profile.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -188,7 +188,7 @@ void nvgpu_swprofile_snapshot(struct nvgpu_swprofiler *p, u32 idx)
 	 */
 	index = matrix_to_linear_index(p, p->sample_index, idx);
 
-	p->samples[index] = nvgpu_current_time_ns();
+	p->samples[index] = (u64)nvgpu_current_time_ns();
 }
 
 void nvgpu_swprofile_begin_sample(struct nvgpu_swprofiler *p)
@@ -210,14 +210,14 @@ void nvgpu_swprofile_begin_sample(struct nvgpu_swprofiler *p)
 	/*
 	 * Reference time for subsequent subsamples in this sample.
 	 */
-	p->samples_start[p->sample_index] = nvgpu_current_time_ns();
+	p->samples_start[p->sample_index] = (u64)nvgpu_current_time_ns();
 
 	nvgpu_mutex_release(&p->lock);
 }
 
 static int profile_cmp(const void *a, const void *b)
 {
-	return *((const u64 *) a) - *((const u64 *) b);
+	return (int)(*((const u64 *) a) - *((const u64 *) b));
 }
 
 #define PERCENTILE_WIDTH 5
@@ -350,6 +350,8 @@ void nvgpu_swprofile_print_raw_data(struct gk20a *g,
 {
 	u32 i, j;
 
+	(void)g;
+
 	nvgpu_mutex_acquire(&p->lock);
 
 	if (p->samples == NULL) {
@@ -408,6 +410,8 @@ static u32 nvgpu_swprofile_subsample_basic_stats(struct gk20a *g,
 	u64 sigma_2 = 0U;
 	u32 i;
 
+	(void)g;
+
 	/*
 	 * First, let's work out min, max, sum, and number of samples of data. With this we
 	 * can then get the mean, median, and sigma^2.
@@ -461,7 +465,7 @@ static u32 nvgpu_swprofile_subsample_basic_stats(struct gk20a *g,
 	results[3] = median;
 	results[4] = sigma_2;
 
-	return samples;
+	return (u32)samples;
 }
 
 /*
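Review note: one hunk above warrants caution rather than imitation. profile_cmp() still subtracts two u64 timestamps and now casts the difference to int; for deltas wider than 31 bits the truncation can flip the comparator's sign and mis-order the sort. A standalone comparator that avoids the narrowing entirely (a suggestion, not what the patch does):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Three-way compare of two u64 keys; never overflows or truncates. */
    static int u64_cmp(const void *a, const void *b)
    {
            uint64_t x = *(const uint64_t *)a;
            uint64_t y = *(const uint64_t *)b;

            return (x > y) - (x < y);
    }

    int main(void)
    {
            uint64_t v[] = { 5U, 0xffffffffffULL, 1U };

            qsort(v, sizeof(v) / sizeof(v[0]), sizeof(v[0]), u64_cmp);
            printf("%llu\n", (unsigned long long)v[0]);   /* prints 1 */
            return 0;
    }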
diff --git a/drivers/gpu/nvgpu/common/sync/channel_sync_semaphore.c b/drivers/gpu/nvgpu/common/sync/channel_sync_semaphore.c
index 4233d4507..fcc5fa02f 100644
--- a/drivers/gpu/nvgpu/common/sync/channel_sync_semaphore.c
+++ b/drivers/gpu/nvgpu/common/sync/channel_sync_semaphore.c
@@ -1,7 +1,7 @@
 /*
  * GK20A Channel Synchronization Abstraction
  *
- * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -95,7 +95,7 @@ static void add_sema_incr_cmd(struct gk20a *g, struct nvgpu_channel *c,
 	struct nvgpu_semaphore *s, struct priv_cmd_entry *cmd,
 	bool wfi, struct nvgpu_hw_semaphore *hw_sema)
 {
-	int ch = c->chid;
+	u32 ch = c->chid;
 	u64 va;
 
 	/* release will need to write back to the semaphore memory. */
@@ -105,7 +105,7 @@ static void add_sema_incr_cmd(struct gk20a *g, struct nvgpu_channel *c,
 	nvgpu_semaphore_prepare(s, hw_sema);
 	g->ops.sync.sema.add_incr_cmd(g, cmd, s, va, wfi);
 
-	gpu_sema_verbose_dbg(g, "(R) c=%d INCR %u (%u) pool=%-3llu"
+	gpu_sema_verbose_dbg(g, "(R) c=%u INCR %u (%u) pool=%-3llu"
 			     "va=0x%llx entry=%p",
 			     ch, nvgpu_semaphore_get_value(s),
 			     nvgpu_semaphore_read(s),
@@ -170,6 +170,9 @@ cleanup:
 	struct nvgpu_channel_sync_semaphore *sema =
 		nvgpu_channel_sync_semaphore_from_base(s);
 
+	(void)fd;
+	(void)entry;
+	(void)max_wait_cmds;
 	nvgpu_err(sema->c->g,
 		"trying to use sync fds with CONFIG_NVGPU_SYNCFD_NONE");
 	return -ENODEV;
@@ -259,6 +262,10 @@ static int channel_sync_semaphore_incr_user(
 	struct nvgpu_channel_sync_semaphore *sema =
 		nvgpu_channel_sync_semaphore_from_base(s);
 
+	(void)entry;
+	(void)fence;
+	(void)wfi;
+	(void)need_sync_fence;
 	nvgpu_err(sema->c->g,
 		"trying to use sync fds with CONFIG_NVGPU_SYNCFD_NONE");
 	return -ENODEV;
@@ -271,6 +278,8 @@ static void channel_sync_semaphore_mark_progress(struct nvgpu_channel_sync *s,
 	struct nvgpu_channel_sync_semaphore *sp =
 		nvgpu_channel_sync_semaphore_from_base(s);
 
+	(void)register_irq;
+
 	(void)nvgpu_hw_semaphore_update_next(sp->hw_sema);
 
 	/*
 	 * register_irq is ignored: there is only one semaphore interrupt that
diff --git a/drivers/gpu/nvgpu/common/sync/channel_sync_syncpt.c b/drivers/gpu/nvgpu/common/sync/channel_sync_syncpt.c
index 740eb3143..9f093c3ec 100644
--- a/drivers/gpu/nvgpu/common/sync/channel_sync_syncpt.c
+++ b/drivers/gpu/nvgpu/common/sync/channel_sync_syncpt.c
@@ -1,7 +1,7 @@
 /*
  * GK20A Channel Synchronization Abstraction
  *
- * Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -165,6 +165,10 @@ static int channel_sync_syncpt_wait_fd(struct nvgpu_channel_sync *s, int fd,
 {
 	struct nvgpu_channel_sync_syncpt *sp =
 		nvgpu_channel_sync_syncpt_from_base(s);
+	(void)s;
+	(void)fd;
+	(void)wait_cmd;
+	(void)max_wait_cmds;
 	nvgpu_err(sp->c->g,
 		"trying to use sync fds with CONFIG_NVGPU_SYNCFD_NONE");
 	return -ENODEV;
@@ -175,6 +179,8 @@ static void channel_sync_syncpt_update(void *priv, int nr_completed)
 {
 	struct nvgpu_channel *ch = priv;
 
+	(void)nr_completed;
+
 	nvgpu_channel_update(ch);
 
 	/* note: channel_get() is in channel_sync_syncpt_mark_progress() */
diff --git a/drivers/gpu/nvgpu/common/utils/worker.c b/drivers/gpu/nvgpu/common/utils/worker.c
index a5bbe655e..fbdff554f 100644
--- a/drivers/gpu/nvgpu/common/utils/worker.c
+++ b/drivers/gpu/nvgpu/common/utils/worker.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -23,6 +23,7 @@
 #include
 #include
 #include
+#include
 
 static void nvgpu_worker_pre_process(struct nvgpu_worker *worker)
 {
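Review note: the chid fix in add_sema_incr_cmd() above pairs the type change (int to u32) with the matching format change (%d to %u) in gpu_sema_verbose_dbg(); passing an unsigned value above INT_MAX through %d is undefined behavior. A standalone sketch of keeping the specifier and the type in agreement:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t chid = 4000000000U;    /* deliberately above INT_MAX */

            /* The specifier matches the unsigned type, mirroring the
             * %d -> %u change in the hunk above. */
            printf("(R) c=%" PRIu32 " INCR\n", chid);
            return 0;
    }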