gpu: nvgpu: common: fix compile error of new compile flags

It's preparing to add the below CFLAGS:
    -Werror -Wall -Wextra \
    -Wmissing-braces -Wpointer-arith -Wundef \
    -Wconversion -Wsign-conversion \
    -Wformat-security \
    -Wmissing-declarations -Wredundant-decls -Wimplicit-fallthrough

Jira GVSCI-11640

Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Change-Id: Ia8f508c65071aa4775d71b8ee5dbf88a33b5cbd5
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2555056
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Richard Zhao
Date: 2021-07-06 21:29:38 -07:00
Committed-by: mobile promotions
Parent: 851666b632
Commit: 9ab1271269
59 changed files with 334 additions and 134 deletions
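Almost every hunk below is one of two mechanical fixes needed before the stricter flags can be enabled: parameters that a given configuration does not use are explicitly voided so -Wextra (which enables -Wunused-parameter) stays quiet under -Werror, and implicit integer conversions get explicit casts for -Wconversion/-Wsign-conversion. A minimal, self-contained sketch of the unused-parameter pattern follows; the names are invented for illustration and are not part of nvgpu.

/* Standalone illustration only; build with:
 *   gcc -c -Wall -Wextra -Werror example.c
 */
struct device_ctx;

int device_shutdown(struct device_ctx *ctx);

/* The callback signature requires both arguments, but this variant needs
 * neither, so they are voided explicitly instead of tripping
 * -Wunused-parameter. */
static int backend_cleanup(struct device_ctx *ctx, int flags)
{
	(void)ctx;
	(void)flags;
	return 0;
}

int device_shutdown(struct device_ctx *ctx)
{
	return backend_cleanup(ctx, 0);
}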

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -977,6 +977,8 @@ static int lsf_gen_wpr_requirements(struct gk20a *g,
 	u32 wpr_offset;
 	u32 flcn_cnt;
+	(void)g;
 	/*
 	 * Start with an array of WPR headers at the base of the WPR.
 	 * The expectation here is that the secure falcon will do a single DMA

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -348,6 +348,8 @@ static int lsf_gen_wpr_requirements(struct gk20a *g, struct ls_flcn_mgr_v0 *plsf
 	struct lsfm_managed_ucode_img_v0 *pnode = plsfm->ucode_img_list;
 	u32 wpr_offset;
+	(void)g;
 	/*
 	 * Start with an array of WPR headers at the base of the WPR.
 	 * The expectation here is that the secure falcon will do a single DMA

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -386,7 +386,7 @@ int nvgpu_acr_bootstrap_hs_ucode_riscv(struct gk20a *g, struct nvgpu_acr *acr)
 		timeout = RISCV_BR_COMPLETION_TIMEOUT_NON_SILICON_MS;
 	}
-	err = nvgpu_acr_wait_for_riscv_brom_completion(flcn, timeout);
+	err = nvgpu_acr_wait_for_riscv_brom_completion(flcn, (int)timeout);
 	if (err == 0x0) {
 		nvgpu_acr_dbg(g, "RISCV BROM passed");
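The (int)timeout cast in the hunk above is the second recurring pattern: -Wconversion flags implicit conversions that can change a value, such as passing a u32 where the callee takes an int, so the narrowing is made explicit. A standalone sketch of the same situation, with illustrative names rather than the real nvgpu API:

/* Build with: gcc -c -Wconversion -Wsign-conversion -Werror example.c */
typedef unsigned int u32;

static int wait_for_completion(int timeout_ms)
{
	return (timeout_ms > 0) ? 0 : -1;
}

int bootstrap(void);

int bootstrap(void)
{
	u32 timeout = 500U;

	/* Passing timeout directly would warn: u32 -> int may change the
	 * value/sign. The explicit cast documents the intended narrowing. */
	return wait_for_completion((int)timeout);
}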

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -53,6 +53,8 @@ static int ga10b_bootstrap_hs_acr(struct gk20a *g, struct nvgpu_acr *acr)
 {
 	int err = 0;
+	(void)acr;
 	nvgpu_log_fn(g, " ");
 	err = nvgpu_acr_bootstrap_hs_ucode_riscv(g, g->acr);
@@ -75,6 +77,9 @@ static int ga10b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
 		&acr_desc->acr_falcon2_sysmem_desc;
 	struct flcn2_acr_desc *acr_sysmem_desc = &acr_desc->acr_sysmem_desc;
+	(void)acr;
+	(void)is_recovery;
 	nvgpu_log_fn(g, " ");
 #ifdef CONFIG_NVGPU_NON_FUSA
@@ -160,7 +165,7 @@ static int ga10b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
 	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
 		acr_sysmem_desc->gpu_mode |= MIG_MODE;
 	} else {
-		acr_sysmem_desc->gpu_mode &= ~MIG_MODE;
+		acr_sysmem_desc->gpu_mode &= (u32)(~MIG_MODE);
 	}
 }
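The (u32)(~MIG_MODE) change above is the usual -Wsign-conversion case around bitwise negation: if the flag macro expands to a plain signed constant, ~flag is a negative int, and masking it into a u32 converts a negative value to unsigned implicitly. A standalone sketch under that assumption (the actual definition of MIG_MODE is not shown in this diff):

/* Build with: gcc -c -Wsign-conversion -Werror example.c */
typedef unsigned int u32;

#define MODE_FLAG 0x2	/* assumed plain int constant, as noted above */

u32 clear_mode_flag(u32 mode);

u32 clear_mode_flag(u32 mode)
{
	/* "mode &= ~MODE_FLAG;" would warn: ~0x2 is the negative int -3,
	 * and the implicit int -> u32 conversion trips -Wsign-conversion. */
	mode &= (u32)(~MODE_FLAG);
	return mode;
}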

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -38,6 +38,8 @@ static int gm20b_bootstrap_hs_acr(struct gk20a *g, struct nvgpu_acr *acr)
 {
 	int err = 0;
+	(void)acr;
 	nvgpu_log_fn(g, " ");
 	err = nvgpu_acr_bootstrap_hs_ucode(g, g->acr, &g->acr->acr);
@@ -58,6 +60,8 @@ static int gm20b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
 	u32 *acr_ucode_header = NULL;
 	u32 *acr_ucode_data = NULL;
+	(void)acr;
 	nvgpu_log_fn(g, " ");
 	if (is_recovery) {
@@ -95,6 +99,7 @@ static int gm20b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
 static u32 gm20b_acr_lsf_pmu(struct gk20a *g,
 		struct acr_lsf_config *lsf)
 {
+	(void)g;
 	/* PMU LS falcon info */
 	lsf->falcon_id = FALCON_ID_PMU;
 	lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
@@ -110,6 +115,7 @@ static u32 gm20b_acr_lsf_pmu(struct gk20a *g,
 static u32 gm20b_acr_lsf_fecs(struct gk20a *g,
 		struct acr_lsf_config *lsf)
 {
+	(void)g;
 	/* FECS LS falcon info */
 	lsf->falcon_id = FALCON_ID_FECS;
 	lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -36,6 +36,7 @@
 static u32 gp10b_acr_lsf_gpccs(struct gk20a *g,
 		struct acr_lsf_config *lsf)
 {
+	(void)g;
 	/* GPCCS LS falcon info */
 	lsf->falcon_id = FALCON_ID_GPCCS;
 	lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -43,6 +43,8 @@ static int gv11b_bootstrap_hs_acr(struct gk20a *g, struct nvgpu_acr *acr)
 {
 	int err = 0;
+	(void)acr;
 	nvgpu_log_fn(g, " ");
 	err = nvgpu_acr_bootstrap_hs_ucode(g, g->acr, &g->acr->acr);
@@ -64,6 +66,9 @@ static int gv11b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
 	u32 *acr_ucode_data = NULL;
 	const u32 acr_desc_offset = 2U;
+	(void)acr;
+	(void)is_recovery;
 	nvgpu_log_fn(g, " ");
 #ifdef CONFIG_NVGPU_NON_FUSA
 	if (is_recovery) {

View File

@@ -1,7 +1,7 @@
 /*
  * CBC
  *
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -27,6 +27,7 @@
 #include <nvgpu/cbc.h>
 #include <nvgpu/dma.h>
 #include <nvgpu/log.h>
+#include <nvgpu/string.h>
 #include <nvgpu/nvgpu_mem.h>
 #include <nvgpu/comptags.h>
@@ -94,6 +95,8 @@ int nvgpu_cbc_alloc(struct gk20a *g, size_t compbit_backing_size,
 {
 	struct nvgpu_cbc *cbc = g->cbc;
+	(void)vidmem_alloc;
 	if (nvgpu_mem_is_valid(&cbc->compbit_store.mem) != 0) {
 		return 0;
 	}

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -25,6 +25,7 @@
 #include <nvgpu/nvgpu_err.h>
 #include <nvgpu/nvgpu_err_info.h>
 #include <nvgpu/cic_mon.h>
+#include <nvgpu/string.h>
 #include "cic_mon_priv.h"
@@ -91,6 +92,8 @@ void nvgpu_inject_ctxsw_swerror(struct gk20a *g, u32 hw_unit,
 {
 	struct ctxsw_err_info err_info;
+	(void)inst;
 	(void)memset(&err_info, ERR_INJECT_TEST_PATTERN, sizeof(err_info));
 	nvgpu_report_ctxsw_err(g, hw_unit, err_index, (void *)&err_info);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -25,6 +25,7 @@
 #include <nvgpu/nvgpu_err.h>
 #include <nvgpu/nvgpu_err_info.h>
 #include <nvgpu/cic_mon.h>
+#include <nvgpu/string.h>
 #include "cic_mon_priv.h"

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -96,5 +96,5 @@ int nvgpu_cic_mon_get_num_hw_modules(struct gk20a *g)
 		return -EINVAL;
 	}
-	return g->cic_mon->num_hw_modules;
+	return (int)g->cic_mon->num_hw_modules;
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -25,6 +25,7 @@
 #include <nvgpu/nvgpu_err.h>
 #include <nvgpu/nvgpu_err_info.h>
 #include <nvgpu/cic_mon.h>
+#include <nvgpu/string.h>
 #include "cic_mon_priv.h"

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -27,12 +27,12 @@
 void nvgpu_cic_rm_set_irq_stall(struct gk20a *g, u32 value)
 {
-	nvgpu_atomic_set(&g->cic_rm->sw_irq_stall_pending, value);
+	nvgpu_atomic_set(&g->cic_rm->sw_irq_stall_pending, (int)value);
 }
 void nvgpu_cic_rm_set_irq_nonstall(struct gk20a *g, u32 value)
 {
-	nvgpu_atomic_set(&g->cic_rm->sw_irq_nonstall_pending, value);
+	nvgpu_atomic_set(&g->cic_rm->sw_irq_nonstall_pending, (int)value);
 }
 int nvgpu_cic_rm_broadcast_last_irq_stall(struct gk20a *g)

View File

@@ -1,7 +1,7 @@
 /*
  * Tegra GK20A GPU Debugger/Profiler Driver
  *
- * Copyright (c) 2013-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2013-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -161,7 +161,7 @@ u32 nvgpu_set_powergate_locked(struct dbg_session_gk20a *dbg_s,
 	 * the global pg disabled refcount is zero
 	 */
 	if (g->dbg_powergating_disabled_refcount == 0) {
-		err = g->ops.debugger.dbg_set_powergate(dbg_s,
+		err = (u32)g->ops.debugger.dbg_set_powergate(dbg_s,
 				mode);
 	}

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -319,6 +319,8 @@ u32 nvgpu_device_get_copies(struct gk20a *g,
  */
 bool nvgpu_device_is_ce(struct gk20a *g, const struct nvgpu_device *dev)
 {
+	(void)g;
 	if (dev->type == NVGPU_DEVTYPE_COPY0 ||
 		dev->type == NVGPU_DEVTYPE_COPY1 ||
 		dev->type == NVGPU_DEVTYPE_COPY2 ||
@@ -331,5 +333,7 @@ bool nvgpu_device_is_ce(struct gk20a *g, const struct nvgpu_device *dev)
 bool nvgpu_device_is_graphics(struct gk20a *g, const struct nvgpu_device *dev)
 {
+	(void)g;
 	return dev->type == NVGPU_DEVTYPE_GRAPHICS;
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -23,6 +23,7 @@
 #include <nvgpu/gk20a.h>
 #include <nvgpu/gr/gr_ecc.h>
 #include <nvgpu/ltc.h>
+#include <nvgpu/string.h>
 void nvgpu_ecc_stat_add(struct gk20a *g, struct nvgpu_ecc_stat *stat)
 {

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -70,6 +70,8 @@ static bool engine_fb_queue_has_room(struct nvgpu_engine_fb_queue *queue,
 	u32 next_head = 0;
 	int err = 0;
+	(void)size;
 	err = queue->head(queue, &head, QUEUE_GET);
 	if (err != 0) {
 		nvgpu_err(queue->g, "queue head GET failed");
@@ -97,6 +99,9 @@ static int engine_fb_queue_write(struct nvgpu_engine_fb_queue *queue,
 	u32 entry_offset = 0U;
 	int err = 0;
+	(void)src;
+	(void)size;
 	if (queue->fbq.work_buffer == NULL) {
 		nvgpu_err(g, "Invalid/Unallocated work buffer");
 		err = -EINVAL;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -84,7 +84,7 @@ int nvgpu_fbp_init_support(struct gk20a *g)
 	/* get active L2 mask per FBP */
 	for_each_set_bit(i, &fbp_en_mask_tmp, fbp->max_fbps_count) {
-		tmp = g->ops.fuse.fuse_status_opt_l2_fbp(g, i);
+		tmp = g->ops.fuse.fuse_status_opt_l2_fbp(g, (u32)i);
 		fbp->fbp_l2_en_mask[i] = l2_all_en_mask ^ tmp;
 	}
 #endif

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -65,6 +65,7 @@ static bool nvgpu_fence_syncpt_is_expired(struct nvgpu_fence_type *f)
 static void nvgpu_fence_syncpt_release(struct nvgpu_fence_type *f)
 {
+	(void)f;
 }
 static const struct nvgpu_fence_ops nvgpu_fence_syncpt_ops = {

View File

@@ -1,7 +1,7 @@
 /*
  * GK20A Graphics channel
  *
- * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -65,6 +65,7 @@
 #endif
 #include <nvgpu/job.h>
 #include <nvgpu/priv_cmdbuf.h>
+#include <nvgpu/string.h>
 #include "channel_wdt.h"
 #include "channel_worker.h"
@@ -829,6 +830,8 @@ static void channel_free_invoke_deferred_engine_reset(struct nvgpu_channel *ch)
 	nvgpu_mutex_release(&g->fifo.engines_reset_mutex);
 	}
+#else
+	(void)ch;
 #endif
 }
@@ -848,6 +851,8 @@ static void channel_free_invoke_sync_destroy(struct nvgpu_channel *ch)
 		ch->user_sync = NULL;
 	}
 	nvgpu_mutex_release(&ch->sync_lock);
+#else
+	(void)ch;
 #endif
 }
@@ -881,6 +886,8 @@ static void channel_free_unlink_debug_session(struct nvgpu_channel *ch)
 	}
 	nvgpu_mutex_release(&g->dbg_sessions_lock);
+#else
+	(void)ch;
 #endif
 }
@@ -1097,6 +1104,8 @@ static void channel_dump_ref_actions(struct nvgpu_channel *ch)
 	}
 	nvgpu_spinlock_release(&ch->ref_actions_lock);
+#else
+	(void)ch;
 #endif
 }
@@ -1158,6 +1167,8 @@ struct nvgpu_channel *nvgpu_channel_get__func(struct nvgpu_channel *ch,
 	if (ret != NULL) {
 		trace_nvgpu_channel_get(ch->chid, caller);
 	}
+#else
+	(void)caller;
 #endif
 	return ret;
@@ -1170,6 +1181,8 @@ void nvgpu_channel_put__func(struct nvgpu_channel *ch, const char *caller)
 #endif
 #ifdef CONFIG_NVGPU_TRACE
 	trace_nvgpu_channel_put(ch->chid, caller);
+#else
+	(void)caller;
 #endif
 	nvgpu_atomic_dec(&ch->ref_count);
 	if (nvgpu_cond_broadcast(&ch->ref_count_dec_wq) != 0) {
@@ -1962,6 +1975,8 @@ static void nvgpu_channel_semaphore_signal(struct nvgpu_channel *c,
 {
 	struct gk20a *g = c->g;
+	(void)post_events;
 	if (nvgpu_cond_broadcast_interruptible( &c->semaphore_wq) != 0) {
 		nvgpu_warn(g, "failed to broadcast");
 	}
@@ -2091,6 +2106,10 @@ static void nvgpu_channel_sync_debug_dump(struct gk20a *g,
 			info->inst.semaphored);
 	g->ops.pbdma.syncpt_debug_dump(g, o, info);
+#else
+	(void)g;
+	(void)o;
+	(void)info;
 #endif
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -36,7 +36,10 @@ void nvgpu_channel_worker_poll_wakeup_post_process_item(
 u32 nvgpu_channel_worker_poll_wakeup_condition_get_timeout(
 		struct nvgpu_worker *worker);
 #else
-static inline void nvgpu_channel_launch_wdt(struct nvgpu_channel *ch) {}
+static inline void nvgpu_channel_launch_wdt(struct nvgpu_channel *ch)
+{
+	(void)ch;
+}
 #endif /* CONFIG_NVGPU_CHANNEL_WDT */
 #endif /* NVGPU_COMMON_FIFO_CHANNEL_WDT_H */

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -29,13 +29,7 @@
 #include <nvgpu/job.h>
 #include <nvgpu/priv_cmdbuf.h>
 #include <nvgpu/fence.h>
+#include <nvgpu/string.h>
-static inline struct nvgpu_channel_job *
-channel_gk20a_job_from_list(struct nvgpu_list_node *node)
-{
-	return (struct nvgpu_channel_job *)
-		((uintptr_t)node - offsetof(struct nvgpu_channel_job, list));
-};
 int nvgpu_channel_alloc_job(struct nvgpu_channel *c,
 		struct nvgpu_channel_job **job_out)
@@ -58,6 +52,8 @@ int nvgpu_channel_alloc_job(struct nvgpu_channel *c,
 void nvgpu_channel_free_job(struct nvgpu_channel *c,
 		struct nvgpu_channel_job *job)
 {
+	(void)c;
+	(void)job;
 	/*
 	 * Nothing needed for now. The job contents are preallocated. The
 	 * completion fence may briefly outlive the job, but the job memory is
@@ -88,6 +84,7 @@ struct nvgpu_channel_job *nvgpu_channel_joblist_peek(struct nvgpu_channel *c)
 void nvgpu_channel_joblist_add(struct nvgpu_channel *c,
 		struct nvgpu_channel_job *job)
 {
+	(void)job;
 	c->joblist.pre_alloc.put = (c->joblist.pre_alloc.put + 1U) %
 		(c->joblist.pre_alloc.length);
 }
@@ -95,6 +92,7 @@ void nvgpu_channel_joblist_add(struct nvgpu_channel *c,
 void nvgpu_channel_joblist_delete(struct nvgpu_channel *c,
 		struct nvgpu_channel_job *job)
 {
+	(void)job;
 	c->joblist.pre_alloc.get = (c->joblist.pre_alloc.get + 1U) %
 		(c->joblist.pre_alloc.length);
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -51,5 +51,6 @@ int nvgpu_pbdma_setup_sw(struct gk20a *g)
 void nvgpu_pbdma_cleanup_sw(struct gk20a *g)
 {
+	(void)g;
 	return;
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -32,6 +32,7 @@
 #include <nvgpu/gk20a.h>
 #include <nvgpu/trace.h>
 #include <nvgpu/circ_buf.h>
+#include <nvgpu/string.h>
 struct priv_cmd_entry {
 	struct nvgpu_mem *mem;
@@ -313,6 +314,8 @@ void nvgpu_priv_cmdbuf_append_zeros(struct gk20a *g, struct priv_cmd_entry *e,
 void nvgpu_priv_cmdbuf_finish(struct gk20a *g, struct priv_cmd_entry *e,
 		u64 *gva, u32 *size)
 {
+	(void)g;
 	/*
 	 * The size is written to the pushbuf entry, so make sure this buffer
 	 * is complete at this point. The responsibility of the channel sync is

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -919,6 +919,8 @@ static u32 nvgpu_runlist_get_pbdma_mask(struct gk20a *g,
 	u32 i;
 	u32 pbdma_id;
+	(void)g;
 	nvgpu_assert(runlist != NULL);
 	for ( i = 0U; i < PBDMA_PER_RUNLIST_SIZE; i++) {
@@ -1019,12 +1021,12 @@ static struct nvgpu_runlist_domain *nvgpu_runlist_domain_alloc(struct gk20a *g,
 	(void)strncpy(domain->name, name, sizeof(domain->name) - 1U);
-	domain->mem = init_rl_mem(g, runlist_size);
+	domain->mem = init_rl_mem(g, (u32)runlist_size);
 	if (domain->mem == NULL) {
 		goto free_domain;
 	}
-	domain->mem_hw = init_rl_mem(g, runlist_size);
+	domain->mem_hw = init_rl_mem(g, (u32)runlist_size);
 	if (domain->mem_hw == NULL) {
 		goto free_mem;
 	}

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -284,11 +284,11 @@ static int nvgpu_submit_append_gpfifo(struct nvgpu_channel *c,
 {
 	int err;
-	if ((kern_gpfifo == NULL)
 #ifdef CONFIG_NVGPU_DGPU
-		&& (c->gpfifo.pipe == NULL)
+	if ((kern_gpfifo == NULL) && (c->gpfifo.pipe == NULL)) {
+#else
+	if (kern_gpfifo == NULL) {
 #endif
-	) {
 		/*
 		 * This path (from userspace to sysmem) is special in order to
 		 * avoid two copies unnecessarily (from user to pipe, then from

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -536,6 +536,8 @@ void nvgpu_tsg_set_unserviceable(struct gk20a *g,
 {
 	struct nvgpu_channel *ch = NULL;
+	(void)g;
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		if (nvgpu_channel_get(ch) != NULL) {
@@ -798,6 +800,7 @@ int nvgpu_tsg_set_long_timeslice(struct nvgpu_tsg *tsg, u32 timeslice_us)
 u32 nvgpu_tsg_default_timeslice_us(struct gk20a *g)
 {
+	(void)g;
 	return NVGPU_TSG_TIMESLICE_DEFAULT_US;
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -28,6 +28,7 @@
 #include <nvgpu/io.h>
 #include <nvgpu/gmmu.h>
 #include <nvgpu/dma.h>
+#include <nvgpu/string.h>
 #include <nvgpu/power_features/pg.h>
 #include "common/gr/ctx_priv.h"
@@ -162,6 +163,8 @@ void nvgpu_gr_ctx_free_patch_ctx(struct gk20a *g, struct vm_gk20a *vm,
 {
 	struct patch_desc *patch_ctx = &gr_ctx->patch_ctx;
+	(void)g;
 	if (nvgpu_mem_is_valid(&patch_ctx->mem)) {
 		nvgpu_dma_unmap_free(vm, &patch_ctx->mem);
 		patch_ctx->data_count = 0;
@@ -201,6 +204,9 @@ static int nvgpu_gr_ctx_map_ctx_circular_buffer(struct gk20a *g,
 	u32 *g_bfr_index;
 	u64 gpu_va = 0ULL;
+	(void)g;
+	(void)vpr;
 	g_bfr_va = &gr_ctx->global_ctx_buffer_va[0];
 	g_bfr_index = &gr_ctx->global_ctx_buffer_index[0];
@@ -242,6 +248,9 @@ static int nvgpu_gr_ctx_map_ctx_attribute_buffer(struct gk20a *g,
 	u32 *g_bfr_index;
 	u64 gpu_va = 0ULL;
+	(void)g;
+	(void)vpr;
 	g_bfr_va = &gr_ctx->global_ctx_buffer_va[0];
 	g_bfr_index = &gr_ctx->global_ctx_buffer_index[0];
@@ -284,6 +293,9 @@ static int nvgpu_gr_ctx_map_ctx_pagepool_buffer(struct gk20a *g,
 	u32 *g_bfr_index;
 	u64 gpu_va = 0ULL;
+	(void)g;
+	(void)vpr;
 	g_bfr_va = &gr_ctx->global_ctx_buffer_va[0];
 	g_bfr_index = &gr_ctx->global_ctx_buffer_index[0];
@@ -326,6 +338,8 @@ static int nvgpu_gr_ctx_map_ctx_buffer(struct gk20a *g,
 	u32 *g_bfr_index;
 	u64 gpu_va = 0ULL;
+	(void)g;
 	g_bfr_va = &gr_ctx->global_ctx_buffer_va[0];
 	g_bfr_index = &gr_ctx->global_ctx_buffer_index[0];
@@ -479,6 +493,8 @@ void nvgpu_gr_ctx_load_golden_ctx_image(struct gk20a *g,
 	u64 virt_addr = 0;
 #endif
+	(void)cde;
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, " ");
 	mem = &gr_ctx->mem;
@@ -724,6 +740,8 @@ void nvgpu_gr_ctx_set_zcull_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
 {
 	struct zcull_ctx_desc *zcull_ctx = &gr_ctx->zcull_ctx;
+	(void)g;
 	zcull_ctx->ctx_sw_mode = mode;
 	zcull_ctx->gpu_va = gpu_va;
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -135,7 +135,7 @@ int nvgpu_gr_fecs_trace_init(struct gk20a *g)
 {
 	struct nvgpu_gr_fecs_trace *trace;
-	if (!is_power_of_2(GK20A_FECS_TRACE_NUM_RECORDS)) {
+	if (!is_power_of_2((u32)GK20A_FECS_TRACE_NUM_RECORDS)) {
 		nvgpu_err(g, "invalid NUM_RECORDS chosen");
 		nvgpu_set_enabled(g, NVGPU_SUPPORT_FECS_CTXSW_TRACE, false);
 		return -EINVAL;
@@ -189,8 +189,8 @@ int nvgpu_gr_fecs_trace_deinit(struct gk20a *g)
 int nvgpu_gr_fecs_trace_num_ts(struct gk20a *g)
 {
-	return (g->ops.gr.ctxsw_prog.hw_get_ts_record_size_in_bytes()
-		- sizeof(struct nvgpu_fecs_trace_record)) / sizeof(u64);
+	return (int)((g->ops.gr.ctxsw_prog.hw_get_ts_record_size_in_bytes()
+		- sizeof(struct nvgpu_fecs_trace_record)) / sizeof(u64));
 }
 struct nvgpu_fecs_trace_record *nvgpu_gr_fecs_trace_get_record(
@@ -207,7 +207,7 @@ struct nvgpu_fecs_trace_record *nvgpu_gr_fecs_trace_get_record(
 	return (struct nvgpu_fecs_trace_record *)
 		((u8 *) mem->cpu_va +
-		(idx * g->ops.gr.ctxsw_prog.hw_get_ts_record_size_in_bytes()));
+		((u32)idx * g->ops.gr.ctxsw_prog.hw_get_ts_record_size_in_bytes()));
 }
 bool nvgpu_gr_fecs_trace_is_valid_record(struct gk20a *g,
@@ -262,8 +262,8 @@ int nvgpu_gr_fecs_trace_enable(struct gk20a *g)
 		 * (Bit 31:31) should be set to 1. Bits 30:0 represents
 		 * actual pointer value.
 		 */
-		write = write |
-			(BIT32(NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT));
+		write = (int)((u32)write |
+			(BIT32(NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT)));
 	}
 	g->ops.gr.fecs_trace.set_read_index(g, write);
@@ -315,8 +315,8 @@ int nvgpu_gr_fecs_trace_disable(struct gk20a *g)
 		 * For disabling FECS trace support, MAILBOX1's MSB
 		 * (Bit 31:31) should be set to 0.
 		 */
-		read = g->ops.gr.fecs_trace.get_read_index(g) &
-			(~(BIT32(NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT)));
+		read = (int)((u32)(g->ops.gr.fecs_trace.get_read_index(g)) &
+			(~(BIT32(NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT))));
 		g->ops.gr.fecs_trace.set_read_index(g, read);
@@ -420,7 +420,7 @@ int nvgpu_gr_fecs_trace_ring_read(struct gk20a *g, int index,
 	/* break out FECS record into trace events */
 	for (i = 0; i < nvgpu_gr_fecs_trace_num_ts(g); i++) {
-		entry.tag = g->ops.gr.ctxsw_prog.hw_get_ts_tag(r->ts[i]);
+		entry.tag = (u8)g->ops.gr.ctxsw_prog.hw_get_ts_tag(r->ts[i]);
 		entry.timestamp =
 			g->ops.gr.ctxsw_prog.hw_record_ts_timestamp(r->ts[i]);
 		entry.timestamp <<= GK20A_FECS_TRACE_PTIMER_SHIFT;
@@ -434,8 +434,8 @@ int nvgpu_gr_fecs_trace_ring_read(struct gk20a *g, int index,
 		case NVGPU_GPU_CTXSW_TAG_RESTORE_START:
 		case NVGPU_GPU_CTXSW_TAG_CONTEXT_START:
 			entry.context_id = r->new_context_id;
-			entry.pid = new_pid;
-			entry.vmid = new_vmid;
+			entry.pid = (u64)new_pid;
+			entry.vmid = (u8)new_vmid;
 			break;
 		case NVGPU_GPU_CTXSW_TAG_CTXSW_REQ_BY_HOST:
@@ -446,8 +446,8 @@ int nvgpu_gr_fecs_trace_ring_read(struct gk20a *g, int index,
 		case NVGPU_GPU_CTXSW_TAG_FE_ACK_CILP:
 		case NVGPU_GPU_CTXSW_TAG_SAVE_END:
 			entry.context_id = r->context_id;
-			entry.pid = cur_pid;
-			entry.vmid = cur_vmid;
+			entry.pid = (u64)cur_pid;
+			entry.vmid = (u8)cur_vmid;
 			break;
 		default:
@@ -474,7 +474,7 @@ int nvgpu_gr_fecs_trace_ring_read(struct gk20a *g, int index,
 		count++;
 	}
-	nvgpu_gr_fecs_trace_wake_up(g, vmid);
+	nvgpu_gr_fecs_trace_wake_up(g, (int)vmid);
 	return count;
 }
@@ -524,7 +524,7 @@ int nvgpu_gr_fecs_trace_poll(struct gk20a *g)
 	if (nvgpu_is_enabled(g, NVGPU_FECS_TRACE_FEATURE_CONTROL)) {
 		/* Bits 30:0 of MAILBOX1 represents actual read pointer value */
-		read = read & (~(BIT32(NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT)));
+		read = ((u32)read) & (~(BIT32(NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT)));
 	}
 	while (read != write) {
@@ -543,7 +543,7 @@ int nvgpu_gr_fecs_trace_poll(struct gk20a *g)
 		 * So, MSB of read pointer should be set back to 1. This will
 		 * keep FECS trace enabled.
 		 */
-		read = read | (BIT32(NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT));
+		read = (int)(((u32)read) | (BIT32(NVGPU_FECS_TRACE_FEATURE_CONTROL_BIT)));
 	}
 	/* ensure FECS records has been updated before incrementing read index */

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -390,6 +390,7 @@ void nvgpu_gr_global_ctx_init_local_golden_image(struct gk20a *g,
 	struct nvgpu_gr_global_ctx_local_golden_image *local_golden_image,
 	struct nvgpu_mem *source_mem, size_t size)
 {
+	(void)size;
 	nvgpu_mem_rd_n(g, source_mem, 0, local_golden_image->context,
 		nvgpu_safe_cast_u64_to_u32(local_golden_image->size));
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -119,7 +119,7 @@ static int gr_alloc_global_ctx_buffers(struct gk20a *g, struct nvgpu_gr *gr)
 		NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP, size);
 #ifdef CONFIG_NVGPU_FECS_TRACE
-	size = nvgpu_gr_fecs_trace_buffer_size(g);
+	size = (u32)nvgpu_gr_fecs_trace_buffer_size(g);
 	nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr, "fecs_trace_buffer_size : %d", size);
 	nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -159,6 +159,8 @@ static void gr_config_set_gpc_mask(struct gk20a *g,
 	if (g->ops.gr.config.get_gpc_mask != NULL) {
 		config->gpc_mask = g->ops.gr.config.get_gpc_mask(g);
 	} else
+#else
+	(void)g;
 #endif
 	{
 		config->gpc_mask = nvgpu_safe_sub_u32(BIT32(config->gpc_count),

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -31,6 +31,7 @@
 #if defined(CONFIG_NVGPU_CYCLESTATS)
 #include <nvgpu/cyclestats.h>
 #endif
+#include <nvgpu/string.h>
 #include <nvgpu/gr/gr.h>
 #include <nvgpu/gr/gr_intr.h>
@@ -437,6 +438,8 @@ int nvgpu_gr_intr_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
 	bool disable_sm_exceptions = true;
 #endif
+	(void)post_event;
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
 	global_esr = g->ops.gr.intr.get_sm_hww_global_esr(g, gpc, tpc, sm);
@@ -525,6 +528,8 @@ int nvgpu_gr_intr_handle_fecs_error(struct gk20a *g, struct nvgpu_channel *ch,
 	u32 mailbox_id = NVGPU_GR_FALCON_FECS_CTXSW_MAILBOX6;
 	struct nvgpu_fecs_host_intr_status *fecs_host_intr;
+	(void)ch;
 	gr_fecs_intr = isr_data->fecs_intr;
 	if (gr_fecs_intr == 0U) {
 		return 0;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -143,6 +143,10 @@ static int nvgpu_gr_obj_ctx_set_graphics_preemption_mode(struct gk20a *g,
 {
 	int err = 0;
+	(void)config;
+	(void)gr_ctx_desc;
+	(void)vm;
 	/* set preemption modes */
 	switch (graphics_preempt_mode) {
 #ifdef CONFIG_NVGPU_GFXP
@@ -263,6 +267,9 @@ void nvgpu_gr_obj_ctx_update_ctxsw_preemption_mode(struct gk20a *g,
 	struct nvgpu_mem *mem;
 #endif
+	(void)config;
+	(void)subctx;
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, " ");
 	nvgpu_gr_ctx_set_preemption_modes(g, gr_ctx);
@@ -802,6 +809,9 @@ int nvgpu_gr_obj_ctx_alloc(struct gk20a *g,
 {
 	int err = 0;
+	(void)class_num;
+	(void)flags;
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, " ");
 	err = nvgpu_gr_obj_ctx_gr_ctx_alloc(g, golden_image, gr_ctx_desc,

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -412,6 +412,8 @@ static void nvgpu_gr_zbc_load_default_sw_stencil_table(struct gk20a *g,
 {
 	u32 index = zbc->min_stencil_index;
+	(void)g;
 	zbc->zbc_s_tbl[index].stencil = 0x0;
 	zbc->zbc_s_tbl[index].format = GR_ZBC_STENCIL_CLEAR_FMT_U8;
 	zbc->zbc_s_tbl[index].ref_cnt =
@@ -437,6 +439,8 @@ static void nvgpu_gr_zbc_load_default_sw_depth_table(struct gk20a *g,
 {
 	u32 index = zbc->min_depth_index;
+	(void)g;
 	zbc->zbc_dep_tbl[index].format = GR_ZBC_Z_FMT_VAL_FP32;
 	zbc->zbc_dep_tbl[index].depth = 0x3f800000;
 	zbc->zbc_dep_tbl[index].ref_cnt =
@@ -457,6 +461,8 @@ static void nvgpu_gr_zbc_load_default_sw_color_table(struct gk20a *g,
 	u32 i;
 	u32 index = zbc->min_color_index;
+	(void)g;
 	/* Opaque black (i.e. solid black, fmt 0x28 = A8B8G8R8) */
 	zbc->zbc_col_tbl[index].format = GR_ZBC_SOLID_BLACK_COLOR_FMT;
 	for (i = 0U; i < NVGPU_GR_ZBC_COLOR_VALUE_SIZE; i++) {

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -82,6 +82,8 @@ void nvgpu_gr_zcull_deinit(struct gk20a *g, struct nvgpu_gr_zcull *gr_zcull)
 u32 nvgpu_gr_get_ctxsw_zcull_size(struct gk20a *g,
 	struct nvgpu_gr_zcull *gr_zcull)
 {
+	(void)g;
 	/* assuming zcull has already been initialized */
 	return gr_zcull->zcull_ctxsw_image_size;
 }

View File

@@ -1,7 +1,7 @@
 /*
  * GR MANAGER
  *
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -93,7 +93,7 @@ int nvgpu_init_gr_manager(struct gk20a *g)
 	for (gpc_id = 0U; gpc_id < gr_syspipe->num_gpc; gpc_id++) {
 		gr_syspipe->gpcs[gpc_id].logical_id = gpc_id;
 		nvgpu_assert(local_gpc_mask != 0U);
-		ffs_bit = nvgpu_ffs(local_gpc_mask) - 1U;
+		ffs_bit = (u32)(nvgpu_ffs(local_gpc_mask) - 1U);
 		local_gpc_mask &= ~(1U << ffs_bit);
 		gr_syspipe->gpcs[gpc_id].physical_id = ffs_bit;
 		gr_syspipe->gpcs[gpc_id].gpcgrp_id = 0U;
@@ -391,6 +391,10 @@ int nvgpu_grmgr_config_gr_remap_window(struct gk20a *g,
 			g->mig.cur_tid, g->mig.current_gr_syspipe_id,
 			gr_syspipe_id, enable, g->mig.recursive_ref_count);
 	}
+#else
+	(void)g;
+	(void)gr_syspipe_id;
+	(void)enable;
 #endif
 	return err;
 }

View File

@@ -529,6 +529,7 @@ static int nvgpu_init_boot_clk_or_clk_arb(struct gk20a *g)
 {
 	int err = 0;
+	(void)g;
 #ifdef CONFIG_NVGPU_LS_PMU
 	if (nvgpu_is_enabled(g, NVGPU_PMU_PSTATE) &&
 		(g->pmu->fw->ops.clk.clk_set_boot_clk != NULL)) {
@@ -566,6 +567,7 @@ static int nvgpu_init_per_device_identifier(struct gk20a *g)
 static int nvgpu_init_set_debugger_mode(struct gk20a *g)
 {
+	(void)g;
 #ifdef CONFIG_NVGPU_DEBUGGER
 	/* Restore the debug setting */
 	g->ops.fb.set_debug_mode(g, g->mmu_debug_ctrl);
@@ -603,6 +605,8 @@ static int nvgpu_init_xve_set_speed(struct gk20a *g)
 			return err;
 		}
 	}
+#else
+	(void)g;
 #endif
 	return 0;
 }

View File

@@ -67,6 +67,8 @@ static u64 nvgpu_bitmap_balloc_fixed(struct nvgpu_allocator *na,
 	struct nvgpu_bitmap_allocator *a = bitmap_allocator(na);
 	u64 blks, offs, ret;
+	(void)page_size;
 	/* Compute the bit offset and make sure it's aligned to a block. */
 	offs = base >> a->blk_shift;
 	if (nvgpu_safe_mult_u64(offs, a->blk_size) != base) {

View File

@@ -28,6 +28,7 @@
 #include <nvgpu/mm.h>
 #include <nvgpu/vm.h>
 #include <nvgpu/static_analysis.h>
+#include <nvgpu/string.h>
 #include "buddy_allocator_priv.h"

View File

@@ -1,7 +1,7 @@
 /*
  * gk20a allocator
  *
- * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -25,6 +25,7 @@
 #include <nvgpu/allocator.h>
 #include <nvgpu/gk20a.h>
+#include <nvgpu/string.h>
 u64 nvgpu_alloc_length(struct nvgpu_allocator *a)
 {

View File

@@ -940,6 +940,15 @@ static void nvgpu_gmmu_update_page_table_dbg_print(struct gk20a *g,
 		attrs->priv ? 'P' : '-',
 		attrs->valid ? 'V' : '-',
 		attrs->platform_atomic ? 'A' : '-');
+#else
+	(void)g;
+	(void)attrs;
+	(void)vm;
+	(void)sgt;
+	(void)space_to_skip;
+	(void)virt_addr;
+	(void)length;
+	(void)page_size;
 #endif /* CONFIG_NVGPU_TRACE */
 }
@@ -1077,6 +1086,8 @@ u64 nvgpu_gmmu_map_locked(struct vm_gk20a *vm,
 		attrs.l3_alloc = false;
 	}
 #endif
+	(void)clear_ctags;
+	(void)ctag_offset;
 	/*
 	 * Only allocate a new GPU VA range if we haven't already been passed a
@@ -1171,6 +1182,8 @@ void nvgpu_gmmu_unmap_locked(struct vm_gk20a *vm,
 	struct gk20a *g = gk20a_from_vm(vm);
 	struct nvgpu_gmmu_attrs attrs = gmmu_unmap_attrs(pgsz_idx);
+	(void)rw_flag;
 	attrs.sparse = sparse;
 	if (va_allocated) {

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -30,6 +30,7 @@
 #include <nvgpu/gk20a.h>
 #include <nvgpu/enabled.h>
 #include <nvgpu/static_analysis.h>
+#include <nvgpu/string.h>
 #include "pd_cache_priv.h"


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -332,12 +332,16 @@ static u64 nvgpu_mem_phys_sgl_phys(struct gk20a *g, void *sgl)
 {
 	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
+	(void)g;
 	return sgl_impl->phys;
 }
 static u64 nvgpu_mem_phys_sgl_ipa_to_pa(struct gk20a *g,
 		void *sgl, u64 ipa, u64 *pa_len)
 {
+	(void)g;
+	(void)sgl;
+	(void)pa_len;
 	return ipa;
 }
@@ -353,11 +357,15 @@ static u64 nvgpu_mem_phys_sgl_gpu_addr(struct gk20a *g, void *sgl,
 {
 	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
+	(void)g;
+	(void)attrs;
 	return sgl_impl->phys;
 }
 static void nvgpu_mem_phys_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt)
 {
+	(void)g;
+	(void)sgt;
 	/*
 	 * No-op here. The free is handled by freeing the nvgpu_mem itself.
 	 */


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -41,6 +41,7 @@
 #include <nvgpu/static_analysis.h>
 #include <nvgpu/power_features/pg.h>
 #include <nvgpu/nvhost.h>
+#include <nvgpu/string.h>
 struct nvgpu_ctag_buffer_info {
 	u64 size;
@@ -593,6 +594,7 @@ static int nvgpu_vm_init_check_vma_limits(struct gk20a *g, struct vm_gk20a *vm,
 		u64 user_lp_vma_start, u64 user_lp_vma_limit,
 		u64 kernel_vma_start, u64 kernel_vma_limit)
 {
+	(void)vm;
 	if ((user_vma_start > user_vma_limit) ||
 		(user_lp_vma_start > user_lp_vma_limit) ||
 		(kernel_vma_start >= kernel_vma_limit)) {
@@ -723,6 +725,8 @@ static int nvgpu_vm_init_attributes(struct mm_gk20a *mm,
 	u64 aperture_size;
 	u64 default_aperture_size;
+	(void)big_pages;
 	g->ops.mm.get_default_va_sizes(&default_aperture_size, NULL, NULL);
 	aperture_size = nvgpu_safe_add_u64(kernel_reserved,
@@ -1185,6 +1189,8 @@ static int nvgpu_vm_do_map(struct vm_gk20a *vm,
 	 */
 	u8 pte_kind;
+	(void)os_buf;
+	(void)flags;
 #ifdef CONFIG_NVGPU_COMPRESSION
 	err = nvgpu_vm_compute_compression(vm, binfo_ptr);
 	if (err != 0) {
@@ -1216,7 +1222,7 @@ static int nvgpu_vm_do_map(struct vm_gk20a *vm,
 	}
 	if (binfo_ptr->compr_kind != NVGPU_KIND_INVALID) {
-		struct gk20a_comptags comptags = { 0 };
+		struct gk20a_comptags comptags = { };
 		/*
 		 * Get the comptags state
@@ -1410,6 +1416,8 @@ static int nvgpu_vm_map_check_attributes(struct vm_gk20a *vm,
 {
 	struct gk20a *g = gk20a_from_vm(vm);
+	(void)compr_kind;
 	if (vm->userspace_managed &&
 		((flags & NVGPU_VM_MAP_FIXED_OFFSET) == 0U)) {
 		nvgpu_err(g,
@@ -1461,7 +1469,7 @@ int nvgpu_vm_map(struct vm_gk20a *vm,
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	struct nvgpu_mapped_buf *mapped_buffer = NULL;
-	struct nvgpu_ctag_buffer_info binfo = { 0 };
+	struct nvgpu_ctag_buffer_info binfo = { };
 	enum gk20a_mem_rw_flag rw = buffer_rw_mode;
 	struct nvgpu_vm_area *vm_area = NULL;
 	int err = 0;
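On the { 0 } → { } initializer changes above: both forms zero-initialize the whole object, but with some GCC versions -Wmissing-braces can complain about = { 0 } when the struct's first member is itself an aggregate, while the empty initializer (a GNU extension, standardized in C23) names no member at all. A small illustration with made-up types:

/* Made-up types, for illustration only. */
struct inner {
	unsigned int a;
	unsigned int b;
};

struct outer {
	struct inner first;	/* aggregate first member */
	unsigned long flags;
};

int main(void)
{
	/*
	 * '= { 0 }' would only name 'first.a' and can draw -Wmissing-braces
	 * on some compilers; the empty braces sidestep that while still
	 * zero-initializing every member.
	 */
	struct outer o = { };

	return (int)o.flags;
}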


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -263,6 +263,8 @@ static bool nvgpu_netlist_handle_generic_region_id(struct gk20a *g,
 {
 	bool handled = true;
+	(void)size;
 	switch (region_id) {
 	case NETLIST_REGIONID_BUFFER_SIZE:
 		nvgpu_memcpy((u8 *)&netlist_vars->buffer_size,


@@ -1,7 +1,7 @@
 /*
  * Cycle stats snapshots support
  *
- * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -36,6 +36,7 @@
 #include <nvgpu/gk20a.h>
 #include <nvgpu/channel.h>
 #include <nvgpu/cyclestats_snapshot.h>
+#include <nvgpu/string.h>
 /* check client for pointed perfmon ownership */
 #define CONTAINS_PERFMON(cl, pm) \
@@ -336,12 +337,12 @@ next_hw_fifo_entry:
 	/* re-set HW buffer after processing taking wrapping into account */
 	if (css->hw_get < src) {
 		(void) memset(css->hw_get, 0xff,
-				(src - css->hw_get) * sizeof(*src));
+				(size_t)(src - css->hw_get) * sizeof(*src));
 	} else {
 		(void) memset(css->hw_snapshot, 0xff,
-				(src - css->hw_snapshot) * sizeof(*src));
+				(size_t)(src - css->hw_snapshot) * sizeof(*src));
 		(void) memset(css->hw_get, 0xff,
-				(css->hw_end - css->hw_get) * sizeof(*src));
+				(size_t)(css->hw_end - css->hw_get) * sizeof(*src));
 	}
 	g->cs_data->hw_get = src;
@@ -602,5 +603,6 @@ int nvgpu_css_check_data_available(struct nvgpu_channel *ch, u32 *pending,
 u32 nvgpu_css_get_max_buffer_size(struct gk20a *g)
 {
+	(void)g;
 	return 0xffffffffU;
 }
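The (size_t) casts in the memset() lengths above are there because subtracting two pointers yields the signed type ptrdiff_t, which is then converted to memset()'s size_t length parameter; with sign-conversion warnings treated as errors, that conversion has to be written explicitly. A stand-alone sketch of the same shape (the ring buffer here is invented):

#include <string.h>

#define RING_WORDS 64U

static unsigned int ring[RING_WORDS];

/* Clear every slot from 'get' up to (but not including) 'end'. */
static void clear_consumed(unsigned int *get, unsigned int *end)
{
	/* 'end - get' is ptrdiff_t (signed); cast before computing bytes. */
	(void) memset(get, 0xff, (size_t)(end - get) * sizeof(*get));
}

int main(void)
{
	clear_consumed(&ring[8], &ring[RING_WORDS]);
	return 0;
}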


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -245,7 +245,9 @@ void nvgpu_pm_reservation_release_all_per_vmid(struct gk20a *g, u32 vmid)
 			nvgpu_list_del(&reservation_entry->entry);
 			reservations->count--;
 			nvgpu_kfree(g, reservation_entry);
-			prepare_resource_reservation(g, i, false);
+			prepare_resource_reservation(g,
+				(enum nvgpu_profiler_pm_resource_type)i,
+				false);
 		}
 	}
 	nvgpu_mutex_release(&reservations->lock);


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -63,7 +63,7 @@ int nvgpu_profiler_alloc(struct gk20a *g,
 		return -ENOMEM;
 	}
-	prof->prof_handle = generate_unique_id();
+	prof->prof_handle = (u32)generate_unique_id();
 	prof->scope = scope;
 	prof->gpu_instance_id = gpu_instance_id;
 	prof->g = g;
@@ -138,7 +138,8 @@ int nvgpu_profiler_unbind_context(struct nvgpu_profiler_object *prof)
 		if (prof->reserved[i]) {
 			nvgpu_warn(g, "Releasing reserved resource %u for handle %u",
 				i, prof->prof_handle);
-			nvgpu_profiler_pm_resource_release(prof, i);
+			nvgpu_profiler_pm_resource_release(prof,
+				(enum nvgpu_profiler_pm_resource_type)i);
 		}
 	}
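In the two profiler hunks above, a plain u32 loop index is now cast to enum nvgpu_profiler_pm_resource_type before being passed to functions whose parameter has that enum type, making the integer-to-enum conversion explicit instead of implicit. The same shape in miniature, with hypothetical names:

/* Hypothetical resource enumeration and release hook. */
enum pm_resource {
	PM_RESOURCE_HWPM = 0,
	PM_RESOURCE_SMPC,
	PM_RESOURCE_COUNT
};

static void release_resource(enum pm_resource r)
{
	(void)r;	/* nothing to release in this sketch */
}

int main(void)
{
	unsigned int i;

	for (i = 0U; i < (unsigned int)PM_RESOURCE_COUNT; i++) {
		/* spell out the integer-to-enum conversion */
		release_resource((enum pm_resource)i);
	}
	return 0;
}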


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -83,6 +83,8 @@ int nvgpu_get_timestamps_zipper(struct gk20a *g,
 	int err = 0;
 	unsigned int i = 0;
+	(void)source_id;
 	if (gk20a_busy(g) != 0) {
 		nvgpu_err(g, "GPU not powered on\n");
 		err = -EINVAL;


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -61,6 +61,12 @@ void nvgpu_rc_fifo_recover(struct gk20a *g, u32 eng_bitmask,
 			rc_type, NULL);
 #else
 	WARN_ON(!g->sw_quiesce_pending);
+	(void)eng_bitmask;
+	(void)hw_id;
+	(void)id_is_tsg;
+	(void)id_is_known;
+	(void)debug_dump;
+	(void)rc_type;
 #endif
 }
@@ -83,6 +89,8 @@ void nvgpu_rc_ctxsw_timeout(struct gk20a *g, u32 eng_bitmask,
 			RC_TYPE_CTXSW_TIMEOUT);
 #else
 	WARN_ON(!g->sw_quiesce_pending);
+	(void)eng_bitmask;
+	(void)debug_dump;
 #endif
 }
@@ -162,6 +170,7 @@ void nvgpu_rc_runlist_update(struct gk20a *g, u32 runlist_id)
 	 * on time.
 	 */
 	WARN_ON(!g->sw_quiesce_pending);
+	(void)runlist_id;
 #endif
 }
@@ -209,6 +218,8 @@ void nvgpu_rc_gr_fault(struct gk20a *g, struct nvgpu_tsg *tsg,
 	}
 #else
 	WARN_ON(!g->sw_quiesce_pending);
+	(void)tsg;
+	(void)ch;
 #endif
 	nvgpu_log(g, gpu_dbg_gr, "done");
 }
@@ -292,6 +303,9 @@ void nvgpu_rc_tsg_and_related_engines(struct gk20a *g, struct nvgpu_tsg *tsg,
 #endif
 #else
 	WARN_ON(!g->sw_quiesce_pending);
+	(void)tsg;
+	(void)debug_dump;
+	(void)rc_type;
 #endif
 }
@@ -313,5 +327,7 @@ void nvgpu_rc_mmu_fault(struct gk20a *g, u32 act_eng_bitmask,
 	}
 	WARN_ON(!g->sw_quiesce_pending);
+	(void)rc_type;
+	(void)mmufault;
 #endif
 }


@@ -1,7 +1,7 @@
 /*
  * Tegra GK20A GPU Debugger Driver Register Ops
  *
- * Copyright (c) 2013-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2013-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -368,7 +368,7 @@ static int profiler_obj_validate_reg_op_offset(struct nvgpu_profiler_object *pro
 		nvgpu_assert(type == type64);
 	}
-	op->type = prof->reg_op_type[type];
+	op->type = (u8)prof->reg_op_type[type];
 	return 0;
 }


@@ -84,7 +84,7 @@ int nvgpu_riscv_hs_ucode_load_bootstrap(struct nvgpu_falcon *flcn,
 	g->ops.falcon.set_bcr(flcn);
 	err = nvgpu_falcon_get_mem_size(flcn, MEM_DMEM, &dmem_size);
 	err = nvgpu_falcon_copy_to_imem(flcn, 0x0, code_fw->data,
-			code_fw->size, 0, true, 0x0);
+			(u32)code_fw->size, 0, true, 0x0);
 	if (err != 0) {
 		nvgpu_err(g, "RISCV code copy to IMEM failed");
@@ -92,14 +92,14 @@ int nvgpu_riscv_hs_ucode_load_bootstrap(struct nvgpu_falcon *flcn,
 	}
 	err = nvgpu_falcon_copy_to_dmem(flcn, 0x0, data_fw->data,
-			data_fw->size, 0x0);
+			(u32)data_fw->size, 0x0);
 	if (err != 0) {
 		nvgpu_err(g, "RISCV data copy to DMEM failed");
 		goto exit;
 	}
-	err = nvgpu_falcon_copy_to_dmem(flcn, dmem_size - manifest_fw->size,
-			manifest_fw->data, manifest_fw->size, 0x0);
+	err = nvgpu_falcon_copy_to_dmem(flcn, (u32)(dmem_size - manifest_fw->size),
+			manifest_fw->data, (u32)manifest_fw->size, 0x0);
 	if (err != 0) {
 		nvgpu_err(g, "RISCV manifest copy to DMEM failed");
 		goto exit;
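The (u32) casts on the firmware sizes above follow the usual pattern for conversion warnings: a wider size field (presumably size_t or u64 in the firmware descriptor) is handed to an interface that takes a 32-bit length, so the narrowing is made explicit at the call site. A reduced example with invented types and names:

#include <stdint.h>
#include <stddef.h>

/* Invented stand-in for a firmware blob descriptor. */
struct fw_blob {
	const uint8_t *data;
	size_t size;
};

/* Invented copy interface that only accepts a 32-bit length. */
static int copy_to_mem(uint32_t off, const uint8_t *src, uint32_t len)
{
	(void)off;
	(void)src;
	(void)len;
	return 0;
}

int main(void)
{
	static const uint8_t image[16] = { 0 };
	struct fw_blob fw = { image, sizeof(image) };

	/* size_t -> uint32_t narrows, so the conversion is written out. */
	return copy_to_mem(0U, fw.data, (uint32_t)fw.size);
}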


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -98,17 +98,17 @@ static int rpc_send_message(struct gk20a *g)
 {
 	/* calculations done in units of u32s */
 	u32 send_base = sim_send_put_pointer_v(g->sim->send_ring_put) * 2;
-	u32 dma_offset = send_base + sim_dma_r()/sizeof(u32);
-	u32 dma_hi_offset = send_base + sim_dma_hi_r()/sizeof(u32);
+	u32 dma_offset = (u32)(send_base + sim_dma_r()/sizeof(u32));
+	u32 dma_hi_offset = (u32)(send_base + sim_dma_hi_r()/sizeof(u32));
-	*sim_send_ring_bfr(g, dma_offset*sizeof(u32)) =
+	*sim_send_ring_bfr(g, (u32)(dma_offset*sizeof(u32))) =
 		sim_dma_target_phys_pci_coherent_f() |
 		sim_dma_status_valid_f() |
 		sim_dma_size_4kb_f() |
-		sim_dma_addr_lo_f(nvgpu_mem_get_addr(g, &g->sim->msg_bfr)
-				>> sim_dma_addr_lo_b());
-	*sim_send_ring_bfr(g, dma_hi_offset*sizeof(u32)) =
+		sim_dma_addr_lo_f((u32)(nvgpu_mem_get_addr(g, &g->sim->msg_bfr)
+				>> sim_dma_addr_lo_b()));
+	*sim_send_ring_bfr(g, (u32)(dma_hi_offset*sizeof(u32))) =
 		u64_hi32(nvgpu_mem_get_addr(g, &g->sim->msg_bfr));
 	*sim_msg_hdr(g, sim_msg_sequence_r()) = g->sim->sequence_base++;
@@ -198,7 +198,7 @@ int issue_rpc_and_wait(struct gk20a *g)
 	if (*sim_msg_hdr(g, sim_msg_result_r()) != sim_msg_result_success_v()) {
 		nvgpu_err(g, "%s received failed status!",
 			__func__);
-		return -(*sim_msg_hdr(g, sim_msg_result_r()));
+		return -(int)(*sim_msg_hdr(g, sim_msg_result_r()));
 	}
 	return 0;
 }
@@ -214,7 +214,7 @@ static void nvgpu_sim_esc_readl(struct gk20a *g,
 			sim_escape_read_hdr_size());
 	*sim_msg_param(g, 0) = index;
 	*sim_msg_param(g, 4) = sizeof(u32);
-	data_offset = round_up(0xc + pathlen + 1, sizeof(u32));
+	data_offset = (u32)round_up(0xc + pathlen + 1, sizeof(u32));
 	*sim_msg_param(g, 8) = data_offset;
 	strcpy((char *)sim_msg_param(g, 0xc), path);
@@ -264,7 +264,7 @@ static int nvgpu_sim_init_late(struct gk20a *g)
 		sim_send_ring_status_valid_f() |
 		sim_send_ring_target_phys_pci_coherent_f() |
 		sim_send_ring_size_4kb_f() |
-		sim_send_ring_addr_lo_f(phys >> sim_send_ring_addr_lo_b()));
+		sim_send_ring_addr_lo_f((u32)(phys >> sim_send_ring_addr_lo_b())));
 	/*repeat for recv ring (but swap put,get as roles are opposite) */
 	sim_writel(g->sim, sim_recv_ring_r(), sim_recv_ring_status_invalid_f());
@@ -281,7 +281,7 @@ static int nvgpu_sim_init_late(struct gk20a *g)
 		sim_recv_ring_status_valid_f() |
 		sim_recv_ring_target_phys_pci_coherent_f() |
 		sim_recv_ring_size_4kb_f() |
-		sim_recv_ring_addr_lo_f(phys >> sim_recv_ring_addr_lo_b()));
+		sim_recv_ring_addr_lo_f((u32)(phys >> sim_recv_ring_addr_lo_b())));
 	return 0;


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -67,22 +67,22 @@ static int rpc_send_message(struct gk20a *g)
 {
 	/* calculations done in units of u32s */
 	u32 send_base = sim_send_put_pointer_v(g->sim->send_ring_put) * 2;
-	u32 dma_offset = send_base + sim_dma_r()/sizeof(u32);
-	u32 dma_hi_offset = send_base + sim_dma_hi_r()/sizeof(u32);
+	u32 dma_offset = send_base + sim_dma_r()/(u32)sizeof(u32);
+	u32 dma_hi_offset = send_base + sim_dma_hi_r()/4U;
-	*sim_send_ring_bfr(g, dma_offset*sizeof(u32)) =
+	*sim_send_ring_bfr(g, dma_offset*4U) =
 		sim_dma_target_phys_pci_coherent_f() |
 		sim_dma_status_valid_f() |
 		sim_dma_size_4kb_f() |
-		sim_dma_addr_lo_f(nvgpu_mem_get_phys_addr(g, &g->sim->msg_bfr)
-				>> sim_dma_addr_lo_b());
-	*sim_send_ring_bfr(g, dma_hi_offset*sizeof(u32)) =
+		sim_dma_addr_lo_f((u32)(nvgpu_mem_get_phys_addr(g, &g->sim->msg_bfr)
+				>> sim_dma_addr_lo_b()));
+	*sim_send_ring_bfr(g, dma_hi_offset*4U) =
 		u64_hi32(nvgpu_mem_get_phys_addr(g, &g->sim->msg_bfr));
 	*sim_msg_hdr(g, sim_msg_sequence_r()) = g->sim->sequence_base++;
-	g->sim->send_ring_put = (g->sim->send_ring_put + 2 * sizeof(u32)) %
+	g->sim->send_ring_put = (g->sim->send_ring_put + 2 * 4U) %
 		SIM_BFR_SIZE;
 	/* Update the put pointer. This will trap into the host. */
@@ -130,7 +130,7 @@ static int rpc_recv_poll(struct gk20a *g)
 	}
 	/* Update GET pointer */
-	g->sim->recv_ring_get = (g->sim->recv_ring_get + 2*sizeof(u32))
+	g->sim->recv_ring_get = (g->sim->recv_ring_get + 2*4U)
 		% SIM_BFR_SIZE;
 	sim_writel(g->sim, sim_recv_get_r(), g->sim->recv_ring_get);
@@ -175,8 +175,8 @@ static void nvgpu_sim_esc_readl(struct gk20a *g,
 	pci_sim_write_hdr(g, sim_msg_function_sim_escape_read_v(),
 			sim_escape_read_hdr_size());
 	*pci_sim_msg_param(g, 0) = index;
-	*pci_sim_msg_param(g, 4) = sizeof(u32);
-	data_offset = round_up(pathlen + 1, sizeof(u32));
+	*pci_sim_msg_param(g, 4) = 4U;
+	data_offset = (u32)(round_up(pathlen + 1, 4U));
 	*pci_sim_msg_param(g, 8) = data_offset;
 	strcpy((char *)pci_sim_msg_param(g, sim_escape_read_hdr_size()), path);
@@ -187,7 +187,7 @@ static void nvgpu_sim_esc_readl(struct gk20a *g,
 				(u8 *)pci_sim_msg_param(g,
 					nvgpu_safe_add_u32(data_offset,
 						sim_escape_read_hdr_size())),
-				sizeof(u32));
+				4U);
 	} else {
 		*data = 0xffffffff;
 		WARN(1, "pci_issue_rpc_and_wait failed err=%d", err);
@@ -229,7 +229,7 @@ static int nvgpu_sim_init_late(struct gk20a *g)
 		sim_send_ring_status_valid_f() |
 		sim_send_ring_target_phys_pci_coherent_f() |
 		sim_send_ring_size_4kb_f() |
-		sim_send_ring_addr_lo_f(phys >> sim_send_ring_addr_lo_b()));
+		sim_send_ring_addr_lo_f((u32)(phys >> sim_send_ring_addr_lo_b())));
 	/* repeat for recv ring (but swap put,get as roles are opposite) */
 	sim_writel(g->sim, sim_recv_ring_r(), sim_recv_ring_status_invalid_f());
@@ -246,7 +246,7 @@ static int nvgpu_sim_init_late(struct gk20a *g)
 		sim_recv_ring_status_valid_f() |
 		sim_recv_ring_target_phys_pci_coherent_f() |
 		sim_recv_ring_size_4kb_f() |
-		sim_recv_ring_addr_lo_f(phys >> sim_recv_ring_addr_lo_b()));
+		sim_recv_ring_addr_lo_f((u32)(phys >> sim_recv_ring_addr_lo_b())));
 	return 0;
 fail:


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -188,7 +188,7 @@ void nvgpu_swprofile_snapshot(struct nvgpu_swprofiler *p, u32 idx)
 	 */
 	index = matrix_to_linear_index(p, p->sample_index, idx);
-	p->samples[index] = nvgpu_current_time_ns();
+	p->samples[index] = (u64)nvgpu_current_time_ns();
 }
 void nvgpu_swprofile_begin_sample(struct nvgpu_swprofiler *p)
@@ -210,14 +210,14 @@ void nvgpu_swprofile_begin_sample(struct nvgpu_swprofiler *p)
 	/*
 	 * Reference time for subsequent subsamples in this sample.
 	 */
-	p->samples_start[p->sample_index] = nvgpu_current_time_ns();
+	p->samples_start[p->sample_index] = (u64)nvgpu_current_time_ns();
 	nvgpu_mutex_release(&p->lock);
 }
 static int profile_cmp(const void *a, const void *b)
 {
-	return *((const u64 *) a) - *((const u64 *) b);
+	return (int)(*((const u64 *) a) - *((const u64 *) b));
 }
 #define PERCENTILE_WIDTH 5
@@ -350,6 +350,8 @@ void nvgpu_swprofile_print_raw_data(struct gk20a *g,
 {
 	u32 i, j;
+	(void)g;
 	nvgpu_mutex_acquire(&p->lock);
 	if (p->samples == NULL) {
@@ -408,6 +410,8 @@ static u32 nvgpu_swprofile_subsample_basic_stats(struct gk20a *g,
 	u64 sigma_2 = 0U;
 	u32 i;
+	(void)g;
 	/*
 	 * First, let's work out min, max, sum, and number of samples of data. With this we
 	 * can then get the mean, median, and sigma^2.
@@ -461,7 +465,7 @@ static u32 nvgpu_swprofile_subsample_basic_stats(struct gk20a *g,
 	results[3] = median;
 	results[4] = sigma_2;
-	return samples;
+	return (u32)samples;
 }
 /*


@@ -1,7 +1,7 @@
 /*
  * GK20A Channel Synchronization Abstraction
  *
- * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -95,7 +95,7 @@ static void add_sema_incr_cmd(struct gk20a *g, struct nvgpu_channel *c,
 		struct nvgpu_semaphore *s, struct priv_cmd_entry *cmd,
 		bool wfi, struct nvgpu_hw_semaphore *hw_sema)
 {
-	int ch = c->chid;
+	u32 ch = c->chid;
 	u64 va;
 	/* release will need to write back to the semaphore memory. */
@@ -105,7 +105,7 @@ static void add_sema_incr_cmd(struct gk20a *g, struct nvgpu_channel *c,
 	nvgpu_semaphore_prepare(s, hw_sema);
 	g->ops.sync.sema.add_incr_cmd(g, cmd, s, va, wfi);
-	gpu_sema_verbose_dbg(g, "(R) c=%d INCR %u (%u) pool=%-3llu"
+	gpu_sema_verbose_dbg(g, "(R) c=%u INCR %u (%u) pool=%-3llu"
			"va=0x%llx entry=%p",
			ch, nvgpu_semaphore_get_value(s),
			nvgpu_semaphore_read(s),
@@ -170,6 +170,9 @@ cleanup:
 	struct nvgpu_channel_sync_semaphore *sema =
 		nvgpu_channel_sync_semaphore_from_base(s);
+	(void)fd;
+	(void)entry;
+	(void)max_wait_cmds;
 	nvgpu_err(sema->c->g,
 		"trying to use sync fds with CONFIG_NVGPU_SYNCFD_NONE");
 	return -ENODEV;
@@ -259,6 +262,10 @@ static int channel_sync_semaphore_incr_user(
 	struct nvgpu_channel_sync_semaphore *sema =
 		nvgpu_channel_sync_semaphore_from_base(s);
+	(void)entry;
+	(void)fence;
+	(void)wfi;
+	(void)need_sync_fence;
 	nvgpu_err(sema->c->g,
 		"trying to use sync fds with CONFIG_NVGPU_SYNCFD_NONE");
 	return -ENODEV;
@@ -271,6 +278,8 @@ static void channel_sync_semaphore_mark_progress(struct nvgpu_channel_sync *s,
 	struct nvgpu_channel_sync_semaphore *sp =
 		nvgpu_channel_sync_semaphore_from_base(s);
+	(void)register_irq;
 	(void)nvgpu_hw_semaphore_update_next(sp->hw_sema);
 	/*
 	 * register_irq is ignored: there is only one semaphore interrupt that


@@ -1,7 +1,7 @@
 /*
  * GK20A Channel Synchronization Abstraction
  *
- * Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -165,6 +165,10 @@ static int channel_sync_syncpt_wait_fd(struct nvgpu_channel_sync *s, int fd,
 {
 	struct nvgpu_channel_sync_syncpt *sp =
 		nvgpu_channel_sync_syncpt_from_base(s);
+	(void)s;
+	(void)fd;
+	(void)wait_cmd;
+	(void)max_wait_cmds;
 	nvgpu_err(sp->c->g,
 		"trying to use sync fds with CONFIG_NVGPU_SYNCFD_NONE");
 	return -ENODEV;
@@ -175,6 +179,8 @@ static void channel_sync_syncpt_update(void *priv, int nr_completed)
 {
 	struct nvgpu_channel *ch = priv;
+	(void)nr_completed;
 	nvgpu_channel_update(ch);
 	/* note: channel_get() is in channel_sync_syncpt_mark_progress() */


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -23,6 +23,7 @@
 #include <nvgpu/log.h>
 #include <nvgpu/bug.h>
 #include <nvgpu/worker.h>
+#include <nvgpu/string.h>
 static void nvgpu_worker_pre_process(struct nvgpu_worker *worker)
 {