gpu: nvgpu: fix compile errors with new compile flags

Preparing to push hvrtos GPU server changes, which require the CFLAGS below:
        -Werror -Wall -Wextra \
        -Wmissing-braces -Wpointer-arith -Wundef \
        -Wconversion -Wsign-conversion \
        -Wformat-security \
        -Wmissing-declarations -Wredundant-decls -Wimplicit-fallthrough

Jira GVSCI-11640

Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Change-Id: I25167f17f231ed741f19af87ca0aa72991563a0f
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2653746
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
Richard Zhao authored 2022-05-04 15:23:31 -07:00, committed by mobile promotions
parent 69ec2dcff7
commit 1ce899ce46
38 changed files with 93 additions and 49 deletions
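The recurring fix patterns in the diffs below are: explicitly consuming parameters that are unused in a given configuration with a (void) cast, and making narrowing or signedness-changing conversions explicit. The following stand-alone sketch (illustrative only, not nvgpu code; all names and values are made up) shows how those two patterns keep a translation unit clean under flags such as gcc -Wall -Wextra -Wconversion -Wsign-conversion -Werror:

/*
 * Illustrative example only -- not nvgpu code. Pattern (a): silence
 * -Wunused-parameter (enabled by -Wextra) for a parameter kept for
 * interface consistency. Pattern (b): make a narrowing conversion
 * explicit so -Wconversion accepts it.
 */
#include <stddef.h>
#include <stdint.h>

struct dev {
        uint32_t irq_count;
};

/* (a) 'ctx' is unused in this configuration; consume it explicitly. */
static uint32_t get_irq_count(const void *ctx, const struct dev *d)
{
        (void)ctx;
        return d->irq_count;
}

/* (b) size_t -> uint8_t is a narrowing conversion; check, then cast. */
static uint8_t header_size(size_t payload_bytes)
{
        size_t total = payload_bytes + 4U; /* 4-byte header, hypothetical */

        if (total > (size_t)UINT8_MAX) {
                return 0U; /* caller treats 0 as "does not fit" */
        }
        return (uint8_t)total;
}

int main(void)
{
        struct dev d = { .irq_count = 3U };

        return (get_irq_count(NULL, &d) == 3U && header_size(16U) == 20) ? 0 : 1;
}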

View File

@@ -31,6 +31,7 @@
 #include <nvgpu/bug.h>
 #include <nvgpu/dma.h>
 #include <nvgpu/rc.h>
+#include <nvgpu/string.h>
 #include <nvgpu/static_analysis.h>
 #ifdef CONFIG_NVGPU_LS_PMU
 #include <nvgpu/pmu/mutex.h>

View File

@@ -1043,6 +1043,8 @@ void nvgpu_gr_ctx_free_pm_ctx(struct gk20a *g, struct vm_gk20a *vm,
         if (pm_ctx->mem.gpu_va != 0ULL) {
                 nvgpu_dma_unmap_free(vm, &pm_ctx->mem);
         }
+        (void)g;
 }
 struct nvgpu_mem *nvgpu_gr_ctx_get_pm_ctx_mem(struct nvgpu_gr_ctx *gr_ctx)

View File

@@ -515,7 +515,7 @@ int nvgpu_gr_fecs_trace_poll(struct gk20a *g)
         read = g->ops.gr.fecs_trace.get_read_index(g);
-        cnt = CIRC_CNT(write, read, GK20A_FECS_TRACE_NUM_RECORDS);
+        cnt = CIRC_CNT((u32)write, (u32)read, GK20A_FECS_TRACE_NUM_RECORDS);
         if (!cnt)
                 goto done;

View File

@@ -88,9 +88,9 @@ static int gsp_ucode_load_and_bootstrap(struct gk20a *g,
                 struct nvgpu_falcon *flcn, struct gsp_fw *gsp_ucode)
 {
         u32 dmem_size = 0U;
-        u32 code_size = gsp_ucode->code->size;
-        u32 data_size = gsp_ucode->data->size;
-        u32 manifest_size = gsp_ucode->manifest->size;
+        u32 code_size = (u32)gsp_ucode->code->size;
+        u32 data_size = (u32)gsp_ucode->data->size;
+        u32 manifest_size = (u32)gsp_ucode->manifest->size;
         int err = 0;
         nvgpu_log_fn(g, " ");

View File

@@ -91,16 +91,19 @@ int nvgpu_gsp_debug_buf_init(struct gk20a *g, u32 queue_no, u32 buffer_size)
 void nvgpu_gsp_isr_mutex_acquire(struct gk20a *g, struct nvgpu_gsp *gsp)
 {
+        (void)g;
         nvgpu_mutex_acquire(&gsp->isr_mutex);
 }
 void nvgpu_gsp_isr_mutex_release(struct gk20a *g, struct nvgpu_gsp *gsp)
 {
+        (void)g;
         nvgpu_mutex_release(&gsp->isr_mutex);
 }
 bool nvgpu_gsp_is_isr_enable(struct gk20a *g, struct nvgpu_gsp *gsp)
 {
+        (void)g;
         return gsp->isr_enabled;
 }

View File

@@ -29,6 +29,7 @@
 #include <nvgpu/device.h>
 #include <nvgpu/nvgpu_mem.h>
 #include <nvgpu/runlist.h>
+#include <nvgpu/string.h>
 #include "ipc/gsp_cmd.h"
 #include "ipc/gsp_msg.h"
@@ -58,6 +59,8 @@ static void gsp_handle_cmd_ack(struct gk20a *g, struct nv_flcn_msg_gsp *msg,
                 *command_ack = false;
                 break;
         }
+        (void)status;
 }
 static void gsp_get_runlist_info(struct gk20a *g,
@@ -148,7 +151,7 @@ int nvgpu_gsp_send_devices_info(struct gk20a *g)
         cmd.hdr.unit_id = NV_GSP_UNIT_DEVICES_INFO;
         tmp_size = GSP_CMD_HDR_SIZE + sizeof(struct nvgpu_gsp_device_info);
         nvgpu_assert(tmp_size <= U64(U8_MAX));
-        cmd.hdr.size = tmp_size;
+        cmd.hdr.size = (u8)tmp_size;
         /* copy domain info into cmd buffer */
         gsp_get_device_info(g, &cmd.cmd.device);

View File

@@ -146,5 +146,6 @@ exit:
 u32 nvgpu_gsp_get_last_cmd_id(struct gk20a *g)
 {
+        (void)g;
         return GSP_NV_CMDQ_LOG_ID__LAST;
 }

View File

@@ -49,6 +49,7 @@ static int gsp_handle_event(struct nvgpu_gsp_sched *gsp_sched,
                 break;
         }
+        (void)gsp_sched;
         return err;
 }
@@ -193,7 +194,7 @@ int nvgpu_gsp_process_message(struct gk20a *g)
         nvgpu_info(g, "ctrl_flags = 0x%08x, seq_id = 0x%08x",
                         msg.hdr.ctrl_flags, msg.hdr.seq_id);
-        msg.hdr.ctrl_flags &= ~GSP_CMD_FLAGS_MASK;
+        msg.hdr.ctrl_flags &= (u8)~GSP_CMD_FLAGS_MASK;
         if (msg.hdr.ctrl_flags == GSP_CMD_FLAGS_EVENT) {
                 gsp_handle_event(gsp_sched, &msg);
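The ctrl_flags change in the hunk above comes from C's integer promotions: with an 8-bit field and an unsigned mask constant, ~MASK is evaluated at (unsigned) int width, and the compound assignment then has to narrow the result back to 8 bits, which -Wconversion rejects under -Werror. Casting the complemented mask to u8 first makes the narrowing explicit and lets the compiler prove the AND result fits. A minimal sketch, using a hypothetical mask value rather than the real GSP_CMD_FLAGS_MASK:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the real mask; assume the top two bits. */
#define CMD_FLAGS_MASK 0xC0U

int main(void)
{
        uint8_t ctrl_flags = 0xD5U;

        /*
         * ~CMD_FLAGS_MASK has type unsigned int (0xFFFFFF3F here), so
         * "ctrl_flags &= ~CMD_FLAGS_MASK;" implicitly truncates it to
         * 8 bits and -Wconversion flags the statement. Casting the
         * complemented mask to uint8_t keeps only the low byte, which
         * is all the 8-bit field can hold anyway.
         */
        ctrl_flags &= (uint8_t)~CMD_FLAGS_MASK;

        printf("ctrl_flags = 0x%02X\n", (unsigned int)ctrl_flags); /* 0x15 */
        return 0;
}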

View File

@@ -92,7 +92,6 @@ int nvgpu_gsp_seq_acquire(struct gk20a *g,
                 gsp_callback callback, void *cb_params)
 {
         struct gsp_sequence *seq;
-        u16 size_of_seq_tbl = 0;
         u32 index = 0;
         int err = 0;
@@ -100,10 +99,7 @@ int nvgpu_gsp_seq_acquire(struct gk20a *g,
         nvgpu_mutex_acquire(&sequences->gsp_seq_lock);
-        size_of_seq_tbl = sizeof(sequences->gsp_seq_tbl) *
-                        sizeof(sequences->gsp_seq_tbl[0]);
-        index = find_first_zero_bit(sequences->gsp_seq_tbl,
+        index = (u32)find_first_zero_bit(sequences->gsp_seq_tbl,
                         GSP_MAX_NUM_SEQUENCES);
         if (index >= GSP_MAX_NUM_SEQUENCES) {

View File

@@ -287,7 +287,6 @@ void nvgpu_gsp_test_sw_deinit(struct gk20a *g)
 int nvgpu_gsp_stress_test_sw_init(struct gk20a *g)
 {
         int err = 0;
-        struct nvgpu_gsp_test *gsp_stest;
         struct nvgpu_gsp *gsp;
         nvgpu_log_fn(g, " ");
@@ -318,7 +317,6 @@ int nvgpu_gsp_stress_test_sw_init(struct gk20a *g)
                 goto de_init;
         }
-        gsp_stest = g->gsp_stest;
         gsp = g->gsp_stest->gsp;
         /* gsp falcon software state */

View File

@@ -1079,8 +1079,10 @@ u64 nvgpu_gmmu_map_locked(struct vm_gk20a *vm,
 #if defined(CONFIG_NVGPU_NON_FUSA)
         if (nvgpu_is_errata_present(g, NVGPU_ERRATA_3288192) &&
                         (attrs.l3_alloc)) {
+#ifdef CONFIG_NVGPU_TRACE
                 nvgpu_gmmu_dbg_v(g, &attrs,
                         "L3 alloc is requested when L3 cache is not supported");
+#endif
                 attrs.l3_alloc = false;
         }
 #endif

View File

@@ -21,6 +21,7 @@
  */
 #include <nvgpu/gk20a.h>
+#include <nvgpu/string.h>
 #include <nvgpu/pmu.h>
 #include <nvgpu/pmu/pmu_pg.h>
 #include <nvgpu/pmu/pmuif/pg.h>
@@ -361,7 +362,7 @@ static int ga10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
         (void) memset(&rpc, 0,
                 sizeof(struct pmu_rpc_struct_lpwr_pg_ctrl_stats_get));
-        rpc.ctrl_id = (u32)pg_engine_id;
+        rpc.ctrl_id = (u8)pg_engine_id;
         PMU_RPC_EXECUTE_CPB(status, g->pmu, PG, PG_CTRL_STATS_GET, &rpc, 0);
         if (status != 0) {

View File

@@ -1036,6 +1036,8 @@ void nvgpu_pmu_pg_destroy(struct gk20a *g, struct nvgpu_pmu *pmu,
         }
         pg->zbc_ready = false;
+        (void)pmu;
 }
 int nvgpu_pmu_pg_init(struct gk20a *g, struct nvgpu_pmu *pmu,

View File

@@ -434,6 +434,8 @@ static int nvgpu_profiler_quiesce_hwpm_streamout_resident(struct gk20a *g,
         u64 bytes_available;
         int err = 0;
+        (void)gr_instance_id;
         nvgpu_log(g, gpu_dbg_prof,
                 "HWPM streamout quiesce in resident state started");

View File

@@ -574,6 +574,7 @@ static int profiler_obj_validate_reg_op_offset(struct nvgpu_profiler_object *pro
         return 0;
 error:
         op->status |= REGOP(STATUS_INVALID_OFFSET);
+        (void)ret;
         return -EINVAL;
 }
@@ -615,7 +616,7 @@ static int validate_reg_op_offset(struct gk20a *g,
                 }
         }
 #endif
+        (void)ret;
         return 0;
 }

View File

@@ -390,7 +390,7 @@ int ga10b_fb_set_remote_swizid(struct gk20a *g, bool enable)
         while (pbdma_id_mask != 0U) {
                 u32 fault_id;
                 u32 pbdma_id = nvgpu_safe_sub_u32(
-                        nvgpu_ffs(pbdma_id_mask), 1UL);
+                        (u32)nvgpu_ffs(pbdma_id_mask), 1UL);
                 fault_id =
                         g->ops.pbdma.get_mmu_fault_id(g, pbdma_id);

View File

@@ -72,7 +72,7 @@ int ga10b_fb_vab_init(struct gk20a *g)
          * Each packet contains 32B access bits and 32B meta data.
          * Thus, total entry size is twice of the VAB access bits.
          */
-        vab_entry_size = nvgpu_safe_mult_u32(vab_size_bytes, 2UL);
+        vab_entry_size = nvgpu_safe_mult_u32((u32)vab_size_bytes, 2UL);
         nvgpu_log(g, gpu_dbg_vab, "vab_entry_size 0x%lx", vab_entry_size);
         vab->entry_size = vab_entry_size;
@@ -81,7 +81,7 @@ int ga10b_fb_vab_init(struct gk20a *g)
         if (!nvgpu_mem_is_valid(vab_buf)) {
                 /* Allocate memory for single VAB entry */
                 err = nvgpu_dma_alloc_map_sys(vm, nvgpu_safe_mult_u32(
-                        vab->entry_size, vab->num_entries), vab_buf);
+                        (u32)vab->entry_size, vab->num_entries), vab_buf);
                 if (err != 0) {
                         nvgpu_err(g, "Error in vab buffer alloc in bar2 vm ");
                         return -ENOMEM;
@@ -220,7 +220,7 @@ static int ga10b_fb_vab_config_address_range(struct gk20a *g,
                         U32(vab_range_checker[i].start_phys_addr >> 32U));
                 nvgpu_writel(g, fb_mmu_vidmem_access_bit_start_addr_lo_r(i),
-                        (vab_range_checker[i].start_phys_addr &
+                        ((u32)vab_range_checker[i].start_phys_addr &
                         fb_mmu_vidmem_access_bit_start_addr_lo_val_m()) |
                         fb_mmu_vidmem_access_bit_start_addr_lo_granularity_f(
                         granularity_shift_bits));

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -27,6 +27,7 @@
 #include <nvgpu/barrier.h>
 #include <nvgpu/bug.h>
 #include <nvgpu/gk20a.h>
+#include <nvgpu/string.h>
 #include "hal/fifo/pbdma_gm20b.h"

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -27,6 +27,7 @@
 #include <nvgpu/engines.h>
 #include <nvgpu/device.h>
 #include <nvgpu/fifo.h>
+#include <nvgpu/string.h>
 #include "engine_status_ga10b.h"
 #include <nvgpu/hw/ga10b/hw_runlist_ga10b.h>

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -23,6 +23,7 @@
 #include <nvgpu/io.h>
 #include <nvgpu/gk20a.h>
 #include <nvgpu/pbdma_status.h>
+#include <nvgpu/string.h>
 #include "pbdma_status_ga10b.h"
 #include <nvgpu/hw/ga10b/hw_pbdma_ga10b.h>

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -78,7 +78,7 @@ int ga10b_fifo_reschedule_preempt_next(struct nvgpu_channel *ch,
         fecsstat0 = g->ops.gr.falcon.read_fecs_ctxsw_mailbox(g,
                 NVGPU_GR_FALCON_FECS_CTXSW_MAILBOX0);
         g->ops.engine_status.read_engine_status_info(g, nvgpu_safe_sub_u32(
-                nvgpu_ffs(runlist->eng_bitmask & eng_bitmask), 1U),
+                (u32)nvgpu_ffs(runlist->eng_bitmask & eng_bitmask), 1U),
                 &engine_status);
         if (nvgpu_engine_status_is_ctxsw_switch(&engine_status)) {
                 nvgpu_engine_status_get_next_ctx_id_type(&engine_status,

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -27,6 +27,8 @@
 #include <nvgpu/hw/ga100/hw_ctxsw_prog_ga100.h>
 #include <nvgpu/hw/ga100/hw_xbar_ga100.h>
+#include "ctxsw_prog_ga100.h"
 #ifdef CONFIG_DEBUG_FS
 void ga100_ctxsw_prog_dump_ctxsw_stats(struct gk20a *g,
                 struct nvgpu_mem *ctx_mem)
@@ -86,6 +88,7 @@ void ga100_ctxsw_prog_dump_ctxsw_stats(struct gk20a *g,
 #ifdef CONFIG_NVGPU_DEBUGGER
 u32 ga100_ctxsw_prog_hw_get_pm_gpc_gnic_stride(struct gk20a *g)
 {
+        (void)g;
         return (xbar_mxbar_pri_gpc1_gnic0_preg_pm_ctrl_r() -
                 xbar_mxbar_pri_gpc0_gnic0_preg_pm_ctrl_r());
 }

View File

@@ -47,6 +47,7 @@ u32 ga10b_ctxsw_prog_hw_get_gpccs_header_stride(void)
 u32 ga10b_ctxsw_prog_get_tpc_segment_pri_layout(struct gk20a *g, u32 *main_hdr)
 {
+        (void)g;
         return ctxsw_prog_main_tpc_segment_pri_layout_v_v(
                 main_hdr[ctxsw_prog_main_tpc_segment_pri_layout_o() >>
                         BYTE_TO_DW_SHIFT]);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -170,6 +170,7 @@ u32 gm20b_ctxsw_prog_get_local_priv_register_ctl_offset(u32 *context)
 u32 gm20b_ctxsw_prog_hw_get_pm_gpc_gnic_stride(struct gk20a *g)
 {
+        (void)g;
         return NV_XBAR_MXBAR_PRI_GPC_GNIC_STRIDE;
 }
 #endif /* CONFIG_NVGPU_DEBUGGER */

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -56,6 +56,8 @@ struct nvgpu_hw_err_inject_info_desc mmu_err_desc;
 struct nvgpu_hw_err_inject_info_desc *
 ga10b_gr_ecc_get_mmu_err_desc(struct gk20a *g)
 {
+        (void)g;
         mmu_err_desc.info_ptr = mmu_ecc_err_desc;
         mmu_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
                         sizeof(mmu_ecc_err_desc) /

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -23,6 +23,7 @@
 #include <nvgpu/io.h>
 #include <nvgpu/gk20a.h>
 #include <nvgpu/fuse.h>
+#include <nvgpu/string.h>
 #include <nvgpu/gr/gr_ecc.h>
 #include <hal/gr/ecc/ecc_gv11b.h>

View File

@@ -619,6 +619,8 @@ int gr_ga10b_process_context_buffer_priv_segment(struct gk20a *g,
         u32 tpc_segment_pri_layout;
         bool is_tpc_layout_interleaved = false;
+        (void)ppc_mask;
         nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "pri_addr=0x%x", pri_addr);
         if (!g->netlist_valid) {
@@ -1135,7 +1137,7 @@ void ga10b_gr_vab_reserve(struct gk20a *g, u32 vab_reg, u32 num_range_checkers,
                         U32(vab_range_checker[i].start_phys_addr >> 32U));
                 nvgpu_writel(g, gr_gpcs_mmu_vidmem_access_bit_start_addr_lo_r(i),
-                        (vab_range_checker[i].start_phys_addr &
+                        (u32)(vab_range_checker[i].start_phys_addr &
                         gr_gpcs_mmu_vidmem_access_bit_start_addr_lo_val_m()) |
                         gr_gpcs_mmu_vidmem_access_bit_start_addr_lo_granularity_f(
                         granularity_shift_bits));

View File

@@ -120,6 +120,7 @@ void ga10b_gr_init_get_access_map(struct gk20a *g,
         };
         size_t array_size;
+        (void)g;
         *whitelist = wl_addr_ga10b;
         array_size = ARRAY_SIZE(wl_addr_ga10b);
         *num_entries = nvgpu_safe_cast_u64_to_u32(array_size);

View File

@@ -222,16 +222,19 @@ void tu104_gr_init_commit_gfxp_rtv_cb(struct gk20a *g,
 u32 tu104_gr_init_get_attrib_cb_gfxp_default_size(struct gk20a *g)
 {
+        (void)g;
         return gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v();
 }
 u32 tu104_gr_init_get_attrib_cb_gfxp_size(struct gk20a *g)
 {
+        (void)g;
         return gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v();
 }
 u32 tu104_gr_init_get_ctx_spill_size(struct gk20a *g)
 {
+        (void)g;
         return nvgpu_safe_mult_u32(
                 gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v(),
                 gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v());
@@ -248,6 +251,7 @@ u32 tu104_gr_init_get_ctx_betacb_size(struct gk20a *g)
 u32 tu104_gr_init_get_gfxp_rtv_cb_size(struct gk20a *g)
 {
+        (void)g;
         return nvgpu_safe_mult_u32(
                 nvgpu_safe_add_u32(
                         nvgpu_safe_add_u32(

View File

@@ -26,6 +26,7 @@
 #include <nvgpu/engines.h>
 #include <nvgpu/nvgpu_err.h>
 #include <nvgpu/errata.h>
+#include <nvgpu/string.h>
 #include <nvgpu/gr/config.h>
 #include <nvgpu/gr/gr.h>

View File

@@ -190,7 +190,7 @@ static u32 ga10b_grmgr_get_local_gr_syspipe_index(struct gk20a *g,
         while (gr_syspipe_mask != 0U) {
                 u32 bit_position = nvgpu_safe_sub_u32(
-                        nvgpu_ffs(gr_syspipe_mask), 1UL);
+                        (u32)nvgpu_ffs(gr_syspipe_mask), 1UL);
                 ++local_gr_syspipe_index;
                 gr_syspipe_mask ^= BIT32(bit_position);
         }
@@ -218,7 +218,7 @@ static u32 ga10b_grmgr_get_gr_syspipe_id_from_local_gr_syspipe_index(
         while (temp_gr_syspipe_index < max_allowed_syspipe_index) {
                 gr_syspipe_id = nvgpu_safe_sub_u32(
-                        nvgpu_ffs(usable_gr_syspipe_mask), 1UL);
+                        (u32)nvgpu_ffs(usable_gr_syspipe_mask), 1UL);
                 ++temp_gr_syspipe_index;
                 usable_gr_syspipe_mask ^= BIT32(gr_syspipe_id);
         }
@@ -245,7 +245,7 @@ static u32 ga10b_grmgr_get_num_gr_syspipe_enabled(struct gk20a *g,
         while (gr_syspipe_enabled_mask != 0U) {
                 u32 bit_pos = nvgpu_safe_sub_u32(
-                        nvgpu_ffs(gr_syspipe_enabled_mask), 1UL);
+                        (u32)nvgpu_ffs(gr_syspipe_enabled_mask), 1UL);
                 gr_syspipe_enabled_mask ^= BIT32(bit_pos);
                 ++gr_syspipe_enabled_count;
         }
@@ -399,7 +399,7 @@ static int ga10b_grmgr_get_gpu_instance(struct gk20a *g,
                         (gpu_instance_static_config[index].num_gpc))) {
                 logical_gpc_id = nvgpu_safe_sub_u32(
-                        nvgpu_ffs(temp_gpc_mask), 1UL);
+                        (u32)nvgpu_ffs(temp_gpc_mask), 1UL);
                 if ((gpcs[logical_gpc_id].gpcgrp_id ==
                                 gpu_instance_gpcgrp_id[index]) ||
@@ -499,7 +499,7 @@ static int ga10b_grmgr_get_gpu_instance(struct gk20a *g,
                                 nvgpu_safe_add_u32(local_gr_syspipe_index,
                                         temp_lce_cnt));
                         physical_ce_id = nvgpu_safe_sub_u32(
-                                nvgpu_ffs(temp_lce_mask), 1UL);
+                                (u32)nvgpu_ffs(temp_lce_mask), 1UL);
                         if (ga10b_grmgr_is_syspipe_lce(g,
                                 nvgpu_device_get(g, NVGPU_DEVTYPE_GRAPHICS,
                                         gr_syspipe_id),
@@ -536,7 +536,7 @@ static int ga10b_grmgr_get_gpu_instance(struct gk20a *g,
                         struct nvgpu_gr_syspipe *local_gr_syspipe =
                                 &gpu_instance[gpu_instance_id].gr_syspipe;
                         physical_ce_id = nvgpu_safe_sub_u32(
-                                nvgpu_ffs(lce_mask), 1UL);
+                                (u32)nvgpu_ffs(lce_mask), 1UL);
                         temp_lce_cnt = gpu_instance[gpu_instance_id].num_lce;
                         gpu_instance[gpu_instance_id].lce_devs[temp_lce_cnt] =
                                 lces[physical_ce_id];
@@ -798,7 +798,7 @@ int ga10b_grmgr_init_gr_manager(struct gk20a *g)
                         continue;
                 }
                 gr_syspipe = &g->mig.gpu_instance[index].gr_syspipe;
-                g->mig.gr_syspipe_en_mask |= BIT(gr_syspipe->gr_syspipe_id);
+                g->mig.gr_syspipe_en_mask |= BIT32(gr_syspipe->gr_syspipe_id);
                 gr_dev = nvgpu_device_get(g, NVGPU_DEVTYPE_GRAPHICS,
                                 gr_syspipe->gr_syspipe_id);
@@ -851,11 +851,13 @@ int ga10b_grmgr_init_gr_manager(struct gk20a *g)
 u32 ga10b_grmgr_get_max_sys_pipes(struct gk20a *g)
 {
+        (void)g;
         return smcarb_max_partitionable_sys_pipes_v();
 }
 u32 ga10b_grmgr_get_allowed_swizzid_size(struct gk20a *g)
 {
+        (void)g;
         return smcarb_allowed_swizzid__size1_v();
 }

View File

@@ -203,7 +203,7 @@ static void ga10b_gsp_clr_intr(struct gk20a *g, u32 intr)
         gk20a_writel(g, pgsp_falcon_irqsclr_r(), intr);
 }
-void ga10b_gsp_handle_interrupts(struct gk20a *g, u32 intr)
+static void ga10b_gsp_handle_interrupts(struct gk20a *g, u32 intr)
 {
         int err = 0;
@@ -561,6 +561,7 @@ void ga10b_gsp_msgq_tail(struct gk20a *g, struct nvgpu_gsp *gsp,
         } else {
                 gk20a_writel(g, pgsp_msgq_tail_r(0U), *tail);
         }
+        (void)gsp;
 }
 void ga10b_gsp_set_msg_intr(struct gk20a *g)

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -21,6 +21,7 @@
  */
 #include <nvgpu/gk20a.h>
+#include <nvgpu/string.h>
 #include "netlist_ga10b.h"

View File

@@ -370,7 +370,7 @@ void ga10b_perf_enable_membuf(struct gk20a *g, u32 size, u64 buf_addr)
 void ga10b_perf_disable_membuf(struct gk20a *g)
 {
-        int zero_value = 0;
+        u32 zero_value = 0U;
         u32 i;
         nvgpu_assert(perf_pmasys_channel_outbase__size_1_v() ==
@@ -516,20 +516,24 @@ u32 ga10b_perf_get_pmmfbprouter_per_chiplet_offset(void)
 u32 ga10b_get_hwpm_fbp_perfmon_regs_base(struct gk20a *g)
 {
+        (void)g;
         return perf_pmmfbp_base_v();
 }
 u32 ga10b_get_hwpm_gpc_perfmon_regs_base(struct gk20a *g)
 {
+        (void)g;
         return perf_pmmgpc_base_v();
 }
 u32 ga10b_get_hwpm_fbprouter_perfmon_regs_base(struct gk20a *g)
 {
+        (void)g;
         return perf_pmmfbprouter_base_v();
 }
 u32 ga10b_get_hwpm_gpcrouter_perfmon_regs_base(struct gk20a *g)
 {
+        (void)g;
         return perf_pmmgpcrouter_base_v();
 }
@@ -666,7 +670,7 @@ int ga10b_perf_update_get_put(struct gk20a *g, u64 bytes_consumed,
         if (bytes_consumed != 0U) {
-                nvgpu_writel(g, perf_pmasys_channel_mem_bump_r(inst_zero), bytes_consumed);
+                nvgpu_writel(g, perf_pmasys_channel_mem_bump_r(inst_zero), (u32)bytes_consumed);
         }
         if (update_available_bytes) {

View File

@@ -87,8 +87,9 @@ static int ga10b_pmu_ns_falcon_bootstrap(struct gk20a *g, struct nvgpu_pmu *pmu,
         struct pmu_ucode_desc_v1 *desc = NULL;
         u32 addr_code_lo, addr_data_lo, addr_load_lo;
         u32 addr_code_hi, addr_data_hi;
-        u32 blocks, i, err;
+        u32 blocks, i;
         u32 inst_block_ptr;
+        int err;
         nvgpu_log_fn(g, " ");
@@ -154,7 +155,7 @@ static int ga10b_pmu_ns_falcon_bootstrap(struct gk20a *g, struct nvgpu_pmu *pmu,
                 addr_load_lo -
                         (right_shift_8bits(desc->bootloader_imem_offset)));
-        blocks = right_shift_8bits(((desc->bootloader_size + U8_MAX) & ~U8_MAX));
+        blocks = right_shift_8bits(((desc->bootloader_size + U8_MAX) & ~(u32)U8_MAX));
         for (i = DMA_OFFSET_START; i < blocks; i++) {
                 nvgpu_writel(g, pwr_falcon_dmatrfmoffs_r(),
@@ -197,6 +198,8 @@ static int ga10b_pmu_ns_nvriscv_bootstrap(struct gk20a *g, struct nvgpu_pmu *pm
         u64 fmc_data_addr = 0;
         u64 manifest_addr = 0;
+        (void)args_offset;
         desc = (struct falcon_next_core_ucode_desc *)(void *)
                         rtos_fw->fw_desc->data;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -43,7 +43,7 @@ static void nvgpu_sim_esc_readl_ga10b(struct gk20a *g,
                 sim_escape_read_hdr_size());
         *sim_msg_param(g, 0) = index;
         *sim_msg_param(g, 4) = sizeof(u32);
-        data_offset = round_up(
+        data_offset = (u32)round_up(
                 nvgpu_safe_add_u64(strlen(path), 1ULL), sizeof(u32));
         *sim_msg_param(g, 8) = data_offset;
         strcpy((char *)sim_msg_param(g, sim_escape_read_hdr_size()), path);

View File

@@ -32,6 +32,7 @@
 #include <nvgpu/pmu/pmuif/pg.h>
 #include <nvgpu/timers.h>
 #include <nvgpu/nvgpu_mem.h>
+#include <nvgpu/atomic.h>
 #include <include/nvgpu/pmu.h>
 struct nvgpu_pmu;

View File

@@ -1,7 +1,7 @@
 /*
  * Tegra GPU Virtualization Interfaces to Server
  *
- * Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -452,10 +452,10 @@ struct tegra_vgpu_engines_info {
         } info[TEGRA_VGPU_MAX_ENGINES];
 };
-#define TEGRA_VGPU_MAX_GPC_COUNT 2
-#define TEGRA_VGPU_MAX_TPC_COUNT_PER_GPC 4
-#define TEGRA_VGPU_MAX_PES_COUNT_PER_GPC 3
-#define TEGRA_VGPU_L2_EN_MASK 32
+#define TEGRA_VGPU_MAX_GPC_COUNT 2U
+#define TEGRA_VGPU_MAX_TPC_COUNT_PER_GPC 4U
+#define TEGRA_VGPU_MAX_PES_COUNT_PER_GPC 3U
+#define TEGRA_VGPU_L2_EN_MASK 32U
 struct tegra_vgpu_constants_params {
         u32 arch;