Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-24 10:34:43 +03:00)
gpu: nvgpu: fix compile error of new compile flags
Preparing to push hvrtos gpu server changes, which require the following CFLAGS:
-Werror -Wall -Wextra \
-Wmissing-braces -Wpointer-arith -Wundef \
-Wconversion -Wsign-conversion \
-Wformat-security \
-Wmissing-declarations -Wredundant-decls -Wimplicit-fallthrough
Jira GVSCI-11640
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Change-Id: I25167f17f231ed741f19af87ca0aa72991563a0f
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2653746
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
Committed by: mobile promotions
Parent: 69ec2dcff7
Commit: 1ce899ce46
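Most of the hunks below silence -Wconversion and -Wsign-conversion by adding explicit narrowing casts, most often where the unsigned long result of nvgpu_ffs() feeds the u32 parameters of nvgpu_safe_sub_u32(). A minimal standalone sketch of that pattern, with hypothetical helper names (not nvgpu code) and assuming an ffs-style helper that returns unsigned long:

#include <stdint.h>

/* stand-in for nvgpu_ffs(): 1-based index of the lowest set bit,
 * returned as unsigned long like the kernel ffs helpers */
static unsigned long ffs_ul(unsigned long mask)
{
    return (unsigned long)__builtin_ffsl((long)mask);
}

static uint32_t lowest_bit_index(unsigned long mask) /* caller ensures mask != 0 */
{
    /* return ffs_ul(mask) - 1UL;   -Wconversion: unsigned long narrowed to uint32_t */
    return (uint32_t)ffs_ul(mask) - 1U; /* explicit cast, the fix applied throughout this change */
}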
@@ -390,7 +390,7 @@ int ga10b_fb_set_remote_swizid(struct gk20a *g, bool enable)
 	while (pbdma_id_mask != 0U) {
 		u32 fault_id;
 		u32 pbdma_id = nvgpu_safe_sub_u32(
-				nvgpu_ffs(pbdma_id_mask), 1UL);
+				(u32)nvgpu_ffs(pbdma_id_mask), 1UL);
 
 		fault_id =
 			g->ops.pbdma.get_mmu_fault_id(g, pbdma_id);
@@ -72,7 +72,7 @@ int ga10b_fb_vab_init(struct gk20a *g)
 	 * Each packet contains 32B access bits and 32B meta data.
 	 * Thus, total entry size is twice of the VAB access bits.
 	 */
-	vab_entry_size = nvgpu_safe_mult_u32(vab_size_bytes, 2UL);
+	vab_entry_size = nvgpu_safe_mult_u32((u32)vab_size_bytes, 2UL);
 	nvgpu_log(g, gpu_dbg_vab, "vab_entry_size 0x%lx", vab_entry_size);
 
 	vab->entry_size = vab_entry_size;
@@ -81,7 +81,7 @@ int ga10b_fb_vab_init(struct gk20a *g)
 	if (!nvgpu_mem_is_valid(vab_buf)) {
 		/* Allocate memory for single VAB entry */
 		err = nvgpu_dma_alloc_map_sys(vm, nvgpu_safe_mult_u32(
-			vab->entry_size, vab->num_entries), vab_buf);
+			(u32)vab->entry_size, vab->num_entries), vab_buf);
 		if (err != 0) {
 			nvgpu_err(g, "Error in vab buffer alloc in bar2 vm ");
 			return -ENOMEM;
@@ -220,7 +220,7 @@ static int ga10b_fb_vab_config_address_range(struct gk20a *g,
 			U32(vab_range_checker[i].start_phys_addr >> 32U));
 
 		nvgpu_writel(g, fb_mmu_vidmem_access_bit_start_addr_lo_r(i),
-			(vab_range_checker[i].start_phys_addr &
+			((u32)vab_range_checker[i].start_phys_addr &
 			fb_mmu_vidmem_access_bit_start_addr_lo_val_m()) |
 			fb_mmu_vidmem_access_bit_start_addr_lo_granularity_f(
 			granularity_shift_bits));
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -27,6 +27,7 @@
 #include <nvgpu/barrier.h>
 #include <nvgpu/bug.h>
 #include <nvgpu/gk20a.h>
+#include <nvgpu/string.h>
 
 #include "hal/fifo/pbdma_gm20b.h"
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -27,6 +27,7 @@
 #include <nvgpu/engines.h>
 #include <nvgpu/device.h>
 #include <nvgpu/fifo.h>
+#include <nvgpu/string.h>
 
 #include "engine_status_ga10b.h"
 #include <nvgpu/hw/ga10b/hw_runlist_ga10b.h>
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -23,6 +23,7 @@
 #include <nvgpu/io.h>
 #include <nvgpu/gk20a.h>
 #include <nvgpu/pbdma_status.h>
+#include <nvgpu/string.h>
 
 #include "pbdma_status_ga10b.h"
 #include <nvgpu/hw/ga10b/hw_pbdma_ga10b.h>
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -78,7 +78,7 @@ int ga10b_fifo_reschedule_preempt_next(struct nvgpu_channel *ch,
 	fecsstat0 = g->ops.gr.falcon.read_fecs_ctxsw_mailbox(g,
 			NVGPU_GR_FALCON_FECS_CTXSW_MAILBOX0);
 	g->ops.engine_status.read_engine_status_info(g, nvgpu_safe_sub_u32(
-		nvgpu_ffs(runlist->eng_bitmask & eng_bitmask), 1U),
+		(u32)nvgpu_ffs(runlist->eng_bitmask & eng_bitmask), 1U),
 		&engine_status);
 	if (nvgpu_engine_status_is_ctxsw_switch(&engine_status)) {
 		nvgpu_engine_status_get_next_ctx_id_type(&engine_status,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -27,6 +27,8 @@
 #include <nvgpu/hw/ga100/hw_ctxsw_prog_ga100.h>
 #include <nvgpu/hw/ga100/hw_xbar_ga100.h>
 
+#include "ctxsw_prog_ga100.h"
+
 #ifdef CONFIG_DEBUG_FS
 void ga100_ctxsw_prog_dump_ctxsw_stats(struct gk20a *g,
 	struct nvgpu_mem *ctx_mem)
@@ -86,6 +88,7 @@ void ga100_ctxsw_prog_dump_ctxsw_stats(struct gk20a *g,
 #ifdef CONFIG_NVGPU_DEBUGGER
 u32 ga100_ctxsw_prog_hw_get_pm_gpc_gnic_stride(struct gk20a *g)
 {
+	(void)g;
 	return (xbar_mxbar_pri_gpc1_gnic0_preg_pm_ctrl_r() -
 		xbar_mxbar_pri_gpc0_gnic0_preg_pm_ctrl_r());
 }
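The (void)g statements added here and in later hunks address -Wunused-parameter, which -Wextra enables: HAL callbacks keep a common signature even when a given chip's implementation ignores the gk20a pointer. A standalone sketch of the idiom with hypothetical names (not nvgpu code):

struct gk20a;

unsigned int stride_for_chip(struct gk20a *g); /* a visible declaration also keeps -Wmissing-declarations quiet */

unsigned int stride_for_chip(struct gk20a *g)
{
    (void)g;        /* parameter required by the common HAL signature, unused on this chip */
    return 0x1000U; /* hypothetical fixed stride */
}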
@@ -47,6 +47,7 @@ u32 ga10b_ctxsw_prog_hw_get_gpccs_header_stride(void)
 
 u32 ga10b_ctxsw_prog_get_tpc_segment_pri_layout(struct gk20a *g, u32 *main_hdr)
 {
+	(void)g;
 	return ctxsw_prog_main_tpc_segment_pri_layout_v_v(
 		main_hdr[ctxsw_prog_main_tpc_segment_pri_layout_o() >>
 			BYTE_TO_DW_SHIFT]);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -170,6 +170,7 @@ u32 gm20b_ctxsw_prog_get_local_priv_register_ctl_offset(u32 *context)
 
 u32 gm20b_ctxsw_prog_hw_get_pm_gpc_gnic_stride(struct gk20a *g)
 {
+	(void)g;
 	return NV_XBAR_MXBAR_PRI_GPC_GNIC_STRIDE;
 }
 #endif /* CONFIG_NVGPU_DEBUGGER */
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -56,6 +56,8 @@ struct nvgpu_hw_err_inject_info_desc mmu_err_desc;
 struct nvgpu_hw_err_inject_info_desc *
 ga10b_gr_ecc_get_mmu_err_desc(struct gk20a *g)
 {
+	(void)g;
+
 	mmu_err_desc.info_ptr = mmu_ecc_err_desc;
 	mmu_err_desc.info_size = nvgpu_safe_cast_u64_to_u32(
 			sizeof(mmu_ecc_err_desc) /
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -23,6 +23,7 @@
 #include <nvgpu/io.h>
 #include <nvgpu/gk20a.h>
 #include <nvgpu/fuse.h>
+#include <nvgpu/string.h>
 #include <nvgpu/gr/gr_ecc.h>
 #include <hal/gr/ecc/ecc_gv11b.h>
 
@@ -619,6 +619,8 @@ int gr_ga10b_process_context_buffer_priv_segment(struct gk20a *g,
 	u32 tpc_segment_pri_layout;
 	bool is_tpc_layout_interleaved = false;
 
+	(void)ppc_mask;
+
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "pri_addr=0x%x", pri_addr);
 
 	if (!g->netlist_valid) {
@@ -1135,7 +1137,7 @@ void ga10b_gr_vab_reserve(struct gk20a *g, u32 vab_reg, u32 num_range_checkers,
 			U32(vab_range_checker[i].start_phys_addr >> 32U));
 
 		nvgpu_writel(g, gr_gpcs_mmu_vidmem_access_bit_start_addr_lo_r(i),
-			(vab_range_checker[i].start_phys_addr &
+			(u32)(vab_range_checker[i].start_phys_addr &
 			gr_gpcs_mmu_vidmem_access_bit_start_addr_lo_val_m()) |
 			gr_gpcs_mmu_vidmem_access_bit_start_addr_lo_granularity_f(
 			granularity_shift_bits));
@@ -120,6 +120,7 @@ void ga10b_gr_init_get_access_map(struct gk20a *g,
 	};
 	size_t array_size;
 
+	(void)g;
 	*whitelist = wl_addr_ga10b;
 	array_size = ARRAY_SIZE(wl_addr_ga10b);
 	*num_entries = nvgpu_safe_cast_u64_to_u32(array_size);
@@ -222,16 +222,19 @@ void tu104_gr_init_commit_gfxp_rtv_cb(struct gk20a *g,
 
 u32 tu104_gr_init_get_attrib_cb_gfxp_default_size(struct gk20a *g)
 {
+	(void)g;
 	return gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v();
 }
 
 u32 tu104_gr_init_get_attrib_cb_gfxp_size(struct gk20a *g)
 {
+	(void)g;
 	return gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v();
 }
 
 u32 tu104_gr_init_get_ctx_spill_size(struct gk20a *g)
 {
+	(void)g;
 	return nvgpu_safe_mult_u32(
 		gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v(),
 		gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v());
@@ -248,6 +251,7 @@ u32 tu104_gr_init_get_ctx_betacb_size(struct gk20a *g)
 
 u32 tu104_gr_init_get_gfxp_rtv_cb_size(struct gk20a *g)
 {
+	(void)g;
 	return nvgpu_safe_mult_u32(
 		nvgpu_safe_add_u32(
 			nvgpu_safe_add_u32(
@@ -26,6 +26,7 @@
 #include <nvgpu/engines.h>
 #include <nvgpu/nvgpu_err.h>
 #include <nvgpu/errata.h>
+#include <nvgpu/string.h>
 
 #include <nvgpu/gr/config.h>
 #include <nvgpu/gr/gr.h>
@@ -190,7 +190,7 @@ static u32 ga10b_grmgr_get_local_gr_syspipe_index(struct gk20a *g,
 
 	while (gr_syspipe_mask != 0U) {
 		u32 bit_position = nvgpu_safe_sub_u32(
-			nvgpu_ffs(gr_syspipe_mask), 1UL);
+			(u32)nvgpu_ffs(gr_syspipe_mask), 1UL);
 		++local_gr_syspipe_index;
 		gr_syspipe_mask ^= BIT32(bit_position);
 	}
@@ -218,7 +218,7 @@ static u32 ga10b_grmgr_get_gr_syspipe_id_from_local_gr_syspipe_index(
 
 	while (temp_gr_syspipe_index < max_allowed_syspipe_index) {
 		gr_syspipe_id = nvgpu_safe_sub_u32(
-			nvgpu_ffs(usable_gr_syspipe_mask), 1UL);
+			(u32)nvgpu_ffs(usable_gr_syspipe_mask), 1UL);
 		++temp_gr_syspipe_index;
 		usable_gr_syspipe_mask ^= BIT32(gr_syspipe_id);
 	}
@@ -245,7 +245,7 @@ static u32 ga10b_grmgr_get_num_gr_syspipe_enabled(struct gk20a *g,
 
 	while (gr_syspipe_enabled_mask != 0U) {
 		u32 bit_pos = nvgpu_safe_sub_u32(
-			nvgpu_ffs(gr_syspipe_enabled_mask), 1UL);
+			(u32)nvgpu_ffs(gr_syspipe_enabled_mask), 1UL);
 		gr_syspipe_enabled_mask ^= BIT32(bit_pos);
 		++gr_syspipe_enabled_count;
 	}
@@ -399,7 +399,7 @@ static int ga10b_grmgr_get_gpu_instance(struct gk20a *g,
 			(gpu_instance_static_config[index].num_gpc))) {
 
 		logical_gpc_id = nvgpu_safe_sub_u32(
-			nvgpu_ffs(temp_gpc_mask), 1UL);
+			(u32)nvgpu_ffs(temp_gpc_mask), 1UL);
 
 		if ((gpcs[logical_gpc_id].gpcgrp_id ==
 				gpu_instance_gpcgrp_id[index]) ||
@@ -499,7 +499,7 @@ static int ga10b_grmgr_get_gpu_instance(struct gk20a *g,
 				nvgpu_safe_add_u32(local_gr_syspipe_index,
 					temp_lce_cnt));
 			physical_ce_id = nvgpu_safe_sub_u32(
-				nvgpu_ffs(temp_lce_mask), 1UL);
+				(u32)nvgpu_ffs(temp_lce_mask), 1UL);
 			if (ga10b_grmgr_is_syspipe_lce(g,
 					nvgpu_device_get(g, NVGPU_DEVTYPE_GRAPHICS,
 						gr_syspipe_id),
@@ -536,7 +536,7 @@ static int ga10b_grmgr_get_gpu_instance(struct gk20a *g,
 		struct nvgpu_gr_syspipe *local_gr_syspipe =
 			&gpu_instance[gpu_instance_id].gr_syspipe;
 		physical_ce_id = nvgpu_safe_sub_u32(
-			nvgpu_ffs(lce_mask), 1UL);
+			(u32)nvgpu_ffs(lce_mask), 1UL);
 		temp_lce_cnt = gpu_instance[gpu_instance_id].num_lce;
 		gpu_instance[gpu_instance_id].lce_devs[temp_lce_cnt] =
 			lces[physical_ce_id];
@@ -798,7 +798,7 @@ int ga10b_grmgr_init_gr_manager(struct gk20a *g)
 			continue;
 		}
 		gr_syspipe = &g->mig.gpu_instance[index].gr_syspipe;
-		g->mig.gr_syspipe_en_mask |= BIT(gr_syspipe->gr_syspipe_id);
+		g->mig.gr_syspipe_en_mask |= BIT32(gr_syspipe->gr_syspipe_id);
 
 		gr_dev = nvgpu_device_get(g, NVGPU_DEVTYPE_GRAPHICS,
 			gr_syspipe->gr_syspipe_id);
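The BIT() to BIT32() switch in the hunk above avoids a -Wconversion warning: OR-ing a wider-than-32-bit bit mask into the u32 gr_syspipe_en_mask implicitly narrows the result. A standalone sketch with stand-in macro definitions (assumed to mirror the usual unsigned-long and u32 forms; these are not the nvgpu headers):

#include <stdint.h>

#define BIT(n)   (1UL << (n))           /* assumed unsigned long form, 64-bit on LP64 targets */
#define BIT32(n) ((uint32_t)1U << (n))  /* assumed 32-bit form */

static uint32_t add_to_mask(uint32_t mask, uint32_t id)
{
    /* mask |= BIT(id);   -Wconversion: unsigned long result narrowed to uint32_t */
    mask |= BIT32(id);    /* the whole expression stays in uint32_t */
    return mask;
}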
@@ -851,11 +851,13 @@ int ga10b_grmgr_init_gr_manager(struct gk20a *g)
 
 u32 ga10b_grmgr_get_max_sys_pipes(struct gk20a *g)
 {
+	(void)g;
 	return smcarb_max_partitionable_sys_pipes_v();
 }
 
 u32 ga10b_grmgr_get_allowed_swizzid_size(struct gk20a *g)
 {
+	(void)g;
 	return smcarb_allowed_swizzid__size1_v();
 }
 
@@ -203,7 +203,7 @@ static void ga10b_gsp_clr_intr(struct gk20a *g, u32 intr)
 	gk20a_writel(g, pgsp_falcon_irqsclr_r(), intr);
 }
 
-void ga10b_gsp_handle_interrupts(struct gk20a *g, u32 intr)
+static void ga10b_gsp_handle_interrupts(struct gk20a *g, u32 intr)
 {
 	int err = 0;
 
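Marking ga10b_gsp_handle_interrupts() static, as above, resolves -Wmissing-declarations: the flag reports any externally visible function defined without a prior prototype, and this handler is only referenced within its own file. A standalone sketch of the two ways to satisfy the flag, using hypothetical names (not nvgpu code):

/* file-local helper: internal linkage, so no separate prototype is needed */
static void handle_intr(unsigned int intr)
{
    (void)intr; /* body elided in this sketch */
}

/* exported function: a visible declaration (normally from a header) is required */
void isr_entry(unsigned int intr);

void isr_entry(unsigned int intr)
{
    handle_intr(intr);
}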
@@ -561,6 +561,7 @@ void ga10b_gsp_msgq_tail(struct gk20a *g, struct nvgpu_gsp *gsp,
 	} else {
 		gk20a_writel(g, pgsp_msgq_tail_r(0U), *tail);
 	}
+	(void)gsp;
 }
 
 void ga10b_gsp_set_msg_intr(struct gk20a *g)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -21,6 +21,7 @@
  */
 
 #include <nvgpu/gk20a.h>
+#include <nvgpu/string.h>
 
 #include "netlist_ga10b.h"
 
@@ -370,7 +370,7 @@ void ga10b_perf_enable_membuf(struct gk20a *g, u32 size, u64 buf_addr)
 
 void ga10b_perf_disable_membuf(struct gk20a *g)
 {
-	int zero_value = 0;
+	u32 zero_value = 0U;
 	u32 i;
 
 	nvgpu_assert(perf_pmasys_channel_outbase__size_1_v() ==
@@ -516,20 +516,24 @@ u32 ga10b_perf_get_pmmfbprouter_per_chiplet_offset(void)
 
 u32 ga10b_get_hwpm_fbp_perfmon_regs_base(struct gk20a *g)
 {
+	(void)g;
 	return perf_pmmfbp_base_v();
 }
 u32 ga10b_get_hwpm_gpc_perfmon_regs_base(struct gk20a *g)
 {
+	(void)g;
 	return perf_pmmgpc_base_v();
 }
 
 u32 ga10b_get_hwpm_fbprouter_perfmon_regs_base(struct gk20a *g)
 {
+	(void)g;
 	return perf_pmmfbprouter_base_v();
 }
 
 u32 ga10b_get_hwpm_gpcrouter_perfmon_regs_base(struct gk20a *g)
 {
+	(void)g;
 	return perf_pmmgpcrouter_base_v();
 }
 
@@ -666,7 +670,7 @@ int ga10b_perf_update_get_put(struct gk20a *g, u64 bytes_consumed,
 
 
 	if (bytes_consumed != 0U) {
-		nvgpu_writel(g, perf_pmasys_channel_mem_bump_r(inst_zero), bytes_consumed);
+		nvgpu_writel(g, perf_pmasys_channel_mem_bump_r(inst_zero), (u32)bytes_consumed);
 	}
 
 	if (update_available_bytes) {
@@ -87,8 +87,9 @@ static int ga10b_pmu_ns_falcon_bootstrap(struct gk20a *g, struct nvgpu_pmu *pmu,
 	struct pmu_ucode_desc_v1 *desc = NULL;
 	u32 addr_code_lo, addr_data_lo, addr_load_lo;
 	u32 addr_code_hi, addr_data_hi;
-	u32 blocks, i, err;
+	u32 blocks, i;
 	u32 inst_block_ptr;
+	int err;
 
 	nvgpu_log_fn(g, " ");
 
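Splitting err out of the u32 declaration above keeps error handling signed: the helpers called during bootstrap return int error codes, negative on failure, and storing them in a u32 err would force an implicit sign-changing conversion at each assignment under -Wsign-conversion. A standalone sketch with hypothetical names (not nvgpu code):

static int do_step(void)
{
    return -22; /* hypothetical failure path, -EINVAL-style code */
}

static int bootstrap(void)
{
    unsigned int blocks = 8U; /* counters stay unsigned */
    int err;                  /* error codes stay signed */

    (void)blocks;
    err = do_step(); /* int to int: no sign-conversion warning */
    return err;
}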
@@ -154,7 +155,7 @@ static int ga10b_pmu_ns_falcon_bootstrap(struct gk20a *g, struct nvgpu_pmu *pmu,
 			addr_load_lo -
 			(right_shift_8bits(desc->bootloader_imem_offset)));
 
-	blocks = right_shift_8bits(((desc->bootloader_size + U8_MAX) & ~U8_MAX));
+	blocks = right_shift_8bits(((desc->bootloader_size + U8_MAX) & ~(u32)U8_MAX));
 
 	for (i = DMA_OFFSET_START; i < blocks; i++) {
 		nvgpu_writel(g, pwr_falcon_dmatrfmoffs_r(),
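The ~(u32)U8_MAX cast above matters because U8_MAX promotes to int in expressions, so ~U8_MAX evaluates to the negative int -256; using that as a mask against an unsigned quantity is flagged by -Wsign-conversion. A standalone sketch with a stand-in U8_MAX definition and hypothetical names (not nvgpu code):

#include <stdint.h>

#define U8_MAX 255 /* stand-in; promotes to int in expressions */

static uint32_t round_up_to_256(uint32_t size)
{
    /* return (size + U8_MAX) & ~U8_MAX;   -Wsign-conversion: ~U8_MAX is the int -256 */
    return (size + U8_MAX) & ~(uint32_t)U8_MAX; /* unsigned mask, value 0xFFFFFF00 */
}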
@@ -197,6 +198,8 @@ static int ga10b_pmu_ns_nvriscv_bootstrap(struct gk20a *g, struct nvgpu_pmu *pm
 	u64 fmc_data_addr = 0;
 	u64 manifest_addr = 0;
 
+	(void)args_offset;
+
 	desc = (struct falcon_next_core_ucode_desc *)(void *)
 			rtos_fw->fw_desc->data;
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -43,7 +43,7 @@ static void nvgpu_sim_esc_readl_ga10b(struct gk20a *g,
 			sim_escape_read_hdr_size());
 	*sim_msg_param(g, 0) = index;
 	*sim_msg_param(g, 4) = sizeof(u32);
-	data_offset = round_up(
+	data_offset = (u32)round_up(
 		nvgpu_safe_add_u64(strlen(path), 1ULL), sizeof(u32));
 	*sim_msg_param(g, 8) = data_offset;
 	strcpy((char *)sim_msg_param(g, sim_escape_read_hdr_size()), path);