gpu: nvgpu: fix CERT-C errors in gv100 kernel hw headers

Support is added to the register generator tool to fix CERT-C
errors associated with u32 arithmetic operations. The gv100 hw
headers are regenerated with the updated register generator.

JIRA NVGPU-3520

Change-Id: Id01e5ab6a3d79f8ecb6105ea8802d65f6de4db24
Signed-off-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2124637
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vinod Gopalakrishnakurup <vinodg@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Seshendra Gadagottu
Date:      2019-05-23 17:05:07 -07:00
Committer: mobile promotions
Parent:    14483ae421
Commit:    b0d01715e2
37 changed files with 229 additions and 184 deletions
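Every hunk below follows one pattern: open-coded `base + i*stride` offset
arithmetic in the generated accessors is replaced with the checked helpers
pulled in through the new #include <nvgpu/safe_ops.h>. As a minimal sketch
of why this clears the CERT-C u32 findings (INT30-C, unsigned wraparound),
the helpers can be assumed to behave roughly as below; this is an
illustration only, and the authoritative definitions (including the exact
error handling) live in <nvgpu/safe_ops.h>:

/*
 * Hedged sketch of the u32 safe-arithmetic helpers used by the
 * generated headers. Assumption: they trap (BUG()) when the u32
 * result would wrap, instead of silently wrapping modulo 2^32.
 * UINT_MAX is taken from the kernel's limits definitions.
 */
static inline u32 nvgpu_safe_add_u32(u32 ui_a, u32 ui_b)
{
	if (UINT_MAX - ui_a < ui_b) {
		BUG();	/* addition would exceed u32 range */
	}
	return ui_a + ui_b;
}

static inline u32 nvgpu_safe_mult_u32(u32 ui_a, u32 ui_b)
{
	if ((ui_a != 0U) && (ui_b > (UINT_MAX / ui_a))) {
		BUG();	/* multiplication would exceed u32 range */
	}
	return ui_a * ui_b;
}

Under that assumption, an accessor such as bus_sw_scratch_r(i), now written
as nvgpu_safe_add_u32(0x00001580U, nvgpu_safe_mult_u32(i, 4U)), traps on an
index large enough to wrap the u32 offset instead of silently returning a
wrapped register address.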

hw_bus_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,10 +57,11 @@
#define NVGPU_HW_BUS_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 bus_sw_scratch_r(u32 i)
{
- return 0x00001580U + i*4U;
+ return nvgpu_safe_add_u32(0x00001580U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 bus_bar0_window_r(void)
{

hw_ccsr_gv100.h

@@ -57,10 +57,11 @@
#define NVGPU_HW_CCSR_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 ccsr_channel_inst_r(u32 i)
{
- return 0x00800000U + i*8U;
+ return nvgpu_safe_add_u32(0x00800000U, nvgpu_safe_mult_u32(i, 8U));
}
static inline u32 ccsr_channel_inst__size_1_v(void)
{
@@ -92,7 +93,7 @@ static inline u32 ccsr_channel_inst_bind_true_f(void)
}
static inline u32 ccsr_channel_r(u32 i)
{
- return 0x00800004U + i*8U;
+ return nvgpu_safe_add_u32(0x00800004U, nvgpu_safe_mult_u32(i, 8U));
}
static inline u32 ccsr_channel__size_1_v(void)
{

hw_ce_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,10 +57,11 @@
#define NVGPU_HW_CE_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 ce_intr_status_r(u32 i)
{
- return 0x00104410U + i*128U;
+ return nvgpu_safe_add_u32(0x00104410U, nvgpu_safe_mult_u32(i, 128U));
}
static inline u32 ce_intr_status_blockpipe_pending_f(void)
{

hw_ctxsw_prog_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_CTXSW_PROG_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 ctxsw_prog_fecs_header_v(void)
{

hw_falcon_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_FALCON_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 falcon_falcon_irqsset_r(void)
{
@@ -344,7 +345,7 @@ static inline u32 falcon_falcon_cpuctl_alias_startcpu_f(u32 v)
}
static inline u32 falcon_falcon_imemc_r(u32 i)
{
- return 0x00000180U + i*16U;
+ return nvgpu_safe_add_u32(0x00000180U, nvgpu_safe_mult_u32(i, 16U));
}
static inline u32 falcon_falcon_imemc_offs_f(u32 v)
{
@@ -364,11 +365,11 @@ static inline u32 falcon_falcon_imemc_secure_f(u32 v)
}
static inline u32 falcon_falcon_imemd_r(u32 i)
{
- return 0x00000184U + i*16U;
+ return nvgpu_safe_add_u32(0x00000184U, nvgpu_safe_mult_u32(i, 16U));
}
static inline u32 falcon_falcon_imemt_r(u32 i)
{
- return 0x00000188U + i*16U;
+ return nvgpu_safe_add_u32(0x00000188U, nvgpu_safe_mult_u32(i, 16U));
}
static inline u32 falcon_falcon_sctl_r(void)
{
@@ -544,7 +545,7 @@ static inline u32 falcon_falcon_icd_rdata_r(void)
}
static inline u32 falcon_falcon_dmemc_r(u32 i)
{
- return 0x000001c0U + i*8U;
+ return nvgpu_safe_add_u32(0x000001c0U, nvgpu_safe_mult_u32(i, 8U));
}
static inline u32 falcon_falcon_dmemc_offs_f(u32 v)
{
@@ -572,7 +573,7 @@ static inline u32 falcon_falcon_dmemc_aincr_f(u32 v)
}
static inline u32 falcon_falcon_dmemd_r(u32 i)
{
- return 0x000001c4U + i*8U;
+ return nvgpu_safe_add_u32(0x000001c4U, nvgpu_safe_mult_u32(i, 8U));
}
static inline u32 falcon_falcon_debug1_r(void)
{

hw_fb_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_FB_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 fb_fbhub_num_active_ltcs_r(void)
{
@@ -772,7 +773,7 @@ static inline u32 fb_niso_intr_mmu_other_fault_notify_pending_f(void)
}
static inline u32 fb_niso_intr_en_r(u32 i)
{
- return 0x00100a24U + i*4U;
+ return nvgpu_safe_add_u32(0x00100a24U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 fb_niso_intr_en__size_1_v(void)
{
@@ -836,7 +837,7 @@ static inline u32 fb_niso_intr_en_mmu_other_fault_notify_enabled_f(void)
}
static inline u32 fb_niso_intr_en_set_r(u32 i)
{
- return 0x00100a2cU + i*4U;
+ return nvgpu_safe_add_u32(0x00100a2cU, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 fb_niso_intr_en_set__size_1_v(void)
{
@@ -900,7 +901,7 @@ static inline u32 fb_niso_intr_en_set_mmu_other_fault_notify_set_f(void)
}
static inline u32 fb_niso_intr_en_clr_r(u32 i)
{
- return 0x00100a34U + i*4U;
+ return nvgpu_safe_add_u32(0x00100a34U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 fb_niso_intr_en_clr__size_1_v(void)
{
@@ -972,7 +973,7 @@ static inline u32 fb_niso_intr_en_clr_mmu_replay_fault_buffer_v(void)
}
static inline u32 fb_mmu_fault_buffer_lo_r(u32 i)
{
- return 0x00100e24U + i*20U;
+ return nvgpu_safe_add_u32(0x00100e24U, nvgpu_safe_mult_u32(i, 20U));
}
static inline u32 fb_mmu_fault_buffer_lo__size_1_v(void)
{
@@ -1044,7 +1045,7 @@ static inline u32 fb_mmu_fault_buffer_lo_addr_v(u32 r)
}
static inline u32 fb_mmu_fault_buffer_hi_r(u32 i)
{
- return 0x00100e28U + i*20U;
+ return nvgpu_safe_add_u32(0x00100e28U, nvgpu_safe_mult_u32(i, 20U));
}
static inline u32 fb_mmu_fault_buffer_hi__size_1_v(void)
{
@@ -1060,7 +1061,7 @@ static inline u32 fb_mmu_fault_buffer_hi_addr_v(u32 r)
}
static inline u32 fb_mmu_fault_buffer_get_r(u32 i)
{
- return 0x00100e2cU + i*20U;
+ return nvgpu_safe_add_u32(0x00100e2cU, nvgpu_safe_mult_u32(i, 20U));
}
static inline u32 fb_mmu_fault_buffer_get__size_1_v(void)
{
@@ -1112,7 +1113,7 @@ static inline u32 fb_mmu_fault_buffer_get_overflow_clear_f(void)
}
static inline u32 fb_mmu_fault_buffer_put_r(u32 i)
{
- return 0x00100e30U + i*20U;
+ return nvgpu_safe_add_u32(0x00100e30U, nvgpu_safe_mult_u32(i, 20U));
}
static inline u32 fb_mmu_fault_buffer_put__size_1_v(void)
{
@@ -1168,7 +1169,7 @@ static inline u32 fb_mmu_fault_buffer_put_overflow_yes_f(void)
}
static inline u32 fb_mmu_fault_buffer_size_r(u32 i)
{
- return 0x00100e34U + i*20U;
+ return nvgpu_safe_add_u32(0x00100e34U, nvgpu_safe_mult_u32(i, 20U));
}
static inline u32 fb_mmu_fault_buffer_size__size_1_v(void)
{

hw_fifo_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_FIFO_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 fifo_userd_writeback_r(void)
{
@@ -116,7 +117,7 @@ static inline u32 fifo_runlist_engine_f(u32 v)
}
static inline u32 fifo_eng_runlist_base_r(u32 i)
{
- return 0x00002280U + i*8U;
+ return nvgpu_safe_add_u32(0x00002280U, nvgpu_safe_mult_u32(i, 8U));
}
static inline u32 fifo_eng_runlist_base__size_1_v(void)
{
@@ -124,7 +125,7 @@ static inline u32 fifo_eng_runlist_base__size_1_v(void)
}
static inline u32 fifo_eng_runlist_r(u32 i)
{
- return 0x00002284U + i*8U;
+ return nvgpu_safe_add_u32(0x00002284U, nvgpu_safe_mult_u32(i, 8U));
}
static inline u32 fifo_eng_runlist__size_1_v(void)
{
@@ -144,7 +145,7 @@ static inline u32 fifo_eng_runlist_pending_true_f(void)
}
static inline u32 fifo_pb_timeslice_r(u32 i)
{
- return 0x00002350U + i*4U;
+ return nvgpu_safe_add_u32(0x00002350U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 fifo_pb_timeslice_timeout_16_f(void)
{
@@ -160,7 +161,7 @@ static inline u32 fifo_pb_timeslice_enable_true_f(void)
}
static inline u32 fifo_pbdma_map_r(u32 i)
{
- return 0x00002390U + i*4U;
+ return nvgpu_safe_add_u32(0x00002390U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 fifo_intr_0_r(void)
{
@@ -344,7 +345,7 @@ static inline u32 fifo_preempt_id_f(u32 v)
}
static inline u32 fifo_engine_status_r(u32 i)
{
- return 0x00002640U + i*8U;
+ return nvgpu_safe_add_u32(0x00002640U, nvgpu_safe_mult_u32(i, 8U));
}
static inline u32 fifo_engine_status__size_1_v(void)
{
@@ -440,7 +441,7 @@ static inline u32 fifo_engine_status_ctxsw_in_progress_f(void)
}
static inline u32 fifo_pbdma_status_r(u32 i)
{
- return 0x00003080U + i*4U;
+ return nvgpu_safe_add_u32(0x00003080U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 fifo_pbdma_status__size_1_v(void)
{

hw_flush_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_FLUSH_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 flush_l2_system_invalidate_r(void)
{

hw_fuse_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_FUSE_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 fuse_status_opt_gpc_r(void)
{
@@ -64,11 +65,11 @@ static inline u32 fuse_status_opt_gpc_r(void)
}
static inline u32 fuse_status_opt_tpc_gpc_r(u32 i)
{
- return 0x00021c38U + i*4U;
+ return nvgpu_safe_add_u32(0x00021c38U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 fuse_ctrl_opt_tpc_gpc_r(u32 i)
{
- return 0x00021838U + i*4U;
+ return nvgpu_safe_add_u32(0x00021838U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 fuse_ctrl_opt_ram_svop_pdp_r(void)
{
@@ -128,7 +129,7 @@ static inline u32 fuse_status_opt_fbio_data_v(u32 r)
}
static inline u32 fuse_status_opt_rop_l2_fbp_r(u32 i)
{
- return 0x00021d70U + i*4U;
+ return nvgpu_safe_add_u32(0x00021d70U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 fuse_status_opt_fbp_r(void)
{

hw_gmmu_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_GMMU_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 gmmu_new_pde_is_pte_w(void)
{

hw_gr_gv100.h

@@ -57,6 +57,7 @@
#define NVGPU_HW_GR_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 gr_intr_r(void)
{
@@ -1008,7 +1009,7 @@ static inline u32 gr_fe_go_idle_timeout_count_prod_f(void)
}
static inline u32 gr_fe_object_table_r(u32 i)
{
- return 0x00404200U + i*4U;
+ return nvgpu_safe_add_u32(0x00404200U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_fe_object_table_nvclass_v(u32 r)
{
@@ -1016,7 +1017,7 @@ static inline u32 gr_fe_object_table_nvclass_v(u32 r)
}
static inline u32 gr_fe_tpc_fs_r(u32 i)
{
- return 0x0040a200U + i*4U;
+ return nvgpu_safe_add_u32(0x0040a200U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_pri_mme_shadow_raw_index_r(void)
{
@@ -1180,7 +1181,7 @@ static inline u32 gr_fecs_icd_rdata_r(void)
}
static inline u32 gr_fecs_imemc_r(u32 i)
{
- return 0x00409180U + i*16U;
+ return nvgpu_safe_add_u32(0x00409180U, nvgpu_safe_mult_u32(i, 16U));
}
static inline u32 gr_fecs_imemc_offs_f(u32 v)
{
@@ -1196,11 +1197,11 @@ static inline u32 gr_fecs_imemc_aincw_f(u32 v)
}
static inline u32 gr_fecs_imemd_r(u32 i)
{
- return 0x00409184U + i*16U;
+ return nvgpu_safe_add_u32(0x00409184U, nvgpu_safe_mult_u32(i, 16U));
}
static inline u32 gr_fecs_imemt_r(u32 i)
{
- return 0x00409188U + i*16U;
+ return nvgpu_safe_add_u32(0x00409188U, nvgpu_safe_mult_u32(i, 16U));
}
static inline u32 gr_fecs_imemt_tag_f(u32 v)
{
@@ -1208,7 +1209,7 @@ static inline u32 gr_fecs_imemt_tag_f(u32 v)
}
static inline u32 gr_fecs_dmemc_r(u32 i)
{
- return 0x004091c0U + i*8U;
+ return nvgpu_safe_add_u32(0x004091c0U, nvgpu_safe_mult_u32(i, 8U));
}
static inline u32 gr_fecs_dmemc_offs_s(void)
{
@@ -1236,7 +1237,7 @@ static inline u32 gr_fecs_dmemc_aincw_f(u32 v)
}
static inline u32 gr_fecs_dmemd_r(u32 i)
{
- return 0x004091c4U + i*8U;
+ return nvgpu_safe_add_u32(0x004091c4U, nvgpu_safe_mult_u32(i, 8U));
}
static inline u32 gr_fecs_dmatrfbase_r(void)
{
@@ -1564,7 +1565,7 @@ static inline u32 gr_fecs_ctx_state_store_major_rev_id_r(void)
}
static inline u32 gr_fecs_ctxsw_mailbox_r(u32 i)
{
- return 0x00409800U + i*4U;
+ return nvgpu_safe_add_u32(0x00409800U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_fecs_ctxsw_mailbox__size_1_v(void)
{
@@ -1584,7 +1585,7 @@ static inline u32 gr_fecs_ctxsw_mailbox_value_fail_v(void)
}
static inline u32 gr_fecs_ctxsw_mailbox_set_r(u32 i)
{
- return 0x004098c0U + i*4U;
+ return nvgpu_safe_add_u32(0x004098c0U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_fecs_ctxsw_mailbox_set_value_f(u32 v)
{
@@ -1592,7 +1593,7 @@ static inline u32 gr_fecs_ctxsw_mailbox_set_value_f(u32 v)
}
static inline u32 gr_fecs_ctxsw_mailbox_clear_r(u32 i)
{
- return 0x00409840U + i*4U;
+ return nvgpu_safe_add_u32(0x00409840U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_fecs_ctxsw_mailbox_clear_value_f(u32 v)
{
@@ -1836,7 +1837,7 @@ static inline u32 gr_gpc0_gpccs_ctxsw_idlestate_r(void)
}
static inline u32 gr_rstr2d_gpc_map_r(u32 i)
{
- return 0x0040780cU + i*4U;
+ return nvgpu_safe_add_u32(0x0040780cU, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_rstr2d_map_table_cfg_r(void)
{
@@ -1864,7 +1865,7 @@ static inline u32 gr_pd_hww_esr_en_enable_f(void)
}
static inline u32 gr_pd_num_tpc_per_gpc_r(u32 i)
{
- return 0x00406028U + i*4U;
+ return nvgpu_safe_add_u32(0x00406028U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_pd_num_tpc_per_gpc__size_1_v(void)
{
@@ -1956,7 +1957,7 @@ static inline u32 gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v(void)
}
static inline u32 gr_pd_dist_skip_table_r(u32 i)
{
- return 0x004064d0U + i*4U;
+ return nvgpu_safe_add_u32(0x004064d0U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_pd_dist_skip_table__size_1_v(void)
{
@@ -2312,7 +2313,7 @@ static inline u32 gr_ds_hww_report_mask_2_sph24_err_report_f(void)
}
static inline u32 gr_ds_num_tpc_per_gpc_r(u32 i)
{
- return 0x00405870U + i*4U;
+ return nvgpu_safe_add_u32(0x00405870U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_scc_bundle_cb_base_r(void)
{
@@ -2468,7 +2469,7 @@ static inline u32 gr_cwd_fs_num_tpcs_f(u32 v)
}
static inline u32 gr_cwd_gpc_tpc_id_r(u32 i)
{
- return 0x00405b60U + i*4U;
+ return nvgpu_safe_add_u32(0x00405b60U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_cwd_gpc_tpc_id_tpc0_s(void)
{
@@ -2492,7 +2493,7 @@ static inline u32 gr_cwd_gpc_tpc_id_tpc1_f(u32 v)
}
static inline u32 gr_cwd_sm_id_r(u32 i)
{
- return 0x00405ba0U + i*4U;
+ return nvgpu_safe_add_u32(0x00405ba0U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_cwd_sm_id__size_1_v(void)
{
@@ -2620,7 +2621,7 @@ static inline u32 gr_gpc0_zcull_total_ram_size_num_aliquots_f(u32 v)
}
static inline u32 gr_gpc0_zcull_zcsize_r(u32 i)
{
- return 0x00500a04U + i*32U;
+ return nvgpu_safe_add_u32(0x00500a04U, nvgpu_safe_mult_u32(i, 32U));
}
static inline u32 gr_gpc0_zcull_zcsize_height_subregion__multiple_v(void)
{
@@ -2632,7 +2633,7 @@ static inline u32 gr_gpc0_zcull_zcsize_width_subregion__multiple_v(void)
}
static inline u32 gr_gpc0_gpm_pd_sm_id_r(u32 i)
{
- return 0x00500c10U + i*4U;
+ return nvgpu_safe_add_u32(0x00500c10U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_gpc0_gpm_pd_sm_id_id_f(u32 v)
{
@@ -2640,7 +2641,7 @@ static inline u32 gr_gpc0_gpm_pd_sm_id_id_f(u32 v)
}
static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_r(u32 i)
{
- return 0x00500c30U + i*4U;
+ return nvgpu_safe_add_u32(0x00500c30U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_mask_v(u32 r)
{
@@ -2904,7 +2905,7 @@ static inline u32 gr_gpccs_dmactl_imem_scrubbing_m(void)
}
static inline u32 gr_gpccs_imemc_r(u32 i)
{
- return 0x0041a180U + i*16U;
+ return nvgpu_safe_add_u32(0x0041a180U, nvgpu_safe_mult_u32(i, 16U));
}
static inline u32 gr_gpccs_imemc_offs_f(u32 v)
{
@@ -2920,11 +2921,11 @@ static inline u32 gr_gpccs_imemc_aincw_f(u32 v)
}
static inline u32 gr_gpccs_imemd_r(u32 i)
{
- return 0x0041a184U + i*16U;
+ return nvgpu_safe_add_u32(0x0041a184U, nvgpu_safe_mult_u32(i, 16U));
}
static inline u32 gr_gpccs_imemt_r(u32 i)
{
- return 0x0041a188U + i*16U;
+ return nvgpu_safe_add_u32(0x0041a188U, nvgpu_safe_mult_u32(i, 16U));
}
static inline u32 gr_gpccs_imemt__size_1_v(void)
{
@@ -2936,7 +2937,7 @@ static inline u32 gr_gpccs_imemt_tag_f(u32 v)
}
static inline u32 gr_gpccs_dmemc_r(u32 i)
{
- return 0x0041a1c0U + i*8U;
+ return nvgpu_safe_add_u32(0x0041a1c0U, nvgpu_safe_mult_u32(i, 8U));
}
static inline u32 gr_gpccs_dmemc_offs_f(u32 v)
{
@@ -2952,11 +2953,11 @@ static inline u32 gr_gpccs_dmemc_aincw_f(u32 v)
}
static inline u32 gr_gpccs_dmemd_r(u32 i)
{
- return 0x0041a1c4U + i*8U;
+ return nvgpu_safe_add_u32(0x0041a1c4U, nvgpu_safe_mult_u32(i, 8U));
}
static inline u32 gr_gpccs_ctxsw_mailbox_r(u32 i)
{
- return 0x0041a800U + i*4U;
+ return nvgpu_safe_add_u32(0x0041a800U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_gpccs_ctxsw_mailbox_value_f(u32 v)
{
@@ -3100,7 +3101,7 @@ static inline u32 gr_gpcs_ppcs_cbm_beta_cb_ctrl_cbes_reserve_f(u32 v)
}
static inline u32 gr_gpcs_swdx_tc_beta_cb_size_r(u32 i)
{
- return 0x00418ea0U + i*4U;
+ return nvgpu_safe_add_u32(0x00418ea0U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_f(u32 v)
{
@@ -3112,7 +3113,7 @@ static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_m(void)
}
static inline u32 gr_gpcs_swdx_dss_zbc_color_r_r(u32 i)
{
- return 0x00418010U + i*4U;
+ return nvgpu_safe_add_u32(0x00418010U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_gpcs_swdx_dss_zbc_color_r_val_f(u32 v)
{
@@ -3120,7 +3121,7 @@ static inline u32 gr_gpcs_swdx_dss_zbc_color_r_val_f(u32 v)
}
static inline u32 gr_gpcs_swdx_dss_zbc_color_g_r(u32 i)
{
- return 0x0041804cU + i*4U;
+ return nvgpu_safe_add_u32(0x0041804cU, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_gpcs_swdx_dss_zbc_color_g_val_f(u32 v)
{
@@ -3128,7 +3129,7 @@ static inline u32 gr_gpcs_swdx_dss_zbc_color_g_val_f(u32 v)
}
static inline u32 gr_gpcs_swdx_dss_zbc_color_b_r(u32 i)
{
- return 0x00418088U + i*4U;
+ return nvgpu_safe_add_u32(0x00418088U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_gpcs_swdx_dss_zbc_color_b_val_f(u32 v)
{
@@ -3136,7 +3137,7 @@ static inline u32 gr_gpcs_swdx_dss_zbc_color_b_val_f(u32 v)
}
static inline u32 gr_gpcs_swdx_dss_zbc_color_a_r(u32 i)
{
- return 0x004180c4U + i*4U;
+ return nvgpu_safe_add_u32(0x004180c4U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_gpcs_swdx_dss_zbc_color_a_val_f(u32 v)
{
@@ -3148,7 +3149,7 @@ static inline u32 gr_gpcs_swdx_dss_zbc_c_01_to_04_format_r(void)
}
static inline u32 gr_gpcs_swdx_dss_zbc_z_r(u32 i)
{
- return 0x00418110U + i*4U;
+ return nvgpu_safe_add_u32(0x00418110U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_gpcs_swdx_dss_zbc_z_val_f(u32 v)
{
@@ -3160,7 +3161,7 @@ static inline u32 gr_gpcs_swdx_dss_zbc_z_01_to_04_format_r(void)
}
static inline u32 gr_gpcs_swdx_dss_zbc_s_r(u32 i)
{
- return 0x0041815cU + i*4U;
+ return nvgpu_safe_add_u32(0x0041815cU, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_gpcs_swdx_dss_zbc_s_val_f(u32 v)
{
@@ -3188,7 +3189,7 @@ static inline u32 gr_gpcs_setup_attrib_cb_base_valid_true_f(void)
}
static inline u32 gr_crstr_gpc_map_r(u32 i)
{
- return 0x00418b08U + i*4U;
+ return nvgpu_safe_add_u32(0x00418b08U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_crstr_gpc_map_tile0_f(u32 v)
{
@@ -3228,7 +3229,7 @@ static inline u32 gr_crstr_map_table_cfg_num_entries_f(u32 v)
}
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_r(u32 i)
{
- return 0x00418980U + i*4U;
+ return nvgpu_safe_add_u32(0x00418980U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_0_f(u32 v)
{
@@ -3764,7 +3765,7 @@ static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_fast_mode_switch_true_f(void)
}
static inline u32 gr_ppcs_wwdx_map_gpc_map_r(u32 i)
{
- return 0x0041bf00U + i*4U;
+ return nvgpu_safe_add_u32(0x0041bf00U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_ppcs_wwdx_map_table_cfg_r(void)
{
@@ -3796,7 +3797,7 @@ static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_conservative_f(u32 v)
}
static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_r(u32 i)
{
- return 0x0041bfb0U + i*4U;
+ return nvgpu_safe_add_u32(0x0041bfb0U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff__size_1_v(void)
{
@@ -4010,6 +4011,14 @@ static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_disable_m(void)
{
return U32(0x1U) << 31U;
}
+ static inline u32 gr_gpcs_pri_mmu_ctrl_atomic_capability_mode_m(void)
+ {
+ return U32(0x3U) << 24U;
+ }
+ static inline u32 gr_gpcs_pri_mmu_ctrl_atomic_capability_mode_rmw_f(void)
+ {
+ return 0x2000000U;
+ }
static inline u32 gr_gpcs_pri_mmu_pm_unit_mask_r(void)
{
return 0x00418890U;
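Note on the hunk above: besides the safe-arithmetic conversion, two accessors
are added for the 2-bit atomic capability mode field of gr_gpcs_pri_mmu_ctrl
at bit 24. The _m() helper returns the field mask (U32(0x3U) << 24U), and the
_rmw_f() helper returns the RMW mode value already shifted into place:
0x2U << 24U == 0x2000000U.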

hw_ioctrl_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_IOCTRL_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 ioctrl_reset_r(void)
{
@@ -108,7 +109,7 @@ static inline u32 ioctrl_debug_reset_common_v(u32 r)
}
static inline u32 ioctrl_clock_control_r(u32 i)
{
- return 0x00000180U + i*4U;
+ return nvgpu_safe_add_u32(0x00000180U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 ioctrl_clock_control__size_1_v(void)
{
@@ -244,7 +245,7 @@ static inline u32 ioctrl_common_intr_0_status_intrb_v(u32 r)
}
static inline u32 ioctrl_link_intr_0_mask_r(u32 i)
{
- return 0x00000240U + i*20U;
+ return nvgpu_safe_add_u32(0x00000240U, nvgpu_safe_mult_u32(i, 20U));
}
static inline u32 ioctrl_link_intr_0_mask_fatal_f(u32 v)
{
@@ -288,7 +289,7 @@ static inline u32 ioctrl_link_intr_0_mask_intrb_v(u32 r)
}
static inline u32 ioctrl_link_intr_0_status_r(u32 i)
{
- return 0x00000244U + i*20U;
+ return nvgpu_safe_add_u32(0x00000244U, nvgpu_safe_mult_u32(i, 20U));
}
static inline u32 ioctrl_link_intr_0_status_fatal_f(u32 v)
{

hw_ioctrlmif_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_IOCTRLMIF_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 ioctrlmif_rx_err_contain_en_0_r(void)
{

hw_ltc_gv100.h

@@ -57,6 +57,7 @@
#define NVGPU_HW_LTC_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 ltc_pltcg_base_v(void)
{
@@ -248,7 +249,7 @@ static inline u32 ltc_ltcs_ltss_dstg_zbc_index_address_f(u32 v)
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(u32 i)
{
- return 0x0017e33cU + i*4U;
+ return nvgpu_safe_add_u32(0x0017e33cU, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(void)
{

hw_mc_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_MC_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 mc_boot_0_r(void)
{
@@ -80,7 +81,7 @@ static inline u32 mc_boot_0_minor_revision_v(u32 r)
}
static inline u32 mc_intr_r(u32 i)
{
- return 0x00000100U + i*4U;
+ return nvgpu_safe_add_u32(0x00000100U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 mc_intr_pfifo_pending_f(void)
{
@@ -116,15 +117,15 @@ static inline u32 mc_intr_nvlink_pending_f(void)
}
static inline u32 mc_intr_en_r(u32 i)
{
- return 0x00000140U + i*4U;
+ return nvgpu_safe_add_u32(0x00000140U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 mc_intr_en_set_r(u32 i)
{
- return 0x00000160U + i*4U;
+ return nvgpu_safe_add_u32(0x00000160U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 mc_intr_en_clear_r(u32 i)
{
- return 0x00000180U + i*4U;
+ return nvgpu_safe_add_u32(0x00000180U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 mc_enable_r(void)
{

hw_minion_gv100.h

@@ -57,6 +57,7 @@
#define NVGPU_HW_MINION_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 minion_minion_status_r(void)
{
@@ -648,7 +649,7 @@ static inline u32 minion_minion_intr_stall_en_link_v(u32 r)
}
static inline u32 minion_nvlink_dl_cmd_r(u32 i)
{
- return 0x00000900U + i*4U;
+ return nvgpu_safe_add_u32(0x00000900U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 minion_nvlink_dl_cmd___size_1_v(void)
{
@@ -872,7 +873,7 @@ static inline u32 minion_misc_0_scratch_swrw_0_v(u32 r)
}
static inline u32 minion_nvlink_link_intr_r(u32 i)
{
- return 0x00000a00U + i*4U;
+ return nvgpu_safe_add_u32(0x00000a00U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 minion_nvlink_link_intr___size_1_v(void)
{

hw_nvl_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_NVL_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 nvl_link_state_r(void)
{
@@ -192,7 +193,7 @@ static inline u32 nvl_link_activity_blkact_v(u32 r)
}
static inline u32 nvl_sublink_activity_r(u32 i)
{
- return 0x00000010U + i*4U;
+ return nvgpu_safe_add_u32(0x00000010U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 nvl_sublink_activity_blkact0_f(u32 v)
{

hw_nvlinkip_discovery_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_NVLINKIP_DISCOVERY_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 nvlinkip_discovery_common_entry_f(u32 v)
{

hw_nvlipt_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_NVLIPT_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 nvlipt_intr_control_link0_r(void)
{

hw_nvtlc_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_NVTLC_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 nvtlc_tx_err_report_en_0_r(void)
{

hw_pbdma_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_PBDMA_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 pbdma_gp_entry1_r(void)
{
@@ -76,7 +77,7 @@ static inline u32 pbdma_gp_entry1_length_v(u32 r)
}
static inline u32 pbdma_gp_base_r(u32 i)
{
- return 0x00040048U + i*8192U;
+ return nvgpu_safe_add_u32(0x00040048U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_gp_base__size_1_v(void)
{
@@ -92,7 +93,7 @@ static inline u32 pbdma_gp_base_rsvd_s(void)
}
static inline u32 pbdma_gp_base_hi_r(u32 i)
{
- return 0x0004004cU + i*8192U;
+ return nvgpu_safe_add_u32(0x0004004cU, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_gp_base_hi_offset_f(u32 v)
{
@@ -104,43 +105,43 @@ static inline u32 pbdma_gp_base_hi_limit2_f(u32 v)
}
static inline u32 pbdma_gp_fetch_r(u32 i)
{
- return 0x00040050U + i*8192U;
+ return nvgpu_safe_add_u32(0x00040050U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_gp_get_r(u32 i)
{
- return 0x00040014U + i*8192U;
+ return nvgpu_safe_add_u32(0x00040014U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_gp_put_r(u32 i)
{
- return 0x00040000U + i*8192U;
+ return nvgpu_safe_add_u32(0x00040000U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_pb_fetch_r(u32 i)
{
- return 0x00040054U + i*8192U;
+ return nvgpu_safe_add_u32(0x00040054U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_pb_fetch_hi_r(u32 i)
{
- return 0x00040058U + i*8192U;
+ return nvgpu_safe_add_u32(0x00040058U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_get_r(u32 i)
{
- return 0x00040018U + i*8192U;
+ return nvgpu_safe_add_u32(0x00040018U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_get_hi_r(u32 i)
{
- return 0x0004001cU + i*8192U;
+ return nvgpu_safe_add_u32(0x0004001cU, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_put_r(u32 i)
{
- return 0x0004005cU + i*8192U;
+ return nvgpu_safe_add_u32(0x0004005cU, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_put_hi_r(u32 i)
{
- return 0x00040060U + i*8192U;
+ return nvgpu_safe_add_u32(0x00040060U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_pb_header_r(u32 i)
{
- return 0x00040084U + i*8192U;
+ return nvgpu_safe_add_u32(0x00040084U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_pb_header_priv_user_f(void)
{
@@ -172,19 +173,19 @@ static inline u32 pbdma_pb_header_type_non_inc_f(void)
}
static inline u32 pbdma_hdr_shadow_r(u32 i)
{
- return 0x00040118U + i*8192U;
+ return nvgpu_safe_add_u32(0x00040118U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_gp_shadow_0_r(u32 i)
{
- return 0x00040110U + i*8192U;
+ return nvgpu_safe_add_u32(0x00040110U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_gp_shadow_1_r(u32 i)
{
- return 0x00040114U + i*8192U;
+ return nvgpu_safe_add_u32(0x00040114U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_subdevice_r(u32 i)
{
- return 0x00040094U + i*8192U;
+ return nvgpu_safe_add_u32(0x00040094U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_subdevice_id_f(u32 v)
{
@@ -200,7 +201,7 @@ static inline u32 pbdma_subdevice_channel_dma_enable_f(void)
}
static inline u32 pbdma_method0_r(u32 i)
{
- return 0x000400c0U + i*8192U;
+ return nvgpu_safe_add_u32(0x000400c0U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_method0_fifo_size_v(void)
{
@@ -228,23 +229,23 @@ static inline u32 pbdma_method0_valid_true_f(void)
}
static inline u32 pbdma_method1_r(u32 i)
{
- return 0x000400c8U + i*8192U;
+ return nvgpu_safe_add_u32(0x000400c8U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_method2_r(u32 i)
{
- return 0x000400d0U + i*8192U;
+ return nvgpu_safe_add_u32(0x000400d0U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_method3_r(u32 i)
{
- return 0x000400d8U + i*8192U;
+ return nvgpu_safe_add_u32(0x000400d8U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_data0_r(u32 i)
{
- return 0x000400c4U + i*8192U;
+ return nvgpu_safe_add_u32(0x000400c4U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_acquire_r(u32 i)
{
- return 0x00040030U + i*8192U;
+ return nvgpu_safe_add_u32(0x00040030U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_acquire_retry_man_2_f(void)
{
@@ -288,15 +289,15 @@ static inline u32 pbdma_acquire_timeout_en_disable_f(void)
}
static inline u32 pbdma_status_r(u32 i)
{
- return 0x00040100U + i*8192U;
+ return nvgpu_safe_add_u32(0x00040100U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_channel_r(u32 i)
{
- return 0x00040120U + i*8192U;
+ return nvgpu_safe_add_u32(0x00040120U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_signature_r(u32 i)
{
- return 0x00040010U + i*8192U;
+ return nvgpu_safe_add_u32(0x00040010U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_signature_hw_valid_f(void)
{
@@ -308,7 +309,7 @@ static inline u32 pbdma_signature_sw_zero_f(void)
}
static inline u32 pbdma_userd_r(u32 i)
{
- return 0x00040008U + i*8192U;
+ return nvgpu_safe_add_u32(0x00040008U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_userd_target_vid_mem_f(void)
{
@@ -328,7 +329,7 @@ static inline u32 pbdma_userd_addr_f(u32 v)
}
static inline u32 pbdma_config_r(u32 i)
{
- return 0x000400f4U + i*8192U;
+ return nvgpu_safe_add_u32(0x000400f4U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_config_l2_evict_first_f(void)
{
@@ -364,7 +365,7 @@ static inline u32 pbdma_config_userd_writeback_enable_f(void)
}
static inline u32 pbdma_userd_hi_r(u32 i)
{
- return 0x0004000cU + i*8192U;
+ return nvgpu_safe_add_u32(0x0004000cU, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_userd_hi_addr_f(u32 v)
{
@@ -372,7 +373,7 @@ static inline u32 pbdma_userd_hi_addr_f(u32 v)
}
static inline u32 pbdma_hce_ctrl_r(u32 i)
{
- return 0x000400e4U + i*8192U;
+ return nvgpu_safe_add_u32(0x000400e4U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_hce_ctrl_hce_priv_mode_yes_f(void)
{
@@ -380,7 +381,7 @@ static inline u32 pbdma_hce_ctrl_hce_priv_mode_yes_f(void)
}
static inline u32 pbdma_intr_0_r(u32 i)
{
- return 0x00040108U + i*8192U;
+ return nvgpu_safe_add_u32(0x00040108U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_intr_0_memreq_v(u32 r)
{
@@ -512,7 +513,7 @@ static inline u32 pbdma_intr_0_signature_pending_f(void)
}
static inline u32 pbdma_intr_1_r(u32 i)
{
- return 0x00040148U + i*8192U;
+ return nvgpu_safe_add_u32(0x00040148U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_intr_1_ctxnotvalid_m(void)
{
@@ -524,7 +525,7 @@ static inline u32 pbdma_intr_1_ctxnotvalid_pending_f(void)
}
static inline u32 pbdma_intr_en_0_r(u32 i)
{
- return 0x0004010cU + i*8192U;
+ return nvgpu_safe_add_u32(0x0004010cU, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_intr_en_0_lbreq_enabled_f(void)
{
@@ -532,11 +533,11 @@ static inline u32 pbdma_intr_en_0_lbreq_enabled_f(void)
}
static inline u32 pbdma_intr_en_1_r(u32 i)
{
- return 0x0004014cU + i*8192U;
+ return nvgpu_safe_add_u32(0x0004014cU, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_intr_stall_r(u32 i)
{
- return 0x0004013cU + i*8192U;
+ return nvgpu_safe_add_u32(0x0004013cU, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_intr_stall_lbreq_enabled_f(void)
{
@@ -544,7 +545,7 @@ static inline u32 pbdma_intr_stall_lbreq_enabled_f(void)
}
static inline u32 pbdma_intr_stall_1_r(u32 i)
{
- return 0x00040140U + i*8192U;
+ return nvgpu_safe_add_u32(0x00040140U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_intr_stall_1_hce_illegal_op_enabled_f(void)
{
@@ -556,7 +557,7 @@ static inline u32 pbdma_udma_nop_r(void)
}
static inline u32 pbdma_runlist_timeslice_r(u32 i)
{
- return 0x000400f8U + i*8192U;
+ return nvgpu_safe_add_u32(0x000400f8U, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_runlist_timeslice_timeout_128_f(void)
{
@@ -572,7 +573,7 @@ static inline u32 pbdma_runlist_timeslice_enable_true_f(void)
}
static inline u32 pbdma_target_r(u32 i)
{
- return 0x000400acU + i*8192U;
+ return nvgpu_safe_add_u32(0x000400acU, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_target_engine_sw_f(void)
{
@@ -628,7 +629,7 @@ static inline u32 pbdma_target_needs_host_tsg_event_false_f(void)
}
static inline u32 pbdma_set_channel_info_r(u32 i)
{
- return 0x000400fcU + i*8192U;
+ return nvgpu_safe_add_u32(0x000400fcU, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_set_channel_info_veid_f(u32 v)
{
@@ -636,7 +637,7 @@ static inline u32 pbdma_set_channel_info_veid_f(u32 v)
}
static inline u32 pbdma_timeout_r(u32 i)
{
- return 0x0004012cU + i*8192U;
+ return nvgpu_safe_add_u32(0x0004012cU, nvgpu_safe_mult_u32(i, 8192U));
}
static inline u32 pbdma_timeout_period_m(void)
{

hw_perf_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_PERF_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 perf_pmmgpc_perdomain_offset_v(void)
{
@@ -240,7 +241,7 @@ static inline u32 perf_pmasys_enginestatus_rbufempty_empty_f(void)
}
static inline u32 perf_pmmsys_engine_sel_r(u32 i)
{
- return 0x0024006cU + i*512U;
+ return nvgpu_safe_add_u32(0x0024006cU, nvgpu_safe_mult_u32(i, 512U));
}
static inline u32 perf_pmmsys_engine_sel__size_1_v(void)
{
@@ -248,7 +249,7 @@ static inline u32 perf_pmmsys_engine_sel__size_1_v(void)
}
static inline u32 perf_pmmfbp_engine_sel_r(u32 i)
{
- return 0x0020006cU + i*512U;
+ return nvgpu_safe_add_u32(0x0020006cU, nvgpu_safe_mult_u32(i, 512U));
}
static inline u32 perf_pmmfbp_engine_sel__size_1_v(void)
{
@@ -256,7 +257,7 @@ static inline u32 perf_pmmfbp_engine_sel__size_1_v(void)
}
static inline u32 perf_pmmgpc_engine_sel_r(u32 i)
{
- return 0x0018006cU + i*512U;
+ return nvgpu_safe_add_u32(0x0018006cU, nvgpu_safe_mult_u32(i, 512U));
}
static inline u32 perf_pmmgpc_engine_sel__size_1_v(void)
{

hw_pgsp_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_PGSP_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 pgsp_falcon_irqsset_r(void)
{
@@ -360,7 +361,7 @@ static inline u32 pgsp_falcon_cpuctl_alias_startcpu_f(u32 v)
}
static inline u32 pgsp_falcon_imemc_r(u32 i)
{
- return 0x00110180U + i*16U;
+ return nvgpu_safe_add_u32(0x00110180U, nvgpu_safe_mult_u32(i, 16U));
}
static inline u32 pgsp_falcon_imemc_offs_f(u32 v)
{
@@ -376,11 +377,11 @@ static inline u32 pgsp_falcon_imemc_aincw_f(u32 v)
}
static inline u32 pgsp_falcon_imemd_r(u32 i)
{
- return 0x00110184U + i*16U;
+ return nvgpu_safe_add_u32(0x00110184U, nvgpu_safe_mult_u32(i, 16U));
}
static inline u32 pgsp_falcon_imemt_r(u32 i)
{
- return 0x00110188U + i*16U;
+ return nvgpu_safe_add_u32(0x00110188U, nvgpu_safe_mult_u32(i, 16U));
}
static inline u32 pgsp_falcon_sctl_r(void)
{
@@ -520,7 +521,7 @@ static inline u32 pgsp_sec2_falcon_icd_rdata_r(void)
}
static inline u32 pgsp_falcon_dmemc_r(u32 i)
{
- return 0x001101c0U + i*8U;
+ return nvgpu_safe_add_u32(0x001101c0U, nvgpu_safe_mult_u32(i, 8U));
}
static inline u32 pgsp_falcon_dmemc_offs_f(u32 v)
{
@@ -548,7 +549,7 @@ static inline u32 pgsp_falcon_dmemc_aincr_f(u32 v)
}
static inline u32 pgsp_falcon_dmemd_r(u32 i)
{
- return 0x001101c4U + i*8U;
+ return nvgpu_safe_add_u32(0x001101c4U, nvgpu_safe_mult_u32(i, 8U));
}
static inline u32 pgsp_falcon_debug1_r(void)
{
@@ -576,7 +577,7 @@ static inline u32 pgsp_falcon_debug1_ctxsw_mode_init_f(void)
}
static inline u32 pgsp_fbif_transcfg_r(u32 i)
{
- return 0x00110600U + i*4U;
+ return nvgpu_safe_add_u32(0x00110600U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 pgsp_fbif_transcfg_target_local_fb_f(void)
{

hw_pram_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,9 +57,10 @@
#define NVGPU_HW_PRAM_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 pram_data032_r(u32 i)
{
- return 0x00700000U + i*4U;
+ return nvgpu_safe_add_u32(0x00700000U, nvgpu_safe_mult_u32(i, 4U));
}
#endif

hw_pri_ringmaster_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_PRI_RINGMASTER_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 pri_ringmaster_command_r(void)
{

hw_pri_ringstation_gpc_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,10 +57,11 @@
#define NVGPU_HW_PRI_RINGSTATION_GPC_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 pri_ringstation_gpc_master_config_r(u32 i)
{
- return 0x00128300U + i*4U;
+ return nvgpu_safe_add_u32(0x00128300U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 pri_ringstation_gpc_gpc0_priv_error_adr_r(void)
{

hw_pri_ringstation_sys_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,10 +57,11 @@
#define NVGPU_HW_PRI_RINGSTATION_SYS_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 pri_ringstation_sys_master_config_r(u32 i)
{
- return 0x00122300U + i*4U;
+ return nvgpu_safe_add_u32(0x00122300U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 pri_ringstation_sys_decode_config_r(void)
{

hw_proj_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_PROJ_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 proj_gpc_base_v(void)
{

hw_pwr_gv100.h

@@ -57,6 +57,7 @@
#define NVGPU_HW_PWR_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 pwr_falcon_irqsset_r(void)
{
@@ -452,7 +453,7 @@ static inline u32 pwr_pmu_scpctl_stat_debug_mode_v(u32 r)
}
static inline u32 pwr_falcon_imemc_r(u32 i)
{
- return 0x0010a180U + i*16U;
+ return nvgpu_safe_add_u32(0x0010a180U, nvgpu_safe_mult_u32(i, 16U));
}
static inline u32 pwr_falcon_imemc_offs_f(u32 v)
{
@@ -468,11 +469,11 @@ static inline u32 pwr_falcon_imemc_aincw_f(u32 v)
}
static inline u32 pwr_falcon_imemd_r(u32 i)
{
- return 0x0010a184U + i*16U;
+ return nvgpu_safe_add_u32(0x0010a184U, nvgpu_safe_mult_u32(i, 16U));
}
static inline u32 pwr_falcon_imemt_r(u32 i)
{
- return 0x0010a188U + i*16U;
+ return nvgpu_safe_add_u32(0x0010a188U, nvgpu_safe_mult_u32(i, 16U));
}
static inline u32 pwr_falcon_sctl_r(void)
{
@@ -608,7 +609,7 @@ static inline u32 pwr_pmu_falcon_icd_rdata_r(void)
}
static inline u32 pwr_falcon_dmemc_r(u32 i)
{
- return 0x0010a1c0U + i*8U;
+ return nvgpu_safe_add_u32(0x0010a1c0U, nvgpu_safe_mult_u32(i, 8U));
}
static inline u32 pwr_falcon_dmemc_offs_f(u32 v)
{
@@ -636,7 +637,7 @@ static inline u32 pwr_falcon_dmemc_aincr_f(u32 v)
}
static inline u32 pwr_falcon_dmemd_r(u32 i)
{
- return 0x0010a1c4U + i*8U;
+ return nvgpu_safe_add_u32(0x0010a1c4U, nvgpu_safe_mult_u32(i, 8U));
}
static inline u32 pwr_pmu_new_instblk_r(void)
{
@@ -700,7 +701,7 @@ static inline u32 pwr_pmu_mutex_id_release_value_init_f(void)
}
static inline u32 pwr_pmu_mutex_r(u32 i)
{
- return 0x0010a580U + i*4U;
+ return nvgpu_safe_add_u32(0x0010a580U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 pwr_pmu_mutex__size_1_v(void)
{
@@ -720,7 +721,7 @@ static inline u32 pwr_pmu_mutex_value_initial_lock_f(void)
}
static inline u32 pwr_pmu_queue_head_r(u32 i)
{
- return 0x0010a800U + i*4U;
+ return nvgpu_safe_add_u32(0x0010a800U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 pwr_pmu_queue_head__size_1_v(void)
{
@@ -736,7 +737,7 @@ static inline u32 pwr_pmu_queue_head_address_v(u32 r)
}
static inline u32 pwr_pmu_queue_tail_r(u32 i)
{
- return 0x0010a820U + i*4U;
+ return nvgpu_safe_add_u32(0x0010a820U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 pwr_pmu_queue_tail__size_1_v(void)
{
@@ -776,7 +777,7 @@ static inline u32 pwr_pmu_msgq_tail_val_v(u32 r)
}
static inline u32 pwr_pmu_idle_mask_r(u32 i)
{
- return 0x0010a504U + i*16U;
+ return nvgpu_safe_add_u32(0x0010a504U, nvgpu_safe_mult_u32(i, 16U));
}
static inline u32 pwr_pmu_idle_mask_gr_enabled_f(void)
{
@@ -788,7 +789,7 @@ static inline u32 pwr_pmu_idle_mask_ce_2_enabled_f(void)
}
static inline u32 pwr_pmu_idle_count_r(u32 i)
{
- return 0x0010a508U + i*16U;
+ return nvgpu_safe_add_u32(0x0010a508U, nvgpu_safe_mult_u32(i, 16U));
}
static inline u32 pwr_pmu_idle_count_value_f(u32 v)
{
@@ -804,7 +805,7 @@ static inline u32 pwr_pmu_idle_count_reset_f(u32 v)
}
static inline u32 pwr_pmu_idle_ctrl_r(u32 i)
{
- return 0x0010a50cU + i*16U;
+ return nvgpu_safe_add_u32(0x0010a50cU, nvgpu_safe_mult_u32(i, 16U));
}
static inline u32 pwr_pmu_idle_ctrl_value_m(void)
{
@@ -828,7 +829,7 @@ static inline u32 pwr_pmu_idle_ctrl_filter_disabled_f(void)
}
static inline u32 pwr_pmu_idle_threshold_r(u32 i)
{
- return 0x0010a8a0U + i*4U;
+ return nvgpu_safe_add_u32(0x0010a8a0U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 pwr_pmu_idle_threshold_value_f(u32 v)
{
@@ -876,19 +877,19 @@ static inline u32 pwr_pmu_idle_intr_status_intr_clear_v(void)
}
static inline u32 pwr_pmu_idle_mask_supp_r(u32 i)
{
- return 0x0010a9f0U + i*8U;
+ return nvgpu_safe_add_u32(0x0010a9f0U, nvgpu_safe_mult_u32(i, 8U));
}
static inline u32 pwr_pmu_idle_mask_1_supp_r(u32 i)
{
- return 0x0010a9f4U + i*8U;
+ return nvgpu_safe_add_u32(0x0010a9f4U, nvgpu_safe_mult_u32(i, 8U));
}
static inline u32 pwr_pmu_idle_ctrl_supp_r(u32 i)
{
- return 0x0010aa30U + i*8U;
+ return nvgpu_safe_add_u32(0x0010aa30U, nvgpu_safe_mult_u32(i, 8U));
}
static inline u32 pwr_pmu_debug_r(u32 i)
{
- return 0x0010a5c0U + i*4U;
+ return nvgpu_safe_add_u32(0x0010a5c0U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 pwr_pmu_debug__size_1_v(void)
{
@@ -896,7 +897,7 @@ static inline u32 pwr_pmu_debug__size_1_v(void)
}
static inline u32 pwr_pmu_mailbox_r(u32 i)
{
- return 0x0010a450U + i*4U;
+ return nvgpu_safe_add_u32(0x0010a450U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 pwr_pmu_mailbox__size_1_v(void)
{
@@ -956,23 +957,23 @@ static inline u32 pwr_pmu_bar0_error_status_fecserr_m(void)
}
static inline u32 pwr_pmu_pg_idlefilth_r(u32 i)
{
- return 0x0010a6c0U + i*4U;
+ return nvgpu_safe_add_u32(0x0010a6c0U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 pwr_pmu_pg_ppuidlefilth_r(u32 i)
{
- return 0x0010a6e8U + i*4U;
+ return nvgpu_safe_add_u32(0x0010a6e8U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 pwr_pmu_pg_idle_cnt_r(u32 i)
{
- return 0x0010a710U + i*4U;
+ return nvgpu_safe_add_u32(0x0010a710U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 pwr_pmu_pg_intren_r(u32 i)
{
- return 0x0010a760U + i*4U;
+ return nvgpu_safe_add_u32(0x0010a760U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 pwr_fbif_transcfg_r(u32 i)
{
- return 0x0010ae00U + i*4U;
+ return nvgpu_safe_add_u32(0x0010ae00U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 pwr_fbif_transcfg_target_local_fb_f(void)
{

hw_ram_gv100.h

@@ -57,6 +57,7 @@
#define NVGPU_HW_RAM_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 ram_in_ramfc_s(void)
{

hw_therm_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_THERM_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 therm_weight_1_r(void)
{
@@ -80,7 +81,7 @@ static inline u32 therm_config2_grad_enable_f(u32 v)
}
static inline u32 therm_gate_ctrl_r(u32 i)
{
- return 0x00020200U + i*4U;
+ return nvgpu_safe_add_u32(0x00020200U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 therm_gate_ctrl_eng_clk_m(void)
{
@@ -172,7 +173,7 @@ static inline u32 therm_hubmmu_idle_filter_value_m(void)
}
static inline u32 therm_clk_slowdown_r(u32 i)
{
- return 0x00020160U + i*4U;
+ return nvgpu_safe_add_u32(0x00020160U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 therm_clk_slowdown_idle_factor_f(u32 v)
{
@@ -192,7 +193,7 @@ static inline u32 therm_clk_slowdown_idle_factor_disabled_f(void)
}
static inline u32 therm_grad_stepping_table_r(u32 i)
{
- return 0x000202c8U + i*4U;
+ return nvgpu_safe_add_u32(0x000202c8U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 therm_grad_stepping_table_slowdown_factor0_f(u32 v)
{
@@ -284,7 +285,7 @@ static inline u32 therm_grad_stepping1_pdiv_duration_f(u32 v)
}
static inline u32 therm_clk_timing_r(u32 i)
{
- return 0x000203c0U + i*4U;
+ return nvgpu_safe_add_u32(0x000203c0U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 therm_clk_timing_grad_slowdown_f(u32 v)
{

hw_timer_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_TIMER_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 timer_pri_timeout_r(void)
{

hw_top_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_TOP_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 top_num_gpcs_r(void)
{
@@ -120,7 +121,7 @@ static inline u32 top_num_ces_value_v(u32 r)
}
static inline u32 top_device_info_r(u32 i)
{
- return 0x00022700U + i*4U;
+ return nvgpu_safe_add_u32(0x00022700U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 top_device_info__size_1_v(void)
{

hw_trim_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_TRIM_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 trim_sys_nvlink_uphy_cfg_r(void)
{

hw_usermode_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_USERMODE_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 usermode_cfg0_r(void)
{

hw_xp_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,10 +57,11 @@
#define NVGPU_HW_XP_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 xp_dl_mgr_r(u32 i)
{
- return 0x0008b8c0U + i*4U;
+ return nvgpu_safe_add_u32(0x0008b8c0U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 xp_dl_mgr_safe_timing_f(u32 v)
{
@@ -68,7 +69,7 @@ static inline u32 xp_dl_mgr_safe_timing_f(u32 v)
}
static inline u32 xp_pl_link_config_r(u32 i)
{
- return 0x0008c040U + i*4U;
+ return nvgpu_safe_add_u32(0x0008c040U, nvgpu_safe_mult_u32(i, 4U));
}
static inline u32 xp_pl_link_config_ltssm_status_f(u32 v)
{

hw_xve_gv100.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
#define NVGPU_HW_XVE_GV100_H
#include <nvgpu/types.h>
+ #include <nvgpu/safe_ops.h>
static inline u32 xve_rom_ctrl_r(void)
{