Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-23 18:16:01 +03:00)
gpu: nvgpu: fix CERT-C errors in tu104 kernel hw headers
A register generator tool is added to fix CERT-C errors associated with u32 arithmetic operations, and the tu104 hw headers are regenerated with the updated register generator.

JIRA NVGPU-3520

Change-Id: Ief620a2d46010dfae232bc0151aa93c3e260fa69
Signed-off-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2124635
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vinod Gopalakrishnakurup <vinodg@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 401f36ccbc (committed by mobile promotions)
parent 8901faae57
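For context: the generated accessors previously computed register offsets with raw u32 arithmetic ("base + i*stride"), which can wrap silently and violates CERT-C rules such as INT30-C. The diff below replaces those expressions with nvgpu_safe_add_u32()/nvgpu_safe_mult_u32() from <nvgpu/safe_ops.h>. As a minimal sketch of what helpers of this kind look like (illustrative only; the use of BUG_ON() and U32_MAX here is an assumption, and the real definitions in this tree's safe_ops.h may differ):

#include <linux/bug.h>      /* BUG_ON(); assumption, kernel build context */
#include <linux/limits.h>   /* U32_MAX */
#include <nvgpu/types.h>    /* u32 */

static inline u32 nvgpu_safe_add_u32(u32 a, u32 b)
{
	/* Sketch: trap instead of wrapping. a + b overflows u32
	 * exactly when b exceeds the headroom above a. */
	BUG_ON(b > (U32_MAX - a));
	return a + b;
}

static inline u32 nvgpu_safe_mult_u32(u32 a, u32 b)
{
	/* Sketch: for non-zero b, a * b overflows u32 exactly
	 * when a > U32_MAX / b (integer division). */
	BUG_ON((b != 0U) && (a > (U32_MAX / b)));
	return a * b;
}

With helpers of this shape, a generated accessor such as bus_sw_scratch_r(i) evaluates nvgpu_safe_add_u32(0x00001400U, nvgpu_safe_mult_u32(i, 4U)), so an out-of-range index trips the overflow check instead of silently aliasing some other register offset.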
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,10 +57,11 @@
 #define NVGPU_HW_BUS_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 bus_sw_scratch_r(u32 i)
 {
-	return 0x00001400U + i*4U;
+	return nvgpu_safe_add_u32(0x00001400U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 bus_bar0_window_r(void)
 {

@@ -57,10 +57,11 @@
 #define NVGPU_HW_CCSR_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 ccsr_channel_inst_r(u32 i)
 {
-	return 0x00800000U + i*8U;
+	return nvgpu_safe_add_u32(0x00800000U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 ccsr_channel_inst__size_1_v(void)
 {
@@ -92,7 +93,7 @@ static inline u32 ccsr_channel_inst_bind_true_f(void)
 }
 static inline u32 ccsr_channel_r(u32 i)
 {
-	return 0x00800004U + i*8U;
+	return nvgpu_safe_add_u32(0x00800004U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 ccsr_channel__size_1_v(void)
 {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,10 +57,11 @@
 #define NVGPU_HW_CE_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 ce_intr_status_r(u32 i)
 {
-	return 0x00104410U + i*128U;
+	return nvgpu_safe_add_u32(0x00104410U, nvgpu_safe_mult_u32(i, 128U));
 }
 static inline u32 ce_intr_status_blockpipe_pending_f(void)
 {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,10 +57,11 @@
 #define NVGPU_HW_CTRL_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 ctrl_doorbell_r(u32 i)
 {
-	return 0x00b64000U + i*8U;
+	return nvgpu_safe_add_u32(0x00b64000U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 ctrl_doorbell_vector_f(u32 v)
 {
@@ -72,7 +73,7 @@ static inline u32 ctrl_doorbell_runlist_id_f(u32 v)
 }
 static inline u32 ctrl_virtual_channel_cfg_r(u32 i)
 {
-	return 0x00b65000U + i*4U;
+	return nvgpu_safe_add_u32(0x00b65000U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 ctrl_virtual_channel_cfg_pending_enable_true_f(void)
 {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_CTXSW_PROG_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 ctxsw_prog_fecs_header_v(void)
 {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_FALCON_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 falcon_falcon_irqsset_r(void)
 {
@@ -344,7 +345,7 @@ static inline u32 falcon_falcon_cpuctl_alias_startcpu_f(u32 v)
 }
 static inline u32 falcon_falcon_imemc_r(u32 i)
 {
-	return 0x00000180U + i*16U;
+	return nvgpu_safe_add_u32(0x00000180U, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 falcon_falcon_imemc_offs_f(u32 v)
 {
@@ -364,11 +365,11 @@ static inline u32 falcon_falcon_imemc_secure_f(u32 v)
 }
 static inline u32 falcon_falcon_imemd_r(u32 i)
 {
-	return 0x00000184U + i*16U;
+	return nvgpu_safe_add_u32(0x00000184U, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 falcon_falcon_imemt_r(u32 i)
 {
-	return 0x00000188U + i*16U;
+	return nvgpu_safe_add_u32(0x00000188U, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 falcon_falcon_sctl_r(void)
 {
@@ -544,7 +545,7 @@ static inline u32 falcon_falcon_icd_rdata_r(void)
 }
 static inline u32 falcon_falcon_dmemc_r(u32 i)
 {
-	return 0x000001c0U + i*8U;
+	return nvgpu_safe_add_u32(0x000001c0U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 falcon_falcon_dmemc_offs_f(u32 v)
 {
@@ -572,7 +573,7 @@ static inline u32 falcon_falcon_dmemc_aincr_f(u32 v)
 }
 static inline u32 falcon_falcon_dmemd_r(u32 i)
 {
-	return 0x000001c4U + i*8U;
+	return nvgpu_safe_add_u32(0x000001c4U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 falcon_falcon_debug1_r(void)
 {

@@ -57,6 +57,7 @@
 #define NVGPU_HW_FB_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 fb_fbhub_num_active_ltcs_r(void)
 {
@@ -1104,7 +1105,7 @@ static inline u32 fb_niso_intr_mmu_other_fault_notify_pending_f(void)
 }
 static inline u32 fb_niso_intr_en_r(u32 i)
 {
-	return 0x00100a24U + i*4U;
+	return nvgpu_safe_add_u32(0x00100a24U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 fb_niso_intr_en__size_1_v(void)
 {
@@ -1168,7 +1169,7 @@ static inline u32 fb_niso_intr_en_mmu_other_fault_notify_enabled_f(void)
 }
 static inline u32 fb_niso_intr_en_set_r(u32 i)
 {
-	return 0x00100a2cU + i*4U;
+	return nvgpu_safe_add_u32(0x00100a2cU, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 fb_niso_intr_en_set__size_1_v(void)
 {
@@ -1232,7 +1233,7 @@ static inline u32 fb_niso_intr_en_set_mmu_other_fault_notify_set_f(void)
 }
 static inline u32 fb_niso_intr_en_clr_r(u32 i)
 {
-	return 0x00100a34U + i*4U;
+	return nvgpu_safe_add_u32(0x00100a34U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 fb_niso_intr_en_clr__size_1_v(void)
 {
@@ -1304,7 +1305,7 @@ static inline u32 fb_niso_intr_en_clr_mmu_replay_fault_buffer_v(void)
 }
 static inline u32 fb_mmu_fault_buffer_lo_r(u32 i)
 {
-	return 0x00100e24U + i*20U;
+	return nvgpu_safe_add_u32(0x00100e24U, nvgpu_safe_mult_u32(i, 20U));
 }
 static inline u32 fb_mmu_fault_buffer_lo__size_1_v(void)
 {
@@ -1376,7 +1377,7 @@ static inline u32 fb_mmu_fault_buffer_lo_addr_v(u32 r)
 }
 static inline u32 fb_mmu_fault_buffer_hi_r(u32 i)
 {
-	return 0x00100e28U + i*20U;
+	return nvgpu_safe_add_u32(0x00100e28U, nvgpu_safe_mult_u32(i, 20U));
 }
 static inline u32 fb_mmu_fault_buffer_hi__size_1_v(void)
 {
@@ -1392,7 +1393,7 @@ static inline u32 fb_mmu_fault_buffer_hi_addr_v(u32 r)
 }
 static inline u32 fb_mmu_fault_buffer_get_r(u32 i)
 {
-	return 0x00100e2cU + i*20U;
+	return nvgpu_safe_add_u32(0x00100e2cU, nvgpu_safe_mult_u32(i, 20U));
 }
 static inline u32 fb_mmu_fault_buffer_get__size_1_v(void)
 {
@@ -1444,7 +1445,7 @@ static inline u32 fb_mmu_fault_buffer_get_overflow_clear_f(void)
 }
 static inline u32 fb_mmu_fault_buffer_put_r(u32 i)
 {
-	return 0x00100e30U + i*20U;
+	return nvgpu_safe_add_u32(0x00100e30U, nvgpu_safe_mult_u32(i, 20U));
 }
 static inline u32 fb_mmu_fault_buffer_put__size_1_v(void)
 {
@@ -1500,7 +1501,7 @@ static inline u32 fb_mmu_fault_buffer_put_overflow_yes_f(void)
 }
 static inline u32 fb_mmu_fault_buffer_size_r(u32 i)
 {
-	return 0x00100e34U + i*20U;
+	return nvgpu_safe_add_u32(0x00100e34U, nvgpu_safe_mult_u32(i, 20U));
 }
 static inline u32 fb_mmu_fault_buffer_size__size_1_v(void)
 {
@@ -2244,7 +2245,7 @@ static inline u32 fb_mmu_int_vector_ecc_error_vector_v(u32 r)
 }
 static inline u32 fb_mmu_int_vector_fault_r(u32 i)
 {
-	return 0x00100ee4U + i*4U;
+	return nvgpu_safe_add_u32(0x00100ee4U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 fb_mmu_int_vector_fault_error_v(u32 r)
 {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_FBPA_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 fbpa_0_intr_status_r(void)
 {
@@ -100,7 +101,7 @@ static inline u32 fbpa_ecc_intr_ctrl_ded_intr_en_enabled_f(void)
 }
 static inline u32 fbpa_0_ecc_status_r(u32 i)
 {
-	return 0x00900478U + i*4U;
+	return nvgpu_safe_add_u32(0x00900478U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 fbpa_0_ecc_status_sec_intr_pending_f(void)
 {
@@ -120,10 +121,10 @@ static inline u32 fbpa_0_ecc_status_ded_counter_overflow_pending_f(void)
 }
 static inline u32 fbpa_0_ecc_sec_count_r(u32 i)
 {
-	return 0x00900480U + i*4U;
+	return nvgpu_safe_add_u32(0x00900480U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 fbpa_0_ecc_ded_count_r(u32 i)
 {
-	return 0x00900488U + i*4U;
+	return nvgpu_safe_add_u32(0x00900488U, nvgpu_safe_mult_u32(i, 4U));
 }
 #endif

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_FIFO_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 fifo_userd_writeback_r(void)
 {
@@ -88,7 +89,7 @@ static inline u32 fifo_userd_writeback_timescale_0_v(void)
 }
 static inline u32 fifo_runlist_base_lo_r(u32 i)
 {
-	return 0x00002b00U + i*16U;
+	return nvgpu_safe_add_u32(0x00002b00U, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 fifo_runlist_base_lo__size_1_v(void)
 {
@@ -116,7 +117,7 @@ static inline u32 fifo_runlist_base_lo_target_sys_mem_ncoh_f(void)
 }
 static inline u32 fifo_runlist_base_hi_r(u32 i)
 {
-	return 0x00002b04U + i*16U;
+	return nvgpu_safe_add_u32(0x00002b04U, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 fifo_runlist_base_hi_ptr_hi_f(u32 v)
 {
@@ -124,7 +125,7 @@ static inline u32 fifo_runlist_base_hi_ptr_hi_f(u32 v)
 }
 static inline u32 fifo_runlist_submit_r(u32 i)
 {
-	return 0x00002b08U + i*16U;
+	return nvgpu_safe_add_u32(0x00002b08U, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 fifo_runlist_submit_length_f(u32 v)
 {
@@ -132,7 +133,7 @@ static inline u32 fifo_runlist_submit_length_f(u32 v)
 }
 static inline u32 fifo_runlist_submit_info_r(u32 i)
 {
-	return 0x00002b0cU + i*16U;
+	return nvgpu_safe_add_u32(0x00002b0cU, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 fifo_runlist_submit_info_pending_true_f(void)
 {
@@ -140,7 +141,7 @@ static inline u32 fifo_runlist_submit_info_pending_true_f(void)
 }
 static inline u32 fifo_pbdma_map_r(u32 i)
 {
-	return 0x00002390U + i*4U;
+	return nvgpu_safe_add_u32(0x00002390U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 fifo_intr_0_r(void)
 {
@@ -328,7 +329,7 @@ static inline u32 fifo_preempt_id_f(u32 v)
 }
 static inline u32 fifo_engine_status_r(u32 i)
 {
-	return 0x00002640U + i*8U;
+	return nvgpu_safe_add_u32(0x00002640U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 fifo_engine_status__size_1_v(void)
 {
@@ -424,7 +425,7 @@ static inline u32 fifo_engine_status_ctxsw_in_progress_f(void)
 }
 static inline u32 fifo_pbdma_status_r(u32 i)
 {
-	return 0x00003080U + i*4U;
+	return nvgpu_safe_add_u32(0x00003080U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 fifo_pbdma_status__size_1_v(void)
 {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_FLUSH_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 flush_l2_system_invalidate_r(void)
 {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_FUNC_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 func_full_phys_offset_v(void)
 {
@@ -72,11 +73,11 @@ static inline u32 func_cfg0_r(void)
 }
 static inline u32 func_priv_cpu_intr_top_en_set_r(u32 i)
 {
-	return 0x00001608U + i*4U;
+	return nvgpu_safe_add_u32(0x00001608U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 func_priv_cpu_intr_top_en_clear_r(u32 i)
 {
-	return 0x00001610U + i*4U;
+	return nvgpu_safe_add_u32(0x00001610U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 func_priv_cpu_intr_top_en_clear__size_1_v(void)
 {
@@ -84,11 +85,11 @@ static inline u32 func_priv_cpu_intr_top_en_clear__size_1_v(void)
 }
 static inline u32 func_priv_cpu_intr_leaf_en_set_r(u32 i)
 {
-	return 0x00001200U + i*4U;
+	return nvgpu_safe_add_u32(0x00001200U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 func_priv_cpu_intr_leaf_en_clear_r(u32 i)
 {
-	return 0x00001400U + i*4U;
+	return nvgpu_safe_add_u32(0x00001400U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 func_priv_cpu_intr_leaf_en_clear__size_1_v(void)
 {
@@ -96,31 +97,31 @@ static inline u32 func_priv_cpu_intr_leaf_en_clear__size_1_v(void)
 }
 static inline u32 func_priv_cpu_intr_top_r(u32 i)
 {
-	return 0x00001600U + i*4U;
+	return nvgpu_safe_add_u32(0x00001600U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 func_priv_cpu_intr_leaf_r(u32 i)
 {
-	return 0x00001000U + i*4U;
+	return nvgpu_safe_add_u32(0x00001000U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 func_priv_mmu_fault_buffer_lo_r(u32 i)
 {
-	return 0x00003000U + i*32U;
+	return nvgpu_safe_add_u32(0x00003000U, nvgpu_safe_mult_u32(i, 32U));
 }
 static inline u32 func_priv_mmu_fault_buffer_hi_r(u32 i)
 {
-	return 0x00003004U + i*32U;
+	return nvgpu_safe_add_u32(0x00003004U, nvgpu_safe_mult_u32(i, 32U));
 }
 static inline u32 func_priv_mmu_fault_buffer_get_r(u32 i)
 {
-	return 0x00003008U + i*32U;
+	return nvgpu_safe_add_u32(0x00003008U, nvgpu_safe_mult_u32(i, 32U));
 }
 static inline u32 func_priv_mmu_fault_buffer_put_r(u32 i)
 {
-	return 0x0000300cU + i*32U;
+	return nvgpu_safe_add_u32(0x0000300cU, nvgpu_safe_mult_u32(i, 32U));
 }
 static inline u32 func_priv_mmu_fault_buffer_size_r(u32 i)
 {
-	return 0x00003010U + i*32U;
+	return nvgpu_safe_add_u32(0x00003010U, nvgpu_safe_mult_u32(i, 32U));
 }
 static inline u32 func_priv_mmu_fault_addr_lo_r(void)
 {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_FUSE_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 fuse_status_opt_gpc_r(void)
 {
@@ -64,11 +65,11 @@ static inline u32 fuse_status_opt_gpc_r(void)
 }
 static inline u32 fuse_status_opt_tpc_gpc_r(u32 i)
 {
-	return 0x00021c38U + i*4U;
+	return nvgpu_safe_add_u32(0x00021c38U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 fuse_ctrl_opt_tpc_gpc_r(u32 i)
 {
-	return 0x00021838U + i*4U;
+	return nvgpu_safe_add_u32(0x00021838U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 fuse_status_opt_fbio_r(void)
 {
@@ -88,7 +89,7 @@ static inline u32 fuse_status_opt_fbio_data_v(u32 r)
 }
 static inline u32 fuse_status_opt_rop_l2_fbp_r(u32 i)
 {
-	return 0x00021d70U + i*4U;
+	return nvgpu_safe_add_u32(0x00021d70U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 fuse_status_opt_fbp_r(void)
 {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,9 +57,10 @@
 #define NVGPU_HW_GC6_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 gc6_aon_secure_scratch_group_05_r(u32 i)
 {
-	return 0x00118234U + i*4U;
+	return nvgpu_safe_add_u32(0x00118234U, nvgpu_safe_mult_u32(i, 4U));
 }
 #endif

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_GMMU_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 gmmu_new_pde_is_pte_w(void)
 {

@@ -57,6 +57,7 @@
 #define NVGPU_HW_GR_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 gr_intr_r(void)
 {
@@ -1004,7 +1005,7 @@ static inline u32 gr_fe_go_idle_timeout_count_prod_f(void)
 }
 static inline u32 gr_fe_object_table_r(u32 i)
 {
-	return 0x00404200U + i*4U;
+	return nvgpu_safe_add_u32(0x00404200U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_fe_object_table_nvclass_v(u32 r)
 {
@@ -1012,7 +1013,7 @@ static inline u32 gr_fe_object_table_nvclass_v(u32 r)
 }
 static inline u32 gr_fe_tpc_fs_r(u32 i)
 {
-	return 0x0040a200U + i*4U;
+	return nvgpu_safe_add_u32(0x0040a200U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_pri_mme_shadow_raw_index_r(void)
 {
@@ -1236,7 +1237,7 @@ static inline u32 gr_fecs_icd_rdata_r(void)
 }
 static inline u32 gr_fecs_imemc_r(u32 i)
 {
-	return 0x00409180U + i*16U;
+	return nvgpu_safe_add_u32(0x00409180U, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 gr_fecs_imemc_offs_f(u32 v)
 {
@@ -1252,11 +1253,11 @@ static inline u32 gr_fecs_imemc_aincw_f(u32 v)
 }
 static inline u32 gr_fecs_imemd_r(u32 i)
 {
-	return 0x00409184U + i*16U;
+	return nvgpu_safe_add_u32(0x00409184U, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 gr_fecs_imemt_r(u32 i)
 {
-	return 0x00409188U + i*16U;
+	return nvgpu_safe_add_u32(0x00409188U, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 gr_fecs_imemt_tag_f(u32 v)
 {
@@ -1264,7 +1265,7 @@ static inline u32 gr_fecs_imemt_tag_f(u32 v)
 }
 static inline u32 gr_fecs_dmemc_r(u32 i)
 {
-	return 0x004091c0U + i*8U;
+	return nvgpu_safe_add_u32(0x004091c0U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 gr_fecs_dmemc_offs_s(void)
 {
@@ -1292,7 +1293,7 @@ static inline u32 gr_fecs_dmemc_aincw_f(u32 v)
 }
 static inline u32 gr_fecs_dmemd_r(u32 i)
 {
-	return 0x004091c4U + i*8U;
+	return nvgpu_safe_add_u32(0x004091c4U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 gr_fecs_dmatrfbase_r(void)
 {
@@ -1620,7 +1621,7 @@ static inline u32 gr_fecs_ctx_state_store_major_rev_id_r(void)
 }
 static inline u32 gr_fecs_ctxsw_mailbox_r(u32 i)
 {
-	return 0x00409800U + i*4U;
+	return nvgpu_safe_add_u32(0x00409800U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_fecs_ctxsw_mailbox__size_1_v(void)
 {
@@ -1640,7 +1641,7 @@ static inline u32 gr_fecs_ctxsw_mailbox_value_fail_v(void)
 }
 static inline u32 gr_fecs_ctxsw_mailbox_set_r(u32 i)
 {
-	return 0x004098c0U + i*4U;
+	return nvgpu_safe_add_u32(0x004098c0U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_fecs_ctxsw_mailbox_set_value_f(u32 v)
 {
@@ -1648,7 +1649,7 @@ static inline u32 gr_fecs_ctxsw_mailbox_set_value_f(u32 v)
 }
 static inline u32 gr_fecs_ctxsw_mailbox_clear_r(u32 i)
 {
-	return 0x00409840U + i*4U;
+	return nvgpu_safe_add_u32(0x00409840U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_fecs_ctxsw_mailbox_clear_value_f(u32 v)
 {
@@ -1892,7 +1893,7 @@ static inline u32 gr_gpc0_gpccs_ctxsw_idlestate_r(void)
 }
 static inline u32 gr_rstr2d_gpc_map_r(u32 i)
 {
-	return 0x0040780cU + i*4U;
+	return nvgpu_safe_add_u32(0x0040780cU, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_rstr2d_map_table_cfg_r(void)
 {
@@ -1920,7 +1921,7 @@ static inline u32 gr_pd_hww_esr_en_enable_f(void)
 }
 static inline u32 gr_pd_num_tpc_per_gpc_r(u32 i)
 {
-	return 0x00406028U + i*4U;
+	return nvgpu_safe_add_u32(0x00406028U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_pd_num_tpc_per_gpc__size_1_v(void)
 {
@@ -2012,7 +2013,7 @@ static inline u32 gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v(void)
 }
 static inline u32 gr_pd_dist_skip_table_r(u32 i)
 {
-	return 0x004064d0U + i*4U;
+	return nvgpu_safe_add_u32(0x004064d0U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_pd_dist_skip_table__size_1_v(void)
 {
@@ -2236,7 +2237,7 @@ static inline u32 gr_ds_hww_report_mask_2_sph24_err_report_f(void)
 }
 static inline u32 gr_ds_num_tpc_per_gpc_r(u32 i)
 {
-	return 0x00405870U + i*4U;
+	return nvgpu_safe_add_u32(0x00405870U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_scc_bundle_cb_base_r(void)
 {
@@ -2448,7 +2449,7 @@ static inline u32 gr_cwd_fs_num_tpcs_f(u32 v)
 }
 static inline u32 gr_cwd_gpc_tpc_id_r(u32 i)
 {
-	return 0x00405b60U + i*4U;
+	return nvgpu_safe_add_u32(0x00405b60U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_cwd_gpc_tpc_id_tpc0_s(void)
 {
@@ -2472,7 +2473,7 @@ static inline u32 gr_cwd_gpc_tpc_id_tpc1_f(u32 v)
 }
 static inline u32 gr_cwd_sm_id_r(u32 i)
 {
-	return 0x00405ba0U + i*4U;
+	return nvgpu_safe_add_u32(0x00405ba0U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_cwd_sm_id__size_1_v(void)
 {
@@ -2600,7 +2601,7 @@ static inline u32 gr_gpc0_zcull_total_ram_size_num_aliquots_f(u32 v)
 }
 static inline u32 gr_gpc0_zcull_zcsize_r(u32 i)
 {
-	return 0x00500a04U + i*32U;
+	return nvgpu_safe_add_u32(0x00500a04U, nvgpu_safe_mult_u32(i, 32U));
 }
 static inline u32 gr_gpc0_zcull_zcsize_height_subregion__multiple_v(void)
 {
@@ -2612,7 +2613,7 @@ static inline u32 gr_gpc0_zcull_zcsize_width_subregion__multiple_v(void)
 }
 static inline u32 gr_gpc0_gpm_pd_sm_id_r(u32 i)
 {
-	return 0x00500c10U + i*4U;
+	return nvgpu_safe_add_u32(0x00500c10U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_gpc0_gpm_pd_sm_id_id_f(u32 v)
 {
@@ -2620,7 +2621,7 @@ static inline u32 gr_gpc0_gpm_pd_sm_id_id_f(u32 v)
 }
 static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_r(u32 i)
 {
-	return 0x00500c30U + i*4U;
+	return nvgpu_safe_add_u32(0x00500c30U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_mask_v(u32 r)
 {
@@ -2884,7 +2885,7 @@ static inline u32 gr_gpccs_dmactl_imem_scrubbing_m(void)
 }
 static inline u32 gr_gpccs_imemc_r(u32 i)
 {
-	return 0x0041a180U + i*16U;
+	return nvgpu_safe_add_u32(0x0041a180U, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 gr_gpccs_imemc_offs_f(u32 v)
 {
@@ -2900,11 +2901,11 @@ static inline u32 gr_gpccs_imemc_aincw_f(u32 v)
 }
 static inline u32 gr_gpccs_imemd_r(u32 i)
 {
-	return 0x0041a184U + i*16U;
+	return nvgpu_safe_add_u32(0x0041a184U, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 gr_gpccs_imemt_r(u32 i)
 {
-	return 0x0041a188U + i*16U;
+	return nvgpu_safe_add_u32(0x0041a188U, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 gr_gpccs_imemt__size_1_v(void)
 {
@@ -2916,7 +2917,7 @@ static inline u32 gr_gpccs_imemt_tag_f(u32 v)
 }
 static inline u32 gr_gpccs_dmemc_r(u32 i)
 {
-	return 0x0041a1c0U + i*8U;
+	return nvgpu_safe_add_u32(0x0041a1c0U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 gr_gpccs_dmemc_offs_f(u32 v)
 {
@@ -2932,11 +2933,11 @@ static inline u32 gr_gpccs_dmemc_aincw_f(u32 v)
 }
 static inline u32 gr_gpccs_dmemd_r(u32 i)
 {
-	return 0x0041a1c4U + i*8U;
+	return nvgpu_safe_add_u32(0x0041a1c4U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 gr_gpccs_ctxsw_mailbox_r(u32 i)
 {
-	return 0x0041a800U + i*4U;
+	return nvgpu_safe_add_u32(0x0041a800U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_gpccs_ctxsw_mailbox_value_f(u32 v)
 {
@@ -3080,7 +3081,7 @@ static inline u32 gr_gpcs_ppcs_cbm_beta_cb_ctrl_cbes_reserve_f(u32 v)
 }
 static inline u32 gr_gpcs_swdx_tc_beta_cb_size_r(u32 i)
 {
-	return 0x00418ea0U + i*4U;
+	return nvgpu_safe_add_u32(0x00418ea0U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_f(u32 v)
 {
@@ -3092,7 +3093,7 @@ static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_m(void)
 }
 static inline u32 gr_gpcs_swdx_dss_zbc_color_r_r(u32 i)
 {
-	return 0x00418010U + i*4U;
+	return nvgpu_safe_add_u32(0x00418010U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_gpcs_swdx_dss_zbc_color_r_val_f(u32 v)
 {
@@ -3100,7 +3101,7 @@ static inline u32 gr_gpcs_swdx_dss_zbc_color_r_val_f(u32 v)
 }
 static inline u32 gr_gpcs_swdx_dss_zbc_color_g_r(u32 i)
 {
-	return 0x0041804cU + i*4U;
+	return nvgpu_safe_add_u32(0x0041804cU, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_gpcs_swdx_dss_zbc_color_g_val_f(u32 v)
 {
@@ -3108,7 +3109,7 @@ static inline u32 gr_gpcs_swdx_dss_zbc_color_g_val_f(u32 v)
 }
 static inline u32 gr_gpcs_swdx_dss_zbc_color_b_r(u32 i)
 {
-	return 0x00418088U + i*4U;
+	return nvgpu_safe_add_u32(0x00418088U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_gpcs_swdx_dss_zbc_color_b_val_f(u32 v)
 {
@@ -3116,7 +3117,7 @@ static inline u32 gr_gpcs_swdx_dss_zbc_color_b_val_f(u32 v)
 }
 static inline u32 gr_gpcs_swdx_dss_zbc_color_a_r(u32 i)
 {
-	return 0x004180c4U + i*4U;
+	return nvgpu_safe_add_u32(0x004180c4U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_gpcs_swdx_dss_zbc_color_a_val_f(u32 v)
 {
@@ -3128,7 +3129,7 @@ static inline u32 gr_gpcs_swdx_dss_zbc_c_01_to_04_format_r(void)
 }
 static inline u32 gr_gpcs_swdx_dss_zbc_z_r(u32 i)
 {
-	return 0x00418110U + i*4U;
+	return nvgpu_safe_add_u32(0x00418110U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_gpcs_swdx_dss_zbc_z_val_f(u32 v)
 {
@@ -3140,7 +3141,7 @@ static inline u32 gr_gpcs_swdx_dss_zbc_z_01_to_04_format_r(void)
 }
 static inline u32 gr_gpcs_swdx_dss_zbc_s_r(u32 i)
 {
-	return 0x0041815cU + i*4U;
+	return nvgpu_safe_add_u32(0x0041815cU, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_gpcs_swdx_dss_zbc_s_val_f(u32 v)
 {
@@ -3168,7 +3169,7 @@ static inline u32 gr_gpcs_setup_attrib_cb_base_valid_true_f(void)
 }
 static inline u32 gr_crstr_gpc_map_r(u32 i)
 {
-	return 0x00418b08U + i*4U;
+	return nvgpu_safe_add_u32(0x00418b08U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_crstr_gpc_map_tile0_f(u32 v)
 {
@@ -3208,7 +3209,7 @@ static inline u32 gr_crstr_map_table_cfg_num_entries_f(u32 v)
 }
 static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_r(u32 i)
 {
-	return 0x00418980U + i*4U;
+	return nvgpu_safe_add_u32(0x00418980U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_0_f(u32 v)
 {
@@ -3740,7 +3741,7 @@ static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_fast_mode_switch_true_f(void)
 }
 static inline u32 gr_ppcs_wwdx_map_gpc_map_r(u32 i)
 {
-	return 0x0041bf00U + i*4U;
+	return nvgpu_safe_add_u32(0x0041bf00U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_ppcs_wwdx_map_table_cfg_r(void)
 {
@@ -3772,7 +3773,7 @@ static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_conservative_f(u32 v)
 }
 static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_r(u32 i)
 {
-	return 0x0041bfb0U + i*4U;
+	return nvgpu_safe_add_u32(0x0041bfb0U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff__size_1_v(void)
 {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_IOCTRL_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 ioctrl_reset_r(void)
 {
@@ -108,7 +109,7 @@ static inline u32 ioctrl_debug_reset_common_v(u32 r)
 }
 static inline u32 ioctrl_clock_control_r(u32 i)
 {
-	return 0x00000180U + i*4U;
+	return nvgpu_safe_add_u32(0x00000180U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 ioctrl_clock_control__size_1_v(void)
 {
@@ -244,7 +245,7 @@ static inline u32 ioctrl_common_intr_0_status_intrb_v(u32 r)
 }
 static inline u32 ioctrl_link_intr_0_mask_r(u32 i)
 {
-	return 0x00000240U + i*20U;
+	return nvgpu_safe_add_u32(0x00000240U, nvgpu_safe_mult_u32(i, 20U));
 }
 static inline u32 ioctrl_link_intr_0_mask_fatal_f(u32 v)
 {
@@ -288,7 +289,7 @@ static inline u32 ioctrl_link_intr_0_mask_intrb_v(u32 r)
 }
 static inline u32 ioctrl_link_intr_0_status_r(u32 i)
 {
-	return 0x00000244U + i*20U;
+	return nvgpu_safe_add_u32(0x00000244U, nvgpu_safe_mult_u32(i, 20U));
 }
 static inline u32 ioctrl_link_intr_0_status_fatal_f(u32 v)
 {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_IOCTRLMIF_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 ioctrlmif_rx_err_contain_en_0_r(void)
 {

@@ -57,6 +57,7 @@
 #define NVGPU_HW_LTC_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 ltc_pltcg_base_v(void)
 {
@@ -260,7 +261,7 @@ static inline u32 ltc_ltcs_ltss_dstg_zbc_index_address_f(u32 v)
 }
 static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(u32 i)
 {
-	return 0x0017e33cU + i*4U;
+	return nvgpu_safe_add_u32(0x0017e33cU, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(void)
 {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_MC_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 mc_boot_0_r(void)
 {
@@ -80,7 +81,7 @@ static inline u32 mc_boot_0_minor_revision_v(u32 r)
 }
 static inline u32 mc_intr_r(u32 i)
 {
-	return 0x00000100U + i*4U;
+	return nvgpu_safe_add_u32(0x00000100U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 mc_intr_pfifo_pending_f(void)
 {
@@ -120,15 +121,15 @@ static inline u32 mc_intr_nvlink_pending_f(void)
 }
 static inline u32 mc_intr_en_r(u32 i)
 {
-	return 0x00000140U + i*4U;
+	return nvgpu_safe_add_u32(0x00000140U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 mc_intr_en_set_r(u32 i)
 {
-	return 0x00000160U + i*4U;
+	return nvgpu_safe_add_u32(0x00000160U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 mc_intr_en_clear_r(u32 i)
 {
-	return 0x00000180U + i*4U;
+	return nvgpu_safe_add_u32(0x00000180U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 mc_enable_r(void)
 {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_MINION_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 minion_minion_status_r(void)
 {
@@ -632,7 +633,7 @@ static inline u32 minion_minion_intr_stall_en_link_v(u32 r)
 }
 static inline u32 minion_nvlink_dl_cmd_r(u32 i)
 {
-	return 0x00000900U + i*4U;
+	return nvgpu_safe_add_u32(0x00000900U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 minion_nvlink_dl_cmd___size_1_v(void)
 {
@@ -868,7 +869,7 @@ static inline u32 minion_misc_0_scratch_swrw_0_v(u32 r)
 }
 static inline u32 minion_nvlink_link_intr_r(u32 i)
 {
-	return 0x00000a00U + i*4U;
+	return nvgpu_safe_add_u32(0x00000a00U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 minion_nvlink_link_intr___size_1_v(void)
 {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_NVL_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 nvl_link_state_r(void)
 {
@@ -192,7 +193,7 @@ static inline u32 nvl_link_activity_blkact_v(u32 r)
 }
 static inline u32 nvl_sublink_activity_r(u32 i)
 {
-	return 0x00000010U + i*4U;
+	return nvgpu_safe_add_u32(0x00000010U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 nvl_sublink_activity_blkact0_f(u32 v)
 {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,5 +57,6 @@
 #define NVGPU_HW_NVLINKIP_DISCOVERY_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 #endif
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_NVLIPT_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 nvlipt_intr_control_link0_r(void)
 {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_NVTLC_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 nvtlc_tx_err_report_en_0_r(void)
 {
@@ -57,6 +57,7 @@
 #define NVGPU_HW_PBDMA_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 pbdma_gp_entry1_r(void)
 {
@@ -76,7 +77,7 @@ static inline u32 pbdma_gp_entry1_length_v(u32 r)
 }
 static inline u32 pbdma_gp_base_r(u32 i)
 {
-	return 0x00040048U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040048U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_gp_base__size_1_v(void)
 {
@@ -92,7 +93,7 @@ static inline u32 pbdma_gp_base_rsvd_s(void)
 }
 static inline u32 pbdma_gp_base_hi_r(u32 i)
 {
-	return 0x0004004cU + i*8192U;
+	return nvgpu_safe_add_u32(0x0004004cU, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_gp_base_hi_offset_f(u32 v)
 {
@@ -104,43 +105,43 @@ static inline u32 pbdma_gp_base_hi_limit2_f(u32 v)
 }
 static inline u32 pbdma_gp_fetch_r(u32 i)
 {
-	return 0x00040050U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040050U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_gp_get_r(u32 i)
 {
-	return 0x00040014U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040014U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_gp_put_r(u32 i)
 {
-	return 0x00040000U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040000U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_pb_fetch_r(u32 i)
 {
-	return 0x00040054U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040054U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_pb_fetch_hi_r(u32 i)
 {
-	return 0x00040058U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040058U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_get_r(u32 i)
 {
-	return 0x00040018U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040018U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_get_hi_r(u32 i)
 {
-	return 0x0004001cU + i*8192U;
+	return nvgpu_safe_add_u32(0x0004001cU, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_put_r(u32 i)
 {
-	return 0x0004005cU + i*8192U;
+	return nvgpu_safe_add_u32(0x0004005cU, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_put_hi_r(u32 i)
 {
-	return 0x00040060U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040060U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_pb_header_r(u32 i)
 {
-	return 0x00040084U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040084U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_pb_header_method_zero_f(void)
 {
@@ -176,19 +177,19 @@ static inline u32 pbdma_pb_header_type_immd_f(void)
 }
 static inline u32 pbdma_hdr_shadow_r(u32 i)
 {
-	return 0x00040118U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040118U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_gp_shadow_0_r(u32 i)
 {
-	return 0x00040110U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040110U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_gp_shadow_1_r(u32 i)
 {
-	return 0x00040114U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040114U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_subdevice_r(u32 i)
 {
-	return 0x00040094U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040094U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_subdevice_id_f(u32 v)
 {
@@ -204,7 +205,7 @@ static inline u32 pbdma_subdevice_channel_dma_enable_f(void)
 }
 static inline u32 pbdma_method0_r(u32 i)
 {
-	return 0x000400c0U + i*8192U;
+	return nvgpu_safe_add_u32(0x000400c0U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_method0_fifo_size_v(void)
 {
@@ -232,19 +233,19 @@ static inline u32 pbdma_method0_valid_true_f(void)
 }
 static inline u32 pbdma_method1_r(u32 i)
 {
-	return 0x000400c8U + i*8192U;
+	return nvgpu_safe_add_u32(0x000400c8U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_method2_r(u32 i)
 {
-	return 0x000400d0U + i*8192U;
+	return nvgpu_safe_add_u32(0x000400d0U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_method3_r(u32 i)
 {
-	return 0x000400d8U + i*8192U;
+	return nvgpu_safe_add_u32(0x000400d8U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_pb_count_r(u32 i)
 {
-	return 0x00040088U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040088U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_pb_count_value_v(u32 r)
 {
@@ -256,11 +257,11 @@ static inline u32 pbdma_pb_count_value_zero_f(void)
 }
 static inline u32 pbdma_data0_r(u32 i)
 {
-	return 0x000400c4U + i*8192U;
+	return nvgpu_safe_add_u32(0x000400c4U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_acquire_r(u32 i)
 {
-	return 0x00040030U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040030U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_acquire_retry_man_2_f(void)
 {
@@ -304,15 +305,15 @@ static inline u32 pbdma_acquire_timeout_en_disable_f(void)
 }
 static inline u32 pbdma_status_r(u32 i)
 {
-	return 0x00040100U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040100U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_channel_r(u32 i)
 {
-	return 0x00040120U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040120U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_signature_r(u32 i)
 {
-	return 0x00040010U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040010U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_signature_hw_valid_f(void)
 {
@@ -324,7 +325,7 @@ static inline u32 pbdma_signature_sw_zero_f(void)
 }
 static inline u32 pbdma_userd_r(u32 i)
 {
-	return 0x00040008U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040008U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_userd_target_vid_mem_f(void)
 {
@@ -344,7 +345,7 @@ static inline u32 pbdma_userd_addr_f(u32 v)
 }
 static inline u32 pbdma_config_r(u32 i)
 {
-	return 0x000400f4U + i*8192U;
+	return nvgpu_safe_add_u32(0x000400f4U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_config_l2_evict_first_f(void)
 {
@@ -380,7 +381,7 @@ static inline u32 pbdma_config_userd_writeback_enable_f(void)
 }
 static inline u32 pbdma_userd_hi_r(u32 i)
 {
-	return 0x0004000cU + i*8192U;
+	return nvgpu_safe_add_u32(0x0004000cU, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_userd_hi_addr_f(u32 v)
 {
@@ -388,7 +389,7 @@ static inline u32 pbdma_userd_hi_addr_f(u32 v)
 }
 static inline u32 pbdma_hce_ctrl_r(u32 i)
 {
-	return 0x000400e4U + i*8192U;
+	return nvgpu_safe_add_u32(0x000400e4U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_hce_ctrl_hce_priv_mode_yes_f(void)
 {
@@ -396,7 +397,7 @@ static inline u32 pbdma_hce_ctrl_hce_priv_mode_yes_f(void)
 }
 static inline u32 pbdma_intr_0_r(u32 i)
 {
-	return 0x00040108U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040108U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_intr_0_memreq_v(u32 r)
 {
@@ -524,7 +525,7 @@ static inline u32 pbdma_intr_0_signature_pending_f(void)
 }
 static inline u32 pbdma_intr_1_r(u32 i)
 {
-	return 0x00040148U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040148U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_intr_1_ctxnotvalid_m(void)
 {
@@ -536,19 +537,19 @@ static inline u32 pbdma_intr_1_ctxnotvalid_pending_f(void)
 }
 static inline u32 pbdma_intr_en_0_r(u32 i)
 {
-	return 0x0004010cU + i*8192U;
+	return nvgpu_safe_add_u32(0x0004010cU, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_intr_en_1_r(u32 i)
 {
-	return 0x0004014cU + i*8192U;
+	return nvgpu_safe_add_u32(0x0004014cU, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_intr_stall_r(u32 i)
 {
-	return 0x0004013cU + i*8192U;
+	return nvgpu_safe_add_u32(0x0004013cU, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_intr_stall_1_r(u32 i)
 {
-	return 0x00040140U + i*8192U;
+	return nvgpu_safe_add_u32(0x00040140U, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_intr_stall_1_hce_illegal_op_enabled_f(void)
 {
@@ -560,7 +561,7 @@ static inline u32 pbdma_udma_nop_r(void)
 }
 static inline u32 pbdma_target_r(u32 i)
 {
-	return 0x000400acU + i*8192U;
+	return nvgpu_safe_add_u32(0x000400acU, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_target_engine_sw_f(void)
 {
@@ -616,7 +617,7 @@ static inline u32 pbdma_target_needs_host_tsg_event_false_f(void)
 }
 static inline u32 pbdma_set_channel_info_r(u32 i)
 {
-	return 0x000400fcU + i*8192U;
+	return nvgpu_safe_add_u32(0x000400fcU, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_set_channel_info_veid_f(u32 v)
 {
@@ -624,7 +625,7 @@ static inline u32 pbdma_set_channel_info_veid_f(u32 v)
 }
 static inline u32 pbdma_timeout_r(u32 i)
 {
-	return 0x0004012cU + i*8192U;
+	return nvgpu_safe_add_u32(0x0004012cU, nvgpu_safe_mult_u32(i, 8192U));
 }
 static inline u32 pbdma_timeout_period_m(void)
 {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_PERF_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 perf_pmmgpc_perdomain_offset_v(void)
 {
@@ -240,7 +241,7 @@ static inline u32 perf_pmasys_enginestatus_rbufempty_empty_f(void)
 }
 static inline u32 perf_pmmsys_engine_sel_r(u32 i)
 {
-	return 0x0024006cU + i*512U;
+	return nvgpu_safe_add_u32(0x0024006cU, nvgpu_safe_mult_u32(i, 512U));
 }
 static inline u32 perf_pmmsys_engine_sel__size_1_v(void)
 {
@@ -248,7 +249,7 @@ static inline u32 perf_pmmsys_engine_sel__size_1_v(void)
 }
 static inline u32 perf_pmmfbp_engine_sel_r(u32 i)
 {
-	return 0x0020006cU + i*512U;
+	return nvgpu_safe_add_u32(0x0020006cU, nvgpu_safe_mult_u32(i, 512U));
 }
 static inline u32 perf_pmmfbp_engine_sel__size_1_v(void)
 {
@@ -256,7 +257,7 @@ static inline u32 perf_pmmfbp_engine_sel__size_1_v(void)
 }
 static inline u32 perf_pmmgpc_engine_sel_r(u32 i)
 {
-	return 0x0018006cU + i*512U;
+	return nvgpu_safe_add_u32(0x0018006cU, nvgpu_safe_mult_u32(i, 512U));
 }
 static inline u32 perf_pmmgpc_engine_sel__size_1_v(void)
 {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -56,6 +56,9 @@
 #ifndef NVGPU_HW_PGSP_TU104_H
 #define NVGPU_HW_PGSP_TU104_H
 
+#include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
+
 static inline u32 pgsp_falcon_irqsset_r(void)
 {
 	return 0x00110000U;
@@ -330,7 +333,7 @@ static inline u32 pgsp_falcon_cpuctl_halt_intr_f(u32 v)
 }
 static inline u32 pgsp_falcon_cpuctl_halt_intr_m(void)
 {
-	return 0x1U << 4U;
+	return U32(0x1U) << 4U;
 }
 static inline u32 pgsp_falcon_cpuctl_halt_intr_v(u32 r)
 {
@@ -342,7 +345,7 @@ static inline u32 pgsp_falcon_cpuctl_cpuctl_alias_en_f(u32 v)
 }
 static inline u32 pgsp_falcon_cpuctl_cpuctl_alias_en_m(void)
 {
-	return 0x1U << 6U;
+	return U32(0x1U) << 6U;
 }
 static inline u32 pgsp_falcon_cpuctl_cpuctl_alias_en_v(u32 r)
 {
@@ -358,7 +361,7 @@ static inline u32 pgsp_falcon_cpuctl_alias_startcpu_f(u32 v)
 }
 static inline u32 pgsp_falcon_imemc_r(u32 i)
 {
-	return 0x00110180U + i*16U;
+	return nvgpu_safe_add_u32(0x00110180U, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 pgsp_falcon_imemc_offs_f(u32 v)
 {
@@ -374,11 +377,11 @@ static inline u32 pgsp_falcon_imemc_aincw_f(u32 v)
 }
 static inline u32 pgsp_falcon_imemd_r(u32 i)
 {
-	return 0x00110184U + i*16U;
+	return nvgpu_safe_add_u32(0x00110184U, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 pgsp_falcon_imemt_r(u32 i)
 {
-	return 0x00110188U + i*16U;
+	return nvgpu_safe_add_u32(0x00110188U, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 pgsp_falcon_sctl_r(void)
 {
@@ -402,11 +405,11 @@ static inline u32 pgsp_falcon_dmactl_r(void)
 }
 static inline u32 pgsp_falcon_dmactl_dmem_scrubbing_m(void)
 {
-	return 0x1U << 1U;
+	return U32(0x1U) << 1U;
 }
 static inline u32 pgsp_falcon_dmactl_imem_scrubbing_m(void)
 {
-	return 0x1U << 2U;
+	return U32(0x1U) << 2U;
 }
 static inline u32 pgsp_falcon_dmactl_require_ctx_f(u32 v)
 {
@@ -470,7 +473,7 @@ static inline u32 pgsp_falcon_exterrstat_r(void)
 }
 static inline u32 pgsp_falcon_exterrstat_valid_m(void)
 {
-	return 0x1U << 31U;
+	return U32(0x1U) << 31U;
 }
 static inline u32 pgsp_falcon_exterrstat_valid_v(u32 r)
 {
@@ -494,7 +497,7 @@ static inline u32 pgsp_sec2_falcon_icd_cmd_opc_f(u32 v)
 }
 static inline u32 pgsp_sec2_falcon_icd_cmd_opc_m(void)
 {
-	return 0xfU << 0U;
+	return U32(0xfU) << 0U;
 }
 static inline u32 pgsp_sec2_falcon_icd_cmd_opc_v(u32 r)
 {
@@ -518,7 +521,7 @@ static inline u32 pgsp_sec2_falcon_icd_rdata_r(void)
 }
 static inline u32 pgsp_falcon_dmemc_r(u32 i)
 {
-	return 0x001101c0U + i*8U;
+	return nvgpu_safe_add_u32(0x001101c0U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 pgsp_falcon_dmemc_offs_f(u32 v)
 {
@@ -526,7 +529,7 @@ static inline u32 pgsp_falcon_dmemc_offs_f(u32 v)
 }
 static inline u32 pgsp_falcon_dmemc_offs_m(void)
 {
-	return 0x3fU << 2U;
+	return U32(0x3fU) << 2U;
 }
 static inline u32 pgsp_falcon_dmemc_blk_f(u32 v)
 {
@@ -534,7 +537,7 @@ static inline u32 pgsp_falcon_dmemc_blk_f(u32 v)
 }
 static inline u32 pgsp_falcon_dmemc_blk_m(void)
 {
-	return 0xffU << 8U;
+	return U32(0xffU) << 8U;
 }
 static inline u32 pgsp_falcon_dmemc_aincw_f(u32 v)
 {
@@ -546,7 +549,7 @@ static inline u32 pgsp_falcon_dmemc_aincr_f(u32 v)
 }
 static inline u32 pgsp_falcon_dmemd_r(u32 i)
 {
-	return 0x001101c4U + i*8U;
+	return nvgpu_safe_add_u32(0x001101c4U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 pgsp_falcon_debug1_r(void)
 {
@@ -562,7 +565,7 @@ static inline u32 pgsp_falcon_debug1_ctxsw_mode_f(u32 v)
 }
 static inline u32 pgsp_falcon_debug1_ctxsw_mode_m(void)
 {
-	return 0x1U << 16U;
+	return U32(0x1U) << 16U;
 }
 static inline u32 pgsp_falcon_debug1_ctxsw_mode_v(u32 r)
 {
@@ -574,7 +577,7 @@ static inline u32 pgsp_falcon_debug1_ctxsw_mode_init_f(void)
 }
 static inline u32 pgsp_fbif_transcfg_r(u32 i)
 {
-	return 0x00110600U + i*4U;
+	return nvgpu_safe_add_u32(0x00110600U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 pgsp_fbif_transcfg_target_local_fb_f(void)
 {
@@ -598,7 +601,7 @@ static inline u32 pgsp_fbif_transcfg_mem_type_f(u32 v)
 }
 static inline u32 pgsp_fbif_transcfg_mem_type_m(void)
 {
-	return 0x1U << 2U;
+	return U32(0x1U) << 2U;
 }
 static inline u32 pgsp_fbif_transcfg_mem_type_v(u32 r)
 {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,9 +57,10 @@
 #define NVGPU_HW_PNVDEC_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 pnvdec_falcon_irqsset_r(u32 i)
 {
-	return 0x00830000U + i*16384U;
+	return nvgpu_safe_add_u32(0x00830000U, nvgpu_safe_mult_u32(i, 16384U));
 }
 #endif
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,9 +57,10 @@
 #define NVGPU_HW_PRAM_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 pram_data032_r(u32 i)
 {
-	return 0x00700000U + i*4U;
+	return nvgpu_safe_add_u32(0x00700000U, nvgpu_safe_mult_u32(i, 4U));
 }
 #endif
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_PRI_RINGMASTER_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 pri_ringmaster_command_r(void)
 {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_PRI_RINGSTATION_GPC_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 pri_ringstation_gpc_gpc0_priv_error_adr_r(void)
 {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_PRI_RINGSTATION_SYS_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 pri_ringstation_sys_decode_config_r(void)
 {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_PROJ_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 proj_gpc_base_v(void)
 {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_PSEC_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 psec_falcon_irqsset_r(void)
 {
@@ -332,7 +333,7 @@ static inline u32 psec_falcon_cpuctl_alias_startcpu_f(u32 v)
 }
 static inline u32 psec_falcon_imemc_r(u32 i)
 {
-	return 0x00840180U + i*16U;
+	return nvgpu_safe_add_u32(0x00840180U, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 psec_falcon_imemc_offs_f(u32 v)
 {
@@ -348,11 +349,11 @@ static inline u32 psec_falcon_imemc_aincw_f(u32 v)
 }
 static inline u32 psec_falcon_imemd_r(u32 i)
 {
-	return 0x00840184U + i*16U;
+	return nvgpu_safe_add_u32(0x00840184U, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 psec_falcon_imemt_r(u32 i)
 {
-	return 0x00840188U + i*16U;
+	return nvgpu_safe_add_u32(0x00840188U, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 psec_falcon_sctl_r(void)
 {
@@ -492,7 +493,7 @@ static inline u32 psec_sec2_falcon_icd_rdata_r(void)
 }
 static inline u32 psec_falcon_dmemc_r(u32 i)
 {
-	return 0x008401c0U + i*8U;
+	return nvgpu_safe_add_u32(0x008401c0U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 psec_falcon_dmemc_offs_f(u32 v)
 {
@@ -520,7 +521,7 @@ static inline u32 psec_falcon_dmemc_aincr_f(u32 v)
 }
 static inline u32 psec_falcon_dmemd_r(u32 i)
 {
-	return 0x008401c4U + i*8U;
+	return nvgpu_safe_add_u32(0x008401c4U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 psec_falcon_debug1_r(void)
 {
@@ -548,7 +549,7 @@ static inline u32 psec_falcon_debug1_ctxsw_mode_init_f(void)
 }
 static inline u32 psec_fbif_transcfg_r(u32 i)
 {
-	return 0x00840600U + i*4U;
+	return nvgpu_safe_add_u32(0x00840600U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 psec_fbif_transcfg_target_local_fb_f(void)
 {
@@ -648,7 +649,7 @@ static inline u32 psec_falcon_hwcfg1_dmem_tag_width_v(u32 r)
 }
 static inline u32 psec_ememc_r(u32 i)
 {
-	return 0x00840ac0U + i*8U;
+	return nvgpu_safe_add_u32(0x00840ac0U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 psec_ememc__size_1_v(void)
 {
@@ -704,7 +705,7 @@ static inline u32 psec_ememc_aincr_v(u32 r)
 }
 static inline u32 psec_ememd_r(u32 i)
 {
-	return 0x00840ac4U + i*8U;
+	return nvgpu_safe_add_u32(0x00840ac4U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 psec_ememd__size_1_v(void)
 {
@@ -724,7 +725,7 @@ static inline u32 psec_ememd_data_v(u32 r)
 }
 static inline u32 psec_msgq_head_r(u32 i)
 {
-	return 0x00840c80U + i*8U;
+	return nvgpu_safe_add_u32(0x00840c80U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 psec_msgq_head_val_f(u32 v)
 {
@@ -740,7 +741,7 @@ static inline u32 psec_msgq_head_val_v(u32 r)
 }
 static inline u32 psec_msgq_tail_r(u32 i)
 {
-	return 0x00840c84U + i*8U;
+	return nvgpu_safe_add_u32(0x00840c84U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 psec_msgq_tail_val_f(u32 v)
 {
@@ -756,7 +757,7 @@ static inline u32 psec_msgq_tail_val_v(u32 r)
 }
 static inline u32 psec_queue_head_r(u32 i)
 {
-	return 0x00840c00U + i*8U;
+	return nvgpu_safe_add_u32(0x00840c00U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 psec_queue_head_address_f(u32 v)
 {
@@ -772,7 +773,7 @@ static inline u32 psec_queue_head_address_v(u32 r)
 }
 static inline u32 psec_queue_tail_r(u32 i)
 {
-	return 0x00840c04U + i*8U;
+	return nvgpu_safe_add_u32(0x00840c04U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 psec_queue_tail_address_f(u32 v)
 {
@@ -57,6 +57,7 @@
 #define NVGPU_HW_PWR_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 pwr_falcon_irqsset_r(void)
 {
@@ -452,7 +453,7 @@ static inline u32 pwr_pmu_scpctl_stat_debug_mode_v(u32 r)
 }
 static inline u32 pwr_falcon_imemc_r(u32 i)
 {
-	return 0x0010a180U + i*16U;
+	return nvgpu_safe_add_u32(0x0010a180U, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 pwr_falcon_imemc_offs_f(u32 v)
 {
@@ -468,11 +469,11 @@ static inline u32 pwr_falcon_imemc_aincw_f(u32 v)
 }
 static inline u32 pwr_falcon_imemd_r(u32 i)
 {
-	return 0x0010a184U + i*16U;
+	return nvgpu_safe_add_u32(0x0010a184U, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 pwr_falcon_imemt_r(u32 i)
 {
-	return 0x0010a188U + i*16U;
+	return nvgpu_safe_add_u32(0x0010a188U, nvgpu_safe_mult_u32(i, 16U));
 }
 static inline u32 pwr_falcon_sctl_r(void)
 {
@@ -608,7 +609,7 @@ static inline u32 pwr_pmu_falcon_icd_rdata_r(void)
 }
 static inline u32 pwr_falcon_dmemc_r(u32 i)
 {
-	return 0x0010a1c0U + i*8U;
+	return nvgpu_safe_add_u32(0x0010a1c0U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 pwr_falcon_dmemc_offs_f(u32 v)
 {
@@ -636,7 +637,7 @@ static inline u32 pwr_falcon_dmemc_aincr_f(u32 v)
 }
 static inline u32 pwr_falcon_dmemd_r(u32 i)
 {
-	return 0x0010a1c4U + i*8U;
+	return nvgpu_safe_add_u32(0x0010a1c4U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 pwr_pmu_new_instblk_r(void)
 {
@@ -700,7 +701,7 @@ static inline u32 pwr_pmu_mutex_id_release_value_init_f(void)
 }
 static inline u32 pwr_pmu_mutex_r(u32 i)
 {
-	return 0x0010a580U + i*4U;
+	return nvgpu_safe_add_u32(0x0010a580U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 pwr_pmu_mutex__size_1_v(void)
 {
@@ -720,7 +721,7 @@ static inline u32 pwr_pmu_mutex_value_initial_lock_f(void)
 }
 static inline u32 pwr_pmu_queue_head_r(u32 i)
 {
-	return 0x0010a800U + i*4U;
+	return nvgpu_safe_add_u32(0x0010a800U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 pwr_pmu_queue_head__size_1_v(void)
 {
@@ -736,7 +737,7 @@ static inline u32 pwr_pmu_queue_head_address_v(u32 r)
 }
 static inline u32 pwr_pmu_queue_tail_r(u32 i)
 {
-	return 0x0010a820U + i*4U;
+	return nvgpu_safe_add_u32(0x0010a820U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 pwr_pmu_queue_tail__size_1_v(void)
 {
@@ -776,7 +777,7 @@ static inline u32 pwr_pmu_msgq_tail_val_v(u32 r)
 }
 static inline u32 pwr_pmu_idle_mask_r(u32 i)
 {
-	return 0x0010be40U + i*4U;
+	return nvgpu_safe_add_u32(0x0010be40U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 pwr_pmu_idle_mask_gr_enabled_f(void)
 {
@@ -788,7 +789,7 @@ static inline u32 pwr_pmu_idle_mask_ce_2_enabled_f(void)
 }
 static inline u32 pwr_pmu_idle_count_r(u32 i)
 {
-	return 0x0010bf80U + i*4U;
+	return nvgpu_safe_add_u32(0x0010bf80U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 pwr_pmu_idle_count_value_f(u32 v)
 {
@@ -804,7 +805,7 @@ static inline u32 pwr_pmu_idle_count_reset_f(u32 v)
 }
 static inline u32 pwr_pmu_idle_ctrl_r(u32 i)
 {
-	return 0x0010bfc0U + i*4U;
+	return nvgpu_safe_add_u32(0x0010bfc0U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 pwr_pmu_idle_ctrl_value_m(void)
 {
@@ -828,7 +829,7 @@ static inline u32 pwr_pmu_idle_ctrl_filter_disabled_f(void)
 }
 static inline u32 pwr_pmu_idle_threshold_r(u32 i)
 {
-	return 0x0010be00U + i*4U;
+	return nvgpu_safe_add_u32(0x0010be00U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 pwr_pmu_idle_threshold_value_f(u32 v)
 {
@@ -876,19 +877,19 @@ static inline u32 pwr_pmu_idle_intr_status_intr_clear_v(void)
 }
 static inline u32 pwr_pmu_idle_mask_supp_r(u32 i)
 {
-	return 0x0010a9f0U + i*8U;
+	return nvgpu_safe_add_u32(0x0010a9f0U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 pwr_pmu_idle_mask_1_supp_r(u32 i)
 {
-	return 0x0010a9f4U + i*8U;
+	return nvgpu_safe_add_u32(0x0010a9f4U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 pwr_pmu_idle_ctrl_supp_r(u32 i)
 {
-	return 0x0010aa30U + i*8U;
+	return nvgpu_safe_add_u32(0x0010aa30U, nvgpu_safe_mult_u32(i, 8U));
 }
 static inline u32 pwr_pmu_debug_r(u32 i)
 {
-	return 0x0010a5c0U + i*4U;
+	return nvgpu_safe_add_u32(0x0010a5c0U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 pwr_pmu_debug__size_1_v(void)
 {
@@ -896,7 +897,7 @@ static inline u32 pwr_pmu_debug__size_1_v(void)
 }
 static inline u32 pwr_pmu_mailbox_r(u32 i)
 {
-	return 0x0010a450U + i*4U;
+	return nvgpu_safe_add_u32(0x0010a450U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 pwr_pmu_mailbox__size_1_v(void)
 {
@@ -956,23 +957,23 @@ static inline u32 pwr_pmu_bar0_error_status_fecserr_m(void)
 }
 static inline u32 pwr_pmu_pg_idlefilth_r(u32 i)
 {
-	return 0x0010a6c0U + i*4U;
+	return nvgpu_safe_add_u32(0x0010a6c0U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 pwr_pmu_pg_ppuidlefilth_r(u32 i)
 {
-	return 0x0010a6e8U + i*4U;
+	return nvgpu_safe_add_u32(0x0010a6e8U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 pwr_pmu_pg_idle_cnt_r(u32 i)
 {
-	return 0x0010a710U + i*4U;
+	return nvgpu_safe_add_u32(0x0010a710U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 pwr_pmu_pg_intren_r(u32 i)
 {
-	return 0x0010a760U + i*4U;
+	return nvgpu_safe_add_u32(0x0010a760U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 pwr_fbif_transcfg_r(u32 i)
 {
-	return 0x0010ae00U + i*4U;
+	return nvgpu_safe_add_u32(0x0010ae00U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 pwr_fbif_transcfg_target_local_fb_f(void)
 {
@@ -57,6 +57,7 @@
 #define NVGPU_HW_RAM_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 ram_in_ramfc_s(void)
 {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_THERM_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 therm_weight_1_r(void)
 {
@@ -80,7 +81,7 @@ static inline u32 therm_config2_grad_enable_f(u32 v)
 }
 static inline u32 therm_gate_ctrl_r(u32 i)
 {
-	return 0x00020200U + i*4U;
+	return nvgpu_safe_add_u32(0x00020200U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 therm_gate_ctrl_eng_clk_m(void)
 {
@@ -172,7 +173,7 @@ static inline u32 therm_hubmmu_idle_filter_value_m(void)
 }
 static inline u32 therm_clk_slowdown_r(u32 i)
 {
-	return 0x00020160U + i*4U;
+	return nvgpu_safe_add_u32(0x00020160U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 therm_clk_slowdown_idle_factor_f(u32 v)
 {
@@ -192,7 +193,7 @@ static inline u32 therm_clk_slowdown_idle_factor_disabled_f(void)
 }
 static inline u32 therm_grad_stepping_table_r(u32 i)
 {
-	return 0x000202c8U + i*4U;
+	return nvgpu_safe_add_u32(0x000202c8U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 therm_grad_stepping_table_slowdown_factor0_f(u32 v)
 {
@@ -284,7 +285,7 @@ static inline u32 therm_grad_stepping1_pdiv_duration_f(u32 v)
 }
 static inline u32 therm_clk_timing_r(u32 i)
 {
-	return 0x000203c0U + i*4U;
+	return nvgpu_safe_add_u32(0x000203c0U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 therm_clk_timing_grad_slowdown_f(u32 v)
 {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_TIMER_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 timer_pri_timeout_r(void)
 {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_TOP_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 top_num_gpcs_r(void)
 {
@@ -120,7 +121,7 @@ static inline u32 top_num_ces_value_v(u32 r)
 }
 static inline u32 top_device_info_r(u32 i)
 {
-	return 0x00022700U + i*4U;
+	return nvgpu_safe_add_u32(0x00022700U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 top_device_info__size_1_v(void)
 {
@@ -194,18 +195,34 @@ static inline u32 top_device_info_engine_v(u32 r)
 {
 	return (r >> 5U) & 0x1U;
 }
+static inline u32 top_device_info_engine_valid_v(void)
+{
+	return 0x00000001U;
+}
 static inline u32 top_device_info_runlist_v(u32 r)
 {
 	return (r >> 4U) & 0x1U;
 }
+static inline u32 top_device_info_runlist_valid_v(void)
+{
+	return 0x00000001U;
+}
 static inline u32 top_device_info_intr_v(u32 r)
 {
 	return (r >> 3U) & 0x1U;
 }
+static inline u32 top_device_info_intr_valid_v(void)
+{
+	return 0x00000001U;
+}
 static inline u32 top_device_info_reset_v(u32 r)
 {
 	return (r >> 2U) & 0x1U;
 }
+static inline u32 top_device_info_reset_valid_v(void)
+{
+	return 0x00000001U;
+}
 static inline u32 top_device_info_entry_v(u32 r)
 {
 	return (r >> 0U) & 0x3U;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_TRIM_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 trim_sys_nvlink_uphy_cfg_r(void)
 {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_USERMODE_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 usermode_cfg0_r(void)
 {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,10 +57,11 @@
 #define NVGPU_HW_XP_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 xp_dl_mgr_r(u32 i)
 {
-	return 0x0008b8c0U + i*4U;
+	return nvgpu_safe_add_u32(0x0008b8c0U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 xp_dl_mgr_safe_timing_f(u32 v)
 {
@@ -68,7 +69,7 @@ static inline u32 xp_dl_mgr_safe_timing_f(u32 v)
 }
 static inline u32 xp_pl_link_config_r(u32 i)
 {
-	return 0x0008c040U + i*4U;
+	return nvgpu_safe_add_u32(0x0008c040U, nvgpu_safe_mult_u32(i, 4U));
 }
 static inline u32 xp_pl_link_config_ltssm_status_f(u32 v)
 {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,6 +57,7 @@
 #define NVGPU_HW_XVE_TU104_H
 
 #include <nvgpu/types.h>
+#include <nvgpu/safe_ops.h>
 
 static inline u32 xve_rom_ctrl_r(void)
 {
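Note on the pattern above: the regenerated accessors route every `base + i*stride` offset computation through nvgpu_safe_add_u32()/nvgpu_safe_mult_u32() from the new <nvgpu/safe_ops.h>, and wrap shifted mask constants in the U32() cast macro from <nvgpu/types.h>, so that u32 arithmetic cannot silently wrap (CERT-C's unsigned-wrap rules, e.g. INT30-C). A minimal sketch of how such helpers typically work — the bodies below are an assumption about the usual check-then-trap shape, not a copy of the actual <nvgpu/safe_ops.h>, and the BUG() error handling is likewise assumed:

	#include <nvgpu/types.h>	/* u32 */
	#include <nvgpu/bug.h>		/* BUG(); assumed header location */

	static inline u32 nvgpu_safe_add_u32(u32 ui_a, u32 ui_b)
	{
		/* ui_a + ui_b wraps iff ui_b exceeds the headroom above ui_a. */
		if ((0xffffffffU - ui_a) < ui_b) {
			BUG();
		}
		return ui_a + ui_b;
	}

	static inline u32 nvgpu_safe_mult_u32(u32 ui_a, u32 ui_b)
	{
		/* ui_a * ui_b wraps iff ui_a > U32_MAX / ui_b, for nonzero ui_b. */
		if ((ui_b != 0U) && (ui_a > (0xffffffffU / ui_b))) {
			BUG();
		}
		return ui_a * ui_b;
	}

With helpers of this shape in place, a generated accessor such as pbdma_gp_base_r(i) traps on an out-of-range index instead of returning a wrapped, bogus register offset, which is why the generator emits the wrapped form rather than raw `+` and `*`.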