Revert "Revert "gpu: nvgpu: add turing support""

This reverts commit 278842d6ff4e15467e0b8761c6e1b2a05f926f91.

Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Change-Id: I37f47c137c048ddc3a728e143b6f30be525de120
Reviewed-on: https://git-master.nvidia.com/r/1918622
This commit is contained in:
Terje Bergstrom
2018-10-03 15:15:24 -07:00
committed by Abdul Salam
parent b74a4dbd26
commit 3bda3a0678
84 changed files with 29863 additions and 17 deletions

View File

@@ -146,3 +146,10 @@ config NVGPU_USE_TEGRA_ALLOC_FD
fd limit.
It is only available in Tegra kernel.
config TEGRA_GPU_NEXT
bool "Turing family GPU"
depends on GK20A && ARCH_TEGRA_19x_SOC
default y
help
Support for the NVIDIA Turing family of GPUs.

View File

@@ -4,7 +4,6 @@ ccflags-y += -I$(srctree.nvgpu)/drivers/gpu/nvgpu/include
ccflags-y += -I$(srctree.nvgpu)/drivers/gpu/nvgpu
ccflags-y += -I$(srctree.nvgpu)/include
ccflags-y += -I$(srctree.nvgpu)/include/uapi
ccflags-y += -I$(srctree.nvgpu-next)/drivers/gpu/nvgpu
ccflags-y += -I$(srctree)/drivers/devfreq
@@ -370,3 +369,27 @@ nvgpu-y += \
lpwr/rppg.o \
lpwr/lpwr.o \
gv100/clk_gv100.o
nvgpu-$(CONFIG_TEGRA_GPU_NEXT) += \
common/bus/bus_tu104.o \
common/fb/fb_tu104.o \
common/ltc/ltc_tu104.o \
common/mc/mc_tu104.o
nvgpu-$(CONFIG_TEGRA_GPU_NEXT) += \
tu104/hal_tu104.o \
tu104/fifo_tu104.o \
tu104/gr_tu104.o \
tu104/func_tu104.o \
tu104/bios_tu104.o \
tu104/nvlink_tu104.o \
tu104/fbpa_tu104.o \
tu104/gr_ctx_tu104.o \
tu104/flcn_tu104.o \
tu104/sec2_tu104.o \
tu104/ecc_tu104.o \
tu104/regops_tu104.o \
tu104/acr_tu104.o
nvgpu-$(CONFIG_TEGRA_GPU_NEXT) += \
os/linux/os_ops_tu104.o

View File

@@ -222,4 +222,21 @@ srcs := os/posix/nvgpu.c \
gv100/pmu_gv100.c \
gv100/perf_gv100.c \
gv100/gsp_gv100.c \
gv100/clk_gv100.c
gv100/clk_gv100.c \
common/bus/bus_tu104.c \
common/fb/fb_tu104.c \
common/ltc/ltc_tu104.c \
common/mc/mc_tu104.c \
tu104/bios_tu104.c \
tu104/ecc_tu104.c \
tu104/fbpa_tu104.c \
tu104/fifo_tu104.c \
tu104/flcn_tu104.c \
tu104/gr_ctx_tu104.c \
tu104/gr_tu104.c \
tu104/hal_tu104.c \
tu104/nvlink_tu104.c \
tu104/sec2_tu104.c \
tu104/func_tu104.c \
tu104/regops_tu104.c \
tu104/acr_tu104.c

View File

@@ -0,0 +1,74 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/timers.h>
#include <nvgpu/mm.h>
#include "gk20a/gk20a.h"
#include "bus_tu104.h"
#include "tu104/func_tu104.h"
#include <nvgpu/hw/tu104/hw_bus_tu104.h>
#include <nvgpu/hw/tu104/hw_func_tu104.h>
int bus_tu104_bar2_bind(struct gk20a *g, struct nvgpu_mem *bar2_inst)
{
struct nvgpu_timeout timeout;
int err = 0;
u64 iova = nvgpu_inst_block_addr(g, bar2_inst);
u32 ptr_v = (u32)(iova >> bus_bar2_block_ptr_shift_v());
nvgpu_log_info(g, "bar2 inst block ptr: 0x%08x", ptr_v);
err = nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
if (err != 0) {
return err;
}
nvgpu_func_writel(g, func_priv_bar2_block_r(),
nvgpu_aperture_mask(g, bar2_inst,
bus_bar2_block_target_sys_mem_ncoh_f(),
bus_bar2_block_target_sys_mem_coh_f(),
bus_bar2_block_target_vid_mem_f()) |
bus_bar2_block_mode_virtual_f() |
bus_bar2_block_ptr_f(ptr_v));
do {
u32 val = nvgpu_func_readl(g,
func_priv_bind_status_r());
u32 pending = bus_bind_status_bar2_pending_v(val);
u32 outstanding = bus_bind_status_bar2_outstanding_v(val);
if (!pending && !outstanding) {
break;
}
nvgpu_udelay(5);
} while (!nvgpu_timeout_expired(&timeout));
if (nvgpu_timeout_peek_expired(&timeout)) {
err = -EINVAL;
}
return err;
}

View File

@@ -0,0 +1,31 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
 * Guard renamed from __NVGPU_TU104_BUS__: identifiers beginning with a
 * double underscore are reserved for the implementation (C11 7.1.3);
 * also matches the non-reserved guard style used by LTC_TU104_H.
 */
#ifndef NVGPU_BUS_TU104_H
#define NVGPU_BUS_TU104_H

struct gk20a;
struct nvgpu_mem;

/*
 * Bind the BAR2 instance block for Turing. Returns 0 on success, a
 * negative errno on timeout-init failure or bind timeout.
 */
int bus_tu104_bar2_bind(struct gk20a *g, struct nvgpu_mem *bar2_inst);

#endif /* NVGPU_BUS_TU104_H */

View File

@@ -0,0 +1,576 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <trace/events/gk20a.h>
#include <nvgpu/log.h>
#include <nvgpu/types.h>
#include <nvgpu/timers.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/io.h>
#include <nvgpu/utils.h>
#include "common/fb/fb_gv11b.h"
#include "common/mc/mc_tu104.h"
#include "gk20a/gk20a.h"
#include "tu104/func_tu104.h"
#include "fb_tu104.h"
#include "nvgpu/hw/tu104/hw_fb_tu104.h"
#include "nvgpu/hw/tu104/hw_func_tu104.h"
/*
 * Enable the HUB (MMU) interrupt sources at the Turing per-vector
 * interrupt tree.
 *
 * Each FB_MMU_INT_VECTOR_* register is read back to discover which
 * interrupt vector a given source (info fault, non-replayable fault,
 * replayable fault, ECC error) was assigned; that vector is then
 * enabled via intr_tu104_vector_en_set().
 */
void tu104_fb_enable_hub_intr(struct gk20a *g)
{
	u32 info_fault = nvgpu_readl(g, fb_mmu_int_vector_info_fault_r());
	u32 nonreplay_fault = nvgpu_readl(g,
		fb_mmu_int_vector_fault_r(NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX));
	u32 replay_fault = nvgpu_readl(g,
		fb_mmu_int_vector_fault_r(NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX));
	u32 ecc_error = nvgpu_readl(g, fb_mmu_int_vector_ecc_error_r());

	intr_tu104_vector_en_set(g,
		fb_mmu_int_vector_info_fault_vector_v(info_fault));
	/* fault-buffer sources carry separate "notify" and "error" vectors */
	intr_tu104_vector_en_set(g,
		fb_mmu_int_vector_fault_notify_v(nonreplay_fault));
	intr_tu104_vector_en_set(g,
		fb_mmu_int_vector_fault_error_v(nonreplay_fault));
	intr_tu104_vector_en_set(g,
		fb_mmu_int_vector_fault_notify_v(replay_fault));
	intr_tu104_vector_en_set(g,
		fb_mmu_int_vector_fault_error_v(replay_fault));
	intr_tu104_vector_en_set(g,
		fb_mmu_int_vector_ecc_error_vector_v(ecc_error));
}
/*
 * Disable the HUB (MMU) interrupt sources at the Turing per-vector
 * interrupt tree. Mirror image of tu104_fb_enable_hub_intr(): the
 * vector assignments are read back from the FB_MMU_INT_VECTOR_*
 * registers and each assigned vector is masked.
 */
void tu104_fb_disable_hub_intr(struct gk20a *g)
{
	u32 info_vec_reg = nvgpu_readl(g, fb_mmu_int_vector_info_fault_r());
	u32 nonreplay_vec_reg = nvgpu_readl(g,
		fb_mmu_int_vector_fault_r(NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX));
	u32 replay_vec_reg = nvgpu_readl(g,
		fb_mmu_int_vector_fault_r(NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX));
	u32 ecc_vec_reg = nvgpu_readl(g, fb_mmu_int_vector_ecc_error_r());

	/* info fault, then notify/error pairs for the non-replayable and
	 * replayable fault buffers, then the ECC error vector */
	intr_tu104_vector_en_clear(g,
		fb_mmu_int_vector_info_fault_vector_v(info_vec_reg));
	intr_tu104_vector_en_clear(g,
		fb_mmu_int_vector_fault_notify_v(nonreplay_vec_reg));
	intr_tu104_vector_en_clear(g,
		fb_mmu_int_vector_fault_error_v(nonreplay_vec_reg));
	intr_tu104_vector_en_clear(g,
		fb_mmu_int_vector_fault_notify_v(replay_vec_reg));
	intr_tu104_vector_en_clear(g,
		fb_mmu_int_vector_fault_error_v(replay_vec_reg));
	intr_tu104_vector_en_clear(g,
		fb_mmu_int_vector_ecc_error_vector_v(ecc_vec_reg));
}
/*
 * Report whether any HUB (MMU) interrupt vector — replayable or
 * non-replayable fault notify/error, info fault, or ECC error — is
 * currently pending.
 */
bool tu104_fb_mmu_fault_pending(struct gk20a *g)
{
	u32 info_vec = nvgpu_readl(g, fb_mmu_int_vector_info_fault_r());
	u32 nonreplay_vec = nvgpu_readl(g,
		fb_mmu_int_vector_fault_r(NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX));
	u32 replay_vec = nvgpu_readl(g,
		fb_mmu_int_vector_fault_r(NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX));
	u32 ecc_vec = nvgpu_readl(g, fb_mmu_int_vector_ecc_error_r());

	return intr_tu104_vector_intr_pending(g,
			fb_mmu_int_vector_fault_notify_v(replay_vec)) ||
		intr_tu104_vector_intr_pending(g,
			fb_mmu_int_vector_fault_error_v(replay_vec)) ||
		intr_tu104_vector_intr_pending(g,
			fb_mmu_int_vector_fault_notify_v(nonreplay_vec)) ||
		intr_tu104_vector_intr_pending(g,
			fb_mmu_int_vector_fault_error_v(nonreplay_vec)) ||
		intr_tu104_vector_intr_pending(g,
			fb_mmu_int_vector_info_fault_vector_v(info_vec)) ||
		intr_tu104_vector_intr_pending(g,
			fb_mmu_int_vector_ecc_error_vector_v(ecc_vec));
}
/*
 * Service a pending MMU fault: the info-fault vector plus the
 * non-replayable and replayable fault buffers (when enabled).
 * Called from tu104_fb_hub_isr() with mm.hub_isr_mutex held.
 */
static void tu104_fb_handle_mmu_fault(struct gk20a *g)
{
	u32 info_fault = nvgpu_readl(g, fb_mmu_int_vector_info_fault_r());
	u32 nonreplay_fault = nvgpu_readl(g,
		fb_mmu_int_vector_fault_r(NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX));
	u32 replay_fault = nvgpu_readl(g,
		fb_mmu_int_vector_fault_r(NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX));
	u32 fault_status = g->ops.fb.read_mmu_fault_status(g);

	nvgpu_log(g, gpu_dbg_intr, "mmu_fault_status = 0x%08x", fault_status);

	/* "info"/other faults: dropped faults and notifications that do
	 * not go through a fault buffer */
	if (intr_tu104_vector_intr_pending(g,
			fb_mmu_int_vector_info_fault_vector_v(info_fault))) {
		intr_tu104_intr_clear_leaf_vector(g,
			fb_mmu_int_vector_info_fault_vector_v(info_fault));

		gv11b_fb_handle_dropped_mmu_fault(g, fault_status);
		gv11b_fb_handle_other_fault_notify(g, fault_status);
	}

	/* non-replayable fault buffer: "notify" means new entries,
	 * "error" means buffer overflow */
	if (gv11b_fb_is_fault_buf_enabled(g,
			NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX)) {
		if (intr_tu104_vector_intr_pending(g,
				fb_mmu_int_vector_fault_notify_v(nonreplay_fault))) {
			intr_tu104_intr_clear_leaf_vector(g,
				fb_mmu_int_vector_fault_notify_v(nonreplay_fault));

			gv11b_fb_handle_mmu_nonreplay_replay_fault(g,
					fault_status,
					NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX);

			/*
			 * When all the faults are processed,
			 * GET and PUT will have same value and mmu fault status
			 * bit will be reset by HW
			 */
		}

		if (intr_tu104_vector_intr_pending(g,
				fb_mmu_int_vector_fault_error_v(nonreplay_fault))) {
			intr_tu104_intr_clear_leaf_vector(g,
				fb_mmu_int_vector_fault_error_v(nonreplay_fault));

			gv11b_fb_handle_nonreplay_fault_overflow(g,
					fault_status);
		}
	}

	/* replayable fault buffer: same notify/error split */
	if (gv11b_fb_is_fault_buf_enabled(g,
			NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX)) {
		if (intr_tu104_vector_intr_pending(g,
				fb_mmu_int_vector_fault_notify_v(replay_fault))) {
			intr_tu104_intr_clear_leaf_vector(g,
				fb_mmu_int_vector_fault_notify_v(replay_fault));

			gv11b_fb_handle_mmu_nonreplay_replay_fault(g,
					fault_status,
					NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
		}

		if (intr_tu104_vector_intr_pending(g,
				fb_mmu_int_vector_fault_error_v(replay_fault))) {
			intr_tu104_intr_clear_leaf_vector(g,
				fb_mmu_int_vector_fault_error_v(replay_fault));

			gv11b_fb_handle_replay_fault_overflow(g,
					fault_status);
		}
	}

	nvgpu_log(g, gpu_dbg_intr, "clear mmu fault status");
	g->ops.fb.write_mmu_fault_status(g,
			fb_mmu_fault_status_valid_clear_f());
}
/*
 * Top-level HUB (MMU) interrupt service routine for Turing.
 * Handles ECC error notifications (L2TLB, HUBTLB, FILLUNIT) and then
 * dispatches MMU fault handling; serialized by mm.hub_isr_mutex.
 */
void tu104_fb_hub_isr(struct gk20a *g)
{
	u32 info_fault = nvgpu_readl(g, fb_mmu_int_vector_info_fault_r());
	u32 nonreplay_fault = nvgpu_readl(g,
		fb_mmu_int_vector_fault_r(NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX));
	u32 replay_fault = nvgpu_readl(g,
		fb_mmu_int_vector_fault_r(NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX));
	u32 ecc_error = nvgpu_readl(g, fb_mmu_int_vector_ecc_error_r());
	u32 status;

	nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);

	if (intr_tu104_vector_intr_pending(g,
			fb_mmu_int_vector_ecc_error_vector_v(ecc_error))) {
		nvgpu_info(g, "ecc uncorrected error notify");

		intr_tu104_intr_clear_leaf_vector(g,
			fb_mmu_int_vector_ecc_error_vector_v(ecc_error));

		/* check each ECC status register and service whichever
		 * units report errors */
		status = nvgpu_readl(g, fb_mmu_l2tlb_ecc_status_r());
		if (status) {
			gv11b_handle_l2tlb_ecc_isr(g, status);
		}

		status = nvgpu_readl(g, fb_mmu_hubtlb_ecc_status_r());
		if (status) {
			gv11b_handle_hubtlb_ecc_isr(g, status);
		}

		status = nvgpu_readl(g, fb_mmu_fillunit_ecc_status_r());
		if (status) {
			gv11b_handle_fillunit_ecc_isr(g, status);
		}
	}

	/* any pending fault vector -> run the common fault handler */
	if (intr_tu104_vector_intr_pending(g,
			fb_mmu_int_vector_fault_notify_v(replay_fault)) ||
	    intr_tu104_vector_intr_pending(g,
			fb_mmu_int_vector_fault_error_v(replay_fault)) ||
	    intr_tu104_vector_intr_pending(g,
			fb_mmu_int_vector_fault_notify_v(nonreplay_fault)) ||
	    intr_tu104_vector_intr_pending(g,
			fb_mmu_int_vector_fault_error_v(nonreplay_fault)) ||
	    intr_tu104_vector_intr_pending(g,
			fb_mmu_int_vector_info_fault_vector_v(info_fault))) {
		nvgpu_log(g, gpu_dbg_intr, "MMU Fault");
		tu104_fb_handle_mmu_fault(g);
	}

	nvgpu_mutex_release(&g->mm.hub_isr_mutex);
}
/*
 * Thin accessors for the MMU fault-buffer and fault-snapshot registers.
 * On Turing these registers live in the FUNC privileged space, so all
 * accesses go through nvgpu_func_readl()/nvgpu_func_writel() rather
 * than plain BAR0 reads/writes.
 */

/* Program the fault buffer base address (lo/hi pair) for buffer 'index'. */
void fb_tu104_write_mmu_fault_buffer_lo_hi(struct gk20a *g, u32 index,
	u32 addr_lo, u32 addr_hi)
{
	nvgpu_func_writel(g,
		func_priv_mmu_fault_buffer_lo_r(index), addr_lo);
	nvgpu_func_writel(g,
		func_priv_mmu_fault_buffer_hi_r(index), addr_hi);
}

/* Read the fault buffer GET pointer for buffer 'index'. */
u32 fb_tu104_read_mmu_fault_buffer_get(struct gk20a *g, u32 index)
{
	return nvgpu_func_readl(g,
		func_priv_mmu_fault_buffer_get_r(index));
}

/* Write the fault buffer GET pointer for buffer 'index'. */
void fb_tu104_write_mmu_fault_buffer_get(struct gk20a *g, u32 index,
	u32 reg_val)
{
	nvgpu_func_writel(g,
		func_priv_mmu_fault_buffer_get_r(index),
		reg_val);
}

/* Read the fault buffer PUT pointer for buffer 'index'. */
u32 fb_tu104_read_mmu_fault_buffer_put(struct gk20a *g, u32 index)
{
	return nvgpu_func_readl(g,
		func_priv_mmu_fault_buffer_put_r(index));
}

/* Read the fault buffer size register for buffer 'index'. */
u32 fb_tu104_read_mmu_fault_buffer_size(struct gk20a *g, u32 index)
{
	return nvgpu_func_readl(g,
		func_priv_mmu_fault_buffer_size_r(index));
}

/* Write the fault buffer size register for buffer 'index'. */
void fb_tu104_write_mmu_fault_buffer_size(struct gk20a *g, u32 index,
	u32 reg_val)
{
	nvgpu_func_writel(g,
		func_priv_mmu_fault_buffer_size_r(index),
		reg_val);
}

/* Read the faulting-address snapshot (lo/hi pair). */
void fb_tu104_read_mmu_fault_addr_lo_hi(struct gk20a *g,
	u32 *addr_lo, u32 *addr_hi)
{
	*addr_lo = nvgpu_func_readl(g,
			func_priv_mmu_fault_addr_lo_r());
	*addr_hi = nvgpu_func_readl(g,
			func_priv_mmu_fault_addr_hi_r());
}

/* Read the faulting instance-block snapshot (lo/hi pair). */
void fb_tu104_read_mmu_fault_inst_lo_hi(struct gk20a *g,
	u32 *inst_lo, u32 *inst_hi)
{
	*inst_lo = nvgpu_func_readl(g,
			func_priv_mmu_fault_inst_lo_r());
	*inst_hi = nvgpu_func_readl(g,
			func_priv_mmu_fault_inst_hi_r());
}

/* Read the fault info snapshot register. */
u32 fb_tu104_read_mmu_fault_info(struct gk20a *g)
{
	return nvgpu_func_readl(g,
			func_priv_mmu_fault_info_r());
}

/* Read the MMU fault status register. */
u32 fb_tu104_read_mmu_fault_status(struct gk20a *g)
{
	return nvgpu_func_readl(g,
			func_priv_mmu_fault_status_r());
}

/* Write the MMU fault status register (e.g. to clear the valid bit). */
void fb_tu104_write_mmu_fault_status(struct gk20a *g, u32 reg_val)
{
	nvgpu_func_writel(g, func_priv_mmu_fault_status_r(),
			reg_val);
}
/*
 * Invalidate all GMMU TLB entries for the address space rooted at the
 * given PDB, by programming the FUNC-space invalidate registers and
 * polling until HW deasserts the trigger bit. Serialized against other
 * invalidates by mm.tlb_lock.
 *
 * NOTE(review): err is not updated when the poll loop exits because
 * the timeout expired, so a timed-out invalidate still returns 0 (the
 * timeout is only logged by nvgpu_timeout_expired_msg()) — confirm
 * this is intentional.
 */
int fb_tu104_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
{
	struct nvgpu_timeout timeout;
	u32 addr_lo;
	u32 data;
	int err = 0;

	nvgpu_log_fn(g, " ");

	/*
	 * pagetables are considered sw states which are preserved after
	 * prepare_poweroff. When gk20a deinit releases those pagetables,
	 * common code in vm unmap path calls tlb invalidate that touches
	 * hw. Use the power_on flag to skip tlb invalidation when gpu
	 * power is turned off
	 */
	if (!g->power_on) {
		return 0;
	}

	/* PDB address is programmed in 4K units */
	addr_lo = u64_lo32(nvgpu_mem_get_addr(g, pdb) >> 12);

	err = nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
	if (err != 0) {
		return err;
	}

	nvgpu_mutex_acquire(&g->mm.tlb_lock);

	trace_gk20a_mm_tlb_invalidate(g->name);

	nvgpu_func_writel(g, func_priv_mmu_invalidate_pdb_r(),
		fb_mmu_invalidate_pdb_addr_f(addr_lo) |
		nvgpu_aperture_mask(g, pdb,
			fb_mmu_invalidate_pdb_aperture_sys_mem_f(),
			fb_mmu_invalidate_pdb_aperture_sys_mem_f(),
			fb_mmu_invalidate_pdb_aperture_vid_mem_f()));

	nvgpu_func_writel(g, func_priv_mmu_invalidate_r(),
		fb_mmu_invalidate_all_va_true_f() |
		fb_mmu_invalidate_trigger_true_f());

	/* HW clears the trigger bit when the invalidate completes */
	do {
		data = nvgpu_func_readl(g,
				func_priv_mmu_invalidate_r());
		if (fb_mmu_invalidate_trigger_v(data) !=
				fb_mmu_invalidate_trigger_true_v()) {
			break;
		}
		nvgpu_udelay(2);
	} while (!nvgpu_timeout_expired_msg(&timeout,
				"wait mmu invalidate"));

	trace_gk20a_mm_tlb_invalidate_done(g->name);

	nvgpu_mutex_release(&g->mm.tlb_lock);

	return err;
}
/*
 * Trigger a replayable-fault MMU invalidate (invalidate_replay_val
 * selects cancel/start/... semantics), then poll until HW deasserts
 * the trigger bit. Serialized by mm.tlb_lock.
 *
 * Returns 0 on success, an error from nvgpu_timeout_init(), or
 * -ETIMEDOUT when the invalidate never completes.
 */
int fb_tu104_mmu_invalidate_replay(struct gk20a *g,
			 u32 invalidate_replay_val)
{
	int err;
	u32 reg_val;
	struct nvgpu_timeout timeout;

	nvgpu_log_fn(g, " ");

	/* retry 200 times */
	err = nvgpu_timeout_init(g, &timeout, 200, NVGPU_TIMER_RETRY_TIMER);
	if (err != 0) {
		return err;
	}

	/*
	 * Fix: err must be (re)armed to -ETIMEDOUT here. Previously the
	 * function initialized err to -ETIMEDOUT but then overwrote it
	 * with the 0 returned by a successful nvgpu_timeout_init(), so
	 * falling out of the polling loop on timeout silently returned
	 * success and skipped the error log below.
	 */
	err = -ETIMEDOUT;

	nvgpu_mutex_acquire(&g->mm.tlb_lock);

	reg_val = nvgpu_func_readl(g, func_priv_mmu_invalidate_r());
	reg_val |= fb_mmu_invalidate_all_va_true_f() |
		fb_mmu_invalidate_all_pdb_true_f() |
		invalidate_replay_val |
		fb_mmu_invalidate_trigger_true_f();
	nvgpu_func_writel(g, func_priv_mmu_invalidate_r(), reg_val);

	/* HW clears the trigger bit when the invalidate completes */
	do {
		reg_val = nvgpu_func_readl(g,
				func_priv_mmu_invalidate_r());
		if (fb_mmu_invalidate_trigger_v(reg_val) !=
				fb_mmu_invalidate_trigger_true_v()) {
			err = 0;
			break;
		}
		nvgpu_udelay(5);
	} while (!nvgpu_timeout_expired_msg(&timeout,
			"invalidate replay failed on 0x%x",
			invalidate_replay_val));
	/* fix: the format string referenced a conversion with no matching
	 * argument; pass invalidate_replay_val (u32 -> %x) */

	if (err) {
		nvgpu_err(g, "invalidate replay timedout");
	}

	nvgpu_mutex_release(&g->mm.tlb_lock);

	return err;
}
/*
 * Program the compression-bit cache (CBC) backing-store registers
 * (FB_MMU_CBC_BASE/TOP/MAX) from the previously allocated
 * compbit_store memory, then invalidate all comptag lines.
 */
void fb_tu104_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
{
	u64 base_divisor;
	u64 compbit_store_base;
	u64 compbit_store_pa;
	u64 cbc_start_addr, cbc_end_addr;
	u64 cbc_top;
	u32 cbc_top_size;
	u32 cbc_max;

	compbit_store_pa = nvgpu_mem_get_addr(g, &gr->compbit_store.mem);
	/* CBC_BASE is programmed in divisor-aligned units; see
	 * g->ops.ltc.get_cbc_base_divisor() */
	base_divisor = g->ops.ltc.get_cbc_base_divisor(g);
	compbit_store_base = DIV_ROUND_UP(compbit_store_pa, base_divisor);

	cbc_start_addr = (u64)g->ltc_count * (compbit_store_base <<
			fb_mmu_cbc_base_address_alignment_shift_v());
	cbc_end_addr = cbc_start_addr + gr->compbit_backing_size;

	/* CBC_TOP is derived per-LTC (end address / ltc_count), expressed
	 * in alignment-shift units relative to the base */
	cbc_top = (cbc_end_addr / g->ltc_count) >>
			fb_mmu_cbc_base_address_alignment_shift_v();
	cbc_top_size = u64_lo32(cbc_top) - compbit_store_base;

	nvgpu_writel(g, fb_mmu_cbc_top_r(),
			fb_mmu_cbc_top_size_f(cbc_top_size));

	cbc_max = nvgpu_readl(g, fb_mmu_cbc_max_r());
	cbc_max = set_field(cbc_max,
		fb_mmu_cbc_max_comptagline_m(),
		fb_mmu_cbc_max_comptagline_f(gr->max_comptag_lines));
	nvgpu_writel(g, fb_mmu_cbc_max_r(), cbc_max);

	nvgpu_writel(g, fb_mmu_cbc_base_r(),
		fb_mmu_cbc_base_address_f(compbit_store_base));

	nvgpu_log(g, gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte,
		"compbit base.pa: 0x%x,%08x cbc_base:0x%llx\n",
		(u32)(compbit_store_pa >> 32),
		(u32)(compbit_store_pa & 0xffffffff),
		compbit_store_base);

	gr->compbit_store.base_hw = compbit_store_base;

	g->ops.ltc.cbc_ctrl(g, gk20a_cbc_op_invalidate,
		0, gr->max_comptag_lines - 1);
}
static int tu104_fb_wait_mmu_bind(struct gk20a *g)
{
struct nvgpu_timeout timeout;
u32 val;
int err;
err = nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
if (err) {
return err;
}
do {
val = nvgpu_readl(g, fb_mmu_bind_r());
if ((val & fb_mmu_bind_trigger_true_f()) !=
fb_mmu_bind_trigger_true_f()) {
return 0;
}
nvgpu_udelay(2);
} while (!nvgpu_timeout_expired_msg(&timeout, "mmu bind timedout"));
return -ETIMEDOUT;
}
/*
 * PDB cache workaround for Turing: bind 256 dummy instance blocks to
 * the unused engine id 0x0, unbind twice, then bind a 257th instance
 * block that reserves PDB cache entry 255.
 *
 * Requires g->pdb_cache_war_mem (257 pages) to have been allocated
 * beforehand; returns -EINVAL when it is not valid, or the error from
 * any failed bind wait.
 */
int tu104_fb_apply_pdb_cache_war(struct gk20a *g)
{
	u64 inst_blk_base_addr;
	u32 inst_blk_addr;
	u32 i;
	int err;

	if (!nvgpu_mem_is_valid(&g->pdb_cache_war_mem)) {
		return -EINVAL;
	}

	inst_blk_base_addr = nvgpu_mem_get_addr(g, &g->pdb_cache_war_mem);

	/* Bind 256 instance blocks to unused engine ID 0x0 */
	for (i = 0U; i < 256U; i++) {
		/* the IMB address is programmed in alignment units */
		inst_blk_addr = u64_lo32((inst_blk_base_addr + (i * PAGE_SIZE))
				>> fb_mmu_bind_imb_addr_alignment_v());
		nvgpu_writel(g, fb_mmu_bind_imb_r(),
			fb_mmu_bind_imb_addr_f(inst_blk_addr) |
			nvgpu_aperture_mask(g, &g->pdb_cache_war_mem,
				fb_mmu_bind_imb_aperture_sys_mem_nc_f(),
				fb_mmu_bind_imb_aperture_sys_mem_c_f(),
				fb_mmu_bind_imb_aperture_vid_mem_f()));

		nvgpu_writel(g, fb_mmu_bind_r(),
			fb_mmu_bind_engine_id_f(0x0U) |
			fb_mmu_bind_trigger_true_f());

		err = tu104_fb_wait_mmu_bind(g);
		if (err) {
			return err;
		}
	}

	/* first unbind */
	nvgpu_writel(g, fb_mmu_bind_imb_r(),
		fb_mmu_bind_imb_aperture_f(0x1U) |
		fb_mmu_bind_imb_addr_f(0x0U));
	nvgpu_writel(g, fb_mmu_bind_r(),
		fb_mmu_bind_engine_id_f(0x0U) |
		fb_mmu_bind_trigger_true_f());
	err = tu104_fb_wait_mmu_bind(g);
	if (err) {
		return err;
	}

	/* second unbind */
	nvgpu_writel(g, fb_mmu_bind_r(),
		fb_mmu_bind_engine_id_f(0x0U) |
		fb_mmu_bind_trigger_true_f());
	err = tu104_fb_wait_mmu_bind(g);
	if (err) {
		return err;
	}

	/* Bind 257th (last) instance block that reserves PDB cache entry 255 */
	inst_blk_addr = u64_lo32((inst_blk_base_addr + (256 * PAGE_SIZE))
			>> fb_mmu_bind_imb_addr_alignment_v());
	nvgpu_writel(g, fb_mmu_bind_imb_r(),
		fb_mmu_bind_imb_addr_f(inst_blk_addr) |
		nvgpu_aperture_mask(g, &g->pdb_cache_war_mem,
			fb_mmu_bind_imb_aperture_sys_mem_nc_f(),
			fb_mmu_bind_imb_aperture_sys_mem_c_f(),
			fb_mmu_bind_imb_aperture_vid_mem_f()));
	nvgpu_writel(g, fb_mmu_bind_r(),
		fb_mmu_bind_engine_id_f(0x0U) |
		fb_mmu_bind_trigger_true_f());

	err = tu104_fb_wait_mmu_bind(g);
	if (err) {
		return err;
	}

	return 0;
}

View File

@@ -0,0 +1,62 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
 * Guard renamed from __FB_TU104_H__: identifiers beginning with a
 * double underscore are reserved for the implementation (C11 7.1.3);
 * also matches the non-reserved guard style used by LTC_TU104_H.
 */
#ifndef FB_TU104_H
#define FB_TU104_H

#include <nvgpu/types.h>

struct gk20a;
struct gr_gk20a;
struct nvgpu_mem;

/* HUB (MMU) interrupt enable/disable/poll/service entry points. */
void tu104_fb_enable_hub_intr(struct gk20a *g);
void tu104_fb_disable_hub_intr(struct gk20a *g);
bool tu104_fb_mmu_fault_pending(struct gk20a *g);
void tu104_fb_hub_isr(struct gk20a *g);

/* MMU fault-buffer / fault-snapshot register accessors (FUNC space). */
void fb_tu104_write_mmu_fault_buffer_lo_hi(struct gk20a *g, u32 index,
	u32 addr_lo, u32 addr_hi);
u32 fb_tu104_read_mmu_fault_buffer_get(struct gk20a *g, u32 index);
void fb_tu104_write_mmu_fault_buffer_get(struct gk20a *g, u32 index,
	u32 reg_val);
u32 fb_tu104_read_mmu_fault_buffer_put(struct gk20a *g, u32 index);
u32 fb_tu104_read_mmu_fault_buffer_size(struct gk20a *g, u32 index);
void fb_tu104_write_mmu_fault_buffer_size(struct gk20a *g, u32 index,
	u32 reg_val);
void fb_tu104_read_mmu_fault_addr_lo_hi(struct gk20a *g,
	u32 *addr_lo, u32 *addr_hi);
void fb_tu104_read_mmu_fault_inst_lo_hi(struct gk20a *g,
	u32 *inst_lo, u32 *inst_hi);
u32 fb_tu104_read_mmu_fault_info(struct gk20a *g);
u32 fb_tu104_read_mmu_fault_status(struct gk20a *g);
void fb_tu104_write_mmu_fault_status(struct gk20a *g, u32 reg_val);

/* TLB / replayable-fault invalidates. */
int fb_tu104_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb);
int fb_tu104_mmu_invalidate_replay(struct gk20a *g,
	u32 invalidate_replay_val);

/* CBC programming and PDB cache workaround. */
void fb_tu104_init_cbc(struct gk20a *g, struct gr_gk20a *gr);
int tu104_fb_apply_pdb_cache_war(struct gk20a *g);

#endif /* FB_TU104_H */

View File

@@ -0,0 +1,243 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/types.h>
#include <nvgpu/ltc.h>
#include <nvgpu/comptags.h>
#include <nvgpu/io.h>
#include <nvgpu/timers.h>
#include <trace/events/gk20a.h>
#include "gk20a/gk20a.h"
#include "ltc_tu104.h"
#include "common/ltc/ltc_gv11b.h"
#include <nvgpu/hw/tu104/hw_ltc_tu104.h>
/*
 * Turing LTC floorsweep-state init: run the gv11b init, then cache the
 * CBC geometry (slices per LTC, cache line size) from
 * LTC_LTCS_LTSS_CBC_PARAM2 for later comptag computations.
 */
void ltc_tu104_init_fs_state(struct gk20a *g)
{
	struct gr_gk20a *gr = &g->gr;
	u32 reg;

	gv11b_ltc_init_fs_state(g);

	reg = nvgpu_readl(g, ltc_ltcs_ltss_cbc_param2_r());
	gr->slices_per_ltc =
		ltc_ltcs_ltss_cbc_param2_slices_per_ltc_v(reg);
	/* the cache-line-size field encodes a shift on a 512B base */
	gr->cacheline_size =
		512U << ltc_ltcs_ltss_cbc_param2_cache_line_size_v(reg);
}
/*
 * Alignment divisor for the CBC backing-store base address:
 * ltc_count scaled by the HW base-alignment shift. Used to convert a
 * physical address into the units programmed into FB_MMU_CBC_BASE.
 */
u64 ltc_tu104_get_cbc_base_divisor(struct gk20a *g)
{
	return (u64)g->ltc_count <<
		ltc_ltcs_ltss_cbc_base_alignment_shift_v();
}
/*
 * Size and allocate the compbit (comptag) backing store for Turing.
 *
 * Derives the number of comptag lines from the requested max comptag
 * memory (one tag line covers 64KB), clamps it to the HW limit, then
 * computes the backing-store size from the per-line tag size, slice
 * count, LTC count and AMAP rounding terms before allocating the CBC
 * memory and the comptag allocator.
 *
 * Returns 0 on success (including when comptags are disabled or
 * already initialized) or a negative errno from the allocations.
 */
int ltc_tu104_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
{
	/* max memory size (MB) to cover */
	u32 max_size = gr->max_comptag_mem;
	/* one tag line covers 64KB */
	u32 max_comptag_lines = max_size << 4U;

	u32 compbit_backing_size;

	u32 hw_max_comptag_lines;

	u32 cbc_param;
	u32 ctags_size;
	u32 ctags_per_cacheline;
	u32 amap_divide_rounding, amap_swizzle_rounding;

	int err;

	nvgpu_log_fn(g, " ");

	if (max_comptag_lines == 0U) {
		return 0;
	}

	/* Already initialized */
	if (gr->max_comptag_lines) {
		return 0;
	}

	/* clamp to what HW can address in the clear upper bound field */
	hw_max_comptag_lines =
		ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v();
	if (max_comptag_lines > hw_max_comptag_lines) {
		max_comptag_lines = hw_max_comptag_lines;
	}

	cbc_param = nvgpu_readl(g, ltc_ltcs_ltss_cbc_param_r());
	ctags_size =
		ltc_ltcs_ltss_cbc_param_bytes_per_comptagline_per_slice_v(cbc_param);
	ctags_per_cacheline = gr->cacheline_size / ctags_size;
	/* AMAP rounding fields encode shifts on 2KB / 64KB bases */
	amap_divide_rounding = (2 * 1024) <<
		ltc_ltcs_ltss_cbc_param_amap_divide_rounding_v(cbc_param);
	amap_swizzle_rounding = (64 * 1024) <<
		ltc_ltcs_ltss_cbc_param_amap_swizzle_rounding_v(cbc_param);

	compbit_backing_size =
		roundup(max_comptag_lines * ctags_size, gr->cacheline_size);
	compbit_backing_size =
		compbit_backing_size * gr->slices_per_ltc * g->ltc_count;

	compbit_backing_size += g->ltc_count * amap_divide_rounding;
	compbit_backing_size += amap_swizzle_rounding;

	/* must be a multiple of 64KB */
	compbit_backing_size = roundup(compbit_backing_size, 64 * 1024);

	err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size, true);
	if (err) {
		return err;
	}

	err = gk20a_comptag_allocator_init(g, &gr->comp_tags, max_comptag_lines);
	if (err) {
		return err;
	}

	gr->max_comptag_lines = max_comptag_lines;
	gr->comptags_per_cacheline = ctags_per_cacheline;
	/* NOTE(review): stores the per-line byte size in a field named
	 * gobs_per_comptagline_per_slice — confirm the units expected by
	 * the consumers of this field. */
	gr->gobs_per_comptagline_per_slice = ctags_size;
	gr->compbit_backing_size = compbit_backing_size;

	nvgpu_log_info(g, "compbit backing store size : %d",
		compbit_backing_size);
	nvgpu_log_info(g, "max comptag lines : %d",
		max_comptag_lines);
	nvgpu_log_info(g, "gobs_per_comptagline_per_slice: %d",
		gr->gobs_per_comptagline_per_slice);

	return 0;
}
/*
 * Execute a CBC (comptag cache) control operation.
 *
 * gk20a_cbc_op_clear operates on the line range [min, max] and is
 * issued in chunks of at most 16384 lines; clean and invalidate are
 * full-cache operations and complete in a single iteration. After
 * triggering the op through the LTCS broadcast register, every
 * LTC/slice pair is polled until its op-active bit clears or a
 * 2000-retry timeout expires.
 *
 * mm.l2_op_lock is held while an op is in flight and dropped between
 * chunks so higher-priority threads can make progress.
 *
 * NOTE(review): the early return when the compbit store is empty skips
 * trace_gk20a_ltc_cbc_ctrl_done() — confirm tracing tolerates an
 * unmatched start event.
 */
int ltc_tu104_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
		       u32 min, u32 max)
{
	struct gr_gk20a *gr = &g->gr;
	struct nvgpu_timeout timeout;
	int err = 0;
	u32 ltc, slice, ctrl1, val, hw_op = 0U;
	u32 slices_per_ltc = gr->slices_per_ltc;
	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
	const u32 max_lines = 16384U;

	nvgpu_log_fn(g, " ");

	trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max);

	if (gr->compbit_store.mem.size == 0U) {
		return 0;
	}

	while (1) {
		/* inclusive upper bound of this chunk */
		const u32 iter_max = min(min + max_lines - 1, max);
		bool full_cache_op = true;

		nvgpu_mutex_acquire(&g->mm.l2_op_lock);

		nvgpu_log_info(g, "clearing CBC lines %u..%u", min, iter_max);

		if (op == gk20a_cbc_op_clear) {
			nvgpu_writel(
				g, ltc_ltcs_ltss_cbc_ctrl2_r(),
				ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f(
					min));
			nvgpu_writel(
				g, ltc_ltcs_ltss_cbc_ctrl3_r(),
				ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f(
					iter_max));
			hw_op = ltc_ltcs_ltss_cbc_ctrl1_clear_active_f();
			full_cache_op = false;
		} else if (op == gk20a_cbc_op_clean) {
			/* this is full-cache op */
			hw_op = ltc_ltcs_ltss_cbc_ctrl1_clean_active_f();
		} else if (op == gk20a_cbc_op_invalidate) {
			/* this is full-cache op */
			hw_op = ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f();
		} else {
			nvgpu_err(g, "Unknown op: %u", (unsigned)op);
			err = -EINVAL;
			goto out;
		}

		/* kick off the op on all LTCs/slices via broadcast */
		nvgpu_writel(g, ltc_ltcs_ltss_cbc_ctrl1_r(),
			nvgpu_readl(g,
				ltc_ltcs_ltss_cbc_ctrl1_r()) | hw_op);

		/* poll each LTC/slice until its op-active bit clears */
		for (ltc = 0; ltc < g->ltc_count; ltc++) {
			for (slice = 0; slice < slices_per_ltc; slice++) {
				ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() +
					ltc * ltc_stride + slice * lts_stride;

				nvgpu_timeout_init(g, &timeout, 2000,
						NVGPU_TIMER_RETRY_TIMER);
				do {
					val = nvgpu_readl(g, ctrl1);
					if (!(val & hw_op)) {
						break;
					}
					nvgpu_udelay(5);
				} while (!nvgpu_timeout_expired(&timeout));

				if (nvgpu_timeout_peek_expired(&timeout)) {
					nvgpu_err(g, "comp tag clear timeout");
					err = -EBUSY;
					goto out;
				}
			}
		}

		/* are we done? */
		if (full_cache_op || iter_max == max) {
			break;
		}

		/* note: iter_max is inclusive upper bound */
		min = iter_max + 1;

		/* give a chance for higher-priority threads to progress */
		nvgpu_mutex_release(&g->mm.l2_op_lock);
	}
out:
	trace_gk20a_ltc_cbc_ctrl_done(g->name);
	nvgpu_mutex_release(&g->mm.l2_op_lock);
	return err;
}
/*
 * LTC interrupt service routine for Turing: dispatch the gv11b
 * per-slice handler for every LTC/slice pair.
 */
void tu104_ltc_isr(struct gk20a *g)
{
	unsigned int ltc_idx;
	unsigned int slice_idx;

	/* Go through all the LTCs explicitly */
	for (ltc_idx = 0U; ltc_idx < g->ltc_count; ltc_idx++) {
		for (slice_idx = 0U; slice_idx < g->gr.slices_per_ltc;
				slice_idx++) {
			gv11b_ltc_lts_isr(g, ltc_idx, slice_idx);
		}
	}
}

View File

@@ -0,0 +1,39 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LTC_TU104_H
#define LTC_TU104_H

#include <nvgpu/types.h>

enum gk20a_cbc_op;
struct gk20a;
struct gr_gk20a;

/* Service LTC interrupts on every LTC/slice pair. */
void tu104_ltc_isr(struct gk20a *g);
/* Alignment divisor for the CBC backing-store base address. */
u64 ltc_tu104_get_cbc_base_divisor(struct gk20a *g);
/* Init floorsweep state and cache CBC geometry. */
void ltc_tu104_init_fs_state(struct gk20a *g);
/* Size and allocate the comptag backing store. */
int ltc_tu104_init_comptags(struct gk20a *g, struct gr_gk20a *gr);
/* Run a CBC clear/clean/invalidate operation. */
int ltc_tu104_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
		u32 min, u32 max);

#endif

View File

@@ -0,0 +1,412 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/types.h>
#include <nvgpu/io.h>
#include <nvgpu/utils.h>
#include <nvgpu/mc.h>
#include "gk20a/gk20a.h"
#include "common/mc/mc_gp10b.h"
#include "mc_tu104.h"
#include "tu104/func_tu104.h"
#include "nvgpu/hw/tu104/hw_mc_tu104.h"
#include "nvgpu/hw/tu104/hw_func_tu104.h"
#include "nvgpu/hw/tu104/hw_ctrl_tu104.h"
/* Enable one bit in the LEAF_EN_SET(leaf_reg_index) register. */
void intr_tu104_leaf_en_set(struct gk20a *g, u32 leaf_reg_index,
				u32 leaf_reg_bit)
{
	u32 reg = func_priv_cpu_intr_leaf_en_set_r(leaf_reg_index);
	u32 enables = nvgpu_func_readl(g, reg);

	enables |= BIT(leaf_reg_bit);
	nvgpu_func_writel(g, reg, enables);
}
/* Set one bit in the LEAF_EN_CLEAR(leaf_reg_index) register. */
void intr_tu104_leaf_en_clear(struct gk20a *g, u32 leaf_reg_index,
				u32 leaf_reg_bit)
{
	u32 reg = func_priv_cpu_intr_leaf_en_clear_r(leaf_reg_index);
	u32 clears = nvgpu_func_readl(g, reg);

	clears |= BIT(leaf_reg_bit);
	nvgpu_func_writel(g, reg, clears);
}
/*
 * Write one bit to the LEAF(leaf_reg_index) register.
 * Used to acknowledge a pending vector (see intr_tu104_intr_clear_leaf_vector).
 */
static void intr_tu104_leaf_clear(struct gk20a *g, u32 leaf_reg_index,
				u32 leaf_reg_bit)
{
	u32 reg = func_priv_cpu_intr_leaf_r(leaf_reg_index);

	nvgpu_func_writel(g, reg, BIT(leaf_reg_bit));
}
/* Enable one bit in the TOP_EN_SET(top_reg_index) register. */
void intr_tu104_top_en_set(struct gk20a *g, u32 top_reg_index,
				u32 top_reg_bit)
{
	u32 reg = func_priv_cpu_intr_top_en_set_r(top_reg_index);
	u32 enables = nvgpu_func_readl(g, reg);

	enables |= BIT(top_reg_bit);
	nvgpu_func_writel(g, reg, enables);
}
/*
 * Enable an interrupt vector end-to-end: its bit in the LEAF enable
 * register and its subtree's bit in the TOP enable register.
 */
void intr_tu104_vector_en_set(struct gk20a *g, u32 intr_vector)
{
	u32 subtree = NV_CPU_INTR_GPU_VECTOR_TO_SUBTREE(intr_vector);

	intr_tu104_leaf_en_set(g,
		NV_CPU_INTR_GPU_VECTOR_TO_LEAF_REG(intr_vector),
		NV_CPU_INTR_GPU_VECTOR_TO_LEAF_BIT(intr_vector));

	intr_tu104_top_en_set(g,
		NV_CPU_INTR_SUBTREE_TO_TOP_IDX(subtree),
		NV_CPU_INTR_SUBTREE_TO_TOP_BIT(subtree));
}
/*
 * Disable an interrupt vector in its LEAF enable register.
 * The subtree's TOP enable is left alone (other vectors may share it).
 */
void intr_tu104_vector_en_clear(struct gk20a *g, u32 intr_vector)
{
	u32 leaf_reg = NV_CPU_INTR_GPU_VECTOR_TO_LEAF_REG(intr_vector);
	u32 leaf_bit = NV_CPU_INTR_GPU_VECTOR_TO_LEAF_BIT(intr_vector);

	intr_tu104_leaf_en_clear(g, leaf_reg, leaf_bit);
}
/* Acknowledge (clear) one interrupt vector in its LEAF register. */
void intr_tu104_intr_clear_leaf_vector(struct gk20a *g, u32 intr_vector)
{
	u32 leaf_reg = NV_CPU_INTR_GPU_VECTOR_TO_LEAF_REG(intr_vector);
	u32 leaf_bit = NV_CPU_INTR_GPU_VECTOR_TO_LEAF_BIT(intr_vector);

	intr_tu104_leaf_clear(g, leaf_reg, leaf_bit);
}
/* Report whether the given interrupt vector is pending in its LEAF register. */
bool intr_tu104_vector_intr_pending(struct gk20a *g, u32 intr_vector)
{
	u32 leaf_reg = NV_CPU_INTR_GPU_VECTOR_TO_LEAF_REG(intr_vector);
	u32 pending_mask = BIT(NV_CPU_INTR_GPU_VECTOR_TO_LEAF_BIT(intr_vector));
	u32 leaf_val = nvgpu_func_readl(g, func_priv_cpu_intr_leaf_r(leaf_reg));

	return (leaf_val & pending_mask) != 0U;
}
/*
 * Enable stalling interrupts via the legacy PMC tree.
 *
 * Everything is masked first, then the exact wanted set (units plus the
 * active engines) is enabled. The final mask is also remembered in
 * mc_intr_mask_restore so pause/resume can reinstate it.
 */
static void intr_tu104_stall_enable(struct gk20a *g)
{
	u32 stall_mask = gk20a_fifo_engine_interrupt_mask(g);

	nvgpu_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
		0xffffffff);

	stall_mask |= mc_intr_pfifo_pending_f() |
		mc_intr_priv_ring_pending_f() |
		mc_intr_pbus_pending_f() |
		mc_intr_ltc_pending_f() |
		mc_intr_nvlink_pending_f() |
		mc_intr_pfb_pending_f();

	g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING] = stall_mask;

	nvgpu_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_STALLING), stall_mask);
}
static void intr_tu104_nonstall_enable(struct gk20a *g)
{
u32 i;
u32 nonstall_intr_base = 0;
u64 nonstall_intr_mask = 0;
u32 active_engine_id, intr_mask;
/* Keep NV_PMC_INTR(1) disabled */
nvgpu_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
0xffffffff);
/*
* Enable nonstall interrupts in TOP
* Enable all engine specific non-stall interrupts in LEAF
*
* We need to read and add
* ctrl_legacy_engine_nonstall_intr_base_vectorid_r()
* to get correct interrupt id in NV_CTRL tree
*/
nonstall_intr_base = nvgpu_readl(g,
ctrl_legacy_engine_nonstall_intr_base_vectorid_r());
for (i = 0; i < g->fifo.num_engines; i++) {
active_engine_id = g->fifo.active_engines_list[i];
intr_mask = g->fifo.engine_info[active_engine_id].intr_mask;
nonstall_intr_mask |= intr_mask << nonstall_intr_base;
}
nvgpu_func_writel(g,
func_priv_cpu_intr_top_en_set_r(
NV_CPU_INTR_SUBTREE_TO_TOP_IDX(
NV_CPU_INTR_TOP_NONSTALL_SUBTREE)),
BIT(NV_CPU_INTR_SUBTREE_TO_TOP_BIT(
NV_CPU_INTR_TOP_NONSTALL_SUBTREE)));
nvgpu_func_writel(g,
func_priv_cpu_intr_leaf_en_set_r(
NV_CPU_INTR_SUBTREE_TO_LEAF_REG0(
NV_CPU_INTR_TOP_NONSTALL_SUBTREE)),
u64_lo32(nonstall_intr_mask));
nvgpu_func_writel(g,
func_priv_cpu_intr_leaf_en_set_r(
NV_CPU_INTR_SUBTREE_TO_LEAF_REG1(
NV_CPU_INTR_TOP_NONSTALL_SUBTREE)),
u64_hi32(nonstall_intr_mask));
}
/*
 * Mask every interrupt source: both legacy PMC trees (stalling and
 * nonstalling) and every TOP enable register of the NV_CTRL tree.
 */
void intr_tu104_mask(struct gk20a *g)
{
	u32 num_top_regs, i;

	nvgpu_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
		0xffffffff);
	nvgpu_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
		0xffffffff);

	num_top_regs = func_priv_cpu_intr_top_en_clear__size_1_v();
	for (i = 0; i < num_top_regs; i++) {
		nvgpu_func_writel(g, func_priv_cpu_intr_top_en_clear_r(i),
			0xffffffff);
	}
}
/*
 * Enable all required interrupts.
 *
 * Stalling interrupts go through the legacy PMC tree; nonstalling
 * interrupts go through the NV_CTRL/function tree.
 */
void intr_tu104_enable(struct gk20a *g)
{
	intr_tu104_stall_enable(g);
	intr_tu104_nonstall_enable(g);
}
/*
 * Return non-zero if nonstall interrupts are pending, i.e. if the
 * nonstall subtree's bit is set in its TOP register.
 */
u32 intr_tu104_nonstall(struct gk20a *g)
{
	u32 top_idx = NV_CPU_INTR_SUBTREE_TO_TOP_IDX(
			NV_CPU_INTR_TOP_NONSTALL_SUBTREE);
	u32 top_mask = BIT(NV_CPU_INTR_SUBTREE_TO_TOP_BIT(
			NV_CPU_INTR_TOP_NONSTALL_SUBTREE));
	u32 top_val = nvgpu_func_readl(g, func_priv_cpu_intr_top_r(top_idx));

	return top_val & top_mask;
}
/* pause all nonstall interrupts */
void intr_tu104_nonstall_pause(struct gk20a *g)
{
nvgpu_func_writel(g,
func_priv_cpu_intr_top_en_clear_r(
NV_CPU_INTR_SUBTREE_TO_TOP_IDX(
NV_CPU_INTR_TOP_NONSTALL_SUBTREE)),
BIT(NV_CPU_INTR_SUBTREE_TO_TOP_BIT(
NV_CPU_INTR_TOP_NONSTALL_SUBTREE)));
}
/* resume all nonstall interrupts */
void intr_tu104_nonstall_resume(struct gk20a *g)
{
nvgpu_func_writel(g,
func_priv_cpu_intr_top_en_set_r(
NV_CPU_INTR_SUBTREE_TO_TOP_IDX(
NV_CPU_INTR_TOP_NONSTALL_SUBTREE)),
BIT(NV_CPU_INTR_SUBTREE_TO_TOP_BIT(
NV_CPU_INTR_TOP_NONSTALL_SUBTREE)));
}
/* Handle and clear all nonstall interrupts */
u32 intr_tu104_isr_nonstall(struct gk20a *g)
{
u32 i;
u32 nonstall_intr_base = 0;
u64 nonstall_intr_mask = 0;
u64 nonstall_intr_mask_lo, nonstall_intr_mask_hi;
u32 intr_leaf_reg0, intr_leaf_reg1;
u32 active_engine_id, intr_mask;
u32 ops = 0;
intr_leaf_reg0 = nvgpu_func_readl(g,
func_priv_cpu_intr_leaf_r(
NV_CPU_INTR_SUBTREE_TO_LEAF_REG0(
NV_CPU_INTR_TOP_NONSTALL_SUBTREE)));
intr_leaf_reg1 = nvgpu_func_readl(g,
func_priv_cpu_intr_leaf_r(
NV_CPU_INTR_SUBTREE_TO_LEAF_REG1(
NV_CPU_INTR_TOP_NONSTALL_SUBTREE)));
nonstall_intr_base = nvgpu_readl(g,
ctrl_legacy_engine_nonstall_intr_base_vectorid_r());
for (i = 0; i < g->fifo.num_engines; i++) {
active_engine_id = g->fifo.active_engines_list[i];
intr_mask = g->fifo.engine_info[active_engine_id].intr_mask;
nonstall_intr_mask = intr_mask << nonstall_intr_base;
nonstall_intr_mask_lo = u64_lo32(nonstall_intr_mask);
nonstall_intr_mask_hi = u64_hi32(nonstall_intr_mask);
if ((nonstall_intr_mask_lo & intr_leaf_reg0) ||
(nonstall_intr_mask_hi & intr_leaf_reg1)) {
nvgpu_log(g, gpu_dbg_intr, "nonstall intr from engine %d",
active_engine_id);
nvgpu_func_writel(g,
func_priv_cpu_intr_leaf_r(
NV_CPU_INTR_SUBTREE_TO_LEAF_REG0(
NV_CPU_INTR_TOP_NONSTALL_SUBTREE)),
nonstall_intr_mask_lo);
nvgpu_func_writel(g,
func_priv_cpu_intr_leaf_r(
NV_CPU_INTR_SUBTREE_TO_LEAF_REG1(
NV_CPU_INTR_TOP_NONSTALL_SUBTREE)),
nonstall_intr_mask_hi);
ops |= (GK20A_NONSTALL_OPS_WAKEUP_SEMAPHORE |
GK20A_NONSTALL_OPS_POST_EVENTS);
}
}
return ops;
}
/* Return non-zero if stall interrupts are pending */
u32 intr_tu104_stall(struct gk20a *g)
{
u32 mc_intr_0;
mc_intr_0 = mc_gp10b_intr_stall(g);
if (mc_intr_0) {
return mc_intr_0;
}
if (g->ops.mc.is_intr_hub_pending) {
return g->ops.mc.is_intr_hub_pending(g, 0);
}
return 0;
}
/*
 * Return true if a HUB interrupt is pending.
 *
 * mc_intr_0 is unused here: HUB status is taken from the MMU
 * fault-pending query rather than the PMC interrupt word.
 */
bool intr_tu104_is_intr_hub_pending(struct gk20a *g, u32 mc_intr_0)
{
	return g->ops.mm.mmu_fault_pending(g);
}
/* pause all stall interrupts */
void intr_tu104_stall_pause(struct gk20a *g)
{
	/* Mask the legacy PMC stalling interrupts... */
	mc_gp10b_intr_stall_pause(g);
	/* ...and the HUB interrupts reported through FB. */
	g->ops.fb.disable_hub_intr(g);
}
/* resume all stall interrupts */
void intr_tu104_stall_resume(struct gk20a *g)
{
	/* Re-enable the legacy PMC stalling interrupts... */
	mc_gp10b_intr_stall_resume(g);
	/* ...and the HUB interrupts reported through FB. */
	g->ops.fb.enable_hub_intr(g);
}
#define MAX_INTR_TOP_REGS (2U)

/*
 * Debug aid: log every interrupt source that is still pending —
 * nonstall and stall PMC state, the HUB, and each NV_CTRL TOP register.
 * Logging only; nothing is acknowledged here.
 */
void intr_tu104_log_pending_intrs(struct gk20a *g)
{
	bool pending;
	u32 intr, i;

	intr = intr_tu104_nonstall(g);
	if (intr) {
		nvgpu_info(g, "Pending nonstall intr=0x%08x", intr);
	}

	intr = mc_gp10b_intr_stall(g);
	if (intr) {
		nvgpu_info(g, "Pending stall intr=0x%08x", intr);
	}

	if (g->ops.mc.is_intr_hub_pending) {
		pending = g->ops.mc.is_intr_hub_pending(g, 0);
		if (pending) {
			nvgpu_info(g, "Pending hub intr");
		}
	}

	for (i = 0; i < MAX_INTR_TOP_REGS; i++) {
		intr = nvgpu_func_readl(g,
			func_priv_cpu_intr_top_r(i));
		if (intr == 0U) {
			continue;
		}
		/* %u: i is unsigned; %d would be a format mismatch. */
		nvgpu_info(g, "Pending TOP%u intr=0x%08x", i, intr);
	}
}
/*
 * FBPA interrupt service routine.
 *
 * Reads the per-partition pending mask from MC and invokes the FB
 * fbpa handler once for each partition with its bit set.
 */
void mc_tu104_fbpa_isr(struct gk20a *g)
{
	u32 intr_fbpa, fbpas;
	u32 i, num_fbpas;

	intr_fbpa = gk20a_readl(g, mc_intr_fbpa_r());
	fbpas = mc_intr_fbpa_part_mask_v(intr_fbpa);

	num_fbpas = nvgpu_get_litter_value(g, GPU_LIT_NUM_FBPAS);
	for (i = 0u; i < num_fbpas; i++) {
		/*
		 * Unsigned shift: "1 << i" is a signed-int shift and is
		 * undefined behavior for i == 31.
		 */
		if (!(fbpas & (1U << i))) {
			continue;
		}
		g->ops.fb.handle_fbpa_intr(g, i);
	}
}

View File

@@ -0,0 +1,65 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_MC_TU104_H
#define NVGPU_MC_TU104_H

#include <nvgpu/types.h>

/*
 * NV_CTRL interrupt tree layout helpers.
 *
 * Each "subtree" owns 64 leaf bits, split across a pair of 32-bit LEAF
 * registers, and one summary bit in the 32-bit TOP registers. These
 * macros convert between a GPU interrupt vector number, its LEAF
 * register index/bit, its subtree number, and the subtree's TOP
 * register index/bit.
 */
#define NV_CPU_INTR_SUBTREE_TO_TOP_IDX(i) ((i) / 32)
#define NV_CPU_INTR_SUBTREE_TO_TOP_BIT(i) ((i) % 32)
/* A subtree spans two consecutive LEAF registers: 2*i and 2*i + 1. */
#define NV_CPU_INTR_SUBTREE_TO_LEAF_REG0(i) ((i)*2)
#define NV_CPU_INTR_SUBTREE_TO_LEAF_REG1(i) (((i)*2) + 1)
#define NV_CPU_INTR_GPU_VECTOR_TO_LEAF_REG(i) ((i) / 32)
#define NV_CPU_INTR_GPU_VECTOR_TO_LEAF_BIT(i) ((i) % 32)
#define NV_CPU_INTR_GPU_VECTOR_TO_SUBTREE(i) ((NV_CPU_INTR_GPU_VECTOR_TO_LEAF_REG(i)) / 2)
/* Subtree carrying all engine nonstall interrupts. */
#define NV_CPU_INTR_TOP_NONSTALL_SUBTREE 0

struct gk20a;

void intr_tu104_leaf_en_set(struct gk20a *g, u32 leaf_reg_index,
u32 leaf_reg_bit);
void intr_tu104_leaf_en_clear(struct gk20a *g, u32 leaf_reg_index,
u32 leaf_reg_bit);
void intr_tu104_top_en_set(struct gk20a *g, u32 top_reg_index,
u32 top_reg_bit);
void intr_tu104_vector_en_set(struct gk20a *g, u32 intr_vector);
void intr_tu104_vector_en_clear(struct gk20a *g, u32 intr_vector);
bool intr_tu104_vector_intr_pending(struct gk20a *g, u32 intr_vector);
void intr_tu104_intr_clear_leaf_vector(struct gk20a *g, u32 intr_vector);
void intr_tu104_mask(struct gk20a *g);
void intr_tu104_enable(struct gk20a *g);
u32 intr_tu104_stall(struct gk20a *g);
void intr_tu104_stall_pause(struct gk20a *g);
void intr_tu104_stall_resume(struct gk20a *g);
u32 intr_tu104_nonstall(struct gk20a *g);
void intr_tu104_nonstall_pause(struct gk20a *g);
void intr_tu104_nonstall_resume(struct gk20a *g);
u32 intr_tu104_isr_nonstall(struct gk20a *g);
bool intr_tu104_is_intr_hub_pending(struct gk20a *g, u32 mc_intr_0);
void intr_tu104_log_pending_intrs(struct gk20a *g);
void mc_tu104_fbpa_isr(struct gk20a *g);
#endif /* NVGPU_MC_TU104_H */

View File

@@ -0,0 +1,227 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_bus_tu104_h_
#define _hw_bus_tu104_h_

/*
 * NV_PBUS register/field accessors for TU104. These follow nvgpu's
 * machine-produced hwref pattern (see the naming-convention comment
 * above); offsets come from the TU104 manuals — do not hand-edit
 * individual values.
 */

/* Scratch registers. */
static inline u32 bus_sw_scratch_r(u32 i)
{
	return 0x00001400U + i*4U;
}
/* BAR0 window: maps a region of vid/sys memory into BAR0. */
static inline u32 bus_bar0_window_r(void)
{
	return 0x00001700U;
}
static inline u32 bus_bar0_window_base_f(u32 v)
{
	return (v & 0xffffffU) << 0U;
}
static inline u32 bus_bar0_window_target_vid_mem_f(void)
{
	return 0x0U;
}
static inline u32 bus_bar0_window_target_sys_mem_coherent_f(void)
{
	return 0x2000000U;
}
static inline u32 bus_bar0_window_target_sys_mem_noncoherent_f(void)
{
	return 0x3000000U;
}
static inline u32 bus_bar0_window_target_bar0_window_base_shift_v(void)
{
	return 0x00000010U;
}
/* BAR1/BAR2 block binds: instance block pointer, target and mode. */
static inline u32 bus_bar1_block_r(void)
{
	return 0x00001704U;
}
static inline u32 bus_bar1_block_ptr_f(u32 v)
{
	return (v & 0xfffffffU) << 0U;
}
static inline u32 bus_bar1_block_target_vid_mem_f(void)
{
	return 0x0U;
}
static inline u32 bus_bar1_block_target_sys_mem_coh_f(void)
{
	return 0x20000000U;
}
static inline u32 bus_bar1_block_target_sys_mem_ncoh_f(void)
{
	return 0x30000000U;
}
static inline u32 bus_bar1_block_mode_virtual_f(void)
{
	return 0x80000000U;
}
static inline u32 bus_bar2_block_r(void)
{
	return 0x00001714U;
}
static inline u32 bus_bar2_block_ptr_f(u32 v)
{
	return (v & 0xfffffffU) << 0U;
}
static inline u32 bus_bar2_block_target_vid_mem_f(void)
{
	return 0x0U;
}
static inline u32 bus_bar2_block_target_sys_mem_coh_f(void)
{
	return 0x20000000U;
}
static inline u32 bus_bar2_block_target_sys_mem_ncoh_f(void)
{
	return 0x30000000U;
}
static inline u32 bus_bar2_block_mode_virtual_f(void)
{
	return 0x80000000U;
}
static inline u32 bus_bar1_block_ptr_shift_v(void)
{
	return 0x0000000cU;
}
static inline u32 bus_bar2_block_ptr_shift_v(void)
{
	return 0x0000000cU;
}
/* Bind status: pending/outstanding flags for BAR1 and BAR2 binds. */
static inline u32 bus_bind_status_r(void)
{
	return 0x00001710U;
}
static inline u32 bus_bind_status_bar1_pending_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 bus_bind_status_bar1_pending_empty_f(void)
{
	return 0x0U;
}
static inline u32 bus_bind_status_bar1_pending_busy_f(void)
{
	return 0x1U;
}
static inline u32 bus_bind_status_bar1_outstanding_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 bus_bind_status_bar1_outstanding_false_f(void)
{
	return 0x0U;
}
static inline u32 bus_bind_status_bar1_outstanding_true_f(void)
{
	return 0x2U;
}
static inline u32 bus_bind_status_bar2_pending_v(u32 r)
{
	return (r >> 2U) & 0x1U;
}
static inline u32 bus_bind_status_bar2_pending_empty_f(void)
{
	return 0x0U;
}
static inline u32 bus_bind_status_bar2_pending_busy_f(void)
{
	return 0x4U;
}
static inline u32 bus_bind_status_bar2_outstanding_v(u32 r)
{
	return (r >> 3U) & 0x1U;
}
static inline u32 bus_bind_status_bar2_outstanding_false_f(void)
{
	return 0x0U;
}
static inline u32 bus_bind_status_bar2_outstanding_true_f(void)
{
	return 0x8U;
}
/* PBUS interrupt status and enables (pri squash/fecserr/timeout). */
static inline u32 bus_intr_0_r(void)
{
	return 0x00001100U;
}
static inline u32 bus_intr_0_pri_squash_m(void)
{
	return 0x1U << 1U;
}
static inline u32 bus_intr_0_pri_fecserr_m(void)
{
	return 0x1U << 2U;
}
static inline u32 bus_intr_0_pri_timeout_m(void)
{
	return 0x1U << 3U;
}
static inline u32 bus_intr_en_0_r(void)
{
	return 0x00001140U;
}
static inline u32 bus_intr_en_0_pri_squash_m(void)
{
	return 0x1U << 1U;
}
static inline u32 bus_intr_en_0_pri_fecserr_m(void)
{
	return 0x1U << 2U;
}
static inline u32 bus_intr_en_0_pri_timeout_m(void)
{
	return 0x1U << 3U;
}
#endif

View File

@@ -0,0 +1,187 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_ccsr_tu104_h_
#define _hw_ccsr_tu104_h_

/*
 * NV_PCCSR (channel control/status) register accessors for TU104.
 * Machine-produced hwref-style helpers; offsets come from the TU104
 * manuals — do not hand-edit individual values.
 */

/* Per-channel instance block bind register (one per channel id). */
static inline u32 ccsr_channel_inst_r(u32 i)
{
	return 0x00800000U + i*8U;
}
static inline u32 ccsr_channel_inst__size_1_v(void)
{
	return 0x00001000U;
}
static inline u32 ccsr_channel_inst_ptr_f(u32 v)
{
	return (v & 0xfffffffU) << 0U;
}
static inline u32 ccsr_channel_inst_target_vid_mem_f(void)
{
	return 0x0U;
}
static inline u32 ccsr_channel_inst_target_sys_mem_coh_f(void)
{
	return 0x20000000U;
}
static inline u32 ccsr_channel_inst_target_sys_mem_ncoh_f(void)
{
	return 0x30000000U;
}
static inline u32 ccsr_channel_inst_bind_false_f(void)
{
	return 0x0U;
}
static inline u32 ccsr_channel_inst_bind_true_f(void)
{
	return 0x80000000U;
}
/* Per-channel control/status register (one per channel id). */
static inline u32 ccsr_channel_r(u32 i)
{
	return 0x00800004U + i*8U;
}
static inline u32 ccsr_channel__size_1_v(void)
{
	return 0x00001000U;
}
static inline u32 ccsr_channel_enable_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ccsr_channel_enable_set_f(u32 v)
{
	return (v & 0x1U) << 10U;
}
static inline u32 ccsr_channel_enable_set_true_f(void)
{
	return 0x400U;
}
static inline u32 ccsr_channel_enable_clr_true_f(void)
{
	return 0x800U;
}
/* Channel scheduling status field and its ctx-reload related values. */
static inline u32 ccsr_channel_status_v(u32 r)
{
	return (r >> 24U) & 0xfU;
}
static inline u32 ccsr_channel_status_pending_ctx_reload_v(void)
{
	return 0x00000002U;
}
static inline u32 ccsr_channel_status_pending_acq_ctx_reload_v(void)
{
	return 0x00000004U;
}
static inline u32 ccsr_channel_status_on_pbdma_ctx_reload_v(void)
{
	return 0x0000000aU;
}
static inline u32 ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v(void)
{
	return 0x0000000bU;
}
static inline u32 ccsr_channel_status_on_eng_ctx_reload_v(void)
{
	return 0x0000000cU;
}
static inline u32 ccsr_channel_status_on_eng_pending_ctx_reload_v(void)
{
	return 0x0000000dU;
}
static inline u32 ccsr_channel_status_on_eng_pending_acq_ctx_reload_v(void)
{
	return 0x0000000eU;
}
static inline u32 ccsr_channel_next_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 ccsr_channel_next_true_v(void)
{
	return 0x00000001U;
}
static inline u32 ccsr_channel_force_ctx_reload_true_f(void)
{
	return 0x100U;
}
/* PBDMA/engine faulted flags and their reset (write-1) values. */
static inline u32 ccsr_channel_pbdma_faulted_f(u32 v)
{
	return (v & 0x1U) << 22U;
}
static inline u32 ccsr_channel_pbdma_faulted_reset_f(void)
{
	return 0x400000U;
}
static inline u32 ccsr_channel_eng_faulted_f(u32 v)
{
	return (v & 0x1U) << 23U;
}
static inline u32 ccsr_channel_eng_faulted_v(u32 r)
{
	return (r >> 23U) & 0x1U;
}
static inline u32 ccsr_channel_eng_faulted_reset_f(void)
{
	return 0x800000U;
}
static inline u32 ccsr_channel_eng_faulted_true_v(void)
{
	return 0x00000001U;
}
static inline u32 ccsr_channel_busy_v(u32 r)
{
	return (r >> 28U) & 0x1U;
}
#endif

View File

@@ -0,0 +1,107 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_ce_tu104_h_
#define _hw_ce_tu104_h_

/*
 * Copy engine (NV_PCE/LCE) register accessors for TU104.
 * Machine-produced hwref-style helpers; offsets come from the TU104
 * manuals — do not hand-edit individual values.
 */

/* Per-LCE interrupt status register; each *_pending_f value doubles
 * as the *_reset_f write-1-to-clear value. */
static inline u32 ce_intr_status_r(u32 i)
{
	return 0x00104410U + i*128U;
}
static inline u32 ce_intr_status_blockpipe_pending_f(void)
{
	return 0x1U;
}
static inline u32 ce_intr_status_blockpipe_reset_f(void)
{
	return 0x1U;
}
static inline u32 ce_intr_status_nonblockpipe_pending_f(void)
{
	return 0x2U;
}
static inline u32 ce_intr_status_nonblockpipe_reset_f(void)
{
	return 0x2U;
}
static inline u32 ce_intr_status_launcherr_pending_f(void)
{
	return 0x4U;
}
static inline u32 ce_intr_status_launcherr_reset_f(void)
{
	return 0x4U;
}
static inline u32 ce_intr_status_invalid_config_pending_f(void)
{
	return 0x8U;
}
static inline u32 ce_intr_status_invalid_config_reset_f(void)
{
	return 0x8U;
}
static inline u32 ce_intr_status_mthd_buffer_fault_pending_f(void)
{
	return 0x10U;
}
static inline u32 ce_intr_status_mthd_buffer_fault_reset_f(void)
{
	return 0x10U;
}
/* PCE-to-LCE mapping register. */
static inline u32 ce_pce_map_r(void)
{
	return 0x00104028U;
}
#endif

View File

@@ -0,0 +1,87 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_ctrl_tu104_h_
#define _hw_ctrl_tu104_h_

/*
 * NV_CTRL register accessors for TU104 (doorbells and interrupt-tree
 * configuration). Machine-produced hwref-style helpers; offsets come
 * from the TU104 manuals — do not hand-edit individual values.
 */

/* Usermode doorbell registers. */
static inline u32 ctrl_doorbell_r(u32 i)
{
	return 0x00b64000U + i*8U;
}
static inline u32 ctrl_doorbell_vector_f(u32 v)
{
	return (v & 0xfffU) << 0U;
}
static inline u32 ctrl_doorbell_runlist_id_f(u32 v)
{
	return (v & 0x7fU) << 16U;
}
/* Per-virtual-channel configuration. */
static inline u32 ctrl_virtual_channel_cfg_r(u32 i)
{
	return 0x00b65000U + i*4U;
}
static inline u32 ctrl_virtual_channel_cfg_pending_enable_true_f(void)
{
	return 0x80000000U;
}
/* Base vector id that rebases legacy engine nonstall interrupt masks
 * into the NV_CTRL interrupt tree (see mc_tu104.c). */
static inline u32 ctrl_legacy_engine_nonstall_intr_base_vectorid_r(void)
{
	return 0x00b66884U;
}
static inline u32 ctrl_legacy_engine_nonstall_intr_base_vectorid_vector_v(u32 r)
{
	return (r >> 0U) & 0xfffU;
}
#endif

View File

@@ -0,0 +1,443 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_ctxsw_prog_tu104_h_
#define _hw_ctxsw_prog_tu104_h_

/*
 * TU104 FECS context-switch program image layout accessors.
 * Convention: _o() byte offset of an element inside the ctx image,
 * _v() extracted field or named constant, _f() value placed into a field,
 * _m() field mask. Values are generated from the hardware manuals.
 */

static inline u32 ctxsw_prog_fecs_header_v(void) { return 0x00000100U; }

/* Main (golden) context image header fields. */
static inline u32 ctxsw_prog_main_image_num_gpcs_o(void) { return 0x00000008U; }
static inline u32 ctxsw_prog_main_image_ctl_o(void) { return 0x0000000cU; }
static inline u32 ctxsw_prog_main_image_ctl_type_f(u32 v) { return (v & 0x3fU) << 0U; }
static inline u32 ctxsw_prog_main_image_ctl_type_undefined_v(void) { return 0x00000000U; }
static inline u32 ctxsw_prog_main_image_ctl_type_opengl_v(void) { return 0x00000008U; }
static inline u32 ctxsw_prog_main_image_ctl_type_dx9_v(void) { return 0x00000010U; }
static inline u32 ctxsw_prog_main_image_ctl_type_dx10_v(void) { return 0x00000011U; }
static inline u32 ctxsw_prog_main_image_ctl_type_dx11_v(void) { return 0x00000012U; }
static inline u32 ctxsw_prog_main_image_ctl_type_compute_v(void) { return 0x00000020U; }
static inline u32 ctxsw_prog_main_image_ctl_type_per_veid_header_v(void) { return 0x00000021U; }
static inline u32 ctxsw_prog_main_image_patch_count_o(void) { return 0x00000010U; }
static inline u32 ctxsw_prog_main_image_context_id_o(void) { return 0x000000f0U; }
static inline u32 ctxsw_prog_main_image_patch_adr_lo_o(void) { return 0x00000014U; }
static inline u32 ctxsw_prog_main_image_patch_adr_hi_o(void) { return 0x00000018U; }

/* Zcull buffer configuration. */
static inline u32 ctxsw_prog_main_image_zcull_o(void) { return 0x0000001cU; }
static inline u32 ctxsw_prog_main_image_zcull_mode_no_ctxsw_v(void) { return 0x00000001U; }
static inline u32 ctxsw_prog_main_image_zcull_mode_separate_buffer_v(void) { return 0x00000002U; }
static inline u32 ctxsw_prog_main_image_zcull_ptr_o(void) { return 0x00000020U; }

/* Performance-monitor (PM/SMPC) mode fields. */
static inline u32 ctxsw_prog_main_image_pm_o(void) { return 0x00000028U; }
static inline u32 ctxsw_prog_main_image_pm_mode_m(void) { return 0x7U << 0U; }
static inline u32 ctxsw_prog_main_image_pm_mode_no_ctxsw_f(void) { return 0x0U; }
static inline u32 ctxsw_prog_main_image_pm_mode_stream_out_ctxsw_f(void) { return 0x2U; }
static inline u32 ctxsw_prog_main_image_pm_smpc_mode_m(void) { return 0x7U << 3U; }
static inline u32 ctxsw_prog_main_image_pm_smpc_mode_ctxsw_f(void) { return 0x8U; }
static inline u32 ctxsw_prog_main_image_pm_smpc_mode_no_ctxsw_f(void) { return 0x0U; }
static inline u32 ctxsw_prog_main_image_pm_ptr_o(void) { return 0x0000002cU; }

/* Save/restore operation counters. */
static inline u32 ctxsw_prog_main_image_num_save_ops_o(void) { return 0x000000f4U; }
static inline u32 ctxsw_prog_main_image_num_wfi_save_ops_o(void) { return 0x000000d0U; }
static inline u32 ctxsw_prog_main_image_num_cta_save_ops_o(void) { return 0x000000d4U; }
static inline u32 ctxsw_prog_main_image_num_gfxp_save_ops_o(void) { return 0x000000d8U; }
static inline u32 ctxsw_prog_main_image_num_cilp_save_ops_o(void) { return 0x000000dcU; }
static inline u32 ctxsw_prog_main_image_num_restore_ops_o(void) { return 0x000000f8U; }

/* 49-bit buffer pointers split as lo (32 bits) / hi (17 bits). */
static inline u32 ctxsw_prog_main_image_zcull_ptr_hi_o(void) { return 0x00000060U; }
static inline u32 ctxsw_prog_main_image_zcull_ptr_hi_v_f(u32 v) { return (v & 0x1ffffU) << 0U; }
static inline u32 ctxsw_prog_main_image_pm_ptr_hi_o(void) { return 0x00000094U; }
static inline u32 ctxsw_prog_main_image_full_preemption_ptr_hi_o(void) { return 0x00000064U; }
static inline u32 ctxsw_prog_main_image_full_preemption_ptr_hi_v_f(u32 v) { return (v & 0x1ffffU) << 0U; }
static inline u32 ctxsw_prog_main_image_full_preemption_ptr_o(void) { return 0x00000068U; }
static inline u32 ctxsw_prog_main_image_full_preemption_ptr_v_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_hi_o(void) { return 0x00000070U; }
static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_hi_v_f(u32 v) { return (v & 0x1ffffU) << 0U; }
static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_o(void) { return 0x00000074U; }
static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_v_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 ctxsw_prog_main_image_context_buffer_ptr_hi_o(void) { return 0x00000078U; }
static inline u32 ctxsw_prog_main_image_context_buffer_ptr_hi_v_f(u32 v) { return (v & 0x1ffffU) << 0U; }
static inline u32 ctxsw_prog_main_image_context_buffer_ptr_o(void) { return 0x0000007cU; }
static inline u32 ctxsw_prog_main_image_context_buffer_ptr_v_f(u32 v) { return (v & 0xffffffffU) << 0U; }

/* Magic value used to sanity-check a main context image. */
static inline u32 ctxsw_prog_main_image_magic_value_o(void) { return 0x000000fcU; }
static inline u32 ctxsw_prog_main_image_magic_value_v_value_v(void) { return 0x600dc0deU; }

static inline u32 ctxsw_prog_local_priv_register_ctl_o(void) { return 0x0000000cU; }
static inline u32 ctxsw_prog_local_priv_register_ctl_offset_v(u32 r) { return (r >> 0U) & 0xffffU; }

/* Global circular buffer / pagepool / control block pointers. */
static inline u32 ctxsw_prog_main_image_global_cb_ptr_o(void) { return 0x000000b8U; }
static inline u32 ctxsw_prog_main_image_global_cb_ptr_v_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 ctxsw_prog_main_image_global_cb_ptr_hi_o(void) { return 0x000000bcU; }
static inline u32 ctxsw_prog_main_image_global_cb_ptr_hi_v_f(u32 v) { return (v & 0x1ffffU) << 0U; }
static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_o(void) { return 0x000000c0U; }
static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_v_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_hi_o(void) { return 0x000000c4U; }
static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_hi_v_f(u32 v) { return (v & 0x1ffffU) << 0U; }
static inline u32 ctxsw_prog_main_image_control_block_ptr_o(void) { return 0x000000c8U; }
static inline u32 ctxsw_prog_main_image_control_block_ptr_v_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 ctxsw_prog_main_image_control_block_ptr_hi_o(void) { return 0x000000ccU; }
static inline u32 ctxsw_prog_main_image_control_block_ptr_hi_v_f(u32 v) { return (v & 0x1ffffU) << 0U; }

/* Local (per-GPC) context image header fields. */
static inline u32 ctxsw_prog_local_image_ppc_info_o(void) { return 0x000000f4U; }
static inline u32 ctxsw_prog_local_image_ppc_info_num_ppcs_v(u32 r) { return (r >> 0U) & 0xffffU; }
static inline u32 ctxsw_prog_local_image_ppc_info_ppc_mask_v(u32 r) { return (r >> 16U) & 0xffffU; }
static inline u32 ctxsw_prog_local_image_num_tpcs_o(void) { return 0x000000f8U; }
static inline u32 ctxsw_prog_local_magic_value_o(void) { return 0x000000fcU; }
static inline u32 ctxsw_prog_local_magic_value_v_value_v(void) { return 0xad0becabU; }

/* Extended (PM counter) buffer layout. */
static inline u32 ctxsw_prog_main_extended_buffer_ctl_o(void) { return 0x000000ecU; }
static inline u32 ctxsw_prog_main_extended_buffer_ctl_offset_v(u32 r) { return (r >> 0U) & 0xffffU; }
static inline u32 ctxsw_prog_main_extended_buffer_ctl_size_v(u32 r) { return (r >> 16U) & 0xffU; }
static inline u32 ctxsw_prog_extended_buffer_segments_size_in_bytes_v(void) { return 0x00000100U; }
static inline u32 ctxsw_prog_extended_marker_size_in_bytes_v(void) { return 0x00000004U; }
static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_register_stride_v(void) { return 0x00000000U; }
static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_control_register_stride_v(void) { return 0x00000002U; }

/* Priv whitelist (access map) configuration. */
static inline u32 ctxsw_prog_main_image_priv_access_map_config_o(void) { return 0x000000a0U; }
static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_s(void) { return 2U; }
static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_f(u32 v) { return (v & 0x3U) << 0U; }
static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_m(void) { return 0x3U << 0U; }
static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_v(u32 r) { return (r >> 0U) & 0x3U; }
static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_allow_all_f(void) { return 0x0U; }
static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_use_map_f(void) { return 0x2U; }
static inline u32 ctxsw_prog_main_image_priv_access_map_addr_lo_o(void) { return 0x000000a4U; }
static inline u32 ctxsw_prog_main_image_priv_access_map_addr_hi_o(void) { return 0x000000a8U; }

static inline u32 ctxsw_prog_main_image_misc_options_o(void) { return 0x0000003cU; }
static inline u32 ctxsw_prog_main_image_misc_options_verif_features_m(void) { return 0x1U << 3U; }
static inline u32 ctxsw_prog_main_image_misc_options_verif_features_disabled_f(void) { return 0x0U; }

/* Graphics (GfxP) and compute (CTA/CILP) preemption mode selection. */
static inline u32 ctxsw_prog_main_image_graphics_preemption_options_o(void) { return 0x00000080U; }
static inline u32 ctxsw_prog_main_image_graphics_preemption_options_control_f(u32 v) { return (v & 0x3U) << 0U; }
static inline u32 ctxsw_prog_main_image_graphics_preemption_options_control_gfxp_f(void) { return 0x1U; }
static inline u32 ctxsw_prog_main_image_compute_preemption_options_o(void) { return 0x00000084U; }
static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_f(u32 v) { return (v & 0x3U) << 0U; }
static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_cta_f(void) { return 0x1U; }
static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_cilp_f(void) { return 0x2U; }
#endif

View File

@@ -0,0 +1,603 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_falcon_tu104_h_
#define _hw_falcon_tu104_h_

/*
 * TU104 Falcon microcontroller register accessors (offsets relative to a
 * falcon's register base). Convention: _r() register offset, _f() value
 * shifted/masked into a field, _m() field mask, _v() extracted field,
 * _s() field size in bits. Generated from the hardware manuals.
 */

/* Interrupt set/clear/status. */
static inline u32 falcon_falcon_irqsset_r(void) { return 0x00000000U; }
static inline u32 falcon_falcon_irqsset_swgen0_set_f(void) { return 0x40U; }
static inline u32 falcon_falcon_irqsclr_r(void) { return 0x00000004U; }
static inline u32 falcon_falcon_irqstat_r(void) { return 0x00000008U; }
static inline u32 falcon_falcon_irqstat_halt_true_f(void) { return 0x10U; }
static inline u32 falcon_falcon_irqstat_exterr_true_f(void) { return 0x20U; }
static inline u32 falcon_falcon_irqstat_swgen0_true_f(void) { return 0x40U; }
static inline u32 falcon_falcon_irqmode_r(void) { return 0x0000000cU; }

/* Interrupt mask set: one bit per interrupt source. */
static inline u32 falcon_falcon_irqmset_r(void) { return 0x00000010U; }
static inline u32 falcon_falcon_irqmset_gptmr_f(u32 v) { return (v & 0x1U) << 0U; }
static inline u32 falcon_falcon_irqmset_wdtmr_f(u32 v) { return (v & 0x1U) << 1U; }
static inline u32 falcon_falcon_irqmset_mthd_f(u32 v) { return (v & 0x1U) << 2U; }
static inline u32 falcon_falcon_irqmset_ctxsw_f(u32 v) { return (v & 0x1U) << 3U; }
static inline u32 falcon_falcon_irqmset_halt_f(u32 v) { return (v & 0x1U) << 4U; }
static inline u32 falcon_falcon_irqmset_exterr_f(u32 v) { return (v & 0x1U) << 5U; }
static inline u32 falcon_falcon_irqmset_swgen0_f(u32 v) { return (v & 0x1U) << 6U; }
static inline u32 falcon_falcon_irqmset_swgen1_f(u32 v) { return (v & 0x1U) << 7U; }

/* Interrupt mask clear: bit layout mirrors irqmset. */
static inline u32 falcon_falcon_irqmclr_r(void) { return 0x00000014U; }
static inline u32 falcon_falcon_irqmclr_gptmr_f(u32 v) { return (v & 0x1U) << 0U; }
static inline u32 falcon_falcon_irqmclr_wdtmr_f(u32 v) { return (v & 0x1U) << 1U; }
static inline u32 falcon_falcon_irqmclr_mthd_f(u32 v) { return (v & 0x1U) << 2U; }
static inline u32 falcon_falcon_irqmclr_ctxsw_f(u32 v) { return (v & 0x1U) << 3U; }
static inline u32 falcon_falcon_irqmclr_halt_f(u32 v) { return (v & 0x1U) << 4U; }
static inline u32 falcon_falcon_irqmclr_exterr_f(u32 v) { return (v & 0x1U) << 5U; }
static inline u32 falcon_falcon_irqmclr_swgen0_f(u32 v) { return (v & 0x1U) << 6U; }
static inline u32 falcon_falcon_irqmclr_swgen1_f(u32 v) { return (v & 0x1U) << 7U; }
static inline u32 falcon_falcon_irqmclr_ext_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 falcon_falcon_irqmask_r(void) { return 0x00000018U; }

/* Interrupt routing: host destination in bits 0..15, target in bits 16..31. */
static inline u32 falcon_falcon_irqdest_r(void) { return 0x0000001cU; }
static inline u32 falcon_falcon_irqdest_host_gptmr_f(u32 v) { return (v & 0x1U) << 0U; }
static inline u32 falcon_falcon_irqdest_host_wdtmr_f(u32 v) { return (v & 0x1U) << 1U; }
static inline u32 falcon_falcon_irqdest_host_mthd_f(u32 v) { return (v & 0x1U) << 2U; }
static inline u32 falcon_falcon_irqdest_host_ctxsw_f(u32 v) { return (v & 0x1U) << 3U; }
static inline u32 falcon_falcon_irqdest_host_halt_f(u32 v) { return (v & 0x1U) << 4U; }
static inline u32 falcon_falcon_irqdest_host_exterr_f(u32 v) { return (v & 0x1U) << 5U; }
static inline u32 falcon_falcon_irqdest_host_swgen0_f(u32 v) { return (v & 0x1U) << 6U; }
static inline u32 falcon_falcon_irqdest_host_swgen1_f(u32 v) { return (v & 0x1U) << 7U; }
static inline u32 falcon_falcon_irqdest_host_ext_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 falcon_falcon_irqdest_target_gptmr_f(u32 v) { return (v & 0x1U) << 16U; }
static inline u32 falcon_falcon_irqdest_target_wdtmr_f(u32 v) { return (v & 0x1U) << 17U; }
static inline u32 falcon_falcon_irqdest_target_mthd_f(u32 v) { return (v & 0x1U) << 18U; }
static inline u32 falcon_falcon_irqdest_target_ctxsw_f(u32 v) { return (v & 0x1U) << 19U; }
static inline u32 falcon_falcon_irqdest_target_halt_f(u32 v) { return (v & 0x1U) << 20U; }
static inline u32 falcon_falcon_irqdest_target_exterr_f(u32 v) { return (v & 0x1U) << 21U; }
static inline u32 falcon_falcon_irqdest_target_swgen0_f(u32 v) { return (v & 0x1U) << 22U; }
static inline u32 falcon_falcon_irqdest_target_swgen1_f(u32 v) { return (v & 0x1U) << 23U; }
static inline u32 falcon_falcon_irqdest_target_ext_f(u32 v) { return (v & 0xffU) << 24U; }

/* Context and mailbox registers. */
static inline u32 falcon_falcon_curctx_r(void) { return 0x00000050U; }
static inline u32 falcon_falcon_nxtctx_r(void) { return 0x00000054U; }
static inline u32 falcon_falcon_mailbox0_r(void) { return 0x00000040U; }
static inline u32 falcon_falcon_mailbox1_r(void) { return 0x00000044U; }
static inline u32 falcon_falcon_itfen_r(void) { return 0x00000048U; }
static inline u32 falcon_falcon_itfen_ctxen_enable_f(void) { return 0x1U; }
static inline u32 falcon_falcon_idlestate_r(void) { return 0x0000004cU; }
static inline u32 falcon_falcon_idlestate_falcon_busy_v(u32 r) { return (r >> 0U) & 0x1U; }
static inline u32 falcon_falcon_idlestate_ext_busy_v(u32 r) { return (r >> 1U) & 0x7fffU; }
static inline u32 falcon_falcon_os_r(void) { return 0x00000080U; }
static inline u32 falcon_falcon_engctl_r(void) { return 0x000000a4U; }

/* CPU control: start/reset/halt bits, plus the aliased start register. */
static inline u32 falcon_falcon_cpuctl_r(void) { return 0x00000100U; }
static inline u32 falcon_falcon_cpuctl_startcpu_f(u32 v) { return (v & 0x1U) << 1U; }
static inline u32 falcon_falcon_cpuctl_sreset_f(u32 v) { return (v & 0x1U) << 2U; }
static inline u32 falcon_falcon_cpuctl_hreset_f(u32 v) { return (v & 0x1U) << 3U; }
static inline u32 falcon_falcon_cpuctl_halt_intr_f(u32 v) { return (v & 0x1U) << 4U; }
static inline u32 falcon_falcon_cpuctl_halt_intr_m(void) { return 0x1U << 4U; }
static inline u32 falcon_falcon_cpuctl_halt_intr_v(u32 r) { return (r >> 4U) & 0x1U; }
static inline u32 falcon_falcon_cpuctl_stopped_m(void) { return 0x1U << 5U; }
static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_f(u32 v) { return (v & 0x1U) << 6U; }
static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_m(void) { return 0x1U << 6U; }
static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_v(u32 r) { return (r >> 6U) & 0x1U; }
static inline u32 falcon_falcon_cpuctl_alias_r(void) { return 0x00000130U; }
static inline u32 falcon_falcon_cpuctl_alias_startcpu_f(u32 v) { return (v & 0x1U) << 1U; }

/* IMEM access ports: stride of 16 bytes per port. */
static inline u32 falcon_falcon_imemc_r(u32 i) { return 0x00000180U + i*16U; }
static inline u32 falcon_falcon_imemc_offs_f(u32 v) { return (v & 0x3fU) << 2U; }
static inline u32 falcon_falcon_imemc_blk_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 falcon_falcon_imemc_aincw_f(u32 v) { return (v & 0x1U) << 24U; }
static inline u32 falcon_falcon_imemc_secure_f(u32 v) { return (v & 0x1U) << 28U; }
static inline u32 falcon_falcon_imemd_r(u32 i) { return 0x00000184U + i*16U; }
static inline u32 falcon_falcon_imemt_r(u32 i) { return 0x00000188U + i*16U; }
static inline u32 falcon_falcon_sctl_r(void) { return 0x00000240U; }
/* NOTE(review): unlike the rest, this is an absolute BAR0 offset, not
 * falcon-base-relative — value 0x00100ce4 matches the generated original. */
static inline u32 falcon_falcon_mmu_phys_sec_r(void) { return 0x00100ce4U; }
static inline u32 falcon_falcon_bootvec_r(void) { return 0x00000104U; }
static inline u32 falcon_falcon_bootvec_vec_f(u32 v) { return (v & 0xffffffffU) << 0U; }

/* DMA control and hardware configuration. */
static inline u32 falcon_falcon_dmactl_r(void) { return 0x0000010cU; }
static inline u32 falcon_falcon_dmactl_dmem_scrubbing_m(void) { return 0x1U << 1U; }
static inline u32 falcon_falcon_dmactl_imem_scrubbing_m(void) { return 0x1U << 2U; }
static inline u32 falcon_falcon_dmactl_require_ctx_f(u32 v) { return (v & 0x1U) << 0U; }
static inline u32 falcon_falcon_hwcfg_r(void) { return 0x00000108U; }
static inline u32 falcon_falcon_hwcfg_imem_size_v(u32 r) { return (r >> 0U) & 0x1ffU; }
static inline u32 falcon_falcon_hwcfg_dmem_size_v(u32 r) { return (r >> 9U) & 0x1ffU; }

/* DMA transfer registers. */
static inline u32 falcon_falcon_dmatrfbase_r(void) { return 0x00000110U; }
static inline u32 falcon_falcon_dmatrfbase1_r(void) { return 0x00000128U; }
static inline u32 falcon_falcon_dmatrfmoffs_r(void) { return 0x00000114U; }
static inline u32 falcon_falcon_dmatrfcmd_r(void) { return 0x00000118U; }
static inline u32 falcon_falcon_dmatrfcmd_imem_f(u32 v) { return (v & 0x1U) << 4U; }
static inline u32 falcon_falcon_dmatrfcmd_write_f(u32 v) { return (v & 0x1U) << 5U; }
static inline u32 falcon_falcon_dmatrfcmd_size_f(u32 v) { return (v & 0x7U) << 8U; }
static inline u32 falcon_falcon_dmatrfcmd_ctxdma_f(u32 v) { return (v & 0x7U) << 12U; }
static inline u32 falcon_falcon_dmatrffboffs_r(void) { return 0x0000011cU; }

/* IMEM debug control and trace buffer. */
static inline u32 falcon_falcon_imctl_debug_r(void) { return 0x0000015cU; }
static inline u32 falcon_falcon_imctl_debug_addr_blk_f(u32 v) { return (v & 0xffffffU) << 0U; }
static inline u32 falcon_falcon_imctl_debug_cmd_f(u32 v) { return (v & 0x7U) << 24U; }
static inline u32 falcon_falcon_imstat_r(void) { return 0x00000144U; }
static inline u32 falcon_falcon_traceidx_r(void) { return 0x00000148U; }
static inline u32 falcon_falcon_traceidx_maxidx_v(u32 r) { return (r >> 16U) & 0xffU; }
static inline u32 falcon_falcon_traceidx_idx_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 falcon_falcon_tracepc_r(void) { return 0x0000014cU; }
static inline u32 falcon_falcon_tracepc_pc_v(u32 r) { return (r >> 0U) & 0xffffffU; }

/* External error reporting. */
static inline u32 falcon_falcon_exterraddr_r(void) { return 0x00000168U; }
static inline u32 falcon_falcon_exterrstat_r(void) { return 0x0000016cU; }
static inline u32 falcon_falcon_exterrstat_valid_m(void) { return 0x1U << 31U; }
static inline u32 falcon_falcon_exterrstat_valid_v(u32 r) { return (r >> 31U) & 0x1U; }
static inline u32 falcon_falcon_exterrstat_valid_true_v(void) { return 0x00000001U; }

/* ICD (in-circuit debug) command interface. */
static inline u32 falcon_falcon_icd_cmd_r(void) { return 0x00000200U; }
static inline u32 falcon_falcon_icd_cmd_opc_s(void) { return 4U; }
static inline u32 falcon_falcon_icd_cmd_opc_f(u32 v) { return (v & 0xfU) << 0U; }
static inline u32 falcon_falcon_icd_cmd_opc_m(void) { return 0xfU << 0U; }
static inline u32 falcon_falcon_icd_cmd_opc_v(u32 r) { return (r >> 0U) & 0xfU; }
static inline u32 falcon_falcon_icd_cmd_opc_rreg_f(void) { return 0x8U; }
static inline u32 falcon_falcon_icd_cmd_opc_rstat_f(void) { return 0xeU; }
static inline u32 falcon_falcon_icd_cmd_idx_f(u32 v) { return (v & 0x1fU) << 8U; }
static inline u32 falcon_falcon_icd_rdata_r(void) { return 0x0000020cU; }

/* DMEM access ports: stride of 8 bytes per port. */
static inline u32 falcon_falcon_dmemc_r(u32 i) { return 0x000001c0U + i*8U; }
static inline u32 falcon_falcon_dmemc_offs_f(u32 v) { return (v & 0x3fU) << 2U; }
static inline u32 falcon_falcon_dmemc_offs_m(void) { return 0x3fU << 2U; }
static inline u32 falcon_falcon_dmemc_blk_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 falcon_falcon_dmemc_blk_m(void) { return 0xffU << 8U; }
static inline u32 falcon_falcon_dmemc_aincw_f(u32 v) { return (v & 0x1U) << 24U; }
static inline u32 falcon_falcon_dmemc_aincr_f(u32 v) { return (v & 0x1U) << 25U; }
static inline u32 falcon_falcon_dmemd_r(u32 i) { return 0x000001c4U + i*8U; }

/* Debug registers. */
static inline u32 falcon_falcon_debug1_r(void) { return 0x00000090U; }
static inline u32 falcon_falcon_debug1_ctxsw_mode_s(void) { return 1U; }
static inline u32 falcon_falcon_debug1_ctxsw_mode_f(u32 v) { return (v & 0x1U) << 16U; }
static inline u32 falcon_falcon_debug1_ctxsw_mode_m(void) { return 0x1U << 16U; }
static inline u32 falcon_falcon_debug1_ctxsw_mode_v(u32 r) { return (r >> 16U) & 0x1U; }
static inline u32 falcon_falcon_debug1_ctxsw_mode_init_f(void) { return 0x0U; }
static inline u32 falcon_falcon_debuginfo_r(void) { return 0x00000094U; }
#endif

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,127 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_fbpa_tu104_h_
#define _hw_fbpa_tu104_h_

/*
 * TU104 FBPA (framebuffer partition) ECC register accessors.
 * Convention: _r() register offset, _f() field value, pending/enabled
 * constants are pre-shifted masks. Generated from the hardware manuals.
 */

/* FBPA 0 interrupt status: SEC/DED bits for subpartitions 0 and 1. */
static inline u32 fbpa_0_intr_status_r(void) { return 0x00900398U; }
static inline u32 fbpa_0_intr_status_sec_subp0_pending_f(void) { return 0x1U; }
static inline u32 fbpa_0_intr_status_ded_subp0_pending_f(void) { return 0x2U; }
static inline u32 fbpa_0_intr_status_sec_subp1_pending_f(void) { return 0x10000U; }
static inline u32 fbpa_0_intr_status_ded_subp1_pending_f(void) { return 0x20000U; }

/* Broadcast ECC interrupt enable control. */
static inline u32 fbpa_ecc_intr_ctrl_r(void) { return 0x009a0474U; }
static inline u32 fbpa_ecc_intr_ctrl_sec_intr_en_f(u32 v) { return (v & 0x1U) << 0U; }
static inline u32 fbpa_ecc_intr_ctrl_sec_intr_en_enabled_f(void) { return 0x1U; }
static inline u32 fbpa_ecc_intr_ctrl_ded_intr_en_f(u32 v) { return (v & 0x1U) << 1U; }
static inline u32 fbpa_ecc_intr_ctrl_ded_intr_en_enabled_f(void) { return 0x2U; }

/* Per-subpartition ECC status and error counters (stride 4 bytes). */
static inline u32 fbpa_0_ecc_status_r(u32 i) { return 0x00900478U + i*4U; }
static inline u32 fbpa_0_ecc_status_sec_intr_pending_f(void) { return 0x2U; }
static inline u32 fbpa_0_ecc_status_ded_intr_pending_f(void) { return 0x4U; }
static inline u32 fbpa_0_ecc_status_sec_counter_overflow_pending_f(void) { return 0x20000U; }
static inline u32 fbpa_0_ecc_status_ded_counter_overflow_pending_f(void) { return 0x40000U; }
static inline u32 fbpa_0_ecc_sec_count_r(u32 i) { return 0x00900480U + i*4U; }
static inline u32 fbpa_0_ecc_ded_count_r(u32 i) { return 0x00900488U + i*4U; }
#endif

View File

@@ -0,0 +1,495 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_fifo_tu104_h_
#define _hw_fifo_tu104_h_
/*
 * TU104 FIFO register accessors: USERD writeback, per-runlist submission
 * registers, PBDMA mapping, and top-level FIFO interrupt bits.
 * Convention: _r() register offset, _f() field value, _v() constant.
 */

/* USERD writeback timer configuration. */
static inline u32 fifo_userd_writeback_r(void) { return 0x0000225cU; }
static inline u32 fifo_userd_writeback_timer_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 fifo_userd_writeback_timer_disabled_v(void) { return 0x00000000U; }
static inline u32 fifo_userd_writeback_timer_shorter_v(void) { return 0x00000003U; }
static inline u32 fifo_userd_writeback_timer_100us_v(void) { return 0x00000064U; }
static inline u32 fifo_userd_writeback_timescale_f(u32 v) { return (v & 0xfU) << 12U; }
static inline u32 fifo_userd_writeback_timescale_0_v(void) { return 0x00000000U; }

/* Per-runlist base/submit registers: stride of 16 bytes per runlist. */
static inline u32 fifo_runlist_base_lo_r(u32 i) { return 0x00002b00U + i*16U; }
static inline u32 fifo_runlist_base_lo__size_1_v(void) { return 0x0000000bU; }
static inline u32 fifo_runlist_base_lo_ptr_align_shift_v(void) { return 0x0000000cU; }
static inline u32 fifo_runlist_base_lo_ptr_lo_f(u32 v) { return (v & 0xfffffU) << 12U; }
static inline u32 fifo_runlist_base_lo_target_vid_mem_f(void) { return 0x0U; }
static inline u32 fifo_runlist_base_lo_target_sys_mem_coh_f(void) { return 0x2U; }
static inline u32 fifo_runlist_base_lo_target_sys_mem_ncoh_f(void) { return 0x3U; }
static inline u32 fifo_runlist_base_hi_r(u32 i) { return 0x00002b04U + i*16U; }
static inline u32 fifo_runlist_base_hi_ptr_hi_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 fifo_runlist_submit_r(u32 i) { return 0x00002b08U + i*16U; }
static inline u32 fifo_runlist_submit_length_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 fifo_runlist_submit_info_r(u32 i) { return 0x00002b0cU + i*16U; }
static inline u32 fifo_runlist_submit_info_pending_true_f(void) { return 0x8000U; }

/* PBDMA-to-runlist map: stride of 4 bytes per PBDMA. */
static inline u32 fifo_pbdma_map_r(u32 i) { return 0x00002390U + i*4U; }

/* Top-level FIFO interrupt status; reset values match the pending bits. */
static inline u32 fifo_intr_0_r(void) { return 0x00002100U; }
static inline u32 fifo_intr_0_bind_error_pending_f(void) { return 0x1U; }
static inline u32 fifo_intr_0_bind_error_reset_f(void) { return 0x1U; }
static inline u32 fifo_intr_0_sched_error_pending_f(void) { return 0x100U; }
static inline u32 fifo_intr_0_sched_error_reset_f(void) { return 0x100U; }
static inline u32 fifo_intr_0_chsw_error_pending_f(void)
{
return 0x10000U;
}
static inline u32 fifo_intr_0_chsw_error_reset_f(void)
{
return 0x10000U;
}
static inline u32 fifo_intr_0_memop_timeout_pending_f(void)
{
return 0x800000U;
}
static inline u32 fifo_intr_0_memop_timeout_reset_f(void)
{
return 0x800000U;
}
static inline u32 fifo_intr_0_lb_error_pending_f(void)
{
return 0x1000000U;
}
static inline u32 fifo_intr_0_lb_error_reset_f(void)
{
return 0x1000000U;
}
static inline u32 fifo_intr_0_pbdma_intr_pending_f(void)
{
return 0x20000000U;
}
static inline u32 fifo_intr_0_runlist_event_pending_f(void)
{
return 0x40000000U;
}
static inline u32 fifo_intr_0_channel_intr_pending_f(void)
{
return 0x80000000U;
}
static inline u32 fifo_intr_0_ctxsw_timeout_pending_f(void)
{
return 0x2U;
}
static inline u32 fifo_intr_en_0_r(void)
{
return 0x00002140U;
}
static inline u32 fifo_intr_en_0_sched_error_f(u32 v)
{
return (v & 0x1U) << 8U;
}
static inline u32 fifo_intr_en_0_sched_error_m(void)
{
return 0x1U << 8U;
}
static inline u32 fifo_intr_en_1_r(void)
{
return 0x00002528U;
}
static inline u32 fifo_intr_bind_error_r(void)
{
return 0x0000252cU;
}
static inline u32 fifo_intr_sched_error_r(void)
{
return 0x0000254cU;
}
static inline u32 fifo_intr_sched_error_code_f(u32 v)
{
return (v & 0xffU) << 0U;
}
static inline u32 fifo_intr_chsw_error_r(void)
{
return 0x0000256cU;
}
static inline u32 fifo_intr_pbdma_id_r(void)
{
return 0x000025a0U;
}
static inline u32 fifo_intr_pbdma_id_status_f(u32 v, u32 i)
{
return (v & 0x1U) << (0U + i*1U);
}
static inline u32 fifo_intr_pbdma_id_status_v(u32 r, u32 i)
{
return (r >> (0U + i*1U)) & 0x1U;
}
static inline u32 fifo_intr_pbdma_id_status__size_1_v(void)
{
return 0x0000000cU;
}
static inline u32 fifo_intr_runlist_r(void)
{
return 0x00002a00U;
}
static inline u32 fifo_fb_timeout_r(void)
{
return 0x00002a04U;
}
static inline u32 fifo_fb_timeout_period_m(void)
{
return 0x3fffffffU << 0U;
}
static inline u32 fifo_fb_timeout_period_max_f(void)
{
return 0x3fffffffU;
}
static inline u32 fifo_fb_timeout_period_init_f(void)
{
return 0x3c00U;
}
static inline u32 fifo_sched_disable_r(void)
{
return 0x00002630U;
}
static inline u32 fifo_sched_disable_runlist_f(u32 v, u32 i)
{
return (v & 0x1U) << (0U + i*1U);
}
static inline u32 fifo_sched_disable_runlist_m(u32 i)
{
return 0x1U << (0U + i*1U);
}
static inline u32 fifo_sched_disable_true_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_runlist_preempt_r(void)
{
return 0x00002638U;
}
static inline u32 fifo_runlist_preempt_runlist_f(u32 v, u32 i)
{
return (v & 0x1U) << (0U + i*1U);
}
static inline u32 fifo_runlist_preempt_runlist_m(u32 i)
{
return 0x1U << (0U + i*1U);
}
static inline u32 fifo_runlist_preempt_runlist_pending_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_preempt_r(void)
{
return 0x00002634U;
}
static inline u32 fifo_preempt_pending_true_f(void)
{
return 0x100000U;
}
static inline u32 fifo_preempt_type_channel_f(void)
{
return 0x0U;
}
static inline u32 fifo_preempt_type_tsg_f(void)
{
return 0x1000000U;
}
static inline u32 fifo_preempt_chid_f(u32 v)
{
return (v & 0xfffU) << 0U;
}
static inline u32 fifo_preempt_id_f(u32 v)
{
return (v & 0xfffU) << 0U;
}
static inline u32 fifo_engine_status_r(u32 i)
{
return 0x00002640U + i*8U;
}
static inline u32 fifo_engine_status__size_1_v(void)
{
return 0x0000000dU;
}
static inline u32 fifo_engine_status_id_v(u32 r)
{
return (r >> 0U) & 0xfffU;
}
static inline u32 fifo_engine_status_id_type_v(u32 r)
{
return (r >> 12U) & 0x1U;
}
static inline u32 fifo_engine_status_id_type_chid_v(void)
{
return 0x00000000U;
}
static inline u32 fifo_engine_status_id_type_tsgid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_engine_status_ctx_status_v(u32 r)
{
return (r >> 13U) & 0x7U;
}
static inline u32 fifo_engine_status_ctx_status_valid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_engine_status_ctx_status_ctxsw_load_v(void)
{
return 0x00000005U;
}
static inline u32 fifo_engine_status_ctx_status_ctxsw_save_v(void)
{
return 0x00000006U;
}
static inline u32 fifo_engine_status_ctx_status_ctxsw_switch_v(void)
{
return 0x00000007U;
}
static inline u32 fifo_engine_status_next_id_v(u32 r)
{
return (r >> 16U) & 0xfffU;
}
static inline u32 fifo_engine_status_next_id_type_v(u32 r)
{
return (r >> 28U) & 0x1U;
}
static inline u32 fifo_engine_status_next_id_type_chid_v(void)
{
return 0x00000000U;
}
static inline u32 fifo_engine_status_eng_reload_v(u32 r)
{
return (r >> 29U) & 0x1U;
}
static inline u32 fifo_engine_status_faulted_v(u32 r)
{
return (r >> 30U) & 0x1U;
}
static inline u32 fifo_engine_status_faulted_true_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_engine_status_engine_v(u32 r)
{
return (r >> 31U) & 0x1U;
}
static inline u32 fifo_engine_status_engine_idle_v(void)
{
return 0x00000000U;
}
static inline u32 fifo_engine_status_engine_busy_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_engine_status_ctxsw_v(u32 r)
{
return (r >> 15U) & 0x1U;
}
static inline u32 fifo_engine_status_ctxsw_in_progress_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_engine_status_ctxsw_in_progress_f(void)
{
return 0x8000U;
}
static inline u32 fifo_pbdma_status_r(u32 i)
{
return 0x00003080U + i*4U;
}
static inline u32 fifo_pbdma_status__size_1_v(void)
{
return 0x0000000cU;
}
static inline u32 fifo_pbdma_status_id_v(u32 r)
{
return (r >> 0U) & 0xfffU;
}
static inline u32 fifo_pbdma_status_id_type_v(u32 r)
{
return (r >> 12U) & 0x1U;
}
static inline u32 fifo_pbdma_status_id_type_chid_v(void)
{
return 0x00000000U;
}
static inline u32 fifo_pbdma_status_id_type_tsgid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_pbdma_status_chan_status_v(u32 r)
{
return (r >> 13U) & 0x7U;
}
static inline u32 fifo_pbdma_status_chan_status_valid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_pbdma_status_chan_status_chsw_load_v(void)
{
return 0x00000005U;
}
static inline u32 fifo_pbdma_status_chan_status_chsw_save_v(void)
{
return 0x00000006U;
}
static inline u32 fifo_pbdma_status_chan_status_chsw_switch_v(void)
{
return 0x00000007U;
}
static inline u32 fifo_pbdma_status_next_id_v(u32 r)
{
return (r >> 16U) & 0xfffU;
}
static inline u32 fifo_pbdma_status_next_id_type_v(u32 r)
{
return (r >> 28U) & 0x1U;
}
static inline u32 fifo_pbdma_status_next_id_type_chid_v(void)
{
return 0x00000000U;
}
static inline u32 fifo_pbdma_status_chsw_v(u32 r)
{
return (r >> 15U) & 0x1U;
}
static inline u32 fifo_pbdma_status_chsw_in_progress_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_cfg0_r(void)
{
return 0x00002004U;
}
static inline u32 fifo_cfg0_num_pbdma_v(u32 r)
{
return (r >> 0U) & 0xffU;
}
static inline u32 fifo_cfg0_pbdma_fault_id_v(u32 r)
{
return (r >> 16U) & 0xffU;
}
#endif

View File

@@ -0,0 +1,187 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_flush_tu104_h_
#define _hw_flush_tu104_h_
/*
 * Accessors for the TU104 flush unit registers (L2/FB flush and
 * invalidate triggers at 0x70000). Each register exposes a "pending"
 * busy bit (bit 0) and an "outstanding" bit (bit 1).
 * NOTE(review): generated hw_*.h accessor header -- regenerate rather
 * than hand-edit.
 */
/* L2 system invalidate trigger/status. */
static inline u32 flush_l2_system_invalidate_r(void)
{
	return 0x00070004U;
}
static inline u32 flush_l2_system_invalidate_pending_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 flush_l2_system_invalidate_pending_busy_v(void)
{
	return 0x00000001U;
}
static inline u32 flush_l2_system_invalidate_pending_busy_f(void)
{
	return 0x1U;
}
static inline u32 flush_l2_system_invalidate_outstanding_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 flush_l2_system_invalidate_outstanding_true_v(void)
{
	return 0x00000001U;
}
/* L2 flush-dirty trigger/status. */
static inline u32 flush_l2_flush_dirty_r(void)
{
	return 0x00070010U;
}
static inline u32 flush_l2_flush_dirty_pending_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 flush_l2_flush_dirty_pending_empty_v(void)
{
	return 0x00000000U;
}
static inline u32 flush_l2_flush_dirty_pending_empty_f(void)
{
	return 0x0U;
}
static inline u32 flush_l2_flush_dirty_pending_busy_v(void)
{
	return 0x00000001U;
}
static inline u32 flush_l2_flush_dirty_pending_busy_f(void)
{
	return 0x1U;
}
static inline u32 flush_l2_flush_dirty_outstanding_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 flush_l2_flush_dirty_outstanding_false_v(void)
{
	return 0x00000000U;
}
static inline u32 flush_l2_flush_dirty_outstanding_false_f(void)
{
	return 0x0U;
}
static inline u32 flush_l2_flush_dirty_outstanding_true_v(void)
{
	return 0x00000001U;
}
/* L2 clean-comptags trigger/status. */
static inline u32 flush_l2_clean_comptags_r(void)
{
	return 0x0007000cU;
}
static inline u32 flush_l2_clean_comptags_pending_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 flush_l2_clean_comptags_pending_empty_v(void)
{
	return 0x00000000U;
}
static inline u32 flush_l2_clean_comptags_pending_empty_f(void)
{
	return 0x0U;
}
static inline u32 flush_l2_clean_comptags_pending_busy_v(void)
{
	return 0x00000001U;
}
static inline u32 flush_l2_clean_comptags_pending_busy_f(void)
{
	return 0x1U;
}
static inline u32 flush_l2_clean_comptags_outstanding_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 flush_l2_clean_comptags_outstanding_false_v(void)
{
	return 0x00000000U;
}
static inline u32 flush_l2_clean_comptags_outstanding_false_f(void)
{
	return 0x0U;
}
static inline u32 flush_l2_clean_comptags_outstanding_true_v(void)
{
	return 0x00000001U;
}
/* FB flush trigger/status. */
static inline u32 flush_fb_flush_r(void)
{
	return 0x00070000U;
}
static inline u32 flush_fb_flush_pending_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 flush_fb_flush_pending_busy_v(void)
{
	return 0x00000001U;
}
static inline u32 flush_fb_flush_pending_busy_f(void)
{
	return 0x1U;
}
static inline u32 flush_fb_flush_outstanding_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 flush_fb_flush_outstanding_true_v(void)
{
	return 0x00000001U;
}
#endif

View File

@@ -0,0 +1,159 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_func_tu104_h_
#define _hw_func_tu104_h_
/*
 * Accessors for the TU104 "func" register space (CPU interrupt
 * tree, MMU fault buffers, doorbell, BAR2/bind registers). Offsets
 * here are relative to the func window; func_full_phys_offset_v()
 * gives the window's physical base.
 * NOTE(review): generated hw_*.h accessor header -- regenerate rather
 * than hand-edit.
 */
static inline u32 func_full_phys_offset_v(void)
{
	return 0x00b80000U;
}
static inline u32 func_doorbell_r(void)
{
	return 0x00030090U;
}
/* CPU interrupt tree: top-level set/clear/status, 4-byte stride. */
static inline u32 func_priv_cpu_intr_top_en_set_r(u32 i)
{
	return 0x00001608U + i*4U;
}
static inline u32 func_priv_cpu_intr_top_en_clear_r(u32 i)
{
	return 0x00001610U + i*4U;
}
static inline u32 func_priv_cpu_intr_top_en_clear__size_1_v(void)
{
	return 0x00000001U;
}
/* CPU interrupt tree: leaf-level set/clear/status, 8 leaf registers. */
static inline u32 func_priv_cpu_intr_leaf_en_set_r(u32 i)
{
	return 0x00001200U + i*4U;
}
static inline u32 func_priv_cpu_intr_leaf_en_clear_r(u32 i)
{
	return 0x00001400U + i*4U;
}
static inline u32 func_priv_cpu_intr_leaf_en_clear__size_1_v(void)
{
	return 0x00000008U;
}
static inline u32 func_priv_cpu_intr_top_r(u32 i)
{
	return 0x00001600U + i*4U;
}
static inline u32 func_priv_cpu_intr_leaf_r(u32 i)
{
	return 0x00001000U + i*4U;
}
/* MMU fault buffer descriptors: 32-byte stride per buffer index. */
static inline u32 func_priv_mmu_fault_buffer_lo_r(u32 i)
{
	return 0x00003000U + i*32U;
}
static inline u32 func_priv_mmu_fault_buffer_hi_r(u32 i)
{
	return 0x00003004U + i*32U;
}
static inline u32 func_priv_mmu_fault_buffer_get_r(u32 i)
{
	return 0x00003008U + i*32U;
}
static inline u32 func_priv_mmu_fault_buffer_put_r(u32 i)
{
	return 0x0000300cU + i*32U;
}
static inline u32 func_priv_mmu_fault_buffer_size_r(u32 i)
{
	return 0x00003010U + i*32U;
}
/* MMU fault snapshot registers (address, instance block, info). */
static inline u32 func_priv_mmu_fault_addr_lo_r(void)
{
	return 0x00003080U;
}
static inline u32 func_priv_mmu_fault_addr_hi_r(void)
{
	return 0x00003084U;
}
static inline u32 func_priv_mmu_fault_inst_lo_r(void)
{
	return 0x00003088U;
}
static inline u32 func_priv_mmu_fault_inst_hi_r(void)
{
	return 0x0000308cU;
}
static inline u32 func_priv_mmu_fault_info_r(void)
{
	return 0x00003090U;
}
static inline u32 func_priv_mmu_fault_status_r(void)
{
	return 0x00003094U;
}
/* BAR2 instance-block binding and bind status. */
static inline u32 func_priv_bar2_block_r(void)
{
	return 0x00000f48U;
}
static inline u32 func_priv_bind_status_r(void)
{
	return 0x00000f50U;
}
/* MMU TLB invalidate trigger registers. */
static inline u32 func_priv_mmu_invalidate_pdb_r(void)
{
	return 0x000030a0U;
}
static inline u32 func_priv_mmu_invalidate_r(void)
{
	return 0x000030b0U;
}
#endif

View File

@@ -0,0 +1,107 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_fuse_tu104_h_
#define _hw_fuse_tu104_h_
/*
 * Accessors for the TU104 fuse unit registers (floorswept-unit status:
 * GPC/TPC/FBP/FBIO/ROP-L2 masks, ECC enable, fuse-override control).
 * NOTE(review): generated hw_*.h accessor header -- regenerate rather
 * than hand-edit.
 */
/* GPC floorsweeping status. */
static inline u32 fuse_status_opt_gpc_r(void)
{
	return 0x00021c1cU;
}
/* Per-GPC TPC floorsweeping status/control, 4-byte stride. */
static inline u32 fuse_status_opt_tpc_gpc_r(u32 i)
{
	return 0x00021c38U + i*4U;
}
static inline u32 fuse_ctrl_opt_tpc_gpc_r(u32 i)
{
	return 0x00021838U + i*4U;
}
/* FBIO floorsweeping status: 16-bit data field. */
static inline u32 fuse_status_opt_fbio_r(void)
{
	return 0x00021c14U;
}
static inline u32 fuse_status_opt_fbio_data_f(u32 v)
{
	return (v & 0xffffU) << 0U;
}
static inline u32 fuse_status_opt_fbio_data_m(void)
{
	return 0xffffU << 0U;
}
static inline u32 fuse_status_opt_fbio_data_v(u32 r)
{
	return (r >> 0U) & 0xffffU;
}
/* Per-FBP ROP/L2 floorsweeping status, 4-byte stride. */
static inline u32 fuse_status_opt_rop_l2_fbp_r(u32 i)
{
	return 0x00021d70U + i*4U;
}
/* FBP floorsweeping status: one bit per FBP index. */
static inline u32 fuse_status_opt_fbp_r(void)
{
	return 0x00021d38U;
}
static inline u32 fuse_status_opt_fbp_idx_v(u32 r, u32 i)
{
	return (r >> (0U + i*1U)) & 0x1U;
}
static inline u32 fuse_opt_ecc_en_r(void)
{
	return 0x00021228U;
}
static inline u32 fuse_opt_feature_fuses_override_disable_r(void)
{
	return 0x000213f0U;
}
#endif

View File

@@ -0,0 +1,63 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_gc6_tu104_h_
#define _hw_gc6_tu104_h_
/*
 * Accessor for the TU104 GC6 always-on secure scratch register group
 * (group 05, 4-byte stride).
 * NOTE(review): generated hw_*.h accessor header -- regenerate rather
 * than hand-edit.
 */
static inline u32 gc6_aon_secure_scratch_group_05_r(u32 i)
{
	return 0x00118234U + i*4U;
}
#endif

View File

@@ -0,0 +1,355 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_gmmu_tu104_h_
#define _hw_gmmu_tu104_h_
/*
 * Field layouts for TU104 GMMU page-table entries (new-format PDE,
 * dual PDE, PTE; legacy PTE kind field) plus MMU fault id constants.
 * The _w() helpers give the 32-bit word index of a field inside the
 * entry; _f()/_v() follow the usual accessor convention documented
 * above.
 * NOTE(review): generated hw_*.h accessor header -- regenerate rather
 * than hand-edit.
 */
/* New-format single PDE: 8 bytes, 4K-aligned address (shift 12). */
static inline u32 gmmu_new_pde_is_pte_w(void)
{
	return 0U;
}
static inline u32 gmmu_new_pde_is_pte_false_f(void)
{
	return 0x0U;
}
static inline u32 gmmu_new_pde_aperture_w(void)
{
	return 0U;
}
static inline u32 gmmu_new_pde_aperture_invalid_f(void)
{
	return 0x0U;
}
static inline u32 gmmu_new_pde_aperture_video_memory_f(void)
{
	return 0x2U;
}
static inline u32 gmmu_new_pde_aperture_sys_mem_coh_f(void)
{
	return 0x4U;
}
static inline u32 gmmu_new_pde_aperture_sys_mem_ncoh_f(void)
{
	return 0x6U;
}
static inline u32 gmmu_new_pde_address_sys_f(u32 v)
{
	return (v & 0xffffffU) << 8U;
}
static inline u32 gmmu_new_pde_address_sys_w(void)
{
	return 0U;
}
static inline u32 gmmu_new_pde_vol_w(void)
{
	return 0U;
}
static inline u32 gmmu_new_pde_vol_true_f(void)
{
	return 0x8U;
}
static inline u32 gmmu_new_pde_vol_false_f(void)
{
	return 0x0U;
}
static inline u32 gmmu_new_pde_address_shift_v(void)
{
	return 0x0000000cU;
}
static inline u32 gmmu_new_pde__size_v(void)
{
	return 0x00000008U;
}
/*
 * New-format dual PDE: 16 bytes; "big" page-table pointer in words
 * 0-1 (256-byte aligned, shift 8), "small" pointer in words 2-3
 * (4K aligned, shift 12).
 */
static inline u32 gmmu_new_dual_pde_is_pte_w(void)
{
	return 0U;
}
static inline u32 gmmu_new_dual_pde_is_pte_false_f(void)
{
	return 0x0U;
}
static inline u32 gmmu_new_dual_pde_aperture_big_w(void)
{
	return 0U;
}
static inline u32 gmmu_new_dual_pde_aperture_big_invalid_f(void)
{
	return 0x0U;
}
static inline u32 gmmu_new_dual_pde_aperture_big_video_memory_f(void)
{
	return 0x2U;
}
static inline u32 gmmu_new_dual_pde_aperture_big_sys_mem_coh_f(void)
{
	return 0x4U;
}
static inline u32 gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f(void)
{
	return 0x6U;
}
static inline u32 gmmu_new_dual_pde_address_big_sys_f(u32 v)
{
	return (v & 0xfffffffU) << 4U;
}
static inline u32 gmmu_new_dual_pde_address_big_sys_w(void)
{
	return 0U;
}
static inline u32 gmmu_new_dual_pde_aperture_small_w(void)
{
	return 2U;
}
static inline u32 gmmu_new_dual_pde_aperture_small_invalid_f(void)
{
	return 0x0U;
}
static inline u32 gmmu_new_dual_pde_aperture_small_video_memory_f(void)
{
	return 0x2U;
}
static inline u32 gmmu_new_dual_pde_aperture_small_sys_mem_coh_f(void)
{
	return 0x4U;
}
static inline u32 gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f(void)
{
	return 0x6U;
}
static inline u32 gmmu_new_dual_pde_vol_small_w(void)
{
	return 2U;
}
static inline u32 gmmu_new_dual_pde_vol_small_true_f(void)
{
	return 0x8U;
}
static inline u32 gmmu_new_dual_pde_vol_small_false_f(void)
{
	return 0x0U;
}
static inline u32 gmmu_new_dual_pde_vol_big_w(void)
{
	return 0U;
}
static inline u32 gmmu_new_dual_pde_vol_big_true_f(void)
{
	return 0x8U;
}
static inline u32 gmmu_new_dual_pde_vol_big_false_f(void)
{
	return 0x0U;
}
static inline u32 gmmu_new_dual_pde_address_small_sys_f(u32 v)
{
	return (v & 0xffffffU) << 8U;
}
static inline u32 gmmu_new_dual_pde_address_small_sys_w(void)
{
	return 2U;
}
static inline u32 gmmu_new_dual_pde_address_shift_v(void)
{
	return 0x0000000cU;
}
static inline u32 gmmu_new_dual_pde_address_big_shift_v(void)
{
	return 0x00000008U;
}
static inline u32 gmmu_new_dual_pde__size_v(void)
{
	return 0x00000010U;
}
/*
 * New-format PTE: 8 bytes; valid/privilege/vol/aperture/address in
 * word 0, comptagline and kind in word 1.
 */
static inline u32 gmmu_new_pte__size_v(void)
{
	return 0x00000008U;
}
static inline u32 gmmu_new_pte_valid_w(void)
{
	return 0U;
}
static inline u32 gmmu_new_pte_valid_true_f(void)
{
	return 0x1U;
}
static inline u32 gmmu_new_pte_valid_false_f(void)
{
	return 0x0U;
}
static inline u32 gmmu_new_pte_privilege_w(void)
{
	return 0U;
}
static inline u32 gmmu_new_pte_privilege_true_f(void)
{
	return 0x20U;
}
static inline u32 gmmu_new_pte_privilege_false_f(void)
{
	return 0x0U;
}
static inline u32 gmmu_new_pte_address_sys_f(u32 v)
{
	return (v & 0xffffffU) << 8U;
}
static inline u32 gmmu_new_pte_address_sys_w(void)
{
	return 0U;
}
static inline u32 gmmu_new_pte_address_vid_f(u32 v)
{
	return (v & 0xffffffU) << 8U;
}
static inline u32 gmmu_new_pte_address_vid_w(void)
{
	return 0U;
}
static inline u32 gmmu_new_pte_vol_w(void)
{
	return 0U;
}
static inline u32 gmmu_new_pte_vol_true_f(void)
{
	return 0x8U;
}
static inline u32 gmmu_new_pte_vol_false_f(void)
{
	return 0x0U;
}
static inline u32 gmmu_new_pte_aperture_w(void)
{
	return 0U;
}
static inline u32 gmmu_new_pte_aperture_video_memory_f(void)
{
	return 0x0U;
}
static inline u32 gmmu_new_pte_aperture_sys_mem_coh_f(void)
{
	return 0x4U;
}
static inline u32 gmmu_new_pte_aperture_sys_mem_ncoh_f(void)
{
	return 0x6U;
}
static inline u32 gmmu_new_pte_read_only_w(void)
{
	return 0U;
}
static inline u32 gmmu_new_pte_read_only_true_f(void)
{
	return 0x40U;
}
static inline u32 gmmu_new_pte_comptagline_f(u32 v)
{
	return (v & 0xfffffU) << 4U;
}
static inline u32 gmmu_new_pte_comptagline_w(void)
{
	return 1U;
}
static inline u32 gmmu_new_pte_kind_f(u32 v)
{
	return (v & 0xffU) << 24U;
}
static inline u32 gmmu_new_pte_kind_w(void)
{
	return 1U;
}
static inline u32 gmmu_new_pte_address_shift_v(void)
{
	return 0x0000000cU;
}
/* Legacy PTE kind field (word 1, bits 4..11). */
static inline u32 gmmu_pte_kind_f(u32 v)
{
	return (v & 0xffU) << 4U;
}
static inline u32 gmmu_pte_kind_w(void)
{
	return 1U;
}
static inline u32 gmmu_pte_kind_invalid_v(void)
{
	return 0x00000007U;
}
static inline u32 gmmu_pte_kind_pitch_v(void)
{
	return 0x00000000U;
}
/* MMU fault client types, fault types and engine-id constants. */
static inline u32 gmmu_fault_client_type_gpc_v(void)
{
	return 0x00000000U;
}
static inline u32 gmmu_fault_client_type_hub_v(void)
{
	return 0x00000001U;
}
static inline u32 gmmu_fault_type_unbound_inst_block_v(void)
{
	return 0x00000004U;
}
static inline u32 gmmu_fault_mmu_eng_id_bar2_v(void)
{
	return 0x000000c0U;
}
static inline u32 gmmu_fault_mmu_eng_id_physical_v(void)
{
	return 0x0000001fU;
}
static inline u32 gmmu_fault_mmu_eng_id_ce0_v(void)
{
	return 0x0000000fU;
}
#endif

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,331 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_ioctrl_tu104_h_
#define _hw_ioctrl_tu104_h_
/*
 * NV_IOCTRL (NVLink I/O control top level) register accessors for tu104.
 * <x>_r() returns a register offset, <x>_f() shifts a value into field
 * position, <x>_m() returns the field mask, <x>_v() extracts the field.
 */
static inline u32 ioctrl_reset_r(void)
{
	return 0x00000140U;
}
static inline u32 ioctrl_reset_sw_post_reset_delay_microseconds_v(void)
{
	return 0x00000008U;
}
static inline u32 ioctrl_reset_linkreset_f(u32 v)
{
	return (v & 0x3U) << 8U;
}
static inline u32 ioctrl_reset_linkreset_m(void)
{
	return 0x3U << 8U;
}
static inline u32 ioctrl_reset_linkreset_v(u32 r)
{
	return (r >> 8U) & 0x3U;
}
static inline u32 ioctrl_debug_reset_r(void)
{
	return 0x00000144U;
}
static inline u32 ioctrl_debug_reset_link_f(u32 v)
{
	return (v & 0x3U) << 0U;
}
static inline u32 ioctrl_debug_reset_link_m(void)
{
	return 0x3U << 0U;
}
static inline u32 ioctrl_debug_reset_link_v(u32 r)
{
	return (r >> 0U) & 0x3U;
}
static inline u32 ioctrl_debug_reset_common_f(u32 v)
{
	return (v & 0x1U) << 31U;
}
static inline u32 ioctrl_debug_reset_common_m(void)
{
	return 0x1U << 31U;
}
static inline u32 ioctrl_debug_reset_common_v(u32 r)
{
	return (r >> 31U) & 0x1U;
}
/* per-link clock control, 4-byte stride */
static inline u32 ioctrl_clock_control_r(u32 i)
{
	return 0x00000180U + i*4U;
}
static inline u32 ioctrl_clock_control__size_1_v(void)
{
	return 0x00000002U;
}
static inline u32 ioctrl_clock_control_clkdis_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 ioctrl_clock_control_clkdis_m(void)
{
	return 0x1U << 0U;
}
static inline u32 ioctrl_clock_control_clkdis_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
/* top-level interrupt status: per-link bits plus a common bit */
static inline u32 ioctrl_top_intr_0_status_r(void)
{
	return 0x00000200U;
}
static inline u32 ioctrl_top_intr_0_status_link_f(u32 v)
{
	return (v & 0x3U) << 0U;
}
static inline u32 ioctrl_top_intr_0_status_link_m(void)
{
	return 0x3U << 0U;
}
static inline u32 ioctrl_top_intr_0_status_link_v(u32 r)
{
	return (r >> 0U) & 0x3U;
}
static inline u32 ioctrl_top_intr_0_status_common_f(u32 v)
{
	return (v & 0x1U) << 31U;
}
static inline u32 ioctrl_top_intr_0_status_common_m(void)
{
	return 0x1U << 31U;
}
static inline u32 ioctrl_top_intr_0_status_common_v(u32 r)
{
	return (r >> 31U) & 0x1U;
}
/* common interrupt mask/status: fatal/nonfatal/correctable/intrA/intrB */
static inline u32 ioctrl_common_intr_0_mask_r(void)
{
	return 0x00000220U;
}
static inline u32 ioctrl_common_intr_0_mask_fatal_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 ioctrl_common_intr_0_mask_fatal_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ioctrl_common_intr_0_mask_nonfatal_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 ioctrl_common_intr_0_mask_nonfatal_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 ioctrl_common_intr_0_mask_correctable_f(u32 v)
{
	return (v & 0x1U) << 2U;
}
static inline u32 ioctrl_common_intr_0_mask_correctable_v(u32 r)
{
	return (r >> 2U) & 0x1U;
}
static inline u32 ioctrl_common_intr_0_mask_intra_f(u32 v)
{
	return (v & 0x1U) << 3U;
}
static inline u32 ioctrl_common_intr_0_mask_intra_v(u32 r)
{
	return (r >> 3U) & 0x1U;
}
static inline u32 ioctrl_common_intr_0_mask_intrb_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 ioctrl_common_intr_0_mask_intrb_v(u32 r)
{
	return (r >> 4U) & 0x1U;
}
static inline u32 ioctrl_common_intr_0_status_r(void)
{
	return 0x00000224U;
}
static inline u32 ioctrl_common_intr_0_status_fatal_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 ioctrl_common_intr_0_status_fatal_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ioctrl_common_intr_0_status_nonfatal_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 ioctrl_common_intr_0_status_nonfatal_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 ioctrl_common_intr_0_status_correctable_f(u32 v)
{
	return (v & 0x1U) << 2U;
}
static inline u32 ioctrl_common_intr_0_status_correctable_v(u32 r)
{
	return (r >> 2U) & 0x1U;
}
static inline u32 ioctrl_common_intr_0_status_intra_f(u32 v)
{
	return (v & 0x1U) << 3U;
}
static inline u32 ioctrl_common_intr_0_status_intra_v(u32 r)
{
	return (r >> 3U) & 0x1U;
}
static inline u32 ioctrl_common_intr_0_status_intrb_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 ioctrl_common_intr_0_status_intrb_v(u32 r)
{
	return (r >> 4U) & 0x1U;
}
/* per-link interrupt mask/status, 20-byte stride per link */
static inline u32 ioctrl_link_intr_0_mask_r(u32 i)
{
	return 0x00000240U + i*20U;
}
static inline u32 ioctrl_link_intr_0_mask_fatal_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 ioctrl_link_intr_0_mask_fatal_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ioctrl_link_intr_0_mask_nonfatal_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 ioctrl_link_intr_0_mask_nonfatal_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 ioctrl_link_intr_0_mask_correctable_f(u32 v)
{
	return (v & 0x1U) << 2U;
}
static inline u32 ioctrl_link_intr_0_mask_correctable_v(u32 r)
{
	return (r >> 2U) & 0x1U;
}
static inline u32 ioctrl_link_intr_0_mask_intra_f(u32 v)
{
	return (v & 0x1U) << 3U;
}
static inline u32 ioctrl_link_intr_0_mask_intra_v(u32 r)
{
	return (r >> 3U) & 0x1U;
}
static inline u32 ioctrl_link_intr_0_mask_intrb_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 ioctrl_link_intr_0_mask_intrb_v(u32 r)
{
	return (r >> 4U) & 0x1U;
}
static inline u32 ioctrl_link_intr_0_status_r(u32 i)
{
	return 0x00000244U + i*20U;
}
static inline u32 ioctrl_link_intr_0_status_fatal_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 ioctrl_link_intr_0_status_fatal_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ioctrl_link_intr_0_status_nonfatal_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 ioctrl_link_intr_0_status_nonfatal_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 ioctrl_link_intr_0_status_correctable_f(u32 v)
{
	return (v & 0x1U) << 2U;
}
static inline u32 ioctrl_link_intr_0_status_correctable_v(u32 r)
{
	return (r >> 2U) & 0x1U;
}
static inline u32 ioctrl_link_intr_0_status_intra_f(u32 v)
{
	return (v & 0x1U) << 3U;
}
static inline u32 ioctrl_link_intr_0_status_intra_v(u32 r)
{
	return (r >> 3U) & 0x1U;
}
static inline u32 ioctrl_link_intr_0_status_intrb_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 ioctrl_link_intr_0_status_intrb_v(u32 r)
{
	return (r >> 4U) & 0x1U;
}
#endif

View File

@@ -0,0 +1,323 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_ioctrlmif_tu104_h_
#define _hw_ioctrlmif_tu104_h_
/*
 * NV_IOCTRLMIF (NVLink memory interface) error register accessors, tu104.
 * RX error bank: status 0xe00, log 0xe04, report 0xe08, contain 0xe0c,
 * first 0xe14. Fields: RX RAM data parity error (bit 3) and RX RAM header
 * parity error (bit 4). __prod values are the production defaults.
 */
static inline u32 ioctrlmif_rx_err_contain_en_0_r(void)
{
	return 0x00000e0cU;
}
static inline u32 ioctrlmif_rx_err_contain_en_0_rxramdataparityerr_f(u32 v)
{
	return (v & 0x1U) << 3U;
}
static inline u32 ioctrlmif_rx_err_contain_en_0_rxramdataparityerr_m(void)
{
	return 0x1U << 3U;
}
static inline u32 ioctrlmif_rx_err_contain_en_0_rxramdataparityerr_v(u32 r)
{
	return (r >> 3U) & 0x1U;
}
static inline u32 ioctrlmif_rx_err_contain_en_0_rxramhdrparityerr_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 ioctrlmif_rx_err_contain_en_0_rxramhdrparityerr_m(void)
{
	return 0x1U << 4U;
}
static inline u32 ioctrlmif_rx_err_contain_en_0_rxramhdrparityerr_v(u32 r)
{
	return (r >> 4U) & 0x1U;
}
static inline u32 ioctrlmif_rx_err_contain_en_0_rxramhdrparityerr__prod_v(void)
{
	return 0x00000001U;
}
static inline u32 ioctrlmif_rx_err_contain_en_0_rxramhdrparityerr__prod_f(void)
{
	return 0x10U;
}
static inline u32 ioctrlmif_rx_err_log_en_0_r(void)
{
	return 0x00000e04U;
}
static inline u32 ioctrlmif_rx_err_log_en_0_rxramdataparityerr_f(u32 v)
{
	return (v & 0x1U) << 3U;
}
static inline u32 ioctrlmif_rx_err_log_en_0_rxramdataparityerr_m(void)
{
	return 0x1U << 3U;
}
static inline u32 ioctrlmif_rx_err_log_en_0_rxramdataparityerr_v(u32 r)
{
	return (r >> 3U) & 0x1U;
}
static inline u32 ioctrlmif_rx_err_log_en_0_rxramhdrparityerr_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 ioctrlmif_rx_err_log_en_0_rxramhdrparityerr_m(void)
{
	return 0x1U << 4U;
}
static inline u32 ioctrlmif_rx_err_log_en_0_rxramhdrparityerr_v(u32 r)
{
	return (r >> 4U) & 0x1U;
}
static inline u32 ioctrlmif_rx_err_report_en_0_r(void)
{
	return 0x00000e08U;
}
static inline u32 ioctrlmif_rx_err_report_en_0_rxramdataparityerr_f(u32 v)
{
	return (v & 0x1U) << 3U;
}
static inline u32 ioctrlmif_rx_err_report_en_0_rxramdataparityerr_m(void)
{
	return 0x1U << 3U;
}
static inline u32 ioctrlmif_rx_err_report_en_0_rxramdataparityerr_v(u32 r)
{
	return (r >> 3U) & 0x1U;
}
static inline u32 ioctrlmif_rx_err_report_en_0_rxramhdrparityerr_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 ioctrlmif_rx_err_report_en_0_rxramhdrparityerr_m(void)
{
	return 0x1U << 4U;
}
static inline u32 ioctrlmif_rx_err_report_en_0_rxramhdrparityerr_v(u32 r)
{
	return (r >> 4U) & 0x1U;
}
static inline u32 ioctrlmif_rx_err_status_0_r(void)
{
	return 0x00000e00U;
}
static inline u32 ioctrlmif_rx_err_status_0_rxramdataparityerr_f(u32 v)
{
	return (v & 0x1U) << 3U;
}
static inline u32 ioctrlmif_rx_err_status_0_rxramdataparityerr_m(void)
{
	return 0x1U << 3U;
}
static inline u32 ioctrlmif_rx_err_status_0_rxramdataparityerr_v(u32 r)
{
	return (r >> 3U) & 0x1U;
}
static inline u32 ioctrlmif_rx_err_status_0_rxramhdrparityerr_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 ioctrlmif_rx_err_status_0_rxramhdrparityerr_m(void)
{
	return 0x1U << 4U;
}
static inline u32 ioctrlmif_rx_err_status_0_rxramhdrparityerr_v(u32 r)
{
	return (r >> 4U) & 0x1U;
}
static inline u32 ioctrlmif_rx_err_first_0_r(void)
{
	return 0x00000e14U;
}
/*
 * TX error bank: status 0xa84, log 0xa88, report 0xa8c, contain 0xa90,
 * first 0xa98. Fields: TX RAM data parity error (bit 0) and TX RAM
 * header parity error (bit 1).
 */
static inline u32 ioctrlmif_tx_err_contain_en_0_r(void)
{
	return 0x00000a90U;
}
static inline u32 ioctrlmif_tx_err_contain_en_0_txramdataparityerr_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 ioctrlmif_tx_err_contain_en_0_txramdataparityerr_m(void)
{
	return 0x1U << 0U;
}
static inline u32 ioctrlmif_tx_err_contain_en_0_txramdataparityerr_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ioctrlmif_tx_err_contain_en_0_txramdataparityerr__prod_v(void)
{
	return 0x00000001U;
}
static inline u32 ioctrlmif_tx_err_contain_en_0_txramdataparityerr__prod_f(void)
{
	return 0x1U;
}
static inline u32 ioctrlmif_tx_err_contain_en_0_txramhdrparityerr_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 ioctrlmif_tx_err_contain_en_0_txramhdrparityerr_m(void)
{
	return 0x1U << 1U;
}
static inline u32 ioctrlmif_tx_err_contain_en_0_txramhdrparityerr_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 ioctrlmif_tx_err_contain_en_0_txramhdrparityerr__prod_v(void)
{
	return 0x00000001U;
}
static inline u32 ioctrlmif_tx_err_contain_en_0_txramhdrparityerr__prod_f(void)
{
	return 0x2U;
}
static inline u32 ioctrlmif_tx_err_log_en_0_r(void)
{
	return 0x00000a88U;
}
static inline u32 ioctrlmif_tx_err_log_en_0_txramdataparityerr_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 ioctrlmif_tx_err_log_en_0_txramdataparityerr_m(void)
{
	return 0x1U << 0U;
}
static inline u32 ioctrlmif_tx_err_log_en_0_txramdataparityerr_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ioctrlmif_tx_err_log_en_0_txramhdrparityerr_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 ioctrlmif_tx_err_log_en_0_txramhdrparityerr_m(void)
{
	return 0x1U << 1U;
}
static inline u32 ioctrlmif_tx_err_log_en_0_txramhdrparityerr_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
/*
 * TX error report-enable register offset. The TX error register bank is
 * laid out at stride 4 (status 0xa84, log 0xa88, report 0xa8c, contain
 * 0xa90), mirroring the RX bank at 0xe00/0xe04/0xe08/0xe0c. The previous
 * value 0x00000e08 duplicated the RX report-enable offset (copy-paste
 * defect), so TX report enables would have been written to the RX
 * register instead.
 */
static inline u32 ioctrlmif_tx_err_report_en_0_r(void)
{
	return 0x00000a8cU;
}
/* TX report-enable fields: data parity (bit 0), header parity (bit 1) */
static inline u32 ioctrlmif_tx_err_report_en_0_txramdataparityerr_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 ioctrlmif_tx_err_report_en_0_txramdataparityerr_m(void)
{
	return 0x1U << 0U;
}
static inline u32 ioctrlmif_tx_err_report_en_0_txramdataparityerr_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ioctrlmif_tx_err_report_en_0_txramhdrparityerr_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 ioctrlmif_tx_err_report_en_0_txramhdrparityerr_m(void)
{
	return 0x1U << 1U;
}
static inline u32 ioctrlmif_tx_err_report_en_0_txramhdrparityerr_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 ioctrlmif_tx_err_status_0_r(void)
{
	return 0x00000a84U;
}
static inline u32 ioctrlmif_tx_err_status_0_txramdataparityerr_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 ioctrlmif_tx_err_status_0_txramdataparityerr_m(void)
{
	return 0x1U << 0U;
}
static inline u32 ioctrlmif_tx_err_status_0_txramdataparityerr_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ioctrlmif_tx_err_status_0_txramhdrparityerr_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 ioctrlmif_tx_err_status_0_txramhdrparityerr_m(void)
{
	return 0x1U << 1U;
}
static inline u32 ioctrlmif_tx_err_status_0_txramhdrparityerr_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 ioctrlmif_tx_err_first_0_r(void)
{
	return 0x00000a98U;
}
/* buffer-ready handshake registers */
static inline u32 ioctrlmif_tx_ctrl_buffer_ready_r(void)
{
	return 0x00000a7cU;
}
static inline u32 ioctrlmif_rx_ctrl_buffer_ready_r(void)
{
	return 0x00000dfcU;
}
#endif

View File

@@ -0,0 +1,643 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_ltc_tu104_h_
#define _hw_ltc_tu104_h_
/*
 * NV_LTC (level-2 cache) register accessors for tu104. The "ltcs"/"ltss"
 * forms are the broadcast apertures covering all LTCs/slices; "ltc0",
 * "ltc1", "lts0" address individual units.
 */
static inline u32 ltc_pltcg_base_v(void)
{
	return 0x00140000U;
}
static inline u32 ltc_pltcg_extent_v(void)
{
	return 0x0017ffffU;
}
static inline u32 ltc_ltc0_ltss_v(void)
{
	return 0x00140200U;
}
static inline u32 ltc_ltc0_lts0_v(void)
{
	return 0x00140400U;
}
static inline u32 ltc_ltcs_ltss_v(void)
{
	return 0x0017e200U;
}
static inline u32 ltc_ltcs_lts0_cbc_ctrl1_r(void)
{
	return 0x0014046cU;
}
static inline u32 ltc_ltc0_lts0_dstg_cfg0_r(void)
{
	return 0x00140518U;
}
static inline u32 ltc_ltcs_ltss_dstg_cfg0_r(void)
{
	return 0x0017e318U;
}
static inline u32 ltc_ltcs_ltss_dstg_cfg0_vdc_4to2_disable_m(void)
{
	return 0x1U << 15U;
}
static inline u32 ltc_ltc0_lts0_tstg_cfg1_r(void)
{
	return 0x00140494U;
}
static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_ways_v(u32 r)
{
	return (r >> 0U) & 0xffffU;
}
static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_v(u32 r)
{
	return (r >> 16U) & 0x3U;
}
static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v(void)
{
	return 0x00000000U;
}
static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v(void)
{
	return 0x00000002U;
}
/* compression bit carveout (CBC) control and layout registers */
static inline u32 ltc_ltcs_ltss_cbc_ctrl1_r(void)
{
	return 0x0017e26cU;
}
static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clean_active_f(void)
{
	return 0x1U;
}
static inline u32 ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f(void)
{
	return 0x2U;
}
static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_v(u32 r)
{
	return (r >> 2U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_f(void)
{
	return 0x4U;
}
static inline u32 ltc_ltc0_lts0_cbc_ctrl1_r(void)
{
	return 0x0014046cU;
}
static inline u32 ltc_ltcs_ltss_cbc_ctrl2_r(void)
{
	return 0x0017e270U;
}
static inline u32 ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f(u32 v)
{
	return (v & 0xfffffU) << 0U;
}
static inline u32 ltc_ltcs_ltss_cbc_ctrl3_r(void)
{
	return 0x0017e274U;
}
static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f(u32 v)
{
	return (v & 0xfffffU) << 0U;
}
static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v(void)
{
	return 0x000fffffU;
}
static inline u32 ltc_ltcs_ltss_cbc_base_r(void)
{
	return 0x0017e278U;
}
static inline u32 ltc_ltcs_ltss_cbc_base_alignment_shift_v(void)
{
	return 0x0000000bU;
}
static inline u32 ltc_ltcs_ltss_cbc_base_address_v(u32 r)
{
	return (r >> 0U) & 0x3ffffffU;
}
static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_r(void)
{
	return 0x0017e27cU;
}
static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs__v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_nvlink_peer_through_l2_f(u32 v)
{
	return (v & 0x1U) << 24U;
}
static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_nvlink_peer_through_l2_v(u32 r)
{
	return (r >> 24U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_serialize_f(u32 v)
{
	return (v & 0x1U) << 25U;
}
static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_serialize_v(u32 r)
{
	return (r >> 25U) & 0x1U;
}
static inline u32 ltc_ltcs_misc_ltc_num_active_ltcs_r(void)
{
	return 0x0017e000U;
}
static inline u32 ltc_ltcs_ltss_cbc_param_r(void)
{
	return 0x0017e280U;
}
static inline u32 ltc_ltcs_ltss_cbc_param_bytes_per_comptagline_per_slice_v(u32 r)
{
	return (r >> 0U) & 0x3ffU;
}
static inline u32 ltc_ltcs_ltss_cbc_param_amap_divide_rounding_v(u32 r)
{
	return (r >> 10U) & 0x3U;
}
static inline u32 ltc_ltcs_ltss_cbc_param_amap_swizzle_rounding_v(u32 r)
{
	return (r >> 12U) & 0x3U;
}
static inline u32 ltc_ltcs_ltss_cbc_param2_r(void)
{
	return 0x0017e3f4U;
}
static inline u32 ltc_ltcs_ltss_cbc_param2_gobs_per_comptagline_per_slice_v(u32 r)
{
	return (r >> 0U) & 0xffffU;
}
static inline u32 ltc_ltcs_ltss_cbc_param2_num_cache_lines_v(u32 r)
{
	return (r >> 16U) & 0xffU;
}
static inline u32 ltc_ltcs_ltss_cbc_param2_cache_line_size_v(u32 r)
{
	return (r >> 24U) & 0xfU;
}
static inline u32 ltc_ltcs_ltss_cbc_param2_slices_per_ltc_v(u32 r)
{
	return (r >> 28U) & 0xfU;
}
static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_r(void)
{
	return 0x0017e2acU;
}
static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_max_ways_evict_last_f(u32 v)
{
	return (v & 0x1fU) << 16U;
}
/* ZBC (zero-bandwidth clear) table index and clear-value registers */
static inline u32 ltc_ltcs_ltss_dstg_zbc_index_r(void)
{
	return 0x0017e338U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_index_address_f(u32 v)
{
	return (v & 0xfU) << 0U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(u32 i)
{
	return 0x0017e33cU + i*4U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(void)
{
	return 0x00000004U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(void)
{
	return 0x0017e34cU;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_s(void)
{
	return 32U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_m(void)
{
	return 0xffffffffU << 0U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_v(u32 r)
{
	return (r >> 0U) & 0xffffffffU;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_r(void)
{
	return 0x0017e204U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_s(void)
{
	return 8U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_m(void)
{
	return 0xffU << 0U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_v(u32 r)
{
	return (r >> 0U) & 0xffU;
}
static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_r(void)
{
	return 0x0017e2b0U;
}
static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(void)
{
	return 0x10000000U;
}
/* ELPG (power gating) flush registers, broadcast and per-LTC */
static inline u32 ltc_ltcs_ltss_g_elpg_r(void)
{
	return 0x0017e214U;
}
static inline u32 ltc_ltcs_ltss_g_elpg_flush_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_f(void)
{
	return 0x1U;
}
static inline u32 ltc_ltc0_ltss_g_elpg_r(void)
{
	return 0x00140214U;
}
static inline u32 ltc_ltc0_ltss_g_elpg_flush_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_f(void)
{
	return 0x1U;
}
static inline u32 ltc_ltc1_ltss_g_elpg_r(void)
{
	return 0x00142214U;
}
static inline u32 ltc_ltc1_ltss_g_elpg_flush_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ltc_ltc1_ltss_g_elpg_flush_pending_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltc1_ltss_g_elpg_flush_pending_f(void)
{
	return 0x1U;
}
/* LTC interrupt and ECC error reporting */
static inline u32 ltc_ltcs_ltss_intr_r(void)
{
	return 0x0017e20cU;
}
static inline u32 ltc_ltcs_ltss_intr_ecc_sec_error_pending_f(void)
{
	return 0x100U;
}
static inline u32 ltc_ltcs_ltss_intr_ecc_ded_error_pending_f(void)
{
	return 0x200U;
}
static inline u32 ltc_ltcs_ltss_intr_en_evicted_cb_m(void)
{
	return 0x1U << 20U;
}
static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_m(void)
{
	return 0x1U << 21U;
}
static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_enabled_f(void)
{
	return 0x200000U;
}
static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_disabled_f(void)
{
	return 0x0U;
}
static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_access_m(void)
{
	return 0x1U << 30U;
}
static inline u32 ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f(void)
{
	return 0x1000000U;
}
static inline u32 ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f(void)
{
	return 0x2000000U;
}
static inline u32 ltc_ltc0_lts0_intr_r(void)
{
	return 0x0014040cU;
}
static inline u32 ltc_ltc0_lts0_dstg_ecc_report_r(void)
{
	return 0x0014051cU;
}
static inline u32 ltc_ltc0_lts0_dstg_ecc_report_sec_count_m(void)
{
	return 0xffU << 0U;
}
static inline u32 ltc_ltc0_lts0_dstg_ecc_report_sec_count_v(u32 r)
{
	return (r >> 0U) & 0xffU;
}
static inline u32 ltc_ltc0_lts0_dstg_ecc_report_ded_count_m(void)
{
	return 0xffU << 16U;
}
static inline u32 ltc_ltc0_lts0_dstg_ecc_report_ded_count_v(u32 r)
{
	return (r >> 16U) & 0xffU;
}
/* cache maintenance (invalidate/clean) management registers */
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_r(void)
{
	return 0x0017e2a0U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_f(void)
{
	return 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_v(u32 r)
{
	return (r >> 8U) & 0xfU;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_v(void)
{
	return 0x00000003U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_f(void)
{
	return 0x300U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_v(u32 r)
{
	return (r >> 28U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_f(void)
{
	return 0x10000000U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_v(u32 r)
{
	return (r >> 29U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_f(void)
{
	return 0x20000000U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_v(u32 r)
{
	return (r >> 30U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_f(void)
{
	return 0x40000000U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_r(void)
{
	return 0x0017e2a4U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_f(void)
{
	return 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_v(u32 r)
{
	return (r >> 8U) & 0xfU;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_v(void)
{
	return 0x00000003U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_f(void)
{
	return 0x300U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_v(u32 r)
{
	return (r >> 16U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_f(void)
{
	return 0x10000U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_v(u32 r)
{
	return (r >> 28U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_f(void)
{
	return 0x10000000U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_v(u32 r)
{
	return (r >> 29U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_f(void)
{
	return 0x20000000U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_v(u32 r)
{
	return (r >> 30U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_f(void)
{
	return 0x40000000U;
}
static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_r(void)
{
	return 0x001402a0U;
}
static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_f(void)
{
	return 0x1U;
}
static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_r(void)
{
	return 0x001402a4U;
}
static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_f(void)
{
	return 0x1U;
}
static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_r(void)
{
	return 0x001422a0U;
}
static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_f(void)
{
	return 0x1U;
}
static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_r(void)
{
	return 0x001422a4U;
}
static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_f(void)
{
	return 0x1U;
}
static inline u32 ltc_ltc0_lts0_tstg_info_1_r(void)
{
	return 0x0014058cU;
}
static inline u32 ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(u32 r)
{
return (r >> 0U) & 0xffffU;
}
static inline u32 ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(u32 r)
{
return (r >> 16U) & 0x1fU;
}
#endif

View File

@@ -0,0 +1,227 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_mc_tu104_h_
#define _hw_mc_tu104_h_
/*
 * Auto-generated TU104 MC (master control) register accessors.
 * Convention per the file header: _r() offset, _f() shifted field value,
 * _m() shifted field mask, _v() field extracted from a register value.
 * NOTE(review): values appear generator-produced — do not hand-edit.
 */

/* MC_BOOT_0 at offset 0: chip identification fields. */
static inline u32 mc_boot_0_r(void)
{
	return 0x00000000U;
}
static inline u32 mc_boot_0_architecture_v(u32 r)
{
	return (r >> 24U) & 0x1fU;
}
static inline u32 mc_boot_0_implementation_v(u32 r)
{
	return (r >> 20U) & 0xfU;
}
static inline u32 mc_boot_0_major_revision_v(u32 r)
{
	return (r >> 4U) & 0xfU;
}
static inline u32 mc_boot_0_minor_revision_v(u32 r)
{
	return (r >> 0U) & 0xfU;
}

/* MC_INTR(i): top-level interrupt pending bits per interrupt tree i. */
static inline u32 mc_intr_r(u32 i)
{
	return 0x00000100U + i*4U;
}
static inline u32 mc_intr_pfifo_pending_f(void)
{
	return 0x100U;
}
static inline u32 mc_intr_hub_pending_f(void)
{
	return 0x200U;
}
static inline u32 mc_intr_pfb_pending_f(void)
{
	return 0x2000U;
}
static inline u32 mc_intr_pgraph_pending_f(void)
{
	return 0x1000U;
}
static inline u32 mc_intr_pmu_pending_f(void)
{
	return 0x1000000U;
}
static inline u32 mc_intr_ltc_pending_f(void)
{
	return 0x2000000U;
}
static inline u32 mc_intr_priv_ring_pending_f(void)
{
	return 0x40000000U;
}
static inline u32 mc_intr_pbus_pending_f(void)
{
	return 0x10000000U;
}
static inline u32 mc_intr_nvlink_pending_f(void)
{
	return 0x400000U;
}

/* Interrupt enable registers: read, set (write-1-to-set), clear. */
static inline u32 mc_intr_en_r(u32 i)
{
	return 0x00000140U + i*4U;
}
static inline u32 mc_intr_en_set_r(u32 i)
{
	return 0x00000160U + i*4U;
}
static inline u32 mc_intr_en_clear_r(u32 i)
{
	return 0x00000180U + i*4U;
}

/* MC_ENABLE: per-engine enable bits. */
static inline u32 mc_enable_r(void)
{
	return 0x00000200U;
}
static inline u32 mc_enable_pmedia_s(void)
{
	return 1U;
}
static inline u32 mc_enable_pmedia_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 mc_enable_pmedia_m(void)
{
	return 0x1U << 4U;
}
static inline u32 mc_enable_pmedia_v(u32 r)
{
	return (r >> 4U) & 0x1U;
}
static inline u32 mc_enable_ce0_m(void)
{
	return 0x1U << 6U;
}
static inline u32 mc_enable_pfifo_enabled_f(void)
{
	return 0x100U;
}
static inline u32 mc_enable_pgraph_enabled_f(void)
{
	return 0x1000U;
}
static inline u32 mc_enable_pwr_v(u32 r)
{
	return (r >> 13U) & 0x1U;
}
static inline u32 mc_enable_pwr_disabled_v(void)
{
	return 0x00000000U;
}
static inline u32 mc_enable_pwr_enabled_f(void)
{
	return 0x2000U;
}
static inline u32 mc_enable_ce2_m(void)
{
	return 0x1U << 21U;
}
static inline u32 mc_enable_ce2_enabled_f(void)
{
	return 0x200000U;
}
static inline u32 mc_enable_blg_enabled_f(void)
{
	return 0x8000000U;
}
static inline u32 mc_enable_perfmon_enabled_f(void)
{
	return 0x10000000U;
}
static inline u32 mc_enable_nvdec_disabled_v(void)
{
	return 0x00000000U;
}
static inline u32 mc_enable_nvdec_enabled_f(void)
{
	return 0x8000U;
}
static inline u32 mc_enable_nvlink_disabled_v(void)
{
	return 0x00000000U;
}
static inline u32 mc_enable_nvlink_disabled_f(void)
{
	return 0x0U;
}
static inline u32 mc_enable_nvlink_enabled_v(void)
{
	return 0x00000001U;
}
static inline u32 mc_enable_nvlink_enabled_f(void)
{
	return 0x2000000U;
}

/* Leaf interrupt status registers for LTC and FBPA. */
static inline u32 mc_intr_ltc_r(void)
{
	return 0x000001c0U;
}
static inline u32 mc_intr_fbpa_r(void)
{
	return 0x000001d0U;
}
static inline u32 mc_intr_fbpa_part_mask_v(u32 r)
{
	return (r >> 0U) & 0x1ffffU;
}
#endif

View File

@@ -0,0 +1,935 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_minion_tu104_h_
#define _hw_minion_tu104_h_
/*
 * Auto-generated TU104 NVLink MINION register accessors (MINION is the
 * link-management falcon; offsets are relative to its register base).
 * Convention per the file header: _r() offset, _f() shifted field value,
 * _m() shifted field mask, _v() field extracted from a register value.
 * NOTE(review): values appear generator-produced — do not hand-edit.
 */

/* MINION_STATUS: boot status and interrupt code fields. */
static inline u32 minion_minion_status_r(void)
{
	return 0x00000830U;
}
static inline u32 minion_minion_status_status_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 minion_minion_status_status_m(void)
{
	return 0xffU << 0U;
}
static inline u32 minion_minion_status_status_v(u32 r)
{
	return (r >> 0U) & 0xffU;
}
static inline u32 minion_minion_status_status_boot_v(void)
{
	return 0x00000001U;
}
static inline u32 minion_minion_status_status_boot_f(void)
{
	return 0x1U;
}
static inline u32 minion_minion_status_intr_code_f(u32 v)
{
	return (v & 0xffffffU) << 8U;
}
static inline u32 minion_minion_status_intr_code_m(void)
{
	return 0xffffffU << 8U;
}
static inline u32 minion_minion_status_intr_code_v(u32 r)
{
	return (r >> 8U) & 0xffffffU;
}

/* Falcon interrupt status/mask registers (standard falcon layout). */
static inline u32 minion_falcon_irqstat_r(void)
{
	return 0x00000008U;
}
static inline u32 minion_falcon_irqstat_halt_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 minion_falcon_irqstat_halt_v(u32 r)
{
	return (r >> 4U) & 0x1U;
}
static inline u32 minion_falcon_irqmask_r(void)
{
	return 0x00000018U;
}
static inline u32 minion_falcon_irqsclr_r(void)
{
	return 0x00000004U;
}
static inline u32 minion_falcon_irqsset_r(void)
{
	return 0x00000000U;
}

/* IRQMSET: write-1-to-set interrupt mask bits (wdtmr/halt/exterr/swgen). */
static inline u32 minion_falcon_irqmset_r(void)
{
	return 0x00000010U;
}
static inline u32 minion_falcon_irqmset_wdtmr_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 minion_falcon_irqmset_wdtmr_m(void)
{
	return 0x1U << 1U;
}
static inline u32 minion_falcon_irqmset_wdtmr_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 minion_falcon_irqmset_wdtmr_set_v(void)
{
	return 0x00000001U;
}
static inline u32 minion_falcon_irqmset_wdtmr_set_f(void)
{
	return 0x2U;
}
static inline u32 minion_falcon_irqmset_halt_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 minion_falcon_irqmset_halt_m(void)
{
	return 0x1U << 4U;
}
static inline u32 minion_falcon_irqmset_halt_v(u32 r)
{
	return (r >> 4U) & 0x1U;
}
static inline u32 minion_falcon_irqmset_halt_set_v(void)
{
	return 0x00000001U;
}
static inline u32 minion_falcon_irqmset_halt_set_f(void)
{
	return 0x10U;
}
static inline u32 minion_falcon_irqmset_exterr_f(u32 v)
{
	return (v & 0x1U) << 5U;
}
static inline u32 minion_falcon_irqmset_exterr_m(void)
{
	return 0x1U << 5U;
}
static inline u32 minion_falcon_irqmset_exterr_v(u32 r)
{
	return (r >> 5U) & 0x1U;
}
static inline u32 minion_falcon_irqmset_exterr_set_v(void)
{
	return 0x00000001U;
}
static inline u32 minion_falcon_irqmset_exterr_set_f(void)
{
	return 0x20U;
}
static inline u32 minion_falcon_irqmset_swgen0_f(u32 v)
{
	return (v & 0x1U) << 6U;
}
static inline u32 minion_falcon_irqmset_swgen0_m(void)
{
	return 0x1U << 6U;
}
static inline u32 minion_falcon_irqmset_swgen0_v(u32 r)
{
	return (r >> 6U) & 0x1U;
}
static inline u32 minion_falcon_irqmset_swgen0_set_v(void)
{
	return 0x00000001U;
}
static inline u32 minion_falcon_irqmset_swgen0_set_f(void)
{
	return 0x40U;
}
static inline u32 minion_falcon_irqmset_swgen1_f(u32 v)
{
	return (v & 0x1U) << 7U;
}
static inline u32 minion_falcon_irqmset_swgen1_m(void)
{
	return 0x1U << 7U;
}
static inline u32 minion_falcon_irqmset_swgen1_v(u32 r)
{
	return (r >> 7U) & 0x1U;
}
static inline u32 minion_falcon_irqmset_swgen1_set_v(void)
{
	return 0x00000001U;
}
static inline u32 minion_falcon_irqmset_swgen1_set_f(void)
{
	return 0x80U;
}

/*
 * IRQDEST: routes each interrupt source to host (bits 1-7) and selects
 * the host target/normal destination (bits 17-23).
 */
static inline u32 minion_falcon_irqdest_r(void)
{
	return 0x0000001cU;
}
static inline u32 minion_falcon_irqdest_host_wdtmr_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 minion_falcon_irqdest_host_wdtmr_m(void)
{
	return 0x1U << 1U;
}
static inline u32 minion_falcon_irqdest_host_wdtmr_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 minion_falcon_irqdest_host_wdtmr_host_v(void)
{
	return 0x00000001U;
}
static inline u32 minion_falcon_irqdest_host_wdtmr_host_f(void)
{
	return 0x2U;
}
static inline u32 minion_falcon_irqdest_host_halt_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 minion_falcon_irqdest_host_halt_m(void)
{
	return 0x1U << 4U;
}
static inline u32 minion_falcon_irqdest_host_halt_v(u32 r)
{
	return (r >> 4U) & 0x1U;
}
static inline u32 minion_falcon_irqdest_host_halt_host_v(void)
{
	return 0x00000001U;
}
static inline u32 minion_falcon_irqdest_host_halt_host_f(void)
{
	return 0x10U;
}
static inline u32 minion_falcon_irqdest_host_exterr_f(u32 v)
{
	return (v & 0x1U) << 5U;
}
static inline u32 minion_falcon_irqdest_host_exterr_m(void)
{
	return 0x1U << 5U;
}
static inline u32 minion_falcon_irqdest_host_exterr_v(u32 r)
{
	return (r >> 5U) & 0x1U;
}
static inline u32 minion_falcon_irqdest_host_exterr_host_v(void)
{
	return 0x00000001U;
}
static inline u32 minion_falcon_irqdest_host_exterr_host_f(void)
{
	return 0x20U;
}
static inline u32 minion_falcon_irqdest_host_swgen0_f(u32 v)
{
	return (v & 0x1U) << 6U;
}
static inline u32 minion_falcon_irqdest_host_swgen0_m(void)
{
	return 0x1U << 6U;
}
static inline u32 minion_falcon_irqdest_host_swgen0_v(u32 r)
{
	return (r >> 6U) & 0x1U;
}
static inline u32 minion_falcon_irqdest_host_swgen0_host_v(void)
{
	return 0x00000001U;
}
static inline u32 minion_falcon_irqdest_host_swgen0_host_f(void)
{
	return 0x40U;
}
static inline u32 minion_falcon_irqdest_host_swgen1_f(u32 v)
{
	return (v & 0x1U) << 7U;
}
static inline u32 minion_falcon_irqdest_host_swgen1_m(void)
{
	return 0x1U << 7U;
}
static inline u32 minion_falcon_irqdest_host_swgen1_v(u32 r)
{
	return (r >> 7U) & 0x1U;
}
static inline u32 minion_falcon_irqdest_host_swgen1_host_v(void)
{
	return 0x00000001U;
}
static inline u32 minion_falcon_irqdest_host_swgen1_host_f(void)
{
	return 0x80U;
}
static inline u32 minion_falcon_irqdest_target_wdtmr_f(u32 v)
{
	return (v & 0x1U) << 17U;
}
static inline u32 minion_falcon_irqdest_target_wdtmr_m(void)
{
	return 0x1U << 17U;
}
static inline u32 minion_falcon_irqdest_target_wdtmr_v(u32 r)
{
	return (r >> 17U) & 0x1U;
}
static inline u32 minion_falcon_irqdest_target_wdtmr_host_normal_v(void)
{
	return 0x00000000U;
}
static inline u32 minion_falcon_irqdest_target_wdtmr_host_normal_f(void)
{
	return 0x0U;
}
static inline u32 minion_falcon_irqdest_target_halt_f(u32 v)
{
	return (v & 0x1U) << 20U;
}
static inline u32 minion_falcon_irqdest_target_halt_m(void)
{
	return 0x1U << 20U;
}
static inline u32 minion_falcon_irqdest_target_halt_v(u32 r)
{
	return (r >> 20U) & 0x1U;
}
static inline u32 minion_falcon_irqdest_target_halt_host_normal_v(void)
{
	return 0x00000000U;
}
static inline u32 minion_falcon_irqdest_target_halt_host_normal_f(void)
{
	return 0x0U;
}
static inline u32 minion_falcon_irqdest_target_exterr_f(u32 v)
{
	return (v & 0x1U) << 21U;
}
static inline u32 minion_falcon_irqdest_target_exterr_m(void)
{
	return 0x1U << 21U;
}
static inline u32 minion_falcon_irqdest_target_exterr_v(u32 r)
{
	return (r >> 21U) & 0x1U;
}
static inline u32 minion_falcon_irqdest_target_exterr_host_normal_v(void)
{
	return 0x00000000U;
}
static inline u32 minion_falcon_irqdest_target_exterr_host_normal_f(void)
{
	return 0x0U;
}
static inline u32 minion_falcon_irqdest_target_swgen0_f(u32 v)
{
	return (v & 0x1U) << 22U;
}
static inline u32 minion_falcon_irqdest_target_swgen0_m(void)
{
	return 0x1U << 22U;
}
static inline u32 minion_falcon_irqdest_target_swgen0_v(u32 r)
{
	return (r >> 22U) & 0x1U;
}
static inline u32 minion_falcon_irqdest_target_swgen0_host_normal_v(void)
{
	return 0x00000000U;
}
static inline u32 minion_falcon_irqdest_target_swgen0_host_normal_f(void)
{
	return 0x0U;
}
static inline u32 minion_falcon_irqdest_target_swgen1_f(u32 v)
{
	return (v & 0x1U) << 23U;
}
static inline u32 minion_falcon_irqdest_target_swgen1_m(void)
{
	return 0x1U << 23U;
}
static inline u32 minion_falcon_irqdest_target_swgen1_v(u32 r)
{
	return (r >> 23U) & 0x1U;
}
static inline u32 minion_falcon_irqdest_target_swgen1_host_normal_v(void)
{
	return 0x00000000U;
}
static inline u32 minion_falcon_irqdest_target_swgen1_host_normal_f(void)
{
	return 0x0U;
}

/* Falcon OS version and mailbox registers. */
static inline u32 minion_falcon_os_r(void)
{
	return 0x00000080U;
}
static inline u32 minion_falcon_mailbox1_r(void)
{
	return 0x00000044U;
}

/* MINION_INTR: aggregated interrupt status (fatal/nonfatal/falcon/link). */
static inline u32 minion_minion_intr_r(void)
{
	return 0x00000810U;
}
static inline u32 minion_minion_intr_fatal_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 minion_minion_intr_fatal_m(void)
{
	return 0x1U << 0U;
}
static inline u32 minion_minion_intr_fatal_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 minion_minion_intr_nonfatal_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 minion_minion_intr_nonfatal_m(void)
{
	return 0x1U << 1U;
}
static inline u32 minion_minion_intr_nonfatal_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 minion_minion_intr_falcon_stall_f(u32 v)
{
	return (v & 0x1U) << 2U;
}
static inline u32 minion_minion_intr_falcon_stall_m(void)
{
	return 0x1U << 2U;
}
static inline u32 minion_minion_intr_falcon_stall_v(u32 r)
{
	return (r >> 2U) & 0x1U;
}
static inline u32 minion_minion_intr_falcon_nostall_f(u32 v)
{
	return (v & 0x1U) << 3U;
}
static inline u32 minion_minion_intr_falcon_nostall_m(void)
{
	return 0x1U << 3U;
}
static inline u32 minion_minion_intr_falcon_nostall_v(u32 r)
{
	return (r >> 3U) & 0x1U;
}
static inline u32 minion_minion_intr_link_f(u32 v)
{
	return (v & 0xffffU) << 16U;
}
static inline u32 minion_minion_intr_link_m(void)
{
	return 0xffffU << 16U;
}
static inline u32 minion_minion_intr_link_v(u32 r)
{
	return (r >> 16U) & 0xffffU;
}

/* Stalling / non-stalling interrupt enables with per-source fields. */
static inline u32 minion_minion_intr_nonstall_en_r(void)
{
	return 0x0000081cU;
}
static inline u32 minion_minion_intr_stall_en_r(void)
{
	return 0x00000818U;
}
static inline u32 minion_minion_intr_stall_en_fatal_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 minion_minion_intr_stall_en_fatal_m(void)
{
	return 0x1U << 0U;
}
static inline u32 minion_minion_intr_stall_en_fatal_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 minion_minion_intr_stall_en_fatal_enable_v(void)
{
	return 0x00000001U;
}
static inline u32 minion_minion_intr_stall_en_fatal_enable_f(void)
{
	return 0x1U;
}
static inline u32 minion_minion_intr_stall_en_fatal_disable_v(void)
{
	return 0x00000000U;
}
static inline u32 minion_minion_intr_stall_en_fatal_disable_f(void)
{
	return 0x0U;
}
static inline u32 minion_minion_intr_stall_en_nonfatal_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 minion_minion_intr_stall_en_nonfatal_m(void)
{
	return 0x1U << 1U;
}
static inline u32 minion_minion_intr_stall_en_nonfatal_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 minion_minion_intr_stall_en_nonfatal_enable_v(void)
{
	return 0x00000001U;
}
static inline u32 minion_minion_intr_stall_en_nonfatal_enable_f(void)
{
	return 0x2U;
}
static inline u32 minion_minion_intr_stall_en_nonfatal_disable_v(void)
{
	return 0x00000000U;
}
static inline u32 minion_minion_intr_stall_en_nonfatal_disable_f(void)
{
	return 0x0U;
}
static inline u32 minion_minion_intr_stall_en_falcon_stall_f(u32 v)
{
	return (v & 0x1U) << 2U;
}
static inline u32 minion_minion_intr_stall_en_falcon_stall_m(void)
{
	return 0x1U << 2U;
}
static inline u32 minion_minion_intr_stall_en_falcon_stall_v(u32 r)
{
	return (r >> 2U) & 0x1U;
}
static inline u32 minion_minion_intr_stall_en_falcon_stall_enable_v(void)
{
	return 0x00000001U;
}
static inline u32 minion_minion_intr_stall_en_falcon_stall_enable_f(void)
{
	return 0x4U;
}
static inline u32 minion_minion_intr_stall_en_falcon_stall_disable_v(void)
{
	return 0x00000000U;
}
static inline u32 minion_minion_intr_stall_en_falcon_stall_disable_f(void)
{
	return 0x0U;
}
static inline u32 minion_minion_intr_stall_en_falcon_nostall_f(u32 v)
{
	return (v & 0x1U) << 3U;
}
static inline u32 minion_minion_intr_stall_en_falcon_nostall_m(void)
{
	return 0x1U << 3U;
}
static inline u32 minion_minion_intr_stall_en_falcon_nostall_v(u32 r)
{
	return (r >> 3U) & 0x1U;
}
static inline u32 minion_minion_intr_stall_en_falcon_nostall_enable_v(void)
{
	return 0x00000001U;
}
static inline u32 minion_minion_intr_stall_en_falcon_nostall_enable_f(void)
{
	return 0x8U;
}
static inline u32 minion_minion_intr_stall_en_falcon_nostall_disable_v(void)
{
	return 0x00000000U;
}
static inline u32 minion_minion_intr_stall_en_falcon_nostall_disable_f(void)
{
	return 0x0U;
}
static inline u32 minion_minion_intr_stall_en_link_f(u32 v)
{
	return (v & 0xffffU) << 16U;
}
static inline u32 minion_minion_intr_stall_en_link_m(void)
{
	return 0xffffU << 16U;
}
static inline u32 minion_minion_intr_stall_en_link_v(u32 r)
{
	return (r >> 16U) & 0xffffU;
}

/*
 * NVLINK_DL_CMD(i): per-link data-link command register (one per link,
 * __size_1 links), with the command opcode encodings used by the driver
 * plus fault/ready handshake bits.
 */
static inline u32 minion_nvlink_dl_cmd_r(u32 i)
{
	return 0x00000900U + i*4U;
}
static inline u32 minion_nvlink_dl_cmd___size_1_v(void)
{
	return 0x00000002U;
}
static inline u32 minion_nvlink_dl_cmd_command_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 minion_nvlink_dl_cmd_command_v(u32 r)
{
	return (r >> 0U) & 0xffU;
}
static inline u32 minion_nvlink_dl_cmd_command_configeom_v(void)
{
	return 0x00000040U;
}
static inline u32 minion_nvlink_dl_cmd_command_configeom_f(void)
{
	return 0x40U;
}
static inline u32 minion_nvlink_dl_cmd_command_nop_v(void)
{
	return 0x00000000U;
}
static inline u32 minion_nvlink_dl_cmd_command_nop_f(void)
{
	return 0x0U;
}
static inline u32 minion_nvlink_dl_cmd_command_initphy_v(void)
{
	return 0x00000001U;
}
static inline u32 minion_nvlink_dl_cmd_command_initphy_f(void)
{
	return 0x1U;
}
static inline u32 minion_nvlink_dl_cmd_command_initlaneenable_v(void)
{
	return 0x00000003U;
}
static inline u32 minion_nvlink_dl_cmd_command_initlaneenable_f(void)
{
	return 0x3U;
}
static inline u32 minion_nvlink_dl_cmd_command_initdlpl_v(void)
{
	return 0x00000004U;
}
static inline u32 minion_nvlink_dl_cmd_command_initdlpl_f(void)
{
	return 0x4U;
}
static inline u32 minion_nvlink_dl_cmd_command_lanedisable_v(void)
{
	return 0x00000008U;
}
static inline u32 minion_nvlink_dl_cmd_command_lanedisable_f(void)
{
	return 0x8U;
}
static inline u32 minion_nvlink_dl_cmd_command_fastlanedisable_v(void)
{
	return 0x00000009U;
}
static inline u32 minion_nvlink_dl_cmd_command_fastlanedisable_f(void)
{
	return 0x9U;
}
static inline u32 minion_nvlink_dl_cmd_command_laneshutdown_v(void)
{
	return 0x0000000cU;
}
static inline u32 minion_nvlink_dl_cmd_command_laneshutdown_f(void)
{
	return 0xcU;
}
static inline u32 minion_nvlink_dl_cmd_command_setacmode_v(void)
{
	return 0x0000000aU;
}
static inline u32 minion_nvlink_dl_cmd_command_setacmode_f(void)
{
	return 0xaU;
}
static inline u32 minion_nvlink_dl_cmd_command_clracmode_v(void)
{
	return 0x0000000bU;
}
static inline u32 minion_nvlink_dl_cmd_command_clracmode_f(void)
{
	return 0xbU;
}
static inline u32 minion_nvlink_dl_cmd_command_enablepm_v(void)
{
	return 0x00000010U;
}
static inline u32 minion_nvlink_dl_cmd_command_enablepm_f(void)
{
	return 0x10U;
}
static inline u32 minion_nvlink_dl_cmd_command_disablepm_v(void)
{
	return 0x00000011U;
}
static inline u32 minion_nvlink_dl_cmd_command_disablepm_f(void)
{
	return 0x11U;
}
static inline u32 minion_nvlink_dl_cmd_command_savestate_v(void)
{
	return 0x00000018U;
}
static inline u32 minion_nvlink_dl_cmd_command_savestate_f(void)
{
	return 0x18U;
}
static inline u32 minion_nvlink_dl_cmd_command_restorestate_v(void)
{
	return 0x00000019U;
}
static inline u32 minion_nvlink_dl_cmd_command_restorestate_f(void)
{
	return 0x19U;
}
/* INITPLL variants 0-7 occupy opcodes 0x20-0x27. */
static inline u32 minion_nvlink_dl_cmd_command_initpll_0_v(void)
{
	return 0x00000020U;
}
static inline u32 minion_nvlink_dl_cmd_command_initpll_0_f(void)
{
	return 0x20U;
}
static inline u32 minion_nvlink_dl_cmd_command_initpll_1_v(void)
{
	return 0x00000021U;
}
static inline u32 minion_nvlink_dl_cmd_command_initpll_1_f(void)
{
	return 0x21U;
}
static inline u32 minion_nvlink_dl_cmd_command_initpll_2_v(void)
{
	return 0x00000022U;
}
static inline u32 minion_nvlink_dl_cmd_command_initpll_2_f(void)
{
	return 0x22U;
}
static inline u32 minion_nvlink_dl_cmd_command_initpll_3_v(void)
{
	return 0x00000023U;
}
static inline u32 minion_nvlink_dl_cmd_command_initpll_3_f(void)
{
	return 0x23U;
}
static inline u32 minion_nvlink_dl_cmd_command_initpll_4_v(void)
{
	return 0x00000024U;
}
static inline u32 minion_nvlink_dl_cmd_command_initpll_4_f(void)
{
	return 0x24U;
}
static inline u32 minion_nvlink_dl_cmd_command_initpll_5_v(void)
{
	return 0x00000025U;
}
static inline u32 minion_nvlink_dl_cmd_command_initpll_5_f(void)
{
	return 0x25U;
}
static inline u32 minion_nvlink_dl_cmd_command_initpll_6_v(void)
{
	return 0x00000026U;
}
static inline u32 minion_nvlink_dl_cmd_command_initpll_6_f(void)
{
	return 0x26U;
}
static inline u32 minion_nvlink_dl_cmd_command_initpll_7_v(void)
{
	return 0x00000027U;
}
static inline u32 minion_nvlink_dl_cmd_command_initpll_7_f(void)
{
	return 0x27U;
}
/* Turing-specific command opcodes. */
static inline u32 minion_nvlink_dl_cmd_command_turing_rxdet_v(void)
{
	return 0x00000058U;
}
static inline u32 minion_nvlink_dl_cmd_command_txclkswitch_pll_v(void)
{
	return 0x00000014U;
}
static inline u32 minion_nvlink_dl_cmd_command_turing_initdlpl_to_chipa_v(void)
{
	return 0x00000060U;
}
static inline u32 minion_nvlink_dl_cmd_command_inittl_v(void)
{
	return 0x00000006U;
}
/* DL_CMD handshake: fault (bit 30) and ready (bit 31). */
static inline u32 minion_nvlink_dl_cmd_fault_f(u32 v)
{
	return (v & 0x1U) << 30U;
}
static inline u32 minion_nvlink_dl_cmd_fault_v(u32 r)
{
	return (r >> 30U) & 0x1U;
}
static inline u32 minion_nvlink_dl_cmd_ready_f(u32 v)
{
	return (v & 0x1U) << 31U;
}
static inline u32 minion_nvlink_dl_cmd_ready_v(u32 r)
{
	return (r >> 31U) & 0x1U;
}

/* MISC_0: full-width software read/write scratch field. */
static inline u32 minion_misc_0_r(void)
{
	return 0x000008b0U;
}
static inline u32 minion_misc_0_scratch_swrw_0_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 minion_misc_0_scratch_swrw_0_v(u32 r)
{
	return (r >> 0U) & 0xffffffffU;
}

/* NVLINK_LINK_INTR(i): per-link interrupt code/subcode/state fields. */
static inline u32 minion_nvlink_link_intr_r(u32 i)
{
	return 0x00000a00U + i*4U;
}
static inline u32 minion_nvlink_link_intr___size_1_v(void)
{
	return 0x00000002U;
}
static inline u32 minion_nvlink_link_intr_code_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 minion_nvlink_link_intr_code_m(void)
{
	return 0xffU << 0U;
}
static inline u32 minion_nvlink_link_intr_code_v(u32 r)
{
	return (r >> 0U) & 0xffU;
}
static inline u32 minion_nvlink_link_intr_code_na_v(void)
{
	return 0x00000000U;
}
static inline u32 minion_nvlink_link_intr_code_na_f(void)
{
	return 0x0U;
}
static inline u32 minion_nvlink_link_intr_code_swreq_v(void)
{
	return 0x00000001U;
}
static inline u32 minion_nvlink_link_intr_code_swreq_f(void)
{
	return 0x1U;
}
static inline u32 minion_nvlink_link_intr_code_dlreq_v(void)
{
	return 0x00000002U;
}
static inline u32 minion_nvlink_link_intr_code_dlreq_f(void)
{
	return 0x2U;
}
static inline u32 minion_nvlink_link_intr_subcode_f(u32 v)
{
	return (v & 0xffU) << 8U;
}
static inline u32 minion_nvlink_link_intr_subcode_m(void)
{
	return 0xffU << 8U;
}
static inline u32 minion_nvlink_link_intr_subcode_v(u32 r)
{
	return (r >> 8U) & 0xffU;
}
static inline u32 minion_nvlink_link_intr_state_f(u32 v)
{
	return (v & 0x1U) << 31U;
}
static inline u32 minion_nvlink_link_intr_state_m(void)
{
	return 0x1U << 31U;
}
static inline u32 minion_nvlink_link_intr_state_v(u32 r)
{
	return (r >> 31U) & 0x1U;
}
#endif

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,59 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_nvlinkip_discovery_tu104_h_
#define _hw_nvlinkip_discovery_tu104_h_
#endif

/* ---- scrape artifact: commit-page "View File" link + diff hunk header (@@ -0,0 +1,279 @@) marking the start of the next new file (hw_nvlipt_tu104 header) ---- */
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
/*
 * Auto-generated register accessors for the TU104 NVLIPT (NVLink I/O
 * Termination) unit. Each function returns a register offset, field mask,
 * or field value per the naming convention documented above.
 *
 * Fix: the original include guard `_hw_nvlipt_tu104_h_` began with an
 * underscore at file scope, which is an identifier reserved for the
 * implementation (C11 7.1.3 / CERT DCL37-C). Renamed to a non-reserved
 * form; all accessor bodies are unchanged.
 */
#ifndef NVGPU_HW_NVLIPT_TU104_H
#define NVGPU_HW_NVLIPT_TU104_H
/* Per-link interrupt enable control (link 0). */
static inline u32 nvlipt_intr_control_link0_r(void)
{
	return 0x000004b4U;
}
static inline u32 nvlipt_intr_control_link0_stallenable_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 nvlipt_intr_control_link0_stallenable_m(void)
{
	return 0x1U << 0U;
}
static inline u32 nvlipt_intr_control_link0_stallenable_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 nvlipt_intr_control_link0_nostallenable_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 nvlipt_intr_control_link0_nostallenable_m(void)
{
	return 0x1U << 1U;
}
static inline u32 nvlipt_intr_control_link0_nostallenable_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
/* Uncorrectable error status bits for link 0. */
static inline u32 nvlipt_err_uc_status_link0_r(void)
{
	return 0x00000524U;
}
static inline u32 nvlipt_err_uc_status_link0_dlprotocol_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 nvlipt_err_uc_status_link0_dlprotocol_v(u32 r)
{
	return (r >> 4U) & 0x1U;
}
static inline u32 nvlipt_err_uc_status_link0_datapoisoned_f(u32 v)
{
	return (v & 0x1U) << 12U;
}
static inline u32 nvlipt_err_uc_status_link0_datapoisoned_v(u32 r)
{
	return (r >> 12U) & 0x1U;
}
static inline u32 nvlipt_err_uc_status_link0_flowcontrol_f(u32 v)
{
	return (v & 0x1U) << 13U;
}
static inline u32 nvlipt_err_uc_status_link0_flowcontrol_v(u32 r)
{
	return (r >> 13U) & 0x1U;
}
static inline u32 nvlipt_err_uc_status_link0_responsetimeout_f(u32 v)
{
	return (v & 0x1U) << 14U;
}
static inline u32 nvlipt_err_uc_status_link0_responsetimeout_v(u32 r)
{
	return (r >> 14U) & 0x1U;
}
static inline u32 nvlipt_err_uc_status_link0_targeterror_f(u32 v)
{
	return (v & 0x1U) << 15U;
}
static inline u32 nvlipt_err_uc_status_link0_targeterror_v(u32 r)
{
	return (r >> 15U) & 0x1U;
}
static inline u32 nvlipt_err_uc_status_link0_unexpectedresponse_f(u32 v)
{
	return (v & 0x1U) << 16U;
}
static inline u32 nvlipt_err_uc_status_link0_unexpectedresponse_v(u32 r)
{
	return (r >> 16U) & 0x1U;
}
static inline u32 nvlipt_err_uc_status_link0_receiveroverflow_f(u32 v)
{
	return (v & 0x1U) << 17U;
}
static inline u32 nvlipt_err_uc_status_link0_receiveroverflow_v(u32 r)
{
	return (r >> 17U) & 0x1U;
}
static inline u32 nvlipt_err_uc_status_link0_malformedpacket_f(u32 v)
{
	return (v & 0x1U) << 18U;
}
static inline u32 nvlipt_err_uc_status_link0_malformedpacket_v(u32 r)
{
	return (r >> 18U) & 0x1U;
}
static inline u32 nvlipt_err_uc_status_link0_stompedpacketreceived_f(u32 v)
{
	return (v & 0x1U) << 19U;
}
static inline u32 nvlipt_err_uc_status_link0_stompedpacketreceived_v(u32 r)
{
	return (r >> 19U) & 0x1U;
}
static inline u32 nvlipt_err_uc_status_link0_unsupportedrequest_f(u32 v)
{
	return (v & 0x1U) << 20U;
}
static inline u32 nvlipt_err_uc_status_link0_unsupportedrequest_v(u32 r)
{
	return (r >> 20U) & 0x1U;
}
static inline u32 nvlipt_err_uc_status_link0_ucinternal_f(u32 v)
{
	return (v & 0x1U) << 22U;
}
static inline u32 nvlipt_err_uc_status_link0_ucinternal_v(u32 r)
{
	return (r >> 22U) & 0x1U;
}
/* Uncorrectable error mask/severity/first/advisory registers (link 0). */
static inline u32 nvlipt_err_uc_mask_link0_r(void)
{
	return 0x00000528U;
}
static inline u32 nvlipt_err_uc_severity_link0_r(void)
{
	return 0x0000052cU;
}
static inline u32 nvlipt_err_uc_first_link0_r(void)
{
	return 0x00000530U;
}
static inline u32 nvlipt_err_uc_advisory_link0_r(void)
{
	return 0x00000534U;
}
/* Correctable error status/mask/first registers (link 0). */
static inline u32 nvlipt_err_c_status_link0_r(void)
{
	return 0x00000538U;
}
static inline u32 nvlipt_err_c_mask_link0_r(void)
{
	return 0x0000053cU;
}
static inline u32 nvlipt_err_c_first_link0_r(void)
{
	return 0x00000540U;
}
/* Per-link error reporting enables. */
static inline u32 nvlipt_err_control_link0_r(void)
{
	return 0x00000544U;
}
static inline u32 nvlipt_err_control_link0_fatalenable_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 nvlipt_err_control_link0_fatalenable_m(void)
{
	return 0x1U << 1U;
}
static inline u32 nvlipt_err_control_link0_fatalenable_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 nvlipt_err_control_link0_nonfatalenable_f(u32 v)
{
	return (v & 0x1U) << 2U;
}
static inline u32 nvlipt_err_control_link0_nonfatalenable_m(void)
{
	return 0x1U << 2U;
}
static inline u32 nvlipt_err_control_link0_nonfatalenable_v(u32 r)
{
	return (r >> 2U) & 0x1U;
}
/* Common (all-link) interrupt enable control. */
static inline u32 nvlipt_intr_control_common_r(void)
{
	return 0x000004b0U;
}
static inline u32 nvlipt_intr_control_common_stallenable_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 nvlipt_intr_control_common_stallenable_m(void)
{
	return 0x1U << 0U;
}
static inline u32 nvlipt_intr_control_common_stallenable_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 nvlipt_intr_control_common_nonstallenable_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 nvlipt_intr_control_common_nonstallenable_m(void)
{
	return 0x1U << 1U;
}
static inline u32 nvlipt_intr_control_common_nonstallenable_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
/* Cold-reset-persistent scratch register. */
static inline u32 nvlipt_scratch_cold_r(void)
{
	return 0x000007d4U;
}
static inline u32 nvlipt_scratch_cold_data_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 nvlipt_scratch_cold_data_v(u32 r)
{
	return (r >> 0U) & 0xffffffffU;
}
static inline u32 nvlipt_scratch_cold_data_init_v(void)
{
	return 0xdeadbaadU;
}
#endif /* NVGPU_HW_NVLIPT_TU104_H */

/* ---- scrape artifact: commit-page "View File" link + diff hunk header (@@ -0,0 +1,95 @@) marking the start of the next new file (hw_nvtlc_tu104 header) ---- */
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
/*
 * Auto-generated register accessors for the TU104 NVTLC (NVLink
 * Transaction Layer Controller) unit: TX/RX error report-enable,
 * status, and first-error registers.
 *
 * Fix: the original include guard `_hw_nvtlc_tu104_h_` began with an
 * underscore at file scope, which is an identifier reserved for the
 * implementation (C11 7.1.3 / CERT DCL37-C). Renamed to a non-reserved
 * form; all accessor bodies are unchanged.
 */
#ifndef NVGPU_HW_NVTLC_TU104_H
#define NVGPU_HW_NVTLC_TU104_H
static inline u32 nvtlc_tx_err_report_en_0_r(void)
{
	return 0x00000708U;
}
static inline u32 nvtlc_rx_err_report_en_0_r(void)
{
	return 0x00000f08U;
}
static inline u32 nvtlc_rx_err_report_en_1_r(void)
{
	return 0x00000f20U;
}
static inline u32 nvtlc_tx_err_status_0_r(void)
{
	return 0x00000700U;
}
static inline u32 nvtlc_rx_err_status_0_r(void)
{
	return 0x00000f00U;
}
static inline u32 nvtlc_rx_err_status_1_r(void)
{
	return 0x00000f18U;
}
static inline u32 nvtlc_tx_err_first_0_r(void)
{
	return 0x00000714U;
}
static inline u32 nvtlc_rx_err_first_0_r(void)
{
	return 0x00000f14U;
}
static inline u32 nvtlc_rx_err_first_1_r(void)
{
	return 0x00000f2cU;
}
#endif /* NVGPU_HW_NVTLC_TU104_H */

/* ---- scrape artifact: commit-page "View File" link + diff hunk header (@@ -0,0 +1,619 @@) marking the start of the next new file (hw_pbdma_tu104 header) ---- */
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
/*
 * Auto-generated register accessors for the TU104 PBDMA (pushbuffer DMA)
 * host units. Indexed accessors take the PBDMA instance `i`; each
 * instance's register block is 8192 bytes apart.
 *
 * Fix: the original include guard `_hw_pbdma_tu104_h_` began with an
 * underscore at file scope, which is an identifier reserved for the
 * implementation (C11 7.1.3 / CERT DCL37-C). Renamed to a non-reserved
 * form; all accessor bodies are unchanged.
 */
#ifndef NVGPU_HW_PBDMA_TU104_H
#define NVGPU_HW_PBDMA_TU104_H
/* GPFIFO entry layout and GP get/put/base pointers. */
static inline u32 pbdma_gp_entry1_r(void)
{
	return 0x10000004U;
}
static inline u32 pbdma_gp_entry1_get_hi_v(u32 r)
{
	return (r >> 0U) & 0xffU;
}
static inline u32 pbdma_gp_entry1_length_f(u32 v)
{
	return (v & 0x1fffffU) << 10U;
}
static inline u32 pbdma_gp_entry1_length_v(u32 r)
{
	return (r >> 10U) & 0x1fffffU;
}
static inline u32 pbdma_gp_base_r(u32 i)
{
	return 0x00040048U + i*8192U;
}
static inline u32 pbdma_gp_base__size_1_v(void)
{
	return 0x0000000cU;
}
static inline u32 pbdma_gp_base_offset_f(u32 v)
{
	return (v & 0x1fffffffU) << 3U;
}
static inline u32 pbdma_gp_base_rsvd_s(void)
{
	return 3U;
}
static inline u32 pbdma_gp_base_hi_r(u32 i)
{
	return 0x0004004cU + i*8192U;
}
static inline u32 pbdma_gp_base_hi_offset_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 pbdma_gp_base_hi_limit2_f(u32 v)
{
	return (v & 0x1fU) << 16U;
}
static inline u32 pbdma_gp_fetch_r(u32 i)
{
	return 0x00040050U + i*8192U;
}
static inline u32 pbdma_gp_get_r(u32 i)
{
	return 0x00040014U + i*8192U;
}
static inline u32 pbdma_gp_put_r(u32 i)
{
	return 0x00040000U + i*8192U;
}
/* Pushbuffer fetch/get/put pointers (lo/hi pairs). */
static inline u32 pbdma_pb_fetch_r(u32 i)
{
	return 0x00040054U + i*8192U;
}
static inline u32 pbdma_pb_fetch_hi_r(u32 i)
{
	return 0x00040058U + i*8192U;
}
static inline u32 pbdma_get_r(u32 i)
{
	return 0x00040018U + i*8192U;
}
static inline u32 pbdma_get_hi_r(u32 i)
{
	return 0x0004001cU + i*8192U;
}
static inline u32 pbdma_put_r(u32 i)
{
	return 0x0004005cU + i*8192U;
}
static inline u32 pbdma_put_hi_r(u32 i)
{
	return 0x00040060U + i*8192U;
}
/* Pushbuffer method header fields. */
static inline u32 pbdma_pb_header_r(u32 i)
{
	return 0x00040084U + i*8192U;
}
static inline u32 pbdma_pb_header_method_zero_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_pb_header_subchannel_zero_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_pb_header_level_main_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_pb_header_first_true_f(void)
{
	return 0x400000U;
}
static inline u32 pbdma_pb_header_type_inc_f(void)
{
	return 0x20000000U;
}
static inline u32 pbdma_pb_header_type_non_inc_f(void)
{
	return 0x60000000U;
}
/* Shadow copies of header and GP entries. */
static inline u32 pbdma_hdr_shadow_r(u32 i)
{
	return 0x00040118U + i*8192U;
}
static inline u32 pbdma_gp_shadow_0_r(u32 i)
{
	return 0x00040110U + i*8192U;
}
static inline u32 pbdma_gp_shadow_1_r(u32 i)
{
	return 0x00040114U + i*8192U;
}
static inline u32 pbdma_subdevice_r(u32 i)
{
	return 0x00040094U + i*8192U;
}
static inline u32 pbdma_subdevice_id_f(u32 v)
{
	return (v & 0xfffU) << 0U;
}
static inline u32 pbdma_subdevice_status_active_f(void)
{
	return 0x10000000U;
}
static inline u32 pbdma_subdevice_channel_dma_enable_f(void)
{
	return 0x20000000U;
}
/* Method/data FIFO registers. */
static inline u32 pbdma_method0_r(u32 i)
{
	return 0x000400c0U + i*8192U;
}
static inline u32 pbdma_method0_fifo_size_v(void)
{
	return 0x00000004U;
}
static inline u32 pbdma_method0_addr_f(u32 v)
{
	return (v & 0xfffU) << 2U;
}
static inline u32 pbdma_method0_addr_v(u32 r)
{
	return (r >> 2U) & 0xfffU;
}
static inline u32 pbdma_method0_subch_v(u32 r)
{
	return (r >> 16U) & 0x7U;
}
static inline u32 pbdma_method0_first_true_f(void)
{
	return 0x400000U;
}
static inline u32 pbdma_method0_valid_true_f(void)
{
	return 0x80000000U;
}
static inline u32 pbdma_method1_r(u32 i)
{
	return 0x000400c8U + i*8192U;
}
static inline u32 pbdma_method2_r(u32 i)
{
	return 0x000400d0U + i*8192U;
}
static inline u32 pbdma_method3_r(u32 i)
{
	return 0x000400d8U + i*8192U;
}
static inline u32 pbdma_data0_r(u32 i)
{
	return 0x000400c4U + i*8192U;
}
/* Semaphore-acquire retry/timeout configuration. */
static inline u32 pbdma_acquire_r(u32 i)
{
	return 0x00040030U + i*8192U;
}
static inline u32 pbdma_acquire_retry_man_2_f(void)
{
	return 0x2U;
}
static inline u32 pbdma_acquire_retry_exp_2_f(void)
{
	return 0x100U;
}
static inline u32 pbdma_acquire_timeout_exp_f(u32 v)
{
	return (v & 0xfU) << 11U;
}
static inline u32 pbdma_acquire_timeout_exp_max_v(void)
{
	return 0x0000000fU;
}
static inline u32 pbdma_acquire_timeout_exp_max_f(void)
{
	return 0x7800U;
}
static inline u32 pbdma_acquire_timeout_man_f(u32 v)
{
	return (v & 0xffffU) << 15U;
}
static inline u32 pbdma_acquire_timeout_man_max_v(void)
{
	return 0x0000ffffU;
}
static inline u32 pbdma_acquire_timeout_man_max_f(void)
{
	return 0x7fff8000U;
}
static inline u32 pbdma_acquire_timeout_en_enable_f(void)
{
	return 0x80000000U;
}
static inline u32 pbdma_acquire_timeout_en_disable_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_status_r(u32 i)
{
	return 0x00040100U + i*8192U;
}
static inline u32 pbdma_channel_r(u32 i)
{
	return 0x00040120U + i*8192U;
}
static inline u32 pbdma_signature_r(u32 i)
{
	return 0x00040010U + i*8192U;
}
static inline u32 pbdma_signature_hw_valid_f(void)
{
	return 0xfaceU;
}
static inline u32 pbdma_signature_sw_zero_f(void)
{
	return 0x0U;
}
/* USERD location (target aperture + address). */
static inline u32 pbdma_userd_r(u32 i)
{
	return 0x00040008U + i*8192U;
}
static inline u32 pbdma_userd_target_vid_mem_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_userd_target_sys_mem_coh_f(void)
{
	return 0x2U;
}
static inline u32 pbdma_userd_target_sys_mem_ncoh_f(void)
{
	return 0x3U;
}
static inline u32 pbdma_userd_addr_f(u32 v)
{
	return (v & 0x7fffffU) << 9U;
}
/* Per-PBDMA configuration (L2 eviction, CE split, auth level, writeback). */
static inline u32 pbdma_config_r(u32 i)
{
	return 0x000400f4U + i*8192U;
}
static inline u32 pbdma_config_l2_evict_first_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_config_l2_evict_normal_f(void)
{
	return 0x1U;
}
static inline u32 pbdma_config_ce_split_enable_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_config_ce_split_disable_f(void)
{
	return 0x10U;
}
static inline u32 pbdma_config_auth_level_non_privileged_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_config_auth_level_privileged_f(void)
{
	return 0x100U;
}
static inline u32 pbdma_config_userd_writeback_disable_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_config_userd_writeback_enable_f(void)
{
	return 0x1000U;
}
static inline u32 pbdma_userd_hi_r(u32 i)
{
	return 0x0004000cU + i*8192U;
}
static inline u32 pbdma_userd_hi_addr_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 pbdma_hce_ctrl_r(u32 i)
{
	return 0x000400e4U + i*8192U;
}
static inline u32 pbdma_hce_ctrl_hce_priv_mode_yes_f(void)
{
	return 0x20U;
}
/* Interrupt status 0: one pending bit per interrupt cause. */
static inline u32 pbdma_intr_0_r(u32 i)
{
	return 0x00040108U + i*8192U;
}
static inline u32 pbdma_intr_0_memreq_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 pbdma_intr_0_memreq_pending_f(void)
{
	return 0x1U;
}
static inline u32 pbdma_intr_0_memack_timeout_pending_f(void)
{
	return 0x2U;
}
static inline u32 pbdma_intr_0_memack_extra_pending_f(void)
{
	return 0x4U;
}
static inline u32 pbdma_intr_0_memdat_timeout_pending_f(void)
{
	return 0x8U;
}
static inline u32 pbdma_intr_0_memdat_extra_pending_f(void)
{
	return 0x10U;
}
static inline u32 pbdma_intr_0_memflush_pending_f(void)
{
	return 0x20U;
}
static inline u32 pbdma_intr_0_memop_pending_f(void)
{
	return 0x40U;
}
static inline u32 pbdma_intr_0_lbconnect_pending_f(void)
{
	return 0x80U;
}
static inline u32 pbdma_intr_0_lback_timeout_pending_f(void)
{
	return 0x200U;
}
static inline u32 pbdma_intr_0_lback_extra_pending_f(void)
{
	return 0x400U;
}
static inline u32 pbdma_intr_0_lbdat_timeout_pending_f(void)
{
	return 0x800U;
}
static inline u32 pbdma_intr_0_lbdat_extra_pending_f(void)
{
	return 0x1000U;
}
static inline u32 pbdma_intr_0_gpfifo_pending_f(void)
{
	return 0x2000U;
}
static inline u32 pbdma_intr_0_gpptr_pending_f(void)
{
	return 0x4000U;
}
static inline u32 pbdma_intr_0_gpentry_pending_f(void)
{
	return 0x8000U;
}
static inline u32 pbdma_intr_0_gpcrc_pending_f(void)
{
	return 0x10000U;
}
static inline u32 pbdma_intr_0_pbptr_pending_f(void)
{
	return 0x20000U;
}
static inline u32 pbdma_intr_0_pbentry_pending_f(void)
{
	return 0x40000U;
}
static inline u32 pbdma_intr_0_pbcrc_pending_f(void)
{
	return 0x80000U;
}
static inline u32 pbdma_intr_0_clear_faulted_error_pending_f(void)
{
	return 0x100000U;
}
static inline u32 pbdma_intr_0_method_pending_f(void)
{
	return 0x200000U;
}
static inline u32 pbdma_intr_0_methodcrc_pending_f(void)
{
	return 0x400000U;
}
static inline u32 pbdma_intr_0_device_pending_f(void)
{
	return 0x800000U;
}
static inline u32 pbdma_intr_0_eng_reset_pending_f(void)
{
	return 0x1000000U;
}
static inline u32 pbdma_intr_0_semaphore_pending_f(void)
{
	return 0x2000000U;
}
static inline u32 pbdma_intr_0_acquire_pending_f(void)
{
	return 0x4000000U;
}
static inline u32 pbdma_intr_0_pri_pending_f(void)
{
	return 0x8000000U;
}
static inline u32 pbdma_intr_0_no_ctxsw_seg_pending_f(void)
{
	return 0x20000000U;
}
static inline u32 pbdma_intr_0_pbseg_pending_f(void)
{
	return 0x40000000U;
}
static inline u32 pbdma_intr_0_signature_pending_f(void)
{
	return 0x80000000U;
}
/* Interrupt status 1 and interrupt-enable/stall registers. */
static inline u32 pbdma_intr_1_r(u32 i)
{
	return 0x00040148U + i*8192U;
}
static inline u32 pbdma_intr_1_ctxnotvalid_m(void)
{
	return 0x1U << 31U;
}
static inline u32 pbdma_intr_1_ctxnotvalid_pending_f(void)
{
	return 0x80000000U;
}
static inline u32 pbdma_intr_en_0_r(u32 i)
{
	return 0x0004010cU + i*8192U;
}
static inline u32 pbdma_intr_en_1_r(u32 i)
{
	return 0x0004014cU + i*8192U;
}
static inline u32 pbdma_intr_stall_r(u32 i)
{
	return 0x0004013cU + i*8192U;
}
static inline u32 pbdma_intr_stall_1_r(u32 i)
{
	return 0x00040140U + i*8192U;
}
static inline u32 pbdma_intr_stall_1_hce_illegal_op_enabled_f(void)
{
	return 0x1U;
}
static inline u32 pbdma_udma_nop_r(void)
{
	return 0x00000008U;
}
/* Target engine / TSG event reason fields. */
static inline u32 pbdma_target_r(u32 i)
{
	return 0x000400acU + i*8192U;
}
static inline u32 pbdma_target_engine_sw_f(void)
{
	return 0x1fU;
}
static inline u32 pbdma_target_eng_ctx_valid_true_f(void)
{
	return 0x10000U;
}
static inline u32 pbdma_target_eng_ctx_valid_false_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_target_ce_ctx_valid_true_f(void)
{
	return 0x20000U;
}
static inline u32 pbdma_target_ce_ctx_valid_false_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_target_host_tsg_event_reason_pbdma_idle_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_target_host_tsg_event_reason_semaphore_acquire_failure_f(void)
{
	return 0x1000000U;
}
static inline u32 pbdma_target_host_tsg_event_reason_tsg_yield_f(void)
{
	return 0x2000000U;
}
static inline u32 pbdma_target_host_tsg_event_reason_host_subchannel_switch_f(void)
{
	return 0x3000000U;
}
static inline u32 pbdma_target_should_send_tsg_event_true_f(void)
{
	return 0x20000000U;
}
static inline u32 pbdma_target_should_send_tsg_event_false_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_target_needs_host_tsg_event_true_f(void)
{
	return 0x80000000U;
}
static inline u32 pbdma_target_needs_host_tsg_event_false_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_set_channel_info_r(u32 i)
{
	return 0x000400fcU + i*8192U;
}
static inline u32 pbdma_set_channel_info_veid_f(u32 v)
{
	return (v & 0x3fU) << 8U;
}
/* PBDMA timeout period. */
static inline u32 pbdma_timeout_r(u32 i)
{
	return 0x0004012cU + i*8192U;
}
static inline u32 pbdma_timeout_period_m(void)
{
	return 0xffffffffU << 0U;
}
static inline u32 pbdma_timeout_period_max_f(void)
{
	return 0xffffffffU;
}
static inline u32 pbdma_timeout_period_init_f(void)
{
	return 0x10000U;
}
#endif /* NVGPU_HW_PBDMA_TU104_H */

/* ---- scrape artifact: commit-page "View File" link + diff hunk header (@@ -0,0 +1,263 @@) marking the start of the next new file (hw_perf_tu104 header) ---- */
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
/*
 * Auto-generated register accessors for the TU104 PERF (performance
 * monitoring) unit: PMM per-domain bases/extents and PMA system
 * control / memory-buffer streaming registers.
 *
 * Fix: the original include guard `_hw_perf_tu104_h_` began with an
 * underscore at file scope, which is an identifier reserved for the
 * implementation (C11 7.1.3 / CERT DCL37-C). Renamed to a non-reserved
 * form; all accessor bodies are unchanged.
 */
#ifndef NVGPU_HW_PERF_TU104_H
#define NVGPU_HW_PERF_TU104_H
/* PMM domain strides and aperture bases/extents. */
static inline u32 perf_pmmgpc_perdomain_offset_v(void)
{
	return 0x00000200U;
}
static inline u32 perf_pmmsys_perdomain_offset_v(void)
{
	return 0x00000200U;
}
static inline u32 perf_pmmgpc_base_v(void)
{
	return 0x00180000U;
}
static inline u32 perf_pmmgpc_extent_v(void)
{
	return 0x00183fffU;
}
static inline u32 perf_pmmsys_base_v(void)
{
	return 0x00240000U;
}
static inline u32 perf_pmmsys_extent_v(void)
{
	return 0x00243fffU;
}
static inline u32 perf_pmmfbp_base_v(void)
{
	return 0x00200000U;
}
/* PMA system control: membuf overflow status and clear. */
static inline u32 perf_pmasys_control_r(void)
{
	return 0x0024a000U;
}
static inline u32 perf_pmasys_control_membuf_status_v(u32 r)
{
	return (r >> 4U) & 0x1U;
}
static inline u32 perf_pmasys_control_membuf_status_overflowed_v(void)
{
	return 0x00000001U;
}
static inline u32 perf_pmasys_control_membuf_status_overflowed_f(void)
{
	return 0x10U;
}
static inline u32 perf_pmasys_control_membuf_clear_status_f(u32 v)
{
	return (v & 0x1U) << 5U;
}
static inline u32 perf_pmasys_control_membuf_clear_status_v(u32 r)
{
	return (r >> 5U) & 0x1U;
}
static inline u32 perf_pmasys_control_membuf_clear_status_doit_v(void)
{
	return 0x00000001U;
}
static inline u32 perf_pmasys_control_membuf_clear_status_doit_f(void)
{
	return 0x20U;
}
/* PMA memory block: base address, target aperture, valid bit. */
static inline u32 perf_pmasys_mem_block_r(void)
{
	return 0x0024a070U;
}
static inline u32 perf_pmasys_mem_block_base_f(u32 v)
{
	return (v & 0xfffffffU) << 0U;
}
static inline u32 perf_pmasys_mem_block_target_f(u32 v)
{
	return (v & 0x3U) << 28U;
}
static inline u32 perf_pmasys_mem_block_target_v(u32 r)
{
	return (r >> 28U) & 0x3U;
}
static inline u32 perf_pmasys_mem_block_target_lfb_v(void)
{
	return 0x00000000U;
}
static inline u32 perf_pmasys_mem_block_target_lfb_f(void)
{
	return 0x0U;
}
static inline u32 perf_pmasys_mem_block_target_sys_coh_v(void)
{
	return 0x00000002U;
}
static inline u32 perf_pmasys_mem_block_target_sys_coh_f(void)
{
	return 0x20000000U;
}
static inline u32 perf_pmasys_mem_block_target_sys_ncoh_v(void)
{
	return 0x00000003U;
}
static inline u32 perf_pmasys_mem_block_target_sys_ncoh_f(void)
{
	return 0x30000000U;
}
static inline u32 perf_pmasys_mem_block_valid_f(u32 v)
{
	return (v & 0x1U) << 31U;
}
static inline u32 perf_pmasys_mem_block_valid_v(u32 r)
{
	return (r >> 31U) & 0x1U;
}
static inline u32 perf_pmasys_mem_block_valid_true_v(void)
{
	return 0x00000001U;
}
static inline u32 perf_pmasys_mem_block_valid_true_f(void)
{
	return 0x80000000U;
}
static inline u32 perf_pmasys_mem_block_valid_false_v(void)
{
	return 0x00000000U;
}
static inline u32 perf_pmasys_mem_block_valid_false_f(void)
{
	return 0x0U;
}
/* PMA output buffer base/size and byte counters. */
static inline u32 perf_pmasys_outbase_r(void)
{
	return 0x0024a074U;
}
static inline u32 perf_pmasys_outbase_ptr_f(u32 v)
{
	return (v & 0x7ffffffU) << 5U;
}
static inline u32 perf_pmasys_outbaseupper_r(void)
{
	return 0x0024a078U;
}
static inline u32 perf_pmasys_outbaseupper_ptr_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 perf_pmasys_outsize_r(void)
{
	return 0x0024a07cU;
}
static inline u32 perf_pmasys_outsize_numbytes_f(u32 v)
{
	return (v & 0x7ffffffU) << 5U;
}
static inline u32 perf_pmasys_mem_bytes_r(void)
{
	return 0x0024a084U;
}
static inline u32 perf_pmasys_mem_bytes_numbytes_f(u32 v)
{
	return (v & 0xfffffffU) << 4U;
}
static inline u32 perf_pmasys_mem_bump_r(void)
{
	return 0x0024a088U;
}
static inline u32 perf_pmasys_mem_bump_numbytes_f(u32 v)
{
	return (v & 0xfffffffU) << 4U;
}
static inline u32 perf_pmasys_enginestatus_r(void)
{
	return 0x0024a0a4U;
}
static inline u32 perf_pmasys_enginestatus_rbufempty_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 perf_pmasys_enginestatus_rbufempty_empty_v(void)
{
	return 0x00000001U;
}
static inline u32 perf_pmasys_enginestatus_rbufempty_empty_f(void)
{
	return 0x10U;
}
/* Per-domain engine-select registers (sys/fbp/gpc). */
static inline u32 perf_pmmsys_engine_sel_r(u32 i)
{
	return 0x0024006cU + i*512U;
}
static inline u32 perf_pmmsys_engine_sel__size_1_v(void)
{
	return 0x00000020U;
}
static inline u32 perf_pmmfbp_engine_sel_r(u32 i)
{
	return 0x0020006cU + i*512U;
}
static inline u32 perf_pmmfbp_engine_sel__size_1_v(void)
{
	return 0x00000020U;
}
static inline u32 perf_pmmgpc_engine_sel_r(u32 i)
{
	return 0x0018006cU + i*512U;
}
static inline u32 perf_pmmgpc_engine_sel__size_1_v(void)
{
	return 0x00000020U;
}
#endif /* NVGPU_HW_PERF_TU104_H */

/* ---- scrape artifact: commit-page "View File" link + diff hunk header (@@ -0,0 +1,63 @@) marking the start of the next new file (another tu104 hw header, truncated in this chunk) ---- */
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_pnvdec_tu104_h_
#define _hw_pnvdec_tu104_h_
/*
 * Offset of the FALCON_IRQSSET register for NVDEC instance 'i'.
 * Instances are spaced 16 KiB (0x4000 bytes) apart from base 0x00830000.
 */
static inline u32 pnvdec_falcon_irqsset_r(u32 i)
{
	u32 instance_stride = 16384U;

	return 0x00830000U + (i * instance_stride);
}
#endif

View File

@@ -0,0 +1,63 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_pram_tu104_h_
#define _hw_pram_tu104_h_
/*
 * Offset of 32-bit data word 'i' in the PRAM window starting at
 * 0x00700000 (consecutive words, 4 bytes apart).
 */
static inline u32 pram_data032_r(u32 i)
{
	u32 word_off = i * 4U;

	return 0x00700000U + word_off;
}
#endif

View File

@@ -0,0 +1,167 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_pri_ringmaster_tu104_h_
#define _hw_pri_ringmaster_tu104_h_
/*
 * Auto-generated accessors for the TU104 PRI ringmaster registers
 * (PRI ring control, interrupt status and unit enumeration).  Do not
 * edit by hand — regenerate from the HW manuals.
 */
/* COMMAND: 6-bit cmd field selects the ring operation. */
static inline u32 pri_ringmaster_command_r(void)
{
	return 0x0012004cU;
}
static inline u32 pri_ringmaster_command_cmd_m(void)
{
	return 0x3fU << 0U;
}
static inline u32 pri_ringmaster_command_cmd_v(u32 r)
{
	return (r >> 0U) & 0x3fU;
}
static inline u32 pri_ringmaster_command_cmd_no_cmd_v(void)
{
	return 0x00000000U;
}
static inline u32 pri_ringmaster_command_cmd_start_ring_f(void)
{
	return 0x1U;
}
static inline u32 pri_ringmaster_command_cmd_ack_interrupt_f(void)
{
	return 0x2U;
}
static inline u32 pri_ringmaster_command_cmd_enumerate_stations_f(void)
{
	return 0x3U;
}
static inline u32 pri_ringmaster_command_cmd_enumerate_stations_bc_grp_all_f(void)
{
	return 0x0U;
}
static inline u32 pri_ringmaster_command_data_r(void)
{
	return 0x00120048U;
}
/* START_RESULTS: bit 0 reports ring connectivity after start_ring. */
static inline u32 pri_ringmaster_start_results_r(void)
{
	return 0x00120050U;
}
static inline u32 pri_ringmaster_start_results_connectivity_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 pri_ringmaster_start_results_connectivity_pass_v(void)
{
	return 0x00000001U;
}
/* INTR_STATUS0: per-bit fault flags (conn/disconnect/overflow/gbl write error). */
static inline u32 pri_ringmaster_intr_status0_r(void)
{
	return 0x00120058U;
}
static inline u32 pri_ringmaster_intr_status0_ring_start_conn_fault_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 pri_ringmaster_intr_status0_disconnect_fault_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 pri_ringmaster_intr_status0_overflow_fault_v(u32 r)
{
	return (r >> 2U) & 0x1U;
}
static inline u32 pri_ringmaster_intr_status0_gbl_write_error_sys_v(u32 r)
{
	return (r >> 8U) & 0x1U;
}
static inline u32 pri_ringmaster_intr_status1_r(void)
{
	return 0x0012005cU;
}
/* GLOBAL_CTL: bit 0 asserts/deasserts PRI ring reset. */
static inline u32 pri_ringmaster_global_ctl_r(void)
{
	return 0x00120060U;
}
static inline u32 pri_ringmaster_global_ctl_ring_reset_asserted_f(void)
{
	return 0x1U;
}
static inline u32 pri_ringmaster_global_ctl_ring_reset_deasserted_f(void)
{
	return 0x0U;
}
/* ENUM_*: 5-bit counts of enumerated FBP / GPC / LTC units on the ring. */
static inline u32 pri_ringmaster_enum_fbp_r(void)
{
	return 0x00120074U;
}
static inline u32 pri_ringmaster_enum_fbp_count_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 pri_ringmaster_enum_gpc_r(void)
{
	return 0x00120078U;
}
static inline u32 pri_ringmaster_enum_gpc_count_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 pri_ringmaster_enum_ltc_r(void)
{
	return 0x0012006cU;
}
static inline u32 pri_ringmaster_enum_ltc_count_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
#endif

View File

@@ -0,0 +1,75 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_pri_ringstation_gpc_tu104_h_
#define _hw_pri_ringstation_gpc_tu104_h_
/*
 * Auto-generated accessors for the TU104 GPC PRI ringstation priv-error
 * capture registers (GPC0 instance): faulting address, write data, info
 * and error code.  Do not edit by hand — regenerate from the HW manuals.
 */
static inline u32 pri_ringstation_gpc_gpc0_priv_error_adr_r(void)
{
	return 0x00128120U;
}
static inline u32 pri_ringstation_gpc_gpc0_priv_error_wrdat_r(void)
{
	return 0x00128124U;
}
static inline u32 pri_ringstation_gpc_gpc0_priv_error_info_r(void)
{
	return 0x00128128U;
}
static inline u32 pri_ringstation_gpc_gpc0_priv_error_code_r(void)
{
	return 0x0012812cU;
}
#endif

View File

@@ -0,0 +1,87 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_pri_ringstation_sys_tu104_h_
#define _hw_pri_ringstation_sys_tu104_h_
/*
 * Auto-generated accessors for the TU104 SYS PRI ringstation registers:
 * decode configuration and priv-error capture.  Do not edit by hand —
 * regenerate from the HW manuals.
 */
/* DECODE_CONFIG: 3-bit ring field; value 1 = drop accesses while ring not started. */
static inline u32 pri_ringstation_sys_decode_config_r(void)
{
	return 0x00122204U;
}
static inline u32 pri_ringstation_sys_decode_config_ring_m(void)
{
	return 0x7U << 0U;
}
static inline u32 pri_ringstation_sys_decode_config_ring_drop_on_ring_not_started_f(void)
{
	return 0x1U;
}
/* Priv-error capture: faulting address, write data, info and error code. */
static inline u32 pri_ringstation_sys_priv_error_adr_r(void)
{
	return 0x00122120U;
}
static inline u32 pri_ringstation_sys_priv_error_wrdat_r(void)
{
	return 0x00122124U;
}
static inline u32 pri_ringstation_sys_priv_error_info_r(void)
{
	return 0x00122128U;
}
static inline u32 pri_ringstation_sys_priv_error_code_r(void)
{
	return 0x0012212cU;
}
#endif

View File

@@ -0,0 +1,199 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_proj_tu104_h_
#define _hw_proj_tu104_h_
/*
 * Auto-generated chip-topology ("projection") constants for TU104:
 * per-unit register aperture bases/strides and litter counts taken from
 * the HW manuals.  Do not edit by hand — regenerate from the manuals.
 */
/* GPC aperture: unicast base, broadcast (shared) base, per-GPC stride. */
static inline u32 proj_gpc_base_v(void)
{
	return 0x00500000U;
}
static inline u32 proj_gpc_shared_base_v(void)
{
	return 0x00418000U;
}
static inline u32 proj_gpc_stride_v(void)
{
	return 0x00008000U;
}
static inline u32 proj_gpc_priv_stride_v(void)
{
	return 0x00000800U;
}
/* LTC / LTS (L2 cache) strides. */
static inline u32 proj_ltc_stride_v(void)
{
	return 0x00002000U;
}
static inline u32 proj_lts_stride_v(void)
{
	return 0x00000200U;
}
/* FBPA aperture. */
static inline u32 proj_fbpa_base_v(void)
{
	return 0x00900000U;
}
static inline u32 proj_fbpa_shared_base_v(void)
{
	return 0x009a0000U;
}
static inline u32 proj_fbpa_stride_v(void)
{
	return 0x00004000U;
}
/* PPC-within-GPC aperture (offsets relative to a GPC base). */
static inline u32 proj_ppc_in_gpc_base_v(void)
{
	return 0x00003000U;
}
static inline u32 proj_ppc_in_gpc_shared_base_v(void)
{
	return 0x00003e00U;
}
static inline u32 proj_ppc_in_gpc_stride_v(void)
{
	return 0x00000200U;
}
/* ROP aperture. */
static inline u32 proj_rop_base_v(void)
{
	return 0x00410000U;
}
static inline u32 proj_rop_shared_base_v(void)
{
	return 0x00408800U;
}
static inline u32 proj_rop_stride_v(void)
{
	return 0x00000400U;
}
/* TPC-within-GPC aperture (offsets relative to a GPC base). */
static inline u32 proj_tpc_in_gpc_base_v(void)
{
	return 0x00004000U;
}
static inline u32 proj_tpc_in_gpc_stride_v(void)
{
	return 0x00000800U;
}
static inline u32 proj_tpc_in_gpc_shared_base_v(void)
{
	return 0x00001800U;
}
/* SMPC aperture (offsets relative to a TPC base). */
static inline u32 proj_smpc_base_v(void)
{
	return 0x00000200U;
}
static inline u32 proj_smpc_shared_base_v(void)
{
	return 0x00000300U;
}
static inline u32 proj_smpc_unique_base_v(void)
{
	return 0x00000600U;
}
static inline u32 proj_smpc_stride_v(void)
{
	return 0x00000100U;
}
/*
 * Litter values: unit counts for this chip as generated from the
 * manuals (13 host engines, 12 PBDMAs, 6 GPCs, 6 TPC/GPC, ...).
 * NOTE(review): values are trusted as generated — verify against the
 * TU104 manuals rather than hand-editing.
 */
static inline u32 proj_host_num_engines_v(void)
{
	return 0x0000000dU;
}
static inline u32 proj_host_num_pbdma_v(void)
{
	return 0x0000000cU;
}
static inline u32 proj_scal_litter_num_tpc_per_gpc_v(void)
{
	return 0x00000006U;
}
static inline u32 proj_scal_litter_num_fbps_v(void)
{
	return 0x00000008U;
}
static inline u32 proj_scal_litter_num_fbpas_v(void)
{
	return 0x00000010U;
}
static inline u32 proj_scal_litter_num_gpcs_v(void)
{
	return 0x00000006U;
}
static inline u32 proj_scal_litter_num_pes_per_gpc_v(void)
{
	return 0x00000003U;
}
static inline u32 proj_scal_litter_num_tpcs_per_pes_v(void)
{
	return 0x00000002U;
}
static inline u32 proj_scal_litter_num_zcull_banks_v(void)
{
	return 0x00000004U;
}
static inline u32 proj_scal_litter_num_sm_per_tpc_v(void)
{
	return 0x00000002U;
}
/* Software scaling maximums (upper bounds used for array sizing). */
static inline u32 proj_scal_max_gpcs_v(void)
{
	return 0x00000020U;
}
static inline u32 proj_scal_max_tpc_per_gpc_v(void)
{
	return 0x00000008U;
}
static inline u32 proj_sm_stride_v(void)
{
	return 0x00000080U;
}
#endif

View File

@@ -0,0 +1,787 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_psec_tu104_h_
#define _hw_psec_tu104_h_
static inline u32 psec_falcon_irqsset_r(void)
{
return 0x00840000U;
}
static inline u32 psec_falcon_irqsset_swgen0_set_f(void)
{
return 0x40U;
}
static inline u32 psec_falcon_irqsclr_r(void)
{
return 0x00840004U;
}
static inline u32 psec_falcon_irqstat_r(void)
{
return 0x00840008U;
}
static inline u32 psec_falcon_irqstat_halt_true_f(void)
{
return 0x10U;
}
static inline u32 psec_falcon_irqstat_exterr_true_f(void)
{
return 0x20U;
}
static inline u32 psec_falcon_irqstat_swgen0_true_f(void)
{
return 0x40U;
}
static inline u32 psec_falcon_irqmode_r(void)
{
return 0x0084000cU;
}
static inline u32 psec_falcon_irqmset_r(void)
{
return 0x00840010U;
}
static inline u32 psec_falcon_irqmset_gptmr_f(u32 v)
{
return (v & 0x1U) << 0U;
}
static inline u32 psec_falcon_irqmset_wdtmr_f(u32 v)
{
return (v & 0x1U) << 1U;
}
static inline u32 psec_falcon_irqmset_mthd_f(u32 v)
{
return (v & 0x1U) << 2U;
}
static inline u32 psec_falcon_irqmset_ctxsw_f(u32 v)
{
return (v & 0x1U) << 3U;
}
static inline u32 psec_falcon_irqmset_halt_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 psec_falcon_irqmset_exterr_f(u32 v)
{
return (v & 0x1U) << 5U;
}
static inline u32 psec_falcon_irqmset_swgen0_f(u32 v)
{
return (v & 0x1U) << 6U;
}
static inline u32 psec_falcon_irqmset_swgen1_f(u32 v)
{
return (v & 0x1U) << 7U;
}
static inline u32 psec_falcon_irqmclr_r(void)
{
return 0x00840014U;
}
static inline u32 psec_falcon_irqmclr_gptmr_f(u32 v)
{
return (v & 0x1U) << 0U;
}
static inline u32 psec_falcon_irqmclr_wdtmr_f(u32 v)
{
return (v & 0x1U) << 1U;
}
static inline u32 psec_falcon_irqmclr_mthd_f(u32 v)
{
return (v & 0x1U) << 2U;
}
static inline u32 psec_falcon_irqmclr_ctxsw_f(u32 v)
{
return (v & 0x1U) << 3U;
}
static inline u32 psec_falcon_irqmclr_halt_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 psec_falcon_irqmclr_exterr_f(u32 v)
{
return (v & 0x1U) << 5U;
}
static inline u32 psec_falcon_irqmclr_swgen0_f(u32 v)
{
return (v & 0x1U) << 6U;
}
static inline u32 psec_falcon_irqmclr_swgen1_f(u32 v)
{
return (v & 0x1U) << 7U;
}
static inline u32 psec_falcon_irqmclr_ext_f(u32 v)
{
return (v & 0xffU) << 8U;
}
static inline u32 psec_falcon_irqmask_r(void)
{
return 0x00840018U;
}
static inline u32 psec_falcon_irqdest_r(void)
{
return 0x0084001cU;
}
static inline u32 psec_falcon_irqdest_host_gptmr_f(u32 v)
{
return (v & 0x1U) << 0U;
}
static inline u32 psec_falcon_irqdest_host_wdtmr_f(u32 v)
{
return (v & 0x1U) << 1U;
}
static inline u32 psec_falcon_irqdest_host_mthd_f(u32 v)
{
return (v & 0x1U) << 2U;
}
static inline u32 psec_falcon_irqdest_host_ctxsw_f(u32 v)
{
return (v & 0x1U) << 3U;
}
static inline u32 psec_falcon_irqdest_host_halt_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 psec_falcon_irqdest_host_exterr_f(u32 v)
{
return (v & 0x1U) << 5U;
}
static inline u32 psec_falcon_irqdest_host_swgen0_f(u32 v)
{
return (v & 0x1U) << 6U;
}
static inline u32 psec_falcon_irqdest_host_swgen1_f(u32 v)
{
return (v & 0x1U) << 7U;
}
static inline u32 psec_falcon_irqdest_host_ext_f(u32 v)
{
return (v & 0xffU) << 8U;
}
static inline u32 psec_falcon_irqdest_target_gptmr_f(u32 v)
{
return (v & 0x1U) << 16U;
}
static inline u32 psec_falcon_irqdest_target_wdtmr_f(u32 v)
{
return (v & 0x1U) << 17U;
}
static inline u32 psec_falcon_irqdest_target_mthd_f(u32 v)
{
return (v & 0x1U) << 18U;
}
static inline u32 psec_falcon_irqdest_target_ctxsw_f(u32 v)
{
return (v & 0x1U) << 19U;
}
static inline u32 psec_falcon_irqdest_target_halt_f(u32 v)
{
return (v & 0x1U) << 20U;
}
static inline u32 psec_falcon_irqdest_target_exterr_f(u32 v)
{
return (v & 0x1U) << 21U;
}
static inline u32 psec_falcon_irqdest_target_swgen0_f(u32 v)
{
return (v & 0x1U) << 22U;
}
static inline u32 psec_falcon_irqdest_target_swgen1_f(u32 v)
{
return (v & 0x1U) << 23U;
}
static inline u32 psec_falcon_irqdest_target_ext_f(u32 v)
{
return (v & 0xffU) << 24U;
}
static inline u32 psec_falcon_curctx_r(void)
{
return 0x00840050U;
}
static inline u32 psec_falcon_nxtctx_r(void)
{
return 0x00840054U;
}
static inline u32 psec_falcon_mailbox0_r(void)
{
return 0x00840040U;
}
static inline u32 psec_falcon_mailbox1_r(void)
{
return 0x00840044U;
}
static inline u32 psec_falcon_itfen_r(void)
{
return 0x00840048U;
}
static inline u32 psec_falcon_itfen_ctxen_enable_f(void)
{
return 0x1U;
}
static inline u32 psec_falcon_idlestate_r(void)
{
return 0x0084004cU;
}
static inline u32 psec_falcon_idlestate_falcon_busy_v(u32 r)
{
return (r >> 0U) & 0x1U;
}
static inline u32 psec_falcon_idlestate_ext_busy_v(u32 r)
{
return (r >> 1U) & 0x7fffU;
}
static inline u32 psec_falcon_os_r(void)
{
return 0x00840080U;
}
static inline u32 psec_falcon_engctl_r(void)
{
return 0x008400a4U;
}
static inline u32 psec_falcon_cpuctl_r(void)
{
return 0x00840100U;
}
static inline u32 psec_falcon_cpuctl_startcpu_f(u32 v)
{
return (v & 0x1U) << 1U;
}
static inline u32 psec_falcon_cpuctl_halt_intr_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 psec_falcon_cpuctl_halt_intr_m(void)
{
return 0x1U << 4U;
}
static inline u32 psec_falcon_cpuctl_halt_intr_v(u32 r)
{
return (r >> 4U) & 0x1U;
}
static inline u32 psec_falcon_cpuctl_cpuctl_alias_en_f(u32 v)
{
return (v & 0x1U) << 6U;
}
static inline u32 psec_falcon_cpuctl_cpuctl_alias_en_m(void)
{
return 0x1U << 6U;
}
static inline u32 psec_falcon_cpuctl_cpuctl_alias_en_v(u32 r)
{
return (r >> 6U) & 0x1U;
}
static inline u32 psec_falcon_cpuctl_alias_r(void)
{
return 0x00840130U;
}
static inline u32 psec_falcon_cpuctl_alias_startcpu_f(u32 v)
{
return (v & 0x1U) << 1U;
}
static inline u32 psec_falcon_imemc_r(u32 i)
{
return 0x00840180U + i*16U;
}
static inline u32 psec_falcon_imemc_offs_f(u32 v)
{
return (v & 0x3fU) << 2U;
}
static inline u32 psec_falcon_imemc_blk_f(u32 v)
{
return (v & 0xffU) << 8U;
}
static inline u32 psec_falcon_imemc_aincw_f(u32 v)
{
return (v & 0x1U) << 24U;
}
static inline u32 psec_falcon_imemd_r(u32 i)
{
return 0x00840184U + i*16U;
}
static inline u32 psec_falcon_imemt_r(u32 i)
{
return 0x00840188U + i*16U;
}
static inline u32 psec_falcon_sctl_r(void)
{
return 0x00840240U;
}
static inline u32 psec_falcon_mmu_phys_sec_r(void)
{
return 0x00100ce4U;
}
static inline u32 psec_falcon_bootvec_r(void)
{
return 0x00840104U;
}
static inline u32 psec_falcon_bootvec_vec_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 psec_falcon_dmactl_r(void)
{
return 0x0084010cU;
}
static inline u32 psec_falcon_dmactl_dmem_scrubbing_m(void)
{
return 0x1U << 1U;
}
static inline u32 psec_falcon_dmactl_imem_scrubbing_m(void)
{
return 0x1U << 2U;
}
static inline u32 psec_falcon_dmactl_require_ctx_f(u32 v)
{
return (v & 0x1U) << 0U;
}
static inline u32 psec_falcon_hwcfg_r(void)
{
return 0x00840108U;
}
static inline u32 psec_falcon_hwcfg_imem_size_v(u32 r)
{
return (r >> 0U) & 0x1ffU;
}
static inline u32 psec_falcon_hwcfg_dmem_size_v(u32 r)
{
return (r >> 9U) & 0x1ffU;
}
static inline u32 psec_falcon_dmatrfbase_r(void)
{
return 0x00840110U;
}
static inline u32 psec_falcon_dmatrfbase1_r(void)
{
return 0x00840128U;
}
static inline u32 psec_falcon_dmatrfmoffs_r(void)
{
return 0x00840114U;
}
static inline u32 psec_falcon_dmatrfcmd_r(void)
{
return 0x00840118U;
}
static inline u32 psec_falcon_dmatrfcmd_imem_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 psec_falcon_dmatrfcmd_write_f(u32 v)
{
return (v & 0x1U) << 5U;
}
static inline u32 psec_falcon_dmatrfcmd_size_f(u32 v)
{
return (v & 0x7U) << 8U;
}
static inline u32 psec_falcon_dmatrfcmd_ctxdma_f(u32 v)
{
return (v & 0x7U) << 12U;
}
static inline u32 psec_falcon_dmatrffboffs_r(void)
{
return 0x0084011cU;
}
static inline u32 psec_falcon_exterraddr_r(void)
{
return 0x00840168U;
}
static inline u32 psec_falcon_exterrstat_r(void)
{
return 0x0084016cU;
}
static inline u32 psec_falcon_exterrstat_valid_m(void)
{
return 0x1U << 31U;
}
static inline u32 psec_falcon_exterrstat_valid_v(u32 r)
{
return (r >> 31U) & 0x1U;
}
static inline u32 psec_falcon_exterrstat_valid_true_v(void)
{
return 0x00000001U;
}
static inline u32 psec_sec2_falcon_icd_cmd_r(void)
{
return 0x00840200U;
}
static inline u32 psec_sec2_falcon_icd_cmd_opc_s(void)
{
return 4U;
}
static inline u32 psec_sec2_falcon_icd_cmd_opc_f(u32 v)
{
return (v & 0xfU) << 0U;
}
static inline u32 psec_sec2_falcon_icd_cmd_opc_m(void)
{
return 0xfU << 0U;
}
static inline u32 psec_sec2_falcon_icd_cmd_opc_v(u32 r)
{
return (r >> 0U) & 0xfU;
}
static inline u32 psec_sec2_falcon_icd_cmd_opc_rreg_f(void)
{
return 0x8U;
}
static inline u32 psec_sec2_falcon_icd_cmd_opc_rstat_f(void)
{
return 0xeU;
}
static inline u32 psec_sec2_falcon_icd_cmd_idx_f(u32 v)
{
return (v & 0x1fU) << 8U;
}
static inline u32 psec_sec2_falcon_icd_rdata_r(void)
{
return 0x0084020cU;
}
/* DMEM control register for port i (8-byte port stride). */
static inline u32 psec_falcon_dmemc_r(u32 i)
{
	return 0x008401c0U + (i << 3U);
}
/* Word offset field (bits 7:2) of DMEMC. */
static inline u32 psec_falcon_dmemc_offs_f(u32 v)
{
	return (v << 2U) & 0xfcU;
}
/* Mask for the word offset field of DMEMC. */
static inline u32 psec_falcon_dmemc_offs_m(void)
{
	return 0xfcU;
}
/* Block index field (bits 15:8) of DMEMC. */
static inline u32 psec_falcon_dmemc_blk_f(u32 v)
{
	return (v << 8U) & 0xff00U;
}
/* Mask for the block index field of DMEMC. */
static inline u32 psec_falcon_dmemc_blk_m(void)
{
	return 0xff00U;
}
/* Auto-increment-on-write bit (bit 24) of DMEMC. */
static inline u32 psec_falcon_dmemc_aincw_f(u32 v)
{
	return (v & 0x1U) << 24U;
}
/* Auto-increment-on-read bit (bit 25) of DMEMC. */
static inline u32 psec_falcon_dmemc_aincr_f(u32 v)
{
	return (v & 0x1U) << 25U;
}
/* DMEM data port for port i (8-byte port stride). */
static inline u32 psec_falcon_dmemd_r(u32 i)
{
	return 0x008401c4U + (i << 3U);
}
static inline u32 psec_falcon_debug1_r(void)
{
return 0x00840090U;
}
static inline u32 psec_falcon_debug1_ctxsw_mode_s(void)
{
return 1U;
}
static inline u32 psec_falcon_debug1_ctxsw_mode_f(u32 v)
{
return (v & 0x1U) << 16U;
}
static inline u32 psec_falcon_debug1_ctxsw_mode_m(void)
{
return 0x1U << 16U;
}
static inline u32 psec_falcon_debug1_ctxsw_mode_v(u32 r)
{
return (r >> 16U) & 0x1U;
}
static inline u32 psec_falcon_debug1_ctxsw_mode_init_f(void)
{
return 0x0U;
}
static inline u32 psec_fbif_transcfg_r(u32 i)
{
return 0x00840600U + i*4U;
}
static inline u32 psec_fbif_transcfg_target_local_fb_f(void)
{
return 0x0U;
}
static inline u32 psec_fbif_transcfg_target_coherent_sysmem_f(void)
{
return 0x1U;
}
static inline u32 psec_fbif_transcfg_target_noncoherent_sysmem_f(void)
{
return 0x2U;
}
static inline u32 psec_fbif_transcfg_mem_type_s(void)
{
return 1U;
}
static inline u32 psec_fbif_transcfg_mem_type_f(u32 v)
{
return (v & 0x1U) << 2U;
}
static inline u32 psec_fbif_transcfg_mem_type_m(void)
{
return 0x1U << 2U;
}
static inline u32 psec_fbif_transcfg_mem_type_v(u32 r)
{
return (r >> 2U) & 0x1U;
}
static inline u32 psec_fbif_transcfg_mem_type_virtual_f(void)
{
return 0x0U;
}
static inline u32 psec_fbif_transcfg_mem_type_physical_f(void)
{
return 0x4U;
}
static inline u32 psec_falcon_engine_r(void)
{
return 0x008403c0U;
}
static inline u32 psec_falcon_engine_reset_true_f(void)
{
return 0x1U;
}
static inline u32 psec_falcon_engine_reset_false_f(void)
{
return 0x0U;
}
static inline u32 psec_fbif_ctl_r(void)
{
return 0x00840624U;
}
static inline u32 psec_fbif_ctl_allow_phys_no_ctx_init_f(void)
{
return 0x0U;
}
static inline u32 psec_fbif_ctl_allow_phys_no_ctx_disallow_f(void)
{
return 0x0U;
}
static inline u32 psec_fbif_ctl_allow_phys_no_ctx_allow_f(void)
{
return 0x80U;
}
static inline u32 psec_hwcfg_r(void)
{
return 0x00840abcU;
}
static inline u32 psec_hwcfg_emem_size_f(u32 v)
{
return (v & 0x1ffU) << 0U;
}
static inline u32 psec_hwcfg_emem_size_m(void)
{
return 0x1ffU << 0U;
}
static inline u32 psec_hwcfg_emem_size_v(u32 r)
{
return (r >> 0U) & 0x1ffU;
}
static inline u32 psec_falcon_hwcfg1_r(void)
{
return 0x0084012cU;
}
static inline u32 psec_falcon_hwcfg1_dmem_tag_width_f(u32 v)
{
return (v & 0x1fU) << 21U;
}
static inline u32 psec_falcon_hwcfg1_dmem_tag_width_m(void)
{
return 0x1fU << 21U;
}
static inline u32 psec_falcon_hwcfg1_dmem_tag_width_v(u32 r)
{
return (r >> 21U) & 0x1fU;
}
static inline u32 psec_ememc_r(u32 i)
{
return 0x00840ac0U + i*8U;
}
static inline u32 psec_ememc__size_1_v(void)
{
return 0x00000004U;
}
static inline u32 psec_ememc_blk_f(u32 v)
{
return (v & 0xffU) << 8U;
}
static inline u32 psec_ememc_blk_m(void)
{
return 0xffU << 8U;
}
static inline u32 psec_ememc_blk_v(u32 r)
{
return (r >> 8U) & 0xffU;
}
static inline u32 psec_ememc_offs_f(u32 v)
{
return (v & 0x3fU) << 2U;
}
static inline u32 psec_ememc_offs_m(void)
{
return 0x3fU << 2U;
}
static inline u32 psec_ememc_offs_v(u32 r)
{
return (r >> 2U) & 0x3fU;
}
static inline u32 psec_ememc_aincw_f(u32 v)
{
return (v & 0x1U) << 24U;
}
static inline u32 psec_ememc_aincw_m(void)
{
return 0x1U << 24U;
}
static inline u32 psec_ememc_aincw_v(u32 r)
{
return (r >> 24U) & 0x1U;
}
static inline u32 psec_ememc_aincr_f(u32 v)
{
return (v & 0x1U) << 25U;
}
static inline u32 psec_ememc_aincr_m(void)
{
return 0x1U << 25U;
}
static inline u32 psec_ememc_aincr_v(u32 r)
{
return (r >> 25U) & 0x1U;
}
static inline u32 psec_ememd_r(u32 i)
{
return 0x00840ac4U + i*8U;
}
static inline u32 psec_ememd__size_1_v(void)
{
return 0x00000004U;
}
static inline u32 psec_ememd_data_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 psec_ememd_data_m(void)
{
return 0xffffffffU << 0U;
}
static inline u32 psec_ememd_data_v(u32 r)
{
return (r >> 0U) & 0xffffffffU;
}
static inline u32 psec_msgq_head_r(u32 i)
{
return 0x00840c80U + i*8U;
}
static inline u32 psec_msgq_head_val_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 psec_msgq_head_val_m(void)
{
return 0xffffffffU << 0U;
}
static inline u32 psec_msgq_head_val_v(u32 r)
{
return (r >> 0U) & 0xffffffffU;
}
static inline u32 psec_msgq_tail_r(u32 i)
{
return 0x00840c84U + i*8U;
}
static inline u32 psec_msgq_tail_val_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 psec_msgq_tail_val_m(void)
{
return 0xffffffffU << 0U;
}
static inline u32 psec_msgq_tail_val_v(u32 r)
{
return (r >> 0U) & 0xffffffffU;
}
static inline u32 psec_queue_head_r(u32 i)
{
return 0x00840c00U + i*8U;
}
static inline u32 psec_queue_head_address_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 psec_queue_head_address_m(void)
{
return 0xffffffffU << 0U;
}
static inline u32 psec_queue_head_address_v(u32 r)
{
return (r >> 0U) & 0xffffffffU;
}
static inline u32 psec_queue_tail_r(u32 i)
{
return 0x00840c04U + i*8U;
}
static inline u32 psec_queue_tail_address_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 psec_queue_tail_address_m(void)
{
return 0xffffffffU << 0U;
}
static inline u32 psec_queue_tail_address_v(u32 r)
{
return (r >> 0U) & 0xffffffffU;
}
#endif

/* ---- extraction artifact: boundary between concatenated files.
 * Next section: new Turing (tu104) PWR falcon register header
 * added by this commit (935 lines). ---- */
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_pwr_tu104_h_
#define _hw_pwr_tu104_h_
/* Offset of FALCON_IRQSSET (interrupt set). */
static inline u32 pwr_falcon_irqsset_r(void)
{
	return 0x0010a000U;
}
/* swgen0 set bit (bit 6) of IRQSSET. */
static inline u32 pwr_falcon_irqsset_swgen0_set_f(void)
{
	return 0x1U << 6U;
}
/* Offset of FALCON_IRQSCLR (interrupt clear). */
static inline u32 pwr_falcon_irqsclr_r(void)
{
	return 0x0010a004U;
}
/* Offset of FALCON_IRQSTAT (interrupt status). */
static inline u32 pwr_falcon_irqstat_r(void)
{
	return 0x0010a008U;
}
/* halt status bit (bit 4) of IRQSTAT. */
static inline u32 pwr_falcon_irqstat_halt_true_f(void)
{
	return 0x1U << 4U;
}
/* exterr status bit (bit 5) of IRQSTAT. */
static inline u32 pwr_falcon_irqstat_exterr_true_f(void)
{
	return 0x1U << 5U;
}
/* swgen0 status bit (bit 6) of IRQSTAT. */
static inline u32 pwr_falcon_irqstat_swgen0_true_f(void)
{
	return 0x1U << 6U;
}
/* ext "second" status bit (bit 11) of IRQSTAT. */
static inline u32 pwr_falcon_irqstat_ext_second_true_f(void)
{
	return 0x1U << 11U;
}
static inline u32 pwr_falcon_irqmode_r(void)
{
return 0x0010a00cU;
}
static inline u32 pwr_falcon_irqmset_r(void)
{
return 0x0010a010U;
}
static inline u32 pwr_falcon_irqmset_gptmr_f(u32 v)
{
return (v & 0x1U) << 0U;
}
static inline u32 pwr_falcon_irqmset_wdtmr_f(u32 v)
{
return (v & 0x1U) << 1U;
}
static inline u32 pwr_falcon_irqmset_mthd_f(u32 v)
{
return (v & 0x1U) << 2U;
}
static inline u32 pwr_falcon_irqmset_ctxsw_f(u32 v)
{
return (v & 0x1U) << 3U;
}
static inline u32 pwr_falcon_irqmset_halt_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 pwr_falcon_irqmset_exterr_f(u32 v)
{
return (v & 0x1U) << 5U;
}
static inline u32 pwr_falcon_irqmset_swgen0_f(u32 v)
{
return (v & 0x1U) << 6U;
}
static inline u32 pwr_falcon_irqmset_swgen1_f(u32 v)
{
return (v & 0x1U) << 7U;
}
static inline u32 pwr_falcon_irqmset_ext_f(u32 v)
{
return (v & 0xffU) << 8U;
}
static inline u32 pwr_falcon_irqmset_ext_ctxe_f(u32 v)
{
return (v & 0x1U) << 8U;
}
static inline u32 pwr_falcon_irqmset_ext_limitv_f(u32 v)
{
return (v & 0x1U) << 9U;
}
static inline u32 pwr_falcon_irqmset_ext_second_f(u32 v)
{
return (v & 0x1U) << 11U;
}
static inline u32 pwr_falcon_irqmset_ext_therm_f(u32 v)
{
return (v & 0x1U) << 12U;
}
static inline u32 pwr_falcon_irqmset_ext_miscio_f(u32 v)
{
return (v & 0x1U) << 13U;
}
static inline u32 pwr_falcon_irqmset_ext_rttimer_f(u32 v)
{
return (v & 0x1U) << 14U;
}
static inline u32 pwr_falcon_irqmclr_r(void)
{
return 0x0010a014U;
}
static inline u32 pwr_falcon_irqmclr_gptmr_f(u32 v)
{
return (v & 0x1U) << 0U;
}
static inline u32 pwr_falcon_irqmclr_wdtmr_f(u32 v)
{
return (v & 0x1U) << 1U;
}
static inline u32 pwr_falcon_irqmclr_mthd_f(u32 v)
{
return (v & 0x1U) << 2U;
}
static inline u32 pwr_falcon_irqmclr_ctxsw_f(u32 v)
{
return (v & 0x1U) << 3U;
}
static inline u32 pwr_falcon_irqmclr_halt_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 pwr_falcon_irqmclr_exterr_f(u32 v)
{
return (v & 0x1U) << 5U;
}
static inline u32 pwr_falcon_irqmclr_swgen0_f(u32 v)
{
return (v & 0x1U) << 6U;
}
static inline u32 pwr_falcon_irqmclr_swgen1_f(u32 v)
{
return (v & 0x1U) << 7U;
}
static inline u32 pwr_falcon_irqmclr_ext_f(u32 v)
{
return (v & 0xffU) << 8U;
}
static inline u32 pwr_falcon_irqmclr_ext_ctxe_f(u32 v)
{
return (v & 0x1U) << 8U;
}
static inline u32 pwr_falcon_irqmclr_ext_limitv_f(u32 v)
{
return (v & 0x1U) << 9U;
}
static inline u32 pwr_falcon_irqmclr_ext_second_f(u32 v)
{
return (v & 0x1U) << 11U;
}
static inline u32 pwr_falcon_irqmclr_ext_therm_f(u32 v)
{
return (v & 0x1U) << 12U;
}
static inline u32 pwr_falcon_irqmclr_ext_miscio_f(u32 v)
{
return (v & 0x1U) << 13U;
}
static inline u32 pwr_falcon_irqmclr_ext_rttimer_f(u32 v)
{
return (v & 0x1U) << 14U;
}
static inline u32 pwr_falcon_irqmask_r(void)
{
return 0x0010a018U;
}
static inline u32 pwr_falcon_irqdest_r(void)
{
return 0x0010a01cU;
}
static inline u32 pwr_falcon_irqdest_host_gptmr_f(u32 v)
{
return (v & 0x1U) << 0U;
}
static inline u32 pwr_falcon_irqdest_host_wdtmr_f(u32 v)
{
return (v & 0x1U) << 1U;
}
static inline u32 pwr_falcon_irqdest_host_mthd_f(u32 v)
{
return (v & 0x1U) << 2U;
}
static inline u32 pwr_falcon_irqdest_host_ctxsw_f(u32 v)
{
return (v & 0x1U) << 3U;
}
static inline u32 pwr_falcon_irqdest_host_halt_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 pwr_falcon_irqdest_host_exterr_f(u32 v)
{
return (v & 0x1U) << 5U;
}
static inline u32 pwr_falcon_irqdest_host_swgen0_f(u32 v)
{
return (v & 0x1U) << 6U;
}
static inline u32 pwr_falcon_irqdest_host_swgen1_f(u32 v)
{
return (v & 0x1U) << 7U;
}
static inline u32 pwr_falcon_irqdest_host_ext_f(u32 v)
{
return (v & 0xffU) << 8U;
}
static inline u32 pwr_falcon_irqdest_host_ext_ctxe_f(u32 v)
{
return (v & 0x1U) << 8U;
}
static inline u32 pwr_falcon_irqdest_host_ext_limitv_f(u32 v)
{
return (v & 0x1U) << 9U;
}
static inline u32 pwr_falcon_irqdest_host_ext_second_f(u32 v)
{
return (v & 0x1U) << 11U;
}
static inline u32 pwr_falcon_irqdest_host_ext_therm_f(u32 v)
{
return (v & 0x1U) << 12U;
}
static inline u32 pwr_falcon_irqdest_host_ext_miscio_f(u32 v)
{
return (v & 0x1U) << 13U;
}
static inline u32 pwr_falcon_irqdest_host_ext_rttimer_f(u32 v)
{
return (v & 0x1U) << 14U;
}
static inline u32 pwr_falcon_irqdest_target_gptmr_f(u32 v)
{
return (v & 0x1U) << 16U;
}
static inline u32 pwr_falcon_irqdest_target_wdtmr_f(u32 v)
{
return (v & 0x1U) << 17U;
}
static inline u32 pwr_falcon_irqdest_target_mthd_f(u32 v)
{
return (v & 0x1U) << 18U;
}
static inline u32 pwr_falcon_irqdest_target_ctxsw_f(u32 v)
{
return (v & 0x1U) << 19U;
}
static inline u32 pwr_falcon_irqdest_target_halt_f(u32 v)
{
return (v & 0x1U) << 20U;
}
static inline u32 pwr_falcon_irqdest_target_exterr_f(u32 v)
{
return (v & 0x1U) << 21U;
}
static inline u32 pwr_falcon_irqdest_target_swgen0_f(u32 v)
{
return (v & 0x1U) << 22U;
}
static inline u32 pwr_falcon_irqdest_target_swgen1_f(u32 v)
{
return (v & 0x1U) << 23U;
}
static inline u32 pwr_falcon_irqdest_target_ext_f(u32 v)
{
return (v & 0xffU) << 24U;
}
static inline u32 pwr_falcon_irqdest_target_ext_ctxe_f(u32 v)
{
return (v & 0x1U) << 24U;
}
static inline u32 pwr_falcon_irqdest_target_ext_limitv_f(u32 v)
{
return (v & 0x1U) << 25U;
}
static inline u32 pwr_falcon_irqdest_target_ext_second_f(u32 v)
{
return (v & 0x1U) << 27U;
}
static inline u32 pwr_falcon_irqdest_target_ext_therm_f(u32 v)
{
return (v & 0x1U) << 28U;
}
static inline u32 pwr_falcon_irqdest_target_ext_miscio_f(u32 v)
{
return (v & 0x1U) << 29U;
}
static inline u32 pwr_falcon_irqdest_target_ext_rttimer_f(u32 v)
{
return (v & 0x1U) << 30U;
}
/* Offset of FALCON_CURCTX. */
static inline u32 pwr_falcon_curctx_r(void)
{
	return 0x0010a050U;
}
/* Offset of FALCON_NXTCTX. */
static inline u32 pwr_falcon_nxtctx_r(void)
{
	return 0x0010a054U;
}
/* Offset of FALCON_MAILBOX0. */
static inline u32 pwr_falcon_mailbox0_r(void)
{
	return 0x0010a040U;
}
/* Offset of FALCON_MAILBOX1. */
static inline u32 pwr_falcon_mailbox1_r(void)
{
	return 0x0010a044U;
}
/* Offset of FALCON_ITFEN. */
static inline u32 pwr_falcon_itfen_r(void)
{
	return 0x0010a048U;
}
/* ctxen enable value (bit 0) for ITFEN. */
static inline u32 pwr_falcon_itfen_ctxen_enable_f(void)
{
	return 0x1U;
}
/* Offset of FALCON_IDLESTATE. */
static inline u32 pwr_falcon_idlestate_r(void)
{
	return 0x0010a04cU;
}
/* Extract falcon-busy flag (bit 0) from an IDLESTATE value. */
static inline u32 pwr_falcon_idlestate_falcon_busy_v(u32 r)
{
	return r & 0x1U;
}
/* Extract ext-busy flags (bits 15:1) from an IDLESTATE value. */
static inline u32 pwr_falcon_idlestate_ext_busy_v(u32 r)
{
	return (r >> 1U) & 0x7fffU;
}
/* Offset of FALCON_OS. */
static inline u32 pwr_falcon_os_r(void)
{
	return 0x0010a080U;
}
static inline u32 pwr_falcon_engctl_r(void)
{
return 0x0010a0a4U;
}
static inline u32 pwr_falcon_cpuctl_r(void)
{
return 0x0010a100U;
}
static inline u32 pwr_falcon_cpuctl_startcpu_f(u32 v)
{
return (v & 0x1U) << 1U;
}
static inline u32 pwr_falcon_cpuctl_halt_intr_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 pwr_falcon_cpuctl_halt_intr_m(void)
{
return 0x1U << 4U;
}
static inline u32 pwr_falcon_cpuctl_halt_intr_v(u32 r)
{
return (r >> 4U) & 0x1U;
}
static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_f(u32 v)
{
return (v & 0x1U) << 6U;
}
static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_m(void)
{
return 0x1U << 6U;
}
static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_v(u32 r)
{
return (r >> 6U) & 0x1U;
}
static inline u32 pwr_falcon_cpuctl_alias_r(void)
{
return 0x0010a130U;
}
static inline u32 pwr_falcon_cpuctl_alias_startcpu_f(u32 v)
{
return (v & 0x1U) << 1U;
}
static inline u32 pwr_pmu_scpctl_stat_r(void)
{
return 0x0010ac08U;
}
static inline u32 pwr_pmu_scpctl_stat_debug_mode_f(u32 v)
{
return (v & 0x1U) << 20U;
}
static inline u32 pwr_pmu_scpctl_stat_debug_mode_m(void)
{
return 0x1U << 20U;
}
static inline u32 pwr_pmu_scpctl_stat_debug_mode_v(u32 r)
{
return (r >> 20U) & 0x1U;
}
/* IMEM control register for port i (16-byte port stride). */
static inline u32 pwr_falcon_imemc_r(u32 i)
{
	return 0x0010a180U + (i << 4U);
}
/* Word offset field (bits 7:2) of IMEMC. */
static inline u32 pwr_falcon_imemc_offs_f(u32 v)
{
	return (v << 2U) & 0xfcU;
}
/* Block index field (bits 15:8) of IMEMC. */
static inline u32 pwr_falcon_imemc_blk_f(u32 v)
{
	return (v << 8U) & 0xff00U;
}
/* Auto-increment-on-write bit (bit 24) of IMEMC. */
static inline u32 pwr_falcon_imemc_aincw_f(u32 v)
{
	return (v & 0x1U) << 24U;
}
/* IMEM data port for port i (16-byte port stride). */
static inline u32 pwr_falcon_imemd_r(u32 i)
{
	return 0x0010a184U + (i << 4U);
}
/* IMEM tag register for port i (16-byte port stride). */
static inline u32 pwr_falcon_imemt_r(u32 i)
{
	return 0x0010a188U + (i << 4U);
}
static inline u32 pwr_falcon_sctl_r(void)
{
return 0x0010a240U;
}
static inline u32 pwr_falcon_mmu_phys_sec_r(void)
{
return 0x00100ce4U;
}
static inline u32 pwr_falcon_bootvec_r(void)
{
return 0x0010a104U;
}
static inline u32 pwr_falcon_bootvec_vec_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 pwr_falcon_dmactl_r(void)
{
return 0x0010a10cU;
}
static inline u32 pwr_falcon_dmactl_dmem_scrubbing_m(void)
{
return 0x1U << 1U;
}
static inline u32 pwr_falcon_dmactl_imem_scrubbing_m(void)
{
return 0x1U << 2U;
}
static inline u32 pwr_falcon_hwcfg_r(void)
{
return 0x0010a108U;
}
static inline u32 pwr_falcon_hwcfg_imem_size_v(u32 r)
{
return (r >> 0U) & 0x1ffU;
}
static inline u32 pwr_falcon_hwcfg_dmem_size_v(u32 r)
{
return (r >> 9U) & 0x1ffU;
}
static inline u32 pwr_falcon_dmatrfbase_r(void)
{
return 0x0010a110U;
}
static inline u32 pwr_falcon_dmatrfbase1_r(void)
{
return 0x0010a128U;
}
static inline u32 pwr_falcon_dmatrfmoffs_r(void)
{
return 0x0010a114U;
}
static inline u32 pwr_falcon_dmatrfcmd_r(void)
{
return 0x0010a118U;
}
static inline u32 pwr_falcon_dmatrfcmd_imem_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 pwr_falcon_dmatrfcmd_write_f(u32 v)
{
return (v & 0x1U) << 5U;
}
static inline u32 pwr_falcon_dmatrfcmd_size_f(u32 v)
{
return (v & 0x7U) << 8U;
}
static inline u32 pwr_falcon_dmatrfcmd_ctxdma_f(u32 v)
{
return (v & 0x7U) << 12U;
}
static inline u32 pwr_falcon_dmatrffboffs_r(void)
{
return 0x0010a11cU;
}
static inline u32 pwr_falcon_exterraddr_r(void)
{
return 0x0010a168U;
}
static inline u32 pwr_falcon_exterrstat_r(void)
{
return 0x0010a16cU;
}
static inline u32 pwr_falcon_exterrstat_valid_m(void)
{
return 0x1U << 31U;
}
static inline u32 pwr_falcon_exterrstat_valid_v(u32 r)
{
return (r >> 31U) & 0x1U;
}
static inline u32 pwr_falcon_exterrstat_valid_true_v(void)
{
return 0x00000001U;
}
static inline u32 pwr_pmu_falcon_icd_cmd_r(void)
{
return 0x0010a200U;
}
static inline u32 pwr_pmu_falcon_icd_cmd_opc_s(void)
{
return 4U;
}
static inline u32 pwr_pmu_falcon_icd_cmd_opc_f(u32 v)
{
return (v & 0xfU) << 0U;
}
static inline u32 pwr_pmu_falcon_icd_cmd_opc_m(void)
{
return 0xfU << 0U;
}
static inline u32 pwr_pmu_falcon_icd_cmd_opc_v(u32 r)
{
return (r >> 0U) & 0xfU;
}
static inline u32 pwr_pmu_falcon_icd_cmd_opc_rreg_f(void)
{
return 0x8U;
}
static inline u32 pwr_pmu_falcon_icd_cmd_opc_rstat_f(void)
{
return 0xeU;
}
static inline u32 pwr_pmu_falcon_icd_cmd_idx_f(u32 v)
{
return (v & 0x1fU) << 8U;
}
static inline u32 pwr_pmu_falcon_icd_rdata_r(void)
{
return 0x0010a20cU;
}
/* DMEM control register for port i (8-byte port stride). */
static inline u32 pwr_falcon_dmemc_r(u32 i)
{
	return 0x0010a1c0U + (i << 3U);
}
/* Word offset field (bits 7:2) of DMEMC. */
static inline u32 pwr_falcon_dmemc_offs_f(u32 v)
{
	return (v << 2U) & 0xfcU;
}
/* Mask for the word offset field of DMEMC. */
static inline u32 pwr_falcon_dmemc_offs_m(void)
{
	return 0xfcU;
}
/* Block index field (bits 15:8) of DMEMC. */
static inline u32 pwr_falcon_dmemc_blk_f(u32 v)
{
	return (v << 8U) & 0xff00U;
}
/* Mask for the block index field of DMEMC. */
static inline u32 pwr_falcon_dmemc_blk_m(void)
{
	return 0xff00U;
}
/* Auto-increment-on-write bit (bit 24) of DMEMC. */
static inline u32 pwr_falcon_dmemc_aincw_f(u32 v)
{
	return (v & 0x1U) << 24U;
}
/* Auto-increment-on-read bit (bit 25) of DMEMC. */
static inline u32 pwr_falcon_dmemc_aincr_f(u32 v)
{
	return (v & 0x1U) << 25U;
}
/* DMEM data port for port i (8-byte port stride). */
static inline u32 pwr_falcon_dmemd_r(u32 i)
{
	return 0x0010a1c4U + (i << 3U);
}
static inline u32 pwr_pmu_new_instblk_r(void)
{
return 0x0010a480U;
}
static inline u32 pwr_pmu_new_instblk_ptr_f(u32 v)
{
return (v & 0xfffffffU) << 0U;
}
static inline u32 pwr_pmu_new_instblk_target_fb_f(void)
{
return 0x0U;
}
static inline u32 pwr_pmu_new_instblk_target_sys_coh_f(void)
{
return 0x20000000U;
}
static inline u32 pwr_pmu_new_instblk_target_sys_ncoh_f(void)
{
return 0x30000000U;
}
static inline u32 pwr_pmu_new_instblk_valid_f(u32 v)
{
return (v & 0x1U) << 30U;
}
static inline u32 pwr_pmu_mutex_id_r(void)
{
return 0x0010a488U;
}
static inline u32 pwr_pmu_mutex_id_value_v(u32 r)
{
return (r >> 0U) & 0xffU;
}
static inline u32 pwr_pmu_mutex_id_value_init_v(void)
{
return 0x00000000U;
}
static inline u32 pwr_pmu_mutex_id_value_not_avail_v(void)
{
return 0x000000ffU;
}
static inline u32 pwr_pmu_mutex_id_release_r(void)
{
return 0x0010a48cU;
}
static inline u32 pwr_pmu_mutex_id_release_value_f(u32 v)
{
return (v & 0xffU) << 0U;
}
static inline u32 pwr_pmu_mutex_id_release_value_m(void)
{
return 0xffU << 0U;
}
static inline u32 pwr_pmu_mutex_id_release_value_init_v(void)
{
return 0x00000000U;
}
static inline u32 pwr_pmu_mutex_id_release_value_init_f(void)
{
return 0x0U;
}
/* PMU mutex register i (4-byte stride). */
static inline u32 pwr_pmu_mutex_r(u32 i)
{
	return 0x0010a580U + (i << 2U);
}
/* Number of PMU mutex registers (16). */
static inline u32 pwr_pmu_mutex__size_1_v(void)
{
	return 16U;
}
/* Place v into the mutex value field (bits 7:0). */
static inline u32 pwr_pmu_mutex_value_f(u32 v)
{
	return v & 0xffU;
}
/* Extract the mutex value field (bits 7:0). */
static inline u32 pwr_pmu_mutex_value_v(u32 r)
{
	return r & 0xffU;
}
/* Value meaning "unlocked" in the mutex value field. */
static inline u32 pwr_pmu_mutex_value_initial_lock_f(void)
{
	return 0U;
}
static inline u32 pwr_pmu_queue_head_r(u32 i)
{
return 0x0010a800U + i*4U;
}
static inline u32 pwr_pmu_queue_head__size_1_v(void)
{
return 0x00000008U;
}
static inline u32 pwr_pmu_queue_head_address_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 pwr_pmu_queue_head_address_v(u32 r)
{
return (r >> 0U) & 0xffffffffU;
}
static inline u32 pwr_pmu_queue_tail_r(u32 i)
{
return 0x0010a820U + i*4U;
}
static inline u32 pwr_pmu_queue_tail__size_1_v(void)
{
return 0x00000008U;
}
static inline u32 pwr_pmu_queue_tail_address_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 pwr_pmu_queue_tail_address_v(u32 r)
{
return (r >> 0U) & 0xffffffffU;
}
static inline u32 pwr_pmu_msgq_head_r(void)
{
return 0x0010a4c8U;
}
static inline u32 pwr_pmu_msgq_head_val_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 pwr_pmu_msgq_head_val_v(u32 r)
{
return (r >> 0U) & 0xffffffffU;
}
static inline u32 pwr_pmu_msgq_tail_r(void)
{
return 0x0010a4ccU;
}
static inline u32 pwr_pmu_msgq_tail_val_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 pwr_pmu_msgq_tail_val_v(u32 r)
{
return (r >> 0U) & 0xffffffffU;
}
static inline u32 pwr_pmu_idle_mask_r(u32 i)
{
return 0x0010be40U + i*4U;
}
static inline u32 pwr_pmu_idle_mask_gr_enabled_f(void)
{
return 0x1U;
}
static inline u32 pwr_pmu_idle_mask_ce_2_enabled_f(void)
{
return 0x200000U;
}
static inline u32 pwr_pmu_idle_count_r(u32 i)
{
return 0x0010bf80U + i*4U;
}
static inline u32 pwr_pmu_idle_count_value_f(u32 v)
{
return (v & 0x7fffffffU) << 0U;
}
static inline u32 pwr_pmu_idle_count_value_v(u32 r)
{
return (r >> 0U) & 0x7fffffffU;
}
static inline u32 pwr_pmu_idle_count_reset_f(u32 v)
{
return (v & 0x1U) << 31U;
}
static inline u32 pwr_pmu_idle_ctrl_r(u32 i)
{
return 0x0010bfc0U + i*4U;
}
static inline u32 pwr_pmu_idle_ctrl_value_m(void)
{
return 0x3U << 0U;
}
static inline u32 pwr_pmu_idle_ctrl_value_busy_f(void)
{
return 0x2U;
}
static inline u32 pwr_pmu_idle_ctrl_value_always_f(void)
{
return 0x3U;
}
static inline u32 pwr_pmu_idle_ctrl_filter_m(void)
{
return 0x1U << 2U;
}
static inline u32 pwr_pmu_idle_ctrl_filter_disabled_f(void)
{
return 0x0U;
}
static inline u32 pwr_pmu_idle_mask_supp_r(u32 i)
{
return 0x0010a9f0U + i*8U;
}
static inline u32 pwr_pmu_idle_mask_1_supp_r(u32 i)
{
return 0x0010a9f4U + i*8U;
}
static inline u32 pwr_pmu_idle_ctrl_supp_r(u32 i)
{
return 0x0010aa30U + i*8U;
}
static inline u32 pwr_pmu_debug_r(u32 i)
{
return 0x0010a5c0U + i*4U;
}
static inline u32 pwr_pmu_debug__size_1_v(void)
{
return 0x00000004U;
}
static inline u32 pwr_pmu_mailbox_r(u32 i)
{
return 0x0010a450U + i*4U;
}
static inline u32 pwr_pmu_mailbox__size_1_v(void)
{
return 0x0000000cU;
}
static inline u32 pwr_pmu_bar0_addr_r(void)
{
return 0x0010a7a0U;
}
static inline u32 pwr_pmu_bar0_data_r(void)
{
return 0x0010a7a4U;
}
static inline u32 pwr_pmu_bar0_ctl_r(void)
{
return 0x0010a7acU;
}
static inline u32 pwr_pmu_bar0_timeout_r(void)
{
return 0x0010a7a8U;
}
static inline u32 pwr_pmu_bar0_fecs_error_r(void)
{
return 0x0010a988U;
}
static inline u32 pwr_pmu_bar0_error_status_r(void)
{
return 0x0010a7b0U;
}
static inline u32 pwr_pmu_pg_idlefilth_r(u32 i)
{
return 0x0010a6c0U + i*4U;
}
static inline u32 pwr_pmu_pg_ppuidlefilth_r(u32 i)
{
return 0x0010a6e8U + i*4U;
}
static inline u32 pwr_pmu_pg_idle_cnt_r(u32 i)
{
return 0x0010a710U + i*4U;
}
static inline u32 pwr_pmu_pg_intren_r(u32 i)
{
return 0x0010a760U + i*4U;
}
static inline u32 pwr_fbif_transcfg_r(u32 i)
{
return 0x0010ae00U + i*4U;
}
static inline u32 pwr_fbif_transcfg_target_local_fb_f(void)
{
return 0x0U;
}
static inline u32 pwr_fbif_transcfg_target_coherent_sysmem_f(void)
{
return 0x1U;
}
static inline u32 pwr_fbif_transcfg_target_noncoherent_sysmem_f(void)
{
return 0x2U;
}
static inline u32 pwr_fbif_transcfg_mem_type_s(void)
{
return 1U;
}
static inline u32 pwr_fbif_transcfg_mem_type_f(u32 v)
{
return (v & 0x1U) << 2U;
}
static inline u32 pwr_fbif_transcfg_mem_type_m(void)
{
return 0x1U << 2U;
}
static inline u32 pwr_fbif_transcfg_mem_type_v(u32 r)
{
return (r >> 2U) & 0x1U;
}
static inline u32 pwr_fbif_transcfg_mem_type_virtual_f(void)
{
return 0x0U;
}
static inline u32 pwr_fbif_transcfg_mem_type_physical_f(void)
{
return 0x4U;
}
#endif

/* ---- extraction artifact: boundary between concatenated files.
 * Next section: new Turing (tu104) RAM/instance-block register
 * header added by this commit (771 lines, truncated below). ---- */
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_ram_tu104_h_
#define _hw_ram_tu104_h_
/* RAMFC region size in bits within the instance block. */
static inline u32 ram_in_ramfc_s(void)
{
	return 4096U;
}
/* RAMFC starts at word 0 of the instance block. */
static inline u32 ram_in_ramfc_w(void)
{
	return 0U;
}
/* Aperture target field (bits 1:0) of the page directory base word. */
static inline u32 ram_in_page_dir_base_target_f(u32 v)
{
	return v & 0x3U;
}
static inline u32 ram_in_page_dir_base_target_w(void)
{
return 128U;
}
static inline u32 ram_in_page_dir_base_target_vid_mem_f(void)
{
return 0x0U;
}
static inline u32 ram_in_page_dir_base_target_sys_mem_coh_f(void)
{
return 0x2U;
}
static inline u32 ram_in_page_dir_base_target_sys_mem_ncoh_f(void)
{
return 0x3U;
}
static inline u32 ram_in_page_dir_base_vol_w(void)
{
return 128U;
}
static inline u32 ram_in_page_dir_base_vol_true_f(void)
{
return 0x4U;
}
static inline u32 ram_in_page_dir_base_vol_false_f(void)
{
return 0x0U;
}
static inline u32 ram_in_page_dir_base_fault_replay_tex_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 ram_in_page_dir_base_fault_replay_tex_m(void)
{
return 0x1U << 4U;
}
static inline u32 ram_in_page_dir_base_fault_replay_tex_w(void)
{
return 128U;
}
static inline u32 ram_in_page_dir_base_fault_replay_tex_true_f(void)
{
return 0x10U;
}
static inline u32 ram_in_page_dir_base_fault_replay_gcc_f(u32 v)
{
return (v & 0x1U) << 5U;
}
static inline u32 ram_in_page_dir_base_fault_replay_gcc_m(void)
{
return 0x1U << 5U;
}
static inline u32 ram_in_page_dir_base_fault_replay_gcc_w(void)
{
return 128U;
}
static inline u32 ram_in_page_dir_base_fault_replay_gcc_true_f(void)
{
return 0x20U;
}
static inline u32 ram_in_use_ver2_pt_format_f(u32 v)
{
return (v & 0x1U) << 10U;
}
static inline u32 ram_in_use_ver2_pt_format_m(void)
{
return 0x1U << 10U;
}
static inline u32 ram_in_use_ver2_pt_format_w(void)
{
return 128U;
}
static inline u32 ram_in_use_ver2_pt_format_true_f(void)
{
return 0x400U;
}
static inline u32 ram_in_use_ver2_pt_format_false_f(void)
{
return 0x0U;
}
static inline u32 ram_in_big_page_size_f(u32 v)
{
return (v & 0x1U) << 11U;
}
static inline u32 ram_in_big_page_size_m(void)
{
return 0x1U << 11U;
}
static inline u32 ram_in_big_page_size_w(void)
{
return 128U;
}
static inline u32 ram_in_big_page_size_128kb_f(void)
{
return 0x0U;
}
static inline u32 ram_in_big_page_size_64kb_f(void)
{
return 0x800U;
}
static inline u32 ram_in_page_dir_base_lo_f(u32 v)
{
return (v & 0xfffffU) << 12U;
}
static inline u32 ram_in_page_dir_base_lo_w(void)
{
return 128U;
}
static inline u32 ram_in_page_dir_base_hi_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 ram_in_page_dir_base_hi_w(void)
{
return 129U;
}
/*
 * NV_RAMIN engine context-switch / WFI fields (words 132-134) and the
 * engine method buffer address (words 136-137).
 */
static inline u32 ram_in_engine_cs_w(void)
{
	return 132U;
}
static inline u32 ram_in_engine_cs_wfi_v(void)
{
	return 0x00000000U;
}
static inline u32 ram_in_engine_cs_wfi_f(void)
{
	return 0x0U;
}
static inline u32 ram_in_engine_cs_fg_v(void)
{
	return 0x00000001U;
}
static inline u32 ram_in_engine_cs_fg_f(void)
{
	return 0x8U;
}
static inline u32 ram_in_engine_wfi_mode_f(u32 v)
{
	return (v & 0x1U) << 2U;
}
static inline u32 ram_in_engine_wfi_mode_w(void)
{
	return 132U;
}
static inline u32 ram_in_engine_wfi_mode_physical_v(void)
{
	return 0x00000000U;
}
static inline u32 ram_in_engine_wfi_mode_virtual_v(void)
{
	return 0x00000001U;
}
/* Aperture (target memory) of the WFI context save pointer. */
static inline u32 ram_in_engine_wfi_target_f(u32 v)
{
	return (v & 0x3U) << 0U;
}
static inline u32 ram_in_engine_wfi_target_w(void)
{
	return 132U;
}
static inline u32 ram_in_engine_wfi_target_sys_mem_coh_v(void)
{
	return 0x00000002U;
}
static inline u32 ram_in_engine_wfi_target_sys_mem_ncoh_v(void)
{
	return 0x00000003U;
}
static inline u32 ram_in_engine_wfi_target_local_mem_v(void)
{
	return 0x00000000U;
}
static inline u32 ram_in_engine_wfi_ptr_lo_f(u32 v)
{
	return (v & 0xfffffU) << 12U;
}
static inline u32 ram_in_engine_wfi_ptr_lo_w(void)
{
	return 132U;
}
static inline u32 ram_in_engine_wfi_ptr_hi_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 ram_in_engine_wfi_ptr_hi_w(void)
{
	return 133U;
}
/* Virtual engine (subcontext) id associated with the WFI save. */
static inline u32 ram_in_engine_wfi_veid_f(u32 v)
{
	return (v & 0x3fU) << 0U;
}
static inline u32 ram_in_engine_wfi_veid_w(void)
{
	return 134U;
}
static inline u32 ram_in_eng_method_buffer_addr_lo_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 ram_in_eng_method_buffer_addr_lo_w(void)
{
	return 136U;
}
static inline u32 ram_in_eng_method_buffer_addr_hi_f(u32 v)
{
	return (v & 0x1ffffU) << 0U;
}
static inline u32 ram_in_eng_method_buffer_addr_hi_w(void)
{
	return 137U;
}
/*
 * Per-subcontext (sc_) variants of the page directory base fields, one
 * set for each of the 64 subcontexts (_size_1 == 0x40).
 *
 * NOTE(review): the "i*0U" index term contributes nothing to the bit
 * position; per-entry placement presumably comes from the per-entry word
 * offset instead (cf. the _0_w accessors at the end of this group, words
 * 168-169 for entry 0) — generator artifact, keep as-is.
 */
static inline u32 ram_in_sc_page_dir_base_target_f(u32 v, u32 i)
{
	return (v & 0x3U) << (0U + i*0U);
}
static inline u32 ram_in_sc_page_dir_base_target__size_1_v(void)
{
	return 0x00000040U;
}
static inline u32 ram_in_sc_page_dir_base_target_vid_mem_v(void)
{
	return 0x00000000U;
}
static inline u32 ram_in_sc_page_dir_base_target_invalid_v(void)
{
	return 0x00000001U;
}
static inline u32 ram_in_sc_page_dir_base_target_sys_mem_coh_v(void)
{
	return 0x00000002U;
}
static inline u32 ram_in_sc_page_dir_base_target_sys_mem_ncoh_v(void)
{
	return 0x00000003U;
}
static inline u32 ram_in_sc_page_dir_base_vol_f(u32 v, u32 i)
{
	return (v & 0x1U) << (2U + i*0U);
}
static inline u32 ram_in_sc_page_dir_base_vol__size_1_v(void)
{
	return 0x00000040U;
}
static inline u32 ram_in_sc_page_dir_base_vol_true_v(void)
{
	return 0x00000001U;
}
static inline u32 ram_in_sc_page_dir_base_vol_false_v(void)
{
	return 0x00000000U;
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_f(u32 v, u32 i)
{
	return (v & 0x1U) << (4U + i*0U);
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_tex__size_1_v(void)
{
	return 0x00000040U;
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_enabled_v(void)
{
	return 0x00000001U;
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_disabled_v(void)
{
	return 0x00000000U;
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_f(u32 v, u32 i)
{
	return (v & 0x1U) << (5U + i*0U);
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc__size_1_v(void)
{
	return 0x00000040U;
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_enabled_v(void)
{
	return 0x00000001U;
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_disabled_v(void)
{
	return 0x00000000U;
}
static inline u32 ram_in_sc_use_ver2_pt_format_f(u32 v, u32 i)
{
	return (v & 0x1U) << (10U + i*0U);
}
static inline u32 ram_in_sc_use_ver2_pt_format__size_1_v(void)
{
	return 0x00000040U;
}
static inline u32 ram_in_sc_use_ver2_pt_format_false_v(void)
{
	return 0x00000000U;
}
static inline u32 ram_in_sc_use_ver2_pt_format_true_v(void)
{
	return 0x00000001U;
}
static inline u32 ram_in_sc_big_page_size_f(u32 v, u32 i)
{
	return (v & 0x1U) << (11U + i*0U);
}
static inline u32 ram_in_sc_big_page_size__size_1_v(void)
{
	return 0x00000040U;
}
static inline u32 ram_in_sc_big_page_size_64kb_v(void)
{
	return 0x00000001U;
}
static inline u32 ram_in_sc_page_dir_base_lo_f(u32 v, u32 i)
{
	return (v & 0xfffffU) << (12U + i*0U);
}
static inline u32 ram_in_sc_page_dir_base_lo__size_1_v(void)
{
	return 0x00000040U;
}
static inline u32 ram_in_sc_page_dir_base_hi_f(u32 v, u32 i)
{
	return (v & 0xffffffffU) << (0U + i*0U);
}
static inline u32 ram_in_sc_page_dir_base_hi__size_1_v(void)
{
	return 0x00000040U;
}
/* Convenience accessors for subcontext 0 (words 168-169). */
static inline u32 ram_in_sc_page_dir_base_target_0_f(u32 v)
{
	return (v & 0x3U) << 0U;
}
static inline u32 ram_in_sc_page_dir_base_target_0_w(void)
{
	return 168U;
}
static inline u32 ram_in_sc_page_dir_base_vol_0_f(u32 v)
{
	return (v & 0x1U) << 2U;
}
static inline u32 ram_in_sc_page_dir_base_vol_0_w(void)
{
	return 168U;
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_0_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_0_w(void)
{
	return 168U;
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_0_f(u32 v)
{
	return (v & 0x1U) << 5U;
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_0_w(void)
{
	return 168U;
}
static inline u32 ram_in_sc_use_ver2_pt_format_0_f(u32 v)
{
	return (v & 0x1U) << 10U;
}
static inline u32 ram_in_sc_use_ver2_pt_format_0_w(void)
{
	return 168U;
}
static inline u32 ram_in_sc_big_page_size_0_f(u32 v)
{
	return (v & 0x1U) << 11U;
}
static inline u32 ram_in_sc_big_page_size_0_w(void)
{
	return 168U;
}
static inline u32 ram_in_sc_page_dir_base_lo_0_f(u32 v)
{
	return (v & 0xfffffU) << 12U;
}
static inline u32 ram_in_sc_page_dir_base_lo_0_w(void)
{
	return 168U;
}
static inline u32 ram_in_sc_page_dir_base_hi_0_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 ram_in_sc_page_dir_base_hi_0_w(void)
{
	return 169U;
}
/*
 * Instance block alignment/size, then NV_RAMFC: 4-byte word offsets of
 * the fields inside the 0x200-byte FIFO context save area of a channel.
 */
static inline u32 ram_in_base_shift_v(void)
{
	return 0x0000000cU;
}
static inline u32 ram_in_alloc_size_v(void)
{
	return 0x00001000U;
}
static inline u32 ram_fc_size_val_v(void)
{
	return 0x00000200U;
}
static inline u32 ram_fc_gp_put_w(void)
{
	return 0U;
}
static inline u32 ram_fc_userd_w(void)
{
	return 2U;
}
static inline u32 ram_fc_userd_hi_w(void)
{
	return 3U;
}
static inline u32 ram_fc_signature_w(void)
{
	return 4U;
}
static inline u32 ram_fc_gp_get_w(void)
{
	return 5U;
}
static inline u32 ram_fc_pb_get_w(void)
{
	return 6U;
}
static inline u32 ram_fc_pb_get_hi_w(void)
{
	return 7U;
}
static inline u32 ram_fc_pb_top_level_get_w(void)
{
	return 8U;
}
static inline u32 ram_fc_pb_top_level_get_hi_w(void)
{
	return 9U;
}
static inline u32 ram_fc_acquire_w(void)
{
	return 12U;
}
static inline u32 ram_fc_sem_addr_hi_w(void)
{
	return 14U;
}
static inline u32 ram_fc_sem_addr_lo_w(void)
{
	return 15U;
}
static inline u32 ram_fc_sem_payload_lo_w(void)
{
	return 16U;
}
/* Note: payload_hi is not adjacent to payload_lo in the layout. */
static inline u32 ram_fc_sem_payload_hi_w(void)
{
	return 39U;
}
static inline u32 ram_fc_sem_execute_w(void)
{
	return 17U;
}
static inline u32 ram_fc_gp_base_w(void)
{
	return 18U;
}
static inline u32 ram_fc_gp_base_hi_w(void)
{
	return 19U;
}
static inline u32 ram_fc_gp_fetch_w(void)
{
	return 20U;
}
static inline u32 ram_fc_pb_fetch_w(void)
{
	return 21U;
}
static inline u32 ram_fc_pb_fetch_hi_w(void)
{
	return 22U;
}
static inline u32 ram_fc_pb_put_w(void)
{
	return 23U;
}
static inline u32 ram_fc_pb_put_hi_w(void)
{
	return 24U;
}
static inline u32 ram_fc_pb_header_w(void)
{
	return 33U;
}
static inline u32 ram_fc_pb_count_w(void)
{
	return 34U;
}
static inline u32 ram_fc_subdevice_w(void)
{
	return 37U;
}
static inline u32 ram_fc_target_w(void)
{
	return 43U;
}
static inline u32 ram_fc_hce_ctrl_w(void)
{
	return 57U;
}
static inline u32 ram_fc_config_w(void)
{
	return 61U;
}
static inline u32 ram_fc_set_channel_info_w(void)
{
	return 63U;
}
/*
 * NV_RAMUSERD: word offsets inside the 0x200-byte, 512-byte-aligned
 * per-channel USERD region (doorbell/pointer area visible to userspace).
 */
static inline u32 ram_userd_base_shift_v(void)
{
	return 0x00000009U;
}
static inline u32 ram_userd_chan_size_v(void)
{
	return 0x00000200U;
}
static inline u32 ram_userd_put_w(void)
{
	return 16U;
}
static inline u32 ram_userd_get_w(void)
{
	return 17U;
}
static inline u32 ram_userd_ref_w(void)
{
	return 18U;
}
static inline u32 ram_userd_put_hi_w(void)
{
	return 19U;
}
static inline u32 ram_userd_top_level_get_w(void)
{
	return 22U;
}
static inline u32 ram_userd_top_level_get_hi_w(void)
{
	return 23U;
}
static inline u32 ram_userd_get_hi_w(void)
{
	return 24U;
}
static inline u32 ram_userd_gp_get_w(void)
{
	return 34U;
}
static inline u32 ram_userd_gp_put_w(void)
{
	return 35U;
}
static inline u32 ram_userd_gp_top_level_get_w(void)
{
	return 22U;
}
static inline u32 ram_userd_gp_top_level_get_hi_w(void)
{
	return 23U;
}
/*
 * NV_RAMRL: runlist entry layout (16 bytes per entry). An entry is
 * either a TSG header or a channel entry, selected by the type bit.
 */
static inline u32 ram_rl_entry_size_v(void)
{
	return 0x00000010U;
}
static inline u32 ram_rl_entry_type_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 ram_rl_entry_type_channel_v(void)
{
	return 0x00000000U;
}
static inline u32 ram_rl_entry_type_tsg_v(void)
{
	return 0x00000001U;
}
static inline u32 ram_rl_entry_id_f(u32 v)
{
	return (v & 0xfffU) << 0U;
}
/* Channel-entry fields. */
static inline u32 ram_rl_entry_chan_runqueue_selector_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 ram_rl_entry_chan_inst_target_f(u32 v)
{
	return (v & 0x3U) << 4U;
}
static inline u32 ram_rl_entry_chan_inst_target_sys_mem_ncoh_v(void)
{
	return 0x00000003U;
}
static inline u32 ram_rl_entry_chan_inst_target_sys_mem_coh_v(void)
{
	return 0x00000002U;
}
static inline u32 ram_rl_entry_chan_inst_target_vid_mem_v(void)
{
	return 0x00000000U;
}
static inline u32 ram_rl_entry_chan_userd_target_f(u32 v)
{
	return (v & 0x3U) << 6U;
}
static inline u32 ram_rl_entry_chan_userd_target_vid_mem_v(void)
{
	return 0x00000000U;
}
static inline u32 ram_rl_entry_chan_userd_target_vid_mem_nvlink_coh_v(void)
{
	return 0x00000001U;
}
static inline u32 ram_rl_entry_chan_userd_target_sys_mem_coh_v(void)
{
	return 0x00000002U;
}
static inline u32 ram_rl_entry_chan_userd_target_sys_mem_ncoh_v(void)
{
	return 0x00000003U;
}
static inline u32 ram_rl_entry_chan_userd_ptr_lo_f(u32 v)
{
	return (v & 0xffffffU) << 8U;
}
static inline u32 ram_rl_entry_chan_userd_ptr_hi_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 ram_rl_entry_chid_f(u32 v)
{
	return (v & 0xfffU) << 0U;
}
static inline u32 ram_rl_entry_chan_inst_ptr_lo_f(u32 v)
{
	return (v & 0xfffffU) << 12U;
}
static inline u32 ram_rl_entry_chan_inst_ptr_hi_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
/* TSG-entry fields: timeslice = timeout << scale, plus entry count. */
static inline u32 ram_rl_entry_tsg_timeslice_scale_f(u32 v)
{
	return (v & 0xfU) << 16U;
}
static inline u32 ram_rl_entry_tsg_timeslice_scale_3_v(void)
{
	return 0x00000003U;
}
static inline u32 ram_rl_entry_tsg_timeslice_timeout_f(u32 v)
{
	return (v & 0xffU) << 24U;
}
static inline u32 ram_rl_entry_tsg_timeslice_timeout_128_v(void)
{
	return 0x00000080U;
}
static inline u32 ram_rl_entry_tsg_length_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 ram_rl_entry_tsg_length_init_v(void)
{
	return 0x00000000U;
}
static inline u32 ram_rl_entry_tsg_length_min_v(void)
{
	return 0x00000001U;
}
static inline u32 ram_rl_entry_tsg_length_max_v(void)
{
	return 0x00000080U;
}
static inline u32 ram_rl_entry_tsg_tsgid_f(u32 v)
{
	return (v & 0xfffU) << 0U;
}
static inline u32 ram_rl_entry_chan_userd_ptr_align_shift_v(void)
{
	return 0x00000008U;
}
static inline u32 ram_rl_entry_chan_userd_align_shift_v(void)
{
	return 0x00000008U;
}
static inline u32 ram_rl_entry_chan_inst_ptr_align_shift_v(void)
{
	return 0x0000000cU;
}
#endif

/* ==== commit-view separator: next file is new, +299 lines (hw_therm_tu104.h, per its include guard) ==== */
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_therm_tu104_h_
#define _hw_therm_tu104_h_
/*
 * NV_THERM register accessors for tu104: clock gating control, idle
 * filters, thermal clock slowdown, and gradual (grad) stepping tables.
 */
static inline u32 therm_weight_1_r(void)
{
	return 0x00020024U;
}
static inline u32 therm_config1_r(void)
{
	return 0x00020050U;
}
static inline u32 therm_config2_r(void)
{
	return 0x00020130U;
}
static inline u32 therm_config2_slowdown_factor_extended_f(u32 v)
{
	return (v & 0x1U) << 24U;
}
static inline u32 therm_config2_grad_enable_f(u32 v)
{
	return (v & 0x1U) << 31U;
}
/* Per-engine clock gating control array (4-byte stride). */
static inline u32 therm_gate_ctrl_r(u32 i)
{
	return 0x00020200U + i*4U;
}
static inline u32 therm_gate_ctrl_eng_clk_m(void)
{
	return 0x3U << 0U;
}
static inline u32 therm_gate_ctrl_eng_clk_run_f(void)
{
	return 0x0U;
}
static inline u32 therm_gate_ctrl_eng_clk_auto_f(void)
{
	return 0x1U;
}
static inline u32 therm_gate_ctrl_eng_clk_stop_f(void)
{
	return 0x2U;
}
static inline u32 therm_gate_ctrl_blk_clk_m(void)
{
	return 0x3U << 2U;
}
static inline u32 therm_gate_ctrl_blk_clk_run_f(void)
{
	return 0x0U;
}
static inline u32 therm_gate_ctrl_blk_clk_auto_f(void)
{
	return 0x4U;
}
static inline u32 therm_gate_ctrl_idle_holdoff_m(void)
{
	return 0x1U << 4U;
}
static inline u32 therm_gate_ctrl_idle_holdoff_off_f(void)
{
	return 0x0U;
}
static inline u32 therm_gate_ctrl_idle_holdoff_on_f(void)
{
	return 0x10U;
}
/* Idle filter threshold: mantissa/exponent pair plus before/after delays. */
static inline u32 therm_gate_ctrl_eng_idle_filt_exp_f(u32 v)
{
	return (v & 0x1fU) << 8U;
}
static inline u32 therm_gate_ctrl_eng_idle_filt_exp_m(void)
{
	return 0x1fU << 8U;
}
static inline u32 therm_gate_ctrl_eng_idle_filt_mant_f(u32 v)
{
	return (v & 0x7U) << 13U;
}
static inline u32 therm_gate_ctrl_eng_idle_filt_mant_m(void)
{
	return 0x7U << 13U;
}
static inline u32 therm_gate_ctrl_eng_delay_before_f(u32 v)
{
	return (v & 0xfU) << 16U;
}
static inline u32 therm_gate_ctrl_eng_delay_before_m(void)
{
	return 0xfU << 16U;
}
static inline u32 therm_gate_ctrl_eng_delay_after_f(u32 v)
{
	return (v & 0xfU) << 20U;
}
static inline u32 therm_gate_ctrl_eng_delay_after_m(void)
{
	return 0xfU << 20U;
}
static inline u32 therm_fecs_idle_filter_r(void)
{
	return 0x00020288U;
}
static inline u32 therm_fecs_idle_filter_value_m(void)
{
	return 0xffffffffU << 0U;
}
static inline u32 therm_hubmmu_idle_filter_r(void)
{
	return 0x0002028cU;
}
static inline u32 therm_hubmmu_idle_filter_value_m(void)
{
	return 0xffffffffU << 0U;
}
static inline u32 therm_clk_slowdown_r(u32 i)
{
	return 0x00020160U + i*4U;
}
static inline u32 therm_clk_slowdown_idle_factor_f(u32 v)
{
	return (v & 0x3fU) << 16U;
}
static inline u32 therm_clk_slowdown_idle_factor_m(void)
{
	return 0x3fU << 16U;
}
static inline u32 therm_clk_slowdown_idle_factor_v(u32 r)
{
	return (r >> 16U) & 0x3fU;
}
static inline u32 therm_clk_slowdown_idle_factor_disabled_f(void)
{
	return 0x0U;
}
/* Gradual slowdown stepping table: five 6-bit slowdown factors per word. */
static inline u32 therm_grad_stepping_table_r(u32 i)
{
	return 0x000202c8U + i*4U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor0_f(u32 v)
{
	return (v & 0x3fU) << 0U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor0_m(void)
{
	return 0x3fU << 0U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by1p5_f(void)
{
	return 0x1U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by2_f(void)
{
	return 0x2U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by4_f(void)
{
	return 0x6U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f(void)
{
	return 0xeU;
}
static inline u32 therm_grad_stepping_table_slowdown_factor1_f(u32 v)
{
	return (v & 0x3fU) << 6U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor1_m(void)
{
	return 0x3fU << 6U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor2_f(u32 v)
{
	return (v & 0x3fU) << 12U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor2_m(void)
{
	return 0x3fU << 12U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor3_f(u32 v)
{
	return (v & 0x3fU) << 18U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor3_m(void)
{
	return 0x3fU << 18U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor4_f(u32 v)
{
	return (v & 0x3fU) << 24U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor4_m(void)
{
	return 0x3fU << 24U;
}
static inline u32 therm_grad_stepping0_r(void)
{
	return 0x000202c0U;
}
static inline u32 therm_grad_stepping0_feature_s(void)
{
	return 1U;
}
static inline u32 therm_grad_stepping0_feature_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 therm_grad_stepping0_feature_m(void)
{
	return 0x1U << 0U;
}
static inline u32 therm_grad_stepping0_feature_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 therm_grad_stepping0_feature_enable_f(void)
{
	return 0x1U;
}
static inline u32 therm_grad_stepping1_r(void)
{
	return 0x000202c4U;
}
static inline u32 therm_grad_stepping1_pdiv_duration_f(u32 v)
{
	return (v & 0x1ffffU) << 0U;
}
static inline u32 therm_clk_timing_r(u32 i)
{
	return 0x000203c0U + i*4U;
}
static inline u32 therm_clk_timing_grad_slowdown_f(u32 v)
{
	return (v & 0x1U) << 16U;
}
static inline u32 therm_clk_timing_grad_slowdown_m(void)
{
	return 0x1U << 16U;
}
static inline u32 therm_clk_timing_grad_slowdown_enabled_f(void)
{
	return 0x10000U;
}
#endif

/* ==== commit-view separator: next file is new, +115 lines (hw_timer_tu104.h, per its include guard) ==== */
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_timer_tu104_h_
#define _hw_timer_tu104_h_
/*
 * NV_PTIMER register accessors for tu104: PRI access timeout control,
 * timeout save/error registers, and the free-running time counter.
 */
static inline u32 timer_pri_timeout_r(void)
{
	return 0x00009080U;
}
static inline u32 timer_pri_timeout_period_f(u32 v)
{
	return (v & 0xffffffU) << 0U;
}
static inline u32 timer_pri_timeout_period_m(void)
{
	return 0xffffffU << 0U;
}
static inline u32 timer_pri_timeout_period_v(u32 r)
{
	return (r >> 0U) & 0xffffffU;
}
static inline u32 timer_pri_timeout_en_f(u32 v)
{
	return (v & 0x1U) << 31U;
}
static inline u32 timer_pri_timeout_en_m(void)
{
	return 0x1U << 31U;
}
static inline u32 timer_pri_timeout_en_v(u32 r)
{
	return (r >> 31U) & 0x1U;
}
static inline u32 timer_pri_timeout_en_en_enabled_f(void)
{
	return 0x80000000U;
}
static inline u32 timer_pri_timeout_en_en_disabled_f(void)
{
	return 0x0U;
}
static inline u32 timer_pri_timeout_save_0_r(void)
{
	return 0x00009084U;
}
static inline u32 timer_pri_timeout_save_1_r(void)
{
	return 0x00009088U;
}
static inline u32 timer_pri_timeout_fecs_errcode_r(void)
{
	return 0x0000908cU;
}
/* 64-bit timer value split across two 32-bit registers. */
static inline u32 timer_time_0_r(void)
{
	return 0x00009400U;
}
static inline u32 timer_time_1_r(void)
{
	return 0x00009410U;
}
#endif

/* ==== commit-view separator: next file is new, +255 lines (hw_top_tu104.h, per its include guard) ==== */
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_top_tu104_h_
#define _hw_top_tu104_h_
/*
 * NV_PTOP register accessors for tu104: chip topology counts (GPCs,
 * TPCs, FBPs, LTCs, CEs) and the device info table used for engine
 * enumeration.
 */
static inline u32 top_num_gpcs_r(void)
{
	return 0x00022430U;
}
static inline u32 top_num_gpcs_value_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 top_tpc_per_gpc_r(void)
{
	return 0x00022434U;
}
static inline u32 top_tpc_per_gpc_value_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 top_num_fbps_r(void)
{
	return 0x00022438U;
}
static inline u32 top_num_fbps_value_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 top_num_fbpas_r(void)
{
	return 0x0002243cU;
}
static inline u32 top_num_fbpas_value_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 top_ltc_per_fbp_r(void)
{
	return 0x00022450U;
}
static inline u32 top_ltc_per_fbp_value_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 top_slices_per_ltc_r(void)
{
	return 0x0002245cU;
}
static inline u32 top_slices_per_ltc_value_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 top_num_ltcs_r(void)
{
	return 0x00022454U;
}
static inline u32 top_num_ces_r(void)
{
	return 0x00022444U;
}
static inline u32 top_num_ces_value_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
/*
 * Device info table: 64 entries; multi-row descriptions are linked via
 * the chain bit, and the low 2 bits select the row kind (enum/data/
 * engine_type).
 */
static inline u32 top_device_info_r(u32 i)
{
	return 0x00022700U + i*4U;
}
static inline u32 top_device_info__size_1_v(void)
{
	return 0x00000040U;
}
static inline u32 top_device_info_chain_v(u32 r)
{
	return (r >> 31U) & 0x1U;
}
static inline u32 top_device_info_chain_enable_v(void)
{
	return 0x00000001U;
}
static inline u32 top_device_info_engine_enum_v(u32 r)
{
	return (r >> 26U) & 0xfU;
}
static inline u32 top_device_info_runlist_enum_v(u32 r)
{
	return (r >> 21U) & 0xfU;
}
static inline u32 top_device_info_intr_enum_v(u32 r)
{
	return (r >> 15U) & 0x1fU;
}
static inline u32 top_device_info_reset_enum_v(u32 r)
{
	return (r >> 9U) & 0x1fU;
}
static inline u32 top_device_info_type_enum_v(u32 r)
{
	return (r >> 2U) & 0x1fffffffU;
}
static inline u32 top_device_info_type_enum_graphics_v(void)
{
	return 0x00000000U;
}
static inline u32 top_device_info_type_enum_graphics_f(void)
{
	return 0x0U;
}
static inline u32 top_device_info_type_enum_copy2_v(void)
{
	return 0x00000003U;
}
static inline u32 top_device_info_type_enum_copy2_f(void)
{
	return 0xcU;
}
static inline u32 top_device_info_type_enum_lce_v(void)
{
	return 0x00000013U;
}
static inline u32 top_device_info_type_enum_lce_f(void)
{
	return 0x4cU;
}
static inline u32 top_device_info_type_enum_ioctrl_v(void)
{
	return 0x00000012U;
}
static inline u32 top_device_info_type_enum_ioctrl_f(void)
{
	return 0x48U;
}
static inline u32 top_device_info_engine_v(u32 r)
{
	return (r >> 5U) & 0x1U;
}
static inline u32 top_device_info_runlist_v(u32 r)
{
	return (r >> 4U) & 0x1U;
}
static inline u32 top_device_info_intr_v(u32 r)
{
	return (r >> 3U) & 0x1U;
}
static inline u32 top_device_info_reset_v(u32 r)
{
	return (r >> 2U) & 0x1U;
}
static inline u32 top_device_info_entry_v(u32 r)
{
	return (r >> 0U) & 0x3U;
}
static inline u32 top_device_info_entry_not_valid_v(void)
{
	return 0x00000000U;
}
static inline u32 top_device_info_entry_enum_v(void)
{
	return 0x00000002U;
}
static inline u32 top_device_info_entry_data_v(void)
{
	return 0x00000001U;
}
static inline u32 top_device_info_entry_engine_type_v(void)
{
	return 0x00000003U;
}
static inline u32 top_device_info_data_type_v(u32 r)
{
	return (r >> 30U) & 0x1U;
}
static inline u32 top_device_info_data_type_enum2_v(void)
{
	return 0x00000000U;
}
static inline u32 top_device_info_data_inst_id_v(u32 r)
{
	return (r >> 26U) & 0xfU;
}
static inline u32 top_device_info_data_pri_base_v(u32 r)
{
	return (r >> 12U) & 0xfffU;
}
static inline u32 top_device_info_data_pri_base_align_v(void)
{
	return 0x0000000cU;
}
static inline u32 top_device_info_data_fault_id_enum_v(u32 r)
{
	return (r >> 3U) & 0x7fU;
}
static inline u32 top_device_info_data_fault_id_v(u32 r)
{
	return (r >> 2U) & 0x1U;
}
static inline u32 top_device_info_data_fault_id_valid_v(void)
{
	return 0x00000001U;
}
#endif

/* ==== commit-view separator: next file is new, +199 lines (hw_trim_tu104.h, per its include guard) ==== */
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_trim_tu104_h_
#define _hw_trim_tu104_h_
/*
 * NV_PTRIM register accessors for tu104: NVLINK UPHY/PLL configuration
 * and the NVLINK common alternate clock switch.
 */
static inline u32 trim_sys_nvlink_uphy_cfg_r(void)
{
	return 0x00132410U;
}
static inline u32 trim_sys_nvlink_uphy_cfg_lockdect_wait_dly_length_f(u32 v)
{
	return (v & 0x3ffU) << 0U;
}
static inline u32 trim_sys_nvlink_uphy_cfg_lockdect_wait_dly_length_m(void)
{
	return 0x3ffU << 0U;
}
static inline u32 trim_sys_nvlink_uphy_cfg_lockdect_wait_dly_length_v(u32 r)
{
	return (r >> 0U) & 0x3ffU;
}
static inline u32 trim_sys_nvlink_uphy_cfg_phy2clks_use_lockdet_f(u32 v)
{
	return (v & 0x1U) << 12U;
}
static inline u32 trim_sys_nvlink_uphy_cfg_phy2clks_use_lockdet_m(void)
{
	return 0x1U << 12U;
}
static inline u32 trim_sys_nvlink_uphy_cfg_phy2clks_use_lockdet_v(u32 r)
{
	return (r >> 12U) & 0x1U;
}
static inline u32 trim_sys_nvlink_uphy_cfg_nvlink_wait_dly_f(u32 v)
{
	return (v & 0xffU) << 16U;
}
static inline u32 trim_sys_nvlink_uphy_cfg_nvlink_wait_dly_m(void)
{
	return 0xffU << 16U;
}
static inline u32 trim_sys_nvlink_uphy_cfg_nvlink_wait_dly_v(u32 r)
{
	return (r >> 16U) & 0xffU;
}
static inline u32 trim_sys_nvlink0_ctrl_r(void)
{
	return 0x00132420U;
}
static inline u32 trim_sys_nvlink0_ctrl_unit2clks_pll_turn_off_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 trim_sys_nvlink0_ctrl_unit2clks_pll_turn_off_m(void)
{
	return 0x1U << 0U;
}
static inline u32 trim_sys_nvlink0_ctrl_unit2clks_pll_turn_off_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 trim_sys_nvlink0_status_r(void)
{
	return 0x00132424U;
}
static inline u32 trim_sys_nvlink0_status_pll_off_f(u32 v)
{
	return (v & 0x1U) << 5U;
}
static inline u32 trim_sys_nvlink0_status_pll_off_m(void)
{
	return 0x1U << 5U;
}
static inline u32 trim_sys_nvlink0_status_pll_off_v(u32 r)
{
	return (r >> 5U) & 0x1U;
}
/* Alternate clock switch: slow-clock source select and final selector. */
static inline u32 trim_sys_nvl_common_clk_alt_switch_r(void)
{
	return 0x001371c4U;
}
static inline u32 trim_sys_nvl_common_clk_alt_switch_slowclk_f(u32 v)
{
	return (v & 0x3U) << 16U;
}
static inline u32 trim_sys_nvl_common_clk_alt_switch_slowclk_m(void)
{
	return 0x3U << 16U;
}
static inline u32 trim_sys_nvl_common_clk_alt_switch_slowclk_v(u32 r)
{
	return (r >> 16U) & 0x3U;
}
static inline u32 trim_sys_nvl_common_clk_alt_switch_slowclk_xtal4x_v(void)
{
	return 0x00000003U;
}
static inline u32 trim_sys_nvl_common_clk_alt_switch_slowclk_xtal4x_f(void)
{
	return 0x30000U;
}
static inline u32 trim_sys_nvl_common_clk_alt_switch_slowclk_xtal_in_v(void)
{
	return 0x00000000U;
}
static inline u32 trim_sys_nvl_common_clk_alt_switch_slowclk_xtal_in_f(void)
{
	return 0x0U;
}
static inline u32 trim_sys_nvl_common_clk_alt_switch_finalsel_f(u32 v)
{
	return (v & 0x3U) << 0U;
}
static inline u32 trim_sys_nvl_common_clk_alt_switch_finalsel_m(void)
{
	return 0x3U << 0U;
}
static inline u32 trim_sys_nvl_common_clk_alt_switch_finalsel_v(u32 r)
{
	return (r >> 0U) & 0x3U;
}
static inline u32 trim_sys_nvl_common_clk_alt_switch_finalsel_slowclk_v(void)
{
return 0x00000000U;
}
static inline u32 trim_sys_nvl_common_clk_alt_switch_finalsel_slowclk_f(void)
{
return 0x0U;
}
static inline u32 trim_sys_nvl_common_clk_alt_switch_finalsel_miscclk_v(void)
{
return 0x00000002U;
}
static inline u32 trim_sys_nvl_common_clk_alt_switch_finalsel_miscclk_f(void)
{
return 0x2U;
}
static inline u32 trim_sys_nvl_common_clk_alt_switch_finalsel_onesrcclk_v(void)
{
return 0x00000003U;
}
static inline u32 trim_sys_nvl_common_clk_alt_switch_finalsel_onesrcclk_f(void)
{
return 0x3U;
}
#endif

View File

@@ -0,0 +1,87 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_usermode_tu104_h_
#define _hw_usermode_tu104_h_
/*
 * TU104 usermode region register accessors (BAR0 offset 0x810000).
 * Generated-style header; do not hand-edit values.
 */
/* Usermode config register 0: exposes the region's class id. */
static inline u32 usermode_cfg0_r(void)
{
return 0x00810000U;
}
static inline u32 usermode_cfg0_class_id_f(u32 v)
{
return (v & 0xffffU) << 0U;
}
/* Expected class id value (0xc461) -- presumably the Turing usermode class; confirm against class headers. */
static inline u32 usermode_cfg0_class_id_value_v(void)
{
return 0x0000c461U;
}
/* Free-running timestamp, low word: nanoseconds in bits [31:5]. */
static inline u32 usermode_time_0_r(void)
{
return 0x00810080U;
}
static inline u32 usermode_time_0_nsec_f(u32 v)
{
return (v & 0x7ffffffU) << 5U;
}
/* Free-running timestamp, high word: nanoseconds in bits [28:0]. */
static inline u32 usermode_time_1_r(void)
{
return 0x00810084U;
}
static inline u32 usermode_time_1_nsec_f(u32 v)
{
return (v & 0x1fffffffU) << 0U;
}
#endif

View File

@@ -0,0 +1,143 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_xp_tu104_h_
#define _hw_xp_tu104_h_
/*
 * TU104 XP (PCIe physical/link layer) register accessors.
 * Generated-style header; do not hand-edit values.
 */
/* Data-link manager register array (indexed by i). */
static inline u32 xp_dl_mgr_r(u32 i)
{
return 0x0008b8c0U + i*4U;
}
static inline u32 xp_dl_mgr_safe_timing_f(u32 v)
{
return (v & 0x1U) << 2U;
}
/*
 * PCIe link configuration register array: LTSSM directives, target link
 * rate and target link width.
 */
static inline u32 xp_pl_link_config_r(u32 i)
{
return 0x0008c040U + i*4U;
}
static inline u32 xp_pl_link_config_ltssm_status_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 xp_pl_link_config_ltssm_status_idle_v(void)
{
return 0x00000000U;
}
static inline u32 xp_pl_link_config_ltssm_directive_f(u32 v)
{
return (v & 0xfU) << 0U;
}
static inline u32 xp_pl_link_config_ltssm_directive_m(void)
{
return 0xfU << 0U;
}
static inline u32 xp_pl_link_config_ltssm_directive_normal_operations_v(void)
{
return 0x00000000U;
}
static inline u32 xp_pl_link_config_ltssm_directive_change_speed_v(void)
{
return 0x00000001U;
}
/* Target link rate; note the encoding is inverted (3 = slowest, 2.5 GT/s). */
static inline u32 xp_pl_link_config_max_link_rate_f(u32 v)
{
return (v & 0x3U) << 18U;
}
static inline u32 xp_pl_link_config_max_link_rate_m(void)
{
return 0x3U << 18U;
}
static inline u32 xp_pl_link_config_max_link_rate_2500_mtps_v(void)
{
return 0x00000003U;
}
static inline u32 xp_pl_link_config_max_link_rate_5000_mtps_v(void)
{
return 0x00000002U;
}
static inline u32 xp_pl_link_config_max_link_rate_8000_mtps_v(void)
{
return 0x00000001U;
}
/* Target TX lane width; encoding is inverted (7 = x1, 0 = x16). */
static inline u32 xp_pl_link_config_target_tx_width_f(u32 v)
{
return (v & 0x7U) << 20U;
}
static inline u32 xp_pl_link_config_target_tx_width_m(void)
{
return 0x7U << 20U;
}
static inline u32 xp_pl_link_config_target_tx_width_x1_v(void)
{
return 0x00000007U;
}
static inline u32 xp_pl_link_config_target_tx_width_x2_v(void)
{
return 0x00000006U;
}
static inline u32 xp_pl_link_config_target_tx_width_x4_v(void)
{
return 0x00000005U;
}
static inline u32 xp_pl_link_config_target_tx_width_x8_v(void)
{
return 0x00000004U;
}
static inline u32 xp_pl_link_config_target_tx_width_x16_v(void)
{
return 0x00000000U;
}
#endif

View File

@@ -0,0 +1,207 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_xve_tu104_h_
#define _hw_xve_tu104_h_
/*
 * TU104 XVE (PCIe config space) register accessors.
 * Generated-style header; offsets are relative to the XVE config window.
 * Do not hand-edit values.
 */
/* Expansion ROM shadow control. */
static inline u32 xve_rom_ctrl_r(void)
{
return 0x00000050U;
}
static inline u32 xve_rom_ctrl_rom_shadow_f(u32 v)
{
return (v & 0x1U) << 0U;
}
static inline u32 xve_rom_ctrl_rom_shadow_disabled_f(void)
{
return 0x0U;
}
static inline u32 xve_rom_ctrl_rom_shadow_enabled_f(void)
{
return 0x1U;
}
/* PCIe link control/status: negotiated speed and width. */
static inline u32 xve_link_control_status_r(void)
{
return 0x00000088U;
}
static inline u32 xve_link_control_status_link_speed_m(void)
{
return 0xfU << 16U;
}
static inline u32 xve_link_control_status_link_speed_v(u32 r)
{
return (r >> 16U) & 0xfU;
}
static inline u32 xve_link_control_status_link_speed_link_speed_2p5_v(void)
{
return 0x00000001U;
}
static inline u32 xve_link_control_status_link_speed_link_speed_5p0_v(void)
{
return 0x00000002U;
}
static inline u32 xve_link_control_status_link_speed_link_speed_8p0_v(void)
{
return 0x00000003U;
}
static inline u32 xve_link_control_status_link_width_m(void)
{
return 0x3fU << 20U;
}
static inline u32 xve_link_control_status_link_width_v(u32 r)
{
return (r >> 20U) & 0x3fU;
}
static inline u32 xve_link_control_status_link_width_x1_v(void)
{
return 0x00000001U;
}
static inline u32 xve_link_control_status_link_width_x2_v(void)
{
return 0x00000002U;
}
static inline u32 xve_link_control_status_link_width_x4_v(void)
{
return 0x00000004U;
}
static inline u32 xve_link_control_status_link_width_x8_v(void)
{
return 0x00000008U;
}
static inline u32 xve_link_control_status_link_width_x16_v(void)
{
return 0x00000010U;
}
/* Private XV register: ASPM L0s/L1 enable CYA bits. */
static inline u32 xve_priv_xv_r(void)
{
return 0x00000150U;
}
static inline u32 xve_priv_xv_cya_l0s_enable_f(u32 v)
{
return (v & 0x1U) << 7U;
}
static inline u32 xve_priv_xv_cya_l0s_enable_m(void)
{
return 0x1U << 7U;
}
static inline u32 xve_priv_xv_cya_l0s_enable_v(u32 r)
{
return (r >> 7U) & 0x1U;
}
static inline u32 xve_priv_xv_cya_l1_enable_f(u32 v)
{
return (v & 0x1U) << 8U;
}
static inline u32 xve_priv_xv_cya_l1_enable_m(void)
{
return 0x1U << 8U;
}
static inline u32 xve_priv_xv_cya_l1_enable_v(u32 r)
{
return (r >> 8U) & 0x1U;
}
static inline u32 xve_cya_2_r(void)
{
return 0x00000704U;
}
/* SW reset control: GPU/clock reset bits plus reset delay counters. */
static inline u32 xve_reset_r(void)
{
return 0x00000718U;
}
static inline u32 xve_reset_reset_m(void)
{
return 0x1U << 0U;
}
static inline u32 xve_reset_gpu_on_sw_reset_m(void)
{
return 0x1U << 1U;
}
static inline u32 xve_reset_counter_en_m(void)
{
return 0x1U << 2U;
}
static inline u32 xve_reset_counter_val_f(u32 v)
{
return (v & 0x7ffU) << 4U;
}
static inline u32 xve_reset_counter_val_m(void)
{
return 0x7ffU << 4U;
}
static inline u32 xve_reset_counter_val_v(u32 r)
{
return (r >> 4U) & 0x7ffU;
}
static inline u32 xve_reset_clock_on_sw_reset_m(void)
{
return 0x1U << 15U;
}
static inline u32 xve_reset_clock_counter_en_m(void)
{
return 0x1U << 16U;
}
static inline u32 xve_reset_clock_counter_val_f(u32 v)
{
return (v & 0x7ffU) << 17U;
}
static inline u32 xve_reset_clock_counter_val_m(void)
{
return 0x7ffU << 17U;
}
static inline u32 xve_reset_clock_counter_val_v(u32 r)
{
return (r >> 17U) & 0x7ffU;
}
#endif

View File

@@ -0,0 +1,39 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVGPU_GPUID_NEXT_H__
#define __NVGPU_GPUID_NEXT_H__
/*
 * "Next GPU" shim: maps the generic NVGPU_*_NEXT hooks onto the Turing
 * TU104 implementation so chip-independent code can bind to it.
 */
/* Architecture/implementation id probed at boot for the next chip. */
#define NVGPU_GPUID_NEXT 0x00000164
/* Signed falcon ucode blobs loaded for FECS/GPCCS on this chip. */
#define NVGPU_GPU_NEXT_FECS_UCODE_SIG "tu104/fecs_sig.bin"
#define NVGPU_GPU_NEXT_GPCCS_UCODE_SIG "tu104/gpccs_sig.bin"
/* Entry points the common code calls for HAL and Linux os_ops setup. */
#define NVGPU_NEXT_INIT_HAL tu104_init_hal
#define NVGPU_NEXT_INIT_OS_OPS nvgpu_tu104_init_os_ops
struct nvgpu_os_linux;
extern int tu104_init_hal(struct gk20a *g);
extern void nvgpu_tu104_init_os_ops(struct nvgpu_os_linux *l);
#endif

View File

@@ -0,0 +1,35 @@
/*
* Copyright (c) 2018, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "os/linux/os_linux.h"
#include "os/linux/debug_therm_gp106.h"
#include "os/linux/debug_clk_gv100.h"
/*
 * TU104 Linux-specific debugfs hooks: thermal comes from gp106,
 * clock from gv100 (TU104 reuses those units' debugfs layouts).
 */
static struct nvgpu_os_linux_ops tu104_os_linux_ops = {
.therm = {
.init_debugfs = gp106_therm_init_debugfs,
},
.clk = {
.init_debugfs = gv100_clk_init_debugfs,
},
};
void nvgpu_tu104_init_os_ops(struct nvgpu_os_linux *l)
{
l->ops.therm = tu104_os_linux_ops.therm;
l->ops.clk = tu104_os_linux_ops.clk;
}

View File

@@ -0,0 +1,22 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __LINUX_OS_OPS_TU104_H
#define __LINUX_OS_OPS_TU104_H

/* Keep the header self-contained for includers that only need the decl. */
struct nvgpu_os_linux;

/*
 * Install the TU104-specific Linux OS ops (debugfs hooks for therm/clk).
 * Bug fix: the original declaration was missing its terminating
 * semicolon, which made this header a syntax error for any includer.
 */
void nvgpu_tu104_init_os_ops(struct nvgpu_os_linux *l);

#endif

View File

@@ -0,0 +1,46 @@
/*
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/acr/nvgpu_acr.h>
#include <nvgpu/enabled.h>
#include <nvgpu/utils.h>
#include <nvgpu/debug.h>
#include <nvgpu/kmem.h>
#include <nvgpu/pmu.h>
#include <nvgpu/dma.h>
#include "tu104/acr_tu104.h"
#include "gp106/acr_gp106.h"
#include "tu104/sec2_tu104.h"
void nvgpu_tu104_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
{
nvgpu_log_fn(g, " ");
nvgpu_gp106_acr_sw_init(g, acr);
acr->bootstrap_owner = LSF_FALCON_ID_SEC2;
acr->max_supported_lsfm = MAX_SUPPORTED_LSFM;
acr->acr.acr_flcn_setup_hw_and_bl_bootstrap =
tu104_sec2_setup_hw_and_bl_bootstrap;
}

View File

@@ -0,0 +1,30 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_ACR_TU104_H
#define NVGPU_ACR_TU104_H
/* Maximum number of LS falcons the TU104 ACR light-secure manager handles. */
#define TU104_MAX_SUPPORTED_LSFM 4
/* Configure the nvgpu_acr state with TU104-specific settings. */
void nvgpu_tu104_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr);
#endif /*NVGPU_ACR_TU104_H*/

View File

@@ -0,0 +1,66 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/types.h>
#include <nvgpu/timers.h>
#include <nvgpu/io.h>
#include "gk20a/gk20a.h"
#include "tu104/bios_tu104.h"
#include "nvgpu/hw/tu104/hw_gc6_tu104.h"
/* Overall devinit-complete poll budget (ms) and per-iteration delay (us). */
#define NV_DEVINIT_VERIFY_TIMEOUT_MS 1000
#define NV_DEVINIT_VERIFY_TIMEOUT_DELAY_US 10
/* GFW boot progress lives in the low byte of scratch group 05 reg 0. */
#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT_PROGRESS_MASK \
0xFF
#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT_PROGRESS_COMPLETED \
0xFF
/*
 * Poll the GFW boot-progress field in the GC6 AON secure scratch
 * register until devinit reports completion or the CPU timeout expires.
 * Returns 0 on completion, -ETIMEDOUT on timeout, or the error from
 * timeout initialization.
 */
int tu104_bios_verify_devinit(struct gk20a *g)
{
	struct nvgpu_timeout timeout;
	int err;

	err = nvgpu_timeout_init(g, &timeout, NV_DEVINIT_VERIFY_TIMEOUT_MS,
				 NVGPU_TIMER_CPU_TIMER);
	if (err != 0) {
		return err;
	}

	while (!nvgpu_timeout_expired(&timeout)) {
		u32 progress = nvgpu_readl(g,
				gc6_aon_secure_scratch_group_05_r(0)) &
			NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT_PROGRESS_MASK;

		if (progress ==
		    NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT_PROGRESS_COMPLETED) {
			nvgpu_log_info(g, "devinit complete");
			return 0;
		}

		nvgpu_udelay(NV_DEVINIT_VERIFY_TIMEOUT_DELAY_US);
	}

	return -ETIMEDOUT;
}

View File

@@ -0,0 +1,30 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __BIOS_TU104_H__
#define __BIOS_TU104_H__
struct gk20a;
/* Poll until VBIOS devinit completes; 0 on success, -ETIMEDOUT on timeout. */
int tu104_bios_verify_devinit(struct gk20a *g);
#endif

View File

@@ -0,0 +1,54 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/ecc.h>
#include "gk20a/gk20a.h"
#include "gv11b/ecc_gv11b.h"
#include "tu104/ecc_tu104.h"
/*
 * Allocate TU104 ECC error counters: the gv11b set plus the per-FBPA
 * SEC/DED counters. On any counter allocation failure all ECC state is
 * released via nvgpu_ecc_free(). Returns 0 on success or a negative
 * error code.
 */
int tu104_ecc_init(struct gk20a *g)
{
	int err = gv11b_ecc_init(g);

	if (err != 0) {
		return err;
	}

	err = NVGPU_ECC_COUNTER_INIT_PER_FBPA(fbpa_ecc_sec_err_count);
	if (err == 0) {
		err = NVGPU_ECC_COUNTER_INIT_PER_FBPA(fbpa_ecc_ded_err_count);
	}

	if (err != 0) {
		nvgpu_err(g, "ecc counter allocate failed, err=%d", err);
		nvgpu_ecc_free(g);
	}

	return err;
}

View File

@@ -0,0 +1,28 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __ECC_TU104_H__
#define __ECC_TU104_H__
/* Allocate TU104 ECC counters (gv11b set plus per-FBPA SEC/DED). */
int tu104_ecc_init(struct gk20a *g);
#endif

View File

@@ -0,0 +1,105 @@
/*
* TU104 FBPA
*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/types.h>
#include <nvgpu/io.h>
#include <nvgpu/hw/tu104/hw_fbpa_tu104.h>
#include "gk20a/gk20a.h"
#include "tu104/fbpa_tu104.h"
/*
 * Enable SEC and DED ECC interrupts in the FBPA broadcast interrupt
 * control register. Always returns 0.
 */
int tu104_fbpa_init(struct gk20a *g)
{
	u32 enable_mask = fbpa_ecc_intr_ctrl_sec_intr_en_enabled_f() |
			  fbpa_ecc_intr_ctrl_ded_intr_en_enabled_f();
	u32 ctrl = gk20a_readl(g, fbpa_ecc_intr_ctrl_r());

	gk20a_writel(g, fbpa_ecc_intr_ctrl_r(), ctrl | enable_mask);

	/* Read back the broadcast register to make sure the write landed. */
	(void) gk20a_readl(g, fbpa_ecc_intr_ctrl_r());

	return 0;
}
/*
 * Service an ECC interrupt for one subpartition of one FBPA: report
 * counter overflows, harvest the HW SEC/DED error counts into the
 * driver's per-(fbpa, subp) ECC statistics, and ack the status bits.
 * Register access order matters: counters are read then cleared before
 * the status register is written back.
 */
static void tu104_fbpa_handle_ecc_intr(struct gk20a *g,
u32 fbpa_id, u32 subp_id)
{
u32 status, sec_cnt, ded_cnt;
u32 offset = nvgpu_get_litter_value(g, GPU_LIT_FBPA_STRIDE) * fbpa_id;
/* Two subpartitions per FBPA, so counters are indexed fbpa*2 + subp. */
u32 cnt_idx = fbpa_id * 2 + subp_id;
status = gk20a_readl(g, offset + fbpa_0_ecc_status_r(subp_id));
if (status & fbpa_0_ecc_status_sec_counter_overflow_pending_f()) {
nvgpu_err(g, "fbpa %u subp %u ecc sec counter overflow",
fbpa_id, subp_id);
}
if (status & fbpa_0_ecc_status_ded_counter_overflow_pending_f()) {
nvgpu_err(g, "fbpa %u subp %u ecc ded counter overflow",
fbpa_id, subp_id);
}
if (status & fbpa_0_ecc_status_sec_intr_pending_f()) {
/* Accumulate then zero the HW single-error (SEC) counter. */
sec_cnt = gk20a_readl(g,
offset + fbpa_0_ecc_sec_count_r(subp_id));
gk20a_writel(g, offset + fbpa_0_ecc_sec_count_r(subp_id), 0u);
g->ecc.fbpa.fbpa_ecc_sec_err_count[cnt_idx].counter += sec_cnt;
}
if (status & fbpa_0_ecc_status_ded_intr_pending_f()) {
/* Accumulate then zero the HW double-error (DED) counter. */
ded_cnt = gk20a_readl(g,
offset + fbpa_0_ecc_ded_count_r(subp_id));
gk20a_writel(g, offset + fbpa_0_ecc_ded_count_r(subp_id), 0u);
g->ecc.fbpa.fbpa_ecc_ded_err_count[cnt_idx].counter += ded_cnt;
}
/* Write status back to acknowledge the serviced interrupt bits. */
gk20a_writel(g, offset + fbpa_0_ecc_status_r(subp_id), status);
}
/*
 * Top-level FBPA interrupt handler: reads the per-FBPA interrupt status
 * and dispatches ECC handling for each subpartition with a pending
 * SEC/DED bit. Unknown interrupt sources are only logged.
 */
void tu104_fbpa_handle_intr(struct gk20a *g, u32 fbpa_id)
{
	u32 subp0_mask = fbpa_0_intr_status_sec_subp0_pending_f() |
			 fbpa_0_intr_status_ded_subp0_pending_f();
	u32 subp1_mask = fbpa_0_intr_status_sec_subp1_pending_f() |
			 fbpa_0_intr_status_ded_subp1_pending_f();
	u32 base = nvgpu_get_litter_value(g, GPU_LIT_FBPA_STRIDE) * fbpa_id;
	u32 status = gk20a_readl(g, base + fbpa_0_intr_status_r());

	if ((status & (subp0_mask | subp1_mask)) == 0U) {
		nvgpu_err(g, "unknown interrupt fbpa %u status %08x",
			  fbpa_id, status);
		return;
	}

	if ((status & subp0_mask) != 0U) {
		tu104_fbpa_handle_ecc_intr(g, fbpa_id, 0u);
	}
	if ((status & subp1_mask) != 0U) {
		tu104_fbpa_handle_ecc_intr(g, fbpa_id, 1u);
	}
}

View File

@@ -0,0 +1,33 @@
/*
* TU104 FBPA
*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
 * Guard renamed from "_NVGPU_TU104_FBPA": identifiers beginning with an
 * underscore followed by an uppercase letter are reserved for the
 * implementation (C11 7.1.3 / CERT DCL37-C).
 */
#ifndef NVGPU_TU104_FBPA_H
#define NVGPU_TU104_FBPA_H

struct gk20a;

/* One-time TU104 FBPA ECC interrupt setup. */
int tu104_fbpa_init(struct gk20a *g);
/* Service a pending interrupt for FBPA unit fbpa_id. */
void tu104_fbpa_handle_intr(struct gk20a *g, u32 fbpa_id);

#endif

View File

@@ -0,0 +1,283 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/dma.h>
#include <nvgpu/types.h>
#include <nvgpu/timers.h>
#include <nvgpu/io.h>
#include <nvgpu/utils.h>
#include <nvgpu/log2.h>
#include <nvgpu/channel.h>
#include "gk20a/gk20a.h"
#include "gk20a/fifo_gk20a.h"
#include "gp10b/fifo_gp10b.h"
#include "gv11b/fifo_gv11b.h"
#include "tu104/fifo_tu104.h"
#include "tu104/func_tu104.h"
#include <nvgpu/hw/tu104/hw_fifo_tu104.h>
#include <nvgpu/hw/tu104/hw_pbdma_tu104.h>
#include <nvgpu/hw/tu104/hw_ram_tu104.h>
#include <nvgpu/hw/tu104/hw_func_tu104.h>
#include <nvgpu/hw/tu104/hw_ctrl_tu104.h>
/*
 * Program the RAMFC image inside the channel's instance block for TU104.
 *
 * Zeroes the RAMFC area and then writes the GPFIFO base/size, PBDMA
 * signature, PB header defaults, subdevice info, engine targets, acquire
 * timeout, subcontext (veid) info and the engine method buffer. Privileged
 * channels additionally get the privileged auth level. Finally, userd
 * writeback is enabled and USERD is committed via the gp10b helper.
 *
 * @c:               channel being set up (its inst_block is written)
 * @gpfifo_base:     GPU VA of the GPFIFO ring
 * @gpfifo_entries:  number of GPFIFO entries (power of two; ilog2 is stored)
 * @acquire_timeout: acquire timeout value passed to pbdma_acquire_val
 * @flags:           unused here; kept for HAL signature compatibility
 *
 * Returns the result of channel_gp10b_commit_userd().
 */
int channel_tu104_setup_ramfc(struct channel_gk20a *c,
		u64 gpfifo_base, u32 gpfifo_entries,
		unsigned long acquire_timeout, u32 flags)
{
	struct gk20a *g = c->g;
	struct nvgpu_mem *mem = &c->inst_block;
	u32 data;

	nvgpu_log_fn(g, " ");

	/* Start from a clean RAMFC image. */
	nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());

	/* GPFIFO base: low bits are stored shifted by the reserved field. */
	nvgpu_mem_wr32(g, mem, ram_fc_gp_base_w(),
		pbdma_gp_base_offset_f(
		u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s())));

	/* High bits plus log2 of the ring size. */
	nvgpu_mem_wr32(g, mem, ram_fc_gp_base_hi_w(),
		pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) |
		pbdma_gp_base_hi_limit2_f(ilog2(gpfifo_entries)));

	nvgpu_mem_wr32(g, mem, ram_fc_signature_w(),
		c->g->ops.fifo.get_pbdma_signature(c->g));

	nvgpu_mem_wr32(g, mem, ram_fc_pb_header_w(),
		pbdma_pb_header_method_zero_f() |
		pbdma_pb_header_subchannel_zero_f() |
		pbdma_pb_header_level_main_f() |
		pbdma_pb_header_first_true_f() |
		pbdma_pb_header_type_inc_f());

	nvgpu_mem_wr32(g, mem, ram_fc_subdevice_w(),
		pbdma_subdevice_id_f(PBDMA_SUBDEVICE_ID) |
		pbdma_subdevice_status_active_f() |
		pbdma_subdevice_channel_dma_enable_f());

	/* Valid for both engine and CE contexts; default target is SW. */
	nvgpu_mem_wr32(g, mem, ram_fc_target_w(),
		pbdma_target_eng_ctx_valid_true_f() |
		pbdma_target_ce_ctx_valid_true_f() |
		pbdma_target_engine_sw_f());

	nvgpu_mem_wr32(g, mem, ram_fc_acquire_w(),
		g->ops.fifo.pbdma_acquire_val(acquire_timeout));

	/* Bind the channel to its subcontext (veid). */
	nvgpu_mem_wr32(g, mem, ram_fc_set_channel_info_w(),
		pbdma_set_channel_info_veid_f(c->subctx_id));

	nvgpu_mem_wr32(g, mem, ram_in_engine_wfi_veid_w(),
		ram_in_engine_wfi_veid_f(c->subctx_id));

	gv11b_fifo_init_ramfc_eng_method_buffer(g, c, mem);

	if (c->is_privileged_channel) {
		/* Set privilege level for channel */
		nvgpu_mem_wr32(g, mem, ram_fc_config_w(),
			pbdma_config_auth_level_privileged_f());

		gk20a_fifo_setup_ramfc_for_privileged_channel(c);
	}

	/* Enable userd writeback */
	data = nvgpu_mem_rd32(g, mem, ram_fc_config_w());
	data = data | pbdma_config_userd_writeback_enable_f();
	nvgpu_mem_wr32(g, mem, ram_fc_config_w(),data);

	gv11b_userd_writeback_config(g);

	return channel_gp10b_commit_userd(c);
}
/*
 * Submit a runlist buffer to HW for the given runlist.
 *
 * For a non-empty runlist the base address (split into aligned lo and hi
 * parts, with the aperture target) is programmed first; the final write to
 * the submit register with the entry count triggers the HW fetch. A count
 * of zero submits an empty runlist without touching the base registers.
 *
 * @g:            GPU driver context
 * @runlist_id:   runlist to submit to
 * @count:        number of runlist entries (0 for an empty runlist)
 * @buffer_index: which of the runlist's double buffers to submit
 */
void tu104_fifo_runlist_hw_submit(struct gk20a *g, u32 runlist_id,
		u32 count, u32 buffer_index)
{
	struct fifo_runlist_info_gk20a *rl_info =
			&g->fifo.runlist_info[runlist_id];
	struct nvgpu_mem *rl_mem = &rl_info->mem[buffer_index];
	u64 rl_addr = nvgpu_mem_get_addr(g, rl_mem);

	if (count != 0U) {
		u32 addr_lo = u64_lo32(rl_addr) >>
				fifo_runlist_base_lo_ptr_align_shift_v();
		u32 addr_hi = u64_hi32(rl_addr);

		nvgpu_writel(g, fifo_runlist_base_lo_r(runlist_id),
			fifo_runlist_base_lo_ptr_lo_f(addr_lo) |
			nvgpu_aperture_mask(g, rl_mem,
				fifo_runlist_base_lo_target_sys_mem_ncoh_f(),
				fifo_runlist_base_lo_target_sys_mem_coh_f(),
				fifo_runlist_base_lo_target_vid_mem_f()));

		nvgpu_writel(g, fifo_runlist_base_hi_r(runlist_id),
			fifo_runlist_base_hi_ptr_hi_f(addr_hi));
	}

	/* This write kicks off the HW runlist fetch. */
	nvgpu_writel(g, fifo_runlist_submit_r(runlist_id),
		fifo_runlist_submit_length_f(count));
}
/*
 * Poll until a previously submitted runlist is no longer pending in HW.
 *
 * Uses exponential backoff between polls, bounded by GR_IDLE_CHECK_MAX,
 * under a CPU timer scoped to the GR idle timeout.
 *
 * Returns 0 once the pending bit clears, a nvgpu_timeout_init() error if
 * the timer could not be armed, or -ETIMEDOUT on expiry.
 */
int tu104_fifo_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
{
	struct nvgpu_timeout timeout;
	unsigned long poll_delay = GR_IDLE_CHECK_DEFAULT;
	int err;

	err = nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
			NVGPU_TIMER_CPU_TIMER);
	if (err != 0) {
		return err;
	}

	do {
		u32 info = nvgpu_readl(g,
				fifo_runlist_submit_info_r(runlist_id));

		if ((info & fifo_runlist_submit_info_pending_true_f()) == 0) {
			return 0;
		}

		nvgpu_usleep_range(poll_delay, poll_delay * 2);
		poll_delay = min_t(u32, poll_delay << 1, GR_IDLE_CHECK_MAX);
	} while (!nvgpu_timeout_expired(&timeout));

	return -ETIMEDOUT;
}
/*
 * TU104 FIFO HW init: enable usermode doorbell delivery, then defer the
 * remaining setup to the gv11b implementation.
 *
 * The virtual-channel pending-enable bit must be set for
 * tu104_ring_channel_doorbell() to have any effect.
 */
int tu104_init_fifo_setup_hw(struct gk20a *g)
{
	u32 cfg;

	nvgpu_log_fn(g, " ");

	cfg = nvgpu_readl(g, ctrl_virtual_channel_cfg_r(0));
	cfg |= ctrl_virtual_channel_cfg_pending_enable_true_f();
	nvgpu_writel(g, ctrl_virtual_channel_cfg_r(0), cfg);

	return gv11b_init_fifo_setup_hw(g);
}
void tu104_ring_channel_doorbell(struct channel_gk20a *c)
{
struct fifo_gk20a *f = &c->g->fifo;
u32 hw_chid = f->channel_base + c->chid;
nvgpu_log_info(c->g, "channel ring door bell %d, runlist %d",
c->chid, c->runlist_id);
nvgpu_func_writel(c->g, func_doorbell_r(),
ctrl_doorbell_vector_f(hw_chid) |
ctrl_doorbell_runlist_id_f(c->runlist_id));
}
/*
 * Write one PDB-cache-WAR instance-block bind entry.
 *
 * Entry idx lives at idx * PAGE_SIZE within pdb_cache_war_mem; the
 * page-dir base lo/hi words are programmed with the given PDB address,
 * the buffer's aperture, 64kB big pages and ver2 PT format — the same
 * layout the HW expects in a real instance block.
 */
static void tu104_pdb_cache_war_write_entry(struct gk20a *g, u32 idx,
		u64 pdb_addr)
{
	u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v());
	u32 pdb_addr_hi = u64_hi32(pdb_addr);

	nvgpu_mem_wr32(g, &g->pdb_cache_war_mem,
		ram_in_page_dir_base_lo_w() + (idx * PAGE_SIZE / 4),
		nvgpu_aperture_mask(g, &g->pdb_cache_war_mem,
			ram_in_page_dir_base_target_sys_mem_ncoh_f(),
			ram_in_page_dir_base_target_sys_mem_coh_f(),
			ram_in_page_dir_base_target_vid_mem_f()) |
		ram_in_page_dir_base_vol_true_f() |
		ram_in_big_page_size_64kb_f() |
		ram_in_page_dir_base_lo_f(pdb_addr_lo) |
		ram_in_use_ver2_pt_format_true_f());

	nvgpu_mem_wr32(g, &g->pdb_cache_war_mem,
		ram_in_page_dir_base_hi_w() + (idx * PAGE_SIZE / 4),
		ram_in_page_dir_base_hi_f(pdb_addr_hi));
}

/*
 * Allocate and populate the PDB cache workaround buffer.
 *
 * The WAR performs 257 instance-block binds; the buffer holds those 257
 * page-sized instance blocks plus one extra page used as the PDB that the
 * 257th (last) bind points at, hence 258 pages. The first 256 binds may
 * target dummy addresses; only the last one must reference valid memory.
 *
 * Idempotent: returns 0 immediately if the buffer already exists.
 * Returns a negative errno if the sysmem allocation fails.
 *
 * Refactor vs. original: the 257th entry was a verbatim copy of the loop
 * body; both now share tu104_pdb_cache_war_write_entry().
 */
int tu104_init_pdb_cache_war(struct gk20a *g)
{
	u32 size = PAGE_SIZE * 258U;
	u64 last_bind_pdb_addr;
	u64 pdb_addr;
	u32 i;
	int err;

	if (nvgpu_mem_is_valid(&g->pdb_cache_war_mem)) {
		return 0;
	}

	/*
	 * Allocate memory for 257 instance block binds +
	 * PDB bound to 257th instance block
	 */
	err = nvgpu_dma_alloc_sys(g, size, &g->pdb_cache_war_mem);
	if (err) {
		return err;
	}

	/*
	 * 257th instance block (i.e. last bind) needs to be bound to
	 * valid memory
	 * First 256 binds can happen to dummy addresses
	 */
	pdb_addr = PAGE_SIZE;
	last_bind_pdb_addr = nvgpu_mem_get_addr(g, &g->pdb_cache_war_mem) +
				(257U * PAGE_SIZE);

	/* Setup first 256 instance blocks with dummy PDB addresses */
	for (i = 0U; i < 256U; i++) {
		tu104_pdb_cache_war_write_entry(g, i, pdb_addr);
		pdb_addr += PAGE_SIZE;
	}

	/* Setup 257th instance block with a valid PDB address */
	tu104_pdb_cache_war_write_entry(g, 256U, last_bind_pdb_addr);

	return 0;
}
/*
 * Free the PDB cache workaround buffer allocated by
 * tu104_init_pdb_cache_war(). Safe to call when the buffer was never
 * allocated (the validity check makes this a no-op).
 */
void tu104_deinit_pdb_cache_war(struct gk20a *g)
{
	if (nvgpu_mem_is_valid(&g->pdb_cache_war_mem)) {
		nvgpu_dma_free(g, &g->pdb_cache_war_mem);
	}
}

View File

@@ -0,0 +1,43 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
 * Guard renamed from "__FIFO_TU104_H__": identifiers containing a double
 * underscore are reserved for the implementation (C11 7.1.3 / CERT DCL37-C).
 */
#ifndef NVGPU_FIFO_TU104_H
#define NVGPU_FIFO_TU104_H

#include <nvgpu/types.h>

struct gk20a;
struct channel_gk20a;

/* Program a channel's RAMFC image in its instance block. */
int channel_tu104_setup_ramfc(struct channel_gk20a *c,
		u64 gpfifo_base, u32 gpfifo_entries,
		unsigned long acquire_timeout, u32 flags);
/* Submit runlist buffer buffer_index with count entries to HW. */
void tu104_fifo_runlist_hw_submit(struct gk20a *g, u32 runlist_id,
		u32 count, u32 buffer_index);
/* Poll until the submitted runlist is consumed; -ETIMEDOUT on expiry. */
int tu104_fifo_runlist_wait_pending(struct gk20a *g, u32 runlist_id);
/* TU104 FIFO HW init (doorbell enable + gv11b setup). */
int tu104_init_fifo_setup_hw(struct gk20a *g);
/* Ring the usermode doorbell for a channel. */
void tu104_ring_channel_doorbell(struct channel_gk20a *c);
/* Allocate/free the PDB cache workaround buffer. */
int tu104_init_pdb_cache_war(struct gk20a *g);
void tu104_deinit_pdb_cache_war(struct gk20a *g);

#endif

View File

@@ -0,0 +1,101 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/falcon.h>
#include "gk20a/gk20a.h"
#include "gk20a/flcn_gk20a.h"
#include "gv100/flcn_gv100.h"
#include "tu104/flcn_tu104.h"
#include "tu104/sec2_tu104.h"
#include <nvgpu/hw/tu104/hw_psec_tu104.h>
#include <nvgpu/hw/tu104/hw_pnvdec_tu104.h>
/*
 * Install the engine-dependent falcon callbacks for TU104.
 *
 * Only the SEC2 falcon has engine-specific reset/EMEM/queue handlers here;
 * every other falcon gets a NULL reset_eng, leaving the remaining ops to
 * the generic falcon layer.
 */
static void tu104_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
{
	struct nvgpu_falcon_engine_dependency_ops *dep_ops =
		&flcn->flcn_engine_dep_ops;

	if (flcn->flcn_id == FALCON_ID_SEC2) {
		dep_ops->reset_eng = tu104_sec2_reset;
		dep_ops->copy_to_emem = tu104_sec2_flcn_copy_to_emem;
		dep_ops->copy_from_emem = tu104_sec2_flcn_copy_from_emem;
		dep_ops->queue_head = tu104_sec2_queue_head;
		dep_ops->queue_tail = tu104_sec2_queue_tail;
	} else {
		dep_ops->reset_eng = NULL;
	}
}
/*
 * Install the falcon ops for TU104: start from the common gk20a ops and
 * then layer the TU104-specific engine dependencies on top.
 */
static void tu104_falcon_ops(struct nvgpu_falcon *flcn)
{
	gk20a_falcon_ops(flcn);
	tu104_falcon_engine_dependency_ops(flcn);
}
/*
 * SW init of a falcon's HAL on TU104.
 *
 * SEC2 and NVDEC are handled natively here: their register base is set,
 * they are marked supported with interrupts enabled, the copy lock is
 * initialized and the TU104 ops are installed. Any other falcon id is
 * forwarded to the gv100 implementation, which carries the info for
 * falcons unchanged since previous chips.
 *
 * Returns 0 on success, or the mutex-init / gv100-init error.
 */
int tu104_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	int err;

	switch (flcn->flcn_id) {
	case FALCON_ID_SEC2:
		flcn->flcn_base = psec_falcon_irqsset_r();
		flcn->is_falcon_supported = true;
		flcn->is_interrupt_enabled = true;
		break;
	case FALCON_ID_NVDEC:
		flcn->flcn_base = pnvdec_falcon_irqsset_r(0);
		flcn->is_falcon_supported = true;
		flcn->is_interrupt_enabled = true;
		break;
	default:
		/* Inherit support for this falcon from the previous chip. */
		flcn->is_falcon_supported = false;
		break;
	}

	if (!flcn->is_falcon_supported) {
		/* No TU104-specific changes: defer to the gv100 HAL. */
		return gv100_falcon_hal_sw_init(flcn);
	}

	err = nvgpu_mutex_init(&flcn->copy_lock);
	if (err != 0) {
		nvgpu_err(g, "Error in flcn.copy_lock mutex initialization");
		return err;
	}

	tu104_falcon_ops(flcn);
	return 0;
}

View File

@@ -0,0 +1,28 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
 * Guard renamed from "__FLCN_TU104_H__": identifiers containing a double
 * underscore are reserved for the implementation (C11 7.1.3 / CERT DCL37-C).
 * A forward declaration of struct nvgpu_falcon is added so the header is
 * self-contained for pointer use.
 */
#ifndef NVGPU_FLCN_TU104_H
#define NVGPU_FLCN_TU104_H

struct nvgpu_falcon;

/* SW init of a falcon's HAL on TU104 (falls back to gv100 when unchanged). */
int tu104_falcon_hal_sw_init(struct nvgpu_falcon *flcn);

#endif /* NVGPU_FLCN_TU104_H */

View File

@@ -0,0 +1,39 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/types.h>
#include <nvgpu/io.h>
#include "gk20a/gk20a.h"
#include "tu104/func_tu104.h"
#include <nvgpu/hw/tu104/hw_func_tu104.h>
/*
 * Write value v to register offset r within the TU104 "func" aperture.
 * The aperture's full physical offset is added before the MMIO write.
 */
void nvgpu_func_writel(struct gk20a *g, u32 r, u32 v)
{
	nvgpu_writel(g, r + func_full_phys_offset_v(), v);
}
/*
 * Read register offset r within the TU104 "func" aperture.
 * The aperture's full physical offset is added before the MMIO read.
 */
u32 nvgpu_func_readl(struct gk20a *g, u32 r)
{
	return nvgpu_readl(g, r + func_full_phys_offset_v());
}

View File

@@ -0,0 +1,33 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef FUNC_TU104_H
#define FUNC_TU104_H

#include <nvgpu/types.h>

struct gk20a;

/*
 * Accessors for the TU104 "func" register aperture; both add the
 * aperture's full physical offset to the given register offset r.
 */
void nvgpu_func_writel(struct gk20a *g, u32 r, u32 v);
u32 nvgpu_func_readl(struct gk20a *g, u32 r);

#endif

View File

@@ -0,0 +1,47 @@
/*
* TU104 Graphics Context
*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "gk20a/gk20a.h"
#include "gr_ctx_tu104.h"
#include "nvgpu_gpuid_next.h"
/*
 * Resolve the ctxsw firmware (netlist) image path for this GPU into name.
 *
 * @g:     GPU driver context (arch/impl ids are read from g->params)
 * @index: netlist index; unused for TU104, kept for HAL signature parity
 * @name:  output buffer for the firmware path; assumed large enough for
 *         "tu104/NETC_img.bin" — TODO confirm caller-provided size
 *
 * Returns 0 on success, -1 for an unsupported GPUID.
 *
 * Fix vs. original: the unsupported-GPUID path logged an error but still
 * returned 0 with *name left uninitialized; it now returns an error so
 * callers do not attempt to load a garbage firmware path.
 */
int gr_tu104_get_netlist_name(struct gk20a *g, int index, char *name)
{
	u32 ver = g->params.gpu_arch + g->params.gpu_impl;

	switch (ver) {
	case NVGPU_GPUID_NEXT:
		sprintf(name, "%s/%s", "tu104", "NETC_img.bin");
		break;
	default:
		nvgpu_err(g, "no support for GPUID %x", ver);
		return -1;
	}

	return 0;
}
/*
 * TU104 always uses a firmware-provided (netlist) context image rather
 * than a driver-generated one.
 */
bool gr_tu104_is_firmware_defined(void)
{
	return true;
}

View File

@@ -0,0 +1,28 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
 * Guard renamed from "__GR_CTX_TU104_H__": identifiers containing a double
 * underscore are reserved for the implementation (C11 7.1.3 / CERT DCL37-C).
 * A forward declaration of struct gk20a is added for self-containedness.
 */
#ifndef NVGPU_GR_CTX_TU104_H
#define NVGPU_GR_CTX_TU104_H

struct gk20a;

/* Resolve the ctxsw firmware path for this GPU; nonzero on failure. */
int gr_tu104_get_netlist_name(struct gk20a *g, int index, char *name);
/* TU104 always uses a firmware-defined context image. */
bool gr_tu104_is_firmware_defined(void);

#endif /* NVGPU_GR_CTX_TU104_H */

View File

@@ -0,0 +1,488 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/types.h>
#include <nvgpu/soc.h>
#include <nvgpu/io.h>
#include <nvgpu/utils.h>
#include <nvgpu/channel.h>
#include "gk20a/gk20a.h"
#include "gk20a/gr_gk20a.h"
#include "gk20a/gr_ctx_gk20a.h"
#include "gk20a/gr_pri_gk20a.h"
#include "gp10b/gr_gp10b.h"
#include "gv11b/gr_gv11b.h"
#include "tu104/gr_tu104.h"
#include <nvgpu/hw/tu104/hw_gr_tu104.h>
/*
 * Return true if class_num is a class TU104 accepts on a channel.
 *
 * Accepts the Turing GPFIFO, graphics, compute and DMA-copy classes and
 * otherwise defers to the gv11b check, so all previously supported
 * classes remain valid.
 *
 * Fix vs. original: a stray semicolon followed the function's closing
 * brace; removed.
 */
bool gr_tu104_is_valid_class(struct gk20a *g, u32 class_num)
{
	switch (class_num) {
	case TURING_CHANNEL_GPFIFO_A:
	case TURING_A:
	case TURING_COMPUTE_A:
	case TURING_DMA_COPY_A:
		return true;
	default:
		break;
	}

	return gr_gv11b_is_valid_class(g, class_num);
}
/*
 * Return true if class_num is a graphics class valid on TU104: the Turing
 * graphics class itself, or any gfx class already valid on gv11b.
 */
bool gr_tu104_is_valid_gfx_class(struct gk20a *g, u32 class_num)
{
	if (class_num == TURING_A) {
		return true;
	}

	return gr_gv11b_is_valid_gfx_class(g, class_num);
}
/*
 * Return true if class_num is a compute class valid on TU104: the Turing
 * compute class itself, or any compute class already valid on gv11b.
 */
bool gr_tu104_is_valid_compute_class(struct gk20a *g, u32 class_num)
{
	if (class_num == TURING_COMPUTE_A) {
		return true;
	}

	return gr_gv11b_is_valid_compute_class(g, class_num);
}
/*
 * Stream the 64-bit SW bundle init list into the GR pipe.
 *
 * Each list entry carries a 64-bit payload (value_lo/value_hi) and a bundle
 * address. The payload registers are rewritten only when the value differs
 * from the previous entry; the address write then latches the bundle. After
 * a GO_IDLE bundle the full GR idle is awaited; otherwise, on silicon, only
 * FE idle is awaited.
 *
 * Returns 0 on success or the first wait error.
 *
 * Fix vs. original: the error accumulator was declared u32 and combined
 * with |=, which mangles the negative error codes returned by the wait
 * helpers (and the function returns int). It is now an int and the loop
 * stops at the first failure; callers checking for nonzero see the same
 * outcome on both success and failure.
 */
int gr_tu104_init_sw_bundle64(struct gk20a *g)
{
	u32 i;
	u32 last_bundle_data_lo = 0;
	u32 last_bundle_data_hi = 0;
	int err = 0;
	struct av64_list_gk20a *sw_bundle64_init =
			&g->gr.ctx_vars.sw_bundle64_init;

	for (i = 0; i < sw_bundle64_init->count; i++) {
		if (i == 0 ||
		    (last_bundle_data_lo != sw_bundle64_init->l[i].value_lo) ||
		    (last_bundle_data_hi != sw_bundle64_init->l[i].value_hi)) {
			nvgpu_writel(g, gr_pipe_bundle_data_r(),
				sw_bundle64_init->l[i].value_lo);
			nvgpu_writel(g, gr_pipe_bundle_data_hi_r(),
				sw_bundle64_init->l[i].value_hi);

			last_bundle_data_lo = sw_bundle64_init->l[i].value_lo;
			last_bundle_data_hi = sw_bundle64_init->l[i].value_hi;
		}

		nvgpu_writel(g, gr_pipe_bundle_address_r(),
			sw_bundle64_init->l[i].addr);

		if (gr_pipe_bundle_address_value_v(sw_bundle64_init->l[i].addr)
				== GR_GO_IDLE_BUNDLE) {
			err = gr_gk20a_wait_idle(g,
				gk20a_get_gr_idle_timeout(g),
				GR_IDLE_CHECK_DEFAULT);
		} else if (nvgpu_platform_is_silicon(g)) {
			err = gr_gk20a_wait_fe_idle(g,
				gk20a_get_gr_idle_timeout(g),
				GR_IDLE_CHECK_DEFAULT);
		}

		if (err != 0) {
			break;
		}
	}

	return err;
}
/*
 * Allocate TU104's global context buffers: first the RTV circular buffer
 * (sized from the HW default + doorbell adder, in 256B granules), then the
 * common set via the gk20a helper. On common-set failure the RTV buffer is
 * released again so no partial allocation is left behind.
 *
 * Returns 0 on success or a negative errno.
 */
int gr_tu104_alloc_global_ctx_buffers(struct gk20a *g)
{
	struct gr_gk20a *gr = &g->gr;
	u32 rtv_cb_size;
	int err;

	nvgpu_log_fn(g, " ");

	rtv_cb_size =
		(gr_scc_rm_rtv_cb_size_div_256b_default_f() +
		 gr_scc_rm_rtv_cb_size_div_256b_db_adder_f()) *
		gr_scc_bundle_cb_size_div_256b_byte_granularity_v();
	nvgpu_log_info(g, "rtv_circular_buffer_size : %u", rtv_cb_size);

	err = gk20a_gr_alloc_ctx_buffer(g,
			&gr->global_ctx_buffer[RTV_CIRCULAR_BUFFER],
			rtv_cb_size);
	if (err != 0) {
		return err;
	}

	err = gr_gk20a_alloc_global_ctx_buffers(g);
	if (err != 0) {
		/* Roll back the RTV buffer on failure. */
		nvgpu_err(g, "fail");
		gk20a_gr_destroy_ctx_buffer(g,
			&gr->global_ctx_buffer[RTV_CIRCULAR_BUFFER]);
		return err;
	}

	return 0;
}
/*
 * Map the global context buffers into a channel's VM for TU104.
 *
 * Maps the TU104-specific RTV circular buffer first, records its GPU VA,
 * size and buffer index in the TSG's gr_ctx tables, then maps the common
 * set via the gk20a helper. If the common mapping fails, the RTV mapping
 * is undone.
 *
 * Returns 0 on success, -EINVAL if the channel has no TSG, -ENOMEM if the
 * RTV mapping fails, or the common-helper error.
 */
int gr_tu104_map_global_ctx_buffers(struct gk20a *g,
		struct channel_gk20a *ch)
{
	int err;
	struct tsg_gk20a *tsg;
	struct vm_gk20a *ch_vm = ch->vm;
	u64 *g_bfr_va;
	u64 *g_bfr_size;
	int *g_bfr_index;
	struct gr_gk20a *gr = &g->gr;
	struct nvgpu_mem *mem;
	u64 gpu_va;

	nvgpu_log_fn(g, " ");

	tsg = tsg_gk20a_from_ch(ch);
	if (!tsg) {
		return -EINVAL;
	}

	/* Per-TSG bookkeeping tables for global buffer mappings. */
	g_bfr_va = tsg->gr_ctx.global_ctx_buffer_va;
	g_bfr_size = tsg->gr_ctx.global_ctx_buffer_size;
	g_bfr_index = tsg->gr_ctx.global_ctx_buffer_index;

	/* RTV circular buffer */
	mem = &gr->global_ctx_buffer[RTV_CIRCULAR_BUFFER].mem;
	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size, 0,
				gk20a_mem_flag_none, true, mem->aperture);
	if (!gpu_va) {
		return -ENOMEM;
	}
	g_bfr_va[RTV_CIRCULAR_BUFFER_VA] = gpu_va;
	g_bfr_size[RTV_CIRCULAR_BUFFER_VA] = mem->size;
	g_bfr_index[RTV_CIRCULAR_BUFFER_VA] = RTV_CIRCULAR_BUFFER;

	err = gr_gk20a_map_global_ctx_buffers(g, ch);
	if (err) {
		goto clean_up;
	}

	return 0;

clean_up:
	/* Undo the RTV mapping; the stale table entries are not reused
	 * after failure — NOTE(review): confirm callers treat this TSG
	 * context as dead on error. */
	nvgpu_err(g, "fail");
	nvgpu_gmmu_unmap(ch_vm, mem, gpu_va);

	return err;
}
/*
 * Patch the RTV circular buffer base/size into the graphics context.
 *
 * Writes the (pre-shifted, 256B-aligned) base address to both the SCC and
 * GPC GCC copies, the size in 256B units, and zeroes the GFXP reserve for
 * the RTV CB. When 'patch' is true the writes go through the ctx patch
 * list instead of directly to the registers.
 *
 * @addr: buffer base already shifted into the addr_39_8 field format
 * @size: buffer size in 256-byte units
 */
static void gr_tu104_commit_rtv_circular_buffer(struct gk20a *g,
			struct nvgpu_gr_ctx *gr_ctx,
			u64 addr, u32 size, bool patch)
{
	gr_gk20a_ctx_patch_write(g, gr_ctx, gr_scc_rm_rtv_cb_base_r(),
		gr_scc_rm_rtv_cb_base_addr_39_8_f(addr), patch);
	gr_gk20a_ctx_patch_write(g, gr_ctx, gr_scc_rm_rtv_cb_size_r(),
		gr_scc_rm_rtv_cb_size_div_256b_f(size), patch);
	gr_gk20a_ctx_patch_write(g, gr_ctx, gr_gpcs_gcc_rm_rtv_cb_base_r(),
		gr_gpcs_gcc_rm_rtv_cb_base_addr_39_8_f(addr), patch);
	gr_gk20a_ctx_patch_write(g, gr_ctx, gr_scc_rm_gfxp_reserve_r(),
		gr_scc_rm_gfxp_reserve_rtv_cb_size_div_256b_f(0), patch);
}
/*
 * Commit the global context buffers into a channel's graphics context.
 *
 * Runs the common gk20a commit first, then patches the TU104-specific RTV
 * circular buffer base (folded into the addr_39_8 field format) and size.
 * When 'patch' is true the writes are appended to the ctx patch list,
 * bracketed by patch_write_begin/end.
 *
 * Returns 0 on success, -EINVAL if the channel has no TSG, or the error
 * from the common commit / patch-begin.
 *
 * Fix vs. original: the patch branch declared a second `int err` that
 * shadowed the function-scope one; the shadowed declaration is removed
 * (behavior unchanged, intent clearer).
 */
int gr_tu104_commit_global_ctx_buffers(struct gk20a *g,
			struct channel_gk20a *ch, bool patch)
{
	int err;
	struct tsg_gk20a *tsg;
	struct nvgpu_gr_ctx *gr_ctx = NULL;
	u64 addr;
	u32 size;

	err = gr_gk20a_commit_global_ctx_buffers(g, ch, patch);
	if (err) {
		return err;
	}

	tsg = tsg_gk20a_from_ch(ch);
	if (!tsg) {
		return -EINVAL;
	}
	gr_ctx = &tsg->gr_ctx;

	if (patch) {
		err = gr_gk20a_ctx_patch_write_begin(g, gr_ctx, false);
		if (err) {
			return err;
		}
	}

	/* RTV circular buffer: fold the 40-bit VA into addr_39_8 format. */
	addr = (u64_lo32(gr_ctx->global_ctx_buffer_va[RTV_CIRCULAR_BUFFER_VA]) >>
		gr_scc_rm_rtv_cb_base_addr_39_8_align_bits_f()) |
	       (u64_hi32(gr_ctx->global_ctx_buffer_va[RTV_CIRCULAR_BUFFER_VA]) <<
		(32 - gr_scc_rm_rtv_cb_base_addr_39_8_align_bits_f()));

	size = (gr_scc_rm_rtv_cb_size_div_256b_default_f() +
		gr_scc_rm_rtv_cb_size_div_256b_db_adder_f());

	gr_tu104_commit_rtv_circular_buffer(g, gr_ctx, addr, size, patch);

	if (patch) {
		gr_gk20a_ctx_patch_write_end(g, gr_ctx, false);
	}

	return 0;
}
/*
 * Load the HW production defaults for the bundle circular buffer: its
 * size (in 256B units), the minimum GPM FIFO depth, and the bundle token
 * limit used in distribution config.
 */
void gr_tu104_bundle_cb_defaults(struct gk20a *g)
{
	struct gr_gk20a *gr = &g->gr;

	gr->bundle_cb_default_size =
		gr_scc_bundle_cb_size_div_256b__prod_v();
	gr->min_gpm_fifo_depth =
		gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v();
	gr->bundle_cb_token_limit =
		gr_pd_ab_dist_cfg2_token_limit_init_v();
}
/*
 * Set the default attribute (beta) and alpha circular buffer sizes.
 * The attribute size is only filled in when not already configured
 * (e.g. by platform data); the alpha size is always reset to the HW
 * default.
 */
void gr_tu104_cb_size_default(struct gk20a *g)
{
	struct gr_gk20a *gr = &g->gr;

	if (gr->attrib_cb_default_size == 0U) {
		gr->attrib_cb_default_size =
			gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v();
	}

	gr->alpha_cb_default_size =
		gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v();
}
/*
 * Report the preemption modes TU104 supports and their defaults:
 * WFI-only for graphics; WFI, CTA and CILP for compute, with WFI as
 * the default on both. Always succeeds.
 */
int gr_tu104_get_preemption_mode_flags(struct gk20a *g,
		struct nvgpu_preemption_modes_rec *preemption_modes_rec)
{
	struct nvgpu_preemption_modes_rec *rec = preemption_modes_rec;

	rec->graphics_preemption_mode_flags =
		NVGPU_PREEMPTION_MODE_GRAPHICS_WFI;
	rec->compute_preemption_mode_flags =
		NVGPU_PREEMPTION_MODE_COMPUTE_WFI |
		NVGPU_PREEMPTION_MODE_COMPUTE_CTA |
		NVGPU_PREEMPTION_MODE_COMPUTE_CILP;

	rec->default_graphics_preempt_mode =
		NVGPU_PREEMPTION_MODE_GRAPHICS_WFI;
	rec->default_compute_preempt_mode =
		NVGPU_PREEMPTION_MODE_COMPUTE_WFI;

	return 0;
}
/*
 * Enable GPC-level exception reporting: SM exceptions on all TPCs, then
 * TPC, GCC, GPCCS and GPCMMU exception sources at the GPC level.
 *
 * Fix vs. original: the TPC mask was built with a signed `1 <<` shift,
 * which is undefined behavior once max_tpc_per_gpc_count reaches 31;
 * an unsigned literal is used instead.
 */
void gr_tu104_enable_gpc_exceptions(struct gk20a *g)
{
	struct gr_gk20a *gr = &g->gr;
	u32 tpc_mask;

	gk20a_writel(g, gr_gpcs_tpcs_tpccs_tpc_exception_en_r(),
			gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f());

	/* One enable bit per TPC present in a GPC. */
	tpc_mask = gr_gpcs_gpccs_gpc_exception_en_tpc_f(
			(1U << gr->max_tpc_per_gpc_count) - 1U);

	gk20a_writel(g, gr_gpcs_gpccs_gpc_exception_en_r(),
		(tpc_mask | gr_gpcs_gpccs_gpc_exception_en_gcc_f(1) |
		 gr_gpcs_gpccs_gpc_exception_en_gpccs_f(1) |
		 gr_gpcs_gpccs_gpc_exception_en_gpcmmu_f(1)));
}
/*
 * Compute the byte offset of a register class within the ucode's GPCCS
 * context segment.
 *
 * The ucode lays the segment out as TPC regs, then PPC regs (only when
 * the chip has more than one PES per GPC), then GPC regs, then the
 * EGPC/ETPC area aligned up to a 256-byte boundary. Each register entry
 * is 4 bytes (hence the << 2 scaling).
 *
 * @addr_type:           which class of registers the caller is locating
 * @num_tpcs:            TPC count in the target GPC
 * @num_ppcs:            PPC count in the target GPC
 * @reg_list_ppc_count:  number of PPC register entries per PPC
 * @__offset_in_segment: out: computed byte offset
 *
 * Returns 0 on success, -EINVAL for an unknown address type.
 *
 * Fix vs. original: the debug log used the format "0x%#08x"; '#' already
 * emits the 0x prefix, so the message printed "0x0x...". The literal
 * prefix is dropped.
 */
int gr_tu104_get_offset_in_gpccs_segment(struct gk20a *g,
	enum ctxsw_addr_type addr_type,
	u32 num_tpcs,
	u32 num_ppcs,
	u32 reg_list_ppc_count,
	u32 *__offset_in_segment)
{
	u32 offset_in_segment = 0;
	struct gr_gk20a *gr = &g->gr;
	u32 num_pes_per_gpc = nvgpu_get_litter_value(g,
				GPU_LIT_NUM_PES_PER_GPC);

	if (addr_type == CTXSW_ADDR_TYPE_TPC) {
		/*
		 * reg = gr->ctx_vars.ctxsw_regs.tpc.l;
		 * offset_in_segment = 0;
		 */
	} else if (addr_type == CTXSW_ADDR_TYPE_PPC) {
		/*
		 * The ucode stores TPC data before PPC data.
		 * Advance offset past TPC data to PPC data.
		 */
		offset_in_segment =
			((gr->ctx_vars.ctxsw_regs.tpc.count *
				num_tpcs) << 2);
	} else if (addr_type == CTXSW_ADDR_TYPE_GPC) {
		/*
		 * The ucode stores TPC/PPC data before GPC data.
		 * Advance offset past TPC/PPC data to GPC data.
		 *
		 * Note 1 PES_PER_GPC case
		 */
		if (num_pes_per_gpc > 1) {
			offset_in_segment =
				(((gr->ctx_vars.ctxsw_regs.tpc.count *
					num_tpcs) << 2) +
				 ((reg_list_ppc_count * num_ppcs) << 2));
		} else {
			offset_in_segment =
				((gr->ctx_vars.ctxsw_regs.tpc.count *
					num_tpcs) << 2);
		}
	} else if ((addr_type == CTXSW_ADDR_TYPE_EGPC) ||
			(addr_type == CTXSW_ADDR_TYPE_ETPC)) {
		if (num_pes_per_gpc > 1) {
			offset_in_segment =
				((gr->ctx_vars.ctxsw_regs.tpc.count *
					num_tpcs) << 2) +
				((reg_list_ppc_count * num_ppcs) << 2) +
				(gr->ctx_vars.ctxsw_regs.gpc.count << 2);
		} else {
			offset_in_segment =
				((gr->ctx_vars.ctxsw_regs.tpc.count *
					num_tpcs) << 2) +
				(gr->ctx_vars.ctxsw_regs.gpc.count << 2);
		}

		/* aligned to next 256 byte */
		offset_in_segment = ALIGN(offset_in_segment, 256);

		nvgpu_log(g, gpu_dbg_info | gpu_dbg_gpu_dbg,
			"egpc etpc offset_in_segment %#08x",
			offset_in_segment);
	} else {
		nvgpu_log_fn(g, "Unknown address type.");
		return -EINVAL;
	}

	*__offset_in_segment = offset_in_segment;
	return 0;
}
/*
 * SW method handler for NVC5C0_SET_SM_DISP_CTRL: update the
 * compute-shader-quad field of the SM dispatch control register via
 * read-modify-write. Values other than the explicit enable/disable
 * encodings leave the field untouched (the register is still written
 * back unchanged).
 */
static void gr_tu104_set_sm_disp_ctrl(struct gk20a *g, u32 data)
{
	u32 quad_req = data & NVC5C0_SET_SM_DISP_CTRL_COMPUTE_SHADER_QUAD_MASK;
	u32 disp_ctrl;

	nvgpu_log_fn(g, " ");

	disp_ctrl = nvgpu_readl(g, gr_gpcs_tpcs_sm_disp_ctrl_r());

	if (quad_req == NVC5C0_SET_SM_DISP_CTRL_COMPUTE_SHADER_QUAD_DISABLE) {
		disp_ctrl = set_field(disp_ctrl,
			gr_gpcs_tpcs_sm_disp_ctrl_compute_shader_quad_m(),
			gr_gpcs_tpcs_sm_disp_ctrl_compute_shader_quad_disable_f());
	} else if (quad_req ==
			NVC5C0_SET_SM_DISP_CTRL_COMPUTE_SHADER_QUAD_ENABLE) {
		disp_ctrl = set_field(disp_ctrl,
			gr_gpcs_tpcs_sm_disp_ctrl_compute_shader_quad_m(),
			gr_gpcs_tpcs_sm_disp_ctrl_compute_shader_quad_enable_f());
	}

	nvgpu_writel(g, gr_gpcs_tpcs_sm_disp_ctrl_r(), disp_ctrl);
}
/*
 * Dispatch a software method (class method trapped by FECS rather than
 * implemented in hardware) for the Turing compute (TURING_COMPUTE_A,
 * 0xC5C0) and graphics (TURING_A, 0xC597) classes.
 *
 * @offset is the method offset in 32-bit words; the case labels below
 * are byte offsets, hence the "offset << 2" conversion.
 * @data is the method payload, forwarded to the per-method handler.
 *
 * Returns 0 when the method was handled (or @class_num is not one of
 * the serviced classes), -EINVAL for an unrecognized method offset of
 * a serviced class.
 */
int gr_tu104_handle_sw_method(struct gk20a *g, u32 addr,
		u32 class_num, u32 offset, u32 data)
{
	nvgpu_log_fn(g, " ");
	if (class_num == TURING_COMPUTE_A) {
		switch (offset << 2) {
		case NVC5C0_SET_SHADER_EXCEPTIONS:
			gv11b_gr_set_shader_exceptions(g, data);
			break;
		case NVC5C0_SET_SKEDCHECK:
			gr_gv11b_set_skedcheck(g, data);
			break;
		case NVC5C0_SET_SM_DISP_CTRL:
			gr_tu104_set_sm_disp_ctrl(g, data);
			break;
		case NVC5C0_SET_SHADER_CUT_COLLECTOR:
			gr_gv11b_set_shader_cut_collector(g, data);
			break;
		default:
			goto fail;
		}
	}
	if (class_num == TURING_A) {
		switch (offset << 2) {
		case NVC597_SET_SHADER_EXCEPTIONS:
			gv11b_gr_set_shader_exceptions(g, data);
			break;
		case NVC597_SET_CIRCULAR_BUFFER_SIZE:
			g->ops.gr.set_circular_buffer_size(g, data);
			break;
		case NVC597_SET_ALPHA_CIRCULAR_BUFFER_SIZE:
			g->ops.gr.set_alpha_circular_buffer_size(g, data);
			break;
		case NVC597_SET_GO_IDLE_TIMEOUT:
			gr_gv11b_set_go_idle_timeout(g, data);
			break;
		/*
		 * NOTE(review): this is the Pascal-era NVC097_ constant while
		 * every other case here uses an NVC597_ one; presumably the
		 * byte offset of SET_COALESCE_BUFFER_SIZE is identical across
		 * the classes -- confirm against the class headers.
		 */
		case NVC097_SET_COALESCE_BUFFER_SIZE:
			gr_gv11b_set_coalesce_buffer_size(g, data);
			break;
		case NVC597_SET_TEX_IN_DBG:
			gr_gv11b_set_tex_in_dbg(g, data);
			break;
		case NVC597_SET_SKEDCHECK:
			gr_gv11b_set_skedcheck(g, data);
			break;
		case NVC597_SET_BES_CROP_DEBUG3:
			g->ops.gr.set_bes_crop_debug3(g, data);
			break;
		case NVC597_SET_BES_CROP_DEBUG4:
			g->ops.gr.set_bes_crop_debug4(g, data);
			break;
		case NVC597_SET_SHADER_CUT_COLLECTOR:
			gr_gv11b_set_shader_cut_collector(g, data);
			break;
		default:
			goto fail;
		}
	}
	return 0;
fail:
	return -EINVAL;
}

View File

@@ -0,0 +1,90 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __GR_TU104_H__
#define __GR_TU104_H__
#include <nvgpu/types.h>
struct gk20a;
struct nvgpu_preemption_modes_rec;
/* Turing class identifiers serviced by the TU104 GR HAL. */
enum {
	TURING_CHANNEL_GPFIFO_A = 0xC46F,
	TURING_A = 0xC597,
	TURING_COMPUTE_A = 0xC5C0,
	TURING_DMA_COPY_A = 0xC5B5,
};
/* TURING_COMPUTE_A (0xC5C0) software-method byte offsets. */
#define NVC5C0_SET_SHADER_EXCEPTIONS 0x1528
#define NVC5C0_SET_SKEDCHECK 0x23c
#define NVC5C0_SET_SHADER_CUT_COLLECTOR 0x254
#define NVC5C0_SET_SM_DISP_CTRL 0x250
/* Payload encoding for SET_SM_DISP_CTRL: bit 0 selects quad mode. */
#define NVC5C0_SET_SM_DISP_CTRL_COMPUTE_SHADER_QUAD_MASK 0x1
#define NVC5C0_SET_SM_DISP_CTRL_COMPUTE_SHADER_QUAD_DISABLE 0
#define NVC5C0_SET_SM_DISP_CTRL_COMPUTE_SHADER_QUAD_ENABLE 1
/* TURING_A (0xC597) software-method byte offsets. */
#define NVC597_SET_SHADER_EXCEPTIONS 0x1528
#define NVC597_SET_CIRCULAR_BUFFER_SIZE 0x1280
#define NVC597_SET_ALPHA_CIRCULAR_BUFFER_SIZE 0x02dc
#define NVC597_SET_GO_IDLE_TIMEOUT 0x022c
#define NVC597_SET_TEX_IN_DBG 0x10bc
#define NVC597_SET_SKEDCHECK 0x10c0
#define NVC597_SET_BES_CROP_DEBUG3 0x10c4
#define NVC597_SET_BES_CROP_DEBUG4 0x10b0
#define NVC597_SET_SHADER_CUT_COLLECTOR 0x10d0
/* TODO: merge these into global context buffer list in gr_gk20a.h */
#define RTV_CIRCULAR_BUFFER 8
#define RTV_CIRCULAR_BUFFER_VA 5
/* Class validation and global context buffer management. */
bool gr_tu104_is_valid_class(struct gk20a *g, u32 class_num);
bool gr_tu104_is_valid_gfx_class(struct gk20a *g, u32 class_num);
bool gr_tu104_is_valid_compute_class(struct gk20a *g, u32 class_num);
int gr_tu104_init_sw_bundle64(struct gk20a *g);
void gr_tu10x_create_sysfs(struct gk20a *g);
void gr_tu10x_remove_sysfs(struct gk20a *g);
int gr_tu104_alloc_global_ctx_buffers(struct gk20a *g);
int gr_tu104_map_global_ctx_buffers(struct gk20a *g,
		struct channel_gk20a *ch);
int gr_tu104_commit_global_ctx_buffers(struct gk20a *g,
		struct channel_gk20a *ch, bool patch);
void gr_tu104_bundle_cb_defaults(struct gk20a *g);
void gr_tu104_cb_size_default(struct gk20a *g);
int gr_tu104_get_preemption_mode_flags(struct gk20a *g,
		struct nvgpu_preemption_modes_rec *preemption_modes_rec);
void gr_tu104_enable_gpc_exceptions(struct gk20a *g);
/* Compute the byte offset of a register segment inside the GPCCS
 * context-switch image (see gr_tu104.c for the segment layout). */
int gr_tu104_get_offset_in_gpccs_segment(struct gk20a *g,
	enum ctxsw_addr_type addr_type, u32 num_tpcs, u32 num_ppcs,
	u32 reg_list_ppc_count, u32 *__offset_in_segment);
int gr_tu104_handle_sw_method(struct gk20a *g, u32 addr,
		u32 class_num, u32 offset, u32 data);
#endif

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,32 @@
/*
* TU104 Tegra HAL interface
*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVGPU_HAL_TU104_H__
#define __NVGPU_HAL_TU104_H__
struct gk20a;
/*
 * Populate the HAL function pointers (g->ops) and enabled-flags for
 * the TU104 chip. Returns 0 on success, negative errno on failure.
 *
 * The parameter was previously named "gops", which misleadingly
 * suggested a struct gpu_ops; it is the per-GPU gk20a instance.
 */
int tu104_init_hal(struct gk20a *g);
#endif

View File

@@ -0,0 +1,257 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifdef CONFIG_TEGRA_NVLINK
#include <nvgpu/nvgpu_common.h>
#include <nvgpu/bitops.h>
#include <nvgpu/nvlink.h>
#include <nvgpu/enabled.h>
#include <nvgpu/io.h>
#include <nvgpu/timers.h>
#include "gk20a/gk20a.h"
#include "gv100/nvlink_gv100.h"
#include "nvlink_tu104.h"
#include <nvgpu/hw/tu104/hw_minion_tu104.h>
#include <nvgpu/hw/tu104/hw_nvl_tu104.h>
/*
 * Perform receiver detection (RXDET) on a single nvlink link.
 *
 * Issues the INITRXTERM and TURING_RXDET minion DLCMDs, then polls
 * the SL0 link RXDET status register until it reports "found"
 * (success), "timeout" (no receiver detected) or the CPU-side poll
 * timeout expires.
 *
 * Returns 0 on success, the minion/timeout-init error code on
 * command failure, or -ETIMEDOUT when no receiver was detected.
 *
 * Fix: `ret` was declared u32 although it holds signed error codes
 * from int-returning helpers and is returned as int; use int so
 * negative values propagate without implicit conversions.
 */
int tu104_nvlink_rxdet(struct gk20a *g, u32 link_id)
{
	int ret;
	u32 reg;
	struct nvgpu_timeout timeout;

	/* 0x5 is the INITRXTERM DLCMD opcode (no minion header define). */
	ret = gv100_nvlink_minion_send_command(g, link_id,
						0x00000005U, 0, true);
	if (ret) {
		nvgpu_err(g, "Error during INITRXTERM minion DLCMD on link %u",
			link_id);
		return ret;
	}
	ret = gv100_nvlink_minion_send_command(g, link_id,
		minion_nvlink_dl_cmd_command_turing_rxdet_v(), 0, true);
	if (ret) {
		nvgpu_err(g, "Error during RXDET minion DLCMD on link %u",
			link_id);
		return ret;
	}
	ret = nvgpu_timeout_init(g, &timeout, NV_NVLINK_REG_POLL_TIMEOUT_MS,
				NVGPU_TIMER_CPU_TIMER);
	if (ret != 0) {
		nvgpu_err(g, "Error during timeout init");
		return ret;
	}
	/* Poll until the HW reports a definitive RXDET result. */
	do {
		reg = DLPL_REG_RD32(g, link_id, nvl_sl0_link_rxdet_status_r());
		if (nvl_sl0_link_rxdet_status_sts_v(reg) ==
				nvl_sl0_link_rxdet_status_sts_found_v()) {
			nvgpu_log(g, gpu_dbg_nvlink,
				"RXDET successful on link %u", link_id);
			return ret;
		}
		if (nvl_sl0_link_rxdet_status_sts_v(reg) ==
				nvl_sl0_link_rxdet_status_sts_timeout_v()) {
			nvgpu_log(g, gpu_dbg_nvlink,
				"RXDET failed on link %u", link_id);
			break;
		}
		nvgpu_udelay(NV_NVLINK_TIMEOUT_DELAY_US);
	} while (!nvgpu_timeout_expired_msg(
			&timeout,
			"RXDET status check timed out on link %u",
			link_id));
	/* HW "timeout" status or CPU poll expiry both end up here. */
	return -ETIMEDOUT;
}
/*
 * Switch the TX clock of every link in @link_mask to the PLL clock.
 *
 * For each set bit, the TXCLKSWITCH_PLL minion DLCMD is issued and the
 * link clock status register is polled until txclk reports it is
 * running from the PLL, or the CPU-side poll timeout expires.
 *
 * Returns 0 on success, a minion/timeout-init error code, or
 * -ETIMEDOUT if any link fails to switch in time.
 */
int tu104_nvlink_setup_pll(struct gk20a *g, unsigned long link_mask)
{
	int ret = 0;
	u32 link_id;
	u32 reg;
	struct nvgpu_timeout timeout;
	for_each_set_bit(link_id, &link_mask, 32) {
		ret = gv100_nvlink_minion_send_command(g, link_id,
			minion_nvlink_dl_cmd_command_txclkswitch_pll_v(),
			0, true);
		if (ret) {
			nvgpu_err(g, "Error: TXCLKSWITCH_PLL dlcmd on link %u",
				link_id);
			return ret;
		}
		ret = nvgpu_timeout_init(g, &timeout,
			NV_NVLINK_REG_POLL_TIMEOUT_MS, NVGPU_TIMER_CPU_TIMER);
		if (ret) {
			nvgpu_err(g, "Error during timeout init");
			return ret;
		}
		/* Poll until txclk reports the PLL as its source. */
		do {
			reg = DLPL_REG_RD32(g, link_id, nvl_clk_status_r());
			if (nvl_clk_status_txclk_sts_v(reg) ==
				nvl_clk_status_txclk_sts_pll_clk_v()) {
				nvgpu_log(g, gpu_dbg_nvlink,
					"PLL SETUP successful on link %u",
					link_id);
				break;
			}
			nvgpu_udelay(NV_NVLINK_TIMEOUT_DELAY_US);
		} while ((!nvgpu_timeout_expired_msg(&timeout,
				"Timed out setting pll on link %u",
				link_id)));
		/* Distinguish the success "break" from a timeout exit. */
		if (nvgpu_timeout_peek_expired(&timeout)) {
			return -ETIMEDOUT;
		}
	}
	return ret;
}
/*
 * Read the TX sublink state machine (SLSM) primary state of @link_id.
 *
 * The primary state field is only meaningful while the substate is
 * STABLE, so the register is polled until the substate stabilizes.
 * On poll timeout or timeout-init failure the primary state is
 * reported as "unknown".
 */
u32 tu104_nvlink_link_get_tx_sublink_state(struct gk20a *g, u32 link_id)
{
	u32 reg;
	struct nvgpu_timeout timeout;
	int err = 0;
	err = nvgpu_timeout_init(g, &timeout, NV_NVLINK_REG_POLL_TIMEOUT_MS,
			NVGPU_TIMER_CPU_TIMER);
	if (err != 0) {
		nvgpu_err(g, "Failed to init timeout: %d", err);
		goto result;
	}
	/* Poll till substate value becomes STABLE */
	do {
		reg = DLPL_REG_RD32(g, link_id, nvl_sl0_slsm_status_tx_r());
		if (nvl_sl0_slsm_status_tx_substate_v(reg) ==
				nvl_sl0_slsm_status_tx_substate_stable_v()) {
			return nvl_sl0_slsm_status_tx_primary_state_v(reg);
		}
		nvgpu_udelay(NV_NVLINK_TIMEOUT_DELAY_US);
	} while (!nvgpu_timeout_expired_msg(&timeout,
			"Timeout on TX SLSM substate = stable check"));
	/* Timed out: log the last observed (unstable) state for debug. */
	nvgpu_log(g, gpu_dbg_nvlink, "TX SLSM primary state :%u, substate:%u",
		nvl_sl0_slsm_status_tx_primary_state_v(reg),
		nvl_sl0_slsm_status_tx_substate_v(reg));
result:
	return nvl_sl0_slsm_status_tx_primary_state_unknown_v();
}
/*
 * Read the RX sublink state machine (SLSM) primary state of @link_id.
 *
 * Mirrors the TX variant: the primary state field is only meaningful
 * while the substate is STABLE, so poll until it stabilizes; on poll
 * timeout or timeout-init failure report "unknown".
 */
u32 tu104_nvlink_link_get_rx_sublink_state(struct gk20a *g, u32 link_id)
{
	u32 reg;
	struct nvgpu_timeout timeout;
	int err = 0;
	err = nvgpu_timeout_init(g, &timeout, NV_NVLINK_REG_POLL_TIMEOUT_MS,
			NVGPU_TIMER_CPU_TIMER);
	if (err != 0) {
		nvgpu_err(g, "Failed to init timeout: %d", err);
		goto result;
	}
	/* Poll till substate value becomes STABLE */
	do {
		reg = DLPL_REG_RD32(g, link_id, nvl_sl1_slsm_status_rx_r());
		if (nvl_sl1_slsm_status_rx_substate_v(reg) ==
				nvl_sl1_slsm_status_rx_substate_stable_v()) {
			return nvl_sl1_slsm_status_rx_primary_state_v(reg);
		}
		nvgpu_udelay(NV_NVLINK_TIMEOUT_DELAY_US);
	} while (!nvgpu_timeout_expired_msg(&timeout,
			"Timeout on RX SLSM substate = stable check"));
	/* Timed out: log the last observed (unstable) state for debug. */
	nvgpu_log(g, gpu_dbg_nvlink, "RX SLSM primary state :%u, substate:%u",
		nvl_sl1_slsm_status_rx_primary_state_v(reg),
		nvl_sl1_slsm_status_rx_substate_v(reg));
result:
	return nvl_sl1_slsm_status_rx_primary_state_unknown_v();
}
/*
 * Run the Turing minion INIT* DLCMD sequence for every link in
 * @link_mask.
 *
 * Fix: the initdlpl_to_chipA error message used a backslash line
 * splice inside the string literal, which embedded the continuation
 * line's leading tabs into the emitted log text. Replaced with a
 * single, properly formatted literal.
 *
 * Returns 0 on success, or the first minion command error.
 */
int tu104_nvlink_minion_data_ready_en(struct gk20a *g,
					unsigned long link_mask, bool sync)
{
	int ret = 0;
	u32 link_id;
	/* On Volta, the order of INIT* DLCMDs was arbitrary.
	 * On Turing, the INIT* DLCMDs need to be executed in the following
	 * order -
	 * INITDLPL -> INITL -> INITLANEENABLE.
	 * INITDLPL_TO_CHIPA is needed additionally when connected to 2.0 dev.
	 */
	for_each_set_bit(link_id, &link_mask, 32) {
		ret = gv100_nvlink_minion_send_command(g, link_id,
				minion_nvlink_dl_cmd_command_initdlpl_v(), 0,
				sync);
		if (ret) {
			nvgpu_err(g, "Minion initdlpl failed on link %u",
				link_id);
			return ret;
		}
	}
	for_each_set_bit(link_id, &link_mask, 32) {
		ret = gv100_nvlink_minion_send_command(g, link_id,
			minion_nvlink_dl_cmd_command_turing_initdlpl_to_chipa_v(),
			0, sync);
		if (ret) {
			nvgpu_err(g,
				"Minion initdlpl_to_chipA failed on link %u",
				link_id);
			return ret;
		}
	}
	for_each_set_bit(link_id, &link_mask, 32) {
		ret = gv100_nvlink_minion_send_command(g, link_id,
				minion_nvlink_dl_cmd_command_inittl_v(), 0,
				sync);
		if (ret) {
			nvgpu_err(g, "Minion inittl failed on link %u",
				link_id);
			return ret;
		}
	}
	for_each_set_bit(link_id, &link_mask, 32) {
		ret = gv100_nvlink_minion_send_command(g, link_id,
				minion_nvlink_dl_cmd_command_initlaneenable_v(), 0,
				sync);
		if (ret) {
			nvgpu_err(g, "Minion initlaneenable failed on link %u",
				link_id);
			return ret;
		}
	}
	return ret;
}
/*
 * Report the static mask of nvlink links wired up on TU104
 * (TU104_CONNECTED_LINK_MASK: link 0 only).
 */
void tu104_nvlink_get_connected_link_mask(u32 *link_mask)
{
	*link_mask = TU104_CONNECTED_LINK_MASK;
}
#endif /* CONFIG_TEGRA_NVLINK */

View File

@@ -0,0 +1,38 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NVLINK_TU104_H
#define NVGPU_NVLINK_TU104_H
/* Static link topology for TU104: only link 0 is wired up. */
#define TU104_CONNECTED_LINK_MASK		0x1
struct gk20a;
/* API */
/* Receiver detect on a single link; returns 0 or negative errno. */
int tu104_nvlink_rxdet(struct gk20a *g, u32 link_id);
/* Switch TX clocks of the masked links to the PLL. */
int tu104_nvlink_setup_pll(struct gk20a *g, unsigned long mask);
/* Sublink state machine primary-state queries (see nvl manuals). */
u32 tu104_nvlink_link_get_tx_sublink_state(struct gk20a *g, u32 link_id);
u32 tu104_nvlink_link_get_rx_sublink_state(struct gk20a *g, u32 link_id);
/* Run the ordered Turing minion INIT* DLCMD sequence. */
int tu104_nvlink_minion_data_ready_en(struct gk20a *g, unsigned long mask,
					bool sync);
void tu104_nvlink_get_connected_link_mask(u32 *link_mask);
#endif

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,39 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __REGOPS_TU104_H_
#define __REGOPS_TU104_H_
/*
 * TU104 register-operation whitelists for the dbg/profiler interface.
 *
 * NOTE(review): this header uses struct regop_offset_range and
 * struct dbg_session_gk20a without forward declarations or includes;
 * presumably every includer pulls in regops_gk20a.h first -- confirm.
 */
const struct regop_offset_range *tu104_get_global_whitelist_ranges(void);
u64 tu104_get_global_whitelist_ranges_count(void);
const struct regop_offset_range *tu104_get_context_whitelist_ranges(void);
u64 tu104_get_context_whitelist_ranges_count(void);
const u32 *tu104_get_runcontrol_whitelist(void);
u64 tu104_get_runcontrol_whitelist_count(void);
const struct regop_offset_range *tu104_get_runcontrol_whitelist_ranges(void);
u64 tu104_get_runcontrol_whitelist_ranges_count(void);
const u32 *tu104_get_qctl_whitelist(void);
u64 tu104_get_qctl_whitelist_count(void);
const struct regop_offset_range *tu104_get_qctl_whitelist_ranges(void);
u64 tu104_get_qctl_whitelist_ranges_count(void);
/* SMPC workaround hook; applied per debug session. */
int tu104_apply_smpc_war(struct dbg_session_gk20a *dbg_s);
#endif /* __REGOPS_TU104_H_ */

View File

@@ -0,0 +1,436 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/gk20a.h>
#include <nvgpu/pmu.h>
#include <nvgpu/falcon.h>
#include <nvgpu/mm.h>
#include <nvgpu/io.h>
#include <nvgpu/timers.h>
#include <nvgpu/falcon.h>
#include <nvgpu/sec2.h>
#include "sec2_tu104.h"
#include <nvgpu/hw/tu104/hw_pwr_tu104.h>
#include <nvgpu/hw/tu104/hw_psec_tu104.h>
/*
 * Reset the SEC2 falcon by pulsing the engine reset bit: assert,
 * wait 10us for the reset to propagate, then deassert.
 * Always returns 0.
 */
int tu104_sec2_reset(struct gk20a *g)
{
	nvgpu_log_fn(g, " ");
	gk20a_writel(g, psec_falcon_engine_r(),
		psec_falcon_engine_reset_true_f());
	nvgpu_udelay(10);
	gk20a_writel(g, psec_falcon_engine_r(),
		psec_falcon_engine_reset_false_f());
	nvgpu_log_fn(g, "done");
	return 0;
}
/*
 * Copy @size_in_bytes between a CPU buffer and SEC2 EMEM through the
 * EMEMC/EMEMD register pair of @port.
 *
 * @dmem_addr is a falcon DMEM virtual address; EMEM is mapped at the
 * top of DMEM VA space, so the address is validated against the EMEM
 * aperture and rebased before programming EMEMC. Whole words are
 * streamed through EMEMD (auto-increment), then a final partial word
 * is handled with a read-modify-write.
 *
 * Fix: corrected the misspelled "aperature" in the range-check
 * diagnostic.
 *
 * Returns 0 on success, -EINVAL for a zero-size copy, a bad port, or
 * an address range outside the EMEM aperture.
 */
static int tu104_sec2_emem_transfer(struct gk20a *g, u32 dmem_addr, u8 *buf,
	u32 size_in_bytes, u8 port, bool is_copy_from)
{
	u32 *data = (u32 *)(void *)buf;
	u32 num_words = 0;
	u32 num_bytes = 0;
	u32 start_emem = 0;
	u32 end_emem = 0;
	u32 reg = 0;
	u32 i = 0;
	u32 emem_c_offset = 0;
	u32 emem_d_offset = 0;
	u8 max_emem_ports = (u8)psec_ememc__size_1_v();
	int status = 0;
	if (size_in_bytes == 0U) {
		nvgpu_err(g, "zero-byte copy requested");
		status = -EINVAL;
		goto exit;
	}
	if (port >= max_emem_ports) {
		nvgpu_err(g, "only %d ports supported. Accessed port=%d\n",
			max_emem_ports, port);
		status = -EINVAL;
		goto exit;
	}
	/*
	 * Get the EMEMC/D register addresses
	 * for the specified port
	 */
	emem_c_offset = psec_ememc_r(port);
	emem_d_offset = psec_ememd_r(port);
	/*
	 * EMEM is mapped at the top of DMEM VA space
	 * START_EMEM = DMEM_VA_MAX = 2^(DMEM_TAG_WIDTH + 8)
	 */
	start_emem = (u32)1U << ((u32)psec_falcon_hwcfg1_dmem_tag_width_v(
		gk20a_readl(g, psec_falcon_hwcfg1_r())) + (u32)8U);
	/* EMEM size is reported in 256-byte blocks. */
	end_emem = start_emem +
		((u32)psec_hwcfg_emem_size_f(gk20a_readl(g, psec_hwcfg_r()))
		* (u32)256U);
	/* NOTE(review): dmem_addr + size_in_bytes could wrap u32 for
	 * pathological inputs; callers pass driver-sized buffers today. */
	if (dmem_addr < start_emem ||
		(dmem_addr + size_in_bytes) > end_emem) {
		nvgpu_err(g, "copy must be in emem aperture [0x%x, 0x%x]",
			start_emem, end_emem);
		status = -EINVAL;
		goto exit;
	}
	/* Convert to emem offset for use by EMEMC/EMEMD */
	dmem_addr -= start_emem;
	/* Mask off all but the OFFSET and BLOCK in EMEM offset */
	reg = dmem_addr & (psec_ememc_offs_m() |
		psec_ememc_blk_m());
	if (is_copy_from) {
		/* mark auto-increment on read */
		reg |= psec_ememc_aincr_m();
	} else {
		/* mark auto-increment on write */
		reg |= psec_ememc_aincw_m();
	}
	gk20a_writel(g, emem_c_offset, reg);
	/* Calculate the number of words and bytes */
	num_words = size_in_bytes >> 2U;
	num_bytes = size_in_bytes & 0x3U;
	/* Directly copy words to emem*/
	for (i = 0; i < num_words; i++) {
		if (is_copy_from) {
			data[i] = gk20a_readl(g, emem_d_offset);
		} else {
			gk20a_writel(g, emem_d_offset, data[i]);
		}
	}
	/* Check if there are leftover bytes to copy */
	if (num_bytes > 0U) {
		u32 bytes_copied = num_words << 2U;
		/* Read-modify-write the final partial word. */
		reg = gk20a_readl(g, emem_d_offset);
		if (is_copy_from) {
			for (i = 0; i < num_bytes; i++) {
				buf[bytes_copied + i] = ((u8 *)&reg)[i];
			}
		} else {
			for (i = 0; i < num_bytes; i++) {
				((u8 *)&reg)[i] = buf[bytes_copied + i];
			}
			gk20a_writel(g, emem_d_offset, reg);
		}
	}
exit:
	return status;
}
/* Copy @size bytes from @src into SEC2 EMEM at @dst via EMEM @port. */
int tu104_sec2_flcn_copy_to_emem(struct nvgpu_falcon *flcn,
	u32 dst, u8 *src, u32 size, u8 port)
{
	return tu104_sec2_emem_transfer(flcn->g, dst, src, size, port, false);
}
/* Copy @size bytes out of SEC2 EMEM at @src into @dst via EMEM @port. */
int tu104_sec2_flcn_copy_from_emem(struct nvgpu_falcon *flcn,
	u32 src, u8 *dst, u32 size, u8 port)
{
	return tu104_sec2_emem_transfer(flcn->g, src, dst, size, port, true);
}
/*
 * Program the SEC2 falcon context/instance-block registers and then
 * bootstrap its bootloader through the generic falcon BL path.
 */
static int tu104_sec2_flcn_bl_bootstrap(struct gk20a *g,
	struct nvgpu_falcon_bl_info *bl_info)
{
	struct mm_gk20a *mm = &g->mm;
	u32 data = 0;
	nvgpu_log_fn(g, " ");
	/* SEC2 Config */
	gk20a_writel(g, psec_falcon_itfen_r(),
		gk20a_readl(g, psec_falcon_itfen_r()) |
		psec_falcon_itfen_ctxen_enable_f());
	/*
	 * NOTE(review): nxtctx is programmed with pwr_pmu_new_instblk_*
	 * field macros -- presumably the PSEC field layout matches the
	 * PWR one; confirm against the psec manual. The instance block
	 * pointer is in 4K pages (>> 12).
	 */
	gk20a_writel(g, psec_falcon_nxtctx_r(),
		pwr_pmu_new_instblk_ptr_f(
		nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12U) |
		pwr_pmu_new_instblk_valid_f(1U) |
		nvgpu_aperture_mask(g, &mm->pmu.inst_block,
			pwr_pmu_new_instblk_target_sys_ncoh_f(),
			pwr_pmu_new_instblk_target_sys_coh_f(),
			pwr_pmu_new_instblk_target_fb_f()));
	data = gk20a_readl(g, psec_falcon_debug1_r());
	data |= psec_falcon_debug1_ctxsw_mode_m();
	gk20a_writel(g, psec_falcon_debug1_r(), data);
	/* NOTE(review): bit 3 of ENGCTL has no hw header define here;
	 * presumably it triggers the context switch -- confirm. */
	data = gk20a_readl(g, psec_falcon_engctl_r());
	data |= (1U << 3U);
	gk20a_writel(g, psec_falcon_engctl_r(), data);
	return nvgpu_flcn_bl_bootstrap(&g->sec2_flcn, bl_info);
}
/*
 * Reset the SEC2 falcon, configure its FBIF DMA apertures, and
 * bootstrap the bootloader described by @bl_info.
 * @acr_desc is unused here but kept for the common ACR hook signature.
 */
int tu104_sec2_setup_hw_and_bl_bootstrap(struct gk20a *g,
	struct hs_acr *acr_desc,
	struct nvgpu_falcon_bl_info *bl_info)
{
	u32 data = 0U;
	nvgpu_log_fn(g, " ");
	nvgpu_flcn_reset(&g->sec2_flcn);
	/* Allow physical DMA with no bound context. */
	data = gk20a_readl(g, psec_fbif_ctl_r());
	data |= psec_fbif_ctl_allow_phys_no_ctx_allow_f();
	gk20a_writel(g, psec_fbif_ctl_r(), data);
	/* setup apertures: ucode is fetched physically from vidmem */
	gk20a_writel(g, psec_fbif_transcfg_r(GK20A_PMU_DMAIDX_UCODE),
		psec_fbif_transcfg_mem_type_physical_f() |
		psec_fbif_transcfg_target_local_fb_f());
	/* virtual aperture */
	gk20a_writel(g, psec_fbif_transcfg_r(GK20A_PMU_DMAIDX_VIRT),
		psec_fbif_transcfg_mem_type_virtual_f());
	/* setup apertures - physical */
	gk20a_writel(g, psec_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_VID),
		psec_fbif_transcfg_mem_type_physical_f() |
		psec_fbif_transcfg_target_local_fb_f());
	gk20a_writel(g, psec_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_COH),
		psec_fbif_transcfg_mem_type_physical_f() |
		psec_fbif_transcfg_target_coherent_sysmem_f());
	gk20a_writel(g, psec_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_NCOH),
		psec_fbif_transcfg_mem_type_physical_f() |
		psec_fbif_transcfg_target_noncoherent_sysmem_f());
	return tu104_sec2_flcn_bl_bootstrap(g, bl_info);
}
/*
 * Read (set == false) or write (set == true) the head pointer of a
 * SEC2 queue. Command queues use the per-index QUEUE_HEAD registers;
 * the message queue uses the single MSGQ_HEAD register (index 0).
 * Returns -EINVAL for an out-of-range command queue index.
 */
int tu104_sec2_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
	u32 *head, bool set)
{
	if (queue->id <= SEC2_NV_CMDQ_LOG_ID__LAST) {
		u32 queue_head_size = 8;

		if (queue->index >= queue_head_size) {
			return -EINVAL;
		}
		if (set) {
			gk20a_writel(g, psec_queue_head_r(queue->index),
				psec_queue_head_address_f(*head));
		} else {
			*head = psec_queue_head_address_v(
				gk20a_readl(g,
					psec_queue_head_r(queue->index)));
		}
		return 0;
	}

	/* message queue: single head register at index 0 */
	if (set) {
		gk20a_writel(g, psec_msgq_head_r(0U),
			psec_msgq_head_val_f(*head));
	} else {
		*head = psec_msgq_head_val_v(
			gk20a_readl(g, psec_msgq_head_r(0U)));
	}
	return 0;
}
/*
 * Read (set == false) or write (set == true) the tail pointer of a
 * SEC2 queue. Command queues use the per-index QUEUE_TAIL registers;
 * the message queue uses the single MSGQ_TAIL register (index 0).
 * Returns -EINVAL for an out-of-range command queue index.
 */
int tu104_sec2_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
	u32 *tail, bool set)
{
	if (queue->id <= SEC2_NV_CMDQ_LOG_ID__LAST) {
		u32 queue_tail_size = 8;

		if (queue->index >= queue_tail_size) {
			return -EINVAL;
		}
		if (set) {
			gk20a_writel(g, psec_queue_tail_r(queue->index),
				psec_queue_tail_address_f(*tail));
		} else {
			*tail = psec_queue_tail_address_v(
				gk20a_readl(g,
					psec_queue_tail_r(queue->index)));
		}
		return 0;
	}

	/* message queue: single tail register at index 0 */
	if (set) {
		gk20a_writel(g, psec_msgq_tail_r(0U),
			psec_msgq_tail_val_f(*tail));
	} else {
		*tail = psec_msgq_tail_val_v(
			gk20a_readl(g, psec_msgq_tail_r(0U)));
	}
	return 0;
}
/*
 * Raw access to the SEC2 message queue tail register: write *tail
 * when set is true, otherwise read the register into *tail.
 */
void tu104_sec2_msgq_tail(struct gk20a *g, struct nvgpu_sec2 *sec2,
	u32 *tail, bool set)
{
	if (set) {
		gk20a_writel(g, psec_msgq_tail_r(0U), *tail);
	} else {
		*tail = gk20a_readl(g, psec_msgq_tail_r(0U));
	}
}
/*
 * Disable all SEC2 falcon interrupts, then (if @enable) program the
 * routing and mask so that watchdog, halt and swgen0 are delivered to
 * the host while the remaining sources stay with the falcon.
 */
void tu104_sec2_enable_irq(struct nvgpu_sec2 *sec2, bool enable)
{
	struct gk20a *g = sec2->g;
	u32 intr_mask;
	u32 intr_dest;
	/* Always quiesce first so enable is a clean transition. */
	nvgpu_flcn_set_irq(&g->sec2_flcn, false, 0x0, 0x0);
	if (enable) {
		/* dest 0=falcon, 1=host; level 0=irq0, 1=irq1 */
		intr_dest = psec_falcon_irqdest_host_gptmr_f(0)      |
			psec_falcon_irqdest_host_wdtmr_f(1)          |
			psec_falcon_irqdest_host_mthd_f(0)           |
			psec_falcon_irqdest_host_ctxsw_f(0)          |
			psec_falcon_irqdest_host_halt_f(1)           |
			psec_falcon_irqdest_host_exterr_f(0)         |
			psec_falcon_irqdest_host_swgen0_f(1)         |
			psec_falcon_irqdest_host_swgen1_f(0)         |
			psec_falcon_irqdest_host_ext_f(0xff)         |
			psec_falcon_irqdest_target_gptmr_f(1)        |
			psec_falcon_irqdest_target_wdtmr_f(0)        |
			psec_falcon_irqdest_target_mthd_f(0)         |
			psec_falcon_irqdest_target_ctxsw_f(0)        |
			psec_falcon_irqdest_target_halt_f(0)         |
			psec_falcon_irqdest_target_exterr_f(0)       |
			psec_falcon_irqdest_target_swgen0_f(0)       |
			psec_falcon_irqdest_target_swgen1_f(0)       |
			psec_falcon_irqdest_target_ext_f(0xff);
		/* 0=disable, 1=enable */
		intr_mask = psec_falcon_irqmset_gptmr_f(1)           |
			psec_falcon_irqmset_wdtmr_f(1)               |
			psec_falcon_irqmset_mthd_f(0)                |
			psec_falcon_irqmset_ctxsw_f(0)               |
			psec_falcon_irqmset_halt_f(1)                |
			psec_falcon_irqmset_exterr_f(1)              |
			psec_falcon_irqmset_swgen0_f(1)              |
			psec_falcon_irqmset_swgen1_f(1);
		nvgpu_flcn_set_irq(&g->sec2_flcn, true, intr_mask, intr_dest);
	}
}
/*
 * Check whether any SEC2 interrupt source serviced by the ISR
 * (halt, exterr, swgen0) is currently pending in IRQSTAT.
 */
bool tu104_sec2_is_interrupted(struct nvgpu_sec2 *sec2)
{
	struct gk20a *g = sec2->g;
	u32 serviced_mask = psec_falcon_irqstat_halt_true_f() |
			psec_falcon_irqstat_exterr_true_f() |
			psec_falcon_irqstat_swgen0_true_f();

	return (gk20a_readl(g, psec_falcon_irqstat_r()) &
		serviced_mask) != 0U;
}
/*
 * SEC2 falcon interrupt service routine.
 *
 * Reads the pending interrupt set (IRQSTAT masked by IRQMASK and
 * IRQDEST), services halt/exterr/swgen0 sources, clears what was
 * serviced, and re-raises swgen0 if the message queue still holds
 * unprocessed messages.
 *
 * Fix: IRQSTAT was read twice back-to-back, with the first (unmasked)
 * read immediately overwritten -- dead code, and a second MMIO read
 * could observe a different interrupt set. Read it once.
 */
void tu104_sec2_isr(struct gk20a *g)
{
	struct nvgpu_sec2 *sec2 = &g->sec2;
	struct nvgpu_falcon_queue *queue;
	u32 intr, mask;
	bool recheck = false;
	nvgpu_mutex_acquire(&sec2->isr_mutex);
	if (!sec2->isr_enabled) {
		nvgpu_mutex_release(&sec2->isr_mutex);
		return;
	}
	/* Only consider sources that are both enabled and routed. */
	mask = gk20a_readl(g, psec_falcon_irqmask_r()) &
		gk20a_readl(g, psec_falcon_irqdest_r());
	intr = gk20a_readl(g, psec_falcon_irqstat_r()) & mask;
	if (!intr) {
		gk20a_writel(g, psec_falcon_irqsclr_r(), intr);
		nvgpu_mutex_release(&sec2->isr_mutex);
		return;
	}
	if (intr & psec_falcon_irqstat_halt_true_f()) {
		nvgpu_err(g, "sec2 halt intr not implemented");
		nvgpu_flcn_dump_stats(&g->sec2_flcn);
	}
	if (intr & psec_falcon_irqstat_exterr_true_f()) {
		nvgpu_err(g,
			"sec2 exterr intr not implemented. Clearing interrupt.");
		gk20a_writel(g, psec_falcon_exterrstat_r(),
			gk20a_readl(g, psec_falcon_exterrstat_r()) &
				~psec_falcon_exterrstat_valid_m());
	}
	if (intr & psec_falcon_irqstat_swgen0_true_f()) {
		if (nvgpu_sec2_process_message(sec2)) {
			gk20a_writel(g, psec_falcon_irqsclr_r(), intr);
			goto exit;
		}
		recheck = true;
	}
	gk20a_writel(g, psec_falcon_irqsclr_r(), intr);
	if (recheck) {
		/* More messages may have arrived; re-raise swgen0. */
		queue = &sec2->queue[SEC2_NV_MSGQ_LOG_ID];
		if (!nvgpu_flcn_queue_is_empty(sec2->flcn, queue)) {
			gk20a_writel(g, psec_falcon_irqsset_r(),
				psec_falcon_irqsset_swgen0_set_f());
		}
	}
exit:
	nvgpu_sec2_dbg(g, "Done");
	nvgpu_mutex_release(&sec2->isr_mutex);
}
/*
 * Kick off SEC2 CPU execution via the priv-protected CPUCTL_ALIAS
 * register (startcpu).
 */
void tu104_start_sec2_secure(struct gk20a *g)
{
	gk20a_writel(g, psec_falcon_cpuctl_alias_r(),
		psec_falcon_cpuctl_alias_startcpu_f(1U));
}

View File

@@ -0,0 +1,49 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __SEC2_TU104_H__
#define __SEC2_TU104_H__
/*
 * TU104 SEC2 falcon HAL: reset, EMEM transfers, bootstrap, queue
 * pointer access and interrupt handling.
 *
 * NOTE(review): struct gk20a, nvgpu_falcon, nvgpu_falcon_queue and
 * hs_acr are used below without forward declarations -- presumably
 * includers pull in the falcon/acr headers first; confirm.
 */
struct nvgpu_sec2;
int tu104_sec2_reset(struct gk20a *g);
/* EMEM copy helpers routed through the EMEMC/EMEMD port registers. */
int tu104_sec2_flcn_copy_to_emem(struct nvgpu_falcon *flcn,
	u32 dst, u8 *src, u32 size, u8 port);
int tu104_sec2_flcn_copy_from_emem(struct nvgpu_falcon *flcn,
	u32 src, u8 *dst, u32 size, u8 port);
int tu104_sec2_setup_hw_and_bl_bootstrap(struct gk20a *g,
	struct hs_acr *acr_desc,
	struct nvgpu_falcon_bl_info *bl_info);
/* Queue head/tail accessors; set selects write (true) vs read. */
int tu104_sec2_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
	u32 *head, bool set);
int tu104_sec2_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
	u32 *tail, bool set);
void tu104_sec2_msgq_tail(struct gk20a *g, struct nvgpu_sec2 *sec2,
	u32 *tail, bool set);
void tu104_sec2_isr(struct gk20a *g);
bool tu104_sec2_is_interrupted(struct nvgpu_sec2 *sec2);
void tu104_sec2_enable_irq(struct nvgpu_sec2 *sec2, bool enable);
void tu104_start_sec2_secure(struct gk20a *g);
#endif /*__SEC2_TU104_H__*/

View File

@@ -39,8 +39,6 @@ CORE_OUT=$(OUT)/nvgpu_unit_core
# Nvgpu driver code.
NVGPU_SRC=$(TWD)/../drivers/gpu/nvgpu
NVGPU_OUT=$(OUT)/libnvgpu
# Nvgpu_next driver code.
NVGPU_NEXT_SRC=$(TWD)/../../nvgpu-next/drivers/gpu/nvgpu
# Unit tests themselves.
UNIT_SRC=$(TWD)/units
@@ -49,8 +47,6 @@ UNIT_OUT=$(OUT)/units
INCLUDES= \
-I$(NVGPU_SRC) \
-I$(NVGPU_SRC)/include \
-I$(NVGPU_NEXT_SRC) \
-I$(NVGPU_NEXT_SRC)/include \
-I$(TWD)/../include \
-I$(TWD)/../include/uapi \
-I$(TWD)/include
@@ -111,14 +107,6 @@ $(NVGPU_OUT)/%.o : $(NVGPU_SRC)/%.c $(HEADERS)
fi
$(CC) --coverage $(CFLAGS) $(configs) -c -o $@ $<
# Default build target for all the nvgpu-next driver object files we want to
# build in userspace. These too get bundled into libnvgpu-drv.so.
$(NVGPU_OUT)/%.o : $(NVGPU_NEXT_SRC)/%.c $(HEADERS) $(HEADERS_NEXT)
@if [ ! -d $(dir $@) ] ; then \
mkdir -p $(dir $@) ; \
fi
$(CC) --coverage $(CFLAGS) $(configs) -c -o $@ $<
# Build target for unit test files. These are not part of the libnvgpu-drv.so.
# These comprise the unit test framework.
$(CORE_OUT)/%.o : $(CORE_SRC)/%.c $(CORE_HEADERS)

View File

@@ -21,9 +21,8 @@
# DEALINGS IN THE SOFTWARE.
include $(NVGPU_SRC)/Makefile.sources
-include $(NVGPU_NEXT_SRC)/Makefile.sources
OBJS := $(srcs:%.c=$(NVGPU_OUT)/%.o) $(srcs_next:%.c=$(NVGPU_OUT)/%.o)
OBJS := $(srcs:%.c=$(NVGPU_OUT)/%.o)
HEADERS := \
$(NVGPU_SRC)/include/nvgpu/*.h \
@@ -31,7 +30,8 @@ HEADERS := \
$(NVGPU_SRC)/gk20a/*.h \
$(NVGPU_SRC)/gm20b/*.h \
$(NVGPU_SRC)/gp10b/*.h \
$(NVGPU_SRC)/gv11b/*.h
$(NVGPU_SRC)/gv11b/*.h \
$(NVGPU_SRC)/tu104/*.h
CORE_OBJS := \
$(CORE_OUT)/unit_main.o \