Merge remote-tracking branch 'remotes/origin/dev/linux-nvgpu-t19x' into linux-nvgpu

Bug 200363166

Change-Id: Ic662d7b44b673db28dc0aeba338ae67cf2a43d64
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
This commit is contained in:
Deepak Nibade
2017-11-15 23:21:19 -08:00
152 changed files with 48673 additions and 2 deletions

View File

@@ -63,5 +63,3 @@ S: Supported
F: drivers/gpu/nvgpu/*
F: include/*
F: ../../gpu-firmware-private/

View File

@@ -136,3 +136,10 @@ config GK20A_VIDMEM
Enable support for using and allocating buffers in a distinct video
memory aperture (in contrast to general system memory), available on
GPUs that have their own banks. PCIe GPUs have this, for example.
config TEGRA_19x_GPU
bool "Tegra 19x family GPU"
depends on GK20A && ARCH_TEGRA_19x_SOC
default y
help
Support for NVIDIA Tegra 19x family of GPU

View File

@@ -258,3 +258,51 @@ nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
vgpu/gp10b/vgpu_gr_gp10b.o \
vgpu/gp10b/vgpu_mm_gp10b.o
endif
# T19x (Tegra 19x SoC) support: build the common T19x glue plus the
# GV11B and GV100 chip code only when the SoC is enabled in the config.
ifeq ($(CONFIG_ARCH_TEGRA_19x_SOC),y)
nvgpu-y += \
	common/mm/gmmu_t19x.o \
	common/linux/ioctl_tsg_t19x.o \
	common/linux/ioctl_ctrl_t19x.o \
	common/linux/io_t19x.o \
	common/linux/module_t19x.o \
	common/linux/pci_t19x.o \
	gv11b/gv11b.o \
	gv11b/css_gr_gv11b.o \
	gv11b/dbg_gpu_gv11b.o \
	gv11b/mc_gv11b.o \
	gv11b/ltc_gv11b.o \
	gv11b/hal_gv11b.o \
	gv11b/gv11b_gating_reglist.o \
	gv11b/gr_gv11b.o \
	gv11b/fb_gv11b.o \
	gv11b/fifo_gv11b.o \
	gv11b/mm_gv11b.o \
	gv11b/ce_gv11b.o \
	gv11b/gr_ctx_gv11b.o \
	gv11b/pmu_gv11b.o \
	gv11b/acr_gv11b.o \
	gv11b/subctx_gv11b.o \
	gv11b/regops_gv11b.o \
	gv11b/therm_gv11b.o \
	gv100/mm_gv100.o \
	gv100/gr_ctx_gv100.o \
	gv100/fb_gv100.o \
	gv100/bios_gv100.o \
	gv100/fifo_gv100.o \
	gv100/gr_gv100.o \
	gv100/regops_gv100.o \
	gv100/hal_gv100.o

# Tegra platform glue and host1x (nvhost) syncpoint shim support.
nvgpu-$(CONFIG_TEGRA_GK20A) += gv11b/platform_gv11b_tegra.o
nvgpu-$(CONFIG_TEGRA_GK20A_NVHOST) += common/linux/nvhost_t19x.o

# Virtualized (vGPU) GV11B support.
nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
	vgpu/gv11b/platform_gv11b_vgpu_tegra.o \
	vgpu/gv11b/vgpu_gv11b.o \
	vgpu/gv11b/vgpu_hal_gv11b.o \
	vgpu/gv11b/vgpu_gr_gv11b.o \
	vgpu/gv11b/vgpu_fifo_gv11b.o \
	vgpu/gv11b/vgpu_subctx_gv11b.o \
	vgpu/gv11b/vgpu_tsg_gv11b.o
endif

View File

@@ -0,0 +1,29 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NVGPU_ACR_T19X_H_
#define _NVGPU_ACR_T19X_H_

/* Firmware-relative paths of the FECS/GPCCS falcon ucode signature
 * binaries used by the ACR on big-GPU (GV100) chips. */
#define BIGGPU_FECS_UCODE_SIG "gv100/fecs_sig.bin"
#define BIGGPU_GPCCS_UCODE_SIG "gv100/gpccs_sig.bin"
#endif

View File

@@ -0,0 +1,33 @@
/*
* NVIDIA T19x Channel info
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NVGPU_CHANNEL_T19X_H_
#define _NVGPU_CHANNEL_T19X_H_

/* Per-channel state added on T19x-family chips. */
struct channel_t19x {
	u32 subctx_id;     /* TSG subcontext (VEID) this channel is bound to */
	u32 runqueue_sel;  /* runqueue selector; 1 for ASYNC VEIDs, else 0 */
};
#endif

View File

@@ -0,0 +1,29 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <nvgpu/io.h>
#include <nvgpu/types.h>
#include "common/linux/os_linux.h"
#include "gk20a/gk20a.h"
#include <nvgpu/hw/gv11b/hw_usermode_gv11b.h>
/*
 * Write @v to the usermode register at absolute BAR0 offset @r.
 *
 * usermode_regs points at usermode_cfg0 within the mapped region, so the
 * absolute offset is rebased before the access. Uses writel_relaxed():
 * no ordering barrier is issued — assumes callers do not depend on
 * ordering against other memory accesses (NOTE(review): confirm).
 */
void gv11b_usermode_writel(struct gk20a *g, u32 r, u32 v)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	void __iomem *reg = l->t19x.usermode_regs + (r - usermode_cfg0_r());

	writel_relaxed(v, reg);
	gk20a_dbg(gpu_dbg_reg, "usermode r=0x%x v=0x%x", r, v);
}

View File

@@ -0,0 +1,33 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <uapi/linux/nvgpu.h>
#include <nvgpu/types.h>
#include <nvgpu/enabled.h>
#include <nvgpu/enabled_t19x.h>
#include "ioctl_ctrl_t19x.h"
#include "common/linux/os_linux.h"
#include "gk20a/gk20a.h"
u64 nvgpu_ctrl_ioctl_gpu_characteristics_flags_t19x(struct gk20a *g)
{
u64 ioctl_flags = 0;
if (nvgpu_is_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS))
ioctl_flags |= NVGPU_GPU_FLAGS_SUPPORT_TSG_SUBCONTEXTS;
return ioctl_flags;
}

View File

@@ -0,0 +1,23 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef _NVGPU_IOCTL_CTRL_T19X
#define _NVGPU_IOCTL_CTRL_T19X

#include <nvgpu/types.h>

struct gk20a;

/* Returns the T19x-specific NVGPU_GPU_FLAGS_* bits to report in the GPU
 * characteristics ioctl for @g. */
u64 nvgpu_ctrl_ioctl_gpu_characteristics_flags_t19x(struct gk20a *g);
#endif

View File

@@ -0,0 +1,115 @@
/*
* GV11B TSG IOCTL Handler
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/types.h>
#include <uapi/linux/nvgpu.h>
#include "gk20a/gk20a.h"
#include "gv11b/fifo_gv11b.h"
#include "gv11b/subctx_gv11b.h"
#include "ioctl_tsg_t19x.h"
#include "common/linux/os_linux.h"
/*
 * NVGPU_TSG_IOCTL_BIND_CHANNEL_EX handler: bind a channel to a TSG with
 * an explicit subcontext (VEID) id and optional dynamic TPC power-gating
 * configuration.
 *
 * Returns 0 on success; -EPERM if scheduler control is locked by
 * userspace; -EINVAL for a bad channel fd, an invalid active-TPC count,
 * or an out-of-range subcontext id; or the error from powering on the
 * GPU / the HAL bind call.
 */
static int gv11b_tsg_ioctl_bind_channel_ex(struct gk20a *g,
	struct tsg_gk20a *tsg, struct nvgpu_tsg_bind_channel_ex_args *arg)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
	struct channel_gk20a *ch;
	struct gr_gk20a *gr = &g->gr;
	int err = 0;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);

	/* Refuse to bind while a privileged client holds scheduler control. */
	nvgpu_mutex_acquire(&sched->control_lock);
	if (sched->control_locked) {
		err = -EPERM;
		goto mutex_release;
	}
	err = gk20a_busy(g);
	if (err) {
		nvgpu_err(g, "failed to power on gpu");
		goto mutex_release;
	}

	ch = gk20a_get_channel_from_file(arg->channel_fd);
	if (!ch) {
		err = -EINVAL;
		goto idle;
	}

	/* TPC power-gating config is latched once per TSG: the first bind
	 * requesting it wins; any other bind marks TPC-PG disabled. */
	if (arg->tpc_pg_enabled && (!tsg->t19x.tpc_num_initialized)) {
		if ((arg->num_active_tpcs > gr->max_tpc_count) ||
				!(arg->num_active_tpcs)) {
			nvgpu_err(g, "Invalid num of active TPCs");
			err = -EINVAL;
			goto ch_put;
		}
		tsg->t19x.tpc_num_initialized = true;
		tsg->t19x.num_active_tpcs = arg->num_active_tpcs;
		tsg->t19x.tpc_pg_enabled = true;
	} else {
		tsg->t19x.tpc_pg_enabled = false;
		nvgpu_log(g, gpu_dbg_info, "dynamic TPC-PG not enabled");
	}

	/* Validate the requested subcontext against the HW VEID limit. */
	if (arg->subcontext_id < g->fifo.t19x.max_subctx_count) {
		ch->t19x.subctx_id = arg->subcontext_id;
	} else {
		err = -EINVAL;
		goto ch_put;
	}

	nvgpu_log(g, gpu_dbg_info, "channel id : %d : subctx: %d",
			ch->chid, ch->t19x.subctx_id);

	/* Use runqueue selector 1 for all ASYNC ids */
	if (ch->t19x.subctx_id > CHANNEL_INFO_VEID0)
		ch->t19x.runqueue_sel = 1;

	err = ch->g->ops.fifo.tsg_bind_channel(tsg, ch);

	/* Unwind in strict reverse-acquisition order. */
ch_put:
	gk20a_channel_put(ch);
idle:
	gk20a_idle(g);
mutex_release:
	nvgpu_mutex_release(&sched->control_lock);
	return err;
}
/*
 * Dispatch a T19x-specific TSG ioctl. @buf holds the copied-in argument
 * struct for @cmd. Returns the handler's result, or -ENOTTY for a cmd
 * this layer does not recognize.
 */
int t19x_tsg_ioctl_handler(struct gk20a *g, struct tsg_gk20a *tsg,
		unsigned int cmd, u8 *buf)
{
	int err;

	nvgpu_log(g, gpu_dbg_fn, "t19x_tsg_ioctl_handler");

	/* Only one T19x TSG ioctl exists today; guard-style dispatch. */
	if (cmd == NVGPU_TSG_IOCTL_BIND_CHANNEL_EX) {
		err = gv11b_tsg_ioctl_bind_channel_ex(g, tsg,
			(struct nvgpu_tsg_bind_channel_ex_args *)buf);
	} else {
		nvgpu_err(g, "unrecognized tsg gpu ioctl cmd: 0x%x",
			cmd);
		err = -ENOTTY;
	}

	return err;
}

View File

@@ -0,0 +1,21 @@
/*
* GV11B TSG IOCTL handler
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef _NVGPU_IOCTL_TSG_T19X
#define _NVGPU_IOCTL_TSG_T19X

/*
 * Dispatch a T19x-specific TSG ioctl (currently only
 * NVGPU_TSG_IOCTL_BIND_CHANNEL_EX). @arg holds the copied-in ioctl
 * argument struct. Returns 0, a negative errno, or -ENOTTY for an
 * unknown cmd.
 */
int t19x_tsg_ioctl_handler(struct gk20a *g, struct tsg_gk20a *tsg,
		unsigned int cmd, u8 *arg);
#endif

View File

@@ -0,0 +1,62 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <nvgpu/types.h>
#include <nvgpu/hw/gv11b/hw_usermode_gv11b.h>
#include "common/linux/os_linux.h"
/*
 * Locks out the driver from accessing GPU registers. This prevents access to
 * these registers after the GPU has been clock or power gated. This should
 * help find annoying bugs where register reads and writes are silently
 * dropped after the GPU has been turned off. On older chips these reads and
 * writes can also lock the entire CPU up.
 */
void t19x_lockout_registers(struct gk20a *g)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);

	/* Only the live pointer is cleared; usermode_regs_saved keeps the
	 * mapping so t19x_restore_registers() can undo this. */
	l->t19x.usermode_regs = NULL;
}
/*
 * Undoes t19x_lockout_registers(): restores the usermode register
 * pointer from the copy saved at init time.
 */
void t19x_restore_registers(struct gk20a *g)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);

	l->t19x.usermode_regs = l->t19x.usermode_regs_saved;
}
/*
 * Tear down T19x support state: drop the cached usermode register
 * pointer. The underlying BAR0 mapping (l->regs) is owned and unmapped
 * by the generic driver teardown, so there is nothing to free here.
 */
void t19x_remove_support(struct gk20a *g)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);

	/* Unconditional clear: writing NULL over an already-NULL pointer is
	 * harmless, so the former "if (usermode_regs)" guard was redundant. */
	l->t19x.usermode_regs = NULL;
}
/*
 * Set up the T19x usermode register window: it lives at a fixed offset
 * (usermode_cfg0) inside the already-mapped BAR0 region (l->regs). A
 * copy is saved so lockout/restore can toggle access later.
 */
void t19x_init_support(struct gk20a *g)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);

	l->t19x.usermode_regs = l->regs + usermode_cfg0_r();
	l->t19x.usermode_regs_saved = l->t19x.usermode_regs;
}

View File

@@ -0,0 +1,35 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/nvhost.h>
#include <linux/nvhost_t194.h>
#include <nvgpu/nvhost_t19x.h>
#include "common/linux/nvhost_priv.h"
/*
 * Query the host1x syncpoint shim aperture (physical base and size).
 * Thin wrapper over the nvhost driver API.
 *
 * NOTE(review): *base is u64 but is passed as phys_addr_t* — on a
 * 32-bit phys_addr_t config the upper half of *base would be left
 * uninitialized. Confirm against supported kernel configs.
 */
int nvgpu_nvhost_syncpt_unit_interface_get_aperture(
		struct nvgpu_nvhost_dev *nvhost_dev,
		u64 *base, size_t *size)
{
	return nvhost_syncpt_unit_interface_get_aperture(
		nvhost_dev->host1x_pdev, (phys_addr_t *)base, size);
}
/* Byte offset of @syncpt_id's slot within the syncpoint shim aperture;
 * thin wrapper over the nvhost driver API. */
u32 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(u32 syncpt_id)
{
	return nvhost_syncpt_unit_interface_get_byte_offset(syncpt_id);
}

View File

@@ -0,0 +1,24 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <nvgpu/types.h>
#include <nvgpu/hw/gv11b/hw_usermode_gv11b.h>
#include "common/linux/os_linux.h"
/*
 * PCI flavor of T19x init: point the usermode register window at its
 * fixed offset (usermode_cfg0) inside the mapped BAR0 region and save a
 * copy for lockout/restore. Mirrors t19x_init_support() in
 * common/linux/module_t19x.c.
 */
void t19x_nvgpu_pci_init_support(struct nvgpu_os_linux *l)
{
	l->t19x.usermode_regs = l->regs + usermode_cfg0_r();
	l->t19x.usermode_regs_saved = l->t19x.usermode_regs;
}

View File

@@ -0,0 +1,31 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <uapi/linux/nvgpu.h>
#include <nvgpu/gmmu.h>
/*
 * Fold T19x-specific mapping flags into the GMMU attrs: record whether
 * the caller requested L3 cache allocation for this mapping.
 */
void nvgpu_gmmu_add_t19x_attrs(struct nvgpu_gmmu_attrs *attrs, u32 flags)
{
	bool wants_l3 = (flags & NVGPU_AS_MAP_BUFFER_FLAGS_L3_ALLOC) != 0U;

	attrs->t19x_attrs.l3_alloc = wants_l3;
}

View File

@@ -0,0 +1,29 @@
/*
* NVIDIA T19x ECC
*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NVGPU_ECC_T19X_H_
#define _NVGPU_ECC_T19X_H_

/* T19x ECC support resolves to the GV11B ECC implementation. */
#include "gv11b/ecc_gv11b.h"
#endif

View File

@@ -0,0 +1,30 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _FIFO_T19X_H_
#define _FIFO_T19X_H_

/* T19x additions to the fifo state.
 * NOTE(review): relies on u32 being in scope from a prior include
 * (e.g. nvgpu/types.h) — header is not self-contained. */
struct fifo_t19x {
	/* Max number of TSG subcontexts (VEIDs) supported by the chip;
	 * subcontext ids are validated against this at bind time. */
	u32 max_subctx_count;
};
#endif

View File

@@ -0,0 +1,29 @@
/*
* NVIDIA T19x GR
*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NVGPU_GR_T19X_H_
#define _NVGPU_GR_T19X_H_

/* T19x GR support resolves to the GV11B GR implementation. */
#include "gv11b/gr_gv11b.h"
#endif

View File

@@ -0,0 +1,108 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/bios.h>
#include <nvgpu/nvgpu_common.h>
#include <nvgpu/timers.h>
#include "gk20a/gk20a.h"
#include "gp106/bios_gp106.h"
#include "bios_gv100.h"
#include <nvgpu/hw/gv100/hw_pwr_gv100.h>
#include <nvgpu/hw/gv100/hw_bus_gv100.h>
#define PMU_BOOT_TIMEOUT_DEFAULT 100 /* usec */
#define PMU_BOOT_TIMEOUT_MAX 2000000 /* usec */
#define SCRATCH_PREOS_PROGRESS 6
#define PREOS_PROGRESS_MASK(r) ((r >> 12) & 0xf)
#define PREOS_PROGRESS_NOT_STARTED 0
#define PREOS_PROGRESS_STARTED 1
#define PREOS_PROGRESS_EXIT 2
#define PREOS_PROGRESS_EXIT_SECUREMODE 3
#define PREOS_PROGRESS_ABORTED 6
#define SCRATCH_PMU_EXIT_AND_HALT 1
#define PMU_EXIT_AND_HALT_SET(r, v) ((r & ~0x200UL) | v)
#define PMU_EXIT_AND_HALT_YES (0x1UL << 9)
#define SCRATCH_PRE_OS_RELOAD 1
#define PRE_OS_RELOAD_SET(r, v) ((r & ~0x100UL) | v)
#define PRE_OS_RELOAD_YES (0x1UL << 8)
/*
 * If the preOS (VBIOS PMU application) has started — i.e. its progress
 * scratch is not NOT_STARTED — set the reload-request bit so it is
 * reloaded later. No-op if the preOS never ran.
 */
void gv100_bios_preos_reload_check(struct gk20a *g)
{
	u32 progress = gk20a_readl(g,
		bus_sw_scratch_r(SCRATCH_PREOS_PROGRESS));

	if (PREOS_PROGRESS_MASK(progress) != PREOS_PROGRESS_NOT_STARTED) {
		u32 reload = gk20a_readl(g,
			bus_sw_scratch_r(SCRATCH_PRE_OS_RELOAD));

		/* Read-modify-write: only the reload bit is changed. */
		gk20a_writel(g, bus_sw_scratch_r(SCRATCH_PRE_OS_RELOAD),
			PRE_OS_RELOAD_SET(reload, PRE_OS_RELOAD_YES));
	}
}
/*
 * Wait for the preOS (VBIOS PMU application) to start, then hand-shake
 * it into exiting and halting the PMU falcon.
 *
 * Returns 0 if the preOS was seen in STARTED state and the exit
 * handshake was issued; -EINVAL if the preOS never reached STARTED.
 * NOTE(review): if the EXIT-poll loop below times out, err is still 0 —
 * the timeout itself is not reported to the caller. Confirm intended.
 */
int gv100_bios_preos_wait_for_halt(struct gk20a *g)
{
	int err = -EINVAL;
	u32 progress;
	u32 tmp;
	int preos_completed;
	struct nvgpu_timeout timeout;

	/* Give the preOS a moment to update its progress scratch. */
	nvgpu_udelay(PMU_BOOT_TIMEOUT_DEFAULT);

	/* Check the progress */
	progress = gk20a_readl(g, bus_sw_scratch_r(SCRATCH_PREOS_PROGRESS));

	if (PREOS_PROGRESS_MASK(progress) == PREOS_PROGRESS_STARTED) {
		err = 0;

		/* Complete the handshake */
		tmp = gk20a_readl(g,
			bus_sw_scratch_r(SCRATCH_PMU_EXIT_AND_HALT));
		gk20a_writel(g, bus_sw_scratch_r(SCRATCH_PMU_EXIT_AND_HALT),
			PMU_EXIT_AND_HALT_SET(tmp, PMU_EXIT_AND_HALT_YES));

		nvgpu_timeout_init(g, &timeout,
			PMU_BOOT_TIMEOUT_MAX /
				PMU_BOOT_TIMEOUT_DEFAULT,
			NVGPU_TIMER_RETRY_TIMER);

		/* Poll until the falcon halts AND the preOS reports EXIT,
		 * or the retry budget runs out. */
		do {
			progress = gk20a_readl(g,
				bus_sw_scratch_r(SCRATCH_PREOS_PROGRESS));
			preos_completed = pwr_falcon_cpuctl_halt_intr_v(
				gk20a_readl(g, pwr_falcon_cpuctl_r())) &&
				(PREOS_PROGRESS_MASK(progress) ==
					PREOS_PROGRESS_EXIT);

			nvgpu_udelay(PMU_BOOT_TIMEOUT_DEFAULT);
		} while (!preos_completed && !nvgpu_timeout_expired(&timeout));
	}

	return err;
}

View File

@@ -0,0 +1,31 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_BIOS_GV100_H
#define NVGPU_BIOS_GV100_H

struct gk20a;

/* Request a preOS reload if the preOS has started; no-op otherwise. */
void gv100_bios_preos_reload_check(struct gk20a *g);
/* Handshake the preOS into exiting and halting the PMU falcon.
 * Returns 0 on handshake, -EINVAL if the preOS never started. */
int gv100_bios_preos_wait_for_halt(struct gk20a *g);
#endif

View File

@@ -0,0 +1,184 @@
/*
* GV100 FB
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/types.h>
#include <nvgpu/dma.h>
#include <nvgpu/log.h>
#include <nvgpu/enabled.h>
#include <nvgpu/gmmu.h>
#include <nvgpu/nvgpu_common.h>
#include <nvgpu/kmem.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/acr/nvgpu_acr.h>
#include <nvgpu/firmware.h>
#include <nvgpu/pmu.h>
#include <nvgpu/falcon.h>
#include "gk20a/gk20a.h"
#include "gv100/fb_gv100.h"
#include "gm20b/acr_gm20b.h"
#include <nvgpu/hw/gv100/hw_fb_gv100.h>
#include <nvgpu/hw/gv100/hw_falcon_gv100.h>
#include <nvgpu/hw/gv100/hw_mc_gv100.h>
#define HW_SCRUB_TIMEOUT_DEFAULT 100 /* usec */
#define HW_SCRUB_TIMEOUT_MAX 2000000 /* usec */
#define MEM_UNLOCK_TIMEOUT 3500 /* msec */
/*
 * GV100 FB reset: poll until the HW memory scrubber reports done (so
 * memory is accessible), then clear the write-violation bit in the MMU
 * priv level mask.
 */
void gv100_fb_reset(struct gk20a *g)
{
	u32 val;
	int retries = HW_SCRUB_TIMEOUT_MAX / HW_SCRUB_TIMEOUT_DEFAULT;

	nvgpu_info(g, "reset gv100 fb");

	/* wait for memory to be accessible */
	do {
		u32 w = gk20a_readl(g, fb_niso_scrub_status_r());
		if (fb_niso_scrub_status_flag_v(w)) {
			nvgpu_info(g, "done");
			break;
		}
		nvgpu_udelay(HW_SCRUB_TIMEOUT_DEFAULT);
	} while (--retries);
	/* NOTE(review): a scrub timeout is not reported — the function
	 * falls through silently when retries reaches zero. */

	val = gk20a_readl(g, fb_mmu_priv_level_mask_r());
	val &= ~fb_mmu_priv_level_mask_write_violation_m();
	gk20a_writel(g, fb_mmu_priv_level_mask_r(), val);
}
/*
 * Unlock VPR-protected memory on SKUs that require it by loading and
 * running the "mem_unlock" HS ucode on the NVDEC falcon.
 *
 * Returns 0 on success (or when unlocking is not needed on this SKU),
 * -ENOENT if the ucode blob is missing, -EPERM if signature patching
 * fails, or -1 if the ucode reports a non-zero status in mailbox 0.
 */
int gv100_fb_memory_unlock(struct gk20a *g)
{
	struct nvgpu_firmware *mem_unlock_fw = NULL;
	struct bin_hdr *hsbin_hdr = NULL;
	struct acr_fw_header *fw_hdr = NULL;
	u32 *mem_unlock_ucode = NULL;
	u32 *mem_unlock_ucode_header = NULL;
	u32 sec_imem_dest = 0;
	u32 val = 0;
	int err = 0;

	nvgpu_log_fn(g, " ");

	/* Check vpr enable status: select the CYA_LO index in the VPR info
	 * register, then read back its in-use bit. */
	val = gk20a_readl(g, fb_mmu_vpr_info_r());
	val &= ~fb_mmu_vpr_info_index_m();
	val |= fb_mmu_vpr_info_index_cya_lo_v();
	gk20a_writel(g, fb_mmu_vpr_info_r(), val);
	val = gk20a_readl(g, fb_mmu_vpr_info_r());
	if (!(val & fb_mmu_vpr_info_cya_lo_in_use_m())) {
		nvgpu_log_info(g,
			"mem unlock not required on this SKU, skipping");
		goto exit;
	}

	/* get mem unlock ucode binary */
	mem_unlock_fw = nvgpu_request_firmware(g, "mem_unlock.bin", 0);
	if (!mem_unlock_fw) {
		nvgpu_err(g, "mem unlock ucode get fail");
		err = -ENOENT;
		goto exit;
	}

	/* Enable nvdec */
	g->ops.mc.enable(g, mc_enable_nvdec_enabled_f());

	/* nvdec falcon reset */
	nvgpu_flcn_reset(&g->nvdec_flcn);

	/* Parse the HS binary: bin header -> ACR fw header -> ucode
	 * descriptor table and code/data payload. */
	hsbin_hdr = (struct bin_hdr *)mem_unlock_fw->data;
	fw_hdr = (struct acr_fw_header *)(mem_unlock_fw->data +
			hsbin_hdr->header_offset);
	mem_unlock_ucode_header = (u32 *)(mem_unlock_fw->data +
			fw_hdr->hdr_offset);
	mem_unlock_ucode = (u32 *)(mem_unlock_fw->data +
			hsbin_hdr->data_offset);

	/* Patch ucode signatures */
	if (acr_ucode_patch_sig(g, mem_unlock_ucode,
		(u32 *)(mem_unlock_fw->data + fw_hdr->sig_prod_offset),
		(u32 *)(mem_unlock_fw->data + fw_hdr->sig_dbg_offset),
		(u32 *)(mem_unlock_fw->data + fw_hdr->patch_loc),
		(u32 *)(mem_unlock_fw->data + fw_hdr->patch_sig)) < 0) {
		nvgpu_err(g, "mem unlock patch signatures fail");
		err = -EPERM;
		goto exit;
	}

	/* Clear interrupts */
	nvgpu_flcn_set_irq(&g->nvdec_flcn, false, 0x0, 0x0);

	/* Copy Non Secure IMEM code */
	nvgpu_flcn_copy_to_imem(&g->nvdec_flcn, 0,
		(u8 *)&mem_unlock_ucode[
			mem_unlock_ucode_header[OS_CODE_OFFSET] >> 2],
		mem_unlock_ucode_header[OS_CODE_SIZE], 0, false,
		GET_IMEM_TAG(mem_unlock_ucode_header[OS_CODE_OFFSET]));

	/* Put secure code after non-secure block */
	sec_imem_dest = GET_NEXT_BLOCK(mem_unlock_ucode_header[OS_CODE_SIZE]);
	nvgpu_flcn_copy_to_imem(&g->nvdec_flcn, sec_imem_dest,
		(u8 *)&mem_unlock_ucode[
			mem_unlock_ucode_header[APP_0_CODE_OFFSET] >> 2],
		mem_unlock_ucode_header[APP_0_CODE_SIZE], 0, true,
		GET_IMEM_TAG(mem_unlock_ucode_header[APP_0_CODE_OFFSET]));

	/* load DMEM: ensure that signatures are patched */
	nvgpu_flcn_copy_to_dmem(&g->nvdec_flcn, 0, (u8 *)&mem_unlock_ucode[
		mem_unlock_ucode_header[OS_DATA_OFFSET] >> 2],
		mem_unlock_ucode_header[OS_DATA_SIZE], 0);

	nvgpu_log_info(g, "nvdec sctl reg %x\n",
		gk20a_readl(g, g->nvdec_flcn.flcn_base +
			falcon_falcon_sctl_r()));

	/* set BOOTVEC to start of non-secure code */
	nvgpu_flcn_bootstrap(&g->nvdec_flcn, 0);

	/* wait for complete & halt */
	nvgpu_flcn_wait_for_halt(&g->nvdec_flcn, MEM_UNLOCK_TIMEOUT);
	/* NOTE(review): the wait's return value is ignored — a hung falcon
	 * is only caught indirectly by the mailbox check below. */

	/* check mem unlock status */
	val = nvgpu_flcn_mailbox_read(&g->nvdec_flcn, 0);
	if (val) {
		nvgpu_err(g, "memory unlock failed, err %x", val);
		err = -1;
		goto exit;
	}

	nvgpu_log_info(g, "nvdec sctl reg %x\n",
		gk20a_readl(g, g->nvdec_flcn.flcn_base +
			falcon_falcon_sctl_r()));

exit:
	if (mem_unlock_fw)
		nvgpu_release_firmware(g, mem_unlock_fw);

	nvgpu_log_fn(g, "done, status - %d", err);

	return err;
}

View File

@@ -0,0 +1,32 @@
/*
* GV100 FB
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NVGPU_GV100_FB
#define _NVGPU_GV100_FB

struct gk20a;

/* Wait for HW memory scrub and drop the MMU write-violation mask bit. */
void gv100_fb_reset(struct gk20a *g);
/* Run the mem_unlock HS ucode on NVDEC for VPR SKUs; 0 on success or
 * when not required, negative errno / -1 on failure. */
int gv100_fb_memory_unlock(struct gk20a *g);
#endif

View File

@@ -0,0 +1,40 @@
/*
* GV100 fifo
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "fifo_gv100.h"
#include <nvgpu/hw/gv100/hw_ccsr_gv100.h>
#define DEFAULT_FIFO_PREEMPT_TIMEOUT 0x3FFFFFUL
/*
 * Return the number of channels (fifos) the GV100 host supports,
 * taken from the CCSR channel register array size.
 */
u32 gv100_fifo_get_num_fifos(struct gk20a *g)
{
	u32 num_channels = ccsr_channel__size_1_v();

	return num_channels;
}
/*
 * Return the preempt timeout used on GV100.  The value is a fixed
 * chip default; the gk20a argument is only present for the HAL
 * signature.
 */
u32 gv100_fifo_get_preempt_timeout(struct gk20a *g)
{
	const u32 preempt_timeout = DEFAULT_FIFO_PREEMPT_TIMEOUT;

	return preempt_timeout;
}

View File

@@ -0,0 +1,33 @@
/*
* GV100 Fifo
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef FIFO_GV100_H
#define FIFO_GV100_H
#include <nvgpu/types.h>
struct gk20a;
u32 gv100_fifo_get_num_fifos(struct gk20a *g);
u32 gv100_fifo_get_preempt_timeout(struct gk20a *g);
#endif

View File

@@ -0,0 +1,47 @@
/*
* GV100 Graphics Context
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "gk20a/gk20a.h"
#include "gr_ctx_gv100.h"
/*
 * Resolve the ctxsw netlist firmware path for this GPU into @name
 * (e.g. "gv100/<netlist image>").
 *
 * @param g      GPU driver struct; gpu_arch/gpu_impl select the image.
 * @param index  Netlist index (unused here; GV100 has a single
 *               production netlist).
 * @param name   Output buffer for the firmware path.  Assumed large
 *               enough for "gv100/" plus the image name -- TODO confirm
 *               against callers.
 *
 * Returns 0 on success, -1 for an unsupported GPU id (in which case
 * @name is left untouched).
 */
int gr_gv100_get_netlist_name(struct gk20a *g, int index, char *name)
{
	u32 ver = g->params.gpu_arch + g->params.gpu_impl;

	switch (ver) {
	case NVGPU_GPUID_GV100:
		sprintf(name, "%s/%s", "gv100",
			GV100_NETLIST_IMAGE_FW_NAME);
		break;
	default:
		/*
		 * Previously this path reported success while leaving the
		 * name buffer uninitialized; propagate the failure instead.
		 */
		nvgpu_err(g, "no support for GPUID %x", ver);
		return -1;
	}

	return 0;
}
/*
 * GV100 always uses a firmware-defined (netlist) context image, so
 * this query is a constant.
 */
bool gr_gv100_is_firmware_defined(void)
{
	const bool firmware_defined = true;

	return firmware_defined;
}

View File

@@ -0,0 +1,34 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __GR_CTX_GV100_H__
#define __GR_CTX_GV100_H__
#include "gk20a/gr_ctx_gk20a.h"
#include "nvgpu_gpuid_t19x.h"
/* production netlist, one and only one from below */
#define GV100_NETLIST_IMAGE_FW_NAME GK20A_NETLIST_IMAGE_D
int gr_gv100_get_netlist_name(struct gk20a *g, int index, char *name);
bool gr_gv100_is_firmware_defined(void);
#endif /*__GR_CTX_GV100_H__*/

View File

@@ -0,0 +1,349 @@
/*
* GV100 GPU GR
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/log.h>
#include <nvgpu/debug.h>
#include <nvgpu/enabled.h>
#include "gk20a/gk20a.h"
#include "gk20a/gr_gk20a.h"
#include "gv100/gr_gv100.h"
#include "gv11b/subctx_gv11b.h"
#include <nvgpu/hw/gv100/hw_gr_gv100.h>
#include <nvgpu/hw/gv100/hw_proj_gv100.h>
/*
 * Estimate performance if the given logical TPC in the given logical GPC were
 * removed.
 *
 * On success *perf holds a packed score built from three fx23.9 terms
 * (scale_factor == 512 is "1.0"):
 *   pix_scale   * min per-GPC pixel throughput   (bits [29:20])
 *   world_scale * surviving-PES ratio            (bits [19:10])
 *   tpc_scale   * TPC balance across GPCs        (bits [9:0])
 *
 * Returns 0 on success, -ENOMEM if the scratch array cannot be
 * allocated, and -EINVAL when the requested TPC is not actually present
 * in gpc_tpc_mask (removed twice / never there) or an intermediate term
 * exceeds its fixed-point scale.
 */
static int gr_gv100_scg_estimate_perf(struct gk20a *g,
					unsigned long *gpc_tpc_mask,
					u32 disable_gpc_id, u32 disable_tpc_id,
					int *perf)
{
	struct gr_gk20a *gr = &g->gr;
	int err = 0;
	u32 scale_factor = 512UL;	/* Use fx23.9 */
	u32 pix_scale = 1024*1024UL;	/* Pix perf in [29:20] */
	u32 world_scale = 1024UL;	/* World performance in [19:10] */
	u32 tpc_scale = 1;		/* TPC balancing in [9:0] */
	u32 scg_num_pes = 0;
	u32 min_scg_gpc_pix_perf = scale_factor; /* Init perf as maximum */
	u32 average_tpcs = 0;		/* Average of # of TPCs per GPC */
	u32 deviation;			/* absolute diff between TPC# and
					 * average_tpcs, averaged across GPCs
					 */
	u32 norm_tpc_deviation;		/* deviation/max_tpc_per_gpc */
	u32 tpc_balance;
	u32 scg_gpc_pix_perf;
	u32 scg_world_perf;
	u32 gpc_id;
	u32 pes_id;
	int diff;
	bool is_tpc_removed_gpc = false;
	bool is_tpc_removed_pes = false;
	u32 max_tpc_gpc = 0;
	u32 num_tpc_mask;
	/* scratch: surviving TPC count per GPC (max GPC count entries) */
	u32 *num_tpc_gpc = nvgpu_kzalloc(g, sizeof(u32) *
			nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS));

	if (!num_tpc_gpc)
		return -ENOMEM;

	/* Calculate pix-perf-reduction-rate per GPC and find bottleneck TPC */
	for (gpc_id = 0; gpc_id < gr->gpc_count; gpc_id++) {
		num_tpc_mask = gpc_tpc_mask[gpc_id];

		if ((gpc_id == disable_gpc_id) && num_tpc_mask &
				(0x1 << disable_tpc_id)) {
			/* Safety check if a TPC is removed twice */
			if (is_tpc_removed_gpc) {
				err = -EINVAL;
				goto free_resources;
			}
			/* Remove logical TPC from set */
			num_tpc_mask &= ~(0x1 << disable_tpc_id);
			is_tpc_removed_gpc = true;
		}

		/* track balancing of tpcs across gpcs */
		num_tpc_gpc[gpc_id] = hweight32(num_tpc_mask);
		average_tpcs += num_tpc_gpc[gpc_id];

		/* save the maximum number of tpcs found in a single gpc */
		max_tpc_gpc = num_tpc_gpc[gpc_id] > max_tpc_gpc ?
				num_tpc_gpc[gpc_id] : max_tpc_gpc;

		/*
		 * Calculate ratio between TPC count and post-FS and post-SCG
		 *
		 * ratio represents relative throughput of the GPC
		 */
		scg_gpc_pix_perf = scale_factor * num_tpc_gpc[gpc_id] /
					gr->gpc_tpc_count[gpc_id];

		if (min_scg_gpc_pix_perf > scg_gpc_pix_perf)
			min_scg_gpc_pix_perf = scg_gpc_pix_perf;

		/* Calculate # of surviving PES */
		for (pes_id = 0; pes_id < gr->gpc_ppc_count[gpc_id]; pes_id++) {
			/* Count the number of TPC on the set */
			num_tpc_mask = gr->pes_tpc_mask[pes_id][gpc_id] &
					gpc_tpc_mask[gpc_id];

			if ((gpc_id == disable_gpc_id) && (num_tpc_mask &
					(0x1 << disable_tpc_id))) {
				/* Safety check if a TPC is removed twice */
				if (is_tpc_removed_pes) {
					err = -EINVAL;
					goto free_resources;
				}
				num_tpc_mask &= ~(0x1 << disable_tpc_id);
				is_tpc_removed_pes = true;
			}
			if (hweight32(num_tpc_mask))
				scg_num_pes++;
		}
	}

	/* The TPC to remove must have been found in both scans above. */
	if (!is_tpc_removed_gpc || !is_tpc_removed_pes) {
		err = -EINVAL;
		goto free_resources;
	}

	if (max_tpc_gpc == 0) {
		/* No TPCs left at all: zero perf, but not an error. */
		*perf = 0;
		goto free_resources;
	}

	/* Now calculate perf */
	scg_world_perf = (scale_factor * scg_num_pes) / gr->ppc_count;
	deviation = 0;
	average_tpcs = scale_factor * average_tpcs / gr->gpc_count;
	for (gpc_id =0; gpc_id < gr->gpc_count; gpc_id++) {
		diff = average_tpcs - scale_factor * num_tpc_gpc[gpc_id];
		if (diff < 0)
			diff = -diff;
		deviation += diff;
	}

	deviation /= gr->gpc_count;

	norm_tpc_deviation = deviation / max_tpc_gpc;

	tpc_balance = scale_factor - norm_tpc_deviation;

	/* All three terms must stay within fx23.9 "1.0" (scale_factor). */
	if ((tpc_balance > scale_factor)          ||
	    (scg_world_perf > scale_factor)       ||
	    (min_scg_gpc_pix_perf > scale_factor) ||
	    (norm_tpc_deviation > scale_factor)) {
		err = -EINVAL;
		goto free_resources;
	}

	*perf = (pix_scale * min_scg_gpc_pix_perf) +
		(world_scale * scg_world_perf) +
		(tpc_scale * tpc_balance);
free_resources:
	nvgpu_kfree(g, num_tpc_gpc);
	return err;
}
/*
 * Seed the gr SW state with the GV100 default sizes/limits for the
 * bundle circular buffer, read from the register field defaults.
 */
void gr_gv100_bundle_cb_defaults(struct gk20a *g)
{
	struct gr_gk20a *gr_state = &g->gr;

	/* Bundle CB size, in 256B units. */
	gr_state->bundle_cb_default_size =
		gr_scc_bundle_cb_size_div_256b__prod_v();
	/* Minimum GPM FIFO depth limit. */
	gr_state->min_gpm_fifo_depth =
		gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v();
	/* Bundle CB token limit. */
	gr_state->bundle_cb_token_limit =
		gr_pd_ab_dist_cfg2_token_limit_init_v();
}
/*
 * Set default attribute (beta) and alpha circular buffer sizes.
 * The attrib CB size is only filled in if nothing configured it yet;
 * the alpha CB size is always reset to the hardware default.
 */
void gr_gv100_cb_size_default(struct gk20a *g)
{
	struct gr_gk20a *gr_state = &g->gr;

	if (gr_state->attrib_cb_default_size == 0)
		gr_state->attrib_cb_default_size =
			gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v();
	gr_state->alpha_cb_default_size =
		gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v();
}
/*
 * Intentional no-op on GV100: this HAL hook exists for chips that
 * program a GPC TPC floorsweeping mask from the driver.
 * NOTE(review): presumably GV100 relies on fuses / gr_fe_tpc_fs
 * programming (see gr_gv100_load_tpc_mask) instead -- confirm.
 */
void gr_gv100_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
{
}
/*
 * Build the SM-id -> (GPC, TPC) mapping table for GV100.
 *
 * TPCs are assigned greedily: at each step, pick the (gpc, tpc) whose
 * removal from the remaining pool yields the best estimated perf (see
 * gr_gv100_scg_estimate_perf), remove it from the pool, and append it
 * to the ordered table.  Then each table slot is expanded into
 * sm_per_tpc consecutive SM ids in g->gr.sm_to_cluster.
 *
 * On allocation or estimation failure the function logs and returns
 * early, leaving sm_to_cluster/no_of_sm unmodified.
 */
void gr_gv100_init_sm_id_table(struct gk20a *g)
{
	u32 gpc, tpc, sm, pes, gtpc;
	u32 sm_id = 0;
	u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC);
	u32 num_sm = sm_per_tpc * g->gr.tpc_count;
	int perf, maxperf;
	int err;
	unsigned long *gpc_tpc_mask;	/* per-GPC pool of unassigned TPCs */
	u32 *tpc_table, *gpc_table;	/* ordered placement result */

	gpc_table = nvgpu_kzalloc(g, g->gr.tpc_count * sizeof(u32));
	tpc_table = nvgpu_kzalloc(g, g->gr.tpc_count * sizeof(u32));
	gpc_tpc_mask = nvgpu_kzalloc(g, sizeof(unsigned long) *
			nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS));

	/* nvgpu_kfree below is assumed NULL-safe -- TODO confirm */
	if (!gpc_table || !tpc_table || !gpc_tpc_mask) {
		nvgpu_err(g, "Error allocating memory for sm tables");
		goto exit_build_table;
	}

	/* Initial pool: union of the PES TPC masks of each GPC. */
	for (gpc = 0; gpc < g->gr.gpc_count; gpc++)
		for (pes = 0; pes < g->gr.gpc_ppc_count[gpc]; pes++)
			gpc_tpc_mask[gpc] |= g->gr.pes_tpc_mask[pes][gpc];

	for (gtpc = 0; gtpc < g->gr.tpc_count; gtpc++) {
		maxperf = -1;
		/* Evaluate every TPC still in the pool; keep the best. */
		for (gpc = 0; gpc < g->gr.gpc_count; gpc++) {
			for_each_set_bit(tpc, &gpc_tpc_mask[gpc],
					g->gr.gpc_tpc_count[gpc]) {
				perf = -1;
				err = gr_gv100_scg_estimate_perf(g,
						gpc_tpc_mask, gpc, tpc, &perf);

				if (err) {
					nvgpu_err(g,
						"Error while estimating perf");
					goto exit_build_table;
				}

				/* ">=" keeps the last of equally good picks */
				if (perf >= maxperf) {
					maxperf = perf;
					gpc_table[gtpc] = gpc;
					tpc_table[gtpc] = tpc;
				}
			}
		}
		/* Remove the chosen TPC from the pool for the next round. */
		gpc_tpc_mask[gpc_table[gtpc]] &= ~(0x1 << tpc_table[gtpc]);
	}

	/* Expand each placed TPC into its sm_per_tpc SM entries. */
	for (tpc = 0, sm_id = 0;  sm_id < num_sm; tpc++, sm_id += sm_per_tpc) {
		for (sm = 0; sm < sm_per_tpc; sm++) {
			u32 index = sm_id + sm;

			g->gr.sm_to_cluster[index].gpc_index = gpc_table[tpc];
			g->gr.sm_to_cluster[index].tpc_index = tpc_table[tpc];
			g->gr.sm_to_cluster[index].sm_index = sm;
			g->gr.sm_to_cluster[index].global_tpc_index = tpc;

			nvgpu_log_info(g,
				"gpc : %d tpc %d sm_index %d global_index: %d",
				g->gr.sm_to_cluster[index].gpc_index,
				g->gr.sm_to_cluster[index].tpc_index,
				g->gr.sm_to_cluster[index].sm_index,
				g->gr.sm_to_cluster[index].global_tpc_index);

		}
	}

	g->gr.no_of_sm = num_sm;
	nvgpu_log_info(g, " total number of sm = %d", g->gr.no_of_sm);
exit_build_table:
	nvgpu_kfree(g, gpc_table);
	nvgpu_kfree(g, tpc_table);
	nvgpu_kfree(g, gpc_tpc_mask);
}
/*
 * Assemble the global TPC floorsweeping mask from the per-PES TPC
 * masks of every GPC and program it into gr_fe_tpc_fs(0)/(1).
 */
void gr_gv100_load_tpc_mask(struct gk20a *g)
{
	u64 tpc_fs_mask = 0ULL;
	u32 tpc_stride = nvgpu_get_litter_value(g,
				GPU_LIT_NUM_TPC_PER_GPC);
	u32 gpc_idx, pes_idx;

	/* gv100 has 6 GPC and 7 TPC/GPC */
	for (gpc_idx = 0; gpc_idx < g->gr.gpc_count; gpc_idx++)
		for (pes_idx = 0; pes_idx < g->gr.pe_count_per_gpc; pes_idx++)
			tpc_fs_mask |=
				(u64) g->gr.pes_tpc_mask[pes_idx][gpc_idx] <<
					(tpc_stride * gpc_idx);

	nvgpu_log_info(g, "pes_tpc_mask: %016llx\n", tpc_fs_mask);

	/* Low then high 32 bits of the 64-bit mask. */
	gk20a_writel(g, gr_fe_tpc_fs_r(0), u64_lo32(tpc_fs_mask));
	gk20a_writel(g, gr_fe_tpc_fs_r(1), u64_hi32(tpc_fs_mask));
}
/*
 * Compute the number of patch-buffer slots needed on GV100, including
 * the per-subcontext PE/PL table updates on top of the common gk20a
 * requirement, rounded up to whole 4K pages with extra headroom.
 */
u32 gr_gv100_get_patch_slots(struct gk20a *g)
{
	struct gr_gk20a *gr = &g->gr;
	struct fifo_gk20a *f = &g->fifo;
	u32 num_slots = 0;

	/* One CMD slot to kick off the PE table update. */
	num_slots += 1;
	/* PE table contents: each patch write covers 32 TPCs. */
	num_slots += DIV_ROUND_UP(gr->tpc_count, 32);
	/* PL table contents: each patch write configures 4 TPCs. */
	num_slots += DIV_ROUND_UP(gr->tpc_count, 4);
	/* The updates above are replicated for every subcontext. */
	num_slots *= f->t19x.max_subctx_count;
	/*
	 * Two slots for a partition mode change, since
	 * DYNAMIC -> STATIC requires DYNAMIC -> NONE -> STATIC.
	 */
	num_slots += 2;
	/* Base patch buffer size used by the common code. */
	num_slots += gr_gk20a_get_patch_slots(g);
	/* Round up to a whole number of pages worth of slots. */
	num_slots = ALIGN(num_slots, PATCH_CTX_SLOTS_PER_PAGE);
	/* Headroom for additional TPC partition updates. */
	num_slots += 2 * PATCH_CTX_SLOTS_PER_PAGE;

	return num_slots;
}

View File

@@ -0,0 +1,36 @@
/*
* GV100 GPU GR
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NVGPU_GR_GV100_H_
#define _NVGPU_GR_GV100_H_
void gr_gv100_bundle_cb_defaults(struct gk20a *g);
void gr_gv100_cb_size_default(struct gk20a *g);
void gr_gv100_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index);
void gr_gv100_init_sm_id_table(struct gk20a *g);
void gr_gv100_program_sm_id_numbering(struct gk20a *g,
u32 gpc, u32 tpc, u32 smid);
int gr_gv100_load_smid_config(struct gk20a *g);
u32 gr_gv100_get_patch_slots(struct gk20a *g);
#endif

View File

@@ -0,0 +1,32 @@
/*
* GV100 Graphics
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef GV100_H
#define GV100_H
#include "gk20a/gk20a.h"
int gv100_init_gpu_characteristics(struct gk20a *g);
#endif /* GV100_H */

View File

@@ -0,0 +1,769 @@
/*
* GV100 Tegra HAL interface
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/types.h>
#include <linux/tegra_gpu_t19x.h>
#include "gk20a/gk20a.h"
#include "gk20a/fifo_gk20a.h"
#include "gk20a/fecs_trace_gk20a.h"
#include "gk20a/css_gr_gk20a.h"
#include "gk20a/mc_gk20a.h"
#include "gk20a/dbg_gpu_gk20a.h"
#include "gk20a/bus_gk20a.h"
#include "gk20a/pramin_gk20a.h"
#include "gk20a/flcn_gk20a.h"
#include "gk20a/regops_gk20a.h"
#include "gk20a/fb_gk20a.h"
#include "gk20a/mm_gk20a.h"
#include "gk20a/pmu_gk20a.h"
#include "gk20a/gr_gk20a.h"
#include "gm20b/ltc_gm20b.h"
#include "gm20b/gr_gm20b.h"
#include "gm20b/fifo_gm20b.h"
#include "gm20b/fb_gm20b.h"
#include "gm20b/mm_gm20b.h"
#include "gm20b/pmu_gm20b.h"
#include "gm20b/acr_gm20b.h"
#include "gp10b/fb_gp10b.h"
#include "gp10b/gr_gp10b.h"
#include "gp106/clk_gp106.h"
#include "gp106/clk_arb_gp106.h"
#include "gp106/pmu_gp106.h"
#include "gp106/acr_gp106.h"
#include "gp106/sec2_gp106.h"
#include "gp106/bios_gp106.h"
#include "gv100/bios_gv100.h"
#include "gp106/therm_gp106.h"
#include "gp106/xve_gp106.h"
#include "gp106/clk_gp106.h"
#include "gp106/flcn_gp106.h"
#include "gp10b/ltc_gp10b.h"
#include "gp10b/therm_gp10b.h"
#include "gp10b/mc_gp10b.h"
#include "gp10b/ce_gp10b.h"
#include "gp10b/priv_ring_gp10b.h"
#include "gp10b/fifo_gp10b.h"
#include "gp10b/fecs_trace_gp10b.h"
#include "gp10b/mm_gp10b.h"
#include "gp10b/pmu_gp10b.h"
#include "gv11b/css_gr_gv11b.h"
#include "gv11b/dbg_gpu_gv11b.h"
#include "gv11b/hal_gv11b.h"
#include "gv100/gr_gv100.h"
#include "gv11b/mc_gv11b.h"
#include "gv11b/ltc_gv11b.h"
#include "gv11b/gv11b.h"
#include "gv11b/ce_gv11b.h"
#include "gv100/gr_ctx_gv100.h"
#include "gv11b/mm_gv11b.h"
#include "gv11b/pmu_gv11b.h"
#include "gv11b/fb_gv11b.h"
#include "gv100/mm_gv100.h"
#include "gv11b/pmu_gv11b.h"
#include "gv100/fb_gv100.h"
#include "gv100/fifo_gv100.h"
#include "gv11b/fifo_gv11b.h"
#include "gv11b/regops_gv11b.h"
#include "gv11b/gv11b_gating_reglist.h"
#include "gv100/regops_gv100.h"
#include "gv11b/subctx_gv11b.h"
#include "gv100.h"
#include "hal_gv100.h"
#include "gv100/fb_gv100.h"
#include "gv100/mm_gv100.h"
#include <nvgpu/bus.h>
#include <nvgpu/debug.h>
#include <nvgpu/enabled.h>
#include <nvgpu/enabled_t19x.h>
#include <nvgpu/ctxsw_trace.h>
#include <nvgpu/hw/gv100/hw_proj_gv100.h>
#include <nvgpu/hw/gv100/hw_fifo_gv100.h>
#include <nvgpu/hw/gv100/hw_ram_gv100.h>
#include <nvgpu/hw/gv100/hw_top_gv100.h>
#include <nvgpu/hw/gv100/hw_pram_gv100.h>
#include <nvgpu/hw/gv100/hw_pwr_gv100.h>
static int gv100_get_litter_value(struct gk20a *g, int value)
{
int ret = EINVAL;
switch (value) {
case GPU_LIT_NUM_GPCS:
ret = proj_scal_litter_num_gpcs_v();
break;
case GPU_LIT_NUM_PES_PER_GPC:
ret = proj_scal_litter_num_pes_per_gpc_v();
break;
case GPU_LIT_NUM_ZCULL_BANKS:
ret = proj_scal_litter_num_zcull_banks_v();
break;
case GPU_LIT_NUM_TPC_PER_GPC:
ret = proj_scal_litter_num_tpc_per_gpc_v();
break;
case GPU_LIT_NUM_SM_PER_TPC:
ret = proj_scal_litter_num_sm_per_tpc_v();
break;
case GPU_LIT_NUM_FBPS:
ret = proj_scal_litter_num_fbps_v();
break;
case GPU_LIT_GPC_BASE:
ret = proj_gpc_base_v();
break;
case GPU_LIT_GPC_STRIDE:
ret = proj_gpc_stride_v();
break;
case GPU_LIT_GPC_SHARED_BASE:
ret = proj_gpc_shared_base_v();
break;
case GPU_LIT_TPC_IN_GPC_BASE:
ret = proj_tpc_in_gpc_base_v();
break;
case GPU_LIT_TPC_IN_GPC_STRIDE:
ret = proj_tpc_in_gpc_stride_v();
break;
case GPU_LIT_TPC_IN_GPC_SHARED_BASE:
ret = proj_tpc_in_gpc_shared_base_v();
break;
case GPU_LIT_PPC_IN_GPC_BASE:
ret = proj_ppc_in_gpc_base_v();
break;
case GPU_LIT_PPC_IN_GPC_STRIDE:
ret = proj_ppc_in_gpc_stride_v();
break;
case GPU_LIT_PPC_IN_GPC_SHARED_BASE:
ret = proj_ppc_in_gpc_shared_base_v();
break;
case GPU_LIT_ROP_BASE:
ret = proj_rop_base_v();
break;
case GPU_LIT_ROP_STRIDE:
ret = proj_rop_stride_v();
break;
case GPU_LIT_ROP_SHARED_BASE:
ret = proj_rop_shared_base_v();
break;
case GPU_LIT_HOST_NUM_ENGINES:
ret = proj_host_num_engines_v();
break;
case GPU_LIT_HOST_NUM_PBDMA:
ret = proj_host_num_pbdma_v();
break;
case GPU_LIT_LTC_STRIDE:
ret = proj_ltc_stride_v();
break;
case GPU_LIT_LTS_STRIDE:
ret = proj_lts_stride_v();
break;
case GPU_LIT_NUM_FBPAS:
ret = proj_scal_litter_num_fbpas_v();
break;
case GPU_LIT_FBPA_SHARED_BASE:
ret = proj_fbpa_shared_base_v();
break;
case GPU_LIT_FBPA_BASE:
ret = proj_fbpa_base_v();
break;
case GPU_LIT_FBPA_STRIDE:
ret = proj_fbpa_stride_v();
break;
case GPU_LIT_SM_PRI_STRIDE:
ret = proj_sm_stride_v();
break;
case GPU_LIT_SMPC_PRI_BASE:
ret = proj_smpc_base_v();
break;
case GPU_LIT_SMPC_PRI_SHARED_BASE:
ret = proj_smpc_shared_base_v();
break;
case GPU_LIT_SMPC_PRI_UNIQUE_BASE:
ret = proj_smpc_unique_base_v();
break;
case GPU_LIT_SMPC_PRI_STRIDE:
ret = proj_smpc_stride_v();
break;
case GPU_LIT_TWOD_CLASS:
ret = FERMI_TWOD_A;
break;
case GPU_LIT_THREED_CLASS:
ret = VOLTA_A;
break;
case GPU_LIT_COMPUTE_CLASS:
ret = VOLTA_COMPUTE_A;
break;
case GPU_LIT_GPFIFO_CLASS:
ret = VOLTA_CHANNEL_GPFIFO_A;
break;
case GPU_LIT_I2M_CLASS:
ret = KEPLER_INLINE_TO_MEMORY_B;
break;
case GPU_LIT_DMA_COPY_CLASS:
ret = VOLTA_DMA_COPY_A;
break;
default:
break;
}
return ret;
}
/*
 * GV100 GPU characteristics init: run the common gk20a init, then
 * flag TSG subcontext support as available on this chip.
 * Returns 0 on success or the error from the common init.
 */
int gv100_init_gpu_characteristics(struct gk20a *g)
{
	int err = gk20a_init_gpu_characteristics(g);

	if (err != 0)
		return err;

	__nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS, true);

	return 0;
}
static const struct gpu_ops gv100_ops = {
.bios = {
.init = gp106_bios_init,
.preos_wait_for_halt = gv100_bios_preos_wait_for_halt,
.preos_reload_check = gv100_bios_preos_reload_check,
},
.ltc = {
.determine_L2_size_bytes = gp10b_determine_L2_size_bytes,
.set_zbc_s_entry = gv11b_ltc_set_zbc_stencil_entry,
.set_zbc_color_entry = gm20b_ltc_set_zbc_color_entry,
.set_zbc_depth_entry = gm20b_ltc_set_zbc_depth_entry,
.init_cbc = NULL,
.init_fs_state = gv11b_ltc_init_fs_state,
.init_comptags = gp10b_ltc_init_comptags,
.cbc_ctrl = gm20b_ltc_cbc_ctrl,
.isr = gv11b_ltc_isr,
.cbc_fix_config = NULL,
.flush = gm20b_flush_ltc,
.set_enabled = gp10b_ltc_set_enabled,
},
.ce2 = {
.isr_stall = gv11b_ce_isr,
.isr_nonstall = gp10b_ce_nonstall_isr,
.get_num_pce = gv11b_ce_get_num_pce,
},
.gr = {
.get_patch_slots = gr_gv100_get_patch_slots,
.init_gpc_mmu = gr_gv11b_init_gpc_mmu,
.bundle_cb_defaults = gr_gv100_bundle_cb_defaults,
.cb_size_default = gr_gv100_cb_size_default,
.calc_global_ctx_buffer_size =
gr_gv11b_calc_global_ctx_buffer_size,
.commit_global_attrib_cb = gr_gv11b_commit_global_attrib_cb,
.commit_global_bundle_cb = gr_gp10b_commit_global_bundle_cb,
.commit_global_cb_manager = gr_gp10b_commit_global_cb_manager,
.commit_global_pagepool = gr_gp10b_commit_global_pagepool,
.handle_sw_method = gr_gv11b_handle_sw_method,
.set_alpha_circular_buffer_size =
gr_gv11b_set_alpha_circular_buffer_size,
.set_circular_buffer_size = gr_gv11b_set_circular_buffer_size,
.enable_hww_exceptions = gr_gv11b_enable_hww_exceptions,
.is_valid_class = gr_gv11b_is_valid_class,
.is_valid_gfx_class = gr_gv11b_is_valid_gfx_class,
.is_valid_compute_class = gr_gv11b_is_valid_compute_class,
.get_sm_dsm_perf_regs = gv11b_gr_get_sm_dsm_perf_regs,
.get_sm_dsm_perf_ctrl_regs = gv11b_gr_get_sm_dsm_perf_ctrl_regs,
.init_fs_state = gr_gv11b_init_fs_state,
.set_hww_esr_report_mask = gv11b_gr_set_hww_esr_report_mask,
.falcon_load_ucode = gr_gm20b_load_ctxsw_ucode_segments,
.load_ctxsw_ucode = gr_gm20b_load_ctxsw_ucode,
.set_gpc_tpc_mask = gr_gv100_set_gpc_tpc_mask,
.get_gpc_tpc_mask = gr_gm20b_get_gpc_tpc_mask,
.free_channel_ctx = gk20a_free_channel_ctx,
.alloc_obj_ctx = gk20a_alloc_obj_ctx,
.bind_ctxsw_zcull = gr_gk20a_bind_ctxsw_zcull,
.get_zcull_info = gr_gk20a_get_zcull_info,
.is_tpc_addr = gr_gm20b_is_tpc_addr,
.get_tpc_num = gr_gm20b_get_tpc_num,
.detect_sm_arch = gr_gv11b_detect_sm_arch,
.add_zbc_color = gr_gp10b_add_zbc_color,
.add_zbc_depth = gr_gp10b_add_zbc_depth,
.zbc_set_table = gk20a_gr_zbc_set_table,
.zbc_query_table = gr_gk20a_query_zbc,
.pmu_save_zbc = gk20a_pmu_save_zbc,
.add_zbc = gr_gk20a_add_zbc,
.pagepool_default_size = gr_gv11b_pagepool_default_size,
.init_ctx_state = gr_gp10b_init_ctx_state,
.alloc_gr_ctx = gr_gp10b_alloc_gr_ctx,
.free_gr_ctx = gr_gp10b_free_gr_ctx,
.update_ctxsw_preemption_mode =
gr_gp10b_update_ctxsw_preemption_mode,
.dump_gr_regs = gr_gv11b_dump_gr_status_regs,
.update_pc_sampling = gr_gm20b_update_pc_sampling,
.get_fbp_en_mask = gr_gm20b_get_fbp_en_mask,
.get_max_ltc_per_fbp = gr_gm20b_get_max_ltc_per_fbp,
.get_max_lts_per_ltc = gr_gm20b_get_max_lts_per_ltc,
.get_rop_l2_en_mask = gr_gm20b_rop_l2_en_mask,
.get_max_fbps_count = gr_gm20b_get_max_fbps_count,
.init_sm_dsm_reg_info = gv11b_gr_init_sm_dsm_reg_info,
.wait_empty = gr_gv11b_wait_empty,
.init_cyclestats = gr_gm20b_init_cyclestats,
.set_sm_debug_mode = gv11b_gr_set_sm_debug_mode,
.enable_cde_in_fecs = gr_gm20b_enable_cde_in_fecs,
.bpt_reg_info = gv11b_gr_bpt_reg_info,
.get_access_map = gr_gv11b_get_access_map,
.handle_fecs_error = gr_gv11b_handle_fecs_error,
.handle_sm_exception = gr_gk20a_handle_sm_exception,
.handle_tex_exception = gr_gv11b_handle_tex_exception,
.enable_gpc_exceptions = gr_gv11b_enable_gpc_exceptions,
.enable_exceptions = gr_gv11b_enable_exceptions,
.get_lrf_tex_ltc_dram_override = get_ecc_override_val,
.update_smpc_ctxsw_mode = gr_gk20a_update_smpc_ctxsw_mode,
.update_hwpm_ctxsw_mode = gr_gk20a_update_hwpm_ctxsw_mode,
.record_sm_error_state = gv11b_gr_record_sm_error_state,
.update_sm_error_state = gv11b_gr_update_sm_error_state,
.clear_sm_error_state = gm20b_gr_clear_sm_error_state,
.suspend_contexts = gr_gp10b_suspend_contexts,
.resume_contexts = gr_gk20a_resume_contexts,
.get_preemption_mode_flags = gr_gp10b_get_preemption_mode_flags,
.init_sm_id_table = gr_gv100_init_sm_id_table,
.load_smid_config = gr_gv11b_load_smid_config,
.program_sm_id_numbering = gr_gv11b_program_sm_id_numbering,
.is_ltcs_ltss_addr = gr_gm20b_is_ltcs_ltss_addr,
.is_ltcn_ltss_addr = gr_gm20b_is_ltcn_ltss_addr,
.split_lts_broadcast_addr = gr_gm20b_split_lts_broadcast_addr,
.split_ltc_broadcast_addr = gr_gm20b_split_ltc_broadcast_addr,
.setup_rop_mapping = gr_gv11b_setup_rop_mapping,
.program_zcull_mapping = gr_gv11b_program_zcull_mapping,
.commit_global_timeslice = gr_gv11b_commit_global_timeslice,
.commit_inst = gr_gv11b_commit_inst,
.write_zcull_ptr = gr_gv11b_write_zcull_ptr,
.write_pm_ptr = gr_gv11b_write_pm_ptr,
.init_elcg_mode = gr_gv11b_init_elcg_mode,
.load_tpc_mask = gr_gv11b_load_tpc_mask,
.inval_icache = gr_gk20a_inval_icache,
.trigger_suspend = gv11b_gr_sm_trigger_suspend,
.wait_for_pause = gr_gk20a_wait_for_pause,
.resume_from_pause = gv11b_gr_resume_from_pause,
.clear_sm_errors = gr_gk20a_clear_sm_errors,
.tpc_enabled_exceptions = gr_gk20a_tpc_enabled_exceptions,
.get_esr_sm_sel = gv11b_gr_get_esr_sm_sel,
.sm_debugger_attached = gv11b_gr_sm_debugger_attached,
.suspend_single_sm = gv11b_gr_suspend_single_sm,
.suspend_all_sms = gv11b_gr_suspend_all_sms,
.resume_single_sm = gv11b_gr_resume_single_sm,
.resume_all_sms = gv11b_gr_resume_all_sms,
.get_sm_hww_warp_esr = gv11b_gr_get_sm_hww_warp_esr,
.get_sm_hww_global_esr = gv11b_gr_get_sm_hww_global_esr,
.get_sm_no_lock_down_hww_global_esr_mask =
gv11b_gr_get_sm_no_lock_down_hww_global_esr_mask,
.lock_down_sm = gv11b_gr_lock_down_sm,
.wait_for_sm_lock_down = gv11b_gr_wait_for_sm_lock_down,
.clear_sm_hww = gv11b_gr_clear_sm_hww,
.init_ovr_sm_dsm_perf = gv11b_gr_init_ovr_sm_dsm_perf,
.get_ovr_perf_regs = gv11b_gr_get_ovr_perf_regs,
.disable_rd_coalesce = gm20a_gr_disable_rd_coalesce,
.set_boosted_ctx = gr_gp10b_set_boosted_ctx,
.set_preemption_mode = gr_gp10b_set_preemption_mode,
.set_czf_bypass = NULL,
.pre_process_sm_exception = gr_gv11b_pre_process_sm_exception,
.set_preemption_buffer_va = gr_gv11b_set_preemption_buffer_va,
.init_preemption_state = NULL,
.update_boosted_ctx = gr_gp10b_update_boosted_ctx,
.set_bes_crop_debug3 = gr_gp10b_set_bes_crop_debug3,
.create_gr_sysfs = gr_gv11b_create_sysfs,
.set_ctxsw_preemption_mode = gr_gp10b_set_ctxsw_preemption_mode,
.is_etpc_addr = gv11b_gr_pri_is_etpc_addr,
.egpc_etpc_priv_addr_table = gv11b_gr_egpc_etpc_priv_addr_table,
.handle_tpc_mpc_exception = gr_gv11b_handle_tpc_mpc_exception,
.zbc_s_query_table = gr_gv11b_zbc_s_query_table,
.load_zbc_s_default_tbl = gr_gv11b_load_stencil_default_tbl,
.handle_gpc_gpcmmu_exception =
gr_gv11b_handle_gpc_gpcmmu_exception,
.add_zbc_type_s = gr_gv11b_add_zbc_type_s,
.get_egpc_base = gv11b_gr_get_egpc_base,
.get_egpc_etpc_num = gv11b_gr_get_egpc_etpc_num,
.handle_gpc_gpccs_exception =
gr_gv11b_handle_gpc_gpccs_exception,
.load_zbc_s_tbl = gr_gv11b_load_stencil_tbl,
.access_smpc_reg = gv11b_gr_access_smpc_reg,
.is_egpc_addr = gv11b_gr_pri_is_egpc_addr,
.add_zbc_s = gr_gv11b_add_zbc_stencil,
.handle_gcc_exception = gr_gv11b_handle_gcc_exception,
.init_sw_veid_bundle = gr_gv11b_init_sw_veid_bundle,
.handle_tpc_sm_ecc_exception =
gr_gv11b_handle_tpc_sm_ecc_exception,
.decode_egpc_addr = gv11b_gr_decode_egpc_addr,
},
.fb = {
.reset = gv100_fb_reset,
.init_hw = gk20a_fb_init_hw,
.init_fs_state = NULL,
.set_mmu_page_size = gm20b_fb_set_mmu_page_size,
.set_use_full_comp_tag_line =
gm20b_fb_set_use_full_comp_tag_line,
.compression_page_size = gp10b_fb_compression_page_size,
.compressible_page_size = gp10b_fb_compressible_page_size,
.vpr_info_fetch = gm20b_fb_vpr_info_fetch,
.dump_vpr_wpr_info = gm20b_fb_dump_vpr_wpr_info,
.read_wpr_info = gm20b_fb_read_wpr_info,
.is_debug_mode_enabled = gm20b_fb_debug_mode_enabled,
.set_debug_mode = gm20b_fb_set_debug_mode,
.tlb_invalidate = gk20a_fb_tlb_invalidate,
.hub_isr = gv11b_fb_hub_isr,
.mem_unlock = gv100_fb_memory_unlock,
},
.fifo = {
.get_preempt_timeout = gv100_fifo_get_preempt_timeout,
.init_fifo_setup_hw = gv11b_init_fifo_setup_hw,
.bind_channel = channel_gm20b_bind,
.unbind_channel = channel_gv11b_unbind,
.disable_channel = gk20a_fifo_disable_channel,
.enable_channel = gk20a_fifo_enable_channel,
.alloc_inst = gk20a_fifo_alloc_inst,
.free_inst = gk20a_fifo_free_inst,
.setup_ramfc = channel_gv11b_setup_ramfc,
.channel_set_timeslice = gk20a_fifo_set_timeslice,
.default_timeslice_us = gk20a_fifo_default_timeslice_us,
.setup_userd = gk20a_fifo_setup_userd,
.userd_gp_get = gv11b_userd_gp_get,
.userd_gp_put = gv11b_userd_gp_put,
.userd_pb_get = gv11b_userd_pb_get,
.pbdma_acquire_val = gk20a_fifo_pbdma_acquire_val,
.preempt_channel = gv11b_fifo_preempt_channel,
.preempt_tsg = gv11b_fifo_preempt_tsg,
.enable_tsg = gv11b_fifo_enable_tsg,
.disable_tsg = gk20a_disable_tsg,
.tsg_verify_channel_status = gk20a_fifo_tsg_unbind_channel_verify_status,
.tsg_verify_status_ctx_reload = gm20b_fifo_tsg_verify_status_ctx_reload,
.tsg_verify_status_faulted = gv11b_fifo_tsg_verify_status_faulted,
.update_runlist = gk20a_fifo_update_runlist,
.trigger_mmu_fault = NULL,
.get_mmu_fault_info = NULL,
.wait_engine_idle = gk20a_fifo_wait_engine_idle,
.get_num_fifos = gv100_fifo_get_num_fifos,
.get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
.set_runlist_interleave = gk20a_fifo_set_runlist_interleave,
.tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice,
.force_reset_ch = gk20a_fifo_force_reset_ch,
.engine_enum_from_type = gp10b_fifo_engine_enum_from_type,
.device_info_data_parse = gp10b_device_info_data_parse,
.eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
.init_engine_info = gk20a_fifo_init_engine_info,
.runlist_entry_size = ram_rl_entry_size_v,
.get_tsg_runlist_entry = gv11b_get_tsg_runlist_entry,
.get_ch_runlist_entry = gv11b_get_ch_runlist_entry,
.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
.dump_pbdma_status = gk20a_dump_pbdma_status,
.dump_eng_status = gv11b_dump_eng_status,
.dump_channel_status_ramfc = gv11b_dump_channel_status_ramfc,
.intr_0_error_mask = gv11b_fifo_intr_0_error_mask,
.is_preempt_pending = gv11b_fifo_is_preempt_pending,
.init_pbdma_intr_descs = gv11b_fifo_init_pbdma_intr_descs,
.reset_enable_hw = gk20a_init_fifo_reset_enable_hw,
.teardown_ch_tsg = gv11b_fifo_teardown_ch_tsg,
.handle_sched_error = gv11b_fifo_handle_sched_error,
.handle_pbdma_intr_0 = gv11b_fifo_handle_pbdma_intr_0,
.handle_pbdma_intr_1 = gv11b_fifo_handle_pbdma_intr_1,
.init_eng_method_buffers = gv11b_fifo_init_eng_method_buffers,
.deinit_eng_method_buffers =
gv11b_fifo_deinit_eng_method_buffers,
.tsg_bind_channel = gk20a_tsg_bind_channel,
.tsg_unbind_channel = gk20a_tsg_unbind_channel,
#ifdef CONFIG_TEGRA_GK20A_NVHOST
.alloc_syncpt_buf = gv11b_fifo_alloc_syncpt_buf,
.free_syncpt_buf = gv11b_fifo_free_syncpt_buf,
.add_syncpt_wait_cmd = gv11b_fifo_add_syncpt_wait_cmd,
.get_syncpt_wait_cmd_size = gv11b_fifo_get_syncpt_wait_cmd_size,
.add_syncpt_incr_cmd = gv11b_fifo_add_syncpt_incr_cmd,
.get_syncpt_incr_cmd_size = gv11b_fifo_get_syncpt_incr_cmd_size,
#endif
.resetup_ramfc = NULL,
.device_info_fault_id = top_device_info_data_fault_id_enum_v,
.free_channel_ctx_header = gv11b_free_subctx_header,
.preempt_ch_tsg = gv11b_fifo_preempt_ch_tsg,
.handle_ctxsw_timeout = gv11b_fifo_handle_ctxsw_timeout,
},
.gr_ctx = {
.get_netlist_name = gr_gv100_get_netlist_name,
.is_fw_defined = gr_gv100_is_firmware_defined,
},
#ifdef CONFIG_GK20A_CTXSW_TRACE
.fecs_trace = {
.alloc_user_buffer = NULL,
.free_user_buffer = NULL,
.mmap_user_buffer = NULL,
.init = NULL,
.deinit = NULL,
.enable = NULL,
.disable = NULL,
.is_enabled = NULL,
.reset = NULL,
.flush = NULL,
.poll = NULL,
.bind_channel = NULL,
.unbind_channel = NULL,
.max_entries = NULL,
},
#endif /* CONFIG_GK20A_CTXSW_TRACE */
.mm = {
.support_sparse = gm20b_mm_support_sparse,
.gmmu_map = gk20a_locked_gmmu_map,
.gmmu_unmap = gk20a_locked_gmmu_unmap,
.vm_bind_channel = gk20a_vm_bind_channel,
.fb_flush = gk20a_mm_fb_flush,
.l2_invalidate = gk20a_mm_l2_invalidate,
.l2_flush = gk20a_mm_l2_flush,
.cbc_clean = gk20a_mm_cbc_clean,
.set_big_page_size = gm20b_mm_set_big_page_size,
.get_big_page_sizes = gm20b_mm_get_big_page_sizes,
.get_default_big_page_size = gp10b_mm_get_default_big_page_size,
.gpu_phys_addr = gv11b_gpu_phys_addr,
.get_mmu_levels = gp10b_mm_get_mmu_levels,
.get_vidmem_size = gv100_mm_get_vidmem_size,
.init_pdb = gp10b_mm_init_pdb,
.init_mm_setup_hw = gv11b_init_mm_setup_hw,
.is_bar1_supported = gv11b_mm_is_bar1_supported,
.alloc_inst_block = gk20a_alloc_inst_block,
.init_inst_block = gv11b_init_inst_block,
.mmu_fault_pending = gv11b_mm_mmu_fault_pending,
.get_kind_invalid = gm20b_get_kind_invalid,
.get_kind_pitch = gm20b_get_kind_pitch,
.init_bar2_vm = gb10b_init_bar2_vm,
.init_bar2_mm_hw_setup = gv11b_init_bar2_mm_hw_setup,
.remove_bar2_vm = gv11b_mm_remove_bar2_vm,
.fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy,
.get_flush_retries = gv100_mm_get_flush_retries,
},
.pramin = {
.enter = gk20a_pramin_enter,
.exit = gk20a_pramin_exit,
.data032_r = pram_data032_r,
},
.pmu = {
.init_wpr_region = gm20b_pmu_init_acr,
.load_lsfalcon_ucode = gp106_load_falcon_ucode,
.is_lazy_bootstrap = gp106_is_lazy_bootstrap,
.is_priv_load = gp106_is_priv_load,
.prepare_ucode = gp106_prepare_ucode_blob,
.pmu_setup_hw_and_bootstrap = gp106_bootstrap_hs_flcn,
.get_wpr = gp106_wpr_info,
.alloc_blob_space = gp106_alloc_blob_space,
.pmu_populate_loader_cfg = gp106_pmu_populate_loader_cfg,
.flcn_populate_bl_dmem_desc = gp106_flcn_populate_bl_dmem_desc,
.falcon_wait_for_halt = sec2_wait_for_halt,
.falcon_clear_halt_interrupt_status =
sec2_clear_halt_interrupt_status,
.init_falcon_setup_hw = init_sec2_setup_hw1,
.pmu_queue_tail = gk20a_pmu_queue_tail,
.pmu_get_queue_head = pwr_pmu_queue_head_r,
.pmu_mutex_release = gk20a_pmu_mutex_release,
.is_pmu_supported = gp106_is_pmu_supported,
.pmu_pg_supported_engines_list = gp106_pmu_pg_engines_list,
.pmu_elpg_statistics = gp106_pmu_elpg_statistics,
.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
.pmu_is_lpwr_feature_supported =
gp106_pmu_is_lpwr_feature_supported,
.pmu_msgq_tail = gk20a_pmu_msgq_tail,
.pmu_pg_engines_feature_list = gp106_pmu_pg_feature_list,
.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
.pmu_queue_head = gk20a_pmu_queue_head,
.pmu_pg_param_post_init = nvgpu_lpwr_post_init,
.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
.pmu_pg_init_param = gp106_pg_param_init,
.reset_engine = gp106_pmu_engine_reset,
.write_dmatrfbase = gp10b_write_dmatrfbase,
.pmu_mutex_size = pwr_pmu_mutex__size_1_v,
.is_engine_in_reset = gp106_pmu_is_engine_in_reset,
.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
},
.clk = {
.init_clk_support = gp106_init_clk_support,
.get_crystal_clk_hz = gp106_crystal_clk_hz,
.measure_freq = gp106_clk_measure_freq,
.suspend_clk_support = gp106_suspend_clk_support,
},
.clk_arb = {
.get_arbiter_clk_domains = gp106_get_arbiter_clk_domains,
.get_arbiter_clk_range = gp106_get_arbiter_clk_range,
.get_arbiter_clk_default = gp106_get_arbiter_clk_default,
.get_current_pstate = nvgpu_clk_arb_get_current_pstate,
},
.regops = {
.get_global_whitelist_ranges =
gv100_get_global_whitelist_ranges,
.get_global_whitelist_ranges_count =
gv100_get_global_whitelist_ranges_count,
.get_context_whitelist_ranges =
gv100_get_context_whitelist_ranges,
.get_context_whitelist_ranges_count =
gv100_get_context_whitelist_ranges_count,
.get_runcontrol_whitelist = gv100_get_runcontrol_whitelist,
.get_runcontrol_whitelist_count =
gv100_get_runcontrol_whitelist_count,
.get_runcontrol_whitelist_ranges =
gv100_get_runcontrol_whitelist_ranges,
.get_runcontrol_whitelist_ranges_count =
gv100_get_runcontrol_whitelist_ranges_count,
.get_qctl_whitelist = gv100_get_qctl_whitelist,
.get_qctl_whitelist_count = gv100_get_qctl_whitelist_count,
.get_qctl_whitelist_ranges = gv100_get_qctl_whitelist_ranges,
.get_qctl_whitelist_ranges_count =
gv100_get_qctl_whitelist_ranges_count,
.apply_smpc_war = gv100_apply_smpc_war,
},
.mc = {
.intr_enable = mc_gv11b_intr_enable,
.intr_unit_config = mc_gp10b_intr_unit_config,
.isr_stall = mc_gp10b_isr_stall,
.intr_stall = mc_gp10b_intr_stall,
.intr_stall_pause = mc_gp10b_intr_stall_pause,
.intr_stall_resume = mc_gp10b_intr_stall_resume,
.intr_nonstall = mc_gp10b_intr_nonstall,
.intr_nonstall_pause = mc_gp10b_intr_nonstall_pause,
.intr_nonstall_resume = mc_gp10b_intr_nonstall_resume,
.enable = gk20a_mc_enable,
.disable = gk20a_mc_disable,
.reset = gk20a_mc_reset,
.boot_0 = gk20a_mc_boot_0,
.is_intr1_pending = mc_gp10b_is_intr1_pending,
.is_intr_hub_pending = gv11b_mc_is_intr_hub_pending,
},
.debug = {
.show_dump = gk20a_debug_show_dump,
},
.dbg_session_ops = {
.exec_reg_ops = exec_regops_gk20a,
.dbg_set_powergate = dbg_set_powergate,
.check_and_set_global_reservation =
nvgpu_check_and_set_global_reservation,
.check_and_set_context_reservation =
nvgpu_check_and_set_context_reservation,
.release_profiler_reservation =
nvgpu_release_profiler_reservation,
.perfbuffer_enable = gv11b_perfbuf_enable_locked,
.perfbuffer_disable = gv11b_perfbuf_disable_locked,
},
.bus = {
.init_hw = gk20a_bus_init_hw,
.isr = gk20a_bus_isr,
.read_ptimer = gk20a_read_ptimer,
.get_timestamps_zipper = nvgpu_get_timestamps_zipper,
.bar1_bind = NULL,
},
#if defined(CONFIG_GK20A_CYCLE_STATS)
.css = {
.enable_snapshot = gv11b_css_hw_enable_snapshot,
.disable_snapshot = gv11b_css_hw_disable_snapshot,
.check_data_available = gv11b_css_hw_check_data_available,
.set_handled_snapshots = css_hw_set_handled_snapshots,
.allocate_perfmon_ids = css_gr_allocate_perfmon_ids,
.release_perfmon_ids = css_gr_release_perfmon_ids,
},
#endif
.xve = {
.get_speed = xve_get_speed_gp106,
.set_speed = xve_set_speed_gp106,
.available_speeds = xve_available_speeds_gp106,
.xve_readl = xve_xve_readl_gp106,
.xve_writel = xve_xve_writel_gp106,
.disable_aspm = xve_disable_aspm_gp106,
.reset_gpu = xve_reset_gpu_gp106,
#if defined(CONFIG_PCI_MSI)
.rearm_msi = xve_rearm_msi_gp106,
#endif
.enable_shadow_rom = xve_enable_shadow_rom_gp106,
.disable_shadow_rom = xve_disable_shadow_rom_gp106,
},
.falcon = {
.falcon_hal_sw_init = gp106_falcon_hal_sw_init,
},
.priv_ring = {
.isr = gp10b_priv_ring_isr,
},
.chip_init_gpu_characteristics = gv100_init_gpu_characteristics,
.get_litter_value = gv100_get_litter_value,
};
/*
 * Install the GV100 HAL into the per-device ops table and set the
 * chip's enabled-feature flags.
 *
 * Copies each sub-ops table from the file-static gv100_ops template
 * into g->ops, then configures the secure-boot related flags for this
 * chip.  Always returns 0.
 *
 * NOTE(review): the gv100_ops template also initializes .clk_arb, but
 * it is never copied into gops here -- confirm whether clk_arb ops are
 * installed elsewhere or this is an omission.
 */
int gv100_init_hal(struct gk20a *g)
{
	struct gpu_ops *gops = &g->ops;
	gops->bios = gv100_ops.bios;
	gops->ltc = gv100_ops.ltc;
	gops->ce2 = gv100_ops.ce2;
	gops->gr = gv100_ops.gr;
	gops->fb = gv100_ops.fb;
	gops->clock_gating = gv100_ops.clock_gating;
	gops->fifo = gv100_ops.fifo;
	gops->gr_ctx = gv100_ops.gr_ctx;
	gops->mm = gv100_ops.mm;
#ifdef CONFIG_GK20A_CTXSW_TRACE
	gops->fecs_trace = gv100_ops.fecs_trace;
#endif
	gops->pramin = gv100_ops.pramin;
	gops->therm = gv100_ops.therm;
	gops->pmu = gv100_ops.pmu;
	gops->regops = gv100_ops.regops;
	gops->mc = gv100_ops.mc;
	gops->debug = gv100_ops.debug;
	gops->dbg_session_ops = gv100_ops.dbg_session_ops;
	gops->bus = gv100_ops.bus;
#if defined(CONFIG_GK20A_CYCLE_STATS)
	gops->css = gv100_ops.css;
#endif
	gops->xve = gv100_ops.xve;
	gops->falcon = gv100_ops.falcon;
	gops->priv_ring = gv100_ops.priv_ring;
	/* clocks: copied member-by-member, not as a whole struct */
	gops->clk.init_clk_support = gv100_ops.clk.init_clk_support;
	gops->clk.get_crystal_clk_hz = gv100_ops.clk.get_crystal_clk_hz;
	gops->clk.measure_freq = gv100_ops.clk.measure_freq;
	gops->clk.suspend_clk_support = gv100_ops.clk.suspend_clk_support;
	/* Lone functions */
	gops->chip_init_gpu_characteristics =
		gv100_ops.chip_init_gpu_characteristics;
	gops->get_litter_value = gv100_ops.get_litter_value;
	/* Secure boot configuration: privsecurity and secure GPCCS on */
	__nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, true);
	__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
	__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);
	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
	/* for now */
	__nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, false);
	g->pmu_lsf_pmu_wpr_init_done = 0;
	/* SEC2 falcon (not PMU) bootstraps the LS falcons on this chip */
	g->bootstrap_owner = LSF_FALCON_ID_SEC2;
	g->name = "gv10x";
	return 0;
}

View File

@@ -0,0 +1,30 @@
/*
* GV100 Tegra HAL interface
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
 * Include guard renamed from the copy-pasted GV11B name: if this header
 * reused the guard of the real GV11B HAL header, including both in one
 * translation unit would silently drop the second one's declarations.
 * The leading-underscore form is also a reserved identifier in C.
 */
#ifndef NVGPU_HAL_GV100_H
#define NVGPU_HAL_GV100_H

struct gk20a;

/* Install the GV100 HAL into g->ops; returns 0 on success. */
int gv100_init_hal(struct gk20a *g);

#endif /* NVGPU_HAL_GV100_H */

View File

@@ -0,0 +1,55 @@
/*
* GV100 memory management
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "gk20a/gk20a.h"
#include "gv100/mm_gv100.h"
#include <nvgpu/hw/gv100/hw_fb_gv100.h>
size_t gv100_mm_get_vidmem_size(struct gk20a *g)
{
u32 range = gk20a_readl(g, fb_mmu_local_memory_range_r());
u32 mag = fb_mmu_local_memory_range_lower_mag_v(range);
u32 scale = fb_mmu_local_memory_range_lower_scale_v(range);
u32 ecc = fb_mmu_local_memory_range_ecc_mode_v(range);
size_t bytes = ((size_t)mag << scale) * SZ_1M;
if (ecc)
bytes = bytes / 16 * 15;
return bytes;
}
/*
 * Retry count for the given flush operation.  GV100 has a large FB, so
 * FB flush and L2 flush get a longer timeout than the other ops.
 */
u32 gv100_mm_get_flush_retries(struct gk20a *g, enum nvgpu_flush_op op)
{
	if (op == NVGPU_FLUSH_FB || op == NVGPU_FLUSH_L2_FLUSH)
		return 2000;

	return 200; /* Default retry timer */
}

View File

@@ -0,0 +1,33 @@
/*
* GV100 memory management
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef MM_GV100_H
#define MM_GV100_H
/*
 * GV100 memory-management HAL entry points.
 * NOTE(review): this header relies on the includer providing size_t,
 * u32 and enum nvgpu_flush_op (mm_gv100.c includes gk20a.h first).
 */
struct gk20a;
/* Total vidmem size in bytes, read from the FB local-memory-range register. */
size_t gv100_mm_get_vidmem_size(struct gk20a *g);
/* Retry count for the given flush op; FB/L2 flushes use a longer timeout. */
u32 gv100_mm_get_flush_retries(struct gk20a *g, enum nvgpu_flush_op op);
#endif

View File

@@ -0,0 +1,463 @@
/*
* Tegra GV100 GPU Driver Register Ops
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "gk20a/gk20a.h"
#include "gk20a/dbg_gpu_gk20a.h"
#include "gk20a/regops_gk20a.h"
#include "regops_gv100.h"
/*
 * Global register-access whitelist for GV100 regops: each entry is a
 * { base offset, count } run of 32-bit registers that debugger clients
 * may read/write.  Entries are sorted by offset (required by the
 * regops lookup).  Also reused as the context whitelist (see
 * gv100_get_context_whitelist_ranges()).
 */
static const struct regop_offset_range gv100_global_whitelist_ranges[] = {
	{ 0x000004f0, 1},
	{ 0x00001a00, 3},
	{ 0x00002800, 128},
	{ 0x00009400, 1},
	{ 0x00009410, 1},
	{ 0x00009480, 1},
	{ 0x00020200, 24},
	{ 0x00021c00, 4},
	{ 0x00021c14, 3},
	{ 0x00021c24, 1},
	{ 0x00021c2c, 69},
	{ 0x00021d44, 1},
	{ 0x00021d4c, 1},
	{ 0x00021d54, 1},
	{ 0x00021d5c, 1},
	{ 0x00021d64, 2},
	{ 0x00021d70, 16},
	{ 0x00022430, 7},
	{ 0x00022450, 1},
	{ 0x0002245c, 2},
	{ 0x00070000, 5},
	{ 0x000884e0, 1},
	{ 0x0008e00c, 1},
	{ 0x00100c18, 3},
	{ 0x00100c84, 1},
	{ 0x00104038, 1},
	{ 0x0010a0a8, 1},
	{ 0x0010a4f0, 1},
	{ 0x0010e490, 1},
	{ 0x0013cc14, 1},
	{ 0x00140028, 1},
	{ 0x00140280, 1},
	{ 0x001402a0, 1},
	{ 0x00140350, 1},
	{ 0x00140480, 1},
	{ 0x001404a0, 1},
	{ 0x00140550, 1},
	{ 0x00142028, 1},
	{ 0x00142280, 1},
	{ 0x001422a0, 1},
	{ 0x00142350, 1},
	{ 0x00142480, 1},
	{ 0x001424a0, 1},
	{ 0x00142550, 1},
	{ 0x0017e028, 1},
	{ 0x0017e280, 1},
	{ 0x0017e294, 1},
	{ 0x0017e29c, 2},
	{ 0x0017e2ac, 1},
	{ 0x0017e350, 1},
	{ 0x0017e39c, 1},
	{ 0x0017e480, 1},
	{ 0x0017e4a0, 1},
	{ 0x0017e550, 1},
	{ 0x00180040, 41},
	{ 0x001800ec, 10},
	{ 0x00180240, 41},
	{ 0x001802ec, 10},
	{ 0x00180440, 41},
	{ 0x001804ec, 10},
	{ 0x00180640, 41},
	{ 0x001806ec, 10},
	{ 0x00180840, 41},
	{ 0x001808ec, 10},
	{ 0x00180a40, 41},
	{ 0x00180aec, 10},
	{ 0x00180c40, 41},
	{ 0x00180cec, 10},
	{ 0x00180e40, 41},
	{ 0x00180eec, 10},
	{ 0x001a0040, 41},
	{ 0x001a00ec, 10},
	{ 0x001a0240, 41},
	{ 0x001a02ec, 10},
	{ 0x001a0440, 41},
	{ 0x001a04ec, 10},
	{ 0x001a0640, 41},
	{ 0x001a06ec, 10},
	{ 0x001a0840, 41},
	{ 0x001a08ec, 10},
	{ 0x001a0a40, 41},
	{ 0x001a0aec, 10},
	{ 0x001a0c40, 41},
	{ 0x001a0cec, 10},
	{ 0x001a0e40, 41},
	{ 0x001a0eec, 10},
	{ 0x001b0040, 41},
	{ 0x001b00ec, 10},
	{ 0x001b0240, 41},
	{ 0x001b02ec, 10},
	{ 0x001b0440, 41},
	{ 0x001b04ec, 10},
	{ 0x001b0640, 41},
	{ 0x001b06ec, 10},
	{ 0x001b0840, 41},
	{ 0x001b08ec, 10},
	{ 0x001b0a40, 41},
	{ 0x001b0aec, 10},
	{ 0x001b0c40, 41},
	{ 0x001b0cec, 10},
	{ 0x001b0e40, 41},
	{ 0x001b0eec, 10},
	{ 0x001b4000, 1},
	{ 0x001b4008, 1},
	{ 0x001b4010, 3},
	{ 0x001b4020, 3},
	{ 0x001b4030, 3},
	{ 0x001b4040, 3},
	{ 0x001b4050, 3},
	{ 0x001b4060, 4},
	{ 0x001b4074, 7},
	{ 0x001b4094, 3},
	{ 0x001b40a4, 1},
	{ 0x001b4100, 6},
	{ 0x001b4128, 1},
	{ 0x001b8000, 1},
	{ 0x001b8008, 1},
	{ 0x001b8010, 2},
	{ 0x001bc000, 1},
	{ 0x001bc008, 1},
	{ 0x001bc010, 2},
	{ 0x001be000, 1},
	{ 0x001be008, 1},
	{ 0x001be010, 2},
	{ 0x00400500, 1},
	{ 0x0040415c, 1},
	{ 0x00404468, 1},
	{ 0x00404498, 1},
	{ 0x00405800, 1},
	{ 0x00405840, 2},
	{ 0x00405850, 1},
	{ 0x00405908, 1},
	{ 0x00405b40, 1},
	{ 0x00405b50, 1},
	{ 0x00406024, 5},
	{ 0x00407010, 1},
	{ 0x00407808, 1},
	{ 0x0040803c, 1},
	{ 0x00408804, 1},
	{ 0x0040880c, 1},
	{ 0x00408900, 2},
	{ 0x00408910, 1},
	{ 0x00408944, 1},
	{ 0x00408984, 1},
	{ 0x004090a8, 1},
	{ 0x004098a0, 1},
	{ 0x00409b00, 1},
	{ 0x0041000c, 1},
	{ 0x00410110, 1},
	{ 0x00410184, 1},
	{ 0x0041040c, 1},
	{ 0x00410510, 1},
	{ 0x00410584, 1},
	{ 0x00418000, 1},
	{ 0x00418008, 1},
	{ 0x00418380, 2},
	{ 0x00418400, 2},
	{ 0x004184a0, 1},
	{ 0x00418604, 1},
	{ 0x00418680, 1},
	{ 0x00418704, 1},
	{ 0x00418714, 1},
	{ 0x00418800, 1},
	{ 0x0041881c, 1},
	{ 0x00418830, 1},
	{ 0x00418884, 1},
	{ 0x004188b0, 1},
	{ 0x004188c8, 3},
	{ 0x004188fc, 1},
	{ 0x00418b04, 1},
	{ 0x00418c04, 1},
	{ 0x00418c10, 8},
	{ 0x00418c88, 1},
	{ 0x00418d00, 1},
	{ 0x00418e00, 1},
	{ 0x00418e08, 1},
	{ 0x00418e34, 1},
	{ 0x00418e40, 4},
	{ 0x00418e58, 16},
	{ 0x00418f08, 1},
	{ 0x00419000, 1},
	{ 0x0041900c, 1},
	{ 0x00419018, 1},
	{ 0x00419854, 1},
	{ 0x00419864, 1},
	{ 0x00419a04, 2},
	{ 0x00419a14, 1},
	{ 0x00419ab0, 1},
	{ 0x00419ab8, 3},
	{ 0x00419c0c, 1},
	{ 0x00419c8c, 2},
	{ 0x00419d00, 1},
	{ 0x00419d08, 2},
	{ 0x00419e00, 11},
	{ 0x00419e34, 2},
	{ 0x00419e44, 11},
	{ 0x00419e74, 10},
	{ 0x00419ea4, 1},
	{ 0x00419eac, 2},
	{ 0x00419ee8, 1},
	{ 0x00419ef0, 28},
	{ 0x00419f70, 1},
	{ 0x00419f78, 2},
	{ 0x00419f98, 2},
	{ 0x0041a02c, 2},
	{ 0x0041a0a8, 1},
	{ 0x0041a8a0, 3},
	{ 0x0041b014, 1},
	{ 0x0041b0a0, 1},
	{ 0x0041b0cc, 1},
	{ 0x0041b1dc, 1},
	{ 0x0041be0c, 3},
	{ 0x0041bea0, 1},
	{ 0x0041becc, 1},
	{ 0x0041bfdc, 1},
	{ 0x0041c054, 1},
	{ 0x0041c2b0, 1},
	{ 0x0041c2b8, 3},
	{ 0x0041c40c, 1},
	{ 0x0041c48c, 2},
	{ 0x0041c500, 1},
	{ 0x0041c508, 2},
	{ 0x0041c600, 11},
	{ 0x0041c634, 2},
	{ 0x0041c644, 11},
	{ 0x0041c674, 10},
	{ 0x0041c6a4, 1},
	{ 0x0041c6ac, 2},
	{ 0x0041c6e8, 1},
	{ 0x0041c6f0, 28},
	{ 0x0041c770, 1},
	{ 0x0041c778, 2},
	{ 0x0041c798, 2},
	{ 0x0041c854, 1},
	{ 0x0041cab0, 1},
	{ 0x0041cab8, 3},
	{ 0x0041cc0c, 1},
	{ 0x0041cc8c, 2},
	{ 0x0041cd00, 1},
	{ 0x0041cd08, 2},
	{ 0x0041ce00, 11},
	{ 0x0041ce34, 2},
	{ 0x0041ce44, 11},
	{ 0x0041ce74, 10},
	{ 0x0041cea4, 1},
	{ 0x0041ceac, 2},
	{ 0x0041cee8, 1},
	{ 0x0041cef0, 28},
	{ 0x0041cf70, 1},
	{ 0x0041cf78, 2},
	{ 0x0041cf98, 2},
	{ 0x00500384, 1},
	{ 0x005004a0, 1},
	{ 0x00500604, 1},
	{ 0x00500680, 1},
	{ 0x00500714, 1},
	{ 0x0050081c, 1},
	{ 0x00500884, 1},
	{ 0x005008b0, 1},
	{ 0x005008c8, 3},
	{ 0x005008fc, 1},
	{ 0x00500b04, 1},
	{ 0x00500c04, 1},
	{ 0x00500c10, 8},
	{ 0x00500c88, 1},
	{ 0x00500d00, 1},
	{ 0x00500e08, 1},
	{ 0x00500f08, 1},
	{ 0x00501000, 1},
	{ 0x0050100c, 1},
	{ 0x00501018, 1},
	{ 0x00501854, 1},
	{ 0x00501ab0, 1},
	{ 0x00501ab8, 3},
	{ 0x00501c0c, 1},
	{ 0x00501c8c, 2},
	{ 0x00501d00, 1},
	{ 0x00501d08, 2},
	{ 0x00501e00, 11},
	{ 0x00501e34, 2},
	{ 0x00501e44, 11},
	{ 0x00501e74, 10},
	{ 0x00501ea4, 1},
	{ 0x00501eac, 2},
	{ 0x00501ee8, 1},
	{ 0x00501ef0, 28},
	{ 0x00501f70, 1},
	{ 0x00501f78, 2},
	{ 0x00501f98, 2},
	{ 0x0050202c, 2},
	{ 0x005020a8, 1},
	{ 0x005028a0, 3},
	{ 0x00503014, 1},
	{ 0x005030a0, 1},
	{ 0x005030cc, 1},
	{ 0x005031dc, 1},
	{ 0x00503e14, 1},
	{ 0x00503ea0, 1},
	{ 0x00503ecc, 1},
	{ 0x00503fdc, 1},
	{ 0x00504054, 1},
	{ 0x005042b0, 1},
	{ 0x005042b8, 3},
	{ 0x0050440c, 1},
	{ 0x0050448c, 2},
	{ 0x00504500, 1},
	{ 0x00504508, 2},
	{ 0x00504600, 11},
	{ 0x00504634, 2},
	{ 0x00504644, 11},
	{ 0x00504674, 10},
	{ 0x005046a4, 1},
	{ 0x005046ac, 2},
	{ 0x005046e8, 1},
	{ 0x005046f0, 28},
	{ 0x00504770, 1},
	{ 0x00504778, 2},
	{ 0x00504798, 2},
	{ 0x00504854, 1},
	{ 0x00504ab0, 1},
	{ 0x00504ab8, 3},
	{ 0x00504c0c, 1},
	{ 0x00504c8c, 2},
	{ 0x00504d00, 1},
	{ 0x00504d08, 2},
	{ 0x00504e00, 11},
	{ 0x00504e34, 2},
	{ 0x00504e44, 11},
	{ 0x00504e74, 10},
	{ 0x00504ea4, 1},
	{ 0x00504eac, 2},
	{ 0x00504ee8, 1},
	{ 0x00504ef0, 28},
	{ 0x00504f70, 1},
	{ 0x00504f78, 2},
	{ 0x00504f98, 2},
	{ 0x00900100, 1},
	{ 0x009a0100, 1},};
/* Number of entries in the global (and context) whitelist. */
static const u32 gv100_global_whitelist_ranges_count =
	ARRAY_SIZE(gv100_global_whitelist_ranges);
/* context */
/* runcontrol */
/*
 * Run-control and quad-control whitelists are intentionally empty on
 * GV100: no individual registers or ranges are exposed, only the
 * global/context ranges above.
 */
static const u32 gv100_runcontrol_whitelist[] = {
};
static const u32 gv100_runcontrol_whitelist_count =
	ARRAY_SIZE(gv100_runcontrol_whitelist);
static const struct regop_offset_range gv100_runcontrol_whitelist_ranges[] = {
};
static const u32 gv100_runcontrol_whitelist_ranges_count =
	ARRAY_SIZE(gv100_runcontrol_whitelist_ranges);
/* quad ctl */
static const u32 gv100_qctl_whitelist[] = {
};
static const u32 gv100_qctl_whitelist_count =
	ARRAY_SIZE(gv100_qctl_whitelist);
static const struct regop_offset_range gv100_qctl_whitelist_ranges[] = {
};
static const u32 gv100_qctl_whitelist_ranges_count =
	ARRAY_SIZE(gv100_qctl_whitelist_ranges);
/*
 * Accessors below are wired into gpu_ops.regops; they simply expose
 * the file-static whitelist tables and their entry counts.
 */
const struct regop_offset_range *gv100_get_global_whitelist_ranges(void)
{
	return gv100_global_whitelist_ranges;
}
int gv100_get_global_whitelist_ranges_count(void)
{
	return gv100_global_whitelist_ranges_count;
}
/* The context whitelist aliases the global whitelist on GV100. */
const struct regop_offset_range *gv100_get_context_whitelist_ranges(void)
{
	return gv100_global_whitelist_ranges;
}
int gv100_get_context_whitelist_ranges_count(void)
{
	return gv100_global_whitelist_ranges_count;
}
const u32 *gv100_get_runcontrol_whitelist(void)
{
	return gv100_runcontrol_whitelist;
}
int gv100_get_runcontrol_whitelist_count(void)
{
	return gv100_runcontrol_whitelist_count;
}
const struct regop_offset_range *gv100_get_runcontrol_whitelist_ranges(void)
{
	return gv100_runcontrol_whitelist_ranges;
}
int gv100_get_runcontrol_whitelist_ranges_count(void)
{
	return gv100_runcontrol_whitelist_ranges_count;
}
const u32 *gv100_get_qctl_whitelist(void)
{
	return gv100_qctl_whitelist;
}
int gv100_get_qctl_whitelist_count(void)
{
	return gv100_qctl_whitelist_count;
}
const struct regop_offset_range *gv100_get_qctl_whitelist_ranges(void)
{
	return gv100_qctl_whitelist_ranges;
}
int gv100_get_qctl_whitelist_ranges_count(void)
{
	return gv100_qctl_whitelist_ranges_count;
}
/* SMPC workaround is a no-op on this chip; always succeeds. */
int gv100_apply_smpc_war(struct dbg_session_gk20a *dbg_s)
{
	/* Not needed on gv100 */
	return 0;
}

View File

@@ -0,0 +1,42 @@
/*
*
* Tegra GV100 GPU Driver Register Ops
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
 * Guard renamed: the original '__REGOPS_GV100_H_' is a reserved
 * identifier (double leading underscore), and the closing #endif
 * comment named the wrong header ('__REGOPS_GV11B_H_').
 * NOTE(review): declarations below rely on the includer providing u32,
 * struct regop_offset_range and struct dbg_session_gk20a.
 */
#ifndef REGOPS_GV100_H
#define REGOPS_GV100_H

/* Global whitelist table and its entry count. */
const struct regop_offset_range *gv100_get_global_whitelist_ranges(void);
int gv100_get_global_whitelist_ranges_count(void);
/* Context whitelist (aliases the global table on GV100). */
const struct regop_offset_range *gv100_get_context_whitelist_ranges(void);
int gv100_get_context_whitelist_ranges_count(void);
/* Run-control whitelists (empty on GV100). */
const u32 *gv100_get_runcontrol_whitelist(void);
int gv100_get_runcontrol_whitelist_count(void);
const struct regop_offset_range *gv100_get_runcontrol_whitelist_ranges(void);
int gv100_get_runcontrol_whitelist_ranges_count(void);
/* Quad-control whitelists (empty on GV100). */
const u32 *gv100_get_qctl_whitelist(void);
int gv100_get_qctl_whitelist_count(void);
const struct regop_offset_range *gv100_get_qctl_whitelist_ranges(void);
int gv100_get_qctl_whitelist_ranges_count(void);
/* SMPC workaround hook; no-op on GV100, returns 0. */
int gv100_apply_smpc_war(struct dbg_session_gk20a *dbg_s);

#endif /* REGOPS_GV100_H */

View File

@@ -0,0 +1,294 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
#include <nvgpu/types.h>
#include <linux/platform/tegra/mc.h>
#include <nvgpu/dma.h>
#include <nvgpu/gmmu.h>
#include <nvgpu/timers.h>
#include <nvgpu/nvgpu_common.h>
#include <nvgpu/kmem.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/acr/nvgpu_acr.h>
#include <nvgpu/firmware.h>
#include <nvgpu/mm.h>
#include "gk20a/gk20a.h"
#include "acr_gv11b.h"
#include "pmu_gv11b.h"
#include "gk20a/pmu_gk20a.h"
#include "gm20b/mm_gm20b.h"
#include "gm20b/acr_gm20b.h"
#include "gp106/acr_gp106.h"
#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>
/*Defines*/
/* PMU-channel debug logging helper. */
#define gv11b_dbg_pmu(fmt, arg...) \
	gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
/*
 * Fold the low/high 32-bit halves of 'value' into a falcon 64-bit DMA
 * address.  NOTE(review): this ORs into lo/hi rather than assigning --
 * it only produces 'value' if the destination fields start out zero;
 * confirm callers zero the descriptor first.
 */
static void flcn64_set_dma(struct falc_u64 *dma_addr, u64 value)
{
	dma_addr->lo |= u64_lo32(value);
	dma_addr->hi |= u64_hi32(value);
}
/*Externs*/
/*Forwards*/
/*
 * Load the ACR HS binary into memory and bootstrap the PMU falcon with
 * the HS bootloader.
 *
 * 'start'/'size' describe the ucode blob in the non-WPR region.  On
 * first call the ACR firmware is fetched, its signatures are patched,
 * a sysmem buffer is allocated/mapped, and the bootloader DMEM
 * descriptor is filled in; on later calls only the non-WPR blob size
 * is cleared and the cached state is reused.
 *
 * Returns 0 on success, -ENOENT if the firmware is missing, -ENOMEM on
 * allocation failure, -1 on signature patching failure, or the error
 * from pmu_exec_gen_bl().
 */
int gv11b_bootstrap_hs_flcn(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;
	struct vm_gk20a *vm = mm->pmu.vm;
	int err = 0;
	u64 *acr_dmem;
	u32 img_size_in_bytes = 0;
	u32 status, size, index;
	u64 start;
	struct acr_desc *acr = &g->acr;
	struct nvgpu_firmware *acr_fw = acr->acr_fw;
	struct flcn_bl_dmem_desc_v1 *bl_dmem_desc = &acr->bl_dmem_desc_v1;
	u32 *acr_ucode_header_t210_load;
	u32 *acr_ucode_data_t210_load;
	start = nvgpu_mem_get_addr(g, &acr->ucode_blob);
	size = acr->ucode_blob.size;
	gv11b_dbg_pmu("acr ucode blob start %llx\n", start);
	gv11b_dbg_pmu("acr ucode blob size %x\n", size);
	gv11b_dbg_pmu("");
	if (!acr_fw) {
		/*First time init case*/
		acr_fw = nvgpu_request_firmware(g,
				GM20B_HSBIN_PMU_UCODE_IMAGE, 0);
		if (!acr_fw) {
			nvgpu_err(g, "pmu ucode get fail");
			return -ENOENT;
		}
		acr->acr_fw = acr_fw;
		/* Parse headers out of the raw firmware image. */
		acr->hsbin_hdr = (struct bin_hdr *)acr_fw->data;
		acr->fw_hdr = (struct acr_fw_header *)(acr_fw->data +
				acr->hsbin_hdr->header_offset);
		acr_ucode_data_t210_load = (u32 *)(acr_fw->data +
				acr->hsbin_hdr->data_offset);
		acr_ucode_header_t210_load = (u32 *)(acr_fw->data +
				acr->fw_hdr->hdr_offset);
		/* Image is DMA'd; round its size up to a 256-byte multiple. */
		img_size_in_bytes = ALIGN((acr->hsbin_hdr->data_size), 256);
		gv11b_dbg_pmu("sig dbg offset %u\n",
				acr->fw_hdr->sig_dbg_offset);
		gv11b_dbg_pmu("sig dbg size %u\n", acr->fw_hdr->sig_dbg_size);
		gv11b_dbg_pmu("sig prod offset %u\n",
				acr->fw_hdr->sig_prod_offset);
		gv11b_dbg_pmu("sig prod size %u\n",
				acr->fw_hdr->sig_prod_size);
		gv11b_dbg_pmu("patch loc %u\n", acr->fw_hdr->patch_loc);
		gv11b_dbg_pmu("patch sig %u\n", acr->fw_hdr->patch_sig);
		gv11b_dbg_pmu("header offset %u\n", acr->fw_hdr->hdr_offset);
		gv11b_dbg_pmu("header size %u\n", acr->fw_hdr->hdr_size);
		/* Lets patch the signatures first.. */
		if (acr_ucode_patch_sig(g, acr_ucode_data_t210_load,
				(u32 *)(acr_fw->data +
						acr->fw_hdr->sig_prod_offset),
				(u32 *)(acr_fw->data +
						acr->fw_hdr->sig_dbg_offset),
				(u32 *)(acr_fw->data +
						acr->fw_hdr->patch_loc),
				(u32 *)(acr_fw->data +
						acr->fw_hdr->patch_sig)) < 0) {
			nvgpu_err(g, "patch signatures fail");
			err = -1;
			goto err_release_acr_fw;
		}
		err = nvgpu_dma_alloc_map_sys(vm, img_size_in_bytes,
				&acr->acr_ucode);
		if (err) {
			err = -ENOMEM;
			goto err_release_acr_fw;
		}
		for (index = 0; index < 9; index++)
			gv11b_dbg_pmu("acr_ucode_header_t210_load %u\n",
					acr_ucode_header_t210_load[index]);
		/* header word [2] is the offset of the DMEM descriptor */
		acr_dmem = (u64 *)
			&(((u8 *)acr_ucode_data_t210_load)[
					acr_ucode_header_t210_load[2]]);
		acr->acr_dmem_desc_v1 = (struct flcn_acr_desc_v1 *)((u8 *)(
			acr->acr_ucode.cpu_va) + acr_ucode_header_t210_load[2]);
		/* Point the ACR descriptor at the non-WPR ucode blob. */
		((struct flcn_acr_desc_v1 *)acr_dmem)->nonwpr_ucode_blob_start =
			(start);
		((struct flcn_acr_desc_v1 *)acr_dmem)->nonwpr_ucode_blob_size =
			size;
		((struct flcn_acr_desc_v1 *)acr_dmem)->regions.no_regions = 2;
		((struct flcn_acr_desc_v1 *)acr_dmem)->wpr_offset = 0;
		nvgpu_mem_wr_n(g, &acr->acr_ucode, 0,
				acr_ucode_data_t210_load, img_size_in_bytes);
		/*
		 * In order to execute this binary, we will be using
		 * a bootloader which will load this image into PMU IMEM/DMEM.
		 * Fill up the bootloader descriptor for PMU HAL to use..
		 * TODO: Use standard descriptor which the generic bootloader is
		 * checked in.
		 */
		bl_dmem_desc->signature[0] = 0;
		bl_dmem_desc->signature[1] = 0;
		bl_dmem_desc->signature[2] = 0;
		bl_dmem_desc->signature[3] = 0;
		bl_dmem_desc->ctx_dma = GK20A_PMU_DMAIDX_VIRT;
		flcn64_set_dma(&bl_dmem_desc->code_dma_base,
				acr->acr_ucode.gpu_va);
		bl_dmem_desc->non_sec_code_off = acr_ucode_header_t210_load[0];
		bl_dmem_desc->non_sec_code_size = acr_ucode_header_t210_load[1];
		bl_dmem_desc->sec_code_off = acr_ucode_header_t210_load[5];
		bl_dmem_desc->sec_code_size = acr_ucode_header_t210_load[6];
		bl_dmem_desc->code_entry_point = 0; /* Start at 0th offset */
		flcn64_set_dma(&bl_dmem_desc->data_dma_base,
				acr->acr_ucode.gpu_va +
				acr_ucode_header_t210_load[2]);
		bl_dmem_desc->data_size = acr_ucode_header_t210_load[3];
	} else
		acr->acr_dmem_desc_v1->nonwpr_ucode_blob_size = 0;
	status = pmu_exec_gen_bl(g, bl_dmem_desc, 1);
	if (status != 0) {
		err = status;
		goto err_free_ucode_map;
	}
	return 0;
err_free_ucode_map:
	nvgpu_dma_unmap_free(vm, &acr->acr_ucode);
err_release_acr_fw:
	nvgpu_release_firmware(g, acr_fw);
	acr->acr_fw = NULL;
	return err;
}
/*
 * Copy the HS bootloader into the PMU falcon and start it.
 *
 * Programs the falcon interface-enable and instance-block registers,
 * writes the bootloader DMEM descriptor to offset 0 of DMEM, copies the
 * bootloader image to the top of IMEM tagged with its start tag, and
 * kicks off the falcon at the bootloader's virtual entry address.
 * 'bl_sz' is the bootloader size in bytes.  Always returns 0.
 */
static int bl_bootstrap(struct nvgpu_pmu *pmu,
	struct flcn_bl_dmem_desc_v1 *pbl_desc, u32 bl_sz)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	struct acr_desc *acr = &g->acr;
	struct mm_gk20a *mm = &g->mm;
	u32 virt_addr = 0;
	struct hsflcn_bl_desc *pmu_bl_gm10x_desc = g->acr.pmu_hsbl_desc;
	u32 dst;
	gk20a_dbg_fn("");
	gk20a_writel(g, pwr_falcon_itfen_r(),
		gk20a_readl(g, pwr_falcon_itfen_r()) |
		pwr_falcon_itfen_ctxen_enable_f());
	/* instance block pointer is programmed in units of 4K pages */
	gk20a_writel(g, pwr_pmu_new_instblk_r(),
		pwr_pmu_new_instblk_ptr_f(
		nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12) |
		pwr_pmu_new_instblk_valid_f(1) |
		pwr_pmu_new_instblk_target_sys_ncoh_f());
	/*copy bootloader interface structure to dmem*/
	nvgpu_flcn_copy_to_dmem(pmu->flcn, 0, (u8 *)pbl_desc,
		sizeof(struct flcn_bl_dmem_desc_v1), 0);
	/* copy bootloader to TOP of IMEM (hwcfg reports size in 256B blocks) */
	dst = (pwr_falcon_hwcfg_imem_size_v(
		gk20a_readl(g, pwr_falcon_hwcfg_r())) << 8) - bl_sz;
	nvgpu_flcn_copy_to_imem(pmu->flcn, dst,
		(u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0,
		pmu_bl_gm10x_desc->bl_start_tag);
	gv11b_dbg_pmu("Before starting falcon with BL\n");
	/* entry point: start tag expressed as a byte address */
	virt_addr = pmu_bl_gm10x_desc->bl_start_tag << 8;
	nvgpu_flcn_bootstrap(pmu->flcn, virt_addr);
	return 0;
}
/*
 * Reset the PMU falcon, program its FBIF DMA apertures, copy the PMU
 * command-line arguments to DMEM, and boot the falcon via the HS
 * bootloader descriptor.
 *
 * @g:     GPU driver struct
 * @desc:  bootloader DMEM descriptor passed through to bl_bootstrap()
 * @bl_sz: bootloader image size in bytes
 *
 * Returns 0 on success or the bl_bootstrap() error code.
 */
int gv11b_init_pmu_setup_hw1(struct gk20a *g,
		void *desc, u32 bl_sz)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	int err;

	gk20a_dbg_fn("");

	/* reset under the ISR mutex so the interrupt path sees a
	 * consistent isr_enabled flag */
	nvgpu_mutex_acquire(&pmu->isr_mutex);
	nvgpu_flcn_reset(pmu->flcn);
	pmu->isr_enabled = true;
	nvgpu_mutex_release(&pmu->isr_mutex);

	/* setup apertures - virtual */
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_UCODE),
			pwr_fbif_transcfg_mem_type_physical_f() |
			pwr_fbif_transcfg_target_noncoherent_sysmem_f());
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_VIRT),
			pwr_fbif_transcfg_mem_type_virtual_f());
	/* setup apertures - physical */
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_VID),
			pwr_fbif_transcfg_mem_type_physical_f() |
			pwr_fbif_transcfg_target_noncoherent_sysmem_f());
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_COH),
			pwr_fbif_transcfg_mem_type_physical_f() |
			pwr_fbif_transcfg_target_coherent_sysmem_f());
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_NCOH),
			pwr_fbif_transcfg_mem_type_physical_f() |
			pwr_fbif_transcfg_target_noncoherent_sysmem_f());

	/*Copying pmu cmdline args*/
	g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq(pmu,
		g->ops.clk.get_rate(g, CTRL_CLK_DOMAIN_PWRCLK));
	g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode(pmu, 1);
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_size(
		pmu, GK20A_PMU_TRACE_BUFSIZE);
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
		pmu, GK20A_PMU_DMAIDX_VIRT);
	nvgpu_flcn_copy_to_dmem(pmu->flcn, g->acr.pmu_args,
		(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
		g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);

	/*disable irqs for hs falcon booting as we will poll for halt*/
	nvgpu_mutex_acquire(&pmu->isr_mutex);
	pmu_enable_irq(pmu, false);
	pmu->isr_enabled = false;
	nvgpu_mutex_release(&pmu->isr_mutex);

	/*Clearing mailbox register used to reflect capabilities*/
	gk20a_writel(g, pwr_falcon_mailbox1_r(), 0);

	err = bl_bootstrap(pmu, desc, bl_sz);
	if (err)
		return err;

	return 0;
}

View File

@@ -0,0 +1,30 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __ACR_GV11B_H_
#define __ACR_GV11B_H_

/* Build/load the ACR ucode blob and run the HS falcon bootstrap. */
int gv11b_bootstrap_hs_flcn(struct gk20a *g);
/* Reset the PMU, program FBIF apertures, and boot it via the bootloader. */
int gv11b_init_pmu_setup_hw1(struct gk20a *g,
		void *desc, u32 bl_sz);

/* NOTE: endif comment previously said __PMU_GP106_H_ (copy-paste) */
#endif /* __ACR_GV11B_H_ */

View File

@@ -0,0 +1,110 @@
/*
* Volta GPU series Copy Engine.
*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvgpu/log.h"
#include "nvgpu/bitops.h"
#include "gk20a/gk20a.h"
#include "gp10b/ce_gp10b.h"
#include "ce_gv11b.h"
#include <nvgpu/hw/gv11b/hw_ce_gv11b.h>
#include <nvgpu/hw/gv11b/hw_top_gv11b.h>
/*
 * Return the number of physical copy engines that are present and not
 * floorswept, by counting set bits in the PCE map register.
 */
u32 gv11b_ce_get_num_pce(struct gk20a *g)
{
	/* ce_pce_map_r() holds one bit per present physical copy engine */
	u32 pce_mask = gk20a_readl(g, ce_pce_map_r());
	u32 pce_count = hweight32(pce_mask);

	nvgpu_log_info(g, "num PCE: %d", pce_count);
	return pce_count;
}
/*
 * GV11B copy-engine interrupt handler: clear the GV11B-specific fatal
 * interrupts (invalid PCE2LCE config, method buffer fault) and then
 * delegate to the GP10B handler for the remaining bits.
 */
void gv11b_ce_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
{
	u32 reset_mask = 0;
	u32 intr_status = gk20a_readl(g, ce_intr_status_r(inst_id));

	nvgpu_log(g, gpu_dbg_intr, "ce isr 0x%08x 0x%08x", intr_status, inst_id);

	/* An INVALID_CONFIG interrupt is raised when a floorswept PCE is
	 * assigned to a valid LCE in the NV_CE_PCE2LCE_CONFIG registers.
	 * Fatal: the LCE must be reset to recover. */
	if ((intr_status & ce_intr_status_invalid_config_pending_f()) != 0) {
		nvgpu_log(g, gpu_dbg_intr,
			"ce: inst %d: invalid config", inst_id);
		reset_mask |= ce_intr_status_invalid_config_reset_f();
	}

	/* A MTHD_BUFFER_FAULT is raised when a method-buffer access during
	 * context load/save faults. Fatal: at least the LCE (possibly the
	 * whole GPU) must be reset before operation resumes. */
	if ((intr_status & ce_intr_status_mthd_buffer_fault_pending_f()) != 0) {
		nvgpu_log(g, gpu_dbg_intr,
			"ce: inst %d: mthd buffer fault", inst_id);
		reset_mask |= ce_intr_status_mthd_buffer_fault_reset_f();
	}

	/* ack the bits handled here, then run the common gp10b handler */
	gk20a_writel(g, ce_intr_status_r(inst_id), reset_mask);
	gp10b_ce_isr(g, inst_id, pri_base);
}
/* Return the number of logical copy engines reported by the TOP unit. */
u32 gv11b_ce_get_num_lce(struct gk20a *g)
{
	u32 lce_count = top_num_ces_value_v(gk20a_readl(g, top_num_ces_r()));

	nvgpu_log_info(g, "num LCE: %d", lce_count);
	return lce_count;
}
/*
 * Scan every logical copy engine and acknowledge any pending
 * method-buffer-fault interrupt (invoked from the BAR2 fault path).
 */
void gv11b_ce_mthd_buffer_fault_in_bar2_fault(struct gk20a *g)
{
	u32 lce;
	u32 lce_count = gv11b_ce_get_num_lce(g);

	for (lce = 0; lce < lce_count; lce++) {
		u32 status = gk20a_readl(g, ce_intr_status_r(lce));

		if ((status & ce_intr_status_mthd_buffer_fault_pending_f()) == 0)
			continue;

		nvgpu_log(g, gpu_dbg_intr,
			"ce: lce %d: mthd buffer fault", lce);
		gk20a_writel(g, ce_intr_status_r(lce),
			ce_intr_status_mthd_buffer_fault_reset_f());
	}
}

View File

@@ -0,0 +1,35 @@
/*
*
* Volta GPU series copy engine
*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __CE_GV11B_H__
#define __CE_GV11B_H__

struct gk20a;

/* Clear pending CE method-buffer-fault interrupts (BAR2 fault path). */
void gv11b_ce_mthd_buffer_fault_in_bar2_fault(struct gk20a *g);
/* Number of logical copy engines (from TOP unit). */
u32 gv11b_ce_get_num_lce(struct gk20a *g);
/* Number of physical copy engines present (not floorswept). */
u32 gv11b_ce_get_num_pce(struct gk20a *g);
/* CE interrupt service routine; chains to the gp10b handler. */
void gv11b_ce_isr(struct gk20a *g, u32 inst_id, u32 pri_base);

/* NOTE: endif comment previously said __CE2_GV11B_H__ (copy-paste) */
#endif /* __CE_GV11B_H__ */

View File

@@ -0,0 +1,206 @@
/*
* GV11B Cycle stats snapshots support
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <nvgpu/bitops.h>
#include <nvgpu/kmem.h>
#include <nvgpu/lock.h>
#include <nvgpu/dma.h>
#include <nvgpu/mm.h>
#include "gk20a/gk20a.h"
#include "gk20a/css_gr_gk20a.h"
#include "css_gr_gv11b.h"
#include <nvgpu/log.h>
#include <nvgpu/bug.h>
#include <nvgpu/hw/gv11b/hw_perf_gv11b.h>
#include <nvgpu/hw/gv11b/hw_mc_gv11b.h>
/* reports whether the hw snapshot queue overflowed */
static inline bool css_hw_get_overflow_status(struct gk20a *g)
{
	const u32 overflowed = perf_pmasys_control_membuf_status_overflowed_f();

	return (gk20a_readl(g, perf_pmasys_control_r()) & overflowed)
		== overflowed;
}
/* returns how many snapshot entries are pending in the hw fifo */
static inline u32 css_hw_get_pending_snapshots(struct gk20a *g)
{
	u32 bytes = gk20a_readl(g, perf_pmasys_mem_bytes_r());

	return bytes / sizeof(struct gk20a_cs_snapshot_fifo_entry);
}
/* informs hw how many snapshots have been processed (frees up fifo space) */
static inline void gv11b_css_hw_set_handled_snapshots(struct gk20a *g, u32 done)
{
	if (done == 0)
		return;

	gk20a_writel(g, perf_pmasys_mem_bump_r(),
		     done * sizeof(struct gk20a_cs_snapshot_fifo_entry));
}
/* disable streaming to memory */
static void gv11b_css_hw_reset_streaming(struct gk20a *g)
{
	u32 engine_status;

	/* reset the perfmon */
	g->ops.mc.reset(g, mc_enable_perfmon_enabled_f());

	/* RBUFEMPTY must be set -- otherwise we'll pick up */
	/* snapshot that have been queued up from earlier */
	/* NOTE(review): engine_status is read but not checked here —
	 * presumably the read itself is what matters (drain/ordering);
	 * confirm against the PMA programming guide. */
	engine_status = gk20a_readl(g, perf_pmasys_enginestatus_r());

	/* turn off writes */
	gk20a_writel(g, perf_pmasys_control_r(),
			perf_pmasys_control_membuf_clear_status_doit_f());

	/* pointing all pending snapshots as handled */
	gv11b_css_hw_set_handled_snapshots(g, css_hw_get_pending_snapshots(g));
}
/*
 * Allocate the hardware cycle-stats snapshot buffer (if not already
 * allocated) and program the PMA unit to stream snapshots into it.
 *
 * @ch:        channel requesting snapshots
 * @cs_client: client whose snapshot_size is the requested buffer size
 *             (clamped up to CSS_MIN_HW_SNAPSHOT_SIZE)
 *
 * Returns 0 on success (including the already-enabled case), a negative
 * errno on allocation/validation failure.
 */
int gv11b_css_hw_enable_snapshot(struct channel_gk20a *ch,
				struct gk20a_cs_snapshot_client *cs_client)
{
	struct gk20a *g = ch->g;
	struct gr_gk20a *gr = &g->gr;
	struct gk20a_cs_snapshot *data = gr->cs_data;
	u32 snapshot_size = cs_client->snapshot_size;
	int ret;

	u32 virt_addr_lo;
	u32 virt_addr_hi;
	u32 inst_pa_page;

	/* buffer already set up: nothing to do */
	if (data->hw_snapshot)
		return 0;

	if (snapshot_size < CSS_MIN_HW_SNAPSHOT_SIZE)
		snapshot_size = CSS_MIN_HW_SNAPSHOT_SIZE;

	ret = nvgpu_dma_alloc_map_sys(g->mm.pmu.vm, snapshot_size,
			&data->hw_memdesc);
	if (ret)
		return ret;

	/* perf output buffer may not cross a 4GB boundary - with a separate */
	/* va smaller than that, it won't but check anyway */
	if (!data->hw_memdesc.cpu_va ||
		data->hw_memdesc.size < snapshot_size ||
		data->hw_memdesc.gpu_va + u64_lo32(snapshot_size) > SZ_4G) {
		ret = -EFAULT;
		goto failed_allocation;
	}

	/* carve the buffer into a fifo of snapshot entries */
	data->hw_snapshot =
		(struct gk20a_cs_snapshot_fifo_entry *)data->hw_memdesc.cpu_va;
	data->hw_end = data->hw_snapshot +
		snapshot_size / sizeof(struct gk20a_cs_snapshot_fifo_entry);
	data->hw_get = data->hw_snapshot;
	/* 0xff fill so unwritten entries are recognizable */
	memset(data->hw_snapshot, 0xff, snapshot_size);

	virt_addr_lo = u64_lo32(data->hw_memdesc.gpu_va);
	virt_addr_hi = u64_hi32(data->hw_memdesc.gpu_va);

	/* quiesce the unit before reprogramming its output window */
	gv11b_css_hw_reset_streaming(g);

	gk20a_writel(g, perf_pmasys_outbase_r(), virt_addr_lo);
	gk20a_writel(g, perf_pmasys_outbaseupper_r(),
			perf_pmasys_outbaseupper_ptr_f(virt_addr_hi));
	gk20a_writel(g, perf_pmasys_outsize_r(), snapshot_size);

	/* this field is aligned to 4K */
	inst_pa_page = nvgpu_inst_block_addr(g, &g->mm.hwpm.inst_block) >> 12;

	gk20a_writel(g, perf_pmasys_mem_block_r(),
			perf_pmasys_mem_block_base_f(inst_pa_page) |
			perf_pmasys_mem_block_valid_true_f() |
			nvgpu_aperture_mask(g, &g->mm.hwpm.inst_block,
				perf_pmasys_mem_block_target_sys_ncoh_f(),
				perf_pmasys_mem_block_target_lfb_f()));

	gk20a_dbg_info("cyclestats: buffer for hardware snapshots enabled\n");

	return 0;

failed_allocation:
	if (data->hw_memdesc.size) {
		nvgpu_dma_unmap_free(g->mm.pmu.vm, &data->hw_memdesc);
		memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
	}
	data->hw_snapshot = NULL;

	return ret;
}
/*
 * Stop PMA snapshot streaming, deprogram the output window, and free
 * the hardware snapshot buffer. No-op if the buffer was never enabled.
 */
void gv11b_css_hw_disable_snapshot(struct gr_gk20a *gr)
{
	struct gk20a *g = gr->g;
	struct gk20a_cs_snapshot *data = gr->cs_data;

	if (!data->hw_snapshot)
		return;

	/* quiesce the unit before tearing down the output window */
	gv11b_css_hw_reset_streaming(g);

	gk20a_writel(g, perf_pmasys_outbase_r(), 0);
	gk20a_writel(g, perf_pmasys_outbaseupper_r(),
			perf_pmasys_outbaseupper_ptr_f(0));
	gk20a_writel(g, perf_pmasys_outsize_r(), 0);

	gk20a_writel(g, perf_pmasys_mem_block_r(),
			perf_pmasys_mem_block_base_f(0) |
			perf_pmasys_mem_block_valid_false_f() |
			perf_pmasys_mem_block_target_f(0));

	nvgpu_dma_unmap_free(g->mm.pmu.vm, &data->hw_memdesc);
	memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
	data->hw_snapshot = NULL;

	gk20a_dbg_info("cyclestats: buffer for hardware snapshots disabled\n");
}
/*
 * Report how many snapshot entries are pending and, when any are,
 * whether the hardware fifo has overflowed. *hw_overflow is only
 * written when *pending is non-zero.
 */
int gv11b_css_hw_check_data_available(struct channel_gk20a *ch, u32 *pending,
					bool *hw_overflow)
{
	struct gk20a *g = ch->g;
	struct gk20a_cs_snapshot *css = g->gr.cs_data;

	if (!css->hw_snapshot)
		return -EINVAL;

	*pending = css_hw_get_pending_snapshots(g);
	if (*pending != 0)
		*hw_overflow = css_hw_get_overflow_status(g);

	return 0;
}

View File

@@ -0,0 +1,34 @@
/*
* GV11B Cycle stats snapshots support
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef CSS_GR_GV11B_H
#define CSS_GR_GV11B_H

/* Allocate + program the HW cycle-stats snapshot buffer (no-op if active). */
int gv11b_css_hw_enable_snapshot(struct channel_gk20a *ch,
			struct gk20a_cs_snapshot_client *cs_client);
/* Stop streaming and free the HW snapshot buffer. */
void gv11b_css_hw_disable_snapshot(struct gr_gk20a *gr);
/* Report pending snapshot count; sets *hw_overflow only when pending. */
int gv11b_css_hw_check_data_available(struct channel_gk20a *ch, u32 *pending,
				bool *hw_overflow);

#endif /* CSS_GR_GV11B_H */

View File

@@ -0,0 +1,99 @@
/*
* Tegra GV11B GPU Debugger/Profiler Driver
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <uapi/linux/nvgpu.h>
#include <nvgpu/log.h>
#include "gk20a/gk20a.h"
#include <nvgpu/hw/gv11b/hw_perf_gv11b.h>
/*
 * Program the PMA unit to stream profiling data into a buffer at GPU VA
 * @offset of @size bytes. Named *_locked: caller is expected to hold
 * dbg_sessions_lock (as for gv11b_perfbuf_disable_locked).
 *
 * Returns 0 on success or a negative errno.
 */
int gv11b_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size)
{
	struct mm_gk20a *mm = &g->mm;
	u32 virt_addr_lo;
	u32 virt_addr_hi;
	u32 inst_pa_page;
	int err;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");

	err = gk20a_busy(g);
	if (err) {
		nvgpu_err(g, "failed to poweron");
		return err;
	}

	err = gk20a_alloc_inst_block(g, &mm->perfbuf.inst_block);
	if (err) {
		/* drop the busy reference taken above; previously this
		 * path returned without gk20a_idle() and leaked it */
		gk20a_idle(g);
		return err;
	}

	g->ops.mm.init_inst_block(&mm->perfbuf.inst_block, mm->perfbuf.vm, 0);

	virt_addr_lo = u64_lo32(offset);
	virt_addr_hi = u64_hi32(offset);

	gk20a_writel(g, perf_pmasys_outbase_r(), virt_addr_lo);
	gk20a_writel(g, perf_pmasys_outbaseupper_r(),
			perf_pmasys_outbaseupper_ptr_f(virt_addr_hi));
	gk20a_writel(g, perf_pmasys_outsize_r(), size);

	/* this field is aligned to 4K */
	inst_pa_page = nvgpu_inst_block_addr(g, &mm->perfbuf.inst_block) >> 12;

	gk20a_writel(g, perf_pmasys_mem_block_r(),
			perf_pmasys_mem_block_base_f(inst_pa_page) |
			perf_pmasys_mem_block_valid_true_f() |
			nvgpu_aperture_mask(g, &mm->perfbuf.inst_block,
				perf_pmasys_mem_block_target_sys_ncoh_f(),
				perf_pmasys_mem_block_target_lfb_f()));

	gk20a_idle(g);
	return 0;
}
/* must be called with dbg_sessions_lock held */
/*
 * Deprogram the PMA streaming window: zero the output base/size and
 * invalidate the memory-block pointer. Returns 0 or a poweron error.
 */
int gv11b_perfbuf_disable_locked(struct gk20a *g)
{
	int err;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
	err = gk20a_busy(g);
	if (err) {
		nvgpu_err(g, "failed to poweron");
		return err;
	}

	gk20a_writel(g, perf_pmasys_outbase_r(), 0);
	gk20a_writel(g, perf_pmasys_outbaseupper_r(),
			perf_pmasys_outbaseupper_ptr_f(0));
	gk20a_writel(g, perf_pmasys_outsize_r(), 0);

	gk20a_writel(g, perf_pmasys_mem_block_r(),
			perf_pmasys_mem_block_base_f(0) |
			perf_pmasys_mem_block_valid_false_f() |
			perf_pmasys_mem_block_target_f(0));

	gk20a_idle(g);
	return 0;
}

View File

@@ -0,0 +1,28 @@
/*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef DBG_GPU_GV11B_H
#define DBG_GPU_GV11B_H

/* Program PMA streaming to a buffer at @offset/@size.
 * *_locked: caller presumably holds dbg_sessions_lock — see the
 * matching comment on gv11b_perfbuf_disable_locked in the .c file. */
int gv11b_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size);
/* Tear down PMA streaming; must be called with dbg_sessions_lock held. */
int gv11b_perfbuf_disable_locked(struct gk20a *g);

#endif /* DBG_GPU_GV11B_H */

View File

@@ -0,0 +1,64 @@
/*
* GV11B GPU ECC
*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NVGPU_ECC_GV11B_H_
#define _NVGPU_ECC_GV11B_H_

/* Per-GR ECC counters: corrected/uncorrected pairs for each SM
 * sub-unit, the GCC L1.5, the FECS/GPCCS ucode stores, and the
 * MMU L1 TLB. */
struct ecc_gr_t19x {
	struct gk20a_ecc_stat sm_l1_tag_corrected_err_count;
	struct gk20a_ecc_stat sm_l1_tag_uncorrected_err_count;
	struct gk20a_ecc_stat sm_cbu_corrected_err_count;
	struct gk20a_ecc_stat sm_cbu_uncorrected_err_count;
	struct gk20a_ecc_stat sm_l1_data_corrected_err_count;
	struct gk20a_ecc_stat sm_l1_data_uncorrected_err_count;
	struct gk20a_ecc_stat sm_icache_corrected_err_count;
	struct gk20a_ecc_stat sm_icache_uncorrected_err_count;
	struct gk20a_ecc_stat gcc_l15_corrected_err_count;
	struct gk20a_ecc_stat gcc_l15_uncorrected_err_count;
	struct gk20a_ecc_stat fecs_corrected_err_count;
	struct gk20a_ecc_stat fecs_uncorrected_err_count;
	struct gk20a_ecc_stat gpccs_corrected_err_count;
	struct gk20a_ecc_stat gpccs_uncorrected_err_count;
	struct gk20a_ecc_stat mmu_l1tlb_corrected_err_count;
	struct gk20a_ecc_stat mmu_l1tlb_uncorrected_err_count;
};

/* Per-LTC ECC counters for the L2 cache. */
struct ecc_ltc_t19x {
	struct gk20a_ecc_stat l2_cache_corrected_err_count;
	struct gk20a_ecc_stat l2_cache_uncorrected_err_count;
};

/* TODO: PMU and FB ECC features are still under embargo */
struct ecc_eng_t19x {
	/* FB */
	struct gk20a_ecc_stat mmu_l2tlb_corrected_err_count;
	struct gk20a_ecc_stat mmu_l2tlb_uncorrected_err_count;
	struct gk20a_ecc_stat mmu_hubtlb_corrected_err_count;
	struct gk20a_ecc_stat mmu_hubtlb_uncorrected_err_count;
	struct gk20a_ecc_stat mmu_fillunit_corrected_err_count;
	struct gk20a_ecc_stat mmu_fillunit_uncorrected_err_count;
	/* PMU */
};

#endif /* _NVGPU_ECC_GV11B_H_ */

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,72 @@
/*
* GV11B FB
*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NVGPU_GV11B_FB
#define _NVGPU_GV11B_FB

/* interrupt register bank indices */
#define STALL_REG_INDEX			0
#define NONSTALL_REG_INDEX		1

/* MMU fault buffer indices */
#define NONREPLAY_REG_INDEX		0
#define REPLAY_REG_INDEX		1

/* fault buffer enable/valid states */
#define FAULT_BUF_DISABLED		0
#define FAULT_BUF_ENABLED		1

#define FAULT_BUF_INVALID		0
#define FAULT_BUF_VALID			1

/* HUB interrupt type bitmask (one bit per source) */
#define HUB_INTR_TYPE_OTHER		1	/* bit 0 */
#define HUB_INTR_TYPE_NONREPLAY		2	/* bit 1 */
#define HUB_INTR_TYPE_REPLAY		4	/* bit 2 */
#define HUB_INTR_TYPE_ECC_UNCORRECTED	8	/* bit 3 */
#define HUB_INTR_TYPE_ACCESS_COUNTER	16	/* bit 4 */
#define HUB_INTR_TYPE_ALL	(HUB_INTR_TYPE_OTHER | \
				HUB_INTR_TYPE_NONREPLAY | \
				HUB_INTR_TYPE_REPLAY | \
				HUB_INTR_TYPE_ECC_UNCORRECTED | \
				HUB_INTR_TYPE_ACCESS_COUNTER)

#define	FAULT_TYPE_OTHER_AND_NONREPLAY	0
#define	FAULT_TYPE_REPLAY		1

struct gk20a;

void gv11b_fb_init_fs_state(struct gk20a *g);
void gv11b_fb_init_cbc(struct gk20a *g, struct gr_gk20a *gr);
void gv11b_fb_reset(struct gk20a *g);
/* top-level HUB interrupt service routine */
void gv11b_fb_hub_isr(struct gk20a *g);

/* MMU fault buffer management; @index selects NONREPLAY/REPLAY buffer */
u32 gv11b_fb_is_fault_buf_enabled(struct gk20a *g,
		unsigned int index);
void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
		unsigned int index, unsigned int state);
void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index);
/* @intr_type is a HUB_INTR_TYPE_* bitmask */
void gv11b_fb_enable_hub_intr(struct gk20a *g,
		unsigned int index, unsigned int intr_type);
void gv11b_fb_disable_hub_intr(struct gk20a *g,
		unsigned int index, unsigned int intr_type);
bool gv11b_fb_mmu_fault_pending(struct gk20a *g);

#endif /* _NVGPU_GV11B_FB */

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,117 @@
/*
* GV11B Fifo
*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef FIFO_GV11B_H
#define FIFO_GV11B_H

/* invalid-id sentinels */
#define FIFO_INVAL_PBDMA_ID	((u32)~0)
#define FIFO_INVAL_VEID		((u32)~0)

/* engine context-switch request occurred while the engine was in reset */
#define SCHED_ERROR_CODE_ENGINE_RESET	0x00000005

/*
 * ERROR_CODE_BAD_TSG indicates that Host encountered a badly formed TSG header
 * or a badly formed channel type runlist entry in the runlist. This is typically
 * caused by encountering a new TSG entry in the middle of a TSG definition.
 * A channel type entry having wrong runqueue selector can also cause this.
 * Additionally this error code can indicate when a channel is encountered on
 * the runlist which is outside of a TSG.
 */
#define SCHED_ERROR_CODE_BAD_TSG	0x00000020

/* can be removed after runque support is added */
#define GR_RUNQUE		0	/* pbdma 0 */
#define ASYNC_CE_RUNQUE		2	/* pbdma 2 */

#define CHANNEL_INFO_VEID0	0

struct gpu_ops;

/* fault handling */
void gv11b_fifo_reset_pbdma_and_eng_faulted(struct gk20a *g,
		struct channel_gk20a *refch,
		u32 faulted_pbdma, u32 faulted_engine);
void gv11b_mmu_fault_id_to_eng_pbdma_id_and_veid(struct gk20a *g,
	u32 mmu_fault_id, u32 *active_engine_id, u32 *veid, u32 *pbdma_id);

/* runlist entry construction */
void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist);
void gv11b_get_ch_runlist_entry(struct channel_gk20a *c, u32 *runlist);

/* channel setup / userd accessors */
int channel_gv11b_setup_ramfc(struct channel_gk20a *c,
		u64 gpfifo_base, u32 gpfifo_entries,
		unsigned long acquire_timeout, u32 flags);
u32 gv11b_userd_gp_get(struct gk20a *g, struct channel_gk20a *c);
u64 gv11b_userd_pb_get(struct gk20a *g, struct channel_gk20a *c);
void gv11b_userd_gp_put(struct gk20a *g, struct channel_gk20a *c);
void channel_gv11b_unbind(struct channel_gk20a *ch);
u32 gv11b_fifo_get_num_fifos(struct gk20a *g);
bool gv11b_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid);

/* debug dump */
void gv11b_dump_channel_status_ramfc(struct gk20a *g,
		struct gk20a_debug_output *o,
		u32 chid,
		struct ch_state *ch_state);
void gv11b_dump_eng_status(struct gk20a *g,
		struct gk20a_debug_output *o);
u32 gv11b_fifo_intr_0_error_mask(struct gk20a *g);

/* preemption / teardown */
int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
		unsigned int id_type, unsigned int timeout_rc_type);
int gv11b_fifo_preempt_channel(struct gk20a *g, u32 chid);
int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg);
int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
		unsigned int id_type, unsigned int timeout_rc_type);
void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
		u32 id, unsigned int id_type, unsigned int rc_type,
		struct mmu_fault_info *mmfault);

/* interrupt handling */
void gv11b_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f);
int gv11b_init_fifo_reset_enable_hw(struct gk20a *g);
bool gv11b_fifo_handle_sched_error(struct gk20a *g);
bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr);
unsigned int gv11b_fifo_handle_pbdma_intr_0(struct gk20a *g,
			u32 pbdma_id, u32 pbdma_intr_0,
			u32 *handled, u32 *error_notifier);
unsigned int gv11b_fifo_handle_pbdma_intr_1(struct gk20a *g,
			u32 pbdma_id, u32 pbdma_intr_1,
			u32 *handled, u32 *error_notifier);

/* engine method buffers */
void gv11b_fifo_init_eng_method_buffers(struct gk20a *g,
		struct tsg_gk20a *tsg);
void gv11b_fifo_deinit_eng_method_buffers(struct gk20a *g,
		struct tsg_gk20a *tsg);

/* syncpoint helpers */
int gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
		u32 syncpt_id, struct nvgpu_mem *syncpt_buf);
void gv11b_fifo_free_syncpt_buf(struct channel_gk20a *c,
		struct nvgpu_mem *syncpt_buf);
void gv11b_fifo_add_syncpt_wait_cmd(struct gk20a *g,
		struct priv_cmd_entry *cmd, u32 off,
		u32 id, u32 thresh, u64 gpu_va_base);
u32 gv11b_fifo_get_syncpt_wait_cmd_size(void);
void gv11b_fifo_add_syncpt_incr_cmd(struct gk20a *g,
		bool wfi_cmd, struct priv_cmd_entry *cmd,
		u32 id, u64 gpu_va_base);
u32 gv11b_fifo_get_syncpt_incr_cmd_size(bool wfi_cmd);

int gv11b_init_fifo_setup_hw(struct gk20a *g);
void gv11b_fifo_tsg_verify_status_faulted(struct channel_gk20a *ch);
u32 gv11b_fifo_get_preempt_timeout(struct gk20a *g);

#endif /* FIFO_GV11B_H */

View File

@@ -0,0 +1,72 @@
/*
*
* GV11B Graphics Context
*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "gk20a/gk20a.h"
#include "gr_ctx_gv11b.h"
/*
 * Resolve the netlist (ctxsw firmware) image name for @index.
 *
 * @g:     GPU device pointer (unused here; kept for the HAL signature).
 * @index: netlist slot (NETLIST_FINAL or NETLIST_SLOT_A..D).
 * @name:  output buffer the firmware file name is written to.
 *
 * Returns 0 when a name was produced, -1 for unknown or unconfigured
 * slots.
 *
 * Fix: the image-name macros were previously passed to sprintf() as the
 * format string itself; a literal '%' in a name would invoke undefined
 * behavior. Print them through a "%s" format instead.
 */
int gr_gv11b_get_netlist_name(struct gk20a *g, int index, char *name)
{
	switch (index) {
#ifdef GV11B_NETLIST_IMAGE_FW_NAME
	case NETLIST_FINAL:
		sprintf(name, "%s", GV11B_NETLIST_IMAGE_FW_NAME);
		return 0;
#endif
#ifdef GK20A_NETLIST_IMAGE_A
	case NETLIST_SLOT_A:
		sprintf(name, "%s", GK20A_NETLIST_IMAGE_A);
		return 0;
#endif
#ifdef GK20A_NETLIST_IMAGE_B
	case NETLIST_SLOT_B:
		sprintf(name, "%s", GK20A_NETLIST_IMAGE_B);
		return 0;
#endif
#ifdef GK20A_NETLIST_IMAGE_C
	case NETLIST_SLOT_C:
		sprintf(name, "%s", GK20A_NETLIST_IMAGE_C);
		return 0;
#endif
#ifdef GK20A_NETLIST_IMAGE_D
	case NETLIST_SLOT_D:
		sprintf(name, "%s", GK20A_NETLIST_IMAGE_D);
		return 0;
#endif
	default:
		return -1;
	}

	/* Not reached: every case above returns. */
	return -1;
}
/*
 * Report whether a netlist firmware image name is configured for this
 * chip at build time.
 */
bool gr_gv11b_is_firmware_defined(void)
{
#ifdef GV11B_NETLIST_IMAGE_FW_NAME
	const bool fw_name_configured = true;
#else
	const bool fw_name_configured = false;
#endif
	return fw_name_configured;
}

View File

@@ -0,0 +1,36 @@
/*
* GV11B Graphics Context
*
* Copyright (c) 2016 - 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __GR_CTX_GV11B_H__
#define __GR_CTX_GV11B_H__

#include "gk20a/gr_ctx_gk20a.h"

/* Define netlist for silicon only */
#define GV11B_NETLIST_IMAGE_FW_NAME GK20A_NETLIST_IMAGE_D

/* Resolve the netlist image name for @index; 0 on success, -1 otherwise. */
int gr_gv11b_get_netlist_name(struct gk20a *g, int index, char *name);
/* True when a netlist image name is configured at build time. */
bool gr_gv11b_is_firmware_defined(void);

#endif /*__GR_CTX_GV11B_H__*/

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,215 @@
/*
* GV11B GPU GR
*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NVGPU_GR_GV11B_H_
#define _NVGPU_GR_GV11B_H_
/* Priv base addresses of the extended GPC (EGPC) register apertures. */
#define EGPC_PRI_BASE		0x580000
#define EGPC_PRI_SHARED_BASE	0x480000

#define PRI_BROADCAST_FLAGS_SMPC	BIT(17)

/* Stencil ZBC (zero-bandwidth clear) support. */
#define GV11B_ZBC_TYPE_STENCIL		T19X_ZBC
/*
 * NOTE(review): "INVAILD" is a typo for "INVALID"; kept as-is because
 * renaming the identifier would break its users.
 */
#define ZBC_STENCIL_CLEAR_FMT_INVAILD	0
#define ZBC_STENCIL_CLEAR_FMT_U8	1

/* One stencil ZBC table entry. */
struct zbc_s_table {
	u32 stencil;	/* stencil clear value */
	u32 format;	/* ZBC_STENCIL_CLEAR_FMT_* */
	u32 ref_cnt;	/* number of references to this entry */
};

struct gk20a;
struct zbc_entry;
struct zbc_query_params;
struct channel_ctx_gk20a;
struct nvgpu_warpstate;
struct nvgpu_gr_sm_error_state;

/* Volta class identifiers supported by this chip. */
enum {
	VOLTA_CHANNEL_GPFIFO_A	= 0xC36F,
	VOLTA_A			= 0xC397,
	VOLTA_COMPUTE_A		= 0xC3C0,
	VOLTA_DMA_COPY_A	= 0xC3B5,
};

/*
 * Method offsets handled in software for the VOLTA_A / VOLTA_COMPUTE_A
 * classes (see gr_gv11b_handle_sw_method()).
 */
#define NVC397_SET_SHADER_EXCEPTIONS		0x1528
#define NVC397_SET_CIRCULAR_BUFFER_SIZE		0x1280
#define NVC397_SET_ALPHA_CIRCULAR_BUFFER_SIZE	0x02dc
#define NVC397_SET_GO_IDLE_TIMEOUT		0x022c
#define NVC397_SET_TEX_IN_DBG			0x10bc
#define NVC397_SET_SKEDCHECK			0x10c0
#define NVC397_SET_BES_CROP_DEBUG3		0x10c4

/* Bit values for the NVC397_SET_TEX_IN_DBG method data. */
#define NVC397_SET_TEX_IN_DBG_TSL1_RVCH_INVALIDATE		0x1
#define NVC397_SET_TEX_IN_DBG_SM_L1TAG_CTRL_CACHE_SURFACE_LD	0x2
#define NVC397_SET_TEX_IN_DBG_SM_L1TAG_CTRL_CACHE_SURFACE_ST	0x4

/* Field values for the NVC397_SET_SKEDCHECK method data. */
#define NVC397_SET_SKEDCHECK_18_MASK		0x3
#define NVC397_SET_SKEDCHECK_18_DEFAULT		0x0
#define NVC397_SET_SKEDCHECK_18_DISABLE		0x1
#define NVC397_SET_SKEDCHECK_18_ENABLE		0x2

#define NVC3C0_SET_SKEDCHECK			0x23c

#define NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0
int gr_gv11b_alloc_buffer(struct vm_gk20a *vm, size_t size,
struct nvgpu_mem *mem);
/*zcull*/
void gr_gv11b_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries,
u32 *zcull_map_tiles);
void gr_gv11b_create_sysfs(struct gk20a *g);
bool gr_gv11b_is_valid_class(struct gk20a *g, u32 class_num);
bool gr_gv11b_is_valid_gfx_class(struct gk20a *g, u32 class_num);
bool gr_gv11b_is_valid_compute_class(struct gk20a *g, u32 class_num);
void gr_gv11b_enable_hww_exceptions(struct gk20a *g);
void gr_gv11b_enable_exceptions(struct gk20a *g);
int gr_gv11b_handle_tpc_sm_ecc_exception(struct gk20a *g,
u32 gpc, u32 tpc,
bool *post_event, struct channel_gk20a *fault_ch,
u32 *hww_global_esr);
int gr_gv11b_handle_gcc_exception(struct gk20a *g, u32 gpc, u32 tpc,
bool *post_event, struct channel_gk20a *fault_ch,
u32 *hww_global_esr);
int gr_gv11b_handle_gpc_gpcmmu_exception(struct gk20a *g, u32 gpc,
u32 gpc_exception);
int gr_gv11b_handle_gpc_gpccs_exception(struct gk20a *g, u32 gpc,
u32 gpc_exception);
void gr_gv11b_enable_gpc_exceptions(struct gk20a *g);
int gr_gv11b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc,
bool *post_event);
int gr_gv11b_zbc_s_query_table(struct gk20a *g, struct gr_gk20a *gr,
struct zbc_query_params *query_params);
bool gr_gv11b_add_zbc_type_s(struct gk20a *g, struct gr_gk20a *gr,
struct zbc_entry *zbc_val, int *ret_val);
int gr_gv11b_add_zbc_stencil(struct gk20a *g, struct gr_gk20a *gr,
struct zbc_entry *stencil_val, u32 index);
int gr_gv11b_load_stencil_default_tbl(struct gk20a *g,
struct gr_gk20a *gr);
int gr_gv11b_load_stencil_tbl(struct gk20a *g, struct gr_gk20a *gr);
u32 gr_gv11b_pagepool_default_size(struct gk20a *g);
int gr_gv11b_calc_global_ctx_buffer_size(struct gk20a *g);
int gr_gv11b_handle_sw_method(struct gk20a *g, u32 addr,
u32 class_num, u32 offset, u32 data);
void gr_gv11b_bundle_cb_defaults(struct gk20a *g);
void gr_gv11b_cb_size_default(struct gk20a *g);
void gr_gv11b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data);
void gr_gv11b_set_circular_buffer_size(struct gk20a *g, u32 data);
int gr_gv11b_dump_gr_status_regs(struct gk20a *g,
struct gk20a_debug_output *o);
int gr_gv11b_wait_empty(struct gk20a *g, unsigned long duration_ms,
u32 expect_delay);
void gr_gv11b_commit_global_attrib_cb(struct gk20a *g,
struct channel_ctx_gk20a *ch_ctx,
u64 addr, bool patch);
void gr_gv11b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index);
void gr_gv11b_get_access_map(struct gk20a *g,
u32 **whitelist, int *num_entries);
int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
u32 gpc, u32 tpc, u32 sm, u32 global_esr, u32 warp_esr,
bool sm_debugger_attached, struct channel_gk20a *fault_ch,
bool *early_exit, bool *ignore_debugger);
int gr_gv11b_handle_fecs_error(struct gk20a *g,
struct channel_gk20a *__ch,
struct gr_gk20a_isr_data *isr_data);
int gr_gv11b_setup_rop_mapping(struct gk20a *g, struct gr_gk20a *gr);
int gr_gv11b_init_sw_veid_bundle(struct gk20a *g);
void gr_gv11b_detect_sm_arch(struct gk20a *g);
void gr_gv11b_program_sm_id_numbering(struct gk20a *g,
u32 gpc, u32 tpc, u32 smid);
int gr_gv11b_load_smid_config(struct gk20a *g);
int gr_gv11b_commit_inst(struct channel_gk20a *c, u64 gpu_va);
int gr_gv11b_commit_global_timeslice(struct gk20a *g, struct channel_gk20a *c);
void gr_gv11b_write_zcull_ptr(struct gk20a *g,
struct nvgpu_mem *mem, u64 gpu_va);
void gr_gv11b_write_pm_ptr(struct gk20a *g,
struct nvgpu_mem *mem, u64 gpu_va);
void gr_gv11b_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine);
void gr_gv11b_load_tpc_mask(struct gk20a *g);
void gr_gv11b_set_preemption_buffer_va(struct gk20a *g,
struct nvgpu_mem *mem, u64 gpu_va);
int gr_gv11b_init_fs_state(struct gk20a *g);
void gv11b_gr_get_esr_sm_sel(struct gk20a *g, u32 gpc, u32 tpc,
u32 *esr_sm_sel);
int gv11b_gr_sm_trigger_suspend(struct gk20a *g);
void gv11b_gr_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state);
int gv11b_gr_update_sm_error_state(struct gk20a *g,
struct channel_gk20a *ch, u32 sm_id,
struct nvgpu_gr_sm_error_state *sm_error_state);
int gv11b_gr_set_sm_debug_mode(struct gk20a *g,
struct channel_gk20a *ch, u64 sms, bool enable);
int gv11b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc);
void gv11b_gr_set_hww_esr_report_mask(struct gk20a *g);
bool gv11b_gr_sm_debugger_attached(struct gk20a *g);
void gv11b_gr_suspend_single_sm(struct gk20a *g,
u32 gpc, u32 tpc, u32 sm,
u32 global_esr_mask, bool check_errors);
void gv11b_gr_suspend_all_sms(struct gk20a *g,
u32 global_esr_mask, bool check_errors);
void gv11b_gr_resume_single_sm(struct gk20a *g,
u32 gpc, u32 tpc, u32 sm);
void gv11b_gr_resume_all_sms(struct gk20a *g);
int gv11b_gr_resume_from_pause(struct gk20a *g);
u32 gv11b_gr_get_sm_hww_warp_esr(struct gk20a *g,
u32 gpc, u32 tpc, u32 sm);
u32 gv11b_gr_get_sm_hww_global_esr(struct gk20a *g,
u32 gpc, u32 tpc, u32 sm);
u32 gv11b_gr_get_sm_no_lock_down_hww_global_esr_mask(struct gk20a *g);
int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g,
u32 gpc, u32 tpc, u32 sm,
u32 global_esr_mask, bool check_errors);
int gv11b_gr_lock_down_sm(struct gk20a *g,
u32 gpc, u32 tpc, u32 sm, u32 global_esr_mask,
bool check_errors);
void gv11b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
u32 global_esr);
int gr_gv11b_handle_tpc_mpc_exception(struct gk20a *g,
u32 gpc, u32 tpc, bool *post_event);
void gv11b_gr_init_ovr_sm_dsm_perf(void);
void gv11b_gr_init_sm_dsm_reg_info(void);
void gv11b_gr_get_sm_dsm_perf_regs(struct gk20a *g,
u32 *num_sm_dsm_perf_regs,
u32 **sm_dsm_perf_regs,
u32 *perf_register_stride);
void gv11b_gr_get_sm_dsm_perf_ctrl_regs(struct gk20a *g,
u32 *num_sm_dsm_perf_ctrl_regs,
u32 **sm_dsm_perf_ctrl_regs,
u32 *ctrl_register_stride);
void gv11b_gr_get_ovr_perf_regs(struct gk20a *g, u32 *num_ovr_perf_regs,
u32 **ovr_perf_regs);
void gv11b_gr_access_smpc_reg(struct gk20a *g, u32 quad, u32 offset);
bool gv11b_gr_pri_is_egpc_addr(struct gk20a *g, u32 addr);
bool gv11b_gr_pri_is_etpc_addr(struct gk20a *g, u32 addr);
void gv11b_gr_get_egpc_etpc_num(struct gk20a *g, u32 addr,
u32 *egpc_num, u32 *etpc_num);
int gv11b_gr_decode_egpc_addr(struct gk20a *g, u32 addr, int *addr_type,
u32 *gpc_num, u32 *tpc_num, u32 *broadcast_flags);
void gv11b_gr_egpc_etpc_priv_addr_table(struct gk20a *g, u32 addr,
u32 gpc, u32 broadcast_flags, u32 *priv_addr_table, u32 *t);
u32 gv11b_gr_get_egpc_base(struct gk20a *g);
void gr_gv11b_init_gpc_mmu(struct gk20a *g);
#endif

View File

@@ -0,0 +1,38 @@
/*
* GV11B Graphics
*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/enabled.h>
#include <nvgpu/enabled_t19x.h>
#include "gk20a/gk20a.h"
#include "gv11b/gv11b.h"
/*
 * Populate the GPU characteristics for gv11b: run the common gk20a
 * setup first, then flag the features this chip adds on top of it.
 * Always returns 0.
 */
int gv11b_init_gpu_characteristics(struct gk20a *g)
{
	gk20a_init_gpu_characteristics(g);

	/* gv11b is I/O coherent and supports TSG subcontexts. */
	__nvgpu_set_enabled(g, NVGPU_SUPPORT_IO_COHERENCE, true);
	__nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS, true);

	return 0;
}

View File

@@ -0,0 +1,32 @@
/*
* GV11B Graphics
*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef GV11B_H
#define GV11B_H
#include "gk20a/gk20a.h"
int gv11b_init_gpu_characteristics(struct gk20a *g);
#endif /* GV11B_H */

View File

@@ -0,0 +1,748 @@
/*
* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This file is autogenerated. Do not edit.
*/
#ifndef __gv11b_gating_reglist_h__
#define __gv11b_gating_reglist_h__
#include <linux/types.h>
#include "gv11b_gating_reglist.h"
#include <nvgpu/enabled.h>
/*
 * One clock-gating register override: the register address plus the
 * values written for the production (gating enabled) and the gating
 * disabled configurations.
 */
struct gating_desc {
	u32 addr;	/* register offset */
	u32 prod;	/* value written when gating is enabled (prod) */
	u32 disable;	/* value written to turn gating off */
};
/* slcg bus */
static const struct gating_desc gv11b_slcg_bus[] = {
{.addr = 0x00001c04, .prod = 0x00000000, .disable = 0x000003fe},
};
/* slcg ce2 */
static const struct gating_desc gv11b_slcg_ce2[] = {
{.addr = 0x00104204, .prod = 0x00000040, .disable = 0x000007fe},
};
/* slcg chiplet */
static const struct gating_desc gv11b_slcg_chiplet[] = {
{.addr = 0x0010c07c, .prod = 0x00000000, .disable = 0x00000007},
{.addr = 0x0010e07c, .prod = 0x00000000, .disable = 0x00000007},
{.addr = 0x0010d07c, .prod = 0x00000000, .disable = 0x00000007},
{.addr = 0x0010e17c, .prod = 0x00000000, .disable = 0x00000007},
};
/* slcg fb */
static const struct gating_desc gv11b_slcg_fb[] = {
{.addr = 0x00100d14, .prod = 0x00000000, .disable = 0xfffffffe},
{.addr = 0x00100c9c, .prod = 0x00000000, .disable = 0x000001fe},
};
/* slcg fifo */
static const struct gating_desc gv11b_slcg_fifo[] = {
{.addr = 0x000026ec, .prod = 0x00000000, .disable = 0x0001fffe},
};
/* slcg gr */
static const struct gating_desc gv11b_slcg_gr[] = {
{.addr = 0x004041f4, .prod = 0x00000000, .disable = 0x07fffffe},
{.addr = 0x00409134, .prod = 0x00020008, .disable = 0x0003fffe},
{.addr = 0x00409894, .prod = 0x00000000, .disable = 0x0000fffe},
{.addr = 0x004078c4, .prod = 0x00000000, .disable = 0x000001fe},
{.addr = 0x00406004, .prod = 0x00000200, .disable = 0x0001fffe},
{.addr = 0x00405864, .prod = 0x00000000, .disable = 0x000001fe},
{.addr = 0x00405910, .prod = 0xfffffff0, .disable = 0xfffffffe},
{.addr = 0x00408044, .prod = 0x00000000, .disable = 0x000007fe},
{.addr = 0x00407004, .prod = 0x00000000, .disable = 0x000001fe},
{.addr = 0x00405bf4, .prod = 0x00000000, .disable = 0x00000002},
{.addr = 0x0041a134, .prod = 0x00020008, .disable = 0x0003fffe},
{.addr = 0x0041a894, .prod = 0x00000000, .disable = 0x0000fffe},
{.addr = 0x00418504, .prod = 0x00000000, .disable = 0x0007fffe},
{.addr = 0x0041860c, .prod = 0x00000000, .disable = 0x000001fe},
{.addr = 0x0041868c, .prod = 0x00000000, .disable = 0x0000001e},
{.addr = 0x0041871c, .prod = 0x00000000, .disable = 0x000003fe},
{.addr = 0x00418388, .prod = 0x00000000, .disable = 0x00000001},
{.addr = 0x0041882c, .prod = 0x00000000, .disable = 0x0001fffe},
{.addr = 0x00418bc0, .prod = 0x00000000, .disable = 0x000001fe},
{.addr = 0x00418974, .prod = 0x00000000, .disable = 0x0001fffe},
{.addr = 0x00418c74, .prod = 0xffffff80, .disable = 0xfffffffe},
{.addr = 0x00418cf4, .prod = 0xfffffff8, .disable = 0xfffffffe},
{.addr = 0x00418d74, .prod = 0xffffffe0, .disable = 0xfffffffe},
{.addr = 0x00418f10, .prod = 0xffffffe0, .disable = 0xfffffffe},
{.addr = 0x00418e10, .prod = 0xfffffffe, .disable = 0xfffffffe},
{.addr = 0x00419024, .prod = 0x000001fe, .disable = 0x000001fe},
{.addr = 0x0041889c, .prod = 0x00000000, .disable = 0x000001fe},
{.addr = 0x00419d24, .prod = 0x00000000, .disable = 0x000000ff},
{.addr = 0x0041986c, .prod = 0x00000104, .disable = 0x00fffffe},
{.addr = 0x00419c74, .prod = 0x0000001e, .disable = 0x0000001e},
{.addr = 0x00419c84, .prod = 0x0003fff8, .disable = 0x0003fffe},
{.addr = 0x00419c8c, .prod = 0xffffff84, .disable = 0xfffffffe},
{.addr = 0x00419c94, .prod = 0x00080040, .disable = 0x000ffffe},
{.addr = 0x00419ca4, .prod = 0x00003ffe, .disable = 0x00003ffe},
{.addr = 0x00419cac, .prod = 0x0001fffe, .disable = 0x0001fffe},
{.addr = 0x00419a44, .prod = 0x00000008, .disable = 0x0000000e},
{.addr = 0x00419a4c, .prod = 0x000001f8, .disable = 0x000001fe},
{.addr = 0x00419a54, .prod = 0x0000003c, .disable = 0x0000003e},
{.addr = 0x00419a5c, .prod = 0x0000000c, .disable = 0x0000000e},
{.addr = 0x00419a64, .prod = 0x000001ba, .disable = 0x000001fe},
{.addr = 0x00419a7c, .prod = 0x0000003c, .disable = 0x0000003e},
{.addr = 0x00419a84, .prod = 0x0000000c, .disable = 0x0000000e},
{.addr = 0x0041be2c, .prod = 0x04115fc0, .disable = 0xfffffffe},
{.addr = 0x0041bfec, .prod = 0xfffffff0, .disable = 0xfffffffe},
{.addr = 0x0041bed4, .prod = 0xfffffff8, .disable = 0xfffffffe},
{.addr = 0x00408814, .prod = 0x00000000, .disable = 0x0001fffe},
{.addr = 0x00408a84, .prod = 0x00000000, .disable = 0x0001fffe},
{.addr = 0x004089ac, .prod = 0x00000000, .disable = 0x0001fffe},
{.addr = 0x00408a24, .prod = 0x00000000, .disable = 0x000000ff},
};
/* slcg ltc */
static const struct gating_desc gv11b_slcg_ltc[] = {
{.addr = 0x0017e050, .prod = 0x00000000, .disable = 0xfffffffe},
{.addr = 0x0017e35c, .prod = 0x00000000, .disable = 0xfffffffe},
};
/* slcg perf */
static const struct gating_desc gv11b_slcg_perf[] = {
{.addr = 0x00248018, .prod = 0xffffffff, .disable = 0x00000000},
{.addr = 0x00248018, .prod = 0xffffffff, .disable = 0x00000000},
{.addr = 0x00246018, .prod = 0xffffffff, .disable = 0x00000000},
{.addr = 0x00246018, .prod = 0xffffffff, .disable = 0x00000000},
{.addr = 0x00246018, .prod = 0xffffffff, .disable = 0x00000000},
{.addr = 0x00244018, .prod = 0xffffffff, .disable = 0x00000000},
{.addr = 0x00244018, .prod = 0xffffffff, .disable = 0x00000000},
{.addr = 0x00244018, .prod = 0xffffffff, .disable = 0x00000000},
{.addr = 0x0024a124, .prod = 0x00000001, .disable = 0x00000000},
};
/* slcg PriRing */
static const struct gating_desc gv11b_slcg_priring[] = {
{.addr = 0x001200a8, .prod = 0x00000000, .disable = 0x00000001},
};
/* slcg pwr_csb */
static const struct gating_desc gv11b_slcg_pwr_csb[] = {
{.addr = 0x00000134, .prod = 0x00020008, .disable = 0x0003fffe},
{.addr = 0x00000e74, .prod = 0x00000000, .disable = 0x0000000f},
{.addr = 0x00000a74, .prod = 0x00004040, .disable = 0x00007ffe},
{.addr = 0x000206b8, .prod = 0x00000008, .disable = 0x0000000f},
};
/* slcg pmu */
static const struct gating_desc gv11b_slcg_pmu[] = {
{.addr = 0x0010a134, .prod = 0x00020008, .disable = 0x0003fffe},
{.addr = 0x0010aa74, .prod = 0x00004040, .disable = 0x00007ffe},
{.addr = 0x0010ae74, .prod = 0x00000000, .disable = 0x0000000f},
};
/* therm gr */
static const struct gating_desc gv11b_slcg_therm[] = {
{.addr = 0x000206b8, .prod = 0x00000008, .disable = 0x0000000f},
};
/* slcg Xbar */
static const struct gating_desc gv11b_slcg_xbar[] = {
{.addr = 0x0013c824, .prod = 0x00000000, .disable = 0x7ffffffe},
{.addr = 0x0013dc08, .prod = 0x00000000, .disable = 0xfffffffe},
{.addr = 0x0013c924, .prod = 0x00000000, .disable = 0x7ffffffe},
{.addr = 0x0013cbe4, .prod = 0x00000000, .disable = 0x1ffffffe},
{.addr = 0x0013cc04, .prod = 0x00000000, .disable = 0x1ffffffe},
};
/* blcg bus */
static const struct gating_desc gv11b_blcg_bus[] = {
{.addr = 0x00001c00, .prod = 0x00000042, .disable = 0x00000000},
};
/* blcg ce */
static const struct gating_desc gv11b_blcg_ce[] = {
{.addr = 0x00104200, .prod = 0x0000c242, .disable = 0x00000000},
};
/* blcg ctxsw prog */
static const struct gating_desc gv11b_blcg_ctxsw_prog[] = {
};
/* blcg fb */
static const struct gating_desc gv11b_blcg_fb[] = {
{.addr = 0x00100d10, .prod = 0x0000c242, .disable = 0x00000000},
{.addr = 0x00100d30, .prod = 0x0000c242, .disable = 0x00000000},
{.addr = 0x00100d3c, .prod = 0x00000242, .disable = 0x00000000},
{.addr = 0x00100d48, .prod = 0x0000c242, .disable = 0x00000000},
{.addr = 0x00100d1c, .prod = 0x00000042, .disable = 0x00000000},
{.addr = 0x00100c98, .prod = 0x00004242, .disable = 0x00000000},
};
/* blcg fifo */
static const struct gating_desc gv11b_blcg_fifo[] = {
{.addr = 0x000026e0, .prod = 0x0000c244, .disable = 0x00000000},
};
/* blcg gr */
static const struct gating_desc gv11b_blcg_gr[] = {
{.addr = 0x004041f0, .prod = 0x0000c646, .disable = 0x00000000},
{.addr = 0x00409890, .prod = 0x0000007f, .disable = 0x00000000},
{.addr = 0x004098b0, .prod = 0x0000007f, .disable = 0x00000000},
{.addr = 0x004078c0, .prod = 0x00004242, .disable = 0x00000000},
{.addr = 0x00406000, .prod = 0x0000c444, .disable = 0x00000000},
{.addr = 0x00405860, .prod = 0x0000c242, .disable = 0x00000000},
{.addr = 0x0040590c, .prod = 0x0000c444, .disable = 0x00000000},
{.addr = 0x00408040, .prod = 0x0000c444, .disable = 0x00000000},
{.addr = 0x00407000, .prod = 0x4000c242, .disable = 0x00000000},
{.addr = 0x00405bf0, .prod = 0x0000c444, .disable = 0x00000000},
{.addr = 0x0041a890, .prod = 0x0000427f, .disable = 0x00000000},
{.addr = 0x0041a8b0, .prod = 0x0000007f, .disable = 0x00000000},
{.addr = 0x00418500, .prod = 0x0000c244, .disable = 0x00000000},
{.addr = 0x00418608, .prod = 0x0000c242, .disable = 0x00000000},
{.addr = 0x00418688, .prod = 0x0000c242, .disable = 0x00000000},
{.addr = 0x00418718, .prod = 0x00000042, .disable = 0x00000000},
{.addr = 0x00418828, .prod = 0x00008444, .disable = 0x00000000},
{.addr = 0x00418bbc, .prod = 0x0000c242, .disable = 0x00000000},
{.addr = 0x00418970, .prod = 0x0000c242, .disable = 0x00000000},
{.addr = 0x00418c70, .prod = 0x0000c444, .disable = 0x00000000},
{.addr = 0x00418cf0, .prod = 0x0000c444, .disable = 0x00000000},
{.addr = 0x00418d70, .prod = 0x0000c444, .disable = 0x00000000},
{.addr = 0x00418f0c, .prod = 0x0000c444, .disable = 0x00000000},
{.addr = 0x00418e0c, .prod = 0x0000c444, .disable = 0x00000000},
{.addr = 0x00419020, .prod = 0x0000c242, .disable = 0x00000000},
{.addr = 0x00419038, .prod = 0x00000042, .disable = 0x00000000},
{.addr = 0x00418898, .prod = 0x00004242, .disable = 0x00000000},
{.addr = 0x00419868, .prod = 0x00008243, .disable = 0x00000000},
{.addr = 0x00419c70, .prod = 0x0000c444, .disable = 0x00000000},
{.addr = 0x00419c80, .prod = 0x00004045, .disable = 0x00000000},
{.addr = 0x00419c88, .prod = 0x00004043, .disable = 0x00000000},
{.addr = 0x00419c90, .prod = 0x0000004a, .disable = 0x00000000},
{.addr = 0x00419c98, .prod = 0x00000042, .disable = 0x00000000},
{.addr = 0x00419ca0, .prod = 0x00000043, .disable = 0x00000000},
{.addr = 0x00419ca8, .prod = 0x00000003, .disable = 0x00000000},
{.addr = 0x00419cb0, .prod = 0x00000002, .disable = 0x00000000},
{.addr = 0x00419a40, .prod = 0x00000242, .disable = 0x00000000},
{.addr = 0x00419a48, .prod = 0x00000242, .disable = 0x00000000},
{.addr = 0x00419a50, .prod = 0x00000242, .disable = 0x00000000},
{.addr = 0x00419a58, .prod = 0x00000242, .disable = 0x00000000},
{.addr = 0x00419a60, .prod = 0x00000202, .disable = 0x00000000},
{.addr = 0x00419a68, .prod = 0x00000202, .disable = 0x00000000},
{.addr = 0x00419a78, .prod = 0x00000242, .disable = 0x00000000},
{.addr = 0x00419a80, .prod = 0x00000242, .disable = 0x00000000},
{.addr = 0x0041be28, .prod = 0x00008242, .disable = 0x00000000},
{.addr = 0x0041bfe8, .prod = 0x0000c444, .disable = 0x00000000},
{.addr = 0x0041bed0, .prod = 0x0000c444, .disable = 0x00000000},
{.addr = 0x00408810, .prod = 0x0000c242, .disable = 0x00000000},
{.addr = 0x00408a80, .prod = 0x0000c242, .disable = 0x00000000},
{.addr = 0x004089a8, .prod = 0x0000c242, .disable = 0x00000000},
};
/* blcg ltc */
static const struct gating_desc gv11b_blcg_ltc[] = {
{.addr = 0x0017e030, .prod = 0x00000044, .disable = 0x00000000},
{.addr = 0x0017e040, .prod = 0x00000044, .disable = 0x00000000},
{.addr = 0x0017e3e0, .prod = 0x00000044, .disable = 0x00000000},
{.addr = 0x0017e3c8, .prod = 0x00000044, .disable = 0x00000000},
};
/* blcg pwr_csb */
static const struct gating_desc gv11b_blcg_pwr_csb[] = {
{.addr = 0x00000a70, .prod = 0x00000045, .disable = 0x00000000},
};
/* blcg pmu */
static const struct gating_desc gv11b_blcg_pmu[] = {
{.addr = 0x0010aa70, .prod = 0x00000045, .disable = 0x00000000},
};
/* blcg Xbar */
static const struct gating_desc gv11b_blcg_xbar[] = {
{.addr = 0x0013c820, .prod = 0x0001004a, .disable = 0x00000000},
{.addr = 0x0013dc04, .prod = 0x0001004a, .disable = 0x00000000},
{.addr = 0x0013c920, .prod = 0x0000004a, .disable = 0x00000000},
{.addr = 0x0013cbe0, .prod = 0x00000042, .disable = 0x00000000},
{.addr = 0x0013cc00, .prod = 0x00000042, .disable = 0x00000000},
};
/* pg gr */
static const struct gating_desc gv11b_pg_gr[] = {
};
/* inline functions */
/* Write prod (gating on) or disable values to the bus SLCG registers. */
void gv11b_slcg_bus_load_gating_prod(struct gk20a *g,
	bool prod)
{
	u32 i;
	u32 size = sizeof(gv11b_slcg_bus) / sizeof(struct gating_desc);

	/* No-op unless SLCG is supported/allowed on this GPU. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
		return;
	for (i = 0; i < size; i++) {
		if (prod)
			gk20a_writel(g, gv11b_slcg_bus[i].addr,
				gv11b_slcg_bus[i].prod);
		else
			gk20a_writel(g, gv11b_slcg_bus[i].addr,
				gv11b_slcg_bus[i].disable);
	}
}
/* Write prod (gating on) or disable values to the CE2 SLCG registers. */
void gv11b_slcg_ce2_load_gating_prod(struct gk20a *g,
	bool prod)
{
	u32 i;
	u32 size = sizeof(gv11b_slcg_ce2) / sizeof(struct gating_desc);

	/* No-op unless SLCG is supported/allowed on this GPU. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
		return;
	for (i = 0; i < size; i++) {
		if (prod)
			gk20a_writel(g, gv11b_slcg_ce2[i].addr,
				gv11b_slcg_ce2[i].prod);
		else
			gk20a_writel(g, gv11b_slcg_ce2[i].addr,
				gv11b_slcg_ce2[i].disable);
	}
}
/* Write prod (gating on) or disable values to the chiplet SLCG registers. */
void gv11b_slcg_chiplet_load_gating_prod(struct gk20a *g,
	bool prod)
{
	u32 i;
	u32 size = sizeof(gv11b_slcg_chiplet) / sizeof(struct gating_desc);

	/* No-op unless SLCG is supported/allowed on this GPU. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
		return;
	for (i = 0; i < size; i++) {
		if (prod)
			gk20a_writel(g, gv11b_slcg_chiplet[i].addr,
				gv11b_slcg_chiplet[i].prod);
		else
			gk20a_writel(g, gv11b_slcg_chiplet[i].addr,
				gv11b_slcg_chiplet[i].disable);
	}
}
/* No ctxsw-firmware SLCG registers on gv11b; intentional no-op. */
void gv11b_slcg_ctxsw_firmware_load_gating_prod(struct gk20a *g,
	bool prod)
{
}
/* Write prod (gating on) or disable values to the FB SLCG registers. */
void gv11b_slcg_fb_load_gating_prod(struct gk20a *g,
	bool prod)
{
	u32 i;
	u32 size = sizeof(gv11b_slcg_fb) / sizeof(struct gating_desc);

	/* No-op unless SLCG is supported/allowed on this GPU. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
		return;
	for (i = 0; i < size; i++) {
		if (prod)
			gk20a_writel(g, gv11b_slcg_fb[i].addr,
				gv11b_slcg_fb[i].prod);
		else
			gk20a_writel(g, gv11b_slcg_fb[i].addr,
				gv11b_slcg_fb[i].disable);
	}
}
/* Write prod (gating on) or disable values to the FIFO SLCG registers. */
void gv11b_slcg_fifo_load_gating_prod(struct gk20a *g,
	bool prod)
{
	u32 i;
	u32 size = sizeof(gv11b_slcg_fifo) / sizeof(struct gating_desc);

	/* No-op unless SLCG is supported/allowed on this GPU. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
		return;
	for (i = 0; i < size; i++) {
		if (prod)
			gk20a_writel(g, gv11b_slcg_fifo[i].addr,
				gv11b_slcg_fifo[i].prod);
		else
			gk20a_writel(g, gv11b_slcg_fifo[i].addr,
				gv11b_slcg_fifo[i].disable);
	}
}
/* Write prod (gating on) or disable values to the GR SLCG registers. */
void gr_gv11b_slcg_gr_load_gating_prod(struct gk20a *g,
	bool prod)
{
	u32 i;
	u32 size = sizeof(gv11b_slcg_gr) / sizeof(struct gating_desc);

	/* No-op unless SLCG is supported/allowed on this GPU. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
		return;
	for (i = 0; i < size; i++) {
		if (prod)
			gk20a_writel(g, gv11b_slcg_gr[i].addr,
				gv11b_slcg_gr[i].prod);
		else
			gk20a_writel(g, gv11b_slcg_gr[i].addr,
				gv11b_slcg_gr[i].disable);
	}
}
/* Write prod (gating on) or disable values to the LTC SLCG registers. */
void ltc_gv11b_slcg_ltc_load_gating_prod(struct gk20a *g,
	bool prod)
{
	u32 i;
	u32 size = sizeof(gv11b_slcg_ltc) / sizeof(struct gating_desc);

	/* No-op unless SLCG is supported/allowed on this GPU. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
		return;
	for (i = 0; i < size; i++) {
		if (prod)
			gk20a_writel(g, gv11b_slcg_ltc[i].addr,
				gv11b_slcg_ltc[i].prod);
		else
			gk20a_writel(g, gv11b_slcg_ltc[i].addr,
				gv11b_slcg_ltc[i].disable);
	}
}
/* Write prod (gating on) or disable values to the perf SLCG registers. */
void gv11b_slcg_perf_load_gating_prod(struct gk20a *g,
	bool prod)
{
	u32 i;
	u32 size = sizeof(gv11b_slcg_perf) / sizeof(struct gating_desc);

	/* No-op unless SLCG is supported/allowed on this GPU. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
		return;
	for (i = 0; i < size; i++) {
		if (prod)
			gk20a_writel(g, gv11b_slcg_perf[i].addr,
				gv11b_slcg_perf[i].prod);
		else
			gk20a_writel(g, gv11b_slcg_perf[i].addr,
				gv11b_slcg_perf[i].disable);
	}
}
/* Write prod (gating on) or disable values to the PriRing SLCG registers. */
void gv11b_slcg_priring_load_gating_prod(struct gk20a *g,
	bool prod)
{
	u32 i;
	u32 size = sizeof(gv11b_slcg_priring) / sizeof(struct gating_desc);

	/* No-op unless SLCG is supported/allowed on this GPU. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
		return;
	for (i = 0; i < size; i++) {
		if (prod)
			gk20a_writel(g, gv11b_slcg_priring[i].addr,
				gv11b_slcg_priring[i].prod);
		else
			gk20a_writel(g, gv11b_slcg_priring[i].addr,
				gv11b_slcg_priring[i].disable);
	}
}
/* Write prod (gating on) or disable values to the pwr_csb SLCG registers. */
void gv11b_slcg_pwr_csb_load_gating_prod(struct gk20a *g,
	bool prod)
{
	u32 i;
	u32 size = sizeof(gv11b_slcg_pwr_csb) / sizeof(struct gating_desc);

	/* No-op unless SLCG is supported/allowed on this GPU. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
		return;
	for (i = 0; i < size; i++) {
		if (prod)
			gk20a_writel(g, gv11b_slcg_pwr_csb[i].addr,
				gv11b_slcg_pwr_csb[i].prod);
		else
			gk20a_writel(g, gv11b_slcg_pwr_csb[i].addr,
				gv11b_slcg_pwr_csb[i].disable);
	}
}
/* Program SLCG PMU registers with prod (gating) or disable values. */
void gv11b_slcg_pmu_load_gating_prod(struct gk20a *g,
	bool prod)
{
	const struct gating_desc *desc = gv11b_slcg_pmu;
	u32 count = sizeof(gv11b_slcg_pmu) / sizeof(struct gating_desc);
	u32 idx;

	/* Honour the platform-level SLCG enable flag. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
		return;

	for (idx = 0; idx < count; idx++)
		gk20a_writel(g, desc[idx].addr,
			prod ? desc[idx].prod : desc[idx].disable);
}
/* Program SLCG THERM registers with prod (gating) or disable values. */
void gv11b_slcg_therm_load_gating_prod(struct gk20a *g,
	bool prod)
{
	const struct gating_desc *desc = gv11b_slcg_therm;
	u32 count = sizeof(gv11b_slcg_therm) / sizeof(struct gating_desc);
	u32 idx;

	/* Honour the platform-level SLCG enable flag. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
		return;

	for (idx = 0; idx < count; idx++)
		gk20a_writel(g, desc[idx].addr,
			prod ? desc[idx].prod : desc[idx].disable);
}
/* Program SLCG XBAR registers with prod (gating) or disable values. */
void gv11b_slcg_xbar_load_gating_prod(struct gk20a *g,
	bool prod)
{
	const struct gating_desc *desc = gv11b_slcg_xbar;
	u32 count = sizeof(gv11b_slcg_xbar) / sizeof(struct gating_desc);
	u32 idx;

	/* Honour the platform-level SLCG enable flag. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
		return;

	for (idx = 0; idx < count; idx++)
		gk20a_writel(g, desc[idx].addr,
			prod ? desc[idx].prod : desc[idx].disable);
}
/* Program BLCG BUS registers with prod (gating) or disable values. */
void gv11b_blcg_bus_load_gating_prod(struct gk20a *g,
	bool prod)
{
	const struct gating_desc *desc = gv11b_blcg_bus;
	u32 count = sizeof(gv11b_blcg_bus) / sizeof(struct gating_desc);
	u32 idx;

	/* Honour the platform-level BLCG enable flag. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
		return;

	for (idx = 0; idx < count; idx++)
		gk20a_writel(g, desc[idx].addr,
			prod ? desc[idx].prod : desc[idx].disable);
}
/* Program BLCG CE registers with prod (gating) or disable values. */
void gv11b_blcg_ce_load_gating_prod(struct gk20a *g,
	bool prod)
{
	const struct gating_desc *desc = gv11b_blcg_ce;
	u32 count = sizeof(gv11b_blcg_ce) / sizeof(struct gating_desc);
	u32 idx;

	/* Honour the platform-level BLCG enable flag. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
		return;

	for (idx = 0; idx < count; idx++)
		gk20a_writel(g, desc[idx].addr,
			prod ? desc[idx].prod : desc[idx].disable);
}
/* Program BLCG ctxsw-firmware registers with prod (gating) or disable values. */
void gv11b_blcg_ctxsw_firmware_load_gating_prod(struct gk20a *g,
	bool prod)
{
	const struct gating_desc *desc = gv11b_blcg_ctxsw_prog;
	u32 count = sizeof(gv11b_blcg_ctxsw_prog) / sizeof(struct gating_desc);
	u32 idx;

	/* Honour the platform-level BLCG enable flag. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
		return;

	for (idx = 0; idx < count; idx++)
		gk20a_writel(g, desc[idx].addr,
			prod ? desc[idx].prod : desc[idx].disable);
}
/* Program BLCG FB registers with prod (gating) or disable values. */
void gv11b_blcg_fb_load_gating_prod(struct gk20a *g,
	bool prod)
{
	const struct gating_desc *desc = gv11b_blcg_fb;
	u32 count = sizeof(gv11b_blcg_fb) / sizeof(struct gating_desc);
	u32 idx;

	/* Honour the platform-level BLCG enable flag. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
		return;

	for (idx = 0; idx < count; idx++)
		gk20a_writel(g, desc[idx].addr,
			prod ? desc[idx].prod : desc[idx].disable);
}
/* Program BLCG FIFO registers with prod (gating) or disable values. */
void gv11b_blcg_fifo_load_gating_prod(struct gk20a *g,
	bool prod)
{
	const struct gating_desc *desc = gv11b_blcg_fifo;
	u32 count = sizeof(gv11b_blcg_fifo) / sizeof(struct gating_desc);
	u32 idx;

	/* Honour the platform-level BLCG enable flag. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
		return;

	for (idx = 0; idx < count; idx++)
		gk20a_writel(g, desc[idx].addr,
			prod ? desc[idx].prod : desc[idx].disable);
}
/* Program BLCG GR registers with prod (gating) or disable values. */
void gv11b_blcg_gr_load_gating_prod(struct gk20a *g,
	bool prod)
{
	const struct gating_desc *desc = gv11b_blcg_gr;
	u32 count = sizeof(gv11b_blcg_gr) / sizeof(struct gating_desc);
	u32 idx;

	/* Honour the platform-level BLCG enable flag. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
		return;

	for (idx = 0; idx < count; idx++)
		gk20a_writel(g, desc[idx].addr,
			prod ? desc[idx].prod : desc[idx].disable);
}
/* Program BLCG LTC registers with prod (gating) or disable values. */
void gv11b_blcg_ltc_load_gating_prod(struct gk20a *g,
	bool prod)
{
	const struct gating_desc *desc = gv11b_blcg_ltc;
	u32 count = sizeof(gv11b_blcg_ltc) / sizeof(struct gating_desc);
	u32 idx;

	/* Honour the platform-level BLCG enable flag. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
		return;

	for (idx = 0; idx < count; idx++)
		gk20a_writel(g, desc[idx].addr,
			prod ? desc[idx].prod : desc[idx].disable);
}
/* Program BLCG PWR CSB registers with prod (gating) or disable values. */
void gv11b_blcg_pwr_csb_load_gating_prod(struct gk20a *g,
	bool prod)
{
	const struct gating_desc *desc = gv11b_blcg_pwr_csb;
	u32 count = sizeof(gv11b_blcg_pwr_csb) / sizeof(struct gating_desc);
	u32 idx;

	/* Honour the platform-level BLCG enable flag. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
		return;

	for (idx = 0; idx < count; idx++)
		gk20a_writel(g, desc[idx].addr,
			prod ? desc[idx].prod : desc[idx].disable);
}
/* Program BLCG PMU registers with prod (gating) or disable values. */
void gv11b_blcg_pmu_load_gating_prod(struct gk20a *g,
	bool prod)
{
	const struct gating_desc *desc = gv11b_blcg_pmu;
	u32 count = sizeof(gv11b_blcg_pmu) / sizeof(struct gating_desc);
	u32 idx;

	/* Honour the platform-level BLCG enable flag. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
		return;

	for (idx = 0; idx < count; idx++)
		gk20a_writel(g, desc[idx].addr,
			prod ? desc[idx].prod : desc[idx].disable);
}
/* Program BLCG XBAR registers with prod (gating) or disable values. */
void gv11b_blcg_xbar_load_gating_prod(struct gk20a *g,
	bool prod)
{
	const struct gating_desc *desc = gv11b_blcg_xbar;
	u32 count = sizeof(gv11b_blcg_xbar) / sizeof(struct gating_desc);
	u32 idx;

	/* Honour the platform-level BLCG enable flag. */
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
		return;

	for (idx = 0; idx < count; idx++)
		gk20a_writel(g, desc[idx].addr,
			prod ? desc[idx].prod : desc[idx].disable);
}
/*
 * Program the GR power-gating register list with prod or disable values.
 *
 * NOTE(review): this PG list is gated on NVGPU_GPU_CAN_BLCG, not a
 * PG-specific flag — this matches the generated reglist convention, but
 * confirm it is intentional.
 */
void gr_gv11b_pg_gr_load_gating_prod(struct gk20a *g,
	bool prod)
{
	const struct gating_desc *desc = gv11b_pg_gr;
	u32 count = sizeof(gv11b_pg_gr) / sizeof(struct gating_desc);
	u32 idx;

	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
		return;

	for (idx = 0; idx < count; idx++)
		gk20a_writel(g, desc[idx].addr,
			prod ? desc[idx].prod : desc[idx].disable);
}
#endif /* __gv11b_gating_reglist_h__ */

View File

@@ -0,0 +1,99 @@
/*
* Copyright (c) 2016, NVIDIA Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "gk20a/gk20a.h"
/*
 * Clock/power gating entry points for gv11b (implemented in
 * gv11b_gating_reglist.c). Each helper writes either the "prod"
 * (gating enabled) or "disable" values for one hardware unit's
 * gating register list; prod == true selects the prod values.
 */

/* SLCG (second-level clock gating) loaders */
void gv11b_slcg_bus_load_gating_prod(struct gk20a *g,
	bool prod);
void gv11b_slcg_ce2_load_gating_prod(struct gk20a *g,
	bool prod);
void gv11b_slcg_chiplet_load_gating_prod(struct gk20a *g,
	bool prod);
void gv11b_slcg_ctxsw_firmware_load_gating_prod(struct gk20a *g,
	bool prod);
void gv11b_slcg_fb_load_gating_prod(struct gk20a *g,
	bool prod);
void gv11b_slcg_fifo_load_gating_prod(struct gk20a *g,
	bool prod);
void gr_gv11b_slcg_gr_load_gating_prod(struct gk20a *g,
	bool prod);
void ltc_gv11b_slcg_ltc_load_gating_prod(struct gk20a *g,
	bool prod);
void gv11b_slcg_perf_load_gating_prod(struct gk20a *g,
	bool prod);
void gv11b_slcg_priring_load_gating_prod(struct gk20a *g,
	bool prod);
void gv11b_slcg_pwr_csb_load_gating_prod(struct gk20a *g,
	bool prod);
void gv11b_slcg_pmu_load_gating_prod(struct gk20a *g,
	bool prod);
void gv11b_slcg_therm_load_gating_prod(struct gk20a *g,
	bool prod);
void gv11b_slcg_xbar_load_gating_prod(struct gk20a *g,
	bool prod);

/* BLCG (block-level clock gating) loaders */
void gv11b_blcg_bus_load_gating_prod(struct gk20a *g,
	bool prod);
void gv11b_blcg_ce_load_gating_prod(struct gk20a *g,
	bool prod);
void gv11b_blcg_ctxsw_firmware_load_gating_prod(struct gk20a *g,
	bool prod);
void gv11b_blcg_fb_load_gating_prod(struct gk20a *g,
	bool prod);
void gv11b_blcg_fifo_load_gating_prod(struct gk20a *g,
	bool prod);
void gv11b_blcg_gr_load_gating_prod(struct gk20a *g,
	bool prod);
void gv11b_blcg_ltc_load_gating_prod(struct gk20a *g,
	bool prod);
void gv11b_blcg_pwr_csb_load_gating_prod(struct gk20a *g,
	bool prod);
void gv11b_blcg_pmu_load_gating_prod(struct gk20a *g,
	bool prod);
void gv11b_blcg_xbar_load_gating_prod(struct gk20a *g,
	bool prod);

/* Power-gating (PG) loader for GR */
void gr_gv11b_pg_gr_load_gating_prod(struct gk20a *g,
	bool prod);

View File

@@ -0,0 +1,778 @@
/*
* GV11B Tegra HAL interface
*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/types.h>
#include <linux/tegra_gpu_t19x.h>
#include "gk20a/gk20a.h"
#include "gk20a/fifo_gk20a.h"
#include "gk20a/fecs_trace_gk20a.h"
#include "gk20a/css_gr_gk20a.h"
#include "gk20a/mc_gk20a.h"
#include "gk20a/mm_gk20a.h"
#include "gk20a/dbg_gpu_gk20a.h"
#include "gk20a/bus_gk20a.h"
#include "gk20a/flcn_gk20a.h"
#include "gk20a/regops_gk20a.h"
#include "gk20a/fb_gk20a.h"
#include "gk20a/pmu_gk20a.h"
#include "gk20a/gr_gk20a.h"
#include "gm20b/ltc_gm20b.h"
#include "gm20b/gr_gm20b.h"
#include "gm20b/fb_gm20b.h"
#include "gm20b/fifo_gm20b.h"
#include "gm20b/mm_gm20b.h"
#include "gm20b/acr_gm20b.h"
#include "gm20b/pmu_gm20b.h"
#include "gp10b/ltc_gp10b.h"
#include "gp10b/therm_gp10b.h"
#include "gp10b/mc_gp10b.h"
#include "gp10b/ce_gp10b.h"
#include "gp10b/priv_ring_gp10b.h"
#include "gp10b/fifo_gp10b.h"
#include "gp10b/fecs_trace_gp10b.h"
#include "gp10b/fb_gp10b.h"
#include "gp10b/mm_gp10b.h"
#include "gp10b/pmu_gp10b.h"
#include "gp10b/gr_gp10b.h"
#include "gp106/pmu_gp106.h"
#include "gp106/acr_gp106.h"
#include "gv100/gr_gv100.h"
#include "dbg_gpu_gv11b.h"
#include "hal_gv11b.h"
#include "css_gr_gv11b.h"
#include "gr_gv11b.h"
#include "mc_gv11b.h"
#include "ltc_gv11b.h"
#include "gv11b.h"
#include "ce_gv11b.h"
#include "gr_ctx_gv11b.h"
#include "mm_gv11b.h"
#include "pmu_gv11b.h"
#include "acr_gv11b.h"
#include "fb_gv11b.h"
#include "fifo_gv11b.h"
#include "gv11b_gating_reglist.h"
#include "regops_gv11b.h"
#include "subctx_gv11b.h"
#include "therm_gv11b.h"
#include <nvgpu/bus.h>
#include <nvgpu/debug.h>
#include <nvgpu/enabled.h>
#include <nvgpu/ctxsw_trace.h>
#include <nvgpu/hw/gv11b/hw_proj_gv11b.h>
#include <nvgpu/hw/gv11b/hw_fifo_gv11b.h>
#include <nvgpu/hw/gv11b/hw_ram_gv11b.h>
#include <nvgpu/hw/gv11b/hw_top_gv11b.h>
#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>
#include <nvgpu/hw/gv11b/hw_fuse_gv11b.h>
/*
 * Map a GPU_LIT_* litter key to its gv11b-specific constant.
 *
 * Returns the chip constant for known keys; for an unknown key it logs
 * an error, hits BUG(), and (if BUG() returns) yields EINVAL.
 */
int gv11b_get_litter_value(struct gk20a *g, int value)
{
	switch (value) {
	case GPU_LIT_NUM_GPCS:
		return proj_scal_litter_num_gpcs_v();
	case GPU_LIT_NUM_PES_PER_GPC:
		return proj_scal_litter_num_pes_per_gpc_v();
	case GPU_LIT_NUM_ZCULL_BANKS:
		return proj_scal_litter_num_zcull_banks_v();
	case GPU_LIT_NUM_TPC_PER_GPC:
		return proj_scal_litter_num_tpc_per_gpc_v();
	case GPU_LIT_NUM_SM_PER_TPC:
		return proj_scal_litter_num_sm_per_tpc_v();
	case GPU_LIT_NUM_FBPS:
		return proj_scal_litter_num_fbps_v();
	case GPU_LIT_GPC_BASE:
		return proj_gpc_base_v();
	case GPU_LIT_GPC_STRIDE:
		return proj_gpc_stride_v();
	case GPU_LIT_GPC_SHARED_BASE:
		return proj_gpc_shared_base_v();
	case GPU_LIT_TPC_IN_GPC_BASE:
		return proj_tpc_in_gpc_base_v();
	case GPU_LIT_TPC_IN_GPC_STRIDE:
		return proj_tpc_in_gpc_stride_v();
	case GPU_LIT_TPC_IN_GPC_SHARED_BASE:
		return proj_tpc_in_gpc_shared_base_v();
	case GPU_LIT_PPC_IN_GPC_BASE:
		return proj_ppc_in_gpc_base_v();
	case GPU_LIT_PPC_IN_GPC_SHARED_BASE:
		return proj_ppc_in_gpc_shared_base_v();
	case GPU_LIT_PPC_IN_GPC_STRIDE:
		return proj_ppc_in_gpc_stride_v();
	case GPU_LIT_ROP_BASE:
		return proj_rop_base_v();
	case GPU_LIT_ROP_STRIDE:
		return proj_rop_stride_v();
	case GPU_LIT_ROP_SHARED_BASE:
		return proj_rop_shared_base_v();
	case GPU_LIT_HOST_NUM_ENGINES:
		return proj_host_num_engines_v();
	case GPU_LIT_HOST_NUM_PBDMA:
		return proj_host_num_pbdma_v();
	case GPU_LIT_LTC_STRIDE:
		return proj_ltc_stride_v();
	case GPU_LIT_LTS_STRIDE:
		return proj_lts_stride_v();
	case GPU_LIT_SM_PRI_STRIDE:
		return proj_sm_stride_v();
	case GPU_LIT_SMPC_PRI_BASE:
		return proj_smpc_base_v();
	case GPU_LIT_SMPC_PRI_SHARED_BASE:
		return proj_smpc_shared_base_v();
	case GPU_LIT_SMPC_PRI_UNIQUE_BASE:
		return proj_smpc_unique_base_v();
	case GPU_LIT_SMPC_PRI_STRIDE:
		return proj_smpc_stride_v();
	/* Even though GV11B doesn't have an FBPA unit, the HW reports one,
	 * and the microcode as a result leaves space in the context buffer
	 * for one, so make sure SW accounts for this also.
	 */
	case GPU_LIT_NUM_FBPAS:
		return proj_scal_litter_num_fbpas_v();
	/* Hardcode FBPA values other than NUM_FBPAS to 0. */
	case GPU_LIT_FBPA_STRIDE:
	case GPU_LIT_FBPA_BASE:
	case GPU_LIT_FBPA_SHARED_BASE:
		return 0;
	case GPU_LIT_TWOD_CLASS:
		return FERMI_TWOD_A;
	case GPU_LIT_THREED_CLASS:
		return VOLTA_A;
	case GPU_LIT_COMPUTE_CLASS:
		return VOLTA_COMPUTE_A;
	case GPU_LIT_GPFIFO_CLASS:
		return VOLTA_CHANNEL_GPFIFO_A;
	case GPU_LIT_I2M_CLASS:
		return KEPLER_INLINE_TO_MEMORY_B;
	case GPU_LIT_DMA_COPY_CLASS:
		return VOLTA_DMA_COPY_A;
	default:
		nvgpu_err(g, "Missing definition %d", value);
		BUG();
		break;
	}

	/* Reached only if BUG() returns on this configuration. */
	return EINVAL;
}
/*
 * Static HAL dispatch table for gv11b. gv11b_init_hal() copies these
 * per-unit op structs into g->ops; entries set to NULL are features the
 * chip does not use through this interface.
 */
static const struct gpu_ops gv11b_ops = {
	.ltc = {
		.determine_L2_size_bytes = gp10b_determine_L2_size_bytes,
		.set_zbc_s_entry = gv11b_ltc_set_zbc_stencil_entry,
		.set_zbc_color_entry = gm20b_ltc_set_zbc_color_entry,
		.set_zbc_depth_entry = gm20b_ltc_set_zbc_depth_entry,
		.init_cbc = NULL,
		.init_fs_state = gv11b_ltc_init_fs_state,
		.init_comptags = gp10b_ltc_init_comptags,
		.cbc_ctrl = gm20b_ltc_cbc_ctrl,
		.isr = gv11b_ltc_isr,
		.cbc_fix_config = gv11b_ltc_cbc_fix_config,
		.flush = gm20b_flush_ltc,
		.set_enabled = gp10b_ltc_set_enabled,
	},
	.ce2 = {
		.isr_stall = gv11b_ce_isr,
		.isr_nonstall = gp10b_ce_nonstall_isr,
		.get_num_pce = gv11b_ce_get_num_pce,
	},
	.gr = {
		.get_patch_slots = gr_gv100_get_patch_slots,
		.init_gpc_mmu = gr_gv11b_init_gpc_mmu,
		.bundle_cb_defaults = gr_gv11b_bundle_cb_defaults,
		.cb_size_default = gr_gv11b_cb_size_default,
		.calc_global_ctx_buffer_size =
			gr_gv11b_calc_global_ctx_buffer_size,
		.commit_global_attrib_cb = gr_gv11b_commit_global_attrib_cb,
		.commit_global_bundle_cb = gr_gp10b_commit_global_bundle_cb,
		.commit_global_cb_manager = gr_gp10b_commit_global_cb_manager,
		.commit_global_pagepool = gr_gp10b_commit_global_pagepool,
		.handle_sw_method = gr_gv11b_handle_sw_method,
		.set_alpha_circular_buffer_size =
			gr_gv11b_set_alpha_circular_buffer_size,
		.set_circular_buffer_size = gr_gv11b_set_circular_buffer_size,
		.enable_hww_exceptions = gr_gv11b_enable_hww_exceptions,
		.is_valid_class = gr_gv11b_is_valid_class,
		.is_valid_gfx_class = gr_gv11b_is_valid_gfx_class,
		.is_valid_compute_class = gr_gv11b_is_valid_compute_class,
		.get_sm_dsm_perf_regs = gv11b_gr_get_sm_dsm_perf_regs,
		.get_sm_dsm_perf_ctrl_regs = gv11b_gr_get_sm_dsm_perf_ctrl_regs,
		.init_fs_state = gr_gv11b_init_fs_state,
		.set_hww_esr_report_mask = gv11b_gr_set_hww_esr_report_mask,
		.falcon_load_ucode = gr_gm20b_load_ctxsw_ucode_segments,
		.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode,
		.set_gpc_tpc_mask = gr_gv11b_set_gpc_tpc_mask,
		.get_gpc_tpc_mask = gr_gm20b_get_gpc_tpc_mask,
		.free_channel_ctx = gk20a_free_channel_ctx,
		.alloc_obj_ctx = gk20a_alloc_obj_ctx,
		.bind_ctxsw_zcull = gr_gk20a_bind_ctxsw_zcull,
		.get_zcull_info = gr_gk20a_get_zcull_info,
		.is_tpc_addr = gr_gm20b_is_tpc_addr,
		.get_tpc_num = gr_gm20b_get_tpc_num,
		.detect_sm_arch = gr_gv11b_detect_sm_arch,
		.add_zbc_color = gr_gp10b_add_zbc_color,
		.add_zbc_depth = gr_gp10b_add_zbc_depth,
		.zbc_set_table = gk20a_gr_zbc_set_table,
		.zbc_query_table = gr_gk20a_query_zbc,
		.pmu_save_zbc = gk20a_pmu_save_zbc,
		.add_zbc = gr_gk20a_add_zbc,
		.pagepool_default_size = gr_gv11b_pagepool_default_size,
		.init_ctx_state = gr_gp10b_init_ctx_state,
		.alloc_gr_ctx = gr_gp10b_alloc_gr_ctx,
		.free_gr_ctx = gr_gp10b_free_gr_ctx,
		.update_ctxsw_preemption_mode =
			gr_gp10b_update_ctxsw_preemption_mode,
		.dump_gr_regs = gr_gv11b_dump_gr_status_regs,
		.update_pc_sampling = gr_gm20b_update_pc_sampling,
		.get_fbp_en_mask = gr_gm20b_get_fbp_en_mask,
		.get_max_ltc_per_fbp = gr_gm20b_get_max_ltc_per_fbp,
		.get_max_lts_per_ltc = gr_gm20b_get_max_lts_per_ltc,
		.get_rop_l2_en_mask = gr_gm20b_rop_l2_en_mask,
		.get_max_fbps_count = gr_gm20b_get_max_fbps_count,
		.init_sm_dsm_reg_info = gv11b_gr_init_sm_dsm_reg_info,
		.wait_empty = gr_gv11b_wait_empty,
		.init_cyclestats = gr_gm20b_init_cyclestats,
		.set_sm_debug_mode = gv11b_gr_set_sm_debug_mode,
		.enable_cde_in_fecs = gr_gm20b_enable_cde_in_fecs,
		.bpt_reg_info = gv11b_gr_bpt_reg_info,
		.get_access_map = gr_gv11b_get_access_map,
		.handle_fecs_error = gr_gv11b_handle_fecs_error,
		.handle_sm_exception = gr_gk20a_handle_sm_exception,
		.handle_tex_exception = gr_gv11b_handle_tex_exception,
		.enable_gpc_exceptions = gr_gv11b_enable_gpc_exceptions,
		.enable_exceptions = gr_gv11b_enable_exceptions,
		.get_lrf_tex_ltc_dram_override = get_ecc_override_val,
		.update_smpc_ctxsw_mode = gr_gk20a_update_smpc_ctxsw_mode,
		.update_hwpm_ctxsw_mode = gr_gk20a_update_hwpm_ctxsw_mode,
		.record_sm_error_state = gv11b_gr_record_sm_error_state,
		.update_sm_error_state = gv11b_gr_update_sm_error_state,
		.clear_sm_error_state = gm20b_gr_clear_sm_error_state,
		.suspend_contexts = gr_gp10b_suspend_contexts,
		.resume_contexts = gr_gk20a_resume_contexts,
		.get_preemption_mode_flags = gr_gp10b_get_preemption_mode_flags,
		.init_sm_id_table = gr_gv100_init_sm_id_table,
		.load_smid_config = gr_gv11b_load_smid_config,
		.program_sm_id_numbering = gr_gv11b_program_sm_id_numbering,
		.is_ltcs_ltss_addr = gr_gm20b_is_ltcs_ltss_addr,
		.is_ltcn_ltss_addr = gr_gm20b_is_ltcn_ltss_addr,
		.split_lts_broadcast_addr = gr_gm20b_split_lts_broadcast_addr,
		.split_ltc_broadcast_addr = gr_gm20b_split_ltc_broadcast_addr,
		.setup_rop_mapping = gr_gv11b_setup_rop_mapping,
		.program_zcull_mapping = gr_gv11b_program_zcull_mapping,
		.commit_global_timeslice = gr_gv11b_commit_global_timeslice,
		.commit_inst = gr_gv11b_commit_inst,
		.write_zcull_ptr = gr_gv11b_write_zcull_ptr,
		.write_pm_ptr = gr_gv11b_write_pm_ptr,
		.init_elcg_mode = gr_gv11b_init_elcg_mode,
		.load_tpc_mask = gr_gv11b_load_tpc_mask,
		.inval_icache = gr_gk20a_inval_icache,
		.trigger_suspend = gv11b_gr_sm_trigger_suspend,
		.wait_for_pause = gr_gk20a_wait_for_pause,
		.resume_from_pause = gv11b_gr_resume_from_pause,
		.clear_sm_errors = gr_gk20a_clear_sm_errors,
		.tpc_enabled_exceptions = gr_gk20a_tpc_enabled_exceptions,
		.get_esr_sm_sel = gv11b_gr_get_esr_sm_sel,
		.sm_debugger_attached = gv11b_gr_sm_debugger_attached,
		.suspend_single_sm = gv11b_gr_suspend_single_sm,
		.suspend_all_sms = gv11b_gr_suspend_all_sms,
		.resume_single_sm = gv11b_gr_resume_single_sm,
		.resume_all_sms = gv11b_gr_resume_all_sms,
		.get_sm_hww_warp_esr = gv11b_gr_get_sm_hww_warp_esr,
		.get_sm_hww_global_esr = gv11b_gr_get_sm_hww_global_esr,
		.get_sm_no_lock_down_hww_global_esr_mask =
			gv11b_gr_get_sm_no_lock_down_hww_global_esr_mask,
		.lock_down_sm = gv11b_gr_lock_down_sm,
		.wait_for_sm_lock_down = gv11b_gr_wait_for_sm_lock_down,
		.clear_sm_hww = gv11b_gr_clear_sm_hww,
		.init_ovr_sm_dsm_perf = gv11b_gr_init_ovr_sm_dsm_perf,
		.get_ovr_perf_regs = gv11b_gr_get_ovr_perf_regs,
		.disable_rd_coalesce = gm20a_gr_disable_rd_coalesce,
		.set_boosted_ctx = gr_gp10b_set_boosted_ctx,
		.set_preemption_mode = gr_gp10b_set_preemption_mode,
		.set_czf_bypass = NULL,
		.pre_process_sm_exception = gr_gv11b_pre_process_sm_exception,
		.set_preemption_buffer_va = gr_gv11b_set_preemption_buffer_va,
		.init_preemption_state = NULL,
		.update_boosted_ctx = gr_gp10b_update_boosted_ctx,
		.set_bes_crop_debug3 = gr_gp10b_set_bes_crop_debug3,
		.create_gr_sysfs = gr_gv11b_create_sysfs,
		.set_ctxsw_preemption_mode = gr_gp10b_set_ctxsw_preemption_mode,
		.is_etpc_addr = gv11b_gr_pri_is_etpc_addr,
		.egpc_etpc_priv_addr_table = gv11b_gr_egpc_etpc_priv_addr_table,
		.handle_tpc_mpc_exception = gr_gv11b_handle_tpc_mpc_exception,
		.zbc_s_query_table = gr_gv11b_zbc_s_query_table,
		.load_zbc_s_default_tbl = gr_gv11b_load_stencil_default_tbl,
		.handle_gpc_gpcmmu_exception =
			gr_gv11b_handle_gpc_gpcmmu_exception,
		.add_zbc_type_s = gr_gv11b_add_zbc_type_s,
		.get_egpc_base = gv11b_gr_get_egpc_base,
		.get_egpc_etpc_num = gv11b_gr_get_egpc_etpc_num,
		.handle_gpc_gpccs_exception =
			gr_gv11b_handle_gpc_gpccs_exception,
		.load_zbc_s_tbl = gr_gv11b_load_stencil_tbl,
		.access_smpc_reg = gv11b_gr_access_smpc_reg,
		.is_egpc_addr = gv11b_gr_pri_is_egpc_addr,
		.add_zbc_s = gr_gv11b_add_zbc_stencil,
		.handle_gcc_exception = gr_gv11b_handle_gcc_exception,
		.init_sw_veid_bundle = gr_gv11b_init_sw_veid_bundle,
		.handle_tpc_sm_ecc_exception =
			gr_gv11b_handle_tpc_sm_ecc_exception,
		.decode_egpc_addr = gv11b_gr_decode_egpc_addr,
		.init_ctxsw_hdr_data = gr_gp10b_init_ctxsw_hdr_data,
	},
	.fb = {
		.reset = gv11b_fb_reset,
		.init_hw = gk20a_fb_init_hw,
		.init_fs_state = gv11b_fb_init_fs_state,
		.init_cbc = gv11b_fb_init_cbc,
		.set_mmu_page_size = gm20b_fb_set_mmu_page_size,
		.set_use_full_comp_tag_line =
			gm20b_fb_set_use_full_comp_tag_line,
		.compression_page_size = gp10b_fb_compression_page_size,
		.compressible_page_size = gp10b_fb_compressible_page_size,
		.vpr_info_fetch = gm20b_fb_vpr_info_fetch,
		.dump_vpr_wpr_info = gm20b_fb_dump_vpr_wpr_info,
		.read_wpr_info = gm20b_fb_read_wpr_info,
		.is_debug_mode_enabled = gm20b_fb_debug_mode_enabled,
		.set_debug_mode = gm20b_fb_set_debug_mode,
		.tlb_invalidate = gk20a_fb_tlb_invalidate,
		.hub_isr = gv11b_fb_hub_isr,
		.mem_unlock = NULL,
	},
	.clock_gating = {
		.slcg_bus_load_gating_prod =
			gv11b_slcg_bus_load_gating_prod,
		.slcg_ce2_load_gating_prod =
			gv11b_slcg_ce2_load_gating_prod,
		.slcg_chiplet_load_gating_prod =
			gv11b_slcg_chiplet_load_gating_prod,
		.slcg_ctxsw_firmware_load_gating_prod =
			gv11b_slcg_ctxsw_firmware_load_gating_prod,
		.slcg_fb_load_gating_prod =
			gv11b_slcg_fb_load_gating_prod,
		.slcg_fifo_load_gating_prod =
			gv11b_slcg_fifo_load_gating_prod,
		.slcg_gr_load_gating_prod =
			gr_gv11b_slcg_gr_load_gating_prod,
		.slcg_ltc_load_gating_prod =
			ltc_gv11b_slcg_ltc_load_gating_prod,
		.slcg_perf_load_gating_prod =
			gv11b_slcg_perf_load_gating_prod,
		.slcg_priring_load_gating_prod =
			gv11b_slcg_priring_load_gating_prod,
		.slcg_pmu_load_gating_prod =
			gv11b_slcg_pmu_load_gating_prod,
		.slcg_therm_load_gating_prod =
			gv11b_slcg_therm_load_gating_prod,
		.slcg_xbar_load_gating_prod =
			gv11b_slcg_xbar_load_gating_prod,
		.blcg_bus_load_gating_prod =
			gv11b_blcg_bus_load_gating_prod,
		.blcg_ce_load_gating_prod =
			gv11b_blcg_ce_load_gating_prod,
		.blcg_ctxsw_firmware_load_gating_prod =
			gv11b_blcg_ctxsw_firmware_load_gating_prod,
		.blcg_fb_load_gating_prod =
			gv11b_blcg_fb_load_gating_prod,
		.blcg_fifo_load_gating_prod =
			gv11b_blcg_fifo_load_gating_prod,
		.blcg_gr_load_gating_prod =
			gv11b_blcg_gr_load_gating_prod,
		.blcg_ltc_load_gating_prod =
			gv11b_blcg_ltc_load_gating_prod,
		.blcg_pwr_csb_load_gating_prod =
			gv11b_blcg_pwr_csb_load_gating_prod,
		.blcg_pmu_load_gating_prod =
			gv11b_blcg_pmu_load_gating_prod,
		.blcg_xbar_load_gating_prod =
			gv11b_blcg_xbar_load_gating_prod,
		.pg_gr_load_gating_prod =
			gr_gv11b_pg_gr_load_gating_prod,
	},
	.fifo = {
		.get_preempt_timeout = gv11b_fifo_get_preempt_timeout,
		.init_fifo_setup_hw = gv11b_init_fifo_setup_hw,
		.bind_channel = channel_gm20b_bind,
		.unbind_channel = channel_gv11b_unbind,
		.disable_channel = gk20a_fifo_disable_channel,
		.enable_channel = gk20a_fifo_enable_channel,
		.alloc_inst = gk20a_fifo_alloc_inst,
		.free_inst = gk20a_fifo_free_inst,
		.setup_ramfc = channel_gv11b_setup_ramfc,
		.channel_set_timeslice = gk20a_fifo_set_timeslice,
		.default_timeslice_us = gk20a_fifo_default_timeslice_us,
		.setup_userd = gk20a_fifo_setup_userd,
		.userd_gp_get = gv11b_userd_gp_get,
		.userd_gp_put = gv11b_userd_gp_put,
		.userd_pb_get = gv11b_userd_pb_get,
		.pbdma_acquire_val = gk20a_fifo_pbdma_acquire_val,
		.preempt_channel = gv11b_fifo_preempt_channel,
		.preempt_tsg = gv11b_fifo_preempt_tsg,
		.enable_tsg = gv11b_fifo_enable_tsg,
		.disable_tsg = gk20a_disable_tsg,
		.tsg_verify_channel_status = gk20a_fifo_tsg_unbind_channel_verify_status,
		.tsg_verify_status_ctx_reload = gm20b_fifo_tsg_verify_status_ctx_reload,
		.tsg_verify_status_faulted = gv11b_fifo_tsg_verify_status_faulted,
		.update_runlist = gk20a_fifo_update_runlist,
		.trigger_mmu_fault = NULL,
		.get_mmu_fault_info = NULL,
		.wait_engine_idle = gk20a_fifo_wait_engine_idle,
		.get_num_fifos = gv11b_fifo_get_num_fifos,
		.get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
		.set_runlist_interleave = gk20a_fifo_set_runlist_interleave,
		.tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice,
		.force_reset_ch = gk20a_fifo_force_reset_ch,
		.engine_enum_from_type = gp10b_fifo_engine_enum_from_type,
		.device_info_data_parse = gp10b_device_info_data_parse,
		.eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
		.init_engine_info = gk20a_fifo_init_engine_info,
		.runlist_entry_size = ram_rl_entry_size_v,
		.get_tsg_runlist_entry = gv11b_get_tsg_runlist_entry,
		.get_ch_runlist_entry = gv11b_get_ch_runlist_entry,
		.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
		.dump_pbdma_status = gk20a_dump_pbdma_status,
		.dump_eng_status = gv11b_dump_eng_status,
		.dump_channel_status_ramfc = gv11b_dump_channel_status_ramfc,
		.intr_0_error_mask = gv11b_fifo_intr_0_error_mask,
		.is_preempt_pending = gv11b_fifo_is_preempt_pending,
		.init_pbdma_intr_descs = gv11b_fifo_init_pbdma_intr_descs,
		.reset_enable_hw = gv11b_init_fifo_reset_enable_hw,
		.teardown_ch_tsg = gv11b_fifo_teardown_ch_tsg,
		.handle_sched_error = gv11b_fifo_handle_sched_error,
		.handle_pbdma_intr_0 = gv11b_fifo_handle_pbdma_intr_0,
		.handle_pbdma_intr_1 = gv11b_fifo_handle_pbdma_intr_1,
		.init_eng_method_buffers = gv11b_fifo_init_eng_method_buffers,
		.deinit_eng_method_buffers =
			gv11b_fifo_deinit_eng_method_buffers,
		.tsg_bind_channel = gk20a_tsg_bind_channel,
		.tsg_unbind_channel = gk20a_tsg_unbind_channel,
#ifdef CONFIG_TEGRA_GK20A_NVHOST
		.alloc_syncpt_buf = gv11b_fifo_alloc_syncpt_buf,
		.free_syncpt_buf = gv11b_fifo_free_syncpt_buf,
		.add_syncpt_wait_cmd = gv11b_fifo_add_syncpt_wait_cmd,
		.get_syncpt_wait_cmd_size = gv11b_fifo_get_syncpt_wait_cmd_size,
		.add_syncpt_incr_cmd = gv11b_fifo_add_syncpt_incr_cmd,
		.get_syncpt_incr_cmd_size = gv11b_fifo_get_syncpt_incr_cmd_size,
#endif
		.resetup_ramfc = NULL,
		.device_info_fault_id = top_device_info_data_fault_id_enum_v,
		.free_channel_ctx_header = gv11b_free_subctx_header,
		.preempt_ch_tsg = gv11b_fifo_preempt_ch_tsg,
		.handle_ctxsw_timeout = gv11b_fifo_handle_ctxsw_timeout,
	},
	.gr_ctx = {
		.get_netlist_name = gr_gv11b_get_netlist_name,
		.is_fw_defined = gr_gv11b_is_firmware_defined,
	},
#ifdef CONFIG_GK20A_CTXSW_TRACE
	/* FECS trace is not wired up for gv11b yet: all ops are NULL. */
	.fecs_trace = {
		.alloc_user_buffer = NULL,
		.free_user_buffer = NULL,
		.mmap_user_buffer = NULL,
		.init = NULL,
		.deinit = NULL,
		.enable = NULL,
		.disable = NULL,
		.is_enabled = NULL,
		.reset = NULL,
		.flush = NULL,
		.poll = NULL,
		.bind_channel = NULL,
		.unbind_channel = NULL,
		.max_entries = NULL,
	},
#endif /* CONFIG_GK20A_CTXSW_TRACE */
	.mm = {
		.support_sparse = gm20b_mm_support_sparse,
		.gmmu_map = gk20a_locked_gmmu_map,
		.gmmu_unmap = gk20a_locked_gmmu_unmap,
		.vm_bind_channel = gk20a_vm_bind_channel,
		.fb_flush = gk20a_mm_fb_flush,
		.l2_invalidate = gk20a_mm_l2_invalidate,
		.l2_flush = gv11b_mm_l2_flush,
		.cbc_clean = gk20a_mm_cbc_clean,
		.set_big_page_size = gm20b_mm_set_big_page_size,
		.get_big_page_sizes = gm20b_mm_get_big_page_sizes,
		.get_default_big_page_size = gp10b_mm_get_default_big_page_size,
		.gpu_phys_addr = gv11b_gpu_phys_addr,
		.get_iommu_bit = gp10b_mm_get_iommu_bit,
		.get_mmu_levels = gp10b_mm_get_mmu_levels,
		.init_pdb = gp10b_mm_init_pdb,
		.init_mm_setup_hw = gv11b_init_mm_setup_hw,
		.is_bar1_supported = gv11b_mm_is_bar1_supported,
		.alloc_inst_block = gk20a_alloc_inst_block,
		.init_inst_block = gv11b_init_inst_block,
		.mmu_fault_pending = gv11b_mm_mmu_fault_pending,
		.get_kind_invalid = gm20b_get_kind_invalid,
		.get_kind_pitch = gm20b_get_kind_pitch,
		/* NOTE(review): "gb10b" spelling matches the symbol as
		 * defined in the gv11b mm code — confirm, do not "fix". */
		.init_bar2_vm = gb10b_init_bar2_vm,
		.init_bar2_mm_hw_setup = gv11b_init_bar2_mm_hw_setup,
		.remove_bar2_vm = gv11b_mm_remove_bar2_vm,
		.fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy,
	},
	.therm = {
		.init_therm_setup_hw = gp10b_init_therm_setup_hw,
		.elcg_init_idle_filters = gv11b_elcg_init_idle_filters,
	},
	.pmu = {
		.pmu_setup_elpg = gp10b_pmu_setup_elpg,
		.pmu_get_queue_head = pwr_pmu_queue_head_r,
		.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
		.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
		.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
		.pmu_queue_head = gk20a_pmu_queue_head,
		.pmu_queue_tail = gk20a_pmu_queue_tail,
		.pmu_msgq_tail = gk20a_pmu_msgq_tail,
		.pmu_mutex_size = pwr_pmu_mutex__size_1_v,
		.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
		.pmu_mutex_release = gk20a_pmu_mutex_release,
		.write_dmatrfbase = gp10b_write_dmatrfbase,
		.pmu_elpg_statistics = gp106_pmu_elpg_statistics,
		.pmu_pg_init_param = gv11b_pg_gr_init,
		.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list,
		.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list,
		.dump_secure_fuses = pmu_dump_security_fuses_gp10b,
		.reset_engine = gp106_pmu_engine_reset,
		.is_engine_in_reset = gp106_pmu_is_engine_in_reset,
		.pmu_nsbootstrap = gv11b_pmu_bootstrap,
		.pmu_pg_set_sub_feature_mask = gv11b_pg_set_subfeature_mask,
		.is_pmu_supported = gv11b_is_pmu_supported,
	},
	.regops = {
		.get_global_whitelist_ranges =
			gv11b_get_global_whitelist_ranges,
		.get_global_whitelist_ranges_count =
			gv11b_get_global_whitelist_ranges_count,
		.get_context_whitelist_ranges =
			gv11b_get_context_whitelist_ranges,
		.get_context_whitelist_ranges_count =
			gv11b_get_context_whitelist_ranges_count,
		.get_runcontrol_whitelist = gv11b_get_runcontrol_whitelist,
		.get_runcontrol_whitelist_count =
			gv11b_get_runcontrol_whitelist_count,
		.get_runcontrol_whitelist_ranges =
			gv11b_get_runcontrol_whitelist_ranges,
		.get_runcontrol_whitelist_ranges_count =
			gv11b_get_runcontrol_whitelist_ranges_count,
		.get_qctl_whitelist = gv11b_get_qctl_whitelist,
		.get_qctl_whitelist_count = gv11b_get_qctl_whitelist_count,
		.get_qctl_whitelist_ranges = gv11b_get_qctl_whitelist_ranges,
		.get_qctl_whitelist_ranges_count =
			gv11b_get_qctl_whitelist_ranges_count,
		.apply_smpc_war = gv11b_apply_smpc_war,
	},
	.mc = {
		.intr_enable = mc_gv11b_intr_enable,
		.intr_unit_config = mc_gp10b_intr_unit_config,
		.isr_stall = mc_gp10b_isr_stall,
		.intr_stall = mc_gp10b_intr_stall,
		.intr_stall_pause = mc_gp10b_intr_stall_pause,
		.intr_stall_resume = mc_gp10b_intr_stall_resume,
		.intr_nonstall = mc_gp10b_intr_nonstall,
		.intr_nonstall_pause = mc_gp10b_intr_nonstall_pause,
		.intr_nonstall_resume = mc_gp10b_intr_nonstall_resume,
		.enable = gk20a_mc_enable,
		.disable = gk20a_mc_disable,
		.reset = gk20a_mc_reset,
		.boot_0 = gk20a_mc_boot_0,
		.is_intr1_pending = mc_gp10b_is_intr1_pending,
		.is_intr_hub_pending = gv11b_mc_is_intr_hub_pending,
	},
	.debug = {
		.show_dump = gk20a_debug_show_dump,
	},
	.dbg_session_ops = {
		.exec_reg_ops = exec_regops_gk20a,
		.dbg_set_powergate = dbg_set_powergate,
		.check_and_set_global_reservation =
			nvgpu_check_and_set_global_reservation,
		.check_and_set_context_reservation =
			nvgpu_check_and_set_context_reservation,
		.release_profiler_reservation =
			nvgpu_release_profiler_reservation,
		.perfbuffer_enable = gv11b_perfbuf_enable_locked,
		.perfbuffer_disable = gv11b_perfbuf_disable_locked,
	},
	.bus = {
		.init_hw = gk20a_bus_init_hw,
		.isr = gk20a_bus_isr,
		.read_ptimer = gk20a_read_ptimer,
		.get_timestamps_zipper = nvgpu_get_timestamps_zipper,
		.bar1_bind = NULL,
	},
#if defined(CONFIG_GK20A_CYCLE_STATS)
	.css = {
		.enable_snapshot = gv11b_css_hw_enable_snapshot,
		.disable_snapshot = gv11b_css_hw_disable_snapshot,
		.check_data_available = gv11b_css_hw_check_data_available,
		.set_handled_snapshots = css_hw_set_handled_snapshots,
		.allocate_perfmon_ids = css_gr_allocate_perfmon_ids,
		.release_perfmon_ids = css_gr_release_perfmon_ids,
	},
#endif
	.falcon = {
		.falcon_hal_sw_init = gk20a_falcon_hal_sw_init,
	},
	.priv_ring = {
		.isr = gp10b_priv_ring_isr,
	},
	.chip_init_gpu_characteristics = gv11b_init_gpu_characteristics,
	.get_litter_value = gv11b_get_litter_value,
};
/*
 * Install the gv11b HAL: copy the per-unit op tables from the static
 * gv11b_ops template into g->ops, then patch in the PMU/GR ops that
 * depend on the priv-security fuse state.
 *
 * Fixes vs. original: statement separators use ';' consistently instead
 * of a mix of ',' and ';', and the dead duplicate assignment of
 * pmu_setup_hw_and_bootstrap in the non-secure branch (the gk20a value
 * was immediately overwritten by the gp10b one) is removed.
 *
 * Returns 0 (this path currently cannot fail).
 */
int gv11b_init_hal(struct gk20a *g)
{
	struct gpu_ops *gops = &g->ops;
	u32 val;
	bool priv_security;

	gops->ltc = gv11b_ops.ltc;
	gops->ce2 = gv11b_ops.ce2;
	gops->gr = gv11b_ops.gr;
	gops->fb = gv11b_ops.fb;
	gops->clock_gating = gv11b_ops.clock_gating;
	gops->fifo = gv11b_ops.fifo;
	gops->gr_ctx = gv11b_ops.gr_ctx;
	gops->mm = gv11b_ops.mm;
#ifdef CONFIG_GK20A_CTXSW_TRACE
	gops->fecs_trace = gv11b_ops.fecs_trace;
#endif
	gops->therm = gv11b_ops.therm;
	gops->pmu = gv11b_ops.pmu;
	gops->regops = gv11b_ops.regops;
	gops->mc = gv11b_ops.mc;
	gops->debug = gv11b_ops.debug;
	gops->dbg_session_ops = gv11b_ops.dbg_session_ops;
	gops->bus = gv11b_ops.bus;
#if defined(CONFIG_GK20A_CYCLE_STATS)
	gops->css = gv11b_ops.css;
#endif
	gops->falcon = gv11b_ops.falcon;
	gops->priv_ring = gv11b_ops.priv_ring;

	/* Lone functions */
	gops->chip_init_gpu_characteristics =
		gv11b_ops.chip_init_gpu_characteristics;
	gops->get_litter_value = gv11b_ops.get_litter_value;

	/*
	 * NOTE(review): these are informational messages logged via
	 * pr_err (error level), presumably so they always show up in the
	 * kernel log; consider an info-level log helper instead.
	 */
	val = gk20a_readl(g, fuse_opt_priv_sec_en_r());
	if (val) {
		priv_security = true;
		pr_err("priv security is enabled\n");
	} else {
		priv_security = false;
		pr_err("priv security is disabled\n");
	}
	__nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, false);
	__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, priv_security);
	__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, priv_security);

	/* priv security dependent ops */
	if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
		/* Add in ops from gm20b acr */
		gops->pmu.prepare_ucode = gp106_prepare_ucode_blob;
		gops->pmu.pmu_setup_hw_and_bootstrap = gv11b_bootstrap_hs_flcn;
		gops->pmu.get_wpr = gm20b_wpr_info;
		gops->pmu.alloc_blob_space = gm20b_alloc_blob_space;
		gops->pmu.pmu_populate_loader_cfg =
			gp106_pmu_populate_loader_cfg;
		gops->pmu.flcn_populate_bl_dmem_desc =
			gp106_flcn_populate_bl_dmem_desc;
		gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt;
		gops->pmu.falcon_clear_halt_interrupt_status =
			clear_halt_interrupt_status;
		gops->pmu.init_falcon_setup_hw = gv11b_init_pmu_setup_hw1;
		gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
		gops->pmu.load_lsfalcon_ucode = gp10b_load_falcon_ucode;
		gops->pmu.is_lazy_bootstrap = gv11b_is_lazy_bootstrap;
		gops->pmu.is_priv_load = gv11b_is_priv_load;
		gops->gr.load_ctxsw_ucode = gr_gm20b_load_ctxsw_ucode;
	} else {
		/* Inherit from gk20a */
		gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob;
		gops->pmu.load_lsfalcon_ucode = NULL;
		gops->pmu.init_wpr_region = NULL;
		/*
		 * Only the gp10b setup routine is kept; the original code
		 * first assigned gk20a_init_pmu_setup_hw1 and immediately
		 * overwrote it with this value.
		 */
		gops->pmu.pmu_setup_hw_and_bootstrap = gp10b_init_pmu_setup_hw1;
		gops->gr.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode;
	}

	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
	g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT;
	g->name = "gv11b";

	return 0;
}

View File

@@ -0,0 +1,31 @@
/*
 * GV11B Tegra HAL interface
 *
 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#ifndef _NVGPU_HAL_GV11B_H
#define _NVGPU_HAL_GV11B_H

struct gk20a;

/*
 * Install the gv11b HAL function pointers into g->ops.
 * (Parameter renamed from the misleading "gops" -- it is the gk20a
 * device pointer, matching the definition.)
 */
int gv11b_init_hal(struct gk20a *g);

/* Look up a gv11b per-chip "litter" constant for the given key. */
int gv11b_get_litter_value(struct gk20a *g, int value);

#endif

View File

@@ -0,0 +1,205 @@
/*
* GV11B LTC
*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "gk20a/gk20a.h"
#include "gp10b/ltc_gp10b.h"
#include "ltc_gv11b.h"
#include <nvgpu/hw/gv11b/hw_ltc_gv11b.h>
#include <nvgpu/hw/gv11b/hw_mc_gv11b.h>
#include <nvgpu/hw/gv11b/hw_top_gv11b.h>
#include <nvgpu/hw/gv11b/hw_mc_gv11b.h>
#include <nvgpu/hw/gv11b/hw_pri_ringmaster_gv11b.h>
/*
 * Program the L2 ZBC stencil clear value for the given table index.
 *
 * The user-visible index is offset by GK20A_STARTOF_ZBC_TABLE to skip
 * the reserved entries at the start of the hardware table. The stencil
 * clear value is carried in the zbc_entry's depth field (shared storage
 * -- NOTE(review): confirm against the zbc_entry definition).
 */
void gv11b_ltc_set_zbc_stencil_entry(struct gk20a *g,
					struct zbc_entry *stencil_val,
					u32 index)
{
	u32 hw_index = GK20A_STARTOF_ZBC_TABLE + index;

	gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
		     ltc_ltcs_ltss_dstg_zbc_index_address_f(hw_index));
	gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_r(),
		     stencil_val->depth);

	/* Read back the index register -- presumably to commit/flush the
	 * posted writes before returning; confirm with HW docs. */
	gk20a_readl(g, ltc_ltcs_ltss_dstg_zbc_index_r());
}
/*
 * Bring up the gv11b L2 (LTC) floorsweep state:
 *  - reset PFB and L2 through the master controller,
 *  - enable L2 engine-level power gating (ELPG),
 *  - cache the max/active LTC counts,
 *  - disable the VDC 4:2 compression path,
 *  - leave only the ECC LTC interrupts enabled.
 */
void gv11b_ltc_init_fs_state(struct gk20a *g)
{
	u32 ltc_intr;
	u32 reg;

	gk20a_dbg_info("initialize gv11b l2");

	/* Reset PFB and L2 together via the MC. */
	g->ops.mc.reset(g, mc_enable_pfb_enabled_f() |
				mc_enable_l2_enabled_f());

	/* Allow engine-level power gating for the L2. */
	reg = gk20a_readl(g, mc_elpg_enable_r());
	reg |= mc_elpg_enable_l2_enabled_f();
	gk20a_writel(g, mc_elpg_enable_r(), reg);

	g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r());
	g->ltc_count = gk20a_readl(g, pri_ringmaster_enum_ltc_r());
	gk20a_dbg_info("%u ltcs out of %u", g->ltc_count, g->max_ltc_count);

	/*
	 * NOTE(review): reads the unicast (ltc0/lts0) cfg0 register but
	 * writes the broadcast (ltcs/ltss) one -- presumably the unicast
	 * value serves as a template for all LTC/slices; confirm intent.
	 */
	gk20a_writel(g, ltc_ltcs_ltss_dstg_cfg0_r(),
		gk20a_readl(g, ltc_ltc0_lts0_dstg_cfg0_r()) |
		ltc_ltcs_ltss_dstg_cfg0_vdc_4to2_disable_m());

	/* Disable LTC interrupts */
	reg = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
	reg &= ~ltc_ltcs_ltss_intr_en_evicted_cb_m();
	reg &= ~ltc_ltcs_ltss_intr_en_illegal_compstat_access_m();
	gk20a_writel(g, ltc_ltcs_ltss_intr_r(), reg);

	/* Enable ECC interrupts */
	ltc_intr = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
	ltc_intr |= ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f() |
			ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f();
	gk20a_writel(g, ltc_ltcs_ltss_intr_r(),
			ltc_intr);
}
/*
 * LTC interrupt service routine.
 *
 * For every LTC flagged in the MC LTC interrupt summary, walks each
 * slice and services L2 cache ECC corrected/uncorrected interrupts:
 * reads status/address/counter registers, clears counters and status,
 * accumulates the per-LTC error totals into g->ecc, and logs which L2
 * stage (rstg/tstg/dstg) reported the error. Remaining (non-ECC) LTC
 * interrupts are delegated to the gp10b ISR at the end.
 */
void gv11b_ltc_isr(struct gk20a *g)
{
	u32 mc_intr, ltc_intr3;
	unsigned int ltc, slice;
	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
	u32 ecc_status, ecc_addr, corrected_cnt, uncorrected_cnt;
	u32 corrected_delta, uncorrected_delta;
	u32 corrected_overflow, uncorrected_overflow;
	u32 ltc_corrected, ltc_uncorrected;

	mc_intr = gk20a_readl(g, mc_intr_ltc_r());
	for (ltc = 0; ltc < g->ltc_count; ltc++) {
		/* Skip LTCs with no pending interrupt bit. */
		if ((mc_intr & 1 << ltc) == 0)
			continue;
		ltc_corrected = ltc_uncorrected = 0;

		for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
			/* Register offset of this LTC/slice pair. */
			u32 offset = ltc_stride * ltc + lts_stride * slice;

			ltc_intr3 = gk20a_readl(g, ltc_ltc0_lts0_intr3_r() +
						offset);

			/* Detect and handle ECC PARITY errors */
			if (ltc_intr3 &
				(ltc_ltcs_ltss_intr3_ecc_uncorrected_m() |
				 ltc_ltcs_ltss_intr3_ecc_corrected_m())) {

				ecc_status = gk20a_readl(g,
					ltc_ltc0_lts0_l2_cache_ecc_status_r() +
					offset);
				ecc_addr = gk20a_readl(g,
					ltc_ltc0_lts0_l2_cache_ecc_address_r() +
					offset);
				corrected_cnt = gk20a_readl(g,
					ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r() + offset);
				uncorrected_cnt = gk20a_readl(g,
					ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r() + offset);

				/* Extract the running totals from the HW counters. */
				corrected_delta =
					ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_v(corrected_cnt);
				uncorrected_delta =
					ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_v(uncorrected_cnt);
				corrected_overflow = ecc_status &
					ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_total_counter_overflow_m();
				uncorrected_overflow = ecc_status &
					ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_total_counter_overflow_m();

				/* clear the interrupt */
				if ((corrected_delta > 0) || corrected_overflow) {
					gk20a_writel(g, ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r() + offset, 0);
				}
				if ((uncorrected_delta > 0) || uncorrected_overflow) {
					gk20a_writel(g,
						ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r() + offset, 0);
				}

				/* Reset the ECC status to re-arm detection. */
				gk20a_writel(g, ltc_ltc0_lts0_l2_cache_ecc_status_r() + offset,
					ltc_ltc0_lts0_l2_cache_ecc_status_reset_task_f());

				/* update counters per slice */
				if (corrected_overflow)
					corrected_delta += (0x1UL << ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_s());
				if (uncorrected_overflow)
					uncorrected_delta += (0x1UL << ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_s());

				ltc_corrected += corrected_delta;
				ltc_uncorrected += uncorrected_delta;
				nvgpu_log(g, gpu_dbg_intr,
					"ltc:%d lts: %d cache ecc interrupt intr: 0x%x", ltc, slice, ltc_intr3);

				/* Log which L2 stage reported the error. */
				if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_m())
					nvgpu_log(g, gpu_dbg_intr, "rstg ecc error corrected");
				if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m())
					nvgpu_log(g, gpu_dbg_intr, "rstg ecc error uncorrected");
				if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_m())
					nvgpu_log(g, gpu_dbg_intr, "tstg ecc error corrected");
				if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_m())
					nvgpu_log(g, gpu_dbg_intr, "tstg ecc error uncorrected");
				if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_m())
					nvgpu_log(g, gpu_dbg_intr, "dstg ecc error corrected");
				if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_m())
					nvgpu_log(g, gpu_dbg_intr, "dstg ecc error uncorrected");

				if (corrected_overflow || uncorrected_overflow)
					nvgpu_info(g, "ecc counter overflow!");

				nvgpu_log(g, gpu_dbg_intr,
					"ecc error address: 0x%x", ecc_addr);
			}
		}

		/* Publish the per-LTC totals to the ECC statistics. */
		g->ecc.ltc.t19x.l2_cache_corrected_err_count.counters[ltc] +=
			ltc_corrected;
		g->ecc.ltc.t19x.l2_cache_uncorrected_err_count.counters[ltc] +=
			ltc_uncorrected;
	}

	/* fallback to other interrupts */
	gp10b_ltc_isr(g);
}
/*
 * Scale a CBC (compression backing) configuration value by the number
 * of active LTCs: doubled for 2 LTCs, unchanged for 1. Any other count
 * is reported as an error and the value is returned unscaled.
 */
u32 gv11b_ltc_cbc_fix_config(struct gk20a *g, int base)
{
	u32 reg = gk20a_readl(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r());
	u32 active_ltcs = ltc_ltcs_ltss_cbc_num_active_ltcs__v(reg);

	if (active_ltcs == 2)
		return base * 2;

	if (active_ltcs != 1)
		nvgpu_err(g, "Invalid number of active ltcs: %08x", reg);

	return base;
}

View File

@@ -0,0 +1,34 @@
/*
 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#ifndef LTC_GV11B_H
#define LTC_GV11B_H

struct gk20a;
/*
 * Forward-declare zbc_entry: the original header used the tag only
 * inside a prototype, giving it prototype scope (compiler warning and
 * a distinct, incompatible type per translation unit).
 */
struct zbc_entry;

/* Program the L2 ZBC stencil clear value for table entry @index. */
void gv11b_ltc_set_zbc_stencil_entry(struct gk20a *g,
					struct zbc_entry *stencil_val,
					u32 index);
/* Bring up gv11b L2 floorsweep state and interrupt enables. */
void gv11b_ltc_init_fs_state(struct gk20a *g);
/* Service L2 ECC interrupts, then fall back to the gp10b LTC ISR. */
void gv11b_ltc_isr(struct gk20a *g);
/* Scale a CBC config value by the number of active LTCs. */
u32 gv11b_ltc_cbc_fix_config(struct gk20a *g, int base);

#endif

View File

@@ -0,0 +1,92 @@
/*
* GV11B master
*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/types.h>
#include "gk20a/gk20a.h"
#include "gp10b/mc_gp10b.h"
#include "mc_gv11b.h"
#include "fb_gv11b.h"
#include <nvgpu/hw/gv11b/hw_mc_gv11b.h>
/*
 * Configure the top-level (MC) interrupt enables for gv11b.
 *
 * All stalling and non-stalling enables are first cleared, and the HUB
 * interrupts (routed through the FB hub registers on this chip) are
 * disabled while the restore masks are rebuilt; both trees are then
 * re-enabled from those masks.
 */
void mc_gv11b_intr_enable(struct gk20a *g)
{
	u32 eng_intr_mask = gk20a_fifo_engine_interrupt_mask(g);

	/* Mask everything while reprogramming. */
	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
				0xffffffff);
	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
				0xffffffff);
	gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_ALL);

	/* Masks used to restore enables after ISR pause/resume. */
	g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING] =
				 mc_intr_pfifo_pending_f() |
				 mc_intr_hub_pending_f() |
				 mc_intr_priv_ring_pending_f() |
				 mc_intr_pbus_pending_f() |
				 mc_intr_ltc_pending_f() |
				 eng_intr_mask;
	g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING] =
				 mc_intr_pfifo_pending_f()
			     | eng_intr_mask;

	/* TODO: Enable PRI faults for HUB ECC err intr */
	gv11b_fb_enable_hub_intr(g, STALL_REG_INDEX, g->mm.hub_intr_types);

	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_STALLING),
			g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING]);

	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_NONSTALLING),
			g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING]);
}
/* True when the given MC interrupt word has the HUB bit set. */
bool gv11b_mc_is_intr_hub_pending(struct gk20a *g, u32 mc_intr_0)
{
	return (mc_intr_0 & mc_intr_hub_pending_f()) != 0U;
}
/*
 * Check whether the stalling interrupt line has work for the given
 * active engine: either the engine's own interrupt bit, or one of the
 * chip-level stalling sources (PFIFO, HUB, priv ring, PBUS, LTC).
 */
bool gv11b_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 act_eng_id)
{
	u32 pending = gk20a_readl(g, mc_intr_r(0));
	u32 watch_mask = gk20a_fifo_act_eng_interrupt_mask(g, act_eng_id) |
			 mc_intr_pfifo_pending_f() |
			 mc_intr_hub_pending_f() |
			 mc_intr_priv_ring_pending_f() |
			 mc_intr_pbus_pending_f() |
			 mc_intr_ltc_pending_f();

	return (pending & watch_mask) != 0U;
}

View File

@@ -0,0 +1,30 @@
/*
 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#ifndef MC_GV11B_H
#define MC_GV11B_H
struct gk20a;

/* Configure gv11b MC interrupt enables (stalling + non-stalling). */
void mc_gv11b_intr_enable(struct gk20a *g);
/* True when the HUB bit is set in the given MC interrupt word. */
bool gv11b_mc_is_intr_hub_pending(struct gk20a *g, u32 mc_intr_0);
/* True when a stalling or engine interrupt is pending for act_eng_id. */
bool gv11b_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 act_eng_id);
#endif

View File

@@ -0,0 +1,330 @@
/*
* GV11B MMU
*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/pm_runtime.h>
#include <nvgpu/kmem.h>
#include <nvgpu/dma.h>
#include <nvgpu/log.h>
#include <nvgpu/mm.h>
#include "gk20a/gk20a.h"
#include "gk20a/mm_gk20a.h"
#include "gp10b/mm_gp10b.h"
#include "gp10b/mc_gp10b.h"
#include "mm_gv11b.h"
#include "fb_gv11b.h"
#include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
#include <nvgpu/hw/gv11b/hw_gmmu_gv11b.h>
#include <nvgpu/hw/gv11b/hw_bus_gv11b.h>
#define NVGPU_L3_ALLOC_BIT BIT(36)
/* gv11b does not expose BAR1; all CPU access goes through other paths. */
bool gv11b_mm_is_bar1_supported(struct gk20a *g)
{
	(void)g;	/* chip-wide property; device handle unused */
	return false;
}
/*
 * Initialize an instance block for the given VM: program its page
 * directory base, and set the big page size when one is requested and
 * the chip provides a setter.
 */
void gv11b_init_inst_block(struct nvgpu_mem *inst_block,
		struct vm_gk20a *vm, u32 big_page_size)
{
	struct gk20a *g = gk20a_from_vm(vm);

	gk20a_dbg_info("inst block phys = 0x%llx, kv = 0x%p",
		nvgpu_inst_block_addr(g, inst_block), inst_block->cpu_va);

	g->ops.mm.init_pdb(g, inst_block, vm);

	/* big_page_size == 0 means "use the default" -- skip the setter. */
	if (big_page_size && g->ops.mm.set_big_page_size)
		g->ops.mm.set_big_page_size(g, inst_block, big_page_size);
}
/* MMU faults are reported through the FB/HUB on gv11b; delegate. */
bool gv11b_mm_mmu_fault_pending(struct gk20a *g)
{
	return gv11b_fb_mmu_fault_pending(g);
}
/*
 * Tear down the shadow MMU fault info storage.
 *
 * Hub interrupts are disabled under the hub ISR mutex so the ISR cannot
 * race with the free. Both fault_info entries point into one allocation,
 * so only the first is passed to nvgpu_kfree. The mutex itself is
 * destroyed last; no further hub ISR activity may occur after this.
 */
void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
{
	nvgpu_log_fn(g, " ");

	nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);

	gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_OTHER |
			HUB_INTR_TYPE_NONREPLAY | HUB_INTR_TYPE_REPLAY);

	/* Single backing allocation; free once, clear both pointers. */
	nvgpu_kfree(g, g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY]);
	g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY] = NULL;
	g->mm.fault_info[FAULT_TYPE_REPLAY] = NULL;

	nvgpu_mutex_release(&g->mm.hub_isr_mutex);
	nvgpu_mutex_destroy(&g->mm.hub_isr_mutex);
}
/*
 * Allocate the shadow buffer used to copy MMU fault info out of the
 * hardware registers. One zeroed array backs both the
 * other/non-replayable and replayable entries; HUB_INTR_TYPE_OTHER is
 * added to the caller's interrupt-type mask on success.
 *
 * Returns 0 on success, -ENOMEM when the allocation fails.
 */
static int gv11b_mm_mmu_fault_info_buf_init(struct gk20a *g,
		u32 *hub_intr_types)
{
	struct mmu_fault_info *shadow;

	shadow = nvgpu_kzalloc(g,
			FAULT_TYPE_NUM * sizeof(struct mmu_fault_info));
	if (shadow == NULL) {
		nvgpu_log_info(g, "failed to alloc shadow fault info");
		return -ENOMEM;
	}

	/* shadow buffer for copying mmu fault info */
	g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY] =
		&shadow[FAULT_TYPE_OTHER_AND_NONREPLAY];
	g->mm.fault_info[FAULT_TYPE_REPLAY] =
		&shadow[FAULT_TYPE_REPLAY];

	*hub_intr_types |= HUB_INTR_TYPE_OTHER;
	return 0;
}
/*
 * Allocate the hardware MMU fault buffers in the BAR2 VM: first the
 * other/non-replayable buffer, then the replayable one. Each success
 * marks the corresponding status slot and adds the interrupt type to
 * *hub_intr_types. On allocation failure the function returns early --
 * faults are still snapped in the pri registers, just not buffered --
 * so a non-replay failure also skips the replay buffer.
 */
static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g,
		u32 *hub_intr_types)
{
	struct vm_gk20a *vm = g->mm.bar2.vm;
	int err = 0;
	size_t fb_size;

	/* Max entries take care of 1 entry used for full detection */
	fb_size = (g->ops.fifo.get_num_fifos(g) + 1) *
			gmmu_fault_buf_size_v();

	err = nvgpu_dma_alloc_map_sys(vm, fb_size,
			&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]);
	if (err) {
		nvgpu_err(g,
		"Error in hw mmu fault buf [0] alloc in bar2 vm ");
		/* Fault will be snapped in pri reg but not in buffer */
		return;
	}

	g->mm.hw_fault_buf_status[NONREPLAY_REG_INDEX] =
			HW_FAULT_BUF_STATUS_ALLOC_TRUE;
	*hub_intr_types |= HUB_INTR_TYPE_NONREPLAY;

	err = nvgpu_dma_alloc_map_sys(vm, fb_size,
			&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]);
	if (err) {
		nvgpu_err(g,
		"Error in hw mmu fault buf [1] alloc in bar2 vm ");
		/* Fault will be snapped in pri reg but not in buffer */
		return;
	}

	g->mm.hw_fault_buf_status[REPLAY_REG_INDEX] =
			HW_FAULT_BUF_STATUS_ALLOC_TRUE;
	*hub_intr_types |= HUB_INTR_TYPE_REPLAY;
}
/*
 * Release the hardware MMU fault buffers: disable the related hub
 * interrupts, take the fault buffers out of the hardware enabled state,
 * then unmap and free whichever buffers were actually allocated
 * (tracked via hw_fault_buf_status).
 */
static void gv11b_mm_mmu_hw_fault_buf_deinit(struct gk20a *g)
{
	struct vm_gk20a *vm = g->mm.bar2.vm;

	nvgpu_log_fn(g, " ");

	/* Stop new fault-buffer interrupts before touching the buffers. */
	gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_NONREPLAY |
						 HUB_INTR_TYPE_REPLAY);

	g->mm.hub_intr_types &= (~(HUB_INTR_TYPE_NONREPLAY |
				   HUB_INTR_TYPE_REPLAY));

	/* Tell the hardware to stop using the buffers. */
	if ((gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX))) {
		gv11b_fb_fault_buf_set_state_hw(g, NONREPLAY_REG_INDEX,
						 FAULT_BUF_DISABLED);
	}

	if ((gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX))) {
		gv11b_fb_fault_buf_set_state_hw(g, REPLAY_REG_INDEX,
						FAULT_BUF_DISABLED);
	}

	/* Free only the buffers that were successfully allocated. */
	if (g->mm.hw_fault_buf_status[NONREPLAY_REG_INDEX] ==
			HW_FAULT_BUF_STATUS_ALLOC_TRUE) {
		nvgpu_dma_unmap_free(vm,
			 &g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]);
		g->mm.hw_fault_buf_status[NONREPLAY_REG_INDEX] =
				HW_FAULT_BUF_STATUS_ALLOC_FALSE;
	}

	if (g->mm.hw_fault_buf_status[REPLAY_REG_INDEX] ==
			HW_FAULT_BUF_STATUS_ALLOC_TRUE) {
		nvgpu_dma_unmap_free(vm,
				&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]);
		g->mm.hw_fault_buf_status[REPLAY_REG_INDEX] =
				HW_FAULT_BUF_STATUS_ALLOC_FALSE;
	}
}
/*
 * Tear down the BAR2 VM: the HW fault buffers live in this VM, so they
 * must be freed first, then the instance block, then the VM reference.
 */
void gv11b_mm_remove_bar2_vm(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;

	nvgpu_log_fn(g, " ");

	gv11b_mm_mmu_hw_fault_buf_deinit(g);

	nvgpu_free_inst_block(g, &mm->bar2.inst_block);
	nvgpu_vm_put(mm->bar2.vm);
}
static void gv11b_mm_mmu_fault_setup_hw(struct gk20a *g)
{
if (g->mm.hw_fault_buf_status[NONREPLAY_REG_INDEX] ==
HW_FAULT_BUF_STATUS_ALLOC_TRUE) {
gv11b_fb_fault_buf_configure_hw(g, NONREPLAY_REG_INDEX);
}
if (g->mm.hw_fault_buf_status[REPLAY_REG_INDEX] ==
HW_FAULT_BUF_STATUS_ALLOC_TRUE) {
gv11b_fb_fault_buf_configure_hw(g, REPLAY_REG_INDEX);
}
}
/*
 * Software-side MMU fault setup: initialize the hub ISR mutex, mark
 * both HW fault buffers unallocated, start from the ECC-uncorrected
 * interrupt type, then allocate the shadow info buffer and -- only if
 * that succeeds -- the HW fault buffers (each allocation extends
 * hub_intr_types).
 *
 * Returns the shadow-buffer allocation result (0 or -ENOMEM).
 */
static int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g)
{
	int err;

	nvgpu_log_fn(g, " ");

	nvgpu_mutex_init(&g->mm.hub_isr_mutex);

	g->mm.hw_fault_buf_status[NONREPLAY_REG_INDEX] =
			 HW_FAULT_BUF_STATUS_ALLOC_FALSE;
	g->mm.hw_fault_buf_status[REPLAY_REG_INDEX] =
			HW_FAULT_BUF_STATUS_ALLOC_FALSE;

	g->mm.hub_intr_types = HUB_INTR_TYPE_ECC_UNCORRECTED;

	err = gv11b_mm_mmu_fault_info_buf_init(g, &g->mm.hub_intr_types);

	if (!err)
		gv11b_mm_mmu_hw_fault_buf_init(g, &g->mm.hub_intr_types);

	return err;
}
/*
 * Hardware MM bring-up for gv11b: program the MMU page size and FB,
 * bind BAR2, flush the FB, then set up MMU fault handling (SW buffers
 * first, HW registers only when SW setup succeeded).
 *
 * Returns 0 on success, a negative errno on failure.
 */
int gv11b_init_mm_setup_hw(struct gk20a *g)
{
	int err = 0;

	nvgpu_log_fn(g, " ");

	g->ops.fb.set_mmu_page_size(g);
	g->ops.fb.init_hw(g);

	err = g->ops.mm.init_bar2_mm_hw_setup(g);
	if (err)
		return err;

	/*
	 * Flush twice (second flush only runs if the first succeeded);
	 * fail with -EBUSY if either flush reports failure.
	 * NOTE(review): the double flush is presumably to guarantee
	 * completion -- confirm against the gk20a flush semantics.
	 */
	if (gk20a_mm_fb_flush(g) || gk20a_mm_fb_flush(g))
		return -EBUSY;

	err = gv11b_mm_mmu_fault_setup_sw(g);
	if (!err)
		gv11b_mm_mmu_fault_setup_hw(g);

	nvgpu_log_fn(g, "end");

	return err;
}
/*
 * Flush the L2, bracketed by FB flushes: the leading flush pushes
 * pending FB traffic ahead of the L2 flush, the trailing one pushes the
 * flushed L2 data out to memory.
 */
void gv11b_mm_l2_flush(struct gk20a *g, bool invalidate)
{
	nvgpu_log(g, gpu_dbg_fn, "gv11b_mm_l2_flush");

	g->ops.mm.fb_flush(g);
	gk20a_mm_l2_flush(g, invalidate);
	g->ops.mm.fb_flush(g);
}
/*
 * On Volta the GPU decides whether a mapping allocates lines in the L3
 * cache by checking bit 36 of the physical address. When the mapping
 * attributes request L3 allocation, set that bit in the returned
 * physical address.
 */
u64 gv11b_gpu_phys_addr(struct gk20a *g,
			struct nvgpu_gmmu_attrs *attrs, u64 phys)
{
	bool want_l3 = (attrs != NULL) && attrs->t19x_attrs.l3_alloc;

	return want_l3 ? (phys | NVGPU_L3_ALLOC_BIT) : phys;
}
/*
 * Bind BAR2 to its instance block and wait for the bind to complete.
 *
 * Programs BUS_BAR2_BLOCK with the aperture, virtual mode and shifted
 * instance-block pointer, then polls BUS_BIND_STATUS until both the
 * pending and outstanding BAR2 flags clear (with exponential backoff up
 * to GR_IDLE_CHECK_MAX) or the GR idle timeout expires.
 *
 * Returns 0 on success, -EBUSY if the bind never completes.
 */
int gv11b_init_bar2_mm_hw_setup(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;
	struct nvgpu_mem *inst_block = &mm->bar2.inst_block;
	u64 inst_pa = nvgpu_inst_block_addr(g, inst_block);
	u32 reg_val;
	struct nvgpu_timeout timeout;
	u32 delay = GR_IDLE_CHECK_DEFAULT;

	nvgpu_log_fn(g, " ");

	g->ops.fb.set_mmu_page_size(g);

	/* The block pointer register takes the address >> ptr_shift. */
	inst_pa = (u32)(inst_pa >> bus_bar2_block_ptr_shift_v());
	nvgpu_log_info(g, "bar2 inst block ptr: 0x%08x",  (u32)inst_pa);

	gk20a_writel(g, bus_bar2_block_r(),
		     nvgpu_aperture_mask(g, inst_block,
					 bus_bar2_block_target_sys_mem_ncoh_f(),
					 bus_bar2_block_target_vid_mem_f()) |
		     bus_bar2_block_mode_virtual_f() |
		     bus_bar2_block_ptr_f(inst_pa));
	/* This is needed as BAR1 support is removed and there is no way
	 * to know if gpu successfully accessed memory.
	 * To avoid deadlocks and non-deterministic virtual address translation
	 * behavior, after writing BAR2_BLOCK to bind BAR2 to a virtual address
	 * space, SW must ensure that the bind has completed prior to issuing
	 * any further BAR2 requests by polling for both
	 * BUS_BIND_STATUS_BAR2_PENDING to return to EMPTY and
	 * BUS_BIND_STATUS_BAR2_OUTSTANDING to return to FALSE
	 */
	nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
			   NVGPU_TIMER_CPU_TIMER);
	nvgpu_log_info(g, "check bar2 bind status");
	do {
		reg_val = gk20a_readl(g, bus_bind_status_r());

		if (!((reg_val & bus_bind_status_bar2_pending_busy_f()) ||
		      (reg_val & bus_bind_status_bar2_outstanding_true_f())))
			return 0;

		nvgpu_usleep_range(delay, delay * 2);
		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
	} while (!nvgpu_timeout_expired_msg(&timeout, "bar2 bind timedout"));

	nvgpu_err(g, "bar2 bind failed. gpu unable to access memory");
	return -EBUSY;
}

View File

@@ -0,0 +1,46 @@
/*
 * GV11B MM
 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#ifndef MM_GV11B_H
#define MM_GV11B_H

/* Allocation state of the HW MMU fault buffers. */
#define HW_FAULT_BUF_STATUS_ALLOC_TRUE	1
#define HW_FAULT_BUF_STATUS_ALLOC_FALSE	0

struct gk20a;
struct nvgpu_mem;
struct vm_gk20a;
/*
 * Forward-declare nvgpu_gmmu_attrs: the original header used the tag
 * only inside the gv11b_gpu_phys_addr prototype, giving it prototype
 * scope (compiler warning, incompatible type per translation unit).
 */
struct nvgpu_gmmu_attrs;

bool gv11b_mm_is_bar1_supported(struct gk20a *g);
void gv11b_init_inst_block(struct nvgpu_mem *inst_block,
		struct vm_gk20a *vm, u32 big_page_size);
bool gv11b_mm_mmu_fault_pending(struct gk20a *g);
void gv11b_mm_remove_bar2_vm(struct gk20a *g);
int gv11b_init_mm_setup_hw(struct gk20a *g);
int gv11b_init_bar2_mm_hw_setup(struct gk20a *g);
void gv11b_mm_l2_flush(struct gk20a *g, bool invalidate);
u64 gv11b_gpu_phys_addr(struct gk20a *g,
			struct nvgpu_gmmu_attrs *attrs, u64 phys);
void gv11b_mm_fault_info_mem_destroy(struct gk20a *g);

#endif

View File

@@ -0,0 +1,549 @@
/*
* GV11B Tegra Platform Interface
*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/of_platform.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/nvmap.h>
#include <linux/reset.h>
#include <linux/hashtable.h>
#include <linux/clk.h>
#include <nvgpu/nvhost.h>
#include <nvgpu/nvhost_t19x.h>
#include <uapi/linux/nvgpu.h>
#include <soc/tegra/tegra_bpmp.h>
#include <soc/tegra/tegra_powergate.h>
#include "gk20a/gk20a.h"
#include "common/linux/platform_gk20a.h"
#include "common/linux/clk.h"
#include "gp10b/platform_gp10b.h"
#include "common/linux/platform_gp10b_tegra.h"
#include "common/linux/os_linux.h"
#include "common/linux/platform_gk20a_tegra.h"
#include "gr_gv11b.h"
#include "nvgpu_gpuid_t19x.h"
static void gr_gv11b_remove_sysfs(struct device *dev);
/*
 * Platform probe for the gv11b Tegra GPU: acquire the host1x/syncpoint
 * interface (when nvhost support is built in), derive IOMMU/bigpage
 * policy from the device, clear the context-switch debug/preemption
 * overrides, and initialize clock support.
 *
 * Fix vs. original: dump_ctxsw_stats_on_channel_close was assigned
 * false twice back to back; the duplicate is removed.
 *
 * Returns 0 on success or a negative errno.
 */
static int gv11b_tegra_probe(struct device *dev)
{
	struct gk20a_platform *platform = dev_get_drvdata(dev);
#ifdef CONFIG_TEGRA_GK20A_NVHOST
	struct gk20a *g = platform->g;
	int err = 0;

	err = nvgpu_get_nvhost_dev(g);
	if (err) {
		dev_err(dev, "host1x device not available");
		return err;
	}

	err = nvgpu_nvhost_syncpt_unit_interface_get_aperture(
				g->nvhost_dev,
				&g->syncpt_unit_base,
				&g->syncpt_unit_size);
	if (err) {
		dev_err(dev, "Failed to get syncpt interface");
		return -ENOSYS;
	}
	g->syncpt_size = nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(1);
	gk20a_dbg_info("syncpt_unit_base %llx syncpt_unit_size %zx size %x\n",
			g->syncpt_unit_base, g->syncpt_unit_size,
			g->syncpt_size);
#endif

	/* Without an IOMMU, big pages cannot be used safely. */
	platform->bypass_smmu = !device_is_iommuable(dev);
	platform->disable_bigpage = platform->bypass_smmu;

	platform->g->gr.t18x.ctx_vars.dump_ctxsw_stats_on_channel_close
		= false;
	platform->g->gr.t18x.ctx_vars.force_preemption_gfxp = false;
	platform->g->gr.t18x.ctx_vars.force_preemption_cilp = false;

	gp10b_tegra_get_clocks(dev);
	nvgpu_linux_init_clk_support(platform->g);

	return 0;
}
/*
 * Platform remove: run the common gp10b teardown first, then remove the
 * gv11b-specific sysfs nodes.
 */
static int gv11b_tegra_remove(struct device *dev)
{
	gp10b_tegra_remove(dev);

	gr_gv11b_remove_sysfs(dev);

	return 0;
}
/*
 * Report whether the GPU power partition is currently rail-gated.
 * Only meaningful when the BPMP is running and the T194 GPU power
 * domain is defined; otherwise (or when BPMP is down) reports false.
 */
static bool gv11b_tegra_is_railgated(struct device *dev)
{
	bool ret = false;
#ifdef TEGRA194_POWER_DOMAIN_GPU
	struct gk20a *g = get_gk20a(dev);

	if (tegra_bpmp_running()) {
		nvgpu_log(g, gpu_dbg_info, "bpmp running");
		ret = !tegra_powergate_is_powered(TEGRA194_POWER_DOMAIN_GPU);

		nvgpu_log(g, gpu_dbg_info, "railgated? %s", ret ? "yes" : "no");
	} else {
		nvgpu_log(g, gpu_dbg_info, "bpmp not running");
	}
#endif
	return ret;
}
/*
 * Rail-gate the GPU: if the partition is already off this is a no-op;
 * otherwise disable the platform clocks and power-gate the T194 GPU
 * domain. Requires a running BPMP; silently succeeds without one.
 */
static int gv11b_tegra_railgate(struct device *dev)
{
#ifdef TEGRA194_POWER_DOMAIN_GPU
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	struct gk20a *g = get_gk20a(dev);
	int i;

	if (tegra_bpmp_running()) {
		nvgpu_log(g, gpu_dbg_info, "bpmp running");
		if (!tegra_powergate_is_powered(TEGRA194_POWER_DOMAIN_GPU)) {
			nvgpu_log(g, gpu_dbg_info, "powergate is not powered");
			return 0;
		}
		nvgpu_log(g, gpu_dbg_info, "clk_disable_unprepare");
		for (i = 0; i < platform->num_clks; i++) {
			if (platform->clk[i])
				clk_disable_unprepare(platform->clk[i]);
		}
		nvgpu_log(g, gpu_dbg_info, "powergate_partition");
		tegra_powergate_partition(TEGRA194_POWER_DOMAIN_GPU);
	} else {
		nvgpu_log(g, gpu_dbg_info, "bpmp not running");
	}
#endif
	return 0;
}
/*
 * Un-rail-gate the GPU: unpowergate the GPU partition and then
 * re-enable all platform clocks. A no-op (returning 0) when BPMP is
 * not running or when TEGRA194 power-domain support is not compiled
 * in. Returns the unpowergate error code on failure.
 */
static int gv11b_tegra_unrailgate(struct device *dev)
{
	int err = 0;
#ifdef TEGRA194_POWER_DOMAIN_GPU
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	struct gk20a *g = get_gk20a(dev);
	int clk_idx;

	if (!tegra_bpmp_running()) {
		nvgpu_log(g, gpu_dbg_info, "bpmp not running");
		return 0;
	}

	nvgpu_log(g, gpu_dbg_info, "bpmp running");
	err = tegra_unpowergate_partition(TEGRA194_POWER_DOMAIN_GPU);
	if (err) {
		nvgpu_log(g, gpu_dbg_info,
			"unpowergate partition failed");
		return err;
	}

	/* Partition is powered; clocks can now be brought back up. */
	nvgpu_log(g, gpu_dbg_info, "clk_prepare_enable");
	for (clk_idx = 0; clk_idx < platform->num_clks; clk_idx++) {
		if (platform->clk[clk_idx])
			clk_prepare_enable(platform->clk[clk_idx]);
	}
#endif
	return err;
}
/*
 * System-suspend hook. No GV11B-specific suspend work is performed
 * here; the callback exists to satisfy the platform interface.
 */
static int gv11b_tegra_suspend(struct device *dev)
{
	(void)dev;
	return 0;
}
/*
 * Platform descriptor for the Tegra 19x (GV11B) integrated GPU.
 * gp10b helpers are reused for reset and debug-dump; power-gating
 * callbacks are the gv11b-specific ones defined above.
 */
struct gk20a_platform t19x_gpu_tegra_platform = {
	.has_syncpoints = true,

	/* power management configuration */

	/* ptimer src frequency in hz */
	.ptimer_src_freq = 31250000,

	.probe = gv11b_tegra_probe,
	.remove = gv11b_tegra_remove,

	/* NOTE(review): every clock/power-gating feature is disabled
	 * here — presumably a bring-up configuration; confirm before
	 * relying on SLCG/BLCG/ELCG on this platform. */
	.enable_slcg = false,
	.enable_blcg = false,
	.enable_elcg = false,
	.can_slcg = false,
	.can_blcg = false,
	.can_elcg = false,

	/* power management callbacks */
	.suspend = gv11b_tegra_suspend,
	.railgate = gv11b_tegra_railgate,
	.unrailgate = gv11b_tegra_unrailgate,
	.is_railgated = gv11b_tegra_is_railgated,

	.busy = gk20a_tegra_busy,
	.idle = gk20a_tegra_idle,

	.dump_platform_dependencies = gk20a_tegra_debug_dump,

	.soc_name = "tegra19x",

	.honors_aperture = true,
	.unified_memory = true,

	.reset_assert = gp10b_tegra_reset_assert,
	.reset_deassert = gp10b_tegra_reset_deassert,
};
/*
 * Per-instance sysfs attribute arrays for the gv11b ECC error
 * counters. Allocated by gr_gv11b_create_sysfs() (via the gp10b
 * stat-create helpers) and released by gr_gv11b_remove_sysfs().
 */
static struct device_attribute *dev_attr_sm_l1_tag_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_sm_l1_tag_ecc_uncorrected_err_count_array;
static struct device_attribute *dev_attr_sm_cbu_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_sm_cbu_ecc_uncorrected_err_count_array;
static struct device_attribute *dev_attr_sm_l1_data_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_sm_l1_data_ecc_uncorrected_err_count_array;
static struct device_attribute *dev_attr_sm_icache_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_sm_icache_ecc_uncorrected_err_count_array;
static struct device_attribute *dev_attr_gcc_l15_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_gcc_l15_ecc_uncorrected_err_count_array;
static struct device_attribute *dev_attr_mmu_l1tlb_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_mmu_l1tlb_ecc_uncorrected_err_count_array;
static struct device_attribute *dev_attr_fecs_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_fecs_ecc_uncorrected_err_count_array;
static struct device_attribute *dev_attr_gpccs_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_gpccs_ecc_uncorrected_err_count_array;
static struct device_attribute *dev_attr_l2_cache_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_l2_cache_ecc_uncorrected_err_count_array;
static struct device_attribute *dev_attr_mmu_l2tlb_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_mmu_l2tlb_ecc_uncorrected_err_count_array;
static struct device_attribute *dev_attr_mmu_hubtlb_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_mmu_hubtlb_ecc_uncorrected_err_count_array;
static struct device_attribute *dev_attr_mmu_fillunit_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_mmu_fillunit_ecc_uncorrected_err_count_array;
/*
 * Create the gv11b-specific ECC error-count sysfs nodes on top of the
 * common gp10b set. Errors from the individual helpers are OR-ed
 * together and reported once at the end.
 */
void gr_gv11b_create_sysfs(struct gk20a *g)
{
	struct device *dev = dev_from_gk20a(g);
	int error = 0;

	/* This stat creation function is called on GR init. GR can get
	   initialized multiple times but we only need to create the ECC
	   stats once. Therefore, add the following check to avoid
	   creating duplicate stat sysfs nodes. */
	if (g->ecc.gr.t19x.sm_l1_tag_corrected_err_count.counters != NULL)
		return;

	gr_gp10b_create_sysfs(g);

	/* Per-TPC SM/GCC counters (gr_gp10b_ecc_stat_create).
	 * NOTE(review): the 0 argument presumably selects a hash/start
	 * index — confirm against gr_gp10b_ecc_stat_create. */
	error |= gr_gp10b_ecc_stat_create(dev,
				0,
				"sm_l1_tag_ecc_corrected_err_count",
				&g->ecc.gr.t19x.sm_l1_tag_corrected_err_count,
				&dev_attr_sm_l1_tag_ecc_corrected_err_count_array);
	error |= gr_gp10b_ecc_stat_create(dev,
				0,
				"sm_l1_tag_ecc_uncorrected_err_count",
				&g->ecc.gr.t19x.sm_l1_tag_uncorrected_err_count,
				&dev_attr_sm_l1_tag_ecc_uncorrected_err_count_array);
	error |= gr_gp10b_ecc_stat_create(dev,
				0,
				"sm_cbu_ecc_corrected_err_count",
				&g->ecc.gr.t19x.sm_cbu_corrected_err_count,
				&dev_attr_sm_cbu_ecc_corrected_err_count_array);
	error |= gr_gp10b_ecc_stat_create(dev,
				0,
				"sm_cbu_ecc_uncorrected_err_count",
				&g->ecc.gr.t19x.sm_cbu_uncorrected_err_count,
				&dev_attr_sm_cbu_ecc_uncorrected_err_count_array);
	error |= gr_gp10b_ecc_stat_create(dev,
				0,
				"sm_l1_data_ecc_corrected_err_count",
				&g->ecc.gr.t19x.sm_l1_data_corrected_err_count,
				&dev_attr_sm_l1_data_ecc_corrected_err_count_array);
	error |= gr_gp10b_ecc_stat_create(dev,
				0,
				"sm_l1_data_ecc_uncorrected_err_count",
				&g->ecc.gr.t19x.sm_l1_data_uncorrected_err_count,
				&dev_attr_sm_l1_data_ecc_uncorrected_err_count_array);
	error |= gr_gp10b_ecc_stat_create(dev,
				0,
				"sm_icache_ecc_corrected_err_count",
				&g->ecc.gr.t19x.sm_icache_corrected_err_count,
				&dev_attr_sm_icache_ecc_corrected_err_count_array);
	error |= gr_gp10b_ecc_stat_create(dev,
				0,
				"sm_icache_ecc_uncorrected_err_count",
				&g->ecc.gr.t19x.sm_icache_uncorrected_err_count,
				&dev_attr_sm_icache_ecc_uncorrected_err_count_array);
	error |= gr_gp10b_ecc_stat_create(dev,
				0,
				"gcc_l15_ecc_corrected_err_count",
				&g->ecc.gr.t19x.gcc_l15_corrected_err_count,
				&dev_attr_gcc_l15_ecc_corrected_err_count_array);
	error |= gr_gp10b_ecc_stat_create(dev,
				0,
				"gcc_l15_ecc_uncorrected_err_count",
				&g->ecc.gr.t19x.gcc_l15_uncorrected_err_count,
				&dev_attr_gcc_l15_ecc_uncorrected_err_count_array);

	/* Per-unit counters keyed by an explicit instance count and
	 * block-name prefix (gp10b_ecc_stat_create). */
	error |= gp10b_ecc_stat_create(dev,
				g->ltc_count,
				"ltc",
				"l2_cache_uncorrected_err_count",
				&g->ecc.ltc.t19x.l2_cache_uncorrected_err_count,
				&dev_attr_l2_cache_ecc_uncorrected_err_count_array);
	error |= gp10b_ecc_stat_create(dev,
				g->ltc_count,
				"ltc",
				"l2_cache_corrected_err_count",
				&g->ecc.ltc.t19x.l2_cache_corrected_err_count,
				&dev_attr_l2_cache_ecc_corrected_err_count_array);
	error |= gp10b_ecc_stat_create(dev,
				1,
				"gpc",
				"fecs_ecc_uncorrected_err_count",
				&g->ecc.gr.t19x.fecs_uncorrected_err_count,
				&dev_attr_fecs_ecc_uncorrected_err_count_array);
	error |= gp10b_ecc_stat_create(dev,
				1,
				"gpc",
				"fecs_ecc_corrected_err_count",
				&g->ecc.gr.t19x.fecs_corrected_err_count,
				&dev_attr_fecs_ecc_corrected_err_count_array);
	error |= gp10b_ecc_stat_create(dev,
				g->gr.gpc_count,
				"gpc",
				"gpccs_ecc_uncorrected_err_count",
				&g->ecc.gr.t19x.gpccs_uncorrected_err_count,
				&dev_attr_gpccs_ecc_uncorrected_err_count_array);
	error |= gp10b_ecc_stat_create(dev,
				g->gr.gpc_count,
				"gpc",
				"gpccs_ecc_corrected_err_count",
				&g->ecc.gr.t19x.gpccs_corrected_err_count,
				&dev_attr_gpccs_ecc_corrected_err_count_array);
	error |= gp10b_ecc_stat_create(dev,
				g->gr.gpc_count,
				"gpc",
				"mmu_l1tlb_ecc_uncorrected_err_count",
				&g->ecc.gr.t19x.mmu_l1tlb_uncorrected_err_count,
				&dev_attr_mmu_l1tlb_ecc_uncorrected_err_count_array);
	error |= gp10b_ecc_stat_create(dev,
				g->gr.gpc_count,
				"gpc",
				"mmu_l1tlb_ecc_corrected_err_count",
				&g->ecc.gr.t19x.mmu_l1tlb_corrected_err_count,
				&dev_attr_mmu_l1tlb_ecc_corrected_err_count_array);
	error |= gp10b_ecc_stat_create(dev,
				1,
				"eng",
				"mmu_l2tlb_ecc_uncorrected_err_count",
				&g->ecc.eng.t19x.mmu_l2tlb_uncorrected_err_count,
				&dev_attr_mmu_l2tlb_ecc_uncorrected_err_count_array);
	error |= gp10b_ecc_stat_create(dev,
				1,
				"eng",
				"mmu_l2tlb_ecc_corrected_err_count",
				&g->ecc.eng.t19x.mmu_l2tlb_corrected_err_count,
				&dev_attr_mmu_l2tlb_ecc_corrected_err_count_array);
	error |= gp10b_ecc_stat_create(dev,
				1,
				"eng",
				"mmu_hubtlb_ecc_uncorrected_err_count",
				&g->ecc.eng.t19x.mmu_hubtlb_uncorrected_err_count,
				&dev_attr_mmu_hubtlb_ecc_uncorrected_err_count_array);
	error |= gp10b_ecc_stat_create(dev,
				1,
				"eng",
				"mmu_hubtlb_ecc_corrected_err_count",
				&g->ecc.eng.t19x.mmu_hubtlb_corrected_err_count,
				&dev_attr_mmu_hubtlb_ecc_corrected_err_count_array);
	error |= gp10b_ecc_stat_create(dev,
				1,
				"eng",
				"mmu_fillunit_ecc_uncorrected_err_count",
				&g->ecc.eng.t19x.mmu_fillunit_uncorrected_err_count,
				&dev_attr_mmu_fillunit_ecc_uncorrected_err_count_array);
	error |= gp10b_ecc_stat_create(dev,
				1,
				"eng",
				"mmu_fillunit_ecc_corrected_err_count",
				&g->ecc.eng.t19x.mmu_fillunit_corrected_err_count,
				&dev_attr_mmu_fillunit_ecc_corrected_err_count_array);

	if (error)
		dev_err(dev, "Failed to create gv11b sysfs attributes!\n");
}
/*
 * Remove all ECC sysfs nodes created by gr_gv11b_create_sysfs(), in
 * the same order. The instance counts passed here must match the
 * counts used at creation time.
 */
static void gr_gv11b_remove_sysfs(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);

	/* Per-TPC SM/GCC counters. */
	gr_gp10b_ecc_stat_remove(dev,
			0,
			&g->ecc.gr.t19x.sm_l1_tag_corrected_err_count,
			dev_attr_sm_l1_tag_ecc_corrected_err_count_array);
	gr_gp10b_ecc_stat_remove(dev,
			0,
			&g->ecc.gr.t19x.sm_l1_tag_uncorrected_err_count,
			dev_attr_sm_l1_tag_ecc_uncorrected_err_count_array);
	gr_gp10b_ecc_stat_remove(dev,
			0,
			&g->ecc.gr.t19x.sm_cbu_corrected_err_count,
			dev_attr_sm_cbu_ecc_corrected_err_count_array);
	gr_gp10b_ecc_stat_remove(dev,
			0,
			&g->ecc.gr.t19x.sm_cbu_uncorrected_err_count,
			dev_attr_sm_cbu_ecc_uncorrected_err_count_array);
	gr_gp10b_ecc_stat_remove(dev,
			0,
			&g->ecc.gr.t19x.sm_l1_data_corrected_err_count,
			dev_attr_sm_l1_data_ecc_corrected_err_count_array);
	gr_gp10b_ecc_stat_remove(dev,
			0,
			&g->ecc.gr.t19x.sm_l1_data_uncorrected_err_count,
			dev_attr_sm_l1_data_ecc_uncorrected_err_count_array);
	gr_gp10b_ecc_stat_remove(dev,
			0,
			&g->ecc.gr.t19x.sm_icache_corrected_err_count,
			dev_attr_sm_icache_ecc_corrected_err_count_array);
	gr_gp10b_ecc_stat_remove(dev,
			0,
			&g->ecc.gr.t19x.sm_icache_uncorrected_err_count,
			dev_attr_sm_icache_ecc_uncorrected_err_count_array);
	gr_gp10b_ecc_stat_remove(dev,
			0,
			&g->ecc.gr.t19x.gcc_l15_corrected_err_count,
			dev_attr_gcc_l15_ecc_corrected_err_count_array);
	gr_gp10b_ecc_stat_remove(dev,
			0,
			&g->ecc.gr.t19x.gcc_l15_uncorrected_err_count,
			dev_attr_gcc_l15_ecc_uncorrected_err_count_array);

	/* Per-unit counters (count must mirror the creation call). */
	gp10b_ecc_stat_remove(dev,
			g->ltc_count,
			&g->ecc.ltc.t19x.l2_cache_uncorrected_err_count,
			dev_attr_l2_cache_ecc_uncorrected_err_count_array);
	gp10b_ecc_stat_remove(dev,
			g->ltc_count,
			&g->ecc.ltc.t19x.l2_cache_corrected_err_count,
			dev_attr_l2_cache_ecc_corrected_err_count_array);
	gp10b_ecc_stat_remove(dev,
			1,
			&g->ecc.gr.t19x.fecs_uncorrected_err_count,
			dev_attr_fecs_ecc_uncorrected_err_count_array);
	gp10b_ecc_stat_remove(dev,
			1,
			&g->ecc.gr.t19x.fecs_corrected_err_count,
			dev_attr_fecs_ecc_corrected_err_count_array);
	gp10b_ecc_stat_remove(dev,
			g->gr.gpc_count,
			&g->ecc.gr.t19x.gpccs_uncorrected_err_count,
			dev_attr_gpccs_ecc_uncorrected_err_count_array);
	gp10b_ecc_stat_remove(dev,
			g->gr.gpc_count,
			&g->ecc.gr.t19x.gpccs_corrected_err_count,
			dev_attr_gpccs_ecc_corrected_err_count_array);
	gp10b_ecc_stat_remove(dev,
			g->gr.gpc_count,
			&g->ecc.gr.t19x.mmu_l1tlb_uncorrected_err_count,
			dev_attr_mmu_l1tlb_ecc_uncorrected_err_count_array);
	gp10b_ecc_stat_remove(dev,
			g->gr.gpc_count,
			&g->ecc.gr.t19x.mmu_l1tlb_corrected_err_count,
			dev_attr_mmu_l1tlb_ecc_corrected_err_count_array);
	gp10b_ecc_stat_remove(dev,
			1,
			&g->ecc.eng.t19x.mmu_l2tlb_uncorrected_err_count,
			dev_attr_mmu_l2tlb_ecc_uncorrected_err_count_array);
	gp10b_ecc_stat_remove(dev,
			1,
			&g->ecc.eng.t19x.mmu_l2tlb_corrected_err_count,
			dev_attr_mmu_l2tlb_ecc_corrected_err_count_array);
	gp10b_ecc_stat_remove(dev,
			1,
			&g->ecc.eng.t19x.mmu_hubtlb_uncorrected_err_count,
			dev_attr_mmu_hubtlb_ecc_uncorrected_err_count_array);
	gp10b_ecc_stat_remove(dev,
			1,
			&g->ecc.eng.t19x.mmu_hubtlb_corrected_err_count,
			dev_attr_mmu_hubtlb_ecc_corrected_err_count_array);
	gp10b_ecc_stat_remove(dev,
			1,
			&g->ecc.eng.t19x.mmu_fillunit_uncorrected_err_count,
			dev_attr_mmu_fillunit_ecc_uncorrected_err_count_array);
	gp10b_ecc_stat_remove(dev,
			1,
			&g->ecc.eng.t19x.mmu_fillunit_corrected_err_count,
			dev_attr_mmu_fillunit_ecc_corrected_err_count_array);
}

View File

@@ -0,0 +1,283 @@
/*
* GV11B PMU
*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/delay.h> /* for udelay */
#include <linux/clk.h>
#include <soc/tegra/fuse.h>
#include <nvgpu/pmu.h>
#include <nvgpu/falcon.h>
#include <nvgpu/enabled.h>
#include <nvgpu/mm.h>
#include "gk20a/gk20a.h"
#include "gp10b/pmu_gp10b.h"
#include "gp106/pmu_gp106.h"
#include "pmu_gv11b.h"
#include "acr_gv11b.h"
#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>
/* PMU-subsystem debug logging wrapper for this file. */
#define gv11b_dbg_pmu(fmt, arg...) \
	gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)

/* Right-shift that converts an address to 4 KB units (2^12). */
#define ALIGN_4KB 12
/* The PMU is unconditionally supported on GV11B. */
bool gv11b_is_pmu_supported(struct gk20a *g)
{
	(void)g;
	return true;
}
/*
 * Report whether the given LS falcon is lazily bootstrapped on GV11B.
 * FECS and GPCCS are; every other falcon is not.
 */
bool gv11b_is_lazy_bootstrap(u32 falcon_id)
{
	switch (falcon_id) {
	case LSF_FALCON_ID_FECS:
	case LSF_FALCON_ID_GPCCS:
		return true;
	default:
		return false;
	}
}
/*
 * Report whether the given LS falcon uses the privileged load path on
 * GV11B. FECS and GPCCS do; every other falcon does not.
 */
bool gv11b_is_priv_load(u32 falcon_id)
{
	switch (falcon_id) {
	case LSF_FALCON_ID_FECS:
	case LSF_FALCON_ID_GPCCS:
		return true;
	default:
		return false;
	}
}
/*
 * Bootstrap the PMU falcon on GV11B: bind the PMU instance block,
 * stage the command-line arguments into DMEM, write the bootloader
 * argument block through the auto-incrementing DMEM port, DMA the
 * bootloader into IMEM, and start the falcon.
 *
 * The register-write sequence is order-dependent; do not reorder.
 * Always returns 0.
 */
int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	struct mm_gk20a *mm = &g->mm;
	struct pmu_ucode_desc *desc = pmu->desc;
	u64 addr_code_lo, addr_data_lo, addr_load_lo;
	u64 addr_code_hi, addr_data_hi, addr_load_hi;
	u32 i, blocks, addr_args;

	gk20a_dbg_fn("");

	/* Enable the context interface, then point the PMU at its
	 * instance block (4 KB-aligned, non-coherent sysmem). */
	gk20a_writel(g, pwr_falcon_itfen_r(),
		gk20a_readl(g, pwr_falcon_itfen_r()) |
		pwr_falcon_itfen_ctxen_enable_f());
	gk20a_writel(g, pwr_pmu_new_instblk_r(),
		pwr_pmu_new_instblk_ptr_f(
		nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> ALIGN_4KB)
		| pwr_pmu_new_instblk_valid_f(1)
		| pwr_pmu_new_instblk_target_sys_ncoh_f());

	/* TBD: load all other surfaces */
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_size(
		pmu, GK20A_PMU_TRACE_BUFSIZE);
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
		pmu, GK20A_PMU_DMAIDX_VIRT);
	g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq(pmu,
		g->ops.clk.get_rate(g, CTRL_CLK_DOMAIN_PWRCLK));

	/* Place the cmdline args at the very top of DMEM. */
	addr_args = (pwr_falcon_hwcfg_dmem_size_v(
		gk20a_readl(g, pwr_falcon_hwcfg_r()))
		<< GK20A_PMU_DMEM_BLKSIZE2) -
		g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);
	nvgpu_flcn_copy_to_dmem(pmu->flcn, addr_args,
		(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
		g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);

	/* Open DMEM port 0 at offset 0 with auto-increment-on-write;
	 * every dmemd write below lands at the next word. */
	gk20a_writel(g, pwr_falcon_dmemc_r(0),
		pwr_falcon_dmemc_offs_f(0) |
		pwr_falcon_dmemc_blk_f(0)  |
		pwr_falcon_dmemc_aincw_f(1));

	/* ucode GPU VAs are programmed in 256-byte units (>> 8). */
	addr_code_lo = u64_lo32((pmu->ucode.gpu_va +
			desc->app_start_offset +
			desc->app_resident_code_offset) >> 8);
	addr_code_hi = u64_hi32((pmu->ucode.gpu_va +
			desc->app_start_offset +
			desc->app_resident_code_offset) >> 8);
	addr_data_lo = u64_lo32((pmu->ucode.gpu_va +
			desc->app_start_offset +
			desc->app_resident_data_offset) >> 8);
	addr_data_hi = u64_hi32((pmu->ucode.gpu_va +
			desc->app_start_offset +
			desc->app_resident_data_offset) >> 8);
	addr_load_lo = u64_lo32((pmu->ucode.gpu_va +
			desc->bootloader_start_offset) >> 8);
	addr_load_hi = u64_hi32((pmu->ucode.gpu_va +
			desc->bootloader_start_offset) >> 8);

	/* Bootloader argument block, written word by word through the
	 * auto-incrementing DMEM port opened above. The layout is the
	 * fixed format the PMU bootloader expects. */
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), GK20A_PMU_DMAIDX_UCODE);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_code_lo << 8);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_code_hi);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_resident_code_offset);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_resident_code_size);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_imem_entry);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_data_lo << 8);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_data_hi);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_resident_data_size);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x1);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_args);

	g->ops.pmu.write_dmatrfbase(g,
			addr_load_lo - (desc->bootloader_imem_offset >> 8));

	/* DMA the bootloader into IMEM in 256-byte blocks. */
	blocks = ((desc->bootloader_size + 0xFF) & ~0xFF) >> 8;
	for (i = 0; i < blocks; i++) {
		gk20a_writel(g, pwr_falcon_dmatrfmoffs_r(),
			desc->bootloader_imem_offset + (i << 8));
		gk20a_writel(g, pwr_falcon_dmatrffboffs_r(),
			desc->bootloader_imem_offset + (i << 8));
		gk20a_writel(g, pwr_falcon_dmatrfcmd_r(),
			pwr_falcon_dmatrfcmd_imem_f(1)  |
			pwr_falcon_dmatrfcmd_write_f(0) |
			pwr_falcon_dmatrfcmd_size_f(6)  |
			pwr_falcon_dmatrfcmd_ctxdma_f(GK20A_PMU_DMAIDX_UCODE));
	}

	nvgpu_flcn_bootstrap(pmu->flcn, desc->bootloader_entry_point);

	gk20a_writel(g, pwr_falcon_os_r(), desc->app_version);

	return 0;
}
/* PMU callback: ack/abort handler for the sub-feature mask update. */
static void pmu_handle_pg_sub_feature_msg(struct gk20a *g, struct pmu_msg *msg,
			void *param, u32 handle, u32 status)
{
	gk20a_dbg_fn("");

	if (status) {
		nvgpu_err(g, "Sub-feature mask update cmd aborted\n");
		return;
	}

	gv11b_dbg_pmu("sub-feature mask update is acknowledged from PMU %x\n",
			msg->msg.pg.msg_type);
}
/* PMU callback: ack/abort handler for the GR init-param command. */
static void pmu_handle_pg_param_msg(struct gk20a *g, struct pmu_msg *msg,
			void *param, u32 handle, u32 status)
{
	gk20a_dbg_fn("");

	if (status) {
		nvgpu_err(g, "GR PARAM cmd aborted\n");
		return;
	}

	gv11b_dbg_pmu("GR PARAM is acknowledged from PMU %x\n",
			msg->msg.pg.msg_type);
}
/*
 * Send the PG_PARAM GR-init command to the PMU for the graphics ELPG
 * engine. Returns -EINVAL for any other engine id, 0 otherwise.
 */
int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct pmu_cmd cmd;
	u32 seq;

	if (pg_engine_id != PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
		return -EINVAL;

	memset(&cmd, 0, sizeof(struct pmu_cmd));
	cmd.hdr.unit_id = PMU_UNIT_PG;
	cmd.hdr.size = PMU_CMD_HDR_SIZE +
			sizeof(struct pmu_pg_cmd_gr_init_param_v1);
	cmd.cmd.pg.gr_init_param_v1.cmd_type = PMU_PG_CMD_ID_PG_PARAM;
	cmd.cmd.pg.gr_init_param_v1.sub_cmd_id =
			PMU_PG_PARAM_CMD_GR_INIT_PARAM;
	cmd.cmd.pg.gr_init_param_v1.featuremask =
			PMU_PG_FEATURE_GR_POWER_GATING_ENABLED;

	gv11b_dbg_pmu("cmd post PMU_PG_CMD_ID_PG_PARAM_INIT\n");
	nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
			pmu_handle_pg_param_msg, pmu, &seq, ~0);

	return 0;
}
/*
 * Send the PG_PARAM sub-feature-mask update to the PMU for the
 * graphics ELPG engine, enabling GR power gating. Returns -EINVAL for
 * any other engine id, 0 otherwise.
 */
int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct pmu_cmd cmd;
	u32 seq;

	if (pg_engine_id != PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
		return -EINVAL;

	memset(&cmd, 0, sizeof(struct pmu_cmd));
	cmd.hdr.unit_id = PMU_UNIT_PG;
	cmd.hdr.size = PMU_CMD_HDR_SIZE +
			sizeof(struct pmu_pg_cmd_sub_feature_mask_update);
	cmd.cmd.pg.sf_mask_update.cmd_type = PMU_PG_CMD_ID_PG_PARAM;
	cmd.cmd.pg.sf_mask_update.sub_cmd_id =
			PMU_PG_PARAM_CMD_SUB_FEATURE_MASK_UPDATE;
	cmd.cmd.pg.sf_mask_update.ctrl_id =
			PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
	cmd.cmd.pg.sf_mask_update.enabled_mask =
			PMU_PG_FEATURE_GR_POWER_GATING_ENABLED;

	gv11b_dbg_pmu("cmd post PMU_PG_CMD_SUB_FEATURE_MASK_UPDATE\n");
	nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
			pmu_handle_pg_sub_feature_msg, pmu, &seq, ~0);

	return 0;
}

View File

@@ -0,0 +1,37 @@
/*
* GV11B PMU
*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __PMU_GV11B_H_
#define __PMU_GV11B_H_

/*
 * Forward declarations so this header is self-contained: the original
 * declared struct gk20a but relied on the includer for struct
 * nvgpu_pmu, which gv11b_pmu_bootstrap() takes.
 */
struct gk20a;
struct nvgpu_pmu;

bool gv11b_is_pmu_supported(struct gk20a *g);
int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu);

int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id);
int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id);

bool gv11b_is_lazy_bootstrap(u32 falcon_id);
bool gv11b_is_priv_load(u32 falcon_id);

#endif /*__PMU_GV11B_H_*/

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,42 @@
/*
*
* Tegra GV11B GPU Driver Register Ops
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __REGOPS_GV11B_H_
#define __REGOPS_GV11B_H_

/*
 * Forward declarations so this header does not depend on the includer
 * having already declared these types (the original omitted them).
 */
struct regop_offset_range;
struct dbg_session_gk20a;

const struct regop_offset_range *gv11b_get_global_whitelist_ranges(void);
int gv11b_get_global_whitelist_ranges_count(void);
const struct regop_offset_range *gv11b_get_context_whitelist_ranges(void);
int gv11b_get_context_whitelist_ranges_count(void);
const u32 *gv11b_get_runcontrol_whitelist(void);
int gv11b_get_runcontrol_whitelist_count(void);
const struct regop_offset_range *gv11b_get_runcontrol_whitelist_ranges(void);
int gv11b_get_runcontrol_whitelist_ranges_count(void);
const u32 *gv11b_get_qctl_whitelist(void);
int gv11b_get_qctl_whitelist_count(void);
const struct regop_offset_range *gv11b_get_qctl_whitelist_ranges(void);
int gv11b_get_qctl_whitelist_ranges_count(void);
int gv11b_apply_smpc_war(struct dbg_session_gk20a *dbg_s);

#endif /* __REGOPS_GV11B_H_ */

View File

@@ -0,0 +1,185 @@
/*
* Volta GPU series Subcontext
*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "gk20a/gk20a.h"
#include "gv11b/subctx_gv11b.h"
#include <nvgpu/dma.h>
#include <nvgpu/log.h>
#include <nvgpu/gmmu.h>
#include <nvgpu/hw/gv11b/hw_ram_gv11b.h>
#include <nvgpu/hw/gv11b/hw_ctxsw_prog_gv11b.h>
/*
 * Local helpers, defined below.
 * NOTE(review): the definitions of gv11b_subctx_commit_valid_mask and
 * gv11b_subctx_commit_pdb omit the `static` keyword; they still get
 * internal linkage from these declarations, but the definitions should
 * be made consistent.
 */
static void gv11b_init_subcontext_pdb(struct channel_gk20a *c,
				struct nvgpu_mem *inst_block);
static void gv11b_subctx_commit_valid_mask(struct channel_gk20a *c,
				struct nvgpu_mem *inst_block);
static void gv11b_subctx_commit_pdb(struct channel_gk20a *c,
				struct nvgpu_mem *inst_block);
/*
 * Release the channel's subcontext header: unmap it from the channel
 * VM and free the backing memory. Safe to call when the header was
 * never mapped (gpu_va == 0).
 */
void gv11b_free_subctx_header(struct channel_gk20a *c)
{
	struct gk20a *g = c->g;
	struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header;

	nvgpu_log(g, gpu_dbg_fn, "gv11b_free_subctx_header");

	if (!ctx->mem.gpu_va)
		return;

	nvgpu_gmmu_unmap(c->vm, &ctx->mem, ctx->mem.gpu_va);
	nvgpu_dma_free(g, &ctx->mem);
}
/*
 * Allocate, map and zero the channel's subcontext (VEID) header, then
 * program the subcontext PDBs into the channel instance block.
 * Idempotent: a header that is already mapped (gpu_va != 0) is left
 * untouched and 0 is returned.
 *
 * Returns 0 on success or a negative errno.
 */
int gv11b_alloc_subctx_header(struct channel_gk20a *c)
{
	struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header;
	struct gk20a *g = c->g;
	int ret = 0;

	nvgpu_log(g, gpu_dbg_fn, "gv11b_alloc_subctx_header");

	if (ctx->mem.gpu_va == 0) {
		ret = nvgpu_dma_alloc_flags_sys(g,
				0, /* No Special flags */
				ctxsw_prog_fecs_header_v(),
				&ctx->mem);
		if (ret) {
			nvgpu_err(g, "failed to allocate sub ctx header");
			return ret;
		}
		ctx->mem.gpu_va = nvgpu_gmmu_map(c->vm,
					&ctx->mem,
					ctx->mem.size,
					0, /* not GPU-cacheable */
					gk20a_mem_flag_none, true,
					ctx->mem.aperture);
		if (!ctx->mem.gpu_va) {
			nvgpu_err(g, "failed to map ctx header");
			nvgpu_dma_free(g, &ctx->mem);
			return -ENOMEM;
		}
		/* Now clear the buffer */
		/* NOTE(review): on nvgpu_mem_begin() failure the mapping
		 * and allocation are kept (tracked in ctx->mem) and are
		 * presumably released later by gv11b_free_subctx_header —
		 * confirm the callers treat this error as fatal. */
		if (nvgpu_mem_begin(g, &ctx->mem))
			return -ENOMEM;

		nvgpu_memset(g, &ctx->mem, 0, 0, ctx->mem.size);

		nvgpu_mem_end(g, &ctx->mem);

		gv11b_init_subcontext_pdb(c, &c->inst_block);
	}
	return ret;
}
/*
 * Initialize the subcontext state in the channel instance block:
 * program every subcontext PDB, mark them all valid, and record this
 * channel's VEID in the engine-WFI word.
 */
static void gv11b_init_subcontext_pdb(struct channel_gk20a *c,
				struct nvgpu_mem *inst_block)
{
	struct gk20a *g = c->g;

	gv11b_subctx_commit_pdb(c, inst_block);
	gv11b_subctx_commit_valid_mask(c, inst_block);

	nvgpu_log(g, gpu_dbg_info, " subctx %d instblk set", c->t19x.subctx_id);

	nvgpu_mem_wr32(g, inst_block, ram_in_engine_wfi_veid_w(),
			ram_in_engine_wfi_veid_f(c->t19x.subctx_id));
}
/*
 * Point the channel's subcontext header at the GR context image at
 * gpu_va and mark it as a per-VEID header. Returns 0 on success or
 * -ENOMEM if the header memory cannot be CPU-mapped.
 */
int gv11b_update_subctx_header(struct channel_gk20a *c, u64 gpu_va)
{
	struct gk20a *g = c->g;
	struct nvgpu_mem *gr_mem = &c->ch_ctx.ctx_header.mem;
	u32 va_lo = u64_lo32(gpu_va);
	u32 va_hi = u64_hi32(gpu_va);

	/* Flush L2 before touching the header image in memory. */
	g->ops.mm.l2_flush(g, true);

	if (nvgpu_mem_begin(g, gr_mem))
		return -ENOMEM;

	nvgpu_mem_wr(g, gr_mem,
		ctxsw_prog_main_image_context_buffer_ptr_hi_o(), va_hi);
	nvgpu_mem_wr(g, gr_mem,
		ctxsw_prog_main_image_context_buffer_ptr_o(), va_lo);
	nvgpu_mem_wr(g, gr_mem,
		ctxsw_prog_main_image_ctl_o(),
		ctxsw_prog_main_image_ctl_type_per_veid_header_v());

	nvgpu_mem_end(g, gr_mem);

	return 0;
}
/*
 * Mark every subcontext PDB in the instance block as valid.
 * NOTE(review): 166/167 are magic word indices into the instance
 * block — presumably the two words of the 64-bit subcontext
 * PDB-valid mask; confirm against the ram_in manual and replace with
 * named accessors if available.
 */
void gv11b_subctx_commit_valid_mask(struct channel_gk20a *c,
				struct nvgpu_mem *inst_block)
{
	struct gk20a *g = c->g;

	/* Make all subctx pdbs valid */
	nvgpu_mem_wr32(g, inst_block, 166, 0xffffffff);
	nvgpu_mem_wr32(g, inst_block, 167, 0xffffffff);
}
/*
 * Program the channel VM's PDB into every subcontext slot of the
 * instance block. All subcontexts share the channel's page-directory
 * base; each slot gets the same format word (aperture, volatile,
 * fault-replay enables, ver2 PT format, big-page size, PDB-address
 * low bits) plus the PDB-address high word.
 */
void gv11b_subctx_commit_pdb(struct channel_gk20a *c,
				struct nvgpu_mem *inst_block)
{
	struct gk20a *g = c->g;
	struct fifo_gk20a *f = &g->fifo;
	struct vm_gk20a *vm = c->vm;
	u32 lo, hi;
	u32 subctx_id = 0;
	u32 format_word;
	u32 pdb_addr_lo, pdb_addr_hi;
	u64 pdb_addr;
	/* Aperture field depends on where the PDB actually lives. */
	u32 aperture = nvgpu_aperture_mask(g, vm->pdb.mem,
				ram_in_sc_page_dir_base_target_sys_mem_ncoh_v(),
				ram_in_sc_page_dir_base_target_vid_mem_v());

	pdb_addr = nvgpu_mem_get_addr(g, vm->pdb.mem);
	pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v());
	pdb_addr_hi = u64_hi32(pdb_addr);
	format_word = ram_in_sc_page_dir_base_target_f(
		aperture, 0) |
		ram_in_sc_page_dir_base_vol_f(
		ram_in_sc_page_dir_base_vol_true_v(), 0) |
		ram_in_sc_page_dir_base_fault_replay_tex_f(1, 0) |
		ram_in_sc_page_dir_base_fault_replay_gcc_f(1, 0) |
		ram_in_sc_use_ver2_pt_format_f(1, 0) |
		ram_in_sc_big_page_size_f(1, 0) |
		ram_in_sc_page_dir_base_lo_0_f(pdb_addr_lo);
	nvgpu_log(g, gpu_dbg_info, " pdb info lo %x hi %x",
					format_word, pdb_addr_hi);
	/* Each subcontext slot is 4 words apart in the instance block. */
	for (subctx_id = 0; subctx_id < f->t19x.max_subctx_count; subctx_id++) {
		lo = ram_in_sc_page_dir_base_vol_0_w() + (4 * subctx_id);
		hi = ram_in_sc_page_dir_base_hi_0_w() + (4 * subctx_id);
		nvgpu_mem_wr32(g, inst_block, lo, format_word);
		nvgpu_mem_wr32(g, inst_block, hi, pdb_addr_hi);
	}
}

View File

@@ -0,0 +1,34 @@
/*
*
* Volta GPU series Subcontext
*
* Copyright (c) 2016 - 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __SUBCONTEXT_GV11B_H__
#define __SUBCONTEXT_GV11B_H__

/*
 * Forward declaration so this header is self-contained (the original
 * relied on the includer to have declared struct channel_gk20a).
 */
struct channel_gk20a;

int gv11b_alloc_subctx_header(struct channel_gk20a *c);

void gv11b_free_subctx_header(struct channel_gk20a *c);

int gv11b_update_subctx_header(struct channel_gk20a *c, u64 gpu_va);

#endif /* __SUBCONTEXT_GV11B_H__ */

View File

@@ -0,0 +1,75 @@
/*
* GV11B Therm
*
* Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "gk20a/gk20a.h"
#include <nvgpu/soc.h>
#include <nvgpu/hw/gv11b/hw_therm_gv11b.h>
/*
 * Program the production ELCG idle-filter settings for GV11B.
 *
 * For every active engine the gate-control register is updated with the
 * __prod idle-filter exponent/mantissa and the before/after gating delays;
 * the FECS and HUBMMU idle-filter registers are then set to their __prod
 * values. Skipped entirely on simulation platforms. Always returns 0.
 */
int gv11b_elcg_init_idle_filters(struct gk20a *g)
{
	struct fifo_gk20a *fifo = &g->fifo;
	u32 idx;
	u32 filt;

	/* No clock/power gating setup is needed under simulation. */
	if (nvgpu_platform_is_simulation(g))
		return 0;

	gk20a_dbg_info("init clock/power gate reg");

	for (idx = 0U; idx < fifo->num_engines; idx++) {
		u32 eng = fifo->active_engines_list[idx];
		u32 reg = gk20a_readl(g, therm_gate_ctrl_r(eng));

		reg = set_field(reg,
			therm_gate_ctrl_eng_idle_filt_exp_m(),
			therm_gate_ctrl_eng_idle_filt_exp__prod_f());
		reg = set_field(reg,
			therm_gate_ctrl_eng_idle_filt_mant_m(),
			therm_gate_ctrl_eng_idle_filt_mant__prod_f());
		reg = set_field(reg,
			therm_gate_ctrl_eng_delay_before_m(),
			therm_gate_ctrl_eng_delay_before__prod_f());
		reg = set_field(reg,
			therm_gate_ctrl_eng_delay_after_m(),
			therm_gate_ctrl_eng_delay_after__prod_f());
		gk20a_writel(g, therm_gate_ctrl_r(eng), reg);
	}

	/* FECS idle filter: read-modify-write the __prod value. */
	filt = gk20a_readl(g, therm_fecs_idle_filter_r());
	filt = set_field(filt,
		therm_fecs_idle_filter_value_m(),
		therm_fecs_idle_filter_value__prod_f());
	gk20a_writel(g, therm_fecs_idle_filter_r(), filt);

	/* HUBMMU idle filter: same treatment. */
	filt = gk20a_readl(g, therm_hubmmu_idle_filter_r());
	filt = set_field(filt,
		therm_hubmmu_idle_filter_value_m(),
		therm_hubmmu_idle_filter_value__prod_f());
	gk20a_writel(g, therm_hubmmu_idle_filter_r(), filt);

	return 0;
}

View File

@@ -0,0 +1,28 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef THERM_GV11B_H
#define THERM_GV11B_H
/* GV11B thermal/clock-gating HAL interface. */
struct gk20a;
/* Program production ELCG idle-filter values for all active engines,
 * FECS and HUBMMU; no-op on simulation platforms. Returns 0. */
int gv11b_elcg_init_idle_filters(struct gk20a *g);
#endif /* THERM_GV11B_H */

View File

@@ -0,0 +1,29 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVGPU_ENABLED_T19X_H__
#define __NVGPU_ENABLED_T19X_H__
/* subcontexts are available */
/* NOTE(review): 63 is presumably a bit index into the driver-wide
 * nvgpu "enabled" flags bitmap (nvgpu_is_enabled()/__nvgpu_set_enabled)
 * -- confirm it does not collide with the core flag numbering. */
#define NVGPU_SUPPORT_TSG_SUBCONTEXTS 63
#endif

View File

@@ -0,0 +1,34 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVGPU_GMMU_T19X_H__
#define __NVGPU_GMMU_T19X_H__
struct nvgpu_gmmu_attrs;
/* T19x-specific extension of the GMMU mapping attributes. */
struct nvgpu_gmmu_attrs_t19x {
	/* Request allocation/caching in the L3 cache for this mapping.
	 * NOTE(review): semantics inferred from the name -- confirm in
	 * common/mm/gmmu_t19x.c. */
	bool l3_alloc;
};
/* Populate the t19x fields of 'attrs' from the ioctl mapping 'flags'. */
void nvgpu_gmmu_add_t19x_attrs(struct nvgpu_gmmu_attrs *attrs, u32 flags);
#endif

View File

@@ -0,0 +1,227 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_bus_gv100_h_
#define _hw_bus_gv100_h_
/*
 * GV100 PBUS register accessors: offsets, field masks and field values
 * for BAR0 window, BAR1/BAR2 block binding and bus interrupts.
 * NOTE(review): this header appears to be machine-generated from the
 * hardware manifest -- prefer regenerating over hand-editing.
 */
static inline u32 bus_sw_scratch_r(u32 i)
{
	return 0x00001580U + i*4U;
}
static inline u32 bus_bar0_window_r(void)
{
	return 0x00001700U;
}
static inline u32 bus_bar0_window_base_f(u32 v)
{
	return (v & 0xffffffU) << 0U;
}
static inline u32 bus_bar0_window_target_vid_mem_f(void)
{
	return 0x0U;
}
static inline u32 bus_bar0_window_target_sys_mem_coherent_f(void)
{
	return 0x2000000U;
}
static inline u32 bus_bar0_window_target_sys_mem_noncoherent_f(void)
{
	return 0x3000000U;
}
static inline u32 bus_bar0_window_target_bar0_window_base_shift_v(void)
{
	return 0x00000010U;
}
static inline u32 bus_bar1_block_r(void)
{
	return 0x00001704U;
}
static inline u32 bus_bar1_block_ptr_f(u32 v)
{
	return (v & 0xfffffffU) << 0U;
}
static inline u32 bus_bar1_block_target_vid_mem_f(void)
{
	return 0x0U;
}
static inline u32 bus_bar1_block_target_sys_mem_coh_f(void)
{
	return 0x20000000U;
}
static inline u32 bus_bar1_block_target_sys_mem_ncoh_f(void)
{
	return 0x30000000U;
}
static inline u32 bus_bar1_block_mode_virtual_f(void)
{
	return 0x80000000U;
}
static inline u32 bus_bar2_block_r(void)
{
	return 0x00001714U;
}
static inline u32 bus_bar2_block_ptr_f(u32 v)
{
	return (v & 0xfffffffU) << 0U;
}
static inline u32 bus_bar2_block_target_vid_mem_f(void)
{
	return 0x0U;
}
static inline u32 bus_bar2_block_target_sys_mem_coh_f(void)
{
	return 0x20000000U;
}
static inline u32 bus_bar2_block_target_sys_mem_ncoh_f(void)
{
	return 0x30000000U;
}
static inline u32 bus_bar2_block_mode_virtual_f(void)
{
	return 0x80000000U;
}
static inline u32 bus_bar1_block_ptr_shift_v(void)
{
	return 0x0000000cU;
}
static inline u32 bus_bar2_block_ptr_shift_v(void)
{
	return 0x0000000cU;
}
static inline u32 bus_bind_status_r(void)
{
	return 0x00001710U;
}
static inline u32 bus_bind_status_bar1_pending_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 bus_bind_status_bar1_pending_empty_f(void)
{
	return 0x0U;
}
static inline u32 bus_bind_status_bar1_pending_busy_f(void)
{
	return 0x1U;
}
static inline u32 bus_bind_status_bar1_outstanding_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 bus_bind_status_bar1_outstanding_false_f(void)
{
	return 0x0U;
}
static inline u32 bus_bind_status_bar1_outstanding_true_f(void)
{
	return 0x2U;
}
static inline u32 bus_bind_status_bar2_pending_v(u32 r)
{
	return (r >> 2U) & 0x1U;
}
static inline u32 bus_bind_status_bar2_pending_empty_f(void)
{
	return 0x0U;
}
static inline u32 bus_bind_status_bar2_pending_busy_f(void)
{
	return 0x4U;
}
static inline u32 bus_bind_status_bar2_outstanding_v(u32 r)
{
	return (r >> 3U) & 0x1U;
}
static inline u32 bus_bind_status_bar2_outstanding_false_f(void)
{
	return 0x0U;
}
static inline u32 bus_bind_status_bar2_outstanding_true_f(void)
{
	return 0x8U;
}
static inline u32 bus_intr_0_r(void)
{
	return 0x00001100U;
}
static inline u32 bus_intr_0_pri_squash_m(void)
{
	return 0x1U << 1U;
}
static inline u32 bus_intr_0_pri_fecserr_m(void)
{
	return 0x1U << 2U;
}
static inline u32 bus_intr_0_pri_timeout_m(void)
{
	return 0x1U << 3U;
}
static inline u32 bus_intr_en_0_r(void)
{
	return 0x00001140U;
}
static inline u32 bus_intr_en_0_pri_squash_m(void)
{
	return 0x1U << 1U;
}
static inline u32 bus_intr_en_0_pri_fecserr_m(void)
{
	return 0x1U << 2U;
}
static inline u32 bus_intr_en_0_pri_timeout_m(void)
{
	return 0x1U << 3U;
}
#endif

View File

@@ -0,0 +1,187 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_ccsr_gv100_h_
#define _hw_ccsr_gv100_h_
/*
 * GV100 CCSR (channel control/status RAM) register accessors: per-channel
 * instance binding, enable/disable, status and fault bits.
 * NOTE(review): this header appears to be machine-generated from the
 * hardware manifest -- prefer regenerating over hand-editing.
 */
static inline u32 ccsr_channel_inst_r(u32 i)
{
	return 0x00800000U + i*8U;
}
static inline u32 ccsr_channel_inst__size_1_v(void)
{
	return 0x00001000U;
}
static inline u32 ccsr_channel_inst_ptr_f(u32 v)
{
	return (v & 0xfffffffU) << 0U;
}
static inline u32 ccsr_channel_inst_target_vid_mem_f(void)
{
	return 0x0U;
}
static inline u32 ccsr_channel_inst_target_sys_mem_coh_f(void)
{
	return 0x20000000U;
}
static inline u32 ccsr_channel_inst_target_sys_mem_ncoh_f(void)
{
	return 0x30000000U;
}
static inline u32 ccsr_channel_inst_bind_false_f(void)
{
	return 0x0U;
}
static inline u32 ccsr_channel_inst_bind_true_f(void)
{
	return 0x80000000U;
}
static inline u32 ccsr_channel_r(u32 i)
{
	return 0x00800004U + i*8U;
}
static inline u32 ccsr_channel__size_1_v(void)
{
	return 0x00001000U;
}
static inline u32 ccsr_channel_enable_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ccsr_channel_enable_set_f(u32 v)
{
	return (v & 0x1U) << 10U;
}
static inline u32 ccsr_channel_enable_set_true_f(void)
{
	return 0x400U;
}
static inline u32 ccsr_channel_enable_clr_true_f(void)
{
	return 0x800U;
}
static inline u32 ccsr_channel_status_v(u32 r)
{
	return (r >> 24U) & 0xfU;
}
static inline u32 ccsr_channel_status_pending_ctx_reload_v(void)
{
	return 0x00000002U;
}
static inline u32 ccsr_channel_status_pending_acq_ctx_reload_v(void)
{
	return 0x00000004U;
}
static inline u32 ccsr_channel_status_on_pbdma_ctx_reload_v(void)
{
	return 0x0000000aU;
}
static inline u32 ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v(void)
{
	return 0x0000000bU;
}
static inline u32 ccsr_channel_status_on_eng_ctx_reload_v(void)
{
	return 0x0000000cU;
}
static inline u32 ccsr_channel_status_on_eng_pending_ctx_reload_v(void)
{
	return 0x0000000dU;
}
static inline u32 ccsr_channel_status_on_eng_pending_acq_ctx_reload_v(void)
{
	return 0x0000000eU;
}
static inline u32 ccsr_channel_next_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 ccsr_channel_next_true_v(void)
{
	return 0x00000001U;
}
static inline u32 ccsr_channel_force_ctx_reload_true_f(void)
{
	return 0x100U;
}
static inline u32 ccsr_channel_pbdma_faulted_f(u32 v)
{
	return (v & 0x1U) << 22U;
}
static inline u32 ccsr_channel_pbdma_faulted_reset_f(void)
{
	return 0x400000U;
}
static inline u32 ccsr_channel_eng_faulted_f(u32 v)
{
	return (v & 0x1U) << 23U;
}
static inline u32 ccsr_channel_eng_faulted_v(u32 r)
{
	return (r >> 23U) & 0x1U;
}
static inline u32 ccsr_channel_eng_faulted_reset_f(void)
{
	return 0x800000U;
}
static inline u32 ccsr_channel_eng_faulted_true_v(void)
{
	return 0x00000001U;
}
static inline u32 ccsr_channel_busy_v(u32 r)
{
	return (r >> 28U) & 0x1U;
}
#endif

View File

@@ -0,0 +1,107 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_ce_gv100_h_
#define _hw_ce_gv100_h_
/*
 * GV100 CE (copy engine) register accessors: per-engine interrupt status
 * pending/reset bits and the PCE map register.
 * NOTE(review): this header appears to be machine-generated from the
 * hardware manifest -- prefer regenerating over hand-editing.
 */
static inline u32 ce_intr_status_r(u32 i)
{
	return 0x00104410U + i*128U;
}
static inline u32 ce_intr_status_blockpipe_pending_f(void)
{
	return 0x1U;
}
static inline u32 ce_intr_status_blockpipe_reset_f(void)
{
	return 0x1U;
}
static inline u32 ce_intr_status_nonblockpipe_pending_f(void)
{
	return 0x2U;
}
static inline u32 ce_intr_status_nonblockpipe_reset_f(void)
{
	return 0x2U;
}
static inline u32 ce_intr_status_launcherr_pending_f(void)
{
	return 0x4U;
}
static inline u32 ce_intr_status_launcherr_reset_f(void)
{
	return 0x4U;
}
static inline u32 ce_intr_status_invalid_config_pending_f(void)
{
	return 0x8U;
}
static inline u32 ce_intr_status_invalid_config_reset_f(void)
{
	return 0x8U;
}
static inline u32 ce_intr_status_mthd_buffer_fault_pending_f(void)
{
	return 0x10U;
}
static inline u32 ce_intr_status_mthd_buffer_fault_reset_f(void)
{
	return 0x10U;
}
static inline u32 ce_pce_map_r(void)
{
	return 0x00104028U;
}
#endif

View File

@@ -0,0 +1,455 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_ctxsw_prog_gv100_h_
#define _hw_ctxsw_prog_gv100_h_
/*
 * GV100 context-switch program image layout accessors: byte offsets and
 * field encodings within the FECS/GPCCS context image (main and local
 * headers, preemption pointers, priv access map, magic values).
 * NOTE(review): this header appears to be machine-generated from the
 * hardware manifest -- prefer regenerating over hand-editing.
 */
static inline u32 ctxsw_prog_fecs_header_v(void)
{
	return 0x00000100U;
}
static inline u32 ctxsw_prog_main_image_num_gpcs_o(void)
{
	return 0x00000008U;
}
static inline u32 ctxsw_prog_main_image_ctl_o(void)
{
	return 0x0000000cU;
}
static inline u32 ctxsw_prog_main_image_ctl_type_f(u32 v)
{
	return (v & 0x3fU) << 0U;
}
static inline u32 ctxsw_prog_main_image_ctl_type_undefined_v(void)
{
	return 0x00000000U;
}
static inline u32 ctxsw_prog_main_image_ctl_type_opengl_v(void)
{
	return 0x00000008U;
}
static inline u32 ctxsw_prog_main_image_ctl_type_dx9_v(void)
{
	return 0x00000010U;
}
static inline u32 ctxsw_prog_main_image_ctl_type_dx10_v(void)
{
	return 0x00000011U;
}
static inline u32 ctxsw_prog_main_image_ctl_type_dx11_v(void)
{
	return 0x00000012U;
}
static inline u32 ctxsw_prog_main_image_ctl_type_compute_v(void)
{
	return 0x00000020U;
}
static inline u32 ctxsw_prog_main_image_ctl_type_per_veid_header_v(void)
{
	return 0x00000021U;
}
static inline u32 ctxsw_prog_main_image_patch_count_o(void)
{
	return 0x00000010U;
}
static inline u32 ctxsw_prog_main_image_context_id_o(void)
{
	return 0x000000f0U;
}
static inline u32 ctxsw_prog_main_image_patch_adr_lo_o(void)
{
	return 0x00000014U;
}
static inline u32 ctxsw_prog_main_image_patch_adr_hi_o(void)
{
	return 0x00000018U;
}
static inline u32 ctxsw_prog_main_image_zcull_o(void)
{
	return 0x0000001cU;
}
static inline u32 ctxsw_prog_main_image_zcull_mode_no_ctxsw_v(void)
{
	return 0x00000001U;
}
static inline u32 ctxsw_prog_main_image_zcull_mode_separate_buffer_v(void)
{
	return 0x00000002U;
}
static inline u32 ctxsw_prog_main_image_zcull_ptr_o(void)
{
	return 0x00000020U;
}
static inline u32 ctxsw_prog_main_image_pm_o(void)
{
	return 0x00000028U;
}
static inline u32 ctxsw_prog_main_image_pm_mode_m(void)
{
	return 0x7U << 0U;
}
static inline u32 ctxsw_prog_main_image_pm_mode_no_ctxsw_f(void)
{
	return 0x0U;
}
static inline u32 ctxsw_prog_main_image_pm_smpc_mode_m(void)
{
	return 0x7U << 3U;
}
static inline u32 ctxsw_prog_main_image_pm_smpc_mode_ctxsw_f(void)
{
	return 0x8U;
}
static inline u32 ctxsw_prog_main_image_pm_smpc_mode_no_ctxsw_f(void)
{
	return 0x0U;
}
static inline u32 ctxsw_prog_main_image_pm_ptr_o(void)
{
	return 0x0000002cU;
}
static inline u32 ctxsw_prog_main_image_num_save_ops_o(void)
{
	return 0x000000f4U;
}
static inline u32 ctxsw_prog_main_image_num_wfi_save_ops_o(void)
{
	return 0x000000d0U;
}
static inline u32 ctxsw_prog_main_image_num_cta_save_ops_o(void)
{
	return 0x000000d4U;
}
static inline u32 ctxsw_prog_main_image_num_gfxp_save_ops_o(void)
{
	return 0x000000d8U;
}
static inline u32 ctxsw_prog_main_image_num_cilp_save_ops_o(void)
{
	return 0x000000dcU;
}
static inline u32 ctxsw_prog_main_image_num_restore_ops_o(void)
{
	return 0x000000f8U;
}
static inline u32 ctxsw_prog_main_image_zcull_ptr_hi_o(void)
{
	return 0x00000060U;
}
static inline u32 ctxsw_prog_main_image_zcull_ptr_hi_v_f(u32 v)
{
	return (v & 0x1ffffU) << 0U;
}
static inline u32 ctxsw_prog_main_image_pm_ptr_hi_o(void)
{
	return 0x00000094U;
}
static inline u32 ctxsw_prog_main_image_full_preemption_ptr_hi_o(void)
{
	return 0x00000064U;
}
static inline u32 ctxsw_prog_main_image_full_preemption_ptr_hi_v_f(u32 v)
{
	return (v & 0x1ffffU) << 0U;
}
static inline u32 ctxsw_prog_main_image_full_preemption_ptr_o(void)
{
	return 0x00000068U;
}
static inline u32 ctxsw_prog_main_image_full_preemption_ptr_v_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_hi_o(void)
{
	return 0x00000070U;
}
static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_hi_v_f(u32 v)
{
	return (v & 0x1ffffU) << 0U;
}
static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_o(void)
{
	return 0x00000074U;
}
static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_v_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 ctxsw_prog_main_image_context_buffer_ptr_hi_o(void)
{
	return 0x00000078U;
}
static inline u32 ctxsw_prog_main_image_context_buffer_ptr_hi_v_f(u32 v)
{
	return (v & 0x1ffffU) << 0U;
}
static inline u32 ctxsw_prog_main_image_context_buffer_ptr_o(void)
{
	return 0x0000007cU;
}
static inline u32 ctxsw_prog_main_image_context_buffer_ptr_v_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 ctxsw_prog_main_image_magic_value_o(void)
{
	return 0x000000fcU;
}
static inline u32 ctxsw_prog_main_image_magic_value_v_value_v(void)
{
	return 0x600dc0deU;
}
static inline u32 ctxsw_prog_local_priv_register_ctl_o(void)
{
	return 0x0000000cU;
}
static inline u32 ctxsw_prog_local_priv_register_ctl_offset_v(u32 r)
{
	return (r >> 0U) & 0xffffU;
}
static inline u32 ctxsw_prog_main_image_global_cb_ptr_o(void)
{
	return 0x000000b8U;
}
static inline u32 ctxsw_prog_main_image_global_cb_ptr_v_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 ctxsw_prog_main_image_global_cb_ptr_hi_o(void)
{
	return 0x000000bcU;
}
static inline u32 ctxsw_prog_main_image_global_cb_ptr_hi_v_f(u32 v)
{
	return (v & 0x1ffffU) << 0U;
}
static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_o(void)
{
	return 0x000000c0U;
}
static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_v_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_hi_o(void)
{
	return 0x000000c4U;
}
static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_hi_v_f(u32 v)
{
	return (v & 0x1ffffU) << 0U;
}
static inline u32 ctxsw_prog_main_image_control_block_ptr_o(void)
{
	return 0x000000c8U;
}
static inline u32 ctxsw_prog_main_image_control_block_ptr_v_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 ctxsw_prog_main_image_control_block_ptr_hi_o(void)
{
	return 0x000000ccU;
}
static inline u32 ctxsw_prog_main_image_control_block_ptr_hi_v_f(u32 v)
{
	return (v & 0x1ffffU) << 0U;
}
static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_lo_o(void)
{
	return 0x000000e0U;
}
static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_lo_v_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_hi_o(void)
{
	return 0x000000e4U;
}
static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_hi_v_f(u32 v)
{
	return (v & 0x1ffffU) << 0U;
}
static inline u32 ctxsw_prog_local_image_ppc_info_o(void)
{
	return 0x000000f4U;
}
static inline u32 ctxsw_prog_local_image_ppc_info_num_ppcs_v(u32 r)
{
	return (r >> 0U) & 0xffffU;
}
static inline u32 ctxsw_prog_local_image_ppc_info_ppc_mask_v(u32 r)
{
	return (r >> 16U) & 0xffffU;
}
static inline u32 ctxsw_prog_local_image_num_tpcs_o(void)
{
	return 0x000000f8U;
}
static inline u32 ctxsw_prog_local_magic_value_o(void)
{
	return 0x000000fcU;
}
static inline u32 ctxsw_prog_local_magic_value_v_value_v(void)
{
	return 0xad0becabU;
}
static inline u32 ctxsw_prog_main_extended_buffer_ctl_o(void)
{
	return 0x000000ecU;
}
static inline u32 ctxsw_prog_main_extended_buffer_ctl_offset_v(u32 r)
{
	return (r >> 0U) & 0xffffU;
}
static inline u32 ctxsw_prog_main_extended_buffer_ctl_size_v(u32 r)
{
	return (r >> 16U) & 0xffU;
}
static inline u32 ctxsw_prog_extended_buffer_segments_size_in_bytes_v(void)
{
	return 0x00000100U;
}
static inline u32 ctxsw_prog_extended_marker_size_in_bytes_v(void)
{
	return 0x00000004U;
}
static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_register_stride_v(void)
{
	return 0x00000000U;
}
static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_control_register_stride_v(void)
{
	return 0x00000002U;
}
static inline u32 ctxsw_prog_main_image_priv_access_map_config_o(void)
{
	return 0x000000a0U;
}
static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_s(void)
{
	return 2U;
}
static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_f(u32 v)
{
	return (v & 0x3U) << 0U;
}
static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_m(void)
{
	return 0x3U << 0U;
}
static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_v(u32 r)
{
	return (r >> 0U) & 0x3U;
}
static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_allow_all_f(void)
{
	return 0x0U;
}
static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_use_map_f(void)
{
	return 0x2U;
}
static inline u32 ctxsw_prog_main_image_priv_access_map_addr_lo_o(void)
{
	return 0x000000a4U;
}
static inline u32 ctxsw_prog_main_image_priv_access_map_addr_hi_o(void)
{
	return 0x000000a8U;
}
static inline u32 ctxsw_prog_main_image_misc_options_o(void)
{
	return 0x0000003cU;
}
static inline u32 ctxsw_prog_main_image_misc_options_verif_features_m(void)
{
	return 0x1U << 3U;
}
static inline u32 ctxsw_prog_main_image_misc_options_verif_features_disabled_f(void)
{
	return 0x0U;
}
static inline u32 ctxsw_prog_main_image_graphics_preemption_options_o(void)
{
	return 0x00000080U;
}
static inline u32 ctxsw_prog_main_image_graphics_preemption_options_control_f(u32 v)
{
	return (v & 0x3U) << 0U;
}
static inline u32 ctxsw_prog_main_image_graphics_preemption_options_control_gfxp_f(void)
{
	return 0x1U;
}
static inline u32 ctxsw_prog_main_image_compute_preemption_options_o(void)
{
	return 0x00000084U;
}
static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_f(u32 v)
{
	return (v & 0x3U) << 0U;
}
static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_cta_f(void)
{
	return 0x1U;
}
static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_cilp_f(void)
{
	return 0x2U;
}
#endif

View File

@@ -0,0 +1,599 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_falcon_gv100_h_
#define _hw_falcon_gv100_h_
/* Offset of the falcon IRQSSET (interrupt set) register. */
static inline u32 falcon_falcon_irqsset_r(void)
{
	return 0U;
}
/* Set-bit for the SWGEN0 software-generated interrupt (bit 6). */
static inline u32 falcon_falcon_irqsset_swgen0_set_f(void)
{
	return 0x00000040U;
}
static inline u32 falcon_falcon_irqsclr_r(void)
{
return 0x00000004U;
}
static inline u32 falcon_falcon_irqstat_r(void)
{
return 0x00000008U;
}
static inline u32 falcon_falcon_irqstat_halt_true_f(void)
{
return 0x10U;
}
static inline u32 falcon_falcon_irqstat_exterr_true_f(void)
{
return 0x20U;
}
static inline u32 falcon_falcon_irqstat_swgen0_true_f(void)
{
return 0x40U;
}
static inline u32 falcon_falcon_irqmode_r(void)
{
return 0x0000000cU;
}
static inline u32 falcon_falcon_irqmset_r(void)
{
return 0x00000010U;
}
static inline u32 falcon_falcon_irqmset_gptmr_f(u32 v)
{
return (v & 0x1U) << 0U;
}
static inline u32 falcon_falcon_irqmset_wdtmr_f(u32 v)
{
return (v & 0x1U) << 1U;
}
static inline u32 falcon_falcon_irqmset_mthd_f(u32 v)
{
return (v & 0x1U) << 2U;
}
static inline u32 falcon_falcon_irqmset_ctxsw_f(u32 v)
{
return (v & 0x1U) << 3U;
}
static inline u32 falcon_falcon_irqmset_halt_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 falcon_falcon_irqmset_exterr_f(u32 v)
{
return (v & 0x1U) << 5U;
}
static inline u32 falcon_falcon_irqmset_swgen0_f(u32 v)
{
return (v & 0x1U) << 6U;
}
static inline u32 falcon_falcon_irqmset_swgen1_f(u32 v)
{
return (v & 0x1U) << 7U;
}
static inline u32 falcon_falcon_irqmclr_r(void)
{
return 0x00000014U;
}
static inline u32 falcon_falcon_irqmclr_gptmr_f(u32 v)
{
return (v & 0x1U) << 0U;
}
static inline u32 falcon_falcon_irqmclr_wdtmr_f(u32 v)
{
return (v & 0x1U) << 1U;
}
static inline u32 falcon_falcon_irqmclr_mthd_f(u32 v)
{
return (v & 0x1U) << 2U;
}
static inline u32 falcon_falcon_irqmclr_ctxsw_f(u32 v)
{
return (v & 0x1U) << 3U;
}
static inline u32 falcon_falcon_irqmclr_halt_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 falcon_falcon_irqmclr_exterr_f(u32 v)
{
return (v & 0x1U) << 5U;
}
static inline u32 falcon_falcon_irqmclr_swgen0_f(u32 v)
{
return (v & 0x1U) << 6U;
}
static inline u32 falcon_falcon_irqmclr_swgen1_f(u32 v)
{
return (v & 0x1U) << 7U;
}
static inline u32 falcon_falcon_irqmclr_ext_f(u32 v)
{
return (v & 0xffU) << 8U;
}
static inline u32 falcon_falcon_irqmask_r(void)
{
return 0x00000018U;
}
static inline u32 falcon_falcon_irqdest_r(void)
{
return 0x0000001cU;
}
static inline u32 falcon_falcon_irqdest_host_gptmr_f(u32 v)
{
return (v & 0x1U) << 0U;
}
static inline u32 falcon_falcon_irqdest_host_wdtmr_f(u32 v)
{
return (v & 0x1U) << 1U;
}
static inline u32 falcon_falcon_irqdest_host_mthd_f(u32 v)
{
return (v & 0x1U) << 2U;
}
static inline u32 falcon_falcon_irqdest_host_ctxsw_f(u32 v)
{
return (v & 0x1U) << 3U;
}
static inline u32 falcon_falcon_irqdest_host_halt_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 falcon_falcon_irqdest_host_exterr_f(u32 v)
{
return (v & 0x1U) << 5U;
}
static inline u32 falcon_falcon_irqdest_host_swgen0_f(u32 v)
{
return (v & 0x1U) << 6U;
}
static inline u32 falcon_falcon_irqdest_host_swgen1_f(u32 v)
{
return (v & 0x1U) << 7U;
}
static inline u32 falcon_falcon_irqdest_host_ext_f(u32 v)
{
return (v & 0xffU) << 8U;
}
static inline u32 falcon_falcon_irqdest_target_gptmr_f(u32 v)
{
return (v & 0x1U) << 16U;
}
static inline u32 falcon_falcon_irqdest_target_wdtmr_f(u32 v)
{
return (v & 0x1U) << 17U;
}
static inline u32 falcon_falcon_irqdest_target_mthd_f(u32 v)
{
return (v & 0x1U) << 18U;
}
static inline u32 falcon_falcon_irqdest_target_ctxsw_f(u32 v)
{
return (v & 0x1U) << 19U;
}
static inline u32 falcon_falcon_irqdest_target_halt_f(u32 v)
{
return (v & 0x1U) << 20U;
}
static inline u32 falcon_falcon_irqdest_target_exterr_f(u32 v)
{
return (v & 0x1U) << 21U;
}
static inline u32 falcon_falcon_irqdest_target_swgen0_f(u32 v)
{
return (v & 0x1U) << 22U;
}
static inline u32 falcon_falcon_irqdest_target_swgen1_f(u32 v)
{
return (v & 0x1U) << 23U;
}
static inline u32 falcon_falcon_irqdest_target_ext_f(u32 v)
{
return (v & 0xffU) << 24U;
}
static inline u32 falcon_falcon_curctx_r(void)
{
return 0x00000050U;
}
static inline u32 falcon_falcon_nxtctx_r(void)
{
return 0x00000054U;
}
static inline u32 falcon_falcon_mailbox0_r(void)
{
return 0x00000040U;
}
static inline u32 falcon_falcon_mailbox1_r(void)
{
return 0x00000044U;
}
static inline u32 falcon_falcon_itfen_r(void)
{
return 0x00000048U;
}
static inline u32 falcon_falcon_itfen_ctxen_enable_f(void)
{
return 0x1U;
}
static inline u32 falcon_falcon_idlestate_r(void)
{
return 0x0000004cU;
}
static inline u32 falcon_falcon_idlestate_falcon_busy_v(u32 r)
{
return (r >> 0U) & 0x1U;
}
static inline u32 falcon_falcon_idlestate_ext_busy_v(u32 r)
{
return (r >> 1U) & 0x7fffU;
}
static inline u32 falcon_falcon_os_r(void)
{
return 0x00000080U;
}
static inline u32 falcon_falcon_engctl_r(void)
{
return 0x000000a4U;
}
static inline u32 falcon_falcon_cpuctl_r(void)
{
return 0x00000100U;
}
static inline u32 falcon_falcon_cpuctl_startcpu_f(u32 v)
{
return (v & 0x1U) << 1U;
}
static inline u32 falcon_falcon_cpuctl_sreset_f(u32 v)
{
return (v & 0x1U) << 2U;
}
static inline u32 falcon_falcon_cpuctl_hreset_f(u32 v)
{
return (v & 0x1U) << 3U;
}
static inline u32 falcon_falcon_cpuctl_halt_intr_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 falcon_falcon_cpuctl_halt_intr_m(void)
{
return 0x1U << 4U;
}
static inline u32 falcon_falcon_cpuctl_halt_intr_v(u32 r)
{
return (r >> 4U) & 0x1U;
}
static inline u32 falcon_falcon_cpuctl_stopped_m(void)
{
return 0x1U << 5U;
}
static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_f(u32 v)
{
return (v & 0x1U) << 6U;
}
static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_m(void)
{
return 0x1U << 6U;
}
static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_v(u32 r)
{
return (r >> 6U) & 0x1U;
}
static inline u32 falcon_falcon_cpuctl_alias_r(void)
{
return 0x00000130U;
}
static inline u32 falcon_falcon_cpuctl_alias_startcpu_f(u32 v)
{
return (v & 0x1U) << 1U;
}
/* IMEM access-control register for port i (ports are 16 bytes apart). */
static inline u32 falcon_falcon_imemc_r(u32 i)
{
	return 0x00000180U + i*16U;
}
/* Word offset within the selected IMEM block (bits 7:2). */
static inline u32 falcon_falcon_imemc_offs_f(u32 v)
{
	return (v & 0x3fU) << 2U;
}
/* IMEM block index (bits 15:8). */
static inline u32 falcon_falcon_imemc_blk_f(u32 v)
{
	return (v & 0xffU) << 8U;
}
/* Auto-increment the offset on each data write when set (bit 24). */
static inline u32 falcon_falcon_imemc_aincw_f(u32 v)
{
	return (v & 0x1U) << 24U;
}
/* IMEM data window register for port i. */
static inline u32 falcon_falcon_imemd_r(u32 i)
{
	return 0x00000184U + i*16U;
}
/* IMEM tag register for port i. */
static inline u32 falcon_falcon_imemt_r(u32 i)
{
	return 0x00000188U + i*16U;
}
static inline u32 falcon_falcon_sctl_r(void)
{
return 0x00000240U;
}
static inline u32 falcon_falcon_mmu_phys_sec_r(void)
{
return 0x00100ce4U;
}
static inline u32 falcon_falcon_bootvec_r(void)
{
return 0x00000104U;
}
static inline u32 falcon_falcon_bootvec_vec_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 falcon_falcon_dmactl_r(void)
{
return 0x0000010cU;
}
static inline u32 falcon_falcon_dmactl_dmem_scrubbing_m(void)
{
return 0x1U << 1U;
}
static inline u32 falcon_falcon_dmactl_imem_scrubbing_m(void)
{
return 0x1U << 2U;
}
static inline u32 falcon_falcon_dmactl_require_ctx_f(u32 v)
{
return (v & 0x1U) << 0U;
}
static inline u32 falcon_falcon_hwcfg_r(void)
{
return 0x00000108U;
}
static inline u32 falcon_falcon_hwcfg_imem_size_v(u32 r)
{
return (r >> 0U) & 0x1ffU;
}
static inline u32 falcon_falcon_hwcfg_dmem_size_v(u32 r)
{
return (r >> 9U) & 0x1ffU;
}
static inline u32 falcon_falcon_dmatrfbase_r(void)
{
return 0x00000110U;
}
static inline u32 falcon_falcon_dmatrfbase1_r(void)
{
return 0x00000128U;
}
static inline u32 falcon_falcon_dmatrfmoffs_r(void)
{
return 0x00000114U;
}
static inline u32 falcon_falcon_dmatrfcmd_r(void)
{
return 0x00000118U;
}
static inline u32 falcon_falcon_dmatrfcmd_imem_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 falcon_falcon_dmatrfcmd_write_f(u32 v)
{
return (v & 0x1U) << 5U;
}
static inline u32 falcon_falcon_dmatrfcmd_size_f(u32 v)
{
return (v & 0x7U) << 8U;
}
static inline u32 falcon_falcon_dmatrfcmd_ctxdma_f(u32 v)
{
return (v & 0x7U) << 12U;
}
static inline u32 falcon_falcon_dmatrffboffs_r(void)
{
return 0x0000011cU;
}
static inline u32 falcon_falcon_imctl_debug_r(void)
{
return 0x0000015cU;
}
static inline u32 falcon_falcon_imctl_debug_addr_blk_f(u32 v)
{
return (v & 0xffffffU) << 0U;
}
static inline u32 falcon_falcon_imctl_debug_cmd_f(u32 v)
{
return (v & 0x7U) << 24U;
}
static inline u32 falcon_falcon_imstat_r(void)
{
return 0x00000144U;
}
static inline u32 falcon_falcon_traceidx_r(void)
{
return 0x00000148U;
}
static inline u32 falcon_falcon_traceidx_maxidx_v(u32 r)
{
return (r >> 16U) & 0xffU;
}
static inline u32 falcon_falcon_traceidx_idx_f(u32 v)
{
return (v & 0xffU) << 0U;
}
static inline u32 falcon_falcon_tracepc_r(void)
{
return 0x0000014cU;
}
static inline u32 falcon_falcon_tracepc_pc_v(u32 r)
{
return (r >> 0U) & 0xffffffU;
}
static inline u32 falcon_falcon_exterraddr_r(void)
{
return 0x00000168U;
}
static inline u32 falcon_falcon_exterrstat_r(void)
{
return 0x0000016cU;
}
static inline u32 falcon_falcon_exterrstat_valid_m(void)
{
return 0x1U << 31U;
}
static inline u32 falcon_falcon_exterrstat_valid_v(u32 r)
{
return (r >> 31U) & 0x1U;
}
static inline u32 falcon_falcon_exterrstat_valid_true_v(void)
{
return 0x00000001U;
}
static inline u32 falcon_falcon_icd_cmd_r(void)
{
return 0x00000200U;
}
static inline u32 falcon_falcon_icd_cmd_opc_s(void)
{
return 4U;
}
static inline u32 falcon_falcon_icd_cmd_opc_f(u32 v)
{
return (v & 0xfU) << 0U;
}
static inline u32 falcon_falcon_icd_cmd_opc_m(void)
{
return 0xfU << 0U;
}
static inline u32 falcon_falcon_icd_cmd_opc_v(u32 r)
{
return (r >> 0U) & 0xfU;
}
static inline u32 falcon_falcon_icd_cmd_opc_rreg_f(void)
{
return 0x8U;
}
static inline u32 falcon_falcon_icd_cmd_opc_rstat_f(void)
{
return 0xeU;
}
static inline u32 falcon_falcon_icd_cmd_idx_f(u32 v)
{
return (v & 0x1fU) << 8U;
}
static inline u32 falcon_falcon_icd_rdata_r(void)
{
return 0x0000020cU;
}
/* DMEM access-control register for port i (ports are 8 bytes apart). */
static inline u32 falcon_falcon_dmemc_r(u32 i)
{
	return 0x000001c0U + i*8U;
}
/* Word offset within the selected DMEM block (bits 7:2). */
static inline u32 falcon_falcon_dmemc_offs_f(u32 v)
{
	return (v & 0x3fU) << 2U;
}
/* Mask covering the DMEM offset field. */
static inline u32 falcon_falcon_dmemc_offs_m(void)
{
	return 0x3fU << 2U;
}
/* DMEM block index (bits 15:8). */
static inline u32 falcon_falcon_dmemc_blk_f(u32 v)
{
	return (v & 0xffU) << 8U;
}
/* Mask covering the DMEM block-index field. */
static inline u32 falcon_falcon_dmemc_blk_m(void)
{
	return 0xffU << 8U;
}
/* Auto-increment the offset on each data write when set (bit 24). */
static inline u32 falcon_falcon_dmemc_aincw_f(u32 v)
{
	return (v & 0x1U) << 24U;
}
/* Auto-increment the offset on each data read when set (bit 25). */
static inline u32 falcon_falcon_dmemc_aincr_f(u32 v)
{
	return (v & 0x1U) << 25U;
}
/* DMEM data window register for port i. */
static inline u32 falcon_falcon_dmemd_r(u32 i)
{
	return 0x000001c4U + i*8U;
}
static inline u32 falcon_falcon_debug1_r(void)
{
return 0x00000090U;
}
static inline u32 falcon_falcon_debug1_ctxsw_mode_s(void)
{
return 1U;
}
static inline u32 falcon_falcon_debug1_ctxsw_mode_f(u32 v)
{
return (v & 0x1U) << 16U;
}
static inline u32 falcon_falcon_debug1_ctxsw_mode_m(void)
{
return 0x1U << 16U;
}
static inline u32 falcon_falcon_debug1_ctxsw_mode_v(u32 r)
{
return (r >> 16U) & 0x1U;
}
static inline u32 falcon_falcon_debug1_ctxsw_mode_init_f(void)
{
return 0x0U;
}
/* Offset of the falcon DEBUGINFO register. */
static inline u32 falcon_falcon_debuginfo_r(void)
{
	return 0x94U;
}
#endif

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,551 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_fifo_gv100_h_
#define _hw_fifo_gv100_h_
/* Offset of the FIFO BAR1 base register. */
static inline u32 fifo_bar1_base_r(void)
{
	return 0x00002254U;
}
/* BAR1 base pointer field (bits 27:0). */
static inline u32 fifo_bar1_base_ptr_f(u32 v)
{
	return (v & 0xfffffffU) << 0U;
}
/* Alignment shift for the base pointer: pointer is in units of 1 << 12. */
static inline u32 fifo_bar1_base_ptr_align_shift_v(void)
{
	return 0x0000000cU;
}
/* Valid-bit field value: BAR1 mapping not valid. */
static inline u32 fifo_bar1_base_valid_false_f(void)
{
	return 0x0U;
}
/* Valid-bit field value: BAR1 mapping valid (bit 28). */
static inline u32 fifo_bar1_base_valid_true_f(void)
{
	return 0x10000000U;
}
static inline u32 fifo_userd_writeback_r(void)
{
return 0x0000225cU;
}
static inline u32 fifo_userd_writeback_timer_f(u32 v)
{
return (v & 0xffU) << 0U;
}
static inline u32 fifo_userd_writeback_timer_disabled_v(void)
{
return 0x00000000U;
}
static inline u32 fifo_userd_writeback_timer_shorter_v(void)
{
return 0x00000003U;
}
static inline u32 fifo_userd_writeback_timer_100us_v(void)
{
return 0x00000064U;
}
static inline u32 fifo_userd_writeback_timescale_f(u32 v)
{
return (v & 0xfU) << 12U;
}
static inline u32 fifo_userd_writeback_timescale_0_v(void)
{
return 0x00000000U;
}
static inline u32 fifo_runlist_base_r(void)
{
return 0x00002270U;
}
static inline u32 fifo_runlist_base_ptr_f(u32 v)
{
return (v & 0xfffffffU) << 0U;
}
static inline u32 fifo_runlist_base_target_vid_mem_f(void)
{
return 0x0U;
}
static inline u32 fifo_runlist_base_target_sys_mem_coh_f(void)
{
return 0x20000000U;
}
static inline u32 fifo_runlist_base_target_sys_mem_ncoh_f(void)
{
return 0x30000000U;
}
static inline u32 fifo_runlist_r(void)
{
return 0x00002274U;
}
static inline u32 fifo_runlist_engine_f(u32 v)
{
return (v & 0xfU) << 20U;
}
static inline u32 fifo_eng_runlist_base_r(u32 i)
{
return 0x00002280U + i*8U;
}
static inline u32 fifo_eng_runlist_base__size_1_v(void)
{
return 0x0000000dU;
}
static inline u32 fifo_eng_runlist_r(u32 i)
{
return 0x00002284U + i*8U;
}
static inline u32 fifo_eng_runlist__size_1_v(void)
{
return 0x0000000dU;
}
static inline u32 fifo_eng_runlist_length_f(u32 v)
{
return (v & 0xffffU) << 0U;
}
static inline u32 fifo_eng_runlist_length_max_v(void)
{
return 0x0000ffffU;
}
static inline u32 fifo_eng_runlist_pending_true_f(void)
{
return 0x100000U;
}
static inline u32 fifo_pb_timeslice_r(u32 i)
{
return 0x00002350U + i*4U;
}
static inline u32 fifo_pb_timeslice_timeout_16_f(void)
{
return 0x10U;
}
static inline u32 fifo_pb_timeslice_timescale_0_f(void)
{
return 0x0U;
}
static inline u32 fifo_pb_timeslice_enable_true_f(void)
{
return 0x10000000U;
}
static inline u32 fifo_pbdma_map_r(u32 i)
{
return 0x00002390U + i*4U;
}
static inline u32 fifo_intr_0_r(void)
{
return 0x00002100U;
}
static inline u32 fifo_intr_0_bind_error_pending_f(void)
{
return 0x1U;
}
static inline u32 fifo_intr_0_bind_error_reset_f(void)
{
return 0x1U;
}
static inline u32 fifo_intr_0_sched_error_pending_f(void)
{
return 0x100U;
}
static inline u32 fifo_intr_0_sched_error_reset_f(void)
{
return 0x100U;
}
static inline u32 fifo_intr_0_chsw_error_pending_f(void)
{
return 0x10000U;
}
static inline u32 fifo_intr_0_chsw_error_reset_f(void)
{
return 0x10000U;
}
static inline u32 fifo_intr_0_fb_flush_timeout_pending_f(void)
{
return 0x800000U;
}
static inline u32 fifo_intr_0_fb_flush_timeout_reset_f(void)
{
return 0x800000U;
}
static inline u32 fifo_intr_0_lb_error_pending_f(void)
{
return 0x1000000U;
}
static inline u32 fifo_intr_0_lb_error_reset_f(void)
{
return 0x1000000U;
}
static inline u32 fifo_intr_0_pbdma_intr_pending_f(void)
{
return 0x20000000U;
}
static inline u32 fifo_intr_0_runlist_event_pending_f(void)
{
return 0x40000000U;
}
static inline u32 fifo_intr_0_channel_intr_pending_f(void)
{
return 0x80000000U;
}
static inline u32 fifo_intr_en_0_r(void)
{
return 0x00002140U;
}
static inline u32 fifo_intr_en_0_sched_error_f(u32 v)
{
return (v & 0x1U) << 8U;
}
static inline u32 fifo_intr_en_0_sched_error_m(void)
{
return 0x1U << 8U;
}
static inline u32 fifo_intr_en_1_r(void)
{
return 0x00002528U;
}
static inline u32 fifo_intr_bind_error_r(void)
{
return 0x0000252cU;
}
static inline u32 fifo_intr_sched_error_r(void)
{
return 0x0000254cU;
}
static inline u32 fifo_intr_sched_error_code_f(u32 v)
{
return (v & 0xffU) << 0U;
}
static inline u32 fifo_intr_chsw_error_r(void)
{
return 0x0000256cU;
}
static inline u32 fifo_intr_pbdma_id_r(void)
{
return 0x000025a0U;
}
static inline u32 fifo_intr_pbdma_id_status_f(u32 v, u32 i)
{
return (v & 0x1U) << (0U + i*1U);
}
static inline u32 fifo_intr_pbdma_id_status_v(u32 r, u32 i)
{
return (r >> (0U + i*1U)) & 0x1U;
}
static inline u32 fifo_intr_pbdma_id_status__size_1_v(void)
{
return 0x0000000eU;
}
static inline u32 fifo_intr_runlist_r(void)
{
return 0x00002a00U;
}
static inline u32 fifo_fb_timeout_r(void)
{
return 0x00002a04U;
}
static inline u32 fifo_fb_timeout_period_m(void)
{
return 0x3fffffffU << 0U;
}
static inline u32 fifo_fb_timeout_period_max_f(void)
{
return 0x3fffffffU;
}
static inline u32 fifo_fb_timeout_period_init_f(void)
{
return 0x3c00U;
}
static inline u32 fifo_sched_disable_r(void)
{
return 0x00002630U;
}
static inline u32 fifo_sched_disable_runlist_f(u32 v, u32 i)
{
return (v & 0x1U) << (0U + i*1U);
}
static inline u32 fifo_sched_disable_runlist_m(u32 i)
{
return 0x1U << (0U + i*1U);
}
static inline u32 fifo_sched_disable_true_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_runlist_preempt_r(void)
{
return 0x00002638U;
}
static inline u32 fifo_runlist_preempt_runlist_f(u32 v, u32 i)
{
return (v & 0x1U) << (0U + i*1U);
}
static inline u32 fifo_runlist_preempt_runlist_m(u32 i)
{
return 0x1U << (0U + i*1U);
}
static inline u32 fifo_runlist_preempt_runlist_pending_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_preempt_r(void)
{
return 0x00002634U;
}
static inline u32 fifo_preempt_pending_true_f(void)
{
return 0x100000U;
}
static inline u32 fifo_preempt_type_channel_f(void)
{
return 0x0U;
}
static inline u32 fifo_preempt_type_tsg_f(void)
{
return 0x1000000U;
}
static inline u32 fifo_preempt_chid_f(u32 v)
{
return (v & 0xfffU) << 0U;
}
static inline u32 fifo_preempt_id_f(u32 v)
{
return (v & 0xfffU) << 0U;
}
/* Engine status register for engine i (registers are 8 bytes apart). */
static inline u32 fifo_engine_status_r(u32 i)
{
	u32 base = 0x00002640U;

	return base + (i * 8U);
}
/* Number of engine status registers on this chip. */
static inline u32 fifo_engine_status__size_1_v(void)
{
	return 15U;
}
static inline u32 fifo_engine_status_id_v(u32 r)
{
return (r >> 0U) & 0xfffU;
}
static inline u32 fifo_engine_status_id_type_v(u32 r)
{
return (r >> 12U) & 0x1U;
}
static inline u32 fifo_engine_status_id_type_chid_v(void)
{
return 0x00000000U;
}
static inline u32 fifo_engine_status_id_type_tsgid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_engine_status_ctx_status_v(u32 r)
{
return (r >> 13U) & 0x7U;
}
static inline u32 fifo_engine_status_ctx_status_valid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_engine_status_ctx_status_ctxsw_load_v(void)
{
return 0x00000005U;
}
static inline u32 fifo_engine_status_ctx_status_ctxsw_save_v(void)
{
return 0x00000006U;
}
static inline u32 fifo_engine_status_ctx_status_ctxsw_switch_v(void)
{
return 0x00000007U;
}
static inline u32 fifo_engine_status_next_id_v(u32 r)
{
return (r >> 16U) & 0xfffU;
}
static inline u32 fifo_engine_status_next_id_type_v(u32 r)
{
return (r >> 28U) & 0x1U;
}
static inline u32 fifo_engine_status_next_id_type_chid_v(void)
{
return 0x00000000U;
}
static inline u32 fifo_engine_status_eng_reload_v(u32 r)
{
return (r >> 29U) & 0x1U;
}
static inline u32 fifo_engine_status_faulted_v(u32 r)
{
return (r >> 30U) & 0x1U;
}
static inline u32 fifo_engine_status_faulted_true_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_engine_status_engine_v(u32 r)
{
return (r >> 31U) & 0x1U;
}
static inline u32 fifo_engine_status_engine_idle_v(void)
{
return 0x00000000U;
}
static inline u32 fifo_engine_status_engine_busy_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_engine_status_ctxsw_v(u32 r)
{
return (r >> 15U) & 0x1U;
}
static inline u32 fifo_engine_status_ctxsw_in_progress_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_engine_status_ctxsw_in_progress_f(void)
{
return 0x8000U;
}
static inline u32 fifo_pbdma_status_r(u32 i)
{
return 0x00003080U + i*4U;
}
static inline u32 fifo_pbdma_status__size_1_v(void)
{
return 0x0000000eU;
}
static inline u32 fifo_pbdma_status_id_v(u32 r)
{
return (r >> 0U) & 0xfffU;
}
static inline u32 fifo_pbdma_status_id_type_v(u32 r)
{
return (r >> 12U) & 0x1U;
}
static inline u32 fifo_pbdma_status_id_type_chid_v(void)
{
return 0x00000000U;
}
static inline u32 fifo_pbdma_status_id_type_tsgid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_pbdma_status_chan_status_v(u32 r)
{
return (r >> 13U) & 0x7U;
}
static inline u32 fifo_pbdma_status_chan_status_valid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_pbdma_status_chan_status_chsw_load_v(void)
{
return 0x00000005U;
}
static inline u32 fifo_pbdma_status_chan_status_chsw_save_v(void)
{
return 0x00000006U;
}
static inline u32 fifo_pbdma_status_chan_status_chsw_switch_v(void)
{
return 0x00000007U;
}
static inline u32 fifo_pbdma_status_next_id_v(u32 r)
{
return (r >> 16U) & 0xfffU;
}
static inline u32 fifo_pbdma_status_next_id_type_v(u32 r)
{
return (r >> 28U) & 0x1U;
}
static inline u32 fifo_pbdma_status_next_id_type_chid_v(void)
{
return 0x00000000U;
}
static inline u32 fifo_pbdma_status_chsw_v(u32 r)
{
return (r >> 15U) & 0x1U;
}
static inline u32 fifo_pbdma_status_chsw_in_progress_v(void)
{
return 0x00000001U;
}
/* Offset of the FIFO CFG0 register. */
static inline u32 fifo_cfg0_r(void)
{
	return 0x2004U;
}
/* Extract the PBDMA-count field (bits 7:0) from a CFG0 value. */
static inline u32 fifo_cfg0_num_pbdma_v(u32 r)
{
	return r & 0xffU;
}
/* Extract the PBDMA fault-id field (bits 23:16) from a CFG0 value. */
static inline u32 fifo_cfg0_pbdma_fault_id_v(u32 r)
{
	u32 field = r >> 16U;

	return field & 0xffU;
}
static inline u32 fifo_fb_iface_r(void)
{
return 0x000026f0U;
}
static inline u32 fifo_fb_iface_control_v(u32 r)
{
return (r >> 0U) & 0x1U;
}
static inline u32 fifo_fb_iface_control_enable_f(void)
{
return 0x1U;
}
static inline u32 fifo_fb_iface_status_v(u32 r)
{
return (r >> 4U) & 0x1U;
}
static inline u32 fifo_fb_iface_status_enabled_f(void)
{
return 0x10U;
}
#endif

View File

@@ -0,0 +1,187 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_flush_gv100_h_
#define _hw_flush_gv100_h_
/* Offset of the L2 system-invalidate flush register. */
static inline u32 flush_l2_system_invalidate_r(void)
{
	return 0x00070004U;
}
/* Extract the pending bit (bit 0). */
static inline u32 flush_l2_system_invalidate_pending_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
/* pending value: invalidate still in progress. */
static inline u32 flush_l2_system_invalidate_pending_busy_v(void)
{
	return 0x00000001U;
}
/* pending busy value, pre-shifted into register position. */
static inline u32 flush_l2_system_invalidate_pending_busy_f(void)
{
	return 0x1U;
}
/* Extract the outstanding bit (bit 1). */
static inline u32 flush_l2_system_invalidate_outstanding_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
/* outstanding value: an invalidate request is outstanding. */
static inline u32 flush_l2_system_invalidate_outstanding_true_v(void)
{
	return 0x00000001U;
}
static inline u32 flush_l2_flush_dirty_r(void)
{
return 0x00070010U;
}
static inline u32 flush_l2_flush_dirty_pending_v(u32 r)
{
return (r >> 0U) & 0x1U;
}
static inline u32 flush_l2_flush_dirty_pending_empty_v(void)
{
return 0x00000000U;
}
static inline u32 flush_l2_flush_dirty_pending_empty_f(void)
{
return 0x0U;
}
static inline u32 flush_l2_flush_dirty_pending_busy_v(void)
{
return 0x00000001U;
}
static inline u32 flush_l2_flush_dirty_pending_busy_f(void)
{
return 0x1U;
}
static inline u32 flush_l2_flush_dirty_outstanding_v(u32 r)
{
return (r >> 1U) & 0x1U;
}
static inline u32 flush_l2_flush_dirty_outstanding_false_v(void)
{
return 0x00000000U;
}
static inline u32 flush_l2_flush_dirty_outstanding_false_f(void)
{
return 0x0U;
}
static inline u32 flush_l2_flush_dirty_outstanding_true_v(void)
{
return 0x00000001U;
}
static inline u32 flush_l2_clean_comptags_r(void)
{
return 0x0007000cU;
}
static inline u32 flush_l2_clean_comptags_pending_v(u32 r)
{
return (r >> 0U) & 0x1U;
}
static inline u32 flush_l2_clean_comptags_pending_empty_v(void)
{
return 0x00000000U;
}
static inline u32 flush_l2_clean_comptags_pending_empty_f(void)
{
return 0x0U;
}
static inline u32 flush_l2_clean_comptags_pending_busy_v(void)
{
return 0x00000001U;
}
static inline u32 flush_l2_clean_comptags_pending_busy_f(void)
{
return 0x1U;
}
static inline u32 flush_l2_clean_comptags_outstanding_v(u32 r)
{
return (r >> 1U) & 0x1U;
}
static inline u32 flush_l2_clean_comptags_outstanding_false_v(void)
{
return 0x00000000U;
}
static inline u32 flush_l2_clean_comptags_outstanding_false_f(void)
{
return 0x0U;
}
static inline u32 flush_l2_clean_comptags_outstanding_true_v(void)
{
return 0x00000001U;
}
/* Offset of the FB flush register. */
static inline u32 flush_fb_flush_r(void)
{
	return 0x00070000U;
}
/* Extract the pending bit (bit 0). */
static inline u32 flush_fb_flush_pending_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
/* pending value: FB flush still in progress. */
static inline u32 flush_fb_flush_pending_busy_v(void)
{
	return 0x00000001U;
}
/* pending busy value, pre-shifted into register position. */
static inline u32 flush_fb_flush_pending_busy_f(void)
{
	return 0x1U;
}
/* Extract the outstanding bit (bit 1). */
static inline u32 flush_fb_flush_outstanding_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
/* outstanding value: a flush request is outstanding. */
static inline u32 flush_fb_flush_outstanding_true_v(void)
{
	return 0x00000001U;
}
#endif

View File

@@ -0,0 +1,143 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_fuse_gv100_h_
#define _hw_fuse_gv100_h_
/*
 * GV100 fuse register accessors (auto-generated; see the naming convention
 * documented above).  Fuses report floorswept/feature configuration of the
 * chip.  Do not hand-edit the literal offsets.
 */
/* Per-GPC TPC floorsweeping status/control; 'i' is the GPC index. */
static inline u32 fuse_status_opt_tpc_gpc_r(u32 i)
{
	return 0x00021c38U + i*4U;
}
static inline u32 fuse_ctrl_opt_tpc_gpc_r(u32 i)
{
	return 0x00021838U + i*4U;
}
/* RAM SVOP PDP control: 8-bit data field at bits 7:0. */
static inline u32 fuse_ctrl_opt_ram_svop_pdp_r(void)
{
	return 0x00021944U;
}
static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_m(void)
{
	return 0xffU << 0U;
}
static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_v(u32 r)
{
	return (r >> 0U) & 0xffU;
}
/* Override enable for the SVOP PDP value: single bit at bit 0. */
static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_r(void)
{
	return 0x00021948U;
}
static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_m(void)
{
	return 0x1U << 0U;
}
static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_yes_f(void)
{
	return 0x1U;
}
static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_no_f(void)
{
	return 0x0U;
}
/* FBIO floorsweeping status: 16-bit mask at bits 15:0. */
static inline u32 fuse_status_opt_fbio_r(void)
{
	return 0x00021c14U;
}
static inline u32 fuse_status_opt_fbio_data_f(u32 v)
{
	return (v & 0xffffU) << 0U;
}
static inline u32 fuse_status_opt_fbio_data_m(void)
{
	return 0xffffU << 0U;
}
static inline u32 fuse_status_opt_fbio_data_v(u32 r)
{
	return (r >> 0U) & 0xffffU;
}
/* ROP/L2 floorsweeping status per FBP; 'i' is the FBP index. */
static inline u32 fuse_status_opt_rop_l2_fbp_r(u32 i)
{
	return 0x00021d70U + i*4U;
}
/* FBP floorsweeping status: one bit per FBP, selected by 'i'. */
static inline u32 fuse_status_opt_fbp_r(void)
{
	return 0x00021d38U;
}
static inline u32 fuse_status_opt_fbp_idx_v(u32 r, u32 i)
{
	return (r >> (0U + i*1U)) & 0x1U;
}
/* ECC feature enable fuse. */
static inline u32 fuse_opt_ecc_en_r(void)
{
	return 0x00021228U;
}
static inline u32 fuse_opt_feature_fuses_override_disable_r(void)
{
	return 0x000213f0U;
}
#endif

View File

File diff suppressed because it is too large Load Diff

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,619 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_ltc_gv100_h_
#define _hw_ltc_gv100_h_
/*
 * GV100 LTC (level-two cache) register accessors (auto-generated; see the
 * naming convention documented above).  "ltcs"/"ltss" variants are the
 * broadcast apertures that address all LTCs/slices at once; "ltc0"/"ltc1"
 * and "lts0" are the per-unit unicast apertures.  Do not hand-edit the
 * literal offsets.
 */
/* PLTCG register-space extent. */
static inline u32 ltc_pltcg_base_v(void)
{
	return 0x00140000U;
}
static inline u32 ltc_pltcg_extent_v(void)
{
	return 0x0017ffffU;
}
/* Unicast/broadcast aperture base offsets. */
static inline u32 ltc_ltc0_ltss_v(void)
{
	return 0x00140200U;
}
static inline u32 ltc_ltc0_lts0_v(void)
{
	return 0x00140400U;
}
static inline u32 ltc_ltcs_ltss_v(void)
{
	return 0x0017e200U;
}
static inline u32 ltc_ltcs_lts0_cbc_ctrl1_r(void)
{
	return 0x0014046cU;
}
/* DSTG (data stage) configuration. */
static inline u32 ltc_ltc0_lts0_dstg_cfg0_r(void)
{
	return 0x00140518U;
}
static inline u32 ltc_ltcs_ltss_dstg_cfg0_r(void)
{
	return 0x0017e318U;
}
static inline u32 ltc_ltcs_ltss_dstg_cfg0_vdc_4to2_disable_m(void)
{
	return 0x1U << 15U;
}
/* TSTG (tag stage) configuration: active ways and set reduction. */
static inline u32 ltc_ltc0_lts0_tstg_cfg1_r(void)
{
	return 0x00140494U;
}
static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_ways_v(u32 r)
{
	return (r >> 0U) & 0xffffU;
}
static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_v(u32 r)
{
	return (r >> 16U) & 0x3U;
}
static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v(void)
{
	return 0x00000000U;
}
static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v(void)
{
	return 0x00000002U;
}
/* CBC (compression bit cache) control: clean/invalidate/clear triggers. */
static inline u32 ltc_ltcs_ltss_cbc_ctrl1_r(void)
{
	return 0x0017e26cU;
}
static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clean_active_f(void)
{
	return 0x1U;
}
static inline u32 ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f(void)
{
	return 0x2U;
}
static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_v(u32 r)
{
	return (r >> 2U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_f(void)
{
	return 0x4U;
}
static inline u32 ltc_ltc0_lts0_cbc_ctrl1_r(void)
{
	return 0x0014046cU;
}
/* CBC clear bounds: 18-bit comptag-line indices. */
static inline u32 ltc_ltcs_ltss_cbc_ctrl2_r(void)
{
	return 0x0017e270U;
}
static inline u32 ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f(u32 v)
{
	return (v & 0x3ffffU) << 0U;
}
static inline u32 ltc_ltcs_ltss_cbc_ctrl3_r(void)
{
	return 0x0017e274U;
}
static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f(u32 v)
{
	return (v & 0x3ffffU) << 0U;
}
static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v(void)
{
	return 0x0003ffffU;
}
/* CBC backing-store base: address is in units of 1 << alignment_shift. */
static inline u32 ltc_ltcs_ltss_cbc_base_r(void)
{
	return 0x0017e278U;
}
static inline u32 ltc_ltcs_ltss_cbc_base_alignment_shift_v(void)
{
	return 0x0000000bU;
}
static inline u32 ltc_ltcs_ltss_cbc_base_address_v(u32 r)
{
	return (r >> 0U) & 0x3ffffffU;
}
/* Active-LTC count plus NVLINK peer-through-L2 / serialize flags. */
static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_r(void)
{
	return 0x0017e27cU;
}
static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs__v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_nvlink_peer_through_l2_f(u32 v)
{
	return (v & 0x1U) << 24U;
}
static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_nvlink_peer_through_l2_v(u32 r)
{
	return (r >> 24U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_serialize_f(u32 v)
{
	return (v & 0x1U) << 25U;
}
static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_serialize_v(u32 r)
{
	return (r >> 25U) & 0x1U;
}
static inline u32 ltc_ltcs_misc_ltc_num_active_ltcs_r(void)
{
	return 0x0017e000U;
}
/* CBC geometry parameters (comptags per line, line size, slices). */
static inline u32 ltc_ltcs_ltss_cbc_param_r(void)
{
	return 0x0017e280U;
}
static inline u32 ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(u32 r)
{
	return (r >> 0U) & 0xffffU;
}
static inline u32 ltc_ltcs_ltss_cbc_param_cache_line_size_v(u32 r)
{
	return (r >> 24U) & 0xfU;
}
static inline u32 ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(u32 r)
{
	return (r >> 28U) & 0xfU;
}
static inline u32 ltc_ltcs_ltss_cbc_param2_r(void)
{
	return 0x0017e3f4U;
}
static inline u32 ltc_ltcs_ltss_cbc_param2_gobs_per_comptagline_per_slice_v(u32 r)
{
	return (r >> 0U) & 0xffffU;
}
static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_r(void)
{
	return 0x0017e2acU;
}
static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_max_ways_evict_last_f(u32 v)
{
	return (v & 0x1fU) << 16U;
}
/* ZBC (zero-bandwidth clear) table: index selects the clear-value slot. */
static inline u32 ltc_ltcs_ltss_dstg_zbc_index_r(void)
{
	return 0x0017e338U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_index_address_f(u32 v)
{
	return (v & 0xfU) << 0U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(u32 i)
{
	return 0x0017e33cU + i*4U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(void)
{
	return 0x00000004U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(void)
{
	return 0x0017e34cU;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_s(void)
{
	return 32U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_m(void)
{
	return 0xffffffffU << 0U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_v(u32 r)
{
	return (r >> 0U) & 0xffffffffU;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_r(void)
{
	return 0x0017e204U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_s(void)
{
	return 8U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_m(void)
{
	return 0xffU << 0U;
}
static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_v(u32 r)
{
	return (r >> 0U) & 0xffU;
}
static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_r(void)
{
	return 0x0017e2b0U;
}
static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(void)
{
	return 0x10000000U;
}
/* G_ELPG flush status (broadcast plus per-LTC unicast copies). */
static inline u32 ltc_ltcs_ltss_g_elpg_r(void)
{
	return 0x0017e214U;
}
static inline u32 ltc_ltcs_ltss_g_elpg_flush_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_f(void)
{
	return 0x1U;
}
static inline u32 ltc_ltc0_ltss_g_elpg_r(void)
{
	return 0x00140214U;
}
static inline u32 ltc_ltc0_ltss_g_elpg_flush_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_f(void)
{
	return 0x1U;
}
static inline u32 ltc_ltc1_ltss_g_elpg_r(void)
{
	return 0x00142214U;
}
static inline u32 ltc_ltc1_ltss_g_elpg_flush_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ltc_ltc1_ltss_g_elpg_flush_pending_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltc1_ltss_g_elpg_flush_pending_f(void)
{
	return 0x1U;
}
/* LTC interrupt status/enable, including ECC SEC/DED error bits. */
static inline u32 ltc_ltcs_ltss_intr_r(void)
{
	return 0x0017e20cU;
}
static inline u32 ltc_ltcs_ltss_intr_ecc_sec_error_pending_f(void)
{
	return 0x100U;
}
static inline u32 ltc_ltcs_ltss_intr_ecc_ded_error_pending_f(void)
{
	return 0x200U;
}
static inline u32 ltc_ltcs_ltss_intr_en_evicted_cb_m(void)
{
	return 0x1U << 20U;
}
static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_access_m(void)
{
	return 0x1U << 30U;
}
static inline u32 ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f(void)
{
	return 0x1000000U;
}
static inline u32 ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f(void)
{
	return 0x2000000U;
}
static inline u32 ltc_ltc0_lts0_intr_r(void)
{
	return 0x0014040cU;
}
/* Per-slice ECC error report: SEC count bits 7:0, DED count bits 23:16. */
static inline u32 ltc_ltc0_lts0_dstg_ecc_report_r(void)
{
	return 0x0014051cU;
}
static inline u32 ltc_ltc0_lts0_dstg_ecc_report_sec_count_m(void)
{
	return 0xffU << 0U;
}
static inline u32 ltc_ltc0_lts0_dstg_ecc_report_sec_count_v(u32 r)
{
	return (r >> 0U) & 0xffU;
}
static inline u32 ltc_ltc0_lts0_dstg_ecc_report_ded_count_m(void)
{
	return 0xffU << 16U;
}
static inline u32 ltc_ltc0_lts0_dstg_ecc_report_ded_count_v(u32 r)
{
	return (r >> 16U) & 0xffU;
}
/* TSTG cache-management 0: invalidate trigger and eviction-class options. */
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_r(void)
{
	return 0x0017e2a0U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_f(void)
{
	return 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_v(u32 r)
{
	return (r >> 8U) & 0xfU;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_v(void)
{
	return 0x00000003U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_f(void)
{
	return 0x300U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_v(u32 r)
{
	return (r >> 28U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_f(void)
{
	return 0x10000000U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_v(u32 r)
{
	return (r >> 29U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_f(void)
{
	return 0x20000000U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_v(u32 r)
{
	return (r >> 30U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_f(void)
{
	return 0x40000000U;
}
/* TSTG cache-management 1: clean trigger and eviction-class options. */
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_r(void)
{
	return 0x0017e2a4U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_f(void)
{
	return 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_v(u32 r)
{
	return (r >> 8U) & 0xfU;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_v(void)
{
	return 0x00000003U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_f(void)
{
	return 0x300U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_v(u32 r)
{
	return (r >> 16U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_f(void)
{
	return 0x10000U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_v(u32 r)
{
	return (r >> 28U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_f(void)
{
	return 0x10000000U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_v(u32 r)
{
	return (r >> 29U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_f(void)
{
	return 0x20000000U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_v(u32 r)
{
	return (r >> 30U) & 0x1U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_f(void)
{
	return 0x40000000U;
}
/* Per-LTC unicast copies of the cmgmt0/cmgmt1 trigger registers. */
static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_r(void)
{
	return 0x001402a0U;
}
static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_f(void)
{
	return 0x1U;
}
static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_r(void)
{
	return 0x001402a4U;
}
static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_f(void)
{
	return 0x1U;
}
static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_r(void)
{
	return 0x001422a0U;
}
static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_f(void)
{
	return 0x1U;
}
static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_r(void)
{
	return 0x001422a4U;
}
static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_v(void)
{
	return 0x00000001U;
}
static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_f(void)
{
	return 0x1U;
}
/* TSTG geometry info: slice size (KB) and slice count per L2. */
static inline u32 ltc_ltc0_lts0_tstg_info_1_r(void)
{
	return 0x0014058cU;
}
static inline u32 ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(u32 r)
{
	return (r >> 0U) & 0xffffU;
}
static inline u32 ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(u32 r)
{
	return (r >> 16U) & 0x1fU;
}
#endif

View File

@@ -0,0 +1,259 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_mc_gv100_h_
#define _hw_mc_gv100_h_
/*
 * GV100 MC (master control) register accessors (auto-generated; see the
 * naming convention documented above).  Covers chip-id readout, top-level
 * interrupt status/enable, and engine enable/ELPG gating.  Do not
 * hand-edit the literal offsets.
 */
/* MC_BOOT_0: chip architecture/implementation/revision identification. */
static inline u32 mc_boot_0_r(void)
{
	return 0x00000000U;
}
static inline u32 mc_boot_0_architecture_v(u32 r)
{
	return (r >> 24U) & 0x1fU;
}
static inline u32 mc_boot_0_implementation_v(u32 r)
{
	return (r >> 20U) & 0xfU;
}
static inline u32 mc_boot_0_major_revision_v(u32 r)
{
	return (r >> 4U) & 0xfU;
}
static inline u32 mc_boot_0_minor_revision_v(u32 r)
{
	return (r >> 0U) & 0xfU;
}
/* Top-level interrupt tree: 'i' selects the interrupt register instance. */
static inline u32 mc_intr_r(u32 i)
{
	return 0x00000100U + i*4U;
}
static inline u32 mc_intr_pfifo_pending_f(void)
{
	return 0x100U;
}
static inline u32 mc_intr_hub_pending_f(void)
{
	return 0x200U;
}
static inline u32 mc_intr_pgraph_pending_f(void)
{
	return 0x1000U;
}
static inline u32 mc_intr_pmu_pending_f(void)
{
	return 0x1000000U;
}
static inline u32 mc_intr_ltc_pending_f(void)
{
	return 0x2000000U;
}
static inline u32 mc_intr_priv_ring_pending_f(void)
{
	return 0x40000000U;
}
static inline u32 mc_intr_pbus_pending_f(void)
{
	return 0x10000000U;
}
static inline u32 mc_intr_en_r(u32 i)
{
	return 0x00000140U + i*4U;
}
static inline u32 mc_intr_en_set_r(u32 i)
{
	return 0x00000160U + i*4U;
}
static inline u32 mc_intr_en_clear_r(u32 i)
{
	return 0x00000180U + i*4U;
}
/* MC_ENABLE: per-engine clock/reset enable bits. */
static inline u32 mc_enable_r(void)
{
	return 0x00000200U;
}
static inline u32 mc_enable_xbar_enabled_f(void)
{
	return 0x4U;
}
static inline u32 mc_enable_l2_enabled_f(void)
{
	return 0x8U;
}
static inline u32 mc_enable_pmedia_s(void)
{
	return 1U;
}
static inline u32 mc_enable_pmedia_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 mc_enable_pmedia_m(void)
{
	return 0x1U << 4U;
}
static inline u32 mc_enable_pmedia_v(u32 r)
{
	return (r >> 4U) & 0x1U;
}
static inline u32 mc_enable_ce0_m(void)
{
	return 0x1U << 6U;
}
static inline u32 mc_enable_pfifo_enabled_f(void)
{
	return 0x100U;
}
static inline u32 mc_enable_pgraph_enabled_f(void)
{
	return 0x1000U;
}
static inline u32 mc_enable_pwr_v(u32 r)
{
	return (r >> 13U) & 0x1U;
}
static inline u32 mc_enable_pwr_disabled_v(void)
{
	return 0x00000000U;
}
static inline u32 mc_enable_pwr_enabled_f(void)
{
	return 0x2000U;
}
static inline u32 mc_enable_pfb_enabled_f(void)
{
	return 0x100000U;
}
static inline u32 mc_enable_ce2_m(void)
{
	return 0x1U << 21U;
}
static inline u32 mc_enable_ce2_enabled_f(void)
{
	return 0x200000U;
}
static inline u32 mc_enable_blg_enabled_f(void)
{
	return 0x8000000U;
}
static inline u32 mc_enable_perfmon_enabled_f(void)
{
	return 0x10000000U;
}
static inline u32 mc_enable_hub_enabled_f(void)
{
	return 0x20000000U;
}
static inline u32 mc_enable_nvdec_disabled_v(void)
{
	return 0x00000000U;
}
static inline u32 mc_enable_nvdec_enabled_f(void)
{
	return 0x8000U;
}
static inline u32 mc_intr_ltc_r(void)
{
	return 0x000001c0U;
}
/* MC_ENABLE_PB: per-PBDMA enable bits, one bit per unit ('i'). */
static inline u32 mc_enable_pb_r(void)
{
	return 0x00000204U;
}
static inline u32 mc_enable_pb_0_s(void)
{
	return 1U;
}
static inline u32 mc_enable_pb_0_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 mc_enable_pb_0_m(void)
{
	return 0x1U << 0U;
}
static inline u32 mc_enable_pb_0_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 mc_enable_pb_0_enabled_v(void)
{
	return 0x00000001U;
}
static inline u32 mc_enable_pb_sel_f(u32 v, u32 i)
{
	return (v & 0x1U) << (0U + i*1U);
}
/* MC_ELPG_ENABLE: engine-level power-gating enable bits. */
static inline u32 mc_elpg_enable_r(void)
{
	return 0x0000020cU;
}
static inline u32 mc_elpg_enable_xbar_enabled_f(void)
{
	return 0x4U;
}
static inline u32 mc_elpg_enable_pfb_enabled_f(void)
{
	return 0x100000U;
}
static inline u32 mc_elpg_enable_hub_enabled_f(void)
{
	return 0x20000000U;
}
static inline u32 mc_elpg_enable_l2_enabled_f(void)
{
	return 0x8U;
}
#endif

View File

@@ -0,0 +1,659 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_pbdma_gv100_h_
#define _hw_pbdma_gv100_h_
/*
 * GV100 PBDMA (pushbuffer DMA) register accessors (auto-generated; see the
 * naming convention documented above).  Per-PBDMA registers are indexed by
 * 'i' with an 8 KB (8192-byte) stride between PBDMA units.  Do not
 * hand-edit the literal offsets.
 */
/* GPFIFO entry word 1: get-pointer high bits and entry length. */
static inline u32 pbdma_gp_entry1_r(void)
{
	return 0x10000004U;
}
static inline u32 pbdma_gp_entry1_get_hi_v(u32 r)
{
	return (r >> 0U) & 0xffU;
}
static inline u32 pbdma_gp_entry1_length_f(u32 v)
{
	return (v & 0x1fffffU) << 10U;
}
static inline u32 pbdma_gp_entry1_length_v(u32 r)
{
	return (r >> 10U) & 0x1fffffU;
}
/* GPFIFO base/fetch/get/put pointers; __size_1 is the PBDMA unit count. */
static inline u32 pbdma_gp_base_r(u32 i)
{
	return 0x00040048U + i*8192U;
}
static inline u32 pbdma_gp_base__size_1_v(void)
{
	return 0x0000000eU;
}
static inline u32 pbdma_gp_base_offset_f(u32 v)
{
	return (v & 0x1fffffffU) << 3U;
}
static inline u32 pbdma_gp_base_rsvd_s(void)
{
	return 3U;
}
static inline u32 pbdma_gp_base_hi_r(u32 i)
{
	return 0x0004004cU + i*8192U;
}
static inline u32 pbdma_gp_base_hi_offset_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 pbdma_gp_base_hi_limit2_f(u32 v)
{
	return (v & 0x1fU) << 16U;
}
static inline u32 pbdma_gp_fetch_r(u32 i)
{
	return 0x00040050U + i*8192U;
}
static inline u32 pbdma_gp_get_r(u32 i)
{
	return 0x00040014U + i*8192U;
}
static inline u32 pbdma_gp_put_r(u32 i)
{
	return 0x00040000U + i*8192U;
}
/* Pushbuffer fetch/get/put pointers (low/high halves). */
static inline u32 pbdma_pb_fetch_r(u32 i)
{
	return 0x00040054U + i*8192U;
}
static inline u32 pbdma_pb_fetch_hi_r(u32 i)
{
	return 0x00040058U + i*8192U;
}
static inline u32 pbdma_get_r(u32 i)
{
	return 0x00040018U + i*8192U;
}
static inline u32 pbdma_get_hi_r(u32 i)
{
	return 0x0004001cU + i*8192U;
}
static inline u32 pbdma_put_r(u32 i)
{
	return 0x0004005cU + i*8192U;
}
static inline u32 pbdma_put_hi_r(u32 i)
{
	return 0x00040060U + i*8192U;
}
/* Pushbuffer header state (method type, subchannel, privilege level). */
static inline u32 pbdma_pb_header_r(u32 i)
{
	return 0x00040084U + i*8192U;
}
static inline u32 pbdma_pb_header_priv_user_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_pb_header_method_zero_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_pb_header_subchannel_zero_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_pb_header_level_main_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_pb_header_first_true_f(void)
{
	return 0x400000U;
}
static inline u32 pbdma_pb_header_type_inc_f(void)
{
	return 0x20000000U;
}
static inline u32 pbdma_pb_header_type_non_inc_f(void)
{
	return 0x60000000U;
}
/* Shadow copies of header and GPFIFO state. */
static inline u32 pbdma_hdr_shadow_r(u32 i)
{
	return 0x00040118U + i*8192U;
}
static inline u32 pbdma_gp_shadow_0_r(u32 i)
{
	return 0x00040110U + i*8192U;
}
static inline u32 pbdma_gp_shadow_1_r(u32 i)
{
	return 0x00040114U + i*8192U;
}
static inline u32 pbdma_subdevice_r(u32 i)
{
	return 0x00040094U + i*8192U;
}
static inline u32 pbdma_subdevice_id_f(u32 v)
{
	return (v & 0xfffU) << 0U;
}
static inline u32 pbdma_subdevice_status_active_f(void)
{
	return 0x10000000U;
}
static inline u32 pbdma_subdevice_channel_dma_enable_f(void)
{
	return 0x20000000U;
}
/* Method FIFO: four method slots (method0..3) with paired data registers. */
static inline u32 pbdma_method0_r(u32 i)
{
	return 0x000400c0U + i*8192U;
}
static inline u32 pbdma_method0_fifo_size_v(void)
{
	return 0x00000004U;
}
static inline u32 pbdma_method0_addr_f(u32 v)
{
	return (v & 0xfffU) << 2U;
}
static inline u32 pbdma_method0_addr_v(u32 r)
{
	return (r >> 2U) & 0xfffU;
}
static inline u32 pbdma_method0_subch_v(u32 r)
{
	return (r >> 16U) & 0x7U;
}
static inline u32 pbdma_method0_first_true_f(void)
{
	return 0x400000U;
}
static inline u32 pbdma_method0_valid_true_f(void)
{
	return 0x80000000U;
}
static inline u32 pbdma_method1_r(u32 i)
{
	return 0x000400c8U + i*8192U;
}
static inline u32 pbdma_method2_r(u32 i)
{
	return 0x000400d0U + i*8192U;
}
static inline u32 pbdma_method3_r(u32 i)
{
	return 0x000400d8U + i*8192U;
}
static inline u32 pbdma_data0_r(u32 i)
{
	return 0x000400c4U + i*8192U;
}
/* Semaphore-acquire retry/timeout: timeout is man * 2^exp PBDMA cycles. */
static inline u32 pbdma_acquire_r(u32 i)
{
	return 0x00040030U + i*8192U;
}
static inline u32 pbdma_acquire_retry_man_2_f(void)
{
	return 0x2U;
}
static inline u32 pbdma_acquire_retry_exp_2_f(void)
{
	return 0x100U;
}
static inline u32 pbdma_acquire_timeout_exp_f(u32 v)
{
	return (v & 0xfU) << 11U;
}
static inline u32 pbdma_acquire_timeout_exp_max_v(void)
{
	return 0x0000000fU;
}
static inline u32 pbdma_acquire_timeout_exp_max_f(void)
{
	return 0x7800U;
}
static inline u32 pbdma_acquire_timeout_man_f(u32 v)
{
	return (v & 0xffffU) << 15U;
}
static inline u32 pbdma_acquire_timeout_man_max_v(void)
{
	return 0x0000ffffU;
}
static inline u32 pbdma_acquire_timeout_man_max_f(void)
{
	return 0x7fff8000U;
}
static inline u32 pbdma_acquire_timeout_en_enable_f(void)
{
	return 0x80000000U;
}
static inline u32 pbdma_acquire_timeout_en_disable_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_status_r(u32 i)
{
	return 0x00040100U + i*8192U;
}
static inline u32 pbdma_channel_r(u32 i)
{
	return 0x00040120U + i*8192U;
}
/* Signature register: 0xface marks hardware-initialized state. */
static inline u32 pbdma_signature_r(u32 i)
{
	return 0x00040010U + i*8192U;
}
static inline u32 pbdma_signature_hw_valid_f(void)
{
	return 0xfaceU;
}
static inline u32 pbdma_signature_sw_zero_f(void)
{
	return 0x0U;
}
/* USERD location: aperture target plus 4 KB-aligned address bits. */
static inline u32 pbdma_userd_r(u32 i)
{
	return 0x00040008U + i*8192U;
}
static inline u32 pbdma_userd_target_vid_mem_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_userd_target_sys_mem_coh_f(void)
{
	return 0x2U;
}
static inline u32 pbdma_userd_target_sys_mem_ncoh_f(void)
{
	return 0x3U;
}
static inline u32 pbdma_userd_addr_f(u32 v)
{
	return (v & 0x7fffffU) << 9U;
}
/* Per-PBDMA config: L2 eviction class and CE split control. */
static inline u32 pbdma_config_r(u32 i)
{
	return 0x000400f4U + i*8192U;
}
static inline u32 pbdma_config_l2_evict_first_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_config_l2_evict_normal_f(void)
{
	return 0x1U;
}
static inline u32 pbdma_config_l2_evict_last_f(void)
{
	return 0x2U;
}
static inline u32 pbdma_config_ce_split_enable_f(void)
{
	return 0x0U;
}
static inline u32 pbdma_config_ce_split_disable_f(void)
{
return 0x10U;
}
static inline u32 pbdma_config_auth_level_non_privileged_f(void)
{
return 0x0U;
}
static inline u32 pbdma_config_auth_level_privileged_f(void)
{
return 0x100U;
}
static inline u32 pbdma_config_userd_writeback_disable_f(void)
{
return 0x0U;
}
static inline u32 pbdma_config_userd_writeback_enable_f(void)
{
return 0x1000U;
}
static inline u32 pbdma_userd_hi_r(u32 i)
{
return 0x0004000cU + i*8192U;
}
static inline u32 pbdma_userd_hi_addr_f(u32 v)
{
return (v & 0xffU) << 0U;
}
static inline u32 pbdma_hce_ctrl_r(u32 i)
{
return 0x000400e4U + i*8192U;
}
static inline u32 pbdma_hce_ctrl_hce_priv_mode_yes_f(void)
{
return 0x20U;
}
static inline u32 pbdma_intr_0_r(u32 i)
{
return 0x00040108U + i*8192U;
}
static inline u32 pbdma_intr_0_memreq_v(u32 r)
{
return (r >> 0U) & 0x1U;
}
static inline u32 pbdma_intr_0_memreq_pending_f(void)
{
return 0x1U;
}
static inline u32 pbdma_intr_0_memack_timeout_pending_f(void)
{
return 0x2U;
}
static inline u32 pbdma_intr_0_memack_extra_pending_f(void)
{
return 0x4U;
}
static inline u32 pbdma_intr_0_memdat_timeout_pending_f(void)
{
return 0x8U;
}
static inline u32 pbdma_intr_0_memdat_extra_pending_f(void)
{
return 0x10U;
}
static inline u32 pbdma_intr_0_memflush_pending_f(void)
{
return 0x20U;
}
static inline u32 pbdma_intr_0_memop_pending_f(void)
{
return 0x40U;
}
static inline u32 pbdma_intr_0_lbconnect_pending_f(void)
{
return 0x80U;
}
static inline u32 pbdma_intr_0_lbreq_pending_f(void)
{
return 0x100U;
}
static inline u32 pbdma_intr_0_lback_timeout_pending_f(void)
{
return 0x200U;
}
static inline u32 pbdma_intr_0_lback_extra_pending_f(void)
{
return 0x400U;
}
static inline u32 pbdma_intr_0_lbdat_timeout_pending_f(void)
{
return 0x800U;
}
static inline u32 pbdma_intr_0_lbdat_extra_pending_f(void)
{
return 0x1000U;
}
static inline u32 pbdma_intr_0_gpfifo_pending_f(void)
{
return 0x2000U;
}
static inline u32 pbdma_intr_0_gpptr_pending_f(void)
{
return 0x4000U;
}
static inline u32 pbdma_intr_0_gpentry_pending_f(void)
{
return 0x8000U;
}
static inline u32 pbdma_intr_0_gpcrc_pending_f(void)
{
return 0x10000U;
}
static inline u32 pbdma_intr_0_pbptr_pending_f(void)
{
return 0x20000U;
}
static inline u32 pbdma_intr_0_pbentry_pending_f(void)
{
return 0x40000U;
}
static inline u32 pbdma_intr_0_pbcrc_pending_f(void)
{
return 0x80000U;
}
static inline u32 pbdma_intr_0_clear_faulted_error_pending_f(void)
{
return 0x100000U;
}
static inline u32 pbdma_intr_0_method_pending_f(void)
{
return 0x200000U;
}
static inline u32 pbdma_intr_0_methodcrc_pending_f(void)
{
return 0x400000U;
}
static inline u32 pbdma_intr_0_device_pending_f(void)
{
return 0x800000U;
}
static inline u32 pbdma_intr_0_eng_reset_pending_f(void)
{
return 0x1000000U;
}
static inline u32 pbdma_intr_0_semaphore_pending_f(void)
{
return 0x2000000U;
}
static inline u32 pbdma_intr_0_acquire_pending_f(void)
{
return 0x4000000U;
}
static inline u32 pbdma_intr_0_pri_pending_f(void)
{
return 0x8000000U;
}
static inline u32 pbdma_intr_0_no_ctxsw_seg_pending_f(void)
{
return 0x20000000U;
}
static inline u32 pbdma_intr_0_pbseg_pending_f(void)
{
return 0x40000000U;
}
static inline u32 pbdma_intr_0_signature_pending_f(void)
{
return 0x80000000U;
}
static inline u32 pbdma_intr_1_r(u32 i)
{
return 0x00040148U + i*8192U;
}
static inline u32 pbdma_intr_1_ctxnotvalid_m(void)
{
return 0x1U << 31U;
}
static inline u32 pbdma_intr_1_ctxnotvalid_pending_f(void)
{
return 0x80000000U;
}
static inline u32 pbdma_intr_en_0_r(u32 i)
{
return 0x0004010cU + i*8192U;
}
static inline u32 pbdma_intr_en_0_lbreq_enabled_f(void)
{
return 0x100U;
}
static inline u32 pbdma_intr_en_1_r(u32 i)
{
return 0x0004014cU + i*8192U;
}
static inline u32 pbdma_intr_stall_r(u32 i)
{
return 0x0004013cU + i*8192U;
}
static inline u32 pbdma_intr_stall_lbreq_enabled_f(void)
{
return 0x100U;
}
static inline u32 pbdma_intr_stall_1_r(u32 i)
{
return 0x00040140U + i*8192U;
}
static inline u32 pbdma_udma_nop_r(void)
{
return 0x00000008U;
}
static inline u32 pbdma_runlist_timeslice_r(u32 i)
{
return 0x000400f8U + i*8192U;
}
static inline u32 pbdma_runlist_timeslice_timeout_128_f(void)
{
return 0x80U;
}
static inline u32 pbdma_runlist_timeslice_timescale_3_f(void)
{
return 0x3000U;
}
static inline u32 pbdma_runlist_timeslice_enable_true_f(void)
{
return 0x10000000U;
}
static inline u32 pbdma_target_r(u32 i)
{
return 0x000400acU + i*8192U;
}
static inline u32 pbdma_target_engine_sw_f(void)
{
return 0x1fU;
}
static inline u32 pbdma_target_eng_ctx_valid_true_f(void)
{
return 0x10000U;
}
static inline u32 pbdma_target_eng_ctx_valid_false_f(void)
{
return 0x0U;
}
static inline u32 pbdma_target_ce_ctx_valid_true_f(void)
{
return 0x20000U;
}
static inline u32 pbdma_target_ce_ctx_valid_false_f(void)
{
return 0x0U;
}
static inline u32 pbdma_target_host_tsg_event_reason_pbdma_idle_f(void)
{
return 0x0U;
}
static inline u32 pbdma_target_host_tsg_event_reason_semaphore_acquire_failure_f(void)
{
return 0x1000000U;
}
static inline u32 pbdma_target_host_tsg_event_reason_tsg_yield_f(void)
{
return 0x2000000U;
}
static inline u32 pbdma_target_host_tsg_event_reason_host_subchannel_switch_f(void)
{
return 0x3000000U;
}
static inline u32 pbdma_target_should_send_tsg_event_true_f(void)
{
return 0x20000000U;
}
static inline u32 pbdma_target_should_send_tsg_event_false_f(void)
{
return 0x0U;
}
static inline u32 pbdma_target_needs_host_tsg_event_true_f(void)
{
return 0x80000000U;
}
static inline u32 pbdma_target_needs_host_tsg_event_false_f(void)
{
return 0x0U;
}
static inline u32 pbdma_set_channel_info_r(u32 i)
{
return 0x000400fcU + i*8192U;
}
static inline u32 pbdma_set_channel_info_scg_type_graphics_compute0_f(void)
{
return 0x0U;
}
static inline u32 pbdma_set_channel_info_scg_type_compute1_f(void)
{
return 0x1U;
}
static inline u32 pbdma_set_channel_info_veid_f(u32 v)
{
return (v & 0x3fU) << 8U;
}
static inline u32 pbdma_timeout_r(u32 i)
{
return 0x0004012cU + i*8192U;
}
static inline u32 pbdma_timeout_period_m(void)
{
return 0xffffffffU << 0U;
}
static inline u32 pbdma_timeout_period_max_f(void)
{
return 0xffffffffU;
}
static inline u32 pbdma_timeout_period_init_f(void)
{
return 0x10000U;
}
#endif

View File

@@ -0,0 +1,211 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_perf_gv100_h_
#define _hw_perf_gv100_h_
/*
 * Auto-generated accessors for the PMA (performance monitor aggregation)
 * system register block at 0x0024a000. Naming follows the file-wide
 * convention documented above: *_r register offsets, *_f shifted field
 * values, *_v field extraction, *_m masks.
 * NOTE(review): generator output — values must match the GV100 manuals;
 * do not hand-edit.
 */
static inline u32 perf_pmasys_control_r(void)
{
	return 0x0024a000U;
}
/* membuf_status: bit 4 (1 == overflowed). */
static inline u32 perf_pmasys_control_membuf_status_v(u32 r)
{
	return (r >> 4U) & 0x1U;
}
static inline u32 perf_pmasys_control_membuf_status_overflowed_v(void)
{
	return 0x00000001U;
}
static inline u32 perf_pmasys_control_membuf_status_overflowed_f(void)
{
	return 0x10U;
}
/* membuf_clear_status: write-1 bit 5 to clear the overflow status. */
static inline u32 perf_pmasys_control_membuf_clear_status_f(u32 v)
{
	return (v & 0x1U) << 5U;
}
static inline u32 perf_pmasys_control_membuf_clear_status_v(u32 r)
{
	return (r >> 5U) & 0x1U;
}
static inline u32 perf_pmasys_control_membuf_clear_status_doit_v(void)
{
	return 0x00000001U;
}
static inline u32 perf_pmasys_control_membuf_clear_status_doit_f(void)
{
	return 0x20U;
}
/*
 * mem_block: base (bits [27:0]), target aperture (bits [29:28]) and
 * valid flag (bit 31) of the PMA memory buffer block.
 */
static inline u32 perf_pmasys_mem_block_r(void)
{
	return 0x0024a070U;
}
static inline u32 perf_pmasys_mem_block_base_f(u32 v)
{
	return (v & 0xfffffffU) << 0U;
}
static inline u32 perf_pmasys_mem_block_target_f(u32 v)
{
	return (v & 0x3U) << 28U;
}
static inline u32 perf_pmasys_mem_block_target_v(u32 r)
{
	return (r >> 28U) & 0x3U;
}
static inline u32 perf_pmasys_mem_block_target_lfb_v(void)
{
	return 0x00000000U;
}
static inline u32 perf_pmasys_mem_block_target_lfb_f(void)
{
	return 0x0U;
}
static inline u32 perf_pmasys_mem_block_target_sys_coh_v(void)
{
	return 0x00000002U;
}
static inline u32 perf_pmasys_mem_block_target_sys_coh_f(void)
{
	return 0x20000000U;
}
static inline u32 perf_pmasys_mem_block_target_sys_ncoh_v(void)
{
	return 0x00000003U;
}
static inline u32 perf_pmasys_mem_block_target_sys_ncoh_f(void)
{
	return 0x30000000U;
}
static inline u32 perf_pmasys_mem_block_valid_f(u32 v)
{
	return (v & 0x1U) << 31U;
}
static inline u32 perf_pmasys_mem_block_valid_v(u32 r)
{
	return (r >> 31U) & 0x1U;
}
static inline u32 perf_pmasys_mem_block_valid_true_v(void)
{
	return 0x00000001U;
}
static inline u32 perf_pmasys_mem_block_valid_true_f(void)
{
	return 0x80000000U;
}
static inline u32 perf_pmasys_mem_block_valid_false_v(void)
{
	return 0x00000000U;
}
static inline u32 perf_pmasys_mem_block_valid_false_f(void)
{
	return 0x0U;
}
/* Output buffer base/size: 32-byte aligned pointers (fields start at bit 5). */
static inline u32 perf_pmasys_outbase_r(void)
{
	return 0x0024a074U;
}
static inline u32 perf_pmasys_outbase_ptr_f(u32 v)
{
	return (v & 0x7ffffffU) << 5U;
}
static inline u32 perf_pmasys_outbaseupper_r(void)
{
	return 0x0024a078U;
}
static inline u32 perf_pmasys_outbaseupper_ptr_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 perf_pmasys_outsize_r(void)
{
	return 0x0024a07cU;
}
static inline u32 perf_pmasys_outsize_numbytes_f(u32 v)
{
	return (v & 0x7ffffffU) << 5U;
}
/* Byte counters: 16-byte granularity (fields start at bit 4). */
static inline u32 perf_pmasys_mem_bytes_r(void)
{
	return 0x0024a084U;
}
static inline u32 perf_pmasys_mem_bytes_numbytes_f(u32 v)
{
	return (v & 0xfffffffU) << 4U;
}
static inline u32 perf_pmasys_mem_bump_r(void)
{
	return 0x0024a088U;
}
static inline u32 perf_pmasys_mem_bump_numbytes_f(u32 v)
{
	return (v & 0xfffffffU) << 4U;
}
static inline u32 perf_pmasys_enginestatus_r(void)
{
	return 0x0024a0a4U;
}
/* rbufempty: bit 4 (1 == read buffer empty). */
static inline u32 perf_pmasys_enginestatus_rbufempty_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 perf_pmasys_enginestatus_rbufempty_empty_v(void)
{
	return 0x00000001U;
}
static inline u32 perf_pmasys_enginestatus_rbufempty_empty_f(void)
{
	return 0x10U;
}
#endif

View File

@@ -0,0 +1,63 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_pram_gv100_h_
#define _hw_pram_gv100_h_
/*
 * Auto-generated accessor for the PRAM data window at 0x00700000:
 * one 32-bit data word per index i (4-byte stride).
 * NOTE(review): generator output — do not hand-edit.
 */
static inline u32 pram_data032_r(u32 i)
{
	return 0x00700000U + i*4U;
}
#endif

View File

@@ -0,0 +1,167 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_pri_ringmaster_gv100_h_
#define _hw_pri_ringmaster_gv100_h_
/*
 * Auto-generated accessors for the PRI ringmaster register block at
 * 0x00120000. The command register carries a 6-bit opcode (bits [5:0]);
 * the enum_* registers report 5-bit unit counts.
 * NOTE(review): generator output — values must match the GV100 manuals;
 * do not hand-edit.
 */
static inline u32 pri_ringmaster_command_r(void)
{
	return 0x0012004cU;
}
static inline u32 pri_ringmaster_command_cmd_m(void)
{
	return 0x3fU << 0U;
}
static inline u32 pri_ringmaster_command_cmd_v(u32 r)
{
	return (r >> 0U) & 0x3fU;
}
/* Command opcodes 0..3. */
static inline u32 pri_ringmaster_command_cmd_no_cmd_v(void)
{
	return 0x00000000U;
}
static inline u32 pri_ringmaster_command_cmd_start_ring_f(void)
{
	return 0x1U;
}
static inline u32 pri_ringmaster_command_cmd_ack_interrupt_f(void)
{
	return 0x2U;
}
static inline u32 pri_ringmaster_command_cmd_enumerate_stations_f(void)
{
	return 0x3U;
}
static inline u32 pri_ringmaster_command_cmd_enumerate_stations_bc_grp_all_f(void)
{
	return 0x0U;
}
static inline u32 pri_ringmaster_command_data_r(void)
{
	return 0x00120048U;
}
static inline u32 pri_ringmaster_start_results_r(void)
{
	return 0x00120050U;
}
/* connectivity: bit 0 (1 == pass). */
static inline u32 pri_ringmaster_start_results_connectivity_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 pri_ringmaster_start_results_connectivity_pass_v(void)
{
	return 0x00000001U;
}
/* Interrupt status 0: individual fault bits at 0, 1, 2 and 8. */
static inline u32 pri_ringmaster_intr_status0_r(void)
{
	return 0x00120058U;
}
static inline u32 pri_ringmaster_intr_status0_ring_start_conn_fault_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 pri_ringmaster_intr_status0_disconnect_fault_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 pri_ringmaster_intr_status0_overflow_fault_v(u32 r)
{
	return (r >> 2U) & 0x1U;
}
static inline u32 pri_ringmaster_intr_status0_gbl_write_error_sys_v(u32 r)
{
	return (r >> 8U) & 0x1U;
}
static inline u32 pri_ringmaster_intr_status1_r(void)
{
	return 0x0012005cU;
}
static inline u32 pri_ringmaster_global_ctl_r(void)
{
	return 0x00120060U;
}
/* ring_reset: bit 0 (1 == asserted). */
static inline u32 pri_ringmaster_global_ctl_ring_reset_asserted_f(void)
{
	return 0x1U;
}
static inline u32 pri_ringmaster_global_ctl_ring_reset_deasserted_f(void)
{
	return 0x0U;
}
/* Unit enumeration registers: count in bits [4:0]. */
static inline u32 pri_ringmaster_enum_fbp_r(void)
{
	return 0x00120074U;
}
static inline u32 pri_ringmaster_enum_fbp_count_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 pri_ringmaster_enum_gpc_r(void)
{
	return 0x00120078U;
}
static inline u32 pri_ringmaster_enum_gpc_count_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 pri_ringmaster_enum_ltc_r(void)
{
	return 0x0012006cU;
}
static inline u32 pri_ringmaster_enum_ltc_count_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
#endif

View File

@@ -0,0 +1,79 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_pri_ringstation_gpc_gv100_h_
#define _hw_pri_ringstation_gpc_gv100_h_
/*
 * Auto-generated accessors for the GPC PRI ring-station register block
 * at 0x00128000: per-index master config (4-byte stride) and GPC0
 * priv-error reporting registers (address / write data / info / code).
 * NOTE(review): generator output — do not hand-edit.
 */
static inline u32 pri_ringstation_gpc_master_config_r(u32 i)
{
	return 0x00128300U + i*4U;
}
static inline u32 pri_ringstation_gpc_gpc0_priv_error_adr_r(void)
{
	return 0x00128120U;
}
static inline u32 pri_ringstation_gpc_gpc0_priv_error_wrdat_r(void)
{
	return 0x00128124U;
}
static inline u32 pri_ringstation_gpc_gpc0_priv_error_info_r(void)
{
	return 0x00128128U;
}
static inline u32 pri_ringstation_gpc_gpc0_priv_error_code_r(void)
{
	return 0x0012812cU;
}
#endif

View File

@@ -0,0 +1,91 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_pri_ringstation_sys_gv100_h_
#define _hw_pri_ringstation_sys_gv100_h_
/*
 * Auto-generated accessors for the SYS PRI ring-station register block
 * at 0x00122000: master config (4-byte stride), decode config (3-bit
 * 'ring' field at bits [2:0]) and priv-error reporting registers.
 * NOTE(review): generator output — do not hand-edit.
 */
static inline u32 pri_ringstation_sys_master_config_r(u32 i)
{
	return 0x00122300U + i*4U;
}
static inline u32 pri_ringstation_sys_decode_config_r(void)
{
	return 0x00122204U;
}
static inline u32 pri_ringstation_sys_decode_config_ring_m(void)
{
	return 0x7U << 0U;
}
static inline u32 pri_ringstation_sys_decode_config_ring_drop_on_ring_not_started_f(void)
{
	return 0x1U;
}
static inline u32 pri_ringstation_sys_priv_error_adr_r(void)
{
	return 0x00122120U;
}
static inline u32 pri_ringstation_sys_priv_error_wrdat_r(void)
{
	return 0x00122124U;
}
static inline u32 pri_ringstation_sys_priv_error_info_r(void)
{
	return 0x00122128U;
}
static inline u32 pri_ringstation_sys_priv_error_code_r(void)
{
	return 0x0012212cU;
}
#endif

View File

@@ -0,0 +1,195 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_proj_gv100_h_
#define _hw_proj_gv100_h_
static inline u32 proj_gpc_base_v(void)
{
return 0x00500000U;
}
static inline u32 proj_gpc_shared_base_v(void)
{
return 0x00418000U;
}
static inline u32 proj_gpc_stride_v(void)
{
return 0x00008000U;
}
static inline u32 proj_ltc_stride_v(void)
{
return 0x00002000U;
}
static inline u32 proj_lts_stride_v(void)
{
return 0x00000200U;
}
static inline u32 proj_fbpa_base_v(void)
{
return 0x00900000U;
}
static inline u32 proj_fbpa_shared_base_v(void)
{
return 0x009a0000U;
}
static inline u32 proj_fbpa_stride_v(void)
{
return 0x00004000U;
}
static inline u32 proj_ppc_in_gpc_base_v(void)
{
return 0x00003000U;
}
static inline u32 proj_ppc_in_gpc_shared_base_v(void)
{
return 0x00003e00U;
}
static inline u32 proj_ppc_in_gpc_stride_v(void)
{
return 0x00000200U;
}
static inline u32 proj_rop_base_v(void)
{
return 0x00410000U;
}
static inline u32 proj_rop_shared_base_v(void)
{
return 0x00408800U;
}
static inline u32 proj_rop_stride_v(void)
{
return 0x00000400U;
}
static inline u32 proj_tpc_in_gpc_base_v(void)
{
return 0x00004000U;
}
static inline u32 proj_tpc_in_gpc_stride_v(void)
{
return 0x00000800U;
}
static inline u32 proj_tpc_in_gpc_shared_base_v(void)
{
return 0x00001800U;
}
static inline u32 proj_smpc_base_v(void)
{
return 0x00000200U;
}
static inline u32 proj_smpc_shared_base_v(void)
{
return 0x00000300U;
}
static inline u32 proj_smpc_unique_base_v(void)
{
return 0x00000600U;
}
static inline u32 proj_smpc_stride_v(void)
{
return 0x00000100U;
}
static inline u32 proj_host_num_engines_v(void)
{
return 0x0000000fU;
}
static inline u32 proj_host_num_pbdma_v(void)
{
return 0x0000000eU;
}
static inline u32 proj_scal_litter_num_tpc_per_gpc_v(void)
{
return 0x00000007U;
}
static inline u32 proj_scal_litter_num_fbps_v(void)
{
return 0x00000008U;
}
static inline u32 proj_scal_litter_num_fbpas_v(void)
{
return 0x00000010U;
}
static inline u32 proj_scal_litter_num_gpcs_v(void)
{
return 0x00000006U;
}
static inline u32 proj_scal_litter_num_pes_per_gpc_v(void)
{
return 0x00000003U;
}
static inline u32 proj_scal_litter_num_tpcs_per_pes_v(void)
{
return 0x00000003U;
}
static inline u32 proj_scal_litter_num_zcull_banks_v(void)
{
return 0x00000004U;
}
static inline u32 proj_scal_litter_num_sm_per_tpc_v(void)
{
return 0x00000002U;
}
static inline u32 proj_scal_max_gpcs_v(void)
{
return 0x00000020U;
}
static inline u32 proj_scal_max_tpc_per_gpc_v(void)
{
return 0x00000008U;
}
/* Constant value for the per-SM register stride (per the <x>_v convention above). */
static inline u32 proj_sm_stride_v(void)
{
	const u32 sm_stride = 0x00000080U;

	return sm_stride;
}
#endif

/* ---- End of hw_proj_gv100.h; next file: hw_pwr_gv100.h (new file, 935 lines) ---- */
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_pwr_gv100_h_
#define _hw_pwr_gv100_h_
/* Register offset for PWR falcon IRQSSET (per the <x>_r convention above). */
static inline u32 pwr_falcon_irqsset_r(void)
{
	const u32 reg_offset = 0x0010a000U;

	return reg_offset;
}
static inline u32 pwr_falcon_irqsset_swgen0_set_f(void)
{
return 0x40U;
}
static inline u32 pwr_falcon_irqsclr_r(void)
{
return 0x0010a004U;
}
static inline u32 pwr_falcon_irqstat_r(void)
{
return 0x0010a008U;
}
static inline u32 pwr_falcon_irqstat_halt_true_f(void)
{
return 0x10U;
}
static inline u32 pwr_falcon_irqstat_exterr_true_f(void)
{
return 0x20U;
}
static inline u32 pwr_falcon_irqstat_swgen0_true_f(void)
{
return 0x40U;
}
static inline u32 pwr_falcon_irqstat_ext_second_true_f(void)
{
return 0x800U;
}
static inline u32 pwr_falcon_irqmode_r(void)
{
return 0x0010a00cU;
}
static inline u32 pwr_falcon_irqmset_r(void)
{
return 0x0010a010U;
}
/* Shift/mask 'v' into the gptmr field (bit 0) of the IRQMSET register. */
static inline u32 pwr_falcon_irqmset_gptmr_f(u32 v)
{
	const u32 field_mask = 0x1U;
	const u32 field_shift = 0U;

	return (v & field_mask) << field_shift;
}
static inline u32 pwr_falcon_irqmset_wdtmr_f(u32 v)
{
return (v & 0x1U) << 1U;
}
static inline u32 pwr_falcon_irqmset_mthd_f(u32 v)
{
return (v & 0x1U) << 2U;
}
static inline u32 pwr_falcon_irqmset_ctxsw_f(u32 v)
{
return (v & 0x1U) << 3U;
}
static inline u32 pwr_falcon_irqmset_halt_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 pwr_falcon_irqmset_exterr_f(u32 v)
{
return (v & 0x1U) << 5U;
}
static inline u32 pwr_falcon_irqmset_swgen0_f(u32 v)
{
return (v & 0x1U) << 6U;
}
static inline u32 pwr_falcon_irqmset_swgen1_f(u32 v)
{
return (v & 0x1U) << 7U;
}
static inline u32 pwr_falcon_irqmset_ext_f(u32 v)
{
return (v & 0xffU) << 8U;
}
static inline u32 pwr_falcon_irqmset_ext_ctxe_f(u32 v)
{
return (v & 0x1U) << 8U;
}
static inline u32 pwr_falcon_irqmset_ext_limitv_f(u32 v)
{
return (v & 0x1U) << 9U;
}
static inline u32 pwr_falcon_irqmset_ext_second_f(u32 v)
{
return (v & 0x1U) << 11U;
}
static inline u32 pwr_falcon_irqmset_ext_therm_f(u32 v)
{
return (v & 0x1U) << 12U;
}
static inline u32 pwr_falcon_irqmset_ext_miscio_f(u32 v)
{
return (v & 0x1U) << 13U;
}
static inline u32 pwr_falcon_irqmset_ext_rttimer_f(u32 v)
{
return (v & 0x1U) << 14U;
}
static inline u32 pwr_falcon_irqmclr_r(void)
{
return 0x0010a014U;
}
static inline u32 pwr_falcon_irqmclr_gptmr_f(u32 v)
{
return (v & 0x1U) << 0U;
}
static inline u32 pwr_falcon_irqmclr_wdtmr_f(u32 v)
{
return (v & 0x1U) << 1U;
}
static inline u32 pwr_falcon_irqmclr_mthd_f(u32 v)
{
return (v & 0x1U) << 2U;
}
static inline u32 pwr_falcon_irqmclr_ctxsw_f(u32 v)
{
return (v & 0x1U) << 3U;
}
static inline u32 pwr_falcon_irqmclr_halt_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 pwr_falcon_irqmclr_exterr_f(u32 v)
{
return (v & 0x1U) << 5U;
}
static inline u32 pwr_falcon_irqmclr_swgen0_f(u32 v)
{
return (v & 0x1U) << 6U;
}
static inline u32 pwr_falcon_irqmclr_swgen1_f(u32 v)
{
return (v & 0x1U) << 7U;
}
static inline u32 pwr_falcon_irqmclr_ext_f(u32 v)
{
return (v & 0xffU) << 8U;
}
static inline u32 pwr_falcon_irqmclr_ext_ctxe_f(u32 v)
{
return (v & 0x1U) << 8U;
}
static inline u32 pwr_falcon_irqmclr_ext_limitv_f(u32 v)
{
return (v & 0x1U) << 9U;
}
static inline u32 pwr_falcon_irqmclr_ext_second_f(u32 v)
{
return (v & 0x1U) << 11U;
}
static inline u32 pwr_falcon_irqmclr_ext_therm_f(u32 v)
{
return (v & 0x1U) << 12U;
}
static inline u32 pwr_falcon_irqmclr_ext_miscio_f(u32 v)
{
return (v & 0x1U) << 13U;
}
static inline u32 pwr_falcon_irqmclr_ext_rttimer_f(u32 v)
{
return (v & 0x1U) << 14U;
}
static inline u32 pwr_falcon_irqmask_r(void)
{
return 0x0010a018U;
}
static inline u32 pwr_falcon_irqdest_r(void)
{
return 0x0010a01cU;
}
static inline u32 pwr_falcon_irqdest_host_gptmr_f(u32 v)
{
return (v & 0x1U) << 0U;
}
static inline u32 pwr_falcon_irqdest_host_wdtmr_f(u32 v)
{
return (v & 0x1U) << 1U;
}
static inline u32 pwr_falcon_irqdest_host_mthd_f(u32 v)
{
return (v & 0x1U) << 2U;
}
static inline u32 pwr_falcon_irqdest_host_ctxsw_f(u32 v)
{
return (v & 0x1U) << 3U;
}
static inline u32 pwr_falcon_irqdest_host_halt_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 pwr_falcon_irqdest_host_exterr_f(u32 v)
{
return (v & 0x1U) << 5U;
}
static inline u32 pwr_falcon_irqdest_host_swgen0_f(u32 v)
{
return (v & 0x1U) << 6U;
}
static inline u32 pwr_falcon_irqdest_host_swgen1_f(u32 v)
{
return (v & 0x1U) << 7U;
}
static inline u32 pwr_falcon_irqdest_host_ext_f(u32 v)
{
return (v & 0xffU) << 8U;
}
static inline u32 pwr_falcon_irqdest_host_ext_ctxe_f(u32 v)
{
return (v & 0x1U) << 8U;
}
static inline u32 pwr_falcon_irqdest_host_ext_limitv_f(u32 v)
{
return (v & 0x1U) << 9U;
}
static inline u32 pwr_falcon_irqdest_host_ext_second_f(u32 v)
{
return (v & 0x1U) << 11U;
}
static inline u32 pwr_falcon_irqdest_host_ext_therm_f(u32 v)
{
return (v & 0x1U) << 12U;
}
static inline u32 pwr_falcon_irqdest_host_ext_miscio_f(u32 v)
{
return (v & 0x1U) << 13U;
}
static inline u32 pwr_falcon_irqdest_host_ext_rttimer_f(u32 v)
{
return (v & 0x1U) << 14U;
}
static inline u32 pwr_falcon_irqdest_target_gptmr_f(u32 v)
{
return (v & 0x1U) << 16U;
}
static inline u32 pwr_falcon_irqdest_target_wdtmr_f(u32 v)
{
return (v & 0x1U) << 17U;
}
static inline u32 pwr_falcon_irqdest_target_mthd_f(u32 v)
{
return (v & 0x1U) << 18U;
}
static inline u32 pwr_falcon_irqdest_target_ctxsw_f(u32 v)
{
return (v & 0x1U) << 19U;
}
static inline u32 pwr_falcon_irqdest_target_halt_f(u32 v)
{
return (v & 0x1U) << 20U;
}
static inline u32 pwr_falcon_irqdest_target_exterr_f(u32 v)
{
return (v & 0x1U) << 21U;
}
static inline u32 pwr_falcon_irqdest_target_swgen0_f(u32 v)
{
return (v & 0x1U) << 22U;
}
static inline u32 pwr_falcon_irqdest_target_swgen1_f(u32 v)
{
return (v & 0x1U) << 23U;
}
static inline u32 pwr_falcon_irqdest_target_ext_f(u32 v)
{
return (v & 0xffU) << 24U;
}
static inline u32 pwr_falcon_irqdest_target_ext_ctxe_f(u32 v)
{
return (v & 0x1U) << 24U;
}
static inline u32 pwr_falcon_irqdest_target_ext_limitv_f(u32 v)
{
return (v & 0x1U) << 25U;
}
static inline u32 pwr_falcon_irqdest_target_ext_second_f(u32 v)
{
return (v & 0x1U) << 27U;
}
static inline u32 pwr_falcon_irqdest_target_ext_therm_f(u32 v)
{
return (v & 0x1U) << 28U;
}
static inline u32 pwr_falcon_irqdest_target_ext_miscio_f(u32 v)
{
return (v & 0x1U) << 29U;
}
static inline u32 pwr_falcon_irqdest_target_ext_rttimer_f(u32 v)
{
return (v & 0x1U) << 30U;
}
static inline u32 pwr_falcon_curctx_r(void)
{
return 0x0010a050U;
}
static inline u32 pwr_falcon_nxtctx_r(void)
{
return 0x0010a054U;
}
static inline u32 pwr_falcon_mailbox0_r(void)
{
return 0x0010a040U;
}
static inline u32 pwr_falcon_mailbox1_r(void)
{
return 0x0010a044U;
}
static inline u32 pwr_falcon_itfen_r(void)
{
return 0x0010a048U;
}
static inline u32 pwr_falcon_itfen_ctxen_enable_f(void)
{
return 0x1U;
}
static inline u32 pwr_falcon_idlestate_r(void)
{
return 0x0010a04cU;
}
static inline u32 pwr_falcon_idlestate_falcon_busy_v(u32 r)
{
return (r >> 0U) & 0x1U;
}
static inline u32 pwr_falcon_idlestate_ext_busy_v(u32 r)
{
return (r >> 1U) & 0x7fffU;
}
static inline u32 pwr_falcon_os_r(void)
{
return 0x0010a080U;
}
static inline u32 pwr_falcon_engctl_r(void)
{
return 0x0010a0a4U;
}
static inline u32 pwr_falcon_cpuctl_r(void)
{
return 0x0010a100U;
}
/* Shift/mask 'v' into the startcpu field (bit 1) of the CPUCTL register. */
static inline u32 pwr_falcon_cpuctl_startcpu_f(u32 v)
{
	const u32 field_mask = 0x1U;
	const u32 field_shift = 1U;

	return (v & field_mask) << field_shift;
}
static inline u32 pwr_falcon_cpuctl_halt_intr_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 pwr_falcon_cpuctl_halt_intr_m(void)
{
return 0x1U << 4U;
}
static inline u32 pwr_falcon_cpuctl_halt_intr_v(u32 r)
{
return (r >> 4U) & 0x1U;
}
static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_f(u32 v)
{
return (v & 0x1U) << 6U;
}
static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_m(void)
{
return 0x1U << 6U;
}
static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_v(u32 r)
{
return (r >> 6U) & 0x1U;
}
static inline u32 pwr_falcon_cpuctl_alias_r(void)
{
return 0x0010a130U;
}
static inline u32 pwr_falcon_cpuctl_alias_startcpu_f(u32 v)
{
return (v & 0x1U) << 1U;
}
static inline u32 pwr_pmu_scpctl_stat_r(void)
{
return 0x0010ac08U;
}
static inline u32 pwr_pmu_scpctl_stat_debug_mode_f(u32 v)
{
return (v & 0x1U) << 20U;
}
static inline u32 pwr_pmu_scpctl_stat_debug_mode_m(void)
{
return 0x1U << 20U;
}
static inline u32 pwr_pmu_scpctl_stat_debug_mode_v(u32 r)
{
return (r >> 20U) & 0x1U;
}
/* Offset of the i-th IMEMC register: base 0x0010a180, 16-byte stride. */
static inline u32 pwr_falcon_imemc_r(u32 i)
{
	const u32 base = 0x0010a180U;
	const u32 stride = 16U;

	return base + i * stride;
}
static inline u32 pwr_falcon_imemc_offs_f(u32 v)
{
return (v & 0x3fU) << 2U;
}
static inline u32 pwr_falcon_imemc_blk_f(u32 v)
{
return (v & 0xffU) << 8U;
}
static inline u32 pwr_falcon_imemc_aincw_f(u32 v)
{
return (v & 0x1U) << 24U;
}
static inline u32 pwr_falcon_imemd_r(u32 i)
{
return 0x0010a184U + i*16U;
}
static inline u32 pwr_falcon_imemt_r(u32 i)
{
return 0x0010a188U + i*16U;
}
static inline u32 pwr_falcon_sctl_r(void)
{
return 0x0010a240U;
}
static inline u32 pwr_falcon_mmu_phys_sec_r(void)
{
return 0x00100ce4U;
}
static inline u32 pwr_falcon_bootvec_r(void)
{
return 0x0010a104U;
}
static inline u32 pwr_falcon_bootvec_vec_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 pwr_falcon_dmactl_r(void)
{
return 0x0010a10cU;
}
static inline u32 pwr_falcon_dmactl_dmem_scrubbing_m(void)
{
return 0x1U << 1U;
}
static inline u32 pwr_falcon_dmactl_imem_scrubbing_m(void)
{
return 0x1U << 2U;
}
static inline u32 pwr_falcon_hwcfg_r(void)
{
return 0x0010a108U;
}
static inline u32 pwr_falcon_hwcfg_imem_size_v(u32 r)
{
return (r >> 0U) & 0x1ffU;
}
static inline u32 pwr_falcon_hwcfg_dmem_size_v(u32 r)
{
return (r >> 9U) & 0x1ffU;
}
static inline u32 pwr_falcon_dmatrfbase_r(void)
{
return 0x0010a110U;
}
static inline u32 pwr_falcon_dmatrfbase1_r(void)
{
return 0x0010a128U;
}
static inline u32 pwr_falcon_dmatrfmoffs_r(void)
{
return 0x0010a114U;
}
static inline u32 pwr_falcon_dmatrfcmd_r(void)
{
return 0x0010a118U;
}
static inline u32 pwr_falcon_dmatrfcmd_imem_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 pwr_falcon_dmatrfcmd_write_f(u32 v)
{
return (v & 0x1U) << 5U;
}
static inline u32 pwr_falcon_dmatrfcmd_size_f(u32 v)
{
return (v & 0x7U) << 8U;
}
static inline u32 pwr_falcon_dmatrfcmd_ctxdma_f(u32 v)
{
return (v & 0x7U) << 12U;
}
static inline u32 pwr_falcon_dmatrffboffs_r(void)
{
return 0x0010a11cU;
}
static inline u32 pwr_falcon_exterraddr_r(void)
{
return 0x0010a168U;
}
static inline u32 pwr_falcon_exterrstat_r(void)
{
return 0x0010a16cU;
}
static inline u32 pwr_falcon_exterrstat_valid_m(void)
{
return 0x1U << 31U;
}
static inline u32 pwr_falcon_exterrstat_valid_v(u32 r)
{
return (r >> 31U) & 0x1U;
}
static inline u32 pwr_falcon_exterrstat_valid_true_v(void)
{
return 0x00000001U;
}
static inline u32 pwr_pmu_falcon_icd_cmd_r(void)
{
return 0x0010a200U;
}
static inline u32 pwr_pmu_falcon_icd_cmd_opc_s(void)
{
return 4U;
}
static inline u32 pwr_pmu_falcon_icd_cmd_opc_f(u32 v)
{
return (v & 0xfU) << 0U;
}
static inline u32 pwr_pmu_falcon_icd_cmd_opc_m(void)
{
return 0xfU << 0U;
}
static inline u32 pwr_pmu_falcon_icd_cmd_opc_v(u32 r)
{
return (r >> 0U) & 0xfU;
}
static inline u32 pwr_pmu_falcon_icd_cmd_opc_rreg_f(void)
{
return 0x8U;
}
static inline u32 pwr_pmu_falcon_icd_cmd_opc_rstat_f(void)
{
return 0xeU;
}
static inline u32 pwr_pmu_falcon_icd_cmd_idx_f(u32 v)
{
return (v & 0x1fU) << 8U;
}
static inline u32 pwr_pmu_falcon_icd_rdata_r(void)
{
return 0x0010a20cU;
}
/* Offset of the i-th DMEMC register: base 0x0010a1c0, 8-byte stride. */
static inline u32 pwr_falcon_dmemc_r(u32 i)
{
	const u32 base = 0x0010a1c0U;
	const u32 stride = 8U;

	return base + i * stride;
}
static inline u32 pwr_falcon_dmemc_offs_f(u32 v)
{
return (v & 0x3fU) << 2U;
}
static inline u32 pwr_falcon_dmemc_offs_m(void)
{
return 0x3fU << 2U;
}
static inline u32 pwr_falcon_dmemc_blk_f(u32 v)
{
return (v & 0xffU) << 8U;
}
static inline u32 pwr_falcon_dmemc_blk_m(void)
{
return 0xffU << 8U;
}
static inline u32 pwr_falcon_dmemc_aincw_f(u32 v)
{
return (v & 0x1U) << 24U;
}
static inline u32 pwr_falcon_dmemc_aincr_f(u32 v)
{
return (v & 0x1U) << 25U;
}
static inline u32 pwr_falcon_dmemd_r(u32 i)
{
return 0x0010a1c4U + i*8U;
}
static inline u32 pwr_pmu_new_instblk_r(void)
{
return 0x0010a480U;
}
static inline u32 pwr_pmu_new_instblk_ptr_f(u32 v)
{
return (v & 0xfffffffU) << 0U;
}
static inline u32 pwr_pmu_new_instblk_target_fb_f(void)
{
return 0x0U;
}
static inline u32 pwr_pmu_new_instblk_target_sys_coh_f(void)
{
return 0x20000000U;
}
static inline u32 pwr_pmu_new_instblk_target_sys_ncoh_f(void)
{
return 0x30000000U;
}
static inline u32 pwr_pmu_new_instblk_valid_f(u32 v)
{
return (v & 0x1U) << 30U;
}
static inline u32 pwr_pmu_mutex_id_r(void)
{
return 0x0010a488U;
}
static inline u32 pwr_pmu_mutex_id_value_v(u32 r)
{
return (r >> 0U) & 0xffU;
}
static inline u32 pwr_pmu_mutex_id_value_init_v(void)
{
return 0x00000000U;
}
static inline u32 pwr_pmu_mutex_id_value_not_avail_v(void)
{
return 0x000000ffU;
}
static inline u32 pwr_pmu_mutex_id_release_r(void)
{
return 0x0010a48cU;
}
static inline u32 pwr_pmu_mutex_id_release_value_f(u32 v)
{
return (v & 0xffU) << 0U;
}
static inline u32 pwr_pmu_mutex_id_release_value_m(void)
{
return 0xffU << 0U;
}
static inline u32 pwr_pmu_mutex_id_release_value_init_v(void)
{
return 0x00000000U;
}
static inline u32 pwr_pmu_mutex_id_release_value_init_f(void)
{
return 0x0U;
}
static inline u32 pwr_pmu_mutex_r(u32 i)
{
return 0x0010a580U + i*4U;
}
static inline u32 pwr_pmu_mutex__size_1_v(void)
{
return 0x00000010U;
}
static inline u32 pwr_pmu_mutex_value_f(u32 v)
{
return (v & 0xffU) << 0U;
}
static inline u32 pwr_pmu_mutex_value_v(u32 r)
{
return (r >> 0U) & 0xffU;
}
static inline u32 pwr_pmu_mutex_value_initial_lock_f(void)
{
return 0x0U;
}
/* Offset of the i-th PMU queue-head register: base 0x0010a800, 4-byte stride. */
static inline u32 pwr_pmu_queue_head_r(u32 i)
{
	const u32 base = 0x0010a800U;
	const u32 stride = 4U;

	return base + i * stride;
}
static inline u32 pwr_pmu_queue_head__size_1_v(void)
{
return 0x00000008U;
}
static inline u32 pwr_pmu_queue_head_address_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 pwr_pmu_queue_head_address_v(u32 r)
{
return (r >> 0U) & 0xffffffffU;
}
static inline u32 pwr_pmu_queue_tail_r(u32 i)
{
return 0x0010a820U + i*4U;
}
static inline u32 pwr_pmu_queue_tail__size_1_v(void)
{
return 0x00000008U;
}
static inline u32 pwr_pmu_queue_tail_address_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 pwr_pmu_queue_tail_address_v(u32 r)
{
return (r >> 0U) & 0xffffffffU;
}
static inline u32 pwr_pmu_msgq_head_r(void)
{
return 0x0010a4c8U;
}
static inline u32 pwr_pmu_msgq_head_val_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 pwr_pmu_msgq_head_val_v(u32 r)
{
return (r >> 0U) & 0xffffffffU;
}
static inline u32 pwr_pmu_msgq_tail_r(void)
{
return 0x0010a4ccU;
}
static inline u32 pwr_pmu_msgq_tail_val_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 pwr_pmu_msgq_tail_val_v(u32 r)
{
return (r >> 0U) & 0xffffffffU;
}
static inline u32 pwr_pmu_idle_mask_r(u32 i)
{
return 0x0010a504U + i*16U;
}
static inline u32 pwr_pmu_idle_mask_gr_enabled_f(void)
{
return 0x1U;
}
static inline u32 pwr_pmu_idle_mask_ce_2_enabled_f(void)
{
return 0x200000U;
}
static inline u32 pwr_pmu_idle_count_r(u32 i)
{
return 0x0010a508U + i*16U;
}
static inline u32 pwr_pmu_idle_count_value_f(u32 v)
{
return (v & 0x7fffffffU) << 0U;
}
static inline u32 pwr_pmu_idle_count_value_v(u32 r)
{
return (r >> 0U) & 0x7fffffffU;
}
static inline u32 pwr_pmu_idle_count_reset_f(u32 v)
{
return (v & 0x1U) << 31U;
}
static inline u32 pwr_pmu_idle_ctrl_r(u32 i)
{
return 0x0010a50cU + i*16U;
}
static inline u32 pwr_pmu_idle_ctrl_value_m(void)
{
return 0x3U << 0U;
}
static inline u32 pwr_pmu_idle_ctrl_value_busy_f(void)
{
return 0x2U;
}
static inline u32 pwr_pmu_idle_ctrl_value_always_f(void)
{
return 0x3U;
}
static inline u32 pwr_pmu_idle_ctrl_filter_m(void)
{
return 0x1U << 2U;
}
static inline u32 pwr_pmu_idle_ctrl_filter_disabled_f(void)
{
return 0x0U;
}
static inline u32 pwr_pmu_idle_mask_supp_r(u32 i)
{
return 0x0010a9f0U + i*8U;
}
static inline u32 pwr_pmu_idle_mask_1_supp_r(u32 i)
{
return 0x0010a9f4U + i*8U;
}
static inline u32 pwr_pmu_idle_ctrl_supp_r(u32 i)
{
return 0x0010aa30U + i*8U;
}
static inline u32 pwr_pmu_debug_r(u32 i)
{
return 0x0010a5c0U + i*4U;
}
static inline u32 pwr_pmu_debug__size_1_v(void)
{
return 0x00000004U;
}
static inline u32 pwr_pmu_mailbox_r(u32 i)
{
return 0x0010a450U + i*4U;
}
static inline u32 pwr_pmu_mailbox__size_1_v(void)
{
return 0x0000000cU;
}
static inline u32 pwr_pmu_bar0_addr_r(void)
{
return 0x0010a7a0U;
}
static inline u32 pwr_pmu_bar0_data_r(void)
{
return 0x0010a7a4U;
}
static inline u32 pwr_pmu_bar0_ctl_r(void)
{
return 0x0010a7acU;
}
static inline u32 pwr_pmu_bar0_timeout_r(void)
{
return 0x0010a7a8U;
}
static inline u32 pwr_pmu_bar0_fecs_error_r(void)
{
return 0x0010a988U;
}
static inline u32 pwr_pmu_bar0_error_status_r(void)
{
return 0x0010a7b0U;
}
static inline u32 pwr_pmu_pg_idlefilth_r(u32 i)
{
return 0x0010a6c0U + i*4U;
}
static inline u32 pwr_pmu_pg_ppuidlefilth_r(u32 i)
{
return 0x0010a6e8U + i*4U;
}
static inline u32 pwr_pmu_pg_idle_cnt_r(u32 i)
{
return 0x0010a710U + i*4U;
}
static inline u32 pwr_pmu_pg_intren_r(u32 i)
{
return 0x0010a760U + i*4U;
}
static inline u32 pwr_fbif_transcfg_r(u32 i)
{
return 0x0010ae00U + i*4U;
}
static inline u32 pwr_fbif_transcfg_target_local_fb_f(void)
{
return 0x0U;
}
static inline u32 pwr_fbif_transcfg_target_coherent_sysmem_f(void)
{
return 0x1U;
}
static inline u32 pwr_fbif_transcfg_target_noncoherent_sysmem_f(void)
{
return 0x2U;
}
static inline u32 pwr_fbif_transcfg_mem_type_s(void)
{
return 1U;
}
static inline u32 pwr_fbif_transcfg_mem_type_f(u32 v)
{
return (v & 0x1U) << 2U;
}
static inline u32 pwr_fbif_transcfg_mem_type_m(void)
{
return 0x1U << 2U;
}
static inline u32 pwr_fbif_transcfg_mem_type_v(u32 r)
{
return (r >> 2U) & 0x1U;
}
static inline u32 pwr_fbif_transcfg_mem_type_virtual_f(void)
{
return 0x0U;
}
static inline u32 pwr_fbif_transcfg_mem_type_physical_f(void)
{
return 0x4U;
}
#endif

/* ---- End of hw_pwr_gv100.h; next file: hw_ram_gv100.h (new file, 775 lines) ---- */
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_ram_gv100_h_
#define _hw_ram_gv100_h_
/* Size in bits of the ramfc field of ram_in (per the <x>_<y>_s convention above). */
static inline u32 ram_in_ramfc_s(void)
{
	const u32 size_bits = 4096U;

	return size_bits;
}
static inline u32 ram_in_ramfc_w(void)
{
return 0U;
}
static inline u32 ram_in_page_dir_base_target_f(u32 v)
{
return (v & 0x3U) << 0U;
}
static inline u32 ram_in_page_dir_base_target_w(void)
{
return 128U;
}
static inline u32 ram_in_page_dir_base_target_vid_mem_f(void)
{
return 0x0U;
}
static inline u32 ram_in_page_dir_base_target_sys_mem_coh_f(void)
{
return 0x2U;
}
static inline u32 ram_in_page_dir_base_target_sys_mem_ncoh_f(void)
{
return 0x3U;
}
static inline u32 ram_in_page_dir_base_vol_w(void)
{
return 128U;
}
static inline u32 ram_in_page_dir_base_vol_true_f(void)
{
return 0x4U;
}
static inline u32 ram_in_page_dir_base_vol_false_f(void)
{
return 0x0U;
}
static inline u32 ram_in_page_dir_base_fault_replay_tex_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 ram_in_page_dir_base_fault_replay_tex_m(void)
{
return 0x1U << 4U;
}
static inline u32 ram_in_page_dir_base_fault_replay_tex_w(void)
{
return 128U;
}
static inline u32 ram_in_page_dir_base_fault_replay_tex_true_f(void)
{
return 0x10U;
}
static inline u32 ram_in_page_dir_base_fault_replay_gcc_f(u32 v)
{
return (v & 0x1U) << 5U;
}
static inline u32 ram_in_page_dir_base_fault_replay_gcc_m(void)
{
return 0x1U << 5U;
}
static inline u32 ram_in_page_dir_base_fault_replay_gcc_w(void)
{
return 128U;
}
static inline u32 ram_in_page_dir_base_fault_replay_gcc_true_f(void)
{
return 0x20U;
}
static inline u32 ram_in_big_page_size_f(u32 v)
{
return (v & 0x1U) << 11U;
}
static inline u32 ram_in_big_page_size_m(void)
{
return 0x1U << 11U;
}
static inline u32 ram_in_big_page_size_w(void)
{
return 128U;
}
static inline u32 ram_in_big_page_size_128kb_f(void)
{
return 0x0U;
}
static inline u32 ram_in_big_page_size_64kb_f(void)
{
return 0x800U;
}
static inline u32 ram_in_page_dir_base_lo_f(u32 v)
{
return (v & 0xfffffU) << 12U;
}
static inline u32 ram_in_page_dir_base_lo_w(void)
{
return 128U;
}
static inline u32 ram_in_page_dir_base_hi_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 ram_in_page_dir_base_hi_w(void)
{
return 129U;
}
static inline u32 ram_in_engine_cs_w(void)
{
return 132U;
}
static inline u32 ram_in_engine_cs_wfi_v(void)
{
return 0x00000000U;
}
static inline u32 ram_in_engine_cs_wfi_f(void)
{
return 0x0U;
}
static inline u32 ram_in_engine_cs_fg_v(void)
{
return 0x00000001U;
}
static inline u32 ram_in_engine_cs_fg_f(void)
{
return 0x8U;
}
static inline u32 ram_in_engine_wfi_mode_f(u32 v)
{
return (v & 0x1U) << 2U;
}
static inline u32 ram_in_engine_wfi_mode_w(void)
{
return 132U;
}
static inline u32 ram_in_engine_wfi_mode_physical_v(void)
{
return 0x00000000U;
}
static inline u32 ram_in_engine_wfi_mode_virtual_v(void)
{
return 0x00000001U;
}
static inline u32 ram_in_engine_wfi_target_f(u32 v)
{
return (v & 0x3U) << 0U;
}
static inline u32 ram_in_engine_wfi_target_w(void)
{
return 132U;
}
static inline u32 ram_in_engine_wfi_target_sys_mem_coh_v(void)
{
return 0x00000002U;
}
static inline u32 ram_in_engine_wfi_target_sys_mem_ncoh_v(void)
{
return 0x00000003U;
}
static inline u32 ram_in_engine_wfi_target_local_mem_v(void)
{
return 0x00000000U;
}
static inline u32 ram_in_engine_wfi_ptr_lo_f(u32 v)
{
return (v & 0xfffffU) << 12U;
}
static inline u32 ram_in_engine_wfi_ptr_lo_w(void)
{
return 132U;
}
static inline u32 ram_in_engine_wfi_ptr_hi_f(u32 v)
{
return (v & 0xffU) << 0U;
}
static inline u32 ram_in_engine_wfi_ptr_hi_w(void)
{
return 133U;
}
static inline u32 ram_in_engine_wfi_veid_f(u32 v)
{
return (v & 0x3fU) << 0U;
}
static inline u32 ram_in_engine_wfi_veid_w(void)
{
return 134U;
}
static inline u32 ram_in_eng_method_buffer_addr_lo_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 ram_in_eng_method_buffer_addr_lo_w(void)
{
return 136U;
}
static inline u32 ram_in_eng_method_buffer_addr_hi_f(u32 v)
{
return (v & 0x1ffffU) << 0U;
}
static inline u32 ram_in_eng_method_buffer_addr_hi_w(void)
{
return 137U;
}
/*
 * Shift/mask 'v' into the i-th subcontext page_dir_base_target field.
 * NOTE(review): the generated per-index stride is zero (i*0U), so every
 * index i maps to the same bit position. This matches the generator's
 * output for the 64-entry sc_* fields -- confirm against the hardware
 * manual whether callers are expected to add a per-VEID word offset
 * themselves (cf. the *_0_w() accessors below, which return fixed words).
 */
static inline u32 ram_in_sc_page_dir_base_target_f(u32 v, u32 i)
{
	return (v & 0x3U) << (0U + i*0U);
}
static inline u32 ram_in_sc_page_dir_base_target__size_1_v(void)
{
return 0x00000040U;
}
static inline u32 ram_in_sc_page_dir_base_target_vid_mem_v(void)
{
return 0x00000000U;
}
static inline u32 ram_in_sc_page_dir_base_target_invalid_v(void)
{
return 0x00000001U;
}
static inline u32 ram_in_sc_page_dir_base_target_sys_mem_coh_v(void)
{
return 0x00000002U;
}
static inline u32 ram_in_sc_page_dir_base_target_sys_mem_ncoh_v(void)
{
return 0x00000003U;
}
static inline u32 ram_in_sc_page_dir_base_vol_f(u32 v, u32 i)
{
return (v & 0x1U) << (2U + i*0U);
}
static inline u32 ram_in_sc_page_dir_base_vol__size_1_v(void)
{
return 0x00000040U;
}
static inline u32 ram_in_sc_page_dir_base_vol_true_v(void)
{
return 0x00000001U;
}
static inline u32 ram_in_sc_page_dir_base_vol_false_v(void)
{
return 0x00000000U;
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_f(u32 v, u32 i)
{
return (v & 0x1U) << (4U + i*0U);
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_tex__size_1_v(void)
{
return 0x00000040U;
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_enabled_v(void)
{
return 0x00000001U;
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_disabled_v(void)
{
return 0x00000000U;
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_f(u32 v, u32 i)
{
return (v & 0x1U) << (5U + i*0U);
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc__size_1_v(void)
{
return 0x00000040U;
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_enabled_v(void)
{
return 0x00000001U;
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_disabled_v(void)
{
return 0x00000000U;
}
static inline u32 ram_in_sc_use_ver2_pt_format_f(u32 v, u32 i)
{
return (v & 0x1U) << (10U + i*0U);
}
static inline u32 ram_in_sc_use_ver2_pt_format__size_1_v(void)
{
return 0x00000040U;
}
static inline u32 ram_in_sc_use_ver2_pt_format_false_v(void)
{
return 0x00000000U;
}
static inline u32 ram_in_sc_use_ver2_pt_format_true_v(void)
{
return 0x00000001U;
}
static inline u32 ram_in_sc_big_page_size_f(u32 v, u32 i)
{
return (v & 0x1U) << (11U + i*0U);
}
static inline u32 ram_in_sc_big_page_size__size_1_v(void)
{
return 0x00000040U;
}
static inline u32 ram_in_sc_big_page_size_64kb_v(void)
{
return 0x00000001U;
}
static inline u32 ram_in_sc_page_dir_base_lo_f(u32 v, u32 i)
{
return (v & 0xfffffU) << (12U + i*0U);
}
static inline u32 ram_in_sc_page_dir_base_lo__size_1_v(void)
{
return 0x00000040U;
}
static inline u32 ram_in_sc_page_dir_base_hi_f(u32 v, u32 i)
{
return (v & 0xffffffffU) << (0U + i*0U);
}
static inline u32 ram_in_sc_page_dir_base_hi__size_1_v(void)
{
return 0x00000040U;
}
static inline u32 ram_in_sc_page_dir_base_target_0_f(u32 v)
{
return (v & 0x3U) << 0U;
}
static inline u32 ram_in_sc_page_dir_base_target_0_w(void)
{
return 168U;
}
static inline u32 ram_in_sc_page_dir_base_vol_0_f(u32 v)
{
return (v & 0x1U) << 2U;
}
static inline u32 ram_in_sc_page_dir_base_vol_0_w(void)
{
return 168U;
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_0_f(u32 v)
{
return (v & 0x1U) << 4U;
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_0_w(void)
{
return 168U;
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_0_f(u32 v)
{
return (v & 0x1U) << 5U;
}
static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_0_w(void)
{
return 168U;
}
static inline u32 ram_in_sc_use_ver2_pt_format_0_f(u32 v)
{
return (v & 0x1U) << 10U;
}
static inline u32 ram_in_sc_use_ver2_pt_format_0_w(void)
{
return 168U;
}
static inline u32 ram_in_sc_big_page_size_0_f(u32 v)
{
return (v & 0x1U) << 11U;
}
static inline u32 ram_in_sc_big_page_size_0_w(void)
{
return 168U;
}
static inline u32 ram_in_sc_page_dir_base_lo_0_f(u32 v)
{
return (v & 0xfffffU) << 12U;
}
static inline u32 ram_in_sc_page_dir_base_lo_0_w(void)
{
return 168U;
}
static inline u32 ram_in_sc_page_dir_base_hi_0_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 ram_in_sc_page_dir_base_hi_0_w(void)
{
return 169U;
}
static inline u32 ram_in_base_shift_v(void)
{
return 0x0000000cU;
}
static inline u32 ram_in_alloc_size_v(void)
{
return 0x00001000U;
}
/*
 * RAMFC — the per-channel FIFO context image stored inside the instance
 * block.  The *_w() accessors below return 32-bit word indices into the
 * RAMFC region (not byte offsets); ram_fc_size_val_v() gives the total
 * size of the region in bytes (0x200 = 512).
 */
static inline u32 ram_fc_size_val_v(void)
{
	return 0x00000200U;
}
static inline u32 ram_fc_gp_put_w(void)
{
	return 0U;
}
static inline u32 ram_fc_userd_w(void)
{
	return 2U;
}
static inline u32 ram_fc_userd_hi_w(void)
{
	return 3U;
}
static inline u32 ram_fc_signature_w(void)
{
	return 4U;
}
static inline u32 ram_fc_gp_get_w(void)
{
	return 5U;
}
static inline u32 ram_fc_pb_get_w(void)
{
	return 6U;
}
static inline u32 ram_fc_pb_get_hi_w(void)
{
	return 7U;
}
static inline u32 ram_fc_pb_top_level_get_w(void)
{
	return 8U;
}
static inline u32 ram_fc_pb_top_level_get_hi_w(void)
{
	return 9U;
}
static inline u32 ram_fc_acquire_w(void)
{
	return 12U;
}
/* Semaphore address/payload words for the channel's sync methods. */
static inline u32 ram_fc_sem_addr_hi_w(void)
{
	return 14U;
}
static inline u32 ram_fc_sem_addr_lo_w(void)
{
	return 15U;
}
static inline u32 ram_fc_sem_payload_lo_w(void)
{
	return 16U;
}
/*
 * NOTE(review): payload_hi lives at word 39, far from payload_lo
 * (word 16) — presumably the 64-bit semaphore payload extension was
 * added late in the layout.  Value taken from the generated manuals;
 * do not "fix" the apparent discontinuity.
 */
static inline u32 ram_fc_sem_payload_hi_w(void)
{
	return 39U;
}
static inline u32 ram_fc_sem_execute_w(void)
{
	return 17U;
}
/* GPFIFO base/fetch pointers. */
static inline u32 ram_fc_gp_base_w(void)
{
	return 18U;
}
static inline u32 ram_fc_gp_base_hi_w(void)
{
	return 19U;
}
static inline u32 ram_fc_gp_fetch_w(void)
{
	return 20U;
}
/* Pushbuffer fetch/put pointers. */
static inline u32 ram_fc_pb_fetch_w(void)
{
	return 21U;
}
static inline u32 ram_fc_pb_fetch_hi_w(void)
{
	return 22U;
}
static inline u32 ram_fc_pb_put_w(void)
{
	return 23U;
}
static inline u32 ram_fc_pb_put_hi_w(void)
{
	return 24U;
}
static inline u32 ram_fc_pb_header_w(void)
{
	return 33U;
}
static inline u32 ram_fc_pb_count_w(void)
{
	return 34U;
}
static inline u32 ram_fc_subdevice_w(void)
{
	return 37U;
}
static inline u32 ram_fc_target_w(void)
{
	return 43U;
}
/* Host copy-engine control word. */
static inline u32 ram_fc_hce_ctrl_w(void)
{
	return 57U;
}
static inline u32 ram_fc_chid_w(void)
{
	return 58U;
}
/* Channel id field (12 bits) within the chid word. */
static inline u32 ram_fc_chid_id_f(u32 v)
{
	return (v & 0xfffU) << 0U;
}
static inline u32 ram_fc_chid_id_w(void)
{
	return 0U;
}
static inline u32 ram_fc_config_w(void)
{
	return 61U;
}
static inline u32 ram_fc_runlist_timeslice_w(void)
{
	return 62U;
}
static inline u32 ram_fc_set_channel_info_w(void)
{
	return 63U;
}
/*
 * USERD — the per-channel user-space doorbell/state page.  Each channel
 * gets a 512-byte (0x200) slot, aligned to 1 << 9; the *_w() accessors
 * give 32-bit word indices of the various get/put pointers mirrored
 * there for user-space polling.
 */
static inline u32 ram_userd_base_shift_v(void)
{
	return 0x00000009U;
}
static inline u32 ram_userd_chan_size_v(void)
{
	return 0x00000200U;
}
static inline u32 ram_userd_put_w(void)
{
	return 16U;
}
static inline u32 ram_userd_get_w(void)
{
	return 17U;
}
static inline u32 ram_userd_ref_w(void)
{
	return 18U;
}
static inline u32 ram_userd_put_hi_w(void)
{
	return 19U;
}
static inline u32 ram_userd_ref_threshold_w(void)
{
	return 20U;
}
static inline u32 ram_userd_top_level_get_w(void)
{
	return 22U;
}
static inline u32 ram_userd_top_level_get_hi_w(void)
{
	return 23U;
}
static inline u32 ram_userd_get_hi_w(void)
{
	return 24U;
}
/* GPFIFO get/put mirrors. */
static inline u32 ram_userd_gp_get_w(void)
{
	return 34U;
}
static inline u32 ram_userd_gp_put_w(void)
{
	return 35U;
}
/* gp_top_level_get aliases the same words as top_level_get (22/23). */
static inline u32 ram_userd_gp_top_level_get_w(void)
{
	return 22U;
}
static inline u32 ram_userd_gp_top_level_get_hi_w(void)
{
	return 23U;
}
/*
 * Runlist entry encodings.  Each entry is 16 bytes (ram_rl_entry_size_v)
 * and is either a channel entry or a TSG (time-slice group) header —
 * selected by the type bit.  The chan_* helpers encode channel entries;
 * the tsg_* helpers encode TSG headers.  All _f() helpers mask the value
 * to the field width and shift it into position within one 32-bit word
 * of the entry.
 */
static inline u32 ram_rl_entry_size_v(void)
{
	return 0x00000010U;
}
static inline u32 ram_rl_entry_type_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 ram_rl_entry_type_channel_v(void)
{
	return 0x00000000U;
}
static inline u32 ram_rl_entry_type_tsg_v(void)
{
	return 0x00000001U;
}
static inline u32 ram_rl_entry_id_f(u32 v)
{
	return (v & 0xfffU) << 0U;
}
/* Volta channels can target one of two PBDMA runqueues. */
static inline u32 ram_rl_entry_chan_runqueue_selector_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
/* Aperture of the channel's instance block. */
static inline u32 ram_rl_entry_chan_inst_target_f(u32 v)
{
	return (v & 0x3U) << 4U;
}
static inline u32 ram_rl_entry_chan_inst_target_sys_mem_ncoh_v(void)
{
	return 0x00000003U;
}
static inline u32 ram_rl_entry_chan_inst_target_sys_mem_coh_v(void)
{
	return 0x00000002U;
}
static inline u32 ram_rl_entry_chan_inst_target_vid_mem_v(void)
{
	return 0x00000000U;
}
/* Aperture of the channel's USERD page. */
static inline u32 ram_rl_entry_chan_userd_target_f(u32 v)
{
	return (v & 0x3U) << 6U;
}
static inline u32 ram_rl_entry_chan_userd_target_vid_mem_v(void)
{
	return 0x00000000U;
}
static inline u32 ram_rl_entry_chan_userd_target_vid_mem_nvlink_coh_v(void)
{
	return 0x00000001U;
}
static inline u32 ram_rl_entry_chan_userd_target_sys_mem_coh_v(void)
{
	return 0x00000002U;
}
static inline u32 ram_rl_entry_chan_userd_target_sys_mem_ncoh_v(void)
{
	return 0x00000003U;
}
/* USERD pointer, stored >> 8 (see *_align_shift_v below). */
static inline u32 ram_rl_entry_chan_userd_ptr_lo_f(u32 v)
{
	return (v & 0xffffffU) << 8U;
}
static inline u32 ram_rl_entry_chan_userd_ptr_hi_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 ram_rl_entry_chid_f(u32 v)
{
	return (v & 0xfffU) << 0U;
}
/* Instance block pointer, stored >> 12 (4K aligned). */
static inline u32 ram_rl_entry_chan_inst_ptr_lo_f(u32 v)
{
	return (v & 0xfffffU) << 12U;
}
static inline u32 ram_rl_entry_chan_inst_ptr_hi_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
/* TSG timeslice = timeout << scale; scale 3 with timeout 128 is the
 * conventional default used by the driver. */
static inline u32 ram_rl_entry_tsg_timeslice_scale_f(u32 v)
{
	return (v & 0xfU) << 16U;
}
static inline u32 ram_rl_entry_tsg_timeslice_scale_3_v(void)
{
	return 0x00000003U;
}
static inline u32 ram_rl_entry_tsg_timeslice_timeout_f(u32 v)
{
	return (v & 0xffU) << 24U;
}
static inline u32 ram_rl_entry_tsg_timeslice_timeout_128_v(void)
{
	return 0x00000080U;
}
static inline u32 ram_rl_entry_tsg_timeslice_timeout_disable_v(void)
{
	return 0x00000000U;
}
/* Number of channel entries that follow a TSG header (1..128). */
static inline u32 ram_rl_entry_tsg_length_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 ram_rl_entry_tsg_length_init_v(void)
{
	return 0x00000000U;
}
static inline u32 ram_rl_entry_tsg_length_min_v(void)
{
	return 0x00000001U;
}
static inline u32 ram_rl_entry_tsg_length_max_v(void)
{
	return 0x00000080U;
}
static inline u32 ram_rl_entry_tsg_tsgid_f(u32 v)
{
	return (v & 0xfffU) << 0U;
}
/* Alignment shifts applied before storing the pointers above. */
static inline u32 ram_rl_entry_chan_userd_ptr_align_shift_v(void)
{
	return 0x00000008U;
}
static inline u32 ram_rl_entry_chan_userd_align_shift_v(void)
{
	return 0x00000008U;
}
static inline u32 ram_rl_entry_chan_inst_ptr_align_shift_v(void)
{
	return 0x0000000cU;
}
#endif

View File

@@ -0,0 +1,299 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_therm_gv100_h_
#define _hw_therm_gv100_h_
/* THERM configuration registers (see file header for the _r/_f/_m/_v
 * suffix convention). */
static inline u32 therm_weight_1_r(void)
{
	return 0x00020024U;
}
static inline u32 therm_config1_r(void)
{
	return 0x00020050U;
}
static inline u32 therm_config2_r(void)
{
	return 0x00020130U;
}
static inline u32 therm_config2_slowdown_factor_extended_f(u32 v)
{
	return (v & 0x1U) << 24U;
}
static inline u32 therm_config2_grad_enable_f(u32 v)
{
	return (v & 0x1U) << 31U;
}
/*
 * Per-engine clock gating control registers (array indexed by engine).
 * eng_clk/blk_clk select run / automatic / stopped gating modes; the
 * idle filter fields (mant * 2^exp) set how long an engine must be idle
 * before gating kicks in, with programmable delays before and after.
 */
static inline u32 therm_gate_ctrl_r(u32 i)
{
	return 0x00020200U + i*4U;
}
static inline u32 therm_gate_ctrl_eng_clk_m(void)
{
	return 0x3U << 0U;
}
static inline u32 therm_gate_ctrl_eng_clk_run_f(void)
{
	return 0x0U;
}
static inline u32 therm_gate_ctrl_eng_clk_auto_f(void)
{
	return 0x1U;
}
static inline u32 therm_gate_ctrl_eng_clk_stop_f(void)
{
	return 0x2U;
}
static inline u32 therm_gate_ctrl_blk_clk_m(void)
{
	return 0x3U << 2U;
}
static inline u32 therm_gate_ctrl_blk_clk_run_f(void)
{
	return 0x0U;
}
static inline u32 therm_gate_ctrl_blk_clk_auto_f(void)
{
	return 0x4U;
}
static inline u32 therm_gate_ctrl_idle_holdoff_m(void)
{
	return 0x1U << 4U;
}
static inline u32 therm_gate_ctrl_idle_holdoff_off_f(void)
{
	return 0x0U;
}
static inline u32 therm_gate_ctrl_idle_holdoff_on_f(void)
{
	return 0x10U;
}
static inline u32 therm_gate_ctrl_eng_idle_filt_exp_f(u32 v)
{
	return (v & 0x1fU) << 8U;
}
static inline u32 therm_gate_ctrl_eng_idle_filt_exp_m(void)
{
	return 0x1fU << 8U;
}
static inline u32 therm_gate_ctrl_eng_idle_filt_mant_f(u32 v)
{
	return (v & 0x7U) << 13U;
}
static inline u32 therm_gate_ctrl_eng_idle_filt_mant_m(void)
{
	return 0x7U << 13U;
}
static inline u32 therm_gate_ctrl_eng_delay_before_f(u32 v)
{
	return (v & 0xfU) << 16U;
}
static inline u32 therm_gate_ctrl_eng_delay_before_m(void)
{
	return 0xfU << 16U;
}
static inline u32 therm_gate_ctrl_eng_delay_after_f(u32 v)
{
	return (v & 0xfU) << 20U;
}
static inline u32 therm_gate_ctrl_eng_delay_after_m(void)
{
	return 0xfU << 20U;
}
/* Idle filters for FECS and HUBMMU (full 32-bit value fields). */
static inline u32 therm_fecs_idle_filter_r(void)
{
	return 0x00020288U;
}
static inline u32 therm_fecs_idle_filter_value_m(void)
{
	return 0xffffffffU << 0U;
}
static inline u32 therm_hubmmu_idle_filter_r(void)
{
	return 0x0002028cU;
}
static inline u32 therm_hubmmu_idle_filter_value_m(void)
{
	return 0xffffffffU << 0U;
}
/* Idle clock slowdown control (array of clocks). */
static inline u32 therm_clk_slowdown_r(u32 i)
{
	return 0x00020160U + i*4U;
}
static inline u32 therm_clk_slowdown_idle_factor_f(u32 v)
{
	return (v & 0x3fU) << 16U;
}
static inline u32 therm_clk_slowdown_idle_factor_m(void)
{
	return 0x3fU << 16U;
}
static inline u32 therm_clk_slowdown_idle_factor_v(u32 r)
{
	return (r >> 16U) & 0x3fU;
}
static inline u32 therm_clk_slowdown_idle_factor_disabled_f(void)
{
	return 0x0U;
}
/*
 * Gradual (stepped) thermal slowdown table: five 6-bit slowdown factor
 * fields per entry.  The factor0 fpdiv_* constants encode fractional
 * post-divider ratios (by 1.5 / 2 / 4 / 8).
 */
static inline u32 therm_grad_stepping_table_r(u32 i)
{
	return 0x000202c8U + i*4U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor0_f(u32 v)
{
	return (v & 0x3fU) << 0U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor0_m(void)
{
	return 0x3fU << 0U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by1p5_f(void)
{
	return 0x1U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by2_f(void)
{
	return 0x2U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by4_f(void)
{
	return 0x6U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f(void)
{
	return 0xeU;
}
static inline u32 therm_grad_stepping_table_slowdown_factor1_f(u32 v)
{
	return (v & 0x3fU) << 6U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor1_m(void)
{
	return 0x3fU << 6U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor2_f(u32 v)
{
	return (v & 0x3fU) << 12U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor2_m(void)
{
	return 0x3fU << 12U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor3_f(u32 v)
{
	return (v & 0x3fU) << 18U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor3_m(void)
{
	return 0x3fU << 18U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor4_f(u32 v)
{
	return (v & 0x3fU) << 24U;
}
static inline u32 therm_grad_stepping_table_slowdown_factor4_m(void)
{
	return 0x3fU << 24U;
}
/* Gradual stepping feature enable and pdiv step duration. */
static inline u32 therm_grad_stepping0_r(void)
{
	return 0x000202c0U;
}
static inline u32 therm_grad_stepping0_feature_s(void)
{
	return 1U;
}
static inline u32 therm_grad_stepping0_feature_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 therm_grad_stepping0_feature_m(void)
{
	return 0x1U << 0U;
}
static inline u32 therm_grad_stepping0_feature_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 therm_grad_stepping0_feature_enable_f(void)
{
	return 0x1U;
}
static inline u32 therm_grad_stepping1_r(void)
{
	return 0x000202c4U;
}
static inline u32 therm_grad_stepping1_pdiv_duration_f(u32 v)
{
	return (v & 0x1ffffU) << 0U;
}
/* Per-clock timing control: gradual-slowdown enable bit. */
static inline u32 therm_clk_timing_r(u32 i)
{
	return 0x000203c0U + i*4U;
}
static inline u32 therm_clk_timing_grad_slowdown_f(u32 v)
{
	return (v & 0x1U) << 16U;
}
static inline u32 therm_clk_timing_grad_slowdown_m(void)
{
	return 0x1U << 16U;
}
static inline u32 therm_clk_timing_grad_slowdown_enabled_f(void)
{
	return 0x10000U;
}
#endif

View File

@@ -0,0 +1,115 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_timer_gv100_h_
#define _hw_timer_gv100_h_
/*
 * PTIMER registers: PRI (register bus) access timeout control/status
 * and the free-running 64-bit nanosecond timestamp (time_0/time_1).
 */
static inline u32 timer_pri_timeout_r(void)
{
	return 0x00009080U;
}
/* Timeout period, 24-bit field at bit 0. */
static inline u32 timer_pri_timeout_period_f(u32 v)
{
	return (v & 0xffffffU) << 0U;
}
static inline u32 timer_pri_timeout_period_m(void)
{
	return 0xffffffU << 0U;
}
static inline u32 timer_pri_timeout_period_v(u32 r)
{
	return (r >> 0U) & 0xffffffU;
}
/* Timeout enable, single bit at position 31. */
static inline u32 timer_pri_timeout_en_f(u32 v)
{
	return (v & 0x1U) << 31U;
}
static inline u32 timer_pri_timeout_en_m(void)
{
	return 0x1U << 31U;
}
static inline u32 timer_pri_timeout_en_v(u32 r)
{
	return (r >> 31U) & 0x1U;
}
static inline u32 timer_pri_timeout_en_en_enabled_f(void)
{
	return 0x80000000U;
}
static inline u32 timer_pri_timeout_en_en_disabled_f(void)
{
	return 0x0U;
}
/* Saved state of the faulting PRI access when a timeout fires. */
static inline u32 timer_pri_timeout_save_0_r(void)
{
	return 0x00009084U;
}
static inline u32 timer_pri_timeout_save_1_r(void)
{
	return 0x00009088U;
}
static inline u32 timer_pri_timeout_fecs_errcode_r(void)
{
	return 0x0000908cU;
}
/* Timestamp: time_0 is the low word, time_1 the high word. */
static inline u32 timer_time_0_r(void)
{
	return 0x00009400U;
}
static inline u32 timer_time_1_r(void)
{
	return 0x00009410U;
}
#endif

View File

@@ -0,0 +1,235 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_top_gv100_h_
#define _hw_top_gv100_h_
/* TOP unit-count registers: chip configuration (GPC/TPC/FBP/LTC/CE
 * counts), each a 5-bit value field at bit 0. */
static inline u32 top_num_gpcs_r(void)
{
	return 0x00022430U;
}
static inline u32 top_num_gpcs_value_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 top_tpc_per_gpc_r(void)
{
	return 0x00022434U;
}
static inline u32 top_tpc_per_gpc_value_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 top_num_fbps_r(void)
{
	return 0x00022438U;
}
static inline u32 top_num_fbps_value_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 top_ltc_per_fbp_r(void)
{
	return 0x00022450U;
}
static inline u32 top_ltc_per_fbp_value_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 top_slices_per_ltc_r(void)
{
	return 0x0002245cU;
}
static inline u32 top_slices_per_ltc_value_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 top_num_ltcs_r(void)
{
	return 0x00022454U;
}
static inline u32 top_num_ces_r(void)
{
	return 0x00022444U;
}
static inline u32 top_num_ces_value_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
/*
 * Device info table: 64 entries describing the engines present on the
 * chip.  Each entry is tagged by its low two bits (entry_v) as
 * not_valid / enum / data; 'chain' set means the description continues
 * in the next entry.  Enum entries carry engine/runlist/intr/reset ids;
 * data entries carry the pri base, instance id and fault id.
 */
static inline u32 top_device_info_r(u32 i)
{
	return 0x00022700U + i*4U;
}
static inline u32 top_device_info__size_1_v(void)
{
	return 0x00000040U;
}
static inline u32 top_device_info_chain_v(u32 r)
{
	return (r >> 31U) & 0x1U;
}
static inline u32 top_device_info_chain_enable_v(void)
{
	return 0x00000001U;
}
static inline u32 top_device_info_engine_enum_v(u32 r)
{
	return (r >> 26U) & 0xfU;
}
static inline u32 top_device_info_runlist_enum_v(u32 r)
{
	return (r >> 21U) & 0xfU;
}
static inline u32 top_device_info_intr_enum_v(u32 r)
{
	return (r >> 15U) & 0x1fU;
}
static inline u32 top_device_info_reset_enum_v(u32 r)
{
	return (r >> 9U) & 0x1fU;
}
/* Engine type of an enum entry (graphics / copy / LCE, ...). */
static inline u32 top_device_info_type_enum_v(u32 r)
{
	return (r >> 2U) & 0x1fffffffU;
}
static inline u32 top_device_info_type_enum_graphics_v(void)
{
	return 0x00000000U;
}
static inline u32 top_device_info_type_enum_graphics_f(void)
{
	return 0x0U;
}
static inline u32 top_device_info_type_enum_copy2_v(void)
{
	return 0x00000003U;
}
static inline u32 top_device_info_type_enum_copy2_f(void)
{
	return 0xcU;
}
static inline u32 top_device_info_type_enum_lce_v(void)
{
	return 0x00000013U;
}
static inline u32 top_device_info_type_enum_lce_f(void)
{
	return 0x4cU;
}
/* Valid bits: which of the enum ids above are present in this entry. */
static inline u32 top_device_info_engine_v(u32 r)
{
	return (r >> 5U) & 0x1U;
}
static inline u32 top_device_info_runlist_v(u32 r)
{
	return (r >> 4U) & 0x1U;
}
static inline u32 top_device_info_intr_v(u32 r)
{
	return (r >> 3U) & 0x1U;
}
static inline u32 top_device_info_reset_v(u32 r)
{
	return (r >> 2U) & 0x1U;
}
/* Entry kind tag (low two bits). */
static inline u32 top_device_info_entry_v(u32 r)
{
	return (r >> 0U) & 0x3U;
}
static inline u32 top_device_info_entry_not_valid_v(void)
{
	return 0x00000000U;
}
static inline u32 top_device_info_entry_enum_v(void)
{
	return 0x00000002U;
}
static inline u32 top_device_info_entry_data_v(void)
{
	return 0x00000001U;
}
/* Data-entry fields. */
static inline u32 top_device_info_data_type_v(u32 r)
{
	return (r >> 30U) & 0x1U;
}
static inline u32 top_device_info_data_type_enum2_v(void)
{
	return 0x00000000U;
}
static inline u32 top_device_info_data_inst_id_v(u32 r)
{
	return (r >> 26U) & 0xfU;
}
/* pri base is stored >> 12 (align shift below). */
static inline u32 top_device_info_data_pri_base_v(u32 r)
{
	return (r >> 12U) & 0xfffU;
}
static inline u32 top_device_info_data_pri_base_align_v(void)
{
	return 0x0000000cU;
}
static inline u32 top_device_info_data_fault_id_enum_v(u32 r)
{
	return (r >> 3U) & 0x7fU;
}
static inline u32 top_device_info_data_fault_id_v(u32 r)
{
	return (r >> 2U) & 0x1U;
}
static inline u32 top_device_info_data_fault_id_valid_v(void)
{
	return 0x00000001U;
}
#endif

View File

@@ -0,0 +1,95 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef _hw_usermode_gv100_h_
#define _hw_usermode_gv100_h_
/*
 * USERMODE region — the user-space-mappable register window introduced
 * on Volta.  cfg0 reports the class id (0xc361 = VOLTA_USERMODE_A),
 * time_0/time_1 expose the nanosecond timestamp, and
 * notify_channel_pending is the doorbell used to kick a channel/TSG.
 */
static inline u32 usermode_cfg0_r(void)
{
	return 0x00810000U;
}
static inline u32 usermode_cfg0_class_id_f(u32 v)
{
	return (v & 0xffffU) << 0U;
}
static inline u32 usermode_cfg0_class_id_value_v(void)
{
	return 0x0000c361U;
}
static inline u32 usermode_time_0_r(void)
{
	return 0x00810080U;
}
static inline u32 usermode_time_0_nsec_f(u32 v)
{
	return (v & 0x7ffffffU) << 5U;
}
static inline u32 usermode_time_1_r(void)
{
	return 0x00810084U;
}
static inline u32 usermode_time_1_nsec_f(u32 v)
{
	return (v & 0x1fffffffU) << 0U;
}
static inline u32 usermode_notify_channel_pending_r(void)
{
	return 0x00810090U;
}
static inline u32 usermode_notify_channel_pending_id_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
#endif

Some files were not shown because too many files have changed in this diff Show More