gpu: nvgpu: Fold T19x code back to main code paths

Many code paths were split into T19x-specific code paths and structs
because of the split repository. Now that the repositories are merged,
fold all of them back into the main code paths and structs and remove
the T19x-specific Kconfig flag.

Change-Id: Id0d17a5f0610fc0b49f51ab6664e716dc8b222b6
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1640606
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit f3f14cdff5 (parent 193a2ed38c)
Author: Terje Bergstrom, 2018-01-17 12:39:13 -08:00
Committed by: mobile promotions
77 changed files with 439 additions and 1160 deletions
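Nearly every hunk below applies the same mechanical transformation: code that used to be compiled only under CONFIG_TEGRA_19x_GPU and reached through t19x_*() wrappers or .t19x sub-structs is now built unconditionally, renamed with an nvgpu_ prefix, and its fields are folded into the parent struct. The following stand-alone C sketch models that shape with hypothetical names (struct os_state, init_usermode_support()); it is not kernel code, and the real counterparts are nvgpu_os_linux and nvgpu_init_usermode_support() in the hunks below.

#include <stdio.h>

/* Before the fold, a T19x-only sub-struct hung off the parent behind
 * #ifdef CONFIG_TEGRA_19x_GPU; after the fold its members live directly
 * in the parent and are always present. (Hypothetical model.) */
struct os_state {
	void *regs;
	void *usermode_regs;       /* was t19x.usermode_regs */
	void *usermode_regs_saved; /* was t19x.usermode_regs_saved */
};

/* Stands in for what used to be t19x_init_support(), built only for T19x;
 * after the fold there is a single unconditional code path. */
static void init_usermode_support(struct os_state *s)
{
	s->usermode_regs = s->regs; /* the real code offsets by usermode_cfg0_r() */
	s->usermode_regs_saved = s->usermode_regs;
}

int main(void)
{
	char bar0[16];
	struct os_state s = { .regs = bar0, .usermode_regs = NULL,
			      .usermode_regs_saved = NULL };

	init_usermode_support(&s); /* no #ifdef at the call site any more */
	printf("usermode regs mapped at %p\n", s.usermode_regs);
	return 0;
}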


@@ -128,10 +128,3 @@ config GK20A_VIDMEM
 Enable support for using and allocating buffers in a distinct video
 memory aperture (in contrast to general system memory), available on
 GPUs that have their own banks. PCIe GPUs have this, for example.
-
-config TEGRA_19x_GPU
-bool "Tegra 19x family GPU"
-depends on GK20A && ARCH_TEGRA_19x_SOC
-default y
-help
-Support for NVIDIA Tegra 19x family of GPU


@@ -18,6 +18,7 @@ obj-$(CONFIG_GK20A) := nvgpu.o
 nvgpu-y := \
 common/linux/module.o \
+common/linux/module_usermode.o \
 common/linux/kmem.o \
 common/linux/timers.o \
 common/linux/ioctl.o \
@@ -40,6 +41,7 @@ nvgpu-y := \
 common/linux/sysfs.o \
 common/linux/cde.o \
 common/linux/io.o \
+common/linux/io_usermode.o \
 common/linux/rwsem.o \
 common/linux/cde_gm20b.o \
 common/linux/cde_gp10b.o \
@@ -148,9 +150,16 @@ endif
 nvgpu-$(CONFIG_GK20A_CTXSW_TRACE) += \
 common/linux/ctxsw_trace.o
-nvgpu-$(CONFIG_TEGRA_GK20A) += common/linux/platform_gk20a_tegra.o
+nvgpu-$(CONFIG_TEGRA_GK20A) += \
+common/linux/platform_gk20a_tegra.o \
+common/linux/platform_gp10b_tegra.o \
+common/linux/platform_gv11b_tegra.o
 nvgpu-$(CONFIG_SYNC) += gk20a/sync_gk20a.o
-nvgpu-$(CONFIG_GK20A_PCI) += common/linux/pci.o
+nvgpu-$(CONFIG_GK20A_PCI) += common/linux/pci.o \
+common/linux/pci_usermode.o \
 nvgpu-$(CONFIG_TEGRA_GK20A_NVHOST) += common/linux/nvhost.o
 nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
@@ -211,6 +220,32 @@ nvgpu-y += \
 gp106/regops_gp106.o \
 gp106/bios_gp106.o \
 gp106/fuse_gp106.o \
+gv11b/gv11b.o \
+gv11b/css_gr_gv11b.o \
+gv11b/dbg_gpu_gv11b.o \
+gv11b/mc_gv11b.o \
+gv11b/ltc_gv11b.o \
+gv11b/hal_gv11b.o \
+gv11b/gv11b_gating_reglist.o \
+gv11b/gr_gv11b.o \
+gv11b/fb_gv11b.o \
+gv11b/fifo_gv11b.o \
+gv11b/mm_gv11b.o \
+gv11b/ce_gv11b.o \
+gv11b/gr_ctx_gv11b.o \
+gv11b/pmu_gv11b.o \
+gv11b/acr_gv11b.o \
+gv11b/subctx_gv11b.o \
+gv11b/regops_gv11b.o \
+gv11b/therm_gv11b.o \
+gv100/mm_gv100.o \
+gv100/gr_ctx_gv100.o \
+gv100/fb_gv100.o \
+gv100/bios_gv100.o \
+gv100/fifo_gv100.o \
+gv100/gr_gv100.o \
+gv100/regops_gv100.o \
+gv100/hal_gv100.o \
 pstate/pstate.o \
 clk/clk_vin.o \
 clk/clk_fll.o \
@@ -245,7 +280,6 @@ nvgpu-y += \
 lpwr/rppg.o \
 lpwr/lpwr.o
-nvgpu-$(CONFIG_TEGRA_GK20A) += common/linux/platform_gp10b_tegra.o
 nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
 common/linux/vgpu/gp10b/vgpu_hal_gp10b.o \
@@ -253,43 +287,6 @@ nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
 common/linux/vgpu/gp10b/vgpu_fuse_gp10b.o \
 common/linux/vgpu/gp10b/vgpu_mm_gp10b.o
-ifeq ($(CONFIG_ARCH_TEGRA_19x_SOC),y)
-nvgpu-y += \
-common/mm/gmmu_t19x.o \
-common/linux/ioctl_tsg_t19x.o \
-common/linux/ioctl_ctrl_t19x.o \
-common/linux/io_t19x.o \
-common/linux/module_t19x.o \
-common/linux/pci_t19x.o \
-gv11b/gv11b.o \
-gv11b/css_gr_gv11b.o \
-gv11b/dbg_gpu_gv11b.o \
-gv11b/mc_gv11b.o \
-gv11b/ltc_gv11b.o \
-gv11b/hal_gv11b.o \
-gv11b/gv11b_gating_reglist.o \
-gv11b/gr_gv11b.o \
-gv11b/fb_gv11b.o \
-gv11b/fifo_gv11b.o \
-gv11b/mm_gv11b.o \
-gv11b/ce_gv11b.o \
-gv11b/gr_ctx_gv11b.o \
-gv11b/pmu_gv11b.o \
-gv11b/acr_gv11b.o \
-gv11b/subctx_gv11b.o \
-gv11b/regops_gv11b.o \
-gv11b/therm_gv11b.o \
-gv100/mm_gv100.o \
-gv100/gr_ctx_gv100.o \
-gv100/fb_gv100.o \
-gv100/bios_gv100.o \
-gv100/fifo_gv100.o \
-gv100/gr_gv100.o \
-gv100/regops_gv100.o \
-gv100/hal_gv100.o
-nvgpu-$(CONFIG_TEGRA_GK20A) += common/linux/platform_gv11b_tegra.o
-nvgpu-$(CONFIG_TEGRA_GK20A_NVHOST) += common/linux/nvhost_t19x.o
 nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
 common/linux/vgpu/gv11b/platform_gv11b_vgpu_tegra.o \
@@ -299,4 +296,3 @@ nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
 common/linux/vgpu/gv11b/vgpu_fifo_gv11b.o \
 common/linux/vgpu/gv11b/vgpu_subctx_gv11b.o \
 common/linux/vgpu/gv11b/vgpu_tsg_gv11b.o
-endif


@@ -1,33 +0,0 @@
/*
* NVIDIA T19x Channel info
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NVGPU_CHANNEL_T19X_H_
#define _NVGPU_CHANNEL_T19X_H_
struct channel_t19x {
u32 subctx_id;
u32 runqueue_sel;
};
#endif


@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -19,10 +19,10 @@
 #include <nvgpu/hw/gv11b/hw_usermode_gv11b.h>
-void gv11b_usermode_writel(struct gk20a *g, u32 r, u32 v)
+void nvgpu_usermode_writel(struct gk20a *g, u32 r, u32 v)
 {
 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
-void __iomem *reg = l->t19x.usermode_regs + (r - usermode_cfg0_r());
+void __iomem *reg = l->usermode_regs + (r - usermode_cfg0_r());
 writel_relaxed(v, reg);
 gk20a_dbg(gpu_dbg_reg, "usermode r=0x%x v=0x%x", r, v);


@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2011-2017, NVIDIA Corporation. All rights reserved.
+* Copyright (c) 2011-2018, NVIDIA Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -35,9 +35,6 @@
 #include "ioctl_ctrl.h"
 #include "ioctl_dbg.h"
 #include "ioctl_as.h"
-#ifdef CONFIG_TEGRA_19x_GPU
-#include "common/linux/ioctl_ctrl_t19x.h"
-#endif
 #include "ioctl_tsg.h"
 #include "ioctl_channel.h"
 #include "gk20a/gk20a.h"
@@ -173,6 +170,8 @@ static struct nvgpu_flags_mapping flags_mapping[] = {
 NVGPU_ECC_ENABLED_TEX},
 {NVGPU_GPU_FLAGS_ECC_ENABLED_LTC,
 NVGPU_ECC_ENABLED_LTC},
+{NVGPU_GPU_FLAGS_SUPPORT_TSG_SUBCONTEXTS,
+NVGPU_SUPPORT_TSG_SUBCONTEXTS},
 };
 static u64 nvgpu_ctrl_ioctl_gpu_characteristics_flags(struct gk20a *g)
@@ -240,9 +239,7 @@ gk20a_ctrl_ioctl_gpu_characteristics(
 gpu.gpc_mask = (1 << g->gr.gpc_count)-1;
 gpu.flags = nvgpu_ctrl_ioctl_gpu_characteristics_flags(g);
-#ifdef CONFIG_TEGRA_19x_GPU
-gpu.flags |= nvgpu_ctrl_ioctl_gpu_characteristics_flags_t19x(g);
-#endif
 gpu.arch = g->params.gpu_arch;
 gpu.impl = g->params.gpu_impl;
 gpu.rev = g->params.gpu_rev;


@@ -1,33 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <uapi/linux/nvgpu.h>
#include <nvgpu/types.h>
#include <nvgpu/enabled.h>
#include <nvgpu/enabled_t19x.h>
#include "ioctl_ctrl_t19x.h"
#include "common/linux/os_linux.h"
#include "gk20a/gk20a.h"
u64 nvgpu_ctrl_ioctl_gpu_characteristics_flags_t19x(struct gk20a *g)
{
u64 ioctl_flags = 0;
if (nvgpu_is_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS))
ioctl_flags |= NVGPU_GPU_FLAGS_SUPPORT_TSG_SUBCONTEXTS;
return ioctl_flags;
}


@@ -1,23 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef _NVGPU_IOCTL_CTRL_T19X
#define _NVGPU_IOCTL_CTRL_T19X
#include <nvgpu/types.h>
struct gk20a;
u64 nvgpu_ctrl_ioctl_gpu_characteristics_flags_t19x(struct gk20a *g);
#endif


@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -27,13 +27,11 @@
 #include "gk20a/gk20a.h"
 #include "gk20a/tsg_gk20a.h"
+#include "gv11b/fifo_gv11b.h"
 #include "platform_gk20a.h"
 #include "ioctl_tsg.h"
 #include "ioctl_channel.h"
 #include "os_linux.h"
-#ifdef CONFIG_TEGRA_19x_GPU
-#include "common/linux/ioctl_tsg_t19x.h"
-#endif
 struct tsg_private {
 struct gk20a *g;
@@ -55,6 +53,72 @@ static int gk20a_tsg_bind_channel_fd(struct tsg_gk20a *tsg, int ch_fd)
 return err;
 }
+static int gk20a_tsg_ioctl_bind_channel_ex(struct gk20a *g,
+struct tsg_gk20a *tsg, struct nvgpu_tsg_bind_channel_ex_args *arg)
+{
+struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
+struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
+struct channel_gk20a *ch;
+struct gr_gk20a *gr = &g->gr;
+int err = 0;
+nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
+nvgpu_mutex_acquire(&sched->control_lock);
+if (sched->control_locked) {
+err = -EPERM;
+goto mutex_release;
+}
+err = gk20a_busy(g);
+if (err) {
+nvgpu_err(g, "failed to power on gpu");
+goto mutex_release;
+}
+ch = gk20a_get_channel_from_file(arg->channel_fd);
+if (!ch) {
+err = -EINVAL;
+goto idle;
+}
+if (arg->tpc_pg_enabled && (!tsg->tpc_num_initialized)) {
+if ((arg->num_active_tpcs > gr->max_tpc_count) ||
+!(arg->num_active_tpcs)) {
+nvgpu_err(g, "Invalid num of active TPCs");
+err = -EINVAL;
+goto ch_put;
+}
+tsg->tpc_num_initialized = true;
+tsg->num_active_tpcs = arg->num_active_tpcs;
+tsg->tpc_pg_enabled = true;
+} else {
+tsg->tpc_pg_enabled = false;
+nvgpu_log(g, gpu_dbg_info, "dynamic TPC-PG not enabled");
+}
+if (arg->subcontext_id < g->fifo.max_subctx_count) {
+ch->subctx_id = arg->subcontext_id;
+} else {
+err = -EINVAL;
+goto ch_put;
+}
+nvgpu_log(g, gpu_dbg_info, "channel id : %d : subctx: %d",
+ch->chid, ch->subctx_id);
+/* Use runqueue selector 1 for all ASYNC ids */
+if (ch->subctx_id > CHANNEL_INFO_VEID0)
+ch->runqueue_sel = 1;
+err = ch->g->ops.fifo.tsg_bind_channel(tsg, ch);
+ch_put:
+gk20a_channel_put(ch);
+idle:
+gk20a_idle(g);
+mutex_release:
+nvgpu_mutex_release(&sched->control_lock);
+return err;
+}
 static int gk20a_tsg_get_event_data_from_id(struct tsg_gk20a *tsg,
 unsigned int event_id,
 struct gk20a_event_id_data **event_id_data)
@@ -478,6 +542,13 @@ long nvgpu_ioctl_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
 break;
 }
+case NVGPU_TSG_IOCTL_BIND_CHANNEL_EX:
+{
+err = gk20a_tsg_ioctl_bind_channel_ex(g, tsg,
+(struct nvgpu_tsg_bind_channel_ex_args *)buf);
+break;
+}
 case NVGPU_TSG_IOCTL_UNBIND_CHANNEL:
 /* We do not support explicitly unbinding channel from TSG.
 * Channel will be unbounded from TSG when it is closed.
@@ -550,13 +621,9 @@ long nvgpu_ioctl_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
 }
 default:
-#ifdef CONFIG_TEGRA_19x_GPU
-err = t19x_tsg_ioctl_handler(g, tsg, cmd, buf);
-#else
 nvgpu_err(g, "unrecognized tsg gpu ioctl cmd: 0x%x",
 cmd);
 err = -ENOTTY;
-#endif
 break;
 }


@@ -1,115 +0,0 @@
/*
* GV11B TSG IOCTL Handler
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/types.h>
#include <uapi/linux/nvgpu.h>
#include "gk20a/gk20a.h"
#include "gv11b/fifo_gv11b.h"
#include "gv11b/subctx_gv11b.h"
#include "ioctl_tsg_t19x.h"
#include "common/linux/os_linux.h"
static int gv11b_tsg_ioctl_bind_channel_ex(struct gk20a *g,
struct tsg_gk20a *tsg, struct nvgpu_tsg_bind_channel_ex_args *arg)
{
struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
struct channel_gk20a *ch;
struct gr_gk20a *gr = &g->gr;
int err = 0;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
nvgpu_mutex_acquire(&sched->control_lock);
if (sched->control_locked) {
err = -EPERM;
goto mutex_release;
}
err = gk20a_busy(g);
if (err) {
nvgpu_err(g, "failed to power on gpu");
goto mutex_release;
}
ch = gk20a_get_channel_from_file(arg->channel_fd);
if (!ch) {
err = -EINVAL;
goto idle;
}
if (arg->tpc_pg_enabled && (!tsg->t19x.tpc_num_initialized)) {
if ((arg->num_active_tpcs > gr->max_tpc_count) ||
!(arg->num_active_tpcs)) {
nvgpu_err(g, "Invalid num of active TPCs");
err = -EINVAL;
goto ch_put;
}
tsg->t19x.tpc_num_initialized = true;
tsg->t19x.num_active_tpcs = arg->num_active_tpcs;
tsg->t19x.tpc_pg_enabled = true;
} else {
tsg->t19x.tpc_pg_enabled = false;
nvgpu_log(g, gpu_dbg_info, "dynamic TPC-PG not enabled");
}
if (arg->subcontext_id < g->fifo.t19x.max_subctx_count) {
ch->t19x.subctx_id = arg->subcontext_id;
} else {
err = -EINVAL;
goto ch_put;
}
nvgpu_log(g, gpu_dbg_info, "channel id : %d : subctx: %d",
ch->chid, ch->t19x.subctx_id);
/* Use runqueue selector 1 for all ASYNC ids */
if (ch->t19x.subctx_id > CHANNEL_INFO_VEID0)
ch->t19x.runqueue_sel = 1;
err = ch->g->ops.fifo.tsg_bind_channel(tsg, ch);
ch_put:
gk20a_channel_put(ch);
idle:
gk20a_idle(g);
mutex_release:
nvgpu_mutex_release(&sched->control_lock);
return err;
}
int t19x_tsg_ioctl_handler(struct gk20a *g, struct tsg_gk20a *tsg,
unsigned int cmd, u8 *buf)
{
int err = 0;
nvgpu_log(g, gpu_dbg_fn, "t19x_tsg_ioctl_handler");
switch (cmd) {
case NVGPU_TSG_IOCTL_BIND_CHANNEL_EX:
{
err = gv11b_tsg_ioctl_bind_channel_ex(g, tsg,
(struct nvgpu_tsg_bind_channel_ex_args *)buf);
break;
}
default:
nvgpu_err(g, "unrecognized tsg gpu ioctl cmd: 0x%x",
cmd);
err = -ENOTTY;
break;
}
return err;
}


@@ -1,21 +0,0 @@
/*
* GV11B TSG IOCTL handler
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef _NVGPU_IOCTL_TSG_T19X
#define _NVGPU_IOCTL_TSG_T19X
int t19x_tsg_ioctl_handler(struct gk20a *g, struct tsg_gk20a *tsg,
unsigned int cmd, u8 *arg);
#endif


@@ -46,13 +46,11 @@
#include "scale.h" #include "scale.h"
#include "pci.h" #include "pci.h"
#include "module.h" #include "module.h"
#include "module_usermode.h"
#include "intr.h" #include "intr.h"
#include "cde.h" #include "cde.h"
#include "ioctl.h" #include "ioctl.h"
#include "sim.h" #include "sim.h"
#ifdef CONFIG_TEGRA_19x_GPU
#include "nvgpu_gpuid_t19x.h"
#endif
#include "os_linux.h" #include "os_linux.h"
#include "cde_gm20b.h" #include "cde_gm20b.h"
@@ -175,9 +173,7 @@ static int gk20a_restore_registers(struct gk20a *g)
l->regs = l->regs_saved; l->regs = l->regs_saved;
l->bar1 = l->bar1_saved; l->bar1 = l->bar1_saved;
#ifdef CONFIG_TEGRA_19x_GPU nvgpu_restore_usermode_registers(g);
t19x_restore_registers(g);
#endif
return 0; return 0;
} }
@@ -313,9 +309,7 @@ static int gk20a_lockout_registers(struct gk20a *g)
l->regs = NULL; l->regs = NULL;
l->bar1 = NULL; l->bar1 = NULL;
#ifdef CONFIG_TEGRA_19x_GPU nvgpu_lockout_usermode_registers(g);
t19x_lockout_registers(g);
#endif
return 0; return 0;
} }
@@ -384,14 +378,12 @@ static struct of_device_id tegra_gk20a_of_match[] = {
.data = &gm20b_tegra_platform }, .data = &gm20b_tegra_platform },
{ .compatible = "nvidia,tegra186-gp10b", { .compatible = "nvidia,tegra186-gp10b",
.data = &gp10b_tegra_platform }, .data = &gp10b_tegra_platform },
#ifdef CONFIG_TEGRA_19x_GPU { .compatible = "nvidia,gv11b",
{ .compatible = TEGRA_19x_GPU_COMPAT_TEGRA, .data = &gv11b_tegra_platform },
.data = &t19x_gpu_tegra_platform },
#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
{ .compatible = "nvidia,gv11b-vgpu", { .compatible = "nvidia,gv11b-vgpu",
.data = &gv11b_vgpu_tegra_platform}, .data = &gv11b_vgpu_tegra_platform},
#endif #endif
#endif
#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
{ .compatible = "nvidia,tegra124-gk20a-vgpu", { .compatible = "nvidia,tegra124-gk20a-vgpu",
.data = &vgpu_tegra_platform }, .data = &vgpu_tegra_platform },
@@ -669,9 +661,7 @@ void gk20a_remove_support(struct gk20a *g)
l->bar1 = NULL; l->bar1 = NULL;
} }
#ifdef CONFIG_TEGRA_19x_GPU nvgpu_remove_usermode_support(g);
t19x_remove_support(g);
#endif
nvgpu_free_enabled_flags(g); nvgpu_free_enabled_flags(g);
} }
@@ -721,9 +711,7 @@ static int gk20a_init_support(struct platform_device *dev)
goto fail; goto fail;
} }
#ifdef CONFIG_TEGRA_19x_GPU nvgpu_init_usermode_support(g);
t19x_init_support(g);
#endif
return 0; return 0;


@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -13,10 +13,6 @@
 #ifndef __NVGPU_COMMON_LINUX_MODULE_H__
 #define __NVGPU_COMMON_LINUX_MODULE_H__
-#ifdef CONFIG_TEGRA_19x_GPU
-#include <nvgpu/linux/module_t19x.h>
-#endif
 struct gk20a;
 struct device;
 struct nvgpu_os_linux;


@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -27,36 +27,36 @@
 * after the GPU has been turned off. On older chips these reads and writes can
 * also lock the entire CPU up.
 */
-void t19x_lockout_registers(struct gk20a *g)
+void nvgpu_lockout_usermode_registers(struct gk20a *g)
 {
 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
-l->t19x.usermode_regs = NULL;
+l->usermode_regs = NULL;
 }
 /*
 * Undoes t19x_lockout_registers().
 */
-void t19x_restore_registers(struct gk20a *g)
+void nvgpu_restore_usermode_registers(struct gk20a *g)
 {
 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
-l->t19x.usermode_regs = l->t19x.usermode_regs_saved;
+l->usermode_regs = l->usermode_regs_saved;
 }
-void t19x_remove_support(struct gk20a *g)
+void nvgpu_remove_usermode_support(struct gk20a *g)
 {
 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
-if (l->t19x.usermode_regs) {
-l->t19x.usermode_regs = NULL;
+if (l->usermode_regs) {
+l->usermode_regs = NULL;
 }
 }
-void t19x_init_support(struct gk20a *g)
+void nvgpu_init_usermode_support(struct gk20a *g)
 {
 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
-l->t19x.usermode_regs = l->regs + usermode_cfg0_r();
-l->t19x.usermode_regs_saved = l->t19x.usermode_regs;
+l->usermode_regs = l->regs + usermode_cfg0_r();
+l->usermode_regs_saved = l->usermode_regs;
 }


@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -19,9 +19,9 @@
 struct gk20a;
-void t19x_init_support(struct gk20a *g);
-void t19x_remove_support(struct gk20a *g);
-void t19x_lockout_registers(struct gk20a *g);
-void t19x_restore_registers(struct gk20a *g);
+void nvgpu_init_usermode_support(struct gk20a *g);
+void nvgpu_remove_usermode_support(struct gk20a *g);
+void nvgpu_lockout_usermode_registers(struct gk20a *g);
+void nvgpu_restore_usermode_registers(struct gk20a *g);
 #endif


@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -15,6 +15,7 @@
 */
 #include <linux/nvhost.h>
+#include <linux/nvhost_t194.h>
 #include <linux/nvhost_ioctl.h>
 #include <linux/of_platform.h>
@@ -210,3 +211,18 @@ struct sync_fence *nvgpu_nvhost_sync_create_fence(
 return nvhost_sync_create_fence(nvhost_dev->host1x_pdev, &pt, 1, name);
 }
 #endif /* CONFIG_SYNC */
+#ifdef CONFIG_TEGRA_T19X_GRHOST
+int nvgpu_nvhost_syncpt_unit_interface_get_aperture(
+struct nvgpu_nvhost_dev *nvhost_dev,
+u64 *base, size_t *size)
+{
+return nvhost_syncpt_unit_interface_get_aperture(
+nvhost_dev->host1x_pdev, (phys_addr_t *)base, size);
+}
+u32 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(u32 syncpt_id)
+{
+return nvhost_syncpt_unit_interface_get_byte_offset(syncpt_id);
+}
+#endif


@@ -1,35 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/nvhost.h>
#include <linux/nvhost_t194.h>
#include <nvgpu/nvhost_t19x.h>
#include "common/linux/nvhost_priv.h"
int nvgpu_nvhost_syncpt_unit_interface_get_aperture(
struct nvgpu_nvhost_dev *nvhost_dev,
u64 *base, size_t *size)
{
return nvhost_syncpt_unit_interface_get_aperture(
nvhost_dev->host1x_pdev, (phys_addr_t *)base, size);
}
u32 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(u32 syncpt_id)
{
return nvhost_syncpt_unit_interface_get_byte_offset(syncpt_id);
}


@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -20,9 +20,6 @@
 #include <linux/cdev.h>
 #include <linux/iommu.h>
-#ifdef CONFIG_TEGRA_19x_GPU
-#include <nvgpu/linux/os_linux_t19x.h>
-#endif
 #include "gk20a/gk20a.h"
 #include "cde.h"
 #include "sched.h"
@@ -114,9 +111,8 @@ struct nvgpu_os_linux {
 void __iomem *bar1;
 void __iomem *bar1_saved;
-#ifdef CONFIG_TEGRA_19x_GPU
-struct nvgpu_os_linux_t19x t19x;
-#endif
+void __iomem *usermode_regs;
+void __iomem *usermode_regs_saved;
 struct nvgpu_os_linux_ops ops;


@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -34,9 +34,7 @@
 #include "platform_gk20a.h"
 #include "pci.h"
-#ifdef CONFIG_TEGRA_19x_GPU
-#include <nvgpu/linux/pci_t19x.h>
-#endif
+#include "pci_usermode.h"
 #include "os_linux.h"
 #include "driver_common.h"
@@ -453,9 +451,7 @@ static int nvgpu_pci_init_support(struct pci_dev *pdev)
 goto fail;
 }
-#ifdef CONFIG_TEGRA_19x_GPU
-t19x_nvgpu_pci_init_support(l);
-#endif
+nvgpu_pci_init_usermode_support(l);
 return 0;


@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -17,8 +17,8 @@
 #include "common/linux/os_linux.h"
-void t19x_nvgpu_pci_init_support(struct nvgpu_os_linux *l)
+void nvgpu_pci_init_usermode_support(struct nvgpu_os_linux *l)
 {
-l->t19x.usermode_regs = l->regs + usermode_cfg0_r();
-l->t19x.usermode_regs_saved = l->t19x.usermode_regs;
+l->usermode_regs = l->regs + usermode_cfg0_r();
+l->usermode_regs_saved = l->usermode_regs;
 }


@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -13,11 +13,11 @@
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
-#ifndef __NVGPU_PCI_T19X_H__
-#define __NVGPU_PCI_T19X_H__
+#ifndef __NVGPU_PCI_USERMODE_H__
+#define __NVGPU_PCI_USERMODE_H__
 struct nvgpu_os_linux;
-void t19x_nvgpu_pci_init_support(struct nvgpu_os_linux *l);
+void nvgpu_pci_init_usermode_support(struct nvgpu_os_linux *l);
 #endif


@@ -1,7 +1,7 @@
 /*
 * GK20A Platform (SoC) Interface
 *
-* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -250,6 +250,7 @@ static inline struct gk20a_platform *gk20a_get_platform(
 #ifdef CONFIG_TEGRA_GK20A
 extern struct gk20a_platform gm20b_tegra_platform;
 extern struct gk20a_platform gp10b_tegra_platform;
+extern struct gk20a_platform gv11b_tegra_platform;
 #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
 extern struct gk20a_platform vgpu_tegra_platform;
 extern struct gk20a_platform gv11b_vgpu_tegra_platform;


@@ -26,7 +26,6 @@
 #include <linux/platform/tegra/emc_bwmgr.h>
 #include <nvgpu/nvhost.h>
-#include <nvgpu/nvhost_t19x.h>
 #include <uapi/linux/nvgpu.h>
@@ -44,7 +43,6 @@
 #include "os_linux.h"
 #include "platform_gk20a_tegra.h"
 #include "gv11b/gr_gv11b.h"
-#include "nvgpu_gpuid_t19x.h"
 static void gr_gv11b_remove_sysfs(struct device *dev);
@@ -203,7 +201,7 @@ static int gv11b_tegra_suspend(struct device *dev)
 return 0;
 }
-struct gk20a_platform t19x_gpu_tegra_platform = {
+struct gk20a_platform gv11b_tegra_platform = {
 .has_syncpoints = true,
 /* no cde. use sysmem compression */
@@ -297,7 +295,7 @@ void gr_gv11b_create_sysfs(struct gk20a *g)
 initialized multiple times but we only need to create the ECC
 stats once. Therefore, add the following check to avoid
 creating duplicate stat sysfs nodes. */
-if (g->ecc.gr.t19x.sm_l1_tag_corrected_err_count.counters != NULL)
+if (g->ecc.gr.sm_l1_tag_corrected_err_count.counters != NULL)
 return;
 gr_gp10b_create_sysfs(g);
@@ -305,61 +303,61 @@ void gr_gv11b_create_sysfs(struct gk20a *g)
 error |= gr_gp10b_ecc_stat_create(dev,
 0,
 "sm_l1_tag_ecc_corrected_err_count",
-&g->ecc.gr.t19x.sm_l1_tag_corrected_err_count,
+&g->ecc.gr.sm_l1_tag_corrected_err_count,
 &dev_attr_sm_l1_tag_ecc_corrected_err_count_array);
 error |= gr_gp10b_ecc_stat_create(dev,
 0,
 "sm_l1_tag_ecc_uncorrected_err_count",
-&g->ecc.gr.t19x.sm_l1_tag_uncorrected_err_count,
+&g->ecc.gr.sm_l1_tag_uncorrected_err_count,
 &dev_attr_sm_l1_tag_ecc_uncorrected_err_count_array);
 error |= gr_gp10b_ecc_stat_create(dev,
 0,
 "sm_cbu_ecc_corrected_err_count",
-&g->ecc.gr.t19x.sm_cbu_corrected_err_count,
+&g->ecc.gr.sm_cbu_corrected_err_count,
 &dev_attr_sm_cbu_ecc_corrected_err_count_array);
 error |= gr_gp10b_ecc_stat_create(dev,
 0,
 "sm_cbu_ecc_uncorrected_err_count",
-&g->ecc.gr.t19x.sm_cbu_uncorrected_err_count,
+&g->ecc.gr.sm_cbu_uncorrected_err_count,
 &dev_attr_sm_cbu_ecc_uncorrected_err_count_array);
 error |= gr_gp10b_ecc_stat_create(dev,
 0,
 "sm_l1_data_ecc_corrected_err_count",
-&g->ecc.gr.t19x.sm_l1_data_corrected_err_count,
+&g->ecc.gr.sm_l1_data_corrected_err_count,
 &dev_attr_sm_l1_data_ecc_corrected_err_count_array);
 error |= gr_gp10b_ecc_stat_create(dev,
 0,
 "sm_l1_data_ecc_uncorrected_err_count",
-&g->ecc.gr.t19x.sm_l1_data_uncorrected_err_count,
+&g->ecc.gr.sm_l1_data_uncorrected_err_count,
 &dev_attr_sm_l1_data_ecc_uncorrected_err_count_array);
 error |= gr_gp10b_ecc_stat_create(dev,
 0,
 "sm_icache_ecc_corrected_err_count",
-&g->ecc.gr.t19x.sm_icache_corrected_err_count,
+&g->ecc.gr.sm_icache_corrected_err_count,
 &dev_attr_sm_icache_ecc_corrected_err_count_array);
 error |= gr_gp10b_ecc_stat_create(dev,
 0,
 "sm_icache_ecc_uncorrected_err_count",
-&g->ecc.gr.t19x.sm_icache_uncorrected_err_count,
+&g->ecc.gr.sm_icache_uncorrected_err_count,
 &dev_attr_sm_icache_ecc_uncorrected_err_count_array);
 error |= gr_gp10b_ecc_stat_create(dev,
 0,
 "gcc_l15_ecc_corrected_err_count",
-&g->ecc.gr.t19x.gcc_l15_corrected_err_count,
+&g->ecc.gr.gcc_l15_corrected_err_count,
 &dev_attr_gcc_l15_ecc_corrected_err_count_array);
 error |= gr_gp10b_ecc_stat_create(dev,
 0,
 "gcc_l15_ecc_uncorrected_err_count",
-&g->ecc.gr.t19x.gcc_l15_uncorrected_err_count,
+&g->ecc.gr.gcc_l15_uncorrected_err_count,
 &dev_attr_gcc_l15_ecc_uncorrected_err_count_array);
 error |= gp10b_ecc_stat_create(dev,
@@ -368,7 +366,7 @@ void gr_gv11b_create_sysfs(struct gk20a *g)
 "ltc",
 NULL,
 "l2_cache_uncorrected_err_count",
-&g->ecc.ltc.t19x.l2_cache_uncorrected_err_count,
+&g->ecc.ltc.l2_cache_uncorrected_err_count,
 &dev_attr_l2_cache_ecc_uncorrected_err_count_array);
 error |= gp10b_ecc_stat_create(dev,
@@ -377,7 +375,7 @@ void gr_gv11b_create_sysfs(struct gk20a *g)
 "ltc",
 NULL,
 "l2_cache_corrected_err_count",
-&g->ecc.ltc.t19x.l2_cache_corrected_err_count,
+&g->ecc.ltc.l2_cache_corrected_err_count,
 &dev_attr_l2_cache_ecc_corrected_err_count_array);
 error |= gp10b_ecc_stat_create(dev,
@@ -386,7 +384,7 @@ void gr_gv11b_create_sysfs(struct gk20a *g)
 "gpc",
 NULL,
 "fecs_ecc_uncorrected_err_count",
-&g->ecc.gr.t19x.fecs_uncorrected_err_count,
+&g->ecc.gr.fecs_uncorrected_err_count,
 &dev_attr_fecs_ecc_uncorrected_err_count_array);
 error |= gp10b_ecc_stat_create(dev,
@@ -395,7 +393,7 @@ void gr_gv11b_create_sysfs(struct gk20a *g)
 "gpc",
 NULL,
 "fecs_ecc_corrected_err_count",
-&g->ecc.gr.t19x.fecs_corrected_err_count,
+&g->ecc.gr.fecs_corrected_err_count,
 &dev_attr_fecs_ecc_corrected_err_count_array);
 error |= gp10b_ecc_stat_create(dev,
@@ -404,7 +402,7 @@ void gr_gv11b_create_sysfs(struct gk20a *g)
 "gpc",
 NULL,
 "gpccs_ecc_uncorrected_err_count",
-&g->ecc.gr.t19x.gpccs_uncorrected_err_count,
+&g->ecc.gr.gpccs_uncorrected_err_count,
 &dev_attr_gpccs_ecc_uncorrected_err_count_array);
 error |= gp10b_ecc_stat_create(dev,
@@ -413,7 +411,7 @@ void gr_gv11b_create_sysfs(struct gk20a *g)
 "gpc",
 NULL,
 "gpccs_ecc_corrected_err_count",
-&g->ecc.gr.t19x.gpccs_corrected_err_count,
+&g->ecc.gr.gpccs_corrected_err_count,
 &dev_attr_gpccs_ecc_corrected_err_count_array);
 error |= gp10b_ecc_stat_create(dev,
@@ -422,7 +420,7 @@ void gr_gv11b_create_sysfs(struct gk20a *g)
 "gpc",
 NULL,
 "mmu_l1tlb_ecc_uncorrected_err_count",
-&g->ecc.gr.t19x.mmu_l1tlb_uncorrected_err_count,
+&g->ecc.gr.mmu_l1tlb_uncorrected_err_count,
 &dev_attr_mmu_l1tlb_ecc_uncorrected_err_count_array);
 error |= gp10b_ecc_stat_create(dev,
@@ -431,7 +429,7 @@ void gr_gv11b_create_sysfs(struct gk20a *g)
 "gpc",
 NULL,
 "mmu_l1tlb_ecc_corrected_err_count",
-&g->ecc.gr.t19x.mmu_l1tlb_corrected_err_count,
+&g->ecc.gr.mmu_l1tlb_corrected_err_count,
 &dev_attr_mmu_l1tlb_ecc_corrected_err_count_array);
 error |= gp10b_ecc_stat_create(dev,
@@ -440,7 +438,7 @@ void gr_gv11b_create_sysfs(struct gk20a *g)
 "eng",
 NULL,
 "mmu_l2tlb_ecc_uncorrected_err_count",
-&g->ecc.eng.t19x.mmu_l2tlb_uncorrected_err_count,
+&g->ecc.fb.mmu_l2tlb_uncorrected_err_count,
 &dev_attr_mmu_l2tlb_ecc_uncorrected_err_count_array);
 error |= gp10b_ecc_stat_create(dev,
@@ -449,7 +447,7 @@ void gr_gv11b_create_sysfs(struct gk20a *g)
 "eng",
 NULL,
 "mmu_l2tlb_ecc_corrected_err_count",
-&g->ecc.eng.t19x.mmu_l2tlb_corrected_err_count,
+&g->ecc.fb.mmu_l2tlb_corrected_err_count,
 &dev_attr_mmu_l2tlb_ecc_corrected_err_count_array);
 error |= gp10b_ecc_stat_create(dev,
@@ -458,7 +456,7 @@ void gr_gv11b_create_sysfs(struct gk20a *g)
 "eng",
 NULL,
 "mmu_hubtlb_ecc_uncorrected_err_count",
-&g->ecc.eng.t19x.mmu_hubtlb_uncorrected_err_count,
+&g->ecc.fb.mmu_hubtlb_uncorrected_err_count,
 &dev_attr_mmu_hubtlb_ecc_uncorrected_err_count_array);
 error |= gp10b_ecc_stat_create(dev,
@@ -467,7 +465,7 @@ void gr_gv11b_create_sysfs(struct gk20a *g)
 "eng",
 NULL,
 "mmu_hubtlb_ecc_corrected_err_count",
-&g->ecc.eng.t19x.mmu_hubtlb_corrected_err_count,
+&g->ecc.fb.mmu_hubtlb_corrected_err_count,
 &dev_attr_mmu_hubtlb_ecc_corrected_err_count_array);
 error |= gp10b_ecc_stat_create(dev,
@@ -476,7 +474,7 @@ void gr_gv11b_create_sysfs(struct gk20a *g)
 "eng",
 NULL,
 "mmu_fillunit_ecc_uncorrected_err_count",
-&g->ecc.eng.t19x.mmu_fillunit_uncorrected_err_count,
+&g->ecc.fb.mmu_fillunit_uncorrected_err_count,
 &dev_attr_mmu_fillunit_ecc_uncorrected_err_count_array);
 error |= gp10b_ecc_stat_create(dev,
@@ -485,7 +483,7 @@ void gr_gv11b_create_sysfs(struct gk20a *g)
 "eng",
 NULL,
 "mmu_fillunit_ecc_corrected_err_count",
-&g->ecc.eng.t19x.mmu_fillunit_corrected_err_count,
+&g->ecc.fb.mmu_fillunit_corrected_err_count,
 &dev_attr_mmu_fillunit_ecc_corrected_err_count_array);
 error |= gp10b_ecc_stat_create(dev,
@@ -494,7 +492,7 @@ void gr_gv11b_create_sysfs(struct gk20a *g)
 "eng",
 NULL,
 "pmu_ecc_uncorrected_err_count",
-&g->ecc.eng.t19x.pmu_uncorrected_err_count,
+&g->ecc.pmu.pmu_uncorrected_err_count,
 &dev_attr_pmu_ecc_uncorrected_err_count_array);
 error |= gp10b_ecc_stat_create(dev,
@@ -503,7 +501,7 @@ void gr_gv11b_create_sysfs(struct gk20a *g)
 "eng",
 NULL,
 "pmu_ecc_corrected_err_count",
-&g->ecc.eng.t19x.pmu_corrected_err_count,
+&g->ecc.pmu.pmu_corrected_err_count,
 &dev_attr_pmu_ecc_corrected_err_count_array);
@@ -517,131 +515,131 @@ static void gr_gv11b_remove_sysfs(struct device *dev)
 gr_gp10b_ecc_stat_remove(dev,
 0,
-&g->ecc.gr.t19x.sm_l1_tag_corrected_err_count,
+&g->ecc.gr.sm_l1_tag_corrected_err_count,
 dev_attr_sm_l1_tag_ecc_corrected_err_count_array);
 gr_gp10b_ecc_stat_remove(dev,
 0,
-&g->ecc.gr.t19x.sm_l1_tag_uncorrected_err_count,
+&g->ecc.gr.sm_l1_tag_uncorrected_err_count,
 dev_attr_sm_l1_tag_ecc_uncorrected_err_count_array);
 gr_gp10b_ecc_stat_remove(dev,
 0,
-&g->ecc.gr.t19x.sm_cbu_corrected_err_count,
+&g->ecc.gr.sm_cbu_corrected_err_count,
 dev_attr_sm_cbu_ecc_corrected_err_count_array);
 gr_gp10b_ecc_stat_remove(dev,
 0,
-&g->ecc.gr.t19x.sm_cbu_uncorrected_err_count,
+&g->ecc.gr.sm_cbu_uncorrected_err_count,
 dev_attr_sm_cbu_ecc_uncorrected_err_count_array);
 gr_gp10b_ecc_stat_remove(dev,
 0,
-&g->ecc.gr.t19x.sm_l1_data_corrected_err_count,
+&g->ecc.gr.sm_l1_data_corrected_err_count,
 dev_attr_sm_l1_data_ecc_corrected_err_count_array);
 gr_gp10b_ecc_stat_remove(dev,
 0,
-&g->ecc.gr.t19x.sm_l1_data_uncorrected_err_count,
+&g->ecc.gr.sm_l1_data_uncorrected_err_count,
 dev_attr_sm_l1_data_ecc_uncorrected_err_count_array);
 gr_gp10b_ecc_stat_remove(dev,
 0,
-&g->ecc.gr.t19x.sm_icache_corrected_err_count,
+&g->ecc.gr.sm_icache_corrected_err_count,
 dev_attr_sm_icache_ecc_corrected_err_count_array);
 gr_gp10b_ecc_stat_remove(dev,
 0,
-&g->ecc.gr.t19x.sm_icache_uncorrected_err_count,
+&g->ecc.gr.sm_icache_uncorrected_err_count,
 dev_attr_sm_icache_ecc_uncorrected_err_count_array);
 gr_gp10b_ecc_stat_remove(dev,
 0,
-&g->ecc.gr.t19x.gcc_l15_corrected_err_count,
+&g->ecc.gr.gcc_l15_corrected_err_count,
 dev_attr_gcc_l15_ecc_corrected_err_count_array);
 gr_gp10b_ecc_stat_remove(dev,
 0,
-&g->ecc.gr.t19x.gcc_l15_uncorrected_err_count,
+&g->ecc.gr.gcc_l15_uncorrected_err_count,
 dev_attr_gcc_l15_ecc_uncorrected_err_count_array);
 gp10b_ecc_stat_remove(dev,
 g->ltc_count,
-&g->ecc.ltc.t19x.l2_cache_uncorrected_err_count,
+&g->ecc.ltc.l2_cache_uncorrected_err_count,
 dev_attr_l2_cache_ecc_uncorrected_err_count_array);
 gp10b_ecc_stat_remove(dev,
 g->ltc_count,
-&g->ecc.ltc.t19x.l2_cache_corrected_err_count,
+&g->ecc.ltc.l2_cache_corrected_err_count,
 dev_attr_l2_cache_ecc_corrected_err_count_array);
 gp10b_ecc_stat_remove(dev,
 1,
-&g->ecc.gr.t19x.fecs_uncorrected_err_count,
+&g->ecc.gr.fecs_uncorrected_err_count,
 dev_attr_fecs_ecc_uncorrected_err_count_array);
 gp10b_ecc_stat_remove(dev,
 1,
-&g->ecc.gr.t19x.fecs_corrected_err_count,
+&g->ecc.gr.fecs_corrected_err_count,
 dev_attr_fecs_ecc_corrected_err_count_array);
 gp10b_ecc_stat_remove(dev,
 g->gr.gpc_count,
-&g->ecc.gr.t19x.gpccs_uncorrected_err_count,
+&g->ecc.gr.gpccs_uncorrected_err_count,
 dev_attr_gpccs_ecc_uncorrected_err_count_array);
 gp10b_ecc_stat_remove(dev,
 g->gr.gpc_count,
-&g->ecc.gr.t19x.gpccs_corrected_err_count,
+&g->ecc.gr.gpccs_corrected_err_count,
 dev_attr_gpccs_ecc_corrected_err_count_array);
 gp10b_ecc_stat_remove(dev,
 g->gr.gpc_count,
-&g->ecc.gr.t19x.mmu_l1tlb_uncorrected_err_count,
+&g->ecc.gr.mmu_l1tlb_uncorrected_err_count,
 dev_attr_mmu_l1tlb_ecc_uncorrected_err_count_array);
 gp10b_ecc_stat_remove(dev,
 g->gr.gpc_count,
-&g->ecc.gr.t19x.mmu_l1tlb_corrected_err_count,
+&g->ecc.gr.mmu_l1tlb_corrected_err_count,
 dev_attr_mmu_l1tlb_ecc_corrected_err_count_array);
 gp10b_ecc_stat_remove(dev,
 1,
-&g->ecc.eng.t19x.mmu_l2tlb_uncorrected_err_count,
+&g->ecc.fb.mmu_l2tlb_uncorrected_err_count,
 dev_attr_mmu_l2tlb_ecc_uncorrected_err_count_array);
 gp10b_ecc_stat_remove(dev,
 1,
-&g->ecc.eng.t19x.mmu_l2tlb_corrected_err_count,
+&g->ecc.fb.mmu_l2tlb_corrected_err_count,
 dev_attr_mmu_l2tlb_ecc_corrected_err_count_array);
 gp10b_ecc_stat_remove(dev,
 1,
-&g->ecc.eng.t19x.mmu_hubtlb_uncorrected_err_count,
+&g->ecc.fb.mmu_hubtlb_uncorrected_err_count,
 dev_attr_mmu_hubtlb_ecc_uncorrected_err_count_array);
 gp10b_ecc_stat_remove(dev,
 1,
-&g->ecc.eng.t19x.mmu_hubtlb_corrected_err_count,
+&g->ecc.fb.mmu_hubtlb_corrected_err_count,
 dev_attr_mmu_hubtlb_ecc_corrected_err_count_array);
 gp10b_ecc_stat_remove(dev,
 1,
-&g->ecc.eng.t19x.mmu_fillunit_uncorrected_err_count,
+&g->ecc.fb.mmu_fillunit_uncorrected_err_count,
 dev_attr_mmu_fillunit_ecc_uncorrected_err_count_array);
 gp10b_ecc_stat_remove(dev,
 1,
-&g->ecc.eng.t19x.mmu_fillunit_corrected_err_count,
+&g->ecc.fb.mmu_fillunit_corrected_err_count,
 dev_attr_mmu_fillunit_ecc_corrected_err_count_array);
 gp10b_ecc_stat_remove(dev,
 1,
-&g->ecc.eng.t19x.pmu_uncorrected_err_count,
+&g->ecc.pmu.pmu_uncorrected_err_count,
 dev_attr_pmu_ecc_uncorrected_err_count_array);
 gp10b_ecc_stat_remove(dev,
 1,
-&g->ecc.eng.t19x.pmu_corrected_err_count,
+&g->ecc.pmu.pmu_corrected_err_count,
 dev_attr_pmu_ecc_corrected_err_count_array);
 }


@@ -169,10 +169,8 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 p->flags = TEGRA_VGPU_MAP_CACHEABLE;
 if (flags & NVGPU_VM_MAP_IO_COHERENT)
 p->flags |= TEGRA_VGPU_MAP_IO_COHERENT;
-#ifdef CONFIG_TEGRA_19x_GPU
 if (flags & NVGPU_VM_MAP_L3_ALLOC)
 p->flags |= TEGRA_VGPU_MAP_L3_ALLOC;
-#endif
 p->prot = prot;
 p->ctag_offset = ctag_offset;
 p->clear_ctags = clear_ctags;

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
 #include "common/linux/os_linux.h"
 #include <nvgpu/nvhost.h>
-#include <nvgpu/nvhost_t19x.h>
 #include <linux/platform_device.h>
@@ -44,12 +43,12 @@ static int gv11b_vgpu_probe(struct device *dev)
 dev_err(dev, "failed to map usermode regs\n");
 return PTR_ERR(regs);
 }
-l->t19x.usermode_regs = regs;
+l->usermode_regs = regs;
 #ifdef CONFIG_TEGRA_GK20A_NVHOST
 ret = nvgpu_get_nvhost_dev(g);
 if (ret) {
-l->t19x.usermode_regs = NULL;
+l->usermode_regs = NULL;
 return ret;
 }


@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -18,7 +18,7 @@
 #include "common/linux/vgpu/vgpu.h"
 #include "gv11b/fifo_gv11b.h"
-#include <nvgpu/nvhost_t19x.h>
+#include <nvgpu/nvhost.h>
 #include <linux/tegra_vgpu.h>
@@ -99,7 +99,7 @@ int vgpu_gv11b_init_fifo_setup_hw(struct gk20a *g)
 struct fifo_gk20a *f = &g->fifo;
 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
-f->t19x.max_subctx_count = priv->constants.max_subctx_count;
+f->max_subctx_count = priv->constants.max_subctx_count;
 return 0;
 }


@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -17,7 +17,6 @@
 #include "gk20a/gk20a.h"
 #include <nvgpu/enabled.h>
-#include <nvgpu/enabled_t19x.h>
 #include "common/linux/vgpu/vgpu.h"
 #include "vgpu_gv11b.h"


@@ -68,8 +68,7 @@
 #include <gv11b/gr_ctx_gv11b.h>
 #include <gv11b/ltc_gv11b.h>
 #include <gv11b/gv11b_gating_reglist.h>
-#include <gv11b/gr_gv11b.h>
-#include <gv100/gr_gv100.h>
 #include <nvgpu/enabled.h>


@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2016-2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -38,8 +38,8 @@ int vgpu_gv11b_tsg_bind_channel(struct tsg_gk20a *tsg,
 msg.handle = vgpu_get_handle(tsg->g);
 p->tsg_id = tsg->tsgid;
 p->ch_handle = ch->virt_ctx;
-p->subctx_id = ch->t19x.subctx_id;
-p->runqueue_sel = ch->t19x.runqueue_sel;
+p->subctx_id = ch->subctx_id;
+p->runqueue_sel = ch->runqueue_sel;
 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 err = err ? err : msg.ret;
 if (err) {


@@ -1,7 +1,7 @@
 /*
 * Virtualized GPU
 *
-* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -47,10 +47,6 @@
 #include "common/linux/scale.h"
 #include "common/linux/driver_common.h"
-#ifdef CONFIG_TEGRA_19x_GPU
-#include <nvgpu_gpuid_t19x.h>
-#endif
 #include <nvgpu/hw/gk20a/hw_mc_gk20a.h>
 static inline int vgpu_comm_init(struct platform_device *pdev)
@@ -436,11 +432,9 @@ static int vgpu_init_hal(struct gk20a *g)
 gk20a_dbg_info("gp10b detected");
 err = vgpu_gp10b_init_hal(g);
 break;
-#ifdef CONFIG_TEGRA_19x_GPU
-case TEGRA_19x_GPUID:
+case NVGPU_GPUID_GV11B:
 err = vgpu_gv11b_init_hal(g);
 break;
-#endif
 default:
 nvgpu_err(g, "no support for %x", ver);
 err = -ENODEV;

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -700,9 +700,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
if (attrs.ctag) if (attrs.ctag)
attrs.ctag += buffer_offset & (ctag_granularity - 1U); attrs.ctag += buffer_offset & (ctag_granularity - 1U);
#ifdef CONFIG_TEGRA_19x_GPU attrs.l3_alloc = (bool)(flags & NVGPU_VM_MAP_L3_ALLOC);
nvgpu_gmmu_add_t19x_attrs(&attrs, flags);
#endif
/* /*
* Only allocate a new GPU VA range if we haven't already been passed a * Only allocate a new GPU VA range if we haven't already been passed a

View File

@@ -1,29 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/gmmu.h>
#include <nvgpu/vm.h>
void nvgpu_gmmu_add_t19x_attrs(struct nvgpu_gmmu_attrs *attrs, u32 flags)
{
attrs->t19x_attrs.l3_alloc = (bool)(flags & NVGPU_VM_MAP_L3_ALLOC);
}
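With nvgpu_gmmu_add_t19x_attrs() deleted, the L3 hint is set inline on the generic attrs struct. A minimal sketch of the folded flow, using only names visible in this diff (the helper below is illustrative, not code from the commit; it assumes <nvgpu/gmmu.h> and <nvgpu/vm.h>):
/* Illustrative sketch of the folded L3-alloc path; not part of the commit. */
static u64 phys_with_l3_hint(u64 phys, u32 flags)
{
	struct nvgpu_gmmu_attrs attrs = { 0 };
	/* Replaces the deleted nvgpu_gmmu_add_t19x_attrs() helper above. */
	attrs.l3_alloc = (bool)(flags & NVGPU_VM_MAP_L3_ALLOC);
	/* Same check gv11b_gpu_phys_addr() applies later in this diff. */
	if (attrs.l3_alloc)
		phys |= NVGPU_L3_ALLOC_BIT;
	return phys;
}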

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -577,11 +577,9 @@ static void __nvgpu_vm_remove(struct vm_gk20a *vm)
} }
} }
#if defined(CONFIG_TEGRA_GK20A_NVHOST) && defined(CONFIG_TEGRA_19x_GPU)
if (nvgpu_mem_is_valid(&g->syncpt_mem) && vm->syncpt_ro_map_gpu_va) if (nvgpu_mem_is_valid(&g->syncpt_mem) && vm->syncpt_ro_map_gpu_va)
nvgpu_gmmu_unmap(vm, &g->syncpt_mem, nvgpu_gmmu_unmap(vm, &g->syncpt_mem,
vm->syncpt_ro_map_gpu_va); vm->syncpt_ro_map_gpu_va);
#endif
nvgpu_mutex_acquire(&vm->update_gmmu_lock); nvgpu_mutex_acquire(&vm->update_gmmu_lock);

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -28,10 +28,6 @@
#include "gk20a/gk20a.h" #include "gk20a/gk20a.h"
#ifdef CONFIG_TEGRA_19x_GPU
#include "nvgpu_gpuid_t19x.h"
#endif
static u8 get_perfmon_id(struct nvgpu_pmu *pmu) static u8 get_perfmon_id(struct nvgpu_pmu *pmu)
{ {
struct gk20a *g = gk20a_from_pmu(pmu); struct gk20a *g = gk20a_from_pmu(pmu);
@@ -49,11 +45,9 @@ static u8 get_perfmon_id(struct nvgpu_pmu *pmu)
case NVGPU_GPUID_GP106: case NVGPU_GPUID_GP106:
unit_id = PMU_UNIT_PERFMON_T18X; unit_id = PMU_UNIT_PERFMON_T18X;
break; break;
#if defined(CONFIG_TEGRA_19x_GPU) case NVGPU_GPUID_GV11B:
case TEGRA_19x_GPUID:
unit_id = PMU_UNIT_PERFMON_T18X; unit_id = PMU_UNIT_PERFMON_T18X;
break; break;
#endif
default: default:
unit_id = PMU_UNIT_INVALID; unit_id = PMU_UNIT_INVALID;
nvgpu_err(g, "no support for %x", ver); nvgpu_err(g, "no support for %x", ver);

View File

@@ -1,29 +0,0 @@
/*
* NVIDIA T19x ECC
*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NVGPU_ECC_T19X_H_
#define _NVGPU_ECC_T19X_H_
#include "gv11b/ecc_gv11b.h"
#endif

View File

@@ -1,30 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _FIFO_T19X_H_
#define _FIFO_T19X_H_
struct fifo_t19x {
u32 max_subctx_count;
};
#endif

View File

@@ -1,7 +1,7 @@
/* /*
* GK20A Graphics channel * GK20A Graphics channel
* *
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -700,10 +700,8 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
ch->has_timedout = false; ch->has_timedout = false;
ch->wdt_enabled = true; ch->wdt_enabled = true;
ch->obj_class = 0; ch->obj_class = 0;
#ifdef CONFIG_TEGRA_19x_GPU ch->subctx_id = 0;
memset(&ch->t19x, 0, sizeof(struct channel_t19x)); ch->runqueue_sel = 0;
#endif
/* The channel is *not* runnable at this point. It still needs to have /* The channel is *not* runnable at this point. It still needs to have
* an address space bound and allocate a gpfifo and grctx. */ * an address space bound and allocate a gpfifo and grctx. */

View File

@@ -1,7 +1,7 @@
/* /*
* GK20A graphics channel * GK20A graphics channel
* *
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -40,9 +40,6 @@ struct fifo_profile_gk20a;
#include "mm_gk20a.h" #include "mm_gk20a.h"
#include "gr_gk20a.h" #include "gr_gk20a.h"
#include "fence_gk20a.h" #include "fence_gk20a.h"
#ifdef CONFIG_TEGRA_19x_GPU
#include "channel_t19x.h"
#endif
/* Flags to be passed to gk20a_channel_alloc_gpfifo() */ /* Flags to be passed to gk20a_channel_alloc_gpfifo() */
#define NVGPU_GPFIFO_FLAGS_SUPPORT_VPR (1 << 0) #define NVGPU_GPFIFO_FLAGS_SUPPORT_VPR (1 << 0)
@@ -237,9 +234,8 @@ struct channel_gk20a {
u32 runlist_id; u32 runlist_id;
bool is_privileged_channel; bool is_privileged_channel;
#ifdef CONFIG_TEGRA_19x_GPU u32 subctx_id;
struct channel_t19x t19x; u32 runqueue_sel;
#endif
struct ctx_header_desc ctx_header; struct ctx_header_desc ctx_header;

View File

@@ -1,7 +1,7 @@
/* /*
* GK20A ECC * GK20A ECC
* *
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -33,10 +33,6 @@ struct gk20a_ecc_stat {
#endif #endif
}; };
#ifdef CONFIG_TEGRA_19x_GPU
#include "ecc_t19x.h"
#endif
struct ecc_gk20a { struct ecc_gk20a {
/* Stats per engine */ /* Stats per engine */
struct { struct {
@@ -56,24 +52,44 @@ struct ecc_gk20a {
struct gk20a_ecc_stat tex_unique_sec_pipe1_count; struct gk20a_ecc_stat tex_unique_sec_pipe1_count;
struct gk20a_ecc_stat tex_unique_ded_pipe1_count; struct gk20a_ecc_stat tex_unique_ded_pipe1_count;
#ifdef CONFIG_TEGRA_19x_GPU struct gk20a_ecc_stat sm_l1_tag_corrected_err_count;
struct ecc_gr_t19x t19x; struct gk20a_ecc_stat sm_l1_tag_uncorrected_err_count;
#endif struct gk20a_ecc_stat sm_cbu_corrected_err_count;
struct gk20a_ecc_stat sm_cbu_uncorrected_err_count;
struct gk20a_ecc_stat sm_l1_data_corrected_err_count;
struct gk20a_ecc_stat sm_l1_data_uncorrected_err_count;
struct gk20a_ecc_stat sm_icache_corrected_err_count;
struct gk20a_ecc_stat sm_icache_uncorrected_err_count;
struct gk20a_ecc_stat gcc_l15_corrected_err_count;
struct gk20a_ecc_stat gcc_l15_uncorrected_err_count;
struct gk20a_ecc_stat fecs_corrected_err_count;
struct gk20a_ecc_stat fecs_uncorrected_err_count;
struct gk20a_ecc_stat gpccs_corrected_err_count;
struct gk20a_ecc_stat gpccs_uncorrected_err_count;
struct gk20a_ecc_stat mmu_l1tlb_corrected_err_count;
struct gk20a_ecc_stat mmu_l1tlb_uncorrected_err_count;
} gr; } gr;
struct { struct {
struct gk20a_ecc_stat l2_sec_count; struct gk20a_ecc_stat l2_sec_count;
struct gk20a_ecc_stat l2_ded_count; struct gk20a_ecc_stat l2_ded_count;
#ifdef CONFIG_TEGRA_19x_GPU struct gk20a_ecc_stat l2_cache_corrected_err_count;
struct ecc_ltc_t19x t19x; struct gk20a_ecc_stat l2_cache_uncorrected_err_count;
#endif
} ltc; } ltc;
struct { struct {
#ifdef CONFIG_TEGRA_19x_GPU struct gk20a_ecc_stat mmu_l2tlb_corrected_err_count;
struct ecc_eng_t19x t19x; struct gk20a_ecc_stat mmu_l2tlb_uncorrected_err_count;
#endif struct gk20a_ecc_stat mmu_hubtlb_corrected_err_count;
} eng; struct gk20a_ecc_stat mmu_hubtlb_uncorrected_err_count;
struct gk20a_ecc_stat mmu_fillunit_corrected_err_count;
struct gk20a_ecc_stat mmu_fillunit_uncorrected_err_count;
} fb;
struct {
struct gk20a_ecc_stat pmu_corrected_err_count;
struct gk20a_ecc_stat pmu_uncorrected_err_count;
} pmu;
}; };
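The per-unit counters previously wrapped in *_t19x structs are now reached directly through the gr, ltc, fb and pmu sub-structs. A hedged sketch of the resulting access pattern (field names come from the struct above; the helper itself is only illustrative):
/* Illustrative only; assumes the gk20a/ecc headers above. */
static void log_fb_l2tlb_ecc(struct gk20a *g, u32 corrected, u32 uncorrected)
{
	/* Was g->ecc.eng.t19x.mmu_l2tlb_*; now folded under ecc.fb. */
	g->ecc.fb.mmu_l2tlb_corrected_err_count.counters[0] += corrected;
	g->ecc.fb.mmu_l2tlb_uncorrected_err_count.counters[0] += uncorrected;
}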

View File

@@ -1,9 +1,7 @@
/* /*
* drivers/video/tegra/host/gk20a/fifo_gk20a.h
*
* GK20A graphics fifo (gr host) * GK20A graphics fifo (gr host)
* *
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -29,10 +27,6 @@
#include "channel_gk20a.h" #include "channel_gk20a.h"
#include "tsg_gk20a.h" #include "tsg_gk20a.h"
#ifdef CONFIG_TEGRA_19x_GPU
#include "fifo_t19x.h"
#endif
#include <nvgpu/kref.h> #include <nvgpu/kref.h>
struct gk20a_debug_output; struct gk20a_debug_output;
@@ -213,9 +207,7 @@ struct fifo_gk20a {
bool deferred_reset_pending; bool deferred_reset_pending;
struct nvgpu_mutex deferred_reset_mutex; struct nvgpu_mutex deferred_reset_mutex;
#ifdef CONFIG_TEGRA_19x_GPU u32 max_subctx_count;
struct fifo_t19x t19x;
#endif
u32 channel_base; u32 channel_base;
}; };
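With fifo_t19x gone, max_subctx_count is an ordinary fifo_gk20a member. A small illustrative check, assuming only the field shown above:
/* Illustrative only: bound a VEID against the folded field. */
static bool veid_is_valid(struct fifo_gk20a *f, u32 veid)
{
	return veid < f->max_subctx_count; /* was f->t19x.max_subctx_count */
}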

View File

@@ -1,7 +1,7 @@
/* /*
* GK20A Graphics * GK20A Graphics
* *
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -48,10 +48,6 @@
#include "bus_gk20a.h" #include "bus_gk20a.h"
#include "pstate/pstate.h" #include "pstate/pstate.h"
#ifdef CONFIG_TEGRA_19x_GPU
#include "nvgpu_gpuid_t19x.h"
#endif
void __nvgpu_check_gpu_state(struct gk20a *g) void __nvgpu_check_gpu_state(struct gk20a *g)
{ {
u32 boot_0 = 0xffffffff; u32 boot_0 = 0xffffffff;
@@ -127,7 +123,7 @@ int gk20a_prepare_poweroff(struct gk20a *g)
int gk20a_finalize_poweron(struct gk20a *g) int gk20a_finalize_poweron(struct gk20a *g)
{ {
int err; int err;
#if defined(CONFIG_TEGRA_GK20A_NVHOST) && defined(CONFIG_TEGRA_19x_GPU) #if defined(CONFIG_TEGRA_GK20A_NVHOST)
u32 nr_pages; u32 nr_pages;
#endif #endif
@@ -319,7 +315,7 @@ int gk20a_finalize_poweron(struct gk20a *g)
} }
} }
#if defined(CONFIG_TEGRA_GK20A_NVHOST) && defined(CONFIG_TEGRA_19x_GPU) #if defined(CONFIG_TEGRA_GK20A_NVHOST)
if (gk20a_platform_has_syncpoints(g) && g->syncpt_unit_size) { if (gk20a_platform_has_syncpoints(g) && g->syncpt_unit_size) {
if (!nvgpu_mem_is_valid(&g->syncpt_mem)) { if (!nvgpu_mem_is_valid(&g->syncpt_mem)) {
nr_pages = DIV_ROUND_UP(g->syncpt_unit_size, PAGE_SIZE); nr_pages = DIV_ROUND_UP(g->syncpt_unit_size, PAGE_SIZE);

View File

@@ -1342,7 +1342,7 @@ struct gk20a {
u64 dma_memory_used; u64 dma_memory_used;
#if defined(CONFIG_TEGRA_GK20A_NVHOST) && defined(CONFIG_TEGRA_19x_GPU) #if defined(CONFIG_TEGRA_GK20A_NVHOST)
u64 syncpt_unit_base; u64 syncpt_unit_base;
size_t syncpt_unit_size; size_t syncpt_unit_size;
u32 syncpt_size; u32 syncpt_size;
@@ -1479,6 +1479,8 @@ int gk20a_wait_for_idle(struct gk20a *g);
#define NVGPU_GPUID_GP10B 0x0000013B #define NVGPU_GPUID_GP10B 0x0000013B
#define NVGPU_GPUID_GP104 0x00000134 #define NVGPU_GPUID_GP104 0x00000134
#define NVGPU_GPUID_GP106 0x00000136 #define NVGPU_GPUID_GP106 0x00000136
#define NVGPU_GPUID_GV11B 0x0000015B
#define NVGPU_GPUID_GV100 0x00000140
int gk20a_init_gpu_characteristics(struct gk20a *g); int gk20a_init_gpu_characteristics(struct gk20a *g);

View File

@@ -24,10 +24,6 @@
#ifndef GR_GK20A_H #ifndef GR_GK20A_H
#define GR_GK20A_H #define GR_GK20A_H
#ifdef CONFIG_TEGRA_19x_GPU
#include "gr_t19x.h"
#endif
#include "gr_ctx_gk20a.h" #include "gr_ctx_gk20a.h"
#include "mm_gk20a.h" #include "mm_gk20a.h"
@@ -199,6 +195,12 @@ struct zbc_depth_table {
u32 ref_cnt; u32 ref_cnt;
}; };
struct zbc_s_table {
u32 stencil;
u32 format;
u32 ref_cnt;
};
struct zbc_entry { struct zbc_entry {
u32 color_ds[GK20A_ZBC_COLOR_VALUE_SIZE]; u32 color_ds[GK20A_ZBC_COLOR_VALUE_SIZE];
u32 color_l2[GK20A_ZBC_COLOR_VALUE_SIZE]; u32 color_l2[GK20A_ZBC_COLOR_VALUE_SIZE];
@@ -393,20 +395,14 @@ struct gr_gk20a {
struct nvgpu_mutex zbc_lock; struct nvgpu_mutex zbc_lock;
struct zbc_color_table zbc_col_tbl[GK20A_ZBC_TABLE_SIZE]; struct zbc_color_table zbc_col_tbl[GK20A_ZBC_TABLE_SIZE];
struct zbc_depth_table zbc_dep_tbl[GK20A_ZBC_TABLE_SIZE]; struct zbc_depth_table zbc_dep_tbl[GK20A_ZBC_TABLE_SIZE];
#ifdef CONFIG_TEGRA_19x_GPU
struct zbc_s_table zbc_s_tbl[GK20A_ZBC_TABLE_SIZE]; struct zbc_s_table zbc_s_tbl[GK20A_ZBC_TABLE_SIZE];
#endif
s32 max_default_color_index; s32 max_default_color_index;
s32 max_default_depth_index; s32 max_default_depth_index;
#ifdef CONFIG_TEGRA_19x_GPU
s32 max_default_s_index; s32 max_default_s_index;
#endif
u32 max_used_color_index; u32 max_used_color_index;
u32 max_used_depth_index; u32 max_used_depth_index;
#ifdef CONFIG_TEGRA_19x_GPU
u32 max_used_s_index; u32 max_used_s_index;
#endif
#define GR_CHANNEL_MAP_TLB_SIZE 2 /* must of power of 2 */ #define GR_CHANNEL_MAP_TLB_SIZE 2 /* must of power of 2 */
struct gr_channel_map_tlb_entry chid_tlb[GR_CHANNEL_MAP_TLB_SIZE]; struct gr_channel_map_tlb_entry chid_tlb[GR_CHANNEL_MAP_TLB_SIZE];

View File

@@ -1,7 +1,7 @@
/* /*
* NVIDIA GPU HAL interface. * NVIDIA GPU HAL interface.
* *
* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -27,10 +27,8 @@
#include "gm20b/hal_gm20b.h" #include "gm20b/hal_gm20b.h"
#include "gp10b/hal_gp10b.h" #include "gp10b/hal_gp10b.h"
#include "gp106/hal_gp106.h" #include "gp106/hal_gp106.h"
#include "gv100/hal_gv100.h"
#ifdef CONFIG_TEGRA_19x_GPU #include "gv11b/hal_gv11b.h"
#include "nvgpu_gpuid_t19x.h"
#endif
#include <nvgpu/log.h> #include <nvgpu/log.h>
@@ -53,17 +51,15 @@ int gpu_init_hal(struct gk20a *g)
if (gp106_init_hal(g)) if (gp106_init_hal(g))
return -ENODEV; return -ENODEV;
break; break;
#ifdef CONFIG_TEGRA_19x_GPU case NVGPU_GPUID_GV11B:
case TEGRA_19x_GPUID: if (gv11b_init_hal(g))
if (TEGRA_19x_GPUID_HAL(g))
return -ENODEV; return -ENODEV;
break; break;
case BIGGPU_19x_GPUID: case NVGPU_GPUID_GV100:
if (BIGGPU_19x_GPUID_HAL(g)) if (gv100_init_hal(g))
return -ENODEV; return -ENODEV;
break; break;
#endif
default: default:
nvgpu_err(g, "no support for %x", ver); nvgpu_err(g, "no support for %x", ver);
return -ENODEV; return -ENODEV;

View File

@@ -1,7 +1,7 @@
/* /*
* GK20A PMU (aka. gPMU outside gk20a context) * GK20A PMU (aka. gPMU outside gk20a context)
* *
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -39,10 +39,6 @@
#include <nvgpu/hw/gk20a/hw_pwr_gk20a.h> #include <nvgpu/hw/gk20a/hw_pwr_gk20a.h>
#include <nvgpu/hw/gk20a/hw_top_gk20a.h> #include <nvgpu/hw/gk20a/hw_top_gk20a.h>
#ifdef CONFIG_TEGRA_19x_GPU
#include "nvgpu_gpuid_t19x.h"
#endif
#define gk20a_dbg_pmu(fmt, arg...) \ #define gk20a_dbg_pmu(fmt, arg...) \
gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -28,9 +28,6 @@
#include "gr_gk20a.h" #include "gr_gk20a.h"
#ifdef CONFIG_TEGRA_19x_GPU
#include "tsg_t19x.h"
#endif
#define NVGPU_INVALID_TSG_ID (-1) #define NVGPU_INVALID_TSG_ID (-1)
struct channel_gk20a; struct channel_gk20a;
@@ -68,9 +65,9 @@ struct tsg_gk20a {
u32 runlist_id; u32 runlist_id;
pid_t tgid; pid_t tgid;
struct nvgpu_mem *eng_method_buffers; struct nvgpu_mem *eng_method_buffers;
#ifdef CONFIG_TEGRA_19x_GPU u32 num_active_tpcs;
struct tsg_t19x t19x; u8 tpc_pg_enabled;
#endif bool tpc_num_initialized;
struct nvgpu_gr_ctx gr_ctx; struct nvgpu_gr_ctx gr_ctx;
}; };
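The TPC power-gating bookkeeping likewise moves straight into tsg_gk20a. A hedged sketch of default initialization for the folded fields (illustrative; the commit's own init path is not shown in this hunk):
/* Illustrative only: reset the folded TPC-PG state on a TSG. */
static void tsg_reset_tpc_pg_state(struct tsg_gk20a *tsg)
{
	tsg->num_active_tpcs = 0;	/* was tsg->t19x.num_active_tpcs */
	tsg->tpc_pg_enabled = false;
	tsg->tpc_num_initialized = false;
}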

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -35,17 +35,13 @@
#include "gm20b/acr_gm20b.h" #include "gm20b/acr_gm20b.h"
#include "gp106/acr_gp106.h" #include "gp106/acr_gp106.h"
#include "gp106/pmu_gp106.h" #include "gp106/pmu_gp106.h"
#include "gv100/acr_gv100.h"
#include "sec2_gp106.h" #include "sec2_gp106.h"
#include <nvgpu/hw/gp106/hw_psec_gp106.h> #include <nvgpu/hw/gp106/hw_psec_gp106.h>
#include <nvgpu/hw/gp106/hw_pwr_gp106.h> #include <nvgpu/hw/gp106/hw_pwr_gp106.h>
#ifdef CONFIG_TEGRA_19x_GPU
#include "nvgpu_gpuid_t19x.h"
#include "acr_t19x.h"
#endif
/*Defines*/ /*Defines*/
#define gp106_dbg_pmu(fmt, arg...) \ #define gp106_dbg_pmu(fmt, arg...) \
gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
@@ -200,17 +196,15 @@ int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
GP106_FECS_UCODE_SIG, GP106_FECS_UCODE_SIG,
NVGPU_REQUEST_FIRMWARE_NO_SOC); NVGPU_REQUEST_FIRMWARE_NO_SOC);
break; break;
#if defined(CONFIG_TEGRA_19x_GPU) case NVGPU_GPUID_GV11B:
case TEGRA_19x_GPUID:
fecs_sig = nvgpu_request_firmware(g, fecs_sig = nvgpu_request_firmware(g,
GM20B_FECS_UCODE_SIG, 0); GM20B_FECS_UCODE_SIG, 0);
break; break;
case BIGGPU_19x_GPUID: case NVGPU_GPUID_GV100:
fecs_sig = nvgpu_request_firmware(g, fecs_sig = nvgpu_request_firmware(g,
BIGGPU_FECS_UCODE_SIG, GV100_FECS_UCODE_SIG,
NVGPU_REQUEST_FIRMWARE_NO_SOC); NVGPU_REQUEST_FIRMWARE_NO_SOC);
break; break;
#endif
default: default:
nvgpu_err(g, "no support for GPUID %x", ver); nvgpu_err(g, "no support for GPUID %x", ver);
} }
@@ -297,17 +291,15 @@ int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
GP106_GPCCS_UCODE_SIG, GP106_GPCCS_UCODE_SIG,
NVGPU_REQUEST_FIRMWARE_NO_SOC); NVGPU_REQUEST_FIRMWARE_NO_SOC);
break; break;
#if defined(CONFIG_TEGRA_19x_GPU) case NVGPU_GPUID_GV11B:
case TEGRA_19x_GPUID:
gpccs_sig = nvgpu_request_firmware(g, gpccs_sig = nvgpu_request_firmware(g,
T18x_GPCCS_UCODE_SIG, 0); T18x_GPCCS_UCODE_SIG, 0);
break; break;
case BIGGPU_19x_GPUID: case NVGPU_GPUID_GV100:
gpccs_sig = nvgpu_request_firmware(g, gpccs_sig = nvgpu_request_firmware(g,
BIGGPU_GPCCS_UCODE_SIG, GV100_GPCCS_UCODE_SIG,
NVGPU_REQUEST_FIRMWARE_NO_SOC); NVGPU_REQUEST_FIRMWARE_NO_SOC);
break; break;
#endif
default: default:
nvgpu_err(g, "no support for GPUID %x", ver); nvgpu_err(g, "no support for GPUID %x", ver);
} }

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -20,10 +20,10 @@
* DEALINGS IN THE SOFTWARE. * DEALINGS IN THE SOFTWARE.
*/ */
#ifndef _NVGPU_ACR_T19X_H_ #ifndef _NVGPU_ACR_GV100_H_
#define _NVGPU_ACR_T19X_H_ #define _NVGPU_ACR_GV100_H_
#define BIGGPU_FECS_UCODE_SIG "gv100/fecs_sig.bin" #define GV100_FECS_UCODE_SIG "gv100/fecs_sig.bin"
#define BIGGPU_GPCCS_UCODE_SIG "gv100/gpccs_sig.bin" #define GV100_GPCCS_UCODE_SIG "gv100/gpccs_sig.bin"
#endif #endif

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -23,7 +23,6 @@
#define __GR_CTX_GV100_H__ #define __GR_CTX_GV100_H__
#include "gk20a/gr_ctx_gk20a.h" #include "gk20a/gr_ctx_gk20a.h"
#include "nvgpu_gpuid_t19x.h"
/* production netlist, one and only one from below */ /* production netlist, one and only one from below */
#define GV100_NETLIST_IMAGE_FW_NAME GK20A_NETLIST_IMAGE_D #define GV100_NETLIST_IMAGE_FW_NAME GK20A_NETLIST_IMAGE_D

View File

@@ -323,7 +323,7 @@ u32 gr_gv100_get_patch_slots(struct gk20a *g)
/* /*
* We need this for all subcontexts * We need this for all subcontexts
*/ */
size *= f->t19x.max_subctx_count; size *= f->max_subctx_count;
/* /*
* Add space for a partition mode change as well * Add space for a partition mode change as well

View File

@@ -79,6 +79,7 @@
#include "gv11b/dbg_gpu_gv11b.h" #include "gv11b/dbg_gpu_gv11b.h"
#include "gv11b/hal_gv11b.h" #include "gv11b/hal_gv11b.h"
#include "gv100/gr_gv100.h" #include "gv100/gr_gv100.h"
#include "gv11b/gr_gv11b.h"
#include "gv11b/mc_gv11b.h" #include "gv11b/mc_gv11b.h"
#include "gv11b/ltc_gv11b.h" #include "gv11b/ltc_gv11b.h"
#include "gv11b/gv11b.h" #include "gv11b/gv11b.h"
@@ -106,7 +107,6 @@
#include <nvgpu/bus.h> #include <nvgpu/bus.h>
#include <nvgpu/debug.h> #include <nvgpu/debug.h>
#include <nvgpu/enabled.h> #include <nvgpu/enabled.h>
#include <nvgpu/enabled_t19x.h>
#include <nvgpu/ctxsw_trace.h> #include <nvgpu/ctxsw_trace.h>
#include <nvgpu/hw/gv100/hw_proj_gv100.h> #include <nvgpu/hw/gv100/hw_proj_gv100.h>

View File

@@ -1,7 +1,7 @@
/* /*
* GV100 Tegra HAL interface * GV100 Tegra HAL interface
* *
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -22,9 +22,11 @@
* DEALINGS IN THE SOFTWARE. * DEALINGS IN THE SOFTWARE.
*/ */
#ifndef _NVGPU_HAL_GV11B_H #ifndef _NVGPU_HAL_GV100_H
#define _NVGPU_HAL_GV11B_H #define _NVGPU_HAL_GV100_H
struct gk20a; struct gk20a;
int gv100_init_hal(struct gk20a *gops); int gv100_init_hal(struct gk20a *gops);
#endif #endif

View File

@@ -1,66 +0,0 @@
/*
* GV11B GPU ECC
*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NVGPU_ECC_GV11B_H_
#define _NVGPU_ECC_GV11B_H_
struct ecc_gr_t19x {
struct gk20a_ecc_stat sm_l1_tag_corrected_err_count;
struct gk20a_ecc_stat sm_l1_tag_uncorrected_err_count;
struct gk20a_ecc_stat sm_cbu_corrected_err_count;
struct gk20a_ecc_stat sm_cbu_uncorrected_err_count;
struct gk20a_ecc_stat sm_l1_data_corrected_err_count;
struct gk20a_ecc_stat sm_l1_data_uncorrected_err_count;
struct gk20a_ecc_stat sm_icache_corrected_err_count;
struct gk20a_ecc_stat sm_icache_uncorrected_err_count;
struct gk20a_ecc_stat gcc_l15_corrected_err_count;
struct gk20a_ecc_stat gcc_l15_uncorrected_err_count;
struct gk20a_ecc_stat fecs_corrected_err_count;
struct gk20a_ecc_stat fecs_uncorrected_err_count;
struct gk20a_ecc_stat gpccs_corrected_err_count;
struct gk20a_ecc_stat gpccs_uncorrected_err_count;
struct gk20a_ecc_stat mmu_l1tlb_corrected_err_count;
struct gk20a_ecc_stat mmu_l1tlb_uncorrected_err_count;
};
struct ecc_ltc_t19x {
struct gk20a_ecc_stat l2_cache_corrected_err_count;
struct gk20a_ecc_stat l2_cache_uncorrected_err_count;
};
/* TODO: PMU and FB ECC features are still under embargo */
struct ecc_eng_t19x {
/* FB */
struct gk20a_ecc_stat mmu_l2tlb_corrected_err_count;
struct gk20a_ecc_stat mmu_l2tlb_uncorrected_err_count;
struct gk20a_ecc_stat mmu_hubtlb_corrected_err_count;
struct gk20a_ecc_stat mmu_hubtlb_uncorrected_err_count;
struct gk20a_ecc_stat mmu_fillunit_corrected_err_count;
struct gk20a_ecc_stat mmu_fillunit_uncorrected_err_count;
/* PMU */
struct gk20a_ecc_stat pmu_corrected_err_count;
struct gk20a_ecc_stat pmu_uncorrected_err_count;
};
#endif

View File

@@ -1,7 +1,7 @@
/* /*
* GV11B FB * GV11B FB
* *
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -568,9 +568,9 @@ static void gv11b_handle_l2tlb_ecc_isr(struct gk20a *g, u32 ecc_status)
uncorrected_delta += (0x1UL << fb_mmu_l2tlb_ecc_uncorrected_err_count_total_s()); uncorrected_delta += (0x1UL << fb_mmu_l2tlb_ecc_uncorrected_err_count_total_s());
g->ecc.eng.t19x.mmu_l2tlb_corrected_err_count.counters[0] += g->ecc.fb.mmu_l2tlb_corrected_err_count.counters[0] +=
corrected_delta; corrected_delta;
g->ecc.eng.t19x.mmu_l2tlb_uncorrected_err_count.counters[0] += g->ecc.fb.mmu_l2tlb_uncorrected_err_count.counters[0] +=
uncorrected_delta; uncorrected_delta;
if (ecc_status & fb_mmu_l2tlb_ecc_status_corrected_err_l2tlb_sa_data_m()) if (ecc_status & fb_mmu_l2tlb_ecc_status_corrected_err_l2tlb_sa_data_m())
@@ -584,8 +584,8 @@ static void gv11b_handle_l2tlb_ecc_isr(struct gk20a *g, u32 ecc_status)
"ecc error address: 0x%x", ecc_addr); "ecc error address: 0x%x", ecc_addr);
nvgpu_log(g, gpu_dbg_intr, nvgpu_log(g, gpu_dbg_intr,
"ecc error count corrected: %d, uncorrected %d", "ecc error count corrected: %d, uncorrected %d",
g->ecc.eng.t19x.mmu_l2tlb_corrected_err_count.counters[0], g->ecc.fb.mmu_l2tlb_corrected_err_count.counters[0],
g->ecc.eng.t19x.mmu_l2tlb_uncorrected_err_count.counters[0]); g->ecc.fb.mmu_l2tlb_uncorrected_err_count.counters[0]);
} }
static void gv11b_handle_hubtlb_ecc_isr(struct gk20a *g, u32 ecc_status) static void gv11b_handle_hubtlb_ecc_isr(struct gk20a *g, u32 ecc_status)
@@ -626,9 +626,9 @@ static void gv11b_handle_hubtlb_ecc_isr(struct gk20a *g, u32 ecc_status)
uncorrected_delta += (0x1UL << fb_mmu_hubtlb_ecc_uncorrected_err_count_total_s()); uncorrected_delta += (0x1UL << fb_mmu_hubtlb_ecc_uncorrected_err_count_total_s());
g->ecc.eng.t19x.mmu_hubtlb_corrected_err_count.counters[0] += g->ecc.fb.mmu_hubtlb_corrected_err_count.counters[0] +=
corrected_delta; corrected_delta;
g->ecc.eng.t19x.mmu_hubtlb_uncorrected_err_count.counters[0] += g->ecc.fb.mmu_hubtlb_uncorrected_err_count.counters[0] +=
uncorrected_delta; uncorrected_delta;
if (ecc_status & fb_mmu_hubtlb_ecc_status_corrected_err_sa_data_m()) if (ecc_status & fb_mmu_hubtlb_ecc_status_corrected_err_sa_data_m())
@@ -642,8 +642,8 @@ static void gv11b_handle_hubtlb_ecc_isr(struct gk20a *g, u32 ecc_status)
"ecc error address: 0x%x", ecc_addr); "ecc error address: 0x%x", ecc_addr);
nvgpu_log(g, gpu_dbg_intr, nvgpu_log(g, gpu_dbg_intr,
"ecc error count corrected: %d, uncorrected %d", "ecc error count corrected: %d, uncorrected %d",
g->ecc.eng.t19x.mmu_hubtlb_corrected_err_count.counters[0], g->ecc.fb.mmu_hubtlb_corrected_err_count.counters[0],
g->ecc.eng.t19x.mmu_hubtlb_uncorrected_err_count.counters[0]); g->ecc.fb.mmu_hubtlb_uncorrected_err_count.counters[0]);
} }
static void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status) static void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status)
@@ -684,9 +684,9 @@ static void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status)
uncorrected_delta += (0x1UL << fb_mmu_fillunit_ecc_uncorrected_err_count_total_s()); uncorrected_delta += (0x1UL << fb_mmu_fillunit_ecc_uncorrected_err_count_total_s());
g->ecc.eng.t19x.mmu_fillunit_corrected_err_count.counters[0] += g->ecc.fb.mmu_fillunit_corrected_err_count.counters[0] +=
corrected_delta; corrected_delta;
g->ecc.eng.t19x.mmu_fillunit_uncorrected_err_count.counters[0] += g->ecc.fb.mmu_fillunit_uncorrected_err_count.counters[0] +=
uncorrected_delta; uncorrected_delta;
if (ecc_status & fb_mmu_fillunit_ecc_status_corrected_err_pte_data_m()) if (ecc_status & fb_mmu_fillunit_ecc_status_corrected_err_pte_data_m())
@@ -705,8 +705,8 @@ static void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status)
"ecc error address: 0x%x", ecc_addr); "ecc error address: 0x%x", ecc_addr);
nvgpu_log(g, gpu_dbg_intr, nvgpu_log(g, gpu_dbg_intr,
"ecc error count corrected: %d, uncorrected %d", "ecc error count corrected: %d, uncorrected %d",
g->ecc.eng.t19x.mmu_fillunit_corrected_err_count.counters[0], g->ecc.fb.mmu_fillunit_corrected_err_count.counters[0],
g->ecc.eng.t19x.mmu_fillunit_uncorrected_err_count.counters[0]); g->ecc.fb.mmu_fillunit_uncorrected_err_count.counters[0]);
} }
static void gv11b_fb_parse_mmfault(struct mmu_fault_info *mmfault) static void gv11b_fb_parse_mmfault(struct mmu_fault_info *mmfault)

View File

@@ -30,10 +30,11 @@
#include <nvgpu/gmmu.h> #include <nvgpu/gmmu.h>
#include <nvgpu/soc.h> #include <nvgpu/soc.h>
#include <nvgpu/debug.h> #include <nvgpu/debug.h>
#include <nvgpu/nvhost_t19x.h> #include <nvgpu/nvhost.h>
#include <nvgpu/barrier.h> #include <nvgpu/barrier.h>
#include <nvgpu/mm.h> #include <nvgpu/mm.h>
#include <nvgpu/ctxsw_trace.h> #include <nvgpu/ctxsw_trace.h>
#include <nvgpu/io_usermode.h>
#include "gk20a/gk20a.h" #include "gk20a/gk20a.h"
#include "gk20a/fifo_gk20a.h" #include "gk20a/fifo_gk20a.h"
@@ -96,7 +97,7 @@ void gv11b_get_ch_runlist_entry(struct channel_gk20a *c, u32 *runlist)
/* Time being use 0 pbdma sequencer */ /* Time being use 0 pbdma sequencer */
runlist_entry = ram_rl_entry_type_channel_v() | runlist_entry = ram_rl_entry_type_channel_v() |
ram_rl_entry_chan_runqueue_selector_f( ram_rl_entry_chan_runqueue_selector_f(
c->t19x.runqueue_sel) | c->runqueue_sel) |
ram_rl_entry_chan_userd_target_f( ram_rl_entry_chan_userd_target_f(
nvgpu_aperture_mask(g, &g->fifo.userd, nvgpu_aperture_mask(g, &g->fifo.userd,
ram_rl_entry_chan_userd_target_sys_mem_ncoh_v(), ram_rl_entry_chan_userd_target_sys_mem_ncoh_v(),
@@ -185,7 +186,7 @@ int channel_gv11b_setup_ramfc(struct channel_gk20a *c,
nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->chid)); nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->chid));
nvgpu_mem_wr32(g, mem, ram_fc_set_channel_info_w(), nvgpu_mem_wr32(g, mem, ram_fc_set_channel_info_w(),
pbdma_set_channel_info_veid_f(c->t19x.subctx_id)); pbdma_set_channel_info_veid_f(c->subctx_id));
gv11b_fifo_init_ramfc_eng_method_buffer(g, c, mem); gv11b_fifo_init_ramfc_eng_method_buffer(g, c, mem);
@@ -215,7 +216,7 @@ static void gv11b_ring_channel_doorbell(struct channel_gk20a *c)
gk20a_dbg_info("channel ring door bell %d\n", c->chid); gk20a_dbg_info("channel ring door bell %d\n", c->chid);
gv11b_usermode_writel(c->g, usermode_notify_channel_pending_r(), nvgpu_usermode_writel(c->g, usermode_notify_channel_pending_r(),
usermode_notify_channel_pending_id_f(hw_chid)); usermode_notify_channel_pending_id_f(hw_chid));
} }
@@ -1782,8 +1783,7 @@ int gv11b_init_fifo_setup_hw(struct gk20a *g)
{ {
struct fifo_gk20a *f = &g->fifo; struct fifo_gk20a *f = &g->fifo;
f->t19x.max_subctx_count = f->max_subctx_count = gr_pri_fe_chip_def_info_max_veid_count_init_v();
gr_pri_fe_chip_def_info_max_veid_count_init_v();
return 0; return 0;
} }
@@ -1794,7 +1794,7 @@ static u32 gv11b_mmu_fault_id_to_gr_veid(struct gk20a *g, u32 gr_eng_fault_id,
u32 num_subctx; u32 num_subctx;
u32 veid = FIFO_INVAL_VEID; u32 veid = FIFO_INVAL_VEID;
num_subctx = f->t19x.max_subctx_count; num_subctx = f->max_subctx_count;
if (mmu_fault_id >= gr_eng_fault_id && if (mmu_fault_id >= gr_eng_fault_id &&
mmu_fault_id < (gr_eng_fault_id + num_subctx)) mmu_fault_id < (gr_eng_fault_id + num_subctx))

View File

@@ -189,7 +189,7 @@ static int gr_gv11b_handle_l1_tag_exception(struct gk20a *g, u32 gpc, u32 tpc,
l1_tag_corrected_err_count_delta += l1_tag_corrected_err_count_delta +=
(is_l1_tag_ecc_corrected_total_err_overflow << (is_l1_tag_ecc_corrected_total_err_overflow <<
gr_pri_gpc0_tpc0_sm_l1_tag_ecc_corrected_err_count_total_s()); gr_pri_gpc0_tpc0_sm_l1_tag_ecc_corrected_err_count_total_s());
g->ecc.gr.t19x.sm_l1_tag_corrected_err_count.counters[tpc] += g->ecc.gr.sm_l1_tag_corrected_err_count.counters[tpc] +=
l1_tag_corrected_err_count_delta; l1_tag_corrected_err_count_delta;
gk20a_writel(g, gk20a_writel(g,
gr_pri_gpc0_tpc0_sm_l1_tag_ecc_corrected_err_count_r() + offset, gr_pri_gpc0_tpc0_sm_l1_tag_ecc_corrected_err_count_r() + offset,
@@ -204,7 +204,7 @@ static int gr_gv11b_handle_l1_tag_exception(struct gk20a *g, u32 gpc, u32 tpc,
l1_tag_uncorrected_err_count_delta += l1_tag_uncorrected_err_count_delta +=
(is_l1_tag_ecc_uncorrected_total_err_overflow << (is_l1_tag_ecc_uncorrected_total_err_overflow <<
gr_pri_gpc0_tpc0_sm_l1_tag_ecc_uncorrected_err_count_total_s()); gr_pri_gpc0_tpc0_sm_l1_tag_ecc_uncorrected_err_count_total_s());
g->ecc.gr.t19x.sm_l1_tag_uncorrected_err_count.counters[tpc] += g->ecc.gr.sm_l1_tag_uncorrected_err_count.counters[tpc] +=
l1_tag_uncorrected_err_count_delta; l1_tag_uncorrected_err_count_delta;
gk20a_writel(g, gk20a_writel(g,
gr_pri_gpc0_tpc0_sm_l1_tag_ecc_uncorrected_err_count_r() + offset, gr_pri_gpc0_tpc0_sm_l1_tag_ecc_uncorrected_err_count_r() + offset,
@@ -399,7 +399,7 @@ static int gr_gv11b_handle_cbu_exception(struct gk20a *g, u32 gpc, u32 tpc,
cbu_corrected_err_count_delta += cbu_corrected_err_count_delta +=
(is_cbu_ecc_corrected_total_err_overflow << (is_cbu_ecc_corrected_total_err_overflow <<
gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_total_s()); gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_total_s());
g->ecc.gr.t19x.sm_cbu_corrected_err_count.counters[tpc] += g->ecc.gr.sm_cbu_corrected_err_count.counters[tpc] +=
cbu_corrected_err_count_delta; cbu_corrected_err_count_delta;
gk20a_writel(g, gk20a_writel(g,
gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_r() + offset, gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_r() + offset,
@@ -414,7 +414,7 @@ static int gr_gv11b_handle_cbu_exception(struct gk20a *g, u32 gpc, u32 tpc,
cbu_uncorrected_err_count_delta += cbu_uncorrected_err_count_delta +=
(is_cbu_ecc_uncorrected_total_err_overflow << (is_cbu_ecc_uncorrected_total_err_overflow <<
gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_total_s()); gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_total_s());
g->ecc.gr.t19x.sm_cbu_uncorrected_err_count.counters[tpc] += g->ecc.gr.sm_cbu_uncorrected_err_count.counters[tpc] +=
cbu_uncorrected_err_count_delta; cbu_uncorrected_err_count_delta;
gk20a_writel(g, gk20a_writel(g,
gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_r() + offset, gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_r() + offset,
@@ -479,7 +479,7 @@ static int gr_gv11b_handle_l1_data_exception(struct gk20a *g, u32 gpc, u32 tpc,
l1_data_corrected_err_count_delta += l1_data_corrected_err_count_delta +=
(is_l1_data_ecc_corrected_total_err_overflow << (is_l1_data_ecc_corrected_total_err_overflow <<
gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_total_s()); gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_total_s());
g->ecc.gr.t19x.sm_l1_data_corrected_err_count.counters[tpc] += g->ecc.gr.sm_l1_data_corrected_err_count.counters[tpc] +=
l1_data_corrected_err_count_delta; l1_data_corrected_err_count_delta;
gk20a_writel(g, gk20a_writel(g,
gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_r() + offset, gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_r() + offset,
@@ -494,7 +494,7 @@ static int gr_gv11b_handle_l1_data_exception(struct gk20a *g, u32 gpc, u32 tpc,
l1_data_uncorrected_err_count_delta += l1_data_uncorrected_err_count_delta +=
(is_l1_data_ecc_uncorrected_total_err_overflow << (is_l1_data_ecc_uncorrected_total_err_overflow <<
gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_total_s()); gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_total_s());
g->ecc.gr.t19x.sm_l1_data_uncorrected_err_count.counters[tpc] += g->ecc.gr.sm_l1_data_uncorrected_err_count.counters[tpc] +=
l1_data_uncorrected_err_count_delta; l1_data_uncorrected_err_count_delta;
gk20a_writel(g, gk20a_writel(g,
gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_r() + offset, gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_r() + offset,
@@ -563,7 +563,7 @@ static int gr_gv11b_handle_icache_exception(struct gk20a *g, u32 gpc, u32 tpc,
icache_corrected_err_count_delta += icache_corrected_err_count_delta +=
(is_icache_ecc_corrected_total_err_overflow << (is_icache_ecc_corrected_total_err_overflow <<
gr_pri_gpc0_tpc0_sm_icache_ecc_corrected_err_count_total_s()); gr_pri_gpc0_tpc0_sm_icache_ecc_corrected_err_count_total_s());
g->ecc.gr.t19x.sm_icache_corrected_err_count.counters[tpc] += g->ecc.gr.sm_icache_corrected_err_count.counters[tpc] +=
icache_corrected_err_count_delta; icache_corrected_err_count_delta;
gk20a_writel(g, gk20a_writel(g,
gr_pri_gpc0_tpc0_sm_icache_ecc_corrected_err_count_r() + offset, gr_pri_gpc0_tpc0_sm_icache_ecc_corrected_err_count_r() + offset,
@@ -578,7 +578,7 @@ static int gr_gv11b_handle_icache_exception(struct gk20a *g, u32 gpc, u32 tpc,
icache_uncorrected_err_count_delta += icache_uncorrected_err_count_delta +=
(is_icache_ecc_uncorrected_total_err_overflow << (is_icache_ecc_uncorrected_total_err_overflow <<
gr_pri_gpc0_tpc0_sm_icache_ecc_uncorrected_err_count_total_s()); gr_pri_gpc0_tpc0_sm_icache_ecc_uncorrected_err_count_total_s());
g->ecc.gr.t19x.sm_icache_uncorrected_err_count.counters[tpc] += g->ecc.gr.sm_icache_uncorrected_err_count.counters[tpc] +=
icache_uncorrected_err_count_delta; icache_uncorrected_err_count_delta;
gk20a_writel(g, gk20a_writel(g,
gr_pri_gpc0_tpc0_sm_icache_ecc_uncorrected_err_count_r() + offset, gr_pri_gpc0_tpc0_sm_icache_ecc_uncorrected_err_count_r() + offset,
@@ -667,7 +667,7 @@ int gr_gv11b_handle_gcc_exception(struct gk20a *g, u32 gpc, u32 tpc,
gcc_l15_corrected_err_count_delta += gcc_l15_corrected_err_count_delta +=
(is_gcc_l15_ecc_corrected_total_err_overflow << (is_gcc_l15_ecc_corrected_total_err_overflow <<
gr_pri_gpc0_gcc_l15_ecc_corrected_err_count_total_s()); gr_pri_gpc0_gcc_l15_ecc_corrected_err_count_total_s());
g->ecc.gr.t19x.gcc_l15_corrected_err_count.counters[gpc] += g->ecc.gr.gcc_l15_corrected_err_count.counters[gpc] +=
gcc_l15_corrected_err_count_delta; gcc_l15_corrected_err_count_delta;
gk20a_writel(g, gk20a_writel(g,
gr_pri_gpc0_gcc_l15_ecc_corrected_err_count_r() + offset, gr_pri_gpc0_gcc_l15_ecc_corrected_err_count_r() + offset,
@@ -682,7 +682,7 @@ int gr_gv11b_handle_gcc_exception(struct gk20a *g, u32 gpc, u32 tpc,
gcc_l15_uncorrected_err_count_delta += gcc_l15_uncorrected_err_count_delta +=
(is_gcc_l15_ecc_uncorrected_total_err_overflow << (is_gcc_l15_ecc_uncorrected_total_err_overflow <<
gr_pri_gpc0_gcc_l15_ecc_uncorrected_err_count_total_s()); gr_pri_gpc0_gcc_l15_ecc_uncorrected_err_count_total_s());
g->ecc.gr.t19x.gcc_l15_uncorrected_err_count.counters[gpc] += g->ecc.gr.gcc_l15_uncorrected_err_count.counters[gpc] +=
gcc_l15_uncorrected_err_count_delta; gcc_l15_uncorrected_err_count_delta;
gk20a_writel(g, gk20a_writel(g,
gr_pri_gpc0_gcc_l15_ecc_uncorrected_err_count_r() + offset, gr_pri_gpc0_gcc_l15_ecc_uncorrected_err_count_r() + offset,
@@ -752,9 +752,9 @@ static int gr_gv11b_handle_gpcmmu_ecc_exception(struct gk20a *g, u32 gpc,
uncorrected_delta += (0x1UL << gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_total_s()); uncorrected_delta += (0x1UL << gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_total_s());
g->ecc.gr.t19x.mmu_l1tlb_corrected_err_count.counters[gpc] += g->ecc.gr.mmu_l1tlb_corrected_err_count.counters[gpc] +=
corrected_delta; corrected_delta;
g->ecc.gr.t19x.mmu_l1tlb_uncorrected_err_count.counters[gpc] += g->ecc.gr.mmu_l1tlb_uncorrected_err_count.counters[gpc] +=
uncorrected_delta; uncorrected_delta;
nvgpu_log(g, gpu_dbg_intr, nvgpu_log(g, gpu_dbg_intr,
"mmu l1tlb gpc:%d ecc interrupt intr: 0x%x", gpc, hww_esr); "mmu l1tlb gpc:%d ecc interrupt intr: 0x%x", gpc, hww_esr);
@@ -774,8 +774,8 @@ static int gr_gv11b_handle_gpcmmu_ecc_exception(struct gk20a *g, u32 gpc,
"ecc error address: 0x%x", ecc_addr); "ecc error address: 0x%x", ecc_addr);
nvgpu_log(g, gpu_dbg_intr, nvgpu_log(g, gpu_dbg_intr,
"ecc error count corrected: %d, uncorrected %d", "ecc error count corrected: %d, uncorrected %d",
g->ecc.gr.t19x.mmu_l1tlb_corrected_err_count.counters[gpc], g->ecc.gr.mmu_l1tlb_corrected_err_count.counters[gpc],
g->ecc.gr.t19x.mmu_l1tlb_uncorrected_err_count.counters[gpc]); g->ecc.gr.mmu_l1tlb_uncorrected_err_count.counters[gpc]);
return ret; return ret;
} }
@@ -830,9 +830,9 @@ static int gr_gv11b_handle_gpccs_ecc_exception(struct gk20a *g, u32 gpc,
gk20a_writel(g, gr_gpc0_gpccs_falcon_ecc_status_r() + offset, gk20a_writel(g, gr_gpc0_gpccs_falcon_ecc_status_r() + offset,
gr_gpc0_gpccs_falcon_ecc_status_reset_task_f()); gr_gpc0_gpccs_falcon_ecc_status_reset_task_f());
g->ecc.gr.t19x.gpccs_corrected_err_count.counters[gpc] += g->ecc.gr.gpccs_corrected_err_count.counters[gpc] +=
corrected_delta; corrected_delta;
g->ecc.gr.t19x.gpccs_uncorrected_err_count.counters[gpc] += g->ecc.gr.gpccs_uncorrected_err_count.counters[gpc] +=
uncorrected_delta; uncorrected_delta;
nvgpu_log(g, gpu_dbg_intr, nvgpu_log(g, gpu_dbg_intr,
"gppcs gpc:%d ecc interrupt intr: 0x%x", gpc, hww_esr); "gppcs gpc:%d ecc interrupt intr: 0x%x", gpc, hww_esr);
@@ -857,8 +857,8 @@ static int gr_gv11b_handle_gpccs_ecc_exception(struct gk20a *g, u32 gpc,
nvgpu_log(g, gpu_dbg_intr, nvgpu_log(g, gpu_dbg_intr,
"ecc error count corrected: %d, uncorrected %d", "ecc error count corrected: %d, uncorrected %d",
g->ecc.gr.t19x.gpccs_corrected_err_count.counters[gpc], g->ecc.gr.gpccs_corrected_err_count.counters[gpc],
g->ecc.gr.t19x.gpccs_uncorrected_err_count.counters[gpc]); g->ecc.gr.gpccs_uncorrected_err_count.counters[gpc]);
return ret; return ret;
} }
@@ -2206,9 +2206,9 @@ static void gr_gv11b_handle_fecs_ecc_error(struct gk20a *g, u32 intr)
gk20a_writel(g, gr_fecs_falcon_ecc_status_r(), gk20a_writel(g, gr_fecs_falcon_ecc_status_r(),
gr_fecs_falcon_ecc_status_reset_task_f()); gr_fecs_falcon_ecc_status_reset_task_f());
g->ecc.gr.t19x.fecs_corrected_err_count.counters[0] += g->ecc.gr.fecs_corrected_err_count.counters[0] +=
corrected_delta; corrected_delta;
g->ecc.gr.t19x.fecs_uncorrected_err_count.counters[0] += g->ecc.gr.fecs_uncorrected_err_count.counters[0] +=
uncorrected_delta; uncorrected_delta;
nvgpu_log(g, gpu_dbg_intr, nvgpu_log(g, gpu_dbg_intr,
@@ -2237,8 +2237,8 @@ static void gr_gv11b_handle_fecs_ecc_error(struct gk20a *g, u32 intr)
nvgpu_log(g, gpu_dbg_intr, nvgpu_log(g, gpu_dbg_intr,
"ecc error count corrected: %d, uncorrected %d", "ecc error count corrected: %d, uncorrected %d",
g->ecc.gr.t19x.fecs_corrected_err_count.counters[0], g->ecc.gr.fecs_corrected_err_count.counters[0],
g->ecc.gr.t19x.fecs_uncorrected_err_count.counters[0]); g->ecc.gr.fecs_uncorrected_err_count.counters[0]);
} }
} }
@@ -2323,7 +2323,7 @@ static int gv11b_write_bundle_veid_state(struct gk20a *g, u32 index)
u32 j; u32 j;
u32 num_subctx, err = 0; u32 num_subctx, err = 0;
num_subctx = g->fifo.t19x.max_subctx_count; num_subctx = g->fifo.max_subctx_count;
for (j = 0; j < num_subctx; j++) { for (j = 0; j < num_subctx; j++) {
nvgpu_log_fn(g, "write bundle_address_r for subctx: %d", j); nvgpu_log_fn(g, "write bundle_address_r for subctx: %d", j);

View File

@@ -34,12 +34,6 @@
#define ZBC_STENCIL_CLEAR_FMT_INVAILD 0 #define ZBC_STENCIL_CLEAR_FMT_INVAILD 0
#define ZBC_STENCIL_CLEAR_FMT_U8 1 #define ZBC_STENCIL_CLEAR_FMT_U8 1
struct zbc_s_table {
u32 stencil;
u32 format;
u32 ref_cnt;
};
struct gk20a; struct gk20a;
struct gr_gk20a; struct gr_gk20a;
struct zbc_entry; struct zbc_entry;

View File

@@ -1,7 +1,7 @@
/* /*
* GV11B Graphics * GV11B Graphics
* *
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -23,7 +23,6 @@
*/ */
#include <nvgpu/enabled.h> #include <nvgpu/enabled.h>
#include <nvgpu/enabled_t19x.h>
#include "gk20a/gk20a.h" #include "gk20a/gk20a.h"
#include "gp10b/gp10b.h" #include "gp10b/gp10b.h"

View File

@@ -1,7 +1,7 @@
/* /*
* GV11B LTC * GV11B LTC
* *
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -177,9 +177,9 @@ void gv11b_ltc_isr(struct gk20a *g)
} }
} }
g->ecc.ltc.t19x.l2_cache_corrected_err_count.counters[ltc] += g->ecc.ltc.l2_cache_corrected_err_count.counters[ltc] +=
ltc_corrected; ltc_corrected;
g->ecc.ltc.t19x.l2_cache_uncorrected_err_count.counters[ltc] += g->ecc.ltc.l2_cache_uncorrected_err_count.counters[ltc] +=
ltc_uncorrected; ltc_uncorrected;
} }

View File

@@ -1,7 +1,7 @@
/* /*
* GV11B MMU * GV11B MMU
* *
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -273,7 +273,7 @@ void gv11b_mm_l2_flush(struct gk20a *g, bool invalidate)
u64 gv11b_gpu_phys_addr(struct gk20a *g, u64 gv11b_gpu_phys_addr(struct gk20a *g,
struct nvgpu_gmmu_attrs *attrs, u64 phys) struct nvgpu_gmmu_attrs *attrs, u64 phys)
{ {
if (attrs && attrs->t19x_attrs.l3_alloc) if (attrs && attrs->l3_alloc)
return phys | NVGPU_L3_ALLOC_BIT; return phys | NVGPU_L3_ALLOC_BIT;
return phys; return phys;

View File

@@ -1,7 +1,7 @@
/* /*
* GV11B PMU * GV11B PMU
* *
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -343,8 +343,8 @@ void gv11b_pmu_handle_ext_irq(struct gk20a *g, u32 intr0)
if (uncorrected_overflow) if (uncorrected_overflow)
uncorrected_delta += (0x1UL << pwr_pmu_falcon_ecc_uncorrected_err_count_total_s()); uncorrected_delta += (0x1UL << pwr_pmu_falcon_ecc_uncorrected_err_count_total_s());
g->ecc.eng.t19x.pmu_corrected_err_count.counters[0] += corrected_delta; g->ecc.pmu.pmu_corrected_err_count.counters[0] += corrected_delta;
g->ecc.eng.t19x.pmu_uncorrected_err_count.counters[0] += uncorrected_delta; g->ecc.pmu.pmu_uncorrected_err_count.counters[0] += uncorrected_delta;
nvgpu_log(g, gpu_dbg_intr, nvgpu_log(g, gpu_dbg_intr,
"pmu ecc interrupt intr1: 0x%x", intr1); "pmu ecc interrupt intr1: 0x%x", intr1);
@@ -371,8 +371,8 @@ void gv11b_pmu_handle_ext_irq(struct gk20a *g, u32 intr0)
nvgpu_log(g, gpu_dbg_intr, nvgpu_log(g, gpu_dbg_intr,
"ecc error count corrected: %d, uncorrected %d", "ecc error count corrected: %d, uncorrected %d",
g->ecc.eng.t19x.pmu_corrected_err_count.counters[0], g->ecc.pmu.pmu_corrected_err_count.counters[0],
g->ecc.eng.t19x.pmu_uncorrected_err_count.counters[0]); g->ecc.pmu.pmu_uncorrected_err_count.counters[0]);
} }
} }
} }

View File

@@ -1,7 +1,7 @@
 /*
  * Volta GPU series Subcontext
  *
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -103,9 +103,9 @@ static void gv11b_init_subcontext_pdb(struct channel_gk20a *c,
 	gv11b_subctx_commit_pdb(c, inst_block);
 	gv11b_subctx_commit_valid_mask(c, inst_block);
-	nvgpu_log(g, gpu_dbg_info, " subctx %d instblk set", c->t19x.subctx_id);
+	nvgpu_log(g, gpu_dbg_info, " subctx %d instblk set", c->subctx_id);
 	nvgpu_mem_wr32(g, inst_block, ram_in_engine_wfi_veid_w(),
-		ram_in_engine_wfi_veid_f(c->t19x.subctx_id));
+		ram_in_engine_wfi_veid_f(c->subctx_id));
 }
@@ -206,7 +206,7 @@ void gv11b_subctx_commit_pdb(struct channel_gk20a *c,
 		ram_in_sc_page_dir_base_lo_0_f(pdb_addr_lo);
 	nvgpu_log(g, gpu_dbg_info, " pdb info lo %x hi %x",
 		format_word, pdb_addr_hi);
-	for (subctx_id = 0; subctx_id < f->t19x.max_subctx_count; subctx_id++) {
+	for (subctx_id = 0; subctx_id < f->max_subctx_count; subctx_id++) {
 		lo = ram_in_sc_page_dir_base_vol_0_w() + (4 * subctx_id);
 		hi = ram_in_sc_page_dir_base_hi_0_w() + (4 * subctx_id);
 		nvgpu_mem_wr32(g, inst_block, lo, format_word);


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -142,6 +142,8 @@ struct gk20a;
 /* set if ASPM is enabled; only makes sense for PCI */
 #define NVGPU_SUPPORT_ASPM 62
+/* subcontexts are available */
+#define NVGPU_SUPPORT_TSG_SUBCONTEXTS 63
 /*
  * Must be greater than the largest bit offset in the above list.
  */
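NVGPU_SUPPORT_TSG_SUBCONTEXTS now lives in the common enabled-flags list, so common code can test it with the usual helper instead of a T19x #ifdef. A hedged kernel-tree sketch (not standalone); nvgpu_is_enabled() is assumed to be the query helper declared in this same header:

#include <nvgpu/enabled.h>	/* NVGPU_SUPPORT_TSG_SUBCONTEXTS, nvgpu_is_enabled() */

/* Sketch: refuse a subcontext bind when the chip HAL never advertised support. */
static int check_subctx_support_sketch(struct gk20a *g)
{
	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS))
		return -ENOSYS;
	return 0;
}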


@@ -1,29 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVGPU_ENABLED_T19X_H__
#define __NVGPU_ENABLED_T19X_H__
/* subcontexts are available */
#define NVGPU_SUPPORT_TSG_SUBCONTEXTS 63
#endif


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -29,10 +29,6 @@
 #include <nvgpu/rbtree.h>
 #include <nvgpu/lock.h>
-#ifdef CONFIG_TEGRA_19x_GPU
-#include <nvgpu/gmmu_t19x.h>
-#endif
 /*
  * This is the GMMU API visible to blocks outside of the GMMU. Basically this
  * API supports all the different types of mappings that might be done in the
@@ -180,9 +176,7 @@ struct nvgpu_gmmu_attrs {
 	enum nvgpu_aperture aperture;
 	bool debug;
-#ifdef CONFIG_TEGRA_19x_GPU
-	struct nvgpu_gmmu_attrs_t19x t19x_attrs;
-#endif
+	bool l3_alloc;
 };
 struct gk20a_mmu_level {
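With nvgpu_gmmu_attrs_t19x gone, code that previously called nvgpu_gmmu_add_t19x_attrs() (declared in the deleted header below) can set the member directly when building map attributes. A hedged kernel-tree sketch; ILLUSTRATIVE_MAP_FLAG_L3_ALLOC is a stand-in bit, not a real uapi flag name:

#include <nvgpu/gmmu.h>

#define ILLUSTRATIVE_MAP_FLAG_L3_ALLOC (1U << 0)	/* stand-in, not from the uapi */

/* Sketch: translate a caller-supplied map flag into the folded-in attribute. */
static void fill_map_attrs_sketch(struct nvgpu_gmmu_attrs *attrs, u32 map_flags)
{
	attrs->l3_alloc = (map_flags & ILLUSTRATIVE_MAP_FLAG_L3_ALLOC) != 0U;
}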


@@ -1,34 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVGPU_GMMU_T19X_H__
#define __NVGPU_GMMU_T19X_H__
struct nvgpu_gmmu_attrs;
struct nvgpu_gmmu_attrs_t19x {
bool l3_alloc;
};
void nvgpu_gmmu_add_t19x_attrs(struct nvgpu_gmmu_attrs *attrs, u32 flags);
#endif


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -23,9 +23,6 @@
 #define __NVGPU_IO_H__
 #include <nvgpu/types.h>
-#ifdef CONFIG_TEGRA_19x_GPU
-#include <nvgpu/io_t19x.h>
-#endif
 /* Legacy defines - should be removed once everybody uses nvgpu_* */
 #define gk20a_writel nvgpu_writel


@@ -1,29 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVGPU_IO_T19X_H__
#define __NVGPU_IO_T19X_H__
#ifdef __KERNEL__
#include "linux/io_t19x.h"
#endif
#endif


@@ -1,7 +1,5 @@
 /*
- * NVIDIA T19x GR
- *
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -21,9 +19,9 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#ifndef _NVGPU_GR_T19X_H_
-#define _NVGPU_GR_T19X_H_
-#include "gv11b/gr_gv11b.h"
+#ifndef __NVGPU_IO_USERMODE_H__
+#define __NVGPU_IO_USERMODE_H__
+void nvgpu_usermode_writel(struct gk20a *g, u32 r, u32 v);
 #endif
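nvgpu_usermode_writel() is the chip-independent wrapper for the usermode register aperture, replacing the gv11b-only gv11b_usermode_writel() declared in the deleted Linux-specific header below. A hedged kernel-tree sketch of a typical use, ringing the work-submit doorbell; the register offset and token are placeholders, not values taken from this commit:

#include <nvgpu/io_usermode.h>	/* nvgpu_usermode_writel() */

#define SKETCH_DOORBELL_R 0x90U	/* placeholder usermode doorbell offset */

/* Sketch: one 32-bit write through the usermode aperture kicks the channel. */
static void ring_doorbell_sketch(struct gk20a *g, u32 work_submit_token)
{
	nvgpu_usermode_writel(g, SKETCH_DOORBELL_R, work_submit_token);
}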


@@ -1,26 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __NVGPU_IO_T19X_LINUX_H__
#define __NVGPU_IO_T19X_LINUX_H__
#include <nvgpu/types.h>
struct gk20a;
void gv11b_usermode_writel(struct gk20a *g, u32 r, u32 v);
#endif


@@ -1,26 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef NVGPU_OS_LINUX_T19X_H
#define NVGPU_OS_LINUX_T19X_H
#include <linux/compiler.h>
struct nvgpu_os_linux_t19x {
void __iomem *usermode_regs;
void __iomem *usermode_regs_saved;
};
#endif


@@ -1,4 +1,6 @@
 /*
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * to deal in the Software without restriction, including without limitation
@@ -126,13 +128,12 @@ struct mm_gk20a {
 	struct nvgpu_mem bar2_desc;
-#ifdef CONFIG_TEGRA_19x_GPU
 	struct nvgpu_mem hw_fault_buf[FAULT_TYPE_NUM];
 	unsigned int hw_fault_buf_status[FAULT_TYPE_NUM];
 	struct mmu_fault_info *fault_info[FAULT_TYPE_NUM];
 	struct nvgpu_mutex hub_isr_mutex;
 	u32 hub_intr_types;
-#endif
 	/*
 	 * Separate function to cleanup the CE since it requires a channel to
 	 * be closed which must happen before fifo cleanup.
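Since the hub/replayable-fault bookkeeping is now unconditional in struct mm_gk20a, chips without a fault hub simply leave it idle. A hedged kernel-tree sketch that resets only the fields visible in this hunk (the status value's meaning is assumed):

/* Sketch: clear the (now always-present) MMU fault bookkeeping. */
static void reset_hw_fault_state_sketch(struct mm_gk20a *mm)
{
	unsigned int i;

	for (i = 0U; i < FAULT_TYPE_NUM; i++) {
		mm->hw_fault_buf_status[i] = 0U;	/* assumed: 0 == not configured */
		mm->fault_info[i] = NULL;
	}
	mm->hub_intr_types = 0U;
}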


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -77,5 +77,19 @@ struct sync_fence *nvgpu_nvhost_sync_create_fence(
 	struct nvgpu_nvhost_dev *nvhost_dev,
 	u32 id, u32 thresh, const char *name);
 #endif /* CONFIG_SYNC */
+#ifdef CONFIG_TEGRA_T19X_GRHOST
+int nvgpu_nvhost_syncpt_unit_interface_get_aperture(
+		struct nvgpu_nvhost_dev *nvhost_dev,
+		u64 *base, size_t *size);
+u32 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(u32 syncpt_id);
+#else
+static inline int nvgpu_nvhost_syncpt_unit_interface_get_aperture(
+		struct nvgpu_nvhost_dev *nvhost_dev,
+		u64 *base, size_t *size) { return -EINVAL; }
+static inline u32 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(u32 syncpt_id) {
+	return 0;
+}
+#endif
 #endif /* CONFIG_TEGRA_GK20A_NVHOST */
 #endif /* __NVGPU_NVHOST_H__ */
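The syncpoint unit interface exposes host1x syncpoints to the GPU through an MMIO aperture; the two declarations above report the aperture base/size and the per-syncpoint offset inside it. A hedged sketch of how a caller might combine them (error handling trimmed):

#include <nvgpu/nvhost.h>

/* Sketch: compute the bus address backing one syncpoint in the unit aperture. */
static int syncpt_unit_addr_sketch(struct nvgpu_nvhost_dev *nvhost_dev,
				   u32 syncpt_id, u64 *syncpt_addr)
{
	u64 base;
	size_t size;
	int err;

	err = nvgpu_nvhost_syncpt_unit_interface_get_aperture(nvhost_dev,
							       &base, &size);
	if (err != 0)
		return err;	/* -EINVAL when CONFIG_TEGRA_T19X_GRHOST is off */

	*syncpt_addr = base +
		nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(syncpt_id);
	return 0;
}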


@@ -1,37 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVGPU_NVHOST_T19X_H__
#define __NVGPU_NVHOST_T19X_H__
#ifdef CONFIG_TEGRA_GK20A_NVHOST
#include <nvgpu/types.h>
struct nvgpu_nvhost_dev;
int nvgpu_nvhost_syncpt_unit_interface_get_aperture(
struct nvgpu_nvhost_dev *nvhost_dev,
u64 *base, size_t *size);
u32 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(u32 syncpt_id);
#endif
#endif /* __NVGPU_NVHOST_T19X_H__ */


@@ -1,47 +0,0 @@
/*
* NVIDIA GPU ID functions, definitions.
*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NVGPU_GPUID_T19X_H_
#define _NVGPU_GPUID_T19X_H_
#define NVGPU_GPUID_GV11B 0x0000015B
#define NVGPU_GPUID_GV100 0x00000140
#define NVGPU_COMPAT_TEGRA_GV11B "nvidia,gv11b"
#define NVGPU_COMPAT_GENERIC_GV11B "nvidia,generic-gv11b"
#define TEGRA_19x_GPUID NVGPU_GPUID_GV11B
#define TEGRA_19x_GPUID_HAL gv11b_init_hal
#define TEGRA_19x_GPU_COMPAT_TEGRA NVGPU_COMPAT_TEGRA_GV11B
#define TEGRA_19x_GPU_COMPAT_GENERIC NVGPU_COMPAT_GENERIC_GV11B
#define BIGGPU_19x_GPUID NVGPU_GPUID_GV100
#define BIGGPU_19x_GPUID_HAL gv100_init_hal
struct gpu_ops;
extern int gv11b_init_hal(struct gk20a *);
extern int gv100_init_hal(struct gk20a *);
extern struct gk20a_platform t19x_gpu_tegra_platform;
#endif


@@ -1,36 +0,0 @@
/*
* NVIDIA T19x TSG
*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVGPU_TSG_T19X_H__
#define __NVGPU_TSG_T19X_H__
#include <nvgpu/types.h>
struct tsg_t19x {
u32 num_active_tpcs;
u8 tpc_pg_enabled;
bool tpc_num_initialized;
};
#endif


@@ -1,53 +0,0 @@
/*
* NVGPU Public Interface Header
*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
/* This file is meant to extend nvgpu.h, not replace it
* as such, be sure that nvgpu.h is actually the file performing the
* inclusion, to the extent that's possible.
*/
#ifndef _UAPI__LINUX_NVGPU_IOCTL_H
# error "This file is to be included within nvgpu.h only."
#endif
#ifndef _UAPI__LINUX_NVGPU_T19X_IOCTL_H_
#define _UAPI__LINUX_NVGPU_T19X_IOCTL_H_
#define NVGPU_GPU_ARCH_GV110 0x00000150
#define NVGPU_GPU_ARCH_GV100 0x00000140
#define NVGPU_GPU_IMPL_GV11B 0x0000000B
#define NVGPU_GPU_IMPL_GV100 0x00000000
/* subcontexts are available */
#define NVGPU_GPU_FLAGS_SUPPORT_TSG_SUBCONTEXTS (1ULL << 22)
struct nvgpu_tsg_bind_channel_ex_args {
/* in: channel fd */
__s32 channel_fd;
/* in: VEID in Volta */
__u32 subcontext_id;
__u32 num_active_tpcs;
__u8 tpc_pg_enabled;
__u8 reserved[11];
};
#define NVGPU_TSG_IOCTL_BIND_CHANNEL_EX \
_IOWR(NVGPU_TSG_IOCTL_MAGIC, 11, struct nvgpu_tsg_bind_channel_ex_args)
#define NVGPU_TSG_IOCTL_MAX NVGPU_TSG_IOCTL_BIND_CHANNEL_EX
#define NVGPU_TSG_IOCTL_MAX_ARG sizeof(struct nvgpu_tsg_bind_channel_ex_args)
#endif /* _UAPI__LINUX_NVGPU_T19X_IOCTL_H_ */


@@ -84,6 +84,8 @@ struct nvgpu_gpu_zbc_query_table_args {
 #define NVGPU_GPU_ARCH_GK100 0x000000E0
 #define NVGPU_GPU_ARCH_GM200 0x00000120
 #define NVGPU_GPU_ARCH_GP100 0x00000130
+#define NVGPU_GPU_ARCH_GV110 0x00000150
+#define NVGPU_GPU_ARCH_GV100 0x00000140
 #define NVGPU_GPU_IMPL_GK20A 0x0000000A
 #define NVGPU_GPU_IMPL_GM204 0x00000004
@@ -93,6 +95,8 @@ struct nvgpu_gpu_zbc_query_table_args {
 #define NVGPU_GPU_IMPL_GP104 0x00000004
 #define NVGPU_GPU_IMPL_GP106 0x00000006
 #define NVGPU_GPU_IMPL_GP10B 0x0000000B
+#define NVGPU_GPU_IMPL_GV11B 0x0000000B
+#define NVGPU_GPU_IMPL_GV100 0x00000000
 #ifdef CONFIG_TEGRA_19x_GPU
 #include <linux/nvgpu-t19x.h>
@@ -142,6 +146,8 @@ struct nvgpu_gpu_zbc_query_table_args {
 #define NVGPU_GPU_FLAGS_SUPPORT_IO_COHERENCE (1ULL << 20)
 /* NVGPU_SUBMIT_GPFIFO_FLAGS_RESCHEDULE_RUNLIST is available */
 #define NVGPU_GPU_FLAGS_SUPPORT_RESCHEDULE_RUNLIST (1ULL << 21)
+/* subcontexts are available */
+#define NVGPU_GPU_FLAGS_SUPPORT_TSG_SUBCONTEXTS (1ULL << 22)
 /* Direct PTE kind control is supported (map_buffer_ex) */
 #define NVGPU_GPU_FLAGS_SUPPORT_MAP_DIRECT_KIND_CTRL (1ULL << 23)
 /* NVGPU_GPU_IOCTL_SET_DETERMINISTIC_OPTS is available */
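Userspace is expected to discover subcontext support through this characteristics flag rather than a chip ID check. A hedged userspace sketch, assuming the flags word has already been read back via NVGPU_GPU_IOCTL_GET_CHARACTERISTICS (that call is not shown):

#include <stdbool.h>
#include <stdint.h>
#include <linux/nvgpu.h>	/* assumed install path of this uapi header */

/* Sketch: test the capability bit from the characteristics flags word. */
static bool gpu_supports_tsg_subcontexts_sketch(uint64_t characteristics_flags)
{
	return (characteristics_flags &
		NVGPU_GPU_FLAGS_SUPPORT_TSG_SUBCONTEXTS) != 0ULL;
}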
@@ -1008,6 +1014,17 @@ struct nvgpu_gpu_set_event_filter_args {
 #define NVGPU_TSG_IOCTL_MAGIC 'T'
+struct nvgpu_tsg_bind_channel_ex_args {
+	/* in: channel fd */
+	__s32 channel_fd;
+	/* in: VEID in Volta */
+	__u32 subcontext_id;
+	__u32 num_active_tpcs;
+	__u8 tpc_pg_enabled;
+	__u8 reserved[11];
+};
 #define NVGPU_TSG_IOCTL_BIND_CHANNEL \
 	_IOW(NVGPU_TSG_IOCTL_MAGIC, 1, int)
 #define NVGPU_TSG_IOCTL_UNBIND_CHANNEL \
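NVGPU_TSG_IOCTL_BIND_CHANNEL_EX (defined in the next hunk) is now part of the unconditional TSG interface. A hedged userspace sketch of binding a channel into a specific Volta subcontext; the fds are assumed to come from opening the TSG and channel devices elsewhere, and error handling is trimmed:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>	/* assumed install path of this uapi header */

/* Sketch: bind an already-open channel into a TSG with an explicit VEID. */
static int tsg_bind_channel_ex_sketch(int tsg_fd, int channel_fd, __u32 veid)
{
	struct nvgpu_tsg_bind_channel_ex_args args;

	memset(&args, 0, sizeof(args));
	args.channel_fd = channel_fd;
	args.subcontext_id = veid;	/* VEID in Volta */

	return ioctl(tsg_fd, NVGPU_TSG_IOCTL_BIND_CHANNEL_EX, &args);
}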
@@ -1026,19 +1043,12 @@ struct nvgpu_gpu_set_event_filter_args {
 	_IOW(NVGPU_TSG_IOCTL_MAGIC, 9, struct nvgpu_timeslice_args)
 #define NVGPU_IOCTL_TSG_GET_TIMESLICE \
 	_IOR(NVGPU_TSG_IOCTL_MAGIC, 10, struct nvgpu_timeslice_args)
+#define NVGPU_TSG_IOCTL_BIND_CHANNEL_EX \
+	_IOWR(NVGPU_TSG_IOCTL_MAGIC, 11, struct nvgpu_tsg_bind_channel_ex_args)
-#ifdef CONFIG_TEGRA_19x_GPU
-#define NVGPU_TSG_IOCTL_MAX_ARG_SIZE \
-	NVGPU_TSG_IOCTL_MAX_ARG
-#define NVGPU_TSG_IOCTL_LAST \
-	_IOC_NR(NVGPU_TSG_IOCTL_MAX)
-#else
-#define NVGPU_TSG_IOCTL_MAX_ARG_SIZE \
-	sizeof(struct nvgpu_event_id_ctrl_args)
-#define NVGPU_TSG_IOCTL_LAST \
-	_IOC_NR(NVGPU_IOCTL_TSG_GET_TIMESLICE)
-#endif
+#define NVGPU_TSG_IOCTL_MAX_ARG_SIZE \
+	sizeof(struct nvgpu_tsg_bind_channel_ex_args)
+#define NVGPU_TSG_IOCTL_LAST \
+	_IOC_NR(NVGPU_TSG_IOCTL_BIND_CHANNEL_EX)
 /*
  * /dev/nvhost-dbg-gpu device