diff --git a/drivers/gpu/nvgpu/Makefile b/drivers/gpu/nvgpu/Makefile
index 6b3c9e164..f7bbd7442 100644
--- a/drivers/gpu/nvgpu/Makefile
+++ b/drivers/gpu/nvgpu/Makefile
@@ -95,6 +95,7 @@ nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
vgpu/vgpu.o \
vgpu/dbg_vgpu.o \
vgpu/fecs_trace_vgpu.o \
+ vgpu/tsg_vgpu.o \
vgpu/gk20a/vgpu_hal_gk20a.o \
vgpu/gk20a/vgpu_gr_gk20a.o \
vgpu/gm20b/vgpu_hal_gm20b.o \
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index a73a314ca..02de13919 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -952,7 +952,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch)
unbind:
if (gk20a_is_channel_marked_as_tsg(ch))
- gk20a_tsg_unbind_channel(ch);
+ g->ops.fifo.tsg_unbind_channel(ch);
g->ops.fifo.unbind_channel(ch);
g->ops.fifo.free_inst(g, ch);
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 92536b360..134a24804 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -2015,7 +2015,7 @@ int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch)
int err;
if (gk20a_is_channel_marked_as_tsg(ch))
- err = gk20a_fifo_preempt_tsg(ch->g, ch->tsgid);
+ err = g->ops.fifo.preempt_tsg(ch->g, ch->tsgid);
else
err = g->ops.fifo.preempt_channel(ch->g, ch->hw_chid);
@@ -2754,6 +2754,7 @@ void gk20a_init_fifo(struct gpu_ops *gops)
{
gk20a_init_channel(gops);
gops->fifo.preempt_channel = gk20a_fifo_preempt_channel;
+ gops->fifo.preempt_tsg = gk20a_fifo_preempt_tsg;
gops->fifo.update_runlist = gk20a_fifo_update_runlist;
gops->fifo.trigger_mmu_fault = gk20a_fifo_trigger_mmu_fault;
gops->fifo.apply_pb_timeout = gk20a_fifo_apply_pb_timeout;
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 0e13fba32..6f47f2281 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -324,6 +324,7 @@ struct gpu_ops {
u32 gpfifo_entries, u32 flags);
int (*resetup_ramfc)(struct channel_gk20a *c);
int (*preempt_channel)(struct gk20a *g, u32 hw_chid);
+ int (*preempt_tsg)(struct gk20a *g, u32 tsgid);
int (*update_runlist)(struct gk20a *g, u32 runlist_id,
u32 hw_chid, bool add,
bool wait_for_finish);
@@ -345,6 +346,9 @@ struct gpu_ops {
void (*device_info_data_parse)(struct gk20a *g,
u32 table_entry, u32 *inst_id,
u32 *pri_base, u32 *fault_id);
+ int (*tsg_bind_channel)(struct tsg_gk20a *tsg,
+ struct channel_gk20a *ch);
+ int (*tsg_unbind_channel)(struct channel_gk20a *ch);
} fifo;
struct pmu_v {
/*used for change of enum zbc update cmd id from ver 0 to ver1*/
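
Everything above routes TSG control through the per-chip ops table instead of direct gk20a_* calls. A minimal sketch of the dispatch pattern this enables, mirroring the gk20a_fifo_preempt() change above (the helper name here is hypothetical):

    /*
     * Sketch only: one call site now serves native gk20a/gm20b and
     * vgpu alike, because each HAL installs its own implementations.
     */
    static int preempt_tsg_or_channel(struct gk20a *g,
                                      struct channel_gk20a *ch)
    {
            if (gk20a_is_channel_marked_as_tsg(ch))
                    return g->ops.fifo.preempt_tsg(g, ch->tsgid);

            return g->ops.fifo.preempt_channel(g, ch->hw_chid);
    }
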
diff --git a/drivers/gpu/nvgpu/gk20a/hal_gk20a.c b/drivers/gpu/nvgpu/gk20a/hal_gk20a.c
index fb3b3e55d..5112af556 100644
--- a/drivers/gpu/nvgpu/gk20a/hal_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/hal_gk20a.c
@@ -30,6 +30,7 @@
#include "regops_gk20a.h"
#include "therm_gk20a.h"
#include "hw_proj_gk20a.h"
+#include "tsg_gk20a.h"
static struct gpu_ops gk20a_ops = {
.clock_gating = {
@@ -142,6 +143,7 @@ int gk20a_init_hal(struct gk20a *g)
gk20a_init_regops(gops);
gk20a_init_debug_ops(gops);
gk20a_init_therm_ops(gops);
+ gk20a_init_tsg_ops(gops);
gops->name = "gk20a";
gops->chip_init_gpu_characteristics = gk20a_init_gpu_characteristics;
gops->get_litter_value = gk20a_get_litter_value;
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index 5b77bf805..1e4793956 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -37,13 +37,12 @@ bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch)
int gk20a_enable_tsg(struct tsg_gk20a *tsg)
{
+ struct gk20a *g = tsg->g;
struct channel_gk20a *ch;
mutex_lock(&tsg->ch_list_lock);
list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
- gk20a_writel(ch->g, ccsr_channel_r(ch->hw_chid),
- gk20a_readl(ch->g, ccsr_channel_r(ch->hw_chid))
- | ccsr_channel_enable_set_true_f());
+ g->ops.fifo.enable_channel(ch);
}
mutex_unlock(&tsg->ch_list_lock);
@@ -52,13 +51,12 @@ int gk20a_enable_tsg(struct tsg_gk20a *tsg)
int gk20a_disable_tsg(struct tsg_gk20a *tsg)
{
+ struct gk20a *g = tsg->g;
struct channel_gk20a *ch;
mutex_lock(&tsg->ch_list_lock);
list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
- gk20a_writel(ch->g, ccsr_channel_r(ch->hw_chid),
- gk20a_readl(ch->g, ccsr_channel_r(ch->hw_chid))
- | ccsr_channel_enable_clr_true_f());
+ g->ops.fifo.disable_channel(ch);
}
mutex_unlock(&tsg->ch_list_lock);
@@ -80,31 +78,43 @@ static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
return false;
}
+static int gk20a_tsg_bind_channel_fd(struct tsg_gk20a *tsg, int ch_fd)
+{
+ struct file *f = fget(ch_fd); /* keep the channel file alive across the bind */
+ struct channel_gk20a *ch;
+ int err;
+
+ if (!f)
+ return -EINVAL;
+
+ ch = gk20a_get_channel_from_file(ch_fd);
+ if (!ch) {
+ err = -EINVAL;
+ goto out;
+ }
+ err = ch->g->ops.fifo.tsg_bind_channel(tsg, ch);
+out:
+ fput(f); /* release the fd reference on every path */
+ return err;
+}
+
/*
* API to mark channel as part of TSG
*
* Note that the channel is not runnable when we bind it to a TSG
*/
-static int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, int ch_fd)
+int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
+ struct channel_gk20a *ch)
{
- struct file *f = fget(ch_fd);
- struct channel_gk20a *ch;
-
gk20a_dbg_fn("");
- ch = gk20a_get_channel_from_file(ch_fd);
- if (!ch)
- return -EINVAL;
-
/* check if channel is already bound to some TSG */
if (gk20a_is_channel_marked_as_tsg(ch)) {
- fput(f);
return -EINVAL;
}
/* channel cannot be bound to TSG if it is already active */
if (gk20a_is_channel_active(tsg->g, ch)) {
- fput(f);
return -EINVAL;
}
@@ -119,8 +129,6 @@ static int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, int ch_fd)
gk20a_dbg(gpu_dbg_fn, "BIND tsg:%d channel:%d\n",
tsg->tsgid, ch->hw_chid);
- fput(f);
-
gk20a_dbg_fn("done");
return 0;
}
@@ -494,7 +502,7 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
err = -EINVAL;
break;
}
- err = gk20a_tsg_bind_channel(tsg, ch_fd);
+ err = gk20a_tsg_bind_channel_fd(tsg, ch_fd);
break;
}
@@ -539,7 +547,7 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
return err;
}
/* preempt TSG */
- err = gk20a_fifo_preempt_tsg(g, tsg->tsgid);
+ err = g->ops.fifo.preempt_tsg(g, tsg->tsgid);
gk20a_idle(g->dev);
break;
}
@@ -600,3 +608,9 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
return err;
}
+
+void gk20a_init_tsg_ops(struct gpu_ops *gops)
+{
+ gops->fifo.tsg_bind_channel = gk20a_tsg_bind_channel;
+ gops->fifo.tsg_unbind_channel = gk20a_tsg_unbind_channel;
+}
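
gk20a_init_tsg_ops() only fills the two TSG slots, so it can run after the other fifo init helpers in any order. A hedged sketch of how a chip HAL composes its table (init_chip_hal is a hypothetical name; on vgpu, vgpu_init_tsg_ops() later overwrites the same two slots):

    static void init_chip_hal(struct gpu_ops *gops)
    {
            gk20a_init_fifo(gops);    /* preempt_channel/preempt_tsg */
            gk20a_init_tsg_ops(gops); /* tsg_bind/unbind_channel */
    }
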
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
index 9a8bfadac..14ead5c06 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
@@ -28,8 +28,7 @@ long gk20a_tsg_dev_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid);
-
-int gk20a_tsg_unbind_channel(struct channel_gk20a *ch);
+void gk20a_init_tsg_ops(struct gpu_ops *gops);
struct tsg_gk20a {
struct gk20a *g;
@@ -59,6 +58,9 @@ struct tsg_gk20a {
int gk20a_enable_tsg(struct tsg_gk20a *tsg);
int gk20a_disable_tsg(struct tsg_gk20a *tsg);
+int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
+ struct channel_gk20a *ch);
+int gk20a_tsg_unbind_channel(struct channel_gk20a *ch);
void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
int event_id);
diff --git a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
index eaa22dc26..bb873bcca 100644
--- a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
@@ -142,6 +142,7 @@ void gm20b_init_fifo(struct gpu_ops *gops)
gops->fifo.channel_set_timeslice = gk20a_channel_set_timeslice;
gops->fifo.preempt_channel = gk20a_fifo_preempt_channel;
+ gops->fifo.preempt_tsg = gk20a_fifo_preempt_tsg;
gops->fifo.update_runlist = gk20a_fifo_update_runlist;
gops->fifo.trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault;
gops->fifo.wait_engine_idle = gk20a_fifo_wait_engine_idle;
diff --git a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
index df25be5ef..c01134987 100644
--- a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
@@ -211,6 +211,7 @@ int gm20b_init_hal(struct gk20a *g)
gm20b_init_debug_ops(gops);
gm20b_init_cde_ops(gops);
gm20b_init_therm_ops(gops);
+ gk20a_init_tsg_ops(gops);
gops->name = "gm20b";
gops->chip_init_gpu_characteristics = gk20a_init_gpu_characteristics;
gops->get_litter_value = gm20b_get_litter_value;
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index 8fcc7cc16..fb19db4af 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -410,6 +410,30 @@ static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
return err;
}
+static int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
+{
+ struct gk20a_platform *platform = gk20a_get_platform(g->dev);
+ struct tegra_vgpu_cmd_msg msg;
+ struct tegra_vgpu_tsg_preempt_params *p =
+ &msg.params.tsg_preempt;
+ int err;
+
+ gk20a_dbg_fn("");
+
+ msg.cmd = TEGRA_VGPU_CMD_TSG_PREEMPT;
+ msg.handle = platform->virt_handle;
+ p->tsg_id = tsgid;
+ err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+ err = err ? err : msg.ret;
+
+ if (err) {
+ gk20a_err(dev_from_gk20a(g),
+ "preempt tsg %u failed\n", tsgid);
+ }
+
+ return err;
+}
+
static int vgpu_submit_runlist(u64 handle, u8 runlist_id, u16 *runlist,
u32 num_entries)
{
@@ -680,6 +704,7 @@ void vgpu_init_fifo_ops(struct gpu_ops *gops)
gops->fifo.free_inst = vgpu_channel_free_inst;
gops->fifo.setup_ramfc = vgpu_channel_setup_ramfc;
gops->fifo.preempt_channel = vgpu_fifo_preempt_channel;
+ gops->fifo.preempt_tsg = vgpu_fifo_preempt_tsg;
gops->fifo.update_runlist = vgpu_fifo_update_runlist;
gops->fifo.wait_engine_idle = vgpu_fifo_wait_engine_idle;
gops->fifo.channel_set_priority = vgpu_channel_set_priority;
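
vgpu_fifo_preempt_tsg() follows the driver's usual RPC idiom: fill cmd, handle, and one member of the params union, issue a synchronous vgpu_comm_sendrecv(), then fold the transport error and the server-side msg.ret into a single return value. Condensed, the idiom is:

    err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
    err = err ? err : msg.ret; /* transport error takes precedence */
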
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index b9490ac92..f395ac1e7 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -445,6 +445,26 @@ static int vgpu_gr_ch_bind_gr_ctx(struct channel_gk20a *c)
return err;
}
+static int vgpu_gr_tsg_bind_gr_ctx(struct tsg_gk20a *tsg)
+{
+ struct gk20a_platform *platform = gk20a_get_platform(tsg->g->dev);
+ struct gr_ctx_desc *gr_ctx = tsg->tsg_gr_ctx;
+ struct tegra_vgpu_cmd_msg msg = {0};
+ struct tegra_vgpu_tsg_bind_gr_ctx_params *p =
+ &msg.params.tsg_bind_gr_ctx;
+ int err;
+
+ msg.cmd = TEGRA_VGPU_CMD_TSG_BIND_GR_CTX;
+ msg.handle = platform->virt_handle;
+ p->tsg_id = tsg->tsgid;
+ p->gr_ctx_handle = gr_ctx->virt_ctx;
+ err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+ err = err ? err : msg.ret;
+ WARN_ON(err);
+
+ return err;
+}
+
static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
struct nvgpu_alloc_obj_ctx_args *args)
{
@@ -472,32 +492,58 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
}
c->obj_class = args->class_num;
- /* FIXME: add TSG support */
if (gk20a_is_channel_marked_as_tsg(c))
tsg = &f->tsg[c->tsgid];
- /* allocate gr ctx buffer */
- if (!ch_ctx->gr_ctx) {
- err = g->ops.gr.alloc_gr_ctx(g, &c->ch_ctx.gr_ctx,
- c->vm,
- args->class_num,
- args->flags);
- if (!err)
- err = vgpu_gr_ch_bind_gr_ctx(c);
-
- if (err) {
+ if (!tsg) {
+ /* allocate gr ctx buffer */
+ if (!ch_ctx->gr_ctx) {
+ err = g->ops.gr.alloc_gr_ctx(g, &c->ch_ctx.gr_ctx,
+ c->vm,
+ args->class_num,
+ args->flags);
+ if (!err)
+ err = vgpu_gr_ch_bind_gr_ctx(c);
+ if (err) {
+ gk20a_err(dev_from_gk20a(g),
+ "fail to allocate gr ctx buffer");
+ goto out;
+ }
+ } else {
+ /*TBD: needs to be more subtle about which is
+ * being allocated as some are allowed to be
+ * allocated along same channel */
gk20a_err(dev_from_gk20a(g),
- "fail to allocate gr ctx buffer");
+ "too many classes alloc'd on same channel");
+ err = -EINVAL;
goto out;
}
} else {
- /*TBD: needs to be more subtle about which is
- * being allocated as some are allowed to be
- * allocated along same channel */
- gk20a_err(dev_from_gk20a(g),
- "too many classes alloc'd on same channel");
- err = -EINVAL;
- goto out;
+ if (!tsg->tsg_gr_ctx) {
+ tsg->vm = c->vm;
+ gk20a_vm_get(tsg->vm);
+ err = g->ops.gr.alloc_gr_ctx(g, &tsg->tsg_gr_ctx,
+ c->vm,
+ args->class_num,
+ args->flags);
+ if (!err)
+ err = vgpu_gr_tsg_bind_gr_ctx(tsg);
+ if (err) {
+ gk20a_err(dev_from_gk20a(g),
+ "fail to allocate TSG gr ctx buffer, err=%d", err);
+ gk20a_vm_put(tsg->vm);
+ tsg->vm = NULL;
+ goto out;
+ }
+ }
+
+ ch_ctx->gr_ctx = tsg->tsg_gr_ctx;
+ err = vgpu_gr_ch_bind_gr_ctx(c);
+ if (err) {
+ gk20a_err(dev_from_gk20a(g),
+ "fail to bind gr ctx buffer");
+ goto out;
+ }
}
/* commit gr ctx buffer */
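
The restructuring encodes an ownership rule: a TSG channel's gr ctx is allocated once on the TSG (by the first member channel to allocate an object class) and shared by every member, while a bare channel keeps a private one. A hypothetical helper, not part of the patch, just to make the rule explicit:

    static struct gr_ctx_desc *gr_ctx_of(struct channel_gk20a *c)
    {
            struct fifo_gk20a *f = &c->g->fifo;

            if (gk20a_is_channel_marked_as_tsg(c))
                    /* TSG-owned; shared by all member channels */
                    return f->tsg[c->tsgid].tsg_gr_ctx;

            /* channel-owned; freed with the channel */
            return c->ch_ctx.gr_ctx;
    }
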
diff --git a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
new file mode 100644
index 000000000..9245693d2
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include "gk20a/gk20a.h"
+#include "gk20a/channel_gk20a.h"
+#include "gk20a/platform_gk20a.h"
+#include "gk20a/tsg_gk20a.h"
+#include "vgpu.h"
+
+static int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg,
+ struct channel_gk20a *ch)
+{
+ struct gk20a_platform *platform = gk20a_get_platform(tsg->g->dev);
+ struct tegra_vgpu_cmd_msg msg = {};
+ struct tegra_vgpu_tsg_bind_unbind_channel_params *p =
+ &msg.params.tsg_bind_unbind_channel;
+ int err;
+
+ gk20a_dbg_fn("");
+
+ err = gk20a_tsg_bind_channel(tsg, ch);
+ if (err)
+ return err;
+
+ msg.cmd = TEGRA_VGPU_CMD_TSG_BIND_CHANNEL;
+ msg.handle = platform->virt_handle;
+ p->tsg_id = tsg->tsgid;
+ p->ch_handle = ch->virt_ctx;
+ err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+ err = err ? err : msg.ret;
+ if (err) {
+ gk20a_err(dev_from_gk20a(tsg->g),
+ "vgpu_tsg_bind_channel failed, ch %d tsgid %d",
+ ch->hw_chid, tsg->tsgid);
+ gk20a_tsg_unbind_channel(ch);
+ }
+
+ return err;
+}
+
+static int vgpu_tsg_unbind_channel(struct channel_gk20a *ch)
+{
+ struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
+ struct tegra_vgpu_cmd_msg msg = {};
+ struct tegra_vgpu_tsg_bind_unbind_channel_params *p =
+ &msg.params.tsg_bind_unbind_channel;
+ int err;
+
+ gk20a_dbg_fn("");
+
+ err = gk20a_tsg_unbind_channel(ch);
+ if (err)
+ return err;
+
+ msg.cmd = TEGRA_VGPU_CMD_TSG_UNBIND_CHANNEL;
+ msg.handle = platform->virt_handle;
+ p->ch_handle = ch->virt_ctx;
+ err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+ err = err ? err : msg.ret;
+ WARN_ON(err);
+
+ return err;
+}
+
+void vgpu_init_tsg_ops(struct gpu_ops *gops)
+{
+ gops->fifo.tsg_bind_channel = vgpu_tsg_bind_channel;
+ gops->fifo.tsg_unbind_channel = vgpu_tsg_unbind_channel;
+}
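
Both vgpu ops deliberately wrap the native bookkeeping rather than replace it: bind updates the common SW state first and unwinds it with gk20a_tsg_unbind_channel() if the server rejects the RPC, while unbind tears down SW state first and merely WARN_ON()s an RPC failure, since there is no sensible recovery at that point.
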
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu.c b/drivers/gpu/nvgpu/vgpu/vgpu.c
index 3c06652b7..4948d2464 100644
--- a/drivers/gpu/nvgpu/vgpu/vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/vgpu.c
@@ -268,6 +268,7 @@ void vgpu_init_hal_common(struct gk20a *g)
vgpu_init_mm_ops(gops);
vgpu_init_debug_ops(gops);
vgpu_init_fecs_trace_ops(gops);
+ vgpu_init_tsg_ops(gops);
gops->chip_init_gpu_characteristics = gk20a_init_gpu_characteristics;
}
@@ -340,8 +341,6 @@ int vgpu_pm_finalize_poweron(struct device *dev)
goto done;
}
- g->gpu_characteristics.flags &= ~NVGPU_GPU_FLAGS_SUPPORT_TSG;
-
gk20a_ctxsw_trace_init(g);
gk20a_channel_resume(g);
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu.h b/drivers/gpu/nvgpu/vgpu/vgpu.h
index 32f4b1108..fdd0a54cd 100644
--- a/drivers/gpu/nvgpu/vgpu/vgpu.h
+++ b/drivers/gpu/nvgpu/vgpu/vgpu.h
@@ -1,7 +1,7 @@
/*
* Virtualized GPU Interfaces
*
- * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -47,6 +47,7 @@ void vgpu_init_gr_ops(struct gpu_ops *gops);
void vgpu_init_ltc_ops(struct gpu_ops *gops);
void vgpu_init_mm_ops(struct gpu_ops *gops);
void vgpu_init_debug_ops(struct gpu_ops *gops);
+void vgpu_init_tsg_ops(struct gpu_ops *gops);
int vgpu_init_mm_support(struct gk20a *g);
int vgpu_init_gr_support(struct gk20a *g);
int vgpu_init_fifo_support(struct gk20a *g);
diff --git a/include/linux/tegra_vgpu.h b/include/linux/tegra_vgpu.h
index 1d195efde..bdaabf29a 100644
--- a/include/linux/tegra_vgpu.h
+++ b/include/linux/tegra_vgpu.h
@@ -88,6 +88,9 @@ enum {
TEGRA_VGPU_CMD_GR_CTX_FREE,
TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTX,
TEGRA_VGPU_CMD_TSG_BIND_GR_CTX,
+ TEGRA_VGPU_CMD_TSG_BIND_CHANNEL,
+ TEGRA_VGPU_CMD_TSG_UNBIND_CHANNEL,
+ TEGRA_VGPU_CMD_TSG_PREEMPT,
};
struct tegra_vgpu_connect_params {
@@ -361,6 +364,15 @@ struct tegra_vgpu_tsg_bind_gr_ctx_params {
u64 gr_ctx_handle;
};
+struct tegra_vgpu_tsg_bind_unbind_channel_params {
+ u32 tsg_id;
+ u64 ch_handle;
+};
+
+struct tegra_vgpu_tsg_preempt_params {
+ u32 tsg_id;
+};
+
struct tegra_vgpu_cmd_msg {
u32 cmd;
int ret;
@@ -397,6 +409,8 @@ struct tegra_vgpu_cmd_msg {
struct tegra_vgpu_gr_ctx_params gr_ctx;
struct tegra_vgpu_channel_bind_gr_ctx_params ch_bind_gr_ctx;
struct tegra_vgpu_tsg_bind_gr_ctx_params tsg_bind_gr_ctx;
+ struct tegra_vgpu_tsg_bind_unbind_channel_params tsg_bind_unbind_channel;
+ struct tegra_vgpu_tsg_preempt_params tsg_preempt;
char padding[192];
} params;
};
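
The new param structs ride in a union padded to a fixed size (char padding[192]), so message growth is bounded. A hedged sketch of a compile-time guard one could add alongside the union (placement and function name are illustrative only):

    static inline void tegra_vgpu_msg_size_check(void)
    {
            /* every params member must fit inside the fixed padding */
            BUILD_BUG_ON(sizeof(struct tegra_vgpu_tsg_bind_unbind_channel_params) > 192);
            BUILD_BUG_ON(sizeof(struct tegra_vgpu_tsg_preempt_params) > 192);
    }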