gpu: nvgpu: add tsg support for vgpu

- make tsg_gk20a.c call the HAL to enable/disable channels
- add a preempt_tsg HAL callback
- add tsg bind/unbind channel HAL callbacks
- add the corresponding tsg callbacks for vgpu

Bug 1702773
JIRA VFND-1003

Change-Id: I2cba74b3ebd3920ef09219a168e6433d9574dbe8
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: http://git-master/r/1144932
(cherry picked from commit c3787de7d38651d46969348f5acae2ba86b31ec7)
Reviewed-on: http://git-master/r/1126942
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
Author: Richard Zhao
Date: 2016-03-31 11:16:23 -07:00
Committer: Terje Bergstrom
Parent: a71ce831fb
Commit: d707c5a444
15 changed files with 236 additions and 46 deletions
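Editorial note: every hunk below applies the same nvgpu HAL pattern. Per-chip (or vgpu) init code installs function pointers into gpu_ops, and common code dispatches through g->ops.fifo.* instead of calling gk20a_* functions directly; that indirection is what lets the vgpu backend substitute RPC-based TSG implementations for register pokes. A minimal, self-contained sketch of the pattern, with simplified types and printf standing in for register writes and RPCs (not the actual nvgpu headers; only preempt_tsg is shown, but enable/disable and bind/unbind work the same way):

/* Sketch of the gpu_ops HAL indirection this commit extends. */
#include <stdio.h>

struct gk20a;

struct gpu_ops {
	struct {
		/* per-backend callback, installed at init time */
		int (*preempt_tsg)(struct gk20a *g, unsigned int tsgid);
	} fifo;
};

struct gk20a {
	struct gpu_ops ops;
};

/* native implementation: the real driver writes fifo registers here */
static int gk20a_fifo_preempt_tsg(struct gk20a *g, unsigned int tsgid)
{
	(void)g;
	printf("native: preempt tsg %u via registers\n", tsgid);
	return 0;
}

/* vgpu implementation: the real driver sends TEGRA_VGPU_CMD_TSG_PREEMPT */
static int vgpu_fifo_preempt_tsg(struct gk20a *g, unsigned int tsgid)
{
	(void)g;
	printf("vgpu: RPC preempt of tsg %u\n", tsgid);
	return 0;
}

/* per-backend init installs the right callback ... */
static void gk20a_init_fifo(struct gpu_ops *gops)
{
	gops->fifo.preempt_tsg = gk20a_fifo_preempt_tsg;
}

static void vgpu_init_fifo_ops(struct gpu_ops *gops)
{
	gops->fifo.preempt_tsg = vgpu_fifo_preempt_tsg;
}

/* ... and common code stays backend-agnostic */
static int fifo_preempt_tsg_common(struct gk20a *g, unsigned int tsgid)
{
	return g->ops.fifo.preempt_tsg(g, tsgid);
}

int main(void)
{
	struct gk20a native = {0}, virt = {0};

	gk20a_init_fifo(&native.ops);
	vgpu_init_fifo_ops(&virt.ops);
	fifo_preempt_tsg_common(&native, 3);
	fifo_preempt_tsg_common(&virt, 3);
	return 0;
}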

drivers/gpu/nvgpu/Makefile

@@ -95,6 +95,7 @@ nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
 	vgpu/vgpu.o \
 	vgpu/dbg_vgpu.o \
 	vgpu/fecs_trace_vgpu.o \
+	vgpu/tsg_vgpu.o \
 	vgpu/gk20a/vgpu_hal_gk20a.o \
 	vgpu/gk20a/vgpu_gr_gk20a.o \
 	vgpu/gm20b/vgpu_hal_gm20b.o \

drivers/gpu/nvgpu/gk20a/channel_gk20a.c

@@ -952,7 +952,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch)
 unbind:
 	if (gk20a_is_channel_marked_as_tsg(ch))
-		gk20a_tsg_unbind_channel(ch);
+		g->ops.fifo.tsg_unbind_channel(ch);
 
 	g->ops.fifo.unbind_channel(ch);
 	g->ops.fifo.free_inst(g, ch);

drivers/gpu/nvgpu/gk20a/fifo_gk20a.c

@@ -2015,7 +2015,7 @@ int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch)
 	int err;
 
 	if (gk20a_is_channel_marked_as_tsg(ch))
-		err = gk20a_fifo_preempt_tsg(ch->g, ch->tsgid);
+		err = g->ops.fifo.preempt_tsg(ch->g, ch->tsgid);
 	else
 		err = g->ops.fifo.preempt_channel(ch->g, ch->hw_chid);
@@ -2754,6 +2754,7 @@ void gk20a_init_fifo(struct gpu_ops *gops)
 {
 	gk20a_init_channel(gops);
 	gops->fifo.preempt_channel = gk20a_fifo_preempt_channel;
+	gops->fifo.preempt_tsg = gk20a_fifo_preempt_tsg;
 	gops->fifo.update_runlist = gk20a_fifo_update_runlist;
 	gops->fifo.trigger_mmu_fault = gk20a_fifo_trigger_mmu_fault;
 	gops->fifo.apply_pb_timeout = gk20a_fifo_apply_pb_timeout;

drivers/gpu/nvgpu/gk20a/gk20a.h

@@ -324,6 +324,7 @@ struct gpu_ops {
 				u32 gpfifo_entries, u32 flags);
 		int (*resetup_ramfc)(struct channel_gk20a *c);
 		int (*preempt_channel)(struct gk20a *g, u32 hw_chid);
+		int (*preempt_tsg)(struct gk20a *g, u32 tsgid);
 		int (*update_runlist)(struct gk20a *g, u32 runlist_id,
 				u32 hw_chid, bool add,
 				bool wait_for_finish);
@@ -345,6 +346,9 @@ struct gpu_ops {
 		void (*device_info_data_parse)(struct gk20a *g,
 				u32 table_entry, u32 *inst_id,
 				u32 *pri_base, u32 *fault_id);
+		int (*tsg_bind_channel)(struct tsg_gk20a *tsg,
+				struct channel_gk20a *ch);
+		int (*tsg_unbind_channel)(struct channel_gk20a *ch);
 	} fifo;
 	struct pmu_v {
 		/*used for change of enum zbc update cmd id from ver 0 to ver1*/

drivers/gpu/nvgpu/gk20a/hal_gk20a.c

@@ -30,6 +30,7 @@
#include "regops_gk20a.h" #include "regops_gk20a.h"
#include "therm_gk20a.h" #include "therm_gk20a.h"
#include "hw_proj_gk20a.h" #include "hw_proj_gk20a.h"
#include "tsg_gk20a.h"
static struct gpu_ops gk20a_ops = { static struct gpu_ops gk20a_ops = {
.clock_gating = { .clock_gating = {
@@ -142,6 +143,7 @@ int gk20a_init_hal(struct gk20a *g)
gk20a_init_regops(gops); gk20a_init_regops(gops);
gk20a_init_debug_ops(gops); gk20a_init_debug_ops(gops);
gk20a_init_therm_ops(gops); gk20a_init_therm_ops(gops);
gk20a_init_tsg_ops(gops);
gops->name = "gk20a"; gops->name = "gk20a";
gops->chip_init_gpu_characteristics = gk20a_init_gpu_characteristics; gops->chip_init_gpu_characteristics = gk20a_init_gpu_characteristics;
gops->get_litter_value = gk20a_get_litter_value; gops->get_litter_value = gk20a_get_litter_value;

drivers/gpu/nvgpu/gk20a/tsg_gk20a.c

@@ -37,13 +37,12 @@ bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch)
 
 int gk20a_enable_tsg(struct tsg_gk20a *tsg)
 {
+	struct gk20a *g = tsg->g;
 	struct channel_gk20a *ch;
 
 	mutex_lock(&tsg->ch_list_lock);
 	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
-		gk20a_writel(ch->g, ccsr_channel_r(ch->hw_chid),
-			gk20a_readl(ch->g, ccsr_channel_r(ch->hw_chid))
-			| ccsr_channel_enable_set_true_f());
+		g->ops.fifo.enable_channel(ch);
 	}
 	mutex_unlock(&tsg->ch_list_lock);
@@ -52,13 +51,12 @@ int gk20a_enable_tsg(struct tsg_gk20a *tsg)
 int gk20a_disable_tsg(struct tsg_gk20a *tsg)
 {
+	struct gk20a *g = tsg->g;
 	struct channel_gk20a *ch;
 
 	mutex_lock(&tsg->ch_list_lock);
 	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
-		gk20a_writel(ch->g, ccsr_channel_r(ch->hw_chid),
-			gk20a_readl(ch->g, ccsr_channel_r(ch->hw_chid))
-			| ccsr_channel_enable_clr_true_f());
+		g->ops.fifo.disable_channel(ch);
 	}
 	mutex_unlock(&tsg->ch_list_lock);
@@ -80,31 +78,37 @@ static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
 	return false;
 }
 
+static int gk20a_tsg_bind_channel_fd(struct tsg_gk20a *tsg, int ch_fd)
+{
+	struct file *f = fget(ch_fd);
+	struct channel_gk20a *ch;
+	int err;
+
+	ch = gk20a_get_channel_from_file(ch_fd);
+	if (!ch)
+		return -EINVAL;
+
+	err = ch->g->ops.fifo.tsg_bind_channel(tsg, ch);
+
+	fput(f);
+	return err;
+}
+
 /*
  * API to mark channel as part of TSG
  *
  * Note that channel is not runnable when we bind it to TSG
  */
-static int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, int ch_fd)
+int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
+			struct channel_gk20a *ch)
 {
-	struct file *f = fget(ch_fd);
-	struct channel_gk20a *ch;
-
 	gk20a_dbg_fn("");
 
-	ch = gk20a_get_channel_from_file(ch_fd);
-	if (!ch)
-		return -EINVAL;
-
 	/* check if channel is already bound to some TSG */
 	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		fput(f);
 		return -EINVAL;
 	}
 
 	/* channel cannot be bound to TSG if it is already active */
 	if (gk20a_is_channel_active(tsg->g, ch)) {
-		fput(f);
 		return -EINVAL;
 	}
@@ -119,8 +123,6 @@ static int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, int ch_fd)
 	gk20a_dbg(gpu_dbg_fn, "BIND tsg:%d channel:%d\n",
 					tsg->tsgid, ch->hw_chid);
 
-	fput(f);
-
 	gk20a_dbg_fn("done");
 	return 0;
 }
@@ -494,7 +496,7 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
 			err = -EINVAL;
 			break;
 		}
-		err = gk20a_tsg_bind_channel(tsg, ch_fd);
+		err = gk20a_tsg_bind_channel_fd(tsg, ch_fd);
 		break;
 	}
@@ -539,7 +541,7 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
 			return err;
 		}
 		/* preempt TSG */
-		err = gk20a_fifo_preempt_tsg(g, tsg->tsgid);
+		err = g->ops.fifo.preempt_tsg(g, tsg->tsgid);
 		gk20a_idle(g->dev);
 		break;
 	}
@@ -600,3 +602,9 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
 
 	return err;
 }
+
+void gk20a_init_tsg_ops(struct gpu_ops *gops)
+{
+	gops->fifo.tsg_bind_channel = gk20a_tsg_bind_channel;
+	gops->fifo.tsg_unbind_channel = gk20a_tsg_unbind_channel;
+}
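Editorial note: the ioctl hunks above are driven from userspace through the TSG device node. A hypothetical sketch of that call sequence follows; the device-node paths, the uapi header location, and the NVGPU_TSG_IOCTL_BIND_CHANNEL name are assumptions from the Tegra ABI of that era and should be checked against the matching tree's uapi nvgpu.h, not taken as the verified interface:

/* Hypothetical userspace sketch: bind a channel fd to a TSG. The kernel
 * resolves the fd to a channel_gk20a and routes the request through the
 * gops->fifo.tsg_bind_channel HAL op (native or vgpu). All names below
 * are assumptions, not verified against a specific uapi header. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvgpu.h>	/* assumed location of NVGPU_TSG_IOCTL_* */

int main(void)
{
	int tsg_fd = open("/dev/nvhost-tsg-gpu", O_RDWR);	/* assumed node */
	int ch_fd = open("/dev/nvhost-gpu", O_RDWR);		/* assumed node */

	if (tsg_fd < 0 || ch_fd < 0) {
		perror("open");
		return 1;
	}
	/* pass the channel fd; the driver copies it in and binds */
	if (ioctl(tsg_fd, NVGPU_TSG_IOCTL_BIND_CHANNEL, &ch_fd) < 0)
		perror("NVGPU_TSG_IOCTL_BIND_CHANNEL");

	close(ch_fd);
	close(tsg_fd);
	return 0;
}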

drivers/gpu/nvgpu/gk20a/tsg_gk20a.h

@@ -28,8 +28,7 @@ long gk20a_tsg_dev_ioctl(struct file *filp,
 		unsigned int cmd, unsigned long arg);
 
 int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid);
-
-int gk20a_tsg_unbind_channel(struct channel_gk20a *ch);
+void gk20a_init_tsg_ops(struct gpu_ops *gops);
 
 struct tsg_gk20a {
 	struct gk20a *g;
@@ -59,6 +58,9 @@ struct tsg_gk20a {
 
 int gk20a_enable_tsg(struct tsg_gk20a *tsg);
 int gk20a_disable_tsg(struct tsg_gk20a *tsg);
+int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
+		struct channel_gk20a *ch);
+int gk20a_tsg_unbind_channel(struct channel_gk20a *ch);
 
 void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
 		int event_id);

drivers/gpu/nvgpu/gm20b/fifo_gm20b.c

@@ -142,6 +142,7 @@ void gm20b_init_fifo(struct gpu_ops *gops)
 	gops->fifo.channel_set_timeslice = gk20a_channel_set_timeslice;
 	gops->fifo.preempt_channel = gk20a_fifo_preempt_channel;
+	gops->fifo.preempt_tsg = gk20a_fifo_preempt_tsg;
 	gops->fifo.update_runlist = gk20a_fifo_update_runlist;
 	gops->fifo.trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault;
 	gops->fifo.wait_engine_idle = gk20a_fifo_wait_engine_idle;

drivers/gpu/nvgpu/gm20b/hal_gm20b.c

@@ -211,6 +211,7 @@ int gm20b_init_hal(struct gk20a *g)
 	gm20b_init_debug_ops(gops);
 	gm20b_init_cde_ops(gops);
 	gm20b_init_therm_ops(gops);
+	gk20a_init_tsg_ops(gops);
 	gops->name = "gm20b";
 	gops->chip_init_gpu_characteristics = gk20a_init_gpu_characteristics;
 	gops->get_litter_value = gm20b_get_litter_value;

drivers/gpu/nvgpu/vgpu/fifo_vgpu.c

@@ -410,6 +410,30 @@ static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
 	return err;
 }
 
+static int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
+{
+	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
+	struct tegra_vgpu_cmd_msg msg;
+	struct tegra_vgpu_tsg_preempt_params *p =
+			&msg.params.tsg_preempt;
+	int err;
+
+	gk20a_dbg_fn("");
+
+	msg.cmd = TEGRA_VGPU_CMD_TSG_PREEMPT;
+	msg.handle = platform->virt_handle;
+	p->tsg_id = tsgid;
+	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+	err = err ? err : msg.ret;
+
+	if (err) {
+		gk20a_err(dev_from_gk20a(g),
+			"preempt tsg %u failed\n", tsgid);
+	}
+
+	return err;
+}
+
 static int vgpu_submit_runlist(u64 handle, u8 runlist_id, u16 *runlist,
 					u32 num_entries)
 {
@@ -680,6 +704,7 @@ void vgpu_init_fifo_ops(struct gpu_ops *gops)
 	gops->fifo.free_inst = vgpu_channel_free_inst;
 	gops->fifo.setup_ramfc = vgpu_channel_setup_ramfc;
 	gops->fifo.preempt_channel = vgpu_fifo_preempt_channel;
+	gops->fifo.preempt_tsg = vgpu_fifo_preempt_tsg;
 	gops->fifo.update_runlist = vgpu_fifo_update_runlist;
 	gops->fifo.wait_engine_idle = vgpu_fifo_wait_engine_idle;
 	gops->fifo.channel_set_priority = vgpu_channel_set_priority;

drivers/gpu/nvgpu/vgpu/gr_vgpu.c

@@ -445,6 +445,26 @@ static int vgpu_gr_ch_bind_gr_ctx(struct channel_gk20a *c)
 	return err;
 }
 
+static int vgpu_gr_tsg_bind_gr_ctx(struct tsg_gk20a *tsg)
+{
+	struct gk20a_platform *platform = gk20a_get_platform(tsg->g->dev);
+	struct gr_ctx_desc *gr_ctx = tsg->tsg_gr_ctx;
+	struct tegra_vgpu_cmd_msg msg = {0};
+	struct tegra_vgpu_tsg_bind_gr_ctx_params *p =
+				&msg.params.tsg_bind_gr_ctx;
+	int err;
+
+	msg.cmd = TEGRA_VGPU_CMD_TSG_BIND_GR_CTX;
+	msg.handle = platform->virt_handle;
+	p->tsg_id = tsg->tsgid;
+	p->gr_ctx_handle = gr_ctx->virt_ctx;
+	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+	err = err ? err : msg.ret;
+	WARN_ON(err);
+
+	return err;
+}
+
 static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 				struct nvgpu_alloc_obj_ctx_args *args)
 {
@@ -472,32 +492,58 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
 	}
 	c->obj_class = args->class_num;
 
-	/* FIXME: add TSG support */
 	if (gk20a_is_channel_marked_as_tsg(c))
 		tsg = &f->tsg[c->tsgid];
 
-	/* allocate gr ctx buffer */
-	if (!ch_ctx->gr_ctx) {
-		err = g->ops.gr.alloc_gr_ctx(g, &c->ch_ctx.gr_ctx,
-					c->vm,
-					args->class_num,
-					args->flags);
-		if (!err)
-			err = vgpu_gr_ch_bind_gr_ctx(c);
-		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"fail to allocate gr ctx buffer");
-			goto out;
-		}
-	} else {
-		/*TBD: needs to be more subtle about which is
-		 * being allocated as some are allowed to be
-		 * allocated along same channel */
-		gk20a_err(dev_from_gk20a(g),
-			"too many classes alloc'd on same channel");
-		err = -EINVAL;
-		goto out;
+	if (!tsg) {
+		/* allocate gr ctx buffer */
+		if (!ch_ctx->gr_ctx) {
+			err = g->ops.gr.alloc_gr_ctx(g, &c->ch_ctx.gr_ctx,
+						c->vm,
+						args->class_num,
+						args->flags);
+			if (!err)
+				err = vgpu_gr_ch_bind_gr_ctx(c);
+			if (err) {
+				gk20a_err(dev_from_gk20a(g),
+					"fail to allocate gr ctx buffer");
+				goto out;
+			}
+		} else {
+			/*TBD: needs to be more subtle about which is
+			 * being allocated as some are allowed to be
+			 * allocated along same channel */
+			gk20a_err(dev_from_gk20a(g),
+				"too many classes alloc'd on same channel");
+			err = -EINVAL;
+			goto out;
+		}
+	} else {
+		if (!tsg->tsg_gr_ctx) {
+			tsg->vm = c->vm;
+			gk20a_vm_get(tsg->vm);
+			err = g->ops.gr.alloc_gr_ctx(g, &tsg->tsg_gr_ctx,
+						c->vm,
+						args->class_num,
+						args->flags);
+			if (!err)
+				err = vgpu_gr_tsg_bind_gr_ctx(tsg);
+			if (err) {
+				gk20a_err(dev_from_gk20a(g),
+					"fail to allocate TSG gr ctx buffer, err=%d", err);
+				gk20a_vm_put(tsg->vm);
+				tsg->vm = NULL;
+				goto out;
+			}
+		}
+
+		ch_ctx->gr_ctx = tsg->tsg_gr_ctx;
+		err = vgpu_gr_ch_bind_gr_ctx(c);
+		if (err) {
+			gk20a_err(dev_from_gk20a(g),
+				"fail to bind gr ctx buffer");
+			goto out;
+		}
 	}
 
 	/* commit gr ctx buffer */

drivers/gpu/nvgpu/vgpu/tsg_vgpu.c (new file)

@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/tegra_vgpu.h>
+
+#include "gk20a/gk20a.h"
+#include "gk20a/channel_gk20a.h"
+#include "gk20a/platform_gk20a.h"
+#include "gk20a/tsg_gk20a.h"
+#include "vgpu.h"
+
+static int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg,
+			struct channel_gk20a *ch)
+{
+	struct gk20a_platform *platform = gk20a_get_platform(tsg->g->dev);
+	struct tegra_vgpu_cmd_msg msg = {};
+	struct tegra_vgpu_tsg_bind_unbind_channel_params *p =
+					&msg.params.tsg_bind_unbind_channel;
+	int err;
+
+	gk20a_dbg_fn("");
+
+	err = gk20a_tsg_bind_channel(tsg, ch);
+	if (err)
+		return err;
+
+	msg.cmd = TEGRA_VGPU_CMD_TSG_BIND_CHANNEL;
+	msg.handle = platform->virt_handle;
+	p->tsg_id = tsg->tsgid;
+	p->ch_handle = ch->virt_ctx;
+	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+	err = err ? err : msg.ret;
+	if (err) {
+		gk20a_err(dev_from_gk20a(tsg->g),
+			"vgpu_tsg_bind_channel failed, ch %d tsgid %d",
+			ch->hw_chid, tsg->tsgid);
+		gk20a_tsg_unbind_channel(ch);
+	}
+
+	return err;
+}
+
+static int vgpu_tsg_unbind_channel(struct channel_gk20a *ch)
+{
+	struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
+	struct tegra_vgpu_cmd_msg msg = {};
+	struct tegra_vgpu_tsg_bind_unbind_channel_params *p =
+					&msg.params.tsg_bind_unbind_channel;
+	int err;
+
+	gk20a_dbg_fn("");
+
+	err = gk20a_tsg_unbind_channel(ch);
+	if (err)
+		return err;
+
+	msg.cmd = TEGRA_VGPU_CMD_TSG_UNBIND_CHANNEL;
+	msg.handle = platform->virt_handle;
+	p->ch_handle = ch->virt_ctx;
+	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+	err = err ? err : msg.ret;
+	WARN_ON(err);
+
+	return err;
+}
+
+void vgpu_init_tsg_ops(struct gpu_ops *gops)
+{
+	gops->fifo.tsg_bind_channel = vgpu_tsg_bind_channel;
+	gops->fifo.tsg_unbind_channel = vgpu_tsg_unbind_channel;
+}

drivers/gpu/nvgpu/vgpu/vgpu.c

@@ -268,6 +268,7 @@ void vgpu_init_hal_common(struct gk20a *g)
 	vgpu_init_mm_ops(gops);
 	vgpu_init_debug_ops(gops);
 	vgpu_init_fecs_trace_ops(gops);
+	vgpu_init_tsg_ops(gops);
 	gops->chip_init_gpu_characteristics = gk20a_init_gpu_characteristics;
 }
 
@@ -340,8 +341,6 @@ int vgpu_pm_finalize_poweron(struct device *dev)
 		goto done;
 	}
 
-	g->gpu_characteristics.flags &= ~NVGPU_GPU_FLAGS_SUPPORT_TSG;
-
 	gk20a_ctxsw_trace_init(g);
 	gk20a_channel_resume(g);

drivers/gpu/nvgpu/vgpu/vgpu.h

@@ -1,7 +1,7 @@
 /*
  * Virtualized GPU Interfaces
  *
- * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -47,6 +47,7 @@ void vgpu_init_gr_ops(struct gpu_ops *gops);
 void vgpu_init_ltc_ops(struct gpu_ops *gops);
 void vgpu_init_mm_ops(struct gpu_ops *gops);
 void vgpu_init_debug_ops(struct gpu_ops *gops);
+void vgpu_init_tsg_ops(struct gpu_ops *gops);
 int vgpu_init_mm_support(struct gk20a *g);
 int vgpu_init_gr_support(struct gk20a *g);
 int vgpu_init_fifo_support(struct gk20a *g);

include/linux/tegra_vgpu.h

@@ -88,6 +88,9 @@ enum {
 	TEGRA_VGPU_CMD_GR_CTX_FREE,
 	TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTX,
 	TEGRA_VGPU_CMD_TSG_BIND_GR_CTX,
+	TEGRA_VGPU_CMD_TSG_BIND_CHANNEL,
+	TEGRA_VGPU_CMD_TSG_UNBIND_CHANNEL,
+	TEGRA_VGPU_CMD_TSG_PREEMPT,
 };
 
 struct tegra_vgpu_connect_params {
@@ -361,6 +364,15 @@ struct tegra_vgpu_tsg_bind_gr_ctx_params {
 	u64 gr_ctx_handle;
 };
 
+struct tegra_vgpu_tsg_bind_unbind_channel_params {
+	u32 tsg_id;
+	u64 ch_handle;
+};
+
+struct tegra_vgpu_tsg_preempt_params {
+	u32 tsg_id;
+};
+
 struct tegra_vgpu_cmd_msg {
 	u32 cmd;
 	int ret;
@@ -397,6 +409,8 @@ struct tegra_vgpu_cmd_msg {
 		struct tegra_vgpu_gr_ctx_params gr_ctx;
 		struct tegra_vgpu_channel_bind_gr_ctx_params ch_bind_gr_ctx;
 		struct tegra_vgpu_tsg_bind_gr_ctx_params tsg_bind_gr_ctx;
+		struct tegra_vgpu_tsg_bind_unbind_channel_params tsg_bind_unbind_channel;
+		struct tegra_vgpu_tsg_preempt_params tsg_preempt;
 		char padding[192];
 	} params;
 };
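Editorial note: one design point worth calling out in the tegra_vgpu.h hunk above is that tegra_vgpu_cmd_msg is a fixed-size tagged union: the padding member pins the union's size, so appending new, smaller params structs (as this commit does) never changes sizeof(msg) or the RPC wire format. A standalone toy illustration with made-up names, not the real header:

/* Toy version of the fixed-size, tagged-union message pattern. */
#include <stdio.h>
#include <stdint.h>

struct toy_tsg_preempt_params {
	uint32_t tsg_id;
};

struct toy_tsg_bind_unbind_channel_params {
	uint32_t tsg_id;
	uint64_t ch_handle;
};

struct toy_cmd_msg {
	uint32_t cmd;	/* selects which union member is valid */
	int32_t ret;
	uint64_t handle;
	union {
		struct toy_tsg_preempt_params tsg_preempt;
		struct toy_tsg_bind_unbind_channel_params tsg_bind_unbind_channel;
		char padding[192];	/* pins the union (and message) size */
	} params;
};

int main(void)
{
	/* stays constant no matter how many params structs are added,
	 * as long as each stays within the padding size */
	printf("sizeof(toy_cmd_msg) = %zu\n", sizeof(struct toy_cmd_msg));
	return 0;
}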