mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-22 17:36:20 +03:00
All channels in a TSG need to share the same engine context, i.e. the pointer in the RAMFC of every channel in a TSG must point to the same NV_RAMIN_GR_WFI_TARGET.

To achieve this, add a gr_ctx pointer inside the TSG struct so that each TSG maintains its own unique gr_ctx. Also, change the gr_ctx member of a channel to a pointer, so that a channel which is part of a TSG points to the TSG's gr_ctx, and a channel outside any TSG points to its own gr_ctx.

In gk20a_alloc_obj_ctx(), allocate gr_ctx as follows:
1) If the channel is not part of any TSG, allocate its own gr_ctx buffer if one is not already allocated.
2) If the channel is part of a TSG, check whether the TSG has already allocated a gr_ctx:
   - If yes, make the channel's gr_ctx point to the TSG's.
   - If not, the channel is the first to be bound to this TSG; allocate a new gr_ctx on the TSG first, then make the channel's gr_ctx point to it.

gr_ctx is released as follows:
1) If the channel is not part of a TSG, it is released when the channel is closed.
2) Otherwise, it is released when the TSG itself is closed.

Bug 1470692

Change-Id: Id347217d5b462e0e972cd3d79d17795b37034a50
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/417065
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
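A minimal sketch of the allocation policy described above, as it might look inside gk20a_alloc_obj_ctx(). The helpers gr_ctx_alloc_private()/gr_ctx_alloc_for_tsg() and the ch->ch_ctx.gr_ctx field are illustrative assumptions, not the actual nvgpu code; only gk20a_is_channel_marked_as_tsg(), ch->tsgid and tsg->tsg_gr_ctx appear in this file:

/* Sketch only: helper names and ch_ctx layout are hypothetical */
static int alloc_or_share_gr_ctx_sketch(struct channel_gk20a *ch)
{
	struct gk20a *g = ch->g;

	if (!gk20a_is_channel_marked_as_tsg(ch)) {
		/* 1) channel outside any TSG: private gr_ctx, allocated once */
		if (!ch->ch_ctx.gr_ctx)
			return gr_ctx_alloc_private(ch);	/* hypothetical */
		return 0;
	} else {
		struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
		int err;

		/* 2) first channel bound to the TSG allocates the shared
		 * gr_ctx; later channels just point at it */
		if (!tsg->tsg_gr_ctx) {
			err = gr_ctx_alloc_for_tsg(tsg);	/* hypothetical */
			if (err)
				return err;
		}
		ch->ch_ctx.gr_ctx = tsg->tsg_gr_ctx;
		return 0;
	}
}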
272 lines
5.8 KiB
C
/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/nvhost.h>
#include <linux/nvhost_gpu_ioctl.h>
#include <linux/anon_inodes.h>

#include "gk20a.h"

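/*
 * Check whether a channel has been marked as part of a TSG.
 * A channel outside any TSG keeps tsgid == NVGPU_INVALID_TSG_ID.
 */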
bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch)
{
	return ch->tsgid != NVGPU_INVALID_TSG_ID;
}

/*
 * API to add a channel to the runnable list of a TSG.
 *
 * After this call, the channel will be scheduled as a TSG channel
 * in the runlist.
 */
int gk20a_bind_runnable_channel_to_tsg(struct channel_gk20a *ch, int tsgid)
{
	struct gk20a *g = ch->g;
	struct tsg_gk20a *tsg = NULL;

	if (ch->tsgid != tsgid)
		return -EINVAL;

	tsg = &g->fifo.tsg[tsgid];

	mutex_lock(&tsg->ch_list_lock);
	list_add_tail(&ch->ch_entry, &tsg->ch_runnable_list);
	tsg->num_runnable_channels += 1;
	mutex_unlock(&tsg->ch_list_lock);

	return tsg->num_runnable_channels;
}

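/*
 * Remove a runnable channel from its TSG's runnable list;
 * the counterpart of gk20a_bind_runnable_channel_to_tsg() above.
 */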
int gk20a_unbind_channel_from_tsg(struct channel_gk20a *ch, int tsgid)
{
	struct gk20a *g = ch->g;
	struct tsg_gk20a *tsg = NULL;

	if (ch->tsgid != tsgid)
		return -EINVAL;

	tsg = &g->fifo.tsg[tsgid];

	mutex_lock(&tsg->ch_list_lock);
	list_del_init(&ch->ch_entry);
	tsg->num_runnable_channels -= 1;
	mutex_unlock(&tsg->ch_list_lock);

	return tsg->num_runnable_channels;
}

/*
 * API to mark a channel as part of a TSG
 *
 * Note that the channel is not runnable when we bind it to the TSG
 */
static int nvgpu_tsg_bind_channel(struct tsg_gk20a *tsg, int ch_fd)
{
	struct file *f = fget(ch_fd);
	struct channel_gk20a *ch;

	/* fget() returns NULL for a bad fd */
	if (!f)
		return -EINVAL;
	ch = f->private_data;

	/* check if channel is already bound to some TSG */
	if (gk20a_is_channel_marked_as_tsg(ch)) {
		/* drop the file reference on the error path too */
		fput(f);
		return -EINVAL;
	}

	ch->tsgid = tsg->tsgid;

	gk20a_dbg(gpu_dbg_fn, "BIND tsg:%d channel:%d\n",
			tsg->tsgid, ch->hw_chid);

	fput(f);

	return 0;
}

static int nvgpu_tsg_unbind_channel(struct tsg_gk20a *tsg, int ch_fd)
{
	/* We do not support explicitly unbinding a channel from a TSG.
	 * The channel will be unbound from the TSG when it is closed.
	 */
	return 0;
}

int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
{
	struct tsg_gk20a *tsg = NULL;

	/* tsgid is unsigned, so only the upper bound needs checking */
	if (tsgid >= g->fifo.num_channels)
		return -EINVAL;

	tsg = &g->fifo.tsg[tsgid];

	tsg->in_use = false;
	tsg->tsgid = tsgid;

	INIT_LIST_HEAD(&tsg->ch_runnable_list);
	mutex_init(&tsg->ch_list_lock);

	return 0;
}

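/*
 * TSG slots are tracked with a per-entry in_use flag guarded by
 * tsg_inuse_mutex; release_used_tsg() returns a slot to the pool,
 * and acquire_unused_tsg() below linearly scans for the first free one.
 */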
static void release_used_tsg(struct fifo_gk20a *f, struct tsg_gk20a *tsg)
{
	mutex_lock(&f->tsg_inuse_mutex);
	f->tsg[tsg->tsgid].in_use = false;
	mutex_unlock(&f->tsg_inuse_mutex);
}

static struct tsg_gk20a *acquire_unused_tsg(struct fifo_gk20a *f)
{
	struct tsg_gk20a *tsg = NULL;
	int tsgid;

	mutex_lock(&f->tsg_inuse_mutex);
	for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
		if (!f->tsg[tsgid].in_use) {
			f->tsg[tsgid].in_use = true;
			tsg = &f->tsg[tsgid];
			break;
		}
	}
	mutex_unlock(&f->tsg_inuse_mutex);

	return tsg;
}

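/*
 * open() on the TSG device node: reserve a free TSG slot and stash
 * it in the file's private_data for the following ioctls.
 */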
int gk20a_tsg_dev_open(struct inode *inode, struct file *filp)
{
	struct tsg_gk20a *tsg;
	struct gk20a *g;
	struct device *dev;

	g = container_of(inode->i_cdev,
			 struct gk20a, tsg.cdev);
	dev = dev_from_gk20a(g);

	gk20a_dbg(gpu_dbg_fn, "tsg: %s", dev_name(dev));

	tsg = acquire_unused_tsg(&g->fifo);
	if (!tsg)
		return -ENOMEM;

	tsg->g = g;
	tsg->num_runnable_channels = 0;

	tsg->tsg_gr_ctx = NULL;
	tsg->vm = NULL;

	filp->private_data = tsg;

	gk20a_dbg(gpu_dbg_fn, "tsg opened %d\n", tsg->tsgid);

	return 0;
}

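/*
 * release() for the TSG device node: refuse to free a TSG that still
 * has runnable channels, otherwise drop the shared gr_ctx and return
 * the slot to the free pool.
 */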
int gk20a_tsg_dev_release(struct inode *inode, struct file *filp)
{
	struct tsg_gk20a *tsg = filp->private_data;
	struct gk20a *g = container_of(inode->i_cdev,
				struct gk20a, tsg.cdev);

	if (tsg->num_runnable_channels) {
		gk20a_err(dev_from_gk20a(g),
			"Trying to free TSG %d with active channels %d\n",
			tsg->tsgid, tsg->num_runnable_channels);
		return -EBUSY;
	}

	if (tsg->tsg_gr_ctx) {
		gr_gk20a_free_tsg_gr_ctx(tsg);
		tsg->tsg_gr_ctx = NULL;
	}
	tsg->vm = NULL;

	release_used_tsg(&g->fifo, tsg);

	gk20a_dbg(gpu_dbg_fn, "tsg released %d\n", tsg->tsgid);

	return 0;
}

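/*
 * ioctl() dispatcher for the TSG device node. Arguments are staged
 * through an on-stack buffer: copied in for _IOC_WRITE commands and
 * copied back out for _IOC_READ commands.
 */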
long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	struct tsg_gk20a *tsg = filp->private_data;
	struct gk20a *g = tsg->g;
	u8 __maybe_unused buf[NVGPU_TSG_IOCTL_MAX_ARG_SIZE];
	int err = 0;

	gk20a_dbg(gpu_dbg_fn, "");

	if ((_IOC_TYPE(cmd) != NVGPU_TSG_IOCTL_MAGIC) ||
	    (_IOC_NR(cmd) == 0) ||
	    (_IOC_NR(cmd) > NVGPU_TSG_IOCTL_LAST))
		return -EFAULT;

	BUG_ON(_IOC_SIZE(cmd) > NVGPU_TSG_IOCTL_MAX_ARG_SIZE);

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	if (!g->gr.sw_ready) {
		err = gk20a_busy(g->dev);
		if (err)
			return err;

		gk20a_idle(g->dev);
	}

	switch (cmd) {
	case NVGPU_TSG_IOCTL_BIND_CHANNEL:
	{
		int ch_fd = *(int *)buf;
		if (ch_fd < 0) {
			err = -EINVAL;
			break;
		}
		err = nvgpu_tsg_bind_channel(tsg, ch_fd);
		break;
	}

	case NVGPU_TSG_IOCTL_UNBIND_CHANNEL:
	{
		int ch_fd = *(int *)buf;
		if (ch_fd < 0) {
			err = -EINVAL;
			break;
		}
		err = nvgpu_tsg_unbind_channel(tsg, ch_fd);
		break;
	}

	default:
		gk20a_err(dev_from_gk20a(g),
			   "unrecognized tsg gpu ioctl cmd: 0x%x",
			   cmd);
		err = -ENOTTY;
		break;
	}

	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) {
		/* copy_to_user() returns the number of uncopied bytes,
		 * not an errno; convert failure to -EFAULT */
		if (copy_to_user((void __user *)arg,
				 buf, _IOC_SIZE(cmd)))
			err = -EFAULT;
	}

	return err;
}
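For context, a sketch of how userspace might drive the ioctl interface handled above. The /dev/nvhost-tsg-gpu node path is an assumption; NVGPU_TSG_IOCTL_BIND_CHANNEL and its int channel-fd argument match gk20a_tsg_dev_ioctl():

/* Hypothetical userspace usage; the device node path is an assumption. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nvhost_gpu_ioctl.h>

int bind_channel_to_new_tsg(int ch_fd)
{
	int tsg_fd = open("/dev/nvhost-tsg-gpu", O_RDWR);	/* assumed node */
	if (tsg_fd < 0)
		return -1;
	/* the handler copies the int fd in via copy_from_user() */
	if (ioctl(tsg_fd, NVGPU_TSG_IOCTL_BIND_CHANNEL, &ch_fd) < 0) {
		close(tsg_fd);
		return -1;
	}
	return tsg_fd;	/* keep open: gk20a_tsg_dev_release() frees the TSG */
}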