gpu: nvgpu: Remove support for channel events

Remove support for events for bare channels. All users have already
moved to TSGs and TSG events.
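
For reference, the surviving flow is the TSG event-id interface: enable
an event on a TSG and poll() the returned fd. A minimal userspace
sketch; the args struct and cmd/event-id defines below appear in this
change, but the <linux/nvgpu.h> include path and the
NVGPU_IOCTL_TSG_EVENT_ID_CTRL name are assumptions (the TSG-level
counterpart of the removed channel ioctl):

    #include <poll.h>
    #include <sys/ioctl.h>
    #include <linux/nvgpu.h>    /* assumed uapi header location */

    /*
     * Sketch only: tsg_fd is an open TSG fd, and
     * NVGPU_IOCTL_TSG_EVENT_ID_CTRL is assumed to be the TSG-level
     * equivalent of the removed NVGPU_IOCTL_CHANNEL_EVENT_ID_CTRL.
     */
    static int wait_for_tsg_event(int tsg_fd, int event_id)
    {
        struct nvgpu_event_id_ctrl_args args = {
            .cmd = NVGPU_IOCTL_CHANNEL_EVENT_ID_CMD_ENABLE,
            .event_id = event_id, /* e.g. ..._EVENT_ID_BPT_INT */
        };
        struct pollfd pfd;

        /* Enable the event; the driver installs an anon-inode fd. */
        if (ioctl(tsg_fd, NVGPU_IOCTL_TSG_EVENT_ID_CTRL, &args) < 0)
            return -1;

        /* gk20a_event_id_poll() signals POLLPRI | POLLIN when posted. */
        pfd.fd = args.event_fd;
        pfd.events = POLLPRI | POLLIN;
        return poll(&pfd, 1, -1);
    }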

Bug 1842197

Change-Id: Ib3ff68134ad9515ee761d0f0e19a3150a0b744ab
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1618906
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Terje Bergstrom
Date:      2017-12-15 10:25:22 -08:00
Committer: mobile promotions
Parent:    aa52601f62
Commit:    f19f22fcc8
10 changed files with 122 additions and 366 deletions

View File

@@ -709,262 +709,6 @@ notif_clean_up:
     return ret;
 }
 
-static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
-{
-    unsigned int mask = 0;
-    struct gk20a_event_id_data *event_id_data = filep->private_data;
-    struct gk20a *g = event_id_data->g;
-    u32 event_id = event_id_data->event_id;
-
-    gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, "");
-
-    poll_wait(filep, &event_id_data->event_id_wq.wq, wait);
-
-    nvgpu_mutex_acquire(&event_id_data->lock);
-
-    if (event_id_data->is_tsg) {
-        struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
-
-        if (event_id_data->event_posted) {
-            gk20a_dbg_info(
-                "found pending event_id=%d on TSG=%d\n",
-                event_id, tsg->tsgid);
-            mask = (POLLPRI | POLLIN);
-            event_id_data->event_posted = false;
-        }
-    } else {
-        struct channel_gk20a *ch = g->fifo.channel
-            + event_id_data->id;
-
-        if (event_id_data->event_posted) {
-            gk20a_dbg_info(
-                "found pending event_id=%d on chid=%d\n",
-                event_id, ch->chid);
-            mask = (POLLPRI | POLLIN);
-            event_id_data->event_posted = false;
-        }
-    }
-
-    nvgpu_mutex_release(&event_id_data->lock);
-
-    return mask;
-}
-
-static int gk20a_event_id_release(struct inode *inode, struct file *filp)
-{
-    struct gk20a_event_id_data *event_id_data = filp->private_data;
-    struct gk20a *g = event_id_data->g;
-
-    if (event_id_data->is_tsg) {
-        struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
-
-        nvgpu_mutex_acquire(&tsg->event_id_list_lock);
-        nvgpu_list_del(&event_id_data->event_id_node);
-        nvgpu_mutex_release(&tsg->event_id_list_lock);
-    } else {
-        struct channel_gk20a *ch = g->fifo.channel + event_id_data->id;
-
-        nvgpu_mutex_acquire(&ch->event_id_list_lock);
-        nvgpu_list_del(&event_id_data->event_id_node);
-        nvgpu_mutex_release(&ch->event_id_list_lock);
-    }
-
-    nvgpu_mutex_destroy(&event_id_data->lock);
-    gk20a_put(g);
-    nvgpu_kfree(g, event_id_data);
-    filp->private_data = NULL;
-
-    return 0;
-}
-
-const struct file_operations gk20a_event_id_ops = {
-    .owner = THIS_MODULE,
-    .poll = gk20a_event_id_poll,
-    .release = gk20a_event_id_release,
-};
-
-static int gk20a_channel_get_event_data_from_id(struct channel_gk20a *ch,
-                u32 event_id,
-                struct gk20a_event_id_data **event_id_data)
-{
-    struct gk20a_event_id_data *local_event_id_data;
-    bool event_found = false;
-
-    nvgpu_mutex_acquire(&ch->event_id_list_lock);
-    nvgpu_list_for_each_entry(local_event_id_data, &ch->event_id_list,
-            gk20a_event_id_data, event_id_node) {
-        if (local_event_id_data->event_id == event_id) {
-            event_found = true;
-            break;
-        }
-    }
-    nvgpu_mutex_release(&ch->event_id_list_lock);
-
-    if (event_found) {
-        *event_id_data = local_event_id_data;
-        return 0;
-    } else {
-        return -1;
-    }
-}
-
-/*
- * Convert common event_id of the form NVGPU_EVENT_ID_* to Linux specific
- * event_id of the form NVGPU_IOCTL_CHANNEL_EVENT_ID_* which is used in IOCTLs
- */
-u32 nvgpu_event_id_to_ioctl_channel_event_id(u32 event_id)
-{
-    switch (event_id) {
-    case NVGPU_EVENT_ID_BPT_INT:
-        return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_INT;
-    case NVGPU_EVENT_ID_BPT_PAUSE:
-        return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_PAUSE;
-    case NVGPU_EVENT_ID_BLOCKING_SYNC:
-        return NVGPU_IOCTL_CHANNEL_EVENT_ID_BLOCKING_SYNC;
-    case NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED:
-        return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_STARTED;
-    case NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE:
-        return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_COMPLETE;
-    case NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN:
-        return NVGPU_IOCTL_CHANNEL_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN;
-    }
-
-    return NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX;
-}
-
-void gk20a_channel_event_id_post_event(struct channel_gk20a *ch,
-                       u32 __event_id)
-{
-    struct gk20a_event_id_data *event_id_data;
-    u32 event_id;
-    int err = 0;
-
-    event_id = nvgpu_event_id_to_ioctl_channel_event_id(__event_id);
-    if (event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
-        return;
-
-    err = gk20a_channel_get_event_data_from_id(ch, event_id,
-                &event_id_data);
-    if (err)
-        return;
-
-    nvgpu_mutex_acquire(&event_id_data->lock);
-
-    gk20a_dbg_info(
-        "posting event for event_id=%d on ch=%d\n",
-        event_id, ch->chid);
-    event_id_data->event_posted = true;
-
-    nvgpu_cond_broadcast_interruptible(&event_id_data->event_id_wq);
-
-    nvgpu_mutex_release(&event_id_data->lock);
-}
-
-static int gk20a_channel_event_id_enable(struct channel_gk20a *ch,
-                     int event_id,
-                     int *fd)
-{
-    struct gk20a *g;
-    int err = 0;
-    int local_fd;
-    struct file *file;
-    char name[64];
-    struct gk20a_event_id_data *event_id_data;
-
-    g = gk20a_get(ch->g);
-    if (!g)
-        return -ENODEV;
-
-    err = gk20a_channel_get_event_data_from_id(ch,
-                event_id, &event_id_data);
-    if (err == 0) {
-        /* We already have event enabled */
-        err = -EINVAL;
-        goto free_ref;
-    }
-
-    err = get_unused_fd_flags(O_RDWR);
-    if (err < 0)
-        goto free_ref;
-    local_fd = err;
-
-    snprintf(name, sizeof(name), "nvgpu-event%d-fd%d",
-         event_id, local_fd);
-
-    file = anon_inode_getfile(name, &gk20a_event_id_ops,
-                  NULL, O_RDWR);
-    if (IS_ERR(file)) {
-        err = PTR_ERR(file);
-        goto clean_up;
-    }
-
-    event_id_data = nvgpu_kzalloc(ch->g, sizeof(*event_id_data));
-    if (!event_id_data) {
-        err = -ENOMEM;
-        goto clean_up_file;
-    }
-
-    event_id_data->g = g;
-    event_id_data->id = ch->chid;
-    event_id_data->is_tsg = false;
-    event_id_data->event_id = event_id;
-
-    nvgpu_cond_init(&event_id_data->event_id_wq);
-    err = nvgpu_mutex_init(&event_id_data->lock);
-    if (err)
-        goto clean_up_free;
-
-    nvgpu_init_list_node(&event_id_data->event_id_node);
-
-    nvgpu_mutex_acquire(&ch->event_id_list_lock);
-    nvgpu_list_add_tail(&event_id_data->event_id_node, &ch->event_id_list);
-    nvgpu_mutex_release(&ch->event_id_list_lock);
-
-    fd_install(local_fd, file);
-    file->private_data = event_id_data;
-
-    *fd = local_fd;
-
-    return 0;
-
-clean_up_free:
-    nvgpu_kfree(g, event_id_data);
-clean_up_file:
-    fput(file);
-clean_up:
-    put_unused_fd(local_fd);
-free_ref:
-    gk20a_put(g);
-    return err;
-}
-
-static int gk20a_channel_event_id_ctrl(struct channel_gk20a *ch,
-        struct nvgpu_event_id_ctrl_args *args)
-{
-    int err = 0;
-    int fd = -1;
-
-    if (args->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
-        return -EINVAL;
-
-    if (gk20a_is_channel_marked_as_tsg(ch))
-        return -EINVAL;
-
-    switch (args->cmd) {
-    case NVGPU_IOCTL_CHANNEL_EVENT_ID_CMD_ENABLE:
-        err = gk20a_channel_event_id_enable(ch, args->event_id, &fd);
-        if (!err)
-            args->event_fd = fd;
-        break;
-
-    default:
-        nvgpu_err(ch->g,
-              "unrecognized channel event id cmd: 0x%x",
-              args->cmd);
-        err = -EINVAL;
-        break;
-    }
-
-    return err;
-}
-
 static int gk20a_channel_zcull_bind(struct channel_gk20a *ch,
                 struct nvgpu_zcull_bind_args *args)
 {
@@ -1467,10 +1211,6 @@ long gk20a_channel_ioctl(struct file *filp,
             NVGPU_ERR_NOTIFIER_RESETCHANNEL_VERIF_ERROR, true);
         gk20a_idle(ch->g);
         break;
-    case NVGPU_IOCTL_CHANNEL_EVENT_ID_CTRL:
-        err = gk20a_channel_event_id_ctrl(ch,
-            (struct nvgpu_event_id_ctrl_args *)buf);
-        break;
 #ifdef CONFIG_GK20A_CYCLE_STATS
     case NVGPU_IOCTL_CHANNEL_CYCLE_STATS_SNAPSHOT:
         err = gk20a_busy(ch->g);

View File

@@ -39,10 +39,8 @@ int gk20a_channel_open_ioctl(struct gk20a *g,
 int gk20a_channel_free_cycle_stats_snapshot(struct channel_gk20a *ch);
 void gk20a_channel_free_cycle_stats_buffer(struct channel_gk20a *ch);
 
-extern const struct file_operations gk20a_event_id_ops;
 extern const struct file_operations gk20a_channel_ops;
 
-u32 nvgpu_event_id_to_ioctl_channel_event_id(u32 event_id);
 u32 nvgpu_get_common_runlist_level(u32 level);
 u32 nvgpu_get_ioctl_graphics_preempt_mode_flags(u32 graphics_preempt_mode_flags);

View File

@@ -18,6 +18,7 @@
 #include <linux/file.h>
 #include <linux/cdev.h>
 #include <linux/uaccess.h>
+#include <linux/poll.h>
 #include <uapi/linux/nvgpu.h>
 #include <linux/anon_inodes.h>
@@ -79,6 +80,30 @@ static int gk20a_tsg_get_event_data_from_id(struct tsg_gk20a *tsg,
     }
 }
 
+/*
+ * Convert common event_id of the form NVGPU_EVENT_ID_* to Linux specific
+ * event_id of the form NVGPU_IOCTL_CHANNEL_EVENT_ID_* which is used in IOCTLs
+ */
+static u32 nvgpu_event_id_to_ioctl_channel_event_id(u32 event_id)
+{
+    switch (event_id) {
+    case NVGPU_EVENT_ID_BPT_INT:
+        return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_INT;
+    case NVGPU_EVENT_ID_BPT_PAUSE:
+        return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_PAUSE;
+    case NVGPU_EVENT_ID_BLOCKING_SYNC:
+        return NVGPU_IOCTL_CHANNEL_EVENT_ID_BLOCKING_SYNC;
+    case NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED:
+        return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_STARTED;
+    case NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE:
+        return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_COMPLETE;
+    case NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN:
+        return NVGPU_IOCTL_CHANNEL_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN;
+    }
+
+    return NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX;
+}
+
 void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
         int __event_id)
 {
@@ -107,6 +132,57 @@ void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
     nvgpu_mutex_release(&event_id_data->lock);
 }
 
+static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
+{
+    unsigned int mask = 0;
+    struct gk20a_event_id_data *event_id_data = filep->private_data;
+    struct gk20a *g = event_id_data->g;
+    u32 event_id = event_id_data->event_id;
+    struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
+
+    gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, "");
+
+    poll_wait(filep, &event_id_data->event_id_wq.wq, wait);
+
+    nvgpu_mutex_acquire(&event_id_data->lock);
+
+    if (event_id_data->event_posted) {
+        gk20a_dbg_info(
+            "found pending event_id=%d on TSG=%d\n",
+            event_id, tsg->tsgid);
+        mask = (POLLPRI | POLLIN);
+        event_id_data->event_posted = false;
+    }
+
+    nvgpu_mutex_release(&event_id_data->lock);
+
+    return mask;
+}
+
+static int gk20a_event_id_release(struct inode *inode, struct file *filp)
+{
+    struct gk20a_event_id_data *event_id_data = filp->private_data;
+    struct gk20a *g = event_id_data->g;
+    struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
+
+    nvgpu_mutex_acquire(&tsg->event_id_list_lock);
+    nvgpu_list_del(&event_id_data->event_id_node);
+    nvgpu_mutex_release(&tsg->event_id_list_lock);
+
+    nvgpu_mutex_destroy(&event_id_data->lock);
+    gk20a_put(g);
+    nvgpu_kfree(g, event_id_data);
+    filp->private_data = NULL;
+
+    return 0;
+}
+
+const struct file_operations gk20a_event_id_ops = {
+    .owner = THIS_MODULE,
+    .poll = gk20a_event_id_poll,
+    .release = gk20a_event_id_release,
+};
+
 static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg,
                      int event_id,
                      int *fd)
@@ -152,7 +228,6 @@ static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg,
     }
 
     event_id_data->g = g;
     event_id_data->id = tsg->tsgid;
-    event_id_data->is_tsg = true;
     event_id_data->event_id = event_id;
     nvgpu_cond_init(&event_id_data->event_id_wq);

View File

@@ -125,27 +125,22 @@ int vgpu_get_attribute(u64 handle, u32 attrib, u32 *value)
 static void vgpu_handle_channel_event(struct gk20a *g,
         struct tegra_vgpu_channel_event_info *info)
 {
+    struct tsg_gk20a *tsg;
+
+    if (!info->is_tsg) {
+        nvgpu_err(g, "channel event posted");
+        return;
+    }
+
     if (info->id >= g->fifo.num_channels ||
         info->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX) {
         nvgpu_err(g, "invalid channel event");
         return;
     }
 
-    if (info->is_tsg) {
-        struct tsg_gk20a *tsg = &g->fifo.tsg[info->id];
-
-        gk20a_tsg_event_id_post_event(tsg, info->event_id);
-    } else {
-        struct channel_gk20a *ch = &g->fifo.channel[info->id];
-
-        if (!gk20a_channel_get(ch)) {
-            nvgpu_err(g, "invalid channel %d for event %d",
-                (int)info->id, (int)info->event_id);
-            return;
-        }
-        gk20a_channel_event_id_post_event(ch, info->event_id);
-        gk20a_channel_put(ch);
-    }
+    tsg = &g->fifo.tsg[info->id];
+
+    gk20a_tsg_event_id_post_event(tsg, info->event_id);
 }