Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git, synced 2025-12-24 10:34:43 +03:00
gpu: nvgpu: Remove support for channel events
Remove support for events for bare channels. All users have already
moved to TSGs and TSG events.

Bug 1842197

Change-Id: Ib3ff68134ad9515ee761d0f0e19a3150a0b744ab
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1618906
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Commit f19f22fcc8 (parent aa52601f62), committed by mobile promotions.
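For context, the flow that survives this change obtains the event fd from a TSG rather than from a bare channel. Below is a minimal userspace sketch of that flow; it assumes the TSG-side ioctl is NVGPU_IOCTL_TSG_EVENT_ID_CTRL (not shown in this diff) and that the nvgpu UAPI header is available as <linux/nvgpu.h>, with error handling and fd cleanup elided:

	#include <poll.h>
	#include <sys/ioctl.h>
	#include <linux/nvgpu.h>	/* nvgpu UAPI header (assumed install path) */

	/* Wait for a blocking-sync event on an already-open TSG fd. */
	static int wait_blocking_sync(int tsg_fd)
	{
		struct nvgpu_event_id_ctrl_args args = {
			.cmd = NVGPU_IOCTL_CHANNEL_EVENT_ID_CMD_ENABLE,
			.event_id = NVGPU_IOCTL_CHANNEL_EVENT_ID_BLOCKING_SYNC,
		};
		struct pollfd pfd;

		/* Enable delivery; the driver hands back an anonymous event fd
		 * backed by gk20a_event_id_ops (ioctl name is an assumption). */
		if (ioctl(tsg_fd, NVGPU_IOCTL_TSG_EVENT_ID_CTRL, &args) != 0)
			return -1;

		/* gk20a_event_id_poll() reports POLLPRI | POLLIN once the
		 * event has been posted on the TSG. */
		pfd.fd = args.event_fd;
		pfd.events = POLLPRI | POLLIN;
		return poll(&pfd, 1, -1);
	}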
@@ -709,262 +709,6 @@ notif_clean_up:
 	return ret;
 }
 
-static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
-{
-	unsigned int mask = 0;
-	struct gk20a_event_id_data *event_id_data = filep->private_data;
-	struct gk20a *g = event_id_data->g;
-	u32 event_id = event_id_data->event_id;
-
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, "");
-
-	poll_wait(filep, &event_id_data->event_id_wq.wq, wait);
-
-	nvgpu_mutex_acquire(&event_id_data->lock);
-
-	if (event_id_data->is_tsg) {
-		struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
-
-		if (event_id_data->event_posted) {
-			gk20a_dbg_info(
-				"found pending event_id=%d on TSG=%d\n",
-				event_id, tsg->tsgid);
-			mask = (POLLPRI | POLLIN);
-			event_id_data->event_posted = false;
-		}
-	} else {
-		struct channel_gk20a *ch = g->fifo.channel
-			+ event_id_data->id;
-
-		if (event_id_data->event_posted) {
-			gk20a_dbg_info(
-				"found pending event_id=%d on chid=%d\n",
-				event_id, ch->chid);
-			mask = (POLLPRI | POLLIN);
-			event_id_data->event_posted = false;
-		}
-	}
-
-	nvgpu_mutex_release(&event_id_data->lock);
-
-	return mask;
-}
-
-static int gk20a_event_id_release(struct inode *inode, struct file *filp)
-{
-	struct gk20a_event_id_data *event_id_data = filp->private_data;
-	struct gk20a *g = event_id_data->g;
-
-	if (event_id_data->is_tsg) {
-		struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
-
-		nvgpu_mutex_acquire(&tsg->event_id_list_lock);
-		nvgpu_list_del(&event_id_data->event_id_node);
-		nvgpu_mutex_release(&tsg->event_id_list_lock);
-	} else {
-		struct channel_gk20a *ch = g->fifo.channel + event_id_data->id;
-
-		nvgpu_mutex_acquire(&ch->event_id_list_lock);
-		nvgpu_list_del(&event_id_data->event_id_node);
-		nvgpu_mutex_release(&ch->event_id_list_lock);
-	}
-
-	nvgpu_mutex_destroy(&event_id_data->lock);
-	gk20a_put(g);
-	nvgpu_kfree(g, event_id_data);
-	filp->private_data = NULL;
-
-	return 0;
-}
-
-const struct file_operations gk20a_event_id_ops = {
-	.owner = THIS_MODULE,
-	.poll = gk20a_event_id_poll,
-	.release = gk20a_event_id_release,
-};
-
-static int gk20a_channel_get_event_data_from_id(struct channel_gk20a *ch,
-				u32 event_id,
-				struct gk20a_event_id_data **event_id_data)
-{
-	struct gk20a_event_id_data *local_event_id_data;
-	bool event_found = false;
-
-	nvgpu_mutex_acquire(&ch->event_id_list_lock);
-	nvgpu_list_for_each_entry(local_event_id_data, &ch->event_id_list,
-				gk20a_event_id_data, event_id_node) {
-		if (local_event_id_data->event_id == event_id) {
-			event_found = true;
-			break;
-		}
-	}
-	nvgpu_mutex_release(&ch->event_id_list_lock);
-
-	if (event_found) {
-		*event_id_data = local_event_id_data;
-		return 0;
-	} else {
-		return -1;
-	}
-}
-
-/*
- * Convert common event_id of the form NVGPU_EVENT_ID_* to Linux specific
- * event_id of the form NVGPU_IOCTL_CHANNEL_EVENT_ID_* which is used in IOCTLs
- */
-u32 nvgpu_event_id_to_ioctl_channel_event_id(u32 event_id)
-{
-	switch (event_id) {
-	case NVGPU_EVENT_ID_BPT_INT:
-		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_INT;
-	case NVGPU_EVENT_ID_BPT_PAUSE:
-		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_PAUSE;
-	case NVGPU_EVENT_ID_BLOCKING_SYNC:
-		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BLOCKING_SYNC;
-	case NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED:
-		return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_STARTED;
-	case NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE:
-		return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_COMPLETE;
-	case NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN:
-		return NVGPU_IOCTL_CHANNEL_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN;
-	}
-
-	return NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX;
-}
-
-void gk20a_channel_event_id_post_event(struct channel_gk20a *ch,
-				       u32 __event_id)
-{
-	struct gk20a_event_id_data *event_id_data;
-	u32 event_id;
-	int err = 0;
-
-	event_id = nvgpu_event_id_to_ioctl_channel_event_id(__event_id);
-	if (event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
-		return;
-
-	err = gk20a_channel_get_event_data_from_id(ch, event_id,
-				&event_id_data);
-	if (err)
-		return;
-
-	nvgpu_mutex_acquire(&event_id_data->lock);
-
-	gk20a_dbg_info(
-		"posting event for event_id=%d on ch=%d\n",
-		event_id, ch->chid);
-	event_id_data->event_posted = true;
-
-	nvgpu_cond_broadcast_interruptible(&event_id_data->event_id_wq);
-
-	nvgpu_mutex_release(&event_id_data->lock);
-}
-
-static int gk20a_channel_event_id_enable(struct channel_gk20a *ch,
-					 int event_id,
-					 int *fd)
-{
-	struct gk20a *g;
-	int err = 0;
-	int local_fd;
-	struct file *file;
-	char name[64];
-	struct gk20a_event_id_data *event_id_data;
-
-	g = gk20a_get(ch->g);
-	if (!g)
-		return -ENODEV;
-
-	err = gk20a_channel_get_event_data_from_id(ch,
-				event_id, &event_id_data);
-	if (err == 0) {
-		/* We already have event enabled */
-		err = -EINVAL;
-		goto free_ref;
-	}
-
-	err = get_unused_fd_flags(O_RDWR);
-	if (err < 0)
-		goto free_ref;
-	local_fd = err;
-
-	snprintf(name, sizeof(name), "nvgpu-event%d-fd%d",
-		 event_id, local_fd);
-	file = anon_inode_getfile(name, &gk20a_event_id_ops,
-				  NULL, O_RDWR);
-	if (IS_ERR(file)) {
-		err = PTR_ERR(file);
-		goto clean_up;
-	}
-
-	event_id_data = nvgpu_kzalloc(ch->g, sizeof(*event_id_data));
-	if (!event_id_data) {
-		err = -ENOMEM;
-		goto clean_up_file;
-	}
-	event_id_data->g = g;
-	event_id_data->id = ch->chid;
-	event_id_data->is_tsg = false;
-	event_id_data->event_id = event_id;
-
-	nvgpu_cond_init(&event_id_data->event_id_wq);
-	err = nvgpu_mutex_init(&event_id_data->lock);
-	if (err)
-		goto clean_up_free;
-	nvgpu_init_list_node(&event_id_data->event_id_node);
-
-	nvgpu_mutex_acquire(&ch->event_id_list_lock);
-	nvgpu_list_add_tail(&event_id_data->event_id_node, &ch->event_id_list);
-	nvgpu_mutex_release(&ch->event_id_list_lock);
-
-	fd_install(local_fd, file);
-	file->private_data = event_id_data;
-
-	*fd = local_fd;
-
-	return 0;
-
-clean_up_free:
-	nvgpu_kfree(g, event_id_data);
-clean_up_file:
-	fput(file);
-clean_up:
-	put_unused_fd(local_fd);
-free_ref:
-	gk20a_put(g);
-	return err;
-}
-
-static int gk20a_channel_event_id_ctrl(struct channel_gk20a *ch,
-				       struct nvgpu_event_id_ctrl_args *args)
-{
-	int err = 0;
-	int fd = -1;
-
-	if (args->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
-		return -EINVAL;
-
-	if (gk20a_is_channel_marked_as_tsg(ch))
-		return -EINVAL;
-
-	switch (args->cmd) {
-	case NVGPU_IOCTL_CHANNEL_EVENT_ID_CMD_ENABLE:
-		err = gk20a_channel_event_id_enable(ch, args->event_id, &fd);
-		if (!err)
-			args->event_fd = fd;
-		break;
-
-	default:
-		nvgpu_err(ch->g,
-			  "unrecognized channel event id cmd: 0x%x",
-			  args->cmd);
-		err = -EINVAL;
-		break;
-	}
-
-	return err;
-}
-
 static int gk20a_channel_zcull_bind(struct channel_gk20a *ch,
 				    struct nvgpu_zcull_bind_args *args)
 {
@@ -1467,10 +1211,6 @@ long gk20a_channel_ioctl(struct file *filp,
 			NVGPU_ERR_NOTIFIER_RESETCHANNEL_VERIF_ERROR, true);
 		gk20a_idle(ch->g);
 		break;
-	case NVGPU_IOCTL_CHANNEL_EVENT_ID_CTRL:
-		err = gk20a_channel_event_id_ctrl(ch,
-			(struct nvgpu_event_id_ctrl_args *)buf);
-		break;
 #ifdef CONFIG_GK20A_CYCLE_STATS
 	case NVGPU_IOCTL_CHANNEL_CYCLE_STATS_SNAPSHOT:
 		err = gk20a_busy(ch->g);
@@ -39,10 +39,8 @@ int gk20a_channel_open_ioctl(struct gk20a *g,
 int gk20a_channel_free_cycle_stats_snapshot(struct channel_gk20a *ch);
 void gk20a_channel_free_cycle_stats_buffer(struct channel_gk20a *ch);
 
-extern const struct file_operations gk20a_event_id_ops;
 extern const struct file_operations gk20a_channel_ops;
 
-u32 nvgpu_event_id_to_ioctl_channel_event_id(u32 event_id);
 u32 nvgpu_get_common_runlist_level(u32 level);
 
 u32 nvgpu_get_ioctl_graphics_preempt_mode_flags(u32 graphics_preempt_mode_flags);
@@ -18,6 +18,7 @@
 #include <linux/file.h>
 #include <linux/cdev.h>
 #include <linux/uaccess.h>
 #include <linux/poll.h>
 #include <uapi/linux/nvgpu.h>
+#include <linux/anon_inodes.h>
 
@@ -79,6 +80,30 @@ static int gk20a_tsg_get_event_data_from_id(struct tsg_gk20a *tsg,
 	}
 }
 
+/*
+ * Convert common event_id of the form NVGPU_EVENT_ID_* to Linux specific
+ * event_id of the form NVGPU_IOCTL_CHANNEL_EVENT_ID_* which is used in IOCTLs
+ */
+static u32 nvgpu_event_id_to_ioctl_channel_event_id(u32 event_id)
+{
+	switch (event_id) {
+	case NVGPU_EVENT_ID_BPT_INT:
+		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_INT;
+	case NVGPU_EVENT_ID_BPT_PAUSE:
+		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_PAUSE;
+	case NVGPU_EVENT_ID_BLOCKING_SYNC:
+		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BLOCKING_SYNC;
+	case NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED:
+		return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_STARTED;
+	case NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE:
+		return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_COMPLETE;
+	case NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN:
+		return NVGPU_IOCTL_CHANNEL_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN;
+	}
+
+	return NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX;
+}
+
 void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
 				   int __event_id)
 {
@@ -107,6 +132,57 @@ void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
 	nvgpu_mutex_release(&event_id_data->lock);
 }
 
+static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
+{
+	unsigned int mask = 0;
+	struct gk20a_event_id_data *event_id_data = filep->private_data;
+	struct gk20a *g = event_id_data->g;
+	u32 event_id = event_id_data->event_id;
+	struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
+
+	gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, "");
+
+	poll_wait(filep, &event_id_data->event_id_wq.wq, wait);
+
+	nvgpu_mutex_acquire(&event_id_data->lock);
+
+	if (event_id_data->event_posted) {
+		gk20a_dbg_info(
+			"found pending event_id=%d on TSG=%d\n",
+			event_id, tsg->tsgid);
+		mask = (POLLPRI | POLLIN);
+		event_id_data->event_posted = false;
+	}
+
+	nvgpu_mutex_release(&event_id_data->lock);
+
+	return mask;
+}
+
+static int gk20a_event_id_release(struct inode *inode, struct file *filp)
+{
+	struct gk20a_event_id_data *event_id_data = filp->private_data;
+	struct gk20a *g = event_id_data->g;
+	struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
+
+	nvgpu_mutex_acquire(&tsg->event_id_list_lock);
+	nvgpu_list_del(&event_id_data->event_id_node);
+	nvgpu_mutex_release(&tsg->event_id_list_lock);
+
+	nvgpu_mutex_destroy(&event_id_data->lock);
+	gk20a_put(g);
+	nvgpu_kfree(g, event_id_data);
+	filp->private_data = NULL;
+
+	return 0;
+}
+
+const struct file_operations gk20a_event_id_ops = {
+	.owner = THIS_MODULE,
+	.poll = gk20a_event_id_poll,
+	.release = gk20a_event_id_release,
+};
+
 static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg,
 				     int event_id,
 				     int *fd)
@@ -152,7 +228,6 @@ static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg,
 	}
 	event_id_data->g = g;
 	event_id_data->id = tsg->tsgid;
-	event_id_data->is_tsg = true;
 	event_id_data->event_id = event_id;
 
 	nvgpu_cond_init(&event_id_data->event_id_wq);
@@ -125,27 +125,22 @@ int vgpu_get_attribute(u64 handle, u32 attrib, u32 *value)
 static void vgpu_handle_channel_event(struct gk20a *g,
 			struct tegra_vgpu_channel_event_info *info)
 {
+	struct tsg_gk20a *tsg;
+
+	if (!info->is_tsg) {
+		nvgpu_err(g, "channel event posted");
+		return;
+	}
+
 	if (info->id >= g->fifo.num_channels ||
 		info->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX) {
 		nvgpu_err(g, "invalid channel event");
 		return;
 	}
 
-	if (info->is_tsg) {
-		struct tsg_gk20a *tsg = &g->fifo.tsg[info->id];
-
-		gk20a_tsg_event_id_post_event(tsg, info->event_id);
-	} else {
-		struct channel_gk20a *ch = &g->fifo.channel[info->id];
-
-		if (!gk20a_channel_get(ch)) {
-			nvgpu_err(g, "invalid channel %d for event %d",
-				  (int)info->id, (int)info->event_id);
-			return;
-		}
-		gk20a_channel_event_id_post_event(ch, info->event_id);
-		gk20a_channel_put(ch);
-	}
+	tsg = &g->fifo.tsg[info->id];
+
+	gk20a_tsg_event_id_post_event(tsg, info->event_id);
 }
 
 
@@ -689,7 +689,6 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
 {
 	struct fifo_gk20a *f = &g->fifo;
 	struct channel_gk20a *ch;
-	struct gk20a_event_id_data *event_id_data, *event_id_data_temp;
 
 	/* compatibility with existing code */
 	if (!gk20a_fifo_is_valid_runlist_id(g, runlist_id)) {
@@ -730,16 +729,6 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
 	ch->pid = current->pid;
 	ch->tgid = current->tgid;	/* process granularity for FECS traces */
 
-	/* unhook all events created on this channel */
-	nvgpu_mutex_acquire(&ch->event_id_list_lock);
-	nvgpu_list_for_each_entry_safe(event_id_data, event_id_data_temp,
-				&ch->event_id_list,
-				gk20a_event_id_data,
-				event_id_node) {
-		nvgpu_list_del(&event_id_data->event_id_node);
-	}
-	nvgpu_mutex_release(&ch->event_id_list_lock);
-
 	/* By default, channel is regular (non-TSG) channel */
 	ch->tsgid = NVGPU_INVALID_TSG_ID;
 
@@ -2134,7 +2123,6 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
 
 	nvgpu_init_list_node(&c->joblist.dynamic.jobs);
 	nvgpu_init_list_node(&c->dbg_s_list);
-	nvgpu_init_list_node(&c->event_id_list);
 	nvgpu_init_list_node(&c->worker_item);
 
 	err = nvgpu_mutex_init(&c->ioctl_lock);
@@ -2157,19 +2145,14 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
 	if (err)
 		goto fail_5;
 #endif
-	err = nvgpu_mutex_init(&c->event_id_list_lock);
-	if (err)
-		goto fail_6;
 	err = nvgpu_mutex_init(&c->dbg_s_lock);
 	if (err)
-		goto fail_7;
+		goto fail_6;
 
 	nvgpu_list_add(&c->free_chs, &g->fifo.free_chs);
 
 	return 0;
 
-fail_7:
-	nvgpu_mutex_destroy(&c->event_id_list_lock);
 fail_6:
 #if defined(CONFIG_GK20A_CYCLE_STATS)
 	nvgpu_mutex_destroy(&c->cs_client_mutex);
@@ -2286,9 +2269,6 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
 
 			gk20a_tsg_event_id_post_event(tsg,
 				NVGPU_EVENT_ID_BLOCKING_SYNC);
-		} else {
-			gk20a_channel_event_id_post_event(c,
-				NVGPU_EVENT_ID_BLOCKING_SYNC);
 		}
 	}
 	/*
@@ -124,27 +124,6 @@ struct channel_gk20a_timeout {
 	u64 pb_get;
 };
 
-struct gk20a_event_id_data {
-	struct gk20a *g;
-
-	int id; /* ch or tsg */
-	bool is_tsg;
-	u32 event_id;
-
-	bool event_posted;
-
-	struct nvgpu_cond event_id_wq;
-	struct nvgpu_mutex lock;
-	struct nvgpu_list_node event_id_node;
-};
-
-static inline struct gk20a_event_id_data *
-gk20a_event_id_data_from_event_id_node(struct nvgpu_list_node *node)
-{
-	return (struct gk20a_event_id_data *)
-		((uintptr_t)node - offsetof(struct gk20a_event_id_data, event_id_node));
-};
-
 /*
  * Track refcount actions, saving their stack traces. This number specifies how
  * many most recent actions are stored in a buffer. Set to 0 to disable. 128
@@ -265,9 +244,6 @@ struct channel_gk20a {
 	struct nvgpu_mutex dbg_s_lock;
 	struct nvgpu_list_node dbg_s_list;
 
-	struct nvgpu_list_node event_id_list;
-	struct nvgpu_mutex event_id_list_lock;
-
 	bool has_timedout;
 	u32 timeout_ms_max;
 	bool timeout_debug_dump;
@@ -385,8 +361,6 @@ int gk20a_channel_get_timescale_from_timeslice(struct gk20a *g,
 	int *__timeslice_timeout, int *__timeslice_scale);
 int gk20a_channel_set_runlist_interleave(struct channel_gk20a *ch,
 			u32 level);
-void gk20a_channel_event_id_post_event(struct channel_gk20a *ch,
-			u32 event_id);
 
 int channel_gk20a_alloc_job(struct channel_gk20a *c,
 		struct channel_gk20a_job **job_out);
@@ -564,7 +564,6 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
 		nvgpu_mutex_destroy(&c->cyclestate.cyclestate_buffer_mutex);
 		nvgpu_mutex_destroy(&c->cs_client_mutex);
 #endif
-		nvgpu_mutex_destroy(&c->event_id_list_lock);
 		nvgpu_mutex_destroy(&c->dbg_s_lock);
 
 	}
@@ -5256,16 +5256,10 @@ static int gk20a_gr_handle_semaphore_pending(struct gk20a *g,
 {
 	struct fifo_gk20a *f = &g->fifo;
 	struct channel_gk20a *ch = &f->channel[isr_data->chid];
+	struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
 
-	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
-
-		gk20a_tsg_event_id_post_event(tsg,
-			NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN);
-	} else {
-		gk20a_channel_event_id_post_event(ch,
-			NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN);
-	}
+	gk20a_tsg_event_id_post_event(tsg,
+		NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN);
 
 	nvgpu_cond_broadcast(&ch->semaphore_wq);
 
@@ -5806,26 +5800,16 @@ static int gk20a_gr_post_bpt_events(struct gk20a *g, struct channel_gk20a *ch,
 					    u32 global_esr)
 {
 	if (global_esr & gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f()) {
-		if (gk20a_is_channel_marked_as_tsg(ch)) {
-			struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
+		struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
 
-			gk20a_tsg_event_id_post_event(tsg,
-					NVGPU_EVENT_ID_BPT_INT);
-		} else {
-			gk20a_channel_event_id_post_event(ch,
-					NVGPU_EVENT_ID_BPT_INT);
-		}
+		gk20a_tsg_event_id_post_event(tsg,
+				NVGPU_EVENT_ID_BPT_INT);
 	}
 	if (global_esr & gr_gpc0_tpc0_sm_hww_global_esr_bpt_pause_pending_f()) {
-		if (gk20a_is_channel_marked_as_tsg(ch)) {
-			struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
+		struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
 
-			gk20a_tsg_event_id_post_event(tsg,
-					NVGPU_EVENT_ID_BPT_PAUSE);
-		} else {
-			gk20a_channel_event_id_post_event(ch,
-					NVGPU_EVENT_ID_BPT_PAUSE);
-		}
+		gk20a_tsg_event_id_post_event(tsg,
+				NVGPU_EVENT_ID_BPT_PAUSE);
 	}
 
 	return 0;
@@ -87,5 +87,24 @@ u32 gk20a_tsg_get_timeslice(struct tsg_gk20a *tsg);
 int gk20a_tsg_set_priority(struct gk20a *g, struct tsg_gk20a *tsg,
 		u32 priority);
 
+struct gk20a_event_id_data {
+	struct gk20a *g;
+
+	int id; /* ch or tsg */
+	u32 event_id;
+
+	bool event_posted;
+
+	struct nvgpu_cond event_id_wq;
+	struct nvgpu_mutex lock;
+	struct nvgpu_list_node event_id_node;
+};
+
+static inline struct gk20a_event_id_data *
+gk20a_event_id_data_from_event_id_node(struct nvgpu_list_node *node)
+{
+	return (struct gk20a_event_id_data *)
+		((uintptr_t)node - offsetof(struct gk20a_event_id_data, event_id_node));
+};
+
 #endif /* __TSG_GK20A_H_ */
@@ -1709,6 +1709,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
 {
 	int ret;
 	struct gr_ctx_desc *gr_ctx = fault_ch->ch_ctx.gr_ctx;
+	struct tsg_gk20a *tsg;
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "");
 
@@ -1773,15 +1774,10 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
 	gr_ctx->cilp_preempt_pending = true;
 	g->gr.cilp_preempt_pending_chid = fault_ch->chid;
 
-	if (gk20a_is_channel_marked_as_tsg(fault_ch)) {
-		struct tsg_gk20a *tsg = &g->fifo.tsg[fault_ch->tsgid];
+	tsg = &g->fifo.tsg[fault_ch->tsgid];
 
-		gk20a_tsg_event_id_post_event(tsg,
-				NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED);
-	} else {
-		gk20a_channel_event_id_post_event(fault_ch,
-				NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED);
-	}
+	gk20a_tsg_event_id_post_event(tsg,
+			NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED);
 
 	return 0;
 }
@@ -1948,6 +1944,7 @@ int gr_gp10b_handle_fecs_error(struct gk20a *g,
 	struct channel_gk20a *ch;
 	int chid = -1;
 	int ret = 0;
+	struct tsg_gk20a *tsg;
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "");
 
@@ -1984,15 +1981,10 @@ int gr_gp10b_handle_fecs_error(struct gk20a *g,
 		/* Post events to UMD */
 		gk20a_dbg_gpu_post_events(ch);
 
-		if (gk20a_is_channel_marked_as_tsg(ch)) {
-			struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
+		tsg = &g->fifo.tsg[ch->tsgid];
 
-			gk20a_tsg_event_id_post_event(tsg,
-					NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE);
-		} else {
-			gk20a_channel_event_id_post_event(ch,
-					NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE);
-		}
+		gk20a_tsg_event_id_post_event(tsg,
+				NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE);
 
 		gk20a_channel_put(ch);
 	}