gpu: nvgpu: rework TSG's channel list

Rework TSG's channel list: replace "ch_runnable_list", which held only
runnable channels, with "ch_list", which holds all channels bound to
the TSG. Runnable channels can then be obtained by traversing this list
and checking each channel's runnable status in the runlist's
"active_channels" bitmap.
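
As a minimal sketch (not part of this change), counting a TSG's
runnable channels under the new scheme looks like the following. The
helper name tsg_count_runnable_channels is hypothetical, and the
runlist parameter is assumed to be fifo_gk20a's runlist-info type:

    #include <linux/bitops.h>
    #include <linux/list.h>
    #include <linux/mutex.h>

    /* Hypothetical helper: walk tsg->ch_list (all bound channels) and
     * count those whose bit is set in the runlist's active_channels
     * bitmap, i.e. the runnable ones. The reworked
     * gk20a_fifo_update_runlist_locked() applies the same test inline.
     */
    static u32 tsg_count_runnable_channels(struct tsg_gk20a *tsg,
                    struct fifo_runlist_info_gk20a *runlist)
    {
            struct channel_gk20a *ch;
            u32 count = 0;

            mutex_lock(&tsg->ch_list_lock);
            list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
                    if (test_bit(ch->hw_chid, runlist->active_channels))
                            count++;
            }
            mutex_unlock(&tsg->ch_list_lock);

            return count;
    }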

Remove the following APIs, as they are no longer required:
gk20a_bind_runnable_channel_to_tsg()
gk20a_unbind_channel_from_tsg()

While closing a channel, call gk20a_tsg_unbind_channel() to unbind
the channel from its TSG.
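
A condensed sketch of the resulting close path (the function name
channel_close_sketch is hypothetical; the actual change is the
channel_gk20a.c hunk below):

    /* On close, a channel bound to a TSG is first removed from the
     * TSG's ch_list (which also resets ch->tsgid), then unbound from
     * hardware as before.
     */
    static void channel_close_sketch(struct gk20a *g,
                    struct channel_gk20a *ch)
    {
            if (gk20a_is_channel_marked_as_tsg(ch))
                    gk20a_tsg_unbind_channel(ch);

            channel_gk20a_unbind(ch);
            channel_gk20a_free_inst(g, ch);
    }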

Bug 1470692

Change-Id: I0178fa74b3e8bb4e5c0b3e3b2b2f031491761ba7
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/449227
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>

@@ -680,6 +680,9 @@ void gk20a_free_channel(struct channel_gk20a *ch, bool finish)
 	gk20a_vm_put(ch_vm);
 
 unbind:
+	if (gk20a_is_channel_marked_as_tsg(ch))
+		gk20a_tsg_unbind_channel(ch);
+
 	channel_gk20a_unbind(ch);
 	channel_gk20a_free_inst(g, ch);


@@ -1722,41 +1722,32 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 	phys_addr_t runlist_pa;
 	u32 old_buf, new_buf;
 	u32 chid, tsgid;
-	struct channel_gk20a *ch;
-	struct tsg_gk20a *tsg;
+	struct channel_gk20a *ch = NULL;
+	struct tsg_gk20a *tsg = NULL;
 	u32 count = 0;
-	int num_ch;
 
 	runlist = &f->runlist_info[runlist_id];
 
 	/* valid channel, add/remove it from active list.
	   Otherwise, keep active list untouched for suspend/resume. */
 	if (hw_chid != ~0) {
+		ch = &f->channel[hw_chid];
+		if (gk20a_is_channel_marked_as_tsg(ch))
+			tsg = &f->tsg[ch->tsgid];
+
 		if (add) {
 			if (test_and_set_bit(hw_chid,
				runlist->active_channels) == 1)
				return 0;
-			if (gk20a_is_channel_marked_as_tsg(
-					&f->channel[hw_chid])) {
-				num_ch = gk20a_bind_runnable_channel_to_tsg(
-						&f->channel[hw_chid],
-						f->channel[hw_chid].tsgid);
-				if (num_ch > 0)
-					set_bit(f->channel[hw_chid].tsgid,
-						runlist->active_tsgs);
-			}
+			if (tsg && ++tsg->num_active_channels > 0)
+				set_bit(f->channel[hw_chid].tsgid,
+					runlist->active_tsgs);
 		} else {
 			if (test_and_clear_bit(hw_chid,
				runlist->active_channels) == 0)
				return 0;
-			if (gk20a_is_channel_marked_as_tsg(
-					&f->channel[hw_chid])) {
-				num_ch = gk20a_unbind_channel_from_tsg(
-						&f->channel[hw_chid],
-						f->channel[hw_chid].tsgid);
-				if (!num_ch)
-					clear_bit(f->channel[hw_chid].tsgid,
-						runlist->active_tsgs);
-			}
+			if (tsg && --tsg->num_active_channels == 0)
+				clear_bit(f->channel[hw_chid].tsgid,
+					runlist->active_tsgs);
 		}
 	}
@@ -1811,15 +1802,17 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 				ram_rl_entry_timeslice_timeout_f(
					ram_rl_entry_timeslice_timeout_128_f()) |
 				ram_rl_entry_tsg_length_f(
-					tsg->num_runnable_channels);
+					tsg->num_active_channels);
 			runlist_entry[1] = 0;
 			runlist_entry += 2;
 			count++;
 
-			/* add channels bound to this TSG */
+			/* add runnable channels bound to this TSG */
 			mutex_lock(&tsg->ch_list_lock);
-			list_for_each_entry(ch,
-				&tsg->ch_runnable_list, ch_entry) {
+			list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
+				if (!test_bit(ch->hw_chid,
+					runlist->active_channels))
+					continue;
 				gk20a_dbg_info("add channel %d to runlist",
					ch->hw_chid);
 				runlist_entry[0] =


@@ -29,64 +29,28 @@ bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch)
 	return !(ch->tsgid == NVGPU_INVALID_TSG_ID);
 }
 
-/*
- * API to add channel to runnable list of TSG.
- *
- * After this call, a channel will be scheduled as TSG channel
- * in runlist
- */
-int gk20a_bind_runnable_channel_to_tsg(struct channel_gk20a *ch, int tsgid)
-{
-	struct gk20a *g = ch->g;
-	struct tsg_gk20a *tsg = NULL;
-
-	if (ch->tsgid != tsgid)
-		return -EINVAL;
-
-	tsg = &g->fifo.tsg[tsgid];
-
-	mutex_lock(&tsg->ch_list_lock);
-	list_add_tail(&ch->ch_entry, &tsg->ch_runnable_list);
-	tsg->num_runnable_channels += 1;
-	mutex_unlock(&tsg->ch_list_lock);
-
-	return tsg->num_runnable_channels;
-}
-
-int gk20a_unbind_channel_from_tsg(struct channel_gk20a *ch, int tsgid)
-{
-	struct gk20a *g = ch->g;
-	struct tsg_gk20a *tsg = NULL;
-
-	if (ch->tsgid != tsgid)
-		return -EINVAL;
-
-	tsg = &g->fifo.tsg[tsgid];
-
-	mutex_lock(&tsg->ch_list_lock);
-	list_del_init(&ch->ch_entry);
-	tsg->num_runnable_channels -= 1;
-	mutex_unlock(&tsg->ch_list_lock);
-
-	return tsg->num_runnable_channels;
-}
-
 /*
  * API to mark channel as part of TSG
  *
  * Note that channel is not runnable when we bind it to TSG
  */
-static int nvgpu_tsg_bind_channel(struct tsg_gk20a *tsg, int ch_fd)
+static int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, int ch_fd)
 {
 	struct file *f = fget(ch_fd);
 	struct channel_gk20a *ch = f->private_data;
 
 	/* check if channel is already bound to some TSG */
-	if (gk20a_is_channel_marked_as_tsg(ch))
+	if (gk20a_is_channel_marked_as_tsg(ch)) {
+		fput(f);
 		return -EINVAL;
+	}
 
 	ch->tsgid = tsg->tsgid;
 
+	mutex_lock(&tsg->ch_list_lock);
+	list_add_tail(&ch->ch_entry, &tsg->ch_list);
+	mutex_unlock(&tsg->ch_list_lock);
+
 	gk20a_dbg(gpu_dbg_fn, "BIND tsg:%d channel:%d\n",
		tsg->tsgid, ch->hw_chid);
@@ -95,11 +59,17 @@ static int nvgpu_tsg_bind_channel(struct tsg_gk20a *tsg, int ch_fd)
 	return 0;
 }
 
-static int nvgpu_tsg_unbind_channel(struct tsg_gk20a *tsg, int ch_fd)
+int gk20a_tsg_unbind_channel(struct channel_gk20a *ch)
 {
-	/* We do not support explicitly unbinding channel from TSG.
-	 * Channel will be unbounded from TSG when it is closed.
-	 */
+	struct fifo_gk20a *f = &ch->g->fifo;
+	struct tsg_gk20a *tsg = &f->tsg[ch->tsgid];
+
+	mutex_lock(&tsg->ch_list_lock);
+	list_del_init(&ch->ch_entry);
+	mutex_unlock(&tsg->ch_list_lock);
+
+	ch->tsgid = NVGPU_INVALID_TSG_ID;
+
 	return 0;
 }
@@ -115,7 +85,7 @@ int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
 	tsg->in_use = false;
 	tsg->tsgid = tsgid;
 
-	INIT_LIST_HEAD(&tsg->ch_runnable_list);
+	INIT_LIST_HEAD(&tsg->ch_list);
 	mutex_init(&tsg->ch_list_lock);
 
 	return 0;
@@ -163,7 +133,7 @@ int gk20a_tsg_dev_open(struct inode *inode, struct file *filp)
 		return -ENOMEM;
 
 	tsg->g = g;
-	tsg->num_runnable_channels = 0;
+	tsg->num_active_channels = 0;
 	tsg->tsg_gr_ctx = NULL;
 	tsg->vm = NULL;
@@ -181,10 +151,10 @@ int gk20a_tsg_dev_release(struct inode *inode, struct file *filp)
 	struct gk20a *g = container_of(inode->i_cdev,
			struct gk20a, tsg.cdev);
 
-	if (tsg->num_runnable_channels) {
+	if (tsg->num_active_channels) {
 		gk20a_err(dev_from_gk20a(g),
			"Trying to free TSG %d with active channels %d\n",
-			tsg->tsgid, tsg->num_runnable_channels);
+			tsg->tsgid, tsg->num_active_channels);
 		return -EBUSY;
 	}
@@ -240,20 +210,15 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
 			err = -EINVAL;
 			break;
 		}
-		err = nvgpu_tsg_bind_channel(tsg, ch_fd);
+		err = gk20a_tsg_bind_channel(tsg, ch_fd);
 		break;
 		}
 
 	case NVGPU_TSG_IOCTL_UNBIND_CHANNEL:
-		{
-		int ch_fd = *(int *)buf;
-		if (ch_fd < 0) {
-			err = -EINVAL;
-			break;
-		}
-		err = nvgpu_tsg_unbind_channel(tsg, ch_fd);
+		/* We do not support explicitly unbinding channel from TSG.
+		 * Channel will be unbound from TSG when it is closed.
+		 */
 		break;
-		}
 
 	default:
 		gk20a_err(dev_from_gk20a(g),

@@ -27,8 +27,7 @@ long gk20a_tsg_dev_ioctl(struct file *filp,
 int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid);
-int gk20a_bind_runnable_channel_to_tsg(struct channel_gk20a *ch, int tsgid);
-int gk20a_unbind_channel_from_tsg(struct channel_gk20a *ch, int tsgid);
+int gk20a_tsg_unbind_channel(struct channel_gk20a *ch);
 
 struct tsg_gk20a {
 	struct gk20a *g;
@@ -36,8 +35,8 @@ struct tsg_gk20a {
 	bool in_use;
 	int tsgid;
 
-	struct list_head ch_runnable_list;
-	int num_runnable_channels;
+	struct list_head ch_list;
+	int num_active_channels;
 	struct mutex ch_list_lock;
 
 	struct gr_ctx_desc *tsg_gr_ctx;