gpu: nvgpu: use channel pointer for update_runlist

A bare channel ID carries no information about channel validity and is
too low-level a construct for an API at this level. Refactor the
runlist update fifo APIs to take a channel pointer instead.

While at it, delete the channel and wait_for_finish parameters from
gk20a_fifo_update_runlist_ids(): its only callers are channel suspend
and resume, which always passed an invalid channel ID and true for
wait_for_finish.
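
For reference, the old and new prototypes of the touched entry points,
as reconstructed from the hunks below:

/* Before: callers identified the channel by a raw ID. */
int (*update_runlist)(struct gk20a *g, u32 runlist_id,
		u32 chid, bool add,
		bool wait_for_finish);
int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 chid,
		bool add, bool wait_for_finish);

/* After: callers pass the channel pointer (NULL means "all active
 * channels"), and the suspend/resume helper loses its always-constant
 * arguments. */
int (*update_runlist)(struct gk20a *g, u32 runlist_id,
		struct channel_gk20a *ch, bool add,
		bool wait_for_finish);
int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids,
		bool add);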

Jira NVGPU-1309
Jira NVGPU-1737

Change-Id: Ied350bc8e482d8e311cc708ab0c7afdf315c61cc
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1997744
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:       Konsta Holtta
Date:         2019-01-17 11:16:27 +02:00
Committed by: mobile promotions
Parent:       911d25dda2
Commit:       4e85ebc05f
10 changed files with 47 additions and 49 deletions


@@ -160,7 +160,7 @@ int gk20a_channel_get_timescale_from_timeslice(struct gk20a *g,
int channel_gk20a_update_runlist(struct channel_gk20a *c, bool add)
{
return c->g->ops.runlist.update_runlist(c->g, c->runlist_id,
- c->chid, add, true);
+ c, add, true);
}
int gk20a_enable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch)
@@ -2440,11 +2440,7 @@ int gk20a_channel_suspend(struct gk20a *g)
}
if (channels_in_use) {
- gk20a_fifo_update_runlist_ids(g,
- active_runlist_ids,
- FIFO_INVAL_CHANNEL_ID,
- false,
- true);
+ gk20a_fifo_update_runlist_ids(g, active_runlist_ids, false);
for (chid = 0; chid < f->num_channels; chid++) {
struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
@@ -2494,11 +2490,7 @@ int gk20a_channel_resume(struct gk20a *g)
}
if (channels_in_use) {
- gk20a_fifo_update_runlist_ids(g,
- active_runlist_ids,
- FIFO_INVAL_CHANNEL_ID,
- true,
- true);
+ gk20a_fifo_update_runlist_ids(g, active_runlist_ids, true);
}
nvgpu_log_fn(g, "done");
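
A usage illustration (hypothetical caller, not part of this change):
with the new convention the channel pointer itself is handed to the
runlist code, which derives chid/tsgid from it.

/* Hypothetical helper, assuming only functions visible in this diff. */
static int example_toggle_channel(struct channel_gk20a *ch, bool add)
{
	/* forwards ch to g->ops.runlist.update_runlist() and waits for
	 * the hardware to pick up the new runlist */
	return channel_gk20a_update_runlist(ch, add);
}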


@@ -275,7 +275,7 @@ u32 nvgpu_runlist_construct_locked(struct fifo_gk20a *f,
}
int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
- u32 chid, bool add,
+ struct channel_gk20a *ch, bool add,
bool wait_for_finish)
{
int ret = 0;
@@ -283,36 +283,34 @@ int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
struct fifo_runlist_info_gk20a *runlist = NULL;
u64 runlist_iova;
u32 new_buf;
- struct channel_gk20a *ch = NULL;
struct tsg_gk20a *tsg = NULL;
runlist = &f->runlist_info[runlist_id];
/* valid channel, add/remove it from active list.
Otherwise, keep active list untouched for suspend/resume. */
- if (chid != FIFO_INVAL_CHANNEL_ID) {
- ch = &f->channel[chid];
+ if (ch != NULL) {
if (gk20a_is_channel_marked_as_tsg(ch)) {
tsg = &f->tsg[ch->tsgid];
}
if (add) {
- if (test_and_set_bit(chid,
+ if (test_and_set_bit((int)ch->chid,
runlist->active_channels)) {
return 0;
}
if ((tsg != NULL) && (++tsg->num_active_channels != 0U)) {
- set_bit((int)f->channel[chid].tsgid,
+ set_bit((int)tsg->tsgid,
runlist->active_tsgs);
}
} else {
- if (!test_and_clear_bit(chid,
+ if (!test_and_clear_bit((int)ch->chid,
runlist->active_channels)) {
return 0;
}
if ((tsg != NULL) &&
(--tsg->num_active_channels == 0U)) {
- clear_bit((int)f->channel[chid].tsgid,
+ clear_bit((int)tsg->tsgid,
runlist->active_tsgs);
}
}
@@ -331,7 +329,7 @@ int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
goto clean_up;
}
- if (chid != FIFO_INVAL_CHANNEL_ID || /* add/remove a valid channel */
+ if (ch != NULL || /* add/remove a valid channel */
add /* resume to add all channels back */) {
u32 num_entries;
@@ -422,9 +420,10 @@ static void gk20a_fifo_runlist_reset_engines(struct gk20a *g, u32 runlist_id)
/* add/remove a channel from runlist
special cases below: runlist->active_channels will NOT be changed.
- (chid == ~0 && !add) means remove all active channels from runlist.
- (chid == ~0 && add) means restore all active channels on runlist. */
- int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
+ (ch == NULL && !add) means remove all active channels from runlist.
+ (ch == NULL && add) means restore all active channels on runlist. */
+ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
+ struct channel_gk20a *ch,
bool add, bool wait_for_finish)
{
struct fifo_runlist_info_gk20a *runlist = NULL;
@@ -444,7 +443,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
PMU_MUTEX_ID_FIFO, &token);
}
- ret = gk20a_fifo_update_runlist_locked(g, runlist_id, chid, add,
+ ret = gk20a_fifo_update_runlist_locked(g, runlist_id, ch, add,
wait_for_finish);
if (mutex_ret == 0) {
@@ -460,8 +459,8 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
return ret;
}
- int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 chid,
- bool add, bool wait_for_finish)
+ int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids,
+ bool add)
{
int ret = -EINVAL;
unsigned long runlist_id = 0;
@@ -476,7 +475,7 @@ int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 chid,
for_each_set_bit(runlist_id, &ulong_runlist_ids, 32U) {
/* Capture the last failure error code */
errcode = g->ops.runlist.update_runlist(g, (u32)runlist_id,
- chid, add, wait_for_finish);
+ NULL, add, true);
if (errcode != 0) {
nvgpu_err(g,
"failed to update_runlist %lu %d",

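A minimal sketch of the three calling modes, with ch == NULL replacing
the old chid == ~0 sentinel (hypothetical call sites, mirroring the
comment and the callers in this patch):

/* Hypothetical call sites, for illustration only. */
static void example_runlist_modes(struct gk20a *g, struct channel_gk20a *ch)
{
	/* ch != NULL: add (or remove) this one channel, wait for hw */
	(void)gk20a_fifo_update_runlist(g, ch->runlist_id, ch, true, true);

	/* ch == NULL && !add: remove all active channels (suspend path) */
	(void)gk20a_fifo_update_runlist(g, ch->runlist_id, NULL, false, true);

	/* ch == NULL && add: restore all active channels (resume path) */
	(void)gk20a_fifo_update_runlist(g, ch->runlist_id, NULL, true, true);
}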

@@ -384,7 +384,7 @@ int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level)
return (ret != 0) ? ret : g->ops.runlist.update_runlist(g,
tsg->runlist_id,
- FIFO_INVAL_CHANNEL_ID,
+ NULL,
true,
true);
}


@@ -2767,7 +2767,7 @@ int gk20a_fifo_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
return g->ops.runlist.update_runlist(g,
tsg->runlist_id,
- FIFO_INVAL_CHANNEL_ID,
+ NULL,
true,
true);
}


@@ -1563,7 +1563,7 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a
ret = g->ops.runlist.update_runlist(g,
fault_ch->runlist_id,
- FIFO_INVAL_CHANNEL_ID,
+ NULL,
true,
false);
if (ret != 0) {


@@ -923,7 +923,6 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
struct fifo_runlist_info_gk20a *runlist = NULL;
u32 token = PMU_INVALID_MUTEX_OWNER_ID;
int mutex_ret = -EINVAL;
- bool add = false, wait_for_finish = false;
int err;
nvgpu_err(g, "runlist id unknown, abort active tsgs in runlists");
@@ -968,9 +967,12 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
}
}
- /* (chid == ~0 && !add) remove all act ch from runlist*/
+ /*
+ * remove all entries from this runlist; don't wait for
+ * the update to finish on hw.
+ */
err = gk20a_fifo_update_runlist_locked(g, rlid,
- FIFO_INVAL_CHANNEL_ID, add, wait_for_finish);
+ NULL, false, false);
if (err != 0) {
nvgpu_err(g, "runlist id %d is not cleaned up",
rlid);


@@ -852,7 +852,7 @@ struct gpu_ops {
int (*reschedule_preempt_next_locked)(struct channel_gk20a *ch,
bool wait_preempt);
int (*update_runlist)(struct gk20a *g, u32 runlist_id,
- u32 chid, bool add,
+ struct channel_gk20a *ch, bool add,
bool wait_for_finish);
int (*set_runlist_interleave)(struct gk20a *g, u32 id,
u32 runlist_id,

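The HAL hook keeps the same shape on the native and vgpu paths; a
sketch of how a chip setup might wire it up (the init function name
below is assumed, not taken from this patch):

/* Sketch only; the per-chip HAL init function is assumed. */
static void example_init_runlist_ops(struct gpu_ops *gops)
{
	/* native implementation; a virtualized chip would instead plug in
	 * vgpu_fifo_update_runlist, which shares the new prototype */
	gops->runlist.update_runlist = gk20a_fifo_update_runlist;
}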

@@ -37,16 +37,17 @@ u32 nvgpu_runlist_construct_locked(struct fifo_gk20a *f,
u32 buf_id,
u32 max_entries);
int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
- u32 chid, bool add,
+ struct channel_gk20a *ch, bool add,
bool wait_for_finish);
int nvgpu_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next,
bool wait_preempt);
- int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
+ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
+ struct channel_gk20a *ch,
bool add, bool wait_for_finish);
- int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 chid,
- bool add, bool wait_for_finish);
+ int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids,
+ bool add);
const char *gk20a_fifo_interleave_level_name(u32 interleave_level);


@@ -475,7 +475,7 @@ done:
}
static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
- u32 chid, bool add,
+ struct channel_gk20a *ch, bool add,
bool wait_for_finish)
{
struct fifo_gk20a *f = &g->fifo;
@@ -489,19 +489,19 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
/* valid channel, add/remove it from active list.
Otherwise, keep active list untouched for suspend/resume. */
- if (chid != (u32)~0) {
+ if (ch != NULL) {
if (add) {
- if (test_and_set_bit(chid,
+ if (test_and_set_bit((int)ch->chid,
runlist->active_channels) == 1)
return 0;
} else {
- if (test_and_clear_bit(chid,
+ if (test_and_clear_bit((int)ch->chid,
runlist->active_channels) == 0)
return 0;
}
}
- if (chid != (u32)~0 || /* add/remove a valid channel */
+ if (ch != NULL || /* add/remove a valid channel */
add /* resume to add all channels back */) {
u32 cid;
@@ -513,8 +513,10 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
runlist_entry++;
count++;
}
- } else /* suspend to remove all channels */
+ } else {
+ /* suspend to remove all channels */
count = 0;
+ }
return vgpu_submit_runlist(g, vgpu_get_handle(g), runlist_id,
runlist->mem[0].cpu_va, count);
@@ -522,10 +524,11 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
/* add/remove a channel from runlist
special cases below: runlist->active_channels will NOT be changed.
- (chid == ~0 && !add) means remove all active channels from runlist.
- (chid == ~0 && add) means restore all active channels on runlist. */
+ (ch == NULL && !add) means remove all active channels from runlist.
+ (ch == NULL && add) means restore all active channels on runlist. */
int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
- u32 chid, bool add, bool wait_for_finish)
+ struct channel_gk20a *ch,
+ bool add, bool wait_for_finish)
{
struct fifo_runlist_info_gk20a *runlist = NULL;
struct fifo_gk20a *f = &g->fifo;
@@ -537,7 +540,7 @@ int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
nvgpu_mutex_acquire(&runlist->runlist_lock);
- ret = vgpu_fifo_update_runlist_locked(g, runlist_id, chid, add,
+ ret = vgpu_fifo_update_runlist_locked(g, runlist_id, ch, add,
wait_for_finish);
nvgpu_mutex_release(&runlist->runlist_lock);


@@ -44,7 +44,8 @@ int vgpu_fifo_init_engine_info(struct fifo_gk20a *f);
int vgpu_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch);
int vgpu_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
- u32 chid, bool add, bool wait_for_finish);
+ struct channel_gk20a *ch,
+ bool add, bool wait_for_finish);
int vgpu_fifo_wait_engine_idle(struct gk20a *g);
int vgpu_fifo_set_runlist_interleave(struct gk20a *g,
u32 id,