gpu: nvgpu: Update runlist_update() to take runlist ptr

Update the nvgpu_runlist_update_for_channel() function:

  - Rename it to nvgpu_runlist_update()
  - Have it take a pointer to the runlist to update instead
    of a runlist ID, as sketched below. For the most part this
    makes the code better, but there are a few places where it's
    worse (for now).
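
As a quick reference, the before/after prototypes look like this
(reconstructed from the diff below; only the formatting here is new):

    /* Assumes nvgpu's usual types (u32, bool) are in scope. */
    struct gk20a;
    struct nvgpu_runlist;
    struct nvgpu_channel;

    /* Before: callers pass a runlist ID and the common code looks
     * the runlist up itself. */
    int nvgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
                                         struct nvgpu_channel *ch,
                                         bool add, bool wait_for_finish);

    /* After: callers pass the runlist object directly. */
    int nvgpu_runlist_update(struct gk20a *g, struct nvgpu_runlist *rl,
                             struct nvgpu_channel *ch,
                             bool add, bool wait_for_finish);

A typical call site then becomes, as in nvgpu_channel_update_runlist()
below:

    return c->g->ops.runlist.update(c->g, c->runlist, c, add, true);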

This starts the slow and painful process of weaning the non-runlist
code off of runlist IDs, which it currently uses in many places where
it should not.

Most of this patch just fixes the compilation problems that follow
from the minor header updates.
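
A minimal sketch of the kind of header update involved (the
gops_runlist struct name and member layout here are assumed from the
call sites in the hunks below, not taken from the actual headers):

    /* Assumes nvgpu's usual types (bool, u32) are in scope. */
    struct gk20a;
    struct nvgpu_runlist;
    struct nvgpu_channel;

    /* Runlist HAL ops now take the runlist object, not its ID. */
    struct gops_runlist {
            int (*update)(struct gk20a *g, struct nvgpu_runlist *rl,
                          struct nvgpu_channel *ch,
                          bool add, bool wait_for_finish);
            int (*reload)(struct gk20a *g, struct nvgpu_runlist *rl,
                          bool add, bool wait_for_finish);
    };

Call sites such as c->g->ops.runlist.update(...) and
g->ops.runlist.reload(...) in the hunks below match this shape.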

JIRA NVGPU-6425

Change-Id: Id9885fe655d1d750625a1c8aceda9e67a2cbdb7a
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2470304
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author:       Alex Waterman
Authored:     2020-12-31 22:42:19 -06:00
Committed by: mobile promotions
Parent:       fae1f20ab7
Commit:       77c0b9ffdc
19 changed files with 75 additions and 78 deletions

@@ -158,8 +158,7 @@ void nvgpu_channel_commit_va(struct nvgpu_channel *c)
 int nvgpu_channel_update_runlist(struct nvgpu_channel *c, bool add)
 {
-        return c->g->ops.runlist.update_for_channel(c->g, c->runlist_id,
-                        c, add, true);
+        return c->g->ops.runlist.update(c->g, c->runlist, c, add, true);
 }
 int nvgpu_channel_enable_tsg(struct gk20a *g, struct nvgpu_channel *ch)
@@ -1249,7 +1248,7 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 15_6))
         ch->g = g;
         /* Runlist for the channel */
-        ch->runlist_id = runlist_id;
+        ch->runlist = f->runlists[runlist_id];
         /* Channel privilege level */
         ch->is_privileged_channel = is_privileged_channel;
@@ -1903,7 +1902,7 @@ int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g)
                         channels_in_use = true;
-                        active_runlist_ids |= BIT32(ch->runlist_id);
+                        active_runlist_ids |= BIT32(ch->runlist->runlist_id);
                 }
                 nvgpu_channel_put(ch);
@@ -1940,7 +1939,7 @@ int nvgpu_channel_resume_all_serviceable_ch(struct gk20a *g)
                         nvgpu_log_info(g, "resume channel %d", chid);
                         g->ops.channel.bind(ch);
                         channels_in_use = true;
-                        active_runlist_ids |= BIT32(ch->runlist_id);
+                        active_runlist_ids |= BIT32(ch->runlist->runlist_id);
                 }
                 nvgpu_channel_put(ch);
         }

@@ -471,7 +471,7 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
 #endif
         int ret = 0;
-        runlist = g->fifo.runlists[ch->runlist_id];
+        runlist = ch->runlist;
         if (nvgpu_mutex_tryacquire(&runlist->runlist_lock) == 0) {
                 return -EBUSY;
         }
@@ -481,7 +481,7 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
 #endif
         g->ops.runlist.hw_submit(
-                g, ch->runlist_id, runlist->count, runlist->cur_buffer);
+                g, runlist->runlist_id, runlist->count, runlist->cur_buffer);
         if (preempt_next) {
                 if (g->ops.runlist.reschedule_preempt_next_locked(ch,
@@ -490,9 +490,9 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
                 }
         }
-        if (g->ops.runlist.wait_pending(g, ch->runlist_id) != 0) {
+        if (g->ops.runlist.wait_pending(g, runlist->runlist_id) != 0) {
                 nvgpu_err(g, "wait pending failed for runlist %u",
-                        ch->runlist_id);
+                        runlist->runlist_id);
         }
 #ifdef CONFIG_NVGPU_LS_PMU
         if (mutex_ret == 0) {
@@ -512,12 +512,10 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
    special cases below: runlist->active_channels will NOT be changed.
    (ch == NULL && !add) means remove all active channels from runlist.
    (ch == NULL && add) means restore all active channels on runlist. */
-static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id,
-                                struct nvgpu_channel *ch,
-                                bool add, bool wait_for_finish)
+static int nvgpu_runlist_do_update(struct gk20a *g, struct nvgpu_runlist *rl,
+                                struct nvgpu_channel *ch,
+                                bool add, bool wait_for_finish)
 {
-        struct nvgpu_runlist *runlist = NULL;
-        struct nvgpu_fifo *f = &g->fifo;
 #ifdef CONFIG_NVGPU_LS_PMU
         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
         int mutex_ret = 0;
@@ -526,14 +524,12 @@ static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id,
         nvgpu_log_fn(g, " ");
-        runlist = f->runlists[runlist_id];
-        nvgpu_mutex_acquire(&runlist->runlist_lock);
+        nvgpu_mutex_acquire(&rl->runlist_lock);
 #ifdef CONFIG_NVGPU_LS_PMU
         mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
                 PMU_MUTEX_ID_FIFO, &token);
 #endif
-        ret = nvgpu_runlist_update_locked(g, runlist_id, ch, add,
+        ret = nvgpu_runlist_update_locked(g, rl->runlist_id, ch, add,
                         wait_for_finish);
 #ifdef CONFIG_NVGPU_LS_PMU
         if (mutex_ret == 0) {
@@ -543,32 +539,33 @@ static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id,
                 }
         }
 #endif
-        nvgpu_mutex_release(&runlist->runlist_lock);
+        nvgpu_mutex_release(&rl->runlist_lock);
         if (ret == -ETIMEDOUT) {
-                nvgpu_rc_runlist_update(g, runlist_id);
+                nvgpu_rc_runlist_update(g, rl->runlist_id);
         }
         return ret;
 }
-int nvgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
-                        struct nvgpu_channel *ch,
-                        bool add, bool wait_for_finish)
+int nvgpu_runlist_update(struct gk20a *g, struct nvgpu_runlist *rl,
+                        struct nvgpu_channel *ch,
+                        bool add, bool wait_for_finish)
 {
         nvgpu_assert(ch != NULL);
-        return nvgpu_runlist_update(g, runlist_id, ch, add, wait_for_finish);
+        return nvgpu_runlist_do_update(g, rl, ch, add, wait_for_finish);
 }
-int nvgpu_runlist_reload(struct gk20a *g, u32 runlist_id,
+int nvgpu_runlist_reload(struct gk20a *g, struct nvgpu_runlist *rl,
                 bool add, bool wait_for_finish)
 {
-        return nvgpu_runlist_update(g, runlist_id, NULL, add, wait_for_finish);
+        return nvgpu_runlist_do_update(g, rl, NULL, add, wait_for_finish);
 }
 int nvgpu_runlist_reload_ids(struct gk20a *g, u32 runlist_ids, bool add)
 {
+        struct nvgpu_fifo *f = &g->fifo;
         int ret = -EINVAL;
         unsigned long runlist_id = 0;
         int errcode;
@@ -581,7 +578,8 @@ int nvgpu_runlist_reload_ids(struct gk20a *g, u32 runlist_ids, bool add)
         ret = 0;
         for_each_set_bit(runlist_id, &ulong_runlist_ids, 32U) {
                 /* Capture the last failure error code */
-                errcode = g->ops.runlist.reload(g, (u32)runlist_id, add, true);
+                errcode = g->ops.runlist.reload(g,
+                                f->runlists[runlist_id], add, true);
                 if (errcode != 0) {
                         nvgpu_err(g,
                                 "failed to update_runlist %lu %d",
@@ -880,7 +878,7 @@ u32 nvgpu_runlist_get_runlists_mask(struct gk20a *g, u32 id,
                 if (id_type == ID_TYPE_TSG) {
                         runlists_mask |= BIT32(f->tsg[id].runlist_id);
                 } else {
-                        runlists_mask |= BIT32(f->channel[id].runlist_id);
+                        runlists_mask |= BIT32(f->channel[id].runlist->runlist_id);
                 }
         } else {
                 if (bitmask_disabled) {

@@ -115,12 +115,12 @@ int nvgpu_tsg_bind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch)
         /* all the channel part of TSG should need to be same runlist_id */
         if (tsg->runlist_id == NVGPU_INVALID_TSG_ID) {
-                tsg->runlist_id = ch->runlist_id;
+                tsg->runlist_id = ch->runlist->runlist_id;
         } else {
-                if (tsg->runlist_id != ch->runlist_id) {
+                if (tsg->runlist_id != ch->runlist->runlist_id) {
                         nvgpu_err(tsg->g,
                                 "runlist_id mismatch ch[%d] tsg[%d]",
-                                ch->runlist_id, tsg->runlist_id);
+                                ch->runlist->runlist_id, tsg->runlist_id);
                         return -EINVAL;
                 }
         }
@@ -677,7 +677,7 @@ int nvgpu_tsg_set_interleave(struct nvgpu_tsg *tsg, u32 level)
                 return 0;
         }
-        return g->ops.runlist.reload(g, tsg->runlist_id, true, true);
+        return g->ops.runlist.reload(g, g->fifo.runlists[tsg->runlist_id], true, true);
 }
 int nvgpu_tsg_set_timeslice(struct nvgpu_tsg *tsg, u32 timeslice_us)
@@ -699,7 +699,7 @@ int nvgpu_tsg_set_timeslice(struct nvgpu_tsg *tsg, u32 timeslice_us)
                 return 0;
         }
-        return g->ops.runlist.reload(g, tsg->runlist_id, true, true);
+        return g->ops.runlist.reload(g, g->fifo.runlists[tsg->runlist_id], true, true);
 }
 u32 nvgpu_tsg_get_timeslice(struct nvgpu_tsg *tsg)