gpu: nvgpu: Update runlist_update() to take runlist ptr

Update the nvgpu_runlist_update_for_channel() function:

  - Rename it to nvgpu_runlist_update()
  - Have it take a pointer to the runlist to update instead
    of a runlist ID (see the prototype comparison below). For
    the most part this makes the code better, but there are a
    few places where it is worse (for now).
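
For reference, the core of the interface change is the following
prototype swap, as it appears in the runlist header diff further
down:

  /* Old: callers pass a runlist ID that is looked up internally. */
  int nvgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
		struct nvgpu_channel *ch, bool add, bool wait_for_finish);

  /* New: callers pass the runlist object directly. */
  int nvgpu_runlist_update(struct gk20a *g, struct nvgpu_runlist *rl,
		struct nvgpu_channel *ch, bool add, bool wait_for_finish);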

This starts the slow and painful process of moving non-runlist
code away from using runlist IDs in the many places where it
should not be using them.

Most of this patch is just fixing compilation problems caused by
the minor header updates.

JIRA NVGPU-6425

Change-Id: Id9885fe655d1d750625a1c8aceda9e67a2cbdb7a
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2470304
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author: Alex Waterman <alexw@nvidia.com>
Committed-by: mobile promotions
Date: 2020-12-31 22:42:19 -06:00
Commit: 77c0b9ffdc (parent: fae1f20ab7)

19 changed files with 75 additions and 78 deletions


@@ -158,8 +158,7 @@ void nvgpu_channel_commit_va(struct nvgpu_channel *c)
 int nvgpu_channel_update_runlist(struct nvgpu_channel *c, bool add)
 {
-	return c->g->ops.runlist.update_for_channel(c->g, c->runlist_id,
-					c, add, true);
+	return c->g->ops.runlist.update(c->g, c->runlist, c, add, true);
 }
 
 int nvgpu_channel_enable_tsg(struct gk20a *g, struct nvgpu_channel *ch)
@@ -1249,7 +1248,7 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 15_6))
 	ch->g = g;
 
 	/* Runlist for the channel */
-	ch->runlist_id = runlist_id;
+	ch->runlist = f->runlists[runlist_id];
 
 	/* Channel privilege level */
 	ch->is_privileged_channel = is_privileged_channel;
@@ -1903,7 +1902,7 @@ int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g)
 			channels_in_use = true;
 
-			active_runlist_ids |= BIT32(ch->runlist_id);
+			active_runlist_ids |= BIT32(ch->runlist->runlist_id);
 		}
 
 		nvgpu_channel_put(ch);
@@ -1940,7 +1939,7 @@ int nvgpu_channel_resume_all_serviceable_ch(struct gk20a *g)
 			nvgpu_log_info(g, "resume channel %d", chid);
 			g->ops.channel.bind(ch);
 			channels_in_use = true;
-			active_runlist_ids |= BIT32(ch->runlist_id);
+			active_runlist_ids |= BIT32(ch->runlist->runlist_id);
 		}
 		nvgpu_channel_put(ch);
 	}


@@ -471,7 +471,7 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
 #endif
 	int ret = 0;
 
-	runlist = g->fifo.runlists[ch->runlist_id];
+	runlist = ch->runlist;
 	if (nvgpu_mutex_tryacquire(&runlist->runlist_lock) == 0) {
 		return -EBUSY;
 	}
@@ -481,7 +481,7 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
 #endif
 
 	g->ops.runlist.hw_submit(
-		g, ch->runlist_id, runlist->count, runlist->cur_buffer);
+		g, runlist->runlist_id, runlist->count, runlist->cur_buffer);
 
 	if (preempt_next) {
 		if (g->ops.runlist.reschedule_preempt_next_locked(ch,
@@ -490,9 +490,9 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
 		}
 	}
 
-	if (g->ops.runlist.wait_pending(g, ch->runlist_id) != 0) {
+	if (g->ops.runlist.wait_pending(g, runlist->runlist_id) != 0) {
 		nvgpu_err(g, "wait pending failed for runlist %u",
-				ch->runlist_id);
+				runlist->runlist_id);
 	}
 #ifdef CONFIG_NVGPU_LS_PMU
 	if (mutex_ret == 0) {
@@ -512,12 +512,10 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
    special cases below: runlist->active_channels will NOT be changed.
    (ch == NULL && !add) means remove all active channels from runlist.
    (ch == NULL && add) means restore all active channels on runlist. */
-static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id,
+static int nvgpu_runlist_do_update(struct gk20a *g, struct nvgpu_runlist *rl,
 			struct nvgpu_channel *ch,
 			bool add, bool wait_for_finish)
 {
-	struct nvgpu_runlist *runlist = NULL;
-	struct nvgpu_fifo *f = &g->fifo;
 #ifdef CONFIG_NVGPU_LS_PMU
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 	int mutex_ret = 0;
@@ -526,14 +524,12 @@ static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id,
 	nvgpu_log_fn(g, " ");
 
-	runlist = f->runlists[runlist_id];
-	nvgpu_mutex_acquire(&runlist->runlist_lock);
+	nvgpu_mutex_acquire(&rl->runlist_lock);
 
 #ifdef CONFIG_NVGPU_LS_PMU
 	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
 			PMU_MUTEX_ID_FIFO, &token);
 #endif
-	ret = nvgpu_runlist_update_locked(g, runlist_id, ch, add,
+	ret = nvgpu_runlist_update_locked(g, rl->runlist_id, ch, add,
 					wait_for_finish);
 #ifdef CONFIG_NVGPU_LS_PMU
 	if (mutex_ret == 0) {
@@ -543,32 +539,33 @@ static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id,
 		}
 	}
 #endif
-	nvgpu_mutex_release(&runlist->runlist_lock);
+	nvgpu_mutex_release(&rl->runlist_lock);
 
 	if (ret == -ETIMEDOUT) {
-		nvgpu_rc_runlist_update(g, runlist_id);
+		nvgpu_rc_runlist_update(g, rl->runlist_id);
 	}
 
 	return ret;
 }
 
-int nvgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
+int nvgpu_runlist_update(struct gk20a *g, struct nvgpu_runlist *rl,
 			struct nvgpu_channel *ch,
 			bool add, bool wait_for_finish)
 {
 	nvgpu_assert(ch != NULL);
 
-	return nvgpu_runlist_update(g, runlist_id, ch, add, wait_for_finish);
+	return nvgpu_runlist_do_update(g, rl, ch, add, wait_for_finish);
 }
 
-int nvgpu_runlist_reload(struct gk20a *g, u32 runlist_id,
+int nvgpu_runlist_reload(struct gk20a *g, struct nvgpu_runlist *rl,
 		bool add, bool wait_for_finish)
 {
-	return nvgpu_runlist_update(g, runlist_id, NULL, add, wait_for_finish);
+	return nvgpu_runlist_do_update(g, rl, NULL, add, wait_for_finish);
 }
 
 int nvgpu_runlist_reload_ids(struct gk20a *g, u32 runlist_ids, bool add)
 {
+	struct nvgpu_fifo *f = &g->fifo;
 	int ret = -EINVAL;
 	unsigned long runlist_id = 0;
 	int errcode;
@@ -581,7 +578,8 @@ int nvgpu_runlist_reload_ids(struct gk20a *g, u32 runlist_ids, bool add)
 	ret = 0;
 	for_each_set_bit(runlist_id, &ulong_runlist_ids, 32U) {
 		/* Capture the last failure error code */
-		errcode = g->ops.runlist.reload(g, (u32)runlist_id, add, true);
+		errcode = g->ops.runlist.reload(g,
+				f->runlists[runlist_id], add, true);
 		if (errcode != 0) {
 			nvgpu_err(g,
 				"failed to update_runlist %lu %d",
@@ -880,7 +878,7 @@ u32 nvgpu_runlist_get_runlists_mask(struct gk20a *g, u32 id,
 		if (id_type == ID_TYPE_TSG) {
 			runlists_mask |= BIT32(f->tsg[id].runlist_id);
 		} else {
-			runlists_mask |= BIT32(f->channel[id].runlist_id);
+			runlists_mask |= BIT32(f->channel[id].runlist->runlist_id);
 		}
 	} else {
 		if (bitmask_disabled) {


@@ -115,12 +115,12 @@ int nvgpu_tsg_bind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch)
 	/* all the channel part of TSG should need to be same runlist_id */
 	if (tsg->runlist_id == NVGPU_INVALID_TSG_ID) {
-		tsg->runlist_id = ch->runlist_id;
+		tsg->runlist_id = ch->runlist->runlist_id;
 	} else {
-		if (tsg->runlist_id != ch->runlist_id) {
+		if (tsg->runlist_id != ch->runlist->runlist_id) {
 			nvgpu_err(tsg->g,
 				"runlist_id mismatch ch[%d] tsg[%d]",
-				ch->runlist_id, tsg->runlist_id);
+				ch->runlist->runlist_id, tsg->runlist_id);
 			return -EINVAL;
 		}
 	}
@@ -677,7 +677,7 @@ int nvgpu_tsg_set_interleave(struct nvgpu_tsg *tsg, u32 level)
 		return 0;
 	}
 
-	return g->ops.runlist.reload(g, tsg->runlist_id, true, true);
+	return g->ops.runlist.reload(g, g->fifo.runlists[tsg->runlist_id], true, true);
 }
 
 int nvgpu_tsg_set_timeslice(struct nvgpu_tsg *tsg, u32 timeslice_us)
@@ -699,7 +699,7 @@ int nvgpu_tsg_set_timeslice(struct nvgpu_tsg *tsg, u32 timeslice_us)
 		return 0;
 	}
 
-	return g->ops.runlist.reload(g, tsg->runlist_id, true, true);
+	return g->ops.runlist.reload(g, g->fifo.runlists[tsg->runlist_id], true, true);
 }
 
 u32 nvgpu_tsg_get_timeslice(struct nvgpu_tsg *tsg)


@@ -22,6 +22,7 @@
 #include <nvgpu/gk20a.h>
 #include <nvgpu/channel.h>
+#include <nvgpu/runlist.h>
 #include <nvgpu/error_notifier.h>
 #include <nvgpu/vgpu/vgpu_ivc.h>
 #include <nvgpu/vgpu/vgpu.h>
@@ -81,7 +82,7 @@ int vgpu_channel_alloc_inst(struct gk20a *g, struct nvgpu_channel *ch)
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX;
 	msg.handle = vgpu_get_handle(g);
 	p->id = ch->chid;
-	p->runlist_id = ch->runlist_id;
+	p->runlist_id = ch->runlist->runlist_id;
 	p->pid = (u64)ch->pid;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	if (err || msg.ret) {


@@ -163,40 +163,36 @@ static int vgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
    special cases below: runlist->active_channels will NOT be changed.
    (ch == NULL && !add) means remove all active channels from runlist.
    (ch == NULL && add) means restore all active channels on runlist. */
-static int vgpu_runlist_update(struct gk20a *g, u32 runlist_id,
+static int vgpu_runlist_do_update(struct gk20a *g, struct nvgpu_runlist *rl,
 			struct nvgpu_channel *ch,
 			bool add, bool wait_for_finish)
 {
-	struct nvgpu_runlist *runlist = NULL;
-	struct nvgpu_fifo *f = &g->fifo;
 	u32 ret = 0;
 
 	nvgpu_log_fn(g, " ");
 
-	runlist = f->runlists[runlist_id];
-	nvgpu_mutex_acquire(&runlist->runlist_lock);
-	ret = vgpu_runlist_update_locked(g, runlist_id, ch, add,
+	nvgpu_mutex_acquire(&rl->runlist_lock);
+	ret = vgpu_runlist_update_locked(g, rl->runlist_id, ch, add,
 			wait_for_finish);
-	nvgpu_mutex_release(&runlist->runlist_lock);
+	nvgpu_mutex_release(&rl->runlist_lock);
 
 	return ret;
 }
 
-int vgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
+int vgpu_runlist_update(struct gk20a *g, struct nvgpu_runlist *rl,
 			struct nvgpu_channel *ch,
 			bool add, bool wait_for_finish)
 {
 	nvgpu_assert(ch != NULL);
 
-	return vgpu_runlist_update(g, runlist_id, ch, add, wait_for_finish);
+	return vgpu_runlist_do_update(g, rl, ch, add, wait_for_finish);
 }
 
-int vgpu_runlist_reload(struct gk20a *g, u32 runlist_id,
+int vgpu_runlist_reload(struct gk20a *g, struct nvgpu_runlist *rl,
 		bool add, bool wait_for_finish)
 {
-	return vgpu_runlist_update(g, runlist_id, NULL, add, wait_for_finish);
+	return vgpu_runlist_do_update(g, rl, NULL, add, wait_for_finish);
 }
 
 u32 vgpu_runlist_length_max(struct gk20a *g)


@@ -24,11 +24,12 @@
 struct gk20a;
 struct nvgpu_channel;
+struct nvgpu_runlist;
 
-int vgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
+int vgpu_runlist_update(struct gk20a *g, struct nvgpu_runlist *rl,
 			struct nvgpu_channel *ch,
 			bool add, bool wait_for_finish);
-int vgpu_runlist_reload(struct gk20a *g, u32 runlist_id,
+int vgpu_runlist_reload(struct gk20a *g, struct nvgpu_runlist *rl,
 		bool add, bool wait_for_finish);
 u32 vgpu_runlist_length_max(struct gk20a *g);
 u32 vgpu_runlist_entry_size(struct gk20a *g);


@@ -312,27 +312,27 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 		unsigned int id_type)
 {
 	struct nvgpu_fifo *f = &g->fifo;
+	struct nvgpu_runlist *rl;
 	unsigned long runlist_served_pbdmas;
 	unsigned long runlist_served_engines;
 	unsigned long bit;
 	u32 pbdma_id;
 	u32 engine_id;
-	u32 runlist_id;
 	int err, ret = 0;
 	u32 tsgid;
 
 	if (id_type == ID_TYPE_TSG) {
-		runlist_id = f->tsg[id].runlist_id;
+		rl = f->runlists[f->tsg[id].runlist_id];
 		tsgid = id;
 	} else {
-		runlist_id = f->channel[id].runlist_id;
+		rl = f->channel[id].runlist;
 		tsgid = f->channel[id].tsgid;
 	}
 
 	nvgpu_log_info(g, "Check preempt pending for tsgid = %u", tsgid);
 
-	runlist_served_pbdmas = f->runlists[runlist_id]->pbdma_bitmask;
-	runlist_served_engines = f->runlists[runlist_id]->eng_bitmask;
+	runlist_served_pbdmas = rl->pbdma_bitmask;
+	runlist_served_engines = rl->eng_bitmask;
 
 	for_each_set_bit(bit, &runlist_served_pbdmas,
 		nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA)) {
@@ -344,13 +344,13 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 		}
 	}
 
-	f->runlists[runlist_id]->reset_eng_bitmask = 0U;
+	rl->reset_eng_bitmask = 0U;
 
 	for_each_set_bit(bit, &runlist_served_engines, f->max_engines) {
 		engine_id = U32(bit);
 		err = gv11b_fifo_preempt_poll_eng(g,
 				tsgid, engine_id,
-				&f->runlists[runlist_id]->reset_eng_bitmask);
+				&rl->reset_eng_bitmask);
 		if ((err != 0) && (ret == 0)) {
 			ret = err;
 		}


@@ -52,8 +52,7 @@ int gk20a_fifo_reschedule_preempt_next(struct nvgpu_channel *ch,
 		bool wait_preempt)
 {
 	struct gk20a *g = ch->g;
-	struct nvgpu_runlist *runlist =
-		g->fifo.runlists[ch->runlist_id];
+	struct nvgpu_runlist *runlist = ch->runlist;
 	int ret = 0;
 	u32 fecsstat0 = 0, fecsstat1 = 0;
 	u32 preempt_id;


@@ -26,6 +26,7 @@
 #include <nvgpu/gk20a.h>
 #include <nvgpu/channel.h>
 #include <nvgpu/io_usermode.h>
+#include <nvgpu/runlist.h>
 
 #include "usermode_tu104.h"
@@ -59,13 +60,13 @@ u32 tu104_usermode_doorbell_token(struct nvgpu_channel *ch)
 	u32 hw_chid = f->channel_base + ch->chid;
 
 	return ctrl_doorbell_vector_f(hw_chid) |
-		ctrl_doorbell_runlist_id_f(ch->runlist_id);
+		ctrl_doorbell_runlist_id_f(ch->runlist->runlist_id);
 }
 
 void tu104_usermode_ring_doorbell(struct nvgpu_channel *ch)
 {
 	nvgpu_log_info(ch->g, "channel ring door bell %d, runlist %d",
-		ch->chid, ch->runlist_id);
+		ch->chid, ch->runlist->runlist_id);
 
 	nvgpu_usermode_writel(ch->g, func_doorbell_r(),
 		ch->g->ops.usermode.doorbell_token(ch));


@@ -44,6 +44,7 @@
 #include <nvgpu/engines.h>
 #include <nvgpu/engine_status.h>
 #include <nvgpu/preempt.h>
+#include <nvgpu/runlist.h>
 
 #include "gr_gk20a.h"
 #include "gr_gp10b.h"
@@ -402,7 +403,7 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct nvgpu_channel
 		return ret;
 	}
 
-	ret = g->ops.runlist.reload(g, fault_ch->runlist_id, true, false);
+	ret = g->ops.runlist.reload(g, fault_ch->runlist, true, false);
 	if (ret != 0) {
 		nvgpu_err(g, "CILP: failed to restart runlist 0!");
 		return ret;


@@ -684,7 +684,7 @@ static const struct gops_ramin gm20b_ops_ramin = {
 };
 
 static const struct gops_runlist gm20b_ops_runlist = {
-	.update_for_channel = nvgpu_runlist_update_for_channel,
+	.update = nvgpu_runlist_update,
 	.reload = nvgpu_runlist_reload,
 	.count_max = gk20a_runlist_count_max,
 	.entry_size = gk20a_runlist_entry_size,


@@ -769,7 +769,7 @@ static const struct gops_runlist gp10b_ops_runlist = {
 	.reschedule = gk20a_runlist_reschedule,
 	.reschedule_preempt_next_locked = gk20a_fifo_reschedule_preempt_next,
 #endif
-	.update_for_channel = nvgpu_runlist_update_for_channel,
+	.update = nvgpu_runlist_update,
 	.reload = nvgpu_runlist_reload,
 	.count_max = gk20a_runlist_count_max,
 	.entry_size = gk20a_runlist_entry_size,


@@ -943,7 +943,7 @@ static const struct gops_runlist gv11b_ops_runlist = {
 	.reschedule = gv11b_runlist_reschedule,
 	.reschedule_preempt_next_locked = gk20a_fifo_reschedule_preempt_next,
 #endif
-	.update_for_channel = nvgpu_runlist_update_for_channel,
+	.update = nvgpu_runlist_update,
 	.reload = nvgpu_runlist_reload,
 	.count_max = gv11b_runlist_count_max,
 	.entry_size = gv11b_runlist_entry_size,


@@ -994,7 +994,7 @@ static const struct gops_ramin tu104_ops_ramin = {
 };
 
 static const struct gops_runlist tu104_ops_runlist = {
-	.update_for_channel = nvgpu_runlist_update_for_channel,
+	.update = nvgpu_runlist_update,
 	.reload = nvgpu_runlist_reload,
 	.count_max = tu104_runlist_count_max,
 	.entry_size = tu104_runlist_entry_size,


@@ -546,7 +546,7 @@ static const struct gops_ramin vgpu_gp10b_ops_ramin = {
 static const struct gops_runlist vgpu_gp10b_ops_runlist = {
 	.reschedule = NULL,
-	.update_for_channel = vgpu_runlist_update_for_channel,
+	.update = vgpu_runlist_update,
 	.reload = vgpu_runlist_reload,
 	.count_max = gk20a_runlist_count_max,
 	.entry_size = vgpu_runlist_entry_size,


@@ -649,7 +649,7 @@ static const struct gops_ramin vgpu_gv11b_ops_ramin = {
 static const struct gops_runlist vgpu_gv11b_ops_runlist = {
 	.reschedule = NULL,
-	.update_for_channel = vgpu_runlist_update_for_channel,
+	.update = vgpu_runlist_update,
 	.reload = vgpu_runlist_reload,
 	.count_max = gv11b_runlist_count_max,
 	.entry_size = vgpu_runlist_entry_size,


@@ -50,6 +50,7 @@ struct priv_cmd_queue;
 struct priv_cmd_entry;
 struct nvgpu_channel_wdt;
 struct nvgpu_user_fence;
+struct nvgpu_runlist;
 
 /**
  * S/W defined invalid channel identifier.
@@ -488,8 +489,8 @@ struct nvgpu_channel {
 	 */
 	u32 runqueue_sel;
 
-	/** Identifer of the runlist the channel will run on */
-	u32 runlist_id;
+	/** Runlist the channel will run on. */
+	struct nvgpu_runlist *runlist;
 
 	/**
 	 * Recovery path can be entered twice for the same error in


@@ -65,12 +65,12 @@ struct gops_runlist {
 	 * @retval -E2BIG in case there are not enough entries in the runlist
 	 * buffer to accommodate all active channels/TSGs.
 	 */
-	int (*reload)(struct gk20a *g, u32 runlist_id,
+	int (*reload)(struct gk20a *g, struct nvgpu_runlist *rl,
 			bool add, bool wait_for_finish);
 
 	/** @cond DOXYGEN_SHOULD_SKIP_THIS */
-	int (*update_for_channel)(struct gk20a *g, u32 runlist_id,
+	int (*update)(struct gk20a *g, struct nvgpu_runlist *rl,
 			struct nvgpu_channel *ch, bool add,
 			bool wait_for_finish);
 	u32 (*count_max)(struct gk20a *g);


@@ -142,7 +142,7 @@ u32 nvgpu_runlist_construct_locked(struct nvgpu_fifo *f,
  *
  * When #ch is NULL, this function has same behavior as #nvgpu_runlist_reload.
  * When #ch is non NULL, this function has same behavior as
- * #nvgpu_runlist_update_for_channel.
+ * #nvgpu_runlist_update.
  *
  * The only difference with #nvgpu_runlist_reload is that the caller already
  * holds the runlist_lock before calling this function.
@@ -185,7 +185,7 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
  * @retval -E2BIG in case there are not enough entries in runlist buffer to
  * accommodate all active channels/TSGs.
  */
-int nvgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
+int nvgpu_runlist_update(struct gk20a *g, struct nvgpu_runlist *rl,
 		struct nvgpu_channel *ch, bool add, bool wait_for_finish);
 
 /**
@@ -211,7 +211,7 @@ int nvgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
  * @retval -E2BIG in case there are not enough entries in the runlist buffer
  * to accommodate all active channels/TSGs.
  */
-int nvgpu_runlist_reload(struct gk20a *g, u32 runlist_id,
+int nvgpu_runlist_reload(struct gk20a *g, struct nvgpu_runlist *rl,
 		bool add, bool wait_for_finish);
 
 /**