Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: Update runlist_update() to take runlist ptr
Update the nvgpu_runlist_update_for_channel() function:
- Rename it to nvgpu_runlist_update()
- Have it take a pointer to the runlist to update instead
  of a runlist ID. For the most part this makes the code
  better, but there are a few places where it's worse (for
  now).

This starts the slow and painful process of moving non-runlist
code away from using runlist IDs in the many places where it
should not.

Most of this patch is just fixing compilation problems caused by
the minor header updates.
JIRA NVGPU-6425
Change-Id: Id9885fe655d1d750625a1c8aceda9e67a2cbdb7a
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2470304
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
This commit is contained in:
commit 77c0b9ffdc (parent fae1f20ab7)
committed by mobile promotions
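For quick reference, the core signature change driven by this patch, copied from the diff below: callers that previously passed a runlist ID (and let the implementation look the runlist up) now pass the runlist pointer they already hold, e.g. ch->runlist.

/* Before */
int nvgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
		struct nvgpu_channel *ch, bool add, bool wait_for_finish);

/* After */
int nvgpu_runlist_update(struct gk20a *g, struct nvgpu_runlist *rl,
		struct nvgpu_channel *ch, bool add, bool wait_for_finish);

/* Typical call site after the change (from nvgpu_channel_update_runlist()) */
return c->g->ops.runlist.update(c->g, c->runlist, c, add, true);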
@@ -158,8 +158,7 @@ void nvgpu_channel_commit_va(struct nvgpu_channel *c)
 
 int nvgpu_channel_update_runlist(struct nvgpu_channel *c, bool add)
 {
-	return c->g->ops.runlist.update_for_channel(c->g, c->runlist_id,
-			c, add, true);
+	return c->g->ops.runlist.update(c->g, c->runlist, c, add, true);
 }
 
 int nvgpu_channel_enable_tsg(struct gk20a *g, struct nvgpu_channel *ch)
@@ -1249,7 +1248,7 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 15_6))
 	ch->g = g;
 
 	/* Runlist for the channel */
-	ch->runlist_id = runlist_id;
+	ch->runlist = f->runlists[runlist_id];
 
 	/* Channel privilege level */
 	ch->is_privileged_channel = is_privileged_channel;
@@ -1903,7 +1902,7 @@ int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g)
 
 			channels_in_use = true;
 
-			active_runlist_ids |= BIT32(ch->runlist_id);
+			active_runlist_ids |= BIT32(ch->runlist->runlist_id);
 		}
 
 		nvgpu_channel_put(ch);
@@ -1940,7 +1939,7 @@ int nvgpu_channel_resume_all_serviceable_ch(struct gk20a *g)
 			nvgpu_log_info(g, "resume channel %d", chid);
 			g->ops.channel.bind(ch);
 			channels_in_use = true;
-			active_runlist_ids |= BIT32(ch->runlist_id);
+			active_runlist_ids |= BIT32(ch->runlist->runlist_id);
 		}
 		nvgpu_channel_put(ch);
 	}
@@ -471,7 +471,7 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
 #endif
 	int ret = 0;
 
-	runlist = g->fifo.runlists[ch->runlist_id];
+	runlist = ch->runlist;
 	if (nvgpu_mutex_tryacquire(&runlist->runlist_lock) == 0) {
 		return -EBUSY;
 	}
@@ -481,7 +481,7 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
 #endif
 
 	g->ops.runlist.hw_submit(
-		g, ch->runlist_id, runlist->count, runlist->cur_buffer);
+		g, runlist->runlist_id, runlist->count, runlist->cur_buffer);
 
 	if (preempt_next) {
 		if (g->ops.runlist.reschedule_preempt_next_locked(ch,
@@ -490,9 +490,9 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
 		}
 	}
 
-	if (g->ops.runlist.wait_pending(g, ch->runlist_id) != 0) {
+	if (g->ops.runlist.wait_pending(g, runlist->runlist_id) != 0) {
 		nvgpu_err(g, "wait pending failed for runlist %u",
-			ch->runlist_id);
+			runlist->runlist_id);
 	}
 #ifdef CONFIG_NVGPU_LS_PMU
 	if (mutex_ret == 0) {
@@ -512,12 +512,10 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
    special cases below: runlist->active_channels will NOT be changed.
    (ch == NULL && !add) means remove all active channels from runlist.
    (ch == NULL && add) means restore all active channels on runlist. */
-static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id,
-				struct nvgpu_channel *ch,
-				bool add, bool wait_for_finish)
+static int nvgpu_runlist_do_update(struct gk20a *g, struct nvgpu_runlist *rl,
+				struct nvgpu_channel *ch,
+				bool add, bool wait_for_finish)
 {
-	struct nvgpu_runlist *runlist = NULL;
-	struct nvgpu_fifo *f = &g->fifo;
 #ifdef CONFIG_NVGPU_LS_PMU
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 	int mutex_ret = 0;
@@ -526,14 +524,12 @@ static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id,
 
 	nvgpu_log_fn(g, " ");
 
-	runlist = f->runlists[runlist_id];
-
-	nvgpu_mutex_acquire(&runlist->runlist_lock);
+	nvgpu_mutex_acquire(&rl->runlist_lock);
 #ifdef CONFIG_NVGPU_LS_PMU
 	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
 			PMU_MUTEX_ID_FIFO, &token);
 #endif
-	ret = nvgpu_runlist_update_locked(g, runlist_id, ch, add,
+	ret = nvgpu_runlist_update_locked(g, rl->runlist_id, ch, add,
 			wait_for_finish);
 #ifdef CONFIG_NVGPU_LS_PMU
 	if (mutex_ret == 0) {
@@ -543,32 +539,33 @@ static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id,
 		}
 	}
 #endif
-	nvgpu_mutex_release(&runlist->runlist_lock);
+	nvgpu_mutex_release(&rl->runlist_lock);
 
 	if (ret == -ETIMEDOUT) {
-		nvgpu_rc_runlist_update(g, runlist_id);
+		nvgpu_rc_runlist_update(g, rl->runlist_id);
 	}
 
 	return ret;
 }
 
-int nvgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
-				struct nvgpu_channel *ch,
-				bool add, bool wait_for_finish)
+int nvgpu_runlist_update(struct gk20a *g, struct nvgpu_runlist *rl,
+				struct nvgpu_channel *ch,
+				bool add, bool wait_for_finish)
 {
 	nvgpu_assert(ch != NULL);
 
-	return nvgpu_runlist_update(g, runlist_id, ch, add, wait_for_finish);
+	return nvgpu_runlist_do_update(g, rl, ch, add, wait_for_finish);
 }
 
-int nvgpu_runlist_reload(struct gk20a *g, u32 runlist_id,
+int nvgpu_runlist_reload(struct gk20a *g, struct nvgpu_runlist *rl,
 		bool add, bool wait_for_finish)
 {
-	return nvgpu_runlist_update(g, runlist_id, NULL, add, wait_for_finish);
+	return nvgpu_runlist_do_update(g, rl, NULL, add, wait_for_finish);
 }
 
 int nvgpu_runlist_reload_ids(struct gk20a *g, u32 runlist_ids, bool add)
 {
+	struct nvgpu_fifo *f = &g->fifo;
 	int ret = -EINVAL;
 	unsigned long runlist_id = 0;
 	int errcode;
@@ -581,7 +578,8 @@ int nvgpu_runlist_reload_ids(struct gk20a *g, u32 runlist_ids, bool add)
 	ret = 0;
 	for_each_set_bit(runlist_id, &ulong_runlist_ids, 32U) {
 		/* Capture the last failure error code */
-		errcode = g->ops.runlist.reload(g, (u32)runlist_id, add, true);
+		errcode = g->ops.runlist.reload(g,
+				f->runlists[runlist_id], add, true);
 		if (errcode != 0) {
 			nvgpu_err(g,
 				"failed to update_runlist %lu %d",
@@ -880,7 +878,7 @@ u32 nvgpu_runlist_get_runlists_mask(struct gk20a *g, u32 id,
 		if (id_type == ID_TYPE_TSG) {
 			runlists_mask |= BIT32(f->tsg[id].runlist_id);
 		} else {
-			runlists_mask |= BIT32(f->channel[id].runlist_id);
+			runlists_mask |= BIT32(f->channel[id].runlist->runlist_id);
 		}
 	} else {
 		if (bitmask_disabled) {
@@ -115,12 +115,12 @@ int nvgpu_tsg_bind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch)
 
 	/* all the channel part of TSG should need to be same runlist_id */
 	if (tsg->runlist_id == NVGPU_INVALID_TSG_ID) {
-		tsg->runlist_id = ch->runlist_id;
+		tsg->runlist_id = ch->runlist->runlist_id;
 	} else {
-		if (tsg->runlist_id != ch->runlist_id) {
+		if (tsg->runlist_id != ch->runlist->runlist_id) {
 			nvgpu_err(tsg->g,
 				"runlist_id mismatch ch[%d] tsg[%d]",
-				ch->runlist_id, tsg->runlist_id);
+				ch->runlist->runlist_id, tsg->runlist_id);
 			return -EINVAL;
 		}
 	}
@@ -677,7 +677,7 @@ int nvgpu_tsg_set_interleave(struct nvgpu_tsg *tsg, u32 level)
 		return 0;
 	}
 
-	return g->ops.runlist.reload(g, tsg->runlist_id, true, true);
+	return g->ops.runlist.reload(g, g->fifo.runlists[tsg->runlist_id], true, true);
 }
 
 int nvgpu_tsg_set_timeslice(struct nvgpu_tsg *tsg, u32 timeslice_us)
@@ -699,7 +699,7 @@ int nvgpu_tsg_set_timeslice(struct nvgpu_tsg *tsg, u32 timeslice_us)
 		return 0;
 	}
 
-	return g->ops.runlist.reload(g, tsg->runlist_id, true, true);
+	return g->ops.runlist.reload(g, g->fifo.runlists[tsg->runlist_id], true, true);
 }
 
 u32 nvgpu_tsg_get_timeslice(struct nvgpu_tsg *tsg)
@@ -22,6 +22,7 @@
 
 #include <nvgpu/gk20a.h>
 #include <nvgpu/channel.h>
+#include <nvgpu/runlist.h>
 #include <nvgpu/error_notifier.h>
 #include <nvgpu/vgpu/vgpu_ivc.h>
 #include <nvgpu/vgpu/vgpu.h>
@@ -81,7 +82,7 @@ int vgpu_channel_alloc_inst(struct gk20a *g, struct nvgpu_channel *ch)
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX;
 	msg.handle = vgpu_get_handle(g);
 	p->id = ch->chid;
-	p->runlist_id = ch->runlist_id;
+	p->runlist_id = ch->runlist->runlist_id;
 	p->pid = (u64)ch->pid;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	if (err || msg.ret) {
@@ -163,40 +163,36 @@ static int vgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
    special cases below: runlist->active_channels will NOT be changed.
    (ch == NULL && !add) means remove all active channels from runlist.
    (ch == NULL && add) means restore all active channels on runlist. */
-static int vgpu_runlist_update(struct gk20a *g, u32 runlist_id,
+static int vgpu_runlist_do_update(struct gk20a *g, struct nvgpu_runlist *rl,
 				struct nvgpu_channel *ch,
 				bool add, bool wait_for_finish)
 {
-	struct nvgpu_runlist *runlist = NULL;
-	struct nvgpu_fifo *f = &g->fifo;
 	u32 ret = 0;
 
 	nvgpu_log_fn(g, " ");
 
-	runlist = f->runlists[runlist_id];
-
-	nvgpu_mutex_acquire(&runlist->runlist_lock);
+	nvgpu_mutex_acquire(&rl->runlist_lock);
 
-	ret = vgpu_runlist_update_locked(g, runlist_id, ch, add,
+	ret = vgpu_runlist_update_locked(g, rl->runlist_id, ch, add,
 			wait_for_finish);
 
-	nvgpu_mutex_release(&runlist->runlist_lock);
+	nvgpu_mutex_release(&rl->runlist_lock);
 	return ret;
 }
 
-int vgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
-				struct nvgpu_channel *ch,
-				bool add, bool wait_for_finish)
+int vgpu_runlist_update(struct gk20a *g, struct nvgpu_runlist *rl,
+				struct nvgpu_channel *ch,
+				bool add, bool wait_for_finish)
 {
 	nvgpu_assert(ch != NULL);
 
-	return vgpu_runlist_update(g, runlist_id, ch, add, wait_for_finish);
+	return vgpu_runlist_do_update(g, rl, ch, add, wait_for_finish);
 }
 
-int vgpu_runlist_reload(struct gk20a *g, u32 runlist_id,
-		bool add, bool wait_for_finish)
+int vgpu_runlist_reload(struct gk20a *g, struct nvgpu_runlist *rl,
+		bool add, bool wait_for_finish)
 {
-	return vgpu_runlist_update(g, runlist_id, NULL, add, wait_for_finish);
+	return vgpu_runlist_do_update(g, rl, NULL, add, wait_for_finish);
 }
 
 u32 vgpu_runlist_length_max(struct gk20a *g)
@@ -24,11 +24,12 @@
 
 struct gk20a;
 struct nvgpu_channel;
+struct nvgpu_runlist;
 
-int vgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
-				struct nvgpu_channel *ch,
-				bool add, bool wait_for_finish);
-int vgpu_runlist_reload(struct gk20a *g, u32 runlist_id,
+int vgpu_runlist_update(struct gk20a *g, struct nvgpu_runlist *rl,
+				struct nvgpu_channel *ch,
+				bool add, bool wait_for_finish);
+int vgpu_runlist_reload(struct gk20a *g, struct nvgpu_runlist *rl,
 		bool add, bool wait_for_finish);
 u32 vgpu_runlist_length_max(struct gk20a *g);
 u32 vgpu_runlist_entry_size(struct gk20a *g);
@@ -312,27 +312,27 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 		unsigned int id_type)
 {
 	struct nvgpu_fifo *f = &g->fifo;
+	struct nvgpu_runlist *rl;
 	unsigned long runlist_served_pbdmas;
 	unsigned long runlist_served_engines;
 	unsigned long bit;
 	u32 pbdma_id;
 	u32 engine_id;
-	u32 runlist_id;
 	int err, ret = 0;
 	u32 tsgid;
 
 	if (id_type == ID_TYPE_TSG) {
-		runlist_id = f->tsg[id].runlist_id;
+		rl = f->runlists[f->tsg[id].runlist_id];
 		tsgid = id;
 	} else {
-		runlist_id = f->channel[id].runlist_id;
+		rl = f->channel[id].runlist;
 		tsgid = f->channel[id].tsgid;
 	}
 
 	nvgpu_log_info(g, "Check preempt pending for tsgid = %u", tsgid);
 
-	runlist_served_pbdmas = f->runlists[runlist_id]->pbdma_bitmask;
-	runlist_served_engines = f->runlists[runlist_id]->eng_bitmask;
+	runlist_served_pbdmas = rl->pbdma_bitmask;
+	runlist_served_engines = rl->eng_bitmask;
 
 	for_each_set_bit(bit, &runlist_served_pbdmas,
 		nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA)) {
@@ -344,13 +344,13 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 		}
 	}
 
-	f->runlists[runlist_id]->reset_eng_bitmask = 0U;
+	rl->reset_eng_bitmask = 0U;
 
 	for_each_set_bit(bit, &runlist_served_engines, f->max_engines) {
 		engine_id = U32(bit);
 		err = gv11b_fifo_preempt_poll_eng(g,
 			tsgid, engine_id,
-			&f->runlists[runlist_id]->reset_eng_bitmask);
+			&rl->reset_eng_bitmask);
 		if ((err != 0) && (ret == 0)) {
 			ret = err;
 		}
@@ -52,8 +52,7 @@ int gk20a_fifo_reschedule_preempt_next(struct nvgpu_channel *ch,
 		bool wait_preempt)
 {
 	struct gk20a *g = ch->g;
-	struct nvgpu_runlist *runlist =
-		g->fifo.runlists[ch->runlist_id];
+	struct nvgpu_runlist *runlist = ch->runlist;
 	int ret = 0;
 	u32 fecsstat0 = 0, fecsstat1 = 0;
 	u32 preempt_id;
@@ -26,6 +26,7 @@
 #include <nvgpu/gk20a.h>
 #include <nvgpu/channel.h>
 #include <nvgpu/io_usermode.h>
+#include <nvgpu/runlist.h>
 
 #include "usermode_tu104.h"
 
@@ -59,13 +60,13 @@ u32 tu104_usermode_doorbell_token(struct nvgpu_channel *ch)
 	u32 hw_chid = f->channel_base + ch->chid;
 
 	return ctrl_doorbell_vector_f(hw_chid) |
-		ctrl_doorbell_runlist_id_f(ch->runlist_id);
+		ctrl_doorbell_runlist_id_f(ch->runlist->runlist_id);
 }
 
 void tu104_usermode_ring_doorbell(struct nvgpu_channel *ch)
 {
 	nvgpu_log_info(ch->g, "channel ring door bell %d, runlist %d",
-		ch->chid, ch->runlist_id);
+		ch->chid, ch->runlist->runlist_id);
 
 	nvgpu_usermode_writel(ch->g, func_doorbell_r(),
 		ch->g->ops.usermode.doorbell_token(ch));
@@ -44,6 +44,7 @@
 #include <nvgpu/engines.h>
 #include <nvgpu/engine_status.h>
 #include <nvgpu/preempt.h>
+#include <nvgpu/runlist.h>
 
 #include "gr_gk20a.h"
 #include "gr_gp10b.h"
@@ -402,7 +403,7 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct nvgpu_channel
 		return ret;
 	}
 
-	ret = g->ops.runlist.reload(g, fault_ch->runlist_id, true, false);
+	ret = g->ops.runlist.reload(g, fault_ch->runlist, true, false);
 	if (ret != 0) {
 		nvgpu_err(g, "CILP: failed to restart runlist 0!");
 		return ret;
@@ -684,7 +684,7 @@ static const struct gops_ramin gm20b_ops_ramin = {
 };
 
 static const struct gops_runlist gm20b_ops_runlist = {
-	.update_for_channel = nvgpu_runlist_update_for_channel,
+	.update = nvgpu_runlist_update,
 	.reload = nvgpu_runlist_reload,
 	.count_max = gk20a_runlist_count_max,
 	.entry_size = gk20a_runlist_entry_size,
@@ -769,7 +769,7 @@ static const struct gops_runlist gp10b_ops_runlist = {
 	.reschedule = gk20a_runlist_reschedule,
 	.reschedule_preempt_next_locked = gk20a_fifo_reschedule_preempt_next,
 #endif
-	.update_for_channel = nvgpu_runlist_update_for_channel,
+	.update = nvgpu_runlist_update,
 	.reload = nvgpu_runlist_reload,
 	.count_max = gk20a_runlist_count_max,
 	.entry_size = gk20a_runlist_entry_size,
@@ -943,7 +943,7 @@ static const struct gops_runlist gv11b_ops_runlist = {
 	.reschedule = gv11b_runlist_reschedule,
 	.reschedule_preempt_next_locked = gk20a_fifo_reschedule_preempt_next,
 #endif
-	.update_for_channel = nvgpu_runlist_update_for_channel,
+	.update = nvgpu_runlist_update,
 	.reload = nvgpu_runlist_reload,
 	.count_max = gv11b_runlist_count_max,
 	.entry_size = gv11b_runlist_entry_size,
@@ -994,7 +994,7 @@ static const struct gops_ramin tu104_ops_ramin = {
 };
 
 static const struct gops_runlist tu104_ops_runlist = {
-	.update_for_channel = nvgpu_runlist_update_for_channel,
+	.update = nvgpu_runlist_update,
 	.reload = nvgpu_runlist_reload,
 	.count_max = tu104_runlist_count_max,
 	.entry_size = tu104_runlist_entry_size,
@@ -546,7 +546,7 @@ static const struct gops_ramin vgpu_gp10b_ops_ramin = {
 
 static const struct gops_runlist vgpu_gp10b_ops_runlist = {
 	.reschedule = NULL,
-	.update_for_channel = vgpu_runlist_update_for_channel,
+	.update = vgpu_runlist_update,
 	.reload = vgpu_runlist_reload,
 	.count_max = gk20a_runlist_count_max,
 	.entry_size = vgpu_runlist_entry_size,
@@ -649,7 +649,7 @@ static const struct gops_ramin vgpu_gv11b_ops_ramin = {
 
 static const struct gops_runlist vgpu_gv11b_ops_runlist = {
 	.reschedule = NULL,
-	.update_for_channel = vgpu_runlist_update_for_channel,
+	.update = vgpu_runlist_update,
 	.reload = vgpu_runlist_reload,
 	.count_max = gv11b_runlist_count_max,
 	.entry_size = vgpu_runlist_entry_size,
@@ -50,6 +50,7 @@ struct priv_cmd_queue;
 struct priv_cmd_entry;
 struct nvgpu_channel_wdt;
 struct nvgpu_user_fence;
+struct nvgpu_runlist;
 
 /**
  * S/W defined invalid channel identifier.
@@ -488,8 +489,8 @@ struct nvgpu_channel {
 	 */
 	u32 runqueue_sel;
 
-	/** Identifer of the runlist the channel will run on */
-	u32 runlist_id;
+	/** Runlist the channel will run on. */
+	struct nvgpu_runlist *runlist;
 
 	/**
 	 * Recovery path can be entered twice for the same error in
@@ -65,12 +65,12 @@ struct gops_runlist {
	 * @retval -E2BIG in case there are not enough entries in the runlist
	 * buffer to accommodate all active channels/TSGs.
	 */
-	int (*reload)(struct gk20a *g, u32 runlist_id,
+	int (*reload)(struct gk20a *g, struct nvgpu_runlist *rl,
 			bool add, bool wait_for_finish);
 
 	/** @cond DOXYGEN_SHOULD_SKIP_THIS */
 
-	int (*update_for_channel)(struct gk20a *g, u32 runlist_id,
+	int (*update)(struct gk20a *g, struct nvgpu_runlist *rl,
 			struct nvgpu_channel *ch, bool add,
 			bool wait_for_finish);
 	u32 (*count_max)(struct gk20a *g);
@@ -142,7 +142,7 @@ u32 nvgpu_runlist_construct_locked(struct nvgpu_fifo *f,
 *
 * When #ch is NULL, this function has same behavior as #nvgpu_runlist_reload.
 * When #ch is non NULL, this function has same behavior as
-* #nvgpu_runlist_update_for_channel.
+* #nvgpu_runlist_update.
 *
 * The only difference with #nvgpu_runlist_reload is that the caller already
 * holds the runlist_lock before calling this function.
@@ -185,7 +185,7 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
 * @retval -E2BIG in case there are not enough entries in runlist buffer to
 * accommodate all active channels/TSGs.
 */
-int nvgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
+int nvgpu_runlist_update(struct gk20a *g, struct nvgpu_runlist *rl,
 		struct nvgpu_channel *ch, bool add, bool wait_for_finish);
 
 /**
@@ -211,7 +211,7 @@ int nvgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
 * @retval -E2BIG in case there are not enough entries in the runlist buffer
 * to accommodate all active channels/TSGs.
 */
-int nvgpu_runlist_reload(struct gk20a *g, u32 runlist_id,
+int nvgpu_runlist_reload(struct gk20a *g, struct nvgpu_runlist *rl,
 		bool add, bool wait_for_finish);
 
 /**