From 77c0b9ffdc52cb6b911dc9cb7ee5fbf84d973712 Mon Sep 17 00:00:00 2001 From: Alex Waterman Date: Thu, 31 Dec 2020 22:42:19 -0600 Subject: [PATCH] gpu: nvgpu: Update runlist_update() to take runlist ptr Update the nvgpu_runlist_update_for_channel() function: - Rename it to nvgpu_runlist_update() - Have it take a pointer to the runlist to update instead of a runlist ID. For the most part this makes the code better but there's a few places where it's worse (for now). This starts the slow and painful process of moving away from the non-runlist code using runlist IDs in many places it should not. Most of this patch is just fixing compilation problems with the minor header updates. JIRA NVGPU-6425 Change-Id: Id9885fe655d1d750625a1c8aceda9e67a2cbdb7a Signed-off-by: Alex Waterman Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2470304 Reviewed-by: svc-mobile-coverity Reviewed-by: Deepak Nibade Reviewed-by: svc-mobile-cert Reviewed-by: mobile promotions Tested-by: mobile promotions GVS: Gerrit_Virtual_Submit --- drivers/gpu/nvgpu/common/fifo/channel.c | 9 ++-- drivers/gpu/nvgpu/common/fifo/runlist.c | 44 +++++++++---------- drivers/gpu/nvgpu/common/fifo/tsg.c | 10 ++--- .../gpu/nvgpu/common/vgpu/fifo/channel_vgpu.c | 3 +- .../gpu/nvgpu/common/vgpu/fifo/runlist_vgpu.c | 26 +++++------ .../gpu/nvgpu/common/vgpu/fifo/runlist_vgpu.h | 9 ++-- .../gpu/nvgpu/hal/fifo/preempt_gv11b_fusa.c | 14 +++--- .../gpu/nvgpu/hal/fifo/runlist_fifo_gk20a.c | 3 +- drivers/gpu/nvgpu/hal/fifo/usermode_tu104.c | 5 ++- drivers/gpu/nvgpu/hal/gr/gr/gr_gp10b.c | 3 +- drivers/gpu/nvgpu/hal/init/hal_gm20b.c | 2 +- drivers/gpu/nvgpu/hal/init/hal_gp10b.c | 2 +- drivers/gpu/nvgpu/hal/init/hal_gv11b.c | 2 +- drivers/gpu/nvgpu/hal/init/hal_tu104.c | 2 +- .../gpu/nvgpu/hal/vgpu/init/vgpu_hal_gp10b.c | 2 +- .../gpu/nvgpu/hal/vgpu/init/vgpu_hal_gv11b.c | 2 +- drivers/gpu/nvgpu/include/nvgpu/channel.h | 5 ++- .../gpu/nvgpu/include/nvgpu/gops/runlist.h | 4 +- drivers/gpu/nvgpu/include/nvgpu/runlist.h | 6 +-- 19 files changed, 75 insertions(+), 78 deletions(-) diff --git a/drivers/gpu/nvgpu/common/fifo/channel.c b/drivers/gpu/nvgpu/common/fifo/channel.c index 7a7fc4d5d..53e88a23a 100644 --- a/drivers/gpu/nvgpu/common/fifo/channel.c +++ b/drivers/gpu/nvgpu/common/fifo/channel.c @@ -158,8 +158,7 @@ void nvgpu_channel_commit_va(struct nvgpu_channel *c) int nvgpu_channel_update_runlist(struct nvgpu_channel *c, bool add) { - return c->g->ops.runlist.update_for_channel(c->g, c->runlist_id, - c, add, true); + return c->g->ops.runlist.update(c->g, c->runlist, c, add, true); } int nvgpu_channel_enable_tsg(struct gk20a *g, struct nvgpu_channel *ch) @@ -1249,7 +1248,7 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 15_6)) ch->g = g; /* Runlist for the channel */ - ch->runlist_id = runlist_id; + ch->runlist = f->runlists[runlist_id]; /* Channel privilege level */ ch->is_privileged_channel = is_privileged_channel; @@ -1903,7 +1902,7 @@ int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g) channels_in_use = true; - active_runlist_ids |= BIT32(ch->runlist_id); + active_runlist_ids |= BIT32(ch->runlist->runlist_id); } nvgpu_channel_put(ch); @@ -1940,7 +1939,7 @@ int nvgpu_channel_resume_all_serviceable_ch(struct gk20a *g) nvgpu_log_info(g, "resume channel %d", chid); g->ops.channel.bind(ch); channels_in_use = true; - active_runlist_ids |= BIT32(ch->runlist_id); + active_runlist_ids |= BIT32(ch->runlist->runlist_id); } nvgpu_channel_put(ch); } diff --git a/drivers/gpu/nvgpu/common/fifo/runlist.c 
b/drivers/gpu/nvgpu/common/fifo/runlist.c index bbc7ff880..d4fc8756c 100644 --- a/drivers/gpu/nvgpu/common/fifo/runlist.c +++ b/drivers/gpu/nvgpu/common/fifo/runlist.c @@ -471,7 +471,7 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next, #endif int ret = 0; - runlist = g->fifo.runlists[ch->runlist_id]; + runlist = ch->runlist; if (nvgpu_mutex_tryacquire(&runlist->runlist_lock) == 0) { return -EBUSY; } @@ -481,7 +481,7 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next, #endif g->ops.runlist.hw_submit( - g, ch->runlist_id, runlist->count, runlist->cur_buffer); + g, runlist->runlist_id, runlist->count, runlist->cur_buffer); if (preempt_next) { if (g->ops.runlist.reschedule_preempt_next_locked(ch, @@ -490,9 +490,9 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next, } } - if (g->ops.runlist.wait_pending(g, ch->runlist_id) != 0) { + if (g->ops.runlist.wait_pending(g, runlist->runlist_id) != 0) { nvgpu_err(g, "wait pending failed for runlist %u", - ch->runlist_id); + runlist->runlist_id); } #ifdef CONFIG_NVGPU_LS_PMU if (mutex_ret == 0) { @@ -512,12 +512,10 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next, special cases below: runlist->active_channels will NOT be changed. (ch == NULL && !add) means remove all active channels from runlist. (ch == NULL && add) means restore all active channels on runlist. */ -static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id, - struct nvgpu_channel *ch, - bool add, bool wait_for_finish) +static int nvgpu_runlist_do_update(struct gk20a *g, struct nvgpu_runlist *rl, + struct nvgpu_channel *ch, + bool add, bool wait_for_finish) { - struct nvgpu_runlist *runlist = NULL; - struct nvgpu_fifo *f = &g->fifo; #ifdef CONFIG_NVGPU_LS_PMU u32 token = PMU_INVALID_MUTEX_OWNER_ID; int mutex_ret = 0; @@ -526,14 +524,12 @@ static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id, nvgpu_log_fn(g, " "); - runlist = f->runlists[runlist_id]; - - nvgpu_mutex_acquire(&runlist->runlist_lock); + nvgpu_mutex_acquire(&rl->runlist_lock); #ifdef CONFIG_NVGPU_LS_PMU mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu, PMU_MUTEX_ID_FIFO, &token); #endif - ret = nvgpu_runlist_update_locked(g, runlist_id, ch, add, + ret = nvgpu_runlist_update_locked(g, rl->runlist_id, ch, add, wait_for_finish); #ifdef CONFIG_NVGPU_LS_PMU if (mutex_ret == 0) { @@ -543,32 +539,33 @@ static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id, } } #endif - nvgpu_mutex_release(&runlist->runlist_lock); + nvgpu_mutex_release(&rl->runlist_lock); if (ret == -ETIMEDOUT) { - nvgpu_rc_runlist_update(g, runlist_id); + nvgpu_rc_runlist_update(g, rl->runlist_id); } return ret; } -int nvgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id, - struct nvgpu_channel *ch, - bool add, bool wait_for_finish) +int nvgpu_runlist_update(struct gk20a *g, struct nvgpu_runlist *rl, + struct nvgpu_channel *ch, + bool add, bool wait_for_finish) { nvgpu_assert(ch != NULL); - return nvgpu_runlist_update(g, runlist_id, ch, add, wait_for_finish); + return nvgpu_runlist_do_update(g, rl, ch, add, wait_for_finish); } -int nvgpu_runlist_reload(struct gk20a *g, u32 runlist_id, +int nvgpu_runlist_reload(struct gk20a *g, struct nvgpu_runlist *rl, bool add, bool wait_for_finish) { - return nvgpu_runlist_update(g, runlist_id, NULL, add, wait_for_finish); + return nvgpu_runlist_do_update(g, rl, NULL, add, wait_for_finish); } int nvgpu_runlist_reload_ids(struct gk20a *g, u32 runlist_ids, bool add) { + struct nvgpu_fifo *f = 
&g->fifo; int ret = -EINVAL; unsigned long runlist_id = 0; int errcode; @@ -581,7 +578,8 @@ int nvgpu_runlist_reload_ids(struct gk20a *g, u32 runlist_ids, bool add) ret = 0; for_each_set_bit(runlist_id, &ulong_runlist_ids, 32U) { /* Capture the last failure error code */ - errcode = g->ops.runlist.reload(g, (u32)runlist_id, add, true); + errcode = g->ops.runlist.reload(g, + f->runlists[runlist_id], add, true); if (errcode != 0) { nvgpu_err(g, "failed to update_runlist %lu %d", @@ -880,7 +878,7 @@ u32 nvgpu_runlist_get_runlists_mask(struct gk20a *g, u32 id, if (id_type == ID_TYPE_TSG) { runlists_mask |= BIT32(f->tsg[id].runlist_id); } else { - runlists_mask |= BIT32(f->channel[id].runlist_id); + runlists_mask |= BIT32(f->channel[id].runlist->runlist_id); } } else { if (bitmask_disabled) { diff --git a/drivers/gpu/nvgpu/common/fifo/tsg.c b/drivers/gpu/nvgpu/common/fifo/tsg.c index cc4ecef76..c0f475971 100644 --- a/drivers/gpu/nvgpu/common/fifo/tsg.c +++ b/drivers/gpu/nvgpu/common/fifo/tsg.c @@ -115,12 +115,12 @@ int nvgpu_tsg_bind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch) /* all the channel part of TSG should need to be same runlist_id */ if (tsg->runlist_id == NVGPU_INVALID_TSG_ID) { - tsg->runlist_id = ch->runlist_id; + tsg->runlist_id = ch->runlist->runlist_id; } else { - if (tsg->runlist_id != ch->runlist_id) { + if (tsg->runlist_id != ch->runlist->runlist_id) { nvgpu_err(tsg->g, "runlist_id mismatch ch[%d] tsg[%d]", - ch->runlist_id, tsg->runlist_id); + ch->runlist->runlist_id, tsg->runlist_id); return -EINVAL; } } @@ -677,7 +677,7 @@ int nvgpu_tsg_set_interleave(struct nvgpu_tsg *tsg, u32 level) return 0; } - return g->ops.runlist.reload(g, tsg->runlist_id, true, true); + return g->ops.runlist.reload(g, g->fifo.runlists[tsg->runlist_id], true, true); } int nvgpu_tsg_set_timeslice(struct nvgpu_tsg *tsg, u32 timeslice_us) @@ -699,7 +699,7 @@ int nvgpu_tsg_set_timeslice(struct nvgpu_tsg *tsg, u32 timeslice_us) return 0; } - return g->ops.runlist.reload(g, tsg->runlist_id, true, true); + return g->ops.runlist.reload(g, g->fifo.runlists[tsg->runlist_id], true, true); } u32 nvgpu_tsg_get_timeslice(struct nvgpu_tsg *tsg) diff --git a/drivers/gpu/nvgpu/common/vgpu/fifo/channel_vgpu.c b/drivers/gpu/nvgpu/common/vgpu/fifo/channel_vgpu.c index dfeb9d869..e18341d25 100644 --- a/drivers/gpu/nvgpu/common/vgpu/fifo/channel_vgpu.c +++ b/drivers/gpu/nvgpu/common/vgpu/fifo/channel_vgpu.c @@ -22,6 +22,7 @@ #include #include +#include #include #include #include @@ -81,7 +82,7 @@ int vgpu_channel_alloc_inst(struct gk20a *g, struct nvgpu_channel *ch) msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX; msg.handle = vgpu_get_handle(g); p->id = ch->chid; - p->runlist_id = ch->runlist_id; + p->runlist_id = ch->runlist->runlist_id; p->pid = (u64)ch->pid; err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); if (err || msg.ret) { diff --git a/drivers/gpu/nvgpu/common/vgpu/fifo/runlist_vgpu.c b/drivers/gpu/nvgpu/common/vgpu/fifo/runlist_vgpu.c index 6cfa623e1..7d0a6d646 100644 --- a/drivers/gpu/nvgpu/common/vgpu/fifo/runlist_vgpu.c +++ b/drivers/gpu/nvgpu/common/vgpu/fifo/runlist_vgpu.c @@ -163,40 +163,36 @@ static int vgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id, special cases below: runlist->active_channels will NOT be changed. (ch == NULL && !add) means remove all active channels from runlist. (ch == NULL && add) means restore all active channels on runlist. 
*/ -static int vgpu_runlist_update(struct gk20a *g, u32 runlist_id, +static int vgpu_runlist_do_update(struct gk20a *g, struct nvgpu_runlist *rl, struct nvgpu_channel *ch, bool add, bool wait_for_finish) { - struct nvgpu_runlist *runlist = NULL; - struct nvgpu_fifo *f = &g->fifo; u32 ret = 0; nvgpu_log_fn(g, " "); - runlist = f->runlists[runlist_id]; + nvgpu_mutex_acquire(&rl->runlist_lock); - nvgpu_mutex_acquire(&runlist->runlist_lock); - - ret = vgpu_runlist_update_locked(g, runlist_id, ch, add, + ret = vgpu_runlist_update_locked(g, rl->runlist_id, ch, add, wait_for_finish); - nvgpu_mutex_release(&runlist->runlist_lock); + nvgpu_mutex_release(&rl->runlist_lock); return ret; } -int vgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id, - struct nvgpu_channel *ch, - bool add, bool wait_for_finish) +int vgpu_runlist_update(struct gk20a *g, struct nvgpu_runlist *rl, + struct nvgpu_channel *ch, + bool add, bool wait_for_finish) { nvgpu_assert(ch != NULL); - return vgpu_runlist_update(g, runlist_id, ch, add, wait_for_finish); + return vgpu_runlist_do_update(g, rl, ch, add, wait_for_finish); } -int vgpu_runlist_reload(struct gk20a *g, u32 runlist_id, - bool add, bool wait_for_finish) +int vgpu_runlist_reload(struct gk20a *g, struct nvgpu_runlist *rl, + bool add, bool wait_for_finish) { - return vgpu_runlist_update(g, runlist_id, NULL, add, wait_for_finish); + return vgpu_runlist_do_update(g, rl, NULL, add, wait_for_finish); } u32 vgpu_runlist_length_max(struct gk20a *g) diff --git a/drivers/gpu/nvgpu/common/vgpu/fifo/runlist_vgpu.h b/drivers/gpu/nvgpu/common/vgpu/fifo/runlist_vgpu.h index 50178e0c5..f7efe330e 100644 --- a/drivers/gpu/nvgpu/common/vgpu/fifo/runlist_vgpu.h +++ b/drivers/gpu/nvgpu/common/vgpu/fifo/runlist_vgpu.h @@ -24,11 +24,12 @@ struct gk20a; struct nvgpu_channel; +struct nvgpu_runlist; -int vgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id, - struct nvgpu_channel *ch, - bool add, bool wait_for_finish); -int vgpu_runlist_reload(struct gk20a *g, u32 runlist_id, +int vgpu_runlist_update(struct gk20a *g, struct nvgpu_runlist *rl, + struct nvgpu_channel *ch, + bool add, bool wait_for_finish); +int vgpu_runlist_reload(struct gk20a *g, struct nvgpu_runlist *rl, bool add, bool wait_for_finish); u32 vgpu_runlist_length_max(struct gk20a *g); u32 vgpu_runlist_entry_size(struct gk20a *g); diff --git a/drivers/gpu/nvgpu/hal/fifo/preempt_gv11b_fusa.c b/drivers/gpu/nvgpu/hal/fifo/preempt_gv11b_fusa.c index be81a6ef7..83b299e6c 100644 --- a/drivers/gpu/nvgpu/hal/fifo/preempt_gv11b_fusa.c +++ b/drivers/gpu/nvgpu/hal/fifo/preempt_gv11b_fusa.c @@ -312,27 +312,27 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id, unsigned int id_type) { struct nvgpu_fifo *f = &g->fifo; + struct nvgpu_runlist *rl; unsigned long runlist_served_pbdmas; unsigned long runlist_served_engines; unsigned long bit; u32 pbdma_id; u32 engine_id; - u32 runlist_id; int err, ret = 0; u32 tsgid; if (id_type == ID_TYPE_TSG) { - runlist_id = f->tsg[id].runlist_id; + rl = f->runlists[f->tsg[id].runlist_id]; tsgid = id; } else { - runlist_id = f->channel[id].runlist_id; + rl = f->channel[id].runlist; tsgid = f->channel[id].tsgid; } nvgpu_log_info(g, "Check preempt pending for tsgid = %u", tsgid); - runlist_served_pbdmas = f->runlists[runlist_id]->pbdma_bitmask; - runlist_served_engines = f->runlists[runlist_id]->eng_bitmask; + runlist_served_pbdmas = rl->pbdma_bitmask; + runlist_served_engines = rl->eng_bitmask; for_each_set_bit(bit, &runlist_served_pbdmas, nvgpu_get_litter_value(g, 
GPU_LIT_HOST_NUM_PBDMA)) { @@ -344,13 +344,13 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id, } } - f->runlists[runlist_id]->reset_eng_bitmask = 0U; + rl->reset_eng_bitmask = 0U; for_each_set_bit(bit, &runlist_served_engines, f->max_engines) { engine_id = U32(bit); err = gv11b_fifo_preempt_poll_eng(g, tsgid, engine_id, - &f->runlists[runlist_id]->reset_eng_bitmask); + &rl->reset_eng_bitmask); if ((err != 0) && (ret == 0)) { ret = err; } diff --git a/drivers/gpu/nvgpu/hal/fifo/runlist_fifo_gk20a.c b/drivers/gpu/nvgpu/hal/fifo/runlist_fifo_gk20a.c index a3afc08b0..585451bf3 100644 --- a/drivers/gpu/nvgpu/hal/fifo/runlist_fifo_gk20a.c +++ b/drivers/gpu/nvgpu/hal/fifo/runlist_fifo_gk20a.c @@ -52,8 +52,7 @@ int gk20a_fifo_reschedule_preempt_next(struct nvgpu_channel *ch, bool wait_preempt) { struct gk20a *g = ch->g; - struct nvgpu_runlist *runlist = - g->fifo.runlists[ch->runlist_id]; + struct nvgpu_runlist *runlist = ch->runlist; int ret = 0; u32 fecsstat0 = 0, fecsstat1 = 0; u32 preempt_id; diff --git a/drivers/gpu/nvgpu/hal/fifo/usermode_tu104.c b/drivers/gpu/nvgpu/hal/fifo/usermode_tu104.c index 71e73fe6b..5d1e2cea3 100644 --- a/drivers/gpu/nvgpu/hal/fifo/usermode_tu104.c +++ b/drivers/gpu/nvgpu/hal/fifo/usermode_tu104.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "usermode_tu104.h" @@ -59,13 +60,13 @@ u32 tu104_usermode_doorbell_token(struct nvgpu_channel *ch) u32 hw_chid = f->channel_base + ch->chid; return ctrl_doorbell_vector_f(hw_chid) | - ctrl_doorbell_runlist_id_f(ch->runlist_id); + ctrl_doorbell_runlist_id_f(ch->runlist->runlist_id); } void tu104_usermode_ring_doorbell(struct nvgpu_channel *ch) { nvgpu_log_info(ch->g, "channel ring door bell %d, runlist %d", - ch->chid, ch->runlist_id); + ch->chid, ch->runlist->runlist_id); nvgpu_usermode_writel(ch->g, func_doorbell_r(), ch->g->ops.usermode.doorbell_token(ch)); diff --git a/drivers/gpu/nvgpu/hal/gr/gr/gr_gp10b.c b/drivers/gpu/nvgpu/hal/gr/gr/gr_gp10b.c index da70ba0df..42d6896e4 100644 --- a/drivers/gpu/nvgpu/hal/gr/gr/gr_gp10b.c +++ b/drivers/gpu/nvgpu/hal/gr/gr/gr_gp10b.c @@ -44,6 +44,7 @@ #include #include #include +#include #include "gr_gk20a.h" #include "gr_gp10b.h" @@ -402,7 +403,7 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct nvgpu_channel return ret; } - ret = g->ops.runlist.reload(g, fault_ch->runlist_id, true, false); + ret = g->ops.runlist.reload(g, fault_ch->runlist, true, false); if (ret != 0) { nvgpu_err(g, "CILP: failed to restart runlist 0!"); return ret; diff --git a/drivers/gpu/nvgpu/hal/init/hal_gm20b.c b/drivers/gpu/nvgpu/hal/init/hal_gm20b.c index d9665e577..14dc98804 100644 --- a/drivers/gpu/nvgpu/hal/init/hal_gm20b.c +++ b/drivers/gpu/nvgpu/hal/init/hal_gm20b.c @@ -684,7 +684,7 @@ static const struct gops_ramin gm20b_ops_ramin = { }; static const struct gops_runlist gm20b_ops_runlist = { - .update_for_channel = nvgpu_runlist_update_for_channel, + .update = nvgpu_runlist_update, .reload = nvgpu_runlist_reload, .count_max = gk20a_runlist_count_max, .entry_size = gk20a_runlist_entry_size, diff --git a/drivers/gpu/nvgpu/hal/init/hal_gp10b.c b/drivers/gpu/nvgpu/hal/init/hal_gp10b.c index a877e7cb2..73ea8823d 100644 --- a/drivers/gpu/nvgpu/hal/init/hal_gp10b.c +++ b/drivers/gpu/nvgpu/hal/init/hal_gp10b.c @@ -769,7 +769,7 @@ static const struct gops_runlist gp10b_ops_runlist = { .reschedule = gk20a_runlist_reschedule, .reschedule_preempt_next_locked = gk20a_fifo_reschedule_preempt_next, #endif - .update_for_channel = nvgpu_runlist_update_for_channel, + .update 
= nvgpu_runlist_update, .reload = nvgpu_runlist_reload, .count_max = gk20a_runlist_count_max, .entry_size = gk20a_runlist_entry_size, diff --git a/drivers/gpu/nvgpu/hal/init/hal_gv11b.c b/drivers/gpu/nvgpu/hal/init/hal_gv11b.c index 1cec3ebc6..b512dc8cd 100644 --- a/drivers/gpu/nvgpu/hal/init/hal_gv11b.c +++ b/drivers/gpu/nvgpu/hal/init/hal_gv11b.c @@ -943,7 +943,7 @@ static const struct gops_runlist gv11b_ops_runlist = { .reschedule = gv11b_runlist_reschedule, .reschedule_preempt_next_locked = gk20a_fifo_reschedule_preempt_next, #endif - .update_for_channel = nvgpu_runlist_update_for_channel, + .update = nvgpu_runlist_update, .reload = nvgpu_runlist_reload, .count_max = gv11b_runlist_count_max, .entry_size = gv11b_runlist_entry_size, diff --git a/drivers/gpu/nvgpu/hal/init/hal_tu104.c b/drivers/gpu/nvgpu/hal/init/hal_tu104.c index d27266a0d..2e6710b83 100644 --- a/drivers/gpu/nvgpu/hal/init/hal_tu104.c +++ b/drivers/gpu/nvgpu/hal/init/hal_tu104.c @@ -994,7 +994,7 @@ static const struct gops_ramin tu104_ops_ramin = { }; static const struct gops_runlist tu104_ops_runlist = { - .update_for_channel = nvgpu_runlist_update_for_channel, + .update = nvgpu_runlist_update, .reload = nvgpu_runlist_reload, .count_max = tu104_runlist_count_max, .entry_size = tu104_runlist_entry_size, diff --git a/drivers/gpu/nvgpu/hal/vgpu/init/vgpu_hal_gp10b.c b/drivers/gpu/nvgpu/hal/vgpu/init/vgpu_hal_gp10b.c index ae189236c..0bc25ec85 100644 --- a/drivers/gpu/nvgpu/hal/vgpu/init/vgpu_hal_gp10b.c +++ b/drivers/gpu/nvgpu/hal/vgpu/init/vgpu_hal_gp10b.c @@ -546,7 +546,7 @@ static const struct gops_ramin vgpu_gp10b_ops_ramin = { static const struct gops_runlist vgpu_gp10b_ops_runlist = { .reschedule = NULL, - .update_for_channel = vgpu_runlist_update_for_channel, + .update = vgpu_runlist_update, .reload = vgpu_runlist_reload, .count_max = gk20a_runlist_count_max, .entry_size = vgpu_runlist_entry_size, diff --git a/drivers/gpu/nvgpu/hal/vgpu/init/vgpu_hal_gv11b.c b/drivers/gpu/nvgpu/hal/vgpu/init/vgpu_hal_gv11b.c index 19b40240e..984807eba 100644 --- a/drivers/gpu/nvgpu/hal/vgpu/init/vgpu_hal_gv11b.c +++ b/drivers/gpu/nvgpu/hal/vgpu/init/vgpu_hal_gv11b.c @@ -649,7 +649,7 @@ static const struct gops_ramin vgpu_gv11b_ops_ramin = { static const struct gops_runlist vgpu_gv11b_ops_runlist = { .reschedule = NULL, - .update_for_channel = vgpu_runlist_update_for_channel, + .update = vgpu_runlist_update, .reload = vgpu_runlist_reload, .count_max = gv11b_runlist_count_max, .entry_size = vgpu_runlist_entry_size, diff --git a/drivers/gpu/nvgpu/include/nvgpu/channel.h b/drivers/gpu/nvgpu/include/nvgpu/channel.h index 893cb30ae..f3c68d346 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/channel.h +++ b/drivers/gpu/nvgpu/include/nvgpu/channel.h @@ -50,6 +50,7 @@ struct priv_cmd_queue; struct priv_cmd_entry; struct nvgpu_channel_wdt; struct nvgpu_user_fence; +struct nvgpu_runlist; /** * S/W defined invalid channel identifier. @@ -488,8 +489,8 @@ struct nvgpu_channel { */ u32 runqueue_sel; - /** Identifer of the runlist the channel will run on */ - u32 runlist_id; + /** Runlist the channel will run on. 
*/ + struct nvgpu_runlist *runlist; /** * Recovery path can be entered twice for the same error in diff --git a/drivers/gpu/nvgpu/include/nvgpu/gops/runlist.h b/drivers/gpu/nvgpu/include/nvgpu/gops/runlist.h index 56865e48c..f9741070f 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/gops/runlist.h +++ b/drivers/gpu/nvgpu/include/nvgpu/gops/runlist.h @@ -65,12 +65,12 @@ struct gops_runlist { * @retval -E2BIG in case there are not enough entries in the runlist * buffer to accommodate all active channels/TSGs. */ - int (*reload)(struct gk20a *g, u32 runlist_id, + int (*reload)(struct gk20a *g, struct nvgpu_runlist *rl, bool add, bool wait_for_finish); /** @cond DOXYGEN_SHOULD_SKIP_THIS */ - int (*update_for_channel)(struct gk20a *g, u32 runlist_id, + int (*update)(struct gk20a *g, struct nvgpu_runlist *rl, struct nvgpu_channel *ch, bool add, bool wait_for_finish); u32 (*count_max)(struct gk20a *g); diff --git a/drivers/gpu/nvgpu/include/nvgpu/runlist.h b/drivers/gpu/nvgpu/include/nvgpu/runlist.h index 345bcaa55..8150a57ca 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/runlist.h +++ b/drivers/gpu/nvgpu/include/nvgpu/runlist.h @@ -142,7 +142,7 @@ u32 nvgpu_runlist_construct_locked(struct nvgpu_fifo *f, * * When #ch is NULL, this function has same behavior as #nvgpu_runlist_reload. * When #ch is non NULL, this function has same behavior as - * #nvgpu_runlist_update_for_channel. + * #nvgpu_runlist_update. * * The only difference with #nvgpu_runlist_reload is that the caller already * holds the runlist_lock before calling this function. @@ -185,7 +185,7 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next, * @retval -E2BIG in case there are not enough entries in runlist buffer to * accommodate all active channels/TSGs. */ -int nvgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id, +int nvgpu_runlist_update(struct gk20a *g, struct nvgpu_runlist *rl, struct nvgpu_channel *ch, bool add, bool wait_for_finish); /** @@ -211,7 +211,7 @@ int nvgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id, * @retval -E2BIG in case there are not enough entries in the runlist buffer * to accommodate all active channels/TSGs. */ -int nvgpu_runlist_reload(struct gk20a *g, u32 runlist_id, +int nvgpu_runlist_reload(struct gk20a *g, struct nvgpu_runlist *rl, bool add, bool wait_for_finish); /**
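
Reviewer aid (not part of the applied patch): the sketch below condenses the call-site migration this change introduces. The gops signatures, the ch->runlist member, and the g->fifo.runlists[] lookup are taken from the hunks above; the header paths are assumed from the in-tree layout, and the helper names example_update_channel_runlist() and example_reload_by_id() are hypothetical stand-ins for real callers such as nvgpu_channel_update_runlist() and nvgpu_tsg_set_interleave().

/*
 * Illustrative sketch only -- builds against the nvgpu tree, not standalone.
 * Shows how callers move from raw runlist IDs to struct nvgpu_runlist
 * pointers after this patch.
 */
#include <nvgpu/gk20a.h>	/* struct gk20a, g->ops, g->fifo (assumed path) */
#include <nvgpu/channel.h>	/* struct nvgpu_channel, ch->runlist */
#include <nvgpu/runlist.h>	/* struct nvgpu_runlist, rl->runlist_id */

/* Callers that hold a channel now pass ch->runlist directly. */
static int example_update_channel_runlist(struct nvgpu_channel *ch, bool add)
{
	/*
	 * Was: ch->g->ops.runlist.update_for_channel(ch->g, ch->runlist_id,
	 *                                            ch, add, true);
	 */
	return ch->g->ops.runlist.update(ch->g, ch->runlist, ch, add, true);
}

/*
 * Callers that still track only a runlist ID (TSG code, runlist-ID bitmasks)
 * must look the pointer up for now -- these are the "worse (for now)" spots
 * noted in the commit message.
 */
static int example_reload_by_id(struct gk20a *g, u32 runlist_id)
{
	struct nvgpu_runlist *rl = g->fifo.runlists[runlist_id];

	return g->ops.runlist.reload(g, rl, true, true);
}

The pointer-based interface lets nvgpu_runlist_do_update() take the runlist lock and report timeouts via rl->runlist_id without re-indexing g->fifo.runlists[], which is the first step toward dropping raw runlist IDs from non-runlist code.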