gpu: nvgpu: array of pointers to runlists
Currently a fifo_runlist_info_gk20a structure is allocated and initialized for each possible runlist, but only a few runlists are actually used. Use an array of pointers to runlists in fifo_gk20a instead. The array keeps the existing indexing by runlist_id. In this patch a context is still allocated for each possible runlist; a follow-up patch will allow skipping context allocation for inactive runlists.

Bug 2470115

Change-Id: I1615043cea84db35a270ade64695d51f85c1193a
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2025203
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: 0a0120c18b
Commit: 5fdda1b075
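The diff below switches fifo_gk20a from an embedded array of fifo_runlist_info_gk20a structures to an array of pointers, so accesses change from &f->runlist_info[id] to f->runlist_info[id] and each entry is allocated individually. As a minimal standalone sketch of that ownership pattern (not the nvgpu code itself: it substitutes plain calloc()/free() for nvgpu_kzalloc()/nvgpu_kfree() and uses a simplified, hypothetical runlist_info struct, keeping only the shape of the allocation, access, and teardown the patch touches):

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for fifo_runlist_info_gk20a: illustrative fields only,
 * not the real nvgpu structure. */
struct runlist_info {
	unsigned int eng_bitmask;
	unsigned int cur_buffer;
};

struct fifo {
	unsigned int max_runlists;
	/* Before the patch: struct runlist_info *runlist_info (array of structs).
	 * After the patch: an array of pointers, still indexed by runlist_id. */
	struct runlist_info **runlist_info;
};

static void fifo_delete_runlists(struct fifo *f)
{
	unsigned int id;

	if (f->runlist_info == NULL)
		return;
	for (id = 0; id < f->max_runlists; id++) {
		free(f->runlist_info[id]);	/* free(NULL) is a no-op */
		f->runlist_info[id] = NULL;
	}
	free(f->runlist_info);
	f->runlist_info = NULL;
}

static int fifo_init_runlists(struct fifo *f, unsigned int max_runlists)
{
	unsigned int id;

	f->max_runlists = max_runlists;
	/* Allocate the pointer array: note sizeof(pointer), not sizeof(struct). */
	f->runlist_info = calloc(max_runlists, sizeof(*f->runlist_info));
	if (f->runlist_info == NULL)
		return -1;

	for (id = 0; id < max_runlists; id++) {
		/* One allocation per runlist; a later change could simply skip
		 * this step for runlists that are never used. */
		f->runlist_info[id] = calloc(1, sizeof(**f->runlist_info));
		if (f->runlist_info[id] == NULL) {
			fifo_delete_runlists(f);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct fifo f = { 0 };

	if (fifo_init_runlists(&f, 4) != 0)
		return 1;

	/* Accesses change from &f.runlist_info[id] to f.runlist_info[id]. */
	f.runlist_info[2]->eng_bitmask = 0x1u;
	printf("runlist 2 eng_bitmask = 0x%x\n", f.runlist_info[2]->eng_bitmask);

	fifo_delete_runlists(&f);
	return 0;
}

Keeping the pointer array indexed by runlist_id preserves O(1) lookup while letting a later change leave entries NULL for runlists that are never used.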
@@ -281,7 +281,7 @@ static bool gk20a_runlist_modify_active_locked(struct gk20a *g, u32 runlist_id,
 	struct fifo_runlist_info_gk20a *runlist = NULL;
 	struct tsg_gk20a *tsg = NULL;
 
-	runlist = &f->runlist_info[runlist_id];
+	runlist = f->runlist_info[runlist_id];
 	tsg = tsg_gk20a_from_ch(ch);
 
 	if (tsg == NULL) {
@@ -326,7 +326,7 @@ static int gk20a_runlist_reconstruct_locked(struct gk20a *g, u32 runlist_id,
 	struct fifo_gk20a *f = &g->fifo;
 	struct fifo_runlist_info_gk20a *runlist = NULL;
 
-	runlist = &f->runlist_info[runlist_id];
+	runlist = f->runlist_info[runlist_id];
 
 	nvgpu_log_info(g, "runlist_id : %d, switch to new buffer 0x%16llx",
 		runlist_id, (u64)nvgpu_mem_get_addr(g, &runlist->mem[buf_id]));
@@ -372,7 +372,7 @@ int gk20a_runlist_update_locked(struct gk20a *g, u32 runlist_id,
 		add_entries = add;
 	}
 
-	runlist = &f->runlist_info[runlist_id];
+	runlist = f->runlist_info[runlist_id];
 	/* double buffering, swap to next */
 	buf_id = runlist->cur_buffer == 0U ? 1U : 0U;
 
@@ -412,7 +412,7 @@ int nvgpu_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next,
 	int mutex_ret = -EINVAL;
 	int ret = 0;
 
-	runlist = &g->fifo.runlist_info[ch->runlist_id];
+	runlist = g->fifo.runlist_info[ch->runlist_id];
 	if (nvgpu_mutex_tryacquire(&runlist->runlist_lock) == 0) {
 		return -EBUSY;
 	}
@@ -466,7 +466,7 @@ static int gk20a_runlist_update(struct gk20a *g, u32 runlist_id,
 
 	nvgpu_log_fn(g, " ");
 
-	runlist = &f->runlist_info[runlist_id];
+	runlist = f->runlist_info[runlist_id];
 
 	nvgpu_mutex_acquire(&runlist->runlist_lock);
 
@@ -592,7 +592,7 @@ void gk20a_fifo_delete_runlist(struct fifo_gk20a *f)
 	g = f->g;
 
 	for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
-		runlist = &f->runlist_info[runlist_id];
+		runlist = f->runlist_info[runlist_id];
 		for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
 			nvgpu_dma_free(g, &runlist->mem[i]);
 		}
@@ -604,10 +604,9 @@ void gk20a_fifo_delete_runlist(struct fifo_gk20a *f)
 		runlist->active_tsgs = NULL;
 
 		nvgpu_mutex_destroy(&runlist->runlist_lock);
-
+		nvgpu_kfree(g, runlist);
+		f->runlist_info[runlist_id] = NULL;
 	}
-	(void) memset(f->runlist_info, 0,
-		(sizeof(struct fifo_runlist_info_gk20a) * f->max_runlists));
 
 	nvgpu_kfree(g, f->runlist_info);
 	f->runlist_info = NULL;
@@ -626,14 +625,18 @@ int nvgpu_init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 
 	f->max_runlists = g->ops.runlist.count_max();
 	f->runlist_info = nvgpu_kzalloc(g,
-			sizeof(struct fifo_runlist_info_gk20a) *
+			sizeof(struct fifo_runlist_info_gk20a *) *
 			f->max_runlists);
 	if (f->runlist_info == NULL) {
 		goto clean_up_runlist;
 	}
 
 	for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
-		runlist = &f->runlist_info[runlist_id];
+		runlist = nvgpu_kzalloc(g, sizeof(*runlist));
+		if (runlist == NULL) {
+			goto clean_up_runlist;
+		}
+		f->runlist_info[runlist_id] = runlist;
 
 		runlist->active_channels =
 			nvgpu_kzalloc(g, DIV_ROUND_UP(f->num_channels,
@@ -49,7 +49,7 @@ int gk20a_fifo_reschedule_preempt_next(struct channel_gk20a *ch,
 {
 	struct gk20a *g = ch->g;
 	struct fifo_runlist_info_gk20a *runlist =
-		&g->fifo.runlist_info[ch->runlist_id];
+		g->fifo.runlist_info[ch->runlist_id];
 	int ret = 0;
 	u32 gr_eng_id = 0;
 	u32 fecsstat0 = 0, fecsstat1 = 0;
@@ -169,7 +169,7 @@ void gk20a_runlist_hw_submit(struct gk20a *g, u32 runlist_id,
 	struct fifo_runlist_info_gk20a *runlist = NULL;
 	u64 runlist_iova;
 
-	runlist = &g->fifo.runlist_info[runlist_id];
+	runlist = g->fifo.runlist_info[runlist_id];
 	runlist_iova = nvgpu_mem_get_addr(g, &runlist->mem[buffer_index]);
 
 	nvgpu_spinlock_acquire(&g->fifo.runlist_submit_lock);
@@ -49,7 +49,7 @@ void tu104_runlist_hw_submit(struct gk20a *g, u32 runlist_id,
 	u64 runlist_iova;
 	u32 runlist_iova_lo, runlist_iova_hi;
 
-	runlist = &g->fifo.runlist_info[runlist_id];
+	runlist = g->fifo.runlist_info[runlist_id];
 	runlist_iova = nvgpu_mem_get_addr(g, &runlist->mem[buffer_index]);
 
 	runlist_iova_lo = u64_lo32(runlist_iova) >>
@@ -94,7 +94,7 @@ static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
 	unsigned int i;
 
 	for (i = 0; i < f->max_runlists; ++i) {
-		runlist = &f->runlist_info[i];
+		runlist = f->runlist_info[i];
 		if (test_bit((int)ch->chid, runlist->active_channels)) {
 			return true;
 		}
@@ -78,7 +78,7 @@ static bool vgpu_runlist_modify_active_locked(struct gk20a *g, u32 runlist_id,
 	struct fifo_gk20a *f = &g->fifo;
 	struct fifo_runlist_info_gk20a *runlist;
 
-	runlist = &f->runlist_info[runlist_id];
+	runlist = f->runlist_info[runlist_id];
 
 	if (add) {
 		if (test_and_set_bit((int)ch->chid,
@@ -103,7 +103,7 @@ static void vgpu_runlist_reconstruct_locked(struct gk20a *g, u32 runlist_id,
 	struct fifo_gk20a *f = &g->fifo;
 	struct fifo_runlist_info_gk20a *runlist;
 
-	runlist = &f->runlist_info[runlist_id];
+	runlist = f->runlist_info[runlist_id];
 
 	if (add_entries) {
 		u16 *runlist_entry;
@@ -150,7 +150,7 @@ static int vgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
 		add_entries = add;
 	}
 
-	runlist = &f->runlist_info[runlist_id];
+	runlist = f->runlist_info[runlist_id];
 
 	vgpu_runlist_reconstruct_locked(g, runlist_id, add_entries);
 
@@ -172,7 +172,7 @@ static int vgpu_runlist_update(struct gk20a *g, u32 runlist_id,
 
 	nvgpu_log_fn(g, " ");
 
-	runlist = &f->runlist_info[runlist_id];
+	runlist = f->runlist_info[runlist_id];
 
 	nvgpu_mutex_acquire(&runlist->runlist_lock);
 
@@ -394,7 +394,7 @@ static int nvgpu_init_runlist_enginfo(struct gk20a *g, struct fifo_gk20a *f)
 	nvgpu_log_fn(g, " ");
 
 	for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
-		runlist = &f->runlist_info[runlist_id];
+		runlist = f->runlist_info[runlist_id];
 
 		for (pbdma_id = 0; pbdma_id < f->num_pbdma; pbdma_id++) {
 			if ((f->pbdma_map[pbdma_id] & BIT32(runlist_id)) != 0U) {
@@ -1347,7 +1347,7 @@ static bool gk20a_fifo_handle_mmu_fault(
 
 	nvgpu_log_info(g, "acquire runlist_lock for all runlists");
 	for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
-		nvgpu_mutex_acquire(&g->fifo.runlist_info[rlid].runlist_lock);
+		nvgpu_mutex_acquire(&g->fifo.runlist_info[rlid]->runlist_lock);
 	}
 
 	verbose = gk20a_fifo_handle_mmu_fault_locked(g, mmu_fault_engines,
@@ -1355,7 +1355,7 @@ static bool gk20a_fifo_handle_mmu_fault(
 
 	nvgpu_log_info(g, "release runlist_lock for all runlists");
 	for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
-		nvgpu_mutex_release(&g->fifo.runlist_info[rlid].runlist_lock);
+		nvgpu_mutex_release(&g->fifo.runlist_info[rlid]->runlist_lock);
 	}
 	return verbose;
 }
@@ -1454,7 +1454,7 @@ void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
 
 	nvgpu_log_info(g, "acquire runlist_lock for all runlists");
 	for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
-		nvgpu_mutex_acquire(&g->fifo.runlist_info[rlid].runlist_lock);
+		nvgpu_mutex_acquire(&g->fifo.runlist_info[rlid]->runlist_lock);
 	}
 
 	if (id_is_known) {
@@ -1516,7 +1516,7 @@ void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
 
 	nvgpu_log_info(g, "release runlist_lock for all runlists");
 	for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
-		nvgpu_mutex_release(&g->fifo.runlist_info[rlid].runlist_lock);
+		nvgpu_mutex_release(&g->fifo.runlist_info[rlid]->runlist_lock);
 	}
 }
 
@@ -2272,7 +2272,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
 
 	/* we have no idea which runlist we are using. lock all */
 	for (i = 0; i < g->fifo.max_runlists; i++) {
-		nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock);
+		nvgpu_mutex_acquire(&f->runlist_info[i]->runlist_lock);
 	}
 
 	if (g->ops.pmu.is_pmu_supported(g)) {
@@ -2287,7 +2287,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
 	}
 
 	for (i = 0; i < g->fifo.max_runlists; i++) {
-		nvgpu_mutex_release(&f->runlist_info[i].runlist_lock);
+		nvgpu_mutex_release(&f->runlist_info[i]->runlist_lock);
 	}
 
 	if (ret != 0) {
@@ -2317,7 +2317,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 
 	/* we have no idea which runlist we are using. lock all */
 	for (i = 0; i < g->fifo.max_runlists; i++) {
-		nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock);
+		nvgpu_mutex_acquire(&f->runlist_info[i]->runlist_lock);
 	}
 
 	if (g->ops.pmu.is_pmu_supported(g)) {
@@ -2332,7 +2332,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 	}
 
 	for (i = 0; i < g->fifo.max_runlists; i++) {
-		nvgpu_mutex_release(&f->runlist_info[i].runlist_lock);
+		nvgpu_mutex_release(&f->runlist_info[i]->runlist_lock);
 	}
 
 	if (ret != 0) {
@@ -149,7 +149,7 @@ struct fifo_gk20a {
 	u32 num_engines;
 	u32 *active_engines_list;
 
-	struct fifo_runlist_info_gk20a *runlist_info;
+	struct fifo_runlist_info_gk20a **runlist_info;
 	u32 max_runlists;
 #ifdef CONFIG_DEBUG_FS
 	struct {
@@ -641,7 +641,7 @@ static u32 gv11b_fifo_get_runlists_mask(struct gk20a *g, u32 act_eng_bitmask,
 
 	for (rlid = 0; rlid < f->max_runlists; rlid++) {
 
-		runlist = &f->runlist_info[rlid];
+		runlist = f->runlist_info[rlid];
 
 		if ((runlist->eng_bitmask & act_eng_bitmask) != 0U) {
 			runlists_mask |= BIT32(rlid);
@@ -657,7 +657,7 @@ static u32 gv11b_fifo_get_runlists_mask(struct gk20a *g, u32 act_eng_bitmask,
 	for (rlid = 0; rlid < f->max_runlists; rlid++) {
 		if (act_eng_bitmask != 0U) {
 			/* eng ids are known */
			runlist = f->runlist_info[rlid];
			runlist = f->runlist_info[rlid];
 			if ((runlist->eng_bitmask & act_eng_bitmask) != 0U) {
 				runlists_mask |= BIT32(rlid);
 			}
@@ -703,18 +703,18 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 
 	nvgpu_log_info(g, "Check preempt pending for tsgid = %u", tsgid);
 
-	runlist_served_pbdmas = f->runlist_info[runlist_id].pbdma_bitmask;
-	runlist_served_engines = f->runlist_info[runlist_id].eng_bitmask;
+	runlist_served_pbdmas = f->runlist_info[runlist_id]->pbdma_bitmask;
+	runlist_served_engines = f->runlist_info[runlist_id]->eng_bitmask;
 
 	for_each_set_bit(pbdma_id, &runlist_served_pbdmas, f->num_pbdma) {
 		ret |= gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id);
 	}
 
-	f->runlist_info[runlist_id].reset_eng_bitmask = 0;
+	f->runlist_info[runlist_id]->reset_eng_bitmask = 0;
 
 	for_each_set_bit(act_eng_id, &runlist_served_engines, f->max_engines) {
 		ret |= gv11b_fifo_poll_eng_ctx_status(g, tsgid, act_eng_id,
-				&f->runlist_info[runlist_id].reset_eng_bitmask);
+				&f->runlist_info[runlist_id]->reset_eng_bitmask);
 	}
 	return ret;
 }
@@ -772,7 +772,7 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 		return 0;
 	}
 
-	nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
+	nvgpu_mutex_acquire(&f->runlist_info[runlist_id]->runlist_lock);
 
 	/* WAR for Bug 2065990 */
 	gk20a_tsg_disable_sched(g, tsg);
@@ -796,7 +796,7 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 	/* WAR for Bug 2065990 */
 	gk20a_tsg_enable_sched(g, tsg);
 
-	nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);
+	nvgpu_mutex_release(&f->runlist_info[runlist_id]->runlist_lock);
 
 	if (ret != 0) {
 		if (nvgpu_platform_is_silicon(g)) {
@@ -836,8 +836,8 @@ static void gv11b_fifo_locked_preempt_runlists_rc(struct gk20a *g,
 	for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
 		if ((runlists_mask &
 				fifo_runlist_preempt_runlist_m(rlid)) != 0U) {
-			g->fifo.runlist_info[rlid].reset_eng_bitmask =
-				g->fifo.runlist_info[rlid].eng_bitmask;
+			g->fifo.runlist_info[rlid]->reset_eng_bitmask =
+				g->fifo.runlist_info[rlid]->eng_bitmask;
 		}
 	}
 
@@ -878,7 +878,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
 		}
 		nvgpu_log(g, gpu_dbg_info, "abort runlist id %d",
 				rlid);
-		runlist = &g->fifo.runlist_info[rlid];
+		runlist = g->fifo.runlist_info[rlid];
 
 		for_each_set_bit(tsgid, runlist->active_tsgs,
 				g->fifo.num_channels) {
@@ -982,7 +982,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 
 	nvgpu_log_fn(g, "acquire runlist_lock for all runlists");
 	for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
-		nvgpu_mutex_acquire(&f->runlist_info[rlid].
+		nvgpu_mutex_acquire(&f->runlist_info[rlid]->
 			runlist_lock);
 	}
 
@@ -1010,7 +1010,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 	for (rlid = 0; rlid < f->max_runlists; rlid++) {
 		if (act_eng_bitmask != 0U) {
 			/* eng ids are known */
-			runlist = &f->runlist_info[rlid];
+			runlist = f->runlist_info[rlid];
 			if ((runlist->eng_bitmask & act_eng_bitmask) != 0U) {
 				runlist_id = rlid;
 				num_runlists++;
@@ -1034,7 +1034,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 		if (rlid != runlist_id) {
 			nvgpu_log_fn(g, "release runlist_lock for "
 				"unused runlist id: %d", rlid);
-			nvgpu_mutex_release(&f->runlist_info[rlid].
+			nvgpu_mutex_release(&f->runlist_info[rlid]->
 				runlist_lock);
 		}
 	}
@@ -1101,7 +1101,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 	 */
 	if (tsg != NULL) {
 		rlid = f->tsg[id].runlist_id;
-		runlist_served_pbdmas = f->runlist_info[rlid].pbdma_bitmask;
+		runlist_served_pbdmas = f->runlist_info[rlid]->pbdma_bitmask;
 		for_each_set_bit(pbdma_id, &runlist_served_pbdmas,
 				f->num_pbdma) {
 			/*
@@ -1119,7 +1119,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 	/* check if engine reset should be deferred */
 	for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
 
-		runlist = &g->fifo.runlist_info[rlid];
+		runlist = g->fifo.runlist_info[rlid];
 		if (((runlists_mask & BIT32(rlid)) != 0U) &&
 			(runlist->reset_eng_bitmask != 0U)) {
 
@@ -1193,11 +1193,11 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 	if (runlist_id != FIFO_INVAL_RUNLIST_ID) {
 		nvgpu_log_fn(g, "release runlist_lock runlist_id = %d",
 				runlist_id);
-		nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);
+		nvgpu_mutex_release(&f->runlist_info[runlist_id]->runlist_lock);
 	} else {
 		nvgpu_log_fn(g, "release runlist_lock for all runlists");
 		for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
-			nvgpu_mutex_release(&f->runlist_info[rlid].
+			nvgpu_mutex_release(&f->runlist_info[rlid]->
 				runlist_lock);
 		}
 	}
@@ -73,7 +73,7 @@ static int gk20a_fifo_sched_debugfs_seq_show(
 	engine_id = nvgpu_engine_get_gr_eng_id(g);
 	engine_info = (f->engine_info + engine_id);
 	runlist_id = engine_info->runlist_id;
-	runlist = &f->runlist_info[runlist_id];
+	runlist = f->runlist_info[runlist_id];
 
 	if (ch == f->channel) {
 		seq_puts(s, "chid tsgid pid timeslice timeout interleave graphics_preempt compute_preempt\n");
@@ -35,10 +35,11 @@ static void setup_fifo(struct gk20a *g, unsigned long *tsg_map,
 		unsigned long *ch_map, struct tsg_gk20a *tsgs,
 		struct channel_gk20a *chs, unsigned int num_tsgs,
 		unsigned int num_channels,
-		struct fifo_runlist_info_gk20a *runlist, u32 *rl_data,
+		struct fifo_runlist_info_gk20a **runlists, u32 *rl_data,
 		bool interleave)
 {
 	struct fifo_gk20a *f = &g->fifo;
+	struct fifo_runlist_info_gk20a *runlist = runlists[0];
 
 	/* we only use the runlist 0 here */
 	runlist->mem[0].cpu_va = rl_data;
@@ -53,7 +54,7 @@ static void setup_fifo(struct gk20a *g, unsigned long *tsg_map,
 	f->tsg = tsgs;
 	f->channel = chs;
 	f->num_channels = num_channels;
-	f->runlist_info = runlist;
+	f->runlist_info = runlists;
 
 	/*
 	 * For testing the runlist entry order format, these simpler dual-u32
@@ -117,7 +118,7 @@ static int run_format_test(struct unit_module *m, struct fifo_gk20a *f,
 	setup_tsg_multich(tsg, chs, 0, prio, 5, n_ch);
 
 	/* entry capacity: tsg header and some channels */
-	n = nvgpu_runlist_construct_locked(f, f->runlist_info, 0, 1 + n_ch);
+	n = nvgpu_runlist_construct_locked(f, f->runlist_info[0], 0, 1 + n_ch);
 	if (n != 1 + n_ch) {
 		unit_return_fail(m, "number of entries mismatch %d\n", n);
 	}
@@ -160,6 +161,7 @@ static int test_tsg_format_gen(struct unit_module *m, struct gk20a *g,
 {
 	struct fifo_gk20a *f = &g->fifo;
 	struct fifo_runlist_info_gk20a runlist;
+	struct fifo_runlist_info_gk20a *runlists = &runlist;
 	unsigned long active_tsgs_map = 0;
 	unsigned long active_chs_map = 0;
 	struct tsg_gk20a tsgs[1] = {{0}};
@@ -172,7 +174,7 @@ static int test_tsg_format_gen(struct unit_module *m, struct gk20a *g,
 	(void)test_args->timeslice;
 
 	setup_fifo(g, &active_tsgs_map, &active_chs_map, tsgs, chs, 1, 5,
-			&runlist, rl_data, false);
+			&runlists, rl_data, false);
 
 	active_chs_map = test_args->chs_bitmap;
 
@@ -220,6 +222,7 @@ static int test_common_gen(struct unit_module *m, struct gk20a *g,
 {
 	struct fifo_gk20a *f = &g->fifo;
 	struct fifo_runlist_info_gk20a runlist;
+	struct fifo_runlist_info_gk20a *runlists = &runlist;
 	unsigned long active_tsgs_map = 0;
 	unsigned long active_chs_map = 0;
 	struct tsg_gk20a tsgs[6] = {{0}};
@@ -233,7 +236,7 @@ static int test_common_gen(struct unit_module *m, struct gk20a *g,
 	u32 i = 0;
 
 	setup_fifo(g, &active_tsgs_map, &active_chs_map, tsgs, chs,
-			levels_count, 6, &runlist, rl_data, interleave);
+			levels_count, 6, &runlists, rl_data, interleave);
 
 	for (i = 0; i < levels_count; i++) {
 		setup_tsg(tsgs, chs, i, levels[i]);