From f67bc51e519be2d0e43dfdadf8ef569cc468865b Mon Sep 17 00:00:00 2001
From: Thomas Fleury
Date: Thu, 28 Feb 2019 15:03:51 -0800
Subject: [PATCH] Revert "gpu: nvgpu: allocate only active runlists"

This reverts commit 45fa0441f7afec8055c27257d5b8102ce5aebba1.

Bug 2522374

Change-Id: Icb80b7a31c7588a269850a3768ab0238dbec67b1
Signed-off-by: Thomas Fleury
Reviewed-on: https://git-master.nvidia.com/r/2030292
Reviewed-by: svc-mobile-coverity
Reviewed-by: svc-mobile-misra
Reviewed-by: svc-misra-checker
Reviewed-by: Hoang Pham
---
 drivers/gpu/nvgpu/common/fifo/runlist.c   |  96 ++++++--------------
 drivers/gpu/nvgpu/common/fifo/tsg.c       |   4 +-
 drivers/gpu/nvgpu/gk20a/fifo_gk20a.c      |  59 ++++++++----
 drivers/gpu/nvgpu/gk20a/fifo_gk20a.h      |   9 --
 drivers/gpu/nvgpu/gv11b/fifo_gv11b.c      | 105 +++++++++++-----------
 drivers/gpu/nvgpu/include/nvgpu/runlist.h |   3 -
 6 files changed, 125 insertions(+), 151 deletions(-)

diff --git a/drivers/gpu/nvgpu/common/fifo/runlist.c b/drivers/gpu/nvgpu/common/fifo/runlist.c
index 8dda48abe..9ea2b66dc 100644
--- a/drivers/gpu/nvgpu/common/fifo/runlist.c
+++ b/drivers/gpu/nvgpu/common/fifo/runlist.c
@@ -26,32 +26,6 @@
 #include 
 #include 
 
-void nvgpu_fifo_lock_active_runlists(struct gk20a *g)
-{
-        struct fifo_gk20a *f = &g->fifo;
-        struct fifo_runlist_info_gk20a *runlist;
-        u32 i;
-
-        nvgpu_log_info(g, "acquire runlist_lock for active runlists");
-        for (i = 0; i < g->fifo.num_runlists; i++) {
-                runlist = &f->active_runlist_info[i];
-                nvgpu_mutex_acquire(&runlist->runlist_lock);
-        }
-}
-
-void nvgpu_fifo_unlock_active_runlists(struct gk20a *g)
-{
-        struct fifo_gk20a *f = &g->fifo;
-        struct fifo_runlist_info_gk20a *runlist;
-        u32 i;
-
-        nvgpu_log_info(g, "release runlist_lock for active runlists");
-        for (i = 0; i < g->fifo.num_runlists; i++) {
-                runlist = &f->active_runlist_info[i];
-                nvgpu_mutex_release(&runlist->runlist_lock);
-        }
-}
-
 static u32 nvgpu_runlist_append_tsg(struct gk20a *g,
                 struct fifo_runlist_info_gk20a *runlist,
                 u32 **runlist_entry,
@@ -606,7 +580,8 @@ void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
 
 void gk20a_fifo_delete_runlist(struct fifo_gk20a *f)
 {
-        u32 i, j;
+        u32 i;
+        u32 runlist_id;
         struct fifo_runlist_info_gk20a *runlist;
         struct gk20a *g = NULL;
 
@@ -616,10 +591,10 @@ void gk20a_fifo_delete_runlist(struct fifo_gk20a *f)
 
         g = f->g;
 
-        for (i = 0; i < f->num_runlists; i++) {
-                runlist = &f->active_runlist_info[i];
-                for (j = 0; j < MAX_RUNLIST_BUFFERS; j++) {
-                        nvgpu_dma_free(g, &runlist->mem[j]);
+        for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
+                runlist = f->runlist_info[runlist_id];
+                for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
+                        nvgpu_dma_free(g, &runlist->mem[i]);
                 }
 
                 nvgpu_kfree(g, runlist->active_channels);
@@ -629,11 +604,10 @@ void gk20a_fifo_delete_runlist(struct fifo_gk20a *f)
                 runlist->active_tsgs = NULL;
 
                 nvgpu_mutex_destroy(&runlist->runlist_lock);
-                f->runlist_info[runlist->runlist_id] = NULL;
                 nvgpu_kfree(g, runlist);
+                f->runlist_info[runlist_id] = NULL;
         }
 
-        nvgpu_kfree(g, f->active_runlist_info);
         nvgpu_kfree(g, f->runlist_info);
         f->runlist_info = NULL;
         f->max_runlists = 0;
@@ -643,8 +617,7 @@ int nvgpu_init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 {
         struct fifo_runlist_info_gk20a *runlist;
         unsigned int runlist_id;
-        u32 i, j;
-        u32 num_runlists = 0U;
+        u32 i;
         size_t runlist_size;
         int err = 0;
 
@@ -652,39 +625,18 @@ int nvgpu_init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 
         f->max_runlists = g->ops.runlist.count_max();
         f->runlist_info = nvgpu_kzalloc(g,
-                        sizeof(*f->runlist_info) * f->max_runlists);
+                        sizeof(struct fifo_runlist_info_gk20a *) *
+                        f->max_runlists);
         if (f->runlist_info == NULL) {
                 goto clean_up_runlist;
         }
 
         for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
-                if (gk20a_fifo_is_valid_runlist_id(g, runlist_id)) {
-                        num_runlists++;
+                runlist = nvgpu_kzalloc(g, sizeof(*runlist));
+                if (runlist == NULL) {
+                        goto clean_up_runlist;
                 }
-        }
-        f->num_runlists = num_runlists;
-
-        f->active_runlist_info = nvgpu_kzalloc(g,
-                        sizeof(*f->active_runlist_info) * num_runlists);
-        if (f->active_runlist_info == NULL) {
-                goto clean_up_runlist;
-        }
-        nvgpu_log_info(g, "num_runlists=%u", num_runlists);
-
-        /* In most case we want to loop through active runlists only. Here
-         * we need to loop through all possible runlists, to build the mapping
-         * between runlist_info[runlist_id] and active_runlist_info[i].
-         */
-        i = 0U;
-        for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
-                if (!gk20a_fifo_is_valid_runlist_id(g, runlist_id)) {
-                        /* skip inactive runlist */
-                        continue;
-                }
-                runlist = &f->active_runlist_info[i];
-                runlist->runlist_id = runlist_id;
                 f->runlist_info[runlist_id] = runlist;
-                i++;
 
                 runlist->active_channels = nvgpu_kzalloc(g,
                                 DIV_ROUND_UP(f->num_channels,
@@ -706,15 +658,19 @@ int nvgpu_init_runlist(struct gk20a *g, struct fifo_gk20a *f)
                         "runlist_entries %d runlist size %zu",
                         f->num_runlist_entries, runlist_size);
 
-                for (j = 0; j < MAX_RUNLIST_BUFFERS; j++) {
-                        err = nvgpu_dma_alloc_flags_sys(g,
-                                        g->is_virtual ?
-                                        0 : NVGPU_DMA_PHYSICALLY_ADDRESSED,
-                                        runlist_size,
-                                        &runlist->mem[j]);
-                        if (err != 0) {
-                                nvgpu_err(g, "memory allocation failed");
-                                goto clean_up_runlist;
+                /* skip buffer allocation for unused runlists */
+                if (gk20a_fifo_is_valid_runlist_id(g, runlist_id)) {
+                        unsigned long flags = g->is_virtual ? 0 :
+                                NVGPU_DMA_PHYSICALLY_ADDRESSED;
+                        for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
+                                err = nvgpu_dma_alloc_flags_sys(g,
+                                                flags,
+                                                runlist_size,
+                                                &runlist->mem[i]);
+                                if (err != 0) {
+                                        nvgpu_err(g, "memory allocation failed");
+                                        goto clean_up_runlist;
+                                }
                         }
                 }
 
diff --git a/drivers/gpu/nvgpu/common/fifo/tsg.c b/drivers/gpu/nvgpu/common/fifo/tsg.c
index 984a7d23f..03aca9ae7 100644
--- a/drivers/gpu/nvgpu/common/fifo/tsg.c
+++ b/drivers/gpu/nvgpu/common/fifo/tsg.c
@@ -93,8 +93,8 @@ static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
         struct fifo_runlist_info_gk20a *runlist;
         unsigned int i;
 
-        for (i = 0; i < f->num_runlists; ++i) {
-                runlist = &f->active_runlist_info[i];
+        for (i = 0; i < f->max_runlists; ++i) {
+                runlist = f->runlist_info[i];
                 if (test_bit((int)ch->chid, runlist->active_channels)) {
                         return true;
                 }
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 05e820ec7..e62347326 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -388,33 +388,33 @@ static int nvgpu_init_runlist_enginfo(struct gk20a *g, struct fifo_gk20a *f)
 {
         struct fifo_runlist_info_gk20a *runlist;
         struct fifo_engine_info_gk20a *engine_info;
-        u32 i, active_engine_id, pbdma_id, engine_id;
+        unsigned int runlist_id;
+        u32 active_engine_id, pbdma_id, engine_id;
 
         nvgpu_log_fn(g, " ");
 
-        for (i = 0; i < f->num_runlists; i++) {
-                runlist = &f->active_runlist_info[i];
+        for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
+                runlist = f->runlist_info[runlist_id];
                 for (pbdma_id = 0; pbdma_id < f->num_pbdma; pbdma_id++) {
-                        if ((f->pbdma_map[pbdma_id] &
-                                        BIT32(runlist->runlist_id)) != 0U) {
+                        if ((f->pbdma_map[pbdma_id] & BIT32(runlist_id)) != 0U) {
                                 runlist->pbdma_bitmask |= BIT32(pbdma_id);
                         }
                 }
                 nvgpu_log(g, gpu_dbg_info, "runlist %d : pbdma bitmask 0x%x",
-                                runlist->runlist_id, runlist->pbdma_bitmask);
+                                runlist_id, runlist->pbdma_bitmask);
 
                 for (engine_id = 0; engine_id < f->num_engines; ++engine_id) {
                         active_engine_id = f->active_engines_list[engine_id];
                         engine_info = &f->engine_info[active_engine_id];
 
                         if ((engine_info != NULL) &&
-                                (engine_info->runlist_id == runlist->runlist_id)) {
+                                (engine_info->runlist_id == runlist_id)) {
                                 runlist->eng_bitmask |= BIT(active_engine_id);
                         }
                 }
                 nvgpu_log(g, gpu_dbg_info, "runlist %d : act eng bitmask 0x%x",
-                                runlist->runlist_id, runlist->eng_bitmask);
+                                runlist_id, runlist->eng_bitmask);
         }
 
         nvgpu_log_fn(g, "done");
@@ -1340,17 +1340,23 @@ static bool gk20a_fifo_handle_mmu_fault(
         u32 hw_id, /* queried from HW if ~(u32)0 OR mmu_fault_engines == 0*/
         bool id_is_tsg)
 {
+        u32 rlid;
         bool verbose;
 
         nvgpu_log_fn(g, " ");
 
-        nvgpu_fifo_lock_active_runlists(g);
+        nvgpu_log_info(g, "acquire runlist_lock for all runlists");
+        for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
+                nvgpu_mutex_acquire(&g->fifo.runlist_info[rlid]->runlist_lock);
+        }
 
         verbose = gk20a_fifo_handle_mmu_fault_locked(g, mmu_fault_engines,
                         hw_id, id_is_tsg);
 
-        nvgpu_fifo_unlock_active_runlists(g);
-
+        nvgpu_log_info(g, "release runlist_lock for all runlists");
+        for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
+                nvgpu_mutex_release(&g->fifo.runlist_info[rlid]->runlist_lock);
+        }
         return verbose;
 }
 
@@ -1444,8 +1450,12 @@ void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
         bool ref_id_is_tsg = false;
         bool id_is_known = (id_type != ID_TYPE_UNKNOWN) ? true : false;
         bool id_is_tsg = (id_type == ID_TYPE_TSG) ? true : false;
+        u32 rlid;
 
-        nvgpu_fifo_lock_active_runlists(g);
+        nvgpu_log_info(g, "acquire runlist_lock for all runlists");
+        for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
+                nvgpu_mutex_acquire(&g->fifo.runlist_info[rlid]->runlist_lock);
+        }
 
         if (id_is_known) {
                 engine_ids = g->ops.fifo.get_engines_mask_on_id(g,
@@ -1504,7 +1514,10 @@ void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
                 g->ops.fifo.teardown_unmask_intr(g);
         }
 
-        nvgpu_fifo_unlock_active_runlists(g);
+        nvgpu_log_info(g, "release runlist_lock for all runlists");
+        for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
+                nvgpu_mutex_release(&g->fifo.runlist_info[rlid]->runlist_lock);
+        }
 }
 
 void gk20a_fifo_recover(struct gk20a *g, u32 engine_ids,
@@ -2249,14 +2262,18 @@ int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
 
 int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
 {
+        struct fifo_gk20a *f = &g->fifo;
         int ret = 0;
         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
         int mutex_ret = -EINVAL;
+        u32 i;
 
         nvgpu_log_fn(g, "chid: %d", ch->chid);
 
         /* we have no idea which runlist we are using. lock all */
-        nvgpu_fifo_lock_active_runlists(g);
+        for (i = 0; i < g->fifo.max_runlists; i++) {
+                nvgpu_mutex_acquire(&f->runlist_info[i]->runlist_lock);
+        }
 
         if (g->ops.pmu.is_pmu_supported(g)) {
                 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
@@ -2269,7 +2286,9 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
                 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
         }
 
-        nvgpu_fifo_unlock_active_runlists(g);
+        for (i = 0; i < g->fifo.max_runlists; i++) {
+                nvgpu_mutex_release(&f->runlist_info[i]->runlist_lock);
+        }
 
         if (ret != 0) {
                 if (nvgpu_platform_is_silicon(g)) {
@@ -2288,14 +2307,18 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
 
 int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 {
+        struct fifo_gk20a *f = &g->fifo;
         int ret = 0;
         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
         int mutex_ret = -EINVAL;
+        u32 i;
 
         nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
 
         /* we have no idea which runlist we are using. lock all */
-        nvgpu_fifo_lock_active_runlists(g);
+        for (i = 0; i < g->fifo.max_runlists; i++) {
+                nvgpu_mutex_acquire(&f->runlist_info[i]->runlist_lock);
+        }
 
         if (g->ops.pmu.is_pmu_supported(g)) {
                 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
@@ -2308,7 +2331,9 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
                 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
         }
 
-        nvgpu_fifo_unlock_active_runlists(g);
+        for (i = 0; i < g->fifo.max_runlists; i++) {
+                nvgpu_mutex_release(&f->runlist_info[i]->runlist_lock);
+        }
 
         if (ret != 0) {
                 if (nvgpu_platform_is_silicon(g)) {
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index 9f9e9a53f..2409c369e 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -75,7 +75,6 @@ struct tsg_gk20a;
 
 /* generally corresponds to the "pbdma" engine */
 struct fifo_runlist_info_gk20a {
-        u32 runlist_id;
         unsigned long *active_channels;
         unsigned long *active_tsgs;
         /* Each engine has its own SW and HW runlist buffer.*/
@@ -150,16 +149,8 @@ struct fifo_gk20a {
         u32 num_engines;
         u32 *active_engines_list;
 
-        /* Pointers to runlists, indexed by real hw runlist_id.
-         * If a runlist is active, then runlist_info[runlist_id] points
-         * to one entry in active_runlist_info. Otherwise, it is NULL.
-         */
         struct fifo_runlist_info_gk20a **runlist_info;
         u32 max_runlists;
-
-        /* Array of runlists that are actually in use */
-        struct fifo_runlist_info_gk20a *active_runlist_info;
-        u32 num_runlists; /* number of active runlists */
 #ifdef CONFIG_DEBUG_FS
         struct {
                 struct fifo_profile_gk20a *data;
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index f9633c052..bb5bac716 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -624,7 +624,7 @@ static u32 gv11b_fifo_get_runlists_mask(struct gk20a *g, u32 act_eng_bitmask,
         u32 runlists_mask = 0;
         struct fifo_gk20a *f = &g->fifo;
         struct fifo_runlist_info_gk20a *runlist;
-        u32 i, pbdma_bitmask = 0;
+        u32 rlid, pbdma_bitmask = 0;
 
         if (id_type != ID_TYPE_UNKNOWN) {
                 if (id_type == ID_TYPE_TSG) {
@@ -639,30 +639,30 @@ static u32 gv11b_fifo_get_runlists_mask(struct gk20a *g, u32 act_eng_bitmask,
                         pbdma_bitmask = BIT32(mmfault->faulted_pbdma);
                 }
 
-                for (i = 0U; i < f->num_runlists; i++) {
-                        runlist = &f->active_runlist_info[i];
+                for (rlid = 0; rlid < f->max_runlists; rlid++) {
+
+                        runlist = f->runlist_info[rlid];
                         if ((runlist->eng_bitmask & act_eng_bitmask) != 0U) {
-                                runlists_mask |= BIT32(runlist->runlist_id);
+                                runlists_mask |= BIT32(rlid);
                         }
 
                         if ((runlist->pbdma_bitmask & pbdma_bitmask) != 0U) {
-                                runlists_mask |= BIT32(runlist->runlist_id);
+                                runlists_mask |= BIT32(rlid);
                         }
                 }
         }
 
         if (id_type == ID_TYPE_UNKNOWN) {
-                for (i = 0U; i < f->num_runlists; i++) {
-                        runlist = &f->active_runlist_info[i];
-
+                for (rlid = 0; rlid < f->max_runlists; rlid++) {
                         if (act_eng_bitmask != 0U) {
                                 /* eng ids are known */
+                                runlist = f->runlist_info[rlid];
                                 if ((runlist->eng_bitmask & act_eng_bitmask) != 0U) {
-                                        runlists_mask |= BIT32(runlist->runlist_id);
-                                }
+                                        runlists_mask |= BIT32(rlid);
+                                }
                         } else {
-                                runlists_mask |= BIT32(runlist->runlist_id);
+                                runlists_mask |= BIT32(rlid);
                         }
                 }
         }
@@ -813,11 +813,9 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 static void gv11b_fifo_locked_preempt_runlists_rc(struct gk20a *g,
                         u32 runlists_mask)
 {
-        struct fifo_gk20a *f = &g->fifo;
-        struct fifo_runlist_info_gk20a *runlist;
         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
         int mutex_ret = -EINVAL;
-        u32 i;
+        u32 rlid;
 
         /* runlist_lock are locked by teardown and sched are disabled too */
         nvgpu_log_fn(g, "preempt runlists_mask:0x%08x", runlists_mask);
@@ -835,12 +833,11 @@ static void gv11b_fifo_locked_preempt_runlists_rc(struct gk20a *g,
          * Do not poll for preemption to complete. Reset engines served by
          * runlists.
          */
-        for (i = 0U; i < f->num_runlists; i++) {
-                runlist = &f->active_runlist_info[i];
-
-                if ((fifo_runlist_preempt_runlist_m(runlist->runlist_id) &
-                                runlists_mask) != 0U) {
-                        runlist->reset_eng_bitmask = runlist->eng_bitmask;
+        for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
+                if ((runlists_mask &
+                                fifo_runlist_preempt_runlist_m(rlid)) != 0U) {
+                        g->fifo.runlist_info[rlid]->reset_eng_bitmask =
+                                g->fifo.runlist_info[rlid]->eng_bitmask;
                 }
         }
 
@@ -858,14 +855,13 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
                         unsigned int rc_type, u32 runlists_mask)
 {
-        struct fifo_gk20a *f = &g->fifo;
         struct tsg_gk20a *tsg = NULL;
         unsigned long tsgid;
+        u32 rlid;
         struct fifo_runlist_info_gk20a *runlist = NULL;
         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
         int mutex_ret = -EINVAL;
         int err;
-        u32 i;
 
         nvgpu_err(g, "runlist id unknown, abort active tsgs in runlists");
 
@@ -875,14 +871,14 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
                                 PMU_MUTEX_ID_FIFO, &token);
         }
 
-        for (i = 0U; i < f->num_runlists; i++) {
-                runlist = &f->active_runlist_info[i];
-
-                if ((runlists_mask & BIT32(runlist->runlist_id)) == 0U) {
+        for (rlid = 0; rlid < g->fifo.max_runlists;
+                        rlid++) {
+                if ((runlists_mask & BIT32(rlid)) == 0U) {
                         continue;
                 }
                 nvgpu_log(g, gpu_dbg_info, "abort runlist id %d",
-                                runlist->runlist_id);
+                                rlid);
+                runlist = g->fifo.runlist_info[rlid];
 
                 for_each_set_bit(tsgid, runlist->active_tsgs,
                                 g->fifo.num_channels) {
@@ -920,11 +916,11 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
                          * remove all entries from this runlist; don't wait for
                          * the update to finish on hw.
                          */
-                        err = gk20a_runlist_update_locked(g, runlist->runlist_id,
+                        err = gk20a_runlist_update_locked(g, rlid,
                                         NULL, false, false);
                         if (err != 0) {
                                 nvgpu_err(g, "runlist id %d is not cleaned up",
-                                                runlist->runlist_id);
+                                                rlid);
                         }
 
                         gk20a_fifo_abort_tsg(g, tsg, false);
@@ -974,7 +970,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
                         struct mmu_fault_info *mmfault)
 {
         struct tsg_gk20a *tsg = NULL;
-        u32 runlists_mask, rlid, i;
+        u32 runlists_mask, rlid;
         unsigned long pbdma_id;
         struct fifo_runlist_info_gk20a *runlist = NULL;
         unsigned long engine_id;
@@ -984,7 +980,11 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
         u32 num_runlists = 0U;
         unsigned long runlist_served_pbdmas;
 
-        nvgpu_fifo_lock_active_runlists(g);
+        nvgpu_log_fn(g, "acquire runlist_lock for all runlists");
+        for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
+                nvgpu_mutex_acquire(&f->runlist_info[rlid]->
+                                runlist_lock);
+        }
 
         g->ops.fifo.teardown_mask_intr(g);
 
@@ -1007,12 +1007,16 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
          * it corresponds to single runlist id. If eng mask corresponds
          * to multiple runlists, then abort all runlists
          */
-        for (i = 0U; i < f->num_runlists; i++) {
-                runlist = &f->active_runlist_info[i];
-
-                if ((runlist->eng_bitmask & act_eng_bitmask) != 0U) {
-                        runlist_id = runlist->runlist_id;
-                        num_runlists++;
+        for (rlid = 0; rlid < f->max_runlists; rlid++) {
+                if (act_eng_bitmask != 0U) {
+                        /* eng ids are known */
+                        runlist = f->runlist_info[rlid];
+                        if ((runlist->eng_bitmask & act_eng_bitmask) != 0U) {
+                                runlist_id = rlid;
+                                num_runlists++;
+                        }
+                } else {
+                        break;
                 }
         }
         if (num_runlists > 1U) {
@@ -1026,14 +1030,12 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
          * needed for this recovery
          */
         if (runlist_id != FIFO_INVAL_RUNLIST_ID && num_runlists == 1U) {
-                for (i = 0U; i < f->num_runlists; i++) {
-                        runlist = &f->active_runlist_info[i];
-
-                        if (runlist->runlist_id != runlist_id) {
+                for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
+                        if (rlid != runlist_id) {
                                 nvgpu_log_fn(g, "release runlist_lock for "
-                                        "unused runlist id: %d",
-                                        runlist->runlist_id);
-                                nvgpu_mutex_release(&runlist->runlist_lock);
+                                        "unused runlist id: %d", rlid);
+                                nvgpu_mutex_release(&f->runlist_info[rlid]->
+                                                runlist_lock);
                         }
                 }
         }
@@ -1115,10 +1117,10 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
         }
 
         /* check if engine reset should be deferred */
-        for (i = 0U; i < f->num_runlists; i++) {
-                runlist = &f->active_runlist_info[i];
+        for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
 
-                if (((runlists_mask & BIT32(runlist->runlist_id)) != 0U) &&
+                runlist = g->fifo.runlist_info[rlid];
+                if (((runlists_mask & BIT32(rlid)) != 0U) &&
                         (runlist->reset_eng_bitmask != 0U)) {
 
                         unsigned long __reset_eng_bitmask =
@@ -1191,10 +1193,13 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
         if (runlist_id != FIFO_INVAL_RUNLIST_ID) {
                 nvgpu_log_fn(g, "release runlist_lock runlist_id = %d",
                                 runlist_id);
-                runlist = f->runlist_info[runlist_id];
-                nvgpu_mutex_release(&runlist->runlist_lock);
+                nvgpu_mutex_release(&f->runlist_info[runlist_id]->runlist_lock);
         } else {
-                nvgpu_fifo_unlock_active_runlists(g);
+                nvgpu_log_fn(g, "release runlist_lock for all runlists");
+                for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
+                        nvgpu_mutex_release(&f->runlist_info[rlid]->
+                                        runlist_lock);
+                }
         }
 }
 
diff --git a/drivers/gpu/nvgpu/include/nvgpu/runlist.h b/drivers/gpu/nvgpu/include/nvgpu/runlist.h
index 88a732bfe..e5f10b188 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/runlist.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/runlist.h
@@ -58,7 +58,4 @@ void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
 void gk20a_fifo_delete_runlist(struct fifo_gk20a *f);
 int nvgpu_init_runlist(struct gk20a *g, struct fifo_gk20a *f);
 
-void nvgpu_fifo_lock_active_runlists(struct gk20a *g);
-void nvgpu_fifo_unlock_active_runlists(struct gk20a *g);
-
 #endif /* NVGPU_RUNLIST_H */