gpu: nvgpu: add function to enable/disable runlists sched

-gk20a_fifo_set_runlist_state() can be used to enable/disable the runlist
 scheduler. This change would also be needed for t19x fifo recovery.
-Also delete the gk20a_fifo_disable_all_engine_activity() function, as it is
 not used anywhere.

JIRA GPUT19X-7

Change-Id: I6bb9a7574a473327f0e47060f32d52cd90551c6d
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: http://git-master/r/1315180
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Seema Khowala
2017-03-03 12:36:16 -08:00
committed by mobile promotions
parent 17df192180
commit e1e059454a
2 changed files with 72 additions and 49 deletions

View File

@@ -2596,25 +2596,76 @@ int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch)
return err; return err;
} }
int gk20a_fifo_enable_engine_activity(struct gk20a *g, static void gk20a_fifo_sched_disable_rw(struct gk20a *g, u32 runlists_mask,
struct fifo_engine_info_gk20a *eng_info) u32 runlist_state)
{
u32 reg_val;
reg_val = gk20a_readl(g, fifo_sched_disable_r());
if (runlist_state == RUNLIST_DISABLED)
reg_val |= runlists_mask;
else
reg_val &= (~runlists_mask);
gk20a_writel(g, fifo_sched_disable_r(), reg_val);
}
void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
u32 runlist_state,
int is_runlist_info_mutex_locked)
{ {
u32 token = PMU_INVALID_MUTEX_OWNER_ID; u32 token = PMU_INVALID_MUTEX_OWNER_ID;
u32 mutex_ret; u32 mutex_ret;
u32 enable; u32 runlist_id;
gk20a_dbg_fn(""); gk20a_dbg_fn("");
if (!is_runlist_info_mutex_locked) {
gk20a_dbg_info("acquire runlist_info mutex");
for (runlist_id = 0; runlist_id < g->fifo.max_runlists;
runlist_id++) {
if (runlists_mask &
fifo_sched_disable_runlist_m(runlist_id))
nvgpu_mutex_acquire(&g->fifo.
runlist_info[runlist_id].mutex);
}
}
mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
enable = gk20a_readl(g, fifo_sched_disable_r()); gk20a_fifo_sched_disable_rw(g, runlists_mask, runlist_state);
enable &= ~(fifo_sched_disable_true_v() << eng_info->runlist_id);
gk20a_writel(g, fifo_sched_disable_r(), enable);
if (!mutex_ret) if (!mutex_ret)
pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
if (!is_runlist_info_mutex_locked) {
gk20a_dbg_info("release runlist_info mutex");
for (runlist_id = 0; runlist_id < g->fifo.max_runlists;
runlist_id++) {
if (runlists_mask &
fifo_sched_disable_runlist_m(runlist_id))
nvgpu_mutex_release(&g->fifo.
runlist_info[runlist_id].mutex);
}
}
gk20a_dbg_fn("done"); gk20a_dbg_fn("done");
}
int gk20a_fifo_enable_engine_activity(struct gk20a *g,
struct fifo_engine_info_gk20a *eng_info)
{
gk20a_dbg_fn("");
gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m(
eng_info->runlist_id), RUNLIST_ENABLED,
!RUNLIST_INFO_MUTEX_LOCKED);
gk20a_dbg_fn("done");
return 0; return 0;
} }
@@ -2643,7 +2694,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
{ {
u32 gr_stat, pbdma_stat, chan_stat, eng_stat, ctx_stat; u32 gr_stat, pbdma_stat, chan_stat, eng_stat, ctx_stat;
u32 pbdma_chid = FIFO_INVAL_CHANNEL_ID; u32 pbdma_chid = FIFO_INVAL_CHANNEL_ID;
u32 engine_chid = FIFO_INVAL_CHANNEL_ID, disable; u32 engine_chid = FIFO_INVAL_CHANNEL_ID;
u32 token = PMU_INVALID_MUTEX_OWNER_ID; u32 token = PMU_INVALID_MUTEX_OWNER_ID;
u32 mutex_ret; u32 mutex_ret;
u32 err = 0; u32 err = 0;
@@ -2658,12 +2709,9 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
disable = gk20a_readl(g, fifo_sched_disable_r()); gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m(
disable = set_field(disable, eng_info->runlist_id), RUNLIST_DISABLED,
fifo_sched_disable_runlist_m(eng_info->runlist_id), !RUNLIST_INFO_MUTEX_LOCKED);
fifo_sched_disable_runlist_f(fifo_sched_disable_true_v(),
eng_info->runlist_id));
gk20a_writel(g, fifo_sched_disable_r(), disable);
/* chid from pbdma status */ /* chid from pbdma status */
pbdma_stat = gk20a_readl(g, fifo_pbdma_status_r(eng_info->pbdma_id)); pbdma_stat = gk20a_readl(g, fifo_pbdma_status_r(eng_info->pbdma_id));
@@ -2712,40 +2760,6 @@ clean_up:
return err; return err;
} }
int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
bool wait_for_idle)
{
unsigned int i;
int err = 0, ret = 0;
u32 active_engine_id;
for (i = 0; i < g->fifo.num_engines; i++) {
active_engine_id = g->fifo.active_engines_list[i];
err = gk20a_fifo_disable_engine_activity(g,
&g->fifo.engine_info[active_engine_id],
wait_for_idle);
if (err) {
gk20a_err(dev_from_gk20a(g),
"failed to disable engine %d activity\n", active_engine_id);
ret = err;
break;
}
}
if (err) {
while (i-- != 0) {
active_engine_id = g->fifo.active_engines_list[i];
err = gk20a_fifo_enable_engine_activity(g,
&g->fifo.engine_info[active_engine_id]);
if (err)
gk20a_err(dev_from_gk20a(g),
"failed to re-enable engine %d activity\n", active_engine_id);
}
}
return ret;
}
static void gk20a_fifo_runlist_reset_engines(struct gk20a *g, u32 runlist_id) static void gk20a_fifo_runlist_reset_engines(struct gk20a *g, u32 runlist_id)
{ {
struct fifo_gk20a *f = &g->fifo; struct fifo_gk20a *f = &g->fifo;

View File

@@ -47,6 +47,11 @@
#define FIFO_PROFILING_ENTRIES 16384 #define FIFO_PROFILING_ENTRIES 16384
#endif #endif
#define RUNLIST_DISABLED 0
#define RUNLIST_ENABLED 1
#define RUNLIST_INFO_MUTEX_LOCKED 1
/* generally corresponds to the "pbdma" engine */ /* generally corresponds to the "pbdma" engine */
struct fifo_runlist_info_gk20a { struct fifo_runlist_info_gk20a {
@@ -290,11 +295,13 @@ const char *gk20a_fifo_interleave_level_name(u32 interleave_level);
int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type, int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type,
u32 *inst_id); u32 *inst_id);
u32 gk20a_fifo_get_engine_ids(struct gk20a *g, u32 engine_id[], u32 engine_id_sz, u32 engine_enum); u32 gk20a_fifo_get_engine_ids(struct gk20a *g, u32 engine_id[],
u32 engine_id_sz, u32 engine_enum);
void gk20a_fifo_delete_runlist(struct fifo_gk20a *f); void gk20a_fifo_delete_runlist(struct fifo_gk20a *f);
struct fifo_engine_info_gk20a *gk20a_fifo_get_engine_info(struct gk20a *g, u32 engine_id); struct fifo_engine_info_gk20a *gk20a_fifo_get_engine_info(struct gk20a *g,
u32 engine_id);
bool gk20a_fifo_is_valid_engine_id(struct gk20a *g, u32 engine_id); bool gk20a_fifo_is_valid_engine_id(struct gk20a *g, u32 engine_id);
@@ -317,6 +324,8 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f);
void gk20a_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist); void gk20a_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist);
void gk20a_get_ch_runlist_entry(struct channel_gk20a *ch, u32 *runlist); void gk20a_get_ch_runlist_entry(struct channel_gk20a *ch, u32 *runlist);
void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
u32 runlist_state, int runlist_mutex_state);
u32 gk20a_userd_gp_get(struct gk20a *g, struct channel_gk20a *c); u32 gk20a_userd_gp_get(struct gk20a *g, struct channel_gk20a *c);
void gk20a_userd_gp_put(struct gk20a *g, struct channel_gk20a *c); void gk20a_userd_gp_put(struct gk20a *g, struct channel_gk20a *c);