gpu: nvgpu: reorder runlist enable/disable

Move gk20a_fifo_set_runlist_state() to common code, and turn
gk20a_fifo_{enable,disable}_tsg_sched() into gk20a_tsg_{enable,disable}_sched()
so that they are part of the tsg unit.

Jira NVGPU-1309

Change-Id: I16ffe7f9f97249b5ac0885bba56510847bb6858b
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1978059
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Konsta Holtta
Date: 2018-11-29 16:09:41 +02:00
Committed by: mobile promotions
Parent: e05c0d13a0
Commit: 2f51d7c5ed
6 changed files with 39 additions and 39 deletions
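
Note: for callers, the change is a pure rename plus a header move; scheduling
behaviour is unchanged. A minimal sketch of the call-site migration
(illustrative only, not part of the diff):

	/* before: fifo namespace */
	gk20a_fifo_disable_tsg_sched(g, tsg);
	gk20a_fifo_enable_tsg_sched(g, tsg);

	/* after: tsg namespace, declared next to the other gk20a_tsg_* helpers */
	gk20a_tsg_disable_sched(g, tsg);
	gk20a_tsg_enable_sched(g, tsg);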


@@ -507,6 +507,24 @@ const char *gk20a_fifo_interleave_level_name(u32 interleave_level)
 	return ret_string;
 }
 
+void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
+		u32 runlist_state)
+{
+	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
+	int mutex_ret;
+
+	nvgpu_log(g, gpu_dbg_info, "runlist mask = 0x%08x state = 0x%08x",
+			runlists_mask, runlist_state);
+
+	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+
+	g->ops.fifo.runlist_write_state(g, runlists_mask, runlist_state);
+
+	if (mutex_ret == 0) {
+		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	}
+}
+
 void gk20a_fifo_delete_runlist(struct fifo_gk20a *f)
 {
 	u32 i;
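
Note: the moved function keeps its original locking pattern: it tries to take
the FIFO PMU mutex, performs the state write through the per-chip
runlist_write_state hook either way, and releases the mutex only when the
acquire succeeded (mutex_ret == 0), so the write degrades gracefully when no
PMU mutex is available. A minimal hypothetical caller, using only names visible
in this diff:

	/* hypothetical: enable scheduling on the runlist one TSG uses */
	gk20a_fifo_set_runlist_state(g, BIT32(tsg->runlist_id),
			RUNLIST_ENABLED);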


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -43,7 +43,7 @@ int gk20a_enable_tsg(struct tsg_gk20a *tsg)
 	struct channel_gk20a *ch;
 	bool is_next, is_ctx_reload;
 
-	gk20a_fifo_disable_tsg_sched(g, tsg);
+	gk20a_tsg_disable_sched(g, tsg);
 
 	/*
 	 * Due to h/w bug that exists in Maxwell and Pascal,
@@ -72,7 +72,7 @@ int gk20a_enable_tsg(struct tsg_gk20a *tsg)
 	}
 	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
-	gk20a_fifo_enable_tsg_sched(g, tsg);
+	gk20a_tsg_enable_sched(g, tsg);
 
 	return 0;
 }
@@ -405,6 +405,19 @@ u32 gk20a_tsg_get_timeslice(struct tsg_gk20a *tsg)
 	return tsg->timeslice_us;
 }
 
+void gk20a_tsg_enable_sched(struct gk20a *g, struct tsg_gk20a *tsg)
+{
+	gk20a_fifo_set_runlist_state(g, BIT32(tsg->runlist_id),
+			RUNLIST_ENABLED);
+}
+
+void gk20a_tsg_disable_sched(struct gk20a *g, struct tsg_gk20a *tsg)
+{
+	gk20a_fifo_set_runlist_state(g, BIT32(tsg->runlist_id),
+			RUNLIST_DISABLED);
+}
+
 static void release_used_tsg(struct fifo_gk20a *f, struct tsg_gk20a *tsg)
 {
 	nvgpu_mutex_acquire(&f->tsg_inuse_mutex);
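
Note: for context, gk20a_enable_tsg() now brackets its channel walk with the
renamed helpers. Abridged shape of the function after this change (a sketch
reconstructed from the context lines above, not the full body):

	gk20a_tsg_disable_sched(g, tsg);
	/* enable the TSG's channels under ch_list_lock, in the special
	 * order required by the Maxwell/Pascal h/w bug noted above */
	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
	gk20a_tsg_enable_sched(g, tsg);
	return 0;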


@@ -2513,37 +2513,6 @@ void gk20a_fifo_runlist_write_state(struct gk20a *g, u32 runlists_mask,
 }
 
-void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
-		u32 runlist_state)
-{
-	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-	int mutex_ret;
-
-	nvgpu_log(g, gpu_dbg_info, "runlist mask = 0x%08x state = 0x%08x",
-			runlists_mask, runlist_state);
-
-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
-
-	g->ops.fifo.runlist_write_state(g, runlists_mask, runlist_state);
-
-	if (mutex_ret == 0) {
-		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
-	}
-}
-
-void gk20a_fifo_enable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg)
-{
-	gk20a_fifo_set_runlist_state(g, BIT32(tsg->runlist_id),
-			RUNLIST_ENABLED);
-}
-
-void gk20a_fifo_disable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg)
-{
-	gk20a_fifo_set_runlist_state(g, BIT32(tsg->runlist_id),
-			RUNLIST_DISABLED);
-}
-
 int gk20a_fifo_enable_engine_activity(struct gk20a *g,
 		struct fifo_engine_info_gk20a *eng_info)
 {

@@ -261,8 +261,6 @@ int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
 		bool wait_for_idle);
 void gk20a_fifo_runlist_write_state(struct gk20a *g, u32 runlists_mask,
 		u32 runlist_state);
-void gk20a_fifo_enable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg);
-void gk20a_fifo_disable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg);
 u32 gk20a_fifo_engines_on_ch(struct gk20a *g, u32 chid);


@@ -891,7 +891,7 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 	nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
 
 	/* WAR for Bug 2065990 */
-	gk20a_fifo_disable_tsg_sched(g, tsg);
+	gk20a_tsg_disable_sched(g, tsg);
 
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -902,7 +902,7 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 	}
 
 	/* WAR for Bug 2065990 */
-	gk20a_fifo_enable_tsg_sched(g, tsg);
+	gk20a_tsg_enable_sched(g, tsg);
 
 	nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);
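
Note: the gv11b preempt path shows the same rename inside the Bug 2065990
workaround: scheduling on the TSG's runlist is switched off across the preempt
request and restored afterwards, all under the runlist lock, presumably so the
TSG cannot be rescheduled while the preempt is pending. Abridged shape (a
sketch; the preempt itself sits between the two hunks and is not shown):

	nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
	gk20a_tsg_disable_sched(g, tsg);	/* WAR for Bug 2065990 */
	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
	/* ... preempt the TSG (elided between the hunks) ... */
	gk20a_tsg_enable_sched(g, tsg);		/* WAR for Bug 2065990 */
	nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);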


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -108,6 +108,8 @@ bool nvgpu_tsg_check_ctxsw_timeout(struct tsg_gk20a *tsg,
 int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level);
 int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice);
 u32 gk20a_tsg_get_timeslice(struct tsg_gk20a *tsg);
+void gk20a_tsg_enable_sched(struct gk20a *g, struct tsg_gk20a *tsg);
+void gk20a_tsg_disable_sched(struct gk20a *g, struct tsg_gk20a *tsg);
 int gk20a_tsg_set_priority(struct gk20a *g, struct tsg_gk20a *tsg,
 		u32 priority);
 int gk20a_tsg_alloc_sm_error_states_mem(struct gk20a *g,