gpu: nvgpu: fix TSG enable sequence

Due to a h/w bug in Maxwell and Pascal, we first need to enable all channels
in a TSG that have NEXT or CTX_RELOAD set, and only then enable the rest of
the channels.
Add this sequence to gk20a_tsg_enable().

Add new APIs to enable/disable scheduling of a TSG's runlist:
gk20a_fifo_enable_tsg_sched()
gk20a_fifo_disable_tsg_sched()

Add new APIs to check if a channel has NEXT or CTX_RELOAD set:
gk20a_fifo_channel_status_is_next()
gk20a_fifo_channel_status_is_ctx_reload()
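
Taken together, the fixed sequence in gk20a_tsg_enable() looks roughly like
the sketch below. This is a hedged reconstruction, not the verbatim patch:
the TSG channel-list fields (tsg->ch_list, ch_entry, ch->chid) and the
per-channel enable hook (g->ops.fifo.enable_channel) are assumptions based
on typical nvgpu structure.

/*
 * Sketch only: list fields and the per-channel enable hook are assumed,
 * not taken verbatim from this commit.
 */
int gk20a_tsg_enable(struct tsg_gk20a *tsg)
{
	struct gk20a *g = tsg->g;
	struct channel_gk20a *ch;

	/* Keep the scheduler off this TSG's runlist while channels toggle. */
	gk20a_fifo_disable_tsg_sched(g, tsg);

	/* Pass 1: enable only channels that have NEXT or CTX_RELOAD set. */
	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
		if (gk20a_fifo_channel_status_is_next(g, ch->chid) ||
		    gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid))
			g->ops.fifo.enable_channel(ch);
	}

	/* Pass 2: enable the remaining channels. */
	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
		if (gk20a_fifo_channel_status_is_next(g, ch->chid) ||
		    gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid))
			continue;
		g->ops.fifo.enable_channel(ch);
	}

	gk20a_fifo_enable_tsg_sched(g, tsg);

	return 0;
}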

Bug 1739362

Change-Id: I4891cbd7f22ebc1e0bf32c52801002cdc259dbe1
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1560636
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>

@@ -2671,6 +2671,21 @@ void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
gk20a_dbg_fn("done");
}
void gk20a_fifo_enable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg)
{
gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m(
tsg->runlist_id), RUNLIST_ENABLED,
!RUNLIST_INFO_MUTEX_LOCKED);
}
void gk20a_fifo_disable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg)
{
gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m(
tsg->runlist_id), RUNLIST_DISABLED,
!RUNLIST_INFO_MUTEX_LOCKED);
}
int gk20a_fifo_enable_engine_activity(struct gk20a *g,
struct fifo_engine_info_gk20a *eng_info)
{
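
These two helpers thinly wrap gk20a_fifo_set_runlist_state() with the TSG's
runlist mask, and are naturally used to bracket channel-state updates so the
runlist scheduler does not pick up a half-updated TSG. An illustrative
pairing (the surrounding update step is a placeholder, not from this commit):

	/* Illustrative: freeze the TSG's runlist, update its channels,
	 * then let scheduling resume.
	 */
	gk20a_fifo_disable_tsg_sched(g, tsg);
	/* ... enable/disable the TSG's channels in the required order ... */
	gk20a_fifo_enable_tsg_sched(g, tsg);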
@@ -3413,6 +3428,27 @@ const char *gk20a_decode_pbdma_chan_eng_ctx_status(u32 index)
	return pbdma_chan_eng_ctx_status_str[index];
}

bool gk20a_fifo_channel_status_is_next(struct gk20a *g, u32 chid)
{
	u32 channel = gk20a_readl(g, ccsr_channel_r(chid));

	return ccsr_channel_next_v(channel) == ccsr_channel_next_true_v();
}

bool gk20a_fifo_channel_status_is_ctx_reload(struct gk20a *g, u32 chid)
{
	u32 channel = gk20a_readl(g, ccsr_channel_r(chid));
	u32 status = ccsr_channel_status_v(channel);

	return (status == ccsr_channel_status_pending_ctx_reload_v() ||
		status == ccsr_channel_status_pending_acq_ctx_reload_v() ||
		status == ccsr_channel_status_on_pbdma_ctx_reload_v() ||
		status == ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v() ||
		status == ccsr_channel_status_on_eng_ctx_reload_v() ||
		status == ccsr_channel_status_on_eng_pending_ctx_reload_v() ||
		status == ccsr_channel_status_on_eng_pending_acq_ctx_reload_v());
}

void gk20a_dump_channel_status_ramfc(struct gk20a *g,
	struct gk20a_debug_output *o,
	u32 chid,
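
Both predicates decode a single read of ccsr_channel_r(chid): one checks the
NEXT bit, the other matches every ctx-reload variant of the channel status
field. A small illustrative helper (the name is hypothetical, not part of
this commit) shows how the first-pass test of the enable sequence can be
expressed on top of them:

	/* Hypothetical helper: true if the channel must be enabled in the
	 * first pass of the TSG enable sequence.
	 */
	static bool channel_needs_early_enable(struct gk20a *g, u32 chid)
	{
		return gk20a_fifo_channel_status_is_next(g, chid) ||
		       gk20a_fifo_channel_status_is_ctx_reload(g, chid);
	}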