gpu: nvgpu: fix TSG enable sequence

Due to a h/w bug in Maxwell and Pascal, we first need to enable all
channels in a TSG that have NEXT or CTX_RELOAD set, and only then
enable the rest of the channels.
Add this sequence to gk20a_enable_tsg()

Add new APIs to enable/disable scheduling of a TSG's runlist:
gk20a_fifo_enable_tsg_sched()
gk20a_fifo_disable_tsg_sched()

Add new APIs to check whether a channel has NEXT or CTX_RELOAD set:
gk20a_fifo_channel_status_is_next()
gk20a_fifo_channel_status_is_ctx_reload()
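
The core of the change is an ordering constraint when re-enabling a TSG's channels. The following standalone sketch (plain user-space C with a made-up struct chan, enable_channel() stub, and sample data; the actual implementation is the gk20a_enable_tsg() hunk further below) only models the two-pass order:

/*
 * Hypothetical model of the enable ordering, not driver code: channels whose
 * status already shows NEXT or CTX_RELOAD are enabled in a first pass, the
 * remaining channels in a second pass.
 */
#include <stdbool.h>
#include <stdio.h>

struct chan {
	int chid;
	bool next;
	bool ctx_reload;
};

static void enable_channel(const struct chan *c)
{
	printf("enable chid %d\n", c->chid);
}

int main(void)
{
	struct chan tsg[] = {
		{ 0, false, false },
		{ 1, true,  false },	/* NEXT set: must go first */
		{ 2, false, true  },	/* CTX_RELOAD set: must go first */
	};
	const int n = sizeof(tsg) / sizeof(tsg[0]);
	int i;

	/* pass 1: channels with NEXT or CTX_RELOAD set */
	for (i = 0; i < n; i++)
		if (tsg[i].next || tsg[i].ctx_reload)
			enable_channel(&tsg[i]);

	/* pass 2: the rest of the channels */
	for (i = 0; i < n; i++)
		if (!(tsg[i].next || tsg[i].ctx_reload))
			enable_channel(&tsg[i]);

	return 0;
}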

Bug 1739362

Change-Id: I4891cbd7f22ebc1e0bf32c52801002cdc259dbe1
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1560636
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Deepak Nibade
2017-09-13 05:41:52 -07:00
committed by mobile promotions
parent 7d6d040531
commit 460951ed09
7 changed files with 208 additions and 6 deletions

@@ -2671,6 +2671,21 @@ void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
gk20a_dbg_fn("done");
}
void gk20a_fifo_enable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg)
{
gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m(
tsg->runlist_id), RUNLIST_ENABLED,
!RUNLIST_INFO_MUTEX_LOCKED);
}
void gk20a_fifo_disable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg)
{
gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m(
tsg->runlist_id), RUNLIST_DISABLED,
!RUNLIST_INFO_MUTEX_LOCKED);
}
int gk20a_fifo_enable_engine_activity(struct gk20a *g,
struct fifo_engine_info_gk20a *eng_info)
{
@@ -3413,6 +3428,27 @@ const char *gk20a_decode_pbdma_chan_eng_ctx_status(u32 index)
	return pbdma_chan_eng_ctx_status_str[index];
}

bool gk20a_fifo_channel_status_is_next(struct gk20a *g, u32 chid)
{
	u32 channel = gk20a_readl(g, ccsr_channel_r(chid));

	return ccsr_channel_next_v(channel) == ccsr_channel_next_true_v();
}

bool gk20a_fifo_channel_status_is_ctx_reload(struct gk20a *g, u32 chid)
{
	u32 channel = gk20a_readl(g, ccsr_channel_r(chid));
	u32 status = ccsr_channel_status_v(channel);

	/* true if the CCSR status field reports any of the ctx-reload states */
	return (status == ccsr_channel_status_pending_ctx_reload_v() ||
		status == ccsr_channel_status_pending_acq_ctx_reload_v() ||
		status == ccsr_channel_status_on_pbdma_ctx_reload_v() ||
		status == ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v() ||
		status == ccsr_channel_status_on_eng_ctx_reload_v() ||
		status == ccsr_channel_status_on_eng_pending_ctx_reload_v() ||
		status == ccsr_channel_status_on_eng_pending_acq_ctx_reload_v());
}

void gk20a_dump_channel_status_ramfc(struct gk20a *g,
		struct gk20a_debug_output *o,
		u32 chid,

@@ -248,6 +248,9 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
			bool wait_for_idle);
int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
			bool wait_for_idle);
void gk20a_fifo_enable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg);
void gk20a_fifo_disable_tsg_sched(struct gk20a *g, struct tsg_gk20a *tsg);
u32 gk20a_fifo_engines_on_ch(struct gk20a *g, u32 chid);
int gk20a_fifo_reschedule_runlist(struct gk20a *g, u32 runlist_id);
@@ -362,6 +365,9 @@ const char *gk20a_decode_pbdma_chan_eng_ctx_status(u32 index);
void gk20a_fifo_enable_channel(struct channel_gk20a *ch);
void gk20a_fifo_disable_channel(struct channel_gk20a *ch);
bool gk20a_fifo_channel_status_is_next(struct gk20a *g, u32 chid);
bool gk20a_fifo_channel_status_is_ctx_reload(struct gk20a *g, u32 chid);
struct channel_gk20a *gk20a_refch_from_inst_ptr(struct gk20a *g, u64 inst_ptr);
void gk20a_fifo_channel_unbind(struct channel_gk20a *ch_gk20a);

@@ -29,13 +29,37 @@ int gk20a_enable_tsg(struct tsg_gk20a *tsg)
{
	struct gk20a *g = tsg->g;
	struct channel_gk20a *ch;
	bool is_next, is_ctx_reload;

	gk20a_fifo_disable_tsg_sched(g, tsg);

	/*
	 * Due to a h/w bug in Maxwell and Pascal, we first need to enable
	 * all channels with NEXT or CTX_RELOAD set, and only then enable
	 * the rest of the channels.
	 */
	down_read(&tsg->ch_list_lock);

	/* first pass: enable channels that already have NEXT or CTX_RELOAD set */
	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
		is_next = gk20a_fifo_channel_status_is_next(g, ch->chid);
		is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid);

		if (is_next || is_ctx_reload)
			g->ops.fifo.enable_channel(ch);
	}

	/* second pass: enable the remaining channels */
	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
		is_next = gk20a_fifo_channel_status_is_next(g, ch->chid);
		is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid);

		if (is_next || is_ctx_reload)
			continue;

		g->ops.fifo.enable_channel(ch);
	}
	up_read(&tsg->ch_list_lock);

	gk20a_fifo_enable_tsg_sched(g, tsg);

	return 0;
}

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012-2016, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2012-2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -114,6 +114,42 @@ static inline u32 ccsr_channel_status_v(u32 r)
{
return (r >> 24) & 0xf;
}
static inline u32 ccsr_channel_status_pending_ctx_reload_v(void)
{
return 0x00000002;
}
static inline u32 ccsr_channel_status_pending_acq_ctx_reload_v(void)
{
return 0x00000004;
}
static inline u32 ccsr_channel_status_on_pbdma_ctx_reload_v(void)
{
return 0x0000000a;
}
static inline u32 ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v(void)
{
return 0x0000000b;
}
static inline u32 ccsr_channel_status_on_eng_ctx_reload_v(void)
{
return 0x0000000c;
}
static inline u32 ccsr_channel_status_on_eng_pending_ctx_reload_v(void)
{
return 0x0000000d;
}
static inline u32 ccsr_channel_status_on_eng_pending_acq_ctx_reload_v(void)
{
return 0x0000000e;
}
static inline u32 ccsr_channel_next_v(u32 r)
{
return (r >> 1) & 0x1;
}
static inline u32 ccsr_channel_next_true_v(void)
{
return 0x00000001;
}
static inline u32 ccsr_channel_busy_v(u32 r)
{
return (r >> 28) & 0x1;
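
For reference, the new ccsr_channel_status_*_v() values above are compared against the status field of the CCSR channel register: the accessors shown here read the status code from bits 27:24 and the NEXT flag from bit 1. A minimal standalone decode example (the register value is made up for illustration; not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical raw CCSR channel register value */
	uint32_t ccsr = 0x0a000002u;

	/* same bit layout as ccsr_channel_status_v() / ccsr_channel_next_v() above */
	unsigned int status = (ccsr >> 24) & 0xf;	/* 0xa: on_pbdma_ctx_reload */
	unsigned int next = (ccsr >> 1) & 0x1;		/* 1: NEXT is set */

	printf("status=0x%x next=%u\n", status, next);
	return 0;
}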

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -110,6 +110,42 @@ static inline u32 ccsr_channel_status_v(u32 r)
{
return (r >> 24) & 0xf;
}
static inline u32 ccsr_channel_status_pending_ctx_reload_v(void)
{
return 0x00000002;
}
static inline u32 ccsr_channel_status_pending_acq_ctx_reload_v(void)
{
return 0x00000004;
}
static inline u32 ccsr_channel_status_on_pbdma_ctx_reload_v(void)
{
return 0x0000000a;
}
static inline u32 ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v(void)
{
return 0x0000000b;
}
static inline u32 ccsr_channel_status_on_eng_ctx_reload_v(void)
{
return 0x0000000c;
}
static inline u32 ccsr_channel_status_on_eng_pending_ctx_reload_v(void)
{
return 0x0000000d;
}
static inline u32 ccsr_channel_status_on_eng_pending_acq_ctx_reload_v(void)
{
return 0x0000000e;
}
static inline u32 ccsr_channel_next_v(u32 r)
{
return (r >> 1) & 0x1;
}
static inline u32 ccsr_channel_next_true_v(void)
{
return 0x00000001;
}
static inline u32 ccsr_channel_busy_v(u32 r)
{
return (r >> 28) & 0x1;

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -114,12 +114,40 @@ static inline u32 ccsr_channel_status_pending_ctx_reload_v(void)
{
return 0x00000002;
}
static inline u32 ccsr_channel_status_pending_acq_ctx_reload_v(void)
{
return 0x00000004;
}
static inline u32 ccsr_channel_status_on_pbdma_ctx_reload_v(void)
{
return 0x0000000a;
}
static inline u32 ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v(void)
{
return 0x0000000b;
}
static inline u32 ccsr_channel_status_on_eng_ctx_reload_v(void)
{
return 0x0000000c;
}
static inline u32 ccsr_channel_status_on_eng_pending_ctx_reload_v(void)
{
return 0x0000000d;
}
static inline u32 ccsr_channel_status_on_eng_pending_acq_ctx_reload_v(void)
{
return 0x0000000e;
}
static inline u32 ccsr_channel_next_v(u32 r)
{
return (r >> 1) & 0x1;
}
static inline u32 ccsr_channel_next_true_v(void)
{
return 0x00000001;
}
static inline u32 ccsr_channel_busy_v(u32 r)
{
return (r >> 28) & 0x1;
}
#endif

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -110,6 +110,42 @@ static inline u32 ccsr_channel_status_v(u32 r)
{
return (r >> 24) & 0xf;
}
static inline u32 ccsr_channel_status_pending_ctx_reload_v(void)
{
return 0x00000002;
}
static inline u32 ccsr_channel_status_pending_acq_ctx_reload_v(void)
{
return 0x00000004;
}
static inline u32 ccsr_channel_status_on_pbdma_ctx_reload_v(void)
{
return 0x0000000a;
}
static inline u32 ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v(void)
{
return 0x0000000b;
}
static inline u32 ccsr_channel_status_on_eng_ctx_reload_v(void)
{
return 0x0000000c;
}
static inline u32 ccsr_channel_status_on_eng_pending_ctx_reload_v(void)
{
return 0x0000000d;
}
static inline u32 ccsr_channel_status_on_eng_pending_acq_ctx_reload_v(void)
{
return 0x0000000e;
}
static inline u32 ccsr_channel_next_v(u32 r)
{
return (r >> 1) & 0x1;
}
static inline u32 ccsr_channel_next_true_v(void)
{
return 0x00000001;
}
static inline u32 ccsr_channel_busy_v(u32 r)
{
return (r >> 28) & 0x1;