gpu: nvgpu: add teardown_ch_tsg fifo ops

A teardown_ch_tsg fifo op is added because the t19x s/w recovery
procedure is different from that of legacy chips.

JIRA GPUT19X-7

Change-Id: I5b88f2c1a19d309e5c97c588ddf9689163a75fea
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: http://git-master/r/1327932
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Author: Seema Khowala
Date: 2017-03-16 13:22:26 -07:00
Committed by: mobile promotions
parent 06fe28567d
commit 0778d7f331
5 changed files with 66 additions and 11 deletions
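The new HAL op decouples the recovery procedure from its callers: the legacy
gk20a_fifo_recover() entry point below becomes a thin wrapper around
g->ops.fifo.teardown_ch_tsg, so a t19x chip can install its own handler with
richer fault context. A minimal sketch of such an override follows, assuming
the nvgpu headers touched in this change; the gv11b_* names are placeholders
and are not part of this commit.

/*
 * Sketch only: gv11b_* names are assumed.  The op signature, the RC_TYPE_
 * values and struct mmu_fault_info are the ones added by this change.
 */
static void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
		u32 id, unsigned int id_type, unsigned int rc_type,
		struct mmu_fault_info *mmfault)
{
	/* chip-specific recovery goes here; unlike the legacy path it can
	 * use the rc_type and decoded mmfault information directly */
}

void gv11b_init_fifo(struct gpu_ops *gops)
{
	/* inherit the gm20b defaults, then override the recovery op */
	gm20b_init_fifo(gops);
	gops->fifo.teardown_ch_tsg = gv11b_fifo_teardown_ch_tsg;
}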


@@ -997,7 +997,7 @@ clean_up:
return err;
}
-static void gk20a_fifo_handle_runlist_event(struct gk20a *g)
+void gk20a_fifo_handle_runlist_event(struct gk20a *g)
{
u32 runlist_event = gk20a_readl(g, fifo_intr_runlist_r());
@@ -1276,7 +1276,7 @@ bool gk20a_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid)
return (engine_subid == fifo_intr_mmu_fault_info_engine_subid_gpc_v());
}
-static bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id,
+bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id,
u32 engine_subid, bool fake_fault)
{
u32 engine_enum = ENGINE_INVAL_GK20A;
@@ -1831,9 +1831,9 @@ void gk20a_fifo_recover_tsg(struct gk20a *g, u32 tsgid, bool verbose)
nvgpu_mutex_release(&g->dbg_sessions_lock);
}
-void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
-		u32 hw_id, bool id_is_tsg,
-		bool id_is_known, bool verbose)
+void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
+		u32 hw_id, unsigned int id_type, unsigned int rc_type,
+		struct mmu_fault_info *mmfault)
{
unsigned long engine_id, i;
unsigned long _engine_ids = __engine_ids;
@@ -1843,12 +1843,8 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
u32 ref_type;
u32 ref_id;
u32 ref_id_is_tsg = false;
-	if (verbose)
-		gk20a_debug_dump(g->dev);
-
-	if (g->ops.ltc.flush)
-		g->ops.ltc.flush(g);
+	bool id_is_known = (id_type != ID_TYPE_UNKNOWN) ? true : false;
+	bool id_is_tsg = (id_type == ID_TYPE_TSG) ? true : false;
if (id_is_known) {
engine_ids = gk20a_fifo_engines_on_id(g, hw_id, id_is_tsg);
@@ -1916,6 +1912,27 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
}
}
+void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
+		u32 hw_id, bool id_is_tsg,
+		bool id_is_known, bool verbose)
+{
+	unsigned int id_type;
+
+	if (verbose)
+		gk20a_debug_dump(g->dev);
+
+	if (g->ops.ltc.flush)
+		g->ops.ltc.flush(g);
+
+	if (id_is_known)
+		id_type = id_is_tsg ? ID_TYPE_TSG : ID_TYPE_CHANNEL;
+	else
+		id_type = ID_TYPE_UNKNOWN;
+
+	g->ops.fifo.teardown_ch_tsg(g, __engine_ids, hw_id, id_type,
+			RC_TYPE_NORMAL, NULL);
+}
/* force reset channel and tsg (if it's part of one) */
int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
u32 err_code, bool verbose)
@@ -4225,4 +4242,5 @@ void gk20a_init_fifo(struct gpu_ops *gops)
gops->fifo.userd_gp_get = gk20a_fifo_userd_gp_get;
gops->fifo.userd_gp_put = gk20a_fifo_userd_gp_put;
gops->fifo.pbdma_acquire_val = gk20a_fifo_pbdma_acquire_val;
+	gops->fifo.teardown_ch_tsg = gk20a_fifo_teardown_ch_tsg;
}


@@ -40,6 +40,9 @@
#define GRFIFO_TIMEOUT_CHECK_PERIOD_US 100000
+#define RC_TYPE_NORMAL 0
+#define RC_TYPE_MMU_FAULT 1
/*
* Number of entries in the kickoff latency buffer, used to calculate
* the profiling and histogram. This number is calculated to be statistically
@@ -376,4 +379,11 @@ int gk20a_fifo_setup_userd(struct channel_gk20a *c);
u32 gk20a_fifo_pbdma_acquire_val(u64 timeout);
+void gk20a_fifo_handle_runlist_event(struct gk20a *g);
+bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id,
+		u32 engine_subid, bool fake_fault);
+void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
+		u32 hw_id, unsigned int id_type, unsigned int rc_type,
+		struct mmu_fault_info *mmfault);
#endif /*__GR_GK20A_H__*/


@@ -471,6 +471,9 @@ struct gpu_ops {
int (*reset_enable_hw)(struct gk20a *g);
int (*setup_userd)(struct channel_gk20a *c);
u32 (*pbdma_acquire_val)(u64 timeout);
+		void (*teardown_ch_tsg)(struct gk20a *g, u32 act_eng_bitmask,
+			u32 id, unsigned int id_type, unsigned int rc_type,
+			struct mmu_fault_info *mmfault);
} fifo;
struct pmu_v {
/*used for change of enum zbc update cmd id from ver 0 to ver1*/


@@ -341,6 +341,29 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate);
void gk20a_mm_cbc_clean(struct gk20a *g);
void gk20a_mm_l2_invalidate(struct gk20a *g);
+struct mmu_fault_info {
+	u64 inst_ptr;
+	u32 inst_aperture;
+	u64 fault_addr;
+	u32 fault_addr_aperture;
+	u32 timestamp_lo;
+	u32 timestamp_hi;
+	u32 mmu_engine_id;
+	u32 gpc_id;
+	u32 client_type;
+	u32 client_id;
+	u32 fault_type;
+	u32 access_type;
+	u32 protected_mode;
+	u32 replayable_fault;
+	u32 replay_fault_en;
+	u32 valid;
+	u32 faulted_pbdma;
+	u32 faulted_engine;
+	u32 hw_chid;
+	struct channel_gk20a *refch;
+};
struct mm_gk20a {
struct gk20a *g;


@@ -226,4 +226,5 @@ void gm20b_init_fifo(struct gpu_ops *gops)
gops->fifo.is_preempt_pending = gk20a_fifo_is_preempt_pending;
gops->fifo.init_pbdma_intr_descs = gm20b_fifo_init_pbdma_intr_descs;
gops->fifo.reset_enable_hw = gk20a_init_fifo_reset_enable_hw;
+	gops->fifo.teardown_ch_tsg = gk20a_fifo_teardown_ch_tsg;
}
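A usage sketch for the new parameters, assuming the structures added above;
this is not code from this change. A fault handler that has already decoded a
struct mmu_fault_info can hand it straight to the op with RC_TYPE_MMU_FAULT
instead of going through gk20a_fifo_recover(). gk20a_is_channel_marked_as_tsg()
is an existing helper in the driver; the example_* name is hypothetical.

/*
 * Sketch only: example_* is a hypothetical caller.  It picks the hw id and
 * id_type from the referenced channel and forwards the decoded fault to the
 * chip's teardown_ch_tsg op.
 */
static void example_teardown_on_mmu_fault(struct gk20a *g,
		u32 act_eng_bitmask, struct mmu_fault_info *mmfault)
{
	unsigned int id_type = ID_TYPE_UNKNOWN;
	u32 id = mmfault->hw_chid;

	if (mmfault->refch && gk20a_is_channel_marked_as_tsg(mmfault->refch)) {
		id_type = ID_TYPE_TSG;
		id = mmfault->refch->tsgid;
	} else if (mmfault->refch) {
		id_type = ID_TYPE_CHANNEL;
	}

	g->ops.fifo.teardown_ch_tsg(g, act_eng_bitmask, id, id_type,
			RC_TYPE_MMU_FAULT, mmfault);
}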