Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: remove code for ch not bound to tsg
- Remove handling for channels that are no longer bound to a tsg,
since a channel can still be referenceable while no longer being part of a tsg
- Use tsg_gk20a_from_ch() to get a pointer to the tsg for a given channel
- Clear unhandled gr interrupts
Bug 2429295
JIRA NVGPU-1580
Change-Id: I9da43a2bc9a0282c793b9f301eaf8e8604f91d70
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1972492
(cherry picked from commit 013ca60edd in dev-kernel)
Reviewed-on: https://git-master.nvidia.com/r/2018262
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Debarshi Dutta <ddutta@nvidia.com>
Tested-by: Debarshi Dutta <ddutta@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Bibek Basu <bbasu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: d975bda398
Commit: c9d4df288d
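The diff below touches gk20a_fifo_deferred_reset(), gk20a_fifo_handle_mmu_fault_locked() and gk20a_fifo_force_reset_ch(). The common pattern it applies is: resolve the TSG through tsg_gk20a_from_ch() and treat a NULL result as "channel is still referenceable but no longer bound to a TSG". The fragment below is a minimal illustrative sketch of that pattern only, not code from the commit; the function name example_handle_unbound_ch is made up for illustration, while tsg_gk20a_from_ch(), nvgpu_err() and the structs are the driver's own.

/*
 * Illustrative sketch (not part of the commit): the lookup pattern the
 * diff standardizes on. example_handle_unbound_ch() is a hypothetical name.
 */
static void example_handle_unbound_ch(struct gk20a *g, struct channel_gk20a *ch)
{
	struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch);

	if (tsg != NULL) {
		/* normal path: act on the TSG the channel is bound to */
	} else {
		/* channel is referenceable but no longer part of a TSG */
		nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid);
	}
}

gk20a_fifo_deferred_reset() and gk20a_fifo_force_reset_ch() in the hunks below follow exactly this shape, with the engine lookup or the recovery call in the non-NULL branch.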
@@ -1580,7 +1580,8 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt)
 
 int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
 {
-	u32 engine_id, engines;
+	unsigned long engine_id, engines = 0U;
+	struct tsg_gk20a *tsg;
 
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 	gr_gk20a_disable_ctxsw(g);
@@ -1589,12 +1590,14 @@ int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
 		goto clean_up;
 	}
 
-	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		engines = gk20a_fifo_engines_on_id(g, ch->tsgid, true);
+	tsg = tsg_gk20a_from_ch(ch);
+	if (tsg != NULL) {
+		engines = gk20a_fifo_engines_on_id(g, tsg->tsgid, true);
 	} else {
-		engines = gk20a_fifo_engines_on_id(g, ch->chid, false);
+		nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid);
 	}
-	if (!engines) {
+
+	if (engines == 0U) {
 		goto clean_up;
 	}
 
@@ -1750,16 +1753,18 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
 		} else if (type == fifo_engine_status_id_type_chid_v()) {
 			ch = &g->fifo.channel[id];
 			refch = gk20a_channel_get(ch);
+			if (refch != NULL) {
+				tsg = tsg_gk20a_from_ch(refch);
+			}
 		}
 	} else {
 		/* read channel based on instruction pointer */
 		ch = gk20a_refch_from_inst_ptr(g,
 				mmfault_info.inst_ptr);
 		refch = ch;
+		if (refch != NULL) {
+			tsg = tsg_gk20a_from_ch(refch);
+		}
 	}
 
-	if (ch && gk20a_is_channel_marked_as_tsg(ch)) {
-		tsg = &g->fifo.tsg[ch->tsgid];
-	}
-
 	/* check if engine reset should be deferred */
@@ -1786,16 +1791,10 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
 	}
 
 #ifdef CONFIG_GK20A_CTXSW_TRACE
-	/*
-	 * For non fake mmu fault, both tsg and ch pointers
-	 * could be valid. Check tsg first.
-	 */
-	if (tsg)
+	if (tsg) {
 		gk20a_ctxsw_trace_tsg_reset(g, tsg);
-	else if (ch)
-		gk20a_ctxsw_trace_channel_reset(g, ch);
+	}
 #endif
-
 	/*
 	 * Disable the channel/TSG from hw and increment syncpoints.
 	 */
@@ -1815,26 +1814,10 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
 			if (refch) {
 				gk20a_channel_put(ch);
 			}
-		} else if (ch) {
-			if (refch) {
-				if (g->fifo.deferred_reset_pending) {
-					g->ops.fifo.disable_channel(ch);
-				} else {
-					if (!fake_fault) {
-						gk20a_fifo_set_ctx_mmu_error_ch(
-							g, refch);
-					}
-
-					verbose = gk20a_fifo_error_ch(g,
-							refch);
-					gk20a_channel_abort(ch, false);
-				}
-				gk20a_channel_put(ch);
-			} else {
-				nvgpu_err(g,
-					"mmu error in freed channel %d",
-					ch->chid);
-			}
+		} else if (refch != NULL) {
+			nvgpu_err(g, "mmu error in unbound channel %d",
+					ch->chid);
+			gk20a_channel_put(ch);
 		} else if (mmfault_info.inst_ptr ==
 				nvgpu_inst_block_addr(g, &g->mm.bar1.inst_block)) {
 			nvgpu_err(g, "mmu fault from bar1");
@@ -2116,7 +2099,7 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
 			rc_type, NULL);
 }
 
-/* force reset channel and tsg (if it's part of one) */
+/* force reset channel and tsg */
 int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
 			u32 err_code, bool verbose)
 {
@@ -2126,7 +2109,6 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
 	struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch);
 
 	if (tsg != NULL) {
-
 		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 
 		nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
@@ -2142,9 +2124,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
 		gk20a_fifo_recover_tsg(g, tsg, verbose,
 				RC_TYPE_FORCE_RESET);
 	} else {
-		g->ops.fifo.set_error_notifier(ch, err_code);
-		gk20a_fifo_recover_ch(g, ch, verbose,
-				RC_TYPE_FORCE_RESET);
+		nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid);
 	}
 
 	return 0;