gpu: nvgpu: move gk20a_fifo_recover_tsg into tsg unit

gk20a_fifo_recover_tsg makes high-level software calls and invokes
gk20a_fifo_recover. This function belongs to the tsg unit, so it is
moved to tsg.c and renamed nvgpu_tsg_recover.

Jira NVGPU-1237

Change-Id: Id1911fb182817b0cfc47b3219065cba6c4ca507a
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1970034
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:       Debarshi Dutta <ddutta@nvidia.com>
Date:         2018-12-10 11:09:27 +05:30
Committed by: mobile promotions
Parent:       fb114f8fda
Commit:       0188b93e30

4 changed files with 37 additions and 33 deletions
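For a quick illustration of what the rename means for callers, here is a minimal sketch in C. Only nvgpu_tsg_recover, its signature, struct gk20a, struct tsg_gk20a, and the RC_TYPE_PBDMA_FAULT code are taken from this change; the helper name and the tsg.h include path are assumptions made for the example.

#include <nvgpu/gk20a.h>	/* struct gk20a; this include path appears in the diff below */
#include <nvgpu/tsg.h>	/* assumed location of the new nvgpu_tsg_recover declaration */

/* Hypothetical fault-handling helper: after this commit, TSG-wide
 * recovery is requested through the tsg unit instead of the fifo unit.
 */
static void example_pbdma_fault_rc(struct gk20a *g, struct tsg_gk20a *tsg)
{
	/* was: gk20a_fifo_recover_tsg(g, tsg, true, RC_TYPE_PBDMA_FAULT);
	 * verbose = true asks for a debug dump in the no-engine path when
	 * the TSG is marked in error.
	 */
	nvgpu_tsg_recover(g, tsg, true, RC_TYPE_PBDMA_FAULT);
}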

View File

@@ -21,6 +21,7 @@
  */
 
 #include <nvgpu/bug.h>
+#include <nvgpu/debug.h>
 #include <nvgpu/kmem.h>
 #include <nvgpu/log.h>
 #include <nvgpu/os_sched.h>
@@ -29,6 +30,8 @@
 #include <nvgpu/gk20a.h>
 #include <nvgpu/error_notifier.h>
 
+#include "gk20a/gr_gk20a.h"
+
 bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch)
 {
 	return !(ch->tsgid == NVGPU_INVALID_TSG_ID);
@@ -183,6 +186,35 @@ int gk20a_tsg_unbind_channel(struct channel_gk20a *ch)
 	return 0;
 }
 
+void nvgpu_tsg_recover(struct gk20a *g, struct tsg_gk20a *tsg,
+		bool verbose, u32 rc_type)
+{
+	u32 engines;
+
+	/* stop context switching to prevent engine assignments from
+	 * changing until TSG is recovered
+	 */
+	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
+	gr_gk20a_disable_ctxsw(g);
+
+	engines = g->ops.fifo.get_engines_mask_on_id(g, tsg->tsgid, true);
+
+	if (engines != 0U) {
+		gk20a_fifo_recover(g, engines, tsg->tsgid, true, true, verbose,
+				rc_type);
+	} else {
+		if (nvgpu_tsg_mark_error(g, tsg) && verbose) {
+			gk20a_debug_dump(g);
+		}
+
+		gk20a_fifo_abort_tsg(g, tsg, false);
+	}
+
+	gr_gk20a_enable_ctxsw(g);
+	nvgpu_mutex_release(&g->dbg_sessions_lock);
+}
+
 int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
 {
 	struct tsg_gk20a *tsg = NULL;

View File

@@ -1822,33 +1822,6 @@ u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg)
 	return engines;
 }
 
-void gk20a_fifo_recover_tsg(struct gk20a *g, struct tsg_gk20a *tsg,
-		bool verbose, u32 rc_type)
-{
-	u32 engines;
-
-	/* stop context switching to prevent engine assignments from
-	   changing until TSG is recovered */
-	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-	gr_gk20a_disable_ctxsw(g);
-
-	engines = g->ops.fifo.get_engines_mask_on_id(g, tsg->tsgid, true);
-
-	if (engines != 0U) {
-		gk20a_fifo_recover(g, engines, tsg->tsgid, true, true, verbose,
-				rc_type);
-	} else {
-		if (nvgpu_tsg_mark_error(g, tsg) && verbose) {
-			gk20a_debug_dump(g);
-		}
-
-		gk20a_fifo_abort_tsg(g, tsg, false);
-	}
-
-	gr_gk20a_enable_ctxsw(g);
-	nvgpu_mutex_release(&g->dbg_sessions_lock);
-}
-
 void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
 		u32 hw_id, unsigned int id_type, unsigned int rc_type,
 		struct mmu_fault_info *mmfault)
@@ -1993,8 +1966,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
 		}
 		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
-		gk20a_fifo_recover_tsg(g, tsg, verbose,
-				RC_TYPE_FORCE_RESET);
+		nvgpu_tsg_recover(g, tsg, verbose, RC_TYPE_FORCE_RESET);
 	} else {
 		g->ops.fifo.set_error_notifier(ch, err_code);
 		nvgpu_channel_recover(g, ch, verbose,
@@ -2467,7 +2439,7 @@ static void gk20a_fifo_pbdma_fault_rc(struct gk20a *g,
 			}
 		}
 		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
-		gk20a_fifo_recover_tsg(g, tsg, true, RC_TYPE_PBDMA_FAULT);
+		nvgpu_tsg_recover(g, tsg, true, RC_TYPE_PBDMA_FAULT);
 	}
 }
@@ -2652,7 +2624,7 @@ void gk20a_fifo_preempt_timeout_rc_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 		gk20a_channel_put(ch);
 	}
 	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
-	gk20a_fifo_recover_tsg(g, tsg, true, RC_TYPE_PREEMPT_TIMEOUT);
+	nvgpu_tsg_recover(g, tsg, true, RC_TYPE_PREEMPT_TIMEOUT);
 }
 
 void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, struct channel_gk20a *ch)

View File

@@ -287,8 +287,6 @@ void gk20a_fifo_recover(struct gk20a *g,
 		u32 hw_id, /* if ~0, will be queried from HW */
 		bool id_is_tsg, /* ignored if hw_id == ~0 */
 		bool id_is_known, bool verbose, u32 rc_type);
-void gk20a_fifo_recover_tsg(struct gk20a *g, struct tsg_gk20a *tsg,
-		bool verbose, u32 rc_type);
 int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
 		u32 err_code, bool verbose);
 void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id);

View File

@@ -94,6 +94,8 @@ int gk20a_disable_tsg(struct tsg_gk20a *tsg);
 int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
 		struct channel_gk20a *ch);
 int gk20a_tsg_unbind_channel(struct channel_gk20a *ch);
+void nvgpu_tsg_recover(struct gk20a *g, struct tsg_gk20a *tsg,
+		bool verbose, u32 rc_type);
 void nvgpu_tsg_set_ctx_mmu_error(struct gk20a *g,
 		struct tsg_gk20a *tsg);