gpu: nvgpu: move gk20a_fifo_recover_tsg into tsg unit

gk20a_fifo_recover_tsg makes high-level software calls and invokes
gk20a_fifo_recover, so it belongs in the tsg unit. Move it to the
tsg.c file and rename it to nvgpu_tsg_recover (see the caller-side
sketch below).

Jira NVGPU-1237

Change-Id: Id1911fb182817b0cfc47b3219065cba6c4ca507a
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1970034
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
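
For reference, a minimal caller-side sketch of what this move and rename
imply. The wrapper function name, the <nvgpu/tsg.h> header path, and the
assumption that gk20a_fifo_recover_tsg took the same argument list are
illustrative only; only the nvgpu_tsg_recover signature is taken from the
diff below.

#include <nvgpu/gk20a.h>
#include <nvgpu/tsg.h>	/* assumed new home of the nvgpu_tsg_recover declaration */

/* Hypothetical recovery path in fifo code (name and rc_type source assumed) */
static void handle_tsg_fault(struct gk20a *g, struct tsg_gk20a *tsg, u32 rc_type)
{
	/*
	 * Before this change the fifo-level helper was called here, e.g.:
	 *	gk20a_fifo_recover_tsg(g, tsg, true, rc_type);
	 * After this change the tsg unit owns the recovery entry point:
	 */
	nvgpu_tsg_recover(g, tsg, true, rc_type);
}
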
commit 0188b93e30 (parent fb114f8fda)
Author: Debarshi Dutta <ddutta@nvidia.com>
Date:   2018-12-10 11:09:27 +05:30
Committed by: mobile promotions
4 changed files with 37 additions and 33 deletions

@@ -21,6 +21,7 @@
*/
#include <nvgpu/bug.h>
#include <nvgpu/debug.h>
#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include <nvgpu/os_sched.h>
@@ -29,6 +30,8 @@
#include <nvgpu/gk20a.h>
#include <nvgpu/error_notifier.h>
#include "gk20a/gr_gk20a.h"
bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch)
{
return !(ch->tsgid == NVGPU_INVALID_TSG_ID);
@@ -183,6 +186,35 @@ int gk20a_tsg_unbind_channel(struct channel_gk20a *ch)
	return 0;
}

void nvgpu_tsg_recover(struct gk20a *g, struct tsg_gk20a *tsg,
		bool verbose, u32 rc_type)
{
	u32 engines;

	/* stop context switching to prevent engine assignments from
	   changing until TSG is recovered */
	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	gr_gk20a_disable_ctxsw(g);

	engines = g->ops.fifo.get_engines_mask_on_id(g, tsg->tsgid, true);

	if (engines != 0U) {
		gk20a_fifo_recover(g, engines, tsg->tsgid, true, true, verbose,
				rc_type);
	} else {
		if (nvgpu_tsg_mark_error(g, tsg) && verbose) {
			gk20a_debug_dump(g);
		}

		gk20a_fifo_abort_tsg(g, tsg, false);
	}

	gr_gk20a_enable_ctxsw(g);
	nvgpu_mutex_release(&g->dbg_sessions_lock);
}

int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
{
	struct tsg_gk20a *tsg = NULL;