gpu: nvgpu: recover ctxsw timeout only for kernel submit

Context switch timeout is checked only when
CONFIG_NVGPU_KERNEL_MODE_SUBMIT is defined. Hence move the
context switch timeout recovery case under the same #ifdef,
to avoid dead code in the safety build.
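
For illustration only, a minimal sketch of the guarded control flow after
this change (not the driver source verbatim; the helper names are taken
from the diff below, while the function signature and log text are
simplified):

bool handle_ctxsw_timeout_sketch(struct gk20a *g, struct nvgpu_tsg *tsg,
				 u32 engine_id)
{
	bool recover = false;
	u32 ms = 0;

#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
	bool debug_dump = false;

	if (tsg != NULL) {
		recover = g->ops.tsg.check_ctxsw_timeout(tsg, &debug_dump, &ms);
		if (recover) {
			/* recovery path is compiled only for kernel-mode submit */
			nvgpu_rc_ctxsw_timeout(g, BIT32(engine_id), tsg, debug_dump);
			return recover;
		}
	}
#endif
	/* safety build: no recovery code is compiled in, only status logging */
	nvgpu_log_info(g, "engine=%u is waiting for ctxsw for %d ms",
		engine_id, ms);
	return recover;
}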

Jira NVGPU-3400

Change-Id: I23176b3bd5cd6fd1346c7aabd327dcc4f340c9ac
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2254331
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Tested-by: Sagar Kadamati <skadamati@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vinod Gopalakrishnakurup <vinodg@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Thomas Fleury
Date: 2019-12-03 08:39:23 -05:00
Committed by: Alex Waterman
Parent: 5a17ccb83f
Commit: a7656276ae
2 changed files with 25 additions and 29 deletions


@@ -118,21 +118,21 @@ bool gk20a_fifo_handle_ctxsw_timeout(struct gk20a *g)
 #ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
 	if (tsg != NULL) {
 		recover = g->ops.tsg.check_ctxsw_timeout(tsg, &debug_dump, &ms);
+		if (recover) {
+			nvgpu_err(g,
+				"fifo ctxsw timeout error: "
+				"engine=%u, %s=%d, ms=%u",
+				engine_id, is_tsg ? "tsg" : "ch", id, ms);
+			nvgpu_rc_ctxsw_timeout(g, BIT32(engine_id), tsg, debug_dump);
+			return recover;
+		}
 	}
 #endif
-	if (recover) {
-		nvgpu_err(g,
-			"fifo ctxsw timeout error: "
-			"engine=%u, %s=%d, ms=%u",
-			engine_id, is_tsg ? "tsg" : "ch", id, ms);
-		nvgpu_rc_ctxsw_timeout(g, BIT32(engine_id), tsg, debug_dump);
-	} else {
-		nvgpu_log_info(g,
-			"fifo is waiting for ctxsw switch for %d ms, "
-			"%s=%d", ms, is_tsg ? "tsg" : "ch", id);
-	}
+	nvgpu_log_info(g,
+		"fifo is waiting for ctxsw switch for %d ms, "
+		"%s=%d", ms, is_tsg ? "tsg" : "ch", id);
 	return recover;
 }


@@ -33,15 +33,6 @@
 #include <nvgpu/hw/gv11b/hw_fifo_gv11b.h>
 
-static const char *ctxsw_status_invalid_str = "invalid";
-
-static const char *const ctxsw_timeout_status_desc[] = {
-	"awaiting ack",
-	"eng was reset",
-	"ack received",
-	"dropped timeout"
-};
-
 void gv11b_fifo_ctxsw_timeout_enable(struct gk20a *g, bool enable)
 {
 	u32 timeout;
@@ -70,7 +61,6 @@ void gv11b_fifo_ctxsw_timeout_enable(struct gk20a *g, bool enable)
 			nvgpu_log_info(g,
 				"new fifo_eng_ctxsw_timeout reg val = 0x%08x",
 				timeout);
 			nvgpu_writel(g, fifo_eng_ctxsw_timeout_r(), timeout);
 		}
 	} else {
@@ -186,7 +176,6 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g)
 	u32 engine_id, active_eng_id;
 	u32 timeout_val, ctxsw_timeout_engines;
 	u32 info_status;
-	const char *info_status_str;
 	struct nvgpu_tsg *tsg = NULL;
 
 	/* get ctxsw timedout engines */
@@ -208,8 +197,15 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g)
 				fifo_intr_ctxsw_timeout_engine_pending_f(
 					active_eng_id)) != 0U) {
 			u32 ms = 0;
+#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
 			bool debug_dump = false;
+			const char *const ctxsw_timeout_status_desc[] = {
+				"awaiting ack",
+				"eng was reset",
+				"ack received",
+				"dropped timeout"
+			};
+#endif
 			tsgid = gv11b_fifo_ctxsw_timeout_info(g, active_eng_id,
 					&info_status);
 			tsg = nvgpu_tsg_check_and_get_from_id(g, tsgid);
@@ -224,9 +220,8 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g)
 #ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
 			recover = g->ops.tsg.check_ctxsw_timeout(tsg,
 					&debug_dump, &ms);
-#endif
 			if (recover) {
-				info_status_str = ctxsw_status_invalid_str;
+				const char *info_status_str = "invalid";
 				if (info_status <
 					ARRAY_SIZE(ctxsw_timeout_status_desc)) {
 					info_status_str =
@@ -240,11 +235,12 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g)
 				nvgpu_rc_ctxsw_timeout(g, BIT32(active_eng_id),
 					tsg, debug_dump);
-			} else {
-				nvgpu_log_info(g,
-					"fifo is waiting for ctxsw switch: "
-					"for %d ms, %s=%d", ms, "tsg", tsgid);
+				continue;
 			}
+#endif
+			nvgpu_log_info(g,
+				"fifo is waiting for ctxsw switch: "
+				"for %d ms, %s=%d", ms, "tsg", tsgid);
 		}
 	}
 
 	/* clear interrupt */