mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-22 09:12:24 +03:00
gpu: nvgpu: sw quiesce when recovery is disabled

When CONFIG_NVGPU_RECOVERY is disabled, warn if recovery function is
entered with sw_quiesce_pending false.

Jira NVGPU-3871

Change-Id: Ic8e878ff6637c07f80b1a3542355ec51f729fe12
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2175446
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
committed by
Alex Waterman
parent
2bd4e4d8e0
commit
b8465d479d
@@ -58,7 +58,7 @@ void nvgpu_rc_fifo_recover(struct gk20a *g, u32 eng_bitmask,
|
||||
g->ops.fifo.recover(g, eng_bitmask, hw_id, id_type,
|
||||
rc_type, NULL);
|
||||
#else
|
||||
nvgpu_err(g, "recovery not supported");
|
||||
WARN_ON(!g->sw_quiesce_pending);
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -80,7 +80,7 @@ void nvgpu_rc_ctxsw_timeout(struct gk20a *g, u32 eng_bitmask,
|
||||
nvgpu_rc_fifo_recover(g, eng_bitmask, tsg->tsgid, true, true, debug_dump,
|
||||
RC_TYPE_CTXSW_TIMEOUT);
|
||||
#else
|
||||
nvgpu_err(g, "recovery not supported");
|
||||
WARN_ON(!g->sw_quiesce_pending);
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -136,7 +136,7 @@ void nvgpu_rc_pbdma_fault(struct gk20a *g, u32 pbdma_id, u32 error_notifier,
|
||||
nvgpu_err(g, "Invalid pbdma_status.id_type");
|
||||
}
|
||||
#else
|
||||
nvgpu_err(g, "recovery not supported");
|
||||
WARN_ON(!g->sw_quiesce_pending);
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -150,7 +150,7 @@ void nvgpu_rc_runlist_update(struct gk20a *g, u32 runlist_id)
|
||||
RC_TYPE_RUNLIST_UPDATE_TIMEOUT);
|
||||
}
|
||||
#else
|
||||
nvgpu_err(g, "recovery not supported");
|
||||
WARN_ON(!g->sw_quiesce_pending);
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -162,7 +162,7 @@ void nvgpu_rc_preempt_timeout(struct gk20a *g, struct nvgpu_tsg *tsg)
|
||||
|
||||
nvgpu_rc_tsg_and_related_engines(g, tsg, true, RC_TYPE_PREEMPT_TIMEOUT);
|
||||
#else
|
||||
nvgpu_err(g, "recovery not supported");
|
||||
WARN_ON(!g->sw_quiesce_pending);
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -192,7 +192,7 @@ void nvgpu_rc_gr_fault(struct gk20a *g, struct nvgpu_tsg *tsg,
|
||||
false, false, true, RC_TYPE_GR_FAULT);
|
||||
}
|
||||
#else
|
||||
nvgpu_err(g, "recovery not supported");
|
||||
WARN_ON(!g->sw_quiesce_pending);
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -203,7 +203,7 @@ void nvgpu_rc_sched_error_bad_tsg(struct gk20a *g)
|
||||
nvgpu_rc_fifo_recover(g, 0, INVAL_ID, false, false, false,
|
||||
RC_TYPE_SCHED_ERR);
|
||||
#else
|
||||
nvgpu_err(g, "recovery not supported");
|
||||
WARN_ON(!g->sw_quiesce_pending);
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -275,6 +275,6 @@ void nvgpu_rc_tsg_and_related_engines(struct gk20a *g, struct nvgpu_tsg *tsg,
|
||||
nvgpu_mutex_release(&g->dbg_sessions_lock);
|
||||
#endif
|
||||
#else
|
||||
nvgpu_err(g, "recovery not supported");
|
||||
WARN_ON(!g->sw_quiesce_pending);
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -410,8 +410,13 @@ void gv11b_mm_mmu_fault_handle_mmu_fault_common(struct gk20a *g,
|
||||
}
|
||||
|
||||
if (rc_type != RC_TYPE_NO_RC) {
|
||||
#ifdef CONFIG_NVGPU_RECOVERY
|
||||
g->ops.fifo.recover(g, act_eng_bitmask,
|
||||
id, id_type, rc_type, mmufault);
|
||||
#else
|
||||
nvgpu_err(g, "mmu fault id=%u id_type=%u act_eng_bitmask=%08x",
|
||||
id, id_type, act_eng_bitmask);
|
||||
#endif
|
||||
}
|
||||
} else {
|
||||
if (mmufault->fault_type == gmmu_fault_type_pte_v()) {
|
||||
|
||||
@@ -473,7 +473,9 @@ static const struct gpu_ops vgpu_gp10b_ops = {
|
||||
.preempt_tsg = vgpu_fifo_preempt_tsg,
|
||||
.is_preempt_pending = NULL,
|
||||
.reset_enable_hw = NULL,
|
||||
#ifdef CONFIG_NVGPU_RECOVERY
|
||||
.recover = NULL,
|
||||
#endif
|
||||
.setup_sw = vgpu_fifo_setup_sw,
|
||||
.cleanup_sw = vgpu_fifo_cleanup_sw,
|
||||
.set_sm_exception_type_mask = vgpu_set_sm_exception_type_mask,
|
||||
|
||||
@@ -584,7 +584,9 @@ static const struct gpu_ops vgpu_gv11b_ops = {
|
||||
.preempt_tsg = vgpu_fifo_preempt_tsg,
|
||||
.is_preempt_pending = gv11b_fifo_is_preempt_pending,
|
||||
.reset_enable_hw = NULL,
|
||||
#ifdef CONFIG_NVGPU_RECOVERY
|
||||
.recover = NULL,
|
||||
#endif
|
||||
.setup_sw = vgpu_fifo_setup_sw,
|
||||
.cleanup_sw = vgpu_fifo_cleanup_sw,
|
||||
.set_sm_exception_type_mask = vgpu_set_sm_exception_type_mask,
|
||||
|
||||
@@ -1087,9 +1087,11 @@ struct gpu_ops {
|
||||
int (*is_preempt_pending)(struct gk20a *g, u32 id,
|
||||
unsigned int id_type);
|
||||
int (*reset_enable_hw)(struct gk20a *g);
|
||||
#ifdef CONFIG_NVGPU_RECOVERY
|
||||
void (*recover)(struct gk20a *g, u32 act_eng_bitmask,
|
||||
u32 id, unsigned int id_type, unsigned int rc_type,
|
||||
struct mmu_fault_info *mmfault);
|
||||
#endif
|
||||
void (*intr_set_recover_mask)(struct gk20a *g);
|
||||
void (*intr_unset_recover_mask)(struct gk20a *g);
|
||||
#ifdef CONFIG_NVGPU_DEBUGGER
|
||||
|
||||
Reference in New Issue
Block a user