Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: add nvgpu_tsg_reset_faulted_eng_pbdma
Add nvgpu_tsg_reset_faulted_eng_pbdma.

Remove gv11b_reset_faulted_tsg and
gv11b_fifo_reset_pbdma_and_eng_faulted.

JIRA NVGPU-1314

Change-Id: Icb5278b715b4c65837b79dc2766db4f608f051bf
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2098138
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Commit: 7b2db862fe
Parent: 5e12623785
Committed by: mobile promotions
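The new helper centralizes the per-channel "faulted" cleanup that was previously open-coded in the gv11b FIFO HAL. A minimal usage sketch follows (the calling function is hypothetical; only the helper and its arguments come from this change). It asks common code to clear both the engine and PBDMA faulted bits for every channel bound to the TSG, and it is safe to call with a NULL tsg or on chips that never install g->ops.channel.reset_faulted.

/* Hypothetical caller; mirrors the call sites updated in this commit. */
static void example_recover_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
{
        /* Clear both ENG and PBDMA faulted bits for every channel in tsg. */
        nvgpu_tsg_reset_faulted_eng_pbdma(g, tsg, true, true);
}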
@@ -840,3 +840,24 @@ void nvgpu_tsg_abort(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt)
         nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 }
 
+void nvgpu_tsg_reset_faulted_eng_pbdma(struct gk20a *g, struct tsg_gk20a *tsg,
+                bool eng, bool pbdma)
+{
+        struct channel_gk20a *ch;
+
+        if (g->ops.channel.reset_faulted == NULL) {
+                return;
+        }
+
+        if (tsg == NULL) {
+                return;
+        }
+
+        nvgpu_log(g, gpu_dbg_info, "reset faulted eng and pbdma bits in ccsr");
+
+        nvgpu_rwsem_down_read(&tsg->ch_list_lock);
+        nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+                g->ops.channel.reset_faulted(g, ch, eng, pbdma);
+        }
+        nvgpu_rwsem_up_read(&tsg->ch_list_lock);
+}
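For context, the early return on a NULL g->ops.channel.reset_faulted means the helper is a no-op on chips that do not provide the per-channel hook. A hypothetical hookup sketch (the chip function and init function names are illustrative and not part of this change; only the op pointer and its signature come from the diff):

/* Illustrative per-chip implementation of the HAL op. */
static void chip_channel_reset_faulted(struct gk20a *g,
                struct channel_gk20a *ch, bool eng, bool pbdma)
{
        /* Chip-specific: clear the requested ENG/PBDMA faulted bits in the
         * CCSR channel register for ch. */
}

/* Illustrative HAL init: chips without the feature leave the op NULL,
 * and nvgpu_tsg_reset_faulted_eng_pbdma() then returns early. */
static void chip_init_channel_hal(struct gk20a *g)
{
        g->ops.channel.reset_faulted = chip_channel_reset_faulted;
}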
@@ -305,34 +305,6 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
         return ret;
 }
 
-static void gv11b_reset_faulted_tsg(struct tsg_gk20a *tsg, bool eng, bool pbdma)
-{
-        struct gk20a *g = tsg->g;
-        struct channel_gk20a *ch;
-
-        nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-        nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
-                g->ops.channel.reset_faulted(g, ch, eng, pbdma);
-        }
-        nvgpu_rwsem_up_read(&tsg->ch_list_lock);
-}
-
-void gv11b_fifo_reset_pbdma_and_eng_faulted(struct gk20a *g,
-                struct tsg_gk20a *tsg,
-                u32 faulted_pbdma, u32 faulted_engine)
-{
-        if (tsg == NULL) {
-                return;
-        }
-
-        nvgpu_log(g, gpu_dbg_intr, "reset faulted pbdma:0x%x eng:0x%x",
-                        faulted_pbdma, faulted_engine);
-
-        gv11b_reset_faulted_tsg(tsg,
-                        faulted_engine != FIFO_INVAL_ENGINE_ID,
-                        faulted_pbdma != FIFO_INVAL_PBDMA_ID);
-}
-
 static u32 gv11b_fifo_get_runlists_mask(struct gk20a *g, u32 act_eng_bitmask,
                 u32 id, unsigned int id_type, unsigned int rc_type,
                 struct mmu_fault_info *mmfault)
@@ -590,9 +562,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
 
                 g->ops.tsg.disable(tsg);
 
-                /* assume all pbdma and eng faulted are set */
-                nvgpu_log(g, gpu_dbg_info, "reset pbdma and eng faulted");
-                gv11b_reset_faulted_tsg(tsg, true, true);
+                nvgpu_tsg_reset_faulted_eng_pbdma(g, tsg, true, true);
 
 #ifdef CONFIG_GK20A_CTXSW_TRACE
                 nvgpu_gr_fecs_trace_add_tsg_reset(g, tsg);
@@ -725,9 +695,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
         if (rc_type == RC_TYPE_MMU_FAULT) {
                 gk20a_debug_dump(g);
                 client_type = mmfault->client_type;
-                gv11b_fifo_reset_pbdma_and_eng_faulted(g, tsg,
-                                mmfault->faulted_pbdma,
-                                mmfault->faulted_engine);
+                nvgpu_tsg_reset_faulted_eng_pbdma(g, tsg, true, true);
         }
 
         if (tsg != NULL) {
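Note on the (true, true) arguments above: the removed gv11b wrapper derived the two booleans from the fault information, roughly as in the fragment below (taken from the removed code shown earlier); the updated recovery call sites instead assume both bits need clearing, matching the abort path's "assume all pbdma and eng faulted are set" behaviour.

/* Mapping used by the removed gv11b_fifo_reset_pbdma_and_eng_faulted(): */
bool eng   = (faulted_engine != FIFO_INVAL_ENGINE_ID);
bool pbdma = (faulted_pbdma != FIFO_INVAL_PBDMA_ID);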
@@ -36,9 +36,6 @@
 
 struct gpu_ops;
 
-void gv11b_fifo_reset_pbdma_and_eng_faulted(struct gk20a *g,
-                struct tsg_gk20a *tsg,
-                u32 faulted_pbdma, u32 faulted_engine);
 void gv11b_mmu_fault_id_to_eng_pbdma_id_and_veid(struct gk20a *g,
         u32 mmu_fault_id, u32 *active_engine_id, u32 *veid, u32 *pbdma_id);
@@ -36,6 +36,7 @@
 #include <nvgpu/fifo.h>
 #include <nvgpu/gk20a.h>
 #include <nvgpu/channel.h>
+#include <nvgpu/tsg.h>
 #include <nvgpu/nvgpu_err.h>
 #include <nvgpu/ltc.h>
 #include <nvgpu/rc.h>
@@ -653,12 +654,10 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
                 /* CE page faults are not reported as replayable */
                 nvgpu_log(g, gpu_dbg_intr, "CE Faulted");
                 err = gv11b_fb_fix_page_fault(g, mmfault);
-                if ((mmfault->refch != NULL) &&
-                                ((u32)mmfault->refch->tsgid != FIFO_INVAL_TSG_ID)) {
-                        gv11b_fifo_reset_pbdma_and_eng_faulted(g,
-                                        &g->fifo.tsg[mmfault->refch->tsgid],
-                                        mmfault->faulted_pbdma,
-                                        mmfault->faulted_engine);
+
+                if (mmfault->refch != NULL) {
+                        tsg = tsg_gk20a_from_ch(mmfault->refch);
+                        nvgpu_tsg_reset_faulted_eng_pbdma(g, tsg, true, true);
                 }
                 if (err == 0) {
                         nvgpu_log(g, gpu_dbg_intr, "CE Page Fault Fixed");
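The CE fault path above now resolves the TSG from the faulting channel via tsg_gk20a_from_ch() instead of indexing g->fifo.tsg[] by tsgid. A small sketch of the resulting pattern, assuming (not shown in this diff) that tsg_gk20a_from_ch() returns NULL for a channel not bound to a TSG:

struct tsg_gk20a *tsg = tsg_gk20a_from_ch(mmfault->refch);

/* A NULL tsg is tolerated by the common helper, so the old explicit
 * FIFO_INVAL_TSG_ID check is no longer needed at this call site. */
nvgpu_tsg_reset_faulted_eng_pbdma(g, tsg, true, true);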
@@ -156,5 +156,6 @@ void nvgpu_tsg_set_error_notifier(struct gk20a *g, struct tsg_gk20a *tsg,
 bool nvgpu_tsg_ctxsw_timeout_debug_dump_state(struct tsg_gk20a *tsg);
 void nvgpu_tsg_set_ctxsw_timeout_accumulated_ms(struct tsg_gk20a *tsg, u32 ms);
 void nvgpu_tsg_abort(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt);
-
+void nvgpu_tsg_reset_faulted_eng_pbdma(struct gk20a *g, struct tsg_gk20a *tsg,
+                bool eng, bool pbdma);
 #endif /* NVGPU_TSG_H */