mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: move engine functions to engines.c

Removed fifo.runlist_busy_engines ops.

Moved to engines.c and renamed:
  gk20a_fifo_get_failing_engine_data -> nvgpu_engine_find_busy_doing_ctxsw
  gk20a_fifo_get_faulty_id_type      -> nvgpu_engine_get_id_and_type
  gk20a_fifo_runlist_busy_engines    -> nvgpu_engine_get_runlist_busy_engines

JIRA NVGPU-1314

Change-Id: I89c81f331321d47a616a785082d66f9b4a51ff71
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2093788
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 9ca3782666
commit ca628dfd6e
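In caller terms, the moved helpers stop being reachable through the per-chip gpu_ops hook and become direct common-code calls. A minimal before/after sketch of the nvgpu_rc_runlist_update call site changed in this commit:

        /* before: dispatched through the per-chip ops table */
        u32 eng_bitmask = g->ops.fifo.runlist_busy_engines(g, runlist_id);

        /* after: direct call into the common engines.c helper */
        u32 eng_bitmask = nvgpu_engine_get_runlist_busy_engines(g, runlist_id);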
@@ -35,9 +35,12 @@
 #include <nvgpu/channel.h>
 #include <nvgpu/soc.h>
 #include <nvgpu/top.h>
+#include <nvgpu/gr/gr_falcon.h>
 
 #include "gk20a/fifo_gk20a.h"
 
+#define FECS_METHOD_WFI_RESTORE 0x80000U
+
 enum nvgpu_fifo_engine nvgpu_engine_enum_from_type(struct gk20a *g,
                 u32 engine_type)
 {
@@ -797,3 +800,104 @@ int nvgpu_engine_init_info(struct fifo_gk20a *f)
 
         return ret;
 }
+
+void nvgpu_engine_get_id_and_type(struct gk20a *g, u32 engine_id,
+                u32 *id, u32 *type)
+{
+        struct nvgpu_engine_status_info engine_status;
+
+        g->ops.engine_status.read_engine_status_info(g, engine_id,
+                &engine_status);
+
+        /* use next_id if context load is failing */
+        if (nvgpu_engine_status_is_ctxsw_load(
+                &engine_status)) {
+                nvgpu_engine_status_get_next_ctx_id_type(
+                        &engine_status, id, type);
+        } else {
+                nvgpu_engine_status_get_ctx_id_type(
+                        &engine_status, id, type);
+        }
+}
+
+u32 nvgpu_engine_find_busy_doing_ctxsw(struct gk20a *g,
+                u32 *id_ptr, bool *is_tsg_ptr)
+{
+        u32 engine_id;
+        u32 id = U32_MAX;
+        bool is_tsg = false;
+        u32 mailbox2;
+        u32 act_eng_id = FIFO_INVAL_ENGINE_ID;
+        struct nvgpu_engine_status_info engine_status;
+
+        for (engine_id = 0U; engine_id < g->fifo.num_engines; engine_id++) {
+                bool failing_engine;
+
+                act_eng_id = g->fifo.active_engines_list[engine_id];
+                g->ops.engine_status.read_engine_status_info(g, act_eng_id,
+                        &engine_status);
+
+                /* we are interested in busy engines */
+                failing_engine = engine_status.is_busy;
+
+                /* ..that are doing context switch */
+                failing_engine = failing_engine &&
+                        nvgpu_engine_status_is_ctxsw(&engine_status);
+
+                if (!failing_engine) {
+                        act_eng_id = FIFO_INVAL_ENGINE_ID;
+                        continue;
+                }
+
+                if (nvgpu_engine_status_is_ctxsw_load(&engine_status)) {
+                        id = engine_status.ctx_next_id;
+                        is_tsg = nvgpu_engine_status_is_next_ctx_type_tsg(
+                                &engine_status);
+                } else if (nvgpu_engine_status_is_ctxsw_switch(&engine_status)) {
+                        mailbox2 = g->ops.gr.falcon.read_fecs_ctxsw_mailbox(g,
+                                NVGPU_GR_FALCON_FECS_CTXSW_MAILBOX2);
+                        if ((mailbox2 & FECS_METHOD_WFI_RESTORE) != 0U) {
+                                id = engine_status.ctx_next_id;
+                                is_tsg = nvgpu_engine_status_is_next_ctx_type_tsg(
+                                        &engine_status);
+                        } else {
+                                id = engine_status.ctx_id;
+                                is_tsg = nvgpu_engine_status_is_ctx_type_tsg(
+                                        &engine_status);
+                        }
+                } else {
+                        id = engine_status.ctx_id;
+                        is_tsg = nvgpu_engine_status_is_ctx_type_tsg(
+                                &engine_status);
+                }
+                break;
+        }
+
+        *id_ptr = id;
+        *is_tsg_ptr = is_tsg;
+
+        return act_eng_id;
+}
+
+u32 nvgpu_engine_get_runlist_busy_engines(struct gk20a *g, u32 runlist_id)
+{
+        struct fifo_gk20a *f = &g->fifo;
+        u32 i, eng_bitmask = 0U;
+        struct nvgpu_engine_status_info engine_status;
+
+        for (i = 0U; i < f->num_engines; i++) {
+                u32 act_eng_id = f->active_engines_list[i];
+                u32 engine_runlist = f->engine_info[act_eng_id].runlist_id;
+                bool engine_busy;
+
+                g->ops.engine_status.read_engine_status_info(g, act_eng_id,
+                        &engine_status);
+                engine_busy = engine_status.is_busy;
+
+                if (engine_busy && engine_runlist == runlist_id) {
+                        eng_bitmask |= BIT(act_eng_id);
+                }
+        }
+
+        return eng_bitmask;
+}
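For reference, a minimal consumption sketch of the relocated scan helper, modeled on the gk20a_fifo_handle_ctxsw_timeout call site updated later in this commit (declarations added here; surrounding error handling omitted):

        u32 id = U32_MAX;
        bool is_tsg = false;
        u32 engine_id;

        /* scan active engines for one that is busy and mid-ctxsw */
        engine_id = nvgpu_engine_find_busy_doing_ctxsw(g, &id, &is_tsg);
        if (engine_id == FIFO_INVAL_ENGINE_ID) {
                /* no busy engine is context switching; nothing to recover */
        }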
@@ -115,7 +115,7 @@ void nvgpu_rc_pbdma_fault(struct gk20a *g, struct fifo_gk20a *f,
 
 void nvgpu_rc_runlist_update(struct gk20a *g, u32 runlist_id)
 {
-        u32 eng_bitmask = g->ops.fifo.runlist_busy_engines(g, runlist_id);
+        u32 eng_bitmask = nvgpu_engine_get_runlist_busy_engines(g, runlist_id);
 
         if (eng_bitmask != 0U) {
                 nvgpu_rc_fifo_recover(g, eng_bitmask, INVAL_ID, false, false, true,
@@ -432,24 +432,6 @@ bool gk20a_fifo_handle_mmu_fault(
         return verbose;
 }
 
-static void gk20a_fifo_get_faulty_id_type(struct gk20a *g, u32 engine_id,
-                u32 *id, u32 *type)
-{
-        struct nvgpu_engine_status_info engine_status;
-
-        g->ops.engine_status.read_engine_status_info(g, engine_id, &engine_status);
-
-        /* use next_id if context load is failing */
-        if (nvgpu_engine_status_is_ctxsw_load(
-                &engine_status)) {
-                nvgpu_engine_status_get_next_ctx_id_type(
-                        &engine_status, id, type);
-        } else {
-                nvgpu_engine_status_get_ctx_id_type(
-                        &engine_status, id, type);
-        }
-}
-
 void gk20a_fifo_teardown_mask_intr(struct gk20a *g)
 {
         u32 val;
@@ -511,7 +493,7 @@ void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
         } else {
                 /* store faulted engines in advance */
                 for_each_set_bit(engine_id, &_engine_ids, 32U) {
-                        gk20a_fifo_get_faulty_id_type(g, (u32)engine_id,
+                        nvgpu_engine_get_id_and_type(g, (u32)engine_id,
                                         &ref_id, &ref_type);
                         if (ref_type == fifo_engine_status_id_type_tsgid_v()) {
                                 ref_id_is_tsg = true;
@@ -525,7 +507,7 @@ void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
                                 u32 type;
                                 u32 id;
 
-                                gk20a_fifo_get_faulty_id_type(g,
+                                nvgpu_engine_get_id_and_type(g,
                                         active_engine_id, &id, &type);
                                 if (ref_type == type && ref_id == id) {
                                         u32 mmu_id = nvgpu_engine_id_to_mmu_fault_id(g,
@@ -555,64 +537,6 @@ void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
         nvgpu_mutex_release(&g->fifo.engines_reset_mutex);
 }
 
-u32 gk20a_fifo_get_failing_engine_data(struct gk20a *g,
-                u32 *__id, bool *__is_tsg)
-{
-        u32 engine_id;
-        u32 id = U32_MAX;
-        bool is_tsg = false;
-        u32 mailbox2;
-        u32 active_engine_id = FIFO_INVAL_ENGINE_ID;
-        struct nvgpu_engine_status_info engine_status;
-
-        for (engine_id = 0; engine_id < g->fifo.num_engines; engine_id++) {
-                bool failing_engine;
-
-                active_engine_id = g->fifo.active_engines_list[engine_id];
-                g->ops.engine_status.read_engine_status_info(g, active_engine_id,
-                        &engine_status);
-
-                /* we are interested in busy engines */
-                failing_engine = engine_status.is_busy;
-
-                /* ..that are doing context switch */
-                failing_engine = failing_engine &&
-                        nvgpu_engine_status_is_ctxsw(&engine_status);
-
-                if (!failing_engine) {
-                        active_engine_id = FIFO_INVAL_ENGINE_ID;
-                        continue;
-                }
-
-                if (nvgpu_engine_status_is_ctxsw_load(&engine_status)) {
-                        id = engine_status.ctx_next_id;
-                        is_tsg = nvgpu_engine_status_is_next_ctx_type_tsg(
-                                &engine_status);
-                } else if (nvgpu_engine_status_is_ctxsw_switch(&engine_status)) {
-                        mailbox2 = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(2));
-                        if ((mailbox2 & FECS_METHOD_WFI_RESTORE) != 0U) {
-                                id = engine_status.ctx_next_id;
-                                is_tsg = nvgpu_engine_status_is_next_ctx_type_tsg(
-                                        &engine_status);
-                        } else {
-                                id = engine_status.ctx_id;
-                                is_tsg = nvgpu_engine_status_is_ctx_type_tsg(
-                                        &engine_status);
-                        }
-                } else {
-                        id = engine_status.ctx_id;
-                        is_tsg = nvgpu_engine_status_is_ctx_type_tsg(
-                                &engine_status);
-                }
-                break;
-        }
-
-        *__id = id;
-        *__is_tsg = is_tsg;
-
-        return active_engine_id;
-}
-
 void gk20a_fifo_issue_preempt(struct gk20a *g, u32 id, bool is_tsg)
 {
         if (is_tsg) {
@@ -777,29 +701,6 @@ int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch)
         return err;
 }
 
-u32 gk20a_fifo_runlist_busy_engines(struct gk20a *g, u32 runlist_id)
-{
-        struct fifo_gk20a *f = &g->fifo;
-        u32 engines = 0;
-        unsigned int i;
-        struct nvgpu_engine_status_info engine_status;
-
-        for (i = 0; i < f->num_engines; i++) {
-                u32 active_engine_id = f->active_engines_list[i];
-                u32 engine_runlist = f->engine_info[active_engine_id].runlist_id;
-                bool engine_busy;
-                g->ops.engine_status.read_engine_status_info(g, active_engine_id,
-                        &engine_status);
-                engine_busy = engine_status.is_busy;
-
-                if (engine_busy && engine_runlist == runlist_id) {
-                        engines |= BIT(active_engine_id);
-                }
-        }
-
-        return engines;
-}
-
 u32 gk20a_fifo_default_timeslice_us(struct gk20a *g)
 {
         u64 slice = (((u64)(NVGPU_FIFO_DEFAULT_TIMESLICE_TIMEOUT <<
@@ -226,8 +226,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g);
 
 void fifo_gk20a_finish_mmu_fault_handling(struct gk20a *g,
                 unsigned long fault_id);
-u32 gk20a_fifo_get_failing_engine_data(struct gk20a *g,
-                u32 *__id, bool *__is_tsg);
 void gk20a_fifo_issue_preempt(struct gk20a *g, u32 id, bool is_tsg);
 int gk20a_fifo_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice);
 
@@ -258,7 +257,6 @@ int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
                 unsigned int id_type);
 int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg);
 
-u32 gk20a_fifo_runlist_busy_engines(struct gk20a *g, u32 runlist_id);
 bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id,
                 u32 engine_subid, bool fake_fault);
 
@@ -656,7 +656,6 @@ static const struct gpu_ops gm20b_ops = {
                 .setup_sw = nvgpu_fifo_setup_sw,
                 .cleanup_sw = nvgpu_fifo_cleanup_sw,
                 .set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,
-                .runlist_busy_engines = gk20a_fifo_runlist_busy_engines,
                 .intr_0_enable = gk20a_fifo_intr_0_enable,
                 .intr_1_enable = gk20a_fifo_intr_1_enable,
                 .intr_0_isr = gk20a_fifo_intr_0_isr,
@@ -754,7 +754,6 @@ static const struct gpu_ops gp10b_ops = {
                 .setup_sw = nvgpu_fifo_setup_sw,
                 .cleanup_sw = nvgpu_fifo_cleanup_sw,
                 .set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,
-                .runlist_busy_engines = gk20a_fifo_runlist_busy_engines,
                 .intr_0_enable = gk20a_fifo_intr_0_enable,
                 .intr_1_enable = gk20a_fifo_intr_1_enable,
                 .intr_0_isr = gk20a_fifo_intr_0_isr,
@@ -928,7 +928,6 @@ static const struct gpu_ops gv100_ops = {
                 .setup_sw = nvgpu_fifo_setup_sw,
                 .cleanup_sw = nvgpu_fifo_cleanup_sw,
                 .set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,
-                .runlist_busy_engines = gk20a_fifo_runlist_busy_engines,
                 .intr_0_enable = gk20a_fifo_intr_0_enable,
                 .intr_1_enable = gk20a_fifo_intr_1_enable,
                 .intr_0_isr = gv11b_fifo_intr_0_isr,
@@ -902,7 +902,6 @@ static const struct gpu_ops gv11b_ops = {
                 .setup_sw = nvgpu_fifo_setup_sw,
                 .cleanup_sw = nvgpu_fifo_cleanup_sw,
                 .set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,
-                .runlist_busy_engines = gk20a_fifo_runlist_busy_engines,
                 .intr_0_enable = gv11b_fifo_intr_0_enable,
                 .intr_1_enable = gk20a_fifo_intr_1_enable,
                 .intr_0_isr = gv11b_fifo_intr_0_isr,
@@ -27,6 +27,7 @@
 #include <nvgpu/ptimer.h>
 #include <nvgpu/channel.h>
 #include <nvgpu/rc.h>
+#include <nvgpu/engines.h>
 
 #include <hal/fifo/ctxsw_timeout_gk20a.h>
 

@@ -65,7 +66,7 @@ bool gk20a_fifo_handle_ctxsw_timeout(struct gk20a *g)
         /* read the scheduler error register */
         sched_error = nvgpu_readl(g, fifo_intr_sched_error_r());
 
-        engine_id = gk20a_fifo_get_failing_engine_data(g, &id, &is_tsg);
+        engine_id = nvgpu_engine_find_busy_doing_ctxsw(g, &id, &is_tsg);
         /*
          * Could not find the engine
          * Possible Causes:
@@ -31,6 +31,7 @@
 #include <nvgpu/nvgpu_err.h>
 #include <nvgpu/error_notifier.h>
 #include <nvgpu/pbdma_status.h>
+#include <nvgpu/engines.h>
 
 #include <hal/fifo/fifo_intr_gk20a.h>
 #include <hal/fifo/mmu_fault_gk20a.h>

@@ -131,7 +132,7 @@ bool gk20a_fifo_handle_sched_error(struct gk20a *g)
         /* read the scheduler error register */
         sched_error = nvgpu_readl(g, fifo_intr_sched_error_r());
 
-        engine_id = gk20a_fifo_get_failing_engine_data(g, &id, &is_tsg);
+        engine_id = nvgpu_engine_find_busy_doing_ctxsw(g, &id, &is_tsg);
 
         if (fifo_intr_sched_error_code_f(sched_error) !=
                         fifo_intr_sched_error_code_ctxsw_timeout_v()) {
@@ -75,4 +75,10 @@ u32 nvgpu_engine_mmu_fault_id_to_engine_id(struct gk20a *g, u32 fault_id);
 u32 nvgpu_engine_get_mask_on_id(struct gk20a *g, u32 id, bool is_tsg);
 int nvgpu_engine_init_info(struct fifo_gk20a *f);
 
+void nvgpu_engine_get_id_and_type(struct gk20a *g, u32 engine_id,
+        u32 *id, u32 *type);
+u32 nvgpu_engine_find_busy_doing_ctxsw(struct gk20a *g,
+        u32 *id_ptr, bool *is_tsg_ptr);
+u32 nvgpu_engine_get_runlist_busy_engines(struct gk20a *g, u32 runlist_id);
+
 #endif /*NVGPU_ENGINE_H*/
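A short usage sketch of the new id/type prototype, modeled on the gk20a_fifo_teardown_ch_tsg call sites updated earlier in this commit:

        u32 ref_id, ref_type;

        nvgpu_engine_get_id_and_type(g, engine_id, &ref_id, &ref_type);
        if (ref_type == fifo_engine_status_id_type_tsgid_v()) {
                /* the faulted context behind this engine is a TSG */
        }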
@@ -992,7 +992,6 @@ struct gpu_ops {
                 void (*deinit_pdb_cache_war)(struct gk20a *g);
                 int (*set_sm_exception_type_mask)(struct channel_gk20a *ch,
                                 u32 exception_mask);
-                u32 (*runlist_busy_engines)(struct gk20a *g, u32 runlist_id);
                 struct {
                         int (*report_host_err)(struct gk20a *g,
                                 u32 hw_id, u32 inst, u32 err_id,
@@ -966,7 +966,6 @@ static const struct gpu_ops tu104_ops = {
                 .init_pdb_cache_war = tu104_init_pdb_cache_war,
                 .deinit_pdb_cache_war = tu104_deinit_pdb_cache_war,
                 .set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,
-                .runlist_busy_engines = gk20a_fifo_runlist_busy_engines,
                 .intr_0_enable = gv11b_fifo_intr_0_enable,
                 .intr_1_enable = gk20a_fifo_intr_1_enable,
                 .intr_0_isr = gv11b_fifo_intr_0_isr,