gpu: nvgpu: move fifo intr to hal/fifo

Removed the intr_0_error_mask op (the mask is now a static helper inside each chip's fifo_intr unit)

Added the following ops for fifo intr (see the sketch below):
intr_0_enable
intr_1_enable
intr_0_isr
intr_1_isr
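
For reference, the new HAL surface as declared in struct gpu_ops (copied
from the gk20a.h hunk in this change; the grouping here is illustrative
only):

	void (*intr_0_enable)(struct gk20a *g, bool enable);
	void (*intr_1_enable)(struct gk20a *g, bool enable);
	void (*intr_0_isr)(struct gk20a *g);
	u32 (*intr_1_isr)(struct gk20a *g);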

JIRA NVGPU-1310

Change-Id: I19bd1a380a89cffd582d6c4a0b7796a46fec5afb
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2072144
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Seema Khowala
2019-03-12 16:21:08 -07:00
committed by mobile promotions
parent 217be5e492
commit f66f3e1341
21 changed files with 855 additions and 540 deletions


@@ -221,6 +221,8 @@ nvgpu-y += \
hal/fifo/pbdma_status_gm20b.o \
hal/fifo/userd_gk20a.o \
hal/fifo/userd_gv11b.o \
hal/fifo/fifo_intr_gk20a.o \
hal/fifo/fifo_intr_gv11b.o \
hal/falcon/falcon_gk20a.o \
hal/nvlink/minion_gv100.o \
hal/nvlink/minion_tu104.o \


@@ -375,12 +375,14 @@ srcs += common/sim.c \
hal/fifo/pbdma_status_gm20b.c \
hal/fifo/userd_gk20a.c \
hal/fifo/userd_gv11b.c \
hal/fifo/fifo_intr_gk20a.c \
hal/fifo/fifo_intr_gv11b.c \
hal/falcon/falcon_gk20a.c \
hal/nvlink/minion_gv100.c \
hal/nvlink/minion_tu104.c \
hal/nvlink/link_mode_transitions_gv100.c \
hal/nvlink/link_mode_transitions_tu104.c \
hal/gsp/gsp_gv100.c
ifeq ($(NVGPU_DEBUGGER),1)
srcs += common/debugger.c


@@ -69,7 +69,7 @@ void gm20b_mc_isr_stall(struct gk20a *g)
}
}
if ((mc_intr_0 & mc_intr_pfifo_pending_f()) != 0U) {
gk20a_fifo_isr(g);
g->ops.fifo.intr_0_isr(g);
}
if ((mc_intr_0 & mc_intr_pmu_pending_f()) != 0U) {
g->ops.pmu.pmu_isr(g);
@@ -96,7 +96,7 @@ u32 gm20b_mc_isr_nonstall(struct gk20a *g)
mc_intr_1 = g->ops.mc.intr_nonstall(g);
if (g->ops.mc.is_intr1_pending(g, NVGPU_UNIT_FIFO, mc_intr_1)) {
ops |= gk20a_fifo_nonstall_isr(g);
ops |= g->ops.fifo.intr_1_isr(g);
}
for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines;


@@ -129,7 +129,7 @@ void mc_gp10b_isr_stall(struct gk20a *g)
g->ops.fb.hub_isr(g);
}
if ((mc_intr_0 & mc_intr_pfifo_pending_f()) != 0U) {
gk20a_fifo_isr(g);
g->ops.fifo.intr_0_isr(g);
}
if ((mc_intr_0 & mc_intr_pmu_pending_f()) != 0U) {
g->ops.pmu.pmu_isr(g);


@@ -419,7 +419,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.get_engines_mask_on_id = NULL,
.dump_channel_status_ramfc = NULL,
.capture_channel_ram_dump = NULL,
.intr_0_error_mask = gk20a_fifo_intr_0_error_mask,
.is_preempt_pending = NULL,
.reset_enable_hw = NULL,
.teardown_ch_tsg = NULL,
@@ -437,6 +436,10 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.cleanup_sw = vgpu_fifo_cleanup_sw,
.resetup_ramfc = NULL,
.set_sm_exception_type_mask = vgpu_set_sm_exception_type_mask,
.intr_0_enable = NULL,
.intr_1_enable = NULL,
.intr_0_isr = NULL,
.intr_1_isr = NULL,
},
.engine = {
.is_fault_engine_subid_gpc = gm20b_is_fault_engine_subid_gpc,


@@ -28,12 +28,14 @@
#include "hal/fifo/pbdma_gv11b.h"
#include "hal/fifo/userd_gk20a.h"
#include "hal/fifo/userd_gv11b.h"
#include "hal/fifo/fifo_intr_gv11b.h"
#include "hal/therm/therm_gm20b.h"
#include "hal/therm/therm_gp10b.h"
#include "hal/therm/therm_gv11b.h"
#include "hal/gr/fecs_trace/fecs_trace_gv11b.h"
#include "hal/gr/zbc/zbc_gv11b.h"
#include "hal/gr/hwpm_map/hwpm_map_gv100.h"
#include "hal/gr/init/gr_init_gv11b.h"
#include "hal/ltc/ltc_gm20b.h"
#include "hal/ltc/ltc_gp10b.h"
#include "hal/ltc/ltc_gv11b.h"
@@ -490,7 +492,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.get_engines_mask_on_id = NULL,
.dump_channel_status_ramfc = NULL,
.capture_channel_ram_dump = NULL,
.intr_0_error_mask = gv11b_fifo_intr_0_error_mask,
.is_preempt_pending = gv11b_fifo_is_preempt_pending,
.reset_enable_hw = NULL,
.teardown_ch_tsg = NULL,
@@ -516,6 +517,10 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.set_sm_exception_type_mask = vgpu_set_sm_exception_type_mask,
.usermode_base = gv11b_fifo_usermode_base,
.doorbell_token = gv11b_fifo_doorbell_token,
.intr_0_enable = NULL,
.intr_1_enable = NULL,
.intr_0_isr = NULL,
.intr_1_isr = NULL,
},
.engine = {
.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,


@@ -207,37 +207,8 @@ static inline u32 gk20a_mmu_id_to_engine_id(struct gk20a *g, u32 fault_id)
return active_engine_id;
}
u32 gk20a_fifo_intr_0_error_mask(struct gk20a *g)
{
u32 intr_0_error_mask =
fifo_intr_0_bind_error_pending_f() |
fifo_intr_0_sched_error_pending_f() |
fifo_intr_0_chsw_error_pending_f() |
fifo_intr_0_fb_flush_timeout_pending_f() |
fifo_intr_0_dropped_mmu_fault_pending_f() |
fifo_intr_0_mmu_fault_pending_f() |
fifo_intr_0_lb_error_pending_f() |
fifo_intr_0_pio_error_pending_f();
return intr_0_error_mask;
}
static u32 gk20a_fifo_intr_0_en_mask(struct gk20a *g)
{
u32 intr_0_en_mask;
intr_0_en_mask = g->ops.fifo.intr_0_error_mask(g);
intr_0_en_mask |= fifo_intr_0_runlist_event_pending_f() |
fifo_intr_0_pbdma_intr_pending_f();
return intr_0_en_mask;
}
int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
{
u32 intr_stall;
u32 mask;
u32 timeout;
unsigned int i;
u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
@@ -269,62 +240,14 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
g->ops.fifo.apply_pb_timeout(g);
}
if (g->ops.fifo.apply_ctxsw_timeout_intr != NULL) {
g->ops.fifo.apply_ctxsw_timeout_intr(g);
} else {
timeout = g->fifo_eng_timeout_us;
timeout = scale_ptimer(timeout,
ptimer_scalingfactor10x(g->ptimer_src_freq));
timeout |= fifo_eng_timeout_detection_enabled_f();
gk20a_writel(g, fifo_eng_timeout_r(), timeout);
}
/* clear and enable pbdma interrupt */
for (i = 0; i < host_num_pbdma; i++) {
gk20a_writel(g, pbdma_intr_0_r(i), 0xFFFFFFFFU);
gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFFU);
intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i));
intr_stall &= ~pbdma_intr_stall_lbreq_enabled_f();
gk20a_writel(g, pbdma_intr_stall_r(i), intr_stall);
nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", i, intr_stall);
gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall);
intr_stall = gk20a_readl(g, pbdma_intr_stall_1_r(i));
/*
* For bug 2082123
* Mask the unused HCE_RE_ILLEGAL_OP bit from the interrupt.
*/
intr_stall &= ~pbdma_intr_stall_1_hce_illegal_op_enabled_f();
nvgpu_log_info(g, "pbdma id:%u, intr_en_1 0x%08x", i, intr_stall);
gk20a_writel(g, pbdma_intr_en_1_r(i), intr_stall);
}
/* reset runlist interrupts */
gk20a_writel(g, fifo_intr_runlist_r(), U32_MAX);
/* clear and enable pfifo interrupt */
gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFFU);
mask = gk20a_fifo_intr_0_en_mask(g);
nvgpu_log_info(g, "fifo_intr_en_0 0x%08x", mask);
gk20a_writel(g, fifo_intr_en_0_r(), mask);
nvgpu_log_info(g, "fifo_intr_en_1 = 0x80000000");
gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000U);
g->ops.fifo.intr_0_enable(g, true);
g->ops.fifo.intr_1_enable(g, true);
nvgpu_log_fn(g, "done");
return 0;
}
void gk20a_fifo_handle_runlist_event(struct gk20a *g)
{
u32 runlist_event = gk20a_readl(g, fifo_intr_runlist_r());
nvgpu_log(g, gpu_dbg_intr, "runlist event %08x",
runlist_event);
gk20a_writel(g, fifo_intr_runlist_r(), runlist_event);
}
int gk20a_init_fifo_setup_hw(struct gk20a *g)
{
struct fifo_gk20a *f = &g->fifo;
@@ -527,19 +450,7 @@ void gk20a_fifo_get_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id,
mmfault->inst_ptr <<= fifo_intr_mmu_fault_inst_ptr_align_shift_v();
}
static void gk20a_fifo_handle_chsw_fault(struct gk20a *g)
{
u32 intr;
intr = gk20a_readl(g, fifo_intr_chsw_error_r());
nvgpu_report_host_error(g, 0,
GPU_HOST_PFIFO_CHSW_ERROR, intr);
nvgpu_err(g, "chsw: %08x", intr);
g->ops.gr.dump_gr_falcon_stats(g);
gk20a_writel(g, fifo_intr_chsw_error_r(), intr);
}
static void gk20a_fifo_handle_dropped_mmu_fault(struct gk20a *g)
void gk20a_fifo_handle_dropped_mmu_fault(struct gk20a *g)
{
u32 fault_id = gk20a_readl(g, fifo_intr_mmu_fault_id_r());
nvgpu_err(g, "dropped mmu fault (0x%08x)", fault_id);
@@ -869,7 +780,7 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
return verbose;
}
static bool gk20a_fifo_handle_mmu_fault(
bool gk20a_fifo_handle_mmu_fault(
struct gk20a *g,
u32 mmu_fault_engines, /* queried from HW if 0 */
u32 hw_id, /* queried from HW if ~(u32)0 OR mmu_fault_engines == 0*/
@@ -1363,50 +1274,6 @@ err:
return ret;
}
static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
{
u32 handled = 0;
nvgpu_log_fn(g, "fifo_intr=0x%08x", fifo_intr);
if ((fifo_intr & fifo_intr_0_pio_error_pending_f()) != 0U) {
/* pio mode is unused. this shouldn't happen, ever. */
/* should we clear it or just leave it pending? */
nvgpu_err(g, "fifo pio error!");
BUG();
}
if ((fifo_intr & fifo_intr_0_bind_error_pending_f()) != 0U) {
u32 bind_error = gk20a_readl(g, fifo_intr_bind_error_r());
nvgpu_report_host_error(g, 0,
GPU_HOST_PFIFO_BIND_ERROR, bind_error);
nvgpu_err(g, "fifo bind error: 0x%08x", bind_error);
handled |= fifo_intr_0_bind_error_pending_f();
}
if ((fifo_intr & fifo_intr_0_sched_error_pending_f()) != 0U) {
(void) g->ops.fifo.handle_sched_error(g);
handled |= fifo_intr_0_sched_error_pending_f();
}
if ((fifo_intr & fifo_intr_0_chsw_error_pending_f()) != 0U) {
gk20a_fifo_handle_chsw_fault(g);
handled |= fifo_intr_0_chsw_error_pending_f();
}
if ((fifo_intr & fifo_intr_0_mmu_fault_pending_f()) != 0U) {
(void) gk20a_fifo_handle_mmu_fault(g, 0, ~(u32)0, false);
handled |= fifo_intr_0_mmu_fault_pending_f();
}
if ((fifo_intr & fifo_intr_0_dropped_mmu_fault_pending_f()) != 0U) {
gk20a_fifo_handle_dropped_mmu_fault(g);
handled |= fifo_intr_0_dropped_mmu_fault_pending_f();
}
return handled;
}
static void gk20a_fifo_pbdma_fault_rc(struct gk20a *g,
struct fifo_gk20a *f, u32 pbdma_id,
u32 error_notifier)
@@ -1490,7 +1357,7 @@ u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g, struct fifo_gk20a *f,
return handled;
}
static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr)
u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr)
{
struct fifo_gk20a *f = &g->fifo;
u32 clear_intr = 0, i;
@@ -1507,61 +1374,6 @@ static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr)
return fifo_intr_0_pbdma_intr_pending_f();
}
void gk20a_fifo_isr(struct gk20a *g)
{
u32 error_intr_mask;
u32 clear_intr = 0;
u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r());
error_intr_mask = g->ops.fifo.intr_0_error_mask(g);
if (g->fifo.sw_ready) {
/* note we're not actually in an "isr", but rather
* in a threaded interrupt context... */
nvgpu_mutex_acquire(&g->fifo.intr.isr.mutex);
nvgpu_log(g, gpu_dbg_intr, "fifo isr %08x\n", fifo_intr);
/* handle runlist update */
if ((fifo_intr & fifo_intr_0_runlist_event_pending_f()) != 0U) {
gk20a_fifo_handle_runlist_event(g);
clear_intr |= fifo_intr_0_runlist_event_pending_f();
}
if ((fifo_intr & fifo_intr_0_pbdma_intr_pending_f()) != 0U) {
clear_intr |= fifo_pbdma_isr(g, fifo_intr);
}
if (g->ops.fifo.handle_ctxsw_timeout != NULL) {
g->ops.fifo.handle_ctxsw_timeout(g, fifo_intr);
}
if (unlikely((fifo_intr & error_intr_mask) != 0U)) {
clear_intr |= fifo_error_isr(g, fifo_intr);
}
nvgpu_mutex_release(&g->fifo.intr.isr.mutex);
}
gk20a_writel(g, fifo_intr_0_r(), clear_intr);
return;
}
u32 gk20a_fifo_nonstall_isr(struct gk20a *g)
{
u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r());
u32 clear_intr = 0;
nvgpu_log(g, gpu_dbg_intr, "fifo nonstall isr %08x\n", fifo_intr);
if ((fifo_intr & fifo_intr_0_channel_intr_pending_f()) != 0U) {
clear_intr = fifo_intr_0_channel_intr_pending_f();
}
gk20a_writel(g, fifo_intr_0_r(), clear_intr);
return GK20A_NONSTALL_OPS_WAKEUP_SEMAPHORE;
}
void gk20a_fifo_issue_preempt(struct gk20a *g, u32 id, bool is_tsg)
{
if (is_tsg) {
@@ -1809,8 +1621,8 @@ int gk20a_fifo_suspend(struct gk20a *g)
}
/* disable fifo intr */
gk20a_writel(g, fifo_intr_en_0_r(), 0);
gk20a_writel(g, fifo_intr_en_1_r(), 0);
g->ops.fifo.intr_0_enable(g, false);
g->ops.fifo.intr_1_enable(g, false);
nvgpu_log_fn(g, "done");
return 0;


@@ -324,8 +324,6 @@ int gk20a_fifo_tsg_unbind_channel_verify_status(struct channel_gk20a *ch);
struct channel_gk20a *gk20a_refch_from_inst_ptr(struct gk20a *g, u64 inst_ptr);
u32 gk20a_fifo_intr_0_error_mask(struct gk20a *g);
int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
unsigned int id_type);
int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg);
@@ -339,7 +337,6 @@ int gk20a_fifo_alloc_inst(struct gk20a *g, struct channel_gk20a *ch);
void gk20a_fifo_free_inst(struct gk20a *g, struct channel_gk20a *ch);
u32 gk20a_fifo_runlist_busy_engines(struct gk20a *g, u32 runlist_id);
void gk20a_fifo_handle_runlist_event(struct gk20a *g);
bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id,
u32 engine_subid, bool fake_fault);
@@ -365,4 +362,9 @@ void gk20a_fifo_get_mmu_fault_gpc_desc(struct mmu_fault_info *mmfault);
bool gk20a_fifo_find_pbdma_for_runlist(struct fifo_gk20a *f, u32 runlist_id,
u32 *pbdma_id);
int gk20a_fifo_init_pbdma_info(struct fifo_gk20a *f);
u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr);
bool gk20a_fifo_handle_mmu_fault(struct gk20a *g,
u32 mmu_fault_engines, u32 hw_id, bool id_is_tsg);
void gk20a_fifo_handle_dropped_mmu_fault(struct gk20a *g);
#endif /* FIFO_GK20A_H */


@@ -53,6 +53,7 @@
#include "hal/fifo/engine_status_gm20b.h"
#include "hal/fifo/pbdma_status_gm20b.h"
#include "hal/fifo/userd_gk20a.h"
#include "hal/fifo/fifo_intr_gk20a.h"
#include "hal/gr/zbc/zbc_gm20b.h"
#include "hal/gr/zcull/zcull_gm20b.h"
#include "hal/gr/init/gr_init_gm20b.h"
@@ -561,7 +562,6 @@ static const struct gpu_ops gm20b_ops = {
.get_engines_mask_on_id = gk20a_fifo_engines_on_id,
.dump_channel_status_ramfc = gk20a_dump_channel_status_ramfc,
.capture_channel_ram_dump = gk20a_capture_channel_ram_dump,
.intr_0_error_mask = gk20a_fifo_intr_0_error_mask,
.is_preempt_pending = gk20a_fifo_is_preempt_pending,
.reset_enable_hw = gk20a_init_fifo_reset_enable_hw,
.teardown_ch_tsg = gk20a_fifo_teardown_ch_tsg,
@@ -583,6 +583,10 @@ static const struct gpu_ops gm20b_ops = {
.runlist_busy_engines = gk20a_fifo_runlist_busy_engines,
.find_pbdma_for_runlist = gk20a_fifo_find_pbdma_for_runlist,
.init_ce_engine_info = gm20b_fifo_init_ce_engine_info,
.intr_0_enable = gk20a_fifo_intr_0_enable,
.intr_1_enable = gk20a_fifo_intr_1_enable,
.intr_0_isr = gk20a_fifo_intr_0_isr,
.intr_1_isr = gk20a_fifo_intr_1_isr,
},
.engine = {
.is_fault_engine_subid_gpc = gm20b_is_fault_engine_subid_gpc,


@@ -62,6 +62,7 @@
#include "hal/fifo/engine_status_gm20b.h"
#include "hal/fifo/pbdma_status_gm20b.h"
#include "hal/fifo/userd_gk20a.h"
#include "hal/fifo/fifo_intr_gk20a.h"
#include "hal/gr/fecs_trace/fecs_trace_gm20b.h"
#include "hal/gr/fecs_trace/fecs_trace_gp10b.h"
#include "hal/gr/zbc/zbc_gp10b.h"
@@ -639,7 +640,6 @@ static const struct gpu_ops gp10b_ops = {
.get_engines_mask_on_id = gk20a_fifo_engines_on_id,
.dump_channel_status_ramfc = gk20a_dump_channel_status_ramfc,
.capture_channel_ram_dump = gk20a_capture_channel_ram_dump,
.intr_0_error_mask = gk20a_fifo_intr_0_error_mask,
.is_preempt_pending = gk20a_fifo_is_preempt_pending,
.reset_enable_hw = gk20a_init_fifo_reset_enable_hw,
.teardown_ch_tsg = gk20a_fifo_teardown_ch_tsg,
@@ -662,6 +662,10 @@ static const struct gpu_ops gp10b_ops = {
.runlist_busy_engines = gk20a_fifo_runlist_busy_engines,
.find_pbdma_for_runlist = gk20a_fifo_find_pbdma_for_runlist,
.init_ce_engine_info = gp10b_fifo_init_ce_engine_info,
.intr_0_enable = gk20a_fifo_intr_0_enable,
.intr_1_enable = gk20a_fifo_intr_1_enable,
.intr_0_isr = gk20a_fifo_intr_0_isr,
.intr_1_isr = gk20a_fifo_intr_1_isr,
},
.engine = {
.is_fault_engine_subid_gpc = gm20b_is_fault_engine_subid_gpc,


@@ -53,6 +53,8 @@
#include "hal/fifo/pbdma_status_gm20b.h"
#include "hal/fifo/userd_gk20a.h"
#include "hal/fifo/userd_gv11b.h"
#include "hal/fifo/fifo_intr_gk20a.h"
#include "hal/fifo/fifo_intr_gv11b.h"
#include "hal/gr/fecs_trace/fecs_trace_gm20b.h"
#include "hal/gr/zbc/zbc_gp10b.h"
#include "hal/gr/zbc/zbc_gv11b.h"
@@ -813,7 +815,6 @@ static const struct gpu_ops gv100_ops = {
.get_engines_mask_on_id = gk20a_fifo_engines_on_id,
.dump_channel_status_ramfc = gv11b_dump_channel_status_ramfc,
.capture_channel_ram_dump = gv11b_capture_channel_ram_dump,
.intr_0_error_mask = gv11b_fifo_intr_0_error_mask,
.is_preempt_pending = gv11b_fifo_is_preempt_pending,
.reset_enable_hw = gk20a_init_fifo_reset_enable_hw,
.teardown_ch_tsg = gv11b_fifo_teardown_ch_tsg,
@@ -843,6 +844,10 @@ static const struct gpu_ops gv100_ops = {
.runlist_busy_engines = gk20a_fifo_runlist_busy_engines,
.find_pbdma_for_runlist = gk20a_fifo_find_pbdma_for_runlist,
.init_ce_engine_info = gp10b_fifo_init_ce_engine_info,
.intr_0_enable = gk20a_fifo_intr_0_enable,
.intr_1_enable = gk20a_fifo_intr_1_enable,
.intr_0_isr = gv11b_fifo_intr_0_isr,
.intr_1_isr = gk20a_fifo_intr_1_isr,
},
.engine = {
.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,


@@ -250,18 +250,6 @@ void gv11b_dump_channel_status_ramfc(struct gk20a *g,
gk20a_debug_output(o, "\n");
}
u32 gv11b_fifo_intr_0_error_mask(struct gk20a *g)
{
u32 intr_0_error_mask =
fifo_intr_0_bind_error_pending_f() |
fifo_intr_0_sched_error_pending_f() |
fifo_intr_0_chsw_error_pending_f() |
fifo_intr_0_memop_timeout_pending_f() |
fifo_intr_0_lb_error_pending_f();
return intr_0_error_mask;
}
u32 gv11b_fifo_get_preempt_timeout(struct gk20a *g)
{
/* using gr_idle_timeout for polling pdma/eng/runlist
@@ -1094,22 +1082,8 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
nvgpu_mutex_release(&g->fifo.engines_reset_mutex);
}
static u32 gv11b_fifo_intr_0_en_mask(struct gk20a *g)
{
u32 intr_0_en_mask;
intr_0_en_mask = g->ops.fifo.intr_0_error_mask(g);
intr_0_en_mask |= fifo_intr_0_pbdma_intr_pending_f() |
fifo_intr_0_ctxsw_timeout_pending_f();
return intr_0_en_mask;
}
int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
{
u32 intr_stall;
u32 mask;
u32 timeout;
unsigned int i;
u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
@@ -1150,300 +1124,14 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
}
}
/* clear and enable pbdma interrupt */
for (i = 0; i < host_num_pbdma; i++) {
gk20a_writel(g, pbdma_intr_0_r(i), 0xFFFFFFFFU);
gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFFU);
intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i));
nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", i, intr_stall);
gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall);
intr_stall = gk20a_readl(g, pbdma_intr_stall_1_r(i));
/*
* For bug 2082123
* Mask the unused HCE_RE_ILLEGAL_OP bit from the interrupt.
*/
intr_stall &= ~pbdma_intr_stall_1_hce_illegal_op_enabled_f();
nvgpu_log_info(g, "pbdma id:%u, intr_en_1 0x%08x", i, intr_stall);
gk20a_writel(g, pbdma_intr_en_1_r(i), intr_stall);
}
/* clear ctxsw timeout interrupts */
gk20a_writel(g, fifo_intr_ctxsw_timeout_r(), ~U32(0U));
if (nvgpu_platform_is_silicon(g)) {
/* enable ctxsw timeout */
timeout = g->fifo_eng_timeout_us;
timeout = scale_ptimer(timeout,
ptimer_scalingfactor10x(g->ptimer_src_freq));
timeout |= fifo_eng_ctxsw_timeout_detection_enabled_f();
gk20a_writel(g, fifo_eng_ctxsw_timeout_r(), timeout);
} else {
timeout = gk20a_readl(g, fifo_eng_ctxsw_timeout_r());
nvgpu_log_info(g, "fifo_eng_ctxsw_timeout reg val = 0x%08x",
timeout);
timeout = set_field(timeout, fifo_eng_ctxsw_timeout_period_m(),
fifo_eng_ctxsw_timeout_period_max_f());
timeout = set_field(timeout,
fifo_eng_ctxsw_timeout_detection_m(),
fifo_eng_ctxsw_timeout_detection_disabled_f());
nvgpu_log_info(g, "new fifo_eng_ctxsw_timeout reg val = 0x%08x",
timeout);
gk20a_writel(g, fifo_eng_ctxsw_timeout_r(), timeout);
}
/* clear runlist interrupts */
gk20a_writel(g, fifo_intr_runlist_r(), ~U32(0U));
/* clear and enable pfifo interrupt */
gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFFU);
mask = gv11b_fifo_intr_0_en_mask(g);
nvgpu_log_info(g, "fifo_intr_en_0 0x%08x", mask);
gk20a_writel(g, fifo_intr_en_0_r(), mask);
nvgpu_log_info(g, "fifo_intr_en_1 = 0x80000000");
gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000U);
g->ops.fifo.intr_0_enable(g, true);
g->ops.fifo.intr_1_enable(g, true);
nvgpu_log_fn(g, "done");
return 0;
}
static const char *const gv11b_sched_error_str[] = {
"xxx-0",
"xxx-1",
"xxx-2",
"xxx-3",
"xxx-4",
"engine_reset",
"rl_ack_timeout",
"rl_ack_extra",
"rl_rdat_timeout",
"rl_rdat_extra",
"eng_ctxsw_timeout",
"xxx-b",
"rl_req_timeout",
"new_runlist",
"code_config_while_busy",
"xxx-f",
"xxx-0x10",
"xxx-0x11",
"xxx-0x12",
"xxx-0x13",
"xxx-0x14",
"xxx-0x15",
"xxx-0x16",
"xxx-0x17",
"xxx-0x18",
"xxx-0x19",
"xxx-0x1a",
"xxx-0x1b",
"xxx-0x1c",
"xxx-0x1d",
"xxx-0x1e",
"xxx-0x1f",
"bad_tsg",
};
bool gv11b_fifo_handle_sched_error(struct gk20a *g)
{
u32 sched_error;
sched_error = gk20a_readl(g, fifo_intr_sched_error_r());
if (sched_error < ARRAY_SIZE(gv11b_sched_error_str)) {
nvgpu_err(g, "fifo sched error :%s",
gv11b_sched_error_str[sched_error]);
} else {
nvgpu_err(g, "fifo sched error code not supported");
}
nvgpu_report_host_error(g, 0,
GPU_HOST_PFIFO_SCHED_ERROR, sched_error);
if (sched_error == SCHED_ERROR_CODE_BAD_TSG ) {
/* id is unknown, preempt all runlists and do recovery */
gk20a_fifo_recover(g, 0, 0, false, false, false,
RC_TYPE_SCHED_ERR);
}
return false;
}
static const char * const invalid_str = "invalid";
static const char *const ctxsw_timeout_status_desc[] = {
"awaiting ack",
"eng was reset",
"ack received",
"dropped timeout"
};
static u32 gv11b_fifo_ctxsw_timeout_info(struct gk20a *g, u32 active_eng_id,
u32 *info_status)
{
u32 tsgid = FIFO_INVAL_TSG_ID;
u32 timeout_info;
u32 ctx_status;
timeout_info = gk20a_readl(g,
fifo_intr_ctxsw_timeout_info_r(active_eng_id));
/*
* ctxsw_state and tsgid are snapped at the point of the timeout and
* will not change while the corresponding INTR_CTXSW_TIMEOUT_ENGINE bit
* is PENDING.
*/
ctx_status = fifo_intr_ctxsw_timeout_info_ctxsw_state_v(timeout_info);
if (ctx_status ==
fifo_intr_ctxsw_timeout_info_ctxsw_state_load_v()) {
tsgid = fifo_intr_ctxsw_timeout_info_next_tsgid_v(timeout_info);
} else if (ctx_status ==
fifo_intr_ctxsw_timeout_info_ctxsw_state_switch_v() ||
ctx_status ==
fifo_intr_ctxsw_timeout_info_ctxsw_state_save_v()) {
tsgid = fifo_intr_ctxsw_timeout_info_prev_tsgid_v(timeout_info);
}
nvgpu_log_info(g, "ctxsw timeout info: tsgid = %d", tsgid);
/*
* STATUS indicates whether the context request ack was eventually
* received and whether a subsequent request timed out. This field is
* updated live while the corresponding INTR_CTXSW_TIMEOUT_ENGINE bit
* is PENDING. STATUS starts in AWAITING_ACK, and progresses to
* ACK_RECEIVED and finally ends with DROPPED_TIMEOUT.
*
* AWAITING_ACK - context request ack still not returned from engine.
* ENG_WAS_RESET - The engine was reset via a PRI write to NV_PMC_ENABLE
* or NV_PMC_ELPG_ENABLE prior to receiving the ack. Host will not
* expect ctx ack to return, but if it is already in flight, STATUS will
* transition shortly to ACK_RECEIVED unless the interrupt is cleared
* first. Once the engine is reset, additional context switches can
* occur; if one times out, STATUS will transition to DROPPED_TIMEOUT
* if the interrupt isn't cleared first.
* ACK_RECEIVED - The ack for the timed-out context request was
* received between the point of the timeout and this register being
* read. Note this STATUS can be reported during the load stage of the
* same context switch that timed out if the timeout occurred during the
* save half of a context switch. Additional context requests may have
* completed or may be outstanding, but no further context timeout has
* occurred. This simplifies checking for spurious context switch
* timeouts.
* DROPPED_TIMEOUT - The originally timed-out context request acked,
* but a subsequent context request then timed out.
* Information about the subsequent timeout is not stored; in fact, that
* context request may also have already been acked by the time SW
* reads this register. If not, there is a chance SW can get the
* dropped information by clearing the corresponding
* INTR_CTXSW_TIMEOUT_ENGINE bit and waiting for the timeout to occur
* again. Note, however, that if the engine does time out again,
* it may not be from the original request that caused the
* DROPPED_TIMEOUT state, as that request may
* be acked in the interim.
*/
*info_status = fifo_intr_ctxsw_timeout_info_status_v(timeout_info);
if (*info_status ==
fifo_intr_ctxsw_timeout_info_status_ack_received_v()) {
nvgpu_log_info(g, "ctxsw timeout info : ack received");
/* no need to recover */
tsgid = FIFO_INVAL_TSG_ID;
} else if (*info_status ==
fifo_intr_ctxsw_timeout_info_status_dropped_timeout_v()) {
nvgpu_log_info(g, "ctxsw timeout info : dropped timeout");
/* no need to recover */
tsgid = FIFO_INVAL_TSG_ID;
}
return tsgid;
}
bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr)
{
bool ret = false;
u32 tsgid = FIFO_INVAL_TSG_ID;
u32 engine_id, active_eng_id;
u32 timeout_val, ctxsw_timeout_engines;
u32 info_status;
const char *info_status_str;
if ((fifo_intr & fifo_intr_0_ctxsw_timeout_pending_f()) == 0U) {
return ret;
}
/* get ctxsw timedout engines */
ctxsw_timeout_engines = gk20a_readl(g, fifo_intr_ctxsw_timeout_r());
if (ctxsw_timeout_engines == 0U) {
nvgpu_err(g, "no eng ctxsw timeout pending");
return ret;
}
timeout_val = gk20a_readl(g, fifo_eng_ctxsw_timeout_r());
timeout_val = fifo_eng_ctxsw_timeout_period_v(timeout_val);
nvgpu_log_info(g, "eng ctxsw timeout period = 0x%x", timeout_val);
for (engine_id = 0; engine_id < g->fifo.num_engines; engine_id++) {
active_eng_id = g->fifo.active_engines_list[engine_id];
if ((ctxsw_timeout_engines &
fifo_intr_ctxsw_timeout_engine_pending_f(
active_eng_id)) != 0U) {
struct fifo_gk20a *f = &g->fifo;
u32 ms = 0;
bool verbose = false;
tsgid = gv11b_fifo_ctxsw_timeout_info(g, active_eng_id,
&info_status);
if (tsgid == FIFO_INVAL_TSG_ID) {
continue;
}
nvgpu_report_host_error(g, 0,
GPU_HOST_PFIFO_CTXSW_TIMEOUT_ERROR,
tsgid);
if (nvgpu_tsg_check_ctxsw_timeout(
&f->tsg[tsgid], &verbose, &ms)) {
ret = true;
info_status_str = invalid_str;
if (info_status <
ARRAY_SIZE(ctxsw_timeout_status_desc)) {
info_status_str =
ctxsw_timeout_status_desc[info_status];
}
nvgpu_err(g, "ctxsw timeout error: "
"active engine id =%u, %s=%d, info: %s ms=%u",
active_eng_id, "tsg", tsgid, info_status_str,
ms);
/* Cancel all channels' timeout */
gk20a_channel_timeout_restart_all_channels(g);
gk20a_fifo_recover(g, BIT32(active_eng_id),
tsgid, true, true, verbose,
RC_TYPE_CTXSW_TIMEOUT);
} else {
nvgpu_log_info(g,
"fifo is waiting for ctx switch: "
"for %d ms, %s=%d", ms, "tsg", tsgid);
}
}
}
/* clear interrupt */
gk20a_writel(g, fifo_intr_ctxsw_timeout_r(), ctxsw_timeout_engines);
return ret;
}
void gv11b_fifo_init_ramfc_eng_method_buffer(struct gk20a *g,
struct channel_gk20a *ch, struct nvgpu_mem *mem)
{


@@ -30,19 +30,6 @@
#define FIFO_INVAL_PBDMA_ID (~U32(0U))
#define FIFO_INVAL_VEID (~U32(0U))
/* engine context-switch request occurred while the engine was in reset */
#define SCHED_ERROR_CODE_ENGINE_RESET 0x00000005U
/*
* ERROR_CODE_BAD_TSG indicates that Host encountered a badly formed TSG header
* or a badly formed channel type runlist entry in the runlist. This is typically
* caused by encountering a new TSG entry in the middle of a TSG definition.
* A channel type entry having wrong runqueue selector can also cause this.
* Additionally this error code can indicate when a channel is encountered on
* the runlist which is outside of a TSG.
*/
#define SCHED_ERROR_CODE_BAD_TSG 0x00000020U
/* can be removed after runque support is added */
#define GR_RUNQUE 0U /* pbdma 0 */
@@ -69,7 +56,6 @@ void gv11b_dump_channel_status_ramfc(struct gk20a *g,
void gv11b_capture_channel_ram_dump(struct gk20a *g,
struct channel_gk20a *ch,
struct nvgpu_channel_dump_info *info);
u32 gv11b_fifo_intr_0_error_mask(struct gk20a *g);
int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
unsigned int id_type);
int gv11b_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch);
@@ -82,8 +68,6 @@ void gv11b_fifo_teardown_mask_intr(struct gk20a *g);
void gv11b_fifo_teardown_unmask_intr(struct gk20a *g);
void gv11b_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f);
int gv11b_init_fifo_reset_enable_hw(struct gk20a *g);
bool gv11b_fifo_handle_sched_error(struct gk20a *g);
bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr);
void gv11b_fifo_init_eng_method_buffers(struct gk20a *g,
struct tsg_gk20a *tsg);
void gv11b_fifo_deinit_eng_method_buffers(struct gk20a *g,


@@ -53,6 +53,8 @@
#include "hal/fifo/engines_gv11b.h"
#include "hal/fifo/userd_gk20a.h"
#include "hal/fifo/userd_gv11b.h"
#include "hal/fifo/fifo_intr_gk20a.h"
#include "hal/fifo/fifo_intr_gv11b.h"
#include "hal/gr/fecs_trace/fecs_trace_gm20b.h"
#include "hal/gr/fecs_trace/fecs_trace_gv11b.h"
#include "hal/gr/zbc/zbc_gp10b.h"
@@ -768,7 +770,6 @@ static const struct gpu_ops gv11b_ops = {
.get_engines_mask_on_id = gk20a_fifo_engines_on_id,
.dump_channel_status_ramfc = gv11b_dump_channel_status_ramfc,
.capture_channel_ram_dump = gv11b_capture_channel_ram_dump,
.intr_0_error_mask = gv11b_fifo_intr_0_error_mask,
.is_preempt_pending = gv11b_fifo_is_preempt_pending,
.reset_enable_hw = gv11b_init_fifo_reset_enable_hw,
.teardown_ch_tsg = gv11b_fifo_teardown_ch_tsg,
@@ -799,6 +800,10 @@ static const struct gpu_ops gv11b_ops = {
.runlist_busy_engines = gk20a_fifo_runlist_busy_engines,
.find_pbdma_for_runlist = gk20a_fifo_find_pbdma_for_runlist,
.init_ce_engine_info = gp10b_fifo_init_ce_engine_info,
.intr_0_enable = gv11b_fifo_intr_0_enable,
.intr_1_enable = gk20a_fifo_intr_1_enable,
.intr_0_isr = gv11b_fifo_intr_0_isr,
.intr_1_isr = gk20a_fifo_intr_1_isr,
},
.engine = {
.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,


@@ -0,0 +1,260 @@
/*
* Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/gk20a.h>
#include <nvgpu/log.h>
#include <nvgpu/io.h>
#include <nvgpu/soc.h>
#include <nvgpu/ptimer.h>
#include <nvgpu/channel.h>
#include <nvgpu/tsg.h>
#include <nvgpu/nvgpu_err.h>
#include <nvgpu/error_notifier.h>
#include <nvgpu/pbdma_status.h>
#include <hal/fifo/fifo_intr_gk20a.h>
#include <nvgpu/hw/gk20a/hw_fifo_gk20a.h>
#include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h> /* TODO: remove */
static u32 gk20a_fifo_intr_0_error_mask(struct gk20a *g)
{
u32 intr_0_error_mask =
fifo_intr_0_bind_error_pending_f() |
fifo_intr_0_sched_error_pending_f() |
fifo_intr_0_chsw_error_pending_f() |
fifo_intr_0_fb_flush_timeout_pending_f() |
fifo_intr_0_dropped_mmu_fault_pending_f() |
fifo_intr_0_mmu_fault_pending_f() |
fifo_intr_0_lb_error_pending_f() |
fifo_intr_0_pio_error_pending_f();
return intr_0_error_mask;
}
static u32 gk20a_fifo_intr_0_en_mask(struct gk20a *g)
{
u32 intr_0_en_mask;
intr_0_en_mask = gk20a_fifo_intr_0_error_mask(g);
intr_0_en_mask |= fifo_intr_0_runlist_event_pending_f() |
fifo_intr_0_pbdma_intr_pending_f();
return intr_0_en_mask;
}
void gk20a_fifo_intr_0_enable(struct gk20a *g, bool enable)
{
unsigned int i;
u32 intr_stall, timeout, mask;
u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
if (!enable) {
nvgpu_writel(g, fifo_intr_en_0_r(), 0U);
return;
}
if (g->ops.fifo.apply_ctxsw_timeout_intr != NULL) {
g->ops.fifo.apply_ctxsw_timeout_intr(g);
} else {
timeout = g->fifo_eng_timeout_us;
timeout = scale_ptimer(timeout,
ptimer_scalingfactor10x(g->ptimer_src_freq));
timeout |= fifo_eng_timeout_detection_enabled_f();
nvgpu_writel(g, fifo_eng_timeout_r(), timeout);
}
/* clear and enable pbdma interrupt */
for (i = 0; i < host_num_pbdma; i++) {
nvgpu_writel(g, pbdma_intr_0_r(i), U32_MAX);
nvgpu_writel(g, pbdma_intr_1_r(i), U32_MAX);
intr_stall = nvgpu_readl(g, pbdma_intr_stall_r(i));
intr_stall &= ~pbdma_intr_stall_lbreq_enabled_f();
nvgpu_writel(g, pbdma_intr_stall_r(i), intr_stall);
nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", i,
intr_stall);
nvgpu_writel(g, pbdma_intr_en_0_r(i), intr_stall);
intr_stall = nvgpu_readl(g, pbdma_intr_stall_1_r(i));
/*
* For bug 2082123
* Mask the unused HCE_RE_ILLEGAL_OP bit from the interrupt.
*/
intr_stall &= ~pbdma_intr_stall_1_hce_illegal_op_enabled_f();
nvgpu_log_info(g, "pbdma id:%u, intr_en_1 0x%08x", i,
intr_stall);
nvgpu_writel(g, pbdma_intr_en_1_r(i), intr_stall);
}
/* reset runlist interrupts */
nvgpu_writel(g, fifo_intr_runlist_r(), ~U32(0U));
/* clear and enable pfifo interrupt */
nvgpu_writel(g, fifo_intr_0_r(), U32_MAX);
mask = gk20a_fifo_intr_0_en_mask(g);
nvgpu_log_info(g, "fifo_intr_en_0 0x%08x", mask);
nvgpu_writel(g, fifo_intr_en_0_r(), mask);
}
void gk20a_fifo_intr_1_enable(struct gk20a *g, bool enable)
{
if (enable) {
nvgpu_writel(g, fifo_intr_en_1_r(),
fifo_intr_0_channel_intr_pending_f());
nvgpu_log_info(g, "fifo_intr_en_1 = 0x%08x",
nvgpu_readl(g, fifo_intr_en_1_r()));
} else {
nvgpu_writel(g, fifo_intr_en_1_r(), 0U);
}
}
u32 gk20a_fifo_intr_1_isr(struct gk20a *g)
{
u32 fifo_intr = nvgpu_readl(g, fifo_intr_0_r());
u32 clear_intr = 0U;
nvgpu_log(g, gpu_dbg_intr, "fifo nonstall isr %08x\n", fifo_intr);
if ((fifo_intr & fifo_intr_0_channel_intr_pending_f()) != 0U) {
clear_intr = fifo_intr_0_channel_intr_pending_f();
}
nvgpu_writel(g, fifo_intr_0_r(), clear_intr);
return GK20A_NONSTALL_OPS_WAKEUP_SEMAPHORE;
}
void gk20a_fifo_intr_handle_chsw_error(struct gk20a *g)
{
u32 intr;
intr = nvgpu_readl(g, fifo_intr_chsw_error_r());
nvgpu_report_host_error(g, 0,
GPU_HOST_PFIFO_CHSW_ERROR, intr);
nvgpu_err(g, "chsw: %08x", intr);
g->ops.gr.dump_gr_falcon_stats(g);
nvgpu_writel(g, fifo_intr_chsw_error_r(), intr);
}
static u32 gk20a_fifo_intr_handle_errors(struct gk20a *g, u32 fifo_intr)
{
u32 handled = 0U;
nvgpu_log_fn(g, "fifo_intr=0x%08x", fifo_intr);
if ((fifo_intr & fifo_intr_0_pio_error_pending_f()) != 0U) {
/* pio mode is unused. this shouldn't happen, ever. */
/* should we clear it or just leave it pending? */
nvgpu_err(g, "fifo pio error!");
BUG();
}
if ((fifo_intr & fifo_intr_0_bind_error_pending_f()) != 0U) {
u32 bind_error = nvgpu_readl(g, fifo_intr_bind_error_r());
nvgpu_report_host_error(g, 0,
GPU_HOST_PFIFO_BIND_ERROR, bind_error);
nvgpu_err(g, "fifo bind error: 0x%08x", bind_error);
handled |= fifo_intr_0_bind_error_pending_f();
}
if ((fifo_intr & fifo_intr_0_chsw_error_pending_f()) != 0U) {
gk20a_fifo_intr_handle_chsw_error(g);
handled |= fifo_intr_0_chsw_error_pending_f();
}
if ((fifo_intr & fifo_intr_0_fb_flush_timeout_pending_f()) != 0U) {
nvgpu_err(g, "fifo fb flush timeout error");
handled |= fifo_intr_0_fb_flush_timeout_pending_f();
}
if ((fifo_intr & fifo_intr_0_lb_error_pending_f()) != 0U) {
nvgpu_err(g, "fifo lb error");
handled |= fifo_intr_0_lb_error_pending_f();
}
return handled;
}
void gk20a_fifo_intr_handle_runlist_event(struct gk20a *g)
{
u32 runlist_event = nvgpu_readl(g, fifo_intr_runlist_r());
nvgpu_log(g, gpu_dbg_intr, "runlist event %08x",
runlist_event);
nvgpu_writel(g, fifo_intr_runlist_r(), runlist_event);
}
void gk20a_fifo_intr_0_isr(struct gk20a *g)
{
u32 clear_intr = 0U;
u32 fifo_intr = nvgpu_readl(g, fifo_intr_0_r());
/* TODO: sw_ready is needed only for recovery part */
if (!g->fifo.sw_ready) {
nvgpu_err(g, "unhandled fifo intr: 0x%08x", fifo_intr);
nvgpu_writel(g, fifo_intr_0_r(), fifo_intr);
return;
}
/* note we're not actually in an "isr", but rather
* in a threaded interrupt context... */
nvgpu_mutex_acquire(&g->fifo.intr.isr.mutex);
nvgpu_log(g, gpu_dbg_intr, "fifo isr %08x", fifo_intr);
if (unlikely((fifo_intr & gk20a_fifo_intr_0_error_mask(g)) !=
0U)) {
clear_intr |= gk20a_fifo_intr_handle_errors(g,
fifo_intr);
}
if ((fifo_intr & fifo_intr_0_runlist_event_pending_f()) != 0U) {
gk20a_fifo_intr_handle_runlist_event(g);
clear_intr |= fifo_intr_0_runlist_event_pending_f();
}
if ((fifo_intr & fifo_intr_0_pbdma_intr_pending_f()) != 0U) {
clear_intr |= fifo_pbdma_isr(g, fifo_intr);
}
if ((fifo_intr & fifo_intr_0_mmu_fault_pending_f()) != 0U) {
(void) gk20a_fifo_handle_mmu_fault(g, 0, INVAL_ID, false);
clear_intr |= fifo_intr_0_mmu_fault_pending_f();
}
if ((fifo_intr & fifo_intr_0_sched_error_pending_f()) != 0U) {
(void) g->ops.fifo.handle_sched_error(g);
clear_intr |= fifo_intr_0_sched_error_pending_f();
}
if ((fifo_intr & fifo_intr_0_dropped_mmu_fault_pending_f()) != 0U) {
gk20a_fifo_handle_dropped_mmu_fault(g);
clear_intr |= fifo_intr_0_dropped_mmu_fault_pending_f();
}
nvgpu_mutex_release(&g->fifo.intr.isr.mutex);
nvgpu_writel(g, fifo_intr_0_r(), clear_intr);
}


@@ -0,0 +1,38 @@
/*
* Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_FIFO_INTR_GK20A_H
#define NVGPU_FIFO_INTR_GK20A_H
#include <nvgpu/types.h>
struct gk20a;
void gk20a_fifo_intr_0_enable(struct gk20a *g, bool enable);
void gk20a_fifo_intr_1_enable(struct gk20a *g, bool enable);
void gk20a_fifo_intr_0_isr(struct gk20a *g);
u32 gk20a_fifo_intr_1_isr(struct gk20a *g);
void gk20a_fifo_intr_handle_chsw_error(struct gk20a *g);
void gk20a_fifo_intr_handle_runlist_event(struct gk20a *g);
#endif /* NVGPU_FIFO_INTR_GK20A_H */
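
Usage sketch, condensed from the reset_enable_hw and suspend hunks earlier
in this commit (not a new API; both call sites appear verbatim above):

	/* init/reset: clears pending intrs, then unmasks (per the .c above) */
	g->ops.fifo.intr_0_enable(g, true);
	g->ops.fifo.intr_1_enable(g, true);

	/* suspend: mask everything again */
	g->ops.fifo.intr_0_enable(g, false);
	g->ops.fifo.intr_1_enable(g, false);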


@@ -0,0 +1,443 @@
/*
* Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/gk20a.h>
#include <nvgpu/log.h>
#include <nvgpu/io.h>
#include <nvgpu/soc.h>
#include <nvgpu/ptimer.h>
#include <nvgpu/channel.h>
#include <nvgpu/tsg.h>
#include <nvgpu/nvgpu_err.h>
#include <nvgpu/error_notifier.h>
#include <hal/fifo/fifo_intr_gk20a.h>
#include <hal/fifo/fifo_intr_gv11b.h>
#include <nvgpu/hw/gv11b/hw_fifo_gv11b.h>
#include <nvgpu/hw/gv11b/hw_pbdma_gv11b.h> /* TODO: remove */
static u32 gv11b_fifo_intr_0_error_mask(struct gk20a *g)
{
u32 intr_0_error_mask =
fifo_intr_0_bind_error_pending_f() |
fifo_intr_0_sched_error_pending_f() |
fifo_intr_0_chsw_error_pending_f() |
fifo_intr_0_memop_timeout_pending_f() |
fifo_intr_0_lb_error_pending_f();
return intr_0_error_mask;
}
static u32 gv11b_fifo_intr_0_en_mask(struct gk20a *g)
{
u32 intr_0_en_mask;
intr_0_en_mask = gv11b_fifo_intr_0_error_mask(g);
intr_0_en_mask |= fifo_intr_0_pbdma_intr_pending_f() |
fifo_intr_0_ctxsw_timeout_pending_f();
return intr_0_en_mask;
}
void gv11b_fifo_intr_0_enable(struct gk20a *g, bool enable)
{
unsigned int i;
u32 intr_stall, timeout, mask;
u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
if (!enable) {
nvgpu_writel(g, fifo_intr_en_0_r(), 0);
return;
}
/* clear and enable pbdma interrupt */
for (i = 0; i < host_num_pbdma; i++) {
nvgpu_writel(g, pbdma_intr_0_r(i), U32_MAX);
nvgpu_writel(g, pbdma_intr_1_r(i), U32_MAX);
intr_stall = nvgpu_readl(g, pbdma_intr_stall_r(i));
nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", i,
intr_stall);
nvgpu_writel(g, pbdma_intr_en_0_r(i), intr_stall);
intr_stall = nvgpu_readl(g, pbdma_intr_stall_1_r(i));
/*
* For bug 2082123
* Mask the unused HCE_RE_ILLEGAL_OP bit from the interrupt.
*/
intr_stall &= ~pbdma_intr_stall_1_hce_illegal_op_enabled_f();
nvgpu_log_info(g, "pbdma id:%u, intr_en_1 0x%08x", i,
intr_stall);
nvgpu_writel(g, pbdma_intr_en_1_r(i), intr_stall);
}
/* clear ctxsw timeout interrupts */
nvgpu_writel(g, fifo_intr_ctxsw_timeout_r(), ~U32(0U));
if (nvgpu_platform_is_silicon(g)) {
/* enable ctxsw timeout */
timeout = g->fifo_eng_timeout_us;
timeout = scale_ptimer(timeout,
ptimer_scalingfactor10x(g->ptimer_src_freq));
timeout |= fifo_eng_ctxsw_timeout_detection_enabled_f();
nvgpu_writel(g, fifo_eng_ctxsw_timeout_r(), timeout);
} else {
timeout = nvgpu_readl(g, fifo_eng_ctxsw_timeout_r());
nvgpu_log_info(g,
"fifo_eng_ctxsw_timeout reg val = 0x%08x",
timeout);
timeout = set_field(timeout,
fifo_eng_ctxsw_timeout_period_m(),
fifo_eng_ctxsw_timeout_period_max_f());
timeout = set_field(timeout,
fifo_eng_ctxsw_timeout_detection_m(),
fifo_eng_ctxsw_timeout_detection_disabled_f());
nvgpu_log_info(g,
"new fifo_eng_ctxsw_timeout reg val = 0x%08x",
timeout);
nvgpu_writel(g, fifo_eng_ctxsw_timeout_r(), timeout);
}
/* clear runlist interrupts */
nvgpu_writel(g, fifo_intr_runlist_r(), ~U32(0U));
/* clear and enable pfifo interrupt */
nvgpu_writel(g, fifo_intr_0_r(), U32_MAX);
mask = gv11b_fifo_intr_0_en_mask(g);
nvgpu_log_info(g, "fifo_intr_en_0 0x%08x", mask);
nvgpu_writel(g, fifo_intr_en_0_r(), mask);
}
static const char *const gv11b_sched_error_str[] = {
"xxx-0",
"xxx-1",
"xxx-2",
"xxx-3",
"xxx-4",
"engine_reset",
"rl_ack_timeout",
"rl_ack_extra",
"rl_rdat_timeout",
"rl_rdat_extra",
"eng_ctxsw_timeout",
"xxx-b",
"rl_req_timeout",
"new_runlist",
"code_config_while_busy",
"xxx-f",
"xxx-0x10",
"xxx-0x11",
"xxx-0x12",
"xxx-0x13",
"xxx-0x14",
"xxx-0x15",
"xxx-0x16",
"xxx-0x17",
"xxx-0x18",
"xxx-0x19",
"xxx-0x1a",
"xxx-0x1b",
"xxx-0x1c",
"xxx-0x1d",
"xxx-0x1e",
"xxx-0x1f",
"bad_tsg",
};
bool gv11b_fifo_handle_sched_error(struct gk20a *g)
{
u32 sched_error;
sched_error = nvgpu_readl(g, fifo_intr_sched_error_r());
if (sched_error < ARRAY_SIZE(gv11b_sched_error_str)) {
nvgpu_err(g, "fifo sched error :%s",
gv11b_sched_error_str[sched_error]);
} else {
nvgpu_err(g, "fifo sched error code not supported");
}
nvgpu_report_host_error(g, 0,
GPU_HOST_PFIFO_SCHED_ERROR, sched_error);
if (sched_error == SCHED_ERROR_CODE_BAD_TSG) {
/* id is unknown, preempt all runlists and do recovery */
gk20a_fifo_recover(g, 0, 0, false, false, false,
RC_TYPE_SCHED_ERR);
}
return false;
}
static const char * const invalid_str = "invalid";
static const char *const ctxsw_timeout_status_desc[] = {
"awaiting ack",
"eng was reset",
"ack received",
"dropped timeout"
};
static u32 gv11b_fifo_ctxsw_timeout_info(struct gk20a *g, u32 active_eng_id,
u32 *info_status)
{
u32 tsgid = FIFO_INVAL_TSG_ID;
u32 timeout_info;
u32 ctx_status;
timeout_info = nvgpu_readl(g,
fifo_intr_ctxsw_timeout_info_r(active_eng_id));
/*
* ctxsw_state and tsgid are snapped at the point of the timeout and
* will not change while the corresponding INTR_CTXSW_TIMEOUT_ENGINE bit
* is PENDING.
*/
ctx_status = fifo_intr_ctxsw_timeout_info_ctxsw_state_v(timeout_info);
if (ctx_status ==
fifo_intr_ctxsw_timeout_info_ctxsw_state_load_v()) {
tsgid = fifo_intr_ctxsw_timeout_info_next_tsgid_v(timeout_info);
} else if (ctx_status ==
fifo_intr_ctxsw_timeout_info_ctxsw_state_switch_v() ||
ctx_status ==
fifo_intr_ctxsw_timeout_info_ctxsw_state_save_v()) {
tsgid = fifo_intr_ctxsw_timeout_info_prev_tsgid_v(timeout_info);
}
nvgpu_log_info(g, "ctxsw timeout info: tsgid = %d", tsgid);
/*
* STATUS indicates whether the context request ack was eventually
* received and whether a subsequent request timed out. This field is
* updated live while the corresponding INTR_CTXSW_TIMEOUT_ENGINE bit
* is PENDING. STATUS starts in AWAITING_ACK, and progresses to
* ACK_RECEIVED and finally ends with DROPPED_TIMEOUT.
*
* AWAITING_ACK - context request ack still not returned from engine.
* ENG_WAS_RESET - The engine was reset via a PRI write to NV_PMC_ENABLE
* or NV_PMC_ELPG_ENABLE prior to receiving the ack. Host will not
* expect ctx ack to return, but if it is already in flight, STATUS will
* transition shortly to ACK_RECEIVED unless the interrupt is cleared
* first. Once the engine is reset, additional context switches can
* occur; if one times out, STATUS will transition to DROPPED_TIMEOUT
* if the interrupt isn't cleared first.
* ACK_RECEIVED - The ack for the timed-out context request was
* received between the point of the timeout and this register being
* read. Note this STATUS can be reported during the load stage of the
* same context switch that timed out if the timeout occurred during the
* save half of a context switch. Additional context requests may have
* completed or may be outstanding, but no further context timeout has
* occurred. This simplifies checking for spurious context switch
* timeouts.
* DROPPED_TIMEOUT - The originally timed-out context request acked,
* but a subsequent context request then timed out.
* Information about the subsequent timeout is not stored; in fact, that
* context request may also have already been acked by the time SW
* reads this register. If not, there is a chance SW can get the
* dropped information by clearing the corresponding
* INTR_CTXSW_TIMEOUT_ENGINE bit and waiting for the timeout to occur
* again. Note, however, that if the engine does time out again,
* it may not be from the original request that caused the
* DROPPED_TIMEOUT state, as that request may
* be acked in the interim.
*/
*info_status = fifo_intr_ctxsw_timeout_info_status_v(timeout_info);
if (*info_status ==
fifo_intr_ctxsw_timeout_info_status_ack_received_v()) {
nvgpu_log_info(g, "ctxsw timeout info : ack received");
/* no need to recover */
tsgid = FIFO_INVAL_TSG_ID;
} else if (*info_status ==
fifo_intr_ctxsw_timeout_info_status_dropped_timeout_v()) {
nvgpu_log_info(g, "ctxsw timeout info : dropped timeout");
/* no need to recover */
tsgid = FIFO_INVAL_TSG_ID;
}
return tsgid;
}
bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr)
{
bool ret = false;
u32 tsgid = FIFO_INVAL_TSG_ID;
u32 engine_id, active_eng_id;
u32 timeout_val, ctxsw_timeout_engines;
u32 info_status;
const char *info_status_str;
if ((fifo_intr & fifo_intr_0_ctxsw_timeout_pending_f()) == 0U) {
return ret;
}
/* get ctxsw timedout engines */
ctxsw_timeout_engines = nvgpu_readl(g, fifo_intr_ctxsw_timeout_r());
if (ctxsw_timeout_engines == 0U) {
nvgpu_err(g, "no eng ctxsw timeout pending");
return ret;
}
timeout_val = nvgpu_readl(g, fifo_eng_ctxsw_timeout_r());
timeout_val = fifo_eng_ctxsw_timeout_period_v(timeout_val);
nvgpu_log_info(g, "eng ctxsw timeout period = 0x%x", timeout_val);
for (engine_id = 0; engine_id < g->fifo.num_engines; engine_id++) {
active_eng_id = g->fifo.active_engines_list[engine_id];
if ((ctxsw_timeout_engines &
fifo_intr_ctxsw_timeout_engine_pending_f(
active_eng_id)) != 0U) {
struct fifo_gk20a *f = &g->fifo;
u32 ms = 0;
bool verbose = false;
tsgid = gv11b_fifo_ctxsw_timeout_info(g, active_eng_id,
&info_status);
if (tsgid == FIFO_INVAL_TSG_ID) {
continue;
}
if (nvgpu_tsg_check_ctxsw_timeout(
&f->tsg[tsgid], &verbose, &ms)) {
ret = true;
info_status_str = invalid_str;
if (info_status <
ARRAY_SIZE(ctxsw_timeout_status_desc)) {
info_status_str =
ctxsw_timeout_status_desc[info_status];
}
nvgpu_err(g, "ctxsw timeout error: "
"active engine id =%u, %s=%d, info: %s ms=%u",
active_eng_id, "tsg", tsgid, info_status_str,
ms);
/* Cancel all channels' timeout */
gk20a_channel_timeout_restart_all_channels(g);
gk20a_fifo_recover(g, BIT32(active_eng_id),
tsgid, true, true, verbose,
RC_TYPE_CTXSW_TIMEOUT);
} else {
nvgpu_log_info(g,
"fifo is waiting for ctx switch: "
"for %d ms, %s=%d", ms, "tsg", tsgid);
}
}
}
/* clear interrupt */
nvgpu_writel(g, fifo_intr_ctxsw_timeout_r(), ctxsw_timeout_engines);
return ret;
}
static u32 gv11b_fifo_intr_handle_errors(struct gk20a *g, u32 fifo_intr)
{
u32 handled = 0U;
nvgpu_log_fn(g, "fifo_intr=0x%08x", fifo_intr);
if ((fifo_intr & fifo_intr_0_bind_error_pending_f()) != 0U) {
u32 bind_error = nvgpu_readl(g, fifo_intr_bind_error_r());
nvgpu_report_host_error(g, 0,
GPU_HOST_PFIFO_BIND_ERROR, bind_error);
nvgpu_err(g, "fifo bind error: 0x%08x", bind_error);
handled |= fifo_intr_0_bind_error_pending_f();
}
if ((fifo_intr & fifo_intr_0_chsw_error_pending_f()) != 0U) {
gk20a_fifo_intr_handle_chsw_error(g);
handled |= fifo_intr_0_chsw_error_pending_f();
}
if ((fifo_intr & fifo_intr_0_memop_timeout_pending_f()) != 0U) {
nvgpu_err(g, "fifo memop timeout error");
handled |= fifo_intr_0_memop_timeout_pending_f();
}
if ((fifo_intr & fifo_intr_0_lb_error_pending_f()) != 0U) {
nvgpu_err(g, "fifo lb error");
handled |= fifo_intr_0_lb_error_pending_f();
}
return handled;
}
void gv11b_fifo_intr_0_isr(struct gk20a *g)
{
u32 clear_intr = 0U;
u32 fifo_intr = nvgpu_readl(g, fifo_intr_0_r());
/* TODO: sw_ready is needed only for recovery part */
if (!g->fifo.sw_ready) {
nvgpu_err(g, "unhandled fifo intr: 0x%08x", fifo_intr);
nvgpu_writel(g, fifo_intr_0_r(), fifo_intr);
return;
}
/* note we're not actually in an "isr", but rather
* in a threaded interrupt context... */
nvgpu_mutex_acquire(&g->fifo.intr.isr.mutex);
nvgpu_log(g, gpu_dbg_intr, "fifo isr %08x", fifo_intr);
if (unlikely((fifo_intr & gv11b_fifo_intr_0_error_mask(g)) !=
0U)) {
clear_intr |= gv11b_fifo_intr_handle_errors(g,
fifo_intr);
}
if ((fifo_intr & fifo_intr_0_runlist_event_pending_f()) != 0U) {
gk20a_fifo_intr_handle_runlist_event(g);
clear_intr |= fifo_intr_0_runlist_event_pending_f();
}
if ((fifo_intr & fifo_intr_0_pbdma_intr_pending_f()) != 0U) {
clear_intr |= fifo_pbdma_isr(g, fifo_intr);
}
if ((fifo_intr & fifo_intr_0_sched_error_pending_f()) != 0U) {
(void) g->ops.fifo.handle_sched_error(g);
clear_intr |= fifo_intr_0_sched_error_pending_f();
}
if ((fifo_intr & fifo_intr_0_ctxsw_timeout_pending_f()) != 0U) {
if (g->ops.fifo.handle_ctxsw_timeout != NULL) {
g->ops.fifo.handle_ctxsw_timeout(g, fifo_intr);
} else {
nvgpu_err(g, "unhandled fifo ctxsw timeout intr");
}
}
nvgpu_mutex_release(&g->fifo.intr.isr.mutex);
nvgpu_writel(g, fifo_intr_0_r(), clear_intr);
}


@@ -0,0 +1,46 @@
/*
* Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_FIFO_INTR_GV11B_H
#define NVGPU_FIFO_INTR_GV11B_H
#include <nvgpu/types.h>
/*
* ERROR_CODE_BAD_TSG indicates that Host encountered a badly formed TSG header
* or a badly formed channel type runlist entry in the runlist. This is typically
* caused by encountering a new TSG entry in the middle of a TSG definition.
* A channel type entry having wrong runqueue selector can also cause this.
* Additionally this error code can indicate when a channel is encountered on
* the runlist which is outside of a TSG.
*/
#define SCHED_ERROR_CODE_BAD_TSG 0x00000020U
struct gk20a;
void gv11b_fifo_intr_0_enable(struct gk20a *g, bool enable);
void gv11b_fifo_intr_0_isr(struct gk20a *g);
bool gv11b_fifo_handle_sched_error(struct gk20a *g);
bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr);
#endif /* NVGPU_FIFO_INTR_GV11B_H */


@@ -35,6 +35,8 @@
#define RC_TYPE_FORCE_RESET 7U
#define RC_TYPE_SCHED_ERR 8U
#define INVAL_ID (~U32(0U))
struct gk20a;
struct nvgpu_channel_hw_state {


@@ -906,7 +906,6 @@ struct gpu_ops {
void (*capture_channel_ram_dump)(struct gk20a *g,
struct channel_gk20a *ch,
struct nvgpu_channel_dump_info *info);
u32 (*intr_0_error_mask)(struct gk20a *g);
int (*is_preempt_pending)(struct gk20a *g, u32 id,
unsigned int id_type);
int (*reset_enable_hw)(struct gk20a *g);
@@ -947,6 +946,12 @@ struct gpu_ops {
u32 hw_id, u32 inst, u32 err_id,
u32 intr_info);
} err_ops;
void (*intr_0_enable)(struct gk20a *g, bool enable);
void (*intr_0_isr)(struct gk20a *g);
void (*intr_1_enable)(struct gk20a *g, bool enable);
u32 (*intr_1_isr)(struct gk20a *g);
} fifo;
struct {
int (*reschedule)(struct channel_gk20a *ch, bool preempt_next);
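
Taken together, a minimal sketch of how a native chip consumes this
interface. The ops assignments are copied from the gm20b table above;
chip_isr_example() is a hypothetical condensation of gm20b_mc_isr_stall()
and gm20b_mc_isr_nonstall() from the mc hunks in this commit:

	static const struct gpu_ops chip_ops = {
		.fifo = {
			.intr_0_enable = gk20a_fifo_intr_0_enable,
			.intr_1_enable = gk20a_fifo_intr_1_enable,
			.intr_0_isr = gk20a_fifo_intr_0_isr,
			.intr_1_isr = gk20a_fifo_intr_1_isr,
		},
	};

	static void chip_isr_example(struct gk20a *g, u32 mc_intr_0,
			u32 mc_intr_1)
	{
		u32 ops = 0U;

		if ((mc_intr_0 & mc_intr_pfifo_pending_f()) != 0U) {
			/* stalling path: errors, runlist events, pbdma */
			g->ops.fifo.intr_0_isr(g);
		}
		if (g->ops.mc.is_intr1_pending(g, NVGPU_UNIT_FIFO, mc_intr_1)) {
			/* nonstalling path: returns GK20A_NONSTALL_OPS_* flags */
			ops |= g->ops.fifo.intr_1_isr(g);
		}
		(void)ops; /* callers act on the flags outside intr context */
	}

Note that the vGPU tables above leave all four ops NULL, presumably because
the virtual stack never dispatches PFIFO interrupts through these handlers.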


@@ -56,6 +56,8 @@
#include "hal/fifo/engines_gv11b.h"
#include "hal/fifo/userd_gk20a.h"
#include "hal/fifo/userd_gv11b.h"
#include "hal/fifo/fifo_intr_gk20a.h"
#include "hal/fifo/fifo_intr_gv11b.h"
#include "hal/gr/fecs_trace/fecs_trace_gm20b.h"
#include "hal/gr/fecs_trace/fecs_trace_gv11b.h"
#include "hal/gr/zbc/zbc_gp10b.h"
@@ -848,7 +850,6 @@ static const struct gpu_ops tu104_ops = {
.get_engines_mask_on_id = gk20a_fifo_engines_on_id,
.dump_channel_status_ramfc = gv11b_dump_channel_status_ramfc,
.capture_channel_ram_dump = gv11b_capture_channel_ram_dump,
.intr_0_error_mask = gv11b_fifo_intr_0_error_mask,
.is_preempt_pending = gv11b_fifo_is_preempt_pending,
.reset_enable_hw = gv11b_init_fifo_reset_enable_hw,
.teardown_ch_tsg = gv11b_fifo_teardown_ch_tsg,
@@ -881,6 +882,10 @@ static const struct gpu_ops tu104_ops = {
.runlist_busy_engines = gk20a_fifo_runlist_busy_engines,
.find_pbdma_for_runlist = gk20a_fifo_find_pbdma_for_runlist,
.init_ce_engine_info = gp10b_fifo_init_ce_engine_info,
.intr_0_enable = gv11b_fifo_intr_0_enable,
.intr_1_enable = gk20a_fifo_intr_1_enable,
.intr_0_isr = gv11b_fifo_intr_0_isr,
.intr_1_isr = gk20a_fifo_intr_1_isr,
},
.engine = {
.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,