gpu: nvgpu: move mc_intr_pbus from stall (intr_0) to nonstall (intr_1) tree

Nvgpu does not support nested interrupts, and as a result priv/pbus
interrupts do not reach the CPU while other interrupts on the intr_0
(stall) tree are being processed. This issue is not specific to
priv/pbus, but since pbus errors are critical, it is important to
detect them early on.

Below is the snippet from one of the failing logs where nvgpu
is doing recovery to process gr interrupt.
Right after GR engine is reset (PGRAPH of PMC_ENABLE), failing priv
accesses should have triggered pbus interrupt but it does not reach cpu
until gr interrupt is handled. Any interrupt that requires recovery will
take longer to finish isr as recovery is done as part of isr.
Also intr_0 (stall) interrupts are paused while stall interrupt is being
processed.

gm20b_gr_falcon_bind_instblk:147  [ERR]  arbiter idle timeout, status: badf1020
gm20b_gr_falcon_wait_for_fecs_arb_idle:125  [ERR]  arbiter idle timeout, fecs ctxsw status: 0xbadf1020

The fix to detect pbus intr while other stall interrupts are being
processed is to move pbus intr enable/disable/clear/handle to the
nonstall (intr_1) tree. Configure bus_intr_en_1 to route pbus to the
nonstall tree. Priv interrupts cannot be moved to the nonstall (intr_1)
tree due to h/w not supporting this.

In Turing, moving pbus intr to nonstall is not feasible as mc_intr(1)
tree is deprecated. Add Turing specific stall intr handler hals with
original logic to route pbus intr to mc_intr(0).

JIRA NVGPU-25
Bug 200603566

Change-Id: I36fc376800802f20a0ea581b4f787bcc6c73ec7e
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2354192
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
This commit is contained in:
Seema Khowala
2020-06-01 16:00:56 -07:00
committed by Alex Waterman
parent 58c7969687
commit db30ea3362
17 changed files with 137 additions and 53 deletions

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -36,17 +36,21 @@
int gk20a_bus_init_hw(struct gk20a *g) int gk20a_bus_init_hw(struct gk20a *g)
{ {
u32 intr_en_mask = 0; u32 intr_en_mask = 0U;
nvgpu_mc_intr_nonstall_unit_config(g, MC_INTR_UNIT_BUS, MC_INTR_ENABLE);
/*
* Note: bus_intr_en_0 is for routing intr to stall tree (mc_intr_0)
* bus_intr_en_1 is for routing bus intr to nostall tree (mc_intr_1)
*/
if (nvgpu_platform_is_silicon(g) || nvgpu_platform_is_fpga(g)) { if (nvgpu_platform_is_silicon(g) || nvgpu_platform_is_fpga(g)) {
intr_en_mask = bus_intr_en_0_pri_squash_m() | intr_en_mask = bus_intr_en_1_pri_squash_m() |
bus_intr_en_0_pri_fecserr_m() | bus_intr_en_1_pri_fecserr_m() |
bus_intr_en_0_pri_timeout_m(); bus_intr_en_1_pri_timeout_m();
} }
nvgpu_mc_intr_stall_unit_config(g, MC_INTR_UNIT_BUS, MC_INTR_ENABLE); nvgpu_writel(g, bus_intr_en_1_r(), intr_en_mask);
nvgpu_writel(g, bus_intr_en_0_r(), intr_en_mask);
if (g->ops.bus.configure_debug_bus != NULL) { if (g->ops.bus.configure_debug_bus != NULL) {
g->ops.bus.configure_debug_bus(g); g->ops.bus.configure_debug_bus(g);

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -21,6 +21,8 @@
*/ */
#include <nvgpu/timers.h> #include <nvgpu/timers.h>
#include <nvgpu/soc.h>
#include <nvgpu/io.h>
#include <nvgpu/mm.h> #include <nvgpu/mm.h>
#include <nvgpu/gk20a.h> #include <nvgpu/gk20a.h>
@@ -31,6 +33,31 @@
#include <nvgpu/hw/tu104/hw_bus_tu104.h> #include <nvgpu/hw/tu104/hw_bus_tu104.h>
#include <nvgpu/hw/tu104/hw_func_tu104.h> #include <nvgpu/hw/tu104/hw_func_tu104.h>
int tu104_bus_init_hw(struct gk20a *g)
{
u32 intr_en_mask = 0U;
nvgpu_mc_intr_stall_unit_config(g, MC_INTR_UNIT_BUS, MC_INTR_ENABLE);
/*
* Note: bus_intr_en_0 is for routing intr to stall tree (mc_intr_0)
* bus_intr_en_1 is for routing bus intr to nostall tree (mc_intr_1)
*/
if (nvgpu_platform_is_silicon(g) || nvgpu_platform_is_fpga(g)) {
intr_en_mask = bus_intr_en_0_pri_squash_m() |
bus_intr_en_0_pri_fecserr_m() |
bus_intr_en_0_pri_timeout_m();
}
nvgpu_writel(g, bus_intr_en_0_r(), intr_en_mask);
if (g->ops.bus.configure_debug_bus != NULL) {
g->ops.bus.configure_debug_bus(g);
}
return 0;
}
int bus_tu104_bar2_bind(struct gk20a *g, struct nvgpu_mem *bar2_inst) int bus_tu104_bar2_bind(struct gk20a *g, struct nvgpu_mem *bar2_inst)
{ {
struct nvgpu_timeout timeout; struct nvgpu_timeout timeout;

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -26,6 +26,7 @@
struct gk20a; struct gk20a;
struct nvgpu_mem; struct nvgpu_mem;
int tu104_bus_init_hw(struct gk20a *g);
int bus_tu104_bar2_bind(struct gk20a *g, struct nvgpu_mem *bar2_inst); int bus_tu104_bar2_bind(struct gk20a *g, struct nvgpu_mem *bar2_inst);
#endif /* NVGPU_BUS_TU104_H */ #endif /* NVGPU_BUS_TU104_H */

View File

@@ -1368,7 +1368,7 @@ static const struct gpu_ops tu104_ops = {
#endif #endif
.intr_stall_unit_config = intr_tu104_stall_unit_config, .intr_stall_unit_config = intr_tu104_stall_unit_config,
.intr_nonstall_unit_config = intr_tu104_nonstall_unit_config, .intr_nonstall_unit_config = intr_tu104_nonstall_unit_config,
.isr_stall = mc_gp10b_isr_stall, .isr_stall = mc_tu104_isr_stall,
.intr_stall = intr_tu104_stall, .intr_stall = intr_tu104_stall,
.intr_stall_pause = intr_tu104_stall_pause, .intr_stall_pause = intr_tu104_stall_pause,
.intr_stall_resume = intr_tu104_stall_resume, .intr_stall_resume = intr_tu104_stall_resume,
@@ -1427,7 +1427,7 @@ static const struct gpu_ops tu104_ops = {
}, },
#endif #endif
.bus = { .bus = {
.init_hw = gk20a_bus_init_hw, .init_hw = tu104_bus_init_hw,
.isr = gk20a_bus_isr, .isr = gk20a_bus_isr,
.bar1_bind = NULL, .bar1_bind = NULL,
.bar2_bind = bus_tu104_bar2_bind, .bar2_bind = bus_tu104_bar2_bind,

View File

@@ -85,9 +85,6 @@ void gm20b_mc_isr_stall(struct gk20a *g)
if ((mc_intr_0 & mc_intr_ltc_pending_f()) != 0U) { if ((mc_intr_0 & mc_intr_ltc_pending_f()) != 0U) {
g->ops.mc.ltc_isr(g); g->ops.mc.ltc_isr(g);
} }
if ((mc_intr_0 & mc_intr_pbus_pending_f()) != 0U) {
g->ops.bus.isr(g);
}
} }
void gm20b_mc_intr_mask(struct gk20a *g) void gm20b_mc_intr_mask(struct gk20a *g)

View File

@@ -65,6 +65,10 @@ u32 gm20b_mc_isr_nonstall(struct gk20a *g)
mc_intr_1 = g->ops.mc.intr_nonstall(g); mc_intr_1 = g->ops.mc.intr_nonstall(g);
if ((mc_intr_1 & mc_intr_pbus_pending_f()) != 0U) {
g->ops.bus.isr(g);
}
if (g->ops.mc.is_intr1_pending(g, NVGPU_UNIT_FIFO, mc_intr_1)) { if (g->ops.mc.is_intr1_pending(g, NVGPU_UNIT_FIFO, mc_intr_1)) {
ops |= g->ops.fifo.intr_1_isr(g); ops |= g->ops.fifo.intr_1_isr(g);
} }

View File

@@ -31,10 +31,15 @@
struct gk20a; struct gk20a;
enum nvgpu_unit; enum nvgpu_unit;
enum nvgpu_fifo_engine;
void mc_gp10b_intr_mask(struct gk20a *g); void mc_gp10b_intr_mask(struct gk20a *g);
void mc_gp10b_intr_stall_unit_config(struct gk20a *g, u32 unit, bool enable); void mc_gp10b_intr_stall_unit_config(struct gk20a *g, u32 unit, bool enable);
void mc_gp10b_intr_nonstall_unit_config(struct gk20a *g, u32 unit, bool enable); void mc_gp10b_intr_nonstall_unit_config(struct gk20a *g, u32 unit, bool enable);
void mc_gp10b_isr_stall_secondary_1(struct gk20a *g, u32 mc_intr_0);
void mc_gp10b_isr_stall_secondary_0(struct gk20a *g, u32 mc_intr_0);
void mc_gp10b_isr_stall_engine(struct gk20a *g,
enum nvgpu_fifo_engine engine_enum, u32 engine_id);
void mc_gp10b_isr_stall(struct gk20a *g); void mc_gp10b_isr_stall(struct gk20a *g);
bool mc_gp10b_is_intr1_pending(struct gk20a *g, bool mc_gp10b_is_intr1_pending(struct gk20a *g,
enum nvgpu_unit unit, u32 mc_intr_1); enum nvgpu_unit unit, u32 mc_intr_1);

View File

@@ -82,15 +82,12 @@ static u32 mc_gp10b_intr_pending_f(struct gk20a *g, u32 unit)
static void mc_gp10b_isr_stall_primary(struct gk20a *g, u32 mc_intr_0) static void mc_gp10b_isr_stall_primary(struct gk20a *g, u32 mc_intr_0)
{ {
if ((mc_intr_0 & mc_intr_pbus_pending_f()) != 0U) {
g->ops.bus.isr(g);
}
if ((mc_intr_0 & mc_intr_priv_ring_pending_f()) != 0U) { if ((mc_intr_0 & mc_intr_priv_ring_pending_f()) != 0U) {
g->ops.priv_ring.isr(g); g->ops.priv_ring.isr(g);
} }
} }
static void mc_gp10b_isr_stall_secondary_1(struct gk20a *g, u32 mc_intr_0) void mc_gp10b_isr_stall_secondary_1(struct gk20a *g, u32 mc_intr_0)
{ {
if ((mc_intr_0 & mc_intr_ltc_pending_f()) != 0U) { if ((mc_intr_0 & mc_intr_ltc_pending_f()) != 0U) {
g->ops.mc.ltc_isr(g); g->ops.mc.ltc_isr(g);
@@ -107,7 +104,7 @@ static void mc_gp10b_isr_stall_secondary_1(struct gk20a *g, u32 mc_intr_0)
#endif #endif
} }
static void mc_gp10b_isr_stall_secondary_0(struct gk20a *g, u32 mc_intr_0) void mc_gp10b_isr_stall_secondary_0(struct gk20a *g, u32 mc_intr_0)
{ {
if ((g->ops.mc.is_intr_hub_pending != NULL) && if ((g->ops.mc.is_intr_hub_pending != NULL) &&
g->ops.mc.is_intr_hub_pending(g, mc_intr_0)) { g->ops.mc.is_intr_hub_pending(g, mc_intr_0)) {
@@ -121,7 +118,7 @@ static void mc_gp10b_isr_stall_secondary_0(struct gk20a *g, u32 mc_intr_0)
} }
} }
static void mc_gp10b_isr_stall_engine(struct gk20a *g, void mc_gp10b_isr_stall_engine(struct gk20a *g,
enum nvgpu_fifo_engine engine_enum, u32 engine_id) enum nvgpu_fifo_engine engine_enum, u32 engine_id)
{ {
/* GR Engine */ /* GR Engine */

View File

@@ -49,7 +49,6 @@ bool gv11b_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 engine_id,
stall_intr = mc_intr_pfifo_pending_f() | stall_intr = mc_intr_pfifo_pending_f() |
mc_intr_hub_pending_f() | mc_intr_hub_pending_f() |
mc_intr_priv_ring_pending_f() | mc_intr_priv_ring_pending_f() |
mc_intr_pbus_pending_f() |
mc_intr_ltc_pending_f(); mc_intr_ltc_pending_f();
nvgpu_log(g, gpu_dbg_info | gpu_dbg_intr, nvgpu_log(g, gpu_dbg_info | gpu_dbg_intr,

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -28,6 +28,7 @@
#include <nvgpu/gk20a.h> #include <nvgpu/gk20a.h>
#include <nvgpu/engines.h> #include <nvgpu/engines.h>
#include <nvgpu/gops_mc.h> #include <nvgpu/gops_mc.h>
#include <nvgpu/power_features/pg.h>
#include "hal/mc/mc_gp10b.h" #include "hal/mc/mc_gp10b.h"
@@ -464,3 +465,48 @@ void mc_tu104_ltc_isr(struct gk20a *g)
g->ops.ltc.intr.isr(g, ltc); g->ops.ltc.intr.isr(g, ltc);
} }
} }
static void mc_tu104_isr_stall_primary(struct gk20a *g, u32 mc_intr_0)
{
/*
* In Turing, mc_intr_1 is deprecated and pbus intr is routed to
* mc_intr_0. This is different than legacy chips pbus interrupt.
*/
if ((mc_intr_0 & mc_intr_pbus_pending_f()) != 0U) {
g->ops.bus.isr(g);
}
if ((mc_intr_0 & mc_intr_priv_ring_pending_f()) != 0U) {
g->ops.priv_ring.isr(g);
}
}
void mc_tu104_isr_stall(struct gk20a *g)
{
u32 mc_intr_0;
u32 i;
u32 engine_id = 0U;
enum nvgpu_fifo_engine engine_enum;
mc_intr_0 = nvgpu_readl(g, mc_intr_r(NVGPU_MC_INTR_STALLING));
nvgpu_log(g, gpu_dbg_intr, "stall intr 0x%08x", mc_intr_0);
mc_tu104_isr_stall_primary(g, mc_intr_0);
for (i = 0U; i < g->fifo.num_engines; i++) {
engine_id = g->fifo.active_engines_list[i];
if ((mc_intr_0 &
g->fifo.engine_info[engine_id].intr_mask) == 0U) {
continue;
}
engine_enum = g->fifo.engine_info[engine_id].engine_enum;
mc_gp10b_isr_stall_engine(g, engine_enum, engine_id);
}
mc_gp10b_isr_stall_secondary_0(g, mc_intr_0);
mc_gp10b_isr_stall_secondary_1(g, mc_intr_0);
nvgpu_log(g, gpu_dbg_intr, "stall intr done 0x%08x", mc_intr_0);
}

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -63,5 +63,6 @@ bool intr_tu104_is_intr_hub_pending(struct gk20a *g, u32 mc_intr_0);
void intr_tu104_log_pending_intrs(struct gk20a *g); void intr_tu104_log_pending_intrs(struct gk20a *g);
void mc_tu104_fbpa_isr(struct gk20a *g); void mc_tu104_fbpa_isr(struct gk20a *g);
void mc_tu104_ltc_isr(struct gk20a *g); void mc_tu104_ltc_isr(struct gk20a *g);
void mc_tu104_isr_stall(struct gk20a *g);
#endif /* NVGPU_MC_TU104_H */ #endif /* NVGPU_MC_TU104_H */

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2012-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2012-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -83,8 +83,8 @@
#define bus_intr_0_pri_squash_m() (U32(0x1U) << 1U) #define bus_intr_0_pri_squash_m() (U32(0x1U) << 1U)
#define bus_intr_0_pri_fecserr_m() (U32(0x1U) << 2U) #define bus_intr_0_pri_fecserr_m() (U32(0x1U) << 2U)
#define bus_intr_0_pri_timeout_m() (U32(0x1U) << 3U) #define bus_intr_0_pri_timeout_m() (U32(0x1U) << 3U)
#define bus_intr_en_0_r() (0x00001140U) #define bus_intr_en_1_r() (0x00001144U)
#define bus_intr_en_0_pri_squash_m() (U32(0x1U) << 1U) #define bus_intr_en_1_pri_squash_m() (U32(0x1U) << 1U)
#define bus_intr_en_0_pri_fecserr_m() (U32(0x1U) << 2U) #define bus_intr_en_1_pri_fecserr_m() (U32(0x1U) << 2U)
#define bus_intr_en_0_pri_timeout_m() (U32(0x1U) << 3U) #define bus_intr_en_1_pri_timeout_m() (U32(0x1U) << 3U)
#endif #endif

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -100,8 +100,8 @@
#define bus_intr_0_pri_squash_m() (U32(0x1U) << 1U) #define bus_intr_0_pri_squash_m() (U32(0x1U) << 1U)
#define bus_intr_0_pri_fecserr_m() (U32(0x1U) << 2U) #define bus_intr_0_pri_fecserr_m() (U32(0x1U) << 2U)
#define bus_intr_0_pri_timeout_m() (U32(0x1U) << 3U) #define bus_intr_0_pri_timeout_m() (U32(0x1U) << 3U)
#define bus_intr_en_0_r() (0x00001140U) #define bus_intr_en_1_r() (0x00001144U)
#define bus_intr_en_0_pri_squash_m() (U32(0x1U) << 1U) #define bus_intr_en_1_pri_squash_m() (U32(0x1U) << 1U)
#define bus_intr_en_0_pri_fecserr_m() (U32(0x1U) << 2U) #define bus_intr_en_1_pri_fecserr_m() (U32(0x1U) << 2U)
#define bus_intr_en_0_pri_timeout_m() (U32(0x1U) << 3U) #define bus_intr_en_1_pri_timeout_m() (U32(0x1U) << 3U)
#endif #endif

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -100,8 +100,8 @@
#define bus_intr_0_pri_squash_m() (U32(0x1U) << 1U) #define bus_intr_0_pri_squash_m() (U32(0x1U) << 1U)
#define bus_intr_0_pri_fecserr_m() (U32(0x1U) << 2U) #define bus_intr_0_pri_fecserr_m() (U32(0x1U) << 2U)
#define bus_intr_0_pri_timeout_m() (U32(0x1U) << 3U) #define bus_intr_0_pri_timeout_m() (U32(0x1U) << 3U)
#define bus_intr_en_0_r() (0x00001140U) #define bus_intr_en_1_r() (0x00001144U)
#define bus_intr_en_0_pri_squash_m() (U32(0x1U) << 1U) #define bus_intr_en_1_pri_squash_m() (U32(0x1U) << 1U)
#define bus_intr_en_0_pri_fecserr_m() (U32(0x1U) << 2U) #define bus_intr_en_1_pri_fecserr_m() (U32(0x1U) << 2U)
#define bus_intr_en_0_pri_timeout_m() (U32(0x1U) << 3U) #define bus_intr_en_1_pri_timeout_m() (U32(0x1U) << 3U)
#endif #endif

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -100,10 +100,10 @@
#define bus_intr_0_pri_squash_m() (U32(0x1U) << 1U) #define bus_intr_0_pri_squash_m() (U32(0x1U) << 1U)
#define bus_intr_0_pri_fecserr_m() (U32(0x1U) << 2U) #define bus_intr_0_pri_fecserr_m() (U32(0x1U) << 2U)
#define bus_intr_0_pri_timeout_m() (U32(0x1U) << 3U) #define bus_intr_0_pri_timeout_m() (U32(0x1U) << 3U)
#define bus_intr_en_0_r() (0x00001140U) #define bus_intr_en_1_r() (0x00001144U)
#define bus_intr_en_0_pri_squash_m() (U32(0x1U) << 1U) #define bus_intr_en_1_pri_squash_m() (U32(0x1U) << 1U)
#define bus_intr_en_0_pri_fecserr_m() (U32(0x1U) << 2U) #define bus_intr_en_1_pri_fecserr_m() (U32(0x1U) << 2U)
#define bus_intr_en_0_pri_timeout_m() (U32(0x1U) << 3U) #define bus_intr_en_1_pri_timeout_m() (U32(0x1U) << 3U)
#define bus_debug_sel_0_r() (0x000010a0U) #define bus_debug_sel_0_r() (0x000010a0U)
#define bus_debug_sel_1_r() (0x000010a4U) #define bus_debug_sel_1_r() (0x000010a4U)
#define bus_debug_sel_2_r() (0x000010a8U) #define bus_debug_sel_2_r() (0x000010a8U)

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -125,8 +125,8 @@ int test_bus_setup(struct unit_module *m, struct gk20a *g, void *args)
g->ops.bus.bar1_bind = gm20b_bus_bar1_bind; g->ops.bus.bar1_bind = gm20b_bus_bar1_bind;
g->ops.bus.bar2_bind = gp10b_bus_bar2_bind; g->ops.bus.bar2_bind = gp10b_bus_bar2_bind;
g->ops.bus.configure_debug_bus = gv11b_bus_configure_debug_bus; g->ops.bus.configure_debug_bus = gv11b_bus_configure_debug_bus;
g->ops.mc.intr_stall_unit_config = g->ops.mc.intr_nonstall_unit_config =
mc_gp10b_intr_stall_unit_config; mc_gp10b_intr_nonstall_unit_config;
g->ops.ptimer.isr = gk20a_ptimer_isr; g->ops.ptimer.isr = gk20a_ptimer_isr;
/* Map register space NV_PRIV_MASTER */ /* Map register space NV_PRIV_MASTER */
@@ -181,7 +181,7 @@ int test_init_hw(struct unit_module *m, struct gk20a *g, void *args)
p->is_silicon = false; p->is_silicon = false;
g->ops.bus.configure_debug_bus = NULL; g->ops.bus.configure_debug_bus = NULL;
ret = g->ops.bus.init_hw(g); ret = g->ops.bus.init_hw(g);
assert(nvgpu_readl(g, bus_intr_en_0_r()) == 0U); assert(nvgpu_readl(g, bus_intr_en_1_r()) == 0U);
assert(nvgpu_readl(g, bus_debug_sel_0_r()) == 0xFU); assert(nvgpu_readl(g, bus_debug_sel_0_r()) == 0xFU);
assert(nvgpu_readl(g, bus_debug_sel_1_r()) == 0xFU); assert(nvgpu_readl(g, bus_debug_sel_1_r()) == 0xFU);
assert(nvgpu_readl(g, bus_debug_sel_2_r()) == 0xFU); assert(nvgpu_readl(g, bus_debug_sel_2_r()) == 0xFU);
@@ -190,7 +190,7 @@ int test_init_hw(struct unit_module *m, struct gk20a *g, void *args)
p->is_silicon = true; p->is_silicon = true;
g->ops.bus.configure_debug_bus = gv11b_bus_configure_debug_bus; g->ops.bus.configure_debug_bus = gv11b_bus_configure_debug_bus;
ret = g->ops.bus.init_hw(g); ret = g->ops.bus.init_hw(g);
assert(nvgpu_readl(g, bus_intr_en_0_r()) == 0xEU); assert(nvgpu_readl(g, bus_intr_en_1_r()) == 0xEU);
assert(nvgpu_readl(g, bus_debug_sel_0_r()) == 0x0U); assert(nvgpu_readl(g, bus_debug_sel_0_r()) == 0x0U);
assert(nvgpu_readl(g, bus_debug_sel_1_r()) == 0x0U); assert(nvgpu_readl(g, bus_debug_sel_1_r()) == 0x0U);
assert(nvgpu_readl(g, bus_debug_sel_2_r()) == 0x0U); assert(nvgpu_readl(g, bus_debug_sel_2_r()) == 0x0U);
@@ -199,7 +199,7 @@ int test_init_hw(struct unit_module *m, struct gk20a *g, void *args)
p->is_fpga = true; p->is_fpga = true;
p->is_silicon = false; p->is_silicon = false;
ret = g->ops.bus.init_hw(g); ret = g->ops.bus.init_hw(g);
assert(nvgpu_readl(g, bus_intr_en_0_r()) == 0xEU); assert(nvgpu_readl(g, bus_intr_en_1_r()) == 0xEU);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
return ret; return ret;

View File

@@ -492,7 +492,10 @@ int test_isr_stall(struct unit_module *m, struct gk20a *g, void *args)
nvgpu_posix_io_writel_reg_space(g, mc_intr_ltc_r(), 1U); nvgpu_posix_io_writel_reg_space(g, mc_intr_ltc_r(), 1U);
reset_ctx(); reset_ctx();
g->ops.mc.isr_stall(g); g->ops.mc.isr_stall(g);
if (!u.bus_isr || !u.ce_isr || !u.fb_isr || !u.fifo_isr || !u.gr_isr || if (u.bus_isr) {
unit_return_fail(m, "BUS ISR called from Stall\n");
}
if (!u.ce_isr || !u.fb_isr || !u.fifo_isr || !u.gr_isr ||
!u.pmu_isr || !u.priv_ring_isr) { !u.pmu_isr || !u.priv_ring_isr) {
unit_return_fail(m, "not all ISRs called\n"); unit_return_fail(m, "not all ISRs called\n");
} }
@@ -599,7 +602,7 @@ int test_isr_nonstall(struct unit_module *m, struct gk20a *g, void *args)
u.fifo_isr_return = 0x2; u.fifo_isr_return = 0x2;
u.gr_isr_return = 0x4; u.gr_isr_return = 0x4;
val = g->ops.mc.isr_nonstall(g); val = g->ops.mc.isr_nonstall(g);
if (!u.ce_isr || !u.fifo_isr || !u.gr_isr) { if (!u.bus_isr || !u.ce_isr || !u.fifo_isr || !u.gr_isr) {
unit_return_fail(m, "not all ISRs called\n"); unit_return_fail(m, "not all ISRs called\n");
} }
if (val != 0x7) { if (val != 0x7) {