mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: Add nvlink_intr_and_err_handling unit
Move code involved in nvlink interrupt and error handling and
initialization into a separate unit under subelement 'nvlink'.
Add g->ops.nvlink.intr_err ops to allow other units to access the APIs
exposed by this unit.

JIRA NVGPU-1813

Change-Id: I2d90cf1394faa0692630514b6a3cea15f5e105ae
Signed-off-by: Tejal Kudav <tkudav@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1997732
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by mobile promotions
Parent: 27ff49fab0
Commit: b729bde8eb
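The commit message's "g->ops.nvlink.intr_err ops" appear at the call sites in this diff as g->ops.nvlink.intr.*. The actual sub-structure lives in the nvgpu HAL headers and is not part of this change; the sketch below is only a reconstruction from those call sites and from the prototypes in intr_and_err_handling_gv100.h, so the field names and layout are assumptions.

/*
 * Sketch only: an interrupt/error-handling ops group matching the call
 * sites in this change (g->ops.nvlink.intr.*). Field names and ordering
 * are inferred, not copied from the real gops definition.
 */
struct gops_nvlink_intr {
	void (*common_intr_enable)(struct gk20a *g, unsigned long mask);
	void (*init_nvlipt_intr)(struct gk20a *g, u32 link_id);
	void (*enable_link_intr)(struct gk20a *g, u32 link_id, bool enable);
	void (*init_mif_intr)(struct gk20a *g, u32 link_id);
	void (*mif_intr_enable)(struct gk20a *g, u32 link_id, bool enable);
	void (*dlpl_intr_enable)(struct gk20a *g, u32 link_id, bool enable);
	void (*minion_clear_interrupts)(struct gk20a *g);
	void (*init_minion_intr)(struct gk20a *g);
	bool (*minion_falcon_isr)(struct gk20a *g);
	int (*isr)(struct gk20a *g);
};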
@@ -333,6 +333,7 @@ nvgpu-y += \
 	common/nvlink/probe.o \
 	common/nvlink/init/device_reginit_gv100.o \
 	common/nvlink/init/device_reginit.o \
+	common/nvlink/intr_and_err_handling_gv100.o \
 	common/nvlink/nvlink.o \
 	common/nvlink/nvlink_gv100.o \
 	common/nvlink/nvlink_tu104.o \
@@ -266,6 +266,7 @@ srcs += common/sim.c \
 	common/nvlink/probe.c \
 	common/nvlink/init/device_reginit.c \
 	common/nvlink/init/device_reginit_gv100.c \
+	common/nvlink/intr_and_err_handling_gv100.c \
 	common/nvlink/nvlink_gv100.c \
 	common/nvlink/nvlink_tu104.c \
 	common/nvlink/nvlink.c \
@@ -142,7 +142,7 @@ void mc_gp10b_isr_stall(struct gk20a *g)
 	}
 	if ((g->ops.mc.is_intr_nvlink_pending != NULL) &&
 			g->ops.mc.is_intr_nvlink_pending(g, mc_intr_0)) {
-		g->ops.nvlink.isr(g);
+		g->ops.nvlink.intr.isr(g);
 	}
 	if ((mc_intr_0 & mc_intr_pfb_pending_f()) != 0U &&
 			(g->ops.mc.fbpa_isr != NULL)) {
702 drivers/gpu/nvgpu/common/nvlink/intr_and_err_handling_gv100.c Normal file
@@ -0,0 +1,702 @@
|
||||
/*
|
||||
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_TEGRA_NVLINK
|
||||
|
||||
#include <nvgpu/io.h>
|
||||
#include <nvgpu/gk20a.h>
|
||||
#include "intr_and_err_handling_gv100.h"
|
||||
|
||||
#include <nvgpu/hw/gv100/hw_nvlipt_gv100.h>
|
||||
#include <nvgpu/hw/gv100/hw_ioctrl_gv100.h>
|
||||
#include <nvgpu/hw/gv100/hw_minion_gv100.h>
|
||||
#include <nvgpu/hw/gv100/hw_nvl_gv100.h>
|
||||
#include <nvgpu/hw/gv100/hw_ioctrlmif_gv100.h>
|
||||
#include <nvgpu/hw/gv100/hw_nvtlc_gv100.h>
|
||||
|
||||
/*
|
||||
* The manuals are missing some useful defines
|
||||
* we add them for now
|
||||
*/
|
||||
#define IPT_INTR_CONTROL_LINK(i) (nvlipt_intr_control_link0_r() + (i)*4)
|
||||
#define IPT_ERR_UC_STATUS_LINK(i) (nvlipt_err_uc_status_link0_r() + (i)*36)
|
||||
#define IPT_ERR_UC_MASK_LINK(i) (nvlipt_err_uc_mask_link0_r() + (i)*36)
|
||||
#define IPT_ERR_UC_SEVERITY_LINK(i) (nvlipt_err_uc_severity_link0_r() + (i)*36)
|
||||
#define IPT_ERR_UC_FIRST_LINK(i) (nvlipt_err_uc_first_link0_r() + (i)*36)
|
||||
#define IPT_ERR_UC_ADVISORY_LINK(i) (nvlipt_err_uc_advisory_link0_r() + (i)*36)
|
||||
#define IPT_ERR_C_STATUS_LINK(i) (nvlipt_err_c_status_link0_r() + (i)*36)
|
||||
#define IPT_ERR_C_MASK_LINK(i) (nvlipt_err_c_mask_link0_r() + (i)*36)
|
||||
#define IPT_ERR_C_FIRST_LINK(i) (nvlipt_err_c_first_link0_r() + (i)*36)
|
||||
#define IPT_ERR_CONTROL_LINK(i) (nvlipt_err_control_link0_r() + (i)*4)
|
||||
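/*
 * Worked example (added for illustration, not part of the original file):
 * the error-bank registers use a 36-byte per-link stride and the
 * control/intr registers a 4-byte stride, so for link 2
 *
 *	IPT_ERR_UC_STATUS_LINK(2) == nvlipt_err_uc_status_link0_r() + 2*36
 *	IPT_INTR_CONTROL_LINK(2)  == nvlipt_intr_control_link0_r() + 2*4
 */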
|
||||
#define IPT_ERR_UC_ACTIVE_BITS (nvlipt_err_uc_status_link0_dlprotocol_f(1) | \
|
||||
nvlipt_err_uc_status_link0_datapoisoned_f(1) | \
|
||||
nvlipt_err_uc_status_link0_flowcontrol_f(1) | \
|
||||
nvlipt_err_uc_status_link0_responsetimeout_f(1) | \
|
||||
nvlipt_err_uc_status_link0_targeterror_f(1) | \
|
||||
nvlipt_err_uc_status_link0_unexpectedresponse_f(1) | \
|
||||
nvlipt_err_uc_status_link0_receiveroverflow_f(1) | \
|
||||
nvlipt_err_uc_status_link0_malformedpacket_f(1) | \
|
||||
nvlipt_err_uc_status_link0_stompedpacketreceived_f(1) | \
|
||||
nvlipt_err_uc_status_link0_unsupportedrequest_f(1) | \
|
||||
nvlipt_err_uc_status_link0_ucinternal_f(1))
|
||||
|
||||
|
||||
#define MINION_FALCON_INTR_MASK (minion_falcon_irqmset_wdtmr_set_f() | \
|
||||
minion_falcon_irqmset_halt_set_f() | \
|
||||
minion_falcon_irqmset_exterr_set_f()| \
|
||||
minion_falcon_irqmset_swgen0_set_f()| \
|
||||
minion_falcon_irqmset_swgen1_set_f())
|
||||
|
||||
#define MINION_FALCON_INTR_DEST ( \
|
||||
minion_falcon_irqdest_host_wdtmr_host_f() | \
|
||||
minion_falcon_irqdest_host_halt_host_f() | \
|
||||
minion_falcon_irqdest_host_exterr_host_f() | \
|
||||
minion_falcon_irqdest_host_swgen0_host_f() | \
|
||||
minion_falcon_irqdest_host_swgen1_host_f() | \
|
||||
minion_falcon_irqdest_target_wdtmr_host_normal_f() | \
|
||||
minion_falcon_irqdest_target_halt_host_normal_f() | \
|
||||
minion_falcon_irqdest_target_exterr_host_normal_f() | \
|
||||
minion_falcon_irqdest_target_swgen0_host_normal_f() | \
|
||||
minion_falcon_irqdest_target_swgen1_host_normal_f())
|
||||
|
||||
/*
|
||||
* Clear minion Interrupts
|
||||
*/
|
||||
void gv100_nvlink_minion_clear_interrupts(struct gk20a *g)
|
||||
{
|
||||
nvgpu_falcon_set_irq(g->minion_flcn, true, MINION_FALCON_INTR_MASK,
|
||||
MINION_FALCON_INTR_DEST);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialization of link specific interrupts
|
||||
*/
|
||||
static void gv100_nvlink_minion_link_intr_enable(struct gk20a *g, u32 link_id,
|
||||
bool enable)
|
||||
{
|
||||
u32 intr, links;
|
||||
|
||||
/* Only stall interrupts for now */
|
||||
intr = MINION_REG_RD32(g, minion_minion_intr_stall_en_r());
|
||||
links = minion_minion_intr_stall_en_link_v(intr);
|
||||
|
||||
if (enable)
|
||||
links |= BIT(link_id);
|
||||
else
|
||||
links &= ~BIT(link_id);
|
||||
|
||||
intr = set_field(intr, minion_minion_intr_stall_en_link_m(),
|
||||
minion_minion_intr_stall_en_link_f(links));
|
||||
MINION_REG_WR32(g, minion_minion_intr_stall_en_r(), intr);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialization of falcon interrupts
|
||||
*/
|
||||
static void gv100_nvlink_minion_falcon_intr_enable(struct gk20a *g, bool enable)
|
||||
{
|
||||
u32 reg;
|
||||
|
||||
reg = MINION_REG_RD32(g, minion_minion_intr_stall_en_r());
|
||||
if (enable) {
|
||||
reg = set_field(reg, minion_minion_intr_stall_en_fatal_m(),
|
||||
minion_minion_intr_stall_en_fatal_enable_f());
|
||||
reg = set_field(reg, minion_minion_intr_stall_en_nonfatal_m(),
|
||||
minion_minion_intr_stall_en_nonfatal_enable_f());
|
||||
reg = set_field(reg, minion_minion_intr_stall_en_falcon_stall_m(),
|
||||
minion_minion_intr_stall_en_falcon_stall_enable_f());
|
||||
reg = set_field(reg, minion_minion_intr_stall_en_falcon_nostall_m(),
|
||||
minion_minion_intr_stall_en_falcon_nostall_enable_f());
|
||||
} else {
|
||||
reg = set_field(reg, minion_minion_intr_stall_en_fatal_m(),
|
||||
minion_minion_intr_stall_en_fatal_disable_f());
|
||||
reg = set_field(reg, minion_minion_intr_stall_en_nonfatal_m(),
|
||||
minion_minion_intr_stall_en_nonfatal_disable_f());
|
||||
reg = set_field(reg, minion_minion_intr_stall_en_falcon_stall_m(),
|
||||
minion_minion_intr_stall_en_falcon_stall_disable_f());
|
||||
reg = set_field(reg, minion_minion_intr_stall_en_falcon_nostall_m(),
|
||||
minion_minion_intr_stall_en_falcon_nostall_disable_f());
|
||||
}
|
||||
|
||||
MINION_REG_WR32(g, minion_minion_intr_stall_en_r(), reg);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize minion IP interrupts
|
||||
*/
|
||||
void gv100_nvlink_init_minion_intr(struct gk20a *g)
|
||||
{
|
||||
/* Disable non-stall tree */
|
||||
MINION_REG_WR32(g, minion_minion_intr_nonstall_en_r(), 0x0);
|
||||
|
||||
gv100_nvlink_minion_falcon_intr_enable(g, true);
|
||||
}
|
||||
|
||||
/*
|
||||
* Falcon specific ISR handling
|
||||
*/
|
||||
bool gv100_nvlink_minion_falcon_isr(struct gk20a *g)
|
||||
{
|
||||
u32 intr;
|
||||
|
||||
intr = MINION_REG_RD32(g, minion_falcon_irqstat_r()) &
|
||||
MINION_REG_RD32(g, minion_falcon_irqmask_r());
|
||||
|
||||
if (!intr)
|
||||
return true;
|
||||
|
||||
if (intr & minion_falcon_irqstat_exterr_true_f()) {
|
||||
nvgpu_err(g, "FALCON EXT ADDR: 0x%x 0x%x 0x%x",
|
||||
MINION_REG_RD32(g, 0x244),
|
||||
MINION_REG_RD32(g, 0x248),
|
||||
MINION_REG_RD32(g, 0x24c));
|
||||
}
|
||||
|
||||
MINION_REG_WR32(g, minion_falcon_irqsclr_r(), intr);
|
||||
|
||||
nvgpu_err(g, "FATAL minion IRQ: 0x%08x", intr);
|
||||
|
||||
|
||||
intr = MINION_REG_RD32(g, minion_falcon_irqstat_r()) &
|
||||
MINION_REG_RD32(g, minion_falcon_irqmask_r());
|
||||
|
||||
return (intr == 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Link Specific ISR
|
||||
*/
|
||||
|
||||
static bool gv100_nvlink_minion_link_isr(struct gk20a *g, u32 link_id)
|
||||
{
|
||||
u32 intr, code;
|
||||
bool fatal = false;
|
||||
|
||||
intr = MINION_REG_RD32(g, minion_nvlink_link_intr_r(link_id));
|
||||
code = minion_nvlink_link_intr_code_v(intr);
|
||||
|
||||
if (code == minion_nvlink_link_intr_code_swreq_v()) {
|
||||
nvgpu_err(g, " Intr SWREQ, link: %d subcode: %x",
|
||||
link_id, minion_nvlink_link_intr_subcode_v(intr));
|
||||
} else if (code == minion_nvlink_link_intr_code_pmdisabled_v()) {
|
||||
nvgpu_err(g, " Fatal Intr PMDISABLED, link: %d subcode: %x",
|
||||
link_id, minion_nvlink_link_intr_subcode_v(intr));
|
||||
fatal = true;
|
||||
} else if (code == minion_nvlink_link_intr_code_na_v()) {
|
||||
nvgpu_err(g, " Fatal Intr NA, link: %d subcode: %x",
|
||||
link_id, minion_nvlink_link_intr_subcode_v(intr));
|
||||
fatal = true;
|
||||
} else if (code == minion_nvlink_link_intr_code_dlreq_v()) {
|
||||
nvgpu_err(g, " Fatal Intr DLREQ, link: %d subcode: %x",
|
||||
link_id, minion_nvlink_link_intr_subcode_v(intr));
|
||||
fatal = true;
|
||||
} else {
|
||||
nvgpu_err(g, " Fatal Intr UNKN:%x, link: %d subcode: %x", code,
|
||||
link_id, minion_nvlink_link_intr_subcode_v(intr));
|
||||
fatal = true;
|
||||
}
|
||||
|
||||
if (fatal)
|
||||
gv100_nvlink_minion_link_intr_enable(g, link_id, false);
|
||||
|
||||
intr = set_field(intr, minion_nvlink_link_intr_state_m(),
|
||||
minion_nvlink_link_intr_state_f(1));
|
||||
MINION_REG_WR32(g, minion_nvlink_link_intr_r(link_id), intr);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Global minion routine to service interrupts
|
||||
*/
|
||||
static bool gv100_nvlink_minion_isr(struct gk20a *g) {
|
||||
|
||||
u32 intr, i;
|
||||
unsigned long links;
|
||||
|
||||
intr = MINION_REG_RD32(g, minion_minion_intr_r()) &
|
||||
MINION_REG_RD32(g, minion_minion_intr_stall_en_r());
|
||||
|
||||
if (minion_minion_intr_falcon_stall_v(intr) ||
|
||||
minion_minion_intr_falcon_nostall_v(intr))
|
||||
gv100_nvlink_minion_falcon_isr(g);
|
||||
|
||||
if (minion_minion_intr_fatal_v(intr)) {
|
||||
gv100_nvlink_minion_falcon_intr_enable(g, false);
|
||||
MINION_REG_WR32(g, minion_minion_intr_r(),
|
||||
minion_minion_intr_fatal_f(1));
|
||||
}
|
||||
|
||||
if (minion_minion_intr_nonfatal_v(intr))
|
||||
MINION_REG_WR32(g, minion_minion_intr_r(),
|
||||
minion_minion_intr_nonfatal_f(1));
|
||||
|
||||
links = minion_minion_intr_link_v(intr) & g->nvlink.enabled_links;
|
||||
|
||||
if (links)
|
||||
for_each_set_bit(i, &links, 32)
|
||||
gv100_nvlink_minion_link_isr(g, i);
|
||||
|
||||
/* Re-test interrupt status */
|
||||
intr = MINION_REG_RD32(g, minion_minion_intr_r()) &
|
||||
MINION_REG_RD32(g, minion_minion_intr_stall_en_r());
|
||||
|
||||
return (intr == 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Init TLC per link interrupts
|
||||
*/
|
||||
static void gv100_nvlink_tlc_intr_enable(struct gk20a *g, u32 link_id,
|
||||
bool enable)
|
||||
{
|
||||
u32 reg_rx0 = 0, reg_rx1 = 0, reg_tx = 0;
|
||||
|
||||
if (enable) {
|
||||
/* Set PROD values */
|
||||
reg_rx0 = 0x0FFFFFF;
|
||||
reg_rx1 = 0x03FFFFF;
|
||||
reg_tx = 0x1FFFFFF;
|
||||
}
|
||||
|
||||
TLC_REG_WR32(g, link_id, nvtlc_rx_err_report_en_0_r(), reg_rx0);
|
||||
TLC_REG_WR32(g, link_id, nvtlc_rx_err_report_en_1_r(), reg_rx1);
|
||||
TLC_REG_WR32(g, link_id, nvtlc_tx_err_report_en_0_r(), reg_tx);
|
||||
}
|
||||
|
||||
/*
|
||||
* helper function to get TLC intr status in common structure
|
||||
*/
|
||||
static void gv100_nvlink_tlc_get_intr(struct gk20a *g, u32 link_id)
|
||||
{
|
||||
g->nvlink.tlc_rx_err_status_0[link_id] =
|
||||
TLC_REG_RD32(g, link_id, nvtlc_rx_err_status_0_r());
|
||||
g->nvlink.tlc_rx_err_status_1[link_id] =
|
||||
TLC_REG_RD32(g, link_id, nvtlc_rx_err_status_1_r());
|
||||
g->nvlink.tlc_tx_err_status_0[link_id] =
|
||||
TLC_REG_RD32(g, link_id, nvtlc_tx_err_status_0_r());
|
||||
}
|
||||
|
||||
/*
|
||||
* Interrupt routine handler for TLC
|
||||
*/
|
||||
static void gv100_nvlink_tlc_isr(struct gk20a *g, u32 link_id)
|
||||
{
|
||||
|
||||
if (g->nvlink.tlc_rx_err_status_0[link_id]) {
|
||||
/* All TLC RX 0 errors are fatal. Notify and disable */
|
||||
nvgpu_err(g, "Fatal TLC RX 0 interrupt on link %d mask: %x",
|
||||
link_id, g->nvlink.tlc_rx_err_status_0[link_id]);
|
||||
TLC_REG_WR32(g, link_id, nvtlc_rx_err_first_0_r(),
|
||||
g->nvlink.tlc_rx_err_status_0[link_id]);
|
||||
TLC_REG_WR32(g, link_id, nvtlc_rx_err_status_0_r(),
|
||||
g->nvlink.tlc_rx_err_status_0[link_id]);
|
||||
}
|
||||
if (g->nvlink.tlc_rx_err_status_1[link_id]) {
|
||||
/* All TLC RX 1 errors are fatal. Notify and disable */
|
||||
nvgpu_err(g, "Fatal TLC RX 1 interrupt on link %d mask: %x",
|
||||
link_id, g->nvlink.tlc_rx_err_status_1[link_id]);
|
||||
TLC_REG_WR32(g, link_id, nvtlc_rx_err_first_1_r(),
|
||||
g->nvlink.tlc_rx_err_status_1[link_id]);
|
||||
TLC_REG_WR32(g, link_id, nvtlc_rx_err_status_1_r(),
|
||||
g->nvlink.tlc_rx_err_status_1[link_id]);
|
||||
}
|
||||
if (g->nvlink.tlc_tx_err_status_0[link_id]) {
|
||||
/* All TLC TX 0 errors are fatal. Notify and disable */
|
||||
nvgpu_err(g, "Fatal TLC TX 0 interrupt on link %d mask: %x",
|
||||
link_id, g->nvlink.tlc_tx_err_status_0[link_id]);
|
||||
TLC_REG_WR32(g, link_id, nvtlc_tx_err_first_0_r(),
|
||||
g->nvlink.tlc_tx_err_status_0[link_id]);
|
||||
TLC_REG_WR32(g, link_id, nvtlc_tx_err_status_0_r(),
|
||||
g->nvlink.tlc_tx_err_status_0[link_id]);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* DLPL interrupt enable helper
|
||||
*/
|
||||
void gv100_nvlink_dlpl_intr_enable(struct gk20a *g, u32 link_id, bool enable)
|
||||
{
|
||||
u32 reg = 0;
|
||||
|
||||
/* Always disable nonstall tree */
|
||||
DLPL_REG_WR32(g, link_id, nvl_intr_nonstall_en_r(), 0);
|
||||
|
||||
if (!enable)
|
||||
{
|
||||
DLPL_REG_WR32(g, link_id, nvl_intr_stall_en_r(), 0);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Clear interrupt register to get rid of stale state (W1C) */
|
||||
DLPL_REG_WR32(g, link_id, nvl_intr_r(), 0xffffffffU);
|
||||
DLPL_REG_WR32(g, link_id, nvl_intr_sw2_r(), 0xffffffffU);
|
||||
|
||||
reg = nvl_intr_stall_en_tx_recovery_long_enable_f() |
|
||||
nvl_intr_stall_en_tx_fault_ram_enable_f() |
|
||||
nvl_intr_stall_en_tx_fault_interface_enable_f() |
|
||||
nvl_intr_stall_en_tx_fault_sublink_change_enable_f() |
|
||||
nvl_intr_stall_en_rx_fault_sublink_change_enable_f() |
|
||||
nvl_intr_stall_en_rx_fault_dl_protocol_enable_f() |
|
||||
nvl_intr_stall_en_ltssm_fault_enable_f();
|
||||
|
||||
DLPL_REG_WR32(g, link_id, nvl_intr_stall_en_r(), reg);
|
||||
|
||||
/* Configure error threshold */
|
||||
reg = DLPL_REG_RD32(g, link_id, nvl_sl1_error_rate_ctrl_r());
|
||||
reg = set_field(reg, nvl_sl1_error_rate_ctrl_short_threshold_man_m(),
|
||||
nvl_sl1_error_rate_ctrl_short_threshold_man_f(0x2));
|
||||
reg = set_field(reg, nvl_sl1_error_rate_ctrl_long_threshold_man_m(),
|
||||
nvl_sl1_error_rate_ctrl_long_threshold_man_f(0x2));
|
||||
DLPL_REG_WR32(g, link_id, nvl_sl1_error_rate_ctrl_r(), reg);
|
||||
}
|
||||
|
||||
/*
|
||||
* DLPL per-link isr
|
||||
*/
|
||||
|
||||
#define DLPL_NON_FATAL_INTR_MASK (nvl_intr_tx_replay_f(1) | \
|
||||
nvl_intr_tx_recovery_short_f(1) | \
|
||||
nvl_intr_tx_recovery_long_f(1) | \
|
||||
nvl_intr_rx_short_error_rate_f(1) | \
|
||||
nvl_intr_rx_long_error_rate_f(1) | \
|
||||
nvl_intr_rx_ila_trigger_f(1) | \
|
||||
nvl_intr_ltssm_protocol_f(1))
|
||||
|
||||
#define DLPL_FATAL_INTR_MASK ( nvl_intr_ltssm_fault_f(1) | \
|
||||
nvl_intr_rx_fault_dl_protocol_f(1) | \
|
||||
nvl_intr_rx_fault_sublink_change_f(1) | \
|
||||
nvl_intr_tx_fault_sublink_change_f(1) | \
|
||||
nvl_intr_tx_fault_interface_f(1) | \
|
||||
nvl_intr_tx_fault_ram_f(1))
|
||||
|
||||
static void gv100_nvlink_dlpl_isr(struct gk20a *g, u32 link_id)
|
||||
{
|
||||
u32 non_fatal_mask = 0;
|
||||
u32 fatal_mask = 0;
|
||||
u32 intr = 0;
|
||||
bool retrain = false;
|
||||
int err;
|
||||
|
||||
intr = DLPL_REG_RD32(g, link_id, nvl_intr_r()) &
|
||||
DLPL_REG_RD32(g, link_id, nvl_intr_stall_en_r());
|
||||
|
||||
if (!intr)
|
||||
return;
|
||||
|
||||
fatal_mask = intr & DLPL_FATAL_INTR_MASK;
|
||||
non_fatal_mask = intr & DLPL_NON_FATAL_INTR_MASK;
|
||||
|
||||
nvgpu_err(g, " handling DLPL %d isr. Fatal: %x non-Fatal: %x",
|
||||
link_id, fatal_mask, non_fatal_mask);
|
||||
|
||||
/* Check if we are not handling an interrupt */
|
||||
if ((fatal_mask | non_fatal_mask) & ~intr)
|
||||
nvgpu_err(g, "Unable to service DLPL intr on link %d", link_id);
|
||||
|
||||
if (non_fatal_mask & nvl_intr_tx_recovery_long_f(1))
|
||||
retrain = true;
|
||||
if (fatal_mask)
|
||||
retrain = false;
|
||||
|
||||
if (retrain) {
|
||||
err = nvgpu_nvlink_train(g, link_id, false);
|
||||
if (err != 0)
|
||||
nvgpu_err(g, "failed to retrain link %d", link_id);
|
||||
}
|
||||
|
||||
/* Clear interrupts */
|
||||
DLPL_REG_WR32(g, link_id, nvl_intr_r(), (non_fatal_mask | fatal_mask));
|
||||
DLPL_REG_WR32(g, link_id, nvl_intr_sw2_r(), 0xffffffffU);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize MIF API with PROD settings
|
||||
*/
|
||||
void gv100_nvlink_init_mif_intr(struct gk20a *g, u32 link_id)
|
||||
{
|
||||
u32 tmp;
|
||||
|
||||
/* Enable MIF RX error */
|
||||
|
||||
/* Containment (make fatal) */
|
||||
tmp = 0;
|
||||
tmp = set_field(tmp,
|
||||
ioctrlmif_rx_err_contain_en_0_rxramdataparityerr_m(),
|
||||
ioctrlmif_rx_err_contain_en_0_rxramdataparityerr__prod_f());
|
||||
tmp = set_field(tmp,
|
||||
ioctrlmif_rx_err_contain_en_0_rxramhdrparityerr_m(),
|
||||
ioctrlmif_rx_err_contain_en_0_rxramhdrparityerr__prod_f());
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_rx_err_contain_en_0_r(), tmp);
|
||||
|
||||
/* Logging (do not ignore) */
|
||||
tmp = 0;
|
||||
tmp = set_field(tmp,
|
||||
ioctrlmif_rx_err_log_en_0_rxramdataparityerr_m(),
|
||||
ioctrlmif_rx_err_log_en_0_rxramdataparityerr_f(1));
|
||||
tmp = set_field(tmp,
|
||||
ioctrlmif_rx_err_log_en_0_rxramhdrparityerr_m(),
|
||||
ioctrlmif_rx_err_log_en_0_rxramhdrparityerr_f(1));
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_rx_err_log_en_0_r(), tmp);
|
||||
|
||||
/* Tx Error */
|
||||
/* Containment (make fatal) */
|
||||
tmp = 0;
|
||||
tmp = set_field(tmp,
|
||||
ioctrlmif_tx_err_contain_en_0_txramdataparityerr_m(),
|
||||
ioctrlmif_tx_err_contain_en_0_txramdataparityerr__prod_f());
|
||||
tmp = set_field(tmp,
|
||||
ioctrlmif_tx_err_contain_en_0_txramhdrparityerr_m(),
|
||||
ioctrlmif_tx_err_contain_en_0_txramhdrparityerr__prod_f());
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_tx_err_contain_en_0_r(), tmp);
|
||||
|
||||
/* Logging (do not ignore) */
|
||||
tmp = 0;
|
||||
tmp = set_field(tmp, ioctrlmif_tx_err_log_en_0_txramdataparityerr_m(),
|
||||
ioctrlmif_tx_err_log_en_0_txramdataparityerr_f(1));
|
||||
tmp = set_field(tmp, ioctrlmif_tx_err_log_en_0_txramhdrparityerr_m(),
|
||||
ioctrlmif_tx_err_log_en_0_txramhdrparityerr_f(1));
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_tx_err_log_en_0_r(), tmp);
|
||||
|
||||
/* Credit release */
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_rx_ctrl_buffer_ready_r(), 0x1);
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_tx_ctrl_buffer_ready_r(), 0x1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Enable per-link MIF interrupts
|
||||
*/
|
||||
void gv100_nvlink_mif_intr_enable(struct gk20a *g, u32 link_id, bool enable)
|
||||
{
|
||||
u32 reg0 = 0, reg1 = 0;
|
||||
|
||||
if (enable) {
|
||||
reg0 = set_field(reg0,
|
||||
ioctrlmif_rx_err_report_en_0_rxramdataparityerr_m(),
|
||||
ioctrlmif_rx_err_report_en_0_rxramdataparityerr_f(1));
|
||||
reg0 = set_field(reg0,
|
||||
ioctrlmif_rx_err_report_en_0_rxramhdrparityerr_m(),
|
||||
ioctrlmif_rx_err_report_en_0_rxramhdrparityerr_f(1));
|
||||
reg1 = set_field(reg1,
|
||||
ioctrlmif_tx_err_report_en_0_txramdataparityerr_m(),
|
||||
ioctrlmif_tx_err_report_en_0_txramdataparityerr_f(1));
|
||||
reg1 = set_field(reg1,
|
||||
ioctrlmif_tx_err_report_en_0_txramhdrparityerr_m(),
|
||||
ioctrlmif_tx_err_report_en_0_txramhdrparityerr_f(1));
|
||||
}
|
||||
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_rx_err_report_en_0_r(), reg0);
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_tx_err_report_en_0_r(), reg1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle per-link MIF interrupts
|
||||
*/
|
||||
static void gv100_nvlink_mif_isr(struct gk20a *g, u32 link_id)
|
||||
{
|
||||
u32 intr, fatal_mask = 0;
|
||||
|
||||
/* RX Errors */
|
||||
intr = MIF_REG_RD32(g, link_id, ioctrlmif_rx_err_status_0_r());
|
||||
if (intr) {
|
||||
if (intr & ioctrlmif_rx_err_status_0_rxramdataparityerr_m()) {
|
||||
nvgpu_err(g, "Fatal MIF RX interrupt hit on link %d: RAM_DATA_PARITY",
|
||||
link_id);
|
||||
fatal_mask |= ioctrlmif_rx_err_status_0_rxramdataparityerr_f(1);
|
||||
}
|
||||
if (intr & ioctrlmif_rx_err_status_0_rxramhdrparityerr_m()) {
|
||||
nvgpu_err(g, "Fatal MIF RX interrupt hit on link %d: RAM_HDR_PARITY",
|
||||
link_id);
|
||||
fatal_mask |= ioctrlmif_rx_err_status_0_rxramhdrparityerr_f(1);
|
||||
}
|
||||
|
||||
if (fatal_mask) {
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_rx_err_first_0_r(),
|
||||
fatal_mask);
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_rx_err_status_0_r(),
|
||||
fatal_mask);
|
||||
}
|
||||
}
|
||||
|
||||
/* TX Errors */
|
||||
fatal_mask = 0;
|
||||
intr = MIF_REG_RD32(g, link_id, ioctrlmif_tx_err_status_0_r());
|
||||
if (intr) {
|
||||
if (intr & ioctrlmif_tx_err_status_0_txramdataparityerr_m()) {
|
||||
nvgpu_err(g, "Fatal MIF TX interrupt hit on link %d: RAM_DATA_PARITY",
|
||||
link_id);
|
||||
fatal_mask |= ioctrlmif_tx_err_status_0_txramdataparityerr_f(1);
|
||||
}
|
||||
if (intr & ioctrlmif_tx_err_status_0_txramhdrparityerr_m()) {
|
||||
nvgpu_err(g, "Fatal MIF TX interrupt hit on link %d: RAM_HDR_PARITY",
|
||||
link_id);
|
||||
fatal_mask |= ioctrlmif_tx_err_status_0_txramhdrparityerr_f(1);
|
||||
}
|
||||
|
||||
if (fatal_mask) {
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_tx_err_first_0_r(),
|
||||
fatal_mask);
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_tx_err_status_0_r(),
|
||||
fatal_mask);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* NVLIPT IP initialization (per-link)
|
||||
*/
|
||||
void gv100_nvlink_init_nvlipt_intr(struct gk20a *g, u32 link_id)
|
||||
{
|
||||
/* init persistent scratch registers */
|
||||
IPT_REG_WR32(g, nvlipt_scratch_cold_r(),
|
||||
nvlipt_scratch_cold_data_init_v());
|
||||
|
||||
/*
|
||||
* AErr settings (top level)
|
||||
*/
|
||||
|
||||
/* UC first and status reg (W1C) need to be cleared by arch */
|
||||
IPT_REG_WR32(g, IPT_ERR_UC_FIRST_LINK(link_id), IPT_ERR_UC_ACTIVE_BITS);
|
||||
IPT_REG_WR32(g, IPT_ERR_UC_STATUS_LINK(link_id), IPT_ERR_UC_ACTIVE_BITS);
|
||||
|
||||
/* AErr Severity */
|
||||
IPT_REG_WR32(g, IPT_ERR_UC_SEVERITY_LINK(link_id), IPT_ERR_UC_ACTIVE_BITS);
|
||||
|
||||
/* AErr Control settings */
|
||||
IPT_REG_WR32(g, IPT_ERR_CONTROL_LINK(link_id),
|
||||
nvlipt_err_control_link0_fatalenable_f(1) |
|
||||
nvlipt_err_control_link0_nonfatalenable_f(1));
|
||||
}
|
||||
|
||||
/*
|
||||
* Enable NVLIPT interrupts
|
||||
*/
|
||||
static void gv100_nvlink_nvlipt_intr_enable(struct gk20a *g, u32 link_id,
|
||||
bool enable)
|
||||
{
|
||||
u32 val = 0;
|
||||
u32 reg;
|
||||
|
||||
if (enable)
|
||||
val = 1;
|
||||
|
||||
reg = IPT_REG_RD32(g, IPT_INTR_CONTROL_LINK(link_id));
|
||||
reg = set_field(reg, nvlipt_intr_control_link0_stallenable_m(),
|
||||
nvlipt_intr_control_link0_stallenable_f(val));
|
||||
reg = set_field(reg, nvlipt_intr_control_link0_nostallenable_m(),
|
||||
nvlipt_intr_control_link0_nostallenable_f(val));
|
||||
IPT_REG_WR32(g, IPT_INTR_CONTROL_LINK(link_id), reg);
|
||||
}
|
||||
|
||||
/*
|
||||
* Per-link NVLIPT ISR handler
|
||||
*/
|
||||
static bool gv100_nvlink_nvlipt_isr(struct gk20a *g, u32 link_id)
|
||||
{
|
||||
/*
|
||||
* Interrupt handling happens in leaf handlers. Assume all interrupts
|
||||
* were handled and clear roll-ups.
|
||||
*/
|
||||
IPT_REG_WR32(g, IPT_ERR_UC_FIRST_LINK(link_id), IPT_ERR_UC_ACTIVE_BITS);
|
||||
IPT_REG_WR32(g, IPT_ERR_UC_STATUS_LINK(link_id), IPT_ERR_UC_ACTIVE_BITS);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
*******************************************************************************
|
||||
* Interrupt handling functions *
|
||||
*******************************************************************************
|
||||
*/
|
||||
|
||||
/*
|
||||
* Enable common interrupts
|
||||
*/
|
||||
void gv100_nvlink_common_intr_enable(struct gk20a *g, unsigned long mask)
|
||||
{
|
||||
u32 reg, i;
|
||||
|
||||
/* Init IOCTRL */
|
||||
for_each_set_bit(i, &mask, 32) {
|
||||
reg = IOCTRL_REG_RD32(g, ioctrl_link_intr_0_mask_r(i));
|
||||
reg |= (ioctrl_link_intr_0_mask_fatal_f(1) |
|
||||
ioctrl_link_intr_0_mask_nonfatal_f(1) |
|
||||
ioctrl_link_intr_0_mask_correctable_f(1) |
|
||||
ioctrl_link_intr_0_mask_intra_f(1));
|
||||
IOCTRL_REG_WR32(g, ioctrl_link_intr_0_mask_r(i), reg);
|
||||
}
|
||||
|
||||
reg = IOCTRL_REG_RD32(g, ioctrl_common_intr_0_mask_r());
|
||||
reg |= (ioctrl_common_intr_0_mask_fatal_f(1) |
|
||||
ioctrl_common_intr_0_mask_nonfatal_f(1) |
|
||||
ioctrl_common_intr_0_mask_correctable_f(1) |
|
||||
ioctrl_common_intr_0_mask_intra_f(1));
|
||||
IOCTRL_REG_WR32(g, ioctrl_common_intr_0_mask_r(), reg);
|
||||
|
||||
/* Init NVLIPT */
|
||||
IPT_REG_WR32(g, nvlipt_intr_control_common_r(),
|
||||
nvlipt_intr_control_common_stallenable_f(1) |
|
||||
nvlipt_intr_control_common_nonstallenable_f(1));
|
||||
}
|
||||
|
||||
/*
|
||||
* Enable link specific interrupts (top-level)
|
||||
*/
|
||||
void gv100_nvlink_enable_link_intr(struct gk20a *g, u32 link_id, bool enable)
|
||||
{
|
||||
gv100_nvlink_minion_link_intr_enable(g, link_id, enable);
|
||||
gv100_nvlink_dlpl_intr_enable(g, link_id, enable);
|
||||
gv100_nvlink_tlc_intr_enable(g, link_id, enable);
|
||||
gv100_nvlink_mif_intr_enable(g, link_id, enable);
|
||||
gv100_nvlink_nvlipt_intr_enable(g, link_id, enable);
|
||||
}
|
||||
|
||||
/*
|
||||
* Top level interrupt handler
|
||||
*/
|
||||
int gv100_nvlink_isr(struct gk20a *g)
|
||||
{
|
||||
unsigned long links;
|
||||
u32 link_id;
|
||||
|
||||
links = ioctrl_top_intr_0_status_link_v(
|
||||
IOCTRL_REG_RD32(g, ioctrl_top_intr_0_status_r()));
|
||||
|
||||
links &= g->nvlink.enabled_links;
|
||||
/* As per ARCH minion must be serviced first */
|
||||
gv100_nvlink_minion_isr(g);
|
||||
|
||||
for_each_set_bit(link_id, &links, 32) {
|
||||
/* Cache error logs from TLC, DL handler may clear them */
|
||||
gv100_nvlink_tlc_get_intr(g, link_id);
|
||||
gv100_nvlink_dlpl_isr(g, link_id);
|
||||
gv100_nvlink_tlc_isr(g, link_id);
|
||||
gv100_nvlink_mif_isr(g, link_id);
|
||||
|
||||
/* NVLIPT is top-level. Do it last */
|
||||
gv100_nvlink_nvlipt_isr(g, link_id);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_TEGRA_NVLINK */
|
||||
40 drivers/gpu/nvgpu/common/nvlink/intr_and_err_handling_gv100.h Normal file
@@ -0,0 +1,40 @@
/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef INTR_AND_ERR_HANDLING_GV100_H
#define INTR_AND_ERR_HANDLING_GV100_H

#include <nvgpu/types.h>
struct gk20a;

void gv100_nvlink_minion_clear_interrupts(struct gk20a *g);
void gv100_nvlink_init_minion_intr(struct gk20a *g);
bool gv100_nvlink_minion_falcon_isr(struct gk20a *g);
void gv100_nvlink_common_intr_enable(struct gk20a *g, unsigned long mask);
void gv100_nvlink_init_nvlipt_intr(struct gk20a *g, u32 link_id);
void gv100_nvlink_enable_link_intr(struct gk20a *g, u32 link_id, bool enable);
void gv100_nvlink_init_mif_intr(struct gk20a *g, u32 link_id);
void gv100_nvlink_mif_intr_enable(struct gk20a *g, u32 link_id, bool enable);
void gv100_nvlink_dlpl_intr_enable(struct gk20a *g, u32 link_id, bool enable);
int gv100_nvlink_isr(struct gk20a *g);

#endif /* INTR_AND_ERR_HANDLING_GV100_H */
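These prototypes are only reachable from other units once a chip HAL has populated the ops; below is a minimal wiring sketch in the usual nvgpu designated-initializer style. The surrounding HAL file and the field names are assumptions consistent with the call sites in this diff, not part of the change itself.

/* Illustrative only: gv100 HAL wiring for the new intr/err-handling unit. */
	.nvlink.intr = {
		.common_intr_enable      = gv100_nvlink_common_intr_enable,
		.init_nvlipt_intr        = gv100_nvlink_init_nvlipt_intr,
		.enable_link_intr        = gv100_nvlink_enable_link_intr,
		.init_mif_intr           = gv100_nvlink_init_mif_intr,
		.mif_intr_enable         = gv100_nvlink_mif_intr_enable,
		.dlpl_intr_enable        = gv100_nvlink_dlpl_intr_enable,
		.minion_clear_interrupts = gv100_nvlink_minion_clear_interrupts,
		.init_minion_intr        = gv100_nvlink_init_minion_intr,
		.minion_falcon_isr       = gv100_nvlink_minion_falcon_isr,
		.isr                     = gv100_nvlink_isr,
	},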
@@ -33,66 +33,16 @@
|
||||
#include <nvgpu/timers.h>
|
||||
#include <nvgpu/gk20a.h>
|
||||
#include <nvgpu/top.h>
|
||||
|
||||
#include "nvlink_gv100.h"
|
||||
|
||||
#include <nvgpu/hw/gv100/hw_nvlinkip_discovery_gv100.h>
|
||||
#include <nvgpu/hw/gv100/hw_nvlipt_gv100.h>
|
||||
#include <nvgpu/hw/gv100/hw_ioctrl_gv100.h>
|
||||
#include <nvgpu/hw/gv100/hw_minion_gv100.h>
|
||||
#include <nvgpu/hw/gv100/hw_nvl_gv100.h>
|
||||
#include <nvgpu/hw/gv100/hw_ioctrlmif_gv100.h>
|
||||
#include <nvgpu/hw/gv100/hw_trim_gv100.h>
|
||||
#include <nvgpu/hw/gv100/hw_nvtlc_gv100.h>
|
||||
|
||||
#define NVLINK_PLL_ON_TIMEOUT_MS 30
|
||||
#define NVLINK_SUBLINK_TIMEOUT_MS 200
|
||||
/*
|
||||
* The manuals are missing some useful defines
|
||||
* we add them for now
|
||||
*/
|
||||
|
||||
#define IPT_INTR_CONTROL_LINK(i) (nvlipt_intr_control_link0_r() + (i)*4)
|
||||
#define IPT_ERR_UC_STATUS_LINK(i) (nvlipt_err_uc_status_link0_r() + (i)*36)
|
||||
#define IPT_ERR_UC_MASK_LINK(i) (nvlipt_err_uc_mask_link0_r() + (i)*36)
|
||||
#define IPT_ERR_UC_SEVERITY_LINK(i) (nvlipt_err_uc_severity_link0_r() + (i)*36)
|
||||
#define IPT_ERR_UC_FIRST_LINK(i) (nvlipt_err_uc_first_link0_r() + (i)*36)
|
||||
#define IPT_ERR_UC_ADVISORY_LINK(i) (nvlipt_err_uc_advisory_link0_r() + (i)*36)
|
||||
#define IPT_ERR_C_STATUS_LINK(i) (nvlipt_err_c_status_link0_r() + (i)*36)
|
||||
#define IPT_ERR_C_MASK_LINK(i) (nvlipt_err_c_mask_link0_r() + (i)*36)
|
||||
#define IPT_ERR_C_FIRST_LINK(i) (nvlipt_err_c_first_link0_r() + (i)*36)
|
||||
#define IPT_ERR_CONTROL_LINK(i) (nvlipt_err_control_link0_r() + (i)*4)
|
||||
|
||||
#define IPT_ERR_UC_ACTIVE_BITS (nvlipt_err_uc_status_link0_dlprotocol_f(1) | \
|
||||
nvlipt_err_uc_status_link0_datapoisoned_f(1) | \
|
||||
nvlipt_err_uc_status_link0_flowcontrol_f(1) | \
|
||||
nvlipt_err_uc_status_link0_responsetimeout_f(1) | \
|
||||
nvlipt_err_uc_status_link0_targeterror_f(1) | \
|
||||
nvlipt_err_uc_status_link0_unexpectedresponse_f(1) | \
|
||||
nvlipt_err_uc_status_link0_receiveroverflow_f(1) | \
|
||||
nvlipt_err_uc_status_link0_malformedpacket_f(1) | \
|
||||
nvlipt_err_uc_status_link0_stompedpacketreceived_f(1) | \
|
||||
nvlipt_err_uc_status_link0_unsupportedrequest_f(1) | \
|
||||
nvlipt_err_uc_status_link0_ucinternal_f(1))
|
||||
|
||||
|
||||
#define MINION_FALCON_INTR_MASK (minion_falcon_irqmset_wdtmr_set_f() | \
|
||||
minion_falcon_irqmset_halt_set_f() | \
|
||||
minion_falcon_irqmset_exterr_set_f()| \
|
||||
minion_falcon_irqmset_swgen0_set_f()| \
|
||||
minion_falcon_irqmset_swgen1_set_f())
|
||||
|
||||
#define MINION_FALCON_INTR_DEST ( \
|
||||
minion_falcon_irqdest_host_wdtmr_host_f() | \
|
||||
minion_falcon_irqdest_host_halt_host_f() | \
|
||||
minion_falcon_irqdest_host_exterr_host_f() | \
|
||||
minion_falcon_irqdest_host_swgen0_host_f() | \
|
||||
minion_falcon_irqdest_host_swgen1_host_f() | \
|
||||
minion_falcon_irqdest_target_wdtmr_host_normal_f() | \
|
||||
minion_falcon_irqdest_target_halt_host_normal_f() | \
|
||||
minion_falcon_irqdest_target_exterr_host_normal_f() | \
|
||||
minion_falcon_irqdest_target_swgen0_host_normal_f() | \
|
||||
minion_falcon_irqdest_target_swgen1_host_normal_f())
|
||||
|
||||
#define NVL_DEVICE(str) nvlinkip_discovery_common_device_##str##_v()
|
||||
|
||||
@@ -140,70 +90,6 @@ static int gv100_nvlink_rxcal_en(struct gk20a *g, unsigned long mask);
|
||||
*-----------------------------------------------------------------------------*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Initialization of link specific interrupts
|
||||
*/
|
||||
static void gv100_nvlink_minion_link_intr_enable(struct gk20a *g, u32 link_id,
|
||||
bool enable)
|
||||
{
|
||||
u32 intr, links;
|
||||
|
||||
/* Only stall interrupts for now */
|
||||
intr = MINION_REG_RD32(g, minion_minion_intr_stall_en_r());
|
||||
links = minion_minion_intr_stall_en_link_v(intr);
|
||||
|
||||
if (enable)
|
||||
links |= BIT(link_id);
|
||||
else
|
||||
links &= ~BIT(link_id);
|
||||
|
||||
intr = set_field(intr, minion_minion_intr_stall_en_link_m(),
|
||||
minion_minion_intr_stall_en_link_f(links));
|
||||
MINION_REG_WR32(g, minion_minion_intr_stall_en_r(), intr);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialization of falcon interrupts
|
||||
*/
|
||||
static void gv100_nvlink_minion_falcon_intr_enable(struct gk20a *g, bool enable)
|
||||
{
|
||||
u32 reg;
|
||||
|
||||
reg = MINION_REG_RD32(g, minion_minion_intr_stall_en_r());
|
||||
if (enable) {
|
||||
reg = set_field(reg, minion_minion_intr_stall_en_fatal_m(),
|
||||
minion_minion_intr_stall_en_fatal_enable_f());
|
||||
reg = set_field(reg, minion_minion_intr_stall_en_nonfatal_m(),
|
||||
minion_minion_intr_stall_en_nonfatal_enable_f());
|
||||
reg = set_field(reg, minion_minion_intr_stall_en_falcon_stall_m(),
|
||||
minion_minion_intr_stall_en_falcon_stall_enable_f());
|
||||
reg = set_field(reg, minion_minion_intr_stall_en_falcon_nostall_m(),
|
||||
minion_minion_intr_stall_en_falcon_nostall_enable_f());
|
||||
} else {
|
||||
reg = set_field(reg, minion_minion_intr_stall_en_fatal_m(),
|
||||
minion_minion_intr_stall_en_fatal_disable_f());
|
||||
reg = set_field(reg, minion_minion_intr_stall_en_nonfatal_m(),
|
||||
minion_minion_intr_stall_en_nonfatal_disable_f());
|
||||
reg = set_field(reg, minion_minion_intr_stall_en_falcon_stall_m(),
|
||||
minion_minion_intr_stall_en_falcon_stall_disable_f());
|
||||
reg = set_field(reg, minion_minion_intr_stall_en_falcon_nostall_m(),
|
||||
minion_minion_intr_stall_en_falcon_nostall_disable_f());
|
||||
}
|
||||
|
||||
MINION_REG_WR32(g, minion_minion_intr_stall_en_r(), reg);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize minion IP interrupts
|
||||
*/
|
||||
static void gv100_nvlink_initialize_minion(struct gk20a *g)
|
||||
{
|
||||
/* Disable non-stall tree */
|
||||
MINION_REG_WR32(g, minion_minion_intr_nonstall_en_r(), 0x0);
|
||||
|
||||
gv100_nvlink_minion_falcon_intr_enable(g, true);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if minion is up
|
||||
*/
|
||||
@@ -220,118 +106,6 @@ static bool __gv100_nvlink_minion_is_running(struct gk20a *g)
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Falcon specific ISR handling
|
||||
*/
|
||||
static bool gv100_nvlink_minion_falcon_isr(struct gk20a *g)
|
||||
{
|
||||
u32 intr;
|
||||
|
||||
intr = MINION_REG_RD32(g, minion_falcon_irqstat_r()) &
|
||||
MINION_REG_RD32(g, minion_falcon_irqmask_r());
|
||||
|
||||
if (!intr)
|
||||
return true;
|
||||
|
||||
if (intr & minion_falcon_irqstat_exterr_true_f()) {
|
||||
nvgpu_err(g, "FALCON EXT ADDR: 0x%x 0x%x 0x%x",
|
||||
MINION_REG_RD32(g, 0x244),
|
||||
MINION_REG_RD32(g, 0x248),
|
||||
MINION_REG_RD32(g, 0x24c));
|
||||
}
|
||||
|
||||
MINION_REG_WR32(g, minion_falcon_irqsclr_r(), intr);
|
||||
|
||||
nvgpu_err(g, "FATAL minion IRQ: 0x%08x", intr);
|
||||
|
||||
|
||||
intr = MINION_REG_RD32(g, minion_falcon_irqstat_r()) &
|
||||
MINION_REG_RD32(g, minion_falcon_irqmask_r());
|
||||
|
||||
return (intr == 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Link Specific ISR
|
||||
*/
|
||||
|
||||
static bool gv100_nvlink_minion_link_isr(struct gk20a *g, u32 link_id)
|
||||
{
|
||||
u32 intr, code;
|
||||
bool fatal = false;
|
||||
|
||||
intr = MINION_REG_RD32(g, minion_nvlink_link_intr_r(link_id));
|
||||
code = minion_nvlink_link_intr_code_v(intr);
|
||||
|
||||
if (code == minion_nvlink_link_intr_code_swreq_v()) {
|
||||
nvgpu_err(g, " Intr SWREQ, link: %d subcode: %x",
|
||||
link_id, minion_nvlink_link_intr_subcode_v(intr));
|
||||
} else if (code == minion_nvlink_link_intr_code_pmdisabled_v()) {
|
||||
nvgpu_err(g, " Fatal Intr PMDISABLED, link: %d subcode: %x",
|
||||
link_id, minion_nvlink_link_intr_subcode_v(intr));
|
||||
fatal = true;
|
||||
} else if (code == minion_nvlink_link_intr_code_na_v()) {
|
||||
nvgpu_err(g, " Fatal Intr NA, link: %d subcode: %x",
|
||||
link_id, minion_nvlink_link_intr_subcode_v(intr));
|
||||
fatal = true;
|
||||
} else if (code == minion_nvlink_link_intr_code_dlreq_v()) {
|
||||
nvgpu_err(g, " Fatal Intr DLREQ, link: %d subcode: %x",
|
||||
link_id, minion_nvlink_link_intr_subcode_v(intr));
|
||||
fatal = true;
|
||||
} else {
|
||||
nvgpu_err(g, " Fatal Intr UNKN:%x, link: %d subcode: %x", code,
|
||||
link_id, minion_nvlink_link_intr_subcode_v(intr));
|
||||
fatal = true;
|
||||
}
|
||||
|
||||
if (fatal)
|
||||
gv100_nvlink_minion_link_intr_enable(g, link_id, false);
|
||||
|
||||
intr = set_field(intr, minion_nvlink_link_intr_state_m(),
|
||||
minion_nvlink_link_intr_state_f(1));
|
||||
MINION_REG_WR32(g, minion_nvlink_link_intr_r(link_id), intr);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Global minion routine to service interrupts
|
||||
*/
|
||||
static bool gv100_nvlink_minion_isr(struct gk20a *g) {
|
||||
|
||||
u32 intr, i;
|
||||
unsigned long links;
|
||||
|
||||
intr = MINION_REG_RD32(g, minion_minion_intr_r()) &
|
||||
MINION_REG_RD32(g, minion_minion_intr_stall_en_r());
|
||||
|
||||
if (minion_minion_intr_falcon_stall_v(intr) ||
|
||||
minion_minion_intr_falcon_nostall_v(intr))
|
||||
gv100_nvlink_minion_falcon_isr(g);
|
||||
|
||||
if (minion_minion_intr_fatal_v(intr)) {
|
||||
gv100_nvlink_minion_falcon_intr_enable(g, false);
|
||||
MINION_REG_WR32(g, minion_minion_intr_r(),
|
||||
minion_minion_intr_fatal_f(1));
|
||||
}
|
||||
|
||||
if (minion_minion_intr_nonfatal_v(intr))
|
||||
MINION_REG_WR32(g, minion_minion_intr_r(),
|
||||
minion_minion_intr_nonfatal_f(1));
|
||||
|
||||
links = minion_minion_intr_link_v(intr) & g->nvlink.enabled_links;
|
||||
|
||||
if (links)
|
||||
for_each_set_bit(i, &links, 32)
|
||||
gv100_nvlink_minion_link_isr(g, i);
|
||||
|
||||
/* Re-test interrupt status */
|
||||
intr = MINION_REG_RD32(g, minion_minion_intr_r()) &
|
||||
MINION_REG_RD32(g, minion_minion_intr_stall_en_r());
|
||||
|
||||
return (intr == 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Load minion FW and set up bootstrap
|
||||
*/
|
||||
@@ -360,8 +134,7 @@ static u32 gv100_nvlink_minion_load(struct gk20a *g)
 	nvgpu_falcon_reset(g->minion_flcn);
 
 	/* Clear interrupts */
-	nvgpu_falcon_set_irq(g->minion_flcn, true, MINION_FALCON_INTR_MASK,
-			MINION_FALCON_INTR_DEST);
+	g->ops.nvlink.intr.minion_clear_interrupts(g);
 
 	err = nvgpu_nvlink_minion_load_ucode(g, nvgpu_minion_fw);
 	if (err != 0) {
@@ -400,14 +173,14 @@ static u32 gv100_nvlink_minion_load(struct gk20a *g)
 	} while (!nvgpu_timeout_expired_msg(&timeout, " minion boot timeout"));
 
 	/* Service interrupts */
-	gv100_nvlink_minion_falcon_isr(g);
+	g->ops.nvlink.intr.minion_falcon_isr(g);
 
 	if (nvgpu_timeout_peek_expired(&timeout)) {
 		err = -ETIMEDOUT;
 		goto exit;
 	}
 
-	gv100_nvlink_initialize_minion(g);
+	g->ops.nvlink.intr.init_minion_intr(g);
 	return err;
 
 exit:
@@ -654,478 +427,6 @@ static int gv100_nvlink_minion_lane_shutdown(struct gk20a *g, u32 link_id,
|
||||
return err;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/*
|
||||
*-----------------------------------------------------------------------------*
|
||||
* TLC API
|
||||
*-----------------------------------------------------------------------------*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Init TLC IP and prod regs
|
||||
*/
|
||||
static void gv100_nvlink_initialize_tlc(struct gk20a *g, u32 link_id)
|
||||
{
|
||||
}
|
||||
|
||||
/*
|
||||
* Init TLC per link interrupts
|
||||
*/
|
||||
static void gv100_nvlink_tlc_intr_enable(struct gk20a *g, u32 link_id,
|
||||
bool enable)
|
||||
{
|
||||
u32 reg_rx0 = 0, reg_rx1 = 0, reg_tx = 0;
|
||||
|
||||
if (enable) {
|
||||
/* Set PROD values */
|
||||
reg_rx0 = 0x0FFFFFF;
|
||||
reg_rx1 = 0x03FFFFF;
|
||||
reg_tx = 0x1FFFFFF;
|
||||
}
|
||||
|
||||
TLC_REG_WR32(g, link_id, nvtlc_rx_err_report_en_0_r(), reg_rx0);
|
||||
TLC_REG_WR32(g, link_id, nvtlc_rx_err_report_en_1_r(), reg_rx1);
|
||||
TLC_REG_WR32(g, link_id, nvtlc_tx_err_report_en_0_r(), reg_tx);
|
||||
}
|
||||
|
||||
/*
|
||||
* helper function to get TLC intr status in common structure
|
||||
*/
|
||||
static void gv100_nvlink_tlc_get_intr(struct gk20a *g, u32 link_id)
|
||||
{
|
||||
g->nvlink.tlc_rx_err_status_0[link_id] =
|
||||
TLC_REG_RD32(g, link_id, nvtlc_rx_err_status_0_r());
|
||||
g->nvlink.tlc_rx_err_status_1[link_id] =
|
||||
TLC_REG_RD32(g, link_id, nvtlc_rx_err_status_1_r());
|
||||
g->nvlink.tlc_tx_err_status_0[link_id] =
|
||||
TLC_REG_RD32(g, link_id, nvtlc_tx_err_status_0_r());
|
||||
}
|
||||
|
||||
/*
|
||||
* Interrupt routine handler for TLC
|
||||
*/
|
||||
static void gv100_nvlink_tlc_isr(struct gk20a *g, u32 link_id)
|
||||
{
|
||||
|
||||
if (g->nvlink.tlc_rx_err_status_0[link_id]) {
|
||||
/* All TLC RX 0 errors are fatal. Notify and disable */
|
||||
nvgpu_err(g, "Fatal TLC RX 0 interrupt on link %d mask: %x",
|
||||
link_id, g->nvlink.tlc_rx_err_status_0[link_id]);
|
||||
TLC_REG_WR32(g, link_id, nvtlc_rx_err_first_0_r(),
|
||||
g->nvlink.tlc_rx_err_status_0[link_id]);
|
||||
TLC_REG_WR32(g, link_id, nvtlc_rx_err_status_0_r(),
|
||||
g->nvlink.tlc_rx_err_status_0[link_id]);
|
||||
}
|
||||
if (g->nvlink.tlc_rx_err_status_1[link_id]) {
|
||||
/* All TLC RX 1 errors are fatal. Notify and disable */
|
||||
nvgpu_err(g, "Fatal TLC RX 1 interrupt on link %d mask: %x",
|
||||
link_id, g->nvlink.tlc_rx_err_status_1[link_id]);
|
||||
TLC_REG_WR32(g, link_id, nvtlc_rx_err_first_1_r(),
|
||||
g->nvlink.tlc_rx_err_status_1[link_id]);
|
||||
TLC_REG_WR32(g, link_id, nvtlc_rx_err_status_1_r(),
|
||||
g->nvlink.tlc_rx_err_status_1[link_id]);
|
||||
}
|
||||
if (g->nvlink.tlc_tx_err_status_0[link_id]) {
|
||||
/* All TLC TX 0 errors are fatal. Notify and disable */
|
||||
nvgpu_err(g, "Fatal TLC TX 0 interrupt on link %d mask: %x",
|
||||
link_id, g->nvlink.tlc_tx_err_status_0[link_id]);
|
||||
TLC_REG_WR32(g, link_id, nvtlc_tx_err_first_0_r(),
|
||||
g->nvlink.tlc_tx_err_status_0[link_id]);
|
||||
TLC_REG_WR32(g, link_id, nvtlc_tx_err_status_0_r(),
|
||||
g->nvlink.tlc_tx_err_status_0[link_id]);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
*-----------------------------------------------------------------------------*
|
||||
* DLPL API
|
||||
*-----------------------------------------------------------------------------*
|
||||
*/
|
||||
|
||||
/*
|
||||
* DLPL interrupt enable helper
|
||||
*/
|
||||
static void gv100_nvlink_dlpl_intr_enable(struct gk20a *g, u32 link_id,
|
||||
bool enable)
|
||||
{
|
||||
u32 reg = 0;
|
||||
|
||||
/* Always disable nonstall tree */
|
||||
DLPL_REG_WR32(g, link_id, nvl_intr_nonstall_en_r(), 0);
|
||||
|
||||
if (!enable)
|
||||
{
|
||||
DLPL_REG_WR32(g, link_id, nvl_intr_stall_en_r(), 0);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Clear interrupt register to get rid of stale state (W1C) */
|
||||
DLPL_REG_WR32(g, link_id, nvl_intr_r(), 0xffffffffU);
|
||||
DLPL_REG_WR32(g, link_id, nvl_intr_sw2_r(), 0xffffffffU);
|
||||
|
||||
reg = nvl_intr_stall_en_tx_recovery_long_enable_f() |
|
||||
nvl_intr_stall_en_tx_fault_ram_enable_f() |
|
||||
nvl_intr_stall_en_tx_fault_interface_enable_f() |
|
||||
nvl_intr_stall_en_tx_fault_sublink_change_enable_f() |
|
||||
nvl_intr_stall_en_rx_fault_sublink_change_enable_f() |
|
||||
nvl_intr_stall_en_rx_fault_dl_protocol_enable_f() |
|
||||
nvl_intr_stall_en_ltssm_fault_enable_f();
|
||||
|
||||
DLPL_REG_WR32(g, link_id, nvl_intr_stall_en_r(), reg);
|
||||
|
||||
/* Configure error threshold */
|
||||
reg = DLPL_REG_RD32(g, link_id, nvl_sl1_error_rate_ctrl_r());
|
||||
reg = set_field(reg, nvl_sl1_error_rate_ctrl_short_threshold_man_m(),
|
||||
nvl_sl1_error_rate_ctrl_short_threshold_man_f(0x2));
|
||||
reg = set_field(reg, nvl_sl1_error_rate_ctrl_long_threshold_man_m(),
|
||||
nvl_sl1_error_rate_ctrl_long_threshold_man_f(0x2));
|
||||
DLPL_REG_WR32(g, link_id, nvl_sl1_error_rate_ctrl_r(), reg);
|
||||
}
|
||||
|
||||
/*
|
||||
* DLPL per-link isr
|
||||
*/
|
||||
|
||||
#define DLPL_NON_FATAL_INTR_MASK (nvl_intr_tx_replay_f(1) | \
|
||||
nvl_intr_tx_recovery_short_f(1) | \
|
||||
nvl_intr_tx_recovery_long_f(1) | \
|
||||
nvl_intr_rx_short_error_rate_f(1) | \
|
||||
nvl_intr_rx_long_error_rate_f(1) | \
|
||||
nvl_intr_rx_ila_trigger_f(1) | \
|
||||
nvl_intr_ltssm_protocol_f(1))
|
||||
|
||||
#define DLPL_FATAL_INTR_MASK ( nvl_intr_ltssm_fault_f(1) | \
|
||||
nvl_intr_rx_fault_dl_protocol_f(1) | \
|
||||
nvl_intr_rx_fault_sublink_change_f(1) | \
|
||||
nvl_intr_tx_fault_sublink_change_f(1) | \
|
||||
nvl_intr_tx_fault_interface_f(1) | \
|
||||
nvl_intr_tx_fault_ram_f(1))
|
||||
|
||||
static void gv100_nvlink_dlpl_isr(struct gk20a *g, u32 link_id)
|
||||
{
|
||||
u32 non_fatal_mask = 0;
|
||||
u32 fatal_mask = 0;
|
||||
u32 intr = 0;
|
||||
bool retrain = false;
|
||||
int err;
|
||||
|
||||
intr = DLPL_REG_RD32(g, link_id, nvl_intr_r()) &
|
||||
DLPL_REG_RD32(g, link_id, nvl_intr_stall_en_r());
|
||||
|
||||
if (!intr)
|
||||
return;
|
||||
|
||||
fatal_mask = intr & DLPL_FATAL_INTR_MASK;
|
||||
non_fatal_mask = intr & DLPL_NON_FATAL_INTR_MASK;
|
||||
|
||||
nvgpu_err(g, " handling DLPL %d isr. Fatal: %x non-Fatal: %x",
|
||||
link_id, fatal_mask, non_fatal_mask);
|
||||
|
||||
/* Check if we are not handling an interrupt */
|
||||
if ((fatal_mask | non_fatal_mask) & ~intr)
|
||||
nvgpu_err(g, "Unable to service DLPL intr on link %d", link_id);
|
||||
|
||||
if (non_fatal_mask & nvl_intr_tx_recovery_long_f(1))
|
||||
retrain = true;
|
||||
if (fatal_mask)
|
||||
retrain = false;
|
||||
|
||||
if (retrain) {
|
||||
err = nvgpu_nvlink_train(g, link_id, false);
|
||||
if (err != 0)
|
||||
nvgpu_err(g, "failed to retrain link %d", link_id);
|
||||
}
|
||||
|
||||
/* Clear interrupts */
|
||||
DLPL_REG_WR32(g, link_id, nvl_intr_r(), (non_fatal_mask | fatal_mask));
|
||||
DLPL_REG_WR32(g, link_id, nvl_intr_sw2_r(), 0xffffffffU);
|
||||
}
|
||||
|
||||
/*
|
||||
*-----------------------------------------------------------------------------*
|
||||
* MIF API
|
||||
*-----------------------------------------------------------------------------*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Initialize MIF API with PROD settings
|
||||
*/
|
||||
static void gv100_nvlink_initialize_mif(struct gk20a *g, u32 link_id)
|
||||
{
|
||||
u32 tmp;
|
||||
|
||||
/* Enable MIF RX error */
|
||||
|
||||
/* Containment (make fatal) */
|
||||
tmp = 0;
|
||||
tmp = set_field(tmp,
|
||||
ioctrlmif_rx_err_contain_en_0_rxramdataparityerr_m(),
|
||||
ioctrlmif_rx_err_contain_en_0_rxramdataparityerr__prod_f());
|
||||
tmp = set_field(tmp,
|
||||
ioctrlmif_rx_err_contain_en_0_rxramhdrparityerr_m(),
|
||||
ioctrlmif_rx_err_contain_en_0_rxramhdrparityerr__prod_f());
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_rx_err_contain_en_0_r(), tmp);
|
||||
|
||||
/* Logging (do not ignore) */
|
||||
tmp = 0;
|
||||
tmp = set_field(tmp,
|
||||
ioctrlmif_rx_err_log_en_0_rxramdataparityerr_m(),
|
||||
ioctrlmif_rx_err_log_en_0_rxramdataparityerr_f(1));
|
||||
tmp = set_field(tmp,
|
||||
ioctrlmif_rx_err_log_en_0_rxramhdrparityerr_m(),
|
||||
ioctrlmif_rx_err_log_en_0_rxramhdrparityerr_f(1));
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_rx_err_log_en_0_r(), tmp);
|
||||
|
||||
/* Tx Error */
|
||||
/* Containment (make fatal) */
|
||||
tmp = 0;
|
||||
tmp = set_field(tmp,
|
||||
ioctrlmif_tx_err_contain_en_0_txramdataparityerr_m(),
|
||||
ioctrlmif_tx_err_contain_en_0_txramdataparityerr__prod_f());
|
||||
tmp = set_field(tmp,
|
||||
ioctrlmif_tx_err_contain_en_0_txramhdrparityerr_m(),
|
||||
ioctrlmif_tx_err_contain_en_0_txramhdrparityerr__prod_f());
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_tx_err_contain_en_0_r(), tmp);
|
||||
|
||||
/* Logging (do not ignore) */
|
||||
tmp = 0;
|
||||
tmp = set_field(tmp, ioctrlmif_tx_err_log_en_0_txramdataparityerr_m(),
|
||||
ioctrlmif_tx_err_log_en_0_txramdataparityerr_f(1));
|
||||
tmp = set_field(tmp, ioctrlmif_tx_err_log_en_0_txramhdrparityerr_m(),
|
||||
ioctrlmif_tx_err_log_en_0_txramhdrparityerr_f(1));
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_tx_err_log_en_0_r(), tmp);
|
||||
|
||||
/* Credit release */
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_rx_ctrl_buffer_ready_r(), 0x1);
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_tx_ctrl_buffer_ready_r(), 0x1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Enable per-link MIF interrupts
|
||||
*/
|
||||
static void gv100_nvlink_mif_intr_enable(struct gk20a *g, u32 link_id,
|
||||
bool enable)
|
||||
{
|
||||
u32 reg0 = 0, reg1 = 0;
|
||||
|
||||
if (enable) {
|
||||
reg0 = set_field(reg0,
|
||||
ioctrlmif_rx_err_report_en_0_rxramdataparityerr_m(),
|
||||
ioctrlmif_rx_err_report_en_0_rxramdataparityerr_f(1));
|
||||
reg0 = set_field(reg0,
|
||||
ioctrlmif_rx_err_report_en_0_rxramhdrparityerr_m(),
|
||||
ioctrlmif_rx_err_report_en_0_rxramhdrparityerr_f(1));
|
||||
reg1 = set_field(reg1,
|
||||
ioctrlmif_tx_err_report_en_0_txramdataparityerr_m(),
|
||||
ioctrlmif_tx_err_report_en_0_txramdataparityerr_f(1));
|
||||
reg1 = set_field(reg1,
|
||||
ioctrlmif_tx_err_report_en_0_txramhdrparityerr_m(),
|
||||
ioctrlmif_tx_err_report_en_0_txramhdrparityerr_f(1));
|
||||
}
|
||||
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_rx_err_report_en_0_r(), reg0);
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_tx_err_report_en_0_r(), reg1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle per-link MIF interrupts
|
||||
*/
|
||||
static void gv100_nvlink_mif_isr(struct gk20a *g, u32 link_id)
|
||||
{
|
||||
u32 intr, fatal_mask = 0;
|
||||
|
||||
/* RX Errors */
|
||||
intr = MIF_REG_RD32(g, link_id, ioctrlmif_rx_err_status_0_r());
|
||||
if (intr) {
|
||||
if (intr & ioctrlmif_rx_err_status_0_rxramdataparityerr_m()) {
|
||||
nvgpu_err(g, "Fatal MIF RX interrupt hit on link %d: RAM_DATA_PARITY",
|
||||
link_id);
|
||||
fatal_mask |= ioctrlmif_rx_err_status_0_rxramdataparityerr_f(1);
|
||||
}
|
||||
if (intr & ioctrlmif_rx_err_status_0_rxramhdrparityerr_m()) {
|
||||
nvgpu_err(g, "Fatal MIF RX interrupt hit on link %d: RAM_HDR_PARITY",
|
||||
link_id);
|
||||
fatal_mask |= ioctrlmif_rx_err_status_0_rxramhdrparityerr_f(1);
|
||||
}
|
||||
|
||||
if (fatal_mask) {
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_rx_err_first_0_r(),
|
||||
fatal_mask);
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_rx_err_status_0_r(),
|
||||
fatal_mask);
|
||||
}
|
||||
}
|
||||
|
||||
/* TX Errors */
|
||||
fatal_mask = 0;
|
||||
intr = MIF_REG_RD32(g, link_id, ioctrlmif_tx_err_status_0_r());
|
||||
if (intr) {
|
||||
if (intr & ioctrlmif_tx_err_status_0_txramdataparityerr_m()) {
|
||||
nvgpu_err(g, "Fatal MIF TX interrupt hit on link %d: RAM_DATA_PARITY",
|
||||
link_id);
|
||||
fatal_mask |= ioctrlmif_tx_err_status_0_txramdataparityerr_f(1);
|
||||
}
|
||||
if (intr & ioctrlmif_tx_err_status_0_txramhdrparityerr_m()) {
|
||||
nvgpu_err(g, "Fatal MIF TX interrupt hit on link %d: RAM_HDR_PARITY",
|
||||
link_id);
|
||||
fatal_mask |= ioctrlmif_tx_err_status_0_txramhdrparityerr_f(1);
|
||||
}
|
||||
|
||||
if (fatal_mask) {
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_tx_err_first_0_r(),
|
||||
fatal_mask);
|
||||
MIF_REG_WR32(g, link_id, ioctrlmif_tx_err_status_0_r(),
|
||||
fatal_mask);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
*-----------------------------------------------------------------------------*
|
||||
* NVLIPT API
|
||||
*-----------------------------------------------------------------------------*
|
||||
*/
|
||||
|
||||
/*
|
||||
* NVLIPT IP initialization (per-link)
|
||||
*/
|
||||
static void gv100_nvlink_initialize_nvlipt(struct gk20a *g, u32 link_id)
|
||||
{
|
||||
/* init persistent scratch registers */
|
||||
IPT_REG_WR32(g, nvlipt_scratch_cold_r(),
|
||||
nvlipt_scratch_cold_data_init_v());
|
||||
|
||||
/*
|
||||
* AErr settings (top level)
|
||||
*/
|
||||
|
||||
/* UC first and status reg (W1C) need to be cleared by arch */
|
||||
IPT_REG_WR32(g, IPT_ERR_UC_FIRST_LINK(link_id), IPT_ERR_UC_ACTIVE_BITS);
|
||||
IPT_REG_WR32(g, IPT_ERR_UC_STATUS_LINK(link_id), IPT_ERR_UC_ACTIVE_BITS);
|
||||
|
||||
/* AErr Severity */
|
||||
IPT_REG_WR32(g, IPT_ERR_UC_SEVERITY_LINK(link_id), IPT_ERR_UC_ACTIVE_BITS);
|
||||
|
||||
/* AErr Control settings */
|
||||
IPT_REG_WR32(g, IPT_ERR_CONTROL_LINK(link_id),
|
||||
nvlipt_err_control_link0_fatalenable_f(1) |
|
||||
nvlipt_err_control_link0_nonfatalenable_f(1));
|
||||
}
|
||||
|
||||
/*
|
||||
* Enable NVLIPT interrupts
|
||||
*/
|
||||
static void gv100_nvlink_nvlipt_intr_enable(struct gk20a *g, u32 link_id,
|
||||
bool enable)
|
||||
{
|
||||
u32 val = 0;
|
||||
u32 reg;
|
||||
|
||||
if (enable)
|
||||
val = 1;
|
||||
|
||||
reg = IPT_REG_RD32(g, IPT_INTR_CONTROL_LINK(link_id));
|
||||
reg = set_field(reg, nvlipt_intr_control_link0_stallenable_m(),
|
||||
nvlipt_intr_control_link0_stallenable_f(val));
|
||||
reg = set_field(reg, nvlipt_intr_control_link0_nostallenable_m(),
|
||||
nvlipt_intr_control_link0_nostallenable_f(val));
|
||||
IPT_REG_WR32(g, IPT_INTR_CONTROL_LINK(link_id), reg);
|
||||
}
|
||||
|
||||
/*
|
||||
* Per-link NVLIPT ISR handler
|
||||
*/
|
||||
static bool gv100_nvlink_nvlipt_isr(struct gk20a *g, u32 link_id)
|
||||
{
|
||||
/*
|
||||
* Interrupt handling happens in leaf handlers. Assume all interrupts
|
||||
* were handled and clear roll-ups.
|
||||
*/
|
||||
IPT_REG_WR32(g, IPT_ERR_UC_FIRST_LINK(link_id), IPT_ERR_UC_ACTIVE_BITS);
|
||||
IPT_REG_WR32(g, IPT_ERR_UC_STATUS_LINK(link_id), IPT_ERR_UC_ACTIVE_BITS);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
 *******************************************************************************
 * Interrupt handling functions                                                *
 *******************************************************************************
 */

/*
 * Enable common interrupts
 */
static void gv100_nvlink_common_intr_enable(struct gk20a *g,
						unsigned long mask)
{
	u32 reg, i;

	/* Init IOCTRL */
	for_each_set_bit(i, &mask, 32) {
		reg = IOCTRL_REG_RD32(g, ioctrl_link_intr_0_mask_r(i));
		reg |= (ioctrl_link_intr_0_mask_fatal_f(1) |
			ioctrl_link_intr_0_mask_nonfatal_f(1) |
			ioctrl_link_intr_0_mask_correctable_f(1) |
			ioctrl_link_intr_0_mask_intra_f(1));
		IOCTRL_REG_WR32(g, ioctrl_link_intr_0_mask_r(i), reg);
	}

	reg = IOCTRL_REG_RD32(g, ioctrl_common_intr_0_mask_r());
	reg |= (ioctrl_common_intr_0_mask_fatal_f(1) |
		ioctrl_common_intr_0_mask_nonfatal_f(1) |
		ioctrl_common_intr_0_mask_correctable_f(1) |
		ioctrl_common_intr_0_mask_intra_f(1));
	IOCTRL_REG_WR32(g, ioctrl_common_intr_0_mask_r(), reg);

	/* Init NVLIPT */
	IPT_REG_WR32(g, nvlipt_intr_control_common_r(),
		nvlipt_intr_control_common_stallenable_f(1) |
		nvlipt_intr_control_common_nonstallenable_f(1));
}

/*
 * Enable link specific interrupts (top-level)
 */
static void gv100_nvlink_enable_link_intr(struct gk20a *g, u32 link_id,
						bool enable)
{
	gv100_nvlink_minion_link_intr_enable(g, link_id, enable);
	gv100_nvlink_dlpl_intr_enable(g, link_id, enable);
	gv100_nvlink_tlc_intr_enable(g, link_id, enable);
	gv100_nvlink_mif_intr_enable(g, link_id, enable);
	gv100_nvlink_nvlipt_intr_enable(g, link_id, enable);
}

/*
 * Top level interrupt handler
 */
int gv100_nvlink_isr(struct gk20a *g)
{
	unsigned long links;
	u32 link_id;

	links = ioctrl_top_intr_0_status_link_v(
			IOCTRL_REG_RD32(g, ioctrl_top_intr_0_status_r()));

	links &= g->nvlink.enabled_links;
	/* As per ARCH minion must be serviced first */
	gv100_nvlink_minion_isr(g);

	for_each_set_bit(link_id, &links, 32) {
		/* Cache error logs from TLC, DL handler may clear them */
		gv100_nvlink_tlc_get_intr(g, link_id);
		gv100_nvlink_dlpl_isr(g, link_id);
		gv100_nvlink_tlc_isr(g, link_id);
		gv100_nvlink_mif_isr(g, link_id);

		/* NVLIPT is top-level. Do it last */
		gv100_nvlink_nvlipt_isr(g, link_id);
	}
	return 0;
}

/*******************************************************************************
 * Helper functions                                                            *
 *******************************************************************************
 */
@@ -1144,7 +445,7 @@ static u32 __gv100_nvlink_state_load_hal(struct gk20a *g)
{
	unsigned long discovered = g->nvlink.discovered_links;

	gv100_nvlink_common_intr_enable(g, discovered);
	g->ops.nvlink.intr.common_intr_enable(g, discovered);
	return gv100_nvlink_minion_load(g);
}

@@ -1334,9 +635,8 @@ static int gv100_nvlink_enable_links_post_top(struct gk20a *g, u32 links)
	for_each_set_bit(link_id, &enabled_links, 32) {
		if (g->ops.nvlink.set_sw_war)
			g->ops.nvlink.set_sw_war(g, link_id);
		gv100_nvlink_initialize_tlc(g, link_id);
		gv100_nvlink_initialize_nvlipt(g, link_id);
		gv100_nvlink_enable_link_intr(g, link_id, true);
		g->ops.nvlink.intr.init_nvlipt_intr(g, link_id);
		g->ops.nvlink.intr.enable_link_intr(g, link_id, true);

		g->nvlink.initialized_links |= BIT(link_id);
	};
@@ -1900,8 +1200,8 @@ int gv100_nvlink_interface_init(struct gk20a *g)
	int err;

	for_each_set_bit(link_id, &mask, 32) {
		gv100_nvlink_initialize_mif(g, link_id);
		gv100_nvlink_mif_intr_enable(g, link_id, true);
		g->ops.nvlink.intr.init_mif_intr(g, link_id);
		g->ops.nvlink.intr.mif_intr_enable(g, link_id, true);
	}

	err = g->ops.fb.init_nvlink(g);
@@ -2057,7 +1357,7 @@ int gv100_nvlink_link_set_mode(struct gk20a *g, u32 link_id, u32 mode)
		return -EPERM;
	case nvgpu_nvlink_link_disable_err_detect:
		/* Disable Link interrupts */
		gv100_nvlink_dlpl_intr_enable(g, link_id, false);
		g->ops.nvlink.intr.dlpl_intr_enable(g, link_id, false);
		break;
	case nvgpu_nvlink_link_lane_disable:
		err = gv100_nvlink_minion_lane_disable(g, link_id, true);

@@ -30,7 +30,6 @@ struct gk20a;
int gv100_nvlink_discover_ioctrl(struct gk20a *g);
int gv100_nvlink_discover_link(struct gk20a *g);
int gv100_nvlink_init(struct gk20a *g);
int gv100_nvlink_isr(struct gk20a *g);
int gv100_nvlink_minion_send_command(struct gk20a *g, u32 link_id, u32 command,
	u32 scratch_0, bool sync);
int gv100_nvlink_setup_pll(struct gk20a *g, unsigned long link_mask);

@@ -69,6 +69,7 @@
#include "common/falcon/falcon_gv100.h"
#include "common/nvdec/nvdec_gp106.h"
#include "common/nvlink/init/device_reginit_gv100.h"
#include "common/nvlink/intr_and_err_handling_gv100.h"
#include "common/nvlink/nvlink_gv100.h"
#include "common/nvlink/nvlink_tu104.h"
#include "common/pmu/perf/perf_gv100.h"
@@ -1061,7 +1062,6 @@ static const struct gpu_ops gv100_ops = {
		.discover_ioctrl = gv100_nvlink_discover_ioctrl,
		.discover_link = gv100_nvlink_discover_link,
		.init = gv100_nvlink_init,
		.isr = gv100_nvlink_isr,
		.rxdet = NULL,
		.setup_pll = gv100_nvlink_setup_pll,
		.minion_data_ready_en = gv100_nvlink_minion_data_ready_en,
@@ -1082,6 +1082,19 @@ static const struct gpu_ops gv100_ops = {
		.shutdown = gv100_nvlink_shutdown,
		.early_init = gv100_nvlink_early_init,
		.speed_config = gv100_nvlink_speed_config,
		.intr = {
			.minion_clear_interrupts =
				gv100_nvlink_minion_clear_interrupts,
			.init_minion_intr = gv100_nvlink_init_minion_intr,
			.minion_falcon_isr = gv100_nvlink_minion_falcon_isr,
			.common_intr_enable = gv100_nvlink_common_intr_enable,
			.init_nvlipt_intr = gv100_nvlink_init_nvlipt_intr,
			.enable_link_intr = gv100_nvlink_enable_link_intr,
			.init_mif_intr = gv100_nvlink_init_mif_intr,
			.mif_intr_enable = gv100_nvlink_mif_intr_enable,
			.dlpl_intr_enable = gv100_nvlink_dlpl_intr_enable,
			.isr = gv100_nvlink_isr,
		}
	},
#endif
	.top = {

@@ -1474,7 +1474,6 @@ struct gpu_ops {
		int (*init)(struct gk20a *g);
		int (*discover_ioctrl)(struct gk20a *g);
		int (*discover_link)(struct gk20a *g);
		int (*isr)(struct gk20a *g);
		int (*rxdet)(struct gk20a *g, u32 link_id);
		int (*setup_pll)(struct gk20a *g, unsigned long link_mask);
		int (*minion_data_ready_en)(struct gk20a *g,
@@ -1498,6 +1497,22 @@ struct gpu_ops {
		int (*shutdown)(struct gk20a *g);
		int (*early_init)(struct gk20a *g);
		int (*speed_config)(struct gk20a *g);
		struct {
			void (*minion_clear_interrupts)(struct gk20a *g);
			void (*init_minion_intr)(struct gk20a *g);
			bool (*minion_falcon_isr)(struct gk20a *g);
			void (*common_intr_enable)(struct gk20a *g,
					unsigned long mask);
			void (*init_nvlipt_intr)(struct gk20a *g, u32 link_id);
			void (*enable_link_intr)(struct gk20a *g, u32 link_id,
					bool enable);
			void (*init_mif_intr)(struct gk20a *g, u32 link_id);
			void (*mif_intr_enable)(struct gk20a *g, u32 link_id,
					bool enable);
			void (*dlpl_intr_enable)(struct gk20a *g, u32 link_id,
					bool enable);
			int (*isr)(struct gk20a *g);
		} intr;
	} nvlink;
	struct {
		u32 (*get_nvhsclk_ctrl_e_clk_nvl)(struct gk20a *g);

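The hunk above declares the new per-link interrupt HAL inside gpu_ops. For reference only, here is a minimal sketch, not part of this change, of how a chip-agnostic caller could drive interrupt setup through the g->ops.nvlink.intr pointers instead of calling the gv100_* functions directly; the helper name nvlink_example_setup_intr is hypothetical.

/*
 * Illustrative sketch only: a hypothetical caller using the new
 * g->ops.nvlink.intr function pointers. Assumes links have already been
 * discovered and the given link is enabled.
 */
static void nvlink_example_setup_intr(struct gk20a *g, u32 link_id)
{
	/* Chip-wide interrupt enables, keyed off the discovered-link mask. */
	g->ops.nvlink.intr.common_intr_enable(g, g->nvlink.discovered_links);

	/* Per-link NVLIPT error programming, then the per-link enables. */
	g->ops.nvlink.intr.init_nvlipt_intr(g, link_id);
	g->ops.nvlink.intr.enable_link_intr(g, link_id, true);
}
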
@@ -75,6 +75,7 @@
#include "common/top/top_gm20b.h"
#include "common/top/top_gp10b.h"
#include "common/nvlink/init/device_reginit_gv100.h"
#include "common/nvlink/intr_and_err_handling_gv100.h"
#include "common/nvlink/nvlink_gv100.h"
#include "common/nvlink/nvlink_tu104.h"
#include "common/sync/syncpt_cmdbuf_gv11b.h"
@@ -1099,7 +1100,6 @@ static const struct gpu_ops tu104_ops = {
		.discover_ioctrl = gv100_nvlink_discover_ioctrl,
		.discover_link = gv100_nvlink_discover_link,
		.init = gv100_nvlink_init,
		.isr = gv100_nvlink_isr,
		.rxdet = tu104_nvlink_rxdet,
		.setup_pll = tu104_nvlink_setup_pll,
		.minion_data_ready_en = tu104_nvlink_minion_data_ready_en,
@@ -1119,6 +1119,19 @@ static const struct gpu_ops tu104_ops = {
		.shutdown = gv100_nvlink_shutdown,
		.early_init = gv100_nvlink_early_init,
		.speed_config = tu104_nvlink_speed_config,
		.intr = {
			.minion_clear_interrupts =
				gv100_nvlink_minion_clear_interrupts,
			.init_minion_intr = gv100_nvlink_init_minion_intr,
			.minion_falcon_isr = gv100_nvlink_minion_falcon_isr,
			.common_intr_enable = gv100_nvlink_common_intr_enable,
			.init_nvlipt_intr = gv100_nvlink_init_nvlipt_intr,
			.enable_link_intr = gv100_nvlink_enable_link_intr,
			.init_mif_intr = gv100_nvlink_init_mif_intr,
			.mif_intr_enable = gv100_nvlink_mif_intr_enable,
			.dlpl_intr_enable = gv100_nvlink_dlpl_intr_enable,
			.isr = gv100_nvlink_isr,
		}
	},
#endif
	.acr = {
