diff --git a/arch/nvgpu-common.yaml b/arch/nvgpu-common.yaml index 6372e6737..e4089c457 100644 --- a/arch/nvgpu-common.yaml +++ b/arch/nvgpu-common.yaml @@ -143,8 +143,6 @@ nvlink: common/nvlink/init/device_reginit.c, common/nvlink/init/device_reginit_gv100.c, common/nvlink/init/device_reginit_gv100.h, - common/nvlink/intr_and_err_handling_gv100.c, - common/nvlink/intr_and_err_handling_gv100.h, include/nvgpu/nvlink.h, include/nvgpu/nvlink_device_reginit.h, include/nvgpu/nvlink_link_mode_transitions.h, diff --git a/arch/nvgpu-hal-new.yaml b/arch/nvgpu-hal-new.yaml index 94fcf26b4..a92973090 100644 --- a/arch/nvgpu-hal-new.yaml +++ b/arch/nvgpu-hal-new.yaml @@ -682,6 +682,8 @@ nvlink: hal/nvlink/link_mode_transitions_gv100.h, hal/nvlink/link_mode_transitions_tu104.c, hal/nvlink/link_mode_transitions_tu104.h, + hal/nvlink/intr_and_err_handling_tu104.c, + hal/nvlink/intr_and_err_handling_tu104.h, hal/nvlink/minion_gv100.c, hal/nvlink/minion_gv100.h, hal/nvlink/minion_tu104.c, diff --git a/drivers/gpu/nvgpu/Makefile b/drivers/gpu/nvgpu/Makefile index acc0765f5..8b5ae90ff 100644 --- a/drivers/gpu/nvgpu/Makefile +++ b/drivers/gpu/nvgpu/Makefile @@ -158,7 +158,6 @@ nvgpu-y += \ common/nvlink/probe.o \ common/nvlink/init/device_reginit.o \ common/nvlink/init/device_reginit_gv100.o \ - common/nvlink/intr_and_err_handling_gv100.o \ common/nvlink/minion.o \ common/nvlink/link_mode_transitions.o \ common/nvlink/nvlink_gv100.o \ @@ -168,6 +167,7 @@ nvgpu-y += \ os/linux/nvlink.o \ hal/nvlink/minion_gv100.o \ hal/nvlink/minion_tu104.o \ + hal/nvlink/intr_and_err_handling_tu104.o \ hal/nvlink/link_mode_transitions_gv100.o \ hal/nvlink/link_mode_transitions_tu104.o diff --git a/drivers/gpu/nvgpu/Makefile.sources b/drivers/gpu/nvgpu/Makefile.sources index a57409d1a..c01cfd51f 100644 --- a/drivers/gpu/nvgpu/Makefile.sources +++ b/drivers/gpu/nvgpu/Makefile.sources @@ -572,7 +572,6 @@ srcs += common/vbios/nvlink_bios.c \ common/nvlink/probe.c \ common/nvlink/init/device_reginit.c \ 
common/nvlink/init/device_reginit_gv100.c \ - common/nvlink/intr_and_err_handling_gv100.c \ common/nvlink/minion.c \ common/nvlink/link_mode_transitions.c \ common/nvlink/nvlink_gv100.c \ @@ -580,6 +579,7 @@ srcs += common/vbios/nvlink_bios.c \ common/nvlink/nvlink.c \ hal/nvlink/minion_gv100.c \ hal/nvlink/minion_tu104.c \ + hal/nvlink/intr_and_err_handling_tu104.c \ hal/nvlink/link_mode_transitions_gv100.c \ hal/nvlink/link_mode_transitions_tu104.c endif diff --git a/drivers/gpu/nvgpu/common/nvlink/intr_and_err_handling_gv100.c b/drivers/gpu/nvgpu/common/nvlink/intr_and_err_handling_gv100.c deleted file mode 100644 index ab3dae423..000000000 --- a/drivers/gpu/nvgpu/common/nvlink/intr_and_err_handling_gv100.c +++ /dev/null @@ -1,511 +0,0 @@ -/* - * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifdef CONFIG_NVGPU_NVLINK - -#include -#include -#include "intr_and_err_handling_gv100.h" - -#include -#include -#include -#include -#include - -/* - * The manuals are missing some useful defines - * we add them for now - */ -#define IPT_INTR_CONTROL_LINK(i) (nvlipt_intr_control_link0_r() + (i)*4U) -#define IPT_ERR_UC_STATUS_LINK(i) (nvlipt_err_uc_status_link0_r() + (i)*36U) -#define IPT_ERR_UC_MASK_LINK(i) (nvlipt_err_uc_mask_link0_r() + (i)*36U) -#define IPT_ERR_UC_SEVERITY_LINK(i) (nvlipt_err_uc_severity_link0_r() + (i)*36U) -#define IPT_ERR_UC_FIRST_LINK(i) (nvlipt_err_uc_first_link0_r() + (i)*36U) -#define IPT_ERR_UC_ADVISORY_LINK(i) (nvlipt_err_uc_advisory_link0_r() + (i)*36U) -#define IPT_ERR_C_STATUS_LINK(i) (nvlipt_err_c_status_link0_r() + (i)*36U) -#define IPT_ERR_C_MASK_LINK(i) (nvlipt_err_c_mask_link0_r() + (i)*36U) -#define IPT_ERR_C_FIRST_LINK(i) (nvlipt_err_c_first_link0_r() + (i)*36U) -#define IPT_ERR_CONTROL_LINK(i) (nvlipt_err_control_link0_r() + (i)*4U) - -#define IPT_ERR_UC_ACTIVE_BITS (nvlipt_err_uc_status_link0_dlprotocol_f(1) | \ - nvlipt_err_uc_status_link0_datapoisoned_f(1) | \ - nvlipt_err_uc_status_link0_flowcontrol_f(1) | \ - nvlipt_err_uc_status_link0_responsetimeout_f(1) | \ - nvlipt_err_uc_status_link0_targeterror_f(1) | \ - nvlipt_err_uc_status_link0_unexpectedresponse_f(1) | \ - nvlipt_err_uc_status_link0_receiveroverflow_f(1) | \ - nvlipt_err_uc_status_link0_malformedpacket_f(1) | \ - nvlipt_err_uc_status_link0_stompedpacketreceived_f(1) | \ - nvlipt_err_uc_status_link0_unsupportedrequest_f(1) | \ - nvlipt_err_uc_status_link0_ucinternal_f(1)) - -/* - * Init TLC per link interrupts - */ -static void gv100_nvlink_tlc_intr_enable(struct gk20a *g, u32 link_id, - bool enable) -{ - u32 reg_rx0 = 0, reg_rx1 = 0, reg_tx = 0; - - if (enable) { - /* Set PROD values */ - reg_rx0 = 0x0FFFFFF; - reg_rx1 = 0x03FFFFF; - reg_tx = 0x1FFFFFF; - } - - TLC_REG_WR32(g, link_id, nvtlc_rx_err_report_en_0_r(), reg_rx0); - TLC_REG_WR32(g, link_id, 
nvtlc_rx_err_report_en_1_r(), reg_rx1); - TLC_REG_WR32(g, link_id, nvtlc_tx_err_report_en_0_r(), reg_tx); -} - -/* - * helper function to get TLC intr status in common structure - */ -static void gv100_nvlink_tlc_get_intr(struct gk20a *g, u32 link_id) -{ - g->nvlink.tlc_rx_err_status_0[link_id] = - TLC_REG_RD32(g, link_id, nvtlc_rx_err_status_0_r()); - g->nvlink.tlc_rx_err_status_1[link_id] = - TLC_REG_RD32(g, link_id, nvtlc_rx_err_status_1_r()); - g->nvlink.tlc_tx_err_status_0[link_id] = - TLC_REG_RD32(g, link_id, nvtlc_tx_err_status_0_r()); -} - -/* - * Interrupt routine handler for TLC - */ -static void gv100_nvlink_tlc_isr(struct gk20a *g, u32 link_id) -{ - - if (g->nvlink.tlc_rx_err_status_0[link_id] != 0U) { - /* All TLC RX 0 errors are fatal. Notify and disable */ - nvgpu_err(g, "Fatal TLC RX 0 interrupt on link %d mask: %x", - link_id, g->nvlink.tlc_rx_err_status_0[link_id]); - TLC_REG_WR32(g, link_id, nvtlc_rx_err_first_0_r(), - g->nvlink.tlc_rx_err_status_0[link_id]); - TLC_REG_WR32(g, link_id, nvtlc_rx_err_status_0_r(), - g->nvlink.tlc_rx_err_status_0[link_id]); - } - if (g->nvlink.tlc_rx_err_status_1[link_id] != 0U) { - /* All TLC RX 1 errors are fatal. Notify and disable */ - nvgpu_err(g, "Fatal TLC RX 1 interrupt on link %d mask: %x", - link_id, g->nvlink.tlc_rx_err_status_1[link_id]); - TLC_REG_WR32(g, link_id, nvtlc_rx_err_first_1_r(), - g->nvlink.tlc_rx_err_status_1[link_id]); - TLC_REG_WR32(g, link_id, nvtlc_rx_err_status_1_r(), - g->nvlink.tlc_rx_err_status_1[link_id]); - } - if (g->nvlink.tlc_tx_err_status_0[link_id] != 0U) { - /* All TLC TX 0 errors are fatal. 
Notify and disable */ - nvgpu_err(g, "Fatal TLC TX 0 interrupt on link %d mask: %x", - link_id, g->nvlink.tlc_tx_err_status_0[link_id]); - TLC_REG_WR32(g, link_id, nvtlc_tx_err_first_0_r(), - g->nvlink.tlc_tx_err_status_0[link_id]); - TLC_REG_WR32(g, link_id, nvtlc_tx_err_status_0_r(), - g->nvlink.tlc_tx_err_status_0[link_id]); - } -} - -/* - * DLPL interrupt enable helper - */ -void gv100_nvlink_dlpl_intr_enable(struct gk20a *g, u32 link_id, bool enable) -{ - u32 reg = 0; - - /* Always disable nonstall tree */ - DLPL_REG_WR32(g, link_id, nvl_intr_nonstall_en_r(), 0); - - if (!enable) - { - DLPL_REG_WR32(g, link_id, nvl_intr_stall_en_r(), 0); - return; - } - - /* Clear interrupt register to get rid of stale state (W1C) */ - DLPL_REG_WR32(g, link_id, nvl_intr_r(), 0xffffffffU); - DLPL_REG_WR32(g, link_id, nvl_intr_sw2_r(), 0xffffffffU); - - reg = nvl_intr_stall_en_tx_recovery_long_enable_f() | - nvl_intr_stall_en_tx_fault_ram_enable_f() | - nvl_intr_stall_en_tx_fault_interface_enable_f() | - nvl_intr_stall_en_tx_fault_sublink_change_enable_f() | - nvl_intr_stall_en_rx_fault_sublink_change_enable_f() | - nvl_intr_stall_en_rx_fault_dl_protocol_enable_f() | - nvl_intr_stall_en_ltssm_fault_enable_f(); - - DLPL_REG_WR32(g, link_id, nvl_intr_stall_en_r(), reg); - - /* Configure error threshold */ - reg = DLPL_REG_RD32(g, link_id, nvl_sl1_error_rate_ctrl_r()); - reg = set_field(reg, nvl_sl1_error_rate_ctrl_short_threshold_man_m(), - nvl_sl1_error_rate_ctrl_short_threshold_man_f(0x2)); - reg = set_field(reg, nvl_sl1_error_rate_ctrl_long_threshold_man_m(), - nvl_sl1_error_rate_ctrl_long_threshold_man_f(0x2)); - DLPL_REG_WR32(g, link_id, nvl_sl1_error_rate_ctrl_r(), reg); -} - -/* - * DLPL per-link isr - */ - -#define DLPL_NON_FATAL_INTR_MASK (nvl_intr_tx_replay_f(1) | \ - nvl_intr_tx_recovery_short_f(1) | \ - nvl_intr_tx_recovery_long_f(1) | \ - nvl_intr_rx_short_error_rate_f(1) | \ - nvl_intr_rx_long_error_rate_f(1) | \ - nvl_intr_rx_ila_trigger_f(1) | \ - 
nvl_intr_ltssm_protocol_f(1)) - -#define DLPL_FATAL_INTR_MASK ( nvl_intr_ltssm_fault_f(1) | \ - nvl_intr_rx_fault_dl_protocol_f(1) | \ - nvl_intr_rx_fault_sublink_change_f(1) | \ - nvl_intr_tx_fault_sublink_change_f(1) | \ - nvl_intr_tx_fault_interface_f(1) | \ - nvl_intr_tx_fault_ram_f(1)) - -static void gv100_nvlink_dlpl_isr(struct gk20a *g, u32 link_id) -{ - u32 non_fatal_mask = 0; - u32 fatal_mask = 0; - u32 intr = 0; - bool retrain = false; - int err; - - intr = DLPL_REG_RD32(g, link_id, nvl_intr_r()) & - DLPL_REG_RD32(g, link_id, nvl_intr_stall_en_r()); - - if (intr == 0U) { - return; - } - - fatal_mask = intr & DLPL_FATAL_INTR_MASK; - non_fatal_mask = intr & DLPL_NON_FATAL_INTR_MASK; - - nvgpu_err(g, " handling DLPL %d isr. Fatal: %x non-Fatal: %x", - link_id, fatal_mask, non_fatal_mask); - - /* Check if we are not handling an interupt */ - if (((fatal_mask | non_fatal_mask) & ~intr) != 0U) { - nvgpu_err(g, "Unable to service DLPL intr on link %d", link_id); - } - - if ((non_fatal_mask & nvl_intr_tx_recovery_long_f(1)) != 0U) { - retrain = true; - } - if (fatal_mask != 0U) { - retrain = false; - } - - if (retrain) { - err = nvgpu_nvlink_train(g, link_id, false); - if (err != 0) { - nvgpu_err(g, "failed to retrain link %d", link_id); - } - } - - /* Clear interrupts */ - DLPL_REG_WR32(g, link_id, nvl_intr_r(), (non_fatal_mask | fatal_mask)); - DLPL_REG_WR32(g, link_id, nvl_intr_sw2_r(), 0xffffffffU); -} - -/* - * Initialize MIF API with PROD settings - */ -void gv100_nvlink_init_mif_intr(struct gk20a *g, u32 link_id) -{ - u32 tmp; - - /* Enable MIF RX error */ - - /* Containment (make fatal) */ - tmp = 0; - tmp = set_field(tmp, - ioctrlmif_rx_err_contain_en_0_rxramdataparityerr_m(), - ioctrlmif_rx_err_contain_en_0_rxramdataparityerr__prod_f()); - tmp = set_field(tmp, - ioctrlmif_rx_err_contain_en_0_rxramhdrparityerr_m(), - ioctrlmif_rx_err_contain_en_0_rxramhdrparityerr__prod_f()); - MIF_REG_WR32(g, link_id, ioctrlmif_rx_err_contain_en_0_r(), tmp); - - /* 
Logging (do not ignore) */ - tmp = 0; - tmp = set_field(tmp, - ioctrlmif_rx_err_log_en_0_rxramdataparityerr_m(), - ioctrlmif_rx_err_log_en_0_rxramdataparityerr_f(1)); - tmp = set_field(tmp, - ioctrlmif_rx_err_log_en_0_rxramhdrparityerr_m(), - ioctrlmif_rx_err_log_en_0_rxramhdrparityerr_f(1)); - MIF_REG_WR32(g, link_id, ioctrlmif_rx_err_log_en_0_r(), tmp); - - /* Tx Error */ - /* Containment (make fatal) */ - tmp = 0; - tmp = set_field(tmp, - ioctrlmif_tx_err_contain_en_0_txramdataparityerr_m(), - ioctrlmif_tx_err_contain_en_0_txramdataparityerr__prod_f()); - tmp = set_field(tmp, - ioctrlmif_tx_err_contain_en_0_txramhdrparityerr_m(), - ioctrlmif_tx_err_contain_en_0_txramhdrparityerr__prod_f()); - MIF_REG_WR32(g, link_id, ioctrlmif_tx_err_contain_en_0_r(), tmp); - - /* Logging (do not ignore) */ - tmp = 0; - tmp = set_field(tmp, ioctrlmif_tx_err_log_en_0_txramdataparityerr_m(), - ioctrlmif_tx_err_log_en_0_txramdataparityerr_f(1)); - tmp = set_field(tmp, ioctrlmif_tx_err_log_en_0_txramhdrparityerr_m(), - ioctrlmif_tx_err_log_en_0_txramhdrparityerr_f(1)); - MIF_REG_WR32(g, link_id, ioctrlmif_tx_err_log_en_0_r(), tmp); - - /* Credit release */ - MIF_REG_WR32(g, link_id, ioctrlmif_rx_ctrl_buffer_ready_r(), 0x1); - MIF_REG_WR32(g, link_id, ioctrlmif_tx_ctrl_buffer_ready_r(), 0x1); -} - -/* - * Enable per-link MIF interrupts - */ -void gv100_nvlink_mif_intr_enable(struct gk20a *g, u32 link_id, bool enable) -{ - u32 reg0 = 0, reg1 = 0; - - if (enable) { - reg0 = set_field(reg0, - ioctrlmif_rx_err_report_en_0_rxramdataparityerr_m(), - ioctrlmif_rx_err_report_en_0_rxramdataparityerr_f(1)); - reg0 = set_field(reg0, - ioctrlmif_rx_err_report_en_0_rxramhdrparityerr_m(), - ioctrlmif_rx_err_report_en_0_rxramhdrparityerr_f(1)); - reg1 = set_field(reg1, - ioctrlmif_tx_err_report_en_0_txramdataparityerr_m(), - ioctrlmif_tx_err_report_en_0_txramdataparityerr_f(1)); - reg1 = set_field(reg1, - ioctrlmif_tx_err_report_en_0_txramhdrparityerr_m(), - 
ioctrlmif_tx_err_report_en_0_txramhdrparityerr_f(1)); - } - - MIF_REG_WR32(g, link_id, ioctrlmif_rx_err_report_en_0_r(), reg0); - MIF_REG_WR32(g, link_id, ioctrlmif_tx_err_report_en_0_r(), reg1); -} - -/* - * Handle per-link MIF interrupts - */ -static void gv100_nvlink_mif_isr(struct gk20a *g, u32 link_id) -{ - u32 intr, fatal_mask = 0; - - /* RX Errors */ - intr = MIF_REG_RD32(g, link_id, ioctrlmif_rx_err_status_0_r()); - if (intr != 0U) { - if ((intr & ioctrlmif_rx_err_status_0_rxramdataparityerr_m()) != - 0U) { - nvgpu_err(g, "Fatal MIF RX interrupt hit on link %d: RAM_DATA_PARITY", - link_id); - fatal_mask |= ioctrlmif_rx_err_status_0_rxramdataparityerr_f(1); - } - if ((intr & ioctrlmif_rx_err_status_0_rxramhdrparityerr_m()) != - 0U) { - nvgpu_err(g, "Fatal MIF RX interrupt hit on link %d: RAM_HDR_PARITY", - link_id); - fatal_mask |= ioctrlmif_rx_err_status_0_rxramhdrparityerr_f(1); - } - - if (fatal_mask != 0U) { - MIF_REG_WR32(g, link_id, ioctrlmif_rx_err_first_0_r(), - fatal_mask); - MIF_REG_WR32(g, link_id, ioctrlmif_rx_err_status_0_r(), - fatal_mask); - } - } - - /* TX Errors */ - fatal_mask = 0; - intr = MIF_REG_RD32(g, link_id, ioctrlmif_tx_err_status_0_r()); - if (intr != 0U) { - if ((intr & ioctrlmif_tx_err_status_0_txramdataparityerr_m()) != - 0U) { - nvgpu_err(g, "Fatal MIF TX interrupt hit on link %d: RAM_DATA_PARITY", - link_id); - fatal_mask |= ioctrlmif_tx_err_status_0_txramdataparityerr_f(1); - } - if ((intr & ioctrlmif_tx_err_status_0_txramhdrparityerr_m()) != - 0U) { - nvgpu_err(g, "Fatal MIF TX interrupt hit on link %d: RAM_HDR_PARITY", - link_id); - fatal_mask |= ioctrlmif_tx_err_status_0_txramhdrparityerr_f(1); - } - - if (fatal_mask != 0U) { - MIF_REG_WR32(g, link_id, ioctrlmif_tx_err_first_0_r(), - fatal_mask); - MIF_REG_WR32(g, link_id, ioctrlmif_tx_err_status_0_r(), - fatal_mask); - } - } -} - -/* - * NVLIPT IP initialization (per-link) - */ -void gv100_nvlink_init_nvlipt_intr(struct gk20a *g, u32 link_id) -{ - /* init persistent 
scratch registers */ - IPT_REG_WR32(g, nvlipt_scratch_cold_r(), - nvlipt_scratch_cold_data_init_v()); - - /* - * AErr settings (top level) - */ - - /* UC first and status reg (W1C) need to be cleared byt arch */ - IPT_REG_WR32(g, IPT_ERR_UC_FIRST_LINK(link_id), IPT_ERR_UC_ACTIVE_BITS); - IPT_REG_WR32(g, IPT_ERR_UC_STATUS_LINK(link_id), IPT_ERR_UC_ACTIVE_BITS); - - /* AErr Severity */ - IPT_REG_WR32(g, IPT_ERR_UC_SEVERITY_LINK(link_id), IPT_ERR_UC_ACTIVE_BITS); - - /* AErr Control settings */ - IPT_REG_WR32(g, IPT_ERR_CONTROL_LINK(link_id), - nvlipt_err_control_link0_fatalenable_f(1) | - nvlipt_err_control_link0_nonfatalenable_f(1)); -} - -/* - * Enable NVLIPT interrupts - */ -static void gv100_nvlink_nvlipt_intr_enable(struct gk20a *g, u32 link_id, - bool enable) -{ - u32 val = 0; - u32 reg; - - if (enable) { - val = 1; - } - - reg = IPT_REG_RD32(g, IPT_INTR_CONTROL_LINK(link_id)); - reg = set_field(reg, nvlipt_intr_control_link0_stallenable_m(), - nvlipt_intr_control_link0_stallenable_f(val)); - reg = set_field(reg, nvlipt_intr_control_link0_nostallenable_m(), - nvlipt_intr_control_link0_nostallenable_f(val)); - IPT_REG_WR32(g, IPT_INTR_CONTROL_LINK(link_id), reg); -} - -/* - * Per-link NVLIPT ISR handler - */ -static void gv100_nvlink_nvlipt_isr(struct gk20a *g, u32 link_id) -{ - /* - * Interrupt handling happens in leaf handlers. 
Assume all interrupts - * were handled and clear roll ups/ - */ - IPT_REG_WR32(g, IPT_ERR_UC_FIRST_LINK(link_id), IPT_ERR_UC_ACTIVE_BITS); - IPT_REG_WR32(g, IPT_ERR_UC_STATUS_LINK(link_id), IPT_ERR_UC_ACTIVE_BITS); - - return; -} - -/* - ******************************************************************************* - * Interrupt handling functions * - ******************************************************************************* - */ - -/* - * Enable common interrupts - */ -void gv100_nvlink_common_intr_enable(struct gk20a *g, unsigned long mask) -{ - u32 reg, link_id; - unsigned long bit; - - /* Init IOCTRL */ - for_each_set_bit(bit, &mask, NVLINK_MAX_LINKS_SW) { - link_id = (u32)bit; - reg = IOCTRL_REG_RD32(g, ioctrl_link_intr_0_mask_r(link_id)); - reg |= (ioctrl_link_intr_0_mask_fatal_f(1) | - ioctrl_link_intr_0_mask_nonfatal_f(1) | - ioctrl_link_intr_0_mask_correctable_f(1) | - ioctrl_link_intr_0_mask_intra_f(1)); - IOCTRL_REG_WR32(g, ioctrl_link_intr_0_mask_r(link_id), reg); - } - - reg = IOCTRL_REG_RD32(g, ioctrl_common_intr_0_mask_r()); - reg |= (ioctrl_common_intr_0_mask_fatal_f(1) | - ioctrl_common_intr_0_mask_nonfatal_f(1) | - ioctrl_common_intr_0_mask_correctable_f(1) | - ioctrl_common_intr_0_mask_intra_f(1)); - IOCTRL_REG_WR32(g, ioctrl_common_intr_0_mask_r(), reg); - - /* Init NVLIPT */ - IPT_REG_WR32(g, nvlipt_intr_control_common_r(), - nvlipt_intr_control_common_stallenable_f(1) | - nvlipt_intr_control_common_nonstallenable_f(1)); -} - -/* - * Enable link specific interrupts (top-level) - */ -void gv100_nvlink_enable_link_intr(struct gk20a *g, u32 link_id, bool enable) -{ - g->ops.nvlink.minion.enable_link_intr(g, link_id, enable); - gv100_nvlink_dlpl_intr_enable(g, link_id, enable); - gv100_nvlink_tlc_intr_enable(g, link_id, enable); - gv100_nvlink_mif_intr_enable(g, link_id, enable); - gv100_nvlink_nvlipt_intr_enable(g, link_id, enable); -} - -/* - * Top level interrupt handler - */ -void gv100_nvlink_isr(struct gk20a *g) -{ - unsigned long 
links; - u32 link_id; - unsigned long bit; - - links = ioctrl_top_intr_0_status_link_v( - IOCTRL_REG_RD32(g, ioctrl_top_intr_0_status_r())); - - links &= g->nvlink.enabled_links; - /* As per ARCH minion must be serviced first */ - g->ops.nvlink.minion.isr(g); - - for_each_set_bit(bit, &links, NVLINK_MAX_LINKS_SW) { - link_id = (u32)bit; - /* Cache error logs from TLC, DL handler may clear them */ - gv100_nvlink_tlc_get_intr(g, link_id); - gv100_nvlink_dlpl_isr(g, link_id); - gv100_nvlink_tlc_isr(g, link_id); - gv100_nvlink_mif_isr(g, link_id); - - /* NVLIPT is top-level. Do it last */ - gv100_nvlink_nvlipt_isr(g, link_id); - } - return; -} - -#endif /* CONFIG_NVGPU_NVLINK */ diff --git a/drivers/gpu/nvgpu/common/nvlink/nvlink_gv100.c b/drivers/gpu/nvgpu/common/nvlink/nvlink_gv100.c index 67f07031c..393c165a9 100644 --- a/drivers/gpu/nvgpu/common/nvlink/nvlink_gv100.c +++ b/drivers/gpu/nvgpu/common/nvlink/nvlink_gv100.c @@ -57,9 +57,6 @@ u32 gv100_nvlink_get_link_reset_mask(struct gk20a *g) static int gv100_nvlink_state_load_hal(struct gk20a *g) { - unsigned long discovered = g->nvlink.discovered_links; - - g->ops.nvlink.intr.common_intr_enable(g, discovered); return nvgpu_nvlink_minion_load(g); } @@ -254,8 +251,8 @@ static int gv100_nvlink_enable_links_post_top(struct gk20a *g, if (g->ops.nvlink.set_sw_war != NULL) { g->ops.nvlink.set_sw_war(g, link_id); } - g->ops.nvlink.intr.init_nvlipt_intr(g, link_id); - g->ops.nvlink.intr.enable_link_intr(g, link_id, true); + g->ops.nvlink.intr.init_link_err_intr(g, link_id); + g->ops.nvlink.intr.enable_link_err_intr(g, link_id, true); g->nvlink.initialized_links |= BIT32(link_id); }; @@ -766,16 +763,7 @@ int gv100_nvlink_link_early_init(struct gk20a *g, unsigned long mask) int gv100_nvlink_interface_init(struct gk20a *g) { - unsigned long mask = g->nvlink.enabled_links; - u32 link_id; int err; - unsigned long bit; - - for_each_set_bit(bit, &mask, NVLINK_MAX_LINKS_SW) { - link_id = (u32)bit; - 
g->ops.nvlink.intr.init_mif_intr(g, link_id); - g->ops.nvlink.intr.mif_intr_enable(g, link_id, true); - } err = g->ops.fb.init_nvlink(g); if (err != 0) { diff --git a/drivers/gpu/nvgpu/hal/init/hal_tu104.c b/drivers/gpu/nvgpu/hal/init/hal_tu104.c index 90f8db7af..88bc2a2b7 100644 --- a/drivers/gpu/nvgpu/hal/init/hal_tu104.c +++ b/drivers/gpu/nvgpu/hal/init/hal_tu104.c @@ -180,7 +180,7 @@ #include "hal/xve/xve_tu104.h" #include "common/nvlink/init/device_reginit_gv100.h" -#include "common/nvlink/intr_and_err_handling_gv100.h" +#include "hal/nvlink/intr_and_err_handling_tu104.h" #include "hal/nvlink/minion_gv100.h" #include "hal/nvlink/minion_tu104.h" #include "hal/nvlink/link_mode_transitions_gv100.h" @@ -1553,13 +1553,9 @@ static const struct gpu_ops tu104_ops = { .is_debug_mode = tu104_nvlink_minion_is_debug_mode, }, .intr = { - .common_intr_enable = gv100_nvlink_common_intr_enable, - .init_nvlipt_intr = gv100_nvlink_init_nvlipt_intr, - .enable_link_intr = gv100_nvlink_enable_link_intr, - .init_mif_intr = gv100_nvlink_init_mif_intr, - .mif_intr_enable = gv100_nvlink_mif_intr_enable, - .dlpl_intr_enable = gv100_nvlink_dlpl_intr_enable, - .isr = gv100_nvlink_isr, + .init_link_err_intr = tu104_nvlink_init_link_err_intr, + .enable_link_err_intr = tu104_nvlink_enable_link_err_intr, + .isr = tu104_nvlink_isr, } }, #endif diff --git a/drivers/gpu/nvgpu/hal/nvlink/intr_and_err_handling_tu104.c b/drivers/gpu/nvgpu/hal/nvlink/intr_and_err_handling_tu104.c new file mode 100644 index 000000000..65c74aff3 --- /dev/null +++ b/drivers/gpu/nvgpu/hal/nvlink/intr_and_err_handling_tu104.c @@ -0,0 +1,589 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifdef CONFIG_NVGPU_NVLINK + +#include +#include +#include "intr_and_err_handling_tu104.h" + +#include +#include +#include +#include +#include + +#define IPT_ERR_UC_ACTIVE_BITS (nvlipt_err_uc_status_link0_dlprotocol_f(1) | \ + nvlipt_err_uc_status_link0_datapoisoned_f(1) | \ + nvlipt_err_uc_status_link0_flowcontrol_f(1) | \ + nvlipt_err_uc_status_link0_responsetimeout_f(1) | \ + nvlipt_err_uc_status_link0_targeterror_f(1) | \ + nvlipt_err_uc_status_link0_unexpectedresponse_f(1) | \ + nvlipt_err_uc_status_link0_receiveroverflow_f(1) | \ + nvlipt_err_uc_status_link0_malformedpacket_f(1) | \ + nvlipt_err_uc_status_link0_stompedpacketreceived_f(1) | \ + nvlipt_err_uc_status_link0_unsupportedrequest_f(1) | \ + nvlipt_err_uc_status_link0_ucinternal_f(1)) + +/* + * Initialize logging and containment policy for TLC Parity errors + */ +static void tu104_nvlink_init_tlc_link_err(struct gk20a *g, u32 link_id) +{ + u32 reg; + + /*TX error */ + + /* Containment (Do not enable for TX Data RAM parity errors). + * That bit should be left 0, so that the error can be signaled + * to the far device by poisoning. As long as containment is + * turned off, the poison enable is set by default. 
+ */ + reg = (nvtlc_tx_err_contain_en_0_txhdrcreditovferr__prod_f() | \ + nvtlc_tx_err_contain_en_0_txdatacreditovferr__prod_f() | \ + nvtlc_tx_err_contain_en_0_txdlcreditovferr__prod_f() | \ + nvtlc_tx_err_contain_en_0_txdlcreditparityerr__prod_f() | \ + nvtlc_tx_err_contain_en_0_txramhdrparityerr__prod_f() | \ + nvtlc_tx_err_contain_en_0_txunsupvcovferr__prod_f() | \ + nvtlc_tx_err_contain_en_0_txstompdet__prod_f() | \ + nvtlc_tx_err_contain_en_0_txpoisondet_f(1U) | \ + nvtlc_tx_err_contain_en_0_targeterr_f(1U) | \ + nvtlc_tx_err_contain_en_0_unsupportedrequesterr_f(1U)); + TLC_REG_WR32(g, link_id, nvtlc_tx_err_contain_en_0_r(), reg); + + /* Logging */ + reg = (nvtlc_tx_err_log_en_0_txhdrcreditovferr__prod_f() | \ + nvtlc_tx_err_log_en_0_txdatacreditovferr__prod_f() | \ + nvtlc_tx_err_log_en_0_txdlcreditovferr__prod_f() | \ + nvtlc_tx_err_log_en_0_txdlcreditparityerr__prod_f() | \ + nvtlc_tx_err_log_en_0_txramhdrparityerr__prod_f() | \ + nvtlc_tx_err_log_en_0_txramdataparityerr__prod_f() | \ + nvtlc_tx_err_log_en_0_txunsupvcovferr__prod_f() | \ + nvtlc_tx_err_log_en_0_txstompdet__prod_f() | \ + nvtlc_tx_err_log_en_0_txpoisondet__prod_f() | \ + nvtlc_tx_err_log_en_0_targeterr__prod_f() | \ + nvtlc_tx_err_log_en_0_unsupportedrequesterr__prod_f()); + TLC_REG_WR32(g, link_id, nvtlc_tx_err_log_en_0_r(), reg); + + /* RX Error */ + /* Containment */ + reg = (nvtlc_rx_err_contain_en_0_rxdlhdrparityerr__prod_f() | \ + nvtlc_rx_err_contain_en_0_rxdldataparityerr__prod_f() | \ + nvtlc_rx_err_contain_en_0_rxdlctrlparityerr__prod_f() | \ + nvtlc_rx_err_contain_en_0_rxramdataparityerr_f(1U) | \ + nvtlc_rx_err_contain_en_0_rxramhdrparityerr__prod_f() | \ + nvtlc_rx_err_contain_en_0_rxinvalidaeerr__prod_f() | \ + nvtlc_rx_err_contain_en_0_rxinvalidbeerr__prod_f() | \ + nvtlc_rx_err_contain_en_0_rxinvalidaddralignerr__prod_f() | \ + nvtlc_rx_err_contain_en_0_rxpktlenerr__prod_f() | \ + nvtlc_rx_err_contain_en_0_datlengtatomicreqmaxerr__prod_f() | \ + 
nvtlc_rx_err_contain_en_0_datlengtrmwreqmaxerr__prod_f() | \ + nvtlc_rx_err_contain_en_0_datlenltatrrspminerr__prod_f() | \ + nvtlc_rx_err_contain_en_0_invalidcacheattrpoerr__prod_f() | \ + nvtlc_rx_err_contain_en_0_invalidcrerr__prod_f() | \ + nvtlc_rx_err_contain_en_0_rxrespstatustargeterr__prod_f() | \ + nvtlc_rx_err_contain_en_0_rxrespstatusunsupportedrequesterr__prod_f()); + TLC_REG_WR32(g, link_id, nvtlc_rx_err_contain_en_0_r(), reg); + + reg = (nvtlc_rx_err_contain_en_1_rxhdrovferr__prod_f() | \ + nvtlc_rx_err_contain_en_1_rxdataovferr__prod_f() | \ + nvtlc_rx_err_contain_en_1_stompdeterr__prod_f() | \ + nvtlc_rx_err_contain_en_1_rxpoisonerr__prod_f() | \ + nvtlc_rx_err_contain_en_1_rxunsupvcovferr__prod_f() | \ + nvtlc_rx_err_contain_en_1_rxunsupnvlinkcreditrelerr__prod_f() | \ + nvtlc_rx_err_contain_en_1_rxunsupncisoccreditrelerr__prod_f()); + TLC_REG_WR32(g, link_id, nvtlc_rx_err_contain_en_1_r(), reg); + + /* Logging */ + reg = (nvtlc_rx_err_log_en_0_rxdlhdrparityerr__prod_f() | \ + nvtlc_rx_err_log_en_0_rxdldataparityerr__prod_f() | \ + nvtlc_rx_err_log_en_0_rxdlctrlparityerr__prod_f() | \ + nvtlc_rx_err_log_en_0_rxramdataparityerr__prod_f() | \ + nvtlc_rx_err_log_en_0_rxramhdrparityerr__prod_f() | \ + nvtlc_rx_err_log_en_0_rxinvalidaeerr__prod_f() | \ + nvtlc_rx_err_log_en_0_rxinvalidbeerr__prod_f() | \ + nvtlc_rx_err_log_en_0_rxinvalidaddralignerr__prod_f() | \ + nvtlc_rx_err_log_en_0_rxpktlenerr__prod_f() | \ + nvtlc_rx_err_log_en_0_datlengtatomicreqmaxerr__prod_f() | \ + nvtlc_rx_err_log_en_0_datlengtrmwreqmaxerr__prod_f() | \ + nvtlc_rx_err_log_en_0_datlenltatrrspminerr__prod_f() | \ + nvtlc_rx_err_log_en_0_invalidcacheattrpoerr__prod_f() | \ + nvtlc_rx_err_log_en_0_invalidcrerr__prod_f() | \ + nvtlc_rx_err_log_en_0_rxrespstatustargeterr__prod_f() | \ + nvtlc_rx_err_log_en_0_rxrespstatusunsupportedrequesterr__prod_f()); + TLC_REG_WR32(g, link_id, nvtlc_rx_err_log_en_0_r(), reg); + + reg = (nvtlc_rx_err_log_en_1_rxhdrovferr__prod_f() | \ + 
nvtlc_rx_err_log_en_1_rxdataovferr__prod_f() | \ + nvtlc_rx_err_log_en_1_stompdeterr__prod_f() | \ + nvtlc_rx_err_log_en_1_rxpoisonerr__prod_f() | \ + nvtlc_rx_err_log_en_1_rxunsupvcovferr__prod_f() | \ + nvtlc_rx_err_log_en_1_rxunsupnvlinkcreditrelerr__prod_f() | \ + nvtlc_rx_err_log_en_1_rxunsupncisoccreditrelerr__prod_f()); + TLC_REG_WR32(g, link_id, nvtlc_rx_err_log_en_1_r(), reg); +} + +/* + * Enable TLC per link interrupts + */ +static void tu104_nvlink_enable_tlc_link_err(struct gk20a *g, u32 link_id, + bool enable) +{ + u32 reg_rx0 = 0, reg_rx1 = 0, reg_tx = 0; + + if (enable) { + reg_tx = (nvtlc_tx_err_report_en_0_txhdrcreditovferr__prod_f() | \ + nvtlc_tx_err_report_en_0_txdatacreditovferr__prod_f() | \ + nvtlc_tx_err_report_en_0_txdlcreditovferr__prod_f() | \ + nvtlc_tx_err_report_en_0_txdlcreditparityerr__prod_f() | \ + nvtlc_tx_err_report_en_0_txramhdrparityerr__prod_f() | \ + nvtlc_tx_err_report_en_0_txramdataparityerr__prod_f() | \ + nvtlc_tx_err_report_en_0_txunsupvcovferr__prod_f() | \ + nvtlc_tx_err_report_en_0_txstompdet__prod_f() | \ + nvtlc_tx_err_report_en_0_txpoisondet__prod_f() | \ + nvtlc_tx_err_report_en_0_targeterr__prod_f() | \ + nvtlc_tx_err_report_en_0_unsupportedrequesterr__prod_f()); + + reg_rx0 = (nvtlc_rx_err_report_en_0_rxdlhdrparityerr__prod_f() | \ + nvtlc_rx_err_report_en_0_rxdldataparityerr__prod_f() | \ + nvtlc_rx_err_report_en_0_rxdlctrlparityerr__prod_f() | \ + nvtlc_rx_err_report_en_0_rxramdataparityerr__prod_f() | \ + nvtlc_rx_err_report_en_0_rxramhdrparityerr__prod_f() | \ + nvtlc_rx_err_report_en_0_rxinvalidaeerr__prod_f() | \ + nvtlc_rx_err_report_en_0_rxinvalidbeerr__prod_f() | \ + nvtlc_rx_err_report_en_0_rxinvalidaddralignerr__prod_f() | \ + nvtlc_rx_err_report_en_0_rxpktlenerr__prod_f() | \ + nvtlc_rx_err_report_en_0_datlengtatomicreqmaxerr__prod_f() | \ + nvtlc_rx_err_report_en_0_datlengtrmwreqmaxerr__prod_f() | \ + nvtlc_rx_err_report_en_0_datlenltatrrspminerr__prod_f() | \ + 
nvtlc_rx_err_report_en_0_invalidcacheattrpoerr__prod_f() | \ + nvtlc_rx_err_report_en_0_invalidcrerr__prod_f() | \ + nvtlc_rx_err_report_en_0_rxrespstatustargeterr__prod_f() | \ + nvtlc_rx_err_report_en_0_rxrespstatusunsupportedrequesterr__prod_f()); + + reg_rx1 = (nvtlc_rx_err_report_en_1_rxhdrovferr__prod_f() | \ + nvtlc_rx_err_report_en_1_rxdataovferr__prod_f() | \ + nvtlc_rx_err_report_en_1_stompdeterr__prod_f() | \ + nvtlc_rx_err_report_en_1_rxpoisonerr__prod_f() | \ + nvtlc_rx_err_report_en_1_rxunsupvcovferr__prod_f() | \ + nvtlc_rx_err_report_en_1_rxunsupnvlinkcreditrelerr__prod_f() | \ + nvtlc_rx_err_report_en_1_rxunsupncisoccreditrelerr__prod_f()); + + } + + TLC_REG_WR32(g, link_id, nvtlc_rx_err_report_en_0_r(), reg_rx0); + TLC_REG_WR32(g, link_id, nvtlc_rx_err_report_en_1_r(), reg_rx1); + TLC_REG_WR32(g, link_id, nvtlc_tx_err_report_en_0_r(), reg_tx); +} + +/* + * Interrupt routine handler for TLC + */ +static void tu104_nvlink_tlc_isr(struct gk20a *g, u32 link_id) +{ + u32 rx_status_0; + u32 rx_status_1; + u32 tx_status_0; + + rx_status_0 = TLC_REG_RD32(g, link_id, nvtlc_rx_err_status_0_r()); + rx_status_1 = TLC_REG_RD32(g, link_id, nvtlc_rx_err_status_1_r()); + tx_status_0 = TLC_REG_RD32(g, link_id, nvtlc_tx_err_status_0_r()); + + nvgpu_log(g, gpu_dbg_nvlink, "Nvlink TLC ISR: RX0=0x%x, RX1=0x%x, TX0=0x%x", + rx_status_0, rx_status_1, tx_status_0); + + if (rx_status_0 != 0U) { + /* All TLC RX 0 errors are fatal. Notify and disable */ + nvgpu_err(g, "Fatal TLC RX 0 interrupt on link %d mask: %x", + link_id, rx_status_0); + TLC_REG_WR32(g, link_id, nvtlc_rx_err_first_0_r(), + rx_status_0); + TLC_REG_WR32(g, link_id, nvtlc_rx_err_status_0_r(), + rx_status_0); + } + if (rx_status_1 != 0U) { + /* All TLC RX 1 errors are fatal. 
Notify and disable */ + nvgpu_err(g, "Fatal TLC RX 1 interrupt on link %d mask: %x", + link_id, rx_status_1); + TLC_REG_WR32(g, link_id, nvtlc_rx_err_first_1_r(), + rx_status_1); + TLC_REG_WR32(g, link_id, nvtlc_rx_err_status_1_r(), + rx_status_1); + } + if (tx_status_0 != 0U) { + /* All TLC TX 0 errors are fatal. Notify and disable */ + nvgpu_err(g, "Fatal TLC TX 0 interrupt on link %d mask: %x", + link_id, tx_status_0); + TLC_REG_WR32(g, link_id, nvtlc_tx_err_first_0_r(), + tx_status_0); + TLC_REG_WR32(g, link_id, nvtlc_tx_err_status_0_r(), + tx_status_0); + } +} + +/* + * Enable link specific DLPL interrupts + */ +static void tu104_nvlink_enable_dlpl_link_intr(struct gk20a *g, u32 link_id, bool enable) +{ + u32 reg = 0U; + + /* Always disable nonstall tree */ + DLPL_REG_WR32(g, link_id, nvl_intr_nonstall_en_r(), 0U); + + if (!enable) { + DLPL_REG_WR32(g, link_id, nvl_intr_stall_en_r(), 0U); + return; + } + + /* Clear interrupt register to get rid of stale state (W1C) */ + DLPL_REG_WR32(g, link_id, nvl_intr_r(), 0xffffffffU); + DLPL_REG_WR32(g, link_id, nvl_intr_sw2_r(), 0xffffffffU); + + reg = nvl_intr_stall_en_ltssm_protocol_enable_f() | + nvl_intr_stall_en_ltssm_fault_enable_f() | + nvl_intr_stall_en_tx_recovery_long_enable_f() | + nvl_intr_stall_en_tx_fault_ram_enable_f() | + nvl_intr_stall_en_tx_fault_interface_enable_f() | + nvl_intr_stall_en_rx_fault_sublink_change_enable_f() | + nvl_intr_stall_en_rx_fault_dl_protocol_enable_f() | + nvl_intr_stall_en_rx_short_error_rate_enable_f(); + + DLPL_REG_WR32(g, link_id, nvl_intr_stall_en_r(), reg); + + /* Configure error threshold */ + reg = DLPL_REG_RD32(g, link_id, nvl_sl1_error_count_ctrl_r()); + reg = set_field(reg, nvl_sl1_error_count_ctrl_short_rate_m(), + nvl_sl1_error_count_ctrl_short_rate_enable_f()); + reg = set_field(reg, nvl_sl1_error_count_ctrl_rate_count_mode_m(), + nvl_sl1_error_count_ctrl_rate_count_mode_flit_f()); + DLPL_REG_WR32(g, link_id, nvl_sl1_error_count_ctrl_r(), reg); + + reg = 
DLPL_REG_RD32(g, link_id, nvl_sl1_error_rate_ctrl_r()); + reg = set_field(reg, nvl_sl1_error_rate_ctrl_short_threshold_man_m(), + nvl_sl1_error_rate_ctrl_short_threshold_man_f(12U)); + reg = set_field(reg, nvl_sl1_error_rate_ctrl_short_threshold_exp_m(), + nvl_sl1_error_rate_ctrl_short_threshold_exp_f(1U)); + reg = set_field(reg, nvl_sl1_error_rate_ctrl_short_timescale_man_m(), + nvl_sl1_error_rate_ctrl_short_timescale_man_f(5U)); + reg = set_field(reg, nvl_sl1_error_rate_ctrl_short_timescale_exp_m(), + nvl_sl1_error_rate_ctrl_short_timescale_exp_f(2U)); + DLPL_REG_WR32(g, link_id, nvl_sl1_error_rate_ctrl_r(), reg); +} + +/* + * DLPL per-link isr + */ +static void tu104_nvlink_dlpl_isr(struct gk20a *g, u32 link_id) +{ + u32 intr = 0; + + intr = DLPL_REG_RD32(g, link_id, nvl_intr_r()) & + DLPL_REG_RD32(g, link_id, nvl_intr_stall_en_r()); + + nvgpu_log(g, gpu_dbg_nvlink, "Nvlink DLPL ISR triggered with intr: 0x%x", intr); + + if (intr == 0U) { + return; + } + + /* Clear interrupts */ + DLPL_REG_WR32(g, link_id, nvl_intr_r(), intr); + DLPL_REG_WR32(g, link_id, nvl_intr_sw2_r(), intr); +} + +/* + * Initialize logging and containment policy for MIF Parity errors + */ +static void tu104_nvlink_init_mif_link_err(struct gk20a *g, u32 link_id) +{ + u32 reg; + + /*RX error */ + + /* Containment (Enabled only for Header errors) + * In the Rx direction, the HSHUB does not handle either poison or + * containing (stomping) in mid packet (see bug 1939387), + * so there is no containment applied. 
+ */ + reg = 0U; + reg = set_field(reg, + ioctrlmif_rx_err_contain_en_0_rxramhdrparityerr_m(), + ioctrlmif_rx_err_contain_en_0_rxramhdrparityerr__prod_f()); + MIF_REG_WR32(g, link_id, ioctrlmif_rx_err_contain_en_0_r(), reg); + + /* Logging (do not ignore) */ + reg = 0U; + reg = set_field(reg, + ioctrlmif_rx_err_log_en_0_rxramdataparityerr_m(), + ioctrlmif_rx_err_log_en_0_rxramdataparityerr_f(1U)); + reg = set_field(reg, + ioctrlmif_rx_err_log_en_0_rxramhdrparityerr_m(), + ioctrlmif_rx_err_log_en_0_rxramhdrparityerr_f(1U)); + MIF_REG_WR32(g, link_id, ioctrlmif_rx_err_log_en_0_r(), reg); + + /* Tx Error */ + + /* Containment (Enabled only for Header errors) + * In the Tx direction, data parity errors will be poisoned, + * making it the far receiver’s responsibility to handle containment, + * and removing the requirement to contain at the transmitter. + */ + reg = 0U; + reg = set_field(reg, + ioctrlmif_tx_err_contain_en_0_txramhdrparityerr_m(), + ioctrlmif_tx_err_contain_en_0_txramhdrparityerr__prod_f()); + MIF_REG_WR32(g, link_id, ioctrlmif_tx_err_contain_en_0_r(), reg); + + reg = 0U; + reg = set_field(reg, + ioctrlmif_tx_err_misc_0_txramdataparitypois_m(), + ioctrlmif_tx_err_misc_0_txramdataparitypois_f(1U)); + MIF_REG_WR32(g, link_id, ioctrlmif_tx_err_misc_0_r(), reg); + + /* Logging (do not ignore) */ + reg = 0U; + reg = set_field(reg, ioctrlmif_tx_err_log_en_0_txramdataparityerr_m(), + ioctrlmif_tx_err_log_en_0_txramdataparityerr_f(1U)); + reg = set_field(reg, ioctrlmif_tx_err_log_en_0_txramhdrparityerr_m(), + ioctrlmif_tx_err_log_en_0_txramhdrparityerr_f(1U)); + MIF_REG_WR32(g, link_id, ioctrlmif_tx_err_log_en_0_r(), reg); + + /* Credit release */ + MIF_REG_WR32(g, link_id, ioctrlmif_rx_ctrl_buffer_ready_r(), 0x1U); + MIF_REG_WR32(g, link_id, ioctrlmif_tx_ctrl_buffer_ready_r(), 0x1U); +} + +/* + * Enable reporting(interrupt generation) per-link MIF interrupts + */ +static void tu104_nvlink_enable_mif_link_err(struct gk20a *g, u32 link_id, + bool enable) +{ + u32 
reg0 = 0U; + u32 reg1 = 0U; + + if (enable) { + reg0 = set_field(reg0, + ioctrlmif_rx_err_report_en_0_rxramdataparityerr_m(), + ioctrlmif_rx_err_report_en_0_rxramdataparityerr_f(1U)); + reg0 = set_field(reg0, + ioctrlmif_rx_err_report_en_0_rxramhdrparityerr_m(), + ioctrlmif_rx_err_report_en_0_rxramhdrparityerr_f(1U)); + reg1 = set_field(reg1, + ioctrlmif_tx_err_report_en_0_txramdataparityerr_m(), + ioctrlmif_tx_err_report_en_0_txramdataparityerr_f(1U)); + reg1 = set_field(reg1, + ioctrlmif_tx_err_report_en_0_txramhdrparityerr_m(), + ioctrlmif_tx_err_report_en_0_txramhdrparityerr_f(1U)); + } + + MIF_REG_WR32(g, link_id, ioctrlmif_rx_err_report_en_0_r(), reg0); + MIF_REG_WR32(g, link_id, ioctrlmif_tx_err_report_en_0_r(), reg1); +} + +/* + * Handle per-link MIF interrupts + */ +static void tu104_nvlink_mif_isr(struct gk20a *g, u32 link_id) +{ + u32 intr, fatal_mask = 0; + + /* RX Errors */ + intr = MIF_REG_RD32(g, link_id, ioctrlmif_rx_err_status_0_r()); + nvgpu_log(g, gpu_dbg_nvlink, "Nvlink MIF RX ISR triggered with intr: 0x%x", intr); + + if (intr != 0U) { + if ((intr & ioctrlmif_rx_err_status_0_rxramdataparityerr_m()) != + 0U) { + nvgpu_err(g, "Fatal MIF RX interrupt hit on link %d: RAM_DATA_PARITY", + link_id); + fatal_mask |= ioctrlmif_rx_err_status_0_rxramdataparityerr_f(1); + } + if ((intr & ioctrlmif_rx_err_status_0_rxramhdrparityerr_m()) != + 0U) { + nvgpu_err(g, "Fatal MIF RX interrupt hit on link %d: RAM_HDR_PARITY", + link_id); + fatal_mask |= ioctrlmif_rx_err_status_0_rxramhdrparityerr_f(1); + } + + if (fatal_mask != 0U) { + MIF_REG_WR32(g, link_id, ioctrlmif_rx_err_first_0_r(), + fatal_mask); + MIF_REG_WR32(g, link_id, ioctrlmif_rx_err_status_0_r(), + fatal_mask); + } + } + + /* TX Errors */ + fatal_mask = 0; + intr = MIF_REG_RD32(g, link_id, ioctrlmif_tx_err_status_0_r()); + nvgpu_log(g, gpu_dbg_nvlink, "Nvlink MIF TX ISR triggered with intr: 0x%x", intr); + if (intr != 0U) { + if ((intr & ioctrlmif_tx_err_status_0_txramdataparityerr_m()) != + 0U) { + 
nvgpu_err(g, "Fatal MIF TX interrupt hit on link %d: RAM_DATA_PARITY", + link_id); + fatal_mask |= ioctrlmif_tx_err_status_0_txramdataparityerr_f(1); + } + if ((intr & ioctrlmif_tx_err_status_0_txramhdrparityerr_m()) != + 0U) { + nvgpu_err(g, "Fatal MIF TX interrupt hit on link %d: RAM_HDR_PARITY", + link_id); + fatal_mask |= ioctrlmif_tx_err_status_0_txramhdrparityerr_f(1); + } + + if (fatal_mask != 0U) { + MIF_REG_WR32(g, link_id, ioctrlmif_tx_err_first_0_r(), + fatal_mask); + MIF_REG_WR32(g, link_id, ioctrlmif_tx_err_status_0_r(), + fatal_mask); + } + } +} + +/* + * Initialize NVLIPT level link Aerr settings + */ +static void tu104_nvlink_init_nvlipt_link_err(struct gk20a *g, u32 link_id) +{ + /* + * AErr settings (nvlipt level) + */ + + /* UC first and status reg (W1C) need to be cleared */ + IPT_REG_WR32(g, nvlipt_err_uc_first_link0_r(), IPT_ERR_UC_ACTIVE_BITS); + IPT_REG_WR32(g, nvlipt_err_uc_status_link0_r(), IPT_ERR_UC_ACTIVE_BITS); + + /* AErr Severity */ + IPT_REG_WR32(g, nvlipt_err_uc_severity_link0_r(), + IPT_ERR_UC_ACTIVE_BITS); +} + +/* + * Enable NVLIPT link errors and interrupts + */ +static void tu104_nvlink_enable_nvlipt_link_err_intr(struct gk20a *g, + u32 link_id, + bool enable) +{ + u32 val = 0U; + + if (enable) { + val = 1U; + } + + /* Enable fatal link errors. There are no non-fatal or correctable + * link errors. All errors are marked fatal. + */ + IPT_REG_WR32(g, nvlipt_err_control_link0_r(), + nvlipt_err_control_link0_fatalenable_f(val)); + + /* Enable stalling link interrupts. No non-stalling interrupts as per + * HSI. + */ + IPT_REG_WR32(g, nvlipt_intr_control_link0_r(), + nvlipt_intr_control_link0_stallenable_f(val)); +} + +/* + * Per-link NVLIPT ISR handler + */ +static void tu104_nvlink_nvlipt_isr(struct gk20a *g, u32 link_id) +{ + nvgpu_log(g, gpu_dbg_nvlink, "Nvlink NVLIPT ISR"); + /* + * Interrupt handling happens in leaf handlers. 
Assume all interrupts + * were handled and clear roll ups. + */ + IPT_REG_WR32(g, nvlipt_err_uc_first_link0_r(), IPT_ERR_UC_ACTIVE_BITS); + IPT_REG_WR32(g, nvlipt_err_uc_status_link0_r(), IPT_ERR_UC_ACTIVE_BITS); + + return; +} + +/* + * Enable interrupts at top (IOCTRL) level + */ +static void tu104_nvlink_enable_ioctrl_link_intr(struct gk20a *g, u32 link_id, + bool enable) +{ + u32 val = 0U; + + if (enable) { + val = 1U; + } + + /* Init IOCTRL */ + IOCTRL_REG_WR32(g, ioctrl_link_intr_0_mask_r(link_id), + (ioctrl_link_intr_0_mask_fatal_f(val) | + ioctrl_link_intr_0_mask_intra_f(val))); +} + +void tu104_nvlink_init_link_err_intr(struct gk20a *g, u32 link_id) +{ + tu104_nvlink_init_tlc_link_err(g, link_id); + tu104_nvlink_init_mif_link_err(g, link_id); + tu104_nvlink_init_nvlipt_link_err(g, link_id); +} +/* + * Enable link specific errors and interrupts (top-level) + */ +void tu104_nvlink_enable_link_err_intr(struct gk20a *g, u32 link_id, bool enable) +{ + tu104_nvlink_enable_ioctrl_link_intr(g, link_id, enable); + g->ops.nvlink.minion.enable_link_intr(g, link_id, enable); + tu104_nvlink_enable_dlpl_link_intr(g, link_id, enable); + tu104_nvlink_enable_tlc_link_err(g, link_id, enable); + tu104_nvlink_enable_mif_link_err(g, link_id, enable); + tu104_nvlink_enable_nvlipt_link_err_intr(g, link_id, enable); +} + +/* + * Top level interrupt handler + */ +void tu104_nvlink_isr(struct gk20a *g) +{ + unsigned long links; + u32 link_id; + unsigned long bit; + + links = ioctrl_top_intr_0_status_link_v( + IOCTRL_REG_RD32(g, ioctrl_top_intr_0_status_r())); + nvgpu_log(g, gpu_dbg_nvlink, "Top-level nvlink ISR triggered on link:%lu", links); + + links &= g->nvlink.enabled_links; + /* As per ARCH minion must be serviced first */ + g->ops.nvlink.minion.isr(g); + + for_each_set_bit(bit, &links, NVLINK_MAX_LINKS_SW) { + link_id = (u32)bit; + tu104_nvlink_dlpl_isr(g, link_id); + tu104_nvlink_tlc_isr(g, link_id); + tu104_nvlink_mif_isr(g, link_id); + /* NVLIPT is top-level. 
Do it last */ + tu104_nvlink_nvlipt_isr(g, link_id); + } + return; +} + +#endif /* CONFIG_NVGPU_NVLINK */ diff --git a/drivers/gpu/nvgpu/common/nvlink/intr_and_err_handling_gv100.h b/drivers/gpu/nvgpu/hal/nvlink/intr_and_err_handling_tu104.h similarity index 62% rename from drivers/gpu/nvgpu/common/nvlink/intr_and_err_handling_gv100.h rename to drivers/gpu/nvgpu/hal/nvlink/intr_and_err_handling_tu104.h index 8053ab012..c30a187e2 100644 --- a/drivers/gpu/nvgpu/common/nvlink/intr_and_err_handling_gv100.h +++ b/drivers/gpu/nvgpu/hal/nvlink/intr_and_err_handling_tu104.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -20,18 +20,15 @@ * DEALINGS IN THE SOFTWARE. */ -#ifndef INTR_AND_ERR_HANDLING_GV100_H -#define INTR_AND_ERR_HANDLING_GV100_H +#ifndef INTR_AND_ERR_HANDLING_TU104_H +#define INTR_AND_ERR_HANDLING_TU104_H #include struct gk20a; -void gv100_nvlink_common_intr_enable(struct gk20a *g, unsigned long mask); -void gv100_nvlink_init_nvlipt_intr(struct gk20a *g, u32 link_id); -void gv100_nvlink_enable_link_intr(struct gk20a *g, u32 link_id, bool enable); -void gv100_nvlink_init_mif_intr(struct gk20a *g, u32 link_id); -void gv100_nvlink_mif_intr_enable(struct gk20a *g, u32 link_id, bool enable); -void gv100_nvlink_dlpl_intr_enable(struct gk20a *g, u32 link_id, bool enable); -void gv100_nvlink_isr(struct gk20a *g); +void tu104_nvlink_init_link_err_intr(struct gk20a *g, u32 link_id); +void tu104_nvlink_enable_link_err_intr(struct gk20a *g, u32 link_id, + bool enable); +void tu104_nvlink_isr(struct gk20a *g); -#endif /* INTR_AND_ERR_HANDLING_GV100_H */ +#endif /* INTR_AND_ERR_HANDLING_TU104_H */ diff --git a/drivers/gpu/nvgpu/hal/nvlink/link_mode_transitions_gv100.c 
b/drivers/gpu/nvgpu/hal/nvlink/link_mode_transitions_gv100.c index f489c1d22..082f3ba70 100644 --- a/drivers/gpu/nvgpu/hal/nvlink/link_mode_transitions_gv100.c +++ b/drivers/gpu/nvgpu/hal/nvlink/link_mode_transitions_gv100.c @@ -476,7 +476,7 @@ int gv100_nvlink_set_link_mode(struct gk20a *g, u32 link_id, break; case nvgpu_nvlink_link_disable_err_detect: /* Disable Link interrupts */ - g->ops.nvlink.intr.dlpl_intr_enable(g, link_id, false); + g->ops.nvlink.intr.enable_link_err_intr(g, link_id, false); break; case nvgpu_nvlink_link_lane_disable: err = gv100_nvlink_lane_disable(g, link_id, true); diff --git a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h index 673c97cc1..b560567d9 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h +++ b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h @@ -575,16 +575,9 @@ struct gpu_ops { bool (*is_debug_mode)(struct gk20a *g); } minion; struct { - void (*common_intr_enable)(struct gk20a *g, - unsigned long mask); - void (*init_nvlipt_intr)(struct gk20a *g, u32 link_id); - void (*enable_link_intr)(struct gk20a *g, u32 link_id, - bool enable); - void (*init_mif_intr)(struct gk20a *g, u32 link_id); - void (*mif_intr_enable)(struct gk20a *g, u32 link_id, - bool enable); - void (*dlpl_intr_enable)(struct gk20a *g, u32 link_id, - bool enable); + void (*init_link_err_intr)(struct gk20a *g, u32 link_id); + void (*enable_link_err_intr)(struct gk20a *g, + u32 link_id, bool enable); void (*isr)(struct gk20a *g); } intr; } nvlink; diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/tu104/hw_ioctrlmif_tu104.h b/drivers/gpu/nvgpu/include/nvgpu/hw/tu104/hw_ioctrlmif_tu104.h index cead15e83..85f17e144 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/hw/tu104/hw_ioctrlmif_tu104.h +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/tu104/hw_ioctrlmif_tu104.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -122,7 +122,7 @@ #define ioctrlmif_tx_err_log_en_0_txramhdrparityerr_f(v) ((U32(v) & 0x1U) << 1U) #define ioctrlmif_tx_err_log_en_0_txramhdrparityerr_m() (U32(0x1U) << 1U) #define ioctrlmif_tx_err_log_en_0_txramhdrparityerr_v(r) (((r) >> 1U) & 0x1U) -#define ioctrlmif_tx_err_report_en_0_r() (0x00000e08U) +#define ioctrlmif_tx_err_report_en_0_r() (0x00000a8cU) #define ioctrlmif_tx_err_report_en_0_txramdataparityerr_f(v)\ ((U32(v) & 0x1U) << 0U) #define ioctrlmif_tx_err_report_en_0_txramdataparityerr_m() (U32(0x1U) << 0U) @@ -143,4 +143,8 @@ #define ioctrlmif_tx_err_first_0_r() (0x00000a98U) #define ioctrlmif_tx_ctrl_buffer_ready_r() (0x00000a7cU) #define ioctrlmif_rx_ctrl_buffer_ready_r() (0x00000dfcU) +#define ioctrlmif_tx_err_misc_0_r() (0x00000a9cU) +#define ioctrlmif_tx_err_misc_0_txramdataparitypois_f(v) ((U32(v) & 0x1U) << 0U) +#define ioctrlmif_tx_err_misc_0_txramdataparitypois_m() (U32(0x1U) << 0U) +#define ioctrlmif_tx_err_misc_0_txramdataparitypois_v(r) (((r) >> 0U) & 0x1U) #endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/tu104/hw_nvl_tu104.h b/drivers/gpu/nvgpu/include/nvgpu/hw/tu104/hw_nvl_tu104.h index a1fd90a0d..718867254 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/hw/tu104/hw_nvl_tu104.h +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/tu104/hw_nvl_tu104.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -256,9 +256,25 @@ #define nvl_sl1_error_rate_ctrl_short_threshold_man_f(v) ((U32(v) & 0x7U) << 0U) #define nvl_sl1_error_rate_ctrl_short_threshold_man_m() (U32(0x7U) << 0U) #define nvl_sl1_error_rate_ctrl_short_threshold_man_v(r) (((r) >> 0U) & 0x7U) +#define nvl_sl1_error_rate_ctrl_short_threshold_exp_f(v) ((U32(v) & 0x1U) << 3U) +#define nvl_sl1_error_rate_ctrl_short_threshold_exp_m() (U32(0x1U) << 3U) +#define nvl_sl1_error_rate_ctrl_short_threshold_exp_v(r) (((r) >> 3U) & 0x1U) #define nvl_sl1_error_rate_ctrl_long_threshold_man_f(v) ((U32(v) & 0x7U) << 16U) #define nvl_sl1_error_rate_ctrl_long_threshold_man_m() (U32(0x7U) << 16U) #define nvl_sl1_error_rate_ctrl_long_threshold_man_v(r) (((r) >> 16U) & 0x7U) +#define nvl_sl1_error_rate_ctrl_short_timescale_man_f(v) ((U32(v) & 0x7U) << 4U) +#define nvl_sl1_error_rate_ctrl_short_timescale_man_m() (U32(0x7U) << 4U) +#define nvl_sl1_error_rate_ctrl_short_timescale_man_v(r) (((r) >> 4U) & 0x7U) +#define nvl_sl1_error_rate_ctrl_short_timescale_exp_f(v) ((U32(v) & 0xfU) << 8U) +#define nvl_sl1_error_rate_ctrl_short_timescale_exp_m() (U32(0xfU) << 8U) +#define nvl_sl1_error_rate_ctrl_short_timescale_exp_v(r) (((r) >> 8U) & 0xfU) +#define nvl_sl1_error_count_ctrl_r() (0x00003280U) +#define nvl_sl1_error_count_ctrl_short_rate_f(v) ((U32(v) & 0x1U) << 8U) +#define nvl_sl1_error_count_ctrl_short_rate_m() (U32(0x1U) << 8U) +#define nvl_sl1_error_count_ctrl_short_rate_enable_f() (0x100U) +#define nvl_sl1_error_count_ctrl_rate_count_mode_f(v) ((U32(v) & 0x1U) << 10U) +#define nvl_sl1_error_count_ctrl_rate_count_mode_m() (U32(0x1U) << 10U) +#define nvl_sl1_error_count_ctrl_rate_count_mode_flit_f() (0x0U) #define nvl_sl1_rxslsm_timeout_2_r() (0x00003034U) #define nvl_txiobist_configreg_r() (0x00002e14U) #define nvl_txiobist_configreg_io_bist_mode_in_f(v) ((U32(v) & 0x1U) << 
17U) diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/tu104/hw_nvtlc_tu104.h b/drivers/gpu/nvgpu/include/nvgpu/hw/tu104/hw_nvtlc_tu104.h index 404529f1a..dfe272775 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/hw/tu104/hw_nvtlc_tu104.h +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/tu104/hw_nvtlc_tu104.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -59,13 +59,257 @@ #include #include -#define nvtlc_tx_err_report_en_0_r() (0x00000708U) -#define nvtlc_rx_err_report_en_0_r() (0x00000f08U) -#define nvtlc_rx_err_report_en_1_r() (0x00000f20U) #define nvtlc_tx_err_status_0_r() (0x00000700U) #define nvtlc_rx_err_status_0_r() (0x00000f00U) #define nvtlc_rx_err_status_1_r() (0x00000f18U) #define nvtlc_tx_err_first_0_r() (0x00000714U) #define nvtlc_rx_err_first_0_r() (0x00000f14U) #define nvtlc_rx_err_first_1_r() (0x00000f2cU) +#define nvtlc_tx_err_report_en_0_r() (0x00000708U) +#define nvtlc_tx_err_report_en_0_txhdrcreditovferr_f(v) ((U32(v) & 0xffU) << 0U) +#define nvtlc_tx_err_report_en_0_txhdrcreditovferr__prod_f() (0xffU) +#define nvtlc_tx_err_report_en_0_txdatacreditovferr_f(v)\ + ((U32(v) & 0xffU) << 8U) +#define nvtlc_tx_err_report_en_0_txdatacreditovferr__prod_f() (0xff00U) +#define nvtlc_tx_err_report_en_0_txdlcreditovferr_f(v) ((U32(v) & 0x1U) << 16U) +#define nvtlc_tx_err_report_en_0_txdlcreditovferr__prod_f() (0x10000U) +#define nvtlc_tx_err_report_en_0_txdlcreditparityerr_f(v)\ + ((U32(v) & 0x1U) << 17U) +#define nvtlc_tx_err_report_en_0_txdlcreditparityerr__prod_f() (0x20000U) +#define nvtlc_tx_err_report_en_0_txramhdrparityerr_f(v) ((U32(v) & 0x1U) << 18U) +#define nvtlc_tx_err_report_en_0_txramhdrparityerr__prod_f() (0x40000U) +#define nvtlc_tx_err_report_en_0_txramdataparityerr_f(v)\ + ((U32(v) & 
0x1U) << 19U) +#define nvtlc_tx_err_report_en_0_txramdataparityerr__prod_f() (0x80000U) +#define nvtlc_tx_err_report_en_0_txunsupvcovferr_f(v) ((U32(v) & 0x1U) << 20U) +#define nvtlc_tx_err_report_en_0_txunsupvcovferr__prod_f() (0x100000U) +#define nvtlc_tx_err_report_en_0_txstompdet_f(v) ((U32(v) & 0x1U) << 22U) +#define nvtlc_tx_err_report_en_0_txstompdet__prod_f() (0x400000U) +#define nvtlc_tx_err_report_en_0_txpoisondet_f(v) ((U32(v) & 0x1U) << 23U) +#define nvtlc_tx_err_report_en_0_txpoisondet__prod_f() (0x800000U) +#define nvtlc_tx_err_report_en_0_targeterr_f(v) ((U32(v) & 0x1U) << 24U) +#define nvtlc_tx_err_report_en_0_targeterr__prod_f() (0x1000000U) +#define nvtlc_tx_err_report_en_0_unsupportedrequesterr_f(v)\ + ((U32(v) & 0x1U) << 25U) +#define nvtlc_tx_err_report_en_0_unsupportedrequesterr__prod_f() (0x2000000U) +#define nvtlc_tx_err_log_en_0_r() (0x00000704U) +#define nvtlc_tx_err_log_en_0_txhdrcreditovferr_f(v) ((U32(v) & 0xffU) << 0U) +#define nvtlc_tx_err_log_en_0_txhdrcreditovferr__prod_f() (0xffU) +#define nvtlc_tx_err_log_en_0_txdatacreditovferr_f(v) ((U32(v) & 0xffU) << 8U) +#define nvtlc_tx_err_log_en_0_txdatacreditovferr__prod_f() (0xff00U) +#define nvtlc_tx_err_log_en_0_txdlcreditovferr_f(v) ((U32(v) & 0x1U) << 16U) +#define nvtlc_tx_err_log_en_0_txdlcreditovferr__prod_f() (0x10000U) +#define nvtlc_tx_err_log_en_0_txdlcreditparityerr_f(v) ((U32(v) & 0x1U) << 17U) +#define nvtlc_tx_err_log_en_0_txdlcreditparityerr__prod_f() (0x20000U) +#define nvtlc_tx_err_log_en_0_txramhdrparityerr_f(v) ((U32(v) & 0x1U) << 18U) +#define nvtlc_tx_err_log_en_0_txramhdrparityerr__prod_f() (0x40000U) +#define nvtlc_tx_err_log_en_0_txramdataparityerr_f(v) ((U32(v) & 0x1U) << 19U) +#define nvtlc_tx_err_log_en_0_txramdataparityerr__prod_f() (0x80000U) +#define nvtlc_tx_err_log_en_0_txunsupvcovferr_f(v) ((U32(v) & 0x1U) << 20U) +#define nvtlc_tx_err_log_en_0_txunsupvcovferr__prod_f() (0x100000U) +#define nvtlc_tx_err_log_en_0_txstompdet_f(v) ((U32(v) & 0x1U) << 22U) 
+#define nvtlc_tx_err_log_en_0_txstompdet__prod_f() (0x400000U) +#define nvtlc_tx_err_log_en_0_txpoisondet_f(v) ((U32(v) & 0x1U) << 23U) +#define nvtlc_tx_err_log_en_0_txpoisondet__prod_f() (0x800000U) +#define nvtlc_tx_err_log_en_0_targeterr_f(v) ((U32(v) & 0x1U) << 24U) +#define nvtlc_tx_err_log_en_0_targeterr__prod_f() (0x1000000U) +#define nvtlc_tx_err_log_en_0_unsupportedrequesterr_f(v)\ + ((U32(v) & 0x1U) << 25U) +#define nvtlc_tx_err_log_en_0_unsupportedrequesterr__prod_f() (0x2000000U) +#define nvtlc_tx_err_contain_en_0_r() (0x0000070cU) +#define nvtlc_tx_err_contain_en_0_txhdrcreditovferr_f(v)\ + ((U32(v) & 0xffU) << 0U) +#define nvtlc_tx_err_contain_en_0_txhdrcreditovferr__prod_f() (0xffU) +#define nvtlc_tx_err_contain_en_0_txdatacreditovferr_f(v)\ + ((U32(v) & 0xffU) << 8U) +#define nvtlc_tx_err_contain_en_0_txdatacreditovferr__prod_f() (0xff00U) +#define nvtlc_tx_err_contain_en_0_txdlcreditovferr_f(v) ((U32(v) & 0x1U) << 16U) +#define nvtlc_tx_err_contain_en_0_txdlcreditovferr__prod_f() (0x10000U) +#define nvtlc_tx_err_contain_en_0_txdlcreditparityerr_f(v)\ + ((U32(v) & 0x1U) << 17U) +#define nvtlc_tx_err_contain_en_0_txdlcreditparityerr__prod_f() (0x20000U) +#define nvtlc_tx_err_contain_en_0_txramhdrparityerr_f(v)\ + ((U32(v) & 0x1U) << 18U) +#define nvtlc_tx_err_contain_en_0_txramhdrparityerr__prod_f() (0x40000U) +#define nvtlc_tx_err_contain_en_0_txunsupvcovferr_f(v) ((U32(v) & 0x1U) << 20U) +#define nvtlc_tx_err_contain_en_0_txunsupvcovferr__prod_f() (0x100000U) +#define nvtlc_tx_err_contain_en_0_txstompdet_f(v) ((U32(v) & 0x1U) << 22U) +#define nvtlc_tx_err_contain_en_0_txstompdet__prod_f() (0x400000U) +#define nvtlc_tx_err_contain_en_0_txpoisondet_f(v) ((U32(v) & 0x1U) << 23U) +#define nvtlc_tx_err_contain_en_0_targeterr_f(v) ((U32(v) & 0x1U) << 24U) +#define nvtlc_tx_err_contain_en_0_unsupportedrequesterr_f(v)\ + ((U32(v) & 0x1U) << 25U) +#define nvtlc_rx_err_report_en_0_r() (0x00000f08U) +#define nvtlc_rx_err_report_en_0_rxdlhdrparityerr_f(v) 
((U32(v) & 0x1U) << 0U) +#define nvtlc_rx_err_report_en_0_rxdlhdrparityerr__prod_f() (0x1U) +#define nvtlc_rx_err_report_en_0_rxdldataparityerr_f(v) ((U32(v) & 0x1U) << 1U) +#define nvtlc_rx_err_report_en_0_rxdldataparityerr__prod_f() (0x2U) +#define nvtlc_rx_err_report_en_0_rxdlctrlparityerr_f(v) ((U32(v) & 0x1U) << 2U) +#define nvtlc_rx_err_report_en_0_rxdlctrlparityerr__prod_f() (0x4U) +#define nvtlc_rx_err_report_en_0_rxramdataparityerr_f(v) ((U32(v) & 0x1U) << 3U) +#define nvtlc_rx_err_report_en_0_rxramdataparityerr__prod_f() (0x8U) +#define nvtlc_rx_err_report_en_0_rxramhdrparityerr_f(v) ((U32(v) & 0x1U) << 4U) +#define nvtlc_rx_err_report_en_0_rxramhdrparityerr__prod_f() (0x10U) +#define nvtlc_rx_err_report_en_0_rxinvalidaeerr_f(v) ((U32(v) & 0x1U) << 5U) +#define nvtlc_rx_err_report_en_0_rxinvalidaeerr__prod_f() (0x20U) +#define nvtlc_rx_err_report_en_0_rxinvalidbeerr_f(v) ((U32(v) & 0x1U) << 6U) +#define nvtlc_rx_err_report_en_0_rxinvalidbeerr__prod_f() (0x40U) +#define nvtlc_rx_err_report_en_0_rxinvalidaddralignerr_f(v)\ + ((U32(v) & 0x1U) << 7U) +#define nvtlc_rx_err_report_en_0_rxinvalidaddralignerr__prod_f() (0x80U) +#define nvtlc_rx_err_report_en_0_rxpktlenerr_f(v) ((U32(v) & 0x1U) << 8U) +#define nvtlc_rx_err_report_en_0_rxpktlenerr__prod_f() (0x100U) +#define nvtlc_rx_err_report_en_0_datlengtatomicreqmaxerr_f(v)\ + ((U32(v) & 0x1U) << 17U) +#define nvtlc_rx_err_report_en_0_datlengtatomicreqmaxerr__prod_f() (0x20000U) +#define nvtlc_rx_err_report_en_0_datlengtrmwreqmaxerr_f(v)\ + ((U32(v) & 0x1U) << 18U) +#define nvtlc_rx_err_report_en_0_datlengtrmwreqmaxerr__prod_f() (0x40000U) +#define nvtlc_rx_err_report_en_0_datlenltatrrspminerr_f(v)\ + ((U32(v) & 0x1U) << 19U) +#define nvtlc_rx_err_report_en_0_datlenltatrrspminerr__prod_f() (0x80000U) +#define nvtlc_rx_err_report_en_0_invalidcacheattrpoerr_f(v)\ + ((U32(v) & 0x1U) << 20U) +#define nvtlc_rx_err_report_en_0_invalidcacheattrpoerr__prod_f() (0x100000U) +#define 
nvtlc_rx_err_report_en_0_invalidcrerr_f(v) ((U32(v) & 0x1U) << 21U) +#define nvtlc_rx_err_report_en_0_invalidcrerr__prod_f() (0x200000U) +#define nvtlc_rx_err_report_en_0_rxrespstatustargeterr_f(v)\ + ((U32(v) & 0x1U) << 22U) +#define nvtlc_rx_err_report_en_0_rxrespstatustargeterr__prod_f() (0x400000U) +#define nvtlc_rx_err_report_en_0_rxrespstatusunsupportedrequesterr_f(v)\ + ((U32(v) & 0x1U) << 23U) +#define nvtlc_rx_err_report_en_0_rxrespstatusunsupportedrequesterr__prod_f()\ + (0x800000U) +#define nvtlc_rx_err_log_en_0_r() (0x00000f04U) +#define nvtlc_rx_err_log_en_0_rxdlhdrparityerr_f(v) ((U32(v) & 0x1U) << 0U) +#define nvtlc_rx_err_log_en_0_rxdlhdrparityerr__prod_f() (0x1U) +#define nvtlc_rx_err_log_en_0_rxdldataparityerr_f(v) ((U32(v) & 0x1U) << 1U) +#define nvtlc_rx_err_log_en_0_rxdldataparityerr__prod_f() (0x2U) +#define nvtlc_rx_err_log_en_0_rxdlctrlparityerr_f(v) ((U32(v) & 0x1U) << 2U) +#define nvtlc_rx_err_log_en_0_rxdlctrlparityerr__prod_f() (0x4U) +#define nvtlc_rx_err_log_en_0_rxramdataparityerr_f(v) ((U32(v) & 0x1U) << 3U) +#define nvtlc_rx_err_log_en_0_rxramdataparityerr__prod_f() (0x8U) +#define nvtlc_rx_err_log_en_0_rxramhdrparityerr_f(v) ((U32(v) & 0x1U) << 4U) +#define nvtlc_rx_err_log_en_0_rxramhdrparityerr__prod_f() (0x10U) +#define nvtlc_rx_err_log_en_0_rxinvalidaeerr_f(v) ((U32(v) & 0x1U) << 5U) +#define nvtlc_rx_err_log_en_0_rxinvalidaeerr__prod_f() (0x20U) +#define nvtlc_rx_err_log_en_0_rxinvalidbeerr_f(v) ((U32(v) & 0x1U) << 6U) +#define nvtlc_rx_err_log_en_0_rxinvalidbeerr__prod_f() (0x40U) +#define nvtlc_rx_err_log_en_0_rxinvalidaddralignerr_f(v) ((U32(v) & 0x1U) << 7U) +#define nvtlc_rx_err_log_en_0_rxinvalidaddralignerr__prod_f() (0x80U) +#define nvtlc_rx_err_log_en_0_rxpktlenerr_f(v) ((U32(v) & 0x1U) << 8U) +#define nvtlc_rx_err_log_en_0_rxpktlenerr__prod_f() (0x100U) +#define nvtlc_rx_err_log_en_0_datlengtatomicreqmaxerr_f(v)\ + ((U32(v) & 0x1U) << 17U) +#define nvtlc_rx_err_log_en_0_datlengtatomicreqmaxerr__prod_f() (0x20000U) 
+#define nvtlc_rx_err_log_en_0_datlengtrmwreqmaxerr_f(v) ((U32(v) & 0x1U) << 18U) +#define nvtlc_rx_err_log_en_0_datlengtrmwreqmaxerr__prod_f() (0x40000U) +#define nvtlc_rx_err_log_en_0_datlenltatrrspminerr_f(v) ((U32(v) & 0x1U) << 19U) +#define nvtlc_rx_err_log_en_0_datlenltatrrspminerr__prod_f() (0x80000U) +#define nvtlc_rx_err_log_en_0_invalidcacheattrpoerr_f(v)\ + ((U32(v) & 0x1U) << 20U) +#define nvtlc_rx_err_log_en_0_invalidcacheattrpoerr__prod_f() (0x100000U) +#define nvtlc_rx_err_log_en_0_invalidcrerr_f(v) ((U32(v) & 0x1U) << 21U) +#define nvtlc_rx_err_log_en_0_invalidcrerr__prod_f() (0x200000U) +#define nvtlc_rx_err_log_en_0_rxrespstatustargeterr_f(v)\ + ((U32(v) & 0x1U) << 22U) +#define nvtlc_rx_err_log_en_0_rxrespstatustargeterr__prod_f() (0x400000U) +#define nvtlc_rx_err_log_en_0_rxrespstatusunsupportedrequesterr_f(v)\ + ((U32(v) & 0x1U) << 23U) +#define nvtlc_rx_err_log_en_0_rxrespstatusunsupportedrequesterr__prod_f()\ + (0x800000U) +#define nvtlc_rx_err_contain_en_0_r() (0x00000f0cU) +#define nvtlc_rx_err_contain_en_0_rxdlhdrparityerr_f(v) ((U32(v) & 0x1U) << 0U) +#define nvtlc_rx_err_contain_en_0_rxdlhdrparityerr__prod_f() (0x1U) +#define nvtlc_rx_err_contain_en_0_rxdldataparityerr_f(v) ((U32(v) & 0x1U) << 1U) +#define nvtlc_rx_err_contain_en_0_rxdldataparityerr__prod_f() (0x2U) +#define nvtlc_rx_err_contain_en_0_rxdlctrlparityerr_f(v) ((U32(v) & 0x1U) << 2U) +#define nvtlc_rx_err_contain_en_0_rxdlctrlparityerr__prod_f() (0x4U) +#define nvtlc_rx_err_contain_en_0_rxramdataparityerr_f(v)\ + ((U32(v) & 0x1U) << 3U) +#define nvtlc_rx_err_contain_en_0_rxramhdrparityerr_f(v) ((U32(v) & 0x1U) << 4U) +#define nvtlc_rx_err_contain_en_0_rxramhdrparityerr__prod_f() (0x10U) +#define nvtlc_rx_err_contain_en_0_rxinvalidaeerr_f(v) ((U32(v) & 0x1U) << 5U) +#define nvtlc_rx_err_contain_en_0_rxinvalidaeerr__prod_f() (0x20U) +#define nvtlc_rx_err_contain_en_0_rxinvalidbeerr_f(v) ((U32(v) & 0x1U) << 6U) +#define nvtlc_rx_err_contain_en_0_rxinvalidbeerr__prod_f() (0x40U) 
+#define nvtlc_rx_err_contain_en_0_rxinvalidaddralignerr_f(v)\ + ((U32(v) & 0x1U) << 7U) +#define nvtlc_rx_err_contain_en_0_rxinvalidaddralignerr__prod_f() (0x80U) +#define nvtlc_rx_err_contain_en_0_rxpktlenerr_f(v) ((U32(v) & 0x1U) << 8U) +#define nvtlc_rx_err_contain_en_0_rxpktlenerr__prod_f() (0x100U) +#define nvtlc_rx_err_contain_en_0_datlengtatomicreqmaxerr_f(v)\ + ((U32(v) & 0x1U) << 17U) +#define nvtlc_rx_err_contain_en_0_datlengtatomicreqmaxerr__prod_f() (0x20000U) +#define nvtlc_rx_err_contain_en_0_datlengtrmwreqmaxerr_f(v)\ + ((U32(v) & 0x1U) << 18U) +#define nvtlc_rx_err_contain_en_0_datlengtrmwreqmaxerr__prod_f() (0x40000U) +#define nvtlc_rx_err_contain_en_0_datlenltatrrspminerr_f(v)\ + ((U32(v) & 0x1U) << 19U) +#define nvtlc_rx_err_contain_en_0_datlenltatrrspminerr__prod_f() (0x80000U) +#define nvtlc_rx_err_contain_en_0_invalidcacheattrpoerr_f(v)\ + ((U32(v) & 0x1U) << 20U) +#define nvtlc_rx_err_contain_en_0_invalidcacheattrpoerr__prod_f() (0x100000U) +#define nvtlc_rx_err_contain_en_0_invalidcrerr_f(v) ((U32(v) & 0x1U) << 21U) +#define nvtlc_rx_err_contain_en_0_invalidcrerr__prod_f() (0x200000U) +#define nvtlc_rx_err_contain_en_0_rxrespstatustargeterr_f(v)\ + ((U32(v) & 0x1U) << 22U) +#define nvtlc_rx_err_contain_en_0_rxrespstatustargeterr__prod_f() (0x400000U) +#define nvtlc_rx_err_contain_en_0_rxrespstatusunsupportedrequesterr_f(v)\ + ((U32(v) & 0x1U) << 23U) +#define nvtlc_rx_err_contain_en_0_rxrespstatusunsupportedrequesterr__prod_f()\ + (0x800000U) +#define nvtlc_rx_err_report_en_1_r() (0x00000f20U) +#define nvtlc_rx_err_report_en_1_rxhdrovferr_f(v) ((U32(v) & 0xffU) << 0U) +#define nvtlc_rx_err_report_en_1_rxhdrovferr__prod_f() (0xffU) +#define nvtlc_rx_err_report_en_1_rxdataovferr_f(v) ((U32(v) & 0xffU) << 8U) +#define nvtlc_rx_err_report_en_1_rxdataovferr__prod_f() (0xff00U) +#define nvtlc_rx_err_report_en_1_stompdeterr_f(v) ((U32(v) & 0x1U) << 16U) +#define nvtlc_rx_err_report_en_1_stompdeterr__prod_f() (0x10000U) +#define 
nvtlc_rx_err_report_en_1_rxpoisonerr_f(v) ((U32(v) & 0x1U) << 17U) +#define nvtlc_rx_err_report_en_1_rxpoisonerr__prod_f() (0x20000U) +#define nvtlc_rx_err_report_en_1_rxunsupvcovferr_f(v) ((U32(v) & 0x1U) << 19U) +#define nvtlc_rx_err_report_en_1_rxunsupvcovferr__prod_f() (0x80000U) +#define nvtlc_rx_err_report_en_1_rxunsupnvlinkcreditrelerr_f(v)\ + ((U32(v) & 0x1U) << 20U) +#define nvtlc_rx_err_report_en_1_rxunsupnvlinkcreditrelerr__prod_f() (0x100000U) +#define nvtlc_rx_err_report_en_1_rxunsupncisoccreditrelerr_f(v)\ + ((U32(v) & 0x1U) << 21U) +#define nvtlc_rx_err_report_en_1_rxunsupncisoccreditrelerr__prod_f() (0x200000U) +#define nvtlc_rx_err_log_en_1_r() (0x00000f1cU) +#define nvtlc_rx_err_log_en_1_rxhdrovferr_f(v) ((U32(v) & 0xffU) << 0U) +#define nvtlc_rx_err_log_en_1_rxhdrovferr__prod_f() (0xffU) +#define nvtlc_rx_err_log_en_1_rxdataovferr_f(v) ((U32(v) & 0xffU) << 8U) +#define nvtlc_rx_err_log_en_1_rxdataovferr__prod_f() (0xff00U) +#define nvtlc_rx_err_log_en_1_stompdeterr_f(v) ((U32(v) & 0x1U) << 16U) +#define nvtlc_rx_err_log_en_1_stompdeterr__prod_f() (0x10000U) +#define nvtlc_rx_err_log_en_1_rxpoisonerr_f(v) ((U32(v) & 0x1U) << 17U) +#define nvtlc_rx_err_log_en_1_rxpoisonerr__prod_f() (0x20000U) +#define nvtlc_rx_err_log_en_1_rxunsupvcovferr_f(v) ((U32(v) & 0x1U) << 19U) +#define nvtlc_rx_err_log_en_1_rxunsupvcovferr__prod_f() (0x80000U) +#define nvtlc_rx_err_log_en_1_rxunsupnvlinkcreditrelerr_f(v)\ + ((U32(v) & 0x1U) << 20U) +#define nvtlc_rx_err_log_en_1_rxunsupnvlinkcreditrelerr__prod_f() (0x100000U) +#define nvtlc_rx_err_log_en_1_rxunsupncisoccreditrelerr_f(v)\ + ((U32(v) & 0x1U) << 21U) +#define nvtlc_rx_err_log_en_1_rxunsupncisoccreditrelerr__prod_f() (0x200000U) +#define nvtlc_rx_err_contain_en_1_r() (0x00000f24U) +#define nvtlc_rx_err_contain_en_1_rxhdrovferr_f(v) ((U32(v) & 0xffU) << 0U) +#define nvtlc_rx_err_contain_en_1_rxhdrovferr__prod_f() (0xffU) +#define nvtlc_rx_err_contain_en_1_rxdataovferr_f(v) ((U32(v) & 0xffU) << 8U) +#define 
nvtlc_rx_err_contain_en_1_rxdataovferr__prod_f() (0xff00U) +#define nvtlc_rx_err_contain_en_1_stompdeterr_f(v) ((U32(v) & 0x1U) << 16U) +#define nvtlc_rx_err_contain_en_1_stompdeterr__prod_f() (0x10000U) +#define nvtlc_rx_err_contain_en_1_rxpoisonerr_f(v) ((U32(v) & 0x1U) << 17U) +#define nvtlc_rx_err_contain_en_1_rxpoisonerr__prod_f() (0x20000U) +#define nvtlc_rx_err_contain_en_1_rxunsupvcovferr_f(v) ((U32(v) & 0x1U) << 19U) +#define nvtlc_rx_err_contain_en_1_rxunsupvcovferr__prod_f() (0x80000U) +#define nvtlc_rx_err_contain_en_1_rxunsupnvlinkcreditrelerr_f(v)\ + ((U32(v) & 0x1U) << 20U) +#define nvtlc_rx_err_contain_en_1_rxunsupnvlinkcreditrelerr__prod_f()\ + (0x100000U) +#define nvtlc_rx_err_contain_en_1_rxunsupncisoccreditrelerr_f(v)\ + ((U32(v) & 0x1U) << 21U) +#define nvtlc_rx_err_contain_en_1_rxunsupncisoccreditrelerr__prod_f()\ + (0x200000U) #endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/nvlink.h b/drivers/gpu/nvgpu/include/nvgpu/nvlink.h index bf6e3b99e..ab639fe9d 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/nvlink.h +++ b/drivers/gpu/nvgpu/include/nvgpu/nvlink.h @@ -182,7 +182,6 @@ int nvgpu_nvlink_interface_init(struct gk20a *g); int nvgpu_nvlink_interface_disable(struct gk20a *g); int nvgpu_nvlink_dev_shutdown(struct gk20a *g); int nvgpu_nvlink_enumerate(struct gk20a *g); -int nvgpu_nvlink_train(struct gk20a *g, u32 link_id, bool from_off); int nvgpu_nvlink_remove(struct gk20a *g); void nvgpu_mss_nvlink_init_credits(struct gk20a *g); diff --git a/drivers/gpu/nvgpu/os/posix/posix-nvlink.c b/drivers/gpu/nvgpu/os/posix/posix-nvlink.c index bc6e20040..f3fa7f1ba 100644 --- a/drivers/gpu/nvgpu/os/posix/posix-nvlink.c +++ b/drivers/gpu/nvgpu/os/posix/posix-nvlink.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -27,11 +27,6 @@ struct gk20a; struct nvgpu_firmware; -int nvgpu_nvlink_train(struct gk20a *g, u32 link_id, bool from_off) -{ - return -ENOSYS; -} - int nvgpu_nvlink_enumerate(struct gk20a *g) { return -ENOSYS;