gpu: nvgpu: Add nvlink "minion" unit

Move the code that deals with the minion into a separate unit called
"nvlink_minion". The unit covers minion HW access, ucode handling,
exposing the minion's state and handling minion interrupts. The unit's
interface is partially exposed through g->ops.nvlink.minion ops; the
rest is declared in the public nvlink_minion.h header.
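
As a minimal illustrative sketch (not part of this change), common code
is expected to drive the minion only through the new ops and the
nvlink_minion.h API; the helper name below is hypothetical, while the
called functions and the DLCMD enum come from this change:

#include <nvgpu/gk20a.h>
#include <nvgpu/nvlink_minion.h>

/* Hypothetical caller: boot the minion and issue a chip-agnostic DL
 * command through g->ops.nvlink.minion, without touching minion
 * registers or chip-specific command ordinals directly. */
static int nvgpu_nvlink_example_minion_start(struct gk20a *g, u32 link_id)
{
	int err;

	/* Common code: loads ucode, bootstraps and sets up interrupts */
	err = nvgpu_nvlink_minion_load(g);
	if (err != 0) {
		return err;
	}

	/* The per-chip HAL maps the enum to the HW command ordinal */
	return g->ops.nvlink.minion.send_dlcmd(g, link_id,
			NVGPU_NVLINK_MINION_DLCMD_INITPHY, true);
}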

JIRA NVGPU-2860

Change-Id: Iea9288ea5f0b26688540b1eb8ab64afd756941a4
Signed-off-by: Tejal Kudav <tkudav@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2030103
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Authored by Tejal Kudav on 2019-02-28 21:26:14 +05:30, committed by mobile promotions
parent a3ba265dd6
commit a6f0ce7971
20 changed files with 835 additions and 508 deletions

View File

@@ -384,6 +384,9 @@ nvgpu-y += \
common/nvlink/init/device_reginit_gv100.o \
common/nvlink/init/device_reginit.o \
common/nvlink/intr_and_err_handling_gv100.o \
common/nvlink/minion.o \
hal/nvlink/minion_gv100.o \
hal/nvlink/minion_tu104.o \
common/nvlink/nvlink.o \
common/nvlink/nvlink_gv100.o \
common/nvlink/nvlink_tu104.o \

View File

@@ -288,6 +288,9 @@ srcs += common/sim.c \
common/nvlink/init/device_reginit.c \
common/nvlink/init/device_reginit_gv100.c \
common/nvlink/intr_and_err_handling_gv100.c \
common/nvlink/minion.c \
hal/nvlink/minion_gv100.c \
hal/nvlink/minion_tu104.c \
common/nvlink/nvlink_gv100.c \
common/nvlink/nvlink_tu104.c \
common/nvlink/nvlink.c \

View File

@@ -55,7 +55,7 @@ int gv100_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
switch (flcn->flcn_id) {
case FALCON_ID_MINION:
flcn->flcn_base = g->ops.nvlink.falcon_base_addr(g);
flcn->flcn_base = g->ops.nvlink.minion.base_addr(g);
flcn->is_falcon_supported = true;
flcn->is_interrupt_enabled = true;
break;

View File

@@ -28,7 +28,6 @@
#include <nvgpu/hw/gv100/hw_nvlipt_gv100.h>
#include <nvgpu/hw/gv100/hw_ioctrl_gv100.h>
#include <nvgpu/hw/gv100/hw_minion_gv100.h>
#include <nvgpu/hw/gv100/hw_nvl_gv100.h>
#include <nvgpu/hw/gv100/hw_ioctrlmif_gv100.h>
#include <nvgpu/hw/gv100/hw_nvtlc_gv100.h>
@@ -59,213 +58,6 @@
nvlipt_err_uc_status_link0_stompedpacketreceived_f(1) | \
nvlipt_err_uc_status_link0_unsupportedrequest_f(1) | \
nvlipt_err_uc_status_link0_ucinternal_f(1))
#define MINION_FALCON_INTR_MASK (minion_falcon_irqmset_wdtmr_set_f() | \
minion_falcon_irqmset_halt_set_f() | \
minion_falcon_irqmset_exterr_set_f()| \
minion_falcon_irqmset_swgen0_set_f()| \
minion_falcon_irqmset_swgen1_set_f())
#define MINION_FALCON_INTR_DEST ( \
minion_falcon_irqdest_host_wdtmr_host_f() | \
minion_falcon_irqdest_host_halt_host_f() | \
minion_falcon_irqdest_host_exterr_host_f() | \
minion_falcon_irqdest_host_swgen0_host_f() | \
minion_falcon_irqdest_host_swgen1_host_f() | \
minion_falcon_irqdest_target_wdtmr_host_normal_f() | \
minion_falcon_irqdest_target_halt_host_normal_f() | \
minion_falcon_irqdest_target_exterr_host_normal_f() | \
minion_falcon_irqdest_target_swgen0_host_normal_f() | \
minion_falcon_irqdest_target_swgen1_host_normal_f())
/*
* Clear minion Interrupts
*/
void gv100_nvlink_minion_clear_interrupts(struct gk20a *g)
{
nvgpu_falcon_set_irq(g->minion_flcn, true, MINION_FALCON_INTR_MASK,
MINION_FALCON_INTR_DEST);
}
/*
* Initialization of link specific interrupts
*/
static void gv100_nvlink_minion_link_intr_enable(struct gk20a *g, u32 link_id,
bool enable)
{
u32 intr, links;
/* Only stall interrupts for now */
intr = MINION_REG_RD32(g, minion_minion_intr_stall_en_r());
links = minion_minion_intr_stall_en_link_v(intr);
if (enable) {
links |= BIT32(link_id);
} else {
links &= ~BIT32(link_id);
}
intr = set_field(intr, minion_minion_intr_stall_en_link_m(),
minion_minion_intr_stall_en_link_f(links));
MINION_REG_WR32(g, minion_minion_intr_stall_en_r(), intr);
}
/*
* Initialization of falcon interrupts
*/
static void gv100_nvlink_minion_falcon_intr_enable(struct gk20a *g, bool enable)
{
u32 reg;
reg = MINION_REG_RD32(g, minion_minion_intr_stall_en_r());
if (enable) {
reg = set_field(reg, minion_minion_intr_stall_en_fatal_m(),
minion_minion_intr_stall_en_fatal_enable_f());
reg = set_field(reg, minion_minion_intr_stall_en_nonfatal_m(),
minion_minion_intr_stall_en_nonfatal_enable_f());
reg = set_field(reg, minion_minion_intr_stall_en_falcon_stall_m(),
minion_minion_intr_stall_en_falcon_stall_enable_f());
reg = set_field(reg, minion_minion_intr_stall_en_falcon_nostall_m(),
minion_minion_intr_stall_en_falcon_nostall_enable_f());
} else {
reg = set_field(reg, minion_minion_intr_stall_en_fatal_m(),
minion_minion_intr_stall_en_fatal_disable_f());
reg = set_field(reg, minion_minion_intr_stall_en_nonfatal_m(),
minion_minion_intr_stall_en_nonfatal_disable_f());
reg = set_field(reg, minion_minion_intr_stall_en_falcon_stall_m(),
minion_minion_intr_stall_en_falcon_stall_disable_f());
reg = set_field(reg, minion_minion_intr_stall_en_falcon_nostall_m(),
minion_minion_intr_stall_en_falcon_nostall_disable_f());
}
MINION_REG_WR32(g, minion_minion_intr_stall_en_r(), reg);
}
/*
* Initialize minion IP interrupts
*/
void gv100_nvlink_init_minion_intr(struct gk20a *g)
{
/* Disable non-stall tree */
MINION_REG_WR32(g, minion_minion_intr_nonstall_en_r(), 0x0);
gv100_nvlink_minion_falcon_intr_enable(g, true);
}
/*
* Falcon specific ISR handling
*/
void gv100_nvlink_minion_falcon_isr(struct gk20a *g)
{
u32 intr;
intr = MINION_REG_RD32(g, minion_falcon_irqstat_r()) &
MINION_REG_RD32(g, minion_falcon_irqmask_r());
if (intr == 0U) {
return;
}
if ((intr & minion_falcon_irqstat_exterr_true_f()) != 0U) {
nvgpu_err(g, "FALCON EXT ADDR: 0x%x 0x%x 0x%x",
MINION_REG_RD32(g, minion_falcon_csberrstat_r()),
MINION_REG_RD32(g, minion_falcon_csberr_info_r()),
MINION_REG_RD32(g, minion_falcon_csberr_addr_r()));
}
MINION_REG_WR32(g, minion_falcon_irqsclr_r(), intr);
nvgpu_err(g, "FATAL minion IRQ: 0x%08x", intr);
return;
}
/*
* Link Specific ISR
*/
static void gv100_nvlink_minion_link_isr(struct gk20a *g, u32 link_id)
{
u32 intr, code;
bool fatal = false;
intr = MINION_REG_RD32(g, minion_nvlink_link_intr_r(link_id));
code = minion_nvlink_link_intr_code_v(intr);
if (code == minion_nvlink_link_intr_code_swreq_v()) {
nvgpu_err(g, " Intr SWREQ, link: %d subcode: %x",
link_id, minion_nvlink_link_intr_subcode_v(intr));
} else if (code == minion_nvlink_link_intr_code_pmdisabled_v()) {
nvgpu_err(g, " Fatal Intr PMDISABLED, link: %d subcode: %x",
link_id, minion_nvlink_link_intr_subcode_v(intr));
fatal = true;
} else if (code == minion_nvlink_link_intr_code_na_v()) {
nvgpu_err(g, " Fatal Intr NA, link: %d subcode: %x",
link_id, minion_nvlink_link_intr_subcode_v(intr));
fatal = true;
} else if (code == minion_nvlink_link_intr_code_dlreq_v()) {
nvgpu_err(g, " Fatal Intr DLREQ, link: %d subcode: %x",
link_id, minion_nvlink_link_intr_subcode_v(intr));
fatal = true;
} else {
nvgpu_err(g, " Fatal Intr UNKN:%x, link: %d subcode: %x", code,
link_id, minion_nvlink_link_intr_subcode_v(intr));
fatal = true;
}
if (fatal) {
gv100_nvlink_minion_link_intr_enable(g, link_id, false);
}
intr = set_field(intr, minion_nvlink_link_intr_state_m(),
minion_nvlink_link_intr_state_f(1));
MINION_REG_WR32(g, minion_nvlink_link_intr_r(link_id), intr);
return;
}
/*
* Global minion routine to service interrupts
*/
static void gv100_nvlink_minion_isr(struct gk20a *g) {
u32 intr, link_id;
unsigned long links;
unsigned long bit;
intr = MINION_REG_RD32(g, minion_minion_intr_r()) &
MINION_REG_RD32(g, minion_minion_intr_stall_en_r());
if ((minion_minion_intr_falcon_stall_v(intr) != 0U) ||
(minion_minion_intr_falcon_nostall_v(intr) != 0U)) {
gv100_nvlink_minion_falcon_isr(g);
}
if (minion_minion_intr_fatal_v(intr) != 0U) {
gv100_nvlink_minion_falcon_intr_enable(g, false);
MINION_REG_WR32(g, minion_minion_intr_r(),
minion_minion_intr_fatal_f(1));
}
if (minion_minion_intr_nonfatal_v(intr) != 0U) {
MINION_REG_WR32(g, minion_minion_intr_r(),
minion_minion_intr_nonfatal_f(1));
}
links = minion_minion_intr_link_v(intr) &
(unsigned long) g->nvlink.enabled_links;
if (links != 0UL) {
for_each_set_bit(bit, &links, NVLINK_MAX_LINKS_SW) {
link_id = (u32)bit;
gv100_nvlink_minion_link_isr(g, link_id);
}
}
return;
}
/*
* Init TLC per link interrupts
*/
@@ -678,7 +470,7 @@ void gv100_nvlink_common_intr_enable(struct gk20a *g, unsigned long mask)
*/
void gv100_nvlink_enable_link_intr(struct gk20a *g, u32 link_id, bool enable)
{
gv100_nvlink_minion_link_intr_enable(g, link_id, enable);
g->ops.nvlink.minion.enable_link_intr(g, link_id, enable);
gv100_nvlink_dlpl_intr_enable(g, link_id, enable);
gv100_nvlink_tlc_intr_enable(g, link_id, enable);
gv100_nvlink_mif_intr_enable(g, link_id, enable);
@@ -699,7 +491,7 @@ void gv100_nvlink_isr(struct gk20a *g)
links &= g->nvlink.enabled_links;
/* As per ARCH minion must be serviced first */
gv100_nvlink_minion_isr(g);
g->ops.nvlink.minion.isr(g);
for_each_set_bit(bit, &links, NVLINK_MAX_LINKS_SW) {
link_id = (u32)bit;

View File

@@ -26,9 +26,6 @@
#include <nvgpu/types.h>
struct gk20a;
void gv100_nvlink_minion_clear_interrupts(struct gk20a *g);
void gv100_nvlink_init_minion_intr(struct gk20a *g);
void gv100_nvlink_minion_falcon_isr(struct gk20a *g);
void gv100_nvlink_common_intr_enable(struct gk20a *g, unsigned long mask);
void gv100_nvlink_init_nvlipt_intr(struct gk20a *g, u32 link_id);
void gv100_nvlink_enable_link_intr(struct gk20a *g, u32 link_id, bool enable);

View File

@@ -0,0 +1,131 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/gk20a.h>
#include <nvgpu/firmware.h>
#include <nvgpu/timers.h>
#include <nvgpu/nvlink_minion.h>
#ifdef CONFIG_TEGRA_NVLINK
/* Extract a WORD from the MINION ucode */
u32 nvgpu_nvlink_minion_extract_word(struct nvgpu_firmware *fw, u32 idx)
{
u32 out_data = 0U;
u8 byte = 0U;
u32 i = 0U;
for (i = 0U; i < 4U; i++) {
byte = fw->data[idx + i];
out_data |= ((u32)byte) << (8U * i);
}
return out_data;
}
/*
* Load minion FW and set up bootstrap
*/
int nvgpu_nvlink_minion_load(struct gk20a *g)
{
int err = 0;
struct nvgpu_firmware *nvgpu_minion_fw = NULL;
struct nvgpu_timeout timeout;
u32 delay = GR_IDLE_CHECK_DEFAULT;
bool boot_cmplte;
nvgpu_log_fn(g, " ");
if (g->ops.nvlink.minion.is_running(g)) {
return 0;
}
/* Get minion ucode binary */
nvgpu_minion_fw = nvgpu_request_firmware(g, "minion.bin", 0);
if (nvgpu_minion_fw == NULL) {
nvgpu_err(g, "minion ucode get fail");
err = -ENOENT;
goto exit;
}
/* Minion reset */
err = nvgpu_falcon_reset(g->minion_flcn);
if (err != 0) {
nvgpu_err(g, "Minion reset failed");
goto exit;
}
/* Clear interrupts */
g->ops.nvlink.minion.clear_intr(g);
err = nvgpu_nvlink_minion_load_ucode(g, nvgpu_minion_fw);
if (err != 0) {
goto exit;
}
/* set BOOTVEC to start of non-secure code */
err = nvgpu_falcon_bootstrap(g->minion_flcn, 0x0);
if (err != 0) {
nvgpu_err(g, "Minion bootstrap failed");
goto exit;
}
err = nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "Minion boot timeout init failed");
goto exit;
}
do {
err = g->ops.nvlink.minion.is_boot_complete(g, &boot_cmplte);
if (err != 0) {
goto exit;
}
if (boot_cmplte) {
nvgpu_log(g, gpu_dbg_nvlink, "MINION boot successful");
break;
}
nvgpu_usleep_range(delay, delay * 2U);
delay = min_t(unsigned int,
delay << 1, GR_IDLE_CHECK_MAX);
} while (nvgpu_timeout_expired_msg(&timeout,
"minion boot timeout") == 0);
/* Service interrupts */
g->ops.nvlink.minion.falcon_isr(g);
if (nvgpu_timeout_peek_expired(&timeout) != 0) {
err = -ETIMEDOUT;
goto exit;
}
g->ops.nvlink.minion.init_intr(g);
return err;
exit:
nvgpu_nvlink_free_minion_used_mem(g, nvgpu_minion_fw);
return err;
}
#endif

View File

@@ -24,7 +24,6 @@
#include <nvgpu/nvlink.h>
#include <nvgpu/nvlink_probe.h>
#include <nvgpu/enabled.h>
#include <nvgpu/firmware.h>
#ifdef CONFIG_TEGRA_NVLINK
/*
@@ -200,21 +199,6 @@ int nvgpu_nvlink_set_sublink_mode(struct gk20a *g,
return g->ops.nvlink.set_sublink_mode(g, link_id, is_rx_sublink, mode);
}
/* Extract a WORD from the MINION ucode */
u32 nvgpu_nvlink_minion_extract_word(struct nvgpu_firmware *fw, u32 idx)
{
u32 out_data = 0U;
u8 byte = 0U;
u32 i = 0U;
for (i = 0U; i < 4U; i++) {
byte = fw->data[idx + i];
out_data |= ((u32)byte) << (8U * i);
}
return out_data;
}
#endif
int nvgpu_nvlink_remove(struct gk20a *g)

View File

@@ -24,7 +24,6 @@
#include <nvgpu/nvgpu_common.h>
#include <nvgpu/bios.h>
#include <nvgpu/firmware.h>
#include <nvgpu/bitops.h>
#include <nvgpu/nvlink.h>
#include <nvgpu/enabled.h>
@@ -33,11 +32,11 @@
#include <nvgpu/timers.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/top.h>
#include <nvgpu/nvlink_minion.h>
#include "nvlink_gv100.h"
#include <nvgpu/hw/gv100/hw_nvlinkip_discovery_gv100.h>
#include <nvgpu/hw/gv100/hw_ioctrl_gv100.h>
#include <nvgpu/hw/gv100/hw_minion_gv100.h>
#include <nvgpu/hw/gv100/hw_nvl_gv100.h>
#include <nvgpu/hw/gv100/hw_trim_gv100.h>
@@ -94,212 +93,6 @@ static int gv100_nvlink_rxcal_en(struct gk20a *g, unsigned long mask);
*******************************************************************************
*/
/*
*-----------------------------------------------------------------------------*
* MINION API
*-----------------------------------------------------------------------------*
*/
/*
* Check if minion is up
*/
static bool gv100_nvlink_minion_is_running(struct gk20a *g)
{
/* if minion is booted and not halted, it is running */
if (((MINION_REG_RD32(g, minion_minion_status_r()) &
minion_minion_status_status_f(1)) != 0U) &&
((minion_falcon_irqstat_halt_v(
MINION_REG_RD32(g, minion_falcon_irqstat_r()))) == 0U)) {
return true;
}
return false;
}
/*
* Load minion FW and set up bootstrap
*/
static int gv100_nvlink_minion_load(struct gk20a *g)
{
int err = 0;
struct nvgpu_firmware *nvgpu_minion_fw = NULL;
struct nvgpu_timeout timeout;
u32 delay = GR_IDLE_CHECK_DEFAULT;
u32 reg;
nvgpu_log_fn(g, " ");
if (gv100_nvlink_minion_is_running(g)) {
return 0;
}
/* get mem unlock ucode binary */
nvgpu_minion_fw = nvgpu_request_firmware(g, "minion.bin", 0);
if (nvgpu_minion_fw == NULL) {
nvgpu_err(g, "minion ucode get fail");
err = -ENOENT;
goto exit;
}
/* Minion reset */
err = nvgpu_falcon_reset(g->minion_flcn);
if (err != 0) {
nvgpu_err(g, "Minion reset failed");
goto exit;
}
/* Clear interrupts */
g->ops.nvlink.intr.minion_clear_interrupts(g);
err = nvgpu_nvlink_minion_load_ucode(g, nvgpu_minion_fw);
if (err != 0) {
goto exit;
}
/* set BOOTVEC to start of non-secure code */
err = nvgpu_falcon_bootstrap(g->minion_flcn, 0x0);
if (err != 0) {
nvgpu_err(g, "Minion bootstrap failed");
goto exit;
}
err = nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "Minion boot timeout init failed");
goto exit;
}
do {
reg = MINION_REG_RD32(g, minion_minion_status_r());
if (minion_minion_status_status_v(reg) != 0U) {
/* Minion sequence completed, check status */
if (minion_minion_status_status_v(reg) !=
minion_minion_status_status_boot_v()) {
nvgpu_err(g, "MINION init sequence failed: 0x%x",
minion_minion_status_status_v(reg));
err = -EINVAL;
goto exit;
}
nvgpu_log(g, gpu_dbg_nvlink,
"MINION boot successful: 0x%x", reg);
err = 0;
break;
}
nvgpu_usleep_range(delay, delay * 2U);
delay = min_t(unsigned int,
delay << 1, GR_IDLE_CHECK_MAX);
} while (nvgpu_timeout_expired_msg(&timeout,
"minion boot timeout") == 0);
/* Service interrupts */
g->ops.nvlink.intr.minion_falcon_isr(g);
if (nvgpu_timeout_peek_expired(&timeout) != 0) {
err = -ETIMEDOUT;
goto exit;
}
g->ops.nvlink.intr.init_minion_intr(g);
return err;
exit:
nvgpu_nvlink_free_minion_used_mem(g, nvgpu_minion_fw);
return err;
}
/*
* Check if MINION command is complete
*/
static int gv100_nvlink_minion_command_complete(struct gk20a *g, u32 link_id)
{
u32 reg;
struct nvgpu_timeout timeout;
u32 delay = GR_IDLE_CHECK_DEFAULT;
int err = 0;
err = nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "Minion cmd complete timeout init failed");
return err;
}
do {
reg = MINION_REG_RD32(g, minion_nvlink_dl_cmd_r(link_id));
if (minion_nvlink_dl_cmd_ready_v(reg) == 1U) {
/* Command completed, check success */
if (minion_nvlink_dl_cmd_fault_v(reg) ==
minion_nvlink_dl_cmd_fault_fault_clear_v()) {
nvgpu_err(g, "minion cmd(%d) error: 0x%x",
link_id, reg);
reg = minion_nvlink_dl_cmd_fault_f(1);
MINION_REG_WR32(g,
minion_nvlink_dl_cmd_r(link_id), reg);
return -EINVAL;
}
/* Command success */
break;
}
nvgpu_usleep_range(delay, delay * 2U);
delay = min_t(unsigned int,
delay << 1, GR_IDLE_CHECK_MAX);
} while (nvgpu_timeout_expired_msg(&timeout,
"minion cmd timeout") == 0);
if (nvgpu_timeout_peek_expired(&timeout) != 0) {
return -ETIMEDOUT;
}
nvgpu_log(g, gpu_dbg_nvlink, "minion cmd Complete");
return err;
}
/*
* Send Minion command (can be async)
*/
int gv100_nvlink_minion_send_command(struct gk20a *g, u32 link_id,
u32 command, u32 scratch_0, bool sync)
{
int err = 0;
/* Check last command succeeded */
err = gv100_nvlink_minion_command_complete(g, link_id);
if (err != 0) {
return -EINVAL;
}
nvgpu_log(g, gpu_dbg_nvlink,
"sending MINION command 0x%x to link %d", command, link_id);
if (command == minion_nvlink_dl_cmd_command_configeom_v()) {
MINION_REG_WR32(g, minion_misc_0_r(),
minion_misc_0_scratch_swrw_0_f(scratch_0));
}
MINION_REG_WR32(g, minion_nvlink_dl_cmd_r(link_id),
minion_nvlink_dl_cmd_command_f(command) |
minion_nvlink_dl_cmd_fault_f(1));
if (sync) {
err = gv100_nvlink_minion_command_complete(g, link_id);
}
return err;
}
/* MINION API COMMANDS */
/*
* Init UPHY
*/
@@ -341,8 +134,8 @@ static int gv100_nvlink_minion_init_uphy(struct gk20a *g, unsigned long mask,
/* Check if INIT PLL is done on link */
if ((BIT(master_pll) & g->nvlink.init_pll_done) == 0U) {
err = gv100_nvlink_minion_send_command(g, master_pll,
g->nvlink.initpll_cmd, 0, sync);
err = g->ops.nvlink.minion.send_dlcmd(g, master_pll,
g->nvlink.initpll_cmd, sync);
if (err != 0) {
nvgpu_err(g, " Error sending INITPLL to minion");
return err;
@@ -361,8 +154,8 @@ static int gv100_nvlink_minion_init_uphy(struct gk20a *g, unsigned long mask,
/* INITPHY commands */
for_each_set_bit(bit, &mask, NVLINK_MAX_LINKS_SW) {
link_id = (u32)bit;
err = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_initphy_v(), 0, sync);
err = g->ops.nvlink.minion.send_dlcmd(g, link_id,
NVGPU_NVLINK_MINION_DLCMD_INITPHY, sync);
if (err != 0) {
nvgpu_err(g, "Error on INITPHY minion DL command %u",
link_id);
@@ -392,8 +185,8 @@ static int gv100_nvlink_minion_configure_ac_coupling(struct gk20a *g,
DLPL_REG_WR32(g, link_id, nvl_link_config_r(), temp);
err = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_setacmode_v(), 0, sync);
err = g->ops.nvlink.minion.send_dlcmd(g, link_id,
NVGPU_NVLINK_MINION_DLCMD_SETACMODE, sync);
if (err != 0) {
return err;
@@ -415,9 +208,8 @@ int gv100_nvlink_minion_data_ready_en(struct gk20a *g,
for_each_set_bit(bit, &link_mask, NVLINK_MAX_LINKS_SW) {
link_id = (u32)bit;
ret = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_initlaneenable_v(), 0,
sync);
ret = g->ops.nvlink.minion.send_dlcmd(g, link_id,
NVGPU_NVLINK_MINION_DLCMD_INITLANEENABLE, sync);
if (ret != 0) {
nvgpu_err(g, "Failed initlaneenable on link %u",
link_id);
@@ -427,8 +219,8 @@ int gv100_nvlink_minion_data_ready_en(struct gk20a *g,
for_each_set_bit(bit, &link_mask, NVLINK_MAX_LINKS_SW) {
link_id = (u32)bit;
ret = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_initdlpl_v(), 0, sync);
ret = g->ops.nvlink.minion.send_dlcmd(g, link_id,
NVGPU_NVLINK_MINION_DLCMD_INITDLPL, sync);
if (ret != 0) {
nvgpu_err(g, "Failed initdlpl on link %u", link_id);
return ret;
@@ -445,8 +237,8 @@ static int gv100_nvlink_minion_lane_disable(struct gk20a *g, u32 link_id,
{
int err = 0;
err = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_lanedisable_v(), 0, sync);
err = g->ops.nvlink.minion.send_dlcmd(g, link_id,
NVGPU_NVLINK_MINION_DLCMD_LANEDISABLE, sync);
if (err != 0) {
nvgpu_err(g, " failed to disable lane on %d", link_id);
@@ -463,8 +255,8 @@ static int gv100_nvlink_minion_lane_shutdown(struct gk20a *g, u32 link_id,
{
int err = 0;
err = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_laneshutdown_v(), 0, sync);
err = g->ops.nvlink.minion.send_dlcmd(g, link_id,
NVGPU_NVLINK_MINION_DLCMD_LANESHUTDOWN, sync);
if (err != 0) {
nvgpu_err(g, " failed to shutdown lane on %d", link_id);
@@ -492,7 +284,7 @@ static int gv100_nvlink_state_load_hal(struct gk20a *g)
unsigned long discovered = g->nvlink.discovered_links;
g->ops.nvlink.intr.common_intr_enable(g, discovered);
return gv100_nvlink_minion_load(g);
return nvgpu_nvlink_minion_load(g);
}
#define TRIM_SYS_NVLINK_CTRL(i) (trim_sys_nvlink0_ctrl_r() + 16U*(i))
@@ -1856,12 +1648,8 @@ int gv100_nvlink_speed_config(struct gk20a *g)
{
g->nvlink.speed = nvgpu_nvlink_speed_20G;
g->nvlink.initpll_ordinal = INITPLL_1;
g->nvlink.initpll_cmd = minion_nvlink_dl_cmd_command_initpll_1_v();
g->nvlink.initpll_cmd = NVGPU_NVLINK_MINION_DLCMD_INITPLL_1;
return 0;
}
u32 gv100_nvlink_falcon_base_addr(struct gk20a *g)
{
return g->nvlink.minion_base;
}
#endif /* CONFIG_TEGRA_NVLINK */

View File

@@ -30,8 +30,6 @@ struct gk20a;
int gv100_nvlink_discover_ioctrl(struct gk20a *g);
int gv100_nvlink_discover_link(struct gk20a *g);
int gv100_nvlink_init(struct gk20a *g);
int gv100_nvlink_minion_send_command(struct gk20a *g, u32 link_id, u32 command,
u32 scratch_0, bool sync);
int gv100_nvlink_setup_pll(struct gk20a *g, unsigned long link_mask);
int gv100_nvlink_minion_data_ready_en(struct gk20a *g,
unsigned long link_mask, bool sync);
@@ -55,5 +53,4 @@ int gv100_nvlink_interface_disable(struct gk20a *g);
int gv100_nvlink_shutdown(struct gk20a *g);
int gv100_nvlink_early_init(struct gk20a *g);
int gv100_nvlink_speed_config(struct gk20a *g);
u32 gv100_nvlink_falcon_base_addr(struct gk20a *g);
#endif

View File

@@ -30,11 +30,11 @@
#include <nvgpu/timers.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/bios.h>
#include <nvgpu/nvlink_minion.h>
#include "nvlink_gv100.h"
#include "nvlink_tu104.h"
#include <nvgpu/hw/tu104/hw_minion_tu104.h>
#include <nvgpu/hw/tu104/hw_nvl_tu104.h>
int tu104_nvlink_rxdet(struct gk20a *g, u32 link_id)
@@ -43,16 +43,16 @@ int tu104_nvlink_rxdet(struct gk20a *g, u32 link_id)
u32 reg;
struct nvgpu_timeout timeout;
ret = gv100_nvlink_minion_send_command(g, link_id,
0x00000005U, 0, true);
ret = g->ops.nvlink.minion.send_dlcmd(g, link_id,
NVGPU_NVLINK_MINION_DLCMD_INITRXTERM, true);
if (ret != 0) {
nvgpu_err(g, "Error during INITRXTERM minion DLCMD on link %u",
link_id);
return ret;
}
ret = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_turing_rxdet_v(), 0, true);
ret = g->ops.nvlink.minion.send_dlcmd(g, link_id,
NVGPU_NVLINK_MINION_DLCMD_TURING_RXDET, true);
if (ret != 0) {
nvgpu_err(g, "Error during RXDET minion DLCMD on link %u",
link_id);
@@ -98,9 +98,8 @@ int tu104_nvlink_setup_pll(struct gk20a *g, unsigned long link_mask)
for_each_set_bit(bit, &link_mask, NVLINK_MAX_LINKS_SW) {
link_id = (u32)bit;
ret = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_txclkswitch_pll_v(),
0, true);
ret = g->ops.nvlink.minion.send_dlcmd(g, link_id,
NVGPU_NVLINK_MINION_DLCMD_TXCLKSWITCH_PLL, true);
if (ret != 0) {
nvgpu_err(g, "Error: TXCLKSWITCH_PLL dlcmd on link %u",
link_id);
@@ -212,9 +211,8 @@ int tu104_nvlink_minion_data_ready_en(struct gk20a *g,
*/
for_each_set_bit(bit, &link_mask, NVLINK_MAX_LINKS_SW) {
link_id = (u32)bit;
ret = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_initdlpl_v(), 0,
sync);
ret = g->ops.nvlink.minion.send_dlcmd(g, link_id,
NVGPU_NVLINK_MINION_DLCMD_INITDLPL, sync);
if (ret != 0) {
nvgpu_err(g, "Minion initdlpl failed on link %u",
link_id);
@@ -223,9 +221,9 @@ int tu104_nvlink_minion_data_ready_en(struct gk20a *g,
}
for_each_set_bit(bit, &link_mask, NVLINK_MAX_LINKS_SW) {
link_id = (u32)bit;
ret = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_turing_initdlpl_to_chipa_v(),
0, sync);
ret = g->ops.nvlink.minion.send_dlcmd(g, link_id,
NVGPU_NVLINK_MINION_DLCMD_TURING_INITDLPL_TO_CHIPA,
sync);
if (ret != 0) {
nvgpu_err(g, "Minion initdlpl_to_chipA failed on link\
%u", link_id);
@@ -234,9 +232,8 @@ int tu104_nvlink_minion_data_ready_en(struct gk20a *g,
}
for_each_set_bit(bit, &link_mask, NVLINK_MAX_LINKS_SW) {
link_id = (u32)bit;
ret = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_inittl_v(), 0,
sync);
ret = g->ops.nvlink.minion.send_dlcmd(g, link_id,
NVGPU_NVLINK_MINION_DLCMD_INITTL, sync);
if (ret != 0) {
nvgpu_err(g, "Minion inittl failed on link %u",
link_id);
@@ -245,9 +242,8 @@ int tu104_nvlink_minion_data_ready_en(struct gk20a *g,
}
for_each_set_bit(bit, &link_mask, NVLINK_MAX_LINKS_SW) {
link_id = (u32)bit;
ret = gv100_nvlink_minion_send_command(g, link_id,
minion_nvlink_dl_cmd_command_initlaneenable_v(), 0,
sync);
ret = g->ops.nvlink.minion.send_dlcmd(g, link_id,
NVGPU_NVLINK_MINION_DLCMD_INITLANEENABLE, sync);
if (ret != 0) {
nvgpu_err(g, "Minion initlaneenable failed on link %u",
link_id);
@@ -275,13 +271,11 @@ int tu104_nvlink_speed_config(struct gk20a *g)
switch (g->nvlink.initpll_ordinal) {
case INITPLL_1:
g->nvlink.speed = nvgpu_nvlink_speed_20G;
g->nvlink.initpll_cmd =
minion_nvlink_dl_cmd_command_initpll_1_v();
g->nvlink.initpll_cmd = NVGPU_NVLINK_MINION_DLCMD_INITPLL_1;
break;
case INITPLL_7:
g->nvlink.speed = nvgpu_nvlink_speed_16G;
g->nvlink.initpll_cmd =
minion_nvlink_dl_cmd_command_initpll_7_v();
g->nvlink.initpll_cmd = NVGPU_NVLINK_MINION_DLCMD_INITPLL_7;
break;
default:
nvgpu_err(g, "Nvlink initpll %d from VBIOS not supported.",
@@ -292,4 +286,5 @@ int tu104_nvlink_speed_config(struct gk20a *g)
return ret;
}
#endif /* CONFIG_TEGRA_NVLINK */

View File

@@ -73,6 +73,7 @@
#include "common/nvdec/nvdec_gp106.h"
#include "common/nvlink/init/device_reginit_gv100.h"
#include "common/nvlink/intr_and_err_handling_gv100.h"
#include "hal/nvlink/minion_gv100.h"
#include "common/nvlink/nvlink_gv100.h"
#include "common/nvlink/nvlink_tu104.h"
#include "common/pmu/perf/perf_gv100.h"
@@ -1093,7 +1094,6 @@ static const struct gpu_ops gv100_ops = {
},
#if defined(CONFIG_TEGRA_NVLINK)
.nvlink = {
.falcon_base_addr = gv100_nvlink_falcon_base_addr,
.discover_ioctrl = gv100_nvlink_discover_ioctrl,
.discover_link = gv100_nvlink_discover_link,
.init = gv100_nvlink_init,
@@ -1117,11 +1117,21 @@ static const struct gpu_ops gv100_ops = {
.shutdown = gv100_nvlink_shutdown,
.early_init = gv100_nvlink_early_init,
.speed_config = gv100_nvlink_speed_config,
.minion = {
.base_addr = gv100_nvlink_minion_base_addr,
.is_running = gv100_nvlink_minion_is_running,
.is_boot_complete =
gv100_nvlink_minion_is_boot_complete,
.get_dlcmd_ordinal =
gv100_nvlink_minion_get_dlcmd_ordinal,
.send_dlcmd = gv100_nvlink_minion_send_dlcmd,
.clear_intr = gv100_nvlink_minion_clear_intr,
.init_intr = gv100_nvlink_minion_init_intr,
.enable_link_intr = gv100_nvlink_minion_enable_link_intr,
.falcon_isr = gv100_nvlink_minion_falcon_isr,
.isr = gv100_nvlink_minion_isr,
},
.intr = {
.minion_clear_interrupts =
gv100_nvlink_minion_clear_interrupts,
.init_minion_intr = gv100_nvlink_init_minion_intr,
.minion_falcon_isr = gv100_nvlink_minion_falcon_isr,
.common_intr_enable = gv100_nvlink_common_intr_enable,
.init_nvlipt_intr = gv100_nvlink_init_nvlipt_intr,
.enable_link_intr = gv100_nvlink_enable_link_intr,

View File

@@ -0,0 +1,410 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifdef CONFIG_TEGRA_NVLINK
#include <nvgpu/io.h>
#include <nvgpu/timers.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/nvlink_minion.h>
#include "minion_gv100.h"
#include <nvgpu/hw/gv100/hw_minion_gv100.h>
#define MINION_FALCON_INTR_MASK (minion_falcon_irqmset_wdtmr_set_f() | \
minion_falcon_irqmset_halt_set_f() | \
minion_falcon_irqmset_exterr_set_f()| \
minion_falcon_irqmset_swgen0_set_f()| \
minion_falcon_irqmset_swgen1_set_f())
#define MINION_FALCON_INTR_DEST ( \
minion_falcon_irqdest_host_wdtmr_host_f() | \
minion_falcon_irqdest_host_halt_host_f() | \
minion_falcon_irqdest_host_exterr_host_f() | \
minion_falcon_irqdest_host_swgen0_host_f() | \
minion_falcon_irqdest_host_swgen1_host_f() | \
minion_falcon_irqdest_target_wdtmr_host_normal_f() | \
minion_falcon_irqdest_target_halt_host_normal_f() | \
minion_falcon_irqdest_target_exterr_host_normal_f() | \
minion_falcon_irqdest_target_swgen0_host_normal_f() | \
minion_falcon_irqdest_target_swgen1_host_normal_f())
u32 gv100_nvlink_minion_base_addr(struct gk20a *g)
{
return g->nvlink.minion_base;
}
/*
* Check if minion is up
*/
bool gv100_nvlink_minion_is_running(struct gk20a *g)
{
/* if minion is booted and not halted, it is running */
if (((MINION_REG_RD32(g, minion_minion_status_r()) &
minion_minion_status_status_f(1)) != 0U) &&
((minion_falcon_irqstat_halt_v(
MINION_REG_RD32(g, minion_falcon_irqstat_r()))) == 0U)) {
return true;
}
return false;
}
/*
* Check if minion ucode boot is complete.
*/
int gv100_nvlink_minion_is_boot_complete(struct gk20a *g, bool *boot_cmplte)
{
u32 reg;
int err = 0;
reg = MINION_REG_RD32(g, minion_minion_status_r());
*boot_cmplte = false;
if (minion_minion_status_status_v(reg) != 0U) {
/* Minion sequence completed, check status */
if (minion_minion_status_status_v(reg) ==
minion_minion_status_status_boot_v()) {
*boot_cmplte = true;
} else {
nvgpu_err(g, "MINION init sequence failed: 0x%x",
minion_minion_status_status_v(reg));
err = -EINVAL;
}
}
return err;
}
/*
* Check if MINION command is complete
*/
static int gv100_nvlink_minion_command_complete(struct gk20a *g, u32 link_id)
{
u32 reg;
struct nvgpu_timeout timeout;
u32 delay = GR_IDLE_CHECK_DEFAULT;
int err = 0;
err = nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "Minion cmd complete timeout init failed");
return err;
}
do {
reg = MINION_REG_RD32(g, minion_nvlink_dl_cmd_r(link_id));
if (minion_nvlink_dl_cmd_ready_v(reg) == 1U) {
/* Command completed, check success */
if (minion_nvlink_dl_cmd_fault_v(reg) ==
minion_nvlink_dl_cmd_fault_fault_clear_v()) {
nvgpu_err(g, "minion cmd(%d) error: 0x%x",
link_id, reg);
reg = minion_nvlink_dl_cmd_fault_f(1);
MINION_REG_WR32(g,
minion_nvlink_dl_cmd_r(link_id), reg);
return -EINVAL;
}
/* Command success */
break;
}
nvgpu_usleep_range(delay, delay * 2U);
delay = min_t(unsigned int,
delay << 1, GR_IDLE_CHECK_MAX);
} while (nvgpu_timeout_expired_msg(&timeout,
"minion cmd timeout") == 0);
if (nvgpu_timeout_peek_expired(&timeout) != 0) {
return -ETIMEDOUT;
}
nvgpu_log(g, gpu_dbg_nvlink, "minion cmd Complete");
return err;
}
u32 gv100_nvlink_minion_get_dlcmd_ordinal(struct gk20a *g,
enum nvgpu_nvlink_minion_dlcmd dlcmd)
{
u32 dlcmd_ordinal;
switch (dlcmd) {
case NVGPU_NVLINK_MINION_DLCMD_INITPHY:
dlcmd_ordinal = minion_nvlink_dl_cmd_command_initphy_v();
break;
case NVGPU_NVLINK_MINION_DLCMD_INITLANEENABLE:
dlcmd_ordinal = minion_nvlink_dl_cmd_command_initlaneenable_v();
break;
case NVGPU_NVLINK_MINION_DLCMD_INITDLPL:
dlcmd_ordinal = minion_nvlink_dl_cmd_command_initdlpl_v();
break;
case NVGPU_NVLINK_MINION_DLCMD_LANEDISABLE:
dlcmd_ordinal = minion_nvlink_dl_cmd_command_lanedisable_v();
break;
case NVGPU_NVLINK_MINION_DLCMD_SETACMODE:
dlcmd_ordinal = minion_nvlink_dl_cmd_command_setacmode_v();
break;
case NVGPU_NVLINK_MINION_DLCMD_LANESHUTDOWN:
dlcmd_ordinal = minion_nvlink_dl_cmd_command_laneshutdown_v();
break;
case NVGPU_NVLINK_MINION_DLCMD_INITPLL_1:
dlcmd_ordinal = minion_nvlink_dl_cmd_command_initpll_1_v();
break;
case NVGPU_NVLINK_MINION_DLCMD_INITPLL_7:
dlcmd_ordinal = minion_nvlink_dl_cmd_command_initpll_7_v();
break;
default:
dlcmd_ordinal = U32_MAX;
break;
}
return dlcmd_ordinal;
}
/*
* Send Minion command (can be async)
*/
int gv100_nvlink_minion_send_dlcmd(struct gk20a *g, u32 link_id,
enum nvgpu_nvlink_minion_dlcmd dlcmd, bool sync)
{
int err = 0;
u32 dlcmd_ordinal;
dlcmd_ordinal = g->ops.nvlink.minion.get_dlcmd_ordinal(g, dlcmd);
if (dlcmd_ordinal == U32_MAX) {
nvgpu_err(g, "DLCMD not supported");
return -EPERM;
}
/* Check last command succeeded */
err = gv100_nvlink_minion_command_complete(g, link_id);
if (err != 0) {
return -EINVAL;
}
nvgpu_log(g, gpu_dbg_nvlink, "sending MINION command 0x%x to link %d",
dlcmd_ordinal, link_id);
MINION_REG_WR32(g, minion_nvlink_dl_cmd_r(link_id),
minion_nvlink_dl_cmd_command_f(dlcmd_ordinal) |
minion_nvlink_dl_cmd_fault_f(1));
if (sync) {
err = gv100_nvlink_minion_command_complete(g, link_id);
}
return err;
}
/*
* Clear minion Interrupts
*/
void gv100_nvlink_minion_clear_intr(struct gk20a *g)
{
nvgpu_falcon_set_irq(g->minion_flcn, true, MINION_FALCON_INTR_MASK,
MINION_FALCON_INTR_DEST);
}
/*
* Initialization of link specific interrupts
*/
void gv100_nvlink_minion_enable_link_intr(struct gk20a *g, u32 link_id,
bool enable)
{
u32 intr, links;
/* Only stall interrupts for now */
intr = MINION_REG_RD32(g, minion_minion_intr_stall_en_r());
links = minion_minion_intr_stall_en_link_v(intr);
if (enable) {
links |= BIT32(link_id);
} else {
links &= ~BIT32(link_id);
}
intr = set_field(intr, minion_minion_intr_stall_en_link_m(),
minion_minion_intr_stall_en_link_f(links));
MINION_REG_WR32(g, minion_minion_intr_stall_en_r(), intr);
}
/*
* Initialization of falcon interrupts
*/
static void gv100_nvlink_minion_falcon_intr_enable(struct gk20a *g, bool enable)
{
u32 reg;
reg = MINION_REG_RD32(g, minion_minion_intr_stall_en_r());
if (enable) {
reg = set_field(reg, minion_minion_intr_stall_en_fatal_m(),
minion_minion_intr_stall_en_fatal_enable_f());
reg = set_field(reg, minion_minion_intr_stall_en_nonfatal_m(),
minion_minion_intr_stall_en_nonfatal_enable_f());
reg = set_field(reg, minion_minion_intr_stall_en_falcon_stall_m(),
minion_minion_intr_stall_en_falcon_stall_enable_f());
reg = set_field(reg, minion_minion_intr_stall_en_falcon_nostall_m(),
minion_minion_intr_stall_en_falcon_nostall_enable_f());
} else {
reg = set_field(reg, minion_minion_intr_stall_en_fatal_m(),
minion_minion_intr_stall_en_fatal_disable_f());
reg = set_field(reg, minion_minion_intr_stall_en_nonfatal_m(),
minion_minion_intr_stall_en_nonfatal_disable_f());
reg = set_field(reg, minion_minion_intr_stall_en_falcon_stall_m(),
minion_minion_intr_stall_en_falcon_stall_disable_f());
reg = set_field(reg, minion_minion_intr_stall_en_falcon_nostall_m(),
minion_minion_intr_stall_en_falcon_nostall_disable_f());
}
MINION_REG_WR32(g, minion_minion_intr_stall_en_r(), reg);
}
/*
* Initialize minion IP interrupts
*/
void gv100_nvlink_minion_init_intr(struct gk20a *g)
{
/* Disable non-stall tree */
MINION_REG_WR32(g, minion_minion_intr_nonstall_en_r(), 0x0);
gv100_nvlink_minion_falcon_intr_enable(g, true);
}
/*
* Falcon specific ISR handling
*/
void gv100_nvlink_minion_falcon_isr(struct gk20a *g)
{
u32 intr;
intr = MINION_REG_RD32(g, minion_falcon_irqstat_r()) &
MINION_REG_RD32(g, minion_falcon_irqmask_r());
if (intr == 0U) {
return;
}
if ((intr & minion_falcon_irqstat_exterr_true_f()) != 0U) {
nvgpu_err(g, "falcon ext addr: 0x%x 0x%x 0x%x",
MINION_REG_RD32(g, minion_falcon_csberrstat_r()),
MINION_REG_RD32(g, minion_falcon_csberr_info_r()),
MINION_REG_RD32(g, minion_falcon_csberr_addr_r()));
}
MINION_REG_WR32(g, minion_falcon_irqsclr_r(), intr);
nvgpu_err(g, "fatal minion irq: 0x%08x", intr);
return;
}
/*
* link specific isr
*/
static void gv100_nvlink_minion_link_isr(struct gk20a *g, u32 link_id)
{
u32 intr, code;
bool fatal = false;
intr = MINION_REG_RD32(g, minion_nvlink_link_intr_r(link_id));
code = minion_nvlink_link_intr_code_v(intr);
if (code == minion_nvlink_link_intr_code_swreq_v()) {
nvgpu_err(g, " Intr SWREQ, link: %d subcode: %x",
link_id, minion_nvlink_link_intr_subcode_v(intr));
} else if (code == minion_nvlink_link_intr_code_pmdisabled_v()) {
nvgpu_err(g, " Fatal Intr PMDISABLED, link: %d subcode: %x",
link_id, minion_nvlink_link_intr_subcode_v(intr));
fatal = true;
} else if (code == minion_nvlink_link_intr_code_na_v()) {
nvgpu_err(g, " Fatal Intr NA, link: %d subcode: %x",
link_id, minion_nvlink_link_intr_subcode_v(intr));
fatal = true;
} else if (code == minion_nvlink_link_intr_code_dlreq_v()) {
nvgpu_err(g, " Fatal Intr DLREQ, link: %d subcode: %x",
link_id, minion_nvlink_link_intr_subcode_v(intr));
fatal = true;
} else {
nvgpu_err(g, " Fatal Intr UNKN:%x, link: %d subcode: %x", code,
link_id, minion_nvlink_link_intr_subcode_v(intr));
fatal = true;
}
if (fatal) {
g->ops.nvlink.minion.enable_link_intr(g, link_id, false);
}
intr = set_field(intr, minion_nvlink_link_intr_state_m(),
minion_nvlink_link_intr_state_f(1));
MINION_REG_WR32(g, minion_nvlink_link_intr_r(link_id), intr);
return;
}
/*
* Global minion routine to service interrupts
*/
void gv100_nvlink_minion_isr(struct gk20a *g) {
u32 intr, link_id;
unsigned long links;
unsigned long bit;
intr = MINION_REG_RD32(g, minion_minion_intr_r()) &
MINION_REG_RD32(g, minion_minion_intr_stall_en_r());
if ((minion_minion_intr_falcon_stall_v(intr) != 0U) ||
(minion_minion_intr_falcon_nostall_v(intr) != 0U)) {
gv100_nvlink_minion_falcon_isr(g);
}
if (minion_minion_intr_fatal_v(intr) != 0U) {
gv100_nvlink_minion_falcon_intr_enable(g, false);
MINION_REG_WR32(g, minion_minion_intr_r(),
minion_minion_intr_fatal_f(1));
}
if (minion_minion_intr_nonfatal_v(intr) != 0U) {
MINION_REG_WR32(g, minion_minion_intr_r(),
minion_minion_intr_nonfatal_f(1));
}
links = minion_minion_intr_link_v(intr) &
(unsigned long) g->nvlink.enabled_links;
if (links != 0UL) {
for_each_set_bit(bit, &links, NVLINK_MAX_LINKS_SW) {
link_id = (u32)bit;
gv100_nvlink_minion_link_isr(g, link_id);
}
}
return;
}
#endif /* CONFIG_TEGRA_NVLINK */

View File

@@ -0,0 +1,44 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef MINION_GV100_H
#define MINION_GV100_H
#include <nvgpu/types.h>
enum nvgpu_nvlink_minion_dlcmd;
struct gk20a;
u32 gv100_nvlink_minion_base_addr(struct gk20a *g);
bool gv100_nvlink_minion_is_running(struct gk20a *g);
int gv100_nvlink_minion_is_boot_complete(struct gk20a *g, bool *boot_cmplte);
u32 gv100_nvlink_minion_get_dlcmd_ordinal(struct gk20a *g,
enum nvgpu_nvlink_minion_dlcmd dlcmd);
int gv100_nvlink_minion_send_dlcmd(struct gk20a *g, u32 link_id,
enum nvgpu_nvlink_minion_dlcmd dlcmd, bool sync);
void gv100_nvlink_minion_clear_intr(struct gk20a *g);
void gv100_nvlink_minion_init_intr(struct gk20a *g);
void gv100_nvlink_minion_enable_link_intr(struct gk20a *g, u32 link_id,
bool enable);
void gv100_nvlink_minion_falcon_isr(struct gk20a *g);
void gv100_nvlink_minion_isr(struct gk20a *g);
#endif /* MINION_GV100_H */

View File

@@ -0,0 +1,63 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifdef CONFIG_TEGRA_NVLINK
#include <nvgpu/nvlink_minion.h>
#include "minion_gv100.h"
#include "minion_tu104.h"
#include <nvgpu/hw/tu104/hw_minion_tu104.h>
struct gk20a;
u32 tu104_nvlink_minion_get_dlcmd_ordinal(struct gk20a *g,
enum nvgpu_nvlink_minion_dlcmd dlcmd)
{
u32 dlcmd_ordinal;
switch (dlcmd) {
case NVGPU_NVLINK_MINION_DLCMD_INITRXTERM:
dlcmd_ordinal = 0x05U;
break;
case NVGPU_NVLINK_MINION_DLCMD_TURING_RXDET:
dlcmd_ordinal = minion_nvlink_dl_cmd_command_turing_rxdet_v();
break;
case NVGPU_NVLINK_MINION_DLCMD_TXCLKSWITCH_PLL:
dlcmd_ordinal = minion_nvlink_dl_cmd_command_txclkswitch_pll_v();
break;
case NVGPU_NVLINK_MINION_DLCMD_TURING_INITDLPL_TO_CHIPA:
dlcmd_ordinal = minion_nvlink_dl_cmd_command_turing_initdlpl_to_chipa_v();
break;
case NVGPU_NVLINK_MINION_DLCMD_INITTL:
dlcmd_ordinal = minion_nvlink_dl_cmd_command_inittl_v();
break;
default:
dlcmd_ordinal = gv100_nvlink_minion_get_dlcmd_ordinal(g, dlcmd);
break;
}
return dlcmd_ordinal;
}
#endif /* CONFIG_TEGRA_NVLINK */

View File

@@ -0,0 +1,33 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef MINION_TU104_H
#define MINION_TU104_H
#include <nvgpu/types.h>
enum nvgpu_nvlink_minion_dlcmd;
struct gk20a;
u32 tu104_nvlink_minion_get_dlcmd_ordinal(struct gk20a *g,
enum nvgpu_nvlink_minion_dlcmd dlcmd);
#endif /* MINION_TU104_H */

View File

@@ -70,6 +70,7 @@ struct nvgpu_gr_zbc_query_params;
struct nvgpu_channel_hw_state;
struct nvgpu_engine_status_info;
struct nvgpu_pbdma_status_info;
enum nvgpu_nvlink_minion_dlcmd;
#include <nvgpu/lock.h>
#include <nvgpu/thread.h>
@@ -1534,7 +1535,6 @@ struct gpu_ops {
int (*read_gcplex_config_fuse)(struct gk20a *g, u32 *val);
} fuse;
struct {
u32 (*falcon_base_addr)(struct gk20a *g);
int (*init)(struct gk20a *g);
int (*discover_ioctrl)(struct gk20a *g);
int (*discover_link)(struct gk20a *g);
@@ -1566,9 +1566,23 @@ struct gpu_ops {
int (*early_init)(struct gk20a *g);
int (*speed_config)(struct gk20a *g);
struct {
void (*minion_clear_interrupts)(struct gk20a *g);
void (*init_minion_intr)(struct gk20a *g);
void (*minion_falcon_isr)(struct gk20a *g);
u32 (*base_addr)(struct gk20a *g);
bool (*is_running)(struct gk20a *g);
int (*is_boot_complete)(struct gk20a *g,
bool *boot_cmplte);
u32 (*get_dlcmd_ordinal)(struct gk20a *g,
enum nvgpu_nvlink_minion_dlcmd dlcmd);
int (*send_dlcmd)(struct gk20a *g, u32 link_id,
enum nvgpu_nvlink_minion_dlcmd dlcmd,
bool sync);
void (*clear_intr)(struct gk20a *g);
void (*init_intr)(struct gk20a *g);
void (*enable_link_intr)(struct gk20a *g, u32 link_id,
bool enable);
void (*falcon_isr)(struct gk20a *g);
void (*isr)(struct gk20a *g);
} minion;
struct {
void (*common_intr_enable)(struct gk20a *g,
unsigned long mask);
void (*init_nvlipt_intr)(struct gk20a *g, u32 link_id);

View File

@@ -24,6 +24,7 @@
#define NVGPU_NVLINK_H
#include <nvgpu/types.h>
#include <nvgpu/nvlink_minion.h>
#define NVLINK_MAX_LINKS_SW 6U
@@ -33,8 +34,6 @@
#define INITPLL_1 U8(1)
#define INITPLL_7 U8(7)
#define MINION_REG_RD32(g, off) gk20a_readl(g, (g)->nvlink.minion_base + (off))
#define MINION_REG_WR32(g, off, v) gk20a_writel(g, (g)->nvlink.minion_base + (off), (v))
#define IOCTRL_REG_RD32(g, off) gk20a_readl(g, (g)->nvlink.ioctrl_base + (off))
#define IOCTRL_REG_WR32(g, off, v) gk20a_writel(g, (g)->nvlink.ioctrl_base + (off), (v))
#define MIF_REG_RD32(g, id, off) gk20a_readl(g, (g)->nvlink.links[(id)].mif_base + (off))
@@ -214,7 +213,7 @@ struct nvgpu_nvlink_dev {
u32 init_pll_done;
enum nvgpu_nvlink_speed speed;
u32 initpll_cmd;
enum nvgpu_nvlink_minion_dlcmd initpll_cmd;
/* tlc cached errors */
u32 tlc_rx_err_status_0[NVLINK_MAX_LINKS_SW];
@@ -225,9 +224,6 @@ struct nvgpu_nvlink_dev {
void *priv;
};
void nvgpu_nvlink_free_minion_used_mem(struct gk20a *g,
struct nvgpu_firmware *nvgpu_minion_fw);
u32 nvgpu_nvlink_minion_extract_word(struct nvgpu_firmware *fw, u32 idx);
int nvgpu_nvlink_speed_config(struct gk20a *g);
int nvgpu_nvlink_early_init(struct gk20a *g);
int nvgpu_nvlink_link_early_init(struct gk20a *g);
@@ -248,8 +244,6 @@ int nvgpu_nvlink_set_sublink_mode(struct gk20a *g,
int nvgpu_nvlink_enumerate(struct gk20a *g);
int nvgpu_nvlink_train(struct gk20a *g, u32 link_id, bool from_off);
int nvgpu_nvlink_remove(struct gk20a *g);
int nvgpu_nvlink_minion_load_ucode(struct gk20a *g,
struct nvgpu_firmware *nvgpu_minion_fw);
void nvgpu_mss_nvlink_init_credits(struct gk20a *g);
#endif /* NVGPU_NVLINK_H */

View File

@@ -0,0 +1,57 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NVLINK_MINION_H
#define NVGPU_NVLINK_MINION_H
#include <nvgpu/types.h>
struct gk20a;
struct nvgpu_firmware;
#define MINION_REG_RD32(g, off) nvgpu_readl(g, (g)->nvlink.minion_base + (off))
#define MINION_REG_WR32(g, off, v) nvgpu_writel(g, (g)->nvlink.minion_base + (off), (v))
enum nvgpu_nvlink_minion_dlcmd {
NVGPU_NVLINK_MINION_DLCMD_INITPHY,
NVGPU_NVLINK_MINION_DLCMD_INITLANEENABLE,
NVGPU_NVLINK_MINION_DLCMD_INITDLPL,
NVGPU_NVLINK_MINION_DLCMD_INITRXTERM,
NVGPU_NVLINK_MINION_DLCMD_INITTL,
NVGPU_NVLINK_MINION_DLCMD_LANEDISABLE,
NVGPU_NVLINK_MINION_DLCMD_SETACMODE,
NVGPU_NVLINK_MINION_DLCMD_LANESHUTDOWN,
NVGPU_NVLINK_MINION_DLCMD_TXCLKSWITCH_PLL,
NVGPU_NVLINK_MINION_DLCMD_INITPLL_1,
NVGPU_NVLINK_MINION_DLCMD_INITPLL_7,
NVGPU_NVLINK_MINION_DLCMD_TURING_INITDLPL_TO_CHIPA,
NVGPU_NVLINK_MINION_DLCMD_TURING_RXDET,
NVGPU_NVLINK_MINION_DLCMD__LAST,
};
u32 nvgpu_nvlink_minion_extract_word(struct nvgpu_firmware *fw, u32 idx);
int nvgpu_nvlink_minion_load(struct gk20a *g);
int nvgpu_nvlink_minion_load_ucode(struct gk20a *g,
struct nvgpu_firmware *nvgpu_minion_fw);
void nvgpu_nvlink_free_minion_used_mem(struct gk20a *g,
struct nvgpu_firmware *nvgpu_minion_fw);
#endif /* NVGPU_NVLINK_MINION_H */

View File

@@ -21,6 +21,7 @@
#include <nvgpu/gk20a.h>
#include <nvgpu/nvlink.h>
#include <nvgpu/nvlink_minion.h>
#include <nvgpu/enabled.h>
#include <nvgpu/firmware.h>

View File

@@ -78,6 +78,8 @@
#include "common/top/top_gp10b.h"
#include "common/nvlink/init/device_reginit_gv100.h"
#include "common/nvlink/intr_and_err_handling_gv100.h"
#include "hal/nvlink/minion_gv100.h"
#include "hal/nvlink/minion_tu104.h"
#include "common/nvlink/nvlink_gv100.h"
#include "common/nvlink/nvlink_tu104.h"
#include "common/sync/syncpt_cmdbuf_gv11b.h"
@@ -1130,7 +1132,6 @@ static const struct gpu_ops tu104_ops = {
},
#if defined(CONFIG_TEGRA_NVLINK)
.nvlink = {
.falcon_base_addr = gv100_nvlink_falcon_base_addr,
.discover_ioctrl = gv100_nvlink_discover_ioctrl,
.discover_link = gv100_nvlink_discover_link,
.init = gv100_nvlink_init,
@@ -1153,11 +1154,21 @@ static const struct gpu_ops tu104_ops = {
.shutdown = gv100_nvlink_shutdown,
.early_init = gv100_nvlink_early_init,
.speed_config = tu104_nvlink_speed_config,
.minion = {
.base_addr = gv100_nvlink_minion_base_addr,
.is_running = gv100_nvlink_minion_is_running,
.is_boot_complete =
gv100_nvlink_minion_is_boot_complete,
.get_dlcmd_ordinal =
tu104_nvlink_minion_get_dlcmd_ordinal,
.send_dlcmd = gv100_nvlink_minion_send_dlcmd,
.clear_intr = gv100_nvlink_minion_clear_intr,
.init_intr = gv100_nvlink_minion_init_intr,
.enable_link_intr = gv100_nvlink_minion_enable_link_intr,
.falcon_isr = gv100_nvlink_minion_falcon_isr,
.isr = gv100_nvlink_minion_isr,
},
.intr = {
.minion_clear_interrupts =
gv100_nvlink_minion_clear_interrupts,
.init_minion_intr = gv100_nvlink_init_minion_intr,
.minion_falcon_isr = gv100_nvlink_minion_falcon_isr,
.common_intr_enable = gv100_nvlink_common_intr_enable,
.init_nvlipt_intr = gv100_nvlink_init_nvlipt_intr,
.enable_link_intr = gv100_nvlink_enable_link_intr,