gpu: nvgpu: Remove external APIs in nvlink common

The Tegra SoC nvlink driver and the dGPU nvlink driver depend on
struct definitions, macros and functions exposed by the nvlink-core
driver. The nvlink-core driver is not part of the nvgpu driver, so
we must not access core-driver APIs, macros or structs directly from
the common/nvlink code; common code may only use nvgpu-internal APIs.
We therefore wrap all calls from common/nvlink.c to other drivers in
nvgpu wrappers, implement those wrappers in os/linux and
os/nvgpu_rmos, and stub them out in os/posix, as sketched below.
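
A condensed sketch of the wrapper pattern, using
nvgpu_nvlink_register_device() from this change as the example. The
os/posix stub body shown here is an assumption based on the commit
description; the stub file itself is not part of this diff.

    /* include/nvgpu/nvlink.h -- common, OS-agnostic declaration */
    int nvgpu_nvlink_register_device(struct gk20a *g);

    /* os/linux -- wrapper; only OS code touches nvlink-core types */
    int nvgpu_nvlink_register_device(struct gk20a *g)
    {
    	struct nvlink_device *ndev =
    			(struct nvlink_device *) g->nvlink.priv;

    	if (!ndev)
    		return -ENODEV;

    	return nvlink_register_device(ndev);
    }

    /* os/posix -- no nvlink-core on this OS; fail cleanly
     * (assumed body, per the commit description) */
    int nvgpu_nvlink_register_device(struct gk20a *g)
    {
    	return -ENOSYS;
    }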

Also, remove the implicit inclusion of the OS-specific nvlink header
via the common nvgpu/nvlink.h, so OS-specific code now has to include
its own header explicitly; see the example below.
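
Concretely, the common header previously selected the OS header
itself; after this change each OS includes what it needs directly.
Both snippets are taken from the diff below.

    /* Before, in the common nvgpu/nvlink.h (removed by this change) */
    #ifdef __KERNEL__
    #include <nvgpu/linux/nvlink.h>
    #elif defined(__NVGPU_POSIX__)
    #include <nvgpu/posix/nvlink.h>
    #else
    #include <nvgpu_rmos/include/nvlink.h>
    #endif

    /* After, at the top of the os/linux nvlink code */
    #include <linux/mutex.h>
    #ifdef CONFIG_TEGRA_NVLINK
    #include <nvlink/common/tegra-nvlink.h>
    #endif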

JIRA NVGPU-966

Change-Id: I65c67e247ee74088bb1253f6ae4c8d0c49420a98
Signed-off-by: Tejal Kudav <tkudav@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1990071
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Antony Clince Alex <aalex@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Tejal Kudav
Date: 2019-01-08 15:58:35 +05:30
Committed by: mobile promotions
Parent: 1ff12f065e
Commit: b83c5e4594
6 changed files with 717 additions and 596 deletions


@@ -23,6 +23,7 @@
#include <nvgpu/gk20a.h>
#include <nvgpu/nvlink.h>
#include <nvgpu/enabled.h>
#include <nvgpu/firmware.h>
#ifdef CONFIG_TEGRA_NVLINK
/*
@@ -30,10 +31,9 @@
* on the library for now
* Returns NVLINK_MAX_LINKS_SW on failure
*/
static u32 __nvgpu_nvlink_get_link(struct nvlink_device *ndev)
static u32 nvgpu_nvlink_get_link(struct gk20a *g)
{
u32 link_id;
struct gk20a *g = (struct gk20a *) ndev->priv;
if (!g)
return NVLINK_MAX_LINKS_SW;
@@ -50,48 +50,18 @@ static u32 __nvgpu_nvlink_get_link(struct nvlink_device *ndev)
return NVLINK_MAX_LINKS_SW;
}
static int nvgpu_nvlink_speed_config(struct nvlink_device *ndev)
int nvgpu_nvlink_speed_config(struct gk20a *g)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
int err;
/* For now master topology is the only one supported */
if (!ndev->is_master) {
nvgpu_err(g, "dGPU is not master for Nvlink speed config");
return -EINVAL;
}
err = g->ops.nvlink.speed_config(g);
if (err != 0) {
nvgpu_err(g, "Nvlink speed config failed.\n");
return err;
}
ndev->speed = g->nvlink.speed;
nvgpu_log(g, gpu_dbg_nvlink, "Nvlink default speed set to %d\n",
ndev->speed);
return err;
return g->ops.nvlink.speed_config(g);
}
static int nvgpu_nvlink_early_init(struct nvlink_device *ndev)
int nvgpu_nvlink_early_init(struct gk20a *g)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
int err;
/* For now master topology is the only one supported */
if (!ndev->is_master) {
nvgpu_log(g, gpu_dbg_info | gpu_dbg_nvlink,
"dGPU is not master of Nvlink link");
return -EINVAL;
}
err = g->ops.nvlink.early_init(g);
return err;
return g->ops.nvlink.early_init(g);
}
static int nvgpu_nvlink_link_early_init(struct nvlink_device *ndev)
int nvgpu_nvlink_link_early_init(struct gk20a *g)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
int err;
u32 link_id;
/*
@@ -102,46 +72,37 @@ static int nvgpu_nvlink_link_early_init(struct nvlink_device *ndev)
g->nvlink.links[link_id].remote_info.is_connected = true;
g->nvlink.links[link_id].remote_info.device_type =
nvgpu_nvlink_endp_tegra;
err = g->ops.nvlink.link_early_init(g, BIT(link_id));
return g->ops.nvlink.link_early_init(g, BIT(link_id));
if (err == 0) {
g->nvlink.links[link_id].priv = (void *) &(ndev->link);
ndev->link.priv = (void *) g;
}
return err;
}
static int nvgpu_nvlink_interface_init(struct nvlink_device *ndev)
int nvgpu_nvlink_interface_init(struct gk20a *g)
{
int err;
struct gk20a *g = (struct gk20a *) ndev->priv;
err = g->ops.nvlink.interface_init(g);
return err;
}
static int nvgpu_nvlink_interface_disable(struct nvlink_device *ndev)
int nvgpu_nvlink_interface_disable(struct gk20a *g)
{
int err = 0;
struct gk20a *g = (struct gk20a *) ndev->priv;
if (g->ops.nvlink.interface_disable)
err = g->ops.nvlink.interface_disable(g);
return err;
}
static int nvgpu_nvlink_shutdown(struct nvlink_device *ndev)
int nvgpu_nvlink_dev_shutdown(struct gk20a *g)
{
int err;
struct gk20a *g = (struct gk20a *) ndev->priv;
err = g->ops.nvlink.shutdown(g);
return 0;
}
static int nvgpu_nvlink_reg_init(struct nvlink_device *ndev)
int nvgpu_nvlink_reg_init(struct gk20a *g)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
int err;
err = g->ops.nvlink.reg_init(g);
@@ -149,347 +110,101 @@ static int nvgpu_nvlink_reg_init(struct nvlink_device *ndev)
return err;
}
static u32 nvgpu_nvlink_get_link_mode(struct nvlink_device *ndev)
u32 nvgpu_nvlink_get_link_mode(struct gk20a *g)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
u32 link_id;
u32 mode;
link_id = __nvgpu_nvlink_get_link(ndev);
link_id = nvgpu_nvlink_get_link(g);
if (link_id == NVLINK_MAX_LINKS_SW)
return -EINVAL;
mode = g->ops.nvlink.link_get_mode(g, link_id);
switch (mode) {
case nvgpu_nvlink_link_off:
return NVLINK_LINK_OFF;
case nvgpu_nvlink_link_hs:
return NVLINK_LINK_HS;
case nvgpu_nvlink_link_safe:
return NVLINK_LINK_SAFE;
case nvgpu_nvlink_link_fault:
return NVLINK_LINK_FAULT;
case nvgpu_nvlink_link_rcvy_ac:
return NVLINK_LINK_RCVY_AC;
case nvgpu_nvlink_link_rcvy_sw:
return NVLINK_LINK_RCVY_SW;
case nvgpu_nvlink_link_rcvy_rx:
return NVLINK_LINK_RCVY_RX;
case nvgpu_nvlink_link_detect:
return NVLINK_LINK_DETECT;
case nvgpu_nvlink_link_reset:
return NVLINK_LINK_RESET;
case nvgpu_nvlink_link_enable_pm:
return NVLINK_LINK_ENABLE_PM;
case nvgpu_nvlink_link_disable_pm:
return NVLINK_LINK_DISABLE_PM;
case nvgpu_nvlink_link_disable_err_detect:
return NVLINK_LINK_DISABLE_ERR_DETECT;
case nvgpu_nvlink_link_lane_disable:
return NVLINK_LINK_LANE_DISABLE;
case nvgpu_nvlink_link_lane_shutdown:
return NVLINK_LINK_LANE_SHUTDOWN;
default:
nvgpu_log(g, gpu_dbg_info | gpu_dbg_nvlink,
"unsupported mode %u", mode);
}
return NVLINK_LINK_OFF;
return g->ops.nvlink.link_get_mode(g, link_id);
}
static u32 nvgpu_nvlink_get_link_state(struct nvlink_device *ndev)
u32 nvgpu_nvlink_get_link_state(struct gk20a *g)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
u32 link_id;
link_id = __nvgpu_nvlink_get_link(ndev);
link_id = nvgpu_nvlink_get_link(g);
if (link_id == NVLINK_MAX_LINKS_SW)
return -EINVAL;
return g->ops.nvlink.link_get_state(g, link_id);
}
static int nvgpu_nvlink_set_link_mode(struct nvlink_device *ndev, u32 mode)
int nvgpu_nvlink_set_link_mode(struct gk20a *g, u32 mode)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
u32 link_id;
u32 mode_sw;
link_id = __nvgpu_nvlink_get_link(ndev);
link_id = nvgpu_nvlink_get_link(g);
if (link_id == NVLINK_MAX_LINKS_SW)
return -EINVAL;
switch (mode) {
case NVLINK_LINK_OFF:
mode_sw = nvgpu_nvlink_link_off;
break;
case NVLINK_LINK_HS:
mode_sw = nvgpu_nvlink_link_hs;
break;
case NVLINK_LINK_SAFE:
mode_sw = nvgpu_nvlink_link_safe;
break;
case NVLINK_LINK_FAULT:
mode_sw = nvgpu_nvlink_link_fault;
break;
case NVLINK_LINK_RCVY_AC:
mode_sw = nvgpu_nvlink_link_rcvy_ac;
break;
case NVLINK_LINK_RCVY_SW:
mode_sw = nvgpu_nvlink_link_rcvy_sw;
break;
case NVLINK_LINK_RCVY_RX:
mode_sw = nvgpu_nvlink_link_rcvy_rx;
break;
case NVLINK_LINK_DETECT:
mode_sw = nvgpu_nvlink_link_detect;
break;
case NVLINK_LINK_RESET:
mode_sw = nvgpu_nvlink_link_reset;
break;
case NVLINK_LINK_ENABLE_PM:
mode_sw = nvgpu_nvlink_link_enable_pm;
break;
case NVLINK_LINK_DISABLE_PM:
mode_sw = nvgpu_nvlink_link_disable_pm;
break;
case NVLINK_LINK_DISABLE_ERR_DETECT:
mode_sw = nvgpu_nvlink_link_disable_err_detect;
break;
case NVLINK_LINK_LANE_DISABLE:
mode_sw = nvgpu_nvlink_link_lane_disable;
break;
case NVLINK_LINK_LANE_SHUTDOWN:
mode_sw = nvgpu_nvlink_link_lane_shutdown;
break;
default:
mode_sw = nvgpu_nvlink_link_off;
}
return g->ops.nvlink.link_set_mode(g, link_id, mode_sw);
return g->ops.nvlink.link_set_mode(g, link_id, mode);
}
static void nvgpu_nvlink_get_tx_sublink_state(struct nvlink_device *ndev, u32 *state)
void nvgpu_nvlink_get_tx_sublink_state(struct gk20a *g, u32 *state)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
u32 link_id;
link_id = __nvgpu_nvlink_get_link(ndev);
link_id = nvgpu_nvlink_get_link(g);
if (link_id == NVLINK_MAX_LINKS_SW)
return;
if (state)
*state = g->ops.nvlink.get_tx_sublink_state(g, link_id);
}
static void nvgpu_nvlink_get_rx_sublink_state(struct nvlink_device *ndev, u32 *state)
void nvgpu_nvlink_get_rx_sublink_state(struct gk20a *g, u32 *state)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
u32 link_id;
link_id = __nvgpu_nvlink_get_link(ndev);
link_id = nvgpu_nvlink_get_link(g);
if (link_id == NVLINK_MAX_LINKS_SW)
return;
if (state)
*state = g->ops.nvlink.get_rx_sublink_state(g, link_id);
}
static u32 nvgpu_nvlink_get_sublink_mode(struct nvlink_device *ndev,
bool is_rx_sublink)
u32 nvgpu_nvlink_get_sublink_mode(struct gk20a *g, bool is_rx_sublink)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
u32 link_id;
u32 mode;
link_id = __nvgpu_nvlink_get_link(ndev);
link_id = nvgpu_nvlink_get_link(g);
if (link_id == NVLINK_MAX_LINKS_SW)
return -EINVAL;
mode = g->ops.nvlink.get_sublink_mode(g, link_id, is_rx_sublink);
return g->ops.nvlink.get_sublink_mode(g, link_id, is_rx_sublink);
switch (mode) {
case nvgpu_nvlink_sublink_tx_hs:
return NVLINK_TX_HS;
case nvgpu_nvlink_sublink_tx_off:
return NVLINK_TX_OFF;
case nvgpu_nvlink_sublink_tx_single_lane:
return NVLINK_TX_SINGLE_LANE;
case nvgpu_nvlink_sublink_tx_safe:
return NVLINK_TX_SAFE;
case nvgpu_nvlink_sublink_tx_enable_pm:
return NVLINK_TX_ENABLE_PM;
case nvgpu_nvlink_sublink_tx_disable_pm:
return NVLINK_TX_DISABLE_PM;
case nvgpu_nvlink_sublink_tx_common:
return NVLINK_TX_COMMON;
case nvgpu_nvlink_sublink_tx_common_disable:
return NVLINK_TX_COMMON_DISABLE;
case nvgpu_nvlink_sublink_tx_data_ready:
return NVLINK_TX_DATA_READY;
case nvgpu_nvlink_sublink_tx_prbs_en:
return NVLINK_TX_PRBS_EN;
case nvgpu_nvlink_sublink_rx_hs:
return NVLINK_RX_HS;
case nvgpu_nvlink_sublink_rx_enable_pm:
return NVLINK_RX_ENABLE_PM;
case nvgpu_nvlink_sublink_rx_disable_pm:
return NVLINK_RX_DISABLE_PM;
case nvgpu_nvlink_sublink_rx_single_lane:
return NVLINK_RX_SINGLE_LANE;
case nvgpu_nvlink_sublink_rx_safe:
return NVLINK_RX_SAFE;
case nvgpu_nvlink_sublink_rx_off:
return NVLINK_RX_OFF;
case nvgpu_nvlink_sublink_rx_rxcal:
return NVLINK_RX_RXCAL;
default:
nvgpu_log(g, gpu_dbg_nvlink, "Unsupported mode: %u", mode);
break;
}
if (is_rx_sublink)
return NVLINK_RX_OFF;
return NVLINK_TX_OFF;
}
static int nvgpu_nvlink_set_sublink_mode(struct nvlink_device *ndev,
int nvgpu_nvlink_set_sublink_mode(struct gk20a *g,
bool is_rx_sublink, u32 mode)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
u32 link_id;
u32 mode_sw;
link_id = __nvgpu_nvlink_get_link(ndev);
link_id = nvgpu_nvlink_get_link(g);
if (link_id == NVLINK_MAX_LINKS_SW)
return -EINVAL;
if (!is_rx_sublink) {
switch (mode) {
case NVLINK_TX_HS:
mode_sw = nvgpu_nvlink_sublink_tx_hs;
break;
case NVLINK_TX_ENABLE_PM:
mode_sw = nvgpu_nvlink_sublink_tx_enable_pm;
break;
case NVLINK_TX_DISABLE_PM:
mode_sw = nvgpu_nvlink_sublink_tx_disable_pm;
break;
case NVLINK_TX_SINGLE_LANE:
mode_sw = nvgpu_nvlink_sublink_tx_single_lane;
break;
case NVLINK_TX_SAFE:
mode_sw = nvgpu_nvlink_sublink_tx_safe;
break;
case NVLINK_TX_OFF:
mode_sw = nvgpu_nvlink_sublink_tx_off;
break;
case NVLINK_TX_COMMON:
mode_sw = nvgpu_nvlink_sublink_tx_common;
break;
case NVLINK_TX_COMMON_DISABLE:
mode_sw = nvgpu_nvlink_sublink_tx_common_disable;
break;
case NVLINK_TX_DATA_READY:
mode_sw = nvgpu_nvlink_sublink_tx_data_ready;
break;
case NVLINK_TX_PRBS_EN:
mode_sw = nvgpu_nvlink_sublink_tx_prbs_en;
break;
default:
return -EINVAL;
}
} else {
switch (mode) {
case NVLINK_RX_HS:
mode_sw = nvgpu_nvlink_sublink_rx_hs;
break;
case NVLINK_RX_ENABLE_PM:
mode_sw = nvgpu_nvlink_sublink_rx_enable_pm;
break;
case NVLINK_RX_DISABLE_PM:
mode_sw = nvgpu_nvlink_sublink_rx_disable_pm;
break;
case NVLINK_RX_SINGLE_LANE:
mode_sw = nvgpu_nvlink_sublink_rx_single_lane;
break;
case NVLINK_RX_SAFE:
mode_sw = nvgpu_nvlink_sublink_rx_safe;
break;
case NVLINK_RX_OFF:
mode_sw = nvgpu_nvlink_sublink_rx_off;
break;
case NVLINK_RX_RXCAL:
mode_sw = nvgpu_nvlink_sublink_rx_rxcal;
break;
default:
return -EINVAL;
}
return g->ops.nvlink.set_sublink_mode(g, link_id, is_rx_sublink, mode);
}
/* Extract a WORD from the MINION ucode */
u32 nvgpu_nvlink_minion_extract_word(struct nvgpu_firmware *fw, u32 idx)
{
u32 out_data = 0;
u8 byte = 0;
u32 i = 0;
for (i = 0; i < 4; i++) {
byte = fw->data[idx + i];
out_data |= ((u32)byte) << (8 * i);
}
return g->ops.nvlink.set_sublink_mode(g, link_id, is_rx_sublink,
mode_sw);
}
static int nvgpu_nvlink_init_ops(struct gk20a *g)
{
struct nvlink_device *ndev = g->nvlink.priv;
if (!ndev)
return -EINVAL;
/* Fill in device struct */
ndev->dev_ops.dev_early_init = nvgpu_nvlink_early_init;
ndev->dev_ops.dev_interface_init = nvgpu_nvlink_interface_init;
ndev->dev_ops.dev_reg_init = nvgpu_nvlink_reg_init;
ndev->dev_ops.dev_interface_disable = nvgpu_nvlink_interface_disable;
ndev->dev_ops.dev_shutdown = nvgpu_nvlink_shutdown;
ndev->dev_ops.dev_speed_config = nvgpu_nvlink_speed_config;
/* Fill in the link struct */
ndev->link.device_id = ndev->device_id;
ndev->link.mode = NVLINK_LINK_OFF;
ndev->link.is_sl_supported = false;
ndev->link.link_ops.get_link_mode = nvgpu_nvlink_get_link_mode;
ndev->link.link_ops.set_link_mode = nvgpu_nvlink_set_link_mode;
ndev->link.link_ops.get_sublink_mode = nvgpu_nvlink_get_sublink_mode;
ndev->link.link_ops.set_sublink_mode = nvgpu_nvlink_set_sublink_mode;
ndev->link.link_ops.get_link_state = nvgpu_nvlink_get_link_state;
ndev->link.link_ops.get_tx_sublink_state =
nvgpu_nvlink_get_tx_sublink_state;
ndev->link.link_ops.get_rx_sublink_state =
nvgpu_nvlink_get_rx_sublink_state;
ndev->link.link_ops.link_early_init =
nvgpu_nvlink_link_early_init;
return 0;
}
int nvgpu_nvlink_enumerate(struct gk20a *g)
{
struct nvlink_device *ndev = (struct nvlink_device *) g->nvlink.priv;
if (!ndev)
return -ENODEV;
return nvlink_enumerate(ndev);
}
int nvgpu_nvlink_train(struct gk20a *g, u32 link_id, bool from_off)
{
struct nvlink_device *ndev = (struct nvlink_device *) g->nvlink.priv;
if (!ndev)
return -ENODEV;
/* Check if the link is connected */
if (!g->nvlink.links[link_id].remote_info.is_connected)
return -ENODEV;
if (from_off)
return nvlink_transition_intranode_conn_off_to_safe(ndev);
return nvlink_train_intranode_conn_safe_to_hs(ndev);
return out_data;
}
#endif
@@ -498,17 +213,10 @@ int nvgpu_nvlink_probe(struct gk20a *g)
{
#ifdef CONFIG_TEGRA_NVLINK
int err;
struct nvlink_device *ndev;
/* Allocating structures */
ndev = nvgpu_kzalloc(g, sizeof(struct nvlink_device));
if (!ndev) {
nvgpu_err(g, "OOM while allocating nvlink device struct");
return -ENOMEM;
}
ndev->priv = (void *) g;
g->nvlink.priv = (void *) ndev;
err = nvgpu_nvlink_setup_ndev(g);
if (err != 0)
return err;
err = nvgpu_nvlink_read_dt_props(g);
if (err != 0)
@@ -519,14 +227,14 @@ int nvgpu_nvlink_probe(struct gk20a *g)
goto free_ndev;
/* Register device with core driver*/
err = nvlink_register_device(ndev);
err = nvgpu_nvlink_register_device(g);
if (err != 0) {
nvgpu_err(g, "failed on nvlink device registration");
goto free_ndev;
}
/* Register link with core driver */
err = nvlink_register_link(&ndev->link);
err = nvgpu_nvlink_register_link(g);
if (err != 0) {
nvgpu_err(g, "failed on nvlink link registration");
goto unregister_ndev;
@@ -537,10 +245,10 @@ int nvgpu_nvlink_probe(struct gk20a *g)
return 0;
unregister_ndev:
nvlink_unregister_device(ndev);
nvgpu_nvlink_unregister_device(g);
free_ndev:
nvgpu_kfree(g, ndev);
nvgpu_kfree(g, g->nvlink.priv);
g->nvlink.priv = NULL;
return err;
#else
@@ -551,31 +259,26 @@ free_ndev:
int nvgpu_nvlink_remove(struct gk20a *g)
{
#ifdef CONFIG_TEGRA_NVLINK
struct nvlink_device *ndev;
int err;
if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_NVLINK))
return -ENODEV;
ndev = g->nvlink.priv;
if (!ndev)
return -ENODEV;
nvgpu_set_enabled(g, NVGPU_SUPPORT_NVLINK, false);
err = nvlink_unregister_link(&ndev->link);
err = nvgpu_nvlink_unregister_link(g);
if (err != 0) {
nvgpu_err(g, "failed on nvlink link unregistration");
return err;
}
err = nvlink_unregister_device(ndev);
err = nvgpu_nvlink_unregister_device(g);
if (err != 0) {
nvgpu_err(g, "failed on nvlink device unregistration");
return err;
}
nvgpu_kfree(g, ndev);
nvgpu_kfree(g, g->nvlink.priv);
return 0;
#else


@@ -426,32 +426,13 @@ static bool gv100_nvlink_minion_isr(struct gk20a *g) {
return (intr == 0);
}
/* Extract a WORD from the MINION ucode */
static inline u32 minion_extract_word(struct nvgpu_firmware *fw, u32 idx)
{
u32 out_data = 0;
u8 byte = 0;
u32 i = 0;
for (i = 0; i < 4; i++) {
byte = fw->data[idx + i];
out_data |= ((u32)byte) << (8 * i);
}
return out_data;
}
/*
* Load minion FW and set up bootstrap
*/
static u32 gv100_nvlink_minion_load(struct gk20a *g)
{
u32 err = 0;
struct nvlink_device *ndev = (struct nvlink_device *) g->nvlink.priv;
struct nvgpu_firmware *nvgpu_minion_fw = NULL;
struct minion_hdr *minion_hdr = &ndev->minion_hdr;
u32 data_idx = 0;
u32 app = 0;
struct nvgpu_timeout timeout;
u32 delay = GR_IDLE_CHECK_DEFAULT;
u32 reg;
@@ -472,160 +453,13 @@ static u32 gv100_nvlink_minion_load(struct gk20a *g)
/* nvdec falcon reset */
nvgpu_falcon_reset(g->minion_flcn);
/* Read ucode header */
minion_hdr->os_code_offset = minion_extract_word(nvgpu_minion_fw,
data_idx);
data_idx += 4;
minion_hdr->os_code_size = minion_extract_word(nvgpu_minion_fw,
data_idx);
data_idx += 4;
minion_hdr->os_data_offset = minion_extract_word(nvgpu_minion_fw,
data_idx);
data_idx += 4;
minion_hdr->os_data_size = minion_extract_word(nvgpu_minion_fw,
data_idx);
data_idx += 4;
minion_hdr->num_apps = minion_extract_word(nvgpu_minion_fw,
data_idx);
data_idx += 4;
nvgpu_log(g, gpu_dbg_nvlink,
"MINION Ucode Header Info:");
nvgpu_log(g, gpu_dbg_nvlink,
"-------------------------");
nvgpu_log(g, gpu_dbg_nvlink,
" - OS Code Offset = %u", minion_hdr->os_code_offset);
nvgpu_log(g, gpu_dbg_nvlink,
" - OS Code Size = %u", minion_hdr->os_code_size);
nvgpu_log(g, gpu_dbg_nvlink,
" - OS Data Offset = %u", minion_hdr->os_data_offset);
nvgpu_log(g, gpu_dbg_nvlink,
" - OS Data Size = %u", minion_hdr->os_data_size);
nvgpu_log(g, gpu_dbg_nvlink,
" - Num Apps = %u", minion_hdr->num_apps);
/* Allocate offset/size arrays for all the ucode apps */
minion_hdr->app_code_offsets = nvgpu_kcalloc(g,
minion_hdr->num_apps,
sizeof(u32));
if (!minion_hdr->app_code_offsets) {
nvgpu_err(g, "Couldn't allocate MINION app_code_offsets array");
err = -ENOMEM;
goto exit;
}
minion_hdr->app_code_sizes = nvgpu_kcalloc(g,
minion_hdr->num_apps,
sizeof(u32));
if (!minion_hdr->app_code_sizes) {
nvgpu_err(g, "Couldn't allocate MINION app_code_sizes array");
err = -ENOMEM;
goto exit;
}
minion_hdr->app_data_offsets = nvgpu_kcalloc(g,
minion_hdr->num_apps,
sizeof(u32));
if (!minion_hdr->app_data_offsets) {
nvgpu_err(g, "Couldn't allocate MINION app_data_offsets array");
err = -ENOMEM;
goto exit;
}
minion_hdr->app_data_sizes = nvgpu_kcalloc(g,
minion_hdr->num_apps,
sizeof(u32));
if (!minion_hdr->app_data_sizes) {
nvgpu_err(g, "Couldn't allocate MINION app_data_sizes array");
err = -ENOMEM;
goto exit;
}
/* Get app code offsets and sizes */
for (app = 0; app < minion_hdr->num_apps; app++) {
minion_hdr->app_code_offsets[app] =
minion_extract_word(nvgpu_minion_fw, data_idx);
data_idx += 4;
minion_hdr->app_code_sizes[app] =
minion_extract_word(nvgpu_minion_fw, data_idx);
data_idx += 4;
nvgpu_log(g, gpu_dbg_nvlink,
" - App Code:");
nvgpu_log(g, gpu_dbg_nvlink,
" - App #%d: Code Offset = %u, Code Size = %u",
app,
minion_hdr->app_code_offsets[app],
minion_hdr->app_code_sizes[app]);
}
/* Get app data offsets and sizes */
for (app = 0; app < minion_hdr->num_apps; app++) {
minion_hdr->app_data_offsets[app] =
minion_extract_word(nvgpu_minion_fw, data_idx);
data_idx += 4;
minion_hdr->app_data_sizes[app] =
minion_extract_word(nvgpu_minion_fw, data_idx);
data_idx += 4;
nvgpu_log(g, gpu_dbg_nvlink,
" - App Data:");
nvgpu_log(g, gpu_dbg_nvlink,
" - App #%d: Data Offset = %u, Data Size = %u",
app,
minion_hdr->app_data_offsets[app],
minion_hdr->app_data_sizes[app]);
}
minion_hdr->ovl_offset = minion_extract_word(nvgpu_minion_fw, data_idx);
data_idx += 4;
minion_hdr->ovl_size = minion_extract_word(nvgpu_minion_fw, data_idx);
data_idx += 4;
ndev->minion_img = &(nvgpu_minion_fw->data[data_idx]);
minion_hdr->ucode_data_size = nvgpu_minion_fw->size - data_idx;
nvgpu_log(g, gpu_dbg_nvlink,
" - Overlay Offset = %u", minion_hdr->ovl_offset);
nvgpu_log(g, gpu_dbg_nvlink,
" - Overlay Size = %u", minion_hdr->ovl_size);
nvgpu_log(g, gpu_dbg_nvlink,
" - Ucode Data Size = %u", minion_hdr->ucode_data_size);
/* Clear interrupts */
nvgpu_falcon_set_irq(g->minion_flcn, true, MINION_FALCON_INTR_MASK,
MINION_FALCON_INTR_DEST);
/* Copy Non Secure IMEM code */
nvgpu_falcon_copy_to_imem(g->minion_flcn, 0,
(u8 *)&ndev->minion_img[minion_hdr->os_code_offset],
minion_hdr->os_code_size, 0, false,
GET_IMEM_TAG(minion_hdr->os_code_offset));
/* Copy Non Secure DMEM code */
nvgpu_falcon_copy_to_dmem(g->minion_flcn, 0,
(u8 *)&ndev->minion_img[minion_hdr->os_data_offset],
minion_hdr->os_data_size, 0);
/* Load the apps securely */
for (app = 0; app < minion_hdr->num_apps; app++) {
u32 app_code_start = minion_hdr->app_code_offsets[app];
u32 app_code_size = minion_hdr->app_code_sizes[app];
u32 app_data_start = minion_hdr->app_data_offsets[app];
u32 app_data_size = minion_hdr->app_data_sizes[app];
if (app_code_size)
nvgpu_falcon_copy_to_imem(g->minion_flcn,
app_code_start,
(u8 *)&ndev->minion_img[app_code_start],
app_code_size, 0, true,
GET_IMEM_TAG(app_code_start));
if (app_data_size)
nvgpu_falcon_copy_to_dmem(g->minion_flcn,
app_data_start,
(u8 *)&ndev->minion_img[app_data_start],
app_data_size, 0);
err = nvgpu_nvlink_minion_load_ucode(g, nvgpu_minion_fw);
if (err != 0) {
goto exit;
}
/* set BOOTVEC to start of non-secure code */
@@ -668,18 +502,10 @@ static u32 gv100_nvlink_minion_load(struct gk20a *g)
}
gv100_nvlink_initialize_minion(g);
return err;
exit:
nvgpu_kfree(g, minion_hdr->app_code_offsets);
nvgpu_kfree(g, minion_hdr->app_code_sizes);
nvgpu_kfree(g, minion_hdr->app_data_offsets);
nvgpu_kfree(g, minion_hdr->app_data_sizes);
if (nvgpu_minion_fw) {
nvgpu_release_firmware(g, nvgpu_minion_fw);
ndev->minion_img = NULL;
}
nvgpu_nvlink_free_minion_used_mem(g, nvgpu_minion_fw);
return err;
}


@@ -1,31 +0,0 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVGPU_LINUX_NVLINK_H__
#define __NVGPU_LINUX_NVLINK_H__
#ifdef CONFIG_TEGRA_NVLINK
#include <linux/mutex.h>
#include <nvlink/common/tegra-nvlink.h>
#endif
#endif


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -25,14 +25,6 @@
#include <nvgpu/types.h>
#ifdef __KERNEL__
#include <nvgpu/linux/nvlink.h>
#elif defined(__NVGPU_POSIX__)
#include <nvgpu/posix/nvlink.h>
#else
#include <nvgpu_rmos/include/nvlink.h>
#endif
#define NV_NVLINK_REG_POLL_TIMEOUT_MS 3000
#define NV_NVLINK_TIMEOUT_DELAY_US 5
@@ -53,6 +45,7 @@
#define DLPL_REG_WR32(g, id, off, v) gk20a_writel(g, g->nvlink.links[(id)].dlpl_base + (off), (v))
struct gk20a;
struct nvgpu_firmware;
struct nvgpu_nvlink_ioctrl_list {
bool valid;
@@ -231,13 +224,39 @@ struct nvgpu_nvlink_dev {
void *priv;
};
void nvgpu_nvlink_free_minion_used_mem(struct gk20a *g,
struct nvgpu_firmware *nvgpu_minion_fw);
u32 nvgpu_nvlink_minion_extract_word(struct nvgpu_firmware *fw, u32 idx);
int nvgpu_nvlink_speed_config(struct gk20a *g);
int nvgpu_nvlink_early_init(struct gk20a *g);
int nvgpu_nvlink_link_early_init(struct gk20a *g);
int nvgpu_nvlink_interface_init(struct gk20a *g);
int nvgpu_nvlink_interface_disable(struct gk20a *g);
int nvgpu_nvlink_dev_shutdown(struct gk20a *g);
int nvgpu_nvlink_reg_init(struct gk20a *g);
u32 nvgpu_nvlink_get_link_mode(struct gk20a *g);
u32 nvgpu_nvlink_get_link_state(struct gk20a *g);
int nvgpu_nvlink_set_link_mode(struct gk20a *g, u32 mode);
void nvgpu_nvlink_get_tx_sublink_state(struct gk20a *g, u32 *state);
void nvgpu_nvlink_get_rx_sublink_state(struct gk20a *g, u32 *state);
u32 nvgpu_nvlink_get_sublink_mode(struct gk20a *g, bool is_rx_sublink);
int nvgpu_nvlink_set_sublink_mode(struct gk20a *g,
bool is_rx_sublink, u32 mode);
int nvgpu_nvlink_setup_ndev(struct gk20a *g);
int nvgpu_nvlink_init_ops(struct gk20a *g);
int nvgpu_nvlink_enumerate(struct gk20a *g);
int nvgpu_nvlink_train(struct gk20a *g, u32 link_id, bool from_off);
int nvgpu_nvlink_read_dt_props(struct gk20a *g);
int nvgpu_nvlink_probe(struct gk20a *g);
int nvgpu_nvlink_remove(struct gk20a *g);
int nvgpu_nvlink_register_device(struct gk20a *g);
int nvgpu_nvlink_unregister_device(struct gk20a *g);
int nvgpu_nvlink_register_link(struct gk20a *g);
int nvgpu_nvlink_unregister_link(struct gk20a *g);
u32 nvgpu_nvlink_minion_load_ucode(struct gk20a *g,
struct nvgpu_firmware *nvgpu_minion_fw);
void nvgpu_mss_nvlink_init_credits(struct gk20a *g);
#endif /* NVGPU_NVLINK_H */


@@ -1,24 +0,0 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __NVGPU_POSIX_NVLINK_H__
#define __NVGPU_POSIX_NVLINK_H__
/*
* Empty...
*/
#endif


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -14,6 +14,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/mutex.h>
#ifdef CONFIG_TEGRA_NVLINK
#include <nvlink/common/tegra-nvlink.h>
#endif
@@ -21,6 +22,7 @@
#include <nvgpu/gk20a.h>
#include <nvgpu/nvlink.h>
#include <nvgpu/enabled.h>
#include <nvgpu/firmware.h>
#include "module.h"
#ifdef CONFIG_TEGRA_NVLINK
@@ -72,6 +74,632 @@ fail:
nvgpu_info(g, "nvlink endpoint not found or invaling in DT");
return -ENODEV;
}
static int nvgpu_nvlink_ops_speed_config(struct nvlink_device *ndev)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
int err;
err = nvgpu_nvlink_speed_config(g);
if (err != 0) {
nvgpu_err(g, "Nvlink speed config failed.\n");
} else {
ndev->speed = g->nvlink.speed;
nvgpu_log(g, gpu_dbg_nvlink, "Nvlink default speed set to %d\n",
ndev->speed);
}
return err;
}
static int nvgpu_nvlink_ops_early_init(struct nvlink_device *ndev)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
return nvgpu_nvlink_early_init(g);
}
static int nvgpu_nvlink_ops_link_early_init(struct nvlink_device *ndev)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
return nvgpu_nvlink_link_early_init(g);
}
static int nvgpu_nvlink_ops_interface_init(struct nvlink_device *ndev)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
return nvgpu_nvlink_interface_init(g);
}
static int nvgpu_nvlink_ops_interface_disable(struct nvlink_device *ndev)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
return nvgpu_nvlink_interface_disable(g);
}
static int nvgpu_nvlink_ops_dev_shutdown(struct nvlink_device *ndev)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
return nvgpu_nvlink_dev_shutdown(g);
}
static int nvgpu_nvlink_ops_reg_init(struct nvlink_device *ndev)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
return nvgpu_nvlink_reg_init(g);
}
static u32 nvgpu_nvlink_ops_get_link_mode(struct nvlink_device *ndev)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
u32 mode;
mode = nvgpu_nvlink_get_link_mode(g);
switch (mode) {
case nvgpu_nvlink_link_off:
return NVLINK_LINK_OFF;
case nvgpu_nvlink_link_hs:
return NVLINK_LINK_HS;
case nvgpu_nvlink_link_safe:
return NVLINK_LINK_SAFE;
case nvgpu_nvlink_link_fault:
return NVLINK_LINK_FAULT;
case nvgpu_nvlink_link_rcvy_ac:
return NVLINK_LINK_RCVY_AC;
case nvgpu_nvlink_link_rcvy_sw:
return NVLINK_LINK_RCVY_SW;
case nvgpu_nvlink_link_rcvy_rx:
return NVLINK_LINK_RCVY_RX;
case nvgpu_nvlink_link_detect:
return NVLINK_LINK_DETECT;
case nvgpu_nvlink_link_reset:
return NVLINK_LINK_RESET;
case nvgpu_nvlink_link_enable_pm:
return NVLINK_LINK_ENABLE_PM;
case nvgpu_nvlink_link_disable_pm:
return NVLINK_LINK_DISABLE_PM;
case nvgpu_nvlink_link_disable_err_detect:
return NVLINK_LINK_DISABLE_ERR_DETECT;
case nvgpu_nvlink_link_lane_disable:
return NVLINK_LINK_LANE_DISABLE;
case nvgpu_nvlink_link_lane_shutdown:
return NVLINK_LINK_LANE_SHUTDOWN;
default:
nvgpu_log(g, gpu_dbg_info | gpu_dbg_nvlink,
"unsupported mode %u", mode);
}
return NVLINK_LINK_OFF;
}
static u32 nvgpu_nvlink_ops_get_link_state(struct nvlink_device *ndev)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
return nvgpu_nvlink_get_link_state(g);
}
static int nvgpu_nvlink_ops_set_link_mode(struct nvlink_device *ndev, u32 mode)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
u32 mode_sw;
switch (mode) {
case NVLINK_LINK_OFF:
mode_sw = nvgpu_nvlink_link_off;
break;
case NVLINK_LINK_HS:
mode_sw = nvgpu_nvlink_link_hs;
break;
case NVLINK_LINK_SAFE:
mode_sw = nvgpu_nvlink_link_safe;
break;
case NVLINK_LINK_FAULT:
mode_sw = nvgpu_nvlink_link_fault;
break;
case NVLINK_LINK_RCVY_AC:
mode_sw = nvgpu_nvlink_link_rcvy_ac;
break;
case NVLINK_LINK_RCVY_SW:
mode_sw = nvgpu_nvlink_link_rcvy_sw;
break;
case NVLINK_LINK_RCVY_RX:
mode_sw = nvgpu_nvlink_link_rcvy_rx;
break;
case NVLINK_LINK_DETECT:
mode_sw = nvgpu_nvlink_link_detect;
break;
case NVLINK_LINK_RESET:
mode_sw = nvgpu_nvlink_link_reset;
break;
case NVLINK_LINK_ENABLE_PM:
mode_sw = nvgpu_nvlink_link_enable_pm;
break;
case NVLINK_LINK_DISABLE_PM:
mode_sw = nvgpu_nvlink_link_disable_pm;
break;
case NVLINK_LINK_DISABLE_ERR_DETECT:
mode_sw = nvgpu_nvlink_link_disable_err_detect;
break;
case NVLINK_LINK_LANE_DISABLE:
mode_sw = nvgpu_nvlink_link_lane_disable;
break;
case NVLINK_LINK_LANE_SHUTDOWN:
mode_sw = nvgpu_nvlink_link_lane_shutdown;
break;
default:
mode_sw = nvgpu_nvlink_link_off;
}
return nvgpu_nvlink_set_link_mode(g, mode_sw);
}
static void nvgpu_nvlink_ops_get_tx_sublink_state(struct nvlink_device *ndev,
u32 *tx_sublink_state)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
return nvgpu_nvlink_get_tx_sublink_state(g, tx_sublink_state);
}
static void nvgpu_nvlink_ops_get_rx_sublink_state(struct nvlink_device *ndev,
u32 *rx_sublink_state)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
return nvgpu_nvlink_get_rx_sublink_state(g, rx_sublink_state);
}
static u32 nvgpu_nvlink_ops_get_sublink_mode(struct nvlink_device *ndev,
bool is_rx_sublink)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
u32 mode;
mode = nvgpu_nvlink_get_sublink_mode(g, is_rx_sublink);
switch (mode) {
case nvgpu_nvlink_sublink_tx_hs:
return NVLINK_TX_HS;
case nvgpu_nvlink_sublink_tx_off:
return NVLINK_TX_OFF;
case nvgpu_nvlink_sublink_tx_single_lane:
return NVLINK_TX_SINGLE_LANE;
case nvgpu_nvlink_sublink_tx_safe:
return NVLINK_TX_SAFE;
case nvgpu_nvlink_sublink_tx_enable_pm:
return NVLINK_TX_ENABLE_PM;
case nvgpu_nvlink_sublink_tx_disable_pm:
return NVLINK_TX_DISABLE_PM;
case nvgpu_nvlink_sublink_tx_common:
return NVLINK_TX_COMMON;
case nvgpu_nvlink_sublink_tx_common_disable:
return NVLINK_TX_COMMON_DISABLE;
case nvgpu_nvlink_sublink_tx_data_ready:
return NVLINK_TX_DATA_READY;
case nvgpu_nvlink_sublink_tx_prbs_en:
return NVLINK_TX_PRBS_EN;
case nvgpu_nvlink_sublink_rx_hs:
return NVLINK_RX_HS;
case nvgpu_nvlink_sublink_rx_enable_pm:
return NVLINK_RX_ENABLE_PM;
case nvgpu_nvlink_sublink_rx_disable_pm:
return NVLINK_RX_DISABLE_PM;
case nvgpu_nvlink_sublink_rx_single_lane:
return NVLINK_RX_SINGLE_LANE;
case nvgpu_nvlink_sublink_rx_safe:
return NVLINK_RX_SAFE;
case nvgpu_nvlink_sublink_rx_off:
return NVLINK_RX_OFF;
case nvgpu_nvlink_sublink_rx_rxcal:
return NVLINK_RX_RXCAL;
default:
nvgpu_log(g, gpu_dbg_nvlink, "Unsupported mode: %u", mode);
break;
}
if (is_rx_sublink)
return NVLINK_RX_OFF;
return NVLINK_TX_OFF;
}
static int nvgpu_nvlink_ops_set_sublink_mode(struct nvlink_device *ndev,
bool is_rx_sublink, u32 mode)
{
struct gk20a *g = (struct gk20a *) ndev->priv;
u32 mode_sw;
if (!is_rx_sublink) {
switch (mode) {
case NVLINK_TX_HS:
mode_sw = nvgpu_nvlink_sublink_tx_hs;
break;
case NVLINK_TX_ENABLE_PM:
mode_sw = nvgpu_nvlink_sublink_tx_enable_pm;
break;
case NVLINK_TX_DISABLE_PM:
mode_sw = nvgpu_nvlink_sublink_tx_disable_pm;
break;
case NVLINK_TX_SINGLE_LANE:
mode_sw = nvgpu_nvlink_sublink_tx_single_lane;
break;
case NVLINK_TX_SAFE:
mode_sw = nvgpu_nvlink_sublink_tx_safe;
break;
case NVLINK_TX_OFF:
mode_sw = nvgpu_nvlink_sublink_tx_off;
break;
case NVLINK_TX_COMMON:
mode_sw = nvgpu_nvlink_sublink_tx_common;
break;
case NVLINK_TX_COMMON_DISABLE:
mode_sw = nvgpu_nvlink_sublink_tx_common_disable;
break;
case NVLINK_TX_DATA_READY:
mode_sw = nvgpu_nvlink_sublink_tx_data_ready;
break;
case NVLINK_TX_PRBS_EN:
mode_sw = nvgpu_nvlink_sublink_tx_prbs_en;
break;
default:
return -EINVAL;
}
} else {
switch (mode) {
case NVLINK_RX_HS:
mode_sw = nvgpu_nvlink_sublink_rx_hs;
break;
case NVLINK_RX_ENABLE_PM:
mode_sw = nvgpu_nvlink_sublink_rx_enable_pm;
break;
case NVLINK_RX_DISABLE_PM:
mode_sw = nvgpu_nvlink_sublink_rx_disable_pm;
break;
case NVLINK_RX_SINGLE_LANE:
mode_sw = nvgpu_nvlink_sublink_rx_single_lane;
break;
case NVLINK_RX_SAFE:
mode_sw = nvgpu_nvlink_sublink_rx_safe;
break;
case NVLINK_RX_OFF:
mode_sw = nvgpu_nvlink_sublink_rx_off;
break;
case NVLINK_RX_RXCAL:
mode_sw = nvgpu_nvlink_sublink_rx_rxcal;
break;
default:
return -EINVAL;
}
}
return nvgpu_nvlink_set_sublink_mode(g, is_rx_sublink, mode_sw);
}
int nvgpu_nvlink_setup_ndev(struct gk20a *g)
{
struct nvlink_device *ndev;
/* Allocating structures */
ndev = nvgpu_kzalloc(g, sizeof(struct nvlink_device));
if (!ndev) {
nvgpu_err(g, "OOM while allocating nvlink device struct");
return -ENOMEM;
}
ndev->priv = (void *) g;
g->nvlink.priv = (void *) ndev;
return 0;
}
int nvgpu_nvlink_init_ops(struct gk20a *g)
{
struct nvlink_device *ndev = (struct nvlink_device *) g->nvlink.priv;
if (!ndev)
return -EINVAL;
/* Fill in device struct */
ndev->dev_ops.dev_early_init = nvgpu_nvlink_ops_early_init;
ndev->dev_ops.dev_interface_init = nvgpu_nvlink_ops_interface_init;
ndev->dev_ops.dev_reg_init = nvgpu_nvlink_ops_reg_init;
ndev->dev_ops.dev_interface_disable =
nvgpu_nvlink_ops_interface_disable;
ndev->dev_ops.dev_shutdown = nvgpu_nvlink_ops_dev_shutdown;
ndev->dev_ops.dev_speed_config = nvgpu_nvlink_ops_speed_config;
/* Fill in the link struct */
ndev->link.device_id = ndev->device_id;
ndev->link.mode = NVLINK_LINK_OFF;
ndev->link.is_sl_supported = false;
ndev->link.link_ops.get_link_mode = nvgpu_nvlink_ops_get_link_mode;
ndev->link.link_ops.set_link_mode = nvgpu_nvlink_ops_set_link_mode;
ndev->link.link_ops.get_sublink_mode =
nvgpu_nvlink_ops_get_sublink_mode;
ndev->link.link_ops.set_sublink_mode =
nvgpu_nvlink_ops_set_sublink_mode;
ndev->link.link_ops.get_link_state = nvgpu_nvlink_ops_get_link_state;
ndev->link.link_ops.get_tx_sublink_state =
nvgpu_nvlink_ops_get_tx_sublink_state;
ndev->link.link_ops.get_rx_sublink_state =
nvgpu_nvlink_ops_get_rx_sublink_state;
ndev->link.link_ops.link_early_init =
nvgpu_nvlink_ops_link_early_init;
return 0;
}
int nvgpu_nvlink_register_device(struct gk20a *g)
{
struct nvlink_device *ndev = (struct nvlink_device *) g->nvlink.priv;
if (!ndev)
return -ENODEV;
return nvlink_register_device(ndev);
}
int nvgpu_nvlink_unregister_device(struct gk20a *g)
{
struct nvlink_device *ndev = (struct nvlink_device *) g->nvlink.priv;
if (!ndev)
return -ENODEV;
return nvlink_unregister_device(ndev);
}
int nvgpu_nvlink_register_link(struct gk20a *g)
{
struct nvlink_device *ndev = (struct nvlink_device *) g->nvlink.priv;
if (!ndev)
return -ENODEV;
return nvlink_register_link(&ndev->link);
}
int nvgpu_nvlink_unregister_link(struct gk20a *g)
{
struct nvlink_device *ndev = (struct nvlink_device *) g->nvlink.priv;
if (!ndev)
return -ENODEV;
return nvlink_unregister_link(&ndev->link);
}
int nvgpu_nvlink_enumerate(struct gk20a *g)
{
struct nvlink_device *ndev = (struct nvlink_device *) g->nvlink.priv;
if (!ndev)
return -ENODEV;
return nvlink_enumerate(ndev);
}
int nvgpu_nvlink_train(struct gk20a *g, u32 link_id, bool from_off)
{
struct nvlink_device *ndev = (struct nvlink_device *) g->nvlink.priv;
if (!ndev)
return -ENODEV;
/* Check if the link is connected */
if (!g->nvlink.links[link_id].remote_info.is_connected)
return -ENODEV;
if (from_off)
return nvlink_transition_intranode_conn_off_to_safe(ndev);
return nvlink_train_intranode_conn_safe_to_hs(ndev);
}
void nvgpu_nvlink_free_minion_used_mem(struct gk20a *g,
struct nvgpu_firmware *nvgpu_minion_fw)
{
struct nvlink_device *ndev = (struct nvlink_device *) g->nvlink.priv;
struct minion_hdr *minion_hdr = &ndev->minion_hdr;
nvgpu_kfree(g, minion_hdr->app_code_offsets);
nvgpu_kfree(g, minion_hdr->app_code_sizes);
nvgpu_kfree(g, minion_hdr->app_data_offsets);
nvgpu_kfree(g, minion_hdr->app_data_sizes);
if (nvgpu_minion_fw) {
nvgpu_release_firmware(g, nvgpu_minion_fw);
ndev->minion_img = NULL;
}
}
/*
* Load minion FW
*/
u32 nvgpu_nvlink_minion_load_ucode(struct gk20a *g,
struct nvgpu_firmware *nvgpu_minion_fw)
{
u32 err = 0;
struct nvlink_device *ndev = (struct nvlink_device *) g->nvlink.priv;
struct minion_hdr *minion_hdr = &ndev->minion_hdr;
u32 data_idx = 0;
u32 app = 0;
nvgpu_log_fn(g, " ");
/* Read ucode header */
minion_hdr->os_code_offset = nvgpu_nvlink_minion_extract_word(
nvgpu_minion_fw,
data_idx);
data_idx += 4;
minion_hdr->os_code_size = nvgpu_nvlink_minion_extract_word(
nvgpu_minion_fw,
data_idx);
data_idx += 4;
minion_hdr->os_data_offset = nvgpu_nvlink_minion_extract_word(
nvgpu_minion_fw,
data_idx);
data_idx += 4;
minion_hdr->os_data_size = nvgpu_nvlink_minion_extract_word(
nvgpu_minion_fw,
data_idx);
data_idx += 4;
minion_hdr->num_apps = nvgpu_nvlink_minion_extract_word(
nvgpu_minion_fw,
data_idx);
data_idx += 4;
nvgpu_log(g, gpu_dbg_nvlink,
"MINION Ucode Header Info:");
nvgpu_log(g, gpu_dbg_nvlink,
"-------------------------");
nvgpu_log(g, gpu_dbg_nvlink,
" - OS Code Offset = %u", minion_hdr->os_code_offset);
nvgpu_log(g, gpu_dbg_nvlink,
" - OS Code Size = %u", minion_hdr->os_code_size);
nvgpu_log(g, gpu_dbg_nvlink,
" - OS Data Offset = %u", minion_hdr->os_data_offset);
nvgpu_log(g, gpu_dbg_nvlink,
" - OS Data Size = %u", minion_hdr->os_data_size);
nvgpu_log(g, gpu_dbg_nvlink,
" - Num Apps = %u", minion_hdr->num_apps);
/* Allocate offset/size arrays for all the ucode apps */
minion_hdr->app_code_offsets = nvgpu_kcalloc(g,
minion_hdr->num_apps,
sizeof(u32));
if (!minion_hdr->app_code_offsets) {
nvgpu_err(g, "Couldn't allocate MINION app_code_offsets array");
return -ENOMEM;
}
minion_hdr->app_code_sizes = nvgpu_kcalloc(g,
minion_hdr->num_apps,
sizeof(u32));
if (!minion_hdr->app_code_sizes) {
nvgpu_err(g, "Couldn't allocate MINION app_code_sizes array");
return -ENOMEM;
}
minion_hdr->app_data_offsets = nvgpu_kcalloc(g,
minion_hdr->num_apps,
sizeof(u32));
if (!minion_hdr->app_data_offsets) {
nvgpu_err(g, "Couldn't allocate MINION app_data_offsets array");
return -ENOMEM;
}
minion_hdr->app_data_sizes = nvgpu_kcalloc(g,
minion_hdr->num_apps,
sizeof(u32));
if (!minion_hdr->app_data_sizes) {
nvgpu_err(g, "Couldn't allocate MINION app_data_sizes array");
return -ENOMEM;
}
/* Get app code offsets and sizes */
for (app = 0; app < minion_hdr->num_apps; app++) {
minion_hdr->app_code_offsets[app] =
nvgpu_nvlink_minion_extract_word(
nvgpu_minion_fw,
data_idx);
data_idx += 4;
minion_hdr->app_code_sizes[app] =
nvgpu_nvlink_minion_extract_word(
nvgpu_minion_fw,
data_idx);
data_idx += 4;
nvgpu_log(g, gpu_dbg_nvlink,
" - App Code:");
nvgpu_log(g, gpu_dbg_nvlink,
" - App #%d: Code Offset = %u, Code Size = %u",
app,
minion_hdr->app_code_offsets[app],
minion_hdr->app_code_sizes[app]);
}
/* Get app data offsets and sizes */
for (app = 0; app < minion_hdr->num_apps; app++) {
minion_hdr->app_data_offsets[app] =
nvgpu_nvlink_minion_extract_word(
nvgpu_minion_fw,
data_idx);
data_idx += 4;
minion_hdr->app_data_sizes[app] =
nvgpu_nvlink_minion_extract_word(
nvgpu_minion_fw,
data_idx);
data_idx += 4;
nvgpu_log(g, gpu_dbg_nvlink,
" - App Data:");
nvgpu_log(g, gpu_dbg_nvlink,
" - App #%d: Data Offset = %u, Data Size = %u",
app,
minion_hdr->app_data_offsets[app],
minion_hdr->app_data_sizes[app]);
}
minion_hdr->ovl_offset = nvgpu_nvlink_minion_extract_word(
nvgpu_minion_fw,
data_idx);
data_idx += 4;
minion_hdr->ovl_size = nvgpu_nvlink_minion_extract_word(
nvgpu_minion_fw,
data_idx);
data_idx += 4;
ndev->minion_img = &(nvgpu_minion_fw->data[data_idx]);
minion_hdr->ucode_data_size = nvgpu_minion_fw->size - data_idx;
nvgpu_log(g, gpu_dbg_nvlink,
" - Overlay Offset = %u", minion_hdr->ovl_offset);
nvgpu_log(g, gpu_dbg_nvlink,
" - Overlay Size = %u", minion_hdr->ovl_size);
nvgpu_log(g, gpu_dbg_nvlink,
" - Ucode Data Size = %u", minion_hdr->ucode_data_size);
/* Copy Non Secure IMEM code */
nvgpu_falcon_copy_to_imem(g->minion_flcn, 0,
(u8 *)&ndev->minion_img[minion_hdr->os_code_offset],
minion_hdr->os_code_size, 0, false,
GET_IMEM_TAG(minion_hdr->os_code_offset));
/* Copy Non Secure DMEM code */
nvgpu_falcon_copy_to_dmem(g->minion_flcn, 0,
(u8 *)&ndev->minion_img[minion_hdr->os_data_offset],
minion_hdr->os_data_size, 0);
/* Load the apps securely */
for (app = 0; app < minion_hdr->num_apps; app++) {
u32 app_code_start = minion_hdr->app_code_offsets[app];
u32 app_code_size = minion_hdr->app_code_sizes[app];
u32 app_data_start = minion_hdr->app_data_offsets[app];
u32 app_data_size = minion_hdr->app_data_sizes[app];
if (app_code_size)
nvgpu_falcon_copy_to_imem(g->minion_flcn,
app_code_start,
(u8 *)&ndev->minion_img[app_code_start],
app_code_size, 0, true,
GET_IMEM_TAG(app_code_start));
if (app_data_size)
nvgpu_falcon_copy_to_dmem(g->minion_flcn,
app_data_start,
(u8 *)&ndev->minion_img[app_data_start],
app_data_size, 0);
}
return err;
}
#endif /* CONFIG_TEGRA_NVLINK */
void nvgpu_mss_nvlink_init_credits(struct gk20a *g)