Open source GPL/LGPL release

svcmobrel-release, 2022-07-21 16:03:29 -07:00
commit f338182221
2260 changed files with 576813 additions and 0 deletions


@@ -0,0 +1,882 @@
/*
* Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/bios.h>
#include <nvgpu/io.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/string.h>
#include <nvgpu/soc.h>
#include <nvgpu/static_analysis.h>
#include <nvgpu/timers.h>
#include "bios_sw_gv100.h"
#include "bios_sw_tu104.h"
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_NON_FUSA)
#include "nvgpu_next_gpuid.h"
#endif
static void nvgpu_bios_parse_bit(struct gk20a *g, u32 offset);
int nvgpu_bios_devinit(struct gk20a *g,
struct nvgpu_bios *bios)
{
if (bios == NULL) {
return 0;
}
if (bios->devinit_bios != NULL) {
return bios->devinit_bios(g);
} else {
return 0;
}
}
int nvgpu_bios_preos_wait_for_halt(struct gk20a *g,
struct nvgpu_bios *bios)
{
if (bios == NULL) {
return 0;
}
if (bios->preos_wait_for_halt != NULL) {
return bios->preos_wait_for_halt(g);
} else {
return 0;
}
}
bool nvgpu_bios_check_dgpu(struct gk20a *g, u32 ver)
{
bool is_supported;
switch (ver) {
case NVGPU_GPUID_GV100:
case NVGPU_GPUID_TU104:
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_NON_FUSA)
case NVGPU_NEXT_DGPU_GPUID:
#endif
is_supported = true;
break;
default:
is_supported = false;
break;
}
return is_supported;
}
u32 nvgpu_bios_get_vbios_version(struct gk20a *g)
{
u32 ver = nvgpu_safe_add_u32(g->params.gpu_arch, g->params.gpu_impl);
u32 vbios_version;
switch (ver) {
case NVGPU_GPUID_GV100:
case NVGPU_GPUID_TU104:
if (nvgpu_platform_is_silicon(g)) {
vbios_version = g->bios->vbios_version;
} else {
vbios_version = 0;
}
break;
default:
vbios_version = 0;
break;
}
return vbios_version;
}
u8 nvgpu_bios_get_vbios_oem_version(struct gk20a *g)
{
u32 ver = nvgpu_safe_add_u32(g->params.gpu_arch, g->params.gpu_impl);
u8 vbios_oem_version;
switch (ver) {
case NVGPU_GPUID_GV100:
case NVGPU_GPUID_TU104:
if (nvgpu_platform_is_silicon(g)) {
vbios_oem_version = g->bios->vbios_oem_version;
} else {
vbios_oem_version = 0;
}
break;
default:
vbios_oem_version = 0;
break;
}
return vbios_oem_version;
}
struct bit_token *nvgpu_bios_get_bit_token(struct gk20a *g, u8 token_id)
{
struct bit_token *token = NULL;
switch (token_id) {
case NVGPU_BIOS_CLOCK_TOKEN:
token = g->bios->clock_token;
break;
case NVGPU_BIOS_PERF_TOKEN:
token = g->bios->perf_token;
break;
case NVGPU_BIOS_VIRT_TOKEN:
token = g->bios->virt_token;
break;
default:
token = NULL;
break;
}
return token;
}
int nvgpu_bios_sw_init(struct gk20a *g)
{
u32 ver = nvgpu_safe_add_u32(g->params.gpu_arch, g->params.gpu_impl);
int err = 0;
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
goto done;
}
#endif
if (nvgpu_bios_check_dgpu(g, ver) == false) {
goto done;
}
if (g->bios != NULL) {
/* skip alloc/reinit for unrailgate sequence */
nvgpu_pmu_dbg(g, "skip bios init for unrailgate sequence");
goto done;
}
g->bios = (struct nvgpu_bios *)
nvgpu_kzalloc(g, sizeof(struct nvgpu_bios));
if (g->bios == NULL) {
err = -ENOMEM;
goto done;
}
switch (ver) {
#ifdef CONFIG_NVGPU_DGPU
case NVGPU_GPUID_GV100:
nvgpu_gv100_bios_sw_init(g, g->bios);
break;
case NVGPU_GPUID_TU104:
nvgpu_tu104_bios_sw_init(g, g->bios);
break;
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_NON_FUSA)
case NVGPU_NEXT_DGPU_GPUID:
/*
* TODO
* Once the IFR region is removed from the bios image, this can
* be replaced with nvgpu_tu104_bios_sw_init.
*/
err = tu104_bios_verify_devinit(g);
break;
#endif
#endif
default:
goto clean_bios;
}
if ((g->bios)->init != NULL) {
err = g->bios->init(g);
if (err != 0) {
nvgpu_falcon_sw_free(g, FALCON_ID_FECS);
goto clean_bios;
}
}
done:
return err;
clean_bios:
nvgpu_kfree(g, g->bios);
g->bios = NULL;
return err;
}
void nvgpu_bios_sw_deinit(struct gk20a *g, struct nvgpu_bios *bios)
{
if (bios == NULL) {
return;
} else {
nvgpu_kfree(g, bios);
}
}
static u16 nvgpu_bios_rdu16(struct gk20a *g, u32 offset)
{
u32 val;
nvgpu_assert(offset < U32_MAX);
/*
* cast up to u32 because cast to u16 promotes into an int and
* causes a CERT-C INT31 violation
*/
val = nvgpu_safe_add_u32(U32(g->bios->data[offset+1U]) << U32(8),
U32(g->bios->data[offset]));
return nvgpu_safe_cast_u32_to_u16(val);
}
static u32 nvgpu_bios_rdu32(struct gk20a *g, u32 offset)
{
u32 val;
nvgpu_assert(offset <= (U32_MAX - 3U));
val = nvgpu_safe_add_u32(U32(g->bios->data[offset+3U]) << U32(24),
U32(g->bios->data[offset+2U]) << U32(16));
val = nvgpu_safe_add_u32(val, U32(g->bios->data[offset+1U]) << U32(8));
val = nvgpu_safe_add_u32(val, U32(g->bios->data[offset]));
return val;
}
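/*
 * Walk the chain of PCI expansion ROM images in the VBIOS: record the
 * base ROM size, locate the expansion ROM (accounting for the UEFI
 * "private image" layout), then scan the image for the BIT (BIOS
 * Information Table) header and parse it.
 */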
int nvgpu_bios_parse_rom(struct gk20a *g)
{
u32 offset = 0;
u8 last = 0;
bool found = false;
unsigned int i;
while (last == 0U) {
struct pci_exp_rom pci_rom;
struct pci_data_struct pci_data;
struct pci_ext_data_struct pci_ext_data;
nvgpu_memcpy((u8 *)&pci_rom, (u8 *)(g->bios->data + offset),
sizeof(struct pci_exp_rom));
nvgpu_log_fn(g, "pci rom sig %04x ptr %04x block %x",
pci_rom.sig, pci_rom.pci_data_struct_ptr,
pci_rom.size_of_block);
if (pci_rom.sig != PCI_EXP_ROM_SIG &&
pci_rom.sig != PCI_EXP_ROM_SIG_NV) {
nvgpu_err(g, "invalid VBIOS signature");
return -EINVAL;
}
nvgpu_memcpy((u8 *)&pci_data, (u8 *)(g->bios->data + offset +
pci_rom.pci_data_struct_ptr),
sizeof(struct pci_data_struct));
nvgpu_log_fn(g, "pci data sig %08x len %d image len %x type %x last %d max %08x",
pci_data.sig, pci_data.pci_data_struct_len,
pci_data.image_len, pci_data.code_type,
pci_data.last_image,
pci_data.max_runtime_image_len);
/* Get Base ROM Size */
if (pci_data.code_type ==
PCI_DATA_STRUCTURE_CODE_TYPE_VBIOS_BASE) {
g->bios->base_rom_size = nvgpu_safe_mult_u32(
(u32)pci_data.image_len,
PCI_ROM_IMAGE_BLOCK_SIZE);
nvgpu_log_fn(g, "Base ROM Size: %x",
g->bios->base_rom_size);
}
/* Get Expansion ROM offset:
* In the UEFI case, the expansion ROM where the Perf tables
* are located is not necessarily immediately after the base
* VBIOS image. Some VBIOS images use a "private image" layout,
* where the order of the images is the VBIOS base block,
* the UEFI ROM, the expansion ROM, and then the cert. So we
* need to add the UEFI ROM size to offsets within the
* expansion ROM.
*/
if (pci_data.code_type ==
PCI_DATA_STRUCTURE_CODE_TYPE_VBIOS_UEFI) {
u32 ext_offset;
ext_offset = nvgpu_safe_add_u32(offset,
pci_rom.pci_data_struct_ptr);
ext_offset = nvgpu_safe_add_u32(ext_offset,
pci_data.pci_data_struct_len);
ext_offset = nvgpu_safe_add_u32(ext_offset,
0xfU) & ~0xfU;
nvgpu_memcpy((u8 *)&pci_ext_data, (u8 *)(g->bios->data +
ext_offset),
sizeof(struct pci_ext_data_struct));
nvgpu_log_fn(g, "pci ext data sig %08x rev %x len %x sub_image_len %x priv_last %d flags %x",
pci_ext_data.sig,
pci_ext_data.nv_pci_data_ext_rev,
pci_ext_data.nv_pci_data_ext_len,
pci_ext_data.sub_image_len,
pci_ext_data.priv_last_image,
pci_ext_data.flags);
nvgpu_log_fn(g, "expansion rom offset %x",
nvgpu_safe_mult_u32((u32)pci_data.image_len,
PCI_ROM_IMAGE_BLOCK_SIZE));
g->bios->expansion_rom_offset = nvgpu_safe_mult_u32(
(u32)pci_data.image_len,
PCI_ROM_IMAGE_BLOCK_SIZE);
offset = nvgpu_safe_add_u32(offset,
nvgpu_safe_mult_u32(
(u32)pci_ext_data.sub_image_len,
PCI_ROM_IMAGE_BLOCK_SIZE));
last = pci_ext_data.priv_last_image;
} else {
offset = nvgpu_safe_add_u32(offset,
nvgpu_safe_mult_u32(
(u32)pci_data.image_len,
PCI_ROM_IMAGE_BLOCK_SIZE));
last = pci_data.last_image;
}
}
nvgpu_log_info(g, "read bios");
for (i = 0; i < nvgpu_safe_sub_u64(g->bios->size, 6ULL); i++) {
if (nvgpu_bios_rdu16(g, i) == BIT_HEADER_ID &&
nvgpu_bios_rdu32(g, i+2U) == BIT_HEADER_SIGNATURE) {
nvgpu_bios_parse_bit(g, i);
found = true;
}
}
if (!found) {
return -EINVAL;
} else {
return 0;
}
}
static void nvgpu_bios_parse_biosdata(struct gk20a *g, u32 offset)
{
struct biosdata bios_data;
nvgpu_memcpy((u8 *)&bios_data, &g->bios->data[offset],
sizeof(bios_data));
nvgpu_log_fn(g, "bios version %x, oem version %x",
bios_data.version,
bios_data.oem_version);
g->bios->vbios_version = bios_data.version;
g->bios->vbios_oem_version = bios_data.oem_version;
}
static void nvgpu_bios_parse_nvinit_ptrs(struct gk20a *g, u32 offset)
{
struct nvinit_ptrs init_ptrs;
nvgpu_memcpy((u8 *)&init_ptrs, &g->bios->data[offset],
sizeof(init_ptrs));
nvgpu_log_fn(g, "devinit ptr %x size %d", init_ptrs.devinit_tables_ptr,
init_ptrs.devinit_tables_size);
nvgpu_log_fn(g, "bootscripts ptr %x size %d", init_ptrs.bootscripts_ptr,
init_ptrs.bootscripts_size);
g->bios->devinit_tables = &g->bios->data[init_ptrs.devinit_tables_ptr];
g->bios->devinit_tables_size = init_ptrs.devinit_tables_size;
g->bios->bootscripts = &g->bios->data[init_ptrs.bootscripts_ptr];
g->bios->bootscripts_size = init_ptrs.bootscripts_size;
g->bios->condition_table_ptr = init_ptrs.condition_table_ptr;
g->bios->nvlink_config_data_offset = init_ptrs.nvlink_config_data_ptr;
}
static void nvgpu_bios_parse_memory_ptrs(struct gk20a *g, u16 offset, u8 version)
{
struct memory_ptrs_v1 v1;
struct memory_ptrs_v2 v2;
switch (version) {
case MEMORY_PTRS_V1:
nvgpu_memcpy((u8 *)&v1, &g->bios->data[offset], sizeof(v1));
g->bios->mem_strap_data_count = v1.mem_strap_data_count;
g->bios->mem_strap_xlat_tbl_ptr = v1.mem_strap_xlat_tbl_ptr;
break;
case MEMORY_PTRS_V2:
nvgpu_memcpy((u8 *)&v2, &g->bios->data[offset], sizeof(v2));
g->bios->mem_strap_data_count = v2.mem_strap_data_count;
g->bios->mem_strap_xlat_tbl_ptr = v2.mem_strap_xlat_tbl_ptr;
break;
default:
nvgpu_err(g, "unknown vbios memory table version %x", version);
break;
}
return;
}
static void nvgpu_bios_parse_bios_board_id_ptrs(struct gk20a *g, u16 offset)
{
struct bios_board_id board;
nvgpu_memcpy((u8 *)&board, &g->bios->data[offset], sizeof(board));
g->bios->vbios_board_id = board.board_id;
}
static void nvgpu_bios_parse_devinit_appinfo(struct gk20a *g, u32 dmem_offset)
{
struct devinit_engine_interface interface;
nvgpu_memcpy((u8 *)&interface, &g->bios->devinit.dmem[dmem_offset],
sizeof(interface));
nvgpu_log_fn(g, "devinit version %x tables phys %x script phys %x size %d",
interface.version,
interface.tables_phys_base,
interface.script_phys_base,
interface.script_size);
if (interface.version != 1U) {
return;
}
g->bios->devinit_tables_phys_base = interface.tables_phys_base;
g->bios->devinit_script_phys_base = interface.script_phys_base;
}
static int nvgpu_bios_parse_appinfo_table(struct gk20a *g, u32 offset)
{
struct application_interface_table_hdr_v1 hdr;
u32 i;
nvgpu_memcpy((u8 *)&hdr, &g->bios->data[offset], sizeof(hdr));
nvgpu_log_fn(g, "appInfoHdr ver %d size %d entrySize %d entryCount %d",
hdr.version, hdr.header_size,
hdr.entry_size, hdr.entry_count);
if (hdr.version != 1U) {
return 0;
}
offset = nvgpu_safe_add_u32(offset,
nvgpu_safe_cast_u64_to_u32(sizeof(hdr)));
for (i = 0U; i < hdr.entry_count; i++) {
struct application_interface_entry_v1 entry;
nvgpu_memcpy((u8 *)&entry, &g->bios->data[offset],
sizeof(entry));
nvgpu_log_fn(g, "appInfo id %d dmem_offset %d",
entry.id, entry.dmem_offset);
if (entry.id == APPINFO_ID_DEVINIT) {
nvgpu_bios_parse_devinit_appinfo(g, entry.dmem_offset);
}
offset = nvgpu_safe_add_u32(offset, hdr.entry_size);
}
return 0;
}
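/*
 * Parse a falcon ucode descriptor. A v1 descriptor is normalized into the
 * v2 field layout; the bootloader, secure ucode and DMEM images are then
 * located relative to the descriptor, and the application interface table
 * that follows is parsed for the devinit DMEM interface.
 */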
static int nvgpu_bios_parse_falcon_ucode_desc(struct gk20a *g,
struct nvgpu_bios_ucode *ucode, u32 offset)
{
union falcon_ucode_desc udesc;
struct falcon_ucode_desc_v2 desc;
u8 version;
u16 desc_size;
int ret = 0;
nvgpu_memcpy((u8 *)&udesc, &g->bios->data[offset], sizeof(udesc));
if (FALCON_UCODE_IS_VERSION_AVAILABLE(udesc)) {
version = FALCON_UCODE_GET_VERSION(udesc);
desc_size = FALCON_UCODE_GET_DESC_SIZE(udesc);
} else {
size_t tmp_size = sizeof(udesc.v1);
version = 1;
nvgpu_assert(tmp_size <= (size_t)U16_MAX);
desc_size = U16(tmp_size);
}
switch (version) {
case 1:
desc.stored_size = udesc.v1.hdr_size.stored_size;
desc.uncompressed_size = udesc.v1.uncompressed_size;
desc.virtual_entry = udesc.v1.virtual_entry;
desc.interface_offset = udesc.v1.interface_offset;
desc.imem_phys_base = udesc.v1.imem_phys_base;
desc.imem_load_size = udesc.v1.imem_load_size;
desc.imem_virt_base = udesc.v1.imem_virt_base;
desc.imem_sec_base = udesc.v1.imem_sec_base;
desc.imem_sec_size = udesc.v1.imem_sec_size;
desc.dmem_offset = udesc.v1.dmem_offset;
desc.dmem_phys_base = udesc.v1.dmem_phys_base;
desc.dmem_load_size = udesc.v1.dmem_load_size;
break;
case 2:
nvgpu_memcpy((u8 *)&desc, (u8 *)&udesc, sizeof(udesc.v2));
break;
default:
nvgpu_log_info(g, "invalid version");
ret = -EINVAL;
break;
}
if (ret != 0) {
return ret;
}
nvgpu_log_info(g, "falcon ucode desc version %x len %x", version, desc_size);
nvgpu_log_info(g, "falcon ucode desc stored size %x uncompressed size %x",
desc.stored_size, desc.uncompressed_size);
nvgpu_log_info(g, "falcon ucode desc virtualEntry %x, interfaceOffset %x",
desc.virtual_entry, desc.interface_offset);
nvgpu_log_info(g, "falcon ucode IMEM phys base %x, load size %x virt base %x sec base %x sec size %x",
desc.imem_phys_base, desc.imem_load_size,
desc.imem_virt_base, desc.imem_sec_base,
desc.imem_sec_size);
nvgpu_log_info(g, "falcon ucode DMEM offset %x phys base %x, load size %x",
desc.dmem_offset, desc.dmem_phys_base,
desc.dmem_load_size);
if (desc.stored_size != desc.uncompressed_size) {
nvgpu_log_info(g, "does not match");
return -EINVAL;
}
ucode->code_entry_point = desc.virtual_entry;
ucode->bootloader = &g->bios->data[offset] + desc_size;
ucode->bootloader_phys_base = desc.imem_phys_base;
ucode->bootloader_size = nvgpu_safe_sub_u32(desc.imem_load_size,
desc.imem_sec_size);
ucode->ucode = ucode->bootloader + ucode->bootloader_size;
ucode->phys_base = nvgpu_safe_add_u32(ucode->bootloader_phys_base,
ucode->bootloader_size);
ucode->size = desc.imem_sec_size;
ucode->dmem = ucode->bootloader + desc.dmem_offset;
ucode->dmem_phys_base = desc.dmem_phys_base;
ucode->dmem_size = desc.dmem_load_size;
offset = nvgpu_safe_add_u32(offset, U32(desc_size));
offset = nvgpu_safe_add_u32(offset, desc.dmem_offset);
offset = nvgpu_safe_add_u32(offset, desc.interface_offset);
ret = nvgpu_bios_parse_appinfo_table(g, offset);
return ret;
}
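/*
 * Walk the falcon ucode table and pick up the PMU devinit and pre-OS
 * ucode descriptors. Each descriptor pointer is tried as an absolute
 * offset first and, on failure, retried relative to the expansion ROM.
 */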
static int nvgpu_bios_parse_falcon_ucode_table(struct gk20a *g, u32 offset)
{
struct falcon_ucode_table_hdr_v1 hdr;
u32 i;
nvgpu_memcpy((u8 *)&hdr, &g->bios->data[offset], sizeof(hdr));
nvgpu_log_fn(g, "falcon ucode table ver %d size %d entrySize %d entryCount %d descVer %d descSize %d",
hdr.version, hdr.header_size,
hdr.entry_size, hdr.entry_count,
hdr.desc_version, hdr.desc_size);
if (hdr.version != 1U) {
return -EINVAL;
}
offset = nvgpu_safe_add_u32(offset, hdr.header_size);
for (i = 0U; i < hdr.entry_count; i++) {
struct falcon_ucode_table_entry_v1 entry;
nvgpu_memcpy((u8 *)&entry, &g->bios->data[offset],
sizeof(entry));
nvgpu_log_fn(g, "falcon ucode table entry appid %x targetId %x descPtr %x",
entry.application_id, entry.target_id,
entry.desc_ptr);
if (entry.target_id == TARGET_ID_PMU &&
entry.application_id == APPLICATION_ID_DEVINIT) {
int err;
err = nvgpu_bios_parse_falcon_ucode_desc(g,
&g->bios->devinit, entry.desc_ptr);
if (err != 0) {
err = nvgpu_bios_parse_falcon_ucode_desc(g,
&g->bios->devinit,
nvgpu_safe_add_u32(entry.desc_ptr,
g->bios->expansion_rom_offset));
}
if (err != 0) {
nvgpu_err(g,
"could not parse devinit ucode desc");
}
} else if (entry.target_id == TARGET_ID_PMU &&
entry.application_id == APPLICATION_ID_PRE_OS) {
int err;
err = nvgpu_bios_parse_falcon_ucode_desc(g,
&g->bios->preos, entry.desc_ptr);
if (err != 0) {
err = nvgpu_bios_parse_falcon_ucode_desc(g,
&g->bios->preos,
nvgpu_safe_add_u32(entry.desc_ptr,
g->bios->expansion_rom_offset));
}
if (err != 0) {
nvgpu_err(g,
"could not parse preos ucode desc");
}
} else {
nvgpu_log_info(g, "App_id: %u and target_id: %u"
" combination not supported.",
entry.application_id,
entry.target_id);
}
offset = nvgpu_safe_add_u32(offset, hdr.entry_size);
}
return 0;
}
static void nvgpu_bios_parse_falcon_data_v2(struct gk20a *g, u32 offset)
{
struct falcon_data_v2 falcon_data;
int err;
nvgpu_memcpy((u8 *)&falcon_data, &g->bios->data[offset],
sizeof(falcon_data));
nvgpu_log_fn(g, "falcon ucode table ptr %x",
falcon_data.falcon_ucode_table_ptr);
err = nvgpu_bios_parse_falcon_ucode_table(g,
falcon_data.falcon_ucode_table_ptr);
if (err != 0) {
err = nvgpu_bios_parse_falcon_ucode_table(g,
nvgpu_safe_add_u32(falcon_data.falcon_ucode_table_ptr,
g->bios->expansion_rom_offset));
}
if (err != 0) {
nvgpu_err(g, "could not parse falcon ucode table");
}
}
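/*
 * Look up a perf table pointer for the given BIT token. Pointer entries
 * are 16 bits wide for the virtual pointers token and 32 bits otherwise;
 * an offset beyond the base ROM is taken relative to the expansion ROM.
 */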
void *nvgpu_bios_get_perf_table_ptrs(struct gk20a *g,
struct bit_token *ptoken, u8 table_id)
{
u32 perf_table_id_offset = 0;
u8 *perf_table_ptr = NULL;
u8 data_size = 4;
if (ptoken != NULL) {
if (ptoken->token_id == TOKEN_ID_VIRT_PTRS) {
perf_table_id_offset =
*((u16 *)((uintptr_t)g->bios->data +
ptoken->data_ptr +
(U16(table_id) *
U16(PERF_PTRS_WIDTH_16))));
data_size = PERF_PTRS_WIDTH_16;
} else {
perf_table_id_offset =
*((u32 *)((uintptr_t)g->bios->data +
ptoken->data_ptr +
(U16(table_id) *
U16(PERF_PTRS_WIDTH))));
data_size = PERF_PTRS_WIDTH;
}
} else {
return (void *)perf_table_ptr;
}
if (table_id < (ptoken->data_size/data_size)) {
nvgpu_log_info(g, "Perf_Tbl_ID-offset 0x%x Tbl_ID_Ptr-offset- 0x%x",
(ptoken->data_ptr +
(U16(table_id) * U16(data_size))),
perf_table_id_offset);
if (perf_table_id_offset != 0U) {
/* check if perf_table_id_offset is beyond base rom */
if (perf_table_id_offset > g->bios->base_rom_size) {
u32 tmp_index = nvgpu_safe_add_u32(
g->bios->expansion_rom_offset,
perf_table_id_offset);
perf_table_ptr = &g->bios->data[tmp_index];
} else {
perf_table_ptr =
&g->bios->data[perf_table_id_offset];
}
} else {
nvgpu_warn(g, "PERF TABLE ID %d is NULL",
table_id);
}
} else {
nvgpu_warn(g, "INVALID PERF TABLE ID - %d ", table_id);
}
return (void *)perf_table_ptr;
}
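/*
 * Parse the BIT (BIOS Information Table): read the header, then walk the
 * token entries and dispatch on token id, either parsing the referenced
 * data immediately or recording the token pointer for later lookups.
 */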
static void nvgpu_bios_parse_bit(struct gk20a *g, u32 offset)
{
struct bios_bit bit;
struct bit_token token;
u32 i;
nvgpu_log_fn(g, " ");
nvgpu_memcpy((u8 *)&bit, &g->bios->data[offset], sizeof(bit));
nvgpu_log_info(g, "BIT header: %04x %08x", bit.id, bit.signature);
nvgpu_log_info(g, "tokens: %d entries * %d bytes",
bit.token_entries, bit.token_size);
offset = nvgpu_safe_add_u32(offset, bit.header_size);
for (i = 0U; i < bit.token_entries; i++) {
nvgpu_memcpy((u8 *)&token, &g->bios->data[offset],
sizeof(token));
nvgpu_log_info(g, "BIT token id %d ptr %d size %d ver %d",
token.token_id, token.data_ptr,
token.data_size, token.data_version);
switch (token.token_id) {
case TOKEN_ID_BIOSDATA:
nvgpu_bios_parse_biosdata(g, token.data_ptr);
break;
case TOKEN_ID_NVINIT_PTRS:
nvgpu_bios_parse_nvinit_ptrs(g, token.data_ptr);
break;
case TOKEN_ID_FALCON_DATA:
if (token.data_version == 2U) {
nvgpu_bios_parse_falcon_data_v2(g,
token.data_ptr);
}
break;
case TOKEN_ID_PERF_PTRS:
g->bios->perf_token =
(struct bit_token *)
((uintptr_t)g->bios->data + offset);
break;
case TOKEN_ID_CLOCK_PTRS:
g->bios->clock_token =
(struct bit_token *)
((uintptr_t)g->bios->data + offset);
break;
case TOKEN_ID_VIRT_PTRS:
g->bios->virt_token =
(struct bit_token *)
((uintptr_t)g->bios->data + offset);
break;
case TOKEN_ID_MEMORY_PTRS:
nvgpu_bios_parse_memory_ptrs(g, token.data_ptr,
token.data_version);
break;
case TOKEN_ID_BIOS_BOARD_ID_PTRS:
nvgpu_bios_parse_bios_board_id_ptrs(g,
token.data_ptr);
break;
default:
nvgpu_log_info(g, "Token id %d not supported",
token.token_id);
break;
}
offset = nvgpu_safe_add_u32(offset, bit.token_size);
}
nvgpu_log_fn(g, "done");
}
static u32 nvgpu_bios_readbyte_impl(struct gk20a *g, u32 offset)
{
return g->bios->data[offset];
}
u8 nvgpu_bios_read_u8(struct gk20a *g, u32 offset)
{
u32 val = nvgpu_bios_readbyte_impl(g, offset);
return nvgpu_safe_cast_u32_to_u8(val);
}
s8 nvgpu_bios_read_s8(struct gk20a *g, u32 offset)
{
u32 val;
val = nvgpu_bios_readbyte_impl(g, offset);
val = ((val & 0x80U) != 0U) ? (val | ~0xffU) : val;
return nvgpu_safe_cast_u32_to_s8(val);
}
u16 nvgpu_bios_read_u16(struct gk20a *g, u32 offset)
{
u32 val;
nvgpu_assert(offset < U32_MAX);
/*
* cast up to u32 because cast to u16 promotes into an int and
* causes a CERT-C INT31 violation
*/
val = U32(nvgpu_bios_readbyte_impl(g, offset) |
(U32(nvgpu_bios_readbyte_impl(g, offset+1U)) << 8U));
return nvgpu_safe_cast_u32_to_u16(val);
}
u32 nvgpu_bios_read_u32(struct gk20a *g, u32 offset)
{
u32 val;
nvgpu_assert(offset < (U32_MAX - 3U));
val = U32(nvgpu_bios_readbyte_impl(g, offset) |
(nvgpu_bios_readbyte_impl(g, offset+1U) << 8U) |
(nvgpu_bios_readbyte_impl(g, offset+2U) << 16U) |
(nvgpu_bios_readbyte_impl(g, offset+3U) << 24U));
return val;
}
#ifdef CONFIG_NVGPU_DGPU
bool nvgpu_bios_wait_for_init_done(struct gk20a *g)
{
struct nvgpu_timeout timeout;
int err;
err = nvgpu_timeout_init(g, &timeout,
NVGPU_BIOS_DEVINIT_VERIFY_TIMEOUT_MS, NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
return false;
}
/* Wait until VBIOS init has completed */
do {
if (g->bios_is_init == true) {
return true;
}
nvgpu_msleep(NVGPU_BIOS_DEVINIT_VERIFY_COMPLETION_MS);
} while (nvgpu_timeout_expired(&timeout) == 0);
if (g->bios_is_init == true) {
return true;
} else {
return false;
}
}
#endif


@@ -0,0 +1,383 @@
/*
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/bios.h>
#include <nvgpu/nvgpu_common.h>
#include <nvgpu/timers.h>
#include <nvgpu/io.h>
#include <nvgpu/gk20a.h>
#include "bios_sw_gv100.h"
#define BIOS_SIZE 0x90000
#define PMU_BOOT_TIMEOUT_DEFAULT 100U /* usec */
#define PMU_BOOT_TIMEOUT_MAX 2000000U /* usec */
#define SCRATCH_PREOS_PROGRESS 6U
#define PREOS_PROGRESS_MASK(r) (((r) >> 12U) & 0xfU)
#define PREOS_PROGRESS_NOT_STARTED 0U
#define PREOS_PROGRESS_STARTED 1U
#define PREOS_PROGRESS_EXIT 2U
#define PREOS_PROGRESS_EXIT_SECUREMODE 3U
#define PREOS_PROGRESS_ABORTED 6U
#define SCRATCH_PMU_EXIT_AND_HALT 1U
#define PMU_EXIT_AND_HALT_SET(r, v) (((r) & ~0x200U) | (v))
#define PMU_EXIT_AND_HALT_YES BIT32(9)
#define SCRATCH_PRE_OS_RELOAD 1U
#define PRE_OS_RELOAD_SET(r, v) (((r) & ~0x100U) | (v))
#define PRE_OS_RELOAD_YES BIT32(8)
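/*
 * Pre-OS handshake: the pre-OS ucode reports its progress in bits 15:12
 * of SW scratch register 6, while the driver requests a reload through
 * bit 8 of scratch register 1 and an exit-and-halt through bit 9 of the
 * same register.
 */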
void gv100_bios_preos_reload_check(struct gk20a *g)
{
u32 progress = g->ops.bus.read_sw_scratch(g, SCRATCH_PREOS_PROGRESS);
if (PREOS_PROGRESS_MASK(progress) != PREOS_PROGRESS_NOT_STARTED) {
u32 reload = g->ops.bus.read_sw_scratch(g,
SCRATCH_PRE_OS_RELOAD);
g->ops.bus.write_sw_scratch(g, SCRATCH_PRE_OS_RELOAD,
PRE_OS_RELOAD_SET(reload, PRE_OS_RELOAD_YES));
}
}
int gv100_bios_preos_wait_for_halt(struct gk20a *g)
{
int err = -EINVAL;
u32 progress;
u32 tmp;
bool preos_completed;
struct nvgpu_timeout timeout;
nvgpu_udelay(PMU_BOOT_TIMEOUT_DEFAULT);
/* Check the progress */
progress = g->ops.bus.read_sw_scratch(g, SCRATCH_PREOS_PROGRESS);
if (PREOS_PROGRESS_MASK(progress) == PREOS_PROGRESS_STARTED) {
err = 0;
/* Complete the handshake */
tmp = g->ops.bus.read_sw_scratch(g, SCRATCH_PMU_EXIT_AND_HALT);
g->ops.bus.write_sw_scratch(g, SCRATCH_PMU_EXIT_AND_HALT,
PMU_EXIT_AND_HALT_SET(tmp, PMU_EXIT_AND_HALT_YES));
err = nvgpu_timeout_init(g, &timeout,
PMU_BOOT_TIMEOUT_MAX /
PMU_BOOT_TIMEOUT_DEFAULT,
NVGPU_TIMER_RETRY_TIMER);
if (err != 0) {
nvgpu_err(g, "NVGPU timeout init failed");
return err;
}
do {
progress = g->ops.bus.read_sw_scratch(g,
SCRATCH_PREOS_PROGRESS);
preos_completed = g->ops.falcon.is_falcon_cpu_halted(
g->pmu->flcn) &&
(PREOS_PROGRESS_MASK(progress) ==
PREOS_PROGRESS_EXIT);
nvgpu_udelay(PMU_BOOT_TIMEOUT_DEFAULT);
} while (!preos_completed &&
(nvgpu_timeout_expired(&timeout) == 0));
}
return err;
}
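/*
 * Run devinit on the PMU falcon: copy the bootloader and secure ucode
 * into IMEM and the descriptor DMEM image, devinit tables and boot
 * scripts into DMEM, bootstrap the falcon, then poll until it halts and
 * top scratch1 reports devinit completion.
 */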
int gv100_bios_devinit(struct gk20a *g)
{
int err = 0;
bool devinit_completed;
struct nvgpu_timeout timeout;
u32 top_scratch1_reg;
nvgpu_log_fn(g, " ");
if (nvgpu_falcon_reset(g->pmu->flcn) != 0) {
err = -ETIMEDOUT;
goto out;
}
err = nvgpu_falcon_copy_to_imem(g->pmu->flcn,
g->bios->devinit.bootloader_phys_base,
g->bios->devinit.bootloader,
g->bios->devinit.bootloader_size,
0, 0, g->bios->devinit.bootloader_phys_base >> 8);
if (err != 0) {
nvgpu_err(g, "bios devinit bootloader copy failed %d", err);
goto out;
}
err = nvgpu_falcon_copy_to_imem(g->pmu->flcn, g->bios->devinit.phys_base,
g->bios->devinit.ucode,
g->bios->devinit.size,
0, 1, g->bios->devinit.phys_base >> 8);
if (err != 0) {
nvgpu_err(g, "bios devinit ucode copy failed %d", err);
goto out;
}
err = nvgpu_falcon_copy_to_dmem(g->pmu->flcn,
g->bios->devinit.dmem_phys_base,
g->bios->devinit.dmem,
g->bios->devinit.dmem_size,
0);
if (err != 0) {
nvgpu_err(g, "bios devinit dmem copy failed %d", err);
goto out;
}
err = nvgpu_falcon_copy_to_dmem(g->pmu->flcn,
g->bios->devinit_tables_phys_base,
g->bios->devinit_tables,
g->bios->devinit_tables_size,
0);
if (err != 0) {
nvgpu_err(g, "fbios devinit tables copy failed %d", err);
goto out;
}
err = nvgpu_falcon_copy_to_dmem(g->pmu->flcn,
g->bios->devinit_script_phys_base,
g->bios->bootscripts,
g->bios->bootscripts_size,
0);
if (err != 0) {
nvgpu_err(g, "bios devinit bootscripts copy failed %d", err);
goto out;
}
err = nvgpu_falcon_bootstrap(g->pmu->flcn,
g->bios->devinit.code_entry_point);
if (err != 0) {
nvgpu_err(g, "falcon bootstrap failed %d", err);
goto out;
}
err = nvgpu_timeout_init(g, &timeout,
PMU_BOOT_TIMEOUT_MAX /
PMU_BOOT_TIMEOUT_DEFAULT,
NVGPU_TIMER_RETRY_TIMER);
if (err != 0) {
nvgpu_err(g, "nvgpu timeout init failed %d", err);
goto out;
}
do {
top_scratch1_reg = g->ops.top.read_top_scratch1_reg(g);
devinit_completed = ((g->ops.falcon.is_falcon_cpu_halted(
g->pmu->flcn) != 0U) &&
(g->ops.top.top_scratch1_devinit_completed(g,
top_scratch1_reg)) != 0U);
nvgpu_udelay(PMU_BOOT_TIMEOUT_DEFAULT);
} while (!devinit_completed && (nvgpu_timeout_expired(&timeout) == 0));
if (nvgpu_timeout_peek_expired(&timeout)) {
err = -ETIMEDOUT;
goto out;
}
err = nvgpu_falcon_clear_halt_intr_status(g->pmu->flcn,
nvgpu_get_poll_timeout(g));
if (err != 0) {
nvgpu_err(g, "falcon_clear_halt_intr_status failed %d", err);
goto out;
}
out:
nvgpu_log_fn(g, "done");
return err;
}
int gv100_bios_init(struct gk20a *g)
{
unsigned int i;
int err;
nvgpu_log_fn(g, " ");
if (g->bios_is_init) {
return 0;
}
nvgpu_log_info(g, "reading bios from EEPROM");
g->bios->size = BIOS_SIZE;
g->bios->data = nvgpu_vmalloc(g, BIOS_SIZE);
if (g->bios->data == NULL) {
return -ENOMEM;
}
if (g->ops.xve.disable_shadow_rom != NULL) {
g->ops.xve.disable_shadow_rom(g);
}
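	/*
	 * Read the VBIOS out of the EEPROM through the register window at
	 * BAR0 offset 0x300000, one 32-bit word at a time, unpacking each
	 * word MSB-first into the byte buffer.
	 */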
for (i = 0U; i < g->bios->size/4U; i++) {
u32 val = be32_to_cpu(gk20a_readl(g, 0x300000U + i*4U));
g->bios->data[(i*4U)] = (val >> 24U) & 0xffU;
g->bios->data[(i*4U)+1U] = (val >> 16U) & 0xffU;
g->bios->data[(i*4U)+2U] = (val >> 8U) & 0xffU;
g->bios->data[(i*4U)+3U] = val & 0xffU;
}
if (g->ops.xve.enable_shadow_rom != NULL) {
g->ops.xve.enable_shadow_rom(g);
}
err = nvgpu_bios_parse_rom(g);
if (err != 0) {
goto free_firmware;
}
if (g->bios->verify_version != NULL) {
if (g->bios->verify_version(g) < 0) {
err = -EINVAL;
goto free_firmware;
}
}
nvgpu_log_fn(g, "done");
err = nvgpu_bios_devinit(g, g->bios);
if (err != 0) {
nvgpu_err(g, "devinit failed");
goto free_firmware;
}
if (nvgpu_is_enabled(g, NVGPU_PMU_RUN_PREOS) &&
(g->bios->preos_bios != NULL)) {
err = g->bios->preos_bios(g);
if (err != 0) {
nvgpu_err(g, "pre-os failed");
goto free_firmware;
}
}
if (g->bios->verify_devinit != NULL) {
err = g->bios->verify_devinit(g);
if (err != 0) {
nvgpu_err(g, "devinit status verification failed");
goto free_firmware;
}
}
g->bios_is_init = true;
return 0;
free_firmware:
if (g->bios->data != NULL) {
nvgpu_vfree(g, g->bios->data);
}
return err;
}
int gv100_bios_preos(struct gk20a *g)
{
int err = 0;
nvgpu_log_fn(g, " ");
if (nvgpu_falcon_reset(g->pmu->flcn) != 0) {
err = -ETIMEDOUT;
goto out;
}
if (g->bios->preos_reload_check != NULL) {
g->bios->preos_reload_check(g);
}
err = nvgpu_falcon_copy_to_imem(g->pmu->flcn,
g->bios->preos.bootloader_phys_base,
g->bios->preos.bootloader,
g->bios->preos.bootloader_size,
0, 0, g->bios->preos.bootloader_phys_base >> 8);
if (err != 0) {
nvgpu_err(g, "bios preos bootloader copy failed %d", err);
goto out;
}
err = nvgpu_falcon_copy_to_imem(g->pmu->flcn, g->bios->preos.phys_base,
g->bios->preos.ucode,
g->bios->preos.size,
0, 1, g->bios->preos.phys_base >> 8);
if (err != 0) {
nvgpu_err(g, "bios preos ucode copy failed %d", err);
goto out;
}
err = nvgpu_falcon_copy_to_dmem(g->pmu->flcn, g->bios->preos.dmem_phys_base,
g->bios->preos.dmem,
g->bios->preos.dmem_size,
0);
if (err != 0) {
nvgpu_err(g, "bios preos dmem copy failed %d", err);
goto out;
}
err = nvgpu_falcon_bootstrap(g->pmu->flcn,
g->bios->preos.code_entry_point);
if (err != 0) {
nvgpu_err(g, "falcon bootstrap failed %d", err);
goto out;
}
err = nvgpu_bios_preos_wait_for_halt(g, g->bios);
if (err != 0) {
nvgpu_err(g, "preos_wait_for_halt failed %d", err);
goto out;
}
err = nvgpu_falcon_clear_halt_intr_status(g->pmu->flcn,
nvgpu_get_poll_timeout(g));
if (err != 0) {
nvgpu_err(g, "falcon_clear_halt_intr_status failed %d", err);
goto out;
}
out:
nvgpu_log_fn(g, "done");
return err;
}
void nvgpu_gv100_bios_sw_init(struct gk20a *g,
struct nvgpu_bios *bios)
{
bios->init = gv100_bios_init;
bios->verify_version = NULL;
bios->preos_wait_for_halt = gv100_bios_preos_wait_for_halt;
bios->preos_reload_check = gv100_bios_preos_reload_check;
bios->preos_bios = gv100_bios_preos;
bios->devinit_bios = gv100_bios_devinit;
bios->verify_devinit = NULL;
}


@@ -0,0 +1,37 @@
/*
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_BIOS_SW_GV100_H
#define NVGPU_BIOS_SW_GV100_H
struct gk20a;
struct nvgpu_bios;
void gv100_bios_preos_reload_check(struct gk20a *g);
int gv100_bios_preos_wait_for_halt(struct gk20a *g);
int gv100_bios_devinit(struct gk20a *g);
int gv100_bios_preos(struct gk20a *g);
int gv100_bios_init(struct gk20a *g);
void nvgpu_gv100_bios_sw_init(struct gk20a *g,
struct nvgpu_bios *bios);
#endif /* NVGPU_BIOS_SW_GV100_H */


@@ -0,0 +1,168 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/types.h>
#include <nvgpu/timers.h>
#include <nvgpu/io.h>
#include <nvgpu/gk20a.h>
#include "bios_sw_gv100.h"
#include "bios_sw_tu104.h"
#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT_PROGRESS_MASK \
0xFFU
#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT_PROGRESS_COMPLETED \
0xFFU
#define NVGPU_PG189_MIN_VBIOS 0x90041800U
#define NVGPU_PG189_0600_VBIOS 0x90049500U
#define NVGPU_PG189_0600_QS_VBIOS 0x9004A200U
#define NVGPU_PG189_0601_VBIOS 0x90045a00U
#define NVGPU_PG189_0610_QS_VBIOS 0x90049100U
#define NVGPU_PG189_0601_QS_VBIOS 0x90049600U
struct nvgpu_vbios_board {
u16 board_id;
u32 vbios_version;
};
#define NVGPU_PG189_NUM_VBIOS_BOARDS 5U
static struct nvgpu_vbios_board vbios_boards[NVGPU_PG189_NUM_VBIOS_BOARDS] = {
/* SKU 600 ES/CS, SKU 606*/
[0] = {
.board_id = 0x0068,
.vbios_version = NVGPU_PG189_0600_VBIOS,
},
/* SKU 600 QS */
[1] = {
.board_id = 0x0183,
.vbios_version = NVGPU_PG189_0600_QS_VBIOS,
},
/* SKU 601 CS */
[2] = {
.board_id = 0x00E8,
.vbios_version = NVGPU_PG189_0601_VBIOS,
},
/* SKU 610 QS */
[3] = {
.board_id = 0x01a3,
.vbios_version = NVGPU_PG189_0610_QS_VBIOS,
},
/* SKU 601 QS */
[4] = {
.board_id = 0x01cc,
.vbios_version = NVGPU_PG189_0601_QS_VBIOS,
},
};
static int tu104_bios_verify_version(struct gk20a *g)
{
struct nvgpu_vbios_board *board = NULL;
u32 i;
nvgpu_info(g, "VBIOS board id %04x", g->bios->vbios_board_id);
nvgpu_info(g, "VBIOS version %08x:%02x\n",
g->bios->vbios_version,
g->bios->vbios_oem_version);
if (g->bios->vbios_version < NVGPU_PG189_MIN_VBIOS) {
nvgpu_err(g, "unsupported VBIOS version %08x",
g->bios->vbios_version);
return -EINVAL;
}
for (i = 0; i < NVGPU_PG189_NUM_VBIOS_BOARDS; i++) {
if (g->bios->vbios_board_id == vbios_boards[i].board_id) {
board = &vbios_boards[i];
}
}
if (board == NULL) {
nvgpu_warn(g, "unknown board id %04x",
g->bios->vbios_board_id);
return 0;
}
if ((board->vbios_version != 0U) &&
(g->bios->vbios_version < board->vbios_version)) {
nvgpu_warn(g, "VBIOS version should be at least %08x",
board->vbios_version);
}
return 0;
}
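/*
 * On TU104 the driver does not run devinit itself (devinit_bios is NULL);
 * instead it polls the PGC6 AON secure scratch register until the GFW
 * boot progress field reports completion or the timeout expires.
 */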
int tu104_bios_verify_devinit(struct gk20a *g)
{
struct nvgpu_timeout timeout;
u32 val;
u32 aon_secure_scratch_reg;
int err;
err = nvgpu_timeout_init(g, &timeout,
NVGPU_BIOS_DEVINIT_VERIFY_TIMEOUT_MS, NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
return err;
}
do {
aon_secure_scratch_reg = g->ops.bios.get_aon_secure_scratch_reg(g, 0);
val = nvgpu_readl(g, aon_secure_scratch_reg);
val &= NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT_PROGRESS_MASK;
if (val == NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT_PROGRESS_COMPLETED) {
nvgpu_log_info(g, "devinit complete");
return 0;
}
nvgpu_udelay(NVGPU_BIOS_DEVINIT_VERIFY_DELAY_US);
} while (nvgpu_timeout_expired(&timeout) == 0);
return -ETIMEDOUT;
}
int tu104_bios_init(struct gk20a *g)
{
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
return 0;
}
#endif
return gv100_bios_init(g);
}
void nvgpu_tu104_bios_sw_init(struct gk20a *g,
struct nvgpu_bios *bios)
{
bios->init = tu104_bios_init;
bios->verify_version = tu104_bios_verify_version;
bios->preos_wait_for_halt = NULL;
bios->preos_reload_check = NULL;
bios->preos_bios = NULL;
bios->devinit_bios = NULL;
bios->verify_devinit = tu104_bios_verify_devinit;
}


@@ -0,0 +1,37 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_BIOS_SW_TU104_H
#define NVGPU_BIOS_SW_TU104_H
#define NVGPU_BIOS_DEVINIT_VERIFY_TIMEOUT_MS 1000U
#define NVGPU_BIOS_DEVINIT_VERIFY_DELAY_US 10U
#define NVGPU_BIOS_DEVINIT_VERIFY_COMPLETION_MS 1U
struct gk20a;
int tu104_bios_verify_devinit(struct gk20a *g);
int tu104_bios_init(struct gk20a *g);
void nvgpu_tu104_bios_sw_init(struct gk20a *g,
struct nvgpu_bios *bios);
#endif /* NVGPU_BIOS_SW_TU104_H */


@@ -0,0 +1,73 @@
/*
* Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/gk20a.h>
#include <nvgpu/nvlink_bios.h>
#include <nvgpu/string.h>
#include <nvgpu/bios.h>
int nvgpu_bios_get_nvlink_config_data(struct gk20a *g)
{
int ret = 0;
struct nvlink_config_data_hdr_v1 config;
if (g->bios->nvlink_config_data_offset == 0U) {
return -EINVAL;
}
nvgpu_memcpy((u8 *)&config,
&g->bios->data[g->bios->nvlink_config_data_offset],
sizeof(config));
if (config.version != NVLINK_CONFIG_DATA_HDR_VER_10) {
nvgpu_err(g, "unsupported nvlink bios version: 0x%x",
config.version);
return -EINVAL;
}
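	/*
	 * The header size selects the config data revision: the v1.2 header
	 * adds the AC coupling mask and the v1.1 header adds the
	 * train-at-boot flag on top of the v1.0 link masks.
	 */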
switch (config.hdr_size) {
case NVLINK_CONFIG_DATA_HDR_12_SIZE:
g->nvlink.ac_coupling_mask = config.ac_coupling_mask;
g->nvlink.train_at_boot = config.train_at_boot;
g->nvlink.link_disable_mask = config.link_disable_mask;
g->nvlink.link_mode_mask = config.link_mode_mask;
g->nvlink.link_refclk_mask = config.link_refclk_mask;
break;
case NVLINK_CONFIG_DATA_HDR_11_SIZE:
g->nvlink.train_at_boot = config.train_at_boot;
g->nvlink.link_disable_mask = config.link_disable_mask;
g->nvlink.link_mode_mask = config.link_mode_mask;
g->nvlink.link_refclk_mask = config.link_refclk_mask;
break;
case NVLINK_CONFIG_DATA_HDR_10_SIZE:
g->nvlink.link_disable_mask = config.link_disable_mask;
g->nvlink.link_mode_mask = config.link_mode_mask;
g->nvlink.link_refclk_mask = config.link_refclk_mask;
break;
default:
nvgpu_err(g, "invalid nvlink bios config size");
ret = -EINVAL;
break;
}
return ret;
}