Open source GPL/LGPL release

This commit is contained in:
svcmobrel-release
2025-12-19 15:25:44 -08:00
commit 9fc87a7ec7
2261 changed files with 576825 additions and 0 deletions

@@ -0,0 +1,172 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/types.h>
#include <nvgpu/dma.h>
#include <nvgpu/firmware.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/acr.h>
#include "acr_priv.h"
#ifdef CONFIG_NVGPU_ACR_LEGACY
#include "acr_sw_gm20b.h"
#include "acr_sw_gp10b.h"
#endif
#include "acr_sw_gv11b.h"
#ifdef CONFIG_NVGPU_DGPU
#include "acr_sw_tu104.h"
#endif
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_NON_FUSA)
#include "nvgpu_next_gpuid.h"
#endif
/* ACR public APIs */
bool nvgpu_acr_is_lsf_lazy_bootstrap(struct gk20a *g, struct nvgpu_acr *acr,
u32 falcon_id)
{
if (acr == NULL) {
return false;
}
if ((falcon_id == FALCON_ID_FECS) || (falcon_id == FALCON_ID_PMU) ||
(falcon_id == FALCON_ID_GPCCS)) {
return acr->lsf[falcon_id].is_lazy_bootstrap;
} else {
nvgpu_err(g, "Invalid falcon id\n");
return false;
}
}
#ifdef CONFIG_NVGPU_DGPU
int nvgpu_acr_alloc_blob_prerequisite(struct gk20a *g, struct nvgpu_acr *acr,
size_t size)
{
if (acr == NULL) {
return -EINVAL;
}
return acr->alloc_blob_space(g, size, &acr->ucode_blob);
}
#endif
/* ACR blob construct & bootstrap */
int nvgpu_acr_bootstrap_hs_acr(struct gk20a *g, struct nvgpu_acr *acr)
{
int err = 0;
if (acr == NULL) {
return -EINVAL;
}
err = acr->bootstrap_hs_acr(g, acr);
if (err != 0) {
nvgpu_err(g, "ACR bootstrap failed");
}
nvgpu_log(g, gpu_dbg_gr, "ACR bootstrap Done");
return err;
}
int nvgpu_acr_construct_execute(struct gk20a *g)
{
int err = 0;
if (g->acr == NULL) {
return -EINVAL;
}
err = g->acr->prepare_ucode_blob(g);
if (err != 0) {
nvgpu_err(g, "ACR ucode blob prepare failed");
goto done;
}
err = nvgpu_acr_bootstrap_hs_acr(g, g->acr);
if (err != 0) {
nvgpu_err(g, "Bootstrap HS ACR failed");
}
done:
return err;
}
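
For illustration, a minimal standalone sketch (not nvgpu code; all toy_* names are invented) of the dispatch pattern nvgpu_acr_construct_execute() drives above: two chip-specific hooks, blob construction first, then HS bootstrap.

#include <errno.h>
#include <stdio.h>

/* Toy stand-ins for the nvgpu_acr function-pointer hooks. */
struct toy_acr {
	int (*prepare_ucode_blob)(struct toy_acr *acr);
	int (*bootstrap_hs_acr)(struct toy_acr *acr);
};

static int toy_prepare(struct toy_acr *acr)
{
	(void)acr;
	puts("LS ucode blob laid out in non-WPR memory");
	return 0;
}

static int toy_bootstrap(struct toy_acr *acr)
{
	(void)acr;
	puts("HS ACR ucode bootstrapped");
	return 0;
}

/* Mirrors the construct-then-bootstrap ordering of the function above. */
static int toy_construct_execute(struct toy_acr *acr)
{
	int err;

	if (acr == NULL) {
		return -EINVAL;
	}
	err = acr->prepare_ucode_blob(acr);
	if (err != 0) {
		return err;
	}
	return acr->bootstrap_hs_acr(acr);
}

int main(void)
{
	struct toy_acr acr = { toy_prepare, toy_bootstrap };

	return toy_construct_execute(&acr);
}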
/* ACR init */
int nvgpu_acr_init(struct gk20a *g)
{
u32 ver = nvgpu_safe_add_u32(g->params.gpu_arch,
g->params.gpu_impl);
int err = 0;
if (g->acr != NULL) {
/*
* Recovery/unrailgate case: ACR init is not needed because ACR is
* set up during cold boot and no ACR clean-up runs as part of the
* power-off sequence, so reuse it to perform a faster boot.
*/
return err;
}
g->acr = (struct nvgpu_acr *)nvgpu_kzalloc(g, sizeof(struct nvgpu_acr));
if (g->acr == NULL) {
err = -ENOMEM;
goto done;
}
switch (ver) {
#ifdef CONFIG_NVGPU_ACR_LEGACY
case GK20A_GPUID_GM20B:
case GK20A_GPUID_GM20B_B:
nvgpu_gm20b_acr_sw_init(g, g->acr);
break;
case NVGPU_GPUID_GP10B:
nvgpu_gp10b_acr_sw_init(g, g->acr);
break;
#endif
case NVGPU_GPUID_GV11B:
nvgpu_gv11b_acr_sw_init(g, g->acr);
break;
#if defined(CONFIG_NVGPU_NEXT)
case NVGPU_NEXT_GPUID:
nvgpu_next_acr_sw_init(g, g->acr);
break;
#endif
#ifdef CONFIG_NVGPU_DGPU
case NVGPU_GPUID_TU104:
nvgpu_tu104_acr_sw_init(g, g->acr);
break;
#if defined(CONFIG_NVGPU_NEXT)
case NVGPU_NEXT_DGPU_GPUID:
nvgpu_next_dgpu_acr_sw_init(g, g->acr);
break;
#endif
#endif
default:
nvgpu_kfree(g, g->acr);
err = -EINVAL;
nvgpu_err(g, "no support for GPUID %x", ver);
break;
}
done:
return err;
}
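
For reference, the `ver` key in the switch above is simply gpu_arch + gpu_impl. A tiny standalone sketch; the 0x150/0xB field values are assumptions for demonstration, not quoted from nvgpu headers:

#include <stdio.h>

int main(void)
{
	unsigned int gpu_arch = 0x150U; /* assumed architecture field */
	unsigned int gpu_impl = 0x00BU; /* assumed implementation field */

	/* 0x150 + 0xB = 0x15B, a GV11B-style GPU ID */
	printf("ver = 0x%03X\n", gpu_arch + gpu_impl);
	return 0;
}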

@@ -0,0 +1,63 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/types.h>
#include <nvgpu/dma.h>
#include <nvgpu/gk20a.h>
#include "acr_wpr.h"
#include "acr_priv.h"
#include "acr_blob_alloc.h"
int nvgpu_acr_alloc_blob_space_sys(struct gk20a *g, size_t size,
struct nvgpu_mem *mem)
{
return nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_PHYSICALLY_ADDRESSED,
size, mem);
}
#ifdef CONFIG_NVGPU_DGPU
int nvgpu_acr_alloc_blob_space_vid(struct gk20a *g, size_t size,
struct nvgpu_mem *mem)
{
struct wpr_carveout_info wpr_inf;
int err;
if (mem->size != 0ULL) {
return 0;
}
g->acr->get_wpr_info(g, &wpr_inf);
/*
* Even though this mem_desc wouldn't be used, the wpr region needs to
* be reserved in the allocator.
*/
err = nvgpu_dma_alloc_vid_at(g, wpr_inf.size,
&g->acr->wpr_dummy, wpr_inf.wpr_base);
if (err != 0) {
return err;
}
return nvgpu_dma_alloc_vid_at(g, wpr_inf.size, mem,
wpr_inf.nonwpr_base);
}
#endif
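
The dummy allocation above exists only to pin the WPR carveout inside the video-memory allocator. A standalone toy of the same reserve-then-allocate pattern (the toy_alloc_at helper and all addresses are invented):

#include <stdint.h>
#include <stdio.h>

struct toy_mem {
	uint64_t base;
	uint64_t size;
};

/* Pretend fixed-address allocator: just records the range it handed out. */
static int toy_alloc_at(struct toy_mem *m, uint64_t size, uint64_t at)
{
	m->base = at;
	m->size = size;
	printf("claimed [0x%llx, 0x%llx)\n",
	       (unsigned long long)at, (unsigned long long)(at + size));
	return 0;
}

int main(void)
{
	struct toy_mem wpr_dummy, ucode_blob;
	uint64_t wpr_base = 0x1000000ULL;    /* assumed WPR carveout base */
	uint64_t nonwpr_base = 0x1100000ULL; /* assumed non-WPR blob base */
	uint64_t size = 0x100000ULL;         /* assumed carveout size */

	/* First claim the WPR range so nothing else can land in it... */
	toy_alloc_at(&wpr_dummy, size, wpr_base);
	/* ...then place the real blob at the adjacent non-WPR address. */
	return toy_alloc_at(&ucode_blob, size, nonwpr_base);
}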

@@ -0,0 +1,36 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef ACR_BLOB_ALLOC_H
#define ACR_BLOB_ALLOC_H
struct gk20a;
struct nvgpu_mem;
int nvgpu_acr_alloc_blob_space_sys(struct gk20a *g, size_t size,
struct nvgpu_mem *mem);
#ifdef CONFIG_NVGPU_DGPU
int nvgpu_acr_alloc_blob_space_vid(struct gk20a *g, size_t size,
struct nvgpu_mem *mem);
#endif
#endif /* ACR_BLOB_ALLOC_H */

File diff suppressed because it is too large

@@ -0,0 +1,153 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef ACR_BLOB_CONSTRUCT_H
#define ACR_BLOB_CONSTRUCT_H
#include <nvgpu/falcon.h>
#include <nvgpu/flcnif_cmn.h>
#include <nvgpu/pmu.h>
#include "nvgpu_acr_interface.h"
#define UCODE_NB_MAX_DATE_LENGTH 64U
struct ls_falcon_ucode_desc {
u32 descriptor_size;
u32 image_size;
u32 tools_version;
u32 app_version;
char date[UCODE_NB_MAX_DATE_LENGTH];
u32 bootloader_start_offset;
u32 bootloader_size;
u32 bootloader_imem_offset;
u32 bootloader_entry_point;
u32 app_start_offset;
u32 app_size;
u32 app_imem_offset;
u32 app_imem_entry;
u32 app_dmem_offset;
u32 app_resident_code_offset;
u32 app_resident_code_size;
u32 app_resident_data_offset;
u32 app_resident_data_size;
u32 nb_imem_overlays;
u32 nb_dmem_overlays;
struct {u32 start; u32 size; } load_ovl[UCODE_NB_MAX_DATE_LENGTH];
u32 compressed;
};
struct ls_falcon_ucode_desc_v1 {
u32 descriptor_size;
u32 image_size;
u32 tools_version;
u32 app_version;
char date[UCODE_NB_MAX_DATE_LENGTH];
u32 secure_bootloader;
u32 bootloader_start_offset;
u32 bootloader_size;
u32 bootloader_imem_offset;
u32 bootloader_entry_point;
u32 app_start_offset;
u32 app_size;
u32 app_imem_offset;
u32 app_imem_entry;
u32 app_dmem_offset;
u32 app_resident_code_offset;
u32 app_resident_code_size;
u32 app_resident_data_offset;
u32 app_resident_data_size;
u32 nb_imem_overlays;
u32 nb_dmem_overlays;
struct {u32 start; u32 size; } load_ovl[64];
u32 compressed;
};
struct flcn_ucode_img {
u32 *data;
struct ls_falcon_ucode_desc *desc;
u32 data_size;
struct lsf_ucode_desc *lsf_desc;
bool is_next_core_img;
#if defined(CONFIG_NVGPU_NEXT)
struct falcon_next_core_ucode_desc *ndesc;
#endif
};
struct lsfm_managed_ucode_img {
struct lsfm_managed_ucode_img *next;
struct lsf_wpr_header wpr_header;
struct lsf_lsb_header lsb_header;
struct flcn_bl_dmem_desc bl_gen_desc;
u32 bl_gen_desc_size;
u32 full_ucode_size;
struct flcn_ucode_img ucode_img;
};
#ifdef CONFIG_NVGPU_DGPU
/*
* LSF shared SubWpr Header
*
* use_case_id - Shared SubWpr use case ID (updated by nvgpu)
* start_addr - start address of subWpr (updated by nvgpu)
* size_4K - size of subWpr in 4K (updated by nvgpu)
*/
struct lsf_shared_sub_wpr_header {
u32 use_case_id;
u32 start_addr;
u32 size_4K;
};
/*
* LSFM SUB WPRs struct
* pnext : Next entry in the list, NULL if last
* sub_wpr_header : SubWpr Header struct
*/
struct lsfm_sub_wpr {
struct lsfm_sub_wpr *pnext;
struct lsf_shared_sub_wpr_header sub_wpr_header;
};
#endif
struct ls_flcn_mgr {
u16 managed_flcn_cnt;
u32 wpr_size;
struct lsfm_managed_ucode_img *ucode_img_list;
#ifdef CONFIG_NVGPU_DGPU
u16 managed_sub_wpr_count;
struct lsfm_sub_wpr *psub_wpr_list;
#endif
};
int nvgpu_acr_prepare_ucode_blob(struct gk20a *g);
#ifdef CONFIG_NVGPU_LS_PMU
int nvgpu_acr_lsf_pmu_ucode_details(struct gk20a *g, void *lsf_ucode_img);
#if defined(CONFIG_NVGPU_NEXT)
s32 nvgpu_acr_lsf_pmu_ncore_ucode_details(struct gk20a *g, void *lsf_ucode_img);
#endif
#endif
int nvgpu_acr_lsf_fecs_ucode_details(struct gk20a *g, void *lsf_ucode_img);
int nvgpu_acr_lsf_gpccs_ucode_details(struct gk20a *g, void *lsf_ucode_img);
#ifdef CONFIG_NVGPU_DGPU
int nvgpu_acr_lsf_sec2_ucode_details(struct gk20a *g, void *lsf_ucode_img);
#endif
#endif /* ACR_BLOB_CONSTRUCT_H */

@@ -0,0 +1,801 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/firmware.h>
#include <nvgpu/pmu.h>
#include <nvgpu/falcon.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/string.h>
#include <nvgpu/bug.h>
#include <nvgpu/gr/gr_falcon.h>
#include <nvgpu/pmu/fw.h>
#include <nvgpu/gr/gr_utils.h>
#include "acr_blob_construct_v0.h"
#include "acr_wpr.h"
#include "acr_priv.h"
#ifdef CONFIG_NVGPU_LS_PMU
int nvgpu_acr_lsf_pmu_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)
{
struct lsf_ucode_desc_v0 *lsf_desc;
struct nvgpu_firmware *fw_sig;
struct nvgpu_firmware *fw_desc;
struct nvgpu_firmware *fw_image;
struct flcn_ucode_img_v0 *p_img = (struct flcn_ucode_img_v0 *)lsf_ucode_img;
int err = 0;
lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc_v0));
if (lsf_desc == NULL) {
err = -ENOMEM;
goto exit;
}
fw_sig = nvgpu_pmu_fw_sig_desc(g, g->pmu);
fw_desc = nvgpu_pmu_fw_desc_desc(g, g->pmu);
fw_image = nvgpu_pmu_fw_image_desc(g, g->pmu);
nvgpu_memcpy((u8 *)lsf_desc, (u8 *)fw_sig->data,
min_t(size_t, sizeof(*lsf_desc), fw_sig->size));
lsf_desc->falcon_id = FALCON_ID_PMU;
p_img->desc = (struct pmu_ucode_desc *)(void *)fw_desc->data;
p_img->data = (u32 *)(void *)fw_image->data;
p_img->data_size = p_img->desc->image_size;
p_img->lsf_desc = (struct lsf_ucode_desc_v0 *)lsf_desc;
exit:
return err;
}
#endif
int nvgpu_acr_lsf_fecs_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)
{
struct lsf_ucode_desc_v0 *lsf_desc;
struct nvgpu_firmware *fecs_sig;
struct flcn_ucode_img_v0 *p_img = (struct flcn_ucode_img_v0 *)lsf_ucode_img;
struct nvgpu_gr_falcon *gr_falcon = nvgpu_gr_get_falcon_ptr(g);
struct nvgpu_ctxsw_ucode_segments *fecs =
nvgpu_gr_falcon_get_fecs_ucode_segments(gr_falcon);
int err;
fecs_sig = nvgpu_request_firmware(g, GM20B_FECS_UCODE_SIG, 0);
if (fecs_sig == NULL) {
nvgpu_err(g, "failed to load fecs sig");
return -ENOENT;
}
lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc_v0));
if (lsf_desc == NULL) {
err = -ENOMEM;
goto rel_sig;
}
nvgpu_memcpy((u8 *)lsf_desc, (u8 *)fecs_sig->data,
min_t(size_t, sizeof(*lsf_desc), fecs_sig->size));
lsf_desc->falcon_id = FALCON_ID_FECS;
p_img->desc = nvgpu_kzalloc(g, sizeof(struct pmu_ucode_desc));
if (p_img->desc == NULL) {
err = -ENOMEM;
goto free_lsf_desc;
}
p_img->desc->bootloader_start_offset = fecs->boot.offset;
p_img->desc->bootloader_size = NVGPU_ALIGN(fecs->boot.size, 256U);
p_img->desc->bootloader_imem_offset = fecs->boot_imem_offset;
p_img->desc->bootloader_entry_point = fecs->boot_entry;
p_img->desc->image_size = NVGPU_ALIGN(fecs->boot.size, 256U) +
NVGPU_ALIGN(fecs->code.size, 256U) + NVGPU_ALIGN(fecs->data.size, 256U);
p_img->desc->app_size = NVGPU_ALIGN(fecs->code.size, 256U) +
NVGPU_ALIGN(fecs->data.size, 256U);
p_img->desc->app_start_offset = fecs->code.offset;
p_img->desc->app_imem_offset = 0;
p_img->desc->app_imem_entry = 0;
p_img->desc->app_dmem_offset = 0;
p_img->desc->app_resident_code_offset = 0;
p_img->desc->app_resident_code_size = fecs->code.size;
p_img->desc->app_resident_data_offset =
fecs->data.offset - fecs->code.offset;
p_img->desc->app_resident_data_size = fecs->data.size;
p_img->data = nvgpu_gr_falcon_get_surface_desc_cpu_va(gr_falcon);
p_img->data_size = p_img->desc->image_size;
p_img->lsf_desc = (struct lsf_ucode_desc_v0 *)lsf_desc;
nvgpu_acr_dbg(g, "fecs fw loaded\n");
nvgpu_release_firmware(g, fecs_sig);
return 0;
free_lsf_desc:
nvgpu_kfree(g, lsf_desc);
rel_sig:
nvgpu_release_firmware(g, fecs_sig);
return err;
}
int nvgpu_acr_lsf_gpccs_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)
{
struct lsf_ucode_desc_v0 *lsf_desc;
struct nvgpu_firmware *gpccs_sig;
struct flcn_ucode_img_v0 *p_img = (struct flcn_ucode_img_v0 *)lsf_ucode_img;
struct nvgpu_gr_falcon *gr_falcon = nvgpu_gr_get_falcon_ptr(g);
struct nvgpu_ctxsw_ucode_segments *gpccs =
nvgpu_gr_falcon_get_gpccs_ucode_segments(gr_falcon);
int err;
if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) {
return -ENOENT;
}
gpccs_sig = nvgpu_request_firmware(g, T18x_GPCCS_UCODE_SIG, 0);
if (gpccs_sig == NULL) {
nvgpu_err(g, "failed to load gpccs sig");
return -ENOENT;
}
lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc_v0));
if (lsf_desc == NULL) {
err = -ENOMEM;
goto rel_sig;
}
nvgpu_memcpy((u8 *)lsf_desc, (u8 *)gpccs_sig->data,
min_t(size_t, sizeof(*lsf_desc), gpccs_sig->size));
lsf_desc->falcon_id = FALCON_ID_GPCCS;
p_img->desc = nvgpu_kzalloc(g, sizeof(struct pmu_ucode_desc));
if (p_img->desc == NULL) {
err = -ENOMEM;
goto free_lsf_desc;
}
p_img->desc->bootloader_start_offset = 0;
p_img->desc->bootloader_size = NVGPU_ALIGN(gpccs->boot.size, 256U);
p_img->desc->bootloader_imem_offset = gpccs->boot_imem_offset;
p_img->desc->bootloader_entry_point = gpccs->boot_entry;
p_img->desc->image_size = NVGPU_ALIGN(gpccs->boot.size, 256U) +
NVGPU_ALIGN(gpccs->code.size, 256U) +
NVGPU_ALIGN(gpccs->data.size, 256U);
p_img->desc->app_size = NVGPU_ALIGN(gpccs->code.size, 256U) +
NVGPU_ALIGN(gpccs->data.size, 256U);
p_img->desc->app_start_offset = p_img->desc->bootloader_size;
p_img->desc->app_imem_offset = 0;
p_img->desc->app_imem_entry = 0;
p_img->desc->app_dmem_offset = 0;
p_img->desc->app_resident_code_offset = 0;
p_img->desc->app_resident_code_size = NVGPU_ALIGN(gpccs->code.size, 256U);
p_img->desc->app_resident_data_offset =
NVGPU_ALIGN(gpccs->data.offset, 256U) -
NVGPU_ALIGN(gpccs->code.offset, 256U);
p_img->desc->app_resident_data_size = NVGPU_ALIGN(gpccs->data.size, 256U);
p_img->data = (u32 *)
((u8 *)nvgpu_gr_falcon_get_surface_desc_cpu_va(gr_falcon) +
gpccs->boot.offset);
p_img->data_size = NVGPU_ALIGN(p_img->desc->image_size, 256U);
p_img->lsf_desc = (struct lsf_ucode_desc_v0 *)lsf_desc;
nvgpu_acr_dbg(g, "gpccs fw loaded\n");
nvgpu_release_firmware(g, gpccs_sig);
return 0;
free_lsf_desc:
nvgpu_kfree(g, lsf_desc);
rel_sig:
nvgpu_release_firmware(g, gpccs_sig);
return err;
}
/*
* @brief lsfm_fill_static_lsb_hdr_info
* Populate static LSB header information using the provided ucode image
*/
static void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
u32 falcon_id, struct lsfm_managed_ucode_img_v0 *pnode)
{
u32 full_app_size = 0;
u32 data = 0;
if (pnode->ucode_img.lsf_desc != NULL) {
nvgpu_memcpy((u8 *)&pnode->lsb_header.signature,
(u8 *)pnode->ucode_img.lsf_desc,
sizeof(struct lsf_ucode_desc_v0));
}
pnode->lsb_header.ucode_size = pnode->ucode_img.data_size;
/* Uses a loader, i.e. has a descriptor */
pnode->lsb_header.data_size = 0;
/*
* The loader code size is already aligned (padded) such that
* the code following it is aligned, but the size in the image
* desc is not; pad it up to a 256-byte alignment.
*/
pnode->lsb_header.bl_code_size = NVGPU_ALIGN(
pnode->ucode_img.desc->bootloader_size,
LSF_BL_CODE_SIZE_ALIGNMENT);
full_app_size = NVGPU_ALIGN(pnode->ucode_img.desc->app_size,
LSF_BL_CODE_SIZE_ALIGNMENT) +
pnode->lsb_header.bl_code_size;
pnode->lsb_header.ucode_size = NVGPU_ALIGN(
pnode->ucode_img.desc->app_resident_data_offset,
LSF_BL_CODE_SIZE_ALIGNMENT) +
pnode->lsb_header.bl_code_size;
pnode->lsb_header.data_size = full_app_size -
pnode->lsb_header.ucode_size;
/*
* Though the BL is located at 0th offset of the image, the VA
* is different to make sure that it doesn't collide with the
* actual OS VA range
*/
pnode->lsb_header.bl_imem_off =
pnode->ucode_img.desc->bootloader_imem_offset;
pnode->lsb_header.flags = 0;
if (falcon_id == FALCON_ID_PMU) {
data = NV_FLCN_ACR_LSF_FLAG_DMACTL_REQ_CTX_TRUE;
pnode->lsb_header.flags = data;
}
if (g->acr->lsf[falcon_id].is_priv_load) {
pnode->lsb_header.flags |=
NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD_TRUE;
}
}
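
As a worked example of the 256-byte size "bloat" above, a standalone sketch (TOY_ALIGN mirrors NVGPU_ALIGN; the 0x5C0 loader size is made up):

#include <stdio.h>

/* Round x up to the next multiple of a (a must be a power of two). */
#define TOY_ALIGN(x, a) (((x) + ((a) - 1U)) & ~((a) - 1U))

int main(void)
{
	unsigned int bootloader_size = 0x5C0U; /* assumed unaligned size */

	/* 0x5C0 rounds up to 0x600 with 256-byte alignment */
	printf("bl_code_size = 0x%X\n", TOY_ALIGN(bootloader_size, 256U));
	return 0;
}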
/* Add a ucode image to the list of managed ucode images. */
static int lsfm_add_ucode_img(struct gk20a *g, struct ls_flcn_mgr_v0 *plsfm,
struct flcn_ucode_img_v0 *ucode_image, u32 falcon_id)
{
struct lsfm_managed_ucode_img_v0 *pnode;
pnode = nvgpu_kzalloc(g, sizeof(struct lsfm_managed_ucode_img_v0));
if (pnode == NULL) {
return -ENOMEM;
}
/* Keep a copy of the ucode image info locally */
nvgpu_memcpy((u8 *)&pnode->ucode_img, (u8 *)ucode_image,
sizeof(struct flcn_ucode_img_v0));
/* Fill in static WPR header info*/
pnode->wpr_header.falcon_id = falcon_id;
pnode->wpr_header.bootstrap_owner = g->acr->bootstrap_owner;
pnode->wpr_header.status = LSF_IMAGE_STATUS_COPY;
pnode->wpr_header.lazy_bootstrap =
(u32)g->acr->lsf[falcon_id].is_lazy_bootstrap;
/* Fill in static LSB header info */
lsfm_fill_static_lsb_hdr_info(g, falcon_id, pnode);
pnode->next = plsfm->ucode_img_list;
plsfm->ucode_img_list = pnode;
return 0;
}
/* Discover all managed falcon ucode images */
static int lsfm_discover_ucode_images(struct gk20a *g,
struct ls_flcn_mgr_v0 *plsfm)
{
struct flcn_ucode_img_v0 ucode_img;
struct nvgpu_acr *acr = g->acr;
u32 falcon_id;
u32 i;
int err = 0;
/*
* Enumerate all constructed falcon objects, as we need the ucode
* image info and total falcon count
*/
for (i = 0U; i < FALCON_ID_END; i++) {
if (nvgpu_test_bit(i, (void *)&acr->lsf_enable_mask) &&
acr->lsf[i].get_lsf_ucode_details != NULL) {
(void) memset(&ucode_img, 0, sizeof(ucode_img));
if (acr->lsf[i].get_lsf_ucode_details(g,
(void *)&ucode_img) != 0) {
nvgpu_err(g, "LS falcon-%d ucode get failed", i);
goto exit;
}
if (ucode_img.lsf_desc != NULL) {
/*
* falcon_id is formed by grabbing the static
* base falcon_id from the image and adding the
* engine-designated falcon instance.
*/
falcon_id = ucode_img.lsf_desc->falcon_id;
err = lsfm_add_ucode_img(g, plsfm, &ucode_img,
falcon_id);
if (err != 0) {
nvgpu_err(g, " Failed to add falcon-%d to LSFM ",
falcon_id);
goto exit;
}
plsfm->managed_flcn_cnt++;
}
}
}
exit:
return err;
}
/* Generate WPR requirements for ACR allocation request */
static int lsf_gen_wpr_requirements(struct gk20a *g, struct ls_flcn_mgr_v0 *plsfm)
{
struct lsfm_managed_ucode_img_v0 *pnode = plsfm->ucode_img_list;
u32 wpr_offset;
/*
* Start with an array of WPR headers at the base of the WPR.
* The expectation here is that the secure falcon will do a single DMA
* read of this array and cache it internally so it's OK to pack these.
* Also, we add 1 to the falcon count to indicate the end of the array.
*/
wpr_offset = U32(sizeof(struct lsf_wpr_header_v0)) *
(U32(plsfm->managed_flcn_cnt) + U32(1));
/*
* Walk the managed falcons, accounting for the LSB structs
* as well as the ucode images.
*/
while (pnode != NULL) {
/* Align, save off, and include an LSB header size */
wpr_offset = NVGPU_ALIGN(wpr_offset, LSF_LSB_HEADER_ALIGNMENT);
pnode->wpr_header.lsb_offset = wpr_offset;
wpr_offset += (u32)sizeof(struct lsf_lsb_header_v0);
/*
* Align, save off, and include the original (static)
* ucode image size
*/
wpr_offset = NVGPU_ALIGN(wpr_offset,
LSF_UCODE_DATA_ALIGNMENT);
pnode->lsb_header.ucode_off = wpr_offset;
wpr_offset += pnode->ucode_img.data_size;
/*
* For falcons that use a boot loader (BL), we append a loader
* desc structure on the end of the ucode image and consider this
* the boot loader data. The host will then copy the loader desc
* args to this space within the WPR region (before locking down)
* and the HS bin will then copy them to DMEM 0 for the loader.
*/
/*
* Track the size for LSB details filled in later.
* Note that at this point we don't know what kind of
* boot loader desc it is, so we just take the size of the
* generic one, which is the largest it will ever be.
*/
/* Align (size bloat) and save off generic descriptor size */
pnode->lsb_header.bl_data_size = NVGPU_ALIGN(
(u32)sizeof(pnode->bl_gen_desc),
LSF_BL_DATA_SIZE_ALIGNMENT);
/* Align, save off, and include the additional BL data */
wpr_offset = NVGPU_ALIGN(wpr_offset,
LSF_BL_DATA_ALIGNMENT);
pnode->lsb_header.bl_data_off = wpr_offset;
wpr_offset += pnode->lsb_header.bl_data_size;
/* Finally, update ucode surface size to include updates */
pnode->full_ucode_size = wpr_offset -
pnode->lsb_header.ucode_off;
if (pnode->wpr_header.falcon_id != FALCON_ID_PMU) {
pnode->lsb_header.app_code_off =
pnode->lsb_header.bl_code_size;
pnode->lsb_header.app_code_size =
pnode->lsb_header.ucode_size -
pnode->lsb_header.bl_code_size;
pnode->lsb_header.app_data_off =
pnode->lsb_header.ucode_size;
pnode->lsb_header.app_data_size =
pnode->lsb_header.data_size;
}
pnode = pnode->next;
}
plsfm->wpr_size = wpr_offset;
return 0;
}
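
To make the layout above concrete, a standalone sketch that replays the same offset arithmetic for two hypothetical falcons. The header sizes are derived from the v0 structs assuming no padding, the image sizes are made up, and 256/4096 mirror the LSF alignment defines used here:

#include <stdio.h>

#define TOY_ALIGN(x, a) (((x) + ((a) - 1U)) & ~((a) - 1U))

int main(void)
{
	unsigned int wpr_hdr_sz = 20U;  /* five u32s in lsf_wpr_header_v0 */
	unsigned int lsb_hdr_sz = 124U; /* lsf_lsb_header_v0, no padding assumed */
	unsigned int bl_data_sz = 256U; /* ALIGN(sizeof(bl_gen_desc), 256) */
	unsigned int ucode_sz[2] = { 0x9000U, 0x5400U }; /* made-up image sizes */
	unsigned int off, i;

	/* WPR header array at the WPR base, plus one terminator entry */
	off = wpr_hdr_sz * (2U + 1U);
	for (i = 0U; i < 2U; i++) {
		off = TOY_ALIGN(off, 256U);   /* LSB header alignment */
		printf("falcon %u lsb_offset 0x%X\n", i, off);
		off += lsb_hdr_sz;
		off = TOY_ALIGN(off, 4096U);  /* ucode data alignment */
		printf("falcon %u ucode_off  0x%X\n", i, off);
		off += ucode_sz[i];
		off = TOY_ALIGN(off, 256U);   /* BL bootstrap args */
		off += bl_data_sz;
	}
	printf("wpr_size 0x%X\n", off);
	return 0;
}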
/* Initialize WPR contents */
static int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
void *lsfm, u32 *p_bl_gen_desc_size)
{
struct wpr_carveout_info wpr_inf;
struct lsfm_managed_ucode_img_v0 *p_lsfm =
(struct lsfm_managed_ucode_img_v0 *)lsfm;
struct flcn_ucode_img_v0 *p_img = &(p_lsfm->ucode_img);
struct loader_config *ldr_cfg = &(p_lsfm->bl_gen_desc.loader_cfg);
u64 addr_base;
struct pmu_ucode_desc *desc;
u64 tmp;
u32 addr_code, addr_data;
if (p_img->desc == NULL) {
/*
* This means it's a header-based ucode,
* so we do not fill the BL gen desc structure
*/
return -EINVAL;
}
desc = p_img->desc;
/*
* Calculate physical and virtual addresses for various portions of
* the PMU ucode image
* Calculate the 32-bit addresses for the application code, application
* data, and bootloader code. These values are all based on IM_BASE.
* The 32-bit addresses will be the upper 32-bits of the virtual or
* physical addresses of each respective segment.
*/
addr_base = p_lsfm->lsb_header.ucode_off;
g->acr->get_wpr_info(g, &wpr_inf);
addr_base += wpr_inf.wpr_base;
nvgpu_acr_dbg(g, "pmu loader cfg u32 addrbase %x\n", (u32)addr_base);
/* From Linux */
tmp = (addr_base +
desc->app_start_offset +
desc->app_resident_code_offset) >> 8;
nvgpu_assert(tmp <= U32_MAX);
addr_code = u64_lo32(tmp);
nvgpu_acr_dbg(g, "app start %d app res code off %d\n",
desc->app_start_offset, desc->app_resident_code_offset);
tmp = (addr_base +
desc->app_start_offset +
desc->app_resident_data_offset) >> 8;
nvgpu_assert(tmp <= U32_MAX);
addr_data = u64_lo32(tmp);
nvgpu_acr_dbg(g, "app res data offset%d\n",
desc->app_resident_data_offset);
nvgpu_acr_dbg(g, "bl start off %d\n", desc->bootloader_start_offset);
/* Populate the loader_config state*/
ldr_cfg->dma_idx = g->acr->lsf[FALCON_ID_PMU].falcon_dma_idx;
ldr_cfg->code_dma_base = addr_code;
ldr_cfg->code_dma_base1 = 0x0;
ldr_cfg->code_size_total = desc->app_size;
ldr_cfg->code_size_to_load = desc->app_resident_code_size;
ldr_cfg->code_entry_point = desc->app_imem_entry;
ldr_cfg->data_dma_base = addr_data;
ldr_cfg->data_dma_base1 = 0;
ldr_cfg->data_size = desc->app_resident_data_size;
ldr_cfg->overlay_dma_base = addr_code;
ldr_cfg->overlay_dma_base1 = 0x0;
/* Update the argc/argv members*/
ldr_cfg->argc = 1;
#ifdef CONFIG_NVGPU_LS_PMU
nvgpu_pmu_fw_get_cmd_line_args_offset(g, &ldr_cfg->argv);
#endif
*p_bl_gen_desc_size = (u32)sizeof(struct loader_config);
return 0;
}
static int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
void *lsfm, u32 *p_bl_gen_desc_size, u32 falconid)
{
struct wpr_carveout_info wpr_inf;
struct lsfm_managed_ucode_img_v0 *p_lsfm =
(struct lsfm_managed_ucode_img_v0 *)lsfm;
struct flcn_ucode_img_v0 *p_img = &(p_lsfm->ucode_img);
struct flcn_bl_dmem_desc_v0 *ldr_cfg =
&(p_lsfm->bl_gen_desc.bl_dmem_desc);
u64 addr_base;
struct pmu_ucode_desc *desc;
u32 addr_code, addr_data;
u64 tmp;
if (p_img->desc == NULL) {
/*
* This means it's a header-based ucode,
* so we do not fill the BL gen desc structure
*/
return -EINVAL;
}
desc = p_img->desc;
/*
* Calculate physical and virtual addresses for various portions of
* the PMU ucode image
* Calculate the 32-bit addresses for the application code, application
* data, and bootloader code. These values are all based on IM_BASE.
* The 32-bit addresses will be the upper 32-bits of the virtual or
* physical addresses of each respective segment.
*/
addr_base = p_lsfm->lsb_header.ucode_off;
g->acr->get_wpr_info(g, &wpr_inf);
addr_base += wpr_inf.wpr_base;
nvgpu_acr_dbg(g, "gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base,
p_lsfm->wpr_header.falcon_id);
tmp = (addr_base +
desc->app_start_offset +
desc->app_resident_code_offset) >> 8;
nvgpu_assert(tmp <= U32_MAX);
addr_code = u64_lo32(tmp);
tmp = (addr_base +
desc->app_start_offset +
desc->app_resident_data_offset) >> 8;
nvgpu_assert(tmp <= U32_MAX);
addr_data = u64_lo32(tmp);
nvgpu_acr_dbg(g, "gen cfg %x u32 addrcode %x & data %x load offset %xID\n",
(u32)addr_code, (u32)addr_data, desc->bootloader_start_offset,
p_lsfm->wpr_header.falcon_id);
/* Populate the LOADER_CONFIG state */
(void) memset((void *) ldr_cfg, 0, sizeof(struct flcn_bl_dmem_desc_v0));
ldr_cfg->ctx_dma = g->acr->lsf[falconid].falcon_dma_idx;
ldr_cfg->code_dma_base = addr_code;
ldr_cfg->non_sec_code_size = desc->app_resident_code_size;
ldr_cfg->data_dma_base = addr_data;
ldr_cfg->data_size = desc->app_resident_data_size;
ldr_cfg->code_entry_point = desc->app_imem_entry;
*p_bl_gen_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc_v0);
return 0;
}
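
A standalone illustration of the ">> 8" above: the falcon DMA engine consumes 256-byte-aligned frame addresses, so a byte address is stored shifted down by 8 bits. All addresses below are invented:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long long wpr_base  = 0xF8000000ULL; /* assumed WPR base */
	unsigned long long ucode_off = 0x2000ULL;     /* LSB ucode_off in WPR */
	unsigned long long app_start = 0x600ULL;      /* desc->app_start_offset */
	unsigned long long code_off  = 0x0ULL;        /* app_resident_code_offset */
	unsigned long long tmp = (wpr_base + ucode_off + app_start + code_off) >> 8;

	assert(tmp <= 0xFFFFFFFFULL); /* mirrors the nvgpu_assert() above */
	printf("code_dma_base = 0x%08llX\n", tmp);
	return 0;
}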
/* Populate falcon boot loader generic desc.*/
static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
struct lsfm_managed_ucode_img_v0 *pnode)
{
int err = -ENOENT;
if (pnode->wpr_header.falcon_id != FALCON_ID_PMU) {
nvgpu_acr_dbg(g, "non pmu. write flcn bl gen desc\n");
err = gm20b_flcn_populate_bl_dmem_desc(g,
pnode, &pnode->bl_gen_desc_size,
pnode->wpr_header.falcon_id);
if (err != 0) {
nvgpu_err(g, "flcn_populate_bl_dmem_desc failed=%d",
err);
}
return err;
}
if (pnode->wpr_header.falcon_id == FALCON_ID_PMU) {
nvgpu_acr_dbg(g, "pmu write flcn bl gen desc\n");
err = gm20b_pmu_populate_loader_cfg(g, pnode,
&pnode->bl_gen_desc_size);
if (err != 0) {
nvgpu_err(g, "pmu_populate_loader_cfg failed=%d",
err);
}
return err;
}
/* Failed to find the falcon requested. */
return err;
}
static int lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr_v0 *plsfm,
struct nvgpu_mem *ucode)
{
struct lsfm_managed_ucode_img_v0 *pnode = plsfm->ucode_img_list;
struct lsf_wpr_header_v0 last_wpr_hdr;
u32 i;
int err = 0;
/* The WPR array is at the base of the WPR */
pnode = plsfm->ucode_img_list;
(void) memset(&last_wpr_hdr, 0, sizeof(struct lsf_wpr_header_v0));
i = 0;
/*
* Walk the managed falcons, flush WPR and LSB headers to FB.
* flush any bl args to the storage area relative to the
* ucode image (appended on the end as a DMEM area).
*/
while (pnode != NULL) {
/* Flush WPR header to memory*/
nvgpu_mem_wr_n(g, ucode, i * (u32)sizeof(pnode->wpr_header),
&pnode->wpr_header,
(u32)sizeof(pnode->wpr_header));
nvgpu_acr_dbg(g, "wpr header");
nvgpu_acr_dbg(g, "falconid :%d",
pnode->wpr_header.falcon_id);
nvgpu_acr_dbg(g, "lsb_offset :%x",
pnode->wpr_header.lsb_offset);
nvgpu_acr_dbg(g, "bootstrap_owner :%d",
pnode->wpr_header.bootstrap_owner);
nvgpu_acr_dbg(g, "lazy_bootstrap :%d",
pnode->wpr_header.lazy_bootstrap);
nvgpu_acr_dbg(g, "status :%d",
pnode->wpr_header.status);
/*Flush LSB header to memory*/
nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset,
&pnode->lsb_header,
(u32)sizeof(pnode->lsb_header));
nvgpu_acr_dbg(g, "lsb header");
nvgpu_acr_dbg(g, "ucode_off :%x",
pnode->lsb_header.ucode_off);
nvgpu_acr_dbg(g, "ucode_size :%x",
pnode->lsb_header.ucode_size);
nvgpu_acr_dbg(g, "data_size :%x",
pnode->lsb_header.data_size);
nvgpu_acr_dbg(g, "bl_code_size :%x",
pnode->lsb_header.bl_code_size);
nvgpu_acr_dbg(g, "bl_imem_off :%x",
pnode->lsb_header.bl_imem_off);
nvgpu_acr_dbg(g, "bl_data_off :%x",
pnode->lsb_header.bl_data_off);
nvgpu_acr_dbg(g, "bl_data_size :%x",
pnode->lsb_header.bl_data_size);
nvgpu_acr_dbg(g, "app_code_off :%x",
pnode->lsb_header.app_code_off);
nvgpu_acr_dbg(g, "app_code_size :%x",
pnode->lsb_header.app_code_size);
nvgpu_acr_dbg(g, "app_data_off :%x",
pnode->lsb_header.app_data_off);
nvgpu_acr_dbg(g, "app_data_size :%x",
pnode->lsb_header.app_data_size);
nvgpu_acr_dbg(g, "flags :%x",
pnode->lsb_header.flags);
/* this falcon has a boot loader and related args, flush them */
/* Populate gen bl and flush to memory */
err = lsfm_fill_flcn_bl_gen_desc(g, pnode);
if (err != 0) {
nvgpu_err(g, "bl_gen_desc failed err=%d", err);
return err;
}
nvgpu_mem_wr_n(g, ucode,
pnode->lsb_header.bl_data_off,
&pnode->bl_gen_desc,
pnode->bl_gen_desc_size);
/* Copying of ucode */
nvgpu_mem_wr_n(g, ucode, pnode->lsb_header.ucode_off,
pnode->ucode_img.data,
pnode->ucode_img.data_size);
pnode = pnode->next;
i++;
}
/* Tag the terminator WPR header with an invalid falcon ID. */
last_wpr_hdr.falcon_id = FALCON_ID_INVALID;
nvgpu_mem_wr_n(g, ucode,
(u32)plsfm->managed_flcn_cnt *
(u32)sizeof(struct lsf_wpr_header_v0),
&last_wpr_hdr,
(u32)sizeof(struct lsf_wpr_header_v0));
return err;
}
/* Free any ucode image structure resources. */
static void lsfm_free_ucode_img_res(struct gk20a *g,
struct flcn_ucode_img_v0 *p_img)
{
if (p_img->lsf_desc != NULL) {
nvgpu_kfree(g, p_img->lsf_desc);
p_img->lsf_desc = NULL;
}
}
/* Free any ucode image structure resources. */
static void lsfm_free_nonpmu_ucode_img_res(struct gk20a *g,
struct flcn_ucode_img_v0 *p_img)
{
if (p_img->lsf_desc != NULL) {
nvgpu_kfree(g, p_img->lsf_desc);
p_img->lsf_desc = NULL;
}
if (p_img->desc != NULL) {
nvgpu_kfree(g, p_img->desc);
p_img->desc = NULL;
}
}
static void free_acr_resources(struct gk20a *g, struct ls_flcn_mgr_v0 *plsfm)
{
u32 cnt = plsfm->managed_flcn_cnt;
struct lsfm_managed_ucode_img_v0 *mg_ucode_img;
while (cnt != 0U) {
mg_ucode_img = plsfm->ucode_img_list;
if (mg_ucode_img->ucode_img.lsf_desc->falcon_id ==
FALCON_ID_PMU) {
lsfm_free_ucode_img_res(g, &mg_ucode_img->ucode_img);
} else {
lsfm_free_nonpmu_ucode_img_res(g,
&mg_ucode_img->ucode_img);
}
plsfm->ucode_img_list = mg_ucode_img->next;
nvgpu_kfree(g, mg_ucode_img);
cnt--;
}
}
int nvgpu_acr_prepare_ucode_blob_v0(struct gk20a *g)
{
int err = 0;
struct ls_flcn_mgr_v0 lsfm_l, *plsfm;
struct wpr_carveout_info wpr_inf;
struct nvgpu_gr_falcon *gr_falcon = nvgpu_gr_get_falcon_ptr(g);
if (g->acr->ucode_blob.cpu_va != NULL) {
/* Recovery case, we do not need to re-form the non-WPR blob */
return err;
}
plsfm = &lsfm_l;
(void) memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr_v0));
nvgpu_acr_dbg(g, "fetching GMMU regs\n");
err = g->ops.fb.vpr_info_fetch(g);
if (err != 0) {
nvgpu_err(g, "fb.vpr_info_fetch failed err=%d", err);
return err;
}
err = nvgpu_gr_falcon_init_ctxsw_ucode(g, gr_falcon);
if (err != 0) {
nvgpu_err(g, "gr_falcon_init_ctxsw_ucode failed err=%d", err);
return err;
}
g->acr->get_wpr_info(g, &wpr_inf);
nvgpu_acr_dbg(g, "wpr carveout base:%llx\n", wpr_inf.wpr_base);
nvgpu_acr_dbg(g, "wpr carveout size :%llx\n", wpr_inf.size);
/* Discover all managed falcons*/
err = lsfm_discover_ucode_images(g, plsfm);
nvgpu_acr_dbg(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
if (err != 0) {
goto exit_err;
}
if ((plsfm->managed_flcn_cnt != 0U) &&
(g->acr->ucode_blob.cpu_va == NULL)) {
/* Generate WPR requirements */
err = lsf_gen_wpr_requirements(g, plsfm);
if (err != 0) {
goto exit_err;
}
/* Alloc memory to hold ucode blob contents */
err = g->acr->alloc_blob_space(g, plsfm->wpr_size,
&g->acr->ucode_blob);
if (err != 0) {
goto exit_err;
}
nvgpu_acr_dbg(g, "managed LS falcon %d, WPR size %d bytes.\n",
plsfm->managed_flcn_cnt, plsfm->wpr_size);
err = lsfm_init_wpr_contents(g, plsfm, &g->acr->ucode_blob);
if (err != 0) {
/* the blob came from alloc_blob_space(); release the DMA memory */
nvgpu_dma_free(g, &g->acr->ucode_blob);
goto free_acr;
}
} else {
nvgpu_acr_dbg(g, "LSFM is managing no falcons.\n");
}
nvgpu_acr_dbg(g, "prepare ucode blob return 0\n");
free_acr:
free_acr_resources(g, plsfm);
exit_err:
return err;
}

@@ -0,0 +1,207 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef ACR_BLOB_CONSTRUCT_V0_H
#define ACR_BLOB_CONSTRUCT_V0_H
#include <nvgpu/falcon.h>
#include <nvgpu/flcnif_cmn.h>
/*
* Light Secure WPR Content Alignments
*/
#define LSF_WPR_HEADER_ALIGNMENT (256U)
#define LSF_SUB_WPR_HEADER_ALIGNMENT (256U)
#define LSF_LSB_HEADER_ALIGNMENT (256U)
#define LSF_BL_DATA_ALIGNMENT (256U)
#define LSF_BL_DATA_SIZE_ALIGNMENT (256U)
#define LSF_BL_CODE_SIZE_ALIGNMENT (256U)
#define LSF_DATA_SIZE_ALIGNMENT (256U)
#define LSF_CODE_SIZE_ALIGNMENT (256U)
#define LSF_UCODE_DATA_ALIGNMENT 4096U
/* Defined for 1MB alignment */
#define SHIFT_1MB (20U)
#define SHIFT_4KB (12U)
/*Light Secure Bootstrap header related defines*/
#define NV_FLCN_ACR_LSF_FLAG_LOAD_CODE_AT_0_FALSE 0U
#define NV_FLCN_ACR_LSF_FLAG_LOAD_CODE_AT_0_TRUE BIT32(0)
#define NV_FLCN_ACR_LSF_FLAG_DMACTL_REQ_CTX_FALSE 0U
#define NV_FLCN_ACR_LSF_FLAG_DMACTL_REQ_CTX_TRUE BIT32(2)
#define NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD_TRUE BIT32(3)
#define NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD_FALSE (0U)
/*
* Image Status Defines
*/
#define LSF_IMAGE_STATUS_NONE (0U)
#define LSF_IMAGE_STATUS_COPY (1U)
#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED (2U)
#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED (3U)
#define LSF_IMAGE_STATUS_VALIDATION_DONE (4U)
#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED (5U)
#define LSF_IMAGE_STATUS_BOOTSTRAP_READY (6U)
/*
* Light Secure WPR Header
* Defines state allowing Light Secure Falcon bootstrapping.
*/
struct lsf_wpr_header_v0 {
u32 falcon_id;
u32 lsb_offset;
u32 bootstrap_owner;
u32 lazy_bootstrap;
u32 status;
};
/*
* Light Secure Falcon Ucode Description Defines
* This structure is preliminary and may change as the ucode signing flow evolves.
*/
struct lsf_ucode_desc_v0 {
u8 prd_keys[2][16];
u8 dbg_keys[2][16];
u32 b_prd_present;
u32 b_dbg_present;
u32 falcon_id;
};
/*
* Light Secure Bootstrap Header
* Defines state allowing Light Secure Falcon bootstrapping.
*/
struct lsf_lsb_header_v0 {
struct lsf_ucode_desc_v0 signature;
u32 ucode_off;
u32 ucode_size;
u32 data_size;
u32 bl_code_size;
u32 bl_imem_off;
u32 bl_data_off;
u32 bl_data_size;
u32 app_code_off;
u32 app_code_size;
u32 app_data_off;
u32 app_data_size;
u32 flags;
};
/*
* Union of all supported structures used by bootloaders.
*/
/* Falcon BL interfaces */
/*
* Structure used by the boot-loader to load the rest of the code. This has
* to be filled by NVGPU and copied into DMEM at offset provided in the
* hsflcn_bl_desc.bl_desc_dmem_load_off.
*/
struct flcn_bl_dmem_desc_v0 {
u32 reserved[4]; /* Should be the first element */
u32 signature[4]; /* Must immediately follow 'reserved' */
u32 ctx_dma;
u32 code_dma_base;
u32 non_sec_code_off;
u32 non_sec_code_size;
u32 sec_code_off;
u32 sec_code_size;
u32 code_entry_point;
u32 data_dma_base;
u32 data_size;
u32 code_dma_base1;
u32 data_dma_base1;
};
/*
* Legacy structure used by the current PMU bootloader.
*/
struct loader_config {
u32 dma_idx;
u32 code_dma_base; /* upper 32-bits of 40-bit dma address */
u32 code_size_total;
u32 code_size_to_load;
u32 code_entry_point;
u32 data_dma_base; /* upper 32-bits of 40-bit dma address */
u32 data_size; /* initialized data of the application */
u32 overlay_dma_base; /* upper 32-bits of the 40-bit dma address */
u32 argc;
u32 argv;
u16 code_dma_base1; /* upper 7 bits of 47-bit dma address */
u16 data_dma_base1; /* upper 7 bits of 47-bit dma address */
u16 overlay_dma_base1; /* upper 7 bits of the 47-bit dma address */
};
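
A standalone illustration of how a wide DMA address splits across the base/base1 field pairs above (the example address is invented): the primary field keeps byte-address bits [39:8] and the *_dma_base1 field keeps bits [46:40]:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x5A1F8000200ULL;          /* assumed 47-bit address */
	uint32_t base = (uint32_t)(addr >> 8);     /* bits [39:8]  */
	uint16_t base1 = (uint16_t)((addr >> 40) & 0x7FU); /* bits [46:40] */

	printf("base 0x%08X base1 0x%02X\n", (unsigned)base, (unsigned)base1);
	return 0;
}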
union flcn_bl_generic_desc {
struct flcn_bl_dmem_desc_v0 bl_dmem_desc;
struct loader_config loader_cfg;
};
struct flcn_ucode_img_v0 {
u32 *data;
struct pmu_ucode_desc *desc; /* only some falcons have descriptor */
u32 data_size;
/* NULL if not a light secure falcon. */
struct lsf_ucode_desc_v0 *lsf_desc;
/* True if there are resources to be freed by the client. */
};
/*
* LSFM Managed Ucode Image
* next : Next image in the list, NULL if last.
* wpr_header : WPR header for this ucode image
* lsb_header : LSB header for this ucode image
* bl_gen_desc : Bootloader generic desc structure for this ucode image
* bl_gen_desc_size : Size of the bootloader desc structure for this ucode image
* full_ucode_size : Surface size required for final ucode image
* ucode_img : Ucode image info
*/
struct lsfm_managed_ucode_img_v0 {
struct lsfm_managed_ucode_img_v0 *next;
struct lsf_wpr_header_v0 wpr_header;
struct lsf_lsb_header_v0 lsb_header;
union flcn_bl_generic_desc bl_gen_desc;
u32 bl_gen_desc_size;
u32 full_ucode_size;
struct flcn_ucode_img_v0 ucode_img;
};
/*
* Defines the structure used to contain all generic information related to
* the LSFM.
*
* Contains the Light Secure Falcon Manager (LSFM) feature related data.
*/
struct ls_flcn_mgr_v0 {
u16 managed_flcn_cnt;
u32 wpr_size;
struct lsfm_managed_ucode_img_v0 *ucode_img_list;
};
int nvgpu_acr_lsf_pmu_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img);
int nvgpu_acr_lsf_fecs_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img);
int nvgpu_acr_lsf_gpccs_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img);
int nvgpu_acr_prepare_ucode_blob_v0(struct gk20a *g);
#endif /* ACR_BLOB_CONSTRUCT_V0_H */

@@ -0,0 +1,254 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/types.h>
#include <nvgpu/dma.h>
#include <nvgpu/timers.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/firmware.h>
#include <nvgpu/pmu.h>
#include <nvgpu/falcon.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/acr.h>
#include <nvgpu/bug.h>
#include <nvgpu/soc.h>
#include "acr_bootstrap.h"
#include "acr_priv.h"
int nvgpu_acr_wait_for_completion(struct gk20a *g, struct hs_acr *acr_desc,
u32 timeout)
{
u32 flcn_id;
#ifdef CONFIG_NVGPU_FALCON_NON_FUSA
u32 sctl, cpuctl;
#endif
int completion = 0;
u32 data = 0;
u32 bar0_status = 0;
u32 error_type;
nvgpu_log_fn(g, " ");
flcn_id = nvgpu_falcon_get_id(acr_desc->acr_flcn);
completion = nvgpu_falcon_wait_for_halt(acr_desc->acr_flcn, timeout);
if (completion != 0) {
nvgpu_err(g, "flcn-%d: HS ucode boot timed out, limit: %d ms",
flcn_id, timeout);
error_type = ACR_BOOT_TIMEDOUT;
goto exit;
}
if (acr_desc->acr_engine_bus_err_status != NULL) {
completion = acr_desc->acr_engine_bus_err_status(g,
&bar0_status, &error_type);
if (completion != 0) {
nvgpu_err(g, "flcn-%d: ACR engine bus error", flcn_id);
goto exit;
}
}
data = nvgpu_falcon_mailbox_read(acr_desc->acr_flcn, FALCON_MAILBOX_0);
if (data != 0U) {
nvgpu_err(g, "flcn-%d: HS ucode boot failed, err %x", flcn_id,
data);
nvgpu_err(g, "flcn-%d: Mailbox-1 : 0x%x", flcn_id,
nvgpu_falcon_mailbox_read(acr_desc->acr_flcn,
FALCON_MAILBOX_1));
completion = -EAGAIN;
error_type = ACR_BOOT_FAILED;
goto exit;
}
/*
* When engine-falcon is used for ACR bootstrap, validate the integrity
* of falcon IMEM and DMEM.
*/
if (acr_desc->acr_validate_mem_integrity != NULL) {
if (!acr_desc->acr_validate_mem_integrity(g)) {
nvgpu_err(g, "flcn-%d: memcheck failed", flcn_id);
completion = -EAGAIN;
error_type = ACR_BOOT_FAILED;
}
}
exit:
#ifdef CONFIG_NVGPU_FALCON_NON_FUSA
nvgpu_falcon_get_ctls(acr_desc->acr_flcn, &sctl, &cpuctl);
nvgpu_acr_dbg(g, "flcn-%d: sctl reg %x cpuctl reg %x",
flcn_id, sctl, cpuctl);
#endif
if (completion != 0) {
#ifdef CONFIG_NVGPU_FALCON_DEBUG
nvgpu_falcon_dump_stats(acr_desc->acr_flcn);
#endif
if (acr_desc->report_acr_engine_bus_err_status != NULL) {
acr_desc->report_acr_engine_bus_err_status(g,
bar0_status, error_type);
}
}
return completion;
}
/*
* Patch signatures into ucode image
*/
static void acr_ucode_patch_sig(struct gk20a *g,
unsigned int *p_img, unsigned int *p_prod_sig,
unsigned int *p_dbg_sig, unsigned int *p_patch_loc,
unsigned int *p_patch_ind, u32 sig_size)
{
#if defined(CONFIG_NVGPU_NEXT)
struct nvgpu_acr *acr = g->acr;
#endif
unsigned int i, j, *p_sig;
const u32 dmem_word_size = 4U;
nvgpu_acr_dbg(g, " ");
if (!g->ops.pmu.is_debug_mode_enabled(g)) {
p_sig = p_prod_sig;
nvgpu_acr_dbg(g, "PRODUCTION MODE\n");
} else {
p_sig = p_dbg_sig;
nvgpu_info(g, "DEBUG MODE\n");
}
#if defined(CONFIG_NVGPU_NEXT)
if (acr->get_versioned_sig != NULL) {
p_sig = acr->get_versioned_sig(g, acr, p_sig, &sig_size);
}
#endif
/* Patching logic:*/
sig_size = sig_size / dmem_word_size;
for (i = 0U; i < (sizeof(*p_patch_loc) / dmem_word_size); i++) {
for (j = 0U; j < sig_size; j++) {
p_img[nvgpu_safe_add_u32(
(p_patch_loc[i] / dmem_word_size), j)] =
p_sig[nvgpu_safe_add_u32(
(p_patch_ind[i] * dmem_word_size), j)];
}
}
}
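
A standalone toy of the patch loop above (all data invented): one patch location in the image is overwritten with a 16-byte signature, one 4-byte DMEM word at a time.

#include <stdio.h>

int main(void)
{
	unsigned int img[8] = { 0U };
	unsigned int sig[4] = { 0x11111111U, 0x22222222U,
				0x33333333U, 0x44444444U };
	unsigned int patch_loc = 16U;      /* byte offset of sig in the image */
	unsigned int sig_words = 16U / 4U; /* 16-byte sig, 4-byte DMEM words */
	unsigned int j;

	for (j = 0U; j < sig_words; j++) {
		img[(patch_loc / 4U) + j] = sig[j];
	}
	for (j = 0U; j < 8U; j++) {
		printf("img[%u] = 0x%08X\n", j, img[j]);
	}
	return 0;
}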
/*
* Loads the ACR bin to SYSMEM/FB and bootstraps ACR with bootloader code.
* start and end are addresses of the ucode blob in the non-WPR region.
*/
int nvgpu_acr_bootstrap_hs_ucode(struct gk20a *g, struct nvgpu_acr *acr,
struct hs_acr *acr_desc)
{
struct nvgpu_firmware *acr_fw = acr_desc->acr_fw;
struct bin_hdr *hs_bin_hdr = NULL;
struct acr_fw_header *fw_hdr = NULL;
u32 *ucode_header = NULL;
u32 *ucode = NULL;
u32 timeout = 0;
int err = 0;
nvgpu_acr_dbg(g, "ACR TYPE %x ", acr_desc->acr_type);
if (acr_fw != NULL) {
err = acr->patch_wpr_info_to_ucode(g, acr, acr_desc, true);
if (err != 0) {
nvgpu_err(g, "Falcon ucode patch wpr info failed");
return err;
}
} else {
/* Firmware is stored in an SoC-specific path in FMODEL,
* hence NVGPU_REQUEST_FIRMWARE_NO_WARN is used instead
* of NVGPU_REQUEST_FIRMWARE_NO_SOC.
*/
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
acr_fw = nvgpu_request_firmware(g,
acr_desc->acr_fw_name,
NVGPU_REQUEST_FIRMWARE_NO_WARN);
} else
#endif
{
acr_fw = nvgpu_request_firmware(g,
acr_desc->acr_fw_name,
NVGPU_REQUEST_FIRMWARE_NO_SOC);
}
if (acr_fw == NULL) {
nvgpu_err(g, "%s ucode get fail for %s",
acr_desc->acr_fw_name, g->name);
return -ENOENT;
}
acr_desc->acr_fw = acr_fw;
err = acr->patch_wpr_info_to_ucode(g, acr, acr_desc, false);
if (err != 0) {
nvgpu_err(g, "Falcon ucode patch wpr info failed");
goto err_free_ucode;
}
}
hs_bin_hdr = (struct bin_hdr *)(void *)acr_fw->data;
fw_hdr = (struct acr_fw_header *)(void *)(acr_fw->data +
hs_bin_hdr->header_offset);
ucode_header = (u32 *)(void *)(acr_fw->data + fw_hdr->hdr_offset);
ucode = (u32 *)(void *)(acr_fw->data + hs_bin_hdr->data_offset);
/* Patch Ucode signatures */
acr_ucode_patch_sig(g, ucode,
(u32 *)(void *)(acr_fw->data + fw_hdr->sig_prod_offset),
(u32 *)(void *)(acr_fw->data + fw_hdr->sig_dbg_offset),
(u32 *)(void *)(acr_fw->data + fw_hdr->patch_loc),
(u32 *)(void *)(acr_fw->data + fw_hdr->patch_sig),
fw_hdr->sig_dbg_size);
err = nvgpu_falcon_hs_ucode_load_bootstrap(acr_desc->acr_flcn,
ucode, ucode_header);
if (err != 0) {
nvgpu_err(g, "HS ucode load & bootstrap failed");
goto err_free_ucode;
}
/* wait for complete & halt */
if (nvgpu_platform_is_silicon(g)) {
timeout = ACR_COMPLETION_TIMEOUT_SILICON_MS;
} else {
timeout = ACR_COMPLETION_TIMEOUT_NON_SILICON_MS;
}
err = nvgpu_acr_wait_for_completion(g, acr_desc, timeout);
if (err != 0) {
nvgpu_err(g, "HS ucode completion err %d", err);
goto err_free_ucode;
}
return 0;
err_free_ucode:
nvgpu_release_firmware(g, acr_fw);
acr_desc->acr_fw = NULL;
return err;
}

@@ -0,0 +1,139 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef ACR_BOOTSTRAP_H
#define ACR_BOOTSTRAP_H
#include "nvgpu_acr_interface.h"
#ifdef CONFIG_NVGPU_NEXT
#include "common/acr/nvgpu_next_acr_bootstrap.h"
#endif
struct gk20a;
struct nvgpu_acr;
struct flcn_acr_region_prop_v0 {
u32 start_addr;
u32 end_addr;
u32 region_id;
u32 read_mask;
u32 write_mask;
u32 client_mask;
};
struct flcn_acr_regions_v0 {
u32 no_regions;
struct flcn_acr_region_prop_v0 region_props[NVGPU_FLCN_ACR_MAX_REGIONS];
};
struct flcn_acr_desc_v0 {
union {
u32 reserved_dmem[(LSF_BOOTSTRAP_OWNER_RESERVED_DMEM_SIZE/4)];
u32 signatures[4];
} ucode_reserved_space;
/* Always first */
u32 wpr_region_id;
u32 wpr_offset;
u32 mmu_mem_range;
struct flcn_acr_regions_v0 regions;
u32 nonwpr_ucode_blob_size;
u64 nonwpr_ucode_blob_start;
};
struct bin_hdr {
/* 0x10de */
u32 bin_magic;
/* versioning of bin format */
u32 bin_ver;
/* Entire image size including this header */
u32 bin_size;
/*
* Header offset of executable binary metadata,
* starts at offset 0x100
*/
u32 header_offset;
/*
* Start of executable binary data, starts at
* offset 0x200
*/
u32 data_offset;
/* Size of executable binary */
u32 data_size;
};
struct acr_fw_header {
u32 sig_dbg_offset;
u32 sig_dbg_size;
u32 sig_prod_offset;
u32 sig_prod_size;
u32 patch_loc;
u32 patch_sig;
u32 hdr_offset; /* This header points to acr_ucode_header_t210_load */
u32 hdr_size; /* Size of above header */
};
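
A standalone sketch of walking the header chain above: the blob starts with a bin_hdr whose bin_magic should read 0x10de, with metadata at header_offset and ucode data at data_offset. The offsets below follow the comments (0x100/0x200) but the buffer contents are invented:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_bin_hdr {
	uint32_t magic, ver, size, header_off, data_off, data_size;
};

int main(void)
{
	uint8_t blob[0x300];
	struct toy_bin_hdr hdr = { 0x10de, 1, sizeof(blob), 0x100, 0x200, 0x100 };
	struct toy_bin_hdr parsed;

	memset(blob, 0, sizeof(blob));
	memcpy(blob, &hdr, sizeof(hdr));   /* pretend firmware image */

	memcpy(&parsed, blob, sizeof(parsed));
	if (parsed.magic != 0x10deU) {
		puts("bad magic");
		return 1;
	}
	printf("fw header at 0x%X, ucode data at 0x%X\n",
	       (unsigned)parsed.header_off, (unsigned)parsed.data_off);
	return 0;
}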
/* ACR Falcon descriptors */
struct hs_acr {
#define ACR_DEFAULT 0U
#define ACR_AHESASC_NON_FUSA 1U
#define ACR_ASB_NON_FUSA 2U
#define ACR_AHESASC_FUSA 3U
#define ACR_ASB_FUSA 4U
u32 acr_type;
/* ACR ucode */
const char *acr_fw_name;
const char *acr_code_name;
const char *acr_data_name;
const char *acr_manifest_name;
struct nvgpu_firmware *code_fw;
struct nvgpu_firmware *data_fw;
struct nvgpu_firmware *manifest_fw;
struct nvgpu_firmware *acr_fw;
union{
struct flcn_acr_desc_v0 *acr_dmem_desc_v0;
struct flcn_acr_desc *acr_dmem_desc;
};
#if defined(CONFIG_NVGPU_NEXT)
struct nvgpu_mem acr_falcon2_sysmem_desc;
struct flcn2_acr_desc acr_sysmem_desc;
struct nvgpu_mem ls_pmu_desc;
#endif
/* Falcon used to execute ACR ucode */
struct nvgpu_falcon *acr_flcn;
void (*report_acr_engine_bus_err_status)(struct gk20a *g,
u32 bar0_status, u32 error_type);
int (*acr_engine_bus_err_status)(struct gk20a *g, u32 *bar0_status,
u32 *error_type);
bool (*acr_validate_mem_integrity)(struct gk20a *g);
};
int nvgpu_acr_wait_for_completion(struct gk20a *g, struct hs_acr *acr_desc,
u32 timeout);
int nvgpu_acr_bootstrap_hs_ucode(struct gk20a *g, struct nvgpu_acr *acr,
struct hs_acr *acr_desc);
#endif /* ACR_BOOTSTRAP_H */

@@ -0,0 +1,161 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef ACR_H
#define ACR_H
#include "acr_bootstrap.h"
#ifdef CONFIG_NVGPU_ACR_LEGACY
#include "acr_blob_construct_v0.h"
#endif
#include "acr_blob_construct.h"
struct gk20a;
struct nvgpu_acr;
struct wpr_carveout_info;
#define nvgpu_acr_dbg(g, fmt, args...) \
nvgpu_log(g, gpu_dbg_pmu, fmt, ##args)
/*
* Falcon UCODE header index.
*/
#define FLCN_NL_UCODE_HDR_OS_CODE_OFF_IND (0U)
#define FLCN_NL_UCODE_HDR_OS_CODE_SIZE_IND (1U)
#define FLCN_NL_UCODE_HDR_OS_DATA_OFF_IND (2U)
#define FLCN_NL_UCODE_HDR_OS_DATA_SIZE_IND (3U)
#define FLCN_NL_UCODE_HDR_NUM_APPS_IND (4U)
/*
* There are a total of N applications, with code and data offsets defined
* in the UCODE header. These macros provide the CODE and DATA offset and
* size of the A-th application.
*/
#define FLCN_NL_UCODE_HDR_APP_CODE_START_IND (5U)
#define FLCN_NL_UCODE_HDR_APP_CODE_OFF_IND(N, A) \
(FLCN_NL_UCODE_HDR_APP_CODE_START_IND + ((A)*2U))
#define FLCN_NL_UCODE_HDR_APP_CODE_SIZE_IND(N, A) \
(FLCN_NL_UCODE_HDR_APP_CODE_START_IND + ((A)*2U) + 1U)
#define FLCN_NL_UCODE_HDR_APP_CODE_END_IND(N) \
(FLCN_NL_UCODE_HDR_APP_CODE_START_IND + ((N)*2U) - 1U)
#define FLCN_NL_UCODE_HDR_APP_DATA_START_IND(N) \
(FLCN_NL_UCODE_HDR_APP_CODE_END_IND(N) + 1U)
#define FLCN_NL_UCODE_HDR_APP_DATA_OFF_IND(N, A) \
(FLCN_NL_UCODE_HDR_APP_DATA_START_IND(N) + ((A)*2U))
#define FLCN_NL_UCODE_HDR_APP_DATA_SIZE_IND(N, A) \
(FLCN_NL_UCODE_HDR_APP_DATA_START_IND(N) + ((A)*2U) + 1U)
#define FLCN_NL_UCODE_HDR_APP_DATA_END_IND(N) \
(FLCN_NL_UCODE_HDR_APP_DATA_START_IND(N) + ((N)*2U) - 1U)
#define FLCN_NL_UCODE_HDR_OS_OVL_OFF_IND(N) \
(FLCN_NL_UCODE_HDR_APP_DATA_END_IND(N) + 1U)
#define FLCN_NL_UCODE_HDR_OS_OVL_SIZE_IND(N) \
(FLCN_NL_UCODE_HDR_APP_DATA_END_IND(N) + 2U)
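A standalone worked example of the index math above for N = 2 applications (macros renamed to stay self-contained; they mirror the FLCN_NL_UCODE_HDR_* definitions):

#include <stdio.h>

#define TOY_CODE_START      5U
#define TOY_CODE_OFF(N, A)  (TOY_CODE_START + ((A) * 2U))
#define TOY_CODE_END(N)     (TOY_CODE_START + ((N) * 2U) - 1U)
#define TOY_DATA_START(N)   (TOY_CODE_END(N) + 1U)
#define TOY_DATA_OFF(N, A)  (TOY_DATA_START(N) + ((A) * 2U))
#define TOY_DATA_END(N)     (TOY_DATA_START(N) + ((N) * 2U) - 1U)

int main(void)
{
	unsigned int n = 2U, a;

	for (a = 0U; a < n; a++) {
		printf("app %u: code off idx %u, data off idx %u\n",
		       a, TOY_CODE_OFF(n, a), TOY_DATA_OFF(n, a));
	}
	/* for n = 2: code pairs at 5..8, data pairs at 9..12, overlay at 13/14 */
	printf("os overlay off idx %u, size idx %u\n",
	       TOY_DATA_END(n) + 1U, TOY_DATA_END(n) + 2U);
	return 0;
}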
#define GM20B_HSBIN_ACR_PROD_UCODE "nv_acr_ucode_prod.bin"
#define GM20B_HSBIN_ACR_DBG_UCODE "nv_acr_ucode_dbg.bin"
#define HSBIN_ACR_BL_UCODE_IMAGE "pmu_bl.bin"
#define HSBIN_ACR_PROD_UCODE "acr_ucode_prod.bin"
#define HSBIN_ACR_DBG_UCODE "acr_ucode_dbg.bin"
#define HSBIN_ACR_AHESASC_NON_FUSA_PROD_UCODE "acr_ahesasc_prod_ucode.bin"
#define HSBIN_ACR_ASB_NON_FUSA_PROD_UCODE "acr_asb_prod_ucode.bin"
#define HSBIN_ACR_AHESASC_NON_FUSA_DBG_UCODE "acr_ahesasc_dbg_ucode.bin"
#define HSBIN_ACR_ASB_NON_FUSA_DBG_UCODE "acr_asb_dbg_ucode.bin"
#define HSBIN_ACR_AHESASC_FUSA_PROD_UCODE "acr_ahesasc_fusa_prod_ucode.bin"
#define HSBIN_ACR_ASB_FUSA_PROD_UCODE "acr_asb_fusa_prod_ucode.bin"
#define HSBIN_ACR_AHESASC_FUSA_DBG_UCODE "acr_ahesasc_fusa_dbg_ucode.bin"
#define HSBIN_ACR_ASB_FUSA_DBG_UCODE "acr_asb_fusa_dbg_ucode.bin"
#define GM20B_FECS_UCODE_SIG "fecs_sig.bin"
#define T18x_GPCCS_UCODE_SIG "gpccs_sig.bin"
#define TU104_FECS_UCODE_SIG "tu104/fecs_sig.bin"
#define TU104_GPCCS_UCODE_SIG "tu104/gpccs_sig.bin"
#define LSF_SEC2_UCODE_IMAGE_BIN "sec2_ucode_image.bin"
#define LSF_SEC2_UCODE_DESC_BIN "sec2_ucode_desc.bin"
#define LSF_SEC2_UCODE_SIG_BIN "sec2_sig.bin"
#define LSF_SEC2_UCODE_IMAGE_FUSA_BIN "sec2_ucode_fusa_image.bin"
#define LSF_SEC2_UCODE_DESC_FUSA_BIN "sec2_ucode_fusa_desc.bin"
#define LSF_SEC2_UCODE_SIG_FUSA_BIN "sec2_fusa_sig.bin"
#define ACR_COMPLETION_TIMEOUT_NON_SILICON_MS 10000U /* in msec */
#define ACR_COMPLETION_TIMEOUT_SILICON_MS 100U /* in msec */
struct acr_lsf_config {
u32 falcon_id;
u32 falcon_dma_idx;
bool is_lazy_bootstrap;
bool is_priv_load;
int (*get_lsf_ucode_details)(struct gk20a *g, void *lsf_ucode_img);
void (*get_cmd_line_args_offset)(struct gk20a *g, u32 *args_offset);
};
struct nvgpu_acr {
struct gk20a *g;
u32 bootstrap_owner;
u32 num_of_sig;
/* LSF properties */
u64 lsf_enable_mask;
struct acr_lsf_config lsf[FALCON_ID_END];
/*
* non-WPR space holding the LSF ucodes;
* ACR copies the ucodes from non-WPR to WPR.
*/
struct nvgpu_mem ucode_blob;
/*
* Even though this mem_desc is not used, the WPR
* region still needs to be reserved in the
* allocator in the dGPU case.
*/
struct nvgpu_mem wpr_dummy;
/* ACR member for different types of ucode */
/* For older dGPU/Tegra ACR ucode */
struct hs_acr acr;
/* ACR load split feature support */
struct hs_acr acr_ahesasc;
struct hs_acr acr_asb;
/* ACR load split feature support for iGPU */
struct hs_acr acr_alsb;
struct hs_acr acr_asc;
int (*prepare_ucode_blob)(struct gk20a *g);
int (*alloc_blob_space)(struct gk20a *g, size_t size,
struct nvgpu_mem *mem);
int (*patch_wpr_info_to_ucode)(struct gk20a *g, struct nvgpu_acr *acr,
struct hs_acr *acr_desc, bool is_recovery);
int (*bootstrap_hs_acr)(struct gk20a *g, struct nvgpu_acr *acr);
void (*get_wpr_info)(struct gk20a *g, struct wpr_carveout_info *inf);
u32* (*get_versioned_sig)(struct gk20a *g, struct nvgpu_acr *acr,
u32 *sig, u32 *sig_size);
};
#endif /* ACR_H */

View File

@@ -0,0 +1,172 @@
/*
* Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/gk20a.h>
#include <nvgpu/types.h>
#include <nvgpu/firmware.h>
#include <nvgpu/falcon.h>
#include <nvgpu/bug.h>
#include <nvgpu/pmu/fw.h>
#include "acr_wpr.h"
#include "acr_priv.h"
#include "acr_sw_gm20b.h"
#include "acr_blob_alloc.h"
#include "acr_bootstrap.h"
#include "acr_blob_construct_v0.h"
static int gm20b_bootstrap_hs_acr(struct gk20a *g, struct nvgpu_acr *acr)
{
int err = 0;
nvgpu_log_fn(g, " ");
err = nvgpu_acr_bootstrap_hs_ucode(g, g->acr, &g->acr->acr);
if (err != 0) {
nvgpu_err(g, "ACR bootstrap failed");
}
return err;
}
static int gm20b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
struct nvgpu_acr *acr, struct hs_acr *acr_desc, bool is_recovery)
{
struct nvgpu_firmware *acr_fw = acr_desc->acr_fw;
struct acr_fw_header *acr_fw_hdr = NULL;
struct bin_hdr *acr_fw_bin_hdr = NULL;
struct flcn_acr_desc_v0 *acr_dmem_desc;
u32 *acr_ucode_header = NULL;
u32 *acr_ucode_data = NULL;
nvgpu_log_fn(g, " ");
if (is_recovery) {
acr_desc->acr_dmem_desc_v0->nonwpr_ucode_blob_size = 0U;
} else {
acr_fw_bin_hdr = (struct bin_hdr *)acr_fw->data;
acr_fw_hdr = (struct acr_fw_header *)
(acr_fw->data + acr_fw_bin_hdr->header_offset);
acr_ucode_data = (u32 *)(acr_fw->data +
acr_fw_bin_hdr->data_offset);
acr_ucode_header = (u32 *)(acr_fw->data +
acr_fw_hdr->hdr_offset);
/* Patch WPR info to ucode */
acr_dmem_desc = (struct flcn_acr_desc_v0 *)
&(((u8 *)acr_ucode_data)[acr_ucode_header[2U]]);
acr_desc->acr_dmem_desc_v0 = acr_dmem_desc;
acr_dmem_desc->nonwpr_ucode_blob_start =
nvgpu_mem_get_addr(g, &g->acr->ucode_blob);
nvgpu_assert(g->acr->ucode_blob.size <= U32_MAX);
acr_dmem_desc->nonwpr_ucode_blob_size =
(u32)g->acr->ucode_blob.size;
acr_dmem_desc->regions.no_regions = 1U;
acr_dmem_desc->wpr_offset = 0U;
}
return 0;
}
/* LSF static config functions */
static u32 gm20b_acr_lsf_pmu(struct gk20a *g,
struct acr_lsf_config *lsf)
{
/* PMU LS falcon info */
lsf->falcon_id = FALCON_ID_PMU;
lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
lsf->is_lazy_bootstrap = false;
lsf->is_priv_load = false;
#ifdef CONFIG_NVGPU_LS_PMU
lsf->get_lsf_ucode_details = nvgpu_acr_lsf_pmu_ucode_details_v0;
lsf->get_cmd_line_args_offset = nvgpu_pmu_fw_get_cmd_line_args_offset;
#endif
return BIT32(lsf->falcon_id);
}
static u32 gm20b_acr_lsf_fecs(struct gk20a *g,
struct acr_lsf_config *lsf)
{
/* FECS LS falcon info */
lsf->falcon_id = FALCON_ID_FECS;
lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
lsf->is_lazy_bootstrap = false;
lsf->is_priv_load = false;
lsf->get_lsf_ucode_details = nvgpu_acr_lsf_fecs_ucode_details_v0;
lsf->get_cmd_line_args_offset = NULL;
return BIT32(lsf->falcon_id);
}
static u32 gm20b_acr_lsf_config(struct gk20a *g,
struct nvgpu_acr *acr)
{
u32 lsf_enable_mask = 0;
lsf_enable_mask |= gm20b_acr_lsf_pmu(g, &acr->lsf[FALCON_ID_PMU]);
lsf_enable_mask |= gm20b_acr_lsf_fecs(g, &acr->lsf[FALCON_ID_FECS]);
return lsf_enable_mask;
}
static void gm20b_acr_default_sw_init(struct gk20a *g, struct hs_acr *hs_acr)
{
nvgpu_log_fn(g, " ");
/* ACR HS ucode type & f/w name */
hs_acr->acr_type = ACR_DEFAULT;
if (!g->ops.pmu.is_debug_mode_enabled(g)) {
hs_acr->acr_fw_name = GM20B_HSBIN_ACR_PROD_UCODE;
} else {
hs_acr->acr_fw_name = GM20B_HSBIN_ACR_DBG_UCODE;
}
/* set the falcon on which ACR needs to execute */
hs_acr->acr_flcn = g->pmu->flcn;
hs_acr->acr_engine_bus_err_status =
g->ops.pmu.bar0_error_status;
}
void nvgpu_gm20b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
{
nvgpu_log_fn(g, " ");
acr->g = g;
acr->bootstrap_owner = FALCON_ID_PMU;
acr->lsf_enable_mask = gm20b_acr_lsf_config(g, acr);
gm20b_acr_default_sw_init(g, &acr->acr);
acr->prepare_ucode_blob = nvgpu_acr_prepare_ucode_blob_v0;
acr->get_wpr_info = nvgpu_acr_wpr_info_sys;
acr->alloc_blob_space = nvgpu_acr_alloc_blob_space_sys;
acr->bootstrap_hs_acr = gm20b_bootstrap_hs_acr;
acr->patch_wpr_info_to_ucode =
gm20b_acr_patch_wpr_info_to_ucode;
}

View File

@@ -0,0 +1,33 @@
/*
* GM20B ACR
*
* Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef ACR_SW_GM20B_H
#define ACR_SW_GM20B_H
struct gk20a;
struct nvgpu_acr;
void nvgpu_gm20b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr);
#endif /*ACR_SW_GM20B_H*/

View File

@@ -0,0 +1,80 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "acr_sw_gp10b.h"
#include <nvgpu/types.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/pmu.h>
#include "acr_blob_construct_v0.h"
#include "acr_priv.h"
#include "acr_sw_gm20b.h"
#include "acr_sw_gp10b.h"
/* LSF static config functions */
static u32 gp10b_acr_lsf_gpccs(struct gk20a *g,
struct acr_lsf_config *lsf)
{
/* GPCCS LS falcon info */
lsf->falcon_id = FALCON_ID_GPCCS;
lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
lsf->is_lazy_bootstrap = true;
lsf->is_priv_load = true;
lsf->get_lsf_ucode_details = nvgpu_acr_lsf_gpccs_ucode_details_v0;
lsf->get_cmd_line_args_offset = NULL;
return BIT32(lsf->falcon_id);
}
static void gp10b_acr_default_sw_init(struct gk20a *g, struct hs_acr *hs_acr)
{
nvgpu_log_fn(g, " ");
/* ACR HS ucode type & f/w name */
hs_acr->acr_type = ACR_DEFAULT;
if (!g->ops.pmu.is_debug_mode_enabled(g)) {
hs_acr->acr_fw_name = HSBIN_ACR_PROD_UCODE;
} else {
hs_acr->acr_fw_name = HSBIN_ACR_DBG_UCODE;
}
/* set the falcon on which ACR needs to execute */
hs_acr->acr_flcn = g->pmu->flcn;
hs_acr->acr_engine_bus_err_status =
g->ops.pmu.bar0_error_status;
}
void nvgpu_gp10b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
{
nvgpu_log_fn(g, " ");
/* inherit the gm20b config data */
nvgpu_gm20b_acr_sw_init(g, acr);
gp10b_acr_default_sw_init(g, &acr->acr);
/* gp10b supports LSF gpccs bootstrap */
acr->lsf_enable_mask |= gp10b_acr_lsf_gpccs(g,
&acr->lsf[FALCON_ID_GPCCS]);
}

View File

@@ -0,0 +1,31 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef ACR_SW_GP10B_H
#define ACR_SW_GP10B_H
struct gk20a;
struct nvgpu_acr;
void nvgpu_gp10b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr);
#endif /* ACR_SW_GP10B_H */

View File

@@ -0,0 +1,211 @@
/*
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/types.h>
#include <nvgpu/firmware.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/bug.h>
#ifdef CONFIG_NVGPU_LS_PMU
#include <nvgpu/pmu/fw.h>
#endif
#include "acr_wpr.h"
#include "acr_priv.h"
#include "acr_blob_alloc.h"
#include "acr_blob_construct.h"
#include "acr_bootstrap.h"
#include "acr_sw_gv11b.h"
#define RECOVERY_UCODE_BLOB_SIZE (0U)
#define WPR_OFFSET (0U)
#define ACR_REGIONS (1U)
static int gv11b_bootstrap_hs_acr(struct gk20a *g, struct nvgpu_acr *acr)
{
int err = 0;
nvgpu_log_fn(g, " ");
err = nvgpu_acr_bootstrap_hs_ucode(g, g->acr, &g->acr->acr);
if (err != 0) {
nvgpu_err(g, "ACR bootstrap failed");
}
return err;
}
static int gv11b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
struct nvgpu_acr *acr, struct hs_acr *acr_desc, bool is_recovery)
{
struct nvgpu_firmware *acr_fw = acr_desc->acr_fw;
struct acr_fw_header *acr_fw_hdr = NULL;
struct bin_hdr *acr_fw_bin_hdr = NULL;
struct flcn_acr_desc *acr_dmem_desc;
u32 *acr_ucode_header = NULL;
u32 *acr_ucode_data = NULL;
const u32 acr_desc_offset = 2U;
nvgpu_log_fn(g, " ");
#ifdef CONFIG_NVGPU_NON_FUSA
if (is_recovery) {
acr_desc->acr_dmem_desc->nonwpr_ucode_blob_size =
RECOVERY_UCODE_BLOB_SIZE;
} else
#endif
{
acr_fw_bin_hdr = (struct bin_hdr *)(void *)acr_fw->data;
acr_fw_hdr = (struct acr_fw_header *)(void *)
(acr_fw->data + acr_fw_bin_hdr->header_offset);
acr_ucode_data = (u32 *)(void *)(acr_fw->data +
acr_fw_bin_hdr->data_offset);
acr_ucode_header = (u32 *)(void *)(acr_fw->data +
acr_fw_hdr->hdr_offset);
/* Patch WPR info to ucode */
acr_dmem_desc = (struct flcn_acr_desc *)(void *)
&(((u8 *)acr_ucode_data)[acr_ucode_header[acr_desc_offset]]);
acr_desc->acr_dmem_desc = acr_dmem_desc;
acr_dmem_desc->nonwpr_ucode_blob_start =
nvgpu_mem_get_addr(g, &g->acr->ucode_blob);
nvgpu_assert(g->acr->ucode_blob.size <= U32_MAX);
acr_dmem_desc->nonwpr_ucode_blob_size =
(u32)g->acr->ucode_blob.size;
acr_dmem_desc->regions.no_regions = ACR_REGIONS;
acr_dmem_desc->wpr_offset = WPR_OFFSET;
}
return 0;
}
/* LSF static config functions */
#ifdef CONFIG_NVGPU_LS_PMU
static u32 gv11b_acr_lsf_pmu(struct gk20a *g,
struct acr_lsf_config *lsf)
{
if (!g->support_ls_pmu) {
/* skip adding LS PMU ucode to ACR blob */
return 0;
}
/* PMU LS falcon info */
lsf->falcon_id = FALCON_ID_PMU;
lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
lsf->is_lazy_bootstrap = false;
lsf->is_priv_load = false;
lsf->get_lsf_ucode_details = nvgpu_acr_lsf_pmu_ucode_details;
lsf->get_cmd_line_args_offset = nvgpu_pmu_fw_get_cmd_line_args_offset;
return BIT32(lsf->falcon_id);
}
#endif
/* LSF init */
static u32 gv11b_acr_lsf_fecs(struct gk20a *g,
struct acr_lsf_config *lsf)
{
/* FECS LS falcon info */
lsf->falcon_id = FALCON_ID_FECS;
lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
/*
* FECS LSF cold/recovery bootstrap is handled by ACR when LS PMU
* is not present
*/
lsf->is_lazy_bootstrap = g->support_ls_pmu ? true : false;
lsf->is_priv_load = false;
lsf->get_lsf_ucode_details = nvgpu_acr_lsf_fecs_ucode_details;
lsf->get_cmd_line_args_offset = NULL;
return BIT32(lsf->falcon_id);
}
static u32 gv11b_acr_lsf_gpccs(struct gk20a *g,
struct acr_lsf_config *lsf)
{
/* GPCCS LS falcon info */
lsf->falcon_id = FALCON_ID_GPCCS;
lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
/*
* GPCCS LSF cold/recovery bootstrap is handled by ACR when LS PMU
* is not present
*/
lsf->is_lazy_bootstrap = g->support_ls_pmu ? true : false;
lsf->is_priv_load = true;
lsf->get_lsf_ucode_details = nvgpu_acr_lsf_gpccs_ucode_details;
lsf->get_cmd_line_args_offset = NULL;
return BIT32(lsf->falcon_id);
}
u32 gv11b_acr_lsf_config(struct gk20a *g,
struct nvgpu_acr *acr)
{
u32 lsf_enable_mask = 0;
#ifdef CONFIG_NVGPU_LS_PMU
lsf_enable_mask |= gv11b_acr_lsf_pmu(g, &acr->lsf[FALCON_ID_PMU]);
#endif
lsf_enable_mask |= gv11b_acr_lsf_fecs(g, &acr->lsf[FALCON_ID_FECS]);
lsf_enable_mask |= gv11b_acr_lsf_gpccs(g, &acr->lsf[FALCON_ID_GPCCS]);
return lsf_enable_mask;
}
static void gv11b_acr_default_sw_init(struct gk20a *g, struct hs_acr *acr_desc)
{
nvgpu_log_fn(g, " ");
acr_desc->acr_type = ACR_DEFAULT;
if (!g->ops.pmu.is_debug_mode_enabled(g)) {
acr_desc->acr_fw_name = HSBIN_ACR_PROD_UCODE;
} else {
acr_desc->acr_fw_name = HSBIN_ACR_DBG_UCODE;
}
acr_desc->acr_flcn = g->pmu->flcn;
acr_desc->report_acr_engine_bus_err_status =
nvgpu_pmu_report_bar0_pri_err_status;
acr_desc->acr_engine_bus_err_status =
g->ops.pmu.bar0_error_status;
acr_desc->acr_validate_mem_integrity = g->ops.pmu.validate_mem_integrity;
}
void nvgpu_gv11b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
{
nvgpu_log_fn(g, " ");
acr->g = g;
acr->bootstrap_owner = FALCON_ID_PMU;
acr->lsf_enable_mask = gv11b_acr_lsf_config(g, acr);
gv11b_acr_default_sw_init(g, &acr->acr);
acr->prepare_ucode_blob = nvgpu_acr_prepare_ucode_blob;
acr->get_wpr_info = nvgpu_acr_wpr_info_sys;
acr->alloc_blob_space = nvgpu_acr_alloc_blob_space_sys;
acr->bootstrap_hs_acr = gv11b_bootstrap_hs_acr;
acr->patch_wpr_info_to_ucode = gv11b_acr_patch_wpr_info_to_ucode;
}

View File

@@ -0,0 +1,34 @@
/*
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef ACR_SW_GV11B_H
#define ACR_SW_GV11B_H
struct gk20a;
struct nvgpu_acr;
struct hs_acr;
void nvgpu_gv11b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr);
u32 gv11b_acr_lsf_config(struct gk20a *g, struct nvgpu_acr *acr);
#endif /* ACR_SW_GV11B_H */

View File

@@ -0,0 +1,285 @@
/*
* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "acr_sw_tu104.h"
#include <nvgpu/gk20a.h>
#include <nvgpu/firmware.h>
#include "acr_wpr.h"
#include "acr_priv.h"
#include "acr_blob_alloc.h"
#include "acr_bootstrap.h"
#include "acr_blob_construct.h"
#include "acr_sw_gv11b.h"
#include "acr_sw_tu104.h"
static int tu104_bootstrap_hs_acr(struct gk20a *g, struct nvgpu_acr *acr)
{
int err = 0;
nvgpu_log_fn(g, " ");
err = nvgpu_acr_bootstrap_hs_ucode(g, g->acr, &g->acr->acr_ahesasc);
if (err != 0) {
nvgpu_err(g, "ACR AHESASC bootstrap failed");
goto exit;
}
err = nvgpu_acr_bootstrap_hs_ucode(g, g->acr, &g->acr->acr_asb);
if (err != 0) {
nvgpu_err(g, "ACR ASB bootstrap failed");
goto exit;
}
exit:
return err;
}
/* WPR info update */
static int tu104_acr_patch_wpr_info_to_ucode(struct gk20a *g,
struct nvgpu_acr *acr, struct hs_acr *acr_desc,
bool is_recovery)
{
struct nvgpu_firmware *acr_fw = acr_desc->acr_fw;
struct acr_fw_header *acr_fw_hdr = NULL;
struct bin_hdr *acr_fw_bin_hdr = NULL;
struct flcn_acr_desc *acr_dmem_desc;
struct wpr_carveout_info wpr_inf;
u32 *acr_ucode_header = NULL;
u32 *acr_ucode_data = NULL;
u64 tmp_addr;
nvgpu_log_fn(g, " ");
acr_fw_bin_hdr = (struct bin_hdr *)acr_fw->data;
acr_fw_hdr = (struct acr_fw_header *)
(acr_fw->data + acr_fw_bin_hdr->header_offset);
acr_ucode_data = (u32 *)(acr_fw->data + acr_fw_bin_hdr->data_offset);
acr_ucode_header = (u32 *)(acr_fw->data + acr_fw_hdr->hdr_offset);
acr->get_wpr_info(g, &wpr_inf);
acr_dmem_desc = (struct flcn_acr_desc *)
&(((u8 *)acr_ucode_data)[acr_ucode_header[2U]]);
acr_dmem_desc->nonwpr_ucode_blob_start = wpr_inf.nonwpr_base;
nvgpu_assert(wpr_inf.size <= U32_MAX);
acr_dmem_desc->nonwpr_ucode_blob_size = (u32)wpr_inf.size;
acr_dmem_desc->regions.no_regions = 1U;
acr_dmem_desc->wpr_offset = 0U;
acr_dmem_desc->wpr_region_id = 1U;
acr_dmem_desc->regions.region_props[0U].region_id = 1U;
tmp_addr = (wpr_inf.wpr_base) >> 8U;
nvgpu_assert(u64_hi32(tmp_addr) == 0U);
acr_dmem_desc->regions.region_props[0U].start_addr = U32(tmp_addr);
tmp_addr = ((wpr_inf.wpr_base) + wpr_inf.size) >> 8U;
nvgpu_assert(u64_hi32(tmp_addr) == 0U);
acr_dmem_desc->regions.region_props[0U].end_addr = U32(tmp_addr);
tmp_addr = wpr_inf.nonwpr_base >> 8U;
nvgpu_assert(u64_hi32(tmp_addr) == 0U);
acr_dmem_desc->regions.region_props[0U].shadowmMem_startaddress =
U32(tmp_addr);
return 0;
}
/* LSF init */
static u32 tu104_acr_lsf_sec2(struct gk20a *g,
struct acr_lsf_config *lsf)
{
/* SEC2 LS falcon info */
lsf->falcon_id = FALCON_ID_SEC2;
lsf->falcon_dma_idx = NV_SEC2_DMAIDX_UCODE;
lsf->is_lazy_bootstrap = false;
lsf->is_priv_load = false;
lsf->get_lsf_ucode_details = nvgpu_acr_lsf_sec2_ucode_details;
lsf->get_cmd_line_args_offset = NULL;
return BIT32(lsf->falcon_id);
}
static u32 tu104_acr_lsf_pmu(struct gk20a *g,
struct acr_lsf_config *lsf)
{
/* PMU support not required until PSTATE support is enabled */
if (!g->support_ls_pmu) {
/* skip adding LS PMU ucode to ACR blob */
return 0;
}
/* PMU LS falcon info */
lsf->falcon_id = FALCON_ID_PMU;
lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
lsf->is_lazy_bootstrap = false;
lsf->is_priv_load = false;
#ifdef CONFIG_NVGPU_LS_PMU
lsf->get_lsf_ucode_details = nvgpu_acr_lsf_pmu_ucode_details;
lsf->get_cmd_line_args_offset = nvgpu_pmu_fw_get_cmd_line_args_offset;
#endif
return BIT32(lsf->falcon_id);
}
static u32 tu104_acr_lsf_fecs(struct gk20a *g,
struct acr_lsf_config *lsf)
{
/* FECS LS falcon info */
lsf->falcon_id = FALCON_ID_FECS;
lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
lsf->is_lazy_bootstrap = true;
lsf->is_priv_load = true;
lsf->get_lsf_ucode_details = nvgpu_acr_lsf_fecs_ucode_details;
lsf->get_cmd_line_args_offset = NULL;
return BIT32(lsf->falcon_id);
}
static u32 tu104_acr_lsf_gpccs(struct gk20a *g,
struct acr_lsf_config *lsf)
{
/* GPCCS LS falcon info */
lsf->falcon_id = FALCON_ID_GPCCS;
lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
lsf->is_lazy_bootstrap = true;
lsf->is_priv_load = true;
lsf->get_lsf_ucode_details = nvgpu_acr_lsf_gpccs_ucode_details;
lsf->get_cmd_line_args_offset = NULL;
return BIT32(lsf->falcon_id);
}
static u32 tu104_acr_lsf_config(struct gk20a *g,
struct nvgpu_acr *acr)
{
u32 lsf_enable_mask = 0;
lsf_enable_mask |= tu104_acr_lsf_pmu(g, &acr->lsf[FALCON_ID_PMU]);
lsf_enable_mask |= tu104_acr_lsf_fecs(g, &acr->lsf[FALCON_ID_FECS]);
lsf_enable_mask |= tu104_acr_lsf_gpccs(g, &acr->lsf[FALCON_ID_GPCCS]);
lsf_enable_mask |= tu104_acr_lsf_sec2(g, &acr->lsf[FALCON_ID_SEC2]);
return lsf_enable_mask;
}
/* fusa signing enable check */
static bool tu104_acr_is_fusa_enabled(struct gk20a *g)
{
return g->is_fusa_sku;
}
/* ACR-AHESASC (ACR hub encryption setter and signature checker) init */
static void tu104_acr_ahesasc_v0_ucode_select(struct gk20a *g,
struct hs_acr *acr_ahesasc)
{
acr_ahesasc->acr_type = ACR_AHESASC_NON_FUSA;
if (!g->ops.pmu.is_debug_mode_enabled(g)) {
acr_ahesasc->acr_fw_name = HSBIN_ACR_AHESASC_NON_FUSA_PROD_UCODE;
} else {
acr_ahesasc->acr_fw_name = HSBIN_ACR_AHESASC_NON_FUSA_DBG_UCODE;
}
}
static void tu104_acr_ahesasc_fusa_ucode_select(struct gk20a *g,
struct hs_acr *acr_ahesasc)
{
acr_ahesasc->acr_type = ACR_AHESASC_FUSA;
if (!g->ops.pmu.is_debug_mode_enabled(g)) {
acr_ahesasc->acr_fw_name = HSBIN_ACR_AHESASC_FUSA_PROD_UCODE;
} else {
acr_ahesasc->acr_fw_name = HSBIN_ACR_AHESASC_FUSA_DBG_UCODE;
}
}
static void tu104_acr_ahesasc_sw_init(struct gk20a *g,
struct hs_acr *acr_ahesasc)
{
if (tu104_acr_is_fusa_enabled(g)) {
tu104_acr_ahesasc_fusa_ucode_select(g, acr_ahesasc);
} else {
tu104_acr_ahesasc_v0_ucode_select(g, acr_ahesasc);
}
acr_ahesasc->acr_flcn = &g->sec2.flcn;
}
/* ACR-ASB (ACR SEC2 booter) init */
static void tu104_acr_asb_v0_ucode_select(struct gk20a *g,
struct hs_acr *acr_asb)
{
acr_asb->acr_type = ACR_ASB_NON_FUSA;
if (!g->ops.pmu.is_debug_mode_enabled(g)) {
acr_asb->acr_fw_name = HSBIN_ACR_ASB_NON_FUSA_PROD_UCODE;
} else {
acr_asb->acr_fw_name = HSBIN_ACR_ASB_NON_FUSA_DBG_UCODE;
}
}
static void tu104_acr_asb_fusa_ucode_select(struct gk20a *g,
struct hs_acr *acr_asb)
{
acr_asb->acr_type = ACR_ASB_FUSA;
if (!g->ops.pmu.is_debug_mode_enabled(g)) {
acr_asb->acr_fw_name = HSBIN_ACR_ASB_FUSA_PROD_UCODE;
} else {
acr_asb->acr_fw_name = HSBIN_ACR_ASB_FUSA_DBG_UCODE;
}
}
static void tu104_acr_asb_sw_init(struct gk20a *g,
struct hs_acr *acr_asb)
{
if (tu104_acr_is_fusa_enabled(g)) {
tu104_acr_asb_fusa_ucode_select(g, acr_asb);
} else {
tu104_acr_asb_v0_ucode_select(g, acr_asb);
}
acr_asb->acr_flcn = &g->gsp_flcn;
}
void nvgpu_tu104_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
{
nvgpu_log_fn(g, " ");
acr->lsf_enable_mask = tu104_acr_lsf_config(g, acr);
acr->prepare_ucode_blob = nvgpu_acr_prepare_ucode_blob;
acr->get_wpr_info = nvgpu_acr_wpr_info_vid;
acr->alloc_blob_space = nvgpu_acr_alloc_blob_space_vid;
acr->bootstrap_owner = FALCON_ID_GSPLITE;
acr->bootstrap_hs_acr = tu104_bootstrap_hs_acr;
acr->patch_wpr_info_to_ucode = tu104_acr_patch_wpr_info_to_ucode;
/* Init ACR-AHESASC */
tu104_acr_ahesasc_sw_init(g, &acr->acr_ahesasc);
/* Init ACR-ASB */
tu104_acr_asb_sw_init(g, &acr->acr_asb);
}

View File

@@ -0,0 +1,31 @@
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef ACR_SW_TU104_H
#define ACR_SW_TU104_H
struct gk20a;
struct nvgpu_acr;
void nvgpu_tu104_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr);
#endif /*ACR_SW_TU104_H*/

View File

@@ -0,0 +1,43 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/gk20a.h>
#include <nvgpu/types.h>
#include <nvgpu/dma.h>
#include "acr_wpr.h"
/* Both size and address of WPR need to be 128K-aligned */
#define DGPU_WPR_SIZE 0x200000U
void nvgpu_acr_wpr_info_sys(struct gk20a *g, struct wpr_carveout_info *inf)
{
g->ops.fb.read_wpr_info(g, &inf->wpr_base, &inf->size);
}
#ifdef CONFIG_NVGPU_DGPU
void nvgpu_acr_wpr_info_vid(struct gk20a *g, struct wpr_carveout_info *inf)
{
inf->wpr_base = g->mm.vidmem.bootstrap_base;
inf->nonwpr_base = inf->wpr_base + DGPU_WPR_SIZE;
inf->size = DGPU_WPR_SIZE;
}
#endif

View File

@@ -0,0 +1,40 @@
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef ACR_WPR_H
#define ACR_WPR_H
struct gk20a;
struct wpr_carveout_info {
u64 wpr_base;
u64 nonwpr_base;
u64 size;
};
void nvgpu_acr_wpr_info_sys(struct gk20a *g, struct wpr_carveout_info *inf);
#ifdef CONFIG_NVGPU_DGPU
void nvgpu_acr_wpr_info_vid(struct gk20a *g, struct wpr_carveout_info *inf);
#endif
#endif /* ACR_WPR_H */

View File

@@ -0,0 +1,609 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_ACR_INTERFACE_H
#define NVGPU_ACR_INTERFACE_H
/**
* @defgroup NVGPURM_BLOB_CONSTRUCT blob construct
*
* Blob construct interfaces:
* NVGPU creates the LS ucode blob in system/FB non-WPR memory. LS ucodes
* are read from the filesystem and added to the blob for the detected chip.
* Below are the structs that NVGPU must fill for each supported LS Falcon
* ucode on the detected chip. After filling them, NVGPU copies the structs
* along with the ucode into the non-WPR blob in the pattern shown below.
* The LS ucode blob is required by the ACR HS ucode to authenticate and
* load each LS ucode onto the respective engine's LS Falcon.
*
* + WPR header struct #lsf_wpr_header.
* + LSB header struct #lsf_lsb_header.
* + Boot loader struct #flcn_bl_dmem_desc.
* + ucode image.
*
* + BLOB Pattern:
* -----------------------------------------------
* | LSF WPR HDR | LSF LSB HDR | BL desc | ucode |
* -----------------------------------------------
*/
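/*
 * Layout sketch (illustrative, not part of the interface): one LS falcon
 * entry laid out with the alignments defined below. "blob_align" is a
 * hypothetical round-up helper.
 *
 *   u32 off = 0U;
 *   off = blob_align(off, LSF_WPR_HEADER_ALIGNMENT);    // lsf_wpr_header
 *   off = blob_align(off + (u32)sizeof(struct lsf_wpr_header),
 *                    LSF_LSB_HEADER_ALIGNMENT);         // lsf_lsb_header
 *   off = blob_align(off + (u32)sizeof(struct lsf_lsb_header),
 *                    LSF_BL_DATA_ALIGNMENT);            // flcn_bl_dmem_desc
 *   off = blob_align(off + (u32)sizeof(struct flcn_bl_dmem_desc),
 *                    LSF_UCODE_DATA_ALIGNMENT);         // ucode image
 */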
/**
* @ingroup NVGPURM_BLOB_CONSTRUCT
*/
/** @{*/
/**
* Light Secure WPR Content Alignments
*/
/** WPR header should be aligned to 256 bytes */
#define LSF_WPR_HEADER_ALIGNMENT (256U)
/** SUB WPR header should be aligned to 256 bytes */
#define LSF_SUB_WPR_HEADER_ALIGNMENT (256U)
/** LSB header should be aligned to 256 bytes */
#define LSF_LSB_HEADER_ALIGNMENT (256U)
/** BL DATA should be aligned to 256 bytes */
#define LSF_BL_DATA_ALIGNMENT (256U)
/** BL DATA size should be aligned to 256 bytes */
#define LSF_BL_DATA_SIZE_ALIGNMENT (256U)
/** BL CODE size should be aligned to 256 bytes */
#define LSF_BL_CODE_SIZE_ALIGNMENT (256U)
/** LSF DATA size should be aligned to 256 bytes */
#define LSF_DATA_SIZE_ALIGNMENT (256U)
/** LSF CODE size should be aligned to 256 bytes */
#define LSF_CODE_SIZE_ALIGNMENT (256U)
/** UCODE surface should be aligned to 4k PAGE_SIZE */
#define LSF_UCODE_DATA_ALIGNMENT 4096U
/**
* Maximum WPR Header size
*/
#define LSF_WPR_HEADERS_TOTAL_SIZE_MAX \
(ALIGN_UP(((u32)sizeof(struct lsf_wpr_header) * FALCON_ID_END), \
LSF_WPR_HEADER_ALIGNMENT))
#define LSF_LSB_HEADER_TOTAL_SIZE_MAX (\
ALIGN_UP(sizeof(struct lsf_lsb_header), LSF_LSB_HEADER_ALIGNMENT))
/** @} */
#ifdef CONFIG_NVGPU_DGPU
/* Maximum SUB WPR header size */
#define LSF_SUB_WPR_HEADERS_TOTAL_SIZE_MAX (ALIGN_UP( \
(sizeof(struct lsf_shared_sub_wpr_header) * \
LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_MAX), \
LSF_SUB_WPR_HEADER_ALIGNMENT))
/* MMU expects sub_wpr sizes in units of 4K */
#define SUB_WPR_SIZE_ALIGNMENT (4096U)
/* Shift for 4K units (4KB = 1 << 12) */
#define SHIFT_4KB (12U)
/* shared sub_wpr use case IDs */
enum {
LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_FRTS_VBIOS_TABLES = 1,
LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_PLAYREADY_SHARED_DATA = 2
};
#define LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_MAX \
LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_PLAYREADY_SHARED_DATA
#define LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_INVALID (0xFFFFFFFFU)
#define MAX_SUPPORTED_SHARED_SUB_WPR_USE_CASES \
LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_MAX
/* Static sizes of shared subWPRs */
/* Minimum granularity supported is 4K */
/* 1MB in 4K */
#define LSF_SHARED_DATA_SUB_WPR_FRTS_VBIOS_TABLES_SIZE_IN_4K (0x100U)
/* 4K */
#define LSF_SHARED_DATA_SUB_WPR_PLAYREADY_SHARED_DATA_SIZE_IN_4K (0x1U)
#endif
/**
* @ingroup NVGPURM_BLOB_CONSTRUCT
*/
/** @{*/
/**
* Image status updated by ACR HS ucode to know the LS
* Falcon ucode status.
*/
/** IMAGE copied from NON-WPR to WPR BLOB*/
#define LSF_IMAGE_STATUS_COPY (1U)
/** LS Falcon ucode verification failed*/
#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED (2U)
/** LS Falcon data verification failed*/
#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED (3U)
/** Both ucode and data validation passed */
#define LSF_IMAGE_STATUS_VALIDATION_DONE (4U)
/**
* LS Falcons such as FECS and GPCCS do not have signatures for binaries in
* a debug environment (fmodel).
*/
#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED (5U)
/** LS Falcon validation passed & ready for bootstrap */
#define LSF_IMAGE_STATUS_BOOTSTRAP_READY (6U)
/**
* Light Secure WPR Header
* Defines state allowing Light Secure Falcon bootstrapping.
*/
struct lsf_wpr_header {
/**
* LS Falcon ID
* FALCON_ID_FECS - 2
* FALCON_ID_GPCCS - 3
*/
u32 falcon_id;
/**
* LS Falcon LSB header offset from the non-WPR base. The equation below
* gives the LSB header offset for each managed LS Falcon:
* Offset = Non-WPR base + #LSF_LSB_HEADER_ALIGNMENT +
* ((#LSF_UCODE_DATA_ALIGNMENT + #LSF_BL_DATA_ALIGNMENT) *
* LS Falcon index)
*
*/
u32 lsb_offset;
/**
* LS Falcon bootstrap owner, which performs bootstrapping of the
* supported LS Falcons from the ACR HS ucode. The Falcon owners that
* support bootstrapping are:
* + Falcon #FALCON_ID_PMU
*
* On GV11B, bootstrap_owner is set to #FALCON_ID_PMU as the ACR HS
* ucode runs on the PMU Engine Falcon.
*
*/
u32 bootstrap_owner;
/**
* Skip bootstrapping by ACR HS ucode,
* 1 - skip LS Falcon bootstrapping by ACR HS ucode.
* 0 - LS Falcon bootstrapping is done by ACR HS ucode.
*
* On GV11B, always set 0.
*/
u32 lazy_bootstrap;
/** LS ucode bin version */
u32 bin_version;
/**
* Bootstrapping status updated by ACR HS ucode to know the LS
* Falcon ucode status.
*/
u32 status;
};
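/*
 * Fill sketch for one WPR header entry (illustrative; "wpr_hdr" is a
 * hypothetical pointer into the non-WPR blob and "lsb_off" a hypothetical
 * offset computed per the equation documented above):
 *
 *   wpr_hdr->falcon_id = FALCON_ID_FECS;
 *   wpr_hdr->lsb_offset = lsb_off;
 *   wpr_hdr->bootstrap_owner = FALCON_ID_PMU;
 *   wpr_hdr->lazy_bootstrap = 0U;  // bootstrapped by ACR HS ucode
 *   wpr_hdr->status = LSF_IMAGE_STATUS_COPY;
 */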
/** @} */
/**
* @ingroup NVGPURM_BLOB_CONSTRUCT
*/
/** @{*/
/**
* Size in entries of the ucode descriptor's dependency map.
*/
#define LSF_FALCON_DEPMAP_SIZE (11U)
/**
* Code/data signature details of LS falcon
*/
struct lsf_ucode_desc {
/** ucode's production signature */
u8 prd_keys[2][16];
/** ucode's debug signature */
u8 dbg_keys[2][16];
/**
* production signature present status,
* 1 - production signature present
* 0 - production signature not present
*/
u32 b_prd_present;
/**
* debug signature present
* 1 - debug signature present
* 0 - debug signature not present
*/
u32 b_dbg_present;
/**
* LS Falcon ID
* FALCON_ID_FECS - 2
* FALCON_ID_GPCCS - 3
*/
u32 falcon_id;
/**
* include version in signature calculation if supported
* 1 - supported
* 0 - not supported
*/
u32 bsupports_versioning;
/** version to include it in signature calculation if supported */
u32 version;
/** valid dependency map data to consider from dep_map array member */
u32 dep_map_count;
/**
* packed dependency map used to compute the DM hashes on the code and
* data.
*/
u8 dep_map[LSF_FALCON_DEPMAP_SIZE * 2 * 4];
/** Message used to derive key */
u8 kdf[16];
};
/** @} */
/**
* @ingroup NVGPURM_BLOB_CONSTRUCT
*/
/** @{*/
/**
* Light Secure Bootstrap Header
* Defines state allowing Light Secure Falcon bootstrapping.
*/
/** Load BL at 0th IMEM offset */
#define NV_FLCN_ACR_LSF_FLAG_LOAD_CODE_AT_0_FALSE 0U
#define NV_FLCN_ACR_LSF_FLAG_LOAD_CODE_AT_0_TRUE BIT32(0)
/** This falcon requires a ctx before issuing DMAs. */
#define NV_FLCN_ACR_LSF_FLAG_DMACTL_REQ_CTX_FALSE 0U
#define NV_FLCN_ACR_LSF_FLAG_DMACTL_REQ_CTX_TRUE BIT32(2)
/** Use priv loading method instead of bootloader/DMAs */
#define NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD_TRUE BIT32(3)
#define NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD_FALSE (0U)
struct lsf_lsb_header {
/** Code/data signature details of each LS falcon */
struct lsf_ucode_desc signature;
/**
* Offset from non-WPR base where UCODE is located,
* Offset = Non-WPR base + #LSF_LSB_HEADER_ALIGNMENT +
* #LSF_UCODE_DATA_ALIGNMENT + ( #LSF_BL_DATA_ALIGNMENT *
* LS Falcon index)
*/
u32 ucode_off;
/**
* Size of LS Falcon ucode, required to perform signature verification
* of LS Falcon ucode by ACR HS.
*/
u32 ucode_size;
/**
* Size of LS Falcon ucode data, required to perform signature
* verification of LS Falcon ucode data by ACR HS.
*/
u32 data_size;
/**
* Size of bootloader that needs to be loaded by bootstrap owner.
*
* On GV11B, respective LS Falcon BL code size should not exceed
* below mentioned size.
* FALCON_ID_FECS IMEM size - 32k
* FALCON_ID_GPCCS IMEM size - 16k
*/
u32 bl_code_size;
/** BL starting virtual address. Needed for tagging */
u32 bl_imem_off;
/**
* Offset from non-WPR base holding the BL data
* Offset = (Non-WPR base + #LSF_LSB_HEADER_ALIGNMENT +
* #LSF_UCODE_DATA_ALIGNMENT + #LSF_BL_DATA_ALIGNMENT) *
* LS Falcon index
*/
u32 bl_data_off;
/**
* Size of BL data, BL data will be copied to LS Falcon DMEM of
* bl data size
*
* On GV11B, respective LS Falcon BL data size should not exceed
* below mentioned size.
* FALCON_ID_FECS DMEM size - 8k
* FALCON_ID_GPCCS DMEM size - 5k
*/
u32 bl_data_size;
/**
* Offset from non-WPR base address where UCODE Application code is
* located.
*/
u32 app_code_off;
/**
* Size of UCODE Application code.
*
* On GV11B, FECS/GPCCS LS Falcon app code size should not exceed
* below mentioned size.
* FALCON_ID_FECS IMEM size - 32k
* FALCON_ID_GPCCS IMEM size - 16k
*/
u32 app_code_size;
/**
* Offset from non-WPR base address where UCODE Application data
* is located
*/
u32 app_data_off;
/**
* Size of UCODE Application data.
*
* On GV11B, respective LS Falcon app data size should not exceed
* below mentioned size.
* FALCON_ID_FECS DMEM size - 8k
* FALCON_ID_GPCCS DMEM size - 5k
*/
u32 app_data_size;
/**
* NV_FLCN_ACR_LSF_FLAG_LOAD_CODE_AT_0 - Load BL at 0th IMEM offset
* NV_FLCN_ACR_LSF_FLAG_DMACTL_REQ_CTX - This falcon requires a ctx
* before issuing DMAs.
* NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD - Use priv loading method
* instead of bootloader/DMAs
*/
u32 flags;
};
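/*
 * Offset sketch (illustrative; mirrors the equation documented in the
 * ucode_off field comment above, with "lsb_hdr", "nonwpr_base" and "idx"
 * as hypothetical locals):
 *
 *   lsb_hdr->ucode_off = nonwpr_base + LSF_LSB_HEADER_ALIGNMENT +
 *                        LSF_UCODE_DATA_ALIGNMENT +
 *                        (LSF_BL_DATA_ALIGNMENT * idx);
 */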
#define FLCN_SIG_SIZE (4U)
/** @} */
/**
* @ingroup NVGPURM_BLOB_CONSTRUCT
*/
/** @{*/
/**
* Structure used by the boot-loader to load the rest of the LS Falcon code.
*
* This has to be filled by the GPU driver and copied into WPR region offset
* holding the BL data.
*/
struct flcn_bl_dmem_desc {
/** Should always be the first element */
u32 reserved[FLCN_SIG_SIZE];
/**
* Signature for secure code; follows the reserved 16B block above.
* All 0s if there is no secure code.
*/
u32 signature[FLCN_SIG_SIZE];
/**
* Type of memory-aperture DMA index used by the bootloader
* while loading code/data.
*/
u32 ctx_dma;
/**
* 256B aligned physical sysmem(iGPU)/FB(dGPU) address where code
* is located.
*/
struct falc_u64 code_dma_base;
/**
* Offset from code_dma_base where the non-secure code is located.
* The offset must be a multiple of 256 to help performance.
*/
u32 non_sec_code_off;
/**
* The size of the non-secure code part.
*
* On GV11B, FECS/GPCCS LS Falcon non-secure + secure code size
* should not exceed below mentioned size.
* FALCON_ID_FECS IMEM size - 32k
* FALCON_ID_GPCCS IMEM size - 16k
*/
u32 non_sec_code_size;
/**
* Offset from code_dma_base where the secure code is located.
* The offset must be multiple of 256 to help performance.
*/
u32 sec_code_off;
/**
* The size of the secure code part.
*
* On GV11B, FECS/GPCCS LS Falcon non-secure + secure code size
* should not exceed below mentioned size.
* FALCON_ID_FECS IMEM size - 32k
* FALCON_ID_GPCCS IMEM size - 16k
*/
u32 sec_code_size;
/**
* Code entry point which will be invoked by BL after code is
* loaded.
*/
u32 code_entry_point;
/**
* 256B aligned Physical sysmem(iGPU)/FB(dGPU) Address where data
* is located.
*/
struct falc_u64 data_dma_base;
/**
* Size of data block. Should be multiple of 256B.
*
* On GV11B, respective LS Falcon data size should not exceed
* below mentioned size.
* FALCON_ID_FECS DMEM size - 8k
* FALCON_ID_GPCCS DMEM size - 5k
*/
u32 data_size;
/** Number of arguments to be passed to the target firmware being loaded. */
u32 argc;
/**
* Arguments to be passed to the target firmware
* being loaded.
*/
u32 argv;
};
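/*
 * Fill sketch (illustrative; assumes struct falc_u64 carries the address
 * as lo/hi 32-bit halves and that the 256B-aligned DMA base is stored
 * right-shifted by 8, both treated here as assumptions; "ucode_phys",
 * "code_off" and "code_size" are hypothetical locals):
 *
 *   struct flcn_bl_dmem_desc desc = {0};
 *   desc.ctx_dma = GK20A_PMU_DMAIDX_UCODE;
 *   desc.code_dma_base.lo = u64_lo32(ucode_phys >> 8U);
 *   desc.code_dma_base.hi = u64_hi32(ucode_phys >> 8U);
 *   desc.non_sec_code_off = code_off;
 *   desc.non_sec_code_size = code_size;
 *   desc.code_entry_point = 0U;
 */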
/** @} */
/**
* @defgroup NVGPURM_ACR_HS_LOAD_BOOTSTRAP ACR HS ucode load & bootstrap
*
* ACR HS ucode load & bootstrap interfaces:
* The ACR unit reads the ACR HS ucode from the filesystem based on the
* chip-id. The ucode is then updated with the structs below by patching
* them in at the offset given by the #acr_fw_header member hdr_offset.
* The patched ACR HS ucode is loaded onto the PMU/SEC2/GSP engine Falcon
* to be bootstrapped. The ACR HS ucode self-authenticates using the H/W
* based HS authentication methodology; once authenticated, it starts
* executing on the Falcon.
*/
/**
* @ingroup NVGPURM_ACR_HS_LOAD_BOOTSTRAP
*/
/** @{*/
/**
* A maximum of 2 regions is supported.
* This is needed to pre-allocate space in DMEM.
*/
#define NVGPU_FLCN_ACR_MAX_REGIONS (2U)
/** Reserve 512 bytes for bootstrap owner LS ucode data */
#define LSF_BOOTSTRAP_OWNER_RESERVED_DMEM_SIZE (0x200U)
/**
* The descriptor used by ACR HS ucode to figure out properties of individual
* WPR regions.
*
* On GV11B, these struct members are set to 0x0 by default so that
* the ACR HS ucode fetches the WPR1 details from H/W.
*/
struct flcn_acr_region_prop {
/** Starting address of WPR region */
u32 start_addr;
/** Ending address of WPR region */
u32 end_addr;
/** The ID of the WPR region. 0 for WPR1 and 1 for WPR2 */
u32 region_id;
/** Read mask associated with this region */
u32 read_mask;
/** Write mask associated with this region */
u32 write_mask;
/** Bit map of all clients currently using this region */
u32 client_mask;
/**
* sysmem(iGPU)/FB(dGPU) location from where contents need to
* be copied to startAddress
*/
u32 shadowmMem_startaddress;
};
/**
* The descriptor used by ACR HS ucode to figure out supporting regions &
* its properties.
*/
struct flcn_acr_regions {
/**
* Number of regions used by NVGPU out of the total number of ACR
* regions supported by the chip.
*
* On GV11B, 1 ACR region is supported, and this must always be
* greater than 0.
*/
u32 no_regions;
/** Region properties */
struct flcn_acr_region_prop region_props[NVGPU_FLCN_ACR_MAX_REGIONS];
};
#define DMEM_WORD_SIZE 4U
#define DUMMY_SPACE_SIZE 4U
/**
* The descriptor used by ACR HS ucode to figure out the
* WPR & non-WPR blob details.
*/
struct flcn_acr_desc {
/*
* The bootstrap owner switches into LS mode once bootstrapping of the
* other LS Falcons is complete, and it needs its own actual DMEM image
* copied into DMEM as part of LS setup. If the ACR descriptor were at
* DMEM location 0, it would get overwritten, causing data corruption.
* Hence 0x200 bytes are reserved to give room for the loaded data.
* NOTE: This has to be the first member always.
*/
union {
u32 reserved_dmem[(LSF_BOOTSTRAP_OWNER_RESERVED_DMEM_SIZE/DMEM_WORD_SIZE)];
} ucode_reserved_space;
/** Signature of ACR ucode. */
u32 signatures[FLCN_SIG_SIZE];
/**
* WPR Region ID holding the WPR header and its details
*
* On GV11B, wpr_region_id is set to 0x0 by default to indicate that
* the ACR HS ucode should fetch the WPR region details from H/W and
* update the WPR start_addr, end_addr, read_mask & write_mask of
* struct #flcn_acr_region_prop.
*/
u32 wpr_region_id;
/** Offset from the non-WPR base holding the wpr header */
u32 wpr_offset;
/** usable memory ranges, on GV11B it is not set */
u32 mmu_mem_range;
/**
* WPR Region descriptors to provide info about WPR.
* On GV11B, no_regions is set to 1 and the region property values to
* 0x0 to indicate that the ACR HS ucode should fetch the WPR region
* details from H/W.
*/
struct flcn_acr_regions regions;
/**
* stores the size of the ucode blob.
*
* On GV11B, size is calculated at runtime & aligned to 256 bytes.
* The size varies with the number of supported LS Falcons.
*/
u32 nonwpr_ucode_blob_size;
/**
* stores sysmem(iGPU)/FB's(dGPU) non-WPR start address where
* kernel stores ucode blob
*/
u64 nonwpr_ucode_blob_start;
/** dummy space, not used by iGPU */
u32 dummy[DUMMY_SPACE_SIZE];
};
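/*
 * GV11B-style patch sketch (mirrors gv11b_acr_patch_wpr_info_to_ucode
 * earlier in this change; "desc" is a hypothetical pointer to the
 * descriptor patched into the ACR HS ucode):
 *
 *   desc->nonwpr_ucode_blob_start =
 *           nvgpu_mem_get_addr(g, &g->acr->ucode_blob);
 *   desc->nonwpr_ucode_blob_size = (u32)g->acr->ucode_blob.size;
 *   desc->regions.no_regions = 1U;  // props left 0x0: fetch from H/W
 *   desc->wpr_offset = 0U;
 */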
struct flcn2_acr_desc {
/**
* WPR Region ID holding the WPR header and its details
*
* On GPUID_NEXT, wpr_region_id is set to 0x0 by default to indicate
* that the ACR HS ucode should fetch the WPR region details from H/W
* and update the WPR start_addr, end_addr, read_mask & write_mask of
* struct #flcn_acr_region_prop.
*/
u32 wpr_region_id;
/** Offset from the non-WPR base holding the wpr header */
u32 wpr_offset;
/**
* WPR Region descriptors to provide info about WPR.
* On GPUID_NEXT, no_regions is set to 1 and the region property values
* to 0x0 to indicate that the ACR HS ucode should fetch the WPR region
* details from H/W.
*/
struct flcn_acr_regions regions;
/**
* stores the size of the ucode blob.
*
* On GPUID_NEXT, size is calculated at runtime & aligned to 256 bytes.
* The size varies with the number of supported LS Falcons.
*/
u32 nonwpr_ucode_blob_size;
/**
* stores sysmem(iGPU)/FB's(dGPU) non-WPR start address where
* kernel stores ucode blob
*/
u64 nonwpr_ucode_blob_start;
u64 ls_pmu_desc;
};
/** @} */
#endif /* NVGPU_ACR_INTERFACE_H */