mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-24 10:34:43 +03:00
gpu: nvgpu: ACR refactor to create ACR unit
Move the ACR code to a separate folder, common/acr, to make ACR a standalone unit. With this change, the ACR blob-construction, bootstrap, and chip-specific configuration code are split into separate files. The blob-construction code is split into two versions: gm20b and gp10b still use the older ACR interfaces and have not yet moved to Tegra ACR, so the blob_construct_v0 file can be deleted once gm20b/gp10b use the Tegra ACR ucode and are pointed at blob_construct_v1 with a simple change. Since the ACR ucode can execute on different engine falcons and should not depend on any specific engine falcon, the ACR code uses generic falcon functions/interfaces and does not access any engine hardware registers directly; the files named after chips hold the configuration needed for the ACR HS ucode and the LS falcons.

JIRA NVGPU-1148

Change-Id: Ieedbe82f3e1a4303f055fbc795d9ce0f1866d259
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2017046
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 0d05c6e159
commit 0aa55f6741
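To make the engine-independence described above concrete, here is a minimal sketch (not part of the commit) of checking ACR completion on an arbitrary falcon through the generic falcon interface alone. The helper name acr_check_completion is hypothetical; the nvgpu_falcon_* calls and the mailbox convention are the ones common/acr/acr.c below actually uses.

	static int acr_check_completion(struct gk20a *g,
		struct nvgpu_falcon *flcn, unsigned int timeout_ms)
	{
		/* Generic falcon call: no engine-specific registers touched */
		int err = nvgpu_falcon_wait_for_halt(flcn, timeout_ms);

		if (err != 0) {
			nvgpu_err(g, "flcn-%d: ACR did not halt",
				nvgpu_falcon_get_id(flcn));
			return err;
		}

		/* The ACR HS ucode reports its status in mailbox 0; 0 = success */
		if (nvgpu_falcon_mailbox_read(flcn, FALCON_MAILBOX_0) != 0U) {
			return -EAGAIN;
		}

		return 0;
	}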
337 drivers/gpu/nvgpu/common/acr/acr.c Normal file
@@ -0,0 +1,337 @@
/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/types.h>
#include <nvgpu/dma.h>
#include <nvgpu/timers.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/acr/nvgpu_acr.h>
#include <nvgpu/firmware.h>
#include <nvgpu/pmu.h>
#include <nvgpu/falcon.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/bug.h>

/* Both size and address of WPR need to be 128K-aligned */
#define DGPU_WPR_SIZE 0x200000U

static int acr_wait_for_completion(struct gk20a *g,
	struct nvgpu_falcon *flcn, unsigned int timeout)
{
	u32 flcn_id = nvgpu_falcon_get_id(flcn);
	u32 sctl, cpuctl;
	int completion = 0;
	u32 data = 0;

	nvgpu_log_fn(g, " ");

	completion = nvgpu_falcon_wait_for_halt(flcn, timeout);
	if (completion != 0) {
		nvgpu_err(g, "flcn-%d: ACR boot timed out", flcn_id);
		goto exit;
	}

	nvgpu_acr_dbg(g, "flcn-%d: ACR capabilities %x", flcn_id,
		nvgpu_falcon_mailbox_read(flcn, FALCON_MAILBOX_1));

	data = nvgpu_falcon_mailbox_read(flcn, FALCON_MAILBOX_0);
	if (data != 0U) {
		nvgpu_err(g, "flcn-%d: ACR boot failed, err %x", flcn_id,
			data);
		completion = -EAGAIN;
		goto exit;
	}

	nvgpu_falcon_get_ctls(flcn, &sctl, &cpuctl);

	nvgpu_acr_dbg(g, "flcn-%d: sctl reg %x cpuctl reg %x",
		flcn_id, sctl, cpuctl);

exit:
	return completion;
}

static int acr_hs_bl_exec(struct gk20a *g, struct nvgpu_acr *acr,
	struct hs_acr *acr_desc, bool b_wait_for_halt)
{
	struct nvgpu_firmware *hs_bl_fw = acr_desc->acr_hs_bl.hs_bl_fw;
	struct hsflcn_bl_desc *hs_bl_desc;
	struct nvgpu_falcon_bl_info bl_info;
	struct hs_flcn_bl *hs_bl = &acr_desc->acr_hs_bl;
	struct mm_gk20a *mm = &g->mm;
	struct vm_gk20a *vm = mm->pmu.vm;
	u32 flcn_id = nvgpu_falcon_get_id(acr_desc->acr_flcn);
	u32 *hs_bl_code = NULL;
	int err = 0;
	u32 bl_sz;

	nvgpu_acr_dbg(g, "Executing ACR HS Bootloader %s on Falcon-ID - %d",
		hs_bl->bl_fw_name, flcn_id);

	if (hs_bl_fw == NULL) {
		hs_bl_fw = nvgpu_request_firmware(g, hs_bl->bl_fw_name, 0);
		if (hs_bl_fw == NULL) {
			nvgpu_err(g, "ACR HS BL ucode load fail");
			return -ENOENT;
		}

		hs_bl->hs_bl_fw = hs_bl_fw;
		hs_bl->hs_bl_bin_hdr = (struct bin_hdr *)hs_bl_fw->data;
		hs_bl->hs_bl_desc = (struct hsflcn_bl_desc *)(hs_bl_fw->data +
			hs_bl->hs_bl_bin_hdr->header_offset);

		hs_bl_desc = hs_bl->hs_bl_desc;
		hs_bl_code = (u32 *)(hs_bl_fw->data +
			hs_bl->hs_bl_bin_hdr->data_offset);

		bl_sz = ALIGN(hs_bl_desc->bl_img_hdr.bl_code_size, 256U);

		hs_bl->hs_bl_ucode.size = bl_sz;

		err = nvgpu_dma_alloc_sys(g, bl_sz, &hs_bl->hs_bl_ucode);
		if (err != 0) {
			nvgpu_err(g, "ACR HS BL failed to allocate memory");
			goto err_done;
		}

		hs_bl->hs_bl_ucode.gpu_va = nvgpu_gmmu_map(vm,
			&hs_bl->hs_bl_ucode,
			bl_sz,
			0U, /* flags */
			gk20a_mem_flag_read_only, false,
			hs_bl->hs_bl_ucode.aperture);
		if (hs_bl->hs_bl_ucode.gpu_va == 0U) {
			nvgpu_err(g, "ACR HS BL failed to map ucode memory!!");
			goto err_free_ucode;
		}

		nvgpu_mem_wr_n(g, &hs_bl->hs_bl_ucode, 0U, hs_bl_code, bl_sz);

		nvgpu_acr_dbg(g, "Copied BL ucode to bl_cpuva");
	}

	/* Fill HS BL info */
	bl_info.bl_src = hs_bl->hs_bl_ucode.cpu_va;
	bl_info.bl_desc = acr_desc->ptr_bl_dmem_desc;
	nvgpu_assert(acr_desc->bl_dmem_desc_size <= U32_MAX);
	bl_info.bl_desc_size = (u32)acr_desc->bl_dmem_desc_size;
	nvgpu_assert(hs_bl->hs_bl_ucode.size <= U32_MAX);
	bl_info.bl_size = (u32)hs_bl->hs_bl_ucode.size;
	bl_info.bl_start_tag = hs_bl->hs_bl_desc->bl_start_tag;

	/*
	 * 1. Does falcon reset
	 * 2. setup falcon apertures
	 * 3. bootstrap falcon
	 */
	acr_desc->acr_flcn_setup_hw_and_bl_bootstrap(g, acr_desc, &bl_info);

	if (b_wait_for_halt) {
		/* wait for ACR halt */
		err = acr_wait_for_completion(g, acr_desc->acr_flcn,
			ACR_COMPLETION_TIMEOUT_MS);
		if (err != 0) {
			goto err_unmap_bl;
		}
	}

	return 0;
err_unmap_bl:
	nvgpu_gmmu_unmap(vm, &hs_bl->hs_bl_ucode, hs_bl->hs_bl_ucode.gpu_va);
err_free_ucode:
	nvgpu_dma_free(g, &hs_bl->hs_bl_ucode);
err_done:
	nvgpu_release_firmware(g, hs_bl_fw);
	acr_desc->acr_hs_bl.hs_bl_fw = NULL;

	return err;
}

/*
 * Patch signatures into ucode image
 */
static int acr_ucode_patch_sig(struct gk20a *g,
	unsigned int *p_img, unsigned int *p_prod_sig,
	unsigned int *p_dbg_sig, unsigned int *p_patch_loc,
	unsigned int *p_patch_ind)
{
	unsigned int i, *p_sig;
	nvgpu_acr_dbg(g, " ");

	if (!g->ops.pmu.is_debug_mode_enabled(g)) {
		p_sig = p_prod_sig;
		nvgpu_acr_dbg(g, "PRODUCTION MODE\n");
	} else {
		p_sig = p_dbg_sig;
		nvgpu_acr_dbg(g, "DEBUG MODE\n");
	}

	/* Patching logic: */
	for (i = 0U; i < sizeof(*p_patch_loc)>>2U; i++) {
		p_img[(p_patch_loc[i]>>2U)] = p_sig[(p_patch_ind[i]<<2U)];
		p_img[(p_patch_loc[i]>>2U)+1U] = p_sig[(p_patch_ind[i]<<2U)+1U];
		p_img[(p_patch_loc[i]>>2U)+2U] = p_sig[(p_patch_ind[i]<<2U)+2U];
		p_img[(p_patch_loc[i]>>2U)+3U] = p_sig[(p_patch_ind[i]<<2U)+3U];
	}
	return 0;
}
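
A worked example of the patching arithmetic above, with hypothetical values not taken from the commit: each signature is four 32-bit words (16 bytes), p_patch_loc[] holds byte offsets into the image, and p_patch_ind[] selects a signature. Note also that sizeof(*p_patch_loc) >> 2 evaluates to 1 for a 4-byte unsigned int, so the loop patches exactly one location per call.

	/*
	 * Hypothetical values: p_patch_loc[0] = 0x1000, p_patch_ind[0] = 1.
	 * Image word index:     0x1000 >> 2 = 0x400
	 * Signature word index: 1 << 2      = 4
	 * The loop body therefore performs:
	 *
	 *	p_img[0x400] = p_sig[4];
	 *	p_img[0x401] = p_sig[5];
	 *	p_img[0x402] = p_sig[6];
	 *	p_img[0x403] = p_sig[7];
	 */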

/*
 * Loads ACR bin to SYSMEM/FB and bootstraps ACR with bootloader code
 * start and end are addresses of ucode blob in non-WPR region
 */
int nvgpu_acr_bootstrap_hs_ucode(struct gk20a *g, struct nvgpu_acr *acr,
	struct hs_acr *acr_desc)
{
	struct mm_gk20a *mm = &g->mm;
	struct vm_gk20a *vm = mm->pmu.vm;
	struct nvgpu_firmware *acr_fw = acr_desc->acr_fw;
	struct bin_hdr *acr_fw_bin_hdr = NULL;
	struct acr_fw_header *acr_fw_hdr = NULL;
	struct nvgpu_mem *acr_ucode_mem = &acr_desc->acr_ucode;
	u32 img_size_in_bytes = 0;
	u32 *acr_ucode_data;
	u32 *acr_ucode_header;
	int status = 0;

	nvgpu_acr_dbg(g, "ACR TYPE %x ", acr_desc->acr_type);

	if (acr_fw != NULL) {
		acr->patch_wpr_info_to_ucode(g, acr, acr_desc, true);
	} else {
		acr_fw = nvgpu_request_firmware(g, acr_desc->acr_fw_name,
			NVGPU_REQUEST_FIRMWARE_NO_SOC);
		if (acr_fw == NULL) {
			nvgpu_err(g, "%s ucode get fail for %s",
				acr_desc->acr_fw_name, g->name);
			return -ENOENT;
		}

		acr_desc->acr_fw = acr_fw;

		acr_fw_bin_hdr = (struct bin_hdr *)acr_fw->data;

		acr_fw_hdr = (struct acr_fw_header *)
			(acr_fw->data + acr_fw_bin_hdr->header_offset);

		acr_ucode_header = (u32 *)(acr_fw->data +
			acr_fw_hdr->hdr_offset);

		acr_ucode_data = (u32 *)(acr_fw->data +
			acr_fw_bin_hdr->data_offset);

		img_size_in_bytes = ALIGN((acr_fw_bin_hdr->data_size), 256U);

		/* Lets patch the signatures first.. */
		if (acr_ucode_patch_sig(g, acr_ucode_data,
			(u32 *)(acr_fw->data + acr_fw_hdr->sig_prod_offset),
			(u32 *)(acr_fw->data + acr_fw_hdr->sig_dbg_offset),
			(u32 *)(acr_fw->data + acr_fw_hdr->patch_loc),
			(u32 *)(acr_fw->data + acr_fw_hdr->patch_sig)) < 0) {
			nvgpu_err(g, "patch signatures fail");
			status = -1;
			goto err_release_acr_fw;
		}

		status = nvgpu_dma_alloc_map_sys(vm, img_size_in_bytes,
			acr_ucode_mem);
		if (status != 0) {
			status = -ENOMEM;
			goto err_release_acr_fw;
		}

		acr->patch_wpr_info_to_ucode(g, acr, acr_desc, false);

		nvgpu_mem_wr_n(g, acr_ucode_mem, 0U, acr_ucode_data,
			img_size_in_bytes);

		/*
		 * In order to execute this binary, we will be using
		 * a bootloader which will load this image into
		 * FALCON IMEM/DMEM.
		 * Fill up the bootloader descriptor to use..
		 * TODO: Use standard descriptor which the generic bootloader is
		 * checked in.
		 */
		acr->acr_fill_bl_dmem_desc(g, acr, acr_desc, acr_ucode_header);
	}

	status = acr_hs_bl_exec(g, acr, acr_desc, true);
	if (status != 0) {
		goto err_free_ucode_map;
	}

	return 0;
err_free_ucode_map:
	nvgpu_dma_unmap_free(vm, acr_ucode_mem);
err_release_acr_fw:
	nvgpu_release_firmware(g, acr_fw);
	acr_desc->acr_fw = NULL;
	return status;
}

int nvgpu_acr_alloc_blob_space_sys(struct gk20a *g, size_t size,
	struct nvgpu_mem *mem)
{
	return nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_PHYSICALLY_ADDRESSED,
		size, mem);
}

int nvgpu_acr_alloc_blob_space_vid(struct gk20a *g, size_t size,
	struct nvgpu_mem *mem)
{
	struct wpr_carveout_info wpr_inf;
	int err;

	if (mem->size != 0ULL) {
		return 0;
	}

	g->acr.get_wpr_info(g, &wpr_inf);

	/*
	 * Even though this mem_desc wouldn't be used, the wpr region needs to
	 * be reserved in the allocator.
	 */
	err = nvgpu_dma_alloc_vid_at(g, wpr_inf.size,
		&g->acr.wpr_dummy, wpr_inf.wpr_base);
	if (err != 0) {
		return err;
	}

	return nvgpu_dma_alloc_vid_at(g, wpr_inf.size, mem,
		wpr_inf.nonwpr_base);
}

void nvgpu_acr_wpr_info_sys(struct gk20a *g, struct wpr_carveout_info *inf)
{
	g->ops.fb.read_wpr_info(g, inf);
}

void nvgpu_acr_wpr_info_vid(struct gk20a *g, struct wpr_carveout_info *inf)
{
	inf->wpr_base = g->mm.vidmem.bootstrap_base;
	inf->nonwpr_base = inf->wpr_base + DGPU_WPR_SIZE;
	inf->size = DGPU_WPR_SIZE;
}
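
A numeric illustration of the dGPU carveout math above (the base address is hypothetical; DGPU_WPR_SIZE is the 0x200000 defined at the top of this file, which satisfies the 128K alignment requirement):

	/*
	 * Assuming g->mm.vidmem.bootstrap_base = 0x10000000 (hypothetical):
	 *
	 *	wpr_base    = 0x10000000             locked WPR region
	 *	nonwpr_base = 0x10000000 + 0x200000  = 0x10200000, non-WPR blob
	 *	size        = 0x200000 (2 MB) for each region
	 */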
836 drivers/gpu/nvgpu/common/acr/acr_blob_construct_v0.c Normal file
@@ -0,0 +1,836 @@
/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/acr/nvgpu_acr.h>
#include <nvgpu/firmware.h>
#include <nvgpu/pmu.h>
#include <nvgpu/falcon.h>
#include <nvgpu/enabled.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/string.h>
#include <nvgpu/bug.h>

int nvgpu_acr_lsf_pmu_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct lsf_ucode_desc *lsf_desc;
	struct flcn_ucode_img *p_img = (struct flcn_ucode_img *)lsf_ucode_img;
	int err = 0;

	lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc));
	if (lsf_desc == NULL) {
		err = -ENOMEM;
		goto exit;
	}

	nvgpu_memcpy((u8 *)lsf_desc, (u8 *)pmu->fw_sig->data,
		min_t(size_t, sizeof(*lsf_desc), pmu->fw_sig->size));

	lsf_desc->falcon_id = FALCON_ID_PMU;

	p_img->desc = (struct pmu_ucode_desc *)(void *)pmu->fw_desc->data;
	p_img->data = (u32 *)(void *)pmu->fw_image->data;
	p_img->data_size = p_img->desc->image_size;
	p_img->fw_ver = NULL;
	p_img->header = NULL;
	p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;

exit:
	return err;
}

int nvgpu_acr_lsf_fecs_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)
{
	struct lsf_ucode_desc *lsf_desc;
	struct nvgpu_firmware *fecs_sig;
	struct flcn_ucode_img *p_img = (struct flcn_ucode_img *)lsf_ucode_img;
	int err;

	fecs_sig = nvgpu_request_firmware(g, GM20B_FECS_UCODE_SIG, 0);
	if (fecs_sig == NULL) {
		nvgpu_err(g, "failed to load fecs sig");
		return -ENOENT;
	}
	lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc));
	if (lsf_desc == NULL) {
		err = -ENOMEM;
		goto rel_sig;
	}
	nvgpu_memcpy((u8 *)lsf_desc, (u8 *)fecs_sig->data,
		min_t(size_t, sizeof(*lsf_desc), fecs_sig->size));

	lsf_desc->falcon_id = FALCON_ID_FECS;

	p_img->desc = nvgpu_kzalloc(g, sizeof(struct pmu_ucode_desc));
	if (p_img->desc == NULL) {
		err = -ENOMEM;
		goto free_lsf_desc;
	}

	p_img->desc->bootloader_start_offset =
		g->ctxsw_ucode_info.fecs.boot.offset;
	p_img->desc->bootloader_size =
		ALIGN(g->ctxsw_ucode_info.fecs.boot.size, 256);
	p_img->desc->bootloader_imem_offset =
		g->ctxsw_ucode_info.fecs.boot_imem_offset;
	p_img->desc->bootloader_entry_point =
		g->ctxsw_ucode_info.fecs.boot_entry;

	p_img->desc->image_size =
		ALIGN(g->ctxsw_ucode_info.fecs.boot.size, 256) +
		ALIGN(g->ctxsw_ucode_info.fecs.code.size, 256) +
		ALIGN(g->ctxsw_ucode_info.fecs.data.size, 256);
	p_img->desc->app_size = ALIGN(g->ctxsw_ucode_info.fecs.code.size, 256) +
		ALIGN(g->ctxsw_ucode_info.fecs.data.size, 256);
	p_img->desc->app_start_offset = g->ctxsw_ucode_info.fecs.code.offset;
	p_img->desc->app_imem_offset = 0;
	p_img->desc->app_imem_entry = 0;
	p_img->desc->app_dmem_offset = 0;
	p_img->desc->app_resident_code_offset = 0;
	p_img->desc->app_resident_code_size =
		g->ctxsw_ucode_info.fecs.code.size;
	p_img->desc->app_resident_data_offset =
		g->ctxsw_ucode_info.fecs.data.offset -
		g->ctxsw_ucode_info.fecs.code.offset;
	p_img->desc->app_resident_data_size =
		g->ctxsw_ucode_info.fecs.data.size;
	p_img->data = g->ctxsw_ucode_info.surface_desc.cpu_va;
	p_img->data_size = p_img->desc->image_size;

	p_img->fw_ver = NULL;
	p_img->header = NULL;
	p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
	nvgpu_acr_dbg(g, "fecs fw loaded\n");
	nvgpu_release_firmware(g, fecs_sig);
	return 0;
free_lsf_desc:
	nvgpu_kfree(g, lsf_desc);
rel_sig:
	nvgpu_release_firmware(g, fecs_sig);
	return err;
}

int nvgpu_acr_lsf_gpccs_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)
{
	struct lsf_ucode_desc *lsf_desc;
	struct nvgpu_firmware *gpccs_sig;
	struct flcn_ucode_img *p_img = (struct flcn_ucode_img *)lsf_ucode_img;
	int err;

	if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) {
		return -ENOENT;
	}

	gpccs_sig = nvgpu_request_firmware(g, T18x_GPCCS_UCODE_SIG, 0);
	if (gpccs_sig == NULL) {
		nvgpu_err(g, "failed to load gpccs sig");
		return -ENOENT;
	}
	lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc));
	if (lsf_desc == NULL) {
		err = -ENOMEM;
		goto rel_sig;
	}
	nvgpu_memcpy((u8 *)lsf_desc, (u8 *)gpccs_sig->data,
		min_t(size_t, sizeof(*lsf_desc), gpccs_sig->size));
	lsf_desc->falcon_id = FALCON_ID_GPCCS;

	p_img->desc = nvgpu_kzalloc(g, sizeof(struct pmu_ucode_desc));
	if (p_img->desc == NULL) {
		err = -ENOMEM;
		goto free_lsf_desc;
	}

	p_img->desc->bootloader_start_offset = 0;
	p_img->desc->bootloader_size =
		ALIGN(g->ctxsw_ucode_info.gpccs.boot.size, 256);
	p_img->desc->bootloader_imem_offset =
		g->ctxsw_ucode_info.gpccs.boot_imem_offset;
	p_img->desc->bootloader_entry_point =
		g->ctxsw_ucode_info.gpccs.boot_entry;

	p_img->desc->image_size =
		ALIGN(g->ctxsw_ucode_info.gpccs.boot.size, 256) +
		ALIGN(g->ctxsw_ucode_info.gpccs.code.size, 256) +
		ALIGN(g->ctxsw_ucode_info.gpccs.data.size, 256);
	p_img->desc->app_size = ALIGN(g->ctxsw_ucode_info.gpccs.code.size, 256)
		+ ALIGN(g->ctxsw_ucode_info.gpccs.data.size, 256);
	p_img->desc->app_start_offset = p_img->desc->bootloader_size;
	p_img->desc->app_imem_offset = 0;
	p_img->desc->app_imem_entry = 0;
	p_img->desc->app_dmem_offset = 0;
	p_img->desc->app_resident_code_offset = 0;
	p_img->desc->app_resident_code_size =
		ALIGN(g->ctxsw_ucode_info.gpccs.code.size, 256);
	p_img->desc->app_resident_data_offset =
		ALIGN(g->ctxsw_ucode_info.gpccs.data.offset, 256) -
		ALIGN(g->ctxsw_ucode_info.gpccs.code.offset, 256);
	p_img->desc->app_resident_data_size =
		ALIGN(g->ctxsw_ucode_info.gpccs.data.size, 256);
	p_img->data = (u32 *)((u8 *)g->ctxsw_ucode_info.surface_desc.cpu_va +
		g->ctxsw_ucode_info.gpccs.boot.offset);
	p_img->data_size = ALIGN(p_img->desc->image_size, 256);
	p_img->fw_ver = NULL;
	p_img->header = NULL;
	p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
	nvgpu_acr_dbg(g, "gpccs fw loaded\n");
	nvgpu_release_firmware(g, gpccs_sig);
	return 0;
free_lsf_desc:
	nvgpu_kfree(g, lsf_desc);
rel_sig:
	nvgpu_release_firmware(g, gpccs_sig);
	return err;
}

/*
 * lsfm_parse_no_loader_ucode: parses UCODE header of falcon &
 * updates values in LSB header
 */
static void lsfm_parse_no_loader_ucode(u32 *p_ucodehdr,
	struct lsf_lsb_header *lsb_hdr)
{

	u32 code_size = 0;
	u32 data_size = 0;
	u32 i = 0;
	u32 total_apps = p_ucodehdr[FLCN_NL_UCODE_HDR_NUM_APPS_IND];

	/* Lets calculate code size*/
	code_size += p_ucodehdr[FLCN_NL_UCODE_HDR_OS_CODE_SIZE_IND];
	for (i = 0; i < total_apps; i++) {
		code_size += p_ucodehdr[FLCN_NL_UCODE_HDR_APP_CODE_SIZE_IND
			(total_apps, i)];
	}
	code_size += p_ucodehdr[FLCN_NL_UCODE_HDR_OS_OVL_SIZE_IND(total_apps)];

	/* Calculate data size*/
	data_size += p_ucodehdr[FLCN_NL_UCODE_HDR_OS_DATA_SIZE_IND];
	for (i = 0; i < total_apps; i++) {
		data_size += p_ucodehdr[FLCN_NL_UCODE_HDR_APP_DATA_SIZE_IND
			(total_apps, i)];
	}

	lsb_hdr->ucode_size = code_size;
	lsb_hdr->data_size = data_size;
	lsb_hdr->bl_code_size = p_ucodehdr[FLCN_NL_UCODE_HDR_OS_CODE_SIZE_IND];
	lsb_hdr->bl_imem_off = 0;
	lsb_hdr->bl_data_off = p_ucodehdr[FLCN_NL_UCODE_HDR_OS_DATA_OFF_IND];
	lsb_hdr->bl_data_size = p_ucodehdr[FLCN_NL_UCODE_HDR_OS_DATA_SIZE_IND];
}
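
A sketch of what the header walk above computes, for a hypothetical no-loader ucode with total_apps = 2 (the FLCN_NL_UCODE_HDR_*_IND macros index into the raw header words):

	/*
	 *	code_size = OS code + app0 code + app1 code + OS overlay
	 *	data_size = OS data + app0 data + app1 data
	 *
	 * The OS code doubles as the bootloader: bl_code_size is the OS
	 * code size and bl_imem_off is 0.
	 */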

/*
 * @brief lsfm_fill_static_lsb_hdr_info
 * Populate static LSB header information using the provided ucode image
 */
static void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
	u32 falcon_id, struct lsfm_managed_ucode_img *pnode)
{
	u32 full_app_size = 0;
	u32 data = 0;

	if (pnode->ucode_img.lsf_desc != NULL) {
		nvgpu_memcpy((u8 *)&pnode->lsb_header.signature,
			(u8 *)pnode->ucode_img.lsf_desc,
			sizeof(struct lsf_ucode_desc));
	}
	pnode->lsb_header.ucode_size = pnode->ucode_img.data_size;

	/* The remainder of the LSB depends on the loader usage */
	if (pnode->ucode_img.header != NULL) {
		/* Does not use a loader */
		pnode->lsb_header.data_size = 0;
		pnode->lsb_header.bl_code_size = 0;
		pnode->lsb_header.bl_data_off = 0;
		pnode->lsb_header.bl_data_size = 0;

		lsfm_parse_no_loader_ucode(pnode->ucode_img.header,
			&(pnode->lsb_header));

		/*
		 * Set LOAD_CODE_AT_0 and DMACTL_REQ_CTX.
		 * True for all method based falcons
		 */
		data = NV_FLCN_ACR_LSF_FLAG_LOAD_CODE_AT_0_TRUE |
			NV_FLCN_ACR_LSF_FLAG_DMACTL_REQ_CTX_TRUE;
		pnode->lsb_header.flags = data;
	} else {
		/* Uses a loader, i.e. has a desc */
		pnode->lsb_header.data_size = 0;

		/*
		 * The loader code size is already aligned (padded) such that
		 * the code following it is aligned, but the size in the image
		 * desc is not, bloat it up to be on a 256 byte alignment.
		 */
		pnode->lsb_header.bl_code_size = ALIGN(
			pnode->ucode_img.desc->bootloader_size,
			LSF_BL_CODE_SIZE_ALIGNMENT);
		full_app_size = ALIGN(pnode->ucode_img.desc->app_size,
			LSF_BL_CODE_SIZE_ALIGNMENT) +
			pnode->lsb_header.bl_code_size;
		pnode->lsb_header.ucode_size = ALIGN(
			pnode->ucode_img.desc->app_resident_data_offset,
			LSF_BL_CODE_SIZE_ALIGNMENT) +
			pnode->lsb_header.bl_code_size;
		pnode->lsb_header.data_size = full_app_size -
			pnode->lsb_header.ucode_size;
		/*
		 * Though the BL is located at 0th offset of the image, the VA
		 * is different to make sure that it doesn't collide the actual
		 * OS VA range
		 */
		pnode->lsb_header.bl_imem_off =
			pnode->ucode_img.desc->bootloader_imem_offset;

		pnode->lsb_header.flags = 0;

		if (falcon_id == FALCON_ID_PMU) {
			data = NV_FLCN_ACR_LSF_FLAG_DMACTL_REQ_CTX_TRUE;
			pnode->lsb_header.flags = data;
		}

		if (g->acr.lsf[falcon_id].is_priv_load) {
			pnode->lsb_header.flags |=
				NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD_TRUE;
		}
	}
}

/* Adds a ucode image to the list of managed ucode images. */
static int lsfm_add_ucode_img(struct gk20a *g, struct ls_flcn_mgr *plsfm,
	struct flcn_ucode_img *ucode_image, u32 falcon_id)
{

	struct lsfm_managed_ucode_img *pnode;

	pnode = nvgpu_kzalloc(g, sizeof(struct lsfm_managed_ucode_img));
	if (pnode == NULL) {
		return -ENOMEM;
	}

	/* Keep a copy of the ucode image info locally */
	nvgpu_memcpy((u8 *)&pnode->ucode_img, (u8 *)ucode_image,
		sizeof(struct flcn_ucode_img));

	/* Fill in static WPR header info */
	pnode->wpr_header.falcon_id = falcon_id;
	pnode->wpr_header.bootstrap_owner = g->acr.bootstrap_owner;
	pnode->wpr_header.status = LSF_IMAGE_STATUS_COPY;

	pnode->wpr_header.lazy_bootstrap =
		(u32)g->acr.lsf[falcon_id].is_lazy_bootstrap;

	/* Fill in static LSB header info elsewhere */
	lsfm_fill_static_lsb_hdr_info(g, falcon_id, pnode);
	pnode->next = plsfm->ucode_img_list;
	plsfm->ucode_img_list = pnode;
	return 0;
}

/* Discover all managed falcon ucode images */
static int lsfm_discover_ucode_images(struct gk20a *g,
	struct ls_flcn_mgr *plsfm)
{
	struct flcn_ucode_img ucode_img;
	struct nvgpu_acr *acr = &g->acr;
	u32 falcon_id;
	u32 i;
	int err = 0;

	/*
	 * Enumerate all constructed falcon objects, as we need the ucode
	 * image info and total falcon count
	 */
	for (i = 0U; i < FALCON_ID_END; i++) {
		if (test_bit((int)i, (void *)&acr->lsf_enable_mask) &&
			acr->lsf[i].get_lsf_ucode_details != NULL) {

			(void) memset(&ucode_img, 0, sizeof(ucode_img));

			if (acr->lsf[i].get_lsf_ucode_details(g,
				(void *)&ucode_img) != 0) {
				nvgpu_err(g, "LS falcon-%d ucode get failed", i);
				goto exit;
			}

			if (ucode_img.lsf_desc != NULL) {
				/*
				 * falcon_id is formed by grabbing the static
				 * base falconId from the image and adding the
				 * engine-designated falcon instance.
				 */
				falcon_id = ucode_img.lsf_desc->falcon_id +
					ucode_img.flcn_inst;

				err = lsfm_add_ucode_img(g, plsfm, &ucode_img,
					falcon_id);
				if (err != 0) {
					nvgpu_err(g, "Failed to add falcon-%d to LSFM",
						falcon_id);
					goto exit;
				}

				plsfm->managed_flcn_cnt++;
			}
		}
	}

exit:
	return err;
}

/* Generate WPR requirements for ACR allocation request */
static int lsf_gen_wpr_requirements(struct gk20a *g, struct ls_flcn_mgr *plsfm)
{
	struct lsfm_managed_ucode_img *pnode = plsfm->ucode_img_list;
	u32 wpr_offset;

	/*
	 * Start with an array of WPR headers at the base of the WPR.
	 * The expectation here is that the secure falcon will do a single DMA
	 * read of this array and cache it internally so it's OK to pack these.
	 * Also, we add 1 to the falcon count to indicate the end of the array.
	 */
	wpr_offset = U32(sizeof(struct lsf_wpr_header)) *
		(U32(plsfm->managed_flcn_cnt) + U32(1));

	/*
	 * Walk the managed falcons, accounting for the LSB structs
	 * as well as the ucode images.
	 */
	while (pnode != NULL) {
		/* Align, save off, and include an LSB header size */
		wpr_offset = ALIGN(wpr_offset, LSF_LSB_HEADER_ALIGNMENT);
		pnode->wpr_header.lsb_offset = wpr_offset;
		wpr_offset += (u32)sizeof(struct lsf_lsb_header);

		/*
		 * Align, save off, and include the original (static)
		 * ucode image size
		 */
		wpr_offset = ALIGN(wpr_offset,
			LSF_UCODE_DATA_ALIGNMENT);
		pnode->lsb_header.ucode_off = wpr_offset;
		wpr_offset += pnode->ucode_img.data_size;

		/*
		 * For falcons that use a boot loader (BL), we append a loader
		 * desc structure on the end of the ucode image and consider this
		 * the boot loader data. The host will then copy the loader desc
		 * args to this space within the WPR region (before locking down)
		 * and the HS bin will then copy them to DMEM 0 for the loader.
		 */
		if (pnode->ucode_img.header == NULL) {
			/*
			 * Track the size for LSB details filled in later
			 * Note that at this point we don't know what kind of
			 * boot loader desc, so we just take the size of the
			 * generic one, which is the largest it will ever be.
			 */
			/* Align (size bloat) and save off generic descriptor size */
			pnode->lsb_header.bl_data_size = ALIGN(
				(u32)sizeof(pnode->bl_gen_desc),
				LSF_BL_DATA_SIZE_ALIGNMENT);

			/* Align, save off, and include the additional BL data */
			wpr_offset = ALIGN(wpr_offset,
				LSF_BL_DATA_ALIGNMENT);
			pnode->lsb_header.bl_data_off = wpr_offset;
			wpr_offset += pnode->lsb_header.bl_data_size;
		} else {
			/*
			 * bl_data_off is already assigned in static
			 * information. But that is from start of the image
			 */
			pnode->lsb_header.bl_data_off +=
				(wpr_offset - pnode->ucode_img.data_size);
		}

		/* Finally, update ucode surface size to include updates */
		pnode->full_ucode_size = wpr_offset -
			pnode->lsb_header.ucode_off;
		if (pnode->wpr_header.falcon_id != FALCON_ID_PMU) {
			pnode->lsb_header.app_code_off =
				pnode->lsb_header.bl_code_size;
			pnode->lsb_header.app_code_size =
				pnode->lsb_header.ucode_size -
				pnode->lsb_header.bl_code_size;
			pnode->lsb_header.app_data_off =
				pnode->lsb_header.ucode_size;
			pnode->lsb_header.app_data_size =
				pnode->lsb_header.data_size;
		}
		pnode = pnode->next;
	}
	plsfm->wpr_size = wpr_offset;
	return 0;
}
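
The resulting non-WPR blob layout can be sketched as follows (alignment padding via the LSF_*_ALIGNMENT constants elided):

	/*
	 *	+--------------------------------+  offset 0
	 *	| lsf_wpr_header[cnt + 1]        |  packed; terminator entry is
	 *	+--------------------------------+  tagged FALCON_ID_INVALID
	 *	| falcon 0: lsf_lsb_header       |
	 *	|           ucode image          |
	 *	|           bl_gen_desc          |  loader-based falcons only
	 *	+--------------------------------+
	 *	| falcon 1: ...                  |
	 *	+--------------------------------+  wpr_offset == plsfm->wpr_size
	 */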

/* Initialize WPR contents */
static int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
	void *lsfm, u32 *p_bl_gen_desc_size)
{
	struct wpr_carveout_info wpr_inf;
	struct lsfm_managed_ucode_img *p_lsfm =
		(struct lsfm_managed_ucode_img *)lsfm;
	struct flcn_ucode_img *p_img = &(p_lsfm->ucode_img);
	struct loader_config *ldr_cfg = &(p_lsfm->bl_gen_desc.loader_cfg);
	u64 addr_base;
	struct pmu_ucode_desc *desc;
	u64 tmp;
	u32 addr_code, addr_data;

	if (p_img->desc == NULL) {
		/*
		 * This means it's a header based ucode,
		 * and so we do not fill BL gen desc structure
		 */
		return -EINVAL;
	}
	desc = p_img->desc;
	/*
	 * Calculate physical and virtual addresses for various portions of
	 * the PMU ucode image
	 * Calculate the 32-bit addresses for the application code, application
	 * data, and bootloader code. These values are all based on IM_BASE.
	 * The 32-bit addresses will be the upper 32-bits of the virtual or
	 * physical addresses of each respective segment.
	 */
	addr_base = p_lsfm->lsb_header.ucode_off;
	g->acr.get_wpr_info(g, &wpr_inf);
	addr_base += wpr_inf.wpr_base;
	nvgpu_acr_dbg(g, "pmu loader cfg u32 addrbase %x\n", (u32)addr_base);
	/* From linux */
	tmp = (addr_base +
		desc->app_start_offset +
		desc->app_resident_code_offset) >> 8;
	nvgpu_assert(tmp <= U32_MAX);
	addr_code = u64_lo32(tmp);
	nvgpu_acr_dbg(g, "app start %d app res code off %d\n",
		desc->app_start_offset, desc->app_resident_code_offset);
	tmp = (addr_base +
		desc->app_start_offset +
		desc->app_resident_data_offset) >> 8;
	nvgpu_assert(tmp <= U32_MAX);
	addr_data = u64_lo32(tmp);
	nvgpu_acr_dbg(g, "app res data offset %d\n",
		desc->app_resident_data_offset);
	nvgpu_acr_dbg(g, "bl start off %d\n", desc->bootloader_start_offset);

	/* Populate the loader_config state */
	ldr_cfg->dma_idx = g->acr.lsf[FALCON_ID_PMU].falcon_dma_idx;
	ldr_cfg->code_dma_base = addr_code;
	ldr_cfg->code_dma_base1 = 0x0;
	ldr_cfg->code_size_total = desc->app_size;
	ldr_cfg->code_size_to_load = desc->app_resident_code_size;
	ldr_cfg->code_entry_point = desc->app_imem_entry;
	ldr_cfg->data_dma_base = addr_data;
	ldr_cfg->data_dma_base1 = 0;
	ldr_cfg->data_size = desc->app_resident_data_size;
	ldr_cfg->overlay_dma_base = addr_code;
	ldr_cfg->overlay_dma_base1 = 0x0;

	/* Update the argc/argv members */
	ldr_cfg->argc = 1;
	nvgpu_pmu_get_cmd_line_args_offset(g, &ldr_cfg->argv);

	*p_bl_gen_desc_size = (u32)sizeof(struct loader_config);
	return 0;
}
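
The >> 8 shifts above convert byte addresses into the 256-byte-block DMA bases the falcon DMA engine expects. An illustration with hypothetical numbers:

	/*
	 * With wpr_base = 0x10000000, ucode_off = 0x1000,
	 * app_start_offset = 0x100, app_resident_code_offset = 0:
	 *
	 *	addr_code = (0x10000000 + 0x1000 + 0x100 + 0) >> 8 = 0x100011
	 */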

static int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
	void *lsfm, u32 *p_bl_gen_desc_size, u32 falconid)
{
	struct wpr_carveout_info wpr_inf;
	struct lsfm_managed_ucode_img *p_lsfm =
		(struct lsfm_managed_ucode_img *)lsfm;
	struct flcn_ucode_img *p_img = &(p_lsfm->ucode_img);
	struct flcn_bl_dmem_desc *ldr_cfg =
		&(p_lsfm->bl_gen_desc.bl_dmem_desc);
	u64 addr_base;
	struct pmu_ucode_desc *desc;
	u32 addr_code, addr_data;
	u64 tmp;

	if (p_img->desc == NULL) {
		/*
		 * This means it's a header based ucode,
		 * and so we do not fill BL gen desc structure
		 */
		return -EINVAL;
	}
	desc = p_img->desc;

	/*
	 * Calculate physical and virtual addresses for various portions of
	 * the PMU ucode image
	 * Calculate the 32-bit addresses for the application code, application
	 * data, and bootloader code. These values are all based on IM_BASE.
	 * The 32-bit addresses will be the upper 32-bits of the virtual or
	 * physical addresses of each respective segment.
	 */
	addr_base = p_lsfm->lsb_header.ucode_off;
	g->acr.get_wpr_info(g, &wpr_inf);
	addr_base += wpr_inf.wpr_base;

	nvgpu_acr_dbg(g, "gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base,
		p_lsfm->wpr_header.falcon_id);
	tmp = (addr_base +
		desc->app_start_offset +
		desc->app_resident_code_offset) >> 8;
	nvgpu_assert(tmp <= U32_MAX);
	addr_code = u64_lo32(tmp);
	tmp = (addr_base +
		desc->app_start_offset +
		desc->app_resident_data_offset) >> 8;
	nvgpu_assert(tmp <= U32_MAX);
	addr_data = u64_lo32(tmp);

	nvgpu_acr_dbg(g, "gen cfg %x u32 addrcode %x & data %x load offset %xID\n",
		(u32)addr_code, (u32)addr_data, desc->bootloader_start_offset,
		p_lsfm->wpr_header.falcon_id);

	/* Populate the LOADER_CONFIG state */
	(void) memset((void *) ldr_cfg, 0, sizeof(struct flcn_bl_dmem_desc));
	ldr_cfg->ctx_dma = g->acr.lsf[falconid].falcon_dma_idx;
	ldr_cfg->code_dma_base = addr_code;
	ldr_cfg->non_sec_code_size = desc->app_resident_code_size;
	ldr_cfg->data_dma_base = addr_data;
	ldr_cfg->data_size = desc->app_resident_data_size;
	ldr_cfg->code_entry_point = desc->app_imem_entry;
	*p_bl_gen_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc);
	return 0;
}

/* Populate falcon boot loader generic desc. */
static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
	struct lsfm_managed_ucode_img *pnode)
{

	if (pnode->wpr_header.falcon_id != FALCON_ID_PMU) {
		nvgpu_acr_dbg(g, "non pmu. write flcn bl gen desc\n");
		gm20b_flcn_populate_bl_dmem_desc(g,
			pnode, &pnode->bl_gen_desc_size,
			pnode->wpr_header.falcon_id);
		return 0;
	}

	if (pnode->wpr_header.falcon_id == FALCON_ID_PMU) {
		nvgpu_acr_dbg(g, "pmu write flcn bl gen desc\n");
		return gm20b_pmu_populate_loader_cfg(g, pnode,
			&pnode->bl_gen_desc_size);
	}

	/* Failed to find the falcon requested. */
	return -ENOENT;
}

static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm,
	struct nvgpu_mem *ucode)
{
	struct lsfm_managed_ucode_img *pnode = plsfm->ucode_img_list;
	struct lsf_wpr_header last_wpr_hdr;
	u32 i;

	/* The WPR array is at the base of the WPR */
	pnode = plsfm->ucode_img_list;
	(void) memset(&last_wpr_hdr, 0, sizeof(struct lsf_wpr_header));
	i = 0;

	/*
	 * Walk the managed falcons, flush WPR and LSB headers to FB.
	 * flush any bl args to the storage area relative to the
	 * ucode image (appended on the end as a DMEM area).
	 */
	while (pnode != NULL) {
		/* Flush WPR header to memory */
		nvgpu_mem_wr_n(g, ucode, i * (u32)sizeof(pnode->wpr_header),
			&pnode->wpr_header,
			(u32)sizeof(pnode->wpr_header));

		nvgpu_acr_dbg(g, "wpr header");
		nvgpu_acr_dbg(g, "falconid :%d",
			pnode->wpr_header.falcon_id);
		nvgpu_acr_dbg(g, "lsb_offset :%x",
			pnode->wpr_header.lsb_offset);
		nvgpu_acr_dbg(g, "bootstrap_owner :%d",
			pnode->wpr_header.bootstrap_owner);
		nvgpu_acr_dbg(g, "lazy_bootstrap :%d",
			pnode->wpr_header.lazy_bootstrap);
		nvgpu_acr_dbg(g, "status :%d",
			pnode->wpr_header.status);

		/* Flush LSB header to memory */
		nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset,
			&pnode->lsb_header,
			(u32)sizeof(pnode->lsb_header));

		nvgpu_acr_dbg(g, "lsb header");
		nvgpu_acr_dbg(g, "ucode_off :%x",
			pnode->lsb_header.ucode_off);
		nvgpu_acr_dbg(g, "ucode_size :%x",
			pnode->lsb_header.ucode_size);
		nvgpu_acr_dbg(g, "data_size :%x",
			pnode->lsb_header.data_size);
		nvgpu_acr_dbg(g, "bl_code_size :%x",
			pnode->lsb_header.bl_code_size);
		nvgpu_acr_dbg(g, "bl_imem_off :%x",
			pnode->lsb_header.bl_imem_off);
		nvgpu_acr_dbg(g, "bl_data_off :%x",
			pnode->lsb_header.bl_data_off);
		nvgpu_acr_dbg(g, "bl_data_size :%x",
			pnode->lsb_header.bl_data_size);
		nvgpu_acr_dbg(g, "app_code_off :%x",
			pnode->lsb_header.app_code_off);
		nvgpu_acr_dbg(g, "app_code_size :%x",
			pnode->lsb_header.app_code_size);
		nvgpu_acr_dbg(g, "app_data_off :%x",
			pnode->lsb_header.app_data_off);
		nvgpu_acr_dbg(g, "app_data_size :%x",
			pnode->lsb_header.app_data_size);
		nvgpu_acr_dbg(g, "flags :%x",
			pnode->lsb_header.flags);

		/* If this falcon has a boot loader and related args, flush them */
		if (pnode->ucode_img.header == NULL) {
			/* Populate gen bl and flush to memory */
			lsfm_fill_flcn_bl_gen_desc(g, pnode);
			nvgpu_mem_wr_n(g, ucode,
				pnode->lsb_header.bl_data_off,
				&pnode->bl_gen_desc,
				pnode->bl_gen_desc_size);
		}
		/* Copy the ucode image itself */
		nvgpu_mem_wr_n(g, ucode, pnode->lsb_header.ucode_off,
			pnode->ucode_img.data,
			pnode->ucode_img.data_size);
		pnode = pnode->next;
		i++;
	}

	/* Tag the terminator WPR header with an invalid falcon ID. */
	last_wpr_hdr.falcon_id = FALCON_ID_INVALID;
	nvgpu_mem_wr_n(g, ucode,
		(u32)plsfm->managed_flcn_cnt *
		(u32)sizeof(struct lsf_wpr_header),
		&last_wpr_hdr,
		(u32)sizeof(struct lsf_wpr_header));
}

/* Free any ucode image structure resources. */
static void lsfm_free_ucode_img_res(struct gk20a *g,
	struct flcn_ucode_img *p_img)
{
	if (p_img->lsf_desc != NULL) {
		nvgpu_kfree(g, p_img->lsf_desc);
		p_img->lsf_desc = NULL;
	}
}

/* Free any ucode image structure resources. */
static void lsfm_free_nonpmu_ucode_img_res(struct gk20a *g,
	struct flcn_ucode_img *p_img)
{
	if (p_img->lsf_desc != NULL) {
		nvgpu_kfree(g, p_img->lsf_desc);
		p_img->lsf_desc = NULL;
	}
	if (p_img->desc != NULL) {
		nvgpu_kfree(g, p_img->desc);
		p_img->desc = NULL;
	}
}

static void free_acr_resources(struct gk20a *g, struct ls_flcn_mgr *plsfm)
{
	u32 cnt = plsfm->managed_flcn_cnt;
	struct lsfm_managed_ucode_img *mg_ucode_img;
	while (cnt != 0U) {
		mg_ucode_img = plsfm->ucode_img_list;
		if (mg_ucode_img->ucode_img.lsf_desc->falcon_id ==
			FALCON_ID_PMU) {
			lsfm_free_ucode_img_res(g, &mg_ucode_img->ucode_img);
		} else {
			lsfm_free_nonpmu_ucode_img_res(g,
				&mg_ucode_img->ucode_img);
		}
		plsfm->ucode_img_list = mg_ucode_img->next;
		nvgpu_kfree(g, mg_ucode_img);
		cnt--;
	}
}

int nvgpu_acr_prepare_ucode_blob_v0(struct gk20a *g)
{
	int err = 0;
	struct ls_flcn_mgr lsfm_l, *plsfm;
	struct wpr_carveout_info wpr_inf;

	if (g->acr.ucode_blob.cpu_va != NULL) {
		/* Recovery case, we do not need to form non WPR blob */
		return err;
	}
	plsfm = &lsfm_l;
	(void) memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr));
	nvgpu_acr_dbg(g, "fetching GMMU regs\n");
	g->ops.fb.vpr_info_fetch(g);
	gr_gk20a_init_ctxsw_ucode(g);

	g->acr.get_wpr_info(g, &wpr_inf);
	nvgpu_acr_dbg(g, "wpr carveout base:%llx\n", wpr_inf.wpr_base);
	nvgpu_acr_dbg(g, "wpr carveout size :%llx\n", wpr_inf.size);

	/* Discover all managed falcons */
	err = lsfm_discover_ucode_images(g, plsfm);
	nvgpu_acr_dbg(g, "Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
	if (err != 0) {
		goto free_sgt;
	}

	if ((plsfm->managed_flcn_cnt != 0U) &&
		(g->acr.ucode_blob.cpu_va == NULL)) {
		/* Generate WPR requirements */
		err = lsf_gen_wpr_requirements(g, plsfm);
		if (err != 0) {
			goto free_sgt;
		}

		/* Alloc memory to hold ucode blob contents */
		err = g->acr.alloc_blob_space(g, plsfm->wpr_size,
			&g->acr.ucode_blob);
		if (err != 0) {
			goto free_sgt;
		}

		nvgpu_acr_dbg(g, "managed LS falcon %d, WPR size %d bytes.\n",
			plsfm->managed_flcn_cnt, plsfm->wpr_size);
		lsfm_init_wpr_contents(g, plsfm, &g->acr.ucode_blob);
	} else {
		nvgpu_acr_dbg(g, "LSFM is managing no falcons.\n");
	}
	nvgpu_acr_dbg(g, "prepare ucode blob return 0\n");
	free_acr_resources(g, plsfm);
free_sgt:
	return err;
}
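
For orientation, the blob-preparation sequence implemented above can be summarized as:

	/*
	 *	lsfm_discover_ucode_images()  - collect LS ucode images + signatures
	 *	lsf_gen_wpr_requirements()    - lay out headers/images, size the WPR
	 *	g->acr.alloc_blob_space()     - sys or vid allocation (see acr.c)
	 *	lsfm_init_wpr_contents()      - write headers, BL descs, ucode images
	 *	free_acr_resources()          - drop the temporary LSFM list
	 */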
988 drivers/gpu/nvgpu/common/acr/acr_blob_construct_v1.c Normal file
@@ -0,0 +1,988 @@
/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/acr/nvgpu_acr.h>
#include <nvgpu/firmware.h>
#include <nvgpu/pmu.h>
#include <nvgpu/enabled.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/string.h>
#include <nvgpu/bug.h>

#include "acr_gm20b.h"
#include "acr_gv100.h"
#include "acr_tu104.h"

static void flcn64_set_dma(struct falc_u64 *dma_addr, u64 value)
{
	dma_addr->lo |= u64_lo32(value);
	dma_addr->hi |= u64_hi32(value);
}
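
A usage note on flcn64_set_dma(): it ORs into lo/hi rather than assigning, so it relies on the falc_u64 having been zeroed first. A hypothetical call site (the descriptor and field names are illustrative, not from this diff):

	struct flcn_bl_dmem_desc_v1 bl_desc;	/* hypothetical descriptor */

	(void) memset(&bl_desc, 0, sizeof(bl_desc));	/* must be zeroed */
	flcn64_set_dma(&bl_desc.code_dma_base,
		wpr_inf.wpr_base + ucode_off);	/* hypothetical values */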
|
||||
|
||||
int nvgpu_acr_lsf_pmu_ucode_details_v1(struct gk20a *g, void *lsf_ucode_img)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = &g->pmu;
|
||||
struct lsf_ucode_desc_v1 *lsf_desc;
|
||||
struct flcn_ucode_img_v1 *p_img =
|
||||
(struct flcn_ucode_img_v1 *)lsf_ucode_img;
|
||||
int err = 0;
|
||||
|
||||
lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc_v1));
|
||||
if (lsf_desc == NULL) {
|
||||
err = -ENOMEM;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
nvgpu_memcpy((u8 *)lsf_desc, (u8 *)pmu->fw_sig->data,
|
||||
min_t(size_t, sizeof(*lsf_desc), pmu->fw_sig->size));
|
||||
|
||||
lsf_desc->falcon_id = FALCON_ID_PMU;
|
||||
|
||||
p_img->desc = (struct pmu_ucode_desc_v1 *)(void *)pmu->fw_desc->data;
|
||||
p_img->data = (u32 *)(void *)pmu->fw_image->data;
|
||||
p_img->data_size = p_img->desc->app_start_offset + p_img->desc->app_size;
|
||||
p_img->fw_ver = NULL;
|
||||
p_img->header = NULL;
|
||||
p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc;
|
||||
|
||||
exit:
|
||||
return err;
|
||||
}
|
||||
|
||||
int nvgpu_acr_lsf_fecs_ucode_details_v1(struct gk20a *g, void *lsf_ucode_img)
|
||||
{
|
||||
u32 ver = g->params.gpu_arch + g->params.gpu_impl;
|
||||
struct lsf_ucode_desc_v1 *lsf_desc;
|
||||
struct nvgpu_firmware *fecs_sig = NULL;
|
||||
struct flcn_ucode_img_v1 *p_img =
|
||||
(struct flcn_ucode_img_v1 *)lsf_ucode_img;
|
||||
int err;
|
||||
|
||||
switch (ver) {
|
||||
case NVGPU_GPUID_GV11B:
|
||||
fecs_sig = nvgpu_request_firmware(g, GM20B_FECS_UCODE_SIG, 0);
|
||||
break;
|
||||
case NVGPU_GPUID_GV100:
|
||||
fecs_sig = nvgpu_request_firmware(g, GV100_FECS_UCODE_SIG,
|
||||
NVGPU_REQUEST_FIRMWARE_NO_SOC);
|
||||
break;
|
||||
case NVGPU_GPUID_TU104:
|
||||
fecs_sig = nvgpu_request_firmware(g, TU104_FECS_UCODE_SIG,
|
||||
NVGPU_REQUEST_FIRMWARE_NO_SOC);
|
||||
break;
|
||||
default:
|
||||
nvgpu_err(g, "no support for GPUID %x", ver);
|
||||
}
|
||||
|
||||
if (fecs_sig == NULL) {
|
||||
nvgpu_err(g, "failed to load fecs sig");
|
||||
return -ENOENT;
|
||||
}
|
||||
lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc_v1));
|
||||
if (lsf_desc == NULL) {
|
||||
err = -ENOMEM;
|
||||
goto rel_sig;
|
||||
}
|
||||
nvgpu_memcpy((u8 *)lsf_desc, (u8 *)fecs_sig->data,
|
||||
min_t(size_t, sizeof(*lsf_desc), fecs_sig->size));
|
||||
|
||||
lsf_desc->falcon_id = FALCON_ID_FECS;
|
||||
|
||||
p_img->desc = nvgpu_kzalloc(g, sizeof(struct pmu_ucode_desc_v1));
|
||||
if (p_img->desc == NULL) {
|
||||
err = -ENOMEM;
|
||||
goto free_lsf_desc;
|
||||
}
|
||||
|
||||
p_img->desc->bootloader_start_offset =
|
||||
g->ctxsw_ucode_info.fecs.boot.offset;
|
||||
p_img->desc->bootloader_size =
|
||||
ALIGN(g->ctxsw_ucode_info.fecs.boot.size, 256);
|
||||
p_img->desc->bootloader_imem_offset =
|
||||
g->ctxsw_ucode_info.fecs.boot_imem_offset;
|
||||
p_img->desc->bootloader_entry_point =
|
||||
g->ctxsw_ucode_info.fecs.boot_entry;
|
||||
|
||||
p_img->desc->image_size =
|
||||
ALIGN(g->ctxsw_ucode_info.fecs.boot.size, 256) +
|
||||
ALIGN(g->ctxsw_ucode_info.fecs.code.size, 256) +
|
||||
ALIGN(g->ctxsw_ucode_info.fecs.data.size, 256);
|
||||
p_img->desc->app_size = ALIGN(g->ctxsw_ucode_info.fecs.code.size, 256) +
|
||||
ALIGN(g->ctxsw_ucode_info.fecs.data.size, 256);
|
||||
p_img->desc->app_start_offset = g->ctxsw_ucode_info.fecs.code.offset;
|
||||
p_img->desc->app_imem_offset = 0;
|
||||
p_img->desc->app_imem_entry = 0;
|
||||
p_img->desc->app_dmem_offset = 0;
|
||||
p_img->desc->app_resident_code_offset = 0;
|
||||
p_img->desc->app_resident_code_size =
|
||||
g->ctxsw_ucode_info.fecs.code.size;
|
||||
p_img->desc->app_resident_data_offset =
|
||||
g->ctxsw_ucode_info.fecs.data.offset -
|
||||
g->ctxsw_ucode_info.fecs.code.offset;
|
||||
p_img->desc->app_resident_data_size =
|
||||
g->ctxsw_ucode_info.fecs.data.size;
|
||||
p_img->data = g->ctxsw_ucode_info.surface_desc.cpu_va;
|
||||
p_img->data_size = p_img->desc->image_size;
|
||||
|
||||
p_img->fw_ver = NULL;
|
||||
p_img->header = NULL;
|
||||
p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc;
|
||||
|
||||
nvgpu_acr_dbg(g, "fecs fw loaded\n");
|
||||
|
||||
nvgpu_release_firmware(g, fecs_sig);
|
||||
|
||||
return 0;
|
||||
free_lsf_desc:
|
||||
nvgpu_kfree(g, lsf_desc);
|
||||
rel_sig:
|
||||
nvgpu_release_firmware(g, fecs_sig);
|
||||
return err;
|
||||
}
|
||||
|
||||
int nvgpu_acr_lsf_gpccs_ucode_details_v1(struct gk20a *g, void *lsf_ucode_img)
|
||||
{
|
||||
u32 ver = g->params.gpu_arch + g->params.gpu_impl;
|
||||
struct lsf_ucode_desc_v1 *lsf_desc;
|
||||
struct nvgpu_firmware *gpccs_sig = NULL;
|
||||
struct flcn_ucode_img_v1 *p_img =
|
||||
(struct flcn_ucode_img_v1 *)lsf_ucode_img;
|
||||
int err;
|
||||
|
||||
if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) {
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
switch (ver) {
|
||||
case NVGPU_GPUID_GV11B:
|
||||
gpccs_sig = nvgpu_request_firmware(g, T18x_GPCCS_UCODE_SIG, 0);
|
||||
break;
|
||||
case NVGPU_GPUID_GV100:
|
||||
gpccs_sig = nvgpu_request_firmware(g, GV100_GPCCS_UCODE_SIG,
|
||||
NVGPU_REQUEST_FIRMWARE_NO_SOC);
|
||||
break;
|
||||
case NVGPU_GPUID_TU104:
|
||||
gpccs_sig = nvgpu_request_firmware(g, TU104_GPCCS_UCODE_SIG,
|
||||
NVGPU_REQUEST_FIRMWARE_NO_SOC);
|
||||
break;
|
||||
default:
|
||||
nvgpu_err(g, "no support for GPUID %x", ver);
|
||||
}
|
||||
|
||||
if (gpccs_sig == NULL) {
|
||||
nvgpu_err(g, "failed to load gpccs sig");
|
||||
return -ENOENT;
|
||||
}
|
||||
lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc_v1));
|
||||
if (lsf_desc == NULL) {
|
||||
err = -ENOMEM;
|
||||
goto rel_sig;
|
||||
}
|
||||
nvgpu_memcpy((u8 *)lsf_desc, gpccs_sig->data,
|
||||
min_t(size_t, sizeof(*lsf_desc), gpccs_sig->size));
|
||||
lsf_desc->falcon_id = FALCON_ID_GPCCS;
|
||||
|
||||
p_img->desc = nvgpu_kzalloc(g, sizeof(struct pmu_ucode_desc_v1));
|
||||
if (p_img->desc == NULL) {
|
||||
err = -ENOMEM;
|
||||
goto free_lsf_desc;
|
||||
}
|
||||
|
||||
p_img->desc->bootloader_start_offset = 0;
|
||||
p_img->desc->bootloader_size =
|
||||
ALIGN(g->ctxsw_ucode_info.gpccs.boot.size, 256);
|
||||
p_img->desc->bootloader_imem_offset =
|
||||
g->ctxsw_ucode_info.gpccs.boot_imem_offset;
|
||||
p_img->desc->bootloader_entry_point =
|
||||
g->ctxsw_ucode_info.gpccs.boot_entry;
|
||||
|
||||
p_img->desc->image_size =
|
||||
ALIGN(g->ctxsw_ucode_info.gpccs.boot.size, 256) +
|
||||
ALIGN(g->ctxsw_ucode_info.gpccs.code.size, 256) +
|
||||
ALIGN(g->ctxsw_ucode_info.gpccs.data.size, 256);
|
||||
p_img->desc->app_size = ALIGN(g->ctxsw_ucode_info.gpccs.code.size, 256)
|
||||
+ ALIGN(g->ctxsw_ucode_info.gpccs.data.size, 256);
|
||||
p_img->desc->app_start_offset = p_img->desc->bootloader_size;
|
||||
p_img->desc->app_imem_offset = 0;
|
||||
p_img->desc->app_imem_entry = 0;
|
||||
p_img->desc->app_dmem_offset = 0;
|
||||
p_img->desc->app_resident_code_offset = 0;
|
||||
p_img->desc->app_resident_code_size =
|
||||
ALIGN(g->ctxsw_ucode_info.gpccs.code.size, 256);
|
||||
p_img->desc->app_resident_data_offset =
|
||||
ALIGN(g->ctxsw_ucode_info.gpccs.data.offset, 256) -
|
||||
ALIGN(g->ctxsw_ucode_info.gpccs.code.offset, 256);
|
||||
p_img->desc->app_resident_data_size =
|
||||
ALIGN(g->ctxsw_ucode_info.gpccs.data.size, 256);
|
||||
p_img->data = (u32 *)((u8 *)g->ctxsw_ucode_info.surface_desc.cpu_va +
|
||||
g->ctxsw_ucode_info.gpccs.boot.offset);
|
||||
p_img->data_size = ALIGN(p_img->desc->image_size, 256);
|
||||
p_img->fw_ver = NULL;
|
||||
p_img->header = NULL;
|
||||
p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc;
|
||||
|
||||
nvgpu_acr_dbg(g, "gpccs fw loaded\n");
|
||||
|
||||
nvgpu_release_firmware(g, gpccs_sig);
|
||||
|
||||
return 0;
|
||||
free_lsf_desc:
|
||||
nvgpu_kfree(g, lsf_desc);
|
||||
rel_sig:
|
||||
nvgpu_release_firmware(g, gpccs_sig);
|
||||
return err;
|
||||
}
|
||||
|
||||
int nvgpu_acr_lsf_sec2_ucode_details_v1(struct gk20a *g, void *lsf_ucode_img)
|
||||
{
|
||||
struct nvgpu_firmware *sec2_fw, *sec2_desc, *sec2_sig;
|
||||
struct pmu_ucode_desc_v1 *desc;
|
||||
struct lsf_ucode_desc_v1 *lsf_desc;
|
||||
struct flcn_ucode_img_v1 *p_img =
|
||||
(struct flcn_ucode_img_v1 *)lsf_ucode_img;
|
||||
u32 *ucode_image;
|
||||
int err = 0;
|
||||
|
||||
nvgpu_acr_dbg(g, "requesting SEC2 ucode in %s", g->name);
|
||||
sec2_fw = nvgpu_request_firmware(g, LSF_SEC2_UCODE_IMAGE_BIN,
|
||||
NVGPU_REQUEST_FIRMWARE_NO_SOC);
|
||||
if (sec2_fw == NULL) {
|
||||
nvgpu_err(g, "failed to load sec2 ucode!!");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
ucode_image = (u32 *)sec2_fw->data;
|
||||
|
||||
nvgpu_acr_dbg(g, "requesting SEC2 ucode desc in %s", g->name);
|
||||
sec2_desc = nvgpu_request_firmware(g, LSF_SEC2_UCODE_DESC_BIN,
|
||||
NVGPU_REQUEST_FIRMWARE_NO_SOC);
|
||||
if (sec2_desc == NULL) {
|
||||
nvgpu_err(g, "failed to load SEC2 ucode desc!!");
|
||||
err = -ENOENT;
|
||||
goto release_img_fw;
|
||||
}
|
||||
|
||||
desc = (struct pmu_ucode_desc_v1 *)sec2_desc->data;
|
||||
|
||||
sec2_sig = nvgpu_request_firmware(g, LSF_SEC2_UCODE_SIG_BIN,
|
||||
NVGPU_REQUEST_FIRMWARE_NO_SOC);
|
||||
if (sec2_sig == NULL) {
|
||||
nvgpu_err(g, "failed to load SEC2 sig!!");
|
||||
err = -ENOENT;
|
||||
goto release_desc;
|
||||
}
|
||||
|
||||
lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc_v1));
|
||||
if (lsf_desc == NULL) {
|
||||
err = -ENOMEM;
|
||||
goto release_sig;
|
||||
}
|
||||
|
||||
nvgpu_memcpy((u8 *)lsf_desc, (u8 *)sec2_sig->data,
|
||||
min_t(size_t, sizeof(*lsf_desc), sec2_sig->size));
|
||||
|
||||
lsf_desc->falcon_id = FALCON_ID_SEC2;
|
||||
|
||||
p_img->desc = desc;
|
||||
p_img->data = ucode_image;
|
||||
p_img->data_size = desc->app_start_offset + desc->app_size;
|
||||
p_img->fw_ver = NULL;
|
||||
p_img->header = NULL;
|
||||
p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc;
|
||||
|
||||
nvgpu_acr_dbg(g, "requesting SEC2 ucode in %s done", g->name);
|
||||
|
||||
return err;
|
||||
release_sig:
|
||||
nvgpu_release_firmware(g, sec2_sig);
|
||||
release_desc:
|
||||
nvgpu_release_firmware(g, sec2_desc);
|
||||
release_img_fw:
|
||||
nvgpu_release_firmware(g, sec2_fw);
|
||||
return err;
|
||||
}

/*
 * lsfm_parse_no_loader_ucode: parses the falcon ucode header and updates
 * values in the LSB header
 */
static void lsfm_parse_no_loader_ucode(u32 *p_ucodehdr,
	struct lsf_lsb_header_v1 *lsb_hdr)
{
	u32 code_size = 0;
	u32 data_size = 0;
	u32 i = 0;
	u32 total_apps = p_ucodehdr[FLCN_NL_UCODE_HDR_NUM_APPS_IND];

	/* Let's calculate the code size */
	code_size += p_ucodehdr[FLCN_NL_UCODE_HDR_OS_CODE_SIZE_IND];
	for (i = 0; i < total_apps; i++) {
		code_size += p_ucodehdr[FLCN_NL_UCODE_HDR_APP_CODE_SIZE_IND
			(total_apps, i)];
	}
	code_size += p_ucodehdr[FLCN_NL_UCODE_HDR_OS_OVL_SIZE_IND(total_apps)];

	/* Calculate the data size */
	data_size += p_ucodehdr[FLCN_NL_UCODE_HDR_OS_DATA_SIZE_IND];
	for (i = 0; i < total_apps; i++) {
		data_size += p_ucodehdr[FLCN_NL_UCODE_HDR_APP_DATA_SIZE_IND
			(total_apps, i)];
	}

	lsb_hdr->ucode_size = code_size;
	lsb_hdr->data_size = data_size;
	lsb_hdr->bl_code_size = p_ucodehdr[FLCN_NL_UCODE_HDR_OS_CODE_SIZE_IND];
	lsb_hdr->bl_imem_off = 0;
	lsb_hdr->bl_data_off = p_ucodehdr[FLCN_NL_UCODE_HDR_OS_DATA_OFF_IND];
	lsb_hdr->bl_data_size = p_ucodehdr[FLCN_NL_UCODE_HDR_OS_DATA_SIZE_IND];
}
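
As a quick illustration of the size arithmetic above, here is a minimal standalone sketch. The flat index layout used here is a hypothetical stand-in for the FLCN_NL_UCODE_HDR_* macros (the real offsets live in the falcon ucode header definitions, and the real layout groups per-app code and data sizes separately, hence the (total_apps, i) macro arguments):

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical no-loader header, one u32 per field:
 * [0] os_code_off  [1] os_code_size  [2] os_data_off  [3] os_data_size
 * [4] num_apps, then per app: code_size, data_size; finally os_ovl_size.
 */
int main(void)
{
	uint32_t hdr[] = { 0x100, 0x800, 0x900, 0x200, 2,
			   0x400, 0x80, 0x300, 0x40, 0x100 };
	uint32_t num_apps = hdr[4];
	uint32_t code_size = hdr[1];	/* OS code */
	uint32_t data_size = hdr[3];	/* OS data */
	uint32_t i;

	for (i = 0; i < num_apps; i++) {
		code_size += hdr[5 + 2 * i];	/* per-app code size */
		data_size += hdr[6 + 2 * i];	/* per-app data size */
	}
	code_size += hdr[5 + 2 * num_apps];	/* OS overlay size */

	printf("ucode_size=0x%x data_size=0x%x\n", code_size, data_size);
	return 0;
}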

/* Populate static LSB header information using the provided ucode image */
static void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
	u32 falcon_id, struct lsfm_managed_ucode_img_v2 *pnode)
{
	u32 full_app_size = 0;
	u32 data = 0;

	if (pnode->ucode_img.lsf_desc != NULL) {
		nvgpu_memcpy((u8 *)&pnode->lsb_header.signature,
			(u8 *)pnode->ucode_img.lsf_desc,
			sizeof(struct lsf_ucode_desc_v1));
	}
	pnode->lsb_header.ucode_size = pnode->ucode_img.data_size;

	/* The remainder of the LSB depends on the loader usage */
	if (pnode->ucode_img.header != NULL) {
		/* Does not use a loader */
		pnode->lsb_header.data_size = 0;
		pnode->lsb_header.bl_code_size = 0;
		pnode->lsb_header.bl_data_off = 0;
		pnode->lsb_header.bl_data_size = 0;

		lsfm_parse_no_loader_ucode(pnode->ucode_img.header,
			&(pnode->lsb_header));

		/*
		 * Set LOAD_CODE_AT_0 and DMACTL_REQ_CTX.
		 * True for all method-based falcons.
		 */
		data = NV_FLCN_ACR_LSF_FLAG_LOAD_CODE_AT_0_TRUE |
			NV_FLCN_ACR_LSF_FLAG_DMACTL_REQ_CTX_TRUE;
		pnode->lsb_header.flags = data;
	} else {
		/* Uses a loader, i.e. has a descriptor */
		pnode->lsb_header.data_size = 0;

		/*
		 * The loader code size is already aligned (padded) such that
		 * the code following it is aligned, but the size in the image
		 * desc is not; bloat it up to a 256-byte alignment.
		 */
		pnode->lsb_header.bl_code_size = ALIGN(
			pnode->ucode_img.desc->bootloader_size,
			LSF_BL_CODE_SIZE_ALIGNMENT);
		full_app_size = ALIGN(pnode->ucode_img.desc->app_size,
			LSF_BL_CODE_SIZE_ALIGNMENT) +
			pnode->lsb_header.bl_code_size;
		pnode->lsb_header.ucode_size = ALIGN(
			pnode->ucode_img.desc->app_resident_data_offset,
			LSF_BL_CODE_SIZE_ALIGNMENT) +
			pnode->lsb_header.bl_code_size;
		pnode->lsb_header.data_size = full_app_size -
			pnode->lsb_header.ucode_size;
		/*
		 * Though the BL is located at the 0th offset of the image,
		 * the VA is different to make sure that it doesn't collide
		 * with the actual OS VA range.
		 */
		pnode->lsb_header.bl_imem_off =
			pnode->ucode_img.desc->bootloader_imem_offset;

		pnode->lsb_header.flags = 0;

		if (falcon_id == FALCON_ID_PMU) {
			data = NV_FLCN_ACR_LSF_FLAG_DMACTL_REQ_CTX_TRUE;
			pnode->lsb_header.flags = data;
		}

		if (g->acr.lsf[falcon_id].is_priv_load) {
			pnode->lsb_header.flags |=
				NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD_TRUE;
		}
	}
}
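
The loader-based branch above is pure alignment bookkeeping. A minimal standalone sketch with made-up descriptor sizes; LSF_BL_CODE_SIZE_ALIGNMENT is assumed to be 256 here, matching the 256-byte alignment mentioned in the comment:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP_POW2(v, a)	(((v) + ((a) - 1)) & ~((uint32_t)(a) - 1))
#define BL_ALIGN		256U	/* assumed LSF_BL_CODE_SIZE_ALIGNMENT */

int main(void)
{
	/* hypothetical image descriptor values */
	uint32_t bootloader_size = 0x90;
	uint32_t app_size = 0x5000;
	uint32_t app_resident_data_offset = 0x3000;

	uint32_t bl_code_size = ALIGN_UP_POW2(bootloader_size, BL_ALIGN);
	uint32_t full_app_size = ALIGN_UP_POW2(app_size, BL_ALIGN) +
				 bl_code_size;
	uint32_t ucode_size = ALIGN_UP_POW2(app_resident_data_offset,
				 BL_ALIGN) + bl_code_size;
	uint32_t data_size = full_app_size - ucode_size;

	/* bl_code=0x100 ucode=0x3100 data=0x2000 for these inputs */
	printf("bl_code=0x%x ucode=0x%x data=0x%x\n",
	       bl_code_size, ucode_size, data_size);
	return 0;
}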

/* Add a ucode image to the list of managed ucode images. */
static int lsfm_add_ucode_img(struct gk20a *g, struct ls_flcn_mgr_v1 *plsfm,
	struct flcn_ucode_img_v1 *ucode_image, u32 falcon_id)
{
	struct lsfm_managed_ucode_img_v2 *pnode;

	pnode = nvgpu_kzalloc(g, sizeof(struct lsfm_managed_ucode_img_v2));
	if (pnode == NULL) {
		return -ENOMEM;
	}

	/* Keep a copy of the ucode image info locally */
	nvgpu_memcpy((u8 *)&pnode->ucode_img, (u8 *)ucode_image,
		sizeof(struct flcn_ucode_img_v1));

	/* Fill in static WPR header info */
	pnode->wpr_header.falcon_id = falcon_id;
	pnode->wpr_header.bootstrap_owner = g->acr.bootstrap_owner;
	pnode->wpr_header.status = LSF_IMAGE_STATUS_COPY;

	pnode->wpr_header.lazy_bootstrap =
		(u32)g->acr.lsf[falcon_id].is_lazy_bootstrap;

	/* Fill in static LSB header info */
	lsfm_fill_static_lsb_hdr_info(g, falcon_id, pnode);
	pnode->wpr_header.bin_version = pnode->lsb_header.signature.version;
	pnode->next = plsfm->ucode_img_list;
	plsfm->ucode_img_list = pnode;

	return 0;
}

/* Discover all managed falcon ucode images */
static int lsfm_discover_ucode_images(struct gk20a *g,
	struct ls_flcn_mgr_v1 *plsfm)
{
	struct flcn_ucode_img_v1 ucode_img;
	struct nvgpu_acr *acr = &g->acr;
	u32 falcon_id;
	u32 i;
	int err = 0;

	/*
	 * Enumerate all constructed falcon objects, as we need the ucode
	 * image info and total falcon count.
	 */
	for (i = 0U; i < FALCON_ID_END; i++) {
		if (test_bit((int)i, (void *)&acr->lsf_enable_mask) &&
			acr->lsf[i].get_lsf_ucode_details != NULL) {

			(void) memset(&ucode_img, 0, sizeof(ucode_img));

			err = acr->lsf[i].get_lsf_ucode_details(g,
				(void *)&ucode_img);
			if (err != 0) {
				nvgpu_err(g, "LS falcon-%d ucode get failed", i);
				goto exit;
			}

			if (ucode_img.lsf_desc != NULL) {
				/*
				 * falcon_id is formed by grabbing the static
				 * base falconId from the image and adding the
				 * engine-designated falcon instance.
				 */
				falcon_id = ucode_img.lsf_desc->falcon_id +
					ucode_img.flcn_inst;

				err = lsfm_add_ucode_img(g, plsfm, &ucode_img,
					falcon_id);
				if (err != 0) {
					nvgpu_err(g, "failed to add falcon-%d to LSFM",
						falcon_id);
					goto exit;
				}

				plsfm->managed_flcn_cnt++;
			}
		}
	}

exit:
	return err;
}

/* Discover all supported shared data falcon SUB WPRs */
static int lsfm_discover_and_add_sub_wprs(struct gk20a *g,
	struct ls_flcn_mgr_v1 *plsfm)
{
	struct lsfm_sub_wpr *pnode;
	u32 size_4K = 0;
	u32 sub_wpr_index;

	for (sub_wpr_index = 1;
		sub_wpr_index <= LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_MAX;
		sub_wpr_index++) {

		switch (sub_wpr_index) {
		case LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_FRTS_VBIOS_TABLES:
			size_4K = LSF_SHARED_DATA_SUB_WPR_FRTS_VBIOS_TABLES_SIZE_IN_4K;
			break;
		case LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_PLAYREADY_SHARED_DATA:
			size_4K = LSF_SHARED_DATA_SUB_WPR_PLAYREADY_SHARED_DATA_SIZE_IN_4K;
			break;
		default:
			size_4K = 0; /* subWpr not supported */
			break;
		}

		if (size_4K != 0U) {
			pnode = nvgpu_kzalloc(g, sizeof(struct lsfm_sub_wpr));
			if (pnode == NULL) {
				return -ENOMEM;
			}

			pnode->sub_wpr_header.use_case_id = sub_wpr_index;
			pnode->sub_wpr_header.size_4K = size_4K;

			pnode->pnext = plsfm->psub_wpr_list;
			plsfm->psub_wpr_list = pnode;

			plsfm->managed_sub_wpr_count++;
		}
	}

	return 0;
}

/* Generate WPR requirements for the ACR allocation request */
static int lsf_gen_wpr_requirements(struct gk20a *g,
	struct ls_flcn_mgr_v1 *plsfm)
{
	struct lsfm_managed_ucode_img_v2 *pnode = plsfm->ucode_img_list;
	struct lsfm_sub_wpr *pnode_sub_wpr = plsfm->psub_wpr_list;
	u32 wpr_offset;

	/*
	 * Start with an array of WPR headers at the base of the WPR.
	 * The expectation here is that the secure falcon will do a single DMA
	 * read of this array and cache it internally so it's OK to pack these.
	 * Also, we add 1 to the falcon count to indicate the end of the array.
	 */
	wpr_offset = U32(sizeof(struct lsf_wpr_header_v1)) *
		(U32(plsfm->managed_flcn_cnt) + U32(1));

	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR)) {
		wpr_offset = ALIGN_UP(wpr_offset, LSF_WPR_HEADERS_TOTAL_SIZE_MAX);
		/*
		 * The sub-WPR headers are appended after the LSF_WPR_HEADER
		 * array in the WPR blob. The size is allocated as per the
		 * managed sub-WPR count.
		 */
		wpr_offset = ALIGN_UP(wpr_offset, LSF_SUB_WPR_HEADER_ALIGNMENT);
		wpr_offset = wpr_offset +
			(U32(sizeof(struct lsf_shared_sub_wpr_header)) *
			(U32(plsfm->managed_sub_wpr_count) + U32(1)));
	}

	/*
	 * Walk the managed falcons, accounting for the LSB structs
	 * as well as the ucode images.
	 */
	while (pnode != NULL) {
		/* Align, save off, and include an LSB header size */
		wpr_offset = ALIGN(wpr_offset, LSF_LSB_HEADER_ALIGNMENT);
		pnode->wpr_header.lsb_offset = wpr_offset;
		wpr_offset += (u32)sizeof(struct lsf_lsb_header_v1);

		/*
		 * Align, save off, and include the original (static) ucode
		 * image size.
		 */
		wpr_offset = ALIGN(wpr_offset, LSF_UCODE_DATA_ALIGNMENT);
		pnode->lsb_header.ucode_off = wpr_offset;
		wpr_offset += pnode->ucode_img.data_size;

		/*
		 * For falcons that use a boot loader (BL), we append a loader
		 * desc structure on the end of the ucode image and consider
		 * this the boot loader data. The host will then copy the
		 * loader desc args to this space within the WPR region
		 * (before locking down) and the HS bin will then copy them to
		 * DMEM 0 for the loader.
		 */
		if (pnode->ucode_img.header == NULL) {
			/*
			 * Track the size for LSB details filled in later.
			 * Note that at this point we don't know what kind of
			 * boot loader desc it is, so we just take the size of
			 * the generic one, which is the largest it will ever
			 * be.
			 */
			/* Align (size bloat) and save off the generic descriptor size */
			pnode->lsb_header.bl_data_size = ALIGN(
				(u32)sizeof(pnode->bl_gen_desc),
				LSF_BL_DATA_SIZE_ALIGNMENT);

			/* Align, save off, and include the additional BL data */
			wpr_offset = ALIGN(wpr_offset, LSF_BL_DATA_ALIGNMENT);
			pnode->lsb_header.bl_data_off = wpr_offset;
			wpr_offset += pnode->lsb_header.bl_data_size;
		} else {
			/*
			 * bl_data_off is already assigned in the static
			 * information, but that is relative to the start of
			 * the image.
			 */
			pnode->lsb_header.bl_data_off +=
				(wpr_offset - pnode->ucode_img.data_size);
		}

		/* Finally, update the ucode surface size to include updates */
		pnode->full_ucode_size = wpr_offset -
			pnode->lsb_header.ucode_off;
		if (pnode->wpr_header.falcon_id != FALCON_ID_PMU) {
			pnode->lsb_header.app_code_off =
				pnode->lsb_header.bl_code_size;
			pnode->lsb_header.app_code_size =
				pnode->lsb_header.ucode_size -
				pnode->lsb_header.bl_code_size;
			pnode->lsb_header.app_data_off =
				pnode->lsb_header.ucode_size;
			pnode->lsb_header.app_data_size =
				pnode->lsb_header.data_size;
		}
		pnode = pnode->next;
	}

	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR)) {
		/*
		 * Walk through the sub-WPR headers to accommodate
		 * sub-WPRs in the WPR request.
		 */
		while (pnode_sub_wpr != NULL) {
			wpr_offset = ALIGN_UP(wpr_offset,
				SUB_WPR_SIZE_ALIGNMENT);
			pnode_sub_wpr->sub_wpr_header.start_addr = wpr_offset;
			wpr_offset = wpr_offset +
				(pnode_sub_wpr->sub_wpr_header.size_4K
				<< SHIFT_4KB);
			pnode_sub_wpr = pnode_sub_wpr->pnext;
		}
		wpr_offset = ALIGN_UP(wpr_offset, SUB_WPR_SIZE_ALIGNMENT);
	}

	plsfm->wpr_size = wpr_offset;
	return 0;
}
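
To make the offset accounting concrete, here is a standalone sketch of the single-WPR path with two managed falcons; all sizes and alignments are illustrative stand-ins for the real LSF_* constants:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP_P2(v, a)	(((v) + ((a) - 1)) & ~((uint32_t)(a) - 1))

int main(void)
{
	/* assumed stand-ins for the real LSF_* constants */
	const uint32_t wpr_hdr_size = 32, lsb_hdr_size = 256;
	const uint32_t lsb_align = 256, ucode_align = 4096;
	const uint32_t bl_data_align = 256;
	uint32_t ucode_sizes[] = { 0x9000, 0x4200 };	/* two falcons */
	uint32_t bl_data_size = 0x100;
	uint32_t off, i, nflcn = 2;

	/* WPR header array at the base, plus one terminator entry */
	off = wpr_hdr_size * (nflcn + 1);

	for (i = 0; i < nflcn; i++) {
		off = ALIGN_UP_P2(off, lsb_align);	/* LSB header */
		printf("falcon %u: lsb_offset=0x%x\n", i, off);
		off += lsb_hdr_size;

		off = ALIGN_UP_P2(off, ucode_align);	/* ucode image */
		printf("falcon %u: ucode_off=0x%x\n", i, off);
		off += ucode_sizes[i];

		off = ALIGN_UP_P2(off, bl_data_align);	/* BL desc args */
		off += bl_data_size;
	}
	printf("wpr_size=0x%x\n", off);
	return 0;
}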

/* Initialize WPR contents */
static int lsfm_populate_flcn_bl_dmem_desc(struct gk20a *g,
	void *lsfm, u32 *p_bl_gen_desc_size, u32 falconid)
{
	struct wpr_carveout_info wpr_inf;
	struct lsfm_managed_ucode_img_v2 *p_lsfm =
		(struct lsfm_managed_ucode_img_v2 *)lsfm;
	struct flcn_ucode_img_v1 *p_img = &(p_lsfm->ucode_img);
	struct flcn_bl_dmem_desc_v1 *ldr_cfg =
		&(p_lsfm->bl_gen_desc.bl_dmem_desc_v1);
	u64 addr_base;
	struct pmu_ucode_desc_v1 *desc;
	u64 addr_code, addr_data;

	if (p_img->desc == NULL) {
		/*
		 * This means it's a header-based ucode,
		 * so we do not fill the BL gen desc structure.
		 */
		return -EINVAL;
	}
	desc = p_img->desc;

	/*
	 * Calculate physical and virtual addresses for various portions of
	 * the PMU ucode image.
	 * Calculate the 32-bit addresses for the application code, application
	 * data, and bootloader code. These values are all based on IM_BASE.
	 * The 32-bit addresses will be the upper 32 bits of the virtual or
	 * physical addresses of each respective segment.
	 */
	addr_base = p_lsfm->lsb_header.ucode_off;
	g->acr.get_wpr_info(g, &wpr_inf);
	addr_base += wpr_inf.wpr_base;

	nvgpu_acr_dbg(g, "falcon ID %x", p_lsfm->wpr_header.falcon_id);
	nvgpu_acr_dbg(g, "gen loader cfg addrbase %llx ", addr_base);
	addr_code = addr_base + desc->app_start_offset;
	addr_data = addr_base + desc->app_start_offset +
		desc->app_resident_data_offset;

	nvgpu_acr_dbg(g, "gen cfg addrcode %llx data %llx load offset %x",
		addr_code, addr_data, desc->bootloader_start_offset);

	/* Populate the LOADER_CONFIG state */
	(void) memset((void *) ldr_cfg, 0,
		sizeof(struct flcn_bl_dmem_desc_v1));

	ldr_cfg->ctx_dma = g->acr.lsf[falconid].falcon_dma_idx;
	flcn64_set_dma(&ldr_cfg->code_dma_base, addr_code);
	ldr_cfg->non_sec_code_off = desc->app_resident_code_offset;
	ldr_cfg->non_sec_code_size = desc->app_resident_code_size;
	flcn64_set_dma(&ldr_cfg->data_dma_base, addr_data);
	ldr_cfg->data_size = desc->app_resident_data_size;
	ldr_cfg->code_entry_point = desc->app_imem_entry;

	/* Update the argc/argv members */
	ldr_cfg->argc = 1;
	if (g->acr.lsf[falconid].get_cmd_line_args_offset != NULL) {
		g->acr.lsf[falconid].get_cmd_line_args_offset(g,
			&ldr_cfg->argv);
	}

	*p_bl_gen_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc_v1);
	return 0;
}
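
A minimal sketch of the address math above; wpr_base, ucode_off and the descriptor offsets are made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t wpr_base = 0x1f8000000ULL;	/* hypothetical carveout base */
	uint64_t ucode_off = 0x20000;		/* from the LSB header */
	uint32_t app_start_offset = 0x100;	/* from the ucode descriptor */
	uint32_t app_resident_data_offset = 0x3000;

	uint64_t addr_base = wpr_base + ucode_off;
	uint64_t addr_code = addr_base + app_start_offset;
	uint64_t addr_data = addr_base + app_start_offset +
			     app_resident_data_offset;

	printf("code=0x%llx data=0x%llx\n",
	       (unsigned long long)addr_code, (unsigned long long)addr_data);
	return 0;
}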

/* Populate the falcon boot loader generic desc. */
static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
	struct lsfm_managed_ucode_img_v2 *pnode)
{
	return lsfm_populate_flcn_bl_dmem_desc(g, pnode,
		&pnode->bl_gen_desc_size,
		pnode->wpr_header.falcon_id);
}

static u32 lsfm_init_sub_wpr_contents(struct gk20a *g,
	struct ls_flcn_mgr_v1 *plsfm, struct nvgpu_mem *ucode)
{
	struct lsfm_sub_wpr *psub_wpr_node;
	struct lsf_shared_sub_wpr_header last_sub_wpr_header;
	u32 temp_size = (u32)sizeof(struct lsf_shared_sub_wpr_header);
	u32 sub_wpr_header_offset = 0;
	u32 i = 0;

	/* SubWpr headers are placed after the WPR headers */
	sub_wpr_header_offset = LSF_WPR_HEADERS_TOTAL_SIZE_MAX;

	/*
	 * Walk through the managed shared subWPR headers
	 * and flush them to FB.
	 */
	psub_wpr_node = plsfm->psub_wpr_list;
	i = 0;
	while (psub_wpr_node != NULL) {
		nvgpu_mem_wr_n(g, ucode,
			sub_wpr_header_offset + (i * temp_size),
			&psub_wpr_node->sub_wpr_header, temp_size);

		psub_wpr_node = psub_wpr_node->pnext;
		i++;
	}
	last_sub_wpr_header.use_case_id =
		LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_INVALID;
	nvgpu_mem_wr_n(g, ucode, sub_wpr_header_offset +
		(plsfm->managed_sub_wpr_count * temp_size),
		&last_sub_wpr_header, temp_size);

	return 0;
}

static void lsfm_init_wpr_contents(struct gk20a *g,
	struct ls_flcn_mgr_v1 *plsfm, struct nvgpu_mem *ucode)
{
	struct lsfm_managed_ucode_img_v2 *pnode = plsfm->ucode_img_list;
	struct lsf_wpr_header_v1 last_wpr_hdr;
	u32 i;
	u64 tmp;

	/* The WPR array is at the base of the WPR */
	pnode = plsfm->ucode_img_list;
	(void) memset(&last_wpr_hdr, 0, sizeof(struct lsf_wpr_header_v1));
	i = 0;

	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR)) {
		lsfm_init_sub_wpr_contents(g, plsfm, ucode);
	}

	/*
	 * Walk the managed falcons, flush the WPR and LSB headers to FB,
	 * and flush any BL args to the storage area relative to the
	 * ucode image (appended on the end as a DMEM area).
	 */
	while (pnode != NULL) {
		/* Flush the WPR header to memory */
		nvgpu_mem_wr_n(g, ucode, i * (u32)sizeof(pnode->wpr_header),
			&pnode->wpr_header, (u32)sizeof(pnode->wpr_header));

		nvgpu_acr_dbg(g, "wpr header");
		nvgpu_acr_dbg(g, "falconid :%d",
			pnode->wpr_header.falcon_id);
		nvgpu_acr_dbg(g, "lsb_offset :%x",
			pnode->wpr_header.lsb_offset);
		nvgpu_acr_dbg(g, "bootstrap_owner :%d",
			pnode->wpr_header.bootstrap_owner);
		nvgpu_acr_dbg(g, "lazy_bootstrap :%d",
			pnode->wpr_header.lazy_bootstrap);
		nvgpu_acr_dbg(g, "status :%d",
			pnode->wpr_header.status);

		/* Flush the LSB header to memory */
		nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset,
			&pnode->lsb_header,
			(u32)sizeof(pnode->lsb_header));

		nvgpu_acr_dbg(g, "lsb header");
		nvgpu_acr_dbg(g, "ucode_off :%x",
			pnode->lsb_header.ucode_off);
		nvgpu_acr_dbg(g, "ucode_size :%x",
			pnode->lsb_header.ucode_size);
		nvgpu_acr_dbg(g, "data_size :%x",
			pnode->lsb_header.data_size);
		nvgpu_acr_dbg(g, "bl_code_size :%x",
			pnode->lsb_header.bl_code_size);
		nvgpu_acr_dbg(g, "bl_imem_off :%x",
			pnode->lsb_header.bl_imem_off);
		nvgpu_acr_dbg(g, "bl_data_off :%x",
			pnode->lsb_header.bl_data_off);
		nvgpu_acr_dbg(g, "bl_data_size :%x",
			pnode->lsb_header.bl_data_size);
		nvgpu_acr_dbg(g, "app_code_off :%x",
			pnode->lsb_header.app_code_off);
		nvgpu_acr_dbg(g, "app_code_size :%x",
			pnode->lsb_header.app_code_size);
		nvgpu_acr_dbg(g, "app_data_off :%x",
			pnode->lsb_header.app_data_off);
		nvgpu_acr_dbg(g, "app_data_size :%x",
			pnode->lsb_header.app_data_size);
		nvgpu_acr_dbg(g, "flags :%x",
			pnode->lsb_header.flags);

		/*
		 * If this falcon has a boot loader and related args,
		 * flush them.
		 */
		if (pnode->ucode_img.header == NULL) {
			/* Populate the gen BL desc and flush to memory */
			lsfm_fill_flcn_bl_gen_desc(g, pnode);
			nvgpu_mem_wr_n(g, ucode, pnode->lsb_header.bl_data_off,
				&pnode->bl_gen_desc, pnode->bl_gen_desc_size);
		}

		/* Copy the ucode image */
		nvgpu_mem_wr_n(g, ucode, pnode->lsb_header.ucode_off,
			pnode->ucode_img.data, pnode->ucode_img.data_size);
		pnode = pnode->next;
		i++;
	}

	/* Tag the terminator WPR header with an invalid falcon ID. */
	last_wpr_hdr.falcon_id = FALCON_ID_INVALID;
	tmp = plsfm->managed_flcn_cnt * sizeof(struct lsf_wpr_header_v1);
	nvgpu_assert(tmp <= U32_MAX);
	nvgpu_mem_wr_n(g, ucode, (u32)tmp, &last_wpr_hdr,
		(u32)sizeof(struct lsf_wpr_header_v1));
}
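
Taken together with lsf_gen_wpr_requirements, the blob this function fills in ends up laid out roughly as follows (single-WPR case; offsets are the aligned values computed earlier):

/*
 * +--------------------------------------+  offset 0
 * | WPR header[0..n-1] + terminator      |
 * +--------------------------------------+  lsb_offset (aligned)
 * | LSB header for falcon 0              |
 * +--------------------------------------+  ucode_off (aligned)
 * | falcon 0 ucode image                 |
 * +--------------------------------------+  bl_data_off (aligned)
 * | falcon 0 BL DMEM descriptor args     |
 * +--------------------------------------+
 * | ... repeated per managed falcon ...  |
 * +--------------------------------------+  wpr_size
 */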

/* Free any ucode image structure resources. */
static void lsfm_free_ucode_img_res(struct gk20a *g,
	struct flcn_ucode_img_v1 *p_img)
{
	if (p_img->lsf_desc != NULL) {
		nvgpu_kfree(g, p_img->lsf_desc);
		p_img->lsf_desc = NULL;
	}
}

static void lsfm_free_nonpmu_ucode_img_res(struct gk20a *g,
	struct flcn_ucode_img_v1 *p_img)
{
	if (p_img->lsf_desc != NULL) {
		nvgpu_kfree(g, p_img->lsf_desc);
		p_img->lsf_desc = NULL;
	}
	if (p_img->desc != NULL) {
		nvgpu_kfree(g, p_img->desc);
		p_img->desc = NULL;
	}
}

static void free_acr_resources(struct gk20a *g, struct ls_flcn_mgr_v1 *plsfm)
{
	u32 cnt = plsfm->managed_flcn_cnt;
	struct lsfm_managed_ucode_img_v2 *mg_ucode_img;

	while (cnt != 0U) {
		mg_ucode_img = plsfm->ucode_img_list;
		if (mg_ucode_img->ucode_img.lsf_desc->falcon_id ==
			FALCON_ID_PMU) {
			lsfm_free_ucode_img_res(g, &mg_ucode_img->ucode_img);
		} else {
			lsfm_free_nonpmu_ucode_img_res(g,
				&mg_ucode_img->ucode_img);
		}
		plsfm->ucode_img_list = mg_ucode_img->next;
		nvgpu_kfree(g, mg_ucode_img);
		cnt--;
	}
}

int nvgpu_acr_prepare_ucode_blob_v1(struct gk20a *g)
{
	int err = 0;
	struct ls_flcn_mgr_v1 lsfm_l, *plsfm;
	struct wpr_carveout_info wpr_inf;

	/* Recovery case: we do not need to rebuild the non-WPR blob of ucodes */
	if (g->acr.ucode_blob.cpu_va != NULL) {
		return err;
	}

	plsfm = &lsfm_l;
	(void) memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr_v1));
	gr_gk20a_init_ctxsw_ucode(g);

	g->acr.get_wpr_info(g, &wpr_inf);
	nvgpu_acr_dbg(g, "wpr carveout base:%llx\n", (wpr_inf.wpr_base));
	nvgpu_acr_dbg(g, "wpr carveout size :%x\n", (u32)wpr_inf.size);

	/* Discover all managed falcons */
	err = lsfm_discover_ucode_images(g, plsfm);
	nvgpu_acr_dbg(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
	if (err != 0) {
		goto exit_err;
	}

	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR)) {
		err = lsfm_discover_and_add_sub_wprs(g, plsfm);
		if (err != 0) {
			goto exit_err;
		}
	}

	if ((plsfm->managed_flcn_cnt != 0U) &&
		(g->acr.ucode_blob.cpu_va == NULL)) {
		/* Generate WPR requirements */
		err = lsf_gen_wpr_requirements(g, plsfm);
		if (err != 0) {
			goto exit_err;
		}

		/* Allocate memory to hold the ucode blob contents */
		err = g->acr.alloc_blob_space(g, plsfm->wpr_size,
			&g->acr.ucode_blob);
		if (err != 0) {
			goto exit_err;
		}

		nvgpu_acr_dbg(g, "managed LS falcon %d, WPR size %d bytes.\n",
			plsfm->managed_flcn_cnt, plsfm->wpr_size);

		lsfm_init_wpr_contents(g, plsfm, &g->acr.ucode_blob);
	} else {
		nvgpu_acr_dbg(g, "LSFM is managing no falcons.\n");
	}
	nvgpu_acr_dbg(g, "prepare ucode blob return 0\n");
	free_acr_resources(g, plsfm);

exit_err:
	return err;
}
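
For orientation, this is roughly how the pieces are expected to be driven after the chip-specific sw_init has installed the function pointers. The sequence below is a hypothetical driver-side sketch using the document's own types, not a real nvgpu entry point, and it is not compilable outside the driver tree:

/* Hypothetical boot-time sequence; error handling abbreviated. */
static int acr_boot_sketch(struct gk20a *g)
{
	struct nvgpu_acr *acr = &g->acr;
	int err;

	/* chip layer chosen earlier, e.g. nvgpu_gv11b_acr_sw_init(g, acr) */
	err = acr->prepare_ucode_blob(g);	/* build the non-WPR blob */
	if (err != 0) {
		return err;
	}
	/* launch the HS ACR ucode on the configured falcon */
	return acr->bootstrap_hs_acr(g, acr, &acr->acr);
}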

219
drivers/gpu/nvgpu/common/acr/acr_gm20b.c
Normal file
@@ -0,0 +1,219 @@

/*
 * Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/types.h>
#include <nvgpu/acr/nvgpu_acr.h>
#include <nvgpu/firmware.h>
#include <nvgpu/pmu.h>
#include <nvgpu/falcon.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/bug.h>

#include "common/pmu/pmu_gm20b.h"

#include "acr_gm20b.h"

static int gm20b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
	struct nvgpu_acr *acr, struct hs_acr *acr_desc, bool is_recovery)
{
	struct nvgpu_firmware *acr_fw = acr_desc->acr_fw;
	struct acr_fw_header *acr_fw_hdr = NULL;
	struct bin_hdr *acr_fw_bin_hdr = NULL;
	struct flcn_acr_desc *acr_dmem_desc;
	u32 *acr_ucode_header = NULL;
	u32 *acr_ucode_data = NULL;

	nvgpu_log_fn(g, " ");

	if (is_recovery) {
		acr_desc->acr_dmem_desc->nonwpr_ucode_blob_size = 0U;
	} else {
		acr_fw_bin_hdr = (struct bin_hdr *)acr_fw->data;
		acr_fw_hdr = (struct acr_fw_header *)
			(acr_fw->data + acr_fw_bin_hdr->header_offset);

		acr_ucode_data = (u32 *)(acr_fw->data +
			acr_fw_bin_hdr->data_offset);

		acr_ucode_header = (u32 *)(acr_fw->data +
			acr_fw_hdr->hdr_offset);

		/* Cache the DMEM desc so recovery can zero the blob size */
		acr_desc->acr_dmem_desc = (struct flcn_acr_desc *)((u8 *)(
			acr_desc->acr_ucode.cpu_va) + acr_ucode_header[2U]);

		/* Patch the WPR info into the ucode */
		acr_dmem_desc = (struct flcn_acr_desc *)
			&(((u8 *)acr_ucode_data)[acr_ucode_header[2U]]);

		acr_dmem_desc->nonwpr_ucode_blob_start =
			nvgpu_mem_get_addr(g, &g->acr.ucode_blob);
		nvgpu_assert(g->acr.ucode_blob.size <= U32_MAX);
		acr_dmem_desc->nonwpr_ucode_blob_size =
			(u32)g->acr.ucode_blob.size;
		acr_dmem_desc->regions.no_regions = 1U;
		acr_dmem_desc->wpr_offset = 0U;
	}

	return 0;
}

static int gm20b_acr_fill_bl_dmem_desc(struct gk20a *g,
	struct nvgpu_acr *acr, struct hs_acr *acr_desc,
	u32 *acr_ucode_header)
{
	struct flcn_bl_dmem_desc *bl_dmem_desc = &acr_desc->bl_dmem_desc;

	nvgpu_log_fn(g, " ");

	(void) memset(bl_dmem_desc, 0U, sizeof(struct flcn_bl_dmem_desc));

	bl_dmem_desc->signature[0] = 0U;
	bl_dmem_desc->signature[1] = 0U;
	bl_dmem_desc->signature[2] = 0U;
	bl_dmem_desc->signature[3] = 0U;
	bl_dmem_desc->ctx_dma = GK20A_PMU_DMAIDX_VIRT;
	bl_dmem_desc->code_dma_base =
		(unsigned int)(((u64)acr_desc->acr_ucode.gpu_va >> 8U));
	bl_dmem_desc->code_dma_base1 = 0x0U;
	bl_dmem_desc->non_sec_code_off = acr_ucode_header[0U];
	bl_dmem_desc->non_sec_code_size = acr_ucode_header[1U];
	bl_dmem_desc->sec_code_off = acr_ucode_header[5U];
	bl_dmem_desc->sec_code_size = acr_ucode_header[6U];
	bl_dmem_desc->code_entry_point = 0U; /* Start at 0th offset */
	bl_dmem_desc->data_dma_base =
		bl_dmem_desc->code_dma_base +
		((acr_ucode_header[2U]) >> 8U);
	bl_dmem_desc->data_dma_base1 = 0x0U;
	bl_dmem_desc->data_size = acr_ucode_header[3U];

	return 0;
}
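
The code_dma_base programming above packs a byte address into 256-byte units via the >> 8U shift. A tiny standalone illustration; the GPU VA and data offset are made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gpu_va = 0x40a0000ULL;	/* hypothetical ucode GPU VA */
	uint32_t data_off = 0x2000;	/* analogue of acr_ucode_header[2] */

	uint32_t code_dma_base = (uint32_t)(gpu_va >> 8);	/* 256B units */
	uint32_t data_dma_base = code_dma_base + (data_off >> 8);

	printf("code_dma_base=0x%x data_dma_base=0x%x\n",
	       code_dma_base, data_dma_base);
	return 0;
}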

/* LSF static config functions */
static u32 gm20b_acr_lsf_pmu(struct gk20a *g,
	struct acr_lsf_config *lsf)
{
	/* PMU LS falcon info */
	lsf->falcon_id = FALCON_ID_PMU;
	lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
	lsf->is_lazy_bootstrap = false;
	lsf->is_priv_load = false;
	lsf->get_lsf_ucode_details = nvgpu_acr_lsf_pmu_ucode_details_v0;
	lsf->get_cmd_line_args_offset = nvgpu_pmu_get_cmd_line_args_offset;

	return BIT32(lsf->falcon_id);
}

static u32 gm20b_acr_lsf_fecs(struct gk20a *g,
	struct acr_lsf_config *lsf)
{
	/* FECS LS falcon info */
	lsf->falcon_id = FALCON_ID_FECS;
	lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
	lsf->is_lazy_bootstrap = false;
	lsf->is_priv_load = false;
	lsf->get_lsf_ucode_details = nvgpu_acr_lsf_fecs_ucode_details_v0;
	lsf->get_cmd_line_args_offset = NULL;

	return BIT32(lsf->falcon_id);
}

static u32 gm20b_acr_lsf_config(struct gk20a *g,
	struct nvgpu_acr *acr)
{
	u32 lsf_enable_mask = 0;

	lsf_enable_mask |= gm20b_acr_lsf_pmu(g, &acr->lsf[FALCON_ID_PMU]);
	lsf_enable_mask |= gm20b_acr_lsf_fecs(g, &acr->lsf[FALCON_ID_FECS]);

	return lsf_enable_mask;
}

static void gm20b_acr_default_sw_init(struct gk20a *g, struct hs_acr *hs_acr)
{
	struct hs_flcn_bl *hs_bl = &hs_acr->acr_hs_bl;

	nvgpu_log_fn(g, " ");

	/* ACR HS bootloader ucode name */
	hs_bl->bl_fw_name = HSBIN_ACR_BL_UCODE_IMAGE;

	/* ACR HS ucode type & f/w name */
	hs_acr->acr_type = ACR_DEFAULT;
	hs_acr->acr_fw_name = HSBIN_ACR_UCODE_IMAGE;

	/* bootloader interface used by the ACR HS bootloader */
	hs_acr->ptr_bl_dmem_desc = &hs_acr->bl_dmem_desc;
	hs_acr->bl_dmem_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc);

	/* set the falcon on which ACR needs to execute */
	hs_acr->acr_flcn = g->pmu.flcn;
	hs_acr->acr_flcn_setup_hw_and_bl_bootstrap =
		gm20b_pmu_setup_hw_and_bl_bootstrap;
}

void gm20b_remove_acr_support(struct nvgpu_acr *acr)
{
	struct gk20a *g = acr->g;
	struct mm_gk20a *mm = &g->mm;
	struct vm_gk20a *vm = mm->pmu.vm;

	if (acr->acr.acr_fw != NULL) {
		nvgpu_release_firmware(g, acr->acr.acr_fw);
	}

	if (acr->acr.acr_hs_bl.hs_bl_fw != NULL) {
		nvgpu_release_firmware(g, acr->acr.acr_hs_bl.hs_bl_fw);
	}

	if (nvgpu_mem_is_valid(&acr->acr.acr_ucode)) {
		nvgpu_dma_unmap_free(vm, &acr->acr.acr_ucode);
	}
	if (nvgpu_mem_is_valid(&acr->acr.acr_hs_bl.hs_bl_ucode)) {
		nvgpu_dma_unmap_free(vm, &acr->acr.acr_hs_bl.hs_bl_ucode);
	}
}

void nvgpu_gm20b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
{
	nvgpu_log_fn(g, " ");

	acr->g = g;

	acr->bootstrap_owner = FALCON_ID_PMU;

	acr->lsf_enable_mask = gm20b_acr_lsf_config(g, acr);

	gm20b_acr_default_sw_init(g, &acr->acr);

	acr->prepare_ucode_blob = nvgpu_acr_prepare_ucode_blob_v0;
	acr->get_wpr_info = nvgpu_acr_wpr_info_sys;
	acr->alloc_blob_space = nvgpu_acr_alloc_blob_space_sys;
	acr->bootstrap_hs_acr = nvgpu_acr_bootstrap_hs_ucode;
	acr->patch_wpr_info_to_ucode =
		gm20b_acr_patch_wpr_info_to_ucode;
	acr->acr_fill_bl_dmem_desc =
		gm20b_acr_fill_bl_dmem_desc;

	acr->remove_support = gm20b_remove_acr_support;
}

31
drivers/gpu/nvgpu/common/acr/acr_gm20b.h
Normal file
@@ -0,0 +1,31 @@

/*
 * GM20B ACR
 *
 * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef NVGPU_GM20B_ACR_GM20B_H
#define NVGPU_GM20B_ACR_GM20B_H

void gm20b_remove_acr_support(struct nvgpu_acr *acr);
void nvgpu_gm20b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr);

#endif /* NVGPU_GM20B_ACR_GM20B_H */
56
drivers/gpu/nvgpu/common/acr/acr_gp10b.c
Normal file
@@ -0,0 +1,56 @@

/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/types.h>
#include <nvgpu/acr/nvgpu_acr.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/pmu.h>

#include "acr_gm20b.h"
#include "acr_gp10b.h"

/* LSF static config functions */
static u32 gp10b_acr_lsf_gpccs(struct gk20a *g,
	struct acr_lsf_config *lsf)
{
	/* GPCCS LS falcon info */
	lsf->falcon_id = FALCON_ID_GPCCS;
	lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
	lsf->is_lazy_bootstrap = true;
	lsf->is_priv_load = true;
	lsf->get_lsf_ucode_details = nvgpu_acr_lsf_gpccs_ucode_details_v0;
	lsf->get_cmd_line_args_offset = NULL;

	return BIT32(lsf->falcon_id);
}

void nvgpu_gp10b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
{
	nvgpu_log_fn(g, " ");

	/* inherit the gm20b config data */
	nvgpu_gm20b_acr_sw_init(g, acr);

	/* gp10b supports LSF gpccs bootstrap */
	acr->lsf_enable_mask |= gp10b_acr_lsf_gpccs(g,
		&acr->lsf[FALCON_ID_GPCCS]);
}
28
drivers/gpu/nvgpu/common/acr/acr_gp10b.h
Normal file
@@ -0,0 +1,28 @@

/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef NVGPU_ACR_GP10B_H
#define NVGPU_ACR_GP10B_H

void nvgpu_gp10b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr);

#endif /* NVGPU_ACR_GP10B_H */
214
drivers/gpu/nvgpu/common/acr/acr_gv100.c
Normal file
@@ -0,0 +1,214 @@

/*
 * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/acr/nvgpu_acr.h>
#include <nvgpu/firmware.h>
#include <nvgpu/pmu.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/bug.h>

#include "acr_gm20b.h"
#include "acr_gv100.h"

#include "gp106/sec2_gp106.h"

static void flcn64_set_dma(struct falc_u64 *dma_addr, u64 value)
{
	dma_addr->lo |= u64_lo32(value);
	dma_addr->hi |= u64_hi32(value);
}
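
Note that flcn64_set_dma ORs into lo/hi rather than assigning, so it relies on the destination having been zeroed beforehand, which the callers below guarantee via memset. A standalone sketch of the split, with u64_lo32/u64_hi32 open-coded:

#include <stdint.h>
#include <stdio.h>

struct falc_u64_sketch {	/* stand-in for struct falc_u64 */
	uint32_t lo;
	uint32_t hi;
};

int main(void)
{
	struct falc_u64_sketch dma = { 0, 0 };	/* must start zeroed */
	uint64_t addr = 0x1f80a2000ULL;		/* hypothetical DMA address */

	dma.lo |= (uint32_t)(addr & 0xffffffffU);	/* u64_lo32() */
	dma.hi |= (uint32_t)(addr >> 32);		/* u64_hi32() */

	printf("lo=0x%x hi=0x%x\n", dma.lo, dma.hi);
	return 0;
}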

static int gv100_acr_patch_wpr_info_to_ucode(struct gk20a *g,
	struct nvgpu_acr *acr, struct hs_acr *acr_desc, bool is_recovery)
{
	struct nvgpu_firmware *acr_fw = acr_desc->acr_fw;
	struct acr_fw_header *acr_fw_hdr = NULL;
	struct bin_hdr *acr_fw_bin_hdr = NULL;
	struct flcn_acr_desc_v1 *acr_dmem_desc;
	struct wpr_carveout_info wpr_inf;
	u32 *acr_ucode_header = NULL;
	u32 *acr_ucode_data = NULL;

	nvgpu_log_fn(g, " ");

	acr_fw_bin_hdr = (struct bin_hdr *)acr_fw->data;
	acr_fw_hdr = (struct acr_fw_header *)
		(acr_fw->data + acr_fw_bin_hdr->header_offset);

	acr_ucode_data = (u32 *)(acr_fw->data + acr_fw_bin_hdr->data_offset);
	acr_ucode_header = (u32 *)(acr_fw->data + acr_fw_hdr->hdr_offset);

	acr->get_wpr_info(g, &wpr_inf);

	acr_dmem_desc = (struct flcn_acr_desc_v1 *)
		&(((u8 *)acr_ucode_data)[acr_ucode_header[2U]]);

	acr_dmem_desc->nonwpr_ucode_blob_start = wpr_inf.nonwpr_base;
	nvgpu_assert(wpr_inf.size <= U32_MAX);
	acr_dmem_desc->nonwpr_ucode_blob_size = (u32)wpr_inf.size;
	acr_dmem_desc->regions.no_regions = 1U;
	acr_dmem_desc->wpr_offset = 0U;

	acr_dmem_desc->wpr_region_id = 1U;
	acr_dmem_desc->regions.region_props[0U].region_id = 1U;
	acr_dmem_desc->regions.region_props[0U].start_addr =
		(wpr_inf.wpr_base) >> 8U;
	acr_dmem_desc->regions.region_props[0U].end_addr =
		((wpr_inf.wpr_base) + wpr_inf.size) >> 8U;
	acr_dmem_desc->regions.region_props[0U].shadowmMem_startaddress =
		wpr_inf.nonwpr_base >> 8U;

	return 0;
}

int gv100_acr_fill_bl_dmem_desc(struct gk20a *g,
	struct nvgpu_acr *acr, struct hs_acr *acr_desc,
	u32 *acr_ucode_header)
{
	struct nvgpu_mem *acr_ucode_mem = &acr_desc->acr_ucode;
	struct flcn_bl_dmem_desc_v1 *bl_dmem_desc =
		&acr_desc->bl_dmem_desc_v1;

	nvgpu_log_fn(g, " ");

	(void) memset(bl_dmem_desc, 0U, sizeof(struct flcn_bl_dmem_desc_v1));

	bl_dmem_desc->signature[0] = 0U;
	bl_dmem_desc->signature[1] = 0U;
	bl_dmem_desc->signature[2] = 0U;
	bl_dmem_desc->signature[3] = 0U;
	bl_dmem_desc->ctx_dma = GK20A_PMU_DMAIDX_VIRT;

	flcn64_set_dma(&bl_dmem_desc->code_dma_base,
		acr_ucode_mem->gpu_va);

	bl_dmem_desc->non_sec_code_off = acr_ucode_header[0U];
	bl_dmem_desc->non_sec_code_size = acr_ucode_header[1U];
	bl_dmem_desc->sec_code_off = acr_ucode_header[5U];
	bl_dmem_desc->sec_code_size = acr_ucode_header[6U];
	bl_dmem_desc->code_entry_point = 0U;

	flcn64_set_dma(&bl_dmem_desc->data_dma_base,
		acr_ucode_mem->gpu_va + acr_ucode_header[2U]);

	bl_dmem_desc->data_size = acr_ucode_header[3U];

	return 0;
}

/* LSF init */
static u32 gv100_acr_lsf_pmu(struct gk20a *g,
	struct acr_lsf_config *lsf)
{
	/* PMU LS falcon info */
	lsf->falcon_id = FALCON_ID_PMU;
	lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
	lsf->is_lazy_bootstrap = false;
	lsf->is_priv_load = false;
	lsf->get_lsf_ucode_details = nvgpu_acr_lsf_pmu_ucode_details_v1;
	lsf->get_cmd_line_args_offset = nvgpu_pmu_get_cmd_line_args_offset;

	return BIT32(lsf->falcon_id);
}

static u32 gv100_acr_lsf_fecs(struct gk20a *g,
	struct acr_lsf_config *lsf)
{
	/* FECS LS falcon info */
	lsf->falcon_id = FALCON_ID_FECS;
	lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
	lsf->is_lazy_bootstrap = true;
	lsf->is_priv_load = true;
	lsf->get_lsf_ucode_details = nvgpu_acr_lsf_fecs_ucode_details_v1;
	lsf->get_cmd_line_args_offset = NULL;

	return BIT32(lsf->falcon_id);
}

static u32 gv100_acr_lsf_gpccs(struct gk20a *g,
	struct acr_lsf_config *lsf)
{
	/* GPCCS LS falcon info */
	lsf->falcon_id = FALCON_ID_GPCCS;
	lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
	lsf->is_lazy_bootstrap = true;
	lsf->is_priv_load = true;
	lsf->get_lsf_ucode_details = nvgpu_acr_lsf_gpccs_ucode_details_v1;
	lsf->get_cmd_line_args_offset = NULL;

	return BIT32(lsf->falcon_id);
}

static u32 gv100_acr_lsf_config(struct gk20a *g,
	struct nvgpu_acr *acr)
{
	u32 lsf_enable_mask = 0;

	lsf_enable_mask |= gv100_acr_lsf_pmu(g, &acr->lsf[FALCON_ID_PMU]);
	lsf_enable_mask |= gv100_acr_lsf_fecs(g, &acr->lsf[FALCON_ID_FECS]);
	lsf_enable_mask |= gv100_acr_lsf_gpccs(g, &acr->lsf[FALCON_ID_GPCCS]);

	return lsf_enable_mask;
}

static void nvgpu_gv100_acr_default_sw_init(struct gk20a *g,
	struct hs_acr *hs_acr)
{
	struct hs_flcn_bl *hs_bl = &hs_acr->acr_hs_bl;

	nvgpu_log_fn(g, " ");

	hs_bl->bl_fw_name = HSBIN_ACR_BL_UCODE_IMAGE;

	hs_acr->acr_type = ACR_DEFAULT;
	hs_acr->acr_fw_name = HSBIN_ACR_UCODE_IMAGE;

	hs_acr->ptr_bl_dmem_desc = &hs_acr->bl_dmem_desc_v1;
	hs_acr->bl_dmem_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc_v1);

	hs_acr->acr_flcn = g->sec2.flcn;
	hs_acr->acr_flcn_setup_hw_and_bl_bootstrap =
		gp106_sec2_setup_hw_and_bl_bootstrap;
}

void nvgpu_gv100_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
{
	nvgpu_log_fn(g, " ");

	acr->g = g;

	acr->bootstrap_owner = FALCON_ID_SEC2;

	acr->lsf_enable_mask = gv100_acr_lsf_config(g, acr);

	nvgpu_gv100_acr_default_sw_init(g, &acr->acr);

	acr->prepare_ucode_blob = nvgpu_acr_prepare_ucode_blob_v1;
	acr->get_wpr_info = nvgpu_acr_wpr_info_vid;
	acr->alloc_blob_space = nvgpu_acr_alloc_blob_space_vid;
	acr->bootstrap_hs_acr = nvgpu_acr_bootstrap_hs_ucode;
	acr->patch_wpr_info_to_ucode =
		gv100_acr_patch_wpr_info_to_ucode;
	acr->acr_fill_bl_dmem_desc =
		gv100_acr_fill_bl_dmem_desc;

	acr->remove_support = gm20b_remove_acr_support;
}
34
drivers/gpu/nvgpu/common/acr/acr_gv100.h
Normal file
@@ -0,0 +1,34 @@

/*
 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef NVGPU_ACR_GV100_H
#define NVGPU_ACR_GV100_H

#define GV100_FECS_UCODE_SIG "gv100/fecs_sig.bin"
#define GV100_GPCCS_UCODE_SIG "gv100/gpccs_sig.bin"

int gv100_acr_fill_bl_dmem_desc(struct gk20a *g, struct nvgpu_acr *acr,
	struct hs_acr *acr_desc, u32 *acr_ucode_header);

void nvgpu_gv100_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr);

#endif /* NVGPU_ACR_GV100_H */
176
drivers/gpu/nvgpu/common/acr/acr_gv11b.c
Normal file
@@ -0,0 +1,176 @@

/*
 * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/types.h>
#include <nvgpu/firmware.h>
#include <nvgpu/acr/nvgpu_acr.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/bug.h>

#include "common/pmu/pmu_gm20b.h"

#include "acr_gm20b.h"
#include "acr_gv100.h"
#include "acr_gv11b.h"

static int gv11b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
	struct nvgpu_acr *acr, struct hs_acr *acr_desc, bool is_recovery)
{
	struct nvgpu_firmware *acr_fw = acr_desc->acr_fw;
	struct acr_fw_header *acr_fw_hdr = NULL;
	struct bin_hdr *acr_fw_bin_hdr = NULL;
	struct flcn_acr_desc_v1 *acr_dmem_desc;
	u32 *acr_ucode_header = NULL;
	u32 *acr_ucode_data = NULL;

	nvgpu_log_fn(g, " ");

	if (is_recovery) {
		acr_desc->acr_dmem_desc_v1->nonwpr_ucode_blob_size = 0U;
	} else {
		acr_fw_bin_hdr = (struct bin_hdr *)acr_fw->data;
		acr_fw_hdr = (struct acr_fw_header *)
			(acr_fw->data + acr_fw_bin_hdr->header_offset);

		acr_ucode_data = (u32 *)(acr_fw->data +
			acr_fw_bin_hdr->data_offset);
		acr_ucode_header = (u32 *)(acr_fw->data +
			acr_fw_hdr->hdr_offset);

		/* Cache the DMEM desc so recovery can zero the blob size */
		acr_desc->acr_dmem_desc_v1 = (struct flcn_acr_desc_v1 *)
			((u8 *)(acr_desc->acr_ucode.cpu_va) +
			acr_ucode_header[2U]);

		/* Patch the WPR info into the ucode */
		acr_dmem_desc = (struct flcn_acr_desc_v1 *)
			&(((u8 *)acr_ucode_data)[acr_ucode_header[2U]]);

		acr_dmem_desc->nonwpr_ucode_blob_start =
			nvgpu_mem_get_addr(g, &g->acr.ucode_blob);
		nvgpu_assert(g->acr.ucode_blob.size <= U32_MAX);
		acr_dmem_desc->nonwpr_ucode_blob_size =
			(u32)g->acr.ucode_blob.size;
		acr_dmem_desc->regions.no_regions = 1U;
		acr_dmem_desc->wpr_offset = 0U;
	}

	return 0;
}

/* LSF static config functions */
static u32 gv11b_acr_lsf_pmu(struct gk20a *g,
	struct acr_lsf_config *lsf)
{
	/* PMU LS falcon info */
	lsf->falcon_id = FALCON_ID_PMU;
	lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
	lsf->is_lazy_bootstrap = false;
	lsf->is_priv_load = false;
	lsf->get_lsf_ucode_details = nvgpu_acr_lsf_pmu_ucode_details_v1;
	lsf->get_cmd_line_args_offset = nvgpu_pmu_get_cmd_line_args_offset;

	return BIT32(lsf->falcon_id);
}

static u32 gv11b_acr_lsf_fecs(struct gk20a *g,
	struct acr_lsf_config *lsf)
{
	/* FECS LS falcon info */
	lsf->falcon_id = FALCON_ID_FECS;
	lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
	lsf->is_lazy_bootstrap = true;
	lsf->is_priv_load = false;
	lsf->get_lsf_ucode_details = nvgpu_acr_lsf_fecs_ucode_details_v1;
	lsf->get_cmd_line_args_offset = NULL;

	return BIT32(lsf->falcon_id);
}

static u32 gv11b_acr_lsf_gpccs(struct gk20a *g,
	struct acr_lsf_config *lsf)
{
	/* GPCCS LS falcon info */
	lsf->falcon_id = FALCON_ID_GPCCS;
	lsf->falcon_dma_idx = GK20A_PMU_DMAIDX_UCODE;
	lsf->is_lazy_bootstrap = true;
	lsf->is_priv_load = true;
	lsf->get_lsf_ucode_details = nvgpu_acr_lsf_gpccs_ucode_details_v1;
	lsf->get_cmd_line_args_offset = NULL;

	return BIT32(lsf->falcon_id);
}

static u32 gv11b_acr_lsf_config(struct gk20a *g,
	struct nvgpu_acr *acr)
{
	u32 lsf_enable_mask = 0;

	lsf_enable_mask |= gv11b_acr_lsf_pmu(g, &acr->lsf[FALCON_ID_PMU]);
	lsf_enable_mask |= gv11b_acr_lsf_fecs(g, &acr->lsf[FALCON_ID_FECS]);
	lsf_enable_mask |= gv11b_acr_lsf_gpccs(g, &acr->lsf[FALCON_ID_GPCCS]);

	return lsf_enable_mask;
}

static void gv11b_acr_default_sw_init(struct gk20a *g, struct hs_acr *hs_acr)
{
	struct hs_flcn_bl *hs_bl = &hs_acr->acr_hs_bl;

	nvgpu_log_fn(g, " ");

	hs_bl->bl_fw_name = HSBIN_ACR_BL_UCODE_IMAGE;

	hs_acr->acr_type = ACR_DEFAULT;
	hs_acr->acr_fw_name = HSBIN_ACR_UCODE_IMAGE;

	hs_acr->ptr_bl_dmem_desc = &hs_acr->bl_dmem_desc_v1;
	hs_acr->bl_dmem_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc_v1);

	hs_acr->acr_flcn = g->pmu.flcn;
	hs_acr->acr_flcn_setup_hw_and_bl_bootstrap =
		gm20b_pmu_setup_hw_and_bl_bootstrap;
}

void nvgpu_gv11b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
{
	nvgpu_log_fn(g, " ");

	acr->g = g;

	acr->bootstrap_owner = FALCON_ID_PMU;

	acr->lsf_enable_mask = gv11b_acr_lsf_config(g, acr);

	gv11b_acr_default_sw_init(g, &acr->acr);

	acr->prepare_ucode_blob = nvgpu_acr_prepare_ucode_blob_v1;
	acr->get_wpr_info = nvgpu_acr_wpr_info_sys;
	acr->alloc_blob_space = nvgpu_acr_alloc_blob_space_sys;
	acr->bootstrap_hs_acr = nvgpu_acr_bootstrap_hs_ucode;
	acr->patch_wpr_info_to_ucode = gv11b_acr_patch_wpr_info_to_ucode;
	acr->acr_fill_bl_dmem_desc =
		gv100_acr_fill_bl_dmem_desc;

	acr->remove_support = gm20b_remove_acr_support;
}
29
drivers/gpu/nvgpu/common/acr/acr_gv11b.h
Normal file
@@ -0,0 +1,29 @@

/*
 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef NVGPU_ACR_GV11B_H
#define NVGPU_ACR_GV11B_H

void nvgpu_gv11b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr);

#endif /* NVGPU_ACR_GV11B_H */
175
drivers/gpu/nvgpu/common/acr/acr_tu104.c
Normal file
@@ -0,0 +1,175 @@
/*
 * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/acr/nvgpu_acr.h>
#include <nvgpu/firmware.h>
#include <nvgpu/enabled.h>
#include <nvgpu/debug.h>
#include <nvgpu/pmu.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/sec2if/sec2_if_cmn.h>

#include "acr_gm20b.h"
#include "acr_gv100.h"
#include "acr_tu104.h"

#include "gv100/gsp_gv100.h"
#include "tu104/sec2_tu104.h"
static int tu104_bootstrap_hs_acr(struct gk20a *g, struct nvgpu_acr *acr,
	struct hs_acr *acr_type)
{
	int err = 0;

	nvgpu_log_fn(g, " ");

	err = nvgpu_acr_bootstrap_hs_ucode(g, &g->acr, &g->acr.acr_ahesasc);
	if (err != 0) {
		nvgpu_err(g, "ACR AHESASC bootstrap failed");
		goto exit;
	}

	err = nvgpu_acr_bootstrap_hs_ucode(g, &g->acr, &g->acr.acr_asb);
	if (err != 0) {
		nvgpu_err(g, "ACR ASB bootstrap failed");
		goto exit;
	}

exit:
	return err;
}
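/*
 * Editorial note, not part of the original change: TU104 HS ACR boot is
 * a fixed two-stage sequence -- AHESASC runs first on the SEC2 falcon to
 * set up HUB encryption and check LS signatures, then ASB runs on the
 * GSP falcon to boot SEC2's LS ucode. If the AHESASC stage fails, the
 * goto above skips the ASB stage entirely, so a half-booted state is
 * never attempted. The acr_type parameter is unused here because both
 * stages always run back to back.
 */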
/* LSF init */
static u32 tu104_acr_lsf_sec2(struct gk20a *g,
	struct acr_lsf_config *lsf)
{
	/* SEC2 LS falcon info */
	lsf->falcon_id = FALCON_ID_SEC2;
	lsf->falcon_dma_idx = NV_SEC2_DMAIDX_UCODE;
	lsf->is_lazy_bootstrap = false;
	lsf->is_priv_load = false;
	lsf->get_lsf_ucode_details = nvgpu_acr_lsf_sec2_ucode_details_v1;
	lsf->get_cmd_line_args_offset = NULL;

	return BIT32(lsf->falcon_id);
}
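/*
 * Editorial note: each lsf init helper returns BIT32(falcon_id), and the
 * caller ORs these into acr->lsf_enable_mask (see nvgpu_tu104_acr_sw_init
 * below). The mask is therefore one bit per LS falcon; enabling SEC2 on
 * top of the inherited gv100 set amounts to:
 *
 *	acr->lsf_enable_mask |= BIT32(FALCON_ID_SEC2);
 */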
/* ACR-AHESASC (ACR hub encryption setter and signature checker) init */
static void nvgpu_tu104_acr_ahesasc_sw_init(struct gk20a *g,
	struct hs_acr *acr_ahesasc)
{
	struct hs_flcn_bl *hs_bl = &acr_ahesasc->acr_hs_bl;

	hs_bl->bl_fw_name = HSBIN_ACR_BL_UCODE_IMAGE;

	acr_ahesasc->acr_type = ACR_AHESASC;

	if (!g->ops.pmu.is_debug_mode_enabled(g)) {
		acr_ahesasc->acr_fw_name = HSBIN_ACR_AHESASC_PROD_UCODE;
	} else {
		acr_ahesasc->acr_fw_name = HSBIN_ACR_AHESASC_DBG_UCODE;
	}

	acr_ahesasc->ptr_bl_dmem_desc = &acr_ahesasc->bl_dmem_desc_v1;
	acr_ahesasc->bl_dmem_desc_size =
		(u32)sizeof(struct flcn_bl_dmem_desc_v1);

	acr_ahesasc->acr_flcn = g->sec2.flcn;
	acr_ahesasc->acr_flcn_setup_hw_and_bl_bootstrap =
		tu104_sec2_setup_hw_and_bl_bootstrap;
}
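/*
 * Editorial note: HS ucode ships as two separately signed images, a
 * production and a debug build, and the *_PROD_UCODE/*_DBG_UCODE choice
 * above is presumably keyed to the board's fuse/debug state, which is
 * what g->ops.pmu.is_debug_mode_enabled(g) reports. The same selection
 * is repeated for ASB below.
 */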
/* ACR-ASB (ACR SEC2 booter) init */
static void nvgpu_tu104_acr_asb_sw_init(struct gk20a *g,
	struct hs_acr *acr_asb)
{
	struct hs_flcn_bl *hs_bl = &acr_asb->acr_hs_bl;

	hs_bl->bl_fw_name = HSBIN_ACR_BL_UCODE_IMAGE;

	acr_asb->acr_type = ACR_ASB;

	if (!g->ops.pmu.is_debug_mode_enabled(g)) {
		acr_asb->acr_fw_name = HSBIN_ACR_ASB_PROD_UCODE;
	} else {
		acr_asb->acr_fw_name = HSBIN_ACR_ASB_DBG_UCODE;
	}

	acr_asb->ptr_bl_dmem_desc = &acr_asb->bl_dmem_desc_v1;
	acr_asb->bl_dmem_desc_size =
		(u32)sizeof(struct flcn_bl_dmem_desc_v1);

	acr_asb->acr_flcn = g->gsp_flcn;
	acr_asb->acr_flcn_setup_hw_and_bl_bootstrap =
		gv100_gsp_setup_hw_and_bl_bootstrap;
}
static void tu104_free_hs_acr(struct gk20a *g,
	struct hs_acr *acr_type)
{
	struct mm_gk20a *mm = &g->mm;
	struct vm_gk20a *vm = mm->pmu.vm;

	if (acr_type->acr_fw != NULL) {
		nvgpu_release_firmware(g, acr_type->acr_fw);
	}

	if (acr_type->acr_hs_bl.hs_bl_fw != NULL) {
		nvgpu_release_firmware(g, acr_type->acr_hs_bl.hs_bl_fw);
	}

	if (nvgpu_mem_is_valid(&acr_type->acr_ucode)) {
		nvgpu_dma_unmap_free(vm, &acr_type->acr_ucode);
	}

	if (nvgpu_mem_is_valid(&acr_type->acr_hs_bl.hs_bl_ucode)) {
		nvgpu_dma_unmap_free(vm, &acr_type->acr_hs_bl.hs_bl_ucode);
	}
}
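/*
 * Editorial note: every release in tu104_free_hs_acr() is guarded by a
 * NULL or validity check, so it is safe to call on a partially
 * initialized hs_acr, e.g. when bootstrap failed after loading only
 * some of the firmware images.
 */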
static void tu104_remove_acr_support(struct nvgpu_acr *acr)
{
	struct gk20a *g = acr->g;

	tu104_free_hs_acr(g, &acr->acr_ahesasc);
	tu104_free_hs_acr(g, &acr->acr_asb);
}

void nvgpu_tu104_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
{
	nvgpu_log_fn(g, " ");

	/* Inherit settings from older chip */
	nvgpu_gv100_acr_sw_init(g, acr);

	acr->lsf_enable_mask |= tu104_acr_lsf_sec2(g,
		&acr->lsf[FALCON_ID_SEC2]);

	acr->prepare_ucode_blob = nvgpu_acr_prepare_ucode_blob_v1;
	acr->bootstrap_owner = FALCON_ID_GSPLITE;
	acr->bootstrap_hs_acr = tu104_bootstrap_hs_acr;
	acr->remove_support = tu104_remove_acr_support;

	/* Init ACR-AHESASC */
	nvgpu_tu104_acr_ahesasc_sw_init(g, &acr->acr_ahesasc);

	/* Init ACR-ASB */
	nvgpu_tu104_acr_asb_sw_init(g, &acr->acr_asb);
}
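Taken together, the ops wired above imply the following life cycle for TU104 ACR. The caller is an editorial sketch, not the actual nvgpu boot path: prepare_ucode_blob's exact signature is assumed, and the unused acr_type argument is passed as NULL because tu104_bootstrap_hs_acr ignores it.

/* editorial sketch -- hypothetical caller */
static int toy_acr_boot_tu104(struct gk20a *g)
{
	int err;

	nvgpu_tu104_acr_sw_init(g, &g->acr);	/* wire the ops table */

	err = g->acr.prepare_ucode_blob(g);	/* assumed signature */
	if (err != 0) {
		goto done;
	}

	err = g->acr.bootstrap_hs_acr(g, &g->acr, NULL); /* AHESASC then ASB */

done:
	if (err != 0) {
		g->acr.remove_support(&g->acr);	/* guarded teardown */
	}
	return err;
}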
31
drivers/gpu/nvgpu/common/acr/acr_tu104.h
Normal file
@@ -0,0 +1,31 @@
/*
 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef NVGPU_ACR_TU104_H
#define NVGPU_ACR_TU104_H

#define TU104_FECS_UCODE_SIG "tu104/fecs_sig.bin"
#define TU104_GPCCS_UCODE_SIG "tu104/gpccs_sig.bin"

void nvgpu_tu104_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr);

#endif /* NVGPU_ACR_TU104_H */