gpu: nvgpu: Remove pmu_bl from GPU secure boot flow

ACR HS ucode is currently loaded by pmu_bl.bin (the falcon bootloader).
However, the ACR ucode can be loaded without bootloader support by
directly copying the non-secure/secure code to the respective IMEM
offsets, along with the required data to DMEM. With this, the
bootloader dependency is removed.

This patch uses nvgpu_acr_self_hs_load_bootstrap() to load the ACR
ucode directly into IMEM using priv writes. It also removes the
bootloader-related code.
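
For illustration, a minimal sketch of the direct-load sequence, assuming
the falcon helper signatures used elsewhere in this driver
(nvgpu_falcon_copy_to_imem/_dmem, nvgpu_falcon_bootstrap). The
ucode_header[] indexing follows the ACR fw header layout referenced by
the removed bootloader-descriptor code; the function name and the
256-byte IMEM tag granularity are assumptions:

/*
 * Hedged sketch, not the verbatim driver code: load HS ucode without a
 * falcon bootloader. Assumed ucode_header[] layout: [0]/[1] non-secure
 * code offset/size, [5]/[6] secure code offset/size, [2]/[3] data
 * offset/size. IMEM tags are assumed to be 256-byte block indices.
 */
static int acr_direct_load_sketch(struct nvgpu_falcon *flcn,
		u32 *ucode, u32 *ucode_header)
{
	int err;

	/* Copy non-secure code to IMEM offset 0 with a non-secure tag */
	err = nvgpu_falcon_copy_to_imem(flcn, 0U,
			(u8 *)ucode + ucode_header[0U], ucode_header[1U],
			0U, false, ucode_header[0U] >> 8U);
	if (err != 0) {
		return err;
	}

	/* Copy secure code to its IMEM offset with the secure bit set */
	err = nvgpu_falcon_copy_to_imem(flcn, ucode_header[5U],
			(u8 *)ucode + ucode_header[5U], ucode_header[6U],
			0U, true, ucode_header[5U] >> 8U);
	if (err != 0) {
		return err;
	}

	/* Copy the required data to DMEM */
	err = nvgpu_falcon_copy_to_dmem(flcn, 0U,
			(u8 *)ucode + ucode_header[2U], ucode_header[3U], 0U);
	if (err != 0) {
		return err;
	}

	/* Start execution at offset 0 and let the HS ucode run to halt */
	return nvgpu_falcon_bootstrap(flcn, 0U);
}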

JIRA NVGPU-3811

Change-Id: Ie2632eb26e421de3765a99c5426471eb37bf1bc9
Signed-off-by: smadhavan <smadhavan@nvidia.com>
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2169976
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: smadhavan <smadhavan@nvidia.com>, 2019-08-07 14:34:04 +05:30
Committed by: Alex Waterman
Commit: 238be35d5a (parent: 50d8ab033d)
8 changed files with 28 additions and 366 deletions

View File

@@ -31,12 +31,9 @@
#include <nvgpu/acr.h>
#include <nvgpu/bug.h>
#include "acr_falcon_bl.h"
#include "acr_bootstrap.h"
#include "acr_priv.h"
struct vm_gk20a* acr_get_engine_vm(struct gk20a *g, u32 falcon_id);
static int acr_wait_for_completion(struct gk20a *g,
struct nvgpu_falcon *flcn, unsigned int timeout)
{
@@ -110,150 +107,6 @@ exit:
return completion;
}
struct vm_gk20a* acr_get_engine_vm(struct gk20a *g, u32 falcon_id)
{
struct vm_gk20a *vm = NULL;
switch (falcon_id) {
case FALCON_ID_PMU:
vm = g->mm.pmu.vm;
break;
#ifdef CONFIG_NVGPU_DGPU
case FALCON_ID_SEC2:
if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SEC2_VM)) {
vm = g->mm.sec2.vm;
}
break;
case FALCON_ID_GSPLITE:
if (nvgpu_is_enabled(g, NVGPU_SUPPORT_GSP_VM)) {
vm = g->mm.gsp.vm;
}
break;
#endif
default:
vm = NULL;
break;
}
return vm;
}
static int acr_hs_bl_exec(struct gk20a *g, struct hs_acr *acr_desc,
bool b_wait_for_halt)
{
struct nvgpu_firmware *hs_bl_fw = acr_desc->acr_hs_bl.hs_bl_fw;
struct hsflcn_bl_desc *hs_bl_desc;
struct nvgpu_falcon_bl_info bl_info;
struct hs_flcn_bl *hs_bl = &acr_desc->acr_hs_bl;
struct vm_gk20a *vm = NULL;
u32 flcn_id = nvgpu_falcon_get_id(acr_desc->acr_flcn);
u32 *hs_bl_code = NULL;
int err = 0;
u32 bl_sz;
nvgpu_acr_dbg(g, "Executing ACR HS Bootloader %s on Falcon-ID - %d",
hs_bl->bl_fw_name, flcn_id);
vm = acr_get_engine_vm(g, flcn_id);
if (vm == NULL) {
nvgpu_err(g, "vm space not allocated for engine falcon - %d", flcn_id);
return -ENOMEM;
}
if (hs_bl_fw == NULL) {
hs_bl_fw = nvgpu_request_firmware(g, hs_bl->bl_fw_name, 0);
if (hs_bl_fw == NULL) {
nvgpu_err(g, "ACR HS BL ucode load fail");
return -ENOENT;
}
hs_bl->hs_bl_fw = hs_bl_fw;
hs_bl->hs_bl_bin_hdr = (struct bin_hdr *)(void *)hs_bl_fw->data;
hs_bl->hs_bl_desc = (struct hsflcn_bl_desc *)(void *)
(hs_bl_fw->data + hs_bl->hs_bl_bin_hdr->header_offset);
hs_bl_desc = hs_bl->hs_bl_desc;
hs_bl_code = (u32 *)(void *)(hs_bl_fw->data +
hs_bl->hs_bl_bin_hdr->data_offset);
bl_sz = ALIGN(hs_bl_desc->bl_img_hdr.bl_code_size, 256U);
hs_bl->hs_bl_ucode.size = bl_sz;
err = nvgpu_dma_alloc_sys(g, bl_sz, &hs_bl->hs_bl_ucode);
if (err != 0) {
nvgpu_err(g, "ACR HS BL failed to allocate memory");
goto err_done;
}
hs_bl->hs_bl_ucode.gpu_va = nvgpu_gmmu_map(vm,
&hs_bl->hs_bl_ucode,
bl_sz,
0U, /* flags */
gk20a_mem_flag_read_only, false,
hs_bl->hs_bl_ucode.aperture);
if (hs_bl->hs_bl_ucode.gpu_va == 0U) {
nvgpu_err(g, "ACR HS BL failed to map ucode memory!!");
goto err_free_ucode;
}
nvgpu_mem_wr_n(g, &hs_bl->hs_bl_ucode, 0U, hs_bl_code, bl_sz);
nvgpu_acr_dbg(g, "Copied BL ucode to bl_cpuva");
}
/* Fill HS BL info */
bl_info.bl_src = hs_bl->hs_bl_ucode.cpu_va;
bl_info.bl_desc = acr_desc->ptr_bl_dmem_desc;
bl_info.bl_desc_size = acr_desc->bl_dmem_desc_size;
nvgpu_assert(hs_bl->hs_bl_ucode.size <= U32_MAX);
bl_info.bl_size = (u32)hs_bl->hs_bl_ucode.size;
bl_info.bl_start_tag = hs_bl->hs_bl_desc->bl_start_tag;
/* Engine falcon reset */
err = nvgpu_falcon_reset(acr_desc->acr_flcn);
if (err != 0) {
goto err_unmap_bl;
}
/* setup falcon apertures, boot-config */
err = nvgpu_falcon_setup_bootstrap_config(acr_desc->acr_flcn);
if (err != 0) {
goto err_unmap_bl;
}
nvgpu_falcon_mailbox_write(acr_desc->acr_flcn, FALCON_MAILBOX_0,
0xDEADA5A5U);
/* bootstrap falcon */
err = nvgpu_falcon_bl_bootstrap(acr_desc->acr_flcn, &bl_info);
if (err != 0) {
goto err_unmap_bl;
}
if (b_wait_for_halt) {
/* wait for ACR halt*/
err = acr_wait_for_completion(g, acr_desc->acr_flcn,
ACR_COMPLETION_TIMEOUT_MS);
if (err != 0) {
goto err_unmap_bl;
}
}
return 0;
err_unmap_bl:
nvgpu_gmmu_unmap(vm, &hs_bl->hs_bl_ucode, hs_bl->hs_bl_ucode.gpu_va);
err_free_ucode:
nvgpu_dma_free(g, &hs_bl->hs_bl_ucode);
err_done:
nvgpu_release_firmware(g, hs_bl_fw);
acr_desc->acr_hs_bl.hs_bl_fw = NULL;
return err;
}
/*
* Patch signatures into ucode image
*/
@@ -292,26 +145,11 @@ static int acr_ucode_patch_sig(struct gk20a *g,
int nvgpu_acr_bootstrap_hs_ucode(struct gk20a *g, struct nvgpu_acr *acr,
struct hs_acr *acr_desc)
{
struct vm_gk20a *vm = NULL;
struct nvgpu_firmware *acr_fw = acr_desc->acr_fw;
struct bin_hdr *acr_fw_bin_hdr = NULL;
struct acr_fw_header *acr_fw_hdr = NULL;
struct nvgpu_mem *acr_ucode_mem = &acr_desc->acr_ucode;
u32 flcn_id = nvgpu_falcon_get_id(acr_desc->acr_flcn);
u32 img_size_in_bytes = 0;
u32 *acr_ucode_data;
u32 *acr_ucode_header;
int status = 0;
nvgpu_acr_dbg(g, "ACR TYPE %x ", acr_desc->acr_type);
vm = acr_get_engine_vm(g, flcn_id);
if (vm == NULL) {
nvgpu_err(g, "vm space not allocated for engine falcon - %d",
flcn_id);
return -ENOMEM;
}
if (acr_fw != NULL) {
acr->patch_wpr_info_to_ucode(g, acr, acr_desc, true);
} else {
@@ -325,77 +163,27 @@ int nvgpu_acr_bootstrap_hs_ucode(struct gk20a *g, struct nvgpu_acr *acr,
acr_desc->acr_fw = acr_fw;
acr_fw_bin_hdr = (struct bin_hdr *)(void *)acr_fw->data;
acr_fw_hdr = (struct acr_fw_header *)(void *)
(acr_fw->data + acr_fw_bin_hdr->header_offset);
acr_ucode_header = (u32 *)(void *)(acr_fw->data +
acr_fw_hdr->hdr_offset);
acr_ucode_data = (u32 *)(void *)(acr_fw->data +
acr_fw_bin_hdr->data_offset);
img_size_in_bytes = ALIGN((acr_fw_bin_hdr->data_size), 256U);
/* Lets patch the signatures first.. */
if (acr_ucode_patch_sig(g, acr_ucode_data,
(u32 *)(void *)(acr_fw->data +
acr_fw_hdr->sig_prod_offset),
(u32 *)(void *)(acr_fw->data +
acr_fw_hdr->sig_dbg_offset),
(u32 *)(void *)(acr_fw->data +
acr_fw_hdr->patch_loc),
(u32 *)(void *)(acr_fw->data +
acr_fw_hdr->patch_sig),
acr_fw_hdr->sig_dbg_size) < 0) {
nvgpu_err(g, "patch signatures fail");
status = -1;
goto err_release_acr_fw;
}
status = nvgpu_dma_alloc_map_sys(vm, img_size_in_bytes,
acr_ucode_mem);
if (status != 0) {
status = -ENOMEM;
goto err_release_acr_fw;
}
acr->patch_wpr_info_to_ucode(g, acr, acr_desc, false);
nvgpu_mem_wr_n(g, acr_ucode_mem, 0U, acr_ucode_data,
img_size_in_bytes);
/*
* In order to execute this binary, we will be using
* a bootloader which will load this image into
* FALCON IMEM/DMEM.
* Fill up the bootloader descriptor to use..
* TODO: Use standard descriptor which the generic bootloader is
* checked in.
*/
acr->acr_fill_bl_dmem_desc(g, acr, acr_desc, acr_ucode_header);
}
status = acr_hs_bl_exec(g, acr_desc, true);
/* Load acr ucode and bootstrap */
status = nvgpu_acr_self_hs_load_bootstrap(g, acr_desc->acr_flcn, acr_fw,
ACR_COMPLETION_TIMEOUT_MS);
if (status != 0) {
goto err_free_ucode_map;
goto err_free_ucode;
}
return 0;
err_free_ucode_map:
nvgpu_dma_unmap_free(vm, acr_ucode_mem);
err_release_acr_fw:
err_free_ucode:
nvgpu_release_firmware(g, acr_fw);
acr_desc->acr_fw = NULL;
return status;
}
#ifdef CONFIG_NVGPU_DGPU
int nvgpu_acr_self_hs_load_bootstrap(struct gk20a *g, struct nvgpu_falcon *flcn,
struct nvgpu_firmware *hs_fw, u32 timeout)
struct nvgpu_firmware *hs_fw, u32 timeout)
{
struct bin_hdr *bin_hdr = NULL;
struct bin_hdr *hs_bin_hdr = NULL;
struct acr_fw_header *fw_hdr = NULL;
u32 *ucode_header = NULL;
u32 *ucode = NULL;
@@ -409,25 +197,29 @@ int nvgpu_acr_self_hs_load_bootstrap(struct gk20a *g, struct nvgpu_falcon *flcn,
return err;
}
bin_hdr = (struct bin_hdr *)hs_fw->data;
fw_hdr = (struct acr_fw_header *)(hs_fw->data + bin_hdr->header_offset);
ucode_header = (u32 *)(hs_fw->data + fw_hdr->hdr_offset);
ucode = (u32 *)(hs_fw->data + bin_hdr->data_offset);
hs_bin_hdr = (struct bin_hdr *)(void *)hs_fw->data;
fw_hdr = (struct acr_fw_header *)(void *)(hs_fw->data +
hs_bin_hdr->header_offset);
ucode_header = (u32 *)(void *)(hs_fw->data + fw_hdr->hdr_offset);
ucode = (u32 *)(void *)(hs_fw->data + hs_bin_hdr->data_offset);
/* Patch Ucode signatures */
if (acr_ucode_patch_sig(g, ucode,
(u32 *)(hs_fw->data + fw_hdr->sig_prod_offset),
(u32 *)(hs_fw->data + fw_hdr->sig_dbg_offset),
(u32 *)(hs_fw->data + fw_hdr->patch_loc),
(u32 *)(hs_fw->data + fw_hdr->patch_sig),
(u32 *)(void *)(hs_fw->data + fw_hdr->sig_prod_offset),
(u32 *)(void *)(hs_fw->data + fw_hdr->sig_dbg_offset),
(u32 *)(void *)(hs_fw->data + fw_hdr->patch_loc),
(u32 *)(void *)(hs_fw->data + fw_hdr->patch_sig),
fw_hdr->sig_dbg_size) < 0) {
nvgpu_err(g, "HS ucode patch signatures fail");
err = -EPERM;
goto exit;
}
/* Clear interrupts */
nvgpu_falcon_set_irq(flcn, false, 0x0U, 0x0U);
/* setup falcon apertures, boot-config */
err = nvgpu_falcon_setup_bootstrap_config(flcn);
if (err != 0) {
goto exit;
}
/* Copy Non Secure IMEM code */
err = nvgpu_falcon_copy_to_imem(flcn, 0U,
@@ -483,5 +275,4 @@ int nvgpu_acr_self_hs_load_bootstrap(struct gk20a *g, struct nvgpu_falcon *flcn,
exit:
return err;
}
#endif
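
For context, a hedged sketch of how the new entry point is driven by
nvgpu_acr_bootstrap_hs_ucode() above, with error handling compressed;
this mirrors the flow visible in this diff rather than quoting it, and
the function name is hypothetical:

/*
 * Sketch only: fetch the signed ACR HS image, patch WPR info into it,
 * then load and bootstrap it directly on the target falcon.
 */
static int acr_bootstrap_sketch(struct gk20a *g, struct nvgpu_acr *acr,
		struct hs_acr *acr_desc)
{
	struct nvgpu_firmware *acr_fw;
	int err;

	acr_fw = nvgpu_request_firmware(g, acr_desc->acr_fw_name, 0);
	if (acr_fw == NULL) {
		return -ENOENT;
	}
	acr_desc->acr_fw = acr_fw;

	/* Patch WPR blob info into the ucode image before loading */
	acr->patch_wpr_info_to_ucode(g, acr, acr_desc, false);

	/* Direct IMEM/DMEM load, bootstrap, and wait for ACR halt */
	err = nvgpu_acr_self_hs_load_bootstrap(g, acr_desc->acr_flcn,
			acr_fw, ACR_COMPLETION_TIMEOUT_MS);
	if (err != 0) {
		nvgpu_release_firmware(g, acr_fw);
		acr_desc->acr_fw = NULL;
	}
	return err;
}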

View File

@@ -141,14 +141,6 @@ struct bin_hdr {
u32 data_size;
};
struct hs_flcn_bl {
const char *bl_fw_name;
struct nvgpu_firmware *hs_bl_fw;
struct hsflcn_bl_desc *hs_bl_desc;
struct bin_hdr *hs_bl_bin_hdr;
struct nvgpu_mem hs_bl_ucode;
};
struct acr_fw_header {
u32 sig_dbg_offset;
u32 sig_dbg_size;
@@ -169,21 +161,9 @@ struct hs_acr {
#define ACR_ASB_FUSA 4U
u32 acr_type;
/* HS bootloader to validate & load ACR ucode */
struct hs_flcn_bl acr_hs_bl;
/* ACR ucode */
const char *acr_fw_name;
struct nvgpu_firmware *acr_fw;
struct nvgpu_mem acr_ucode;
union {
struct flcn_bl_dmem_desc bl_dmem_desc;
struct flcn_bl_dmem_desc_v1 bl_dmem_desc_v1;
};
void *ptr_bl_dmem_desc;
u32 bl_dmem_desc_size;
union{
struct flcn_acr_desc *acr_dmem_desc;

View File

@@ -142,9 +142,6 @@ struct nvgpu_acr {
struct nvgpu_mem *mem);
void (*patch_wpr_info_to_ucode)(struct gk20a *g, struct nvgpu_acr *acr,
struct hs_acr *acr_desc, bool is_recovery);
void (*acr_fill_bl_dmem_desc)(struct gk20a *g,
struct nvgpu_acr *acr, struct hs_acr *acr_desc,
u32 *acr_ucode_header);
int (*bootstrap_hs_acr)(struct gk20a *g, struct nvgpu_acr *acr,
struct hs_acr *acr_desc);

View File

@@ -59,14 +59,12 @@ static void gm20b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
acr_ucode_header = (u32 *)(acr_fw->data +
acr_fw_hdr->hdr_offset);
/* During recovery need to update blob size as 0x0*/
acr_desc->acr_dmem_desc = (struct flcn_acr_desc *)((u8 *)(
acr_desc->acr_ucode.cpu_va) + acr_ucode_header[2U]);
/* Patch WPR info to ucode */
acr_dmem_desc = (struct flcn_acr_desc *)
&(((u8 *)acr_ucode_data)[acr_ucode_header[2U]]);
acr_desc->acr_dmem_desc = acr_dmem_desc;
acr_dmem_desc->nonwpr_ucode_blob_start =
nvgpu_mem_get_addr(g, &g->acr->ucode_blob);
nvgpu_assert(g->acr->ucode_blob.size <= U32_MAX);
@@ -77,36 +75,6 @@ static void gm20b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
}
}
static void gm20b_acr_fill_bl_dmem_desc(struct gk20a *g,
struct nvgpu_acr *acr, struct hs_acr *acr_desc,
u32 *acr_ucode_header)
{
struct flcn_bl_dmem_desc *bl_dmem_desc = &acr_desc->bl_dmem_desc;
nvgpu_log_fn(g, " ");
(void) memset(bl_dmem_desc, 0, sizeof(struct flcn_bl_dmem_desc));
bl_dmem_desc->signature[0] = 0U;
bl_dmem_desc->signature[1] = 0U;
bl_dmem_desc->signature[2] = 0U;
bl_dmem_desc->signature[3] = 0U;
bl_dmem_desc->ctx_dma = GK20A_PMU_DMAIDX_VIRT;
bl_dmem_desc->code_dma_base =
(unsigned int)(((u64)acr_desc->acr_ucode.gpu_va >> 8U));
bl_dmem_desc->code_dma_base1 = 0x0U;
bl_dmem_desc->non_sec_code_off = acr_ucode_header[0U];
bl_dmem_desc->non_sec_code_size = acr_ucode_header[1U];
bl_dmem_desc->sec_code_off = acr_ucode_header[5U];
bl_dmem_desc->sec_code_size = acr_ucode_header[6U];
bl_dmem_desc->code_entry_point = 0U; /* Start at 0th offset */
bl_dmem_desc->data_dma_base =
bl_dmem_desc->code_dma_base +
((acr_ucode_header[2U]) >> 8U);
bl_dmem_desc->data_dma_base1 = 0x0U;
bl_dmem_desc->data_size = acr_ucode_header[3U];
}
/* LSF static config functions */
static u32 gm20b_acr_lsf_pmu(struct gk20a *g,
struct acr_lsf_config *lsf)
@@ -150,21 +118,12 @@ static u32 gm20b_acr_lsf_conifg(struct gk20a *g,
static void gm20b_acr_default_sw_init(struct gk20a *g, struct hs_acr *hs_acr)
{
struct hs_flcn_bl *hs_bl = &hs_acr->acr_hs_bl;
nvgpu_log_fn(g, " ");
/* ACR HS bootloader ucode name */
hs_bl->bl_fw_name = HSBIN_ACR_BL_UCODE_IMAGE;
/* ACR HS ucode type & f/w name*/
hs_acr->acr_type = ACR_DEFAULT;
hs_acr->acr_fw_name = HSBIN_ACR_UCODE_IMAGE;
/* bootloader interface used by ACR HS bootloader */
hs_acr->ptr_bl_dmem_desc = &hs_acr->bl_dmem_desc;
hs_acr->bl_dmem_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc);
/* set on which falcon ACR need to execute*/
hs_acr->acr_flcn = g->pmu->flcn;
hs_acr->acr_engine_bus_err_status =
@@ -189,6 +148,4 @@ void nvgpu_gm20b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
acr->bootstrap_hs_acr = nvgpu_acr_bootstrap_hs_ucode;
acr->patch_wpr_info_to_ucode =
gm20b_acr_patch_wpr_info_to_ucode;
acr->acr_fill_bl_dmem_desc =
gm20b_acr_fill_bl_dmem_desc;
}

View File

@@ -59,15 +59,12 @@ static void gv11b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
acr_ucode_header = (u32 *)(void *)(acr_fw->data +
acr_fw_hdr->hdr_offset);
/* During recovery need to update blob size as 0x0*/
acr_desc->acr_dmem_desc_v1 = (struct flcn_acr_desc_v1 *)(void *)
((u8 *)(acr_desc->acr_ucode.cpu_va) +
acr_ucode_header[2U]);
/* Patch WPR info to ucode */
acr_dmem_desc = (struct flcn_acr_desc_v1 *)(void *)
&(((u8 *)acr_ucode_data)[acr_ucode_header[2U]]);
acr_desc->acr_dmem_desc_v1 = acr_dmem_desc;
acr_dmem_desc->nonwpr_ucode_blob_start =
nvgpu_mem_get_addr(g, &g->acr->ucode_blob);
nvgpu_assert(g->acr->ucode_blob.size <= U32_MAX);
@@ -78,40 +75,6 @@ static void gv11b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
}
}
void gv11b_acr_fill_bl_dmem_desc(struct gk20a *g,
struct nvgpu_acr *acr, struct hs_acr *acr_desc,
u32 *acr_ucode_header)
{
struct nvgpu_mem *acr_ucode_mem = &acr_desc->acr_ucode;
struct flcn_bl_dmem_desc_v1 *bl_dmem_desc =
&acr_desc->bl_dmem_desc_v1;
nvgpu_log_fn(g, " ");
(void) memset(bl_dmem_desc, 0, sizeof(struct flcn_bl_dmem_desc_v1));
bl_dmem_desc->signature[0] = 0U;
bl_dmem_desc->signature[1] = 0U;
bl_dmem_desc->signature[2] = 0U;
bl_dmem_desc->signature[3] = 0U;
bl_dmem_desc->ctx_dma = GK20A_PMU_DMAIDX_VIRT;
flcn64_set_dma(&bl_dmem_desc->code_dma_base,
acr_ucode_mem->gpu_va);
bl_dmem_desc->non_sec_code_off = acr_ucode_header[0U];
bl_dmem_desc->non_sec_code_size = acr_ucode_header[1U];
bl_dmem_desc->sec_code_off = acr_ucode_header[5U];
bl_dmem_desc->sec_code_size = acr_ucode_header[6U];
bl_dmem_desc->code_entry_point = 0U;
flcn64_set_dma(&bl_dmem_desc->data_dma_base,
nvgpu_safe_add_u64(acr_ucode_mem->gpu_va,
acr_ucode_header[2U]));
bl_dmem_desc->data_size = acr_ucode_header[3U];
}
/* LSF static config functions */
#ifdef CONFIG_NVGPU_LS_PMU
static u32 gv11b_acr_lsf_pmu(struct gk20a *g,
@@ -186,18 +149,11 @@ static u32 gv11b_acr_lsf_conifg(struct gk20a *g,
static void gv11b_acr_default_sw_init(struct gk20a *g, struct hs_acr *acr_desc)
{
struct hs_flcn_bl *hs_bl = &acr_desc->acr_hs_bl;
nvgpu_log_fn(g, " ");
hs_bl->bl_fw_name = HSBIN_ACR_BL_UCODE_IMAGE;
acr_desc->acr_type = ACR_DEFAULT;
acr_desc->acr_fw_name = HSBIN_ACR_UCODE_IMAGE;
acr_desc->ptr_bl_dmem_desc = &acr_desc->bl_dmem_desc_v1;
acr_desc->bl_dmem_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc_v1);
acr_desc->acr_flcn = g->pmu->flcn;
acr_desc->report_acr_engine_bus_err_status =
nvgpu_pmu_report_bar0_pri_err_status;
@@ -223,5 +179,4 @@ void nvgpu_gv11b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
acr->alloc_blob_space = nvgpu_acr_alloc_blob_space_sys;
acr->bootstrap_hs_acr = nvgpu_acr_bootstrap_hs_ucode;
acr->patch_wpr_info_to_ucode = gv11b_acr_patch_wpr_info_to_ucode;
acr->acr_fill_bl_dmem_desc = gv11b_acr_fill_bl_dmem_desc;
}

View File

@@ -27,8 +27,6 @@ struct gk20a;
struct nvgpu_acr;
struct hs_acr;
void gv11b_acr_fill_bl_dmem_desc(struct gk20a *g, struct nvgpu_acr *acr,
struct hs_acr *acr_desc, u32 *acr_ucode_header);
void nvgpu_gv11b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr);
#endif /* ACR_SW_GV11B_H */

View File

@@ -211,20 +211,12 @@ static void tu104_acr_ahesasc_fusa_ucode_select(struct gk20a *g,
static void tu104_acr_ahesasc_sw_init(struct gk20a *g,
struct hs_acr *acr_ahesasc)
{
struct hs_flcn_bl *hs_bl = &acr_ahesasc->acr_hs_bl;
hs_bl->bl_fw_name = HSBIN_ACR_BL_UCODE_IMAGE;
if (tu104_acr_is_fusa_enabled(g)) {
tu104_acr_ahesasc_fusa_ucode_select(g, acr_ahesasc);
} else {
tu104_acr_ahesasc_non_fusa_ucode_select(g, acr_ahesasc);
}
acr_ahesasc->ptr_bl_dmem_desc = &acr_ahesasc->bl_dmem_desc_v1;
acr_ahesasc->bl_dmem_desc_size =
(u32)sizeof(struct flcn_bl_dmem_desc_v1);
acr_ahesasc->acr_flcn = &g->sec2.flcn;
}
@@ -256,19 +248,12 @@ static void tu104_acr_asb_fusa_ucode_select(struct gk20a *g,
static void tu104_acr_asb_sw_init(struct gk20a *g,
struct hs_acr *acr_asb)
{
struct hs_flcn_bl *hs_bl = &acr_asb->acr_hs_bl;
hs_bl->bl_fw_name = HSBIN_ACR_BL_UCODE_IMAGE;
if (tu104_acr_is_fusa_enabled(g)) {
tu104_acr_asb_fusa_ucode_select(g, acr_asb);
} else {
tu104_acr_asb_non_fusa_ucode_select(g, acr_asb);
}
acr_asb->ptr_bl_dmem_desc = &acr_asb->bl_dmem_desc_v1;
acr_asb->bl_dmem_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc_v1);
acr_asb->acr_flcn = &g->gsp_flcn;
}
@@ -284,7 +269,6 @@ void nvgpu_tu104_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
acr->bootstrap_owner = FALCON_ID_GSPLITE;
acr->bootstrap_hs_acr = tu104_bootstrap_hs_acr;
acr->patch_wpr_info_to_ucode = tu104_acr_patch_wpr_info_to_ucode;
acr->acr_fill_bl_dmem_desc = gv11b_acr_fill_bl_dmem_desc;
/* Init ACR-AHESASC */
tu104_acr_ahesasc_sw_init(g, &acr->acr_ahesasc);

View File

@@ -171,9 +171,9 @@ int nvgpu_acr_init(struct gk20a *g, struct nvgpu_acr **acr);
#ifdef CONFIG_NVGPU_DGPU
int nvgpu_acr_alloc_blob_prerequisite(struct gk20a *g, struct nvgpu_acr *acr,
size_t size);
#endif
int nvgpu_acr_self_hs_load_bootstrap(struct gk20a *g, struct nvgpu_falcon *flcn,
struct nvgpu_firmware *hs_fw, u32 timeout);
#endif
/**
* @brief Construct blob of LS ucode's in non-wpr memory. Load and bootstrap HS
@@ -207,7 +207,7 @@ int nvgpu_acr_construct_execute(struct gk20a *g, struct nvgpu_acr *acr);
int nvgpu_acr_bootstrap_hs_acr(struct gk20a *g, struct nvgpu_acr *acr);
/**
* @brief Chek if ls-Falcon lazy-bootstrap status to load & bootstrap from
* @brief Check if ls-Falcon lazy-bootstrap status to load & bootstrap from
* LS-RTOS or not
*
* @param g [in] The GPU driver struct.