gpu: nvgpu: Add ACR HS ucode self load & bootstrap support

ACR HS ucode self-load & bootstrap functionality was part of the FB
unit, where it supported the FB mem-unlock HS ucode. This code needs to
access some ACR structs that will become part of the ACR private
headers, which adds constraints to implementing ACR unit private header
support, so move it into the ACR unit.

JIRA NVGPU-2907

Change-Id: I6c6c7504ffe55426b377e9bcf911d4005813bb31
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2069724
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Mahantesh Kumbar
2019-03-11 16:32:10 +05:30
committed by mobile promotions
parent b86354fef9
commit 9c89f6d7cb
3 changed files with 96 additions and 104 deletions

View File

@@ -43,17 +43,17 @@ static int acr_wait_for_completion(struct gk20a *g,
completion = nvgpu_falcon_wait_for_halt(flcn, timeout);
if (completion != 0) {
nvgpu_err(g, "flcn-%d: ACR boot timed out", flcn_id);
nvgpu_err(g, "flcn-%d: HS ucode boot timed out", flcn_id);
nvgpu_falcon_dump_stats(flcn);
goto exit;
}
nvgpu_acr_dbg(g, "flcn-%d: ACR capabilities %x", flcn_id,
nvgpu_acr_dbg(g, "flcn-%d: HS ucode capabilities %x", flcn_id,
nvgpu_falcon_mailbox_read(flcn, FALCON_MAILBOX_1));
data = nvgpu_falcon_mailbox_read(flcn, FALCON_MAILBOX_0);
if (data != 0U) {
nvgpu_err(g, "flcn-%d: ACR boot failed, err %x", flcn_id,
nvgpu_err(g, "flcn-%d: HS ucode boot failed, err %x", flcn_id,
data);
completion = -EAGAIN;
goto exit;
@@ -287,4 +287,91 @@ err_release_acr_fw:
return status;
}
int nvgpu_acr_self_hs_load_bootstrap(struct gk20a *g, struct nvgpu_falcon *flcn,
struct nvgpu_firmware *hs_fw, u32 timeout)
{
struct bin_hdr *bin_hdr = NULL;
struct acr_fw_header *fw_hdr = NULL;
u32 *ucode_header = NULL;
u32 *ucode = NULL;
u32 sec_imem_dest = 0U;
int err = 0;
/* falcon reset */
nvgpu_falcon_reset(flcn);
bin_hdr = (struct bin_hdr *)hs_fw->data;
fw_hdr = (struct acr_fw_header *)(hs_fw->data + bin_hdr->header_offset);
ucode_header = (u32 *)(hs_fw->data + fw_hdr->hdr_offset);
ucode = (u32 *)(hs_fw->data + bin_hdr->data_offset);
/* Patch Ucode signatures */
if (acr_ucode_patch_sig(g, ucode,
(u32 *)(hs_fw->data + fw_hdr->sig_prod_offset),
(u32 *)(hs_fw->data + fw_hdr->sig_dbg_offset),
(u32 *)(hs_fw->data + fw_hdr->patch_loc),
(u32 *)(hs_fw->data + fw_hdr->patch_sig)) < 0) {
nvgpu_err(g, "HS ucode patch signatures fail");
err = -EPERM;
goto exit;
}
/* Clear interrupts */
nvgpu_falcon_set_irq(flcn, false, 0x0U, 0x0U);
/* Copy Non Secure IMEM code */
err = nvgpu_falcon_copy_to_imem(flcn, 0U,
(u8 *)&ucode[ucode_header[OS_CODE_OFFSET] >> 2U],
ucode_header[OS_CODE_SIZE], 0U, false,
GET_IMEM_TAG(ucode_header[OS_CODE_OFFSET]));
if (err != 0) {
nvgpu_err(g, "HS ucode non-secure code to IMEM failed");
goto exit;
}
/* Put secure code after non-secure block */
sec_imem_dest = GET_NEXT_BLOCK(ucode_header[OS_CODE_SIZE]);
err = nvgpu_falcon_copy_to_imem(flcn, sec_imem_dest,
(u8 *)&ucode[ucode_header[APP_0_CODE_OFFSET] >> 2U],
ucode_header[APP_0_CODE_SIZE], 0U, true,
GET_IMEM_TAG(ucode_header[APP_0_CODE_OFFSET]));
if (err != 0) {
nvgpu_err(g, "HS ucode secure code to IMEM failed");
goto exit;
}
/* load DMEM: ensure that signatures are patched */
err = nvgpu_falcon_copy_to_dmem(flcn, 0U, (u8 *)&ucode[
ucode_header[OS_DATA_OFFSET] >> 2U],
ucode_header[OS_DATA_SIZE], 0U);
if (err != 0) {
nvgpu_err(g, "HS ucode data copy to DMEM failed");
goto exit;
}
/*
* Write non-zero value to mailbox register which is updated by
* HS bin to denote its return status.
*/
nvgpu_falcon_mailbox_write(flcn, FALCON_MAILBOX_0, 0xdeadbeefU);
/* set BOOTVEC to start of non-secure code */
err = nvgpu_falcon_bootstrap(flcn, 0U);
if (err != 0) {
nvgpu_err(g, "HS ucode bootstrap failed err-%d on falcon-%d", err,
nvgpu_falcon_get_id(flcn));
goto exit;
}
/* wait for complete & halt */
err = acr_wait_for_completion(g, flcn, timeout);
if (err != 0) {
nvgpu_err(g, "HS ucode completion err %d", err);
}
exit:
return err;
}

View File

@@ -103,41 +103,9 @@ void gv100_fb_disable_hub_intr(struct gk20a *g)
mask);
}
/*
* @brief Patch signatures into ucode image
*/
static int gv100_fb_acr_ucode_patch_sig(struct gk20a *g,
u32 *p_img,
u32 *p_prod_sig,
u32 *p_dbg_sig,
u32 *p_patch_loc,
u32 *p_patch_ind)
{
u32 *p_sig;
if (!g->ops.pmu.is_debug_mode_enabled(g)) {
p_sig = p_prod_sig;
} else {
p_sig = p_dbg_sig;
}
/* Patching logic. We have just one location to patch. */
p_img[(*p_patch_loc>>2)] = p_sig[(*p_patch_ind<<2)];
p_img[(*p_patch_loc>>2)+1U] = p_sig[(*p_patch_ind<<2)+1U];
p_img[(*p_patch_loc>>2)+2U] = p_sig[(*p_patch_ind<<2)+2U];
p_img[(*p_patch_loc>>2)+3U] = p_sig[(*p_patch_ind<<2)+3U];
return 0;
}
int gv100_fb_memory_unlock(struct gk20a *g)
{
struct nvgpu_firmware *mem_unlock_fw = NULL;
struct bin_hdr *hsbin_hdr = NULL;
struct acr_fw_header *fw_hdr = NULL;
u32 *mem_unlock_ucode = NULL;
u32 *mem_unlock_ucode_header = NULL;
u32 sec_imem_dest = 0;
u32 val = 0;
int err = 0;
nvgpu_log_fn(g, " ");
@@ -160,76 +128,10 @@ int gv100_fb_memory_unlock(struct gk20a *g)
/* Enable nvdec */
g->ops.mc.enable(g, g->ops.mc.reset_mask(g, NVGPU_UNIT_NVDEC));
/* nvdec falcon reset */
nvgpu_falcon_reset(g->nvdec_flcn);
hsbin_hdr = (struct bin_hdr *)mem_unlock_fw->data;
fw_hdr = (struct acr_fw_header *)(mem_unlock_fw->data +
hsbin_hdr->header_offset);
mem_unlock_ucode_header = (u32 *)(mem_unlock_fw->data +
fw_hdr->hdr_offset);
mem_unlock_ucode = (u32 *)(mem_unlock_fw->data +
hsbin_hdr->data_offset);
/* Patch Ucode signatures */
if (gv100_fb_acr_ucode_patch_sig(g, mem_unlock_ucode,
(u32 *)(mem_unlock_fw->data + fw_hdr->sig_prod_offset),
(u32 *)(mem_unlock_fw->data + fw_hdr->sig_dbg_offset),
(u32 *)(mem_unlock_fw->data + fw_hdr->patch_loc),
(u32 *)(mem_unlock_fw->data + fw_hdr->patch_sig)) < 0) {
nvgpu_err(g, "mem unlock patch signatures fail");
err = -EPERM;
goto exit;
}
/* Clear interrupts */
nvgpu_falcon_set_irq(g->nvdec_flcn, false, 0x0, 0x0);
/* Copy Non Secure IMEM code */
nvgpu_falcon_copy_to_imem(g->nvdec_flcn, 0,
(u8 *)&mem_unlock_ucode[
mem_unlock_ucode_header[OS_CODE_OFFSET] >> 2],
mem_unlock_ucode_header[OS_CODE_SIZE], 0, false,
GET_IMEM_TAG(mem_unlock_ucode_header[OS_CODE_OFFSET]));
/* Put secure code after non-secure block */
sec_imem_dest = GET_NEXT_BLOCK(mem_unlock_ucode_header[OS_CODE_SIZE]);
nvgpu_falcon_copy_to_imem(g->nvdec_flcn, sec_imem_dest,
(u8 *)&mem_unlock_ucode[
mem_unlock_ucode_header[APP_0_CODE_OFFSET] >> 2],
mem_unlock_ucode_header[APP_0_CODE_SIZE], 0, true,
GET_IMEM_TAG(mem_unlock_ucode_header[APP_0_CODE_OFFSET]));
/* load DMEM: ensure that signatures are patched */
nvgpu_falcon_copy_to_dmem(g->nvdec_flcn, 0, (u8 *)&mem_unlock_ucode[
mem_unlock_ucode_header[OS_DATA_OFFSET] >> 2],
mem_unlock_ucode_header[OS_DATA_SIZE], 0);
/* Write non-zero value to mailbox register which is updated by
* mem_unlock bin to denote its return status.
*/
nvgpu_falcon_mailbox_write(g->nvdec_flcn,
FALCON_MAILBOX_0, 0xdeadbeefU);
/* set BOOTVEC to start of non-secure code */
err = nvgpu_falcon_bootstrap(g->nvdec_flcn, 0);
err = nvgpu_acr_self_hs_load_bootstrap(g, g->nvdec_flcn, mem_unlock_fw,
MEM_UNLOCK_TIMEOUT );
if (err != 0) {
nvgpu_err(g, "falcon bootstrap failed %d", err);
goto exit;
}
/* wait for complete & halt */
nvgpu_falcon_wait_for_halt(g->nvdec_flcn, MEM_UNLOCK_TIMEOUT);
/* check mem unlock status */
val = nvgpu_falcon_mailbox_read(g->nvdec_flcn, FALCON_MAILBOX_0);
if (val != 0U) {
nvgpu_err(g, "memory unlock failed, err %x", val);
nvgpu_falcon_dump_stats(g->nvdec_flcn);
err = -1;
goto exit;
nvgpu_err(g, "mem unlock HS ucode failed, err-0x%x", err);
}
exit:

View File

@@ -216,5 +216,8 @@ int nvgpu_acr_lsf_sec2_ucode_details_v1(struct gk20a *g, void *lsf_ucode_img);
void nvgpu_acr_init(struct gk20a *g);
int nvgpu_acr_construct_execute(struct gk20a *g);
int nvgpu_acr_self_hs_load_bootstrap(struct gk20a *g, struct nvgpu_falcon *flcn,
struct nvgpu_firmware *hs_fw, u32 timeout);
#endif /* NVGPU_ACR_H */