gpu: nvgpu: pass non-zero ucode blob size for rail-gating

A ucode blob size of 0 is currently passed for rail-gating, but ACR
does not support a zero blob size yet. Until it does, pass the full
blob size, so ACR will copy the ucode blob to SYSMEM again on every
GPU rail-gating cycle.
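
In effect, the recovery path keeps the same full-size blob handling as
cold boot for now. A minimal standalone sketch of the size selection
this implies (illustrative C only: the struct, patch_blob_size(), and
both boolean parameters are stand-ins, not nvgpu APIs):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the ACR sysmem descriptor, not the nvgpu type. */
struct acr_sysmem_desc_sketch {
	uint32_t nonwpr_ucode_blob_size;
};

/*
 * Size selection implied by this change: until ACR accepts a zero blob
 * size for ucode already authenticated at cold boot, a rail-gating
 * recovery boot passes the full blob size again, exactly like cold boot.
 */
static void patch_blob_size(struct acr_sysmem_desc_sketch *desc,
			    uint32_t full_blob_size, bool is_recovery,
			    bool acr_supports_zero_size)
{
	if (is_recovery && acr_supports_zero_size) {
		desc->nonwpr_ucode_blob_size = 0U;	/* future TODO path */
	} else {
		desc->nonwpr_ucode_blob_size = full_blob_size;	/* current */
	}
}

int main(void)
{
	struct acr_sysmem_desc_sketch d;

	patch_blob_size(&d, 0x40000U, true, false);	/* today's rail-gate case */
	printf("blob size on recovery: 0x%x\n", (unsigned)d.nonwpr_ucode_blob_size);
	return 0;
}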

Bug 3361416

Change-Id: I1fdb3993cda7e5d62507d83f9c0a8645dc5f7fc7
Signed-off-by: deepak goyal <dgoyal@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2588207
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author: deepak goyal
Date: 2021-09-02 12:39:26 +05:30
Committed by: mobile promotions
commit cc7b048641 (parent b27524916a)
2 changed files with 24 additions and 17 deletions


@@ -357,7 +357,7 @@ int nvgpu_acr_bootstrap_hs_ucode_riscv(struct gk20a *g, struct nvgpu_acr *acr)
 		nvgpu_err(g, "RISCV ucode loading failed");
 		return -EINVAL;
 	}
-
+	// TODO: Based on Railgating/Cold boot use True/False flag with this call.
 	err = acr->patch_wpr_info_to_ucode(g, acr, &acr->acr_asc, false);
 	if (err != 0) {
 		nvgpu_err(g, "RISCV ucode patch wpr info failed");


@@ -81,13 +81,14 @@ static int ga10b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
 	/*
 	 * In case of recovery ucode blob size is 0 as it has already
 	 * been authenticated during cold boot.
+	 * TODO: Set blob size as 0x0
+	 * i.e. nonwpr_ucode_blob_size = RECOVERY_UCODE_BLOB_SIZE
+	 * and call with true flag.
 	 */
 	if (!nvgpu_mem_is_valid(&acr_desc->acr_falcon2_sysmem_desc)) {
 		nvgpu_err(g, "invalid mem acr_falcon2_sysmem_desc");
 		return -EINVAL;
 	}
-	acr_sysmem_desc->nonwpr_ucode_blob_size =
-		RECOVERY_UCODE_BLOB_SIZE;
 	} else
 #endif
 	{
@@ -95,34 +96,40 @@ static int ga10b_acr_patch_wpr_info_to_ucode(struct gk20a *g,
 	 * Alloc space for sys mem space to which interface struct is
 	 * copied.
 	 */
-	if (nvgpu_mem_is_valid(acr_falcon2_sysmem_desc)) {
-		acr_sysmem_desc->nonwpr_ucode_blob_size =
-			RECOVERY_UCODE_BLOB_SIZE;
-		goto load;
-	}
-	err = nvgpu_dma_alloc_flags_sys(g,
-			NVGPU_DMA_PHYSICALLY_ADDRESSED,
-			sizeof(struct flcn2_acr_desc),
-			acr_falcon2_sysmem_desc);
-	if (err != 0) {
-		goto end;
-	}
+	if (!nvgpu_mem_is_valid(acr_falcon2_sysmem_desc)) {
+		err = nvgpu_dma_alloc_flags_sys(g,
+				NVGPU_DMA_PHYSICALLY_ADDRESSED,
+				sizeof(struct flcn2_acr_desc),
+				acr_falcon2_sysmem_desc);
+		if (err != 0) {
+			nvgpu_err(g, "alloc for sysmem desc failed");
+			goto end;
+		}
+	} else {
+		/*
+		 * TODO: Set blob size as 0x0.
+		 * i.e.nonwpr_ucode_blob_size=RECOVERY_UCODE_BLOB_SIZE
+		 * and call with true flag.
+		 */
+		goto load;
+	}
 #ifdef CONFIG_NVGPU_LS_PMU
 	if(g->support_ls_pmu &&
 		nvgpu_is_enabled(g, NVGPU_PMU_NEXT_CORE_ENABLED)) {
 		err = nvgpu_dma_alloc_flags_sys(g,
 			NVGPU_DMA_PHYSICALLY_ADDRESSED,
 			sizeof(struct falcon_next_core_ucode_desc),
 			ls_pmu_desc);
 		if (err != 0) {
 			goto end;
 		}
 		fw_desc = nvgpu_pmu_fw_desc_desc(g, g->pmu);
-		nvgpu_mem_wr_n(g, ls_pmu_desc, 0U, fw_desc->data,
-			sizeof(struct falcon_next_core_ucode_desc));
+		nvgpu_mem_wr_n(g, ls_pmu_desc, 0U,
+			fw_desc->data,
+			sizeof(struct falcon_next_core_ucode_desc));
 		acr_sysmem_desc->ls_pmu_desc =
 			nvgpu_mem_get_addr(g, ls_pmu_desc);
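
Taken together, the patched ga10b_acr_patch_wpr_info_to_ucode flow is:
allocate the falcon2 sysmem descriptor only when it is not yet valid
(cold boot), and on later rail-gating cycles reuse it and jump straight
to the load step, still with a non-zero blob size. A condensed, runnable
sketch of that control flow (plain C with the nvgpu calls replaced by
prints; all names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for "acr_falcon2_sysmem_desc is valid". */
static bool sysmem_desc_valid = false;

/*
 * Condensed control flow of the patched function: allocate the sysmem
 * descriptor once on cold boot; on a rail-gating recovery boot the
 * descriptor is still valid, so skip allocation and jump to the load
 * step. The blob size stays non-zero on both paths for now.
 */
static int patch_wpr_info_sketch(void)
{
	if (!sysmem_desc_valid) {
		/* Cold boot: allocate the descriptor in SYSMEM. */
		sysmem_desc_valid = true;
		printf("cold boot: allocated sysmem descriptor\n");
	} else {
		/* Recovery: reuse it; the TODO would also set size 0 here. */
		printf("recovery: reusing sysmem descriptor\n");
		goto load;
	}
	/* ... LS PMU descriptor setup elided ... */
load:
	return 0;
}

int main(void)
{
	patch_wpr_info_sketch();	/* cold boot */
	patch_wpr_info_sketch();	/* simulated rail-gating cycle */
	return 0;
}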