gpu: nvgpu: Move SEC2 RTOS ucode to last in the WPR blob

- This change is required to reduce the WPR1 region access of the
  ACRLIB hosting falcon (the reordering is sketched below).
- By doing the above we allow only L3 read access for the ACRLIB
  hosting falcon, enforcing better security.
- Fixed freeing of ACR resources at exit upon failure.
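
Why discovering SEC2 first can place its ucode last in the generated blob: a
minimal standalone sketch, assuming the LSF manager prepends each discovered
image to a linked list and lays out WPR by walking it head to tail (hypothetical
types and falcon IDs, not the nvgpu structs):

    #include <stdio.h>

    struct img {
            int falcon_id;
            struct img *next;
    };

    /* Hypothetical manager: newly discovered images are prepended. */
    static void add_img(struct img **head, struct img *node)
    {
            node->next = *head;
            *head = node;
    }

    int main(void)
    {
            struct img sec2 = { .falcon_id = 7 };   /* ID is illustrative only */
            struct img pmu  = { .falcon_id = 0 };
            struct img fecs = { .falcon_id = 2 };
            struct img *list = NULL;

            add_img(&list, &sec2);  /* discovered first ... */
            add_img(&list, &pmu);
            add_img(&list, &fecs);

            /* ... but visited last when the blob is generated head-to-tail */
            for (struct img *p = list; p != NULL; p = p->next) {
                    printf("falcon %d\n", p->falcon_id);
            }
            return 0;
    }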

JIRA NVGPU-5459

Change-Id: I9c32a1fe723570cf3768f7e741a7a2e9d96cc1bf
Signed-off-by: mkumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2365589
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-by: Vaibhav Kachore <vkachore@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author: mkumbar <mkumbar@nvidia.com>
Date: 2020-06-23 13:37:09 +05:30
Committed-by: Alex Waterman
Parent: 7aa8447ef2
Commit: 4b206055ae

@@ -489,54 +489,74 @@ static int lsfm_add_ucode_img(struct gk20a *g, struct ls_flcn_mgr *plsfm,
 	return 0;
 }
 
-/* Discover all managed falcon ucode images */
-static int lsfm_discover_ucode_images(struct gk20a *g,
-	struct ls_flcn_mgr *plsfm)
+static int lsfm_check_and_add_ucode_image(struct gk20a *g,
+	struct ls_flcn_mgr *plsfm, u32 lsf_index)
 {
 	struct flcn_ucode_img ucode_img;
 	struct nvgpu_acr *acr = g->acr;
 	u32 falcon_id;
+	int err = 0;
+
+	if (!nvgpu_test_bit(lsf_index, (void *)&acr->lsf_enable_mask)) {
+		return err;
+	}
+
+	if (acr->lsf[lsf_index].get_lsf_ucode_details == NULL) {
+		nvgpu_err(g, "LS falcon-%d ucode fetch details not initialized",
+			lsf_index);
+		return -ENOENT;
+	}
+
+	(void) memset(&ucode_img, MEMSET_VALUE, sizeof(ucode_img));
+
+	err = acr->lsf[lsf_index].get_lsf_ucode_details(g,
+		(void *)&ucode_img);
+	if (err != 0) {
+		nvgpu_err(g, "LS falcon-%d ucode get failed", lsf_index);
+		return err;
+	}
+
+	falcon_id = ucode_img.lsf_desc->falcon_id;
+	err = lsfm_add_ucode_img(g, plsfm, &ucode_img, falcon_id);
+	if (err != 0) {
+		nvgpu_err(g, " Failed to add falcon-%d to LSFM ", falcon_id);
+		return err;
+	}
+
+	plsfm->managed_flcn_cnt++;
+
+	return err;
+}
+
+/* Discover all managed falcon ucode images */
+static int lsfm_discover_ucode_images(struct gk20a *g,
+	struct ls_flcn_mgr *plsfm)
+{
 	u32 i;
 	int err = 0;
 
+#ifdef CONFIG_NVGPU_DGPU
+	err = lsfm_check_and_add_ucode_image(g, plsfm, FALCON_ID_SEC2);
+	if (err != 0) {
+		return err;
+	}
+#endif
+
 	/*
 	 * Enumerate all constructed falcon objects, as we need the ucode
 	 * image info and total falcon count
 	 */
 	for (i = 0U; i < FALCON_ID_END; i++) {
-		if (nvgpu_test_bit(i, (void *)&acr->lsf_enable_mask) &&
-			(acr->lsf[i].get_lsf_ucode_details != NULL)) {
-
-			(void) memset(&ucode_img, MEMSET_VALUE, sizeof(ucode_img));
-			err = acr->lsf[i].get_lsf_ucode_details(g,
-				(void *)&ucode_img);
-			if (err != 0) {
-				nvgpu_err(g, "LS falcon-%d ucode get failed", i);
-				goto exit;
-			}
-
-			if (ucode_img.lsf_desc != NULL) {
-				/*
-				 * falon_id is formed by grabbing the static
-				 * base falonId from the image and adding the
-				 * engine-designated falcon instance.
-				 */
-				falcon_id = ucode_img.lsf_desc->falcon_id;
-				err = lsfm_add_ucode_img(g, plsfm, &ucode_img,
-					falcon_id);
-				if (err != 0) {
-					nvgpu_err(g, " Failed to add falcon-%d to LSFM ",
-						falcon_id);
-					goto exit;
-				}
-				plsfm->managed_flcn_cnt++;
-			}
+#ifdef CONFIG_NVGPU_DGPU
+		if (i == FALCON_ID_SEC2) {
+			continue;
+		}
+#endif
+		err = lsfm_check_and_add_ucode_image(g, plsfm, i);
+		if (err != 0) {
+			return err;
 		}
 	}
-exit:
+
 	return err;
 }
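
The hunk above hoists the per-falcon checks out of the discovery loop into
lsfm_check_and_add_ucode_image(), so the one-off SEC2 call and the loop share a
single code path with early returns. A reduced sketch of that control flow
(hypothetical mgr type and fetch callbacks, not the nvgpu API):

    #include <errno.h>

    #define N_IDS 8U

    struct mgr {
            unsigned long enable_mask;
            int (*fetch[N_IDS])(unsigned int id);
            int managed_cnt;
    };

    /* Shared path: validate, fetch, then account for one falcon index. */
    static int check_and_add(struct mgr *m, unsigned int id)
    {
            if ((m->enable_mask & (1UL << id)) == 0UL) {
                    return 0;               /* not enabled: skip silently */
            }
            if (m->fetch[id] == NULL) {
                    return -ENOENT;         /* enabled but no fetch hook */
            }
            if (m->fetch[id](id) != 0) {
                    return -EINVAL;
            }
            m->managed_cnt++;
            return 0;
    }

    /* Special index handled first, then everything else exactly once. */
    static int discover(struct mgr *m, unsigned int first_id)
    {
            int err = check_and_add(m, first_id);
            unsigned int i;

            for (i = 0U; (err == 0) && (i < N_IDS); i++) {
                    if (i == first_id) {
                            continue;       /* already handled above */
                    }
                    err = check_and_add(m, i);
            }
            return err;
    }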
@@ -1007,14 +1027,14 @@ int nvgpu_acr_prepare_ucode_blob(struct gk20a *g)
 	err = lsfm_discover_ucode_images(g, plsfm);
 	nvgpu_acr_dbg(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
 	if (err != 0) {
-		goto exit_err;
+		goto cleanup_exit;
 	}
 
 #ifdef CONFIG_NVGPU_DGPU
 	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR)) {
 		err = lsfm_discover_and_add_sub_wprs(g, plsfm);
 		if (err != 0) {
-			goto exit_err;
+			goto cleanup_exit;
 		}
 	}
 #endif
@@ -1024,14 +1044,14 @@ int nvgpu_acr_prepare_ucode_blob(struct gk20a *g)
 	/* Generate WPR requirements */
 	err = lsf_gen_wpr_requirements(g, plsfm);
 	if (err != 0) {
-		goto exit_err;
+		goto cleanup_exit;
 	}
 
 	/* Alloc memory to hold ucode blob contents */
 	err = g->acr->alloc_blob_space(g, plsfm->wpr_size,
 		&g->acr->ucode_blob);
 	if (err != 0) {
-		goto exit_err;
+		goto cleanup_exit;
 	}
 
 	nvgpu_acr_dbg(g, "managed LS falcon %d, WPR size %d bytes.\n",
@@ -1040,15 +1060,14 @@ int nvgpu_acr_prepare_ucode_blob(struct gk20a *g)
 		err = lsfm_init_wpr_contents(g, plsfm, &g->acr->ucode_blob);
 		if (err != 0) {
 			nvgpu_kfree(g, &g->acr->ucode_blob);
-			goto free_acr;
+			goto cleanup_exit;
 		}
 	} else {
 		nvgpu_acr_dbg(g, "LSFM is managing no falcons.\n");
 	}
 	nvgpu_acr_dbg(g, "prepare ucode blob return 0\n");
-free_acr:
+cleanup_exit:
 	free_acr_resources(g, plsfm);
-exit_err:
 	return err;
 }
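
The last hunk collapses the two exit labels (free_acr, exit_err) into a single
cleanup_exit label that every path, success or failure, falls through, so
free_acr_resources() can no longer be skipped. A generic sketch of the
single-cleanup-label idiom (hypothetical helpers, not the nvgpu functions):

    #include <stdlib.h>
    #include <string.h>

    /* Build an output buffer; scratch state is freed on every path. */
    static int prepare_blob(unsigned char **blob_out, size_t sz)
    {
            unsigned char *scratch;
            int err = 0;

            scratch = calloc(1U, sz);       /* working state, freed below */
            if (scratch == NULL) {
                    return -1;              /* nothing allocated yet */
            }

            *blob_out = malloc(sz);         /* the real output, kept on success */
            if (*blob_out == NULL) {
                    err = -1;
                    goto cleanup_exit;      /* one label: the free cannot be skipped */
            }
            memcpy(*blob_out, scratch, sz);

    cleanup_exit:
            free(scratch);                  /* runs on success and failure alike */
            return err;
    }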