From 1e8b88fcc11b7f5c47039a3b708abdaa2d2f70b9 Mon Sep 17 00:00:00 2001
From: Nicolas Benech
Date: Thu, 4 Apr 2019 14:41:31 -0400
Subject: [PATCH] gpu: nvgpu: fix MISRA 17.7 violations in ACR

MISRA Rule-17.7 requires the return value of all functions to be used.
Fix is either to use the return value or change the function to return
void. This patch contains fixes for all 17.7 violations in common/acr
code.

JIRA NVGPU-3032

Change-Id: I79dbbcca72f50d5c0b0614d6c4e573c5f856ceb4
Signed-off-by: Nicolas Benech
Reviewed-on: https://git-master.nvidia.com/r/2090043
Reviewed-by: svc-mobile-coverity
Reviewed-by: svc-mobile-misra
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Deepak Goyal
Reviewed-by: Adeel Raza
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 .../nvgpu/common/acr/acr_blob_construct_v0.c | 57 ++++++++++++++-----
 .../nvgpu/common/acr/acr_blob_construct_v1.c | 36 ++++++++----
 drivers/gpu/nvgpu/common/acr/acr_bootstrap.c |  6 +-
 3 files changed, 74 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v0.c b/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v0.c
index e814ecf5e..69859d933 100644
--- a/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v0.c
+++ b/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v0.c
@@ -631,31 +631,42 @@ static int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
 static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 		struct lsfm_managed_ucode_img *pnode)
 {
+	int err = -ENOENT;
 	if (pnode->wpr_header.falcon_id != FALCON_ID_PMU) {
 		nvgpu_acr_dbg(g, "non pmu. write flcn bl gen desc\n");
-		gm20b_flcn_populate_bl_dmem_desc(g,
+		err = gm20b_flcn_populate_bl_dmem_desc(g,
 			pnode, &pnode->bl_gen_desc_size,
 			pnode->wpr_header.falcon_id);
-		return 0;
+		if (err != 0) {
+			nvgpu_err(g, "flcn_populate_bl_dmem_desc failed=%d",
+				err);
+		}
+		return err;
 	}
 
 	if (pnode->wpr_header.falcon_id == FALCON_ID_PMU) {
 		nvgpu_acr_dbg(g, "pmu write flcn bl gen desc\n");
-		return gm20b_pmu_populate_loader_cfg(g, pnode,
+		err = gm20b_pmu_populate_loader_cfg(g, pnode,
 			&pnode->bl_gen_desc_size);
+		if (err != 0) {
+			nvgpu_err(g, "pmu_populate_loader_cfg failed=%d",
+				err);
+		}
+		return err;
 	}
 
 	/* Failed to find the falcon requested. */
-	return -ENOENT;
+	return err;
 }
 
-static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm,
+static int lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm,
 	struct nvgpu_mem *ucode)
 {
 	struct lsfm_managed_ucode_img *pnode = plsfm->ucode_img_list;
 	struct lsf_wpr_header last_wpr_hdr;
 	u32 i;
+	int err = 0;
 
 	/* The WPR array is at the base of the WPR */
 	pnode = plsfm->ucode_img_list;
@@ -719,7 +730,11 @@ static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm,
 	/* If this falcon has a boot loader and related args, flush them */
 	if (pnode->ucode_img.header == NULL) {
 		/* Populate gen bl and flush to memory */
-		lsfm_fill_flcn_bl_gen_desc(g, pnode);
+		err = lsfm_fill_flcn_bl_gen_desc(g, pnode);
+		if (err != 0) {
+			nvgpu_err(g, "bl_gen_desc failed err=%d", err);
+			return err;
+		}
 		nvgpu_mem_wr_n(g, ucode,
 			pnode->lsb_header.bl_data_off,
 			&pnode->bl_gen_desc,
@@ -740,6 +755,7 @@ static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm,
 		(u32)sizeof(struct lsf_wpr_header),
 		&last_wpr_hdr,
 		(u32)sizeof(struct lsf_wpr_header));
+	return err;
 }
 
 /* Free any ucode image structure resources. */
@@ -798,8 +814,17 @@ int nvgpu_acr_prepare_ucode_blob_v0(struct gk20a *g)
 	plsfm = &lsfm_l;
 	(void) memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr));
 	nvgpu_acr_dbg(g, "fetching GMMU regs\n");
-	g->ops.fb.vpr_info_fetch(g);
-	nvgpu_gr_falcon_init_ctxsw_ucode(g);
+	err = g->ops.fb.vpr_info_fetch(g);
+	if (err != 0) {
+		nvgpu_err(g, "fb.vpr_info_fetch failed err=%d", err);
+		return err;
+	}
+
+	err = nvgpu_gr_falcon_init_ctxsw_ucode(g);
+	if (err != 0) {
+		nvgpu_err(g, "gr_falcon_init_ctxsw_ucode failed err=%d", err);
+		return err;
+	}
 
 	g->acr->get_wpr_info(g, &wpr_inf);
 	nvgpu_acr_dbg(g, "wpr carveout base:%llx\n", wpr_inf.wpr_base);
@@ -809,7 +834,7 @@ int nvgpu_acr_prepare_ucode_blob_v0(struct gk20a *g)
 	err = lsfm_discover_ucode_images(g, plsfm);
 	nvgpu_acr_dbg(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
 	if (err != 0) {
-		goto free_sgt;
+		goto exit_err;
 	}
 
 	if ((plsfm->managed_flcn_cnt != 0U) &&
@@ -817,24 +842,30 @@ int nvgpu_acr_prepare_ucode_blob_v0(struct gk20a *g)
 		/* Generate WPR requirements */
 		err = lsf_gen_wpr_requirements(g, plsfm);
 		if (err != 0) {
-			goto free_sgt;
+			goto exit_err;
 		}
 
 		/* Alloc memory to hold ucode blob contents */
 		err = g->acr->alloc_blob_space(g, plsfm->wpr_size
 			, &g->acr->ucode_blob);
 		if (err != 0) {
-			goto free_sgt;
+			goto exit_err;
 		}
 		nvgpu_acr_dbg(g, "managed LS falcon %d, WPR size %d bytes.\n",
 			plsfm->managed_flcn_cnt, plsfm->wpr_size);
 
-		lsfm_init_wpr_contents(g, plsfm, &g->acr->ucode_blob);
+		err = lsfm_init_wpr_contents(g, plsfm, &g->acr->ucode_blob);
+		if (err != 0) {
+			nvgpu_kfree(g, &g->acr->ucode_blob);
+			goto free_acr;
+		}
 	} else {
 		nvgpu_acr_dbg(g, "LSFM is managing no falcons.\n");
 	}
 	nvgpu_acr_dbg(g, "prepare ucode blob return 0\n");
+
+free_acr:
 	free_acr_resources(g, plsfm);
-free_sgt:
+exit_err:
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v1.c b/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v1.c
index ef2b1d89a..4ab5fd7f6 100644
--- a/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v1.c
+++ b/drivers/gpu/nvgpu/common/acr/acr_blob_construct_v1.c
@@ -751,7 +751,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 		pnode->wpr_header.falcon_id);
 }
 
-static u32 lsfm_init_sub_wpr_contents(struct gk20a *g,
+static void lsfm_init_sub_wpr_contents(struct gk20a *g,
 	struct ls_flcn_mgr_v1 *plsfm, struct nvgpu_mem *ucode)
 {
 	struct lsfm_sub_wpr *psub_wpr_node;
@@ -782,17 +782,16 @@ static u32 lsfm_init_sub_wpr_contents(struct gk20a *g,
 	nvgpu_mem_wr_n(g, ucode, sub_wpr_header_offset +
 		(plsfm->managed_sub_wpr_count * temp_size),
 		&last_sub_wpr_header, temp_size);
-
-	return 0;
 }
 
-static void lsfm_init_wpr_contents(struct gk20a *g,
+static int lsfm_init_wpr_contents(struct gk20a *g,
 	struct ls_flcn_mgr_v1 *plsfm, struct nvgpu_mem *ucode)
 {
 	struct lsfm_managed_ucode_img_v2 *pnode = plsfm->ucode_img_list;
 	struct lsf_wpr_header_v1 last_wpr_hdr;
 	u32 i;
 	u64 tmp;
+	int err = 0;
 
 	/* The WPR array is at the base of the WPR */
 	pnode = plsfm->ucode_img_list;
@@ -862,7 +861,11 @@ static void lsfm_init_wpr_contents(struct gk20a *g,
 	 */
 	if (pnode->ucode_img.header == NULL) {
 		/* Populate gen bl and flush to memory */
-		lsfm_fill_flcn_bl_gen_desc(g, pnode);
+		err = lsfm_fill_flcn_bl_gen_desc(g, pnode);
+		if (err != 0) {
+			nvgpu_err(g, "bl_gen_desc failed err=%d", err);
+			return err;
+		}
 		nvgpu_mem_wr_n(g, ucode, pnode->lsb_header.bl_data_off,
 			&pnode->bl_gen_desc, pnode->bl_gen_desc_size);
 	}
@@ -880,6 +883,8 @@ static void lsfm_init_wpr_contents(struct gk20a *g,
 	nvgpu_assert(tmp <= U32_MAX);
 	nvgpu_mem_wr_n(g, ucode, (u32)tmp,
 		&last_wpr_hdr, (u32)sizeof(struct lsf_wpr_header_v1));
+
+	return err;
 }
 
 /* Free any ucode image structure resources. */
@@ -938,7 +943,11 @@ int nvgpu_acr_prepare_ucode_blob_v1(struct gk20a *g)
 	plsfm = &lsfm_l;
 	(void) memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr_v1));
 
-	nvgpu_gr_falcon_init_ctxsw_ucode(g);
+	err = nvgpu_gr_falcon_init_ctxsw_ucode(g);
+	if (err != 0) {
+		nvgpu_err(g, "gr_falcon_init_ctxsw_ucode failed err=%d", err);
+		return err;
+	}
 
 	g->acr->get_wpr_info(g, &wpr_inf);
 	nvgpu_acr_dbg(g, "wpr carveout base:%llx\n", (wpr_inf.wpr_base));
@@ -967,8 +976,8 @@ int nvgpu_acr_prepare_ucode_blob_v1(struct gk20a *g)
 		}
 
 		/* Alloc memory to hold ucode blob contents */
-		err = g->acr->alloc_blob_space(g, plsfm->wpr_size
-			,&g->acr->ucode_blob);
+		err = g->acr->alloc_blob_space(g, plsfm->wpr_size,
+			&g->acr->ucode_blob);
 		if (err != 0) {
 			goto exit_err;
 		}
@@ -976,13 +985,18 @@ int nvgpu_acr_prepare_ucode_blob_v1(struct gk20a *g)
 		nvgpu_acr_dbg(g, "managed LS falcon %d, WPR size %d bytes.\n",
 			plsfm->managed_flcn_cnt, plsfm->wpr_size);
 
-		lsfm_init_wpr_contents(g, plsfm, &g->acr->ucode_blob);
+		err = lsfm_init_wpr_contents(g, plsfm, &g->acr->ucode_blob);
+		if (err != 0) {
+			nvgpu_kfree(g, &g->acr->ucode_blob);
+			goto free_acr;
+		}
 	} else {
 		nvgpu_acr_dbg(g, "LSFM is managing no falcons.\n");
 	}
 	nvgpu_acr_dbg(g, "prepare ucode blob return 0\n");
 
-	free_acr_resources(g, plsfm);
-	exit_err:
+free_acr:
+	free_acr_resources(g, plsfm);
+exit_err:
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/common/acr/acr_bootstrap.c b/drivers/gpu/nvgpu/common/acr/acr_bootstrap.c
index 7b69c7380..762d47214 100644
--- a/drivers/gpu/nvgpu/common/acr/acr_bootstrap.c
+++ b/drivers/gpu/nvgpu/common/acr/acr_bootstrap.c
@@ -370,7 +370,11 @@ int nvgpu_acr_self_hs_load_bootstrap(struct gk20a *g, struct nvgpu_falcon *flcn,
 	int err = 0;
 
 	/* falcon reset */
-	nvgpu_falcon_reset(flcn);
+	err = nvgpu_falcon_reset(flcn);
+	if (err != 0) {
+		nvgpu_err(g, "nvgpu_falcon_reset() failed err=%d", err);
+		return err;
+	}
 
 	bin_hdr = (struct bin_hdr *)hs_fw->data;
 	fw_hdr = (struct acr_fw_header *)(hs_fw->data + bin_hdr->header_offset);
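
Note: the remediation pattern applied throughout the hunks above is the one
MISRA C:2012 Rule 17.7 asks for: either consume a callee's return value and
propagate the error, or change a helper whose status nobody can act on to
return void. The snippet below is a minimal standalone sketch of both fixes;
it is not nvgpu code, and every identifier in it (demo_reset, demo_log,
demo_load) is hypothetical.

/*
 * Minimal standalone sketch of the two MISRA C:2012 Rule 17.7 fixes used
 * in the patch above. All identifiers here are hypothetical and do not
 * exist in the nvgpu driver.
 */
#include <stdio.h>

/* A step that can fail: returns 0 on success, a negative value on error. */
static int demo_reset(int unit)
{
	return (unit >= 0) ? 0 : -1;
}

/*
 * Fix 1: a helper whose status callers cannot act on is changed to return
 * void, so no caller is left silently discarding a return value.
 */
static void demo_log(const char *msg)
{
	/* printf's character count is deliberately discarded with a cast. */
	(void) printf("%s\n", msg);
}

/*
 * Fix 2: the caller stores the callee's return value, checks it, logs the
 * failure, and propagates the error instead of ignoring it.
 */
static int demo_load(int unit)
{
	int err;

	err = demo_reset(unit);	/* was: demo_reset(unit);  (17.7 violation) */
	if (err != 0) {
		demo_log("demo_reset failed");
		return err;
	}

	demo_log("unit ready");
	return 0;
}

int main(void)
{
	return (demo_load(0) == 0) ? 0 : 1;
}

This mirrors the shape of the changes in lsfm_fill_flcn_bl_gen_desc() and
nvgpu_acr_self_hs_load_bootstrap(): the return value is captured in err, an
nvgpu_err() message is emitted on failure, and err is returned to the caller.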