mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: avoid dereferencing gr in acr
Added a utility function to get the gr falcon pointer so that ACR code
no longer dereferences gr directly:

    struct nvgpu_gr_falcon *nvgpu_gr_get_falcon_ptr(struct gk20a *g);

JIRA NVGPU-3168

Change-Id: I8f05cdbcd5d3e52c585df54f93cf065685733e5d
Signed-off-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2114214
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vinod Gopalakrishnakurup <vinodg@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by: mobile promotions
parent: 47f652e0f9
commit: 21f04a94af
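For reference, a minimal sketch of the accessor named above, assuming the gr
unit keeps its falcon state in a falcon member of struct nvgpu_gr (as the
g->gr->falcon dereferences removed by this change imply); the real helper is
exposed through the newly included <nvgpu/gr/gr_utils.h> and may differ in
detail:

/*
 * Hedged sketch, not the verbatim nvgpu implementation: return the gr
 * falcon pointer so ACR code does not dereference g->gr directly.
 */
struct nvgpu_gr_falcon *nvgpu_gr_get_falcon_ptr(struct gk20a *g)
{
        return g->gr->falcon;
}

Callers such as nvgpu_acr_lsf_fecs_ucode_details_v0() then fetch the pointer
once and pass it to the gr falcon helpers, as the hunks below show.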
@@ -28,8 +28,8 @@
 #include <nvgpu/bug.h>
 #include <nvgpu/gr/gr_falcon.h>
 #include <nvgpu/pmu/fw.h>
+#include <nvgpu/gr/gr_utils.h>
 
-#include "common/gr/gr_priv.h"
 #include "acr_blob_construct_v0.h"
 #include "acr_falcon_bl.h"
 #include "acr_wpr.h"
@@ -75,8 +75,9 @@ int nvgpu_acr_lsf_fecs_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)
 	struct lsf_ucode_desc *lsf_desc;
 	struct nvgpu_firmware *fecs_sig;
 	struct flcn_ucode_img *p_img = (struct flcn_ucode_img *)lsf_ucode_img;
+	struct nvgpu_gr_falcon *gr_falcon = nvgpu_gr_get_falcon_ptr(g);
 	struct nvgpu_ctxsw_ucode_segments *fecs =
-		nvgpu_gr_falcon_get_fecs_ucode_segments(g->gr->falcon);
+		nvgpu_gr_falcon_get_fecs_ucode_segments(gr_falcon);
 	int err;
 
 	fecs_sig = nvgpu_request_firmware(g, GM20B_FECS_UCODE_SIG, 0);
@@ -118,7 +119,7 @@ int nvgpu_acr_lsf_fecs_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)
 	p_img->desc->app_resident_data_offset =
 		fecs->data.offset - fecs->code.offset;
 	p_img->desc->app_resident_data_size = fecs->data.size;
-	p_img->data = nvgpu_gr_falcon_get_surface_desc_cpu_va(g->gr->falcon);
+	p_img->data = nvgpu_gr_falcon_get_surface_desc_cpu_va(gr_falcon);
 	p_img->data_size = p_img->desc->image_size;
 
 	p_img->fw_ver = NULL;
@@ -139,8 +140,9 @@ int nvgpu_acr_lsf_gpccs_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)
 	struct lsf_ucode_desc *lsf_desc;
 	struct nvgpu_firmware *gpccs_sig;
 	struct flcn_ucode_img *p_img = (struct flcn_ucode_img *)lsf_ucode_img;
+	struct nvgpu_gr_falcon *gr_falcon = nvgpu_gr_get_falcon_ptr(g);
 	struct nvgpu_ctxsw_ucode_segments *gpccs =
-		nvgpu_gr_falcon_get_gpccs_ucode_segments(g->gr->falcon);
+		nvgpu_gr_falcon_get_gpccs_ucode_segments(gr_falcon);
 	int err;
 
 	if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) {
@@ -187,7 +189,7 @@ int nvgpu_acr_lsf_gpccs_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)
 		ALIGN(gpccs->code.offset, 256);
 	p_img->desc->app_resident_data_size = ALIGN(gpccs->data.size, 256);
 	p_img->data = (u32 *)
-		((u8 *)nvgpu_gr_falcon_get_surface_desc_cpu_va(g->gr->falcon) +
+		((u8 *)nvgpu_gr_falcon_get_surface_desc_cpu_va(gr_falcon) +
 		gpccs->boot.offset);
 	p_img->data_size = ALIGN(p_img->desc->image_size, 256);
 	p_img->fw_ver = NULL;
@@ -802,6 +804,7 @@ int nvgpu_acr_prepare_ucode_blob_v0(struct gk20a *g)
 	int err = 0;
 	struct ls_flcn_mgr lsfm_l, *plsfm;
 	struct wpr_carveout_info wpr_inf;
+	struct nvgpu_gr_falcon *gr_falcon = nvgpu_gr_get_falcon_ptr(g);
 
 	if (g->acr->ucode_blob.cpu_va != NULL) {
 		/* Recovery case, we do not need to form non WPR blob */
@@ -816,7 +819,7 @@ int nvgpu_acr_prepare_ucode_blob_v0(struct gk20a *g)
 		return err;
 	}
 
-	err = nvgpu_gr_falcon_init_ctxsw_ucode(g, g->gr->falcon);
+	err = nvgpu_gr_falcon_init_ctxsw_ucode(g, gr_falcon);
 	if (err != 0) {
 		nvgpu_err(g, "gr_falcon_init_ctxsw_ucode failed err=%d", err);
 		return err;

@@ -26,7 +26,7 @@
 #include <nvgpu/string.h>
 #include <nvgpu/bug.h>
 #include <nvgpu/gr/gr_falcon.h>
-#include "common/gr/gr_priv.h"
+#include <nvgpu/gr/gr_utils.h>
 
 #include "acr_blob_construct_v1.h"
 #include "acr_falcon_bl.h"
@@ -82,8 +82,9 @@ int nvgpu_acr_lsf_fecs_ucode_details_v1(struct gk20a *g, void *lsf_ucode_img)
 	struct nvgpu_firmware *fecs_sig = NULL;
 	struct flcn_ucode_img_v1 *p_img =
 		(struct flcn_ucode_img_v1 *)lsf_ucode_img;
+	struct nvgpu_gr_falcon *gr_falcon = nvgpu_gr_get_falcon_ptr(g);
 	struct nvgpu_ctxsw_ucode_segments *fecs =
-		nvgpu_gr_falcon_get_fecs_ucode_segments(g->gr->falcon);
+		nvgpu_gr_falcon_get_fecs_ucode_segments(gr_falcon);
 	int err;
 
 	switch (ver) {
@@ -140,7 +141,7 @@ int nvgpu_acr_lsf_fecs_ucode_details_v1(struct gk20a *g, void *lsf_ucode_img)
 	p_img->desc->app_resident_data_offset = fecs->data.offset -
 		fecs->code.offset;
 	p_img->desc->app_resident_data_size = fecs->data.size;
-	p_img->data = nvgpu_gr_falcon_get_surface_desc_cpu_va(g->gr->falcon);
+	p_img->data = nvgpu_gr_falcon_get_surface_desc_cpu_va(gr_falcon);
 	p_img->data_size = p_img->desc->image_size;
 
 	p_img->fw_ver = NULL;
@@ -166,8 +167,9 @@ int nvgpu_acr_lsf_gpccs_ucode_details_v1(struct gk20a *g, void *lsf_ucode_img)
 	struct nvgpu_firmware *gpccs_sig = NULL;
 	struct flcn_ucode_img_v1 *p_img =
 		(struct flcn_ucode_img_v1 *)lsf_ucode_img;
+	struct nvgpu_gr_falcon *gr_falcon = nvgpu_gr_get_falcon_ptr(g);
 	struct nvgpu_ctxsw_ucode_segments *gpccs =
-		nvgpu_gr_falcon_get_gpccs_ucode_segments(g->gr->falcon);
+		nvgpu_gr_falcon_get_gpccs_ucode_segments(gr_falcon);
 	int err;
 
 	if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) {
@@ -228,7 +230,7 @@ int nvgpu_acr_lsf_gpccs_ucode_details_v1(struct gk20a *g, void *lsf_ucode_img)
 		ALIGN(gpccs->code.offset, 256);
 	p_img->desc->app_resident_data_size = ALIGN(gpccs->data.size, 256);
 	p_img->data = (u32 *)
-		((u8 *)nvgpu_gr_falcon_get_surface_desc_cpu_va(g->gr->falcon) +
+		((u8 *)nvgpu_gr_falcon_get_surface_desc_cpu_va(gr_falcon) +
 		gpccs->boot.offset);
 	p_img->data_size = ALIGN(p_img->desc->image_size, 256);
 	p_img->fw_ver = NULL;
@@ -930,6 +932,7 @@ int nvgpu_acr_prepare_ucode_blob_v1(struct gk20a *g)
 	int err = 0;
 	struct ls_flcn_mgr_v1 lsfm_l, *plsfm;
 	struct wpr_carveout_info wpr_inf;
+	struct nvgpu_gr_falcon *gr_falcon = nvgpu_gr_get_falcon_ptr(g);
 
 	/* Recovery case, we do not need to form non WPR blob of ucodes */
 	if (g->acr->ucode_blob.cpu_va != NULL) {
@@ -938,7 +941,7 @@ int nvgpu_acr_prepare_ucode_blob_v1(struct gk20a *g)
 
 	plsfm = &lsfm_l;
 	(void) memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr_v1));
-	err = nvgpu_gr_falcon_init_ctxsw_ucode(g, g->gr->falcon);
+	err = nvgpu_gr_falcon_init_ctxsw_ucode(g, gr_falcon);
 	if (err != 0) {
 		nvgpu_err(g, "gr_falcon_init_ctxsw_ucode failed err=%d", err);
 		return err;