gpu: nvgpu: avoid dereferencing gr in acr

Added utility function to get gr falcon pointer to avoid direct
de-referencing gr in acr.

struct nvgpu_gr_falcon *nvgpu_gr_get_falcon_ptr(struct gk20a *g);

JIRA NVGPU-3168

Change-Id: I8f05cdbcd5d3e52c585df54f93cf065685733e5d
Signed-off-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2114214
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vinod Gopalakrishnakurup <vinodg@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Seshendra Gadagottu
2019-05-07 18:05:59 -07:00
committed by mobile promotions
parent 47f652e0f9
commit 21f04a94af
6 changed files with 84 additions and 12 deletions

View File

@@ -50,6 +50,7 @@ nvgpu-y += \
common/ltc/ltc.o \ common/ltc/ltc.o \
common/fbp/fbp.o \ common/fbp/fbp.o \
common/cbc/cbc.o \ common/cbc/cbc.o \
common/gr/gr_utils.o \
common/gr/gr.o \ common/gr/gr.o \
common/gr/gr_intr.o \ common/gr/gr_intr.o \
common/gr/global_ctx.o \ common/gr/global_ctx.o \

View File

@@ -108,6 +108,7 @@ srcs += common/sim/sim.c \
common/engine_queues/engine_emem_queue.c \ common/engine_queues/engine_emem_queue.c \
common/engine_queues/engine_fb_queue.c \ common/engine_queues/engine_fb_queue.c \
common/gr/gr.c \ common/gr/gr.c \
common/gr/gr_utils.c \
common/gr/gr_intr.c \ common/gr/gr_intr.c \
common/gr/global_ctx.c \ common/gr/global_ctx.c \
common/gr/subctx.c \ common/gr/subctx.c \

View File

@@ -28,8 +28,8 @@
#include <nvgpu/bug.h> #include <nvgpu/bug.h>
#include <nvgpu/gr/gr_falcon.h> #include <nvgpu/gr/gr_falcon.h>
#include <nvgpu/pmu/fw.h> #include <nvgpu/pmu/fw.h>
#include <nvgpu/gr/gr_utils.h>
#include "common/gr/gr_priv.h"
#include "acr_blob_construct_v0.h" #include "acr_blob_construct_v0.h"
#include "acr_falcon_bl.h" #include "acr_falcon_bl.h"
#include "acr_wpr.h" #include "acr_wpr.h"
@@ -75,8 +75,9 @@ int nvgpu_acr_lsf_fecs_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)
struct lsf_ucode_desc *lsf_desc; struct lsf_ucode_desc *lsf_desc;
struct nvgpu_firmware *fecs_sig; struct nvgpu_firmware *fecs_sig;
struct flcn_ucode_img *p_img = (struct flcn_ucode_img *)lsf_ucode_img; struct flcn_ucode_img *p_img = (struct flcn_ucode_img *)lsf_ucode_img;
struct nvgpu_gr_falcon *gr_falcon = nvgpu_gr_get_falcon_ptr(g);
struct nvgpu_ctxsw_ucode_segments *fecs = struct nvgpu_ctxsw_ucode_segments *fecs =
nvgpu_gr_falcon_get_fecs_ucode_segments(g->gr->falcon); nvgpu_gr_falcon_get_fecs_ucode_segments(gr_falcon);
int err; int err;
fecs_sig = nvgpu_request_firmware(g, GM20B_FECS_UCODE_SIG, 0); fecs_sig = nvgpu_request_firmware(g, GM20B_FECS_UCODE_SIG, 0);
@@ -118,7 +119,7 @@ int nvgpu_acr_lsf_fecs_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)
p_img->desc->app_resident_data_offset = p_img->desc->app_resident_data_offset =
fecs->data.offset - fecs->code.offset; fecs->data.offset - fecs->code.offset;
p_img->desc->app_resident_data_size = fecs->data.size; p_img->desc->app_resident_data_size = fecs->data.size;
p_img->data = nvgpu_gr_falcon_get_surface_desc_cpu_va(g->gr->falcon); p_img->data = nvgpu_gr_falcon_get_surface_desc_cpu_va(gr_falcon);
p_img->data_size = p_img->desc->image_size; p_img->data_size = p_img->desc->image_size;
p_img->fw_ver = NULL; p_img->fw_ver = NULL;
@@ -139,8 +140,9 @@ int nvgpu_acr_lsf_gpccs_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)
struct lsf_ucode_desc *lsf_desc; struct lsf_ucode_desc *lsf_desc;
struct nvgpu_firmware *gpccs_sig; struct nvgpu_firmware *gpccs_sig;
struct flcn_ucode_img *p_img = (struct flcn_ucode_img *)lsf_ucode_img; struct flcn_ucode_img *p_img = (struct flcn_ucode_img *)lsf_ucode_img;
struct nvgpu_gr_falcon *gr_falcon = nvgpu_gr_get_falcon_ptr(g);
struct nvgpu_ctxsw_ucode_segments *gpccs = struct nvgpu_ctxsw_ucode_segments *gpccs =
nvgpu_gr_falcon_get_gpccs_ucode_segments(g->gr->falcon); nvgpu_gr_falcon_get_gpccs_ucode_segments(gr_falcon);
int err; int err;
if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) { if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) {
@@ -187,7 +189,7 @@ int nvgpu_acr_lsf_gpccs_ucode_details_v0(struct gk20a *g, void *lsf_ucode_img)
ALIGN(gpccs->code.offset, 256); ALIGN(gpccs->code.offset, 256);
p_img->desc->app_resident_data_size = ALIGN(gpccs->data.size, 256); p_img->desc->app_resident_data_size = ALIGN(gpccs->data.size, 256);
p_img->data = (u32 *) p_img->data = (u32 *)
((u8 *)nvgpu_gr_falcon_get_surface_desc_cpu_va(g->gr->falcon) + ((u8 *)nvgpu_gr_falcon_get_surface_desc_cpu_va(gr_falcon) +
gpccs->boot.offset); gpccs->boot.offset);
p_img->data_size = ALIGN(p_img->desc->image_size, 256); p_img->data_size = ALIGN(p_img->desc->image_size, 256);
p_img->fw_ver = NULL; p_img->fw_ver = NULL;
@@ -802,6 +804,7 @@ int nvgpu_acr_prepare_ucode_blob_v0(struct gk20a *g)
int err = 0; int err = 0;
struct ls_flcn_mgr lsfm_l, *plsfm; struct ls_flcn_mgr lsfm_l, *plsfm;
struct wpr_carveout_info wpr_inf; struct wpr_carveout_info wpr_inf;
struct nvgpu_gr_falcon *gr_falcon = nvgpu_gr_get_falcon_ptr(g);
if (g->acr->ucode_blob.cpu_va != NULL) { if (g->acr->ucode_blob.cpu_va != NULL) {
/* Recovery case, we do not need to form non WPR blob */ /* Recovery case, we do not need to form non WPR blob */
@@ -816,7 +819,7 @@ int nvgpu_acr_prepare_ucode_blob_v0(struct gk20a *g)
return err; return err;
} }
err = nvgpu_gr_falcon_init_ctxsw_ucode(g, g->gr->falcon); err = nvgpu_gr_falcon_init_ctxsw_ucode(g, gr_falcon);
if (err != 0) { if (err != 0) {
nvgpu_err(g, "gr_falcon_init_ctxsw_ucode failed err=%d", err); nvgpu_err(g, "gr_falcon_init_ctxsw_ucode failed err=%d", err);
return err; return err;

View File

@@ -26,7 +26,7 @@
#include <nvgpu/string.h> #include <nvgpu/string.h>
#include <nvgpu/bug.h> #include <nvgpu/bug.h>
#include <nvgpu/gr/gr_falcon.h> #include <nvgpu/gr/gr_falcon.h>
#include "common/gr/gr_priv.h" #include <nvgpu/gr/gr_utils.h>
#include "acr_blob_construct_v1.h" #include "acr_blob_construct_v1.h"
#include "acr_falcon_bl.h" #include "acr_falcon_bl.h"
@@ -82,8 +82,9 @@ int nvgpu_acr_lsf_fecs_ucode_details_v1(struct gk20a *g, void *lsf_ucode_img)
struct nvgpu_firmware *fecs_sig = NULL; struct nvgpu_firmware *fecs_sig = NULL;
struct flcn_ucode_img_v1 *p_img = struct flcn_ucode_img_v1 *p_img =
(struct flcn_ucode_img_v1 *)lsf_ucode_img; (struct flcn_ucode_img_v1 *)lsf_ucode_img;
struct nvgpu_gr_falcon *gr_falcon = nvgpu_gr_get_falcon_ptr(g);
struct nvgpu_ctxsw_ucode_segments *fecs = struct nvgpu_ctxsw_ucode_segments *fecs =
nvgpu_gr_falcon_get_fecs_ucode_segments(g->gr->falcon); nvgpu_gr_falcon_get_fecs_ucode_segments(gr_falcon);
int err; int err;
switch (ver) { switch (ver) {
@@ -140,7 +141,7 @@ int nvgpu_acr_lsf_fecs_ucode_details_v1(struct gk20a *g, void *lsf_ucode_img)
p_img->desc->app_resident_data_offset = fecs->data.offset - p_img->desc->app_resident_data_offset = fecs->data.offset -
fecs->code.offset; fecs->code.offset;
p_img->desc->app_resident_data_size = fecs->data.size; p_img->desc->app_resident_data_size = fecs->data.size;
p_img->data = nvgpu_gr_falcon_get_surface_desc_cpu_va(g->gr->falcon); p_img->data = nvgpu_gr_falcon_get_surface_desc_cpu_va(gr_falcon);
p_img->data_size = p_img->desc->image_size; p_img->data_size = p_img->desc->image_size;
p_img->fw_ver = NULL; p_img->fw_ver = NULL;
@@ -166,8 +167,9 @@ int nvgpu_acr_lsf_gpccs_ucode_details_v1(struct gk20a *g, void *lsf_ucode_img)
struct nvgpu_firmware *gpccs_sig = NULL; struct nvgpu_firmware *gpccs_sig = NULL;
struct flcn_ucode_img_v1 *p_img = struct flcn_ucode_img_v1 *p_img =
(struct flcn_ucode_img_v1 *)lsf_ucode_img; (struct flcn_ucode_img_v1 *)lsf_ucode_img;
struct nvgpu_gr_falcon *gr_falcon = nvgpu_gr_get_falcon_ptr(g);
struct nvgpu_ctxsw_ucode_segments *gpccs = struct nvgpu_ctxsw_ucode_segments *gpccs =
nvgpu_gr_falcon_get_gpccs_ucode_segments(g->gr->falcon); nvgpu_gr_falcon_get_gpccs_ucode_segments(gr_falcon);
int err; int err;
if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) { if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) {
@@ -228,7 +230,7 @@ int nvgpu_acr_lsf_gpccs_ucode_details_v1(struct gk20a *g, void *lsf_ucode_img)
ALIGN(gpccs->code.offset, 256); ALIGN(gpccs->code.offset, 256);
p_img->desc->app_resident_data_size = ALIGN(gpccs->data.size, 256); p_img->desc->app_resident_data_size = ALIGN(gpccs->data.size, 256);
p_img->data = (u32 *) p_img->data = (u32 *)
((u8 *)nvgpu_gr_falcon_get_surface_desc_cpu_va(g->gr->falcon) + ((u8 *)nvgpu_gr_falcon_get_surface_desc_cpu_va(gr_falcon) +
gpccs->boot.offset); gpccs->boot.offset);
p_img->data_size = ALIGN(p_img->desc->image_size, 256); p_img->data_size = ALIGN(p_img->desc->image_size, 256);
p_img->fw_ver = NULL; p_img->fw_ver = NULL;
@@ -930,6 +932,7 @@ int nvgpu_acr_prepare_ucode_blob_v1(struct gk20a *g)
int err = 0; int err = 0;
struct ls_flcn_mgr_v1 lsfm_l, *plsfm; struct ls_flcn_mgr_v1 lsfm_l, *plsfm;
struct wpr_carveout_info wpr_inf; struct wpr_carveout_info wpr_inf;
struct nvgpu_gr_falcon *gr_falcon = nvgpu_gr_get_falcon_ptr(g);
/* Recovery case, we do not need to form non WPR blob of ucodes */ /* Recovery case, we do not need to form non WPR blob of ucodes */
if (g->acr->ucode_blob.cpu_va != NULL) { if (g->acr->ucode_blob.cpu_va != NULL) {
@@ -938,7 +941,7 @@ int nvgpu_acr_prepare_ucode_blob_v1(struct gk20a *g)
plsfm = &lsfm_l; plsfm = &lsfm_l;
(void) memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr_v1)); (void) memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr_v1));
err = nvgpu_gr_falcon_init_ctxsw_ucode(g, g->gr->falcon); err = nvgpu_gr_falcon_init_ctxsw_ucode(g, gr_falcon);
if (err != 0) { if (err != 0) {
nvgpu_err(g, "gr_falcon_init_ctxsw_ucode failed err=%d", err); nvgpu_err(g, "gr_falcon_init_ctxsw_ucode failed err=%d", err);
return err; return err;

View File

@@ -0,0 +1,33 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/gk20a.h>
#include <nvgpu/types.h>
#include <nvgpu/gr/gr_utils.h>
#include "gr_priv.h"
struct nvgpu_gr_falcon *nvgpu_gr_get_falcon_ptr(struct gk20a *g)
{
return g->gr->falcon;
}

View File

@@ -0,0 +1,31 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
 * Public GR utility interface: small accessors that expose GR-owned
 * objects to other units without requiring them to include the
 * private gr_priv.h header.
 */
#ifndef NVGPU_GR_UTILS_H
#define NVGPU_GR_UTILS_H
/* Forward declarations only; callers never need the full definitions. */
struct gk20a;
struct nvgpu_gr_falcon;
/**
 * @brief Get the GR falcon pointer for a GPU device.
 *
 * @param g  GPU device pointer.
 *
 * @return The nvgpu_gr_falcon object held by @c g (implemented as
 *         g->gr->falcon in common/gr/gr_utils.c; no NULL check is
 *         performed there, so callers must only use this after GR
 *         initialization).
 */
struct nvgpu_gr_falcon *nvgpu_gr_get_falcon_ptr(struct gk20a *g);
#endif /* NVGPU_GR_UTILS_H */