gpu: nvgpu: initialize gr struct in poweron path

struct nvgpu_gr is currently initialized during probe, in OS-specific
code. To support multiple instances of the graphics engine, nvgpu
needs to initialize nvgpu_gr in the poweron path, after the number of
engine instances has been enumerated.
Hence move nvgpu_gr_alloc() to the poweron path, after the gr manager
has been initialized.

Some members of nvgpu_gr are also initialized in the probe path, in
OS-specific code. Move their initialization to common code in
nvgpu_gr_alloc().

Add field fecs_feature_override_ecc_val to struct gk20a to store the
override flag read from device tree. This flag is later copied to
nvgpu_gr in poweron path.

Update tpc_pg_mask_store() to check that g->gr is non-NULL before
accessing the golden image pointer.
Update tpc_fs_mask_store() to return an error if g->gr is not
initialized, since this path requires the nvgpu_gr struct. Also fix
the incorrect NULL pointer check in tpc_fs_mask_store() that breaks
the write path of this sysfs node.

Jira NVGPU-5648

Change-Id: Ifa2f66f3663dc2f7c8891cb03b25e997e148ab06
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2397259
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Lakshmanan M <lm@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Deepak Nibade
2020-08-10 18:04:47 +05:30
committed by Alex Waterman
parent a04525ece8
commit 010f818596
13 changed files with 40 additions and 54 deletions

View File

@@ -846,8 +846,7 @@ static ssize_t tpc_pg_mask_store(struct device *dev,
{
struct gk20a *g = get_gk20a(dev);
unsigned long val = 0;
struct nvgpu_gr_obj_ctx_golden_image *gr_golden_image =
nvgpu_gr_get_golden_image_ptr(g);
struct nvgpu_gr_obj_ctx_golden_image *gr_golden_image = NULL;
nvgpu_mutex_acquire(&g->tpc_pg_lock);
@@ -862,6 +861,10 @@ static ssize_t tpc_pg_mask_store(struct device *dev,
goto exit;
}
if (g->gr != NULL) {
gr_golden_image = nvgpu_gr_get_golden_image_ptr(g);
}
if (gr_golden_image &&
nvgpu_gr_obj_ctx_get_golden_image_size(gr_golden_image)
!= 0) {
@@ -892,17 +895,23 @@ static ssize_t tpc_fs_mask_store(struct device *dev,
{
#ifdef CONFIG_NVGPU_TEGRA_FUSE
struct gk20a *g = get_gk20a(dev);
struct nvgpu_gr_config *gr_config = nvgpu_gr_get_config_ptr(g);
struct nvgpu_gr_obj_ctx_golden_image *gr_golden_image =
nvgpu_gr_get_golden_image_ptr(g);
struct nvgpu_gr_falcon *gr_falcon =
nvgpu_gr_get_falcon_ptr(g);
struct nvgpu_gr_config *gr_config;
struct nvgpu_gr_obj_ctx_golden_image *gr_golden_image;
struct nvgpu_gr_falcon *gr_falcon;
unsigned long val = 0;
if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
if (nvgpu_gr_config_get_gpc_tpc_mask_base(gr_config) != NULL)
if (g->gr == NULL) {
return -ENODEV;
}
gr_config = nvgpu_gr_get_config_ptr(g);
gr_golden_image = nvgpu_gr_get_golden_image_ptr(g);
gr_falcon = nvgpu_gr_get_falcon_ptr(g);
if (nvgpu_gr_config_get_gpc_tpc_mask_base(gr_config) == NULL)
return -ENODEV;
if (val && val != nvgpu_gr_config_get_gpc_tpc_mask(gr_config, 0) &&