gpu: nvgpu: add sysfs node for golden img status

- Add a sysfs node "golden_img_status" that reports
  whether the golden_image size and pointer have
  already been initialized.
- This node lets userspace check the golden image status
  before attempting to modify the gpc/tpc/fbp masks.

Bug 3960290

Change-Id: I3c3de69b369bcaf2f0127e897d06e21cb8e2d68e
Signed-off-by: Divya <dsinghatwari@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2868729
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Divya
2023-02-27 10:51:51 +00:00
committed by mobile promotions
parent c49ac865de
commit c728f09c18
3 changed files with 48 additions and 28 deletions

View File

@@ -1440,6 +1440,26 @@ int nvgpu_gr_obj_ctx_init(struct gk20a *g,
return 0; return 0;
} }
/*
 * Report whether the golden context image has been created.
 *
 * Returns true only when the GR unit exists, a golden-image object has
 * been allocated, and its recorded size is non-zero; false otherwise.
 */
bool nvgpu_gr_obj_ctx_golden_img_status(struct gk20a *g)
{
	struct nvgpu_gr_obj_ctx_golden_image *golden_img = NULL;

	if (g->gr != NULL) {
		golden_img = nvgpu_gr_get_golden_image_ptr(g);
	}

	/* No GR unit or no golden-image object: not initialized yet. */
	if (golden_img == NULL) {
		return false;
	}

	/* A non-zero size means the golden context image is initialized. */
	return nvgpu_gr_obj_ctx_get_golden_image_size(golden_img) != 0U;
}
void nvgpu_gr_obj_ctx_deinit(struct gk20a *g, void nvgpu_gr_obj_ctx_deinit(struct gk20a *g,
struct nvgpu_gr_obj_ctx_golden_image *golden_image) struct nvgpu_gr_obj_ctx_golden_image *golden_image)
{ {

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -125,6 +125,7 @@ void nvgpu_gr_clear_cilp_preempt_pending_chid(struct gk20a *g);
#endif #endif
struct nvgpu_gr_obj_ctx_golden_image *nvgpu_gr_get_golden_image_ptr( struct nvgpu_gr_obj_ctx_golden_image *nvgpu_gr_get_golden_image_ptr(
struct gk20a *g); struct gk20a *g);
bool nvgpu_gr_obj_ctx_golden_img_status(struct gk20a *g);
#ifdef CONFIG_NVGPU_DEBUGGER #ifdef CONFIG_NVGPU_DEBUGGER
struct nvgpu_gr_hwpm_map *nvgpu_gr_get_hwpm_map_ptr(struct gk20a *g); struct nvgpu_gr_hwpm_map *nvgpu_gr_get_hwpm_map_ptr(struct gk20a *g);
void nvgpu_gr_reset_falcon_ptr(struct gk20a *g); void nvgpu_gr_reset_falcon_ptr(struct gk20a *g);

View File

@@ -907,6 +907,24 @@ static ssize_t force_idle_read(struct device *dev,
static DEVICE_ATTR(force_idle, ROOTRW, force_idle_read, force_idle_store); static DEVICE_ATTR(force_idle, ROOTRW, force_idle_read, force_idle_store);
#endif #endif
/*
 * sysfs read handler for "golden_img_status".
 *
 * Writes "1\n" to @buf when the golden context image is initialized,
 * "0\n" otherwise. Returns the number of bytes written.
 */
static ssize_t golden_img_status_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);
	/* 1 => golden ctx image initialized, 0 => not yet. */
	u32 status = nvgpu_gr_obj_ctx_golden_img_status(g) ? 1U : 0U;

	return snprintf(buf, NVGPU_CPU_PAGE_SIZE, "%u\n", status);
}

static DEVICE_ATTR_RO(golden_img_status);
#ifdef CONFIG_NVGPU_STATIC_POWERGATE #ifdef CONFIG_NVGPU_STATIC_POWERGATE
static ssize_t gpc_pg_mask_read(struct device *dev, static ssize_t gpc_pg_mask_read(struct device *dev,
struct device_attribute *attr, char *buf) struct device_attribute *attr, char *buf)
@@ -921,7 +939,6 @@ static ssize_t gpc_pg_mask_store(struct device *dev,
{ {
struct gk20a *g = get_gk20a(dev); struct gk20a *g = get_gk20a(dev);
struct gk20a_platform *platform = dev_get_drvdata(dev); struct gk20a_platform *platform = dev_get_drvdata(dev);
struct nvgpu_gr_obj_ctx_golden_image *gr_golden_image = NULL;
unsigned long val = 0; unsigned long val = 0;
int err = 0; int err = 0;
@@ -938,14 +955,8 @@ static ssize_t gpc_pg_mask_store(struct device *dev,
goto exit; goto exit;
} }
if (g->gr != NULL) { if (nvgpu_gr_obj_ctx_golden_img_status(g)) {
gr_golden_image = nvgpu_gr_get_golden_image_ptr(g); nvgpu_info(g, "golden image size already initialized");
}
if (gr_golden_image &&
nvgpu_gr_obj_ctx_get_golden_image_size(gr_golden_image)
!= 0) {
nvgpu_err(g, "golden image size already initialized");
nvgpu_mutex_release(&g->static_pg_lock); nvgpu_mutex_release(&g->static_pg_lock);
return -ENODEV; return -ENODEV;
} }
@@ -1001,7 +1012,6 @@ static ssize_t fbp_pg_mask_store(struct device *dev,
{ {
struct gk20a *g = get_gk20a(dev); struct gk20a *g = get_gk20a(dev);
struct gk20a_platform *platform = dev_get_drvdata(dev); struct gk20a_platform *platform = dev_get_drvdata(dev);
struct nvgpu_gr_obj_ctx_golden_image *gr_golden_image = NULL;
unsigned long val = 0; unsigned long val = 0;
int err = 0; int err = 0;
@@ -1018,14 +1028,8 @@ static ssize_t fbp_pg_mask_store(struct device *dev,
goto exit; goto exit;
} }
if (g->gr != NULL) { if (nvgpu_gr_obj_ctx_golden_img_status(g)) {
gr_golden_image = nvgpu_gr_get_golden_image_ptr(g); nvgpu_info(g, "golden image size already initialized");
}
if (gr_golden_image &&
nvgpu_gr_obj_ctx_get_golden_image_size(gr_golden_image)
!= 0) {
nvgpu_err(g, "golden image size already initialized");
nvgpu_mutex_release(&g->static_pg_lock); nvgpu_mutex_release(&g->static_pg_lock);
return -ENODEV; return -ENODEV;
} }
@@ -1089,7 +1093,6 @@ static ssize_t tpc_pg_mask_store(struct device *dev,
{ {
struct gk20a *g = get_gk20a(dev); struct gk20a *g = get_gk20a(dev);
struct gk20a_platform *platform = dev_get_drvdata(dev); struct gk20a_platform *platform = dev_get_drvdata(dev);
struct nvgpu_gr_obj_ctx_golden_image *gr_golden_image = NULL;
unsigned long val = 0; unsigned long val = 0;
int err = 0; int err = 0;
u32 i; u32 i;
@@ -1114,14 +1117,8 @@ static ssize_t tpc_pg_mask_store(struct device *dev,
goto exit; goto exit;
} }
if (g->gr != NULL) { if (nvgpu_gr_obj_ctx_golden_img_status(g)) {
gr_golden_image = nvgpu_gr_get_golden_image_ptr(g); nvgpu_info(g, "golden image size already initialized");
}
if (gr_golden_image &&
nvgpu_gr_obj_ctx_get_golden_image_size(gr_golden_image)
!= 0) {
nvgpu_err(g, "golden image size already initialized");
nvgpu_mutex_release(&g->static_pg_lock); nvgpu_mutex_release(&g->static_pg_lock);
/* /*
* as golden context is already created, * as golden context is already created,
@@ -1520,6 +1517,7 @@ void nvgpu_remove_sysfs(struct device *dev)
device_remove_file(dev, &dev_attr_railgate_enable); device_remove_file(dev, &dev_attr_railgate_enable);
#endif #endif
device_remove_file(dev, &dev_attr_allow_all); device_remove_file(dev, &dev_attr_allow_all);
device_remove_file(dev, &dev_attr_golden_img_status);
device_remove_file(dev, &dev_attr_tpc_fs_mask); device_remove_file(dev, &dev_attr_tpc_fs_mask);
device_remove_file(dev, &dev_attr_tpc_pg_mask); device_remove_file(dev, &dev_attr_tpc_pg_mask);
device_remove_file(dev, &dev_attr_gpc_fs_mask); device_remove_file(dev, &dev_attr_gpc_fs_mask);
@@ -1596,6 +1594,7 @@ int nvgpu_create_sysfs(struct device *dev)
error |= device_create_file(dev, &dev_attr_railgate_enable); error |= device_create_file(dev, &dev_attr_railgate_enable);
#endif #endif
error |= device_create_file(dev, &dev_attr_allow_all); error |= device_create_file(dev, &dev_attr_allow_all);
error |= device_create_file(dev, &dev_attr_golden_img_status);
error |= device_create_file(dev, &dev_attr_tpc_fs_mask); error |= device_create_file(dev, &dev_attr_tpc_fs_mask);
error |= device_create_file(dev, &dev_attr_tpc_pg_mask); error |= device_create_file(dev, &dev_attr_tpc_pg_mask);
error |= device_create_file(dev, &dev_attr_gpc_fs_mask); error |= device_create_file(dev, &dev_attr_gpc_fs_mask);