gpu: nvgpu: add sysfs node for golden img status

- Add a sysfs node "golden_img_status" that shows whether the
  golden image size and pointer are already initialized.
- This node makes it possible to check the golden image status
  before attempting to modify the gpc/tpc/fbp masks.
Bug 3960290
Change-Id: I3c3de69b369bcaf2f0127e897d06e21cb8e2d68e
Signed-off-by: Divya <dsinghatwari@nvidia.com>
(cherry picked from commit c728f09c18)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2864095
Reviewed-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-by: Jonathan Hunter <jonathanh@nvidia.com>
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
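
For context, a minimal userspace sketch that reads the new node might look like the
following. The sysfs path used here is a hypothetical placeholder; the actual location
of the nvgpu device directory is platform-specific.

/* read_golden_img_status.c -- illustrative example, not part of the commit. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        /* Placeholder path: the real node lives under the nvgpu device's
         * sysfs directory, which depends on how the GPU is registered.
         */
        const char *path = "/sys/devices/platform/gpu.0/golden_img_status";
        FILE *f = fopen(path, "r");
        unsigned int status;

        if (f == NULL) {
                perror("fopen");
                return EXIT_FAILURE;
        }

        /* golden_img_status_show() prints "1" once the golden context image
         * is initialized and "0" otherwise.
         */
        if (fscanf(f, "%u", &status) != 1) {
                fprintf(stderr, "unexpected contents in %s\n", path);
                fclose(f);
                return EXIT_FAILURE;
        }
        fclose(f);

        printf("golden image %sinitialized\n", status != 0U ? "" : "not ");
        return EXIT_SUCCESS;
}

Checking the node before writing gpc_pg_mask/tpc_pg_mask/fbp_pg_mask avoids the
-ENODEV path taken once the golden context image has already been created.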
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -111,7 +111,6 @@ void nvgpu_gr_clear_cilp_preempt_pending_chid(struct gk20a *g)
 }
 #endif

-#ifdef CONFIG_NVGPU_DEBUGGER
 struct nvgpu_gr_obj_ctx_golden_image *nvgpu_gr_get_golden_image_ptr(
         struct gk20a *g)
 {
@@ -120,6 +119,7 @@ struct nvgpu_gr_obj_ctx_golden_image *nvgpu_gr_get_golden_image_ptr(
         return gr->golden_image;
 }

+#ifdef CONFIG_NVGPU_DEBUGGER
 struct nvgpu_gr_hwpm_map *nvgpu_gr_get_hwpm_map_ptr(struct gk20a *g)
 {
         struct nvgpu_gr *gr = nvgpu_gr_get_cur_instance_ptr(g);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -33,6 +33,7 @@
 #include <nvgpu/gr/global_ctx.h>
 #include <nvgpu/gr/obj_ctx.h>
 #include <nvgpu/gr/config.h>
+#include <nvgpu/gr/gr_utils.h>
 #include <nvgpu/netlist.h>
 #include <nvgpu/gr/gr_falcon.h>
 #include <nvgpu/gr/fs_state.h>
@@ -1004,6 +1005,26 @@ int nvgpu_gr_obj_ctx_init(struct gk20a *g,
         return 0;
 }

+bool nvgpu_gr_obj_ctx_golden_img_status(struct gk20a *g)
+{
+        struct nvgpu_gr_obj_ctx_golden_image *gr_golden_image = NULL;
+        bool status;
+
+        if (g->gr != NULL) {
+                gr_golden_image = nvgpu_gr_get_golden_image_ptr(g);
+        }
+
+        if ((gr_golden_image != NULL) &&
+                (nvgpu_gr_obj_ctx_get_golden_image_size(gr_golden_image)) != 0U) {
+                /* golden ctx img is initialized */
+                status = true;
+        } else {
+                status = false;
+        }
+
+        return status;
+}
+
 void nvgpu_gr_obj_ctx_deinit(struct gk20a *g,
         struct nvgpu_gr_obj_ctx_golden_image *golden_image)
 {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -123,9 +123,10 @@ struct nvgpu_gr_zbc *nvgpu_gr_get_zbc_ptr(struct gk20a *g);
 u32 nvgpu_gr_get_cilp_preempt_pending_chid(struct gk20a *g);
 void nvgpu_gr_clear_cilp_preempt_pending_chid(struct gk20a *g);
 #endif
-#ifdef CONFIG_NVGPU_DEBUGGER
 struct nvgpu_gr_obj_ctx_golden_image *nvgpu_gr_get_golden_image_ptr(
         struct gk20a *g);
+bool nvgpu_gr_obj_ctx_golden_img_status(struct gk20a *g);
+#ifdef CONFIG_NVGPU_DEBUGGER
 struct nvgpu_gr_hwpm_map *nvgpu_gr_get_hwpm_map_ptr(struct gk20a *g);
 void nvgpu_gr_reset_falcon_ptr(struct gk20a *g);
 void nvgpu_gr_reset_golden_image_ptr(struct gk20a *g);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -861,6 +861,24 @@ static ssize_t force_idle_read(struct device *dev,
 static DEVICE_ATTR(force_idle, ROOTRW, force_idle_read, force_idle_store);
 #endif

+static ssize_t golden_img_status_show(struct device *dev,
+        struct device_attribute *attr, char *buf)
+{
+        struct gk20a *g = get_gk20a(dev);
+        u32 status = 0;
+
+        if (nvgpu_gr_obj_ctx_golden_img_status(g)) {
+                /* golden ctx is initialized*/
+                status = 1;
+        } else {
+                status = 0;
+        }
+
+        return snprintf(buf, NVGPU_CPU_PAGE_SIZE, "%u\n", status);
+}
+
+static DEVICE_ATTR_RO(golden_img_status);
+
 #ifdef CONFIG_NVGPU_STATIC_POWERGATE
 static ssize_t gpc_pg_mask_read(struct device *dev,
         struct device_attribute *attr, char *buf)
@@ -875,7 +893,6 @@ static ssize_t gpc_pg_mask_store(struct device *dev,
 {
         struct gk20a *g = get_gk20a(dev);
         struct gk20a_platform *platform = dev_get_drvdata(dev);
-        struct nvgpu_gr_obj_ctx_golden_image *gr_golden_image = NULL;
         unsigned long val = 0;
         int err = 0;

@@ -892,14 +909,8 @@ static ssize_t gpc_pg_mask_store(struct device *dev,
                 goto exit;
         }

-        if (g->gr != NULL) {
-                gr_golden_image = nvgpu_gr_get_golden_image_ptr(g);
-        }
-
-        if (gr_golden_image &&
-                nvgpu_gr_obj_ctx_get_golden_image_size(gr_golden_image)
-                        != 0) {
-                nvgpu_err(g, "golden image size already initialized");
+        if (nvgpu_gr_obj_ctx_golden_img_status(g)) {
+                nvgpu_info(g, "golden image size already initialized");
                 nvgpu_mutex_release(&g->static_pg_lock);
                 return -ENODEV;
         }
@@ -955,7 +966,6 @@ static ssize_t fbp_pg_mask_store(struct device *dev,
 {
         struct gk20a *g = get_gk20a(dev);
         struct gk20a_platform *platform = dev_get_drvdata(dev);
-        struct nvgpu_gr_obj_ctx_golden_image *gr_golden_image = NULL;
         unsigned long val = 0;
         int err = 0;

@@ -972,14 +982,8 @@ static ssize_t fbp_pg_mask_store(struct device *dev,
                 goto exit;
         }

-        if (g->gr != NULL) {
-                gr_golden_image = nvgpu_gr_get_golden_image_ptr(g);
-        }
-
-        if (gr_golden_image &&
-                nvgpu_gr_obj_ctx_get_golden_image_size(gr_golden_image)
-                        != 0) {
-                nvgpu_err(g, "golden image size already initialized");
+        if (nvgpu_gr_obj_ctx_golden_img_status(g)) {
+                nvgpu_info(g, "golden image size already initialized");
                 nvgpu_mutex_release(&g->static_pg_lock);
                 return -ENODEV;
         }
@@ -1043,7 +1047,6 @@ static ssize_t tpc_pg_mask_store(struct device *dev,
 {
         struct gk20a *g = get_gk20a(dev);
         struct gk20a_platform *platform = dev_get_drvdata(dev);
-        struct nvgpu_gr_obj_ctx_golden_image *gr_golden_image = NULL;
         unsigned long val = 0;
         int err = 0;
         u32 i;
@@ -1068,14 +1071,8 @@ static ssize_t tpc_pg_mask_store(struct device *dev,
                 goto exit;
         }

-        if (g->gr != NULL) {
-                gr_golden_image = nvgpu_gr_get_golden_image_ptr(g);
-        }
-
-        if (gr_golden_image &&
-                nvgpu_gr_obj_ctx_get_golden_image_size(gr_golden_image)
-                        != 0) {
-                nvgpu_err(g, "golden image size already initialized");
+        if (nvgpu_gr_obj_ctx_golden_img_status(g)) {
+                nvgpu_info(g, "golden image size already initialized");
                 nvgpu_mutex_release(&g->static_pg_lock);
                 /*
                  * as golden context is already created,
@@ -1386,6 +1383,7 @@ void nvgpu_remove_sysfs(struct device *dev)
         device_remove_file(dev, &dev_attr_aelpg_param);
         device_remove_file(dev, &dev_attr_aelpg_enable);
         device_remove_file(dev, &dev_attr_allow_all);
+        device_remove_file(dev, &dev_attr_golden_img_status);
         device_remove_file(dev, &dev_attr_tpc_fs_mask);
         device_remove_file(dev, &dev_attr_tpc_pg_mask);
         device_remove_file(dev, &dev_attr_gpc_fs_mask);
@@ -1456,6 +1454,7 @@ int nvgpu_create_sysfs(struct device *dev)
         error |= device_create_file(dev, &dev_attr_aelpg_param);
         error |= device_create_file(dev, &dev_attr_aelpg_enable);
         error |= device_create_file(dev, &dev_attr_allow_all);
+        error |= device_create_file(dev, &dev_attr_golden_img_status);
         error |= device_create_file(dev, &dev_attr_tpc_fs_mask);
         error |= device_create_file(dev, &dev_attr_tpc_pg_mask);
         error |= device_create_file(dev, &dev_attr_gpc_fs_mask);