gpu: nvgpu: gr_priv header include cleanup

Add more APIs in gr_utils for accessing variables within the gr struct.
This helps to avoid including gr_priv.h outside the gr files and
dereferencing the gr struct directly.

Jira NVGPU-3218

Change-Id: I6f24cc302f10aa1da14a981d80c400a027c9a115
Signed-off-by: Vinod G <vinodg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2115930
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Vinod G
2019-05-09 14:32:05 -07:00
committed by mobile promotions
parent e615e8f0ff
commit 5c60645cfa
17 changed files with 155 additions and 62 deletions

View File

@@ -23,6 +23,7 @@
#include <nvgpu/gk20a.h>
#include <nvgpu/gr/config.h>
#include <nvgpu/gr/gr.h>
#include <nvgpu/gr/gr_utils.h>
#include <nvgpu/ltc.h>
#include <nvgpu/nvgpu_err.h>

View File

@@ -32,8 +32,7 @@
#include <nvgpu/gr/ctx.h>
#include <nvgpu/gr/subctx.h>
#include <nvgpu/gr/fecs_trace.h>
#include "common/gr/gr_priv.h"
#include <nvgpu/gr/gr_utils.h>
#ifdef CONFIG_GK20A_CTXSW_TRACE
@@ -215,8 +214,10 @@ int nvgpu_gr_fecs_trace_num_ts(struct gk20a *g)
struct nvgpu_fecs_trace_record *nvgpu_gr_fecs_trace_get_record(
struct gk20a *g, int idx)
{
struct nvgpu_gr_global_ctx_buffer_desc *gr_global_ctx_buffer =
nvgpu_gr_get_global_ctx_buffer_ptr(g);
struct nvgpu_mem *mem = nvgpu_gr_global_ctx_buffer_get_mem(
g->gr->global_ctx_buffer,
gr_global_ctx_buffer,
NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER);
if (mem == NULL) {
return NULL;
@@ -621,6 +622,8 @@ int nvgpu_gr_fecs_trace_bind_channel(struct gk20a *g,
u64 addr = 0ULL;
struct nvgpu_gr_fecs_trace *trace = g->fecs_trace;
struct nvgpu_mem *mem;
struct nvgpu_gr_global_ctx_buffer_desc *gr_global_ctx_buffer =
nvgpu_gr_get_global_ctx_buffer_ptr(g);
u32 context_ptr;
u32 aperture_mask;
int ret;
@@ -636,7 +639,7 @@ int nvgpu_gr_fecs_trace_bind_channel(struct gk20a *g,
pid, context_ptr,
nvgpu_inst_block_addr(g, inst_block));
mem = nvgpu_gr_global_ctx_buffer_get_mem(g->gr->global_ctx_buffer,
mem = nvgpu_gr_global_ctx_buffer_get_mem(gr_global_ctx_buffer,
NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER);
if (mem == NULL) {
return -EINVAL;

View File

@@ -769,13 +769,3 @@ void nvgpu_gr_sw_ready(struct gk20a *g, bool enable)
{
g->gr->sw_ready = enable;
}
void nvgpu_gr_override_ecc_val(struct gk20a *g, u32 ecc_val)
{
g->gr->fecs_feature_override_ecc_val = ecc_val;
}
struct nvgpu_gr_config *nvgpu_gr_get_config_ptr(struct gk20a *g)
{
return g->gr->config;
}

View File

@@ -35,8 +35,8 @@
#include <nvgpu/gr/config.h>
#include <nvgpu/gr/gr_falcon.h>
#include <nvgpu/gr/fecs_trace.h>
#include <nvgpu/gr/gr_utils.h>
#include "gr_priv.h"
#include "gr_intr_priv.h"
static void gr_intr_report_ctxsw_error(struct gk20a *g, u32 err_type, u32 chid,
@@ -236,7 +236,7 @@ struct nvgpu_channel *nvgpu_gr_intr_get_channel_from_ctx(struct gk20a *g,
u32 curr_ctx, u32 *curr_tsgid)
{
struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_gr_intr *intr = g->gr->intr;
struct nvgpu_gr_intr *intr = nvgpu_gr_get_intr_ptr(g);
u32 chid;
u32 tsgid = NVGPU_INVALID_TSG_ID;
u32 i;
@@ -694,7 +694,7 @@ int nvgpu_gr_intr_stall_isr(struct gk20a *g)
struct nvgpu_tsg *tsg = NULL;
u32 global_esr = 0;
u32 chid;
struct nvgpu_gr_config *gr_config = g->gr->config;
struct nvgpu_gr_config *gr_config = nvgpu_gr_get_config_ptr(g);
u32 gr_intr = g->ops.gr.intr.read_pending_interrupts(g, &intr_info);
u32 clear_intr = gr_intr;
@@ -881,7 +881,7 @@ int nvgpu_gr_intr_stall_isr(struct gk20a *g)
/* invalidate channel lookup tlb */
void nvgpu_gr_intr_flush_channel_tlb(struct gk20a *g)
{
struct nvgpu_gr_intr *intr = g->gr->intr;
struct nvgpu_gr_intr *intr = nvgpu_gr_get_intr_ptr(g);
nvgpu_spinlock_acquire(&intr->ch_tlb_lock);
(void) memset(intr->chid_tlb, 0,

View File

@@ -24,6 +24,8 @@
#include <nvgpu/types.h>
#include <nvgpu/gr/gr_utils.h>
#include <nvgpu/gr/config.h>
#include "gr_priv.h"
struct nvgpu_gr_falcon *nvgpu_gr_get_falcon_ptr(struct gk20a *g)
@@ -31,3 +33,70 @@ struct nvgpu_gr_falcon *nvgpu_gr_get_falcon_ptr(struct gk20a *g)
return g->gr->falcon;
}
void nvgpu_gr_reset_falcon_ptr(struct gk20a *g)
{
g->gr->falcon = NULL;
}
struct nvgpu_gr_obj_ctx_golden_image *nvgpu_gr_get_golden_image_ptr(
struct gk20a *g)
{
return g->gr->golden_image;
}
void nvgpu_gr_reset_golden_image_ptr(struct gk20a *g)
{
g->gr->golden_image = NULL;
}
struct nvgpu_gr_zcull *nvgpu_gr_get_zcull_ptr(struct gk20a *g)
{
return g->gr->zcull;
}
struct nvgpu_gr_zbc *nvgpu_gr_get_zbc_ptr(struct gk20a *g)
{
return g->gr->zbc;
}
struct nvgpu_gr_config *nvgpu_gr_get_config_ptr(struct gk20a *g)
{
return g->gr->config;
}
struct nvgpu_gr_hwpm_map *nvgpu_gr_get_hwpm_map_ptr(struct gk20a *g)
{
return g->gr->hwpm_map;
}
struct nvgpu_gr_intr *nvgpu_gr_get_intr_ptr(struct gk20a *g)
{
return g->gr->intr;
}
struct nvgpu_gr_global_ctx_buffer_desc *nvgpu_gr_get_global_ctx_buffer_ptr(
struct gk20a *g)
{
return g->gr->global_ctx_buffer;
}
u32 nvgpu_gr_get_override_ecc_val(struct gk20a *g)
{
return g->gr->fecs_feature_override_ecc_val;
}
void nvgpu_gr_override_ecc_val(struct gk20a *g, u32 ecc_val)
{
g->gr->fecs_feature_override_ecc_val = ecc_val;
}
u32 nvgpu_gr_get_cilp_preempt_pending_chid(struct gk20a *g)
{
return g->gr->cilp_preempt_pending_chid;
}
void nvgpu_gr_clear_cilp_preempt_pending_chid(struct gk20a *g)
{
g->gr->cilp_preempt_pending_chid =
NVGPU_INVALID_CHANNEL_ID;
}

View File

@@ -36,7 +36,6 @@
#include <nvgpu/power_features/cg.h>
#include "obj_ctx_priv.h"
#include "gr_priv.h"
void nvgpu_gr_obj_ctx_commit_inst_gpu_va(struct gk20a *g,
struct nvgpu_mem *inst_block, u64 gpu_va)

View File

@@ -29,10 +29,10 @@
#include <nvgpu/gk20a.h>
#include <nvgpu/regops.h>
#include <nvgpu/gr/obj_ctx.h>
#include <nvgpu/gr/gr_utils.h>
/* Access ctx buffer offset functions in gr_gk20a.h */
#include "hal/gr/gr/gr_gk20a.h"
#include "common/gr/gr_priv.h"
static int regop_bsearch_range_cmp(const void *pkey, const void *pelem)
{
@@ -69,9 +69,12 @@ static inline bool linear_search(u32 offset, const u32 *list, u64 size)
* But note: while the dbg_gpu bind requires the a channel fd,
* it doesn't require an allocated gr/compute obj at that point...
*/
static bool gr_context_info_available(struct nvgpu_gr *gr)
static bool gr_context_info_available(struct gk20a *g)
{
return nvgpu_gr_obj_ctx_is_golden_image_ready(gr->golden_image);
struct nvgpu_gr_obj_ctx_golden_image *gr_golden_image =
nvgpu_gr_get_golden_image_ptr(g);
return nvgpu_gr_obj_ctx_is_golden_image_ready(gr_golden_image);
}
static bool validate_reg_ops(struct gk20a *g,
@@ -119,7 +122,7 @@ int exec_regops_gk20a(struct gk20a *g,
/* be sure that ctx info is in place if there are ctx ops */
if ((ctx_wr_count | ctx_rd_count) != 0U) {
if (!gr_context_info_available(g->gr)) {
if (!gr_context_info_available(g)) {
nvgpu_err(g, "gr context data not available");
return -ENODEV;
}

View File

@@ -33,12 +33,12 @@
#include <nvgpu/gr/ctx.h>
#include <nvgpu/gr/obj_ctx.h>
#include <nvgpu/gr/hwpm_map.h>
#include <nvgpu/gr/gr_utils.h>
#include "common/gr/ctx_priv.h"
#include "ctx_vgpu.h"
#include "common/vgpu/ivc/comm_vgpu.h"
#include "common/gr/gr_priv.h"
int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx,
@@ -46,14 +46,15 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
{
struct tegra_vgpu_cmd_msg msg = {0};
struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
struct nvgpu_gr *gr = g->gr;
struct nvgpu_gr_obj_ctx_golden_image *gr_golden_image =
nvgpu_gr_get_golden_image_ptr(g);
u32 golden_image_size;
int err;
nvgpu_log_fn(g, " ");
golden_image_size =
nvgpu_gr_obj_ctx_get_golden_image_size(gr->golden_image);
nvgpu_gr_obj_ctx_get_golden_image_size(gr_golden_image);
if (golden_image_size == 0) {
return -EINVAL;
}
@@ -171,6 +172,7 @@ int vgpu_gr_alloc_pm_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
struct vm_gk20a *vm)
{
struct pm_ctx_desc *pm_ctx = &gr_ctx->pm_ctx;
struct nvgpu_gr_hwpm_map *gr_hwpm_map = nvgpu_gr_get_hwpm_map_ptr(g);
nvgpu_log_fn(g, " ");
@@ -179,7 +181,7 @@ int vgpu_gr_alloc_pm_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
}
pm_ctx->mem.gpu_va = nvgpu_vm_alloc_va(vm,
nvgpu_gr_hwpm_map_get_size(g->gr->hwpm_map),
nvgpu_gr_hwpm_map_get_size(gr_hwpm_map),
GMMU_PAGE_SIZE_KERNEL);
if (!pm_ctx->mem.gpu_va) {
@@ -187,7 +189,7 @@ int vgpu_gr_alloc_pm_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
return -ENOMEM;
}
pm_ctx->mem.size = nvgpu_gr_hwpm_map_get_size(g->gr->hwpm_map);
pm_ctx->mem.size = nvgpu_gr_hwpm_map_get_size(gr_hwpm_map);
return 0;
}

View File

@@ -25,12 +25,9 @@
#include <nvgpu/log.h>
#include <nvgpu/bug.h>
#include <nvgpu/gr/ctx.h>
#include <nvgpu/gr/config.h>
#include <nvgpu/gr/config.h>
#include <nvgpu/gr/gr.h>
#include "common/gr/gr_priv.h"
#include <nvgpu/gr/gr_utils.h>
#include "gr_init_gm20b.h"
#include "gr_init_gp10b.h"
@@ -205,6 +202,7 @@ int gp10b_gr_init_wait_empty(struct gk20a *g)
int gp10b_gr_init_fs_state(struct gk20a *g)
{
u32 data;
u32 ecc_val = nvgpu_gr_get_override_ecc_val(g);
nvgpu_log_fn(g, " ");
@@ -219,10 +217,9 @@ int gp10b_gr_init_fs_state(struct gk20a *g)
gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_disable_f());
nvgpu_writel(g, gr_gpcs_tpcs_sm_disp_ctrl_r(), data);
if (g->gr->fecs_feature_override_ecc_val != 0U) {
nvgpu_writel(g,
gr_fecs_feature_override_ecc_r(),
g->gr->fecs_feature_override_ecc_val);
if (ecc_val != 0U) {
nvgpu_writel(g, gr_fecs_feature_override_ecc_r(), ecc_val);
}
return gm20b_gr_init_fs_state(g);

View File

@@ -30,7 +30,7 @@
#include <nvgpu/netlist.h>
#include <nvgpu/gr/config.h>
#include "common/gr/gr_priv.h"
#include <nvgpu/gr/gr_utils.h>
#include "gr_init_gm20b.h"
#include "gr_init_gv11b.h"
@@ -542,6 +542,7 @@ void gv11b_gr_init_rop_mapping(struct gk20a *g,
int gv11b_gr_init_fs_state(struct gk20a *g)
{
u32 data;
u32 ecc_val;
int err = 0;
u32 ver = g->params.gpu_arch + g->params.gpu_impl;
@@ -586,10 +587,9 @@ int gv11b_gr_init_fs_state(struct gk20a *g)
gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_disable_f());
nvgpu_writel(g, gr_gpcs_tpcs_sm_disp_ctrl_r(), data);
if (g->gr->fecs_feature_override_ecc_val != 0U) {
nvgpu_writel(g,
gr_fecs_feature_override_ecc_r(),
g->gr->fecs_feature_override_ecc_val);
ecc_val = nvgpu_gr_get_override_ecc_val(g);
if (ecc_val != 0U) {
nvgpu_writel(g, gr_fecs_feature_override_ecc_r(), ecc_val);
}
data = nvgpu_readl(g, gr_debug_0_r());

View File

@@ -30,8 +30,7 @@
#include <nvgpu/gr/ctx.h>
#include <nvgpu/gr/gr_falcon.h>
#include <nvgpu/gr/gr_intr.h>
#include "common/gr/gr_priv.h"
#include <nvgpu/gr/gr_utils.h>
#include "gr_intr_gp10b.h"
@@ -62,7 +61,7 @@ static int gp10b_gr_intr_clear_cilp_preempt_pending(struct gk20a *g,
}
nvgpu_gr_ctx_set_cilp_preempt_pending(gr_ctx, false);
g->gr->cilp_preempt_pending_chid = NVGPU_INVALID_CHANNEL_ID;
nvgpu_gr_clear_cilp_preempt_pending_chid(g);
return 0;
}
@@ -76,7 +75,7 @@ static int gp10b_gr_intr_get_cilp_preempt_pending_chid(struct gk20a *g,
u32 chid;
int ret = -EINVAL;
chid = g->gr->cilp_preempt_pending_chid;
chid = nvgpu_gr_get_cilp_preempt_pending_chid(g);
if (chid == NVGPU_INVALID_CHANNEL_ID) {
return ret;
}

View File

@@ -46,6 +46,4 @@ int nvgpu_gr_enable_ctxsw(struct gk20a *g);
int nvgpu_gr_halt_pipe(struct gk20a *g);
void nvgpu_gr_remove_support(struct gk20a *g);
void nvgpu_gr_sw_ready(struct gk20a *g, bool enable);
void nvgpu_gr_override_ecc_val(struct gk20a *g, u32 ecc_val);
struct nvgpu_gr_config *nvgpu_gr_get_config_ptr(struct gk20a *g);
#endif /* NVGPU_GR_H */

View File

@@ -25,7 +25,32 @@
struct gk20a;
struct nvgpu_gr_falcon;
struct nvgpu_gr_obj_ctx_golden_image;
struct nvgpu_gr_config;
struct nvgpu_gr_zbc;
struct nvgpu_gr_zcull;
struct nvgpu_gr_hwpm_map;
struct nvgpu_gr_intr;
struct nvgpu_gr_global_ctx_buffer_desc;
/* gr struct pointers */
struct nvgpu_gr_falcon *nvgpu_gr_get_falcon_ptr(struct gk20a *g);
struct nvgpu_gr_obj_ctx_golden_image *nvgpu_gr_get_golden_image_ptr(
struct gk20a *g);
struct nvgpu_gr_zcull *nvgpu_gr_get_zcull_ptr(struct gk20a *g);
struct nvgpu_gr_zbc *nvgpu_gr_get_zbc_ptr(struct gk20a *g);
struct nvgpu_gr_config *nvgpu_gr_get_config_ptr(struct gk20a *g);
struct nvgpu_gr_hwpm_map *nvgpu_gr_get_hwpm_map_ptr(struct gk20a *g);
struct nvgpu_gr_intr *nvgpu_gr_get_intr_ptr(struct gk20a *g);
struct nvgpu_gr_global_ctx_buffer_desc *nvgpu_gr_get_global_ctx_buffer_ptr(
struct gk20a *g);
void nvgpu_gr_reset_falcon_ptr(struct gk20a *g);
void nvgpu_gr_reset_golden_image_ptr(struct gk20a *g);
/* gr variables */
u32 nvgpu_gr_get_override_ecc_val(struct gk20a *g);
void nvgpu_gr_override_ecc_val(struct gk20a *g, u32 ecc_val);
u32 nvgpu_gr_get_cilp_preempt_pending_chid(struct gk20a *g);
void nvgpu_gr_clear_cilp_preempt_pending_chid(struct gk20a *g);
#endif /* NVGPU_GR_UTILS_H */

View File

@@ -38,6 +38,7 @@
#include <nvgpu/gr/zbc.h>
#include <nvgpu/gr/zcull.h>
#include <nvgpu/gr/gr.h>
#include <nvgpu/gr/gr_utils.h>
#include <nvgpu/gr/warpstate.h>
#include <nvgpu/channel.h>
#include <nvgpu/pmu/pmgr.h>
@@ -45,8 +46,6 @@
#include <nvgpu/fence.h>
#include <nvgpu/channel_sync_syncpt.h>
#include "common/gr/gr_priv.h"
#include "ioctl_ctrl.h"
#include "ioctl_dbg.h"
#include "ioctl_as.h"
@@ -1667,6 +1666,8 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
struct nvgpu_gr_zbc_entry *zbc_val;
struct nvgpu_gr_zbc_query_params *zbc_tbl;
struct nvgpu_gr_config *gr_config = nvgpu_gr_get_config_ptr(g);
struct nvgpu_gr_zcull *gr_zcull = nvgpu_gr_get_zcull_ptr(g);
struct nvgpu_gr_zbc *gr_zbc = nvgpu_gr_get_zbc_ptr(g);
int err = 0;
u32 i;
@@ -1697,7 +1698,7 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
case NVGPU_GPU_IOCTL_ZCULL_GET_CTX_SIZE:
get_ctx_size_args = (struct nvgpu_gpu_zcull_get_ctx_size_args *)buf;
get_ctx_size_args->size = nvgpu_gr_get_ctxsw_zcull_size(g, g->gr->zcull);
get_ctx_size_args->size = nvgpu_gr_get_ctxsw_zcull_size(g, gr_zcull);
break;
case NVGPU_GPU_IOCTL_ZCULL_GET_INFO:
@@ -1711,7 +1712,7 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
return -ENOMEM;
err = g->ops.gr.zcull.get_zcull_info(g, gr_config,
g->gr->zcull, zcull_info);
gr_zcull, zcull_info);
if (err) {
nvgpu_kfree(g, zcull_info);
break;
@@ -1762,7 +1763,7 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
if (!err) {
err = gk20a_busy(g);
if (!err) {
err = g->ops.gr.zbc.set_table(g, g->gr->zbc,
err = g->ops.gr.zbc.set_table(g, gr_zbc,
zbc_val);
gk20a_idle(g);
}
@@ -1781,7 +1782,7 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
zbc_tbl->type = query_table_args->type;
zbc_tbl->index_size = query_table_args->index_size;
err = g->ops.gr.zbc.query_table(g, g->gr->zbc, zbc_tbl);
err = g->ops.gr.zbc.query_table(g, gr_zbc, zbc_tbl);
if (!err) {
switch (zbc_tbl->type) {

View File

@@ -28,6 +28,7 @@
#include <nvgpu/gk20a.h>
#include <nvgpu/gr/config.h>
#include <nvgpu/gr/gr.h>
#include <nvgpu/gr/gr_utils.h>
#include <nvgpu/channel.h>
#include <nvgpu/tsg.h>
#include <nvgpu/fifo.h>

View File

@@ -50,6 +50,7 @@
#include <nvgpu/engines.h>
#include <nvgpu/channel.h>
#include <nvgpu/gr/gr.h>
#include <nvgpu/gr/gr_utils.h>
#include <nvgpu/pmu/pmu_pstate.h>
#include <nvgpu/cyclestats_snapshot.h>

View File

@@ -27,13 +27,12 @@
#include <nvgpu/gr/obj_ctx.h>
#include <nvgpu/gr/gr_falcon.h>
#include <nvgpu/gr/gr.h>
#include <nvgpu/gr/gr_utils.h>
#include <nvgpu/power_features/cg.h>
#include <nvgpu/power_features/pg.h>
#include <nvgpu/pmu/pmu_perfmon.h>
#include <nvgpu/pmu/fw.h>
#include "common/gr/gr_priv.h"
#include "os_linux.h"
#include "sysfs.h"
#include "platform_gk20a.h"
@@ -828,8 +827,9 @@ static ssize_t tpc_pg_mask_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct gk20a *g = get_gk20a(dev);
struct nvgpu_gr *gr = g->gr;
unsigned long val = 0;
struct nvgpu_gr_obj_ctx_golden_image *gr_golden_image =
nvgpu_gr_get_golden_image_ptr(g);
nvgpu_mutex_acquire(&g->tpc_pg_lock);
@@ -849,7 +849,7 @@ static ssize_t tpc_pg_mask_store(struct device *dev,
goto exit;
}
if (nvgpu_gr_obj_ctx_get_golden_image_size(gr->golden_image) != 0) {
if (nvgpu_gr_obj_ctx_get_golden_image_size(gr_golden_image) != 0) {
nvgpu_err(g, "golden image size already initialized");
nvgpu_mutex_release(&g->tpc_pg_lock);
return -ENODEV;
@@ -875,6 +875,10 @@ static ssize_t tpc_fs_mask_store(struct device *dev,
{
struct gk20a *g = get_gk20a(dev);
struct nvgpu_gr_config *gr_config = nvgpu_gr_get_config_ptr(g);
struct nvgpu_gr_obj_ctx_golden_image *gr_golden_image =
nvgpu_gr_get_golden_image_ptr(g);
struct nvgpu_gr_falcon *gr_falcon =
nvgpu_gr_get_falcon_ptr(g);
unsigned long val = 0;
if (kstrtoul(buf, 10, &val) < 0)
@@ -890,12 +894,12 @@ static ssize_t tpc_fs_mask_store(struct device *dev,
g->ops.gr.set_gpc_tpc_mask(g, 0);
nvgpu_gr_obj_ctx_set_golden_image_size(g->gr->golden_image, 0);
nvgpu_gr_obj_ctx_deinit(g, g->gr->golden_image);
g->gr->golden_image = NULL;
nvgpu_gr_obj_ctx_set_golden_image_size(gr_golden_image, 0);
nvgpu_gr_obj_ctx_deinit(g, gr_golden_image);
nvgpu_gr_reset_golden_image_ptr(g);
nvgpu_gr_falcon_remove_support(g, g->gr->falcon);
g->gr->falcon = NULL;
nvgpu_gr_falcon_remove_support(g, gr_falcon);
nvgpu_gr_reset_falcon_ptr(g);
nvgpu_gr_config_deinit(g, gr_config);
/* Cause next poweron to reinit just gr */