gpu: nvgpu: remove g->ops.gr.dump_ctxsw_stats

g->ops.gr.dump_ctxsw_stats is redundant since we can call
g->ops.gr.ctxsw_prog.dump_ctxsw_stats directly instead.

Also remove gr_gp10b_dump_ctxsw_stats, since it too becomes redundant.

Jira NVGPU-1527

Change-Id: I0ac5bcf6cf3dca30954d302766431496971708f4
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1986814
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 93a05937f0 (parent b10960e7b7)
Author: Deepak Nibade <dnibade@nvidia.com>
Date:   2019-01-03 19:43:03 +05:30
Committed-by: mobile promotions

7 changed files with 3 additions and 16 deletions
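
For context, the op being removed is a one-level trampoline: a top-level gr HAL
op whose only job was to forward to the nested ctxsw_prog sub-op. Below is a
minimal sketch of that shape, using placeholder types and the made-up name
wrapper_dump_ctxsw_stats rather than the real nvgpu definitions; the nested
op's signature is read off the call site in the first hunk.

    /* Placeholder stand-ins for the nvgpu types involved (not the real layouts). */
    struct gk20a;
    struct vm_gk20a;
    struct nvgpu_mem { void *cpu_va; };
    struct nvgpu_gr_ctx { struct nvgpu_mem mem; };

    struct gpu_ops {
            struct {
                    /* Before: redundant top-level op, removed by this change. */
                    void (*dump_ctxsw_stats)(struct gk20a *g, struct vm_gk20a *vm,
                                    struct nvgpu_gr_ctx *gr_ctx);
                    struct {
                            /* After: callers invoke this nested op directly. */
                            void (*dump_ctxsw_stats)(struct gk20a *g,
                                            struct nvgpu_mem *ctx_mem);
                    } ctxsw_prog;
            } gr;
    };

    struct gk20a {
            struct gpu_ops ops;
    };

    /*
     * The deleted gr_gp10b_dump_ctxsw_stats() was a pure pass-through like
     * this (note the vm argument was never used), which is why dropping it
     * and calling the nested op directly is behaviour-preserving.
     */
    static void wrapper_dump_ctxsw_stats(struct gk20a *g, struct vm_gk20a *vm,
                    struct nvgpu_gr_ctx *gr_ctx)
    {
            (void)vm;
            g->ops.gr.ctxsw_prog.dump_ctxsw_stats(g, &gr_ctx->mem);
    }

With the unit-level ctxsw_prog sub-ops owning the functionality, the chip-level
forwarding layer adds nothing, which is what the hunks below delete.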


@@ -2585,9 +2585,9 @@ void gr_gk20a_free_gr_ctx(struct gk20a *g,
 	gr_gk20a_free_channel_patch_ctx(g, vm, gr_ctx);
 	gr_gk20a_free_channel_pm_ctx(g, vm, gr_ctx);
 
-	if ((g->ops.gr.dump_ctxsw_stats != NULL) &&
+	if ((g->ops.gr.ctxsw_prog.dump_ctxsw_stats != NULL) &&
 		g->gr.ctx_vars.dump_ctxsw_stats_on_channel_close) {
-		g->ops.gr.dump_ctxsw_stats(g, vm, gr_ctx);
+		g->ops.gr.ctxsw_prog.dump_ctxsw_stats(g, &gr_ctx->mem);
 	}
 
 	nvgpu_dma_unmap_free(vm, &gr_ctx->pagepool_ctxsw_buffer);
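
The only call site, gr_gk20a_free_gr_ctx() above, keeps the same guard shape:
the dump runs only when the nested op is wired up for the current chip and the
dump_ctxsw_stats_on_channel_close knob is set. A hedged sketch of that pattern,
reusing the placeholder types from the sketch above, with a made-up helper name
and the knob passed as a plain flag (in the real code it lives in
g->gr.ctx_vars):

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical helper mirroring the guarded call above; not the actual
     * nvgpu function. */
    static void maybe_dump_ctxsw_stats_on_free(struct gk20a *g,
                    struct nvgpu_gr_ctx *gr_ctx, bool dump_on_channel_close)
    {
            /* Optional HAL op: chips that do not provide it leave it NULL. */
            if ((g->ops.gr.ctxsw_prog.dump_ctxsw_stats != NULL) &&
                            dump_on_channel_close) {
                    /* Pass the context image memory directly; the old
                     * wrapper's unused vm argument disappears with it. */
                    g->ops.gr.ctxsw_prog.dump_ctxsw_stats(g, &gr_ctx->mem);
            }
    }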


@@ -1120,12 +1120,6 @@ int gr_gp10b_init_ctxsw_preemption_mode(struct gk20a *g,
 	return 0;
 }
 
-void gr_gp10b_dump_ctxsw_stats(struct gk20a *g, struct vm_gk20a *vm,
-		struct nvgpu_gr_ctx *gr_ctx)
-{
-	g->ops.gr.ctxsw_prog.dump_ctxsw_stats(g, &gr_ctx->mem);
-}
-
 void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
 		struct nvgpu_gr_ctx *gr_ctx, struct nvgpu_mem *ctxheader)
 {


@@ -1,7 +1,7 @@
 /*
  * GP10B GPU GR
  *
- * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -107,8 +107,6 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
 		struct nvgpu_gr_ctx *gr_ctx, struct nvgpu_mem *ctxheader);
 int gr_gp10b_dump_gr_status_regs(struct gk20a *g,
 		struct gk20a_debug_output *o);
-void gr_gp10b_dump_ctxsw_stats(struct gk20a *g, struct vm_gk20a *vm,
-		struct nvgpu_gr_ctx *gr_ctx);
 int gr_gp10b_wait_empty(struct gk20a *g, unsigned long duration_ms,
 		u32 expect_delay);
 void gr_gp10b_commit_global_attrib_cb(struct gk20a *g,


@@ -353,7 +353,6 @@ static const struct gpu_ops gp10b_ops = {
 			gr_gp10b_init_gfxp_wfi_timeout_count,
 		.get_max_gfxp_wfi_timeout_count =
 			gr_gp10b_get_max_gfxp_wfi_timeout_count,
-		.dump_ctxsw_stats = gr_gp10b_dump_ctxsw_stats,
 		.fecs_host_int_enable = gr_gk20a_fecs_host_int_enable,
 		.handle_notify_pending = gk20a_gr_handle_notify_pending,
 		.handle_semaphore_pending = gk20a_gr_handle_semaphore_pending,
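
The HAL wiring itself is a designated-initializer entry in a per-chip static
const table (gp10b_ops here, gv11b_ops and tu104_ops below), so removing the op
means deleting one line per chip. A small self-contained illustration, with
made-up names, of why that is enough: members left out of a static initializer
are zero-initialized, so an unset op reads as NULL and call sites can guard on
it, exactly as the free path above does.

    #include <assert.h>
    #include <stddef.h>

    struct chip;

    struct chip_ops {
            void (*required_op)(struct chip *c);
            void (*optional_op)(struct chip *c);	/* may legitimately stay NULL */
    };

    static void chip_required_op(struct chip *c)
    {
            (void)c;
    }

    static const struct chip_ops example_ops = {
            .required_op = chip_required_op,
            /* .optional_op deliberately not listed: static storage is
             * zero-initialized, so the pointer is NULL. */
    };

    int main(void)
    {
            assert(example_ops.optional_op == NULL);
            return 0;
    }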


@@ -431,7 +431,6 @@ static const struct gpu_ops gv11b_ops = {
 		.get_max_gfxp_wfi_timeout_count =
 			gr_gv11b_get_max_gfxp_wfi_timeout_count,
 		.ecc_init_scrub_reg = gr_gv11b_ecc_init_scrub_reg,
-		.dump_ctxsw_stats = gr_gp10b_dump_ctxsw_stats,
 		.fecs_host_int_enable = gr_gv11b_fecs_host_int_enable,
 		.handle_ssync_hww = gr_gv11b_handle_ssync_hww,
 		.handle_notify_pending = gk20a_gr_handle_notify_pending,


@@ -484,8 +484,6 @@ struct gpu_ops {
 		void (*ecc_init_scrub_reg)(struct gk20a *g);
 		u32 (*get_gpcs_swdx_dss_zbc_c_format_reg)(struct gk20a *g);
 		u32 (*get_gpcs_swdx_dss_zbc_z_format_reg)(struct gk20a *g);
-		void (*dump_ctxsw_stats)(struct gk20a *g, struct vm_gk20a *vm,
-				struct nvgpu_gr_ctx *gr_ctx);
 		void (*fecs_host_int_enable)(struct gk20a *g);
 		int (*handle_ssync_hww)(struct gk20a *g);
 		int (*handle_notify_pending)(struct gk20a *g,


@@ -500,7 +500,6 @@ static const struct gpu_ops tu104_ops = {
 			gr_gv11b_init_gfxp_wfi_timeout_count,
 		.get_max_gfxp_wfi_timeout_count =
 			gr_gv11b_get_max_gfxp_wfi_timeout_count,
-		.dump_ctxsw_stats = gr_gp10b_dump_ctxsw_stats,
 		.fecs_host_int_enable = gr_gv11b_fecs_host_int_enable,
 		.handle_ssync_hww = gr_gv11b_handle_ssync_hww,
 		.handle_notify_pending = gk20a_gr_handle_notify_pending,