gpu: nvgpu: move gk20a_gr_suspend to common.gr.init

Move gk20a_gr_suspend function from gr_gk20a.c to common.gr.init as
nvgpu_gr_suspend function.
Update the files that use the gk20a_gr_suspend function.

JIRA NVGPU-1885

Change-Id: I1eb27d644428cf7c637f7a330762a87e6e788d08
Signed-off-by: Vinod G <vinodg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2083110
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Vinod G
2019-03-27 12:25:05 -07:00
committed by mobile promotions
parent a9c97031b5
commit e086c6442d
5 changed files with 31 additions and 31 deletions

View File

@@ -79,6 +79,34 @@ static void gr_load_tpc_mask(struct gk20a *g)
g->ops.gr.init.tpc_mask(g, 0, pes_tpc_mask);
}
/*
 * Suspend the graphics (GR) engine: wait for GR to go idle, then disable
 * FIFO access, GR interrupts and all GR exceptions, invalidate the channel
 * lookup TLB and mark GR as uninitialized.
 *
 * Returns 0 on success, or the error code from wait_empty() if the engine
 * failed to idle — in that case no state is changed.
 */
int nvgpu_gr_suspend(struct gk20a *g)
{
int ret = 0;
nvgpu_log_fn(g, " ");
/* Wait for GR to drain/idle; bail out early without disabling anything. */
ret = g->ops.gr.init.wait_empty(g);
if (ret != 0) {
return ret;
}
/* Disable fifo access */
g->ops.gr.init.fifo_access(g, false);
/* disable gr intr */
g->ops.gr.intr.enable_interrupts(g, false);
/* disable all exceptions */
g->ops.gr.intr.enable_exceptions(g, g->gr.config, false);
/* Invalidate the channel lookup TLB so stale entries are not reused. */
nvgpu_gr_flush_channel_tlb(g);
g->gr.initialized = false;
nvgpu_log_fn(g, "done");
return ret;
}
/* invalidate channel lookup tlb */
void nvgpu_gr_flush_channel_tlb(struct gk20a *g)
{

View File

@@ -42,6 +42,7 @@
#include <nvgpu/gk20a.h>
#include <nvgpu/channel_sync.h>
#include <nvgpu/pmu/pstate.h>
#include <nvgpu/gr/gr.h>
#include <trace/events/gk20a.h>
@@ -97,7 +98,7 @@ int gk20a_prepare_poweroff(struct gk20a *g)
ret |= nvgpu_sec2_destroy(g);
}
ret |= gk20a_gr_suspend(g);
ret |= nvgpu_gr_suspend(g);
ret |= nvgpu_mm_suspend(g);
ret |= gk20a_fifo_suspend(g);

View File

@@ -3608,34 +3608,6 @@ int gr_gk20a_fecs_set_reglist_virtual_addr(struct gk20a *g, u64 pmu_va)
.mailbox.fail = 0U}, false);
}
/*
 * Legacy GR suspend path (removed by this change; the identical logic now
 * lives in common.gr.init as nvgpu_gr_suspend): wait for GR to idle, then
 * disable FIFO access, GR interrupts and all GR exceptions, invalidate the
 * channel lookup TLB and mark GR as uninitialized.
 *
 * Returns 0 on success, or the error code from wait_empty() if the engine
 * failed to idle — in that case no state is changed.
 */
int gk20a_gr_suspend(struct gk20a *g)
{
int ret = 0;
nvgpu_log_fn(g, " ");
/* Wait for GR to drain/idle; bail out early without disabling anything. */
ret = g->ops.gr.init.wait_empty(g);
if (ret != 0) {
return ret;
}
/* Disable fifo access */
g->ops.gr.init.fifo_access(g, false);
/* disable gr intr */
g->ops.gr.intr.enable_interrupts(g, false);
/* disable all exceptions */
g->ops.gr.intr.enable_exceptions(g, g->gr.config, false);
/* Invalidate the channel lookup TLB so stale entries are not reused. */
nvgpu_gr_flush_channel_tlb(g);
g->gr.initialized = false;
nvgpu_log_fn(g, "done");
return ret;
}
static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
u32 addr,
bool is_quad, u32 quad,

View File

@@ -319,8 +319,6 @@ void gr_gk20a_init_cg_mode(struct gk20a *g, u32 cgmode, u32 mode_config);
bool gk20a_gr_sm_debugger_attached(struct gk20a *g);
u32 gk20a_gr_get_sm_no_lock_down_hww_global_esr_mask(struct gk20a *g);
int gk20a_gr_suspend(struct gk20a *g);
struct nvgpu_dbg_reg_op;
int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
struct nvgpu_dbg_reg_op *ctx_ops, u32 num_ops,

View File

@@ -28,6 +28,7 @@
#define NVGPU_GR_IDLE_CHECK_DEFAULT_US 10U
#define NVGPU_GR_IDLE_CHECK_MAX_US 200U
int nvgpu_gr_suspend(struct gk20a *g);
void nvgpu_gr_flush_channel_tlb(struct gk20a *g);
u32 nvgpu_gr_get_idle_timeout(struct gk20a *g);
int nvgpu_gr_init_fs_state(struct gk20a *g);