gpu: nvgpu: add hal.gr.init hal to enable/disable fe_go_idle timeout

Add a new HAL operation g->ops.gr.init.fe_go_idle_timeout() to the
hal.gr.init unit to enable/disable the fe_go_idle timeout

Use this hal in gr_gk20a_init_golden_ctx_image() instead of direct
register access

Remove the fe_go_idle timeout disable/enable code from
gk20a_init_sw_bundle() since the parent API
gr_gk20a_init_golden_ctx_image() already takes care of it

Jira NVGPU-2961

Change-Id: Ice72699059f031ca0b1994fa57661716a6c66cd2
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2072550
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Deepak Nibade
2019-03-13 19:34:00 +05:30
committed by mobile promotions
parent 15d8941341
commit 04786d1a2e
9 changed files with 31 additions and 25 deletions

View File

@@ -998,9 +998,6 @@ int gk20a_init_sw_bundle(struct gk20a *g)
int err = 0;
unsigned int i;
/* disable fe_go_idle */
gk20a_writel(g, gr_fe_go_idle_timeout_r(),
gr_fe_go_idle_timeout_count_disabled_f());
/* enable pipe mode override */
gk20a_writel(g, gr_pipe_bundle_config_r(),
gr_pipe_bundle_config_override_pipe_mode_enabled_f());
@@ -1050,10 +1047,6 @@ int gk20a_init_sw_bundle(struct gk20a *g)
err = g->ops.gr.init.wait_idle(g);
/* restore fe_go_idle */
gk20a_writel(g, gr_fe_go_idle_timeout_r(),
gr_fe_go_idle_timeout_count_prod_f());
return err;
error:
@@ -1061,10 +1054,6 @@ error:
gk20a_writel(g, gr_pipe_bundle_config_r(),
gr_pipe_bundle_config_override_pipe_mode_disabled_f());
/* restore fe_go_idle */
gk20a_writel(g, gr_fe_go_idle_timeout_r(),
gr_fe_go_idle_timeout_count_prod_f());
return err;
}
@@ -1137,8 +1126,7 @@ int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
}
/* disable fe_go_idle */
gk20a_writel(g, gr_fe_go_idle_timeout_r(),
gr_fe_go_idle_timeout_count_disabled_f());
g->ops.gr.init.fe_go_idle_timeout(g, false);
err = g->ops.gr.commit_global_ctx_buffers(g, gr_ctx, false);
if (err != 0) {
@@ -1166,8 +1154,7 @@ int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
restore_fe_go_idle:
/* restore fe_go_idle */
gk20a_writel(g, gr_fe_go_idle_timeout_r(),
gr_fe_go_idle_timeout_count_prod_f());
g->ops.gr.init.fe_go_idle_timeout(g, true);
if ((err != 0) || (g->ops.gr.init.wait_idle(g) != 0)) {
goto clean_up;

View File

@@ -424,15 +424,16 @@ static const struct gpu_ops gm20b_ops = {
.get_gpcs_swdx_dss_zbc_z_format_reg = NULL,
},
.init = {
.pd_tpc_per_gpc = gm20b_gr_init_pd_tpc_per_gpc,
.pd_skip_table_gpc = gm20b_gr_init_pd_skip_table_gpc,
.cwd_gpcs_tpcs_num = gm20b_gr_init_cwd_gpcs_tpcs_num,
.wait_idle = gm20b_gr_init_wait_idle,
.wait_fe_idle = gm20b_gr_init_wait_fe_idle,
.fe_pwr_mode_force_on =
gm20b_gr_init_fe_pwr_mode_force_on,
.override_context_reset =
gm20b_gr_init_override_context_reset,
.wait_idle = gm20b_gr_init_wait_idle,
.wait_fe_idle = gm20b_gr_init_wait_fe_idle,
.pd_tpc_per_gpc = gm20b_gr_init_pd_tpc_per_gpc,
.pd_skip_table_gpc = gm20b_gr_init_pd_skip_table_gpc,
.cwd_gpcs_tpcs_num = gm20b_gr_init_cwd_gpcs_tpcs_num,
.fe_go_idle_timeout = gm20b_gr_init_fe_go_idle_timeout,
},
},
.fb = {

View File

@@ -505,6 +505,7 @@ static const struct gpu_ops gp10b_ops = {
.override_context_reset =
gm20b_gr_init_override_context_reset,
.preemption_state = gp10b_gr_init_preemption_state,
.fe_go_idle_timeout = gm20b_gr_init_fe_go_idle_timeout,
},
},
.fb = {

View File

@@ -629,15 +629,16 @@ static const struct gpu_ops gv100_ops = {
gv100_gr_hwpm_map_get_active_fbpa_mask,
},
.init = {
.pd_tpc_per_gpc = gm20b_gr_init_pd_tpc_per_gpc,
.pd_skip_table_gpc = gm20b_gr_init_pd_skip_table_gpc,
.cwd_gpcs_tpcs_num = gm20b_gr_init_cwd_gpcs_tpcs_num,
.wait_idle = gm20b_gr_init_wait_idle,
.wait_fe_idle = gm20b_gr_init_wait_fe_idle,
.fe_pwr_mode_force_on =
gm20b_gr_init_fe_pwr_mode_force_on,
.override_context_reset =
gm20b_gr_init_override_context_reset,
.wait_idle = gm20b_gr_init_wait_idle,
.wait_fe_idle = gm20b_gr_init_wait_fe_idle,
.pd_tpc_per_gpc = gm20b_gr_init_pd_tpc_per_gpc,
.pd_skip_table_gpc = gm20b_gr_init_pd_skip_table_gpc,
.cwd_gpcs_tpcs_num = gm20b_gr_init_cwd_gpcs_tpcs_num,
.fe_go_idle_timeout = gm20b_gr_init_fe_go_idle_timeout,
},
},
.fb = {

View File

@@ -598,6 +598,7 @@ static const struct gpu_ops gv11b_ops = {
.override_context_reset =
gm20b_gr_init_override_context_reset,
.preemption_state = gv11b_gr_init_preemption_state,
.fe_go_idle_timeout = gm20b_gr_init_fe_go_idle_timeout,
},
},
.fb = {

View File

@@ -269,3 +269,14 @@ void gm20b_gr_init_override_context_reset(struct gk20a *g)
nvgpu_udelay(FECS_CTXSW_RESET_DELAY_US);
(void) nvgpu_readl(g, gr_fecs_ctxsw_reset_ctl_r());
}
/*
 * Enable or disable the FE go_idle timeout.
 *
 * enable == true writes the production timeout count to
 * gr_fe_go_idle_timeout_r(); enable == false writes the disabled count.
 */
void gm20b_gr_init_fe_go_idle_timeout(struct gk20a *g, bool enable)
{
	nvgpu_writel(g, gr_fe_go_idle_timeout_r(),
		     enable ? gr_fe_go_idle_timeout_count_prod_f() :
			      gr_fe_go_idle_timeout_count_disabled_f());
}

View File

@@ -35,5 +35,6 @@ int gm20b_gr_init_wait_idle(struct gk20a *g);
int gm20b_gr_init_wait_fe_idle(struct gk20a *g);
int gm20b_gr_init_fe_pwr_mode_force_on(struct gk20a *g, bool force_on);
void gm20b_gr_init_override_context_reset(struct gk20a *g);
void gm20b_gr_init_fe_go_idle_timeout(struct gk20a *g, bool enable);
#endif /* NVGPU_GR_INIT_GM20B_H */

View File

@@ -684,6 +684,8 @@ struct gpu_ops {
int (*preemption_state)(struct gk20a *g,
u32 gfxp_wfi_timeout_count,
bool gfxp_wfi_timeout_unit_usec);
void (*fe_go_idle_timeout)(struct gk20a *g,
bool enable);
} init;
u32 (*fecs_falcon_base_addr)(void);

View File

@@ -666,6 +666,7 @@ static const struct gpu_ops tu104_ops = {
.override_context_reset =
gm20b_gr_init_override_context_reset,
.preemption_state = gv11b_gr_init_preemption_state,
.fe_go_idle_timeout = gm20b_gr_init_fe_go_idle_timeout,
},
},
.fb = {