gpu: nvgpu: gr/init update

Move the gr_gk20a_init_fs_state() function to common/gr/init as
nvgpu_gr_init_fs_state().

JIRA NVGPU-1885

Change-Id: I37aad483be268e2b722883719376beb142c0b7ea
Signed-off-by: Vinod G <vinodg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2072413
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Vinod G
Date:      2019-03-13 23:00:17 -07:00
Committed: mobile promotions
Commit:    43672dd237 (parent e29c1a6c03)

6 changed files with 55 additions and 55 deletions
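For orientation, a minimal sketch of the call pattern this change produces. The stub declarations and the chip_init_fs_state() name are placeholders, not the real nvgpu headers; the real callers are gr_gm20b_init_fs_state() and gr_gv11b_init_fs_state(), shown in the hunks below.

/* Hypothetical stubs, not the actual nvgpu sources. */
struct gk20a;

/* Common helper introduced by this change. */
int nvgpu_gr_init_fs_state(struct gk20a *g);

/*
 * Shape of a chip-specific init_fs_state after this change: the common
 * floorsweeping setup runs via nvgpu_gr_init_fs_state(), with any
 * chip-specific register programming around it.
 */
static int chip_init_fs_state(struct gk20a *g)
{
	int err = nvgpu_gr_init_fs_state(g);	/* was gr_gk20a_init_fs_state(g) */

	if (err != 0) {
		return err;
	}

	/* chip-specific programming continues here */
	return 0;
}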


@@ -30,3 +30,55 @@ u32 nvgpu_gr_get_idle_timeout(struct gk20a *g)
 		g->gr_idle_timeout_default : UINT_MAX;
 }
 
+int nvgpu_gr_init_fs_state(struct gk20a *g)
+{
+	u32 tpc_index, gpc_index;
+	u32 sm_id = 0;
+	u32 fuse_tpc_mask;
+	u32 gpc_cnt, tpc_cnt, max_tpc_cnt;
+	int err = 0;
+	struct nvgpu_gr_config *gr_config = g->gr.config;
+
+	nvgpu_log_fn(g, " ");
+
+	if (g->ops.gr.init_sm_id_table != NULL) {
+		err = g->ops.gr.init_sm_id_table(g);
+		if (err != 0) {
+			return err;
+		}
+
+		/* Is table empty ? */
+		if (g->gr.no_of_sm == 0U) {
+			return -EINVAL;
+		}
+	}
+
+	for (sm_id = 0; sm_id < g->gr.no_of_sm; sm_id++) {
+		tpc_index = g->gr.sm_to_cluster[sm_id].tpc_index;
+		gpc_index = g->gr.sm_to_cluster[sm_id].gpc_index;
+
+		g->ops.gr.program_sm_id_numbering(g, gpc_index, tpc_index, sm_id);
+	}
+
+	g->ops.gr.init.pd_tpc_per_gpc(g);
+
+	/* gr__setup_pd_mapping */
+	g->ops.gr.setup_rop_mapping(g, &g->gr);
+	g->ops.gr.init.pd_skip_table_gpc(g);
+
+	fuse_tpc_mask = g->ops.gr.config.get_gpc_tpc_mask(g, gr_config, 0);
+	gpc_cnt = nvgpu_gr_config_get_gpc_count(gr_config);
+	tpc_cnt = nvgpu_gr_config_get_tpc_count(gr_config);
+	max_tpc_cnt = nvgpu_gr_config_get_max_tpc_count(gr_config);
+
+	if ((g->tpc_fs_mask_user != 0U) &&
+		(fuse_tpc_mask == BIT32(max_tpc_cnt) - 1U)) {
+		u32 val = g->tpc_fs_mask_user;
+		val &= BIT32(max_tpc_cnt) - U32(1);
+		tpc_cnt = (u32)hweight32(val);
+	}
+
+	g->ops.gr.init.cwd_gpcs_tpcs_num(g, gpc_cnt, tpc_cnt);
+
+	return err;
+}
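As a side note on the user floorsweeping mask handling above, a standalone sketch (plain C with made-up values, not nvgpu code) of how the effective TPC count is derived when the fuses expose all TPCs and the user supplies a mask:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example values; the real ones come from nvgpu_gr_config and fuses. */
	uint32_t max_tpc_cnt = 8U;
	uint32_t fuse_tpc_mask = (1U << max_tpc_cnt) - 1U;	/* all TPCs present */
	uint32_t tpc_fs_mask_user = 0x2DU;			/* user mask 0b00101101 */
	uint32_t tpc_cnt = max_tpc_cnt;		/* stands in for nvgpu_gr_config_get_tpc_count() */

	if ((tpc_fs_mask_user != 0U) &&
	    (fuse_tpc_mask == (1U << max_tpc_cnt) - 1U)) {
		uint32_t val = tpc_fs_mask_user & ((1U << max_tpc_cnt) - 1U);
		/* hweight32() in the kernel is a population count */
		tpc_cnt = (uint32_t)__builtin_popcount(val);
	}

	printf("effective tpc_cnt = %u\n", tpc_cnt);	/* prints 4 */
	return 0;
}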


@@ -914,58 +914,6 @@ int gr_gk20a_init_sm_id_table(struct gk20a *g)
 	return 0;
 }
 
-int gr_gk20a_init_fs_state(struct gk20a *g)
-{
-	struct gr_gk20a *gr = &g->gr;
-	u32 tpc_index, gpc_index;
-	u32 sm_id = 0;
-	u32 fuse_tpc_mask;
-	u32 gpc_cnt, tpc_cnt;
-	int err = 0;
-
-	nvgpu_log_fn(g, " ");
-
-	if (g->ops.gr.init_sm_id_table != NULL) {
-		err = g->ops.gr.init_sm_id_table(g);
-		if (err != 0) {
-			return err;
-		}
-
-		/* Is table empty ? */
-		if (g->gr.no_of_sm == 0U) {
-			return -EINVAL;
-		}
-	}
-
-	for (sm_id = 0; sm_id < g->gr.no_of_sm; sm_id++) {
-		tpc_index = g->gr.sm_to_cluster[sm_id].tpc_index;
-		gpc_index = g->gr.sm_to_cluster[sm_id].gpc_index;
-
-		g->ops.gr.program_sm_id_numbering(g, gpc_index, tpc_index, sm_id);
-	}
-
-	g->ops.gr.init.pd_tpc_per_gpc(g);
-
-	/* gr__setup_pd_mapping stubbed for gk20a */
-	g->ops.gr.setup_rop_mapping(g, gr);
-	g->ops.gr.init.pd_skip_table_gpc(g);
-
-	fuse_tpc_mask = g->ops.gr.config.get_gpc_tpc_mask(g, gr->config, 0);
-	gpc_cnt = nvgpu_gr_config_get_gpc_count(gr->config);
-	tpc_cnt = nvgpu_gr_config_get_tpc_count(gr->config);
-
-	if ((g->tpc_fs_mask_user != 0U) &&
-		(fuse_tpc_mask == BIT32(nvgpu_gr_config_get_max_tpc_count(gr->config)) - 1U)) {
-		u32 val = g->tpc_fs_mask_user;
-		val &= BIT32(nvgpu_gr_config_get_max_tpc_count(gr->config)) - U32(1);
-		tpc_cnt = (u32)hweight32(val);
-	}
-
-	g->ops.gr.init.cwd_gpcs_tpcs_num(g, gpc_cnt, tpc_cnt);
-
-	return err;
-}
-
 int gr_gk20a_fecs_ctx_image_save(struct channel_gk20a *c, u32 save_type)
 {
 	struct gk20a *g = c->g;


@@ -433,7 +433,6 @@ void gr_gk20a_commit_global_pagepool(struct gk20a *g,
 	u64 addr, u32 size, bool patch);
 void gk20a_gr_set_shader_exceptions(struct gk20a *g, u32 data);
 void gr_gk20a_enable_hww_exceptions(struct gk20a *g);
-int gr_gk20a_init_fs_state(struct gk20a *g);
 int gr_gk20a_setup_rop_mapping(struct gk20a *g, struct gr_gk20a *gr);
 int gr_gk20a_init_ctxsw_ucode(struct gk20a *g);
 int gr_gk20a_load_ctxsw_ucode(struct gk20a *g);


@@ -677,7 +677,7 @@ int gr_gm20b_init_fs_state(struct gk20a *g)
 	nvgpu_log_fn(g, " ");
 
-	err = gr_gk20a_init_fs_state(g);
+	err = nvgpu_gr_init_fs_state(g);
 	if (err != 0) {
 		return err;
 	}


@@ -2923,7 +2923,7 @@ int gr_gv11b_init_fs_state(struct gk20a *g)
 		gr_debug_0_scg_force_slow_drain_tpc_enabled_f());
 	gk20a_writel(g, gr_debug_0_r(), data);
 
-	err = gr_gk20a_init_fs_state(g);
+	err = nvgpu_gr_init_fs_state(g);
 	if (err != 0) {
 		return err;
 	}


@@ -29,5 +29,6 @@
 #define NVGPU_GR_IDLE_CHECK_MAX_US 200U
 
 u32 nvgpu_gr_get_idle_timeout(struct gk20a *g);
+int nvgpu_gr_init_fs_state(struct gk20a *g);
 
 #endif /* NVGPU_GR_H */