gpu: nvgpu: disable graphics-specific init functions in MIG mode

MIG mode does not support graphics, ELPG, or use cases like TPC
floorsweeping. Skip all such initialization functions in the common.gr
unit when MIG mode is enabled.

Set can_elpg to false if MIG mode is enabled.

Jira NVGPU-5648

Change-Id: I03656dc6289e49a21ec7783430db9c8564c6bf1f
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2411741
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Lakshmanan M <lm@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Deepak Nibade
Date:      2020-09-10 19:58:35 +05:30
Committer: Alex Waterman
Parent:    7a937a6190
Commit:    6a69ea235e

6 changed files with 94 additions and 79 deletions
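
For illustration, a minimal standalone sketch of the gating pattern the diffs below apply. Here gr_graphics_only_init() is a hypothetical placeholder for the zcull/ZBC/preemption-state paths being skipped, not an actual nvgpu function; struct gk20a, nvgpu_is_enabled() and NVGPU_SUPPORT_MIG are the real names used in this change:

/* Sketch only: not the literal nvgpu call sites. */
static int gr_setup_hw_sketch(struct gk20a *g)
{
	int err = 0;

#ifdef CONFIG_NVGPU_GRAPHICS
	/* Graphics-only HW init is skipped entirely under MIG. */
	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
		err = gr_graphics_only_init(g);	/* hypothetical helper */
		if (err != 0) {
			return err;
		}
	}
#endif
	/* Compute-side init continues unconditionally. */
	return err;
}

/* ELPG is likewise forced off when MIG is enabled: */
static void pm_vars_sketch(struct gk20a *g)
{
	/* ...platform defaults for can_elpg are set earlier... */
	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
		g->can_elpg = false;
	}
}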


@@ -73,6 +73,7 @@ static void gr_load_tpc_mask(struct gk20a *g, struct nvgpu_gr_config *config)
 	nvgpu_log_info(g, "pes_tpc_mask %u\n", pes_tpc_mask);
 #ifdef CONFIG_NVGPU_NON_FUSA
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
 	fuse_tpc_mask = g->ops.gr.config.get_gpc_tpc_mask(g, config, 0);
 	if ((g->tpc_fs_mask_user != 0U) &&
 			(g->tpc_fs_mask_user != fuse_tpc_mask)) {
@@ -88,6 +89,7 @@ static void gr_load_tpc_mask(struct gk20a *g, struct nvgpu_gr_config *config)
 			pes_tpc_mask = val;
 		}
 	}
+	}
 #endif
 	g->ops.gr.init.tpc_mask(g, 0, pes_tpc_mask);
@@ -130,8 +132,10 @@ int nvgpu_gr_fs_state_init(struct gk20a *g, struct nvgpu_gr_config *config)
 	g->ops.gr.init.pd_tpc_per_gpc(g, config);
 #ifdef CONFIG_NVGPU_GRAPHICS
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
 	/* gr__setup_pd_mapping */
 	g->ops.gr.init.rop_mapping(g, config);
+	}
 #endif
 	g->ops.gr.init.pd_skip_table_gpc(g, config);
@@ -140,6 +144,7 @@ int nvgpu_gr_fs_state_init(struct gk20a *g, struct nvgpu_gr_config *config)
 	tpc_cnt = nvgpu_gr_config_get_tpc_count(config);
 #ifdef CONFIG_NVGPU_NON_FUSA
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
 	fuse_tpc_mask = g->ops.gr.config.get_gpc_tpc_mask(g, config, 0);
 	max_tpc_cnt = nvgpu_gr_config_get_max_tpc_count(config);
@@ -150,6 +155,7 @@ int nvgpu_gr_fs_state_init(struct gk20a *g, struct nvgpu_gr_config *config)
 		val &= nvgpu_safe_sub_u32(BIT32(max_tpc_cnt), U32(1));
 		tpc_cnt = (u32)hweight32(val);
 	}
+	}
 #endif
 	g->ops.gr.init.cwd_gpcs_tpcs_num(g, gpc_cnt, tpc_cnt);


@@ -251,17 +251,24 @@ static int gr_init_setup_hw(struct gk20a *g, struct nvgpu_gr *gr)
 	}
 #ifdef CONFIG_NVGPU_GRAPHICS
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
 	err = nvgpu_gr_zcull_init_hw(g, gr->zcull, gr->config);
 	if (err != 0) {
 		goto out;
 	}
-#endif /* CONFIG_NVGPU_GRAPHICS */
-#ifdef CONFIG_NVGPU_GRAPHICS
 	err = nvgpu_gr_zbc_load_table(g, gr->zbc);
 	if (err != 0) {
 		goto out;
 	}
+	if (g->ops.gr.init.preemption_state != NULL) {
+		err = g->ops.gr.init.preemption_state(g);
+		if (err != 0) {
+			goto out;
+		}
+	}
+	}
 #endif /* CONFIG_NVGPU_GRAPHICS */
 	/*
@@ -274,15 +281,6 @@ static int gr_init_setup_hw(struct gk20a *g, struct nvgpu_gr *gr)
 		g->ops.gr.init.lg_coalesce(g, 0);
 	}
-#ifdef CONFIG_NVGPU_GRAPHICS
-	if (g->ops.gr.init.preemption_state != NULL) {
-		err = g->ops.gr.init.preemption_state(g);
-		if (err != 0) {
-			goto out;
-		}
-	}
-#endif
 	/* floorsweep anything left */
 	err = nvgpu_gr_fs_state_init(g, gr->config);
 	if (err != 0) {
@@ -488,7 +486,13 @@ static int gr_init_setup_sw(struct gk20a *g, struct nvgpu_gr *gr)
 	}
 #endif
+	err = gr_init_ctx_bufs(g, gr);
+	if (err != 0) {
+		goto clean_up;
+	}
 #ifdef CONFIG_NVGPU_GRAPHICS
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
 	err = nvgpu_gr_config_init_map_tiles(g, gr->config);
 	if (err != 0) {
 		goto clean_up;
@@ -500,18 +504,12 @@ static int gr_init_setup_sw(struct gk20a *g, struct nvgpu_gr *gr)
 	if (err != 0) {
 		goto clean_up;
 	}
-#endif /* CONFIG_NVGPU_GRAPHICS */
-	err = gr_init_ctx_bufs(g, gr);
-	if (err != 0) {
-		goto clean_up;
-	}
-#ifdef CONFIG_NVGPU_GRAPHICS
 	err = nvgpu_gr_zbc_init(g, &gr->zbc);
 	if (err != 0) {
 		goto clean_up;
 	}
+	}
 #endif /* CONFIG_NVGPU_GRAPHICS */
 	gr->remove_support = gr_remove_support;


@@ -234,10 +234,12 @@ static bool gr_config_alloc_struct_mem(struct gk20a *g,
 	gpc_size = nvgpu_safe_mult_u64((size_t)config->gpc_count, sizeof(u32));
 	config->gpc_tpc_count = nvgpu_kzalloc(g, gpc_size);
 #ifdef CONFIG_NVGPU_GRAPHICS
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
 	config->max_zcull_per_gpc_count = nvgpu_get_litter_value(g,
 			GPU_LIT_NUM_ZCULL_BANKS);
 	config->gpc_zcb_count = nvgpu_kzalloc(g, gpc_size);
+	}
 #endif
 	config->gpc_ppc_count = nvgpu_kzalloc(g, gpc_size);
@@ -403,11 +405,13 @@ struct nvgpu_gr_config *nvgpu_gr_config_init(struct gk20a *g)
 			config->gpc_tpc_count[gpc_index]);
 #ifdef CONFIG_NVGPU_GRAPHICS
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
 	config->gpc_zcb_count[gpc_index] =
 		g->ops.gr.config.get_zcull_count_in_gpc(g, config,
 			gpc_index);
 	config->zcb_count = nvgpu_safe_add_u32(config->zcb_count,
 			config->gpc_zcb_count[gpc_index]);
+	}
 #endif
 	gr_config_init_pes_tpc(g, config, gpc_index);


@@ -576,6 +576,7 @@ defined(CONFIG_NVGPU_CTXSW_FW_ERROR_CODE_TESTING)
 #endif
 #ifdef CONFIG_NVGPU_GRAPHICS
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
 	ret = g->ops.gr.falcon.ctrl_ctxsw(g,
 			NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_ZCULL_IMAGE_SIZE,
 			0, &sizes->zcull_image_size);
@@ -584,6 +585,7 @@ defined(CONFIG_NVGPU_CTXSW_FW_ERROR_CODE_TESTING)
 				"query zcull ctx image size failed");
 		return ret;
 	}
+	}
 	nvgpu_log(g, gpu_dbg_gr, "ZCULL image size = %u", sizes->zcull_image_size);
 #endif


@@ -43,6 +43,7 @@ int gp10b_gr_falcon_init_ctx_state(struct gk20a *g,
 		return err;
 	}
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
 	err = g->ops.gr.falcon.ctrl_ctxsw(g,
 			NVGPU_GR_FALCON_METHOD_PREEMPT_IMAGE_SIZE, 0U,
 			&sizes->preempt_image_size);
@@ -50,6 +51,7 @@ int gp10b_gr_falcon_init_ctx_state(struct gk20a *g,
 		nvgpu_err(g, "query preempt image size failed");
 		return err;
 	}
+	}
 	nvgpu_log(g, gpu_dbg_gr, "Preempt image size = %u", sizes->preempt_image_size);
 #endif


@@ -205,6 +205,9 @@ static void nvgpu_init_pm_vars(struct gk20a *g)
 		nvgpu_platform_is_silicon(g) ? platform->enable_mscg : false;
 	g->can_elpg =
 		nvgpu_platform_is_silicon(g) ? platform->can_elpg_init : false;
+	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		g->can_elpg = false;
+	}
 	nvgpu_set_enabled(g, NVGPU_PMU_PERFMON, platform->enable_perfmon);
 }