gpu: nvgpu: disable graphics specific init functions in MIG mode

MIG mode does not support graphics, ELPG, or use cases such as TPC
floorsweeping. Skip all such initialization functions in the common.gr
unit if MIG mode is enabled.
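
A minimal sketch of the guard applied at each of those call sites
(nvgpu_is_enabled() and NVGPU_SUPPORT_MIG are the identifiers used in the
diffs below; the body here is only a placeholder):

#ifdef CONFIG_NVGPU_GRAPHICS
	/*
	 * Graphics-only setup (ZCULL, ZBC, ROP mapping, TPC floorsweeping)
	 * runs only when MIG is disabled.
	 */
	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
		/* graphics-specific init goes here */
	}
#endif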

Set can_elpg to false if MIG mode is enabled.
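
The ELPG override mirrors the nvgpu_init_pm_vars() change in the last diff
below; it runs after the platform defaults are applied so MIG always wins:

	g->can_elpg =
		nvgpu_platform_is_silicon(g) ? platform->can_elpg_init : false;
	/* ELPG is not supported in MIG mode; force it off. */
	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
		g->can_elpg = false;
	}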

Jira NVGPU-5648

Change-Id: I03656dc6289e49a21ec7783430db9c8564c6bf1f
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2411741
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Lakshmanan M <lm@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Deepak Nibade
Date:      2020-09-10 19:58:35 +05:30
Committer: Alex Waterman
parent 7a937a6190
commit 6a69ea235e
6 changed files with 94 additions and 79 deletions

View File

@@ -73,19 +73,21 @@ static void gr_load_tpc_mask(struct gk20a *g, struct nvgpu_gr_config *config)
 	nvgpu_log_info(g, "pes_tpc_mask %u\n", pes_tpc_mask);
 
 #ifdef CONFIG_NVGPU_NON_FUSA
-	fuse_tpc_mask = g->ops.gr.config.get_gpc_tpc_mask(g, config, 0);
-	if ((g->tpc_fs_mask_user != 0U) &&
-		(g->tpc_fs_mask_user != fuse_tpc_mask)) {
-		if (fuse_tpc_mask == nvgpu_safe_sub_u32(BIT32(max_tpc_count),
-							U32(1))) {
-			val = g->tpc_fs_mask_user;
-			val &= nvgpu_safe_sub_u32(BIT32(max_tpc_count), U32(1));
-			/*
-			 * skip tpc to disable the other tpc cause channel
-			 * timeout
-			 */
-			val = nvgpu_safe_sub_u32(BIT32(hweight32(val)), U32(1));
-			pes_tpc_mask = val;
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		fuse_tpc_mask = g->ops.gr.config.get_gpc_tpc_mask(g, config, 0);
+		if ((g->tpc_fs_mask_user != 0U) &&
+			(g->tpc_fs_mask_user != fuse_tpc_mask)) {
+			if (fuse_tpc_mask == nvgpu_safe_sub_u32(BIT32(max_tpc_count),
+								U32(1))) {
+				val = g->tpc_fs_mask_user;
+				val &= nvgpu_safe_sub_u32(BIT32(max_tpc_count), U32(1));
+				/*
+				 * skip tpc to disable the other tpc cause channel
+				 * timeout
+				 */
+				val = nvgpu_safe_sub_u32(BIT32(hweight32(val)), U32(1));
+				pes_tpc_mask = val;
+			}
 		}
 	}
 #endif
@@ -130,8 +132,10 @@ int nvgpu_gr_fs_state_init(struct gk20a *g, struct nvgpu_gr_config *config)
 	g->ops.gr.init.pd_tpc_per_gpc(g, config);
 
 #ifdef CONFIG_NVGPU_GRAPHICS
-	/* gr__setup_pd_mapping */
-	g->ops.gr.init.rop_mapping(g, config);
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		/* gr__setup_pd_mapping */
+		g->ops.gr.init.rop_mapping(g, config);
+	}
 #endif
 
 	g->ops.gr.init.pd_skip_table_gpc(g, config);
@@ -140,15 +144,17 @@ int nvgpu_gr_fs_state_init(struct gk20a *g, struct nvgpu_gr_config *config)
 	tpc_cnt = nvgpu_gr_config_get_tpc_count(config);
 
 #ifdef CONFIG_NVGPU_NON_FUSA
-	fuse_tpc_mask = g->ops.gr.config.get_gpc_tpc_mask(g, config, 0);
-	max_tpc_cnt = nvgpu_gr_config_get_max_tpc_count(config);
-
-	if ((g->tpc_fs_mask_user != 0U) &&
-		(fuse_tpc_mask ==
-		nvgpu_safe_sub_u32(BIT32(max_tpc_cnt), U32(1)))) {
-		u32 val = g->tpc_fs_mask_user;
-		val &= nvgpu_safe_sub_u32(BIT32(max_tpc_cnt), U32(1));
-		tpc_cnt = (u32)hweight32(val);
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		fuse_tpc_mask = g->ops.gr.config.get_gpc_tpc_mask(g, config, 0);
+		max_tpc_cnt = nvgpu_gr_config_get_max_tpc_count(config);
+
+		if ((g->tpc_fs_mask_user != 0U) &&
+			(fuse_tpc_mask ==
+			nvgpu_safe_sub_u32(BIT32(max_tpc_cnt), U32(1)))) {
+			u32 val = g->tpc_fs_mask_user;
+			val &= nvgpu_safe_sub_u32(BIT32(max_tpc_cnt), U32(1));
+			tpc_cnt = (u32)hweight32(val);
+		}
 	}
 #endif

View File

@@ -251,16 +251,23 @@ static int gr_init_setup_hw(struct gk20a *g, struct nvgpu_gr *gr)
 	}
 
 #ifdef CONFIG_NVGPU_GRAPHICS
-	err = nvgpu_gr_zcull_init_hw(g, gr->zcull, gr->config);
-	if (err != 0) {
-		goto out;
-	}
-#endif /* CONFIG_NVGPU_GRAPHICS */
-
-#ifdef CONFIG_NVGPU_GRAPHICS
-	err = nvgpu_gr_zbc_load_table(g, gr->zbc);
-	if (err != 0) {
-		goto out;
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		err = nvgpu_gr_zcull_init_hw(g, gr->zcull, gr->config);
+		if (err != 0) {
+			goto out;
+		}
+
+		err = nvgpu_gr_zbc_load_table(g, gr->zbc);
+		if (err != 0) {
+			goto out;
+		}
+
+		if (g->ops.gr.init.preemption_state != NULL) {
+			err = g->ops.gr.init.preemption_state(g);
+			if (err != 0) {
+				goto out;
+			}
+		}
 	}
 #endif /* CONFIG_NVGPU_GRAPHICS */
@@ -274,15 +281,6 @@ static int gr_init_setup_hw(struct gk20a *g, struct nvgpu_gr *gr)
 		g->ops.gr.init.lg_coalesce(g, 0);
 	}
 
-#ifdef CONFIG_NVGPU_GRAPHICS
-	if (g->ops.gr.init.preemption_state != NULL) {
-		err = g->ops.gr.init.preemption_state(g);
-		if (err != 0) {
-			goto out;
-		}
-	}
-#endif
-
 	/* floorsweep anything left */
 	err = nvgpu_gr_fs_state_init(g, gr->config);
 	if (err != 0) {
@@ -488,29 +486,29 @@ static int gr_init_setup_sw(struct gk20a *g, struct nvgpu_gr *gr)
 	}
 #endif
 
-#ifdef CONFIG_NVGPU_GRAPHICS
-	err = nvgpu_gr_config_init_map_tiles(g, gr->config);
-	if (err != 0) {
-		goto clean_up;
-	}
-
-	err = nvgpu_gr_zcull_init(g, &gr->zcull,
-			nvgpu_gr_falcon_get_zcull_image_size(gr->falcon),
-			gr->config);
-	if (err != 0) {
-		goto clean_up;
-	}
-#endif /* CONFIG_NVGPU_GRAPHICS */
-
 	err = gr_init_ctx_bufs(g, gr);
 	if (err != 0) {
 		goto clean_up;
 	}
 
 #ifdef CONFIG_NVGPU_GRAPHICS
-	err = nvgpu_gr_zbc_init(g, &gr->zbc);
-	if (err != 0) {
-		goto clean_up;
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		err = nvgpu_gr_config_init_map_tiles(g, gr->config);
+		if (err != 0) {
+			goto clean_up;
+		}
+
+		err = nvgpu_gr_zcull_init(g, &gr->zcull,
+				nvgpu_gr_falcon_get_zcull_image_size(gr->falcon),
+				gr->config);
+		if (err != 0) {
+			goto clean_up;
+		}
+
+		err = nvgpu_gr_zbc_init(g, &gr->zbc);
+		if (err != 0) {
+			goto clean_up;
+		}
 	}
 #endif /* CONFIG_NVGPU_GRAPHICS */

View File

@@ -234,10 +234,12 @@ static bool gr_config_alloc_struct_mem(struct gk20a *g,
 	gpc_size = nvgpu_safe_mult_u64((size_t)config->gpc_count, sizeof(u32));
 	config->gpc_tpc_count = nvgpu_kzalloc(g, gpc_size);
 
 #ifdef CONFIG_NVGPU_GRAPHICS
-	config->max_zcull_per_gpc_count = nvgpu_get_litter_value(g,
-					GPU_LIT_NUM_ZCULL_BANKS);
-	config->gpc_zcb_count = nvgpu_kzalloc(g, gpc_size);
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		config->max_zcull_per_gpc_count = nvgpu_get_litter_value(g,
+						GPU_LIT_NUM_ZCULL_BANKS);
+		config->gpc_zcb_count = nvgpu_kzalloc(g, gpc_size);
+	}
 #endif
 
 	config->gpc_ppc_count = nvgpu_kzalloc(g, gpc_size);
@@ -403,11 +405,13 @@ struct nvgpu_gr_config *nvgpu_gr_config_init(struct gk20a *g)
 			config->gpc_tpc_count[gpc_index]);
 
 #ifdef CONFIG_NVGPU_GRAPHICS
-		config->gpc_zcb_count[gpc_index] =
-			g->ops.gr.config.get_zcull_count_in_gpc(g, config,
-				gpc_index);
-		config->zcb_count = nvgpu_safe_add_u32(config->zcb_count,
-					config->gpc_zcb_count[gpc_index]);
+		if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+			config->gpc_zcb_count[gpc_index] =
+				g->ops.gr.config.get_zcull_count_in_gpc(g, config,
+					gpc_index);
+			config->zcb_count = nvgpu_safe_add_u32(config->zcb_count,
+						config->gpc_zcb_count[gpc_index]);
+		}
 #endif
 
 		gr_config_init_pes_tpc(g, config, gpc_index);

View File

@@ -576,13 +576,15 @@ defined(CONFIG_NVGPU_CTXSW_FW_ERROR_CODE_TESTING)
 #endif
 
 #ifdef CONFIG_NVGPU_GRAPHICS
-	ret = g->ops.gr.falcon.ctrl_ctxsw(g,
-		NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_ZCULL_IMAGE_SIZE,
-		0, &sizes->zcull_image_size);
-	if (ret != 0) {
-		nvgpu_err(g,
-			"query zcull ctx image size failed");
-		return ret;
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		ret = g->ops.gr.falcon.ctrl_ctxsw(g,
+			NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_ZCULL_IMAGE_SIZE,
+			0, &sizes->zcull_image_size);
+		if (ret != 0) {
+			nvgpu_err(g,
+				"query zcull ctx image size failed");
+			return ret;
+		}
 	}
 
 	nvgpu_log(g, gpu_dbg_gr, "ZCULL image size = %u", sizes->zcull_image_size);

View File

@@ -43,12 +43,14 @@ int gp10b_gr_falcon_init_ctx_state(struct gk20a *g,
 		return err;
 	}
 
-	err = g->ops.gr.falcon.ctrl_ctxsw(g,
-			NVGPU_GR_FALCON_METHOD_PREEMPT_IMAGE_SIZE, 0U,
-			&sizes->preempt_image_size);
-	if (err != 0) {
-		nvgpu_err(g, "query preempt image size failed");
-		return err;
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		err = g->ops.gr.falcon.ctrl_ctxsw(g,
+				NVGPU_GR_FALCON_METHOD_PREEMPT_IMAGE_SIZE, 0U,
+				&sizes->preempt_image_size);
+		if (err != 0) {
+			nvgpu_err(g, "query preempt image size failed");
+			return err;
+		}
 	}
 
 	nvgpu_log(g, gpu_dbg_gr, "Preempt image size = %u", sizes->preempt_image_size);

View File

@@ -205,6 +205,9 @@ static void nvgpu_init_pm_vars(struct gk20a *g)
 		nvgpu_platform_is_silicon(g) ? platform->enable_mscg : false;
 	g->can_elpg =
 		nvgpu_platform_is_silicon(g) ? platform->can_elpg_init : false;
+	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		g->can_elpg = false;
+	}
 
 	nvgpu_set_enabled(g, NVGPU_PMU_PERFMON, platform->enable_perfmon);
 }