From 2ecb5feaad7ddbf48ddf66069ecbaae5a4fb8bea Mon Sep 17 00:00:00 2001
From: Lakshmanan M
Date: Mon, 5 Oct 2020 13:38:53 +0530
Subject: [PATCH] gpu: nvgpu: Skip graphics CB programming for MIG

Added logic to skip the following graphics CB allocation, map and
programming sequence when MIG is enabled.

Global CB:
1) NVGPU_GR_GLOBAL_CTX_CIRCULAR
2) NVGPU_GR_GLOBAL_CTX_PAGEPOOL
3) NVGPU_GR_GLOBAL_CTX_ATTRIBUTE
4) NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR
5) NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR
6) NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR
7) NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER

CTX CB:
1) NVGPU_GR_CTX_CIRCULAR_VA
2) NVGPU_GR_CTX_PAGEPOOL_VA
3) NVGPU_GR_CTX_ATTRIBUTE_VA
4) NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA

JIRA NVGPU-5650

Change-Id: I38c2859ce57ad76c58a772fdf9f589f2106149af
Signed-off-by: Lakshmanan M
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2423450
Tested-by: mobile promotions
Reviewed-by: automaticguardword
Reviewed-by: svc-mobile-coverity
Reviewed-by: svc-mobile-cert
Reviewed-by: Rajesh Devaraj
Reviewed-by: Dinesh T
Reviewed-by: Deepak Nibade
Reviewed-by: Vaibhav Kachore
Reviewed-by: mobile promotions
GVS: Gerrit_Virtual_Submit
---
 drivers/gpu/nvgpu/common/gr/ctx.c           | 77 +++++++++--------
 drivers/gpu/nvgpu/common/gr/global_ctx.c    | 86 +++++++++++++------
 drivers/gpu/nvgpu/common/gr/gr.c            | 77 ++++++++++-------
 drivers/gpu/nvgpu/common/gr/gr_setup.c      | 10 ++-
 drivers/gpu/nvgpu/common/gr/obj_ctx.c       | 60 +++++++------
 drivers/gpu/nvgpu/common/init/nvgpu_init.c  |  3 +
 drivers/gpu/nvgpu/common/vgpu/gr/ctx_vgpu.c | 65 +++++++-------
 drivers/gpu/nvgpu/common/vgpu/gr/gr_vgpu.c  |  5 +-
 .../gpu/nvgpu/common/vgpu/init/init_vgpu.c  |  2 -
 9 files changed, 231 insertions(+), 154 deletions(-)

diff --git a/drivers/gpu/nvgpu/common/gr/ctx.c b/drivers/gpu/nvgpu/common/gr/ctx.c
index 7f92ff983..dc68357de 100644
--- a/drivers/gpu/nvgpu/common/gr/ctx.c
+++ b/drivers/gpu/nvgpu/common/gr/ctx.c
@@ -355,28 +355,50 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, " ");
-	/* Circular Buffer */
-	err = nvgpu_gr_ctx_map_ctx_circular_buffer(g, gr_ctx,
-		global_ctx_buffer, vm, vpr);
-	if (err != 0) {
-		nvgpu_err(g, "cannot map ctx circular buffer");
-		goto fail;
-	}
+	/*
+	 * MIG supports only compute class.
+	 * Allocate BUNDLE_CB, PAGEPOOL, ATTRIBUTE_CB and RTV_CB
+	 * if 2D/3D/I2M classes(graphics) are supported.
+	 */
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		/* Circular Buffer */
+		err = nvgpu_gr_ctx_map_ctx_circular_buffer(g, gr_ctx,
+			global_ctx_buffer, vm, vpr);
+		if (err != 0) {
+			nvgpu_err(g, "cannot map ctx circular buffer");
+			goto fail;
+		}
-	/* Attribute Buffer */
-	err = nvgpu_gr_ctx_map_ctx_attribute_buffer(g, gr_ctx,
-		global_ctx_buffer, vm, vpr);
-	if (err != 0) {
-		nvgpu_err(g, "cannot map ctx attribute buffer");
-		goto fail;
-	}
+		/* Attribute Buffer */
+		err = nvgpu_gr_ctx_map_ctx_attribute_buffer(g, gr_ctx,
+			global_ctx_buffer, vm, vpr);
+		if (err != 0) {
+			nvgpu_err(g, "cannot map ctx attribute buffer");
+			goto fail;
+		}
-	/* Page Pool */
-	err = nvgpu_gr_ctx_map_ctx_pagepool_buffer(g, gr_ctx,
-		global_ctx_buffer, vm, vpr);
-	if (err != 0) {
-		nvgpu_err(g, "cannot map ctx pagepool buffer");
-		goto fail;
+		/* Page Pool */
+		err = nvgpu_gr_ctx_map_ctx_pagepool_buffer(g, gr_ctx,
+			global_ctx_buffer, vm, vpr);
+		if (err != 0) {
+			nvgpu_err(g, "cannot map ctx pagepool buffer");
+			goto fail;
+		}
+#ifdef CONFIG_NVGPU_DGPU
+		/* RTV circular buffer */
+		if (nvgpu_gr_global_ctx_buffer_ready(global_ctx_buffer,
+			NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER)) {
+			err = nvgpu_gr_ctx_map_ctx_buffer(g,
+				NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER,
+				NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA,
+				gr_ctx, global_ctx_buffer, vm);
+			if (err != 0) {
+				nvgpu_err(g,
+					"cannot map ctx rtv circular buffer");
+				goto fail;
+			}
+		}
+#endif
 	}
 	/* Priv register Access Map */
@@ -403,21 +425,6 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
 	}
 #endif
-#ifdef CONFIG_NVGPU_DGPU
-	/* RTV circular buffer */
-	if (nvgpu_gr_global_ctx_buffer_ready(global_ctx_buffer,
-		NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER)) {
-		err = nvgpu_gr_ctx_map_ctx_buffer(g,
-			NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER,
-			NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA,
-			gr_ctx, global_ctx_buffer, vm);
-		if (err != 0) {
-			nvgpu_err(g, "cannot map ctx rtv circular buffer");
-			goto fail;
-		}
-	}
-#endif
-
 	gr_ctx->global_ctx_buffer_mapped = true;
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, "done");

diff --git a/drivers/gpu/nvgpu/common/gr/global_ctx.c b/drivers/gpu/nvgpu/common/gr/global_ctx.c
index 7480cc179..05e4a2cce 100644
--- a/drivers/gpu/nvgpu/common/gr/global_ctx.c
+++ b/drivers/gpu/nvgpu/common/gr/global_ctx.c
@@ -160,18 +160,26 @@ static int nvgpu_gr_global_ctx_buffer_alloc_vpr(struct gk20a *g,
 static bool nvgpu_gr_global_ctx_buffer_sizes_are_valid(struct gk20a *g,
 	struct nvgpu_gr_global_ctx_buffer_desc *desc)
 {
-	if ((desc[NVGPU_GR_GLOBAL_CTX_CIRCULAR].size == 0U) ||
-		(desc[NVGPU_GR_GLOBAL_CTX_PAGEPOOL].size == 0U) ||
-		(desc[NVGPU_GR_GLOBAL_CTX_ATTRIBUTE].size == 0U) ||
-#ifdef CONFIG_NVGPU_VPR
-		(desc[NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR].size == 0U) ||
-		(desc[NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR].size == 0U) ||
-		(desc[NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR].size == 0U) ||
-#endif
-		(desc[NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP].size == 0U)) {
+
+	if (desc[NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP].size == 0U) {
 		return false;
 	}
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		if ((desc[NVGPU_GR_GLOBAL_CTX_CIRCULAR].size == 0U) ||
+			(desc[NVGPU_GR_GLOBAL_CTX_PAGEPOOL].size == 0U) ||
+			(desc[NVGPU_GR_GLOBAL_CTX_ATTRIBUTE].size == 0U)) {
+			return false;
+		}
+#ifdef CONFIG_NVGPU_VPR
+		if ((desc[NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR].size == 0U) ||
+			(desc[NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR].size == 0U) ||
+			(desc[NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR].size == 0U)) {
+			return false;
+		}
+#endif
+	}
+
 	return true;
 }
@@ -181,6 +189,19 @@ static int
 nvgpu_gr_global_ctx_buffer_vpr_alloc(struct gk20a *g,
 {
 	int err = 0;
+	/*
+	 * MIG supports only compute class.
+	 * Allocate BUNDLE_CB, PAGEPOOL, ATTRIBUTE_CB and RTV_CB
+	 * if 2D/3D/I2M classes(graphics) are supported.
+	 */
+	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		nvgpu_log(g, gpu_dbg_gr | gpu_dbg_mig,
+			"2D class is not supported "
+			"skip BUNDLE_CB, PAGEPOOL, ATTRIBUTE_CB "
+			"and RTV_CB");
+		return 0;
+	}
+
 	err = nvgpu_gr_global_ctx_buffer_alloc_vpr(g, desc,
 		NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR);
 	if (err != 0) {
@@ -208,22 +229,29 @@ static int nvgpu_gr_global_ctx_buffer_sys_alloc(struct gk20a *g,
 {
 	int err = 0;
-	err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
-		NVGPU_GR_GLOBAL_CTX_CIRCULAR);
-	if (err != 0) {
-		goto fail;
-	}
+	/*
+	 * MIG supports only compute class.
+	 * Allocate BUNDLE_CB, PAGEPOOL, ATTRIBUTE_CB and RTV_CB
+	 * if 2D/3D/I2M classes(graphics) are supported.
+	 */
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
+			NVGPU_GR_GLOBAL_CTX_CIRCULAR);
+		if (err != 0) {
+			goto fail;
+		}
-	err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
-		NVGPU_GR_GLOBAL_CTX_PAGEPOOL);
-	if (err != 0) {
-		goto fail;
-	}
+		err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
+			NVGPU_GR_GLOBAL_CTX_PAGEPOOL);
+		if (err != 0) {
+			goto fail;
+		}
-	err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
-		NVGPU_GR_GLOBAL_CTX_ATTRIBUTE);
-	if (err != 0) {
-		goto fail;
+		err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
+			NVGPU_GR_GLOBAL_CTX_ATTRIBUTE);
+		if (err != 0) {
+			goto fail;
+		}
 	}
 	err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
@@ -261,11 +289,13 @@ int nvgpu_gr_global_ctx_buffer_alloc(struct gk20a *g,
 #endif
 #ifdef CONFIG_NVGPU_DGPU
-	if (desc[NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER].size != 0U) {
-		err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
-			NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER);
-		if (err != 0) {
-			goto clean_up;
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		if (desc[NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER].size != 0U) {
+			err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
+				NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER);
+			if (err != 0) {
+				goto clean_up;
+			}
 		}
 	}
 #endif

diff --git a/drivers/gpu/nvgpu/common/gr/gr.c b/drivers/gpu/nvgpu/common/gr/gr.c
index 932b338d9..ddb3c7627 100644
--- a/drivers/gpu/nvgpu/common/gr/gr.c
+++ b/drivers/gpu/nvgpu/common/gr/gr.c
@@ -60,36 +60,58 @@ static int gr_alloc_global_ctx_buffers(struct gk20a *g, struct nvgpu_gr *gr)
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, " ");
-	size = g->ops.gr.init.get_global_ctx_cb_buffer_size(g);
-	nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr, "cb_buffer_size : %d", size);
+	/*
+	 * MIG supports only compute class.
+	 * Allocate BUNDLE_CB, PAGEPOOL, ATTRIBUTE_CB and RTV_CB
+	 * if 2D/3D/I2M classes(graphics) are supported.
+	 */
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		size = g->ops.gr.init.get_global_ctx_cb_buffer_size(g);
+		nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr,
+			"cb_buffer_size : %d", size);
-	nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
-		NVGPU_GR_GLOBAL_CTX_CIRCULAR, size);
+		nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
+			NVGPU_GR_GLOBAL_CTX_CIRCULAR, size);
 #ifdef CONFIG_NVGPU_VPR
-	nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
-		NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR, size);
+		nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
+			NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR, size);
 #endif
-	size = g->ops.gr.init.get_global_ctx_pagepool_buffer_size(g);
-	nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr, "pagepool_buffer_size : %d", size);
+		size = g->ops.gr.init.get_global_ctx_pagepool_buffer_size(g);
+		nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr,
+			"pagepool_buffer_size : %d", size);
-	nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
-		NVGPU_GR_GLOBAL_CTX_PAGEPOOL, size);
+		nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
+			NVGPU_GR_GLOBAL_CTX_PAGEPOOL, size);
 #ifdef CONFIG_NVGPU_VPR
-	nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
-		NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR, size);
+		nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
+			NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR, size);
 #endif
-	size = g->ops.gr.init.get_global_attr_cb_size(g,
-		nvgpu_gr_config_get_tpc_count(gr->config),
-		nvgpu_gr_config_get_max_tpc_count(gr->config));
-	nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr, "attr_buffer_size : %u", size);
+		size = g->ops.gr.init.get_global_attr_cb_size(g,
+			nvgpu_gr_config_get_tpc_count(gr->config),
+			nvgpu_gr_config_get_max_tpc_count(gr->config));
+		nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr,
+			"attr_buffer_size : %u", size);
-	nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
-		NVGPU_GR_GLOBAL_CTX_ATTRIBUTE, size);
+		nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
+			NVGPU_GR_GLOBAL_CTX_ATTRIBUTE, size);
 #ifdef CONFIG_NVGPU_VPR
-	nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
-		NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR, size);
+		nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
+			NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR, size);
 #endif
+
+#ifdef CONFIG_NVGPU_DGPU
+		if (g->ops.gr.init.get_rtv_cb_size != NULL) {
+			size = g->ops.gr.init.get_rtv_cb_size(g);
+			nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr,
+				"rtv_circular_buffer_size : %u", size);
+
+			nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
+				NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER, size);
+		}
+#endif
+	}
+
 	size = NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP_SIZE;
 	nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr, "priv_access_map_size : %d", size);
@@ -104,16 +126,6 @@ static int gr_alloc_global_ctx_buffers(struct gk20a *g, struct nvgpu_gr *gr)
 		NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER, size);
 #endif
-#ifdef CONFIG_NVGPU_DGPU
-	if (g->ops.gr.init.get_rtv_cb_size != NULL) {
-		size = g->ops.gr.init.get_rtv_cb_size(g);
-		nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr, "rtv_circular_buffer_size : %u", size);
-
-		nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
-			NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER, size);
-	}
-#endif
-
 	err = nvgpu_gr_global_ctx_buffer_alloc(g, gr->global_ctx_buffer);
 	if (err != 0) {
 		return err;
 	}
@@ -405,8 +417,11 @@ static int gr_init_ctx_bufs(struct gk20a *g, struct nvgpu_gr *gr)
 	}
 #ifdef CONFIG_NVGPU_GRAPHICS
-	nvgpu_gr_ctx_set_size(gr->gr_ctx_desc, NVGPU_GR_CTX_PREEMPT_CTXSW,
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		nvgpu_gr_ctx_set_size(gr->gr_ctx_desc,
+			NVGPU_GR_CTX_PREEMPT_CTXSW,
 			nvgpu_gr_falcon_get_preempt_image_size(gr->falcon));
+	}
 #endif
 	gr->global_ctx_buffer = nvgpu_gr_global_ctx_desc_alloc(g);

diff --git a/drivers/gpu/nvgpu/common/gr/gr_setup.c b/drivers/gpu/nvgpu/common/gr/gr_setup.c
index c7db55ddf..bcd4afe0b 100644
--- a/drivers/gpu/nvgpu/common/gr/gr_setup.c
+++ b/drivers/gpu/nvgpu/common/gr/gr_setup.c
@@ -355,10 +355,12 @@ int nvgpu_gr_setup_set_preemption_mode(struct nvgpu_channel *ch,
 	nvgpu_gr_obj_ctx_update_ctxsw_preemption_mode(g, gr->config,
 		gr_ctx, ch->subctx);
-	nvgpu_gr_ctx_patch_write_begin(g, gr_ctx, true);
-	g->ops.gr.init.commit_global_cb_manager(g, gr->config, gr_ctx,
-		true);
-	nvgpu_gr_ctx_patch_write_end(g, gr_ctx, true);
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		nvgpu_gr_ctx_patch_write_begin(g, gr_ctx, true);
+		g->ops.gr.init.commit_global_cb_manager(g, gr->config, gr_ctx,
+			true);
+		nvgpu_gr_ctx_patch_write_end(g, gr_ctx, true);
+	}
 	g->ops.tsg.enable(tsg);

diff --git a/drivers/gpu/nvgpu/common/gr/obj_ctx.c b/drivers/gpu/nvgpu/common/gr/obj_ctx.c
index 55c919b2e..47a235f6d 100644
--- a/drivers/gpu/nvgpu/common/gr/obj_ctx.c
+++ b/drivers/gpu/nvgpu/common/gr/obj_ctx.c
@@ -336,40 +336,52 @@ void nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
 		nvgpu_gr_ctx_patch_write_begin(g, gr_ctx, false);
 	}
-	/* global pagepool buffer */
-	addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx, NVGPU_GR_CTX_PAGEPOOL_VA);
-	size = nvgpu_safe_cast_u64_to_u32(nvgpu_gr_global_ctx_get_size(
-		global_ctx_buffer, NVGPU_GR_GLOBAL_CTX_PAGEPOOL));
+	/*
+	 * MIG supports only compute class.
+	 * Skip BUNDLE_CB, PAGEPOOL, ATTRIBUTE_CB and RTV_CB
+	 * if 2D/3D/I2M classes(graphics) are not supported.
+	 */
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		/* global pagepool buffer */
+		addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
+			NVGPU_GR_CTX_PAGEPOOL_VA);
+		size = nvgpu_safe_cast_u64_to_u32(nvgpu_gr_global_ctx_get_size(
+			global_ctx_buffer,
+			NVGPU_GR_GLOBAL_CTX_PAGEPOOL));
-	g->ops.gr.init.commit_global_pagepool(g, gr_ctx, addr, size, patch,
-		true);
+		g->ops.gr.init.commit_global_pagepool(g, gr_ctx, addr, size,
+			patch, true);
-	/* global bundle cb */
-	addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx, NVGPU_GR_CTX_CIRCULAR_VA);
-	size = nvgpu_safe_cast_u64_to_u32(
-		g->ops.gr.init.get_bundle_cb_default_size(g));
+		/* global bundle cb */
+		addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
+			NVGPU_GR_CTX_CIRCULAR_VA);
+		size = nvgpu_safe_cast_u64_to_u32(
+			g->ops.gr.init.get_bundle_cb_default_size(g));
-	g->ops.gr.init.commit_global_bundle_cb(g, gr_ctx, addr, size, patch);
+		g->ops.gr.init.commit_global_bundle_cb(g, gr_ctx, addr, size,
+			patch);
-	/* global attrib cb */
-	addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
-		NVGPU_GR_CTX_ATTRIBUTE_VA);
+		/* global attrib cb */
+		addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
+			NVGPU_GR_CTX_ATTRIBUTE_VA);
-	g->ops.gr.init.commit_global_attrib_cb(g, gr_ctx,
-		nvgpu_gr_config_get_tpc_count(config),
-		nvgpu_gr_config_get_max_tpc_count(config), addr, patch);
+		g->ops.gr.init.commit_global_attrib_cb(g, gr_ctx,
+			nvgpu_gr_config_get_tpc_count(config),
+			nvgpu_gr_config_get_max_tpc_count(config), addr, patch);
-	g->ops.gr.init.commit_global_cb_manager(g, config, gr_ctx, patch);
+		g->ops.gr.init.commit_global_cb_manager(g, config, gr_ctx,
+			patch);
 #ifdef CONFIG_NVGPU_DGPU
-	if (g->ops.gr.init.commit_rtv_cb != NULL) {
-		/* RTV circular buffer */
-		addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
-			NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA);
+		if (g->ops.gr.init.commit_rtv_cb != NULL) {
+			/* RTV circular buffer */
+			addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
+				NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA);
-		g->ops.gr.init.commit_rtv_cb(g, addr, gr_ctx, patch);
-	}
+			g->ops.gr.init.commit_rtv_cb(g, addr, gr_ctx, patch);
+		}
 #endif
+	}
 #ifdef CONFIG_NVGPU_SM_DIVERSITY
 	if ((nvgpu_is_enabled(g, NVGPU_SUPPORT_SM_DIVERSITY)) &&

diff --git a/drivers/gpu/nvgpu/common/init/nvgpu_init.c b/drivers/gpu/nvgpu/common/init/nvgpu_init.c
index 534d10326..6d39cefbf 100644
--- a/drivers/gpu/nvgpu/common/init/nvgpu_init.c
+++ b/drivers/gpu/nvgpu/common/init/nvgpu_init.c
@@ -856,6 +856,9 @@ int nvgpu_init_gpu_characteristics(struct gk20a *g)
 		nvgpu_set_enabled(g, NVGPU_SUPPORT_3D, true);
 		nvgpu_set_enabled(g, NVGPU_SUPPORT_I2M, true);
 		nvgpu_set_enabled(g, NVGPU_SUPPORT_ZBC, true);
+	} else {
+		nvgpu_set_enabled(g, NVGPU_SUPPORT_ZBC_STENCIL, false);
+		nvgpu_set_enabled(g, NVGPU_SUPPORT_PREEMPTION_GFXP, false);
 	}
 	return 0;

diff --git a/drivers/gpu/nvgpu/common/vgpu/gr/ctx_vgpu.c b/drivers/gpu/nvgpu/common/vgpu/gr/ctx_vgpu.c
index 146c587dc..7dc83ccb8 100644
--- a/drivers/gpu/nvgpu/common/vgpu/gr/ctx_vgpu.c
+++ b/drivers/gpu/nvgpu/common/vgpu/gr/ctx_vgpu.c
@@ -257,37 +257,44 @@ int vgpu_gr_map_global_ctx_buffers(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
 	g_bfr_va = gr_ctx->global_ctx_buffer_va;
-	/* Circular Buffer */
-	gpu_va = nvgpu_vm_alloc_va(ch_vm,
-		nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
-		NVGPU_GR_GLOBAL_CTX_CIRCULAR),
-		GMMU_PAGE_SIZE_KERNEL);
+	/*
+	 * MIG supports only compute class.
+	 * Allocate BUNDLE_CB, PAGEPOOL, ATTRIBUTE_CB and RTV_CB
+	 * if 2D/3D/I2M classes(graphics) are supported.
+	 */
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		/* Circular Buffer */
+		gpu_va = nvgpu_vm_alloc_va(ch_vm,
+			nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
+			NVGPU_GR_GLOBAL_CTX_CIRCULAR),
+			GMMU_PAGE_SIZE_KERNEL);
-	if (!gpu_va) {
-		goto clean_up;
+		if (!gpu_va) {
+			goto clean_up;
+		}
+		g_bfr_va[NVGPU_GR_CTX_CIRCULAR_VA] = gpu_va;
+
+		/* Attribute Buffer */
+		gpu_va = nvgpu_vm_alloc_va(ch_vm,
+			nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
+			NVGPU_GR_GLOBAL_CTX_ATTRIBUTE),
+			GMMU_PAGE_SIZE_KERNEL);
+
+		if (!gpu_va) {
+			goto clean_up;
+		}
+		g_bfr_va[NVGPU_GR_CTX_ATTRIBUTE_VA] = gpu_va;
+
+		/* Page Pool */
+		gpu_va = nvgpu_vm_alloc_va(ch_vm,
+			nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
+			NVGPU_GR_GLOBAL_CTX_PAGEPOOL),
+			GMMU_PAGE_SIZE_KERNEL);
+		if (!gpu_va) {
+			goto clean_up;
+		}
+		g_bfr_va[NVGPU_GR_CTX_PAGEPOOL_VA] = gpu_va;
 	}
-	g_bfr_va[NVGPU_GR_CTX_CIRCULAR_VA] = gpu_va;
-
-	/* Attribute Buffer */
-	gpu_va = nvgpu_vm_alloc_va(ch_vm,
-		nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
-		NVGPU_GR_GLOBAL_CTX_ATTRIBUTE),
-		GMMU_PAGE_SIZE_KERNEL);
-
-	if (!gpu_va) {
-		goto clean_up;
-	}
-	g_bfr_va[NVGPU_GR_CTX_ATTRIBUTE_VA] = gpu_va;
-
-	/* Page Pool */
-	gpu_va = nvgpu_vm_alloc_va(ch_vm,
-		nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
-		NVGPU_GR_GLOBAL_CTX_PAGEPOOL),
-		GMMU_PAGE_SIZE_KERNEL);
-	if (!gpu_va) {
-		goto clean_up;
-	}
-	g_bfr_va[NVGPU_GR_CTX_PAGEPOOL_VA] = gpu_va;
 	/* Priv register Access Map */
 	gpu_va = nvgpu_vm_alloc_va(ch_vm,

diff --git a/drivers/gpu/nvgpu/common/vgpu/gr/gr_vgpu.c b/drivers/gpu/nvgpu/common/vgpu/gr/gr_vgpu.c
index d63555fba..51b7c3422 100644
--- a/drivers/gpu/nvgpu/common/vgpu/gr/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/common/vgpu/gr/gr_vgpu.c
@@ -770,8 +770,11 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
 	}
 #ifdef CONFIG_NVGPU_GRAPHICS
-	nvgpu_gr_ctx_set_size(gr->gr_ctx_desc, NVGPU_GR_CTX_PREEMPT_CTXSW,
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		nvgpu_gr_ctx_set_size(gr->gr_ctx_desc,
+			NVGPU_GR_CTX_PREEMPT_CTXSW,
 			nvgpu_gr_falcon_get_preempt_image_size(g->gr->falcon));
+	}
 #endif
 	nvgpu_spinlock_init(&g->gr->intr->ch_tlb_lock);

diff --git a/drivers/gpu/nvgpu/common/vgpu/init/init_vgpu.c b/drivers/gpu/nvgpu/common/vgpu/init/init_vgpu.c
index 6baa43089..f1184f401 100644
--- a/drivers/gpu/nvgpu/common/vgpu/init/init_vgpu.c
+++ b/drivers/gpu/nvgpu/common/vgpu/init/init_vgpu.c
@@ -123,8 +123,6 @@ int vgpu_init_gpu_characteristics(struct gk20a *g)
 		return err;
 	}
-	nvgpu_set_enabled(g, NVGPU_SUPPORT_PREEMPTION_GFXP, true);
-
 	/* features vgpu does not support */
 	nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH, false);
 	nvgpu_set_enabled(g, NVGPU_SUPPORT_RESCHEDULE_RUNLIST, false);
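Every path touched by this patch applies the same guard: query the MIG flag once and skip the graphics-only circular/pagepool/attribute/RTV buffer work when it is set, while compute-relevant buffers such as the priv access map are still set up. The standalone C sketch below illustrates that guard pattern only. The struct gk20a layout, nvgpu_is_enabled() implementation and the map_graphics_cb_buffers()/map_common_ctx_buffers()/map_global_ctx_buffers() helpers are simplified stand-ins invented for this example; they are not the real nvgpu definitions in the patch above.

/*
 * Illustrative sketch of the "skip graphics CBs under MIG" guard.
 * All types and helpers here are simplified stand-ins, not nvgpu code.
 */
#include <stdbool.h>
#include <stdio.h>

enum { NVGPU_SUPPORT_MIG = 0, NVGPU_MAX_ENABLED_BITS = 32 };

struct gk20a {
	bool enabled_flags[NVGPU_MAX_ENABLED_BITS];
};

static bool nvgpu_is_enabled(struct gk20a *g, int flag)
{
	return g->enabled_flags[flag];
}

/* Graphics-only buffers: only needed when 2D/3D/I2M classes exist. */
static int map_graphics_cb_buffers(struct gk20a *g)
{
	(void)g;
	printf("mapping CIRCULAR, PAGEPOOL, ATTRIBUTE and RTV buffers\n");
	return 0;
}

/* Buffers that compute contexts still need, e.g. the priv access map. */
static int map_common_ctx_buffers(struct gk20a *g)
{
	(void)g;
	printf("mapping PRIV_ACCESS_MAP buffer\n");
	return 0;
}

static int map_global_ctx_buffers(struct gk20a *g)
{
	int err = 0;

	/* MIG exposes only the compute class, so graphics CBs are skipped. */
	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
		err = map_graphics_cb_buffers(g);
		if (err != 0) {
			return err;
		}
	}

	return map_common_ctx_buffers(g);
}

int main(void)
{
	struct gk20a g = { .enabled_flags = { [NVGPU_SUPPORT_MIG] = true } };

	return map_global_ctx_buffers(&g);
}

With the flag set, only the common-buffer path runs; this mirrors why the VPR/sys allocation helpers in global_ctx.c can return 0 early under MIG and why obj_ctx.c and gr_setup.c skip the commit_global_* programming calls.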