gpu: nvgpu: Skip graphics CB programming for MIG

Added logic to skip the following graphics circular buffer (CB)
allocation, mapping and programming sequences when MIG is enabled,
since MIG supports only the compute class. A minimal sketch of the
common guard follows the lists below.

Global CB:
1) NVGPU_GR_GLOBAL_CTX_CIRCULAR
2) NVGPU_GR_GLOBAL_CTX_PAGEPOOL
3) NVGPU_GR_GLOBAL_CTX_ATTRIBUTE
4) NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR
5) NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR
6) NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR
7) NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER

CTX CB:
1) NVGPU_GR_CTX_CIRCULAR_VA
2) NVGPU_GR_CTX_PAGEPOOL_VA
3) NVGPU_GR_CTX_ATTRIBUTE_VA
4) NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA
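Every path below applies the same guard. As a rough illustration only
(the helper name and elided body are hypothetical; nvgpu_is_enabled()
and NVGPU_SUPPORT_MIG are taken from the diffs below):

	/*
	 * Sketch only: on MIG, just the compute class is exposed, so the
	 * graphics-only buffers are never allocated, mapped or committed.
	 */
	static int gr_setup_graphics_cbs(struct gk20a *g)
	{
		if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
			return 0;	/* compute-only: nothing to do */
		}
		/* ... allocate, map and commit the CBs listed above ... */
		return 0;
	}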

JIRA NVGPU-5650

Change-Id: I38c2859ce57ad76c58a772fdf9f589f2106149af
Signed-off-by: Lakshmanan M <lm@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2423450
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Rajesh Devaraj <rdevaraj@nvidia.com>
Reviewed-by: Dinesh T <dt@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: Vaibhav Kachore <vkachore@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author:    Lakshmanan M
Date:      2020-10-05 13:38:53 +05:30
Committer: Alex Waterman
Commit:    2ecb5feaad (parent: c9a964aefd)

9 changed files with 231 additions and 154 deletions

@@ -355,28 +355,50 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, " ");
 
-	/* Circular Buffer */
-	err = nvgpu_gr_ctx_map_ctx_circular_buffer(g, gr_ctx,
-		global_ctx_buffer, vm, vpr);
-	if (err != 0) {
-		nvgpu_err(g, "cannot map ctx circular buffer");
-		goto fail;
-	}
-
-	/* Attribute Buffer */
-	err = nvgpu_gr_ctx_map_ctx_attribute_buffer(g, gr_ctx,
-		global_ctx_buffer, vm, vpr);
-	if (err != 0) {
-		nvgpu_err(g, "cannot map ctx attribute buffer");
-		goto fail;
-	}
-
-	/* Page Pool */
-	err = nvgpu_gr_ctx_map_ctx_pagepool_buffer(g, gr_ctx,
-		global_ctx_buffer, vm, vpr);
-	if (err != 0) {
-		nvgpu_err(g, "cannot map ctx pagepool buffer");
-		goto fail;
-	}
+	/*
+	 * MIG supports only compute class.
+	 * Allocate BUNDLE_CB, PAGEPOOL, ATTRIBUTE_CB and RTV_CB
+	 * if 2D/3D/I2M classes(graphics) are supported.
+	 */
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		/* Circular Buffer */
+		err = nvgpu_gr_ctx_map_ctx_circular_buffer(g, gr_ctx,
+			global_ctx_buffer, vm, vpr);
+		if (err != 0) {
+			nvgpu_err(g, "cannot map ctx circular buffer");
+			goto fail;
+		}
+
+		/* Attribute Buffer */
+		err = nvgpu_gr_ctx_map_ctx_attribute_buffer(g, gr_ctx,
+			global_ctx_buffer, vm, vpr);
+		if (err != 0) {
+			nvgpu_err(g, "cannot map ctx attribute buffer");
+			goto fail;
+		}
+
+		/* Page Pool */
+		err = nvgpu_gr_ctx_map_ctx_pagepool_buffer(g, gr_ctx,
+			global_ctx_buffer, vm, vpr);
+		if (err != 0) {
+			nvgpu_err(g, "cannot map ctx pagepool buffer");
+			goto fail;
+		}
+
+#ifdef CONFIG_NVGPU_DGPU
+		/* RTV circular buffer */
+		if (nvgpu_gr_global_ctx_buffer_ready(global_ctx_buffer,
+				NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER)) {
+			err = nvgpu_gr_ctx_map_ctx_buffer(g,
+				NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER,
+				NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA,
+				gr_ctx, global_ctx_buffer, vm);
+			if (err != 0) {
+				nvgpu_err(g,
+					"cannot map ctx rtv circular buffer");
+				goto fail;
+			}
+		}
+#endif
+	}
 
 	/* Priv register Access Map */
@@ -403,21 +425,6 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
 	}
 #endif
 
-#ifdef CONFIG_NVGPU_DGPU
-	/* RTV circular buffer */
-	if (nvgpu_gr_global_ctx_buffer_ready(global_ctx_buffer,
-		NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER)) {
-		err = nvgpu_gr_ctx_map_ctx_buffer(g,
-			NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER,
-			NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA,
-			gr_ctx, global_ctx_buffer, vm);
-		if (err != 0) {
-			nvgpu_err(g, "cannot map ctx rtv circular buffer");
-			goto fail;
-		}
-	}
-#endif
-
 	gr_ctx->global_ctx_buffer_mapped = true;
 
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, "done");

@@ -160,18 +160,26 @@ static int nvgpu_gr_global_ctx_buffer_alloc_vpr(struct gk20a *g,
 static bool nvgpu_gr_global_ctx_buffer_sizes_are_valid(struct gk20a *g,
 	struct nvgpu_gr_global_ctx_buffer_desc *desc)
 {
-	if ((desc[NVGPU_GR_GLOBAL_CTX_CIRCULAR].size == 0U) ||
-		(desc[NVGPU_GR_GLOBAL_CTX_PAGEPOOL].size == 0U) ||
-		(desc[NVGPU_GR_GLOBAL_CTX_ATTRIBUTE].size == 0U) ||
-#ifdef CONFIG_NVGPU_VPR
-		(desc[NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR].size == 0U) ||
-		(desc[NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR].size == 0U) ||
-		(desc[NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR].size == 0U) ||
-#endif
-		(desc[NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP].size == 0U)) {
+	if (desc[NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP].size == 0U) {
 		return false;
 	}
 
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		if ((desc[NVGPU_GR_GLOBAL_CTX_CIRCULAR].size == 0U) ||
+			(desc[NVGPU_GR_GLOBAL_CTX_PAGEPOOL].size == 0U) ||
+			(desc[NVGPU_GR_GLOBAL_CTX_ATTRIBUTE].size == 0U)) {
+			return false;
+		}
+#ifdef CONFIG_NVGPU_VPR
+		if ((desc[NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR].size == 0U) ||
+			(desc[NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR].size == 0U) ||
+			(desc[NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR].size == 0U)) {
+			return false;
+		}
+#endif
+	}
+
 	return true;
 }
@@ -181,6 +189,19 @@ static int nvgpu_gr_global_ctx_buffer_vpr_alloc(struct gk20a *g,
 {
 	int err = 0;
 
+	/*
+	 * MIG supports only compute class.
+	 * Allocate BUNDLE_CB, PAGEPOOL, ATTRIBUTE_CB and RTV_CB
+	 * if 2D/3D/I2M classes(graphics) are supported.
+	 */
+	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		nvgpu_log(g, gpu_dbg_gr | gpu_dbg_mig,
+			"2D class is not supported "
+			"skip BUNDLE_CB, PAGEPOOL, ATTRIBUTE_CB "
+			"and RTV_CB");
+		return 0;
+	}
+
 	err = nvgpu_gr_global_ctx_buffer_alloc_vpr(g, desc,
 			NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR);
 	if (err != 0) {
@@ -208,22 +229,29 @@ static int nvgpu_gr_global_ctx_buffer_sys_alloc(struct gk20a *g,
 {
 	int err = 0;
 
-	err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
-			NVGPU_GR_GLOBAL_CTX_CIRCULAR);
-	if (err != 0) {
-		goto fail;
-	}
-
-	err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
-			NVGPU_GR_GLOBAL_CTX_PAGEPOOL);
-	if (err != 0) {
-		goto fail;
-	}
-
-	err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
-			NVGPU_GR_GLOBAL_CTX_ATTRIBUTE);
-	if (err != 0) {
-		goto fail;
-	}
+	/*
+	 * MIG supports only compute class.
+	 * Allocate BUNDLE_CB, PAGEPOOL, ATTRIBUTE_CB and RTV_CB
+	 * if 2D/3D/I2M classes(graphics) are supported.
+	 */
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
+				NVGPU_GR_GLOBAL_CTX_CIRCULAR);
+		if (err != 0) {
+			goto fail;
+		}
+
+		err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
+				NVGPU_GR_GLOBAL_CTX_PAGEPOOL);
+		if (err != 0) {
+			goto fail;
+		}
+
+		err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
+				NVGPU_GR_GLOBAL_CTX_ATTRIBUTE);
+		if (err != 0) {
+			goto fail;
+		}
+	}
 
 	err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
@@ -261,11 +289,13 @@ int nvgpu_gr_global_ctx_buffer_alloc(struct gk20a *g,
 #endif
 
 #ifdef CONFIG_NVGPU_DGPU
-	if (desc[NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER].size != 0U) {
-		err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
-				NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER);
-		if (err != 0) {
-			goto clean_up;
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		if (desc[NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER].size != 0U) {
+			err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
+				NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER);
+			if (err != 0) {
+				goto clean_up;
+			}
 		}
 	}
 #endif

@@ -60,36 +60,58 @@ static int gr_alloc_global_ctx_buffers(struct gk20a *g, struct nvgpu_gr *gr)
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, " ");
 
-	size = g->ops.gr.init.get_global_ctx_cb_buffer_size(g);
-	nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr, "cb_buffer_size : %d", size);
-
-	nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
-		NVGPU_GR_GLOBAL_CTX_CIRCULAR, size);
-#ifdef CONFIG_NVGPU_VPR
-	nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
-		NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR, size);
-#endif
-
-	size = g->ops.gr.init.get_global_ctx_pagepool_buffer_size(g);
-	nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr, "pagepool_buffer_size : %d", size);
-
-	nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
-		NVGPU_GR_GLOBAL_CTX_PAGEPOOL, size);
-#ifdef CONFIG_NVGPU_VPR
-	nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
-		NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR, size);
-#endif
-
-	size = g->ops.gr.init.get_global_attr_cb_size(g,
-		nvgpu_gr_config_get_tpc_count(gr->config),
-		nvgpu_gr_config_get_max_tpc_count(gr->config));
-	nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr, "attr_buffer_size : %u", size);
-
-	nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
-		NVGPU_GR_GLOBAL_CTX_ATTRIBUTE, size);
-#ifdef CONFIG_NVGPU_VPR
-	nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
-		NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR, size);
-#endif
+	/*
+	 * MIG supports only compute class.
+	 * Allocate BUNDLE_CB, PAGEPOOL, ATTRIBUTE_CB and RTV_CB
+	 * if 2D/3D/I2M classes(graphics) are supported.
+	 */
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		size = g->ops.gr.init.get_global_ctx_cb_buffer_size(g);
+		nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr,
+			"cb_buffer_size : %d", size);
+
+		nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
+			NVGPU_GR_GLOBAL_CTX_CIRCULAR, size);
+#ifdef CONFIG_NVGPU_VPR
+		nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
+			NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR, size);
+#endif
+
+		size = g->ops.gr.init.get_global_ctx_pagepool_buffer_size(g);
+		nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr,
+			"pagepool_buffer_size : %d", size);
+
+		nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
+			NVGPU_GR_GLOBAL_CTX_PAGEPOOL, size);
+#ifdef CONFIG_NVGPU_VPR
+		nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
+			NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR, size);
+#endif
+
+		size = g->ops.gr.init.get_global_attr_cb_size(g,
+			nvgpu_gr_config_get_tpc_count(gr->config),
+			nvgpu_gr_config_get_max_tpc_count(gr->config));
+		nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr,
+			"attr_buffer_size : %u", size);
+
+		nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
+			NVGPU_GR_GLOBAL_CTX_ATTRIBUTE, size);
+#ifdef CONFIG_NVGPU_VPR
+		nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
+			NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR, size);
+#endif
+
+#ifdef CONFIG_NVGPU_DGPU
+		if (g->ops.gr.init.get_rtv_cb_size != NULL) {
+			size = g->ops.gr.init.get_rtv_cb_size(g);
+			nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr,
+				"rtv_circular_buffer_size : %u", size);
+			nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
+				NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER, size);
+		}
+#endif
+	}
 
 	size = NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP_SIZE;
 	nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr, "priv_access_map_size : %d", size);
@@ -104,16 +126,6 @@ static int gr_alloc_global_ctx_buffers(struct gk20a *g, struct nvgpu_gr *gr)
 		NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER, size);
 #endif
 
-#ifdef CONFIG_NVGPU_DGPU
-	if (g->ops.gr.init.get_rtv_cb_size != NULL) {
-		size = g->ops.gr.init.get_rtv_cb_size(g);
-		nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr, "rtv_circular_buffer_size : %u", size);
-		nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
-			NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER, size);
-	}
-#endif
-
 	err = nvgpu_gr_global_ctx_buffer_alloc(g, gr->global_ctx_buffer);
 	if (err != 0) {
 		return err;
@@ -405,8 +417,11 @@ static int gr_init_ctx_bufs(struct gk20a *g, struct nvgpu_gr *gr)
 	}
 
 #ifdef CONFIG_NVGPU_GRAPHICS
-	nvgpu_gr_ctx_set_size(gr->gr_ctx_desc, NVGPU_GR_CTX_PREEMPT_CTXSW,
-		nvgpu_gr_falcon_get_preempt_image_size(gr->falcon));
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		nvgpu_gr_ctx_set_size(gr->gr_ctx_desc,
+			NVGPU_GR_CTX_PREEMPT_CTXSW,
+			nvgpu_gr_falcon_get_preempt_image_size(gr->falcon));
+	}
 #endif
 
 	gr->global_ctx_buffer = nvgpu_gr_global_ctx_desc_alloc(g);

@@ -355,10 +355,12 @@ int nvgpu_gr_setup_set_preemption_mode(struct nvgpu_channel *ch,
 	nvgpu_gr_obj_ctx_update_ctxsw_preemption_mode(g, gr->config, gr_ctx,
 		ch->subctx);
 
-	nvgpu_gr_ctx_patch_write_begin(g, gr_ctx, true);
-	g->ops.gr.init.commit_global_cb_manager(g, gr->config, gr_ctx,
-		true);
-	nvgpu_gr_ctx_patch_write_end(g, gr_ctx, true);
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		nvgpu_gr_ctx_patch_write_begin(g, gr_ctx, true);
+		g->ops.gr.init.commit_global_cb_manager(g, gr->config, gr_ctx,
+			true);
+		nvgpu_gr_ctx_patch_write_end(g, gr_ctx, true);
+	}
 
 	g->ops.tsg.enable(tsg);

@@ -336,40 +336,52 @@ void nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
 		nvgpu_gr_ctx_patch_write_begin(g, gr_ctx, false);
 	}
 
-	/* global pagepool buffer */
-	addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx, NVGPU_GR_CTX_PAGEPOOL_VA);
-	size = nvgpu_safe_cast_u64_to_u32(nvgpu_gr_global_ctx_get_size(
-			global_ctx_buffer, NVGPU_GR_GLOBAL_CTX_PAGEPOOL));
-
-	g->ops.gr.init.commit_global_pagepool(g, gr_ctx, addr, size, patch,
-		true);
-
-	/* global bundle cb */
-	addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx, NVGPU_GR_CTX_CIRCULAR_VA);
-	size = nvgpu_safe_cast_u64_to_u32(
-			g->ops.gr.init.get_bundle_cb_default_size(g));
-
-	g->ops.gr.init.commit_global_bundle_cb(g, gr_ctx, addr, size, patch);
-
-	/* global attrib cb */
-	addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
-		NVGPU_GR_CTX_ATTRIBUTE_VA);
-
-	g->ops.gr.init.commit_global_attrib_cb(g, gr_ctx,
-		nvgpu_gr_config_get_tpc_count(config),
-		nvgpu_gr_config_get_max_tpc_count(config), addr, patch);
-
-	g->ops.gr.init.commit_global_cb_manager(g, config, gr_ctx, patch);
-
-#ifdef CONFIG_NVGPU_DGPU
-	if (g->ops.gr.init.commit_rtv_cb != NULL) {
-		/* RTV circular buffer */
-		addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
-			NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA);
-		g->ops.gr.init.commit_rtv_cb(g, addr, gr_ctx, patch);
-	}
-#endif
+	/*
+	 * MIG supports only compute class.
+	 * Skip BUNDLE_CB, PAGEPOOL, ATTRIBUTE_CB and RTV_CB
+	 * if 2D/3D/I2M classes(graphics) are not supported.
+	 */
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		/* global pagepool buffer */
+		addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
+			NVGPU_GR_CTX_PAGEPOOL_VA);
+		size = nvgpu_safe_cast_u64_to_u32(nvgpu_gr_global_ctx_get_size(
+			global_ctx_buffer,
+			NVGPU_GR_GLOBAL_CTX_PAGEPOOL));
+
+		g->ops.gr.init.commit_global_pagepool(g, gr_ctx, addr, size,
+			patch, true);
+
+		/* global bundle cb */
+		addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
+			NVGPU_GR_CTX_CIRCULAR_VA);
+		size = nvgpu_safe_cast_u64_to_u32(
+			g->ops.gr.init.get_bundle_cb_default_size(g));
+
+		g->ops.gr.init.commit_global_bundle_cb(g, gr_ctx, addr, size,
+			patch);
+
+		/* global attrib cb */
+		addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
+			NVGPU_GR_CTX_ATTRIBUTE_VA);
+
+		g->ops.gr.init.commit_global_attrib_cb(g, gr_ctx,
+			nvgpu_gr_config_get_tpc_count(config),
+			nvgpu_gr_config_get_max_tpc_count(config), addr, patch);
+
+		g->ops.gr.init.commit_global_cb_manager(g, config, gr_ctx,
+			patch);
+
+#ifdef CONFIG_NVGPU_DGPU
+		if (g->ops.gr.init.commit_rtv_cb != NULL) {
+			/* RTV circular buffer */
+			addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
+				NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA);
+			g->ops.gr.init.commit_rtv_cb(g, addr, gr_ctx, patch);
+		}
+#endif
+	}
 
 #ifdef CONFIG_NVGPU_SM_DIVERSITY
 	if ((nvgpu_is_enabled(g, NVGPU_SUPPORT_SM_DIVERSITY)) &&

@@ -856,6 +856,9 @@ int nvgpu_init_gpu_characteristics(struct gk20a *g)
 		nvgpu_set_enabled(g, NVGPU_SUPPORT_3D, true);
 		nvgpu_set_enabled(g, NVGPU_SUPPORT_I2M, true);
 		nvgpu_set_enabled(g, NVGPU_SUPPORT_ZBC, true);
+	} else {
+		nvgpu_set_enabled(g, NVGPU_SUPPORT_ZBC_STENCIL, false);
+		nvgpu_set_enabled(g, NVGPU_SUPPORT_PREEMPTION_GFXP, false);
 	}
 
 	return 0;

@@ -257,37 +257,44 @@ int vgpu_gr_map_global_ctx_buffers(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
 	g_bfr_va = gr_ctx->global_ctx_buffer_va;
 
-	/* Circular Buffer */
-	gpu_va = nvgpu_vm_alloc_va(ch_vm,
-		nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
-			NVGPU_GR_GLOBAL_CTX_CIRCULAR),
-		GMMU_PAGE_SIZE_KERNEL);
-	if (!gpu_va) {
-		goto clean_up;
-	}
-	g_bfr_va[NVGPU_GR_CTX_CIRCULAR_VA] = gpu_va;
-
-	/* Attribute Buffer */
-	gpu_va = nvgpu_vm_alloc_va(ch_vm,
-		nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
-			NVGPU_GR_GLOBAL_CTX_ATTRIBUTE),
-		GMMU_PAGE_SIZE_KERNEL);
-	if (!gpu_va) {
-		goto clean_up;
-	}
-	g_bfr_va[NVGPU_GR_CTX_ATTRIBUTE_VA] = gpu_va;
-
-	/* Page Pool */
-	gpu_va = nvgpu_vm_alloc_va(ch_vm,
-		nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
-			NVGPU_GR_GLOBAL_CTX_PAGEPOOL),
-		GMMU_PAGE_SIZE_KERNEL);
-	if (!gpu_va) {
-		goto clean_up;
-	}
-	g_bfr_va[NVGPU_GR_CTX_PAGEPOOL_VA] = gpu_va;
+	/*
+	 * MIG supports only compute class.
+	 * Allocate BUNDLE_CB, PAGEPOOL, ATTRIBUTE_CB and RTV_CB
+	 * if 2D/3D/I2M classes(graphics) are supported.
+	 */
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		/* Circular Buffer */
+		gpu_va = nvgpu_vm_alloc_va(ch_vm,
+			nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
+				NVGPU_GR_GLOBAL_CTX_CIRCULAR),
+			GMMU_PAGE_SIZE_KERNEL);
+		if (!gpu_va) {
+			goto clean_up;
+		}
+		g_bfr_va[NVGPU_GR_CTX_CIRCULAR_VA] = gpu_va;
+
+		/* Attribute Buffer */
+		gpu_va = nvgpu_vm_alloc_va(ch_vm,
+			nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
+				NVGPU_GR_GLOBAL_CTX_ATTRIBUTE),
+			GMMU_PAGE_SIZE_KERNEL);
+		if (!gpu_va) {
+			goto clean_up;
+		}
+		g_bfr_va[NVGPU_GR_CTX_ATTRIBUTE_VA] = gpu_va;
+
+		/* Page Pool */
+		gpu_va = nvgpu_vm_alloc_va(ch_vm,
+			nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
+				NVGPU_GR_GLOBAL_CTX_PAGEPOOL),
+			GMMU_PAGE_SIZE_KERNEL);
+		if (!gpu_va) {
+			goto clean_up;
+		}
+		g_bfr_va[NVGPU_GR_CTX_PAGEPOOL_VA] = gpu_va;
+	}
 
 	/* Priv register Access Map */
 	gpu_va = nvgpu_vm_alloc_va(ch_vm,

@@ -770,8 +770,11 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
 	}
 
 #ifdef CONFIG_NVGPU_GRAPHICS
-	nvgpu_gr_ctx_set_size(gr->gr_ctx_desc, NVGPU_GR_CTX_PREEMPT_CTXSW,
-		nvgpu_gr_falcon_get_preempt_image_size(g->gr->falcon));
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
+		nvgpu_gr_ctx_set_size(gr->gr_ctx_desc,
+			NVGPU_GR_CTX_PREEMPT_CTXSW,
+			nvgpu_gr_falcon_get_preempt_image_size(g->gr->falcon));
+	}
 #endif
 
 	nvgpu_spinlock_init(&g->gr->intr->ch_tlb_lock);

@@ -123,8 +123,6 @@ int vgpu_init_gpu_characteristics(struct gk20a *g)
 		return err;
 	}
 
-	nvgpu_set_enabled(g, NVGPU_SUPPORT_PREEMPTION_GFXP, true);
-
 	/* features vgpu does not support */
 	nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH, false);
 	nvgpu_set_enabled(g, NVGPU_SUPPORT_RESCHEDULE_RUNLIST, false);