gpu: nvgpu: gv1xx: resize patch buffer

Follow the sizing consideration in bug 1753763 to support dynamic TPC modes
and subcontexts.

Bug 200350539

Change-Id: Ibbdbf02f9c2ea3f082c1b2810ae7176b0775d461
Signed-off-by: David Nieto <dmartineznie@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1584034
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
David Nieto
2017-10-23 14:01:28 -07:00
committed by mobile promotions
parent 938785f152
commit 2029426446
4 changed files with 56 additions and 1 deletions

View File

@@ -294,3 +294,56 @@ void gr_gv100_load_tpc_mask(struct gk20a *g)
gk20a_writel(g, gr_fe_tpc_fs_r(0), u64_lo32(pes_tpc_mask)); gk20a_writel(g, gr_fe_tpc_fs_r(0), u64_lo32(pes_tpc_mask));
gk20a_writel(g, gr_fe_tpc_fs_r(1), u64_hi32(pes_tpc_mask)); gk20a_writel(g, gr_fe_tpc_fs_r(1), u64_hi32(pes_tpc_mask));
} }
/*
 * Compute the number of patch-buffer slots needed for a gv100/gv11b
 * graphics context, accounting for dynamic TPC mode changes and for
 * per-subcontext replication of the PE/PL tables.
 *
 * Returns the slot count rounded up to whole 4K pages, plus headroom
 * for one additional TPC partition update.
 */
u32 gr_gv100_get_patch_slots(struct gk20a *g)
{
	struct gr_gk20a *gr = &g->gr;
	struct fifo_gk20a *f = &g->fifo;
	u32 slots = 0;

	/* One CMD slot to kick off the PE table update. */
	slots += 1;

	/* PE table payload: each patch-buffer write updates 32 TPCs. */
	slots += DIV_ROUND_UP(gr->tpc_count, 32);

	/* PL table payload: each patch-buffer write configures 4 TPCs. */
	slots += DIV_ROUND_UP(gr->tpc_count, 4);

	/* The table updates above are replicated for every subcontext. */
	slots *= f->t19x.max_subctx_count;

	/*
	 * A partition mode change needs two extra slots, since the
	 * DYNAMIC -> STATIC transition goes DYNAMIC -> NONE -> STATIC.
	 */
	slots += 2;

	/* Add the baseline (pre-gv100) patch buffer requirement. */
	slots += gr_gk20a_get_patch_slots(g);

	/* Round up to a whole 4K page worth of slots. */
	slots = ALIGN(slots, PATCH_CTX_SLOTS_PER_PAGE);

	/* Headroom for an additional TPC partition update. */
	slots += 2 * PATCH_CTX_SLOTS_PER_PAGE;

	return slots;
}

View File

@@ -32,5 +32,5 @@ void gr_gv100_init_sm_id_table(struct gk20a *g);
void gr_gv100_program_sm_id_numbering(struct gk20a *g, void gr_gv100_program_sm_id_numbering(struct gk20a *g,
u32 gpc, u32 tpc, u32 smid); u32 gpc, u32 tpc, u32 smid);
int gr_gv100_load_smid_config(struct gk20a *g); int gr_gv100_load_smid_config(struct gk20a *g);
u32 gr_gv100_get_patch_slots(struct gk20a *g);
#endif #endif

View File

@@ -261,6 +261,7 @@ static const struct gpu_ops gv100_ops = {
.get_num_pce = gv11b_ce_get_num_pce, .get_num_pce = gv11b_ce_get_num_pce,
}, },
.gr = { .gr = {
.get_patch_slots = gr_gv100_get_patch_slots,
.init_gpc_mmu = gr_gv11b_init_gpc_mmu, .init_gpc_mmu = gr_gv11b_init_gpc_mmu,
.bundle_cb_defaults = gr_gv100_bundle_cb_defaults, .bundle_cb_defaults = gr_gv100_bundle_cb_defaults,
.cb_size_default = gr_gv100_cb_size_default, .cb_size_default = gr_gv100_cb_size_default,

View File

@@ -227,6 +227,7 @@ static const struct gpu_ops gv11b_ops = {
.get_num_pce = gv11b_ce_get_num_pce, .get_num_pce = gv11b_ce_get_num_pce,
}, },
.gr = { .gr = {
.get_patch_slots = gr_gv100_get_patch_slots,
.init_gpc_mmu = gr_gv11b_init_gpc_mmu, .init_gpc_mmu = gr_gv11b_init_gpc_mmu,
.bundle_cb_defaults = gr_gv11b_bundle_cb_defaults, .bundle_cb_defaults = gr_gv11b_bundle_cb_defaults,
.cb_size_default = gr_gv11b_cb_size_default, .cb_size_default = gr_gv11b_cb_size_default,