gpu: nvgpu: mm: limit recursion depth for pd levels

The two MM functions nvgpu_set_pd_level() and nvgpu_vm_do_free_entries()
are simple recursive functions for processing page descriptors (PDs) by
traversing the levels in the PDs. MISRA Rule 17.2 prohibits functions
from calling themselves, because "unless recursion is tightly
controlled, it is not possible to determine before execution what the
worst-case stack usage could be." So, this change limits the recursion
depth of each of these functions to the maximum number of page table
levels by asserting that the current level is below the value returned
by the MM HAL get_max_page_table_levels().
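
A minimal, self-contained sketch of the pattern (not nvgpu code; struct
pd, MAX_PD_LEVELS, and walk_pd are hypothetical stand-ins for the real
page-descriptor structures and for the value returned by the
get_max_page_table_levels() HAL):

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Assumed upper bound; the driver gets this per chip from the HAL. */
    #define MAX_PD_LEVELS 5U

    struct pd {
        struct pd *next;    /* child PD, NULL at the last level */
    };

    static void walk_pd(const struct pd *pd, unsigned int level)
    {
        /*
         * This limits recursion: the walk can never descend past
         * MAX_PD_LEVELS, so the worst-case stack usage is bounded.
         */
        assert(level < MAX_PD_LEVELS);

        printf("visiting PD at level %u\n", level);

        if (pd->next != NULL) {
            walk_pd(pd->next, level + 1U);
        }
    }

    int main(void)
    {
        struct pd pte  = { .next = NULL };
        struct pd pde0 = { .next = &pte };

        walk_pd(&pde0, 0U);
        return 0;
    }

In the driver the bound is not a compile-time constant but the per-chip
value from g->ops.mm.gmmu.get_max_page_table_levels(g), as the hunks
below show.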

This also required configuring this HAL in the various unit tests that
were not previously setting it up.

JIRA NVGPU-3489

Change-Id: Iadee3fa5ba9f45cd643ac6c202e9296d75d51880
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2224450
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Philip Elcan
Date:      2019-10-23 15:27:10 -04:00
Committer: Alex Waterman
Commit:    49a620e48a (parent cf8707e2b3)

10 changed files with 15 additions and 0 deletions

@@ -456,6 +456,9 @@ static int nvgpu_set_pd_level(struct vm_gk20a *vm,
 		"        ",  /* L=4 */
 	};
 
+	/* This limits recursion */
+	nvgpu_assert(lvl < g->ops.mm.gmmu.get_max_page_table_levels(g));
+
 	pde_range = 1ULL << (u64)l->lo_bit[attrs->pgsz];
 
 	nvgpu_gmmu_dbg_v(g, attrs,

@@ -185,8 +185,12 @@ static void nvgpu_vm_do_free_entries(struct vm_gk20a *vm,
 				     struct nvgpu_gmmu_pd *pd,
 				     u32 level)
 {
+	struct gk20a *g = gk20a_from_vm(vm);
 	u32 i;
 
+	/* This limits recursion */
+	nvgpu_assert(level < g->ops.mm.gmmu.get_max_page_table_levels(g));
+
 	if (pd->mem != NULL) {
 		nvgpu_pd_free(vm, pd);
 		pd->mem = NULL;

@@ -20,6 +20,7 @@ gm20b_fb_tlb_invalidate
 gm20b_mm_get_big_page_sizes
 gm20b_gr_falcon_get_fecs_ctx_state_store_major_rev_id
 gm20b_ramin_set_big_page_size
+gp10b_get_max_page_table_levels
 gp10b_mm_get_default_big_page_size
 gp10b_mm_get_iommu_bit
 gp10b_mm_get_mmu_levels

@@ -73,6 +73,7 @@ static struct vm_gk20a *init_vm_env(struct unit_module *m, struct gk20a *g,
 	g->ops.mm.gmmu.get_default_big_page_size =
 				gp10b_mm_get_default_big_page_size;
 	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
+	g->ops.mm.gmmu.get_max_page_table_levels = gp10b_get_max_page_table_levels;
 #ifdef CONFIG_NVGPU_DGPU
 	/* Minimum HAL init for PRAMIN */

@@ -135,6 +135,7 @@ int test_init_mm(struct unit_module *m, struct gk20a *g, void *args)
 	g->ops.mm.gmmu.get_default_big_page_size =
 				gp10b_mm_get_default_big_page_size;
 	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
+	g->ops.mm.gmmu.get_max_page_table_levels = gp10b_get_max_page_table_levels;
 	g->ops.mm.init_inst_block = gv11b_mm_init_inst_block;
 	g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
 	g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;

@@ -154,6 +154,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	g->ops.mm.gmmu.get_default_big_page_size =
 				gp10b_mm_get_default_big_page_size;
 	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
+	g->ops.mm.gmmu.get_max_page_table_levels = gp10b_get_max_page_table_levels;
 	g->ops.mm.init_inst_block = gv11b_mm_init_inst_block;
 	g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
 	g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;

@@ -301,6 +301,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	g->ops.mm.gmmu.get_default_big_page_size =
 				gp10b_mm_get_default_big_page_size;
 	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
+	g->ops.mm.gmmu.get_max_page_table_levels = gp10b_get_max_page_table_levels;
 	g->ops.mm.init_inst_block = gv11b_mm_init_inst_block;
 	g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
 	g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;

@@ -466,6 +466,7 @@ int test_mm_init_hal(struct unit_module *m, struct gk20a *g, void *args)
 	g->ops.mm.gmmu.get_default_big_page_size =
 				gp10b_mm_get_default_big_page_size;
 	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
+	g->ops.mm.gmmu.get_max_page_table_levels = gp10b_get_max_page_table_levels;
 	g->ops.mm.init_inst_block = gv11b_mm_init_inst_block;
 	g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
 	g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;

@@ -122,6 +122,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	g->ops.mm.gmmu.get_default_big_page_size =
 				gp10b_mm_get_default_big_page_size;
 	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
+	g->ops.mm.gmmu.get_max_page_table_levels = gp10b_get_max_page_table_levels;
 	g->ops.mm.init_inst_block = gv11b_mm_init_inst_block;
 	g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
 	g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;

@@ -175,6 +175,7 @@ static int init_test_env(struct unit_module *m, struct gk20a *g)
 	g->ops.mm.gmmu.get_default_big_page_size =
 				gp10b_mm_get_default_big_page_size;
 	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
+	g->ops.mm.gmmu.get_max_page_table_levels = gp10b_get_max_page_table_levels;
 	g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
 	g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;
 	g->ops.mm.gmmu.gpu_phys_addr = gv11b_gpu_phys_addr;