diff --git a/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c b/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
index 9431682ed..af42fc81b 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
@@ -456,6 +456,9 @@ static int nvgpu_set_pd_level(struct vm_gk20a *vm,
 		"  ", /* L=4 */
 	};
 
+	/* This limits recursion */
+	nvgpu_assert(lvl < g->ops.mm.gmmu.get_max_page_table_levels(g));
+
 	pde_range = 1ULL << (u64)l->lo_bit[attrs->pgsz];
 
 	nvgpu_gmmu_dbg_v(g, attrs,
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index e34ab9b50..38d8159a8 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -185,8 +185,12 @@ static void nvgpu_vm_do_free_entries(struct vm_gk20a *vm,
 				     struct nvgpu_gmmu_pd *pd,
 				     u32 level)
 {
+	struct gk20a *g = gk20a_from_vm(vm);
 	u32 i;
 
+	/* This limits recursion */
+	nvgpu_assert(level < g->ops.mm.gmmu.get_max_page_table_levels(g));
+
 	if (pd->mem != NULL) {
 		nvgpu_pd_free(vm, pd);
 		pd->mem = NULL;
diff --git a/drivers/gpu/nvgpu/libnvgpu-drv_safe.export b/drivers/gpu/nvgpu/libnvgpu-drv_safe.export
index e12cefc5e..799ad5780 100644
--- a/drivers/gpu/nvgpu/libnvgpu-drv_safe.export
+++ b/drivers/gpu/nvgpu/libnvgpu-drv_safe.export
@@ -20,6 +20,7 @@ gm20b_fb_tlb_invalidate
 gm20b_mm_get_big_page_sizes
 gm20b_gr_falcon_get_fecs_ctx_state_store_major_rev_id
 gm20b_ramin_set_big_page_size
+gp10b_get_max_page_table_levels
 gp10b_mm_get_default_big_page_size
 gp10b_mm_get_iommu_bit
 gp10b_mm_get_mmu_levels
diff --git a/userspace/units/mm/allocators/buddy_allocator/buddy_allocator.c b/userspace/units/mm/allocators/buddy_allocator/buddy_allocator.c
index 03a0ab10a..cce2162d7 100644
--- a/userspace/units/mm/allocators/buddy_allocator/buddy_allocator.c
+++ b/userspace/units/mm/allocators/buddy_allocator/buddy_allocator.c
@@ -73,6 +73,7 @@ static struct vm_gk20a *init_vm_env(struct unit_module *m, struct gk20a *g,
 	g->ops.mm.gmmu.get_default_big_page_size =
 		gp10b_mm_get_default_big_page_size;
 	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
+	g->ops.mm.gmmu.get_max_page_table_levels = gp10b_get_max_page_table_levels;
 
 #ifdef CONFIG_NVGPU_DGPU
 	/* Minimum HAL init for PRAMIN */
diff --git a/userspace/units/mm/as/as.c b/userspace/units/mm/as/as.c
index 037f838de..7285b5e46 100644
--- a/userspace/units/mm/as/as.c
+++ b/userspace/units/mm/as/as.c
@@ -135,6 +135,7 @@ int test_init_mm(struct unit_module *m, struct gk20a *g, void *args)
 	g->ops.mm.gmmu.get_default_big_page_size =
 		gp10b_mm_get_default_big_page_size;
 	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
+	g->ops.mm.gmmu.get_max_page_table_levels = gp10b_get_max_page_table_levels;
 	g->ops.mm.init_inst_block = gv11b_mm_init_inst_block;
 	g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
 	g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;
diff --git a/userspace/units/mm/dma/dma.c b/userspace/units/mm/dma/dma.c
index 69aa264f4..676c80893 100644
--- a/userspace/units/mm/dma/dma.c
+++ b/userspace/units/mm/dma/dma.c
@@ -154,6 +154,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	g->ops.mm.gmmu.get_default_big_page_size =
 		gp10b_mm_get_default_big_page_size;
 	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
+	g->ops.mm.gmmu.get_max_page_table_levels = gp10b_get_max_page_table_levels;
 	g->ops.mm.init_inst_block = gv11b_mm_init_inst_block;
 	g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
 	g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;
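The driver hunks above bound two recursive page-table walkers, nvgpu_set_pd_level() and nvgpu_vm_do_free_entries(), by asserting the current level against a new per-chip HAL op, g->ops.mm.gmmu.get_max_page_table_levels(). The standalone sketch below only illustrates that bounded-recursion pattern; the walker, the node type, and the depth constant of 5 are hypothetical stand-ins rather than the nvgpu definitions, with 5 assumed from the multi-level Pascal+ GMMU format.

/*
 * Standalone illustration of the bounded-recursion check added above.
 * MAX_PT_LEVELS stands in for g->ops.mm.gmmu.get_max_page_table_levels(g),
 * and struct pd_node is a hypothetical stand-in for struct nvgpu_gmmu_pd.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_PT_LEVELS 5u  /* assumed depth; the real driver asks the HAL */

struct pd_node {
	struct pd_node *entries;   /* child directories, NULL at the leaf */
	unsigned int num_entries;
};

static void walk_pd_level(const struct pd_node *pd, unsigned int level)
{
	unsigned int i;

	/*
	 * Same idea as the new nvgpu_assert() calls: a well-formed walk can
	 * never descend past the deepest level the format defines, so
	 * hitting the limit indicates corrupted page-table metadata.
	 */
	assert(level < MAX_PT_LEVELS);

	for (i = 0; i < pd->num_entries; i++) {
		if (pd->entries != NULL) {
			walk_pd_level(&pd->entries[i], level + 1u);
		}
	}
}

int main(void)
{
	struct pd_node leaf = { .entries = NULL, .num_entries = 0u };
	struct pd_node root = { .entries = &leaf, .num_entries = 1u };

	walk_pd_level(&root, 0u);  /* root -> leaf, both asserts hold */
	printf("walk stayed within %u levels\n", MAX_PT_LEVELS);
	return 0;
}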
diff --git a/userspace/units/mm/gmmu/page_table/page_table.c b/userspace/units/mm/gmmu/page_table/page_table.c
index a140f58a9..cdc81adc3 100644
--- a/userspace/units/mm/gmmu/page_table/page_table.c
+++ b/userspace/units/mm/gmmu/page_table/page_table.c
@@ -301,6 +301,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	g->ops.mm.gmmu.get_default_big_page_size =
 		gp10b_mm_get_default_big_page_size;
 	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
+	g->ops.mm.gmmu.get_max_page_table_levels = gp10b_get_max_page_table_levels;
 	g->ops.mm.init_inst_block = gv11b_mm_init_inst_block;
 	g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
 	g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;
diff --git a/userspace/units/mm/mm/mm.c b/userspace/units/mm/mm/mm.c
index 367b3dd64..3df78e565 100644
--- a/userspace/units/mm/mm/mm.c
+++ b/userspace/units/mm/mm/mm.c
@@ -466,6 +466,7 @@ int test_mm_init_hal(struct unit_module *m, struct gk20a *g, void *args)
 	g->ops.mm.gmmu.get_default_big_page_size =
 		gp10b_mm_get_default_big_page_size;
 	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
+	g->ops.mm.gmmu.get_max_page_table_levels = gp10b_get_max_page_table_levels;
 	g->ops.mm.init_inst_block = gv11b_mm_init_inst_block;
 	g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
 	g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;
diff --git a/userspace/units/mm/page_table_faults/page_table_faults.c b/userspace/units/mm/page_table_faults/page_table_faults.c
index 5b409cf0d..dc2e0c8bf 100644
--- a/userspace/units/mm/page_table_faults/page_table_faults.c
+++ b/userspace/units/mm/page_table_faults/page_table_faults.c
@@ -122,6 +122,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	g->ops.mm.gmmu.get_default_big_page_size =
 		gp10b_mm_get_default_big_page_size;
 	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
+	g->ops.mm.gmmu.get_max_page_table_levels = gp10b_get_max_page_table_levels;
 	g->ops.mm.init_inst_block = gv11b_mm_init_inst_block;
 	g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
 	g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;
diff --git a/userspace/units/mm/vm/vm.c b/userspace/units/mm/vm/vm.c
index 5f78db1d5..aa60136aa 100644
--- a/userspace/units/mm/vm/vm.c
+++ b/userspace/units/mm/vm/vm.c
@@ -175,6 +175,7 @@ static int init_test_env(struct unit_module *m, struct gk20a *g)
 	g->ops.mm.gmmu.get_default_big_page_size =
 		gp10b_mm_get_default_big_page_size;
 	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
+	g->ops.mm.gmmu.get_max_page_table_levels = gp10b_get_max_page_table_levels;
 	g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
 	g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;
 	g->ops.mm.gmmu.gpu_phys_addr = gv11b_gpu_phys_addr;
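Every unit-test init path in this change gains the same one-line HAL registration because the new asserts call through g->ops.mm.gmmu.get_max_page_table_levels; a test environment that left that pointer NULL would fault the first time a page-table walk ran. The sketch below mimics that wiring with hypothetical mock types and a stub return value of 5, all of which are assumptions for illustration rather than the nvgpu definitions.

/*
 * Minimal mock of the HAL-op wiring the unit tests perform above.
 * The mock_* names and the returned depth of 5 are illustrative assumptions.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct mock_gpu;  /* stands in for struct gk20a */

struct mock_gmmu_ops {
	unsigned int (*get_max_page_table_levels)(struct mock_gpu *g);
};

struct mock_gpu {
	struct mock_gmmu_ops gmmu;
};

/* Plays the role of gp10b_get_max_page_table_levels(); the value is assumed. */
static unsigned int mock_gp10b_get_max_page_table_levels(struct mock_gpu *g)
{
	(void)g;
	return 5u;
}

int main(void)
{
	struct mock_gpu g = { .gmmu = { .get_max_page_table_levels = NULL } };

	/*
	 * Equivalent of the "+g->ops.mm.gmmu.get_max_page_table_levels = ..."
	 * lines added to each test's init path; without this registration the
	 * recursion-limit asserts would call through a NULL pointer.
	 */
	g.gmmu.get_max_page_table_levels = mock_gp10b_get_max_page_table_levels;

	assert(g.gmmu.get_max_page_table_levels != NULL);
	printf("max page table levels: %u\n",
	       g.gmmu.get_max_page_table_levels(&g));
	return 0;
}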