diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c index 445e2ff23..3ae09e20f 100644 --- a/drivers/gpu/nvgpu/common/mm/mm.c +++ b/drivers/gpu/nvgpu/common/mm/mm.c @@ -246,14 +246,16 @@ static int nvgpu_init_hwpm(struct mm_gk20a *mm) static int nvgpu_init_cde_vm(struct mm_gk20a *mm) { struct gk20a *g = gk20a_from_mm(mm); + u64 user_size, kernel_size; u32 big_page_size = g->ops.mm.gmmu.get_default_big_page_size(); + g->ops.mm.get_default_va_sizes(NULL, &user_size, &kernel_size); + mm->cde.vm = nvgpu_vm_init(g, big_page_size, U64(big_page_size) << U64(10), - nvgpu_safe_sub_u64(NV_MM_DEFAULT_USER_SIZE, + nvgpu_safe_sub_u64(user_size, U64(big_page_size) << U64(10)), - NV_MM_DEFAULT_KERNEL_SIZE, - false, false, false, "cde"); + kernel_size, false, false, false, "cde"); if (mm->cde.vm == NULL) { return -ENOMEM; } @@ -263,14 +265,16 @@ static int nvgpu_init_cde_vm(struct mm_gk20a *mm) static int nvgpu_init_ce_vm(struct mm_gk20a *mm) { struct gk20a *g = gk20a_from_mm(mm); + u64 user_size, kernel_size; u32 big_page_size = g->ops.mm.gmmu.get_default_big_page_size(); + g->ops.mm.get_default_va_sizes(NULL, &user_size, &kernel_size); + mm->ce.vm = nvgpu_vm_init(g, big_page_size, U64(big_page_size) << U64(10), - nvgpu_safe_sub_u64(NV_MM_DEFAULT_USER_SIZE, + nvgpu_safe_sub_u64(user_size, U64(big_page_size) << U64(10)), - NV_MM_DEFAULT_KERNEL_SIZE, - false, false, false, "ce"); + kernel_size, false, false, false, "ce"); if (mm->ce.vm == NULL) { return -ENOMEM; } @@ -507,8 +511,8 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g) nvgpu_mutex_init(&mm->l2_op_lock); /*TBD: make channel vm size configurable */ - mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE; - mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE; + g->ops.mm.get_default_va_sizes(NULL, &mm->channel.user_size, + &mm->channel.kernel_size); nvgpu_log_info(g, "channel vm size: user %uMB kernel %uMB", nvgpu_safe_cast_u64_to_u32(mm->channel.user_size >> U64(20)), diff --git 
a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c index 32334e522..ce35023d4 100644 --- a/drivers/gpu/nvgpu/common/mm/vm.c +++ b/drivers/gpu/nvgpu/common/mm/vm.c @@ -710,22 +710,15 @@ static int nvgpu_vm_init_attributes(struct mm_gk20a *mm, const char *name) { struct gk20a *g = gk20a_from_mm(mm); - u64 low_hole_size, user_va_size; u64 aperture_size; - u64 pde_align = (U64(big_page_size) << U64(10)); + u64 default_aperture_size; - if (user_reserved == 0ULL) { - low_hole_size = low_hole; - user_va_size = user_reserved; - } else { - low_hole_size = ALIGN(low_hole, pde_align); - user_va_size = ALIGN(user_reserved, pde_align); - } + g->ops.mm.get_default_va_sizes(&default_aperture_size, NULL, NULL); aperture_size = nvgpu_safe_add_u64(kernel_reserved, - nvgpu_safe_add_u64(user_va_size, low_hole_size)); + nvgpu_safe_add_u64(user_reserved, low_hole)); - if (aperture_size > NV_MM_DEFAULT_APERTURE_SIZE) { + if (aperture_size > default_aperture_size) { nvgpu_do_assert_print(g, "Overlap between user and kernel spaces"); return -ENOMEM; @@ -758,7 +751,7 @@ static int nvgpu_vm_init_attributes(struct mm_gk20a *mm, vm->vma[GMMU_PAGE_SIZE_BIG] = &vm->user_lp; } - vm->va_start = low_hole_size; + vm->va_start = low_hole; vm->va_limit = aperture_size; vm->big_page_size = vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG]; @@ -777,7 +770,7 @@ static int nvgpu_vm_init_attributes(struct mm_gk20a *mm, } /* - * Initialize a preallocated vm + * Initialize a preallocated vm. 
*/ int nvgpu_vm_do_init(struct mm_gk20a *mm, struct vm_gk20a *vm, diff --git a/drivers/gpu/nvgpu/common/perf/perfbuf.c b/drivers/gpu/nvgpu/common/perf/perfbuf.c index 4786008b9..616972e46 100644 --- a/drivers/gpu/nvgpu/common/perf/perfbuf.c +++ b/drivers/gpu/nvgpu/common/perf/perfbuf.c @@ -81,11 +81,13 @@ int nvgpu_perfbuf_init_vm(struct gk20a *g) struct mm_gk20a *mm = &g->mm; u32 big_page_size = g->ops.mm.gmmu.get_default_big_page_size(); int err; + u64 user_size, kernel_size; + + g->ops.mm.get_default_va_sizes(NULL, &user_size, &kernel_size); mm->perfbuf.vm = nvgpu_vm_init(g, big_page_size, SZ_4K, - nvgpu_safe_sub_u64(NV_MM_DEFAULT_USER_SIZE, SZ_4K), - NV_MM_DEFAULT_KERNEL_SIZE, - false, false, false, "perfbuf"); + nvgpu_safe_sub_u64(user_size, SZ_4K), + kernel_size, false, false, false, "perfbuf"); if (mm->perfbuf.vm == NULL) { return -ENOMEM; } diff --git a/drivers/gpu/nvgpu/common/vgpu/mm/mm_vgpu.c b/drivers/gpu/nvgpu/common/vgpu/mm/mm_vgpu.c index 35b432c70..0f58e4394 100644 --- a/drivers/gpu/nvgpu/common/vgpu/mm/mm_vgpu.c +++ b/drivers/gpu/nvgpu/common/vgpu/mm/mm_vgpu.c @@ -1,7 +1,7 @@ /* * Virtualized GPU Memory Management * - * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -52,8 +52,8 @@ static int vgpu_init_mm_setup_sw(struct gk20a *g) mm->g = g; /*TBD: make channel vm size configurable */ - mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE; - mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE; + g->ops.mm.get_default_va_sizes(NULL, &mm->channel.user_size, + &mm->channel.kernel_size); nvgpu_log_info(g, "channel vm size: user %dMB kernel %dMB", (int)(mm->channel.user_size >> 20), diff --git a/drivers/gpu/nvgpu/hal/init/hal_gm20b.c b/drivers/gpu/nvgpu/hal/init/hal_gm20b.c index 6c4d38f16..9506f55c3 100644 --- a/drivers/gpu/nvgpu/hal/init/hal_gm20b.c +++ b/drivers/gpu/nvgpu/hal/init/hal_gm20b.c @@ -781,6 +781,7 @@ static const struct gops_mm gm20b_ops_mm = { .setup_hw = nvgpu_mm_setup_hw, .is_bar1_supported = gm20b_mm_is_bar1_supported, .init_inst_block = gk20a_mm_init_inst_block, + .get_default_va_sizes = gm20b_mm_get_default_va_sizes, #ifdef CONFIG_NVGPU_USERD .bar1_map_userd = gk20a_mm_bar1_map_userd, #endif diff --git a/drivers/gpu/nvgpu/hal/init/hal_gp10b.c b/drivers/gpu/nvgpu/hal/init/hal_gp10b.c index 0071995fb..b8175369b 100644 --- a/drivers/gpu/nvgpu/hal/init/hal_gp10b.c +++ b/drivers/gpu/nvgpu/hal/init/hal_gp10b.c @@ -871,6 +871,7 @@ static const struct gops_mm gp10b_ops_mm = { .init_inst_block = gk20a_mm_init_inst_block, .init_bar2_vm = gp10b_mm_init_bar2_vm, .remove_bar2_vm = gp10b_mm_remove_bar2_vm, + .get_default_va_sizes = gp10b_mm_get_default_va_sizes, #ifdef CONFIG_NVGPU_USERD .bar1_map_userd = gk20a_mm_bar1_map_userd, #endif diff --git a/drivers/gpu/nvgpu/hal/init/hal_gv11b.c b/drivers/gpu/nvgpu/hal/init/hal_gv11b.c index 37bde9fd5..73308e580 100644 --- a/drivers/gpu/nvgpu/hal/init/hal_gv11b.c +++ b/drivers/gpu/nvgpu/hal/init/hal_gv11b.c @@ -1056,6 +1056,7 @@ static const struct gops_mm gv11b_ops_mm = { .init_inst_block_for_subctxs = 
gv11b_mm_init_inst_block_for_subctxs, .init_bar2_vm = gp10b_mm_init_bar2_vm, .remove_bar2_vm = gp10b_mm_remove_bar2_vm, + .get_default_va_sizes = gp10b_mm_get_default_va_sizes, .bar1_map_userd = NULL, }; diff --git a/drivers/gpu/nvgpu/hal/init/hal_tu104.c b/drivers/gpu/nvgpu/hal/init/hal_tu104.c index 36a807a4a..143931fcb 100644 --- a/drivers/gpu/nvgpu/hal/init/hal_tu104.c +++ b/drivers/gpu/nvgpu/hal/init/hal_tu104.c @@ -1108,6 +1108,7 @@ static const struct gops_mm tu104_ops_mm = { .init_bar2_vm = gp10b_mm_init_bar2_vm, .remove_bar2_vm = gp10b_mm_remove_bar2_vm, .get_flush_retries = tu104_mm_get_flush_retries, + .get_default_va_sizes = gp10b_mm_get_default_va_sizes, .bar1_map_userd = NULL, }; diff --git a/drivers/gpu/nvgpu/hal/mm/gmmu/gmmu_gk20a.c b/drivers/gpu/nvgpu/hal/mm/gmmu/gmmu_gk20a.c index 1106260ae..24f37fea5 100644 --- a/drivers/gpu/nvgpu/hal/mm/gmmu/gmmu_gk20a.c +++ b/drivers/gpu/nvgpu/hal/mm/gmmu/gmmu_gk20a.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -236,7 +236,7 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm, } const struct gk20a_mmu_level gk20a_mm_levels_64k[] = { - {.hi_bit = {NV_GMMU_VA_RANGE-1, NV_GMMU_VA_RANGE-1}, + {.hi_bit = {37, 37}, .lo_bit = {26, 26}, .update_entry = update_gmmu_pde_locked, .entry_size = 8, @@ -250,7 +250,7 @@ const struct gk20a_mmu_level gk20a_mm_levels_64k[] = { }; const struct gk20a_mmu_level gk20a_mm_levels_128k[] = { - {.hi_bit = {NV_GMMU_VA_RANGE-1, NV_GMMU_VA_RANGE-1}, + {.hi_bit = {37, 37}, .lo_bit = {27, 27}, .update_entry = update_gmmu_pde_locked, .entry_size = 8, diff --git a/drivers/gpu/nvgpu/hal/mm/mm_gm20b.c b/drivers/gpu/nvgpu/hal/mm/mm_gm20b.c index e06de2dc9..2d2ecde4c 100644 --- a/drivers/gpu/nvgpu/hal/mm/mm_gm20b.c +++ b/drivers/gpu/nvgpu/hal/mm/mm_gm20b.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -20,9 +20,37 @@ * DEALINGS IN THE SOFTWARE. */ +#include + #include "mm_gm20b.h" bool gm20b_mm_is_bar1_supported(struct gk20a *g) { return true; } + +void gm20b_mm_get_default_va_sizes(u64 *aperture_size, + u64 *user_size, u64 *kernel_size) +{ + /* + * The maximum GPU VA range supported. + * Max VA Bits = 40, refer dev_mmu.ref. + */ + if (aperture_size != NULL) { + *aperture_size = BIT64(40); + } + + /* + * The default userspace-visible GPU VA size. + */ + if (user_size != NULL) { + *user_size = BIT64(37); + } + + /* + * The default kernel-reserved GPU VA size. 
+ */ + if (kernel_size != NULL) { + *kernel_size = BIT64(32); + } +} diff --git a/drivers/gpu/nvgpu/hal/mm/mm_gm20b.h b/drivers/gpu/nvgpu/hal/mm/mm_gm20b.h index aecb3aee2..0face83ae 100644 --- a/drivers/gpu/nvgpu/hal/mm/mm_gm20b.h +++ b/drivers/gpu/nvgpu/hal/mm/mm_gm20b.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -28,5 +28,7 @@ struct gk20a; bool gm20b_mm_is_bar1_supported(struct gk20a *g); +void gm20b_mm_get_default_va_sizes(u64 *aperture_size, + u64 *user_size, u64 *kernel_size); #endif diff --git a/drivers/gpu/nvgpu/hal/mm/mm_gp10b.h b/drivers/gpu/nvgpu/hal/mm/mm_gp10b.h index fe087f97f..d1e08d110 100644 --- a/drivers/gpu/nvgpu/hal/mm/mm_gp10b.h +++ b/drivers/gpu/nvgpu/hal/mm/mm_gp10b.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -27,5 +27,7 @@ struct gk20a; int gp10b_mm_init_bar2_vm(struct gk20a *g); void gp10b_mm_remove_bar2_vm(struct gk20a *g); +void gp10b_mm_get_default_va_sizes(u64 *aperture_size, + u64 *user_size, u64 *kernel_size); #endif diff --git a/drivers/gpu/nvgpu/hal/mm/mm_gp10b_fusa.c b/drivers/gpu/nvgpu/hal/mm/mm_gp10b_fusa.c index 40f07743f..1574d4e51 100644 --- a/drivers/gpu/nvgpu/hal/mm/mm_gp10b_fusa.c +++ b/drivers/gpu/nvgpu/hal/mm/mm_gp10b_fusa.c @@ -21,6 +21,7 @@ */ #include +#include #include #include @@ -67,3 +68,29 @@ void gp10b_mm_remove_bar2_vm(struct gk20a *g) nvgpu_free_inst_block(g, &mm->bar2.inst_block); nvgpu_vm_put(mm->bar2.vm); } + +void gp10b_mm_get_default_va_sizes(u64 *aperture_size, + u64 *user_size, u64 *kernel_size) +{ + /* + * The maximum GPU VA range supported. + * Max VA Bits = 49, refer dev_mmu.ref. + */ + if (aperture_size != NULL) { + *aperture_size = BIT64(49); + } + + /* + * The default userspace-visible GPU VA size. + */ + if (user_size != NULL) { + *user_size = BIT64(37); + } + + /* + * The default kernel-reserved GPU VA size. 
+ */ + if (kernel_size != NULL) { + *kernel_size = BIT64(32); + } +} diff --git a/drivers/gpu/nvgpu/hal/vgpu/init/vgpu_hal_gp10b.c b/drivers/gpu/nvgpu/hal/vgpu/init/vgpu_hal_gp10b.c index 74890236d..ae189236c 100644 --- a/drivers/gpu/nvgpu/hal/vgpu/init/vgpu_hal_gp10b.c +++ b/drivers/gpu/nvgpu/hal/vgpu/init/vgpu_hal_gp10b.c @@ -645,6 +645,7 @@ static const struct gops_mm vgpu_gp10b_ops_mm = { .bar1_map_userd = vgpu_mm_bar1_map_userd, .vm_as_alloc_share = vgpu_vm_as_alloc_share, .vm_as_free_share = vgpu_vm_as_free_share, + .get_default_va_sizes = gp10b_mm_get_default_va_sizes, }; #ifdef CONFIG_NVGPU_DGPU diff --git a/drivers/gpu/nvgpu/hal/vgpu/init/vgpu_hal_gv11b.c b/drivers/gpu/nvgpu/hal/vgpu/init/vgpu_hal_gv11b.c index 735405640..7a2c364aa 100644 --- a/drivers/gpu/nvgpu/hal/vgpu/init/vgpu_hal_gv11b.c +++ b/drivers/gpu/nvgpu/hal/vgpu/init/vgpu_hal_gv11b.c @@ -762,6 +762,7 @@ static const struct gops_mm vgpu_gv11b_ops_mm = { .bar1_map_userd = vgpu_mm_bar1_map_userd, .vm_as_alloc_share = vgpu_vm_as_alloc_share, .vm_as_free_share = vgpu_vm_as_free_share, + .get_default_va_sizes = gp10b_mm_get_default_va_sizes, }; static const struct gops_therm vgpu_gv11b_ops_therm = { diff --git a/drivers/gpu/nvgpu/include/nvgpu/gops/mm.h b/drivers/gpu/nvgpu/include/nvgpu/gops/mm.h index b6c54352c..38502597d 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/gops/mm.h +++ b/drivers/gpu/nvgpu/include/nvgpu/gops/mm.h @@ -575,6 +575,19 @@ struct gops_mm { */ u32 (*get_flush_retries)(struct gk20a *g, enum nvgpu_flush_op op); + /** + * @brief HAL to get default virtual memory sizes. + * + * @param aperture_size [out] Pointer to aperture size. + * @param user_size [out] Pointer to user size. + * @param kernel_size [out] Pointer to kernel size. + * + * Number of bits for virtual address space can vary. This HAL is used + * to get default values for virtual address spaces. 
+ */ + void (*get_default_va_sizes)(u64 *aperture_size, + u64 *user_size, u64 *kernel_size); + /** @cond DOXYGEN_SHOULD_SKIP_THIS */ u64 (*bar1_map_userd)(struct gk20a *g, struct nvgpu_mem *mem, u32 offset); diff --git a/drivers/gpu/nvgpu/include/nvgpu/mm.h b/drivers/gpu/nvgpu/include/nvgpu/mm.h index 46f0cd65c..8c828e8b9 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/mm.h +++ b/drivers/gpu/nvgpu/include/nvgpu/mm.h @@ -488,17 +488,6 @@ static inline u32 bar1_aperture_size_mb_gk20a(void) return 16U; } -/** The maximum GPU VA range supported */ -#define NV_GMMU_VA_RANGE 38U - -#define NV_MM_DEFAULT_APERTURE_SIZE (1ULL << NV_GMMU_VA_RANGE) - -/** The default userspace-visible GPU VA size */ -#define NV_MM_DEFAULT_USER_SIZE (1ULL << 37) - -/** The default kernel-reserved GPU VA size */ -#define NV_MM_DEFAULT_KERNEL_SIZE (1ULL << 32) - /** * @brief Get small page bottom GPU VA address range. * diff --git a/libs/dgpu/libnvgpu-drv-dgpu_safe.export b/libs/dgpu/libnvgpu-drv-dgpu_safe.export index 2311d30b2..67e9cecc7 100644 --- a/libs/dgpu/libnvgpu-drv-dgpu_safe.export +++ b/libs/dgpu/libnvgpu-drv-dgpu_safe.export @@ -84,6 +84,7 @@ gm20b_bus_bar1_bind gp10b_bus_bar2_bind gp10b_ce_nonstall_isr gp10b_get_max_page_table_levels +gp10b_mm_get_default_va_sizes gp10b_mm_get_iommu_bit gp10b_mm_get_mmu_levels gp10b_mm_init_bar2_vm diff --git a/libs/igpu/libnvgpu-drv-igpu_safe.export b/libs/igpu/libnvgpu-drv-igpu_safe.export index 29a768385..fa2ec25e5 100644 --- a/libs/igpu/libnvgpu-drv-igpu_safe.export +++ b/libs/igpu/libnvgpu-drv-igpu_safe.export @@ -84,6 +84,7 @@ gm20b_bus_bar1_bind gp10b_bus_bar2_bind gp10b_ce_nonstall_isr gp10b_get_max_page_table_levels +gp10b_mm_get_default_va_sizes gp10b_mm_get_iommu_bit gp10b_mm_get_mmu_levels gp10b_mm_init_bar2_vm diff --git a/userspace/required_tests.ini b/userspace/required_tests.ini index 075e20db3..8208edcd8 100644 --- a/userspace/required_tests.ini +++ b/userspace/required_tests.ini @@ -421,6 +421,7 @@ 
test_handle_nonreplay_replay_fault.handle_nonreplay_s3=0 [nvgpu-acr] free_falcon_test_env.acr_free_falcon_test_env=0 +test_acr_bootstrap_hs_acr.acr_bootstrap_hs_acr=0 test_acr_construct_execute.acr_construct_execute=0 test_acr_init.acr_init=0 test_acr_is_lsf_lazy_bootstrap.acr_is_lsf_lazy_bootstrap=0 diff --git a/userspace/units/mm/dma/dma.c b/userspace/units/mm/dma/dma.c index 9618e05e9..beb1f92a1 100644 --- a/userspace/units/mm/dma/dma.c +++ b/userspace/units/mm/dma/dma.c @@ -188,9 +188,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g) low_hole = SZ_4K * 16UL; aperture_size = GK20A_PMU_VA_SIZE; mm->pmu.aperture_size = GK20A_PMU_VA_SIZE; - mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE; - mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE; - + g->ops.mm.get_default_va_sizes(NULL, &mm->channel.user_size, + &mm->channel.kernel_size); mm->pmu.vm = nvgpu_vm_init(g, g->ops.mm.gmmu.get_default_big_page_size(), diff --git a/userspace/units/mm/gmmu/page_table/page_table.c b/userspace/units/mm/gmmu/page_table/page_table.c index ac9825c52..41adb4349 100644 --- a/userspace/units/mm/gmmu/page_table/page_table.c +++ b/userspace/units/mm/gmmu/page_table/page_table.c @@ -347,9 +347,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g) low_hole = SZ_4K * 16UL; aperture_size = GK20A_PMU_VA_SIZE; mm->pmu.aperture_size = GK20A_PMU_VA_SIZE; - mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE; - mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE; - + g->ops.mm.get_default_va_sizes(NULL, &mm->channel.user_size, + &mm->channel.kernel_size); mm->pmu.vm = nvgpu_vm_init(g, g->ops.mm.gmmu.get_default_big_page_size(), diff --git a/userspace/units/mm/vm/vm.c b/userspace/units/mm/vm/vm.c index 50a96094d..2912da9d1 100644 --- a/userspace/units/mm/vm/vm.c +++ b/userspace/units/mm/vm/vm.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include @@ -192,6 +193,7 @@ static int init_test_env(struct unit_module *m, struct gk20a *g) g->ops.mm.gmmu.gpu_phys_addr 
= gv11b_gpu_phys_addr; g->ops.mm.cache.l2_flush = gv11b_mm_l2_flush; g->ops.mm.cache.fb_flush = gk20a_mm_fb_flush; + g->ops.mm.get_default_va_sizes = gp10b_mm_get_default_va_sizes; g->ops.mm.init_inst_block = hal_mm_init_inst_block; g->ops.mm.vm_as_free_share = hal_vm_as_free_share; g->ops.mm.vm_bind_channel = nvgpu_vm_bind_channel; @@ -899,6 +901,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args) bool big_pages = true; struct nvgpu_posix_fault_inj *kmem_fi = nvgpu_kmem_get_fault_injection(); + u64 default_aperture_size; /* Initialize test environment */ ret = init_test_env(m, g); @@ -907,6 +910,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args) } /* Set VM parameters */ + g->ops.mm.get_default_va_sizes(&default_aperture_size, NULL, NULL); big_pages = true; low_hole = SZ_1M * 64; aperture_size = 128 * SZ_1G; @@ -940,7 +944,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args) g->ops.mm.gmmu.get_default_big_page_size(), low_hole, user_vma, - NV_MM_DEFAULT_APERTURE_SIZE, /* invalid aperture size */ + default_aperture_size, /* invalid aperture size */ big_pages, false, true, diff --git a/userspace/units/sync/nvgpu-sync.c b/userspace/units/sync/nvgpu-sync.c index c0c0c6e8a..df1542e20 100644 --- a/userspace/units/sync/nvgpu-sync.c +++ b/userspace/units/sync/nvgpu-sync.c @@ -80,8 +80,8 @@ static int init_channel_vm(struct unit_module *m, struct nvgpu_channel *ch) aperture_size = GK20A_PMU_VA_SIZE; mm->pmu.aperture_size = GK20A_PMU_VA_SIZE; - mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE; - mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE; + g->ops.mm.get_default_va_sizes(NULL, &mm->channel.user_size, + &mm->channel.kernel_size); mm->pmu.vm = nvgpu_vm_init(g, g->ops.mm.gmmu.get_default_big_page_size(),