gpu: nvgpu: accept user vma size in vm init

Modify nvgpu_vm_init to accept low_hole, user_reserved, and
kernel_reserved sizes instead of a total aperture size. This simplifies
the argument limit checks and makes the code more legible.
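
For reference, a sketch of the updated prototype and a representative
call; the parameter order is taken from the diff below, and the sizes in
the example are illustrative only:

    struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
                                   u32 big_page_size,
                                   u64 low_hole,        /* unaddressable space at the bottom */
                                   u64 user_reserved,   /* new: user VMA size, 0 for kernel-only VMs */
                                   u64 kernel_reserved, /* kernel-only VMA size */
                                   bool big_pages,
                                   bool userspace_managed,
                                   bool unified_va,
                                   const char *name);

    /* A kernel-only VM no longer passes a total aperture size: */
    vm = nvgpu_vm_init(g, big_page_size, SZ_4K, 0ULL,
                       nvgpu_safe_sub_u64(aperture_size, SZ_4K),
                       false, false, false, "bar2");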

JIRA NVGPU-5302

Change-Id: I62773dd7b06264a3b6cb8896239b24c49fa69f9b
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2394901
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Vedashree Vidwans
Date:      2020-08-04 18:58:29 -07:00
Committer: Alex Waterman
Commit:    49c9f0c137 (parent aef3367ca5)

23 changed files with 156 additions and 136 deletions

@@ -85,9 +85,10 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
 	vm = nvgpu_vm_init(g, big_page_size,
 			U64(big_page_size) << U64(10),
+			nvgpu_safe_sub_u64(mm->channel.user_size,
+				nvgpu_safe_sub_u64(mm->channel.kernel_size,
+					U64(big_page_size) << U64(10))),
 			mm->channel.kernel_size,
-			nvgpu_safe_add_u64(mm->channel.user_size,
-				mm->channel.kernel_size),
 			!mm->disable_bigpage,
 			userspace_managed, unified_va, name);
 	if (vm == NULL) {

@@ -205,8 +205,8 @@ static int nvgpu_init_system_vm(struct mm_gk20a *mm)
 	mm->pmu.vm = nvgpu_vm_init(g, big_page_size,
 			low_hole,
+			0ULL,
 			nvgpu_safe_sub_u64(aperture_size, low_hole),
-			aperture_size,
 			true,
 			false,
 			false,
@@ -250,8 +250,9 @@ static int nvgpu_init_cde_vm(struct mm_gk20a *mm)
 	mm->cde.vm = nvgpu_vm_init(g, big_page_size,
 			U64(big_page_size) << U64(10),
+			nvgpu_safe_sub_u64(NV_MM_DEFAULT_USER_SIZE,
+				U64(big_page_size) << U64(10)),
 			NV_MM_DEFAULT_KERNEL_SIZE,
-			NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
 			false, false, false, "cde");
 	if (mm->cde.vm == NULL) {
 		return -ENOMEM;
@@ -266,8 +267,9 @@ static int nvgpu_init_ce_vm(struct mm_gk20a *mm)
 	mm->ce.vm = nvgpu_vm_init(g, big_page_size,
 			U64(big_page_size) << U64(10),
+			nvgpu_safe_sub_u64(NV_MM_DEFAULT_USER_SIZE,
+				U64(big_page_size) << U64(10)),
 			NV_MM_DEFAULT_KERNEL_SIZE,
-			NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
 			false, false, false, "ce");
 	if (mm->ce.vm == NULL) {
 		return -ENOMEM;
@@ -332,9 +334,9 @@ static int nvgpu_init_bar1_vm(struct mm_gk20a *mm)
 	mm->bar1.vm = nvgpu_vm_init(g,
 			big_page_size,
 			SZ_64K,
+			0ULL,
 			nvgpu_safe_sub_u64(mm->bar1.aperture_size,
 				SZ_64K),
-			mm->bar1.aperture_size,
 			true, false, false,
 			"bar1");
 	if (mm->bar1.vm == NULL) {
@@ -367,8 +369,9 @@ static int nvgpu_init_engine_ucode_vm(struct gk20a *g,
 			ucode->aperture_size);
 	ucode->vm = nvgpu_vm_init(g, big_page_size, SZ_4K,
-			nvgpu_safe_sub_u64(ucode->aperture_size, SZ_4K),
-			ucode->aperture_size, false, false, false, address_space_name);
+			0ULL, nvgpu_safe_sub_u64(ucode->aperture_size, SZ_4K),
+			false, false, false,
+			address_space_name);
 	if (ucode->vm == NULL) {
 		return -ENOMEM;
 	}
@@ -504,8 +507,7 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
 	nvgpu_mutex_init(&mm->l2_op_lock);

 	/*TBD: make channel vm size configurable */
-	mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE -
-		NV_MM_DEFAULT_KERNEL_SIZE;
+	mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE;
 	mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE;

 	nvgpu_log_info(g, "channel vm size: user %uMB kernel %uMB",

@@ -618,33 +618,34 @@ static int nvgpu_vm_init_check_vma_limits(struct gk20a *g, struct vm_gk20a *vm,
 static int nvgpu_vm_init_vma(struct gk20a *g, struct vm_gk20a *vm,
 		u64 low_hole,
+		u64 user_reserved,
 		u64 kernel_reserved,
-		u64 aperture_size,
 		bool big_pages,
 		bool unified_va,
 		const char *name)
 {
 	int err = 0;
+	u64 kernel_vma_flags = 0ULL;
 	u64 user_vma_start, user_vma_limit;
 	u64 user_lp_vma_start, user_lp_vma_limit;
 	u64 kernel_vma_start, kernel_vma_limit;
-	u64 kernel_vma_flags;

 	/* Setup vma limits. */
-	if (nvgpu_safe_add_u64(kernel_reserved, low_hole) < aperture_size) {
+	if (user_reserved > 0ULL) {
+		kernel_vma_flags = GPU_ALLOC_GVA_SPACE;
 		/*
 		 * If big_pages are disabled for this VM then it only makes
 		 * sense to make one VM, same as if the unified address flag
 		 * is set.
 		 */
 		if (!big_pages || unified_va) {
-			user_vma_start = low_hole;
+			user_vma_start = vm->va_start;
 			user_vma_limit = nvgpu_safe_sub_u64(vm->va_limit,
 					kernel_reserved);
 			user_lp_vma_start = user_vma_limit;
 			user_lp_vma_limit = user_vma_limit;
 		} else {
-			user_vma_start = low_hole;
+			user_vma_start = vm->va_start;
 			user_vma_limit = nvgpu_gmmu_va_small_page_limit();
 			user_lp_vma_start = nvgpu_gmmu_va_small_page_limit();
 			user_lp_vma_limit = nvgpu_safe_sub_u64(vm->va_limit,
@@ -676,10 +677,6 @@ static int nvgpu_vm_init_vma(struct gk20a *g, struct vm_gk20a *vm,
 		goto clean_up_page_tables;
 	}

-	kernel_vma_flags =
-		(nvgpu_safe_add_u64(kernel_reserved, low_hole) ==
-			aperture_size) ? 0ULL : GPU_ALLOC_GVA_SPACE;
-
 	nvgpu_vm_init_check_big_pages(vm, user_vma_start, user_vma_limit,
 			user_lp_vma_start, user_lp_vma_limit,
 			big_pages, unified_va);
@@ -705,16 +702,30 @@ static int nvgpu_vm_init_attributes(struct mm_gk20a *mm,
 		struct vm_gk20a *vm,
 		u32 big_page_size,
 		u64 low_hole,
+		u64 user_reserved,
 		u64 kernel_reserved,
-		u64 aperture_size,
 		bool big_pages,
 		bool userspace_managed,
 		bool unified_va,
 		const char *name)
 {
 	struct gk20a *g = gk20a_from_mm(mm);
+	u64 low_hole_size, user_va_size;
+	u64 aperture_size;
+	u64 pde_align = (U64(big_page_size) << U64(10));

-	if (nvgpu_safe_add_u64(kernel_reserved, low_hole) > aperture_size) {
+	if (user_reserved == 0ULL) {
+		low_hole_size = low_hole;
+		user_va_size = user_reserved;
+	} else {
+		low_hole_size = ALIGN(low_hole, pde_align);
+		user_va_size = ALIGN(user_reserved, pde_align);
+	}
+
+	aperture_size = nvgpu_safe_add_u64(kernel_reserved,
+			nvgpu_safe_add_u64(user_va_size, low_hole_size));
+
+	if (aperture_size > NV_MM_DEFAULT_APERTURE_SIZE) {
 		nvgpu_do_assert_print(g,
 			"Overlap between user and kernel spaces");
 		return -ENOMEM;
@@ -747,7 +758,7 @@ static int nvgpu_vm_init_attributes(struct mm_gk20a *mm,
 		vm->vma[GMMU_PAGE_SIZE_BIG] = &vm->user_lp;
 	}

-	vm->va_start = low_hole;
+	vm->va_start = low_hole_size;
 	vm->va_limit = aperture_size;

 	vm->big_page_size = vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG];
@@ -772,8 +783,8 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
 		struct vm_gk20a *vm,
 		u32 big_page_size,
 		u64 low_hole,
+		u64 user_reserved,
 		u64 kernel_reserved,
-		u64 aperture_size,
 		bool big_pages,
 		bool userspace_managed,
 		bool unified_va,
@@ -783,7 +794,7 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
 	int err = 0;

 	err = nvgpu_vm_init_attributes(mm, vm, big_page_size, low_hole,
-			kernel_reserved, aperture_size, big_pages, userspace_managed,
+			user_reserved, kernel_reserved, big_pages, userspace_managed,
 			unified_va, name);
 	if (err != 0) {
 		return err;
@@ -805,7 +816,7 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
 		goto clean_up_gpu_vm;
 	}

-	err = nvgpu_vm_init_vma(g, vm, low_hole, kernel_reserved, aperture_size,
+	err = nvgpu_vm_init_vma(g, vm, low_hole, user_reserved, kernel_reserved,
 			big_pages, unified_va, name);
 	if (err != 0) {
 		goto clean_up_gpu_vm;
@@ -855,8 +866,8 @@ clean_up_gpu_vm:
  * @big_page_size - Size of big pages associated with this VM.
  * @low_hole - The size of the low hole (unaddressable memory at the bottom of
  *             the address space).
+ * @user_reserved - Space reserved for user allocations..
  * @kernel_reserved - Space reserved for kernel only allocations.
- * @aperture_size - Total size of the aperture.
  * @big_pages - If true then big pages are possible in the VM. Note this does
  *              not guarantee that big pages will be possible.
  * @name - Name of the address space.
@@ -887,8 +898,8 @@ clean_up_gpu_vm:
 struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
 		u32 big_page_size,
 		u64 low_hole,
+		u64 user_reserved,
 		u64 kernel_reserved,
-		u64 aperture_size,
 		bool big_pages,
 		bool userspace_managed,
 		bool unified_va,
@@ -902,7 +913,7 @@ struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
 	}

 	err = nvgpu_vm_do_init(&g->mm, vm, big_page_size, low_hole,
-			kernel_reserved, aperture_size, big_pages,
+			user_reserved, kernel_reserved, big_pages,
 			userspace_managed, unified_va, name);
 	if (err != 0) {
 		nvgpu_kfree(g, vm);

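In short, the VM now derives its aperture from the three sizes it is given
rather than trusting a caller-supplied total. A simplified, self-contained
restatement of that bookkeeping follows; the helper name and the ALIGN_UP
macro are illustrative, not part of the driver:

    #include <stdint.h>

    /* Mirrors the kernel's ALIGN(): round x up to the power-of-two boundary a. */
    #define ALIGN_UP(x, a)  (((x) + ((a) - 1ULL)) & ~((a) - 1ULL))

    static uint64_t vm_aperture_size(uint64_t big_page_size, uint64_t low_hole,
                                     uint64_t user_reserved, uint64_t kernel_reserved)
    {
        uint64_t pde_align = big_page_size << 10;   /* VA covered by one PDE */
        uint64_t low_hole_size = low_hole;
        uint64_t user_va_size = user_reserved;

        if (user_reserved != 0ULL) {
            /* VMs with a user region get both regions padded to a PDE boundary;
             * kernel-only VMs (user_reserved == 0) keep their sizes as-is. */
            low_hole_size = ALIGN_UP(low_hole, pde_align);
            user_va_size = ALIGN_UP(user_reserved, pde_align);
        }

        /* The result must not exceed NV_MM_DEFAULT_APERTURE_SIZE (1ULL << 38). */
        return kernel_reserved + user_va_size + low_hole_size;
    }
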
@@ -84,8 +84,9 @@ int nvgpu_perfbuf_init_vm(struct gk20a *g)
 	mm->perfbuf.vm = nvgpu_vm_init(g, big_page_size,
 			big_page_size << 10,
+			nvgpu_safe_sub_u64(NV_MM_DEFAULT_USER_SIZE,
+				big_page_size << 10),
 			NV_MM_DEFAULT_KERNEL_SIZE,
-			NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
 			false, false, false, "perfbuf");
 	if (mm->perfbuf.vm == NULL) {
 		return -ENOMEM;

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -38,8 +38,8 @@ int gp10b_mm_init_bar2_vm(struct gk20a *g)
 	nvgpu_log_info(g, "bar2 vm size = 0x%x", mm->bar2.aperture_size);
 	mm->bar2.vm = nvgpu_vm_init(g, big_page_size, SZ_4K,
-			nvgpu_safe_sub_u64(mm->bar2.aperture_size, SZ_4K),
-			mm->bar2.aperture_size, false, false, false, "bar2");
+			0ULL, nvgpu_safe_sub_u64(mm->bar2.aperture_size, SZ_4K),
+			false, false, false, "bar2");
 	if (mm->bar2.vm == NULL) {
 		return -ENOMEM;
 	}

@@ -489,7 +489,9 @@ static inline u32 bar1_aperture_size_mb_gk20a(void)
 }

 /** The maximum GPU VA range supported */
-#define NV_GMMU_VA_RANGE 38
+#define NV_GMMU_VA_RANGE 38U
+#define NV_MM_DEFAULT_APERTURE_SIZE (1ULL << NV_GMMU_VA_RANGE)

 /** The default userspace-visible GPU VA size */
 #define NV_MM_DEFAULT_USER_SIZE (1ULL << 37)

@@ -733,8 +733,8 @@ void nvgpu_insert_mapped_buf(struct vm_gk20a *vm,
  * @param low_hole        [in] The size of the low hole
  *                             (non-addressable memory at the bottom of
  *                             the address space).
+ * @param user_reserved   [in] Space reserved for user allocations.
  * @param kernel_reserved [in] Space reserved for kernel only allocations.
- * @param aperture_size   [in] Total size of the aperture.
  * @param big_pages       [in] If true then big pages are possible in the
  *                             VM. Note this does not guarantee that big
  *                             pages will be possible.
@@ -761,8 +761,8 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
 		struct vm_gk20a *vm,
 		u32 big_page_size,
 		u64 low_hole,
+		u64 user_reserved,
 		u64 kernel_reserved,
-		u64 aperture_size,
 		bool big_pages,
 		bool userspace_managed,
 		bool unified_va,
@@ -777,8 +777,8 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
  * @param low_hole        [in] The size of the low hole
  *                             (non-addressable memory at the bottom of
  *                             the address space).
+ * @param user_reserved   [in] Space reserved for user allocations.
  * @param kernel_reserved [in] Space reserved for kernel only allocations.
- * @param aperture_size   [in] Total size of the aperture.
  * @param big_pages       [in] If true then big pages are possible in the
  *                             VM. Note this does not guarantee that big
  *                             pages will be possible.
@@ -817,8 +817,8 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
 struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
 		u32 big_page_size,
 		u64 low_hole,
+		u64 user_reserved,
 		u64 kernel_reserved,
-		u64 aperture_size,
 		bool big_pages,
 		bool userspace_managed,
 		bool unified_va,

@@ -61,9 +61,9 @@ int test_gr_ctx_error_injection(struct unit_module *m,
 		unit_return_fail(m, "failed to allocate memory");
 	}

-	vm = nvgpu_vm_init(g, SZ_4K, SZ_4K << 10, (1ULL << 32),
-			(1ULL << 32) + (1ULL << 37), false, false, false,
-			"dummy");
+	vm = nvgpu_vm_init(g, SZ_4K, SZ_4K << 10,
+			nvgpu_safe_sub_u64(1ULL << 37, SZ_4K << 10),
+			(1ULL << 32), false, false, false, "dummy");
 	if (!vm) {
 		unit_return_fail(m, "failed to allocate VM");
 	}

@@ -794,9 +794,9 @@ int test_gr_init_hal_error_injection(struct unit_module *m,
 	g->ops.mm.cache.l2_flush = dummy_l2_flush;

-	vm = nvgpu_vm_init(g, SZ_4K, SZ_4K << 10, (1ULL << 32),
-			(1ULL << 32) + (1ULL << 37), false, false, false,
-			"dummy");
+	vm = nvgpu_vm_init(g, SZ_4K, SZ_4K << 10,
+			nvgpu_safe_sub_u64(1ULL << 37, SZ_4K << 10),
+			(1ULL << 32), false, false, false, "dummy");
 	if (!vm) {
 		unit_return_fail(m, "failed to allocate VM");
 	}

@@ -146,9 +146,9 @@ int test_gr_obj_ctx_error_injection(struct unit_module *m,
 	}

 	/* Setup VM */
-	vm = nvgpu_vm_init(g, SZ_4K, SZ_4K << 10, (1ULL << 32),
-			(1ULL << 32) + (1ULL << 37), false, false, false,
-			"dummy");
+	vm = nvgpu_vm_init(g, SZ_4K, SZ_4K << 10,
+			nvgpu_safe_sub_u64(1ULL << 37, SZ_4K << 10),
+			(1ULL << 32), false, false, false, "dummy");
 	if (!vm) {
 		unit_return_fail(m, "failed to allocate VM");
 	}

@@ -100,8 +100,8 @@ static struct vm_gk20a *init_vm_env(struct unit_module *m, struct gk20a *g,
 	/* Init vm with big_pages disabled */
 	test_vm = nvgpu_vm_init(g, g->ops.mm.gmmu.get_default_big_page_size(),
 			low_hole,
-			aperture_size - low_hole,
-			aperture_size,
+			0ULL,
+			nvgpu_safe_sub_u64(aperture_size, low_hole),
 			big_pages,
 			false,
 			false,

@@ -188,16 +188,15 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	low_hole = SZ_4K * 16UL;
 	aperture_size = GK20A_PMU_VA_SIZE;
 	mm->pmu.aperture_size = GK20A_PMU_VA_SIZE;

-	mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE -
-		NV_MM_DEFAULT_KERNEL_SIZE;
+	mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE;
 	mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE;

 	mm->pmu.vm = nvgpu_vm_init(g,
 			g->ops.mm.gmmu.get_default_big_page_size(),
 			low_hole,
-			aperture_size - low_hole,
-			aperture_size,
+			0ULL,
+			nvgpu_safe_sub_u64(aperture_size, low_hole),
 			true,
 			false,
 			false,
@@ -538,7 +537,7 @@ int test_mm_dma_alloc_map_fault_injection(struct unit_module *m,
 	 * nvgpu_dma_alloc_flags_sys function
 	 */
 	kmem_fi = nvgpu_kmem_get_fault_injection();
-	nvgpu_posix_enable_fault_injection(kmem_fi, true, 5);
+	nvgpu_posix_enable_fault_injection(kmem_fi, true, 1);

 	err = nvgpu_dma_alloc_map(g->mm.pmu.vm, SZ_4K, mem);
 	if (err == 0) {

@@ -347,16 +347,15 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	low_hole = SZ_4K * 16UL;
 	aperture_size = GK20A_PMU_VA_SIZE;
 	mm->pmu.aperture_size = GK20A_PMU_VA_SIZE;

-	mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE -
-		NV_MM_DEFAULT_KERNEL_SIZE;
+	mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE;
 	mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE;

 	mm->pmu.vm = nvgpu_vm_init(g,
 			g->ops.mm.gmmu.get_default_big_page_size(),
 			low_hole,
-			aperture_size - low_hole,
-			aperture_size,
+			0ULL,
+			nvgpu_safe_sub_u64(aperture_size, low_hole),
 			true,
 			false,
 			false,
@@ -953,7 +952,7 @@ static int check_pte_invalidated(struct unit_module *m, struct gk20a *g,
 /* Create a VM based on requirements described in NVGPU-RQCD-45 */
 static struct vm_gk20a *init_test_req_vm(struct gk20a *g)
 {
-	u64 low_hole, aperture_size, kernel_reserved;
+	u64 low_hole, aperture_size, kernel_reserved, user_reserved;
 	bool big_pages;

 	/* Init some common attributes */
@@ -971,10 +970,12 @@ static struct vm_gk20a *init_test_req_vm(struct gk20a *g)
 	aperture_size = 128 * SZ_1G;
 	/* 1.4. Have a 4GB kernel reserved space */
 	kernel_reserved = 4 * SZ_1G;
+	/* 1.5. User reserved space */
+	user_reserved = aperture_size - kernel_reserved - low_hole;

 	return nvgpu_vm_init(g, g->ops.mm.gmmu.get_default_big_page_size(),
-			low_hole, kernel_reserved,
-			aperture_size, big_pages, true, true, "testmem");
+			low_hole, user_reserved, kernel_reserved,
+			big_pages, true, true, "testmem");
 }

 int test_nvgpu_page_table_c1_full(struct unit_module *m, struct gk20a *g,

@@ -159,8 +159,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	mm->pmu.vm = nvgpu_vm_init(g,
 			g->ops.mm.gmmu.get_default_big_page_size(),
 			low_hole,
-			aperture_size - low_hole,
-			aperture_size,
+			0ULL,
+			nvgpu_safe_sub_u64(aperture_size, low_hole),
 			true,
 			false,
 			false,

@@ -148,8 +148,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	mm->pmu.vm = nvgpu_vm_init(g,
 			g->ops.mm.gmmu.get_default_big_page_size(),
 			low_hole,
-			aperture_size - low_hole,
-			aperture_size,
+			0ULL,
+			nvgpu_safe_sub_u64(aperture_size, low_hole),
 			true,
 			false,
 			false,
@@ -162,8 +162,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	mm->bar1.aperture_size = U32(16) << 20U;
 	mm->bar1.vm = nvgpu_vm_init(g,
 			g->ops.mm.gmmu.get_default_big_page_size(),
-			SZ_4K, mm->bar1.aperture_size - SZ_4K,
-			mm->bar1.aperture_size, false, false, false, "bar1");
+			SZ_4K, 0ULL, nvgpu_safe_sub_u64(mm->bar1.aperture_size, SZ_4K),
+			false, false, false, "bar1");
 	if (mm->bar1.vm == NULL) {
 		unit_return_fail(m, "'bar1' nvgpu_vm_init failed\n");
 	}

@@ -144,6 +144,7 @@ int test_update_gmmu_pde3_locked(struct unit_module *m,
 	unit_assert(g->mm.pd_cache == NULL, goto done);

 	vm.mm = &g->mm;
+	vm.mm->g = g;
 	err = nvgpu_pd_cache_init(g);
 	unit_assert(err == 0, goto done);

@@ -139,8 +139,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	mm->pmu.vm = nvgpu_vm_init(g,
 			g->ops.mm.gmmu.get_default_big_page_size(),
 			low_hole,
-			aperture_size - low_hole,
-			aperture_size,
+			0ULL,
+			nvgpu_safe_sub_u64(aperture_size, low_hole),
 			true,
 			false,
 			false,

@@ -134,8 +134,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	mm->pmu.vm = nvgpu_vm_init(g,
 			g->ops.mm.gmmu.get_default_big_page_size(),
 			low_hole,
-			aperture_size - low_hole,
-			aperture_size,
+			0ULL,
+			nvgpu_safe_sub_u64(aperture_size, low_hole),
 			true,
 			false,
 			false,
@@ -148,8 +148,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	mm->bar2.aperture_size = U32(32) << 20U;
 	mm->bar2.vm = nvgpu_vm_init(g,
 			g->ops.mm.gmmu.get_default_big_page_size(),
-			SZ_4K, mm->bar2.aperture_size - SZ_4K,
-			mm->bar2.aperture_size, false, false, false, "bar2");
+			SZ_4K, 0ULL, nvgpu_safe_sub_u64(mm->bar2.aperture_size, SZ_4K),
+			false, false, false, "bar2");
 	if (mm->bar2.vm == NULL) {
 		unit_return_fail(m, "'bar2' nvgpu_vm_init failed\n");
 	}

@@ -173,8 +173,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	mm->pmu.vm = nvgpu_vm_init(g,
 			g->ops.mm.gmmu.get_default_big_page_size(),
 			low_hole,
-			aperture_size - low_hole,
-			aperture_size,
+			0ULL,
+			nvgpu_safe_sub_u64(aperture_size, low_hole),
 			true,
 			false,
 			false,
@@ -187,8 +187,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	mm->bar2.aperture_size = U32(32) << 20U;
 	mm->bar2.vm = nvgpu_vm_init(g,
 			g->ops.mm.gmmu.get_default_big_page_size(),
-			SZ_4K, mm->bar2.aperture_size - SZ_4K,
-			mm->bar2.aperture_size, false, false, false, "bar2");
+			SZ_4K, 0ULL, nvgpu_safe_sub_u64(mm->bar2.aperture_size, SZ_4K),
+			false, false, false, "bar2");
 	if (mm->bar2.vm == NULL) {
 		unit_return_fail(m, "'bar2' nvgpu_vm_init failed\n");
 	}

@@ -228,12 +228,12 @@ int test_nvgpu_init_mm(struct unit_module *m, struct gk20a *g, void *args)
 			-ENOMEM, 4);

 	/* Making nvgpu_init_system_vm fail on the PMU VM init */
-	errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 29,
+	errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 10,
 			-ENOMEM, 5);

 	/* Making nvgpu_init_system_vm fail again with extra branch coverage */
 	g->ops.mm.init_bar2_vm = NULL;
-	errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 20,
+	errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 6,
 			-ENOMEM, 6);
 	g->ops.mm.init_bar2_vm = gp10b_mm_init_bar2_vm;
@@ -246,7 +246,7 @@ int test_nvgpu_init_mm(struct unit_module *m, struct gk20a *g, void *args)
 			-ENOMEM, 8);

 	/* Making nvgpu_init_engine_ucode_vm(sec2) fail on VM init */
-	errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 46,
+	errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 15,
 			-ENOMEM, 9);

 	/* Making nvgpu_init_engine_ucode_vm(sec2) fail on alloc_inst_block */
@@ -258,11 +258,11 @@ int test_nvgpu_init_mm(struct unit_module *m, struct gk20a *g, void *args)
 			-ENOMEM, 11);

 	/* Making nvgpu_init_cde_vm fail */
-	errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 80,
+	errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 25,
 			-ENOMEM, 12);

 	/* Making nvgpu_init_ce_vm fail */
-	errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 98,
+	errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 33,
 			-ENOMEM, 13);

 	/* Making nvgpu_init_mmu_debug fail on wr_mem DMA alloc */

@@ -199,8 +199,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	mm->pmu.vm = nvgpu_vm_init(g,
 			g->ops.mm.gmmu.get_default_big_page_size(),
 			low_hole,
-			aperture_size - low_hole,
-			aperture_size,
+			0ULL,
+			nvgpu_safe_sub_u64(aperture_size, low_hole),
 			true,
 			false,
 			false,
@@ -213,8 +213,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	mm->bar2.aperture_size = U32(32) << 20U;
 	mm->bar2.vm = nvgpu_vm_init(g,
 			g->ops.mm.gmmu.get_default_big_page_size(),
-			SZ_4K, mm->bar2.aperture_size - SZ_4K,
-			mm->bar2.aperture_size, false, false, false, "bar2");
+			SZ_4K, 0ULL, nvgpu_safe_sub_u64(mm->bar2.aperture_size, SZ_4K),
+			false, false, false, "bar2");
 	if (mm->bar2.vm == NULL) {
 		unit_return_fail(m, "'bar2' nvgpu_vm_init failed\n");
 	}

@@ -221,8 +221,8 @@ static struct vm_gk20a *create_test_vm(struct unit_module *m, struct gk20a *g)
 	vm = nvgpu_vm_init(g,
 			g->ops.mm.gmmu.get_default_big_page_size(),
 			low_hole,
+			user_vma,
 			kernel_reserved,
-			aperture_size,
 			true,
 			false,
 			true,
@@ -451,7 +451,7 @@ int test_map_buffer_error_cases(struct unit_module *m, struct gk20a *g,
 #endif

 	/* Make g->ops.mm.gmmu.map fail */
-	nvgpu_posix_enable_fault_injection(kmem_fi, true, 40);
+	nvgpu_posix_enable_fault_injection(kmem_fi, true, 20);
 	ret = nvgpu_vm_map(vm,
 			&os_buf,
 			sgt,
@@ -895,6 +895,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 	u64 low_hole = 0;
 	u64 kernel_reserved = 0;
 	u64 aperture_size = 0;
+	u64 user_vma = 0;
 	bool big_pages = true;
 	struct nvgpu_posix_fault_inj *kmem_fi =
 		nvgpu_kmem_get_fault_injection();
@@ -910,14 +911,15 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 	low_hole = SZ_1M * 64;
 	aperture_size = 128 * SZ_1G;
 	kernel_reserved = 4 * SZ_1G - low_hole;
+	user_vma = aperture_size - low_hole - kernel_reserved;

 	/* Error injection to make the allocation for struct vm_gk20a to fail */
 	nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
 	vm = nvgpu_vm_init(g,
 			g->ops.mm.gmmu.get_default_big_page_size(),
 			low_hole,
+			user_vma,
 			kernel_reserved,
-			aperture_size,
 			big_pages,
 			false,
 			true,
@@ -937,8 +939,8 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 		nvgpu_vm_init(g,
 			g->ops.mm.gmmu.get_default_big_page_size(),
 			low_hole,
-			kernel_reserved,
-			0, /* invalid aperture size */
+			user_vma,
+			NV_MM_DEFAULT_APERTURE_SIZE, /* invalid aperture size */
 			big_pages,
 			false,
 			true,
@@ -955,7 +957,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 	if (!EXPECT_BUG(
 		nvgpu_vm_do_init(&g->mm, vm,
 			g->ops.mm.gmmu.get_default_big_page_size(),
-			low_hole, kernel_reserved, aperture_size,
+			low_hole, user_vma, kernel_reserved,
 			big_pages, false, true, __func__)
 		)) {
 		unit_err(m, "BUG() was not called but it was expected (3).\n");
@@ -968,7 +970,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 	g->is_virtual = true;
 	ret = nvgpu_vm_do_init(&g->mm, vm,
 			g->ops.mm.gmmu.get_default_big_page_size(),
-			low_hole, kernel_reserved, aperture_size,
+			low_hole, user_vma, kernel_reserved,
 			big_pages, true, true, __func__);
 	g->is_virtual = false;
 	if (ret != -ENOSYS) {
@@ -981,7 +983,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 	g->ops.mm.vm_as_alloc_share = hal_vm_as_alloc_share_error;
 	ret = nvgpu_vm_do_init(&g->mm, vm,
 			g->ops.mm.gmmu.get_default_big_page_size(),
-			low_hole, kernel_reserved, aperture_size,
+			low_hole, user_vma, kernel_reserved,
 			big_pages, true, true, __func__);
 	g->ops.mm.vm_as_alloc_share = hal_vm_as_alloc_share_success;
 	if (ret != -1) {
@@ -995,7 +997,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 	if (!EXPECT_BUG(
 		nvgpu_vm_do_init(&g->mm, vm,
 			g->ops.mm.gmmu.get_default_big_page_size(),
-			low_hole, kernel_reserved, aperture_size,
+			low_hole, user_vma, kernel_reserved,
 			big_pages, false, false, __func__)
 		)) {
 		unit_err(m, "BUG() was not called but it was expected (6).\n");
@@ -1008,7 +1010,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 	nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
 	ret = nvgpu_vm_do_init(&g->mm, vm,
 			g->ops.mm.gmmu.get_default_big_page_size(),
-			low_hole, kernel_reserved, aperture_size,
+			low_hole, user_vma, kernel_reserved,
 			big_pages, false, true, __func__);
 	nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
 	if (ret != -ENOMEM) {
@@ -1021,7 +1023,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 	nvgpu_posix_enable_fault_injection(kmem_fi, true, 5);
 	ret = nvgpu_vm_do_init(&g->mm, vm,
 			g->ops.mm.gmmu.get_default_big_page_size(),
-			low_hole, kernel_reserved, aperture_size,
+			low_hole, user_vma, kernel_reserved,
 			big_pages, false, true, __func__);
 	nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
 	if (ret != -ENOMEM) {
@@ -1034,7 +1036,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 	nvgpu_posix_enable_fault_injection(kmem_fi, true, 12);
 	ret = nvgpu_vm_do_init(&g->mm, vm,
 			g->ops.mm.gmmu.get_default_big_page_size(),
-			low_hole, kernel_reserved, aperture_size,
+			low_hole, user_vma, kernel_reserved,
 			big_pages, false, false, __func__);
 	nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
 	if (ret != -ENOMEM) {
@@ -1047,7 +1049,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 	nvgpu_posix_enable_fault_injection(kmem_fi, true, 17);
 	ret = nvgpu_vm_do_init(&g->mm, vm,
 			g->ops.mm.gmmu.get_default_big_page_size(),
-			low_hole, kernel_reserved, aperture_size,
+			low_hole, user_vma, kernel_reserved,
 			big_pages, false, false, __func__);
 	nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
 	if (ret != -ENOMEM) {
@@ -1061,7 +1063,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 	ret = nvgpu_vm_do_init(&g->mm, vm,
 			g->ops.mm.gmmu.get_default_big_page_size(),
 			nvgpu_gmmu_va_small_page_limit(),
-			0, aperture_size, big_pages, false, false,
+			((u64)SZ_1G * 200U), 0, big_pages, false, false,
 			__func__);
 	vm->guest_managed = false;
 	if (ret != -EINVAL) {
@@ -1073,7 +1075,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 	/* Cause nvgpu_vm_init_vma_allocators to fail for long vm name */
 	ret = nvgpu_vm_do_init(&g->mm, vm,
 			g->ops.mm.gmmu.get_default_big_page_size(),
-			low_hole, kernel_reserved, aperture_size,
+			low_hole, user_vma, kernel_reserved,
 			big_pages, false, false,
 			"very_long_vm_name_to_fail_vm_init");
 	if (ret != -EINVAL) {
@@ -1085,7 +1087,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 	/* Success with big pages and not unified VA */
 	ret = nvgpu_vm_do_init(&g->mm, vm,
 			g->ops.mm.gmmu.get_default_big_page_size(),
-			low_hole, kernel_reserved, aperture_size,
+			low_hole, user_vma, kernel_reserved,
 			big_pages, false, false, __func__);
 	if (ret != 0) {
 		unit_err(m, "nvgpu_vm_do_init did not succeed as expected (B).\n");
@@ -1096,7 +1098,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 	/* Success with big pages disabled */
 	ret = nvgpu_vm_do_init(&g->mm, vm,
 			g->ops.mm.gmmu.get_default_big_page_size(),
-			low_hole, kernel_reserved, aperture_size,
+			low_hole, user_vma, kernel_reserved,
 			false, false, false, __func__);
 	if (ret != 0) {
 		unit_err(m, "nvgpu_vm_do_init did not succeed as expected (C).\n");
@@ -1108,7 +1110,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 	ret = nvgpu_vm_do_init(&g->mm, vm,
 			g->ops.mm.gmmu.get_default_big_page_size(),
 			nvgpu_gmmu_va_small_page_limit(),
-			kernel_reserved, aperture_size, big_pages,
+			0ULL, kernel_reserved, big_pages,
 			false, false, __func__);
 	if (ret != 0) {
 		unit_err(m, "nvgpu_vm_do_init did not succeed as expected (D).\n");
@@ -1192,8 +1194,8 @@ int test_map_buf(struct unit_module *m, struct gk20a *g, void *__args)
 	vm = nvgpu_vm_init(g,
 			g->ops.mm.gmmu.get_default_big_page_size(),
 			low_hole,
+			user_vma,
 			kernel_reserved,
-			aperture_size,
 			big_pages,
 			false,
 			true,
@@ -1429,7 +1431,6 @@ int test_map_buf_gpu_va(struct unit_module *m,
 	aperture_size = 128 * SZ_1G;
 	kernel_reserved = 4 * SZ_1G - low_hole;
 	user_vma = aperture_size - low_hole - kernel_reserved;
-	user_vma_limit = aperture_size - kernel_reserved;

 	unit_info(m, "Initializing VM:\n");
 	unit_info(m, " - Low Hole Size = 0x%llx\n", low_hole);
 	unit_info(m, " - User Aperture Size = 0x%llx\n", user_vma);
@@ -1438,8 +1439,8 @@ int test_map_buf_gpu_va(struct unit_module *m,
 	vm = nvgpu_vm_init(g,
 			g->ops.mm.gmmu.get_default_big_page_size(),
 			low_hole,
+			user_vma,
 			kernel_reserved,
-			aperture_size,
 			big_pages,
 			false,
 			true,
@@ -1458,7 +1459,9 @@ int test_map_buf_gpu_va(struct unit_module *m,
 	 * Calculate a valid base GPU VA for the buffer. We're multiplying
 	 * buf_size by 10 just to be on the safe side.
 	 */
+	user_vma_limit = nvgpu_alloc_end(&vm->user);
 	gpu_va = user_vma_limit - buf_size*10;
+	unit_info(m, " - user_vma_limit = 0x%llx\n", user_vma_limit);
 	unit_info(m, "Mapping Buffer:\n");
 	unit_info(m, " - CPU PA = 0x%llx\n", BUF_CPU_PA);
 	unit_info(m, " - GPU VA = 0x%llx\n", gpu_va);
@@ -1708,8 +1711,8 @@ int test_batch(struct unit_module *m, struct gk20a *g, void *__args)
 	vm = nvgpu_vm_init(g,
 			g->ops.mm.gmmu.get_default_big_page_size(),
 			low_hole,
+			user_vma,
 			kernel_reserved,
-			aperture_size,
 			big_pages,
 			false,
 			true,
@@ -1881,7 +1884,7 @@ int test_vm_area_error_cases(struct unit_module *m, struct gk20a *g,
 	}

 	/* Failure: Dynamic allocation in nvgpu_vm_area_alloc_gmmu_map fails */
-	nvgpu_posix_enable_fault_injection(kmem_fi, true, 33);
+	nvgpu_posix_enable_fault_injection(kmem_fi, true, 25);
 	ret = nvgpu_vm_area_alloc(vm, 10, SZ_4K, &gpu_va,
 			NVGPU_VM_AREA_ALLOC_SPARSE);
 	nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);

@@ -80,15 +80,14 @@ static int init_channel_vm(struct unit_module *m, struct nvgpu_channel *ch)
 	aperture_size = GK20A_PMU_VA_SIZE;
 	mm->pmu.aperture_size = GK20A_PMU_VA_SIZE;

-	mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE -
-		NV_MM_DEFAULT_KERNEL_SIZE;
+	mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE;
 	mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE;

 	mm->pmu.vm = nvgpu_vm_init(g,
 			g->ops.mm.gmmu.get_default_big_page_size(),
 			low_hole,
-			aperture_size - low_hole,
-			aperture_size,
+			0ULL,
+			nvgpu_safe_sub_u64(aperture_size, low_hole),
 			true,
 			false,
 			false,
@@ -486,7 +485,7 @@ int test_sync_create_fail(struct unit_module *m, struct gk20a *g, void *args)
 		 * consequtive calls to kmalloc
 		 */
 		ch->vm->syncpt_ro_map_gpu_va = 1ULL;
-		nvgpu_posix_enable_fault_injection(kmem_fi, true, 3);
+		nvgpu_posix_enable_fault_injection(kmem_fi, true, 2);
 		fault_injection_enabled = true;
 	} else {
 		continue;