Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: accept user vma size in vm init
Modify nvgpu_vm_init to accept low_hole, user_reserved and
kernel_reserved. This will simplify argument limit checks and make
the code more legible.

JIRA NVGPU-5302

Change-Id: I62773dd7b06264a3b6cb8896239b24c49fa69f9b
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2394901
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by Alex Waterman
parent aef3367ca5
commit 49c9f0c137
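For orientation before the hunks below, here is a sketch in plain C of the call-site change this commit makes. It is an illustration only: the helper name example_system_vm is made up, and the parameter names (user_reserved, kernel_reserved) and the trailing bool flags are inferred from the hunks, not copied from the nvgpu headers.

/*
 * Sketch of the nvgpu_vm_init() call-site change in this patch
 * (illustration only; see the hunks below for the real call sites).
 */
static struct vm_gk20a *example_system_vm(struct gk20a *g, u64 low_hole,
                                          u64 aperture_size, bool big_pages)
{
        /*
         * Before this patch the caller passed the kernel-reserved size and
         * the total aperture size:
         *
         *   return nvgpu_vm_init(g,
         *                        g->ops.mm.gmmu.get_default_big_page_size(),
         *                        low_hole,
         *                        aperture_size - low_hole,
         *                        aperture_size,
         *                        big_pages, false, false, "example");
         */

        /*
         * After this patch the caller passes the user VMA size explicitly
         * (0 for a kernel-only VM) plus the kernel-reserved size; the
         * aperture size argument is gone, so it is presumably derived as
         * low_hole + user_reserved + kernel_reserved inside nvgpu_vm_init().
         */
        return nvgpu_vm_init(g,
                             g->ops.mm.gmmu.get_default_big_page_size(),
                             low_hole,
                             0ULL,
                             nvgpu_safe_sub_u64(aperture_size, low_hole),
                             big_pages, false, false, "example");
}

The page-table unit test below takes the other route and passes a non-zero user_reserved computed as aperture_size - kernel_reserved - low_hole.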
@@ -100,8 +100,8 @@ static struct vm_gk20a *init_vm_env(struct unit_module *m, struct gk20a *g,
         /* Init vm with big_pages disabled */
         test_vm = nvgpu_vm_init(g, g->ops.mm.gmmu.get_default_big_page_size(),
                                 low_hole,
-                                aperture_size - low_hole,
-                                aperture_size,
+                                0ULL,
+                                nvgpu_safe_sub_u64(aperture_size, low_hole),
                                 big_pages,
                                 false,
                                 false,
@@ -188,16 +188,15 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
         low_hole = SZ_4K * 16UL;
         aperture_size = GK20A_PMU_VA_SIZE;
         mm->pmu.aperture_size = GK20A_PMU_VA_SIZE;
-        mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE -
-                NV_MM_DEFAULT_KERNEL_SIZE;
+        mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE;
         mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE;


         mm->pmu.vm = nvgpu_vm_init(g,
                                    g->ops.mm.gmmu.get_default_big_page_size(),
                                    low_hole,
-                                   aperture_size - low_hole,
-                                   aperture_size,
+                                   0ULL,
+                                   nvgpu_safe_sub_u64(aperture_size, low_hole),
                                    true,
                                    false,
                                    false,
@@ -538,7 +537,7 @@ int test_mm_dma_alloc_map_fault_injection(struct unit_module *m,
          * nvgpu_dma_alloc_flags_sys function
          */
         kmem_fi = nvgpu_kmem_get_fault_injection();
-        nvgpu_posix_enable_fault_injection(kmem_fi, true, 5);
+        nvgpu_posix_enable_fault_injection(kmem_fi, true, 1);

         err = nvgpu_dma_alloc_map(g->mm.pmu.vm, SZ_4K, mem);
         if (err == 0) {
@@ -347,16 +347,15 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
         low_hole = SZ_4K * 16UL;
         aperture_size = GK20A_PMU_VA_SIZE;
         mm->pmu.aperture_size = GK20A_PMU_VA_SIZE;
-        mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE -
-                NV_MM_DEFAULT_KERNEL_SIZE;
+        mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE;
         mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE;


         mm->pmu.vm = nvgpu_vm_init(g,
                                    g->ops.mm.gmmu.get_default_big_page_size(),
                                    low_hole,
-                                   aperture_size - low_hole,
-                                   aperture_size,
+                                   0ULL,
+                                   nvgpu_safe_sub_u64(aperture_size, low_hole),
                                    true,
                                    false,
                                    false,
@@ -953,7 +952,7 @@ static int check_pte_invalidated(struct unit_module *m, struct gk20a *g,
 /* Create a VM based on requirements described in NVGPU-RQCD-45 */
 static struct vm_gk20a *init_test_req_vm(struct gk20a *g)
 {
-        u64 low_hole, aperture_size, kernel_reserved;
+        u64 low_hole, aperture_size, kernel_reserved, user_reserved;
         bool big_pages;

         /* Init some common attributes */

@@ -971,10 +970,12 @@ static struct vm_gk20a *init_test_req_vm(struct gk20a *g)
         aperture_size = 128 * SZ_1G;
         /* 1.4. Have a 4GB kernel reserved space */
         kernel_reserved = 4 * SZ_1G;
+        /* 1.5. User reserved space */
+        user_reserved = aperture_size - kernel_reserved - low_hole;

         return nvgpu_vm_init(g, g->ops.mm.gmmu.get_default_big_page_size(),
-                        low_hole, kernel_reserved,
-                        aperture_size, big_pages, true, true, "testmem");
+                        low_hole, user_reserved, kernel_reserved,
+                        big_pages, true, true, "testmem");
 }

 int test_nvgpu_page_table_c1_full(struct unit_module *m, struct gk20a *g,
@@ -159,8 +159,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
         mm->pmu.vm = nvgpu_vm_init(g,
                                    g->ops.mm.gmmu.get_default_big_page_size(),
                                    low_hole,
-                                   aperture_size - low_hole,
-                                   aperture_size,
+                                   0ULL,
+                                   nvgpu_safe_sub_u64(aperture_size, low_hole),
                                    true,
                                    false,
                                    false,

@@ -148,8 +148,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
         mm->pmu.vm = nvgpu_vm_init(g,
                                    g->ops.mm.gmmu.get_default_big_page_size(),
                                    low_hole,
-                                   aperture_size - low_hole,
-                                   aperture_size,
+                                   0ULL,
+                                   nvgpu_safe_sub_u64(aperture_size, low_hole),
                                    true,
                                    false,
                                    false,
@@ -162,8 +162,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
         mm->bar1.aperture_size = U32(16) << 20U;
         mm->bar1.vm = nvgpu_vm_init(g,
                 g->ops.mm.gmmu.get_default_big_page_size(),
-                SZ_4K, mm->bar1.aperture_size - SZ_4K,
-                mm->bar1.aperture_size, false, false, false, "bar1");
+                SZ_4K, 0ULL, nvgpu_safe_sub_u64(mm->bar1.aperture_size, SZ_4K),
+                false, false, false, "bar1");
         if (mm->bar1.vm == NULL) {
                 unit_return_fail(m, "'bar1' nvgpu_vm_init failed\n");
         }
@@ -144,6 +144,7 @@ int test_update_gmmu_pde3_locked(struct unit_module *m,
         unit_assert(g->mm.pd_cache == NULL, goto done);

         vm.mm = &g->mm;
+        vm.mm->g = g;
         err = nvgpu_pd_cache_init(g);
         unit_assert(err == 0, goto done);

@@ -139,8 +139,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
         mm->pmu.vm = nvgpu_vm_init(g,
                                    g->ops.mm.gmmu.get_default_big_page_size(),
                                    low_hole,
-                                   aperture_size - low_hole,
-                                   aperture_size,
+                                   0ULL,
+                                   nvgpu_safe_sub_u64(aperture_size, low_hole),
                                    true,
                                    false,
                                    false,

@@ -134,8 +134,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
         mm->pmu.vm = nvgpu_vm_init(g,
                                    g->ops.mm.gmmu.get_default_big_page_size(),
                                    low_hole,
-                                   aperture_size - low_hole,
-                                   aperture_size,
+                                   0ULL,
+                                   nvgpu_safe_sub_u64(aperture_size, low_hole),
                                    true,
                                    false,
                                    false,
@@ -148,8 +148,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
         mm->bar2.aperture_size = U32(32) << 20U;
         mm->bar2.vm = nvgpu_vm_init(g,
                 g->ops.mm.gmmu.get_default_big_page_size(),
-                SZ_4K, mm->bar2.aperture_size - SZ_4K,
-                mm->bar2.aperture_size, false, false, false, "bar2");
+                SZ_4K, 0ULL, nvgpu_safe_sub_u64(mm->bar2.aperture_size, SZ_4K),
+                false, false, false, "bar2");
         if (mm->bar2.vm == NULL) {
                 unit_return_fail(m, "'bar2' nvgpu_vm_init failed\n");
         }

@@ -173,8 +173,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
         mm->pmu.vm = nvgpu_vm_init(g,
                                    g->ops.mm.gmmu.get_default_big_page_size(),
                                    low_hole,
-                                   aperture_size - low_hole,
-                                   aperture_size,
+                                   0ULL,
+                                   nvgpu_safe_sub_u64(aperture_size, low_hole),
                                    true,
                                    false,
                                    false,

@@ -187,8 +187,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
         mm->bar2.aperture_size = U32(32) << 20U;
         mm->bar2.vm = nvgpu_vm_init(g,
                 g->ops.mm.gmmu.get_default_big_page_size(),
-                SZ_4K, mm->bar2.aperture_size - SZ_4K,
-                mm->bar2.aperture_size, false, false, false, "bar2");
+                SZ_4K, 0ULL, nvgpu_safe_sub_u64(mm->bar2.aperture_size, SZ_4K),
+                false, false, false, "bar2");
         if (mm->bar2.vm == NULL) {
                 unit_return_fail(m, "'bar2' nvgpu_vm_init failed\n");
         }
@@ -228,12 +228,12 @@ int test_nvgpu_init_mm(struct unit_module *m, struct gk20a *g, void *args)
                         -ENOMEM, 4);

         /* Making nvgpu_init_system_vm fail on the PMU VM init */
-        errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 29,
+        errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 10,
                         -ENOMEM, 5);

         /* Making nvgpu_init_system_vm fail again with extra branch coverage */
         g->ops.mm.init_bar2_vm = NULL;
-        errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 20,
+        errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 6,
                         -ENOMEM, 6);
         g->ops.mm.init_bar2_vm = gp10b_mm_init_bar2_vm;


@@ -246,7 +246,7 @@ int test_nvgpu_init_mm(struct unit_module *m, struct gk20a *g, void *args)
                         -ENOMEM, 8);

         /* Making nvgpu_init_engine_ucode_vm(sec2) fail on VM init */
-        errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 46,
+        errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 15,
                         -ENOMEM, 9);

         /* Making nvgpu_init_engine_ucode_vm(sec2) fail on alloc_inst_block */

@@ -258,11 +258,11 @@ int test_nvgpu_init_mm(struct unit_module *m, struct gk20a *g, void *args)
                         -ENOMEM, 11);

         /* Making nvgpu_init_cde_vm fail */
-        errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 80,
+        errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 25,
                         -ENOMEM, 12);

         /* Making nvgpu_init_ce_vm fail */
-        errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 98,
+        errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 33,
                         -ENOMEM, 13);

         /* Making nvgpu_init_mmu_debug fail on wr_mem DMA alloc */
@@ -199,8 +199,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
         mm->pmu.vm = nvgpu_vm_init(g,
                                    g->ops.mm.gmmu.get_default_big_page_size(),
                                    low_hole,
-                                   aperture_size - low_hole,
-                                   aperture_size,
+                                   0ULL,
+                                   nvgpu_safe_sub_u64(aperture_size, low_hole),
                                    true,
                                    false,
                                    false,

@@ -213,8 +213,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
         mm->bar2.aperture_size = U32(32) << 20U;
         mm->bar2.vm = nvgpu_vm_init(g,
                 g->ops.mm.gmmu.get_default_big_page_size(),
-                SZ_4K, mm->bar2.aperture_size - SZ_4K,
-                mm->bar2.aperture_size, false, false, false, "bar2");
+                SZ_4K, 0ULL, nvgpu_safe_sub_u64(mm->bar2.aperture_size, SZ_4K),
+                false, false, false, "bar2");
         if (mm->bar2.vm == NULL) {
                 unit_return_fail(m, "'bar2' nvgpu_vm_init failed\n");
         }
@@ -221,8 +221,8 @@ static struct vm_gk20a *create_test_vm(struct unit_module *m, struct gk20a *g)
         vm = nvgpu_vm_init(g,
                            g->ops.mm.gmmu.get_default_big_page_size(),
                            low_hole,
+                           user_vma,
                            kernel_reserved,
-                           aperture_size,
                            true,
                            false,
                            true,

@@ -451,7 +451,7 @@ int test_map_buffer_error_cases(struct unit_module *m, struct gk20a *g,
 #endif

         /* Make g->ops.mm.gmmu.map fail */
-        nvgpu_posix_enable_fault_injection(kmem_fi, true, 40);
+        nvgpu_posix_enable_fault_injection(kmem_fi, true, 20);
         ret = nvgpu_vm_map(vm,
                            &os_buf,
                            sgt,
@@ -895,6 +895,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
         u64 low_hole = 0;
         u64 kernel_reserved = 0;
         u64 aperture_size = 0;
+        u64 user_vma = 0;
         bool big_pages = true;
         struct nvgpu_posix_fault_inj *kmem_fi =
                 nvgpu_kmem_get_fault_injection();

@@ -910,14 +911,15 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
         low_hole = SZ_1M * 64;
         aperture_size = 128 * SZ_1G;
         kernel_reserved = 4 * SZ_1G - low_hole;
+        user_vma = aperture_size - low_hole - kernel_reserved;

         /* Error injection to make the allocation for struct vm_gk20a to fail */
         nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
         vm = nvgpu_vm_init(g,
                 g->ops.mm.gmmu.get_default_big_page_size(),
                 low_hole,
+                user_vma,
                 kernel_reserved,
-                aperture_size,
                 big_pages,
                 false,
                 true,
@@ -935,14 +937,14 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
          */
         if (!EXPECT_BUG(
                 nvgpu_vm_init(g,
-                        g->ops.mm.gmmu.get_default_big_page_size(),
-                        low_hole,
-                        kernel_reserved,
-                        0, /* invalid aperture size */
-                        big_pages,
-                        false,
-                        true,
-                        __func__)
+                        g->ops.mm.gmmu.get_default_big_page_size(),
+                        low_hole,
+                        user_vma,
+                        NV_MM_DEFAULT_APERTURE_SIZE, /* invalid aperture size */
+                        big_pages,
+                        false,
+                        true,
+                        __func__)
         )) {
                 unit_err(m, "BUG() was not called but it was expected (2).\n");
                 ret = UNIT_FAIL;
@@ -955,7 +957,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
         if (!EXPECT_BUG(
                 nvgpu_vm_do_init(&g->mm, vm,
                         g->ops.mm.gmmu.get_default_big_page_size(),
-                        low_hole, kernel_reserved, aperture_size,
+                        low_hole, user_vma, kernel_reserved,
                         big_pages, false, true, __func__)
         )) {
                 unit_err(m, "BUG() was not called but it was expected (3).\n");

@@ -968,7 +970,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
         g->is_virtual = true;
         ret = nvgpu_vm_do_init(&g->mm, vm,
                 g->ops.mm.gmmu.get_default_big_page_size(),
-                low_hole, kernel_reserved, aperture_size,
+                low_hole, user_vma, kernel_reserved,
                 big_pages, true, true, __func__);
         g->is_virtual = false;
         if (ret != -ENOSYS) {

@@ -981,7 +983,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
         g->ops.mm.vm_as_alloc_share = hal_vm_as_alloc_share_error;
         ret = nvgpu_vm_do_init(&g->mm, vm,
                 g->ops.mm.gmmu.get_default_big_page_size(),
-                low_hole, kernel_reserved, aperture_size,
+                low_hole, user_vma, kernel_reserved,
                 big_pages, true, true, __func__);
         g->ops.mm.vm_as_alloc_share = hal_vm_as_alloc_share_success;
         if (ret != -1) {

@@ -995,7 +997,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
         if (!EXPECT_BUG(
                 nvgpu_vm_do_init(&g->mm, vm,
                         g->ops.mm.gmmu.get_default_big_page_size(),
-                        low_hole, kernel_reserved, aperture_size,
+                        low_hole, user_vma, kernel_reserved,
                         big_pages, false, false, __func__)
         )) {
                 unit_err(m, "BUG() was not called but it was expected (6).\n");

@@ -1008,7 +1010,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
         nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
         ret = nvgpu_vm_do_init(&g->mm, vm,
                 g->ops.mm.gmmu.get_default_big_page_size(),
-                low_hole, kernel_reserved, aperture_size,
+                low_hole, user_vma, kernel_reserved,
                 big_pages, false, true, __func__);
         nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
         if (ret != -ENOMEM) {

@@ -1021,7 +1023,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
         nvgpu_posix_enable_fault_injection(kmem_fi, true, 5);
         ret = nvgpu_vm_do_init(&g->mm, vm,
                 g->ops.mm.gmmu.get_default_big_page_size(),
-                low_hole, kernel_reserved, aperture_size,
+                low_hole, user_vma, kernel_reserved,
                 big_pages, false, true, __func__);
         nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
         if (ret != -ENOMEM) {

@@ -1034,7 +1036,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
         nvgpu_posix_enable_fault_injection(kmem_fi, true, 12);
         ret = nvgpu_vm_do_init(&g->mm, vm,
                 g->ops.mm.gmmu.get_default_big_page_size(),
-                low_hole, kernel_reserved, aperture_size,
+                low_hole, user_vma, kernel_reserved,
                 big_pages, false, false, __func__);
         nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
         if (ret != -ENOMEM) {

@@ -1047,7 +1049,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
         nvgpu_posix_enable_fault_injection(kmem_fi, true, 17);
         ret = nvgpu_vm_do_init(&g->mm, vm,
                 g->ops.mm.gmmu.get_default_big_page_size(),
-                low_hole, kernel_reserved, aperture_size,
+                low_hole, user_vma, kernel_reserved,
                 big_pages, false, false, __func__);
         nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
         if (ret != -ENOMEM) {
@@ -1061,7 +1063,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
         ret = nvgpu_vm_do_init(&g->mm, vm,
                 g->ops.mm.gmmu.get_default_big_page_size(),
                 nvgpu_gmmu_va_small_page_limit(),
-                0, aperture_size, big_pages, false, false,
+                ((u64)SZ_1G * 200U), 0, big_pages, false, false,
                 __func__);
         vm->guest_managed = false;
         if (ret != -EINVAL) {

@@ -1073,7 +1075,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
         /* Cause nvgpu_vm_init_vma_allocators to fail for long vm name */
         ret = nvgpu_vm_do_init(&g->mm, vm,
                 g->ops.mm.gmmu.get_default_big_page_size(),
-                low_hole, kernel_reserved, aperture_size,
+                low_hole, user_vma, kernel_reserved,
                 big_pages, false, false,
                 "very_long_vm_name_to_fail_vm_init");
         if (ret != -EINVAL) {
@@ -1085,7 +1087,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
         /* Success with big pages and not unified VA */
         ret = nvgpu_vm_do_init(&g->mm, vm,
                 g->ops.mm.gmmu.get_default_big_page_size(),
-                low_hole, kernel_reserved, aperture_size,
+                low_hole, user_vma, kernel_reserved,
                 big_pages, false, false, __func__);
         if (ret != 0) {
                 unit_err(m, "nvgpu_vm_do_init did not succeed as expected (B).\n");

@@ -1096,7 +1098,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
         /* Success with big pages disabled */
         ret = nvgpu_vm_do_init(&g->mm, vm,
                 g->ops.mm.gmmu.get_default_big_page_size(),
-                low_hole, kernel_reserved, aperture_size,
+                low_hole, user_vma, kernel_reserved,
                 false, false, false, __func__);
         if (ret != 0) {
                 unit_err(m, "nvgpu_vm_do_init did not succeed as expected (C).\n");

@@ -1108,7 +1110,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
         ret = nvgpu_vm_do_init(&g->mm, vm,
                 g->ops.mm.gmmu.get_default_big_page_size(),
                 nvgpu_gmmu_va_small_page_limit(),
-                kernel_reserved, aperture_size, big_pages,
+                0ULL, kernel_reserved, big_pages,
                 false, false, __func__);
         if (ret != 0) {
                 unit_err(m, "nvgpu_vm_do_init did not succeed as expected (D).\n");
@@ -1192,8 +1194,8 @@ int test_map_buf(struct unit_module *m, struct gk20a *g, void *__args)
         vm = nvgpu_vm_init(g,
                            g->ops.mm.gmmu.get_default_big_page_size(),
                            low_hole,
+                           user_vma,
                            kernel_reserved,
-                           aperture_size,
                            big_pages,
                            false,
                            true,
@@ -1429,7 +1431,6 @@ int test_map_buf_gpu_va(struct unit_module *m,
         aperture_size = 128 * SZ_1G;
         kernel_reserved = 4 * SZ_1G - low_hole;
         user_vma = aperture_size - low_hole - kernel_reserved;
-        user_vma_limit = aperture_size - kernel_reserved;
         unit_info(m, "Initializing VM:\n");
         unit_info(m, " - Low Hole Size = 0x%llx\n", low_hole);
         unit_info(m, " - User Aperture Size = 0x%llx\n", user_vma);

@@ -1438,8 +1439,8 @@ int test_map_buf_gpu_va(struct unit_module *m,
         vm = nvgpu_vm_init(g,
                            g->ops.mm.gmmu.get_default_big_page_size(),
                            low_hole,
+                           user_vma,
                            kernel_reserved,
-                           aperture_size,
                            big_pages,
                            false,
                            true,

@@ -1458,7 +1459,9 @@ int test_map_buf_gpu_va(struct unit_module *m,
          * Calculate a valid base GPU VA for the buffer. We're multiplying
          * buf_size by 10 just to be on the safe side.
          */
+        user_vma_limit = nvgpu_alloc_end(&vm->user);
         gpu_va = user_vma_limit - buf_size*10;
+        unit_info(m, " - user_vma_limit = 0x%llx\n", user_vma_limit);
         unit_info(m, "Mapping Buffer:\n");
         unit_info(m, " - CPU PA = 0x%llx\n", BUF_CPU_PA);
         unit_info(m, " - GPU VA = 0x%llx\n", gpu_va);
@@ -1708,8 +1711,8 @@ int test_batch(struct unit_module *m, struct gk20a *g, void *__args)
         vm = nvgpu_vm_init(g,
                            g->ops.mm.gmmu.get_default_big_page_size(),
                            low_hole,
+                           user_vma,
                            kernel_reserved,
-                           aperture_size,
                            big_pages,
                            false,
                            true,
@@ -1881,7 +1884,7 @@ int test_vm_area_error_cases(struct unit_module *m, struct gk20a *g,
         }

         /* Failure: Dynamic allocation in nvgpu_vm_area_alloc_gmmu_map fails */
-        nvgpu_posix_enable_fault_injection(kmem_fi, true, 33);
+        nvgpu_posix_enable_fault_injection(kmem_fi, true, 25);
         ret = nvgpu_vm_area_alloc(vm, 10, SZ_4K, &gpu_va,
                                   NVGPU_VM_AREA_ALLOC_SPARSE);
         nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);