Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 09:12:24 +03:00)
gpu: nvgpu: remove user managed addr space capability flag
Remove the NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED and NVGPU_AS_ALLOC_USERSPACE_MANAGED flags, which were used to support userspace-managed address spaces. This functionality was never fully implemented in the kernel and is not going to be implemented in the near future.

Jira NVGPU-9832
Bug 4034184

Change-Id: I3787d92c44682b02d440e52c7a0c8c0553742dcc
Signed-off-by: Shashank Singh <shashsingh@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2882168
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Commit 9512b9f1de (parent 84bb919909), committed by mobile promotions
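The diff below touches many nvgpu_vm_init()/nvgpu_vm_do_init() call sites, but the shape of the change is the same everywhere: the boolean that selected a userspace-managed address space is dropped from the argument list, and the unit tests built around it are compiled out with #if 0. A minimal sketch of the call-site change follows; the argument names (userspace_managed, unified_va) are taken from the unit-test comments in this diff and are assumptions, not the authoritative prototype.

/*
 * Sketch only - illustrates the repeated call-site edit in this commit.
 * Consult the nvgpu vm header in the tree for the real nvgpu_vm_init()
 * prototype; parameter names here mirror the unit-test documentation.
 */

/* Before: userspace_managed sat between big_pages and unified_va. */
vm = nvgpu_vm_init(g, big_page_size,
                   low_hole, user_reserved, kernel_reserved,
                   nvgpu_gmmu_va_small_page_limit(),
                   big_pages,
                   false,        /* userspace_managed - removed by this change */
                   unified_va,
                   "example");

/* After: only big_pages and unified_va remain ahead of the VM name. */
vm = nvgpu_vm_init(g, big_page_size,
                   low_hole, user_reserved, kernel_reserved,
                   nvgpu_gmmu_va_small_page_limit(),
                   big_pages,
                   unified_va,
                   "example");

The uapi flags named in the commit message (NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED and NVGPU_AS_ALLOC_USERSPACE_MANAGED) disappear together with that argument, which is why the user-managed test parameter tables and HAL stubs below end up wrapped in #if 0 / #endif.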
@@ -339,12 +339,12 @@ test_unit_config.unit_config=2
 [mm.as]
 test_as_alloc_share.as_alloc_share_0k_um=2
 test_as_alloc_share.as_alloc_share_64k_um=2
-test_as_alloc_share.as_alloc_share_64k_um_as_fail=0
-test_as_alloc_share.as_alloc_share_64k_um_busy_fail_1=0
+test_as_alloc_share.as_alloc_share_64k_um_as_fail=2
+test_as_alloc_share.as_alloc_share_64k_um_busy_fail_1=2
 test_as_alloc_share.as_alloc_share_64k_um_busy_fail_2=2
-test_as_alloc_share.as_alloc_share_64k_um_vm_fail=0
-test_as_alloc_share.as_alloc_share_einval_um=0
-test_as_alloc_share.as_alloc_share_notp2_um=0
+test_as_alloc_share.as_alloc_share_64k_um_vm_fail=2
+test_as_alloc_share.as_alloc_share_einval_um=2
+test_as_alloc_share.as_alloc_share_notp2_um=2
 test_as_alloc_share.as_alloc_share_uva=2
 test_as_alloc_share.as_alloc_share_uva_enabled=2
 test_init_mm.init=0
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -120,7 +120,7 @@ int test_gr_ctx_error_injection(struct unit_module *m,
 vm = nvgpu_vm_init(g, SZ_4K, SZ_4K << 10,
 nvgpu_safe_sub_u64(1ULL << 37, SZ_4K << 10),
 (1ULL << 32), 0ULL,
-false, false, false, "dummy");
+false, false, "dummy");
 if (!vm) {
 unit_return_fail(m, "failed to allocate VM");
 }
@@ -132,7 +132,7 @@ int test_gr_ctx_error_injection(struct unit_module *m,
 0ULL,
 nvgpu_safe_sub_u64(mm->bar1.aperture_size, low_hole),
 0ULL,
-true, false, false,
+true, false,
 "bar1");
 if (mm->bar1.vm == NULL) {
 unit_return_fail(m, "nvgpu_vm_init failed\n");
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -740,7 +740,7 @@ int test_gr_init_hal_error_injection(struct unit_module *m,
 vm = nvgpu_vm_init(g, SZ_4K, SZ_4K << 10,
 nvgpu_safe_sub_u64(1ULL << 37, SZ_4K << 10),
 (1ULL << 32), 0ULL,
-false, false, false, "dummy");
+false, false, "dummy");
 if (!vm) {
 unit_return_fail(m, "failed to allocate VM");
 }
@@ -334,7 +334,7 @@ static int gr_test_intr_allocate_ch_tsg(struct unit_module *m,
 vm = nvgpu_vm_init(g, SZ_4K, SZ_4K << 10,
 nvgpu_safe_sub_u64(1ULL << 37, SZ_4K << 10),
 (1ULL << 32), 0ULL,
-false, false, false, "dummy");
+false, false, "dummy");
 if (!vm) {
 unit_err(m, "failed to allocate VM");
 goto ch_cleanup;
@@ -156,7 +156,7 @@ int test_gr_obj_ctx_error_injection(struct unit_module *m,
 vm = nvgpu_vm_init(g, SZ_4K, SZ_4K << 10,
 nvgpu_safe_sub_u64(1ULL << 37, SZ_4K << 10),
 (1ULL << 32), 0ULL,
-false, false, false, "dummy");
+false, false, "dummy");
 if (!vm) {
 unit_return_fail(m, "failed to allocate VM");
 }
@@ -271,7 +271,7 @@ int test_ltc_init_support(struct unit_module *m,
 0ULL,
 nvgpu_safe_sub_u64(mm->bar1.aperture_size, low_hole),
 0ULL,
-true, false, false,
+true, false,
 "bar1");
 if (mm->bar1.vm == NULL) {
 unit_return_fail(m, "nvgpu_vm_init failed\n");
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -107,7 +107,6 @@ static struct vm_gk20a *init_vm_env(struct unit_module *m, struct gk20a *g,
 0ULL,
 big_pages,
-false,
 false,
 name);

 if (test_vm == NULL) {
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -219,7 +219,6 @@ int test_nvgpu_buddy_allocator_alloc(struct unit_module *m,
 * - aperture_size = GK20A_PMU_VA_SIZE.
 * - kernel_reserved = aperture_size - low_hole.
 * - flags = GPU_ALLOC_GVA_SPACE, GVA space enabled.
-* - userspace_managed = false, unified_va = false.
 * - big_pages = false.
 * - Initialize buddy allocator for this test.
 * - Base address = 1K.
@@ -265,7 +264,6 @@ int test_buddy_allocator_with_small_pages(struct unit_module *m,
 * - aperture_size = GK20A_PMU_VA_SIZE.
 * - kernel_reserved = aperture_size - low_hole.
 * - flags = GPU_ALLOC_GVA_SPACE, GVA space enabled.
-* - userspace_managed = false, unified_va = false.
 * - big_pages = true.
 * - Initialize buddy allocator for this test.
 * - Base address = 64M, PDE aligned.
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -55,6 +55,7 @@
 */
 static int global_id_count;

+#if 0
 /* Parameters to test standard cases of allocation */
 static struct test_parameters test_64k_user_managed = {
 .big_page_size = SZ_64K,
@@ -69,6 +70,7 @@ static struct test_parameters test_0k_user_managed = {
 .flags = NVGPU_AS_ALLOC_USERSPACE_MANAGED,
 .expected_error = 0
 };
+#endif

 static struct test_parameters test_64k_unified_va = {
 .big_page_size = SZ_64K,
@@ -85,6 +87,7 @@ static struct test_parameters test_64k_unified_va_enabled = {
 .unify_address_spaces_flag = true
 };

+#if 0
 static struct test_parameters test_einval_user_managed = {
 .big_page_size = 1,
 .small_big_split = (SZ_1G * 56ULL),
@@ -131,7 +134,7 @@ static struct test_parameters test_64k_user_managed_busy_fail_2 = {
 .expected_error = 0,
 .special_case = SPECIAL_CASE_GK20A_BUSY_RELEASE
 };

+#endif
 /*
 * Init the minimum set of HALs to use DMA amd GMMU features, then call the
 * init_mm base function.
@@ -270,6 +273,7 @@ int test_as_alloc_share(struct unit_module *m, struct gk20a *g, void *args)

 struct unit_module_test nvgpu_mm_as_tests[] = {
 UNIT_TEST(init, test_init_mm, NULL, 0),
+#if 0
 UNIT_TEST(as_alloc_share_64k_um_as_fail, test_as_alloc_share,
 (void *) &test_64k_user_managed_as_fail, 0),
 UNIT_TEST(as_alloc_share_64k_um_vm_fail, test_as_alloc_share,
@@ -286,6 +290,7 @@ struct unit_module_test nvgpu_mm_as_tests[] = {
 (void *) &test_einval_user_managed, 0),
 UNIT_TEST(as_alloc_share_notp2_um, test_as_alloc_share,
 (void *) &test_notp2_user_managed, 0),
+#endif
 UNIT_TEST(as_alloc_share_uva, test_as_alloc_share,
 (void *) &test_64k_unified_va, 2),
 UNIT_TEST(as_alloc_share_uva_enabled, test_as_alloc_share,
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -199,7 +199,6 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 0ULL,
 true,
-false,
 false,
 "system");
 if (mm->pmu.vm == NULL) {
 unit_return_fail(m, "nvgpu_vm_init failed\n");
@@ -212,7 +211,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 0ULL,
 nvgpu_safe_sub_u64(mm->bar1.aperture_size, low_hole),
 0ULL,
-true, false, false,
+true, false,
 "bar1");
 if (mm->bar1.vm == NULL) {
 unit_return_fail(m, "nvgpu_vm_init failed\n");
@@ -356,7 +356,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 0ULL,
 nvgpu_safe_sub_u64(mm->bar1.aperture_size, low_hole),
 0ULL,
-true, false, false,
+true, false,
 "bar1");
 if (mm->bar1.vm == NULL) {
 unit_return_fail(m, "nvgpu_vm_init failed\n");
@@ -370,7 +370,6 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 0ULL,
 true,
-false,
 false,
 "system");
 if (mm->pmu.vm == NULL) {
 unit_return_fail(m, "nvgpu_vm_init failed\n");
@@ -983,7 +982,7 @@ static struct vm_gk20a *init_test_req_vm(struct gk20a *g)
 return nvgpu_vm_init(g, g->ops.mm.gmmu.get_default_big_page_size(),
 low_hole, user_reserved, kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
-big_pages, true, true, "testmem");
+big_pages, true, "testmem");
 }

 int test_nvgpu_page_table_c1_full(struct unit_module *m, struct gk20a *g,
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -164,7 +164,6 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 0ULL,
 true,
-false,
 false,
 "system");
 if (mm->pmu.vm == NULL) {
 unit_return_fail(m, "'system' nvgpu_vm_init failed\n");
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -153,7 +153,6 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 0ULL,
 true,
-false,
 false,
 "system");
 if (mm->pmu.vm == NULL) {
 unit_return_fail(m, "'system' nvgpu_vm_init failed\n");
@@ -164,7 +163,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 mm->bar1.vm = nvgpu_vm_init(g,
 g->ops.mm.gmmu.get_default_big_page_size(),
 SZ_4K, 0ULL, nvgpu_safe_sub_u64(mm->bar1.aperture_size, SZ_4K),
-0ULL, false, false, false, "bar1");
+0ULL, false, false, "bar1");
 if (mm->bar1.vm == NULL) {
 unit_return_fail(m, "'bar1' nvgpu_vm_init failed\n");
 }
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -144,7 +144,6 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 0ULL,
 true,
-false,
 false,
 "system");
 if (mm->pmu.vm == NULL) {
 unit_return_fail(m, "'system' nvgpu_vm_init failed\n");
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -139,7 +139,6 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 0ULL,
 true,
-false,
 false,
 "system");
 if (mm->pmu.vm == NULL) {
 unit_return_fail(m, "'system' nvgpu_vm_init failed\n");
@@ -150,7 +149,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 mm->bar2.vm = nvgpu_vm_init(g,
 g->ops.mm.gmmu.get_default_big_page_size(),
 SZ_4K, 0ULL, nvgpu_safe_sub_u64(mm->bar2.aperture_size, SZ_4K),
-0ULL, false, false, false, "bar2");
+0ULL, false, false, "bar2");
 if (mm->bar2.vm == NULL) {
 unit_return_fail(m, "'bar2' nvgpu_vm_init failed\n");
 }
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -181,7 +181,6 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 0ULL,
 true,
-false,
 false,
 "system");
 if (mm->pmu.vm == NULL) {
 unit_return_fail(m, "'system' nvgpu_vm_init failed\n");
@@ -192,7 +191,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 mm->bar2.vm = nvgpu_vm_init(g,
 g->ops.mm.gmmu.get_default_big_page_size(),
 SZ_4K, 0ULL, nvgpu_safe_sub_u64(mm->bar2.aperture_size, SZ_4K),
-0ULL, false, false, false, "bar2");
+0ULL, false, false, "bar2");
 if (mm->bar2.vm == NULL) {
 unit_return_fail(m, "'bar2' nvgpu_vm_init failed\n");
 }
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -204,7 +204,6 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 0ULL,
 true,
-false,
 false,
 "system");
 if (mm->pmu.vm == NULL) {
 unit_return_fail(m, "'system' nvgpu_vm_init failed\n");
@@ -217,7 +216,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 0ULL,
 nvgpu_safe_sub_u64(mm->bar1.aperture_size, SZ_64K),
 0ULL,
-true, false, false,
+true, false,
 "bar1");
 if (mm->bar1.vm == NULL) {
 return -ENOMEM;
@@ -229,7 +228,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 mm->bar2.vm = nvgpu_vm_init(g,
 g->ops.mm.gmmu.get_default_big_page_size(),
 SZ_4K, 0ULL, nvgpu_safe_sub_u64(mm->bar2.aperture_size, SZ_4K),
-0ULL, false, false, false, "bar2");
+0ULL, false, false, "bar2");
 if (mm->bar2.vm == NULL) {
 unit_return_fail(m, "'bar2' nvgpu_vm_init failed\n");
 }
@@ -151,6 +151,7 @@ static int hal_fb_tlb_invalidate_error(struct gk20a *g, struct nvgpu_mem *pdb)
 return -1;
 }

+#if 0
 /* Dummy HAL for vm_as_alloc_share that always fails */
 static int hal_vm_as_alloc_share_error(struct gk20a *g, struct vm_gk20a *vm)
 {
@@ -162,6 +163,7 @@ static int hal_vm_as_alloc_share_success(struct gk20a *g, struct vm_gk20a *vm)
 {
 return 0;
 }
+#endif

 /* Initialize test environment */
 static int init_test_env(struct unit_module *m, struct gk20a *g)
@@ -227,7 +229,6 @@ static struct vm_gk20a *create_test_vm(struct unit_module *m, struct gk20a *g)
 kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
 true,
-false,
 true,
 __func__);
 return vm;
@@ -348,6 +349,7 @@ int test_map_buffer_error_cases(struct unit_module *m, struct gk20a *g,
 goto free_sgt_os_buf;
 }

+#if 0
 /* Non-fixed offset with userspace managed VM */
 vm->userspace_managed = true;
 ret = nvgpu_vm_map(vm,
@@ -370,6 +372,7 @@ int test_map_buffer_error_cases(struct unit_module *m, struct gk20a *g,
 ret = UNIT_FAIL;
 goto free_sgt_os_buf;
 }
+#endif

 /* Invalid buffer size */
 os_buf.size = 0;
|
||||
@@ -1142,6 +1145,7 @@ static int map_buffer(struct unit_module *m,
 goto free_mapped_buf;
 }

+#if 0
 /*
 * If VM is userspace managed, there should not be any accessible
 * buffers.
@@ -1154,6 +1158,7 @@ static int map_buffer(struct unit_module *m,
 ret = UNIT_FAIL;
 goto free_mapped_buf;
 }
+#endif

 ret = UNIT_SUCCESS;

@@ -1297,7 +1302,6 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
 big_pages,
-false,
 true,
 __func__);
 nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
@@ -1319,7 +1323,6 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 default_aperture_size, /* invalid aperture size */
 nvgpu_gmmu_va_small_page_limit(),
 big_pages,
-false,
 true,
 __func__)
 )) {
@@ -1331,6 +1334,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 /* Make nvgpu_vm_do_init fail with invalid parameters */
 vm = nvgpu_kzalloc(g, sizeof(*vm));

+#if 0
 /* vGPU with userspace managed */
 g->is_virtual = true;
 ret = nvgpu_vm_do_init(&g->mm, vm,
@@ -1358,7 +1362,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 ret = UNIT_FAIL;
 goto exit;
 }

+#endif
 /* Invalid VM configuration - This scenario is not feasible */
 low_hole = SZ_1M * 64;

@@ -1368,7 +1372,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 g->ops.mm.gmmu.get_default_big_page_size(),
 low_hole, user_vma, kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
-big_pages, false, true, __func__);
+big_pages, true, __func__);
 nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
 if (ret != -ENOMEM) {
 unit_err(m, "nvgpu_vm_do_init did not fail as expected (7).\n");
@@ -1382,7 +1386,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 g->ops.mm.gmmu.get_default_big_page_size(),
 low_hole, user_vma, kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
-big_pages, false, true, __func__);
+big_pages, true, __func__);
 nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
 if (ret != -ENOMEM) {
 unit_err(m, "nvgpu_vm_do_init did not fail as expected (8).\n");
@@ -1396,7 +1400,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 g->ops.mm.gmmu.get_default_big_page_size(),
 low_hole, user_vma, kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
-big_pages, false, false, __func__);
+big_pages, false, __func__);
 nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
 if (ret != -ENOMEM) {
 unit_err(m, "nvgpu_vm_do_init didn't fail as expected (9).\n");
@@ -1410,7 +1414,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 g->ops.mm.gmmu.get_default_big_page_size(),
 low_hole, user_vma, kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
-big_pages, false, false, __func__);
+big_pages, false, __func__);
 nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
 if (ret != -ENOMEM) {
 unit_err(m, "nvgpu_vm_do_init didn't fail as expected (10).\n");
@@ -1423,7 +1427,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 g->ops.mm.gmmu.get_default_big_page_size(),
 low_hole, user_vma, kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
-big_pages, false, false,
+big_pages, false,
 "very_long_vm_name_to_fail_vm_init");
 if (ret != -EINVAL) {
 unit_err(m, "nvgpu_vm_do_init didn't fail as expected (12).\n");
@@ -1436,7 +1440,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 g->ops.mm.gmmu.get_default_big_page_size(),
 low_hole, user_vma, kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
-big_pages, false, false, __func__);
+big_pages, false, __func__);
 if (ret != 0) {
 unit_err(m, "nvgpu_vm_do_init did not succeed as expected (B).\n");
 ret = UNIT_FAIL;
@@ -1448,7 +1452,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 g->ops.mm.gmmu.get_default_big_page_size(),
 low_hole, user_vma, kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
-false, false, false, __func__);
+false, false, __func__);
 if (ret != 0) {
 unit_err(m, "nvgpu_vm_do_init did not succeed as expected (C).\n");
 ret = UNIT_FAIL;
@@ -1461,7 +1465,7 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 nvgpu_gmmu_va_small_page_limit(),
 0ULL, kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(), big_pages,
-false, false, __func__);
+false, __func__);
 if (ret != 0) {
 unit_err(m, "nvgpu_vm_do_init did not succeed as expected (D).\n");
 ret = UNIT_FAIL;
@@ -1547,7 +1551,6 @@ int test_map_buf(struct unit_module *m, struct gk20a *g, void *__args)
 kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
 big_pages,
-false,
 true,
 __func__);
 if (vm == NULL) {
@@ -1793,7 +1796,6 @@ int test_map_buf_gpu_va(struct unit_module *m,
 kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
 big_pages,
-false,
 true,
 __func__);
 if (vm == NULL) {
@@ -2048,7 +2050,6 @@ int test_batch(struct unit_module *m, struct gk20a *g, void *__args)
 kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
 big_pages,
-false,
 true,
 __func__);
 if (vm == NULL) {
@@ -93,7 +93,6 @@ static int init_channel_vm(struct unit_module *m, struct nvgpu_channel *ch)
 0ULL,
 true,
-false,
 false,
 "system");
 if (mm->pmu.vm == NULL) {
 unit_return_fail(m, "nvgpu_vm_init failed\n");
@@ -108,7 +107,7 @@ static int init_channel_vm(struct unit_module *m, struct nvgpu_channel *ch)
 0ULL,
 nvgpu_safe_sub_u64(mm->bar1.aperture_size, low_hole),
 0ULL,
-true, false, false,
+true, false,
 "bar1");
 if (mm->bar1.vm == NULL) {
 unit_return_fail(m, "nvgpu_vm_init failed\n");