gpu: nvgpu: remove user managed addr space capability flag

Remove the NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED and
NVGPU_AS_ALLOC_USERSPACE_MANAGED flags, which were used to support
userspace-managed address spaces. This functionality is not fully
implemented in the kernel, nor is it going to be implemented in the
near future.

Jira NVGPU-9832
Bug 4034184

Change-Id: I3787d92c44682b02d440e52c7a0c8c0553742dcc
Signed-off-by: Shashank Singh <shashsingh@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2882168
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Shashank Singh
2023-04-04 07:06:10 +00:00
committed by mobile promotions
parent 84bb919909
commit 9512b9f1de
32 changed files with 83 additions and 142 deletions
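
For orientation, the net effect on the core VM constructor is one parameter fewer. Below is a minimal before/after sketch of the nvgpu_vm_init() prototype as it can be reconstructed from the header hunks in this change; the leading parameter names and types are inferred from the call sites shown in the diff rather than copied from the header, so treat them as an approximation.

    /* Before: callers passed an explicit userspace_managed flag
     * (leading parameters inferred from the call sites below). */
    struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
                                   u32 big_page_size,
                                   u64 low_hole,
                                   u64 user_reserved,
                                   u64 kernel_reserved,
                                   u64 small_big_split,
                                   bool big_pages,
                                   bool userspace_managed, /* removed by this change */
                                   bool unified_va,
                                   const char *name);

    /* After: the flag is gone from the prototype and from every call site. */
    struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
                                   u32 big_page_size,
                                   u64 low_hole,
                                   u64 user_reserved,
                                   u64 kernel_reserved,
                                   u64 small_big_split,
                                   bool big_pages,
                                   bool unified_va,
                                   const char *name);

Every nvgpu_vm_init()/nvgpu_vm_do_init() call site in the hunks below drops the corresponding boolean argument, and the NVGPU_SUPPORT_USERSPACE_MANAGED_AS capability bit is no longer reported to userspace.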

View File

@@ -1165,7 +1165,7 @@ int nvgpu_gr_obj_ctx_init_golden_context_image(struct gk20a *g)
 U64(big_page_size) << U64(10)),
 kernel_size,
 0ULL,
-false, false, false, "golden_context");
+false, false, "golden_context");
 if (vm == NULL) {
 nvgpu_err(g, "vm init failed");
 err = -ENOMEM;

View File

@@ -1,7 +1,7 @@
 /*
 * GK20A Address Spaces
 *
-* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -62,8 +62,6 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
 u64 kernel_size = mm->channel.kernel_size;
 u64 pde_size, pde_size_mask;
 bool big_pages;
-const bool userspace_managed =
-(flags & NVGPU_AS_ALLOC_USERSPACE_MANAGED) != 0U;
 const bool unified_va =
 nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES) ||
 ((flags & NVGPU_AS_ALLOC_UNIFIED_VA) != 0U);
@@ -135,7 +133,7 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
 user_size,
 kernel_size,
 va_range_split,
-big_pages, userspace_managed, unified_va, name);
+big_pages, unified_va, name);
 if (vm == NULL) {
 return -ENOMEM;
 }

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2017-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -222,7 +222,6 @@ static int nvgpu_init_system_vm(struct mm_gk20a *mm)
 0ULL,
 true,
 false,
-false,
 "system");
 if (mm->pmu.vm == NULL) {
 return -ENOMEM;
@@ -281,7 +280,7 @@ static int nvgpu_init_cde_vm(struct mm_gk20a *mm)
 U64(big_page_size) << U64(10)),
 kernel_size,
 0ULL,
-false, false, false, "cde");
+false, false, "cde");
 if (mm->cde.vm == NULL) {
 return -ENOMEM;
 }
@@ -303,7 +302,7 @@ static int nvgpu_init_ce_vm(struct mm_gk20a *mm)
 U64(big_page_size) << U64(10)),
 kernel_size,
 0ULL,
-false, false, false, "ce");
+false, false, "ce");
 if (mm->ce.vm == NULL) {
 return -ENOMEM;
 }
@@ -370,7 +369,7 @@ static int nvgpu_init_bar1_vm(struct mm_gk20a *mm)
 0ULL,
 nvgpu_safe_sub_u64(mm->bar1.aperture_size, SZ_64K),
 0ULL,
-true, false, false,
+true, false,
 "bar1");
 if (mm->bar1.vm == NULL) {
 return -ENOMEM;
@@ -408,7 +407,7 @@ static int nvgpu_init_engine_ucode_vm(struct gk20a *g,
 ucode->vm = nvgpu_vm_init(g, big_page_size, SZ_4K,
 0ULL, nvgpu_safe_sub_u64(ucode->aperture_size, SZ_4K), 0ULL,
-false, false, false,
+false, false,
 address_space_name);
 if (ucode->vm == NULL) {
 return -ENOMEM;

View File

@@ -711,7 +711,6 @@ static int nvgpu_vm_init_attributes(struct mm_gk20a *mm,
 u64 user_reserved,
 u64 kernel_reserved,
 bool big_pages,
-bool userspace_managed,
 bool unified_va,
 const char *name)
 {
@@ -757,17 +756,10 @@ static int nvgpu_vm_init_attributes(struct mm_gk20a *mm,
 vm->va_limit = aperture_size;
 vm->big_page_size = vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG];
-vm->userspace_managed = userspace_managed;
 vm->unified_va = unified_va;
 vm->mmu_levels =
 g->ops.mm.gmmu.get_mmu_levels(g, vm->big_page_size);
-#ifdef CONFIG_NVGPU_GR_VIRTUALIZATION
-if (nvgpu_is_legacy_vgpu(g) && userspace_managed) {
-nvgpu_err(g, "vGPU: no userspace managed addr space support");
-return -ENOSYS;
-}
-#endif
 return 0;
 }
@@ -782,7 +774,6 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
 u64 kernel_reserved,
 u64 small_big_split,
 bool big_pages,
-bool userspace_managed,
 bool unified_va,
 const char *name)
 {
@@ -790,8 +781,7 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
 int err = 0;
 err = nvgpu_vm_init_attributes(mm, vm, big_page_size, low_hole,
-user_reserved, kernel_reserved, big_pages, userspace_managed,
-unified_va, name);
+user_reserved, kernel_reserved, big_pages, unified_va, name);
 if (err != 0) {
 return err;
 }
@@ -899,7 +889,6 @@ struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
 u64 kernel_reserved,
 u64 small_big_split,
 bool big_pages,
-bool userspace_managed,
 bool unified_va,
 const char *name)
 {
@@ -912,7 +901,7 @@ struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
 err = nvgpu_vm_do_init(&g->mm, vm, big_page_size, low_hole,
 user_reserved, kernel_reserved, small_big_split,
-big_pages, userspace_managed, unified_va, name);
+big_pages, unified_va, name);
 if (err != 0) {
 nvgpu_kfree(g, vm);
 return NULL;
@@ -1094,12 +1083,6 @@ int nvgpu_vm_get_buffers(struct vm_gk20a *vm,
 struct nvgpu_rbtree_node *node = NULL;
 u32 i = 0;
-if (vm->userspace_managed) {
-*mapped_buffers = NULL;
-*num_buffers = 0;
-return 0;
-}
 nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 if (vm->num_user_mapped_buffers == 0U) {
@@ -1362,22 +1345,20 @@ static int nvgpu_vm_new_mapping(struct vm_gk20a *vm,
 /*
 * Check if this buffer is already mapped.
 */
-if (!vm->userspace_managed) {
 nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 mapped_buffer = nvgpu_vm_find_mapping(vm,
 os_buf,
 map_addr,
 binfo_ptr->flags,
 map_key_kind);
 if (mapped_buffer != NULL) {
 nvgpu_ref_get(&mapped_buffer->ref);
 nvgpu_mutex_release(&vm->update_gmmu_lock);
 *mapped_buffer_arg = mapped_buffer;
 return 1;
 }
 nvgpu_mutex_release(&vm->update_gmmu_lock);
-}
 /*
 * Generate a new mapping!
@@ -1420,14 +1401,6 @@ static int nvgpu_vm_map_check_attributes(struct vm_gk20a *vm,
 (void)compr_kind;
-if (vm->userspace_managed &&
-((flags & NVGPU_VM_MAP_FIXED_OFFSET) == 0U)) {
-nvgpu_err(g,
-"non-fixed-offset mapping not available on "
-"userspace managed address spaces");
-return -EINVAL;
-}
 binfo_ptr->flags = flags;
 binfo_ptr->size = nvgpu_os_buf_get_size(os_buf);
 if (binfo_ptr->size == 0UL) {

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2017-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -70,10 +70,9 @@ int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm,
 return -EINVAL;
 }
-/* Find the space reservation, but it's ok to have none for
- * userspace-managed address spaces */
+/* Find the space reservation */
 vm_area = nvgpu_vm_area_find(vm, map_addr);
-if ((vm_area == NULL) && !vm->userspace_managed) {
+if (vm_area == NULL) {
 nvgpu_warn(g, "fixed offset mapping without space allocation");
 return -EINVAL;
 }

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -96,7 +96,7 @@ int nvgpu_perfbuf_init_vm(struct gk20a *g)
 nvgpu_safe_sub_u64(user_size, SZ_4K),
 kernel_size,
 0ULL,
-false, false, false, "perfbuf");
+false, false, "perfbuf");
 if (mm->perfbuf.vm == NULL) {
 return -ENOMEM;
 }

View File

@@ -44,7 +44,7 @@ int gp10b_mm_init_bar2_vm(struct gk20a *g)
 mm->bar2.vm = nvgpu_vm_init(g, big_page_size, SZ_4K,
 0ULL, nvgpu_safe_sub_u64(mm->bar2.aperture_size, SZ_4K), 0ULL,
-false, false, false, "bar2");
+false, false, "bar2");
 if (mm->bar2.vm == NULL) {
 return -ENOMEM;
 }

View File

@@ -1,7 +1,7 @@
 /*
 * GK20A Address Spaces
 *
-* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -49,11 +49,6 @@ struct gk20a_as_share {
 int id;
 };
-/**
-* AS allocation flag for userspace managed
-*/
-#define NVGPU_AS_ALLOC_USERSPACE_MANAGED BIT32(0)
 /**
 * AS allocation flag for unified VA
 */
@@ -97,8 +92,7 @@ int gk20a_vm_release_share(struct gk20a_as_share *as_share);
 * @param g [in] The GPU
 * @param big_page_size [in] Big page size to use for the VM,
 * set 0 for 64K big page size.
-* @param flags [in] NVGPU_AS_ALLOC_* flags. The flags are
-* NVGPU_AS_ALLOC_USERSPACE_MANAGED and
+* @param flags [in] NVGPU_AS_ALLOC_* flags. The flag is
 * NVGPU_AS_ALLOC_UNIFIED_VA.
 * @param va_range_start [in] Requested user managed memory start
 * address, used to map buffers, save data
@@ -117,8 +111,6 @@ int gk20a_vm_release_share(struct gk20a_as_share *as_share);
 * of two and it should be in the range supported big page sizes supported by the GPU.
 *
 * @note if \a big_page_size == 0, the default big page size(64K) is used.
-* @note The \a flags is always set as NVGPU_AS_ALLOC_USERSPACE_MANAGED(AS
-* allocation flag for userspace managed)
 *
 * @return 0 in case of success, < 0 in case of failure.
 *

View File

@@ -68,8 +68,6 @@ struct gk20a;
 "false if vidmem aperture actually points to sysmem"), \
 DEFINE_FLAG(NVGPU_MM_UNIFIED_MEMORY, \
 "unified or split memory with separate vidmem?"), \
-DEFINE_FLAG(NVGPU_SUPPORT_USERSPACE_MANAGED_AS, \
-"User-space managed address spaces support"), \
 DEFINE_FLAG(NVGPU_SUPPORT_IO_COHERENCE, \
 "IO coherence support is available"), \
 DEFINE_FLAG(NVGPU_SUPPORT_SPARSE_ALLOCS, \

View File

@@ -262,8 +262,6 @@ struct vm_gk20a {
 /** Page size used for mappings with this address space. */
 u32 big_page_size;
-/** Whether this address space is managed by user space or not. */
-bool userspace_managed;
 /** GPU and CPU using same address space or not. */
 bool unified_va;
@@ -797,7 +795,6 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
 u64 kernel_reserved,
 u64 small_big_split,
 bool big_pages,
-bool userspace_managed,
 bool unified_va,
 const char *name);
@@ -856,7 +853,6 @@ struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
 u64 kernel_reserved,
 u64 small_big_split,
 bool big_pages,
-bool userspace_managed,
 bool unified_va,
 const char *name);

View File

@@ -102,8 +102,6 @@ static u32 gk20a_as_translate_as_alloc_flags(struct gk20a *g, u32 flags)
 {
 u32 core_flags = 0;
-if (flags & NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED)
-core_flags |= NVGPU_AS_ALLOC_USERSPACE_MANAGED;
 if (flags & NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_UNIFIED_VA)
 core_flags |= NVGPU_AS_ALLOC_UNIFIED_VA;
@@ -234,8 +232,6 @@ static struct nvgpu_flags_mapping flags_mapping[] = {
 NVGPU_SUPPORT_CYCLE_STATS},
 {NVGPU_GPU_FLAGS_SUPPORT_CYCLE_STATS_SNAPSHOT,
 NVGPU_SUPPORT_CYCLE_STATS_SNAPSHOT},
-{NVGPU_GPU_FLAGS_SUPPORT_USERSPACE_MANAGED_AS,
-NVGPU_SUPPORT_USERSPACE_MANAGED_AS},
 {NVGPU_GPU_FLAGS_SUPPORT_TSG,
 NVGPU_SUPPORT_TSG},
 {NVGPU_GPU_FLAGS_SUPPORT_CLOCK_CONTROLS,

View File

@@ -353,7 +353,6 @@ void gk20a_init_linux_characteristics(struct gk20a *g)
 struct device *dev = dev_from_gk20a(g);
 nvgpu_set_enabled(g, NVGPU_SUPPORT_DETERMINISTIC_OPTS, true);
-nvgpu_set_enabled(g, NVGPU_SUPPORT_USERSPACE_MANAGED_AS, true);
 nvgpu_set_enabled(g, NVGPU_SUPPORT_REMAP, true);
 nvgpu_set_enabled(g, NVGPU_SUPPORT_BUFFER_METADATA, true);
 nvgpu_set_enabled(g, NVGPU_SUPPORT_MULTI_PROCESS_TSG_SHARING, true);

View File

@@ -103,8 +103,6 @@ struct nvgpu_gpu_zbc_query_table_args {
 #define NVGPU_GPU_FLAGS_SUPPORT_CYCLE_STATS (1ULL << 4)
 /* NVGPU_DBG_GPU_IOCTL_CYCLE_STATS_SNAPSHOT is available */
 #define NVGPU_GPU_FLAGS_SUPPORT_CYCLE_STATS_SNAPSHOT (1ULL << 6)
-/* User-space managed address spaces support */
-#define NVGPU_GPU_FLAGS_SUPPORT_USERSPACE_MANAGED_AS (1ULL << 7)
 /* Both gpu driver and device support TSG */
 #define NVGPU_GPU_FLAGS_SUPPORT_TSG (1ULL << 8)
 /* Clock control support */
@@ -420,7 +418,6 @@ struct nvgpu_alloc_as_args {
 * increments at kickoffs and decrements at job completion are
 * bypassed.
 */
-#define NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED (1 << 0)
 #define NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_UNIFIED_VA (1 << 1)
 __u32 flags;
 __u32 reserved; /* must be zero */

View File

@@ -339,12 +339,12 @@ test_unit_config.unit_config=2
 [mm.as]
 test_as_alloc_share.as_alloc_share_0k_um=2
 test_as_alloc_share.as_alloc_share_64k_um=2
-test_as_alloc_share.as_alloc_share_64k_um_as_fail=0
-test_as_alloc_share.as_alloc_share_64k_um_busy_fail_1=0
+test_as_alloc_share.as_alloc_share_64k_um_as_fail=2
+test_as_alloc_share.as_alloc_share_64k_um_busy_fail_1=2
 test_as_alloc_share.as_alloc_share_64k_um_busy_fail_2=2
-test_as_alloc_share.as_alloc_share_64k_um_vm_fail=0
-test_as_alloc_share.as_alloc_share_einval_um=0
-test_as_alloc_share.as_alloc_share_notp2_um=0
+test_as_alloc_share.as_alloc_share_64k_um_vm_fail=2
+test_as_alloc_share.as_alloc_share_einval_um=2
+test_as_alloc_share.as_alloc_share_notp2_um=2
 test_as_alloc_share.as_alloc_share_uva=2
 test_as_alloc_share.as_alloc_share_uva_enabled=2
 test_init_mm.init=0

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -120,7 +120,7 @@ int test_gr_ctx_error_injection(struct unit_module *m,
 vm = nvgpu_vm_init(g, SZ_4K, SZ_4K << 10,
 nvgpu_safe_sub_u64(1ULL << 37, SZ_4K << 10),
 (1ULL << 32), 0ULL,
-false, false, false, "dummy");
+false, false, "dummy");
 if (!vm) {
 unit_return_fail(m, "failed to allocate VM");
 }
@@ -132,7 +132,7 @@
 0ULL,
 nvgpu_safe_sub_u64(mm->bar1.aperture_size, low_hole),
 0ULL,
-true, false, false,
+true, false,
 "bar1");
 if (mm->bar1.vm == NULL) {
 unit_return_fail(m, "nvgpu_vm_init failed\n");

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -740,7 +740,7 @@ int test_gr_init_hal_error_injection(struct unit_module *m,
 vm = nvgpu_vm_init(g, SZ_4K, SZ_4K << 10,
 nvgpu_safe_sub_u64(1ULL << 37, SZ_4K << 10),
 (1ULL << 32), 0ULL,
-false, false, false, "dummy");
+false, false, "dummy");
 if (!vm) {
 unit_return_fail(m, "failed to allocate VM");
 }

View File

@@ -334,7 +334,7 @@ static int gr_test_intr_allocate_ch_tsg(struct unit_module *m,
 vm = nvgpu_vm_init(g, SZ_4K, SZ_4K << 10,
 nvgpu_safe_sub_u64(1ULL << 37, SZ_4K << 10),
 (1ULL << 32), 0ULL,
-false, false, false, "dummy");
+false, false, "dummy");
 if (!vm) {
 unit_err(m, "failed to allocate VM");
 goto ch_cleanup;

View File

@@ -156,7 +156,7 @@ int test_gr_obj_ctx_error_injection(struct unit_module *m,
 vm = nvgpu_vm_init(g, SZ_4K, SZ_4K << 10,
 nvgpu_safe_sub_u64(1ULL << 37, SZ_4K << 10),
 (1ULL << 32), 0ULL,
-false, false, false, "dummy");
+false, false, "dummy");
 if (!vm) {
 unit_return_fail(m, "failed to allocate VM");
 }

View File

@@ -271,7 +271,7 @@ int test_ltc_init_support(struct unit_module *m,
 0ULL,
 nvgpu_safe_sub_u64(mm->bar1.aperture_size, low_hole),
 0ULL,
-true, false, false,
+true, false,
 "bar1");
 if (mm->bar1.vm == NULL) {
 unit_return_fail(m, "nvgpu_vm_init failed\n");

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -107,7 +107,6 @@ static struct vm_gk20a *init_vm_env(struct unit_module *m, struct gk20a *g,
 0ULL,
 big_pages,
 false,
-false,
 name);
 if (test_vm == NULL) {

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -219,7 +219,6 @@ int test_nvgpu_buddy_allocator_alloc(struct unit_module *m,
 * - aperture_size = GK20A_PMU_VA_SIZE.
 * - kernel_reserved = aperture_size - low_hole.
 * - flags = GPU_ALLOC_GVA_SPACE, GVA space enabled.
-* - userspace_managed = false, unified_va = false.
 * - big_pages = false.
 * - Initialize buddy allocator for this test.
 * - Base address = 1K.
@@ -265,7 +264,6 @@ int test_buddy_allocator_with_small_pages(struct unit_module *m,
 * - aperture_size = GK20A_PMU_VA_SIZE.
 * - kernel_reserved = aperture_size - low_hole.
 * - flags = GPU_ALLOC_GVA_SPACE, GVA space enabled.
-* - userspace_managed = false, unified_va = false.
 * - big_pages = true.
 * - Initialize buddy allocator for this test.
 * - Base address = 64M, PDE aligned.

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -55,6 +55,7 @@
 */
 static int global_id_count;
+#if 0
 /* Parameters to test standard cases of allocation */
 static struct test_parameters test_64k_user_managed = {
 .big_page_size = SZ_64K,
@@ -69,6 +70,7 @@ static struct test_parameters test_0k_user_managed = {
 .flags = NVGPU_AS_ALLOC_USERSPACE_MANAGED,
 .expected_error = 0
 };
+#endif
 static struct test_parameters test_64k_unified_va = {
 .big_page_size = SZ_64K,
@@ -85,6 +87,7 @@ static struct test_parameters test_64k_unified_va_enabled = {
 .unify_address_spaces_flag = true
 };
+#if 0
 static struct test_parameters test_einval_user_managed = {
 .big_page_size = 1,
 .small_big_split = (SZ_1G * 56ULL),
@@ -131,7 +134,7 @@ static struct test_parameters test_64k_user_managed_busy_fail_2 = {
 .expected_error = 0,
 .special_case = SPECIAL_CASE_GK20A_BUSY_RELEASE
 };
+#endif
 /*
 * Init the minimum set of HALs to use DMA amd GMMU features, then call the
 * init_mm base function.
@@ -270,6 +273,7 @@ int test_as_alloc_share(struct unit_module *m, struct gk20a *g, void *args)
 struct unit_module_test nvgpu_mm_as_tests[] = {
 UNIT_TEST(init, test_init_mm, NULL, 0),
+#if 0
 UNIT_TEST(as_alloc_share_64k_um_as_fail, test_as_alloc_share,
 (void *) &test_64k_user_managed_as_fail, 0),
 UNIT_TEST(as_alloc_share_64k_um_vm_fail, test_as_alloc_share,
@@ -286,6 +290,7 @@ struct unit_module_test nvgpu_mm_as_tests[] = {
 (void *) &test_einval_user_managed, 0),
 UNIT_TEST(as_alloc_share_notp2_um, test_as_alloc_share,
 (void *) &test_notp2_user_managed, 0),
+#endif
 UNIT_TEST(as_alloc_share_uva, test_as_alloc_share,
 (void *) &test_64k_unified_va, 2),
 UNIT_TEST(as_alloc_share_uva_enabled, test_as_alloc_share,

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -199,7 +199,6 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 0ULL,
 true,
 false,
-false,
 "system");
 if (mm->pmu.vm == NULL) {
 unit_return_fail(m, "nvgpu_vm_init failed\n");
@@ -212,7 +211,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 0ULL,
 nvgpu_safe_sub_u64(mm->bar1.aperture_size, low_hole),
 0ULL,
-true, false, false,
+true, false,
 "bar1");
 if (mm->bar1.vm == NULL) {
 unit_return_fail(m, "nvgpu_vm_init failed\n");

View File

@@ -356,7 +356,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 0ULL,
 nvgpu_safe_sub_u64(mm->bar1.aperture_size, low_hole),
 0ULL,
-true, false, false,
+true, false,
 "bar1");
 if (mm->bar1.vm == NULL) {
 unit_return_fail(m, "nvgpu_vm_init failed\n");
@@ -370,7 +370,6 @@
 0ULL,
 true,
 false,
-false,
 "system");
 if (mm->pmu.vm == NULL) {
 unit_return_fail(m, "nvgpu_vm_init failed\n");
@@ -983,7 +982,7 @@ static struct vm_gk20a *init_test_req_vm(struct gk20a *g)
 return nvgpu_vm_init(g, g->ops.mm.gmmu.get_default_big_page_size(),
 low_hole, user_reserved, kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
-big_pages, true, true, "testmem");
+big_pages, true, "testmem");
 }
 int test_nvgpu_page_table_c1_full(struct unit_module *m, struct gk20a *g,

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -164,7 +164,6 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 0ULL,
 true,
 false,
-false,
 "system");
 if (mm->pmu.vm == NULL) {
 unit_return_fail(m, "'system' nvgpu_vm_init failed\n");

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -153,7 +153,6 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 0ULL,
 true,
 false,
-false,
 "system");
 if (mm->pmu.vm == NULL) {
 unit_return_fail(m, "'system' nvgpu_vm_init failed\n");
@@ -164,7 +163,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 mm->bar1.vm = nvgpu_vm_init(g,
 g->ops.mm.gmmu.get_default_big_page_size(),
 SZ_4K, 0ULL, nvgpu_safe_sub_u64(mm->bar1.aperture_size, SZ_4K),
-0ULL, false, false, false, "bar1");
+0ULL, false, false, "bar1");
 if (mm->bar1.vm == NULL) {
 unit_return_fail(m, "'bar1' nvgpu_vm_init failed\n");
 }

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -144,7 +144,6 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 0ULL,
 true,
 false,
-false,
 "system");
 if (mm->pmu.vm == NULL) {
 unit_return_fail(m, "'system' nvgpu_vm_init failed\n");

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -139,7 +139,6 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 0ULL,
 true,
 false,
-false,
 "system");
 if (mm->pmu.vm == NULL) {
 unit_return_fail(m, "'system' nvgpu_vm_init failed\n");
@@ -150,7 +149,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 mm->bar2.vm = nvgpu_vm_init(g,
 g->ops.mm.gmmu.get_default_big_page_size(),
 SZ_4K, 0ULL, nvgpu_safe_sub_u64(mm->bar2.aperture_size, SZ_4K),
-0ULL, false, false, false, "bar2");
+0ULL, false, false, "bar2");
 if (mm->bar2.vm == NULL) {
 unit_return_fail(m, "'bar2' nvgpu_vm_init failed\n");
 }

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -181,7 +181,6 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 0ULL,
 true,
 false,
-false,
 "system");
 if (mm->pmu.vm == NULL) {
 unit_return_fail(m, "'system' nvgpu_vm_init failed\n");
@@ -192,7 +191,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 mm->bar2.vm = nvgpu_vm_init(g,
 g->ops.mm.gmmu.get_default_big_page_size(),
 SZ_4K, 0ULL, nvgpu_safe_sub_u64(mm->bar2.aperture_size, SZ_4K),
-0ULL, false, false, false, "bar2");
+0ULL, false, false, "bar2");
 if (mm->bar2.vm == NULL) {
 unit_return_fail(m, "'bar2' nvgpu_vm_init failed\n");
 }

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -204,7 +204,6 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 0ULL,
 true,
 false,
-false,
 "system");
 if (mm->pmu.vm == NULL) {
 unit_return_fail(m, "'system' nvgpu_vm_init failed\n");
@@ -217,7 +216,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 0ULL,
 nvgpu_safe_sub_u64(mm->bar1.aperture_size, SZ_64K),
 0ULL,
-true, false, false,
+true, false,
 "bar1");
 if (mm->bar1.vm == NULL) {
 return -ENOMEM;
@@ -229,7 +228,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 mm->bar2.vm = nvgpu_vm_init(g,
 g->ops.mm.gmmu.get_default_big_page_size(),
 SZ_4K, 0ULL, nvgpu_safe_sub_u64(mm->bar2.aperture_size, SZ_4K),
-0ULL, false, false, false, "bar2");
+0ULL, false, false, "bar2");
 if (mm->bar2.vm == NULL) {
 unit_return_fail(m, "'bar2' nvgpu_vm_init failed\n");
 }

View File

@@ -151,6 +151,7 @@ static int hal_fb_tlb_invalidate_error(struct gk20a *g, struct nvgpu_mem *pdb)
 return -1;
 }
+#if 0
 /* Dummy HAL for vm_as_alloc_share that always fails */
 static int hal_vm_as_alloc_share_error(struct gk20a *g, struct vm_gk20a *vm)
 {
@@ -162,6 +163,7 @@ static int hal_vm_as_alloc_share_success(struct gk20a *g, struct vm_gk20a *vm)
 {
 return 0;
 }
+#endif
 /* Initialize test environment */
 static int init_test_env(struct unit_module *m, struct gk20a *g)
@@ -227,7 +229,6 @@ static struct vm_gk20a *create_test_vm(struct unit_module *m, struct gk20a *g)
 kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
 true,
-false,
 true,
 __func__);
 return vm;
@@ -348,6 +349,7 @@ int test_map_buffer_error_cases(struct unit_module *m, struct gk20a *g,
 goto free_sgt_os_buf;
 }
+#if 0
 /* Non-fixed offset with userspace managed VM */
 vm->userspace_managed = true;
 ret = nvgpu_vm_map(vm,
@@ -370,6 +372,7 @@
 ret = UNIT_FAIL;
 goto free_sgt_os_buf;
 }
+#endif
 /* Invalid buffer size */
 os_buf.size = 0;
@@ -1142,6 +1145,7 @@ static int map_buffer(struct unit_module *m,
 goto free_mapped_buf;
 }
+#if 0
 /*
 * If VM is userspace managed, there should not be any accessible
 * buffers.
@@ -1154,6 +1158,7 @@
 ret = UNIT_FAIL;
 goto free_mapped_buf;
 }
+#endif
 ret = UNIT_SUCCESS;
@@ -1297,7 +1302,6 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
 big_pages,
-false,
 true,
 __func__);
 nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
@@ -1319,7 +1323,6 @@
 default_aperture_size, /* invalid aperture size */
 nvgpu_gmmu_va_small_page_limit(),
 big_pages,
-false,
 true,
 __func__)
 )) {
@@ -1331,6 +1334,7 @@
 /* Make nvgpu_vm_do_init fail with invalid parameters */
 vm = nvgpu_kzalloc(g, sizeof(*vm));
+#if 0
 /* vGPU with userspace managed */
 g->is_virtual = true;
 ret = nvgpu_vm_do_init(&g->mm, vm,
@@ -1358,7 +1362,7 @@
 ret = UNIT_FAIL;
 goto exit;
 }
+#endif
 /* Invalid VM configuration - This scenario is not feasible */
 low_hole = SZ_1M * 64;
@@ -1368,7 +1372,7 @@
 g->ops.mm.gmmu.get_default_big_page_size(),
 low_hole, user_vma, kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
-big_pages, false, true, __func__);
+big_pages, true, __func__);
 nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
 if (ret != -ENOMEM) {
 unit_err(m, "nvgpu_vm_do_init did not fail as expected (7).\n");
@@ -1382,7 +1386,7 @@
 g->ops.mm.gmmu.get_default_big_page_size(),
 low_hole, user_vma, kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
-big_pages, false, true, __func__);
+big_pages, true, __func__);
 nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
 if (ret != -ENOMEM) {
 unit_err(m, "nvgpu_vm_do_init did not fail as expected (8).\n");
@@ -1396,7 +1400,7 @@
 g->ops.mm.gmmu.get_default_big_page_size(),
 low_hole, user_vma, kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
-big_pages, false, false, __func__);
+big_pages, false, __func__);
 nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
 if (ret != -ENOMEM) {
 unit_err(m, "nvgpu_vm_do_init didn't fail as expected (9).\n");
@@ -1410,7 +1414,7 @@
 g->ops.mm.gmmu.get_default_big_page_size(),
 low_hole, user_vma, kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
-big_pages, false, false, __func__);
+big_pages, false, __func__);
 nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
 if (ret != -ENOMEM) {
 unit_err(m, "nvgpu_vm_do_init didn't fail as expected (10).\n");
@@ -1423,7 +1427,7 @@
 g->ops.mm.gmmu.get_default_big_page_size(),
 low_hole, user_vma, kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
-big_pages, false, false,
+big_pages, false,
 "very_long_vm_name_to_fail_vm_init");
 if (ret != -EINVAL) {
 unit_err(m, "nvgpu_vm_do_init didn't fail as expected (12).\n");
@@ -1436,7 +1440,7 @@
 g->ops.mm.gmmu.get_default_big_page_size(),
 low_hole, user_vma, kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
-big_pages, false, false, __func__);
+big_pages, false, __func__);
 if (ret != 0) {
 unit_err(m, "nvgpu_vm_do_init did not succeed as expected (B).\n");
 ret = UNIT_FAIL;
@@ -1448,7 +1452,7 @@
 g->ops.mm.gmmu.get_default_big_page_size(),
 low_hole, user_vma, kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
-false, false, false, __func__);
+false, false, __func__);
 if (ret != 0) {
 unit_err(m, "nvgpu_vm_do_init did not succeed as expected (C).\n");
 ret = UNIT_FAIL;
@@ -1461,7 +1465,7 @@
 nvgpu_gmmu_va_small_page_limit(),
 0ULL, kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(), big_pages,
-false, false, __func__);
+false, __func__);
 if (ret != 0) {
 unit_err(m, "nvgpu_vm_do_init did not succeed as expected (D).\n");
 ret = UNIT_FAIL;
@@ -1547,7 +1551,6 @@ int test_map_buf(struct unit_module *m, struct gk20a *g, void *__args)
 kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
 big_pages,
-false,
 true,
 __func__);
 if (vm == NULL) {
@@ -1793,7 +1796,6 @@ int test_map_buf_gpu_va(struct unit_module *m,
 kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
 big_pages,
-false,
 true,
 __func__);
 if (vm == NULL) {
@@ -2048,7 +2050,6 @@ int test_batch(struct unit_module *m, struct gk20a *g, void *__args)
 kernel_reserved,
 nvgpu_gmmu_va_small_page_limit(),
 big_pages,
-false,
 true,
 __func__);
 if (vm == NULL) {

View File

@@ -93,7 +93,6 @@ static int init_channel_vm(struct unit_module *m, struct nvgpu_channel *ch)
 0ULL,
 true,
 false,
-false,
 "system");
 if (mm->pmu.vm == NULL) {
 unit_return_fail(m, "nvgpu_vm_init failed\n");
@@ -108,7 +107,7 @@ static int init_channel_vm(struct unit_module *m, struct nvgpu_channel *ch)
 0ULL,
 nvgpu_safe_sub_u64(mm->bar1.aperture_size, low_hole),
 0ULL,
-true, false, false,
+true, false,
 "bar1");
 if (mm->bar1.vm == NULL) {
 unit_return_fail(m, "nvgpu_vm_init failed\n");