gpu: nvgpu: unit: mm: add coverage & traceability

This patch adds new tests to improve test coverage and updates the
test target tags to increase traceability.

Jira NVGPU-4780

Change-Id: I87341efa3fa7d741f7abb611ff28ad6d5e1c6880
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2279644
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Vedashree Vidwans
Date: 2019-12-31 11:00:24 -08:00
Committed-by: Alex Waterman
Parent: 4bcd419a0b
Commit: f46c3064ed
27 changed files with 934 additions and 240 deletions


@@ -294,7 +294,7 @@ static void nvgpu_buddy_allocator_destroy(struct nvgpu_allocator *na)
struct nvgpu_rbtree_node *node = NULL;
struct nvgpu_buddy *bud;
struct nvgpu_fixed_alloc *falloc;
struct nvgpu_buddy_allocator *a = na->priv;
struct nvgpu_buddy_allocator *a = buddy_allocator(na);
alloc_lock(na);
@@ -909,7 +909,7 @@ static u64 nvgpu_buddy_balloc_pte(struct nvgpu_allocator *na, u64 len,
{
u64 order, addr;
u32 pte_size;
struct nvgpu_buddy_allocator *a = na->priv;
struct nvgpu_buddy_allocator *a = buddy_allocator(na);
if (len == 0ULL) {
alloc_dbg(balloc_owner(a), "Alloc fail");
@@ -968,7 +968,7 @@ static u64 nvgpu_balloc_fixed_buddy_locked(struct nvgpu_allocator *na,
u64 ret, real_bytes = 0;
struct nvgpu_buddy *bud;
struct nvgpu_fixed_alloc *falloc = NULL;
struct nvgpu_buddy_allocator *a = na->priv;
struct nvgpu_buddy_allocator *a = buddy_allocator(na);
/* If base isn't aligned to an order 0 block, fail. */
nvgpu_assert(a->blk_size > 0ULL);
@@ -1050,7 +1050,7 @@ static u64 nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *na,
u64 base, u64 len, u32 page_size)
{
u64 alloc;
struct nvgpu_buddy_allocator *a = na->priv;
struct nvgpu_buddy_allocator *a = buddy_allocator(na);
alloc_lock(na);
alloc = nvgpu_balloc_fixed_buddy_locked(na, base, len, page_size);
@@ -1071,7 +1071,7 @@ static void nvgpu_buddy_bfree_locked(struct nvgpu_allocator *na, u64 addr)
{
struct nvgpu_buddy *bud;
struct nvgpu_fixed_alloc *falloc;
struct nvgpu_buddy_allocator *a = na->priv;
struct nvgpu_buddy_allocator *a = buddy_allocator(na);
/*
* First see if this is a fixed alloc. If not fall back to a regular
@@ -1148,7 +1148,7 @@ static bool nvgpu_buddy_reserve_is_possible(struct nvgpu_buddy_allocator *a,
static int nvgpu_buddy_reserve_co(struct nvgpu_allocator *na,
struct nvgpu_alloc_carveout *co)
{
struct nvgpu_buddy_allocator *a = na->priv;
struct nvgpu_buddy_allocator *a = buddy_allocator(na);
u64 addr;
int err = 0;
@@ -1203,21 +1203,21 @@ static void nvgpu_buddy_release_co(struct nvgpu_allocator *na,
static u64 nvgpu_buddy_alloc_length(struct nvgpu_allocator *a)
{
struct nvgpu_buddy_allocator *ba = a->priv;
struct nvgpu_buddy_allocator *ba = buddy_allocator(a);
return ba->length;
}
static u64 nvgpu_buddy_alloc_base(struct nvgpu_allocator *a)
{
struct nvgpu_buddy_allocator *ba = a->priv;
struct nvgpu_buddy_allocator *ba = buddy_allocator(a);
return ba->start;
}
static bool nvgpu_buddy_alloc_inited(struct nvgpu_allocator *a)
{
struct nvgpu_buddy_allocator *ba = a->priv;
struct nvgpu_buddy_allocator *ba = buddy_allocator(a);
bool inited = ba->initialized;
nvgpu_smp_rmb();
@@ -1226,14 +1226,14 @@ static bool nvgpu_buddy_alloc_inited(struct nvgpu_allocator *a)
static u64 nvgpu_buddy_alloc_end(struct nvgpu_allocator *a)
{
struct nvgpu_buddy_allocator *ba = a->priv;
struct nvgpu_buddy_allocator *ba = buddy_allocator(a);
return ba->end;
}
static u64 nvgpu_buddy_alloc_space(struct nvgpu_allocator *a)
{
struct nvgpu_buddy_allocator *ba = a->priv;
struct nvgpu_buddy_allocator *ba = buddy_allocator(a);
u64 space;
alloc_lock(a);
@@ -1259,7 +1259,7 @@ static void nvgpu_buddy_print_stats(struct nvgpu_allocator *na,
struct nvgpu_rbtree_node *node = NULL;
struct nvgpu_fixed_alloc *falloc;
struct nvgpu_alloc_carveout *tmp;
struct nvgpu_buddy_allocator *a = na->priv;
struct nvgpu_buddy_allocator *a = buddy_allocator(na);
alloc_pstat(s, na, "base = %llu, limit = %llu, blk_size = %llu",
a->base, a->length, a->blk_size);

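The hunks above replace direct na->priv dereferences with the typed
buddy_allocator() accessor. A minimal sketch of what such a helper
presumably looks like (the actual definition lives in the buddy
allocator's private header, which is not part of this excerpt):

/* Sketch only: typed wrapper around the allocator's private pointer. */
static inline struct nvgpu_buddy_allocator *buddy_allocator(
	struct nvgpu_allocator *a)
{
	return (struct nvgpu_buddy_allocator *)a->priv;
}

Centralizing the cast in one helper keeps the priv layout assumption in
a single place; page_allocator(na) later in this change follows the
same pattern.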

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -681,26 +681,6 @@ int nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g,
const char *name, void *priv, bool dbg,
const struct nvgpu_allocator_ops *ops);
/**
* @brief Enable allocator debug.
*
* @param[in] a Pointer to nvgpu allocator.
*/
static inline void nvgpu_alloc_enable_dbg(struct nvgpu_allocator *a)
{
a->debug = true;
}
/**
* @brief Disable allocator debug.
*
* @param[in] a Pointer to nvgpu allocator.
*/
static inline void nvgpu_alloc_disable_dbg(struct nvgpu_allocator *a)
{
a->debug = false;
}
/*
* Debug stuff.
*/


@@ -548,6 +548,7 @@ nvgpu_mc_intr_stall_unit_config
nvgpu_memcmp
nvgpu_memcpy
nvgpu_memset
nvgpu_mem_create_from_mem
nvgpu_mem_create_from_phys
nvgpu_mem_get_addr
nvgpu_mem_iommu_translate

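The export list gains nvgpu_mem_create_from_mem, which the new
nvgpu_mem unit test (registered later in this change) exercises. A
hedged sketch of how a test might drive it; the
(g, dest, src, start_page, nr_pages) signature is an assumption modeled
on its sibling nvgpu_mem_create_from_phys, not something shown in this
diff:

/* Hypothetical usage sketch; the signature is assumed. */
static int example_mem_from_mem(struct unit_module *m, struct gk20a *g,
				struct nvgpu_mem *src)
{
	struct nvgpu_mem dest = { };

	/* Create a sub-mem windowing the first two pages of src. */
	if (nvgpu_mem_create_from_mem(g, &dest, src, 0, 2) != 0) {
		unit_return_fail(m, "create_from_mem failed\n");
	}

	return UNIT_SUCCESS;
}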

@@ -65,7 +65,7 @@ INPUT += ../../../userspace/units/mm/as/as.h
INPUT += ../../../userspace/units/mm/dma/dma.h
INPUT += ../../../userspace/units/mm/gmmu/page_table/page_table.h
INPUT += ../../../userspace/units/mm/hal/cache/flush_gk20a_fusa/flush-gk20a-fusa.h
INPUT += ../../../userspace/units/mm/hal/cache/flush_gk20a_fusa/flush-gv11b-fusa.h
INPUT += ../../../userspace/units/mm/hal/cache/flush_gv11b_fusa/flush-gv11b-fusa.h
INPUT += ../../../userspace/units/mm/hal/gmmu/gmmu_gk20a_fusa/gmmu-gk20a-fusa.h
INPUT += ../../../userspace/units/mm/hal/gmmu/gmmu_gm20b_fusa/gmmu-gm20b-fusa.h
INPUT += ../../../userspace/units/mm/hal/gmmu/gmmu_gp10b_fusa/gmmu-gp10b-fusa.h


@@ -756,13 +756,13 @@
"test_level": 0
},
{
"test": "test_env_clean",
"test": "test_env_clean_flush_gk20a_fusa",
"case": "env_clean",
"unit": "flush_gk20a_fusa",
"test_level": 0
},
{
"test": "test_env_init",
"test": "test_env_init_flush_gk20a_fusa",
"case": "env_init",
"unit": "flush_gk20a_fusa",
"test_level": 0
@@ -876,13 +876,13 @@
"test_level": 0
},
{
"test": "test_env_clean",
"test": "test_env_clean_flush_gv11b_fusa",
"case": "env_clean",
"unit": "flush_gv11b_fusa",
"test_level": 0
},
{
"test": "test_env_init",
"test": "test_env_init_flush_gv11b_fusa",
"case": "env_init",
"unit": "flush_gv11b_fusa",
"test_level": 0
@@ -1536,13 +1536,13 @@
"test_level": 0
},
{
"test": "test_env_clean",
"test": "test_env_clean_mm_gp10b_fusa",
"case": "env_clean",
"unit": "mm_gp10b_fusa",
"test_level": 0
},
{
"test": "test_env_init",
"test": "test_env_init_mm_gp10b_fusa",
"case": "env_init",
"unit": "mm_gp10b_fusa",
"test_level": 0
@@ -1566,13 +1566,13 @@
"test_level": 0
},
{
"test": "test_env_clean",
"test": "test_env_clean_mm_gv11b_fusa",
"case": "env_clean",
"unit": "mm_gv11b_fusa",
"test_level": 0
},
{
"test": "test_env_init",
"test": "test_env_init_mm_gv11b_fusa",
"case": "env_init",
"unit": "mm_gv11b_fusa",
"test_level": 0
@@ -1620,13 +1620,13 @@
"test_level": 0
},
{
"test": "test_env_clean",
"test": "test_env_clean_mm_mmu_fault_gv11b_fusa",
"case": "env_clean",
"unit": "mmu_fault_gv11b_fusa",
"test_level": 0
},
{
"test": "test_env_init",
"test": "test_env_init_mm_mmu_fault_gv11b_fusa",
"case": "env_init",
"unit": "mmu_fault_gv11b_fusa",
"test_level": 0
@@ -2021,6 +2021,12 @@
"unit": "nvgpu_allocator",
"test_level": 0
},
{
"test": "test_nvgpu_allocator_init",
"case": "allocator_init",
"unit": "nvgpu_allocator",
"test_level": 0
},
{
"test": "test_nvgpu_alloc_common_init",
"case": "common_init",
@@ -2897,6 +2903,12 @@
"unit": "nvgpu_gr_obj_ctx",
"test_level": 0
},
{
"test": "test_nvgpu_mem_create_from_mem",
"case": "create_mem_from_mem",
"unit": "nvgpu_mem",
"test_level": 0
},
{
"test": "test_nvgpu_mem_create_from_phys",
"case": "mem_create_from_phys",
@@ -2915,6 +2927,12 @@
"unit": "nvgpu_mem",
"test_level": 0
},
{
"test": "test_nvgpu_aperture_str",
"case": "nvgpu_aperture_name",
"unit": "nvgpu_mem",
"test_level": 0
},
{
"test": "test_nvgpu_mem_phys_ops",
"case": "nvgpu_mem_phys_ops",
@@ -3647,6 +3665,12 @@
"unit": "page_table",
"test_level": 0
},
{
"test": "test_nvgpu_gmmu_map_unmap_adv",
"case": "gmmu_map_unmap_tlb_invalidate_fail",
"unit": "page_table",
"test_level": 0
},
{
"test": "test_nvgpu_gmmu_map_unmap",
"case": "gmmu_map_unmap_unmapped",
@@ -3689,6 +3713,12 @@
"unit": "page_table",
"test_level": 0
},
{
"test": "test_nvgpu_gmmu_map_unmap_map_fail",
"case": "map_fail_tlb_invalidate",
"unit": "page_table",
"test_level": 0
},
{
"case": "req_fixed_address",
"vc": "V4",


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -37,7 +37,10 @@ struct unit_module;
*
* Description: Initialize bitmap allocator.
*
* Test Type: Feature
* Test Type: Feature, Error injection
*
* Targets: nvgpu_bitmap_allocator_init, nvgpu_bitmap_check_argument_limits,
* nvgpu_allocator.ops.fini, nvgpu_alloc_to_gpu
*
* Input: None
*
@@ -61,6 +64,9 @@ int test_nvgpu_bitmap_allocator_init(struct unit_module *m,
*
* Test Type: Feature
*
* Targets: nvgpu_allocator.ops.base, nvgpu_allocator.ops.length,
* nvgpu_allocator.ops.end, nvgpu_allocator.ops.inited
*
* Input: test_nvgpu_bitmap_allocator_init
*
* Steps:
@@ -80,7 +86,12 @@ int test_nvgpu_bitmap_allocator_ops(struct unit_module *m,
*
* Description: Allocate various sizes of memory to test different scenarios.
*
* Test Type: Feature
* Test Type: Feature, Error injection
*
* Targets: nvgpu_allocator.ops.alloc, nvgpu_allocator.ops.free_alloc,
* nvgpu_allocator.ops.alloc_fixed, nvgpu_allocator.ops.free_fixed,
* nvgpu_bitmap_alloc_from_rbtree_node, bitmap_allocator,
* alloc_lock, alloc_unlock
*
* Input: test_nvgpu_bitmap_allocator_init
*
@@ -109,6 +120,8 @@ int test_nvgpu_bitmap_allocator_alloc(struct unit_module *m,
*
* Test Type: Other (clean up)
*
* Targets: nvgpu_allocator.ops.fini
*
* Input: test_nvgpu_bitmap_allocator_init
*
* Steps:
@@ -128,6 +141,11 @@ int test_nvgpu_bitmap_allocator_destroy(struct unit_module *m,
*
* Test Type: Feature
*
* Targets: nvgpu_allocator_init, nvgpu_bitmap_allocator_init,
* nvgpu_bitmap_check_argument_limits, nvgpu_allocator.ops.alloc,
* nvgpu_allocator.ops.free_alloc, nvgpu_allocator.ops.alloc_fixed,
* nvgpu_allocator.ops.free_fixed, nvgpu_allocator.ops.fini
*
* Input: None
*
* Steps:

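Several of the Targets entries above (nvgpu_allocator.ops.base,
.length, .end, .inited) are function pointers rather than free
functions. A minimal sketch of how a test reaches them, assuming an
allocator na that has already been initialized as in the tests
themselves:

/* Sketch: exercising ops-table targets on an initialized allocator. */
static int example_query_ops(struct unit_module *m,
				struct nvgpu_allocator *na)
{
	u64 base = na->ops->base(na);
	u64 len = na->ops->length(na);
	u64 end = na->ops->end(na);

	if (!na->ops->inited(na)) {
		unit_return_fail(m, "allocator not initialized\n");
	}

	/* For a simple linear range, end is assumed to equal base + len. */
	if (end != base + len) {
		unit_return_fail(m, "unexpected allocator geometry\n");
	}

	return UNIT_SUCCESS;
}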

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -39,6 +39,7 @@
#include "buddy_allocator.h"
#define SZ_8K (SZ_4K << 1)
#define SZ_16K (SZ_4K << 2)
#define BA_DEFAULT_BASE SZ_4K
#define BA_DEFAULT_SIZE SZ_1M
#define BA_DEFAULT_BLK_SIZE SZ_4K
@@ -150,7 +151,7 @@ int test_buddy_allocator_with_big_pages(struct unit_module *m,
}
/*
* Initialize buddy allocator, base = 0 and blk_size not pde aligned
* Initialize buddy allocator, base = 0
* Expect to fail
*/
if (nvgpu_allocator_init(g, na, vm_big_pages, "test", 0ULL, size,
@@ -160,6 +161,17 @@ int test_buddy_allocator_with_big_pages(struct unit_module *m,
"despite base=0, blk_size not pde aligned\n");
}
/*
* Initialize buddy allocator, base = 256M
* Expect to fail
*/
if (nvgpu_allocator_init(g, na, vm_big_pages, "test", SZ_256M, SZ_64K,
blk_size, max_order, flags, BUDDY_ALLOCATOR) == 0) {
free_vm_env(m, g, vm_big_pages);
unit_return_fail(m, "ba_big_pages inited "
"despite base=0, blk_size not pde aligned\n");
}
/* Initialize buddy allocator with big pages for this test */
if (nvgpu_allocator_init(g, na, vm_big_pages, "test", base, size,
blk_size, max_order, flags, BUDDY_ALLOCATOR) != 0) {
@@ -279,6 +291,7 @@ int test_buddy_allocator_with_small_pages(struct unit_module *m,
u64 max_order = 10;
u64 flags = GPU_ALLOC_GVA_SPACE;
u64 addr;
struct nvgpu_buddy_allocator *ba;
struct nvgpu_posix_fault_inj *kmem_fi =
nvgpu_kmem_get_fault_injection();
@@ -411,6 +424,22 @@ int test_buddy_allocator_with_small_pages(struct unit_module *m,
}
na->ops->fini(na);
/* Request align_order > ba->max_order */
if (nvgpu_allocator_init(g, na, vm_small_pages, "test", base, size,
blk_size, 5, flags, BUDDY_ALLOCATOR) != 0) {
free_vm_env(m, g, vm_small_pages);
unit_return_fail(m, "ba small pages init failed\n");
}
ba = na->priv;
addr = na->ops->alloc_fixed(na, ba->start, SZ_1M, SZ_4K);
if (addr != 0) {
unit_err(m, "%d: Allocated with align_order > ba->max_order\n",
__LINE__);
goto fail;
}
return UNIT_SUCCESS;
fail:
@@ -431,7 +460,6 @@ int test_nvgpu_buddy_allocator_alloc(struct unit_module *m,
u64 max_order = 0;
u64 flags = 0ULL;
u64 addr;
u64 len_orig, split_orig, alloced_orig;
struct nvgpu_buddy_allocator *ba;
struct nvgpu_posix_fault_inj *kmem_fi =
nvgpu_kmem_get_fault_injection();
@@ -553,8 +581,7 @@ int test_nvgpu_buddy_allocator_alloc(struct unit_module *m,
/*
* Test nvgpu_buddy_allocator_destroy()
*/
len_orig = ba->buddy_list_len[max_order/2];
ba->buddy_list_len[max_order/2] = 100;
ba->buddy_list_len[0] = 100;
if (!EXPECT_BUG(na->ops->fini(na))) {
unit_err(m, "%d: Excess buddies didn't trigger BUG()\n",
__LINE__);
@@ -565,29 +592,28 @@ int test_nvgpu_buddy_allocator_alloc(struct unit_module *m,
* by BUG()
*/
alloc_unlock(na);
ba->buddy_list_len[0] = 0;
ba->buddy_list_len[max_order/2] = len_orig;
split_orig = ba->buddy_list_split[max_order/3];
ba->buddy_list_split[max_order/3] = 100;
ba->buddy_list_split[0] = 100;
if (!EXPECT_BUG(na->ops->fini(na))) {
unit_err(m, "%d: Excess split nodes didn't trigger BUG()\n",
__LINE__);
goto cleanup;
}
/* Release mutex again */
/*
* Release the mutex that was left locked when fini() was interrupted
* by BUG()
*/
alloc_unlock(na);
ba->buddy_list_split[0] = 0;
ba->buddy_list_split[max_order/3] = split_orig;
alloced_orig = ba->buddy_list_alloced[max_order/4];
ba->buddy_list_alloced[max_order/4] = 100;
ba->buddy_list_alloced[0] = 100;
if (!EXPECT_BUG(na->ops->fini(na))) {
unit_err(m, "%d: Excess alloced nodes didn't trigger BUG()\n",
__LINE__);
goto cleanup;
}
ba->buddy_list_alloced[max_order/4] = alloced_orig;
ba->buddy_list_alloced[0] = 0;
result = UNIT_SUCCESS;
@@ -611,6 +637,8 @@ int test_nvgpu_buddy_allocator_carveout(struct unit_module *m,
NVGPU_CARVEOUT("test_co", 0ULL, 0ULL);
struct nvgpu_alloc_carveout test_co1 =
NVGPU_CARVEOUT("test_co1", 0ULL, 0ULL);
struct nvgpu_alloc_carveout test_co2 =
NVGPU_CARVEOUT("test_co2", 0ULL, 0ULL);
/*
* test_co base < buddy_allocator start
@@ -625,7 +653,7 @@ int test_nvgpu_buddy_allocator_carveout(struct unit_module *m,
* test_co base + test_co length > buddy allocator end
* Expect to fail
*/
test_co.base = BA_DEFAULT_BASE >> 1;
test_co.base = BA_DEFAULT_BASE;
test_co.length = BA_DEFAULT_SIZE << 1;
err = na->ops->reserve_carveout(na, &test_co);
@@ -646,18 +674,18 @@ int test_nvgpu_buddy_allocator_carveout(struct unit_module *m,
unit_return_fail(m, "carveout reserved with unaligned base\n");
}
test_co.base = BA_DEFAULT_BASE;
test_co.length = SZ_4K;
err = na->ops->reserve_carveout(na, &test_co);
test_co1.base = BA_DEFAULT_BASE;
test_co1.length = SZ_4K;
err = na->ops->reserve_carveout(na, &test_co1);
if (err < 0) {
unit_return_fail(m, "couldn't reserve 4K carveout\n");
}
na->ops->release_carveout(na, &test_co);
na->ops->release_carveout(na, &test_co1);
test_co.base = SZ_4K;
test_co.length = SZ_4K;
err = na->ops->reserve_carveout(na, &test_co);
test_co1.base = SZ_4K;
test_co1.length = SZ_4K;
err = na->ops->reserve_carveout(na, &test_co1);
if (err < 0) {
unit_return_fail(m,
"couldn't reserve 4K carveout after release\n");
@@ -667,21 +695,57 @@ int test_nvgpu_buddy_allocator_carveout(struct unit_module *m,
* Allocate 64K carveout at already allocated address
* Expect to fail
*/
test_co1.base = 0x1800;
test_co1.length = SZ_64K;
err = na->ops->reserve_carveout(na, &test_co1);
test_co.base = 0x1800;
test_co.length = SZ_64K;
err = na->ops->reserve_carveout(na, &test_co);
if (err == 0) {
unit_return_fail(m,
"64K carveout reserved at already allocated address\n");
}
test_co1.base = SZ_4K << 2;
test_co1.length = SZ_64K;
err = na->ops->reserve_carveout(na, &test_co1);
test_co2.base = SZ_16K;
test_co2.length = SZ_64K;
err = na->ops->reserve_carveout(na, &test_co2);
if (err < 0) {
unit_return_fail(m, "couldn't reserve 64K carveout\n");
}
/*
* Allocate 8K carveout at already allocated address
* Expect to fail
*/
test_co.base = 0x1800 + SZ_4K;
test_co.length = SZ_8K;
err = na->ops->reserve_carveout(na, &test_co);
if (err == 0) {
unit_return_fail(m,
"8K carveout reserved at already allocated address\n");
}
/*
* Allocate 4K carveout at already allocated address
* Expect to fail
*/
test_co.base = SZ_16K;
test_co.length = SZ_4K;
err = na->ops->reserve_carveout(na, &test_co);
if (err == 0) {
unit_return_fail(m,
"8K carveout reserved at already allocated address\n");
}
/*
* Allocate 4K carveout at already allocated address
* Expect to fail
*/
test_co.base = 0x1800;
test_co.length = SZ_4K;
err = na->ops->reserve_carveout(na, &test_co);
if (err == 0) {
unit_return_fail(m,
"8K carveout reserved at already allocated address\n");
}
addr = na->ops->alloc(na, (SZ_64K >> 1));
if (addr == 0) {
unit_return_fail(m, "couldn't allocate 32K\n");

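The overlap expectations in the carveout test above are easier to
follow with the address map written out (values derived from the
constants in the test; this is a reading aid, not code from the
change):

/*
 * Assumed layout after the successful reservations:
 *   test_co1: 0x1000..0x1FFF  (4K carveout at BA_DEFAULT_BASE)
 *   test_co2: 0x4000..0x13FFF (64K carveout at SZ_16K)
 *
 * Each failing reserve overlaps one of them:
 *   base 0x1800, len 64K -> 0x1800..0x117FF overlaps both carveouts
 *   base 0x2800, len 8K  -> 0x2800..0x47FF  overlaps test_co2
 *   base 0x4000, len 4K  -> 0x4000..0x4FFF  overlaps test_co2
 *   base 0x1800, len 4K  -> 0x1800..0x27FF  overlaps test_co1
 */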

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -37,7 +37,13 @@ struct unit_module;
*
* Description: Initialize buddy allocator.
*
* Test Type: Feature
* Test Type: Feature, Error injection
*
* Targets: nvgpu_allocator_init, nvgpu_buddy_allocator_init,
* nvgpu_buddy_check_argument_limits, nvgpu_buddy_set_attributes,
* balloc_allocator_align, balloc_compute_max_order, balloc_init_lists,
* balloc_max_order_in, balloc_get_order, balloc_get_order_list,
* nvgpu_allocator.ops.fini
*
* Input: None
*
@@ -74,6 +80,10 @@ int test_nvgpu_buddy_allocator_init(struct unit_module *m,
*
* Test Type: Feature
*
* Targets: nvgpu_allocator.ops.alloc, nvgpu_allocator.ops.reserve_carveout,
* nvgpu_allocator.ops.release_carveout,
* nvgpu_alloc_carveout_from_co_entry
*
* Input: test_nvgpu_buddy_allocator_init
*
* Steps:
@@ -100,6 +110,16 @@ int test_nvgpu_buddy_allocator_carveout(struct unit_module *m,
*
* Test Type: Feature
*
* Targets: nvgpu_allocator.ops.base, nvgpu_allocator.ops.length,
* nvgpu_allocator.ops.end, nvgpu_allocator.ops.inited,
* nvgpu_allocator.ops.space, nvgpu_allocator.ops.alloc,
* nvgpu_allocator.ops.alloc_pte, nvgpu_allocator.ops.free_alloc,
* nvgpu_allocator.ops.alloc_fixed, nvgpu_buddy_allocator_flag_ops,
* nvgpu_buddy_from_buddy_entry, balloc_base_shift, buddy_allocator,
* balloc_base_unshift, balloc_owner, balloc_order_to_len, alloc_lock,
* alloc_unlock, nvgpu_alloc_to_gpu, nvgpu_buddy_from_rbtree_node,
* nvgpu_fixed_alloc_from_rbtree_node
*
* Input: test_nvgpu_buddy_allocator_init
*
* Steps:
@@ -129,6 +149,8 @@ int test_nvgpu_buddy_allocator_basic_ops(struct unit_module *m,
*
* Test Type: Other (cleanup)
*
* Targets: nvgpu_allocator.ops.fini
*
* Input: test_nvgpu_buddy_allocator_init
*
* Steps:
@@ -146,7 +168,11 @@ int test_nvgpu_buddy_allocator_destroy(struct unit_module *m,
*
* Description: Test cleanup branch of memory allocations.
*
* Test Type: Feature
* Test Type: Feature, Error injection
*
* Targets: nvgpu_allocator_init, nvgpu_allocator.ops.alloc,
* nvgpu_allocator.ops.alloc_fixed, nvgpu_buddy_allocator_flag_ops,
* nvgpu_allocator.ops.fini
*
* Input: None
*
@@ -178,7 +204,12 @@ int test_nvgpu_buddy_allocator_alloc(struct unit_module *m,
*
* Description: Test buddy allocator functions with big pages disabled.
*
* Test Type: Feature
* Test Type: Feature, Error injection
*
* Targets: nvgpu_allocator_init, nvgpu_buddy_allocator_init,
* nvgpu_buddy_check_argument_limits, nvgpu_allocator.ops.inited,
* nvgpu_buddy_set_attributes, nvgpu_allocator.ops.alloc_pte,
* nvgpu_allocator.ops.alloc_fixed, nvgpu_allocator.ops.fini
*
* Input: None
*
@@ -220,6 +251,12 @@ int test_buddy_allocator_with_small_pages(struct unit_module *m,
*
* Test Type: Feature
*
* Targets: nvgpu_allocator_init, nvgpu_buddy_allocator_init,
* nvgpu_buddy_check_argument_limits, nvgpu_buddy_set_attributes,
* nvgpu_allocator.ops.alloc_pte, nvgpu_allocator.ops.alloc_fixed,
* nvgpu_allocator.ops.alloc, nvgpu_allocator.ops.free_alloc,
* nvgpu_allocator.ops.fini
*
* Input: None
*
* Steps:

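Many of the specs above are now tagged "Error injection". In these
units that is implemented with the POSIX fault-injection helpers
already visible in the .c files of this change
(nvgpu_kmem_get_fault_injection). A minimal sketch of the pattern; the
enable/disable helper and its (fi, enable, number) signature are
assumptions about the unit framework, not part of this diff:

/* Sketch: fail the first kmem allocation during allocator init. */
static int example_init_with_kmem_fault(struct unit_module *m,
					struct gk20a *g,
					struct nvgpu_allocator *na)
{
	struct nvgpu_posix_fault_inj *kmem_fi =
		nvgpu_kmem_get_fault_injection();

	nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
	if (nvgpu_allocator_init(g, na, NULL, "test", SZ_4K, SZ_1M, SZ_4K,
				0ULL, 0ULL, BUDDY_ALLOCATOR) == 0) {
		nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
		unit_return_fail(m, "init succeeded despite kmem fault\n");
	}
	nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);

	return UNIT_SUCCESS;
}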

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -24,8 +24,11 @@
#include <unit/unit.h>
#include <nvgpu/types.h>
#include <nvgpu/sizes.h>
#include <nvgpu/allocator.h>
#include "nvgpu_allocator.h"
#define OP_ALLOC 0
#define OP_FREE 1
#define OP_ALLOC_PTE 2
@@ -149,18 +152,7 @@ static struct nvgpu_allocator_ops dummy_ops = {
.fini = dummy_fini
};
/*
* Make sure the op functions are called and that's it. Verifying that the ops
* actually do what they are supposed to do is the responsibility of the unit
* tests for the actual allocator implementations.
*
* In this unit test the meaning of these ops can't really be assumed. But we
* can test that the logic for only calling present ops is tested.
*
* Also note: we don't test the fini op here; instead we test it separately as
* part of the init/destroy functionality.
*/
static int test_nvgpu_alloc_ops_present(struct unit_module *m,
int test_nvgpu_alloc_ops_present(struct unit_module *m,
struct gk20a *g, void *args)
{
u32 i;
@@ -243,13 +235,7 @@ static int test_nvgpu_alloc_ops_present(struct unit_module *m,
return UNIT_SUCCESS;
}
/*
* Test the common_init() function used by all allocator implementations. The
* test here is to simply catch that the various invalid input checks are
* exercised and that the parameters passed into the common_init() make their
* way into the allocator struct.
*/
static int test_nvgpu_alloc_common_init(struct unit_module *m,
int test_nvgpu_alloc_common_init(struct unit_module *m,
struct gk20a *g, void *args)
{
struct nvgpu_allocator a;
@@ -294,11 +280,7 @@ static int test_nvgpu_alloc_common_init(struct unit_module *m,
return UNIT_SUCCESS;
}
/*
* Test that the destroy function works. This just calls the fini() op and
* expects the allocator to have been completely zeroed.
*/
static int test_nvgpu_alloc_destroy(struct unit_module *m,
int test_nvgpu_alloc_destroy(struct unit_module *m,
struct gk20a *g, void *args)
{
struct nvgpu_allocator a;
@@ -322,10 +304,53 @@ static int test_nvgpu_alloc_destroy(struct unit_module *m,
return UNIT_SUCCESS;
}
int test_nvgpu_allocator_init(struct unit_module *m,
struct gk20a *g, void *args)
{
struct nvgpu_allocator a;
u64 base = SZ_4K;
u64 size = SZ_64K;
u64 blk_size = SZ_4K;
u64 max_order = 0;
u64 flags = 0ULL;
if (nvgpu_allocator_init(g, &a, NULL, "buddy", base, size, blk_size,
max_order, flags, BUDDY_ALLOCATOR) != 0) {
unit_return_fail(m, "failed to init buddy_allocator\n");
} else {
a.ops->fini(&a);
}
#ifdef CONFIG_NVGPU_DGPU
if (nvgpu_allocator_init(g, &a, NULL, "page", base, size, blk_size,
max_order, flags, PAGE_ALLOCATOR) != 0) {
unit_return_fail(m, "failed to init page_allocator\n");
} else {
a.ops->fini(&a);
}
#endif
if (nvgpu_allocator_init(g, &a, NULL, "bitmap", base, size, blk_size,
max_order, flags, BITMAP_ALLOCATOR) != 0) {
unit_return_fail(m, "failed to init bitmap_allocator\n");
} else {
a.ops->fini(&a);
}
/* Initialize invalid allocator */
if (nvgpu_allocator_init(g, &a, NULL, "invalid", base, size, blk_size,
max_order, flags, -1) != -EINVAL) {
unit_return_fail(m, "initialized invalid allocator\n");
}
return UNIT_SUCCESS;
}
struct unit_module_test nvgpu_allocator_tests[] = {
UNIT_TEST(common_init, test_nvgpu_alloc_common_init, NULL, 0),
UNIT_TEST(alloc_destroy, test_nvgpu_alloc_destroy, NULL, 0),
UNIT_TEST(alloc_ops, test_nvgpu_alloc_ops_present, NULL, 0),
UNIT_TEST(allocator_init, test_nvgpu_allocator_init, NULL, 0),
};
UNIT_MODULE(nvgpu_allocator, nvgpu_allocator_tests, UNIT_PRIO_NVGPU_TEST);

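For context on test_nvgpu_alloc_ops_present (kept in this file and now
exported via the header below): it drives the allocator through a table
of dummy ops, of which only .fini is shown in this excerpt. A sketch of
that setup using the nvgpu_alloc_common_init() prototype shown earlier
in this change; the dummy names here are illustrative:

static u64 dummy_alloc(struct nvgpu_allocator *a, u64 len)
{
	/* The test only checks that a present op gets called. */
	return 1ULL;
}

static struct nvgpu_allocator_ops example_ops = {
	.alloc = dummy_alloc,
};

static int example_setup(struct gk20a *g, struct nvgpu_allocator *a)
{
	/* (allocator, g, name, priv, dbg, ops) per the earlier prototype. */
	return nvgpu_alloc_common_init(a, g, "dummy", NULL, false,
					&example_ops);
}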

@@ -0,0 +1,125 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_NVGPU_ALLOCATOR_H
#define UNIT_NVGPU_ALLOCATOR_H
struct gk20a;
struct unit_module;
/** @addtogroup SWUTS-mm-allocators-nvgpu-allocator
* @{
*
* Software Unit Test Specification for mm.allocators.nvgpu_allocator
*/
/**
* Test specification for: test_nvgpu_alloc_common_init
*
* Description: Test common_init() function
*
* Test Type: Feature
*
* Targets: nvgpu_alloc_common_init
*
* Input: None
*
* Steps:
* - Initialize nvgpu allocator with default ops values.
* - Confirm that the parameters passed to the function make their way into
* the allocator struct.
* - Initialize nvgpu allocator for various invalid input cases.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_alloc_common_init(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_alloc_destroy
*
* Description: Test allocator destroy function
*
* Test Type: Feature
*
* Targets: nvgpu_alloc_common_init, nvgpu_alloc_destroy
*
* Input: None
*
* Steps:
* - Trigger the allocator destroy function, which in turn invokes the
* fini() op.
* - The allocator struct should be completely zeroed afterwards.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_alloc_destroy(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_alloc_ops_present
*
* Description: Test that the allocator invokes only the ops that are present
*
* Test Type: Feature
*
* Targets: nvgpu_alloc, nvgpu_alloc_pte, nvgpu_alloc_fixed, nvgpu_free_fixed,
* nvgpu_alloc_reserve_carveout, nvgpu_alloc_release_carveout,
* nvgpu_alloc_base, nvgpu_alloc_length, nvgpu_alloc_end, nvgpu_free,
* nvgpu_alloc_initialized, nvgpu_alloc_space
*
* Input: None
*
* Steps:
* - Test the logic that invokes only the ops that are present.
* - Actual functionality of the ops should be verified by the respective
* allocator unit tests.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_alloc_ops_present(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_allocator_init
*
* Description: Test allocator init function
*
* Test Type: Feature
*
* Targets: nvgpu_allocator_init, nvgpu_alloc_destroy
*
* Input: None
*
* Steps:
* - Initialize each allocator and check that the allocator is created
* successfully.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_allocator_init(struct unit_module *m,
struct gk20a *g, void *args);
#endif /* UNIT_NVGPU_ALLOCATOR_H */


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -32,6 +32,7 @@
#include <nvgpu/posix/posix-fault-injection.h>
#include <nvgpu/page_allocator.h>
#include "page_allocator.h"
#ifdef CONFIG_NVGPU_DGPU
@@ -229,12 +230,7 @@ static struct test_parameters alloc_more_than_available = {
.error_msg = "Allocated more than available memory",
};
/*
* This function is a wrapper for page_allocator alloc() function.
* test_page_alloc() - Allocates page allocator memory
*/
static int test_page_alloc(struct unit_module *m, struct gk20a *g, void *args)
int test_page_alloc(struct unit_module *m, struct gk20a *g, void *args)
{
struct test_parameters *param = (struct test_parameters *) args;
struct nvgpu_page_allocator *pa = page_allocator(na);
@@ -259,11 +255,7 @@ static int test_page_alloc(struct unit_module *m, struct gk20a *g, void *args)
}
}
/*
* This function is a wrapper for page_allocator free() function.
* test_page_free() - Frees page allocator memory
*/
static int test_page_free(struct unit_module *m, struct gk20a *g, void *args)
int test_page_free(struct unit_module *m, struct gk20a *g, void *args)
{
struct test_parameters *param = (struct test_parameters *) args;
struct nvgpu_page_allocator *pa = page_allocator(na);
@@ -275,13 +267,7 @@ static int test_page_free(struct unit_module *m, struct gk20a *g, void *args)
return UNIT_SUCCESS;
}
/*
* This function is a wrapper for page_allocator alloc_fixed() function.
* test_page_alloc_fixed() - Allocates page allocator memory starting at
* fixed address location
*/
static int test_page_alloc_fixed(struct unit_module *m, struct gk20a *g,
void *args)
int test_page_alloc_fixed(struct unit_module *m, struct gk20a *g, void *args)
{
struct test_parameters *param = (struct test_parameters *) args;
struct nvgpu_page_allocator *pa = page_allocator(na);
@@ -308,12 +294,7 @@ static int test_page_alloc_fixed(struct unit_module *m, struct gk20a *g,
}
}
/*
* This function is a wrapper for page_allocator free_fixed() function.
* test_page_free_fixed() - Frees fixed page allocator memory
*/
static int test_page_free_fixed(struct unit_module *m, struct gk20a *g,
void *args)
int test_page_free_fixed(struct unit_module *m, struct gk20a *g, void *args)
{
struct test_parameters *param = (struct test_parameters *) args;
struct nvgpu_page_allocator *pa = page_allocator(na);
@@ -325,11 +306,7 @@ static int test_page_free_fixed(struct unit_module *m, struct gk20a *g,
return UNIT_SUCCESS;
}
/*
* Tests nvgpu_page_allocator_init()
* This test considers multiple conditions to initialize page allocator
*/
static int test_page_allocator_init_slabs(struct unit_module *m,
int test_page_allocator_init_slabs(struct unit_module *m,
struct gk20a *g, void *args)
{
u64 base = SZ_64K;
@@ -383,10 +360,7 @@ static int test_page_allocator_init_slabs(struct unit_module *m,
return UNIT_SUCCESS;
}
/*
* Tests page_allocator sgt ops
*/
static int test_page_allocator_sgt_ops(struct unit_module *m,
int test_page_allocator_sgt_ops(struct unit_module *m,
struct gk20a *g, void *args)
{
u64 addr;
@@ -437,11 +411,7 @@ static int test_page_allocator_sgt_ops(struct unit_module *m,
return UNIT_SUCCESS;
}
/*
* Tests page_allocator basic ops
* Page allocator attributes are set corresponding to default init values
*/
static int test_nvgpu_page_allocator_ops(struct unit_module *m,
int test_nvgpu_page_allocator_ops(struct unit_module *m,
struct gk20a *g, void *args)
{
u64 addr;
@@ -497,23 +467,19 @@ static int test_nvgpu_page_allocator_ops(struct unit_module *m,
return UNIT_SUCCESS;
}
/*
* De-initialize page allocator
*/
static int test_nvgpu_page_allocator_destroy(struct unit_module *m,
int test_nvgpu_page_allocator_destroy(struct unit_module *m,
struct gk20a *g, void *args)
{
na->ops->fini(na);
if (na->priv != NULL) {
unit_return_fail(m, "page allocator destroy failed\n");
}
nvgpu_kfree(g, na);
return UNIT_SUCCESS;
}
/*
* Tests nvgpu_page_allocator_init()
* This test considers multiple conditions to initialize page allocator
*/
static int test_nvgpu_page_allocator_init(struct unit_module *m,
int test_nvgpu_page_allocator_init(struct unit_module *m,
struct gk20a *g, void *args)
{
u64 base = BA_DEFAULT_BASE;
@@ -602,8 +568,8 @@ struct unit_module_test page_allocator_tests[] = {
/* Below tests examine page allocation */
/*
* NOTE: The test order should not be changed. A test contructs
* required memory structure for later tests.
* NOTE: The test order should not be changed. Earlier tests set up the
* memory allocation state required by later tests.
*/
/* These tests check execution with fault injection at various locations */


@@ -0,0 +1,258 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_PAGE_ALLOCATOR_H
#define UNIT_PAGE_ALLOCATOR_H
struct gk20a;
struct unit_module;
/** @addtogroup SWUTS-mm-allocators-page-allocator
* @{
*
* Software Unit Test Specification for mm.allocators.page_allocator
*/
/**
* Test specification for: test_nvgpu_page_allocator_init
*
* Description: Initialize page allocator.
*
* Test Type: Feature, Error injection
*
* Targets: nvgpu_allocator_init, nvgpu_page_allocator_init,
* nvgpu_allocator.ops.fini
*
* Input: None
*
* Steps:
* - Initialize page allocator with the following characteristics.
* - 4K memory base address.
* - 1M length of memory.
* - 4K block size.
* - Check that page allocator initialization fails for scenarios such as an
* odd block_size value and injected memory-allocation failures.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_page_allocator_init(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_page_allocator_ops
*
* Description: Test page allocator ops
*
* Test Type: Feature
*
* Targets: nvgpu_allocator.ops.alloc, nvgpu_allocator.ops.free_alloc,
* nvgpu_allocator.ops.reserve_carveout,
* nvgpu_allocator.ops.release_carveout, nvgpu_allocator.ops.base,
* nvgpu_allocator.ops.end, nvgpu_allocator.ops.length,
* nvgpu_allocator.ops.inited, nvgpu_allocator.ops.space
*
* Input: test_nvgpu_page_allocator_init
*
* Steps:
* - Check page_allocator attributes using allocator ops.
* - Execute allocator ops to read attribute values.
* - Confirm that the values equal the defaults set during
* initialization.
* - Allocate carveout and confirm that allocation is successful. Check that
* carveout cannot be reserved after normal page allocation.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_page_allocator_ops(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_page_allocator_sgt_ops
*
* Description: Test page alloc sgt ops
*
* Test Type: Feature
*
* Targets: nvgpu_page_alloc.nvgpu_sgt.nvgpu_sgt_ops.sgl_next,
* nvgpu_page_alloc.nvgpu_sgt.nvgpu_sgt_ops.sgl_phys,
* nvgpu_page_alloc.nvgpu_sgt.nvgpu_sgt_ops.sgl_ipa,
* nvgpu_page_alloc.nvgpu_sgt.nvgpu_sgt_ops.sgl_ipa_to_pa,
* nvgpu_page_alloc.nvgpu_sgt.nvgpu_sgt_ops.sgl_dma,
* nvgpu_page_alloc.nvgpu_sgt.nvgpu_sgt_ops.sgl_length,
* nvgpu_page_alloc.nvgpu_sgt.nvgpu_sgt_ops.sgl_gpu_addr,
* nvgpu_page_alloc.nvgpu_sgt.nvgpu_sgt_ops.sgt_free
*
* Input: test_nvgpu_page_allocator_init
*
* Steps:
* - Check allocated page attributes using sgt ops
* - Confirm that allocation details are equal to values set during allocation.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_page_allocator_sgt_ops(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_page_alloc_fixed
*
* Description: Allocate memory at fixed address
*
* Test Type: Feature, Error injection
*
* Targets: nvgpu_allocator.ops.alloc_fixed
*
* Input: test_nvgpu_page_allocator_init or test_page_allocator_init_slabs,
* args (fault_at_alloc_cache, fault_at_sgl_alloc, simple_alloc_128K,
* alloc_no_scatter_gather, failing_alloc_8K)
*
* Steps:
* - Allocate a chunk of memory at a fixed address per the test_parameters input.
* - Check that result is equal to test_parameters expected output.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_page_alloc_fixed(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_page_alloc
*
* Description: Allocate memory using page allocator
*
* Test Type: Feature, Error injection
*
* Targets: nvgpu_allocator.ops.alloc
*
* Input: test_nvgpu_page_allocator_init or test_page_allocator_init_slabs,
* args (fault_at_alloc_cache, fault_at_nvgpu_alloc,
* first_simple_alloc_32K, fault_at_sgl_alloc, alloc_no_scatter_gather,
* alloc_contiguous, simple_alloc_512K, alloc_more_than_available,
* fault_at_page_cache, second_simple_alloc_32K, third_simple_alloc_32K,
* fourth_simple_alloc_32K, simple_alloc_8K, failing_alloc_16K)
*
* Steps:
* - Allocate a chunk of memory at any address per the test_parameters input.
* - Check that result is equal to test_parameters expected output.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_page_alloc(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_page_free
*
* Description: Free allocated memory
*
* Test Type: Feature
*
* Targets: nvgpu_allocator.ops.free_alloc
*
* Input: test_nvgpu_page_allocator_init or test_page_allocator_init_slabs,
* args (alloc_no_scatter_gather, first_simple_alloc_32K,
* simple_alloc_512K, fourth_simple_alloc_32K, second_simple_alloc_32K,
* first_simple_alloc_32K, third_simple_alloc_32K)
*
* Steps:
* - Free the allocated memory at the given address per the test_parameters input.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_page_free(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_page_free_fixed
*
* Description: Free allocated page at given address
*
* Test Type: Feature
*
* Targets: nvgpu_allocator.ops.free_fixed
*
* Input: test_nvgpu_page_allocator_init, args (alloc_no_scatter_gather,
* simple_alloc_128K)
*
* Steps:
* - Free the allocated memory at the fixed address per the test_parameters input.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_page_free_fixed(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_page_allocator_init_slabs
*
* Description: Initialize page allocator with slabs.
*
* Test Type: Feature, Error injection
*
* Targets: nvgpu_allocator_init, nvgpu_page_allocator_init,
* nvgpu_page_alloc_init_slabs, nvgpu_allocator.ops.fini
*
* Input: None
*
* Steps:
* - Initialize page allocator with the following characteristics.
* - 64K memory base address.
* - 128K length of memory.
* - 64K block size.
* - Flags set to GPU_ALLOC_4K_VIDMEM_PAGES to enable slabs.
* - Check that page allocator initialization fails for scenarios such as an
* odd block_size value and injected memory-allocation failures.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_page_allocator_init_slabs(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_page_allocator_destroy
*
* Description: Destroy page allocator structure
*
* Test Type: Feature
*
* Targets: nvgpu_allocator.ops.fini
*
* Input: test_nvgpu_page_allocator_init or test_page_allocator_init_slabs
*
* Steps:
* - De-initialize page allocator structure.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_page_allocator_destroy(struct unit_module *m,
struct gk20a *g, void *args);
/**
* @}
*/
#endif /* UNIT_PAGE_ALLOCATOR_H */

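The sgt-ops targets listed above are function pointers hanging off the
allocation's scatter-gather table. A hedged sketch of walking them
through the generic nvgpu_sgt_* wrappers; the wrapper names and the
sgt->sgl member are assumptions about the nvgpu_sgt API, not taken from
this diff:

/* Sketch: iterate an allocation's SGT and query each entry. */
static void example_walk_sgt(struct gk20a *g, struct nvgpu_sgt *sgt)
{
	void *sgl;

	for (sgl = sgt->sgl; sgl != NULL;
			sgl = nvgpu_sgt_get_next(sgt, sgl)) {
		u64 phys = nvgpu_sgt_get_phys(g, sgt, sgl);
		u64 len = nvgpu_sgt_get_length(sgt, sgl);

		(void)phys;
		(void)len;
	}
}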

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -179,7 +179,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
return UNIT_SUCCESS;
}
int test_env_init(struct unit_module *m, struct gk20a *g, void *args)
int test_env_init_flush_gk20a_fusa(struct unit_module *m, struct gk20a *g,
void *args)
{
g->log_mask = 0;
@@ -422,7 +423,8 @@ done:
return ret;
}
int test_env_clean(struct unit_module *m, struct gk20a *g, void *args)
int test_env_clean_flush_gk20a_fusa(struct unit_module *m, struct gk20a *g,
void *args)
{
g->log_mask = 0;
@@ -432,7 +434,7 @@ int test_env_clean(struct unit_module *m, struct gk20a *g, void *args)
}
struct unit_module_test mm_flush_gk20a_fusa_tests[] = {
UNIT_TEST(env_init, test_env_init, NULL, 0),
UNIT_TEST(env_init, test_env_init_flush_gk20a_fusa, NULL, 0),
UNIT_TEST(mm_fb_flush_s0, test_gk20a_mm_fb_flush, (void *)F_GK20A_FB_FLUSH_DEFAULT_INPUT, 0),
UNIT_TEST(mm_fb_flush_s1, test_gk20a_mm_fb_flush, (void *)F_GK20A_FB_FLUSH_GET_RETRIES, 0),
UNIT_TEST(mm_fb_flush_s2, test_gk20a_mm_fb_flush, (void *)F_GK20A_FB_FLUSH_PENDING_TRUE, 0),
@@ -451,7 +453,7 @@ struct unit_module_test mm_flush_gk20a_fusa_tests[] = {
UNIT_TEST(mm_l2_invalidate_s2, test_gk20a_mm_l2_invalidate, (void *)F_GK20A_L2_INVALIDATE_OUTSTANDING_TRUE, 0),
UNIT_TEST(mm_l2_invalidate_s3, test_gk20a_mm_l2_invalidate, (void *)F_GK20A_L2_INVALIDATE_GET_RETRIES_NULL, 0),
UNIT_TEST(mm_l2_invalidate_s4, test_gk20a_mm_l2_invalidate, (void *)F_GK20A_L2_INVALIDATE_NVGPU_POWERED_OFF, 0),
UNIT_TEST(env_clean, test_env_clean, NULL, 0),
UNIT_TEST(env_clean, test_env_clean_flush_gk20a_fusa, NULL, 0),
};
UNIT_MODULE(flush_gk20a_fusa, mm_flush_gk20a_fusa_tests, UNIT_PRIO_NVGPU_TEST);


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -33,7 +33,7 @@ struct unit_module;
*/
/**
* Test specification for: test_env_init
* Test specification for: test_env_init_flush_gk20a_fusa
*
* Description: Initialize environment for MM tests
*
@@ -49,7 +49,8 @@ struct unit_module;
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_env_init(struct unit_module *m, struct gk20a *g, void *args);
int test_env_init_flush_gk20a_fusa(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_gk20a_mm_fb_flush
@@ -130,7 +131,7 @@ int test_gk20a_mm_l2_invalidate(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_env_clean
* Test specification for: test_env_clean_flush_gk20a_fusa
*
* Description: Cleanup test environment
*
@@ -146,7 +147,8 @@ int test_gk20a_mm_l2_invalidate(struct unit_module *m, struct gk20a *g,
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_env_clean(struct unit_module *m, struct gk20a *g, void *args);
int test_env_clean_flush_gk20a_fusa(struct unit_module *m, struct gk20a *g,
void *args);
/** @} */
#endif /* UNIT_MM_HAL_CACHE_FLUSH_GK20A_FUSA_H */


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -178,7 +178,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
return UNIT_SUCCESS;
}
int test_env_init(struct unit_module *m, struct gk20a *g, void *args)
int test_env_init_flush_gv11b_fusa(struct unit_module *m, struct gk20a *g,
void *args)
{
g->log_mask = 0;
@@ -284,7 +285,8 @@ done:
return ret;
}
int test_env_clean(struct unit_module *m, struct gk20a *g, void *args)
int test_env_clean_flush_gv11b_fusa(struct unit_module *m, struct gk20a *g,
void *args)
{
g->log_mask = 0;
@@ -295,14 +297,14 @@ int test_env_clean(struct unit_module *m, struct gk20a *g, void *args)
}
struct unit_module_test mm_flush_gv11b_fusa_tests[] = {
UNIT_TEST(env_init, test_env_init, (void *)0, 0),
UNIT_TEST(env_init, test_env_init_flush_gv11b_fusa, NULL, 0),
UNIT_TEST(mm_l2_flush_s0, test_gv11b_mm_l2_flush, (void *)F_GV11B_L2_FLUSH_PASS_BAR1_BIND_NOT_NULL, 0),
UNIT_TEST(mm_l2_flush_s1, test_gv11b_mm_l2_flush, (void *)F_GV11B_L2_FLUSH_PASS_BAR1_BIND_NULL, 0),
UNIT_TEST(mm_l2_flush_s2, test_gv11b_mm_l2_flush, (void *)F_GV11B_L2_FLUSH_FB_FLUSH_FAIL, 0),
UNIT_TEST(mm_l2_flush_s3, test_gv11b_mm_l2_flush, (void *)F_GV11B_L2_FLUSH_L2_FLUSH_FAIL, 0),
UNIT_TEST(mm_l2_flush_s4, test_gv11b_mm_l2_flush, (void *)F_GV11B_L2_FLUSH_TLB_INVALIDATE_FAIL, 0),
UNIT_TEST(mm_l2_flush_s5, test_gv11b_mm_l2_flush, (void *)F_GV11B_L2_FLUSH_FB_FLUSH2_FAIL, 0),
UNIT_TEST(env_clean, test_env_clean, NULL, 0),
UNIT_TEST(env_clean, test_env_clean_flush_gv11b_fusa, NULL, 0),
};
UNIT_MODULE(flush_gv11b_fusa, mm_flush_gv11b_fusa_tests, UNIT_PRIO_NVGPU_TEST);


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -33,7 +33,7 @@ struct unit_module;
*/
/**
* Test specification for: test_env_init
* Test specification for: test_env_init_flush_gv11b_fusa
*
* Description: Initialize environment for MM tests
*
@@ -49,7 +49,8 @@ struct unit_module;
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_env_init(struct unit_module *m, struct gk20a *g, void *args);
int test_env_init_flush_gv11b_fusa(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_gv11b_mm_l2_flush
@@ -80,7 +81,7 @@ int test_env_init(struct unit_module *m, struct gk20a *g, void *args);
int test_gv11b_mm_l2_flush(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_env_clean
* Test specification for: test_env_clean_flush_gv11b_fusa
*
* Description: Cleanup test environment
*
@@ -96,7 +97,8 @@ int test_gv11b_mm_l2_flush(struct unit_module *m, struct gk20a *g, void *args);
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_env_clean(struct unit_module *m, struct gk20a *g, void *args);
int test_env_clean_flush_gv11b_fusa(struct unit_module *m, struct gk20a *g,
void *args);
/** @} */
#endif /* UNIT_MM_HAL_CACHE_FLUSH_GV11B_FUSA_H */


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -163,7 +163,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
return UNIT_SUCCESS;
}
int test_env_init(struct unit_module *m, struct gk20a *g, void *args)
int test_env_init_mm_gp10b_fusa(struct unit_module *m, struct gk20a *g,
void *args)
{
g->log_mask = 0;
@@ -227,7 +228,8 @@ done:
return ret;
}
int test_env_clean(struct unit_module *m, struct gk20a *g, void *args)
int test_env_clean_mm_gp10b_fusa(struct unit_module *m, struct gk20a *g,
void *args)
{
g->log_mask = 0;
g->ops.mm.mmu_fault.info_mem_destroy(g);
@@ -237,11 +239,11 @@ int test_env_clean(struct unit_module *m, struct gk20a *g, void *args)
}
struct unit_module_test mm_gp10b_fusa_tests[] = {
UNIT_TEST(env_init, test_env_init, (void *)0, 0),
UNIT_TEST(env_init, test_env_init_mm_gp10b_fusa, (void *)0, 0),
UNIT_TEST(mm_init_bar2_vm_s0, test_gp10b_mm_init_bar2_vm, (void *)F_INIT_BAR2_VM_DEFAULT, 0),
UNIT_TEST(mm_init_bar2_vm_s1, test_gp10b_mm_init_bar2_vm, (void *)F_INIT_BAR2_VM_INIT_VM_FAIL, 0),
UNIT_TEST(mm_init_bar2_vm_s2, test_gp10b_mm_init_bar2_vm, (void *)F_INIT_BAR2_VM_ALLOC_INST_BLOCK_FAIL, 0),
UNIT_TEST(env_clean, test_env_clean, NULL, 0),
UNIT_TEST(env_clean, test_env_clean_mm_gp10b_fusa, NULL, 0),
};
UNIT_MODULE(mm_gp10b_fusa, mm_gp10b_fusa_tests, UNIT_PRIO_NVGPU_TEST);


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -33,7 +33,7 @@ struct unit_module;
*/
/**
* Test specification for: test_env_init
* Test specification for: test_env_init_mm_gp10b_fusa
*
* Description: Initialize environment for MM tests
*
@@ -49,14 +49,15 @@ struct unit_module;
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_env_init(struct unit_module *m, struct gk20a *g, void *args);
int test_env_init_mm_gp10b_fusa(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_gp10b_mm_init_bar2_vm
*
* Description: Initialize bar2 VM
*
* Test Type: Feature
* Test Type: Feature, Error injection
*
* Targets: gops_mm.init_bar2_vm, gp10b_mm_init_bar2_vm, gops_mm.remove_bar2_vm,
* gp10b_mm_remove_bar2_vm
@@ -75,7 +76,7 @@ int test_gp10b_mm_init_bar2_vm(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_env_clean
* Test specification for: test_env_clean_mm_gp10b_fusa
*
* Description: Cleanup test environment
*
@@ -91,7 +92,8 @@ int test_gp10b_mm_init_bar2_vm(struct unit_module *m, struct gk20a *g,
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_env_clean(struct unit_module *m, struct gk20a *g, void *args);
int test_env_clean_mm_gp10b_fusa(struct unit_module *m, struct gk20a *g,
void *args);
/** @} */
#endif /* UNIT_MM_HAL_GP10B_FUSA_H */


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -168,7 +168,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
return UNIT_SUCCESS;
}
int test_env_init(struct unit_module *m, struct gk20a *g, void *args)
int test_env_init_mm_gv11b_fusa(struct unit_module *m, struct gk20a *g,
void *args)
{
g->log_mask = 0;
@@ -226,7 +227,8 @@ int test_gv11b_mm_is_bar1_supported(struct unit_module *m, struct gk20a *g,
return UNIT_SUCCESS;
}
int test_env_clean(struct unit_module *m, struct gk20a *g, void *args)
int test_env_clean_mm_gv11b_fusa(struct unit_module *m, struct gk20a *g,
void *args)
{
g->log_mask = 0;
g->ops.mm.mmu_fault.info_mem_destroy(g);
@@ -237,13 +239,13 @@ int test_env_clean(struct unit_module *m, struct gk20a *g, void *args)
}
struct unit_module_test mm_gv11b_fusa_tests[] = {
UNIT_TEST(env_init, test_env_init, (void *)0, 0),
UNIT_TEST(env_init, test_env_init_mm_gv11b_fusa, (void *)0, 0),
UNIT_TEST(inst_block_s0, test_gv11b_mm_init_inst_block, (void *)0U, 0),
UNIT_TEST(inst_block_s1, test_gv11b_mm_init_inst_block, (void *)1U, 0),
UNIT_TEST(inst_block_s2, test_gv11b_mm_init_inst_block, (void *)2U, 0),
UNIT_TEST(inst_block_s3, test_gv11b_mm_init_inst_block, (void *)3U, 0),
UNIT_TEST(is_bar1_supported, test_gv11b_mm_is_bar1_supported, NULL, 0),
UNIT_TEST(env_clean, test_env_clean, NULL, 0),
UNIT_TEST(env_clean, test_env_clean_mm_gv11b_fusa, NULL, 0),
};
UNIT_MODULE(mm_gv11b_fusa, mm_gv11b_fusa_tests, UNIT_PRIO_NVGPU_TEST);


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -33,7 +33,7 @@ struct unit_module;
*/
/**
* Test specification for: test_env_init
* Test specification for: test_env_init_mm_gv11b_fusa
*
* Description: Initialize environment for MM tests
*
@@ -49,7 +49,8 @@ struct unit_module;
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_env_init(struct unit_module *m, struct gk20a *g, void *args);
int test_env_init_mm_gv11b_fusa(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_gv11b_mm_init_inst_block
@@ -94,7 +95,7 @@ int test_gv11b_mm_init_inst_block(struct unit_module *m, struct gk20a *g,
int test_gv11b_mm_is_bar1_supported(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_env_clean
* Test specification for: test_env_clean_mm_gv11b_fusa
*
* Description: Cleanup test environment
*
@@ -110,7 +111,8 @@ int test_gv11b_mm_is_bar1_supported(struct unit_module *m, struct gk20a *g,
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_env_clean(struct unit_module *m, struct gk20a *g, void *args);
int test_env_clean_mm_gv11b_fusa(struct unit_module *m, struct gk20a *g,
void *args);
/** @} */
#endif /* UNIT_MM_HAL_GV11B_FUSA_H */


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -199,7 +199,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
return UNIT_SUCCESS;
}
int test_env_init(struct unit_module *m, struct gk20a *g, void *args)
int test_env_init_mm_mmu_fault_gv11b_fusa(struct unit_module *m,
struct gk20a *g, void *args)
{
g->log_mask = 0;
@@ -778,7 +779,8 @@ done:
return ret;
}
int test_env_clean(struct unit_module *m, struct gk20a *g, void *args)
int test_env_clean_mm_mmu_fault_gv11b_fusa(struct unit_module *m,
struct gk20a *g, void *args)
{
g->log_mask = 0;
@@ -789,7 +791,7 @@ int test_env_clean(struct unit_module *m, struct gk20a *g, void *args)
}
struct unit_module_test mm_mmu_fault_gv11b_fusa_tests[] = {
UNIT_TEST(env_init, test_env_init, (void *)0, 0),
UNIT_TEST(env_init, test_env_init_mm_mmu_fault_gv11b_fusa, NULL, 0),
UNIT_TEST(setup_sw_s0, test_gv11b_mm_mmu_fault_setup_sw, (void *)F_MMU_FAULT_SETUP_SW_FAULT_BUF_ALLOC_FAIL, 0),
UNIT_TEST(setup_sw_s1, test_gv11b_mm_mmu_fault_setup_sw, (void *)F_MMU_FAULT_SETUP_SW_DEFAULT, 0),
UNIT_TEST(setup_sw_s2, test_gv11b_mm_mmu_fault_setup_sw, (void *)F_MMU_FAULT_SETUP_SW_2, 0),
@@ -821,7 +823,7 @@ struct unit_module_test mm_mmu_fault_gv11b_fusa_tests[] = {
UNIT_TEST(handle_nonreplay_s1, test_handle_nonreplay_replay_fault, (void *)F_HANDLE_NON_RPLYBLE_INVALID_BUF_ENTRY, 0),
UNIT_TEST(handle_nonreplay_s2, test_handle_nonreplay_replay_fault, (void *)F_HANDLE_NON_RPLYBLE_VALID_BUF_ENTRY, 0),
UNIT_TEST(handle_nonreplay_s3, test_handle_nonreplay_replay_fault, (void *)F_HANDLE_NON_RPLYBLE_VALID_BUF_CH, 0),
UNIT_TEST(env_clean, test_env_clean, NULL, 0),
UNIT_TEST(env_clean, test_env_clean_mm_mmu_fault_gv11b_fusa, NULL, 0),
};
UNIT_MODULE(mmu_fault_gv11b_fusa, mm_mmu_fault_gv11b_fusa_tests, UNIT_PRIO_NVGPU_TEST);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -33,7 +33,7 @@ struct unit_module;
*/
/**
* Test specification for: test_env_init
* Test specification for: test_env_init_mm_mmu_fault_gv11b_fusa
*
* Description: Initialize environment for MM tests
*
@@ -49,14 +49,15 @@ struct unit_module;
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_env_init(struct unit_module *m, struct gk20a *g, void *args);
int test_env_init_mm_mmu_fault_gv11b_fusa(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_gv11b_mm_mmu_fault_setup_sw
*
* Description: Test mmu fault setup sw function
*
* Test Type: Feature
* Test Type: Feature, Error injection
*
* Targets: gops_mm.gops_mm_mmu_fault.setup_sw, gv11b_mm_mmu_fault_setup_sw,
* gops_mm.gops_mm_mmu_fault.info_mem_destroy,
@@ -72,7 +73,8 @@ int test_env_init(struct unit_module *m, struct gk20a *g, void *args);
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gv11b_mm_mmu_fault_setup_sw(struct unit_module *m, struct gk20a *g, void *args);
int test_gv11b_mm_mmu_fault_setup_sw(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for:
@@ -202,7 +204,7 @@ int test_handle_nonreplay_replay_fault(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_env_clean
* Test specification for: test_env_clean_mm_mmu_fault_gv11b_fusa
*
* Description: Cleanup test environment
*
@@ -218,7 +220,8 @@ int test_handle_nonreplay_replay_fault(struct unit_module *m, struct gk20a *g,
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_env_clean(struct unit_module *m, struct gk20a *g, void *args);
int test_env_clean_mm_mmu_fault_gv11b_fusa(struct unit_module *m,
struct gk20a *g, void *args);
/** @} */
#endif /* UNIT_MM_HAL_MMU_FAULT_GV11B_FUSA_H */

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -277,6 +277,12 @@ int test_nvgpu_init_mm(struct unit_module *m, struct gk20a *g, void *args)
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_HAL, 1,
ARBITRARY_ERROR, 16);
/* Making g->ops.fb.fb_ecc_init fail */
g->ops.fb.fb_ecc_init = int_empty_hal;
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_HAL, 2,
ARBITRARY_ERROR, 17);
g->ops.fb.fb_ecc_init = NULL;
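/*
* Hypothetical sketch (editor's note, not part of this patch): the body of
* int_empty_hal is outside this hunk. One plausible shape for a stub HAL
* installed only so the fb_ecc_init branch can be driven to fail:
*
*	static int int_empty_hal(struct gk20a *g)
*	{
*		(void)g;
*		return -1;
*	}
*
* Any nonzero return would exercise the failure path checked by the
* nvgpu_init_mm_support_inject_error() helper above.
*/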
/*
* Extra cases for branch coverage: change support flags to test
* other branches

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -129,9 +129,6 @@ static void free_vidmem_env(struct unit_module *m, struct gk20a *g)
nvgpu_posix_io_delete_reg_space(g, bus_bar0_window_r());
}
/*
* Test APERTURE_VIDMEM branch of nvgpu_mem read and write functions
*/
int test_nvgpu_mem_vidmem(struct unit_module *m,
struct gk20a *g, void *args)
{
@@ -277,9 +274,31 @@ int test_nvgpu_aperture_mask(struct unit_module *m,
return UNIT_SUCCESS;
}
/*
* Test for iommu translate
*/
static const char *aperture_name_str[APERTURE_MAX_ENUM + 1] = {
[APERTURE_INVALID] = "INVAL",
[APERTURE_SYSMEM] = "SYSTEM",
[APERTURE_SYSMEM_COH] = "SYSCOH",
[APERTURE_VIDMEM] = "VIDMEM",
[APERTURE_MAX_ENUM] = "UNKNOWN",
};
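/*
* Note: indexing the expected-name table by enum constants (designated
* initializers) keeps each string paired with the right aperture value
* even if the enum ordering changes.
*/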
int test_nvgpu_aperture_str(struct unit_module *m, struct gk20a *g, void *args)
{
enum nvgpu_aperture ap = 0;
const char *name_str;
while (ap <= APERTURE_MAX_ENUM) {
name_str = nvgpu_aperture_str(ap);
if (strcmp(name_str, aperture_name_str[ap]) != 0) {
unit_return_fail(m,
"Incorrect aperture str for aperture %d\n", ap);
}
ap += 1;
}
return UNIT_SUCCESS;
}
int test_nvgpu_mem_iommu_translate(struct unit_module *m,
struct gk20a *g, void *args)
{
@@ -329,12 +348,6 @@ int test_nvgpu_mem_iommu_translate(struct unit_module *m,
return UNIT_SUCCESS;
}
/*
* Test nvgpu_memset()
*
* Testing function in APERTURE_SYSMEM and APERTURE_INVALID case.
*
*/
int test_nvgpu_memset_sysmem(struct unit_module *m,
struct gk20a *g, void *args)
{
@@ -373,9 +386,6 @@ int test_nvgpu_memset_sysmem(struct unit_module *m,
return UNIT_SUCCESS;
}
/*
* Test all memory write and read calls.
*/
int test_nvgpu_mem_wr_rd(struct unit_module *m,
struct gk20a *g, void *args)
{
@@ -652,8 +662,25 @@ int test_nvgpu_mem_create_from_phys(struct unit_module *m,
return UNIT_SUCCESS;
}
int test_free_nvgpu_mem(struct unit_module *m,
struct gk20a *g, void *args)
int test_nvgpu_mem_create_from_mem(struct unit_module *m, struct gk20a *g,
void *args)
{
struct nvgpu_mem dest_mem;
nvgpu_mem_create_from_mem(g, &dest_mem, test_mem, 0, 2);
unit_assert(dest_mem.cpu_va == test_mem->cpu_va, goto done);
unit_assert(dest_mem.size == (2 * PAGE_SIZE), goto done);
unit_assert((dest_mem.mem_flags & NVGPU_MEM_FLAG_SHADOW_COPY) != 0ULL,
goto done);
unit_assert(dest_mem.aperture == APERTURE_SYSMEM, goto done);
return UNIT_SUCCESS;
done:
unit_return_fail(m, "%s: failed!\n", __func__);
}
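/*
* Note: per the assertions above, the view produced by
* nvgpu_mem_create_from_mem() shares the source's cpu_va, spans whole
* pages (start page 0, 2 pages, i.e. 2 * PAGE_SIZE), keeps the SYSMEM
* aperture, and is tagged NVGPU_MEM_FLAG_SHADOW_COPY to record that it
* aliases another nvgpu_mem.
*/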
int test_free_nvgpu_mem(struct unit_module *m, struct gk20a *g, void *args)
{
test_mem->aperture = APERTURE_SYSMEM;
nvgpu_dma_free(g, test_mem);
@@ -680,6 +707,8 @@ struct unit_module_test nvgpu_mem_tests[] = {
* Tests covering VIDMEM branches
*/
UNIT_TEST(nvgpu_aperture_mask, test_nvgpu_aperture_mask, NULL, 0),
UNIT_TEST(nvgpu_aperture_name, test_nvgpu_aperture_str, NULL, 0),
UNIT_TEST(create_mem_from_mem, test_nvgpu_mem_create_from_mem, NULL, 0),
#ifdef CONFIG_NVGPU_DGPU
UNIT_TEST(nvgpu_mem_vidmem, test_nvgpu_mem_vidmem, NULL, 0),
#endif

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -37,7 +37,9 @@ struct unit_module;
*
* Description: Initialize nvgpu_mem for given size and base address.
*
* Test Type: Feature
* Test Type: Feature, Error injection
*
* Targets: nvgpu_mem_create_from_phys
*
* Input: None
*
@@ -60,6 +62,15 @@ int test_nvgpu_mem_create_from_phys(struct unit_module *m,
*
* Test Type: Feature
*
* Targets: nvgpu_mem.nvgpu_sgt.nvgpu_sgt_ops.sgl_next,
* nvgpu_mem.nvgpu_sgt.nvgpu_sgt_ops.sgl_dma,
* nvgpu_mem.nvgpu_sgt.nvgpu_sgt_ops.sgl_phys,
* nvgpu_mem.nvgpu_sgt.nvgpu_sgt_ops.sgl_ipa,
* nvgpu_mem.nvgpu_sgt.nvgpu_sgt_ops.sgl_ipa_to_pa,
* nvgpu_mem.nvgpu_sgt.nvgpu_sgt_ops.sgl_length,
* nvgpu_mem.nvgpu_sgt.nvgpu_sgt_ops.sgl_gpu_addr,
* nvgpu_mem.nvgpu_sgt.nvgpu_sgt_ops.sgt_free
*
* Input: test_nvgpu_mem_create_from_phys
*
* Steps:
@@ -78,6 +89,8 @@ int test_nvgpu_mem_phys_ops(struct unit_module *m, struct gk20a *g, void *args);
*
* Test Type: Feature
*
* Targets: nvgpu_memset
*
* Input: test_nvgpu_mem_create_from_phys
*
* Steps:
@@ -100,6 +113,10 @@ int test_nvgpu_memset_sysmem(struct unit_module *m,
*
* Test Type: Feature
*
* Targets: nvgpu_mem_is_sysmem, nvgpu_mem_is_valid, nvgpu_mem_wr, nvgpu_mem_rd,
* nvgpu_mem_wr_n, nvgpu_mem_rd_n, nvgpu_mem_rd32_pair, nvgpu_mem_rd32,
* nvgpu_mem_wr32
*
* Input: test_nvgpu_mem_create_from_phys
*
* Steps:
@@ -125,6 +142,8 @@ int test_nvgpu_mem_wr_rd(struct unit_module *m, struct gk20a *g, void *args);
*
* Test Type: Feature
*
* Targets: nvgpu_mem_iommu_translate
*
* Input: test_nvgpu_mem_create_from_phys
*
* Steps:
@@ -144,6 +163,8 @@ int test_nvgpu_mem_iommu_translate(struct unit_module *m,
*
* Test Type: Feature
*
* Targets: nvgpu_aperture_mask, nvgpu_aperture_mask_raw
*
* Input: test_nvgpu_mem_create_from_phys
*
* Steps:
@@ -156,6 +177,48 @@ int test_nvgpu_mem_iommu_translate(struct unit_module *m,
int test_nvgpu_aperture_mask(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_aperture_str
*
* Description: Check nvgpu_mem aperture name string
*
* Test Type: Feature
*
* Targets: nvgpu_aperture_str
*
* Input: test_nvgpu_mem_create_from_phys
*
* Steps:
* - Run the nvgpu_aperture_str function for all aperture values.
* - Confirm that the returned aperture name matches the expected string
*   for each input aperture.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_aperture_str(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_mem_create_from_mem
*
* Description: Create nvgpu_mem from another nvgpu_mem struct
*
* Test Type: Feature
*
* Targets: nvgpu_mem_create_from_mem
*
* Input: test_nvgpu_mem_create_from_phys
*
* Steps:
* - Create an nvgpu_mem structure spanning 2 pages of the global nvgpu_mem
*   struct.
* - Confirm that the returned destination nvgpu_mem address and size
*   correspond to 2 pages of the global nvgpu_mem structure with SYSMEM
*   aperture.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_mem_create_from_mem(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_nvgpu_mem_vidmem
*
@@ -163,6 +226,10 @@ int test_nvgpu_aperture_mask(struct unit_module *m,
*
* Test Type: Feature
*
* Targets: nvgpu_mem_is_sysmem, nvgpu_mem_is_valid, nvgpu_mem_wr, nvgpu_mem_rd,
* nvgpu_mem_wr_n, nvgpu_mem_rd_n, nvgpu_mem_rd32_pair, nvgpu_mem_rd32,
* nvgpu_mem_wr32
*
* Input: test_nvgpu_mem_create_from_phys
*
* Steps:
@@ -181,6 +248,8 @@ int test_nvgpu_mem_vidmem(struct unit_module *m, struct gk20a *g, void *args);
*
* Test Type: Other (cleanup)
*
* Targets: None
*
* Input: test_nvgpu_mem_create_from_phys
*
* Steps:

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -60,6 +60,7 @@
#define SPECIAL_CASE_DOUBLE_MAP 1
#define SPECIAL_CASE_NO_FREE 2
#define SPECIAL_CASE_NO_VM_AREA 4
#define SPECIAL_CASE_TIMEOUT_INIT_FAIL 8
/*
* Helper function used to create custom SGTs from a list of SGLs.
@@ -512,6 +513,8 @@ static int map_buffer(struct unit_module *m,
u32 pte[2];
struct nvgpu_mapped_buf **mapped_buffers = NULL;
u32 num_mapped_buffers = 0;
struct nvgpu_posix_fault_inj *timers_fi =
nvgpu_timers_get_fault_injection();
struct nvgpu_posix_fault_inj *kmem_fi =
nvgpu_kmem_get_fault_injection();
@@ -748,7 +751,13 @@ static int map_buffer(struct unit_module *m,
free_mapped_buf:
if ((mapped_buf != NULL) && !(subcase & SPECIAL_CASE_NO_FREE)) {
if (subcase & SPECIAL_CASE_TIMEOUT_INIT_FAIL) {
nvgpu_posix_enable_fault_injection(timers_fi, true, 0);
nvgpu_vm_unmap(vm, mapped_buf->addr, batch);
nvgpu_posix_enable_fault_injection(timers_fi, false, 0);
} else {
nvgpu_vm_unmap(vm, mapped_buf->addr, batch);
}
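/*
* Note: assuming the posix fault-injection convention that enabling with
* a count of 0 fails the very next call, the timers_fi window above makes
* the timeout init inside nvgpu_vm_unmap() fail, covering that error
* branch.
*/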
/*
* Unmapping an already unmapped buffer should not cause any
* errors.
@@ -949,8 +958,8 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
}
low_hole = SZ_1M * 64;
/* Cause nvgpu_allocator_init(BUDDY) to fail for user VMA */
nvgpu_posix_enable_fault_injection(kmem_fi, true, 5);
/* Cause nvgpu_gmmu_init_page_table to fail */
nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
ret = nvgpu_vm_do_init(&g->mm, vm,
g->ops.mm.gmmu.get_default_big_page_size(),
low_hole, kernel_reserved, aperture_size,
@@ -962,8 +971,8 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
goto exit;
}
/* Cause nvgpu_allocator_init(BUDDY) to fail for kernel VMA */
nvgpu_posix_enable_fault_injection(kmem_fi, true, 10);
/* Cause nvgpu_allocator_init(BUDDY) to fail for user VMA */
nvgpu_posix_enable_fault_injection(kmem_fi, true, 5);
ret = nvgpu_vm_do_init(&g->mm, vm,
g->ops.mm.gmmu.get_default_big_page_size(),
low_hole, kernel_reserved, aperture_size,
@@ -975,6 +984,32 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
goto exit;
}
/* Cause nvgpu_allocator_init(BUDDY) to fail for user_lp VMA */
nvgpu_posix_enable_fault_injection(kmem_fi, true, 12);
ret = nvgpu_vm_do_init(&g->mm, vm,
g->ops.mm.gmmu.get_default_big_page_size(),
low_hole, kernel_reserved, aperture_size,
big_pages, false, false, __func__);
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
if (ret != -ENOMEM) {
unit_err(m, "nvgpu_vm_do_init didn't fail as expected (9).\n");
ret = UNIT_FAIL;
goto exit;
}
/* Cause nvgpu_allocator_init(BUDDY) to fail for kernel VMA */
nvgpu_posix_enable_fault_injection(kmem_fi, true, 17);
ret = nvgpu_vm_do_init(&g->mm, vm,
g->ops.mm.gmmu.get_default_big_page_size(),
low_hole, kernel_reserved, aperture_size,
big_pages, false, false, __func__);
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
if (ret != -ENOMEM) {
unit_err(m, "nvgpu_vm_do_init didn't fail as expected (10).\n");
ret = UNIT_FAIL;
goto exit;
}
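/*
* Note on the kmem_fi counts used in this function (0, 5, 12, 17):
* assuming the posix fault-injection convention that a count of N lets
* the first N kmem allocations succeed and fails the next one, each value
* is tuned so the failing allocation lands in the specific init step
* named in the comment preceding it.
*/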
/* Invalid low_hole and kernel_reserved to cause an invalid config */
vm->guest_managed = true;
ret = nvgpu_vm_do_init(&g->mm, vm,
@@ -984,7 +1019,19 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
__func__);
vm->guest_managed = false;
if (ret != -EINVAL) {
unit_err(m, "nvgpu_vm_do_init did not fail as expected (9).\n");
unit_err(m, "nvgpu_vm_do_init didn't fail as expected (11).\n");
ret = UNIT_FAIL;
goto exit;
}
/* Cause nvgpu_vm_init_vma_allocators to fail for long vm name */
ret = nvgpu_vm_do_init(&g->mm, vm,
g->ops.mm.gmmu.get_default_big_page_size(),
low_hole, kernel_reserved, aperture_size,
big_pages, false, false,
"very_long_vm_name_to_fail_vm_init");
if (ret != -EINVAL) {
unit_err(m, "nvgpu_vm_do_init didn't fail as expected (12).\n");
ret = UNIT_FAIL;
goto exit;
}
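/*
* Note: the 33-character name above is assumed to overflow the fixed-size
* buffer in which nvgpu_vm_init_vma_allocators() composes its per-VMA
* allocator names, which is why -EINVAL (invalid argument) rather than
* -ENOMEM is the expected failure here.
*/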
@@ -1387,6 +1434,24 @@ int test_map_buf_gpu_va(struct unit_module *m,
goto exit;
}
/*
* Corner case: Timeout init fails in nvgpu_vm_unmap
*/
ret = map_buffer(m,
g,
vm,
NULL,
BUF_CPU_PA,
gpu_va,
buf_size,
page_size,
alignment,
SPECIAL_CASE_TIMEOUT_INIT_FAIL);
if (ret != UNIT_SUCCESS) {
unit_err(m, "Mapping failed (already mapped case)\n");
goto exit;
}
/* Map 64KB buffer */
buf_size = SZ_64K;
page_size = SZ_64K;