Open source GPL/LGPL release

svcmobrel-release
2025-12-19 15:25:44 -08:00
commit 9fc87a7ec7
2261 changed files with 576825 additions and 0 deletions

View File

@@ -0,0 +1,26 @@
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
.SUFFIXES:
OBJS = bitmap_allocator.o
MODULE = bitmap_allocator
include ../../../Makefile.units

View File

@@ -0,0 +1,33 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################
NVGPU_UNIT_NAME=bitmap_allocator
include $(NV_COMPONENT_DIR)/../../../Makefile.units.common.interface.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,33 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################
NVGPU_UNIT_NAME=bitmap_allocator
include $(NV_COMPONENT_DIR)/../../../Makefile.units.common.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,394 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <unit/io.h>
#include <unit/unit.h>
#include <nvgpu/sizes.h>
#include <nvgpu/types.h>
#include <nvgpu/allocator.h>
#include <nvgpu/posix/kmem.h>
#include <nvgpu/posix/posix-fault-injection.h>
#include "common/mm/allocators/bitmap_allocator_priv.h"
#include "bitmap_allocator.h"
#define BA_DEFAULT_BASE SZ_1K
#define BA_DEFAULT_LENGTH (SZ_64K << 1)
#define BA_DEFAULT_BLK_SIZE SZ_1K
#define SZ_2K (SZ_1K << 1)
#define SZ_8K (SZ_4K << 1)
#define SZ_16K (SZ_4K << 2)
#define SZ_32K (SZ_64K >> 1)
static struct nvgpu_allocator *na;
int test_nvgpu_bitmap_allocator_critical(struct unit_module *m,
struct gk20a *g, void *args)
{
u64 base = BA_DEFAULT_BASE;
u64 length = BA_DEFAULT_LENGTH;
u64 blk_size = BA_DEFAULT_BLK_SIZE;
u64 flags = GPU_ALLOC_NO_ALLOC_PAGE;
u64 addr, addr1;
na = (struct nvgpu_allocator *)
nvgpu_kzalloc(g, sizeof(struct nvgpu_allocator));
if (na == NULL) {
unit_return_fail(m, "Could not allocate nvgpu_allocator\n");
}
if (nvgpu_allocator_init(g, na, NULL, "test_bitmap", base, length,
blk_size, 0ULL, flags, BITMAP_ALLOCATOR) != 0) {
nvgpu_kfree(g, na);
unit_return_fail(m, "bitmap_allocator init failed\n");
}
addr1 = na->ops->alloc(na, SZ_2K);
if (addr1 == 0) {
unit_err(m, "%d: couldn't allocate 2K bits\n", __LINE__);
goto fail;
}
addr = na->ops->alloc_fixed(na, SZ_4K, SZ_8K, SZ_1K);
if (addr == 0) {
unit_err(m, "%d: alloc_fixed failed to allocate 8K\n",
__LINE__);
goto fail;
}
/*
* Allocate 0 bytes at 64K.
* Note: the call still succeeds and 0 bytes are effectively allocated;
* validating the requested length is the caller's responsibility.
*/
addr = na->ops->alloc_fixed(na, SZ_64K, 0ULL, SZ_1K);
if (addr == 0) {
unit_err(m, "%d: alloc_fixed couldn't alloc 0 bytes at 64K\n",
__LINE__);
goto fail;
}
addr1 = na->ops->alloc(na, SZ_2K + 4);
if (addr1 == 0) {
unit_err(m, "%d: alloc failed to allocate 2052 bits\n",
__LINE__);
goto fail;
}
na->ops->free_alloc(na, addr1);
na->ops->free_fixed(na, SZ_4K, SZ_8K);
na->ops->fini(na);
nvgpu_kfree(g, na);
return UNIT_SUCCESS;
fail:
na->ops->fini(na);
nvgpu_kfree(g, na);
return UNIT_FAIL;
}
int test_nvgpu_bitmap_allocator_alloc(struct unit_module *m,
struct gk20a *g, void *args)
{
u64 alloc0, alloc3k, alloc4k, alloc_at64, addr, addr_fail;
struct nvgpu_posix_fault_inj *kmem_fi =
nvgpu_kmem_get_fault_injection();
/*
* len = 0
* Expect to fail
*/
alloc0 = na->ops->alloc(na, 0);
if (alloc0 != 0) {
unit_err(m, "ops->alloc allocated with len = 0\n");
}
alloc3k = na->ops->alloc(na, SZ_2K + 4);
if (alloc3k == 0) {
unit_return_fail(m, "couldn't allocate 2052 bits\n");
}
/*
* 2M is more than available for bitmap
* Expect to fail
*/
addr_fail = na->ops->alloc(na, (SZ_1M << 1));
if (addr_fail != 0) {
unit_return_fail(m,
"bitmap allocated more than available memory\n");
}
/* Fault injection at nvgpu_bitmap_store_alloc */
nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
addr_fail = na->ops->alloc(na, (SZ_1K << 1));
if (addr_fail != 0) {
unit_return_fail(m,
"ops->alloc allocated despite fault injection\n");
}
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
na->ops->free_alloc(na, alloc3k);
/* Freeing the same address again exercises the already-freed path */
na->ops->free_alloc(na, alloc3k);
alloc4k = na->ops->alloc(na, SZ_4K);
if (alloc4k == 0) {
unit_return_fail(m, "bitmap couldn't allocate 4K");
}
addr = na->ops->alloc(na, SZ_8K);
if (addr == 0) {
unit_return_fail(m, "bitmap couldn't allocate 8K");
}
addr = na->ops->alloc(na, SZ_16K);
if (addr == 0) {
unit_return_fail(m, "bitmap couldn't allocate 16K");
}
addr = na->ops->alloc(na, SZ_32K);
if (addr == 0) {
unit_return_fail(m, "bitmap couldn't allocate 32K");
}
/*
* Requesting at allocated base address
* Expect to fail
*/
addr_fail = na->ops->alloc_fixed(na, alloc4k, SZ_4K, SZ_1K);
if (addr_fail != 0) {
unit_return_fail(m,
"allocated at already occupied address\n");
}
/*
* Unaligned base
* Expect to fail
*/
addr_fail = na->ops->alloc_fixed(na, (SZ_64K + 1ULL), SZ_4K, SZ_1K);
if (addr_fail != 0) {
unit_return_fail(m,
"ops->alloc_fixed allocated with unaligned base\n");
}
alloc_at64 = na->ops->alloc_fixed(na, SZ_64K, (SZ_4K + 1ULL), SZ_1K);
if (alloc_at64 == 0) {
unit_return_fail(m,
"ops->alloc_fixed failed to allocate 4097 bytes\n");
}
/*
* Unaligned base
* Expect to fail
*/
if (!EXPECT_BUG(na->ops->free_fixed(na, (SZ_64K + 1ULL), SZ_4K))) {
unit_return_fail(m,
"freeing unaligned base didn't trigger BUG()\n");
}
na->ops->free_alloc(na, alloc4k);
/*
* Allocate 4K
* This allocation will require the bitmap allocator to find available
* space before next_blk.
*/
alloc4k = na->ops->alloc(na, SZ_4K);
if (alloc4k == 0) {
unit_return_fail(m, "bitmap couldn't allocate 4K");
}
na->ops->free_fixed(na, alloc_at64, (SZ_4K + 1ULL));
return UNIT_SUCCESS;
}
int test_nvgpu_bitmap_allocator_ops(struct unit_module *m,
struct gk20a *g, void *args)
{
u64 addr;
if (!na->ops->inited(na)) {
unit_return_fail(m, "bitmap ops->inited incorrect\n");
}
addr = na->ops->base(na);
if (addr != BA_DEFAULT_BASE) {
unit_return_fail(m, "bitmap ops->base incorrect\n");
}
addr = na->ops->length(na);
if (addr != BA_DEFAULT_LENGTH) {
unit_return_fail(m, "bitmap ops->length incorrect\n");
}
addr = na->ops->end(na);
if (addr != (BA_DEFAULT_BASE + BA_DEFAULT_LENGTH)) {
unit_return_fail(m, "bitmap ops->end incorrect\n");
}
return UNIT_SUCCESS;
}
int test_nvgpu_bitmap_allocator_destroy(struct unit_module *m,
struct gk20a *g, void *args)
{
na->ops->fini(na);
nvgpu_kfree(g, na);
return UNIT_SUCCESS;
}
int test_nvgpu_bitmap_allocator_init(struct unit_module *m,
struct gk20a *g, void *args)
{
u64 base = BA_DEFAULT_BASE;
u64 length = BA_DEFAULT_LENGTH;
u64 blk_size = BA_DEFAULT_BLK_SIZE;
u64 flags = 0ULL;
struct nvgpu_bitmap_allocator *ba;
struct nvgpu_posix_fault_inj *kmem_fi =
nvgpu_kmem_get_fault_injection();
na = (struct nvgpu_allocator *)
nvgpu_kzalloc(g, sizeof(struct nvgpu_allocator));
if (na == NULL) {
unit_return_fail(m, "Could not allocate nvgpu_allocator\n");
}
/* base = 0, length = 0, blk_size = 0 */
if (!EXPECT_BUG(nvgpu_allocator_init(g, na, NULL, "test_bitmap", 0ULL,
0ULL, 0ULL, 0ULL, flags, BITMAP_ALLOCATOR))) {
na->ops->fini(na);
unit_return_fail(m,
"bitmap inited despite blk_size = base = length = 0\n");
}
/*
* blk_size = 0
* Base and length cannot be aligned to a zero block size, so init
* triggers BUG().
*/
if (!EXPECT_BUG(nvgpu_allocator_init(g, na, NULL, "test_bitmap", base,
length, 0ULL, 0ULL, flags, BITMAP_ALLOCATOR))) {
unit_return_fail(m, "bitmap inited despite blk_size=0\n");
}
/* Odd blk_size */
if (!EXPECT_BUG(nvgpu_allocator_init(g, na, NULL, "test_bitmap", base,
length, 3ULL, 0ULL, flags, BITMAP_ALLOCATOR))) {
unit_return_fail(m, "bitmap inited despite odd blk_size\n");
}
/* length unaligned */
if (nvgpu_allocator_init(g, na, NULL, "test_bitmap", base, 0x0010,
blk_size, 0ULL, flags, BITMAP_ALLOCATOR) == 0) {
unit_return_fail(m, "bitmap init despite unaligned length\n");
}
/* base unaligned */
if (nvgpu_allocator_init(g, na, NULL, "test_bitmap", 0x0100, length,
blk_size, 0ULL, flags, BITMAP_ALLOCATOR) == 0) {
unit_return_fail(m, "bitmap init despite unaligned base\n");
}
/* base = 0 */
if (nvgpu_allocator_init(g, na, NULL, "test_bitmap", 0ULL, length,
blk_size, 0ULL, flags, BITMAP_ALLOCATOR) != 0) {
unit_return_fail(m, "bitmap init failed with base = 0\n");
} else {
ba = na->priv;
if (ba->base != ba->blk_size) {
na->ops->fini(na);
unit_return_fail(m, "bitmap init with base=0 "
"didn't update base = blk_size\n");
}
ba = NULL;
na->ops->fini(na);
}
/* length = 0 */
if (nvgpu_allocator_init(g, na, NULL, "test_bitmap", 0ULL, 0ULL,
blk_size, 0ULL, flags, BITMAP_ALLOCATOR) == 0) {
unit_return_fail(m, "bitmap inited with length = 0\n");
}
/* Fault injection at nvgpu_bitmap_allocator alloc */
nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
if (nvgpu_allocator_init(g, na, NULL, "test_bitmap", base, length,
blk_size, 0ULL, flags, BITMAP_ALLOCATOR) == 0) {
unit_return_fail(m, "bitmap inited despite "
"fault injection at nvgpu_bitmap_allocator alloc\n");
}
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
/* Fault injection at meta_data_cache create */
nvgpu_posix_enable_fault_injection(kmem_fi, true, 1);
if (nvgpu_allocator_init(g, na, NULL, "test_bitmap", base, length,
blk_size, 0ULL, flags, BITMAP_ALLOCATOR) == 0) {
unit_return_fail(m, "bitmap inited despite "
"fault injection at meta_data_cache\n");
}
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
/* Fault injection at bitmap create */
nvgpu_posix_enable_fault_injection(kmem_fi, true, 2);
if (nvgpu_allocator_init(g, na, NULL, "test_bitmap", base, length,
blk_size, 0ULL, flags, BITMAP_ALLOCATOR) == 0) {
unit_return_fail(m, "bitmap inited despite "
"fault injection at bitmap create\n");
}
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
/*
* Initialize bitmap allocator
* This ba will be used for further tests.
*/
if (nvgpu_allocator_init(g, na, NULL, "test_bitmap", base, length,
blk_size, 0ULL, flags, BITMAP_ALLOCATOR) != 0) {
unit_return_fail(m, "bitmap_allocator init failed\n");
}
return UNIT_SUCCESS;
}
struct unit_module_test bitmap_allocator_tests[] = {
/* BA initialized in this test is used by next tests */
UNIT_TEST(init, test_nvgpu_bitmap_allocator_init, NULL, 0),
/* These tests use bitmap allocator created in the first test */
UNIT_TEST(ops, test_nvgpu_bitmap_allocator_ops, NULL, 0),
UNIT_TEST(alloc, test_nvgpu_bitmap_allocator_alloc, NULL, 0),
UNIT_TEST(free, test_nvgpu_bitmap_allocator_destroy, NULL, 0),
/* Tests GPU_ALLOC_NO_ALLOC_PAGE operations by bitmap allocator */
UNIT_TEST(critical, test_nvgpu_bitmap_allocator_critical, NULL, 0),
};
UNIT_MODULE(bitmap_allocator, bitmap_allocator_tests, UNIT_PRIO_NVGPU_TEST);

View File

@@ -0,0 +1,168 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_BITMAP_ALLOCATOR_H
#define UNIT_BITMAP_ALLOCATOR_H
struct gk20a;
struct unit_module;
/** @addtogroup SWUTS-mm-allocators-bitmap-allocator
* @{
*
* Software Unit Test Specification for mm.allocators.bitmap_allocator
*/
/**
* Test specification for: test_nvgpu_bitmap_allocator_init
*
* Description: Initialize bitmap allocator.
*
* Test Type: Feature, Error injection
*
* Targets: nvgpu_bitmap_allocator_init, nvgpu_bitmap_check_argument_limits,
* nvgpu_allocator.ops.fini, nvgpu_alloc_to_gpu
*
* Input: None
*
* Steps:
* - Initialize bitmap allocator with the following characteristics.
* - 1K memory base address.
* - 128K length of memory.
* - 1K block size.
* - Use this bitmap allocator for the rest of the tests.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_bitmap_allocator_init(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_bitmap_allocator_ops
*
* Description: Check bitmap_allocator attribute values using allocator ops.
*
* Test Type: Feature
*
* Targets: nvgpu_allocator.ops.base, nvgpu_allocator.ops.length,
* nvgpu_allocator.ops.end, nvgpu_allocator.ops.inited
*
* Input: test_nvgpu_bitmap_allocator_init
*
* Steps:
* - Check bitmap_allocator attributes using allocator ops.
* - Execute allocator ops to read each attribute value.
* - Confirm that value is equal to the default values set during
* initialization.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_bitmap_allocator_ops(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_bitmap_allocator_alloc
*
* Description: Allocate various sizes of memory to test different scenarios.
*
* Test Type: Feature, Error injection
*
* Targets: nvgpu_allocator.ops.alloc, nvgpu_allocator.ops.free_alloc,
* nvgpu_allocator.ops.alloc_fixed, nvgpu_allocator.ops.free_fixed,
* nvgpu_bitmap_alloc_from_rbtree_node, bitmap_allocator,
* alloc_lock, alloc_unlock
*
* Input: test_nvgpu_bitmap_allocator_init
*
* Steps:
* - Allocate a ~3K memory segment using the allocation functions.
* - Confirm that the allocation is successful.
* - Allocate 2M, which is more than the available memory.
* - The allocation is expected to fail.
* - Allocate 4K, 8K, 16K and 32K memory segments.
* - Confirm all allocations are successful.
* - Allocate various memory segments using the fixed allocation functions.
* - Confirm allocations succeed or fail as expected.
* - Free the allocations.
* - Confirm the allocations are freed.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_bitmap_allocator_alloc(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_bitmap_allocator_destroy
*
* Description: Free memory used for bitmap allocator.
*
* Test Type: Other (clean up)
*
* Targets: nvgpu_allocator.ops.fini
*
* Input: test_nvgpu_bitmap_allocator_init
*
* Steps:
* - Free bitmap_allocator allocated for this unit test.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_bitmap_allocator_destroy(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_bitmap_allocator_critical
*
* Description: Test allocator functions for the bitmap allocator in the
* latency-critical path.
*
* Test Type: Feature
*
* Targets: nvgpu_allocator_init, nvgpu_bitmap_allocator_init,
* nvgpu_bitmap_check_argument_limits, nvgpu_allocator.ops.alloc,
* nvgpu_allocator.ops.free_alloc, nvgpu_allocator.ops.alloc_fixed,
* nvgpu_allocator.ops.free_fixed, nvgpu_allocator.ops.fini
*
* Input: None
*
* Steps:
* - Initialize allocator with the following characteristics.
* - 1K memory base address.
* - 128K memory length.
* - 1K block size.
* - GPU_ALLOC_NO_ALLOC_PAGE flag value.
* - Allocate memory segments using allocation functions.
* - Confirm allocations are successful.
* - Free allocated memory segments.
* - Free bitmap allocator used for this test.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_bitmap_allocator_critical(struct unit_module *m,
struct gk20a *g, void *args);
#endif /* UNIT_BITMAP_ALLOCATOR_H */

View File

@@ -0,0 +1,26 @@
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
.SUFFIXES:
OBJS = buddy_allocator.o
MODULE = buddy_allocator
include ../../../Makefile.units

View File

@@ -0,0 +1,33 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################
NVGPU_UNIT_NAME=buddy_allocator
include $(NV_COMPONENT_DIR)/../../../Makefile.units.common.interface.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,33 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################
NVGPU_UNIT_NAME=buddy_allocator
include $(NV_COMPONENT_DIR)/../../../Makefile.units.common.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

File diff suppressed because it is too large

View File

@@ -0,0 +1,290 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_BUDDY_ALLOCATOR_H
#define UNIT_BUDDY_ALLOCATOR_H
struct gk20a;
struct unit_module;
/** @addtogroup SWUTS-mm-allocators-buddy-allocator
* @{
*
* Software Unit Test Specification for mm.allocators.buddy_allocator
*/
/**
* Test specification for: test_nvgpu_buddy_allocator_init
*
* Description: Initialize buddy allocator.
*
* Test Type: Feature, Error injection
*
* Targets: nvgpu_allocator_init, nvgpu_buddy_allocator_init,
* nvgpu_buddy_check_argument_limits, nvgpu_buddy_set_attributes,
* balloc_allocator_align, balloc_compute_max_order, balloc_init_lists,
* balloc_max_order_in, balloc_get_order, balloc_get_order_list,
* nvgpu_allocator.ops.fini
*
* Input: None
*
* Steps:
* - Allocate memory for the nvgpu allocator.
* - Initialize the buddy allocator with different parameters to test the
* init function:
* - zero block size.
* - a block size that is not a power of 2.
* - max order greater than GPU_BALLOC_MAX_ORDER.
* - zero memory size.
* - base address zero.
* - unaligned base address.
* - Confirm the output of the above test cases is as expected.
* - Initialize buddy allocator with the following attributes (a sketch of
* this call follows the prototype below).
* - 4K base address.
* - 1M memory size.
* - 4K block size.
* - max order equal to GPU_BALLOC_MAX_ORDER.
* - flags set to 0.
* - NULL vm.
* - GVA space disabled.
* - This initialized allocator will be used for this unit test.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_buddy_allocator_init(struct unit_module *m,
struct gk20a *g, void *args);
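/*
 * Illustrative sketch only: the test source diff for this unit is
 * suppressed above, so this is an assumption based on the Steps, not the
 * actual code. It reuses the nvgpu_allocator_init() signature exercised
 * by the bitmap and nvgpu_allocator tests in this commit; the name
 * "test_buddy" is hypothetical.
 */
#if 0 /* example, not compiled */
static int example_buddy_init(struct gk20a *g, struct nvgpu_allocator *na)
{
	/* 4K base, 1M size, 4K block size, GPU_BALLOC_MAX_ORDER, no flags */
	return nvgpu_allocator_init(g, na, NULL, "test_buddy", SZ_4K, SZ_1M,
			SZ_4K, GPU_BALLOC_MAX_ORDER, 0ULL, BUDDY_ALLOCATOR);
}
#endif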
/**
* Test specification for: test_nvgpu_buddy_allocator_carveout
*
* Description: Test allocation of carveouts.
*
* Test Type: Feature
*
* Targets: nvgpu_allocator.ops.alloc, nvgpu_allocator.ops.reserve_carveout,
* nvgpu_allocator.ops.release_carveout,
* nvgpu_alloc_carveout_from_co_entry
*
* Input: test_nvgpu_buddy_allocator_init
*
* Steps:
* - Allocate a segment of memory as a carveout (see the sketch after the
* prototype below).
* - Use the reserve_carveout operation of the buddy allocator to portion
* out a segment of memory.
* - Confirm that the carveout is successful.
* - Test carveout allocation with the below variations.
* - Carveout base address less than buddy allocator base address.
* - Carveout length more than buddy allocator size.
* - Unaligned base address.
* - Reserve carveout after normal memory allocation.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_buddy_allocator_carveout(struct unit_module *m,
struct gk20a *g, void *args);
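/*
 * Illustrative sketch only (assumption; the test source diff is
 * suppressed above): reserving and releasing a carveout through the ops
 * table. The op names and the 0-on-success convention come from the
 * dummy-ops unit test in this commit; populating the carveout itself is
 * omitted because struct nvgpu_alloc_carveout's fields do not appear in
 * this commit view.
 */
#if 0 /* example, not compiled */
static int example_carveout(struct nvgpu_allocator *na,
		struct nvgpu_alloc_carveout *co)
{
	int err = na->ops->reserve_carveout(na, co);

	if (err == 0) {
		na->ops->release_carveout(na, co);
	}
	return err;
}
#endif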
/**
* Test specification for: test_nvgpu_buddy_allocator_basic_ops
*
* Description: Test buddy allocator attribute and allocation functions.
*
* Test Type: Feature
*
* Targets: nvgpu_allocator.ops.base, nvgpu_allocator.ops.length,
* nvgpu_allocator.ops.end, nvgpu_allocator.ops.inited,
* nvgpu_allocator.ops.space, nvgpu_allocator.ops.alloc,
* nvgpu_allocator.ops.alloc_pte, nvgpu_allocator.ops.free_alloc,
* nvgpu_allocator.ops.alloc_fixed, nvgpu_buddy_allocator_flag_ops,
* nvgpu_buddy_from_buddy_entry, balloc_base_shift, buddy_allocator,
* balloc_base_unshift, balloc_owner, balloc_order_to_len, alloc_lock,
* alloc_unlock, nvgpu_alloc_to_gpu, nvgpu_buddy_from_rbtree_node,
* nvgpu_fixed_alloc_from_rbtree_node
*
* Input: test_nvgpu_buddy_allocator_init
*
* Steps:
* - Check buddy allocator attribute values (see the sketch after the
* prototype below).
* - Use buddy allocator ops to check base, length, end and space values.
* - Confirm that the values match the init values.
* - Test memory allocation functions.
* - Use alloc and alloc_fixed ops to allocate chunks of memory.
* - Confirm allocation is successful.
* - Test allocation ops with the below variations.
* - Zero length memory segment.
* - Allocate more than available memory.
* - Unaligned base.
* - Base address equal to previously assigned carveout base.
* - Zero PTE size.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_buddy_allocator_basic_ops(struct unit_module *m,
struct gk20a *g, void *args);
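/*
 * Illustrative sketch only (assumption based on the Steps above):
 * checking the attribute ops against the init values, in the same style
 * as test_nvgpu_bitmap_allocator_ops in this commit.
 */
#if 0 /* example, not compiled */
static bool example_check_attributes(struct nvgpu_allocator *na,
		u64 base, u64 length)
{
	/* All four attribute ops should reflect the init parameters. */
	return na->ops->inited(na) &&
		(na->ops->base(na) == base) &&
		(na->ops->length(na) == length) &&
		(na->ops->end(na) == (base + length));
}
#endif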
/**
* Test specification for: test_nvgpu_buddy_allocator_destroy
*
* Description: Free buddy allocator used for previous tests.
*
* Test Type: Other (cleanup)
*
* Targets: nvgpu_allocator.ops.fini
*
* Input: test_nvgpu_buddy_allocator_init
*
* Steps:
* - Free using buddy allocator fini ops
* - Free nvgpu allocator
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_buddy_allocator_destroy(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_buddy_allocator_alloc
*
* Description: Test cleanup branch of memory allocations.
*
* Test Type: Feature, Error injection
*
* Targets: nvgpu_allocator_init, nvgpu_allocator.ops.alloc,
* nvgpu_allocator.ops.alloc_fixed, nvgpu_buddy_allocator_flag_ops,
* nvgpu_allocator.ops.fini
*
* Input: None
*
* Steps:
* - Allocate nvgpu and buddy allocator for this test
* - 4K base address.
* - 1M memory size.
* - 1K block size.
* - Zero max order.
* - flags set to 0.
* - NULL vm.
* - GVA space disabled.
* - Inject faults at specific steps to test the alloc function's cleanup
* path (see the sketch after the prototype below).
* - Allocate a fixed memory segment when part of the memory is already
* allocated.
* - Use alloc_fixed to test whether such an allocation succeeds.
* - Test the buddy allocator destroy function.
* - Increase the buddy, split buddy and alloced buddy list lengths one
* by one and check that the destroy function triggers BUG().
* - Free the nvgpu and buddy allocators used in this test.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_buddy_allocator_alloc(struct unit_module *m,
struct gk20a *g, void *args);
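/*
 * Illustrative sketch only (assumption): arming kmem fault injection
 * around an alloc call, following the exact pattern used by
 * test_nvgpu_bitmap_allocator_alloc in this commit.
 */
#if 0 /* example, not compiled */
static bool example_alloc_fails_under_fault(struct nvgpu_allocator *na)
{
	struct nvgpu_posix_fault_inj *kmem_fi =
		nvgpu_kmem_get_fault_injection();
	u64 addr;

	nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
	addr = na->ops->alloc(na, SZ_4K);
	nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);

	/* The allocation is expected to fail while the fault is armed. */
	return addr == 0ULL;
}
#endif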
/**
* Test specification for: test_buddy_allocator_with_small_pages
*
* Description: Test buddy allocator functions with big pages disabled.
*
* Test Type: Feature, Error injection
*
* Targets: nvgpu_allocator_init, nvgpu_buddy_allocator_init,
* nvgpu_buddy_check_argument_limits, nvgpu_allocator.ops.inited,
* nvgpu_buddy_set_attributes, nvgpu_allocator.ops.alloc_pte,
* nvgpu_allocator.ops.alloc_fixed, nvgpu_allocator.ops.fini
*
* Input: None
*
* Steps:
* - Initialize the vm environment with the following characteristics.
* - low_hole = 64K.
* - aperture_size = GK20A_PMU_VA_SIZE.
* - kernel_reserved = aperture_size - low_hole.
* - flags = GPU_ALLOC_GVA_SPACE, GVA space enabled.
* - userspace_managed = false, unified_va = false.
* - big_pages = false.
* - Initialize buddy allocator for this test.
* - Base address = 1K.
* - Allocator size = 1M.
* - Block size = 1K.
* - max order = 10.
* - GVA space enabled.
* - vm initialized in the previous step.
* - Test valid allocations with buddy allocator ops (a PTE-allocation
* sketch follows the prototype below).
* - Test alloc ops with the below variations.
* - Request more than the available memory.
* - Alloc with a size unaligned with respect to the PTE size.
* - Unusual page size.
* - Zero-length memory segment.
* - Inject faults to test cleanup code.
* - Free buddy allocator.
* - Free vm environment.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_buddy_allocator_with_small_pages(struct unit_module *m,
struct gk20a *g, void *args);
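/*
 * Illustrative sketch only (assumption): a PTE-granular allocation via
 * ops.alloc_pte, whose signature (allocator, length, page size) appears
 * in the dummy-ops unit test in this commit. The 4K page size matches
 * the small-page configuration described above; the length is arbitrary.
 */
#if 0 /* example, not compiled */
static u64 example_alloc_small_page_pte(struct nvgpu_allocator *na)
{
	/* Returns 0 on failure, like the other alloc ops in these tests. */
	return na->ops->alloc_pte(na, SZ_64K, (u32)SZ_4K);
}
#endif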
/**
* Test specification for: test_buddy_allocator_with_big_pages
*
* Description: Test buddy allocator functions with big pages enabled.
*
* Test Type: Feature
*
* Targets: nvgpu_allocator_init, nvgpu_buddy_allocator_init,
* nvgpu_buddy_check_argument_limits, nvgpu_buddy_set_attributes,
* nvgpu_allocator.ops.alloc_pte, nvgpu_allocator.ops.alloc_fixed,
* nvgpu_allocator.ops.alloc, nvgpu_allocator.ops.free_alloc,
* nvgpu_allocator.ops.fini
*
* Input: None
*
* Steps:
* - Initialize the vm environment with the following characteristics.
* - low_hole = 64K.
* - aperture_size = GK20A_PMU_VA_SIZE.
* - kernel_reserved = aperture_size - low_hole.
* - flags = GPU_ALLOC_GVA_SPACE, GVA space enabled.
* - userspace_managed = false, unified_va = false.
* - big_pages = true.
* - Initialize buddy allocator for this test.
* - Base address = 64M, PDE aligned.
* - Allocator size = 256M.
* - Block size = 4K.
* - max order = GPU_BALLOC_MAX_ORDER.
* - GVA space enabled.
* - vm initialized in the previous step.
* - Test valid allocations with buddy allocator ops.
* - Test alloc ops with the below variations.
* - Base address less than the buddy allocator base address.
* - Unusual page size.
* - Free buddy allocator.
* - Free vm environment.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_buddy_allocator_with_big_pages(struct unit_module *m,
struct gk20a *g, void *args);
#endif /* UNIT_BUDDY_ALLOCATOR_H */

View File

@@ -0,0 +1,26 @@
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
.SUFFIXES:
OBJS = nvgpu_allocator.o
MODULE = nvgpu_allocator
include ../../../Makefile.units

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=nvgpu_allocator
include $(NV_COMPONENT_DIR)/../../../Makefile.units.common.interface.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=nvgpu_allocator
include $(NV_COMPONENT_DIR)/../../../Makefile.units.common.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,356 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <string.h>
#include <unit/io.h>
#include <unit/unit.h>
#include <nvgpu/types.h>
#include <nvgpu/sizes.h>
#include <nvgpu/allocator.h>
#include "nvgpu_allocator.h"
#define OP_ALLOC 0
#define OP_FREE 1
#define OP_ALLOC_PTE 2
#define OP_ALLOC_FIXED 3
#define OP_FREE_FIXED 4
#define OP_RESERVE_CARVEOUT 5
#define OP_RELEASE_CARVEOUT 6
#define OP_BASE 7
#define OP_LENGTH 8
#define OP_END 9
#define OP_INITED 10
#define OP_SPACE 11
#define OP_NUMBER 12
static bool dummy_op_called[OP_NUMBER];
static const char *ops_str[] = {
"alloc",
"free_alloc",
"alloc_pte",
"alloc_fixed",
"free fixed",
"reserve_carveout",
"release_carveout",
"base",
"length",
"end",
"inited",
"space",
};
static u64 dummy_alloc(struct nvgpu_allocator *allocator, u64 len)
{
dummy_op_called[OP_ALLOC] = true;
return 0ULL;
}
static void dummy_free(struct nvgpu_allocator *allocator, u64 addr)
{
dummy_op_called[OP_FREE] = true;
}
static u64 dummy_alloc_pte(struct nvgpu_allocator *allocator, u64 len,
u32 page_size)
{
dummy_op_called[OP_ALLOC_PTE] = true;
return 0ULL;
}
static u64 dummy_alloc_fixed(struct nvgpu_allocator *allocator,
u64 base, u64 len, u32 page_size)
{
dummy_op_called[OP_ALLOC_FIXED] = true;
return 0ULL;
}
static void dummy_free_fixed(struct nvgpu_allocator *allocator,
u64 base, u64 len)
{
dummy_op_called[OP_FREE_FIXED] = true;
}
static int dummy_reserve_carveout(struct nvgpu_allocator *allocator,
struct nvgpu_alloc_carveout *co)
{
dummy_op_called[OP_RESERVE_CARVEOUT] = true;
return 0;
}
static void dummy_release_carveout(struct nvgpu_allocator *allocator,
struct nvgpu_alloc_carveout *co)
{
dummy_op_called[OP_RELEASE_CARVEOUT] = true;
}
static u64 dummy_base(struct nvgpu_allocator *allocator)
{
dummy_op_called[OP_BASE] = true;
return 0ULL;
}
static u64 dummy_length(struct nvgpu_allocator *allocator)
{
dummy_op_called[OP_LENGTH] = true;
return 0ULL;
}
static u64 dummy_end(struct nvgpu_allocator *allocator)
{
dummy_op_called[OP_END] = true;
return 0ULL;
}
static bool dummy_inited(struct nvgpu_allocator *allocator)
{
dummy_op_called[OP_INITED] = true;
return false;
}
static u64 dummy_space(struct nvgpu_allocator *allocator)
{
dummy_op_called[OP_SPACE] = true;
return 0ULL;
}
static void dummy_fini(struct nvgpu_allocator *allocator)
{
}
static struct nvgpu_allocator_ops dummy_ops = {
.alloc = dummy_alloc,
.free_alloc = dummy_free,
.alloc_pte = dummy_alloc_pte,
.alloc_fixed = dummy_alloc_fixed,
.free_fixed = dummy_free_fixed,
.reserve_carveout = dummy_reserve_carveout,
.release_carveout = dummy_release_carveout,
.base = dummy_base,
.length = dummy_length,
.end = dummy_end,
.inited = dummy_inited,
.space = dummy_space,
.fini = dummy_fini
};
int test_nvgpu_alloc_ops_present(struct unit_module *m,
struct gk20a *g, void *args)
{
u32 i;
int err;
bool failed;
struct nvgpu_allocator a;
memset(dummy_op_called, 0, sizeof(dummy_op_called));
err = nvgpu_alloc_common_init(&a, NULL, "test",
NULL, false, &dummy_ops);
if (err)
unit_return_fail(m, "Unexpected common_init() fail!\n");
/*
* Now that we have the allocator just call all the alloc functions and
* make sure that the associated bool is true.
*/
nvgpu_alloc(&a, 0UL);
nvgpu_alloc_pte(&a, 0UL, 0U);
nvgpu_alloc_fixed(&a, 0UL, 0UL, 0U);
nvgpu_free(&a, 0UL);
nvgpu_free_fixed(&a, 0UL, 0UL);
nvgpu_alloc_reserve_carveout(&a, NULL);
nvgpu_alloc_release_carveout(&a, NULL);
nvgpu_alloc_base(&a);
nvgpu_alloc_length(&a);
nvgpu_alloc_end(&a);
nvgpu_alloc_initialized(&a);
nvgpu_alloc_space(&a);
failed = false;
for (i = 0; i < OP_NUMBER; i++) {
if (!dummy_op_called[i]) {
failed = true;
unit_info(m, "%s did not call op function!\n",
ops_str[i]);
}
}
if (failed)
unit_return_fail(m, "OPs uncalled!\n");
/*
* Next, make sure that if the ops are NULL we don't crash or otherwise
* misbehave.
*
* Note that not all ops have NULL checks; those without checks are
* skipped in this unit test.
*/
memset(dummy_op_called, 0, sizeof(dummy_op_called));
memset(&dummy_ops, 0, sizeof(dummy_ops));
nvgpu_alloc_fixed(&a, 0UL, 0UL, 0U);
nvgpu_free_fixed(&a, 0UL, 0UL);
nvgpu_alloc_reserve_carveout(&a, NULL);
nvgpu_alloc_release_carveout(&a, NULL);
nvgpu_alloc_base(&a);
nvgpu_alloc_length(&a);
nvgpu_alloc_end(&a);
nvgpu_alloc_initialized(&a);
nvgpu_alloc_space(&a);
failed = false;
for (i = 0; i < OP_NUMBER; i++) {
if (dummy_op_called[i]) {
failed = true;
unit_info(m, "op function %s called despite null op!\n",
ops_str[i]);
}
}
if (failed)
unit_return_fail(m, "OPs called!\n");
return UNIT_SUCCESS;
}
int test_nvgpu_alloc_common_init(struct unit_module *m,
struct gk20a *g, void *args)
{
struct nvgpu_allocator a;
struct nvgpu_allocator_ops ops = { };
void *dummy_priv = (void *)0x10000;
void *dummy_g = (void *)0x1000;
if (nvgpu_alloc_common_init(NULL, NULL, NULL, NULL, false, NULL) == 0)
unit_return_fail(m, "Made NULL allocator!?\n");
/*
* Hit all the invalid ops struct criteria.
*/
if (nvgpu_alloc_common_init(&a, NULL, "test", NULL, false, &ops) == 0)
unit_return_fail(m, "common_init passes despite empty ops\n");
ops.alloc = dummy_alloc;
if (nvgpu_alloc_common_init(&a, NULL, "test", NULL, false, &ops) == 0)
unit_return_fail(m,
"common_init passes despite missing free(),fini()\n");
ops.free_alloc = dummy_free;
if (nvgpu_alloc_common_init(&a, NULL, "test", NULL, false, &ops) == 0)
unit_return_fail(m,
"common_init passes despite missing fini()\n");
ops.fini = dummy_fini;
if (0 != nvgpu_alloc_common_init(&a, dummy_g, "test",
dummy_priv, true, &ops))
unit_return_fail(m, "common_init should have passed\n");
/*
* Verify that the allocator struct actually is made correctly.
*/
if (a.g != dummy_g || a.priv != dummy_priv ||
a.debug != true || a.ops != &ops)
unit_return_fail(m, "Invalid data in allocator\n");
if (strcmp(a.name, "test") != 0)
unit_return_fail(m, "Invalid name in allocator\n");
return UNIT_SUCCESS;
}
int test_nvgpu_alloc_destroy(struct unit_module *m,
struct gk20a *g, void *args)
{
struct nvgpu_allocator a;
struct nvgpu_allocator zero_a = { };
struct nvgpu_allocator_ops ops = {
.alloc = dummy_alloc,
.free_alloc = dummy_free,
.fini = dummy_fini,
};
if (nvgpu_alloc_common_init(&a, NULL, "test", NULL, false, &ops) != 0)
unit_return_fail(m, "common_init failed with valid input\n");
nvgpu_alloc_destroy(&a);
if (memcmp(&a, &zero_a, sizeof(a)) != 0)
unit_return_fail(m, "Allocator has not been memset to 0\n");
return UNIT_SUCCESS;
}
int test_nvgpu_allocator_init(struct unit_module *m,
struct gk20a *g, void *args)
{
struct nvgpu_allocator a;
u64 base = SZ_4K;
u64 size = SZ_64K;
u64 blk_size = SZ_4K;
u64 max_order = 0;
u64 flags = 0ULL;
if (nvgpu_allocator_init(g, &a, NULL, "buddy", base, size, blk_size,
max_order, flags, BUDDY_ALLOCATOR) != 0) {
unit_return_fail(m, "failed to init buddy_allocator\n");
} else {
a.ops->fini(&a);
}
#ifdef CONFIG_NVGPU_DGPU
if (nvgpu_allocator_init(g, &a, NULL, "page", base, size, blk_size,
max_order, flags, PAGE_ALLOCATOR) != 0) {
unit_return_fail(m, "failed to init page_allocator\n");
} else {
a.ops->fini(&a);
}
#endif
if (nvgpu_allocator_init(g, &a, NULL, "bitmap", base, size, blk_size,
max_order, flags, BITMAP_ALLOCATOR) != 0) {
unit_return_fail(m, "failed to init bitmap_allocator\n");
} else {
a.ops->fini(&a);
}
/* Initialize invalid allocator */
if (nvgpu_allocator_init(g, &a, NULL, "invalid", base, size, blk_size,
max_order, flags, -1) != -EINVAL) {
unit_return_fail(m, "initialized invalid allocator\n");
}
return UNIT_SUCCESS;
}
struct unit_module_test nvgpu_allocator_tests[] = {
UNIT_TEST(common_init, test_nvgpu_alloc_common_init, NULL, 0),
UNIT_TEST(alloc_destroy, test_nvgpu_alloc_destroy, NULL, 0),
UNIT_TEST(alloc_ops, test_nvgpu_alloc_ops_present, NULL, 0),
UNIT_TEST(allocator_init, test_nvgpu_allocator_init, NULL, 0),
};
UNIT_MODULE(nvgpu_allocator, nvgpu_allocator_tests, UNIT_PRIO_NVGPU_TEST);

View File

@@ -0,0 +1,125 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_NVGPU_ALLOCATOR_H
#define UNIT_NVGPU_ALLOCATOR_H
struct gk20a;
struct unit_module;
/** @addtogroup SWUTS-mm-allocators-nvgpu-allocator
* @{
*
* Software Unit Test Specification for mm.allocators.nvgpu_allocator
*/
/**
* Test specification for: test_nvgpu_alloc_common_init
*
* Description: Test common_init() function
*
* Test Type: Feature
*
* Targets: nvgpu_alloc_common_init
*
* Input: None
*
* Steps:
* - Initialize nvgpu allocator with default ops values.
* - Confirm that the parameters passed to the function make their way into
* allocator struct.
* - Initialize nvgpu allocator for various invalid input cases.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_alloc_common_init(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_alloc_destroy
*
* Description: Test allocator destroy function
*
* Test Type: Feature
*
* Targets: nvgpu_alloc_common_init, nvgpu_alloc_destroy
*
* Input: None
*
* Steps:
* - Trigger allocator destroy function which further invokes fini() op.
* - Allocator struct should be completely zeroed after this function.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_alloc_destroy(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_alloc_ops_present
*
* Description: Test that the allocator wrappers invoke the corresponding ops
*
* Test Type: Feature
*
* Targets: nvgpu_alloc, nvgpu_alloc_pte, nvgpu_alloc_fixed, nvgpu_free_fixed,
* nvgpu_alloc_reserve_carveout, nvgpu_alloc_release_carveout,
* nvgpu_alloc_base, nvgpu_alloc_length, nvgpu_alloc_end, nvgpu_free,
* nvgpu_alloc_initialized, nvgpu_alloc_space
*
* Input: None
*
* Steps:
* - Test the logic for calling present ops.
* - Actual functionality of the ops should be verified by the respective
* allocator unit tests.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_alloc_ops_present(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_allocator_init
*
* Description: Test allocator init function
*
* Test Type: Feature
*
* Targets: nvgpu_allocator_init, nvgpu_alloc_destroy
*
* Input: None
*
* Steps:
* - Initialize each allocator and check that the allocator is created
* successfully.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_allocator_init(struct unit_module *m,
struct gk20a *g, void *args);
#endif /* UNIT_NVGPU_ALLOCATOR_H */

View File

@@ -0,0 +1,26 @@
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
.SUFFIXES:
OBJS = page_allocator.o
MODULE = page_allocator
include ../../../Makefile.units

View File

@@ -0,0 +1,33 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################
NVGPU_UNIT_NAME=page_allocator
include $(NV_COMPONENT_DIR)/../../../Makefile.units.common.interface.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,33 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################
NVGPU_UNIT_NAME=page_allocator
include $(NV_COMPONENT_DIR)/../../../Makefile.units.common.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,644 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <unit/io.h>
#include <unit/unit.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/sizes.h>
#include <nvgpu/types.h>
#include <nvgpu/allocator.h>
#include <nvgpu/posix/kmem.h>
#include <nvgpu/posix/posix-fault-injection.h>
#include <nvgpu/page_allocator.h>
#include "page_allocator.h"
#ifdef CONFIG_NVGPU_DGPU
#define BA_DEFAULT_BASE SZ_4K
#define BA_DEFAULT_LENGTH SZ_1M
#define BA_DEFAULT_BLK_SIZE SZ_4K
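/* Additional power-of-two sizes derived from the SZ_* constants in <nvgpu/sizes.h>. */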
#define SZ_2K (SZ_1K << 1)
#define SZ_8K (SZ_4K << 1)
#define SZ_16K (SZ_4K << 2)
#define SZ_32K (SZ_64K >> 1)
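/* Allocator under test, shared across the ordered tests in this module. */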
static struct nvgpu_allocator *na;
/*
 * @fault_enable - Enable/disable fault injection
 * @fault_at - If fault injection is enabled, the fault counter value at
 *             which the fault is injected; otherwise set to 0
 * @base - Base address of the allocation, for fixed allocations
 * @len - Length of memory to be allocated
 * @flags - Additional flags to be enabled
 * @ret_addr - Address returned by the allocation; later passed to free()
 * @expected_zero - Expected result of the test
 * @error_msg - Message displayed if the test fails
 */
struct test_parameters {
bool fault_enable;
u32 fault_at;
u64 base;
u64 len;
u64 flags;
u64 ret_addr;
bool expected_zero;
char *error_msg;
};
static struct test_parameters fault_at_alloc_cache = {
.fault_enable = true,
.fault_at = 0,
.base = 0ULL,
.len = SZ_32K,
.flags = 0ULL,
.expected_zero = true,
.error_msg = "alloced despite fault injection at alloc_cache",
};
static struct test_parameters fault_at_nvgpu_alloc = {
.fault_enable = true,
.fault_at = 1,
.base = 0ULL,
.len = SZ_32K,
.flags = 0ULL,
.expected_zero = true,
.error_msg = "alloced despite fault injection at nvgpu_alloc",
};
static struct test_parameters fault_at_sgl_alloc = {
.fault_enable = true,
.fault_at = 1,
.base = 0ULL,
.len = SZ_32K,
.flags = 0ULL,
.expected_zero = true,
.error_msg = "alloced despite fault injection at sgl alloc",
};
static struct test_parameters fault_at_page_cache = {
.fault_enable = true,
.fault_at = 2,
.base = 0ULL,
.len = SZ_32K,
.flags = 0ULL,
.expected_zero = true,
.error_msg = "alloced despite fault injection at page_cache",
};
static struct test_parameters first_simple_alloc_32K = {
.fault_enable = false,
.fault_at = 0,
.base = 0ULL,
.len = SZ_32K,
.flags = 0ULL,
.expected_zero = false,
.error_msg = "first instance of 32K alloc failed",
};
static struct test_parameters second_simple_alloc_32K = {
.fault_enable = false,
.fault_at = 0,
.base = 0ULL,
.len = SZ_32K,
.flags = 0ULL,
.expected_zero = false,
.error_msg = "second instance of 32K alloc failed",
};
static struct test_parameters third_simple_alloc_32K = {
.fault_enable = false,
.fault_at = 0,
.base = 0ULL,
.len = SZ_32K,
.flags = 0ULL,
.expected_zero = false,
.error_msg = "third instance of 32K alloc failed",
};
static struct test_parameters fourth_simple_alloc_32K = {
.fault_enable = false,
.fault_at = 0,
.base = 0ULL,
.len = SZ_32K,
.flags = 0ULL,
.expected_zero = false,
.error_msg = "fourth instance of 32K alloc failed",
};
static struct test_parameters failing_alloc_16K = {
.fault_enable = false,
.fault_at = 0,
.base = 0ULL,
.len = SZ_16K,
.flags = 0ULL,
.expected_zero = true,
.error_msg = "16K alloc is supposed to fail",
};
static struct test_parameters simple_alloc_8K = {
.fault_enable = false,
.fault_at = 0,
.base = 0ULL,
.len = SZ_8K,
.flags = 0ULL,
.expected_zero = false,
.error_msg = "8K alloc failed",
};
static struct test_parameters failing_alloc_8K = {
.fault_enable = false,
.fault_at = 0,
.base = SZ_64K,
.len = SZ_8K,
.flags = 0ULL,
.expected_zero = true,
.error_msg = "8K alloc supposed to fail",
};
static struct test_parameters alloc_no_scatter_gather = {
.fault_enable = false,
.fault_at = 0,
.base = SZ_64K,
.len = SZ_32K,
.flags = GPU_ALLOC_NO_SCATTER_GATHER,
.expected_zero = false,
.error_msg = "32K alloc failed with no_scatter_gather enabled",
};
static struct test_parameters simple_alloc_128K = {
.fault_enable = false,
.fault_at = 0,
.base = SZ_128K << 2,
.len = SZ_128K,
.flags = 0ULL,
.expected_zero = false,
.error_msg = "128K alloc failed",
};
static struct test_parameters alloc_contiguous = {
.fault_enable = false,
.fault_at = 0,
.base = 0ULL,
.len = SZ_128K << 2,
.flags = GPU_ALLOC_FORCE_CONTIG,
.expected_zero = true,
.error_msg = "contiguous alloc should have failed",
};
static struct test_parameters simple_alloc_512K = {
.fault_enable = false,
.fault_at = 0,
.base = 0ULL,
.len = SZ_128K << 2,
.flags = 0ULL,
.expected_zero = false,
.error_msg = "8K alloc failed",
};
static struct test_parameters alloc_more_than_available = {
.fault_enable = false,
.fault_at = 0,
.base = 0ULL,
.len = SZ_1M,
.flags = 0ULL,
.expected_zero = true,
.error_msg = "Allocated more than available memory",
};
int test_page_alloc(struct unit_module *m, struct gk20a *g, void *args)
{
struct test_parameters *param = (struct test_parameters *) args;
struct nvgpu_page_allocator *pa = page_allocator(na);
struct nvgpu_posix_fault_inj *kmem_fi =
nvgpu_kmem_get_fault_injection();
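	/*
	 * kmem fault injection, when enabled, fails the internal allocation
	 * selected by the fault_at counter (see test_parameters above).
	 */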
pa->flags |= param->flags;
nvgpu_posix_enable_fault_injection(kmem_fi,
param->fault_enable, param->fault_at);
param->ret_addr = na->ops->alloc(na, param->len);
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
pa->flags &= ~(param->flags);
if ((param->expected_zero && (param->ret_addr == 0)) ||
(!param->expected_zero && (param->ret_addr != 0))) {
return UNIT_SUCCESS;
} else {
unit_return_fail(m, "%s", param->error_msg);
}
}
int test_page_free(struct unit_module *m, struct gk20a *g, void *args)
{
struct test_parameters *param = (struct test_parameters *) args;
struct nvgpu_page_allocator *pa = page_allocator(na);
pa->flags |= param->flags;
na->ops->free_alloc(na, param->ret_addr);
pa->flags &= ~(param->flags);
return UNIT_SUCCESS;
}
int test_page_alloc_fixed(struct unit_module *m, struct gk20a *g, void *args)
{
struct test_parameters *param = (struct test_parameters *) args;
struct nvgpu_page_allocator *pa = page_allocator(na);
struct nvgpu_posix_fault_inj *kmem_fi =
nvgpu_kmem_get_fault_injection();
pa->flags |= param->flags;
nvgpu_posix_enable_fault_injection(kmem_fi,
param->fault_enable, param->fault_at);
	/* The page_size argument (SZ_4K) is ignored by alloc_fixed() */
param->ret_addr = na->ops->alloc_fixed(na,
param->base, param->len, SZ_4K);
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
pa->flags &= ~(param->flags);
if ((param->expected_zero && (param->ret_addr == 0)) ||
(!param->expected_zero && (param->ret_addr != 0))) {
return UNIT_SUCCESS;
} else {
unit_return_fail(m, "%s", param->error_msg);
}
}
int test_page_free_fixed(struct unit_module *m, struct gk20a *g, void *args)
{
struct test_parameters *param = (struct test_parameters *) args;
struct nvgpu_page_allocator *pa = page_allocator(na);
pa->flags |= param->flags;
na->ops->free_fixed(na, param->ret_addr, param->len);
pa->flags &= ~(param->flags);
return UNIT_SUCCESS;
}
int test_page_allocator_init_slabs(struct unit_module *m,
struct gk20a *g, void *args)
{
u64 base = SZ_64K;
u64 length = SZ_128K;
u64 blk_size = SZ_64K;
u64 flags = GPU_ALLOC_4K_VIDMEM_PAGES;
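	/* This flag makes the page allocator manage 4K pages via slabs. */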
struct nvgpu_posix_fault_inj *kmem_fi =
nvgpu_kmem_get_fault_injection();
na = (struct nvgpu_allocator *)
nvgpu_kzalloc(g, sizeof(struct nvgpu_allocator));
if (na == NULL) {
unit_return_fail(m, "Could not allocate nvgpu_allocator\n");
}
/* Fault injection at init_slabs */
nvgpu_posix_enable_fault_injection(kmem_fi, true, 3);
if (nvgpu_allocator_init(g, na, NULL, "test_page", base, length,
blk_size, 0ULL, flags, PAGE_ALLOCATOR) == 0) {
unit_return_fail(m,
"pa with slabs inited despite fault injection\n");
}
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
/*
* Expect to fail as blk_size is odd
*/
if (nvgpu_allocator_init(g, na, NULL, "test_page", base, length,
SZ_4K + 1ULL, 0ULL, flags, PAGE_ALLOCATOR) == 0) {
unit_return_fail(m,
"vidmem page allocator inited with odd blk_size\n");
}
if (nvgpu_allocator_init(g, na, NULL, "test_page", base, length,
SZ_4K, 0ULL, flags, PAGE_ALLOCATOR) != 0) {
unit_return_fail(m,
"vidmem page allocator inited with odd blk_size\n");
} else {
na->ops->fini(na);
}
/*
* Initialize page allocator
* This will be used for further tests.
*/
if (nvgpu_allocator_init(g, na, NULL, "test_page", base, length,
blk_size, 0ULL, flags, PAGE_ALLOCATOR) != 0) {
unit_return_fail(m, "init with slabs failed\n");
}
return UNIT_SUCCESS;
}
int test_page_allocator_sgt_ops(struct unit_module *m,
struct gk20a *g, void *args)
{
u64 addr;
void *sgl = NULL;
struct nvgpu_page_alloc *alloc = NULL;
addr = na->ops->alloc(na, SZ_32K);
if (addr == 0) {
unit_return_fail(m, "couldn't allocate 32K");
}
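	/*
	 * With scatter-gather enabled (the default here), the handle
	 * returned by alloc() is a pointer to the nvgpu_page_alloc
	 * metadata rather than a raw GPU address.
	 */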
/* Test page allocator sgt ops */
alloc = (struct nvgpu_page_alloc *) addr;
sgl = alloc->sgt.sgl;
if (alloc->sgt.ops->sgl_next(sgl) != NULL) {
unit_return_fail(m, "sgl_next should be NULL\n");
}
if (alloc->sgt.ops->sgl_phys(g, sgl) != alloc->base) {
unit_return_fail(m, "sgl_phys != base address\n");
}
if (alloc->sgt.ops->sgl_ipa(g, sgl) != alloc->base) {
unit_return_fail(m, "sgl_ipa != base address\n");
}
if (alloc->sgt.ops->sgl_dma(sgl) != alloc->base) {
unit_return_fail(m, "sgl_dma != base address\n");
}
if (alloc->sgt.ops->sgl_gpu_addr(g, sgl, NULL) != alloc->base) {
unit_return_fail(m, "sgl_gpu_addr != base address\n");
}
if (alloc->sgt.ops->sgl_ipa_to_pa(g, sgl, SZ_4K, NULL) != SZ_4K) {
unit_return_fail(m, "sgl_ipa_to_pa != SZ_4K\n");
}
if (alloc->sgt.ops->sgl_length(sgl) != SZ_32K) {
unit_return_fail(m, "sgl_length != SZ_32K\n");
}
alloc->sgt.ops->sgt_free(g, &alloc->sgt);
na->ops->free_alloc(na, addr);
return UNIT_SUCCESS;
}
int test_nvgpu_page_allocator_ops(struct unit_module *m,
struct gk20a *g, void *args)
{
u64 addr;
int err;
struct nvgpu_alloc_carveout test_co =
NVGPU_CARVEOUT("test_co", 0ULL, 0ULL);
test_co.base = BA_DEFAULT_BASE;
test_co.length = SZ_8K;
if (!na->ops->inited(na)) {
unit_return_fail(m, "ops not inited\n");
}
addr = na->ops->base(na);
if (addr != BA_DEFAULT_BASE) {
unit_return_fail(m, "base incorrect\n");
}
addr = na->ops->length(na);
if (addr != BA_DEFAULT_LENGTH) {
unit_return_fail(m, "length incorrect\n");
}
addr = na->ops->end(na);
if (addr != (BA_DEFAULT_BASE + BA_DEFAULT_LENGTH)) {
unit_return_fail(m, "end incorrect\n");
}
addr = na->ops->space(na);
if (addr == 0) {
unit_return_fail(m, "zero space allocated\n");
}
err = na->ops->reserve_carveout(na, &test_co);
if (err < 0) {
unit_return_fail(m, "couldn't reserve 8K carveout\n");
}
na->ops->release_carveout(na, &test_co);
addr = na->ops->alloc(na, SZ_32K);
if (addr == 0) {
unit_return_fail(m, "couldn't allocate 32K");
}
err = na->ops->reserve_carveout(na, &test_co);
if (err == 0) {
unit_return_fail(m, "reserved carveout after alloc\n");
}
na->ops->free_alloc(na, addr);
return UNIT_SUCCESS;
}
int test_nvgpu_page_allocator_destroy(struct unit_module *m,
struct gk20a *g, void *args)
{
na->ops->fini(na);
if (na->priv != NULL) {
unit_return_fail(m, "page allocator destroy failed\n");
}
nvgpu_kfree(g, na);
return UNIT_SUCCESS;
}
int test_nvgpu_page_allocator_init(struct unit_module *m,
struct gk20a *g, void *args)
{
u64 base = BA_DEFAULT_BASE;
u64 length = BA_DEFAULT_LENGTH;
u64 blk_size = BA_DEFAULT_BLK_SIZE;
u64 flags = 0ULL;
struct nvgpu_posix_fault_inj *kmem_fi =
nvgpu_kmem_get_fault_injection();
na = (struct nvgpu_allocator *)
nvgpu_kzalloc(g, sizeof(struct nvgpu_allocator));
if (na == NULL) {
unit_return_fail(m, "Could not allocate nvgpu_allocator\n");
}
/*
* expect to fail as blk_size < SZ_4K
*/
if (nvgpu_allocator_init(g, na, NULL, "test_page", base, length,
0ULL, 0ULL, flags, PAGE_ALLOCATOR) == 0) {
unit_return_fail(m, "inited despite blk_size = 0\n");
}
/* Fault injection at nvgpu_page_allocator allocation */
nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
if (nvgpu_allocator_init(g, na, NULL, "test_page", base, length,
blk_size, 0ULL, flags, PAGE_ALLOCATOR) == 0) {
unit_return_fail(m,
"inited despite fault injection at page_allocator\n");
}
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
/* Fault injection at alloc_cache */
nvgpu_posix_enable_fault_injection(kmem_fi, true, 1);
if (nvgpu_allocator_init(g, na, NULL, "test_page", base, length,
blk_size, 0ULL, flags, PAGE_ALLOCATOR) == 0) {
unit_return_fail(m,
"inited despite fault injection at alloc_cache\n");
}
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
/* Fault injection at slab_page_cache */
nvgpu_posix_enable_fault_injection(kmem_fi, true, 2);
if (nvgpu_allocator_init(g, na, NULL, "test_page", base, length,
blk_size, 0ULL, flags, PAGE_ALLOCATOR) == 0) {
unit_return_fail(m,
"inited despite fault injection at slab_page_cache\n");
}
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
/*
* expect to fail as blk_size is odd
*/
if (nvgpu_allocator_init(g, na, NULL, "test_page", base, length,
SZ_4K + 3ULL, 0ULL, flags, PAGE_ALLOCATOR) == 0) {
unit_return_fail(m, "inited despite odd blk_size\n");
}
/* base = 0 */
if (nvgpu_allocator_init(g, na, NULL, "test_page", 0ULL, length,
blk_size, 0ULL, flags, PAGE_ALLOCATOR) != 0) {
unit_return_fail(m, "init failed with base = 0\n");
} else {
na->ops->fini(na);
}
/*
* Initialize page allocator
* This will be used for further tests.
*/
if (nvgpu_allocator_init(g, na, NULL, "test_page", base, length,
blk_size, 0ULL, flags, PAGE_ALLOCATOR) != 0) {
unit_return_fail(m, "init failed\n");
}
return UNIT_SUCCESS;
}
#endif
struct unit_module_test page_allocator_tests[] = {
#ifdef CONFIG_NVGPU_DGPU
/* These tests create and evaluate page_allocator w/o 4K VIDMEM pages */
UNIT_TEST(init, test_nvgpu_page_allocator_init, NULL, 0),
UNIT_TEST(ops, test_nvgpu_page_allocator_ops, NULL, 0),
UNIT_TEST(sgt_ops, test_page_allocator_sgt_ops, NULL, 0),
/* Below tests examine page allocation */
	/*
	 * NOTE: The test order should not be changed. Earlier tests set up
	 * the memory allocation arrangement required by later tests.
	 */
/* These tests check execution with fault injection at various locations */
UNIT_TEST(fixed_alloc_fault_at_alloc_cache, test_page_alloc_fixed, (void *) &fault_at_alloc_cache, 0),
UNIT_TEST(fixed_alloc_fault_at_sgl_alloc, test_page_alloc_fixed, (void *) &fault_at_sgl_alloc, 0),
UNIT_TEST(alloc_fault_at_alloc_cache, test_page_alloc, (void *) &fault_at_alloc_cache, 0),
UNIT_TEST(alloc_fault_at_nvgpu_alloc, test_page_alloc, (void *) &fault_at_nvgpu_alloc, 0),
/* Alloc some memory, this ensures fault injection at sgl alloc in next test */
UNIT_TEST(simple_32K_alloc, test_page_alloc, (void *) &first_simple_alloc_32K, 0),
UNIT_TEST(alloc_fault_at_sgl_alloc, test_page_alloc, (void *) &fault_at_sgl_alloc, 0),
/* Test different allocation scenarios using simple alloc function */
UNIT_TEST(alloc_no_scatter_gather, test_page_alloc, (void *) &alloc_no_scatter_gather, 0),
UNIT_TEST(free_no_scatter_gather, test_page_free, (void *) &alloc_no_scatter_gather, 0),
/* Second free call checks execution when address is NULL */
UNIT_TEST(free_no_scatter_gather_again, test_page_free, (void *) &alloc_no_scatter_gather, 0),
UNIT_TEST(free_32K_alloc, test_page_free, (void *) &first_simple_alloc_32K, 0),
UNIT_TEST(fixed_alloc_128K, test_page_alloc_fixed, (void *) &simple_alloc_128K, 0),
/* After previous allocations, contiguous 512K memory isn't available */
UNIT_TEST(contiguous_alloc_512K, test_page_alloc, (void *) &alloc_contiguous, 0),
UNIT_TEST(simple_alloc_512K, test_page_alloc, (void *) &simple_alloc_512K, 0),
UNIT_TEST(alloc_more_than_available, test_page_alloc, (void *) &alloc_more_than_available, 0),
UNIT_TEST(free_alloc_512K, test_page_free, (void *) &simple_alloc_512K, 0),
UNIT_TEST(alloc_fixed_no_scatter_gather, test_page_alloc_fixed, (void *) &alloc_no_scatter_gather, 0),
UNIT_TEST(free_fixed_no_scatter_gather, test_page_free_fixed, (void *) &alloc_no_scatter_gather, 0),
/* Second free call checks execution when address is NULL */
UNIT_TEST(free_fixed_no_scatter_gather_again, test_page_free_fixed, (void *) &alloc_no_scatter_gather, 0),
UNIT_TEST(free_fixed_128K, test_page_free_fixed, (void *) &simple_alloc_128K, 0),
UNIT_TEST(destroy, test_nvgpu_page_allocator_destroy, NULL, 0),
/* These tests create and evaluate page_allocator w/ 4K VIDMEM pages */
UNIT_TEST(init_slabs, test_page_allocator_init_slabs, NULL, 0),
/* Below tests examine slab allocation */
	/*
	 * NOTE: The test order should not be changed. Each test constructs
	 * the memory structures required by later tests.
	 */
/* These tests check execution with fault injection at various locations */
UNIT_TEST(slabs_fault_at_alloc_cache, test_page_alloc, (void *) &fault_at_alloc_cache, 0),
UNIT_TEST(slabs_fault_at_sgl_alloc, test_page_alloc, (void *) &fault_at_sgl_alloc, 0),
UNIT_TEST(slabs_fault_at_page_cache, test_page_alloc, (void *) &fault_at_page_cache, 0),
/* Test different allocation scenarios */
UNIT_TEST(add_partial_slab, test_page_alloc, (void *) &first_simple_alloc_32K, 0),
UNIT_TEST(add_full_slab, test_page_alloc, (void *) &second_simple_alloc_32K, 0),
UNIT_TEST(add_second_partial_slab, test_page_alloc, (void *) &third_simple_alloc_32K, 0),
UNIT_TEST(add_second_full_slab, test_page_alloc, (void *) &fourth_simple_alloc_32K, 0),
/* Note: No free memory available for allocation */
UNIT_TEST(fixed_alloc_8K, test_page_alloc_fixed, (void *) &failing_alloc_8K, 0),
/* Freeing allocated slabs, adds slabs to empty and free lists */
UNIT_TEST(revert_partial_slab, test_page_free, (void *) &fourth_simple_alloc_32K, 0),
UNIT_TEST(revert_second_partial_slab, test_page_free, (void *) &second_simple_alloc_32K, 0),
UNIT_TEST(add_empty_slab, test_page_free, (void *) &first_simple_alloc_32K, 0),
UNIT_TEST(free_slab, test_page_free, (void *) &third_simple_alloc_32K, 0),
UNIT_TEST(slabs_alloc_8K, test_page_alloc, (void *) &simple_alloc_8K, 0),
UNIT_TEST(slabs_alloc_32K, test_page_alloc, (void *) &first_simple_alloc_32K, 0),
/*
* Note: Page allocator has only 2 slabs.
* These are now allocated for 8K and 32K chunks
*/
UNIT_TEST(no_more_slabs, test_page_alloc, (void *) &failing_alloc_16K, 0),
UNIT_TEST(destroy_slabs, test_nvgpu_page_allocator_destroy, NULL, 0),
#endif
};
UNIT_MODULE(page_allocator, page_allocator_tests, UNIT_PRIO_NVGPU_TEST);

View File

@@ -0,0 +1,258 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_PAGE_ALLOCATOR_H
#define UNIT_PAGE_ALLOCATOR_H
struct gk20a;
struct unit_module;
/** @addtogroup SWUTS-mm-allocators-page-allocator
* @{
*
* Software Unit Test Specification for mm.allocators.page_allocator
*/
/**
* Test specification for: test_nvgpu_page_allocator_init
*
* Description: Initialize page allocator.
*
* Test Type: Feature, Error injection
*
* Targets: nvgpu_allocator_init, nvgpu_page_allocator_init,
* nvgpu_allocator.ops.fini
*
* Input: None
*
* Steps:
 * - Initialize the page allocator with the following characteristics:
 * - 4K memory base address.
 * - 1M length of memory.
 * - 4K block size.
 * - Check that page allocator initialization fails for scenarios such as an
 * odd block_size value and fault injection during memory allocation.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_page_allocator_init(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_page_allocator_ops
*
* Description: Test page allocator ops
*
* Test Type: Feature
*
* Targets: nvgpu_allocator.ops.alloc, nvgpu_allocator.ops.free_alloc,
* nvgpu_allocator.ops.reserve_carveout,
* nvgpu_allocator.ops.release_carveout, nvgpu_allocator.ops.base,
* nvgpu_allocator.ops.end, nvgpu_allocator.ops.length,
* nvgpu_allocator.ops.inited, nvgpu_allocator.ops.space
*
* Input: test_nvgpu_page_allocator_init
*
* Steps:
* - Check page_allocator attributes using allocator ops.
 * - Execute allocator ops to read attribute values.
* - Confirm that value is equal to the default values set during
* initialization.
* - Allocate carveout and confirm that allocation is successful. Check that
* carveout cannot be reserved after normal page allocation.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_page_allocator_ops(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_page_allocator_sgt_ops
*
* Description: Test page alloc sgt ops
*
* Test Type: Feature
*
* Targets: nvgpu_page_alloc.nvgpu_sgt.nvgpu_sgt_ops.sgl_next,
* nvgpu_page_alloc.nvgpu_sgt.nvgpu_sgt_ops.sgl_phys,
* nvgpu_page_alloc.nvgpu_sgt.nvgpu_sgt_ops.sgl_ipa,
* nvgpu_page_alloc.nvgpu_sgt.nvgpu_sgt_ops.sgl_ipa_to_pa,
* nvgpu_page_alloc.nvgpu_sgt.nvgpu_sgt_ops.sgl_dma,
* nvgpu_page_alloc.nvgpu_sgt.nvgpu_sgt_ops.sgl_length,
* nvgpu_page_alloc.nvgpu_sgt.nvgpu_sgt_ops.sgl_gpu_addr,
* nvgpu_page_alloc.nvgpu_sgt.nvgpu_sgt_ops.sgt_free
*
* Input: test_nvgpu_page_allocator_init
*
* Steps:
 * - Check allocated page attributes using sgt ops.
* - Confirm that allocation details are equal to values set during allocation.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_page_allocator_sgt_ops(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_page_alloc_fixed
*
* Description: Allocate memory at fixed address
*
* Test Type: Feature, Error injection
*
* Targets: nvgpu_allocator.ops.alloc_fixed
*
* Input: test_nvgpu_page_allocator_init or test_page_allocator_init_slabs,
* args (fault_at_alloc_cache, fault_at_sgl_alloc, simple_alloc_128K,
* alloc_no_scatter_gather, failing_alloc_8K)
*
* Steps:
 * - Allocate a chunk of memory at a fixed address as per test_parameters.
 * - Check that the result matches the expected output in test_parameters.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_page_alloc_fixed(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_page_alloc
*
* Description: Allocate memory using page allocator
*
* Test Type: Feature, Error injection
*
* Targets: nvgpu_allocator.ops.alloc
*
* Input: test_nvgpu_page_allocator_init or test_page_allocator_init_slabs,
* args (fault_at_alloc_cache, fault_at_nvgpu_alloc,
* first_simple_alloc_32K, fault_at_sgl_alloc, alloc_no_scatter_gather,
* alloc_contiguous, simple_alloc_512K, alloc_more_than_available,
* fault_at_page_cache, second_simple_alloc_32K, third_simple_alloc_32K,
* fourth_simple_alloc_32K, simple_alloc_8K, failing_alloc_16K)
*
* Steps:
 * - Allocate a chunk of memory at an arbitrary address as per test_parameters.
 * - Check that the result matches the expected output in test_parameters.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_page_alloc(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_page_free
*
* Description: Free allocated memory
*
* Test Type: Feature
*
* Targets: nvgpu_allocator.ops.free_alloc
*
* Input: test_nvgpu_page_allocator_init or test_page_allocator_init_slabs,
* args (alloc_no_scatter_gather, first_simple_alloc_32K,
* simple_alloc_512K, fourth_simple_alloc_32K, second_simple_alloc_32K,
* first_simple_alloc_32K, third_simple_alloc_32K)
*
* Steps:
 * - Free the allocated memory at the given address as per test_parameters.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_page_free(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_page_free_fixed
*
* Description: Free allocated page at given address
*
* Test Type: Feature
*
* Targets: nvgpu_allocator.ops.free_fixed
*
* Input: test_nvgpu_page_allocator_init, args (alloc_no_scatter_gather,
* simple_alloc_128K)
*
* Steps:
 * - Free the allocated memory at the fixed address as per test_parameters.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_page_free_fixed(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_page_allocator_init_slabs
*
* Description: Initialize page allocator with slabs.
*
* Test Type: Feature, Error injection
*
* Targets: nvgpu_allocator_init, nvgpu_page_allocator_init,
* nvgpu_page_alloc_init_slabs, nvgpu_allocator.ops.fini
*
* Input: None
*
* Steps:
 * - Initialize the page allocator with the following characteristics:
 * - 64K memory base address.
 * - 128K length of memory.
 * - 64K block size.
 * - Flags set to GPU_ALLOC_4K_VIDMEM_PAGES to enable slabs.
 * - Check that page allocator initialization fails for scenarios such as an
 * odd block_size value and fault injection during memory allocation.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_page_allocator_init_slabs(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_page_allocator_destroy
*
* Description: Destroy page allocator structure
*
* Test Type: Feature
*
* Targets: nvgpu_allocator.ops.fini
*
* Input: test_nvgpu_page_allocator_init or test_page_allocator_init_slabs
*
* Steps:
* - De-initialize page allocator structure.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_page_allocator_destroy(struct unit_module *m,
struct gk20a *g, void *args);
/**
* @}
*/
#endif /* UNIT_PAGE_ALLOCATOR_H */

View File

@@ -0,0 +1,26 @@
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
.SUFFIXES:
OBJS = as.o
MODULE = as
include ../../Makefile.units

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=as
include $(NV_COMPONENT_DIR)/../../Makefile.units.common.interface.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=as
include $(NV_COMPONENT_DIR)/../../Makefile.units.common.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

userspace/units/mm/as/as.c Normal file
View File

@@ -0,0 +1,323 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <unit/io.h>
#include <unit/unit.h>
#include <unit/core.h>
#include "as.h"
#include <nvgpu/posix/io.h>
#include "os/posix/os_posix.h"
#include "hal/mm/mm_gp10b.h"
#include "hal/mm/mm_gv11b.h"
#include "hal/mm/cache/flush_gk20a.h"
#include "hal/mm/cache/flush_gv11b.h"
#include "hal/mm/gmmu/gmmu_gp10b.h"
#include "hal/mm/gmmu/gmmu_gv11b.h"
#include "hal/mm/mmu_fault/mmu_fault_gv11b.h"
#include "hal/fb/fb_gm20b.h"
#include "hal/fb/fb_gp10b.h"
#include "hal/fb/fb_gv11b.h"
#include "hal/fb/fb_mmu_fault_gv11b.h"
#include "hal/fb/intr/fb_intr_gv11b.h"
#include "hal/fifo/ramin_gk20a.h"
#include "hal/fifo/ramin_gv11b.h"
#include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
#include <nvgpu/hw/gv11b/hw_flush_gv11b.h>
#include <nvgpu/as.h>
#include <nvgpu/nvgpu_common.h>
#include <nvgpu/posix/posix-fault-injection.h>
/*
* Each allocated as_share gets a unique, incrementing, global_id. Use the
* following global static to track the global_id and ensure they are
* correct.
*/
static int global_id_count;
/* Parameters to test standard cases of allocation */
static struct test_parameters test_64k_user_managed = {
.big_page_size = SZ_64K,
.small_big_split = (SZ_1G * 56ULL),
.flags = NVGPU_AS_ALLOC_USERSPACE_MANAGED,
.expected_error = 0
};
static struct test_parameters test_0k_user_managed = {
.big_page_size = 0,
.small_big_split = 0,
.flags = NVGPU_AS_ALLOC_USERSPACE_MANAGED,
.expected_error = 0
};
static struct test_parameters test_64k_unified_va = {
.big_page_size = SZ_64K,
.small_big_split = 0,
.flags = NVGPU_AS_ALLOC_UNIFIED_VA,
.expected_error = 0
};
static struct test_parameters test_64k_unified_va_enabled = {
.big_page_size = SZ_64K,
.small_big_split = 0,
.flags = 0,
.expected_error = 0,
.unify_address_spaces_flag = true
};
static struct test_parameters test_einval_user_managed = {
.big_page_size = 1,
.small_big_split = (SZ_1G * 56ULL),
.flags = NVGPU_AS_ALLOC_USERSPACE_MANAGED,
.expected_error = -EINVAL
};
static struct test_parameters test_notp2_user_managed = {
.big_page_size = SZ_64K-1,
.small_big_split = (SZ_1G * 56ULL),
.flags = NVGPU_AS_ALLOC_USERSPACE_MANAGED,
.expected_error = -EINVAL
};
/* Parameters to test corner cases and error handling */
static struct test_parameters test_64k_user_managed_as_fail = {
.big_page_size = SZ_64K,
.small_big_split = (SZ_1G * 56ULL),
.flags = 0,
.expected_error = -ENOMEM,
.special_case = SPECIAL_CASE_AS_MALLOC_FAIL
};
static struct test_parameters test_64k_user_managed_vm_fail = {
.big_page_size = SZ_64K,
.small_big_split = (SZ_1G * 56ULL),
.flags = 0,
.expected_error = -ENOMEM,
.special_case = SPECIAL_CASE_VM_INIT_FAIL
};
static struct test_parameters test_64k_user_managed_busy_fail_1 = {
.big_page_size = SZ_64K,
.small_big_split = (SZ_1G * 56ULL),
.flags = 0,
.expected_error = -ENODEV,
.special_case = SPECIAL_CASE_GK20A_BUSY_ALLOC
};
static struct test_parameters test_64k_user_managed_busy_fail_2 = {
.big_page_size = SZ_64K,
.small_big_split = (SZ_1G * 56ULL),
.flags = 0,
.expected_error = 0,
.special_case = SPECIAL_CASE_GK20A_BUSY_RELEASE
};
/*
 * Init the minimum set of HALs to use DMA and GMMU features, then call the
* init_mm base function.
*/
int test_init_mm(struct unit_module *m, struct gk20a *g, void *args)
{
int err;
struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g);
p->mm_is_iommuable = true;
g->ops.mm.gmmu.get_default_big_page_size =
nvgpu_gmmu_default_big_page_size;
g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
g->ops.mm.gmmu.get_max_page_table_levels = gp10b_get_max_page_table_levels;
g->ops.mm.init_inst_block = gv11b_mm_init_inst_block;
g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;
g->ops.mm.gmmu.get_iommu_bit = gp10b_mm_get_iommu_bit;
g->ops.mm.gmmu.gpu_phys_addr = gv11b_gpu_phys_addr;
g->ops.mm.is_bar1_supported = gv11b_mm_is_bar1_supported;
g->ops.mm.cache.l2_flush = gv11b_mm_l2_flush;
g->ops.mm.cache.fb_flush = gk20a_mm_fb_flush;
#ifdef CONFIG_NVGPU_COMPRESSION
g->ops.fb.compression_page_size = gp10b_fb_compression_page_size;
#endif
g->ops.fb.tlb_invalidate = gm20b_fb_tlb_invalidate;
g->ops.ramin.init_pdb = gv11b_ramin_init_pdb;
g->ops.ramin.alloc_size = gk20a_ramin_alloc_size;
g->ops.fb.is_fault_buf_enabled = gv11b_fb_is_fault_buf_enabled;
g->ops.fb.read_mmu_fault_buffer_size =
gv11b_fb_read_mmu_fault_buffer_size;
g->ops.fb.init_hw = gv11b_fb_init_hw;
g->ops.fb.intr.enable = gv11b_fb_intr_enable;
g->ops.fb.ecc.init = NULL;
err = nvgpu_pd_cache_init(g);
if (err != 0) {
unit_return_fail(m, "pd cache initialization failed\n");
}
err = nvgpu_init_mm_support(g);
if (err != 0) {
unit_return_fail(m, "nvgpu_init_mm_support failed err=%d\n",
err);
}
/*
* Before ref_init calls to gk20a_as_alloc_share should immediately
* fail.
*/
err = gk20a_as_alloc_share(g, 0, 0, 0, 0, 0, NULL);
if (err != -ENODEV) {
unit_return_fail(m, "gk20a_as_alloc_share did not fail as expected err=%d\n",
err);
}
nvgpu_ref_init(&g->refcount);
return UNIT_SUCCESS;
}
int test_as_alloc_share(struct unit_module *m, struct gk20a *g, void *args)
{
struct gk20a_as_share *out;
int err;
struct nvgpu_posix_fault_inj *kmem_fi =
nvgpu_kmem_get_fault_injection();
struct nvgpu_posix_fault_inj *nvgpu_fi =
nvgpu_nvgpu_get_fault_injection();
struct test_parameters *params = (struct test_parameters *) args;
global_id_count++;
if (params->unify_address_spaces_flag) {
nvgpu_set_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES, true);
}
if (params->special_case == SPECIAL_CASE_AS_MALLOC_FAIL) {
nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
}
if (params->special_case == SPECIAL_CASE_VM_INIT_FAIL) {
nvgpu_posix_enable_fault_injection(kmem_fi, true, 1);
}
if (params->special_case == SPECIAL_CASE_GK20A_BUSY_ALLOC) {
nvgpu_posix_enable_fault_injection(nvgpu_fi, true, 0);
}
err = gk20a_as_alloc_share(g, params->big_page_size,
params->flags, (SZ_64K << 10), (1ULL << 37),
params->small_big_split, &out);
if (params->unify_address_spaces_flag) {
nvgpu_set_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES, false);
}
if (params->special_case == SPECIAL_CASE_AS_MALLOC_FAIL) {
/* The failure will cause the global_id not to be incremented */
global_id_count--;
}
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
nvgpu_posix_enable_fault_injection(nvgpu_fi, false, 0);
if (err != params->expected_error) {
unit_return_fail(m, "gk20a_as_alloc_share failed err=%d\n",
err);
} else if (err != 0) {
/* We got the expected error, no cleanup needed */
return UNIT_SUCCESS;
}
if (out->id != global_id_count) {
unit_return_fail(m, "unexpected out->id (%d)\n", out->id);
}
if (params->special_case == SPECIAL_CASE_GK20A_BUSY_RELEASE) {
nvgpu_posix_enable_fault_injection(nvgpu_fi, true, 0);
}
err = gk20a_as_release_share(out);
if (params->special_case == SPECIAL_CASE_GK20A_BUSY_RELEASE) {
nvgpu_posix_enable_fault_injection(nvgpu_fi, false, 0);
if (err != -ENODEV) {
unit_return_fail(m, "gk20a_as_release_share did not fail as expected err=%d\n", err);
}
} else if (err != 0) {
unit_return_fail(m, "gk20a_as_release_share failed err=%d\n",
err);
}
return UNIT_SUCCESS;
}
int test_gk20a_from_as(struct unit_module *m, struct gk20a *g, void *args)
{
int ret = UNIT_FAIL;
struct gk20a_as_share *out;
int err;
err = gk20a_as_alloc_share(g, SZ_64K, NVGPU_AS_ALLOC_USERSPACE_MANAGED,
(SZ_64K << 10), (1ULL << 37),
nvgpu_gmmu_va_small_page_limit(), &out);
if (err != 0) {
unit_return_fail(m, "gk20a_as_alloc_share failed err=%d\n",
err);
}
if (g != gk20a_from_as(out->as)) {
unit_err(m, "ptr mismatch in gk20a_from_as\n");
goto exit;
}
ret = UNIT_SUCCESS;
exit:
gk20a_as_release_share(out);
return ret;
}
struct unit_module_test nvgpu_mm_as_tests[] = {
UNIT_TEST(init, test_init_mm, NULL, 0),
UNIT_TEST(as_alloc_share_64k_um_as_fail, test_as_alloc_share,
(void *) &test_64k_user_managed_as_fail, 0),
UNIT_TEST(as_alloc_share_64k_um_vm_fail, test_as_alloc_share,
(void *) &test_64k_user_managed_vm_fail, 0),
UNIT_TEST(as_alloc_share_64k_um_busy_fail_1, test_as_alloc_share,
(void *) &test_64k_user_managed_busy_fail_1, 0),
UNIT_TEST(as_alloc_share_64k_um_busy_fail_2, test_as_alloc_share,
(void *) &test_64k_user_managed_busy_fail_2, 0),
UNIT_TEST(as_alloc_share_64k_um, test_as_alloc_share,
(void *) &test_64k_user_managed, 0),
UNIT_TEST(as_alloc_share_0k_um, test_as_alloc_share,
(void *) &test_0k_user_managed, 0),
UNIT_TEST(as_alloc_share_einval_um, test_as_alloc_share,
(void *) &test_einval_user_managed, 0),
UNIT_TEST(as_alloc_share_notp2_um, test_as_alloc_share,
(void *) &test_notp2_user_managed, 0),
UNIT_TEST(as_alloc_share_uva, test_as_alloc_share,
(void *) &test_64k_unified_va, 0),
UNIT_TEST(as_alloc_share_uva_enabled, test_as_alloc_share,
(void *) &test_64k_unified_va_enabled, 0),
UNIT_TEST(gk20a_from_as, test_gk20a_from_as, NULL, 0),
};
UNIT_MODULE(mm.as, nvgpu_mm_as_tests, UNIT_PRIO_NVGPU_TEST);

userspace/units/mm/as/as.h Normal file
View File

@@ -0,0 +1,176 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_MM_AS_H
#define UNIT_MM_AS_H
struct gk20a;
struct unit_module;
/** @addtogroup SWUTS-mm-as
* @{
*
* Software Unit Test Specification for mm.as
*/
/** Special case to cause the gk20a_as_share malloc to fail */
#define SPECIAL_CASE_AS_MALLOC_FAIL 1
/** Special case to cause the VM init to fail */
#define SPECIAL_CASE_VM_INIT_FAIL 2
/**
* Special case to cause the call to gk20a_busy to fail in gk20a_as_alloc_share
*/
#define SPECIAL_CASE_GK20A_BUSY_ALLOC 3
/**
* Special case to cause the call to gk20a_busy to fail in
* gk20a_as_release_share
*/
#define SPECIAL_CASE_GK20A_BUSY_RELEASE 4
/**
* Structure to hold various parameters for the test_as_alloc_share function.
*/
struct test_parameters {
/**
* Size of big pages
*/
int big_page_size;
/**
* Address for small big page vma split
*/
unsigned long long small_big_split;
/**
* Flags to use when calling gk20a_as_alloc_share. Should be one of the
* NVGPU_AS_ALLOC_* flag defined in as.h.
*/
int flags;
/**
* The expected error code from gk20a_as_alloc_share. Typically 0 if
* the test is expecting success, or a specific error value if it is
* expecting failure.
*/
int expected_error;
/**
* If true, enable NVGPU_MM_UNIFY_ADDRESS_SPACES before running
* gk20a_as_alloc_share (and disable it afterwards).
*/
bool unify_address_spaces_flag;
/**
* One of the SPECIAL_CASE_* values defined above, to trigger special
* corner cases. No special case if set to 0.
*/
int special_case;
};
/**
* Test specification for: test_init_mm
*
* Description: Test to initialize the mm.as environment.
*
* Test Type: Other (Init)
*
* Targets: nvgpu_init_mm_support, gk20a_as_alloc_share, nvgpu_ref_init
*
* Input: None
*
* Steps:
* - Set all the minimum HAL needed for the mm.as unit.
* - Call nvgpu_init_mm_support to initialize the mm subsystem and check the
* return code to ensure success.
* - Call gk20a_as_alloc_share with zeroed parameters and ensure that it
* returns -ENODEV as expected since g->refcount has not been initialized.
* - Initialize g->refcount.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_init_mm(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_as_alloc_share
*
* Description: The AS unit shall be able to allocate address spaces based on
* required flags, or report appropriate error codes in case of failures.
*
* Test Type: Feature
*
* Targets: gk20a_as_alloc_share, gk20a_as_release_share,
* gk20a_vm_release_share, gk20a_from_as
*
* Input:
* - The test_init_mm must have been executed
* - The args argument points to an instance of struct test_parameters that
* contains the flags to be used, any special cases if needed and the expected
* return value from gk20a_as_alloc_share.
*
* Steps:
* - Increment a global id counter used to track the allocations made later by
* calls to gk20a_as_alloc_share.
* - Test if a special case is requested in the arguments and act accordingly
* by either:
* - enabling the NVGPU_MM_UNIFY_ADDRESS_SPACES flag
* - enabling KMEM fault injection
* - enabling nvgpu fault injection (for gk20a_busy)
* - Call the gk20a_as_alloc_share with the flags and page size set in the
* arguments.
* - If needed disable the NVGPU_MM_UNIFY_ADDRESS_SPACES.
* - Disable all fault injection mechanisms.
* - Compare the return code of gk20a_as_alloc_share with the one expected from
* the test arguments.
* - If the call to gk20a_as_alloc_share was expected to succeed, compare the
* id of the allocated as with the global id counter to ensure they match.
* - Enable nvgpu fault injection if a special case is enabled.
* - Call the gk20a_as_release_share on the allocated as and collect its
* return value. Check the return value either for success or for an expected
* failure if fault injection was enabled.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_as_alloc_share(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_gk20a_from_as
*
* Description: Simple test to check gk20a_from_as.
*
* Test Type: Feature
*
* Targets: gk20a_from_as
*
* Input: None
*
* Steps:
* - Call gk20a_from_as with an 'as' pointer and ensure it returns a
 * pointer to g.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gk20a_from_as(struct unit_module *m, struct gk20a *g, void *args);
#endif /* UNIT_MM_AS_H */

View File

@@ -0,0 +1,26 @@
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
.SUFFIXES:
OBJS = dma.o
MODULE = dma
include ../../Makefile.units

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=dma
include $(NV_COMPONENT_DIR)/../../Makefile.units.common.interface.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=dma
include $(NV_COMPONENT_DIR)/../../Makefile.units.common.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,588 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "dma.h"
#include <unit/io.h>
#include <unit/unit.h>
#include <nvgpu/io.h>
#include <nvgpu/posix/io.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/types.h>
#include <nvgpu/sizes.h>
#include <nvgpu/mm.h>
#include <nvgpu/vm.h>
#include <nvgpu/dma.h>
#include <nvgpu/pramin.h>
#include <nvgpu/hw/gk20a/hw_pram_gk20a.h>
#include <nvgpu/hw/gk20a/hw_bus_gk20a.h>
#include "hal/bus/bus_gk20a.h"
#include "os/posix/os_posix.h"
#include "hal/mm/mm_gv11b.h"
#include "hal/mm/cache/flush_gk20a.h"
#include "hal/mm/cache/flush_gv11b.h"
#include "hal/mm/gmmu/gmmu_gp10b.h"
#include "hal/mm/gmmu/gmmu_gv11b.h"
#include "hal/fb/fb_gp10b.h"
#include "hal/fb/fb_gm20b.h"
#include "hal/fb/fb_gv11b.h"
#include "hal/fifo/ramin_gk20a.h"
#include "hal/fifo/ramin_gm20b.h"
#include "hal/fifo/ramin_gv11b.h"
#include "hal/pramin/pramin_init.h"
#include <nvgpu/posix/posix-fault-injection.h>
/* Arbitrary PA address for nvgpu_mem usage */
#define TEST_PA_ADDRESS 0xEFAD80000000
/* Create an 8MB VIDMEM area. PRAMIN has a 1MB window on this area */
#define VIDMEM_SIZE (8*SZ_1M)
static u32 *vidmem;
static bool is_PRAM_range(struct gk20a *g, u32 addr)
{
if ((addr >= pram_data032_r(0)) &&
(addr <= (pram_data032_r(0)+SZ_1M))) {
return true;
}
return false;
}
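/* Map a PRAM window offset onto an index into the vidmem backing array. */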
static u32 PRAM_get_u32_index(struct gk20a *g, u32 addr)
{
u32 index = addr % VIDMEM_SIZE;
	return index / sizeof(u32);
}
static u32 PRAM_read(struct gk20a *g, u32 addr)
{
return vidmem[PRAM_get_u32_index(g, addr)];
}
static void PRAM_write(struct gk20a *g, u32 addr, u32 value)
{
vidmem[PRAM_get_u32_index(g, addr)] = value;
}
/*
* Write callback (for all nvgpu_writel calls). If address belongs to PRAM
* range, route the call to our own handler, otherwise call the IO framework
*/
static void writel_access_reg_fn(struct gk20a *g,
struct nvgpu_reg_access *access)
{
if (is_PRAM_range(g, access->addr)) {
PRAM_write(g, access->addr - pram_data032_r(0), access->value);
} else {
nvgpu_posix_io_writel_reg_space(g, access->addr, access->value);
}
nvgpu_posix_io_record_access(g, access);
}
/*
* Read callback, similar to the write callback above.
*/
static void readl_access_reg_fn(struct gk20a *g,
struct nvgpu_reg_access *access)
{
if (is_PRAM_range(g, access->addr)) {
access->value = PRAM_read(g, access->addr - pram_data032_r(0));
} else {
access->value = nvgpu_posix_io_readl_reg_space(g, access->addr);
}
}
/*
* Define all the callbacks to be used during the test. Typically all
* write operations use the same callback, likewise for all read operations.
*/
static struct nvgpu_posix_io_callbacks pramin_callbacks = {
/* Write APIs all can use the same accessor. */
.writel = writel_access_reg_fn,
.writel_check = writel_access_reg_fn,
.bar1_writel = writel_access_reg_fn,
.usermode_writel = writel_access_reg_fn,
/* Likewise for the read APIs. */
.__readl = readl_access_reg_fn,
.readl = readl_access_reg_fn,
.bar1_readl = readl_access_reg_fn,
};
static void init_platform(struct unit_module *m, struct gk20a *g, bool is_iGPU)
{
if (is_iGPU) {
nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, true);
} else {
nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, false);
}
}
/*
 * Init the minimum set of HALs to use DMA and GMMU features, then call the
* init_mm base function.
*/
static int init_mm(struct unit_module *m, struct gk20a *g)
{
u64 low_hole, aperture_size;
struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g);
struct mm_gk20a *mm = &g->mm;
p->mm_is_iommuable = true;
if (!nvgpu_iommuable(g)) {
unit_return_fail(m, "Mismatch on nvgpu_iommuable\n");
}
g->ops.mm.gmmu.get_default_big_page_size =
nvgpu_gmmu_default_big_page_size;
g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
g->ops.mm.gmmu.get_max_page_table_levels = gp10b_get_max_page_table_levels;
g->ops.mm.init_inst_block = gv11b_mm_init_inst_block;
g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;
g->ops.mm.gmmu.get_iommu_bit = gp10b_mm_get_iommu_bit;
g->ops.mm.gmmu.gpu_phys_addr = gv11b_gpu_phys_addr;
g->ops.mm.is_bar1_supported = gv11b_mm_is_bar1_supported;
g->ops.mm.cache.l2_flush = gv11b_mm_l2_flush;
g->ops.mm.cache.fb_flush = gk20a_mm_fb_flush;
#ifdef CONFIG_NVGPU_COMPRESSION
g->ops.fb.compression_page_size = gp10b_fb_compression_page_size;
#endif
g->ops.fb.tlb_invalidate = gm20b_fb_tlb_invalidate;
g->ops.ramin.init_pdb = gv11b_ramin_init_pdb;
g->ops.ramin.alloc_size = gk20a_ramin_alloc_size;
if (g->ops.mm.is_bar1_supported(g)) {
unit_return_fail(m, "BAR1 is not supported on Volta+\n");
}
/*
* Initialize one VM space for system memory to be used throughout this
* unit module.
* Values below are similar to those used in nvgpu_init_system_vm()
*/
low_hole = SZ_4K * 16UL;
aperture_size = GK20A_PMU_VA_SIZE;
mm->pmu.aperture_size = GK20A_PMU_VA_SIZE;
g->ops.mm.get_default_va_sizes(NULL, &mm->channel.user_size,
&mm->channel.kernel_size);
mm->pmu.vm = nvgpu_vm_init(g,
g->ops.mm.gmmu.get_default_big_page_size(),
low_hole,
0ULL,
nvgpu_safe_sub_u64(aperture_size, low_hole),
0ULL,
true,
false,
false,
"system");
if (mm->pmu.vm == NULL) {
unit_return_fail(m, "nvgpu_vm_init failed\n");
}
mm->bar1.aperture_size = bar1_aperture_size_mb_gk20a() << 20;
mm->bar1.vm = nvgpu_vm_init(g,
g->ops.mm.gmmu.get_default_big_page_size(),
low_hole,
0ULL,
nvgpu_safe_sub_u64(mm->bar1.aperture_size, low_hole),
0ULL,
true, false, false,
"bar1");
if (mm->bar1.vm == NULL) {
unit_return_fail(m, "nvgpu_vm_init failed\n");
}
if (nvgpu_pd_cache_init(g) != 0) {
unit_return_fail(m, "pd cache initialization failed\n");
}
return UNIT_SUCCESS;
}
int test_mm_dma_init(struct unit_module *m, struct gk20a *g, void *args)
{
u64 debug_level = (u64)args;
g->log_mask = 0;
if (debug_level >= 1) {
g->log_mask = gpu_dbg_map;
}
if (debug_level >= 2) {
g->log_mask |= gpu_dbg_map_v;
}
if (debug_level >= 3) {
g->log_mask |= gpu_dbg_pte;
}
init_platform(m, g, true);
#ifdef CONFIG_NVGPU_DGPU
nvgpu_init_pramin(&g->mm);
#endif
/* Create the VIDMEM */
vidmem = (u32 *) malloc(VIDMEM_SIZE);
if (vidmem == NULL) {
return UNIT_FAIL;
}
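	/* Route all register accesses of this unit through the callbacks above. */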
nvgpu_posix_register_io(g, &pramin_callbacks);
#ifdef CONFIG_NVGPU_DGPU
/* Minimum HAL init for PRAMIN */
g->ops.bus.set_bar0_window = gk20a_bus_set_bar0_window;
nvgpu_pramin_ops_init(g);
unit_assert(g->ops.pramin.data032_r != NULL, return UNIT_FAIL);
#endif
/* Register space: BUS_BAR0 */
if (nvgpu_posix_io_add_reg_space(g, bus_bar0_window_r(), 0x100) != 0) {
free(vidmem);
return UNIT_FAIL;
}
if (init_mm(m, g) != 0) {
unit_return_fail(m, "nvgpu_init_mm_support failed\n");
}
return UNIT_SUCCESS;
}
/*
 * Helper function to create an nvgpu_mem for use throughout this unit.
*/
static struct nvgpu_mem *create_test_mem(void)
{
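	/*
	 * Note: malloc() is assumed to succeed in this controlled test
	 * environment, so the result is used unchecked. TEST_PA_ADDRESS is a
	 * fixed test physical address defined for this unit.
	 */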
struct nvgpu_mem *mem = malloc(sizeof(struct nvgpu_mem));
memset(mem, 0, sizeof(struct nvgpu_mem));
mem->size = SZ_4K;
mem->cpu_va = (void *) TEST_PA_ADDRESS;
return mem;
}
int test_mm_dma_alloc_flags(struct unit_module *m, struct gk20a *g, void *args)
{
int err;
struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g);
int result = UNIT_FAIL;
struct nvgpu_mem *mem = create_test_mem();
p->mm_is_iommuable = false;
p->mm_sgt_is_iommuable = false;
/* Force allocation in SYSMEM and READ_ONLY */
err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_READ_ONLY, SZ_4K, mem);
if (err != 0) {
unit_return_fail(m, "alloc failed, err=%d\n", err);
}
if (mem->aperture != APERTURE_SYSMEM) {
unit_err(m, "allocation not in SYSMEM\n");
goto end;
}
nvgpu_dma_free(g, mem);
/* Force allocation in SYSMEM and NVGPU_DMA_PHYSICALLY_ADDRESSED */
err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_PHYSICALLY_ADDRESSED,
SZ_4K, mem);
if (err != 0) {
unit_return_fail(m, "alloc failed, err=%d\n", err);
}
if (mem->aperture != APERTURE_SYSMEM) {
unit_err(m, "allocation not in SYSMEM\n");
goto end;
}
nvgpu_dma_free_sys(g, mem);
/* Force allocation in VIDMEM and READ_ONLY */
#ifdef CONFIG_NVGPU_DGPU
unit_info(m, "alloc_vid with READ_ONLY will cause a WARNING.");
err = nvgpu_dma_alloc_flags_vid(g, NVGPU_DMA_READ_ONLY, SZ_4K, mem);
if (err != 0) {
unit_return_fail(m, "alloc failed, err=%d\n", err);
}
if (mem->aperture != APERTURE_VIDMEM) {
unit_err(m, "allocation not in VIDMEM\n");
goto end;
}
nvgpu_dma_free(g, mem);
/* Force allocation in VIDMEM and NVGPU_DMA_PHYSICALLY_ADDRESSED */
unit_info(m, "alloc_vid PHYSICALLY_ADDRESSED will cause a WARNING.");
err = nvgpu_dma_alloc_flags_vid(g, NVGPU_DMA_PHYSICALLY_ADDRESSED,
SZ_4K, mem);
if (err != 0) {
unit_return_fail(m, "alloc failed, err=%d\n", err);
}
if (mem->aperture != APERTURE_VIDMEM) {
unit_err(m, "allocation not in VIDMEM\n");
goto end;
}
nvgpu_dma_free(g, mem);
#endif
result = UNIT_SUCCESS;
end:
nvgpu_dma_free(g, mem);
free(mem);
return result;
}
int test_mm_dma_alloc(struct unit_module *m, struct gk20a *g, void *args)
{
int err;
struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g);
int result = UNIT_FAIL;
struct nvgpu_mem *mem = create_test_mem();
p->mm_is_iommuable = false;
p->mm_sgt_is_iommuable = false;
/* iGPU mode so SYSMEM allocations by default */
init_platform(m, g, true);
err = nvgpu_dma_alloc(g, SZ_4K, mem);
if (err != 0) {
unit_return_fail(m, "alloc failed, err=%d\n", err);
}
if (mem->aperture != APERTURE_SYSMEM) {
unit_err(m, "allocation not in SYSMEM\n");
goto end;
}
nvgpu_dma_free(g, mem);
#ifdef CONFIG_NVGPU_DGPU
/* dGPU mode */
init_platform(m, g, false);
err = nvgpu_dma_alloc(g, SZ_4K, mem);
if (err != 0) {
unit_return_fail(m, "alloc failed, err=%d\n", err);
}
if (mem->aperture != APERTURE_VIDMEM) {
unit_err(m, "allocation not in VIDMEM\n");
goto end;
}
nvgpu_dma_free(g, mem);
#endif
/* Force allocation in SYSMEM */
err = nvgpu_dma_alloc_sys(g, SZ_4K, mem);
if (err != 0) {
unit_return_fail(m, "alloc failed, err=%d\n", err);
}
if (mem->aperture != APERTURE_SYSMEM) {
unit_err(m, "allocation not in SYSMEM\n");
goto end;
}
nvgpu_dma_free(g, mem);
#ifdef CONFIG_NVGPU_DGPU
/* Force allocation in VIDMEM */
init_platform(m, g, true);
err = nvgpu_dma_alloc_vid(g, SZ_4K, mem);
if (err != 0) {
unit_return_fail(m, "alloc failed, err=%d\n", err);
}
if (mem->aperture != APERTURE_VIDMEM) {
unit_err(m, "allocation not in VIDMEM\n");
goto end;
}
nvgpu_dma_free(g, mem);
#endif
#ifdef CONFIG_NVGPU_DGPU
/* Allocation at fixed address in VIDMEM */
init_platform(m, g, true);
err = nvgpu_dma_alloc_vid_at(g, SZ_4K, mem, 0x1000);
if (err != -ENOMEM) {
unit_err(m, "allocation did not fail as expected: %d\n", err);
goto end;
}
nvgpu_dma_free(g, mem);
#endif
result = UNIT_SUCCESS;
end:
nvgpu_dma_free(g, mem);
free(mem);
return result;
}
/*
* Test to target nvgpu_dma_alloc_map_* functions, testing allocations and GMMU
* mappings in SYSMEM or VIDMEM.
*/
int test_mm_dma_alloc_map(struct unit_module *m, struct gk20a *g, void *args)
{
int err;
struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g);
int result = UNIT_FAIL;
struct nvgpu_mem *mem = create_test_mem();
p->mm_is_iommuable = false;
p->mm_sgt_is_iommuable = false;
/* iGPU mode so SYSMEM allocations by default */
init_platform(m, g, true);
err = nvgpu_dma_alloc_map(g->mm.pmu.vm, SZ_4K, mem);
if (err != 0) {
unit_return_fail(m, "alloc failed, err=%d\n", err);
}
if (mem->aperture != APERTURE_SYSMEM) {
unit_err(m, "allocation not in SYSMEM\n");
goto end;
}
nvgpu_dma_unmap_free(g->mm.pmu.vm, mem);
#ifdef CONFIG_NVGPU_DGPU
/* dGPU mode */
mem->size = SZ_4K;
mem->cpu_va = (void *) TEST_PA_ADDRESS;
init_platform(m, g, false);
err = nvgpu_dma_alloc_map(g->mm.pmu.vm, SZ_4K, mem);
if (err != 0) {
unit_return_fail(m, "alloc failed, err=%d\n", err);
}
if (mem->aperture != APERTURE_VIDMEM) {
unit_err(m, "allocation not in VIDMEM\n");
goto end;
}
/*
	 * Mark the SGT as freed since the page_table code takes care of that
	 * in the VIDMEM case.
*/
mem->priv.sgt = NULL;
nvgpu_dma_unmap_free(g->mm.pmu.vm, mem);
#endif
/* Force allocation in SYSMEM */
mem->size = SZ_4K;
mem->cpu_va = (void *) TEST_PA_ADDRESS;
err = nvgpu_dma_alloc_map_sys(g->mm.pmu.vm, SZ_4K, mem);
if (err != 0) {
unit_return_fail(m, "alloc failed, err=%d\n", err);
}
if (mem->aperture != APERTURE_SYSMEM) {
unit_err(m, "allocation not in SYSMEM\n");
goto end;
}
mem->priv.sgt = NULL;
nvgpu_dma_unmap_free(g->mm.pmu.vm, mem);
#ifdef CONFIG_NVGPU_DGPU
/* Force allocation in VIDMEM */
mem->size = SZ_4K;
mem->cpu_va = (void *) TEST_PA_ADDRESS;
init_platform(m, g, true);
err = nvgpu_dma_alloc_map_vid(g->mm.pmu.vm, SZ_4K, mem);
if (err != 0) {
unit_return_fail(m, "alloc failed, err=%d\n", err);
}
if (mem->aperture != APERTURE_VIDMEM) {
unit_err(m, "allocation not in VIDMEM\n");
goto end;
}
mem->priv.sgt = NULL;
nvgpu_dma_unmap_free(g->mm.pmu.vm, mem);
#endif
result = UNIT_SUCCESS;
end:
nvgpu_dma_unmap_free(g->mm.pmu.vm, mem);
free(mem);
return result;
}
int test_mm_dma_alloc_map_fault_injection(struct unit_module *m,
struct gk20a *g, void *args)
{
int err;
struct nvgpu_posix_fault_inj *dma_fi;
struct nvgpu_posix_fault_inj *kmem_fi;
struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g);
int result = UNIT_FAIL;
struct nvgpu_mem *mem = create_test_mem();
p->mm_is_iommuable = false;
p->mm_sgt_is_iommuable = false;
/* iGPU mode so SYSMEM allocations by default */
init_platform(m, g, true);
	/* Enable DMA fault injection (0) to make nvgpu_dma_alloc_flags_sys fail */
dma_fi = nvgpu_dma_alloc_get_fault_injection();
nvgpu_posix_enable_fault_injection(dma_fi, true, 0);
err = nvgpu_dma_alloc_map(g->mm.pmu.vm, SZ_4K, mem);
if (err == 0) {
unit_err(m, "alloc did not fail as expected (1)\n");
nvgpu_dma_unmap_free(g->mm.pmu.vm, mem);
goto end_dma;
}
nvgpu_posix_enable_fault_injection(dma_fi, false, 0);
/*
	 * Enable kmem fault injection (counter 1) so that the DMA alloc succeeds
	 * but the subsequent nvgpu_gmmu_map call inside nvgpu_dma_alloc_map fails.
*/
kmem_fi = nvgpu_kmem_get_fault_injection();
nvgpu_posix_enable_fault_injection(kmem_fi, true, 1);
err = nvgpu_dma_alloc_map(g->mm.pmu.vm, SZ_4K, mem);
if (err == 0) {
unit_err(m, "alloc did not fail as expected (2)\n");
nvgpu_dma_unmap_free(g->mm.pmu.vm, mem);
goto end_kmem;
}
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
result = UNIT_SUCCESS;
end_kmem:
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
end_dma:
nvgpu_posix_enable_fault_injection(dma_fi, false, 0);
free(mem);
return result;
}
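/*
 * The init test must run first (see the test_mm_dma_init specification);
 * it is therefore listed first here.
 */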
struct unit_module_test nvgpu_mm_dma_tests[] = {
UNIT_TEST(init, test_mm_dma_init, (void *)0, 0),
UNIT_TEST(alloc, test_mm_dma_alloc, NULL, 0),
UNIT_TEST(alloc_flags, test_mm_dma_alloc_flags, NULL, 0),
UNIT_TEST(alloc_map, test_mm_dma_alloc_map, NULL, 0),
UNIT_TEST(alloc_map_fault_inj, test_mm_dma_alloc_map_fault_injection,
NULL, 0),
};
UNIT_MODULE(mm.dma, nvgpu_mm_dma_tests, UNIT_PRIO_NVGPU_TEST);

View File

@@ -0,0 +1,177 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_DMA_H
#define UNIT_DMA_H
struct gk20a;
struct unit_module;
/** @addtogroup SWUTS-mm-dma
* @{
*
* Software Unit Test Specification for mm.dma
*/
/**
* Test specification for: test_mm_dma_init
*
* Description: This test must be run once and be the first one as it
* initializes the MM subsystem.
*
* Test Type: Feature, Other (setup)
*
* Targets: nvgpu_vm_init, nvgpu_iommuable
*
* Input: None
*
* Steps:
* - Initialize the enabled flag NVGPU_MM_UNIFIED_MEMORY.
* - Allocate a test buffer to be used as VIDMEM.
* - Register test IO callbacks for PRAMIN.
* - Set the ops.bus.set_bar0_window HAL.
* - Set the ops.pramin.data032_r HAL.
* - Register the BUS_BAR0 test IO space.
* - Set all needed MM-related HALs.
* - Ensure that MM HAL indicates that BAR1 is not supported.
* - Create a test VM with big pages enabled.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_mm_dma_init(struct unit_module *m, struct gk20a *g, void *__args);
/**
* Test specification for: test_mm_dma_alloc
*
* Description: Test to target nvgpu_dma_alloc_* functions, testing automatic or
* forced allocations in SYSMEM or VIDMEM.
*
* Test Type: Feature
*
* Targets: nvgpu_dma_alloc_flags_sys, nvgpu_dma_free,
* nvgpu_dma_alloc_flags_vid, nvgpu_dma_alloc, nvgpu_dma_alloc_sys
*
* Input: test_mm_dma_init
*
* Steps:
* - Create a test nvgpu_mem instance (4 KB size with a static physical address)
* - Set memory interface as non IOMMU-able, iGPU and SYSMEM.
* - Create a DMA allocation on the nvgpu_mem instance and ensure it succeeds.
* - Ensure the allocated DMA has a SYSMEM aperture.
* - Free the allocation.
* - Perform the same DMA allocation but explicitly request it to be performed
* in SYSMEM. Ensure it succeeded and has a SYSMEM aperture.
* - Free the allocation.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_mm_dma_alloc(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_mm_dma_alloc_flags
*
* Description: Test to target nvgpu_dma_alloc_flags_* functions, testing
* several possible flags and SYSMEM/VIDMEM.
*
* Test Type: Feature
*
* Targets: nvgpu_dma_alloc_flags_sys, nvgpu_dma_free,
* nvgpu_dma_alloc_flags_vid, nvgpu_dma_free_sys, nvgpu_dma_alloc_flags
*
* Input: test_mm_dma_init
*
* Steps:
* - Create a test nvgpu_mem instance (4 KB size with a static physical address)
* - Set memory interface as non IOMMU-able, iGPU and SYSMEM.
* - Create a DMA allocation on the nvgpu_mem instance with a READ_ONLY flag and
* ensure it succeeds.
* - Ensure the allocated DMA has a SYSMEM aperture.
* - Free the allocation.
* - Perform the same DMA allocation with the NVGPU_DMA_PHYSICALLY_ADDRESSED
* flag. Ensure it succeeded and has a SYSMEM aperture.
* - Free the allocation.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_mm_dma_alloc_flags(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_mm_dma_alloc_map
*
* Description: Test to target nvgpu_dma_alloc_map_* functions, testing
* allocations and GMMU mappings in SYSMEM or VIDMEM.
*
* Test Type: Feature
*
* Targets: nvgpu_dma_alloc_map, nvgpu_dma_unmap_free, nvgpu_dma_alloc_map_sys,
* nvgpu_dma_alloc_map_vid, nvgpu_dma_alloc_map_flags,
* nvgpu_dma_alloc_map_flags_sys
*
* Input: test_mm_dma_init
*
* Steps:
* - Create a test nvgpu_mem instance (4 KB size with a static physical address)
* - Set memory interface as non IOMMU-able, iGPU and SYSMEM.
* - Create a DMA allocation on the nvgpu_mem instance and map it, then ensure
* it succeeds.
* - Ensure the allocated DMA has a SYSMEM aperture.
* - Free and unmap the allocation.
* - Perform the same DMA allocation/map but explicitly request it to be
* performed in SYSMEM. Ensure it succeeded and has a SYSMEM aperture.
* - Free and unmap the allocation.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_mm_dma_alloc_map(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_mm_dma_alloc_map_fault_injection
*
* Description: Test error handling branches in nvgpu_dma_alloc_map
*
* Test Type: Feature
*
* Targets: nvgpu_dma_alloc_map, nvgpu_dma_unmap_free
*
* Input: test_mm_dma_init
*
* Steps:
* - Create a test nvgpu_mem instance (4 KB size with a static physical address)
* - Set memory interface as non IOMMU-able, iGPU and SYSMEM.
* - Setup DMA fault injection to trigger at the next allocation.
* - Try to perform an allocation and map and ensure it failed as expected.
 * - Set up kmem fault injection to trigger on a subsequent allocation in
 *   order to target the nvgpu_gmmu_map call made by nvgpu_dma_alloc_map.
* - Try to perform an allocation and map and ensure it failed as expected.
* - Disable fault injection.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_mm_dma_alloc_map_fault_injection(struct unit_module *m,
struct gk20a *g, void *args);
/** @} */
#endif /* UNIT_DMA_H */

View File

@@ -0,0 +1,26 @@
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
.SUFFIXES:
OBJS = page_table.o
MODULE = page_table
include ../../../Makefile.units

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=page_table
include $(NV_COMPONENT_DIR)/../../../Makefile.units.common.interface.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=page_table
include $(NV_COMPONENT_DIR)/../../../Makefile.units.common.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

File diff suppressed because it is too large

View File

@@ -0,0 +1,345 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_PAGE_TABLE_H
#define UNIT_PAGE_TABLE_H
struct gk20a;
struct unit_module;
/** @addtogroup SWUTS-mm-gmmu-page_table
* @{
*
* Software Unit Test Specification for mm.gmmu.page_table
*/
/**
* Test specification for: test_nvgpu_gmmu_map_unmap_map_fail
*
 * Description: Test special corner cases causing map to fail, mostly to
 * cover error handling and some branches.
*
* Test Type: Feature, Error injection
*
* Targets: nvgpu_gmmu_map
*
* Input: args as an int to choose a supported scenario.
*
* Steps:
* - Instantiate a nvgpu_mem instance, with a known size and PA.
* - Depending on one of the supported scenario (passed as argument):
* - Enable fault injection to trigger a NULL SGT.
* - Enable fault injection to trigger a failure in pd_allocate().
* - Enable fault injection to trigger a failure in pd_allocate_children().
* - Set the VM PMU as guest managed to make __nvgpu_vm_alloc_va() fail.
* - Call the nvgpu_gmmu_map() function and ensure it failed as expected.
* - Disable error injection.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_gmmu_map_unmap_map_fail(struct unit_module *m, struct gk20a *g,
void *__args);
/**
* Test specification for: test_nvgpu_gmmu_map_unmap
*
* Description: This test does a simple map and unmap of a buffer. Several
* parameters can be changed and provided in the args. This test will also
* attempt to compare the data in PTEs to the parameters provided.
*
* Test Type: Feature
*
* Targets: nvgpu_gmmu_map_fixed, gops_mm.gops_mm_gmmu.map, nvgpu_gmmu_map,
* nvgpu_get_pte, gops_mm.gops_mm_gmmu.unmap, nvgpu_gmmu_unmap
*
* Input: args as a struct test_parameters to hold scenario and test parameters.
*
* Steps:
* - Instantiate a nvgpu_mem instance, with a known size and PA.
* - If scenario requires a fixed mapping, call nvgpu_gmmu_map_fixed() with a
* known fixed PA. Otherwise call nvgpu_gmmu_map().
* - Check that the mapping succeeded.
* - Check that the mapping is 4KB aligned.
* - Get the PTE from the mapping and ensure it actually exists.
* - Make sure the PTE is marked as valid.
* - Make sure the PTE matches the PA that was mapped.
* - Depending on the scenario's mapping flags, check RO and RW bits in PTE.
* - Depending on the scenario's privileged flag, check that the PTE is correct.
* - Depending on the scenario's cacheable flag, check that the PTE is correct.
* - Unmap the buffer.
* - Ensure that the PTE is now invalid.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_gmmu_map_unmap(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_nvgpu_gmmu_map_unmap_adv
*
* Description: Similar to test_nvgpu_gmmu_map_unmap but supports more advanced
* parameters and creates a test SGT.
*
* Test Type: Feature
*
* Targets: gops_mm.gops_mm_gmmu.map, nvgpu_gmmu_map_locked,
* gops_mm.gops_mm_gmmu.unmap, nvgpu_gmmu_unmap, gk20a_from_vm
*
* Input: args as a struct test_parameters to hold scenario and test parameters.
*
* Steps:
* - Instantiate a nvgpu_mem instance, with a known size and PA.
* - Create an SGT with a custom SGL.
* - Perform a mapping using the SGT and the parameters in argument.
* - Ensure the mapping succeeded and is 4KB-aligned.
* - Unmap the buffer.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_gmmu_map_unmap_adv(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_nvgpu_gmmu_map_unmap_batched
*
 * Description: This test uses the batch mode and maps 2 buffers. Then it
* checks that the flags in the batch structure were set correctly.
*
* Test Type: Feature
*
* Targets: nvgpu_gmmu_map_locked, nvgpu_gmmu_unmap, gops_mm.gops_mm_gmmu.unmap,
* nvgpu_gmmu_unmap_locked
*
* Input: args as a struct test_parameters to hold scenario and test parameters.
*
* Steps:
* - Instantiate 2 nvgpu_mem instances, with a known size and PA.
* - Create an SGT with a custom SGL.
* - Perform a mapping using the SGT, the parameters in argument and an instance
* of struct vm_gk20a_mapping_batch.
* - Repeat for the 2nd nvgpu_mem instance.
* - Ensure the need_tlb_invalidate of the batch is set as expected.
* - Reset the need_tlb_invalidate flag.
* - Unmap both buffers.
* - Ensure the need_tlb_invalidate of the batch is set as expected.
* - Ensure the gpu_l2_flushed of the batch is set as expected.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_gmmu_map_unmap_batched(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_nvgpu_page_table_c1_full
*
* Description: Test case to cover NVGPU-RQCD-45 C1.
*
* Test Type: Feature
*
* Targets: nvgpu_vm_init, nvgpu_gmmu_map, gops_mm.gops_mm_gmmu.map,
* nvgpu_gmmu_map_locked, gops_mm.gops_mm_gmmu.unmap, nvgpu_gmmu_unmap,
* nvgpu_vm_put
*
* Input: None
*
* Steps:
* - Create a test VM.
 * - Create a 64KB-aligned and a 4KB-aligned nvgpu_mem object.
* - Create an nvgpu_mem object with a custom SGL composed of blocks of length
* 4KB or 64KB.
* - For each of the nvgpu_mem objects:
* - Map the buffer.
* - Ensure mapping succeeded.
* - Ensure alignment is correct.
* - Ensure that the page table attributes are correct.
* - Unmap the buffer.
* - Ensure the corresponding PTE is not valid anymore.
* - Free the VM.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_page_table_c1_full(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_nvgpu_page_table_c2_full
*
* Description: Test case to cover NVGPU-RQCD-45 C2.
*
* Test Type: Feature
*
* Targets: nvgpu_vm_init, gops_mm.gops_mm_gmmu.map, nvgpu_gmmu_map_fixed,
* gops_mm.gops_mm_gmmu.unmap, nvgpu_gmmu_unmap, nvgpu_vm_put
*
* Input: None
*
* Steps:
* - Create a test VM.
* - Create a 64KB-aligned nvgpu_mem object.
* - Perform a fixed allocation, check properties and unmap.
* - Repeat the same operation to ensure it still succeeds (thereby ensuring
* the first unmap was done properly).
* - Change the nvgpu_mem object to be 4KB aligned.
* - Repeat the mapping/check/unmapping operation and check for success to
* ensure page markers were cleared properly during previous allocations.
* - Free the VM.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_page_table_c2_full(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_nvgpu_gmmu_init_page_table_fail
*
* Description: Test special corner cases causing nvgpu_gmmu_init_page_table
* to fail, mostly to cover error handling and some branches.
*
* Test Type: Error injection
*
* Targets: nvgpu_gmmu_init_page_table
*
* Input: None
*
* Steps:
* - Enable KMEM fault injection.
* - Call nvgpu_gmmu_init_page_table.
* - Disable KMEM fault injection.
* - Ensure that nvgpu_gmmu_init_page_table failed as expected.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_gmmu_init_page_table_fail(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_gmmu_set_pte
*
* Description: This test targets the nvgpu_set_pte() function by mapping a
* buffer, and then trying to alter the validity bit of the corresponding PTE.
*
* Test Type: Feature, Error injection
*
* Targets: nvgpu_get_pte, nvgpu_set_pte, nvgpu_pte_words
*
* Input: None
*
* Steps:
* - Map a test buffer (dynamic) and get the assigned GPU VA.
* - Ensure the mapping succeeded.
* - Check that nvgpu_pte_words returns the expected value (2).
* - Use nvgpu_get_pte to retrieve the PTE from the assigned GPU VA, ensure
* it is valid.
* - Call nvgpu_set_pte with an invalid address and ensure it failed.
* - Using nvgpu_set_pte, rewrite the PTE with the validity bit flipped and
* ensure it reports success.
* - Retrieve the PTE again, ensure it succeeds and then check that the PTE
* is invalid.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_gmmu_set_pte(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_gmmu_init
*
* Description: This test must be run once and be the first one as it
* initializes the MM subsystem.
*
* Test Type: Other (setup), Feature
*
* Targets: nvgpu_gmmu_init_page_table, nvgpu_vm_init
*
* Input: None
*
* Steps:
* - Set debug log masks if needed.
* - For iGPU, enable the following flags: NVGPU_MM_UNIFIED_MEMORY,
* NVGPU_USE_COHERENT_SYSMEM, NVGPU_SUPPORT_NVLINK
* - Setup all the needed HALs.
* - Create a test PMU VM to be used by other tests which will cause the
* nvgpu_gmmu_init_page_table function to be called.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_gmmu_init(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_gmmu_clean
*
* Description: This test should be the last one to run as it de-initializes
* components.
*
* Test Type: Other (cleanup)
*
* Targets: None
*
* Input: None
*
* Steps:
* - Set log mask to 0.
* - Call nvgpu_vm_put to remove the test VM.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_gmmu_clean(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_gmmu_perm_str
*
* Description: Tests all supported combinations of permissions on the
* nvgpu_gmmu_perm_str function.
*
* Test Type: Feature
*
* Targets: nvgpu_gmmu_perm_str
*
* Input: None
*
* Steps:
* - Call nvgpu_gmmu_perm_str with flag gk20a_mem_flag_none and ensure it
* returns "RW"
* - Call nvgpu_gmmu_perm_str with flag gk20a_mem_flag_write_only and ensure it
* returns "WO"
* - Call nvgpu_gmmu_perm_str with flag gk20a_mem_flag_read_only and ensure it
* returns "RO"
* - Call nvgpu_gmmu_perm_str with an invalid flag and ensure it
* returns "??"
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_gmmu_perm_str(struct unit_module *m, struct gk20a *g,
void *args);
/** @} */
#endif /* UNIT_PAGE_TABLE_H */

View File

@@ -0,0 +1,26 @@
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
.SUFFIXES:
OBJS = pd_cache.o
MODULE = pd_cache
include ../../../Makefile.units

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=pd_cache
include $(NV_COMPONENT_DIR)/../../../Makefile.units.common.interface.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=pd_cache
include $(NV_COMPONENT_DIR)/../../../Makefile.units.common.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,999 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "pd_cache.h"
#include <unit/io.h>
#include <unit/core.h>
#include <unit/unit.h>
#include <unit/unit-requirement-ids.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/gmmu.h>
#include <nvgpu/pd_cache.h>
#include <nvgpu/enabled.h>
#include <nvgpu/posix/dma.h>
#include <nvgpu/posix/kmem.h>
#include <nvgpu/posix/posix-fault-injection.h>
#include "common/mm/gmmu/pd_cache_priv.h"
#include "hal/mm/gmmu/gmmu_gp10b.h"
/*
* Direct allocs are allocs large enough to just pass straight on to the
 * DMA allocator. Basically that means the size of the PD is at least a page.
*/
struct pd_cache_alloc_direct_gen {
u32 bytes;
u32 nr;
u32 nr_allocs_before_free;
u32 nr_frees_before_alloc;
};
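/*
 * Example: with .nr = 1024, .nr_allocs_before_free = 16 and
 * .nr_frees_before_alloc = 15, the generic test below repeatedly allocs
 * 16 PDs and then frees the 15 most recent ones, so the number of live
 * PDs creeps up by one per cycle until all 1024 slots are in use. This
 * exercises partially occupied cache pages.
 */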
/*
* Direct alloc testing: i.e larger than a page allocs.
*/
static struct pd_cache_alloc_direct_gen alloc_direct_1xPAGE = {
.bytes = NVGPU_CPU_PAGE_SIZE,
.nr = 1U,
};
static struct pd_cache_alloc_direct_gen alloc_direct_1024xPAGE = {
.bytes = NVGPU_CPU_PAGE_SIZE,
.nr = 1024U,
};
static struct pd_cache_alloc_direct_gen alloc_direct_1x16PAGE = {
.bytes = 16U * NVGPU_CPU_PAGE_SIZE,
.nr = 1U,
};
static struct pd_cache_alloc_direct_gen alloc_direct_1024x16PAGE = {
.bytes = 16U * NVGPU_CPU_PAGE_SIZE,
.nr = 1024U,
};
static struct pd_cache_alloc_direct_gen alloc_direct_1024xPAGE_x32x24 = {
.bytes = NVGPU_CPU_PAGE_SIZE,
.nr = 1024U,
.nr_allocs_before_free = 32U,
.nr_frees_before_alloc = 24U
};
static struct pd_cache_alloc_direct_gen alloc_direct_1024xPAGE_x16x4 = {
.bytes = NVGPU_CPU_PAGE_SIZE,
.nr = 1024U,
.nr_allocs_before_free = 16U,
.nr_frees_before_alloc = 4U
};
static struct pd_cache_alloc_direct_gen alloc_direct_1024xPAGE_x16x15 = {
.bytes = NVGPU_CPU_PAGE_SIZE,
.nr = 1024U,
.nr_allocs_before_free = 16U,
.nr_frees_before_alloc = 15U
};
static struct pd_cache_alloc_direct_gen alloc_direct_1024xPAGE_x16x1 = {
.bytes = NVGPU_CPU_PAGE_SIZE,
.nr = 1024U,
.nr_allocs_before_free = 16U,
.nr_frees_before_alloc = 1U
};
/*
* Sub-page sized allocs. This will test the logic of the pd_caching.
*/
static struct pd_cache_alloc_direct_gen alloc_1x256B = {
.bytes = 256U,
.nr = 1U,
};
static struct pd_cache_alloc_direct_gen alloc_1x512B = {
.bytes = 512U,
.nr = 1U,
};
static struct pd_cache_alloc_direct_gen alloc_1x1024B = {
.bytes = 1024U,
.nr = 1U,
};
static struct pd_cache_alloc_direct_gen alloc_1x2048B = {
.bytes = 2048U,
.nr = 1U,
};
static struct pd_cache_alloc_direct_gen alloc_1024x256B_x16x15 = {
.bytes = 256U,
.nr = 1024U,
.nr_allocs_before_free = 16U,
.nr_frees_before_alloc = 15U
};
static struct pd_cache_alloc_direct_gen alloc_1024x256B_x16x1 = {
.bytes = 256U,
.nr = 1024U,
.nr_allocs_before_free = 16U,
.nr_frees_before_alloc = 1U
};
static struct pd_cache_alloc_direct_gen alloc_1024x256B_x32x1 = {
.bytes = 256U,
.nr = 1024U,
.nr_allocs_before_free = 32U,
.nr_frees_before_alloc = 1U
};
static struct pd_cache_alloc_direct_gen alloc_1024x256B_x11x3 = {
.bytes = 256U,
.nr = 1024U,
.nr_allocs_before_free = 11U,
.nr_frees_before_alloc = 3U
};
/*
* Init a PD cache for us to use.
*/
static int init_pd_cache(struct unit_module *m,
struct gk20a *g, struct vm_gk20a *vm)
{
int err;
/*
* Make sure there's not already a pd_cache inited.
*/
if (g->mm.pd_cache != NULL) {
unit_return_fail(m, "pd_cache already inited\n");
}
/*
* This is just enough init of the VM to get this code to work. Really
* these APIs should just take the gk20a struct...
*/
vm->mm = &g->mm;
err = nvgpu_pd_cache_init(g);
if (err != 0) {
unit_return_fail(m, "nvgpu_pd_cache_init failed ??\n");
}
return UNIT_SUCCESS;
}
int test_pd_cache_alloc_gen(struct unit_module *m, struct gk20a *g,
void *args)
{
u32 i, j;
int err;
int test_status = UNIT_SUCCESS;
struct vm_gk20a vm;
struct nvgpu_gmmu_pd *pds;
struct pd_cache_alloc_direct_gen *test_spec = args;
pds = malloc(sizeof(*pds) * test_spec->nr);
if (pds == NULL) {
unit_return_fail(m, "OOM in unit test ??\n");
}
err = init_pd_cache(m, g, &vm);
if (err != UNIT_SUCCESS) {
return err;
}
if (test_spec->nr_allocs_before_free == 0U) {
test_spec->nr_allocs_before_free = test_spec->nr;
test_spec->nr_frees_before_alloc = 0U;
}
/*
* This takes the test spec and executes some allocs/frees.
*/
i = 0U;
while (i < test_spec->nr) {
bool do_break = false;
/*
* Do some allocs. Note the i++. Keep marching i along.
*/
for (j = 0U; j < test_spec->nr_allocs_before_free; j++) {
struct nvgpu_gmmu_pd *c = &pds[i++];
memset(c, 0, sizeof(*c));
err = nvgpu_pd_alloc(&vm, c, test_spec->bytes);
if (err != 0) {
unit_err(m, "%s():%d Failed to do an alloc\n",
__func__, __LINE__);
goto cleanup_err;
}
if (i >= test_spec->nr) {
/* Break the while loop too! */
do_break = true;
break;
}
}
/*
* And now the frees. The --i is done for the same reason as the
* i++ in the alloc loop.
*/
for (j = 0U; j < test_spec->nr_frees_before_alloc; j++) {
struct nvgpu_gmmu_pd *c = &pds[--i];
/*
* Can't easily verify this works directly. Will have to
* do that later...
*/
nvgpu_pd_free(&vm, c);
}
/*
* Without this we alloc/free and incr/decr i forever...
*/
if (do_break) {
break;
}
}
/*
* We may well have a lot more frees to do!
*/
while (i > 0) {
i--;
nvgpu_pd_free(&vm, &pds[i]);
}
/*
* After freeing everything all the pd_cache entries should be cleaned
* up. This is not super easy to verify because the pd_cache impl hides
	 * its data structures within the C code itself.
*
* We can at least check that the mem field within the nvgpu_gmmu_pd
* struct is zeroed. That implies that the nvgpu_pd_free() routine did
* at least run through the cleanup code on this nvgpu_gmmu_pd.
*/
for (i = 0U; i < test_spec->nr; i++) {
if (pds[i].mem != NULL) {
unit_err(m, "%s():%d PD was not freed: %u\n",
__func__, __LINE__, i);
test_status = UNIT_FAIL;
}
}
free(pds);
nvgpu_pd_cache_fini(g);
return test_status;
cleanup_err:
for (i = 0U; i < test_spec->nr; i++) {
if (pds[i].mem != NULL) {
nvgpu_pd_free(&vm, &pds[i]);
}
}
free(pds);
nvgpu_pd_cache_fini(g);
return UNIT_FAIL;
}
int test_pd_free_empty_pd(struct unit_module *m, struct gk20a *g,
void *args)
{
int err;
struct vm_gk20a vm;
struct nvgpu_gmmu_pd pd;
err = init_pd_cache(m, g, &vm);
if (err != UNIT_SUCCESS) {
return err;
}
/* First test cached frees. */
err = nvgpu_pd_alloc(&vm, &pd, 2048U);
if (err != 0) {
unit_return_fail(m, "PD alloc failed");
}
/*
	 * nvgpu_pd_free() has no return value so we can't check it directly; we
	 * just make sure the first (legal) free doesn't crash, then verify that
	 * a double free triggers BUG().
*/
nvgpu_pd_free(&vm, &pd);
if (!EXPECT_BUG(nvgpu_pd_free(&vm, &pd))) {
unit_return_fail(m, "nvgpu_pd_free did not BUG() as expected");
}
/* When BUG() occurs the pd_cache lock is not released, so do it here */
nvgpu_mutex_release(&g->mm.pd_cache->lock);
pd.mem = NULL;
if (!EXPECT_BUG(nvgpu_pd_free(&vm, &pd))) {
unit_return_fail(m, "nvgpu_pd_free did not BUG() as expected");
}
nvgpu_mutex_release(&g->mm.pd_cache->lock);
/* And now direct frees. */
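	/*
	 * Allocations of NVGPU_PD_CACHE_SIZE bytes or more bypass the cache
	 * and go straight to the DMA allocator (see the direct-alloc comment
	 * near the top of this file).
	 */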
memset(&pd, 0U, sizeof(pd));
err = nvgpu_pd_alloc(&vm, &pd, NVGPU_PD_CACHE_SIZE);
if (err != 0) {
unit_return_fail(m, "PD alloc failed");
}
nvgpu_pd_free(&vm, &pd);
/*
	 * The nvgpu_pd_free() calls below will not cause BUG() because this PD
	 * was a direct (non-cached) allocation, and direct frees tolerate
	 * repeated calls.
*/
nvgpu_pd_free(&vm, &pd);
pd.mem = NULL;
nvgpu_pd_free(&vm, &pd);
nvgpu_pd_cache_fini(g);
return UNIT_SUCCESS;
}
int test_pd_alloc_invalid_input(struct unit_module *m, struct gk20a *g,
void *args)
{
int err;
struct vm_gk20a vm;
struct nvgpu_gmmu_pd pd;
u32 i, garbage[] = { 0U, 128U, 255U, 4095U, 3000U, 128U, 2049U };
g->mm.g = g;
vm.mm = &g->mm;
if (g->mm.pd_cache != NULL) {
unit_return_fail(m, "pd_cache already inited\n");
}
	/* Obviously shouldn't work since pd_cache is not init'ed. */
if (!EXPECT_BUG(nvgpu_pd_alloc(&vm, &pd, 2048U))) {
unit_return_fail(m, "pd_alloc worked on NULL pd_cache\n");
}
err = init_pd_cache(m, g, &vm);
if (err != UNIT_SUCCESS) {
return err;
}
/* Test garbage input. */
for (i = 0U; i < (sizeof(garbage) / sizeof(garbage[0])); i++) {
err = nvgpu_pd_alloc(&vm, &pd, garbage[i]);
if (err == 0) {
unit_return_fail(m, "PD alloc success: %u (failed)\n",
garbage[i]);
}
}
nvgpu_pd_cache_fini(g);
return UNIT_SUCCESS;
}
int test_pd_alloc_direct_fi(struct unit_module *m, struct gk20a *g, void *args)
{
int err;
struct vm_gk20a vm;
struct nvgpu_gmmu_pd pd;
struct nvgpu_posix_fault_inj *kmem_fi =
nvgpu_kmem_get_fault_injection();
struct nvgpu_posix_fault_inj *dma_fi =
nvgpu_dma_alloc_get_fault_injection();
err = init_pd_cache(m, g, &vm);
if (err != UNIT_SUCCESS) {
return err;
}
/*
	 * The alloc_direct() call is easy: there are two places we can fail. One
	 * is allocating the nvgpu_mem struct, the other is the DMA alloc into
	 * the nvgpu_mem struct. Inject faults for both and verify that we don't
	 * crash and that the allocs are reported as failures.
*/
nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
err = nvgpu_pd_alloc(&vm, &pd, NVGPU_CPU_PAGE_SIZE);
if (err == 0) {
unit_return_fail(m, "pd_alloc() success with kmem OOM\n");
}
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
nvgpu_posix_enable_fault_injection(dma_fi, true, 0);
err = nvgpu_pd_alloc(&vm, &pd, NVGPU_CPU_PAGE_SIZE);
if (err == 0) {
unit_return_fail(m, "pd_alloc() success with DMA OOM\n");
}
nvgpu_posix_enable_fault_injection(dma_fi, false, 0);
nvgpu_pd_cache_fini(g);
return UNIT_SUCCESS;
}
int test_pd_alloc_fi(struct unit_module *m, struct gk20a *g, void *args)
{
int err;
struct vm_gk20a vm;
struct nvgpu_gmmu_pd pd;
struct nvgpu_posix_fault_inj *kmem_fi =
nvgpu_kmem_get_fault_injection();
struct nvgpu_posix_fault_inj *dma_fi =
nvgpu_dma_alloc_get_fault_injection();
err = init_pd_cache(m, g, &vm);
if (err != UNIT_SUCCESS) {
return err;
}
/*
* nvgpu_pd_alloc_new() is effectively the same. We know we will hit the
	 * faults in the new alloc since we have no prior allocs. Therefore we
	 * won't hit a partial alloc and miss the DMA/kmem allocs.
*/
nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
err = nvgpu_pd_alloc(&vm, &pd, 2048U);
if (err == 0) {
unit_return_fail(m, "pd_alloc() success with kmem OOM\n");
}
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
nvgpu_posix_enable_fault_injection(dma_fi, true, 0);
err = nvgpu_pd_alloc(&vm, &pd, 2048U);
if (err == 0) {
unit_return_fail(m, "pd_alloc() success with DMA OOM\n");
}
nvgpu_posix_enable_fault_injection(dma_fi, false, 0);
nvgpu_pd_cache_fini(g);
return UNIT_SUCCESS;
}
int test_pd_cache_init(struct unit_module *m, struct gk20a *g, void *args)
{
int err, i;
struct nvgpu_pd_cache *cache;
struct nvgpu_posix_fault_inj *kmem_fi =
nvgpu_kmem_get_fault_injection();
/*
	 * Test 1 - do some SW fault injection to make sure we hit the potential
	 * -ENOMEM path when initializing the pd cache.
*/
nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
err = nvgpu_pd_cache_init(g);
if (err != -ENOMEM) {
unit_return_fail(m, "OOM condition didn't lead to -ENOMEM\n");
}
if (g->mm.pd_cache != NULL) {
unit_return_fail(m, "PD cache init'ed with no mem\n");
}
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
/*
* Test 2: Make sure that the init function initializes the necessary
* pd_cache data structure within the GPU @g. Just checks some internal
* data structures for their presence to make sure this code path has
* run.
*/
err = nvgpu_pd_cache_init(g);
if (err != 0) {
unit_return_fail(m, "PD cache failed to init!\n");
}
if (g->mm.pd_cache == NULL) {
unit_return_fail(m, "PD cache data structure not inited!\n");
}
/*
* Test 3: make sure that any re-init call doesn't blow away a
* previously inited pd_cache.
*/
cache = g->mm.pd_cache;
for (i = 0; i < 5; i++) {
nvgpu_pd_cache_init(g);
}
if (g->mm.pd_cache != cache) {
unit_return_fail(m, "PD cache got re-inited!\n");
}
/*
* Leave the PD cache inited at this point...
*/
return UNIT_SUCCESS;
}
int test_pd_cache_fini(struct unit_module *m, struct gk20a *g, void *args)
{
if (g->mm.pd_cache == NULL) {
unit_return_fail(m, "Missing an init'ed pd_cache\n");
}
/*
	 * Test 1: make sure the pd_cache pointer is NULL as that implies we
* made it to the nvgpu_kfree().
*/
nvgpu_pd_cache_fini(g);
if (g->mm.pd_cache != NULL) {
unit_return_fail(m, "Failed to cleanup pd_cache\n");
}
/*
* Test 2: this one is hard to test for functionality - just make sure
* we don't crash.
*/
nvgpu_pd_cache_fini(g);
return UNIT_SUCCESS;
}
int test_pd_cache_valid_alloc(struct unit_module *m, struct gk20a *g,
void *args)
{
u32 bytes;
int err;
struct vm_gk20a vm;
struct nvgpu_gmmu_pd pd;
err = init_pd_cache(m, g, &vm);
if (err != UNIT_SUCCESS) {
return err;
}
/*
* Allocate a PD of each valid PD size and ensure they are properly
* populated with nvgpu_mem data. This tests read/write and alignment.
* This covers the VCs 1 and 2.
*/
bytes = 256; /* 256 bytes is the min PD size. */
while (bytes <= NVGPU_CPU_PAGE_SIZE) {
err = nvgpu_pd_alloc(&vm, &pd, bytes);
if (err) {
goto fail;
}
/*
* Do a write to the zeroth word and then verify this made it to
* the nvgpu_mem. Using the zeroth word makes it easy to read
* back.
*/
nvgpu_pd_write(g, &pd, 0, 0x12345678);
if (0x12345678 !=
nvgpu_mem_rd32(g, pd.mem, pd.mem_offs / sizeof(u32))) {
nvgpu_pd_free(&vm, &pd);
goto fail;
}
/*
* Check alignment is at least as much as the size.
*/
if ((pd.mem_offs & (bytes - 1)) != 0) {
nvgpu_pd_free(&vm, &pd);
goto fail;
}
nvgpu_pd_free(&vm, &pd);
bytes <<= 1;
}
nvgpu_pd_cache_fini(g);
return UNIT_SUCCESS;
fail:
nvgpu_pd_cache_fini(g);
return err;
}
/**
* Requirement NVGPU-RQCD-68.C3
*
 * Valid/Invalid: 16 256B PDs, 8 512B PDs, etc., can/cannot fit into a
 * single page-sized DMA allocation.
*/
static int do_test_pd_cache_packing_size(struct unit_module *m, struct gk20a *g,
struct vm_gk20a *vm, u32 pd_size)
{
int err;
u32 i;
u32 n = NVGPU_PD_CACHE_SIZE / pd_size;
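	/*
	 * e.g. assuming the typical 4 KB NVGPU_PD_CACHE_SIZE: 16 PDs of 256 B,
	 * 8 PDs of 512 B, etc., per cache page (cf. NVGPU-RQCD-68.C3 above).
	 */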
struct nvgpu_gmmu_pd pds[n], pd;
struct nvgpu_posix_fault_inj *dma_fi =
nvgpu_dma_alloc_get_fault_injection();
unit_info(m, "Alloc %u PDs in page; PD size=%u bytes\n", n, pd_size);
/*
* Only allow one DMA alloc to happen. If before we alloc N PDs we
* see an OOM return then we failed to pack sufficient PDs into the
* single DMA page.
*/
nvgpu_posix_enable_fault_injection(dma_fi, true, 1);
for (i = 0U; i < n; i++) {
err = nvgpu_pd_alloc(vm, &pds[i], pd_size);
if (err) {
err = UNIT_FAIL;
goto cleanup;
}
}
/*
* Let's just ensure that we trigger the fault on the next alloc.
*/
err = nvgpu_pd_alloc(vm, &pd, pd_size);
if (err) {
err = UNIT_SUCCESS;
} else {
nvgpu_pd_free(vm, &pd);
err = UNIT_FAIL;
}
cleanup:
/*
	 * If there was a failure, don't try to free un-allocated PDs.
* Effectively a noop if this test passes.
*/
n = i;
for (i = 0; i < n; i++) {
nvgpu_pd_free(vm, &pds[i]);
}
nvgpu_posix_enable_fault_injection(dma_fi, false, 0);
return err;
}
/**
* Requirement NVGPU-RQCD-118.C1
*
* Valid/Invalid: Previously allocated PD entries are/are not re-usable.
*/
static int do_test_pd_reusability(struct unit_module *m, struct gk20a *g,
struct vm_gk20a *vm, u32 pd_size)
{
int err = UNIT_SUCCESS;
u32 i;
u32 n = NVGPU_PD_CACHE_SIZE / pd_size;
struct nvgpu_gmmu_pd pds[n];
struct nvgpu_posix_fault_inj *dma_fi =
nvgpu_dma_alloc_get_fault_injection();
nvgpu_posix_enable_fault_injection(dma_fi, true, 1);
for (i = 0U; i < n; i++) {
err = nvgpu_pd_alloc(vm, &pds[i], pd_size);
if (err) {
err = UNIT_FAIL;
goto cleanup;
}
}
/* Free all but one PD so that we ensure the page stays cached. */
for (i = 1U; i < n; i++) {
nvgpu_pd_free(vm, &pds[i]);
}
	/* Re-alloc. Will get a -ENOMEM if another page is allocated. */
for (i = 1U; i < n; i++) {
err = nvgpu_pd_alloc(vm, &pds[i], pd_size);
if (err) {
err = UNIT_FAIL;
goto cleanup;
}
}
cleanup:
n = i;
/* Really cleanup. */
for (i = 0U; i < n; i++) {
nvgpu_pd_free(vm, &pds[i]);
}
nvgpu_posix_enable_fault_injection(dma_fi, false, 0);
return err;
}
int test_per_pd_size(struct unit_module *m, struct gk20a *g, void *args)
{
int err;
u32 pd_size;
struct vm_gk20a vm;
int (*fn)(struct unit_module *m, struct gk20a *g,
struct vm_gk20a *vm, u32 pd_size) = args;
err = init_pd_cache(m, g, &vm);
if (err != UNIT_SUCCESS) {
return err;
}
pd_size = 256U; /* 256 bytes is the min PD size. */
while (pd_size < NVGPU_CPU_PAGE_SIZE) {
err = fn(m, g, &vm, pd_size);
if (err) {
err = UNIT_FAIL;
goto cleanup;
}
pd_size *= 2U;
}
err = UNIT_SUCCESS;
cleanup:
nvgpu_pd_cache_fini(g);
return err;
}
/*
* Read back and compare the pattern to the word in the page directory. Return
* true if they match, false otherwise.
*/
static bool readback_pd_write(struct gk20a *g, struct nvgpu_gmmu_pd *pd,
u32 index, u32 pattern)
{
u32 offset = index + (pd->mem_offs / sizeof(u32));
return nvgpu_mem_rd32(g, pd->mem, offset) == pattern;
}
int test_pd_write(struct unit_module *m, struct gk20a *g, void *args)
{
int err = UNIT_SUCCESS;
struct vm_gk20a vm;
struct nvgpu_gmmu_pd pd_2w, pd_4w;
const struct gk20a_mmu_level *mm_levels =
gp10b_mm_get_mmu_levels(g, SZ_64K);
u32 i, indexes[] = { 0U, 16U, 255U };
err = init_pd_cache(m, g, &vm);
if (err != UNIT_SUCCESS) {
return err;
}
/*
* Typical size of the last level dual page PD is 4K bytes - 256 entries
* at 16 bytes an entry.
*/
err = nvgpu_pd_alloc(&vm, &pd_4w, SZ_4K);
if (err != UNIT_SUCCESS) {
goto cleanup;
}
/*
* Most upper level PDs are 512 entries with 8 bytes per entry: again 4K
* bytes.
*/
err = nvgpu_pd_alloc(&vm, &pd_2w, SZ_4K);
if (err != UNIT_SUCCESS) {
goto cleanup;
}
/*
* Write to PDs at the given index and read back the value from the
* underlying nvgpu_mem.
*/
for (i = 0U; i < sizeof(indexes) / sizeof(*indexes); i++) {
u32 offs_2w = nvgpu_pd_offset_from_index(&mm_levels[2],
indexes[i]);
u32 offs_4w = nvgpu_pd_offset_from_index(&mm_levels[3],
indexes[i]);
nvgpu_pd_write(g, &pd_2w, offs_2w, 0xA5A5A5A5);
nvgpu_pd_write(g, &pd_4w, offs_4w, 0xA5A5A5A5);
/* Read back. */
if (!readback_pd_write(g, &pd_2w, offs_2w, 0xA5A5A5A5)) {
err = UNIT_FAIL;
goto cleanup;
}
if (!readback_pd_write(g, &pd_4w, offs_4w, 0xA5A5A5A5)) {
err = UNIT_FAIL;
goto cleanup;
}
}
cleanup:
nvgpu_pd_free(&vm, &pd_2w);
nvgpu_pd_free(&vm, &pd_4w);
nvgpu_pd_cache_fini(g);
return err;
}
int test_gpu_address(struct unit_module *m, struct gk20a *g, void *args)
{
int err;
struct vm_gk20a vm;
struct nvgpu_gmmu_pd pd;
u64 addr;
err = init_pd_cache(m, g, &vm);
if (err != UNIT_SUCCESS) {
return err;
}
err = nvgpu_pd_alloc(&vm, &pd, SZ_4K);
if (err != UNIT_SUCCESS) {
nvgpu_pd_cache_fini(g);
return UNIT_FAIL;
}
addr = nvgpu_pd_gpu_addr(g, &pd);
if (addr == 0ULL) {
unit_return_fail(m, "GPU address of PD is NULL\n");
}
nvgpu_pd_free(&vm, &pd);
nvgpu_pd_cache_fini(g);
return UNIT_SUCCESS;
}
int test_offset_computation(struct unit_module *m, struct gk20a *g,
void *args)
{
const struct gk20a_mmu_level *mm_levels =
gp10b_mm_get_mmu_levels(g, SZ_64K);
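	/*
	 * Expected offsets are in 32-bit words: index * 2 for the two-word
	 * (8 B) entries of mm_levels[2], and index * 4 for the four-word
	 * (16 B) entries of the dual last level, mm_levels[3].
	 */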
u32 indexes[] = { 0U, 4U, 16U, 255U };
u32 offsets_2w[] = { 0U, 8U, 32U, 510U };
u32 offsets_4w[] = { 0U, 16U, 64U, 1020U };
bool fail = false;
u32 i;
for (i = 0U; i < sizeof(indexes) / sizeof(*indexes); i++) {
u32 offs_2w = nvgpu_pd_offset_from_index(&mm_levels[2],
indexes[i]);
u32 offs_4w = nvgpu_pd_offset_from_index(&mm_levels[3],
indexes[i]);
if (offs_2w != offsets_2w[i]) {
unit_err(m, "2w offset comp failed: [%u] %u -> %u\n",
i, indexes[i], offs_2w);
fail = true;
}
if (offs_4w != offsets_4w[i]) {
unit_err(m, "4w offset comp failed: [%u] %u -> %u\n",
i, indexes[i], offs_4w);
fail = true;
}
}
return fail ? UNIT_FAIL : UNIT_SUCCESS;
}
int test_init_deinit(struct unit_module *m, struct gk20a *g, void *args)
{
int err, status = UNIT_SUCCESS;
struct vm_gk20a vm;
struct nvgpu_gmmu_pd pd;
struct nvgpu_posix_fault_inj *kmem_fi =
nvgpu_kmem_get_fault_injection();
struct nvgpu_posix_fault_inj *dma_fi =
nvgpu_dma_alloc_get_fault_injection();
err = init_pd_cache(m, g, &vm);
if (err != UNIT_SUCCESS) {
return err;
}
err = nvgpu_pd_alloc(&vm, &pd, SZ_4K);
if (err != UNIT_SUCCESS) {
nvgpu_pd_cache_fini(g);
return UNIT_FAIL;
}
nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
nvgpu_posix_enable_fault_injection(dma_fi, true, 0);
/*
* Block all allocs and check that we don't hit a -ENOMEM. This proves
* that we haven't done any extra allocations on subsequent init calls.
*/
err = nvgpu_pd_cache_init(g);
if (err == -ENOMEM) {
unit_err(m, "Attempted allocation during multi-init\n");
status = UNIT_FAIL;
}
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
nvgpu_posix_enable_fault_injection(dma_fi, false, 0);
nvgpu_pd_free(&vm, &pd);
nvgpu_pd_cache_fini(g);
return status;
}
/*
 * Init the global env - just make sure we don't try to allocate from VIDMEM
* when doing dma allocs.
*/
static int test_pd_cache_env_init(struct unit_module *m,
struct gk20a *g, void *args)
{
nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, true);
g->log_mask = 0;
if (verbose_lvl(m) >= 1) {
g->log_mask = gpu_dbg_pd_cache;
}
return UNIT_SUCCESS;
}
struct unit_module_test pd_cache_tests[] = {
UNIT_TEST(env_init, test_pd_cache_env_init, NULL, 0),
UNIT_TEST(init, test_pd_cache_init, NULL, 0),
UNIT_TEST(fini, test_pd_cache_fini, NULL, 0),
/*
* Requirement verification tests.
*/
UNIT_TEST_REQ("NVGPU-RQCD-68.C1,2", PD_CACHE_REQ1_UID, "V4",
valid_alloc, test_pd_cache_valid_alloc, NULL, 0),
UNIT_TEST_REQ("NVGPU-RQCD-68.C3", PD_CACHE_REQ1_UID, "V4",
pd_packing, test_per_pd_size, do_test_pd_cache_packing_size, 0),
UNIT_TEST_REQ("NVGPU-RQCD-118.C1", PD_CACHE_REQ2_UID, "V3",
pd_reusability, test_per_pd_size, do_test_pd_reusability, 0),
UNIT_TEST_REQ("NVGPU-RQCD-122.C1", PD_CACHE_REQ3_UID, "V3",
write, test_pd_write, NULL, 0),
UNIT_TEST_REQ("NVGPU-RQCD-123.C1", PD_CACHE_REQ4_UID, "V2",
gpu_address, test_gpu_address, NULL, 0),
UNIT_TEST_REQ("NVGPU-RQCD-126.C1,2", PD_CACHE_REQ5_UID, "V1",
offset_comp, test_offset_computation, NULL, 0),
UNIT_TEST_REQ("NVGPU-RQCD-124.C1", PD_CACHE_REQ6_UID, "V3",
init_deinit, test_init_deinit, NULL, 0),
UNIT_TEST_REQ("NVGPU-RQCD-155.C1", PD_CACHE_REQ7_UID, "V2",
multi_init, test_init_deinit, NULL, 0),
UNIT_TEST_REQ("NVGPU-RQCD-125.C1", PD_CACHE_REQ8_UID, "V2",
deinit, test_init_deinit, NULL, 0),
/*
* Direct allocs.
*/
UNIT_TEST(alloc_direct_1xPAGE, test_pd_cache_alloc_gen, &alloc_direct_1xPAGE, 0),
UNIT_TEST(alloc_direct_1024xPAGE, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE, 0),
UNIT_TEST(alloc_direct_1x16PAGE, test_pd_cache_alloc_gen, &alloc_direct_1x16PAGE, 0),
UNIT_TEST(alloc_direct_1024x16PAGE, test_pd_cache_alloc_gen, &alloc_direct_1024x16PAGE, 0),
UNIT_TEST(alloc_direct_1024xPAGE_x32x24, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE_x32x24, 0),
UNIT_TEST(alloc_direct_1024xPAGE_x16x4, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE_x16x4, 0),
UNIT_TEST(alloc_direct_1024xPAGE_x16x15, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE_x16x15, 0),
UNIT_TEST(alloc_direct_1024xPAGE_x16x1, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE_x16x1, 0),
/*
* Cached allocs.
*/
UNIT_TEST(alloc_1x256B, test_pd_cache_alloc_gen, &alloc_1x256B, 0),
UNIT_TEST(alloc_1x512B, test_pd_cache_alloc_gen, &alloc_1x512B, 0),
UNIT_TEST(alloc_1x1024B, test_pd_cache_alloc_gen, &alloc_1x1024B, 0),
UNIT_TEST(alloc_1x2048B, test_pd_cache_alloc_gen, &alloc_1x2048B, 0),
UNIT_TEST(alloc_1024x256B_x16x15, test_pd_cache_alloc_gen, &alloc_1024x256B_x16x15, 0),
UNIT_TEST(alloc_1024x256B_x16x1, test_pd_cache_alloc_gen, &alloc_1024x256B_x16x1, 0),
UNIT_TEST(alloc_1024x256B_x32x1, test_pd_cache_alloc_gen, &alloc_1024x256B_x32x1, 0),
UNIT_TEST(alloc_1024x256B_x11x3, test_pd_cache_alloc_gen, &alloc_1024x256B_x11x3, 0),
/*
* Error path testing.
*/
UNIT_TEST(free_empty, test_pd_free_empty_pd, NULL, 0),
UNIT_TEST(invalid_pd_alloc, test_pd_alloc_invalid_input, NULL, 0),
UNIT_TEST(alloc_direct_oom, test_pd_alloc_direct_fi, NULL, 0),
UNIT_TEST(alloc_oom, test_pd_alloc_fi, NULL, 0),
};
UNIT_MODULE(pd_cache, pd_cache_tests, UNIT_PRIO_NVGPU_TEST);

View File

@@ -0,0 +1,402 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_PD_CACHE_H
#define UNIT_PD_CACHE_H
struct gk20a;
struct unit_module;
/** @addtogroup SWUTS-mm-gmmu-pd_cache
* @{
*
* Software Unit Test Specification for mm.gmmu.pd_cache
*/
/**
* Test specification for: test_pd_cache_init
*
* Description: Test to cover the initialization routines of pd_cache.
*
* Test Type: Feature, Error injection
*
* Targets: gops_mm.pd_cache_init, nvgpu_pd_cache_init
*
* Input: None
*
* Steps:
* - Check that init with a memory failure returns -ENOMEM and that the pd_cache
* is not initialized.
* - Perform a normal initialization and ensure that all the expected data
* structures were initialized.
* - Perform the initialization again and make sure that any re-init call
* doesn't blow away a previously inited pd_cache.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_pd_cache_init(struct unit_module *m, struct gk20a *g, void *__args);
/**
* Test specification for: test_pd_cache_fini
*
* Description: Test to cover the de-initialization routines of pd_cache.
*
* Test Type: Feature
*
* Targets: nvgpu_pd_cache_fini
*
* Input: test_pd_cache_init
*
* Steps:
* - Check that de-initializing the pd_cache results in a NULL pointer.
* - Call the de-initialization again and ensure it doesn't cause a crash.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_pd_cache_fini(struct unit_module *m, struct gk20a *g, void *__args);
/**
* Test specification for: test_pd_cache_valid_alloc
*
* Description: Checks that pd_cache allocates a suitable DMA-able buffer of
* memory, that the buffer is sufficiently aligned for use by the GMMU, and
* that it can allocate valid PDs.
*
* Test Type: Feature
*
* Targets: nvgpu_pd_alloc, nvgpu_pd_write, nvgpu_pd_free, nvgpu_pd_cache_fini
*
* Input: None
*
* Steps:
* - Initialize a pd_cache.
* - Allocate a PD of each valid PD size and ensure they are properly
* populated with nvgpu_mem data. This tests read/write and alignment.
* - Do a write to the zeroth word and then verify this made it to
* the nvgpu_mem. Using the zeroth word makes it easy to read back.
* - Check alignment is at least as much as the size.
* - Free the PD.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_pd_cache_valid_alloc(struct unit_module *m, struct gk20a *g,
void *__args);
/**
* Test specification for: test_per_pd_size
*
* Description: Checks that pd_cache allocations are successful in a number of
* supported sizes.
*
* Test Type: Feature
*
* Targets: gops_mm.pd_cache_init, nvgpu_pd_cache_init, nvgpu_pd_alloc,
* nvgpu_pd_free, nvgpu_pd_cache_fini
*
* Input: None
*
* Steps:
* - Initialize a pd_cache.
* - Set PD size to 256 bytes (i.e. the minimum PD size).
* - While the PD size is smaller than the page size:
* - Run one of 2 scenarios:
* - Ensure that 16 256B PDs, 8 512B PDs, etc., can fit into a single
* page-sized DMA allocation.
* - Ensure that previously allocated PD entries are re-usable.
* - Double the PD size.
* - De-allocate the pd_cache.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_per_pd_size(struct unit_module *m, struct gk20a *g, void *__args);
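/*
 * For reference, the packing in the first scenario follows directly from
 * the 4KB page size used throughout these tests: a 4096-byte page holds
 * 4096/256 = 16 256B PDs, 4096/512 = 8 512B PDs, 4096/1024 = 4 1KB PDs
 * and 4096/2048 = 2 2KB PDs.
 */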
/**
* Test specification for: test_pd_write
*
* Description: Ensure that the pd_cache writes a word of memory into a
* given PD with 2-word or 4-word PDE/PTEs.
*
* Test Type: Feature
*
* Targets: gp10b_mm_get_mmu_levels, gops_mm.pd_cache_init, nvgpu_pd_cache_init,
* nvgpu_pd_alloc, nvgpu_pd_offset_from_index, nvgpu_pd_write, nvgpu_pd_free,
* nvgpu_pd_cache_fini
*
* Input: None
*
* Steps:
* - Initialize a pd_cache.
* - Allocate 2 test PDs with page size 4KB.
* - Iterate over the 3 supported indexes: 0, 16, 255:
* - Get the PD offset for the current index at the 3rd level and 4th level
* (for 2-word and 4-word PDE/PTEs, respectively).
* - Write a known 32-bit pattern to each PD.
* - Read back the pattern and ensure it matches the written value.
* - De-allocate the 2 test PDs.
* - De-allocate the pd_cache.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_pd_write(struct unit_module *m, struct gk20a *g, void *__args);
/**
* Test specification for: test_gpu_address
*
* Description: Ensure that the pd_cache provides a valid GPU physical
* address for a given PD.
*
* Test Type: Feature
*
* Targets: gops_mm.pd_cache_init, nvgpu_pd_cache_init, nvgpu_pd_alloc,
* nvgpu_pd_gpu_addr, nvgpu_pd_free, nvgpu_pd_cache_fini
*
* Input: None
*
* Steps:
* - Initialize a pd_cache.
* - Allocate a test PD with page size 4KB.
* - Get the GPU address of the allocated PD and ensure it is not NULL.
* - De-allocate the test PD.
* - De-allocate the pd_cache.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gpu_address(struct unit_module *m, struct gk20a *g, void *__args);
/**
* Test specification for: test_offset_computation
*
* Description: Ensure that the pd_cache unit returns a valid word offset
* for 2-word and 4-word PDE/PTEs.
*
* Test Type: Feature
*
* Targets: gp10b_mm_get_mmu_levels, nvgpu_pd_offset_from_index
*
* Input: None
*
* Steps:
* - Get all supported MMU levels.
* - Iterate over 4 indexes: 0, 4, 16, 255.
* - Get the offset for a 2 word PDE/PTE and ensure it matches the expected
* value.
* - Get the offset for a 4 word PDE/PTE and ensure it matches the expected
* value.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_offset_computation(struct unit_module *m, struct gk20a *g,
void *__args);
/**
* Test specification for: test_init_deinit
*
* Description: Ensure that the initialization routines of pd_cache handle all
* corner cases appropriately.
*
* Test Type: Feature, Error injection
*
* Targets: gops_mm.pd_cache_init, nvgpu_pd_cache_init, nvgpu_pd_alloc,
* nvgpu_pd_cache_fini, nvgpu_pd_free
*
* Input: None
*
* Steps:
* - Initialize a pd_cache.
* - Allocate a test PD with page size 4KB.
* - Enable memory and DMA fault injection.
* - Call the pd_cache initialization again.
* - Since the pd_cache was already initialized, ensure the previous call
* still reported success, confirming that no further allocations were made.
* - Disable fault injection.
* - De-allocate the test PD.
* - De-allocate the pd_cache.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_init_deinit(struct unit_module *m, struct gk20a *g, void *__args);
/**
* Test specification for: test_pd_cache_alloc_gen
*
* Description: Simple test that performs allocations. It allocates nr
* allocs of the passed size, either all at once or in an interleaved
* pattern.
* If nr_allocs_before_free is set, this value determines how many allocs
* to do before trying frees. If unset, it is simply nr.
* If nr_free_before_alloc is set, it determines the number of frees to do
* before swapping back to allocs; this way the interleaving pattern can be
* controlled to some degree. If not set, it defaults to
* nr_allocs_before_free. A sketch of this pattern follows the prototype
* below.
* Anything left over after the last free loop is freed in one big loop.
*
* Test Type: Feature
*
* Targets: gops_mm.pd_cache_init, nvgpu_pd_cache_init, nvgpu_pd_alloc,
* nvgpu_pd_cache_fini, nvgpu_pd_free
*
* Input: None
*
* Steps:
* - Initialize a pd_cache.
* - If there is no requested "allocs before free" value, set it to the
* requested total number of allocations. Also set the number of "frees before
* alloc" to 0.
* - Loop over the requested number of allocations with index 'i':
* - Loop from 0 to the requested number of "allocs before free":
* - Perform a PD allocation of the requested size.
* - Loop from 0 to the requested number of "frees before alloc":
* - Perform a PD free of allocation at index 'i'.
* - Loop backwards to free all the allocations.
* - Loop over all the PD allocation handles and ensure they have been zero'ed
* out as expected.
* - De-allocate the pd_cache.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_pd_cache_alloc_gen(struct unit_module *m, struct gk20a *g,
void *__args);
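/*
 * A minimal sketch of the interleaving described above, using hypothetical
 * local names (the real sequencing lives in test_pd_cache_alloc_gen):
 *
 *	i = 0; f = 0;
 *	while (i < nr) {
 *		for (j = 0; j < nr_allocs_before_free && i < nr; j++)
 *			nvgpu_pd_alloc(vm, &pds[i++], bytes);
 *		for (j = 0; j < nr_free_before_alloc && f < i; j++)
 *			nvgpu_pd_free(vm, &pds[f++]);
 *	}
 *	while (f < nr)
 *		nvgpu_pd_free(vm, &pds[f++]);
 */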
/**
* Test specification for: test_pd_free_empty_pd
*
* Description: Test free on empty PD cache and extra corner cases.
*
* Test Type: Feature, Error injection
*
* Targets: gops_mm.pd_cache_init, nvgpu_pd_cache_init, nvgpu_pd_alloc,
* nvgpu_pd_cache_fini, nvgpu_pd_free
*
* Input: None
*
* Steps:
* - Initialize a pd_cache.
* - Allocate a test PD with a 2KB page size (cached).
* - Free the test PD.
* - Attempt to free the test PD again and ensure it causes a call to BUG().
* - Attempt another free with pd.mem set to NULL and ensure it causes a call to
* BUG().
* - Allocate a test PD with a 4KB page size (direct).
* - Free the test PD.
* - Call the free again which should not cause a BUG().
* - Call the free again with pd.mem set to NULL which should not cause a BUG().
* - De-allocate the pd_cache.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_pd_free_empty_pd(struct unit_module *m, struct gk20a *g,
void *__args);
/**
* Test specification for: test_pd_alloc_invalid_input
*
* Description: Test invalid nvgpu_pd_alloc() calls. Invalid bytes,
* invalid pd_cache, etc.
*
* Test Type: Error injection
*
* Targets: gops_mm.pd_cache_init, nvgpu_pd_cache_init, nvgpu_pd_alloc,
* nvgpu_pd_cache_fini
*
* Input: None
*
* Steps:
* - Ensure that no pd_cache is initialized in the system.
* - Attempt to perform an allocation and ensure it causes a call to BUG().
* - Initialize a pd_cache.
* - Perform several allocation attempts with invalid sizes and ensure all
* calls report a failure.
* - De-allocate the pd_cache.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_pd_alloc_invalid_input(struct unit_module *m, struct gk20a *g,
void *__args);
/**
* Test specification for: test_pd_alloc_direct_fi
*
* Description: Test nvgpu_pd_alloc() under out-of-memory conditions for
* direct allocations.
*
* Test Type: Error injection
*
* Targets: nvgpu_pd_cache_init, nvgpu_pd_alloc, gops_mm.pd_cache_init,
* nvgpu_pd_cache_fini
*
* Input: None
*
* Steps:
* - Initialize a pd_cache.
* - Enable kernel memory error injection.
* - Try to perform a PD allocation and ensure it failed.
* - Disable kernel memory error injection.
* - Enable DMA memory error injection.
* - Try to perform a PD allocation and ensure it failed.
* - Disable DMA memory error injection.
* - De-allocate the pd_cache.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_pd_alloc_direct_fi(struct unit_module *m, struct gk20a *g, void *args);
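/*
 * A minimal sketch of the fault-injection pattern exercised by this test
 * and by test_pd_alloc_fi below, reusing the handles shown in
 * test_init_deinit (vm and pd are assumed to be set up already):
 *
 *	struct nvgpu_posix_fault_inj *kmem_fi =
 *		nvgpu_kmem_get_fault_injection();
 *
 *	nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
 *	err = nvgpu_pd_alloc(&vm, &pd, SZ_4K);  (expected to fail)
 *	nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
 */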
/**
* Test specification for: test_pd_alloc_fi
*
* Description: Test nvgpu_pd_alloc() under out-of-memory conditions for
* nvgpu_pd_alloc_new (cached) allocations.
*
* Test Type: Error injection
*
* Targets: gops_mm.pd_cache_init, nvgpu_pd_cache_init, nvgpu_pd_alloc,
* nvgpu_pd_cache_fini
*
* Input: None
*
* Steps:
* - Initialize a pd_cache.
* - Enable kernel memory error injection.
* - Try to perform a PD allocation and ensure it failed.
* - Disable kernel memory error injection.
* - Enable DMA memory error injection.
* - Try to perform a PD allocation and ensure it failed.
* - Disable DMA memory error injection.
* - De-allocate the pd_cache.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_pd_alloc_fi(struct unit_module *m, struct gk20a *g, void *args);
/** @} */
#endif /* UNIT_PD_CACHE_H */

View File

@@ -0,0 +1,26 @@
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
.SUFFIXES:
OBJS = flush-gk20a-fusa.o
MODULE = flush-gk20a-fusa
include ../../../../Makefile.units

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=flush-gk20a-fusa
include $(NV_COMPONENT_DIR)/../../../../Makefile.units.common.interface.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=flush-gk20a-fusa
include $(NV_COMPONENT_DIR)/../../../../Makefile.units.common.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,458 @@
/*
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <unit/io.h>
#include <unit/unit.h>
#include <nvgpu/io.h>
#include <nvgpu/posix/io.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/types.h>
#include <nvgpu/vm.h>
#include <nvgpu/nvgpu_init.h>
#include "os/posix/os_posix.h"
#include "hal/fb/fb_gv11b.h"
#include "hal/fb/intr/fb_intr_gv11b.h"
#include "hal/fifo/ramin_gk20a.h"
#include "hal/fifo/ramin_gv11b.h"
#include "hal/mm/cache/flush_gk20a.h"
#include "hal/mm/gmmu/gmmu_gp10b.h"
#include "hal/mm/mm_gp10b.h"
#include "hal/mm/mm_gv11b.h"
#include "hal/mm/mmu_fault/mmu_fault_gv11b.h"
#include "hal/mm/cache/flush_gk20a.h"
#include <nvgpu/hw/gv11b/hw_flush_gv11b.h>
#include <nvgpu/posix/posix-fault-injection.h>
#include <nvgpu/posix/dma.h>
#include "flush-gk20a-fusa.h"
/*
* Write callback (for all nvgpu_writel calls).
*/
#define WR_FLUSH_0 0
#define WR_FLUSH_1 1
#define WR_FLUSH_2 2
#define WR_FLUSH_3 3
#define WR_FLUSH_ACTUAL 0
#define WR_FLUSH_TEST_FB_FLUSH_ADDR 1
#define WR_FLUSH_TEST_L2_FLUSH_DIRTY_ADDR 2
#define WR_FLUSH_TEST_L2_SYSTEM_INVALIDATE 3
static u32 write_specific_value;
static u32 write_specific_addr;
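/*
 * When write_specific_addr selects one of the WR_FLUSH_TEST_* registers,
 * the write callback below discards the value being written and stores
 * write_specific_value in the register space instead. Each test uses this
 * to force the pending/outstanding status bits that the flush HALs poll.
 */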
static void writel_access_reg_fn(struct gk20a *g,
struct nvgpu_reg_access *access)
{
if (((write_specific_addr == WR_FLUSH_TEST_FB_FLUSH_ADDR) &&
(access->addr == flush_fb_flush_r())) ||
((write_specific_addr == WR_FLUSH_TEST_L2_FLUSH_DIRTY_ADDR) &&
(access->addr == flush_l2_flush_dirty_r())) ||
((write_specific_addr == WR_FLUSH_TEST_L2_SYSTEM_INVALIDATE) &&
(access->addr == flush_l2_system_invalidate_r()))) {
nvgpu_posix_io_writel_reg_space(g, access->addr,
write_specific_value);
} else {
nvgpu_posix_io_writel_reg_space(g, access->addr, access->value);
}
}
/*
* Read callback, similar to the write callback above.
*/
static void readl_access_reg_fn(struct gk20a *g,
struct nvgpu_reg_access *access)
{
access->value = nvgpu_posix_io_readl_reg_space(g, access->addr);
}
/*
* Define all the callbacks to be used during the test. Typically all
* write operations use the same callback, likewise for all read operations.
*/
static struct nvgpu_posix_io_callbacks mmu_faults_callbacks = {
/* Write APIs all can use the same accessor. */
.writel = writel_access_reg_fn,
.writel_check = writel_access_reg_fn,
.bar1_writel = writel_access_reg_fn,
.usermode_writel = writel_access_reg_fn,
/* Likewise for the read APIs. */
.__readl = readl_access_reg_fn,
.readl = readl_access_reg_fn,
.bar1_readl = readl_access_reg_fn,
};
static void init_platform(struct unit_module *m, struct gk20a *g, bool is_iGPU)
{
if (is_iGPU) {
nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, true);
} else {
nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, false);
}
}
static int init_mm(struct unit_module *m, struct gk20a *g)
{
u64 low_hole, aperture_size;
struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g);
struct mm_gk20a *mm = &g->mm;
p->mm_is_iommuable = true;
/* Minimum HALs for page_table */
g->ops.mm.gmmu.get_default_big_page_size =
nvgpu_gmmu_default_big_page_size;
g->ops.mm.init_inst_block = gv11b_mm_init_inst_block;
g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
g->ops.ramin.init_pdb = gv11b_ramin_init_pdb;
g->ops.ramin.alloc_size = gk20a_ramin_alloc_size;
g->ops.mm.setup_hw = nvgpu_mm_setup_hw;
g->ops.fb.init_hw = gv11b_fb_init_hw;
g->ops.fb.intr.enable = gv11b_fb_intr_enable;
g->ops.mm.cache.fb_flush = gk20a_mm_fb_flush;
g->ops.mm.mmu_fault.info_mem_destroy =
gv11b_mm_mmu_fault_info_mem_destroy;
nvgpu_posix_register_io(g, &mmu_faults_callbacks);
/* Register space: FB_MMU */
if (nvgpu_posix_io_add_reg_space(g, flush_fb_flush_r(), 0x800) != 0) {
unit_return_fail(m, "nvgpu_posix_io_add_reg_space failed\n");
}
/*
* Initialize VM space for system memory to be used throughout this
* unit module.
* Values below are similar to those used in nvgpu_init_system_vm()
*/
low_hole = SZ_4K * 16UL;
aperture_size = GK20A_PMU_VA_SIZE;
mm->pmu.aperture_size = GK20A_PMU_VA_SIZE;
mm->pmu.vm = nvgpu_vm_init(g,
g->ops.mm.gmmu.get_default_big_page_size(),
low_hole,
0ULL,
nvgpu_safe_sub_u64(aperture_size, low_hole),
0ULL,
true,
false,
false,
"system");
if (mm->pmu.vm == NULL) {
unit_return_fail(m, "'system' nvgpu_vm_init failed\n");
}
/*
* This initialization makes sure that the correct aperture mask
* is returned.
*/
g->mm.mmu_wr_mem.aperture = APERTURE_SYSMEM;
g->mm.mmu_rd_mem.aperture = APERTURE_SYSMEM;
return UNIT_SUCCESS;
}
int test_env_init_flush_gk20a_fusa(struct unit_module *m, struct gk20a *g,
void *args)
{
g->log_mask = 0;
init_platform(m, g, true);
if (init_mm(m, g) != 0) {
unit_return_fail(m, "nvgpu_init_mm_support failed\n");
}
write_specific_value = 0;
write_specific_addr = 0;
return UNIT_SUCCESS;
}
#define F_GK20A_FB_FLUSH_DEFAULT_INPUT 0
#define F_GK20A_FB_FLUSH_GET_RETRIES 1
#define F_GK20A_FB_FLUSH_PENDING_TRUE 2
#define F_GK20A_FB_FLUSH_OUTSTANDING_TRUE 3
#define F_GK20A_FB_FLUSH_OUTSTANDING_PENDING_TRUE 4
#define F_GK20A_FB_FLUSH_DUMP_VPR_WPR_INFO 5
#define F_GK20A_FB_FLUSH_NVGPU_POWERED_OFF 6
const char *m_gk20a_mm_fb_flush_str[] = {
"default_input",
"get_flush_retries",
"fb_flush_pending_true",
"fb_flush_outstanding_true",
"fb_flush_outstanding_pending_true",
"dump_vpr_wpr_info",
"nvgpu_powered_off",
};
static u32 stub_mm_get_flush_retries(struct gk20a *g, enum nvgpu_flush_op op)
{
return 100U;
}
static void stub_fb_dump_vpr_info(struct gk20a *g)
{
}
static void stub_fb_dump_wpr_info(struct gk20a *g)
{
}
int test_gk20a_mm_fb_flush(struct unit_module *m, struct gk20a *g, void *args)
{
int err;
int ret = UNIT_FAIL;
u64 branch = (u64)args;
nvgpu_set_power_state(g, NVGPU_STATE_POWERED_ON);
write_specific_addr = WR_FLUSH_TEST_FB_FLUSH_ADDR;
switch (branch) {
case F_GK20A_FB_FLUSH_PENDING_TRUE:
write_specific_value = WR_FLUSH_1;
break;
case F_GK20A_FB_FLUSH_OUTSTANDING_TRUE:
write_specific_value = WR_FLUSH_2;
break;
case F_GK20A_FB_FLUSH_OUTSTANDING_PENDING_TRUE:
write_specific_value = WR_FLUSH_3;
break;
case F_GK20A_FB_FLUSH_DUMP_VPR_WPR_INFO:
write_specific_value = WR_FLUSH_1;
break;
default:
write_specific_value = WR_FLUSH_0;
break;
}
g->ops.mm.get_flush_retries = branch == F_GK20A_FB_FLUSH_GET_RETRIES ?
stub_mm_get_flush_retries : NULL;
g->ops.fb.dump_vpr_info = branch == F_GK20A_FB_FLUSH_DUMP_VPR_WPR_INFO ?
stub_fb_dump_vpr_info : NULL;
g->ops.fb.dump_wpr_info = branch == F_GK20A_FB_FLUSH_DUMP_VPR_WPR_INFO ?
stub_fb_dump_wpr_info : NULL;
if (branch == F_GK20A_FB_FLUSH_NVGPU_POWERED_OFF) {
nvgpu_set_power_state(g, NVGPU_STATE_POWERED_OFF);
}
err = gk20a_mm_fb_flush(g);
if ((branch == F_GK20A_FB_FLUSH_PENDING_TRUE) ||
(branch == F_GK20A_FB_FLUSH_OUTSTANDING_TRUE) ||
(branch == F_GK20A_FB_FLUSH_OUTSTANDING_PENDING_TRUE) ||
(branch == F_GK20A_FB_FLUSH_DUMP_VPR_WPR_INFO)) {
unit_assert(err != 0, goto done);
} else {
unit_assert(err == 0, goto done);
}
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s: failed at %s\n", __func__,
m_gk20a_mm_fb_flush_str[branch]);
}
write_specific_addr = WR_FLUSH_ACTUAL;
return ret;
}
#define F_GK20A_L2_FLUSH_DEFAULT_INPUT 0
#define F_GK20A_L2_FLUSH_GET_RETRIES 1
#define F_GK20A_L2_FLUSH_PENDING_TRUE 2
#define F_GK20A_L2_FLUSH_OUTSTANDING_TRUE 3
#define F_GK20A_L2_FLUSH_INVALIDATE 4
#define F_GK20A_L2_FLUSH_NVGPU_POWERED_OFF 5
const char *m_gk20a_mm_l2_flush_str[] = {
"default_input",
"get_flush_retries",
"l2_flush_pending_true",
"l2_flush_outstanding_true",
"l2_flush_invalidate",
"nvgpu_powered_off",
};
int test_gk20a_mm_l2_flush(struct unit_module *m, struct gk20a *g, void *args)
{
int err;
int ret = UNIT_FAIL;
u64 branch = (u64)args;
bool invalidate;
nvgpu_set_power_state(g, NVGPU_STATE_POWERED_ON);
write_specific_addr = WR_FLUSH_TEST_L2_FLUSH_DIRTY_ADDR;
switch (branch) {
case F_GK20A_L2_FLUSH_PENDING_TRUE:
write_specific_value = WR_FLUSH_1;
break;
case F_GK20A_L2_FLUSH_OUTSTANDING_TRUE:
write_specific_value = WR_FLUSH_2;
break;
default:
write_specific_value = WR_FLUSH_0;
break;
}
g->ops.mm.get_flush_retries = (branch == F_GK20A_L2_FLUSH_GET_RETRIES) ?
stub_mm_get_flush_retries : NULL;
invalidate = (branch == F_GK20A_L2_FLUSH_INVALIDATE) ? true : false;
if (branch == F_GK20A_L2_FLUSH_NVGPU_POWERED_OFF) {
nvgpu_set_power_state(g, NVGPU_STATE_POWERED_OFF);
}
err = gk20a_mm_l2_flush(g, invalidate);
if ((branch == F_GK20A_L2_FLUSH_PENDING_TRUE) ||
(branch == F_GK20A_L2_FLUSH_OUTSTANDING_TRUE)) {
unit_assert(err != 0, goto done);
} else {
unit_assert(err == 0, goto done);
}
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s: failed at %s\n", __func__,
m_gk20a_mm_l2_flush_str[branch]);
}
write_specific_addr = WR_FLUSH_ACTUAL;
return ret;
}
#define F_GK20A_L2_INVALIDATE_DEFAULT_INPUT 0
#define F_GK20A_L2_INVALIDATE_PENDING_TRUE 1
#define F_GK20A_L2_INVALIDATE_OUTSTANDING_TRUE 2
#define F_GK20A_L2_INVALIDATE_GET_RETRIES_NULL 3
#define F_GK20A_L2_INVALIDATE_NVGPU_POWERED_OFF 4
const char *m_gk20a_mm_l2_invalidate_str[] = {
"invalidate_default_input",
"invalidate_l2_pending_true",
"invalidate_l2_outstanding_true",
"invalidate_get_flush_retries_null",
"invalidate_nvgpu_powered_off",
};
static u32 global_count = 100;
static u32 count;
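/*
 * The stub below snapshots global_count into count before incrementing it,
 * so after a call count == global_count - 1. The test uses this to assert
 * that gk20a_mm_l2_invalidate() actually invoked the retry hook.
 */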
static u32 stub_mm_get_flush_retries_count(struct gk20a *g,
enum nvgpu_flush_op op)
{
count = global_count++;
return 100U;
}
int test_gk20a_mm_l2_invalidate(struct unit_module *m, struct gk20a *g,
void *args)
{
int ret = UNIT_FAIL;
u64 branch = (u64)args;
nvgpu_set_power_state(g, NVGPU_STATE_POWERED_ON);
write_specific_addr = WR_FLUSH_TEST_L2_SYSTEM_INVALIDATE;
switch (branch) {
case F_GK20A_L2_INVALIDATE_PENDING_TRUE:
write_specific_value = WR_FLUSH_1;
break;
case F_GK20A_L2_INVALIDATE_OUTSTANDING_TRUE:
write_specific_value = WR_FLUSH_2;
break;
default:
write_specific_value = WR_FLUSH_0;
break;
}
g->ops.mm.get_flush_retries =
(branch == F_GK20A_L2_INVALIDATE_GET_RETRIES_NULL) ?
NULL : stub_mm_get_flush_retries_count;
if (branch == F_GK20A_L2_INVALIDATE_NVGPU_POWERED_OFF) {
nvgpu_set_power_state(g, NVGPU_STATE_POWERED_OFF);
}
gk20a_mm_l2_invalidate(g);
if (branch != F_GK20A_L2_INVALIDATE_GET_RETRIES_NULL) {
unit_assert(count == (global_count - 1U), goto done);
}
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s: failed at %s\n", __func__,
m_gk20a_mm_l2_invalidate_str[branch]);
}
write_specific_addr = WR_FLUSH_ACTUAL;
return ret;
}
int test_env_clean_flush_gk20a_fusa(struct unit_module *m, struct gk20a *g,
void *args)
{
g->log_mask = 0;
nvgpu_vm_put(g->mm.pmu.vm);
return UNIT_SUCCESS;
}
struct unit_module_test mm_flush_gk20a_fusa_tests[] = {
UNIT_TEST(env_init, test_env_init_flush_gk20a_fusa, NULL, 0),
UNIT_TEST(mm_fb_flush_s0, test_gk20a_mm_fb_flush, (void *)F_GK20A_FB_FLUSH_DEFAULT_INPUT, 0),
UNIT_TEST(mm_fb_flush_s1, test_gk20a_mm_fb_flush, (void *)F_GK20A_FB_FLUSH_GET_RETRIES, 0),
UNIT_TEST(mm_fb_flush_s2, test_gk20a_mm_fb_flush, (void *)F_GK20A_FB_FLUSH_PENDING_TRUE, 0),
UNIT_TEST(mm_fb_flush_s3, test_gk20a_mm_fb_flush, (void *)F_GK20A_FB_FLUSH_OUTSTANDING_TRUE, 0),
UNIT_TEST(mm_fb_flush_s4, test_gk20a_mm_fb_flush, (void *)F_GK20A_FB_FLUSH_OUTSTANDING_PENDING_TRUE, 0),
UNIT_TEST(mm_fb_flush_s5, test_gk20a_mm_fb_flush, (void *)F_GK20A_FB_FLUSH_DUMP_VPR_WPR_INFO, 0),
UNIT_TEST(mm_fb_flush_s6, test_gk20a_mm_fb_flush, (void *)F_GK20A_FB_FLUSH_NVGPU_POWERED_OFF, 0),
UNIT_TEST(mm_l2_flush_s0, test_gk20a_mm_l2_flush, (void *)F_GK20A_L2_FLUSH_DEFAULT_INPUT, 0),
UNIT_TEST(mm_l2_flush_s1, test_gk20a_mm_l2_flush, (void *)F_GK20A_L2_FLUSH_GET_RETRIES, 0),
UNIT_TEST(mm_l2_flush_s2, test_gk20a_mm_l2_flush, (void *)F_GK20A_L2_FLUSH_PENDING_TRUE, 0),
UNIT_TEST(mm_l2_flush_s3, test_gk20a_mm_l2_flush, (void *)F_GK20A_L2_FLUSH_OUTSTANDING_TRUE, 0),
UNIT_TEST(mm_l2_flush_s4, test_gk20a_mm_l2_flush, (void *)F_GK20A_L2_FLUSH_INVALIDATE, 0),
UNIT_TEST(mm_l2_flush_s5, test_gk20a_mm_l2_flush, (void *)F_GK20A_L2_FLUSH_NVGPU_POWERED_OFF, 0),
UNIT_TEST(mm_l2_invalidate_s0, test_gk20a_mm_l2_invalidate, (void *)F_GK20A_L2_INVALIDATE_DEFAULT_INPUT, 0),
UNIT_TEST(mm_l2_invalidate_s1, test_gk20a_mm_l2_invalidate, (void *)F_GK20A_L2_INVALIDATE_PENDING_TRUE, 0),
UNIT_TEST(mm_l2_invalidate_s2, test_gk20a_mm_l2_invalidate, (void *)F_GK20A_L2_INVALIDATE_OUTSTANDING_TRUE, 0),
UNIT_TEST(mm_l2_invalidate_s3, test_gk20a_mm_l2_invalidate, (void *)F_GK20A_L2_INVALIDATE_GET_RETRIES_NULL, 0),
UNIT_TEST(mm_l2_invalidate_s4, test_gk20a_mm_l2_invalidate, (void *)F_GK20A_L2_INVALIDATE_NVGPU_POWERED_OFF, 0),
UNIT_TEST(env_clean, test_env_clean_flush_gk20a_fusa, NULL, 0),
};
UNIT_MODULE(flush_gk20a_fusa, mm_flush_gk20a_fusa_tests, UNIT_PRIO_NVGPU_TEST);

View File

@@ -0,0 +1,155 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_MM_HAL_CACHE_FLUSH_GK20A_FUSA_H
#define UNIT_MM_HAL_CACHE_FLUSH_GK20A_FUSA_H
struct gk20a;
struct unit_module;
/** @addtogroup SWUTS-mm-hal-cache-flush-gk20a-fusa
* @{
*
* Software Unit Test Specification for mm.hal.cache.flush_gk20a_fusa
*/
/**
* Test specification for: test_env_init_flush_gk20a_fusa
*
* Description: Initialize environment for MM tests
*
* Test Type: Feature
*
* Targets: None
*
* Input: None
*
* Steps:
* - Init HALs and initialize VMs similar to nvgpu_init_system_vm().
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_env_init_flush_gk20a_fusa(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_gk20a_mm_fb_flush
*
* Description: Test FB flush
*
* Test Type: Feature
*
* Targets: gops_mm_cache.fb_flush, gk20a_mm_fb_flush,
* gops_mm.get_flush_retries
*
* Input: test_env_init, args (value can be F_GK20A_FB_FLUSH_DEFAULT_INPUT,
* F_GK20A_FB_FLUSH_GET_RETRIES, F_GK20A_FB_FLUSH_PENDING_TRUE,
* F_GK20A_FB_FLUSH_OUTSTANDING_TRUE,
* F_GK20A_FB_FLUSH_OUTSTANDING_PENDING_TRUE,
* F_GK20A_FB_FLUSH_DUMP_VPR_WPR_INFO or
* F_GK20A_FB_FLUSH_NVGPU_POWERED_OFF)
*
* Steps:
* - Invoke FB flush command
* - Test FB flush with various scenarios as below:
* - flush outstanding, flush pending, GPU powered off
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gk20a_mm_fb_flush(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_gk20a_mm_l2_flush
*
* Description: Test L2 flush
*
* Test Type: Feature
*
* Targets: gops_mm_cache.l2_flush, gk20a_mm_l2_flush,
* gk20a_mm_l2_invalidate_locked
*
* Input: test_env_init, args (value can be F_GK20A_L2_FLUSH_DEFAULT_INPUT,
* F_GK20A_L2_FLUSH_GET_RETRIES, F_GK20A_L2_FLUSH_PENDING_TRUE,
* F_GK20A_L2_FLUSH_OUTSTANDING_TRUE, F_GK20A_L2_FLUSH_INVALIDATE or
* F_GK20A_L2_FLUSH_NVGPU_POWERED_OFF)
*
* Steps:
* - Invoke L2 flush command
* - Test L2 flush with various scenarios as below:
* - flush dirty outstanding, flush dirty pending, GPU powered off,
* flush with invalidate
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gk20a_mm_l2_flush(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_gk20a_mm_l2_invalidate
*
* Description: Test L2 invalidate
*
* Test Type: Feature
*
* Targets: gops_mm_cache.l2_invalidate, gk20a_mm_l2_invalidate,
* gk20a_mm_l2_invalidate_locked
*
* Input: test_env_init, args (value can be F_GK20A_L2_INVALIDATE_DEFAULT_INPUT,
* F_GK20A_L2_INVALIDATE_PENDING_TRUE,
* F_GK20A_L2_INVALIDATE_OUTSTANDING_TRUE,
* F_GK20A_L2_INVALIDATE_GET_RETRIES_NULL or
* F_GK20A_L2_INVALIDATE_NVGPU_POWERED_OFF)
*
* Steps:
* - Invoke L2 invalidate
* - Test when invalidate is outstanding and/or pending
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gk20a_mm_l2_invalidate(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_env_clean_flush_gk20a_fusa
*
* Description: Cleanup test environment
*
* Test Type: Feature
*
* Targets: None
*
* Input: test_env_init
*
* Steps:
* - Destroy memory and VMs initialized for the test.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_env_clean_flush_gk20a_fusa(struct unit_module *m, struct gk20a *g,
void *args);
/** @} */
#endif /* UNIT_MM_HAL_CACHE_FLUSH_GK20A_FUSA_H */

View File

@@ -0,0 +1,26 @@
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
.SUFFIXES:
OBJS = flush-gv11b-fusa.o
MODULE = flush-gv11b-fusa
include ../../../../Makefile.units

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=flush-gv11b-fusa
include $(NV_COMPONENT_DIR)/../../../../Makefile.units.common.interface.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=flush-gv11b-fusa
include $(NV_COMPONENT_DIR)/../../../../Makefile.units.common.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,310 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <unit/io.h>
#include <unit/unit.h>
#include <nvgpu/io.h>
#include <nvgpu/posix/io.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/types.h>
#include <nvgpu/vm.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/nvgpu_init.h>
#include "os/posix/os_posix.h"
#include "hal/fb/fb_gv11b.h"
#include "hal/fb/intr/fb_intr_gv11b.h"
#include "hal/fifo/ramin_gk20a.h"
#include "hal/fifo/ramin_gv11b.h"
#include "hal/mm/cache/flush_gk20a.h"
#include "hal/mm/gmmu/gmmu_gp10b.h"
#include "hal/mm/mm_gp10b.h"
#include "hal/mm/mm_gv11b.h"
#include "hal/mm/mmu_fault/mmu_fault_gv11b.h"
#include "hal/mm/cache/flush_gv11b.h"
#include <nvgpu/hw/gv11b/hw_flush_gv11b.h>
#include <nvgpu/posix/posix-fault-injection.h>
#include <nvgpu/posix/dma.h>
#include "flush-gv11b-fusa.h"
/*
* Write callback (for all nvgpu_writel calls).
*/
#define WR_FLUSH_0 0
#define WR_FLUSH_1 1
static u32 write_specific_value;
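/*
 * Writes to flush_l2_flush_dirty_r() are overridden with
 * write_specific_value by the callback below: WR_FLUSH_0 lets the dirty
 * flush complete, while WR_FLUSH_1 leaves it pending so that
 * gv11b_mm_l2_flush() sees the l2_flush failure case.
 */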
static void writel_access_reg_fn(struct gk20a *g,
struct nvgpu_reg_access *access)
{
if (access->addr == flush_l2_flush_dirty_r()) {
nvgpu_posix_io_writel_reg_space(g, access->addr,
write_specific_value);
} else {
nvgpu_posix_io_writel_reg_space(g, access->addr, access->value);
}
}
/*
* Read callback, similar to the write callback above.
*/
static void readl_access_reg_fn(struct gk20a *g,
struct nvgpu_reg_access *access)
{
access->value = nvgpu_posix_io_readl_reg_space(g, access->addr);
}
/*
* Define all the callbacks to be used during the test. Typically all
* write operations use the same callback, likewise for all read operations.
*/
static struct nvgpu_posix_io_callbacks mmu_faults_callbacks = {
/* Write APIs all can use the same accessor. */
.writel = writel_access_reg_fn,
.writel_check = writel_access_reg_fn,
.bar1_writel = writel_access_reg_fn,
.usermode_writel = writel_access_reg_fn,
/* Likewise for the read APIs. */
.__readl = readl_access_reg_fn,
.readl = readl_access_reg_fn,
.bar1_readl = readl_access_reg_fn,
};
static void init_platform(struct unit_module *m, struct gk20a *g, bool is_iGPU)
{
if (is_iGPU) {
nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, true);
} else {
nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, false);
}
}
static int init_mm(struct unit_module *m, struct gk20a *g)
{
u64 low_hole, aperture_size;
struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g);
struct mm_gk20a *mm = &g->mm;
p->mm_is_iommuable = true;
/* Minimum HALs for page_table */
g->ops.mm.gmmu.get_default_big_page_size =
nvgpu_gmmu_default_big_page_size;
g->ops.mm.init_inst_block = gv11b_mm_init_inst_block;
g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
g->ops.ramin.init_pdb = gv11b_ramin_init_pdb;
g->ops.ramin.alloc_size = gk20a_ramin_alloc_size;
g->ops.mm.setup_hw = nvgpu_mm_setup_hw;
g->ops.fb.init_hw = gv11b_fb_init_hw;
g->ops.fb.intr.enable = gv11b_fb_intr_enable;
g->ops.mm.cache.fb_flush = gk20a_mm_fb_flush;
g->ops.mm.mmu_fault.info_mem_destroy =
gv11b_mm_mmu_fault_info_mem_destroy;
nvgpu_posix_register_io(g, &mmu_faults_callbacks);
/* Register space: FB_MMU */
if (nvgpu_posix_io_add_reg_space(g, flush_fb_flush_r(), 0x800) != 0) {
unit_return_fail(m, "nvgpu_posix_io_add_reg_space failed\n");
}
/*
* Initialize VM space for system memory to be used throughout this
* unit module.
* Values below are similar to those used in nvgpu_init_system_vm()
*/
low_hole = SZ_4K * 16UL;
aperture_size = GK20A_PMU_VA_SIZE;
mm->pmu.aperture_size = GK20A_PMU_VA_SIZE;
mm->pmu.vm = nvgpu_vm_init(g,
g->ops.mm.gmmu.get_default_big_page_size(),
low_hole,
0ULL,
nvgpu_safe_sub_u64(aperture_size, low_hole),
0ULL,
true,
false,
false,
"system");
if (mm->pmu.vm == NULL) {
unit_return_fail(m, "'system' nvgpu_vm_init failed\n");
}
/* BAR1 memory space */
mm->bar1.aperture_size = U32(16) << 20U;
mm->bar1.vm = nvgpu_vm_init(g,
g->ops.mm.gmmu.get_default_big_page_size(),
SZ_4K, 0ULL, nvgpu_safe_sub_u64(mm->bar1.aperture_size, SZ_4K),
0ULL, false, false, false, "bar1");
if (mm->bar1.vm == NULL) {
unit_return_fail(m, "'bar1' nvgpu_vm_init failed\n");
}
/*
* This initialization makes sure that the correct aperture mask
* is returned.
*/
g->mm.mmu_wr_mem.aperture = APERTURE_SYSMEM;
g->mm.mmu_rd_mem.aperture = APERTURE_SYSMEM;
return UNIT_SUCCESS;
}
int test_env_init_flush_gv11b_fusa(struct unit_module *m, struct gk20a *g,
void *args)
{
g->log_mask = 0;
init_platform(m, g, true);
if (init_mm(m, g) != 0) {
unit_return_fail(m, "nvgpu_init_mm_support failed\n");
}
write_specific_value = 0;
return UNIT_SUCCESS;
}
#define F_GV11B_L2_FLUSH_PASS_BAR1_BIND_NOT_NULL 0
#define F_GV11B_L2_FLUSH_PASS_BAR1_BIND_NULL 1
#define F_GV11B_L2_FLUSH_FB_FLUSH_FAIL 2
#define F_GV11B_L2_FLUSH_L2_FLUSH_FAIL 3
#define F_GV11B_L2_FLUSH_TLB_INVALIDATE_FAIL 4
#define F_GV11B_L2_FLUSH_FB_FLUSH2_FAIL 5
const char *m_gv11b_mm_l2_flush_str[] = {
"pass_bar1_bind_not_null",
"pass_bar1_bind_null",
"fb_flush_fail",
"l2_flush_fail",
"tlb_invalidate_fail",
"fb_flush_2_fail",
};
static u32 stub_fb_flush_fail;
static bool stub_tlb_invalidate_fail;
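/*
 * stub_fb_flush_fail is the number of fb_flush calls that succeed before
 * the stub returns -EBUSY: 0 fails the first call, 1 fails the second
 * (the FB_FLUSH2 case), and 2 lets both calls made during the flush pass.
 */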
static int stub_mm_fb_flush(struct gk20a *g)
{
if (stub_fb_flush_fail == 0) {
return -EBUSY;
}
stub_fb_flush_fail--;
return 0;
}
static int stub_bus_bar1_bind(struct gk20a *g, struct nvgpu_mem *bar1_inst)
{
return 0;
}
static int stub_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
{
if (stub_tlb_invalidate_fail) {
return -ETIMEDOUT;
}
return 0;
}
int test_gv11b_mm_l2_flush(struct unit_module *m, struct gk20a *g, void *args)
{
struct gpu_ops gops = g->ops;
int err;
int ret = UNIT_FAIL;
u64 branch = (u64)args;
nvgpu_set_power_state(g, NVGPU_STATE_POWERED_ON);
g->ops.mm.cache.fb_flush = stub_mm_fb_flush;
g->ops.fb.tlb_invalidate = stub_fb_tlb_invalidate;
stub_fb_flush_fail = (branch == F_GV11B_L2_FLUSH_FB_FLUSH_FAIL) ?
0U : (branch == F_GV11B_L2_FLUSH_FB_FLUSH2_FAIL ? 1U : 2U);
/* Data written to the flush-dirty register controls the l2_flush() result */
write_specific_value = branch == F_GV11B_L2_FLUSH_L2_FLUSH_FAIL ?
WR_FLUSH_1 : WR_FLUSH_0;
g->ops.bus.bar1_bind =
((branch == F_GV11B_L2_FLUSH_PASS_BAR1_BIND_NULL) ||
(branch == F_GV11B_L2_FLUSH_FB_FLUSH2_FAIL)) ?
NULL : stub_bus_bar1_bind;
stub_tlb_invalidate_fail =
branch == F_GV11B_L2_FLUSH_TLB_INVALIDATE_FAIL ? true : false;
err = gv11b_mm_l2_flush(g, false);
unit_info(m, "bar1 pdb mem: %p\n", g->mm.bar1.vm->pdb.mem);
if ((branch == F_GV11B_L2_FLUSH_PASS_BAR1_BIND_NOT_NULL) ||
(branch == F_GV11B_L2_FLUSH_PASS_BAR1_BIND_NULL)) {
unit_assert(err == 0, goto done);
} else {
unit_assert(err != 0, goto done);
}
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s: failed at %s\n", __func__,
m_gv11b_mm_l2_flush_str[branch]);
}
nvgpu_set_power_state(g, NVGPU_STATE_POWERED_OFF);
g->ops = gops;
return ret;
}
int test_env_clean_flush_gv11b_fusa(struct unit_module *m, struct gk20a *g,
void *args)
{
g->log_mask = 0;
nvgpu_vm_put(g->mm.pmu.vm);
nvgpu_vm_put(g->mm.bar1.vm);
return UNIT_SUCCESS;
}
struct unit_module_test mm_flush_gv11b_fusa_tests[] = {
UNIT_TEST(env_init, test_env_init_flush_gv11b_fusa, NULL, 0),
UNIT_TEST(mm_l2_flush_s0, test_gv11b_mm_l2_flush, (void *)F_GV11B_L2_FLUSH_PASS_BAR1_BIND_NOT_NULL, 0),
UNIT_TEST(mm_l2_flush_s1, test_gv11b_mm_l2_flush, (void *)F_GV11B_L2_FLUSH_PASS_BAR1_BIND_NULL, 0),
UNIT_TEST(mm_l2_flush_s2, test_gv11b_mm_l2_flush, (void *)F_GV11B_L2_FLUSH_FB_FLUSH_FAIL, 0),
UNIT_TEST(mm_l2_flush_s3, test_gv11b_mm_l2_flush, (void *)F_GV11B_L2_FLUSH_L2_FLUSH_FAIL, 0),
UNIT_TEST(mm_l2_flush_s4, test_gv11b_mm_l2_flush, (void *)F_GV11B_L2_FLUSH_TLB_INVALIDATE_FAIL, 0),
UNIT_TEST(mm_l2_flush_s5, test_gv11b_mm_l2_flush, (void *)F_GV11B_L2_FLUSH_FB_FLUSH2_FAIL, 0),
UNIT_TEST(env_clean, test_env_clean_flush_gv11b_fusa, NULL, 0),
};
UNIT_MODULE(flush_gv11b_fusa, mm_flush_gv11b_fusa_tests, UNIT_PRIO_NVGPU_TEST);

View File

@@ -0,0 +1,104 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_MM_HAL_CACHE_FLUSH_GV11B_FUSA_H
#define UNIT_MM_HAL_CACHE_FLUSH_GV11B_FUSA_H
struct gk20a;
struct unit_module;
/** @addtogroup SWUTS-mm-hal-cache-flush-gv11b-fusa
* @{
*
* Software Unit Test Specification for mm.hal.cache.flush_gv11b_fusa
*/
/**
* Test specification for: test_env_init_flush_gv11b_fusa
*
* Description: Initialize environment for MM tests
*
* Test Type: Feature
*
* Targets: None
*
* Input: None
*
* Steps:
* - Init HALs and initialize VMs similar to nvgpu_init_system_vm().
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_env_init_flush_gv11b_fusa(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_gv11b_mm_l2_flush
*
* Description: Test L2 flush
*
* Test Type: Feature
*
* Targets: gops_mm_cache.l2_flush, gv11b_mm_l2_flush
*
* Input: test_env_init, args (value can be
* F_GV11B_L2_FLUSH_PASS_BAR1_BIND_NOT_NULL,
* F_GV11B_L2_FLUSH_PASS_BAR1_BIND_NULL, F_GV11B_L2_FLUSH_FB_FLUSH_FAIL,
* F_GV11B_L2_FLUSH_L2_FLUSH_FAIL, F_GV11B_L2_FLUSH_TLB_INVALIDATE_FAIL,
* F_GV11B_L2_FLUSH_FB_FLUSH2_FAIL)
*
* Steps:
* - Invoke L2 flush command
* - Test L2 flush with various scenarios as below:
* - fb_flush is successful or fails
* - l2_flush passes or fails
* - bar1_bind is populated or not populated
* - tlb_invalidate passes or fails
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gv11b_mm_l2_flush(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_env_clean_flush_gv11b_fusa
*
* Description: Cleanup test environment
*
* Test Type: Feature
*
* Targets: None
*
* Input: test_env_init
*
* Steps:
* - Destroy memory and VMs initialized for the test.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_env_clean_flush_gv11b_fusa(struct unit_module *m, struct gk20a *g,
void *args);
/** @} */
#endif /* UNIT_MM_HAL_CACHE_FLUSH_GV11B_FUSA_H */

View File

@@ -0,0 +1,26 @@
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
.SUFFIXES:
OBJS = gmmu-gk20a-fusa.o
MODULE = gmmu-gk20a-fusa
include ../../../../Makefile.units

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=gmmu-gk20a-fusa
include $(NV_COMPONENT_DIR)/../../../../Makefile.units.common.interface.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=gmmu-gk20a-fusa
include $(NV_COMPONENT_DIR)/../../../../Makefile.units.common.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,82 @@
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <unit/io.h>
#include <unit/unit.h>
#include <nvgpu/io.h>
#include <nvgpu/posix/io.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/gmmu.h>
#include "hal/mm/gmmu/gmmu_gk20a.h"
#include "gmmu-gk20a-fusa.h"
int test_gk20a_get_pde_pgsz(struct unit_module *m, struct gk20a *g, void *args)
{
struct gk20a_mmu_level l;
struct nvgpu_gmmu_pd pd;
u32 ret_pgsz;
int ret = UNIT_FAIL;
ret_pgsz = gk20a_get_pde_pgsz(g, &l, &pd, 0U);
unit_assert(ret_pgsz == GMMU_PAGE_SIZE_SMALL, goto done);
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s pde_pgsz != GMMU_PAGE_SIZE_SMALL as expected\n",
__func__);
}
return ret;
}
int test_gk20a_get_pte_pgsz(struct unit_module *m, struct gk20a *g, void *args)
{
struct gk20a_mmu_level l;
struct nvgpu_gmmu_pd pd;
u32 ret_pgsz;
int ret = UNIT_FAIL;
ret_pgsz = gk20a_get_pte_pgsz(g, &l, &pd, 0U);
unit_assert(ret_pgsz == GMMU_NR_PAGE_SIZES, goto done);
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s pte_pgsz != GMMU_NR_PAGE_SIZES as expected\n",
__func__);
}
return ret;
}
struct unit_module_test mm_gmmu_gk20a_fusa_tests[] = {
UNIT_TEST(pde_pgsz, test_gk20a_get_pde_pgsz, NULL, 0),
UNIT_TEST(pte_pgsz, test_gk20a_get_pte_pgsz, NULL, 0),
};
UNIT_MODULE(gmmu_gk20a_fusa, mm_gmmu_gk20a_fusa_tests, UNIT_PRIO_NVGPU_TEST);

View File

@@ -0,0 +1,74 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_MM_HAL_GMMU_GMMU_GK20A_FUSA_H
#define UNIT_MM_HAL_GMMU_GMMU_GK20A_FUSA_H
struct gk20a;
struct unit_module;
/** @addtogroup SWUTS-mm-hal-gmmu-gmmu_gk20a_fusa
* @{
*
* Software Unit Test Specification for mm.hal.gmmu.gmmu_gk20a_fusa
*/
/**
* Test specification for: test_gk20a_get_pde_pgsz
*
* Description: Test PDE page size
*
* Test Type: Feature
*
* Targets: gk20a_get_pde_pgsz
*
* Input: test_env_init
*
* Steps:
* - Check PDE page size value using the get_pgsz API
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gk20a_get_pde_pgsz(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_gk20a_get_pte_pgsz
*
* Description: Test PTE page size
*
* Test Type: Feature
*
* Targets: gk20a_get_pte_pgsz
*
* Input: test_env_init
*
* Steps:
* - Check PTE page size value using the get_pgsz API
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gk20a_get_pte_pgsz(struct unit_module *m, struct gk20a *g, void *args);
/** @} */
#endif /* UNIT_MM_HAL_GMMU_GMMU_GK20A_FUSA_H */

View File

@@ -0,0 +1,26 @@
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
.SUFFIXES:
OBJS = gmmu-gm20b-fusa.o
MODULE = gmmu-gm20b-fusa
include ../../../../Makefile.units

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=gmmu-gm20b-fusa
include $(NV_COMPONENT_DIR)/../../../../Makefile.units.common.interface.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=gmmu-gm20b-fusa
include $(NV_COMPONENT_DIR)/../../../../Makefile.units.common.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,58 @@
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <unit/io.h>
#include <unit/unit.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/types.h>
#include "hal/mm/gmmu/gmmu_gm20b.h"
#include "gmmu-gm20b-fusa.h"
int test_gm20b_mm_get_big_page_sizes(struct unit_module *m, struct gk20a *g,
void *args)
{
u32 ret_pgsz;
int ret = UNIT_FAIL;
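/*
 * gm20b advertises 64K and 128K big pages; the HAL returns them OR-ed
 * together as a single size mask.
 */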
ret_pgsz = gm20b_mm_get_big_page_sizes();
unit_assert(ret_pgsz == (SZ_64K | SZ_128K), goto done);
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s pde_pgsz != GMMU_PAGE_SIZE_SMALL as expected\n",
__func__);
}
return ret;
}
struct unit_module_test mm_gmmu_gm20b_fusa_tests[] = {
UNIT_TEST(get_big_pgsz, test_gm20b_mm_get_big_page_sizes, NULL, 0),
};
UNIT_MODULE(gmmu_gm20b_fusa, mm_gmmu_gm20b_fusa_tests, UNIT_PRIO_NVGPU_TEST);

View File

@@ -0,0 +1,56 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_MM_HAL_GMMU_GMMU_GM20B_FUSA_H
#define UNIT_MM_HAL_GMMU_GMMU_GM20B_FUSA_H
struct gk20a;
struct unit_module;
/** @addtogroup SWUTS-mm-hal-gmmu-gmmu_gm20b_fusa
* @{
*
* Software Unit Test Specification for mm.hal.gmmu.gmmu_gm20b_fusa
*/
/**
* Test specification for: test_gm20b_mm_get_big_page_sizes
*
* Description: Test supported big page sizes
*
* Test Type: Feature
*
* Targets: gops_mm.gops_mm_gmmu.get_big_page_sizes, gm20b_mm_get_big_page_sizes
*
* Input: test_env_init
*
* Steps:
* - Check big page size value
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gm20b_mm_get_big_page_sizes(struct unit_module *m, struct gk20a *g,
void *args);
/** @} */
#endif /* UNIT_MM_HAL_GMMU_GMMU_GM20B_FUSA_H */

View File

@@ -0,0 +1,26 @@
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
.SUFFIXES:
OBJS = gmmu-gp10b-fusa.o
MODULE = gmmu-gp10b-fusa
include ../../../../Makefile.units

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=gmmu-gp10b-fusa
include $(NV_COMPONENT_DIR)/../../../../Makefile.units.common.interface.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=gmmu-gp10b-fusa
include $(NV_COMPONENT_DIR)/../../../../Makefile.units.common.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,552 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <unit/io.h>
#include <unit/unit.h>
#include <nvgpu/io.h>
#include <nvgpu/posix/io.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/types.h>
#include <nvgpu/vm.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/gmmu.h>
#include <nvgpu/hw/gv11b/hw_gmmu_gv11b.h>
#include "hal/mm/gmmu/gmmu_gp10b.h"
#include "gmmu-gp10b-fusa.h"
static u32 max_page_table_levels;
static const struct gk20a_mmu_level *mmu_level;
int test_gp10b_mm_get_default_big_page_size(struct unit_module *m,
struct gk20a *g, void *args)
{
u32 ret_pgsz;
int ret = UNIT_FAIL;
ret_pgsz = nvgpu_gmmu_default_big_page_size();
unit_assert(ret_pgsz == U32(SZ_64K), goto done);
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s: big page size != 64K as expected\n", __func__);
}
return ret;
}
int test_gp10b_mm_get_iommu_bit(struct unit_module *m,
struct gk20a *g, void *args)
{
u32 ret_bit;
int ret = UNIT_FAIL;
ret_bit = gp10b_mm_get_iommu_bit(g);
unit_assert(ret_bit == 36U, goto done);
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s: iommu bit != 36 as expected\n", __func__);
}
return ret;
}
int test_gp10b_get_max_page_table_levels(struct unit_module *m,
struct gk20a *g, void *args)
{
int ret = UNIT_FAIL;
max_page_table_levels = gp10b_get_max_page_table_levels(g);
unit_assert(max_page_table_levels == 5U, goto done);
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s: max page table levels != 5 as expected\n",
__func__);
}
return ret;
}
int test_gp10b_mm_get_mmu_levels(struct unit_module *m,
struct gk20a *g, void *args)
{
int ret = UNIT_FAIL;
const struct gk20a_mmu_level *l;
u32 i;
l = gp10b_mm_get_mmu_levels(g, SZ_64K);
for (i = 0; i < max_page_table_levels; i++) {
unit_assert((l->update_entry != NULL), goto done);
l++;
}
unit_assert(l->update_entry == NULL, goto done);
/* If get mmu_levels is successful, copy mmu_levels for future use */
mmu_level = gp10b_mm_get_mmu_levels(g, SZ_64K);
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s: max page table levels != 5 as expected\n",
__func__);
}
return ret;
}
int test_update_gmmu_pde3_locked(struct unit_module *m,
struct gk20a *g, void *args)
{
int ret = UNIT_FAIL;
int err;
struct vm_gk20a vm;
struct nvgpu_gmmu_pd pd = {0};
struct nvgpu_gmmu_attrs attrs = {0};
const struct gk20a_mmu_level *l = mmu_level;
u64 vaddr, size = SZ_4K;
u32 data = 0U;
u32 *data_ptr = NULL;
unit_assert(l != NULL, goto done);
unit_assert(g->mm.pd_cache == NULL, goto done);
vm.mm = &g->mm;
vm.mm->g = g;
err = nvgpu_pd_cache_init(g);
unit_assert(err == 0, goto done);
err = nvgpu_pd_alloc(&vm, &pd, size);
unit_assert(err == 0, goto done);
vaddr = nvgpu_pd_gpu_addr(g, &pd);
unit_assert(vaddr != 0ULL, goto done);
pd.entries = (struct nvgpu_gmmu_pd *) nvgpu_kzalloc(g,
sizeof(struct nvgpu_gmmu_pd));
unit_assert(pd.entries != NULL, goto done);
pd.entries->mem = (struct nvgpu_mem *) nvgpu_kzalloc(g,
sizeof(struct nvgpu_mem));
unit_assert(pd.entries->mem != NULL, goto done);
nvgpu_set_enabled(g, NVGPU_MM_HONORS_APERTURE, true);
pd.entries->mem->aperture = APERTURE_SYSMEM;
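/* Level 0 of the gp10b 5-level table carries the PDE3 update_entry under test. */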
l[0].update_entry(&vm, l, &pd, 0U, vaddr, size, &attrs);
/* Compute data written to pd->mem */
/* pd.entries->mem is SYSMEM with HONORS_APERTURE */
data_ptr = pd.mem->cpu_va;
data |= gmmu_new_pde_aperture_sys_mem_ncoh_f();
data |= gmmu_new_pde_address_sys_f(size >>
gmmu_new_pde_address_shift_v());
data |= gmmu_new_pde_vol_true_f();
unit_assert(data == data_ptr[0], goto done);
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s: failed\n", __func__);
}
if (pd.entries != NULL && pd.entries->mem != NULL) {
nvgpu_kfree(g, pd.entries->mem);
}
if (pd.entries != NULL) {
nvgpu_kfree(g, pd.entries);
}
nvgpu_set_enabled(g, NVGPU_MM_HONORS_APERTURE, false);
nvgpu_pd_free(&vm, &pd);
nvgpu_pd_cache_fini(g);
return ret;
}
#define F_UPDATE_GMMU_PDE0_SMALL_PAGE 0ULL
#define F_UPDATE_GMMU_PDE0_BIG_PAGE 1ULL
static const char *f_gmmu_pde0_locked[] = {
"gmmu_small_page_size",
"gmmu_big_page_size",
};
int test_update_gmmu_pde0_locked(struct unit_module *m,
struct gk20a *g, void *args)
{
int ret = UNIT_FAIL;
int err;
u64 branch = (u64)args;
struct vm_gk20a vm;
struct nvgpu_gmmu_pd pd = {0};
struct nvgpu_gmmu_attrs attrs = {0};
const struct gk20a_mmu_level *l = mmu_level;
u64 vaddr, size = SZ_4K;
u32 data = 0U;
u32 *data_ptr = NULL;
unit_assert(l != NULL, goto done);
unit_assert(g->mm.pd_cache == NULL, goto done);
vm.mm = &g->mm;
err = nvgpu_pd_cache_init(g);
unit_assert(err == 0, goto done);
err = nvgpu_pd_alloc(&vm, &pd, size);
unit_assert(err == 0, goto done);
vaddr = nvgpu_pd_gpu_addr(g, &pd);
unit_assert(vaddr != 0ULL, goto done);
pd.entries = (struct nvgpu_gmmu_pd *) nvgpu_kzalloc(g,
sizeof(struct nvgpu_gmmu_pd));
unit_assert(pd.entries != NULL, goto done);
pd.entries->mem = (struct nvgpu_mem *) nvgpu_kzalloc(g,
sizeof(struct nvgpu_mem));
unit_assert(pd.entries->mem != NULL, goto done);
nvgpu_set_enabled(g, NVGPU_MM_HONORS_APERTURE, true);
pd.entries->mem->aperture = APERTURE_SYSMEM;
attrs.pgsz = branch == F_UPDATE_GMMU_PDE0_SMALL_PAGE ?
GMMU_PAGE_SIZE_SMALL : GMMU_PAGE_SIZE_BIG;
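/* Level 3 of the 5-level table is the dual (small/big page) PDE0 entry. */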
l[3].update_entry(&vm, l, &pd, 0U, vaddr, size, &attrs);
/* Compute data written to pd->mem */
/* pd.entries->mem is SYSMEM with HONORS_APERTURE */
data_ptr = pd.mem->cpu_va;
if (branch == F_UPDATE_GMMU_PDE0_SMALL_PAGE) {
data |= gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f();
data |= gmmu_new_dual_pde_address_small_sys_f(size >>
gmmu_new_dual_pde_address_shift_v());
data |= gmmu_new_dual_pde_vol_small_true_f();
unit_assert(data == data_ptr[2], goto done);
} else {
data |= gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f();
data |= gmmu_new_dual_pde_address_big_sys_f(size >>
gmmu_new_dual_pde_address_big_shift_v());
data |= gmmu_new_dual_pde_vol_big_true_f();
unit_assert(data == data_ptr[0], goto done);
}
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s: %s failed\n", __func__,
f_gmmu_pde0_locked[branch]);
}
if (pd.entries != NULL && pd.entries->mem != NULL) {
nvgpu_kfree(g, pd.entries->mem);
}
if (pd.entries != NULL) {
nvgpu_kfree(g, pd.entries);
}
nvgpu_set_enabled(g, NVGPU_MM_HONORS_APERTURE, false);
nvgpu_pd_free(&vm, &pd);
nvgpu_pd_cache_fini(g);
return ret;
}
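/*
 * Branch flags for test_update_gmmu_pte_locked: the single-attribute bits
 * below are OR-ed into the composite F_UPDATE_PTE_* scenario values, and
 * the composite value doubles as the index into f_gmmu_pte_locked[] used
 * for error reporting.
 */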
#define F_ATTRS_PRIV 0x1ULL
#define F_ATTRS_READ_ONLY 0x2ULL
#define F_ATTRS_VALID 0x4ULL
#define F_ATTRS_CACHEABLE 0x8ULL
#define F_ATTRS_APERTURE_VIDMEM 0x10ULL
#define F_PLATFORM_ATOMIC 0x20ULL
#define F_UPDATE_PTE 0x40ULL
#define F_UPDATE_PTE_SPARSE 0x80ULL
#define F_UPDATE_PTE_PHYS_ADDR_ZERO 0x00ULL
/* F_UPDATE_PTE */
#define F_UPDATE_PTE_DEFAULT 0x40ULL
/* F_UPDATE_PTE | F_ATTRS_PRIV | F_ATTRS_READ_ONLY */
#define F_UPDATE_PTE_ATTRS_PRIV_READ_ONLY 0x43ULL
/* F_UPDATE_PTE | F_ATTRS_VALID */
#define F_UPDATE_PTE_ATTRS_VALID 0x44ULL
/* F_UPDATE_PTE | F_ATTRS_CACHEABLE */
#define F_UPDATE_PTE_ATTRS_CACHEABLE 0x48ULL
/* F_UPDATE_PTE | F_ATTRS_APERTURE_VIDMEM */
#define F_UPDATE_PTE_ATTRS_VIDMEM 0x50ULL
/* F_UPDATE_PTE | F_PLATFORM_ATOMIC */
#define F_UPDATE_PTE_PLATFORM_ATOMIC 0x60ULL
static const char *f_gmmu_pte_locked[] = {
[F_UPDATE_PTE_PHYS_ADDR_ZERO] = "update_pte_phys_addr_zero",
[F_UPDATE_PTE_DEFAULT] = "update_pte_default",
[F_UPDATE_PTE_ATTRS_PRIV_READ_ONLY] = "update_pte_attrs_priv_read_only",
[F_UPDATE_PTE_ATTRS_VALID] = "update_pte_attrs_valid",
[F_UPDATE_PTE_ATTRS_CACHEABLE] = "update_pte_attrs_cacheable",
[F_UPDATE_PTE_ATTRS_VIDMEM] = "update_pte_attrs_vidmem",
[F_UPDATE_PTE_PLATFORM_ATOMIC] = "update_pte_platform_atomic",
[F_UPDATE_PTE_SPARSE] = "update_pte_sparse",
};
int test_update_gmmu_pte_locked(struct unit_module *m,
struct gk20a *g, void *args)
{
int ret = UNIT_FAIL;
int err;
u64 branch = (u64)args;
struct vm_gk20a vm;
struct nvgpu_gmmu_pd pd = {0};
struct nvgpu_gmmu_attrs attrs = {0};
const struct gk20a_mmu_level *l = mmu_level;
u64 vaddr, size = SZ_4K, paddr = 0;
u32 data = 0U;
u32 *data_ptr = NULL;
unit_assert(l != NULL, goto done);
unit_assert(g->mm.pd_cache == NULL, goto done);
vm.mm = &g->mm;
err = nvgpu_pd_cache_init(g);
unit_assert(err == 0, goto done);
err = nvgpu_pd_alloc(&vm, &pd, size);
unit_assert(err == 0, goto done);
vaddr = nvgpu_pd_gpu_addr(g, &pd);
unit_assert(vaddr != 0ULL, goto done);
pd.entries = (struct nvgpu_gmmu_pd *) nvgpu_kzalloc(g,
sizeof(struct nvgpu_gmmu_pd));
unit_assert(pd.entries != NULL, goto done);
pd.entries->mem = (struct nvgpu_mem *) nvgpu_kzalloc(g,
sizeof(struct nvgpu_mem));
unit_assert(pd.entries->mem != NULL, goto done);
nvgpu_set_enabled(g, NVGPU_MM_HONORS_APERTURE, true);
pd.entries->mem->aperture = APERTURE_SYSMEM;
attrs.pgsz = GMMU_PAGE_SIZE_SMALL;
vm.gmmu_page_sizes[GMMU_PAGE_SIZE_SMALL] = SZ_4K;
paddr = branch & F_UPDATE_PTE ? size : 0ULL;
nvgpu_set_enabled(g, NVGPU_SUPPORT_PLATFORM_ATOMIC,
(branch & F_PLATFORM_ATOMIC ? true : false));
attrs.platform_atomic = branch & F_PLATFORM_ATOMIC ? true : false;
attrs.aperture = branch & F_ATTRS_APERTURE_VIDMEM ?
APERTURE_VIDMEM : APERTURE_SYSMEM;
attrs.priv = branch & F_ATTRS_PRIV ? true : false;
attrs.rw_flag = branch & F_ATTRS_READ_ONLY ?
gk20a_mem_flag_read_only : gk20a_mem_flag_none;
attrs.valid = branch & F_ATTRS_VALID ? true : false;
attrs.cacheable = branch & F_ATTRS_CACHEABLE ? true : false;
attrs.sparse = branch & F_UPDATE_PTE_SPARSE ? true : false;
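/* Level 4 of the 5-level table is the PTE update_entry under test. */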
l[4].update_entry(&vm, l, &pd, 0U, vaddr, paddr, &attrs);
/* Compute data written to pd->mem */
/* pd.entries->mem is SYSMEM with HONORS_APERTURE */
data_ptr = pd.mem->cpu_va;
if (branch & F_UPDATE_PTE) {
data |= branch & F_ATTRS_APERTURE_VIDMEM ?
gmmu_new_pte_address_vid_f(paddr >>
gmmu_new_pte_address_shift_v()) :
gmmu_new_pte_address_sys_f(paddr >>
gmmu_new_pte_address_shift_v());
data |= branch & F_PLATFORM_ATOMIC ?
gmmu_new_pte_aperture_sys_mem_coh_f() :
branch & F_ATTRS_APERTURE_VIDMEM ?
gmmu_new_pte_aperture_video_memory_f() :
gmmu_new_pte_aperture_sys_mem_ncoh_f();
data |= branch & F_ATTRS_VALID ? gmmu_new_pte_valid_true_f() :
gmmu_new_pte_valid_false_f();
data |= branch & F_ATTRS_PRIV ?
gmmu_new_pte_privilege_true_f() : 0U;
data |= branch & F_ATTRS_READ_ONLY ?
gmmu_new_pte_read_only_true_f() : 0U;
if (!(branch & F_ATTRS_CACHEABLE)) {
data |= branch & F_ATTRS_VALID ?
gmmu_new_pte_vol_true_f() :
gmmu_new_pte_read_only_true_f();
}
} else if (branch & F_UPDATE_PTE_SPARSE) {
data = gmmu_new_pte_valid_false_f();
data |= gmmu_new_pte_vol_true_f();
}
unit_assert(data == data_ptr[0], goto done);
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s: %s failed\n", __func__,
f_gmmu_pte_locked[branch]);
}
if (pd.entries != NULL && pd.entries->mem != NULL) {
nvgpu_kfree(g, pd.entries->mem);
}
if (pd.entries != NULL) {
nvgpu_kfree(g, pd.entries);
}
nvgpu_set_enabled(g, NVGPU_MM_HONORS_APERTURE, false);
nvgpu_pd_free(&vm, &pd);
nvgpu_pd_cache_fini(g);
return ret;
}
#define F_PDE_V0_VALUE_SET 0x1ULL
#define F_PDE_V1_VALUE_SET 0x2ULL
#define F_PDE_V2_VALUE_SET 0x4ULL
#define F_PDE_V3_VALUE_SET 0x8ULL
#define F_PDE_BIG_PAGE_APERTURE_SET_ONLY 0x01ULL
#define F_PDE_BIG_PAGE_APERTURE_ADDR_SET 0x03ULL
#define F_PDE_SMALL_PAGE_APERTURE_SET_ONLY 0x04ULL
#define F_PDE_SMALL_PAGE_APERTURE_ADDR_SET 0x0CULL
#define F_PDE_SMALL_BIG_SET 0x0FULL
#define F_PDE0_PGSZ_MEM_NULL 0x10ULL
static const char *f_get_pde0_pgsz[] = {
[F_PDE_BIG_PAGE_APERTURE_SET_ONLY] =
"get_pde0_pgsz_big_page_only_aperture_set",
[F_PDE_BIG_PAGE_APERTURE_ADDR_SET] =
"get_pde0_pgsz_big_page_aperture_addr_set",
[F_PDE_SMALL_PAGE_APERTURE_SET_ONLY] =
"get_pde0_pgsz_small_page_only_aperture_set",
[F_PDE_SMALL_PAGE_APERTURE_ADDR_SET] =
"get_pde0_pgsz_small_page_aperture_addr_set",
[F_PDE_SMALL_BIG_SET] = "get_pde0_pgsz_small_big_set",
[F_PDE0_PGSZ_MEM_NULL] = "get_pde0_pgsz_mem_null",
};
int test_gp10b_get_pde0_pgsz(struct unit_module *m, struct gk20a *g, void *args)
{
int ret = UNIT_FAIL;
int err;
u64 branch = (u64)args;
struct vm_gk20a vm;
struct nvgpu_gmmu_pd pd = {0};
const struct gk20a_mmu_level *l = mmu_level;
u64 vaddr, size = SZ_4K;
u32 *data;
u32 ret_pgsz;
struct nvgpu_mem *tmp_mem_ptr = NULL;
unit_assert(l != NULL, goto done);
unit_assert(g->mm.pd_cache == NULL, goto done);
vm.mm = &g->mm;
err = nvgpu_pd_cache_init(g);
unit_assert(err == 0, goto done);
err = nvgpu_pd_alloc(&vm, &pd, size);
unit_assert(err == 0, goto done);
vaddr = nvgpu_pd_gpu_addr(g, &pd);
unit_assert(vaddr != 0ULL, goto done);
if (branch & F_PDE0_PGSZ_MEM_NULL) {
tmp_mem_ptr = pd.mem;
pd.mem = NULL;
} else {
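/*
 * PDE0 is a dual entry: words 0-1 hold the big-page aperture/address
 * and words 2-3 the small-page ones. Seed them per the branch under
 * test.
 */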
data = pd.mem->cpu_va;
data[0] = branch & F_PDE_V0_VALUE_SET ?
(gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f() |
gmmu_new_dual_pde_aperture_big_sys_mem_coh_f() |
gmmu_new_dual_pde_aperture_big_video_memory_f()) : 0U;
data[1] = branch & F_PDE_V1_VALUE_SET ? 1U : 0U;
data[2] = branch & F_PDE_V2_VALUE_SET ?
(gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f() |
gmmu_new_dual_pde_aperture_small_sys_mem_coh_f() |
gmmu_new_dual_pde_aperture_small_video_memory_f()) : 0U;
data[3] = branch & F_PDE_V3_VALUE_SET ? 1U : 0U;
}
ret_pgsz = l[3].get_pgsz(g, l, &pd, 0U);
if (branch == F_PDE_BIG_PAGE_APERTURE_ADDR_SET) {
unit_assert(ret_pgsz == GMMU_PAGE_SIZE_BIG, goto done);
} else if (branch == F_PDE_SMALL_PAGE_APERTURE_ADDR_SET) {
unit_assert(ret_pgsz == GMMU_PAGE_SIZE_SMALL, goto done);
} else {
unit_assert(ret_pgsz == GMMU_NR_PAGE_SIZES, goto done);
}
if (branch & F_PDE0_PGSZ_MEM_NULL) {
pd.mem = tmp_mem_ptr;
}
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s: %s failed\n", __func__,
f_get_pde0_pgsz[branch]);
}
nvgpu_pd_free(&vm, &pd);
nvgpu_pd_cache_fini(g);
return ret;
}
struct unit_module_test mm_gmmu_gp10b_fusa_tests[] = {
UNIT_TEST(big_pgsz, test_gp10b_mm_get_default_big_page_size, NULL, 0),
UNIT_TEST(iommu_bit, test_gp10b_mm_get_iommu_bit, NULL, 0),
UNIT_TEST(max_page_table_levels, test_gp10b_get_max_page_table_levels, NULL, 0),
UNIT_TEST(mmu_levels, test_gp10b_mm_get_mmu_levels, NULL, 0),
UNIT_TEST(update_gmmu_pde3_locked, test_update_gmmu_pde3_locked, NULL, 0),
UNIT_TEST(update_gmmu_pde0_locked_s0, test_update_gmmu_pde0_locked, (void *)F_UPDATE_GMMU_PDE0_SMALL_PAGE, 0),
UNIT_TEST(update_gmmu_pde0_locked_s1, test_update_gmmu_pde0_locked, (void *)F_UPDATE_GMMU_PDE0_BIG_PAGE, 0),
UNIT_TEST(update_gmmu_pte_locked_s0, test_update_gmmu_pte_locked, (void *)F_UPDATE_PTE_PHYS_ADDR_ZERO, 0),
UNIT_TEST(update_gmmu_pte_locked_s1, test_update_gmmu_pte_locked, (void *)F_UPDATE_PTE_DEFAULT, 0),
UNIT_TEST(update_gmmu_pte_locked_s2, test_update_gmmu_pte_locked, (void *)F_UPDATE_PTE_ATTRS_PRIV_READ_ONLY, 0),
UNIT_TEST(update_gmmu_pte_locked_s3, test_update_gmmu_pte_locked, (void *)F_UPDATE_PTE_ATTRS_VALID, 0),
UNIT_TEST(update_gmmu_pte_locked_s4, test_update_gmmu_pte_locked, (void *)F_UPDATE_PTE_ATTRS_CACHEABLE, 0),
UNIT_TEST(update_gmmu_pte_locked_s5, test_update_gmmu_pte_locked, (void *)F_UPDATE_PTE_ATTRS_VIDMEM, 0),
UNIT_TEST(update_gmmu_pte_locked_s6, test_update_gmmu_pte_locked, (void *)F_UPDATE_PTE_PLATFORM_ATOMIC, 0),
UNIT_TEST(update_gmmu_pte_locked_s7, test_update_gmmu_pte_locked, (void *)F_UPDATE_PTE_SPARSE, 0),
UNIT_TEST(gp10b_get_pde0_pgsz_s0, test_gp10b_get_pde0_pgsz, (void *)F_PDE_BIG_PAGE_APERTURE_SET_ONLY, 0),
UNIT_TEST(gp10b_get_pde0_pgsz_s1, test_gp10b_get_pde0_pgsz, (void *)F_PDE_BIG_PAGE_APERTURE_ADDR_SET, 0),
UNIT_TEST(gp10b_get_pde0_pgsz_s2, test_gp10b_get_pde0_pgsz, (void *)F_PDE_SMALL_PAGE_APERTURE_SET_ONLY, 0),
UNIT_TEST(gp10b_get_pde0_pgsz_s3, test_gp10b_get_pde0_pgsz, (void *)F_PDE_SMALL_PAGE_APERTURE_ADDR_SET, 0),
UNIT_TEST(gp10b_get_pde0_pgsz_s4, test_gp10b_get_pde0_pgsz, (void *)F_PDE_SMALL_BIG_SET, 0),
UNIT_TEST(gp10b_get_pde0_pgsz_s5, test_gp10b_get_pde0_pgsz, (void *)F_PDE0_PGSZ_MEM_NULL, 0),
};
UNIT_MODULE(gmmu_gp10b_fusa, mm_gmmu_gp10b_fusa_tests, UNIT_PRIO_NVGPU_TEST);

View File

@@ -0,0 +1,210 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_MM_HAL_GMMU_GMMU_GP10B_FUSA_H
#define UNIT_MM_HAL_GMMU_GMMU_GP10B_FUSA_H
struct gk20a;
struct unit_module;
/** @addtogroup SWUTS-mm-hal-gmmu-gmmu_gp10b_fusa
* @{
*
* Software Unit Test Specification for mm.hal.gmmu.gmmu_gp10b_fusa
*/
/**
* Test specification for: test_gp10b_mm_get_default_big_page_size
*
* Description: Test big page size
*
* Test Type: Feature
*
* Targets: gops_mm.gops_mm_gmmu.get_default_big_page_size,
* nvgpu_gmmu_default_big_page_size
*
* Input: None
*
* Steps:
* - Check big page size value and confirm that size is 64K.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gp10b_mm_get_default_big_page_size(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_gp10b_mm_get_iommu_bit
*
* Description: Test IOMMU bit number
*
* Test Type: Feature
*
* Targets: gops_mm.gops_mm_gmmu.get_iommu_bit, gp10b_mm_get_iommu_bit
*
* Input: None
*
* Steps:
* - Check iommu bit is equal to 36.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gp10b_mm_get_iommu_bit(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_gp10b_get_max_page_table_levels
*
* Description: Test max page table levels
*
* Test Type: Feature
*
* Targets: gops_mm.gops_mm_gmmu.get_max_page_table_levels,
* gp10b_get_max_page_table_levels
*
* Input: None
*
* Steps:
* - Check max page table levels is 5.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gp10b_get_max_page_table_levels(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_gp10b_mm_get_mmu_levels
*
* Description: Test mmu_levels structure
*
* Test Type: Feature
*
* Targets: gops_mm.gops_mm_gmmu.get_mmu_levels, gp10b_mm_get_mmu_levels
*
* Input: None
*
* Steps:
* - Validate the mmu_levels table via each level's update_entry pointer, then
*   cache it for use by later tests.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gp10b_mm_get_mmu_levels(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_update_gmmu_pde3_locked
*
* Description: Test mmu_levels update entry function
*
* Test Type: Feature
*
* Targets: update_gmmu_pde3_locked, pte_dbg_print
*
* Input: None
*
* Steps:
* - Update gmmu pde3 for given physical address.
* - Check if data written to memory is as expected.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_update_gmmu_pde3_locked(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_update_gmmu_pde0_locked
*
* Description: Test mmu_level 3 update entry function
*
* Test Type: Feature
*
* Targets: update_gmmu_pde0_locked, pte_dbg_print
*
* Input: args (value can be F_UPDATE_GMMU_PDE0_SMALL_PAGE or
* F_UPDATE_GMMU_PDE0_BIG_PAGE)
*
* Steps:
* - Update gmmu pde0 for the given physical address.
* - For big and small page size, check data written to memory.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_update_gmmu_pde0_locked(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_update_gmmu_pte_locked
*
* Description: Test mmu_level 4 update entry function
*
* Test Type: Feature
*
* Targets: update_gmmu_pte_locked, update_pte, update_pte_sparse,
* gmmu_aperture_mask
*
* Input: args (value can be F_UPDATE_PTE_PHYS_ADDR_ZERO, F_UPDATE_PTE_DEFAULT,
* F_UPDATE_PTE_ATTRS_PRIV_READ_ONLY, F_UPDATE_PTE_ATTRS_VALID,
* F_UPDATE_PTE_ATTRS_CACHEABLE, F_UPDATE_PTE_ATTRS_VIDMEM,
* F_UPDATE_PTE_PLATFORM_ATOMIC or F_UPDATE_PTE_SPARSE)
*
* Steps:
* - Update gmmu pte for given physical address.
* - Check data written to pd mem for various scenarios such as cacheable GMMU
* mapping, privileged mapping, read-only mapping, etc.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_update_gmmu_pte_locked(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_gp10b_get_pde0_pgsz
*
* Description: Test GMMU level 3 page size function
*
* Test Type: Feature
*
* Targets: gp10b_get_pde0_pgsz
*
* Input: args (value can be F_PDE_BIG_PAGE_APERTURE_SET_ONLY,
* F_PDE_BIG_PAGE_APERTURE_ADDR_SET, F_PDE_SMALL_PAGE_APERTURE_SET_ONLY,
* F_PDE_SMALL_PAGE_APERTURE_ADDR_SET, F_PDE_SMALL_BIG_SET or
* F_PDE0_PGSZ_MEM_NULL)
*
* Steps:
* - Check pde0 page size for given aperture values
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gp10b_get_pde0_pgsz(struct unit_module *m, struct gk20a *g,
void *args);
/** @} */
#endif /* UNIT_MM_HAL_GMMU_GMMU_GP10B_FUSA_H */

View File

@@ -0,0 +1,26 @@
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
.SUFFIXES:
OBJS = gmmu-gv11b-fusa.o
MODULE = gmmu-gv11b-fusa
include ../../../../Makefile.units

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=gmmu-gv11b-fusa
include $(NV_COMPONENT_DIR)/../../../../Makefile.units.common.interface.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=gmmu-gv11b-fusa
include $(NV_COMPONENT_DIR)/../../../../Makefile.units.common.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,87 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <unit/io.h>
#include <unit/unit.h>
#include <nvgpu/io.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/types.h>
#include <nvgpu/gmmu.h>
#include "hal/mm/gmmu/gmmu_gp10b.h"
#include "hal/mm/gmmu/gmmu_gv11b.h"
#include "gmmu-gv11b-fusa.h"
#define F_GV11B_GPU_PHYS_ADDR_GMMU_ATTRS_NULL 0
#define F_GV11B_GPU_PHYS_ADDR_L3_ALLOC_FALSE 1
#define F_GV11B_GPU_PHYS_ADDR_L3_ALLOC_TRUE 2
int test_gv11b_gpu_phys_addr(struct unit_module *m, struct gk20a *g, void *args)
{
struct gpu_ops gops = g->ops;
struct nvgpu_gmmu_attrs attrs = {0};
struct nvgpu_gmmu_attrs *attrs_ptr;
u64 phys = BIT(10);
u64 ret_phys;
u64 branch = (u64)args;
int ret = UNIT_FAIL;
g->ops.mm.gmmu.get_iommu_bit = gp10b_mm_get_iommu_bit;
attrs_ptr = branch == F_GV11B_GPU_PHYS_ADDR_GMMU_ATTRS_NULL ?
NULL : &attrs;
attrs.l3_alloc = branch == F_GV11B_GPU_PHYS_ADDR_L3_ALLOC_FALSE ?
false : true;
ret_phys = gv11b_gpu_phys_addr(g, attrs_ptr, phys);
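/*
 * With l3_alloc set, gv11b_gpu_phys_addr ORs the IOMMU bit into the
 * address to steer the access through L3; otherwise (or with NULL attrs)
 * the physical address is returned unchanged.
 */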
if (branch == F_GV11B_GPU_PHYS_ADDR_L3_ALLOC_TRUE) {
unit_assert(ret_phys == (phys |
BIT64(g->ops.mm.gmmu.get_iommu_bit(g))),
goto done);
} else {
unit_assert(ret_phys == phys, goto done);
}
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s pde_pgsz != GMMU_PAGE_SIZE_SMALL as expected\n",
__func__);
}
g->ops = gops;
return ret;
}
struct unit_module_test mm_gmmu_gv11b_fusa_tests[] = {
UNIT_TEST(gpu_phys_addr_s0, test_gv11b_gpu_phys_addr, (void *)F_GV11B_GPU_PHYS_ADDR_GMMU_ATTRS_NULL, 0),
UNIT_TEST(gpu_phys_addr_s1, test_gv11b_gpu_phys_addr, (void *)F_GV11B_GPU_PHYS_ADDR_L3_ALLOC_FALSE, 0),
UNIT_TEST(gpu_phys_addr_s2, test_gv11b_gpu_phys_addr, (void *)F_GV11B_GPU_PHYS_ADDR_L3_ALLOC_TRUE, 0),
};
UNIT_MODULE(gmmu_gv11b_fusa, mm_gmmu_gv11b_fusa_tests, UNIT_PRIO_NVGPU_TEST);

View File

@@ -0,0 +1,58 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_MM_HAL_GMMU_GMMU_GV11B_FUSA_H
#define UNIT_MM_HAL_GMMU_GMMU_GV11B_FUSA_H
struct gk20a;
struct unit_module;
/** @addtogroup SWUTS-mm-hal-gmmu-gmmu_gv11b_fusa
* @{
*
* Software Unit Test Specification for mm.hal.gmmu.gmmu_gv11b_fusa
*/
/**
* Test specification for: test_gv11b_gpu_phys_addr
*
* Description: Test GPU physical address translation
*
* Test Type: Feature
*
* Targets: gops_mm.gops_mm_gmmu.gpu_phys_addr, gv11b_gpu_phys_addr
*
* Input: args (value can be F_GV11B_GPU_PHYS_ADDR_GMMU_ATTRS_NULL,
* F_GV11B_GPU_PHYS_ADDR_L3_ALLOC_FALSE or
* F_GV11B_GPU_PHYS_ADDR_L3_ALLOC_TRUE)
*
* Steps:
* - Check the GPU physical address returned for NULL attrs, and for l3_alloc
*   false and true
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gv11b_gpu_phys_addr(struct unit_module *m, struct gk20a *g,
void *args);
/** @} */
#endif /* UNIT_MM_HAL_GMMU_GMMU_GV11B_FUSA_H */

View File

@@ -0,0 +1,26 @@
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
.SUFFIXES:
OBJS = mm-gp10b-fusa.o
MODULE = mm-gp10b-fusa
include ../../../Makefile.units

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=mm-gp10b-fusa
include $(NV_COMPONENT_DIR)/../../../Makefile.units.common.interface.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=mm-gp10b-fusa
include $(NV_COMPONENT_DIR)/../../../Makefile.units.common.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,251 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <unit/io.h>
#include <unit/unit.h>
#include <nvgpu/io.h>
#include <nvgpu/posix/io.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/types.h>
#include <nvgpu/vm.h>
#include "os/posix/os_posix.h"
#include "hal/fb/fb_gv11b.h"
#include "hal/fb/intr/fb_intr_gv11b.h"
#include "hal/fifo/ramin_gk20a.h"
#include "hal/fifo/ramin_gv11b.h"
#include "hal/mc/mc_gp10b.h"
#include "hal/mm/cache/flush_gk20a.h"
#include "hal/mm/gmmu/gmmu_gp10b.h"
#include "hal/mm/mm_gp10b.h"
#include "hal/mm/mm_gv11b.h"
#include "hal/mm/mmu_fault/mmu_fault_gv11b.h"
#include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
#include <nvgpu/posix/posix-fault-injection.h>
#include <nvgpu/posix/dma.h>
#include "mm-gp10b-fusa.h"
/*
* Write callback (for all nvgpu_writel calls).
*/
static void writel_access_reg_fn(struct gk20a *g,
struct nvgpu_reg_access *access)
{
nvgpu_posix_io_writel_reg_space(g, access->addr, access->value);
}
/*
* Read callback, similar to the write callback above.
*/
static void readl_access_reg_fn(struct gk20a *g,
struct nvgpu_reg_access *access)
{
access->value = nvgpu_posix_io_readl_reg_space(g, access->addr);
}
/*
* Define all the callbacks to be used during the test. Typically all
* write operations use the same callback, likewise for all read operations.
*/
static struct nvgpu_posix_io_callbacks mmu_faults_callbacks = {
/* Write APIs all can use the same accessor. */
.writel = writel_access_reg_fn,
.writel_check = writel_access_reg_fn,
.bar1_writel = writel_access_reg_fn,
.usermode_writel = writel_access_reg_fn,
/* Likewise for the read APIs. */
.__readl = readl_access_reg_fn,
.readl = readl_access_reg_fn,
.bar1_readl = readl_access_reg_fn,
};
static void init_platform(struct unit_module *m, struct gk20a *g, bool is_iGPU)
{
if (is_iGPU) {
nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, true);
} else {
nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, false);
}
}
static int init_mm(struct unit_module *m, struct gk20a *g)
{
u64 low_hole, aperture_size;
struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g);
struct mm_gk20a *mm = &g->mm;
int err;
p->mm_is_iommuable = true;
/* Minimum HALs for page_table */
memset(&g->ops.bus, 0, sizeof(g->ops.bus));
memset(&g->ops.fb, 0, sizeof(g->ops.fb));
g->ops.fb.init_hw = gv11b_fb_init_hw;
g->ops.fb.intr.enable = gv11b_fb_intr_enable;
g->ops.ramin.init_pdb = gv11b_ramin_init_pdb;
g->ops.ramin.alloc_size = gk20a_ramin_alloc_size;
g->ops.mm.gmmu.get_default_big_page_size =
nvgpu_gmmu_default_big_page_size;
g->ops.mm.init_inst_block = gv11b_mm_init_inst_block;
g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
g->ops.mm.setup_hw = nvgpu_mm_setup_hw;
g->ops.mm.cache.fb_flush = gk20a_mm_fb_flush;
g->ops.mm.mmu_fault.info_mem_destroy =
gv11b_mm_mmu_fault_info_mem_destroy;
g->ops.mc.intr_stall_unit_config = mc_gp10b_intr_stall_unit_config;
nvgpu_posix_register_io(g, &mmu_faults_callbacks);
/* Register space: FB_MMU */
if (nvgpu_posix_io_add_reg_space(g, fb_niso_intr_r(), 0x800) != 0) {
unit_return_fail(m, "nvgpu_posix_io_add_reg_space failed\n");
}
/*
* Initialize VM space for system memory to be used throughout this
* unit module.
* Values below are similar to those used in nvgpu_init_system_vm()
*/
low_hole = SZ_4K * 16UL;
aperture_size = GK20A_PMU_VA_SIZE;
mm->pmu.aperture_size = GK20A_PMU_VA_SIZE;
mm->pmu.vm = nvgpu_vm_init(g,
g->ops.mm.gmmu.get_default_big_page_size(),
low_hole,
0ULL,
nvgpu_safe_sub_u64(aperture_size, low_hole),
0ULL,
true,
false,
false,
"system");
if (mm->pmu.vm == NULL) {
unit_return_fail(m, "'system' nvgpu_vm_init failed\n");
}
	/*
	 * This initialization ensures that the correct aperture mask
	 * is returned.
	 */
g->mm.mmu_wr_mem.aperture = APERTURE_SYSMEM;
g->mm.mmu_rd_mem.aperture = APERTURE_SYSMEM;
/* Init MM H/W */
err = g->ops.mm.setup_hw(g);
if (err != 0) {
unit_return_fail(m, "init_mm_setup_hw failed code=%d\n", err);
}
return UNIT_SUCCESS;
}
int test_env_init_mm_gp10b_fusa(struct unit_module *m, struct gk20a *g,
void *args)
{
g->log_mask = 0;
init_platform(m, g, true);
if (init_mm(m, g) != 0) {
unit_return_fail(m, "nvgpu_init_mm_support failed\n");
}
return UNIT_SUCCESS;
}
#define F_INIT_BAR2_VM_DEFAULT 0ULL
#define F_INIT_BAR2_VM_INIT_VM_FAIL 1ULL
#define F_INIT_BAR2_VM_ALLOC_INST_BLOCK_FAIL 2ULL
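/*
 * The scenario value arrives through the UNIT_TEST args pointer and is
 * decoded as a bitmask: F_INIT_BAR2_VM_INIT_VM_FAIL arms kmem fault
 * injection and F_INIT_BAR2_VM_ALLOC_INST_BLOCK_FAIL arms DMA fault
 * injection. The strings below name each scenario for error reporting.
 */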
const char *m_init_bar2_vm_str[] = {
"default_input",
"vm_init_fail",
"alloc_inst_block_fail",
};
int test_gp10b_mm_init_bar2_vm(struct unit_module *m, struct gk20a *g,
void *args)
{
int err;
int ret = UNIT_FAIL;
u64 branch = (u64)args;
u64 fail = F_INIT_BAR2_VM_INIT_VM_FAIL |
F_INIT_BAR2_VM_ALLOC_INST_BLOCK_FAIL;
struct nvgpu_posix_fault_inj *kmem_fi =
nvgpu_kmem_get_fault_injection();
struct nvgpu_posix_fault_inj *dma_fi =
nvgpu_dma_alloc_get_fault_injection();
if ((branch & F_INIT_BAR2_VM_INIT_VM_FAIL) != 0) {
nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
}
if ((branch & F_INIT_BAR2_VM_ALLOC_INST_BLOCK_FAIL) != 0) {
nvgpu_posix_enable_fault_injection(dma_fi, true, 1);
}
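	/*
	 * Per the scenario names above: kmem fault injection is expected to
	 * fail the internal VM allocation, while DMA fault injection is
	 * enabled with an offset of 1 so that the inst block allocation
	 * inside gp10b_mm_init_bar2_vm() fails instead.
	 */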
err = gp10b_mm_init_bar2_vm(g);
if (branch & fail) {
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
nvgpu_posix_enable_fault_injection(dma_fi, false, 0);
unit_assert(err != 0, goto done);
} else {
unit_assert(err == 0, goto done);
gp10b_mm_remove_bar2_vm(g);
}
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s: failed at %s\n", __func__,
m_init_bar2_vm_str[branch]);
}
return ret;
}
int test_env_clean_mm_gp10b_fusa(struct unit_module *m, struct gk20a *g,
void *args)
{
g->log_mask = 0;
g->ops.mm.mmu_fault.info_mem_destroy(g);
nvgpu_vm_put(g->mm.pmu.vm);
return UNIT_SUCCESS;
}
struct unit_module_test mm_gp10b_fusa_tests[] = {
UNIT_TEST(env_init, test_env_init_mm_gp10b_fusa, (void *)0, 0),
UNIT_TEST(mm_init_bar2_vm_s0, test_gp10b_mm_init_bar2_vm, (void *)F_INIT_BAR2_VM_DEFAULT, 0),
UNIT_TEST(mm_init_bar2_vm_s1, test_gp10b_mm_init_bar2_vm, (void *)F_INIT_BAR2_VM_INIT_VM_FAIL, 0),
UNIT_TEST(mm_init_bar2_vm_s2, test_gp10b_mm_init_bar2_vm, (void *)F_INIT_BAR2_VM_ALLOC_INST_BLOCK_FAIL, 0),
UNIT_TEST(env_clean, test_env_clean_mm_gp10b_fusa, NULL, 0),
};
UNIT_MODULE(mm_gp10b_fusa, mm_gp10b_fusa_tests, UNIT_PRIO_NVGPU_TEST);

View File

@@ -0,0 +1,99 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_MM_HAL_GP10B_FUSA_H
#define UNIT_MM_HAL_GP10B_FUSA_H
struct gk20a;
struct unit_module;
/** @addtogroup SWUTS-mm-hal-gp10b_fusa
* @{
*
* Software Unit Test Specification for mm.hal.gp10b_fusa
*/
/**
* Test specification for: test_env_init_mm_gp10b_fusa
*
* Description: Initialize environment for MM tests
*
* Test Type: Feature
*
* Targets: None
*
* Input: None
*
* Steps:
* - Init HALs and initialize VMs similar to nvgpu_init_system_vm().
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_env_init_mm_gp10b_fusa(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_gp10b_mm_init_bar2_vm
*
 * Description: Initialize BAR2 VM
*
* Test Type: Feature, Error injection
*
* Targets: gops_mm.init_bar2_vm, gp10b_mm_init_bar2_vm, gops_mm.remove_bar2_vm,
* gp10b_mm_remove_bar2_vm
*
* Input: test_env_init, args (value can be F_INIT_BAR2_VM_DEFAULT,
* F_INIT_BAR2_VM_INIT_VM_FAIL or F_INIT_BAR2_VM_ALLOC_INST_BLOCK_FAIL)
*
* Steps:
* - Allocate and initialize bar2 VM.
* - Check failure cases when allocation fails.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gp10b_mm_init_bar2_vm(struct unit_module *m, struct gk20a *g,
void *args);
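/*
 * Illustration only: in the companion .c test list these scenarios are
 * registered by passing the flag as the args pointer, e.g.
 *   UNIT_TEST(mm_init_bar2_vm_s1, test_gp10b_mm_init_bar2_vm,
 *             (void *)F_INIT_BAR2_VM_INIT_VM_FAIL, 0)
 */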
/**
* Test specification for: test_env_clean_mm_gp10b_fusa
*
* Description: Cleanup test environment
*
* Test Type: Feature
*
* Targets: None
*
* Input: test_env_init
*
* Steps:
* - Destroy memory and VMs initialized for the test.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_env_clean_mm_gp10b_fusa(struct unit_module *m, struct gk20a *g,
void *args);
/** @} */
#endif /* UNIT_MM_HAL_GP10B_FUSA_H */

View File

@@ -0,0 +1,26 @@
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
.SUFFIXES:
OBJS = mm-gv11b-fusa.o
MODULE = mm-gv11b-fusa
include ../../../Makefile.units

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=mm-gv11b-fusa
include $(NV_COMPONENT_DIR)/../../../Makefile.units.common.interface.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=mm-gv11b-fusa
include $(NV_COMPONENT_DIR)/../../../Makefile.units.common.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,253 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <unit/io.h>
#include <unit/unit.h>
#include <nvgpu/io.h>
#include <nvgpu/posix/io.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/types.h>
#include <nvgpu/vm.h>
#include "os/posix/os_posix.h"
#include "hal/fb/fb_gv11b.h"
#include "hal/fb/intr/fb_intr_gv11b.h"
#include "hal/fifo/ramin_gk20a.h"
#include "hal/fifo/ramin_gv11b.h"
#include "hal/mm/cache/flush_gk20a.h"
#include "hal/mm/gmmu/gmmu_gp10b.h"
#include "hal/mm/mm_gv11b.h"
#include "hal/mm/mmu_fault/mmu_fault_gv11b.h"
#include "hal/mc/mc_gp10b.h"
#include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
#include "mm-gv11b-fusa.h"
/*
* Write callback (for all nvgpu_writel calls).
*/
static void writel_access_reg_fn(struct gk20a *g,
struct nvgpu_reg_access *access)
{
nvgpu_posix_io_writel_reg_space(g, access->addr, access->value);
}
/*
* Read callback, similar to the write callback above.
*/
static void readl_access_reg_fn(struct gk20a *g,
struct nvgpu_reg_access *access)
{
access->value = nvgpu_posix_io_readl_reg_space(g, access->addr);
}
/*
* Define all the callbacks to be used during the test. Typically all
* write operations use the same callback, likewise for all read operations.
*/
static struct nvgpu_posix_io_callbacks mmu_faults_callbacks = {
/* Write APIs all can use the same accessor. */
.writel = writel_access_reg_fn,
.writel_check = writel_access_reg_fn,
.bar1_writel = writel_access_reg_fn,
.usermode_writel = writel_access_reg_fn,
/* Likewise for the read APIs. */
.__readl = readl_access_reg_fn,
.readl = readl_access_reg_fn,
.bar1_readl = readl_access_reg_fn,
};
static void init_platform(struct unit_module *m, struct gk20a *g, bool is_iGPU)
{
if (is_iGPU) {
nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, true);
} else {
nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, false);
}
}
static int init_mm(struct unit_module *m, struct gk20a *g)
{
u64 low_hole, aperture_size;
struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g);
struct mm_gk20a *mm = &g->mm;
int err;
p->mm_is_iommuable = true;
/* Minimum HALs for page_table */
memset(&g->ops.bus, 0, sizeof(g->ops.bus));
memset(&g->ops.fb, 0, sizeof(g->ops.fb));
g->ops.fb.init_hw = gv11b_fb_init_hw;
g->ops.fb.intr.enable = gv11b_fb_intr_enable;
g->ops.ramin.init_pdb = gv11b_ramin_init_pdb;
g->ops.ramin.alloc_size = gk20a_ramin_alloc_size;
g->ops.mc.intr_stall_unit_config = mc_gp10b_intr_stall_unit_config;
g->ops.mm.gmmu.get_default_big_page_size =
nvgpu_gmmu_default_big_page_size;
g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
g->ops.mm.setup_hw = nvgpu_mm_setup_hw;
g->ops.mm.cache.fb_flush = gk20a_mm_fb_flush;
g->ops.mm.mmu_fault.info_mem_destroy =
gv11b_mm_mmu_fault_info_mem_destroy;
nvgpu_posix_register_io(g, &mmu_faults_callbacks);
/* Register space: FB_MMU */
if (nvgpu_posix_io_add_reg_space(g, fb_niso_intr_r(), 0x800) != 0) {
unit_return_fail(m, "nvgpu_posix_io_add_reg_space failed\n");
}
/*
* Initialize VM space for system memory to be used throughout this
* unit module.
* Values below are similar to those used in nvgpu_init_system_vm()
*/
low_hole = SZ_4K * 16UL;
aperture_size = GK20A_PMU_VA_SIZE;
mm->pmu.aperture_size = GK20A_PMU_VA_SIZE;
mm->pmu.vm = nvgpu_vm_init(g,
g->ops.mm.gmmu.get_default_big_page_size(),
low_hole,
0ULL,
nvgpu_safe_sub_u64(aperture_size, low_hole),
0ULL,
true,
false,
false,
"system");
if (mm->pmu.vm == NULL) {
unit_return_fail(m, "'system' nvgpu_vm_init failed\n");
}
/* BAR2 memory space */
mm->bar2.aperture_size = U32(32) << 20U;
mm->bar2.vm = nvgpu_vm_init(g,
g->ops.mm.gmmu.get_default_big_page_size(),
SZ_4K, 0ULL, nvgpu_safe_sub_u64(mm->bar2.aperture_size, SZ_4K),
0ULL, false, false, false, "bar2");
if (mm->bar2.vm == NULL) {
unit_return_fail(m, "'bar2' nvgpu_vm_init failed\n");
}
	/*
	 * This initialization ensures that the correct aperture mask
	 * is returned.
	 */
g->mm.mmu_wr_mem.aperture = APERTURE_SYSMEM;
g->mm.mmu_rd_mem.aperture = APERTURE_SYSMEM;
/* Init MM H/W */
err = g->ops.mm.setup_hw(g);
if (err != 0) {
unit_return_fail(m, "init_mm_setup_hw failed code=%d\n", err);
}
return UNIT_SUCCESS;
}
int test_env_init_mm_gv11b_fusa(struct unit_module *m, struct gk20a *g,
void *args)
{
g->log_mask = 0;
init_platform(m, g, true);
if (init_mm(m, g) != 0) {
unit_return_fail(m, "nvgpu_init_mm_support failed\n");
}
return UNIT_SUCCESS;
}
#define F_INIT_INST_BLOCK_SET_BIG_PAGE_ZERO 1ULL
#define F_INIT_INST_BLOCK_SET_BIG_PAGE_SIZE_NULL 2ULL
#define F_INIT_INST_BLOCK_INIT_SUBCTX_PDB_NULL 3ULL
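/*
 * The args value selects the branch: 1 forces a big page size of zero,
 * 2 NULLs the set_big_page_size HAL, and 3 NULLs the init_subctx_pdb HAL,
 * so each optional path in gv11b_mm_init_inst_block() is exercised.
 */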
int test_gv11b_mm_init_inst_block(struct unit_module *m, struct gk20a *g,
void *args)
{
struct nvgpu_mem inst_block;
struct gpu_ops gops = g->ops;
int err;
u64 branch = (u64)args;
u32 big_page_size;
big_page_size = branch == F_INIT_INST_BLOCK_SET_BIG_PAGE_ZERO ?
0U : g->ops.mm.gmmu.get_default_big_page_size();
g->ops.ramin.set_big_page_size =
branch == F_INIT_INST_BLOCK_SET_BIG_PAGE_SIZE_NULL ?
NULL : gops.ramin.set_big_page_size;
g->ops.ramin.init_subctx_pdb =
branch == F_INIT_INST_BLOCK_INIT_SUBCTX_PDB_NULL ?
NULL : gops.ramin.init_subctx_pdb;
err = nvgpu_alloc_inst_block(g, &inst_block);
if (err != 0) {
unit_return_fail(m, "could not alloc inst block\n");
}
gv11b_mm_init_inst_block(&inst_block, g->mm.bar2.vm, big_page_size);
return UNIT_SUCCESS;
}
int test_gv11b_mm_is_bar1_supported(struct unit_module *m, struct gk20a *g,
void *args)
{
bool ret = gv11b_mm_is_bar1_supported(g);
if (ret != false) {
		unit_return_fail(m, "bar1 must not be supported on Volta\n");
}
return UNIT_SUCCESS;
}
int test_env_clean_mm_gv11b_fusa(struct unit_module *m, struct gk20a *g,
void *args)
{
g->log_mask = 0;
g->ops.mm.mmu_fault.info_mem_destroy(g);
nvgpu_vm_put(g->mm.pmu.vm);
nvgpu_vm_put(g->mm.bar2.vm);
return UNIT_SUCCESS;
}
struct unit_module_test mm_gv11b_fusa_tests[] = {
UNIT_TEST(env_init, test_env_init_mm_gv11b_fusa, (void *)0, 0),
UNIT_TEST(inst_block_s0, test_gv11b_mm_init_inst_block, (void *)0U, 0),
UNIT_TEST(inst_block_s1, test_gv11b_mm_init_inst_block, (void *)1U, 0),
UNIT_TEST(inst_block_s2, test_gv11b_mm_init_inst_block, (void *)2U, 0),
UNIT_TEST(inst_block_s3, test_gv11b_mm_init_inst_block, (void *)3U, 0),
UNIT_TEST(is_bar1_supported, test_gv11b_mm_is_bar1_supported, NULL, 0),
UNIT_TEST(env_clean, test_env_clean_mm_gv11b_fusa, NULL, 0),
};
UNIT_MODULE(mm_gv11b_fusa, mm_gv11b_fusa_tests, UNIT_PRIO_NVGPU_TEST);

View File

@@ -0,0 +1,118 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_MM_HAL_GV11B_FUSA_H
#define UNIT_MM_HAL_GV11B_FUSA_H
struct gk20a;
struct unit_module;
/** @addtogroup SWUTS-mm-hal-gv11b-fusa
* @{
*
* Software Unit Test Specification for mm.hal.gv11b_fusa
*/
/**
* Test specification for: test_env_init_mm_gv11b_fusa
*
* Description: Initialize environment for MM tests
*
* Test Type: Feature
*
* Targets: None
*
* Input: None
*
* Steps:
* - Init HALs and initialize VMs similar to nvgpu_init_system_vm().
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_env_init_mm_gv11b_fusa(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_gv11b_mm_init_inst_block
*
* Description: Initialize instance block
*
* Test Type: Feature
*
* Targets: gops_mm.init_inst_block, gv11b_mm_init_inst_block
*
* Input: test_env_init, args (value can be F_INIT_INST_BLOCK_SET_BIG_PAGE_ZERO,
* F_INIT_INST_BLOCK_SET_BIG_PAGE_SIZE_NULL or
* F_INIT_INST_BLOCK_INIT_SUBCTX_PDB_NULL)
*
* Steps:
* - Allocate memory for instance block.
* - Initialize GPU accessible instance block memory.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gv11b_mm_init_inst_block(struct unit_module *m, struct gk20a *g,
void *args);
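/*
 * Illustration only: the companion .c test list passes the raw branch
 * value as args, e.g.
 *   UNIT_TEST(inst_block_s1, test_gv11b_mm_init_inst_block, (void *)1U, 0)
 */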
/**
* Test specification for: test_gv11b_mm_is_bar1_supported
*
 * Description: Test whether BAR1 is reported as supported
*
* Test Type: Feature
*
* Targets: gops_mm.is_bar1_supported, gv11b_mm_is_bar1_supported
*
* Input: test_env_init
*
* Steps:
* - Execute gv11b_mm_is_bar1_supported() to check if bar1 is supported.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gv11b_mm_is_bar1_supported(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_env_clean_mm_gv11b_fusa
*
* Description: Cleanup test environment
*
* Test Type: Feature
*
* Targets: None
*
* Input: test_env_init
*
* Steps:
* - Destroy memory and VMs initialized for the test.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_env_clean_mm_gv11b_fusa(struct unit_module *m, struct gk20a *g,
void *args);
/** @} */
#endif /* UNIT_MM_HAL_GV11B_FUSA_H */

View File

@@ -0,0 +1,26 @@
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
.SUFFIXES:
OBJS = mmu-fault-gv11b-fusa.o
MODULE = mmu-fault-gv11b-fusa
include ../../../../Makefile.units

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=mmu-fault-gv11b-fusa
include $(NV_COMPONENT_DIR)/../../../../Makefile.units.common.interface.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=mmu-fault-gv11b-fusa
include $(NV_COMPONENT_DIR)/../../../../Makefile.units.common.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,853 @@
/*
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>
#include <unit/io.h>
#include <unit/unit.h>
#include <nvgpu/io.h>
#include <nvgpu/atomic.h>
#include <nvgpu/posix/io.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/types.h>
#include <nvgpu/fifo.h>
#include <nvgpu/vm.h>
#include <nvgpu/tsg.h>
#include <nvgpu/engines.h>
#include <nvgpu/preempt.h>
#include <nvgpu/cic.h>
#include <nvgpu/nvgpu_init.h>
#include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
#include <nvgpu/hw/gv11b/hw_gmmu_gv11b.h>
#include <nvgpu/posix/dma.h>
#include <nvgpu/posix/posix-fault-injection.h>
#include "os/posix/os_posix.h"
#include "hal/init/hal_gv11b.h"
#include "hal/fb/fb_gm20b.h"
#include "hal/fb/fb_gv11b.h"
#include "hal/fb/fb_mmu_fault_gv11b.h"
#include "hal/fb/intr/fb_intr_gv11b.h"
#include "hal/fifo/channel_gk20a.h"
#include "hal/fifo/channel_gv11b.h"
#include "hal/fifo/preempt_gv11b.h"
#include "hal/fifo/ramin_gk20a.h"
#include "hal/fifo/ramin_gv11b.h"
#include "hal/mm/cache/flush_gk20a.h"
#include "hal/mm/gmmu/gmmu_gp10b.h"
#include "hal/mm/gmmu/gmmu_gv11b.h"
#include "hal/mm/mm_gp10b.h"
#include "hal/mm/mm_gv11b.h"
#include "hal/cic/cic_gv11b.h"
#include "hal/mm/mmu_fault/mmu_fault_gv11b.h"
#include "mmu-fault-gv11b-fusa.h"
static u32 global_count;
static u32 count;
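/*
 * Shared counters used by the stubbed HALs below: a stub records its
 * invocation by copying global_count into count, so tests can assert
 * whether (and in what order) a HAL was actually called.
 */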
/*
* Write callback (for all nvgpu_writel calls).
*/
static void writel_access_reg_fn(struct gk20a *g,
struct nvgpu_reg_access *access)
{
nvgpu_posix_io_writel_reg_space(g, access->addr, access->value);
}
/*
* Read callback, similar to the write callback above.
*/
static void readl_access_reg_fn(struct gk20a *g,
struct nvgpu_reg_access *access)
{
access->value = nvgpu_posix_io_readl_reg_space(g, access->addr);
}
/*
* Define all the callbacks to be used during the test. Typically all
* write operations use the same callback, likewise for all read operations.
*/
static struct nvgpu_posix_io_callbacks mmu_faults_callbacks = {
/* Write APIs all can use the same accessor. */
.writel = writel_access_reg_fn,
.writel_check = writel_access_reg_fn,
.bar1_writel = writel_access_reg_fn,
.usermode_writel = writel_access_reg_fn,
/* Likewise for the read APIs. */
.__readl = readl_access_reg_fn,
.readl = readl_access_reg_fn,
.bar1_readl = readl_access_reg_fn,
};
static void init_platform(struct unit_module *m, struct gk20a *g, bool is_iGPU)
{
if (is_iGPU) {
nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, true);
} else {
nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, false);
}
}
static u32 stub_channel_count(struct gk20a *g)
{
return 32;
}
static int stub_mm_l2_flush(struct gk20a *g, bool invalidate)
{
return 0;
}
static int init_mm(struct unit_module *m, struct gk20a *g)
{
u64 low_hole, aperture_size;
struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g);
struct mm_gk20a *mm = &g->mm;
int err;
p->mm_is_iommuable = true;
/* Minimum HALs for page_table */
g->ops.mm.gmmu.get_default_big_page_size =
nvgpu_gmmu_default_big_page_size;
g->ops.mm.init_inst_block = gv11b_mm_init_inst_block;
g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
g->ops.ramin.init_pdb = gv11b_ramin_init_pdb;
g->ops.ramin.alloc_size = gk20a_ramin_alloc_size;
g->ops.mm.setup_hw = nvgpu_mm_setup_hw;
g->ops.fb.init_hw = gv11b_fb_init_hw;
g->ops.fb.intr.enable = gv11b_fb_intr_enable;
g->ops.mm.cache.fb_flush = gk20a_mm_fb_flush;
g->ops.channel.count = stub_channel_count;
g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;
g->ops.mm.gmmu.get_iommu_bit = gp10b_mm_get_iommu_bit;
g->ops.mm.gmmu.gpu_phys_addr = gv11b_gpu_phys_addr;
g->ops.mm.cache.l2_flush = stub_mm_l2_flush;
g->ops.fb.tlb_invalidate = gm20b_fb_tlb_invalidate;
g->ops.mm.gmmu.get_max_page_table_levels =
gp10b_get_max_page_table_levels;
g->ops.mm.mmu_fault.info_mem_destroy =
gv11b_mm_mmu_fault_info_mem_destroy;
g->ops.mm.mmu_fault.parse_mmu_fault_info =
gv11b_mm_mmu_fault_parse_mmu_fault_info;
nvgpu_posix_register_io(g, &mmu_faults_callbacks);
/* Register space: FB_MMU */
if (nvgpu_posix_io_add_reg_space(g, fb_mmu_ctrl_r(), 0x800) != 0) {
unit_return_fail(m, "nvgpu_posix_io_add_reg_space failed\n");
}
/*
* Initialize VM space for system memory to be used throughout this
* unit module.
* Values below are similar to those used in nvgpu_init_system_vm()
*/
low_hole = SZ_4K * 16UL;
aperture_size = GK20A_PMU_VA_SIZE;
mm->pmu.aperture_size = GK20A_PMU_VA_SIZE;
mm->pmu.vm = nvgpu_vm_init(g,
g->ops.mm.gmmu.get_default_big_page_size(),
low_hole,
0ULL,
nvgpu_safe_sub_u64(aperture_size, low_hole),
0ULL,
true,
false,
false,
"system");
if (mm->pmu.vm == NULL) {
unit_return_fail(m, "'system' nvgpu_vm_init failed\n");
}
/* BAR2 memory space */
mm->bar2.aperture_size = U32(32) << 20U;
mm->bar2.vm = nvgpu_vm_init(g,
g->ops.mm.gmmu.get_default_big_page_size(),
SZ_4K, 0ULL, nvgpu_safe_sub_u64(mm->bar2.aperture_size, SZ_4K),
0ULL, false, false, false, "bar2");
if (mm->bar2.vm == NULL) {
unit_return_fail(m, "'bar2' nvgpu_vm_init failed\n");
}
err = nvgpu_pd_cache_init(g);
if (err != 0) {
unit_return_fail(m, "PD cache init failed\n");
}
	/*
	 * This initialization ensures that the correct aperture mask
	 * is returned.
	 */
g->mm.mmu_wr_mem.aperture = APERTURE_SYSMEM;
g->mm.mmu_rd_mem.aperture = APERTURE_SYSMEM;
return UNIT_SUCCESS;
}
int test_env_init_mm_mmu_fault_gv11b_fusa(struct unit_module *m,
struct gk20a *g, void *args)
{
g->log_mask = 0;
init_platform(m, g, true);
if (init_mm(m, g) != 0) {
unit_return_fail(m, "nvgpu_init_mm_support failed\n");
}
g->ops.cic.init = gv11b_cic_init;
g->ops.cic.report_err = nvgpu_cic_report_err_safety_services;
if (nvgpu_cic_init_common(g) != 0) {
unit_return_fail(m, "Failed to initialize CIC\n");
}
return UNIT_SUCCESS;
}
#define F_MMU_FAULT_SETUP_SW_FAULT_BUF_ALLOC_FAIL 0
#define F_MMU_FAULT_SETUP_SW_DEFAULT 1
static const char *f_mmu_fault_setup_sw[] = {
"mmu_fault_setup_sw_alloc_fail",
"mmu_fault_setup_sw_default",
};
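/*
 * In the alloc-fail scenario, DMA fault injection makes the HW fault
 * buffer allocation fail, so its aperture is expected to remain
 * APERTURE_INVALID; otherwise the buffer lands in SYSMEM with a GPU VA.
 */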
int test_gv11b_mm_mmu_fault_setup_sw(struct unit_module *m, struct gk20a *g,
void *args)
{
int ret = UNIT_FAIL;
int err;
struct nvgpu_posix_fault_inj *l_dma_fi;
u64 branch = (u64)args;
l_dma_fi = nvgpu_dma_alloc_get_fault_injection();
nvgpu_posix_enable_fault_injection(l_dma_fi,
branch == F_MMU_FAULT_SETUP_SW_FAULT_BUF_ALLOC_FAIL ?
true : false, 0);
err = gv11b_mm_mmu_fault_setup_sw(g);
unit_assert(err == 0, goto done);
if (branch == F_MMU_FAULT_SETUP_SW_FAULT_BUF_ALLOC_FAIL) {
unit_assert(
g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX].aperture
== APERTURE_INVALID, goto done);
} else {
unit_assert(
g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX].aperture
== APERTURE_SYSMEM, goto done);
unit_assert(
g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX].gpu_va
!= 0ULL, goto done);
}
gv11b_mm_mmu_fault_info_mem_destroy(g);
unit_assert(g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX].aperture
== APERTURE_INVALID, goto done);
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s: %s failed\n", __func__,
f_mmu_fault_setup_sw[branch]);
}
nvgpu_posix_enable_fault_injection(l_dma_fi, false, 0);
return ret;
}
static void stub_fb_fault_buf_configure_hw(struct gk20a *g, u32 index)
{
count = global_count;
}
int test_gv11b_mm_mmu_fault_setup_hw(struct unit_module *m, struct gk20a *g,
void *args)
{
int ret = UNIT_FAIL;
int err;
enum nvgpu_aperture fb_aperture_orig = APERTURE_INVALID;
global_count = 0U;
count = 1U;
g->ops.fb.fault_buf_configure_hw = stub_fb_fault_buf_configure_hw;
err = gv11b_mm_mmu_fault_setup_sw(g);
unit_assert(err == 0, goto done);
gv11b_mm_mmu_fault_setup_hw(g);
unit_assert(count == global_count, goto done);
global_count++;
fb_aperture_orig =
g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX].aperture;
g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX].aperture =
APERTURE_INVALID;
gv11b_mm_mmu_fault_setup_hw(g);
unit_assert(count != global_count, goto done);
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s failed\n", __func__);
}
g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX].aperture =
fb_aperture_orig;
gv11b_mm_mmu_fault_info_mem_destroy(g);
return ret;
}
#define F_MMU_FAULT_DISABLE_HW_FALSE 0
#define F_MMU_FAULT_DISABLE_HW_TRUE 1
static const char *f_mmu_fault_disable[] = {
"mmu_fault_disable_hw_false",
"mmu_fault_disable_hw_true",
};
static bool fault_buf_enabled;
static bool stub_fb_is_fault_buf_enabled(struct gk20a *g, u32 index)
{
count = global_count;
return fault_buf_enabled;
}
static void stub_fb_fault_buf_set_state_hw(struct gk20a *g, u32 index, u32 state)
{
global_count += 2U;
}
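/*
 * Counter bookkeeping for the disable_hw test: the is_fault_buf_enabled
 * stub records its invocation (count = global_count) and the
 * set_state_hw stub bumps global_count by 2, so global_count ends at
 * 10 when the buffer is reported disabled and 12 when enabled.
 */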
int test_gv11b_mm_mmu_fault_disable_hw(struct unit_module *m, struct gk20a *g,
void *args)
{
int ret = UNIT_FAIL;
int err = 0U;
u64 branch = (u64)args;
struct gpu_ops gops = g->ops;
global_count = 10U;
count = 0U;
err = gv11b_mm_mmu_fault_setup_sw(g);
unit_assert(err == 0, goto done);
g->ops.fb.is_fault_buf_enabled = stub_fb_is_fault_buf_enabled;
g->ops.fb.fault_buf_set_state_hw = stub_fb_fault_buf_set_state_hw;
fault_buf_enabled = branch == F_MMU_FAULT_DISABLE_HW_FALSE ?
false : true;
gv11b_mm_mmu_fault_disable_hw(g);
unit_assert(count == 10U, goto done);
unit_assert(global_count == (10U + (2U * fault_buf_enabled)), goto done);
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s: %s failed\n", __func__,
f_mmu_fault_disable[branch]);
}
gv11b_mm_mmu_fault_info_mem_destroy(g);
g->ops = gops;
return ret;
}
#define F_MMU_FAULT_ENG_ID_INVALID 0
#define F_MMU_FAULT_ENG_ID_BAR2 1
#define F_MMU_FAULT_ENG_ID_PHYSICAL 2
static const char *f_mmu_fault_notify[] = {
"mmu_fault_notify_eng_id_invalid",
"mmu_fault_notify_eng_id_bar2",
"mmu_fault_notify_eng_id_physical",
};
static void stub_ce_mthd_buffer_fault_in_bar2_fault(struct gk20a *g)
{
}
static int stub_bus_bar2_bind(struct gk20a *g, struct nvgpu_mem *bar2_inst)
{
return 0;
}
static u32 stub_fifo_mmu_fault_id_to_pbdma_id(struct gk20a *g, u32 mmu_fault_id)
{
return INVAL_ID;
}
int test_gv11b_mm_mmu_fault_handle_other_fault_notify(struct unit_module *m,
struct gk20a *g, void *args)
{
int ret = UNIT_FAIL;
u64 branch = (u64)args;
int err;
struct gpu_ops gops = g->ops;
u32 reg_val;
g->ops.fb.read_mmu_fault_inst_lo_hi =
gv11b_fb_read_mmu_fault_inst_lo_hi;
g->ops.fb.read_mmu_fault_addr_lo_hi =
gv11b_fb_read_mmu_fault_addr_lo_hi;
g->ops.fb.read_mmu_fault_info = gv11b_fb_read_mmu_fault_info;
g->ops.fb.write_mmu_fault_status = gv11b_fb_write_mmu_fault_status;
g->ops.ce.mthd_buffer_fault_in_bar2_fault =
stub_ce_mthd_buffer_fault_in_bar2_fault;
g->ops.bus.bar2_bind = stub_bus_bar2_bind;
g->ops.fifo.mmu_fault_id_to_pbdma_id =
stub_fifo_mmu_fault_id_to_pbdma_id;
reg_val = branch == F_MMU_FAULT_ENG_ID_BAR2 ?
gmmu_fault_mmu_eng_id_bar2_v() :
branch == F_MMU_FAULT_ENG_ID_PHYSICAL ?
gmmu_fault_mmu_eng_id_physical_v() : 0U;
nvgpu_writel(g, fb_mmu_fault_inst_lo_r(), reg_val);
err = gv11b_mm_mmu_fault_setup_sw(g);
unit_assert(err == 0, goto done);
gv11b_mm_mmu_fault_handle_other_fault_notify(g,
fb_mmu_fault_status_valid_set_f());
if (branch == F_MMU_FAULT_ENG_ID_BAR2) {
unit_assert(g->mm.fault_info[
NVGPU_MMU_FAULT_NONREPLAY_INDX].mmu_engine_id ==
gmmu_fault_mmu_eng_id_bar2_v(), goto done);
} else if (branch == F_MMU_FAULT_ENG_ID_PHYSICAL) {
unit_assert(g->mm.fault_info[
NVGPU_MMU_FAULT_NONREPLAY_INDX].mmu_engine_id ==
gmmu_fault_mmu_eng_id_physical_v(), goto done);
} else {
unit_assert(g->mm.fault_info[
NVGPU_MMU_FAULT_NONREPLAY_INDX].mmu_engine_id ==
0U, goto done);
}
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s: %s failed\n", __func__,
f_mmu_fault_notify[branch]);
}
gv11b_mm_mmu_fault_info_mem_destroy(g);
g->ops = gops;
return ret;
}
#define F_MMU_FAULT_INFO_FAULT_TYPE_INVALID 0x01ULL
#define F_MMU_FAULT_INFO_CLIENT_TYPE_INVALID 0x02ULL
#define F_MMU_FAULT_INFO_CLIENT_TYPE_HUB 0x04ULL
#define F_MMU_FAULT_INFO_CLIENT_TYPE_GPC 0x08ULL
#define F_MMU_FAULT_INFO_CLIENT_ID_INVALID 0x10ULL
#define F_MMU_FAULT_PARSE_DEFAULT 0x00ULL
/* F_MMU_FAULT_INFO_FAULT_TYPE_INVALID */
#define F_MMU_FAULT_PARSE_FAULT_TYPE_INVALID 0x01ULL
/* F_MMU_FAULT_INFO_CLIENT_TYPE_INVALID */
#define F_MMU_FAULT_PARSE_CLIENT_TYPE_INVALID 0x02ULL
/* F_MMU_FAULT_INFO_CLIENT_TYPE_HUB */
#define F_MMU_FAULT_PARSE_CLIENT_TYPE_HUB 0x04ULL
/* F_MMU_FAULT_INFO_CLIENT_TYPE_HUB + F_MMU_FAULT_INFO_CLIENT_ID_INVALID */
#define F_MMU_FAULT_PARSE_CLIENT_HUB_ID_INVALID 0x14ULL
/* F_MMU_FAULT_INFO_CLIENT_TYPE_GPC */
#define F_MMU_FAULT_PARSE_CLIENT_TYPE_GPC 0x08ULL
/* F_MMU_FAULT_INFO_CLIENT_TYPE_GPC + F_MMU_FAULT_INFO_CLIENT_ID_INVALID */
#define F_MMU_FAULT_PARSE_CLIENT_GPC_ID_INVALID 0x18ULL
int test_gv11b_mm_mmu_fault_parse_mmu_fault_info(struct unit_module *m,
struct gk20a *g, void *args)
{
int ret = UNIT_FAIL;
u64 branch = (u64)args;
struct mmu_fault_info *mmufault =
&g->mm.fault_info[NVGPU_MMU_FAULT_NONREPLAY_INDX];
mmufault->fault_type = branch & F_MMU_FAULT_INFO_FAULT_TYPE_INVALID ?
1000U : 0U;
mmufault->client_type = branch & F_MMU_FAULT_INFO_CLIENT_TYPE_INVALID ?
1000U :
branch & F_MMU_FAULT_INFO_CLIENT_TYPE_HUB ?
gmmu_fault_client_type_hub_v() :
branch & F_MMU_FAULT_INFO_CLIENT_TYPE_GPC ?
gmmu_fault_client_type_gpc_v() : 0U;
mmufault->client_id = branch & F_MMU_FAULT_INFO_CLIENT_ID_INVALID ?
1000U : 0U;
EXPECT_BUG(gv11b_mm_mmu_fault_parse_mmu_fault_info(mmufault));
if (!(branch & F_MMU_FAULT_PARSE_FAULT_TYPE_INVALID)) {
unit_assert(strcmp(mmufault->fault_type_desc, "invalid pde") == 0, goto done);
}
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s: failed\n", __func__);
}
return ret;
}
static u32 ret_num_lce;
static u32 stub_top_get_num_lce(struct gk20a *g)
{
return ret_num_lce;
}
static int stub_runlist_update(struct gk20a *g,
struct nvgpu_runlist *rl,
struct nvgpu_channel *ch,
bool add, bool wait_for_finish)
{
return 0;
}
static void stub_set_err_notifier_if_empty(struct nvgpu_channel *ch, u32 error)
{
}
static u32 stub_gr_init_get_no_of_sm(struct gk20a *g)
{
return 8;
}
#define F_MMU_FAULT_VALID 0x01ULL
#define F_NVGPU_POWERED_ON 0x02ULL
#define F_MMU_FAULT_ENG_ID_CE0 0x04ULL
#define F_NUM_LCE_0 0x08ULL
#define F_MMU_FAULT_NON_REPLAYABLE 0x10ULL
#define F_MMU_FAULT_TYPE_INST_BLOCK 0x20ULL
#define F_MMU_FAULT_REFCH 0x40ULL
#define F_FAULTED_ENGINE_INVALID 0x80ULL
#define F_MMU_NACK_HANDLED 0x100ULL
#define F_TSG_VALID 0x200ULL
/* !F_MMU_FAULT_VALID */
#define F_MMU_HANDLER_FAULT_INVALID 0x00ULL
/* F_MMU_FAULT_VALID + !F_NVGPU_POWERED_ON */
#define F_MMU_HANDLER_NVGPU_POWERED_OFF 0x01ULL
/* F_MMU_FAULT_VALID + F_NVGPU_POWERED_ON + F_MMU_FAULT_ENG_ID_CE0 */
#define F_MMU_HANDLER_CE_DEFAULT 0x07ULL
/* F_MMU_FAULT_VALID + F_NVGPU_POWERED_ON + F_MMU_FAULT_ENG_ID_CE0 +
F_NUM_LCE_0 */
#define F_MMU_HANDLER_CE_LCE_0 0x0FULL
/* F_MMU_FAULT_VALID + F_NVGPU_POWERED_ON + F_MMU_FAULT_ENG_ID_CE0 +
F_MMU_FAULT_REFCH */
#define F_MMU_HANDLER_CE_REFCH 0x47ULL
/* F_MMU_FAULT_VALID + F_NVGPU_POWERED_ON + F_MMU_FAULT_NON_REPLAYABLE */
#define F_MMU_HANDLER_NON_REPLAYABLE_DEFAULT 0x13ULL
/* F_MMU_FAULT_VALID + F_NVGPU_POWERED_ON + F_MMU_FAULT_NON_REPLAYABLE +
F_MMU_FAULT_TYPE_INST_BLOCK */
#define F_MMU_HANDLER_NON_REPLAYABLE_INST_BLOCK 0x33ULL
/* F_MMU_FAULT_VALID + F_NVGPU_POWERED_ON + F_MMU_FAULT_NON_REPLAYABLE +
F_MMU_FAULT_REFCH */
#define F_MMU_HANDLER_NON_REPLAYABLE_REFCH 0x53ULL
/* F_MMU_FAULT_VALID + F_NVGPU_POWERED_ON + F_MMU_FAULT_NON_REPLAYABLE +
F_MMU_FAULT_REFCH + F_MMU_NACK_HANDLED*/
#define F_MMU_HANDLER_NON_REPLAYABLE_REFCH_NACK_HNDLD 0x153ULL
/* F_MMU_FAULT_VALID + F_NVGPU_POWERED_ON + F_MMU_FAULT_NON_REPLAYABLE +
F_FAULTED_ENGINE_INVALID */
#define F_MMU_HANDLER_NON_REPLAYABLE_FAULTED_INVALID 0x93ULL
/* F_MMU_FAULT_VALID + F_NVGPU_POWERED_ON + F_MMU_FAULT_NON_REPLAYABLE +
F_NUM_LCE_0 + F_TSG_VALID */
#define F_MMU_HANDLER_NON_REPLAYABLE_TSG 0x29BULL
static const char *f_mmu_handler[] = {
[F_MMU_HANDLER_FAULT_INVALID] = "mmu_handler_fault_invalid",
[F_MMU_HANDLER_NVGPU_POWERED_OFF] = "mmu_handler_nvgpu_powered_off",
[F_MMU_HANDLER_CE_DEFAULT] = "mmu_handler_ce_default",
[F_MMU_HANDLER_CE_LCE_0] = "mmu_handler_ce_with_lce_0",
[F_MMU_HANDLER_CE_REFCH] = "mmu_handler_ce_refch_valid",
[F_MMU_HANDLER_NON_REPLAYABLE_DEFAULT] =
"mmu_handler_non-replayable_default",
[F_MMU_HANDLER_NON_REPLAYABLE_INST_BLOCK] =
"mmu_handler_non-replayable_inst_block",
[F_MMU_HANDLER_NON_REPLAYABLE_REFCH] =
"mmu_handler_non-replayable_refch_valid",
[F_MMU_HANDLER_NON_REPLAYABLE_REFCH_NACK_HNDLD] =
"mmu_handler_non-replayable_refch_nack_handled",
[F_MMU_HANDLER_NON_REPLAYABLE_FAULTED_INVALID] =
"mmu_handler_non-replayable_faulted_engine_invalid",
[F_MMU_HANDLER_NON_REPLAYABLE_TSG] =
"mmu_handler_non-replayable_tsg_valid",
};
int test_handle_mmu_fault_common(struct unit_module *m,
struct gk20a *g, void *args)
{
int ret = UNIT_FAIL;
u64 branch = (u64)args;
int err;
u32 invalidate_replay_val;
struct gpu_ops gops = g->ops;
struct nvgpu_channel chA = {0};
struct nvgpu_channel *chB = NULL;
struct nvgpu_tsg *tsg = NULL;
struct mmu_fault_info *mmufault =
&g->mm.fault_info[NVGPU_MMU_FAULT_NONREPLAY_INDX];
g->ops.top.get_num_lce = stub_top_get_num_lce;
g->sw_quiesce_pending = true;
err = gv11b_mm_mmu_fault_setup_sw(g);
unit_assert(err == 0, goto done);
mmufault->valid = branch & F_MMU_FAULT_VALID ? true : false;
nvgpu_set_power_state(g, branch & F_NVGPU_POWERED_ON ?
NVGPU_STATE_POWERED_ON : NVGPU_STATE_POWERED_OFF);
mmufault->mmu_engine_id = branch & F_MMU_FAULT_ENG_ID_CE0 ?
gmmu_fault_mmu_eng_id_ce0_v() :
gmmu_fault_mmu_eng_id_ce0_v() - 1U;
ret_num_lce = branch & F_NUM_LCE_0 ? 0U : 5U;
mmufault->replayable_fault = branch & F_MMU_FAULT_NON_REPLAYABLE ?
false : true;
mmufault->fault_type = branch & F_MMU_FAULT_TYPE_INST_BLOCK ?
gmmu_fault_type_unbound_inst_block_v() : 0U;
mmufault->faulted_engine = branch & F_FAULTED_ENGINE_INVALID ?
NVGPU_INVALID_ENG_ID : 0U;
if (branch & F_MMU_FAULT_REFCH) {
/* Init chA */
chA.g = g;
chA.tsgid = NVGPU_INVALID_TSG_ID;
nvgpu_atomic_set(&chA.ref_count, 2);
chA.mmu_nack_handled = branch & F_MMU_NACK_HANDLED ?
true : false;
mmufault->refch = &chA;
} else if (branch & F_TSG_VALID) {
/* Init TSG and chB */
g->ops.gr.init.get_no_of_sm = stub_gr_init_get_no_of_sm;
g->ops.runlist.update = stub_runlist_update;
g->ops.tsg.default_timeslice_us =
nvgpu_tsg_default_timeslice_us;
g->ops.channel.alloc_inst = nvgpu_channel_alloc_inst;
g->ops.channel.set_error_notifier =
stub_set_err_notifier_if_empty;
g->ops.channel.disable = gk20a_channel_disable;
g->ops.channel.unbind = gv11b_channel_unbind;
g->ops.channel.free_inst = nvgpu_channel_free_inst;
g->ops.tsg.disable = nvgpu_tsg_disable;
g->ops.fifo.preempt_tsg = nvgpu_fifo_preempt_tsg;
g->aggressive_sync_destroy_thresh = 0U;
g->fifo.g = g;
err = nvgpu_channel_setup_sw(g);
unit_assert(err == 0, goto done);
err = nvgpu_tsg_setup_sw(g);
unit_assert(err == 0, goto done);
tsg = nvgpu_tsg_open(g, getpid());
unit_assert(tsg != NULL, goto done);
chB = nvgpu_channel_open_new(g, U32_MAX, false,
getpid(), getpid());
unit_assert(chB != NULL, goto done);
err = nvgpu_tsg_bind_channel(tsg, chB);
unit_assert(err == 0, goto done);
mmufault->refch = chB;
} else {
mmufault->refch = NULL;
}
gv11b_mm_mmu_fault_handle_mmu_fault_common(g, mmufault,
&invalidate_replay_val);
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s: %s failed\n", __func__, f_mmu_handler[branch]);
}
nvgpu_set_power_state(g, NVGPU_STATE_POWERED_OFF);
gv11b_mm_mmu_fault_info_mem_destroy(g);
if (chB != NULL) {
nvgpu_atomic_set(&chB->ref_count, 1);
nvgpu_channel_close(chB);
}
if (tsg != NULL) {
nvgpu_ref_put(&tsg->refcount, nvgpu_tsg_release);
}
g->ops = gops;
return ret;
}
#define F_BUF_EMPTY 0x01ULL
#define F_VALID_ENTRY 0x02ULL
#define F_VALID_CH 0x04ULL
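/*
 * The scenario values below combine the flags above, e.g.
 * F_HANDLE_NON_RPLYBLE_VALID_BUF_CH = F_VALID_ENTRY | F_VALID_CH.
 */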
#define F_HANDLE_NON_RPLYBLE_BUF_EMPTY 0x01ULL
#define F_HANDLE_NON_RPLYBLE_INVALID_BUF_ENTRY 0x00ULL
#define F_HANDLE_NON_RPLYBLE_VALID_BUF_ENTRY 0x02ULL
#define F_HANDLE_NON_RPLYBLE_VALID_BUF_CH 0x06ULL
static const char *f_mmu_fault_nonreplay[] = {
[F_HANDLE_NON_RPLYBLE_BUF_EMPTY] = "fault_buf_empty",
[F_HANDLE_NON_RPLYBLE_INVALID_BUF_ENTRY] = "buf_entry_invalid",
[F_HANDLE_NON_RPLYBLE_VALID_BUF_ENTRY] = "buf_entry_valid",
[F_HANDLE_NON_RPLYBLE_VALID_BUF_CH] = "validbuf_entry_and_refch",
};
static u32 get_idx, put_idx;
static u32 stub_fb_read_mmu_fault_buffer_get(struct gk20a *g, u32 index)
{
return get_idx;
}
static u32 stub_fb_read_mmu_fault_buffer_put(struct gk20a *g, u32 index)
{
return put_idx;
}
static u32 stub_fb_read_mmu_fault_buffer_size(struct gk20a *g, u32 index)
{
return 32U;
}
static void stub_fb_write_mmu_fault_buffer_get(struct gk20a *g, u32 index,
u32 reg_val)
{
}
int test_handle_nonreplay_replay_fault(struct unit_module *m, struct gk20a *g,
void *args)
{
int ret = UNIT_FAIL;
int err;
u64 branch = (u64)args;
u32 *data;
struct nvgpu_channel ch = {0};
struct gpu_ops gops = g->ops;
g->ops.fb.read_mmu_fault_buffer_get =
stub_fb_read_mmu_fault_buffer_get;
g->ops.fb.read_mmu_fault_buffer_put =
stub_fb_read_mmu_fault_buffer_put;
g->ops.fb.read_mmu_fault_buffer_size =
stub_fb_read_mmu_fault_buffer_size;
g->ops.fb.write_mmu_fault_buffer_get =
stub_fb_write_mmu_fault_buffer_get;
g->ops.fifo.mmu_fault_id_to_pbdma_id =
stub_fifo_mmu_fault_id_to_pbdma_id;
err = gv11b_mm_mmu_fault_setup_sw(g);
unit_assert(err == 0, goto done);
get_idx = 0;
put_idx = (branch & F_BUF_EMPTY) ? get_idx : 1U;
data = g->mm.hw_fault_buf[0].cpu_va;
data[gmmu_fault_buf_entry_valid_w()] = branch & F_VALID_ENTRY ?
gmmu_fault_buf_entry_valid_m() : 0U;
if (branch & F_VALID_CH) {
g->fifo.channel = &ch;
g->fifo.num_channels = 1;
ch.referenceable = true;
}
gv11b_mm_mmu_fault_handle_nonreplay_replay_fault(g, 0U, 0U);
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s: %s failed\n", __func__,
f_mmu_fault_nonreplay[branch]);
}
gv11b_mm_mmu_fault_info_mem_destroy(g);
g->ops = gops;
return ret;
}
int test_env_clean_mm_mmu_fault_gv11b_fusa(struct unit_module *m,
struct gk20a *g, void *args)
{
g->log_mask = 0;
nvgpu_vm_put(g->mm.pmu.vm);
nvgpu_vm_put(g->mm.bar2.vm);
return UNIT_SUCCESS;
}
struct unit_module_test mm_mmu_fault_gv11b_fusa_tests[] = {
UNIT_TEST(env_init, test_env_init_mm_mmu_fault_gv11b_fusa, NULL, 0),
UNIT_TEST(setup_sw_s0, test_gv11b_mm_mmu_fault_setup_sw, (void *)F_MMU_FAULT_SETUP_SW_FAULT_BUF_ALLOC_FAIL, 0),
UNIT_TEST(setup_sw_s1, test_gv11b_mm_mmu_fault_setup_sw, (void *)F_MMU_FAULT_SETUP_SW_DEFAULT, 0),
UNIT_TEST(setup_hw, test_gv11b_mm_mmu_fault_setup_hw, NULL, 0),
UNIT_TEST(disable_hw_s0, test_gv11b_mm_mmu_fault_disable_hw, (void *)F_MMU_FAULT_DISABLE_HW_FALSE, 0),
UNIT_TEST(disable_hw_s1, test_gv11b_mm_mmu_fault_disable_hw, (void *)F_MMU_FAULT_DISABLE_HW_TRUE, 0),
UNIT_TEST(fault_notify_s0, test_gv11b_mm_mmu_fault_handle_other_fault_notify, (void *)F_MMU_FAULT_ENG_ID_INVALID, 0),
UNIT_TEST(fault_notify_s1, test_gv11b_mm_mmu_fault_handle_other_fault_notify, (void *)F_MMU_FAULT_ENG_ID_BAR2, 0),
UNIT_TEST(fault_notify_s2, test_gv11b_mm_mmu_fault_handle_other_fault_notify, (void *)F_MMU_FAULT_ENG_ID_PHYSICAL, 0),
UNIT_TEST(parse_info_s0, test_gv11b_mm_mmu_fault_parse_mmu_fault_info, (void *)F_MMU_FAULT_PARSE_DEFAULT, 0),
UNIT_TEST(parse_info_s1, test_gv11b_mm_mmu_fault_parse_mmu_fault_info, (void *)F_MMU_FAULT_PARSE_FAULT_TYPE_INVALID, 0),
UNIT_TEST(parse_info_s2, test_gv11b_mm_mmu_fault_parse_mmu_fault_info, (void *)F_MMU_FAULT_PARSE_CLIENT_TYPE_INVALID, 0),
UNIT_TEST(parse_info_s3, test_gv11b_mm_mmu_fault_parse_mmu_fault_info, (void *)F_MMU_FAULT_PARSE_CLIENT_TYPE_HUB, 0),
UNIT_TEST(parse_info_s4, test_gv11b_mm_mmu_fault_parse_mmu_fault_info, (void *)F_MMU_FAULT_PARSE_CLIENT_HUB_ID_INVALID, 0),
UNIT_TEST(parse_info_s5, test_gv11b_mm_mmu_fault_parse_mmu_fault_info, (void *)F_MMU_FAULT_PARSE_CLIENT_TYPE_GPC, 0),
UNIT_TEST(parse_info_s6, test_gv11b_mm_mmu_fault_parse_mmu_fault_info, (void *)F_MMU_FAULT_PARSE_CLIENT_GPC_ID_INVALID, 0),
UNIT_TEST(handle_mmu_common_s0, test_handle_mmu_fault_common, (void *)F_MMU_HANDLER_FAULT_INVALID, 0),
UNIT_TEST(handle_mmu_common_s1, test_handle_mmu_fault_common, (void *)F_MMU_HANDLER_NVGPU_POWERED_OFF, 0),
UNIT_TEST(handle_mmu_common_s2, test_handle_mmu_fault_common, (void *)F_MMU_HANDLER_CE_DEFAULT, 0),
UNIT_TEST(handle_mmu_common_s3, test_handle_mmu_fault_common, (void *)F_MMU_HANDLER_CE_LCE_0, 0),
UNIT_TEST(handle_mmu_common_s4, test_handle_mmu_fault_common, (void *)F_MMU_HANDLER_CE_REFCH, 0),
UNIT_TEST(handle_mmu_common_s5, test_handle_mmu_fault_common, (void *)F_MMU_HANDLER_NON_REPLAYABLE_DEFAULT, 0),
UNIT_TEST(handle_mmu_common_s6, test_handle_mmu_fault_common, (void *)F_MMU_HANDLER_NON_REPLAYABLE_INST_BLOCK, 0),
UNIT_TEST(handle_mmu_common_s7, test_handle_mmu_fault_common, (void *)F_MMU_HANDLER_NON_REPLAYABLE_REFCH, 0),
UNIT_TEST(handle_mmu_common_s8, test_handle_mmu_fault_common, (void *)F_MMU_HANDLER_NON_REPLAYABLE_REFCH_NACK_HNDLD, 0),
UNIT_TEST(handle_mmu_common_s9, test_handle_mmu_fault_common, (void *)F_MMU_HANDLER_NON_REPLAYABLE_FAULTED_INVALID, 0),
UNIT_TEST(handle_mmu_common_s10, test_handle_mmu_fault_common, (void *)F_MMU_HANDLER_NON_REPLAYABLE_TSG, 2),
UNIT_TEST(handle_nonreplay_s0, test_handle_nonreplay_replay_fault, (void *)F_HANDLE_NON_RPLYBLE_BUF_EMPTY, 0),
UNIT_TEST(handle_nonreplay_s1, test_handle_nonreplay_replay_fault, (void *)F_HANDLE_NON_RPLYBLE_INVALID_BUF_ENTRY, 0),
UNIT_TEST(handle_nonreplay_s2, test_handle_nonreplay_replay_fault, (void *)F_HANDLE_NON_RPLYBLE_VALID_BUF_ENTRY, 0),
UNIT_TEST(handle_nonreplay_s3, test_handle_nonreplay_replay_fault, (void *)F_HANDLE_NON_RPLYBLE_VALID_BUF_CH, 0),
UNIT_TEST(env_clean, test_env_clean_mm_mmu_fault_gv11b_fusa, NULL, 0),
};
UNIT_MODULE(mmu_fault_gv11b_fusa, mm_mmu_fault_gv11b_fusa_tests, UNIT_PRIO_NVGPU_TEST);

View File

@@ -0,0 +1,227 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_MM_HAL_MMU_FAULT_GV11B_FUSA_H
#define UNIT_MM_HAL_MMU_FAULT_GV11B_FUSA_H
struct gk20a;
struct unit_module;
/** @addtogroup SWUTS-mm-hal-mmu_fault-gv11b_fusa
* @{
*
* Software Unit Test Specification for mm.hal.mmu_fault.mmu_fault_gv11b_fusa
*/
/**
* Test specification for: test_env_init_mm_mmu_fault_gv11b_fusa
*
* Description: Initialize environment for MM tests
*
* Test Type: Feature
*
* Targets: None
*
* Input: None
*
* Steps:
* - Init HALs and initialize VMs similar to nvgpu_init_system_vm().
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_env_init_mm_mmu_fault_gv11b_fusa(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_gv11b_mm_mmu_fault_setup_sw
*
* Description: Test mmu fault setup sw function
*
* Test Type: Feature, Error injection
*
* Targets: gops_mm.gops_mm_mmu_fault.setup_sw, gv11b_mm_mmu_fault_setup_sw,
* gops_mm.gops_mm_mmu_fault.info_mem_destroy,
* gv11b_mm_mmu_fault_info_mem_destroy
*
* Input: test_env_init
*
* Steps:
* - Check that mmu hw fault buffer is allocated and mapped.
* - Check that gv11b_mm_mmu_fault_info_mem_destroy() deallocates fault buffer
* memory.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gv11b_mm_mmu_fault_setup_sw(struct unit_module *m, struct gk20a *g,
void *args);
/**
 * Test specification for: test_gv11b_mm_mmu_fault_setup_hw
*
* Description: Test mmu fault setup hw function
*
* Test Type: Feature
*
* Targets: gops_mm.gops_mm_mmu_fault.setup_hw, gv11b_mm_mmu_fault_setup_hw
*
* Input: test_env_init
*
* Steps:
 * - Check that gv11b_mm_mmu_fault_setup_hw() configures the fault buffer: the
 * buffer address is written to memory for the hardware to use for fault
 * notification.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gv11b_mm_mmu_fault_setup_hw(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_gv11b_mm_mmu_fault_disable_hw
*
* Description: Test mmu fault disable hw function
*
* Test Type: Feature
*
* Targets: gops_mm.gops_mm_mmu_fault.disable_hw, gv11b_mm_mmu_fault_disable_hw
*
* Input: test_env_init
*
* Steps:
* - Check that gv11b_mm_mmu_fault_disable_hw() sets disabled state if fault
* buf is enabled.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gv11b_mm_mmu_fault_disable_hw(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_gv11b_mm_mmu_fault_handle_other_fault_notify
*
* Description: Test other fault notify
*
* Test Type: Feature
*
* Targets: gv11b_mm_mmu_fault_handle_other_fault_notify
*
* Input: test_env_init
*
* Steps:
* - Check that BAR2 / physical faults are recognized and notified.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gv11b_mm_mmu_fault_handle_other_fault_notify(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_gv11b_mm_mmu_fault_parse_mmu_fault_info
*
* Description: Test mmu fault parse function
*
* Test Type: Feature
*
* Targets: gv11b_mm_mmu_fault_parse_mmu_fault_info
*
* Input: test_env_init
*
* Steps:
* - Parse mmu fault info such as fault type, client type and client id.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gv11b_mm_mmu_fault_parse_mmu_fault_info(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_handle_mmu_fault_common
*
* Description: Test mmu fault handler
*
* Test Type: Feature
*
* Targets: gv11b_mm_mmu_fault_handle_mmu_fault_common,
* gv11b_mm_mmu_fault_handle_mmu_fault_ce,
* gv11b_mm_mmu_fault_handle_non_replayable,
* gv11b_mm_mmu_fault_handle_mmu_fault_refch
*
* Input: test_env_init
*
* Steps:
* - Check that fault handler processes valid and invalid cases of mmu fault.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_handle_mmu_fault_common(struct unit_module *m,
struct gk20a *g, void *args);
/**
 * Test specification for: test_handle_nonreplay_replay_fault
 *
 * Description: Test the non-replayable/replayable fault handler
*
* Test Type: Feature
*
* Targets: gv11b_mm_mmu_fault_handle_nonreplay_replay_fault,
* gv11b_mm_mmu_fault_handle_buf_valid_entry,
* gv11b_fb_copy_from_hw_fault_buf
*
* Input: test_env_init
*
* Steps:
* - Test non-replayable fault handler with valid and invalid cases.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_handle_nonreplay_replay_fault(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_env_clean_mm_mmu_fault_gv11b_fusa
*
* Description: Cleanup test environment
*
* Test Type: Feature
*
* Targets: None
*
* Input: test_env_init
*
* Steps:
* - Destroy memory and VMs initialized for the test.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_env_clean_mm_mmu_fault_gv11b_fusa(struct unit_module *m,
struct gk20a *g, void *args);
/** @} */
#endif /* UNIT_MM_HAL_MMU_FAULT_GV11B_FUSA_H */

View File

@@ -0,0 +1,26 @@
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
.SUFFIXES:
OBJS = mm.o
MODULE = mm
include ../../Makefile.units

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=mm
include $(NV_COMPONENT_DIR)/../../Makefile.units.common.interface.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=mm
include $(NV_COMPONENT_DIR)/../../Makefile.units.common.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

744 userspace/units/mm/mm/mm.c Normal file
View File

@@ -0,0 +1,744 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "mm.h"
#include <unit/io.h>
#include <unit/unit.h>
#include <unit/core.h>
#include <nvgpu/errata.h>
#include <nvgpu/nvgpu_init.h>
#include <nvgpu/posix/io.h>
#include "os/posix/os_posix.h"
#include "hal/mc/mc_gp10b.h"
#include "hal/mm/mm_gp10b.h"
#include "hal/mm/mm_gv11b.h"
#include "hal/mm/cache/flush_gk20a.h"
#include "hal/mm/cache/flush_gv11b.h"
#include "hal/mm/gmmu/gmmu_gm20b.h"
#include "hal/mm/gmmu/gmmu_gp10b.h"
#include "hal/mm/gmmu/gmmu_gv11b.h"
#include "hal/mm/mmu_fault/mmu_fault_gv11b.h"
#include "hal/fb/fb_gm20b.h"
#include "hal/fb/fb_gp10b.h"
#include "hal/fb/fb_gv11b.h"
#include "hal/fb/fb_mmu_fault_gv11b.h"
#include "hal/fb/intr/fb_intr_gv11b.h"
#include "hal/fifo/ramin_gk20a.h"
#include "hal/fifo/ramin_gv11b.h"
#include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
#include <nvgpu/hw/gv11b/hw_flush_gv11b.h>
#include <nvgpu/bug.h>
#include <nvgpu/posix/dma.h>
#include <nvgpu/posix/kmem.h>
#include <nvgpu/posix/posix-fault-injection.h>
#define TEST_ADDRESS 0x10002000
#define ARBITRARY_ERROR -42
#define ERROR_TYPE_KMEM 0
#define ERROR_TYPE_DMA 1
#define ERROR_TYPE_HAL 2
struct unit_module *current_module;
bool test_flag;
int int_empty_hal_return_error_after;
/*
* Write callback (for all nvgpu_writel calls).
*/
static void writel_access_reg_fn(struct gk20a *g,
struct nvgpu_reg_access *access)
{
if (access->addr == flush_fb_flush_r()) {
if (access->value == flush_fb_flush_pending_busy_v()) {
unit_info(current_module,
"writel: setting FB_flush to not pending\n");
access->value = 0;
}
} else if (access->addr == flush_l2_flush_dirty_r()) {
if (access->value == flush_l2_flush_dirty_pending_busy_v()) {
unit_info(current_module,
"writel: setting L2_flush to not pending\n");
access->value = 0;
}
}
nvgpu_posix_io_writel_reg_space(g, access->addr, access->value);
nvgpu_posix_io_record_access(g, access);
}
/*
* Read callback, similar to the write callback above.
*/
static void readl_access_reg_fn(struct gk20a *g,
struct nvgpu_reg_access *access)
{
access->value = nvgpu_posix_io_readl_reg_space(g, access->addr);
}
/*
* Define all the callbacks to be used during the test. Typically all
* write operations use the same callback, likewise for all read operations.
*/
static struct nvgpu_posix_io_callbacks mmu_faults_callbacks = {
/* Write APIs all can use the same accessor. */
.writel = writel_access_reg_fn,
.writel_check = writel_access_reg_fn,
.bar1_writel = writel_access_reg_fn,
.usermode_writel = writel_access_reg_fn,
/* Likewise for the read APIs. */
.__readl = readl_access_reg_fn,
.readl = readl_access_reg_fn,
.bar1_readl = readl_access_reg_fn,
};
static void init_platform(struct unit_module *m, struct gk20a *g, bool is_iGPU)
{
if (is_iGPU) {
nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, true);
} else {
nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, false);
}
/* Enable extra features to increase line coverage */
nvgpu_set_enabled(g, NVGPU_SUPPORT_SEC2_VM, true);
nvgpu_set_enabled(g, NVGPU_SUPPORT_GSP_VM, true);
nvgpu_set_errata(g, NVGPU_ERRATA_MM_FORCE_128K_PMU_VM, true);
}
/*
* Simple HAL function to exercise branches and return an arbitrary error
* code after a given number of calls.
*/
static int int_empty_hal(struct gk20a *g)
{
if (int_empty_hal_return_error_after > 0) {
int_empty_hal_return_error_after--;
}
if (int_empty_hal_return_error_after == 0) {
return ARBITRARY_ERROR;
}
return 0;
}
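/*
 * Illustrative note on the countdown above (a sketch, not used by the tests
 * directly): with the counter set to N, the first N-1 calls succeed, the Nth
 * call returns ARBITRARY_ERROR, and the counter then sticks at zero so every
 * later call keeps failing. A value of -1 disables injection entirely.
 *
 *   int_empty_hal_return_error_after = 2;
 *   int_empty_hal(g);   (counter 2 -> 1, returns 0)
 *   int_empty_hal(g);   (counter 1 -> 0, returns ARBITRARY_ERROR)
 *   int_empty_hal(g);   (counter stays 0, keeps returning the error)
 */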
/* Similar HAL to mimic the bus.bar1_bind and bus.bar2_bind HALs */
static int int_empty_hal_bar_bind(struct gk20a *g, struct nvgpu_mem *bar_inst)
{
/* Re-use int_empty_hal to leverage the error injection mechanism */
return int_empty_hal(g);
}
/* Simple HAL with no return value */
static void void_empty_hal(struct gk20a *g)
{
return;
}
/*
* Helper function to factorize the testing of the many possible error cases
* in nvgpu_init_mm_support.
* It supports 3 types of error injection (kmalloc, dma, and empty_hal). The
* chosen error will occur after 'count' calls. It will return 0 if the
* expected_error occurred, and 1 otherwise.
* The 'step' parameter is used in case of failure to more easily trace the
* issue in logs.
*/
static int nvgpu_init_mm_support_inject_error(struct unit_module *m,
struct gk20a *g, int error_type, int count, int expected_error,
int step)
{
int err;
struct nvgpu_posix_fault_inj *dma_fi =
nvgpu_dma_alloc_get_fault_injection();
struct nvgpu_posix_fault_inj *kmem_fi =
nvgpu_kmem_get_fault_injection();
if (error_type == ERROR_TYPE_KMEM) {
nvgpu_posix_enable_fault_injection(kmem_fi, true, count);
} else if (error_type == ERROR_TYPE_DMA) {
nvgpu_posix_enable_fault_injection(dma_fi, true, count);
} else if (error_type == ERROR_TYPE_HAL) {
int_empty_hal_return_error_after = count;
}
err = nvgpu_init_mm_support(g);
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
nvgpu_posix_enable_fault_injection(dma_fi, false, 0);
int_empty_hal_return_error_after = -1;
if (err != expected_error) {
unit_err(m, "init_mm_support didn't fail as expected step=%d err=%d\n",
step, err);
return 1;
}
return 0;
}
int test_nvgpu_init_mm(struct unit_module *m, struct gk20a *g, void *args)
{
int err;
int errors = 0;
/*
* We need to call nvgpu_init_mm_support but first make it fail to
* test (numerous) error handling cases
*/
int_empty_hal_return_error_after = -1;
/* Making nvgpu_alloc_sysmem_flush fail */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 0,
-ENOMEM, 1);
/* Making nvgpu_init_bar1_vm fail on VM init */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 0,
-ENOMEM, 2);
/* Making nvgpu_init_bar1_vm fail on alloc_inst_block */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 2,
-ENOMEM, 3);
/* Making nvgpu_init_bar2_vm fail */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 4,
-ENOMEM, 4);
/* Making nvgpu_init_system_vm fail on the PMU VM init */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 10,
-ENOMEM, 5);
/* Making nvgpu_init_system_vm fail again with extra branch coverage */
g->ops.mm.init_bar2_vm = NULL;
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 6,
-ENOMEM, 6);
g->ops.mm.init_bar2_vm = gp10b_mm_init_bar2_vm;
/* Making nvgpu_init_system_vm fail on alloc_inst_block */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 6,
-ENOMEM, 7);
/* Making nvgpu_init_hwpm fail */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 7,
-ENOMEM, 8);
/* Making nvgpu_init_engine_ucode_vm(sec2) fail on VM init */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 15,
-ENOMEM, 9);
/* Making nvgpu_init_engine_ucode_vm(sec2) fail on alloc_inst_block */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 9,
-ENOMEM, 10);
/* Making nvgpu_init_engine_ucode_vm(gsp) fail */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 11,
-ENOMEM, 11);
/* Making nvgpu_init_cde_vm fail */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 25,
-ENOMEM, 12);
/* Making nvgpu_init_ce_vm fail */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 33,
-ENOMEM, 13);
/* Making nvgpu_init_mmu_debug fail on wr_mem DMA alloc */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 14,
-ENOMEM, 14);
/* Making nvgpu_init_mmu_debug fail on rd_mem DMA alloc */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 15,
-ENOMEM, 15);
/* Making g->ops.mm.mmu_fault.setup_sw fail */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_HAL, 1,
ARBITRARY_ERROR, 16);
/* Making g->ops.fb.ecc.init fail */
g->ops.fb.ecc.init = int_empty_hal;
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_HAL, 2,
ARBITRARY_ERROR, 17);
g->ops.fb.ecc.init = NULL;
/*
* Extra cases for branch coverage: change support flags to test
* other branches
*/
nvgpu_set_enabled(g, NVGPU_SUPPORT_SEC2_VM, false);
nvgpu_set_enabled(g, NVGPU_SUPPORT_GSP_VM, false);
nvgpu_set_errata(g, NVGPU_ERRATA_MM_FORCE_128K_PMU_VM, false);
g->has_cde = false;
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_HAL, 1,
ARBITRARY_ERROR, 18);
nvgpu_set_enabled(g, NVGPU_SUPPORT_SEC2_VM, true);
nvgpu_set_enabled(g, NVGPU_SUPPORT_GSP_VM, true);
nvgpu_set_errata(g, NVGPU_ERRATA_MM_FORCE_128K_PMU_VM, true);
g->has_cde = true;
/*
* Extra cases for branch coverage: remove some HALs to test branches
* in nvgpu_init_mm_reset_enable_hw
*/
g->ops.mc.fb_reset = NULL;
g->ops.fb.init_fs_state = NULL;
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_HAL, 1,
ARBITRARY_ERROR, 19);
g->ops.mc.fb_reset = void_empty_hal;
g->ops.fb.init_fs_state = void_empty_hal;
if (errors != 0) {
return UNIT_FAIL;
}
/* Now it should succeed */
err = nvgpu_init_mm_support(g);
if (err != 0) {
unit_return_fail(m, "nvgpu_init_mm_support failed (1) err=%d\n",
err);
}
/*
* Now running it again should succeed too but will hit some
* "already initialized" paths
*/
err = nvgpu_init_mm_support(g);
if (err != 0) {
unit_return_fail(m, "nvgpu_init_mm_support failed (2) err=%d\n",
err);
}
/*
* Extra case for branch coverage: remove mmu_fault.setup_sw HALs to
* test branch in nvgpu_init_mm_setup_sw
*/
g->ops.mm.mmu_fault.setup_sw = NULL;
g->ops.mm.setup_hw = NULL;
g->mm.sw_ready = false;
err = nvgpu_init_mm_support(g);
if (err != 0) {
unit_return_fail(m, "nvgpu_init_mm_support failed (3) err=%d\n",
err);
}
g->ops.mm.mmu_fault.setup_sw = int_empty_hal;
g->ops.mm.setup_hw = int_empty_hal;
return UNIT_SUCCESS;
}
int test_nvgpu_mm_setup_hw(struct unit_module *m, struct gk20a *g, void *args)
{
int err;
/*
* We need to call nvgpu_mm_setup_hw but first make it fail to test
* error handling and other corner cases
*/
g->ops.bus.bar1_bind = int_empty_hal_bar_bind;
int_empty_hal_return_error_after = 1;
err = nvgpu_mm_setup_hw(g);
if (err != ARBITRARY_ERROR) {
unit_return_fail(m, "nvgpu_mm_setup_hw did not fail as expected (1) err=%d\n",
err);
}
g->ops.bus.bar2_bind = int_empty_hal_bar_bind;
int_empty_hal_return_error_after = 2;
err = nvgpu_mm_setup_hw(g);
if (err != ARBITRARY_ERROR) {
unit_return_fail(m, "nvgpu_mm_setup_hw did not fail as expected (2) err=%d\n",
err);
}
int_empty_hal_return_error_after = -1;
g->ops.bus.bar1_bind = NULL;
g->ops.bus.bar2_bind = NULL;
/* Make flush fail */
g->ops.mm.cache.fb_flush = int_empty_hal;
int_empty_hal_return_error_after = 1;
err = nvgpu_mm_setup_hw(g);
if (err != -EBUSY) {
unit_return_fail(m, "nvgpu_mm_setup_hw did not fail as expected (3) err=%d\n",
err);
}
/* Make the 2nd call to flush fail */
int_empty_hal_return_error_after = 2;
err = nvgpu_mm_setup_hw(g);
if (err != -EBUSY) {
unit_return_fail(m, "nvgpu_mm_setup_hw did not fail as expected (4) err=%d\n",
err);
}
int_empty_hal_return_error_after = -1;
g->ops.mm.cache.fb_flush = gk20a_mm_fb_flush;
/* Success but no branch on g->ops.fb.set_mmu_page_size != NULL */
err = nvgpu_mm_setup_hw(g);
if (err != 0) {
unit_return_fail(m, "nvgpu_mm_setup_hw failed (1) err=%d\n",
err);
}
/* Success but branch on g->ops.fb.set_mmu_page_size != NULL */
g->ops.fb.set_mmu_page_size = NULL;
err = nvgpu_mm_setup_hw(g);
if (err != 0) {
unit_return_fail(m, "nvgpu_mm_setup_hw failed (2) err=%d\n",
err);
}
/* Success but branch on error return from g->ops.bus.bar2_bind */
g->ops.bus.bar2_bind = int_empty_hal_bar_bind;
err = nvgpu_mm_setup_hw(g);
if (err != 0) {
unit_return_fail(m, "nvgpu_mm_setup_hw failed (3) err=%d\n",
err);
}
/* Success but branch on g->ops.mm.mmu_fault.setup_hw != NULL */
g->ops.mm.mmu_fault.setup_hw = NULL;
err = nvgpu_mm_setup_hw(g);
if (err != 0) {
unit_return_fail(m, "nvgpu_mm_setup_hw failed (4) err=%d\n",
err);
}
return UNIT_SUCCESS;
}
int test_mm_init_hal(struct unit_module *m, struct gk20a *g, void *args)
{
g->log_mask = 0;
if (verbose_lvl(m) >= 1) {
g->log_mask = gpu_dbg_map;
}
if (verbose_lvl(m) >= 2) {
g->log_mask |= gpu_dbg_map_v;
}
if (verbose_lvl(m) >= 3) {
g->log_mask |= gpu_dbg_pte;
}
current_module = m;
init_platform(m, g, true);
struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g);
p->mm_is_iommuable = true;
g->has_cde = true;
g->ops.mc.intr_stall_unit_config = mc_gp10b_intr_stall_unit_config;
g->ops.mc.intr_nonstall_unit_config =
mc_gp10b_intr_nonstall_unit_config;
g->ops.mm.gmmu.get_default_big_page_size =
nvgpu_gmmu_default_big_page_size;
g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
g->ops.mm.gmmu.get_max_page_table_levels = gp10b_get_max_page_table_levels;
g->ops.mm.init_inst_block = gv11b_mm_init_inst_block;
g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;
g->ops.mm.gmmu.get_iommu_bit = gp10b_mm_get_iommu_bit;
g->ops.mm.gmmu.gpu_phys_addr = gv11b_gpu_phys_addr;
g->ops.mm.is_bar1_supported = gv11b_mm_is_bar1_supported;
g->ops.mm.cache.l2_flush = gv11b_mm_l2_flush;
g->ops.mm.cache.fb_flush = gk20a_mm_fb_flush;
#ifdef CONFIG_NVGPU_COMPRESSION
g->ops.fb.compression_page_size = gp10b_fb_compression_page_size;
#endif
g->ops.fb.tlb_invalidate = gm20b_fb_tlb_invalidate;
g->ops.ramin.init_pdb = gv11b_ramin_init_pdb;
g->ops.ramin.alloc_size = gk20a_ramin_alloc_size;
g->ops.fb.is_fault_buf_enabled = gv11b_fb_is_fault_buf_enabled;
g->ops.fb.read_mmu_fault_buffer_size =
gv11b_fb_read_mmu_fault_buffer_size;
g->ops.fb.init_hw = gv11b_fb_init_hw;
g->ops.fb.intr.enable = gv11b_fb_intr_enable;
/* Add bar2 to have more init/cleanup logic */
g->ops.mm.init_bar2_vm = gp10b_mm_init_bar2_vm;
g->ops.mm.init_bar2_vm(g);
/*
* For extra coverage. Note: the goal of this unit test is to validate
* the mm.mm unit, not the underlying HALs.
*/
g->ops.fb.init_fs_state = void_empty_hal;
g->ops.fb.set_mmu_page_size = void_empty_hal;
g->ops.mc.fb_reset = void_empty_hal;
g->ops.mm.mmu_fault.setup_hw = void_empty_hal;
g->ops.mm.mmu_fault.setup_sw = int_empty_hal;
g->ops.mm.setup_hw = int_empty_hal;
nvgpu_posix_register_io(g, &mmu_faults_callbacks);
/* Register space: FB_MMU */
if (nvgpu_posix_io_add_reg_space(g, fb_niso_intr_r(), 0x800) != 0) {
unit_return_fail(m, "nvgpu_posix_io_add_reg_space failed\n");
}
/* Register space: HW_FLUSH */
if (nvgpu_posix_io_add_reg_space(g, flush_fb_flush_r(), 0x20) != 0) {
unit_return_fail(m, "nvgpu_posix_io_add_reg_space failed\n");
}
if (g->ops.mm.is_bar1_supported(g)) {
unit_return_fail(m, "BAR1 is not supported on Volta+\n");
}
return UNIT_SUCCESS;
}
static int stub_mm_l2_flush(struct gk20a *g, bool invalidate)
{
return -ETIMEDOUT;
}
int test_mm_suspend(struct unit_module *m, struct gk20a *g, void *args)
{
int err;
int (*save_func)(struct gk20a *g, bool inv);
/* Allow l2_flush failure by stubbing the call. */
save_func = g->ops.mm.cache.l2_flush;
g->ops.mm.cache.l2_flush = stub_mm_l2_flush;
nvgpu_set_power_state(g, NVGPU_STATE_POWERED_OFF);
err = nvgpu_mm_suspend(g);
if (err != -ETIMEDOUT) {
unit_return_fail(m, "suspend did not fail as expected err=%d\n",
err);
}
/* restore original l2_flush method */
g->ops.mm.cache.l2_flush = save_func;
nvgpu_set_power_state(g, NVGPU_STATE_POWERED_ON);
err = nvgpu_mm_suspend(g);
if (err != 0) {
unit_return_fail(m, "suspend fail err=%d\n", err);
}
/*
* Some optional HALs are executed if not NULL in nvgpu_mm_suspend.
* Calls above went through branches where these HAL pointers were NULL,
* now define them and run again for complete coverage.
*/
g->ops.fb.intr.disable = gv11b_fb_intr_disable;
g->ops.mm.mmu_fault.disable_hw = gv11b_mm_mmu_fault_disable_hw;
nvgpu_set_power_state(g, NVGPU_STATE_POWERED_ON);
err = nvgpu_mm_suspend(g);
if (err != 0) {
unit_return_fail(m, "suspend fail err=%d\n", err);
}
return UNIT_SUCCESS;
}
int test_mm_remove_mm_support(struct unit_module *m, struct gk20a *g,
void *args)
{
int err;
/*
* Since the last step of the removal is to call nvgpu_pd_cache_fini,
* g->mm.pd_cache = NULL indicates that the removal completed
* successfully.
*/
err = nvgpu_pd_cache_init(g);
if (err != 0) {
unit_return_fail(m, "nvgpu_pd_cache_init failed ??\n");
}
g->ops.mm.mmu_fault.info_mem_destroy = NULL;
g->mm.remove_support(&g->mm);
if (g->mm.pd_cache != NULL) {
unit_return_fail(m, "mm removal did not complete\n");
}
/* Add extra HALs to cover some branches */
g->ops.mm.mmu_fault.info_mem_destroy =
gv11b_mm_mmu_fault_info_mem_destroy;
g->ops.mm.remove_bar2_vm = gp10b_mm_remove_bar2_vm;
g->mm.remove_support(&g->mm);
/* Reset this to NULL to avoid trying to destroy the mutex again */
g->ops.mm.mmu_fault.info_mem_destroy = NULL;
/* Extra cases for branch coverage */
nvgpu_set_enabled(g, NVGPU_SUPPORT_SEC2_VM, false);
nvgpu_set_enabled(g, NVGPU_SUPPORT_GSP_VM, false);
g->has_cde = false;
g->mm.remove_support(&g->mm);
nvgpu_set_enabled(g, NVGPU_SUPPORT_SEC2_VM, true);
nvgpu_set_enabled(g, NVGPU_SUPPORT_GSP_VM, true);
g->has_cde = true;
return UNIT_SUCCESS;
}
/*
* Test: test_mm_page_sizes
* Test a couple of page_size related functions
*/
int test_mm_page_sizes(struct unit_module *m, struct gk20a *g,
void *args)
{
g->ops.mm.gmmu.get_big_page_sizes = NULL;
if (nvgpu_mm_get_default_big_page_size(g) != SZ_64K) {
unit_return_fail(m, "unexpected big page size (1)\n");
}
if (nvgpu_mm_get_available_big_page_sizes(g) != SZ_64K) {
unit_return_fail(m, "unexpected big page size (2)\n");
}
/* For branch/line coverage */
g->mm.disable_bigpage = true;
if (nvgpu_mm_get_available_big_page_sizes(g) != 0) {
unit_return_fail(m, "unexpected big page size (3)\n");
}
if (nvgpu_mm_get_default_big_page_size(g) != 0) {
unit_return_fail(m, "unexpected big page size (4)\n");
}
g->mm.disable_bigpage = false;
/* Case of non NULL g->ops.mm.gmmu.get_big_page_sizes */
g->ops.mm.gmmu.get_big_page_sizes = gm20b_mm_get_big_page_sizes;
if (nvgpu_mm_get_available_big_page_sizes(g) != (SZ_64K | SZ_128K)) {
unit_return_fail(m, "unexpected big page size (5)\n");
}
g->ops.mm.gmmu.get_big_page_sizes = NULL;
return UNIT_SUCCESS;
}
int test_mm_inst_block(struct unit_module *m, struct gk20a *g,
void *args)
{
u32 addr;
struct nvgpu_mem *block = malloc(sizeof(struct nvgpu_mem));
int ret = UNIT_FAIL;
memset(block, 0, sizeof(*block));
block->aperture = APERTURE_SYSMEM;
block->cpu_va = (void *) TEST_ADDRESS;
g->ops.ramin.base_shift = gk20a_ramin_base_shift;
addr = nvgpu_inst_block_ptr(g, block);
if (addr != ((u32) TEST_ADDRESS >> g->ops.ramin.base_shift())) {
unit_err(m, "invalid inst_block_ptr address (1)\n");
goto cleanup;
}
/* Run again with NVLINK support for code coverage */
nvgpu_set_enabled(g, NVGPU_SUPPORT_NVLINK, true);
addr = nvgpu_inst_block_ptr(g, block);
nvgpu_set_enabled(g, NVGPU_SUPPORT_NVLINK, false);
if (addr != ((u32) TEST_ADDRESS >> g->ops.ramin.base_shift())) {
unit_err(m, "invalid inst_block_ptr address (2)\n");
goto cleanup;
}
ret = UNIT_SUCCESS;
cleanup:
free(block);
return ret;
}
int test_mm_alloc_inst_block(struct unit_module *m, struct gk20a *g,
void *args)
{
struct nvgpu_mem *mem = malloc(sizeof(struct nvgpu_mem));
struct nvgpu_posix_fault_inj *dma_fi =
nvgpu_dma_alloc_get_fault_injection();
int result = UNIT_FAIL;
if (nvgpu_alloc_inst_block(g, mem) != 0) {
unit_err(m, "alloc_inst failed unexpectedly\n");
goto cleanup;
}
nvgpu_posix_enable_fault_injection(dma_fi, true, 0);
if (nvgpu_alloc_inst_block(g, mem) == 0) {
unit_err(m, "alloc_inst did not fail as expected\n");
goto cleanup;
}
result = UNIT_SUCCESS;
cleanup:
nvgpu_posix_enable_fault_injection(dma_fi, false, 0);
free(mem);
return result;
}
int test_gk20a_from_mm(struct unit_module *m, struct gk20a *g, void *args)
{
if (g != gk20a_from_mm(&(g->mm))) {
unit_return_fail(m, "ptr mismatch in gk20a_from_mm\n");
}
return UNIT_SUCCESS;
}
int test_bar1_aperture_size_mb_gk20a(struct unit_module *m, struct gk20a *g,
void *args)
{
if (g->mm.bar1.aperture_size != (bar1_aperture_size_mb_gk20a() << 20)) {
unit_return_fail(m, "mismatch in bar1_aperture_size\n");
}
return UNIT_SUCCESS;
}
struct unit_module_test nvgpu_mm_mm_tests[] = {
UNIT_TEST(init_hal, test_mm_init_hal, NULL, 0),
UNIT_TEST(init_mm, test_nvgpu_init_mm, NULL, 0),
UNIT_TEST(init_mm_hw, test_nvgpu_mm_setup_hw, NULL, 0),
UNIT_TEST(suspend, test_mm_suspend, NULL, 0),
UNIT_TEST(remove_support, test_mm_remove_mm_support, NULL, 0),
UNIT_TEST(page_sizes, test_mm_page_sizes, NULL, 0),
UNIT_TEST(inst_block, test_mm_inst_block, NULL, 0),
UNIT_TEST(alloc_inst_block, test_mm_alloc_inst_block, NULL, 0),
UNIT_TEST(gk20a_from_mm, test_gk20a_from_mm, NULL, 0),
UNIT_TEST(bar1_aperture_size, test_bar1_aperture_size_mb_gk20a, NULL,
0),
};
UNIT_MODULE(mm.mm, nvgpu_mm_mm_tests, UNIT_PRIO_NVGPU_TEST);

298 userspace/units/mm/mm/mm.h Normal file
View File

@@ -0,0 +1,298 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_MM_MM_H
#define UNIT_MM_MM_H
struct gk20a;
struct unit_module;
/** @addtogroup SWUTS-mm-mm
* @{
*
* Software Unit Test Specification for mm.mm
*/
/**
* Test specification for: test_mm_init_hal
*
 * Description: The enabled flags, HALs, and register spaces must be
 * initialized properly before running any other tests.
*
* Test Type: Other (Init)
*
* Targets: gops_mm.init_bar2_vm, gops_mm.is_bar1_supported
*
* Input: None
*
* Steps:
* - Set verbosity based on unit testing arguments.
* - Initialize the platform:
 * - Set the NVGPU_MM_UNIFIED_MEMORY flag for iGPU configurations, disabled otherwise
* - Enable the following flags to enable various MM-related features:
* - NVGPU_SUPPORT_SEC2_VM
* - NVGPU_SUPPORT_GSP_VM
 * - NVGPU_ERRATA_MM_FORCE_128K_PMU_VM (an erratum set via nvgpu_set_errata)
 * - Set the minimum set of HALs needed for the mm.mm module.
* - Register IO reg space for FB_MMU and HW_FLUSH.
* - Ensure BAR1 support is disabled.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_mm_init_hal(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_init_mm
*
 * Description: The nvgpu_init_mm_support function must initialize all the
 * necessary components of the mm unit. It must also properly handle error
 * cases.
*
* Test Type: Feature, Error guessing
*
* Targets: gops_mm.init_mm_support, nvgpu_init_mm_support
*
* Input: test_mm_init_hal must have been executed successfully.
*
* Steps:
* - Rely on error injection mechanisms to target all the possible error
* cases within the nvgpu_init_mm_support function. In particular, this step
* will use KMEM (malloc), DMA and HAL error injection mechanisms to
* selectively cause errors, and then check the error code to ensure the
* expected failure occurred.
* - nvgpu_init_mm_support is then called and expected to succeed.
* - Call nvgpu_init_mm_support again to test the case where initialization
* already succeeded.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_init_mm(struct unit_module *m, struct gk20a *g, void *args);
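/*
 * Minimal sketch of the fault-injection pattern the test above relies on
 * (illustrative only; the real sequencing lives in mm.c). A count of 0 makes
 * the very next kmem allocation fail:
 *
 *   struct nvgpu_posix_fault_inj *kmem_fi =
 *           nvgpu_kmem_get_fault_injection();
 *
 *   nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
 *   err = nvgpu_init_mm_support(g);    (expected to return -ENOMEM)
 *   nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
 */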
/**
* Test specification for: test_nvgpu_mm_setup_hw
*
 * Description: The nvgpu_mm_setup_hw function must initialize all HW-related
 * components of the mm unit. It must also properly handle error cases.
*
* Test Type: Feature, Error guessing
*
* Targets: gops_mm.setup_hw, nvgpu_mm_setup_hw
*
* Input: test_mm_init_hal and test_nvgpu_init_mm must have been executed
* successfully.
*
* Steps:
 * - Rely on HAL error injection mechanisms to target all the possible error
 * cases within the nvgpu_mm_setup_hw function.
 * - nvgpu_mm_setup_hw is then called and expected to succeed.
 * - Call nvgpu_mm_setup_hw again to test the case where initialization
 * already succeeded and to exercise a branch on the set_mmu_page_size HAL.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_mm_setup_hw(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_mm_suspend
*
 * Description: The nvgpu_mm_suspend function shall suspend the
 * hardware-related components by calling the relevant HALs to flush L2,
 * disable FB interrupts, and disable MMU fault handling.
*
* Test Type: Feature
*
* Targets: nvgpu_set_power_state, gops_mm.mm_suspend, nvgpu_mm_suspend
*
* Input: test_mm_init_hal, test_nvgpu_init_mm and test_nvgpu_mm_setup_hw must
* have been executed successfully.
*
* Steps:
* - Simulate that the GPU power is off.
* - Run nvgpu_mm_suspend and check that it failed with -ETIMEDOUT.
* - Simulate that power is on.
* - Run nvgpu_mm_suspend and check that it succeeded.
* - Define extra HALs. (intr disable, MMU fault disable)
* - Simulate that power is on.
* - Run nvgpu_mm_suspend and check that it succeeded.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_mm_suspend(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_mm_remove_mm_support
*
* Description: The mm.remove_support operation (nvgpu_remove_mm_support
* function) shall de-allocate all resources related to mm. In particular, it
* is expected that nvgpu_remove_mm_support will call the nvgpu_pd_cache_fini
* as its last step.
*
* Test Type: Feature
*
* Targets: gops_mm.pd_cache_init, nvgpu_pd_cache_init, gops_mm.remove_support
*
* Input: test_mm_init_hal, test_nvgpu_init_mm and test_nvgpu_mm_setup_hw must
* have been executed successfully
*
* Steps:
* - Allocate pd_cache by calling nvgpu_pd_cache_init.
* - Call mm.remove_support.
* - Verify that g->mm.pd_cache is NULL.
* - Setup additional HALs for line/branch coverage: mmu_fault.info_mem_destroy
* and mm.remove_bar2_vm.
* - Call mm.remove_support again.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_mm_remove_mm_support(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_mm_page_sizes
*
* Description: The mm page size related operations shall provide information
* about big page sizes available.
*
* Test Type: Feature
*
* Targets: nvgpu_mm_get_default_big_page_size,
* nvgpu_mm_get_available_big_page_sizes
*
* Input: test_mm_init_hal, test_nvgpu_init_mm and test_nvgpu_mm_setup_hw must
* have been executed successfully.
*
* Steps:
* - Call nvgpu_mm_get_default_big_page_size and check that it returns 64KB.
* - Call nvgpu_mm_get_available_big_page_sizes and check that it returns 64KB.
* - Disable big page support.
* - Call nvgpu_mm_get_default_big_page_size and check that it returns 0.
* - Call nvgpu_mm_get_available_big_page_sizes and check that it returns 0.
* - Enable big page support.
* - Setup the mm.gmmu.get_big_page_sizes HAL.
* - Call nvgpu_mm_get_available_big_page_sizes and check that it returns a
* bitwise OR of SZ_64K and SZ_128K.
* - Restore the mm.gmmu.get_big_page_sizes HAL to NULL.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_mm_page_sizes(struct unit_module *m, struct gk20a *g, void *args);
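/*
 * Worked expectation for the last step above (values from <nvgpu/sizes.h>):
 * SZ_64K | SZ_128K == 0x10000 | 0x20000 == 0x30000.
 */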
/**
* Test specification for: test_mm_inst_block
*
 * Description: The nvgpu_inst_block_ptr function shall return the base address
 * of the provided memory block, applying the required RAMIN base shift.
*
* Test Type: Feature
*
* Targets: nvgpu_inst_block_ptr, gops_ramin.base_shift
*
* Input: test_mm_init_hal, test_nvgpu_init_mm and test_nvgpu_mm_setup_hw must
* have been executed successfully.
*
* Steps:
* - Create an arbitrary nvgpu_mem block with SYSMEM aperture and a well
* defined CPU VA.
* - Setup the ramin.base_shift HAL.
* - Call nvgpu_inst_block_ptr.
 * - Check that the returned address has been shifted by the same number of
 * bits as provided by the ramin.base_shift HAL.
* - For code coverage, enable NVGPU_SUPPORT_NVLINK, call nvgpu_inst_block_ptr
* again and check for the same bit shift as earlier.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_mm_inst_block(struct unit_module *m, struct gk20a *g, void *args);
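/*
 * Worked example for the shift check above, assuming gk20a_ramin_base_shift()
 * returns 12 (the value is an assumption for illustration):
 *
 *   block->cpu_va = (void *) 0x10002000;
 *   nvgpu_inst_block_ptr(g, block) == 0x10002000 >> 12 == 0x10002
 */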
/**
* Test specification for: test_mm_alloc_inst_block
*
 * Description: The nvgpu_alloc_inst_block function shall allocate DMA
 * resources for a given instance block.
*
* Test Type: Feature
*
* Targets: nvgpu_alloc_inst_block
*
* Input: test_mm_init_hal, test_nvgpu_init_mm and test_nvgpu_mm_setup_hw must
* have been executed successfully.
*
* Steps:
* - Create an arbitrary nvgpu_mem block.
* - Call nvgpu_alloc_inst_block and ensure it succeeded.
* - Enable DMA fault injection.
* - Call nvgpu_alloc_inst_block and ensure it did not succeed.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_mm_alloc_inst_block(struct unit_module *m, struct gk20a *g,
void *args);
/**
* Test specification for: test_gk20a_from_mm
*
* Description: Simple test to check gk20a_from_mm.
*
* Test Type: Feature
*
* Targets: gk20a_from_mm
*
* Input: None
*
* Steps:
* - Call gk20a_from_mm with the g->mm pointer and ensure it returns a
 * pointer to g.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_gk20a_from_mm(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_bar1_aperture_size_mb_gk20a
*
* Description: Simple test to check bar1_aperture_size_mb_gk20a.
*
* Test Type: Feature
*
* Targets: bar1_aperture_size_mb_gk20a
*
* Input: None
*
* Steps:
* - Ensure that g->mm.bar1.aperture_size matches the expected value from
* bar1_aperture_size_mb_gk20a
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_bar1_aperture_size_mb_gk20a(struct unit_module *m, struct gk20a *g,
void *args);
#endif /* UNIT_MM_MM_H */

View File

@@ -0,0 +1,26 @@
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
.SUFFIXES:
OBJS = nvgpu_mem.o
MODULE = nvgpu_mem
include ../../Makefile.units

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=nvgpu_mem
include $(NV_COMPONENT_DIR)/../../Makefile.units.common.interface.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=nvgpu_mem
include $(NV_COMPONENT_DIR)/../../Makefile.units.common.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=nvgpu_mem
include $(NV_COMPONENT_DIR)/../../../Makefile.units.common.interface.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,38 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=nvgpu_mem
NVGPU_UNIT_SRCS=$(NV_COMPONENT_DIR)/../nvgpu_mem.c
CONFIG_NVGPU_DGPU := 1
include $(NV_COMPONENT_DIR)/../../../Makefile.units.common.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,736 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <stdlib.h>
#include <unit/io.h>
#include <unit/unit.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/nvgpu_sgt.h>
#include <nvgpu/page_allocator.h>
#include <nvgpu/pramin.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/sizes.h>
#include <nvgpu/dma.h>
#include <nvgpu/posix/io.h>
#include <nvgpu/posix/posix-fault-injection.h>
#include <os/posix/os_posix.h>
#include <hal/mm/gmmu/gmmu_gp10b.h>
#include <hal/pramin/pramin_init.h>
#include <hal/bus/bus_gk20a.h>
#include <nvgpu/hw/gk20a/hw_pram_gk20a.h>
#include <nvgpu/hw/gk20a/hw_bus_gk20a.h>
#include "nvgpu_mem.h"
/*
* MEM_ADDRESS represents arbitrary memory start address. Init function will
* allocate MEM_PAGES number of pages in memory.
*/
#define MEM_ADDRESS 0x00040000
#define MEM_PAGES 4U
#define MEM_SIZE (MEM_PAGES * SZ_4K)
/* Amount of test data should be less than or equal to MEM_SIZE */
#define TEST_SIZE (2U * SZ_4K)
#if TEST_SIZE > MEM_SIZE
#error "TEST_SIZE should be less than or equal to MEM_SIZE"
#endif
static struct nvgpu_mem *test_mem;
#ifdef CONFIG_NVGPU_DGPU
/*
* Pramin write callback (for all nvgpu_writel calls).
* No-op as callbacks/functions are already tested in pramin module.
*/
static void writel_access_reg_fn(struct gk20a *g,
struct nvgpu_reg_access *access)
{
/* No-op */
}
/*
* Pramin read callback, similar to the write callback above.
* Dummy return as callbacks/functions are already tested in pramin module.
*/
static void readl_access_reg_fn(struct gk20a *g,
struct nvgpu_reg_access *access)
{
access->value = 0;
}
/*
* Define pramin callbacks to be used during the test. Typically all
* write operations use the same callback, likewise for all read operations.
*/
static struct nvgpu_posix_io_callbacks pramin_callbacks = {
/* Write APIs all can use the same accessor. */
.writel = writel_access_reg_fn,
.writel_check = writel_access_reg_fn,
.bar1_writel = writel_access_reg_fn,
.usermode_writel = writel_access_reg_fn,
/* Likewise for the read APIs. */
.__readl = readl_access_reg_fn,
.readl = readl_access_reg_fn,
.bar1_readl = readl_access_reg_fn,
};
/*
* Populate vidmem allocations.
* These are required for testing APERTURE_VIDMEM branches.
*/
static int init_vidmem_env(struct unit_module *m, struct gk20a *g)
{
int err;
nvgpu_init_pramin(&g->mm);
nvgpu_posix_register_io(g, &pramin_callbacks);
/* Minimum HAL init for PRAMIN */
g->ops.bus.set_bar0_window = gk20a_bus_set_bar0_window;
nvgpu_pramin_ops_init(g);
unit_assert(g->ops.pramin.data032_r != NULL, return -EINVAL);
err = nvgpu_dma_alloc_vid_at(g, TEST_SIZE, test_mem, 0);
if (err != 0) {
return err;
}
return 0;
}
/* Free vidmem allocations */
static void free_vidmem_env(struct unit_module *m, struct gk20a *g)
{
nvgpu_dma_free(g, test_mem);
nvgpu_posix_io_delete_reg_space(g, bus_bar0_window_r());
}
int test_nvgpu_mem_vidmem(struct unit_module *m,
struct gk20a *g, void *args)
{
int err;
u32 memset_pattern = 0x0000005A;
u32 data_size = (16U * sizeof(u32));
u32 *data_src = (u32 *) nvgpu_kmalloc(g, data_size);
if (data_src == NULL) {
free_vidmem_env(m, g);
unit_return_fail(m, "Could not allocate data_src\n");
}
(void) memset(data_src, memset_pattern, (data_size));
/* Reset aperture to invalid, so that init doesn't complain */
test_mem->aperture = APERTURE_INVALID;
err = init_vidmem_env(m, g);
if (err != 0) {
nvgpu_kfree(g, data_src);
unit_return_fail(m, "Vidmem init failed with err=%d\n", err);
}
nvgpu_memset(g, test_mem, 0, memset_pattern, TEST_SIZE);
nvgpu_mem_wr(g, test_mem, 0, memset_pattern);
nvgpu_mem_rd(g, test_mem, 0);
nvgpu_mem_wr_n(g, test_mem, 0, data_src, data_size);
nvgpu_mem_rd_n(g, test_mem, 0, (void *)data_src, data_size);
nvgpu_kfree(g, data_src);
free_vidmem_env(m, g);
/* Reset attributes */
test_mem->aperture = APERTURE_SYSMEM;
return UNIT_SUCCESS;
}
#endif
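/*
 * The checks below pin down how nvgpu_aperture_mask() selects among the three
 * caller-supplied masks. With NVGPU_MM_HONORS_APERTURE enabled, each aperture
 * maps to its own mask; with it disabled, sysmem apertures fall back to the
 * vidmem mask (exercised under CONFIG_NVGPU_DGPU), and invalid apertures
 * BUG() either way. For example, with the masks used here:
 *
 *   test_mem->aperture = APERTURE_SYSMEM;
 *   nvgpu_aperture_mask(g, test_mem, 1, 3, 4)
 *       (returns 1 with the flag enabled, 4 with it disabled)
 */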
int test_nvgpu_aperture_mask(struct unit_module *m,
struct gk20a *g, void *args)
{
u32 sysmem_mask = 1;
u32 sysmem_coh_mask = 3;
u32 vidmem_mask = 4;
u32 ret_ap_mask;
#ifdef CONFIG_NVGPU_DGPU
/* Case: APERTURE_VIDMEM */
test_mem->aperture = APERTURE_VIDMEM;
ret_ap_mask = nvgpu_aperture_mask(g, test_mem, sysmem_mask,
sysmem_coh_mask, vidmem_mask);
if (ret_ap_mask != vidmem_mask) {
unit_return_fail(m, "Vidmem mask returned incorrect\n");
}
#endif
/*
* NVGPU_MM_HONORS_APERTURE enabled
*/
nvgpu_set_enabled(g, NVGPU_MM_HONORS_APERTURE, true);
/* Case: APERTURE_SYSMEM */
test_mem->aperture = APERTURE_SYSMEM;
if (!nvgpu_aperture_is_sysmem(test_mem->aperture)) {
unit_return_fail(m, "Invalid aperture enum\n");
}
ret_ap_mask = nvgpu_aperture_mask(g, test_mem, sysmem_mask,
sysmem_coh_mask, vidmem_mask);
if (ret_ap_mask != sysmem_mask) {
unit_return_fail(m, "MM_HONORS enabled: "
"Incorrect mask returned for sysmem\n");
}
/* Case: APERTURE_SYSMEM_COH */
test_mem->aperture = APERTURE_SYSMEM_COH;
ret_ap_mask = nvgpu_aperture_mask(g, test_mem, sysmem_mask,
sysmem_coh_mask, vidmem_mask);
if (ret_ap_mask != sysmem_coh_mask) {
unit_return_fail(m, "MM_HONORS enabled: "
"Incorrect mask returned for sysmem_coh\n");
}
/* Case: APERTURE_INVALID */
test_mem->aperture = APERTURE_INVALID;
if (!EXPECT_BUG(nvgpu_aperture_mask(g, test_mem, sysmem_mask,
sysmem_coh_mask, vidmem_mask))) {
unit_return_fail(m, "MM_HONORS enabled: Aperture_mask "
"did not BUG() for APERTURE_INVALID as expected\n");
}
/* Case: Bad aperture value. This will cover default return value */
test_mem->aperture = 10;
if (!EXPECT_BUG(nvgpu_aperture_mask(g, test_mem, sysmem_mask,
sysmem_coh_mask, vidmem_mask))) {
unit_return_fail(m, "MM_HONORS enabled: Aperture_mask"
"did not BUG() for junk aperture as expected\n");
}
/*
* NVGPU_MM_HONORS_APERTURE disabled
*/
nvgpu_set_enabled(g, NVGPU_MM_HONORS_APERTURE, false);
#ifdef CONFIG_NVGPU_DGPU
/* Case: APERTURE_SYSMEM */
test_mem->aperture = APERTURE_SYSMEM;
ret_ap_mask = nvgpu_aperture_mask(g, test_mem, sysmem_mask,
sysmem_coh_mask, vidmem_mask);
if (ret_ap_mask != vidmem_mask) {
unit_return_fail(m, "MM_HONORS disabled: "
"Incorrect mask returned for sysmem\n");
}
/* Case: APERTURE_SYSMEM_COH */
test_mem->aperture = APERTURE_SYSMEM_COH;
ret_ap_mask = nvgpu_aperture_mask(g, test_mem, sysmem_mask,
sysmem_coh_mask, vidmem_mask);
if (ret_ap_mask != vidmem_mask) {
unit_return_fail(m, "MM_HONORS disabled: "
"Incorrect mask returned for sysmem_coh\n");
}
#endif
/* Case: APERTURE_INVALID */
test_mem->aperture = APERTURE_INVALID;
if (!EXPECT_BUG(nvgpu_aperture_mask(g, test_mem, sysmem_mask,
sysmem_coh_mask, vidmem_mask))) {
unit_return_fail(m, "MM_HONORS disabled: Aperture_mask "
"did not BUG() for APERTURE_INVALID as expected\n");
}
/* Case: Bad aperture value. This will cover default return value */
test_mem->aperture = 10;
if (!EXPECT_BUG(nvgpu_aperture_mask(g, test_mem, sysmem_mask,
sysmem_coh_mask, vidmem_mask))) {
unit_return_fail(m, "MM_HONORS disabled: Aperture_mask"
"did not BUG() for junk aperture as expected\n");
}
/* Reset attributes */
test_mem->aperture = APERTURE_SYSMEM;
return UNIT_SUCCESS;
}
static const char *aperture_name_str[APERTURE_MAX_ENUM + 1] = {
[APERTURE_INVALID] = "INVAL",
[APERTURE_SYSMEM] = "SYSTEM",
[APERTURE_SYSMEM_COH] = "SYSCOH",
[APERTURE_VIDMEM] = "VIDMEM",
[APERTURE_MAX_ENUM] = "UNKNOWN",
};
int test_nvgpu_aperture_str(struct unit_module *m, struct gk20a *g, void *args)
{
enum nvgpu_aperture ap = 0;
const char *name_str;
while (ap <= APERTURE_MAX_ENUM) {
name_str = nvgpu_aperture_str(ap);
if (strcmp((name_str), aperture_name_str[ap]) != 0) {
unit_return_fail(m,
"Incorrect aperture str for aperture %d\n", ap);
}
ap += 1;
}
return UNIT_SUCCESS;
}
int test_nvgpu_mem_iommu_translate(struct unit_module *m,
struct gk20a *g, void *args)
{
u64 temp_phys;
struct nvgpu_mem_sgl *test_sgl =
(struct nvgpu_mem_sgl *) test_mem->phys_sgt->sgl;
struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g);
/*
* Case: mm is not iommuable
* This is specified in nvgpu_os_posix structure.
*/
temp_phys = nvgpu_mem_iommu_translate(g, test_sgl->phys);
if (temp_phys != test_sgl->phys) {
unit_return_fail(m, "iommu_translate did not return "
"same phys as expected\n");
}
/*
* Case: mm is not iommuable
* But, mm_is_iommuable = true.
*/
p->mm_is_iommuable = true;
g->ops.mm.gmmu.get_iommu_bit = NULL;
temp_phys = nvgpu_mem_iommu_translate(g, test_sgl->phys);
if (temp_phys != test_sgl->phys) {
unit_return_fail(m, "iommu_translate: mm_is_iommuable=true: "
"did not return same phys as expected\n");
}
/*
* Case: mm is iommuable
* Set HAL to enable iommu_translate
*/
g->ops.mm.gmmu.get_iommu_bit = gp10b_mm_get_iommu_bit;
temp_phys = nvgpu_mem_iommu_translate(g, test_sgl->phys);
if (temp_phys == test_sgl->phys) {
unit_return_fail(m,
"iommu_translate did not translate address\n");
}
/* Reset iommuable settings */
p->mm_is_iommuable = false;
return UNIT_SUCCESS;
}
int test_nvgpu_memset_sysmem(struct unit_module *m,
struct gk20a *g, void *args)
{
u32 i;
u32 memset_pattern = 0x0000005A;
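	/*
	 * nvgpu_memset() replicates the low byte of the pattern across the
	 * buffer, so build the expected 32-bit word by repeating the 0x5A
	 * byte four times for the verification loop below.
	 */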
u32 memset_pattern_word =
(memset_pattern << 24) | (memset_pattern << 16) |
(memset_pattern << 8) | (memset_pattern);
u32 memset_words = TEST_SIZE / sizeof(u32);
u32 *test_cpu_va = (u32 *)test_mem->cpu_va;
/* Case: APERTURE_SYSMEM */
test_mem->aperture = APERTURE_SYSMEM;
nvgpu_memset(g, test_mem, 0, memset_pattern, TEST_SIZE);
for (i = 0; i < memset_words; i++) {
if (test_cpu_va[i] != memset_pattern_word) {
unit_return_fail(m,
"Memset pattern not found at offset %d\n", i);
}
}
/* Case: APERTURE_INVALID */
test_mem->aperture = APERTURE_INVALID;
if (!EXPECT_BUG(nvgpu_memset(g, test_mem, 0,
memset_pattern, TEST_SIZE))) {
unit_return_fail(m, "APERTURE_INVALID: "
"nvgpu_memset did not BUG() as expected\n");
}
/* Reset attributes */
test_mem->aperture = APERTURE_SYSMEM;
return UNIT_SUCCESS;
}
int test_nvgpu_mem_wr_rd(struct unit_module *m,
struct gk20a *g, void *args)
{
u32 i;
u32 data_rd, data_words = 16U;
u32 test_offset = 0x400;
u32 data_pattern = 0x5A5A5A5A;
u32 *test_cpu_va = (u32 *)test_mem->cpu_va;
u32 data_size = (data_words * sizeof(u32));
u32 *data_src, *data_rd_buf;
u64 data_rd_pair;
	/* Test nvgpu_mem_is_sysmem() */
/* Case: APERTURE_INVALID */
test_mem->aperture = APERTURE_INVALID;
if (nvgpu_mem_is_sysmem(test_mem) != false) {
unit_return_fail(m, "nvgpu_mem_is_sysmem "
"returns true for APERTURE_INVALID\n");
}
if (nvgpu_mem_is_valid(test_mem) != false) {
unit_return_fail(m, "nvgpu_mem_is_valid "
"returns true for APERTURE_INVALID\n");
}
/* Case: APERTURE_SYSMEM_COH */
test_mem->aperture = APERTURE_SYSMEM_COH;
	/* Confirm nvgpu_mem is set to SYSMEM */
if (nvgpu_mem_is_sysmem(test_mem) != true) {
unit_return_fail(m, "nvgpu_mem_is_sysmem "
"returns false for APERTURE_SYSMEM_COH\n");
}
/* Case: APERTURE_SYSMEM */
test_mem->aperture = APERTURE_SYSMEM;
if (nvgpu_mem_is_sysmem(test_mem) != true) {
unit_return_fail(m, "nvgpu_mem_is_sysmem "
"returns false for APERTURE_SYSMEM\n");
}
	/* Confirm nvgpu_mem is allocated */
if (nvgpu_mem_is_valid(test_mem) != true) {
unit_return_fail(m, "nvgpu_mem_is_valid "
"returns false for APERTURE_SYSMEM\n");
}
/* Test read and write functions */
/* Case: APERTURE_SYSMEM */
nvgpu_mem_wr(g, test_mem, test_offset, data_pattern);
if (test_cpu_va[(test_offset / (u32)sizeof(u32))] != data_pattern) {
unit_err(m,
"mem_wr incorrect write at offset %d\n", test_offset);
goto return_fail;
}
data_rd = nvgpu_mem_rd(g, test_mem, test_offset);
if (data_rd != data_pattern) {
unit_err(m,
"mem_rd data at offset %d incorrect\n", test_offset);
goto return_fail;
}
data_src = (u32 *) nvgpu_kmalloc(g, data_size);
if (data_src == NULL) {
unit_err(m, "Could not allocate data_src\n");
goto return_fail;
}
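	/*
	 * memset() uses only the low byte of data_pattern (0x5A); because
	 * the pattern repeats that byte, each u32 in data_src still reads
	 * back as 0x5A5A5A5A.
	 */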
(void) memset(data_src, data_pattern, (data_size));
nvgpu_mem_wr_n(g, test_mem, 0, data_src, data_size);
for (i = 0; i < data_words; i++) {
if (test_cpu_va[i] != data_src[i]) {
unit_err(m,
"mem_wr_n incorrect write at offset %d\n", i);
goto free_data_src;
}
}
data_rd_buf = (u32 *) nvgpu_kzalloc(g, data_size);
if (data_rd_buf == NULL) {
unit_err(m, "Could not allocate data_rd_buf\n");
goto free_data_src;
}
nvgpu_mem_rd_n(g, test_mem, 0, (void *)data_rd_buf, data_size);
for (i = 0; i < data_words; i++) {
if (data_rd_buf[i] != data_src[i]) {
unit_err(m,
"mem_rd_n data at offset %d incorrect\n", i);
goto free_buffers;
}
}
data_rd_pair = nvgpu_mem_rd32_pair(g, test_mem, 0, 1);
if (data_rd_pair != ((u64)data_pattern |
((u64)data_pattern << 32ULL))) {
unit_err(m, "nvgpu_mem_rd32_pair pattern incorrect\n");
goto free_buffers;
}
/* Case: APERTURE_INVALID */
test_mem->aperture = APERTURE_INVALID;
if (!EXPECT_BUG(nvgpu_mem_wr(g, test_mem, test_offset, data_pattern))) {
unit_err(m,
"APERTURE_INVALID: mem_wr did not BUG() as expected\n");
goto free_buffers;
}
if (!EXPECT_BUG(nvgpu_mem_rd(g, test_mem, test_offset))) {
unit_err(m, "APERTURE_INVALID: "
"mem_rd did not BUG() as expected\n");
goto free_buffers;
}
if (!EXPECT_BUG(nvgpu_mem_wr_n(g, test_mem, 0, data_src, data_size))) {
unit_err(m, "APERTURE_INVALID: "
"mem_wr_n did not BUG() as expected\n");
goto free_buffers;
}
if (!EXPECT_BUG(nvgpu_mem_rd_n(g, test_mem, 0,
(void *)data_rd_buf, data_size))) {
unit_err(m, "APERTURE_INVALID: "
"mem_rd_n did not BUG() as expected\n");
goto free_buffers;
}
nvgpu_kfree(g, data_src);
data_src = NULL;
nvgpu_kfree(g, data_rd_buf);
data_rd_buf = NULL;
/* Reset attribute */
test_mem->aperture = APERTURE_SYSMEM;
return UNIT_SUCCESS;
free_buffers:
nvgpu_kfree(g, data_rd_buf);
free_data_src:
nvgpu_kfree(g, data_src);
return_fail:
return UNIT_FAIL;
}
/*
* Test: test_nvgpu_mem_phys_ops
*/
int test_nvgpu_mem_phys_ops(struct unit_module *m,
struct gk20a *g, void *args)
{
u64 ret;
struct nvgpu_gmmu_attrs *attrs = NULL;
struct nvgpu_sgt *test_sgt = test_mem->phys_sgt;
void *test_sgl = test_sgt->sgl;
void *temp_sgl = test_sgt->ops->sgl_next(test_sgl);
if (temp_sgl != NULL) {
unit_return_fail(m,
"nvgpu_mem_phys_sgl_next not NULL as expected\n");
}
ret = test_sgt->ops->sgl_dma(test_sgl);
if (ret != MEM_ADDRESS) {
unit_return_fail(m,
"nvgpu_mem_phys_sgl_dma not equal to phys as expected\n");
}
ret = test_sgt->ops->sgl_phys(g, test_sgl);
if (ret != MEM_ADDRESS) {
unit_return_fail(m,
"nvgpu_mem_phys_sgl_phys not equal to phys as expected\n");
}
ret = test_sgt->ops->sgl_ipa(g, test_sgl);
if (ret != MEM_ADDRESS) {
unit_return_fail(m, "nvgpu_mem_phys_sgl_ipa incorrect\n");
}
ret = test_sgt->ops->sgl_ipa_to_pa(g, test_sgl, 0ULL, NULL);
if (ret != 0ULL) {
unit_return_fail(m,
"nvgpu_mem_phys_sgl_ipa_to_pa not zero as expected\n");
}
ret = test_sgt->ops->sgl_length(test_sgl);
if (ret != MEM_SIZE) {
unit_return_fail(m, "nvgpu_mem_phys_sgl_length incorrect\n");
}
ret = test_sgt->ops->sgl_gpu_addr(g, test_sgl, attrs);
if (ret != MEM_ADDRESS) {
unit_return_fail(m, "nvgpu_mem_phys_sgl_gpu_addr incorrect\n");
}
if (test_sgt->ops->sgt_iommuable != NULL) {
unit_return_fail(m, "physical nvgpu_mems is not IOMMU'able\n");
}
/*
* Test nvgpu_mem_phys_sgt_free - No-op
*/
test_sgt->ops->sgt_free(g, test_sgt);
return UNIT_SUCCESS;
}
int test_nvgpu_mem_create_from_phys(struct unit_module *m,
struct gk20a *g, void *args)
{
int err;
struct nvgpu_posix_fault_inj *kmem_fi =
nvgpu_kmem_get_fault_injection();
test_mem = (struct nvgpu_mem *)
nvgpu_kzalloc(g, sizeof(struct nvgpu_mem));
if (test_mem == NULL) {
unit_return_fail(m, "Couldn't allocate memory for nvgpu_mem\n");
}
/*
* Test 1 - Enable SW fault injection and check that init function
* fails with -ENOMEM.
*/
nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
err = nvgpu_mem_create_from_phys(g, test_mem, MEM_ADDRESS, MEM_PAGES);
if (err != -ENOMEM) {
unit_return_fail(m,
"nvgpu_mem_create_from_phys didn't fail as expected\n");
}
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
/*
* Test 2 - Enable SW fault injection for second allocation and
* check that init function fails with -ENOMEM.
*/
nvgpu_posix_enable_fault_injection(kmem_fi, true, 1);
err = nvgpu_mem_create_from_phys(g, test_mem, MEM_ADDRESS, MEM_PAGES);
if (err != -ENOMEM) {
unit_return_fail(m,
"nvgpu_mem_create_from_phys didn't fail as expected\n");
}
nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
/*
* Test 3 - Check that physical memory is inited successfully
* Use this allocated memory for next tests in module.
*/
err = nvgpu_mem_create_from_phys(g, test_mem, MEM_ADDRESS, MEM_PAGES);
if (err != 0) {
unit_return_fail(m, "nvgpu_mem_create_from_phys init failed\n");
}
if (nvgpu_mem_get_phys_addr(g, test_mem) != ((u64) test_mem->cpu_va)) {
unit_return_fail(m, "invalid physical address\n");
}
if (nvgpu_mem_get_addr(g, test_mem) != ((u64) test_mem->cpu_va)) {
unit_return_fail(m, "invalid nvgpu_mem_get_addr address\n");
}
/* Allocate cpu_va for later tests */
test_mem->cpu_va = nvgpu_kzalloc(g, MEM_SIZE);
if (test_mem->cpu_va == NULL) {
nvgpu_kfree(g, test_mem);
unit_return_fail(m, "Could not allocate memory for cpu_va\n");
}
return UNIT_SUCCESS;
}
int test_nvgpu_mem_create_from_mem(struct unit_module *m, struct gk20a *g,
void *args)
{
struct nvgpu_mem dest_mem;
nvgpu_mem_create_from_mem(g, &dest_mem, test_mem, 0, 2);
unit_assert(dest_mem.cpu_va == test_mem->cpu_va, goto done);
unit_assert(dest_mem.size == (2 * NVGPU_CPU_PAGE_SIZE), goto done);
	unit_assert((dest_mem.mem_flags & NVGPU_MEM_FLAG_SHADOW_COPY) ==
			NVGPU_MEM_FLAG_SHADOW_COPY, goto done);
unit_assert(dest_mem.aperture == APERTURE_SYSMEM, goto done);
return UNIT_SUCCESS;
done:
unit_return_fail(m, "%s: failed!\n", __func__);
}
int test_free_nvgpu_mem(struct unit_module *m, struct gk20a *g, void *args)
{
test_mem->aperture = APERTURE_SYSMEM;
nvgpu_dma_free(g, test_mem);
nvgpu_kfree(g, test_mem);
return UNIT_SUCCESS;
}
struct unit_module_test nvgpu_mem_tests[] = {
/*
* Init test should run first in order to use allocated memory.
*/
UNIT_TEST(mem_create_from_phys, test_nvgpu_mem_create_from_phys, NULL, 0),
/*
* Tests for SYSMEM
*/
UNIT_TEST(nvgpu_mem_phys_ops, test_nvgpu_mem_phys_ops, NULL, 2),
UNIT_TEST(nvgpu_memset_sysmem, test_nvgpu_memset_sysmem, NULL, 0),
UNIT_TEST(nvgpu_mem_wr_rd, test_nvgpu_mem_wr_rd, NULL, 0),
UNIT_TEST(mem_iommu_translate, test_nvgpu_mem_iommu_translate, NULL, 2),
/*
* Tests covering VIDMEM branches
*/
UNIT_TEST(nvgpu_aperture_mask, test_nvgpu_aperture_mask, NULL, 0),
UNIT_TEST(nvgpu_aperture_name, test_nvgpu_aperture_str, NULL, 0),
UNIT_TEST(create_mem_from_mem, test_nvgpu_mem_create_from_mem, NULL, 0),
#ifdef CONFIG_NVGPU_DGPU
UNIT_TEST(nvgpu_mem_vidmem, test_nvgpu_mem_vidmem, NULL, 2),
#endif
/*
* Free test should be executed at the end to free allocated memory.
	 * As nvgpu_mem does not have an explicit free function for sysmem,
* this test doesn't cover any nvgpu_mem code.
*/
UNIT_TEST(test_free_nvgpu_mem, test_free_nvgpu_mem, NULL, 0),
};
UNIT_MODULE(nvgpu_mem, nvgpu_mem_tests, UNIT_PRIO_NVGPU_TEST);

View File

@@ -0,0 +1,267 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_NVGPU_MEM_H
#define UNIT_NVGPU_MEM_H
struct gk20a;
struct unit_module;
/** @addtogroup SWUTS-mm-nvgpu-mem
* @{
*
* Software Unit Test Specification for mm.nvgpu_mem
*/
/**
* Test specification for: test_nvgpu_mem_create_from_phys
*
 * Description: Initialize an nvgpu_mem for a given size and base address.
*
* Test Type: Feature, Error injection
*
* Targets: nvgpu_mem_create_from_phys, nvgpu_mem_get_phys_addr,
* nvgpu_mem_get_addr
*
* Input: None
*
* Steps:
* - Initialize nvgpu_mem
* - Allocate memory for nvgpu_mem sgt and sgl
 * - Initialize nvgpu_mem structure members to appropriate values.
* - Allocate cpu_va memory for later tests
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_mem_create_from_phys(struct unit_module *m,
struct gk20a *g, void *args);
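/*
 * Illustrative call sequence (a minimal sketch; MEM_ADDRESS and MEM_PAGES
 * are constants local to this unit test, not part of the API):
 *
 *   struct nvgpu_mem mem = { };
 *   int err = nvgpu_mem_create_from_phys(g, &mem, MEM_ADDRESS, MEM_PAGES);
 *   if (err == 0) {
 *       u64 pa = nvgpu_mem_get_phys_addr(g, &mem);
 *   }
 */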
/**
* Test specification for: test_nvgpu_mem_phys_ops
*
* Description: Check all nvgpu_sgt_ops functions
*
* Test Type: Feature
*
* Targets: nvgpu_mem.nvgpu_sgt.nvgpu_sgt_ops.sgl_next,
* nvgpu_mem.nvgpu_sgt.nvgpu_sgt_ops.sgl_dma,
* nvgpu_mem.nvgpu_sgt.nvgpu_sgt_ops.sgl_phys,
* nvgpu_mem.nvgpu_sgt.nvgpu_sgt_ops.sgl_ipa,
* nvgpu_mem.nvgpu_sgt.nvgpu_sgt_ops.sgl_ipa_to_pa,
* nvgpu_mem.nvgpu_sgt.nvgpu_sgt_ops.sgl_length,
* nvgpu_mem.nvgpu_sgt.nvgpu_sgt_ops.sgl_gpu_addr,
* nvgpu_mem.nvgpu_sgt.nvgpu_sgt_ops.sgt_free
*
* Input: test_nvgpu_mem_create_from_phys
*
* Steps:
* - Execute nvgpu_sgt_ops functions
* - Check if each nvgpu_sgt_ops function executes and returns expected value.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_mem_phys_ops(struct unit_module *m, struct gk20a *g, void *args);
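/*
 * Illustrative SGL walk (a sketch based on the ops exercised by this test;
 * a phys-backed nvgpu_mem carries a single-entry list, so sgl_next()
 * returns NULL after the first element):
 *
 *   struct nvgpu_sgt *sgt = mem.phys_sgt;
 *   void *sgl;
 *   for (sgl = sgt->sgl; sgl != NULL; sgl = sgt->ops->sgl_next(sgl)) {
 *       u64 pa = sgt->ops->sgl_phys(g, sgl);
 *       u64 len = sgt->ops->sgl_length(sgl);
 *   }
 */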
/**
* Test specification for: test_nvgpu_memset_sysmem
*
* Description: Store pre-defined pattern at allocated nvgpu_mem address
*
* Test Type: Feature
*
* Targets: nvgpu_memset
*
* Input: test_nvgpu_mem_create_from_phys
*
* Steps:
* - Store data pattern and check value for multiple cases
* - Execute below steps for APERTURE_SYSMEM and APERTURE_INVALID cases
* - Using nvgpu_memset() store pre-defined data pattern in part of allocated
* memory
* - Check if set data pattern is correctly stored
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_memset_sysmem(struct unit_module *m,
struct gk20a *g, void *args);
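/*
 * Illustrative usage (a sketch mirroring the test's call; the pattern is a
 * byte value that nvgpu_memset() replicates across the given range):
 *
 *   nvgpu_memset(g, &mem, 0, 0x5A, mem.size);
 */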
/**
* Test specification for: test_nvgpu_mem_wr_rd
*
* Description: Test read and write functions for sysmem
*
* Test Type: Feature
*
* Targets: nvgpu_mem_is_sysmem, nvgpu_mem_is_valid, nvgpu_mem_wr, nvgpu_mem_rd,
* nvgpu_mem_wr_n, nvgpu_mem_rd_n, nvgpu_mem_rd32_pair, nvgpu_mem_rd32,
* nvgpu_mem_wr32
*
* Input: test_nvgpu_mem_create_from_phys
*
* Steps:
* - Check if memory is of sysmem type
* - Check if memory aperture is not invalid
* - Execute below steps for APERTURE_SYSMEM and APERTURE_INVALID cases
* - Execute all write functions and confirm data written
* - Write preset data pattern to allocated nvgpu_mem
* - Confirm data written at the memory location is correct
* - Execute read functions and confirm data read
* - Read data from a segment of allocated memory
* - Confirm read data is correct
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_mem_wr_rd(struct unit_module *m, struct gk20a *g, void *args);
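/*
 * Illustrative word-sized access (a sketch mirroring the calls made by
 * this test; offsets are in bytes):
 *
 *   nvgpu_mem_wr(g, &mem, 0x400, 0x5A5A5A5A);
 *   u32 v = nvgpu_mem_rd(g, &mem, 0x400);
 */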
/**
* Test specification for: test_nvgpu_mem_iommu_translate
*
 * Description: Test IOMMU translation of a given physical address
*
* Test Type: Feature
*
* Targets: nvgpu_mem_iommu_translate
*
* Input: test_nvgpu_mem_create_from_phys
*
* Steps:
 * - Check translation when the mm is not IOMMU-able: the returned address
 *   equals the input physical address.
 * - Set the get_iommu_bit HAL and confirm the address gets translated.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_mem_iommu_translate(struct unit_module *m,
struct gk20a *g, void *args);
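/*
 * Illustrative translation (a sketch; per this test, the address is
 * returned unchanged unless the mm is IOMMU-able and the
 * gops.mm.gmmu.get_iommu_bit HAL is set, in which case the IOMMU bit is
 * folded into the address):
 *
 *   u64 io_addr = nvgpu_mem_iommu_translate(g, phys_addr);
 */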
/**
* Test specification for: test_nvgpu_aperture_mask
*
 * Description: Check that nvgpu_aperture_mask returns the mask
 * corresponding to the nvgpu_mem aperture
*
* Test Type: Feature
*
* Targets: nvgpu_aperture_mask, nvgpu_aperture_mask_raw,
* nvgpu_aperture_is_sysmem
*
* Input: test_nvgpu_mem_create_from_phys
*
* Steps:
* - Execute these steps for all the aperture types
* - Check if nvgpu_mem aperture mask values returned are as expected
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_aperture_mask(struct unit_module *m,
struct gk20a *g, void *args);
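/*
 * Illustrative call (a sketch of the behavior this test observes: with
 * NVGPU_MM_HONORS_APERTURE enabled the mask matching mem.aperture is
 * returned; with it disabled vidmem_mask is returned for any valid
 * aperture; APERTURE_INVALID triggers BUG() either way):
 *
 *   u32 mask = nvgpu_aperture_mask(g, &mem, sysmem_mask, sysmem_coh_mask,
 *                                  vidmem_mask);
 */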
/**
* Test specification for: test_nvgpu_aperture_str
*
* Description: Check nvgpu_mem aperture name string
*
* Test Type: Feature
*
* Targets: nvgpu_aperture_str
*
* Input: test_nvgpu_mem_create_from_phys
*
* Steps:
* - Run nvgpu_aperture_str function for all aperture values.
* - Confirm that returned aperture name is correct as per input aperture.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_aperture_str(struct unit_module *m, struct gk20a *g, void *args);
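/*
 * Illustrative lookup (a sketch; the expected strings follow the
 * aperture_name_str table in the test source, e.g. "SYSTEM" for
 * APERTURE_SYSMEM):
 *
 *   const char *name = nvgpu_aperture_str(APERTURE_SYSMEM);
 */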
/**
* Test specification for: test_nvgpu_mem_create_from_mem
*
* Description: Create nvgpu_mem from another nvgpu_mem struct
*
* Test Type: Feature
*
* Targets: nvgpu_mem_create_from_mem
*
* Input: test_nvgpu_mem_create_from_phys
*
* Steps:
 * - Create an nvgpu_mem structure spanning 2 pages of the global nvgpu_mem
 *   struct.
 * - Confirm that the destination nvgpu_mem address and size correspond to
 *   2 pages of the global nvgpu_mem structure, with SYSMEM aperture.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_mem_create_from_mem(struct unit_module *m, struct gk20a *g,
void *args);
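/*
 * Illustrative window creation (a sketch; per this test, the new
 * nvgpu_mem aliases the source cpu_va and is flagged with
 * NVGPU_MEM_FLAG_SHADOW_COPY):
 *
 *   struct nvgpu_mem window;
 *   nvgpu_mem_create_from_mem(g, &window, &src, 0, 2);
 *   window.size is then 2 * NVGPU_CPU_PAGE_SIZE
 */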
/**
* Test specification for: test_nvgpu_mem_vidmem
*
* Description: Test read and write memory functions for vidmem
*
* Test Type: Feature
*
* Targets: nvgpu_mem_is_sysmem, nvgpu_mem_is_valid, nvgpu_mem_wr, nvgpu_mem_rd,
* nvgpu_mem_wr_n, nvgpu_mem_rd_n, nvgpu_mem_rd32_pair, nvgpu_mem_rd32,
* nvgpu_mem_wr32
*
* Input: test_nvgpu_mem_create_from_phys
*
* Steps:
 * - Execute read and write calls for vidmem, which are converted to PRAMIN
 *   accesses.
 * - The PRAMIN functions themselves are tested in the pramin module.
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_nvgpu_mem_vidmem(struct unit_module *m, struct gk20a *g, void *args);
/**
* Test specification for: test_free_nvgpu_mem
*
 * Description: Clean up memory allocated for the nvgpu_mem structure
*
* Test Type: Other (cleanup)
*
* Targets: None
*
* Input: test_nvgpu_mem_create_from_phys
*
* Steps:
* - Free allocated memory
*
* Output: Returns SUCCESS if the steps above were executed successfully. FAIL
* otherwise.
*/
int test_free_nvgpu_mem(struct unit_module *m, struct gk20a *g, void *args);
#endif /* UNIT_NVGPU_MEM_H */

View File

@@ -0,0 +1,26 @@
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
.SUFFIXES:
OBJS = nvgpu_sgt.o
MODULE = nvgpu_sgt
include ../../Makefile.units

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=nvgpu_sgt
include $(NV_COMPONENT_DIR)/../../Makefile.units.common.interface.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:

View File

@@ -0,0 +1,35 @@
################################### tell Emacs this is a -*- makefile-gmake -*-
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# tmake for SW Mobile component makefile
#
###############################################################################
NVGPU_UNIT_NAME=nvgpu_sgt
include $(NV_COMPONENT_DIR)/../../Makefile.units.common.tmk
# Local Variables:
# indent-tabs-mode: t
# tab-width: 8
# End:
# vi: set tabstop=8 noexpandtab:
