gpu: nvgpu: unit: Add nvgpu_pd_free() VC test

Add the verification criteria test for pd_cache's nvgpu_pd_free()
function.
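
The new pd_reusability test checks NVGPU-RQCD-118.C1: PDs released
with nvgpu_pd_free() must be re-usable from the pd_cache rather than
forcing a fresh DMA page allocation. The core of the check, condensed
from the test added below, is sketched here (the helper name
pd_reuse_sketch is illustrative only, and error-path cleanup is
elided; the real test unwinds through a cleanup label):

  static int pd_reuse_sketch(struct vm_gk20a *vm, u32 pd_size)
  {
          u32 i, n = PAGE_SIZE / pd_size;
          struct nvgpu_gmmu_pd pds[n];
          struct nvgpu_posix_fault_inj *dma_fi =
                  nvgpu_dma_alloc_get_fault_injection();

          /* Permit exactly one more DMA alloc: the single page that
           * backs all n PDs. Any later DMA page alloc fails. */
          nvgpu_posix_enable_fault_injection(dma_fi, true, 1);

          /* Fill one page with PDs. */
          for (i = 0U; i < n; i++)
                  if (nvgpu_pd_alloc(vm, &pds[i], pd_size) != 0)
                          return UNIT_FAIL;

          /* Free all but pds[0] so the backing page stays cached. */
          for (i = 1U; i < n; i++)
                  nvgpu_pd_free(vm, &pds[i]);

          /* Re-allocate: success proves the freed slots were re-used,
           * since allocating a fresh DMA page would be fault-injected. */
          for (i = 1U; i < n; i++)
                  if (nvgpu_pd_alloc(vm, &pds[i], pd_size) != 0)
                          return UNIT_FAIL;

          for (i = 0U; i < n; i++)
                  nvgpu_pd_free(vm, &pds[i]);
          nvgpu_posix_enable_fault_injection(dma_fi, false, 0);
          return UNIT_SUCCESS;
  }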

JIRA NVGPU-1323

Change-Id: Ida7b4c1d071d79487c1e0cbdd2a8dd5f36f6e938
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2001292
Reviewed-by: Nicolas Benech <nbenech@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Alex Waterman <alexw@nvidia.com>
Date:      2019-01-22 15:11:56 -08:00
Committer: mobile promotions
Commit:    a31980359f (parent: 2d496942c5)

3 changed files with 74 additions and 12 deletions

@@ -27,6 +27,7 @@
  * Unit requirement test specification unique IDs.
  */
 #define PD_CACHE_REQ1_UID "6439202"
+#define PD_CACHE_REQ2_UID "6898078"
 #define PAGE_TABLE_REQ1_UID "6439094"
 #endif

@@ -488,6 +488,13 @@
    "uid": "6439202",
    "unit": "pd_cache"
   },
+  {
+   "test": "pd_reusability",
+   "req": "NVGPU-RQCD-118.C1",
+   "vc": "V3",
+   "uid": "6898078",
+   "unit": "pd_cache"
+  },
   {
    "test": "valid_alloc",
    "req": "NVGPU-RQCD-68.C1,2",

@@ -561,12 +561,14 @@ static int test_pd_cache_fini(struct unit_module *m,
 /*
  * Requirement NVGPU-RQCD-68.C1
+ *
+ * Valid/Invalid: The pd_cache does/does not allocate a suitable DMA'able
+ * buffer of memory.
+ *
  * Requirement NVGPU-RQCD-68.C2
  *
- * Valid/Invalid: The pd_cache does/does not allocate a suitable DMA'able
- * buffer of memory.
  * Valid/Invalid: The allocated PD is/is not sufficiently aligned for use by
  * the GMMU.
  */
 static int test_pd_cache_valid_alloc(struct unit_module *m,
                                      struct gk20a *g, void *args)
@@ -629,7 +631,10 @@ fail:
 }
 
 /*
- * Ensure that we can efficiently pack N pd_size PDs into a page.
+ * Requirement NVGPU-RQCD-68.C3
+ *
+ * C3: Valid/Invalid: 16 256B, 8 512B, etc, PDs can/cannot fit into a single
+ * page sized DMA allocation.
  */
 static int do_test_pd_cache_packing_size(struct unit_module *m, struct gk20a *g,
                                          struct vm_gk20a *vm, u32 pd_size)
@@ -684,17 +689,64 @@
 }
 
 /*
- * Requirement NVGPU-RQCD-68.C3
+ * Requirement NVGPU-RQCD-118.C1
  *
- * C3: Valid/Invalid: 16 256B, 8 512B, etc, PDs can/cannot fit into a single
- * page sized DMA allocation.
+ * C1: Valid/Invalid: Previously allocated PD entries are/are not re-usable.
  */
-static int test_pd_cache_packing(struct unit_module *m,
-                                 struct gk20a *g, void *args)
+static int do_test_pd_reusability(struct unit_module *m, struct gk20a *g,
+                                  struct vm_gk20a *vm, u32 pd_size)
+{
+        int err = UNIT_SUCCESS;
+        u32 i;
+        u32 n = PAGE_SIZE / pd_size;
+        struct nvgpu_gmmu_pd pds[n];
+        struct nvgpu_posix_fault_inj *dma_fi =
+                nvgpu_dma_alloc_get_fault_injection();
+
+        nvgpu_posix_enable_fault_injection(dma_fi, true, 1);
+
+        for (i = 0U; i < n; i++) {
+                err = nvgpu_pd_alloc(vm, &pds[i], pd_size);
+                if (err) {
+                        err = UNIT_FAIL;
+                        goto cleanup;
+                }
+        }
+
+        /* Free all but one PD so that we ensure the page stays cached. */
+        for (i = 1U; i < n; i++) {
+                nvgpu_pd_free(vm, &pds[i]);
+        }
+
+        /* Re-alloc. Will get a -ENOMEM if another page is alloced. */
+        for (i = 1U; i < n; i++) {
+                err = nvgpu_pd_alloc(vm, &pds[i], pd_size);
+                if (err) {
+                        err = UNIT_FAIL;
+                        goto cleanup;
+                }
+        }
+
+cleanup:
+        n = i;
+
+        /* Really cleanup. */
+        for (i = 0U; i < n; i++) {
+                nvgpu_pd_free(vm, &pds[i]);
+        }
+
+        nvgpu_posix_enable_fault_injection(dma_fi, false, 0);
+
+        return err;
+}
+
+static int test_per_pd_size(struct unit_module *m,
+                            struct gk20a *g, void *args)
 {
         int err;
         u32 pd_size;
         struct vm_gk20a vm;
+        int (*fn)(struct unit_module *m, struct gk20a *g,
+                  struct vm_gk20a *vm, u32 pd_size) = args;
 
         err = init_pd_cache(m, g, &vm);
         if (err != UNIT_SUCCESS) {
@@ -703,7 +755,7 @@ static int test_pd_cache_packing(struct unit_module *m,
 
         pd_size = 256U; /* 256 bytes is the min PD size. */
         while (pd_size < PAGE_SIZE) {
-                err = do_test_pd_cache_packing_size(m, g, &vm, pd_size);
+                err = fn(m, g, &vm, pd_size);
                 if (err) {
                         err = UNIT_FAIL;
                         goto cleanup;
@@ -742,7 +794,9 @@ struct unit_module_test pd_cache_tests[] = {
         UNIT_TEST_REQ("NVGPU-RQCD-68.C1,2", PD_CACHE_REQ1_UID, "V4",
                       valid_alloc, test_pd_cache_valid_alloc, NULL),
         UNIT_TEST_REQ("NVGPU-RQCD-68.C3", PD_CACHE_REQ1_UID, "V4",
-                      pd_packing, test_pd_cache_packing, NULL),
+                      pd_packing, test_per_pd_size, do_test_pd_cache_packing_size),
+        UNIT_TEST_REQ("NVGPU-RQCD-118.C1", PD_CACHE_REQ2_UID, "V3",
+                      pd_reusability, test_per_pd_size, do_test_pd_reusability),
 
         /*
          * Direct allocs.