gpu: nvgpu: unit: improve coverage for engines

Improve branch coverage for the following functions:
- nvgpu_engine_get_active_eng_info
- nvgpu_engine_get_ids
- nvgpu_ce_engine_interrupt_mask
- nvgpu_engine_get_gr_runlist_id

Add unit tests for the following functions:
- nvgpu_engine_get_fast_ce_runlist_id
- nvgpu_engine_is_valid_runlist_id
- nvgpu_engine_id_to_mmu_fault_id
- nvgpu_engine_mmu_fault_id_to_engine_id
- nvgpu_engine_get_mask_on_id
- nvgpu_engine_get_id_and_type
- nvgpu_engine_find_busy_doing_ctxsw
- nvgpu_engine_get_runlist_busy_engines
- nvgpu_engine_mmu_fault_id_to_veid
- nvgpu_engine_mmu_fault_id_to_eng_id_and_veid
- nvgpu_engine_mmu_fault_id_to_eng_ve_pbdma_id

Jira NVGPU-4511

Change-Id: Ib340df17468ff3447e271a86af9a47a067f6ad11
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2262222
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Thomas Fleury
2019-12-11 14:25:04 -05:00
committed by Alex Waterman
parent 45b99f67b2
commit 55510f266d
11 changed files with 1136 additions and 38 deletions

View File

@@ -881,20 +881,17 @@ u32 nvgpu_engine_find_busy_doing_ctxsw(struct gk20a *g,
struct nvgpu_engine_status_info engine_status;
for (i = 0U; i < g->fifo.num_engines; i++) {
bool failing_engine;
engine_id = g->fifo.active_engines_list[i];
g->ops.engine_status.read_engine_status_info(g, engine_id,
&engine_status);
/* we are interested in busy engines */
failing_engine = engine_status.is_busy;
/* ..that are doing context switch */
failing_engine = failing_engine &&
nvgpu_engine_status_is_ctxsw(&engine_status);
if (!failing_engine) {
/*
* we are interested in busy engines that
* are doing context switch
*/
if (!engine_status.is_busy ||
!nvgpu_engine_status_is_ctxsw(&engine_status)) {
engine_id = NVGPU_INVALID_ENG_ID;
continue;
}
@@ -1021,7 +1018,6 @@ u32 nvgpu_engine_mmu_fault_id_to_eng_id_and_veid(struct gk20a *g,
struct nvgpu_engine_info *engine_info;
struct nvgpu_fifo *f = &g->fifo;
for (i = 0U; i < f->num_engines; i++) {
engine_id = f->active_engines_list[i];
engine_info = &g->fifo.engine_info[engine_id];

View File

@@ -357,15 +357,34 @@ nvgpu_engine_act_interrupt_mask
nvgpu_engine_check_valid_id
nvgpu_engine_cleanup_sw
nvgpu_engine_enum_from_type
nvgpu_engine_find_busy_doing_ctxsw
nvgpu_engine_get_active_eng_info
nvgpu_engine_get_all_ce_reset_mask
nvgpu_engine_get_fast_ce_runlist_id
nvgpu_engine_get_gr_id
nvgpu_engine_get_gr_runlist_id
nvgpu_engine_get_id_and_type
nvgpu_engine_get_ids
nvgpu_engine_init_info
nvgpu_engine_get_mask_on_id
nvgpu_engine_get_runlist_busy_engines
nvgpu_engine_id_to_mmu_fault_id
nvgpu_engine_is_valid_runlist_id
nvgpu_engine_init_info
nvgpu_engine_mmu_fault_id_to_engine_id
nvgpu_engine_mmu_fault_id_to_eng_id_and_veid
nvgpu_engine_mmu_fault_id_to_eng_ve_pbdma_id
nvgpu_engine_mmu_fault_id_to_veid
nvgpu_engine_setup_sw
nvgpu_engine_status_get_ctx_id_type
nvgpu_engine_status_get_next_ctx_id_type
nvgpu_engine_status_is_ctx_type_tsg
nvgpu_engine_status_is_next_ctx_type_tsg
nvgpu_engine_status_is_ctxsw
nvgpu_engine_status_is_ctxsw_invalid
nvgpu_engine_status_is_ctxsw_load
nvgpu_engine_status_is_ctxsw_save
nvgpu_engine_status_is_ctxsw_switch
nvgpu_engine_status_is_ctxsw_valid
nvgpu_falcon_hs_ucode_load_bootstrap
nvgpu_falcon_copy_to_dmem
nvgpu_falcon_copy_to_imem

View File

@@ -1,6 +1,9 @@
INPUT += ../../../userspace/SWUTS.h
INPUT += ../../../userspace/units/ce/nvgpu-ce.h
INPUT += ../../../userspace/units/cg/nvgpu-cg.h
INPUT += ../../../userspace/units/enabled/nvgpu-enabled.h
INPUT += ../../../userspace/units/interface/bsearch/bsearch.h
INPUT += ../../../userspace/units/interface/lock/lock.h
@@ -21,6 +24,7 @@ INPUT += ../../../userspace/units/fifo/channel/gm20b/nvgpu-channel-gm20b.h
INPUT += ../../../userspace/units/fifo/channel/gv11b/nvgpu-channel-gv11b.h
INPUT += ../../../userspace/units/fifo/ctxsw_timeout/gv11b/nvgpu-ctxsw-timeout-gv11b.h
INPUT += ../../../userspace/units/fifo/engine/nvgpu-engine.h
INPUT += ../../../userspace/units/fifo/engine/nvgpu-engine-status.h
INPUT += ../../../userspace/units/fifo/engine/gm20b/nvgpu-engine-gm20b.h
INPUT += ../../../userspace/units/fifo/engine/gp10b/nvgpu-engine-gp10b.h
INPUT += ../../../userspace/units/fifo/engine/gv100/nvgpu-engine-gv100.h

View File

@@ -2273,12 +2273,42 @@
"unit": "nvgpu_engine",
"test_level": 0
},
{
"test": "test_engine_find_busy_doing_ctxsw",
"case": "find_busy_doing_ctxsw",
"unit": "nvgpu_engine",
"test_level": 0
},
{
"test": "test_engine_get_active_eng_info",
"case": "get_active_eng_info",
"unit": "nvgpu_engine",
"test_level": 0
},
{
"test": "test_engine_get_fast_ce_runlist_id",
"case": "get_fast_ce_runlist_id",
"unit": "nvgpu_engine",
"test_level": 0
},
{
"test": "test_engine_get_gr_runlist_id",
"case": "get_gr_runlist_id",
"unit": "nvgpu_engine",
"test_level": 0
},
{
"test": "test_engine_get_mask_on_id",
"case": "get_mask_on_id",
"unit": "nvgpu_engine",
"test_level": 0
},
{
"test": "test_engine_get_runlist_busy_engines",
"case": "get_runlist_busy_engines",
"unit": "nvgpu_engine",
"test_level": 0
},
{
"test": "test_engine_ids",
"case": "ids",
@@ -2303,6 +2333,24 @@
"unit": "nvgpu_engine",
"test_level": 0
},
{
"test": "test_engine_is_valid_runlist_id",
"case": "is_valid_runlist_id",
"unit": "nvgpu_engine",
"test_level": 0
},
{
"test": "test_engine_mmu_fault_id",
"case": "mmu_fault_id",
"unit": "nvgpu_engine",
"test_level": 0
},
{
"test": "test_engine_mmu_fault_id_veid",
"case": "mmu_fault_id_veid",
"unit": "nvgpu_engine",
"test_level": 0
},
{
"test": "test_fifo_remove_support",
"case": "remove_support",
@@ -2315,6 +2363,12 @@
"unit": "nvgpu_engine",
"test_level": 0
},
{
"test": "test_engine_status",
"case": "status",
"unit": "nvgpu_engine",
"test_level": 0
},
{
"test": "test_fifo_init_support",
"case": "init_support",

View File

@@ -20,7 +20,7 @@
.SUFFIXES:
OBJS = nvgpu-engine.o
OBJS = nvgpu-engine.o nvgpu-engine-status.o
MODULE = nvgpu-engine
LIB_PATHS += -lnvgpu-fifo-common

View File

@@ -25,7 +25,7 @@
###############################################################################
NVGPU_UNIT_NAME = nvgpu-engine
NVGPU_UNIT_SRCS = nvgpu-engine.c
NVGPU_UNIT_SRCS = nvgpu-engine.c nvgpu-engine-status.c
NVGPU_UNIT_INTERFACE_DIRS := \
$(NV_COMPONENT_DIR)/.. \

View File

@@ -0,0 +1,123 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>
#include <unit/io.h>
#include <unit/unit.h>
#include <nvgpu/types.h>
#include <nvgpu/engine_status.h>
#include "../nvgpu-fifo-common.h"
#include "nvgpu-engine-status.h"
#define assert(cond) unit_assert(cond, goto done)
#define NUM_CTXSW_STATUS 6
#define NUM_ID_TYPES 3
#define NUM_NEXT_ID_TYPES 3
/*
 * Exercise the nvgpu_engine_status_* predicates and accessors for every
 * ctxsw status, ctx id type, and next ctx id type value, checking that
 * each helper's result is consistent with the raw field it wraps.
 *
 * Returns UNIT_SUCCESS when all checks pass, UNIT_FAIL otherwise.
 */
int test_engine_status(struct unit_module *m,
struct gk20a *g, void *args)
{
int ret = UNIT_FAIL;
struct nvgpu_engine_status_info status;
/* all defined ctxsw states, plus one out-of-range value (U32(~0)) */
const u32 ctxsw_status[NUM_CTXSW_STATUS] = {
NVGPU_CTX_STATUS_INVALID,
NVGPU_CTX_STATUS_VALID,
NVGPU_CTX_STATUS_CTXSW_LOAD,
NVGPU_CTX_STATUS_CTXSW_SAVE,
NVGPU_CTX_STATUS_CTXSW_SWITCH,
U32(~0),
};
const u32 id_types[NUM_ID_TYPES] = {
ENGINE_STATUS_CTX_ID_TYPE_CHID,
ENGINE_STATUS_CTX_ID_TYPE_TSGID,
ENGINE_STATUS_CTX_ID_TYPE_INVALID,
};
const u32 next_id_types[NUM_NEXT_ID_TYPES] = {
ENGINE_STATUS_CTX_NEXT_ID_TYPE_CHID,
ENGINE_STATUS_CTX_NEXT_ID_TYPE_TSGID,
ENGINE_STATUS_CTX_NEXT_ID_TYPE_INVALID,
};
int i;
/* each is_ctxsw_* predicate must be true exactly for its own state */
for (i = 0; i < NUM_CTXSW_STATUS; i++)
{
status.ctxsw_status = ctxsw_status[i];
assert(nvgpu_engine_status_is_ctxsw_switch(&status) ==
(ctxsw_status[i] == NVGPU_CTX_STATUS_CTXSW_SWITCH));
assert(nvgpu_engine_status_is_ctxsw_load(&status) ==
(ctxsw_status[i] == NVGPU_CTX_STATUS_CTXSW_LOAD));
assert(nvgpu_engine_status_is_ctxsw_save(&status) ==
(ctxsw_status[i] == NVGPU_CTX_STATUS_CTXSW_SAVE));
/* is_ctxsw is the union of switch/load/save */
assert(nvgpu_engine_status_is_ctxsw(&status) ==
((ctxsw_status[i] == NVGPU_CTX_STATUS_CTXSW_SWITCH) ||
(ctxsw_status[i] == NVGPU_CTX_STATUS_CTXSW_LOAD) ||
(ctxsw_status[i] == NVGPU_CTX_STATUS_CTXSW_SAVE)));
assert(nvgpu_engine_status_is_ctxsw_invalid(&status) ==
(ctxsw_status[i] == NVGPU_CTX_STATUS_INVALID));
assert(nvgpu_engine_status_is_ctxsw_valid(&status) ==
(ctxsw_status[i] == NVGPU_CTX_STATUS_VALID));
}
/*
 * Current-ctx accessors: poison the next-ctx fields with 0xcafe so a
 * helper that wrongly read them would be caught by the assertions.
 */
for (i = 0; i < NUM_ID_TYPES; i++)
{
u32 ctx_id, ctx_type;
status.ctx_id = i;
status.ctx_id_type = id_types[i];
status.ctx_next_id = 0xcafe;
status.ctx_next_id_type = 0xcafe;
assert(nvgpu_engine_status_is_ctx_type_tsg(&status) ==
(id_types[i] == ENGINE_STATUS_CTX_ID_TYPE_TSGID));
nvgpu_engine_status_get_ctx_id_type(&status,
&ctx_id, &ctx_type);
assert(ctx_id == status.ctx_id);
assert(ctx_type == status.ctx_id_type);
}
/* Next-ctx accessors: same scheme, poisoning the current-ctx fields. */
for (i = 0; i < NUM_NEXT_ID_TYPES; i++)
{
u32 ctx_next_id, ctx_next_type;
status.ctx_id = 0xcafe;
status.ctx_id_type = 0xcafe;
status.ctx_next_id = i;
status.ctx_next_id_type = next_id_types[i];
assert(nvgpu_engine_status_is_next_ctx_type_tsg(&status) ==
(next_id_types[i] ==
ENGINE_STATUS_CTX_NEXT_ID_TYPE_TSGID));
nvgpu_engine_status_get_next_ctx_id_type(&status,
&ctx_next_id, &ctx_next_type);
assert(ctx_next_id == status.ctx_next_id);
assert(ctx_next_type == status.ctx_next_id_type);
}
ret = UNIT_SUCCESS;
done:
return ret;
}

View File

@@ -0,0 +1,84 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef UNIT_NVGPU_ENGINE_STATUS_H
#define UNIT_NVGPU_ENGINE_STATUS_H
#include <nvgpu/types.h>
struct unit_module;
struct gk20a;
/** @addtogroup SWUTS-fifo-engine
* @{
*
* Software Unit Test Specification for fifo/engine
*/
/**
* Test specification for: test_engine_status
*
* Description: Engine status helper functions
*
* Test Type: Feature based
*
* Targets: nvgpu_engine_status_is_ctxsw_switch,
* nvgpu_engine_status_is_ctxsw_load,
* nvgpu_engine_status_is_ctxsw_save,
* nvgpu_engine_status_is_ctxsw,
* nvgpu_engine_status_is_ctxsw_invalid,
* nvgpu_engine_status_is_ctxsw_valid,
* nvgpu_engine_status_is_ctx_type_tsg,
* nvgpu_engine_status_is_next_ctx_type_tsg
*
* Input: None
*
* Steps:
* - Initialize ctxsw_status field of nvgpu_engine_status_info structure
* with NVGPU_CTX_STATUS_INVALID, NVGPU_CTX_STATUS_VALID,
* NVGPU_CTX_STATUS_CTXSW_LOAD, NVGPU_CTX_STATUS_CTXSW_SAVE,
* NVGPU_CTX_STATUS_CTXSW_SWITCH, and U32(~0).
* - Check that nvgpu_engine_status_is_ctxsw_load,
* nvgpu_engine_status_is_ctxsw_save, nvgpu_engine_status_is_ctxsw,
* nvgpu_engine_status_is_ctxsw_invalid, nvgpu_engine_status_is_ctxsw_valid,
* return consistent values.
* - Initialize ctx_id with a counter and ctx_id_types successively with
* ENGINE_STATUS_CTX_ID_TYPE_CHID, ENGINE_STATUS_CTX_ID_TYPE_TSGID, and
* ENGINE_STATUS_CTX_ID_TYPE_INVALID.
* - Initialize next_ctx_id and next_ctx_id_types with invalid values
* (to make sure accessors use the right fields).
* - Check that nvgpu_engine_status_is_ctx_type_tsg and
* nvgpu_engine_status_get_ctx_id_type return consistent values.
* - Use same method to check nvgpu_engine_status_is_next_ctx_type_tsg and
* nvgpu_engine_status_get_next_ctx_id_type.
*
* Output: Returns PASS if all branches gave expected results. FAIL otherwise.
*/
int test_engine_status(struct unit_module *m,
struct gk20a *g, void *args);
/**
* @}
*/
#endif /* UNIT_NVGPU_ENGINE_STATUS_H */

View File

@@ -30,6 +30,7 @@
#include <nvgpu/channel_sync.h>
#include <nvgpu/dma.h>
#include <nvgpu/engines.h>
#include <nvgpu/engine_status.h>
#include <nvgpu/tsg.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/runlist.h>
@@ -43,7 +44,9 @@
#include "../nvgpu-fifo-common.h"
#include "../nvgpu-fifo-gv11b.h"
#include "nvgpu-engine.h"
#include "nvgpu-engine-status.h"
#define ENGINE_UNIT_DEBUG
#ifdef ENGINE_UNIT_DEBUG
#undef unit_verbose
#define unit_verbose unit_info
@@ -60,13 +63,17 @@ struct unit_ctx {
u32 branches;
u32 ce_mask;
u32 eng_mask;
u32 id;
u32 is_tsg;
};
struct unit_ctx unit_ctx;
static struct unit_ctx u;
static void subtest_setup(u32 branches)
{
unit_ctx.branches = branches;
u.branches = branches;
/* do NOT clean u.eng_mask */
}
#define subtest_pruned test_fifo_subtest_pruned
@@ -272,8 +279,8 @@ int test_engine_ids(struct unit_module *m,
u32 n, i;
u32 engine_id;
unit_ctx.ce_mask = 0;
unit_ctx.eng_mask = 0;
u.ce_mask = 0;
u.eng_mask = 0;
unit_assert(nvgpu_engine_check_valid_id(g, U32_MAX) == false,
goto done);
@@ -290,24 +297,115 @@ int test_engine_ids(struct unit_module *m,
unit_assert(nvgpu_engine_check_valid_id(g, engine_id) ==
true, goto done);
unit_ctx.eng_mask |= BIT(engine_id);
u.eng_mask |= BIT(engine_id);
if (e == NVGPU_ENGINE_ASYNC_CE || e == NVGPU_ENGINE_GRCE) {
unit_ctx.ce_mask |= BIT(engine_id);
u.ce_mask |= BIT(engine_id);
}
}
}
unit_assert(nvgpu_engine_get_ids(g, &engine_id, 1,
NVGPU_ENGINE_GR) == 1, goto done);
unit_assert(nvgpu_engine_get_ids(g, &engine_id,
1, NVGPU_ENGINE_GR) == 1, goto done);
unit_assert(engine_id == nvgpu_engine_get_gr_id(g), goto done);
unit_assert(unit_ctx.eng_mask != 0, goto done);
unit_assert(unit_ctx.ce_mask != 0, goto done);
unit_assert(u.eng_mask != 0, goto done);
unit_assert(u.ce_mask != 0, goto done);
unit_assert(nvgpu_engine_get_ids(g, &engine_id,
0, NVGPU_ENGINE_GR) == 0, goto done);
unit_assert(nvgpu_engine_get_ids(g, &engine_id,
1, NVGPU_ENGINE_GRCE) == 1, goto done);
ret = UNIT_SUCCESS;
done:
return ret;
}
/*
 * Check nvgpu_engine_is_valid_runlist_id: every active engine's runlist
 * id must be reported valid, while a NULL gk20a pointer and
 * NVGPU_INVALID_RUNLIST_ID must both be rejected.
 */
int test_engine_is_valid_runlist_id(struct unit_module *m,
		struct gk20a *g, void *args)
{
	int ret = UNIT_FAIL;
	struct nvgpu_fifo *f = &g->fifo;
	u32 idx;

	/* positive cases: runlist ids of all active engines */
	for (idx = 0U; idx < f->num_engines; idx++) {
		u32 active_id = f->active_engines_list[idx];
		struct nvgpu_engine_info *info = &f->engine_info[active_id];

		unit_assert(nvgpu_engine_is_valid_runlist_id(g,
				info->runlist_id), goto done);
	}

	/* negative case: NULL gk20a pointer */
	unit_assert(!nvgpu_engine_is_valid_runlist_id(NULL, 0U), goto done);

	/* negative case: invalid runlist id */
	unit_assert(!nvgpu_engine_is_valid_runlist_id(g,
			NVGPU_INVALID_RUNLIST_ID), goto done);

	ret = UNIT_SUCCESS;
done:
	return ret;
}
/*
 * Check nvgpu_engine_get_fast_ce_runlist_id returns a valid runlist id
 * for an initialized fifo, and its error value for a NULL gk20a.
 */
int test_engine_get_fast_ce_runlist_id(struct unit_module *m,
struct gk20a *g, void *args)
{
u32 runlist_id;
int ret = UNIT_FAIL;
runlist_id = nvgpu_engine_get_fast_ce_runlist_id(g);
unit_assert(runlist_id != NVGPU_INVALID_RUNLIST_ID, goto done);
/*
 * NOTE(review): the NULL case compares a runlist id against
 * NVGPU_INVALID_ENG_ID rather than NVGPU_INVALID_RUNLIST_ID --
 * presumably this matches the implementation's error return; confirm
 * against nvgpu_engine_get_fast_ce_runlist_id.
 */
unit_assert(nvgpu_engine_get_fast_ce_runlist_id(NULL) ==
NVGPU_INVALID_ENG_ID, goto done);
ret = UNIT_SUCCESS;
done:
return ret;
}
/*
 * Check nvgpu_engine_get_gr_runlist_id for three cases: the real fifo
 * (valid id), a fabricated engine list with no NVGPU_ENGINE_GR entry
 * (invalid id), and the same list with a GR entry (valid id).
 */
int test_engine_get_gr_runlist_id(struct unit_module *m,
struct gk20a *g, void *args)
{
struct nvgpu_fifo *f = &g->fifo;
/* snapshot of the fifo, restored on exit */
struct nvgpu_fifo fifo = g->fifo;
u32 runlist_id;
int ret = UNIT_FAIL;
struct nvgpu_engine_info engine_info[2];
u32 active_engines_list;
runlist_id = nvgpu_engine_get_gr_runlist_id(g);
unit_assert(runlist_id != NVGPU_INVALID_RUNLIST_ID, goto done);
/* install a minimal single-engine fifo on the stack */
f->num_engines = 1;
f->max_engines = 1;
f->active_engines_list = &active_engines_list;
active_engines_list = 0;
f->engine_info = engine_info;
engine_info[0].engine_id = 0;
engine_info[0].runlist_id = 1;
/* NVGPU_ENGINE_GR not found */
engine_info[0].engine_enum = NVGPU_ENGINE_GRCE;
runlist_id = nvgpu_engine_get_gr_runlist_id(g);
unit_assert(runlist_id == NVGPU_INVALID_RUNLIST_ID, goto done);
/* valid entry */
engine_info[0].engine_enum = NVGPU_ENGINE_GR;
runlist_id = nvgpu_engine_get_gr_runlist_id(g);
unit_assert(runlist_id != NVGPU_INVALID_RUNLIST_ID, goto done);
ret = UNIT_SUCCESS;
done:
/* undo the fifo mutation regardless of outcome */
g->fifo = fifo;
return ret;
}
int test_engine_get_active_eng_info(struct unit_module *m,
struct gk20a *g, void *args)
{
@@ -316,8 +414,12 @@ int test_engine_get_active_eng_info(struct unit_module *m,
struct nvgpu_engine_info *info;
u32 eng_mask = 0;
struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_fifo fifo = g->fifo;
for (engine_id = 0; engine_id < f->max_engines; engine_id++) {
unit_assert(nvgpu_engine_get_active_eng_info(NULL, 0) == NULL,
goto done);
for (engine_id = 0; engine_id <= f->max_engines; engine_id++) {
unit_verbose(m, "engine_id=%u\n", engine_id);
info = nvgpu_engine_get_active_eng_info(g, engine_id);
@@ -330,11 +432,16 @@ int test_engine_get_active_eng_info(struct unit_module *m,
}
}
unit_verbose(m, "eng_mask=%x\n", eng_mask);
unit_verbose(m, "unit_ctx.eng_mask=%x\n", unit_ctx.eng_mask);
unit_assert(eng_mask == unit_ctx.eng_mask, goto done);
unit_verbose(m, "u.eng_mask=%x\n", u.eng_mask);
unit_assert(eng_mask == u.eng_mask, goto done);
f->num_engines = 0;
unit_assert(nvgpu_engine_get_active_eng_info(g, 0) == NULL,
goto done);
ret = UNIT_SUCCESS;
done:
g->fifo = fifo;
return ret;
}
@@ -343,6 +450,7 @@ int test_engine_enum_from_type(struct unit_module *m,
{
int ret = UNIT_FAIL;
int engine_enum;
struct gpu_ops gops = g->ops;
engine_enum = nvgpu_engine_enum_from_type(g,
top_device_info_type_enum_graphics_v());
@@ -355,8 +463,20 @@ int test_engine_enum_from_type(struct unit_module *m,
engine_enum = nvgpu_engine_enum_from_type(g, 0xff);
unit_assert(engine_enum == NVGPU_ENGINE_INVAL, goto done);
g->ops.top.is_engine_gr = NULL;
engine_enum = nvgpu_engine_enum_from_type(g,
top_device_info_type_enum_graphics_v());
unit_assert(engine_enum == NVGPU_ENGINE_INVAL, goto done);
g->ops = gops;
g->ops.top.is_engine_ce = NULL;
engine_enum = nvgpu_engine_enum_from_type(g,
top_device_info_type_enum_graphics_v());
unit_assert(engine_enum == NVGPU_ENGINE_INVAL, goto done);
ret = UNIT_SUCCESS;
done:
g->ops = gops;
return ret;
}
@@ -364,6 +484,7 @@ int test_engine_interrupt_mask(struct unit_module *m,
struct gk20a *g, void *args)
{
int ret = UNIT_FAIL;
struct gpu_ops gops = g->ops;
u32 intr_mask =
nvgpu_gr_engine_interrupt_mask(g) |
nvgpu_ce_engine_interrupt_mask(g);
@@ -372,6 +493,9 @@ int test_engine_interrupt_mask(struct unit_module *m,
u32 mask;
u32 engine_id;
struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_fifo fifo = g->fifo;
unit_assert(nvgpu_engine_check_valid_id(NULL, 0) == false, goto done);
unit_assert(intr_mask != 0U, goto done);
for (engine_id = 0; engine_id < f->max_engines; engine_id++) {
@@ -387,23 +511,492 @@ int test_engine_interrupt_mask(struct unit_module *m,
}
unit_assert(intr_mask == all_mask, goto done);
unit_assert(nvgpu_engine_act_interrupt_mask(NULL, 0) == 0, goto done);
g->ops.ce.isr_stall = NULL;
unit_assert(nvgpu_ce_engine_interrupt_mask(g) == 0, goto done);
g->ops = gops;
g->ops.ce.isr_nonstall = NULL;
unit_assert(nvgpu_ce_engine_interrupt_mask(g) == 0, goto done);
ce_reset_mask = nvgpu_engine_get_all_ce_reset_mask(g);
unit_assert(ce_reset_mask != 0, goto done);;
unit_assert(ce_reset_mask != 0, goto done);
unit_assert(nvgpu_engine_get_all_ce_reset_mask(NULL) == 0, goto done);
f->num_engines = 0;
unit_assert(nvgpu_engine_check_valid_id(g, 0) == false, goto done);
ret = UNIT_SUCCESS;
done:
g->fifo = fifo;
g->ops = gops;
return ret;
}
/*
 * Check the engine id <-> MMU fault id round trip for every engine id
 * from 0 through max_engines inclusive (the <= bound deliberately
 * covers one out-of-range id to hit the invalid-id branches).
 */
int test_engine_mmu_fault_id(struct unit_module *m,
struct gk20a *g, void *args)
{
struct nvgpu_fifo *f = &g->fifo;
int ret = UNIT_FAIL;
struct nvgpu_engine_info *engine_info;
u32 fault_id;
u32 engine_id;
u32 id;
for (engine_id = 0;
engine_id <= f->max_engines;
engine_id++) {
engine_info = nvgpu_engine_get_active_eng_info(g, engine_id);
/* eng info is NULL exactly for invalid engine ids */
unit_assert((engine_info == NULL) ==
!nvgpu_engine_check_valid_id(g, engine_id), goto done);
fault_id = nvgpu_engine_id_to_mmu_fault_id(g, engine_id);
unit_assert((fault_id == NVGPU_INVALID_ENG_ID) ==
!nvgpu_engine_check_valid_id(g, engine_id), goto done);
unit_assert(!engine_info ||
(engine_info->fault_id == fault_id), goto done);
/* reverse mapping must return the original engine id */
id = nvgpu_engine_mmu_fault_id_to_engine_id(g, fault_id);
unit_assert((id == NVGPU_INVALID_ENG_ID) ==
!nvgpu_engine_check_valid_id(g, engine_id), goto done);
unit_assert(!engine_info ||
(engine_info->engine_id == id), goto done);
}
ret = UNIT_SUCCESS;
done:
return ret;
}
/*
 * Check MMU fault id decoding into engine id / veid / pbdma id:
 * - round trip for all (valid and one invalid) engine ids,
 * - fault ids inside the GR engine's subctx (veid) range,
 * - a CE engine fault id (no veid),
 * - a PBDMA fault id, and a fully invalid fault id.
 */
int test_engine_mmu_fault_id_veid(struct unit_module *m,
struct gk20a *g, void *args)
{
struct nvgpu_fifo *f = &g->fifo;
/* snapshot of the fifo, restored on exit */
struct nvgpu_fifo fifo = g->fifo;
int ret = UNIT_FAIL;
struct nvgpu_engine_info *engine_info;
u32 fault_id;
u32 engine_id;
u32 veid;
u32 gr_eng_fault_id;
u32 pbdma_id;
u32 id;
u32 n;
u32 i;
/* <= bound deliberately covers one out-of-range engine id */
for (engine_id = 0;
engine_id <= f->max_engines;
engine_id++) {
engine_info = nvgpu_engine_get_active_eng_info(g, engine_id);
unit_assert((engine_info == NULL) ==
!nvgpu_engine_check_valid_id(g, engine_id), goto done);
fault_id = nvgpu_engine_id_to_mmu_fault_id(g, engine_id);
unit_assert((fault_id == NVGPU_INVALID_ENG_ID) ==
!nvgpu_engine_check_valid_id(g, engine_id), goto done);
unit_assert(!engine_info ||
(engine_info->fault_id == fault_id), goto done);
id = nvgpu_engine_mmu_fault_id_to_eng_id_and_veid(g,
fault_id, &veid);
unit_assert(!engine_info || (id == engine_id), goto done);
}
/* fault_id in GR MMU fault id range */
engine_id = nvgpu_engine_get_gr_id(g);
engine_info = nvgpu_engine_get_active_eng_info(g, engine_id);
unit_assert(engine_info->engine_enum == NVGPU_ENGINE_GR,
goto done);
gr_eng_fault_id = engine_info->fault_id;
/* each subctx offset within the GR range decodes to its veid */
for (i = 0; i < f->max_subctx_count; i++) {
fault_id = gr_eng_fault_id + i;
veid = nvgpu_engine_mmu_fault_id_to_veid(g,
fault_id, gr_eng_fault_id);
unit_assert(veid == i, goto done);
id = nvgpu_engine_mmu_fault_id_to_eng_id_and_veid(g,
fault_id, &veid);
unit_assert(veid == i, goto done);
unit_assert(id == engine_id, goto done);
nvgpu_engine_mmu_fault_id_to_eng_ve_pbdma_id(g,
fault_id, &id, &veid, &pbdma_id);
unit_assert(id == engine_id, goto done);
unit_assert(pbdma_id == INVAL_ID, goto done);
}
/* fault_id in CE range */
n = nvgpu_engine_get_ids(g, &engine_id, 1, NVGPU_ENGINE_ASYNC_CE);
unit_assert(n == 1, goto done);
engine_info = nvgpu_engine_get_active_eng_info(g, engine_id);
unit_assert(engine_info != NULL, goto done);
veid = 0xcafe;
fault_id = engine_info->fault_id;
id = nvgpu_engine_mmu_fault_id_to_eng_id_and_veid(g,
fault_id, &veid);
unit_assert(id == engine_id, goto done);
/* CE engines have no subcontexts, so veid must come back invalid */
unit_assert(veid == INVAL_ID, goto done);
/* valid CE MMU fault id */
fault_id = engine_info->fault_id;
nvgpu_engine_mmu_fault_id_to_eng_ve_pbdma_id(g,
fault_id, &id, &veid, &pbdma_id);
unit_assert(id == engine_id, goto done);
unit_assert(veid == INVAL_ID, goto done);
unit_assert(pbdma_id == INVAL_ID, goto done);
/* valid PBDMA MMU fault id */
/* NOTE(review): 33 is presumably inside this chip's PBDMA fault id
 * range and outside all engine ranges -- confirm against the HAL. */
fault_id = 33;
nvgpu_engine_mmu_fault_id_to_eng_ve_pbdma_id(g,
fault_id, &id, &veid, &pbdma_id);
unit_assert(id == NVGPU_INVALID_ENG_ID, goto done);
unit_assert(veid == INVAL_ID, goto done);
unit_assert(pbdma_id != INVAL_ID, goto done);
/* invalid engine and pbdma MMU fault id */
pbdma_id = 0xcafe;
nvgpu_engine_mmu_fault_id_to_eng_ve_pbdma_id(g,
INVAL_ID, &id, &veid, &pbdma_id);
unit_assert(id == NVGPU_INVALID_ENG_ID, goto done);
unit_assert(veid == INVAL_ID, goto done);
unit_assert(pbdma_id == INVAL_ID, goto done);
ret = UNIT_SUCCESS;
done:
/* undo the fifo mutation regardless of outcome */
g->fifo = fifo;
return ret;
}
#define F_GET_MASK_IS_TSG BIT(0)
#define F_GET_MASK_LOAD BIT(1)
#define F_GET_MASK_BUSY BIT(2)
#define F_GET_MASK_SAME_ID BIT(3)
#define F_GET_MASK_SAME_TYPE BIT(4)
#define F_GET_MASK_LAST BIT(5)
#define FECS_METHOD_WFI_RESTORE 0x80000U
/*
 * Stub for g->ops.engine_status.read_engine_status_info used by
 * test_engine_get_mask_on_id: fabricates an engine status whose busy
 * flag, ctxsw state, ctx id, and ctx id type are driven by the
 * F_GET_MASK_* bits currently set in u.branches.
 */
static void stub_engine_read_engine_status_info(struct gk20a *g,
u32 engine_id, struct nvgpu_engine_status_info *status)
{
/* LOAD branch -> engine is loading a context, otherwise idle-valid */
status->ctxsw_status = u.branches & F_GET_MASK_LOAD ?
NVGPU_CTX_STATUS_CTXSW_LOAD :
NVGPU_CTX_STATUS_VALID;
status->is_busy = ((u.branches & F_GET_MASK_BUSY) != 0);
status->ctx_id_type = ENGINE_STATUS_CTX_ID_TYPE_INVALID;
status->ctx_next_id_type = ENGINE_STATUS_CTX_NEXT_ID_TYPE_INVALID;
if (u.branches & F_GET_MASK_SAME_TYPE) {
/* report the type the test is querying for (tsg vs channel) */
status->ctx_id_type =
u.branches & F_GET_MASK_IS_TSG ?
ENGINE_STATUS_CTX_ID_TYPE_TSGID :
ENGINE_STATUS_CTX_ID_TYPE_CHID;
status->ctx_next_id_type =
u.branches & F_GET_MASK_IS_TSG ?
ENGINE_STATUS_CTX_NEXT_ID_TYPE_TSGID :
ENGINE_STATUS_CTX_NEXT_ID_TYPE_CHID;
}
if (u.branches & F_GET_MASK_SAME_ID) {
/* match the id the test looks up */
status->ctx_id = u.id;
status->ctx_next_id = u.id;
} else {
/* guaranteed mismatch */
status->ctx_id = ~0;
status->ctx_next_id = ~0;
}
}
/*
 * Exercise nvgpu_engine_get_id_and_type and nvgpu_engine_get_mask_on_id
 * across all combinations of the F_GET_MASK_* branches, with a stubbed
 * read_engine_status_info controlling the busy/id/type each engine
 * reports.
 *
 * Fix: the busy+same_id+same_type case asserted `mask = u.eng_mask`
 * (assignment), which always passes for a non-zero eng_mask and never
 * checks the value actually returned; changed to `mask == u.eng_mask`.
 */
int test_engine_get_mask_on_id(struct unit_module *m,
		struct gk20a *g, void *args)
{
	struct gpu_ops gops = g->ops;
	int ret = UNIT_FAIL;
	u32 mask;
	u32 branches;
	u32 engine_id = nvgpu_engine_get_gr_id(g);
	const char *labels[] = {
		"is_tsg",
		"load",
		"busy",
		"same_id",
		"same_type"
	};

	g->ops.engine_status.read_engine_status_info =
		stub_engine_read_engine_status_info;
	u.id = 0x0100;

	for (branches = 0U; branches < F_GET_MASK_LAST; branches++) {
		u32 id;
		u32 type;
		u32 expected_type;

		subtest_setup(branches);
		unit_verbose(m, "%s branches=%s\n", __func__,
			branches_str(branches, labels));
		u.is_tsg = ((branches & F_GET_MASK_IS_TSG) != 0);
		/* fresh id per iteration to catch stale-state bugs */
		u.id++;

		/* id type is only reported when SAME_TYPE is selected */
		expected_type = ENGINE_STATUS_CTX_ID_TYPE_INVALID;
		if (branches & F_GET_MASK_SAME_TYPE) {
			expected_type = branches & F_GET_MASK_IS_TSG ?
				ENGINE_STATUS_CTX_ID_TYPE_TSGID :
				ENGINE_STATUS_CTX_ID_TYPE_CHID;
		}

		nvgpu_engine_get_id_and_type(g, engine_id, &id, &type);
		unit_assert((id == u.id) ==
			((branches & F_GET_MASK_SAME_ID) != 0), goto done);
		unit_assert(type == expected_type, goto done);

		mask = nvgpu_engine_get_mask_on_id(g, u.id, u.is_tsg);
		if ((branches & F_GET_MASK_BUSY) &&
				(branches & F_GET_MASK_SAME_ID) &&
				(branches & F_GET_MASK_SAME_TYPE)) {
			/*
			 * The stub makes every engine report a match, so
			 * the full active-engine mask is expected.
			 */
			unit_assert(mask == u.eng_mask, goto done);
		} else {
			unit_assert(mask == 0, goto done);
		}
	}
	ret = UNIT_SUCCESS;
done:
	if (ret != UNIT_SUCCESS) {
		unit_err(m, "%s branches=%s\n", __func__,
			branches_str(branches, labels));
	}
	/* restore the stubbed HAL ops */
	g->ops = gops;
	return ret;
}
#define F_FIND_BUSY_CTXSW_IDLE BIT(0)
#define F_FIND_BUSY_CTXSW_LOAD BIT(1)
#define F_FIND_BUSY_CTXSW_SWITCH_FECS_WFI_RESTORE BIT(2)
#define F_FIND_BUSY_CTXSW_SWITCH_FECS_OTHER BIT(3)
#define F_FIND_BUSY_CTXSW_SAVE BIT(4)
#define F_FIND_BUSY_CTXSW_LAST BIT(5)
/*
 * Stub for g->ops.gr.falcon.read_fecs_ctxsw_mailbox: reports the WFI
 * restore method when the corresponding test branch is selected,
 * otherwise 0.
 */
static u32 stub_gr_falcon_read_fecs_ctxsw_mailbox(struct gk20a *g,
		u32 reg_index)
{
	bool wfi_restore = (u.branches &
			F_FIND_BUSY_CTXSW_SWITCH_FECS_WFI_RESTORE) != 0U;

	return wfi_restore ? FECS_METHOD_WFI_RESTORE : 0U;
}
/*
 * Stub for g->ops.engine_status.read_engine_status_info used by
 * test_engine_find_busy_doing_ctxsw: fabricates a busy/idle engine in
 * one of the ctxsw states selected by the F_FIND_BUSY_CTXSW_* bits in
 * u.branches, carrying u.id as the TSG id in the relevant field.
 */
static void stub_engine_read_engine_status_info2(struct gk20a *g,
u32 engine_id, struct nvgpu_engine_status_info *status)
{
/* busy unless the IDLE branch is selected */
status->is_busy = ((u.branches & F_FIND_BUSY_CTXSW_IDLE) == 0);
status->ctx_id = ENGINE_STATUS_CTX_ID_INVALID;
status->ctx_id_type = ENGINE_STATUS_CTX_ID_TYPE_INVALID;
status->ctx_next_id = ENGINE_STATUS_CTX_NEXT_ID_INVALID;
status->ctx_next_id_type = ENGINE_STATUS_CTX_NEXT_ID_TYPE_INVALID;
status->ctxsw_status = NVGPU_CTX_STATUS_VALID;
/*
 * NOTE(review): the LOAD/WFI_RESTORE branches assign
 * ENGINE_STATUS_CTX_ID_TYPE_TSGID to ctx_next_id_type; presumably the
 * CTX and CTX_NEXT type constants share values -- confirm, or use
 * ENGINE_STATUS_CTX_NEXT_ID_TYPE_TSGID.
 */
if (u.branches & F_FIND_BUSY_CTXSW_LOAD) {
status->ctxsw_status = NVGPU_CTX_STATUS_CTXSW_LOAD;
status->ctx_next_id = u.id;
status->ctx_next_id_type = ENGINE_STATUS_CTX_ID_TYPE_TSGID;
}
if (u.branches & F_FIND_BUSY_CTXSW_SWITCH_FECS_WFI_RESTORE) {
status->ctxsw_status = NVGPU_CTX_STATUS_CTXSW_SWITCH;
status->ctx_next_id = u.id;
status->ctx_next_id_type = ENGINE_STATUS_CTX_ID_TYPE_TSGID;
}
if (u.branches & F_FIND_BUSY_CTXSW_SWITCH_FECS_OTHER) {
status->ctxsw_status = NVGPU_CTX_STATUS_CTXSW_SWITCH;
status->ctx_id = u.id;
status->ctx_id_type = ENGINE_STATUS_CTX_ID_TYPE_TSGID;
}
if (u.branches & F_FIND_BUSY_CTXSW_SAVE) {
status->ctxsw_status = NVGPU_CTX_STATUS_CTXSW_SAVE;
status->ctx_id = u.id;
status->ctx_id_type = ENGINE_STATUS_CTX_ID_TYPE_TSGID;
}
}
/*
 * Exercise nvgpu_engine_find_busy_doing_ctxsw with stubbed engine
 * status and FECS mailbox, iterating over the F_FIND_BUSY_CTXSW_*
 * branch combinations. Combinations that select more than one ctxsw
 * state, or idle together with any state, are pruned as meaningless.
 */
int test_engine_find_busy_doing_ctxsw(struct unit_module *m,
struct gk20a *g, void *args)
{
/* snapshots of ops and fifo, restored on exit */
struct gpu_ops gops = g->ops;
struct nvgpu_fifo fifo = g->fifo;
struct nvgpu_fifo *f = &g->fifo;
int ret = UNIT_FAIL;
u32 branches;
u32 engine_id;
const char *labels[] = {
"idle",
"load",
"switch_fecs_restore",
"switch_fecs_other",
"save",
};
g->ops.gr.falcon.read_fecs_ctxsw_mailbox =
stub_gr_falcon_read_fecs_ctxsw_mailbox;
g->ops.engine_status.read_engine_status_info =
stub_engine_read_engine_status_info2;
/* one engine is enough: the stub answers for any engine id */
f->num_engines = 1;
u.id = 0x0100;
for (branches = 0U; branches < F_FIND_BUSY_CTXSW_LAST; branches++) {
u32 id;
bool is_tsg;
u32 count;
/* number of ctxsw-state branches selected at once */
count = __builtin_popcount(branches &
(F_FIND_BUSY_CTXSW_LOAD |
F_FIND_BUSY_CTXSW_SWITCH_FECS_WFI_RESTORE |
F_FIND_BUSY_CTXSW_SWITCH_FECS_OTHER |
F_FIND_BUSY_CTXSW_SAVE));
if (count > 1) {
goto pruned;
}
/* idle combined with any other branch is contradictory */
if ((branches & F_FIND_BUSY_CTXSW_IDLE) &&
(branches & ~F_FIND_BUSY_CTXSW_IDLE)) {
pruned:
unit_verbose(m, "%s branches=%s (pruned)\n",
__func__, branches_str(branches, labels));
continue;
}
subtest_setup(branches);
unit_verbose(m, "%s branches=%s\n", __func__,
branches_str(branches, labels));
/* fresh id per iteration to catch stale-state bugs */
u.id++;
is_tsg = false;
engine_id = nvgpu_engine_find_busy_doing_ctxsw(g, &id, &is_tsg);
if ((branches & F_FIND_BUSY_CTXSW_IDLE) || (count == 0)) {
/* idle or not context-switching: nothing found */
unit_assert(engine_id == NVGPU_INVALID_ENG_ID,
goto done);
unit_assert(id == NVGPU_INVALID_TSG_ID, goto done);
unit_assert(!is_tsg, goto done);
} else {
/* busy and context-switching: stubbed TSG reported */
unit_assert(engine_id != NVGPU_INVALID_ENG_ID,
goto done);
unit_assert(id == u.id, goto done);
unit_assert(is_tsg, goto done);
}
}
ret = UNIT_SUCCESS;
done:
if (ret != UNIT_SUCCESS) {
unit_err(m, "%s branches=%s\n", __func__,
branches_str(branches, labels));
}
g->ops = gops;
g->fifo = fifo;
return ret;
}
/* Status stub that reports every engine as busy. */
static void stub_engine_read_engine_status_info_busy(struct gk20a *g,
u32 engine_id, struct nvgpu_engine_status_info *status)
{
status->is_busy = true;
}
/* Status stub that reports every engine as idle. */
static void stub_engine_read_engine_status_info_idle(struct gk20a *g,
u32 engine_id, struct nvgpu_engine_status_info *status)
{
status->is_busy = false;
}
/*
 * Check nvgpu_engine_get_runlist_busy_engines with a fabricated
 * single-engine fifo: busy engine on the matching runlist (mask set),
 * non-matching runlist, idle engine, and empty engine list (mask 0).
 */
int test_engine_get_runlist_busy_engines(struct unit_module *m,
struct gk20a *g, void *args)
{
/* snapshots of ops and fifo, restored on exit */
struct gpu_ops gops = g->ops;
struct nvgpu_fifo fifo = g->fifo;
struct nvgpu_fifo *f = &g->fifo;
int ret = UNIT_FAIL;
struct nvgpu_engine_info engine_info;
u32 engine_id = 0;
u32 eng_mask;
/* install a minimal single-engine fifo on the stack */
f->num_engines = 1;
f->engine_info = &engine_info;
f->active_engines_list = &engine_id;
engine_info.engine_id = 1;
engine_info.runlist_id = 3;
g->ops.engine_status.read_engine_status_info =
stub_engine_read_engine_status_info_busy;
/* busy and same runlist_id (match found) */
eng_mask = nvgpu_engine_get_runlist_busy_engines(g,
engine_info.runlist_id);
unit_assert(eng_mask == BIT32(engine_id), goto done);
/* no entry with matching runlist_id */
eng_mask = nvgpu_engine_get_runlist_busy_engines(g, 1);
unit_assert(eng_mask == 0, goto done);
/* no busy entry found */
g->ops.engine_status.read_engine_status_info =
stub_engine_read_engine_status_info_idle;
eng_mask = nvgpu_engine_get_runlist_busy_engines(g,
engine_info.runlist_id);
unit_assert(eng_mask == 0, goto done);
/* no entry at all */
f->num_engines = 0;
eng_mask = nvgpu_engine_get_runlist_busy_engines(g,
engine_info.runlist_id);
unit_assert(eng_mask == 0, goto done);
ret = UNIT_SUCCESS;
done:
g->ops = gops;
g->fifo = fifo;
return ret;
}
/*
 * Registration table for the engine unit tests. All tests share the
 * unit context "u"; setup_sw/init_support are registered first and
 * remove_support last, since the other tests depend on an initialized
 * FIFO (see the per-test "Input" requirements in the header).
 *
 * NOTE(review): the previous table also carried stale duplicate
 * registrations using "&unit_ctx" (diff leftovers); only the "&u"
 * entries are kept so each test is registered exactly once.
 */
struct unit_module_test nvgpu_engine_tests[] = {
	UNIT_TEST(setup_sw, test_engine_setup_sw, &u, 0),
	UNIT_TEST(init_support, test_fifo_init_support, &u, 0),
	UNIT_TEST(init_info, test_engine_init_info, &u, 0),
	UNIT_TEST(ids, test_engine_ids, &u, 0),
	UNIT_TEST(get_active_eng_info, test_engine_get_active_eng_info, &u, 0),
	UNIT_TEST(enum_from_type, test_engine_enum_from_type, &u, 0),
	UNIT_TEST(interrupt_mask, test_engine_interrupt_mask, &u, 0),
	UNIT_TEST(get_fast_ce_runlist_id,
		test_engine_get_fast_ce_runlist_id, &u, 0),
	UNIT_TEST(get_gr_runlist_id,
		test_engine_get_gr_runlist_id, &u, 0),
	UNIT_TEST(is_valid_runlist_id,
		test_engine_is_valid_runlist_id, &u, 0),
	UNIT_TEST(mmu_fault_id, test_engine_mmu_fault_id, &u, 0),
	UNIT_TEST(mmu_fault_id_veid, test_engine_mmu_fault_id_veid, &u, 0),
	UNIT_TEST(get_mask_on_id, test_engine_get_mask_on_id, &u, 0),
	UNIT_TEST(status, test_engine_status, &u, 0),
	UNIT_TEST(find_busy_doing_ctxsw,
		test_engine_find_busy_doing_ctxsw, &u, 0),
	UNIT_TEST(get_runlist_busy_engines,
		test_engine_get_runlist_busy_engines, &u, 0),
	UNIT_TEST(remove_support, test_fifo_remove_support, &u, 0),
};

UNIT_MODULE(nvgpu_engine, nvgpu_engine_tests, UNIT_PRIO_NVGPU_TEST);

View File

@@ -112,6 +112,70 @@ int test_engine_init_info(struct unit_module *m,
int test_engine_ids(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_engine_get_fast_ce_runlist_id
*
* Description: Get Asynchronous CE runlist id
*
* Test Type: Feature based
*
* Targets: nvgpu_engine_get_fast_ce_runlist_id
*
* Input: test_fifo_init_support must have run.
*
* Steps:
* - Check that nvgpu_engine_get_fast_ce_runlist_id returns valid id.
* - Check that NVGPU_INVALID_ENG_ID is returned when g is NULL.
*
* Output: Returns PASS if all branches gave expected results. FAIL otherwise.
*/
int test_engine_get_fast_ce_runlist_id(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_nvgpu_engine_get_gr_runlist_id
*
* Description: Get GR runlist id
*
* Test Type: Feature based
*
* Targets: nvgpu_engine_get_gr_runlist_id
*
* Input: test_fifo_init_support must have run.
*
* Steps:
* - Check that nvgpu_engine_get_gr_runlist_id returns valid id.
* - Check case where NVGPU_ENGINE_GR is not found.
* - Check case where an entry is found for NVGPU_ENGINE_GR, but
* the HW engine_id is invalid.
*
* Output: Returns PASS if all branches gave expected results. FAIL otherwise.
*/
int test_engine_get_gr_runlist_id(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_engine_is_valid_runlist_id
*
* Description: Check if a runlist ID is valid
*
* Test Type: Feature based
*
* Targets: nvgpu_engine_is_valid_runlist_id
*
* Input: test_fifo_init_support must have run.
*
* Steps:
* - Check that nvgpu_engine_is_valid_runlist_id returns true for
*   active engines' runlist_id.
* - Check that false is returned when g is NULL.
* - Check that false is returned for NVGPU_INVALID_RUNLIST_ID.
*
* Output: Returns PASS if all branches gave expected results. FAIL otherwise.
*/
int test_engine_is_valid_runlist_id(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_engine_get_active_eng_info
*
@@ -160,7 +224,6 @@ int test_engine_get_active_eng_info(struct unit_module *m,
int test_engine_enum_from_type(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_engine_interrupt_mask
*
@@ -189,6 +252,169 @@ int test_engine_enum_from_type(struct unit_module *m,
int test_engine_interrupt_mask(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_engine_mmu_fault_id
*
* Description: Engine ID to MMU fault ID conversions
*
* Test Type: Feature based
*
* Targets: nvgpu_engine_id_to_mmu_fault_id,
* nvgpu_engine_mmu_fault_id_to_engine_id
*
* Input: test_fifo_init_support must have run.
*
* Steps:
* - For each engine_id (including invalid one)
* - Get engine_info using nvgpu_engine_get_active_eng_info.
* - Get fault_id using nvgpu_engine_id_to_mmu_fault_id.
* - For valid engine ids, check that fault_id matches the one
* from engine_info, else check that returned fault_id is invalid.
* - Get engine_id using nvgpu_engine_mmu_fault_id_to_engine_id.
* - For valid engine ids, check that engine_id matches the one
* from engine_info, else check that returned engine_id is invalid.
*
* Output: Returns PASS if all branches gave expected results. FAIL otherwise.
*/
int test_engine_mmu_fault_id(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_engine_mmu_fault_id_veid
*
* Description: MMU fault ID to engine ID and VEID conversions
*
* Test Type: Feature based
*
* Targets: nvgpu_engine_mmu_fault_id_to_veid,
* nvgpu_engine_mmu_fault_id_to_eng_id_and_veid,
* nvgpu_engine_mmu_fault_id_to_eng_ve_pbdma_id,
* nvgpu_engine_runqueue_sel
*
* Input: test_fifo_init_support must have run.
*
* Steps:
* - For each engine_id (including invalid one)
* - Get engine_info using nvgpu_engine_get_active_eng_info.
* - Get fault_id using nvgpu_engine_id_to_mmu_fault_id.
* - Get engine_id using nvgpu_engine_mmu_fault_id_to_engine_id and
* nvgpu_engine_mmu_fault_id_to_eng_id_and_veid.
* - For valid engine ids, check that engine_id matches the one
* from engine_info, else check that returned engine_id is invalid.
*
* - Cover the following cases for nvgpu_engine_mmu_fault_id_to_veid:
* - gr_eng_fault_id <= mmu_fault_id < (gr_eng_fault_id + num_subctx),
* returned veid should be in [0..num_subctx-1] range.
* - mmu_fault_id out of above range, in which case returned veid
* must be INVAL_ID.
*
* - Call nvgpu_engine_mmu_fault_id_to_eng_id_and_veid for all
* possible GR MMU fault ids, and check that function returns
*   GR's active engine id, and sets veid properly.
* - Call nvgpu_engine_mmu_fault_id_to_eng_id_and_veid for a CE
* MMU fault id, and check that function returns CE's active
* engine id, but veid is not set.
*
* - Check that nvgpu_engine_mmu_fault_id_to_eng_ve_pbdma_id looks
* up pbdma_id when active engine id was found. Check that it
* returns invalid PBDMA id otherwise.
*
* Output: Returns PASS if all branches gave expected results. FAIL otherwise.
*/
int test_engine_mmu_fault_id_veid(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_engine_get_mask_on_id
*
* Description: Get mask of engines TSG/ch is loaded on
*
* Test Type: Feature based
*
* Targets: nvgpu_engine_get_mask_on_id, nvgpu_engine_get_id_and_type
*
* Input: test_engine_ids must have run.
*
* Steps:
* - Call nvgpu_engine_get_mask_on_id with a combination of type
* (TSG or channel), and incrementing the id.
* - Using a stub for g->ops.engine_status.read_engine_status_info,
* cover the following cases:
* - Engine is busy or idle.
* - Context switch is loading a context, or not (which determines
* whether to check against ctx_next_id or ctx_id).
* - Context on engine has the same type (TSG/ch) or not.
* - Context on engine has the same id, or not.
* - Check that nvgpu_engine_get_id_and_type returns expected id and type.
* - Check that the mask is only set when engine is busy, and
* context has same id and type.
*
* Output: Returns PASS if all branches gave expected results. FAIL otherwise.
*/
int test_engine_get_mask_on_id(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_engine_find_busy_doing_ctxsw
*
* Description: Find busy engine doing context switch
*
* Test Type: Feature based
*
* Targets: nvgpu_engine_find_busy_doing_ctxsw
*
* Input: test_fifo_init_support must have run.
*
* Steps:
* - Use stub for g->ops.engine_status.read_engine_status_info, to
* emulate engine status:
* - Busy/idle state.
* - Context switch status (VALID, LOAD or SAVE).
* - Set ctx_id and ctx_id_type as per context switch status.
* - Set ctx_next_id and ctx_next_id_type as per context switch status.
* - Use stub for g->ops.gr.falcon_read_fecs_ctxsw_mailbox, to
* emulate current FECS method.
* - Call nvgpu_engine_find_busy_doing_ctxsw, and check that:
* - When engine is idle, or not doing a context switch,
* NVGPU_INVALID_ENG_ID is returned, and other parameters
* are not modified.
* - When engine is busy and doing a context switch, engine_id
* is returned, is_tsg is true and id matches expected TSG id.
*
* Output: Returns PASS if all branches gave expected results. FAIL otherwise.
*/
int test_engine_find_busy_doing_ctxsw(struct unit_module *m,
struct gk20a *g, void *args);
/**
* Test specification for: test_engine_get_runlist_busy_engines
*
* Description: Get busy engines serviced by a given runlist
*
* Test Type: Feature based
*
* Targets: nvgpu_engine_get_runlist_busy_engines
*
* Input: test_fifo_init_support must have run.
*
* Steps:
* - Use stub for g->ops.engine_status.read_engine_status_info, to
* emulate busy/idle state for engine.
* - Build f->engine_info and f->active_engines_list, to cover the
* following cases for nvgpu_engine_get_runlist_busy_engines:
* - Engine has same runlist_id, and is busy.
* - Engine has same runlist_id, but is idle.
* - No engine with matching runlist_id was found.
* - No engine at all (f->num_engines = 0).
* - Check that returned mask is non-zero only for the first case
* (busy and matching runlist_id).
*
* Output: Returns PASS if all branches gave expected results. FAIL otherwise.
*/
int test_engine_get_runlist_busy_engines(struct unit_module *m,
struct gk20a *g, void *args);
/**
* @}
*/

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -139,7 +139,6 @@ int test_fifo_init_support(struct unit_module *m, struct gk20a *g, void *args)
}
gv11b_init_hal(g);
g->ops.fifo.init_fifo_setup_hw = NULL;
g->ops.gr.init.get_no_of_sm = stub_gv11b_gr_init_get_no_of_sm;
global_m = m;