gpu: nvgpu: unit: add channel open tests

Add unit test for:
- gk20a_channel_open_new

Jira NVGPU-3480

Change-Id: I50d8cef746aa532712c94a3806822f5f0c7f435f
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2129723
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Thomas Fleury
2019-05-23 16:33:17 -07:00
committed by mobile promotions
parent d2d7922411
commit 1eef4eaae5

View File

@@ -27,6 +27,7 @@
#include <unit/unit.h>
#include <nvgpu/channel.h>
#include <nvgpu/engines.h>
#include <nvgpu/tsg.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/runlist.h>
@@ -35,17 +36,25 @@
#include "../nvgpu-fifo.h"
/* Number of stub slots available to subtests. */
#define MAX_STUB 2

/* Per-stub bookkeeping recorded by stubbed callbacks during a subtest. */
struct stub_ctx {
	u32 chid;	/* channel id observed by the stub */
};

/* File-scope stub state, reset by subtest_setup() before each run. */
struct stub_ctx stub[MAX_STUB];

/* State shared across all subtests of this unit. */
struct channel_unit_ctx {
	u32 branches;			/* branch mask of the running subtest */
	struct stub_ctx stub[MAX_STUB];	/* per-stub contexts */
};

static struct channel_unit_ctx unit_ctx;
static void subtest_setup(u32 branches)
{
u32 i;
unit_ctx.branches = branches;
memset(stub, 0, sizeof(stub));
for (i = 0; i < MAX_STUB; i++) {
@@ -129,9 +138,182 @@ done:
return rc;
}
/*
 * Branch flags for test_channel_open(): each bit selects one fault or
 * configuration variation. The test iterates over all combinations
 * below F_CHANNEL_OPEN_LAST, pruning invalid ones.
 */
#define F_CHANNEL_OPEN_ENGINE_NOT_VALID BIT(0)
#define F_CHANNEL_OPEN_PRIVILEGED BIT(1)
#define F_CHANNEL_OPEN_ALLOC_CH_FAIL BIT(2)
#define F_CHANNEL_OPEN_ALLOC_CH_WARN0 BIT(3)
#define F_CHANNEL_OPEN_ALLOC_CH_WARN1 BIT(4)
#define F_CHANNEL_OPEN_ALLOC_CH_AGGRESSIVE BIT(5)
#define F_CHANNEL_OPEN_BUG_ON BIT(6)
#define F_CHANNEL_OPEN_ALLOC_INST_FAIL BIT(7)
#define F_CHANNEL_OPEN_OS BIT(8)
#define F_CHANNEL_OPEN_LAST BIT(9)

/* TODO: cover nvgpu_cond_init failures */
#define F_CHANNEL_OPEN_COND0_INIT_FAIL
#define F_CHANNEL_OPEN_COND1_INIT_FAIL

/*
 * Human-readable labels for the branch bits above, presumably indexed
 * by bit position in branches_str().
 * NOTE(review): "cond0_init_fail"/"cond1_init_fail" have no BIT()
 * values yet (see TODO above), so table index 8 does not line up with
 * F_CHANNEL_OPEN_OS — verify against branches_str() before trusting
 * labels past "alloc_inst_fail".
 */
static const char *f_channel_open[] = {
	"engine_not_valid",
	"privileged",
	"alloc_ch_fail",
	"alloc_ch_warn0",
	"alloc_ch_warn1",
	"aggressive_destroy",
	"bug_on",
	"alloc_inst_fail",
	"cond0_init_fail",
	"cond1_init_fail",
	"hal",
};
/*
 * Stub for g->ops.channel.alloc_inst that always fails with -ENOMEM.
 * Installed by test_channel_open() to drive the instance-allocation
 * error path of gk20a_open_new_channel().
 */
static int stub_channel_alloc_inst_ENOMEM(struct gk20a *g,
		struct nvgpu_channel *ch)
{
	(void)g;
	(void)ch;

	return -ENOMEM;
}
/*
 * Exercise gk20a_open_new_channel() over combinations of branch flags:
 * invalid runlist/engine id, privileged opens, channel-allocator
 * failure and warning paths, the aggressive sync destroy threshold,
 * the BUG() path for a stale free channel, and instance-allocation
 * failure.
 *
 * Returns UNIT_SUCCESS when every non-pruned combination behaves as
 * expected, UNIT_FAIL otherwise (the failing branch mask is logged).
 * g->ops and g->os_channel.open are restored before returning.
 */
static int test_channel_open(struct unit_module *m,
		struct gk20a *g, void *args)
{
	struct nvgpu_fifo *f = &g->fifo;
	struct nvgpu_fifo fifo = g->fifo;	/* snapshot for restoring free_chs */
	struct gpu_ops gops = g->ops;		/* snapshot restored at exit */
	/*
	 * ch must start NULL: the assert() macro jumps to done on
	 * failure, and the cleanup path below reads ch before the loop
	 * has necessarily assigned it (reading an uninitialized pointer
	 * is undefined behavior).
	 */
	struct nvgpu_channel *ch = NULL;
	struct nvgpu_channel *next_ch;
	struct nvgpu_posix_fault_inj *kmem_fi;
	u32 branches;
	int rc = UNIT_FAIL;
	/* combinations expected to make the open fail (ch == NULL) */
	u32 fail =
		F_CHANNEL_OPEN_ALLOC_CH_FAIL |
		F_CHANNEL_OPEN_BUG_ON |
		F_CHANNEL_OPEN_ALLOC_INST_FAIL;
	/* skip combinations of these bits; each is covered standalone */
	u32 prune = fail |
		F_CHANNEL_OPEN_ALLOC_CH_WARN0 |
		F_CHANNEL_OPEN_ALLOC_CH_WARN1;
	u32 runlist_id;
	bool privileged;
	int err;
	void (*os_channel_open)(struct nvgpu_channel *ch) =
		g->os_channel.open;

	kmem_fi = nvgpu_kmem_get_fault_injection();

	for (branches = 0U; branches < F_CHANNEL_OPEN_LAST; branches++) {

		if (subtest_pruned(branches, prune)) {
			unit_verbose(m, "%s branches=%s (pruned)\n", __func__,
				branches_str(branches, f_channel_open));
			continue;
		}
		subtest_setup(branches);

		unit_verbose(m, "%s branches=%s\n", __func__,
			branches_str(branches, f_channel_open));

		nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);

		/* channel the allocator is expected to hand out next */
		next_ch =
			nvgpu_list_empty(&f->free_chs) ? NULL :
			nvgpu_list_first_entry(&f->free_chs,
				nvgpu_channel, free_chs);
		assert(next_ch != NULL);

		runlist_id =
			branches & F_CHANNEL_OPEN_ENGINE_NOT_VALID ?
			NVGPU_INVALID_RUNLIST_ID :
			NVGPU_ENGINE_GR;

		privileged =
			branches & F_CHANNEL_OPEN_PRIVILEGED ?
			true : false;

		if (branches & F_CHANNEL_OPEN_ALLOC_CH_FAIL) {
			/* empty the free list to force allocation failure */
			nvgpu_init_list_node(&f->free_chs);
		}

		if (branches & F_CHANNEL_OPEN_ALLOC_CH_WARN0) {
			/* non-zero ref_count on a free channel -> WARN path */
			nvgpu_atomic_inc(&next_ch->ref_count);
		}

		if (branches & F_CHANNEL_OPEN_ALLOC_CH_WARN1) {
			next_ch->referenceable = false;
		}

		if (branches & F_CHANNEL_OPEN_ALLOC_CH_AGGRESSIVE) {
			/* push used_channels past the destroy threshold */
			g->aggressive_sync_destroy_thresh += 1U;
			f->used_channels += 2U;
		}

		g->ops.channel.alloc_inst =
			branches & F_CHANNEL_OPEN_ALLOC_INST_FAIL ?
			stub_channel_alloc_inst_ENOMEM :
			gops.channel.alloc_inst;

		if (branches & F_CHANNEL_OPEN_BUG_ON) {
			/* stale g pointer on the free channel trips BUG() */
			next_ch->g = (void *)1;
		}

		err = EXPECT_BUG(
			ch = gk20a_open_new_channel(g, runlist_id,
				privileged, getpid(), getpid());
		);

		if (branches & F_CHANNEL_OPEN_BUG_ON) {
			next_ch->g = NULL;
			assert(err != 0);
		} else {
			assert(err == 0);
		}

		if (branches & F_CHANNEL_OPEN_ALLOC_CH_WARN1) {
			next_ch->referenceable = true;
		}

		if (branches & F_CHANNEL_OPEN_ALLOC_CH_AGGRESSIVE) {
			g->aggressive_sync_destroy_thresh -= 1U;
			f->used_channels -= 2U;
			assert(g->aggressive_sync_destroy);
			g->aggressive_sync_destroy = false;
		}

		if (branches & fail) {
			if (branches & F_CHANNEL_OPEN_ALLOC_CH_FAIL) {
				f->free_chs = fifo.free_chs;
			}
			if (branches & F_CHANNEL_OPEN_ALLOC_CH_WARN0) {
				/*
				 * Undo the increment done above. It targeted
				 * next_ch, and ch is NULL here (asserted just
				 * below), so decrementing through ch would
				 * dereference NULL.
				 */
				nvgpu_atomic_dec(&next_ch->ref_count);
			}
			assert(ch == NULL);
		} else {
			assert(ch != NULL);
			assert(ch->g == g);
			/* open must have unlinked ch from the free list */
			assert(nvgpu_list_empty(&ch->free_chs));
			nvgpu_channel_close(ch);
			ch = NULL;
		}
	}

	rc = UNIT_SUCCESS;
done:
	if (rc != UNIT_SUCCESS) {
		unit_err(m, "%s branches=%s\n", __func__,
			branches_str(branches, f_channel_open));
	}
	nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
	if (ch != NULL) {
		nvgpu_channel_close(ch);
	}
	g->ops = gops;
	g->os_channel.open = os_channel_open;
	return rc;
}
/*
 * Channel unit tests, executed in order by the unit framework:
 * setup_sw/init_support prepare fifo state, open exercises
 * gk20a_channel_open_new, remove_support tears everything down.
 */
struct unit_module_test nvgpu_channel_tests[] = {
	UNIT_TEST(setup_sw, test_channel_setup_sw, &unit_ctx, 0),
	UNIT_TEST(init_support, test_fifo_init_support, &unit_ctx, 0),
	UNIT_TEST(open, test_channel_open, &unit_ctx, 0),
	UNIT_TEST(remove_support, test_fifo_remove_support, &unit_ctx, 0),
};