gpu: nvgpu: unit: fifo: move assert to unit_assert

The unit_assert() macro checks a condition and executes the bail_out
action passed as its second argument.
Currently, the fifo unit tests wrap unit_assert() in a local assert()
macro with a common bail_out action (goto done). However, the name
assert() is easily confused with the standard assert() macro from
<assert.h>. This patch therefore removes the local assert() macro and
calls unit_assert() directly at each call site.

Jira NVGPU-4684

Change-Id: I3a880f965a191f16efdabced5e23723e66ecaf3c
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2276863
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Authored by Vedashree Vidwans on 2020-01-09 14:46:19 -08:00
Committed by Alex Waterman
Parent: 4a287f08cd
Commit: 652cff2cd0

32 changed files with 795 additions and 710 deletions
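
For reference, a minimal sketch of the two macros involved, assuming the
usual nvgpu unit-test conventions (a struct unit_module pointer named m
in scope and a done: cleanup label in each test); the framework's actual
definition may differ:

    /*
     * Illustrative sketch, not the framework's verbatim definition:
     * check cond and, on failure, log the failing expression, then
     * execute the caller-supplied bail_out action.
     */
    #define unit_assert(cond, bail_out)                              \
            do {                                                     \
                    if (!(cond)) {                                   \
                            unit_err(m, "%s:%d assert failed: %s\n", \
                                    __FILE__, __LINE__, #cond);      \
                            bail_out;                                \
                    }                                                \
            } while (0)

    /* The per-file wrapper removed by this patch: */
    #define assert(cond) unit_assert(cond, goto done)

With the wrapper gone, each call site changes mechanically:

    assert(ch);                    /* before */
    unit_assert(ch, goto done);    /* after  */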

==== changed file ====

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -54,7 +54,6 @@
 } while (0)
 #endif
 
-#define assert(cond) unit_assert(cond, goto done)
 #define branches_str test_fifo_flags_str
 
 int test_gk20a_channel_enable(struct unit_module *m,
@@ -67,11 +66,11 @@ int test_gk20a_channel_enable(struct unit_module *m,
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch);
+        unit_assert(ch, goto done);
 
         gk20a_channel_enable(ch);
-        assert((nvgpu_readl(ch->g, ccsr_channel_r(ch->chid))
-                & ccsr_channel_enable_set_true_f()) != 0);
+        unit_assert((nvgpu_readl(ch->g, ccsr_channel_r(ch->chid))
+                & ccsr_channel_enable_set_true_f()) != 0, goto done);
 
         ret = UNIT_SUCCESS;
 done:
@@ -92,11 +91,11 @@ int test_gk20a_channel_disable(struct unit_module *m,
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch);
+        unit_assert(ch, goto done);
 
         gk20a_channel_disable(ch);
-        assert((nvgpu_readl(ch->g, ccsr_channel_r(ch->chid))
-                & ccsr_channel_enable_clr_true_f()) != 0);
+        unit_assert((nvgpu_readl(ch->g, ccsr_channel_r(ch->chid))
+                & ccsr_channel_enable_clr_true_f()) != 0, goto done);
 
         ret = UNIT_SUCCESS;
 done:
@@ -131,7 +130,7 @@ int test_gk20a_channel_read_state(struct unit_module *m,
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch);
+        unit_assert(ch, goto done);
 
         for (branches = 0U; branches < F_CHANNEL_READ_STATE_LAST; branches++) {
@@ -198,11 +197,12 @@ int test_gk20a_channel_read_state(struct unit_module *m,
                         gk20a_channel_read_state(g, ch, &state);
 
-                        assert(state.next == next);
-                        assert(state.enabled == enabled);
-                        assert(state.busy == busy);
-                        assert(state.ctx_reload == ctx_reload);
-                        assert(state.pending_acquire == pending_acquire);
+                        unit_assert(state.next == next, goto done);
+                        unit_assert(state.enabled == enabled, goto done);
+                        unit_assert(state.busy == busy, goto done);
+                        unit_assert(state.ctx_reload == ctx_reload, goto done);
+                        unit_assert(state.pending_acquire == pending_acquire,
+                                goto done);
                 }
         }

==== changed file ====

@@ -56,8 +56,6 @@
 } while (0)
 #endif
 
-#define assert(cond) unit_assert(cond, goto done)
-
 int test_gm20b_channel_bind(struct unit_module *m,
                 struct gk20a *g, void *args)
 {
@@ -70,17 +68,17 @@ int test_gm20b_channel_bind(struct unit_module *m,
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch);
-        assert(nvgpu_atomic_read(&ch->bound) == 0);
+        unit_assert(ch, goto done);
+        unit_assert(nvgpu_atomic_read(&ch->bound) == 0, goto done);
 
         nvgpu_writel(g, ccsr_channel_inst_r(ch->chid), 0);
         nvgpu_writel(g, ccsr_channel_r(ch->chid), 0);
 
         gm20b_channel_bind(ch);
 
-        assert(nvgpu_readl(g, ccsr_channel_inst_r(ch->chid)) != 0);
-        assert(nvgpu_readl(g, ccsr_channel_r(ch->chid)) != 0);
-        assert(nvgpu_atomic_read(&ch->bound) == 1);
+        unit_assert(nvgpu_readl(g, ccsr_channel_inst_r(ch->chid)) != 0, goto done);
+        unit_assert(nvgpu_readl(g, ccsr_channel_r(ch->chid)) != 0, goto done);
+        unit_assert(nvgpu_atomic_read(&ch->bound) == 1, goto done);
 
         nvgpu_atomic_set(&ch->bound, 0);
@@ -88,7 +86,7 @@ int test_gm20b_channel_bind(struct unit_module *m,
         ch->chid = U32_MAX;
         err = EXPECT_BUG(gm20b_channel_bind(ch));
         ch->chid = chid;
-        assert(err != 0);
+        unit_assert(err != 0, goto done);
 
         ret = UNIT_SUCCESS;
 done:
@@ -111,18 +109,18 @@ int test_gm20b_channel_force_ctx_reload(struct unit_module *m,
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch);
+        unit_assert(ch, goto done);
 
         nvgpu_writel(g, ccsr_channel_r(ch->chid), 0);
 
         gm20b_channel_force_ctx_reload(ch);
-        assert((nvgpu_readl(g, ccsr_channel_r(ch->chid)) &
-                ccsr_channel_force_ctx_reload_true_f()) != 0);
+        unit_assert((nvgpu_readl(g, ccsr_channel_r(ch->chid)) &
+                ccsr_channel_force_ctx_reload_true_f()) != 0, goto done);
 
         chid = ch->chid;
         ch->chid = U32_MAX;
         err = EXPECT_BUG(gm20b_channel_force_ctx_reload(ch));
         ch->chid = chid;
-        assert(err != 0);
+        unit_assert(err != 0, goto done);
 
         ret = UNIT_SUCCESS;
 done:

==== changed file ====

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -59,8 +59,6 @@
 } while (0)
 #endif
 
-#define assert(cond) unit_assert(cond, goto done)
-
 #define branches_str test_fifo_flags_str
 #define pruned test_fifo_subtest_pruned
@@ -81,22 +79,22 @@ int test_gv11b_channel_unbind(struct unit_module *m,
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch);
-        assert(nvgpu_atomic_read(&ch->bound) == 0);
+        unit_assert(ch, goto done);
+        unit_assert(nvgpu_atomic_read(&ch->bound) == 0, goto done);
 
         nvgpu_writel(g, ccsr_channel_inst_r(ch->chid), 0);
         nvgpu_writel(g, ccsr_channel_r(ch->chid), 0);
 
         g->ops.channel.bind(ch);
-        assert(nvgpu_atomic_read(&ch->bound) == 1);
+        unit_assert(nvgpu_atomic_read(&ch->bound) == 1, goto done);
 
         gv11b_channel_unbind(ch);
-        assert(nvgpu_readl(g, (ccsr_channel_inst_r(ch->chid)) &
-                ccsr_channel_inst_bind_false_f()) != 0);
-        assert(nvgpu_readl(g, (ccsr_channel_r(ch->chid)) &
-                ccsr_channel_enable_clr_true_f()) != 0);
-        assert(nvgpu_atomic_read(&ch->bound) == 0);
+        unit_assert(nvgpu_readl(g, (ccsr_channel_inst_r(ch->chid)) &
+                ccsr_channel_inst_bind_false_f()) != 0, goto done);
+        unit_assert(nvgpu_readl(g, (ccsr_channel_r(ch->chid)) &
+                ccsr_channel_enable_clr_true_f()) != 0, goto done);
+        unit_assert(nvgpu_atomic_read(&ch->bound) == 0, goto done);
 
         ret = UNIT_SUCCESS;
 done:
@@ -112,7 +110,8 @@ int test_gv11b_channel_count(struct unit_module *m,
 {
         int ret = UNIT_FAIL;
 
-        assert(gv11b_channel_count(g) == ccsr_channel__size_1_v());
+        unit_assert(gv11b_channel_count(g) == ccsr_channel__size_1_v(),
+                goto done);
 
         ret = UNIT_SUCCESS;
 done:
         return ret;
@@ -139,7 +138,7 @@ int test_gv11b_channel_read_state(struct unit_module *m,
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch);
+        unit_assert(ch, goto done);
 
         for (branches = 0U; branches < F_CHANNEL_READ_STATE_LAST; branches++) {
@@ -157,7 +156,7 @@ int test_gv11b_channel_read_state(struct unit_module *m,
                 nvgpu_writel(g, ccsr_channel_r(ch->chid), v);
 
                 gv11b_channel_read_state(g, ch, &state);
-                assert(state.eng_faulted == eng_faulted);
+                unit_assert(state.eng_faulted == eng_faulted, goto done);
         }
 
         ret = UNIT_SUCCESS;
@@ -189,7 +188,7 @@ int test_gv11b_channel_reset_faulted(struct unit_module *m,
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch);
+        unit_assert(ch, goto done);
 
         for (branches = 0U; branches < F_CHANNEL_RESET_FAULTED_LAST; branches++) {
@@ -208,8 +207,12 @@ int test_gv11b_channel_reset_faulted(struct unit_module *m,
                 gv11b_channel_reset_faulted(g, ch, eng, pbdma);
 
                 v = nvgpu_readl(g, ccsr_channel_r(ch->chid));
-                assert(!eng || ((v & ccsr_channel_eng_faulted_reset_f()) != 0));
-                assert(!pbdma || ((v & ccsr_channel_pbdma_faulted_reset_f()) != 0));
+                unit_assert(!eng ||
+                        ((v & ccsr_channel_eng_faulted_reset_f()) != 0),
+                        goto done);
+                unit_assert(!pbdma ||
+                        ((v & ccsr_channel_pbdma_faulted_reset_f()) != 0),
+                        goto done);
         }
 
         ret = UNIT_SUCCESS;
@@ -266,7 +269,7 @@ int test_gv11b_channel_debug_dump(struct unit_module *m,
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch);
+        unit_assert(ch, goto done);
 
         for (branches = 0U; branches < F_CHANNEL_DUMP_LAST; branches++) {
@@ -291,8 +294,8 @@ int test_gv11b_channel_debug_dump(struct unit_module *m,
                 gv11b_channel_debug_dump(g, &o, info);
 #ifdef CONFIG_DEBUG_FS
-                assert(unit_ctx.count > 4);
-                assert(unit_ctx.err == 0);
+                unit_assert(unit_ctx.count > 4, goto done);
+                unit_assert(unit_ctx.err == 0, goto done);
 #endif
         }

==== changed file ====

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -73,8 +73,6 @@ static void subtest_setup(u32 branches)
 #define subtest_pruned test_fifo_subtest_pruned
 #define branches_str test_fifo_flags_str
 
-#define assert(cond) unit_assert(cond, goto done)
-
 #define F_CHANNEL_SETUP_SW_VZALLOC_FAIL BIT(0)
 #define F_CHANNEL_SETUP_SW_REF_COND_FAIL BIT(1)
 #define F_CHANNEL_SETUP_SW_LAST BIT(2)
@@ -135,10 +133,10 @@ int test_channel_setup_sw(struct unit_module *m, struct gk20a *g, void *vargs)
                 if (branches & fail) {
                         nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
                         nvgpu_posix_enable_fault_injection(l_cond_fi, false, 0);
-                        assert(err != 0);
-                        assert(f->channel == NULL);
+                        unit_assert(err != 0, goto done);
+                        unit_assert(f->channel == NULL, goto done);
                 } else {
-                        assert(err == 0);
+                        unit_assert(err == 0, goto done);
                         nvgpu_channel_cleanup_sw(g);
                 }
         }
@@ -226,7 +224,7 @@ int test_channel_open(struct unit_module *m, struct gk20a *g, void *vargs)
                         nvgpu_list_empty(&f->free_chs) ? NULL :
                         nvgpu_list_first_entry(&f->free_chs,
                                 nvgpu_channel, free_chs);
-                assert(next_ch != NULL);
+                unit_assert(next_ch != NULL, goto done);
 
                 runlist_id =
                         branches & F_CHANNEL_OPEN_ENGINE_NOT_VALID ?
@@ -277,9 +275,9 @@ int test_channel_open(struct unit_module *m, struct gk20a *g, void *vargs)
                 if (branches & F_CHANNEL_OPEN_BUG_ON) {
                         next_ch->g = NULL;
-                        assert(err != 0);
+                        unit_assert(err != 0, goto done);
                 } else {
-                        assert(err == 0);
+                        unit_assert(err == 0, goto done);
                 };
 
                 if (branches & F_CHANNEL_OPEN_ALLOC_CH_WARN1) {
@@ -289,7 +287,7 @@ int test_channel_open(struct unit_module *m, struct gk20a *g, void *vargs)
                 if (branches & F_CHANNEL_OPEN_ALLOC_CH_AGGRESSIVE) {
                         g->aggressive_sync_destroy_thresh -= 1U;
                         f->used_channels -= 2U;
-                        assert(g->aggressive_sync_destroy);
+                        unit_assert(g->aggressive_sync_destroy, goto done);
                         g->aggressive_sync_destroy = false;
                 }
@@ -302,11 +300,11 @@ int test_channel_open(struct unit_module *m, struct gk20a *g, void *vargs)
                         if (branches & F_CHANNEL_OPEN_ALLOC_CH_WARN0) {
                                 nvgpu_atomic_dec(&ch->ref_count);
                         }
-                        assert(ch == NULL);
+                        unit_assert(ch == NULL, goto done);
                 } else {
-                        assert(ch != NULL);
-                        assert(ch->g == g);
-                        assert(nvgpu_list_empty(&ch->free_chs));
+                        unit_assert(ch != NULL, goto done);
+                        unit_assert(ch->g == g, goto done);
+                        unit_assert(nvgpu_list_empty(&ch->free_chs), goto done);
 
                         nvgpu_channel_close(ch);
                         ch = NULL;
@@ -427,7 +425,7 @@ int test_channel_close(struct unit_module *m, struct gk20a *g, void *vargs)
         struct vm_gk20a vm;
 
         tsg = nvgpu_tsg_open(g, getpid());
-        assert(tsg != NULL);
+        unit_assert(tsg != NULL, goto done);
 
         g->ops.gr.intr.flush_channel_tlb = stub_gr_intr_flush_channel_tlb;
@@ -445,7 +443,7 @@ int test_channel_close(struct unit_module *m, struct gk20a *g, void *vargs)
                 ch = nvgpu_channel_open_new(g, runlist_id,
                         privileged, getpid(), getpid());
-                assert(ch != NULL);
+                unit_assert(ch != NULL, goto done);
 
                 ch->usermode_submit_enabled = true;
@@ -465,7 +463,7 @@ int test_channel_close(struct unit_module *m, struct gk20a *g, void *vargs)
                 if (branches & F_CHANNEL_CLOSE_TSG_BOUND) {
                         err = nvgpu_tsg_bind_channel(tsg, ch);
-                        assert(err == 0);
+                        unit_assert(err == 0, goto done);
                 }
 
                 ch->referenceable =
@@ -526,19 +524,19 @@ int test_channel_close(struct unit_module *m, struct gk20a *g, void *vargs)
                 }
 
                 if (branches & F_CHANNEL_CLOSE_ALREADY_FREED) {
-                        assert(err != 0);
-                        assert(ch->g == NULL);
+                        unit_assert(err != 0, goto done);
+                        unit_assert(ch->g == NULL, goto done);
                         continue;
                 }
 
                 if ((branches & F_CHANNEL_CLOSE_USER_SYNC) != 0U) {
-                        assert(stub[0].chid == 1U);
+                        unit_assert(stub[0].chid == 1U, goto done);
                         ch->user_sync = NULL;
                 }
 
                 if (branches & fail) {
-                        assert(ch->g != NULL);
-                        assert(nvgpu_list_empty(&ch->free_chs));
+                        unit_assert(ch->g != NULL, goto done);
+                        unit_assert(nvgpu_list_empty(&ch->free_chs), goto done);
 
                         if (branches & F_CHANNEL_CLOSE_ALREADY_FREED) {
                                 continue;
@@ -553,12 +551,13 @@ int test_channel_close(struct unit_module *m, struct gk20a *g, void *vargs)
                         nvgpu_init_list_node(&tsg->ch_list);
                         nvgpu_ref_put(&tsg->refcount, nvgpu_tsg_release);
                 } else {
-                        assert(!nvgpu_list_empty(&ch->free_chs));
-                        assert(nvgpu_list_empty(&tsg->ch_list));
+                        unit_assert(!nvgpu_list_empty(&ch->free_chs),
+                                goto done);
+                        unit_assert(nvgpu_list_empty(&tsg->ch_list), goto done);
                 }
 
                 if (branches & F_CHANNEL_CLOSE_OS_CLOSE) {
-                        assert(stub[0].chid == ch->chid);
+                        unit_assert(stub[0].chid == ch->chid, goto done);
                 }
 
                 if (!(branches & F_CHANNEL_CLOSE_AS_BOUND)) {
@@ -566,7 +565,7 @@ int test_channel_close(struct unit_module *m, struct gk20a *g, void *vargs)
                 }
 
                 if (branches & F_CHANNEL_CLOSE_FREE_SUBCTX) {
-                        assert(ch->subctx == NULL);
+                        unit_assert(ch->subctx == NULL, goto done);
                 }
 
                 if (ch->subctx != NULL) {
@@ -578,12 +577,12 @@ int test_channel_close(struct unit_module *m, struct gk20a *g, void *vargs)
                 ch->deterministic = false;
                 ch->deterministic_railgate_allowed = false;
 
-                assert(ch->usermode_submit_enabled == false);
+                unit_assert(ch->usermode_submit_enabled == false, goto done);
 
                 /* we took an extra reference to avoid nvgpu_vm_remove_ref */
-                assert(nvgpu_ref_put_return(&vm.ref, NULL));
-                assert(ch->user_sync == NULL);
+                unit_assert(nvgpu_ref_put_return(&vm.ref, NULL), goto done);
+                unit_assert(ch->user_sync == NULL, goto done);
 
 unbind:
                 /*
@@ -594,9 +593,9 @@ unbind:
                  * - free pre-allocated resources
                  * - channel refcount tracking
                  */
-                assert(ch->g == NULL);
-                assert(!ch->referenceable);
-                assert(!nvgpu_list_empty(&ch->free_chs));
+                unit_assert(ch->g == NULL, goto done);
+                unit_assert(!ch->referenceable, goto done);
+                unit_assert(!nvgpu_list_empty(&ch->free_chs), goto done);
 
                 ch = NULL;
         }
@@ -736,14 +735,14 @@ int test_channel_setup_bind(struct unit_module *m, struct gk20a *g, void *vargs)
         struct nvgpu_setup_bind_args bind_args;
 
         tsg = nvgpu_tsg_open(g, getpid());
-        assert(tsg != NULL);
+        unit_assert(tsg != NULL, goto done);
 
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch != NULL);
+        unit_assert(ch != NULL, goto done);
 
         err = nvgpu_tsg_bind_channel(tsg, ch);
-        assert(err == 0);
+        unit_assert(err == 0, goto done);
 
         g->ops.gr.intr.flush_channel_tlb = stub_gr_intr_flush_channel_tlb;
         g->ops.mm.cache.l2_flush = stub_mm_l2_flush; /* bug 2621189 */
@@ -754,7 +753,7 @@ int test_channel_setup_bind(struct unit_module *m, struct gk20a *g, void *vargs)
         vm.mm = &mm;
         ch->vm = &vm;
         err = nvgpu_dma_alloc(g, PAGE_SIZE, &pdb_mem);
-        assert(err == 0);
+        unit_assert(err == 0, goto done);
         vm.pdb.mem = &pdb_mem;
 
         memset(&bind_args, 0, sizeof(bind_args));
@@ -842,19 +841,24 @@ int test_channel_setup_bind(struct unit_module *m, struct gk20a *g, void *vargs)
                 if (branches & fail) {
                         nvgpu_posix_enable_fault_injection(
                                 l_nvgpu_fi, false, 0);
-                        assert(err != 0);
-                        assert(!nvgpu_mem_is_valid(&ch->usermode_userd));
-                        assert(!nvgpu_mem_is_valid(&ch->usermode_gpfifo));
+                        unit_assert(err != 0, goto done);
+                        unit_assert(!nvgpu_mem_is_valid(&ch->usermode_userd),
+                                goto done);
+                        unit_assert(!nvgpu_mem_is_valid(&ch->usermode_gpfifo),
+                                goto done);
                         ch->usermode_submit_enabled = false;
-                        assert(nvgpu_atomic_read(&ch->bound) == false);
+                        unit_assert(nvgpu_atomic_read(&ch->bound) == false,
+                                goto done);
                         g->os_channel.free_usermode_buffers = NULL;
                 } else {
-                        assert(err == 0);
-                        assert(stub[0].chid == ch->chid);
-                        assert(ch->usermode_submit_enabled == true);
-                        assert(ch->userd_iova != 0U);
-                        assert(stub[1].chid == ch->chid);
-                        assert(nvgpu_atomic_read(&ch->bound) == true);
+                        unit_assert(err == 0, goto done);
+                        unit_assert(stub[0].chid == ch->chid, goto done);
+                        unit_assert(ch->usermode_submit_enabled == true,
+                                goto done);
+                        unit_assert(ch->userd_iova != 0U, goto done);
+                        unit_assert(stub[1].chid == ch->chid, goto done);
+                        unit_assert(nvgpu_atomic_read(&ch->bound) == true,
+                                goto done);
                         nvgpu_dma_free(g, &ch->usermode_userd);
                         nvgpu_dma_free(g, &ch->usermode_gpfifo);
                         ch->userd_iova = 0U;
@@ -904,7 +908,7 @@ int test_channel_alloc_inst(struct unit_module *m, struct gk20a *g, void *vargs)
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch != NULL);
+        unit_assert(ch != NULL, goto done);
 
         for (branches = 0U; branches < F_CHANNEL_ALLOC_INST_LAST; branches++) {
@@ -925,17 +929,18 @@ int test_channel_alloc_inst(struct unit_module *m, struct gk20a *g, void *vargs)
                 err = nvgpu_channel_alloc_inst(g, ch);
                 if (branches & fail) {
-                        assert(err != 0);
-                        assert(ch->inst_block.aperture ==
-                                APERTURE_INVALID);
+                        unit_assert(err != 0, goto done);
+                        unit_assert(ch->inst_block.aperture ==
+                                APERTURE_INVALID, goto done);
                 } else {
-                        assert(err == 0);
-                        assert(ch->inst_block.aperture !=
-                                APERTURE_INVALID);
+                        unit_assert(err == 0, goto done);
+                        unit_assert(ch->inst_block.aperture !=
+                                APERTURE_INVALID, goto done);
                 }
 
                 nvgpu_channel_free_inst(g, ch);
-                assert(ch->inst_block.aperture == APERTURE_INVALID);
+                unit_assert(ch->inst_block.aperture == APERTURE_INVALID,
+                        goto done);
         }
 
         ret = UNIT_SUCCESS;
@@ -989,13 +994,13 @@ int test_channel_from_inst(struct unit_module *m, struct gk20a *g, void *vargs)
         chA = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(chA != NULL);
+        unit_assert(chA != NULL, goto done);
 
         chB = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(chB != NULL);
+        unit_assert(chB != NULL, goto done);
 
-        assert(f->num_channels > 0U);
+        unit_assert(f->num_channels > 0U, goto done);
 
         for (branches = 0U; branches < F_CHANNEL_FROM_INST_LAST; branches++) {
@@ -1031,17 +1036,18 @@ int test_channel_from_inst(struct unit_module *m, struct gk20a *g, void *vargs)
                 if (branches & found) {
                         if (branches & F_CHANNEL_FROM_INST_MATCH_A) {
-                                assert(ch == chA);
+                                unit_assert(ch == chA, goto done);
                         }
                         if (branches & F_CHANNEL_FROM_INST_MATCH_B) {
-                                assert(ch == chB);
+                                unit_assert(ch == chB, goto done);
                         }
-                        assert(nvgpu_atomic_read(&ch->ref_count) == 2);
+                        unit_assert(nvgpu_atomic_read(&ch->ref_count) == 2,
+                                goto done);
                         nvgpu_channel_put(ch);
                 } else {
                         f->channel = fifo.channel;
                         f->num_channels = fifo.num_channels;
-                        assert(ch == NULL);
+                        unit_assert(ch == NULL, goto done);
                 }
         }
 
         ret = UNIT_SUCCESS;
@@ -1083,14 +1089,14 @@ int test_channel_enable_disable_tsg(struct unit_module *m,
         int ret = UNIT_FAIL;
 
         tsg = nvgpu_tsg_open(g, getpid());
-        assert(tsg != NULL);
+        unit_assert(tsg != NULL, goto done);
 
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch != NULL);
+        unit_assert(ch != NULL, goto done);
 
         err = nvgpu_tsg_bind_channel(tsg, ch);
-        assert(err == 0);
+        unit_assert(err == 0, goto done);
 
         g->ops.tsg.enable = stub_tsg_enable;
         g->ops.tsg.disable = stub_tsg_disable;
@@ -1098,21 +1104,21 @@
         subtest_setup(branches);
         err = nvgpu_channel_enable_tsg(g, ch);
-        assert(stub[0].tsgid = tsg->tsgid);
+        unit_assert(stub[0].tsgid = tsg->tsgid, goto done);
 
         err = nvgpu_channel_disable_tsg(g, ch);
-        assert(stub[1].tsgid = tsg->tsgid);
+        unit_assert(stub[1].tsgid = tsg->tsgid, goto done);
 
         subtest_setup(branches);
         err = nvgpu_tsg_unbind_channel(tsg, ch);
-        assert(err == 0);
+        unit_assert(err == 0, goto done);
 
         err = nvgpu_channel_enable_tsg(g, ch);
-        assert(err != 0);
+        unit_assert(err != 0, goto done);
 
         err = nvgpu_channel_disable_tsg(g, ch);
-        assert(err != 0);
+        unit_assert(err != 0, goto done);
 
         ret = UNIT_SUCCESS;
@@ -1145,11 +1151,11 @@ int test_channel_abort(struct unit_module *m, struct gk20a *g, void *vargs)
         int err;
 
         tsg = nvgpu_tsg_open(g, getpid());
-        assert(tsg != NULL);
+        unit_assert(tsg != NULL, goto done);
 
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch != NULL);
+        unit_assert(ch != NULL, goto done);
 
         for (branches = 0U; branches < F_CHANNEL_ABORT_LAST; branches++) {
                 subtest_setup(branches);
@@ -1158,11 +1164,11 @@ int test_channel_abort(struct unit_module *m, struct gk20a *g, void *vargs)
                 if ((branches & F_CHANNEL_ABORT_TSG) != 0U) {
                         err = nvgpu_tsg_bind_channel(tsg, ch);
-                        assert(err == 0);
+                        unit_assert(err == 0, goto done);
                 }
 
                 nvgpu_channel_abort(ch, false);
-                assert(ch->unserviceable == true);
+                unit_assert(ch->unserviceable == true, goto done);
         }
 
         ret = UNIT_SUCCESS;
@@ -1199,7 +1205,7 @@ int test_channel_mark_error(struct unit_module *m, struct gk20a *g, void *vargs)
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch != NULL);
+        unit_assert(ch != NULL, goto done);
 
         for (branches = 0U; branches < F_CHANNEL_MARK_ERROR_LAST; branches++) {
@@ -1214,8 +1220,8 @@ int test_channel_mark_error(struct unit_module *m, struct gk20a *g, void *vargs)
                 }
 
                 err = nvgpu_channel_mark_error(g, ch);
-                assert(err == false);
-                assert(ch->unserviceable == true);
+                unit_assert(err == false, goto done);
+                unit_assert(ch->unserviceable == true, goto done);
 
                 ch->semaphore_wq.initialized = true;
                 ch->notifier_wq.initialized = true;
@@ -1245,12 +1251,12 @@ int test_channel_sw_quiesce(struct unit_module *m, struct gk20a *g, void *vargs)
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch != NULL);
-        assert(f->num_channels > 0U);
+        unit_assert(ch != NULL, goto done);
+        unit_assert(f->num_channels > 0U, goto done);
 
 #ifndef CONFIG_NVGPU_RECOVERY
         nvgpu_channel_sw_quiesce(g);
-        assert(ch->unserviceable == true);
+        unit_assert(ch->unserviceable == true, goto done);
 #endif
 
         ret = UNIT_SUCCESS;
@@ -1293,14 +1299,14 @@ int test_channel_deterministic_idle_unidle(struct unit_module *m,
         struct nvgpu_setup_bind_args bind_args;
 
         tsg = nvgpu_tsg_open(g, getpid());
-        assert(tsg != NULL);
+        unit_assert(tsg != NULL, goto done);
 
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch != NULL);
+        unit_assert(ch != NULL, goto done);
 
         err = nvgpu_tsg_bind_channel(tsg, ch);
-        assert(err == 0);
+        unit_assert(err == 0, goto done);
 
         memset(&mm, 0, sizeof(mm));
         memset(&vm, 0, sizeof(vm));
@@ -1308,7 +1314,7 @@ int test_channel_deterministic_idle_unidle(struct unit_module *m,
         vm.mm = &mm;
         ch->vm = &vm;
         err = nvgpu_dma_alloc(g, PAGE_SIZE, &pdb_mem);
-        assert(err == 0);
+        unit_assert(err == 0, goto done);
         vm.pdb.mem = &pdb_mem;
 
         g->ops.gr.intr.flush_channel_tlb = stub_gr_intr_flush_channel_tlb;
@@ -1340,8 +1346,8 @@
         }
 
         err = nvgpu_channel_setup_bind(ch, &bind_args);
-        assert(err == 0);
-        assert(nvgpu_atomic_read(&ch->bound) == true);
+        unit_assert(err == 0, goto done);
+        unit_assert(nvgpu_atomic_read(&ch->bound) == true, goto done);
 
         ch->deterministic_railgate_allowed = (branches &
                 F_CHANNEL_DETERMINISTIC_IDLE_RAILGATE_ALLOWED) ?
@@ -1357,21 +1363,23 @@
         if ((u64)(branches & 0x3U) ==
                 (F_CHANNEL_DETERMINISTIC_IDLE_UNIDLE &
                 ~F_CHANNEL_DETERMINISTIC_IDLE_RAILGATE_ALLOWED)) {
-                assert(g->usage_count.v ==
-                        (gpu_usage_count_initial - 1));
+                unit_assert(g->usage_count.v ==
+                        (gpu_usage_count_initial - 1), goto done);
         } else {
-                assert(g->usage_count.v == gpu_usage_count_initial);
+                unit_assert(g->usage_count.v == gpu_usage_count_initial,
+                        goto done);
         }
 
         nvgpu_channel_deterministic_unidle(g);
 
         if (branches == ((F_CHANNEL_DETERMINISTIC_IDLE_UNIDLE |
                 F_CHANNEL_DETERMINISTIC_UNIDLE_GK20ABUSY_FAIL) &
                 ~F_CHANNEL_DETERMINISTIC_IDLE_RAILGATE_ALLOWED)) {
-                assert(g->usage_count.v ==
-                        (gpu_usage_count_initial - 1));
+                unit_assert(g->usage_count.v ==
+                        (gpu_usage_count_initial - 1), goto done);
         } else {
-                assert(g->usage_count.v == gpu_usage_count_initial);
+                unit_assert(g->usage_count.v == gpu_usage_count_initial,
+                        goto done);
         }
 
         nvgpu_dma_free(g, &ch->usermode_userd);
@@ -1444,13 +1452,13 @@ int test_channel_suspend_resume_serviceable_chs(struct unit_module *m,
         int ret = UNIT_FAIL;
 
         tsg = nvgpu_tsg_open(g, getpid());
-        assert(tsg != NULL);
+        unit_assert(tsg != NULL, goto done);
 
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch != NULL);
+        unit_assert(ch != NULL, goto done);
 
         err = nvgpu_tsg_bind_channel(tsg, ch);
-        assert(err == 0);
+        unit_assert(err == 0, goto done);
 
         g->ops.fifo.preempt_tsg = stub_fifo_preempt_tsg;
         g->ops.fifo.preempt_channel = stub_fifo_preempt_channel;
@@ -1483,16 +1491,16 @@ int test_channel_suspend_resume_serviceable_chs(struct unit_module *m,
                         NVGPU_INVALID_TSG_ID : orig_ch_tsgid;
 
                 err = nvgpu_channel_suspend_all_serviceable_ch(g);
-                assert(err == 0);
+                unit_assert(err == 0, goto done);
 
                 err = nvgpu_channel_resume_all_serviceable_ch(g);
 
                 if (branches & F_CHANNEL_SUSPEND_RESUME_INVALID_TSGID) {
-                        assert(stub[0].chid == ch->chid);
+                        unit_assert(stub[0].chid == ch->chid, goto done);
                 } else if (branches &
                                 F_CHANNEL_SUSPEND_RESUME_UNSERVICEABLE_CH) {
-                        assert(err == 0);
+                        unit_assert(err == 0, goto done);
                 } else {
-                        assert(stub[0].tsgid == ch->tsgid);
+                        unit_assert(stub[0].tsgid == ch->tsgid, goto done);
                 }
         }
         ret = UNIT_SUCCESS;
@@ -1562,14 +1570,14 @@ int test_channel_debug_dump(struct unit_module *m, struct gk20a *g, void *vargs)
         int ret = UNIT_FAIL;
 
         tsg = nvgpu_tsg_open(g, getpid());
-        assert(tsg != NULL);
+        unit_assert(tsg != NULL, goto done);
 
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch != NULL);
+        unit_assert(ch != NULL, goto done);
 
         err = nvgpu_tsg_bind_channel(tsg, ch);
-        assert(err == 0);
+        unit_assert(err == 0, goto done);
 
         kmem_fi = nvgpu_kmem_get_fault_injection();
@@ -1599,8 +1607,8 @@ int test_channel_debug_dump(struct unit_module *m, struct gk20a *g, void *vargs)
                 if (branches & fail) {
                         nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
                 } else {
-                        assert(stub[0].chid == ch->chid);
-                        assert(stub[1].chid == ch->chid);
+                        unit_assert(stub[0].chid == ch->chid, goto done);
+                        unit_assert(stub[1].chid == ch->chid, goto done);
                 }
         }
         ret = UNIT_SUCCESS;
@@ -1661,14 +1669,14 @@ int test_channel_semaphore_wakeup(struct unit_module *m,
         global_count = 0;
 
         tsg = nvgpu_tsg_open(g, getpid());
-        assert(tsg != NULL);
+        unit_assert(tsg != NULL, goto done);
 
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch != NULL);
+        unit_assert(ch != NULL, goto done);
 
         err = nvgpu_tsg_bind_channel(tsg, ch);
-        assert(err == 0);
+        unit_assert(err == 0, goto done);
 
         memset(&mm, 0, sizeof(mm));
         memset(&vm, 0, sizeof(vm));
@@ -1676,7 +1684,7 @@
         vm.mm = &mm;
         ch->vm = &vm;
         err = nvgpu_dma_alloc(g, PAGE_SIZE, &pdb_mem);
-        assert(err == 0);
+        unit_assert(err == 0, goto done);
         vm.pdb.mem = &pdb_mem;
 
         g->ops.gr.intr.flush_channel_tlb = stub_gr_intr_flush_channel_tlb;
@@ -1692,8 +1700,8 @@
         bind_args.flags |= NVGPU_SETUP_BIND_FLAGS_USERMODE_SUPPORT;
 
         err = nvgpu_channel_setup_bind(ch, &bind_args);
-        assert(err == 0);
-        assert(nvgpu_atomic_read(&ch->bound) == true);
+        unit_assert(err == 0, goto done);
+        unit_assert(nvgpu_atomic_read(&ch->bound) == true, goto done);
 
         for (branches = 0U; branches < F_CHANNEL_SEMAPHORRE_WAKEUP_LAST;
                         branches++) {
@@ -1722,7 +1730,7 @@
                 }
 
                 nvgpu_channel_semaphore_wakeup(g, false);
-                assert(stub[0].count == (global_count - 1U));
+                unit_assert(stub[0].count == (global_count - 1U), goto done);
 
                 ch->deterministic = false;
         }
@@ -1750,7 +1758,7 @@ int test_channel_from_invalid_id(struct unit_module *m, struct gk20a *g,
         int ret = UNIT_FAIL;
 
         ch = nvgpu_channel_from_id(g, NVGPU_INVALID_CHANNEL_ID);
-        assert(ch == NULL);
+        unit_assert(ch == NULL, goto done);
 
         ret = UNIT_SUCCESS;
@@ -1771,8 +1779,8 @@ int test_channel_put_warn(struct unit_module *m, struct gk20a *g, void *vargs)
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch != NULL);
-        assert(f->num_channels > 0U);
+        unit_assert(ch != NULL, goto done);
+        unit_assert(f->num_channels > 0U, goto done);
 
         /* condition broadcast fail */
         ch->ref_count_dec_wq.initialized = false;
@@ -1813,15 +1821,15 @@ int test_ch_referenceable_cleanup(struct unit_module *m, struct gk20a *g,
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch != NULL);
-        assert(f->num_channels > 0U);
+        unit_assert(ch != NULL, goto done);
+        unit_assert(f->num_channels > 0U, goto done);
 
         nvgpu_channel_cleanup_sw(g);
-        assert(f->channel == NULL);
+        unit_assert(f->channel == NULL, goto done);
 
         /* Reset environment variables */
         err = nvgpu_channel_setup_sw(g);
-        assert(err == 0);
+        unit_assert(err == 0, goto done);
 
         ret = UNIT_SUCCESS;
 done:
@@ -1844,13 +1852,13 @@ int test_channel_abort_cleanup(struct unit_module *m, struct gk20a *g,
         bool privileged = false;
 
         tsg = nvgpu_tsg_open(g, getpid());
-        assert(tsg != NULL);
+        unit_assert(tsg != NULL, goto done);
 
         g->ops.gr.intr.flush_channel_tlb = stub_gr_intr_flush_channel_tlb;
 
         ch = nvgpu_channel_open_new(g, runlist_id,
                 privileged, getpid(), getpid());
-        assert(ch != NULL);
+        unit_assert(ch != NULL, goto done);
 
         ch->usermode_submit_enabled = true;
 
         /* Channel requires to be as_bound */
@@ -1863,7 +1871,7 @@
         nvgpu_ref_get(&vm.ref);
 
         err = nvgpu_tsg_bind_channel(tsg, ch);
-        assert(err == 0);
+        unit_assert(err == 0, goto done);
 
         ch->user_sync = nvgpu_kzalloc(g,
                         sizeof(struct nvgpu_channel_sync));
@@ -1872,7 +1880,7 @@
         ch->user_sync->destroy = stub_channel_sync_destroy;
 
         err = nvgpu_tsg_unbind_channel(tsg, ch);
-        assert(err == 0);
+        unit_assert(err == 0, goto done);
 
         nvgpu_channel_close(ch);

==== changed file ====

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -53,8 +53,6 @@
 } while (0)
 #endif
 
-#define assert(cond) unit_assert(cond, goto done)
-
 #define branches_str test_fifo_flags_str
 #define pruned test_fifo_subtest_pruned
@@ -91,12 +89,13 @@ int test_gm20b_read_engine_status_info(struct unit_module *m,
         };
         char *ctxsw_status_label = NULL;
 
-        assert(f->num_engines > 0);
-        assert(f->engine_info[0].engine_enum == NVGPU_ENGINE_GR);
+        unit_assert(f->num_engines > 0, goto done);
+        unit_assert(f->engine_info[0].engine_enum == NVGPU_ENGINE_GR,
+                goto done);
 
         nvgpu_writel(g, fifo_engine_status_r(engine_id), 0xbeef);
         gm20b_read_engine_status_info(g, NVGPU_INVALID_ENG_ID, &status);
-        assert(status.reg_data == 0);
+        unit_assert(status.reg_data == 0, goto done);
 
         for (branches = 0; branches < F_ENGINE_READ_STATUS_LAST; branches++) {
@@ -201,14 +200,22 @@ int test_gm20b_read_engine_status_info(struct unit_module *m,
                         gm20b_read_engine_status_info(g, engine_id, &status);
 
-                        assert(status.is_busy == expected.is_busy);
-                        assert(status.is_faulted == expected.is_faulted);
-                        assert(status.ctxsw_in_progress == expected.ctxsw_in_progress);
-                        assert(status.ctxsw_status == expected.ctxsw_status);
-                        assert(status.ctx_id == expected.ctx_id);
-                        assert(status.ctx_id_type == expected.ctx_id_type);
-                        assert(status.ctx_next_id == expected.ctx_next_id);
-                        assert(status.ctx_next_id_type == expected.ctx_next_id_type);
+                        unit_assert(status.is_busy == expected.is_busy,
+                                goto done);
+                        unit_assert(status.is_faulted == expected.is_faulted,
+                                goto done);
+                        unit_assert(status.ctxsw_in_progress ==
+                                expected.ctxsw_in_progress, goto done);
+                        unit_assert(status.ctxsw_status ==
+                                expected.ctxsw_status, goto done);
+                        unit_assert(status.ctx_id ==
+                                expected.ctx_id, goto done);
+                        unit_assert(status.ctx_id_type ==
+                                expected.ctx_id_type, goto done);
+                        unit_assert(status.ctx_next_id ==
+                                expected.ctx_next_id, goto done);
+                        unit_assert(status.ctx_next_id_type ==
+                                expected.ctx_next_id_type, goto done);
                 }
         }
 
         ret = UNIT_SUCCESS;

==== changed file ====

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -55,8 +55,6 @@
 } while (0)
 #endif
 
-#define assert(cond) unit_assert(cond, goto done)
-
 #define branches_str test_fifo_flags_str
 #define pruned test_fifo_subtest_pruned
@@ -198,8 +196,9 @@ int test_gp10b_engine_init_ce_info(struct unit_module *m,
         u.m = m;
         u.gops = g->ops;
 
-        assert(f->num_engines > 0);
-        assert(f->engine_info[0].engine_enum == NVGPU_ENGINE_GR);
+        unit_assert(f->num_engines > 0, goto done);
+        unit_assert(f->engine_info[0].engine_enum == NVGPU_ENGINE_GR,
+                goto done);
 
         g->ops.top.get_device_info = wrap_top_get_device_info;
         g->ops.pbdma.find_for_runlist = wrap_pbdma_find_for_runlist;
@@ -237,11 +236,11 @@ int test_gp10b_engine_init_ce_info(struct unit_module *m,
                 }
 
                 if (branches & fail) {
-                        assert(err != 0);
-                        assert(f->num_engines < (1 + num_lce));
+                        unit_assert(err != 0, goto done);
+                        unit_assert(f->num_engines < (1 + num_lce), goto done);
                 } else {
-                        assert(err == 0);
-                        assert(f->num_engines = (1 + num_lce));
+                        unit_assert(err == 0, goto done);
+                        unit_assert(f->num_engines = (1 + num_lce), goto done);
                 }
         }

==== changed file ====

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -54,8 +54,6 @@
 } while (0)
 #endif
 
-#define assert(cond) unit_assert(cond, goto done)
-
 #define branches_str test_fifo_flags_str
 #define pruned test_fifo_subtest_pruned
@@ -78,11 +76,11 @@ int test_gv100_read_engine_status_info(struct unit_module *m,
         nvgpu_writel(g, fifo_engine_status_r(engine_id), 0);
         gv100_read_engine_status_info(g, engine_id, &status);
-        assert(status.in_reload_status == false);
+        unit_assert(status.in_reload_status == false, goto done);
 
         nvgpu_writel(g, fifo_engine_status_r(engine_id), BIT(29));
         gv100_read_engine_status_info(g, engine_id, &status);
-        assert(status.in_reload_status == true);
+        unit_assert(status.in_reload_status == true, goto done);
 
         ret = UNIT_SUCCESS;
 done:
@@ -156,12 +154,12 @@ int test_gv100_dump_engine_status(struct unit_module *m,
         unit_ctx.engine_id = 0;
         gv100_dump_engine_status(g, &o);
-        assert(unit_ctx.engine_id == (num_engines - 1));
+        unit_assert(unit_ctx.engine_id == (num_engines - 1), goto done);
 
         unit_ctx.engine_id = (u32)~0;
         g->ops.get_litter_value = stub_get_litter_value_0;
         gv100_dump_engine_status(g, &o);
-        assert(unit_ctx.engine_id == (u32)~0);
+        unit_assert(unit_ctx.engine_id == (u32)~0, goto done);
 
         ret = UNIT_SUCCESS;
 done:

==== changed file ====

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -52,18 +52,16 @@
 } while (0)
 #endif
 
-#define assert(cond) unit_assert(cond, goto done)
-
 int test_gv11b_is_fault_engine_subid_gpc(struct unit_module *m,
                 struct gk20a *g, void *args)
 {
         int ret = UNIT_FAIL;
 
-        assert(gv11b_is_fault_engine_subid_gpc(g,
-                gmmu_fault_client_type_gpc_v()) == true);
-        assert(gv11b_is_fault_engine_subid_gpc(g,
-                gmmu_fault_client_type_hub_v()) == false);
+        unit_assert(gv11b_is_fault_engine_subid_gpc(g,
+                gmmu_fault_client_type_gpc_v()) == true, goto done);
+        unit_assert(gv11b_is_fault_engine_subid_gpc(g,
+                gmmu_fault_client_type_hub_v()) == false, goto done);
 
         ret = UNIT_SUCCESS;
 done:

==== changed file ====

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -72,8 +72,6 @@ static void subtest_setup(u32 branches)
 #define subtest_pruned test_fifo_subtest_pruned
 #define branches_str test_fifo_flags_str
 
-#define assert(cond) unit_assert(cond, goto done)
-
 #define F_ENGINE_SETUP_SW_ENGINE_INFO_ENOMEM BIT(0)
 #define F_ENGINE_SETUP_SW_ENGINE_LIST_ENOMEM BIT(1)
 #define F_ENGINE_SETUP_SW_INIT_INFO_FAIL BIT(2)
@@ -110,7 +108,7 @@ int test_engine_setup_sw(struct unit_module *m,
         u32 prune = fail;
 
         err = test_fifo_setup_gv11b_reg_space(m, g);
-        assert(err == 0);
+        unit_assert(err == 0, goto done);
 
         gv11b_init_hal(g);
@@ -144,13 +142,13 @@ int test_engine_setup_sw(struct unit_module *m,
                 err = nvgpu_engine_setup_sw(g);
 
                 if (branches & fail) {
-                        assert(err != 0);
-                        assert(f->active_engines_list == NULL);
-                        assert(f->engine_info == NULL);
+                        unit_assert(err != 0, goto done);
+                        unit_assert(f->active_engines_list == NULL, goto done);
+                        unit_assert(f->engine_info == NULL, goto done);
                 } else {
-                        assert(err == 0);
-                        assert(f->active_engines_list != NULL);
-                        assert(f->engine_info != NULL);
+                        unit_assert(err == 0, goto done);
+                        unit_assert(f->active_engines_list != NULL, goto done);
+                        unit_assert(f->engine_info != NULL, goto done);
                         nvgpu_engine_cleanup_sw(g);
                 }
         }
@@ -245,10 +243,10 @@ int test_engine_init_info(struct unit_module *m,
                 err = nvgpu_engine_init_info(f);
 
                 if (branches & fail) {
-                        assert(err != 0);
+                        unit_assert(err != 0, goto done);
                 } else {
-                        assert(err == 0);
-                        assert(f->num_engines > 0);
+                        unit_assert(err == 0, goto done);
+                        unit_assert(f->num_engines > 0, goto done);
                 }
         }
@@ -277,18 +275,21 @@ int test_engine_ids(struct unit_module *m,
         unit_ctx.ce_mask = 0;
         unit_ctx.eng_mask = 0;
 
-        assert(nvgpu_engine_check_valid_id(g, U32_MAX) == false);
+        unit_assert(nvgpu_engine_check_valid_id(g, U32_MAX) == false,
+                goto done);
 
-        assert(nvgpu_engine_get_ids(g, &engine_id, 1, NVGPU_ENGINE_INVAL) == 0);
+        unit_assert(nvgpu_engine_get_ids(g, &engine_id, 1,
+                NVGPU_ENGINE_INVAL) == 0, goto done);
 
         for (e = NVGPU_ENGINE_GR; e < NVGPU_ENGINE_INVAL; e++) {
                 n = nvgpu_engine_get_ids(g, engine_ids, MAX_ENGINE_IDS, e);
-                assert(n > 0);
+                unit_assert(n > 0, goto done);
                 for (i = 0; i < n; i++) {
                         engine_id = engine_ids[i];
-                        assert(nvgpu_engine_check_valid_id(g, engine_id) == true);
+                        unit_assert(nvgpu_engine_check_valid_id(g, engine_id) ==
+                                true, goto done);
                         unit_ctx.eng_mask |= BIT(engine_id);
                         if (e == NVGPU_ENGINE_ASYNC_CE || e == NVGPU_ENGINE_GRCE) {
                                 unit_ctx.ce_mask |= BIT(engine_id);
@@ -296,10 +297,11 @@ int test_engine_ids(struct unit_module *m,
                 }
         }
 
-        assert(nvgpu_engine_get_ids(g, &engine_id, 1, NVGPU_ENGINE_GR) == 1);
-        assert(engine_id == nvgpu_engine_get_gr_id(g));
-        assert(unit_ctx.eng_mask != 0);
-        assert(unit_ctx.ce_mask != 0);
+        unit_assert(nvgpu_engine_get_ids(g, &engine_id, 1,
+                NVGPU_ENGINE_GR) == 1, goto done);
+        unit_assert(engine_id == nvgpu_engine_get_gr_id(g), goto done);
+        unit_assert(unit_ctx.eng_mask != 0, goto done);
+        unit_assert(unit_ctx.ce_mask != 0, goto done);
 
         ret = UNIT_SUCCESS;
 done:
@@ -320,16 +322,16 @@ int test_engine_get_active_eng_info(struct unit_module *m,
                 unit_verbose(m, "engine_id=%u\n", engine_id);
                 info = nvgpu_engine_get_active_eng_info(g, engine_id);
                 if (nvgpu_engine_check_valid_id(g, engine_id)) {
-                        assert(info != NULL);
-                        assert(info->engine_id == engine_id);
+                        unit_assert(info != NULL, goto done);
+                        unit_assert(info->engine_id == engine_id, goto done);
                         eng_mask |= BIT(engine_id);
                 } else {
-                        assert(info == NULL);
+                        unit_assert(info == NULL, goto done);
                 }
         }
         unit_verbose(m, "eng_mask=%x\n", eng_mask);
         unit_verbose(m, "unit_ctx.eng_mask=%x\n", unit_ctx.eng_mask);
-        assert(eng_mask == unit_ctx.eng_mask);
+        unit_assert(eng_mask == unit_ctx.eng_mask, goto done);
 
         ret = UNIT_SUCCESS;
 done:
@@ -344,14 +346,14 @@ int test_engine_enum_from_type(struct unit_module *m,
         engine_enum = nvgpu_engine_enum_from_type(g,
                 top_device_info_type_enum_graphics_v());
-        assert(engine_enum == NVGPU_ENGINE_GR);
+        unit_assert(engine_enum == NVGPU_ENGINE_GR, goto done);
 
         engine_enum = nvgpu_engine_enum_from_type(g,
                 top_device_info_type_enum_lce_v());
-        assert(engine_enum == NVGPU_ENGINE_ASYNC_CE);
+        unit_assert(engine_enum == NVGPU_ENGINE_ASYNC_CE, goto done);
 
         engine_enum = nvgpu_engine_enum_from_type(g, 0xff);
-        assert(engine_enum == NVGPU_ENGINE_INVAL);
+        unit_assert(engine_enum == NVGPU_ENGINE_INVAL, goto done);
 
         ret = UNIT_SUCCESS;
 done:
@@ -371,22 +373,22 @@ int test_engine_interrupt_mask(struct unit_module *m,
         u32 engine_id;
         struct nvgpu_fifo *f = &g->fifo;
 
-        assert(intr_mask != 0U);
+        unit_assert(intr_mask != 0U, goto done);
 
         for (engine_id = 0; engine_id < f->max_engines; engine_id++) {
                 unit_verbose(m, "engine_id=%u\n", engine_id);
                 mask = nvgpu_engine_act_interrupt_mask(g, engine_id);
                 if (nvgpu_engine_check_valid_id(g, engine_id)) {
-                        assert(mask != 0);
-                        assert((mask & intr_mask) == mask);
+                        unit_assert(mask != 0, goto done);
+                        unit_assert((mask & intr_mask) == mask, goto done);
                         all_mask |= mask;
                 } else {
-                        assert(mask == 0);
+                        unit_assert(mask == 0, goto done);
                 }
         }
-        assert(intr_mask == all_mask);
+        unit_assert(intr_mask == all_mask, goto done);
 
         ce_reset_mask = nvgpu_engine_get_all_ce_reset_mask(g);
-        assert(ce_reset_mask != 0);;
+        unit_assert(ce_reset_mask != 0, goto done);;
 
         ret = UNIT_SUCCESS;
 done:

==== changed file ====

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -56,8 +56,6 @@
 } while (0)
 #endif
 
-#define assert(cond) unit_assert(cond, goto done)
-
 #define UNIT_MAX_PBDMA 32
 
 int test_gk20a_init_pbdma_map(struct unit_module *m,
@@ -67,14 +65,14 @@ int test_gk20a_init_pbdma_map(struct unit_module *m,
         u32 num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
         u32 pbdma_map[UNIT_MAX_PBDMA];
         u32 id;
 
-        assert(num_pbdma > 0);
-        assert(num_pbdma <= UNIT_MAX_PBDMA);
+        unit_assert(num_pbdma > 0, goto done);
+        unit_assert(num_pbdma <= UNIT_MAX_PBDMA, goto done);
 
         memset(pbdma_map, 0, sizeof(pbdma_map));
         gk20a_fifo_init_pbdma_map(g, pbdma_map, num_pbdma);
 
         for (id = 0; id < num_pbdma; id++) {
                 unit_verbose(m, "id=%u map=%08x\n", id, pbdma_map[id]);
-                assert(pbdma_map[id] != 0);
+                unit_assert(pbdma_map[id] != 0, goto done);
         }
 
         ret = UNIT_SUCCESS;
@@ -90,12 +88,14 @@ int test_gk20a_get_timeslices(struct unit_module *m,
         u32 pb_timeslice = gk20a_fifo_get_pb_timeslice(g);
 
         /* check that timeslices are enabled */
-        assert((rl_timeslice & fifo_runlist_timeslice_enable_true_f()) != 0);
-        assert((pb_timeslice & fifo_pb_timeslice_enable_true_f()) != 0);
+        unit_assert((rl_timeslice & fifo_runlist_timeslice_enable_true_f()) !=
+                0, goto done);
+        unit_assert((pb_timeslice & fifo_pb_timeslice_enable_true_f()) != 0,
+                goto done);
 
         /* check that timeslices are non-zero */
-        assert((rl_timeslice & 0xFF) != 0);
-        assert((pb_timeslice & 0xFF) != 0);
+        unit_assert((rl_timeslice & 0xFF) != 0, goto done);
+        unit_assert((pb_timeslice & 0xFF) != 0, goto done);
 
         ret = UNIT_SUCCESS;
 done:


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -56,8 +56,6 @@
} while (0) } while (0)
#endif #endif
#define assert(cond) unit_assert(cond, goto done)
struct unit_ctx { struct unit_ctx {
u32 count; u32 count;
bool fail; bool fail;
@@ -73,14 +71,14 @@ int test_gk20a_fifo_intr_1_enable(struct unit_module *m,
nvgpu_writel(g, fifo_intr_en_1_r(), 0); nvgpu_writel(g, fifo_intr_en_1_r(), 0);
gk20a_fifo_intr_1_enable(g, true); gk20a_fifo_intr_1_enable(g, true);
assert((nvgpu_readl(g, fifo_intr_en_1_r()) & unit_assert((nvgpu_readl(g, fifo_intr_en_1_r()) &
fifo_intr_0_channel_intr_pending_f()) != 0); fifo_intr_0_channel_intr_pending_f()) != 0, goto done);
gk20a_fifo_intr_1_enable(g, false); gk20a_fifo_intr_1_enable(g, false);
assert((nvgpu_readl(g, fifo_intr_en_1_r()) & unit_assert((nvgpu_readl(g, fifo_intr_en_1_r()) &
fifo_intr_0_channel_intr_pending_f()) == 0); fifo_intr_0_channel_intr_pending_f()) == 0, goto done);
assert(ret == UNIT_FAIL); unit_assert(ret == UNIT_FAIL, goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
return ret; return ret;
@@ -94,12 +92,13 @@ int test_gk20a_fifo_intr_1_isr(struct unit_module *m,
/* no channel intr pending */ /* no channel intr pending */
nvgpu_writel(g, fifo_intr_0_r(), ~fifo_intr_0_channel_intr_pending_f()); nvgpu_writel(g, fifo_intr_0_r(), ~fifo_intr_0_channel_intr_pending_f());
gk20a_fifo_intr_1_isr(g); gk20a_fifo_intr_1_isr(g);
assert(nvgpu_readl(g, fifo_intr_0_r()) == 0); unit_assert(nvgpu_readl(g, fifo_intr_0_r()) == 0, goto done);
/* channel intr pending */ /* channel intr pending */
nvgpu_writel(g, fifo_intr_0_r(), U32_MAX); nvgpu_writel(g, fifo_intr_0_r(), U32_MAX);
gk20a_fifo_intr_1_isr(g); gk20a_fifo_intr_1_isr(g);
assert(nvgpu_readl(g, fifo_intr_0_r()) == fifo_intr_0_channel_intr_pending_f()); unit_assert(nvgpu_readl(g, fifo_intr_0_r()) ==
fifo_intr_0_channel_intr_pending_f(), goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
@@ -123,8 +122,9 @@ int test_gk20a_fifo_intr_handle_chsw_error(struct unit_module *m,
u.count = 0; u.count = 0;
nvgpu_writel(g, fifo_intr_chsw_error_r(), 0xcafe); nvgpu_writel(g, fifo_intr_chsw_error_r(), 0xcafe);
gk20a_fifo_intr_handle_chsw_error(g); gk20a_fifo_intr_handle_chsw_error(g);
assert(u.count > 0); unit_assert(u.count > 0, goto done);
assert(nvgpu_readl(g, fifo_intr_chsw_error_r()) == 0xcafe); unit_assert(nvgpu_readl(g, fifo_intr_chsw_error_r()) == 0xcafe,
goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
@@ -163,7 +163,7 @@ int test_gk20a_fifo_intr_handle_runlist_event(struct unit_module *m,
u.fail = false; u.fail = false;
old_io = nvgpu_posix_register_io(g, &new_io); old_io = nvgpu_posix_register_io(g, &new_io);
gk20a_fifo_intr_handle_runlist_event(g); gk20a_fifo_intr_handle_runlist_event(g);
assert(!u.fail); unit_assert(!u.fail, goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
@@ -194,7 +194,7 @@ int test_gk20a_fifo_pbdma_isr(struct unit_module *m,
u32 num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); u32 num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
struct gpu_ops gops = g->ops; struct gpu_ops gops = g->ops;
assert(num_pbdma > 0); unit_assert(num_pbdma > 0, goto done);
g->ops.pbdma.handle_intr = stub_pbdma_handle_intr; g->ops.pbdma.handle_intr = stub_pbdma_handle_intr;
@@ -205,9 +205,10 @@ int test_gk20a_fifo_pbdma_isr(struct unit_module *m,
nvgpu_writel(g, fifo_intr_pbdma_id_r(), BIT(pbdma_id)); nvgpu_writel(g, fifo_intr_pbdma_id_r(), BIT(pbdma_id));
u.count = 0; u.count = 0;
pending = gk20a_fifo_pbdma_isr(g); pending = gk20a_fifo_pbdma_isr(g);
assert(pending == fifo_intr_0_pbdma_intr_pending_f()); unit_assert(pending ==
assert(!u.fail); fifo_intr_0_pbdma_intr_pending_f(), goto done);
assert(u.count == 1); unit_assert(!u.fail, goto done);
unit_assert(u.count == 1, goto done);
} }
} }
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -57,8 +57,6 @@
} while (0) } while (0)
#endif #endif
#define assert(cond) unit_assert(cond, goto done)
int test_gv11b_fifo_init_hw(struct unit_module *m, int test_gv11b_fifo_init_hw(struct unit_module *m,
struct gk20a *g, void *args) struct gk20a *g, void *args)
{ {
@@ -77,12 +75,14 @@ int test_gv11b_fifo_init_hw(struct unit_module *m,
gv11b_init_fifo_reset_enable_hw(g); gv11b_init_fifo_reset_enable_hw(g);
if (!nvgpu_platform_is_silicon(g)) { if (!nvgpu_platform_is_silicon(g)) {
assert(nvgpu_readl(g, fifo_fb_timeout_r()) != 0); unit_assert(nvgpu_readl(g, fifo_fb_timeout_r()) != 0,
goto done);
} }
nvgpu_writel(g, fifo_userd_writeback_r(), 0); nvgpu_writel(g, fifo_userd_writeback_r(), 0);
gv11b_init_fifo_setup_hw(g); gv11b_init_fifo_setup_hw(g);
assert(nvgpu_readl(g, fifo_userd_writeback_r()) != 0); unit_assert(nvgpu_readl(g, fifo_userd_writeback_r()) != 0,
goto done);
} }
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
@@ -107,15 +107,15 @@ int test_gv11b_fifo_mmu_fault_id_to_pbdma_id(struct unit_module *m,
nvgpu_writel(g, fifo_cfg0_r(), reg_val); nvgpu_writel(g, fifo_cfg0_r(), reg_val);
pbdma_id = gv11b_fifo_mmu_fault_id_to_pbdma_id(g, 1); pbdma_id = gv11b_fifo_mmu_fault_id_to_pbdma_id(g, 1);
assert(pbdma_id == INVALID_ID); unit_assert(pbdma_id == INVALID_ID, goto done);
pbdma_id = gv11b_fifo_mmu_fault_id_to_pbdma_id(g, fault_id_pbdma0 + num_pbdma); pbdma_id = gv11b_fifo_mmu_fault_id_to_pbdma_id(g, fault_id_pbdma0 + num_pbdma);
assert(pbdma_id == INVALID_ID); unit_assert(pbdma_id == INVALID_ID, goto done);
for (i = 0; i < num_pbdma; i++) { for (i = 0; i < num_pbdma; i++) {
fault_id = fault_id_pbdma0 + i; fault_id = fault_id_pbdma0 + i;
pbdma_id = gv11b_fifo_mmu_fault_id_to_pbdma_id(g, fault_id); pbdma_id = gv11b_fifo_mmu_fault_id_to_pbdma_id(g, fault_id);
assert(pbdma_id == i); unit_assert(pbdma_id == i, goto done);
} }
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -60,7 +60,6 @@
} while (0) } while (0)
#endif #endif
#define assert(cond) unit_assert(cond, goto done)
#define branches_str test_fifo_flags_str #define branches_str test_fifo_flags_str
struct unit_ctx { struct unit_ctx {
@@ -90,16 +89,17 @@ int test_gv11b_fifo_intr_0_enable(struct unit_module *m,
g->ops.pbdma.intr_enable = stub_pbdma_intr_enable; g->ops.pbdma.intr_enable = stub_pbdma_intr_enable;
gv11b_fifo_intr_0_enable(g, true); gv11b_fifo_intr_0_enable(g, true);
assert(u.fifo_ctxsw_timeout_enable); unit_assert(u.fifo_ctxsw_timeout_enable, goto done);
assert(u.pbdma_intr_enable); unit_assert(u.pbdma_intr_enable, goto done);
assert(nvgpu_readl(g, fifo_intr_runlist_r()) == U32_MAX); unit_assert(nvgpu_readl(g, fifo_intr_runlist_r()) == U32_MAX,
assert(nvgpu_readl(g, fifo_intr_0_r()) == U32_MAX); goto done);
assert(nvgpu_readl(g, fifo_intr_en_0_r()) != 0); unit_assert(nvgpu_readl(g, fifo_intr_0_r()) == U32_MAX, goto done);
unit_assert(nvgpu_readl(g, fifo_intr_en_0_r()) != 0, goto done);
gv11b_fifo_intr_0_enable(g, false); gv11b_fifo_intr_0_enable(g, false);
assert(!u.fifo_ctxsw_timeout_enable); unit_assert(!u.fifo_ctxsw_timeout_enable, goto done);
assert(!u.pbdma_intr_enable); unit_assert(!u.pbdma_intr_enable, goto done);
assert(nvgpu_readl(g, fifo_intr_en_0_r()) == 0); unit_assert(nvgpu_readl(g, fifo_intr_en_0_r()) == 0, goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
@@ -126,7 +126,7 @@ int test_gv11b_fifo_handle_sched_error(struct unit_module *m,
nvgpu_writel(g, fifo_intr_sched_error_r(), SCHED_ERROR_CODE_BAD_TSG); nvgpu_writel(g, fifo_intr_sched_error_r(), SCHED_ERROR_CODE_BAD_TSG);
gv11b_fifo_handle_sched_error(g); gv11b_fifo_handle_sched_error(g);
assert(ret != UNIT_SUCCESS); unit_assert(ret != UNIT_SUCCESS, goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
return ret; return ret;
@@ -218,7 +218,7 @@ int test_gv11b_fifo_intr_0_isr(struct unit_module *m,
g->ops.gr.falcon.dump_stats = g->ops.gr.falcon.dump_stats =
stub_gr_falcon_dump_stats; stub_gr_falcon_dump_stats;
assert(f->sw_ready); unit_assert(f->sw_ready, goto done);
for (branches = 0; branches < BIT(FIFO_NUM_INTRS_0); branches++) { for (branches = 0; branches < BIT(FIFO_NUM_INTRS_0); branches++) {
unit_verbose(m, "%s branches=%s\n", __func__, unit_verbose(m, "%s branches=%s\n", __func__,
@@ -234,15 +234,16 @@ int test_gv11b_fifo_intr_0_isr(struct unit_module *m,
fifo_intr_0); fifo_intr_0);
gv11b_fifo_intr_0_isr(g); gv11b_fifo_intr_0_isr(g);
val = nvgpu_posix_io_readl_reg_space(g, fifo_intr_0_r()); val = nvgpu_posix_io_readl_reg_space(g, fifo_intr_0_r());
assert((val & intr_0_handled_mask) == 0); unit_assert((val & intr_0_handled_mask) == 0, goto done);
assert((val & ~intr_0_handled_mask) == unit_assert((val & ~intr_0_handled_mask) ==
(fifo_intr_0 & ~intr_0_handled_mask)); (fifo_intr_0 & ~intr_0_handled_mask), goto done);
} }
f->sw_ready = false; f->sw_ready = false;
nvgpu_posix_io_writel_reg_space(g, fifo_intr_0_r(), 0xcafe); nvgpu_posix_io_writel_reg_space(g, fifo_intr_0_r(), 0xcafe);
gv11b_fifo_intr_0_isr(g); gv11b_fifo_intr_0_isr(g);
assert(nvgpu_posix_io_readl_reg_space(g, fifo_intr_0_r()) == 0); unit_assert(nvgpu_posix_io_readl_reg_space(g, fifo_intr_0_r()) == 0,
goto done);
f->sw_ready = true; f->sw_ready = true;
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
@@ -268,19 +269,22 @@ int test_gv11b_fifo_intr_recover_mask(struct unit_module *m,
gv11b_fifo_intr_0_enable(g, true); gv11b_fifo_intr_0_enable(g, true);
intr_en_0 = nvgpu_posix_io_readl_reg_space(g, fifo_intr_en_0_r()); intr_en_0 = nvgpu_posix_io_readl_reg_space(g, fifo_intr_en_0_r());
assert((intr_en_0 & fifo_intr_0_ctxsw_timeout_pending_f()) != 0); unit_assert((intr_en_0 & fifo_intr_0_ctxsw_timeout_pending_f()) != 0,
goto done);
nvgpu_posix_io_writel_reg_space(g, nvgpu_posix_io_writel_reg_space(g,
fifo_intr_ctxsw_timeout_r(), 0xcafe); fifo_intr_ctxsw_timeout_r(), 0xcafe);
gv11b_fifo_intr_set_recover_mask(g); gv11b_fifo_intr_set_recover_mask(g);
intr_en_0 = nvgpu_posix_io_readl_reg_space(g, fifo_intr_en_0_r()); intr_en_0 = nvgpu_posix_io_readl_reg_space(g, fifo_intr_en_0_r());
assert((intr_en_0 & fifo_intr_0_ctxsw_timeout_pending_f()) == 0); unit_assert((intr_en_0 & fifo_intr_0_ctxsw_timeout_pending_f()) == 0,
goto done);
val = nvgpu_posix_io_readl_reg_space(g, fifo_intr_ctxsw_timeout_r()); val = nvgpu_posix_io_readl_reg_space(g, fifo_intr_ctxsw_timeout_r());
assert(val == 0); unit_assert(val == 0, goto done);
gv11b_fifo_intr_unset_recover_mask(g); gv11b_fifo_intr_unset_recover_mask(g);
intr_en_0 = nvgpu_posix_io_readl_reg_space(g, fifo_intr_en_0_r()); intr_en_0 = nvgpu_posix_io_readl_reg_space(g, fifo_intr_en_0_r());
assert((intr_en_0 & fifo_intr_0_ctxsw_timeout_pending_f()) != 0); unit_assert((intr_en_0 & fifo_intr_0_ctxsw_timeout_pending_f()) != 0,
goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
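
One way the recover-mask behavior pinned down above could look, as a hedged sketch (the clearing semantics of fifo_intr_ctxsw_timeout_r() are an assumption here, not the verified implementation):

/* Sketch: masking drops the ctxsw timeout enable bit and clears any
 * latched timeout state; unmasking restores the enable bit. */
static void fifo_intr_set_recover_mask_sketch(struct gk20a *g)
{
	u32 en = nvgpu_readl(g, fifo_intr_en_0_r());

	nvgpu_writel(g, fifo_intr_en_0_r(),
		     en & ~fifo_intr_0_ctxsw_timeout_pending_f());
	nvgpu_writel(g, fifo_intr_ctxsw_timeout_r(), 0U);
}

static void fifo_intr_unset_recover_mask_sketch(struct gk20a *g)
{
	u32 en = nvgpu_readl(g, fifo_intr_en_0_r());

	nvgpu_writel(g, fifo_intr_en_0_r(),
		     en | fifo_intr_0_ctxsw_timeout_pending_f());
}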


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -47,8 +47,6 @@
} while (0) } while (0)
#endif #endif
#define assert(cond) unit_assert(cond, goto done)
#define pruned test_fifo_subtest_pruned #define pruned test_fifo_subtest_pruned
#define branches_str test_fifo_flags_str #define branches_str test_fifo_flags_str
@@ -79,8 +77,8 @@ int test_decode_pbdma_ch_eng_status(struct unit_module *m, struct gk20a *g,
pbdma_ch_eng_status = pbdma_ch_eng_status =
nvgpu_fifo_decode_pbdma_ch_eng_status(index); nvgpu_fifo_decode_pbdma_ch_eng_status(index);
assert(strcmp(pbdma_ch_eng_status, unit_assert(strcmp(pbdma_ch_eng_status,
f_fifo_decode_status[index]) == 0); f_fifo_decode_status[index]) == 0, goto done);
} }
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
@@ -120,13 +118,13 @@ int test_fifo_suspend(struct unit_module *m, struct gk20a *g, void *args)
u32 prune = F_FIFO_SUSPEND_BAR1_SUPPORTED; u32 prune = F_FIFO_SUSPEND_BAR1_SUPPORTED;
err = test_fifo_setup_gv11b_reg_space(m, g); err = test_fifo_setup_gv11b_reg_space(m, g);
assert(err == 0); unit_assert(err == 0, goto done);
gv11b_init_hal(g); gv11b_init_hal(g);
gops = g->ops; gops = g->ops;
g->ops.fifo.bar1_snooping_disable = stub_fifo_bar1_snooping_disable; g->ops.fifo.bar1_snooping_disable = stub_fifo_bar1_snooping_disable;
err = nvgpu_fifo_init_support(g); err = nvgpu_fifo_init_support(g);
assert(err == 0); unit_assert(err == 0, goto done);
for (branches = 0U; branches < F_FIFO_SUSPEND_LAST; branches++) { for (branches = 0U; branches < F_FIFO_SUSPEND_LAST; branches++) {
@@ -145,8 +143,8 @@ int test_fifo_suspend(struct unit_module *m, struct gk20a *g, void *args)
err = nvgpu_fifo_suspend(g); err = nvgpu_fifo_suspend(g);
reg0_val = nvgpu_readl(g, fifo_intr_en_0_r()); reg0_val = nvgpu_readl(g, fifo_intr_en_0_r());
reg1_val = nvgpu_readl(g, fifo_intr_en_1_r()); reg1_val = nvgpu_readl(g, fifo_intr_en_1_r());
assert(reg0_val == 0U); unit_assert(reg0_val == 0U, goto done);
assert(reg1_val == 0U); unit_assert(reg1_val == 0U, goto done);
} }
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
@@ -170,17 +168,17 @@ int test_fifo_sw_quiesce(struct unit_module *m, struct gk20a *g, void *args)
int err; int err;
err = test_fifo_setup_gv11b_reg_space(m, g); err = test_fifo_setup_gv11b_reg_space(m, g);
assert(err == 0); unit_assert(err == 0, goto done);
gv11b_init_hal(g); gv11b_init_hal(g);
gops = g->ops; gops = g->ops;
err = nvgpu_fifo_init_support(g); err = nvgpu_fifo_init_support(g);
assert(err == 0); unit_assert(err == 0, goto done);
#ifndef CONFIG_NVGPU_RECOVERY #ifndef CONFIG_NVGPU_RECOVERY
nvgpu_fifo_sw_quiesce(g); nvgpu_fifo_sw_quiesce(g);
reg_val = nvgpu_readl(g, fifo_sched_disable_r()); reg_val = nvgpu_readl(g, fifo_sched_disable_r());
assert(reg_val == 3U); unit_assert(reg_val == 3U, goto done);
#endif #endif
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
@@ -261,7 +259,7 @@ int test_init_support(struct unit_module *m, struct gk20a *g, void *args)
kmem_fi = nvgpu_kmem_get_fault_injection(); kmem_fi = nvgpu_kmem_get_fault_injection();
err = test_fifo_setup_gv11b_reg_space(m, g); err = test_fifo_setup_gv11b_reg_space(m, g);
assert(err == 0); unit_assert(err == 0, goto done);
gv11b_init_hal(g); gv11b_init_hal(g);
gops = g->ops; gops = g->ops;
@@ -282,7 +280,7 @@ int test_init_support(struct unit_module *m, struct gk20a *g, void *args)
if (branches & F_FIFO_SETUP_SW_READY) { if (branches & F_FIFO_SETUP_SW_READY) {
err = nvgpu_fifo_init_support(g); err = nvgpu_fifo_init_support(g);
assert(err == 0); unit_assert(err == 0, goto done);
} }
g->ops.fifo.init_fifo_setup_hw = g->ops.fifo.init_fifo_setup_hw =
@@ -315,9 +313,9 @@ int test_init_support(struct unit_module *m, struct gk20a *g, void *args)
if (branches & F_FIFO_CLEANUP_SW_PBDMA_NULL) { if (branches & F_FIFO_CLEANUP_SW_PBDMA_NULL) {
gops.pbdma.cleanup_sw(g); gops.pbdma.cleanup_sw(g);
} }
assert(err != 0); unit_assert(err != 0, goto done);
} else { } else {
assert(err == 0); unit_assert(err == 0, goto done);
nvgpu_fifo_cleanup_sw_common(g); nvgpu_fifo_cleanup_sw_common(g);
} }


@@ -57,8 +57,6 @@
} while (0) } while (0)
#endif #endif
#define assert(cond) unit_assert(cond, goto done)
#define branches_str test_fifo_flags_str #define branches_str test_fifo_flags_str
#define pruned test_fifo_subtest_pruned #define pruned test_fifo_subtest_pruned
@@ -105,13 +103,15 @@ static bool is_timeout_valid(struct unit_module *m, u32 timeout, u64 ms) {
u64 max_ns = ((1024UL * (u64)pbdma_acquire_timeout_man_max_v()) << u64 max_ns = ((1024UL * (u64)pbdma_acquire_timeout_man_max_v()) <<
pbdma_acquire_timeout_exp_max_v()); pbdma_acquire_timeout_exp_max_v());
assert((timeout & 0x3ff) == unit_assert((timeout & 0x3ff) == (pbdma_acquire_retry_man_2_f() |
(pbdma_acquire_retry_man_2_f() | pbdma_acquire_retry_exp_2_f())); pbdma_acquire_retry_exp_2_f()), goto done);
if (ms == 0) { if (ms == 0) {
assert((timeout & pbdma_acquire_timeout_en_enable_f()) == 0); unit_assert((timeout & pbdma_acquire_timeout_en_enable_f()) ==
0, goto done);
return true; return true;
} else { } else {
assert((timeout & pbdma_acquire_timeout_en_enable_f()) != 0); unit_assert((timeout & pbdma_acquire_timeout_en_enable_f()) !=
0, goto done);
} }
unit_verbose(m, "ms = %llu\n", ms); unit_verbose(m, "ms = %llu\n", ms);
@@ -129,7 +129,7 @@ static bool is_timeout_valid(struct unit_module *m, u32 timeout, u64 ms) {
expected_ns - actual_ns : actual_ns - expected_ns); expected_ns - actual_ns : actual_ns - expected_ns);
unit_verbose(m, "max delta = %llu\n", max_delta); unit_verbose(m, "max delta = %llu\n", max_delta);
unit_verbose(m, "delta = %llu\n", delta); unit_verbose(m, "delta = %llu\n", delta);
assert(delta < max_delta); unit_assert(delta < max_delta, goto done);
return true; return true;
done: done:
@@ -146,16 +146,16 @@ int test_gm20b_pbdma_acquire_val(struct unit_module *m,
int err; int err;
timeout = gm20b_pbdma_acquire_val(0); timeout = gm20b_pbdma_acquire_val(0);
assert(is_timeout_valid(m, timeout, 0)); unit_assert(is_timeout_valid(m, timeout, 0), goto done);
for (i = 0; i < 32; i++) { for (i = 0; i < 32; i++) {
ms = (1ULL << i); ms = (1ULL << i);
timeout = gm20b_pbdma_acquire_val(ms); timeout = gm20b_pbdma_acquire_val(ms);
assert(is_timeout_valid(m, timeout, ms)); unit_assert(is_timeout_valid(m, timeout, ms), goto done);
} }
err = EXPECT_BUG(gm20b_pbdma_acquire_val(U64_MAX)); err = EXPECT_BUG(gm20b_pbdma_acquire_val(U64_MAX));
assert(err != 0); unit_assert(err != 0, goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
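
is_timeout_valid() treats the acquire timeout as a mantissa/exponent pair representing roughly (1024 * man) << exp nanoseconds. A sketch of an encoder with that shape, assuming a 10-bit mantissa (field widths are an assumption; gm20b_pbdma_acquire_val() is the authoritative version):

/* Sketch: split a millisecond timeout into (man, exp) such that
 * timeout_ns ~= (1024ULL * man) << exp. Assumes a 10-bit mantissa. */
static void acquire_timeout_encode_sketch(u64 ms, u32 *man, u32 *exp)
{
	u64 m = (ms * 1000000ULL) >> 10;	/* ns / 1024 */
	u32 e = 0U;

	while (m > 0x3ffULL) {
		m >>= 1U;
		e++;
	}
	*man = (u32)m;
	*exp = e;
}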
@@ -239,21 +239,24 @@ int test_gm20b_pbdma_handle_intr(struct unit_module *m,
recover = gm20b_pbdma_handle_intr(g, pbdma_id, err_notifier, &pbdma_status); recover = gm20b_pbdma_handle_intr(g, pbdma_id, err_notifier, &pbdma_status);
if (branches & F_PBDMA_HANDLE_INTR_0_PENDING) { if (branches & F_PBDMA_HANDLE_INTR_0_PENDING) {
assert(u.stubs.pbdma_handle_intr_0.count > 0); unit_assert(u.stubs.pbdma_handle_intr_0.count > 0,
goto done);
if (branches & F_PBDMA_HANDLE_INTR_0_RECOVER) { if (branches & F_PBDMA_HANDLE_INTR_0_RECOVER) {
assert(recover); unit_assert(recover, goto done);
} }
} }
if (branches & F_PBDMA_HANDLE_INTR_1_PENDING) { if (branches & F_PBDMA_HANDLE_INTR_1_PENDING) {
assert(u.stubs.pbdma_handle_intr_1.count > 0); unit_assert(u.stubs.pbdma_handle_intr_1.count > 0,
goto done);
if (branches & F_PBDMA_HANDLE_INTR_1_RECOVER) { if (branches & F_PBDMA_HANDLE_INTR_1_RECOVER) {
assert(recover); unit_assert(recover, goto done);
} }
} }
if (branches & F_PBDMA_HANDLE_INTR_ERR_NOTIFIER) { if (branches & F_PBDMA_HANDLE_INTR_ERR_NOTIFIER) {
assert(*err_notifier != INVALID_ERR_NOTIFIER); unit_assert(*err_notifier != INVALID_ERR_NOTIFIER,
goto done);
} }
} }
@@ -305,7 +308,8 @@ int test_gm20b_pbdma_handle_intr_0(struct unit_module *m,
int i; int i;
int err; int err;
assert((f->intr.pbdma.device_fatal_0 & pbdma_intr_0_memreq_pending_f()) != 0); unit_assert((f->intr.pbdma.device_fatal_0 &
pbdma_intr_0_memreq_pending_f()) != 0, goto done);
for (branches = 0; branches < BIT(PBDMA_NUM_INTRS); branches++) { for (branches = 0; branches < BIT(PBDMA_NUM_INTRS); branches++) {
@@ -331,45 +335,61 @@ int test_gm20b_pbdma_handle_intr_0(struct unit_module *m,
recover = gm20b_pbdma_handle_intr_0(g, pbdma_id, pbdma_intr_0, &err_notifier); recover = gm20b_pbdma_handle_intr_0(g, pbdma_id, pbdma_intr_0, &err_notifier);
if (pbdma_intr_0 == 0) { if (pbdma_intr_0 == 0) {
assert(!recover); unit_assert(!recover, goto done);
} }
if (pbdma_intr_0 & pbdma_intr_0_memreq_pending_f()) { if (pbdma_intr_0 & pbdma_intr_0_memreq_pending_f()) {
assert(recover); unit_assert(recover, goto done);
} }
if (pbdma_intr_0 & pbdma_intr_0_acquire_pending_f()) { if (pbdma_intr_0 & pbdma_intr_0_acquire_pending_f()) {
if (nvgpu_is_timeouts_enabled(g)) { if (nvgpu_is_timeouts_enabled(g)) {
assert(recover); unit_assert(recover, goto done);
assert(err_notifier != INVALID_ERR_NOTIFIER); unit_assert(err_notifier !=
INVALID_ERR_NOTIFIER, goto done);
} else { } else {
assert(!recover); unit_assert(!recover, goto done);
} }
} }
if (pbdma_intr_0 & pbdma_intr_0_pbentry_pending_f()) { if (pbdma_intr_0 & pbdma_intr_0_pbentry_pending_f()) {
assert(recover); unit_assert(recover, goto done);
assert(nvgpu_readl(g, pbdma_pb_header_r(pbdma_id)) != 0); unit_assert(nvgpu_readl(g,
assert(nvgpu_readl(g, pbdma_method0_r(pbdma_id)) != METHOD_SUBCH5); pbdma_pb_header_r(pbdma_id)) != 0, goto done);
unit_assert(nvgpu_readl(g,
pbdma_method0_r(pbdma_id)) != METHOD_SUBCH5,
goto done);
} }
if (pbdma_intr_0 & pbdma_intr_0_method_pending_f()) { if (pbdma_intr_0 & pbdma_intr_0_method_pending_f()) {
assert(recover); unit_assert(recover, goto done);
assert(nvgpu_readl(g, pbdma_method0_r(pbdma_id)) != METHOD_SUBCH5); unit_assert(nvgpu_readl(g,
pbdma_method0_r(pbdma_id)) != METHOD_SUBCH5,
goto done);
} }
if (pbdma_intr_0 & pbdma_intr_0_pbcrc_pending_f()) { if (pbdma_intr_0 & pbdma_intr_0_pbcrc_pending_f()) {
assert(recover); unit_assert(recover, goto done);
assert(err_notifier != INVALID_ERR_NOTIFIER); unit_assert(err_notifier != INVALID_ERR_NOTIFIER,
goto done);
} }
if (pbdma_intr_0 & pbdma_intr_0_device_pending_f()) { if (pbdma_intr_0 & pbdma_intr_0_device_pending_f()) {
assert(recover); unit_assert(recover, goto done);
assert(nvgpu_readl(g, pbdma_pb_header_r(pbdma_id)) != 0); unit_assert(nvgpu_readl(g,
assert(nvgpu_readl(g, pbdma_method0_r(pbdma_id)) != METHOD_SUBCH5); pbdma_pb_header_r(pbdma_id)) != 0, goto done);
assert(nvgpu_readl(g, pbdma_method1_r(pbdma_id)) == METHOD_NO_SUBCH); unit_assert(nvgpu_readl(g,
assert(nvgpu_readl(g, pbdma_method2_r(pbdma_id)) != METHOD_SUBCH6); pbdma_method0_r(pbdma_id)) != METHOD_SUBCH5,
assert(nvgpu_readl(g, pbdma_method3_r(pbdma_id)) != METHOD_SUBCH7); goto done);
unit_assert(nvgpu_readl(g,
pbdma_method1_r(pbdma_id)) == METHOD_NO_SUBCH,
goto done);
unit_assert(nvgpu_readl(g,
pbdma_method2_r(pbdma_id)) != METHOD_SUBCH6,
goto done);
unit_assert(nvgpu_readl(g,
pbdma_method3_r(pbdma_id)) != METHOD_SUBCH7,
goto done);
} }
} }
@@ -382,7 +402,7 @@ int test_gm20b_pbdma_handle_intr_0(struct unit_module *m,
pbdma_intr_0_device_pending_f(), pbdma_intr_0_device_pending_f(),
&err_notifier) &err_notifier)
); );
assert(err != 0); unit_assert(err != 0, goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
@@ -404,7 +424,8 @@ int test_gm20b_pbdma_read_data(struct unit_module *m,
for (pbdma_id = 0; pbdma_id < f->num_pbdma; pbdma_id++) { for (pbdma_id = 0; pbdma_id < f->num_pbdma; pbdma_id++) {
u32 pattern = (0xbeef << 16) + pbdma_id; u32 pattern = (0xbeef << 16) + pbdma_id;
nvgpu_writel(g, pbdma_hdr_shadow_r(pbdma_id), pattern); nvgpu_writel(g, pbdma_hdr_shadow_r(pbdma_id), pattern);
assert(gm20b_pbdma_read_data(g, pbdma_id) == pattern); unit_assert(gm20b_pbdma_read_data(g, pbdma_id) == pattern,
goto done);
} }
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
@@ -423,10 +444,10 @@ int test_gm20b_pbdma_intr_descs(struct unit_module *m,
u32 fatal_0 = gm20b_pbdma_device_fatal_0_intr_descs(); u32 fatal_0 = gm20b_pbdma_device_fatal_0_intr_descs();
u32 restartable_0 = gm20b_pbdma_restartable_0_intr_descs(); u32 restartable_0 = gm20b_pbdma_restartable_0_intr_descs();
assert(fatal_0 != 0); unit_assert(fatal_0 != 0, goto done);
assert(restartable_0 != 0); unit_assert(restartable_0 != 0, goto done);
assert((intr_descs & fatal_0) == fatal_0); unit_assert((intr_descs & fatal_0) == fatal_0, goto done);
assert((intr_descs & restartable_0) == restartable_0); unit_assert((intr_descs & restartable_0) == restartable_0, goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
@@ -443,8 +464,9 @@ int test_gm20b_pbdma_format_gpfifo_entry(struct unit_module *m,
memset(&gpfifo_entry, 0, sizeof(gpfifo_entry)); memset(&gpfifo_entry, 0, sizeof(gpfifo_entry));
gm20b_pbdma_format_gpfifo_entry(g, &gpfifo_entry, pb_gpu_va, method_size); gm20b_pbdma_format_gpfifo_entry(g, &gpfifo_entry, pb_gpu_va, method_size);
assert(gpfifo_entry.entry0 == 0xdeadbeef); unit_assert(gpfifo_entry.entry0 == 0xdeadbeef, goto done);
assert(gpfifo_entry.entry1 == (0x12 | pbdma_gp_entry1_length_f(method_size))); unit_assert(gpfifo_entry.entry1 == (0x12 |
pbdma_gp_entry1_length_f(method_size)), goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
@@ -462,16 +484,17 @@ int test_gm20b_pbdma_get_gp_base(struct unit_module *m,
int err; int err;
err = EXPECT_BUG(gm20b_pbdma_get_gp_base_hi(gpfifo_base, 0)); err = EXPECT_BUG(gm20b_pbdma_get_gp_base_hi(gpfifo_base, 0));
assert(err != 0); unit_assert(err != 0, goto done);
for (n = 1; n < 16; n++) { for (n = 1; n < 16; n++) {
base_lo = gm20b_pbdma_get_gp_base(gpfifo_base); base_lo = gm20b_pbdma_get_gp_base(gpfifo_base);
base_hi = gm20b_pbdma_get_gp_base_hi(gpfifo_base, 1 << n); base_hi = gm20b_pbdma_get_gp_base_hi(gpfifo_base, 1 << n);
assert(base_lo == pbdma_gp_base_offset_f( unit_assert(base_lo == pbdma_gp_base_offset_f(
u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s()))); u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s())),
assert(base_hi == goto done);
unit_assert(base_hi ==
(pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) | (pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) |
pbdma_gp_base_hi_limit2_f(n))); pbdma_gp_base_hi_limit2_f(n)), goto done);
} }
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
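
Restating what those assertions encode: the low word carries the GPFIFO base with its reserved low bits stripped, and the high word pairs the upper VA bits with log2 of the entry count. As a sketch (the log2 loop is illustrative; the field helpers are the ones used in the test):

static void pbdma_gp_base_encode_sketch(u64 gpfifo_base, u32 entries,
					u32 *lo, u32 *hi)
{
	u32 limit2 = 0U;

	/* entries is expected to be a power of two, as in the test */
	while ((1U << limit2) < entries) {
		limit2++;
	}
	*lo = pbdma_gp_base_offset_f(
		u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s()));
	*hi = pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) |
	      pbdma_gp_base_hi_limit2_f(limit2);
}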
@@ -486,10 +509,10 @@ int test_gm20b_pbdma_get_fc_subdevice(struct unit_module *m,
{ {
int ret = UNIT_FAIL; int ret = UNIT_FAIL;
assert(gm20b_pbdma_get_fc_subdevice() == unit_assert(gm20b_pbdma_get_fc_subdevice() ==
(pbdma_subdevice_id_f(PBDMA_SUBDEVICE_ID) | (pbdma_subdevice_id_f(PBDMA_SUBDEVICE_ID) |
pbdma_subdevice_status_active_f() | pbdma_subdevice_status_active_f() |
pbdma_subdevice_channel_dma_enable_f())); pbdma_subdevice_channel_dma_enable_f()), goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
@@ -501,8 +524,8 @@ int test_gm20b_pbdma_get_ctrl_hce_priv_mode_yes(struct unit_module *m,
{ {
int ret = UNIT_FAIL; int ret = UNIT_FAIL;
assert(gm20b_pbdma_get_ctrl_hce_priv_mode_yes() == unit_assert(gm20b_pbdma_get_ctrl_hce_priv_mode_yes() ==
pbdma_hce_ctrl_hce_priv_mode_yes_f()); pbdma_hce_ctrl_hce_priv_mode_yes_f(), goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
@@ -520,28 +543,36 @@ int test_gm20b_pbdma_get_userd(struct unit_module *m,
u32 mask = 0xaaaa; u32 mask = 0xaaaa;
int err; int err;
assert(gm20b_pbdma_get_userd_addr(addr_lo) == pbdma_userd_addr_f(addr_lo)); unit_assert(gm20b_pbdma_get_userd_addr(addr_lo) ==
assert(gm20b_pbdma_get_userd_hi_addr(addr_hi) == pbdma_userd_hi_addr_f(addr_hi)); pbdma_userd_addr_f(addr_lo), goto done);
unit_assert(gm20b_pbdma_get_userd_hi_addr(addr_hi) ==
pbdma_userd_hi_addr_f(addr_hi), goto done);
mem.aperture = APERTURE_INVALID; mem.aperture = APERTURE_INVALID;
err = EXPECT_BUG(mask = gm20b_pbdma_get_userd_aperture_mask(g, &mem)); err = EXPECT_BUG(mask = gm20b_pbdma_get_userd_aperture_mask(g, &mem));
assert(err != 0); unit_assert(err != 0, goto done);
assert(mask == 0xaaaa); unit_assert(mask == 0xaaaa, goto done);
if (nvgpu_is_enabled(g, NVGPU_MM_HONORS_APERTURE)) { if (nvgpu_is_enabled(g, NVGPU_MM_HONORS_APERTURE)) {
mem.aperture = APERTURE_SYSMEM; mem.aperture = APERTURE_SYSMEM;
assert(gm20b_pbdma_get_userd_aperture_mask(g, &mem) == pbdma_userd_target_sys_mem_ncoh_f()); unit_assert(gm20b_pbdma_get_userd_aperture_mask(g, &mem) ==
pbdma_userd_target_sys_mem_ncoh_f(), goto done);
mem.aperture = APERTURE_SYSMEM_COH; mem.aperture = APERTURE_SYSMEM_COH;
assert(gm20b_pbdma_get_userd_aperture_mask(g, &mem) == pbdma_userd_target_sys_mem_coh_f()); unit_assert(gm20b_pbdma_get_userd_aperture_mask(g, &mem) ==
pbdma_userd_target_sys_mem_coh_f(), goto done);
mem.aperture = APERTURE_VIDMEM; mem.aperture = APERTURE_VIDMEM;
assert(gm20b_pbdma_get_userd_aperture_mask(g, &mem) == pbdma_userd_target_vid_mem_f()); unit_assert(gm20b_pbdma_get_userd_aperture_mask(g, &mem) ==
pbdma_userd_target_vid_mem_f(), goto done);
} else { } else {
mem.aperture = APERTURE_SYSMEM; mem.aperture = APERTURE_SYSMEM;
assert(gm20b_pbdma_get_userd_aperture_mask(g, &mem) == pbdma_userd_target_vid_mem_f()); unit_assert(gm20b_pbdma_get_userd_aperture_mask(g, &mem) ==
pbdma_userd_target_vid_mem_f(), goto done);
mem.aperture = APERTURE_SYSMEM_COH; mem.aperture = APERTURE_SYSMEM_COH;
assert(gm20b_pbdma_get_userd_aperture_mask(g, &mem) == pbdma_userd_target_vid_mem_f()); unit_assert(gm20b_pbdma_get_userd_aperture_mask(g, &mem) ==
pbdma_userd_target_vid_mem_f(), goto done);
mem.aperture = APERTURE_VIDMEM; mem.aperture = APERTURE_VIDMEM;
assert(gm20b_pbdma_get_userd_aperture_mask(g, &mem) == pbdma_userd_target_vid_mem_f()); unit_assert(gm20b_pbdma_get_userd_aperture_mask(g, &mem) ==
pbdma_userd_target_vid_mem_f(), goto done);
} }
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
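
The aperture-to-target mapping these checks pin down, collected into one sketch (derived from the assertions above; the switch shape itself is an assumption):

/* Sketch: with NVGPU_MM_HONORS_APERTURE set, sysmem apertures map to
 * the matching sysmem targets; without it, everything resolves to
 * vidmem. An invalid aperture trips BUG(), per the EXPECT_BUG check. */
static u32 userd_aperture_mask_sketch(struct gk20a *g,
				      struct nvgpu_mem *mem)
{
	bool honors = nvgpu_is_enabled(g, NVGPU_MM_HONORS_APERTURE);

	switch (mem->aperture) {
	case APERTURE_SYSMEM:
		return honors ? pbdma_userd_target_sys_mem_ncoh_f() :
				pbdma_userd_target_vid_mem_f();
	case APERTURE_SYSMEM_COH:
		return honors ? pbdma_userd_target_sys_mem_coh_f() :
				pbdma_userd_target_vid_mem_f();
	case APERTURE_VIDMEM:
		return pbdma_userd_target_vid_mem_f();
	default:
		BUG();
		return 0U;
	}
}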


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -57,15 +57,13 @@
} while (0) } while (0)
#endif #endif
#define assert(cond) unit_assert(cond, goto done)
int test_gp10b_pbdma_get_signature(struct unit_module *m, int test_gp10b_pbdma_get_signature(struct unit_module *m,
struct gk20a *g, void *args) struct gk20a *g, void *args)
{ {
int ret = UNIT_FAIL; int ret = UNIT_FAIL;
assert(gp10b_pbdma_get_signature(g) == unit_assert(gp10b_pbdma_get_signature(g) ==
(g->ops.get_litter_value(g, GPU_LIT_GPFIFO_CLASS) | (g->ops.get_litter_value(g, GPU_LIT_GPFIFO_CLASS) |
pbdma_signature_sw_zero_f())); pbdma_signature_sw_zero_f()), goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
@@ -85,9 +83,9 @@ int test_gp10b_pbdma_get_fc_runlist_timeslice(struct unit_module *m,
u32 timescale = (timeslice >> 12) & 0xF; u32 timescale = (timeslice >> 12) & 0xF;
bool enabled = ((timeslice & pbdma_runlist_timeslice_enable_true_f()) != 0); bool enabled = ((timeslice & pbdma_runlist_timeslice_enable_true_f()) != 0);
assert(timeout <= RL_MAX_TIMESLICE_TIMEOUT); unit_assert(timeout <= RL_MAX_TIMESLICE_TIMEOUT, goto done);
assert(timescale <= RL_MAX_TIMESLICE_SCALE); unit_assert(timescale <= RL_MAX_TIMESLICE_SCALE, goto done);
assert(enabled); unit_assert(enabled, goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
@@ -99,8 +97,8 @@ int test_gp10b_pbdma_get_config_auth_level_privileged(struct unit_module *m,
{ {
int ret = UNIT_FAIL; int ret = UNIT_FAIL;
assert(gp10b_pbdma_get_config_auth_level_privileged() == unit_assert(gp10b_pbdma_get_config_auth_level_privileged() ==
pbdma_config_auth_level_privileged_f()); pbdma_config_auth_level_privileged_f(), goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -57,8 +57,6 @@
} while (0) } while (0)
#endif #endif
#define assert(cond) unit_assert(cond, goto done)
#define branches_str test_fifo_flags_str #define branches_str test_fifo_flags_str
#define pruned test_fifo_subtest_pruned #define pruned test_fifo_subtest_pruned
@@ -75,15 +73,15 @@ int test_gv11b_pbdma_setup_hw(struct unit_module *m,
u32 timeout; u32 timeout;
num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
assert(num_pbdma > 0); unit_assert(num_pbdma > 0, goto done);
gv11b_pbdma_setup_hw(g); gv11b_pbdma_setup_hw(g);
if (nvgpu_platform_is_silicon(g)) { if (nvgpu_platform_is_silicon(g)) {
for (pbdma_id = 0; pbdma_id < num_pbdma; pbdma_id++) for (pbdma_id = 0; pbdma_id < num_pbdma; pbdma_id++)
{ {
timeout = nvgpu_readl(g, pbdma_timeout_r(pbdma_id)); timeout = nvgpu_readl(g, pbdma_timeout_r(pbdma_id));
assert(get_field(timeout, pbdma_timeout_period_m()) == unit_assert(get_field(timeout, pbdma_timeout_period_m()) ==
pbdma_timeout_period_max_f()); pbdma_timeout_period_max_f(), goto done);
} }
} }
@@ -103,7 +101,7 @@ int test_gv11b_pbdma_intr_enable(struct unit_module *m,
u32 i; u32 i;
num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
assert(num_pbdma > 0); unit_assert(num_pbdma > 0, goto done);
for (i = 0 ; i < 2; i++) { for (i = 0 ; i < 2; i++) {
enable = (i > 0); enable = (i > 0);
@@ -127,15 +125,16 @@ int test_gv11b_pbdma_intr_enable(struct unit_module *m,
u32 intr_en_1 = nvgpu_readl(g, pbdma_intr_en_1_r(pbdma_id)); u32 intr_en_1 = nvgpu_readl(g, pbdma_intr_en_1_r(pbdma_id));
if (enable) { if (enable) {
assert(intr_en_0 == pattern); unit_assert(intr_en_0 == pattern, goto done);
assert(intr_en_1 == (pattern & unit_assert(intr_en_1 == (pattern &
~pbdma_intr_stall_1_hce_illegal_op_enabled_f())); ~pbdma_intr_stall_1_hce_illegal_op_enabled_f()),
goto done);
} else { } else {
assert(intr_en_0 == 0); unit_assert(intr_en_0 == 0, goto done);
assert(intr_en_1 == 0); unit_assert(intr_en_1 == 0, goto done);
} }
assert(intr_0 != 0); unit_assert(intr_0 != 0, goto done);
assert(intr_1 != 0); unit_assert(intr_1 != 0, goto done);
} }
} }
@@ -177,7 +176,8 @@ int test_gv11b_pbdma_handle_intr_0(struct unit_module *m,
bool recover; bool recover;
int i; int i;
assert((f->intr.pbdma.device_fatal_0 & pbdma_intr_0_memreq_pending_f()) != 0); unit_assert((f->intr.pbdma.device_fatal_0 &
pbdma_intr_0_memreq_pending_f()) != 0, goto done);
for (branches = 0; branches < BIT(PBDMA_NUM_INTRS_0); branches++) { for (branches = 0; branches < BIT(PBDMA_NUM_INTRS_0); branches++) {
@@ -198,22 +198,24 @@ int test_gv11b_pbdma_handle_intr_0(struct unit_module *m,
recover = gv11b_pbdma_handle_intr_0(g, pbdma_id, pbdma_intr_0, &err_notifier); recover = gv11b_pbdma_handle_intr_0(g, pbdma_id, pbdma_intr_0, &err_notifier);
if (pbdma_intr_0 == 0) { if (pbdma_intr_0 == 0) {
assert(!recover); unit_assert(!recover, goto done);
} }
if (pbdma_intr_0 & pbdma_intr_0_memreq_pending_f()) { if (pbdma_intr_0 & pbdma_intr_0_memreq_pending_f()) {
assert(recover); unit_assert(recover, goto done);
} }
if (pbdma_intr_0 & pbdma_intr_0_clear_faulted_error_pending_f()) { if (pbdma_intr_0 & pbdma_intr_0_clear_faulted_error_pending_f()) {
assert(recover); unit_assert(recover, goto done);
assert(nvgpu_readl(g, pbdma_method_r(pbdma_id, 0)) != 0); unit_assert(nvgpu_readl(g,
pbdma_method_r(pbdma_id, 0)) != 0, goto done);
} else { } else {
assert(nvgpu_readl(g, pbdma_method_r(pbdma_id, 0)) == 0); unit_assert(nvgpu_readl(g,
pbdma_method_r(pbdma_id, 0)) == 0, goto done);
} }
if (pbdma_intr_0 & pbdma_intr_0_eng_reset_pending_f()) { if (pbdma_intr_0 & pbdma_intr_0_eng_reset_pending_f()) {
assert(recover); unit_assert(recover, goto done);
} }
} }
@@ -283,15 +285,15 @@ int test_gv11b_pbdma_handle_intr_1(struct unit_module *m,
recover = gv11b_pbdma_handle_intr_1(g, pbdma_id, pbdma_intr_1, &err_notifier); recover = gv11b_pbdma_handle_intr_1(g, pbdma_id, pbdma_intr_1, &err_notifier);
if (pbdma_intr_1 == 0) { if (pbdma_intr_1 == 0) {
assert(!recover); unit_assert(!recover, goto done);
} }
if (((branches & F_PBDMA_INTR_1_CTXNOTVALID_IN) && if (((branches & F_PBDMA_INTR_1_CTXNOTVALID_IN) &&
(branches & F_PBDMA_INTR_1_CTXNOTVALID_READ)) || (branches & F_PBDMA_INTR_1_CTXNOTVALID_READ)) ||
(branches & F_PBDMA_INTR_1_HCE)) { (branches & F_PBDMA_INTR_1_HCE)) {
assert(recover); unit_assert(recover, goto done);
} else { } else {
assert(!recover); unit_assert(!recover, goto done);
} }
} }
@@ -314,8 +316,9 @@ int test_gv11b_pbdma_intr_descs(struct unit_module *m,
f->intr.pbdma.restartable_0); f->intr.pbdma.restartable_0);
u32 channel_fatal_0 = gv11b_pbdma_channel_fatal_0_intr_descs(); u32 channel_fatal_0 = gv11b_pbdma_channel_fatal_0_intr_descs();
assert(channel_fatal_0 != 0); unit_assert(channel_fatal_0 != 0, goto done);
assert((intr_descs & channel_fatal_0) == channel_fatal_0); unit_assert((intr_descs & channel_fatal_0) == channel_fatal_0,
goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
@@ -328,17 +331,17 @@ int test_gv11b_pbdma_get_fc(struct unit_module *m,
{ {
int ret = UNIT_FAIL; int ret = UNIT_FAIL;
assert(gv11b_pbdma_get_fc_pb_header() == unit_assert(gv11b_pbdma_get_fc_pb_header() ==
(pbdma_pb_header_method_zero_f() | (pbdma_pb_header_method_zero_f() |
pbdma_pb_header_subchannel_zero_f() | pbdma_pb_header_subchannel_zero_f() |
pbdma_pb_header_level_main_f() | pbdma_pb_header_level_main_f() |
pbdma_pb_header_first_true_f() | pbdma_pb_header_first_true_f() |
pbdma_pb_header_type_inc_f())); pbdma_pb_header_type_inc_f()), goto done);
assert(gv11b_pbdma_get_fc_target() == unit_assert(gv11b_pbdma_get_fc_target() ==
(pbdma_target_engine_sw_f() | (pbdma_target_engine_sw_f() |
pbdma_target_eng_ctx_valid_true_f() | pbdma_target_eng_ctx_valid_true_f() |
pbdma_target_ce_ctx_valid_true_f())); pbdma_target_ce_ctx_valid_true_f()), goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
@@ -352,8 +355,8 @@ int test_gv11b_pbdma_set_channel_info_veid(struct unit_module *m,
u32 subctx_id; u32 subctx_id;
for (subctx_id = 0; subctx_id < 64; subctx_id ++) { for (subctx_id = 0; subctx_id < 64; subctx_id ++) {
assert(gv11b_pbdma_set_channel_info_veid(subctx_id) == unit_assert(gv11b_pbdma_set_channel_info_veid(subctx_id) ==
pbdma_set_channel_info_veid_f(subctx_id)); pbdma_set_channel_info_veid_f(subctx_id), goto done);
} }
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
@@ -366,8 +369,8 @@ int test_gv11b_pbdma_config_userd_writeback_enable(struct unit_module *m,
{ {
int ret = UNIT_FAIL; int ret = UNIT_FAIL;
assert(gv11b_pbdma_config_userd_writeback_enable() == unit_assert(gv11b_pbdma_config_userd_writeback_enable() ==
pbdma_config_userd_writeback_enable_f()); pbdma_config_userd_writeback_enable_f(), goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -55,8 +55,6 @@
} while (0) } while (0)
#endif #endif
#define assert(cond) unit_assert(cond, goto done)
#define pruned test_fifo_subtest_pruned #define pruned test_fifo_subtest_pruned
#define branches_str test_fifo_flags_str #define branches_str test_fifo_flags_str
@@ -105,7 +103,7 @@ int test_pbdma_setup_sw(struct unit_module *m,
kmem_fi = nvgpu_kmem_get_fault_injection(); kmem_fi = nvgpu_kmem_get_fault_injection();
err = test_fifo_setup_gv11b_reg_space(m, g); err = test_fifo_setup_gv11b_reg_space(m, g);
assert(err == 0); unit_assert(err == 0, goto done);
gv11b_init_hal(g); gv11b_init_hal(g);
@@ -142,19 +140,22 @@ int test_pbdma_setup_sw(struct unit_module *m,
err = nvgpu_pbdma_setup_sw(g); err = nvgpu_pbdma_setup_sw(g);
if (branches & fail) { if (branches & fail) {
assert(err != 0); unit_assert(err != 0, goto done);
assert(f->pbdma_map == NULL); unit_assert(f->pbdma_map == NULL, goto done);
} else { } else {
assert(err == 0); unit_assert(err == 0, goto done);
assert(f->pbdma_map != NULL); unit_assert(f->pbdma_map != NULL, goto done);
assert(f->intr.pbdma.device_fatal_0 == unit_assert(f->intr.pbdma.device_fatal_0 ==
(branches & F_PBDMA_SETUP_SW_DEVICE_FATAL_0)); (branches & F_PBDMA_SETUP_SW_DEVICE_FATAL_0),
assert(f->intr.pbdma.channel_fatal_0 == goto done);
(branches & F_PBDMA_SETUP_SW_CHANNEL_FATAL_0)); unit_assert(f->intr.pbdma.channel_fatal_0 ==
assert(f->intr.pbdma.restartable_0 == (branches & F_PBDMA_SETUP_SW_CHANNEL_FATAL_0),
(branches & F_PBDMA_SETUP_SW_RESTARTABLE_0)); goto done);
unit_assert(f->intr.pbdma.restartable_0 ==
(branches & F_PBDMA_SETUP_SW_RESTARTABLE_0),
goto done);
nvgpu_pbdma_cleanup_sw(g); nvgpu_pbdma_cleanup_sw(g);
assert(f->pbdma_map == NULL); unit_assert(f->pbdma_map == NULL, goto done);
} }
} }
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
@@ -188,17 +189,18 @@ int test_pbdma_find_for_runlist(struct unit_module *m,
found = nvgpu_pbdma_find_for_runlist(g, runlist_id, &pbdma_id); found = nvgpu_pbdma_find_for_runlist(g, runlist_id, &pbdma_id);
if (active) { if (active) {
assert(found); unit_assert(found, goto done);
assert(pbdma_id != U32_MAX); unit_assert(pbdma_id != U32_MAX, goto done);
assert((f->pbdma_map[pbdma_id] & BIT(runlist_id)) != 0); unit_assert((f->pbdma_map[pbdma_id] &
BIT(runlist_id)) != 0, goto done);
} else { } else {
assert(!found); unit_assert(!found, goto done);
assert(pbdma_id == U32_MAX); unit_assert(pbdma_id == U32_MAX, goto done);
} }
} }
f->num_pbdma = 0; f->num_pbdma = 0;
assert(!nvgpu_pbdma_find_for_runlist(g, 0, &pbdma_id)); unit_assert(!nvgpu_pbdma_find_for_runlist(g, 0, &pbdma_id), goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
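
The lookup exercised here amounts to scanning the per-PBDMA runlist bitmasks; a plausible shape, derived from the assertions (illustrative, the real nvgpu_pbdma_find_for_runlist() may differ):

/* Sketch: return the first PBDMA whose map has runlist_id's bit set;
 * report U32_MAX and false when none matches (or num_pbdma is 0). */
static bool pbdma_find_for_runlist_sketch(struct nvgpu_fifo *f,
		u32 runlist_id, u32 *pbdma_id)
{
	u32 id;

	*pbdma_id = U32_MAX;
	for (id = 0U; id < f->num_pbdma; id++) {
		if ((f->pbdma_map[id] & BIT(runlist_id)) != 0U) {
			*pbdma_id = id;
			return true;
		}
	}
	return false;
}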
@@ -219,29 +221,39 @@ int test_pbdma_status(struct unit_module *m,
pbdma_status.chsw_status <= NVGPU_PBDMA_CHSW_STATUS_SWITCH; pbdma_status.chsw_status <= NVGPU_PBDMA_CHSW_STATUS_SWITCH;
pbdma_status.chsw_status++) pbdma_status.chsw_status++)
{ {
assert(nvgpu_pbdma_status_is_chsw_switch(&pbdma_status) == unit_assert(nvgpu_pbdma_status_is_chsw_switch(&pbdma_status) ==
(pbdma_status.chsw_status == NVGPU_PBDMA_CHSW_STATUS_SWITCH)); (pbdma_status.chsw_status ==
assert(nvgpu_pbdma_status_is_chsw_load(&pbdma_status) == NVGPU_PBDMA_CHSW_STATUS_SWITCH), goto done);
(pbdma_status.chsw_status == NVGPU_PBDMA_CHSW_STATUS_LOAD)); unit_assert(nvgpu_pbdma_status_is_chsw_load(&pbdma_status) ==
assert(nvgpu_pbdma_status_is_chsw_save(&pbdma_status) == (pbdma_status.chsw_status ==
(pbdma_status.chsw_status == NVGPU_PBDMA_CHSW_STATUS_SAVE)); NVGPU_PBDMA_CHSW_STATUS_LOAD), goto done);
assert(nvgpu_pbdma_status_is_chsw_valid(&pbdma_status) == unit_assert(nvgpu_pbdma_status_is_chsw_save(&pbdma_status) ==
(pbdma_status.chsw_status == NVGPU_PBDMA_CHSW_STATUS_VALID)); (pbdma_status.chsw_status ==
NVGPU_PBDMA_CHSW_STATUS_SAVE), goto done);
unit_assert(nvgpu_pbdma_status_is_chsw_valid(&pbdma_status) ==
(pbdma_status.chsw_status ==
NVGPU_PBDMA_CHSW_STATUS_VALID), goto done);
} }
pbdma_status.id_type = PBDMA_STATUS_ID_TYPE_CHID; pbdma_status.id_type = PBDMA_STATUS_ID_TYPE_CHID;
assert(nvgpu_pbdma_status_is_id_type_tsg(&pbdma_status) == false); unit_assert(nvgpu_pbdma_status_is_id_type_tsg(&pbdma_status) == false,
goto done);
pbdma_status.id_type = PBDMA_STATUS_ID_TYPE_TSGID; pbdma_status.id_type = PBDMA_STATUS_ID_TYPE_TSGID;
assert(nvgpu_pbdma_status_is_id_type_tsg(&pbdma_status) == true); unit_assert(nvgpu_pbdma_status_is_id_type_tsg(&pbdma_status) == true,
goto done);
pbdma_status.id_type = PBDMA_STATUS_ID_TYPE_INVALID; pbdma_status.id_type = PBDMA_STATUS_ID_TYPE_INVALID;
assert(nvgpu_pbdma_status_is_id_type_tsg(&pbdma_status) == false); unit_assert(nvgpu_pbdma_status_is_id_type_tsg(&pbdma_status) == false,
goto done);
pbdma_status.next_id_type = PBDMA_STATUS_ID_TYPE_CHID; pbdma_status.next_id_type = PBDMA_STATUS_ID_TYPE_CHID;
assert(nvgpu_pbdma_status_is_next_id_type_tsg(&pbdma_status) == false); unit_assert(nvgpu_pbdma_status_is_next_id_type_tsg(&pbdma_status) ==
false, goto done);
pbdma_status.next_id_type = PBDMA_STATUS_ID_TYPE_TSGID; pbdma_status.next_id_type = PBDMA_STATUS_ID_TYPE_TSGID;
assert(nvgpu_pbdma_status_is_next_id_type_tsg(&pbdma_status) == true); unit_assert(nvgpu_pbdma_status_is_next_id_type_tsg(&pbdma_status) ==
true, goto done);
pbdma_status.next_id_type = PBDMA_STATUS_ID_TYPE_INVALID; pbdma_status.next_id_type = PBDMA_STATUS_ID_TYPE_INVALID;
assert(nvgpu_pbdma_status_is_next_id_type_tsg(&pbdma_status) == false); unit_assert(nvgpu_pbdma_status_is_next_id_type_tsg(&pbdma_status) ==
false, goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
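
As the equality checks above suggest, each chsw/id-type predicate reduces to a field comparison on the captured status; for instance (struct shape assumed):

/* Sketch: predicates are plain equality tests on pbdma_status fields. */
static bool pbdma_status_is_chsw_load_sketch(
		const struct nvgpu_pbdma_status_info *s)
{
	return s->chsw_status == NVGPU_PBDMA_CHSW_STATUS_LOAD;
}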


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -56,8 +56,6 @@
} while (0) } while (0)
#endif #endif
#define assert(cond) unit_assert(cond, goto done)
#define MAX_STUB 2 #define MAX_STUB 2
struct stub_ctx { struct stub_ctx {
@@ -127,7 +125,7 @@ int test_preempt(struct unit_module *m, struct gk20a *g, void *args)
ch = nvgpu_channel_open_new(g, runlist_id, ch = nvgpu_channel_open_new(g, runlist_id,
privileged, getpid(), getpid()); privileged, getpid(), getpid());
assert(ch != NULL); unit_assert(ch != NULL, goto done);
g->ops.fifo.preempt_tsg = stub_fifo_preempt_tsg; g->ops.fifo.preempt_tsg = stub_fifo_preempt_tsg;
g->ops.fifo.preempt_channel = stub_fifo_preempt_channel; g->ops.fifo.preempt_channel = stub_fifo_preempt_channel;
@@ -142,12 +140,12 @@ int test_preempt(struct unit_module *m, struct gk20a *g, void *args)
NVGPU_INVALID_TSG_ID : 0; NVGPU_INVALID_TSG_ID : 0;
err = nvgpu_preempt_channel(g, ch); err = nvgpu_preempt_channel(g, ch);
assert(err == 0); unit_assert(err == 0, goto done);
if (branches & F_PREEMPT_CHANNEL) { if (branches & F_PREEMPT_CHANNEL) {
assert(stub[0].chid == ch->chid); unit_assert(stub[0].chid == ch->chid, goto done);
} else { } else {
assert(stub[0].tsgid == ch->tsgid); unit_assert(stub[0].tsgid == ch->tsgid, goto done);
} }
} }
@@ -202,7 +200,7 @@ int test_preempt_poll_tsg_on_pbdma(struct unit_module *m, struct gk20a *g,
u32 prune = F_PREEMPT_POLL_PBDMA_NULL | F_PREEMPT_POLL_TSG_NULL; u32 prune = F_PREEMPT_POLL_PBDMA_NULL | F_PREEMPT_POLL_TSG_NULL;
tsg = nvgpu_tsg_open(g, getpid()); tsg = nvgpu_tsg_open(g, getpid());
assert(tsg != NULL); unit_assert(tsg != NULL, goto done);
tsg->runlist_id = 0; tsg->runlist_id = 0;
for (branches = 0U; branches < F_PREEMPT_POLL_LAST; for (branches = 0U; branches < F_PREEMPT_POLL_LAST;
@@ -230,11 +228,13 @@ int test_preempt_poll_tsg_on_pbdma(struct unit_module *m, struct gk20a *g,
} }
if (branches & F_PREEMPT_POLL_TSG_NULL) { if (branches & F_PREEMPT_POLL_TSG_NULL) {
assert(stub[0].tsgid == NVGPU_INVALID_TSG_ID); unit_assert(stub[0].tsgid == NVGPU_INVALID_TSG_ID,
goto done);
} else if (!(branches & F_PREEMPT_POLL_PBDMA_NULL)) { } else if (!(branches & F_PREEMPT_POLL_PBDMA_NULL)) {
assert(stub[0].tsgid == 0); unit_assert(stub[0].tsgid == 0, goto done);
assert(stub[0].pbdma_id == unit_assert(stub[0].pbdma_id ==
nvgpu_ffs(f->runlist_info[0]->pbdma_bitmask)); nvgpu_ffs(f->runlist_info[0]->pbdma_bitmask),
goto done);
} }
} }
@@ -254,7 +254,7 @@ int test_preempt_get_timeout(struct unit_module *m, struct gk20a *g, void *args)
int ret = UNIT_FAIL; int ret = UNIT_FAIL;
timeout = nvgpu_preempt_get_timeout(g); timeout = nvgpu_preempt_get_timeout(g);
assert(timeout == 0U); unit_assert(timeout == 0U, goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -38,8 +38,6 @@
#include "../../nvgpu-fifo-common.h" #include "../../nvgpu-fifo-common.h"
#include "nvgpu-ramfc-gp10b.h" #include "nvgpu-ramfc-gp10b.h"
#define assert(cond) unit_assert(cond, goto done)
struct stub_ctx { struct stub_ctx {
u32 addr_lo; u32 addr_lo;
u32 addr_hi; u32 addr_hi;
@@ -85,7 +83,7 @@ int test_gp10b_ramfc_commit_userd(struct unit_module *m, struct gk20a *g,
/* Aperture should be fixed = SYSMEM */ /* Aperture should be fixed = SYSMEM */
nvgpu_set_enabled(g, NVGPU_MM_HONORS_APERTURE, true); nvgpu_set_enabled(g, NVGPU_MM_HONORS_APERTURE, true);
err = nvgpu_alloc_inst_block(g, &ch.inst_block); err = nvgpu_alloc_inst_block(g, &ch.inst_block);
assert(err == 0); unit_assert(err == 0, goto done);
ch.g = g; ch.g = g;
ch.chid = 0; ch.chid = 0;
@@ -93,14 +91,14 @@ int test_gp10b_ramfc_commit_userd(struct unit_module *m, struct gk20a *g,
USERD_IOVA_ADDR_LO) << ram_userd_base_shift_v(); USERD_IOVA_ADDR_LO) << ram_userd_base_shift_v();
gp10b_ramfc_commit_userd(&ch); gp10b_ramfc_commit_userd(&ch);
assert(stub[0].addr_lo == USERD_IOVA_ADDR_LO); unit_assert(stub[0].addr_lo == USERD_IOVA_ADDR_LO, goto done);
assert(stub[0].addr_hi == (USERD_IOVA_ADDR_HI) << unit_assert(stub[0].addr_hi == (USERD_IOVA_ADDR_HI) <<
ram_userd_base_shift_v()); ram_userd_base_shift_v(), goto done);
assert(nvgpu_mem_rd32(g, &ch.inst_block, unit_assert(nvgpu_mem_rd32(g, &ch.inst_block,
ram_in_ramfc_w() + ram_fc_userd_w()) == ram_in_ramfc_w() + ram_fc_userd_w()) ==
pbdma_userd_target_sys_mem_ncoh_f()); pbdma_userd_target_sys_mem_ncoh_f(), goto done);
assert(nvgpu_mem_rd32(g, &ch.inst_block, unit_assert(nvgpu_mem_rd32(g, &ch.inst_block, ram_in_ramfc_w() +
ram_in_ramfc_w() + ram_fc_userd_hi_w()) == 1U); ram_fc_userd_hi_w()) == 1U, goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -48,8 +48,6 @@
} while (0) } while (0)
#endif #endif
#define assert(cond) unit_assert(cond, goto done)
#define branches_str test_fifo_flags_str #define branches_str test_fifo_flags_str
#define pruned test_fifo_subtest_pruned #define pruned test_fifo_subtest_pruned
@@ -179,7 +177,7 @@ int test_gv11b_ramfc_setup(struct unit_module *m, struct gk20a *g, void *args)
/* Aperture should be fixed = SYSMEM */ /* Aperture should be fixed = SYSMEM */
nvgpu_set_enabled(g, NVGPU_MM_HONORS_APERTURE, true); nvgpu_set_enabled(g, NVGPU_MM_HONORS_APERTURE, true);
err = nvgpu_alloc_inst_block(g, &ch.inst_block); err = nvgpu_alloc_inst_block(g, &ch.inst_block);
assert(err == 0); unit_assert(err == 0, goto done);
ch.g = g; ch.g = g;
ch.subctx_id = 1; ch.subctx_id = 1;
@@ -196,14 +194,14 @@ int test_gv11b_ramfc_setup(struct unit_module *m, struct gk20a *g, void *args)
true : false; true : false;
err = gv11b_ramfc_setup(&ch, 0U, 0U, 0ULL, 0U); err = gv11b_ramfc_setup(&ch, 0U, 0U, 0ULL, 0U);
assert(err == 0); unit_assert(err == 0, goto done);
assert(nvgpu_mem_rd32(g, &ch.inst_block, unit_assert(nvgpu_mem_rd32(g, &ch.inst_block,
ram_fc_config_w()) == 5U); ram_fc_config_w()) == 5U, goto done);
if (branches & F_RAMFC_SETUP_PRIVILEDGED_CH) { if (branches & F_RAMFC_SETUP_PRIVILEDGED_CH) {
assert(global_count == 15U); unit_assert(global_count == 15U, goto done);
} else { } else {
assert(global_count == 13U); unit_assert(global_count == 13U, goto done);
} }
} }
@@ -231,13 +229,14 @@ int test_gv11b_ramfc_capture_ram_dump(struct unit_module *m,
g->ops.ramin.alloc_size = gk20a_ramin_alloc_size; g->ops.ramin.alloc_size = gk20a_ramin_alloc_size;
err = nvgpu_alloc_inst_block(g, &ch.inst_block); err = nvgpu_alloc_inst_block(g, &ch.inst_block);
assert(err == 0); unit_assert(err == 0, goto done);
nvgpu_memset(g, &ch.inst_block, 0U, 0xa5U, 256U); nvgpu_memset(g, &ch.inst_block, 0U, 0xa5U, 256U);
gv11b_ramfc_capture_ram_dump(g, &ch, &info); gv11b_ramfc_capture_ram_dump(g, &ch, &info);
assert(info.inst.pb_top_level_get == 0xa5a5a5a5a5a5a5a5); unit_assert(info.inst.pb_top_level_get == 0xa5a5a5a5a5a5a5a5,
assert(info.inst.pb_count == 0xa5a5a5a5); goto done);
unit_assert(info.inst.pb_count == 0xa5a5a5a5, goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -31,8 +31,6 @@
#include "../../nvgpu-fifo-common.h" #include "../../nvgpu-fifo-common.h"
#include "ramin-gk20a-fusa.h" #include "ramin-gk20a-fusa.h"
#define assert(cond) unit_assert(cond, goto done)
int test_gk20a_ramin_base_shift(struct unit_module *m, struct gk20a *g, int test_gk20a_ramin_base_shift(struct unit_module *m, struct gk20a *g,
void *args) void *args)
{ {
@@ -40,7 +38,7 @@ int test_gk20a_ramin_base_shift(struct unit_module *m, struct gk20a *g,
u32 base_shift = 0U; u32 base_shift = 0U;
base_shift = gk20a_ramin_base_shift(); base_shift = gk20a_ramin_base_shift();
assert(base_shift == ram_in_base_shift_v()); unit_assert(base_shift == ram_in_base_shift_v(), goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
@@ -57,7 +55,7 @@ int test_gk20a_ramin_alloc_size(struct unit_module *m, struct gk20a *g,
u32 alloc_size = 0U; u32 alloc_size = 0U;
alloc_size = gk20a_ramin_alloc_size(); alloc_size = gk20a_ramin_alloc_size();
assert(alloc_size == ram_in_alloc_size_v()); unit_assert(alloc_size == ram_in_alloc_size_v(), goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -46,8 +46,6 @@
} while (0) } while (0)
#endif #endif
#define assert(cond) unit_assert(cond, goto done)
#define branches_str test_fifo_flags_str #define branches_str test_fifo_flags_str
#define pruned test_fifo_subtest_pruned #define pruned test_fifo_subtest_pruned
@@ -71,7 +69,7 @@ int test_gm20b_ramin_set_big_page_size(struct unit_module *m, struct gk20a *g,
g->ops.ramin.alloc_size = gk20a_ramin_alloc_size; g->ops.ramin.alloc_size = gk20a_ramin_alloc_size;
err = nvgpu_dma_alloc(g, g->ops.ramin.alloc_size(), &mem); err = nvgpu_dma_alloc(g, g->ops.ramin.alloc_size(), &mem);
assert(err == 0); unit_assert(err == 0, goto done);
for (branches = 0U; branches < F_SET_BIG_PAGE_SIZE_LAST; branches++) { for (branches = 0U; branches < F_SET_BIG_PAGE_SIZE_LAST; branches++) {
unit_verbose(m, "%s branches=%s\n", unit_verbose(m, "%s branches=%s\n",
@@ -85,13 +83,15 @@ int test_gm20b_ramin_set_big_page_size(struct unit_module *m, struct gk20a *g,
gm20b_ramin_set_big_page_size(g, &mem, size); gm20b_ramin_set_big_page_size(g, &mem, size);
if (branches & F_SET_BIG_PAGE_SIZE_64K) { if (branches & F_SET_BIG_PAGE_SIZE_64K) {
assert(nvgpu_mem_rd32(g, &mem, unit_assert(nvgpu_mem_rd32(g, &mem,
ram_in_big_page_size_w()) == ram_in_big_page_size_w()) ==
(data | ram_in_big_page_size_64kb_f())); (data | ram_in_big_page_size_64kb_f()),
goto done);
} else { } else {
assert(nvgpu_mem_rd32(g, &mem, unit_assert(nvgpu_mem_rd32(g, &mem,
ram_in_big_page_size_w()) == ram_in_big_page_size_w()) ==
(data | ram_in_big_page_size_128kb_f())); (data | ram_in_big_page_size_128kb_f()),
goto done);
} }
} }


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -35,8 +35,6 @@
#include "../../nvgpu-fifo-common.h" #include "../../nvgpu-fifo-common.h"
#include "ramin-gp10b-fusa.h" #include "ramin-gp10b-fusa.h"
#define assert(cond) unit_assert(cond, goto done)
int test_gp10b_ramin_init_pdb(struct unit_module *m, struct gk20a *g, int test_gp10b_ramin_init_pdb(struct unit_module *m, struct gk20a *g,
void *args) void *args)
{ {
@@ -54,10 +52,10 @@ int test_gp10b_ramin_init_pdb(struct unit_module *m, struct gk20a *g,
/* Aperture should be fixed = SYSMEM */ /* Aperture should be fixed = SYSMEM */
nvgpu_set_enabled(g, NVGPU_MM_HONORS_APERTURE, true); nvgpu_set_enabled(g, NVGPU_MM_HONORS_APERTURE, true);
err = nvgpu_alloc_inst_block(g, &inst_block); err = nvgpu_alloc_inst_block(g, &inst_block);
assert(err == 0); unit_assert(err == 0, goto done);
err = nvgpu_dma_alloc(g, g->ops.ramin.alloc_size(), &pdb_mem); err = nvgpu_dma_alloc(g, g->ops.ramin.alloc_size(), &pdb_mem);
assert(err == 0); unit_assert(err == 0, goto done);
pdb_addr = nvgpu_mem_get_addr(g, &pdb_mem); pdb_addr = nvgpu_mem_get_addr(g, &pdb_mem);
pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v()); pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v());
@@ -72,10 +70,11 @@ int test_gp10b_ramin_init_pdb(struct unit_module *m, struct gk20a *g,
gp10b_ramin_init_pdb(g, &inst_block, pdb_addr, &pdb_mem); gp10b_ramin_init_pdb(g, &inst_block, pdb_addr, &pdb_mem);
assert(nvgpu_mem_rd32(g, &inst_block, ram_in_page_dir_base_lo_w()) == unit_assert(nvgpu_mem_rd32(g, &inst_block,
data); ram_in_page_dir_base_lo_w()) == data, goto done);
assert(nvgpu_mem_rd32(g, &inst_block, ram_in_page_dir_base_hi_w()) == unit_assert(nvgpu_mem_rd32(g, &inst_block,
ram_in_page_dir_base_hi_f(pdb_addr_hi)); ram_in_page_dir_base_hi_w()) ==
ram_in_page_dir_base_hi_f(pdb_addr_hi), goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -47,8 +47,6 @@
} while (0) } while (0)
#endif #endif
#define assert(cond) unit_assert(cond, goto done)
#define branches_str test_fifo_flags_str #define branches_str test_fifo_flags_str
#define pruned test_fifo_subtest_pruned #define pruned test_fifo_subtest_pruned
@@ -70,12 +68,14 @@ int test_gv11b_ramin_set_gr_ptr(struct unit_module *m, struct gk20a *g,
g->ops.ramin.alloc_size = gk20a_ramin_alloc_size; g->ops.ramin.alloc_size = gk20a_ramin_alloc_size;
err = nvgpu_alloc_inst_block(g, &inst_block); err = nvgpu_alloc_inst_block(g, &inst_block);
assert(err == 0); unit_assert(err == 0, goto done);
data_ptr = inst_block.cpu_va; data_ptr = inst_block.cpu_va;
gv11b_ramin_set_gr_ptr(g, &inst_block, addr); gv11b_ramin_set_gr_ptr(g, &inst_block, addr);
assert(data_ptr[ram_in_engine_wfi_target_w()] == data_lo); unit_assert(data_ptr[ram_in_engine_wfi_target_w()] == data_lo,
assert(data_ptr[ram_in_engine_wfi_ptr_hi_w()] == data_hi); goto done);
unit_assert(data_ptr[ram_in_engine_wfi_ptr_hi_w()] == data_hi,
goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
@@ -115,10 +115,10 @@ int test_gv11b_ramin_init_subctx_pdb(struct unit_module *m, struct gk20a *g,
/* Aperture should be fixed = SYSMEM */ /* Aperture should be fixed = SYSMEM */
nvgpu_set_enabled(g, NVGPU_MM_HONORS_APERTURE, true); nvgpu_set_enabled(g, NVGPU_MM_HONORS_APERTURE, true);
err = nvgpu_alloc_inst_block(g, &inst_block); err = nvgpu_alloc_inst_block(g, &inst_block);
assert(err == 0); unit_assert(err == 0, goto done);
err = nvgpu_dma_alloc(g, g->ops.ramin.alloc_size(), &pdb_mem); err = nvgpu_dma_alloc(g, g->ops.ramin.alloc_size(), &pdb_mem);
assert(err == 0); unit_assert(err == 0, goto done);
pdb_addr = nvgpu_mem_get_addr(g, &pdb_mem); pdb_addr = nvgpu_mem_get_addr(g, &pdb_mem);
pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v()); pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v());
@@ -152,17 +152,17 @@ int test_gv11b_ramin_init_subctx_pdb(struct unit_module *m, struct gk20a *g,
for (subctx_id = 0; subctx_id < max_subctx_count; subctx_id++) { for (subctx_id = 0; subctx_id < max_subctx_count; subctx_id++) {
addr_lo = ram_in_sc_page_dir_base_vol_w(subctx_id); addr_lo = ram_in_sc_page_dir_base_vol_w(subctx_id);
addr_hi = ram_in_sc_page_dir_base_hi_w(subctx_id); addr_hi = ram_in_sc_page_dir_base_hi_w(subctx_id);
assert(nvgpu_mem_rd32(g, &inst_block, addr_lo) == unit_assert(nvgpu_mem_rd32(g, &inst_block, addr_lo) ==
format_data); format_data, goto done);
assert(nvgpu_mem_rd32(g, &inst_block, addr_hi) == unit_assert(nvgpu_mem_rd32(g, &inst_block, addr_hi) ==
pdb_addr_hi); pdb_addr_hi, goto done);
} }
for (subctx_id = 0; subctx_id < ram_in_sc_pdb_valid__size_1_v(); for (subctx_id = 0; subctx_id < ram_in_sc_pdb_valid__size_1_v();
subctx_id += 32U) { subctx_id += 32U) {
assert(nvgpu_mem_rd32(g, &inst_block, unit_assert(nvgpu_mem_rd32(g, &inst_block,
ram_in_sc_pdb_valid_long_w(subctx_id)) == ram_in_sc_pdb_valid_long_w(subctx_id)) ==
U32_MAX); U32_MAX, goto done);
} }
} }
@@ -194,12 +194,14 @@ int test_gv11b_ramin_set_eng_method_buffer(struct unit_module *m,
g->ops.ramin.alloc_size = gk20a_ramin_alloc_size; g->ops.ramin.alloc_size = gk20a_ramin_alloc_size;
err = nvgpu_alloc_inst_block(g, &inst_block); err = nvgpu_alloc_inst_block(g, &inst_block);
assert(err == 0); unit_assert(err == 0, goto done);
data_ptr = inst_block.cpu_va; data_ptr = inst_block.cpu_va;
gv11b_ramin_set_eng_method_buffer(g, &inst_block, addr); gv11b_ramin_set_eng_method_buffer(g, &inst_block, addr);
assert(data_ptr[ram_in_eng_method_buffer_addr_lo_w()] == addr_lo); unit_assert(data_ptr[ram_in_eng_method_buffer_addr_lo_w()] == addr_lo,
assert(data_ptr[ram_in_eng_method_buffer_addr_hi_w()] == addr_hi); goto done);
unit_assert(data_ptr[ram_in_eng_method_buffer_addr_hi_w()] == addr_hi,
goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:


@@ -60,14 +60,13 @@
} while (0) } while (0)
#endif #endif
#define assert(cond) unit_assert(cond, goto done)
int test_gk20a_runlist_length_max(struct unit_module *m, int test_gk20a_runlist_length_max(struct unit_module *m,
struct gk20a *g, void *args) struct gk20a *g, void *args)
{ {
int ret = UNIT_FAIL; int ret = UNIT_FAIL;
assert(gk20a_runlist_length_max(g) == fifo_eng_runlist_length_max_v()); unit_assert(gk20a_runlist_length_max(g) ==
fifo_eng_runlist_length_max_v(), goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
return ret; return ret;
@@ -88,13 +87,15 @@ int test_gk20a_runlist_hw_submit(struct unit_module *m,
gk20a_runlist_hw_submit(g, runlist_id, count, buffer_index); gk20a_runlist_hw_submit(g, runlist_id, count, buffer_index);
if (count == 0) { if (count == 0) {
assert(nvgpu_readl(g, fifo_runlist_base_r()) == 0); unit_assert(nvgpu_readl(g, fifo_runlist_base_r()) == 0,
goto done);
} else { } else {
assert(nvgpu_readl(g, fifo_runlist_base_r()) != 0); unit_assert(nvgpu_readl(g, fifo_runlist_base_r()) != 0,
goto done);
} }
assert(nvgpu_readl(g, fifo_runlist_r()) == unit_assert(nvgpu_readl(g, fifo_runlist_r()) ==
(fifo_runlist_engine_f(runlist_id) | (fifo_runlist_engine_f(runlist_id) |
fifo_eng_runlist_length_f(count))); fifo_eng_runlist_length_f(count)), goto done);
} }
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
@@ -168,7 +169,7 @@ int test_gk20a_runlist_wait_pending(struct unit_module *m,
/* nvgpu_timeout_init failure */ /* nvgpu_timeout_init failure */
nvgpu_posix_enable_fault_injection(timer_fi, true, 0); nvgpu_posix_enable_fault_injection(timer_fi, true, 0);
err = gk20a_runlist_wait_pending(g, runlist_id); err = gk20a_runlist_wait_pending(g, runlist_id);
assert(err == -ETIMEDOUT); unit_assert(err == -ETIMEDOUT, goto done);
nvgpu_posix_enable_fault_injection(timer_fi, false, 0); nvgpu_posix_enable_fault_injection(timer_fi, false, 0);
g->poll_timeout_default = 10; /* ms */ g->poll_timeout_default = 10; /* ms */
@@ -181,22 +182,22 @@ int test_gk20a_runlist_wait_pending(struct unit_module *m,
/* no wait */ /* no wait */
ctx->count = 0; ctx->count = 0;
err = gk20a_runlist_wait_pending(g, runlist_id); err = gk20a_runlist_wait_pending(g, runlist_id);
assert(err == 0); unit_assert(err == 0, goto done);
/* 1 loop */ /* 1 loop */
ctx->count = 1; ctx->count = 1;
err = gk20a_runlist_wait_pending(g, runlist_id); err = gk20a_runlist_wait_pending(g, runlist_id);
assert(err == 0); unit_assert(err == 0, goto done);
/* 2 loops */ /* 2 loops */
ctx->count = 2; ctx->count = 2;
err = gk20a_runlist_wait_pending(g, runlist_id); err = gk20a_runlist_wait_pending(g, runlist_id);
assert(err == 0); unit_assert(err == 0, goto done);
/* timeout */ /* timeout */
ctx->count = U32_MAX; ctx->count = U32_MAX;
err = gk20a_runlist_wait_pending(g, runlist_id); err = gk20a_runlist_wait_pending(g, runlist_id);
assert(err == -ETIMEDOUT); unit_assert(err == -ETIMEDOUT, goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
@@ -218,11 +219,13 @@ int test_gk20a_runlist_write_state(struct unit_module *m,
for (mask = 0; mask < 4; mask++) { for (mask = 0; mask < 4; mask++) {
nvgpu_writel(g, fifo_sched_disable_r(), v); nvgpu_writel(g, fifo_sched_disable_r(), v);
gk20a_runlist_write_state(g, mask, RUNLIST_DISABLED); gk20a_runlist_write_state(g, mask, RUNLIST_DISABLED);
assert(nvgpu_readl(g, fifo_sched_disable_r()) == (v | mask)); unit_assert(nvgpu_readl(g, fifo_sched_disable_r()) == (v | mask),
goto done);
nvgpu_writel(g, fifo_sched_disable_r(), v); nvgpu_writel(g, fifo_sched_disable_r(), v);
gk20a_runlist_write_state(g, mask, RUNLIST_ENABLED); gk20a_runlist_write_state(g, mask, RUNLIST_ENABLED);
assert(nvgpu_readl(g, fifo_sched_disable_r()) == (v & ~mask)); unit_assert(nvgpu_readl(g, fifo_sched_disable_r()) == (v & ~mask),
goto done);
} }
} }


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -60,14 +60,13 @@
} while (0) } while (0)
#endif #endif
#define assert(cond) unit_assert(cond, goto done)
int test_gv11b_runlist_entry_size(struct unit_module *m, int test_gv11b_runlist_entry_size(struct unit_module *m,
struct gk20a *g, void *args) struct gk20a *g, void *args)
{ {
int ret = UNIT_FAIL; int ret = UNIT_FAIL;
assert(gv11b_runlist_entry_size(g) == ram_rl_entry_size_v()); unit_assert(gv11b_runlist_entry_size(g) == ram_rl_entry_size_v(),
goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
return ret; return ret;
@@ -85,27 +84,35 @@ int test_gv11b_runlist_get_tsg_entry(struct unit_module *m,
u32 runlist[4]; u32 runlist[4];
tsg = nvgpu_tsg_open(g, getpid()); tsg = nvgpu_tsg_open(g, getpid());
assert(tsg != NULL); unit_assert(tsg != NULL, goto done);
/* no scaling */ /* no scaling */
timeslice = RL_MAX_TIMESLICE_TIMEOUT / 2; timeslice = RL_MAX_TIMESLICE_TIMEOUT / 2;
gv11b_runlist_get_tsg_entry(tsg, runlist, timeslice); gv11b_runlist_get_tsg_entry(tsg, runlist, timeslice);
assert(ram_rl_entry_tsg_timeslice_timeout_v(runlist[0]) == timeslice); unit_assert(ram_rl_entry_tsg_timeslice_timeout_v(runlist[0]) ==
assert(ram_rl_entry_tsg_timeslice_scale_v(runlist[0]) == 0U); timeslice, goto done);
assert(runlist[1] == ram_rl_entry_tsg_length_f(tsg->num_active_channels)); unit_assert(ram_rl_entry_tsg_timeslice_scale_v(runlist[0]) == 0U,
assert(runlist[2] == ram_rl_entry_tsg_tsgid_f(tsg->tsgid)); goto done);
unit_assert(runlist[1] == ram_rl_entry_tsg_length_f(
tsg->num_active_channels), goto done);
unit_assert(runlist[2] == ram_rl_entry_tsg_tsgid_f(tsg->tsgid),
goto done);
/* scaling */ /* scaling */
timeslice = RL_MAX_TIMESLICE_TIMEOUT + 1; timeslice = RL_MAX_TIMESLICE_TIMEOUT + 1;
gv11b_runlist_get_tsg_entry(tsg, runlist, timeslice); gv11b_runlist_get_tsg_entry(tsg, runlist, timeslice);
assert(ram_rl_entry_tsg_timeslice_timeout_v(runlist[0]) == (timeslice >> 1U)); unit_assert(ram_rl_entry_tsg_timeslice_timeout_v(runlist[0]) ==
assert(ram_rl_entry_tsg_timeslice_scale_v(runlist[0]) == 1U); (timeslice >> 1U), goto done);
unit_assert(ram_rl_entry_tsg_timeslice_scale_v(runlist[0]) == 1U,
goto done);
/* oversize */ /* oversize */
timeslice = U32_MAX; timeslice = U32_MAX;
gv11b_runlist_get_tsg_entry(tsg, runlist, timeslice); gv11b_runlist_get_tsg_entry(tsg, runlist, timeslice);
assert(ram_rl_entry_tsg_timeslice_timeout_v(runlist[0]) == RL_MAX_TIMESLICE_TIMEOUT); unit_assert(ram_rl_entry_tsg_timeslice_timeout_v(runlist[0]) ==
assert(ram_rl_entry_tsg_timeslice_scale_v(runlist[0]) == RL_MAX_TIMESLICE_SCALE); RL_MAX_TIMESLICE_TIMEOUT, goto done);
unit_assert(ram_rl_entry_tsg_timeslice_scale_v(runlist[0]) ==
RL_MAX_TIMESLICE_SCALE, goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
@@ -126,16 +133,17 @@ int test_gv11b_runlist_get_ch_entry(struct unit_module *m,
ch = nvgpu_channel_open_new(g, NVGPU_INVALID_RUNLIST_ID, ch = nvgpu_channel_open_new(g, NVGPU_INVALID_RUNLIST_ID,
false, getpid(), getpid()); false, getpid(), getpid());
assert(ch); unit_assert(ch, goto done);
ch->userd_mem = &mem; ch->userd_mem = &mem;
mem.aperture = APERTURE_SYSMEM; mem.aperture = APERTURE_SYSMEM;
ch->userd_iova = 0x1000beef; ch->userd_iova = 0x1000beef;
gv11b_runlist_get_ch_entry(ch, runlist); gv11b_runlist_get_ch_entry(ch, runlist);
assert(runlist[1] == u64_hi32(ch->userd_iova)); unit_assert(runlist[1] == u64_hi32(ch->userd_iova), goto done);
assert(ram_rl_entry_chid_f(runlist[2]) == ch->chid); unit_assert(ram_rl_entry_chid_f(runlist[2]) == ch->chid, goto done);
assert(runlist[3] == u64_hi32(nvgpu_inst_block_addr(g, &ch->inst_block))); unit_assert(runlist[3] == u64_hi32(nvgpu_inst_block_addr(g,
&ch->inst_block)), goto done);
ch->userd_mem = NULL; ch->userd_mem = NULL;

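The timeslice checks in the hunk above verify a (timeout, scale) pair encoded into the runlist entry. A hedged sketch of the encoding those assertions imply, assuming the effective timeslice is approximately timeout << scale and that both fields saturate at the hardware maxima; encode_timeslice is an illustrative helper, not the driver's actual function:

static void encode_timeslice(u32 timeslice, u32 *timeout, u32 *scale)
{
	*scale = 0U;
	/* halve the timeout and bump the scale until it fits */
	while (timeslice > RL_MAX_TIMESLICE_TIMEOUT &&
			*scale < RL_MAX_TIMESLICE_SCALE) {
		timeslice >>= 1U;
		(*scale)++;
	}
	/* oversize requests saturate both fields */
	*timeout = (timeslice > RL_MAX_TIMESLICE_TIMEOUT) ?
			RL_MAX_TIMESLICE_TIMEOUT : timeslice;
}

This matches the three cases exercised above: no scaling for RL_MAX_TIMESLICE_TIMEOUT / 2, a single right shift for RL_MAX_TIMESLICE_TIMEOUT + 1, and saturation at (RL_MAX_TIMESLICE_TIMEOUT, RL_MAX_TIMESLICE_SCALE) for U32_MAX.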

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -56,8 +56,6 @@
} while (0) } while (0)
#endif #endif
#define assert(cond) unit_assert(cond, goto done)
struct runlist_unit_ctx { struct runlist_unit_ctx {
u32 branches; u32 branches;
}; };
@@ -364,13 +362,13 @@ int test_tsg_format_gen(struct unit_module *m, struct gk20a *g, void *args)
test_args->level, get_log2(branches)-1, rl_data, test_args->level, get_log2(branches)-1, rl_data,
test_args->expect_header, test_args->expect_header,
test_args->expect_channel); test_args->expect_channel);
assert(err != 0); unit_assert(err != 0, goto done);
} else { } else {
err = run_format_test(m, f, &tsgs[0], chs, err = run_format_test(m, f, &tsgs[0], chs,
test_args->level, test_args->channels, rl_data, test_args->level, test_args->channels, rl_data,
test_args->expect_header, test_args->expect_header,
test_args->expect_channel); test_args->expect_channel);
assert(err == 0); unit_assert(err == 0, goto done);
} }
} }
@@ -645,9 +643,9 @@ int test_interleave_dual(struct unit_module *m, struct gk20a *g, void *args)
dual_args->expected, dual_args->n_expected); dual_args->expected, dual_args->n_expected);
if (branches & fail) { if (branches & fail) {
assert(err != UNIT_SUCCESS); unit_assert(err != UNIT_SUCCESS, goto done);
} else { } else {
assert(err == UNIT_SUCCESS); unit_assert(err == UNIT_SUCCESS, goto done);
} }
} }
@@ -781,9 +779,9 @@ int test_runlist_interleave_level_name(struct unit_module *m,
interleave_level_name = interleave_level_name =
nvgpu_runlist_interleave_level_name(get_log2(branches)); nvgpu_runlist_interleave_level_name(get_log2(branches));
assert(strcmp(interleave_level_name, unit_assert(strcmp(interleave_level_name,
f_runlist_interleave_level_name[ f_runlist_interleave_level_name[
get_log2(branches)]) == 0); get_log2(branches)]) == 0, goto done);
} }
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
@@ -833,10 +831,10 @@ int test_runlist_set_state(struct unit_module *m, struct gk20a *g, void *args)
if (branches & F_RUNLIST_SET_STATE_DISABLED) { if (branches & F_RUNLIST_SET_STATE_DISABLED) {
nvgpu_runlist_set_state(g, 0U, RUNLIST_DISABLED); nvgpu_runlist_set_state(g, 0U, RUNLIST_DISABLED);
assert(stub[0].count == 0U); unit_assert(stub[0].count == 0U, goto done);
} else { } else {
nvgpu_runlist_set_state(g, 1U, RUNLIST_ENABLED); nvgpu_runlist_set_state(g, 1U, RUNLIST_ENABLED);
assert(stub[0].count == 1U); unit_assert(stub[0].count == 1U, goto done);
} }
} }
@@ -864,7 +862,7 @@ int test_runlist_lock_unlock_active_runlists(struct unit_module *m,
u32 prune = fail; u32 prune = fail;
err = nvgpu_runlist_setup_sw(g); err = nvgpu_runlist_setup_sw(g);
assert(err == 0); unit_assert(err == 0, goto done);
for (branches = 0U; for (branches = 0U;
branches < F_RUNLIST_LOCK_UNLOCK_ACTIVE_RUNLISTS_LAST; branches < F_RUNLIST_LOCK_UNLOCK_ACTIVE_RUNLISTS_LAST;
@@ -923,7 +921,7 @@ int test_runlist_get_mask(struct unit_module *m, struct gk20a *g, void *args)
u32 prune = fail; u32 prune = fail;
err = nvgpu_runlist_setup_sw(g); err = nvgpu_runlist_setup_sw(g);
assert(err == 0); unit_assert(err == 0, goto done);
for (branches = 0U; branches < F_RUNLIST_GET_MASK_LAST; branches++) { for (branches = 0U; branches < F_RUNLIST_GET_MASK_LAST; branches++) {
@@ -955,9 +953,9 @@ int test_runlist_get_mask(struct unit_module *m, struct gk20a *g, void *args)
} }
if (branches == 0U) { if (branches == 0U) {
assert(ret_mask == 3U); unit_assert(ret_mask == 3U, goto done);
} else { } else {
assert(ret_mask == 1U); unit_assert(ret_mask == 1U, goto done);
} }
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
@@ -1039,9 +1037,9 @@ int test_runlist_setup_sw(struct unit_module *m, struct gk20a *g, void *args)
nvgpu_posix_enable_fault_injection(dma_fi, false, 0); nvgpu_posix_enable_fault_injection(dma_fi, false, 0);
if (branches & fail) { if (branches & fail) {
assert(err != 0); unit_assert(err != 0, goto done);
} else { } else {
assert(err == 0); unit_assert(err == 0, goto done);
nvgpu_runlist_cleanup_sw(g); nvgpu_runlist_cleanup_sw(g);
} }
} }
@@ -1114,7 +1112,7 @@ int test_runlist_reload_ids(struct unit_module *m, struct gk20a *g, void *args)
g->ops.runlist.hw_submit = stub_runlist_hw_submit; g->ops.runlist.hw_submit = stub_runlist_hw_submit;
err = nvgpu_runlist_setup_sw(g); err = nvgpu_runlist_setup_sw(g);
assert(err == 0); unit_assert(err == 0, goto done);
for (branches = 1U; branches < F_RUNLIST_RELOAD_IDS_LAST; for (branches = 1U; branches < F_RUNLIST_RELOAD_IDS_LAST;
branches++) { branches++) {
@@ -1150,9 +1148,9 @@ int test_runlist_reload_ids(struct unit_module *m, struct gk20a *g, void *args)
} }
if (branches & fail) { if (branches & fail) {
assert(err != 0); unit_assert(err != 0, goto done);
} else { } else {
assert(err == 0); unit_assert(err == 0, goto done);
} }
} }
@@ -1246,7 +1244,7 @@ int test_runlist_update_locked(struct unit_module *m, struct gk20a *g,
if (branches & F_RUNLIST_UPDATE_ADD_AGAIN) { if (branches & F_RUNLIST_UPDATE_ADD_AGAIN) {
err = nvgpu_runlist_update_locked(g, err = nvgpu_runlist_update_locked(g,
0U, ch, true, false); 0U, ch, true, false);
assert(err == 0); unit_assert(err == 0, goto done);
add = true; add = true;
} }
@@ -1274,11 +1272,11 @@ int test_runlist_update_locked(struct unit_module *m, struct gk20a *g,
err = nvgpu_runlist_update_locked(g, err = nvgpu_runlist_update_locked(g,
0U, chA, true, false); 0U, chA, true, false);
assert(err == 0); unit_assert(err == 0, goto done);
err = nvgpu_runlist_update_locked(g, err = nvgpu_runlist_update_locked(g,
0U, chA, false, false); 0U, chA, false, false);
assert(err == 0); unit_assert(err == 0, goto done);
err = nvgpu_tsg_unbind_channel(tsg, chA); err = nvgpu_tsg_unbind_channel(tsg, chA);
if (err != 0) { if (err != 0) {
@@ -1298,9 +1296,9 @@ int test_runlist_update_locked(struct unit_module *m, struct gk20a *g,
} }
if (branches & fail) { if (branches & fail) {
assert(err != 0); unit_assert(err != 0, goto done);
} else { } else {
assert(err == 0); unit_assert(err == 0, goto done);
} }
ch->tsgid = ch_tsgid_orig; ch->tsgid = ch_tsgid_orig;
} }


@@ -61,8 +61,6 @@
} while (0) } while (0)
#endif #endif
#define assert(cond) unit_assert(cond, goto done)
struct tsg_unit_ctx { struct tsg_unit_ctx {
u32 branches; u32 branches;
}; };
@@ -163,15 +161,19 @@ int test_gv11b_tsg_init_eng_method_buffers(struct unit_module *m,
err = g->ops.tsg.init_eng_method_buffers(g, tsg); err = g->ops.tsg.init_eng_method_buffers(g, tsg);
if (branches & fail) { if (branches & fail) {
assert(err != 0); unit_assert(err != 0, goto done);
assert(tsg->eng_method_buffers == NULL); unit_assert(tsg->eng_method_buffers == NULL, goto done);
} else { } else {
assert(err == 0); unit_assert(err == 0, goto done);
if ((branches & F_TSG_INIT_ENG_BUF_ALREADY_EXISTS) == 0) { if ((branches & F_TSG_INIT_ENG_BUF_ALREADY_EXISTS) == 0) {
assert(tsg->eng_method_buffers != NULL); unit_assert(tsg->eng_method_buffers != NULL,
assert(tsg->eng_method_buffers[ASYNC_CE_RUNQUE].gpu_va != 0UL); goto done);
unit_assert(tsg->eng_method_buffers[
ASYNC_CE_RUNQUE].gpu_va != 0UL,
goto done);
g->ops.tsg.deinit_eng_method_buffers(g, tsg); g->ops.tsg.deinit_eng_method_buffers(g, tsg);
assert(tsg->eng_method_buffers == NULL); unit_assert(tsg->eng_method_buffers == NULL,
goto done);
} }
} }
} }
@@ -211,13 +213,13 @@ int test_gv11b_tsg_bind_channel_eng_method_buffers(struct unit_module *m,
}; };
tsg = nvgpu_tsg_open(g, getpid()); tsg = nvgpu_tsg_open(g, getpid());
assert(tsg != NULL); unit_assert(tsg != NULL, goto done);
ch = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid()); ch = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid());
assert(ch != NULL); unit_assert(ch != NULL, goto done);
err = nvgpu_tsg_bind_channel(tsg, ch); err = nvgpu_tsg_bind_channel(tsg, ch);
assert(err == 0); unit_assert(err == 0, goto done);
eng_method_buffers = tsg->eng_method_buffers; eng_method_buffers = tsg->eng_method_buffers;
@@ -252,18 +254,20 @@ int test_gv11b_tsg_bind_channel_eng_method_buffers(struct unit_module *m,
g->ops.tsg.bind_channel_eng_method_buffers(tsg, ch); g->ops.tsg.bind_channel_eng_method_buffers(tsg, ch);
if (branches & F_TSG_BIND_BUF_NO_METHOD_BUF) { if (branches & F_TSG_BIND_BUF_NO_METHOD_BUF) {
assert(nvgpu_mem_rd32(g, &ch->inst_block, unit_assert(nvgpu_mem_rd32(g, &ch->inst_block,
ram_in_eng_method_buffer_addr_lo_w()) == 0U); ram_in_eng_method_buffer_addr_lo_w()) == 0U,
assert(nvgpu_mem_rd32(g, &ch->inst_block, goto done);
ram_in_eng_method_buffer_addr_hi_w()) == 0U); unit_assert(nvgpu_mem_rd32(g, &ch->inst_block,
ram_in_eng_method_buffer_addr_hi_w()) == 0U,
goto done);
} else { } else {
assert(nvgpu_mem_rd32(g, &ch->inst_block, unit_assert(nvgpu_mem_rd32(g, &ch->inst_block,
ram_in_eng_method_buffer_addr_lo_w()) == ram_in_eng_method_buffer_addr_lo_w()) ==
u64_lo32(gpu_va)); u64_lo32(gpu_va), goto done);
assert(nvgpu_mem_rd32(g, &ch->inst_block, unit_assert(nvgpu_mem_rd32(g, &ch->inst_block,
ram_in_eng_method_buffer_addr_hi_w()) == ram_in_eng_method_buffer_addr_hi_w()) ==
u64_hi32(gpu_va)); u64_hi32(gpu_va), goto done);
} }
tsg->eng_method_buffers = eng_method_buffers; tsg->eng_method_buffers = eng_method_buffers;
@@ -308,17 +312,18 @@ int test_gv11b_tsg_unbind_channel_check_eng_faulted(struct unit_module *m,
}; };
tsg = nvgpu_tsg_open(g, getpid()); tsg = nvgpu_tsg_open(g, getpid());
assert(tsg != NULL); unit_assert(tsg != NULL, goto done);
assert(tsg->eng_method_buffers != NULL); unit_assert(tsg->eng_method_buffers != NULL, goto done);
eng_method_buffers = tsg->eng_method_buffers; eng_method_buffers = tsg->eng_method_buffers;
ch = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid()); ch = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid());
assert(ch != NULL); unit_assert(ch != NULL, goto done);
err = nvgpu_tsg_bind_channel(tsg, ch); err = nvgpu_tsg_bind_channel(tsg, ch);
assert(err == 0); unit_assert(err == 0, goto done);
assert(g->ops.tsg.unbind_channel_check_eng_faulted != NULL); unit_assert(g->ops.tsg.unbind_channel_check_eng_faulted != NULL,
goto done);
for (branches = 0U; branches < F_TSG_UNBIND_BUF_LAST; branches++) { for (branches = 0U; branches < F_TSG_UNBIND_BUF_LAST; branches++) {
@@ -348,12 +353,14 @@ int test_gv11b_tsg_unbind_channel_check_eng_faulted(struct unit_module *m,
if (branches & F_TSG_UNBIND_BUF_CH_SAVED) { if (branches & F_TSG_UNBIND_BUF_CH_SAVED) {
/* check that method count has been set to 0 */ /* check that method count has been set to 0 */
assert(nvgpu_mem_rd32(g, unit_assert(nvgpu_mem_rd32(g,
&eng_method_buffers[ASYNC_CE_RUNQUE], 0) == 0); &eng_method_buffers[ASYNC_CE_RUNQUE], 0) == 0,
goto done);
} else { } else {
/* check that method count is unchanged */ /* check that method count is unchanged */
assert(nvgpu_mem_rd32(g, unit_assert(nvgpu_mem_rd32(g,
&eng_method_buffers[ASYNC_CE_RUNQUE], 0) == 1); &eng_method_buffers[ASYNC_CE_RUNQUE], 0) == 1,
goto done);
} }
tsg->eng_method_buffers = eng_method_buffers; tsg->eng_method_buffers = eng_method_buffers;


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -60,8 +60,6 @@
} while (0) } while (0)
#endif #endif
#define assert(cond) unit_assert(cond, goto done)
struct tsg_unit_ctx { struct tsg_unit_ctx {
u32 branches; u32 branches;
}; };
@@ -184,7 +182,7 @@ int test_tsg_open(struct unit_module *m,
break; break;
} }
} }
assert(next_tsg != NULL); unit_assert(next_tsg != NULL, goto done);
f->num_channels = f->num_channels =
branches & F_TSG_OPEN_ACQUIRE_CH_FAIL ? branches & F_TSG_OPEN_ACQUIRE_CH_FAIL ?
@@ -223,9 +221,9 @@ int test_tsg_open(struct unit_module *m,
if (branches & fail) { if (branches & fail) {
f->num_channels = num_channels; f->num_channels = num_channels;
assert(tsg == NULL); unit_assert(tsg == NULL, goto done);
} else { } else {
assert(tsg != NULL); unit_assert(tsg != NULL, goto done);
nvgpu_ref_put(&tsg->refcount, nvgpu_tsg_release); nvgpu_ref_put(&tsg->refcount, nvgpu_tsg_release);
tsg = NULL; tsg = NULL;
} }
@@ -293,16 +291,16 @@ int test_tsg_bind_channel(struct unit_module *m,
u32 prune = fail; u32 prune = fail;
tsg = nvgpu_tsg_open(g, getpid()); tsg = nvgpu_tsg_open(g, getpid());
assert(tsg != NULL); unit_assert(tsg != NULL, goto done);
chA = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid()); chA = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid());
assert(chA != NULL); unit_assert(chA != NULL, goto done);
chB = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid()); chB = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid());
assert(chB != NULL); unit_assert(chB != NULL, goto done);
err = nvgpu_tsg_bind_channel(tsg, chA); err = nvgpu_tsg_bind_channel(tsg, chA);
assert(err == 0); unit_assert(err == 0, goto done);
tsg_save = *tsg; tsg_save = *tsg;
@@ -351,14 +349,16 @@ int test_tsg_bind_channel(struct unit_module *m,
err = nvgpu_tsg_bind_channel(tsg, ch); err = nvgpu_tsg_bind_channel(tsg, ch);
if (branches & fail) { if (branches & fail) {
assert(err != 0); unit_assert(err != 0, goto done);
} else { } else {
assert(err == 0); unit_assert(err == 0, goto done);
assert(!nvgpu_list_empty(&tsg->ch_list)); unit_assert(!nvgpu_list_empty(&tsg->ch_list),
goto done);
err = nvgpu_tsg_unbind_channel(tsg, ch); err = nvgpu_tsg_unbind_channel(tsg, ch);
assert(err == 0); unit_assert(err == 0, goto done);
assert(ch->tsgid == NVGPU_INVALID_TSG_ID); unit_assert(ch->tsgid == NVGPU_INVALID_TSG_ID,
goto done);
} }
} }
@@ -487,19 +487,19 @@ int test_tsg_unbind_channel(struct unit_module *m,
* we need to create tsg + bind channel for each test * we need to create tsg + bind channel for each test
*/ */
tsg = nvgpu_tsg_open(g, getpid()); tsg = nvgpu_tsg_open(g, getpid());
assert(tsg != NULL); unit_assert(tsg != NULL, goto done);
chA = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid()); chA = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid());
assert(chA != NULL); unit_assert(chA != NULL, goto done);
chB = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid()); chB = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid());
assert(chB != NULL); unit_assert(chB != NULL, goto done);
err = nvgpu_tsg_bind_channel(tsg, chA); err = nvgpu_tsg_bind_channel(tsg, chA);
assert(err == 0); unit_assert(err == 0, goto done);
err = nvgpu_tsg_bind_channel(tsg, chB); err = nvgpu_tsg_bind_channel(tsg, chB);
assert(err == 0); unit_assert(err == 0, goto done);
chA->unserviceable = chA->unserviceable =
branches & F_TSG_UNBIND_CHANNEL_UNSERVICEABLE ? branches & F_TSG_UNBIND_CHANNEL_UNSERVICEABLE ?
@@ -544,22 +544,27 @@ int test_tsg_unbind_channel(struct unit_module *m,
if (branches & fail) { if (branches & fail) {
/* check that TSG has been torn down */ /* check that TSG has been torn down */
assert(err != 0); unit_assert(err != 0, goto done);
assert(chA->unserviceable); unit_assert(chA->unserviceable, goto done);
assert(chB->unserviceable); unit_assert(chB->unserviceable, goto done);
assert(chA->tsgid == NVGPU_INVALID_TSG_ID); unit_assert(chA->tsgid == NVGPU_INVALID_TSG_ID,
goto done);
} else { } else {
if (branches & F_TSG_UNBIND_CHANNEL_ABORT_CLEAN_UP_NULL) { if (branches & F_TSG_UNBIND_CHANNEL_ABORT_CLEAN_UP_NULL) {
gops.channel.abort_clean_up(chA); gops.channel.abort_clean_up(chA);
} }
assert(chA->tsgid == NVGPU_INVALID_TSG_ID); unit_assert(chA->tsgid == NVGPU_INVALID_TSG_ID,
assert(nvgpu_list_empty(&chA->ch_entry)); goto done);
unit_assert(nvgpu_list_empty(&chA->ch_entry),
goto done);
/* check that TSG has not been torn down */ /* check that TSG has not been torn down */
assert(!chB->unserviceable); unit_assert(!chB->unserviceable, goto done);
assert(!nvgpu_list_empty(&chB->ch_entry)); unit_assert(!nvgpu_list_empty(&chB->ch_entry),
assert(!nvgpu_list_empty(&tsg->ch_list)); goto done);
unit_assert(!nvgpu_list_empty(&tsg->ch_list),
goto done);
} }
nvgpu_channel_close(chA); nvgpu_channel_close(chA);
@@ -652,9 +657,10 @@ int test_tsg_release(struct unit_module *m,
branches_str(branches, labels)); branches_str(branches, labels));
tsg = nvgpu_tsg_open(g, getpid()); tsg = nvgpu_tsg_open(g, getpid());
assert(tsg != NULL); unit_assert(tsg != NULL, goto done);
assert(tsg->gr_ctx != NULL); unit_assert(tsg->gr_ctx != NULL, goto done);
assert(tsg->gr_ctx->mem.aperture == APERTURE_INVALID); unit_assert(tsg->gr_ctx->mem.aperture ==
APERTURE_INVALID, goto done);
g->ops.tsg.release = g->ops.tsg.release =
branches & F_TSG_RELEASE_NO_RELEASE_HAL ? branches & F_TSG_RELEASE_NO_RELEASE_HAL ?
@@ -689,7 +695,7 @@ int test_tsg_release(struct unit_module *m,
stub_tsg_deinit_eng_method_buffers : NULL; stub_tsg_deinit_eng_method_buffers : NULL;
if (branches & F_TSG_RELEASE_SM_ERR_STATES) { if (branches & F_TSG_RELEASE_SM_ERR_STATES) {
assert(tsg->sm_error_states != NULL); unit_assert(tsg->sm_error_states != NULL, goto done);
} else { } else {
nvgpu_kfree(g, tsg->sm_error_states); nvgpu_kfree(g, tsg->sm_error_states);
tsg->sm_error_states = NULL; tsg->sm_error_states = NULL;
@@ -698,7 +704,7 @@ int test_tsg_release(struct unit_module *m,
nvgpu_ref_put(&tsg->refcount, nvgpu_tsg_release); nvgpu_ref_put(&tsg->refcount, nvgpu_tsg_release);
if ((branches & free_gr_ctx_mask) == free_gr_ctx_mask) { if ((branches & free_gr_ctx_mask) == free_gr_ctx_mask) {
assert(tsg->gr_ctx == NULL); unit_assert(tsg->gr_ctx == NULL, goto done);
} else { } else {
g->ops.gr.setup.free_gr_ctx = g->ops.gr.setup.free_gr_ctx =
gops.gr.setup.free_gr_ctx; gops.gr.setup.free_gr_ctx;
@@ -711,17 +717,17 @@ int test_tsg_release(struct unit_module *m,
nvgpu_free_gr_ctx_struct(g, tsg->gr_ctx); nvgpu_free_gr_ctx_struct(g, tsg->gr_ctx);
tsg->gr_ctx = NULL; tsg->gr_ctx = NULL;
} }
assert(stub[1].count == 0); unit_assert(stub[1].count == 0, goto done);
} }
if (branches & F_TSG_RELEASE_ENG_BUFS) { if (branches & F_TSG_RELEASE_ENG_BUFS) {
assert(stub[0].tsgid == tsg->tsgid); unit_assert(stub[0].tsgid == tsg->tsgid, goto done);
} }
assert(!f->tsg[tsg->tsgid].in_use); unit_assert(!f->tsg[tsg->tsgid].in_use, goto done);
assert(tsg->gr_ctx == NULL); unit_assert(tsg->gr_ctx == NULL, goto done);
assert(tsg->vm == NULL); unit_assert(tsg->vm == NULL, goto done);
assert(tsg->sm_error_states == NULL); unit_assert(tsg->sm_error_states == NULL, goto done);
} }
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
@@ -763,13 +769,13 @@ int test_tsg_unbind_channel_check_hw_state(struct unit_module *m,
u32 prune = F_TSG_UNBIND_CHANNEL_CHECK_HW_NEXT; u32 prune = F_TSG_UNBIND_CHANNEL_CHECK_HW_NEXT;
tsg = nvgpu_tsg_open(g, getpid()); tsg = nvgpu_tsg_open(g, getpid());
assert(tsg != NULL); unit_assert(tsg != NULL, goto done);
ch = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid()); ch = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid());
assert(ch != NULL); unit_assert(ch != NULL, goto done);
err = nvgpu_tsg_bind_channel(tsg, ch); err = nvgpu_tsg_bind_channel(tsg, ch);
assert(err == 0); unit_assert(err == 0, goto done);
for (branches = 0; branches < F_TSG_UNBIND_CHANNEL_CHECK_HW_LAST; for (branches = 0; branches < F_TSG_UNBIND_CHANNEL_CHECK_HW_LAST;
branches++) { branches++) {
@@ -799,9 +805,9 @@ int test_tsg_unbind_channel_check_hw_state(struct unit_module *m,
err = nvgpu_tsg_unbind_channel_check_hw_state(tsg, ch); err = nvgpu_tsg_unbind_channel_check_hw_state(tsg, ch);
if (branches & F_TSG_UNBIND_CHANNEL_CHECK_HW_NEXT) { if (branches & F_TSG_UNBIND_CHANNEL_CHECK_HW_NEXT) {
assert(err != 0); unit_assert(err != 0, goto done);
} else { } else {
assert(err == 0); unit_assert(err == 0, goto done);
} }
} }
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
@@ -849,16 +855,16 @@ int test_tsg_unbind_channel_check_ctx_reload(struct unit_module *m,
int err; int err;
tsg = nvgpu_tsg_open(g, getpid()); tsg = nvgpu_tsg_open(g, getpid());
assert(tsg != NULL); unit_assert(tsg != NULL, goto done);
chA = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid()); chA = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid());
assert(chA != NULL); unit_assert(chA != NULL, goto done);
chB = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid()); chB = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid());
assert(chB != NULL); unit_assert(chB != NULL, goto done);
err = nvgpu_tsg_bind_channel(tsg, chA); err = nvgpu_tsg_bind_channel(tsg, chA);
assert(err == 0); unit_assert(err == 0, goto done);
g->ops.channel.force_ctx_reload = stub_channel_force_ctx_reload; g->ops.channel.force_ctx_reload = stub_channel_force_ctx_reload;
@@ -876,7 +882,8 @@ int test_tsg_unbind_channel_check_ctx_reload(struct unit_module *m,
if ((branches & F_UNBIND_CHANNEL_CHECK_CTX_RELOAD_SET) && if ((branches & F_UNBIND_CHANNEL_CHECK_CTX_RELOAD_SET) &&
(branches & F_UNBIND_CHANNEL_CHECK_CTX_RELOAD_CHID_MATCH)) { (branches & F_UNBIND_CHANNEL_CHECK_CTX_RELOAD_CHID_MATCH)) {
assert(nvgpu_tsg_bind_channel(tsg, chB) == 0); unit_assert(nvgpu_tsg_bind_channel(tsg, chB) == 0,
goto done);
} }
nvgpu_tsg_unbind_channel_check_ctx_reload(tsg, chA, &hw_state); nvgpu_tsg_unbind_channel_check_ctx_reload(tsg, chA, &hw_state);
@@ -884,7 +891,7 @@ int test_tsg_unbind_channel_check_ctx_reload(struct unit_module *m,
if ((branches & F_UNBIND_CHANNEL_CHECK_CTX_RELOAD_SET) && if ((branches & F_UNBIND_CHANNEL_CHECK_CTX_RELOAD_SET) &&
(branches & F_UNBIND_CHANNEL_CHECK_CTX_RELOAD_CHID_MATCH)) { (branches & F_UNBIND_CHANNEL_CHECK_CTX_RELOAD_CHID_MATCH)) {
nvgpu_tsg_unbind_channel(tsg, chB); nvgpu_tsg_unbind_channel(tsg, chB);
assert(stub[0].chid == chB->chid); unit_assert(stub[0].chid == chB->chid, goto done);
} }
} }
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
@@ -951,16 +958,16 @@ int test_tsg_enable(struct unit_module *m,
int err; int err;
tsgA = nvgpu_tsg_open(g, getpid()); tsgA = nvgpu_tsg_open(g, getpid());
assert(tsgA != NULL); unit_assert(tsgA != NULL, goto done);
tsgB = nvgpu_tsg_open(g, getpid()); tsgB = nvgpu_tsg_open(g, getpid());
assert(tsgB != NULL); unit_assert(tsgB != NULL, goto done);
chA = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid()); chA = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid());
assert(chA != NULL); unit_assert(chA != NULL, goto done);
err = nvgpu_tsg_bind_channel(tsgA, chA); err = nvgpu_tsg_bind_channel(tsgA, chA);
assert(err == 0); unit_assert(err == 0, goto done);
g->ops.channel.disable = stub_channel_disable; g->ops.channel.disable = stub_channel_disable;
@@ -986,13 +993,14 @@ int test_tsg_enable(struct unit_module *m,
if (branches & F_TSG_ENABLE_STUB) { if (branches & F_TSG_ENABLE_STUB) {
if (tsg == tsgB) { if (tsg == tsgB) {
assert(stub[0].count == 0); unit_assert(stub[0].count == 0, goto done);
assert(stub[1].count == 0); unit_assert(stub[1].count == 0, goto done);
} }
if (tsg == tsgA) { if (tsg == tsgA) {
assert(stub[0].chid == chA->chid); unit_assert(stub[0].chid == chA->chid,
assert(stub[1].count > 0); goto done);
unit_assert(stub[1].count > 0, goto done);
} }
} }
@@ -1004,11 +1012,12 @@ int test_tsg_enable(struct unit_module *m,
if (branches & F_TSG_ENABLE_STUB) { if (branches & F_TSG_ENABLE_STUB) {
if (tsg == tsgB) { if (tsg == tsgB) {
assert(stub[2].count == 0); unit_assert(stub[2].count == 0, goto done);
} }
if (tsg == tsgA) { if (tsg == tsgA) {
assert(stub[2].chid == chA->chid); unit_assert(stub[2].chid == chA->chid,
goto done);
} }
} }
} }
@@ -1039,12 +1048,13 @@ int test_tsg_check_and_get_from_id(struct unit_module *m,
int ret = UNIT_FAIL; int ret = UNIT_FAIL;
tsg = nvgpu_tsg_check_and_get_from_id(g, NVGPU_INVALID_TSG_ID); tsg = nvgpu_tsg_check_and_get_from_id(g, NVGPU_INVALID_TSG_ID);
assert(tsg == NULL); unit_assert(tsg == NULL, goto done);
tsg = nvgpu_tsg_open(g, getpid()); tsg = nvgpu_tsg_open(g, getpid());
assert(tsg != NULL); unit_assert(tsg != NULL, goto done);
assert(nvgpu_tsg_check_and_get_from_id(g, tsg->tsgid) == tsg); unit_assert(nvgpu_tsg_check_and_get_from_id(g, tsg->tsgid) == tsg,
goto done);
nvgpu_ref_put(&tsg->refcount, nvgpu_tsg_release); nvgpu_ref_put(&tsg->refcount, nvgpu_tsg_release);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
@@ -1092,13 +1102,13 @@ int test_tsg_abort(struct unit_module *m, struct gk20a *g, void *args)
int err; int err;
tsgA = nvgpu_tsg_open(g, getpid()); tsgA = nvgpu_tsg_open(g, getpid());
assert(tsgA != NULL); unit_assert(tsgA != NULL, goto done);
tsgB = nvgpu_tsg_open(g, getpid()); tsgB = nvgpu_tsg_open(g, getpid());
assert(tsgB != NULL); unit_assert(tsgB != NULL, goto done);
chA = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid()); chA = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid());
assert(chA != NULL); unit_assert(chA != NULL, goto done);
for (branches = 0U; branches < F_TSG_ABORT_LAST; branches++) { for (branches = 0U; branches < F_TSG_ABORT_LAST; branches++) {
@@ -1134,21 +1144,24 @@ int test_tsg_abort(struct unit_module *m, struct gk20a *g, void *args)
if (chA->tsgid == NVGPU_INVALID_TSG_ID) { if (chA->tsgid == NVGPU_INVALID_TSG_ID) {
err = nvgpu_tsg_bind_channel(tsgA, chA); err = nvgpu_tsg_bind_channel(tsgA, chA);
assert(err == 0); unit_assert(err == 0, goto done);
} }
nvgpu_tsg_abort(g, tsg, preempt); nvgpu_tsg_abort(g, tsg, preempt);
assert(preempt == (stub[0].tsgid == tsg->tsgid)); unit_assert(preempt == (stub[0].tsgid == tsg->tsgid),
goto done);
assert(chA->unserviceable == unit_assert(chA->unserviceable ==
((tsg == tsgA) && (chA->referenceable))); ((tsg == tsgA) && (chA->referenceable)), goto done);
if (!((branches & F_TSG_ABORT_CH_ABORT_CLEANUP_NULL) || if (!((branches & F_TSG_ABORT_CH_ABORT_CLEANUP_NULL) ||
(branches & F_TSG_ABORT_CH_NON_REFERENCABLE))) { (branches & F_TSG_ABORT_CH_NON_REFERENCABLE))) {
assert((stub[1].chid == chA->chid) == (tsg == tsgA) ); unit_assert((stub[1].chid == chA->chid) ==
assert((stub[1].chid == NVGPU_INVALID_CHANNEL_ID) == (tsg == tsgA), goto done);
(tsg == tsgB)); unit_assert((stub[1].chid ==
NVGPU_INVALID_CHANNEL_ID) == (tsg == tsgB),
goto done);
} }
tsg->abortable = true; tsg->abortable = true;
@@ -1214,9 +1227,9 @@ int test_tsg_setup_sw(struct unit_module *m,
err = nvgpu_tsg_setup_sw(g); err = nvgpu_tsg_setup_sw(g);
if (branches & fail) { if (branches & fail) {
assert(err != 0); unit_assert(err != 0, goto done);
} else { } else {
assert(err == 0); unit_assert(err == 0, goto done);
nvgpu_tsg_cleanup_sw(g); nvgpu_tsg_cleanup_sw(g);
} }
} }
@@ -1269,10 +1282,10 @@ int test_tsg_mark_error(struct unit_module *m,
branches_str(branches, labels)); branches_str(branches, labels));
tsg = nvgpu_tsg_open(g, getpid()); tsg = nvgpu_tsg_open(g, getpid());
assert(tsg != NULL); unit_assert(tsg != NULL, goto done);
ch = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid()); ch = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid());
assert(ch != NULL); unit_assert(ch != NULL, goto done);
ch->os_priv = &ch_priv; ch->os_priv = &ch_priv;
ch_priv.err_notifier.error = U32_MAX; ch_priv.err_notifier.error = U32_MAX;
@@ -1280,7 +1293,7 @@ int test_tsg_mark_error(struct unit_module *m,
if ((branches & F_TSG_MARK_ERROR_NO_CHANNEL) == 0) { if ((branches & F_TSG_MARK_ERROR_NO_CHANNEL) == 0) {
err = nvgpu_tsg_bind_channel(tsg, ch); err = nvgpu_tsg_bind_channel(tsg, ch);
assert(err == 0); unit_assert(err == 0, goto done);
} }
if (branches & F_TSG_MARK_ERROR_NON_REFERENCABLE) { if (branches & F_TSG_MARK_ERROR_NON_REFERENCABLE) {
@@ -1297,13 +1310,13 @@ int test_tsg_mark_error(struct unit_module *m,
if ((branches & F_TSG_MARK_ERROR_NO_CHANNEL) || if ((branches & F_TSG_MARK_ERROR_NO_CHANNEL) ||
(branches & F_TSG_MARK_ERROR_NON_REFERENCABLE)) { (branches & F_TSG_MARK_ERROR_NON_REFERENCABLE)) {
assert(!verbose); unit_assert(!verbose, goto done);
} }
if (branches & F_TSG_MARK_ERROR_VERBOSE) { if (branches & F_TSG_MARK_ERROR_VERBOSE) {
assert(verbose); unit_assert(verbose, goto done);
} else { } else {
assert(!verbose); unit_assert(!verbose, goto done);
} }
nvgpu_channel_close(ch); nvgpu_channel_close(ch);
@@ -1338,13 +1351,13 @@ int test_tsg_set_ctx_mmu_error(struct unit_module *m,
int err; int err;
tsg = nvgpu_tsg_open(g, getpid()); tsg = nvgpu_tsg_open(g, getpid());
assert(tsg != NULL); unit_assert(tsg != NULL, goto done);
ch = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid()); ch = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid());
assert(ch != NULL); unit_assert(ch != NULL, goto done);
err = nvgpu_tsg_bind_channel(tsg, ch); err = nvgpu_tsg_bind_channel(tsg, ch);
assert(err == 0); unit_assert(err == 0, goto done);
ch->os_priv = &ch_priv; ch->os_priv = &ch_priv;
ch_priv.err_notifier.error = U32_MAX; ch_priv.err_notifier.error = U32_MAX;
@@ -1352,8 +1365,9 @@ int test_tsg_set_ctx_mmu_error(struct unit_module *m,
nvgpu_tsg_set_ctx_mmu_error(g, tsg); nvgpu_tsg_set_ctx_mmu_error(g, tsg);
assert(ch_priv.err_notifier.error == NVGPU_ERR_NOTIFIER_FIFO_ERROR_MMU_ERR_FLT); unit_assert(ch_priv.err_notifier.error ==
assert(ch_priv.err_notifier.status != 0); NVGPU_ERR_NOTIFIER_FIFO_ERROR_MMU_ERR_FLT, goto done);
unit_assert(ch_priv.err_notifier.status != 0, goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
@@ -1396,13 +1410,13 @@ int test_tsg_reset_faulted_eng_pbdma(struct unit_module *m,
u32 prune = fail; u32 prune = fail;
tsg = nvgpu_tsg_open(g, getpid()); tsg = nvgpu_tsg_open(g, getpid());
assert(tsg != NULL); unit_assert(tsg != NULL, goto done);
ch = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid()); ch = nvgpu_channel_open_new(g, ~0U, false, getpid(), getpid());
assert(ch != NULL); unit_assert(ch != NULL, goto done);
err = nvgpu_tsg_bind_channel(tsg, ch); err = nvgpu_tsg_bind_channel(tsg, ch);
assert(err == 0); unit_assert(err == 0, goto done);
for (branches = 0U; branches < F_TSG_MARK_ERROR_LAST; branches++) { for (branches = 0U; branches < F_TSG_MARK_ERROR_LAST; branches++) {
@@ -1426,9 +1440,9 @@ int test_tsg_reset_faulted_eng_pbdma(struct unit_module *m,
} }
if (branches & fail) { if (branches & fail) {
assert(stub[0].chid != ch->chid); unit_assert(stub[0].chid != ch->chid, goto done);
} else { } else {
assert(stub[0].chid == ch->chid); unit_assert(stub[0].chid == ch->chid, goto done);
} }
} }


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -55,7 +55,6 @@
} while (0) } while (0)
#endif #endif
#define assert(cond) unit_assert(cond, goto done)
#define branches_str test_fifo_flags_str #define branches_str test_fifo_flags_str
int test_gk20a_userd_entry_size(struct unit_module *m, int test_gk20a_userd_entry_size(struct unit_module *m,
@@ -64,7 +63,7 @@ int test_gk20a_userd_entry_size(struct unit_module *m,
int ret = UNIT_FAIL; int ret = UNIT_FAIL;
u32 size = gk20a_userd_entry_size(g); u32 size = gk20a_userd_entry_size(g);
assert(size == ram_userd_chan_size_v()); unit_assert(size == ram_userd_chan_size_v(), goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done:
return ret; return ret;


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -53,7 +53,6 @@
} while (0) } while (0)
#endif #endif
#define assert(cond) unit_assert(cond, goto done)
#define branches_str test_fifo_flags_str #define branches_str test_fifo_flags_str
int test_gv11b_usermode(struct unit_module *m, int test_gv11b_usermode(struct unit_module *m,
@@ -70,21 +69,22 @@ int test_gv11b_usermode(struct unit_module *m,
u32 token; u32 token;
u32 val; u32 val;
assert(base == usermode_cfg0_r()); unit_assert(base == usermode_cfg0_r(), goto done);
assert(bus_base == usermode_cfg0_r()); unit_assert(bus_base == usermode_cfg0_r(), goto done);
ch = nvgpu_channel_open_new(g, runlist_id, ch = nvgpu_channel_open_new(g, runlist_id,
privileged, getpid(), getpid()); privileged, getpid(), getpid());
assert(ch != NULL); unit_assert(ch != NULL, goto done);
hw_chid = f->channel_base + ch->chid; hw_chid = f->channel_base + ch->chid;
token = gv11b_usermode_doorbell_token(ch); token = gv11b_usermode_doorbell_token(ch);
assert(token == usermode_notify_channel_pending_id_f(hw_chid)); unit_assert(token == usermode_notify_channel_pending_id_f(hw_chid),
goto done);
nvgpu_usermode_writel(g, usermode_notify_channel_pending_r(), 0); nvgpu_usermode_writel(g, usermode_notify_channel_pending_r(), 0);
gv11b_usermode_ring_doorbell(ch); gv11b_usermode_ring_doorbell(ch);
val = nvgpu_readl(g, usermode_notify_channel_pending_r()); val = nvgpu_readl(g, usermode_notify_channel_pending_r());
assert(val == token); unit_assert(val == token, goto done);
ret = UNIT_SUCCESS; ret = UNIT_SUCCESS;
done: done: