gpu: nvgpu: unit: fifo: move assert to unit_assert

The unit_assert() macro checks a condition and, on failure, executes
the bail_out action given as its second argument.

Currently, the fifo unit tests redefine unit_assert() as a local
assert() macro with a common bail_out action. However, the name
assert() invites confusion with the Linux assert() macro, so this
patch removes the redefined assert() macro and converts its call
sites to use unit_assert() directly.
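
For reference, unit_assert() is defined roughly as follows (a sketch
of the unit test framework macro, not its exact source; the unit_err()
message is illustrative, and the sketch assumes a struct unit_module
*m in scope, as all of these tests have):

    #define unit_assert(cond, action) \
        do { \
            if (!(cond)) { \
                unit_err(m, "%s:%d: assert failed\n", \
                        __func__, __LINE__); \
                action; \
            } \
        } while (0)

With the local wrapper gone, a test that previously wrote

    #define assert(cond) unit_assert(cond, goto done)
    ...
    assert(gk20a_runlist_length_max(g) == fifo_eng_runlist_length_max_v());

now names the bail-out action at each call site:

    unit_assert(gk20a_runlist_length_max(g) ==
            fifo_eng_runlist_length_max_v(), goto done);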

Jira NVGPU-4684

Change-Id: I3a880f965a191f16efdabced5e23723e66ecaf3c
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2276863
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>

Author:    Vedashree Vidwans
Date:      2020-01-09 14:46:19 -08:00
Committer: Alex Waterman
parent 4a287f08cd
commit 652cff2cd0
32 changed files with 795 additions and 710 deletions

View File

@@ -60,14 +60,13 @@
 } while (0)
 #endif
-#define assert(cond) unit_assert(cond, goto done)
 int test_gk20a_runlist_length_max(struct unit_module *m,
                 struct gk20a *g, void *args)
 {
         int ret = UNIT_FAIL;
-        assert(gk20a_runlist_length_max(g) == fifo_eng_runlist_length_max_v());
+        unit_assert(gk20a_runlist_length_max(g) ==
+                        fifo_eng_runlist_length_max_v(), goto done);
         ret = UNIT_SUCCESS;
 done:
         return ret;
@@ -88,13 +87,15 @@ int test_gk20a_runlist_hw_submit(struct unit_module *m,
                 gk20a_runlist_hw_submit(g, runlist_id, count, buffer_index);
                 if (count == 0) {
-                        assert(nvgpu_readl(g, fifo_runlist_base_r()) == 0);
+                        unit_assert(nvgpu_readl(g, fifo_runlist_base_r()) == 0,
+                                goto done);
                 } else {
-                        assert(nvgpu_readl(g, fifo_runlist_base_r()) != 0);
+                        unit_assert(nvgpu_readl(g, fifo_runlist_base_r()) != 0,
+                                goto done);
                 }
-                assert(nvgpu_readl(g, fifo_runlist_r()) ==
+                unit_assert(nvgpu_readl(g, fifo_runlist_r()) ==
                         (fifo_runlist_engine_f(runlist_id) |
-                        fifo_eng_runlist_length_f(count)));
+                        fifo_eng_runlist_length_f(count)), goto done);
         }
         ret = UNIT_SUCCESS;
@@ -168,7 +169,7 @@ int test_gk20a_runlist_wait_pending(struct unit_module *m,
         /* nvgpu_timeout_init failure */
         nvgpu_posix_enable_fault_injection(timer_fi, true, 0);
         err = gk20a_runlist_wait_pending(g, runlist_id);
-        assert(err == -ETIMEDOUT);
+        unit_assert(err == -ETIMEDOUT, goto done);
         nvgpu_posix_enable_fault_injection(timer_fi, false, 0);
         g->poll_timeout_default = 10; /* ms */
@@ -181,22 +182,22 @@
         /* no wait */
         ctx->count = 0;
         err = gk20a_runlist_wait_pending(g, runlist_id);
-        assert(err == 0);
+        unit_assert(err == 0, goto done);
         /* 1 loop */
         ctx->count = 1;
         err = gk20a_runlist_wait_pending(g, runlist_id);
-        assert(err == 0);
+        unit_assert(err == 0, goto done);
         /* 2 loops */
         ctx->count = 2;
         err = gk20a_runlist_wait_pending(g, runlist_id);
-        assert(err == 0);
+        unit_assert(err == 0, goto done);
         /* timeout */
         ctx->count = U32_MAX;
         err = gk20a_runlist_wait_pending(g, runlist_id);
-        assert(err == -ETIMEDOUT);
+        unit_assert(err == -ETIMEDOUT, goto done);
         ret = UNIT_SUCCESS;
@@ -218,11 +219,13 @@ int test_gk20a_runlist_write_state(struct unit_module *m,
                 for (mask = 0; mask < 4; mask++) {
                         nvgpu_writel(g, fifo_sched_disable_r(), v);
                         gk20a_runlist_write_state(g, mask, RUNLIST_DISABLED);
-                        assert(nvgpu_readl(g, fifo_sched_disable_r()) == (v | mask));
+                        unit_assert(nvgpu_readl(g, fifo_sched_disable_r()) == (v | mask),
+                                goto done);
                         nvgpu_writel(g, fifo_sched_disable_r(), v);
                         gk20a_runlist_write_state(g, mask, RUNLIST_ENABLED);
-                        assert(nvgpu_readl(g, fifo_sched_disable_r()) == (v & ~mask));
+                        unit_assert(nvgpu_readl(g, fifo_sched_disable_r()) == (v & ~mask),
+                                goto done);
                 }
         }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -60,14 +60,13 @@
 } while (0)
 #endif
-#define assert(cond) unit_assert(cond, goto done)
 int test_gv11b_runlist_entry_size(struct unit_module *m,
                 struct gk20a *g, void *args)
 {
         int ret = UNIT_FAIL;
-        assert(gv11b_runlist_entry_size(g) == ram_rl_entry_size_v());
+        unit_assert(gv11b_runlist_entry_size(g) == ram_rl_entry_size_v(),
+                        goto done);
         ret = UNIT_SUCCESS;
 done:
         return ret;
@@ -85,27 +84,35 @@ int test_gv11b_runlist_get_tsg_entry(struct unit_module *m,
         u32 runlist[4];
         tsg = nvgpu_tsg_open(g, getpid());
-        assert(tsg != NULL);
+        unit_assert(tsg != NULL, goto done);
         /* no scaling */
         timeslice = RL_MAX_TIMESLICE_TIMEOUT / 2;
         gv11b_runlist_get_tsg_entry(tsg, runlist, timeslice);
-        assert(ram_rl_entry_tsg_timeslice_timeout_v(runlist[0]) == timeslice);
-        assert(ram_rl_entry_tsg_timeslice_scale_v(runlist[0]) == 0U);
-        assert(runlist[1] == ram_rl_entry_tsg_length_f(tsg->num_active_channels));
-        assert(runlist[2] == ram_rl_entry_tsg_tsgid_f(tsg->tsgid));
+        unit_assert(ram_rl_entry_tsg_timeslice_timeout_v(runlist[0]) ==
+                        timeslice, goto done);
+        unit_assert(ram_rl_entry_tsg_timeslice_scale_v(runlist[0]) == 0U,
+                        goto done);
+        unit_assert(runlist[1] == ram_rl_entry_tsg_length_f(
+                        tsg->num_active_channels), goto done);
+        unit_assert(runlist[2] == ram_rl_entry_tsg_tsgid_f(tsg->tsgid),
+                        goto done);
         /* scaling */
         timeslice = RL_MAX_TIMESLICE_TIMEOUT + 1;
         gv11b_runlist_get_tsg_entry(tsg, runlist, timeslice);
-        assert(ram_rl_entry_tsg_timeslice_timeout_v(runlist[0]) == (timeslice >> 1U));
-        assert(ram_rl_entry_tsg_timeslice_scale_v(runlist[0]) == 1U);
+        unit_assert(ram_rl_entry_tsg_timeslice_timeout_v(runlist[0]) ==
+                        (timeslice >> 1U), goto done);
+        unit_assert(ram_rl_entry_tsg_timeslice_scale_v(runlist[0]) == 1U,
+                        goto done);
         /* oversize */
         timeslice = U32_MAX;
         gv11b_runlist_get_tsg_entry(tsg, runlist, timeslice);
-        assert(ram_rl_entry_tsg_timeslice_timeout_v(runlist[0]) == RL_MAX_TIMESLICE_TIMEOUT);
-        assert(ram_rl_entry_tsg_timeslice_scale_v(runlist[0]) == RL_MAX_TIMESLICE_SCALE);
+        unit_assert(ram_rl_entry_tsg_timeslice_timeout_v(runlist[0]) ==
+                        RL_MAX_TIMESLICE_TIMEOUT, goto done);
+        unit_assert(ram_rl_entry_tsg_timeslice_scale_v(runlist[0]) ==
+                        RL_MAX_TIMESLICE_SCALE, goto done);
         ret = UNIT_SUCCESS;
@@ -126,16 +133,17 @@ int test_gv11b_runlist_get_ch_entry(struct unit_module *m,
         ch = nvgpu_channel_open_new(g, NVGPU_INVALID_RUNLIST_ID,
                         false, getpid(), getpid());
-        assert(ch);
+        unit_assert(ch, goto done);
         ch->userd_mem = &mem;
         mem.aperture = APERTURE_SYSMEM;
         ch->userd_iova = 0x1000beef;
         gv11b_runlist_get_ch_entry(ch, runlist);
-        assert(runlist[1] == u64_hi32(ch->userd_iova));
-        assert(ram_rl_entry_chid_f(runlist[2]) == ch->chid);
-        assert(runlist[3] == u64_hi32(nvgpu_inst_block_addr(g, &ch->inst_block)));
+        unit_assert(runlist[1] == u64_hi32(ch->userd_iova), goto done);
+        unit_assert(ram_rl_entry_chid_f(runlist[2]) == ch->chid, goto done);
+        unit_assert(runlist[3] == u64_hi32(nvgpu_inst_block_addr(g,
+                        &ch->inst_block)), goto done);
         ch->userd_mem = NULL;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -56,8 +56,6 @@
 } while (0)
 #endif
-#define assert(cond) unit_assert(cond, goto done)
 struct runlist_unit_ctx {
         u32 branches;
 };
@@ -364,13 +362,13 @@ int test_tsg_format_gen(struct unit_module *m, struct gk20a *g, void *args)
                                 test_args->level, get_log2(branches)-1, rl_data,
                                 test_args->expect_header,
                                 test_args->expect_channel);
-                        assert(err != 0);
+                        unit_assert(err != 0, goto done);
                 } else {
                         err = run_format_test(m, f, &tsgs[0], chs,
                                 test_args->level, test_args->channels, rl_data,
                                 test_args->expect_header,
                                 test_args->expect_channel);
-                        assert(err == 0);
+                        unit_assert(err == 0, goto done);
                 }
         }
@@ -645,9 +643,9 @@ int test_interleave_dual(struct unit_module *m, struct gk20a *g, void *args)
                         dual_args->expected, dual_args->n_expected);
                 if (branches & fail) {
-                        assert(err != UNIT_SUCCESS);
+                        unit_assert(err != UNIT_SUCCESS, goto done);
                 } else {
-                        assert(err == UNIT_SUCCESS);
+                        unit_assert(err == UNIT_SUCCESS, goto done);
                 }
         }
@@ -781,9 +779,9 @@ int test_runlist_interleave_level_name(struct unit_module *m,
                 interleave_level_name =
                         nvgpu_runlist_interleave_level_name(get_log2(branches));
-                assert(strcmp(interleave_level_name,
+                unit_assert(strcmp(interleave_level_name,
                         f_runlist_interleave_level_name[
-                        get_log2(branches)]) == 0);
+                        get_log2(branches)]) == 0, goto done);
         }
         ret = UNIT_SUCCESS;
@@ -833,10 +831,10 @@ int test_runlist_set_state(struct unit_module *m, struct gk20a *g, void *args)
                 if (branches & F_RUNLIST_SET_STATE_DISABLED) {
                         nvgpu_runlist_set_state(g, 0U, RUNLIST_DISABLED);
-                        assert(stub[0].count == 0U);
+                        unit_assert(stub[0].count == 0U, goto done);
                 } else {
                         nvgpu_runlist_set_state(g, 1U, RUNLIST_ENABLED);
-                        assert(stub[0].count == 1U);
+                        unit_assert(stub[0].count == 1U, goto done);
                 }
         }
@@ -864,7 +862,7 @@ int test_runlist_lock_unlock_active_runlists(struct unit_module *m,
         u32 prune = fail;
         err = nvgpu_runlist_setup_sw(g);
-        assert(err == 0);
+        unit_assert(err == 0, goto done);
         for (branches = 0U;
                         branches < F_RUNLIST_LOCK_UNLOCK_ACTIVE_RUNLISTS_LAST;
@@ -923,7 +921,7 @@ int test_runlist_get_mask(struct unit_module *m, struct gk20a *g, void *args)
         u32 prune = fail;
         err = nvgpu_runlist_setup_sw(g);
-        assert(err == 0);
+        unit_assert(err == 0, goto done);
         for (branches = 0U; branches < F_RUNLIST_GET_MASK_LAST; branches++) {
@@ -955,9 +953,9 @@
                 }
                 if (branches == 0U) {
-                        assert(ret_mask == 3U);
+                        unit_assert(ret_mask == 3U, goto done);
                 } else {
-                        assert(ret_mask == 1U);
+                        unit_assert(ret_mask == 1U, goto done);
                 }
         ret = UNIT_SUCCESS;
@@ -1039,9 +1037,9 @@ int test_runlist_setup_sw(struct unit_module *m, struct gk20a *g, void *args)
                 nvgpu_posix_enable_fault_injection(dma_fi, false, 0);
                 if (branches & fail) {
-                        assert(err != 0);
+                        unit_assert(err != 0, goto done);
                 } else {
-                        assert(err == 0);
+                        unit_assert(err == 0, goto done);
                         nvgpu_runlist_cleanup_sw(g);
                 }
         }
@@ -1114,7 +1112,7 @@ int test_runlist_reload_ids(struct unit_module *m, struct gk20a *g, void *args)
         g->ops.runlist.hw_submit = stub_runlist_hw_submit;
         err = nvgpu_runlist_setup_sw(g);
-        assert(err == 0);
+        unit_assert(err == 0, goto done);
         for (branches = 1U; branches < F_RUNLIST_RELOAD_IDS_LAST;
                         branches++) {
@@ -1150,9 +1148,9 @@
                 }
                 if (branches & fail) {
-                        assert(err != 0);
+                        unit_assert(err != 0, goto done);
                 } else {
-                        assert(err == 0);
+                        unit_assert(err == 0, goto done);
                 }
         }
@@ -1246,7 +1244,7 @@ int test_runlist_update_locked(struct unit_module *m, struct gk20a *g,
                 if (branches & F_RUNLIST_UPDATE_ADD_AGAIN) {
                         err = nvgpu_runlist_update_locked(g,
                                         0U, ch, true, false);
-                        assert(err == 0);
+                        unit_assert(err == 0, goto done);
                         add = true;
                 }
@@ -1274,11 +1272,11 @@
                         err = nvgpu_runlist_update_locked(g,
                                         0U, chA, true, false);
-                        assert(err == 0);
+                        unit_assert(err == 0, goto done);
                         err = nvgpu_runlist_update_locked(g,
                                         0U, chA, false, false);
-                        assert(err == 0);
+                        unit_assert(err == 0, goto done);
                         err = nvgpu_tsg_unbind_channel(tsg, chA);
                         if (err != 0) {
@@ -1298,9 +1296,9 @@
                 }
                 if (branches & fail) {
-                        assert(err != 0);
+                        unit_assert(err != 0, goto done);
                 } else {
-                        assert(err == 0);
+                        unit_assert(err == 0, goto done);
                 }
                 ch->tsgid = ch_tsgid_orig;
         }