Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-23 18:16:01 +03:00)
userspace: Prune unit tests for new runlist code

Remove and prune the now broken tests related to the runlist updates.

JIRA NVGPU-6425

Change-Id: I76e03c943ceae261e35958aa64717b5590a19c0e
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2474334
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-by: Shashank Singh <shashsingh@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Committed by: mobile promotions
Parent: 77c0b9ffdc
Commit: d925e33e8b
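The commit message is terse, so here is a minimal sketch of the interface change that appears to have broken the old runlist unit tests, inferred only from the stub rewrites in the hunks below rather than from the nvgpu headers: the runlist-update HAL op looks to have been renamed from update_for_channel to update, and it now takes a struct nvgpu_runlist pointer instead of a raw runlist ID. The struct name runlist_hal_sketch is hypothetical and used only for this illustration.

        /*
         * Illustrative only: assumed before/after shape of the runlist HAL op,
         * reconstructed from the stub functions changed in this diff, not from
         * the real nvgpu headers.
         */
        struct runlist_hal_sketch {
                /* old op (removed): identified the runlist by raw ID */
                int (*update_for_channel)(struct gk20a *g, u32 runlist_id,
                                struct nvgpu_channel *ch, bool add,
                                bool wait_for_finish);
                /* new op (what the updated test stubs now install): takes the
                 * runlist object itself */
                int (*update)(struct gk20a *g, struct nvgpu_runlist *rl,
                                struct nvgpu_channel *ch, bool add,
                                bool wait_for_finish);
        };

Tests that exercised the old per-runlist-ID path presumably no longer build or pass against this signature, which is why the [nvgpu_runlist] section is dropped from the test lists below.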
@@ -672,7 +672,7 @@ nvgpu_runlist_set_state
 nvgpu_runlist_setup_sw
 nvgpu_runlist_unlock_active_runlists
 nvgpu_runlist_unlock_runlists
-nvgpu_runlist_update_for_channel
+nvgpu_runlist_update
 nvgpu_runlist_update_locked
 nvgpu_rwsem_init
 nvgpu_rwsem_down_read
@@ -687,7 +687,7 @@ nvgpu_runlist_set_state
 nvgpu_runlist_setup_sw
 nvgpu_runlist_unlock_active_runlists
 nvgpu_runlist_unlock_runlists
-nvgpu_runlist_update_for_channel
+nvgpu_runlist_update
 nvgpu_runlist_update_locked
 nvgpu_rwsem_init
 nvgpu_rwsem_down_read
@@ -730,7 +730,7 @@ test_preempt_poll_tsg_on_pbdma.preempt_poll=0
 [nvgpu_preempt_gv11b]
 test_fifo_init_support.init_support=0
 test_fifo_remove_support.remove_support=0
-test_gv11b_fifo_is_preempt_pending.is_preempt_pending=0
+test_gv11b_fifo_is_preempt_pending.is_preempt_pending=2
 test_gv11b_fifo_preempt_channel.preempt_channel=0
 test_gv11b_fifo_preempt_runlists_for_rc.preempt_runlists_for_rc=0
 test_gv11b_fifo_preempt_trigger.preempt_trigger=0
@@ -743,23 +743,6 @@ test_gp10b_ramfc_commit_userd.commit_userd=0
 test_gv11b_ramfc_capture_ram_dump.capture_ram_dump=0
 test_gv11b_ramfc_setup.ramfc_setup=0
 
-[nvgpu_runlist]
-test_fifo_init_support.init_support=0
-test_fifo_remove_support.remove_support=0
-test_flat_gen.flat=0
-test_interleave_dual.interleave_dual=0
-test_interleave_single.interleave_single=0
-test_interleaving_levels.interleave_level=0
-test_runlist_get_mask.get_mask=0
-test_runlist_interleave_level_name.interleave_level_name=0
-test_runlist_lock_unlock_active_runlists.lock_unlock_active_runlists=0
-test_runlist_reload_ids.reload_ids=0
-test_runlist_set_state.set_state=0
-test_runlist_setup_sw.setup_sw=0
-test_runlist_update_for_channel.update_for_channel=0
-test_runlist_update_locked.runlist_update=0
-test_tsg_format_gen.tsg_format_flat=0
-
 [nvgpu_runlist_gk20a]
 test_fifo_init_support.init_support=0
 test_fifo_remove_support.remove_support=0
@@ -786,7 +769,7 @@ test_nvgpu_sgt_get_next.sgt_get_next=0
 test_fifo_init_support.init_support=0
 test_fifo_remove_support.remove_support=0
 test_tsg_abort.abort=0
-test_tsg_bind_channel.bind_channel=0
+test_tsg_bind_channel.bind_channel=2
 test_tsg_check_and_get_from_id.get_from_id=0
 test_tsg_enable.enable_disable=0
 test_tsg_enable_sched.enable_disable_sched=0
@@ -677,16 +677,17 @@ static int stub_os_channel_alloc_usermode_buffers_ENOMEM(
         return -ENOMEM;
 }
 
-static int stub_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
+static int stub_runlist_update(struct gk20a *g, struct nvgpu_runlist *rl,
                 struct nvgpu_channel *ch, bool add, bool wait_for_finish)
 {
         stub[1].chid = ch->chid;
         return 0;
 }
 
-static int stub_runlist_update_for_channel_ETIMEDOUT(struct gk20a *g,
-                u32 runlist_id, struct nvgpu_channel *ch, bool add,
-                bool wait_for_finish)
+static int stub_runlist_update_ETIMEDOUT(struct gk20a *g,
+                struct nvgpu_runlist *rl,
+                struct nvgpu_channel *ch, bool add,
+                bool wait_for_finish)
 {
         return -ETIMEDOUT;
 }
@@ -835,10 +836,10 @@ int test_channel_setup_bind(struct unit_module *m, struct gk20a *g, void *vargs)
                 F_CHANNEL_SETUP_BIND_USERMODE_TSGID_INVALID ?
                 NVGPU_INVALID_TSG_ID : tsgid_orig;
 
-        g->ops.runlist.update_for_channel = branches &
+        g->ops.runlist.update = branches &
                 F_CHANNEL_SETUP_BIND_USERMODE_UPDATE_RL_FAIL ?
-                stub_runlist_update_for_channel_ETIMEDOUT :
-                stub_runlist_update_for_channel;
+                stub_runlist_update_ETIMEDOUT :
+                stub_runlist_update;
 
         g->ops.ramfc.setup = branches &
                 F_CHANNEL_SETUP_BIND_USERMODE_SETUP_RAMFC_FAIL ?
@@ -1338,7 +1339,7 @@ int test_channel_deterministic_idle_unidle(struct unit_module *m,
         g->ops.mm.cache.l2_flush = stub_mm_l2_flush; /* bug 2621189 */
         g->os_channel.alloc_usermode_buffers =
                 stub_os_channel_alloc_usermode_buffers;
-        g->ops.runlist.update_for_channel = stub_runlist_update_for_channel;
+        g->ops.runlist.update = stub_runlist_update;
 
         (void)memset(&bind_args, 0, sizeof(bind_args));
         bind_args.num_gpfifo_entries = 32;
@@ -1701,7 +1702,7 @@ int test_channel_semaphore_wakeup(struct unit_module *m,
         g->ops.mm.cache.l2_flush = stub_mm_l2_flush; /* bug 2621189 */
         g->os_channel.alloc_usermode_buffers =
                 stub_os_channel_alloc_usermode_buffers;
-        g->ops.runlist.update_for_channel = stub_runlist_update_for_channel;
+        g->ops.runlist.update = stub_runlist_update;
         g->ops.mm.cache.fb_flush = stub_mm_fb_flush;
 
         memset(&bind_args, 0, sizeof(bind_args));
@@ -527,7 +527,7 @@ struct unit_module_test nvgpu_preempt_gv11b_tests[] = {
         UNIT_TEST(preempt_runlists_for_rc, test_gv11b_fifo_preempt_runlists_for_rc, NULL, 0),
         UNIT_TEST(preempt_channel, test_gv11b_fifo_preempt_channel, NULL, 0),
         UNIT_TEST(preempt_tsg, test_gv11b_fifo_preempt_tsg, NULL, 0),
-        UNIT_TEST(is_preempt_pending, test_gv11b_fifo_is_preempt_pending, NULL, 0),
+        UNIT_TEST(is_preempt_pending, test_gv11b_fifo_is_preempt_pending, NULL, 2),
         UNIT_TEST(remove_support, test_fifo_remove_support, &unit_ctx, 0),
 };
 
File diff suppressed because it is too large
@@ -341,7 +341,7 @@ int test_tsg_bind_channel(struct unit_module *m,
         /* runlist id mismatch */
         tsg->runlist_id =
                 branches & F_TSG_BIND_CHANNEL_RL_MISMATCH ?
-                ch->runlist_id + 1 : tsg_save.runlist_id;
+                0xffffffff : tsg_save.runlist_id;
 
         /* ch already already active */
         runlist = &f->active_runlists[tsg->runlist_id];
@@ -443,8 +443,8 @@ static int stub_tsg_unbind_channel(struct nvgpu_tsg *tsg,
         return 0;
 }
 
-static int stub_runlist_update_for_channel_EINVAL(
-                struct gk20a *g, u32 runlist_id,
+static int stub_runlist_update_EINVAL(
+                struct gk20a *g, struct nvgpu_runlist *rl,
                 struct nvgpu_channel *ch, bool add, bool wait_for_finish)
 {
         stub[0].count++;
@@ -542,15 +542,15 @@ int test_tsg_unbind_channel(struct unit_module *m,
                 g->ops.tsg.unbind_channel_check_hw_state = NULL;
         }
 
-        g->ops.runlist.update_for_channel =
+        g->ops.runlist.update =
                 branches & F_TSG_UNBIND_CHANNEL_RUNLIST_UPDATE_FAIL ?
-                stub_runlist_update_for_channel_EINVAL :
-                gops.runlist.update_for_channel;
+                stub_runlist_update_EINVAL :
+                gops.runlist.update;
 
         if (branches & F_TSG_UNBIND_CHANNEL_RUNLIST_UPDATE_FAIL ||
                 branches & F_TSG_UNBIND_CHANNEL_ABORT_RUNLIST_UPDATE_FAIL) {
-                g->ops.runlist.update_for_channel =
-                        stub_runlist_update_for_channel_EINVAL;
+                g->ops.runlist.update =
+                        stub_runlist_update_EINVAL;
         }
 
         if ((branches & F_TSG_UNBIND_CHANNEL_UNBIND_HAL) ||
@@ -1539,7 +1539,7 @@ struct unit_module_test nvgpu_tsg_tests[] = {
         UNIT_TEST(open, test_tsg_open, &unit_ctx, 0),
         UNIT_TEST(release, test_tsg_release, &unit_ctx, 0),
         UNIT_TEST(get_from_id, test_tsg_check_and_get_from_id, &unit_ctx, 0),
-        UNIT_TEST(bind_channel, test_tsg_bind_channel, &unit_ctx, 0),
+        UNIT_TEST(bind_channel, test_tsg_bind_channel, &unit_ctx, 2),
         UNIT_TEST(unbind_channel, test_tsg_unbind_channel, &unit_ctx, 0),
         UNIT_TEST(unbind_channel_check_hw_state,
                 test_tsg_unbind_channel_check_hw_state, &unit_ctx, 0),
@@ -148,7 +148,7 @@ static u32 stub_channel_count(struct gk20a *g)
         return 4;
 }
 
-static int stub_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
+static int stub_runlist_update(struct gk20a *g, struct nvgpu_runlist *rl,
                 struct nvgpu_channel *ch, bool add, bool wait_for_finish)
 {
         return 0;
@@ -345,7 +345,7 @@ int test_gr_intr_setup_channel(struct unit_module *m,
                         tsgid);
 
         g->ops.channel.count = stub_channel_count;
-        g->ops.runlist.update_for_channel = stub_runlist_update_for_channel;
+        g->ops.runlist.update = stub_runlist_update;
         if (f != NULL) {
                 f->g = g;
         }
@@ -70,9 +70,10 @@ static u32 stub_channel_count(struct gk20a *g)
         return 4;
 }
 
-static int stub_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
-                struct nvgpu_channel *ch,
-                bool add, bool wait_for_finish)
+static int stub_runlist_update(struct gk20a *g,
+                struct nvgpu_runlist *rl,
+                struct nvgpu_channel *ch,
+                bool add, bool wait_for_finish)
 {
         return 0;
 }
@@ -642,7 +643,7 @@ int test_gr_setup_alloc_obj_ctx(struct unit_module *m,
                         tsgid);
 
         g->ops.channel.count = stub_channel_count;
-        g->ops.runlist.update_for_channel = stub_runlist_update_for_channel;
+        g->ops.runlist.update = stub_runlist_update;
 
         /* Save valid gops */
         gr_setup_save_valid_ops(g);
@@ -522,9 +522,10 @@ static u32 stub_top_get_num_lce(struct gk20a *g)
         return ret_num_lce;
 }
 
-static int stub_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
-                struct nvgpu_channel *ch,
-                bool add, bool wait_for_finish)
+static int stub_runlist_update(struct gk20a *g,
+                struct nvgpu_runlist *rl,
+                struct nvgpu_channel *ch,
+                bool add, bool wait_for_finish)
 {
         return 0;
 }
@@ -644,8 +645,7 @@ int test_handle_mmu_fault_common(struct unit_module *m,
         } else if (branch & F_TSG_VALID) {
                 /* Init TSG and chB */
                 g->ops.gr.init.get_no_of_sm = stub_gr_init_get_no_of_sm;
-                g->ops.runlist.update_for_channel =
-                        stub_runlist_update_for_channel;
+                g->ops.runlist.update = stub_runlist_update;
                 g->ops.tsg.default_timeslice_us =
                         nvgpu_tsg_default_timeslice_us;
                 g->ops.channel.alloc_inst = nvgpu_channel_alloc_inst;