Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: remove timeout fault injection tests
The timeout init API is changing to return void in most cases. Adapt the
unit tests to the reduced branching.

Change-Id: I4d05484529fe4ef46b518f41d10b71a4a9f9c6fb
Signed-off-by: Konsta Hölttä <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2614286
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 9b3f3ea4be (parent 99a664bda0), committed by mobile promotions
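Every hunk below deletes the same shape of test code: grab the timer fault-injection handle, force the Nth timeout init call inside the code under test to fail, and assert that the wait helper surfaces an error. A minimal sketch of that pattern, reassembled from the removed lines (err, m, and gpccs_flcn come from the surrounding falcon test harness and are not redefined here):

	struct nvgpu_posix_fault_inj *timer_fi =
		nvgpu_timers_get_fault_injection();

	/* Fail the first timeout init call made by the code under test. */
	nvgpu_posix_enable_fault_injection(timer_fi, true, 0);
	err = nvgpu_falcon_wait_idle(gpccs_flcn);
	nvgpu_posix_enable_fault_injection(timer_fi, false, 0);

	if (err != -ETIMEDOUT) {
		/* The forced init failure was expected to read back as a timeout. */
		unit_return_fail(m, "falcon wait for idle err: %d "
			"expected err: -ETIMEDOUT\n", err);
	}

The third argument of nvgpu_posix_enable_fault_injection selects which occurrence of the call fails; tests such as fb_gm20b_tlb_invalidate_test used index 1 to target the second init call separately. With the init API returning void, the injected failure has no error branch left to reach, so every block of this shape is removed rather than adapted.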
@@ -225,8 +225,6 @@ int test_bar_bind(struct unit_module *m, struct gk20a *g, void *args)
 {
 	int ret = UNIT_FAIL;
 	struct nvgpu_mem bar_inst = {0};
-	struct nvgpu_posix_fault_inj *timer_fi =
-		nvgpu_timers_get_fault_injection();
 
 	/* Initialize cpu_va to a known value */
 	bar_inst.cpu_va = (void *) 0xCE418000U;
@@ -281,15 +279,6 @@ int test_bar_bind(struct unit_module *m, struct gk20a *g, void *args)
 		ret = UNIT_FAIL;
 	}
 
-	/* Enable fault injection for the timer init call for branch coverage */
-	nvgpu_posix_enable_fault_injection(timer_fi, true, 0);
-	ret = g->ops.bus.bar1_bind(g, &bar_inst);
-	if (ret == 0U) {
-		unit_err(m, "Error injection for timeout init failed.\n");
-		ret = UNIT_FAIL;
-	}
-	nvgpu_posix_enable_fault_injection(timer_fi, false, 0);
-
 	bar_inst.cpu_va = (void *) 0x2670C000U;
 	read_bind_status_reg = 0U;
 	ret = g->ops.bus.bar2_bind(g, &bar_inst);
@@ -319,15 +308,6 @@ int test_bar_bind(struct unit_module *m, struct gk20a *g, void *args)
 		ret = UNIT_FAIL;
 	}
 
-	/* Enable fault injection for the timer init call for branch coverage */
-	nvgpu_posix_enable_fault_injection(timer_fi, true, 0);
-	ret = g->ops.bus.bar2_bind(g, &bar_inst);
-	if (ret == 0U) {
-		unit_err(m, "Error injection for timeout init failed.\n");
-		ret = UNIT_FAIL;
-	}
-	nvgpu_posix_enable_fault_injection(timer_fi, false, 0);
-
 	ret = UNIT_SUCCESS;
 done:
 	return ret;

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -141,7 +141,6 @@ int test_init_hw(struct unit_module *m, struct gk20a *g, void *args);
  * - Call bus.bar1_bind HAL again and except ret != 0 as the bind status
  *   will remain pending during this call.
  * - The HAL should return error this time as timeout is expected to expire.
- * - Enable fault injection for the timer init call for branch coverage.
  * - Repeat the above steps for BAR2 but with different cpu_va = 0x2670C000U.
  *
  * Output:

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -510,8 +510,6 @@ int test_falcon_reset(struct unit_module *m, struct gk20a *g, void *__args)
  */
 int test_falcon_mem_scrub(struct unit_module *m, struct gk20a *g, void *__args)
 {
-	struct nvgpu_posix_fault_inj *timer_fi =
-		nvgpu_timers_get_fault_injection();
 	struct {
 		struct nvgpu_falcon *flcn;
 		void (*pre_scrub)(void *);
@@ -535,16 +533,6 @@ int test_falcon_mem_scrub(struct unit_module *m, struct gk20a *g, void *__args)
 		}
 	}
 
-	/* enable fault injection for the timer init call for branch coverage */
-	nvgpu_posix_enable_fault_injection(timer_fi, true, 0);
-	err = nvgpu_falcon_mem_scrub_wait(gpccs_flcn);
-	nvgpu_posix_enable_fault_injection(timer_fi, false, 0);
-
-	if (err != -ETIMEDOUT) {
-		unit_return_fail(m, "falcon mem scrub err: %d "
-			"expected err: -ETIMEDOUT\n", err);
-	}
-
 	return UNIT_SUCCESS;
 }
 
@@ -605,8 +593,6 @@ static void flcn_idle_fail(void *data)
  */
 int test_falcon_idle(struct unit_module *m, struct gk20a *g, void *__args)
 {
-	struct nvgpu_posix_fault_inj *timer_fi =
-		nvgpu_timers_get_fault_injection();
 	struct {
 		struct nvgpu_falcon *flcn;
 		void (*pre_idle)(void *);
@@ -631,16 +617,6 @@ int test_falcon_idle(struct unit_module *m, struct gk20a *g, void *__args)
 		}
 	}
 
-	/* enable fault injection for the timer init call for branch coverage */
-	nvgpu_posix_enable_fault_injection(timer_fi, true, 0);
-	err = nvgpu_falcon_wait_idle(gpccs_flcn);
-	nvgpu_posix_enable_fault_injection(timer_fi, false, 0);
-
-	if (err != -ETIMEDOUT) {
-		unit_return_fail(m, "falcon wait for idle err: %d "
-			"expected err: -ETIMEDOUT\n", err);
-	}
-
 	return UNIT_SUCCESS;
 }
 
@@ -679,8 +655,6 @@ static void flcn_halt_fail(void *data)
 int test_falcon_halt(struct unit_module *m, struct gk20a *g, void *__args)
 {
 #define FALCON_WAIT_HALT 200
-	struct nvgpu_posix_fault_inj *timer_fi =
-		nvgpu_timers_get_fault_injection();
 	struct {
 		struct nvgpu_falcon *flcn;
 		void (*pre_halt)(void *);
@@ -705,17 +679,6 @@ int test_falcon_halt(struct unit_module *m, struct gk20a *g, void *__args)
 		}
 	}
 
-	/* enable fault injection for the timer init call for branch coverage */
-	nvgpu_posix_enable_fault_injection(timer_fi, true, 0);
-	err = nvgpu_falcon_wait_for_halt(gpccs_flcn,
-		FALCON_WAIT_HALT);
-	nvgpu_posix_enable_fault_injection(timer_fi, false, 0);
-
-	if (err != -ETIMEDOUT) {
-		unit_return_fail(m, "falcon wait for halt err: %d "
-			"expected err: -ETIMEDOUT\n", err);
-	}
-
 	return UNIT_SUCCESS;
 }
 

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -154,7 +154,7 @@ int test_falcon_mem_scrub(struct unit_module *m, struct gk20a *g, void *__args);
  * Description: The falcon unit shall be able to check and return the falcon
  * idle status.
  *
- * Test Type: Feature, Error guessing, Error injection
+ * Test Type: Feature, Error guessing
  *
  * Input: None.
  *
@@ -173,8 +173,6 @@ int test_falcon_mem_scrub(struct unit_module *m, struct gk20a *g, void *__args);
 * - Invoke nvgpu_falcon_wait_idle with initialized falcon struct where
 *   underlying falcon is not idle.
 * - Verify that wait fails with -ETIMEDOUT return value.
- * - Enable fault injection for the timer init call for branch coverage.
- * - Verify that wait fails with -ETIMEDOUT return value.
 *
 * Output: Returns PASS if the steps above were executed successfully. FAIL
 *   otherwise.
@@ -187,7 +185,7 @@ int test_falcon_idle(struct unit_module *m, struct gk20a *g, void *__args);
 * Description: The falcon unit shall be able to check and return the falcon
 * halt status.
 *
- * Test Type: Feature, Error guessing, Error injection
+ * Test Type: Feature, Error guessing
 *
 * Targets: nvgpu_falcon_wait_for_halt, gops_falcon.is_falcon_cpu_halted,
 * gk20a_is_falcon_cpu_halted
@@ -203,8 +201,6 @@ int test_falcon_idle(struct unit_module *m, struct gk20a *g, void *__args);
 * - Invoke nvgpu_falcon_wait_for_halt with initialized falcon struct where
 *   underlying falcon is not halted.
 * - Verify that wait fails with -ETIMEDOUT return value.
- * - Enable fault injection for the timer init call for branch coverage.
- * - Verify that wait fails with -ETIMEDOUT return value.
 *
 * Output: Returns PASS if the steps above were executed successfully. FAIL
 *   otherwise.

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -80,13 +80,9 @@ int fb_gv11b_init_test(struct unit_module *m, struct gk20a *g, void *args);
 * - While the NVGPU is powered off, call gm20b_fb_tlb_invalidate and ensure
 *   it returned success.
 * - The power on state of NVGPU.
- * - Enable timer error injection (1st occurnce), call gm20b_fb_tlb_invalidate
- *   and ensure it failed.
 * - Call gm20b_fb_tlb_invalidate again and check that it still failed (because
 *   the fb_mmu_ctrl_r register is not set properly)
 * - Set the fb_mmu_ctrl_pri_fifo_space_v bit in fb_mmu_ctrl_r register.
- * - Enable timer error injection (2nd occurnce), call gm20b_fb_tlb_invalidate
- *   and ensure it failed.
 * - Using an helper during register writes, intercept writes to fb_mmu_ctrl_r
 *   to cause a timeout after the MMU invalidate. Ensure that
 *   gm20b_fb_tlb_invalidate returns a failure.
@@ -128,8 +124,6 @@ int fb_gm20b_tlb_invalidate_test(struct unit_module *m, struct gk20a *g,
 *   gm20b_fb_mmu_debug_rd HAL returns the same value.
 * - Call the VPR/WPR dump operations for code coverage. Ensure that none of
 *   those operations cause a crash.
- * - Enable timer error injection (1st occurnce), call gm20b_fb_vpr_info_fetch
- *   and ensure it failed.
 * - Write in the fb_mmu_vpr_info register so that calling
 *   gm20b_fb_vpr_info_fetch triggers timeout in the
 *   gm20b_fb_vpr_info_fetch_wait function. Ensure the return values reflects
@@ -187,8 +181,6 @@ int fb_mmu_fault_gv11b_init_test(struct unit_module *m, struct gk20a *g,
 *   empty.
 * - Call the gv11b_fb_fault_buf_configure_hw HAL and enable fault buffer.
 * - Enable fault buffer again which shouldn't cause any crash.
- * - While trying to disable the fault buffer, trigger a failure of
- *   nvgpu_timeout_init.
 * - Disable the fault buffer.
 * - Enable fault buffer, set the busy bit in fb_mmu_fault_status_r register,
 *   disable the fault buffer which should cause an internal timeout. Ensure

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -43,8 +43,6 @@ int fb_gm20b_tlb_invalidate_test(struct unit_module *m, struct gk20a *g,
 {
 	int err;
 	struct nvgpu_mem pdb = {0};
-	struct nvgpu_posix_fault_inj *timer_fi =
-		nvgpu_timers_get_fault_injection();
 
 	/* Define the operations being tested in this unit test */
 	g->ops.fb.tlb_invalidate = gm20b_fb_tlb_invalidate;
@@ -61,15 +59,6 @@ int fb_gm20b_tlb_invalidate_test(struct unit_module *m, struct gk20a *g,
 	/* Set NVGPU as powered on */
 	g->power_on_state = NVGPU_STATE_POWERED_ON;
 
-	/* Timeout init fault injection (MMU FIFO space) */
-	nvgpu_posix_enable_fault_injection(timer_fi, true, 0);
-	err = g->ops.fb.tlb_invalidate(g, &pdb);
-	if (err != -ETIMEDOUT) {
-		unit_return_fail(m,
-			"tlb_invalidate did not fail as expected (1)\n");
-	}
-	nvgpu_posix_enable_fault_injection(timer_fi, false, 0);
-
 	/* Timeout fail on fb_mmu_ctrl_r() read */
 	err = g->ops.fb.tlb_invalidate(g, &pdb);
 	if (err != -ETIMEDOUT) {
@@ -83,15 +72,6 @@ int fb_gm20b_tlb_invalidate_test(struct unit_module *m, struct gk20a *g,
 	 */
 	nvgpu_writel(g, fb_mmu_ctrl_r(), 1 << 16U);
 
-	/* Timeout init fault injection (MMU invalidate) */
-	nvgpu_posix_enable_fault_injection(timer_fi, true, 1);
-	err = g->ops.fb.tlb_invalidate(g, &pdb);
-	if (err != -ETIMEDOUT) {
-		unit_return_fail(m,
-			"tlb_invalidate did not fail as expected (3)\n");
-	}
-	nvgpu_posix_enable_fault_injection(timer_fi, false, 0);
-
 	/*
 	 * Timeout on fb_mmu_ctrl_r read after MMU invalidate (does not return
 	 * a failure)
@@ -117,8 +97,6 @@ int fb_gm20b_mmu_ctrl_test(struct unit_module *m, struct gk20a *g, void *args)
 {
 	int err;
 	u64 wpr_base, wpr_size;
-	struct nvgpu_posix_fault_inj *timer_fi =
-		nvgpu_timers_get_fault_injection();
 
 	/* Define the operations being tested in this unit test */
 	g->ops.fb.mmu_ctrl = gm20b_fb_mmu_ctrl;
@@ -160,23 +138,6 @@ int fb_gm20b_mmu_ctrl_test(struct unit_module *m, struct gk20a *g, void *args)
 	g->ops.fb.read_wpr_info(g, &wpr_base, &wpr_size);
 	g->ops.fb.vpr_info_fetch(g);
 
-	/* Error injection for g->ops.fb.vpr_info_fetch */
-	nvgpu_posix_enable_fault_injection(timer_fi, true, 0);
-	err = g->ops.fb.vpr_info_fetch(g);
-	nvgpu_posix_enable_fault_injection(timer_fi, false, 0);
-	if (err != -ETIMEDOUT) {
-		unit_return_fail(m,
-			"vpr_info_fetch did not fail as expected (1)\n");
-	}
-
-	nvgpu_posix_enable_fault_injection(timer_fi, true, 1);
-	err = g->ops.fb.vpr_info_fetch(g);
-	nvgpu_posix_enable_fault_injection(timer_fi, false, 0);
-	if (err != -ETIMEDOUT) {
-		unit_return_fail(m,
-			"vpr_info_fetch did not fail as expected (2)\n");
-	}
-
 	/*
 	 * Trigger timeout in the gm20b_fb_vpr_info_fetch_wait function on
 	 * fb_mmu_vpr_info_fetch_v(val) == fb_mmu_vpr_info_fetch_false_v()

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -115,8 +115,6 @@ int fb_mmu_fault_gv11b_buffer_test(struct unit_module *m, struct gk20a *g,
 	u32 get_idx;
 	u32 val;
 	u32 lo, hi;
-	struct nvgpu_posix_fault_inj *timers_fi =
-		nvgpu_timers_get_fault_injection();
 
 	if (g->ops.fb.is_fault_buf_enabled(g, 0)) {
 		unit_return_fail(m, "fault buffer not disabled as expected\n");
@@ -144,11 +142,6 @@ int fb_mmu_fault_gv11b_buffer_test(struct unit_module *m, struct gk20a *g,
 	/* Enabling again shouldn't cause an issue */
 	g->ops.fb.fault_buf_set_state_hw(g, 0, NVGPU_MMU_FAULT_BUF_ENABLED);
 
-	/* Make nvgpu_timeout_init fail during disable operation */
-	nvgpu_posix_enable_fault_injection(timers_fi, true, 0);
-	g->ops.fb.fault_buf_set_state_hw(g, 0, NVGPU_MMU_FAULT_BUF_DISABLED);
-	nvgpu_posix_enable_fault_injection(timers_fi, false, 0);
-
 	/* Disable */
 	g->ops.fb.fault_buf_set_state_hw(g, 0, NVGPU_MMU_FAULT_BUF_DISABLED);
 

@@ -366,14 +366,9 @@ int test_gv11b_fifo_is_preempt_pending(struct unit_module *m, struct gk20a *g,
 	struct nvgpu_tsg *tsg = NULL;
 	struct gpu_ops gops = g->ops;
 	unsigned int id_type;
-	struct nvgpu_posix_fault_inj *timers_fi;
 	struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g);
 	u32 ctx_stat = 0U;
 	u32 id = 0U, next_id = 0U;
-	/* Assuming runlist_id is 0 */
-	u32 runlist_served_pbdmas = g->fifo.runlists[0U]->pbdma_bitmask;
-
-	timers_fi = nvgpu_timers_get_fault_injection();
 
 	err = nvgpu_runlist_setup_sw(g);
 	unit_assert(err == 0, goto done);
@@ -407,10 +402,9 @@ int test_gv11b_fifo_is_preempt_pending(struct unit_module *m, struct gk20a *g,
 			true : false;
 
 	if (branches & F_PREEMPT_PENDING_POLL_PBDMA_FAIL) {
-		nvgpu_posix_enable_fault_injection(timers_fi, true, 0U);
+		/* TODO: make the poll loop time out */
 	} else if (branches & F_PREEMPT_PENDING_POLL_ENG_TIMEOUT_FAIL) {
-		nvgpu_posix_enable_fault_injection(timers_fi, true,
-			__builtin_popcount(runlist_served_pbdmas));
+		/* TODO: make the poll loop time out */
 	}
 
 	/*
@@ -420,10 +414,6 @@ int test_gv11b_fifo_is_preempt_pending(struct unit_module *m, struct gk20a *g,
 	stub.pbdma_st.chsw_status = NVGPU_PBDMA_CHSW_STATUS_INVALID;
 
 	if (branches & F_PREEMPT_PENDING_POLL_ENG_PRE_SI_RETRIES) {
-		/* Timeout should not expire */
-		nvgpu_posix_enable_fault_injection(timers_fi, true,
-			PREEMPT_PENDING_POLL_PRE_SI_RETRIES + 4U);
-
 		/* Force engine status = ctxsw_switch */
 		branches |= F_PREEMPT_PENDING_CTX_STAT_SWITCH;
 		/* Force eng_intr_pending = 0 */
@@ -495,8 +485,6 @@ int test_gv11b_fifo_is_preempt_pending(struct unit_module *m, struct gk20a *g,
 		} else {
 			unit_assert(err == 0, goto done);
 		}
-
-		nvgpu_posix_enable_fault_injection(timers_fi, false, 0);
 	}
 
 	ret = UNIT_SUCCESS;

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -161,17 +161,9 @@ int test_gk20a_runlist_wait_pending(struct unit_module *m,
 	u32 runlist_id = nvgpu_engine_get_gr_runlist_id(g);
 	u32 timeout = g->poll_timeout_default;
 	int err;
-	struct nvgpu_posix_fault_inj *timer_fi =
-		nvgpu_timers_get_fault_injection();
 
 	(void)nvgpu_posix_register_io(g, &test_reg_callbacks);
 
-	/* nvgpu_timeout_init failure */
-	nvgpu_posix_enable_fault_injection(timer_fi, true, 0);
-	err = gk20a_runlist_wait_pending(g, runlist_id);
-	unit_assert(err == -ETIMEDOUT, goto done);
-	nvgpu_posix_enable_fault_injection(timer_fi, false, 0);
-
 	g->poll_timeout_default = 10; /* ms */
 
 	ctx->m = m;

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -98,7 +98,6 @@ int test_gk20a_runlist_hw_submit(struct unit_module *m,
 * - Check case where polling times out:
 *   - Set register to indicate that runlist is pending.
 *   - Call gk20a_runlist_wait_pending.
- * - Check case where nvgpu_timeout_init fails, using fault injection.
 *
 * Output: Returns PASS if all branches gave expected results. FAIL otherwise.
 */

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -180,21 +180,10 @@ static int gr_falcon_timer_init_error(struct unit_module *m,
 {
 	int err, i;
 	u32 fecs_imem = 0, gpccs_imem = 0;
-	struct nvgpu_gr_falcon_query_sizes sizes;
-	struct nvgpu_posix_fault_inj *timer_fi =
-		nvgpu_timers_get_fault_injection();
 	int (*gr_falcon_ctrl_ctxsw_local)(struct gk20a *g,
 		u32 fecs_method,
 		u32 data, u32 *ret_val);
 
-	nvgpu_posix_enable_fault_injection(timer_fi, true, 0);
-	err = g->ops.gr.falcon.wait_mem_scrubbing(g);
-	nvgpu_posix_enable_fault_injection(timer_fi, false, 0);
-	if (err == 0) {
-		unit_return_fail(m,
-			"gr_falcon_wait_mem_scrubbing timer failed\n");
-	}
-
 	for (i = 0; i < 2; i++) {
 		switch (i) {
 		case 0:
@@ -216,22 +205,6 @@ static int gr_falcon_timer_init_error(struct unit_module *m,
 		}
 	}
 
-	nvgpu_posix_enable_fault_injection(timer_fi, true, 0);
-	err = g->ops.gr.falcon.wait_ctxsw_ready(g);
-	nvgpu_posix_enable_fault_injection(timer_fi, false, 0);
-	if (err == 0) {
-		unit_return_fail(m,
-			"gr_falcon_wait_ctxsw_ready timer failed\n");
-	}
-
-	nvgpu_posix_enable_fault_injection(timer_fi, true, 0);
-	err = g->ops.gr.falcon.init_ctx_state(g, &sizes);
-	nvgpu_posix_enable_fault_injection(timer_fi, false, 0);
-	if (err == 0) {
-		unit_return_fail(m,
-			"gr_falcon_init_ctx_state failed\n");
-	}
-
 	/* branch coverage check */
 	nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, false);
 	nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);

@@ -55,10 +55,6 @@ struct gk20a;
 * Steps:
 * - Call gm20b_gr_falcon_ctrl_ctxsw with watchdog timeout Method.
 * - Call g->ops.gr.falcon.ctrl_ctxsw with Invalid Method.
- * - Enable timer init failure injection in various functions.
- *   - g->ops.gr.falcon.wait_ctxsw_ready.
- *   - g->ops.gr.falcon.init_ctx_state.
- *   - g->ops.gr.falcon.wait_mem_scrubbing.
 * - Call gm20b_gr_falcon_submit_fecs_method_op with various
 *   method op codes.
 * - Check that enable_set bit is set for ccsr_channel_r

@@ -113,8 +113,6 @@ int test_gr_init_hal_ecc_scrub_reg(struct unit_module *m,
 	u32 i;
 	int err;
 	struct nvgpu_gr_config *config = nvgpu_gr_get_config_ptr(g);
-	struct nvgpu_posix_fault_inj *timer_fi =
-		nvgpu_timers_get_fault_injection();
 
 	/* Code coverage */
 	nvgpu_set_enabled(g, NVGPU_ECC_ENABLED_SM_ICACHE, false);
@@ -135,19 +133,6 @@ int test_gr_init_hal_ecc_scrub_reg(struct unit_module *m,
 	nvgpu_set_enabled(g, NVGPU_ECC_ENABLED_SM_L1_DATA, true);
 	nvgpu_set_enabled(g, NVGPU_ECC_ENABLED_SM_LRF, true);
 
-	/* Trigger timeout initialization failure */
-	for (i = 0;
-	     i < (sizeof(ecc_scrub_data) / sizeof(struct gr_ecc_scrub_reg_rec));
-	     i++) {
-		nvgpu_posix_enable_fault_injection(timer_fi, true, i);
-		err = g->ops.gr.init.ecc_scrub_reg(g, config);
-		if (err == 0) {
-			unit_return_fail(m, "Timeout was expected");
-		}
-	}
-
-	nvgpu_posix_enable_fault_injection(timer_fi, false, 0);
-
 	for (i = 0;
 	     i < (sizeof(ecc_scrub_data) / sizeof(struct gr_ecc_scrub_reg_rec));
 	     i++) {
@@ -181,17 +166,6 @@ int test_gr_init_hal_wait_empty(struct unit_module *m,
 {
 	int err;
 	int i;
-	struct nvgpu_posix_fault_inj *timer_fi =
-		nvgpu_timers_get_fault_injection();
 
-	/* Fail timeout initialization */
-	nvgpu_posix_enable_fault_injection(timer_fi, true, 0);
-	err = g->ops.gr.init.wait_empty(g);
-	if (err == 0) {
-		return UNIT_FAIL;
-	}
-
-	nvgpu_posix_enable_fault_injection(timer_fi, false, 0);
-
 	/* gr_status is non-zero, gr_activity are zero, expect failure */
 	nvgpu_writel(g, gr_status_r(), BIT32(7));
@@ -271,8 +245,6 @@ int test_gr_init_hal_wait_idle(struct unit_module *m,
 	bool expected_pass;
 	u32 entry_count;
 	struct nvgpu_fifo *f = &g->fifo;
-	struct nvgpu_posix_fault_inj *timer_fi =
-		nvgpu_timers_get_fault_injection();
 
 	/* Configure GR engine in DEVICE_INFO registers */
 	entry_count = top_device_info__size_1_v();
@@ -301,15 +273,6 @@ int test_gr_init_hal_wait_idle(struct unit_module *m,
 		return UNIT_FAIL;
 	}
 
-	/* Fail timeout initialization */
-	nvgpu_posix_enable_fault_injection(timer_fi, true, 0);
-	err = g->ops.gr.init.wait_idle(g);
-	if (err != -ETIMEDOUT) {
-		return UNIT_FAIL;
-	}
-
-	nvgpu_posix_enable_fault_injection(timer_fi, false, 0);
-
 	/*
 	 * Set combinations of gr/fifo status registers.
 	 * g->ops.gr.init.wait_idle will timeout only when context is valid
@@ -395,17 +358,6 @@ int test_gr_init_hal_wait_fe_idle(struct unit_module *m,
 		struct gk20a *g, void *args)
 {
 	int err;
-	struct nvgpu_posix_fault_inj *timer_fi =
-		nvgpu_timers_get_fault_injection();
 
-	/* Fail timeout initialization */
-	nvgpu_posix_enable_fault_injection(timer_fi, true, 0);
-	err = g->ops.gr.init.wait_fe_idle(g);
-	if (err != -ETIMEDOUT) {
-		return UNIT_FAIL;
-	}
-
-	nvgpu_posix_enable_fault_injection(timer_fi, false, 0);
-
 	/* Set FE status active */
 	nvgpu_writel(g, gr_status_r(), BIT32(2U));
@@ -430,20 +382,9 @@ int test_gr_init_hal_fe_pwr_mode(struct unit_module *m,
 		struct gk20a *g, void *args)
 {
 	int err;
-	struct nvgpu_posix_fault_inj *timer_fi =
-		nvgpu_timers_get_fault_injection();
 	struct nvgpu_posix_fault_inj *readl_fi =
 		nvgpu_readl_get_fault_injection();
 
-	/* Fail timeout initialization */
-	nvgpu_posix_enable_fault_injection(timer_fi, true, 0);
-	err = g->ops.gr.init.fe_pwr_mode_force_on(g, true);
-	if (err != -ETIMEDOUT) {
-		return UNIT_FAIL;
-	}
-
-	nvgpu_posix_enable_fault_injection(timer_fi, false, 0);
-
 	/* Trigger timeout by default */
 	err = g->ops.gr.init.fe_pwr_mode_force_on(g, true);
 	if (err != -ETIMEDOUT) {

@@ -46,8 +46,6 @@ struct unit_module;
 *   been executed successfully.
 *
 * Steps:
- * - Inject timeout error and call g->ops.gr.init.wait_empty.
- *   Should fail since timeout initialization fails.
 * - Set various pass/fail values of gr_status and gr_activity registers
 *   and verify the pass/fail output of g->ops.gr.init.wait_empty as
 *   appropriate.
@@ -75,9 +73,6 @@ int test_gr_init_hal_wait_empty(struct unit_module *m,
 *   GR engine information is enumerated.
 * - Initialize required pieces of fifo (struct gk20a * pointer in struct
 *   nvgpu_fifo, engine and pbdma s/w setup).
- * - Inject timeout error and call g->ops.gr.init.wait_idle.
- *   Should fail since timeout initialization fails.
- * - Disable timeout error injection.
 * - Set combinations of gr/fifo status registers.
 *   Write register gr_engine_status_r() to update GR engine status.
 *   Write register fifo_engine_status_r() to update context and ctxsw status.
@@ -113,7 +108,6 @@ int test_gr_init_hal_wait_idle(struct unit_module *m,
 *   been executed successfully.
 *
 * Steps:
- * - Inject timeout error and call g->ops.gr.init.wait_fe_idle.
 *   Should fail since timeout initialization fails.
 * - Set FE active status in register gr_status_r(), and call
 *   g->ops.gr.init.wait_fe_idle. Should fail since FE fails to idle.
@@ -140,9 +134,6 @@ int test_gr_init_hal_wait_fe_idle(struct unit_module *m,
 *   been executed successfully.
 *
 * Steps:
- * - Inject timeout error and call g->ops.gr.init.fe_pwr_mode_force_on.
- *   should fail since timeout initialization fails.
- * - Disable timeout error injection.
 * - Call g->ops.gr.init.fe_pwr_mode_force_on. By default this should
 *   timeout and return error.
 * - Enable readl function error injection and call
@@ -173,8 +164,6 @@ int test_gr_init_hal_fe_pwr_mode(struct unit_module *m,
 * - Disable feature flags for common.gr ECC handling for code coverage
 *   and call g->ops.gr.init.ecc_scrub_reg.
 * - Re-enable all the feature flags.
- * - Inject timeout initialization failures and call
- *   g->ops.gr.init.ecc_scrub_reg.
 * - Set incorrect values of scrub_done for each error type so that scrub
 *   wait times out.
 * - Ensure that g->ops.gr.init.ecc_scrub_reg returns error.

@@ -674,8 +674,6 @@ int test_flush_ltc(struct unit_module *m, struct gk20a *g, void *args)
 	int ret = UNIT_SUCCESS;
 	int i;
 	u32 stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
-	struct nvgpu_posix_fault_inj *timer_fi =
-		nvgpu_timers_get_fault_injection();
 
 	/* make it appear the clean & invalidate completed */
 	for (i = 0; i < NUM_LTC; i++) {
@@ -704,11 +702,6 @@ int test_flush_ltc(struct unit_module *m, struct gk20a *g, void *args)
 
 	g->ops.ltc.flush(g);
 
-	/* enable fault injection for the timer init call for branch coverage */
-	nvgpu_posix_enable_fault_injection(timer_fi, true, 0);
-	g->ops.ltc.flush(g);
-	nvgpu_posix_enable_fault_injection(timer_fi, false, 0);
-
 	return ret;
 }
 

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -357,8 +357,6 @@ int test_ltc_set_enabled(struct unit_module *m, struct gk20a *g, void *args);
 * - Configure the registers to reflect the clean and invalidate are pending
 *   for each ltc.
 * - Call the flush API to get branch coverage of the timeout handling.
- * - Enable the timer fault injection.
- * - Call the flush API to get branch coverage of the handling of the error.
 *
 * Output: Returns PASS if register is configured correctly. FAIL otherwise.
 */

@@ -61,7 +61,6 @@
 #define SPECIAL_CASE_DOUBLE_MAP 1
 #define SPECIAL_CASE_NO_FREE 2
 #define SPECIAL_CASE_NO_VM_AREA 4
-#define SPECIAL_CASE_TIMEOUT_INIT_FAIL 8
 
 /* Expected bit count from nvgpu_vm_pde_coverage_bit_count() */
 #define GP10B_PDE_BIT_COUNT 21U
@@ -671,8 +670,6 @@ static int map_buffer(struct unit_module *m,
 	u32 pte[2];
 	struct nvgpu_mapped_buf **mapped_buffers = NULL;
 	u32 num_mapped_buffers = 0;
-	struct nvgpu_posix_fault_inj *timers_fi =
-		nvgpu_timers_get_fault_injection();
 	struct nvgpu_posix_fault_inj *kmem_fi =
 		nvgpu_kmem_get_fault_injection();
 
@@ -939,13 +936,7 @@ free_mapped_buf:
 	 */
 	u64 buf_addr = mapped_buf->addr;
 
-	if (subcase & SPECIAL_CASE_TIMEOUT_INIT_FAIL) {
-		nvgpu_posix_enable_fault_injection(timers_fi, true, 0);
-		nvgpu_vm_unmap(vm, buf_addr, batch);
-		nvgpu_posix_enable_fault_injection(timers_fi, false, 0);
-	} else {
-		nvgpu_vm_unmap(vm, buf_addr, batch);
-	}
+	nvgpu_vm_unmap(vm, buf_addr, batch);
 	mapped_buf = NULL;
 	/*
 	 * Unmapping an already unmapped buffer should not cause any
@@ -1633,24 +1624,6 @@ int test_map_buf_gpu_va(struct unit_module *m,
 		goto exit;
 	}
 
-	/*
-	 * Corner case: Timeout init fails in nvgpu_vm_unmap
-	 */
-	ret = map_buffer(m,
-			g,
-			vm,
-			NULL,
-			BUF_CPU_PA,
-			gpu_va,
-			buf_size,
-			page_size,
-			alignment,
-			SPECIAL_CASE_TIMEOUT_INIT_FAIL);
-	if (ret != UNIT_SUCCESS) {
-		unit_err(m, "Mapping failed (already mapped case)\n");
-		goto exit;
-	}
-
 	/* Map 64KB buffer */
 	buf_size = SZ_64K;
 	page_size = SZ_64K;