gpu: nvgpu: falcon: add unit tests for branch coverage

Add test cases to cover the gk20a_is_falcon_idle branches, the
non-word-multiple copy cases in the copies to imem and dmem, and the
buffering logic in the unaligned-source copies to imem/dmem.

Also update the falcon_copy_to_dmem|imem_unaligned_src loop condition to
compare (offset + sizeof(src_tmp)) against size inclusively (<= instead
of <), so a trailing chunk of exactly sizeof(src_tmp) bytes is consumed
by the buffering loop.
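
For context, the sketch below (a standalone illustration, not the nvgpu
sources; the 16-byte src_tmp buffer and the helper name are assumptions)
shows why the inclusive comparison matters: with "<" a trailing chunk of
exactly sizeof(src_tmp) bytes falls through to the remainder path, while
"<=" lets the buffering loop consume it, leaving the remainder path only
true partial chunks.

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the word-buffering loop in the unaligned-source
 * copy helpers: buffer sizeof(src_tmp) bytes at a time, then hand whatever
 * is left to a tail path that deals with partial chunks.
 */
static void copy_unaligned_sketch(const unsigned char *src, size_t size)
{
        unsigned int src_tmp[4];
        size_t offset = 0;

        /* Inclusive bound: a final chunk of exactly sizeof(src_tmp) bytes is
         * still copied through the buffer instead of the remainder path.
         */
        while ((offset + sizeof(src_tmp)) <= size) {
                memcpy(src_tmp, &src[offset], sizeof(src_tmp));
                printf("buffered bytes %zu..%zu\n",
                       offset, offset + sizeof(src_tmp));
                offset += sizeof(src_tmp);
        }

        /* Remainder path: strictly fewer than sizeof(src_tmp) bytes remain. */
        if (offset < size) {
                printf("tail bytes %zu..%zu\n", offset, size);
        }
}

int main(void)
{
        unsigned char data[32] = {0};

        /* 32 bytes is an exact multiple of the 16-byte buffer: with "<" the
         * last 16 bytes would be left to the tail path, with "<=" they are
         * buffered like any other chunk and no tail remains.
         */
        copy_unaligned_sketch(data, sizeof(data));
        return 0;
}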

JIRA NVGPU-2214

Change-Id: Ib891dc57f36a66818837f951c4453588b71fed90
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2259146
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Sagar Kamble
Date: 2019-12-10 12:27:27 +05:30
Committed by: Alex Waterman
Parent: 70f614e07e
Commit: b1e4c0ef72
3 changed files with 64 additions and 3 deletions


@@ -140,7 +140,7 @@ static void falcon_copy_to_dmem_unaligned_src(struct nvgpu_falcon *flcn,
u32 elems = 0U;
u32 i = 0U;
- while ((offset + sizeof(src_tmp)) < size) {
+ while ((offset + sizeof(src_tmp)) <= size) {
nvgpu_memcpy((u8 *)&src_tmp[0], &src[offset],
sizeof(src_tmp));
for (i = 0; i < ARRAY_SIZE(src_tmp); i++) {
@@ -228,7 +228,7 @@ static void falcon_copy_to_imem_unaligned_src(struct nvgpu_falcon *flcn,
u32 i = 0U;
u32 j = 0U;
- while ((offset + sizeof(src_tmp)) < size) {
+ while ((offset + sizeof(src_tmp)) <= size) {
nvgpu_memcpy((u8 *)&src_tmp[0], &src[offset],
sizeof(src_tmp));
for (i = 0; i < ARRAY_SIZE(src_tmp); i++) {


@@ -566,6 +566,22 @@ static void flcn_idle_pass(void *data)
nvgpu_posix_io_writel_reg_space(g, idlestate_addr, unit_status);
}
/*
* This covers the branch of the if condition in gk20a_is_falcon_idle where
* the falcon CPU is idle but the ext units are busy.
*/
static void flcn_idle_fail_ext_busy(void *data)
{
struct nvgpu_falcon *flcn = (struct nvgpu_falcon *) data;
u32 idlestate_addr = flcn->flcn_base + falcon_falcon_idlestate_r();
struct gk20a *g = flcn->g;
u32 unit_status;
unit_status = nvgpu_posix_io_readl_reg_space(g, idlestate_addr);
unit_status |= falcon_falcon_idlestate_ext_busy_m();
nvgpu_posix_io_writel_reg_space(g, idlestate_addr, unit_status);
}
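
For reference, the branch this helper targets can be pictured with the sketch
below; it is an illustration only (the field masks and the shape of
gk20a_is_falcon_idle are assumptions, not the nvgpu sources). The falcon is
reported idle only when both the CPU and the external units are idle, so
setting the ext-busy field while the CPU field stays clear makes every poll
report busy and nvgpu_falcon_wait_idle eventually returns -ETIMEDOUT.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical idlestate field masks, for illustration only; the real layout
 * comes from the falcon_falcon_idlestate_*() accessors.
 */
#define IDLESTATE_CPU_BUSY 0x0001u
#define IDLESTATE_EXT_BUSY 0xFFFEu

static bool falcon_is_idle_sketch(uint32_t idlestate)
{
        if ((idlestate & IDLESTATE_CPU_BUSY) != 0u) {
                return false;   /* falcon CPU still busy */
        }
        if ((idlestate & IDLESTATE_EXT_BUSY) != 0u) {
                return false;   /* CPU idle, ext units busy: the new branch */
        }
        return true;
}

int main(void)
{
        /* All idle vs. the state flcn_idle_fail_ext_busy sets up. */
        printf("all idle      -> idle=%d\n", falcon_is_idle_sketch(0x0000u));
        printf("ext busy only -> idle=%d\n", falcon_is_idle_sketch(0x0004u));
        return 0;
}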
static void flcn_idle_fail(void *data)
{
struct nvgpu_falcon *flcn = (struct nvgpu_falcon *) data;
@@ -596,6 +612,7 @@ int test_falcon_idle(struct unit_module *m, struct gk20a *g, void *__args)
int exp_err;
} test_data[] = {{uninit_flcn, NULL, -EINVAL},
{gpccs_flcn, flcn_idle_pass, 0},
{gpccs_flcn, flcn_idle_fail_ext_busy, -ETIMEDOUT},
{gpccs_flcn, flcn_idle_fail, -ETIMEDOUT} };
int size = ARRAY_SIZE(test_data);
int err, i;
@@ -703,7 +720,8 @@ int test_falcon_halt(struct unit_module *m, struct gk20a *g, void *__args)
/*
* Valid/Invalid: Status of read and write from Falcon
- * Valid: Read and write from initialized Falcon succeeds.
+ * Valid: Read and write of word-multiple and non-word-multiple data from
+ * initialized Falcon succeeds.
* Invalid: Read and write for uninitialized Falcon fails
* with error -EINVAL.
*/
@@ -731,6 +749,15 @@ int test_falcon_mem_rw_init(struct unit_module *m, struct gk20a *g,
}
}
/* write/read to/from initialized falcon with non-word-multiple data */
for (i = 0; i < MAX_MEM_TYPE; i++) {
err = falcon_check_read_write(g, m, pmu_flcn, i, dst,
RAND_DATA_SIZE - 1, 0);
if (err) {
return UNIT_FAIL;
}
}
return UNIT_SUCCESS;
}
@@ -820,6 +847,30 @@ int test_falcon_mem_rw_unaligned_cpu_buffer(struct unit_module *m,
}
#endif
/*
* Write data of size 1K to a valid range in imem from an unaligned source
* to verify the buffering logic in falcon_copy_to_imem_unaligned_src.
*/
unit_info(m, "Writing %d bytes to imem\n", (u32) SZ_1K);
err = nvgpu_falcon_copy_to_imem(pmu_flcn, dst,
(u8 *) rand_test_data_unaligned,
SZ_1K, 0, false, 0);
if (err) {
unit_return_fail(m, "Failed to copy to IMEM\n");
}
/*
* Write data of size 1K to a valid range in dmem from an unaligned source
* to verify the buffering logic in falcon_copy_to_dmem_unaligned_src.
*/
unit_info(m, "Writing %d bytes to dmem\n", (u32) SZ_1K);
err = nvgpu_falcon_copy_to_dmem(pmu_flcn, dst,
(u8 *) rand_test_data_unaligned,
SZ_1K, 0);
if (err) {
unit_return_fail(m, "Failed to copy to DMEM\n");
}
return UNIT_SUCCESS;
}
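
The hunk above copies from rand_test_data_unaligned without showing how that
pointer is built. As a usage note, one common way to get a deliberately
misaligned source buffer for this kind of test is sketched below; the names
and the one-byte offset are assumptions for illustration, not the unit test's
actual setup.

#include <stdint.h>
#include <stdio.h>

#define TEST_DATA_SIZE 1024u

/* Hypothetical setup: one extra word of backing storage plus a one-byte
 * offset guarantees the source pointer is not word aligned, so copies from
 * it exercise the unaligned-source buffering path rather than the aligned
 * fast path.
 */
static uint32_t test_data_words[(TEST_DATA_SIZE / sizeof(uint32_t)) + 1u];

static uint8_t *get_unaligned_test_data(void)
{
        return (uint8_t *)test_data_words + 1;
}

int main(void)
{
        uint8_t *p = get_unaligned_test_data();

        printf("word aligned: %s\n",
               ((uintptr_t)p % sizeof(uint32_t)) == 0u ? "yes" : "no");
        return 0;
}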


@@ -127,6 +127,9 @@ int test_falcon_mem_scrub(struct unit_module *m, struct gk20a *g, void *__args);
* underlying falcon is idle.
* - Verify that wait succeeds with 0 return value.
* - Invoke nvgpu_falcon_wait_idle with initialized falcon struct where
* underlying falcon's ext units are busy but falcon CPU is idle.
* - Verify that wait fails with -ETIMEDOUT return value.
* - Invoke nvgpu_falcon_wait_idle with initialized falcon struct where
* underlying falcon is not idle.
* - Verify that wait fails with -ETIMEDOUT return value.
*
@@ -177,6 +180,10 @@ int test_falcon_halt(struct unit_module *m, struct gk20a *g, void *__args);
* - Invoke nvgpu_falcon_copy_to_imem and nvgpu_falcon_copy_to_dmem with
* initialized falcon struct with sample random data.
* - Verify that writes succeed with 0 return value in both cases.
* - Invoke nvgpu_falcon_copy_to_imem and nvgpu_falcon_copy_to_dmem with
* initialized falcon struct with sample random data whose size is not a
* multiple of the word size.
* - Verify that writes succeed with 0 return value in both cases.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.
@@ -336,6 +343,9 @@ int test_falcon_bootstrap(struct unit_module *m, struct gk20a *g, void *__args);
* initialized falcon struct with above initialized sample random data
* and valid range.
* - Verify that writes succeed with 0 return value in both cases.
* - Write data of size 1K to a valid range in imem/dmem from an unaligned
* source buffer to verify the buffering logic and cover branches in
* falcon_copy_to_dmem|imem_unaligned_src.
*
* Output: Returns PASS if the steps above were executed successfully. FAIL
* otherwise.