gpu: nvgpu: add more coverage to SYNC UT

This patch adds the following code coverage:
1) Add a test that exercises the failure path of the syncpt_name allocation.
2) Add a test that covers all three branches of get_sync_ro_map.
3) Add entries to SWUTS.h and SWUTS.sources.
4) Add test entries to the required_tests.json file.
5) Add nvgpu_safe_add_u64 to fix a CERT-C INT30 violation (see the sketch below).
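
For item 5: CERT-C INT30 flags unsigned additions that can wrap. Below is a
minimal, self-contained sketch of the wrap-check pattern such a helper
implements; the actual nvgpu_safe_add_u64 definition is not part of this
diff, so the abort-on-overflow behavior shown here is an assumption.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for a safe u64 add: trap instead of silently wrapping. */
static uint64_t toy_safe_add_u64(uint64_t a, uint64_t b)
{
	if ((UINT64_MAX - a) < b) {
		abort(); /* a + b would wrap around (CERT-C INT30) */
	}
	return a + b;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)toy_safe_add_u64(40ULL, 2ULL));
	return 0;
}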

Jira NVGPU-913

Change-Id: If0ccc9a9314494af1a663eb8ef37a68644c18453
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2267389
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: ddutta
Date: 2019-12-23 11:16:55 +05:30
Committed-by: Alex Waterman
Parent: b11699fe57
Commit: d0ad13b024
5 changed files with 211 additions and 35 deletions

@@ -64,14 +64,6 @@ static int init_syncpt_mem(struct unit_module *m, struct gk20a *g)
return 0;
}
static int de_init_syncpt_mem(struct unit_module *m, struct gk20a *g)
{
if (nvgpu_mem_is_valid(&g->syncpt_mem))
nvgpu_dma_free(g, &g->syncpt_mem);
return 0;
}
static int init_channel_vm(struct unit_module *m, struct nvgpu_channel *ch)
{
u64 low_hole, aperture_size;
@@ -156,7 +148,6 @@ int test_sync_init(struct unit_module *m, struct gk20a *g, void *args)
*/
ch = nvgpu_kzalloc(g, sizeof(struct nvgpu_channel));
if (ch == NULL) {
de_init_syncpt_mem(m, g);
nvgpu_free_nvhost_dev(g);
unit_return_fail(m, "sync channel creation failure");
}
@@ -169,7 +160,6 @@ int test_sync_init(struct unit_module *m, struct gk20a *g, void *args)
ret = init_channel_vm(m, ch);
if (ret != 0) {
nvgpu_kfree(g, ch);
de_init_syncpt_mem(m, g);
nvgpu_free_nvhost_dev(g);
unit_return_fail(m, "sync channel vm init failure");
}
@@ -280,6 +270,13 @@ done:
if (sync != NULL)
nvgpu_channel_sync_destroy(sync, false);
if (nvgpu_mem_is_valid(&g->syncpt_mem) &&
ch->vm->syncpt_ro_map_gpu_va != 0ULL) {
nvgpu_gmmu_unmap(ch->vm, &g->syncpt_mem,
ch->vm->syncpt_ro_map_gpu_va);
ch->vm->syncpt_ro_map_gpu_va = 0ULL;
}
return ret;
}
@@ -322,29 +319,135 @@ done:
if (user_sync != NULL)
nvgpu_channel_sync_destroy(user_sync, false);
if (nvgpu_mem_is_valid(&g->syncpt_mem) &&
ch->vm->syncpt_ro_map_gpu_va != 0ULL) {
nvgpu_gmmu_unmap(ch->vm, &g->syncpt_mem,
ch->vm->syncpt_ro_map_gpu_va);
ch->vm->syncpt_ro_map_gpu_va = 0ULL;
}
return ret;
}
#define F_SYNC_GET_RO_MAP_PRE_ALLOCATED 0
#define F_SYNC_GET_RO_MAP 1
#define F_SYNC_GET_RO_MAP_FAIL 2
static const char *f_sync_get_ro_map[] = {
"sync_get_ro_map_preallocated",
"sync_get_ro_map",
"sync_get_ro_map_fail",
};
static void syncpt_ro_map_gpu_va_clear(struct gk20a *g, struct nvgpu_channel *ch)
{
if (nvgpu_mem_is_valid(&g->syncpt_mem) &&
ch->vm->syncpt_ro_map_gpu_va != 0ULL) {
nvgpu_gmmu_unmap(ch->vm, &g->syncpt_mem,
ch->vm->syncpt_ro_map_gpu_va);
ch->vm->syncpt_ro_map_gpu_va = 0ULL;
} else if (ch->vm->syncpt_ro_map_gpu_va != 0ULL) {
ch->vm->syncpt_ro_map_gpu_va = 0ULL;
} else {
(void) memset(&g->syncpt_mem, 0, sizeof(struct nvgpu_mem));
}
}
int test_sync_get_ro_map(struct unit_module *m, struct gk20a *g, void *args)
{
u64 base_gpuva = 0U;
u32 sync_size = 0U;
u32 branches;
int err = 0;
int ret = UNIT_FAIL;
for (branches = 0U; branches <= F_SYNC_GET_RO_MAP_FAIL; branches++) {
if (branches == F_SYNC_GET_RO_MAP_PRE_ALLOCATED) {
ch->vm->syncpt_ro_map_gpu_va = nvgpu_gmmu_map(ch->vm,
&g->syncpt_mem, g->syncpt_unit_size,
0, gk20a_mem_flag_read_only,
false, APERTURE_SYSMEM);
if (ch->vm->syncpt_ro_map_gpu_va == 0U) {
unit_return_fail(m, "Unable to preallocate mapping");
}
} else if (branches == F_SYNC_GET_RO_MAP) {
ch->vm->syncpt_ro_map_gpu_va = 0U;
} else if (branches == F_SYNC_GET_RO_MAP_FAIL) {
ch->vm->syncpt_ro_map_gpu_va = 0U;
/* fail Read-Only nvgpu_gmmu_map of g->syncpt_mem */
ch->vm->guest_managed = true;
}
unit_info(m, "%s branch: %s\n", __func__, f_sync_get_ro_map[branches]);
err = g->ops.sync.syncpt.get_sync_ro_map(ch->vm,
&base_gpuva, &sync_size);
if (branches < F_SYNC_GET_RO_MAP_FAIL) {
if (err != 0) {
unit_return_fail(m,
"unexpected failure in get_sync_ro_map");
} else {
ret = UNIT_SUCCESS;
}
assert(base_gpuva > 0ULL);
assert(sync_size > 0U);
unit_info(m, "Syncpt Shim GPU VA: %llu\n", base_gpuva);
} else {
if (err == 0) {
unit_return_fail(m,
"expected failure in get_sync_ro_map");
} else {
ret = UNIT_SUCCESS;
}
}
syncpt_ro_map_gpu_va_clear(g, ch);
if (ch->vm->guest_managed == true) {
ch->vm->guest_managed = false;
}
base_gpuva = 0U;
sync_size = 0U;
}
done:
syncpt_ro_map_gpu_va_clear(g, ch);
if (ch->vm->guest_managed == true) {
ch->vm->guest_managed = false;
}
return ret;
}
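
(As context for the branch names above: a self-contained toy model, not the
driver code, of the caching pattern get_sync_ro_map is expected to follow.
The mapping is created on first use, reused when pre-allocated, and fails
when the VM cannot map it, which is what the guest_managed trick forces.
The map stub and the stride value are made up.)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_vm {
	uint64_t syncpt_ro_map_gpu_va;
	bool guest_managed;
};

/* Stub for the GMMU map call: 0 means failure, anything else is a VA. */
static uint64_t toy_gmmu_map(struct toy_vm *vm)
{
	return vm->guest_managed ? 0ULL : 0x100000ULL;
}

static int toy_get_sync_ro_map(struct toy_vm *vm, uint64_t *base,
				uint32_t *size)
{
	if (vm->syncpt_ro_map_gpu_va == 0ULL) {	/* F_SYNC_GET_RO_MAP */
		vm->syncpt_ro_map_gpu_va = toy_gmmu_map(vm);
		if (vm->syncpt_ro_map_gpu_va == 0ULL) {
			return -1;	/* F_SYNC_GET_RO_MAP_FAIL */
		}
	}
	/* F_SYNC_GET_RO_MAP_PRE_ALLOCATED: reuse the cached mapping. */
	*base = vm->syncpt_ro_map_gpu_va;
	*size = 0x1000U;	/* per-syncpt stride, made up */
	return 0;
}

int main(void)
{
	struct toy_vm vm = { 0ULL, false };
	uint64_t base;
	uint32_t size;

	printf("first call:  %d\n", toy_get_sync_ro_map(&vm, &base, &size));
	printf("cached call: %d\n", toy_get_sync_ro_map(&vm, &base, &size));
	vm.syncpt_ro_map_gpu_va = 0ULL;
	vm.guest_managed = true;	/* force the map failure */
	printf("fail call:   %d\n", toy_get_sync_ro_map(&vm, &base, &size));
	return 0;
}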
#define F_SYNC_GLOBAL_DISABLE_SYNCPT 0
#define F_SYNC_SYNCPT_ALLOC_FAILED 1
#define F_SYNC_USER_MANAGED 2
#define F_SYNC_NVHOST_CLIENT_MANAGED_FAIL 3
#define F_SYNC_RO_MAP_GPU_VA_MAP_FAIL 4
#define F_SYNC_MEM_CREATE_PHYS_FAIL 5
#define F_SYNC_BUF_MAP_FAIL 6
#define F_SYNC_FAIL_LAST 7
#define F_SYNC_STRADD_FAIL 3
#define F_SYNC_NVHOST_CLIENT_MANAGED_FAIL 4
#define F_SYNC_RO_MAP_GPU_VA_MAP_FAIL 5
#define F_SYNC_MEM_CREATE_PHYS_FAIL 6
#define F_SYNC_BUF_MAP_FAIL 7
#define F_SYNC_FAIL_LAST 8
static const char *f_syncpt_open[] = {
"global_disable_syncpt",
"syncpt_alloc_failed",
"syncpt_user_managed_false",
"syncpt_stradd_fail",
"syncpt_get_client_managed_fail",
"syncpt_ro_map_gpu_va_fail",
"syncpt_create_phys_mem_fail",
"syncpt_buf_map_fail",
};
static void clear_test_params(struct gk20a *g, bool *user_managed,
#define FAIL_G_NAME_STR "GK20A_FAILSTR_ADD_U32_CNDITION"
static void clear_test_params(struct gk20a *g,
bool *fault_injection_enabled, u32 branch,
struct nvgpu_posix_fault_inj *kmem_fi)
{
@@ -352,10 +455,6 @@ static void clear_test_params(struct gk20a *g, bool *user_managed,
g->disable_syncpoints = false;
}
if (!(*user_managed)) {
*user_managed = true;
}
if (ch->vm->guest_managed) {
ch->vm->guest_managed = false;
}
@@ -365,13 +464,7 @@ static void clear_test_params(struct gk20a *g, bool *user_managed,
*fault_injection_enabled = false;
}
if (branch == F_SYNC_NVHOST_CLIENT_MANAGED_FAIL) {
g->nvhost->syncpt_id = 1U;
}
if (ch->vm->syncpt_ro_map_gpu_va) {
ch->vm->syncpt_ro_map_gpu_va = 0ULL;
}
syncpt_ro_map_gpu_va_clear(g, ch);
}
int test_sync_create_fail(struct unit_module *m, struct gk20a *g, void *args)
@@ -382,6 +475,7 @@ int test_sync_create_fail(struct unit_module *m, struct gk20a *g, void *args)
bool user_managed = true;
bool fault_injection_enabled = false;
int ret = UNIT_FAIL;
const char *g_name = g->name;
kmem_fi = nvgpu_kmem_get_fault_injection();
@@ -405,6 +499,12 @@ int test_sync_create_fail(struct unit_module *m, struct gk20a *g, void *args)
fault_injection_enabled = true;
} else if (branches == F_SYNC_USER_MANAGED) {
user_managed = false;
} else if (branches == F_SYNC_STRADD_FAIL) {
/*
* fill the entire buffer resulting in
* failure in nvgpu_strnadd_u32
*/
g->name = FAIL_G_NAME_STR;
} else if (branches == F_SYNC_NVHOST_CLIENT_MANAGED_FAIL) {
g->nvhost->syncpt_id = 20U; /* arbitrary id */
} else if (branches == F_SYNC_RO_MAP_GPU_VA_MAP_FAIL) {
@@ -434,6 +534,18 @@ int test_sync_create_fail(struct unit_module *m, struct gk20a *g, void *args)
unit_info(m, "%s branch: %s\n", __func__, f_syncpt_open[branches]);
sync = nvgpu_channel_sync_create(ch, user_managed);
if (branches == F_SYNC_USER_MANAGED) {
user_managed = true;
}
if (branches == F_SYNC_NVHOST_CLIENT_MANAGED_FAIL) {
g->nvhost->syncpt_id = 0U;
}
/* restore the original name member of the gk20a device */
g->name = g_name;
if (sync != NULL) {
nvgpu_channel_sync_destroy(sync, true);
unit_return_fail(m, "expected failure in creating sync points");
@@ -445,16 +557,13 @@ int test_sync_create_fail(struct unit_module *m, struct gk20a *g, void *args)
assert(syncpt_id == 0U);
assert(syncpt_value == 0U);
clear_test_params(g, &user_managed, &fault_injection_enabled,
branches, kmem_fi);
clear_test_params(g, &fault_injection_enabled, branches, kmem_fi);
}
ret = UNIT_SUCCESS;
return UNIT_SUCCESS;
done:
clear_test_params(g, &user_managed, &fault_injection_enabled,
0, kmem_fi);
clear_test_params(g, &fault_injection_enabled, 0, kmem_fi);
return ret;
}
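
(Why the F_SYNC_STRADD_FAIL branch swaps in a long g->name: the syncpt name
is built into a fixed-size buffer, so a device name that already fills the
buffer leaves no room for the appended id and the append step fails. A
self-contained toy of that mechanism follows; the 32-byte capacity and the
append semantics are assumptions, not taken from the driver.)

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for nvgpu_strnadd_u32(): append a decimal u32 to dst,
 * failing when the result does not fit in cap bytes. */
static bool toy_strnadd_u32(char *dst, size_t cap, unsigned int v)
{
	char num[16];

	(void) snprintf(num, sizeof(num), "%u", v);
	if (strlen(dst) + strlen(num) + 1U > cap) {
		return false;
	}
	strcat(dst, num);
	return true;
}

int main(void)
{
	/* 30 chars + NUL already nearly fill the assumed 32-byte buffer. */
	char name[32] = "GK20A_FAILSTR_ADD_U32_CNDITION";

	printf("append fits: %d\n", toy_strnadd_u32(name, sizeof(name), 42U));
	return 0;
}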
@@ -468,8 +577,6 @@ int test_sync_deinit(struct unit_module *m, struct gk20a *g, void *args)
nvgpu_kfree(g, ch);
}
de_init_syncpt_mem(m, g);
if (g->nvhost == NULL) {
unit_return_fail(m, "no valid nvhost device exists\n");
}
@@ -486,6 +593,7 @@ struct unit_module_test nvgpu_sync_tests[] = {
UNIT_TEST(sync_create_destroy, test_sync_create_destroy_sync, NULL, 0),
UNIT_TEST(sync_set_safe_state, test_sync_set_safe_state, NULL, 0),
UNIT_TEST(sync_user_managed_apis, test_sync_usermanaged_syncpt_apis, NULL, 0),
UNIT_TEST(sync_get_ro_map, test_sync_get_ro_map, NULL, 0),
UNIT_TEST(sync_fail, test_sync_create_fail, NULL, 0),
UNIT_TEST(sync_deinit, test_sync_deinit, NULL, 0),
};