gpu: nvgpu: add more coverage to SYNC UT
This patch adds the following code coverage:
1) Add a test to fail the allocation of syncpt_name correctly.
2) Add a test that covers all three branches of get_sync_ro_map.
3) Add entries to SWUTS.h and SWUTS.sources.
4) Add test entries in the required_tests.json file.
5) Add nvgpu_safe_add_u64 to fix a cert-C INT30 violation.

Jira NVGPU-913

Change-Id: If0ccc9a9314494af1a663eb8ef37a68644c18453
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2267389
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
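Note on item 5: nvgpu_safe_add_u64 itself does not appear in the hunks below. The sketch here only illustrates, with a hypothetical name (example_safe_add_u64), the kind of wrap-around check that CERT-C rule INT30 asks for on unsigned additions; the real nvgpu helper may differ in how it reports the error.

#include <stdint.h>
#include <assert.h>

/*
 * Illustrative only: reject u64 additions that would wrap.
 * CERT-C INT30 requires that unsigned operations not wrap silently,
 * so the overflow condition is checked before performing the add.
 */
static inline uint64_t example_safe_add_u64(uint64_t a, uint64_t b)
{
	/* a + b wraps around exactly when b > UINT64_MAX - a */
	assert(b <= UINT64_MAX - a);
	return a + b;
}

A helper of this shape would presumably be used where the unit test sums syncpoint offsets or sizes, so an unintended wrap-around cannot silently mask a failure.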
@@ -76,6 +76,7 @@
  * - @ref SWUTS-fifo-tsg-gv11b
  * - @ref SWUTS-fifo-userd-gk20a
  * - @ref SWUTS-fifo-usermode-gv11b
+ * - @ref SWUTS-nvgpu-sync
  * - @ref SWUTS-init
  * - @ref SWUTS-intr
  * - @ref SWUTS-interface-atomic
@@ -53,6 +53,8 @@ INPUT += ../../../userspace/units/fifo/tsg/nvgpu-tsg.h
 INPUT += ../../../userspace/units/fifo/tsg/gv11b/nvgpu-tsg-gv11b.h
 INPUT += ../../../userspace/units/fifo/userd/gk20a/nvgpu-userd-gk20a.h
 INPUT += ../../../userspace/units/fifo/userd/gv11b/nvgpu-usermode-gv11b.h
+INPUT += ../../../userspace/units/sync/nvgpu-sync.c
+INPUT += ../../../userspace/units/sync/nvgpu-sync.h
 INPUT += ../../../userspace/units/fuse/nvgpu-fuse.h
 INPUT += ../../../userspace/units/fuse/nvgpu-fuse-gm20b.h
 INPUT += ../../../userspace/units/fuse/nvgpu-fuse-gp10b.h
@@ -4782,6 +4782,48 @@
         "test": "test_cast",
         "case": "cast",
         "unit": "static_analysis",
+        "test_level": 0
+    },
+    {
+        "test": "test_sync_init",
+        "case": "sync_init",
+        "unit": "nvgpu-sync",
+        "test_level": 0
+    },
+    {
+        "test": "test_sync_deinit",
+        "case": "sync_deinit",
+        "unit": "nvgpu-sync",
+        "test_level": 0
+    },
+    {
+        "test": "test_sync_create_destroy_sync",
+        "case": "sync_create_destroy",
+        "unit": "nvgpu-sync",
+        "test_level": 0
+    },
+    {
+        "test": "test_sync_set_safe_state",
+        "case": "sync_set_safe_state",
+        "unit": "nvgpu-sync",
+        "test_level": 0
+    },
+    {
+        "test": "test_sync_usermanaged_syncpoint_apis",
+        "case": "sync_user_managed_apis",
+        "unit": "nvgpu-sync",
+        "test_level": 0
+    },
+    {
+        "test": "test_sync_get_ro_map",
+        "case": "sync_get_ro_map",
+        "unit": "nvgpu-sync",
+        "test_level": 0
+    },
+    {
+        "test": "test_sync_create_fail",
+        "case": "sync_fail",
+        "unit": "nvgpu-sync",
         "test_level": 0
     }
 ]
@@ -64,14 +64,6 @@ static int init_syncpt_mem(struct unit_module *m, struct gk20a *g)
 	return 0;
 }
 
-static int de_init_syncpt_mem(struct unit_module *m, struct gk20a *g)
-{
-	if (nvgpu_mem_is_valid(&g->syncpt_mem))
-		nvgpu_dma_free(g, &g->syncpt_mem);
-
-	return 0;
-}
-
 static int init_channel_vm(struct unit_module *m, struct nvgpu_channel *ch)
 {
 	u64 low_hole, aperture_size;
@@ -156,7 +148,6 @@ int test_sync_init(struct unit_module *m, struct gk20a *g, void *args)
 	 */
 	ch = nvgpu_kzalloc(g, sizeof(struct nvgpu_channel));
 	if (ch == NULL) {
-		de_init_syncpt_mem(m, g);
 		nvgpu_free_nvhost_dev(g);
 		unit_return_fail(m, "sync channel creation failure");
 	}
@@ -169,7 +160,6 @@ int test_sync_init(struct unit_module *m, struct gk20a *g, void *args)
 	ret = init_channel_vm(m, ch);
 	if (ret != 0) {
 		nvgpu_kfree(g, ch);
-		de_init_syncpt_mem(m, g);
 		nvgpu_free_nvhost_dev(g);
 		unit_return_fail(m, "sync channel vm init failure");
 	}
@@ -280,6 +270,13 @@ done:
 	if (sync != NULL)
 		nvgpu_channel_sync_destroy(sync, false);
 
+	if (nvgpu_mem_is_valid(&g->syncpt_mem) &&
+			ch->vm->syncpt_ro_map_gpu_va != 0ULL) {
+		nvgpu_gmmu_unmap(ch->vm, &g->syncpt_mem,
+				ch->vm->syncpt_ro_map_gpu_va);
+		ch->vm->syncpt_ro_map_gpu_va = 0ULL;
+	}
+
 	return ret;
 }
 
@@ -322,29 +319,135 @@ done:
 	if (user_sync != NULL)
 		nvgpu_channel_sync_destroy(user_sync, false);
 
+	if (nvgpu_mem_is_valid(&g->syncpt_mem) &&
+			ch->vm->syncpt_ro_map_gpu_va != 0ULL) {
+		nvgpu_gmmu_unmap(ch->vm, &g->syncpt_mem,
+				ch->vm->syncpt_ro_map_gpu_va);
+		ch->vm->syncpt_ro_map_gpu_va = 0ULL;
+	}
+
+	return ret;
+}
+
+#define F_SYNC_GET_RO_MAP_PRE_ALLOCATED	0
+#define F_SYNC_GET_RO_MAP		1
+#define F_SYNC_GET_RO_MAP_FAIL		2
+
+static const char *f_sync_get_ro_map[] = {
+	"sync_get_ro_map_preallocated",
+	"sync_get_ro_map",
+	"sync_get_ro_map_fail",
+};
+
+static void syncpt_ro_map_gpu_va_clear(struct gk20a *g, struct nvgpu_channel *ch)
+{
+	if (nvgpu_mem_is_valid(&g->syncpt_mem) &&
+			ch->vm->syncpt_ro_map_gpu_va != 0ULL) {
+		nvgpu_gmmu_unmap(ch->vm, &g->syncpt_mem,
+				ch->vm->syncpt_ro_map_gpu_va);
+		ch->vm->syncpt_ro_map_gpu_va = 0ULL;
+	} else if (ch->vm->syncpt_ro_map_gpu_va != 0ULL) {
+		ch->vm->syncpt_ro_map_gpu_va = 0ULL;
+	} else {
+		(void) memset(&g->syncpt_mem, 0, sizeof(struct nvgpu_mem));
+	}
+}
+
+int test_sync_get_ro_map(struct unit_module *m, struct gk20a *g, void *args)
+{
+	u64 base_gpuva = 0U;
+	u32 sync_size = 0U;
+	u32 branches;
+
+	int err = 0;
+	int ret = UNIT_FAIL;
+
+	for (branches = 0U; branches <= F_SYNC_GET_RO_MAP_FAIL; branches++) {
+		if (branches == F_SYNC_GET_RO_MAP_PRE_ALLOCATED) {
+			ch->vm->syncpt_ro_map_gpu_va = nvgpu_gmmu_map(ch->vm,
+					&g->syncpt_mem, g->syncpt_unit_size,
+					0, gk20a_mem_flag_read_only,
+					false, APERTURE_SYSMEM);
+			if (ch->vm->syncpt_ro_map_gpu_va == 0U) {
+				unit_return_fail(m, "Unable to preallocate mapping");
+			}
+		} else if (branches == F_SYNC_GET_RO_MAP) {
+			ch->vm->syncpt_ro_map_gpu_va = 0U;
+		} else if (branches == F_SYNC_GET_RO_MAP_FAIL) {
+			ch->vm->syncpt_ro_map_gpu_va = 0U;
+			/* fail Read-Only nvgpu_gmmu_map of g->syncpt_mem */
+			ch->vm->guest_managed = true;
+		}
+
+		unit_info(m, "%s branch: %s\n", __func__, f_sync_get_ro_map[branches]);
+
+		err = g->ops.sync.syncpt.get_sync_ro_map(ch->vm,
+				&base_gpuva, &sync_size);
+
+		if (branches < F_SYNC_GET_RO_MAP_FAIL) {
+			if(err != 0) {
+				unit_return_fail(m,
+					"unexpected failure in get_sync_ro_map");
+			} else {
+				ret = UNIT_SUCCESS;
+			}
+
+			assert(base_gpuva > 0ULL);
+			assert(sync_size > 0U);
+
+			unit_info(m, "Syncpt Shim GPU VA: %llu\n", base_gpuva);
+
+		} else {
+			if (err == 0) {
+				unit_return_fail(m,
+					"expected failure in get_sync_ro_map");
+			} else {
+				ret = UNIT_SUCCESS;
+			}
+		}
+
+		syncpt_ro_map_gpu_va_clear(g, ch);
+
+		if (ch->vm->guest_managed == true) {
+			ch->vm->guest_managed = false;
+		}
+
+		base_gpuva = 0U;
+		sync_size = 0U;
+	}
+done:
+	syncpt_ro_map_gpu_va_clear(g, ch);
+
+	if (ch->vm->guest_managed == true) {
+		ch->vm->guest_managed = false;
+	}
 	return ret;
 }
 
 #define F_SYNC_GLOBAL_DISABLE_SYNCPT	0
 #define F_SYNC_SYNCPT_ALLOC_FAILED	1
 #define F_SYNC_USER_MANAGED		2
-#define F_SYNC_NVHOST_CLIENT_MANAGED_FAIL	3
-#define F_SYNC_RO_MAP_GPU_VA_MAP_FAIL	4
-#define F_SYNC_MEM_CREATE_PHYS_FAIL	5
-#define F_SYNC_BUF_MAP_FAIL	6
-#define F_SYNC_FAIL_LAST	7
+#define F_SYNC_STRADD_FAIL		3
+#define F_SYNC_NVHOST_CLIENT_MANAGED_FAIL	4
+#define F_SYNC_RO_MAP_GPU_VA_MAP_FAIL	5
+#define F_SYNC_MEM_CREATE_PHYS_FAIL	6
+#define F_SYNC_BUF_MAP_FAIL	7
+#define F_SYNC_FAIL_LAST	8
 
 static const char *f_syncpt_open[] = {
 	"global_disable_syncpt",
 	"syncpt_alloc_failed",
 	"syncpt_user_managed_false",
+	"syncpt_stradd_fail",
 	"syncpt_get_client_managed_fail",
 	"syncpt_ro_map_gpu_va_fail",
 	"syncpt_create_phys_mem_fail",
 	"syncpt_buf_map_fail",
 };
 
-static void clear_test_params(struct gk20a *g, bool *user_managed,
+#define FAIL_G_NAME_STR "GK20A_FAILSTR_ADD_U32_CNDITION"
+
+static void clear_test_params(struct gk20a *g,
 		bool *fault_injection_enabled, u32 branch,
 		struct nvgpu_posix_fault_inj *kmem_fi)
 {
@@ -352,10 +455,6 @@ static void clear_test_params(struct gk20a *g, bool *user_managed,
 		g->disable_syncpoints = false;
 	}
 
-	if (!(*user_managed)) {
-		*user_managed = true;
-	}
-
 	if (ch->vm->guest_managed) {
 		ch->vm->guest_managed = false;
 	}
@@ -365,13 +464,7 @@ static void clear_test_params(struct gk20a *g, bool *user_managed,
 		*fault_injection_enabled = false;
 	}
 
-	if (branch == F_SYNC_NVHOST_CLIENT_MANAGED_FAIL) {
-		g->nvhost->syncpt_id = 1U;
-	}
-
-	if (ch->vm->syncpt_ro_map_gpu_va) {
-		ch->vm->syncpt_ro_map_gpu_va = 0ULL;
-	}
+	syncpt_ro_map_gpu_va_clear(g, ch);
 }
 
 int test_sync_create_fail(struct unit_module *m, struct gk20a *g, void *args)
@@ -382,6 +475,7 @@ int test_sync_create_fail(struct unit_module *m, struct gk20a *g, void *args)
 	bool user_managed = true;
 	bool fault_injection_enabled = false;
 	int ret = UNIT_FAIL;
+	const char *g_name = g->name;
 
 	kmem_fi = nvgpu_kmem_get_fault_injection();
 
@@ -405,6 +499,12 @@ int test_sync_create_fail(struct unit_module *m, struct gk20a *g, void *args)
 			fault_injection_enabled = true;
 		} else if (branches == F_SYNC_USER_MANAGED) {
 			user_managed = false;
+		} else if (branches == F_SYNC_STRADD_FAIL) {
+			/*
+			 * fill the entire buffer resulting in
+			 * failure in nvgpu_strnadd_u32
+			 */
+			g->name = FAIL_G_NAME_STR;
 		} else if (branches == F_SYNC_NVHOST_CLIENT_MANAGED_FAIL) {
 			g->nvhost->syncpt_id = 20U; /* arbitary id */
 		} else if (branches == F_SYNC_RO_MAP_GPU_VA_MAP_FAIL) {
@@ -434,6 +534,18 @@ int test_sync_create_fail(struct unit_module *m, struct gk20a *g, void *args)
 		unit_info(m, "%s branch: %s\n", __func__, f_syncpt_open[branches]);
 
 		sync = nvgpu_channel_sync_create(ch, user_managed);
+
+		if (branches == F_SYNC_USER_MANAGED) {
+			user_managed = true;
+		}
+
+		if (branches == F_SYNC_NVHOST_CLIENT_MANAGED_FAIL) {
+			g->nvhost->syncpt_id = 0U;
+		}
+
+		/* restore the original name member of the gk20a device */
+		g->name = g_name;
+
 		if (sync != NULL) {
 			nvgpu_channel_sync_destroy(sync, true);
 			unit_return_fail(m, "expected failure in creating sync points");
@@ -445,16 +557,13 @@ int test_sync_create_fail(struct unit_module *m, struct gk20a *g, void *args)
 		assert(syncpt_id == 0U);
 		assert(syncpt_value == 0U);
 
-		clear_test_params(g, &user_managed, &fault_injection_enabled,
-			branches, kmem_fi);
-
+		clear_test_params(g, &fault_injection_enabled, branches, kmem_fi);
 	}
 
-	ret = UNIT_SUCCESS;
+	return UNIT_SUCCESS;
 
 done:
-	clear_test_params(g, &user_managed, &fault_injection_enabled,
-		0, kmem_fi);
+	clear_test_params(g, &fault_injection_enabled, 0, kmem_fi);
 
 	return ret;
 }
@@ -468,8 +577,6 @@ int test_sync_deinit(struct unit_module *m, struct gk20a *g, void *args)
 		nvgpu_kfree(g, ch);
 	}
 
-	de_init_syncpt_mem(m, g);
-
 	if (g->nvhost == NULL) {
 		unit_return_fail(m ,"no valid nvhost device exists\n");
 	}
@@ -486,6 +593,7 @@ struct unit_module_test nvgpu_sync_tests[] = {
 	UNIT_TEST(sync_create_destroy, test_sync_create_destroy_sync, NULL, 0),
 	UNIT_TEST(sync_set_safe_state, test_sync_set_safe_state, NULL, 0),
 	UNIT_TEST(sync_user_managed_apis, test_sync_usermanaged_syncpt_apis, NULL, 0),
+	UNIT_TEST(sync_get_ro_map, test_sync_get_ro_map, NULL, 0),
 	UNIT_TEST(sync_fail, test_sync_create_fail, NULL, 0),
 	UNIT_TEST(sync_deinit, test_sync_deinit, NULL, 0),
 };
@@ -146,6 +146,29 @@ int test_sync_set_safe_state(struct unit_module *m, struct gk20a *g, void *args)
  */
 int test_sync_usermanaged_syncpt_apis(struct unit_module *m, struct gk20a *g, void *args);
 
+/**
+ * Test specification for: get_sync_ro_map
+ *
+ * Description: Branch coverage for get_sync_ro_map HAL
+ *
+ * Test Type: Feature
+ *
+ * Targets: gv11b_syncpt_get_sync_ro_map
+ *
+ * Input: test_sync_init run for this GPU
+ *
+ * Steps:
+ * - Check if a call to get_sync_ro_map HAL succeeds
+ * - Check when vm->syncpt_ro_map_gpu_va is preallocated
+ * - Check when vm->syncpt_ro_map_gpu_va is not preallocated
+ * - Check if a call to get_sync_ro_map HAL fails
+ * - Check when vm->syncpt_ro_map_gpu_va is not preallocated and
+ *   call to MAP fails
+ *
+ * Output: Returns PASS if NULL is returned. FAIL otherwise.
+ */
+int test_sync_get_ro_map(struct unit_module *m, struct gk20a *g, void *args);
+
 /**
  * Test specification for: test_sync_create_fail
  *