diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_sgt.c b/drivers/gpu/nvgpu/common/mm/nvgpu_sgt.c
index 8c881ace1..1e0c2893a 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_sgt.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_sgt.c
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include
 
 struct nvgpu_sgl *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt,
                struct nvgpu_sgl *sgl)
diff --git a/drivers/gpu/nvgpu/hal/sync/syncpt_cmdbuf_gv11b_fusa.c b/drivers/gpu/nvgpu/hal/sync/syncpt_cmdbuf_gv11b_fusa.c
index 87bb8dc52..dd3815e97 100644
--- a/drivers/gpu/nvgpu/hal/sync/syncpt_cmdbuf_gv11b_fusa.c
+++ b/drivers/gpu/nvgpu/hal/sync/syncpt_cmdbuf_gv11b_fusa.c
@@ -75,10 +75,15 @@ int gv11b_syncpt_alloc_buf(struct nvgpu_channel *c,
 	}
 
 	nr_pages = DIV_ROUND_UP(g->syncpt_size, PAGE_SIZE);
-	nvgpu_mem_create_from_phys(g, syncpt_buf,
+	err = nvgpu_mem_create_from_phys(g, syncpt_buf,
 		(g->syncpt_unit_base +
 		nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(syncpt_id)),
 		nr_pages);
+	if (err < 0) {
+		nvgpu_err(g, "failed to create mem from physical addr");
+		return err;
+	}
+
 	syncpt_buf->gpu_va = nvgpu_gmmu_map(c->vm, syncpt_buf,
 		g->syncpt_size, 0, gk20a_mem_flag_none,
 		false, APERTURE_SYSMEM);
diff --git a/drivers/gpu/nvgpu/include/nvgpu/posix/posix-nvhost.h b/drivers/gpu/nvgpu/include/nvgpu/posix/posix-nvhost.h
index c92fe5d6e..df23d556a 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/posix/posix-nvhost.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/posix/posix-nvhost.h
@@ -1,57 +1,62 @@
-/*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef NVGPU_POSIX_NVHOST_H
-#define NVGPU_POSIX_NVHOST_H
-
-#include
-
-struct gk20a;
-struct nvgpu_nvhost_dev {
-	u32 host1x_sp_base;
-	u32 host1x_sp_size;
-	u32 nb_hw_pts;
-};
-
-void nvgpu_free_nvhost_dev(struct gk20a *g);
-
-int nvgpu_get_nvhost_dev(struct gk20a *g);
-
-int nvgpu_nvhost_syncpt_unit_interface_get_aperture(
-	struct nvgpu_nvhost_dev *nvgpu_syncpt_dev,
-	u64 *base, size_t *size);
-
-u32 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(u32 syncpt_id);
-
-void nvgpu_nvhost_syncpt_set_min_eq_max_ext(
-	struct nvgpu_nvhost_dev *nvhost_dev, u32 id);
-void nvgpu_nvhost_syncpt_put_ref_ext(
-	struct nvgpu_nvhost_dev *nvhost_dev, u32 id);
-
-u32 nvgpu_nvhost_get_syncpt_client_managed(
-	struct nvgpu_nvhost_dev *nvhost_dev,
-	const char *syncpt_name);
-
-void nvgpu_nvhost_syncpt_set_safe_state(
-	struct nvgpu_nvhost_dev *nvhost_dev, u32 id);
-
+/*
+ * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NVGPU_POSIX_NVHOST_H
+#define NVGPU_POSIX_NVHOST_H
+
+#include
+
+#define NUM_HW_PTS 704U
+#define SYNCPT_SAFE_STATE_INCR 256U
+
+struct gk20a;
+struct nvgpu_nvhost_dev {
+	u32 host1x_sp_base;
+	u32 host1x_sp_size;
+	u32 nb_hw_pts;
+	u32 syncpt_id;
+	u32 syncpt_value;
+};
+
+void nvgpu_free_nvhost_dev(struct gk20a *g);
+
+int nvgpu_get_nvhost_dev(struct gk20a *g);
+
+int nvgpu_nvhost_syncpt_unit_interface_get_aperture(
+	struct nvgpu_nvhost_dev *nvgpu_syncpt_dev,
+	u64 *base, size_t *size);
+
+u32 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(u32 syncpt_id);
+
+void nvgpu_nvhost_syncpt_set_min_eq_max_ext(
+	struct nvgpu_nvhost_dev *nvhost_dev, u32 id);
+void nvgpu_nvhost_syncpt_put_ref_ext(
+	struct nvgpu_nvhost_dev *nvhost_dev, u32 id);
+
+u32 nvgpu_nvhost_get_syncpt_client_managed(
+	struct nvgpu_nvhost_dev *nvhost_dev,
+	const char *syncpt_name);
+
+void nvgpu_nvhost_syncpt_set_safe_state(
+	struct nvgpu_nvhost_dev *nvhost_dev, u32 id);
+
 #endif
\ No newline at end of file
diff --git a/drivers/gpu/nvgpu/libnvgpu-drv_safe.export b/drivers/gpu/nvgpu/libnvgpu-drv_safe.export
index 4f5028c53..4748a9112 100644
--- a/drivers/gpu/nvgpu/libnvgpu-drv_safe.export
+++ b/drivers/gpu/nvgpu/libnvgpu-drv_safe.export
@@ -341,6 +341,8 @@ nvgpu_channel_setup_bind
 nvgpu_channel_refch_from_inst_ptr
 nvgpu_channel_setup_sw
 nvgpu_channel_sync_create
+nvgpu_channel_sync_destroy
+nvgpu_channel_sync_set_safe_state
 nvgpu_check_gpu_state
 nvgpu_cond_broadcast
 nvgpu_cond_broadcast_interruptible
diff --git a/drivers/gpu/nvgpu/os/posix/posix-nvhost.c b/drivers/gpu/nvgpu/os/posix/posix-nvhost.c
index f06ec5f09..a1d2b93bf 100644
--- a/drivers/gpu/nvgpu/os/posix/posix-nvhost.c
+++ b/drivers/gpu/nvgpu/os/posix/posix-nvhost.c
@@ -38,17 +38,31 @@ void nvgpu_free_nvhost_dev(struct gk20a *g)
 {
 	}
 }
 
+static void allocate_new_syncpt(struct nvgpu_nvhost_dev *nvgpu_syncpt_dev)
+{
+	u32 syncpt_id, syncpt_val;
+
+	srand(time(NULL));
+
+	/* Limit the range between {1, NUM_HW_PTS} */
+	syncpt_id = (rand() % NUM_HW_PTS) + 1;
+	/* Limit the range between {0, UINT_MAX - SYNCPT_SAFE_STATE_INCR - 2} */
+	syncpt_val = (rand() % (UINT_MAX - SYNCPT_SAFE_STATE_INCR - 1));
+
+	nvgpu_syncpt_dev->syncpt_id = syncpt_id;
+	nvgpu_syncpt_dev->syncpt_value = syncpt_val;
+}
+
 int nvgpu_get_nvhost_dev(struct gk20a *g)
 {
 	int ret = 0;
-
 	g->nvhost_dev = nvgpu_kzalloc(g, sizeof(struct nvgpu_nvhost_dev));
 	if (g->nvhost_dev == NULL) {
 		return -ENOMEM;
 	}
 
 	g->nvhost_dev->host1x_sp_base = 0x60000000;
-	g->nvhost_dev->host1x_sp_size = 0x400000;
+	g->nvhost_dev->host1x_sp_size = 0x4000;
 	g->nvhost_dev->nb_hw_pts = 704U;
 	ret = nvgpu_nvhost_syncpt_unit_interface_get_aperture(
 		g->nvhost_dev, &g->syncpt_unit_base,
@@ -91,24 +105,38 @@ u32 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(u32 syncpt_id)
 void nvgpu_nvhost_syncpt_set_min_eq_max_ext(
 	struct nvgpu_nvhost_dev *nvhost_dev, u32 id)
 {
-	BUG();
 }
 
 void nvgpu_nvhost_syncpt_put_ref_ext(
 	struct nvgpu_nvhost_dev *nvhost_dev, u32 id)
 {
-	BUG();
+	nvhost_dev->syncpt_id = 0U;
+	nvhost_dev->syncpt_value = 0U;
 }
 
 u32 nvgpu_nvhost_get_syncpt_client_managed(
 	struct nvgpu_nvhost_dev *nvhost_dev,
 	const char *syncpt_name)
 {
-	return 0U;
+	/* Only allocate new syncpt if nothing exists already */
+	if (nvhost_dev->syncpt_id == 0U) {
+		allocate_new_syncpt(nvhost_dev);
+	} else {
+		nvhost_dev->syncpt_id = 0U;
+	}
+
+	return nvhost_dev->syncpt_id;
 }
 
 void nvgpu_nvhost_syncpt_set_safe_state(
 	struct nvgpu_nvhost_dev *nvhost_dev, u32 id)
 {
-	BUG();
+	u32 syncpt_value_cur;
+
+	if (nvhost_dev->syncpt_id == id) {
+		syncpt_value_cur = nvhost_dev->syncpt_value;
+		nvhost_dev->syncpt_value =
+			nvgpu_safe_add_u32(syncpt_value_cur,
+				SYNCPT_SAFE_STATE_INCR);
+	}
 }
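For reference, the POSIX nvhost stub above is what backs the new sync unit tests. A minimal sketch of its expected lifecycle, based on the implementation in this patch (the wrapper function and the "example" syncpoint name are illustrative only, not part of the patch):

static void example_stub_lifecycle(struct nvgpu_nvhost_dev *dev)
{
	u32 id, val;

	/* Fresh device: returns a pseudo-random id in [1, NUM_HW_PTS]. */
	id = nvgpu_nvhost_get_syncpt_client_managed(dev, "example");
	val = dev->syncpt_value;

	/* Bumps the stored value by SYNCPT_SAFE_STATE_INCR (256). */
	nvgpu_nvhost_syncpt_set_safe_state(dev, id);
	/* Here dev->syncpt_value == val + SYNCPT_SAFE_STATE_INCR. */

	/* Releases the syncpoint: id and value are reset to 0. */
	nvgpu_nvhost_syncpt_put_ref_ext(dev, id);
}

Note that a second call to nvgpu_nvhost_get_syncpt_client_managed() while an id is still held takes the else branch of the stub and returns 0; the client-managed failure case in test_sync_create_fail below relies on exactly that behaviour.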
diff --git a/userspace/units/sync/nvgpu-sync.c b/userspace/units/sync/nvgpu-sync.c
index 3e467ec16..367f5be79 100644
--- a/userspace/units/sync/nvgpu-sync.c
+++ b/userspace/units/sync/nvgpu-sync.c
@@ -25,12 +25,15 @@
 #include
 #include
 #include
+#include
 #include
+#include
 #include
 #include
 #include
 #include
 
+#include "../fifo/nvgpu-fifo.h"
 #include "../fifo/nvgpu-fifo-gv11b.h"
 
 #include "nvgpu-sync.h"
@@ -38,8 +41,75 @@
 		NVGPU_GPU_ARCHITECTURE_SHIFT)
 #define NV_PMC_BOOT_0_IMPLEMENTATION_B 0xB
 
+#define assert(cond) unit_assert(cond, goto done)
+
 static struct nvgpu_channel *ch;
 
+static int init_syncpt_mem(struct unit_module *m, struct gk20a *g)
+{
+	u64 nr_pages;
+	int err;
+	if (!nvgpu_mem_is_valid(&g->syncpt_mem)) {
+		nr_pages = U64(DIV_ROUND_UP(g->syncpt_unit_size,
+					PAGE_SIZE));
+		err = nvgpu_mem_create_from_phys(g, &g->syncpt_mem,
+				g->syncpt_unit_base, nr_pages);
+		if (err != 0) {
+			nvgpu_err(g, "Failed to create syncpt mem");
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static int de_init_syncpt_mem(struct unit_module *m, struct gk20a *g)
+{
+	if (nvgpu_mem_is_valid(&g->syncpt_mem))
+		nvgpu_dma_free(g, &g->syncpt_mem);
+
+	return 0;
+}
+
+static int init_channel_vm(struct unit_module *m, struct nvgpu_channel *ch)
+{
+	u64 low_hole, aperture_size;
+	struct gk20a *g = ch->g;
+	struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g);
+	struct mm_gk20a *mm = &g->mm;
+
+	p->mm_is_iommuable = true;
+	/*
+	 * Initialize one VM space for system memory to be used throughout this
+	 * unit module.
+	 * Values below are similar to those used in nvgpu_init_system_vm()
+	 */
+	low_hole = SZ_4K * 16UL;
+	aperture_size = GK20A_PMU_VA_SIZE;
+
+	mm->pmu.aperture_size = GK20A_PMU_VA_SIZE;
+	mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE -
+		NV_MM_DEFAULT_KERNEL_SIZE;
+	mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE;
+
+	mm->pmu.vm = nvgpu_vm_init(g,
+			g->ops.mm.gmmu.get_default_big_page_size(),
+			low_hole,
+			aperture_size - low_hole,
+			aperture_size,
+			true,
+			false,
+			false,
+			"system");
+	if (mm->pmu.vm == NULL) {
+		unit_return_fail(m, "nvgpu_vm_init failed\n");
+	}
+
+	ch->vm = mm->pmu.vm;
+
+	return UNIT_SUCCESS;
+}
+
 int test_sync_init(struct unit_module *m, struct gk20a *g, void *args)
 {
 	int ret = 0;
@@ -63,40 +133,286 @@
 		return -ENODEV;
 	}
 
+	/*
+	 * Init g->nvhost_dev containing sync metadata
+	 */
 	ret = nvgpu_get_nvhost_dev(g);
 	if (ret != 0) {
 		unit_return_fail(m, "nvgpu_sync_early_init failed\n");
 	}
 
+	/*
+	 * Alloc memory for g->syncpt_mem
+	 */
+	ret = init_syncpt_mem(m, g);
+	if (ret != 0) {
+		nvgpu_free_nvhost_dev(g);
+		unit_return_fail(m, "sync mem allocation failure");
+	}
+
+	/*
+	 * Alloc memory for channel
+	 */
 	ch = nvgpu_kzalloc(g, sizeof(struct nvgpu_channel));
 	if (ch == NULL) {
+		de_init_syncpt_mem(m, g);
+		nvgpu_free_nvhost_dev(g);
 		unit_return_fail(m, "sync channel creation failure");
 	}
 
 	ch->g = g;
 
-	return UNIT_SUCCESS;
-}
-
-int test_sync_create_sync(struct unit_module *m, struct gk20a *g, void *args)
-{
-	struct nvgpu_channel_sync *sync = NULL;
-
-	sync = nvgpu_channel_sync_create(ch, true);
-
-	if (sync != NULL) {
-		unit_return_fail(m, "expected failure in creating sync points");
+	/*
+	 * Alloc and Init a VM for the channel
+	 */
+	ret = init_channel_vm(m, ch);
+	if (ret != 0) {
+		nvgpu_kfree(g, ch);
+		de_init_syncpt_mem(m, g);
+		nvgpu_free_nvhost_dev(g);
+		unit_return_fail(m, "sync channel vm init failure");
 	}
 
 	return UNIT_SUCCESS;
 }
 
+#define F_SYNC_DESTROY_SET_SAFE	0
+#define F_SYNC_DESTROY_LAST	1
+
+static const char *f_sync_destroy_syncpt[] = {
+	"sync_destroy_set_safe",
+	"sync_destroy",
+};
+
+int test_sync_create_destroy_sync(struct unit_module *m, struct gk20a *g, void *args)
+{
+	struct nvgpu_channel_sync *sync = NULL;
+	u32 branches;
+	bool set_safe_state = true;
+
+	u32 syncpt_value = 0U;
+	int ret = UNIT_FAIL;
+
+	for (branches = 0U; branches <= F_SYNC_DESTROY_LAST; branches++) {
+
+		sync = nvgpu_channel_sync_create(ch, true);
+		if (sync == NULL) {
+			unit_return_fail(m, "unexpected failure in creating sync points");
+		}
+
+		syncpt_value = g->nvhost_dev->syncpt_value;
+
+		unit_info(m, "Syncpt ID: %u, Syncpt Value: %u\n",
+				g->nvhost_dev->syncpt_id, syncpt_value);
+
+		assert((g->nvhost_dev->syncpt_id > 0U) &&
+			(g->nvhost_dev->syncpt_id <= NUM_HW_PTS));
+
+		assert(syncpt_value < (UINT_MAX - SYNCPT_SAFE_STATE_INCR));
+
+		if (branches == F_SYNC_DESTROY_SET_SAFE) {
+			set_safe_state = false;
+		}
+
+		unit_info(m, "%s branch: %s\n", __func__, f_sync_destroy_syncpt[branches]);
+
+		nvgpu_channel_sync_destroy(sync, set_safe_state);
+
+		sync = NULL;
+	}
+
+	ret = UNIT_SUCCESS;
+
+done:
+	if (sync != NULL)
+		nvgpu_channel_sync_destroy(sync, set_safe_state);
+
+	if (nvgpu_mem_is_valid(&g->syncpt_mem) &&
+			ch->vm->syncpt_ro_map_gpu_va != 0ULL) {
+		nvgpu_gmmu_unmap(ch->vm, &g->syncpt_mem,
+				ch->vm->syncpt_ro_map_gpu_va);
+		ch->vm->syncpt_ro_map_gpu_va = 0ULL;
+	}
+
+	return ret;
+}
+
+int test_sync_set_safe_state(struct unit_module *m, struct gk20a *g, void *args)
+{
+	struct nvgpu_channel_sync *sync = NULL;
+
+	u32 syncpt_value, syncpt_id;
+	u32 syncpt_safe_state_val;
+
+	int ret = UNIT_FAIL;
+
+	sync = nvgpu_channel_sync_create(ch, true);
+	if (sync == NULL) {
+		unit_return_fail(m, "unexpected failure in creating sync points");
+	}
+
+	syncpt_id = g->nvhost_dev->syncpt_id;
+	syncpt_value = g->nvhost_dev->syncpt_value;
+
+	unit_info(m, "Syncpt ID: %u, Syncpt Value: %u\n",
+			syncpt_id, syncpt_value);
+
+	assert((syncpt_id > 0U) && (syncpt_id <= NUM_HW_PTS));
+
+	assert(syncpt_value < (UINT_MAX - SYNCPT_SAFE_STATE_INCR));
+
+	nvgpu_channel_sync_set_safe_state(sync);
+
+	syncpt_safe_state_val = g->nvhost_dev->syncpt_value;
+
+	if ((syncpt_safe_state_val - syncpt_value) != SYNCPT_SAFE_STATE_INCR) {
+		unit_return_fail(m, "unexpected increment value for safe state");
+	}
+
+	nvgpu_channel_sync_destroy(sync, false);
+
+	sync = NULL;
+
+	ret = UNIT_SUCCESS;
+
+done:
+	if (sync != NULL)
+		nvgpu_channel_sync_destroy(sync, false);
+
+	return ret;
+}
+
+#define F_SYNC_SYNCPT_ALLOC_FAILED		0
+#define F_SYNC_USER_MANAGED			1
+#define F_SYNC_NVHOST_CLIENT_MANAGED_FAIL	2
+#define F_SYNC_RO_MAP_GPU_VA_MAP_FAIL		3
+#define F_SYNC_MEM_CREATE_PHYS_FAIL		4
+#define F_SYNC_BUF_MAP_FAIL			5
+#define F_SYNC_FAIL_LAST			6
+
+static const char *f_syncpt_open[] = {
+	"syncpt_alloc_failed",
+	"syncpt_user_managed_false",
+	"syncpt_get_client_managed_fail",
+	"syncpt_ro_map_gpu_va_fail",
+	"syncpt_create_phys_mem_fail",
+	"syncpt_buf_map_fail",
+};
+
+static void clear_test_params(struct gk20a *g, bool *user_managed,
+		bool *fault_injection_enabled, u32 branch,
+		struct nvgpu_posix_fault_inj *kmem_fi)
+{
+	if (!(*user_managed)) {
+		*user_managed = true;
+	}
+
+	if (ch->vm->guest_managed) {
+		ch->vm->guest_managed = false;
+	}
+
+	if (*fault_injection_enabled) {
+		nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
+		*fault_injection_enabled = false;
+	}
+
+	if (branch == F_SYNC_NVHOST_CLIENT_MANAGED_FAIL) {
+		g->nvhost_dev->syncpt_id = 1U;
+	}
+
+	if (ch->vm->syncpt_ro_map_gpu_va) {
+		ch->vm->syncpt_ro_map_gpu_va = 0ULL;
+	}
+}
+
+int test_sync_create_fail(struct unit_module *m, struct gk20a *g, void *args)
+{
+	struct nvgpu_channel_sync *sync = NULL;
+	struct nvgpu_posix_fault_inj *kmem_fi;
+	u32 branches;
+	bool user_managed = true;
+	bool fault_injection_enabled = false;
+	int ret = UNIT_FAIL;
+
+	kmem_fi = nvgpu_kmem_get_fault_injection();
+
+	ch->vm->syncpt_ro_map_gpu_va = 0U;
+
+	for (branches = 0U; branches < F_SYNC_FAIL_LAST; branches++) {
+
+		u32 syncpt_id, syncpt_value;
+
+		if (branches == F_SYNC_SYNCPT_ALLOC_FAILED) {
+			/* fail first kzalloc call */
+			nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
+			fault_injection_enabled = true;
+		} else if (branches == F_SYNC_USER_MANAGED) {
+			user_managed = false;
+		} else if (branches == F_SYNC_NVHOST_CLIENT_MANAGED_FAIL) {
+			g->nvhost_dev->syncpt_id = 20U; /* arbitrary id */
+		} else if (branches == F_SYNC_RO_MAP_GPU_VA_MAP_FAIL) {
+			/* fail Read-Only nvgpu_gmmu_map of g->syncpt_mem */
+			ch->vm->guest_managed = true;
+		} else if (branches == F_SYNC_MEM_CREATE_PHYS_FAIL) {
+			/*
+			 * bypass map of g->syncpt_mem and fail at
+			 * nvgpu_mem_create_from_phys after first kzalloc.
+			 */
+			ch->vm->syncpt_ro_map_gpu_va = 0x1000ULL;
+			nvgpu_posix_enable_fault_injection(kmem_fi, true, 1);
+			fault_injection_enabled = true;
+		} else if (branches == F_SYNC_BUF_MAP_FAIL) {
+			/*
+			 * bypass map of g->syncpt_mem and fail at
+			 * nvgpu_gmmu_map after first kzalloc and then two
+			 * consecutive calls to kmalloc
+			 */
+			ch->vm->syncpt_ro_map_gpu_va = 1ULL;
+			nvgpu_posix_enable_fault_injection(kmem_fi, true, 3);
+			fault_injection_enabled = true;
+		} else {
+			continue;
+		}
+
+		unit_info(m, "%s branch: %s\n", __func__, f_syncpt_open[branches]);
+
+		sync = nvgpu_channel_sync_create(ch, user_managed);
+		if (sync != NULL) {
+			nvgpu_channel_sync_destroy(sync, true);
+			unit_return_fail(m, "expected failure in creating sync points");
+		}
+
+		syncpt_id = g->nvhost_dev->syncpt_id;
+		syncpt_value = g->nvhost_dev->syncpt_value;
+
+		assert(syncpt_id == 0U);
+		assert(syncpt_value == 0U);
+
+		clear_test_params(g, &user_managed, &fault_injection_enabled,
+				branches, kmem_fi);
+
+	}
+
+	ret = UNIT_SUCCESS;
+
+done:
+	clear_test_params(g, &user_managed, &fault_injection_enabled,
+			0, kmem_fi);
+
+	return ret;
+}
+
 int test_sync_deinit(struct unit_module *m, struct gk20a *g, void *args)
 {
+
+	nvgpu_vm_put(g->mm.pmu.vm);
+
 	if (ch != NULL) {
 		nvgpu_kfree(g, ch);
 	}
+
+	de_init_syncpt_mem(m, g);
+
 	if (g->nvhost_dev == NULL) {
 		unit_return_fail(m ,"no valid nvhost device exists\n");
 	}
@@ -110,8 +426,10 @@ int test_sync_deinit(struct unit_module *m, struct gk20a *g, void *args)
 
 struct unit_module_test nvgpu_sync_tests[] = {
 	UNIT_TEST(sync_init, test_sync_init, NULL, 0),
-	UNIT_TEST(sync_deinit, test_sync_create_sync, NULL, 0),
+	UNIT_TEST(sync_create_destroy, test_sync_create_destroy_sync, NULL, 0),
+	UNIT_TEST(sync_set_safe_state, test_sync_set_safe_state, NULL, 0),
+	UNIT_TEST(sync_fail, test_sync_create_fail, NULL, 0),
 	UNIT_TEST(sync_deinit, test_sync_deinit, NULL, 0),
 };
 
-UNIT_MODULE(nvgpu-sync, nvgpu_sync_tests, UNIT_PRIO_NVGPU_TEST);
\ No newline at end of file
+UNIT_MODULE(nvgpu-sync, nvgpu_sync_tests, UNIT_PRIO_NVGPU_TEST);
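A note on the fault-injection ordinals used in test_sync_create_fail above: as the in-test comments imply, nvgpu_posix_enable_fault_injection(kmem_fi, true, N) lets the first N kernel-memory allocations succeed and fails the next one, which is how the test lands the failure on a specific allocation inside nvgpu_channel_sync_create(). A minimal sketch of that pattern, assuming the same kmem fault-injection API used by the test (the enclosing function name is illustrative only):

static void example_fail_third_allocation(void)
{
	struct nvgpu_posix_fault_inj *kmem_fi =
		nvgpu_kmem_get_fault_injection();

	/* Let the first two kmalloc/kzalloc calls succeed, fail the third. */
	nvgpu_posix_enable_fault_injection(kmem_fi, true, 2);

	/* ... exercise the code path under test ... */

	/* Restore normal allocation behaviour before the next test. */
	nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
}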
diff --git a/userspace/units/sync/nvgpu-sync.h b/userspace/units/sync/nvgpu-sync.h
index e1c09ef49..7c78c961e 100644
--- a/userspace/units/sync/nvgpu-sync.h
+++ b/userspace/units/sync/nvgpu-sync.h
@@ -27,8 +27,136 @@
 struct gk20a;
 struct unit_module;
 
+/** @addtogroup SWUTS-nvgpu-sync
+ * @{
+ *
+ * Software Unit Test Specification for nvgpu-sync
+ */
+
+/**
+ * Test specification for: test_sync_init
+ *
+ * Description: Environment initialization for tests
+ *
+ * Test Type: Feature based
+ *
+ * Input: None
+ *
+ * Steps:
+ * - init FIFO register space.
+ * - init HAL parameters for gv11b.
+ * - init required for getting the sync ops initialized.
+ * - init g->nvhost_dev containing sync metadata.
+ * - alloc memory for g->syncpt_mem.
+ * - alloc memory for channel.
+ * - alloc and init a VM for the channel.
+ *
+ * Output: Returns PASS if all the above steps are successful. FAIL otherwise.
+ */
 int test_sync_init(struct unit_module *m, struct gk20a *g, void *args);
+
+/**
+ * Test specification for: test_sync_deinit
+ *
+ * Description: Environment de-initialization for tests
+ *
+ * Test Type: Feature based
+ *
+ * Input: test_sync_init run for this GPU
+ *
+ * Steps:
+ * - put the VM reference.
+ * - free channel memory.
+ * - free memory for g->syncpt_mem.
+ * - free g->nvhost_dev.
+ * - clear FIFO register space.
+ *
+ * Output: Returns PASS if all the above steps are successful. FAIL otherwise.
+ */
 int test_sync_deinit(struct unit_module *m, struct gk20a *g, void *args);
-int test_sync_create_sync(struct unit_module *m, struct gk20a *g, void *args);
+
+/**
+ * Test specification for: test_sync_create_destroy_sync
+ *
+ * Description: Branch coverage for nvgpu_channel_sync_{create/destroy} success
+ *
+ * Test Type: Feature based
+ *
+ * Targets: nvgpu_channel_sync_create, nvgpu_has_syncpoints,
+ *          nvgpu_channel_sync_syncpt_create,
+ *          nvgpu_nvhost_get_syncpt_client_managed,
+ *          gv11b_syncpt_alloc_buf,
+ *          set_syncpt_ro_map_gpu_va_locked,
+ *          nvgpu_channel_sync_destroy,
+ *          channel_sync_syncpt_destroy,
+ *          gv11b_syncpt_free_buf
+ *
+ * Input: test_sync_init run for this GPU
+ *
+ * Steps:
+ * - Check valid cases for nvgpu_channel_sync_create:
+ *   - Pass a valid channel to the API and pass user_managed = true.
+ *   - vm->syncpt_ro_map_gpu_va is not already allocated.
+ *   - vm->syncpt_ro_map_gpu_va is already allocated.
+ * - Check valid cases for nvgpu_channel_sync_destroy:
+ *   - Set set_safe_state = true.
+ *   - Set set_safe_state = false.
+ *
+ * Output: Returns PASS if a valid syncpoint is created. FAIL otherwise.
+ */
+int test_sync_create_destroy_sync(struct unit_module *m, struct gk20a *g, void *args);
+
+/**
+ * Test specification for: test_sync_set_safe_state
+ *
+ * Description: Branch coverage for nvgpu_channel_sync_set_safe_state
+ *
+ * Test Type: Feature based
+ *
+ * Targets: nvgpu_channel_sync_set_safe_state
+ *
+ * Input: test_sync_init run for this GPU
+ *
+ * Steps:
+ * - Check if the syncpoint_value is incremented by a predefined fixed amount.
+ *
+ * Output: Returns PASS if the above increment occurs correctly. FAIL otherwise.
+ */
+int test_sync_set_safe_state(struct unit_module *m, struct gk20a *g, void *args);
+
+/**
+ * Test specification for: test_sync_create_fail
+ *
+ * Description: Branch coverage for nvgpu_channel_sync_create failure
+ *
+ * Test Type: Feature based
+ *
+ * Targets: nvgpu_channel_sync_create, nvgpu_has_syncpoints,
+ *          nvgpu_channel_sync_syncpt_create,
+ *          nvgpu_nvhost_get_syncpt_client_managed,
+ *          gv11b_syncpt_alloc_buf,
+ *          set_syncpt_ro_map_gpu_va_locked,
+ *          nvgpu_channel_sync_destroy,
+ *          channel_sync_syncpt_destroy,
+ *          gv11b_syncpt_free_buf
+ *
+ * Input: test_sync_init run for this GPU
+ *
+ * Steps:
+ * - Check failure cases for nvgpu_channel_sync_create:
+ *   - pass user_managed = FALSE.
+ *   - allocation of memory for struct nvgpu_channel_sync_syncpt fails.
+ *   - nvgpu_nvhost_get_syncpt_client_managed() returns an invalid syncpoint,
+ *     i.e. syncpt_id returned = 0.
+ *   - failure of the alloc_buf() HAL:
+ *     - syncpt read-only map failure.
+ *     - failure of allocation of memory for syncpt_buf.
+ *     - failure to map the memory allocated for syncpt_buf.
+ *
+ * Output: Returns PASS if NULL is returned. FAIL otherwise.
+ */
+int test_sync_create_fail(struct unit_module *m, struct gk20a *g, void *args);
+
+/** @} */
 #endif /* UNIT_NVGPU_SYNC_H */
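The new tests in nvgpu-sync.c lean on the local assert()/done: idiom (assert(cond) expands to unit_assert(cond, goto done)), so every failed check funnels into a single cleanup path. A minimal sketch of that shape, with an illustrative test body (the function name and checks are examples, not part of the patch):

static int example_test_with_cleanup(struct unit_module *m,
				struct gk20a *g, void *args)
{
	int ret = UNIT_FAIL;

	/* Each assert() jumps to done on failure, so cleanup always runs. */
	assert(g != NULL);
	assert(g->nvhost_dev != NULL);

	ret = UNIT_SUCCESS;

done:
	/* Shared teardown for both the pass and fail paths. */
	return ret;
}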