diff --git a/drivers/gpu/nvgpu/libnvgpu-drv_safe.export b/drivers/gpu/nvgpu/libnvgpu-drv_safe.export index 4a32e88a0..cf7a53382 100644 --- a/drivers/gpu/nvgpu/libnvgpu-drv_safe.export +++ b/drivers/gpu/nvgpu/libnvgpu-drv_safe.export @@ -245,6 +245,7 @@ nvgpu_alloc_common_init nvgpu_alloc_destroy nvgpu_alloc_end nvgpu_alloc_fixed +nvgpu_alloc_gr_ctx_struct nvgpu_alloc_initialized nvgpu_alloc_inst_block nvgpu_alloc_length @@ -256,10 +257,14 @@ nvgpu_allocator_init nvgpu_aperture_mask nvgpu_bar1_readl nvgpu_bar1_writel +nvgpu_big_alloc_impl +nvgpu_big_free nvgpu_big_pages_possible nvgpu_bitmap_clear nvgpu_bitmap_set nvgpu_bsearch +nvgpu_can_busy +nvgpu_ce_engine_interrupt_mask nvgpu_ce_init_support nvgpu_cg_blcg_fb_ltc_load_enable nvgpu_cg_blcg_fifo_load_enable @@ -275,129 +280,29 @@ nvgpu_cg_slcg_ce2_load_enable nvgpu_cg_init_gr_load_gating_prod nvgpu_cg_elcg_enable_no_wait nvgpu_cg_elcg_disable_no_wait -nvgpu_cond_get_fault_injection -nvgpu_current_pid -nvgpu_current_tid -nvgpu_engine_cleanup_sw -nvgpu_engine_get_active_eng_info -nvgpu_engine_get_ids -nvgpu_engine_get_gr_id -nvgpu_engine_init_info -nvgpu_engine_setup_sw -nvgpu_gr_alloc -nvgpu_gr_free -nvgpu_gr_init -nvgpu_gr_init_support -nvgpu_gr_remove_support -nvgpu_gr_intr_init_support -nvgpu_gr_intr_remove_support -nvgpu_gr_intr_handle_fecs_error -nvgpu_gr_prepare_sw -nvgpu_gr_enable_hw -nvgpu_gr_suspend -nvgpu_gr_sw_ready -nvgpu_gr_falcon_get_fecs_ucode_segments -nvgpu_gr_falcon_get_gpccs_ucode_segments -nvgpu_gr_falcon_get_surface_desc_cpu_va -nvgpu_gr_falcon_init_ctxsw -nvgpu_gr_falcon_init_ctx_state -nvgpu_gr_falcon_init_ctxsw_ucode -nvgpu_gr_falcon_init_support -nvgpu_gr_falcon_load_secure_ctxsw_ucode -nvgpu_gr_falcon_remove_support -nvgpu_gr_config_init -nvgpu_gr_config_deinit -nvgpu_gr_config_get_max_gpc_count -nvgpu_gr_config_get_max_tpc_count -nvgpu_gr_config_get_max_tpc_per_gpc_count -nvgpu_gr_config_get_gpc_count -nvgpu_gr_config_get_tpc_count -nvgpu_gr_config_get_ppc_count -nvgpu_gr_config_get_pe_count_per_gpc -nvgpu_gr_config_get_sm_count_per_tpc -nvgpu_gr_config_get_gpc_mask -nvgpu_gr_config_get_gpc_ppc_count -nvgpu_gr_config_get_gpc_skip_mask -nvgpu_gr_config_get_gpc_tpc_count -nvgpu_gr_config_get_pes_tpc_count -nvgpu_gr_config_get_pes_tpc_mask -nvgpu_gr_config_get_gpc_tpc_mask_base -nvgpu_gr_config_get_gpc_tpc_count_base -nvgpu_gr_config_set_no_of_sm -nvgpu_gr_config_get_no_of_sm -nvgpu_gr_config_get_sm_info -nvgpu_gr_config_set_sm_info_gpc_index -nvgpu_gr_config_get_sm_info_gpc_index -nvgpu_gr_config_set_sm_info_tpc_index -nvgpu_gr_config_get_sm_info_tpc_index -nvgpu_gr_config_set_sm_info_global_tpc_index -nvgpu_gr_config_get_sm_info_global_tpc_index -nvgpu_gr_config_set_sm_info_sm_index -nvgpu_gr_config_get_sm_info_sm_index -nvgpu_gr_config_set_gpc_tpc_mask -nvgpu_gr_config_get_gpc_tpc_mask -nvgpu_gr_engine_interrupt_mask -nvgpu_gr_obj_ctx_is_golden_image_ready -nvgpu_gr_ctx_get_tsgid -nvgpu_gr_get_config_ptr -nvgpu_gr_fs_state_init -nvgpu_gr_global_ctx_desc_alloc -nvgpu_gr_global_ctx_desc_free -nvgpu_gr_global_ctx_buffer_alloc -nvgpu_gr_global_ctx_buffer_free -nvgpu_gr_global_ctx_set_size -nvgpu_gr_global_ctx_buffer_map -nvgpu_gr_global_ctx_buffer_unmap -nvgpu_gr_global_ctx_buffer_get_mem -nvgpu_gr_global_ctx_buffer_ready -nvgpu_gr_global_ctx_init_local_golden_image -nvgpu_gr_global_ctx_load_local_golden_image -nvgpu_gr_global_ctx_compare_golden_images -nvgpu_gr_global_ctx_deinit_local_golden_image -nvgpu_gr_ctx_desc_alloc -nvgpu_gr_ctx_desc_free -nvgpu_alloc_gr_ctx_struct -nvgpu_free_gr_ctx_struct 
-nvgpu_gr_ctx_alloc -nvgpu_gr_ctx_free -nvgpu_gr_ctx_set_size -nvgpu_gr_ctx_alloc_patch_ctx -nvgpu_gr_ctx_free_patch_ctx -nvgpu_gr_ctx_map_global_ctx_buffers -nvgpu_gr_ctx_patch_write_begin -nvgpu_gr_ctx_patch_write -nvgpu_gr_ctx_patch_write_end -nvgpu_golden_ctx_verif_get_fault_injection -nvgpu_local_golden_image_get_fault_injection -nvgpu_gr_obj_ctx_init -nvgpu_gr_obj_ctx_alloc -nvgpu_gr_obj_ctx_deinit -nvgpu_gr_subctx_alloc -nvgpu_gr_subctx_free -nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode -nvgpu_hr_timestamp -nvgpu_init_ltc_support -nvgpu_ltc_ecc_free -nvgpu_ltc_get_cacheline_size -nvgpu_ltc_get_ltc_count -nvgpu_ltc_get_slices_per_ltc -nvgpu_ltc_remove_support -nvgpu_ltc_sync_enabled -nvgpu_can_busy -nvgpu_ce_engine_interrupt_mask +nvgpu_channel_abort nvgpu_channel_alloc_inst nvgpu_channel_cleanup_sw nvgpu_channel_close +nvgpu_channel_debug_dump_all +nvgpu_channel_deterministic_idle +nvgpu_channel_deterministic_unidle nvgpu_channel_disable_tsg nvgpu_channel_enable_tsg nvgpu_channel_free_inst +nvgpu_channel_from_id__func nvgpu_channel_kill +nvgpu_channel_mark_error nvgpu_channel_open_new nvgpu_channel_put__func nvgpu_channel_setup_bind nvgpu_channel_refch_from_inst_ptr +nvgpu_channel_resume_all_serviceable_ch +nvgpu_channel_semaphore_wakeup nvgpu_channel_set_unserviceable nvgpu_channel_setup_sw +nvgpu_channel_suspend_all_serviceable_ch +nvgpu_channel_sw_quiesce nvgpu_channel_sync_create nvgpu_channel_sync_destroy nvgpu_channel_sync_set_safe_state @@ -409,6 +314,7 @@ nvgpu_cond_broadcast nvgpu_cond_broadcast_interruptible nvgpu_cond_broadcast_locked nvgpu_cond_destroy +nvgpu_cond_get_fault_injection nvgpu_cond_init nvgpu_cond_lock nvgpu_cond_signal @@ -416,6 +322,8 @@ nvgpu_cond_signal_interruptible nvgpu_cond_signal_locked nvgpu_cond_timedwait nvgpu_cond_unlock +nvgpu_current_pid +nvgpu_current_tid nvgpu_current_time_ms nvgpu_current_time_ns nvgpu_current_time_us @@ -432,12 +340,17 @@ nvgpu_ecc_counter_init_per_lts nvgpu_ecc_init_support nvgpu_engine_act_interrupt_mask nvgpu_engine_check_valid_id +nvgpu_engine_cleanup_sw nvgpu_engine_enum_from_type +nvgpu_engine_get_active_eng_info nvgpu_engine_get_all_ce_reset_mask nvgpu_engine_get_fast_ce_runlist_id +nvgpu_engine_get_gr_id nvgpu_engine_get_gr_runlist_id +nvgpu_engine_get_ids +nvgpu_engine_init_info nvgpu_engine_is_valid_runlist_id -nvgpu_get +nvgpu_engine_setup_sw nvgpu_falcon_hs_ucode_load_bootstrap nvgpu_falcon_copy_to_dmem nvgpu_falcon_copy_to_imem @@ -460,6 +373,8 @@ nvgpu_finalize_poweron nvgpu_free nvgpu_free_enabled_flags nvgpu_free_fixed +nvgpu_free_gr_ctx_struct +nvgpu_get nvgpu_get_pte nvgpu_gmmu_init_page_table nvgpu_gmmu_map @@ -467,18 +382,106 @@ nvgpu_gmmu_map_locked nvgpu_gmmu_map_fixed nvgpu_gmmu_unmap nvgpu_gmmu_unmap_locked +nvgpu_golden_ctx_verif_get_fault_injection +nvgpu_gr_alloc +nvgpu_gr_config_init +nvgpu_gr_config_deinit +nvgpu_gr_config_get_max_gpc_count +nvgpu_gr_config_get_max_tpc_count +nvgpu_gr_config_get_max_tpc_per_gpc_count +nvgpu_gr_config_get_gpc_count +nvgpu_gr_config_get_tpc_count +nvgpu_gr_config_get_ppc_count +nvgpu_gr_config_get_pe_count_per_gpc +nvgpu_gr_config_get_sm_count_per_tpc +nvgpu_gr_config_get_gpc_mask +nvgpu_gr_config_get_gpc_ppc_count +nvgpu_gr_config_get_gpc_skip_mask +nvgpu_gr_config_get_gpc_tpc_count +nvgpu_gr_config_get_pes_tpc_count +nvgpu_gr_config_get_pes_tpc_mask +nvgpu_gr_config_get_gpc_tpc_mask +nvgpu_gr_config_get_gpc_tpc_mask_base +nvgpu_gr_config_get_gpc_tpc_count_base +nvgpu_gr_config_get_sm_info +nvgpu_gr_config_get_sm_info_global_tpc_index 
+nvgpu_gr_config_get_sm_info_gpc_index +nvgpu_gr_config_get_sm_info_sm_index +nvgpu_gr_config_get_sm_info_tpc_index +nvgpu_gr_config_get_no_of_sm +nvgpu_gr_config_set_gpc_tpc_mask +nvgpu_gr_config_set_no_of_sm +nvgpu_gr_config_set_sm_info_global_tpc_index +nvgpu_gr_config_set_sm_info_gpc_index +nvgpu_gr_config_set_sm_info_sm_index +nvgpu_gr_config_set_sm_info_tpc_index +nvgpu_gr_ctx_alloc +nvgpu_gr_ctx_alloc_patch_ctx +nvgpu_gr_ctx_desc_alloc +nvgpu_gr_ctx_desc_free +nvgpu_gr_ctx_free +nvgpu_gr_ctx_free_patch_ctx +nvgpu_gr_ctx_get_tsgid +nvgpu_gr_ctx_map_global_ctx_buffers +nvgpu_gr_ctx_patch_write +nvgpu_gr_ctx_patch_write_begin +nvgpu_gr_ctx_patch_write_end +nvgpu_gr_ctx_set_size +nvgpu_gr_enable_hw +nvgpu_gr_engine_interrupt_mask +nvgpu_gr_falcon_get_fecs_ucode_segments +nvgpu_gr_falcon_get_gpccs_ucode_segments +nvgpu_gr_falcon_get_surface_desc_cpu_va +nvgpu_gr_falcon_init_ctxsw +nvgpu_gr_falcon_init_ctx_state +nvgpu_gr_falcon_init_ctxsw_ucode +nvgpu_gr_falcon_init_support +nvgpu_gr_falcon_load_secure_ctxsw_ucode +nvgpu_gr_falcon_remove_support +nvgpu_gr_free +nvgpu_gr_fs_state_init +nvgpu_gr_get_config_ptr +nvgpu_gr_global_ctx_buffer_alloc +nvgpu_gr_global_ctx_buffer_free +nvgpu_gr_global_ctx_buffer_get_mem +nvgpu_gr_global_ctx_buffer_map +nvgpu_gr_global_ctx_buffer_ready +nvgpu_gr_global_ctx_buffer_unmap +nvgpu_gr_global_ctx_compare_golden_images +nvgpu_gr_global_ctx_deinit_local_golden_image +nvgpu_gr_global_ctx_desc_alloc +nvgpu_gr_global_ctx_desc_free +nvgpu_gr_global_ctx_init_local_golden_image +nvgpu_gr_global_ctx_load_local_golden_image +nvgpu_gr_global_ctx_set_size +nvgpu_gr_init +nvgpu_gr_init_support +nvgpu_gr_intr_init_support +nvgpu_gr_intr_remove_support +nvgpu_gr_intr_handle_fecs_error +nvgpu_gr_obj_ctx_alloc +nvgpu_gr_obj_ctx_deinit +nvgpu_gr_obj_ctx_init +nvgpu_gr_obj_ctx_is_golden_image_ready +nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode +nvgpu_gr_prepare_sw +nvgpu_gr_remove_support +nvgpu_gr_subctx_alloc +nvgpu_gr_subctx_free +nvgpu_gr_suspend +nvgpu_gr_sw_ready +nvgpu_hr_timestamp nvgpu_init_enabled_flags nvgpu_init_hal +nvgpu_init_ltc_support nvgpu_init_mm_support nvgpu_init_therm_support nvgpu_inst_block_addr nvgpu_free_inst_block nvgpu_inst_block_ptr nvgpu_is_enabled -nvgpu_big_alloc_impl -nvgpu_big_free -nvgpu_kfree_impl nvgpu_kcalloc_impl +nvgpu_kfree_impl nvgpu_kmalloc_impl nvgpu_kmem_cache_alloc nvgpu_kmem_cache_create @@ -486,7 +489,13 @@ nvgpu_kmem_cache_destroy nvgpu_kmem_cache_free nvgpu_kmem_get_fault_injection nvgpu_kzalloc_impl -nvgpu_vmalloc_impl +nvgpu_ltc_ecc_free +nvgpu_ltc_get_cacheline_size +nvgpu_ltc_get_ltc_count +nvgpu_ltc_get_slices_per_ltc +nvgpu_ltc_remove_support +nvgpu_ltc_sync_enabled +nvgpu_local_golden_image_get_fault_injection nvgpu_log_msg_impl nvgpu_mc_intr_mask nvgpu_mc_intr_nonstall_pause @@ -688,6 +697,7 @@ nvgpu_vm_mapping_batch_start nvgpu_vm_put nvgpu_vm_put_buffers nvgpu_vm_unmap +nvgpu_vmalloc_impl nvgpu_vzalloc_impl nvgpu_wait_for_deferred_interrupts nvgpu_writel diff --git a/userspace/required_tests.json b/userspace/required_tests.json index 8e899c524..63bb5852c 100644 --- a/userspace/required_tests.json +++ b/userspace/required_tests.json @@ -2021,18 +2021,48 @@ "unit": "nvgpu_allocator", "test_level": 0 }, + { + "test": "test_channel_abort_cleanup", + "case": "abort_cleanup", + "unit": "nvgpu_channel", + "test_level": 0 + }, { "test": "test_channel_alloc_inst", "case": "alloc_inst", "unit": "nvgpu_channel", "test_level": 0 }, + { + "test": "test_channel_abort", + "case": "ch_abort", + "unit": "nvgpu_channel", + 
"test_level": 0 + }, + { + "test": "test_channel_from_invalid_id", + "case": "channel_from_invalid_id", + "unit": "nvgpu_channel", + "test_level": 0 + }, + { + "test": "test_channel_put_warn", + "case": "channel_put_warn", + "unit": "nvgpu_channel", + "test_level": 0 + }, { "test": "test_channel_close", "case": "close", "unit": "nvgpu_channel", "test_level": 0 }, + { + "test": "test_channel_debug_dump", + "case": "debug_dump", + "unit": "nvgpu_channel", + "test_level": 0 + }, { "test": "test_channel_enable_disable_tsg", "case": "enable_disable_tsg", @@ -2045,24 +2075,48 @@ "unit": "nvgpu_channel", "test_level": 0 }, + { + "test": "test_channel_deterministic_idle_unidle", + "case": "idle_unidle", + "unit": "nvgpu_channel", + "test_level": 0 + }, { "test": "test_fifo_init_support", "case": "init_support", "unit": "nvgpu_channel", "test_level": 0 }, + { + "test": "test_channel_mark_error", + "case": "mark_error", + "unit": "nvgpu_channel", + "test_level": 0 + }, { "test": "test_channel_open", "case": "open", "unit": "nvgpu_channel", "test_level": 0 }, + { + "test": "test_ch_referenceable_cleanup", + "case": "referenceable_cleanup", + "unit": "nvgpu_channel", + "test_level": 0 + }, { "test": "test_fifo_remove_support", "case": "remove_support", "unit": "nvgpu_channel", "test_level": 0 }, + { + "test": "test_channel_semaphore_wakeup", + "case": "semaphore_wakeup", + "unit": "nvgpu_channel", + "test_level": 0 + }, { "test": "test_channel_setup_bind", "case": "setup_bind", @@ -2075,6 +2129,18 @@ "unit": "nvgpu_channel", "test_level": 0 }, + { + "test": "test_channel_suspend_resume_serviceable_chs", + "case": "suspend_resume", + "unit": "nvgpu_channel", + "test_level": 0 + }, + { + "test": "test_channel_sw_quiesce", + "case": "sw_quiesce", + "unit": "nvgpu_channel", + "test_level": 0 + }, { "test": "test_gk20a_channel_disable", "case": "disable", diff --git a/userspace/units/fifo/channel/nvgpu-channel.c b/userspace/units/fifo/channel/nvgpu-channel.c index cae6b8ebc..dfccde43e 100644 --- a/userspace/units/fifo/channel/nvgpu-channel.c +++ b/userspace/units/fifo/channel/nvgpu-channel.c @@ -33,6 +33,9 @@ #include #include #include +#include + +#include "common/sync/channel_sync_priv.h" #include @@ -42,6 +45,7 @@ #define MAX_STUB 2 struct stub_ctx { + u32 count; u32 chid; u32 tsgid; }; @@ -72,13 +76,12 @@ static void subtest_setup(u32 branches) #define assert(cond) unit_assert(cond, goto done) #define F_CHANNEL_SETUP_SW_VZALLOC_FAIL BIT(0) -#define F_CHANNEL_SETUP_SW_LAST BIT(1) - -/* TODO: nvgpu_cond_init failure, not testable yet */ -#define F_CHANNEL_SETUP_SW_INIT_SUPPORT_FAIL_COND_INIT +#define F_CHANNEL_SETUP_SW_REF_COND_FAIL BIT(1) +#define F_CHANNEL_SETUP_SW_LAST BIT(2) static const char *f_channel_setup_sw[] = { "vzalloc_fail", + "cond_init failure" }; static u32 stub_channel_count(struct gk20a *g) @@ -86,20 +89,22 @@ static u32 stub_channel_count(struct gk20a *g) return 32; } -int test_channel_setup_sw(struct unit_module *m, - struct gk20a *g, void *args) +int test_channel_setup_sw(struct unit_module *m, struct gk20a *g, void *vargs) { struct gpu_ops gops = g->ops; struct nvgpu_fifo *f = &g->fifo; struct nvgpu_posix_fault_inj *kmem_fi; + struct nvgpu_posix_fault_inj *l_cond_fi; u32 branches; int ret = UNIT_FAIL; int err; - u32 fail = F_CHANNEL_SETUP_SW_VZALLOC_FAIL; + u32 fail = F_CHANNEL_SETUP_SW_VZALLOC_FAIL | + F_CHANNEL_SETUP_SW_REF_COND_FAIL; u32 prune = fail; kmem_fi = nvgpu_kmem_get_fault_injection(); + l_cond_fi = nvgpu_cond_get_fault_injection(); g->ops.channel.count = 
stub_channel_count; @@ -117,12 +122,19 @@ int test_channel_setup_sw(struct unit_module *m, branches & F_CHANNEL_SETUP_SW_VZALLOC_FAIL ? true : false, 0); + /* Insert condition fault after some channels are initialized */ + if ((branches & F_CHANNEL_SETUP_SW_REF_COND_FAIL) != 0U) { + nvgpu_posix_enable_fault_injection(l_cond_fi, true, 5); + } + unit_verbose(m, "%s branches=%s\n", __func__, branches_str(branches, f_channel_setup_sw)); err = nvgpu_channel_setup_sw(g); if (branches & fail) { + nvgpu_posix_enable_fault_injection(kmem_fi, false, 0); + nvgpu_posix_enable_fault_injection(l_cond_fi, false, 0); assert(err != 0); assert(f->channel == NULL); } else { @@ -133,7 +145,6 @@ int test_channel_setup_sw(struct unit_module *m, ret = UNIT_SUCCESS; done: - nvgpu_posix_enable_fault_injection(kmem_fi, false, 0); if (ret != UNIT_SUCCESS) { unit_err(m, "%s branches=%s\n", __func__, branches_str(branches, f_channel_setup_sw)); @@ -150,14 +161,11 @@ done: #define F_CHANNEL_OPEN_ALLOC_CH_AGGRESSIVE BIT(5) #define F_CHANNEL_OPEN_BUG_ON BIT(6) #define F_CHANNEL_OPEN_ALLOC_INST_FAIL BIT(7) -#define F_CHANNEL_OPEN_OS BIT(8) -#define F_CHANNEL_OPEN_LAST BIT(9) +#define F_CHANNEL_OPEN_NOTIFIER_WQ_INIT_FAIL BIT(8) +#define F_CHANNEL_OPEN_SEMAPHORE_WQ_INIT_FAIL BIT(9) +#define F_CHANNEL_OPEN_LAST BIT(10) -/* TODO: cover nvgpu_cond_init failures */ -#define F_CHANNEL_OPEN_COND0_INIT_FAIL -#define F_CHANNEL_OPEN_COND1_INIT_FAIL - static const char *f_channel_open[] = { "engine_not_valid", "privileged", @@ -167,9 +175,8 @@ static const char *f_channel_open[] = { "aggressive_destroy", "bug_on", "alloc_inst_fail", - "cond0_init_fail", - "cond1_init_fail", - "hal", + "notifier_wq_init_fail", + "semaphore_wq_init_fail", }; static int stub_channel_alloc_inst_ENOMEM(struct gk20a *g, @@ -178,20 +185,21 @@ static int stub_channel_alloc_inst_ENOMEM(struct gk20a *g, return -ENOMEM; } -int test_channel_open(struct unit_module *m, - struct gk20a *g, void *args) +int test_channel_open(struct unit_module *m, struct gk20a *g, void *vargs) { struct nvgpu_fifo *f = &g->fifo; struct nvgpu_fifo fifo = g->fifo; struct gpu_ops gops = g->ops; struct nvgpu_channel *ch, *next_ch; - struct nvgpu_posix_fault_inj *kmem_fi; + struct nvgpu_posix_fault_inj *l_cond_fi; u32 branches; int ret = UNIT_FAIL; u32 fail = F_CHANNEL_OPEN_ALLOC_CH_FAIL | F_CHANNEL_OPEN_BUG_ON | - F_CHANNEL_OPEN_ALLOC_INST_FAIL; + F_CHANNEL_OPEN_ALLOC_INST_FAIL | + F_CHANNEL_OPEN_NOTIFIER_WQ_INIT_FAIL | + F_CHANNEL_OPEN_SEMAPHORE_WQ_INIT_FAIL; u32 prune = fail | F_CHANNEL_OPEN_ALLOC_CH_WARN0 | F_CHANNEL_OPEN_ALLOC_CH_WARN1; @@ -201,7 +209,7 @@ int test_channel_open(struct unit_module *m, void (*os_channel_open)(struct nvgpu_channel *ch) = g->os_channel.open; - kmem_fi = nvgpu_kmem_get_fault_injection(); + l_cond_fi = nvgpu_cond_get_fault_injection(); for (branches = 0U; branches < F_CHANNEL_OPEN_LAST; branches++) { @@ -214,8 +222,6 @@ int test_channel_open(struct unit_module *m, unit_verbose(m, "%s branches=%s\n", __func__, branches_str(branches, f_channel_open)); - nvgpu_posix_enable_fault_injection(kmem_fi, false, 0); - next_ch = nvgpu_list_empty(&f->free_chs) ? 
NULL : nvgpu_list_first_entry(&f->free_chs, @@ -247,6 +253,14 @@ int test_channel_open(struct unit_module *m, f->used_channels += 2U; } + if (branches & F_CHANNEL_OPEN_NOTIFIER_WQ_INIT_FAIL) { + nvgpu_posix_enable_fault_injection(l_cond_fi, true, 0); + } + + if (branches & F_CHANNEL_OPEN_SEMAPHORE_WQ_INIT_FAIL) { + nvgpu_posix_enable_fault_injection(l_cond_fi, true, 1); + } + g->ops.channel.alloc_inst = branches & F_CHANNEL_OPEN_ALLOC_INST_FAIL ? stub_channel_alloc_inst_ENOMEM : @@ -280,6 +294,7 @@ int test_channel_open(struct unit_module *m, } if (branches & fail) { + nvgpu_posix_enable_fault_injection(l_cond_fi, false, 0); if (branches & F_CHANNEL_OPEN_ALLOC_CH_FAIL) { f->free_chs = fifo.free_chs; } @@ -304,7 +319,7 @@ done: unit_err(m, "%s branches=%s\n", __func__, branches_str(branches, f_channel_open)); } - nvgpu_posix_enable_fault_injection(kmem_fi, false, 0); + if (ch != NULL) { nvgpu_channel_close(ch); } @@ -313,17 +328,21 @@ done: return ret; } -#define F_CHANNEL_CLOSE_ALREADY_FREED BIT(0) -#define F_CHANNEL_CLOSE_FORCE BIT(1) -#define F_CHANNEL_CLOSE_DYING BIT(2) -#define F_CHANNEL_CLOSE_TSG_BOUND BIT(3) -#define F_CHANNEL_CLOSE_TSG_UNBIND_FAIL BIT(4) -#define F_CHANNEL_CLOSE_OS_CLOSE BIT(5) -#define F_CHANNEL_CLOSE_NON_REFERENCEABLE BIT(6) -#define F_CHANNEL_CLOSE_AS_BOUND BIT(7) -#define F_CHANNEL_CLOSE_FREE_SUBCTX BIT(8) -#define F_CHANNEL_CLOSE_USER_SYNC BIT(9) -#define F_CHANNEL_CLOSE_LAST BIT(10) +#define F_CHANNEL_CLOSE_ALREADY_FREED BIT(0) +#define F_CHANNEL_CLOSE_FORCE BIT(1) +#define F_CHANNEL_CLOSE_DYING BIT(2) +#define F_CHANNEL_CLOSE_TSG_BOUND BIT(3) +#define F_CHANNEL_CLOSE_TSG_UNBIND_FAIL BIT(4) +#define F_CHANNEL_CLOSE_OS_CLOSE BIT(5) +#define F_CHANNEL_CLOSE_NON_REFERENCEABLE BIT(6) +#define F_CHANNEL_CLOSE_FREE_SUBCTX BIT(7) +#define F_CHANNEL_CLOSE_USER_SYNC BIT(8) +#define F_CHANNEL_CLOSE_NONZERO_DESTROY_THRESH_64 BIT(9) +#define F_CHANNEL_CLOSE_NONZERO_DESTROY_THRESH_1 BIT(10) +#define F_CHANNEL_CLOSE_DETERMINISTIC BIT(11) +#define F_CHANNEL_CLOSE_DETERMINISTIC_RAILGATE_ALLOWED BIT(12) +#define F_CHANNEL_CLOSE_AS_BOUND BIT(13) +#define F_CHANNEL_CLOSE_LAST BIT(14) /* nvgpu_tsg_unbind_channel always return 0 */ @@ -338,6 +357,9 @@ static const char *f_channel_close[] = { "as_bound", "free_subctx", "user_sync", + "destroy_thresh_64", + "destroy_thresh_1", + "deterministic", }; static void stub_os_channel_close(struct nvgpu_channel *ch, bool force) @@ -349,6 +371,17 @@ static void stub_gr_intr_flush_channel_tlb(struct gk20a *g) { } +static void stub_channel_sync_syncpt_set_safe_state( + struct nvgpu_channel_sync *s) +{ +} + +static void stub_channel_sync_destroy(struct nvgpu_channel_sync *s) +{ + stub[0].chid = 1; +} + + static bool channel_close_pruned(u32 branches, u32 final) { u32 branches_init = branches; @@ -363,7 +396,6 @@ static bool channel_close_pruned(u32 branches, u32 final) if ((branches & F_CHANNEL_CLOSE_AS_BOUND) == 0) { branches &= ~F_CHANNEL_CLOSE_FREE_SUBCTX; - branches &= ~F_CHANNEL_CLOSE_USER_SYNC; } if (branches < branches_init) { @@ -373,17 +405,18 @@ static bool channel_close_pruned(u32 branches, u32 final) return false; } -int test_channel_close(struct unit_module *m, - struct gk20a *g, void *args) +int test_channel_close(struct unit_module *m, struct gk20a *g, void *vargs) { struct gpu_ops gops = g->ops; struct nvgpu_channel *ch; struct nvgpu_tsg *tsg; + struct nvgpu_channel_sync user_sync = {0}; u32 branches = 0U; int ret = UNIT_FAIL; u32 fail = F_CHANNEL_CLOSE_ALREADY_FREED | F_CHANNEL_CLOSE_NON_REFERENCEABLE; - u32 prune = fail; + u32 
prune = (u32)(F_CHANNEL_CLOSE_USER_SYNC) | + F_CHANNEL_CLOSE_DETERMINISTIC_RAILGATE_ALLOWED | fail; u32 runlist_id = NVGPU_INVALID_RUNLIST_ID; void (*os_channel_close)(struct nvgpu_channel *ch, bool force) = g->os_channel.close; @@ -424,6 +457,12 @@ int test_channel_close(struct unit_module *m, g->os_channel.close = branches & F_CHANNEL_CLOSE_OS_CLOSE ? stub_os_channel_close : NULL; + g->aggressive_sync_destroy_thresh = + branches & F_CHANNEL_CLOSE_NONZERO_DESTROY_THRESH_64 ? + 64U : + (branches & F_CHANNEL_CLOSE_NONZERO_DESTROY_THRESH_1) ? + 1U : 0U; + if (branches & F_CHANNEL_CLOSE_TSG_BOUND) { err = nvgpu_tsg_bind_channel(tsg, ch); assert(err == 0); @@ -445,13 +484,35 @@ int test_channel_close(struct unit_module *m, ch->vm = NULL; } + if (branches & F_CHANNEL_CLOSE_DETERMINISTIC) { + /* Compensate for atomic dec in gk20a_idle() */ + nvgpu_atomic_set(&g->usage_count, 1); + ch->deterministic = true; + } + + if (branches & F_CHANNEL_CLOSE_DETERMINISTIC_RAILGATE_ALLOWED) { + ch->deterministic = true; + ch->deterministic_railgate_allowed = true; + } + g->ops.gr.setup.free_subctx = branches & F_CHANNEL_CLOSE_FREE_SUBCTX ? gops.gr.setup.free_subctx : NULL; if (branches & F_CHANNEL_CLOSE_USER_SYNC) { - ch->user_sync = nvgpu_channel_sync_create(ch, true); - assert(err == 0); + /* Channel requires to be as_bound */ + memset(&mm, 0, sizeof(mm)); + memset(&vm, 0, sizeof(vm)); + mm.g = g; + vm.mm = &mm; + ch->vm = &vm; + nvgpu_ref_init(&vm.ref); + nvgpu_ref_get(&vm.ref); + + ch->user_sync = &user_sync; + ch->user_sync->set_safe_state = + stub_channel_sync_syncpt_set_safe_state; + ch->user_sync->destroy = stub_channel_sync_destroy; } if (branches & F_CHANNEL_CLOSE_ALREADY_FREED) { @@ -470,6 +531,11 @@ int test_channel_close(struct unit_module *m, continue; } + if ((branches & F_CHANNEL_CLOSE_USER_SYNC) != 0U) { + assert(stub[0].chid == 1U); + ch->user_sync = NULL; + } + if (branches & fail) { assert(ch->g != NULL); assert(nvgpu_list_empty(&ch->free_chs)); @@ -510,6 +576,8 @@ int test_channel_close(struct unit_module *m, ch->subctx = NULL; } + ch->deterministic = false; + ch->deterministic_railgate_allowed = false; assert(ch->usermode_submit_enabled == false); /* we took an extra reference to avoid nvgpu_vm_remove_ref */ @@ -557,7 +625,12 @@ done: #define F_CHANNEL_SETUP_BIND_USERMODE_ALLOC_BUF_FAIL BIT(3) #define F_CHANNEL_SETUP_BIND_USERMODE_SETUP_RAMFC_FAIL BIT(4) #define F_CHANNEL_SETUP_BIND_USERMODE_UPDATE_RL_FAIL BIT(5) -#define F_CHANNEL_SETUP_BIND_LAST BIT(6) +#define F_CHANNEL_SETUP_BIND_USERMODE_TSGID_INVALID BIT(6) +#define F_CHANNEL_SETUP_BIND_USERMODE_SUPPORT_DETERMINISTIC BIT(7) +#define F_CHANNEL_SETUP_BIND_USERMODE_POWER_REF_COUNT_FAIL BIT(8) +#define F_CHANNEL_SETUP_BIND_NON_USERMODE_DETERMINISTIC BIT(9) +#define F_CHANNEL_SETUP_BIND_USERMODE_OS_CH_USERMODE_BUF BIT(10) +#define F_CHANNEL_SETUP_BIND_LAST BIT(11) static const char *f_channel_setup_bind[] = { "no_as", @@ -566,6 +639,11 @@ static const char *f_channel_setup_bind[] = { "alloc_buf_fail", "setup_ramfc_fail", "update_rl_fail", + "invalid tsgid", + "support determininstic", + "power ref count fail", + "non usermode determinstic channel", + "os_channel free usermode buffer", }; static int stub_os_channel_alloc_usermode_buffers(struct nvgpu_channel *ch, @@ -619,12 +697,17 @@ static int stub_mm_l2_flush(struct gk20a *g, bool invalidate) return 0; } -int test_channel_setup_bind(struct unit_module *m, - struct gk20a *g, void *args) +static void stub_os_channel_free_usermode_buffers(struct nvgpu_channel *c) +{ + +} + +int 
test_channel_setup_bind(struct unit_module *m, struct gk20a *g, void *vargs) { struct gpu_ops gops = g->ops; struct nvgpu_channel *ch = NULL; struct nvgpu_tsg *tsg = NULL; + struct nvgpu_posix_fault_inj *l_nvgpu_fi; u32 branches = 0U; int ret = UNIT_FAIL; u32 fail = @@ -633,9 +716,15 @@ int test_channel_setup_bind(struct unit_module *m, F_CHANNEL_SETUP_BIND_USERMODE_ALLOC_BUF_NULL | F_CHANNEL_SETUP_BIND_USERMODE_ALLOC_BUF_FAIL | F_CHANNEL_SETUP_BIND_USERMODE_SETUP_RAMFC_FAIL | - F_CHANNEL_SETUP_BIND_USERMODE_UPDATE_RL_FAIL; - u32 prune = fail; + F_CHANNEL_SETUP_BIND_USERMODE_UPDATE_RL_FAIL | + F_CHANNEL_SETUP_BIND_USERMODE_TSGID_INVALID | + F_CHANNEL_SETUP_BIND_USERMODE_POWER_REF_COUNT_FAIL | + F_CHANNEL_SETUP_BIND_NON_USERMODE_DETERMINISTIC | + F_CHANNEL_SETUP_BIND_USERMODE_OS_CH_USERMODE_BUF; + u32 prune = (u32)(F_CHANNEL_SETUP_BIND_USERMODE_SUPPORT_DETERMINISTIC) | + fail; u32 runlist_id = NVGPU_INVALID_RUNLIST_ID; + u32 tsgid_orig; bool privileged = false; int err; struct nvgpu_mem pdb_mem; @@ -669,8 +758,10 @@ int test_channel_setup_bind(struct unit_module *m, vm.pdb.mem = &pdb_mem; memset(&bind_args, 0, sizeof(bind_args)); - bind_args.flags = NVGPU_SETUP_BIND_FLAGS_USERMODE_SUPPORT; bind_args.num_gpfifo_entries = 32; + tsgid_orig = ch->tsgid; + + l_nvgpu_fi = nvgpu_nvgpu_get_fault_injection(); for (branches = 0U; branches < F_CHANNEL_SETUP_BIND_LAST; branches++) { @@ -701,6 +792,34 @@ int test_channel_setup_bind(struct unit_module *m, stub_os_channel_alloc_usermode_buffers_ENOMEM; } + if (branches & + F_CHANNEL_SETUP_BIND_USERMODE_SUPPORT_DETERMINISTIC) { + bind_args.flags |= + NVGPU_SETUP_BIND_FLAGS_SUPPORT_DETERMINISTIC; + } + + if (branches & + F_CHANNEL_SETUP_BIND_USERMODE_POWER_REF_COUNT_FAIL) { + bind_args.flags |= + NVGPU_SETUP_BIND_FLAGS_SUPPORT_DETERMINISTIC; + nvgpu_posix_enable_fault_injection(l_nvgpu_fi, true, 0); + } + + if (branches & + F_CHANNEL_SETUP_BIND_NON_USERMODE_DETERMINISTIC) { + bind_args.flags |= + NVGPU_SETUP_BIND_FLAGS_SUPPORT_DETERMINISTIC; + bind_args.flags &= + ~NVGPU_SETUP_BIND_FLAGS_USERMODE_SUPPORT; + } else { + bind_args.flags |= + NVGPU_SETUP_BIND_FLAGS_USERMODE_SUPPORT; + } + + ch->tsgid = branches & + F_CHANNEL_SETUP_BIND_USERMODE_TSGID_INVALID ? + NVGPU_INVALID_TSG_ID : tsgid_orig; + g->ops.runlist.update_for_channel = branches & F_CHANNEL_SETUP_BIND_USERMODE_UPDATE_RL_FAIL ? stub_runlist_update_for_channel_ETIMEDOUT : @@ -710,14 +829,24 @@ int test_channel_setup_bind(struct unit_module *m, F_CHANNEL_SETUP_BIND_USERMODE_SETUP_RAMFC_FAIL ? 
stub_ramfc_setup_EINVAL : gops.ramfc.setup; + if (branches & + F_CHANNEL_SETUP_BIND_USERMODE_OS_CH_USERMODE_BUF) { + g->ops.ramfc.setup = stub_ramfc_setup_EINVAL; + g->os_channel.free_usermode_buffers = + stub_os_channel_free_usermode_buffers; + } + err = nvgpu_channel_setup_bind(ch, &bind_args); if (branches & fail) { + nvgpu_posix_enable_fault_injection( + l_nvgpu_fi, false, 0); assert(err != 0); assert(!nvgpu_mem_is_valid(&ch->usermode_userd)); assert(!nvgpu_mem_is_valid(&ch->usermode_gpfifo)); ch->usermode_submit_enabled = false; assert(nvgpu_atomic_read(&ch->bound) == false); + g->os_channel.free_usermode_buffers = NULL; } else { assert(err == 0); assert(stub[0].chid == ch->chid); @@ -728,8 +857,11 @@ int test_channel_setup_bind(struct unit_module *m, nvgpu_dma_free(g, &ch->usermode_userd); nvgpu_dma_free(g, &ch->usermode_gpfifo); ch->userd_iova = 0U; + ch->deterministic = false; nvgpu_atomic_set(&ch->bound, false); } + bind_args.flags &= + ~NVGPU_SETUP_BIND_FLAGS_SUPPORT_DETERMINISTIC; } ret = UNIT_SUCCESS; @@ -755,8 +887,7 @@ static const char *f_channel_alloc_inst[] = { "nomem", }; -int test_channel_alloc_inst(struct unit_module *m, - struct gk20a *g, void *args) +int test_channel_alloc_inst(struct unit_module *m, struct gk20a *g, void *vargs) { struct nvgpu_channel *ch = NULL; u32 branches = 0U; @@ -836,8 +967,7 @@ static const char *f_channel_from_inst[] = { "match_b", }; -int test_channel_from_inst(struct unit_module *m, - struct gk20a *g, void *args) +int test_channel_from_inst(struct unit_module *m, struct gk20a *g, void *vargs) { struct nvgpu_channel *ch = NULL; struct nvgpu_channel *chA = NULL; @@ -940,7 +1070,7 @@ static void stub_tsg_disable(struct nvgpu_tsg *tsg) } int test_channel_enable_disable_tsg(struct unit_module *m, - struct gk20a *g, void *args) + struct gk20a *g, void *vargs) { struct gpu_ops gops = g->ops; struct nvgpu_channel *ch = NULL; @@ -996,6 +1126,754 @@ done: return ret; } +#define F_CHANNEL_ABORT_TSG BIT(0) +#define F_CHANNEL_ABORT_LAST BIT(1) + +static const char *f_channel_abort[] = { + "tsg not null", +}; + +int test_channel_abort(struct unit_module *m, struct gk20a *g, void *vargs) +{ + struct nvgpu_channel *ch = NULL; + struct nvgpu_tsg *tsg = NULL; + u32 branches = 0U; + int ret = UNIT_FAIL; + u32 runlist_id = NVGPU_INVALID_RUNLIST_ID; + bool privileged = false; + int err; + + tsg = nvgpu_tsg_open(g, getpid()); + assert(tsg != NULL); + + ch = nvgpu_channel_open_new(g, runlist_id, + privileged, getpid(), getpid()); + assert(ch != NULL); + + for (branches = 0U; branches < F_CHANNEL_ABORT_LAST; branches++) { + subtest_setup(branches); + unit_verbose(m, "%s branches=%s\n", __func__, + branches_str(branches, f_channel_abort)); + + if ((branches & F_CHANNEL_ABORT_TSG) != 0U) { + err = nvgpu_tsg_bind_channel(tsg, ch); + assert(err == 0); + } + + nvgpu_channel_abort(ch, false); + assert(ch->unserviceable == true); + } + ret = UNIT_SUCCESS; + +done: + if (ret != UNIT_SUCCESS) { + unit_err(m, "%s branches=%s\n", __func__, + branches_str(branches, f_channel_abort)); + } + if (ch != NULL) { + nvgpu_channel_close(ch); + } + if (tsg != NULL) { + nvgpu_ref_put(&tsg->refcount, nvgpu_tsg_release); + } + return ret; +} + +#define F_CHANNEL_MARK_ERROR_COND_BROADCAST_FAIL BIT(0) +#define F_CHANNEL_MARK_ERROR_LAST BIT(1) + +static const char *f_channel_mark_error[] = { + "condition_broadcast_fail", +}; + +int test_channel_mark_error(struct unit_module *m, struct gk20a *g, void *vargs) +{ + struct nvgpu_channel *ch = NULL; + u32 branches = 0U; + int ret = UNIT_FAIL; + 
+ u32 runlist_id = NVGPU_INVALID_RUNLIST_ID; + bool privileged = false; + bool err; + + ch = nvgpu_channel_open_new(g, runlist_id, + privileged, getpid(), getpid()); + assert(ch != NULL); + + for (branches = 0U; branches < F_CHANNEL_MARK_ERROR_LAST; branches++) { + + subtest_setup(branches); + unit_verbose(m, "%s branches=%s\n", __func__, + branches_str(branches, f_channel_mark_error)); + + if ((branches & F_CHANNEL_MARK_ERROR_COND_BROADCAST_FAIL) + != 0) { + ch->semaphore_wq.initialized = false; + ch->notifier_wq.initialized = false; + } + + err = nvgpu_channel_mark_error(g, ch); + assert(err == false); + assert(ch->unserviceable == true); + + ch->semaphore_wq.initialized = true; + ch->notifier_wq.initialized = true; + } + ret = UNIT_SUCCESS; + +done: + if (ret != UNIT_SUCCESS) { + unit_err(m, "%s branches=%s\n", __func__, + branches_str(branches, f_channel_mark_error)); + } + if (ch != NULL) { + nvgpu_channel_close(ch); + } + + return ret; +} + +int test_channel_sw_quiesce(struct unit_module *m, struct gk20a *g, void *vargs) +{ + struct nvgpu_channel *ch = NULL; + struct nvgpu_fifo *f = &g->fifo; + int ret = UNIT_FAIL; + + u32 runlist_id = NVGPU_INVALID_RUNLIST_ID; + bool privileged = false; + + ch = nvgpu_channel_open_new(g, runlist_id, + privileged, getpid(), getpid()); + assert(ch != NULL); + assert(f->num_channels > 0U); + +#ifndef CONFIG_NVGPU_RECOVERY + nvgpu_channel_sw_quiesce(g); + assert(ch->unserviceable == true); +#endif + + ret = UNIT_SUCCESS; + +done: + if (ch != NULL) { + nvgpu_channel_close(ch); + } + + return ret; +} + +#define F_CHANNEL_DETERMINISTIC_IDLE_UNIDLE BIT(0) +#define F_CHANNEL_DETERMINISTIC_IDLE_RAILGATE_ALLOWED BIT(1) +#define F_CHANNEL_DETERMINISTIC_UNIDLE_GK20ABUSY_FAIL BIT(2) +#define F_CHANNEL_DETERMINISTIC_IDLE_LAST BIT(3) + +static const char *f_channel_deterministic_idle_unidle[] = { + "deterministic_channel", + "determinstic_railgate_allowed", + "gk20a_busy_fail", +}; + +int test_channel_deterministic_idle_unidle(struct unit_module *m, + struct gk20a *g, void *vargs) +{ + struct nvgpu_posix_fault_inj *l_nvgpu_fi; + struct nvgpu_channel *ch = NULL; + struct nvgpu_tsg *tsg = NULL; + struct nvgpu_mem pdb_mem; + struct mm_gk20a mm; + struct vm_gk20a vm; + u32 runlist_id = NVGPU_INVALID_RUNLIST_ID; + bool privileged = false; + int err; + u32 branches = 0U; + int ret = UNIT_FAIL; + int gpu_usage_count_initial; + + struct nvgpu_setup_bind_args bind_args; + + tsg = nvgpu_tsg_open(g, getpid()); + assert(tsg != NULL); + + ch = nvgpu_channel_open_new(g, runlist_id, + privileged, getpid(), getpid()); + assert(ch != NULL); + + err = nvgpu_tsg_bind_channel(tsg, ch); + assert(err == 0); + + memset(&mm, 0, sizeof(mm)); + memset(&vm, 0, sizeof(vm)); + mm.g = g; + vm.mm = &mm; + ch->vm = &vm; + err = nvgpu_dma_alloc(g, PAGE_SIZE, &pdb_mem); + assert(err == 0); + vm.pdb.mem = &pdb_mem; + + g->ops.gr.intr.flush_channel_tlb = stub_gr_intr_flush_channel_tlb; + g->ops.mm.cache.l2_flush = stub_mm_l2_flush; /* bug 2621189 */ + g->os_channel.alloc_usermode_buffers = + stub_os_channel_alloc_usermode_buffers; + g->ops.runlist.update_for_channel = stub_runlist_update_for_channel; + + (void)memset(&bind_args, 0, sizeof(bind_args)); + bind_args.num_gpfifo_entries = 32; + + bind_args.flags |= NVGPU_SETUP_BIND_FLAGS_USERMODE_SUPPORT; + + l_nvgpu_fi = nvgpu_nvgpu_get_fault_injection(); + + for (branches = 0U; branches < F_CHANNEL_DETERMINISTIC_IDLE_LAST; + branches++) { + subtest_setup(branches); + unit_verbose(m, "%s branches=%s\n", __func__, + branches_str(branches, + 
f_channel_deterministic_idle_unidle)); + + if ((branches & F_CHANNEL_DETERMINISTIC_IDLE_UNIDLE) != 0) { + bind_args.flags |= + NVGPU_SETUP_BIND_FLAGS_SUPPORT_DETERMINISTIC; + } else { + bind_args.flags &= + ~NVGPU_SETUP_BIND_FLAGS_SUPPORT_DETERMINISTIC; + } + + err = nvgpu_channel_setup_bind(ch, &bind_args); + assert(err == 0); + assert(nvgpu_atomic_read(&ch->bound) == true); + + ch->deterministic_railgate_allowed = (branches & + F_CHANNEL_DETERMINISTIC_IDLE_RAILGATE_ALLOWED) ? + true : false; + + nvgpu_posix_enable_fault_injection(l_nvgpu_fi, ((branches & + F_CHANNEL_DETERMINISTIC_UNIDLE_GK20ABUSY_FAIL) != 0) ? + true : false, 0); + + gpu_usage_count_initial = g->usage_count.v; + + nvgpu_channel_deterministic_idle(g); + if ((u64)(branches & 0x3U) == + (F_CHANNEL_DETERMINISTIC_IDLE_UNIDLE & + ~F_CHANNEL_DETERMINISTIC_IDLE_RAILGATE_ALLOWED)) { + assert(g->usage_count.v == + (gpu_usage_count_initial - 1)); + } else { + + assert(g->usage_count.v == gpu_usage_count_initial); + } + + nvgpu_channel_deterministic_unidle(g); + if (branches == ((F_CHANNEL_DETERMINISTIC_IDLE_UNIDLE | + F_CHANNEL_DETERMINISTIC_UNIDLE_GK20ABUSY_FAIL) & + ~F_CHANNEL_DETERMINISTIC_IDLE_RAILGATE_ALLOWED)) { + assert(g->usage_count.v == + (gpu_usage_count_initial - 1)); + } else { + assert(g->usage_count.v == gpu_usage_count_initial); + } + + nvgpu_dma_free(g, &ch->usermode_userd); + nvgpu_dma_free(g, &ch->usermode_gpfifo); + ch->userd_iova = 0U; + ch->deterministic = false; + ch->usermode_submit_enabled = false; + nvgpu_atomic_set(&ch->bound, false); + nvgpu_posix_enable_fault_injection(l_nvgpu_fi, false, 0); + } + ret = UNIT_SUCCESS; + +done: + if (ret != UNIT_SUCCESS) { + unit_err(m, "%s branches=%s\n", __func__, + branches_str(branches, + f_channel_deterministic_idle_unidle)); + } + if (ch != NULL) { + nvgpu_channel_close(ch); + } + if (tsg != NULL) { + nvgpu_ref_put(&tsg->refcount, nvgpu_tsg_release); + } + + return ret; +} + +#define F_CHANNEL_SUSPEND_RESUME_UNSERVICEABLE_CH BIT(0) +#define F_CHANNEL_SUSPEND_RESUME_INVALID_TSGID BIT(1) +#define F_CHANNEL_SUSPEND_RESUME_CH_WRK_CMPL_CNCL_SYNC BIT(2) +#define F_CHANNEL_SUSPEND_RESUME_CHS_LAST BIT(3) + +static const char *f_channel_suspend_resume[] = { + "suspend_resume_unserviceable_channels", + "invalid_tsgid", + "work_completion_cancel_sync", +}; + +static int stub_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg) +{ + stub[0].tsgid = tsg->tsgid; + return 0; +} + +static int stub_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch) +{ + stub[0].chid = ch->chid; + return -1; +} + +static void stub_channel_work_completion_cancel_sync(struct nvgpu_channel *ch) +{ + +} + +int test_channel_suspend_resume_serviceable_chs(struct unit_module *m, + struct gk20a *g, void *vargs) +{ + struct nvgpu_channel *ch = NULL; + struct nvgpu_tsg *tsg = NULL; + u32 runlist_id = NVGPU_INVALID_RUNLIST_ID; + bool privileged = false; + bool err; + u32 orig_ch_tsgid; + u32 branches = 0U; + u32 prune = F_CHANNEL_SUSPEND_RESUME_UNSERVICEABLE_CH | + F_CHANNEL_SUSPEND_RESUME_INVALID_TSGID | + F_CHANNEL_SUSPEND_RESUME_CH_WRK_CMPL_CNCL_SYNC; + int ret = UNIT_FAIL; + + tsg = nvgpu_tsg_open(g, getpid()); + assert(tsg != NULL); + ch = nvgpu_channel_open_new(g, runlist_id, + privileged, getpid(), getpid()); + assert(ch != NULL); + + err = nvgpu_tsg_bind_channel(tsg, ch); + assert(err == 0); + + g->ops.fifo.preempt_tsg = stub_fifo_preempt_tsg; + g->ops.fifo.preempt_channel = stub_fifo_preempt_channel; + orig_ch_tsgid = ch->tsgid; + + for (branches = 0U; branches < 
F_CHANNEL_SUSPEND_RESUME_CHS_LAST; + branches++) { + if (subtest_pruned(branches, prune)) { + unit_verbose(m, "%s branches=%s (pruned)\n", __func__, + branches_str(branches, + f_channel_suspend_resume)); + continue; + } + subtest_setup(branches); + unit_verbose(m, "%s branches=%s\n", __func__, + branches_str(branches, + f_channel_suspend_resume)); + + if (branches & F_CHANNEL_SUSPEND_RESUME_UNSERVICEABLE_CH) { + nvgpu_channel_set_unserviceable(ch); + } else { + ch->unserviceable = false; + } + + g->os_channel.work_completion_cancel_sync = branches & + F_CHANNEL_SUSPEND_RESUME_CH_WRK_CMPL_CNCL_SYNC ? + stub_channel_work_completion_cancel_sync : NULL; + + ch->tsgid = branches & F_CHANNEL_SUSPEND_RESUME_INVALID_TSGID ? + NVGPU_INVALID_TSG_ID : orig_ch_tsgid; + + err = nvgpu_channel_suspend_all_serviceable_ch(g); + assert(err == 0); + err = nvgpu_channel_resume_all_serviceable_ch(g); + + if (branches & F_CHANNEL_SUSPEND_RESUME_INVALID_TSGID) { + assert(stub[0].chid == ch->chid); + } else if (branches & + F_CHANNEL_SUSPEND_RESUME_UNSERVICEABLE_CH) { + assert(err == 0); + } else { + assert(stub[0].tsgid == ch->tsgid); + } + } + ret = UNIT_SUCCESS; + +done: + if (ret != UNIT_SUCCESS) { + unit_err(m, "%s branches=%s\n", __func__, + branches_str(branches, + f_channel_suspend_resume)); + } + if (ch != NULL) { + nvgpu_tsg_unbind_channel(tsg, ch); + nvgpu_channel_close(ch); + } + if (tsg != NULL) { + nvgpu_ref_put(&tsg->refcount, nvgpu_tsg_release); + } + + return ret; +} + +#define F_CHANNEL_DEBUG_DUMP_INFOS_ALLOC_FAIL BIT(0) +#define F_CHANNEL_DEBUG_DUMP_INFO_ALLOC_FAIL BIT(1) +#define F_CHANNEL_DEBUG_DUMP_LAST BIT(2) + +static const char *f_channel_debug_dump[] = { + "infos_alloc_fail", + "info_alloc_fail", +}; + +static void stub_channel_read_state(struct gk20a *g, struct nvgpu_channel *ch, + struct nvgpu_channel_hw_state *state) +{ + stub[0].chid = ch->chid; +} + +static void stub_ramfc_capture_ram_dump(struct gk20a *g, + struct nvgpu_channel *ch, struct nvgpu_channel_dump_info *info) +{ + stub[1].chid = ch->chid; +} + +static void stub_channel_debug_dump(struct gk20a *g, + struct nvgpu_debug_context *o, + struct nvgpu_channel_dump_info *info) +{ + +} + +int test_channel_debug_dump(struct unit_module *m, struct gk20a *g, void *vargs) +{ + struct nvgpu_channel *ch = NULL; + struct nvgpu_tsg *tsg = NULL; + struct gpu_ops gops = g->ops; + struct nvgpu_posix_fault_inj *kmem_fi; + struct nvgpu_debug_context o = { + .fn = NULL + }; + + u32 runlist_id = NVGPU_INVALID_RUNLIST_ID; + bool privileged = false; + bool err; + u32 branches = 0U; + u32 fail = F_CHANNEL_DEBUG_DUMP_INFOS_ALLOC_FAIL | + F_CHANNEL_DEBUG_DUMP_INFO_ALLOC_FAIL; + u32 prune = fail; + int ret = UNIT_FAIL; + + tsg = nvgpu_tsg_open(g, getpid()); + assert(tsg != NULL); + + ch = nvgpu_channel_open_new(g, runlist_id, + privileged, getpid(), getpid()); + assert(ch != NULL); + + err = nvgpu_tsg_bind_channel(tsg, ch); + assert(err == 0); + + kmem_fi = nvgpu_kmem_get_fault_injection(); + + g->ops.channel.read_state = stub_channel_read_state; + g->ops.ramfc.capture_ram_dump = stub_ramfc_capture_ram_dump; + g->ops.channel.debug_dump = stub_channel_debug_dump; + + for (branches = 0U; branches < F_CHANNEL_DEBUG_DUMP_LAST; + branches++) { + if (subtest_pruned(branches, prune)) { + unit_verbose(m, "%s branches=%s (pruned)\n", __func__, + branches_str(branches, f_channel_debug_dump)); + continue; + } + subtest_setup(branches); + unit_verbose(m, "%s branches=%s\n", __func__, + branches_str(branches, f_channel_debug_dump)); + + if (branches & 
F_CHANNEL_DEBUG_DUMP_INFOS_ALLOC_FAIL) { + nvgpu_posix_enable_fault_injection(kmem_fi, true, 0); + } + if (branches & F_CHANNEL_DEBUG_DUMP_INFO_ALLOC_FAIL) { + nvgpu_posix_enable_fault_injection(kmem_fi, true, 1); + } + + nvgpu_channel_debug_dump_all(g, &o); + if (branches & fail) { + nvgpu_posix_enable_fault_injection(kmem_fi, false, 0); + } else { + assert(stub[0].chid == ch->chid); + assert(stub[1].chid == ch->chid); + } + } + ret = UNIT_SUCCESS; + +done: + if (ret != UNIT_SUCCESS) { + unit_err(m, "%s branches=%s\n", __func__, + branches_str(branches, f_channel_debug_dump)); + } + if (ch != NULL) { + nvgpu_tsg_unbind_channel(tsg, ch); + nvgpu_channel_close(ch); + } + if (tsg != NULL) { + nvgpu_ref_put(&tsg->refcount, nvgpu_tsg_release); + } + + g->ops = gops; + return ret; +} + +#define F_CHANNEL_SEMAPHORRE_WAKEUP_DETERMINISTIC_CH BIT(0) +#define F_CHANNEL_SEMAPHORRE_WAKEUP_COND_BROADCAST_FAIL BIT(1) +#define F_CHANNEL_SEMAPHORRE_WAKEUP_CH_NOT_BOUND BIT(2) +#define F_CHANNEL_SEMAPHORRE_WAKEUP_LAST BIT(3) + +static const char *f_channel_semaphore_wakeup[] = { + "deterministic_channel", + "condition_broadcast_fail", + "channel_not_bound", +}; + +static u32 global_count; + +static int stub_mm_fb_flush(struct gk20a *g) +{ + stub[0].count = global_count++; + return 0; +} + +int test_channel_semaphore_wakeup(struct unit_module *m, + struct gk20a *g, void *vargs) +{ + struct nvgpu_channel *ch = NULL; + struct nvgpu_tsg *tsg = NULL; + struct nvgpu_mem pdb_mem; + struct mm_gk20a mm; + struct vm_gk20a vm; + u32 runlist_id = NVGPU_INVALID_RUNLIST_ID; + bool privileged = false; + bool err; + u32 branches = 0U; + u32 prune = F_CHANNEL_SEMAPHORRE_WAKEUP_CH_NOT_BOUND; + int ret = UNIT_FAIL; + + struct nvgpu_setup_bind_args bind_args; + + global_count = 0; + + tsg = nvgpu_tsg_open(g, getpid()); + assert(tsg != NULL); + + ch = nvgpu_channel_open_new(g, runlist_id, + privileged, getpid(), getpid()); + assert(ch != NULL); + + err = nvgpu_tsg_bind_channel(tsg, ch); + assert(err == 0); + + memset(&mm, 0, sizeof(mm)); + memset(&vm, 0, sizeof(vm)); + mm.g = g; + vm.mm = &mm; + ch->vm = &vm; + err = nvgpu_dma_alloc(g, PAGE_SIZE, &pdb_mem); + assert(err == 0); + vm.pdb.mem = &pdb_mem; + + g->ops.gr.intr.flush_channel_tlb = stub_gr_intr_flush_channel_tlb; + g->ops.mm.cache.l2_flush = stub_mm_l2_flush; /* bug 2621189 */ + g->os_channel.alloc_usermode_buffers = + stub_os_channel_alloc_usermode_buffers; + g->ops.runlist.update_for_channel = stub_runlist_update_for_channel; + g->ops.mm.cache.fb_flush = stub_mm_fb_flush; + + memset(&bind_args, 0, sizeof(bind_args)); + bind_args.num_gpfifo_entries = 32; + + bind_args.flags |= NVGPU_SETUP_BIND_FLAGS_USERMODE_SUPPORT; + + err = nvgpu_channel_setup_bind(ch, &bind_args); + assert(err == 0); + assert(nvgpu_atomic_read(&ch->bound) == true); + + for (branches = 0U; branches < F_CHANNEL_SEMAPHORRE_WAKEUP_LAST; + branches++) { + if (subtest_pruned(branches, prune)) { + unit_verbose(m, "%s branches=%s (pruned)\n", __func__, + branches_str(branches, + f_channel_semaphore_wakeup)); + continue; + } + subtest_setup(branches); + unit_verbose(m, "%s branches=%s\n", __func__, + branches_str(branches, f_channel_semaphore_wakeup)); + + if (branches & F_CHANNEL_SEMAPHORRE_WAKEUP_DETERMINISTIC_CH) { + ch->deterministic = true; + } + + ch->semaphore_wq.initialized = branches & + F_CHANNEL_SEMAPHORRE_WAKEUP_COND_BROADCAST_FAIL ? 
+ false : true; + + if (branches & F_CHANNEL_SEMAPHORRE_WAKEUP_CH_NOT_BOUND) { + nvgpu_atomic_set(&ch->bound, false); + } else { + nvgpu_atomic_set(&ch->bound, true); + } + + nvgpu_channel_semaphore_wakeup(g, false); + assert(stub[0].count == (global_count - 1U)); + + ch->deterministic = false; + } + ret = UNIT_SUCCESS; + +done: + if (ret != UNIT_SUCCESS) { + unit_err(m, "%s branches=%s\n", __func__, + branches_str(branches, f_channel_semaphore_wakeup)); + } + if (ch != NULL) { + nvgpu_channel_close(ch); + } + if (tsg != NULL) { + nvgpu_ref_put(&tsg->refcount, nvgpu_tsg_release); + } + + return ret; +} + +int test_channel_from_invalid_id(struct unit_module *m, struct gk20a *g, + void *args) +{ + struct nvgpu_channel *ch = NULL; + int ret = UNIT_FAIL; + + ch = nvgpu_channel_from_id(g, NVGPU_INVALID_CHANNEL_ID); + assert(ch == NULL); + + ret = UNIT_SUCCESS; + +done: + return ret; +} + +int test_channel_put_warn(struct unit_module *m, struct gk20a *g, void *vargs) +{ + struct nvgpu_channel *ch = NULL; + struct nvgpu_fifo *f = &g->fifo; + int ret = UNIT_FAIL; + u32 runlist_id = NVGPU_INVALID_RUNLIST_ID; + bool privileged = false; + + ch = nvgpu_channel_open_new(g, runlist_id, + privileged, getpid(), getpid()); + assert(ch != NULL); + assert(f->num_channels > 0U); + + /* condition broadcast fail */ + ch->ref_count_dec_wq.initialized = false; + + nvgpu_atomic_set(&ch->ref_count, 2); + ch->referenceable = true; + nvgpu_channel_put(ch); + + /* + * Note: channel ref_count value is 1 now + * This function call will reduce count to 0 + */ + nvgpu_channel_put(ch); + + ret = UNIT_SUCCESS; + +done: + if (ch != NULL) { + nvgpu_atomic_set(&ch->ref_count, 1); + nvgpu_channel_close(ch); + } + + return ret; +} + +int test_ch_referenceable_cleanup(struct unit_module *m, struct gk20a *g, + void *vargs) +{ + struct nvgpu_channel *ch = NULL; + struct nvgpu_fifo *f = &g->fifo; + int err = 0; + int ret = UNIT_FAIL; + u32 runlist_id = NVGPU_INVALID_RUNLIST_ID; + bool privileged = false; + + err = nvgpu_channel_setup_sw(g); + assert(err == 0); + + ch = nvgpu_channel_open_new(g, runlist_id, + privileged, getpid(), getpid()); + assert(ch != NULL); + assert(f->num_channels > 0U); + + nvgpu_channel_cleanup_sw(g); + assert(ch->referenceable == false); + + /* Reset environment variables */ + err = nvgpu_channel_setup_sw(g); + assert(err == 0); + + ret = UNIT_SUCCESS; +done: + return ret; +} + +int test_channel_abort_cleanup(struct unit_module *m, struct gk20a *g, + void *vargs) +{ + struct nvgpu_channel *ch = NULL; + struct nvgpu_tsg *tsg; + struct mm_gk20a mm; + struct vm_gk20a vm; + int err = 0; + int ret = UNIT_FAIL; + u32 runlist_id = NVGPU_INVALID_RUNLIST_ID; + bool privileged = false; + + tsg = nvgpu_tsg_open(g, getpid()); + assert(tsg != NULL); + + g->ops.gr.intr.flush_channel_tlb = stub_gr_intr_flush_channel_tlb; + + ch = nvgpu_channel_open_new(g, runlist_id, + privileged, getpid(), getpid()); + assert(ch != NULL); + ch->usermode_submit_enabled = true; + + /* Channel requires to be as_bound */ + memset(&mm, 0, sizeof(mm)); + memset(&vm, 0, sizeof(vm)); + mm.g = g; + vm.mm = &mm; + ch->vm = &vm; + nvgpu_ref_init(&vm.ref); + nvgpu_ref_get(&vm.ref); + + err = nvgpu_tsg_bind_channel(tsg, ch); + assert(err == 0); + + ch->user_sync = nvgpu_kzalloc(g, + sizeof(struct nvgpu_channel_sync)); + ch->user_sync->set_safe_state = + stub_channel_sync_syncpt_set_safe_state; + ch->user_sync->destroy = stub_channel_sync_destroy; + + err = nvgpu_tsg_unbind_channel(tsg, ch); + assert(err == 0); + + nvgpu_channel_close(ch); + + ret = 
UNIT_SUCCESS; +done: + return ret; +} + struct unit_module_test nvgpu_channel_tests[] = { UNIT_TEST(setup_sw, test_channel_setup_sw, &unit_ctx, 0), UNIT_TEST(init_support, test_fifo_init_support, &unit_ctx, 0), @@ -1006,6 +1884,17 @@ struct unit_module_test nvgpu_channel_tests[] = { UNIT_TEST(from_inst, test_channel_from_inst, &unit_ctx, 0), UNIT_TEST(enable_disable_tsg, test_channel_enable_disable_tsg, &unit_ctx, 0), + UNIT_TEST(ch_abort, test_channel_abort, &unit_ctx, 0), + UNIT_TEST(mark_error, test_channel_mark_error, &unit_ctx, 0), + UNIT_TEST(sw_quiesce, test_channel_sw_quiesce, &unit_ctx, 0), + UNIT_TEST(idle_unidle, test_channel_deterministic_idle_unidle, &unit_ctx, 0), + UNIT_TEST(suspend_resume, test_channel_suspend_resume_serviceable_chs, &unit_ctx, 0), + UNIT_TEST(debug_dump, test_channel_debug_dump, &unit_ctx, 0), + UNIT_TEST(semaphore_wakeup, test_channel_semaphore_wakeup, &unit_ctx, 0), + UNIT_TEST(channel_from_invalid_id, test_channel_from_invalid_id, &unit_ctx, 0), + UNIT_TEST(channel_put_warn, test_channel_put_warn, &unit_ctx, 0), + UNIT_TEST(referenceable_cleanup, test_ch_referenceable_cleanup, &unit_ctx, 0), + UNIT_TEST(abort_cleanup, test_channel_abort_cleanup, &unit_ctx, 0), UNIT_TEST(remove_support, test_fifo_remove_support, &unit_ctx, 0), }; diff --git a/userspace/units/fifo/channel/nvgpu-channel.h b/userspace/units/fifo/channel/nvgpu-channel.h index 15302d2b6..f129bb3c7 100644 --- a/userspace/units/fifo/channel/nvgpu-channel.h +++ b/userspace/units/fifo/channel/nvgpu-channel.h @@ -42,6 +42,9 @@ struct gk20a; * * Test Type: Feature * + * Targets: nvgpu_channel_setup_sw, nvgpu_channel_init_support, + * nvgpu_channel_destroy, nvgpu_channel_cleanup_sw + * * Input: None * * Steps: @@ -54,7 +57,7 @@ struct gk20a; * Output: Returns PASS if all branches gave expected results. FAIL otherwise. */ int test_channel_setup_sw(struct unit_module *m, - struct gk20a *g, void *args); + struct gk20a *g, void *vargs); /** * Test specification for: test_channel_open @@ -63,6 +66,8 @@ int test_channel_setup_sw(struct unit_module *m, * * Test Type: Feature * + * Targets: nvgpu_channel_open_new + * * Input: test_fifo_init_support() run for this GPU * * Steps: @@ -92,7 +97,7 @@ int test_channel_setup_sw(struct unit_module *m, * Output: Returns PASS if all branches gave expected results. FAIL otherwise. */ int test_channel_open(struct unit_module *m, - struct gk20a *g, void *args); + struct gk20a *g, void *vargs); /** * Test specification for: test_channel_close @@ -101,6 +106,13 @@ int test_channel_open(struct unit_module *m, * * Test Type: Feature * + * Targets: nvgpu_channel_close, nvgpu_channel_kill, channel_free, + * channel_free_invoke_unbind, channel_free_wait_for_refs, + * channel_free_invoke_deferred_engine_reset, + * channel_free_invoke_sync_destroy, + * channel_free_put_deterministic_ref_from_init, + * channel_free_unlink_debug_session + * * Input: test_fifo_init_support() run for this GPU * * Steps: @@ -122,8 +134,7 @@ int test_channel_open(struct unit_module *m, * * Output: Returns PASS if all branches gave expected results. FAIL otherwise. 
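+ *
+ * For reference, the user_sync branch exercised by this test is driven
+ * roughly as sketched below (a minimal sketch; the stub and field names are
+ * the ones defined in nvgpu-channel.c, not production code):
+ *
+ *   ch->user_sync = &user_sync;
+ *   ch->user_sync->set_safe_state = stub_channel_sync_syncpt_set_safe_state;
+ *   ch->user_sync->destroy = stub_channel_sync_destroy;
+ *   nvgpu_channel_close(ch);
+ *   // stub_channel_sync_destroy records that the destroy hook ran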
*/ -int test_channel_close(struct unit_module *m, - struct gk20a *g, void *args); +int test_channel_close(struct unit_module *m, struct gk20a *g, void *vargs); /** * Test specification for: test_channel_setup_bind @@ -132,6 +143,8 @@ int test_channel_close(struct unit_module *m, * * Test Type: Feature * + * Targets: nvgpu_channel_setup_bind, nvgpu_channel_setup_usermode + * * Input: test_fifo_init_support() run for this GPU * * Steps: @@ -160,7 +173,7 @@ int test_channel_close(struct unit_module *m, * Output: Returns PASS if all branches gave expected results. FAIL otherwise. */ int test_channel_setup_bind(struct unit_module *m, - struct gk20a *g, void *args); + struct gk20a *g, void *vargs); /** * Test specification for: test_channel_alloc_inst @@ -169,6 +182,8 @@ int test_channel_setup_bind(struct unit_module *m, * * Test Type: Feature * + * Targets: nvgpu_channel_alloc_inst, nvgpu_channel_free_inst + * * Input: test_fifo_init_support() run for this GPU * * Steps: @@ -186,7 +201,7 @@ int test_channel_setup_bind(struct unit_module *m, * Output: Returns PASS if all branches gave expected results. FAIL otherwise. */ int test_channel_alloc_inst(struct unit_module *m, - struct gk20a *g, void *args); + struct gk20a *g, void *vargs); /** * Test specification for: test_channel_from_inst @@ -195,6 +210,8 @@ int test_channel_alloc_inst(struct unit_module *m, * * Test Type: Feature * + * Targets: nvgpu_channel_refch_from_inst_ptr + * * Input: test_fifo_init_support() run for this GPU * * Steps: @@ -209,7 +226,7 @@ int test_channel_alloc_inst(struct unit_module *m, * Output: Returns PASS if all branches gave expected results. FAIL otherwise. */ int test_channel_from_inst(struct unit_module *m, - struct gk20a *g, void *args); + struct gk20a *g, void *vargs); /** * Test specification for: test_channel_enable_disable_tsg @@ -218,6 +235,8 @@ int test_channel_from_inst(struct unit_module *m, * * Test Type: Feature * + * Targets: nvgpu_channel_enable_tsg, nvgpu_channel_disable_tsg + * * Input: test_fifo_init_support() run for this GPU * * Steps: @@ -232,7 +251,222 @@ int test_channel_from_inst(struct unit_module *m, * Output: Returns PASS if all branches gave expected results. FAIL otherwise. */ int test_channel_enable_disable_tsg(struct unit_module *m, - struct gk20a *g, void *args); + struct gk20a *g, void *vargs); + +/** + * Test specification for: test_channel_abort + * + * Description: Test channel TSG abort + * + * Test Type: Feature based + * + * Targets: nvgpu_channel_abort + * + * Input: test_fifo_init_support() run for this GPU + * + * Steps: + * - Test that TSG abort is invoked for TSG bound channel. + * + * Output: Returns PASS if all branches gave expected results. FAIL otherwise. + */ +int test_channel_abort(struct unit_module *m, struct gk20a *g, void *vargs); + +/** + * Test specification for: test_channel_mark_error + * + * Description: Mark channel as unserviceable + * + * Test Type: Feature based + * + * Targets: nvgpu_channel_mark_error, nvgpu_channel_set_unserviceable, + * nvgpu_channel_ctxsw_timeout_debug_dump_state, + * nvgpu_channel_set_has_timedout_and_wakeup_wqs + * + * Input: test_fifo_init_support() run for this GPU + * + * Steps: + * - Test that the channel can be marked with error (unserviceable). + * - Test broadcast condition fail cases. + * + * Output: Returns PASS if all branches gave expected results. FAIL otherwise. 
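+ *
+ * The broadcast-failure branch is forced roughly as sketched below (a
+ * minimal sketch; the wait-queue fields are the ones poked by the unit test
+ * in nvgpu-channel.c, not part of the production API):
+ *
+ *   ch->semaphore_wq.initialized = false;
+ *   ch->notifier_wq.initialized = false;
+ *   err = nvgpu_channel_mark_error(g, ch);
+ *   assert(err == false);
+ *   assert(ch->unserviceable == true);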
+ */ +int test_channel_mark_error(struct unit_module *m, + struct gk20a *g, void *vargs); + +/** + * Test specification for: test_channel_sw_quiesce + * + * Description: Test emergency quiescing of channels + * + * Test Type: Feature based + * + * Targets: nvgpu_channel_sw_quiesce, nvgpu_channel_set_error_notifier + * + * Input: test_fifo_init_support() run for this GPU + * + * Steps: + * - Check if channel can be placed in quiesce state. + * + * Output: Returns PASS if all branches gave expected results. FAIL otherwise. + */ +int test_channel_sw_quiesce(struct unit_module *m, + struct gk20a *g, void *vargs); + +/** + * Test specification for: test_channel_deterministic_idle_unidle + * + * Description: Stop and allow deterministic channel activity + * + * Test Type: Feature based + * + * Targets: nvgpu_channel_deterministic_idle, nvgpu_channel_deterministic_unidle + * + * Input: test_fifo_init_support() run for this GPU + * + * Steps: + * - Execute deterministic idle and unidle functions and check if gpu usage + * usage count is updated corresponding to input conditions. + * + * Output: Returns PASS if all branches gave expected results. FAIL otherwise. + */ +int test_channel_deterministic_idle_unidle(struct unit_module *m, + struct gk20a *g, void *vargs); + +/** + * Test specification for: test_channel_suspend_resume_serviceable_chs + * + * Description: Test suspend resume of all servicable channels + * + * Test Type: Feature based + * + * Targets: nvgpu_channel_suspend_all_serviceable_ch, + * nvgpu_channel_resume_all_serviceable_ch, + * nvgpu_channel_check_unserviceable + * + * Input: test_fifo_init_support() run for this GPU + * + * Steps: + * - Check if channels can be suspended and resumed. + * + * Output: Returns PASS if all branches gave expected results. FAIL otherwise. + */ +int test_channel_suspend_resume_serviceable_chs(struct unit_module *m, + struct gk20a *g, void *vargs); + +/** + * Test specification for: test_channel_debug_dump + * + * Description: Dump channel debug information + * + * Test Type: Feature based + * + * Targets: nvgpu_channel_debug_dump_all + * + * Input: test_fifo_init_support() run for this GPU + * + * Steps: + * - Dump all debug information for channels. + * + * Output: Returns PASS if all branches gave expected results. FAIL otherwise. + */ +int test_channel_debug_dump(struct unit_module *m, + struct gk20a *g, void *vargs); + +/** + * Test specification for: test_channel_semaphore_wakeup + * + * Description: Wake up threads waiting for semaphore + * + * Test Type: Feature based + * + * Targets: nvgpu_channel_semaphore_wakeup + * + * Input: test_fifo_init_support() run for this GPU + * + * Steps: + * - Execute semaphore_wakeup for deterministic/non-deterministic channels. + * + * Output: Returns PASS if all branches gave expected results. FAIL otherwise. + */ +int test_channel_semaphore_wakeup(struct unit_module *m, + struct gk20a *g, void *vargs); + +/** + * Test specification for: test_channel_from_invalid_id + * + * Description: Test channel reference extracted using channel id + * + * Test Type: Feature based + * + * Targets: nvgpu_channel_from_id + * + * Input: test_fifo_init_support() run for this GPU + * + * Steps: + * - Test corner case to retrieve channel with invalid channel id. + * + * Output: Returns PASS if all branches gave expected results. FAIL otherwise. 
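+ *
+ * The corner case boils down to the following (as implemented in
+ * nvgpu-channel.c):
+ *
+ *   ch = nvgpu_channel_from_id(g, NVGPU_INVALID_CHANNEL_ID);
+ *   assert(ch == NULL);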
+ */ +int test_channel_from_invalid_id(struct unit_module *m, struct gk20a *g, + void *vargs); + +/** + * Test specification for: test_channel_put_warn + * + * Description: Test channel dereference + * + * Test Type: Feature based + * + * Targets: nvgpu_channel_put__func + * + * Input: test_fifo_init_support() run for this GPU + * + * Steps: + * - Test corner cases using referenceable channel and condition broadcast fail + * cases. + * + * Output: Returns PASS if all branches gave expected results. FAIL otherwise. + */ +int test_channel_put_warn(struct unit_module *m, struct gk20a *g, void *vargs); + +/** + * Test specification for: test_ch_referenceable_cleanup + * + * Description: Test channel cleanup corner case + * + * Test Type: Feature based + * + * Targets: nvgpu_channel_cleanup_sw + * + * Input: test_fifo_init_support() run for this GPU + * + * Steps: + * - Open a channel. Test how referenceable channel is cleaned-up/freed. + * + * Output: Returns PASS if all branches gave expected results. FAIL otherwise. + */ +int test_ch_referenceable_cleanup(struct unit_module *m, + struct gk20a *g, void *vargs); + +/** + * Test specification for: test_channel_abort_cleanup + * + * Description: Test channel abort cleanup with user_sync available + * + * Test Type: Feature based + * + * Targets: nvgpu_channel_abort_clean_up + * + * Input: test_fifo_init_support() run for this GPU + * + * Steps: + * - Bind channel to TSG and allocate channel user_sync. Test channel abort + * cleanup while unbinding from TSG. + * + * Output: Returns PASS if all branches gave expected results. FAIL otherwise. + */ +int test_channel_abort_cleanup(struct unit_module *m, struct gk20a *g, + void *vargs); /** * @} */
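Note on the pattern shared by the new cases above: each test enumerates the branches of the function under test as F_* bit flags, optionally prunes combinations that cannot occur together, and then loops over every combination, setting up stubs or fault injection, calling the function, and checking the outcome. The skeleton below is a minimal sketch of that scheme under the following assumptions: test_channel_dummy, the F_DUMMY_* flags and the "dummy" case name are hypothetical and exist only for illustration, while BIT(), u32, subtest_setup(), subtest_pruned(), branches_str(), unit_verbose(), unit_err(), the local assert() wrapper, UNIT_FAIL/UNIT_SUCCESS and UNIT_TEST() are the helpers and macros already used in nvgpu-channel.c (the sketch assumes that file's includes).

#define F_DUMMY_SOME_BRANCH	BIT(0)
#define F_DUMMY_FAIL_BRANCH	BIT(1)
#define F_DUMMY_LAST		BIT(2)

static const char *f_dummy[] = {
	"some_branch",
	"fail_branch",
};

int test_channel_dummy(struct unit_module *m, struct gk20a *g, void *vargs)
{
	u32 branches;
	int ret = UNIT_FAIL;
	int err;
	u32 fail = F_DUMMY_FAIL_BRANCH;	/* combinations expected to fail */
	u32 prune = fail;		/* skip uninteresting combinations */

	for (branches = 0U; branches < F_DUMMY_LAST; branches++) {
		if (subtest_pruned(branches, prune)) {
			unit_verbose(m, "%s branches=%s (pruned)\n", __func__,
				branches_str(branches, f_dummy));
			continue;
		}
		subtest_setup(branches);
		unit_verbose(m, "%s branches=%s\n", __func__,
			branches_str(branches, f_dummy));

		/* per-branch setup of stubs/fault injection, then the call
		 * to the function under test; err = 0 stands in for it */
		err = 0;

		if (branches & fail) {
			/* error-path checks for this combination */
		} else {
			assert(err == 0);
		}
	}
	ret = UNIT_SUCCESS;

done:
	if (ret != UNIT_SUCCESS) {
		unit_err(m, "%s branches=%s\n", __func__,
			branches_str(branches, f_dummy));
	}
	return ret;
}

A case written this way is then registered twice, exactly as the channel cases above: once in nvgpu_channel_tests[] via UNIT_TEST(dummy, test_channel_dummy, &unit_ctx, 0), and once in userspace/required_tests.json with a matching "case"/"unit" entry.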