gpu: nvgpu: fix unit test traceability issues

Some of the functions that lack traceability to unit tests are already
covered through their callee API functions. Exclude these functions from
the SWVR by skipping doxygen documentation for them.
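
As a minimal sketch of the pattern applied in the headers below (the
header and function name here are hypothetical):

    /** @cond DOXYGEN_SHOULD_SKIP_THIS */
    /* Covered via unit tests of the callee API; excluded from SWVR docs. */
    void nvgpu_foo_sw_init(struct gk20a *g);
    /** @endcond DOXYGEN_SHOULD_SKIP_THIS */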

Some of the functions are non-FuSa, such as those in profile.h and
bsearch.h. They were picked up only because their headers are listed in
the Doxygen sources. Mark them non-safe.
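
The safety attribute is carried per unit in the units YAML; after this
change the bsearch entry reads (see the first hunk below):

    bsearch:
      safe: no
      sources: [ include/nvgpu/bsearch.h ]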

Some of the nvgpu functions were missing from the Targets entries of
their respective tests. Add them there.
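
Targets entries are the "Targets:" fields in the unit-test specification
comments; a HAL op is listed alongside the function it resolves to, e.g.
(taken from the acr test header below):

     * Targets: gops_acr.acr_init, nvgpu_acr_init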

JIRA NVGPU-7211

Change-Id: Iacf22dccdd9340100cf93814566d3979734c455d
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2612982
(cherry picked from commit a40f62654747102cc8ef53ddbd9f953c21c2b745)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2737672
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Sagar Kamble <skamble@nvidia.com>
Date: 2021-10-19 15:56:16 +05:30
Committed-by: mobile promotions
Parent: bd5ade293d
Commit: d75473a115
35 changed files with 113 additions and 85 deletions

View File

@@ -16,7 +16,7 @@ bitops:
   sources: [ include/nvgpu/bitops.h ]
 bsearch:
-  safe: yes
+  safe: no
   sources: [ include/nvgpu/bsearch.h ]
 bug:

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -27,8 +27,10 @@ struct gk20a;
 struct nvgpu_acr;
 struct hs_acr;
+/** @cond DOXYGEN_SHOULD_SKIP_THIS */
 void nvgpu_gv11b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr);
 u32 gv11b_acr_lsf_config(struct gk20a *g, struct nvgpu_acr *acr);
+/** @endcond DOXYGEN_SHOULD_SKIP_THIS */
 #endif /* ACR_SW_GV11B_H */

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,7 +22,9 @@
 #ifndef NVGPU_FALCON_SW_GK20A_H
 #define NVGPU_FALCON_SW_GK20A_H
+/** @cond DOXYGEN_SHOULD_SKIP_THIS */
 void gk20a_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn);
 void gk20a_falcon_sw_init(struct nvgpu_falcon *flcn);
+/** @endcond DOXYGEN_SHOULD_SKIP_THIS */
 #endif /* NVGPU_FALCON_SW_GK20A_H */

View File

@@ -2204,7 +2204,9 @@ void nvgpu_channel_debug_dump_all(struct gk20a *g,
 		info->tsgid = ch->tsgid;
 		info->pid = ch->pid;
 		info->refs = nvgpu_atomic_read(&ch->ref_count);
+#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
 		info->deterministic = nvgpu_channel_is_deterministic(ch);
+#endif
 		if (tsg) {
 			if (tsg->nvs_domain) {
 				domain_name = nvgpu_nvs_domain_get_name(tsg->nvs_domain);

View File

@@ -1,7 +1,7 @@
 /*
  * GV11B Tegra HAL interface
  *
- * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -26,5 +26,7 @@
 #define NVGPU_HAL_GV11B_H
 struct gk20a;
+/** @cond DOXYGEN_SHOULD_SKIP_THIS */
 int gv11b_init_hal(struct gk20a *g);
+/** @endcond DOXYGEN_SHOULD_SKIP_THIS */
 #endif /* NVGPU_HAL_GV11B_H */

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -30,6 +30,7 @@
 struct gating_desc;
 struct gk20a;
+/** @cond DOXYGEN_SHOULD_SKIP_THIS */
 void gv11b_slcg_bus_load_gating_prod(struct gk20a *g,
 	bool prod);
 u32 gv11b_slcg_bus_gating_prod_size(void);
@@ -145,4 +146,6 @@ void gv11b_blcg_hshub_load_gating_prod(struct gk20a *g,
 u32 gv11b_blcg_hshub_gating_prod_size(void);
 const struct gating_desc *gv11b_blcg_hshub_get_gating_prod(void);
+/** @endcond DOXYGEN_SHOULD_SKIP_THIS */
 #endif /* GV11B_GATING_REGLIST_H */

View File

@@ -585,8 +585,6 @@ int nvgpu_channel_set_syncpt(struct nvgpu_channel *ch);
 bool nvgpu_channel_update_and_check_ctxsw_timeout(struct nvgpu_channel *ch,
 		u32 timeout_delta_ms, bool *progress);
-#endif /* CONFIG_NVGPU_KERNEL_MODE_SUBMIT */
-
 static inline bool nvgpu_channel_is_deterministic(struct nvgpu_channel *c)
 {
 #ifdef CONFIG_NVGPU_DETERMINISTIC_CHANNELS
@@ -597,6 +595,8 @@ static inline bool nvgpu_channel_is_deterministic(struct nvgpu_channel *c)
 #endif
 }
+
+#endif /* CONFIG_NVGPU_KERNEL_MODE_SUBMIT */
 /**
  * @brief Get channel pointer from its node in free channels list.
  *

View File

@@ -1,7 +1,7 @@
 /*
  * Nvgpu Channel Synchronization Abstraction
  *
- * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -95,7 +95,6 @@ void nvgpu_channel_sync_get_ref(struct nvgpu_channel_sync *s);
  * Decrement the usage_counter for this instance and return if equals 0.
  */
 bool nvgpu_channel_sync_put_ref_and_check(struct nvgpu_channel_sync *s);
-#endif /* CONFIG_NVGPU_KERNEL_MODE_SUBMIT */
 /**
  * @brief Free channel syncpoint/semaphore
@@ -133,4 +132,6 @@ struct nvgpu_channel_sync *nvgpu_channel_sync_create(struct nvgpu_channel *c);
  */
 bool nvgpu_channel_sync_needs_os_fence_framework(struct gk20a *g);
+
+#endif /* CONFIG_NVGPU_KERNEL_MODE_SUBMIT */
 #endif /* NVGPU_CHANNEL_SYNC_H */

View File

@@ -2,7 +2,7 @@
  *
  * Nvgpu Channel Synchronization Abstraction (Syncpoints)
  *
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -36,6 +36,8 @@ struct nvgpu_channel;
 struct nvgpu_channel_sync_syncpt;
 struct priv_cmd_entry;
+#ifdef CONFIG_NVGPU_NON_FUSA
+
 #ifdef CONFIG_TEGRA_GK20A_NVHOST
 /**
@@ -116,4 +118,6 @@ nvgpu_channel_sync_syncpt_create(struct nvgpu_channel *c)
 #endif
+
+#endif /* CONFIG_NVGPU_NON_FUSA */
 #endif /* NVGPU_CHANNEL_SYNC_SYNCPT_H */

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -101,6 +101,7 @@ struct nvgpu_ecc_stat {
 	struct nvgpu_list_node node;
 };
+#ifdef CONFIG_NVGPU_SYSFS
 /**
  * @brief Helper function to get struct nvgpu_ecc_stat from list node.
  *
@@ -116,6 +117,7 @@ static inline struct nvgpu_ecc_stat *nvgpu_ecc_stat_from_node(
 			(uintptr_t)node - offsetof(struct nvgpu_ecc_stat, node)
 		);
 }
+#endif
 /**
  * The structure contains the error statistics assocaited with constituent

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -31,6 +31,8 @@ struct nvgpu_mapped_buf;
 struct priv_cmd_entry;
 struct nvgpu_channel;
+#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
+
 struct nvgpu_channel_job {
 	struct nvgpu_mapped_buf **mapped_buffers;
 	u32 num_mapped_buffers;
@@ -55,5 +57,6 @@ void nvgpu_channel_joblist_delete(struct nvgpu_channel *c,
 int nvgpu_channel_joblist_init(struct nvgpu_channel *c, u32 num_jobs);
 void nvgpu_channel_joblist_deinit(struct nvgpu_channel *c);
+#endif /* CONFIG_NVGPU_KERNEL_MODE_SUBMIT */
 #endif

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -275,6 +275,7 @@ void nvgpu_kmem_cache_free(struct nvgpu_kmem_cache *cache, void *ptr);
  */
 int nvgpu_kmem_init(struct gk20a *g);
+/** @cond DOXYGEN_SHOULD_SKIP_THIS */
 /**
  * @brief Finalize the kmem tracking code
  *
@@ -285,6 +286,7 @@ int nvgpu_kmem_init(struct gk20a *g);
  * @param flags [in] Flags that control operation of this finalization.
  */
 void nvgpu_kmem_fini(struct gk20a *g, int flags);
+/** @endcond DOXYGEN_SHOULD_SKIP_THIS */
 /**
  * These will simply be ignored if CONFIG_NVGPU_TRACK_MEM_USAGE is not defined.
@@ -316,6 +318,7 @@ void nvgpu_kmem_fini(struct gk20a *g, int flags);
  */
 void *nvgpu_big_alloc_impl(struct gk20a *g, size_t size, bool clear);
+#ifdef CONFIG_NVGPU_DGPU
 /**
  * @brief Macro to allocate memory
  *
@@ -335,6 +338,7 @@ static inline void *nvgpu_big_malloc(struct gk20a *g, size_t size)
 {
 	return nvgpu_big_alloc_impl(g, size, false);
 }
+#endif
 /**
  * @brief Macro to allocate a zero initialised memory.

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -31,6 +31,7 @@
 #include <nvgpu_rmos/include/log.h>
 #endif
+#ifdef CONFIG_NVGPU_NON_FUSA
 /**
  * nvgpu_log_mask_enabled - Check if logging is enabled
  *
@@ -43,6 +44,7 @@
  * critical paths.
  */
 bool nvgpu_log_mask_enabled(struct gk20a *g, u64 log_mask);
+#endif
 /**
  * nvgpu_log - Print a debug message

View File

@@ -159,6 +159,8 @@ typedef struct nvgpu_posix_atomic64 {
 #define NVGPU_POSIX_ATOMIC_XCHG(v, new) \
 	atomic_exchange(&((v)->v), (new))
+/** @cond DOXYGEN_SHOULD_SKIP_THIS */
+
 /**
  * @brief POSIX implementation of atomic set.
  *
@@ -645,4 +647,5 @@ static inline bool nvgpu_atomic64_sub_and_test_impl(long x, nvgpu_atomic64_t *v)
 	return NVGPU_POSIX_ATOMIC_SUB_RETURN(v, x) == 0;
 }
+/** @endcond DOXYGEN_SHOULD_SKIP_THIS */
 #endif /* NVGPU_POSIX_ATOMIC_H */

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -30,6 +30,7 @@ struct vm_gk20a;
 struct priv_cmd_entry;
 struct priv_cmd_queue;
+#ifdef CONFIG_NVGPU_NON_FUSA
 int nvgpu_priv_cmdbuf_queue_alloc(struct vm_gk20a *vm,
 		u32 job_count, struct priv_cmd_queue **queue);
 void nvgpu_priv_cmdbuf_queue_free(struct priv_cmd_queue *q);
@@ -48,5 +49,5 @@ void nvgpu_priv_cmdbuf_append_zeros(struct gk20a *g, struct priv_cmd_entry *e,
 void nvgpu_priv_cmdbuf_finish(struct gk20a *g, struct priv_cmd_entry *e,
 		u64 *gva, u32 *size);
-
+#endif /* CONFIG_NVGPU_NON_FUSA */
 #endif

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -37,7 +37,7 @@ struct unit_module;
  *
  * Test Type: Feature, Error guessing
  *
- * Targets: nvgpu_acr_init
+ * Targets: gops_acr.acr_init, nvgpu_acr_init
  *
  * Input: None
  *
@@ -65,7 +65,9 @@ int test_acr_init(struct unit_module *m, struct gk20a *g, void *args);
  *
  * Test Type: Feature, Error guessing
  *
- * Targets: g->acr->prepare_ucode_blob
+ * Targets: nvgpu_acr_prepare_ucode_blob, flcn64_set_dma,
+ * nvgpu_acr_lsf_fecs_ucode_details, nvgpu_acr_lsf_gpccs_ucode_details,
+ * nvgpu_acr_alloc_blob_space_sys, nvgpu_acr_wpr_info_sys
  *
  * Input: None
  * Steps:
@@ -119,7 +121,7 @@ int test_acr_is_lsf_lazy_bootstrap(struct unit_module *m, struct gk20a *g,
  *
  * Test Type: Feature, Error guessing
  *
- * Targets: nvgpu_acr_construct_execute
+ * Targets: gops_acr.acr_construct_execute, nvgpu_acr_construct_execute
  *
  * Input: None
  *
@@ -149,7 +151,9 @@ int test_acr_construct_execute(struct unit_module *m,
  *
  * Test Type: Feature, Error guessing
  *
- * Targets: nvgpu_acr_bootstrap_hs_acr, nvgpu_pmu_report_bar0_pri_err_status,
+ * Targets: nvgpu_acr_bootstrap_hs_acr, nvgpu_acr_bootstrap_hs_ucode,
+ * nvgpu_acr_lsf_fecs_ucode_details, nvgpu_acr_lsf_gpccs_ucode_details,
+ * nvgpu_pmu_report_bar0_pri_err_status,
  * gops_pmu.validate_mem_integrity, gv11b_pmu_validate_mem_integrity,
  * gops_pmu.is_debug_mode_enabled, gv11b_pmu_is_debug_mode_en,
  * gops_acr.pmu_clear_bar0_host_err_status,

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -39,13 +39,14 @@ struct unit_module;
  *
  * Test Type: Feature, Boundary Values
  *
- * Targets: gops_class.is_valid, gv11b_class_is_valid
- * Equivalence classes:
+ * Targets: gops_class.is_valid, gv11b_class_is_valid,
+ * gops_class.is_valid_compute, gv11b_class_is_valid_compute,
+ *
+ * Equivalence classes for all class:
  * Variable: class_num
  * - Valid : { 0xC3C0U }, { 0xC3B5U }, { 0xC36FU }, { 0xC397U }
  *
- * Targets: gops_class.is_valid_compute, gv11b_class_is_valid_compute,
- * Equivalence classes:
+ * Equivalence classes for compute class:
  * Variable: class_num
  * - Valid : { 0xC3C0U }
  *

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -162,7 +162,8 @@ int fb_mmu_fault_gv11b_init_test(struct unit_module *m, struct gk20a *g,
  * Description: Ensure all HAL functions work without causing an ABORT.
  *
  * Targets: gv11b_fb_is_fault_buf_enabled, gv11b_fb_fault_buffer_get_ptr_update,
- * gv11b_fb_write_mmu_fault_buffer_size, gv11b_fb_fault_buf_set_state_hw,
+ * gv11b_fb_write_mmu_fault_buffer_size,
+ * fb_gv11b_write_mmu_fault_buffer_get, gv11b_fb_fault_buf_set_state_hw,
  * gv11b_fb_read_mmu_fault_status, gv11b_fb_fault_buf_configure_hw,
  * gv11b_fb_is_fault_buffer_empty, gv11b_fb_read_mmu_fault_addr_lo_hi,
  * gops_fb.fault_buf_configure_hw, gops_fb.fault_buf_set_state_hw,
@@ -234,7 +235,9 @@ int fb_mmu_fault_gv11b_snap_reg(struct unit_module *m, struct gk20a *g,
  * Description: Test the gv11b_fb_handle_mmu_fault HAL for all supported
  * interrupt statuses.
  *
- * Targets: gv11b_fb_handle_mmu_fault, gv11b_fb_fault_buf_set_state_hw
+ * Targets: gv11b_fb_handle_mmu_fault, gv11b_fb_fault_buf_set_state_hw,
+ * gv11b_fb_handle_nonreplay_fault_overflow,
+ * gv11b_fb_handle_dropped_mmu_fault
  *
  * Test Type: Feature
  *

View File

@@ -1039,7 +1039,7 @@ int test_channel_alloc_inst(struct unit_module *m, struct gk20a *g, void *vargs)
 			branches & F_CHANNEL_ALLOC_INST_ENOMEM ?
 			true : false, 0);
-		err = nvgpu_channel_alloc_inst(g, ch);
+		err = g->ops.channel.alloc_inst(g, ch);
 		if (branches & fail) {
 			unit_assert(err != 0, goto done);
@@ -1051,7 +1051,7 @@ int test_channel_alloc_inst(struct unit_module *m, struct gk20a *g, void *vargs)
 				APERTURE_INVALID, goto done);
 		}
-		nvgpu_channel_free_inst(g, ch);
+		g->ops.channel.free_inst(g, ch);
 		unit_assert(ch->inst_block.aperture == APERTURE_INVALID,
 			goto done);
 	}

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -189,7 +189,8 @@ int test_channel_setup_bind(struct unit_module *m,
  *
  * Test Type: Feature, Error injection
  *
- * Targets: nvgpu_channel_alloc_inst, nvgpu_channel_free_inst
+ * Targets: gops_channel.alloc_inst, nvgpu_channel_alloc_inst,
+ * gops_channel.free_inst, nvgpu_channel_free_inst
  *
  * Input: test_fifo_init_support() run for this GPU
  *

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -70,7 +70,7 @@ int test_pbdma_setup_sw(struct unit_module *m,
  *
  * Targets: nvgpu_pbdma_status_is_chsw_switch, nvgpu_pbdma_status_is_chsw_load,
  * nvgpu_pbdma_status_is_chsw_save, nvgpu_pbdma_status_is_chsw_valid,
- * nvgpu_pbdma_status_is_id_type_tsg,
+ * nvgpu_pbdma_status_ch_not_loaded, nvgpu_pbdma_status_is_id_type_tsg,
  * nvgpu_pbdma_status_is_next_id_type_tsg
  *
  * Input: test_fifo_init_support() run for this GPU.

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -188,25 +188,6 @@ int test_runlist_reload_ids(struct unit_module *m, struct gk20a *g, void *args);
 int test_runlist_update_locked(struct unit_module *m, struct gk20a *g,
 		void *args);
-/**
- * Test specification for: test_runlist_update_for_channel
- *
- * Description: Add/remove channel to/from runlist.
- *
- * Test Type: Feature
- *
- * Targets: nvgpu_runlist_update_for_channel
- *
- * Input: test_fifo_init_support
- *
- * Steps:
- * - Check that this API can be used to remove channels from runlist.
- *
- * Output: Returns PASS if all branches gave expected results. FAIL otherwise.
- */
-int test_runlist_update_for_channel(struct unit_module *m, struct gk20a *g,
-		void *args);
 /**
  * Test specification for: test_tsg_format_gen
  *

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -50,7 +50,8 @@ struct gk20a;
  * gm20b_gr_falcon_init_ctx_state,
  * gm20b_gr_falcon_submit_fecs_method_op,
  * nvgpu_gr_get_falcon_ptr,
- * gm20b_gr_falcon_ctrl_ctxsw
+ * gm20b_gr_falcon_ctrl_ctxsw,
+ * gv11b_gr_falcon_ctrl_ctxsw
  *
  * Steps:
  * - Call gm20b_gr_falcon_ctrl_ctxsw with watchdog timeout Method.

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -44,11 +44,13 @@ struct unit_module;
  * nvgpu_gr_falcon_load_secure_ctxsw_ucode,
  * gops_gr_falcon.load_ctxsw_ucode,
  * gops_gr_falcon.get_fecs_ctx_state_store_major_rev_id,
+ * gops_gr_falcon.fecs_ctxsw_clear_mailbox,
  * gm20b_gr_falcon_get_fecs_ctx_state_store_major_rev_id,
  * gm20b_gr_falcon_get_gpccs_start_reg_offset,
  * gm20b_gr_falcon_start_gpccs,
  * gm20b_gr_falcon_fecs_base_addr,
- * gm20b_gr_falcon_gpccs_base_addr
+ * gm20b_gr_falcon_gpccs_base_addr,
+ * gm20b_gr_falcon_fecs_ctxsw_clear_mailbox
  *
  * Input: #test_gr_init_setup_ready must have been executed successfully.
  *

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -51,7 +51,8 @@ struct unit_module;
  * gm20b_gr_falcon_read_status1_fecs_ctxsw,
  * gm20b_gr_falcon_get_fecs_ctxsw_mailbox_size,
  * gm20b_gr_falcon_fecs_host_clear_intr,
- * nvgpu_gr_intr_remove_support
+ * nvgpu_gr_intr_remove_support,
+ * gp10b_gr_intr_handle_class_error
  *
  * Input: #test_gr_init_setup_ready must have been executed successfully.
  *
@@ -247,6 +248,7 @@ int test_gr_intr_fecs_exceptions(struct unit_module *m,
  * gv11b_gr_intr_get_sm_hww_global_esr,
  * gops_gr_intr.get_sm_no_lock_down_hww_global_esr_mask,
  * gv11b_gr_intr_get_sm_no_lock_down_hww_global_esr_mask,
+ * gv11b_gr_intr_get_warp_esr_pc_sm_hww,
  * nvgpu_gr_intr_set_error_notifier,
  * nvgpu_gr_intr_stall_isr,
  * gops_gr_intr.read_pending_interrupts,

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -123,6 +123,7 @@ int test_gr_init_prepare(struct unit_module *m, struct gk20a *g, void *args);
  * gv11b_gr_init_get_alpha_cb_default_size,
  * gv11b_gr_init_tpc_mask,
  * gops_gr_init.get_no_of_sm,
+ * gops_gr_falcon.fecs_ctxsw_clear_mailbox,
  * nvgpu_gr_get_no_of_sm,
  * gm20b_gr_init_pd_tpc_per_gpc,
  * gm20b_gr_init_cwd_gpcs_tpcs_num,
@@ -131,7 +132,9 @@ int test_gr_init_prepare(struct unit_module *m, struct gk20a *g, void *args);
  * gp10b_gr_init_pagepool_default_size,
  * gv11b_gr_falcon_fecs_host_int_enable,
  * nvgpu_gr_falcon_get_golden_image_size,
- * gm20b_gr_falcon_start_fecs
+ * gm20b_gr_falcon_start_fecs,
+ * gm20b_gr_falcon_fecs_ctxsw_clear_mailbox,
+ * gr_intr_report_ctxsw_error
  *
  * Input: test_gr_init_setup and test_gr_init_prepare
  * must have been executed successfully.

View File

@@ -163,7 +163,7 @@ int test_get_put(struct unit_module *m,
  * - init_test_setup_env() must be called before.
  *
  * Targets: nvgpu_check_gpu_state, is_nvgpu_gpu_state_valid,
- * gops_mc.get_chip_details
+ * gops_mc.get_chip_details, gm20b_get_chip_details
  *
  * Steps:
  * - Test valid case.

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -103,6 +103,7 @@ int test_mutex_tryacquire(struct unit_module *m, struct gk20a *g, void *args);
  * nvgpu_mutex_acquire, nvgpu_spinlock_acquire,
  * nvgpu_raw_spinlock_acquire, nvgpu_mutex_release,
  * nvgpu_spinlock_release, nvgpu_raw_spinlock_release,
+ * nvgpu_spinlock_irqsave, nvgpu_spinunlock_irqrestore,
  * nvgpu_posix_lock_acquire, nvgpu_posix_lock_release
  *
  * Input: @param args [in] Type of lock as defined by TYPE_* macros.

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -218,7 +218,8 @@ int test_is_stall_and_eng_intr_pending(struct unit_module *m, struct gk20a *g,
  *
  * Test Type: Feature
  *
- * Targets: gops_mc.isr_stall, mc_gp10b_isr_stall
+ * Targets: gops_mc.isr_stall, mc_gp10b_isr_stall,
+ * gops_mc.ltc_isr
  *
  * Input: test_mc_setup_env must have been run.
 *

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -120,11 +120,7 @@ int test_page_faults_disable_hw(struct unit_module *m, struct gk20a *g,
  *
  * Test Type: Feature
  *
-<<<<<<< HEAD
- * Targets: gops_mm.gops_mm_gmmu.get_default_big_page_size,
-=======
  * Targets: gops_mm_gmmu.get_default_big_page_size,
->>>>>>> 2769ccf4e... gpu: nvgpu: userspace: update "Targets" field for mm
  * nvgpu_gmmu_default_big_page_size, nvgpu_alloc_inst_block,
  * gops_mm.init_inst_block, gv11b_mm_init_inst_block
  *

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -45,7 +45,7 @@ struct unit_module;
  * nvgpu_vm_find_mapped_buf_less_than, nvgpu_get_pte, nvgpu_vm_put_buffers,
  * nvgpu_vm_unmap, nvgpu_vm_area_free, nvgpu_vm_put, nvgpu_vm_find_mapped_buf,
  * nvgpu_vm_area_find, nvgpu_vm_unmap_ref_internal, nvgpu_vm_unmap_system,
- * nvgpu_os_buf_get_size
+ * nvgpu_os_buf_get_size, nvgpu_vm_area_from_vm_area_list
  *
  * Input: None
  *
@@ -215,7 +215,8 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args);
  *
  * Test Type: Error injection
  *
- * Targets: nvgpu_vm_init, nvgpu_vm_map, nvgpu_vm_put
+ * Targets: nvgpu_vm_init, nvgpu_vm_map, nvgpu_vm_put, nvgpu_vm_area_from_vm_area_list,
+ * nvgpu_mapped_buf_from_buffer_list
  *
  * Input: None
 *

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -38,7 +38,7 @@
  * Test Type: Feature
  *
  * Targets: nvgpu_posix_bug, dump_stack,
- * BUG, BUG_ON, nvgpu_assert
+ * bug_on_internal, nvgpu_assert
  *
  * Inputs: None
 *

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -229,7 +229,7 @@ int test_kmem_virtual_alloc(struct unit_module *m,
  *
  * Test Type: Feature
  *
- * Targets: nvgpu_big_alloc_impl, nvgpu_big_free
+ * Targets: nvgpu_big_zalloc, nvgpu_big_alloc_impl, nvgpu_big_free
  *
  * Inputs:
  * 1) GPU driver struct g.

View File

@@ -77,7 +77,7 @@ int test_priv_ring_free_reg_space(struct unit_module *m, struct gk20a *g, void *
  *
  * Test Type: Feature
  *
- * Targets: gops_priv_ring.enable_priv_ring, gm20b_enable_priv_ring
+ * Targets: gops_priv_ring.enable_priv_ring, gm20b_priv_ring_enable
  *
  * Input: test_priv_ring_setup() has been executed.
  *
@@ -100,7 +100,7 @@ int test_enable_priv_ring(struct unit_module *m, struct gk20a *g, void *args);
  *
  * Test Type: Feature
  *
- * Targets: gops_priv_ring.enum_ltc, gm20b_priv_ring_enum_ltc.
+ * Targets: gops_priv_ring.enum_ltc, gm20b_priv_ring_enum_ltc
  *
  * Input: test_priv_ring_setup() has been executed.
  *
@@ -203,7 +203,7 @@ int test_priv_ring_isr(struct unit_module *m, struct gk20a *g, void *args);
  *
  * Test Type: Feature, Error injection, Boundary Value
  *
- * Targets: gops_priv_ring.decode_error_code, gp10b_decode_error_code
+ * Targets: gops_priv_ring.decode_error_code, gp10b_priv_ring_decode_error_code
  *
  * Input: test_priv_ring_setup() has been executed.
  * Equivalence classes:

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -113,9 +113,9 @@ int test_sync_create_destroy_sync(struct unit_module *m, struct gk20a *g, void *
  *
  * Test Type: Feature
  *
- * Targets: nvgpu_channel_user_syncpt_destroy,
+ * Targets: gops_sync_syncpt.free_buf, nvgpu_channel_user_syncpt_destroy,
  * nvgpu_channel_user_syncpt_set_safe_state,
- * nvgpu_channel_user_syncpt_create
+ * gops_sync_syncpt.alloc_buf, nvgpu_channel_user_syncpt_create
  *
  * Input: test_sync_init run for this GPU
 *