mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
nvgpu: unit: Add new mock register framework
Many tests used various incarnations of a mock register framework based on a dump of gv11b registers. Tests that benefit from having generally sane register values all rely heavily on this framework; however, every test essentially did its own thing. This was inefficient and has caused some issues in cleaning up the device and host code.

Therefore, introduce a much leaner and simpler register framework. All unit tests now automatically get a good subset of the gv11b registers auto-populated. As part of this, also populate the HAL with an nvgpu_detect_chip() call. Many tests can now _probably_ have all their HAL init (except dummy HAL stuff) deleted, but this does require a few fixups here and there to set HALs to NULL where tests expect them to be NULL by default. Where necessary, HALs are cleared with a memset to prevent unwanted code from executing.

Overall, this imposes a far smaller burden on tests to initialize their environments. Something to consider for the future, though, is how to handle supporting multiple chips in the unit test world.

JIRA NVGPU-5422

Change-Id: Icf1a63f728e9c5671ee0fdb726c235ffbd2843e2
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2335334
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
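As a rough illustration only: under the new framework, a test's setup path might shrink to something like the sketch below. This is a hypothetical sketch, not code from this change; init_test_env() is an invented name, and the idea that the framework pre-populates the gv11b register space before the test body runs is inferred from the commit message. Only nvgpu_detect_chip(), unit_return_fail(), and the HAL field shown actually appear in this change.

	/* Hypothetical sketch of test init under the new mock register framework. */
	static int init_test_env(struct unit_module *m, struct gk20a *g)
	{
		int err;

		/*
		 * No per-test nvgpu_posix_io_init_reg_space() or
		 * nvgpu_posix_io_add_reg_space() calls here: the shared
		 * framework is assumed to have auto-populated a sane subset
		 * of the gv11b registers already.
		 */

		/* Detect the chip from the mock registers and populate g->ops. */
		err = nvgpu_detect_chip(g);
		if (err != 0) {
			unit_return_fail(m, "nvgpu_detect_chip failed\n");
		}

		/* Tests that expect a HAL to be absent must clear it explicitly. */
		g->ops.fb.fb_ecc_init = NULL;

		return 0;
	}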
@@ -155,6 +155,7 @@ int test_init_mm(struct unit_module *m, struct gk20a *g, void *args)
 		gv11b_fb_read_mmu_fault_buffer_size;
 	g->ops.fb.init_hw = gv11b_fb_init_hw;
 	g->ops.fb.intr.enable = gv11b_fb_intr_enable;
+	g->ops.fb.fb_ecc_init = NULL;
 
 	err = nvgpu_init_mm_support(g);
 	if (err != 0) {
@@ -237,7 +237,6 @@ int test_mm_dma_init(struct unit_module *m, struct gk20a *g, void *args)
 	}
 
 	nvgpu_posix_register_io(g, &pramin_callbacks);
-	nvgpu_posix_io_init_reg_space(g);
 
 #ifdef CONFIG_NVGPU_DGPU
 	/* Minimum HAL init for PRAMIN */
@@ -141,7 +141,6 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 		gv11b_mm_mmu_fault_info_mem_destroy;
 
 	nvgpu_posix_register_io(g, &mmu_faults_callbacks);
-	nvgpu_posix_io_init_reg_space(g);
 
 	/* Register space: FB_MMU */
 	if (nvgpu_posix_io_add_reg_space(g, flush_fb_flush_r(), 0x800) != 0) {
@@ -130,7 +130,6 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 		gv11b_mm_mmu_fault_info_mem_destroy;
 
 	nvgpu_posix_register_io(g, &mmu_faults_callbacks);
-	nvgpu_posix_io_init_reg_space(g);
 
 	/* Register space: FB_MMU */
 	if (nvgpu_posix_io_add_reg_space(g, flush_fb_flush_r(), 0x800) != 0) {
@@ -104,22 +104,23 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	p->mm_is_iommuable = true;
 
 	/* Minimum HALs for page_table */
+	memset(&g->ops.bus, 0, sizeof(g->ops.bus));
+	memset(&g->ops.fb, 0, sizeof(g->ops.fb));
+	g->ops.fb.init_hw = gv11b_fb_init_hw;
+	g->ops.fb.intr.enable = gv11b_fb_intr_enable;
+	g->ops.ramin.init_pdb = gp10b_ramin_init_pdb;
+	g->ops.ramin.alloc_size = gk20a_ramin_alloc_size;
 	g->ops.mm.gmmu.get_default_big_page_size =
 		nvgpu_gmmu_default_big_page_size;
 	g->ops.mm.init_inst_block = gv11b_mm_init_inst_block;
 	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
-	g->ops.ramin.init_pdb = gp10b_ramin_init_pdb;
-	g->ops.ramin.alloc_size = gk20a_ramin_alloc_size;
 	g->ops.mm.setup_hw = nvgpu_mm_setup_hw;
-	g->ops.fb.init_hw = gv11b_fb_init_hw;
-	g->ops.fb.intr.enable = gv11b_fb_intr_enable;
 	g->ops.mm.cache.fb_flush = gk20a_mm_fb_flush;
 	g->ops.mm.mmu_fault.info_mem_destroy =
 		gv11b_mm_mmu_fault_info_mem_destroy;
 	g->ops.mc.intr_stall_unit_config = mc_gp10b_intr_stall_unit_config;
 
 	nvgpu_posix_register_io(g, &mmu_faults_callbacks);
-	nvgpu_posix_io_init_reg_space(g);
 
 	/* Register space: FB_MMU */
 	if (nvgpu_posix_io_add_reg_space(g, fb_niso_intr_r(), 0x800) != 0) {
@@ -100,21 +100,22 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	p->mm_is_iommuable = true;
 
 	/* Minimum HALs for page_table */
+	memset(&g->ops.bus, 0, sizeof(g->ops.bus));
+	memset(&g->ops.fb, 0, sizeof(g->ops.fb));
+	g->ops.fb.init_hw = gv11b_fb_init_hw;
+	g->ops.fb.intr.enable = gv11b_fb_intr_enable;
+	g->ops.ramin.init_pdb = gp10b_ramin_init_pdb;
+	g->ops.ramin.alloc_size = gk20a_ramin_alloc_size;
+	g->ops.mc.intr_stall_unit_config = mc_gp10b_intr_stall_unit_config;
 	g->ops.mm.gmmu.get_default_big_page_size =
 		nvgpu_gmmu_default_big_page_size;
 	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
-	g->ops.ramin.init_pdb = gp10b_ramin_init_pdb;
-	g->ops.ramin.alloc_size = gk20a_ramin_alloc_size;
 	g->ops.mm.setup_hw = nvgpu_mm_setup_hw;
-	g->ops.fb.init_hw = gv11b_fb_init_hw;
-	g->ops.fb.intr.enable = gv11b_fb_intr_enable;
 	g->ops.mm.cache.fb_flush = gk20a_mm_fb_flush;
-	g->ops.mc.intr_stall_unit_config = mc_gp10b_intr_stall_unit_config;
 	g->ops.mm.mmu_fault.info_mem_destroy =
 		gv11b_mm_mmu_fault_info_mem_destroy;
 
 	nvgpu_posix_register_io(g, &mmu_faults_callbacks);
-	nvgpu_posix_io_init_reg_space(g);
 
 	/* Register space: FB_MMU */
 	if (nvgpu_posix_io_add_reg_space(g, fb_niso_intr_r(), 0x800) != 0) {
@@ -155,7 +155,6 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 		gv11b_mm_mmu_fault_parse_mmu_fault_info;
 
 	nvgpu_posix_register_io(g, &mmu_faults_callbacks);
-	nvgpu_posix_io_init_reg_space(g);
 
 	/* Register space: FB_MMU */
 	if (nvgpu_posix_io_add_reg_space(g, fb_mmu_ctrl_r(), 0x800) != 0) {
@@ -497,7 +497,6 @@ int test_mm_init_hal(struct unit_module *m, struct gk20a *g, void *args)
 	g->ops.mm.setup_hw = int_empty_hal;
 
 	nvgpu_posix_register_io(g, &mmu_faults_callbacks);
-	nvgpu_posix_io_init_reg_space(g);
 
 	/* Register space: FB_MMU */
 	if (nvgpu_posix_io_add_reg_space(g, fb_niso_intr_r(), 0x800) != 0) {
@@ -565,6 +564,7 @@ int test_mm_remove_mm_support(struct unit_module *m, struct gk20a *g,
 		unit_return_fail(m, "nvgpu_pd_cache_init failed ??\n");
 	}
 
+	g->ops.mm.mmu_fault.info_mem_destroy = NULL;
 	g->mm.remove_support(&g->mm);
 
 	if (g->mm.pd_cache != NULL) {
@@ -601,6 +601,8 @@ int test_mm_remove_mm_support(struct unit_module *m, struct gk20a *g,
 int test_mm_page_sizes(struct unit_module *m, struct gk20a *g,
 		void *args)
 {
+	g->ops.mm.gmmu.get_big_page_sizes = NULL;
+
 	if (nvgpu_mm_get_default_big_page_size(g) != SZ_64K) {
 		unit_return_fail(m, "unexpected big page size (1)\n");
 	}
@@ -109,7 +109,6 @@ static int init_vidmem_env(struct unit_module *m, struct gk20a *g)
 
 	nvgpu_init_pramin(&g->mm);
 	nvgpu_posix_register_io(g, &pramin_callbacks);
-	nvgpu_posix_io_init_reg_space(g);
 
 	/* Minimum HAL init for PRAMIN */
 	g->ops.bus.set_bar0_window = gk20a_bus_set_bar0_window;
@@ -328,6 +327,7 @@ int test_nvgpu_mem_iommu_translate(struct unit_module *m,
 	 * But, mm_is_iommuable = true.
 	 */
 	p->mm_is_iommuable = true;
+	g->ops.mm.gmmu.get_iommu_bit = NULL;
 
 	temp_phys = nvgpu_mem_iommu_translate(g, test_sgl->phys);
 	if (temp_phys != test_sgl->phys) {
@@ -127,22 +127,26 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 		mc_gp10b_intr_nonstall_unit_config;
 
 	/* Minimum HALs for page_table */
-	g->ops.mm.gmmu.get_default_big_page_size =
-		nvgpu_gmmu_default_big_page_size;
-	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
-	g->ops.mm.gmmu.get_max_page_table_levels = gp10b_get_max_page_table_levels;
-	g->ops.mm.init_inst_block = gv11b_mm_init_inst_block;
-	g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
-	g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;
-	g->ops.mm.gmmu.get_iommu_bit = gp10b_mm_get_iommu_bit;
-	g->ops.mm.gmmu.gpu_phys_addr = gv11b_gpu_phys_addr;
+	memset(&g->ops.bus, 0, sizeof(g->ops.bus));
+	memset(&g->ops.fb, 0, sizeof(g->ops.fb));
 #ifdef CONFIG_NVGPU_COMPRESSION
 	g->ops.fb.compression_page_size = gp10b_fb_compression_page_size;
 #endif
+	g->ops.fb.tlb_invalidate = gm20b_fb_tlb_invalidate;
+
 	g->ops.ramin.init_pdb = gp10b_ramin_init_pdb;
 	g->ops.ramin.alloc_size = gk20a_ramin_alloc_size;
 
+	g->ops.mm.init_inst_block = gv11b_mm_init_inst_block;
+	g->ops.mm.gmmu.get_default_big_page_size =
+		nvgpu_gmmu_default_big_page_size;
+	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
+	g->ops.mm.gmmu.get_max_page_table_levels = gp10b_get_max_page_table_levels;
+	g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
+	g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;
+	g->ops.mm.gmmu.get_iommu_bit = gp10b_mm_get_iommu_bit;
+	g->ops.mm.gmmu.gpu_phys_addr = gv11b_gpu_phys_addr;
 
 	/* New HALs for fault testing */
 	g->ops.mc.is_mmu_fault_pending = gv11b_mc_is_mmu_fault_pending;
 	g->ops.mm.mmu_fault.info_mem_destroy =
@@ -178,7 +182,6 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	g->mm.mmu_rd_mem.cpu_va = (void *) 0x30000000;
 
 	nvgpu_posix_register_io(g, &mmu_faults_callbacks);
-	nvgpu_posix_io_init_reg_space(g);
 
 	/* Register space: FB_MMU */
 	if (nvgpu_posix_io_add_reg_space(g, fb_niso_intr_r(), 0x800) != 0) {