diff --git a/drivers/gpu/nvgpu/libnvgpu-drv_safe.export b/drivers/gpu/nvgpu/libnvgpu-drv_safe.export index 7b707a41b..3276e0fe0 100644 --- a/drivers/gpu/nvgpu/libnvgpu-drv_safe.export +++ b/drivers/gpu/nvgpu/libnvgpu-drv_safe.export @@ -19,6 +19,7 @@ gk20a_fifo_intr_1_isr gk20a_fifo_intr_handle_chsw_error gk20a_fifo_intr_handle_runlist_event gk20a_fifo_pbdma_isr +gk20a_from_as gk20a_get_pde_pgsz gk20a_get_pte_pgsz gk20a_mm_fb_flush @@ -268,6 +269,7 @@ nvgpu_alloc_release_carveout nvgpu_alloc_reserve_carveout nvgpu_alloc_space nvgpu_allocator_init +nvgpu_aperture_is_sysmem nvgpu_aperture_mask nvgpu_bar1_readl nvgpu_bar1_writel @@ -352,6 +354,7 @@ nvgpu_dma_alloc_map nvgpu_dma_alloc_map_sys nvgpu_dma_alloc_sys nvgpu_dma_free +nvgpu_dma_free_sys nvgpu_dma_unmap_free nvgpu_ecc_counter_init_per_lts nvgpu_ecc_init_support @@ -517,7 +520,9 @@ nvgpu_init_hal nvgpu_init_ltc_support nvgpu_init_mm_support nvgpu_init_therm_support +nvgpu_insert_mapped_buf nvgpu_inst_block_addr +nvgpu_iommuable nvgpu_free_inst_block nvgpu_inst_block_ptr nvgpu_is_enabled @@ -551,6 +556,7 @@ nvgpu_memset nvgpu_mem_create_from_mem nvgpu_mem_create_from_phys nvgpu_mem_get_addr +nvgpu_mem_get_phys_addr nvgpu_mem_iommu_translate nvgpu_mem_is_sysmem nvgpu_mem_is_word_aligned @@ -642,6 +648,7 @@ nvgpu_posix_is_fault_injection_cntr_set nvgpu_posix_is_fault_injection_triggered nvgpu_posix_probe nvgpu_posix_register_io +nvgpu_pte_words nvgpu_queue_alloc nvgpu_queue_free nvgpu_queue_in @@ -743,6 +750,7 @@ nvgpu_usleep_range nvgpu_vfree_impl nvgpu_vm_alloc_va nvgpu_vm_area_alloc +nvgpu_vm_area_find nvgpu_vm_area_free nvgpu_vm_area_validate_buffer nvgpu_vm_bind_channel @@ -750,11 +758,15 @@ nvgpu_vm_do_init nvgpu_vm_find_mapped_buf nvgpu_vm_find_mapped_buf_less_than nvgpu_vm_find_mapped_buf_range +nvgpu_vm_find_mapping +nvgpu_vm_free_va +nvgpu_vm_get nvgpu_vm_get_buffers nvgpu_vm_init nvgpu_vm_map nvgpu_vm_mapping_batch_finish nvgpu_vm_mapping_batch_start +nvgpu_vm_pde_coverage_bit_count nvgpu_vm_put 
nvgpu_vm_put_buffers nvgpu_vm_unmap diff --git a/userspace/SWUTS.h b/userspace/SWUTS.h index 95884fb11..49d482fb1 100644 --- a/userspace/SWUTS.h +++ b/userspace/SWUTS.h @@ -85,6 +85,7 @@ * - @ref SWUTS-mm-as * - @ref SWUTS-mm-dma * - @ref SWUTS-mm-gmmu-page_table + * - @ref SWUTS-mm-gmmu-pd_cache * - @ref SWUTS-mm-hal-cache-flush-gk20a-fusa * - @ref SWUTS-mm-hal-cache-flush-gv11b-fusa * - @ref SWUTS-mm-hal-gmmu-gmmu_gk20a_fusa diff --git a/userspace/SWUTS.sources b/userspace/SWUTS.sources index dbc36f769..b18faec66 100644 --- a/userspace/SWUTS.sources +++ b/userspace/SWUTS.sources @@ -64,6 +64,7 @@ INPUT += ../../../userspace/units/mm/allocators/buddy_allocator/buddy_allocator. INPUT += ../../../userspace/units/mm/as/as.h INPUT += ../../../userspace/units/mm/dma/dma.h INPUT += ../../../userspace/units/mm/gmmu/page_table/page_table.h +INPUT += ../../../userspace/units/mm/gmmu/pd_cache/pd_cache.h INPUT += ../../../userspace/units/mm/hal/cache/flush_gk20a_fusa/flush-gk20a-fusa.h INPUT += ../../../userspace/units/mm/hal/cache/flush_gv11b_fusa/flush-gv11b-fusa.h INPUT += ../../../userspace/units/mm/hal/gmmu/gmmu_gk20a_fusa/gmmu-gk20a-fusa.h diff --git a/userspace/units/mm/as/as.c b/userspace/units/mm/as/as.c index 7285b5e46..b98b47471 100644 --- a/userspace/units/mm/as/as.c +++ b/userspace/units/mm/as/as.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -249,6 +249,32 @@ int test_as_alloc_share(struct unit_module *m, struct gk20a *g, void *args) return UNIT_SUCCESS; } +int test_gk20a_from_as(struct unit_module *m, struct gk20a *g, void *args) +{ + int ret = UNIT_FAIL; + struct gk20a_as_share *out; + int err; + + err = gk20a_as_alloc_share(g, SZ_64K, NVGPU_AS_ALLOC_USERSPACE_MANAGED, + &out); + if (err != 0) { + unit_return_fail(m, "gk20a_as_alloc_share failed err=%d\n", + err); + } + + if (g != gk20a_from_as(out->as)) { + unit_err(m, "ptr mismatch in gk20a_from_as\n"); + goto exit; + } + + ret = UNIT_SUCCESS; + +exit: + gk20a_as_release_share(out); + + return ret; +} + struct unit_module_test nvgpu_mm_as_tests[] = { UNIT_TEST(init, test_init_mm, NULL, 0), UNIT_TEST(as_alloc_share_64k_um_as_fail, test_as_alloc_share, @@ -270,7 +296,8 @@ struct unit_module_test nvgpu_mm_as_tests[] = { UNIT_TEST(as_alloc_share_uva, test_as_alloc_share, (void *) &test_64k_unified_va, 0), UNIT_TEST(as_alloc_share_uva_enabled, test_as_alloc_share, - (void *) &test_64k_unified_va_enabled, 0) + (void *) &test_64k_unified_va_enabled, 0), + UNIT_TEST(gk20a_from_as, test_gk20a_from_as, NULL, 0), }; UNIT_MODULE(mm.as, nvgpu_mm_as_tests, UNIT_PRIO_NVGPU_TEST); diff --git a/userspace/units/mm/as/as.h b/userspace/units/mm/as/as.h index 98815c136..a934012a9 100644 --- a/userspace/units/mm/as/as.h +++ b/userspace/units/mm/as/as.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -113,7 +113,8 @@ int test_init_mm(struct unit_module *m, struct gk20a *g, void *args); * * Test Type: Feature * - * Targets: gk20a_as_alloc_share, gk20a_as_release_share + * Targets: gk20a_as_alloc_share, gk20a_as_release_share, + * gk20a_vm_release_share, gk20a_from_as * * Input: * - The test_init_mm must have been executed @@ -147,4 +148,24 @@ int test_init_mm(struct unit_module *m, struct gk20a *g, void *args); */ int test_as_alloc_share(struct unit_module *m, struct gk20a *g, void *args); +/** + * Test specification for: test_gk20a_from_as + * + * Description: Simple test to check gk20a_from_as. + * + * Test Type: Feature + * + * Targets: gk20a_from_as + * + * Input: None + * + * Steps: + * - Call gk20a_from_as with an 'as' pointer and ensure it returns a + * pointer on g. + * + * Output: Returns PASS if the steps above were executed successfully. FAIL + * otherwise. + */ +int test_gk20a_from_as(struct unit_module *m, struct gk20a *g, void *args); + #endif /* UNIT_MM_AS_H */ diff --git a/userspace/units/mm/dma/dma.c b/userspace/units/mm/dma/dma.c index b62f722b6..3ecbc0e77 100644 --- a/userspace/units/mm/dma/dma.c +++ b/userspace/units/mm/dma/dma.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -152,6 +152,10 @@ static int init_mm(struct unit_module *m, struct gk20a *g) p->mm_is_iommuable = true; + if (!nvgpu_iommuable(g)) { + unit_return_fail(m, "Mismatch on nvgpu_iommuable\n"); + } + g->ops.mm.gmmu.get_default_big_page_size = gp10b_mm_get_default_big_page_size; g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels; @@ -265,10 +269,6 @@ static struct nvgpu_mem *create_test_mem(void) return mem; } -/* - * Test to target nvgpu_dma_alloc_flags_* functions, testing several possible - * flags and SYSMEM/VIDMEM. - */ int test_mm_dma_alloc_flags(struct unit_module *m, struct gk20a *g, void *args) { int err; @@ -300,7 +300,7 @@ int test_mm_dma_alloc_flags(struct unit_module *m, struct gk20a *g, void *args) unit_err(m, "allocation not in SYSMEM\n"); goto end; } - nvgpu_dma_free(g, mem); + nvgpu_dma_free_sys(g, mem); /* Force allocation in VIDMEM and READ_ONLY */ #ifdef CONFIG_NVGPU_DGPU diff --git a/userspace/units/mm/dma/dma.h b/userspace/units/mm/dma/dma.h index 82de37173..ae74cbadc 100644 --- a/userspace/units/mm/dma/dma.h +++ b/userspace/units/mm/dma/dma.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -40,7 +40,7 @@ struct unit_module; * * Test Type: Feature, Other (setup) * - * Targets: nvgpu_vm_init + * Targets: nvgpu_vm_init, nvgpu_iommuable * * Input: None * @@ -68,7 +68,8 @@ int test_mm_dma_init(struct unit_module *m, struct gk20a *g, void *__args); * * Test Type: Feature * - * Targets: nvgpu_dma_alloc_flags_sys, nvgpu_dma_free, nvgpu_dma_alloc_flags_vid + * Targets: nvgpu_dma_alloc_flags_sys, nvgpu_dma_free, + * nvgpu_dma_alloc_flags_vid, nvgpu_dma_alloc, nvgpu_dma_alloc_sys * * Input: test_mm_dma_init * @@ -95,7 +96,8 @@ int test_mm_dma_alloc(struct unit_module *m, struct gk20a *g, void *args); * * Test Type: Feature * - * Targets: nvgpu_dma_alloc_flags_sys, nvgpu_dma_free, nvgpu_dma_alloc_flags_vid + * Targets: nvgpu_dma_alloc_flags_sys, nvgpu_dma_free, + * nvgpu_dma_alloc_flags_vid, nvgpu_dma_free_sys, nvgpu_dma_alloc_flags * * Input: test_mm_dma_init * @@ -124,7 +126,8 @@ int test_mm_dma_alloc_flags(struct unit_module *m, struct gk20a *g, void *args); * Test Type: Feature * * Targets: nvgpu_dma_alloc_map, nvgpu_dma_unmap_free, nvgpu_dma_alloc_map_sys, - * nvgpu_dma_alloc_map_vid + * nvgpu_dma_alloc_map_vid, nvgpu_dma_alloc_map_flags, + * nvgpu_dma_alloc_map_flags_sys * * Input: test_mm_dma_init * diff --git a/userspace/units/mm/gmmu/page_table/page_table.c b/userspace/units/mm/gmmu/page_table/page_table.c index ffe6fcddd..84f8e21e4 100644 --- a/userspace/units/mm/gmmu/page_table/page_table.c +++ b/userspace/units/mm/gmmu/page_table/page_table.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -49,13 +49,14 @@ #include -#define TEST_PA_ADDRESS 0xEFAD80000000 -#define TEST_GPU_VA 0x102040600000 -#define TEST_PA_ADDRESS_64K 0x1FAD80010000 -#define TEST_PA_ADDRESS_4K 0x2FAD80001000 -#define TEST_HOLE_SIZE 0x100000 -#define TEST_COMP_TAG 0xEF +#define TEST_PA_ADDRESS 0xEFAD80000000 +#define TEST_GPU_VA 0x102040600000 +#define TEST_PA_ADDRESS_64K 0x1FAD80010000 +#define TEST_PA_ADDRESS_4K 0x2FAD80001000 +#define TEST_HOLE_SIZE 0x100000 +#define TEST_COMP_TAG 0xEF #define TEST_INVALID_ADDRESS 0xAAC0000000 +#define TEST_PTE_SIZE 2U /* Size of the buffer to map. It must be a multiple of 4KB */ #define TEST_SIZE (1 * SZ_1M) @@ -366,13 +367,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g) return UNIT_SUCCESS; } -/* - * Test: test_nvgpu_gmmu_init - * This test must be run once and be the first oneas it initializes the MM - * subsystem. - */ -static int test_nvgpu_gmmu_init(struct unit_module *m, - struct gk20a *g, void *args) +int test_nvgpu_gmmu_init(struct unit_module *m, struct gk20a *g, void *args) { int debug_level = verbose_lvl(m); @@ -396,12 +391,7 @@ static int test_nvgpu_gmmu_init(struct unit_module *m, return UNIT_SUCCESS; } -/* - * Test: test_nvgpu_gmmu_clean - * This test should be the last one to run as it de-initializes components. 
- */ -static int test_nvgpu_gmmu_clean(struct unit_module *m, - struct gk20a *g, void *args) +int test_nvgpu_gmmu_clean(struct unit_module *m, struct gk20a *g, void *args) { g->log_mask = 0; nvgpu_vm_put(g->mm.pmu.vm); @@ -455,7 +445,7 @@ int test_nvgpu_gmmu_map_unmap(struct unit_module *m, struct gk20a *g, void *args) { struct nvgpu_mem mem = { }; - u32 pte[2]; + u32 pte[TEST_PTE_SIZE]; int result; struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g); struct test_parameters *params = (struct test_parameters *) args; @@ -624,12 +614,7 @@ int test_nvgpu_gmmu_map_unmap_map_fail(struct unit_module *m, struct gk20a *g, return UNIT_SUCCESS; } -/* - * Test: test_nvgpu_gmmu_init_page_table_fail - * Test special corner cases causing nvgpu_gmmu_init_page_table to fail - * Mostly to cover error handling and some branches. - */ -static int test_nvgpu_gmmu_init_page_table_fail(struct unit_module *m, +int test_nvgpu_gmmu_init_page_table_fail(struct unit_module *m, struct gk20a *g, void *args) { int err; @@ -648,16 +633,11 @@ static int test_nvgpu_gmmu_init_page_table_fail(struct unit_module *m, return UNIT_SUCCESS; } -/* - * Test: test_nvgpu_gmmu_set_pte - * This test targets the nvgpu_set_pte() function by mapping a buffer, and - * then trying to alter the validity bit of the corresponding PTE. 
- */ -static int test_nvgpu_gmmu_set_pte(struct unit_module *m, - struct gk20a *g, void *args) +int test_nvgpu_gmmu_set_pte(struct unit_module *m, struct gk20a *g, void *args) { struct nvgpu_mem mem = { }; - u32 pte[2]; + u32 pte[TEST_PTE_SIZE]; + u32 pte_size; int result; struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g); struct test_parameters *params = (struct test_parameters *) args; @@ -673,6 +653,12 @@ static int test_nvgpu_gmmu_set_pte(struct unit_module *m, unit_return_fail(m, "Failed to map GMMU page"); } + pte_size = nvgpu_pte_words(g); + if (pte_size != TEST_PTE_SIZE) { + unit_return_fail(m, "PTE size unexpected: %d/%d\n", pte_size, + TEST_PTE_SIZE); + } + result = nvgpu_get_pte(g, g->mm.pmu.vm, mem.gpu_va, &pte[0]); if (result != 0) { unit_return_fail(m, "PTE lookup failed with code=%d\n", result); @@ -869,11 +855,6 @@ int test_nvgpu_gmmu_map_unmap_adv(struct unit_module *m, return UNIT_SUCCESS; } -/* - * Test: test_nvgpu_gmmu_map_unmap_batched - * This tests uses the batch mode and maps 2 buffers. Then it checks that - * the flags in the batch structure were set correctly. 
- */ int test_nvgpu_gmmu_map_unmap_batched(struct unit_module *m, struct gk20a *g, void *args) { @@ -933,7 +914,7 @@ int test_nvgpu_gmmu_map_unmap_batched(struct unit_module *m, static int check_pte_valid(struct unit_module *m, struct gk20a *g, struct vm_gk20a *vm, struct nvgpu_mem *mem) { - u32 pte[2]; + u32 pte[TEST_PTE_SIZE]; int result; result = nvgpu_get_pte(g, vm, mem->gpu_va, &pte[0]); @@ -953,7 +934,7 @@ static int check_pte_valid(struct unit_module *m, struct gk20a *g, static int check_pte_invalidated(struct unit_module *m, struct gk20a *g, struct vm_gk20a *vm, struct nvgpu_mem *mem) { - u32 pte[2]; + u32 pte[TEST_PTE_SIZE]; int result; result = nvgpu_get_pte(g, vm, mem->gpu_va, &pte[0]); @@ -1179,6 +1160,36 @@ int test_nvgpu_page_table_c2_full(struct unit_module *m, struct gk20a *g, return UNIT_SUCCESS; } +int test_nvgpu_gmmu_perm_str(struct unit_module *m, struct gk20a *g, void *args) +{ + int ret = UNIT_FAIL; + const char *str; + + str = nvgpu_gmmu_perm_str(gk20a_mem_flag_none); + if (strcmp(str, "RW") != 0) { + unit_return_fail(m, "nvgpu_gmmu_perm_str failed (1)\n"); + } + + str = nvgpu_gmmu_perm_str(gk20a_mem_flag_write_only); + if (strcmp(str, "WO") != 0) { + unit_return_fail(m, "nvgpu_gmmu_perm_str failed (2)\n"); + } + + str = nvgpu_gmmu_perm_str(gk20a_mem_flag_read_only); + if (strcmp(str, "RO") != 0) { + unit_return_fail(m, "nvgpu_gmmu_perm_str failed (3)\n"); + } + + str = nvgpu_gmmu_perm_str(0xFF); + if (strcmp(str, "??") != 0) { + unit_return_fail(m, "nvgpu_gmmu_perm_str failed (4)\n"); + } + + ret = UNIT_SUCCESS; + + + return ret; +} struct unit_module_test nvgpu_gmmu_tests[] = { UNIT_TEST(gmmu_init, test_nvgpu_gmmu_init, (void *) 1, 0), @@ -1285,6 +1296,7 @@ struct unit_module_test nvgpu_gmmu_tests[] = { req_fixed_address, test_nvgpu_page_table_c2_full, NULL, 0), + UNIT_TEST(gmmu_perm_str, test_nvgpu_gmmu_perm_str, NULL, 0), UNIT_TEST(gmmu_clean, test_nvgpu_gmmu_clean, NULL, 0), }; diff --git a/userspace/units/mm/gmmu/page_table/page_table.h 
b/userspace/units/mm/gmmu/page_table/page_table.h index d1db94771..ef288f632 100644 --- a/userspace/units/mm/gmmu/page_table/page_table.h +++ b/userspace/units/mm/gmmu/page_table/page_table.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -69,8 +69,8 @@ int test_nvgpu_gmmu_map_unmap_map_fail(struct unit_module *m, struct gk20a *g, * * Test Type: Feature * - * Targets: nvgpu_gmmu_map_fixed, nvgpu_gmmu_map, nvgpu_get_pte, - * nvgpu_gmmu_unmap + * Targets: nvgpu_gmmu_map_fixed, gops_mm.gops_mm_gmmu.map, nvgpu_gmmu_map, + * nvgpu_get_pte, gops_mm.gops_mm_gmmu.unmap, nvgpu_gmmu_unmap * * Input: args as a struct test_parameters to hold scenario and test parameters. * @@ -103,7 +103,8 @@ int test_nvgpu_gmmu_map_unmap(struct unit_module *m, struct gk20a *g, * * Test Type: Feature * - * Targets: nvgpu_gmmu_map_locked, nvgpu_gmmu_unmap + * Targets: gops_mm.gops_mm_gmmu.map, nvgpu_gmmu_map_locked, + * gops_mm.gops_mm_gmmu.unmap, nvgpu_gmmu_unmap, gk20a_from_vm * * Input: args as a struct test_parameters to hold scenario and test parameters. * @@ -128,7 +129,8 @@ int test_nvgpu_gmmu_map_unmap_adv(struct unit_module *m, struct gk20a *g, * * Test Type: Feature * - * Targets: nvgpu_gmmu_map_locked, nvgpu_gmmu_unmap + * Targets: nvgpu_gmmu_map_locked, nvgpu_gmmu_unmap, gops_mm.gops_mm_gmmu.unmap, + * nvgpu_gmmu_unmap_locked * * Input: args as a struct test_parameters to hold scenario and test parameters. 
* @@ -157,8 +159,9 @@ int test_nvgpu_gmmu_map_unmap_batched(struct unit_module *m, struct gk20a *g, * * Test Type: Feature * - * Targets: nvgpu_vm_init, nvgpu_gmmu_map, nvgpu_gmmu_map_locked, - * nvgpu_gmmu_unmap, nvgpu_vm_put + * Targets: nvgpu_vm_init, nvgpu_gmmu_map, gops_mm.gops_mm_gmmu.map, + * nvgpu_gmmu_map_locked, gops_mm.gops_mm_gmmu.unmap, nvgpu_gmmu_unmap, + * nvgpu_vm_put * * Input: None * @@ -189,8 +192,8 @@ int test_nvgpu_page_table_c1_full(struct unit_module *m, struct gk20a *g, * * Test Type: Feature * - * Targets: nvgpu_vm_init, nvgpu_gmmu_map_fixed, nvgpu_gmmu_unmap, - * nvgpu_vm_put + * Targets: nvgpu_vm_init, gops_mm.gops_mm_gmmu.map, nvgpu_gmmu_map_fixed, + * gops_mm.gops_mm_gmmu.unmap, nvgpu_gmmu_unmap, nvgpu_vm_put * * Input: None * @@ -211,5 +214,132 @@ int test_nvgpu_page_table_c1_full(struct unit_module *m, struct gk20a *g, int test_nvgpu_page_table_c2_full(struct unit_module *m, struct gk20a *g, void *args); +/** + * Test specification for: test_nvgpu_gmmu_init_page_table_fail + * + * Description: Test special corner cases causing nvgpu_gmmu_init_page_table + * to fail, mostly to cover error handling and some branches. + * + * Test Type: Error injection + * + * Targets: nvgpu_gmmu_init_page_table + * + * Input: None + * + * Steps: + * - Enable KMEM fault injection. + * - Call nvgpu_gmmu_init_page_table. + * - Disable KMEM fault injection. + * - Ensure that nvgpu_gmmu_init_page_table failed as expected. + * + * Output: Returns PASS if the steps above were executed successfully. FAIL + * otherwise. + */ +int test_nvgpu_gmmu_init_page_table_fail(struct unit_module *m, + struct gk20a *g, void *args); + +/** + * Test specification for: test_nvgpu_gmmu_set_pte + * + * Description: This test targets the nvgpu_set_pte() function by mapping a + * buffer, and then trying to alter the validity bit of the corresponding PTE. 
+ * + * Test Type: Feature, Error injection + * + * Targets: nvgpu_get_pte, nvgpu_set_pte, nvgpu_pte_words + * + * Input: None + * + * Steps: + * - Map a test buffer (dynamic) and get the assigned GPU VA. + * - Ensure the mapping succeeded. + * - Check that nvgpu_pte_words returns the expected value (2). + * - Use nvgpu_get_pte to retrieve the PTE from the assigned GPU VA, ensure + * it is valid. + * - Call nvgpu_set_pte with an invalid address and ensure it failed. + * - Using nvgpu_set_pte, rewrite the PTE with the validity bit flipped and + * ensure it reports success. + * - Retrieve the PTE again, ensure it succeeds and then check that the PTE + * is invalid. + * + * Output: Returns PASS if the steps above were executed successfully. FAIL + * otherwise. + */ +int test_nvgpu_gmmu_set_pte(struct unit_module *m, + struct gk20a *g, void *args); + +/** + * Test specification for: test_nvgpu_gmmu_init + * + * Description: This test must be run once and be the first one as it + * initializes the MM subsystem. + * + * Test Type: Other (setup), Feature + * + * Targets: nvgpu_gmmu_init_page_table, nvgpu_vm_init + * + * Input: None + * + * Steps: + * - Set debug log masks if needed. + * - For iGPU, enable the following flags: NVGPU_MM_UNIFIED_MEMORY, + * NVGPU_USE_COHERENT_SYSMEM, NVGPU_SUPPORT_NVLINK + * - Setup all the needed HALs. + * - Create a test PMU VM to be used by other tests which will cause the + * nvgpu_gmmu_init_page_table function to be called. + * + * Output: Returns PASS if the steps above were executed successfully. FAIL + * otherwise. + */ +int test_nvgpu_gmmu_init(struct unit_module *m, struct gk20a *g, void *args); + +/** + * Test specification for: test_nvgpu_gmmu_clean + * + * Description: This test should be the last one to run as it de-initializes + * components. + * + * Test Type: Other (cleanup) + * + * Targets: None + * + * Input: None + * + * Steps: + * - Set log mask to 0. + * - Call nvgpu_vm_put to remove the test VM.
+ * + * Output: Returns PASS if the steps above were executed successfully. FAIL + * otherwise. + */ +int test_nvgpu_gmmu_clean(struct unit_module *m, struct gk20a *g, void *args); + +/** + * Test specification for: test_nvgpu_gmmu_perm_str + * + * Description: Tests all supported combinations of permissions on the + * nvgpu_gmmu_perm_str function. + * + * Test Type: Feature + * + * Targets: nvgpu_gmmu_perm_str + * + * Input: None + * + * Steps: + * - Call nvgpu_gmmu_perm_str with flag gk20a_mem_flag_none and ensure it + * returns "RW" + * - Call nvgpu_gmmu_perm_str with flag gk20a_mem_flag_write_only and ensure it + * returns "WO" + * - Call nvgpu_gmmu_perm_str with flag gk20a_mem_flag_read_only and ensure it + * returns "RO" + * - Call nvgpu_gmmu_perm_str with an invalid flag and ensure it + * returns "??" + * + * Output: Returns PASS if the steps above were executed successfully. FAIL + * otherwise. + */ +int test_nvgpu_gmmu_perm_str(struct unit_module *m, struct gk20a *g, + void *args); /** }@ */ #endif /* UNIT_PAGE_TABLE_H */ diff --git a/userspace/units/mm/gmmu/pd_cache/pd_cache.h b/userspace/units/mm/gmmu/pd_cache/pd_cache.h index 49cd0d952..f1958dbdf 100644 --- a/userspace/units/mm/gmmu/pd_cache/pd_cache.h +++ b/userspace/units/mm/gmmu/pd_cache/pd_cache.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -39,7 +39,7 @@ struct unit_module; * * Test Type: Feature, Error Injection * - * Targets: nvgpu_pd_cache_init + * Targets: gops_mm.pd_cache_init, nvgpu_pd_cache_init * * Input: None * @@ -112,8 +112,8 @@ int test_pd_cache_valid_alloc(struct unit_module *m, struct gk20a *g, * * Test Type: Feature * - * Targets: nvgpu_pd_cache_init, nvgpu_pd_alloc, nvgpu_pd_free, - * nvgpu_pd_cache_fini + * Targets: gops_mm.pd_cache_init, nvgpu_pd_cache_init, nvgpu_pd_alloc, + * nvgpu_pd_free, nvgpu_pd_cache_fini * * Input: None * @@ -141,8 +141,8 @@ int test_per_pd_size(struct unit_module *m, struct gk20a *g, void *__args); * * Test Type: Feature * - * Targets: gp10b_mm_get_mmu_levels, nvgpu_pd_cache_init, nvgpu_pd_alloc, - * nvgpu_pd_offset_from_index, nvgpu_pd_write, nvgpu_pd_free, + * Targets: gp10b_mm_get_mmu_levels, gops_mm.pd_cache_init, nvgpu_pd_cache_init, + * nvgpu_pd_alloc, nvgpu_pd_offset_from_index, nvgpu_pd_write, nvgpu_pd_free, * nvgpu_pd_cache_fini * * Input: None @@ -171,8 +171,8 @@ int test_pd_write(struct unit_module *m, struct gk20a *g, void *__args); * * Test Type: Feature * - * Targets: nvgpu_pd_cache_init, nvgpu_pd_alloc, nvgpu_pd_gpu_addr, - * nvgpu_pd_free, nvgpu_pd_cache_fini + * Targets: gops_mm.pd_cache_init, nvgpu_pd_cache_init, nvgpu_pd_alloc, + * nvgpu_pd_gpu_addr, nvgpu_pd_free, nvgpu_pd_cache_fini * * Input: None * @@ -222,8 +222,8 @@ int test_offset_computation(struct unit_module *m, struct gk20a *g, * * Test Type: Feature, Error injection * - * Targets: nvgpu_pd_cache_init, nvgpu_pd_alloc, nvgpu_pd_cache_fini, - * nvgpu_pd_free + * Targets: gops_mm.pd_cache_init, nvgpu_pd_cache_init, nvgpu_pd_alloc, + * nvgpu_pd_cache_fini, nvgpu_pd_free * * Input: None * @@ -258,8 +258,8 @@ int test_init_deinit(struct unit_module *m, struct gk20a *g, void *__args); * * Test Type: Feature * - * Targets: 
nvgpu_pd_cache_init, nvgpu_pd_alloc, nvgpu_pd_cache_fini, - * nvgpu_pd_free + * Targets: gops_mm.pd_cache_init, nvgpu_pd_cache_init, nvgpu_pd_alloc, + * nvgpu_pd_cache_fini, nvgpu_pd_free * * Input: None * @@ -291,8 +291,8 @@ int test_pd_cache_alloc_gen(struct unit_module *m, struct gk20a *g, * * Test Type: Feature, Error injection * - * Targets: nvgpu_pd_cache_init, nvgpu_pd_alloc, nvgpu_pd_cache_fini, - * nvgpu_pd_free + * Targets: gops_mm.pd_cache_init, nvgpu_pd_cache_init, nvgpu_pd_alloc, + * nvgpu_pd_cache_fini, nvgpu_pd_free * * Input: None * @@ -323,7 +323,8 @@ int test_pd_free_empty_pd(struct unit_module *m, struct gk20a *g, * * Test Type: Error injection * - * Targets: nvgpu_pd_cache_init, nvgpu_pd_alloc, nvgpu_pd_cache_fini + * Targets: gops_mm.pd_cache_init, nvgpu_pd_cache_init, nvgpu_pd_alloc, + * nvgpu_pd_cache_fini * * Input: None * @@ -349,7 +350,8 @@ int test_pd_alloc_invalid_input(struct unit_module *m, struct gk20a *g, * * Test Type: Error injection * - * Targets: nvgpu_pd_cache_init, nvgpu_pd_alloc, nvgpu_pd_cache_fini + * Targets: nvgpu_pd_cache_init, nvgpu_pd_alloc, gops_mm.pd_cache_init, + * nvgpu_pd_cache_fini * * Input: None * @@ -376,7 +378,8 @@ int test_pd_alloc_direct_fi(struct unit_module *m, struct gk20a *g, void *args); * * Test Type: Error injection * - * Targets: nvgpu_pd_cache_init, nvgpu_pd_alloc, nvgpu_pd_cache_fini + * Targets: gops_mm.pd_cache_init, nvgpu_pd_cache_init, nvgpu_pd_alloc, + * nvgpu_pd_cache_fini * * Input: None * diff --git a/userspace/units/mm/hal/cache/flush_gk20a_fusa/flush-gk20a-fusa.h b/userspace/units/mm/hal/cache/flush_gk20a_fusa/flush-gk20a-fusa.h index 210718316..582d4ea89 100644 --- a/userspace/units/mm/hal/cache/flush_gk20a_fusa/flush-gk20a-fusa.h +++ b/userspace/units/mm/hal/cache/flush_gk20a_fusa/flush-gk20a-fusa.h @@ -59,7 +59,8 @@ int test_env_init_flush_gk20a_fusa(struct unit_module *m, struct gk20a *g, * * Test Type: Feature * - * Targets: gops_mm.gops_mm_cache.fb_flush, gk20a_mm_fb_flush + * 
Targets: gops_mm.gops_mm_cache.fb_flush, gk20a_mm_fb_flush, + * gops_mm.get_flush_retries * * Input: test_env_init, args (value can be F_GK20A_FB_FLUSH_DEFAULT_INPUT, * F_GK20A_FB_FLUSH_GET_RETRIES, F_GK20A_FB_FLUSH_PENDING_TRUE, diff --git a/userspace/units/mm/mm/mm.c b/userspace/units/mm/mm/mm.c index d0366e5c3..3184e1679 100644 --- a/userspace/units/mm/mm/mm.c +++ b/userspace/units/mm/mm/mm.c @@ -687,6 +687,25 @@ cleanup: return result; } +int test_gk20a_from_mm(struct unit_module *m, struct gk20a *g, void *args) +{ + if (g != gk20a_from_mm(&(g->mm))) { + unit_return_fail(m, "ptr mismatch in gk20a_from_mm\n"); + } + + return UNIT_SUCCESS; +} + +int test_bar1_aperture_size_mb_gk20a(struct unit_module *m, struct gk20a *g, + void *args) +{ + if (g->mm.bar1.aperture_size != (bar1_aperture_size_mb_gk20a() << 20)) { + unit_return_fail(m, "mismatch in bar1_aperture_size\n"); + } + + return UNIT_SUCCESS; +} + struct unit_module_test nvgpu_mm_mm_tests[] = { UNIT_TEST(init_hal, test_mm_init_hal, NULL, 0), UNIT_TEST(init_mm, test_nvgpu_init_mm, NULL, 0), @@ -696,6 +715,9 @@ struct unit_module_test nvgpu_mm_mm_tests[] = { UNIT_TEST(page_sizes, test_mm_page_sizes, NULL, 0), UNIT_TEST(inst_block, test_mm_inst_block, NULL, 0), UNIT_TEST(alloc_inst_block, test_mm_alloc_inst_block, NULL, 0), + UNIT_TEST(gk20a_from_mm, test_gk20a_from_mm, NULL, 0), + UNIT_TEST(bar1_aperture_size, test_bar1_aperture_size_mb_gk20a, NULL, + 0), }; UNIT_MODULE(mm.mm, nvgpu_mm_mm_tests, UNIT_PRIO_NVGPU_TEST); diff --git a/userspace/units/mm/mm/mm.h b/userspace/units/mm/mm/mm.h index 369715c2c..0eba4ea60 100644 --- a/userspace/units/mm/mm/mm.h +++ b/userspace/units/mm/mm/mm.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -70,7 +70,7 @@ int test_mm_init_hal(struct unit_module *m, struct gk20a *g, void *args); * * Test Type: Feature, Error guessing * - * Targets: nvgpu_init_mm_support + * Targets: gops_mm.init_mm_support, nvgpu_init_mm_support * * Input: test_mm_init_hal must have been executed successfully. * @@ -97,7 +97,7 @@ int test_nvgpu_init_mm(struct unit_module *m, struct gk20a *g, void *args); * * Test Type: Feature, Error guessing * - * Targets: nvgpu_mm_setup_hw + * Targets: gops_mm.setup_hw, nvgpu_mm_setup_hw * * Input: test_mm_init_hal and test_nvgpu_init_mm must have been executed * successfully. @@ -123,7 +123,7 @@ int test_nvgpu_mm_setup_hw(struct unit_module *m, struct gk20a *g, void *args); * * Test Type: Feature * - * Targets: nvgpu_set_power_state, nvgpu_mm_suspend + * Targets: nvgpu_set_power_state, gops_mm.mm_suspend, nvgpu_mm_suspend * * Input: test_mm_init_hal, test_nvgpu_init_mm and test_nvgpu_mm_setup_hw must * have been executed successfully. @@ -152,7 +152,7 @@ int test_mm_suspend(struct unit_module *m, struct gk20a *g, void *args); * * Test Type: Feature * - * Targets: nvgpu_pd_cache_init, gops_mm.remove_support + * Targets: gops_mm.pd_cache_init, nvgpu_pd_cache_init, gops_mm.remove_support * * Input: test_mm_init_hal, test_nvgpu_init_mm and test_nvgpu_mm_setup_hw must * have been executed successfully @@ -255,4 +255,44 @@ int test_mm_inst_block(struct unit_module *m, struct gk20a *g, void *args); int test_mm_alloc_inst_block(struct unit_module *m, struct gk20a *g, void *args); +/** + * Test specification for: test_gk20a_from_mm + * + * Description: Simple test to check gk20a_from_mm. + * + * Test Type: Feature + * + * Targets: gk20a_from_mm + * + * Input: None + * + * Steps: + * - Call gk20a_from_mm with the g->mm pointer and ensure it returns a + * pointer on g. 
+ * + * Output: Returns PASS if the steps above were executed successfully. FAIL + * otherwise. + */ +int test_gk20a_from_mm(struct unit_module *m, struct gk20a *g, void *args); + +/** + * Test specification for: test_bar1_aperture_size_mb_gk20a + * + * Description: Simple test to check bar1_aperture_size_mb_gk20a. + * + * Test Type: Feature + * + * Targets: bar1_aperture_size_mb_gk20a + * + * Input: None + * + * Steps: + * - Ensure that g->mm.bar1.aperture_size matches the expected value from + * bar1_aperture_size_mb_gk20a + * + * Output: Returns PASS if the steps above were executed successfully. FAIL + * otherwise. + */ +int test_bar1_aperture_size_mb_gk20a(struct unit_module *m, struct gk20a *g, + void *args); #endif /* UNIT_MM_MM_H */ diff --git a/userspace/units/mm/nvgpu_mem/nvgpu_mem.c b/userspace/units/mm/nvgpu_mem/nvgpu_mem.c index c1dad6d93..428eead19 100644 --- a/userspace/units/mm/nvgpu_mem/nvgpu_mem.c +++ b/userspace/units/mm/nvgpu_mem/nvgpu_mem.c @@ -195,6 +195,9 @@ int test_nvgpu_aperture_mask(struct unit_module *m, /* Case: APERTURE_SYSMEM */ test_mem->aperture = APERTURE_SYSMEM; + if (!nvgpu_aperture_is_sysmem(test_mem->aperture)) { + unit_return_fail(m, "Invalid aperture enum\n"); + } ret_ap_mask = nvgpu_aperture_mask(g, test_mem, sysmem_mask, sysmem_coh_mask, vidmem_mask); if (ret_ap_mask != sysmem_mask) { @@ -652,6 +655,14 @@ int test_nvgpu_mem_create_from_phys(struct unit_module *m, unit_return_fail(m, "nvgpu_mem_create_from_phys init failed\n"); } + if (nvgpu_mem_get_phys_addr(g, test_mem) != ((u64) test_mem->cpu_va)) { + unit_return_fail(m, "invalid physical address\n"); + } + + if (nvgpu_mem_get_addr(g, test_mem) != ((u64) test_mem->cpu_va)) { + unit_return_fail(m, "invalid nvgpu_mem_get_addr address\n"); + } + /* Allocate cpu_va for later tests */ test_mem->cpu_va = nvgpu_kzalloc(g, MEM_SIZE); if (test_mem->cpu_va == NULL) { diff --git a/userspace/units/mm/nvgpu_mem/nvgpu_mem.h b/userspace/units/mm/nvgpu_mem/nvgpu_mem.h index 
f9817e619..adcfbedf9 100644 --- a/userspace/units/mm/nvgpu_mem/nvgpu_mem.h +++ b/userspace/units/mm/nvgpu_mem/nvgpu_mem.h @@ -41,6 +41,7 @@ struct unit_module; * - * Targets: nvgpu_mem_create_from_phys + * Targets: nvgpu_mem_create_from_phys, nvgpu_mem_get_phys_addr, + * nvgpu_mem_get_addr * * Input: None * * Steps: @@ -163,7 +166,8 @@ int test_nvgpu_mem_iommu_translate(struct unit_module *m, * * Test Type: Feature * - * Targets: nvgpu_aperture_mask, nvgpu_aperture_mask_raw + * Targets: nvgpu_aperture_mask, nvgpu_aperture_mask_raw, + * nvgpu_aperture_is_sysmem * * Input: test_nvgpu_mem_create_from_phys * diff --git a/userspace/units/mm/vm/vm.c b/userspace/units/mm/vm/vm.c index 4bac88248..6203c2ad1 100644 --- a/userspace/units/mm/vm/vm.c +++ b/userspace/units/mm/vm/vm.c @@ -62,6 +62,9 @@ #define SPECIAL_CASE_NO_VM_AREA 4 #define SPECIAL_CASE_TIMEOUT_INIT_FAIL 8 +/* Expected bit count from nvgpu_vm_pde_coverage_bit_count() */ +#define GP10B_PDE_BIT_COUNT 21U + /* * Helper function used to create custom SGTs from a list of SGLs. * The created SGT needs to be explicitly free'd. 
@@ -184,6 +187,7 @@ static int init_test_env(struct unit_module *m, struct gk20a *g) g->ops.mm.cache.fb_flush = gk20a_mm_fb_flush; g->ops.mm.init_inst_block = hal_mm_init_inst_block; g->ops.mm.vm_as_free_share = hal_vm_as_free_share; + g->ops.mm.vm_bind_channel = nvgpu_vm_bind_channel; if (nvgpu_pd_cache_init(g) != 0) { unit_return_fail(m, "PD cache init failed.\n"); @@ -273,6 +277,9 @@ int test_nvgpu_vm_alloc_va(struct unit_module *m, struct gk20a *g, goto exit; } + /* And now free it */ + nvgpu_vm_free_va(vm, addr, 0); + ret = UNIT_SUCCESS; exit: if (vm != NULL) { @@ -571,6 +578,11 @@ static int map_buffer(struct unit_module *m, ret = UNIT_FAIL; goto free_vm_area; } + if (nvgpu_vm_area_find(vm, gpu_va) == NULL) { + unit_err(m, "VM area not found\n"); + ret = UNIT_FAIL; + goto free_vm_area; + } } } @@ -657,6 +669,17 @@ static int map_buffer(struct unit_module *m, goto free_mapped_buf; } + /* Check if we can find the mapped buffer via nvgpu_vm_find_mapping */ + if (fixed_gpu_va) { + mapped_buf_check = nvgpu_vm_find_mapping(vm, &os_buf, gpu_va, + flags, compr_kind); + if (mapped_buf_check == NULL) { + unit_err(m, "Can't find buf nvgpu_vm_find_mapping\n"); + ret = UNIT_FAIL; + goto free_mapped_buf; + } + } + /* * For code coverage, ensure that an invalid address does not return * a buffer. @@ -798,7 +821,7 @@ int test_vm_bind(struct unit_module *m, struct gk20a *g, void *__args) vm = create_test_vm(m, g); - nvgpu_vm_bind_channel(vm, channel); + g->ops.mm.vm_bind_channel(vm, channel); if (channel->vm != vm) { ret = UNIT_FAIL; @@ -1070,6 +1093,25 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args) goto exit; } + /* Ref count */ + if (vm->ref.refcount.v != 1U) { + unit_err(m, "Invalid ref count. (1)\n"); + ret = UNIT_FAIL; + goto exit; + } + nvgpu_vm_get(vm); + if (vm->ref.refcount.v != 2U) { + unit_err(m, "Invalid ref count. 
(2)\n"); + ret = UNIT_FAIL; + goto exit; + } + nvgpu_vm_put(vm); + if (vm->ref.refcount.v != 1U) { + unit_err(m, "Invalid ref count. (3)\n"); + ret = UNIT_FAIL; + goto exit; + } + ret = UNIT_SUCCESS; exit: @@ -1892,6 +1934,108 @@ exit: return ret; } +int test_gk20a_from_vm(struct unit_module *m, struct gk20a *g, void *args) +{ + struct vm_gk20a *vm = create_test_vm(m, g); + int ret = UNIT_FAIL; + + if (g != gk20a_from_vm(vm)) { + unit_err(m, "ptr mismatch in gk20a_from_vm\n"); + goto exit; + } + + ret = UNIT_SUCCESS; + +exit: + nvgpu_vm_put(vm); + + return ret; +} + +static bool is_overlapping_mapping(struct nvgpu_rbtree_node *root, u64 addr, + u64 size) +{ + struct nvgpu_rbtree_node *node = NULL; + struct nvgpu_mapped_buf *buffer; + + nvgpu_rbtree_search(addr, &node, root); + if (!node) + return false; + + buffer = mapped_buffer_from_rbtree_node(node); + if (addr + size > buffer->addr) + return true; + + return false; +} + + +int test_nvgpu_insert_mapped_buf(struct unit_module *m, struct gk20a *g, + void *args) +{ + int ret = UNIT_FAIL; + struct vm_gk20a *vm = create_test_vm(m, g); + struct nvgpu_mapped_buf *mapped_buffer = NULL; + u64 map_addr = BUF_CPU_PA; + u64 size = SZ_64K; + + if (is_overlapping_mapping(vm->mapped_buffers, map_addr, size)) { + unit_err(m, "addr already mapped"); + ret = UNIT_FAIL; + goto done; + } + + mapped_buffer = malloc(sizeof(*mapped_buffer)); + if (!mapped_buffer) { + ret = UNIT_FAIL; + goto done; + } + + mapped_buffer->addr = map_addr; + mapped_buffer->size = size; + mapped_buffer->pgsz_idx = GMMU_PAGE_SIZE_BIG; + mapped_buffer->vm = vm; + nvgpu_init_list_node(&mapped_buffer->buffer_list); + nvgpu_ref_init(&mapped_buffer->ref); + + nvgpu_insert_mapped_buf(vm, mapped_buffer); + + if (!is_overlapping_mapping(vm->mapped_buffers, map_addr, size)) { + unit_err(m, "addr NOT already mapped"); + ret = UNIT_FAIL; + goto done; + } + + ret = UNIT_SUCCESS; + +done: + nvgpu_vm_free_va(vm, map_addr, 0); + + return ret; +} + +int 
test_vm_pde_coverage_bit_count(struct unit_module *m, struct gk20a *g, + void *args) +{ + u32 bit_count; + int ret = UNIT_FAIL; + struct vm_gk20a *vm = create_test_vm(m, g); + + bit_count = nvgpu_vm_pde_coverage_bit_count(vm); + + if (bit_count != GP10B_PDE_BIT_COUNT) { + unit_err(m, "invalid PDE bit count\n"); + goto done; + } + + ret = UNIT_SUCCESS; + +done: + nvgpu_vm_put(vm); + + return ret; +} + struct unit_module_test vm_tests[] = { /* * Requirement verification tests @@ -1924,6 +2068,11 @@ struct unit_module_test vm_tests[] = { test_batch, NULL, 0), + UNIT_TEST(gk20a_from_vm, test_gk20a_from_vm, NULL, 0), + UNIT_TEST(nvgpu_insert_mapped_buf, test_nvgpu_insert_mapped_buf, NULL, + 0), + UNIT_TEST(vm_pde_coverage_bit_count, test_vm_pde_coverage_bit_count, + NULL, 0), }; UNIT_MODULE(vm, vm_tests, UNIT_PRIO_NVGPU_TEST); diff --git a/userspace/units/mm/vm/vm.h b/userspace/units/mm/vm/vm.h index 31876a70e..a2f04255d 100644 --- a/userspace/units/mm/vm/vm.h +++ b/userspace/units/mm/vm/vm.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -43,7 +43,9 @@ struct unit_module; * Targets: nvgpu_vm_init, nvgpu_vm_get_buffers, nvgpu_big_pages_possible, * nvgpu_vm_area_alloc, nvgpu_vm_map, nvgpu_vm_find_mapped_buf_range, * nvgpu_vm_find_mapped_buf_less_than, nvgpu_get_pte, nvgpu_vm_put_buffers, - * nvgpu_vm_unmap, nvgpu_vm_area_free, nvgpu_vm_put + * nvgpu_vm_unmap, nvgpu_vm_area_free, nvgpu_vm_put, nvgpu_vm_find_mapped_buf, + * nvgpu_vm_area_find, nvgpu_vm_unmap_ref_internal, nvgpu_vm_unmap_system, + * nvgpu_os_buf_get_size * * Input: None * @@ -91,7 +93,7 @@ int test_map_buf(struct unit_module *m, struct gk20a *g, void *__args); * nvgpu_vm_area_alloc, nvgpu_vm_map, nvgpu_vm_find_mapped_buf_range, * nvgpu_vm_find_mapped_buf_less_than, nvgpu_get_pte, nvgpu_vm_put_buffers, * nvgpu_vm_unmap, nvgpu_vm_area_free, nvgpu_vm_put, - * nvgpu_gmmu_va_small_page_limit + * nvgpu_gmmu_va_small_page_limit, nvgpu_vm_find_mapping * * Input: None * @@ -138,7 +140,7 @@ int test_map_buf_gpu_va(struct unit_module *m, struct gk20a *g, void *__args); * nvgpu_vm_map, nvgpu_vm_find_mapped_buf_range, * nvgpu_vm_find_mapped_buf_less_than, nvgpu_get_pte, nvgpu_vm_put_buffers, * nvgpu_vm_unmap, nvgpu_vm_area_free, nvgpu_vm_put, - * nvgpu_vm_mapping_batch_finish + * nvgpu_vm_mapping_batch_finish, nvgpu_vm_mapping_batch_finish_locked * * Input: None * @@ -161,11 +163,11 @@ int test_batch(struct unit_module *m, struct gk20a *g, void *__args); * Test specification for: test_init_error_paths * * Description: This test exercises the VM unit initialization code and covers - * a number of error paths. + * a number of error paths as well as reference counting mechanisms. 
 * * Test Type: Feature, Error injection * - * Targets: nvgpu_vm_init, nvgpu_vm_do_init + * Targets: nvgpu_vm_init, nvgpu_vm_do_init, nvgpu_vm_get, nvgpu_vm_put * * Input: None * @@ -196,6 +198,9 @@ int test_batch(struct unit_module *m, struct gk20a *g, void *__args); * unified VA space. * - Ensure that nvgpu_vm_do_init succeeds with big pages disabled. * - Ensure that nvgpu_vm_do_init succeeds with no user VMA. + * - Ensure that the reference count of the VM is 1. Then increment it using + * nvgpu_vm_get and ensure it is 2. Decrement it with nvgpu_vm_put and ensure + * it is back to 1. * - Uninitialize the VM * * Output: Returns PASS if the steps above were executed successfully. FAIL @@ -247,7 +252,7 @@ int test_map_buffer_error_cases(struct unit_module *m, struct gk20a *g, * * Test Type: Feature, Error injection * - * Targets: nvgpu_vm_alloc_va + * Targets: nvgpu_vm_alloc_va, nvgpu_vm_free_va * * Input: None * @@ -283,7 +288,7 @@ int test_nvgpu_vm_alloc_va(struct unit_module *m, struct gk20a *g, * * Test Type: Feature * - * Targets: nvgpu_vm_bind_channel + * Targets: gops_mm.vm_bind_channel, nvgpu_vm_bind_channel * * Input: None * @@ -379,5 +384,79 @@ int test_vm_aspace_id(struct unit_module *m, struct gk20a *g, void *__args); */ int test_vm_area_error_cases(struct unit_module *m, struct gk20a *g, void *__args); + +/** + * Test specification for: test_gk20a_from_vm + * + * Description: Simple test to check gk20a_from_vm. + * + * Test Type: Feature + * + * Targets: gk20a_from_vm + * + * Input: None + * + * Steps: + * - Create a test VM. + * - Call gk20a_from_vm with the test vm pointer and ensure it returns a + * pointer to g. + * - Uninitialize the VM. + * + * Output: Returns PASS if the steps above were executed successfully. FAIL
+ */ +int test_gk20a_from_vm(struct unit_module *m, struct gk20a *g, void *args); + +/** + * Test specification for: test_nvgpu_insert_mapped_buf + * + * Description: Tests the logic of nvgpu_insert_mapped_buf + * + * Test Type: Feature + * + * Targets: nvgpu_insert_mapped_buf, mapped_buffer_from_rbtree_node + * + * Input: None + * + * Steps: + * - Create a test VM. + * - Set an arbitrary test address. + * - Search in the vm->mapped_buffers RBTree to ensure that the arbitrary test + * address has no mapped buffers already. + * - Instantiate a struct nvgpu_mapped_buf and set its address to the arbitrary + * address with a size of 64KB and big pages. + * - Call nvgpu_insert_mapped_buf on the struct nvgpu_mapped_buf. + * - Search the vm->mapped_buffers RBTree again and ensure the buffer can be + * found. + * - Uninitialize the VM. + * + * Output: Returns PASS if the steps above were executed successfully. FAIL + * otherwise. + */ +int test_nvgpu_insert_mapped_buf(struct unit_module *m, struct gk20a *g, + void *args); + +/** + * Test specification for: test_vm_pde_coverage_bit_count + * + * Description: Tests the logic of nvgpu_vm_pde_coverage_bit_count + * + * Test Type: Feature + * + * Targets: nvgpu_vm_pde_coverage_bit_count + * + * Input: None + * + * Steps: + * - Create a test VM. + * - Call nvgpu_vm_pde_coverage_bit_count and ensure it returns the expected + * value of 21 (for GP10B and following chips). + * - Uninitialize the VM. + * + * Output: Returns PASS if the steps above were executed successfully. FAIL + * otherwise. + */ +int test_vm_pde_coverage_bit_count(struct unit_module *m, struct gk20a *g, + void *args); /** }@ */ #endif /* UNIT_VM_H */