diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index aac948b85..5bc677c02 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -245,11 +245,6 @@ u64 nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
 
 	vma = vm->vma[pgsz_idx];
 
-	if (vm->guest_managed) {
-		nvgpu_err(g, "Illegal GPU allocation on behalf of guest OS");
-		return 0;
-	}
-
 	if (pgsz_idx >= GMMU_NR_PAGE_SIZES) {
 		nvgpu_err(g, "(%s) invalid page size requested", vma->name);
 		return 0;
@@ -599,8 +594,7 @@ static int nvgpu_vm_init_check_vma_limits(struct gk20a *g, struct vm_gk20a *vm,
 {
 	if ((user_vma_start > user_vma_limit) ||
 	    (user_lp_vma_start > user_lp_vma_limit) ||
-	    (!vm->guest_managed &&
-	     (kernel_vma_start >= kernel_vma_limit))) {
+	    (kernel_vma_start >= kernel_vma_limit)) {
 		nvgpu_err(g, "Invalid vm configuration");
 		nvgpu_do_assert();
 		return -EINVAL;
@@ -739,12 +733,6 @@ static int nvgpu_vm_init_attributes(struct mm_gk20a *mm,
 		return -ENOMEM;
 	}
 
-	if (vm->guest_managed && (kernel_reserved != 0U)) {
-		nvgpu_do_assert_print(g,
-			"Cannot use guest managed VM with kernel space");
-		return -EINVAL;
-	}
-
 	nvgpu_log_info(g, "Init space for %s: valimit=0x%llx, "
 		"LP size=0x%x lowhole=0x%llx",
 		name, aperture_size,
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm.h b/drivers/gpu/nvgpu/include/nvgpu/vm.h
index 5db38bde3..4616ff806 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/vm.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/vm.h
@@ -256,8 +256,6 @@ struct vm_gk20a {
 	 * It is not enabled for safety build.
 	 */
 	bool enable_ctag;
-	/** Whether this address space is managed by guest or not. */
-	bool guest_managed;
 	/** Page size used for mappings with this address space. */
 	u32 big_page_size;
 
diff --git a/userspace/required_tests.ini b/userspace/required_tests.ini
index d7d7f2c71..63865ae7e 100644
--- a/userspace/required_tests.ini
+++ b/userspace/required_tests.ini
@@ -820,7 +820,6 @@ test_nvgpu_gmmu_map_unmap_adv.gmmu_map_unmap_no_iommu_sysmem_adv_big_pages_offset=0
 test_nvgpu_gmmu_map_unmap_adv.gmmu_map_unmap_tlb_invalidate_fail=0
 test_nvgpu_gmmu_map_unmap_batched.gmmu_map_unmap_iommu_sysmem_adv_big_pages_batched=0
 test_nvgpu_gmmu_map_unmap_map_fail.map_fail_fi_null_sgt=0
-test_nvgpu_gmmu_map_unmap_map_fail.map_fail_fi_vm_alloc=0
 test_nvgpu_gmmu_map_unmap_map_fail.map_fail_pd_allocate=0
 test_nvgpu_gmmu_map_unmap_map_fail.map_fail_pd_allocate_child=0
 test_nvgpu_gmmu_map_unmap_map_fail.map_fail_tlb_invalidate=0
diff --git a/userspace/units/mm/gmmu/page_table/page_table.c b/userspace/units/mm/gmmu/page_table/page_table.c
index e02336534..20967cec0 100644
--- a/userspace/units/mm/gmmu/page_table/page_table.c
+++ b/userspace/units/mm/gmmu/page_table/page_table.c
@@ -64,10 +64,9 @@
 
 /* Some special failure cases */
 #define SPECIAL_MAP_FAIL_FI_NULL_SGT		0
-#define SPECIAL_MAP_FAIL_VM_ALLOC		1
-#define SPECIAL_MAP_FAIL_PD_ALLOCATE		2
-#define SPECIAL_MAP_FAIL_PD_ALLOCATE_CHILD	3
-#define SPECIAL_MAP_FAIL_TLB_INVALIDATE		4
+#define SPECIAL_MAP_FAIL_PD_ALLOCATE		1
+#define SPECIAL_MAP_FAIL_PD_ALLOCATE_CHILD	2
+#define SPECIAL_MAP_FAIL_TLB_INVALIDATE		3
 
 /* Consts for requirements C1/C2 testing */
 #define REQ_C1_NUM_MEMS 3
@@ -604,11 +603,6 @@ int test_nvgpu_gmmu_map_unmap_map_fail(struct unit_module *m, struct gk20a *g,
 		nvgpu_posix_enable_fault_injection(kmem_fi, true, 3);
 	}
 
-	if (scenario == SPECIAL_MAP_FAIL_VM_ALLOC) {
-		/* Special case: cause __nvgpu_vm_alloc_va to fail */
-		g->mm.pmu.vm->guest_managed = true;
-	}
-
 	if (scenario == SPECIAL_MAP_FAIL_TLB_INVALIDATE) {
 		g->ops.fb.tlb_invalidate = hal_fb_tlb_invalidate_fail;
 	}
@@ -623,7 +617,6 @@ int test_nvgpu_gmmu_map_unmap_map_fail(struct unit_module *m, struct gk20a *g,
 	}
 
 	nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
-	g->mm.pmu.vm->guest_managed = false;
 	if (mem.gpu_va != 0) {
 		unit_return_fail(m, "map did not fail as expected\n");
 	}
@@ -1294,10 +1287,6 @@ struct unit_module_test nvgpu_gmmu_tests[] = {
 		  test_nvgpu_gmmu_map_unmap_map_fail,
 		  (void *) SPECIAL_MAP_FAIL_FI_NULL_SGT,
 		  0),
-	UNIT_TEST(map_fail_fi_vm_alloc,
-		  test_nvgpu_gmmu_map_unmap_map_fail,
-		  (void *) SPECIAL_MAP_FAIL_VM_ALLOC,
-		  0),
 	UNIT_TEST(map_fail_tlb_invalidate,
 		  test_nvgpu_gmmu_map_unmap_map_fail,
 		  (void *) SPECIAL_MAP_FAIL_TLB_INVALIDATE,
diff --git a/userspace/units/mm/vm/vm.c b/userspace/units/mm/vm/vm.c
index b1a89b46b..a9fdccdec 100644
--- a/userspace/units/mm/vm/vm.c
+++ b/userspace/units/mm/vm/vm.c
@@ -243,16 +243,6 @@ int test_nvgpu_vm_alloc_va(struct unit_module *m, struct gk20a *g,
 			nvgpu_kmem_get_fault_injection();
 	u64 addr;
 
-	/* Error handling: VM cannot allocate VA */
-	vm->guest_managed = true;
-	addr = nvgpu_vm_alloc_va(vm, SZ_1K, 0);
-	vm->guest_managed = false;
-	if (addr != 0) {
-		unit_err(m, "nvgpu_vm_alloc_va did not fail as expected (1).\n");
-		ret = UNIT_FAIL;
-		goto exit;
-	}
-
 	/* Error handling: invalid page size */
 	addr = nvgpu_vm_alloc_va(vm, SZ_1K, GMMU_NR_PAGE_SIZES);
 	if (addr != 0) {
@@ -969,19 +959,6 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 
 	/* Make nvgpu_vm_do_init fail with invalid parameters */
 	vm = nvgpu_kzalloc(g, sizeof(*vm));
-	vm->guest_managed = true;
-	if (!EXPECT_BUG(
-		nvgpu_vm_do_init(&g->mm, vm,
-			g->ops.mm.gmmu.get_default_big_page_size(),
-			low_hole, user_vma, kernel_reserved,
-			nvgpu_gmmu_va_small_page_limit(),
-			big_pages, false, true, __func__)
-		)) {
-		unit_err(m, "BUG() was not called but it was expected (3).\n");
-		ret = UNIT_FAIL;
-		goto exit;
-	}
-	vm->guest_managed = false;
 
 	/* vGPU with userspace managed */
 	g->is_virtual = true;
@@ -1070,21 +1047,6 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 		goto exit;
 	}
 
-	/* Invalid low_hole and kernel_reserved to cause an invalid config */
-	vm->guest_managed = true;
-	ret = nvgpu_vm_do_init(&g->mm, vm,
-			g->ops.mm.gmmu.get_default_big_page_size(),
-			nvgpu_gmmu_va_small_page_limit(),
-			((u64)SZ_1G * 200U), 0,
-			nvgpu_gmmu_va_small_page_limit(), big_pages,
-			false, false, __func__);
-	vm->guest_managed = false;
-	if (ret != -EINVAL) {
-		unit_err(m, "nvgpu_vm_do_init didn't fail as expected (11).\n");
-		ret = UNIT_FAIL;
-		goto exit;
-	}
-
 	/* Cause nvgpu_vm_init_vma_allocators to fail for long vm name */
 	ret = nvgpu_vm_do_init(&g->mm, vm,
 			g->ops.mm.gmmu.get_default_big_page_size(),
@@ -1157,7 +1119,6 @@ int test_init_error_paths(struct unit_module *m, struct gk20a *g, void *__args)
 	ret = UNIT_SUCCESS;
 
 exit:
-	vm->guest_managed = false;
 	if (vm != NULL) {
 		nvgpu_vm_put(vm);
 	}
diff --git a/userspace/units/sync/nvgpu-sync.c b/userspace/units/sync/nvgpu-sync.c
index 67c5b91d4..ad6e96990 100644
--- a/userspace/units/sync/nvgpu-sync.c
+++ b/userspace/units/sync/nvgpu-sync.c
@@ -315,12 +315,11 @@ done:
 
 #define F_SYNC_GET_RO_MAP_PRE_ALLOCATED	0
 #define F_SYNC_GET_RO_MAP		1
-#define F_SYNC_GET_RO_MAP_FAIL		2
+#define F_SYNC_GET_RO_MAP_MAX		1
 
 static const char *f_sync_get_ro_map[] = {
 	"sync_get_ro_map_preallocated",
 	"sync_get_ro_map",
-	"sync_get_ro_map_fail",
 };
 
 static void syncpt_ro_map_gpu_va_clear(struct gk20a *g, struct nvgpu_channel *ch)
@@ -347,7 +346,7 @@ int test_sync_get_ro_map(struct unit_module *m, struct gk20a *g, void *args)
 	int err = 0;
 	int ret = UNIT_FAIL;
 
-	for (branches = 0U; branches <= F_SYNC_GET_RO_MAP_FAIL; branches++) {
+	for (branches = 0U; branches <= F_SYNC_GET_RO_MAP_MAX; branches++) {
 		if (branches == F_SYNC_GET_RO_MAP_PRE_ALLOCATED) {
 			ch->vm->syncpt_ro_map_gpu_va = nvgpu_gmmu_map(ch->vm,
 					&g->syncpt_mem, g->syncpt_unit_size,
@@ -358,10 +357,6 @@ int test_sync_get_ro_map(struct unit_module *m, struct gk20a *g, void *args)
 			}
 		} else if (branches == F_SYNC_GET_RO_MAP) {
 			ch->vm->syncpt_ro_map_gpu_va = 0U;
-		} else if (branches == F_SYNC_GET_RO_MAP_FAIL) {
-			ch->vm->syncpt_ro_map_gpu_va = 0U;
-			/* fail Read-Only nvgpu_gmmu_map of g->syncpt_mem */
-			ch->vm->guest_managed = true;
 		}
 		unit_info(m, "%s branch: %s\n", __func__,
 			f_sync_get_ro_map[branches]);
@@ -369,7 +364,7 @@ int test_sync_get_ro_map(struct unit_module *m, struct gk20a *g, void *args)
 		err = g->ops.sync.syncpt.get_sync_ro_map(ch->vm,
 			&base_gpuva, &sync_size, &num_syncpoints);
 
-		if (branches < F_SYNC_GET_RO_MAP_FAIL) {
+		if (branches <= F_SYNC_GET_RO_MAP_MAX) {
 			if(err != 0) {
 				unit_return_fail(m,
 					"unexpected failure in get_sync_ro_map");
@@ -382,40 +377,27 @@ int test_sync_get_ro_map(struct unit_module *m, struct gk20a *g, void *args)
 
 			unit_info(m, "Syncpt Shim GPU VA: %llu\n",
 				base_gpuva);
-		} else {
-			if (err == 0) {
-				unit_return_fail(m,
-					"expected failure in get_sync_ro_map");
-			} else {
-				ret = UNIT_SUCCESS;
-			}
 		}
 
 		syncpt_ro_map_gpu_va_clear(g, ch);
 
-		if (ch->vm->guest_managed == true) {
-			ch->vm->guest_managed = false;
-		}
-
 		base_gpuva = 0U;
 		sync_size = 0U;
 	}
 
+	ret = UNIT_SUCCESS;
+
 done:
 	syncpt_ro_map_gpu_va_clear(g, ch);
-	if (ch->vm->guest_managed == true) {
-		ch->vm->guest_managed = false;
-	}
 	return ret;
 }
 
 #define F_SYNC_SYNCPT_ALLOC_FAILED		1
 #define F_SYNC_STRADD_FAIL			3
 #define F_SYNC_NVHOST_CLIENT_MANAGED_FAIL	4
-#define F_SYNC_RO_MAP_GPU_VA_MAP_FAIL		5
-#define F_SYNC_MEM_CREATE_PHYS_FAIL		6
-#define F_SYNC_BUF_MAP_FAIL			7
-#define F_SYNC_FAIL_LAST			8
+#define F_SYNC_MEM_CREATE_PHYS_FAIL		5
+#define F_SYNC_BUF_MAP_FAIL			6
+#define F_SYNC_FAIL_LAST			7
 
 static const char *f_syncpt_open[] = {
 	"global_disable_syncpt",
@@ -423,7 +405,6 @@ static const char *f_syncpt_open[] = {
 	"syncpt_user_managed_false",
 	"syncpt_stradd_fail",
 	"syncpt_get_client_managed_fail",
-	"syncpt_ro_map_gpu_va_fail",
 	"syncpt_create_phys_mem_fail",
 	"syncpt_buf_map_fail",
 };
@@ -439,10 +420,6 @@ static void clear_test_params(struct gk20a *g,
 		bool *fault_injection_enabled, u32 branch,
 		struct nvgpu_posix_fault_inj *kmem_fi)
 {
-	if (ch->vm->guest_managed) {
-		ch->vm->guest_managed = false;
-	}
-
 	if (*fault_injection_enabled) {
 		nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
 		*fault_injection_enabled = false;
@@ -486,9 +463,6 @@ int test_sync_create_fail(struct unit_module *m, struct gk20a *g, void *args)
 			g->name = FAIL_G_NAME_STR;
 		} else if (branches == F_SYNC_NVHOST_CLIENT_MANAGED_FAIL) {
 			g->nvhost->syncpt_id = 20U; /* arbitary id */
-		} else if (branches == F_SYNC_RO_MAP_GPU_VA_MAP_FAIL) {
-			/* fail Read-Only nvgpu_gmmu_map of g->syncpt_mem */
-			ch->vm->guest_managed = true;
 		} else if (branches == F_SYNC_MEM_CREATE_PHYS_FAIL) {
 			/*
 			 * bypass map of g->syncpt_mem and fail at
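The net effect of the patch is easiest to see in `nvgpu_vm_alloc_va()`: with `vm->guest_managed` gone, the page-size index bound check is the only remaining input-validation failure, which is why tests that forced failures by setting the flag (`map_fail_fi_vm_alloc`, `sync_get_ro_map_fail`, `syncpt_ro_map_gpu_va_fail`) lose their injection point and are dropped rather than rewritten. Below is a minimal, self-contained sketch of the surviving error path; the stub types and the `alloc_va_stub()` name are hypothetical stand-ins for illustration, not the nvgpu API.

```c
#include <stdio.h>

#define GMMU_NR_PAGE_SIZES 3U	/* mirrors the nvgpu constant */

/* Hypothetical stand-ins for struct vm_gk20a and its per-size allocators. */
struct vma_stub { const char *name; };
struct vm_stub { struct vma_stub *vma[GMMU_NR_PAGE_SIZES]; };

/*
 * Models the post-patch entry checks of nvgpu_vm_alloc_va(): an
 * out-of-range pgsz_idx is now the only early-return validation path.
 */
static unsigned long long alloc_va_stub(struct vm_stub *vm,
					unsigned long long size,
					unsigned int pgsz_idx)
{
	if (pgsz_idx >= GMMU_NR_PAGE_SIZES) {
		fprintf(stderr, "invalid page size requested\n");
		return 0ULL;
	}

	/* The real code hands off to vm->vma[pgsz_idx]'s allocator here;
	 * return a fake nonzero VA to keep the sketch self-contained. */
	(void)size;
	return 0x100000ULL;
}

int main(void)
{
	struct vm_stub vm = { .vma = { 0 } };

	/* Mirrors the surviving unit test in vm/vm.c: GMMU_NR_PAGE_SIZES is
	 * one past the last valid index, so the call must return 0. */
	if (alloc_va_stub(&vm, 1024ULL, GMMU_NR_PAGE_SIZES) != 0ULL)
		return 1;
	return 0;
}
```

A consequence worth noting for reviewers: since the flag was the only way to make `nvgpu_vm_alloc_va()` fail on demand from a test, coverage of map-time VA-allocation failure now rests entirely on the remaining fault-injection scenarios (null SGT, PD allocation, TLB invalidate) listed in `required_tests.ini`.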