gpu: nvgpu: Add IOCTL flag + plumbing for unified VAs

Add a flag that lets userspace enable the unified VA functionality
on a selective basis. The feature works for all cases except a
single MODS trace. The flag also allows test coverage to be added
selectively in certain userspace tests, which helps keep this
feature from bit rotting (as it has historically done).
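For illustration, here is a minimal userspace sketch of how such a
flag would be passed through the address-space alloc IOCTL. The flag
name NVGPU_GPU_ALLOC_AS_FLAGS_UNIFIED_VA, its bit value, and the
device node path are assumptions here; the UAPI change itself is not
shown in this diff:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

/* Hypothetical flag name/value; not confirmed by this diff. */
#ifndef NVGPU_GPU_ALLOC_AS_FLAGS_UNIFIED_VA
#define NVGPU_GPU_ALLOC_AS_FLAGS_UNIFIED_VA (1 << 1)
#endif

int alloc_unified_as(void)
{
	struct nvgpu_alloc_as_args args = {
		.big_page_size = 0, /* 0: let the driver pick the default */
		.flags = NVGPU_GPU_ALLOC_AS_FLAGS_UNIFIED_VA,
	};
	int ctrl_fd = open("/dev/nvhost-ctrl-gpu", O_RDWR);
	int err;

	if (ctrl_fd < 0)
		return -1;

	err = ioctl(ctrl_fd, NVGPU_GPU_IOCTL_ALLOC_AS, &args);
	close(ctrl_fd);

	return err ? -1 : args.as_fd; /* fd of the new address space */
}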

Also update the unit test for the GMMU page table management code to
reflect the new flag. The flag is set to false there, since the
current safety target platform does not use unified address spaces.
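A sketch of the resulting test call, with the fixture values assumed
(only the new Boolean in the argument list comes from this change):

/* GMMU page table unit test: pass unified_va = false, matching the
 * split address space used on the safety platform. */
err = __nvgpu_vm_init(mm, vm,
		      SZ_64K,          /* big_page_size (illustrative) */
		      SZ_4K,           /* low_hole (illustrative) */
		      kernel_reserved,
		      aperture_size,
		      true,            /* big_pages */
		      false,           /* userspace_managed */
		      false,           /* unified_va */
		      "gmmu_test");    /* name (illustrative) */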

Bug 200438879

Change-Id: Ibe005472910d1668e8372754be8dd792773f9d8c
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1951864
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>

@@ -282,6 +282,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 		u64 aperture_size,
 		bool big_pages,
 		bool userspace_managed,
+		bool unified_va,
 		const char *name)
 {
 	int err = 0;
@@ -315,7 +316,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 	vm->vma[GMMU_PAGE_SIZE_SMALL] = &vm->user;
 	vm->vma[GMMU_PAGE_SIZE_BIG] = &vm->user;
 	vm->vma[GMMU_PAGE_SIZE_KERNEL] = &vm->kernel;
-	if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
+	if (!unified_va) {
 		vm->vma[GMMU_PAGE_SIZE_BIG] = &vm->user_lp;
 	}
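For context, vm->vma[] is the table the rest of the VM code indexes
by page-size index to find the VA allocator for an allocation. A
simplified illustration (nvgpu_alloc() is the real allocator entry
point; the wrapper itself is made up):

/* With unified VA, SMALL and BIG both resolve to the user allocator;
 * with a split VA, BIG is redirected to the separate user_lp range. */
static u64 alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
{
	return nvgpu_alloc(vm->vma[pgsz_idx], size);
}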
@@ -324,6 +325,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 	vm->big_page_size = vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG];
 	vm->userspace_managed = userspace_managed;
+	vm->unified_va = unified_va;
 	vm->mmu_levels = g->ops.mm.get_mmu_levels(g, vm->big_page_size);
 #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
@@ -351,8 +353,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 	 * sense to make one VM, same as if the unified address flag
 	 * is set.
 	 */
-	if (!big_pages ||
-	    nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
+	if (!big_pages || unified_va) {
 		user_vma_start = low_hole;
 		user_vma_limit = vm->va_limit - kernel_reserved;
 		user_lp_vma_start = user_vma_limit;
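To make the carve-up concrete, a standalone sketch of the two layouts
the code chooses between. The split point for the non-unified case is
a made-up parameter here; only the unified branch mirrors the hunk
above directly:

#include <stdint.h>
#include <stdbool.h>

struct va_layout {
	uint64_t user_start, user_limit;       /* small-page user range */
	uint64_t user_lp_start, user_lp_limit; /* big-page range (split only) */
};

static struct va_layout pick_layout(bool big_pages, bool unified_va,
				    uint64_t low_hole, uint64_t va_limit,
				    uint64_t kernel_reserved, uint64_t split)
{
	struct va_layout l;

	if (!big_pages || unified_va) {
		/* One user VMA below the kernel carveout; the big-page
		 * range collapses to zero size. */
		l.user_start = low_hole;
		l.user_limit = va_limit - kernel_reserved;
		l.user_lp_start = l.user_limit;
		l.user_lp_limit = l.user_limit;
	} else {
		/* Split VA: small pages low, big pages high. */
		l.user_start = low_hole;
		l.user_limit = split;
		l.user_lp_start = split;
		l.user_lp_limit = va_limit - kernel_reserved;
	}
	return l;
}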
@@ -405,7 +406,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 	 * Determine if big pages are possible in this VM. If a split address
 	 * space is used then check the user_lp vma instead of the user vma.
 	 */
-	if (nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
+	if (unified_va) {
 		vm->big_pages = big_pages &&
 			nvgpu_big_pages_possible(vm, user_vma_start,
 				user_vma_limit - user_vma_start);
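nvgpu_big_pages_possible() is not shown in this diff; a plausible
shape for such a check (illustrative, not nvgpu's implementation) is
that the candidate range must be big-page aligned and large enough to
hold at least one big page:

static bool big_pages_possible(uint64_t base, uint64_t size,
			       uint64_t big_page_size)
{
	/* Aligned start and room for at least one big page. */
	return (base & (big_page_size - 1U)) == 0U &&
	       size >= big_page_size;
}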
@@ -577,6 +578,7 @@ struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
 		u64 aperture_size,
 		bool big_pages,
 		bool userspace_managed,
+		bool unified_va,
 		const char *name)
 {
 	struct vm_gk20a *vm = nvgpu_kzalloc(g, sizeof(*vm));
@@ -587,7 +589,7 @@ struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
 	if (__nvgpu_vm_init(&g->mm, vm, big_page_size, low_hole,
 			    kernel_reserved, aperture_size, big_pages,
-			    userspace_managed, name) != 0) {
+			    userspace_managed, unified_va, name) != 0) {
 		nvgpu_kfree(g, vm);
 		return NULL;
 	}
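Finally, a hedged sketch of the plumbing this enables on the kernel
side: the AS-allocation path turning the (assumed) UAPI flag into the
new unified_va argument. Everything except nvgpu_vm_init()'s
signature is illustrative, including the mm->channel size fields:

/* Illustrative only, not the actual nvgpu AS ioctl handler. */
static struct vm_gk20a *alloc_as_vm(struct gk20a *g, u32 flags,
				    u32 big_page_size)
{
	struct mm_gk20a *mm = &g->mm;
	bool unified_va =
		(flags & NVGPU_GPU_ALLOC_AS_FLAGS_UNIFIED_VA) != 0U;
	bool userspace_managed =
		(flags & NVGPU_GPU_ALLOC_AS_FLAGS_USERSPACE_MANAGED) != 0U;

	return nvgpu_vm_init(g, big_page_size,
			     SZ_4K,                   /* low_hole: assumed */
			     mm->channel.kernel_size, /* assumed fields */
			     mm->channel.user_size + mm->channel.kernel_size,
			     true,                    /* big_pages */
			     userspace_managed,
			     unified_va,
			     "as");
}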