mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-22 17:36:20 +03:00
gpu: nvgpu: Add IOCTL flag + plumbing for unified VAs
Add a flag that lets userspace enable the unified VM functionality on a selective basis. This feature is working for all cases except a single MODS trace. This will allow test coverage to be selectively added in certain userspace tests as well to help prevent this feature from bit rotting (as it has historically done). Also update the unit test for the page table management in the GMMU to reflect this new flag. It's been set to false since the target platform for safety is currently not using unified address spaces. Bug 200438879 Change-Id: Ibe005472910d1668e8372754be8dd792773f9d8c Signed-off-by: Alex Waterman <alexw@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1951864 Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com> GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com> Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
committed by
mobile promotions
parent
b361c38bca
commit
fc939e5fb6
@@ -57,6 +57,9 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
|
|||||||
char name[32];
|
char name[32];
|
||||||
const bool userspace_managed =
|
const bool userspace_managed =
|
||||||
(flags & NVGPU_AS_ALLOC_USERSPACE_MANAGED) != 0;
|
(flags & NVGPU_AS_ALLOC_USERSPACE_MANAGED) != 0;
|
||||||
|
const bool unified_va =
|
||||||
|
nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES) ||
|
||||||
|
(flags & NVGPU_AS_ALLOC_UNIFIED_VA) != 0;
|
||||||
|
|
||||||
nvgpu_log_fn(g, " ");
|
nvgpu_log_fn(g, " ");
|
||||||
|
|
||||||
@@ -79,7 +82,8 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
|
|||||||
U64(big_page_size) << U64(10),
|
U64(big_page_size) << U64(10),
|
||||||
mm->channel.kernel_size,
|
mm->channel.kernel_size,
|
||||||
mm->channel.user_size + mm->channel.kernel_size,
|
mm->channel.user_size + mm->channel.kernel_size,
|
||||||
!mm->disable_bigpage, userspace_managed, name);
|
!mm->disable_bigpage,
|
||||||
|
userspace_managed, unified_va, name);
|
||||||
if (vm == NULL) {
|
if (vm == NULL) {
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -99,7 +99,7 @@ u32 nvgpu_vm_get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
|
|||||||
return GMMU_PAGE_SIZE_SMALL;
|
return GMMU_PAGE_SIZE_SMALL;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
|
if (!vm->unified_va) {
|
||||||
return nvgpu_vm_get_pte_size_split_addr(vm, base, size);
|
return nvgpu_vm_get_pte_size_split_addr(vm, base, size);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -237,6 +237,7 @@ static int nvgpu_init_system_vm(struct mm_gk20a *mm)
|
|||||||
aperture_size,
|
aperture_size,
|
||||||
true,
|
true,
|
||||||
false,
|
false,
|
||||||
|
false,
|
||||||
"system");
|
"system");
|
||||||
if (mm->pmu.vm == NULL) {
|
if (mm->pmu.vm == NULL) {
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
@@ -279,7 +280,7 @@ static int nvgpu_init_cde_vm(struct mm_gk20a *mm)
|
|||||||
U64(big_page_size) << U64(10),
|
U64(big_page_size) << U64(10),
|
||||||
NV_MM_DEFAULT_KERNEL_SIZE,
|
NV_MM_DEFAULT_KERNEL_SIZE,
|
||||||
NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
|
NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
|
||||||
false, false, "cde");
|
false, false, false, "cde");
|
||||||
if (mm->cde.vm == NULL) {
|
if (mm->cde.vm == NULL) {
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
@@ -295,7 +296,7 @@ static int nvgpu_init_ce_vm(struct mm_gk20a *mm)
|
|||||||
U64(big_page_size) << U64(10),
|
U64(big_page_size) << U64(10),
|
||||||
NV_MM_DEFAULT_KERNEL_SIZE,
|
NV_MM_DEFAULT_KERNEL_SIZE,
|
||||||
NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
|
NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
|
||||||
false, false, "ce");
|
false, false, false, "ce");
|
||||||
if (mm->ce.vm == NULL) {
|
if (mm->ce.vm == NULL) {
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
@@ -389,7 +390,7 @@ static int nvgpu_init_bar1_vm(struct mm_gk20a *mm)
|
|||||||
SZ_64K,
|
SZ_64K,
|
||||||
mm->bar1.aperture_size - SZ_64K,
|
mm->bar1.aperture_size - SZ_64K,
|
||||||
mm->bar1.aperture_size,
|
mm->bar1.aperture_size,
|
||||||
true, false,
|
true, false, false,
|
||||||
"bar1");
|
"bar1");
|
||||||
if (mm->bar1.vm == NULL) {
|
if (mm->bar1.vm == NULL) {
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|||||||
@@ -282,6 +282,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
|
|||||||
u64 aperture_size,
|
u64 aperture_size,
|
||||||
bool big_pages,
|
bool big_pages,
|
||||||
bool userspace_managed,
|
bool userspace_managed,
|
||||||
|
bool unified_va,
|
||||||
const char *name)
|
const char *name)
|
||||||
{
|
{
|
||||||
int err = 0;
|
int err = 0;
|
||||||
@@ -315,7 +316,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
|
|||||||
vm->vma[GMMU_PAGE_SIZE_SMALL] = &vm->user;
|
vm->vma[GMMU_PAGE_SIZE_SMALL] = &vm->user;
|
||||||
vm->vma[GMMU_PAGE_SIZE_BIG] = &vm->user;
|
vm->vma[GMMU_PAGE_SIZE_BIG] = &vm->user;
|
||||||
vm->vma[GMMU_PAGE_SIZE_KERNEL] = &vm->kernel;
|
vm->vma[GMMU_PAGE_SIZE_KERNEL] = &vm->kernel;
|
||||||
if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
|
if (!unified_va) {
|
||||||
vm->vma[GMMU_PAGE_SIZE_BIG] = &vm->user_lp;
|
vm->vma[GMMU_PAGE_SIZE_BIG] = &vm->user_lp;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -324,6 +325,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
|
|||||||
|
|
||||||
vm->big_page_size = vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG];
|
vm->big_page_size = vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG];
|
||||||
vm->userspace_managed = userspace_managed;
|
vm->userspace_managed = userspace_managed;
|
||||||
|
vm->unified_va = unified_va;
|
||||||
vm->mmu_levels = g->ops.mm.get_mmu_levels(g, vm->big_page_size);
|
vm->mmu_levels = g->ops.mm.get_mmu_levels(g, vm->big_page_size);
|
||||||
|
|
||||||
#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
|
#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
|
||||||
@@ -351,8 +353,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
|
|||||||
* sense to make one VM, same as if the unified address flag
|
* sense to make one VM, same as if the unified address flag
|
||||||
* is set.
|
* is set.
|
||||||
*/
|
*/
|
||||||
if (!big_pages ||
|
if (!big_pages || unified_va) {
|
||||||
nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
|
|
||||||
user_vma_start = low_hole;
|
user_vma_start = low_hole;
|
||||||
user_vma_limit = vm->va_limit - kernel_reserved;
|
user_vma_limit = vm->va_limit - kernel_reserved;
|
||||||
user_lp_vma_start = user_vma_limit;
|
user_lp_vma_start = user_vma_limit;
|
||||||
@@ -405,7 +406,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
|
|||||||
* Determine if big pages are possible in this VM. If a split address
|
* Determine if big pages are possible in this VM. If a split address
|
||||||
* space is used then check the user_lp vma instead of the user vma.
|
* space is used then check the user_lp vma instead of the user vma.
|
||||||
*/
|
*/
|
||||||
if (nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
|
if (unified_va) {
|
||||||
vm->big_pages = big_pages &&
|
vm->big_pages = big_pages &&
|
||||||
nvgpu_big_pages_possible(vm, user_vma_start,
|
nvgpu_big_pages_possible(vm, user_vma_start,
|
||||||
user_vma_limit - user_vma_start);
|
user_vma_limit - user_vma_start);
|
||||||
@@ -577,6 +578,7 @@ struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
|
|||||||
u64 aperture_size,
|
u64 aperture_size,
|
||||||
bool big_pages,
|
bool big_pages,
|
||||||
bool userspace_managed,
|
bool userspace_managed,
|
||||||
|
bool unified_va,
|
||||||
const char *name)
|
const char *name)
|
||||||
{
|
{
|
||||||
struct vm_gk20a *vm = nvgpu_kzalloc(g, sizeof(*vm));
|
struct vm_gk20a *vm = nvgpu_kzalloc(g, sizeof(*vm));
|
||||||
@@ -587,7 +589,7 @@ struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
|
|||||||
|
|
||||||
if (__nvgpu_vm_init(&g->mm, vm, big_page_size, low_hole,
|
if (__nvgpu_vm_init(&g->mm, vm, big_page_size, low_hole,
|
||||||
kernel_reserved, aperture_size, big_pages,
|
kernel_reserved, aperture_size, big_pages,
|
||||||
userspace_managed, name) != 0) {
|
userspace_managed, unified_va, name) != 0) {
|
||||||
nvgpu_kfree(g, vm);
|
nvgpu_kfree(g, vm);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -59,7 +59,7 @@ int gp10b_init_bar2_vm(struct gk20a *g)
|
|||||||
|
|
||||||
mm->bar2.vm = nvgpu_vm_init(g, big_page_size, SZ_4K,
|
mm->bar2.vm = nvgpu_vm_init(g, big_page_size, SZ_4K,
|
||||||
mm->bar2.aperture_size - SZ_4K,
|
mm->bar2.aperture_size - SZ_4K,
|
||||||
mm->bar2.aperture_size, false, false, "bar2");
|
mm->bar2.aperture_size, false, false, false, "bar2");
|
||||||
if (mm->bar2.vm == NULL) {
|
if (mm->bar2.vm == NULL) {
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -42,7 +42,8 @@ struct gk20a_as_share {
|
|||||||
/*
|
/*
|
||||||
* AS allocation flags.
|
* AS allocation flags.
|
||||||
*/
|
*/
|
||||||
#define NVGPU_AS_ALLOC_USERSPACE_MANAGED (1 << 0)
|
#define NVGPU_AS_ALLOC_USERSPACE_MANAGED BIT32(0)
|
||||||
|
#define NVGPU_AS_ALLOC_UNIFIED_VA BIT32(1)
|
||||||
|
|
||||||
int gk20a_as_release_share(struct gk20a_as_share *as_share);
|
int gk20a_as_release_share(struct gk20a_as_share *as_share);
|
||||||
|
|
||||||
|
|||||||
@@ -159,6 +159,7 @@ struct vm_gk20a {
|
|||||||
u32 big_page_size;
|
u32 big_page_size;
|
||||||
|
|
||||||
bool userspace_managed;
|
bool userspace_managed;
|
||||||
|
bool unified_va;
|
||||||
|
|
||||||
const struct gk20a_mmu_level *mmu_levels;
|
const struct gk20a_mmu_level *mmu_levels;
|
||||||
|
|
||||||
@@ -302,6 +303,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
|
|||||||
u64 aperture_size,
|
u64 aperture_size,
|
||||||
bool big_pages,
|
bool big_pages,
|
||||||
bool userspace_managed,
|
bool userspace_managed,
|
||||||
|
bool unified_va,
|
||||||
const char *name);
|
const char *name);
|
||||||
|
|
||||||
struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
|
struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
|
||||||
@@ -311,6 +313,7 @@ struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
|
|||||||
u64 aperture_size,
|
u64 aperture_size,
|
||||||
bool big_pages,
|
bool big_pages,
|
||||||
bool userspace_managed,
|
bool userspace_managed,
|
||||||
|
bool unified_va,
|
||||||
const char *name);
|
const char *name);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|||||||
@@ -77,6 +77,8 @@ static u32 gk20a_as_translate_as_alloc_flags(struct gk20a *g, u32 flags)
|
|||||||
|
|
||||||
if (flags & NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED)
|
if (flags & NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED)
|
||||||
core_flags |= NVGPU_AS_ALLOC_USERSPACE_MANAGED;
|
core_flags |= NVGPU_AS_ALLOC_USERSPACE_MANAGED;
|
||||||
|
if (flags & NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_UNIFIED_VA)
|
||||||
|
core_flags |= NVGPU_AS_ALLOC_UNIFIED_VA;
|
||||||
|
|
||||||
return core_flags;
|
return core_flags;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1354,7 +1354,7 @@ static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
|
|||||||
big_page_size << 10,
|
big_page_size << 10,
|
||||||
NV_MM_DEFAULT_KERNEL_SIZE,
|
NV_MM_DEFAULT_KERNEL_SIZE,
|
||||||
NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
|
NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
|
||||||
false, false, "perfbuf");
|
false, false, false, "perfbuf");
|
||||||
if (!mm->perfbuf.vm) {
|
if (!mm->perfbuf.vm) {
|
||||||
nvgpu_mutex_release(&g->dbg_sessions_lock);
|
nvgpu_mutex_release(&g->dbg_sessions_lock);
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|||||||
@@ -357,7 +357,8 @@ struct nvgpu_alloc_as_args {
|
|||||||
* increments at kickoffs and decrements at job completion are
|
* increments at kickoffs and decrements at job completion are
|
||||||
* bypassed.
|
* bypassed.
|
||||||
*/
|
*/
|
||||||
#define NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED (1 << 0)
|
#define NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED (1 << 0)
|
||||||
|
#define NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_UNIFIED_VA (1 << 1)
|
||||||
__u32 flags;
|
__u32 flags;
|
||||||
|
|
||||||
__u32 reserved; /* must be zero */
|
__u32 reserved; /* must be zero */
|
||||||
|
|||||||
@@ -240,6 +240,7 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
|
|||||||
aperture_size,
|
aperture_size,
|
||||||
true,
|
true,
|
||||||
false,
|
false,
|
||||||
|
false,
|
||||||
"system");
|
"system");
|
||||||
if (mm->pmu.vm == NULL) {
|
if (mm->pmu.vm == NULL) {
|
||||||
unit_return_fail(m, "nvgpu_vm_init failed\n");
|
unit_return_fail(m, "nvgpu_vm_init failed\n");
|
||||||
|
|||||||
Reference in New Issue
Block a user