gpu: nvgpu: Move unify_address_spaces to flags

Use the enabled flags API to handle the unify_address_spaces
flag.

JIRA NVGPU-84
JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: Id1b59aed4b349d6067615991597d534936cc5ce9
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1488307
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
This commit is contained in:
Alex Waterman
2017-05-24 00:06:48 +01:00
committed by mobile promotions
parent 66a2511a36
commit 8d6b5cc349
5 changed files with 18 additions and 7 deletions

View File

@@ -20,6 +20,7 @@
#include <nvgpu/nvgpu_common.h> #include <nvgpu/nvgpu_common.h>
#include <nvgpu/soc.h> #include <nvgpu/soc.h>
#include <nvgpu/bug.h> #include <nvgpu/bug.h>
#include <nvgpu/enabled.h>
#include "gk20a/gk20a_scale.h" #include "gk20a/gk20a_scale.h"
#include "gk20a/gk20a.h" #include "gk20a/gk20a.h"
@@ -133,6 +134,9 @@ static void nvgpu_init_mm_vars(struct gk20a *g)
g->mm.disable_bigpage = platform->disable_bigpage; g->mm.disable_bigpage = platform->disable_bigpage;
g->mm.vidmem_is_vidmem = platform->vidmem_is_vidmem; g->mm.vidmem_is_vidmem = platform->vidmem_is_vidmem;
__nvgpu_set_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES,
platform->unify_address_spaces);
nvgpu_mutex_init(&g->mm.tlb_lock); nvgpu_mutex_init(&g->mm.tlb_lock);
nvgpu_mutex_init(&g->mm.priv_lock); nvgpu_mutex_init(&g->mm.priv_lock);
} }

View File

@@ -22,10 +22,10 @@
#include <nvgpu/list.h> #include <nvgpu/list.h>
#include <nvgpu/rbtree.h> #include <nvgpu/rbtree.h>
#include <nvgpu/semaphore.h> #include <nvgpu/semaphore.h>
#include <nvgpu/enabled.h>
#include "gk20a/gk20a.h" #include "gk20a/gk20a.h"
#include "gk20a/mm_gk20a.h" #include "gk20a/mm_gk20a.h"
#include "gk20a/platform_gk20a.h"
int vm_aspace_id(struct vm_gk20a *vm) int vm_aspace_id(struct vm_gk20a *vm)
{ {
@@ -255,7 +255,6 @@ int nvgpu_init_vm(struct mm_gk20a *mm,
u64 user_lp_vma_start, user_lp_vma_limit; u64 user_lp_vma_start, user_lp_vma_limit;
u64 kernel_vma_start, kernel_vma_limit; u64 kernel_vma_start, kernel_vma_limit;
struct gk20a *g = mm->g; struct gk20a *g = mm->g;
struct gk20a_platform *p = gk20a_get_platform(g->dev);
if (WARN_ON(kernel_reserved + low_hole > aperture_size)) if (WARN_ON(kernel_reserved + low_hole > aperture_size))
return -ENOMEM; return -ENOMEM;
@@ -275,7 +274,7 @@ int nvgpu_init_vm(struct mm_gk20a *mm,
vm->vma[gmmu_page_size_small] = &vm->user; vm->vma[gmmu_page_size_small] = &vm->user;
vm->vma[gmmu_page_size_big] = &vm->user; vm->vma[gmmu_page_size_big] = &vm->user;
vm->vma[gmmu_page_size_kernel] = &vm->kernel; vm->vma[gmmu_page_size_kernel] = &vm->kernel;
if (!p->unify_address_spaces) if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES))
vm->vma[gmmu_page_size_big] = &vm->user_lp; vm->vma[gmmu_page_size_big] = &vm->user_lp;
vm->va_start = low_hole; vm->va_start = low_hole;
@@ -293,7 +292,7 @@ int nvgpu_init_vm(struct mm_gk20a *mm,
/* Setup vma limits. */ /* Setup vma limits. */
if (kernel_reserved + low_hole < aperture_size) { if (kernel_reserved + low_hole < aperture_size) {
if (p->unify_address_spaces) { if (nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
user_vma_start = low_hole; user_vma_start = low_hole;
user_vma_limit = vm->va_limit - kernel_reserved; user_vma_limit = vm->va_limit - kernel_reserved;
user_lp_vma_start = user_vma_limit; user_lp_vma_start = user_vma_limit;
@@ -346,7 +345,7 @@ int nvgpu_init_vm(struct mm_gk20a *mm,
* Determine if big pages are possible in this VM. If a split address * Determine if big pages are possible in this VM. If a split address
* space is used then check the user_lp vma instead of the user vma. * space is used then check the user_lp vma instead of the user vma.
*/ */
if (p->unify_address_spaces) if (nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES))
vm->big_pages = nvgpu_big_pages_possible(vm, user_vma_start, vm->big_pages = nvgpu_big_pages_possible(vm, user_vma_start,
user_vma_limit - user_vma_start); user_vma_limit - user_vma_start);
else else

View File

@@ -2432,12 +2432,12 @@ static enum gmmu_pgsz_gk20a __get_pte_size_split_addr(struct vm_gk20a *vm,
*/ */
enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size) enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
{ {
struct gk20a_platform *p = gk20a_get_platform(vm->mm->g->dev); struct gk20a *g = gk20a_from_vm(vm);
if (!vm->big_pages) if (!vm->big_pages)
return gmmu_page_size_small; return gmmu_page_size_small;
if (!p->unify_address_spaces) if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES))
return __get_pte_size_split_addr(vm, base, size); return __get_pte_size_split_addr(vm, base, size);
if (base) if (base)

View File

@@ -28,6 +28,11 @@ struct gk20a;
#define NVGPU_IS_FMODEL 1 #define NVGPU_IS_FMODEL 1
#define NVGPU_DRIVER_IS_DYING 2 #define NVGPU_DRIVER_IS_DYING 2
/*
* MM flags.
*/
#define NVGPU_MM_UNIFY_ADDRESS_SPACES 16
/* /*
* Must be greater than the largest bit offset in the above list. * Must be greater than the largest bit offset in the above list.
*/ */

View File

@@ -245,6 +245,9 @@ static void vgpu_init_vars(struct gk20a *g, struct gk20a_platform *platform)
g->ptimer_src_freq = platform->ptimer_src_freq; g->ptimer_src_freq = platform->ptimer_src_freq;
g->can_railgate = platform->can_railgate_init; g->can_railgate = platform->can_railgate_init;
g->railgate_delay = platform->railgate_delay_init; g->railgate_delay = platform->railgate_delay_init;
__nvgpu_set_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES,
platform->unify_address_spaces);
} }
static int vgpu_init_support(struct platform_device *pdev) static int vgpu_init_support(struct platform_device *pdev)