gpu: nvgpu: Change the allocator flag naming scheme

Move to a more generic name of GPU_ALLOC_*.

Change-Id: Icbbd366847a9d74f83f578e4d9ea917a6e8ea3e2
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1176445
Reviewed-by: Yu-Huan Hsu <yhsu@nvidia.com>
This commit is contained in:
Alex Waterman
2016-06-30 16:05:32 -07:00
parent 5672cbdf6d
commit 0793de62b2
5 changed files with 15 additions and 15 deletions

View File

@@ -137,7 +137,7 @@ struct gk20a_buddy_allocator {
/*
* This is for when the allocator is managing a GVA space (the
- * GPU_BALLOC_GVA_SPACE bit is set in @flags). This requires
+ * GPU_ALLOC_GVA_SPACE bit is set in @flags). This requires
* that we group like sized allocations into PDE blocks.
*/
u64 pte_blk_order;

View File

@@ -74,7 +74,7 @@ struct gk20a_allocator {
/*
* Allocator flags.
*/
-#define GPU_BALLOC_GVA_SPACE 0x1
+#define GPU_ALLOC_GVA_SPACE 0x1
static inline void alloc_lock(struct gk20a_allocator *a)
{

View File

@@ -131,7 +131,7 @@ static void __balloc_buddy_list_add(struct gk20a_buddy_allocator *a,
* This lets the code that checks if there are available blocks check
* without cycling through the entire list.
*/
-	if (a->flags & GPU_BALLOC_GVA_SPACE &&
+	if (a->flags & GPU_ALLOC_GVA_SPACE &&
b->pte_size == BALLOC_PTE_SIZE_BIG)
list_add_tail(&b->buddy_entry, list);
else
@@ -383,7 +383,7 @@ static int balloc_split_buddy(struct gk20a_buddy_allocator *a,
right->parent = b;
/* PTE considerations. */
-	if (a->flags & GPU_BALLOC_GVA_SPACE &&
+	if (a->flags & GPU_ALLOC_GVA_SPACE &&
left->order <= a->pte_blk_order) {
left->pte_size = pte_size;
right->pte_size = pte_size;
@@ -473,7 +473,7 @@ static struct gk20a_buddy *__balloc_find_buddy(struct gk20a_buddy_allocator *a,
list_empty(balloc_get_order_list(a, order)))
return NULL;
-	if (a->flags & GPU_BALLOC_GVA_SPACE &&
+	if (a->flags & GPU_ALLOC_GVA_SPACE &&
pte_size == BALLOC_PTE_SIZE_BIG)
bud = list_last_entry(balloc_get_order_list(a, order),
struct gk20a_buddy, buddy_entry);
@@ -823,7 +823,7 @@ static u64 gk20a_buddy_balloc(struct gk20a_allocator *__a, u64 len)
* TODO: once userspace supports a unified address space pass 0 for
* the base. This will make only 'len' affect the PTE size.
*/
-	if (a->flags & GPU_BALLOC_GVA_SPACE)
+	if (a->flags & GPU_ALLOC_GVA_SPACE)
pte_size = __get_pte_size(a->vm, a->base, len);
else
pte_size = BALLOC_PTE_SIZE_ANY;
@@ -1112,7 +1112,7 @@ int __gk20a_buddy_allocator_init(struct gk20a_allocator *__a,
return -EINVAL;
/* If this is to manage a GVA space we need a VM. */
-	if (flags & GPU_BALLOC_GVA_SPACE && !vm)
+	if (flags & GPU_ALLOC_GVA_SPACE && !vm)
return -EINVAL;
a = kzalloc(sizeof(struct gk20a_buddy_allocator), GFP_KERNEL);
@@ -1139,7 +1139,7 @@ int __gk20a_buddy_allocator_init(struct gk20a_allocator *__a,
}
a->vm = vm;
-	if (flags & GPU_BALLOC_GVA_SPACE)
+	if (flags & GPU_ALLOC_GVA_SPACE)
a->pte_blk_order = balloc_get_order(a, vm->big_page_size << 10);
a->flags = flags;

View File

@@ -3548,7 +3548,7 @@ int gk20a_init_vm(struct mm_gk20a *mm,
g->separate_fixed_allocs,
SZ_4K,
GPU_BALLOC_MAX_ORDER,
-				 GPU_BALLOC_GVA_SPACE);
+				 GPU_ALLOC_GVA_SPACE);
if (err)
goto clean_up_ptes;
@@ -3566,7 +3566,7 @@ int gk20a_init_vm(struct mm_gk20a *mm,
small_vma_limit - small_vma_start,
SZ_4K,
GPU_BALLOC_MAX_ORDER,
-					 GPU_BALLOC_GVA_SPACE);
+					 GPU_ALLOC_GVA_SPACE);
if (err)
goto clean_up_ptes;
}
@@ -3581,7 +3581,7 @@ int gk20a_init_vm(struct mm_gk20a *mm,
large_vma_limit - large_vma_start,
big_page_size,
GPU_BALLOC_MAX_ORDER,
-					 GPU_BALLOC_GVA_SPACE);
+					 GPU_ALLOC_GVA_SPACE);
if (err)
goto clean_up_small_allocator;
}
@@ -3597,7 +3597,7 @@ int gk20a_init_vm(struct mm_gk20a *mm,
kernel_vma_limit - kernel_vma_start,
SZ_4K,
GPU_BALLOC_MAX_ORDER,
-				 GPU_BALLOC_GVA_SPACE);
+				 GPU_ALLOC_GVA_SPACE);
if (err)
goto clean_up_big_allocator;

View File

@@ -381,7 +381,7 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
small_vma_limit - small_vma_start,
SZ_4K,
GPU_BALLOC_MAX_ORDER,
-					 GPU_BALLOC_GVA_SPACE);
+					 GPU_ALLOC_GVA_SPACE);
if (err)
goto clean_up_share;
}
@@ -396,7 +396,7 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
large_vma_limit - large_vma_start,
big_page_size,
GPU_BALLOC_MAX_ORDER,
-					 GPU_BALLOC_GVA_SPACE);
+					 GPU_ALLOC_GVA_SPACE);
if (err)
goto clean_up_small_allocator;
}
@@ -412,7 +412,7 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
kernel_vma_limit - kernel_vma_start,
SZ_4K,
GPU_BALLOC_MAX_ORDER,
-				 GPU_BALLOC_GVA_SPACE);
+				 GPU_ALLOC_GVA_SPACE);
if (err)
goto clean_up_big_allocator;