gpu: nvgpu: re-architect nvgpu allocator functions

Currently, buddy, page and bitmap allocators have individual init()
functions. This patch creates a common nvgpu_allocator_init() function
that dispatches to the individual init functions based on an allocator
type argument. This makes writing requirements for the allocators easier.

Jira NVGPU-991

Change-Id: If94e3496f46f036460ef9f1831852e6fc19d3a0b
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2097962
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Vedashree Vidwans
2019-04-15 09:32:35 -07:00
committed by mobile promotions
parent bb61fc110b
commit 8d325e9db1
7 changed files with 86 additions and 24 deletions

View File

@@ -1,7 +1,7 @@
/*
* gk20a allocator
*
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -178,3 +178,34 @@ int nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g,
return 0;
}
/*
 * Construct an allocator of the requested type by dispatching to the
 * matching implementation-specific init function.
 *
 * @g:          GPU pointer
 * @na:         allocator object to initialize
 * @vm:         VM to associate with the allocator; used by the buddy
 *              allocator only, may be NULL otherwise
 * @name:       human-readable allocator name
 * @base:       base address of the managed range
 * @length:     size of the managed range
 * @blk_size:   smallest unit that can be allocated
 * @max_order:  maximum slice order; used by the buddy allocator only
 * @flags:      GPU_ALLOC_* flags
 * @alloc_type: which allocator implementation to construct
 *
 * Returns 0 on success, the underlying init function's error code on
 * failure, or -EINVAL for an unrecognized allocator type.
 */
int nvgpu_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
			struct vm_gk20a *vm, const char *name,
			u64 base, u64 length, u64 blk_size, u64 max_order,
			u64 flags, enum nvgpu_allocator_type alloc_type)
{
	if (alloc_type == BUDDY_ALLOCATOR) {
		return nvgpu_buddy_allocator_init(g, na, vm, name, base,
						length, blk_size, max_order,
						flags);
	}

	if (alloc_type == PAGE_ALLOCATOR) {
		return nvgpu_page_allocator_init(g, na, name, base, length,
						blk_size, flags);
	}

	if (alloc_type == BITMAP_ALLOCATOR) {
		return nvgpu_bitmap_allocator_init(g, na, name, base, length,
						blk_size, flags);
	}

	nvgpu_err(g, "Incorrect allocator type, couldn't initialize");
	return -EINVAL;
}

View File

@@ -1065,9 +1065,9 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
(void) snprintf(buddy_name, sizeof(buddy_name), "%s-src", name);
err = nvgpu_buddy_allocator_init(g, &a->source_allocator, NULL,
buddy_name, base, length, blk_size,
0ULL, 0ULL);
err = nvgpu_allocator_init(g, &a->source_allocator, NULL, buddy_name,
base, length, blk_size, 0ULL, 0ULL,
BUDDY_ALLOCATOR);
if (err != 0) {
goto fail;
}

View File

@@ -339,16 +339,14 @@ int nvgpu_vidmem_init(struct mm_gk20a *mm)
* initialization requires vidmem but we want to use the CE to zero
* out vidmem before allocating it...
*/
err = nvgpu_page_allocator_init(g, &g->mm.vidmem.bootstrap_allocator,
"vidmem-bootstrap",
bootstrap_base, bootstrap_size,
SZ_4K, GPU_ALLOC_FORCE_CONTIG);
err = nvgpu_allocator_init(g, &g->mm.vidmem.bootstrap_allocator,
NULL, "vidmem-bootstrap", bootstrap_base,
bootstrap_size, SZ_4K, 0ULL,
GPU_ALLOC_FORCE_CONTIG, PAGE_ALLOCATOR);
err = nvgpu_page_allocator_init(g, &g->mm.vidmem.allocator,
"vidmem",
base, size - base,
default_page_size,
GPU_ALLOC_4K_VIDMEM_PAGES);
err = nvgpu_allocator_init(g, &g->mm.vidmem.allocator, NULL,
"vidmem", base, size - base, default_page_size, 0ULL,
GPU_ALLOC_4K_VIDMEM_PAGES, PAGE_ALLOCATOR);
if (err != 0) {
nvgpu_err(g, "Failed to register vidmem for size %zu: %d",
size, err);

View File

@@ -441,14 +441,15 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
if (user_vma_start < user_vma_limit) {
(void) snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s",
name);
err = nvgpu_buddy_allocator_init(g, &vm->user,
err = nvgpu_allocator_init(g, &vm->user,
vm, alloc_name,
user_vma_start,
user_vma_limit -
user_vma_start,
SZ_4K,
GPU_BALLOC_MAX_ORDER,
GPU_ALLOC_GVA_SPACE);
GPU_ALLOC_GVA_SPACE,
BUDDY_ALLOCATOR);
if (err != 0) {
goto clean_up_page_tables;
}
@@ -468,14 +469,15 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
if (user_lp_vma_start < user_lp_vma_limit) {
(void) snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s_lp",
name);
err = nvgpu_buddy_allocator_init(g, &vm->user_lp,
err = nvgpu_allocator_init(g, &vm->user_lp,
vm, alloc_name,
user_lp_vma_start,
user_lp_vma_limit -
user_lp_vma_start,
vm->big_page_size,
GPU_BALLOC_MAX_ORDER,
GPU_ALLOC_GVA_SPACE);
GPU_ALLOC_GVA_SPACE,
BUDDY_ALLOCATOR);
if (err != 0) {
goto clean_up_allocators;
}
@@ -485,13 +487,14 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
* Kernel VMA. Must always exist for an address space.
*/
(void) snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s-sys", name);
err = nvgpu_buddy_allocator_init(g, &vm->kernel,
err = nvgpu_allocator_init(g, &vm->kernel,
vm, alloc_name,
kernel_vma_start,
kernel_vma_limit - kernel_vma_start,
SZ_4K,
GPU_BALLOC_MAX_ORDER,
kernel_vma_flags);
kernel_vma_flags,
BUDDY_ALLOCATOR);
if (err != 0) {
goto clean_up_allocators;
}

View File

@@ -42,8 +42,9 @@ void nvgpu_pmu_dmem_allocator_init(struct gk20a *g,
~(PMU_DMEM_ALLOC_ALIGNMENT - 1U);
u32 size = end - start;
nvgpu_bitmap_allocator_init(g, dmem, "gk20a_pmu_dmem",
start, size, PMU_DMEM_ALLOC_ALIGNMENT, 0);
nvgpu_allocator_init(g, dmem, NULL, "gk20a_pmu_dmem", start,
size, PMU_DMEM_ALLOC_ALIGNMENT, 0ULL, 0ULL,
BITMAP_ALLOCATOR);
}
}

View File

@@ -38,8 +38,9 @@ void nvgpu_sec2_dmem_allocator_init(struct gk20a *g,
~(PMU_DMEM_ALLOC_ALIGNMENT - 1U);
u32 size = end - start;
nvgpu_bitmap_allocator_init(g, dmem, "sec2_dmem",
start, size, PMU_DMEM_ALLOC_ALIGNMENT, 0U);
nvgpu_allocator_init(g, dmem, NULL, "sec2_dmem", start,
size, PMU_DMEM_ALLOC_ALIGNMENT, 0ULL, 0ULL,
BITMAP_ALLOCATOR);
}
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -194,6 +194,12 @@ nvgpu_alloc_carveout_from_co_entry(struct nvgpu_list_node *node)
#define GPU_ALLOC_FORCE_CONTIG BIT64(3)
#define GPU_ALLOC_NO_SCATTER_GATHER BIT64(4)
/* Selects which allocator implementation nvgpu_allocator_init() builds. */
enum nvgpu_allocator_type {
BUDDY_ALLOCATOR = 0,	/* buddy allocator; only type that uses vm/max_order */
PAGE_ALLOCATOR,	/* page allocator; internally backed by a buddy source allocator */
BITMAP_ALLOCATOR	/* bitmap allocator; used for DMEM pools — fixed alignment, presumably small objects */
};
static inline void alloc_lock(struct nvgpu_allocator *a)
{
nvgpu_mutex_acquire(&a->lock);
@@ -226,6 +232,28 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
const char *name, u64 base, u64 length,
u64 blk_size, u64 flags);
/*
* Common init function for any type of allocator.
* Returns 0 on success.
*
* @g: GPU pointer
* @na: Pointer to nvgpu_allocator struct
* @vm: VM to be associated with an allocator. Can be NULL.
* Applicable to buddy allocator only.
* @name: Name of the allocator.
* @base: Base address of the allocator.
* @length: Size of the allocator.
* @blk_size: Lowest size of resources that can be allocated.
* @max_order: Max order of resource slices that can be allocated.
* Applicable to buddy allocator only.
* @flags: Additional required flags.
* @alloc_type: Allocator type.
*/
int nvgpu_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
struct vm_gk20a *vm, const char *name,
u64 base, u64 length, u64 blk_size, u64 max_order,
u64 flags, enum nvgpu_allocator_type alloc_type);
/*
* Lockless allocatior initializers.
* Note: This allocator can only allocate fixed-size structures of a