diff --git a/drivers/gpu/nvgpu/common/mm/allocators/nvgpu_allocator.c b/drivers/gpu/nvgpu/common/mm/allocators/nvgpu_allocator.c
index 0e2749712..fb5912bf8 100644
--- a/drivers/gpu/nvgpu/common/mm/allocators/nvgpu_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/allocators/nvgpu_allocator.c
@@ -1,7 +1,7 @@
 /*
  * gk20a allocator
  *
- * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -178,3 +178,34 @@ int nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g,
 
 	return 0;
 }
+
+/*
+ * Initialize requested type of allocator
+ */
+
+int nvgpu_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
+			struct vm_gk20a *vm, const char *name,
+			u64 base, u64 length, u64 blk_size, u64 max_order,
+			u64 flags, enum nvgpu_allocator_type alloc_type)
+{
+	int err = -EINVAL;
+
+	switch (alloc_type) {
+	case BUDDY_ALLOCATOR:
+		err = nvgpu_buddy_allocator_init(g, na, vm, name, base, length,
+						blk_size, max_order, flags);
+		break;
+	case PAGE_ALLOCATOR:
+		err = nvgpu_page_allocator_init(g, na, name, base, length,
+						blk_size, flags);
+		break;
+	case BITMAP_ALLOCATOR:
+		err = nvgpu_bitmap_allocator_init(g, na, name, base, length,
+						blk_size, flags);
+		break;
+	default:
+		nvgpu_err(g, "Incorrect allocator type, couldn't initialize");
+		break;
+	}
+	return err;
+}
diff --git a/drivers/gpu/nvgpu/common/mm/allocators/page_allocator.c b/drivers/gpu/nvgpu/common/mm/allocators/page_allocator.c
index dbbab1621..913e3852e 100644
--- a/drivers/gpu/nvgpu/common/mm/allocators/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/allocators/page_allocator.c
@@ -1065,9 +1065,9 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 
 	(void) snprintf(buddy_name, sizeof(buddy_name), "%s-src", name);
 
-	err = nvgpu_buddy_allocator_init(g, &a->source_allocator, NULL,
-					 buddy_name, base, length, blk_size,
-					 0ULL, 0ULL);
+	err = nvgpu_allocator_init(g, &a->source_allocator, NULL, buddy_name,
+				   base, length, blk_size, 0ULL, 0ULL,
+				   BUDDY_ALLOCATOR);
 	if (err != 0) {
 		goto fail;
 	}
diff --git a/drivers/gpu/nvgpu/common/mm/vidmem.c b/drivers/gpu/nvgpu/common/mm/vidmem.c
index 144275fcc..849fd0fb0 100644
--- a/drivers/gpu/nvgpu/common/mm/vidmem.c
+++ b/drivers/gpu/nvgpu/common/mm/vidmem.c
@@ -339,16 +339,14 @@ int nvgpu_vidmem_init(struct mm_gk20a *mm)
 	 * initialization requires vidmem but we want to use the CE to zero
 	 * out vidmem before allocating it...
 	 */
-	err = nvgpu_page_allocator_init(g, &g->mm.vidmem.bootstrap_allocator,
-					"vidmem-bootstrap",
-					bootstrap_base, bootstrap_size,
-					SZ_4K, GPU_ALLOC_FORCE_CONTIG);
+	err = nvgpu_allocator_init(g, &g->mm.vidmem.bootstrap_allocator,
+				   NULL, "vidmem-bootstrap", bootstrap_base,
+				   bootstrap_size, SZ_4K, 0ULL,
+				   GPU_ALLOC_FORCE_CONTIG, PAGE_ALLOCATOR);
 
-	err = nvgpu_page_allocator_init(g, &g->mm.vidmem.allocator,
-					"vidmem",
-					base, size - base,
-					default_page_size,
-					GPU_ALLOC_4K_VIDMEM_PAGES);
+	err = nvgpu_allocator_init(g, &g->mm.vidmem.allocator, NULL,
+			"vidmem", base, size - base, default_page_size, 0ULL,
+			GPU_ALLOC_4K_VIDMEM_PAGES, PAGE_ALLOCATOR);
 	if (err != 0) {
 		nvgpu_err(g, "Failed to register vidmem for size %zu: %d",
 				size, err);
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index af721f126..5841fe40c 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -441,14 +441,15 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
 	if (user_vma_start < user_vma_limit) {
 		(void) snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s",
 				name);
-		err = nvgpu_buddy_allocator_init(g, &vm->user,
+		err = nvgpu_allocator_init(g, &vm->user,
 						 vm, alloc_name,
 						 user_vma_start,
 						 user_vma_limit -
 						 user_vma_start,
 						 SZ_4K,
 						 GPU_BALLOC_MAX_ORDER,
-						 GPU_ALLOC_GVA_SPACE);
+						 GPU_ALLOC_GVA_SPACE,
+						 BUDDY_ALLOCATOR);
 		if (err != 0) {
 			goto clean_up_page_tables;
 		}
@@ -468,14 +469,15 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
 	if (user_lp_vma_start < user_lp_vma_limit) {
 		(void) snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s_lp",
 				name);
-		err = nvgpu_buddy_allocator_init(g, &vm->user_lp,
+		err = nvgpu_allocator_init(g, &vm->user_lp,
 						 vm, alloc_name,
 						 user_lp_vma_start,
 						 user_lp_vma_limit -
 						 user_lp_vma_start,
 						 vm->big_page_size,
 						 GPU_BALLOC_MAX_ORDER,
-						 GPU_ALLOC_GVA_SPACE);
+						 GPU_ALLOC_GVA_SPACE,
+						 BUDDY_ALLOCATOR);
 		if (err != 0) {
 			goto clean_up_allocators;
 		}
@@ -485,13 +487,14 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
 	 * Kernel VMA. Must always exist for an address space.
 	 */
 	(void) snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s-sys", name);
-	err = nvgpu_buddy_allocator_init(g, &vm->kernel,
+	err = nvgpu_allocator_init(g, &vm->kernel,
 					 vm, alloc_name,
 					 kernel_vma_start,
 					 kernel_vma_limit - kernel_vma_start,
 					 SZ_4K,
 					 GPU_BALLOC_MAX_ORDER,
-					 kernel_vma_flags);
+					 kernel_vma_flags,
+					 BUDDY_ALLOCATOR);
 	if (err != 0) {
 		goto clean_up_allocators;
 	}
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_allocator.c b/drivers/gpu/nvgpu/common/pmu/pmu_allocator.c
index 7cd88b1ef..a7d2bd4a8 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_allocator.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_allocator.c
@@ -42,8 +42,9 @@ void nvgpu_pmu_dmem_allocator_init(struct gk20a *g,
 			~(PMU_DMEM_ALLOC_ALIGNMENT - 1U);
 		u32 size = end - start;
 
-		nvgpu_bitmap_allocator_init(g, dmem, "gk20a_pmu_dmem",
-			start, size, PMU_DMEM_ALLOC_ALIGNMENT, 0);
+		nvgpu_allocator_init(g, dmem, NULL, "gk20a_pmu_dmem", start,
+				size, PMU_DMEM_ALLOC_ALIGNMENT, 0ULL, 0ULL,
+				BITMAP_ALLOCATOR);
 	}
 }
diff --git a/drivers/gpu/nvgpu/common/sec2/sec2_allocator.c b/drivers/gpu/nvgpu/common/sec2/sec2_allocator.c
index 534df3df8..ea1ff1543 100644
--- a/drivers/gpu/nvgpu/common/sec2/sec2_allocator.c
+++ b/drivers/gpu/nvgpu/common/sec2/sec2_allocator.c
@@ -38,8 +38,9 @@ void nvgpu_sec2_dmem_allocator_init(struct gk20a *g,
 			~(PMU_DMEM_ALLOC_ALIGNMENT - 1U);
 		u32 size = end - start;
 
-		nvgpu_bitmap_allocator_init(g, dmem, "sec2_dmem",
-			start, size, PMU_DMEM_ALLOC_ALIGNMENT, 0U);
+		nvgpu_allocator_init(g, dmem, NULL, "sec2_dmem", start,
+				size, PMU_DMEM_ALLOC_ALIGNMENT, 0ULL, 0ULL,
+				BITMAP_ALLOCATOR);
 	}
 }
diff --git a/drivers/gpu/nvgpu/include/nvgpu/allocator.h b/drivers/gpu/nvgpu/include/nvgpu/allocator.h
index 27c4ee754..6b131b481 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/allocator.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/allocator.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -194,6 +194,12 @@ nvgpu_alloc_carveout_from_co_entry(struct nvgpu_list_node *node)
 #define GPU_ALLOC_FORCE_CONTIG		BIT64(3)
 #define GPU_ALLOC_NO_SCATTER_GATHER	BIT64(4)
 
+enum nvgpu_allocator_type {
+	BUDDY_ALLOCATOR = 0,
+	PAGE_ALLOCATOR,
+	BITMAP_ALLOCATOR
+};
+
 static inline void alloc_lock(struct nvgpu_allocator *a)
 {
 	nvgpu_mutex_acquire(&a->lock);
 }
@@ -226,6 +232,28 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 			const char *name, u64 base, u64 length,
 			u64 blk_size, u64 flags);
 
+/*
+ * Common init function for any type of allocator.
+ * Returns 0 on success.
+ *
+ * @g: GPU pointer
+ * @na: Pointer to nvgpu_allocator struct
+ * @vm: VM to be associated with an allocator. Can be NULL.
+ *	Applicable to buddy allocator only.
+ * @name: Name of the allocator.
+ * @base: Base address of the allocator.
+ * @length: Size of the allocator.
+ * @blk_size: Lowest size of resources that can be allocated.
+ * @max_order: Max order of resource slices that can be allocated.
+ *	Applicable to buddy allocator only.
+ * @flags: Additional required flags.
+ * @alloc_type: Allocator type.
+ */
+int nvgpu_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
+			struct vm_gk20a *vm, const char *name,
+			u64 base, u64 length, u64 blk_size, u64 max_order,
+			u64 flags, enum nvgpu_allocator_type alloc_type);
+
 /*
  * Lockless allocatior initializers.
  * Note: This allocator can only allocate fixed-size structures of a
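
For anyone converting further call sites, here is a minimal sketch of the calling pattern for the new nvgpu_allocator_init() entry point. The signature, the enum values, and the convention of passing NULL/0ULL for parameters the chosen allocator type ignores are taken from this patch; the wrapper function example_dmem_init(), the "example-dmem" name, and the base/length values are hypothetical and only illustrate the pattern.

/*
 * Illustrative sketch only: example_dmem_init(), "example-dmem" and the
 * base/length constants are made up; nvgpu_allocator_init() and
 * BITMAP_ALLOCATOR come from this patch.
 */
static int example_dmem_init(struct gk20a *g, struct nvgpu_allocator *na)
{
	int err;

	/*
	 * Bitmap allocator: vm and max_order do not apply, so pass
	 * NULL and 0ULL, as the PMU/SEC2 conversions above do.
	 */
	err = nvgpu_allocator_init(g, na, NULL, "example-dmem",
				   0x100000ULL, 0x10000ULL, SZ_4K,
				   0ULL, 0ULL, BITMAP_ALLOCATOR);
	if (err != 0) {
		nvgpu_err(g, "example-dmem init failed: %d", err);
	}

	return err;
}

Because err is initialized to -EINVAL, an unrecognized alloc_type falls through the default case, logs an error, and is reported to the caller rather than silently succeeding.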