gpu: nvgpu: Move kmem_caches to allocator

Instead of using a single static kmem_cache for each type of
data structure the allocators may want to allocate, each
allocator now has its own instance of the kmem_cache. This is
done so that each GPU driver instance can accurately track how
much memory it is using.
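
For the bitmap allocator changed below, this means the cache
handle lives in the allocator's private state. A minimal sketch
(the field name comes from the diff in this commit; the
surrounding struct layout is elided and assumed):

  struct nvgpu_bitmap_allocator {
          /* ... existing allocator state (base, length, bitmap, ...) ... */
          struct nvgpu_kmem_cache *meta_data_cache; /* per-instance slab cache */
  };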

In order to support this on older kernels, a new NVGPU API has
been added,

  nvgpu_kmem_cache_create(struct gk20a *g, size_t size)

which handles the possibility that caches cannot be created
with the same name.
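
A rough sketch of what such a wrapper can look like. This is
illustrative only, not the exact nvgpu implementation: the
struct layout, name format, and counter here are assumptions.
The unique per-instance name is what sidesteps kernels that
reject duplicate kmem_cache names:

  #include <linux/slab.h>
  #include <linux/atomic.h>

  struct nvgpu_kmem_cache {
          struct gk20a *g;
          struct kmem_cache *cache;
          /*
           * kmem_cache_create() keeps a pointer to the name, so the
           * string must live as long as the cache itself.
           */
          char name[64];
  };

  struct nvgpu_kmem_cache *nvgpu_kmem_cache_create(struct gk20a *g, size_t size)
  {
          static atomic_t cache_id = ATOMIC_INIT(0);
          struct nvgpu_kmem_cache *cache;

          cache = kzalloc(sizeof(*cache), GFP_KERNEL);
          if (!cache)
                  return NULL;

          cache->g = g;

          /* Generate a unique name: duplicate names may be rejected. */
          snprintf(cache->name, sizeof(cache->name),
                   "nvgpu-cache-%p-%d", g, atomic_inc_return(&cache_id));

          cache->cache = kmem_cache_create(cache->name, size, 0, 0, NULL);
          if (!cache->cache) {
                  kfree(cache);
                  return NULL;
          }

          return cache;
  }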

This patch also fixes numerous places where kfree() was wrongly
used to free kmem_cache allocs.
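
The companion calls seen throughout the diff pair each cache
allocation with the matching slab operation; freeing a
kmem_cache object with plain kfree() is exactly the mismatch
this patch removes. A minimal sketch, assuming the hypothetical
struct above:

  void *nvgpu_kmem_cache_alloc(struct nvgpu_kmem_cache *cache)
  {
          return kmem_cache_alloc(cache->cache, GFP_KERNEL);
  }

  void nvgpu_kmem_cache_free(struct nvgpu_kmem_cache *cache, void *ptr)
  {
          kmem_cache_free(cache->cache, ptr);
  }

  void nvgpu_kmem_cache_destroy(struct nvgpu_kmem_cache *cache)
  {
          kmem_cache_destroy(cache->cache);
          kfree(cache);
  }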

Bug 1799159
Bug 1823380

Change-Id: Id674f9a5445fde3f95db65ad6bf3ea990444603d
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1283826
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -22,9 +22,6 @@
 #include "bitmap_allocator_priv.h"
 
-static struct kmem_cache *meta_data_cache;	/* slab cache for meta data. */
-static DEFINE_MUTEX(meta_data_cache_lock);
-
 static u64 nvgpu_bitmap_alloc_length(struct nvgpu_allocator *a)
 {
 	struct nvgpu_bitmap_allocator *ba = a->priv;
 
@@ -195,7 +192,7 @@ static int __nvgpu_bitmap_store_alloc(struct nvgpu_bitmap_allocator *a,
 				      u64 addr, u64 len)
 {
 	struct nvgpu_bitmap_alloc *alloc =
-		kmem_cache_alloc(meta_data_cache, GFP_KERNEL);
+		nvgpu_kmem_cache_alloc(a->meta_data_cache);
 
 	if (!alloc)
 		return -ENOMEM;
@@ -312,7 +309,8 @@ static void nvgpu_bitmap_free(struct nvgpu_allocator *__a, u64 addr)
 	a->bytes_freed += alloc->length;
 
 done:
-	kfree(alloc);
+	if (a->meta_data_cache && alloc)
+		nvgpu_kmem_cache_free(a->meta_data_cache, alloc);
 	alloc_unlock(__a);
 }
 
@@ -330,9 +328,10 @@ static void nvgpu_bitmap_alloc_destroy(struct nvgpu_allocator *__a)
 				   alloc_entry);
 
 		rb_erase(node, &a->allocs);
-		kfree(alloc);
+		nvgpu_kmem_cache_free(a->meta_data_cache, alloc);
 	}
 
+	nvgpu_kmem_cache_destroy(a->meta_data_cache);
 	kfree(a->bitmap);
 	kfree(a);
 }
@@ -382,14 +381,6 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	int err;
 	struct nvgpu_bitmap_allocator *a;
 
-	mutex_lock(&meta_data_cache_lock);
-	if (!meta_data_cache)
-		meta_data_cache = KMEM_CACHE(nvgpu_bitmap_alloc, 0);
-	mutex_unlock(&meta_data_cache_lock);
-
-	if (!meta_data_cache)
-		return -ENOMEM;
-
 	if (WARN_ON(blk_size & (blk_size - 1)))
 		return -EINVAL;
 
@@ -414,6 +405,15 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	if (err)
 		goto fail;
 
+	if (!(flags & GPU_ALLOC_NO_ALLOC_PAGE)) {
+		a->meta_data_cache = nvgpu_kmem_cache_create(g,
+					sizeof(struct nvgpu_bitmap_alloc));
+		if (!a->meta_data_cache) {
+			err = -ENOMEM;
+			goto fail;
+		}
+	}
+
 	a->base = base;
 	a->length = length;
 	a->blk_size = blk_size;
@@ -424,8 +424,10 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	a->bitmap = kcalloc(BITS_TO_LONGS(a->num_bits), sizeof(*a->bitmap),
 			    GFP_KERNEL);
-	if (!a->bitmap)
+	if (!a->bitmap) {
+		err = -ENOMEM;
 		goto fail;
+	}
 
 	wmb();
 	a->inited = true;
@@ -441,6 +443,8 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	return 0;
 
 fail:
+	if (a->meta_data_cache)
+		nvgpu_kmem_cache_destroy(a->meta_data_cache);
 	kfree(a);
 	return err;
 }