gpu: nvgpu: use nvgpu list for buddy allocator

Use nvgpu list APIs instead of linux list APIs
for buddy allocator lists

Jira NVGPU-13

Change-Id: I69a506a9aef77eaa9da0f89609627f4c2f5a7b28
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1462079
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
This commit is contained in:
Deepak Nibade
2017-04-11 19:29:10 +05:30
committed by mobile promotions
parent 71c85c225e
commit a54fee533a
3 changed files with 60 additions and 39 deletions

View File

@@ -117,7 +117,7 @@ static struct nvgpu_buddy *balloc_new_buddy(struct nvgpu_buddy_allocator *a,
static void __balloc_buddy_list_add(struct nvgpu_buddy_allocator *a,
struct nvgpu_buddy *b,
struct list_head *list)
struct nvgpu_list_node *list)
{
if (buddy_is_in_list(b)) {
alloc_dbg(balloc_owner(a),
@@ -133,9 +133,9 @@ static void __balloc_buddy_list_add(struct nvgpu_buddy_allocator *a,
*/
if (a->flags & GPU_ALLOC_GVA_SPACE &&
b->pte_size == gmmu_page_size_big)
list_add_tail(&b->buddy_entry, list);
nvgpu_list_add_tail(&b->buddy_entry, list);
else
list_add(&b->buddy_entry, list);
nvgpu_list_add(&b->buddy_entry, list);
buddy_set_in_list(b);
}
@@ -150,7 +150,7 @@ static void __balloc_buddy_list_rem(struct nvgpu_buddy_allocator *a,
BUG();
}
list_del_init(&b->buddy_entry);
nvgpu_list_del(&b->buddy_entry);
buddy_clr_in_list(b);
}
@@ -208,7 +208,7 @@ static int balloc_init_lists(struct nvgpu_buddy_allocator *a)
/* First make sure the LLs are valid. */
for (i = 0; i < GPU_BALLOC_ORDER_LIST_LEN; i++)
INIT_LIST_HEAD(balloc_get_order_list(a, i));
nvgpu_init_list_node(balloc_get_order_list(a, i));
while (bstart < bend) {
order = __balloc_max_order_in(a, bstart, bend);
@@ -225,9 +225,10 @@ static int balloc_init_lists(struct nvgpu_buddy_allocator *a)
cleanup:
for (i = 0; i < GPU_BALLOC_ORDER_LIST_LEN; i++) {
if (!list_empty(balloc_get_order_list(a, i))) {
buddy = list_first_entry(balloc_get_order_list(a, i),
struct nvgpu_buddy, buddy_entry);
if (!nvgpu_list_empty(balloc_get_order_list(a, i))) {
buddy = nvgpu_list_first_entry(
balloc_get_order_list(a, i),
nvgpu_buddy, buddy_entry);
balloc_blist_rem(a, buddy);
nvgpu_kmem_cache_free(a->buddy_cache, buddy);
}
@@ -278,9 +279,10 @@ static void nvgpu_buddy_allocator_destroy(struct nvgpu_allocator *__a)
for (i = 0; i < GPU_BALLOC_ORDER_LIST_LEN; i++) {
BUG_ON(a->buddy_list_alloced[i] != 0);
while (!list_empty(balloc_get_order_list(a, i))) {
bud = list_first_entry(balloc_get_order_list(a, i),
struct nvgpu_buddy, buddy_entry);
while (!nvgpu_list_empty(balloc_get_order_list(a, i))) {
bud = nvgpu_list_first_entry(
balloc_get_order_list(a, i),
nvgpu_buddy, buddy_entry);
balloc_blist_rem(a, bud);
nvgpu_kmem_cache_free(a->buddy_cache, bud);
}
@@ -471,16 +473,16 @@ static struct nvgpu_buddy *__balloc_find_buddy(struct nvgpu_buddy_allocator *a,
struct nvgpu_buddy *bud;
if (order > a->max_order ||
list_empty(balloc_get_order_list(a, order)))
nvgpu_list_empty(balloc_get_order_list(a, order)))
return NULL;
if (a->flags & GPU_ALLOC_GVA_SPACE &&
pte_size == gmmu_page_size_big)
bud = list_last_entry(balloc_get_order_list(a, order),
struct nvgpu_buddy, buddy_entry);
bud = nvgpu_list_last_entry(balloc_get_order_list(a, order),
nvgpu_buddy, buddy_entry);
else
bud = list_first_entry(balloc_get_order_list(a, order),
struct nvgpu_buddy, buddy_entry);
bud = nvgpu_list_first_entry(balloc_get_order_list(a, order),
nvgpu_buddy, buddy_entry);
if (pte_size != BALLOC_PTE_SIZE_ANY &&
pte_size != bud->pte_size &&
@@ -645,7 +647,7 @@ static struct nvgpu_buddy *__balloc_make_fixed_buddy(
struct nvgpu_buddy_allocator *a, u64 base, u64 order, int pte_size)
{
struct nvgpu_buddy *bud = NULL;
struct list_head *order_list;
struct nvgpu_list_node *order_list;
u64 cur_order = order, cur_base = base;
/*
@@ -661,7 +663,8 @@ static struct nvgpu_buddy *__balloc_make_fixed_buddy(
int found = 0;
order_list = balloc_get_order_list(a, cur_order);
list_for_each_entry(bud, order_list, buddy_entry) {
nvgpu_list_for_each_entry(bud, order_list,
nvgpu_buddy, buddy_entry) {
if (bud->start == cur_base) {
/*
* Make sure page size matches if it's smaller
@@ -774,10 +777,10 @@ static u64 __balloc_do_alloc_fixed(struct nvgpu_buddy_allocator *a,
return base;
err_and_cleanup:
while (!list_empty(&falloc->buddies)) {
struct nvgpu_buddy *bud = list_first_entry(&falloc->buddies,
struct nvgpu_buddy,
buddy_entry);
while (!nvgpu_list_empty(&falloc->buddies)) {
struct nvgpu_buddy *bud = nvgpu_list_first_entry(
&falloc->buddies,
nvgpu_buddy, buddy_entry);
__balloc_buddy_list_rem(a, bud);
balloc_free_buddy(a, bud->start);
@@ -792,9 +795,9 @@ static void __balloc_do_free_fixed(struct nvgpu_buddy_allocator *a,
{
struct nvgpu_buddy *bud;
while (!list_empty(&falloc->buddies)) {
bud = list_first_entry(&falloc->buddies,
struct nvgpu_buddy,
while (!nvgpu_list_empty(&falloc->buddies)) {
bud = nvgpu_list_first_entry(&falloc->buddies,
nvgpu_buddy,
buddy_entry);
__balloc_buddy_list_rem(a, bud);
@@ -896,7 +899,7 @@ static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *__a,
if (!falloc)
goto fail;
INIT_LIST_HEAD(&falloc->buddies);
nvgpu_init_list_node(&falloc->buddies);
falloc->start = base;
falloc->end = base + len;
@@ -1018,7 +1021,8 @@ static bool nvgpu_buddy_reserve_is_possible(struct nvgpu_buddy_allocator *a,
* Not the fastest approach but we should not have that many carveouts
* for any reasonable allocator.
*/
list_for_each_entry(tmp, &a->co_list, co_entry) {
nvgpu_list_for_each_entry(tmp, &a->co_list,
nvgpu_alloc_carveout, co_entry) {
if ((co_base >= tmp->base &&
co_base < (tmp->base + tmp->length)) ||
(co_end >= tmp->base &&
@@ -1059,7 +1063,7 @@ static int nvgpu_buddy_reserve_co(struct nvgpu_allocator *__a,
goto done;
}
list_add(&co->co_entry, &a->co_list);
nvgpu_list_add(&co->co_entry, &a->co_list);
done:
alloc_unlock(__a);
@@ -1074,7 +1078,7 @@ static void nvgpu_buddy_release_co(struct nvgpu_allocator *__a,
{
alloc_lock(__a);
list_del_init(&co->co_entry);
nvgpu_list_del(&co->co_entry);
nvgpu_free(__a, co->base);
alloc_unlock(__a);
@@ -1149,10 +1153,11 @@ static void nvgpu_buddy_print_stats(struct nvgpu_allocator *__a,
if (lock)
alloc_lock(__a);
if (!list_empty(&a->co_list)) {
if (!nvgpu_list_empty(&a->co_list)) {
__alloc_pstat(s, __a, "\n");
__alloc_pstat(s, __a, "Carveouts:\n");
list_for_each_entry(tmp, &a->co_list, co_entry)
nvgpu_list_for_each_entry(tmp, &a->co_list,
nvgpu_alloc_carveout, co_entry)
__alloc_pstat(s, __a,
" CO %2d: %-20s 0x%010llx + 0x%llx\n",
i++, tmp->name, tmp->base, tmp->length);
@@ -1313,7 +1318,7 @@ int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
a->alloced_buddies = RB_ROOT;
a->fixed_allocs = RB_ROOT;
INIT_LIST_HEAD(&a->co_list);
nvgpu_init_list_node(&a->co_list);
err = balloc_init_lists(a);
if (err)
goto fail;

View File

@@ -17,9 +17,10 @@
#ifndef BUDDY_ALLOCATOR_PRIV_H
#define BUDDY_ALLOCATOR_PRIV_H
#include <linux/list.h>
#include <linux/rbtree.h>
#include <nvgpu/list.h>
struct nvgpu_kmem_cache;
struct nvgpu_allocator;
struct vm_gk20a;
@@ -33,7 +34,7 @@ struct nvgpu_buddy {
struct nvgpu_buddy *left; /* Lower address sub-node. */
struct nvgpu_buddy *right; /* Higher address sub-node. */
struct list_head buddy_entry; /* List entry for various lists. */
struct nvgpu_list_node buddy_entry; /* List entry for various lists. */
struct rb_node alloced_entry; /* RB tree of allocations. */
u64 start; /* Start address of this buddy. */
@@ -54,6 +55,13 @@ struct nvgpu_buddy {
int pte_size;
};
/*
 * Recover the containing struct nvgpu_buddy from its embedded buddy_entry
 * list node (container_of-style pointer arithmetic). Used by the
 * nvgpu_list_* iteration/entry macros to map list nodes back to buddies.
 */
static inline struct nvgpu_buddy *
nvgpu_buddy_from_buddy_entry(struct nvgpu_list_node *node)
{
	return (struct nvgpu_buddy *)
		((uintptr_t)node - offsetof(struct nvgpu_buddy, buddy_entry));
}
#define __buddy_flag_ops(flag, flag_up) \
static inline int buddy_is_ ## flag(struct nvgpu_buddy *b) \
{ \
@@ -89,7 +97,7 @@ __buddy_flag_ops(in_list, IN_LIST);
* Keeps info for a fixed allocation.
*/
struct nvgpu_fixed_alloc {
struct list_head buddies; /* List of buddies. */
struct nvgpu_list_node buddies; /* List of buddies. */
struct rb_node alloced_entry; /* RB tree of fixed allocations. */
u64 start; /* Start of fixed block. */
@@ -125,7 +133,7 @@ struct nvgpu_buddy_allocator {
struct rb_root alloced_buddies; /* Outstanding allocations. */
struct rb_root fixed_allocs; /* Outstanding fixed allocations. */
struct list_head co_list;
struct nvgpu_list_node co_list;
struct nvgpu_kmem_cache *buddy_cache;
@@ -134,7 +142,7 @@ struct nvgpu_buddy_allocator {
*/
#define GPU_BALLOC_ORDER_LIST_LEN (GPU_BALLOC_MAX_ORDER + 1)
struct list_head buddy_list[GPU_BALLOC_ORDER_LIST_LEN];
struct nvgpu_list_node buddy_list[GPU_BALLOC_ORDER_LIST_LEN];
u64 buddy_list_len[GPU_BALLOC_ORDER_LIST_LEN];
u64 buddy_list_split[GPU_BALLOC_ORDER_LIST_LEN];
u64 buddy_list_alloced[GPU_BALLOC_ORDER_LIST_LEN];
@@ -162,7 +170,7 @@ static inline struct nvgpu_buddy_allocator *buddy_allocator(
return (struct nvgpu_buddy_allocator *)(a)->priv;
}
static inline struct list_head *balloc_get_order_list(
static inline struct nvgpu_list_node *balloc_get_order_list(
struct nvgpu_buddy_allocator *a, int order)
{
return &a->buddy_list[order];

View File

@@ -22,6 +22,7 @@
#include <linux/platform_device.h>
#include <nvgpu/lock.h>
#include <nvgpu/list.h>
/* #define ALLOCATOR_DEBUG */
@@ -101,7 +102,14 @@ struct nvgpu_alloc_carveout {
/*
* For usage by the allocator implementation.
*/
struct list_head co_entry;
struct nvgpu_list_node co_entry;
};
/*
 * Recover the containing struct nvgpu_alloc_carveout from its embedded
 * co_entry list node (container_of-style pointer arithmetic). Used by the
 * nvgpu_list_* iteration/entry macros when walking an allocator's carveout
 * list.
 */
static inline struct nvgpu_alloc_carveout *
nvgpu_alloc_carveout_from_co_entry(struct nvgpu_list_node *node)
{
	return (struct nvgpu_alloc_carveout *)
		((uintptr_t)node - offsetof(struct nvgpu_alloc_carveout, co_entry));
}
#define NVGPU_CARVEOUT(__name, __base, __length) \