mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-22 17:36:20 +03:00
Add an optimization to the bitmap allocator for handling sequences of allocations. A common pattern of allocs from the priv_cmdbuf is to do many allocs and then many frees. In such cases it makes sense to store the last allocation offset and start searching for the next alloc from there. For such a pattern we know that the previous bits are already allocated so it doesn't make sense to search them unless we have to. Obviously, if there's no space found ahead of the previous alloc's block then we fall back to the remaining space. In random allocation patterns this optimization should not have any negative effects. It merely shifts the start point for searching for allocs but assuming each bit has an equal probability of being free the start location does not matter. Bug 1799159 Signed-off-by: Alex Waterman <alexw@nvidia.com> Reviewed-on: http://git-master/r/1205958 (cherry picked from commit 759c583962d6d57cb8cb073ccdbfcfc6db4c1e18) Change-Id: I267ef6fa155ff15d6ebfc76dc1abafd9aa1f44df Reviewed-on: http://git-master/r/1227923 GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
71 lines
2.0 KiB
C
71 lines
2.0 KiB
C
/*
|
|
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify it
|
|
* under the terms and conditions of the GNU General Public License,
|
|
* version 2, as published by the Free Software Foundation.
|
|
*
|
|
* This program is distributed in the hope it will be useful, but WITHOUT
|
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
* more details.
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
*/
|
|
|
|
#ifndef BITMAP_ALLOCATOR_PRIV_H
|
|
#define BITMAP_ALLOCATOR_PRIV_H
|
|
|
|
#include <linux/rbtree.h>
|
|
|
|
struct gk20a_allocator;
|
|
|
|
/*
 * Private state for the bitmap-based allocator. Each bit in @bitmap
 * represents one block of @blk_size bytes within [@base, @base + @length).
 */
struct gk20a_bitmap_allocator {
	/*
	 * Back pointer to the generic allocator wrapper — presumably the
	 * gk20a_allocator whose ->priv points at this struct (see
	 * bitmap_allocator() below); confirm against the .c file.
	 */
	struct gk20a_allocator *owner;

	u64 base;			/* Base address of the space. */
	u64 length;			/* Length of the space. */
	u64 blk_size;			/* Size that corresponds to 1 bit. */
	u64 blk_shift;			/* Bit shift to divide by blk_size. */
	u64 num_bits;			/* Number of allocatable bits. */
	u64 bit_offs;			/* Offset of bitmap. */

	/*
	 * Optimization for making repeated allocations faster. Keep track of
	 * the next bit after the most recent allocation. This is where the next
	 * search will start from. This should make allocation faster in cases
	 * where lots of allocations get made one after another. It shouldn't
	 * have a negative impact on the case where the allocator is fragmented.
	 */
	u64 next_blk;

	unsigned long *bitmap;		/* The actual bitmap! */
	struct rb_root allocs;		/* Tree of outstanding allocations. */

	/* Allocator behavior flags — semantics defined elsewhere; TODO confirm. */
	u64 flags;

	/* Set once the allocator has been initialized. */
	bool inited;

	/* Statistics */
	u64 nr_allocs;			/* Total number of regular allocs. */
	u64 nr_fixed_allocs;		/* Total number of fixed-address allocs. */
	u64 bytes_alloced;		/* Running total of bytes handed out. */
	u64 bytes_freed;		/* Running total of bytes returned. */
};
|
|
|
|
/*
 * Record of one outstanding allocation; instances are linked into the
 * allocator's @allocs red-black tree via @alloc_entry so the extent of
 * an allocation can be looked up later (e.g. on free).
 */
struct gk20a_bitmap_alloc {
	u64 base;			/* Start address of the allocation. */
	u64 length;			/* Extent of the allocation. */
	struct rb_node alloc_entry;	/* RB tree of allocations. */
};
|
|
|
|
static inline struct gk20a_bitmap_allocator *bitmap_allocator(
|
|
struct gk20a_allocator *a)
|
|
{
|
|
return (struct gk20a_bitmap_allocator *)(a)->priv;
|
|
}
|
|
|
|
|
|
#endif
|