Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: use nvgpu list for page chunks
Use nvgpu list APIs instead of linux list APIs to store chunks of page allocator

Jira NVGPU-13

Change-Id: I63375fc2df683e018c48a90b76eca368438cc32f
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1326814
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Commit: 0d8830394a
Parent: 1e355ca52e
Committed by: mobile promotions
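The change is mechanical: struct list_head becomes struct nvgpu_list_node, INIT_LIST_HEAD becomes nvgpu_init_list_node, and list_empty/list_add/list_del/list_first_entry/list_next_entry/list_for_each_entry become their nvgpu_* counterparts from <nvgpu/list.h>. The visible difference at the call sites is that the nvgpu entry macros take the bare type name (page_alloc_chunk) instead of struct page_alloc_chunk, because they recover the containing structure through a per-type helper such as page_alloc_chunk_from_list_entry() rather than through container_of(). The sketch below is a minimal user-space approximation of that pattern; the nvgpu_* bodies are illustrative assumptions, not the real <nvgpu/list.h> definitions, and only the calling convention is taken from the diff that follows.

/*
 * Minimal user-space sketch of the intrusive-list pattern this change
 * switches to.  The nvgpu_* bodies here are assumptions for illustration;
 * only the calling convention (bare type name plus a per-type
 * <type>_from_<member>() helper) is taken from the diff itself.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct nvgpu_list_node {
        struct nvgpu_list_node *prev, *next;
};

static inline void nvgpu_init_list_node(struct nvgpu_list_node *n)
{
        n->prev = n->next = n;
}

static inline void nvgpu_list_add(struct nvgpu_list_node *n,
                                  struct nvgpu_list_node *head)
{
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
}

static inline void nvgpu_list_del(struct nvgpu_list_node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        nvgpu_init_list_node(n);
}

static inline int nvgpu_list_empty(struct nvgpu_list_node *head)
{
        return head->next == head;
}

/* The entry macros paste type and member names to find the helper. */
#define nvgpu_list_first_entry(head, type, member) \
        type##_from_##member((head)->next)
#define nvgpu_list_for_each_entry(pos, head, type, member)               \
        for ((pos) = nvgpu_list_first_entry(head, type, member);         \
             &(pos)->member != (head);                                   \
             (pos) = type##_from_##member((pos)->member.next))

/* Stand-in for the chunk type used by the page allocator. */
struct page_alloc_chunk {
        struct nvgpu_list_node list_entry;
        unsigned long long base;
        unsigned long long length;
};

static inline struct page_alloc_chunk *
page_alloc_chunk_from_list_entry(struct nvgpu_list_node *node)
{
        return (struct page_alloc_chunk *)
                ((uintptr_t)node - offsetof(struct page_alloc_chunk, list_entry));
}

int main(void)
{
        struct nvgpu_list_node alloc_chunks;
        struct page_alloc_chunk *c;
        struct page_alloc_chunk a = { .base = 0x1000, .length = 0x2000 };

        nvgpu_init_list_node(&alloc_chunks);
        nvgpu_list_add(&a.list_entry, &alloc_chunks);

        /* Same shape as the converted call sites in the diff. */
        nvgpu_list_for_each_entry(c, &alloc_chunks, page_alloc_chunk, list_entry)
                printf("Chunk: 0x%llx + 0x%llx\n", c->base, c->length);

        nvgpu_list_del(&a.list_entry);
        return nvgpu_list_empty(&alloc_chunks) ? 0 : 1;
}

Because the call shape is identical apart from the extra type-name argument, every hunk in this change is a one- or two-line substitution.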
@@ -135,11 +135,11 @@ static void __nvgpu_free_pages(struct nvgpu_page_allocator *a,
 {
         struct page_alloc_chunk *chunk;
 
-        while (!list_empty(&alloc->alloc_chunks)) {
-                chunk = list_first_entry(&alloc->alloc_chunks,
-                                         struct page_alloc_chunk,
-                                         list_entry);
-                list_del(&chunk->list_entry);
+        while (!nvgpu_list_empty(&alloc->alloc_chunks)) {
+                chunk = nvgpu_list_first_entry(&alloc->alloc_chunks,
+                                               page_alloc_chunk,
+                                               list_entry);
+                nvgpu_list_del(&chunk->list_entry);
 
                 if (free_buddy_alloc)
                         nvgpu_free(&a->source_allocator, chunk->base);
@@ -322,8 +322,8 @@ static int __do_slab_alloc(struct nvgpu_page_allocator *a,
         alloc->length = slab_page->slab_size;
         alloc->base = slab_page->page_addr + (offs * slab_page->slab_size);
 
-        chunk = list_first_entry(&alloc->alloc_chunks,
-                                 struct page_alloc_chunk, list_entry);
+        chunk = nvgpu_list_first_entry(&alloc->alloc_chunks,
+                                       page_alloc_chunk, list_entry);
         chunk->base = alloc->base;
         chunk->length = alloc->length;
 
@@ -359,8 +359,8 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_slab(
                 goto fail;
         }
 
-        INIT_LIST_HEAD(&alloc->alloc_chunks);
-        list_add(&chunk->list_entry, &alloc->alloc_chunks);
+        nvgpu_init_list_node(&alloc->alloc_chunks);
+        nvgpu_list_add(&chunk->list_entry, &alloc->alloc_chunks);
 
         err = __do_slab_alloc(a, slab, alloc);
         if (err)
@@ -448,7 +448,7 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
 
         memset(alloc, 0, sizeof(*alloc));
 
-        INIT_LIST_HEAD(&alloc->alloc_chunks);
+        nvgpu_init_list_node(&alloc->alloc_chunks);
         alloc->length = pages << a->page_shift;
 
         while (pages) {
@@ -504,23 +504,23 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
 
                 c->base = chunk_addr;
                 c->length = chunk_len;
-                list_add(&c->list_entry, &alloc->alloc_chunks);
+                nvgpu_list_add(&c->list_entry, &alloc->alloc_chunks);
 
                 i++;
         }
 
         alloc->nr_chunks = i;
-        c = list_first_entry(&alloc->alloc_chunks,
-                             struct page_alloc_chunk, list_entry);
+        c = nvgpu_list_first_entry(&alloc->alloc_chunks,
+                                   page_alloc_chunk, list_entry);
         alloc->base = c->base;
 
         return alloc;
 
 fail_cleanup:
-        while (!list_empty(&alloc->alloc_chunks)) {
-                c = list_first_entry(&alloc->alloc_chunks,
-                                     struct page_alloc_chunk, list_entry);
-                list_del(&c->list_entry);
+        while (!nvgpu_list_empty(&alloc->alloc_chunks)) {
+                c = nvgpu_list_first_entry(&alloc->alloc_chunks,
+                                           page_alloc_chunk, list_entry);
+                nvgpu_list_del(&c->list_entry);
                 nvgpu_free(&a->source_allocator, c->base);
                 nvgpu_kmem_cache_free(a->chunk_cache, c);
         }
@@ -548,7 +548,8 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages(
 
         palloc_dbg(a, "Alloc 0x%llx (%llu) id=0x%010llx\n",
                    pages << a->page_shift, pages, alloc->base);
-        list_for_each_entry(c, &alloc->alloc_chunks, list_entry) {
+        nvgpu_list_for_each_entry(c, &alloc->alloc_chunks,
+                                  page_alloc_chunk, list_entry) {
                 palloc_dbg(a, " Chunk %2d: 0x%010llx + 0x%llx\n",
                            i++, c->base, c->length);
         }
@@ -664,11 +665,11 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages_fixed(
 
         alloc->nr_chunks = 1;
         alloc->length = length;
-        INIT_LIST_HEAD(&alloc->alloc_chunks);
+        nvgpu_init_list_node(&alloc->alloc_chunks);
 
         c->base = alloc->base;
         c->length = length;
-        list_add(&c->list_entry, &alloc->alloc_chunks);
+        nvgpu_list_add(&c->list_entry, &alloc->alloc_chunks);
 
         return alloc;
 
@@ -708,7 +709,8 @@ static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *__a,
 
         palloc_dbg(a, "Alloc [fixed] @ 0x%010llx + 0x%llx (%llu)\n",
                    alloc->base, aligned_len, pages);
-        list_for_each_entry(c, &alloc->alloc_chunks, list_entry) {
+        nvgpu_list_for_each_entry(c, &alloc->alloc_chunks,
+                                  page_alloc_chunk, list_entry) {
                 palloc_dbg(a, " Chunk %2d: 0x%010llx + 0x%llx\n",
                            i++, c->base, c->length);
         }
@@ -88,7 +88,8 @@ void nvgpu_pramin_access_batched(struct gk20a *g, struct mem_desc *mem,
         u32 byteoff, start_reg, until_end, n;
 
         alloc = get_vidmem_page_alloc(mem->sgt->sgl);
-        list_for_each_entry(chunk, &alloc->alloc_chunks, list_entry) {
+        nvgpu_list_for_each_entry(chunk, &alloc->alloc_chunks,
+                        page_alloc_chunk, list_entry) {
                 if (offset >= chunk->length)
                         offset -= chunk->length;
                 else
@@ -113,7 +114,8 @@ void nvgpu_pramin_access_batched(struct gk20a *g, struct mem_desc *mem,
                 size -= n;
 
                 if (n == (chunk->length - offset)) {
-                        chunk = list_next_entry(chunk, list_entry);
+                        chunk = nvgpu_list_next_entry(chunk, page_alloc_chunk,
+                                        list_entry);
                         offset = 0;
                 } else {
                         offset += n / sizeof(u32);
@@ -33,6 +33,7 @@
 #include <nvgpu/kmem.h>
 #include <nvgpu/timers.h>
 #include <nvgpu/pramin.h>
+#include <nvgpu/list.h>
 #include <nvgpu/allocator.h>
 #include <nvgpu/semaphore.h>
 #include <nvgpu/page_allocator.h>
@@ -2174,7 +2175,8 @@ static u64 gk20a_mm_get_align(struct gk20a *g, struct scatterlist *sgl,
                 struct nvgpu_page_alloc *alloc = get_vidmem_page_alloc(sgl);
                 struct page_alloc_chunk *chunk = NULL;
 
-                list_for_each_entry(chunk, &alloc->alloc_chunks, list_entry) {
+                nvgpu_list_for_each_entry(chunk, &alloc->alloc_chunks,
+                                page_alloc_chunk, list_entry) {
                         chunk_align = 1ULL << __ffs(chunk->base | chunk->length);
 
                         if (align)
@@ -2875,7 +2877,8 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct mem_desc *mem)
 
         alloc = get_vidmem_page_alloc(mem->sgt->sgl);
 
-        list_for_each_entry(chunk, &alloc->alloc_chunks, list_entry) {
+        nvgpu_list_for_each_entry(chunk, &alloc->alloc_chunks,
+                        page_alloc_chunk, list_entry) {
                 if (gk20a_last_fence)
                         gk20a_fence_put(gk20a_last_fence);
 
@@ -3732,8 +3735,8 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
                 if (sgt) {
                         alloc = get_vidmem_page_alloc(sgt->sgl);
 
-                        list_for_each_entry(chunk, &alloc->alloc_chunks,
-                                        list_entry) {
+                        nvgpu_list_for_each_entry(chunk, &alloc->alloc_chunks,
+                                        page_alloc_chunk, list_entry) {
                                 if (space_to_skip &&
                                     space_to_skip > chunk->length) {
                                         space_to_skip -= chunk->length;
@@ -22,6 +22,7 @@
 
 #include <nvgpu/allocator.h>
 #include <nvgpu/kmem.h>
+#include <nvgpu/list.h>
 
 struct nvgpu_allocator;
 
@@ -78,19 +79,26 @@ struct page_alloc_slab_page {
 };
 
 struct page_alloc_chunk {
-        struct list_head list_entry;
+        struct nvgpu_list_node list_entry;
 
         u64 base;
         u64 length;
 };
 
+static inline struct page_alloc_chunk *
+page_alloc_chunk_from_list_entry(struct nvgpu_list_node *node)
+{
+        return (struct page_alloc_chunk *)
+                ((uintptr_t)node - offsetof(struct page_alloc_chunk, list_entry));
+};
+
 /*
  * Struct to handle internal management of page allocation. It holds a list
  * of the chunks of pages that make up the overall allocation - much like a
  * scatter gather table.
  */
 struct nvgpu_page_alloc {
-        struct list_head alloc_chunks;
+        struct nvgpu_list_node alloc_chunks;
 
         int nr_chunks;
         u64 length;
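The one structural addition in the header is page_alloc_chunk_from_list_entry(). The Linux macros recovered the container with container_of() on a struct type; the nvgpu entry macros instead expect each list-carrying type to provide a converter like this one, presumably located by pasting together the type and member names passed at the call sites (page_alloc_chunk, list_entry). The stand-alone check below exercises only the offsetof() arithmetic the helper performs; the struct layouts are stand-ins, not the driver's definitions.

/* Stand-alone check of the offsetof() arithmetic used by the new helper.
 * The struct layouts are stand-ins; only the arithmetic matches the hunk. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct nvgpu_list_node {
        struct nvgpu_list_node *prev, *next;
};

struct page_alloc_chunk {
        struct nvgpu_list_node list_entry;
        uint64_t base;
        uint64_t length;
};

static inline struct page_alloc_chunk *
page_alloc_chunk_from_list_entry(struct nvgpu_list_node *node)
{
        return (struct page_alloc_chunk *)
                ((uintptr_t)node - offsetof(struct page_alloc_chunk, list_entry));
}

int main(void)
{
        struct page_alloc_chunk c = { .base = 0x1000, .length = 0x2000 };

        /* Recovering the container from its embedded node yields &c again. */
        assert(page_alloc_chunk_from_list_entry(&c.list_entry) == &c);
        return 0;
}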