gpu: nvgpu: use nvgpu list for page chunks

Use nvgpu list APIs instead of Linux list APIs
to store chunks of the page allocator

Jira NVGPU-13

Change-Id: I63375fc2df683e018c48a90b76eca368438cc32f
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1326814
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
This commit is contained in:
Deepak Nibade
2017-02-10 17:35:58 +05:30
committed by mobile promotions
parent 1e355ca52e
commit 0d8830394a
4 changed files with 43 additions and 28 deletions

View File

@@ -88,7 +88,8 @@ void nvgpu_pramin_access_batched(struct gk20a *g, struct mem_desc *mem,
u32 byteoff, start_reg, until_end, n;
alloc = get_vidmem_page_alloc(mem->sgt->sgl);
list_for_each_entry(chunk, &alloc->alloc_chunks, list_entry) {
nvgpu_list_for_each_entry(chunk, &alloc->alloc_chunks,
page_alloc_chunk, list_entry) {
if (offset >= chunk->length)
offset -= chunk->length;
else
@@ -113,7 +114,8 @@ void nvgpu_pramin_access_batched(struct gk20a *g, struct mem_desc *mem,
size -= n;
if (n == (chunk->length - offset)) {
chunk = list_next_entry(chunk, list_entry);
chunk = nvgpu_list_next_entry(chunk, page_alloc_chunk,
list_entry);
offset = 0;
} else {
offset += n / sizeof(u32);