gpu: nvgpu: Add __nvgpu_mem_create_from_pages()

Add a function to create an nvgpu_mem from a list of arbitrary pages.
This API is useful for pages that were not necessarily allocated by the
Linux page allocator, for example to make nvgpu_mems that represent
carveouts or similar reserved regions.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: Ibcb6432f077a6b0ecf9d183248e47a1f9ecb3ddd
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1464080
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>

@@ -297,3 +297,41 @@ int nvgpu_mem_create_from_mem(struct gk20a *g,
	return ret;
}

int __nvgpu_mem_create_from_pages(struct gk20a *g, struct nvgpu_mem *dest,
				  struct page **pages, int nr_pages)
{
	struct sg_table *sgt;
	struct page **our_pages =
		nvgpu_kmalloc(g, sizeof(struct page *) * nr_pages);

	if (!our_pages)
		return -ENOMEM;

	memcpy(our_pages, pages, sizeof(struct page *) * nr_pages);

	if (nvgpu_get_sgtable_from_pages(g, &sgt, pages, 0,
					 nr_pages * PAGE_SIZE)) {
		nvgpu_kfree(g, our_pages);
		return -ENOMEM;
	}

	/*
	 * If we are making an SGT from physical pages we can be reasonably
	 * certain that this should bypass the SMMU - thus we set the DMA (aka
	 * IOVA) address to 0. This tells the GMMU mapping code to not make a
	 * mapping directed to the SMMU.
	 */
	sg_dma_address(sgt->sgl) = 0;

	dest->mem_flags  = __NVGPU_MEM_FLAG_NO_DMA;
	dest->aperture   = APERTURE_SYSMEM;
	dest->skip_wmb   = 0;
	dest->size       = PAGE_SIZE * nr_pages;

	dest->priv.flags = 0;
	dest->priv.pages = our_pages;
	dest->priv.sgt   = sgt;

	return 0;
}
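
Not part of this change, but for illustration, a minimal sketch of how a caller
might use the new helper to wrap a physically contiguous carveout in an
nvgpu_mem. The wrap_carveout() name, its base/size parameters, and the use of
phys_to_page() are assumptions made for the example, not APIs added by this
patch:

/*
 * Illustrative only: wrap a physically contiguous carveout in an
 * nvgpu_mem. Assumes an architecture that provides phys_to_page().
 */
static int wrap_carveout(struct gk20a *g, struct nvgpu_mem *mem,
			 phys_addr_t base, size_t size)
{
	int nr_pages = size >> PAGE_SHIFT;
	struct page **pages;
	int i, err;

	pages = nvgpu_kmalloc(g, sizeof(struct page *) * nr_pages);
	if (!pages)
		return -ENOMEM;

	/* Build a page list covering the raw physical address range. */
	for (i = 0; i < nr_pages; i++)
		pages[i] = phys_to_page(base + (phys_addr_t)i * PAGE_SIZE);

	err = __nvgpu_mem_create_from_pages(g, mem, pages, nr_pages);

	/* The helper copies the page list, so the caller's copy can be freed. */
	nvgpu_kfree(g, pages);

	return err;
}

Because __nvgpu_mem_create_from_pages() duplicates the page list, marks the
result __NVGPU_MEM_FLAG_NO_DMA, and zeroes the SGT's DMA address, the resulting
nvgpu_mem is set up so the GMMU mapping code will not create an SMMU-directed
mapping for it.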