gpu: nvgpu: Make "phys" nvgpu_mem impl

Make a physical nvgpu_mem implementation in the common code. This
implementation assumes a single, contiguous, physical range. GMMU
mappability is provided by building a one-entry SGT.
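
For illustration, a minimal sketch of what such a common helper boils
down to (the helper and struct names here are assumptions, not
necessarily the exact code this change adds): allocate an nvgpu_sgt
whose single SGL entry records the physical base and length.

struct nvgpu_mem_phys_sgl {
	u64 phys;	/* Base address of the contiguous physical range. */
	u64 length;	/* Length of the range in bytes. */
};

int nvgpu_mem_create_from_phys(struct gk20a *g, struct nvgpu_mem *dest,
			       u64 src_phys, u64 nr_pages)
{
	struct nvgpu_sgt *sgt = nvgpu_kzalloc(g, sizeof(*sgt));
	struct nvgpu_mem_phys_sgl *sgl = nvgpu_kzalloc(g, sizeof(*sgl));

	if (sgt == NULL || sgl == NULL) {
		/* nvgpu_kfree(), like kfree(), tolerates NULL. */
		nvgpu_kfree(g, sgt);
		nvgpu_kfree(g, sgl);
		return -ENOMEM;
	}

	/* One entry suffices: the range is contiguous by assumption. */
	sgl->phys = src_phys;
	sgl->length = nr_pages * PAGE_SIZE;

	sgt->sgl = (struct nvgpu_sgl *)sgl;
	sgt->ops = &nvgpu_mem_phys_ops;	/* Hypothetical sgl callbacks. */

	dest->mem_flags = __NVGPU_MEM_FLAG_NO_DMA;
	dest->aperture = APERTURE_SYSMEM;
	dest->size = nr_pages * PAGE_SIZE;
	dest->phys_sgt = sgt;

	return 0;
}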

Since this is now "common" code the original Linux code has been
moved to common/mm/nvgpu_mem.c.

Also drop the '__' prefix from the nvgpu_mem function. The prefix is
not necessary: this function, although somewhat tricky, is expected
to be used by arbitrary users within the nvgpu driver.

JIRA NVGPU-1029
Bug 2441531

Change-Id: I42313e5c664df3cd94933cc63ff0528326628683
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1995866
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Alex Waterman
Committed: 2019-01-15 13:35:53 -08:00 by mobile promotions
Parent: e2a29dbb96
Commit: f766c6af91
8 changed files with 150 additions and 114 deletions

@@ -26,6 +26,7 @@
 #include <nvgpu/enabled.h>
 #include <nvgpu/vidmem.h>
 #include <nvgpu/gk20a.h>
+#include <nvgpu/nvgpu_sgt.h>
 
 #include <nvgpu/linux/dma.h>
@@ -345,11 +346,13 @@ void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 	}
 
 	/*
-	 * When this flag is set we expect that pages is still populated but not
-	 * by the DMA API.
+	 * When this flag is set this means we are freeing a "phys" nvgpu_mem.
+	 * To handle this just nvgpu_kfree() the nvgpu_sgt and nvgpu_sgl.
 	 */
-	if (mem->mem_flags & __NVGPU_MEM_FLAG_NO_DMA)
-		nvgpu_kfree(g, mem->priv.pages);
+	if (mem->mem_flags & __NVGPU_MEM_FLAG_NO_DMA) {
+		nvgpu_kfree(g, mem->phys_sgt->sgl);
+		nvgpu_kfree(g, mem->phys_sgt);
+	}
 
 	if ((mem->mem_flags & NVGPU_MEM_FLAG_FOREIGN_SGT) == 0 &&
 	    mem->priv.sgt != NULL) {
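
For context, a hypothetical caller pairing create and free (the create
helper name follows the sketch above, not necessarily this change
verbatim; nvgpu_dma_free() is the existing teardown entry point):

static int map_fixed_range(struct gk20a *g, u64 phys_base, u64 nr_pages)
{
	struct nvgpu_mem mem = { };
	int err;

	/* Wrap an existing contiguous physical range; no DMA API allocation. */
	err = nvgpu_mem_create_from_phys(g, &mem, phys_base, nr_pages);
	if (err != 0)
		return err;

	/* ... map through the GMMU using mem.phys_sgt ... */

	/* Teardown takes the __NVGPU_MEM_FLAG_NO_DMA branch shown above. */
	nvgpu_dma_free(g, &mem);
	return 0;
}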

@@ -188,66 +188,6 @@ int nvgpu_mem_create_from_mem(struct gk20a *g,
 	return ret;
 }
 
-int __nvgpu_mem_create_from_pages(struct gk20a *g, struct nvgpu_mem *dest,
-				  struct page **pages, int nr_pages)
-{
-	struct sg_table *sgt;
-	struct page **our_pages =
-		nvgpu_kmalloc(g, sizeof(struct page *) * nr_pages);
-
-	if (!our_pages)
-		return -ENOMEM;
-
-	nvgpu_memcpy((u8 *)our_pages, (u8 *)pages,
-		     sizeof(struct page *) * nr_pages);
-
-	if (nvgpu_get_sgtable_from_pages(g, &sgt, pages, 0,
-					 nr_pages * PAGE_SIZE)) {
-		nvgpu_kfree(g, our_pages);
-		return -ENOMEM;
-	}
-
-	/*
-	 * If we are making an SGT from physical pages we can be reasonably
-	 * certain that this should bypass the SMMU - thus we set the DMA (aka
-	 * IOVA) address to 0. This tells the GMMU mapping code to not make a
-	 * mapping directed to the SMMU.
-	 */
-	sg_dma_address(sgt->sgl) = 0;
-
-	dest->mem_flags = __NVGPU_MEM_FLAG_NO_DMA;
-	dest->aperture = APERTURE_SYSMEM;
-	dest->skip_wmb = 0;
-	dest->size = PAGE_SIZE * nr_pages;
-
-	dest->priv.flags = 0;
-	dest->priv.pages = our_pages;
-	dest->priv.sgt = sgt;
-
-	return 0;
-}
-
-#ifdef CONFIG_TEGRA_GK20A_NVHOST
-int __nvgpu_mem_create_from_phys(struct gk20a *g, struct nvgpu_mem *dest,
-				 u64 src_phys, int nr_pages)
-{
-	struct page **pages =
-		nvgpu_kmalloc(g, sizeof(struct page *) * nr_pages);
-	int i, ret = 0;
-
-	if (!pages)
-		return -ENOMEM;
-
-	for (i = 0; i < nr_pages; i++)
-		pages[i] = phys_to_page(src_phys + PAGE_SIZE * i);
-
-	ret = __nvgpu_mem_create_from_pages(g, dest, pages, nr_pages);
-	nvgpu_kfree(g, pages);
-
-	return ret;
-}
-#endif
-
 static struct nvgpu_sgl *nvgpu_mem_linux_sgl_next(struct nvgpu_sgl *sgl)
 {
 	return (struct nvgpu_sgl *)sg_next((struct scatterlist *)sgl);
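
The Linux-only helpers removed above depended on struct page and an
sg_table; the common replacement needs neither. A minimal sketch of the
kind of SGT callbacks the common "phys" implementation would plug in,
modeled on the Linux sgl ops above (names, signatures, and the
nvgpu_mem_phys_sgl struct are assumptions, not verbatim from
common/mm/nvgpu_mem.c):

static struct nvgpu_sgl *nvgpu_mem_phys_sgl_next(struct nvgpu_sgl *sgl)
{
	/* A one-entry SGT never has a next entry. */
	return NULL;
}

static u64 nvgpu_mem_phys_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl)
{
	return ((struct nvgpu_mem_phys_sgl *)sgl)->phys;
}

static u64 nvgpu_mem_phys_sgl_length(struct nvgpu_sgl *sgl)
{
	return ((struct nvgpu_mem_phys_sgl *)sgl)->length;
}

static u64 nvgpu_mem_phys_sgl_dma(struct nvgpu_sgl *sgl)
{
	/*
	 * Mirrors the removed sg_dma_address() = 0 trick: a zero DMA
	 * (IOVA) address tells the GMMU mapping code to bypass the SMMU.
	 */
	return 0;
}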