gpu: nvgpu: Move and rename gk20a_sgtable*

Move and rename the functions that build sgtables for nvgpu_mems into
the Linux specific DMA code.

One place outside of the Linux code still includes the Linux DMA header.
That will be fixed in a subsequent patch.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: Ie43c752b8f998f122af70f7c7eb727af0b0d98df
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1464078
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 126c735d30 (parent 6a14d980cf)
Author:    Alex Waterman <alexw@nvidia.com>
Date:      2017-04-10 13:51:43 -07:00
Committer: mobile promotions

7 changed files with 120 additions and 90 deletions
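
For orientation, the relocated interface presumably ends up in the Linux-specific DMA header looking roughly like the sketch below. nvgpu_free_sgtable() is confirmed by a hunk further down; the two nvgpu_get_sgtable* names are an assumption, inferred from the same gk20a_ -> nvgpu_ rename pattern and the old signatures removed in this commit.

	/* Sketch of the moved declarations; assumed home: <nvgpu/linux/dma.h>. */
	#include <linux/types.h>	/* u64, size_t */

	struct device;
	struct page;
	struct sg_table;
	struct gk20a;

	/* Confirmed by this commit's diff. */
	void nvgpu_free_sgtable(struct gk20a *g, struct sg_table **sgt);

	/* Assumed names; signatures taken from the removed gk20a_ versions. */
	int nvgpu_get_sgtable(struct device *d, struct sg_table **sgt,
			      void *cpuva, u64 iova, size_t size);
	int nvgpu_get_sgtable_from_pages(struct device *d, struct sg_table **sgt,
					 struct page **pages, u64 iova,
					 size_t size);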


@@ -38,6 +38,8 @@
 #include <nvgpu/bug.h>
 #include <nvgpu/log2.h>
 
+#include <nvgpu/linux/dma.h>
+
 #include "gk20a.h"
 #include "mm_gk20a.h"
 #include "fence_gk20a.h"
@@ -2621,7 +2623,7 @@ static void gk20a_vidmem_clear_mem_worker(struct work_struct *work)
 	gk20a_gmmu_clear_vidmem_mem(g, mem);
 	nvgpu_free(mem->allocator,
 		   (u64)get_vidmem_page_alloc(mem->priv.sgt->sgl));
-	gk20a_free_sgtable(g, &mem->priv.sgt);
+	nvgpu_free_sgtable(g, &mem->priv.sgt);
 
 	WARN_ON(atomic64_sub_return(mem->size,
 			&g->mm.vidmem.bytes_pending) < 0);
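
Since this is a pure move-and-rename with no behavioral change, the relocated nvgpu_free_sgtable() presumably carries over the body of the gk20a_free_sgtable() deleted in the next hunk, e.g.:

	void nvgpu_free_sgtable(struct gk20a *g, struct sg_table **sgt)
	{
		/* Release the scatter-gather entries, free the table wrapper,
		 * and clear the caller's pointer to prevent a double free. */
		sg_free_table(*sgt);
		nvgpu_kfree(g, *sgt);
		*sgt = NULL;
	}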
@@ -2668,75 +2670,6 @@ void gk20a_gmmu_unmap(struct vm_gk20a *vm,
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
 }
 
-/* get sg_table from already allocated buffer */
-int gk20a_get_sgtable(struct device *d, struct sg_table **sgt,
-		      void *cpuva, u64 iova,
-		      size_t size)
-{
-	struct gk20a *g = get_gk20a(d);
-	int err = 0;
-
-	*sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
-	if (!(*sgt)) {
-		nvgpu_err(g, "failed to allocate memory\n");
-		err = -ENOMEM;
-		goto fail;
-	}
-	err = dma_get_sgtable(d, *sgt,
-			      cpuva, iova,
-			      size);
-	if (err) {
-		nvgpu_err(g, "failed to create sg table\n");
-		goto fail;
-	}
-	sg_dma_address((*sgt)->sgl) = iova;
-
-	return 0;
-fail:
-	if (*sgt) {
-		nvgpu_kfree(g, *sgt);
-		*sgt = NULL;
-	}
-	return err;
-}
-
-int gk20a_get_sgtable_from_pages(struct device *d, struct sg_table **sgt,
-				 struct page **pages, u64 iova,
-				 size_t size)
-{
-	int err = 0;
-	struct gk20a *g = get_gk20a(d);
-
-	*sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
-	if (!(*sgt)) {
-		nvgpu_err(g, "failed to allocate memory\n");
-		err = -ENOMEM;
-		goto fail;
-	}
-	err = sg_alloc_table_from_pages(*sgt, pages,
-			DIV_ROUND_UP(size, PAGE_SIZE), 0, size, GFP_KERNEL);
-	if (err) {
-		nvgpu_err(g, "failed to allocate sg_table\n");
-		goto fail;
-	}
-	sg_dma_address((*sgt)->sgl) = iova;
-
-	return 0;
-fail:
-	if (*sgt) {
-		nvgpu_kfree(get_gk20a(d), *sgt);
-		*sgt = NULL;
-	}
-	return err;
-}
-
-void gk20a_free_sgtable(struct gk20a *g, struct sg_table **sgt)
-{
-	sg_free_table(*sgt);
-	nvgpu_kfree(g, *sgt);
-	*sgt = NULL;
-}
-
 u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, dma_addr_t iova)
 {
 	/* ensure it is not vidmem allocation */
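
Under the same assumption, the two getters presumably land in the Linux DMA code unchanged apart from the rename. A sketch of the first one, with the assumed name nvgpu_get_sgtable and the body identical to the gk20a_get_sgtable() removed above:

	/* get sg_table from already allocated buffer */
	int nvgpu_get_sgtable(struct device *d, struct sg_table **sgt,
			      void *cpuva, u64 iova, size_t size)
	{
		struct gk20a *g = get_gk20a(d);
		int err = 0;

		/* Wrapper struct that dma_get_sgtable() will populate. */
		*sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
		if (!(*sgt)) {
			nvgpu_err(g, "failed to allocate memory\n");
			err = -ENOMEM;
			goto fail;
		}

		err = dma_get_sgtable(d, *sgt, cpuva, iova, size);
		if (err) {
			nvgpu_err(g, "failed to create sg table\n");
			goto fail;
		}

		/* Single-entry table: its DMA address is the IOVA itself. */
		sg_dma_address((*sgt)->sgl) = iova;

		return 0;
	fail:
		if (*sgt) {
			nvgpu_kfree(g, *sgt);
			*sgt = NULL;
		}
		return err;
	}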