gpu: nvgpu: Pass DMA allocation flags correctly

The same DMA attribute flags must be passed to both the dma_alloc
and the sg_alloc calls. Update nvgpu_dma_alloc_flags_sys to always
pass the flags to both.

Bug 1930032

Change-Id: I10c4c07d7b518d9ab6c48dd7a0758c68750d02a6
Signed-off-by: David Gilhooley <dgilhooley@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1596848
Reviewed-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
David Gilhooley
2017-11-12 21:38:58 -08:00
committed by mobile promotions
parent 90aeab9dee
commit b22c5911dd
2 changed files with 31 additions and 31 deletions

View File

@@ -211,6 +211,8 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
struct device *d = dev_from_gk20a(g);
int err;
dma_addr_t iova;
DEFINE_DMA_ATTRS(dma_attrs);
void *alloc_ret;
/*
* Before the debug print so we see this in the total. But during
@@ -227,37 +229,22 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
mem->size = size;
size = PAGE_ALIGN(size);
if (flags) {
DEFINE_DMA_ATTRS(dma_attrs);
nvgpu_dma_flags_to_attrs(&dma_attrs, flags);
nvgpu_dma_flags_to_attrs(&dma_attrs, flags);
if (flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
mem->priv.pages = dma_alloc_attrs(d,
size, &iova, GFP_KERNEL,
alloc_ret = dma_alloc_attrs(d, size, &iova, GFP_KERNEL,
__DMA_ATTR(dma_attrs));
if (!mem->priv.pages)
return -ENOMEM;
} else {
mem->cpu_va = dma_alloc_attrs(d,
size, &iova, GFP_KERNEL,
__DMA_ATTR(dma_attrs));
if (!mem->cpu_va)
return -ENOMEM;
}
} else {
mem->cpu_va = dma_alloc_coherent(d, size, &iova, GFP_KERNEL);
if (!mem->cpu_va)
return -ENOMEM;
}
if (!alloc_ret)
return -ENOMEM;
if (flags & NVGPU_DMA_NO_KERNEL_MAPPING)
if (flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
mem->priv.pages = alloc_ret;
err = nvgpu_get_sgtable_from_pages(g, &mem->priv.sgt,
mem->priv.pages,
iova, size);
else {
err = nvgpu_get_sgtable(g, &mem->priv.sgt, mem->cpu_va,
iova, size);
} else {
mem->cpu_va = alloc_ret;
err = nvgpu_get_sgtable_attrs(g, &mem->priv.sgt, mem->cpu_va,
iova, size, flags);
memset(mem->cpu_va, 0, size);
}
if (err)
@@ -273,7 +260,7 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
fail_free:
g->dma_memory_used -= mem->aligned_size;
dma_free_coherent(d, size, mem->cpu_va, iova);
dma_free_attrs(d, size, alloc_ret, iova, __DMA_ATTR(dma_attrs));
mem->cpu_va = NULL;
mem->priv.sgt = NULL;
mem->size = 0;
@@ -571,11 +558,12 @@ void nvgpu_dma_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem)
nvgpu_dma_free(vm->mm->g, mem);
}
int nvgpu_get_sgtable(struct gk20a *g, struct sg_table **sgt,
void *cpuva, u64 iova, size_t size)
int nvgpu_get_sgtable_attrs(struct gk20a *g, struct sg_table **sgt,
void *cpuva, u64 iova, size_t size, unsigned long flags)
{
int err = 0;
struct sg_table *tbl;
DEFINE_DMA_ATTRS(dma_attrs);
tbl = nvgpu_kzalloc(g, sizeof(struct sg_table));
if (!tbl) {
@@ -583,7 +571,9 @@ int nvgpu_get_sgtable(struct gk20a *g, struct sg_table **sgt,
goto fail;
}
err = dma_get_sgtable(dev_from_gk20a(g), tbl, cpuva, iova, size);
nvgpu_dma_flags_to_attrs(&dma_attrs, flags);
err = dma_get_sgtable_attrs(dev_from_gk20a(g), tbl, cpuva, iova,
size, __DMA_ATTR(dma_attrs));
if (err)
goto fail;
@@ -599,6 +589,12 @@ fail:
return err;
}
/*
 * Backward-compatible wrapper for callers that carry no DMA allocation
 * flags: delegates to nvgpu_get_sgtable_attrs() with an empty flag set.
 */
int nvgpu_get_sgtable(struct gk20a *g, struct sg_table **sgt,
		void *cpuva, u64 iova, size_t size)
{
	unsigned long no_flags = 0;

	return nvgpu_get_sgtable_attrs(g, sgt, cpuva, iova, size, no_flags);
}
int nvgpu_get_sgtable_from_pages(struct gk20a *g, struct sg_table **sgt,
struct page **pages, u64 iova, size_t size)
{