gpu: nvgpu: Use our own vmap() for coherent DMA buffers

For some reason the GPU does not like the mappings created by the
DMA API for coherent sysmem buffers. But a plain vmap() does seem
to work. To work around this, when we are using coherent sysmem,
force the NO_KERNEL_MAPPING flag on and then create a vmap() in
the nvgpu DMA API wrapper. The rest of the driver will be none the
wiser but will work as expected.

This problem is not understood yet but it is being tracked in bug
2040115. Once the bug is understood, this WAR should either be
confirmed as necessary or reverted and replaced with a proper fix.

Bug 2040115
JIRA EVLR-2333

Change-Id: Idae7a0c92441f0309df572ac18697af49bb6ff2b
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1657568
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Alex Waterman
2018-02-14 10:58:24 -08:00
committed by mobile promotions
parent 1170687c33
commit 3fdd8e38b2
4 changed files with 63 additions and 17 deletions

View File

@@ -221,6 +221,16 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
NVGPU_DEFINE_DMA_ATTRS(dma_attrs); NVGPU_DEFINE_DMA_ATTRS(dma_attrs);
void *alloc_ret; void *alloc_ret;
/*
* WAR for IO coherent chips: the DMA API does not seem to generate
* mappings that work correctly. Unclear why - Bug ID: 2040115.
*
* Basically we just tell the DMA API not to map with NO_KERNEL_MAPPING
* and then make a vmap() ourselves.
*/
if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
flags |= NVGPU_DMA_NO_KERNEL_MAPPING;
/* /*
* Before the debug print so we see this in the total. But during * Before the debug print so we see this in the total. But during
* cleanup in the fail path this has to be subtracted. * cleanup in the fail path this has to be subtracted.
@@ -255,7 +265,17 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
iova, size, flags); iova, size, flags);
} }
if (err) if (err)
goto fail_free; goto fail_free_dma;
if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM)) {
mem->cpu_va = vmap(mem->priv.pages,
size >> PAGE_SHIFT,
0, PAGE_KERNEL);
if (!mem->cpu_va) {
err = -ENOMEM;
goto fail_free_sgt;
}
}
mem->aligned_size = size; mem->aligned_size = size;
mem->aperture = APERTURE_SYSMEM; mem->aperture = APERTURE_SYSMEM;
@@ -265,12 +285,14 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
return 0; return 0;
fail_free: fail_free_sgt:
g->dma_memory_used -= mem->aligned_size; nvgpu_free_sgtable(g, &mem->priv.sgt);
fail_free_dma:
dma_free_attrs(d, size, alloc_ret, iova, NVGPU_DMA_ATTR(dma_attrs)); dma_free_attrs(d, size, alloc_ret, iova, NVGPU_DMA_ATTR(dma_attrs));
mem->cpu_va = NULL; mem->cpu_va = NULL;
mem->priv.sgt = NULL; mem->priv.sgt = NULL;
mem->size = 0; mem->size = 0;
g->dma_memory_used -= mem->aligned_size;
return err; return err;
} }
@@ -466,6 +488,12 @@ static void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
if (!(mem->mem_flags & NVGPU_MEM_FLAG_SHADOW_COPY) && if (!(mem->mem_flags & NVGPU_MEM_FLAG_SHADOW_COPY) &&
!(mem->mem_flags & __NVGPU_MEM_FLAG_NO_DMA) && !(mem->mem_flags & __NVGPU_MEM_FLAG_NO_DMA) &&
(mem->cpu_va || mem->priv.pages)) { (mem->cpu_va || mem->priv.pages)) {
/*
* Free side of WAR for bug 2040115.
*/
if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
vunmap(mem->cpu_va);
if (mem->priv.flags) { if (mem->priv.flags) {
NVGPU_DEFINE_DMA_ATTRS(dma_attrs); NVGPU_DEFINE_DMA_ATTRS(dma_attrs);

View File

@@ -1149,6 +1149,12 @@ static int gk20a_probe(struct platform_device *dev)
if (err) if (err)
goto return_err; goto return_err;
np = nvgpu_get_node(gk20a);
if (of_dma_is_coherent(np)) {
__nvgpu_set_enabled(gk20a, NVGPU_USE_COHERENT_SYSMEM, true);
__nvgpu_set_enabled(gk20a, NVGPU_SUPPORT_IO_COHERENCE, true);
}
if (nvgpu_platform_is_simulation(gk20a)) if (nvgpu_platform_is_simulation(gk20a))
__nvgpu_set_enabled(gk20a, NVGPU_IS_FMODEL, true); __nvgpu_set_enabled(gk20a, NVGPU_IS_FMODEL, true);
@@ -1208,12 +1214,6 @@ static int gk20a_probe(struct platform_device *dev)
gk20a->mm.has_physical_mode = !nvgpu_is_hypervisor_mode(gk20a); gk20a->mm.has_physical_mode = !nvgpu_is_hypervisor_mode(gk20a);
np = nvgpu_get_node(gk20a);
if (of_dma_is_coherent(np)) {
__nvgpu_set_enabled(gk20a, NVGPU_USE_COHERENT_SYSMEM, true);
__nvgpu_set_enabled(gk20a, NVGPU_SUPPORT_IO_COHERENCE, true);
}
return 0; return 0;
return_err: return_err:

View File

@@ -44,6 +44,14 @@ int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin) if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
return 0; return 0;
/*
* WAR for bug 2040115: we already will always have a coherent vmap()
* for all sysmem buffers. The prot settings are left alone since
* eventually this should be deleted.
*/
if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
return 0;
/* /*
* A CPU mapping is implicitly made for all SYSMEM DMA allocations that * A CPU mapping is implicitly made for all SYSMEM DMA allocations that
* don't have NVGPU_DMA_NO_KERNEL_MAPPING. Thus we don't need to make * don't have NVGPU_DMA_NO_KERNEL_MAPPING. Thus we don't need to make
@@ -73,6 +81,13 @@ void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem)
if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin) if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
return; return;
/*
* WAR for bug 2040115: skip this since the map will be taken care of
* during the free in the DMA API.
*/
if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
return;
/* /*
* Similar to nvgpu_mem_begin() we don't need to unmap the CPU mapping * Similar to nvgpu_mem_begin() we don't need to unmap the CPU mapping
* already made by the DMA API. * already made by the DMA API.
@@ -393,8 +408,12 @@ int nvgpu_mem_create_from_mem(struct gk20a *g,
/* /*
* Re-use the CPU mapping only if the mapping was made by the DMA API. * Re-use the CPU mapping only if the mapping was made by the DMA API.
*
* Bug 2040115: the DMA API wrapper makes the mapping that we should
* re-use.
*/ */
if (!(src->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING)) if (!(src->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING) ||
nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
dest->cpu_va = src->cpu_va + (PAGE_SIZE * start_page); dest->cpu_va = src->cpu_va + (PAGE_SIZE * start_page);
dest->priv.pages = src->priv.pages + start_page; dest->priv.pages = src->priv.pages + start_page;

View File

@@ -566,6 +566,12 @@ static int nvgpu_pci_probe(struct pci_dev *pdev,
platform->g = g; platform->g = g;
l->dev = &pdev->dev; l->dev = &pdev->dev;
np = nvgpu_get_node(g);
if (of_dma_is_coherent(np)) {
__nvgpu_set_enabled(g, NVGPU_USE_COHERENT_SYSMEM, true);
__nvgpu_set_enabled(g, NVGPU_SUPPORT_IO_COHERENCE, true);
}
err = pci_enable_device(pdev); err = pci_enable_device(pdev);
if (err) if (err)
return err; return err;
@@ -644,13 +650,6 @@ static int nvgpu_pci_probe(struct pci_dev *pdev,
g->mm.has_physical_mode = false; g->mm.has_physical_mode = false;
np = nvgpu_get_node(g);
if (of_dma_is_coherent(np)) {
__nvgpu_set_enabled(g, NVGPU_USE_COHERENT_SYSMEM, true);
__nvgpu_set_enabled(g, NVGPU_SUPPORT_IO_COHERENCE, true);
}
return 0; return 0;
} }