gpu: nvgpu: remove nvgpu_mem_{begin,end}()

The NVGPU_DMA_NO_KERNEL_MAPPING flag is going away, and these functions
are no longer used. Delete them.

Change-Id: I0084d64c92783dd65306871e5cf6bd6366087caf
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1761581
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Konsta Holtta <kholtta@nvidia.com>
Date:      2018-06-26 13:10:12 +03:00
Committed: mobile promotions
Parent:    dc1e91a2d7
Commit:    e8d51f0c32

3 changed files with 2 additions and 81 deletions
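
Note: the caller-visible effect of this change is that buffer reads and
writes no longer need to be bracketed. A minimal sketch of the before and
after (hypothetical caller with a sysmem buffer "mem"; not code from this
change):

/* Before: map on demand, access, then unmap. */
err = nvgpu_mem_begin(g, mem);
if (err)
        return err;
val = nvgpu_mem_rd32(g, mem, 0);
nvgpu_mem_end(g, mem);

/* After: sysmem always carries a kernel mapping, so access directly. */
val = nvgpu_mem_rd32(g, mem, 0);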

@@ -31,16 +31,6 @@
  * can emulate a lot of the DMA mem functionality for unit testing purposes.
  */
-int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
-{
-        return 0;
-}
-
-void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem)
-{
-        return;
-}
-
 u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
 {
         u32 *mem_ptr = (u32 *)mem->cpu_va;

@@ -319,14 +319,10 @@ int nvgpu_mem_create_from_mem(struct gk20a *g,
 void __nvgpu_mem_free_vidmem_alloc(struct gk20a *g, struct nvgpu_mem *vidmem);
 
 /*
- * Buffer accessors - wrap between begin() and end() if there is no permanent
- * kernel mapping for this buffer.
+ * Buffer accessors. Sysmem buffers always have a CPU mapping and vidmem
+ * buffers are accessed via PRAMIN.
  */
-int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem);
-/* nop for null mem, like with free() or vunmap() */
-void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem);
-
 /* word-indexed offset */
 u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w);
 /* byte offset (32b-aligned) */
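
The rewritten comment states the model that makes begin()/end() redundant:
sysmem is read through its permanent CPU mapping, vidmem through the PRAMIN
window. A rough sketch of that dispatch, assuming a hypothetical
pramin_read_word() helper standing in for the driver's PRAMIN accessors:

static u32 read_word(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
{
        if (mem->aperture == APERTURE_SYSMEM) {
                /* Permanent kernel mapping made at allocation time. */
                u32 *cpu_ptr = (u32 *)mem->cpu_va;

                return cpu_ptr[w];
        }

        /* Vidmem has no CPU mapping; go through PRAMIN instead. */
        return pramin_read_word(g, mem, w);
}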

@@ -48,71 +48,6 @@ static u64 __nvgpu_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl)
         return ipa;
 }
 
-int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
-{
-        void *cpu_va;
-        pgprot_t prot = nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM) ?
-                PAGE_KERNEL :
-                pgprot_writecombine(PAGE_KERNEL);
-
-        if (mem->aperture != APERTURE_SYSMEM)
-                return 0;
-
-        /*
-         * WAR for bug 2040115: we already will always have a coherent vmap()
-         * for all sysmem buffers. The prot settings are left alone since
-         * eventually this should be deleted.
-         */
-        if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
-                return 0;
-
-        /*
-         * A CPU mapping is implicitly made for all SYSMEM DMA allocations that
-         * don't have NVGPU_DMA_NO_KERNEL_MAPPING. Thus we don't need to make
-         * another CPU mapping.
-         */
-        if (!(mem->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING))
-                return 0;
-
-        if (WARN_ON(mem->cpu_va)) {
-                nvgpu_warn(g, "nested");
-                return -EBUSY;
-        }
-
-        cpu_va = vmap(mem->priv.pages,
-                        PAGE_ALIGN(mem->size) >> PAGE_SHIFT,
-                        0, prot);
-
-        if (WARN_ON(!cpu_va))
-                return -ENOMEM;
-
-        mem->cpu_va = cpu_va;
-        return 0;
-}
-
-void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem)
-{
-        if (mem->aperture != APERTURE_SYSMEM)
-                return;
-
-        /*
-         * WAR for bug 2040115: skip this since the map will be taken care of
-         * during the free in the DMA API.
-         */
-        if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
-                return;
-
-        /*
-         * Similar to nvgpu_mem_begin() we don't need to unmap the CPU mapping
-         * already made by the DMA API.
-         */
-        if (!(mem->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING))
-                return;
-
-        vunmap(mem->cpu_va);
-        mem->cpu_va = NULL;
-}
-
 static void pramin_access_batch_rd_n(struct gk20a *g, u32 start, u32 words, u32 **arg)
 {
         struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
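
For reference, the mapping trick the deleted pair implemented reduces to
vmap()/vunmap() with write-combined page protection. A distilled, generic
sketch of that pattern (not a drop-in replacement for the removed functions;
pgprot_writecombine() and PAGE_KERNEL come from the arch pgtable headers):

#include <linux/mm.h>       /* PAGE_ALIGN(), PAGE_SHIFT */
#include <linux/vmalloc.h>  /* vmap(), vunmap() */

/* Map an array of pages into one contiguous, write-combined kernel
 * virtual range, as the deleted nvgpu_mem_begin() did for sysmem. */
static void *map_pages_wc(struct page **pages, size_t size)
{
        return vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT, 0,
                    pgprot_writecombine(PAGE_KERNEL));
}

/* Tear the temporary mapping down again, as nvgpu_mem_end() did. */
static void unmap_pages(void *cpu_va)
{
        vunmap(cpu_va);
}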