diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 208e52918..d7f0f8efc 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -475,6 +475,18 @@ static void unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va)
 	FLUSH_CPU_DCACHE(handle, sg_phys(sgt->sgl), sgt->sgl->length);
 }
 #else
+/* APIs for 64 bit arch */
+static int __alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
+			      void **handle,
+			      struct sg_table **sgt,
+			      size_t *size);
+static void __free_gmmu_pages(struct vm_gk20a *vm, void *handle,
+			      struct sg_table *sgt, u32 order,
+			      size_t size);
+static int __map_gmmu_pages(void *handle, struct sg_table *sgt,
+			    void **kva, size_t size);
+static void __unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va);
+
 static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
 			    void **handle,
 			    struct sg_table **sgt,
@@ -490,6 +502,9 @@ static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
 
 	gk20a_dbg_fn("");
 
+	if (IS_ENABLED(CONFIG_ARM64))
+		return __alloc_gmmu_pages(vm, order, handle, sgt, size);
+
 	*size = len;
 	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
 	pages = dma_alloc_attrs(d, len, &iova, GFP_KERNEL, &attrs);
@@ -529,6 +544,11 @@ static void free_gmmu_pages(struct vm_gk20a *vm, void *handle,
 	gk20a_dbg_fn("");
 	BUG_ON(sgt == NULL);
 
+	if (IS_ENABLED(CONFIG_ARM64)) {
+		__free_gmmu_pages(vm, handle, sgt, order, size);
+		return;
+	}
+
 	iova = sg_dma_address(sgt->sgl);
 
 	gk20a_free_sgtable(&sgt);
@@ -546,6 +566,9 @@ static int map_gmmu_pages(void *handle, struct sg_table *sgt,
 	struct page **pages = (struct page **)handle;
 	gk20a_dbg_fn("");
 
+	if (IS_ENABLED(CONFIG_ARM64))
+		return __map_gmmu_pages(handle, sgt, kva, size);
+
 	*kva = vmap(pages, count, 0, pgprot_dmacoherent(PAGE_KERNEL));
 	if (!(*kva))
 		return -ENOMEM;
@@ -556,8 +579,79 @@ static int map_gmmu_pages(void *handle, struct sg_table *sgt,
 static void unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va)
 {
 	gk20a_dbg_fn("");
+
+	if (IS_ENABLED(CONFIG_ARM64)) {
+		__unmap_gmmu_pages(handle, sgt, va);
+		return;
+	}
+
 	vunmap(va);
 }
+
+static int __alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
+			      void **handle,
+			      struct sg_table **sgt,
+			      size_t *size)
+{
+	struct device *d = dev_from_vm(vm);
+	u32 num_pages = 1 << order;
+	u32 len = num_pages * PAGE_SIZE;
+	dma_addr_t iova;
+	void *cpuva;
+	int err = 0;
+
+	*size = len;
+	cpuva = dma_zalloc_coherent(d, len, &iova, GFP_KERNEL);
+	if (!cpuva) {
+		gk20a_err(d, "memory allocation failed\n");
+		goto err_out;
+	}
+
+	err = gk20a_get_sgtable(d, sgt, cpuva, iova, len);
+	if (err) {
+		gk20a_err(d, "sgt allocation failed\n");
+		goto err_free;
+	}
+
+	*handle = cpuva;
+
+	return 0;
+
+err_free:
+	dma_free_coherent(d, len, cpuva, iova);
+	cpuva = NULL;
+	iova = 0;
+err_out:
+	return -ENOMEM;
+}
+
+static void __free_gmmu_pages(struct vm_gk20a *vm, void *handle,
+			      struct sg_table *sgt, u32 order,
+			      size_t size)
+{
+	struct device *d = dev_from_vm(vm);
+	u64 iova;
+
+	iova = sg_dma_address(sgt->sgl);
+
+	gk20a_free_sgtable(&sgt);
+
+	dma_free_coherent(d, size, handle, iova);
+	handle = NULL;
+	iova = 0;
+}
+
+static int __map_gmmu_pages(void *handle, struct sg_table *sgt,
+			    void **kva, size_t size)
+{
+	*kva = handle;
+	return 0;
+}
+
+static void __unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va)
+{
+	gk20a_dbg_fn("");
+}
 #endif
 
 /* allocate a phys contig region big enough for a full
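
For context, below is a minimal caller sketch (not part of the patch) showing how the alloc/map/unmap/free helpers touched above fit together. Only alloc_gmmu_pages(), map_gmmu_pages(), unmap_gmmu_pages() and free_gmmu_pages() come from mm_gk20a.c; the wrapper function name and the memset are illustrative assumptions. With CONFIG_ARM64 set, the new path makes map_gmmu_pages() hand back the coherent CPU address from dma_zalloc_coherent() and turns unmap_gmmu_pages() into a no-op, so the same calling sequence works on both 32-bit and 64-bit builds.

/* Illustrative only: wrapper name is hypothetical and this assumes the
 * surrounding mm_gk20a.c context (struct vm_gk20a and the static
 * gmmu-page helpers modified above). */
static int example_zero_gmmu_page(struct vm_gk20a *vm)
{
	void *handle = NULL;
	struct sg_table *sgt = NULL;
	void *kva = NULL;
	size_t size = 0;
	int err;

	/* order 0: a single page for one page-table chunk */
	err = alloc_gmmu_pages(vm, 0, &handle, &sgt, &size);
	if (err)
		return err;

	/* on ARM64 this simply returns the coherent CPU address */
	err = map_gmmu_pages(handle, sgt, &kva, size);
	if (err) {
		free_gmmu_pages(vm, handle, sgt, 0, size);
		return err;
	}

	memset(kva, 0, size);			/* e.g. clear all entries */

	unmap_gmmu_pages(handle, sgt, kva);	/* no-op on ARM64 */
	free_gmmu_pages(vm, handle, sgt, 0, size);
	return 0;
}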