diff --git a/drivers/gpu/nvgpu/include/nvgpu/linux/dma.h b/drivers/gpu/nvgpu/include/nvgpu/linux/dma.h
index 342b278e1..e1704f4a8 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/linux/dma.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/linux/dma.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -35,4 +35,27 @@ int nvgpu_get_sgtable_from_pages(struct gk20a *g, struct sg_table **sgt,
 
 void nvgpu_free_sgtable(struct gk20a *g, struct sg_table **sgt);
 
+/**
+ * nvgpu_dma_mmap_sys - Map allocated memory into userspace
+ *
+ * @param g - The GPU.
+ * @param vma - User provided VMA
+ * @param mem - Struct for retrieving the information about the allocated buffer. Must be
+ *	allocated via a call to nvgpu_dma_alloc* API and use SYSMEM aperture.
+ *	The following flags modifying the operation of the DMA
+ *	mapping are accepted:
+ *	- %NVGPU_DMA_NO_KERNEL_MAPPING
+ *	- %NVGPU_DMA_PHYSICALLY_ADDRESSED
+ *
+ * Map memory suitable for doing DMA into a valid user VMA.
+ * Returns 0 on success and a suitable error code when there's an error. This requires
+ * that a buffer(mem) is already allocated using nvgpu_dma_alloc* API.
+ *
+ * @return 0 for success, < 0 for failure.
+ * @retval -EINVAL incorrect parameters
+ * @retval -EEXIST VMA is already mapped.
+ * @retval Other failures.
+ */
+int nvgpu_dma_mmap_sys(struct gk20a *g, struct vm_area_struct *vma, struct nvgpu_mem *mem);
+
 #endif
diff --git a/drivers/gpu/nvgpu/os/linux/linux-dma.c b/drivers/gpu/nvgpu/os/linux/linux-dma.c
index b97e7af41..f437dbeee 100644
--- a/drivers/gpu/nvgpu/os/linux/linux-dma.c
+++ b/drivers/gpu/nvgpu/os/linux/linux-dma.c
@@ -338,6 +338,76 @@ print_dma_err:
 	return err;
 }
 
+int nvgpu_dma_mmap_sys(struct gk20a *g, struct vm_area_struct *vma, struct nvgpu_mem *mem)
+{
+	struct device *d = dev_from_gk20a(g);
+	dma_addr_t iova;
+	unsigned long dma_attrs = 0;
+	int err;
+	unsigned long flags;
+	size_t size;
+	void *cpu_va;
+	struct vm_area_struct *vma_exists;
+
+	if (!nvgpu_mem_is_valid(mem)) {
+		return -EINVAL;
+	}
+
+	if (mem->aperture != APERTURE_SYSMEM) {
+		return -EINVAL;
+	}
+
+	if ((mem->size == 0) || (PAGE_ALIGN(mem->size) != mem->aligned_size)) {
+		return -EINVAL;
+	}
+
+	if ((vma->vm_end <= vma->vm_start) || (vma->vm_end - vma->vm_start > mem->size)) {
+		return -EINVAL;
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
+	mmap_write_lock(vma->vm_mm);
+#else
+	down_write(&vma->vm_mm->mmap_sem);
+#endif
+
+	vma_exists = find_vma_intersection(vma->vm_mm, vma->vm_start, vma->vm_end);
+	if (vma_exists != NULL) {
+		err = -EEXIST;
+		goto done;
+	}
+
+	size = mem->aligned_size;
+	flags = mem->priv.flags;
+
+	nvgpu_dma_flags_to_attrs(g, &dma_attrs, flags);
+
+	if (nvgpu_nvlink_non_contig(g, flags)) {
+		err = remap_vmalloc_range(vma, mem->cpu_va, vma->vm_pgoff);
+	} else {
+		iova = sg_dma_address(mem->priv.sgt->sgl);
+		if (flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
+			cpu_va = mem->priv.pages;
+		} else {
+			cpu_va = mem->cpu_va;
+		}
+		err = dma_mmap_attrs(d, vma, cpu_va, iova, size, dma_attrs);
+	}
+
+done:
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
+	mmap_write_unlock(vma->vm_mm);
+#else
+	up_write(&vma->vm_mm->mmap_sem);
+#endif
+
+	if (err != 0) {
+		nvgpu_err(g, "failed to map mem into userspace vma");
+	}
+
+	return err;
+}
+
 #if defined(CONFIG_NVGPU_DGPU)
 int
nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags, size_t size, struct nvgpu_mem *mem, u64 at)