diff --git a/drivers/gpu/nvgpu/Kconfig b/drivers/gpu/nvgpu/Kconfig
index bbd5f6f08..7dba61a32 100644
--- a/drivers/gpu/nvgpu/Kconfig
+++ b/drivers/gpu/nvgpu/Kconfig
@@ -40,6 +40,13 @@ config GK20A_PM_QOS
 	  Enable support to pass PM_QOS constraints to devfreq based scaling.
 
+config GK20A_RDMA
+	bool "Support GK20A RDMA"
+	depends on GK20A && MMU_NOTIFIER
+	default n
+	help
+	  Say Y here to enable GK20A RDMA features.
+
 config NVGPU_TRACK_MEM_USAGE
 	bool "Track the usage of system memory in nvgpu"
 	depends on GK20A
diff --git a/drivers/gpu/nvgpu/Makefile b/drivers/gpu/nvgpu/Makefile
index e0fd70e6f..fdfaf092c 100644
--- a/drivers/gpu/nvgpu/Makefile
+++ b/drivers/gpu/nvgpu/Makefile
@@ -176,6 +176,9 @@ nvgpu-$(CONFIG_NVGPU_SUPPORT_CDE) += \
 	os/linux/cde_gm20b.o \
 	os/linux/cde_gp10b.o
 
+nvgpu-$(CONFIG_GK20A_RDMA) += \
+	os/linux/nvidia_p2p.o
+
 ifeq ($(CONFIG_DEBUG_FS),y)
 nvgpu-$(CONFIG_NVGPU_SUPPORT_CDE) += \
 	os/linux/debug_cde.o
diff --git a/drivers/gpu/nvgpu/include/nvgpu/linux/nvidia_p2p.h b/drivers/gpu/nvgpu/include/nvgpu/linux/nvidia_p2p.h
new file mode 100644
index 000000000..c1dee7cf4
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/linux/nvidia_p2p.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_P2P_H__
+#define __NVIDIA_P2P_H__
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/mmu_notifier.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <nvgpu/lock.h>
+
+#define NVIDIA_P2P_UNINITIALIZED 0x0
+#define NVIDIA_P2P_PINNED 0x1
+#define NVIDIA_P2P_MAPPED 0x2
+
+enum nvidia_p2p_page_size_type {
+	NVIDIA_P2P_PAGE_SIZE_4KB = 0,
+	NVIDIA_P2P_PAGE_SIZE_64KB,
+	NVIDIA_P2P_PAGE_SIZE_128KB,
+	NVIDIA_P2P_PAGE_SIZE_COUNT
+};
+
+struct nvidia_p2p_page_table {
+	u32 page_size;
+	u64 size;
+	u32 entries;
+	struct page **pages;
+
+	u64 vaddr;
+	u32 mapped;
+
+	struct mm_struct *mm;
+	struct mmu_notifier mn;
+	struct nvgpu_mutex lock;
+	void (*free_callback)(void *data);
+	void *data;
+};
+
+struct nvidia_p2p_dma_mapping {
+	dma_addr_t *hw_address;
+	u32 *hw_len;
+	u32 entries;
+
+	struct sg_table *sgt;
+	struct device *dev;
+	struct nvidia_p2p_page_table *page_table;
+	enum dma_data_direction direction;
+};
+
+/*
+ * @brief
+ *   Make the pages underlying a range of GPU virtual memory
+ *   accessible to a third-party device.
+ *
+ * @param[in] vaddr
+ *   A GPU virtual address.
+ * @param[in] size
+ *   The size of the requested mapping, in bytes.
+ *   Must be a multiple of the page size.
+ * @param[out] **page_table
+ *   A pointer to struct nvidia_p2p_page_table.
+ * @param[in] free_callback
+ *   A non-NULL pointer to the function to be invoked when the pages
+ *   underlying the virtual address range are freed implicitly.
+ * @param[in] data
+ *   A non-NULL opaque pointer to private data to be passed to the
+ *   callback function.
+ *
+ * @return
+ *   0 upon successful completion.
+ *   Negative number if any error.
+ */
+int nvidia_p2p_get_pages(u64 vaddr, u64 size,
+		struct nvidia_p2p_page_table **page_table,
+		void (*free_callback)(void *data), void *data);
+
+/*
+ * @brief
+ *   Release the pages previously made accessible to
+ *   a third-party device.
+ *
+ * @param[in] *page_table
+ *   A pointer to struct nvidia_p2p_page_table.
+ *
+ * @return
+ *   0 upon successful completion.
+ *   Negative number if any error.
+ */
+int nvidia_p2p_put_pages(struct nvidia_p2p_page_table *page_table);
+
+/*
+ * @brief
+ *   Release the pages previously made accessible to a third-party
+ *   device. To be called during the execution of the free_callback(),
+ *   in place of nvidia_p2p_put_pages().
+ *
+ * @param[in] *page_table
+ *   A pointer to struct nvidia_p2p_page_table.
+ *
+ * @return
+ *   0 upon successful completion.
+ *   Negative number if any error.
+ */
+int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table);
+
+/*
+ * @brief
+ *   Map the pages retrieved using nvidia_p2p_get_pages and
+ *   pass the DMA addresses to a third-party device.
+ *
+ * @param[in] *dev
+ *   The peer device that needs to DMA to/from the mapping.
+ * @param[in] *page_table
+ *   A pointer to struct nvidia_p2p_page_table.
+ * @param[out] **map
+ *   A pointer to struct nvidia_p2p_dma_mapping.
+ *   The DMA mapping containing the DMA addresses to use.
+ * @param[in] direction
+ *   DMA direction.
+ *
+ * @return
+ *   0 upon successful completion.
+ *   Negative number if any error.
+ */
+int nvidia_p2p_map_pages(struct device *dev,
+		struct nvidia_p2p_page_table *page_table,
+		struct nvidia_p2p_dma_mapping **map,
+		enum dma_data_direction direction);
+
+/*
+ * @brief
+ *   Unmap the pages previously mapped using nvidia_p2p_map_pages.
+ *
+ * @param[in] *map
+ *   A pointer to struct nvidia_p2p_dma_mapping.
+ *   The DMA mapping containing the DMA addresses to use.
+ *
+ * @return
+ *   0 upon successful completion.
+ *   Negative number if any error.
+ */
+int nvidia_p2p_unmap_pages(struct nvidia_p2p_dma_mapping *map);
+
+/*
+ * @brief
+ *   Unmap the pages previously mapped using nvidia_p2p_map_pages.
+ *   To be called during the execution of the free_callback(), in
+ *   place of nvidia_p2p_unmap_pages().
+ *
+ * @param[in] *map
+ *   A pointer to struct nvidia_p2p_dma_mapping.
+ *   The DMA mapping containing the DMA addresses to use.
+ *
+ * @return
+ *   0 upon successful completion.
+ *   Negative number if any error.
+ */
+int nvidia_p2p_free_dma_mapping(struct nvidia_p2p_dma_mapping *dma_mapping);
+
+#endif
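The header above is the entire contract a peer driver sees: pin with nvidia_p2p_get_pages(), map with nvidia_p2p_map_pages(), and tear down in the reverse order. A minimal consumer sketch follows (illustration only, not part of this patch: peer_ctx, peer_free_cb() and peer_dma_example() are hypothetical names), assuming a page-aligned GPU virtual range and a peer struct device; peer_free_cb() itself is sketched after nvidia_p2p_get_pages() below.

/* Hypothetical consumer of the API above; illustration only. */
#include <nvgpu/linux/nvidia_p2p.h>

struct peer_ctx {
	struct nvidia_p2p_page_table *pgtbl;
	struct nvidia_p2p_dma_mapping *map;
};

static void peer_free_cb(void *data);	/* sketched further below */

static int peer_dma_example(struct device *peer_dev, struct peer_ctx *ctx,
		u64 gpu_vaddr, u64 size)
{
	int err;

	/* Pin: size must be a multiple of the page size, and both the
	 * callback and the data pointer must be non-NULL. */
	err = nvidia_p2p_get_pages(gpu_vaddr, size, &ctx->pgtbl,
			peer_free_cb, ctx);
	if (err)
		return err;

	/* Map the pinned pages for the peer device's DMA. */
	err = nvidia_p2p_map_pages(peer_dev, ctx->pgtbl, &ctx->map,
			DMA_BIDIRECTIONAL);
	if (err)
		goto put;

	/* ... program the peer device using ctx->map->hw_address[i]
	 * and ctx->map->hw_len[i] for i < ctx->map->entries ... */

	nvidia_p2p_unmap_pages(ctx->map);
	ctx->map = NULL;
put:
	nvidia_p2p_put_pages(ctx->pgtbl);
	return err;
}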
diff --git a/drivers/gpu/nvgpu/os/linux/nvidia_p2p.c b/drivers/gpu/nvgpu/os/linux/nvidia_p2p.c
new file mode 100644
index 000000000..dcd37beee
--- /dev/null
+++ b/drivers/gpu/nvgpu/os/linux/nvidia_p2p.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <nvgpu/linux/nvidia_p2p.h>
+
+static void nvidia_p2p_mn_release(struct mmu_notifier *mn,
+				struct mm_struct *mm)
+{
+	struct nvidia_p2p_page_table *page_table = container_of(mn,
+					struct nvidia_p2p_page_table, mn);
+
+	page_table->free_callback(page_table->data);
+}
+
+static void nvidia_p2p_mn_invl_range_start(struct mmu_notifier *mn,
+		struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+	struct nvidia_p2p_page_table *page_table = container_of(mn,
+					struct nvidia_p2p_page_table, mn);
+	u64 vaddr = page_table->vaddr;
+	u64 size = page_table->size;
+
+	if (vaddr >= start && vaddr <= end) {
+		mmu_notifier_unregister_no_release(&page_table->mn,
+						page_table->mm);
+		page_table->free_callback(page_table->data);
+	}
+}
+
+static struct mmu_notifier_ops nvidia_p2p_mmu_ops = {
+	.release = nvidia_p2p_mn_release,
+	.invalidate_range_start = nvidia_p2p_mn_invl_range_start,
+};
+
+int nvidia_p2p_get_pages(u64 vaddr, u64 size,
+		struct nvidia_p2p_page_table **page_table,
+		void (*free_callback)(void *data), void *data)
+{
+	int ret = 0;
+	int user_pages = 0;
+	int locked = 0;
+	int nr_pages = size >> PAGE_SHIFT;
+	struct page **pages;
+
+	if (nr_pages <= 0) {
+		return -EINVAL;
+	}
+
+	*page_table = kzalloc(sizeof(**page_table), GFP_KERNEL);
+	if (!*page_table) {
+		return -ENOMEM;
+	}
+
+	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
+	if (!pages) {
+		ret = -ENOMEM;
+		goto free_page_table;
+	}
+
+	down_read(&current->mm->mmap_sem);
+	locked = 1;
+	user_pages = get_user_pages_locked(vaddr & PAGE_MASK, nr_pages,
+			FOLL_WRITE | FOLL_FORCE,
+			pages, &locked);
+	/* get_user_pages_locked() may drop mmap_sem; only release it
+	 * if it is still held. */
+	if (locked) {
+		up_read(&current->mm->mmap_sem);
+	}
+	if (user_pages != nr_pages) {
+		ret = user_pages < 0 ? user_pages : -ENOMEM;
+		goto free_pages;
+	}
+
+	(*page_table)->pages = pages;
+	(*page_table)->entries = user_pages;
+	(*page_table)->page_size = NVIDIA_P2P_PAGE_SIZE_4KB;
+	(*page_table)->size = size;
+
+	(*page_table)->mn.ops = &nvidia_p2p_mmu_ops;
+	(*page_table)->mm = current->mm;
+	(*page_table)->free_callback = free_callback;
+	(*page_table)->data = data;
+	(*page_table)->vaddr = vaddr;
+	nvgpu_mutex_init(&(*page_table)->lock);
+	(*page_table)->mapped = NVIDIA_P2P_PINNED;
+
+	ret = mmu_notifier_register(&(*page_table)->mn, (*page_table)->mm);
+	if (ret) {
+		goto free_pages;
+	}
+
+	return 0;
+
+free_pages:
+	while (--user_pages >= 0) {
+		put_page(pages[user_pages]);
+	}
+	kfree(pages);
+free_page_table:
+	kfree(*page_table);
+	*page_table = NULL;
+
+	return ret;
+}
+EXPORT_SYMBOL(nvidia_p2p_get_pages);
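nvidia_p2p_get_pages() above registers the MMU notifier whose hooks open this file, so the consumer's free_callback can fire when the pages go away implicitly (address-space teardown, or an invalidation covering the pinned range). From that context only the *_free_* entry points may be used, never nvidia_p2p_put_pages() or nvidia_p2p_unmap_pages(). Completing the hypothetical peer_ctx sketch from the header section:

/* Hypothetical free_callback; illustration only. */
static void peer_free_cb(void *data)
{
	struct peer_ctx *ctx = data;

	/* A real driver must quiesce its DMA engine before this point.
	 * Release the mapping first, mirroring the setup order. */
	if (ctx->map) {
		nvidia_p2p_free_dma_mapping(ctx->map);
		ctx->map = NULL;
	}
	nvidia_p2p_free_page_table(ctx->pgtbl);
	ctx->pgtbl = NULL;
}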
+
+int nvidia_p2p_put_pages(struct nvidia_p2p_page_table *page_table)
+{
+	if (!page_table) {
+		return -EINVAL;
+	}
+
+	mmu_notifier_unregister(&page_table->mn, page_table->mm);
+
+	return 0;
+}
+EXPORT_SYMBOL(nvidia_p2p_put_pages);
+
+int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table)
+{
+	int user_pages = 0;
+	struct page **pages = NULL;
+
+	if (!page_table) {
+		return 0;
+	}
+
+	nvgpu_mutex_acquire(&page_table->lock);
+
+	if (page_table->mapped & NVIDIA_P2P_MAPPED) {
+		WARN(1, "Freeing page table while pages are still mapped");
+	}
+
+	if (page_table->mapped & NVIDIA_P2P_PINNED) {
+		pages = page_table->pages;
+		user_pages = page_table->entries;
+
+		while (--user_pages >= 0) {
+			put_page(pages[user_pages]);
+		}
+
+		kfree(pages);
+		page_table->mapped &= (u32)~NVIDIA_P2P_PINNED;
+	}
+
+	nvgpu_mutex_release(&page_table->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(nvidia_p2p_free_page_table);
+
+int nvidia_p2p_map_pages(struct device *dev,
+		struct nvidia_p2p_page_table *page_table,
+		struct nvidia_p2p_dma_mapping **dma_mapping,
+		enum dma_data_direction direction)
+{
+	struct sg_table *sgt = NULL;
+	struct scatterlist *sg;
+	struct page **pages = NULL;
+	u32 nr_pages = 0;
+	int ret = 0;
+	int i, count;
+
+	if (!page_table) {
+		return -EINVAL;
+	}
+
+	nvgpu_mutex_acquire(&page_table->lock);
+
+	pages = page_table->pages;
+	nr_pages = page_table->entries;
+	if (nr_pages <= 0) {
+		nvgpu_mutex_release(&page_table->lock);
+		return -EINVAL;
+	}
+
+	*dma_mapping = kzalloc(sizeof(**dma_mapping), GFP_KERNEL);
+	if (!*dma_mapping) {
+		nvgpu_mutex_release(&page_table->lock);
+		return -ENOMEM;
+	}
+
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt) {
+		ret = -ENOMEM;
+		goto free_dma_mapping;
+	}
+
+	ret = sg_alloc_table_from_pages(sgt, pages,
+			nr_pages, 0, page_table->size, GFP_KERNEL);
+	if (ret) {
+		goto free_sgt;
+	}
+
+	(*dma_mapping)->sgt = sgt;
+	(*dma_mapping)->dev = dev;
+	(*dma_mapping)->direction = direction;
+	(*dma_mapping)->page_table = page_table;
+
+	count = dma_map_sg(dev, sgt->sgl, sgt->nents, direction);
+	if (count < 1) {
+		ret = -ENOMEM;
+		goto free_sg_table;
+	}
+
+	(*dma_mapping)->entries = count;
+
+	(*dma_mapping)->hw_address = kcalloc(count,
+			sizeof(*(*dma_mapping)->hw_address), GFP_KERNEL);
+	if (!((*dma_mapping)->hw_address)) {
+		ret = -ENOMEM;
+		goto unmap_sg;
+	}
+	(*dma_mapping)->hw_len = kcalloc(count,
+			sizeof(*(*dma_mapping)->hw_len), GFP_KERNEL);
+	if (!((*dma_mapping)->hw_len)) {
+		ret = -ENOMEM;
+		goto free_hw_address;
+	}
+
+	for_each_sg(sgt->sgl, sg, count, i) {
+		(*dma_mapping)->hw_address[i] = sg_dma_address(sg);
+		(*dma_mapping)->hw_len[i] = sg_dma_len(sg);
+	}
+	(*dma_mapping)->page_table->mapped |= NVIDIA_P2P_MAPPED;
+	nvgpu_mutex_release(&page_table->lock);
+
+	return 0;
+
+free_hw_address:
+	kfree((*dma_mapping)->hw_address);
+unmap_sg:
+	dma_unmap_sg(dev, sgt->sgl, sgt->nents, direction);
+free_sg_table:
+	sg_free_table(sgt);
+free_sgt:
+	kfree(sgt);
+free_dma_mapping:
+	kfree(*dma_mapping);
+	*dma_mapping = NULL;
+	nvgpu_mutex_release(&page_table->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(nvidia_p2p_map_pages);
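The hw_address[]/hw_len[] arrays filled by the for_each_sg() loop above are what the peer driver actually consumes: one IOMMU-coalesced DMA segment per entry, where entries is the count returned by dma_map_sg() and may be smaller than the page count. A sketch of walking them (peer_write_desc() is a hypothetical function that queues one descriptor on the third-party device):

/* Illustration only; peer_write_desc() is hypothetical. */
static void peer_write_desc(dma_addr_t addr, u32 len);

static void peer_program_dma(struct nvidia_p2p_dma_mapping *map)
{
	u32 i;

	for (i = 0; i < map->entries; i++) {
		/* One descriptor per coalesced DMA segment. */
		peer_write_desc(map->hw_address[i], map->hw_len[i]);
	}
}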
+
+int nvidia_p2p_unmap_pages(struct nvidia_p2p_dma_mapping *dma_mapping)
+{
+	struct nvidia_p2p_page_table *page_table = NULL;
+
+	if (!dma_mapping) {
+		return -EINVAL;
+	}
+
+	page_table = dma_mapping->page_table;
+	if (!page_table) {
+		return -EFAULT;
+	}
+
+	nvgpu_mutex_acquire(&page_table->lock);
+	if (page_table->mapped & NVIDIA_P2P_MAPPED) {
+		kfree(dma_mapping->hw_len);
+		kfree(dma_mapping->hw_address);
+		if (dma_mapping->entries) {
+			dma_unmap_sg(dma_mapping->dev,
+					dma_mapping->sgt->sgl,
+					dma_mapping->sgt->nents,
+					dma_mapping->direction);
+		}
+		sg_free_table(dma_mapping->sgt);
+		kfree(dma_mapping->sgt);
+		kfree(dma_mapping);
+		page_table->mapped &= (u32)~NVIDIA_P2P_MAPPED;
+	}
+	nvgpu_mutex_release(&page_table->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(nvidia_p2p_unmap_pages);
+
+int nvidia_p2p_free_dma_mapping(struct nvidia_p2p_dma_mapping *dma_mapping)
+{
+	return nvidia_p2p_unmap_pages(dma_mapping);
+}
+EXPORT_SYMBOL(nvidia_p2p_free_dma_mapping);
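Finally, teardown order matters on the normal (non-callback) path: nvidia_p2p_unmap_pages() must run before nvidia_p2p_put_pages(), otherwise NVIDIA_P2P_MAPPED is still set when the free path reaches nvidia_p2p_free_page_table() and the WARN there fires. A sketch, continuing the hypothetical peer_ctx example and assuming mmu_notifier_unregister() invokes the release hook so that the pin is ultimately dropped via peer_free_cb():

/* Illustration only, continuing the hypothetical peer_ctx sketch. */
static void peer_teardown(struct peer_ctx *ctx)
{
	/* Unmap first so free_page_table() never sees MAPPED set. */
	if (ctx->map) {
		nvidia_p2p_unmap_pages(ctx->map);
		ctx->map = NULL;
	}
	/* Unregistering the notifier ends in peer_free_cb(), which
	 * releases the pinned pages via nvidia_p2p_free_page_table(). */
	if (ctx->pgtbl)
		nvidia_p2p_put_pages(ctx->pgtbl);
}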