gpu: nvgpu: move nv-p2p outside nvgpu

nv-p2p does not depend on nvgpu directly,
so it can be moved to the nvidia repository.

Bug 200551105

Change-Id: Icd855ecdb91ede29f8b4d3631bb140092e7a8f7e
Signed-off-by: ddutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2275813
Reviewed-by: Preetham Chandru <pchandru@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: ddutta, 2020-01-08 17:06:27 +05:30
Committed by: Alex Waterman
parent cbf5b0c75b
commit b7be2379c0
5 changed files with 0 additions and 509 deletions


@@ -135,9 +135,6 @@ nvhost:
   sources: [ os/linux/nvhost.c,
              os/linux/nvhost_priv.h ]
 
-nvidia_p2p:
-  sources: [ os/linux/nvidia_p2p.c ]
-
 nvlink:
   sources: [ os/linux/nvlink.c,
              os/linux/nvlink.h ]


@@ -40,13 +40,6 @@ config GK20A_PM_QOS
 	  Enable support to pass PM_QOS constraints to devfreq based
 	  scaling.
 
-config GK20A_RDMA
-	bool "Support GK20A RDMA"
-	depends on GK20A && MMU_NOTIFIER
-	default n
-	help
-	  Say Y here to enable GK20A RDMA features.
-
 config NVGPU_TRACK_MEM_USAGE
 	bool "Track the usage of system memory in nvgpu"
 	depends on GK20A


@@ -445,9 +445,6 @@ nvgpu-$(CONFIG_NVGPU_SUPPORT_CDE) += \
 	os/linux/cde_gm20b.o \
 	os/linux/cde_gp10b.o
 
-nvgpu-$(CONFIG_GK20A_RDMA) += \
-	os/linux/nvidia_p2p.o
-
 ifeq ($(CONFIG_DEBUG_FS),y)
 nvgpu-$(CONFIG_NVGPU_SUPPORT_CDE) += \
 	os/linux/debug_cde.o


@@ -1,293 +0,0 @@
/*
 * Copyright (c) 2018-2019, NVIDIA Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/slab.h>
#include <linux/nv-p2p.h>

static void nvidia_p2p_mn_release(struct mmu_notifier *mn,
                struct mm_struct *mm)
{
        struct nvidia_p2p_page_table *page_table = container_of(mn,
                        struct nvidia_p2p_page_table, mn);

        page_table->free_callback(page_table->data);
}

static void nvidia_p2p_mn_invl_range_start(struct mmu_notifier *mn,
                struct mm_struct *mm, unsigned long start, unsigned long end)
{
        struct nvidia_p2p_page_table *page_table = container_of(mn,
                        struct nvidia_p2p_page_table, mn);
        u64 vaddr = 0;
        u64 size = 0;

        vaddr = page_table->vaddr;
        size = page_table->size;

        if (vaddr >= start && vaddr <= end) {
                mmu_notifier_unregister_no_release(&page_table->mn, page_table->mm);
                page_table->free_callback(page_table->data);
        }
}

static struct mmu_notifier_ops nvidia_p2p_mmu_ops = {
        .release = nvidia_p2p_mn_release,
        .invalidate_range_start = nvidia_p2p_mn_invl_range_start,
};

int nvidia_p2p_get_pages(u64 vaddr, u64 size,
                struct nvidia_p2p_page_table **page_table,
                void (*free_callback)(void *data), void *data)
{
        int ret = 0;
        int user_pages = 0;
        int locked = 0;
        int nr_pages = size >> PAGE_SHIFT;
        struct page **pages;

        if (nr_pages <= 0) {
                return -EINVAL;
        }

        *page_table = kzalloc(sizeof(**page_table), GFP_KERNEL);
        if (!*page_table) {
                return -ENOMEM;
        }

        pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages) {
                ret = -ENOMEM;
                goto free_page_table;
        }

        down_read(&current->mm->mmap_sem);
        locked = 1;
        user_pages = get_user_pages_locked(vaddr & PAGE_MASK, nr_pages,
                        FOLL_WRITE | FOLL_FORCE,
                        pages, &locked);
        up_read(&current->mm->mmap_sem);
        if (user_pages != nr_pages) {
                ret = user_pages < 0 ? user_pages : -ENOMEM;
                goto free_pages;
        }

        (*page_table)->version = NVIDIA_P2P_PAGE_TABLE_VERSION;
        (*page_table)->pages = pages;
        (*page_table)->entries = user_pages;
        (*page_table)->page_size = NVIDIA_P2P_PAGE_SIZE_4KB;
        (*page_table)->size = size;

        (*page_table)->mn.ops = &nvidia_p2p_mmu_ops;
        (*page_table)->mm = current->mm;
        (*page_table)->free_callback = free_callback;
        (*page_table)->data = data;
        (*page_table)->vaddr = vaddr;
        mutex_init(&(*page_table)->lock);
        (*page_table)->mapped = NVIDIA_P2P_PINNED;

        ret = mmu_notifier_register(&(*page_table)->mn, (*page_table)->mm);
        if (ret) {
                goto free_pages;
        }

        return 0;
free_pages:
        while (--user_pages >= 0) {
                put_page(pages[user_pages]);
        }
        kfree(pages);
free_page_table:
        kfree(*page_table);
        *page_table = NULL;

        return ret;
}
EXPORT_SYMBOL(nvidia_p2p_get_pages);

int nvidia_p2p_put_pages(struct nvidia_p2p_page_table *page_table)
{
        if (!page_table) {
                return -EINVAL;
        }

        mmu_notifier_unregister(&page_table->mn, page_table->mm);

        return 0;
}
EXPORT_SYMBOL(nvidia_p2p_put_pages);

int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table)
{
        int user_pages = 0;
        struct page **pages = NULL;

        if (!page_table) {
                return 0;
        }

        mutex_lock(&page_table->lock);

        if (page_table->mapped & NVIDIA_P2P_MAPPED) {
                WARN(1, "Attempting to free unmapped pages");
        }

        if (page_table->mapped & NVIDIA_P2P_PINNED) {
                pages = page_table->pages;
                user_pages = page_table->entries;

                while (--user_pages >= 0) {
                        put_page(pages[user_pages]);
                }

                kfree(pages);
                page_table->mapped &= (u32)~NVIDIA_P2P_PINNED;
        }

        mutex_unlock(&page_table->lock);

        return 0;
}
EXPORT_SYMBOL(nvidia_p2p_free_page_table);

int nvidia_p2p_dma_map_pages(struct device *dev,
                struct nvidia_p2p_page_table *page_table,
                struct nvidia_p2p_dma_mapping **dma_mapping,
                enum dma_data_direction direction)
{
        struct sg_table *sgt = NULL;
        struct scatterlist *sg;
        struct page **pages = NULL;
        u32 nr_pages = 0;
        int ret = 0;
        int i, count;

        if (!page_table) {
                return -EINVAL;
        }

        mutex_lock(&page_table->lock);

        pages = page_table->pages;
        nr_pages = page_table->entries;
        if (nr_pages <= 0) {
                mutex_unlock(&page_table->lock);
                return -EINVAL;
        }

        *dma_mapping = kzalloc(sizeof(**dma_mapping), GFP_KERNEL);
        if (!*dma_mapping) {
                mutex_unlock(&page_table->lock);
                return -ENOMEM;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                ret = -ENOMEM;
                goto free_dma_mapping;
        }

        ret = sg_alloc_table_from_pages(sgt, pages,
                        nr_pages, 0, page_table->size, GFP_KERNEL);
        if (ret) {
                goto free_sgt;
        }

        (*dma_mapping)->version = NVIDIA_P2P_DMA_MAPPING_VERSION;
        (*dma_mapping)->sgt = sgt;
        (*dma_mapping)->dev = dev;
        (*dma_mapping)->direction = direction;
        (*dma_mapping)->page_table = page_table;

        count = dma_map_sg(dev, sgt->sgl, sgt->nents, direction);
        if (count < 1) {
                goto free_sg_table;
        }

        (*dma_mapping)->entries = count;

        (*dma_mapping)->hw_address = kcalloc(count, sizeof(u64), GFP_KERNEL);
        if (!((*dma_mapping)->hw_address)) {
                ret = -ENOMEM;
                goto unmap_sg;
        }

        (*dma_mapping)->hw_len = kcalloc(count, sizeof(u64), GFP_KERNEL);
        if (!((*dma_mapping)->hw_len)) {
                ret = -ENOMEM;
                goto free_hw_address;
        }

        for_each_sg(sgt->sgl, sg, count, i) {
                (*dma_mapping)->hw_address[i] = sg_dma_address(sg);
                (*dma_mapping)->hw_len[i] = sg_dma_len(sg);
        }

        (*dma_mapping)->page_table->mapped |= NVIDIA_P2P_MAPPED;

        mutex_unlock(&page_table->lock);

        return 0;
free_hw_address:
        kfree((*dma_mapping)->hw_address);
unmap_sg:
        dma_unmap_sg(dev, sgt->sgl,
                        sgt->nents, direction);
free_sg_table:
        sg_free_table(sgt);
free_sgt:
        kfree(sgt);
free_dma_mapping:
        kfree(*dma_mapping);
        *dma_mapping = NULL;
        mutex_unlock(&page_table->lock);

        return ret;
}
EXPORT_SYMBOL(nvidia_p2p_dma_map_pages);

int nvidia_p2p_dma_unmap_pages(struct nvidia_p2p_dma_mapping *dma_mapping)
{
        struct nvidia_p2p_page_table *page_table = NULL;

        if (!dma_mapping) {
                return -EINVAL;
        }

        page_table = dma_mapping->page_table;
        if (!page_table) {
                return -EFAULT;
        }

        mutex_lock(&page_table->lock);

        if (page_table->mapped & NVIDIA_P2P_MAPPED) {
                kfree(dma_mapping->hw_len);
                kfree(dma_mapping->hw_address);
                if (dma_mapping->entries)
                        dma_unmap_sg(dma_mapping->dev,
                                        dma_mapping->sgt->sgl,
                                        dma_mapping->sgt->nents,
                                        dma_mapping->direction);
                sg_free_table(dma_mapping->sgt);
                kfree(dma_mapping->sgt);
                kfree(dma_mapping);
                page_table->mapped &= (u32)~NVIDIA_P2P_MAPPED;
        }

        mutex_unlock(&page_table->lock);

        return 0;
}
EXPORT_SYMBOL(nvidia_p2p_dma_unmap_pages);

int nvidia_p2p_free_dma_mapping(struct nvidia_p2p_dma_mapping *dma_mapping)
{
        return nvidia_p2p_dma_unmap_pages(dma_mapping);
}
EXPORT_SYMBOL(nvidia_p2p_free_dma_mapping);
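
For context on how the removed implementation is consumed, here is a minimal sketch of the pinning path as a hypothetical peer-device driver would use it; struct rdma_ctx and the rdma_* helpers are illustrative and not part of this commit. The driver pins a page-aligned user VA range with nvidia_p2p_get_pages(), supplies a free_callback that releases the page table if the user mapping is torn down underneath it, and unpins via nvidia_p2p_put_pages() on the normal path.

/*
 * Illustrative sketch only: struct rdma_ctx and the rdma_* helpers are
 * hypothetical.
 */
#include <linux/nv-p2p.h>

struct rdma_ctx {
        struct nvidia_p2p_page_table *page_table;
};

/* Invoked by nv-p2p if the user mapping goes away while pages are pinned. */
static void rdma_free_callback(void *data)
{
        struct rdma_ctx *ctx = data;

        nvidia_p2p_free_page_table(ctx->page_table);
        ctx->page_table = NULL;
}

/* vaddr must be page aligned and size a multiple of the page size. */
static int rdma_pin_user_range(struct rdma_ctx *ctx, u64 vaddr, u64 size)
{
        return nvidia_p2p_get_pages(vaddr, size, &ctx->page_table,
                        rdma_free_callback, ctx);
}

static void rdma_unpin_user_range(struct rdma_ctx *ctx)
{
        if (ctx->page_table) {
                /* Unregisters the MMU notifier; its release path unpins the pages. */
                nvidia_p2p_put_pages(ctx->page_table);
                ctx->page_table = NULL;
        }
}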


@@ -1,203 +0,0 @@
/*
 * Copyright (c) 2018-2019, NVIDIA Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __NVIDIA_P2P_H__
#define __NVIDIA_P2P_H__

#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>

#define NVIDIA_P2P_UNINITIALIZED 0x0
#define NVIDIA_P2P_PINNED 0x1
#define NVIDIA_P2P_MAPPED 0x2

#define NVIDIA_P2P_MAJOR_VERSION_MASK 0xffff0000
#define NVIDIA_P2P_MINOR_VERSION_MASK 0x0000ffff

#define NVIDIA_P2P_MAJOR_VERSION(v) \
        (((v) & NVIDIA_P2P_MAJOR_VERSION_MASK) >> 16)

#define NVIDIA_P2P_MINOR_VERSION(v) \
        (((v) & NVIDIA_P2P_MINOR_VERSION_MASK))

#define NVIDIA_P2P_MAJOR_VERSION_MATCHES(p, v) \
        (NVIDIA_P2P_MAJOR_VERSION((p)->version) == NVIDIA_P2P_MAJOR_VERSION(v))

#define NVIDIA_P2P_VERSION_COMPATIBLE(p, v) \
        (NVIDIA_P2P_MAJOR_VERSION_MATCHES(p, v) && \
        (NVIDIA_P2P_MINOR_VERSION((p)->version) >= \
        (NVIDIA_P2P_MINOR_VERSION(v))))

enum nvidia_p2p_page_size_type {
        NVIDIA_P2P_PAGE_SIZE_4KB = 0,
        NVIDIA_P2P_PAGE_SIZE_64KB,
        NVIDIA_P2P_PAGE_SIZE_128KB,
        NVIDIA_P2P_PAGE_SIZE_COUNT
};

typedef struct nvidia_p2p_page_table {
        u32 version;
        u32 page_size;
        u64 size;
        u32 entries;
        struct page **pages;
        u64 vaddr;
        u32 mapped;
        struct mm_struct *mm;
        struct mmu_notifier mn;
        struct mutex lock;
        void (*free_callback)(void *data);
        void *data;
} nvidia_p2p_page_table_t;

typedef struct nvidia_p2p_dma_mapping {
        u32 version;
        dma_addr_t *hw_address;
        u32 *hw_len;
        u32 entries;
        struct sg_table *sgt;
        struct device *dev;
        struct nvidia_p2p_page_table *page_table;
        enum dma_data_direction direction;
} nvidia_p2p_dma_mapping_t;

#define NVIDIA_P2P_PAGE_TABLE_VERSION 0x00010000

#define NVIDIA_P2P_PAGE_TABLE_VERSION_COMPATIBLE(p) \
        NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_PAGE_TABLE_VERSION)

/*
 * @brief
 *   Make the pages underlying a range of GPU virtual memory
 *   accessible to a third-party device.
 *
 * @param[in] vaddr
 *   A GPU Virtual Address
 * @param[in] size
 *   The size of the requested mapping.
 *   Size must be a multiple of Page size.
 * @param[out] **page_table
 *   A pointer to struct nvidia_p2p_page_table
 * @param[in] free_callback
 *   A non-NULL pointer to the function to be invoked when the pages
 *   underlying the virtual address range are freed
 *   implicitly. Must be non NULL.
 * @param[in] data
 *   A non-NULL opaque pointer to private data to be passed to the
 *   callback function.
 *
 * @return
 *   0 upon successful completion.
 *   Negative number if any error
 */
int nvidia_p2p_get_pages(u64 vaddr, u64 size,
                struct nvidia_p2p_page_table **page_table,
                void (*free_callback)(void *data), void *data);

/*
 * @brief
 *   Release the pages previously made accessible to
 *   a third-party device.
 *
 * @param[in] *page_table
 *   A pointer to struct nvidia_p2p_page_table
 *
 * @return
 *   0 upon successful completion.
 *   -ENOMEM if the driver failed to allocate memory or if
 *   insufficient resources were available to complete the operation.
 *   Negative number if any other error
 */
int nvidia_p2p_put_pages(struct nvidia_p2p_page_table *page_table);

/*
 * @brief
 *   Release the pages previously made accessible to
 *   a third-party device. This is called during the
 *   execution of the free_callback().
 *
 * @param[in] *page_table
 *   A pointer to struct nvidia_p2p_page_table
 *
 * @return
 *   0 upon successful completion.
 *   -ENOMEM if the driver failed to allocate memory or if
 *   insufficient resources were available to complete the operation.
 *   Negative number if any other error
 */
int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table);

#define NVIDIA_P2P_DMA_MAPPING_VERSION 0x00010000

#define NVIDIA_P2P_DMA_MAPPING_VERSION_COMPATIBLE(p) \
        NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_DMA_MAPPING_VERSION)

/*
 * @brief
 *   Map the pages retrieved using nvidia_p2p_get_pages and
 *   pass the dma address to a third-party device.
 *
 * @param[in] *dev
 *   The peer device that needs to DMA to/from the
 *   mapping.
 * @param[in] *page_table
 *   A pointer to struct nvidia_p2p_page_table
 * @param[out] **map
 *   A pointer to struct nvidia_p2p_dma_mapping.
 *   The DMA mapping containing the DMA addresses to use.
 * @param[in] direction
 *   DMA direction
 *
 * @return
 *   0 upon successful completion.
 *   Negative number if any other error
 */
int nvidia_p2p_dma_map_pages(struct device *dev,
                struct nvidia_p2p_page_table *page_table,
                struct nvidia_p2p_dma_mapping **map,
                enum dma_data_direction direction);

/*
 * @brief
 *   Unmap the pages previously mapped using nvidia_p2p_dma_map_pages
 *
 * @param[in] *map
 *   A pointer to struct nvidia_p2p_dma_mapping.
 *   The DMA mapping containing the DMA addresses to use.
 *
 * @return
 *   0 upon successful completion.
 *   Negative number if any other error
 */
int nvidia_p2p_dma_unmap_pages(struct nvidia_p2p_dma_mapping *map);

/*
 * @brief
 *   Unmap the pages previously mapped using nvidia_p2p_dma_map_pages.
 *   This is called during the execution of the free_callback().
 *
 * @param[in] *map
 *   A pointer to struct nvidia_p2p_dma_mapping.
 *   The DMA mapping containing the DMA addresses to use.
 *
 * @return
 *   0 upon successful completion.
 *   Negative number if any other error
 */
int nvidia_p2p_free_dma_mapping(struct nvidia_p2p_dma_mapping *dma_mapping);

#endif
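
A matching sketch of the DMA side of this API, reusing the hypothetical struct rdma_ctx from the earlier example; peer_dev and program_peer_engine() are likewise illustrative assumptions, not part of the removed header. The caller checks version compatibility, maps the pinned pages for the peer device, programs the peer engine from hw_address[]/hw_len[], and unmaps when done. Per the comments above, nvidia_p2p_free_dma_mapping() is the variant meant for use from free_callback() context.

/*
 * Illustrative sketch only: peer_dev, program_peer_engine() and struct
 * rdma_ctx (from the previous sketch) are hypothetical.
 */
#include <linux/nv-p2p.h>

/* Hypothetical hook that programs the peer device's DMA engine. */
void program_peer_engine(dma_addr_t addr, u32 len);

static int rdma_map_for_peer(struct device *peer_dev, struct rdma_ctx *ctx,
                struct nvidia_p2p_dma_mapping **dma_mapping)
{
        u32 i;
        int ret;

        /* Reject page tables produced by an incompatible nv-p2p version. */
        if (!NVIDIA_P2P_PAGE_TABLE_VERSION_COMPATIBLE(ctx->page_table))
                return -EINVAL;

        ret = nvidia_p2p_dma_map_pages(peer_dev, ctx->page_table,
                        dma_mapping, DMA_BIDIRECTIONAL);
        if (ret)
                return ret;

        /* Hand each mapped segment (bus address + length) to the peer engine. */
        for (i = 0; i < (*dma_mapping)->entries; i++)
                program_peer_engine((*dma_mapping)->hw_address[i],
                                (*dma_mapping)->hw_len[i]);

        return 0;
}

static void rdma_unmap_for_peer(struct nvidia_p2p_dma_mapping *dma_mapping)
{
        /* From free_callback() context, call nvidia_p2p_free_dma_mapping() instead. */
        nvidia_p2p_dma_unmap_pages(dma_mapping);
}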