Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git
drivers: nv-p2p: Move Nvidia Tegra P2P Driver to OOT
Make the Nvidia Tegra P2P driver for RDMA an OOT module. The driver was
earlier merged in Kernel-5.10 in the CL below:
https://git-master.nvidia.com/r/c/linux-nvgpu/+/1821407

Bug 4142533

Change-Id: I6b24b44832d45fa5abedcd04c94d5d1e2576a6a3
Signed-off-by: Sumit Gupta <sumitg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2978103
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit c10e9dee5c (parent 8409b7c652), committed by mobile promotions
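Editorial aside (not part of this change): the functions exported by this driver are meant to be called from a peer RDMA driver. The sketch below only illustrates the expected call order under stated assumptions: a hypothetical consumer with a struct device *dev, a page-aligned user buffer (vaddr, size), and a zero-initialized context. All demo_* names are illustrative, not part of the commit.

/* Hypothetical consumer sketch (illustration only, not part of this commit). */
#include <linux/nv-p2p.h>

struct demo_ctx {
	struct nvidia_p2p_page_table *page_table;
	struct nvidia_p2p_dma_mapping *dma_mapping;
};

/* Invoked by nvidia-p2p when the pinned range is torn down, either via
 * nvidia_p2p_put_pages() or implicitly when the owning mm goes away. */
static void demo_free_callback(void *data)
{
	struct demo_ctx *ctx = data;

	nvidia_p2p_free_dma_mapping(ctx->dma_mapping);
	nvidia_p2p_free_page_table(ctx->page_table);
	ctx->dma_mapping = NULL;
	ctx->page_table = NULL;
}

static int demo_pin_and_map(struct device *dev, u64 vaddr, u64 size,
			    struct demo_ctx *ctx)
{
	int ret;

	/* Pin the user pages backing [vaddr, vaddr + size). */
	ret = nvidia_p2p_get_pages(vaddr, size, &ctx->page_table,
				   demo_free_callback, ctx);
	if (ret)
		return ret;

	/* DMA-map the pinned pages for the peer device. */
	ret = nvidia_p2p_dma_map_pages(dev, ctx->page_table,
				       &ctx->dma_mapping, DMA_BIDIRECTIONAL);
	if (ret) {
		nvidia_p2p_put_pages(ctx->page_table);
		return ret;
	}

	/* On success, ctx->dma_mapping->hw_address[i] / hw_len[i]
	 * (i < entries) hold the bus addresses and lengths the peer
	 * device can be programmed with. */
	return 0;
}

static void demo_teardown(struct demo_ctx *ctx)
{
	/* Unregistering the notifier via put_pages ends up invoking
	 * demo_free_callback(), which releases the DMA mapping and the
	 * pinned pages; one possible ordering, hedged as above. */
	if (ctx->page_table)
		nvidia_p2p_put_pages(ctx->page_table);
}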
@@ -43,6 +43,7 @@ obj-m += misc/
 obj-m += net/
 obj-m += nvpps/
 obj-m += nvpmodel/
+obj-m += nv-p2p/
 ifdef CONFIG_PCI
 obj-m += pci/
 ifdef CONFIG_TTY
drivers/nv-p2p/Makefile (new file, 5 lines)
@@ -0,0 +1,5 @@
#
# Nvidia Tegra RDMA driver.
#
ccflags-y += -Werror
obj-m += nvidia-p2p.o
drivers/nv-p2p/nvidia-p2p.c (new file, 373 lines)
@@ -0,0 +1,373 @@
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include <linux/slab.h>
#include <linux/version.h>
#include <linux/mmu_notifier.h>
#include <linux/module.h>
#include <linux/nv-p2p.h>

MODULE_DESCRIPTION("Nvidia Tegra P2P Driver");
MODULE_AUTHOR("Preetham Chandru pchandru@nvidia.com");
MODULE_LICENSE("GPL");

static inline u64 safe_cast_s32_to_u64(s32 si_a)
{
	if (si_a < 0) {
		WARN_ON(1);
		return 0UL;
	} else {
		return (u64)si_a;
	}
}

static inline s32 safe_cast_u64_to_s32(u64 ul_a)
{
	if (ul_a > safe_cast_s32_to_u64(INT_MAX)) {
		WARN_ON(1);
		return 0;
	} else {
		return (s32)ul_a;
	}
}

static inline u32 safe_cast_s32_to_u32(s32 si_a)
{
	if (si_a < 0) {
		WARN_ON(1);
		return 0U;
	} else {
		return (u32)si_a;
	}
}

static inline s32 safe_cast_u32_to_s32(u32 ui_a)
{
	if (ui_a > safe_cast_s32_to_u32(INT_MAX)) {
		WARN_ON(1);
		return 0;
	} else {
		return (s32)ui_a;
	}
}

static inline s32 safe_cast_s64_to_s32(s64 sl_a)
{
	if ((sl_a > INT_MAX) || (sl_a < INT_MIN)) {
		WARN_ON(1);
		return 0;
	} else {
		return (s32)sl_a;
	}
}

static void nvidia_p2p_mn_release(struct mmu_notifier *mn,
				  struct mm_struct *mm)
{
	struct nvidia_p2p_page_table *page_table = container_of(mn,
						struct nvidia_p2p_page_table,
						mn);

	page_table->free_callback(page_table->data);
}

static void nvidia_p2p_mn_invl_range_start_legacy(struct mmu_notifier *mn,
		struct mm_struct *mm, unsigned long start, unsigned long end)
{
	struct nvidia_p2p_page_table *page_table = container_of(mn,
						struct nvidia_p2p_page_table,
						mn);

	u64 vaddr = 0;
	u64 size = 0;

	vaddr = page_table->vaddr;
	size = page_table->size;

	if (vaddr >= start && vaddr <= end) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
		mmu_notifier_put(&page_table->mn);
#else
		mmu_notifier_unregister_no_release(&page_table->mn, page_table->mm);
#endif
		page_table->free_callback(page_table->data);
	}
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
static int nvidia_p2p_mn_invl_range_start(struct mmu_notifier *mn,
		const struct mmu_notifier_range *range)
{
	nvidia_p2p_mn_invl_range_start_legacy(mn, NULL, range->start, range->end);

	return 0;
}
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
static void nvidia_p2p_free_notifier(struct mmu_notifier *mn)
{
}
#endif

static struct mmu_notifier_ops nvidia_p2p_mmu_ops = {
	.release = nvidia_p2p_mn_release,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
	.invalidate_range_start = nvidia_p2p_mn_invl_range_start,
#else
	.invalidate_range_start = nvidia_p2p_mn_invl_range_start_legacy,
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
	.free_notifier = nvidia_p2p_free_notifier,
#endif
};

int nvidia_p2p_get_pages(u64 vaddr, u64 size,
		struct nvidia_p2p_page_table **page_table,
		void (*free_callback)(void *data), void *data)
{
	int ret = 0;
	int user_pages = 0;
	int locked = 0;
	int nr_pages = safe_cast_u64_to_s32(size >> PAGE_SHIFT);
	struct page **pages;

	if (nr_pages <= 0) {
		return -EINVAL;
	}

	*page_table = kzalloc(sizeof(**page_table), GFP_KERNEL);
	if (!*page_table) {
		return -ENOMEM;
	}

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto free_page_table;
	}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)
	down_read(&current->mm->mmap_lock);
#else
	down_read(&current->mm->mmap_sem);
#endif
	locked = 1;
	user_pages = safe_cast_s64_to_s32(get_user_pages_locked(vaddr & PAGE_MASK, nr_pages,
			FOLL_WRITE | FOLL_FORCE, pages, &locked));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)
	up_read(&current->mm->mmap_lock);
#else
	up_read(&current->mm->mmap_sem);
#endif
	if (user_pages != nr_pages) {
		ret = user_pages < 0 ? user_pages : -ENOMEM;
		goto free_pages;
	}

	(*page_table)->version = NVIDIA_P2P_PAGE_TABLE_VERSION;
	(*page_table)->pages = pages;
	(*page_table)->entries = user_pages;
	(*page_table)->page_size = NVIDIA_P2P_PAGE_SIZE_4KB;
	(*page_table)->size = size;

	(*page_table)->mn.ops = &nvidia_p2p_mmu_ops;
	(*page_table)->mm = current->mm;
	(*page_table)->free_callback = free_callback;
	(*page_table)->data = data;
	(*page_table)->vaddr = vaddr;
	mutex_init(&(*page_table)->lock);
	(*page_table)->mapped = NVIDIA_P2P_PINNED;

	ret = mmu_notifier_register(&(*page_table)->mn, (*page_table)->mm);
	if (ret) {
		goto free_pages;
	}

	return 0;
free_pages:
	while (--user_pages >= 0) {
		put_page(pages[user_pages]);
	}
	kfree(pages);
free_page_table:
	kfree(*page_table);
	*page_table = NULL;
	return ret;
}
EXPORT_SYMBOL(nvidia_p2p_get_pages);

int nvidia_p2p_put_pages(struct nvidia_p2p_page_table *page_table)
{
	if (!page_table) {
		return -EINVAL;
	}

	mmu_notifier_unregister(&page_table->mn, page_table->mm);

	return 0;
}
EXPORT_SYMBOL(nvidia_p2p_put_pages);

int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table)
{
	int user_pages = 0;
	struct page **pages = NULL;

	if (!page_table) {
		return 0;
	}

	mutex_lock(&page_table->lock);

	if (page_table->mapped & NVIDIA_P2P_MAPPED) {
		WARN(1, "Attempting to free unmapped pages");
	}

	if (page_table->mapped & NVIDIA_P2P_PINNED) {
		pages = page_table->pages;
		user_pages = safe_cast_u32_to_s32(page_table->entries);

		while (--user_pages >= 0) {
			put_page(pages[user_pages]);
		}

		kfree(pages);
		page_table->mapped &= ~NVIDIA_P2P_PINNED;
	}

	mutex_unlock(&page_table->lock);

	return 0;
}
EXPORT_SYMBOL(nvidia_p2p_free_page_table);

int nvidia_p2p_dma_map_pages(struct device *dev,
		struct nvidia_p2p_page_table *page_table,
		struct nvidia_p2p_dma_mapping **dma_mapping,
		enum dma_data_direction direction)
{
	struct sg_table *sgt = NULL;
	struct scatterlist *sg;
	struct page **pages = NULL;
	u32 nr_pages = 0;
	int ret = 0;
	int i, count;

	if (!page_table) {
		return -EINVAL;
	}

	mutex_lock(&page_table->lock);

	pages = page_table->pages;
	nr_pages = page_table->entries;
	if (nr_pages <= 0) {
		mutex_unlock(&page_table->lock);
		return -EINVAL;
	}

	*dma_mapping = kzalloc(sizeof(**dma_mapping), GFP_KERNEL);
	if (!*dma_mapping) {
		mutex_unlock(&page_table->lock);
		return -ENOMEM;
	}
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto free_dma_mapping;
	}
	ret = sg_alloc_table_from_pages(sgt, pages,
			nr_pages, 0, page_table->size, GFP_KERNEL);
	if (ret) {
		goto free_sgt;
	}

	(*dma_mapping)->version = NVIDIA_P2P_DMA_MAPPING_VERSION;
	(*dma_mapping)->sgt = sgt;
	(*dma_mapping)->dev = dev;
	(*dma_mapping)->direction = direction;
	(*dma_mapping)->page_table = page_table;

	count = dma_map_sg(dev, sgt->sgl, sgt->nents, direction);
	if (count < 1) {
		goto free_sg_table;
	}

	(*dma_mapping)->entries = count;

	(*dma_mapping)->hw_address = kcalloc(count, sizeof(u64), GFP_KERNEL);
	if (!((*dma_mapping)->hw_address)) {
		ret = -ENOMEM;
		goto unmap_sg;
	}
	(*dma_mapping)->hw_len = kcalloc(count, sizeof(u64), GFP_KERNEL);
	if (!((*dma_mapping)->hw_len)) {
		ret = -ENOMEM;
		goto free_hw_address;
	}

	for_each_sg(sgt->sgl, sg, count, i) {
		(*dma_mapping)->hw_address[i] = sg_dma_address(sg);
		(*dma_mapping)->hw_len[i] = sg_dma_len(sg);
	}
	(*dma_mapping)->page_table->mapped |= NVIDIA_P2P_MAPPED;
	mutex_unlock(&page_table->lock);

	return 0;
free_hw_address:
	kfree((*dma_mapping)->hw_address);
unmap_sg:
	dma_unmap_sg(dev, sgt->sgl,
		safe_cast_u32_to_s32(sgt->nents), direction);
free_sg_table:
	sg_free_table(sgt);
free_sgt:
	kfree(sgt);
free_dma_mapping:
	kfree(*dma_mapping);
	*dma_mapping = NULL;
	mutex_unlock(&page_table->lock);

	return ret;
}
EXPORT_SYMBOL(nvidia_p2p_dma_map_pages);

int nvidia_p2p_dma_unmap_pages(struct nvidia_p2p_dma_mapping *dma_mapping)
{
	struct nvidia_p2p_page_table *page_table = NULL;

	if (!dma_mapping) {
		return -EINVAL;
	}

	page_table = dma_mapping->page_table;
	if (!page_table) {
		return -EFAULT;
	}

	mutex_lock(&page_table->lock);
	if (page_table->mapped & NVIDIA_P2P_MAPPED) {
		kfree(dma_mapping->hw_len);
		kfree(dma_mapping->hw_address);
		if (dma_mapping->entries)
			dma_unmap_sg(dma_mapping->dev,
				dma_mapping->sgt->sgl,
				safe_cast_u32_to_s32(dma_mapping->sgt->nents),
				dma_mapping->direction);
		sg_free_table(dma_mapping->sgt);
		kfree(dma_mapping->sgt);
		kfree(dma_mapping);
		page_table->mapped &= ~NVIDIA_P2P_MAPPED;
	}
	mutex_unlock(&page_table->lock);

	return 0;
}
EXPORT_SYMBOL(nvidia_p2p_dma_unmap_pages);

int nvidia_p2p_free_dma_mapping(struct nvidia_p2p_dma_mapping *dma_mapping)
{
	return nvidia_p2p_dma_unmap_pages(dma_mapping);
}
EXPORT_SYMBOL(nvidia_p2p_free_dma_mapping);
include/linux/nv-p2p.h (new file, 190 lines)
@@ -0,0 +1,190 @@
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#ifndef __NVIDIA_P2P_H__
#define __NVIDIA_P2P_H__

#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>

#define NVIDIA_P2P_UNINITIALIZED 0x0
#define NVIDIA_P2P_PINNED 0x1U
#define NVIDIA_P2P_MAPPED 0x2U

#define NVIDIA_P2P_MAJOR_VERSION_MASK 0xffff0000
#define NVIDIA_P2P_MINOR_VERSION_MASK 0x0000ffff

#define NVIDIA_P2P_MAJOR_VERSION(v) \
	(((v) & NVIDIA_P2P_MAJOR_VERSION_MASK) >> 16)

#define NVIDIA_P2P_MINOR_VERSION(v) \
	(((v) & NVIDIA_P2P_MINOR_VERSION_MASK))

#define NVIDIA_P2P_MAJOR_VERSION_MATCHES(p, v) \
	(NVIDIA_P2P_MAJOR_VERSION((p)->version) == NVIDIA_P2P_MAJOR_VERSION(v))

#define NVIDIA_P2P_VERSION_COMPATIBLE(p, v) \
	(NVIDIA_P2P_MAJOR_VERSION_MATCHES(p, v) && \
	(NVIDIA_P2P_MINOR_VERSION((p)->version) >= \
	(NVIDIA_P2P_MINOR_VERSION(v))))

enum nvidia_p2p_page_size_type {
	NVIDIA_P2P_PAGE_SIZE_4KB = 0,
	NVIDIA_P2P_PAGE_SIZE_64KB,
	NVIDIA_P2P_PAGE_SIZE_128KB,
	NVIDIA_P2P_PAGE_SIZE_COUNT
};

typedef struct nvidia_p2p_page_table {
	u32 version;
	u32 page_size;
	u64 size;
	u32 entries;
	struct page **pages;

	u64 vaddr;
	u32 mapped;

	struct mm_struct *mm;
	struct mmu_notifier mn;
	struct mutex lock;
	void (*free_callback)(void *data);
	void *data;
} nvidia_p2p_page_table_t;

typedef struct nvidia_p2p_dma_mapping {
	u32 version;
	dma_addr_t *hw_address;
	u32 *hw_len;
	u32 entries;

	struct sg_table *sgt;
	struct device *dev;
	struct nvidia_p2p_page_table *page_table;
	enum dma_data_direction direction;
} nvidia_p2p_dma_mapping_t;

#define NVIDIA_P2P_PAGE_TABLE_VERSION 0x00010000

#define NVIDIA_P2P_PAGE_TABLE_VERSION_COMPATIBLE(p) \
	NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_PAGE_TABLE_VERSION)

/*
 * @brief
 *   Make the pages underlying a range of GPU virtual memory
 *   accessible to a third-party device.
 *
 * @param[in]   vaddr
 *   A GPU Virtual Address
 * @param[in]   size
 *   The size of the requested mapping.
 *   Size must be a multiple of Page size.
 * @param[out]  **page_table
 *   A pointer to struct nvidia_p2p_page_table
 * @param[in]   free_callback
 *   A non-NULL pointer to the function to be invoked when the pages
 *   underlying the virtual address range are freed
 *   implicitly. Must be non NULL.
 * @param[in]   data
 *   A non-NULL opaque pointer to private data to be passed to the
 *   callback function.
 *
 * @return
 *   0 upon successful completion.
 *   Negative number if any error
 */
int nvidia_p2p_get_pages(u64 vaddr, u64 size,
		struct nvidia_p2p_page_table **page_table,
		void (*free_callback)(void *data), void *data);
/*
 * @brief
 *   Release the pages previously made accessible to
 *   a third-party device.
 *
 * @param[in]   *page_table
 *   A pointer to struct nvidia_p2p_page_table
 *
 * @return
 *   0 upon successful completion.
 *   -ENOMEM if the driver failed to allocate memory or if
 *   insufficient resources were available to complete the operation.
 *   Negative number if any other error
 */
int nvidia_p2p_put_pages(struct nvidia_p2p_page_table *page_table);

/*
 * @brief
 *   Release the pages previously made accessible to
 *   a third-party device. This is called during the
 *   execution of the free_callback().
 *
 * @param[in]   *page_table
 *   A pointer to struct nvidia_p2p_page_table
 *
 * @return
 *   0 upon successful completion.
 *   -ENOMEM if the driver failed to allocate memory or if
 *   insufficient resources were available to complete the operation.
 *   Negative number if any other error
 */
int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table);

#define NVIDIA_P2P_DMA_MAPPING_VERSION 0x00010000

#define NVIDIA_P2P_DMA_MAPPING_VERSION_COMPATIBLE(p) \
	NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_DMA_MAPPING_VERSION)

/*
 * @brief
 *   Map the pages retrieved using nvidia_p2p_get_pages and
 *   pass the dma address to a third-party device.
 *
 * @param[in]   *dev
 *   The peer device that needs to DMA to/from the
 *   mapping.
 * @param[in]   *page_table
 *   A pointer to struct nvidia_p2p_page_table
 * @param[out]  **map
 *   A pointer to struct nvidia_p2p_dma_mapping.
 *   The DMA mapping containing the DMA addresses to use.
 * @param[in]   direction
 *   DMA direction
 *
 * @return
 *   0 upon successful completion.
 *   Negative number if any other error
 */
int nvidia_p2p_dma_map_pages(struct device *dev,
		struct nvidia_p2p_page_table *page_table,
		struct nvidia_p2p_dma_mapping **map,
		enum dma_data_direction direction);
/*
 * @brief
 *   Unmap the pages previously mapped using nvidia_p2p_dma_map_pages
 *
 * @param[in]   *map
 *   A pointer to struct nvidia_p2p_dma_mapping.
 *   The DMA mapping containing the DMA addresses to use.
 *
 * @return
 *   0 upon successful completion.
 *   Negative number if any other error
 */
int nvidia_p2p_dma_unmap_pages(struct nvidia_p2p_dma_mapping *map);

/*
 * @brief
 *   Unmap the pages previously mapped using nvidia_p2p_dma_map_pages.
 *   This is called during the execution of the free_callback().
 *
 * @param[in]   *map
 *   A pointer to struct nvidia_p2p_dma_mapping.
 *   The DMA mapping containing the DMA addresses to use.
 *
 * @return
 *   0 upon successful completion.
 *   Negative number if any other error
 */
int nvidia_p2p_free_dma_mapping(struct nvidia_p2p_dma_mapping *dma_mapping);

#endif
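Editorial aside (not part of the header): the NVIDIA_P2P_*_VERSION_COMPATIBLE macros above are how a consumer can guard against layout changes in the structures it receives. A minimal sketch, assuming a hypothetical consumer helper named demo_check_versions:

/* Illustration only: reject structures whose version is not compatible with
 * the version this consumer was built against. */
#include <linux/errno.h>
#include <linux/nv-p2p.h>

static int demo_check_versions(struct nvidia_p2p_page_table *page_table,
			       struct nvidia_p2p_dma_mapping *dma_mapping)
{
	if (!NVIDIA_P2P_PAGE_TABLE_VERSION_COMPATIBLE(page_table))
		return -EINVAL;
	if (!NVIDIA_P2P_DMA_MAPPING_VERSION_COMPATIBLE(dma_mapping))
		return -EINVAL;

	return 0;
}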