Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git
video: tegra: nvmap: Add support for sgt caching
Deferred dmabuf unmapping is being removed from the kernel, so add equivalent sgt caching support to NvMap. On a map_dma_buf() call, NvMap creates a mapping, builds the corresponding sgt and caches it. When unmap_dma_buf() is later called for that sgt, NvMap leaves the mapping in place and simply returns. The next time a mapping request arrives for the same dmabuf, NvMap looks up the existing sgt in the cache and returns it, which significantly reduces the mapping overhead for a buffer that is mapped and unmapped many times. The sgt is freed and the mapping torn down only when the corresponding buffer is freed: once all references to the buffer are dropped, dmabuf_release() is called and the cached sgts are released there.

Bug 4064339

Change-Id: I7ed767ecaaac7aa44e6576e701b28537b84986ec
Signed-off-by: Ashish Mhetre <amhetre@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2925224
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
commit 9207ceb10a
parent ad2857ccaa
committed by mobile promotions
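For context, the importer-side pattern that benefits from the stash described in the commit message looks roughly like the sketch below. This is an illustration, not code from this commit: example_map_twice() and the way dmabuf and dev are obtained are assumed, only the dma_buf_*() calls are the stock kernel dma-buf API, and locking and cache-maintenance details are left out. After this change, the second dma_buf_map_attachment() for the same device and direction is answered from NvMap's cached sgt rather than by redoing the mapping; the real unmap happens only when the buffer itself is released.

/*
 * Illustrative importer-side sketch, not part of this change.
 * example_map_twice() and the source of dmabuf/dev are assumptions;
 * the dma_buf_*() calls are the standard kernel dma-buf API.
 */
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/err.h>

static int example_map_twice(struct dma_buf *dmabuf, struct device *dev)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;

        attach = dma_buf_attach(dmabuf, dev);
        if (IS_ERR(attach))
                return PTR_ERR(attach);

        /* First map: NvMap builds and maps the sg_table, then stashes it. */
        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                dma_buf_detach(dmabuf, attach);
                return PTR_ERR(sgt);
        }

        /* ... program the device with sg_dma_address(sgt->sgl) ... */

        /* With this change, unmap only does bookkeeping; the IOVA stays mapped. */
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);

        /* Second map for the same device/direction is served from the stash. */
        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                dma_buf_detach(dmabuf, attach);
                return PTR_ERR(sgt);
        }

        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
        dma_buf_detach(dmabuf, attach);
        return 0;
}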
@@ -1464,6 +1464,10 @@ int __init nvmap_probe(struct platform_device *pdev)
 	nvmap_stats_init(nvmap_debug_root);
 	platform_set_drvdata(pdev, dev);
 
+	e = nvmap_dmabuf_stash_init();
+	if (e)
+		goto fail_heaps;
+
 	for (i = 0; i < dev->nr_carveouts; i++)
 		if (dev->heaps[i].heap_bit & NVMAP_HEAP_CARVEOUT_GENERIC)
 			generic_carveout_present = 1;
@@ -1526,6 +1530,7 @@ int nvmap_remove(struct platform_device *pdev)
 #ifdef NVMAP_CONFIG_SCIIPC
 	nvmap_sci_ipc_exit();
 #endif
+	nvmap_dmabuf_stash_deinit();
 	debugfs_remove_recursive(dev->debug_root);
 	misc_deregister(&dev->dev_user);
 #ifdef NVMAP_CONFIG_PAGE_POOLS
@@ -37,6 +37,35 @@
 #define NVMAP_DMABUF_ATTACH __nvmap_dmabuf_attach
 #endif
 
+struct nvmap_handle_sgt {
+	enum dma_data_direction dir;
+	struct sg_table *sgt;
+	struct device *dev;
+	struct list_head maps_entry;
+	struct nvmap_handle_info *owner;
+} ____cacheline_aligned_in_smp;
+
+static struct kmem_cache *handle_sgt_cache;
+
+/*
+ * Initialize a kmem cache for allocating nvmap_handle_sgt's.
+ */
+int nvmap_dmabuf_stash_init(void)
+{
+	handle_sgt_cache = KMEM_CACHE(nvmap_handle_sgt, 0);
+	if (IS_ERR_OR_NULL(handle_sgt_cache)) {
+		pr_err("Failed to make kmem cache for nvmap_handle_sgt.\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+void nvmap_dmabuf_stash_deinit(void)
+{
+	kmem_cache_destroy(handle_sgt_cache);
+}
+
 static int __nvmap_dmabuf_attach(struct dma_buf *dmabuf, struct device *dev,
 				 struct dma_buf_attachment *attach)
 {
@@ -81,12 +110,53 @@ static inline bool access_vpr_phys(struct device *dev)
 	return !!of_find_property(dev->of_node, "access-vpr-phys", NULL);
 }
 
+static int nvmap_dmabuf_stash_sgt_locked(struct dma_buf_attachment *attach,
+					 enum dma_data_direction dir,
+					 struct sg_table *sgt)
+{
+	struct nvmap_handle_sgt *nvmap_sgt;
+	struct nvmap_handle_info *info = attach->dmabuf->priv;
+
+	nvmap_sgt = kmem_cache_alloc(handle_sgt_cache, GFP_KERNEL);
+	if (IS_ERR_OR_NULL(nvmap_sgt)) {
+		pr_err("Stashing SGT failed.\n");
+		return -ENOMEM;
+	}
+
+	nvmap_sgt->dir = dir;
+	nvmap_sgt->sgt = sgt;
+	nvmap_sgt->dev = attach->dev;
+	nvmap_sgt->owner = info;
+	list_add(&nvmap_sgt->maps_entry, &info->maps);
+
+	return 0;
+}
+
+static struct sg_table *nvmap_dmabuf_get_sgt_from_stash(struct dma_buf_attachment *attach,
+							enum dma_data_direction dir)
+{
+	struct nvmap_handle_info *info = attach->dmabuf->priv;
+	struct nvmap_handle_sgt *nvmap_sgt;
+	struct sg_table *sgt = NULL;
+
+	list_for_each_entry(nvmap_sgt, &info->maps, maps_entry) {
+		if (nvmap_sgt->dir != dir || nvmap_sgt->dev != attach->dev)
+			continue;
+
+		/* found sgt in stash */
+		sgt = nvmap_sgt->sgt;
+		break;
+	}
+
+	return sgt;
+}
+
 static struct sg_table *nvmap_dmabuf_map_dma_buf(struct dma_buf_attachment *attach,
 						 enum dma_data_direction dir)
 {
 	struct nvmap_handle_info *info = attach->dmabuf->priv;
 	int ents = 0;
-	struct sg_table *sgt;
+	struct sg_table *sgt = NULL;
 #ifdef NVMAP_CONFIG_DEBUG_MAPS
 	char *device_name = NULL;
 	u32 heap_type;
@@ -110,6 +180,10 @@ static struct sg_table *nvmap_dmabuf_map_dma_buf(struct dma_buf_attachment *attach,
 
 	atomic_inc(&info->handle->pin);
 
+	sgt = nvmap_dmabuf_get_sgt_from_stash(attach, dir);
+	if (sgt)
+		goto cache_hit;
+
 	sgt = __nvmap_sg_table(NULL, info->handle);
 	if (IS_ERR(sgt)) {
 		atomic_dec(&info->handle->pin);
@@ -133,6 +207,10 @@ static struct sg_table *nvmap_dmabuf_map_dma_buf(struct dma_buf_attachment *attach,
 		goto err_map;
 	}
 
+	if (nvmap_dmabuf_stash_sgt_locked(attach, dir, sgt))
+		WARN(1, "No mem to prep sgt.\n");
+
+cache_hit:
 	attach->priv = sgt;
 
 #ifdef NVMAP_CONFIG_DEBUG_MAPS
@@ -155,6 +233,26 @@ err_map:
 	return ERR_PTR(-ENOMEM);
 }
 
+static void __nvmap_dmabuf_unmap_dma_buf(struct nvmap_handle_sgt *nvmap_sgt)
+{
+	struct nvmap_handle_info *info = nvmap_sgt->owner;
+	enum dma_data_direction dir = nvmap_sgt->dir;
+	struct sg_table *sgt = nvmap_sgt->sgt;
+	struct device *dev = nvmap_sgt->dev;
+
+	if (!(nvmap_dev->dynamic_dma_map_mask & info->handle->heap_type)) {
+		sg_dma_address(sgt->sgl) = 0;
+	} else if (info->handle->heap_type == NVMAP_HEAP_CARVEOUT_VPR &&
+		   access_vpr_phys(dev)) {
+		sg_dma_address(sgt->sgl) = 0;
+	} else {
+		dma_unmap_sg_attrs(dev,
+				   sgt->sgl, sgt->nents,
+				   dir, DMA_ATTR_SKIP_CPU_SYNC);
+	}
+	__nvmap_free_sg_table(NULL, info->handle, sgt);
+}
+
 static void nvmap_dmabuf_unmap_dma_buf(struct dma_buf_attachment *attach,
 				       struct sg_table *sgt,
 				       enum dma_data_direction dir)
@@ -174,18 +272,6 @@ static void nvmap_dmabuf_unmap_dma_buf(struct dma_buf_attachment *attach,
 		return;
 	}
 
-	if (!(nvmap_dev->dynamic_dma_map_mask & info->handle->heap_type)) {
-		sg_dma_address(sgt->sgl) = 0;
-	} else if (info->handle->heap_type == NVMAP_HEAP_CARVEOUT_VPR &&
-		   access_vpr_phys(attach->dev)) {
-		sg_dma_address(sgt->sgl) = 0;
-	} else {
-		dma_unmap_sg_attrs(attach->dev,
-				   sgt->sgl, sgt->nents,
-				   dir, DMA_ATTR_SKIP_CPU_SYNC);
-	}
-	__nvmap_free_sg_table(NULL, info->handle, sgt);
-
 #ifdef NVMAP_CONFIG_DEBUG_MAPS
 	/* Remove the device name from the list of carveout accessing devices */
 	heap_type = info->handle->heap_type;
@@ -199,12 +285,24 @@ static void nvmap_dmabuf_unmap_dma_buf(struct dma_buf_attachment *attach,
 static void nvmap_dmabuf_release(struct dma_buf *dmabuf)
 {
 	struct nvmap_handle_info *info = dmabuf->priv;
+	struct nvmap_handle_sgt *nvmap_sgt;
 
 	trace_nvmap_dmabuf_release(info->handle->owner ?
 				   info->handle->owner->name : "unknown",
 				   info->handle,
 				   dmabuf);
 
+	mutex_lock(&info->maps_lock);
+	while (!list_empty(&info->maps)) {
+		nvmap_sgt = list_first_entry(&info->maps,
+					     struct nvmap_handle_sgt,
+					     maps_entry);
+		__nvmap_dmabuf_unmap_dma_buf(nvmap_sgt);
+		list_del(&nvmap_sgt->maps_entry);
+		kmem_cache_free(handle_sgt_cache, nvmap_sgt);
+	}
+	mutex_unlock(&info->maps_lock);
+
 	mutex_lock(&info->handle->lock);
 	if (info->is_ro) {
 		BUG_ON(dmabuf != info->handle->dmabuf_ro);
@@ -621,6 +621,7 @@ int __nvmap_dmabuf_fd(struct nvmap_client *client,
 		      struct dma_buf *dmabuf, int flags);
 
 int nvmap_dmabuf_stash_init(void);
+void nvmap_dmabuf_stash_deinit(void);
 
 void *nvmap_altalloc(size_t len);
 void nvmap_altfree(void *ptr, size_t len);