Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git, synced 2025-12-22 17:25:35 +03:00
drm/tegra: Implement correct DMA-BUF semantics
DMA-BUF requires that each device that accesses a DMA-BUF attaches to it separately. To do so the host1x_bo_pin() and host1x_bo_unpin() functions need to be reimplemented so that they can return a mapping, which either represents an attachment or a map of the driver's own GEM object.

Bug 200768479

Signed-off-by: Thierry Reding <treding@nvidia.com>
Change-Id: Ia380b7dcc371ce47f5f35d44a60fbd6b4ab9d636
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2596398
(cherry picked from commit 28960586000fca025689edfd45645ab28e497bca)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2620137
Tested-by: Jonathan Hunter <jonathanh@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: Jonathan Hunter <jonathanh@nvidia.com>
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: Mikko Perttunen <mperttunen@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
committed by: Laxman Dewangan
parent: 65450ce220
commit: 756aa327f1
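For orientation, the reworked tegra_bo_pin() below returns a struct host1x_bo_mapping rather than a bare SG table. The sketch below lists only the fields this diff populates; the authoritative definition lives on the host1x side and may contain additional members, so treat this as a reading aid, not the real header:

	/* Sketch only: fields inferred from the diff below, not the authoritative definition. */
	struct host1x_bo_mapping {
		struct dma_buf_attachment *attach;  /* DMA-BUF attachment, set only for imported buffers */
		enum dma_data_direction direction;  /* direction requested by the pinning device */
		struct host1x_bo *bo;               /* reference taken via host1x_bo_get() */
		struct sg_table *sgt;               /* SG table backing this mapping */
		unsigned int chunks;                /* number of contiguous DMA chunks */
		struct device *dev;                 /* device that owns this mapping */
		dma_addr_t phys;                    /* DMA address of the first chunk */
		size_t size;                        /* size of the mapped buffer */
	};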
@@ -21,6 +21,26 @@
 #include "drm.h"
 #include "gem.h"
 
+static unsigned int __sgt_dma_count_chunks(struct sg_table *sgt)
+{
+	dma_addr_t next = ~(dma_addr_t)0;
+	unsigned int count = 0, i;
+	struct scatterlist *s;
+
+	for_each_sg(sgt->sgl, s, sgt->nents, i) {
+		/* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */
+		if (!sg_dma_len(s))
+			continue;
+
+		if (sg_dma_address(s) != next) {
+			next = sg_dma_address(s) + sg_dma_len(s);
+			count++;
+		}
+	}
+
+	return count;
+}
+
 static void tegra_bo_put(struct host1x_bo *bo)
 {
 	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
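The helper added above collapses scatterlist entries whose DMA addresses form one contiguous run and counts the resulting runs. A self-contained userspace sketch of the same logic, using made-up addresses purely for illustration:

	/* Standalone illustration of the chunk-counting logic above (hypothetical values). */
	#include <stdio.h>

	struct ent { unsigned long long addr; unsigned long len; };

	int main(void)
	{
		/*
		 * Entries 1 and 2 are contiguous (0x1000 + 0x1000 == 0x2000) and form one
		 * chunk; entry 3 starts a second chunk.
		 */
		struct ent sg[] = { { 0x1000, 0x1000 }, { 0x2000, 0x1000 }, { 0x8000, 0x2000 } };
		unsigned long long next = ~0ULL;
		unsigned int count = 0, i;

		for (i = 0; i < 3; i++) {
			if (!sg[i].len)
				continue;

			if (sg[i].addr != next) {
				next = sg[i].addr + sg[i].len;
				count++;
			}
		}

		printf("%u chunks\n", count); /* prints "2 chunks" */
		return 0;
	}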
@@ -28,79 +48,64 @@ static void tegra_bo_put(struct host1x_bo *bo)
 	drm_gem_object_put(&obj->gem);
 }
 
-/* XXX move this into lib/scatterlist.c? */
-static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
-				  unsigned int nents, gfp_t gfp_mask)
-{
-	struct scatterlist *dst;
-	unsigned int i;
-	int err;
-
-	err = sg_alloc_table(sgt, nents, gfp_mask);
-	if (err < 0)
-		return err;
-
-	dst = sgt->sgl;
-
-	for (i = 0; i < nents; i++) {
-		sg_set_page(dst, sg_page(sg), sg->length, 0);
-		dst = sg_next(dst);
-		sg = sg_next(sg);
-	}
-
-	return 0;
-}
-
-static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
-				     dma_addr_t *phys)
+static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
+					      enum dma_data_direction direction)
 {
 	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
-	struct sg_table *sgt;
+	struct drm_gem_object *gem = &obj->gem;
+	struct host1x_bo_mapping *map;
 	int err;
 
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (!map)
+		return ERR_PTR(-ENOMEM);
+
+	map->bo = host1x_bo_get(bo);
+	map->direction = direction;
+	map->dev = dev;
+
 	/*
-	 * If we've manually mapped the buffer object through the IOMMU, make
-	 * sure to return the IOVA address of our mapping.
-	 *
-	 * Similarly, for buffers that have been allocated by the DMA API the
-	 * physical address can be used for devices that are not attached to
-	 * an IOMMU. For these devices, callers must pass a valid pointer via
-	 * the @phys argument.
-	 *
-	 * Imported buffers were also already mapped at import time, so the
-	 * existing mapping can be reused.
+	 * Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
 	 */
-	if (phys) {
-		*phys = obj->iova;
-		return NULL;
+	if (gem->import_attach) {
+		struct dma_buf *buf = gem->import_attach->dmabuf;
+
+		map->attach = dma_buf_attach(buf, dev);
+		if (IS_ERR(map->attach)) {
+			err = PTR_ERR(map->attach);
+			goto free;
+		}
+
+		map->sgt = dma_buf_map_attachment(map->attach, direction);
+		if (IS_ERR(map->sgt)) {
+			dma_buf_detach(buf, map->attach);
+			err = PTR_ERR(map->sgt);
+			goto free;
+		}
+
+		err = __sgt_dma_count_chunks(map->sgt);
+		map->size = gem->size;
+
+		goto out;
 	}
 
 	/*
 	 * If we don't have a mapping for this buffer yet, return an SG table
 	 * so that host1x can do the mapping for us via the DMA API.
 	 */
-	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
-	if (!sgt)
-		return ERR_PTR(-ENOMEM);
+	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
+	if (!map->sgt) {
+		err = -ENOMEM;
+		goto free;
+	}
 
 	if (obj->pages) {
 		/*
 		 * If the buffer object was allocated from the explicit IOMMU
 		 * API code paths, construct an SG table from the pages.
 		 */
-		err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
-						0, obj->gem.size, GFP_KERNEL);
-		if (err < 0)
-			goto free;
-	} else if (obj->sgt) {
-		/*
-		 * If the buffer object already has an SG table but no pages
-		 * were allocated for it, it means the buffer was imported and
-		 * the SG table needs to be copied to avoid overwriting any
-		 * other potential users of the original SG table.
-		 */
-		err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl,
-					     obj->sgt->orig_nents, GFP_KERNEL);
+		err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
						GFP_KERNEL);
 		if (err < 0)
 			goto free;
 	} else {
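The import branch added above is the standard DMA-BUF importer sequence: each device that wants to access the buffer attaches to it and then maps its own attachment, which is exactly the per-device semantics the commit message calls for. Condensed to its core, with error handling omitted (a sketch, not tegra-specific code, using the non-dynamic importer API seen in this diff):

	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_attach(buf, dev);                    /* one attachment per importing device */
	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);  /* exporter hands back device addresses */

	/* ... hardware accesses the buffer through sg_dma_address()/sg_dma_len() of sgt ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
	dma_buf_detach(buf, attach);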
@@ -109,25 +114,56 @@ static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
 		 * not imported, it had to be allocated with the DMA API, so
 		 * the DMA API helper can be used.
 		 */
-		err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
-				      obj->gem.size);
+		err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
 		if (err < 0)
 			goto free;
 	}
 
-	return sgt;
+	err = dma_map_sgtable(dev, map->sgt, direction, 0);
+	if (err)
+		goto free_sgt;
+
+out:
+	/*
+	 * If we've manually mapped the buffer object through the IOMMU, make sure to return the
+	 * existing IOVA address of our mapping.
+	 */
+	if (!obj->mm) {
+		map->phys = sg_dma_address(map->sgt->sgl);
+		map->chunks = err;
+	} else {
+		map->phys = obj->iova;
+		map->chunks = 1;
+	}
+
+	map->size = gem->size;
+
+	return map;
 
+free_sgt:
+	sg_free_table(map->sgt);
 free:
-	kfree(sgt);
+	kfree(map->sgt);
+	kfree(map);
 	return ERR_PTR(err);
 }
 
-static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
+static void tegra_bo_unpin(struct host1x_bo_mapping *map)
 {
-	if (sgt) {
-		sg_free_table(sgt);
-		kfree(sgt);
+	if (!map)
+		return;
+
+	if (map->attach) {
+		dma_buf_unmap_attachment(map->attach, map->sgt, map->direction);
+		dma_buf_detach(map->attach->dmabuf, map->attach);
+	} else {
+		dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
+		sg_free_table(map->sgt);
+		kfree(map->sgt);
 	}
+
+	host1x_bo_put(map->bo);
+	kfree(map);
 }
 
 static void *tegra_bo_mmap(struct host1x_bo *bo)
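From the caller's perspective the mapping is now torn down as a single unit: tegra_bo_unpin() above picks the matching release path (detach for imported buffers, DMA unmap plus SG table free otherwise). A hypothetical call site, following the host1x_bo_pin()/host1x_bo_unpin() naming from the commit message (the exact wrapper signatures live on the host1x side and are not part of this diff):

	struct host1x_bo_mapping *map;

	map = host1x_bo_pin(dev, bo, DMA_TO_DEVICE);  /* ends up in tegra_bo_pin() */
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* ... program the engine using map->phys and map->size ... */

	host1x_bo_unpin(map);                         /* ends up in tegra_bo_unpin() */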