drm/tegra: gem: Don't attach dma-bufs when not needed

The dma-buf import code currently attaches and maps all imported
dma-bufs to the drm device to get their sgt for mapping to the
directly managed IOMMU domain.

In many cases, however, such as on newer chips (Tegra186 and later),
the directly managed IOMMU domain is not used. Mapping to the drm
device can also cause issues, e.g. with swiotlb, since it is not
a real device.

To improve the situation, only attach and map imported dma-bufs
when required.

Signed-off-by: Mikko Perttunen <mperttunen@nvidia.com>
Change-Id: If73cc4e9419a2eebf67269eadb34050986f73077
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3090413
Reviewed-by: Santosh BS <santoshb@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
---
 2 files changed, 58 insertions(+), 31 deletions(-)

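For orientation, here is a condensed, illustrative sketch of the import path as
it looks after this change. It is not the literal code: error handling is elided,
the function name is made up for the example, and helpers outside the shown hunks
(e.g. tegra_bo_alloc_object()) are taken from the existing driver.

/* Illustrative sketch only: condensed from the tegra_bo_import() hunk below. */
static struct tegra_bo *tegra_bo_import_sketch(struct drm_device *drm,
                                               struct dma_buf *buf)
{
        struct tegra_drm *tegra = drm->dev_private;
        struct tegra_bo *bo = tegra_bo_alloc_object(drm, buf->size);

        /*
         * Attach and map the dma-buf to the DRM device only when the directly
         * managed IOMMU domain is in use, since only that path needs an sgt
         * for tegra_bo_iommu_map(). On the DMA API path (e.g. Tegra186+) no
         * attachment is made here; tegra_bo_pin() attaches later, against the
         * device that actually uses the buffer.
         */
        if (tegra->domain) {
                bo->gem.import_attach = dma_buf_attach(buf, drm->dev);
                bo->sgt = dma_buf_map_attachment(bo->gem.import_attach,
                                                 DMA_TO_DEVICE);
                tegra_bo_iommu_map(tegra, bo);
        }

        /* The buffer object always keeps a reference to the imported dma-buf. */
        get_dma_buf(buf);
        bo->dma_buf = buf;

        return bo;
}

tegra_bo_free_object() mirrors this: the attachment is unmapped and detached only
under tegra->domain, while the dma_buf reference is dropped whenever bo->dma_buf
is set.
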
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -77,8 +77,8 @@ static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_
 	/*
 	 * Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
 	 */
-	if (gem->import_attach) {
-		struct dma_buf *buf = gem->import_attach->dmabuf;
+	if (obj->dma_buf) {
+		struct dma_buf *buf = obj->dma_buf;
 
 		map->attach = dma_buf_attach(buf, dev);
 		if (IS_ERR(map->attach)) {
@@ -186,8 +186,8 @@ static void *tegra_bo_mmap(struct host1x_bo *bo)
 	if (obj->vaddr) {
 		return obj->vaddr;
-	} else if (obj->gem.import_attach) {
-		ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map);
+	} else if (obj->dma_buf) {
+		ret = dma_buf_vmap(obj->dma_buf, &map);
 
 		return ret ? NULL : map.vaddr;
 	} else {
 		return vmap(obj->pages, obj->num_pages, VM_MAP,
@@ -206,8 +206,8 @@ static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
 	if (obj->vaddr)
 		return;
-	else if (obj->gem.import_attach)
-		dma_buf_vunmap(obj->gem.import_attach->dmabuf, &map);
+	else if (obj->dma_buf)
+		dma_buf_vunmap(obj->dma_buf, &map);
 	else
 		vunmap(addr);
 }
@@ -463,27 +463,32 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
 	if (IS_ERR(bo))
 		return bo;
 
-	attach = dma_buf_attach(buf, drm->dev);
-	if (IS_ERR(attach)) {
-		err = PTR_ERR(attach);
-		goto free;
-	}
-
-	get_dma_buf(buf);
-
-	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
-	if (IS_ERR(bo->sgt)) {
-		err = PTR_ERR(bo->sgt);
-		goto detach;
-	}
-
+	/*
+	 * If we need to use IOMMU API to map the dma-buf into the internally managed
+	 * domain, map it first to the DRM device to get an sgt.
+	 */
 	if (tegra->domain) {
+		attach = dma_buf_attach(buf, drm->dev);
+		if (IS_ERR(attach)) {
+			err = PTR_ERR(attach);
+			goto free;
+		}
+
+		bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
+		if (IS_ERR(bo->sgt)) {
+			err = PTR_ERR(bo->sgt);
+			goto detach;
+		}
+
 		err = tegra_bo_iommu_map(tegra, bo);
 		if (err < 0)
 			goto detach;
+
+		bo->gem.import_attach = attach;
 	}
 
-	bo->gem.import_attach = attach;
+	get_dma_buf(buf);
+	bo->dma_buf = buf;
 
 	return bo;
@@ -514,20 +519,21 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
 			dev_name(mapping->dev));
 	}
 
-	if (tegra->domain)
+	if (tegra->domain) {
 		tegra_bo_iommu_unmap(tegra, bo);
 
-	if (gem->import_attach) {
-		struct dma_buf *dmabuf = gem->import_attach->dmabuf;
-
-		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
-					 DMA_TO_DEVICE);
-		dma_buf_detach(dmabuf, gem->import_attach);
-		dma_buf_put(dmabuf);
-	} else {
-		tegra_bo_free(gem->dev, bo);
+		if (gem->import_attach) {
+			dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
+						 DMA_TO_DEVICE);
+			dma_buf_detach(gem->import_attach->dmabuf, gem->import_attach);
+		}
 	}
 
+	tegra_bo_free(gem->dev, bo);
+
+	if (bo->dma_buf)
+		dma_buf_put(bo->dma_buf);
+
 	drm_gem_object_release(gem);
 	kfree(bo);
 }

diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -32,6 +32,26 @@ struct tegra_bo_tiling {
 	enum tegra_bo_sector_layout sector_layout;
 };
 
+/*
+ * How memory is referenced within a tegra_bo:
+ *
+ * Buffer source  | Mapping API(*)  | Fields
+ * ---------------+-----------------+---------------
+ * Allocated here | DMA API         | iova (IOVA mapped to drm->dev), vaddr (CPU VA)
+ *
+ * Allocated here | IOMMU API       | pages/num_pages (Phys. memory), sgt (Mapped to drm->dev),
+ *                |                 | iova/size (Mapped to domain)
+ *
+ * Imported       | DMA API         | dma_buf (Imported dma_buf)
+ *
+ * Imported       | IOMMU API       | dma_buf (Imported dma_buf),
+ *                |                 | gem->import_attach (Attachment on drm->dev),
+ *                |                 | sgt (Mapped to drm->dev)
+ *                |                 | iova/size (Mapped to domain)
+ *
+ * (*) If tegra->domain is set, i.e. TegraDRM IOMMU domain is directly managed through IOMMU API,
+ * this is IOMMU API. Otherwise DMA API.
+ */
 struct tegra_bo {
 	struct drm_gem_object gem;
 	struct host1x_bo base;
@@ -39,6 +59,7 @@ struct tegra_bo {
 	struct sg_table *sgt;
 	dma_addr_t iova;
 	void *vaddr;
+	struct dma_buf *dma_buf;
 
 	struct drm_mm_node *mm;
 	unsigned long num_pages;