linux-nv-oot/drivers/gpu/drm/tegra/uapi/gather_bo.c
commit 1da27ee38f (Thierry Reding, 2023-04-03 09:18:59 +00:00)

drm/tegra: Implement buffer object cache

This cache is used to avoid mapping and unmapping buffer objects
unnecessarily. Mappings are cached per client and stay hot until
the buffer object is destroyed.
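
As a sketch of the lookup this enables on the pin path (illustrative only:
the cache structure, the helper name and the ->entry list field below are
hypothetical, and locking is omitted):

    static struct host1x_bo_mapping *
    bo_cache_pin(struct bo_cache *cache, struct host1x_bo *bo,
                 struct device *dev, enum dma_data_direction dir)
    {
            struct host1x_bo_mapping *map;

            /* Reuse a live mapping for this client if one exists. */
            list_for_each_entry(map, &cache->mappings, entry) {
                    if (map->bo == bo && map->dev == dev &&
                        map->direction == dir) {
                            kref_get(&map->ref);
                            return map;
                    }
            }

            /* None cached: create one; it is kept until the BO is destroyed. */
            map = bo->ops->pin(dev, bo, dir);
            if (!IS_ERR(map))
                    list_add_tail(&map->entry, &cache->mappings);

            return map;
    }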

Bug 200768479

Signed-off-by: Thierry Reding <treding@nvidia.com>
Change-Id: Ia2efe2b77b5043b665bae403d9eba4698e6a5228
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2596399
(cherry picked from commit d23156d94fa9499256e8082250e3bc02559f9c8d)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2620152
Tested-by: Jonathan Hunter <jonathanh@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: Jonathan Hunter <jonathanh@nvidia.com>
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: Mikko Perttunen <mperttunen@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 NVIDIA Corporation */

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include "gather_bo.h"

/* Take a reference on the gather BO backing @host_bo. */
static struct host1x_bo *gather_bo_get(struct host1x_bo *host_bo)
{
	struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);

	kref_get(&bo->ref);

	return host_bo;
}

/* Free the DMA-allocated gather data and the BO itself once the last reference is dropped. */
static void gather_bo_release(struct kref *ref)
{
	struct gather_bo *bo = container_of(ref, struct gather_bo, ref);

	dma_free_attrs(bo->dev, bo->gather_data_words * 4, bo->gather_data, bo->gather_data_dma,
		       0);
	kfree(bo);
}

/* Drop a reference on the gather BO backing @host_bo. */
void gather_bo_put(struct host1x_bo *host_bo)
{
	struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);

	kref_put(&bo->ref, gather_bo_release);
}

/*
 * Map the gather data into the client device's IOVA space. The returned
 * mapping holds a reference on the BO.
 */
static struct host1x_bo_mapping *
gather_bo_pin(struct device *dev, struct host1x_bo *bo, enum dma_data_direction direction)
{
	struct gather_bo *gather = container_of(bo, struct gather_bo, base);
	struct host1x_bo_mapping *map;
	int err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	kref_init(&map->ref);
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;

	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
	if (!map->sgt) {
		err = -ENOMEM;
		goto free;
	}

	/* Build an SG table for the coherent gather buffer... */
	err = dma_get_sgtable(gather->dev, map->sgt, gather->gather_data, gather->gather_data_dma,
			      gather->gather_data_words * 4);
	if (err)
		goto free_sgt;

	/* ... and map it for DMA by the client device. */
	err = dma_map_sgtable(dev, map->sgt, direction, 0);
	if (err)
		goto free_sgt;

	map->phys = sg_dma_address(map->sgt->sgl);
	map->size = gather->gather_data_words * 4;
	/* Note: dma_map_sgtable() returns 0 on success, so this leaves ->chunks at zero. */
	map->chunks = err;

	return map;

free_sgt:
	sg_free_table(map->sgt);
	kfree(map->sgt);
free:
	kfree(map);
	return ERR_PTR(err);
}

/* Undo gather_bo_pin(): unmap the SG table and drop the mapping's BO reference. */
static void gather_bo_unpin(struct host1x_bo_mapping *map)
{
	if (!map)
		return;

	dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
	sg_free_table(map->sgt);
	kfree(map->sgt);
	host1x_bo_put(map->bo);

	kfree(map);
}

/*
 * The gather data is allocated coherently (see dma_free_attrs() above), so a
 * kernel mapping always exists: mmap just returns it and munmap is a no-op.
 */
static void *gather_bo_mmap(struct host1x_bo *host_bo)
{
	struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);

	return bo->gather_data;
}

static void gather_bo_munmap(struct host1x_bo *host_bo, void *addr)
{
}

const struct host1x_bo_ops gather_bo_ops = {
	.get = gather_bo_get,
	.put = gather_bo_put,
	.pin = gather_bo_pin,
	.unpin = gather_bo_unpin,
	.mmap = gather_bo_mmap,
	.munmap = gather_bo_munmap,
};
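
Usage note: clients do not normally invoke these ops directly; they go through
the host1x_bo_pin()/host1x_bo_unpin() wrappers, which layer the mapping cache
described in the commit message on top of gather_bo_pin()/gather_bo_unpin().
A minimal sketch of how a submit path might drive this table directly
(engine_dev, bo and push_gather() are hypothetical placeholders; error
handling is elided):

    struct host1x_bo_mapping *map;

    /* Map the gather data so the engine can read it. */
    map = gather_bo_ops.pin(engine_dev, &bo->base, DMA_TO_DEVICE);
    if (!IS_ERR(map)) {
            /* map->phys is the IOVA pushed to the host1x channel. */
            push_gather(job, map->phys, map->size);

            gather_bo_ops.unpin(map);
    }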