linux-nv-oot/drivers/gpu/host1x/fence.c
Commit 98a6db4289 (Santosh BS, 2025-07-24 10:19:06 +00:00)
gpu: host1x-fence: fence support for multi-instance host1x

Update the host1x-fence driver to handle fences for multi-instance host1x.

Bug 4793553
Jira HOSTX-5413

Signed-off-by: Santosh BS <santoshb@nvidia.com>
Change-Id: Ice32d6f7ffe5bced50e2d50abe71530a5c75928f
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3198410
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Mikko Perttunen <mperttunen@nvidia.com>


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Syncpoint dma_fence implementation
 *
 * Copyright (c) 2020-2024, NVIDIA Corporation.
 */

#include <linux/dma-fence.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sync_file.h>

#include <soc/tegra/fuse.h>
#include <soc/tegra/fuse-helper.h>

#include "dev.h"
#include "fence.h"
#include "intr.h"
#include "syncpt.h"

static const char *host1x_syncpt_fence_get_driver_name(struct dma_fence *f)
{
	return "host1x";
}

static const char *host1x_syncpt_fence_get_timeline_name(struct dma_fence *f)
{
	return "syncpoint";
}

static struct host1x_syncpt_fence *to_host1x_fence(struct dma_fence *f)
{
	return container_of(f, struct host1x_syncpt_fence, base);
}

static bool host1x_syncpt_fence_signaled(struct dma_fence *f)
{
	struct host1x_syncpt_fence *sf = to_host1x_fence(f);

	return host1x_syncpt_is_expired(sf->sp, sf->threshold) || f->error;
}

static bool host1x_syncpt_fence_enable_signaling(struct dma_fence *f)
{
	struct host1x_syncpt_fence *sf = to_host1x_fence(f);

	if (host1x_syncpt_is_expired(sf->sp, sf->threshold))
		return false;

	/* Reference for interrupt path. */
	dma_fence_get(f);

	/*
	 * The dma_fence framework requires the fence driver to keep a
	 * reference to any fences for which 'enable_signaling' has been
	 * called (and that have not been signalled).
	 *
	 * We cannot currently always guarantee that all fences get signalled
	 * or cancelled. As such, for such situations, set up a timeout, so
	 * that long-lasting fences will get reaped eventually.
	 */
	if (sf->timeout) {
		/* Reference for timeout path. */
		dma_fence_get(f);
		schedule_delayed_work(&sf->timeout_work, msecs_to_jiffies(30000));
	}

	host1x_intr_add_fence_locked(sf->sp->host, sf);

	/*
	 * The fence may get signalled at any time after the above call,
	 * so we need to initialize all state used by signalling
	 * before it.
	 */

	return true;
}

const struct dma_fence_ops host1x_syncpt_fence_ops = {
	.get_driver_name = host1x_syncpt_fence_get_driver_name,
	.get_timeline_name = host1x_syncpt_fence_get_timeline_name,
	.enable_signaling = host1x_syncpt_fence_enable_signaling,
	.signaled = host1x_syncpt_fence_signaled,
};

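/*
 * Illustrative sketch, not part of this file: consumers never call the ops
 * above directly. Waiters go through the generic dma_fence API, which invokes
 * .enable_signaling the first time someone actually needs the fence to
 * signal. The callback and function names below are hypothetical and only
 * show how the framework ends up exercising these ops.
 */
static void __maybe_unused example_fence_cb(struct dma_fence *fence,
					    struct dma_fence_cb *cb)
{
	/* Runs from the signalling context once the syncpoint expires. */
	pr_info("host1x fence %llu:%llu signalled\n",
		(u64)fence->context, (u64)fence->seqno);
}

static int __maybe_unused example_register_cb(struct dma_fence *fence,
					      struct dma_fence_cb *cb)
{
	/*
	 * Registering a callback is what triggers
	 * host1x_syncpt_fence_enable_signaling() above. Returns -ENOENT if
	 * the fence has already signalled.
	 */
	return dma_fence_add_callback(fence, cb, example_fence_cb);
}
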
void host1x_fence_signal(struct host1x_syncpt_fence *f, ktime_t ts)
{
	if (atomic_xchg(&f->signaling, 1)) {
		/*
		 * Already on timeout path, but we removed the fence before
		 * timeout path could, so drop interrupt path reference.
		 */
		dma_fence_put(&f->base);
		return;
	}

	if (f->timeout && cancel_delayed_work(&f->timeout_work)) {
		/*
		 * We know that the timeout path will not be entered.
		 * Safe to drop the timeout path's reference now.
		 */
		dma_fence_put(&f->base);
	}

	dma_fence_signal_timestamp_locked(&f->base, ts);
	dma_fence_put(&f->base);
}

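/*
 * Note on the handshake between host1x_fence_signal() (interrupt path) and
 * do_fence_timeout() (timeout path): both race to claim the fence via
 * atomic_xchg(&f->signaling, 1). The winner signals the fence and drops its
 * own reference; it additionally drops the other path's reference if it can
 * prove that path will never run (a successful cancel_delayed_work() or
 * host1x_intr_remove_fence()). The loser only drops its own reference and
 * returns, since the winner has already taken care of signalling.
 */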
static void do_fence_timeout(struct work_struct *work)
{
	struct delayed_work *dwork = (struct delayed_work *)work;
	struct host1x_syncpt_fence *f =
		container_of(dwork, struct host1x_syncpt_fence, timeout_work);

	if (atomic_xchg(&f->signaling, 1)) {
		/* Already on interrupt path, drop timeout path reference if any. */
		if (f->timeout)
			dma_fence_put(&f->base);
		return;
	}

	if (host1x_intr_remove_fence(f->sp->host, f)) {
		/*
		 * Managed to remove fence from queue, so it's safe to drop
		 * the interrupt path's reference.
		 */
		dma_fence_put(&f->base);
	}

	dma_fence_set_error(&f->base, -ETIMEDOUT);
	dma_fence_signal(&f->base);
	if (f->timeout)
		dma_fence_put(&f->base);
}

struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold,
				      bool timeout)
{
	struct host1x_syncpt_fence *fence;

	if (!tegra_platform_is_silicon()) {
		dev_info_once(sp->host->dev,
			      "fence timeout disabled due to pre-silicon platform\n");
		timeout = false;
	}

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	fence->sp = sp;
	fence->threshold = threshold;
	fence->timeout = timeout;
	dma_fence_init(&fence->base, &host1x_syncpt_fence_ops, &sp->fences.lock,
		       dma_fence_context_alloc(1), 0);
	INIT_DELAYED_WORK(&fence->timeout_work, do_fence_timeout);

	return &fence->base;
}
EXPORT_SYMBOL(host1x_fence_create);

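/*
 * Illustrative sketch, not part of the driver: one typical way a caller
 * might use host1x_fence_create() to block until a syncpoint reaches a
 * threshold. The function name below is hypothetical.
 */
static int __maybe_unused example_wait_for_syncpt(struct host1x_syncpt *sp,
						  u32 threshold)
{
	struct dma_fence *fence;
	long ret;

	/* Create a fence that signals once 'sp' reaches 'threshold'. */
	fence = host1x_fence_create(sp, threshold, true);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/* Interruptible wait with a 1 s upper bound. */
	ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(1000));

	/* Drop the creator's reference in all cases. */
	dma_fence_put(fence);

	if (ret == 0)
		return -ETIMEDOUT;
	if (ret < 0)
		return ret;

	return 0;
}
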
int host1x_fence_extract(struct dma_fence *fence, u32 *id, u32 *threshold)
{
	struct host1x_syncpt_fence *f;

	if (fence->ops != &host1x_syncpt_fence_ops)
		return -EINVAL;

	f = container_of(fence, struct host1x_syncpt_fence, base);

	*id = f->sp->id;
	*threshold = f->threshold;

	return 0;
}
EXPORT_SYMBOL(host1x_fence_extract);

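/*
 * Illustrative sketch, not part of the driver: host1x_fence_extract() lets a
 * caller recover the raw (syncpoint id, threshold) pair behind a fence, for
 * instance to program a hardware wait instead of blocking on the CPU. The
 * function name is hypothetical.
 */
static int __maybe_unused example_describe_fence(struct dma_fence *fence)
{
	u32 id, threshold;
	int err;

	/* Fails with -EINVAL if 'fence' is not a host1x syncpoint fence. */
	err = host1x_fence_extract(fence, &id, &threshold);
	if (err)
		return err;

	pr_info("fence waits for syncpoint %u to reach %u\n", id, threshold);
	return 0;
}
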
int host1x_fence_get_node(struct dma_fence *fence)
{
	struct host1x_syncpt_fence *f;
	int node;

	if (fence->ops != &host1x_syncpt_fence_ops)
		return -EINVAL;

	f = container_of(fence, struct host1x_syncpt_fence, base);
	node = dev_to_node(f->sp->host->dev);

	return node == NUMA_NO_NODE ? 0 : node;
}
EXPORT_SYMBOL(host1x_fence_get_node);

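/*
 * Illustrative sketch, not part of the driver: the node returned by
 * host1x_fence_get_node() can be fed to NUMA-aware allocators so that
 * bookkeeping for a fence lands on the memory node of the host1x instance
 * that will signal it. The function name is hypothetical.
 */
static __maybe_unused void *example_alloc_near_fence(struct dma_fence *fence,
						     size_t size)
{
	int node = host1x_fence_get_node(fence);

	if (node < 0)
		return NULL;	/* not a host1x fence */

	return kzalloc_node(size, GFP_KERNEL, node);
}
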
void host1x_fence_cancel(struct dma_fence *f)
{
	struct host1x_syncpt_fence *sf = to_host1x_fence(f);

	schedule_delayed_work(&sf->timeout_work, 0);
	flush_delayed_work(&sf->timeout_work);
}
EXPORT_SYMBOL(host1x_fence_cancel);