mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git (synced 2025-12-22 17:25:35 +03:00)
Enhance IOCTL handler to identify and handle dma_fence_chain objects
that might contain host1x dma fences. This fixes issues when userspace
passes a dma_fence_chain (created by DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT
operations) to HOST1X_IOCTL_FENCE_EXTRACT. The updated code iteratively
unwraps fence chains until it finds a host1x_syncpt_fence or reaches a
fence it can't process. This ensures proper operation with DRM-based
applications that use timeline syncobj features, which internally use
dma_fence_chain.

Bug 4983872

Change-Id: I3eef9d54e2c42180cb5c74236cd64f42a863b7ea
Signed-off-by: Mainak Sen <msen@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3364940
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: Leslin Varghese <lvarghese@nvidia.com>
Tested-by: Arunmozhikannan Soundarapandian <asoundarapan@nvidia.com>
Reviewed-by: Sourab Gupta <sourabg@nvidia.com>
706 lines
17 KiB
C
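For context on the change described above, here is a minimal, hypothetical userspace sketch of the path it exercises: a sync_file fd that may wrap a dma_fence_chain (for example, one exported from a DRM timeline syncobj) is handed to HOST1X_IOCTL_FENCE_EXTRACT. The struct and field names (fence_fd, num_fences, fences_ptr, id, threshold) follow the uapi header included by this file, and the /dev node name matches the device_create() call below, but the include path and error handling are assumptions; the snippet is illustrative and not part of the source.

/* Hypothetical userspace example, not part of the file below. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/host1x-fence.h>

static int extract_syncpts(int sync_file_fd)
{
	struct host1x_fence_extract_fence fences[8];
	struct host1x_fence_extract args = {
		.fence_fd = sync_file_fd,
		.num_fences = 8,
		.fences_ptr = (uint64_t)(uintptr_t)fences,
	};
	int dev = open("/dev/host1x-fence", O_RDWR);

	if (dev < 0)
		return -1;

	/* Works for plain fences, fence arrays, and fence chains alike */
	if (ioctl(dev, HOST1X_IOCTL_FENCE_EXTRACT, &args) < 0) {
		close(dev);
		return -1;
	}

	/*
	 * args.num_fences now holds the total count of valid fences;
	 * at most 8 entries were copied into fences[].
	 */
	for (uint32_t i = 0; i < args.num_fences && i < 8; i++)
		printf("syncpt %u, threshold %u\n", fences[i].id, fences[i].threshold);

	close(dev);
	return 0;
}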
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include <nvidia/conftest.h>

#include <linux/anon_inodes.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sync_file.h>
#include <linux/host1x-dispatch.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-chain.h>

#include "include/uapi/linux/host1x-fence.h"

#define HOST1X_INSTANCE_MAX 2
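
/*
 * Note on ids: the uapi works with "global" syncpoint ids, which encode
 * the host1x instance (derived from the device's NUMA node at open time)
 * together with the per-instance id via the HOST1X_*_SYNCPOINT macros
 * from the uapi header. Internally, the handlers below split a global id
 * back into an (instance, local id) pair before looking up the syncpoint.
 */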

bool host1x_wrapper_init(void);

static struct host1x_uapi {
	struct class *class;

	struct cdev cdev;
	struct device *dev;
	dev_t dev_num;
} uapi_data;

static int dev_file_open(struct inode *inode, struct file *file)
{
	struct platform_device *host1x_pdev;
	struct device_node *np;
	int numa_node;
	struct host1x **host1xp;

	static const struct of_device_id host1x_match[] = {
		{ .compatible = "nvidia,tegraEmu-host1x", },
		{ .compatible = "nvidia,tegra186-host1x", },
		{ .compatible = "nvidia,tegra194-host1x", },
		{ .compatible = "nvidia,tegra234-host1x", },
		{ .compatible = "nvidia,tegra264-host1x", },
		{},
	};

	host1xp = kcalloc(HOST1X_INSTANCE_MAX, sizeof(struct host1x *), GFP_KERNEL);
	if (!host1xp)
		return -ENOMEM;

	for_each_matching_node(np, host1x_match) {
		host1x_pdev = of_find_device_by_node(np);
		if (host1x_pdev) {
			numa_node = dev_to_node(&host1x_pdev->dev);
			if (numa_node == NUMA_NO_NODE)
				host1xp[0] = platform_get_drvdata(host1x_pdev);
			else if (numa_node < HOST1X_INSTANCE_MAX && numa_node >= 0)
				host1xp[numa_node] = platform_get_drvdata(host1x_pdev);
			else
				pr_warn("%s: no host1x_fence support for instance %d\n",
					__func__, numa_node);
			/* Drop the reference taken by of_find_device_by_node() */
			platform_device_put(host1x_pdev);
		}
	}

	file->private_data = host1xp;

	return 0;
}

static int dev_file_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	file->private_data = NULL;

	return 0;
}

static int host1x_fence_create_fd(struct host1x_syncpt *sp, u32 threshold)
{
	struct sync_file *file;
	struct dma_fence *f;
	int fd;

	f = host1x_fence_create(sp, threshold, true);
	if (IS_ERR(f))
		return PTR_ERR(f);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		dma_fence_put(f);
		return fd;
	}

	file = sync_file_create(f);
	dma_fence_put(f);
	if (!file) {
		/* Don't leak the reserved fd */
		put_unused_fd(fd);
		return -ENOMEM;
	}

	fd_install(fd, file->file);

	return fd;
}

static int dev_file_ioctl_create_fence(struct host1x **host1xp, void __user *data)
{
	struct host1x_create_fence args;
	struct host1x_syncpt *syncpt;
	unsigned long copy_err;
	int fd;
	unsigned int instance, local_id;

	copy_err = copy_from_user(&args, data, sizeof(args));
	if (copy_err)
		return -EFAULT;

	if (args.reserved[0])
		return -EINVAL;

	instance = HOST1X_INSTANCE_NUM_FROM_GLOBAL_SYNCPOINT(args.id);
	local_id = HOST1X_GLOBAL_TO_LOCAL_SYNCPOINT(args.id);

	if (instance >= HOST1X_INSTANCE_MAX || !host1xp[instance])
		return -EINVAL;

	syncpt = host1x_syncpt_get_by_id_noref(host1xp[instance], local_id);
	if (!syncpt)
		return -EINVAL;

	fd = host1x_fence_create_fd(syncpt, args.threshold);
	if (fd < 0)
		return fd;

	args.fence_fd = fd;

	copy_err = copy_to_user(data, &args, sizeof(args));
	if (copy_err)
		return -EFAULT;

	return 0;
}

/**
 * fence_extract - Extract fence information from a single dma_fence
 * @fence: The input dma_fence to extract from
 * @max_fences: Maximum number of fences to copy to userspace (0 or 1)
 * @fences_user_ptr: Userspace pointer to copy extracted fence information to
 * @num_extracted: Output parameter to store the number of valid fences extracted
 *
 * This function extracts syncpoint information from a single dma_fence
 * and copies it to userspace if @max_fences > 0. For single fences,
 * @num_extracted will be either 0 (for signaled stub fences) or 1.
 *
 * Return: 0 on success, negative error code on failure
 */
static int fence_extract(struct dma_fence *fence, uint32_t max_fences,
			 struct host1x_fence_extract_fence __user *fences_user_ptr,
			 uint32_t *num_extracted)
{
	struct host1x_fence_extract_fence f;
	int instance, err;
	unsigned long copy_err;

	err = host1x_fence_extract(fence, &f.id, &f.threshold);
	if (err == -EINVAL && dma_fence_is_signaled(fence)) {
		/* Likely stub fence */
		*num_extracted = 0;
		return 0;
	} else if (err) {
		return err;
	}

	/* Convert to global id before giving to userspace */
	instance = host1x_fence_get_node(fence);
	if (instance < HOST1X_INSTANCE_MAX && instance >= 0)
		f.id = HOST1X_LOCAL_TO_GLOBAL_SYNCPOINT(f.id, instance);

	if (max_fences > 0) {
		copy_err = copy_to_user(fences_user_ptr, &f, sizeof(f));
		if (copy_err)
			return -EFAULT;
	}

	*num_extracted = 1;
	return 0;
}

/**
 * fence_array_extract - Extract fence information from a dma_fence_array
 * @array: The input dma_fence_array to extract from
 * @max_fences: Maximum number of fences to copy to userspace
 * @fences_user_ptr: Userspace pointer to copy extracted fence information to
 * @num_extracted: Output parameter to store the total number of valid fences extracted
 *
 * This function traverses a dma_fence_array, extracts syncpoint information from
 * each fence in the array, and copies it to userspace. The function counts all
 * valid fences in the array but copies only up to @max_fences to userspace.
 *
 * Return: 0 on success, negative error code on failure
 */
static int fence_array_extract(struct dma_fence_array *array, uint32_t max_fences,
			       struct host1x_fence_extract_fence __user *fences_user_ptr,
			       uint32_t *num_extracted)
{
	unsigned int i, j = 0;
	int err;
	struct host1x_fence_extract_fence f;
	int instance;
	unsigned long copy_err;

	for (i = 0; i < array->num_fences; i++) {
		err = host1x_fence_extract(array->fences[i], &f.id, &f.threshold);
		if (err == -EINVAL && dma_fence_is_signaled(array->fences[i])) {
			/* Likely stub fence */
			continue;
		} else if (err) {
			return err;
		}

		if (j < max_fences) {
			/* Convert to global id before giving to userspace */
			instance = host1x_fence_get_node(array->fences[i]);
			if (instance < HOST1X_INSTANCE_MAX && instance >= 0)
				f.id = HOST1X_LOCAL_TO_GLOBAL_SYNCPOINT(f.id, instance);
			copy_err = copy_to_user(fences_user_ptr + j, &f, sizeof(f));
			if (copy_err)
				return -EFAULT;
		}
		j++;
	}

	*num_extracted = j;
	return 0;
}

/**
 * fence_chain_extract - Extract fence information from a dma_fence chain
 * @fence: The input dma_fence chain to extract from
 * @max_fences: Maximum number of fences to copy to userspace
 * @fences_user_ptr: Userspace pointer to copy extracted fence information to
 * @num_extracted: Output parameter to store the total number of valid fences extracted
 *
 * This function traverses a dma_fence chain, extracts syncpoint information from
 * each child fence, and copies it to userspace. It optimizes memory usage by
 * processing fences directly without intermediate allocation. The function counts
 * all valid fences in the chain but copies only up to @max_fences to userspace.
 *
 * Return: 0 on success, negative error code on failure
 */
static int fence_chain_extract(struct dma_fence *fence, uint32_t max_fences,
			       struct host1x_fence_extract_fence __user *fences_user_ptr,
			       uint32_t *num_extracted)
{
	struct dma_fence *iter, *child;
	unsigned int count = 0;
	int err = 0;
	struct host1x_fence_extract_fence f;
	int instance;
	unsigned long copy_err;

	/* Traverse the chain and extract fences directly */
	dma_fence_chain_for_each(iter, fence) {
		child = dma_fence_chain_contained(iter);
		if (!child) {
			dma_fence_put(iter);
			continue;
		}

		err = host1x_fence_extract(child, &f.id, &f.threshold);
		if (err == -EINVAL && dma_fence_is_signaled(child)) {
			/* Likely stub fence */
			dma_fence_put(iter);
			continue;
		} else if (err) {
			dma_fence_put(iter);
			return err;
		}

		if (count < max_fences) {
			/* Convert to global id before giving to userspace */
			instance = host1x_fence_get_node(child);
			if (instance < HOST1X_INSTANCE_MAX && instance >= 0)
				f.id = HOST1X_LOCAL_TO_GLOBAL_SYNCPOINT(f.id, instance);
			copy_err = copy_to_user(fences_user_ptr + count, &f, sizeof(f));
			if (copy_err) {
				dma_fence_put(iter);
				return -EFAULT;
			}
		}
		count++;
		dma_fence_put(iter);
	}

	*num_extracted = count;
	return 0;
}

static int dev_file_ioctl_fence_extract(struct host1x **host1xp, void __user *data)
{
	struct host1x_fence_extract_fence __user *fences_user_ptr;
	struct dma_fence *fence;
	struct host1x_fence_extract args;
	struct dma_fence_array *array;
	unsigned long copy_err;
	uint32_t num_extracted = 0;
	int err;

	copy_err = copy_from_user(&args, data, sizeof(args));
	if (copy_err)
		return -EFAULT;

	fences_user_ptr = u64_to_user_ptr(args.fences_ptr);

	if (args.reserved[0] || args.reserved[1])
		return -EINVAL;

	fence = sync_file_get_fence(args.fence_fd);
	if (!fence)
		return -EINVAL;

	if (dma_fence_is_array(fence)) {
		array = to_dma_fence_array(fence);
		err = fence_array_extract(array, args.num_fences, fences_user_ptr, &num_extracted);
	} else if (dma_fence_is_chain(fence)) {
		err = fence_chain_extract(fence, args.num_fences, fences_user_ptr, &num_extracted);
	} else {
		err = fence_extract(fence, args.num_fences, fences_user_ptr, &num_extracted);
	}

	if (err == 0) {
		args.num_fences = num_extracted;
		copy_err = copy_to_user(data, &args, sizeof(args));
		if (copy_err)
			err = -EFAULT;
	}

	/*
	 * For a chain, fence_chain_extract() has already dropped our
	 * reference (its per-iteration dma_fence_put() covers the chain
	 * head), so skip the final put.
	 */
	if (dma_fence_is_chain(fence))
		return err;

	dma_fence_put(fence);
	return err;
}

struct host1x_pollfd_fence {
	struct list_head list;

	wait_queue_head_t *wq;

	struct dma_fence *fence;
	struct dma_fence_cb callback;
	bool callback_set;
};

struct host1x_pollfd {
	struct kref ref;
	struct mutex lock;
	wait_queue_head_t wq;

	struct list_head fences;
};

static int host1x_pollfd_release(struct inode *inode, struct file *file)
{
	struct host1x_pollfd *pollfd = file->private_data;
	struct host1x_pollfd_fence *pfd_fence, *pfd_fence_temp;

	mutex_lock(&pollfd->lock);

	list_for_each_entry_safe(pfd_fence, pfd_fence_temp, &pollfd->fences, list) {
		if (pfd_fence->callback_set) {
			if (dma_fence_remove_callback(pfd_fence->fence, &pfd_fence->callback))
				host1x_fence_cancel(pfd_fence->fence);
			pfd_fence->callback_set = false;
		}
		/* The lock/unlock pair just ensures that any callback execution has finished */
		spin_lock(pfd_fence->fence->lock);
		spin_unlock(pfd_fence->fence->lock);

		dma_fence_put(pfd_fence->fence);
		kfree(pfd_fence);
	}

	mutex_unlock(&pollfd->lock);

	kfree(pollfd);

	return 0;
}

static unsigned int host1x_pollfd_poll(struct file *file, poll_table *wait)
{
	struct host1x_pollfd *pollfd = file->private_data;
	struct host1x_pollfd_fence *pfd_fence, *pfd_fence_temp;
	unsigned int mask = 0;

	poll_wait(file, &pollfd->wq, wait);

	mutex_lock(&pollfd->lock);

	list_for_each_entry_safe(pfd_fence, pfd_fence_temp, &pollfd->fences, list) {
		if (dma_fence_is_signaled(pfd_fence->fence)) {
			mask = POLLPRI | POLLIN;

			if (pfd_fence->callback_set) {
				if (dma_fence_remove_callback(pfd_fence->fence,
							      &pfd_fence->callback))
					host1x_fence_cancel(pfd_fence->fence);
				pfd_fence->callback_set = false;
			}
			/* The lock/unlock pair just ensures that any callback execution has finished */
			spin_lock(pfd_fence->fence->lock);
			spin_unlock(pfd_fence->fence->lock);

			dma_fence_put(pfd_fence->fence);
			list_del(&pfd_fence->list);
			kfree(pfd_fence);
		}
	}

	mutex_unlock(&pollfd->lock);

	return mask;
}

static const struct file_operations host1x_pollfd_ops = {
	.release = host1x_pollfd_release,
	.poll = host1x_pollfd_poll,
};

static int dev_file_ioctl_create_pollfd(struct host1x **host1xp, void __user *data)
{
	struct host1x_create_pollfd args;
	struct host1x_pollfd *pollfd;
	unsigned long copy_err;
	struct file *file;
	int fd, err;

	copy_err = copy_from_user(&args, data, sizeof(args));
	if (copy_err)
		return -EFAULT;

	pollfd = kzalloc(sizeof(*pollfd), GFP_KERNEL);
	if (!pollfd)
		return -ENOMEM;

	file = anon_inode_getfile("host1x_pollfd", &host1x_pollfd_ops, pollfd, 0);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto free_pollfd;
	}

	init_waitqueue_head(&pollfd->wq);
	mutex_init(&pollfd->lock);
	kref_init(&pollfd->ref);
	INIT_LIST_HEAD(&pollfd->fences);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		err = fd;
		goto put_file;
	}

	args.fd = fd;

	/*
	 * Copy the fd back to userspace before installing it, so that a
	 * failed copy doesn't leave an open fd the caller never learns about.
	 */
	copy_err = copy_to_user(data, &args, sizeof(args));
	if (copy_err) {
		err = -EFAULT;
		put_unused_fd(fd);
		goto put_file;
	}

	fd_install(fd, file);

	return 0;

put_file:
	/* The anon file owns @pollfd; its release callback frees it */
	fput(file);
	return err;

free_pollfd:
	kfree(pollfd);
	return err;
}

static void host1x_pollfd_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct host1x_pollfd_fence *pfd_fence = container_of(cb, struct host1x_pollfd_fence, callback);

	wake_up_all(pfd_fence->wq);
}

static int dev_file_ioctl_trigger_pollfd(struct host1x **host1xp, void __user *data)
{
	struct host1x_pollfd_fence *pfd_fence;
	struct host1x_trigger_pollfd args;
	struct host1x_syncpt *syncpt;
	struct host1x_pollfd *pollfd;
	struct dma_fence *fence;
	unsigned long copy_err;
	struct file *file;
	int err;
	unsigned int instance, local_id;

	copy_err = copy_from_user(&args, data, sizeof(args));
	if (copy_err)
		return -EFAULT;

	file = fget(args.fd);
	if (!file)
		return -EINVAL;

	if (file->f_op != &host1x_pollfd_ops) {
		err = -EINVAL;
		goto put_file;
	}

	pollfd = file->private_data;

	instance = HOST1X_INSTANCE_NUM_FROM_GLOBAL_SYNCPOINT(args.id);
	local_id = HOST1X_GLOBAL_TO_LOCAL_SYNCPOINT(args.id);

	if (instance >= HOST1X_INSTANCE_MAX || !host1xp[instance]) {
		/* Don't leak the file reference taken above */
		err = -EINVAL;
		goto put_file;
	}

	syncpt = host1x_syncpt_get_by_id_noref(host1xp[instance], local_id);
	if (!syncpt) {
		err = -EINVAL;
		goto put_file;
	}

	pfd_fence = kzalloc(sizeof(*pfd_fence), GFP_KERNEL);
	if (!pfd_fence) {
		err = -ENOMEM;
		goto put_file;
	}

	fence = host1x_fence_create(syncpt, args.threshold, false);
	if (IS_ERR(fence)) {
		err = PTR_ERR(fence);
		goto free_pfd_fence;
	}

	pfd_fence->fence = fence;
	pfd_fence->wq = &pollfd->wq;

	mutex_lock(&pollfd->lock);
	list_add(&pfd_fence->list, &pollfd->fences);

	pfd_fence->callback_set = false;
	err = dma_fence_add_callback(fence, &pfd_fence->callback, host1x_pollfd_callback);
	if (err == -ENOENT) {
		/*
		 * The fence has already signaled, so no callback was added.
		 * We don't free the fence here -- that is done from the poll
		 * handler, so the logic is the same whether we go through the
		 * callback or this shortcut.
		 */
		wake_up_all(&pollfd->wq);
	} else if (err != 0) {
		/* Unarm under the lock before tearing down */
		list_del(&pfd_fence->list);
		mutex_unlock(&pollfd->lock);
		goto put_fence;
	} else {
		pfd_fence->callback_set = true;
	}

	mutex_unlock(&pollfd->lock);

	/* The armed fence lives on the pollfd's list now; drop our file ref */
	fput(file);

	return 0;

put_fence:
	dma_fence_put(fence);
free_pfd_fence:
	kfree(pfd_fence);
put_file:
	fput(file);

	return err;
}
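
/*
 * Illustrative userspace flow for the pollfd ioctls above (assumed
 * usage, not part of this file):
 *
 *   1. dev = open("/dev/host1x-fence", O_RDWR);
 *   2. HOST1X_IOCTL_CREATE_POLLFD returns an anon "host1x_pollfd" fd.
 *   3. HOST1X_IOCTL_TRIGGER_POLLFD arms that fd with a (global syncpt
 *      id, threshold) pair; a dma_fence callback wakes the pollfd's
 *      waitqueue once the threshold is crossed.
 *   4. poll()/select() on the pollfd returns POLLPRI | POLLIN when any
 *      armed fence has signaled; signaled entries are reaped inside
 *      host1x_pollfd_poll().
 */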

static long dev_file_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	void __user *data = (void __user *)arg;
	long err;

	switch (cmd) {
	case HOST1X_IOCTL_CREATE_FENCE:
		err = dev_file_ioctl_create_fence(file->private_data, data);
		break;

	case HOST1X_IOCTL_CREATE_POLLFD:
		err = dev_file_ioctl_create_pollfd(file->private_data, data);
		break;

	case HOST1X_IOCTL_TRIGGER_POLLFD:
		err = dev_file_ioctl_trigger_pollfd(file->private_data, data);
		break;

	case HOST1X_IOCTL_FENCE_EXTRACT:
		err = dev_file_ioctl_fence_extract(file->private_data, data);
		break;

	default:
		err = -ENOTTY;
	}

	return err;
}

static const struct file_operations dev_file_fops = {
	.owner = THIS_MODULE,
	.open = dev_file_open,
	.release = dev_file_release,
	.unlocked_ioctl = dev_file_ioctl,
	.compat_ioctl = dev_file_ioctl,
};

#if defined(NV_CLASS_STRUCT_DEVNODE_HAS_CONST_DEV_ARG) /* Linux v6.2 */
static char *host1x_fence_devnode(const struct device *dev, umode_t *mode)
#else
static char *host1x_fence_devnode(struct device *dev, umode_t *mode)
#endif
{
	*mode = 0666;
	return NULL;
}

static int host1x_uapi_init(struct host1x_uapi *uapi)
{
	int err;
	dev_t dev_num;

	err = alloc_chrdev_region(&dev_num, 0, 1, "host1x-fence");
	if (err)
		return err;

#if defined(NV_CLASS_CREATE_HAS_NO_OWNER_ARG) /* Linux v6.4 */
	uapi->class = class_create("host1x-fence");
#else
	uapi->class = class_create(THIS_MODULE, "host1x-fence");
#endif
	if (IS_ERR(uapi->class)) {
		err = PTR_ERR(uapi->class);
		goto unregister_chrdev_region;
	}
	uapi->class->devnode = host1x_fence_devnode;

	cdev_init(&uapi->cdev, &dev_file_fops);
	err = cdev_add(&uapi->cdev, dev_num, 1);
	if (err)
		goto destroy_class;

	uapi->dev = device_create(uapi->class, NULL,
				  dev_num, NULL, "host1x-fence");
	if (IS_ERR(uapi->dev)) {
		err = PTR_ERR(uapi->dev);
		goto del_cdev;
	}

	uapi->dev_num = dev_num;

	host1x_wrapper_init();

	/*
	 * Don't allow the kernel module to be unloaded. Unloading adds
	 * complexity during GVS verification; resolving that is not worth
	 * the effort in this case.
	 */
	if (!try_module_get(THIS_MODULE)) {
		pr_info("Host1x-Fence: Get Module Failed\n");
		err = -ENODEV;
		device_destroy(uapi->class, dev_num);
		goto del_cdev;
	}

	return 0;

del_cdev:
	cdev_del(&uapi->cdev);
destroy_class:
	class_destroy(uapi->class);
unregister_chrdev_region:
	unregister_chrdev_region(dev_num, 1);

	return err;
}

static const struct of_device_id host1x_fence_of_match[] = {
	{ .compatible = "nvidia,tegraEmu-host1x" },
	{ .compatible = "nvidia,tegra234-host1x" },
	{ .compatible = "nvidia,tegra264-host1x" },
	{ },
};
MODULE_DEVICE_TABLE(of, host1x_fence_of_match);

static int __init tegra_host1x_init(void)
{
	return host1x_uapi_init(&uapi_data);
}
module_init(tegra_host1x_init);

static void __exit tegra_host1x_exit(void)
{
}
module_exit(tegra_host1x_exit);

MODULE_DESCRIPTION("Host1x fence UAPI");
MODULE_LICENSE("GPL");