video: camera: Port RTCPU drivers to OOT tree

Port RTCPU drivers from kernel/nvidia to kernel/nvidia-oot.
In addition to copying the files, this patch:
1) Modifies the makefiles to build the RTCPU drivers as modules
2) Converts the license headers of all ported files to SPDX identifiers
   (sketched below)
3) Adds the MODULE_LICENSE macro to all modules
4) Removes checks for old kernel versions, along with the dead code behind
   those checks
5) Fixes style errors reported by checkpatch.pl
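For illustration, a minimal sketch of what a ported file's preamble looks
like after items 2)-4); the stub names below are placeholders, not code from
this change:

        // SPDX-License-Identifier: GPL-2.0
        // Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

        #include <linux/module.h>

        /*
         * The old verbose license boilerplate and the
         * "#if LINUX_VERSION_CODE < KERNEL_VERSION(...)" blocks (plus the
         * dead code behind them) are gone; the SPDX tag above and
         * MODULE_LICENSE() below carry the licensing information instead.
         */

        static int __init rtcpu_example_init(void)
        {
                return 0;
        }
        module_init(rtcpu_example_init);

        static void __exit rtcpu_example_exit(void)
        {
        }
        module_exit(rtcpu_example_exit);

        MODULE_DESCRIPTION("Illustrative skeleton only");
        MODULE_LICENSE("GPL v2");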

Change-Id: If64296a22ce958e5326c7509cb69f8f7154f598e
Signed-off-by: Frank Chen <frankc@nvidia.com>
Signed-off-by: Matti Ryttylainen <mryttylainen@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2783040
Reviewed-by: Laxman Dewangan <ldewangan@nvidia.com>
Reviewed-by: Ankur Pawar <ankurp@nvidia.com>
Reviewed-by: Semi Malinen <smalinen@nvidia.com>
Reviewed-by: Pekka Pessi <ppessi@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Author: Frank Chen
Date: 2022-09-26 13:29:09 -07:00
Committed by: mobile promotions
Parent: 43ae0060cd
Commit: 304123a3bf
32 changed files with 10802 additions and 0 deletions


@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
LINUXINCLUDE += -I$(srctree.nvidia-oot)
obj-m += bad.o
obj-m += firmwares-class.o
@@ -15,6 +17,8 @@ tegra-cactmon-objs += actmon_common.o
obj-m += tegra-cactmon.o
obj-m += tegra-fsicom.o
obj-m += tegra-camera-rtcpu.o
obj-m += cvnas/
obj-m += hwpm/
obj-m += mce/
@@ -22,3 +26,4 @@ obj-m += uncore_pmu/
obj-m += mc-hwpm.o
obj-m += mc-utils.o
obj-m += dce/
obj-m += rtcpu/


@@ -0,0 +1,19 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
LINUXINCLUDE += -I$(srctree.nvidia-oot)/include
LINUXINCLUDE += -I$(srctree)/include
LINUXINCLUDE += -I$(srctree.nvidia-oot)/drivers/platform/tegra/rtcpu
LINUXINCLUDE += -I$(srctree.nvidia-oot)/drivers/platform/tegra
obj-m += capture-ivc.o
obj-m += ivc-bus.o
obj-m += camchar.o
obj-m += camera-diagnostics.o
obj-m += debug.o
obj-m += clk-group.o
obj-m += hsp-mailbox-client.o
obj-m += tegra-rtcpu-trace.o
obj-m += rtcpu-monitor.o
obj-m += reset-group.o
obj-m += device-group.o


@@ -0,0 +1,403 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <linux/bitmap.h>
#include <linux/cdev.h>
#include <linux/dcache.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/tegra-ivc.h>
#include <linux/tegra-ivc-bus.h>
#include <linux/tegra-ivc-instance.h>
#include <linux/wait.h>
#include <asm/ioctls.h>
#include <asm/uaccess.h>
#define CCIOGNFRAMES _IOR('c', 1, int)
#define CCIOGNBYTES _IOR('c', 2, int)
struct tegra_camchar_data {
struct cdev cdev;
struct tegra_ivc_channel *ch;
struct mutex io_lock;
wait_queue_head_t waitq;
bool is_open;
bool is_established;
};
#define DEVICE_COUNT (128)
static DECLARE_BITMAP(tegra_camchar_minor_map, DEVICE_COUNT);
static DEFINE_SPINLOCK(tegra_camchar_lock);
static dev_t tegra_camchar_major_number;
static struct class *tegra_camchar_class;
static int tegra_camchar_open(struct inode *in, struct file *f)
{
struct tegra_camchar_data *data;
int ret;
data = container_of(in->i_cdev, struct tegra_camchar_data, cdev);
if (data->is_open)
return -EBUSY;
ret = tegra_ivc_channel_runtime_get(data->ch);
if (ret < 0)
return ret;
data->is_open = true;
data->is_established = false;
f->private_data = data->ch;
return nonseekable_open(in, f);
}
static int tegra_camchar_release(struct inode *in, struct file *fp)
{
struct tegra_ivc_channel *ch = fp->private_data;
struct tegra_camchar_data *data;
data = tegra_ivc_channel_get_drvdata(ch);
tegra_ivc_channel_runtime_put(ch);
data->is_open = false;
return 0;
}
static __poll_t tegra_camchar_poll(struct file *fp, struct poll_table_struct *pt)
{
__poll_t ret = 0;
struct tegra_ivc_channel *ch = fp->private_data;
struct tegra_camchar_data *dev_data = tegra_ivc_channel_get_drvdata(ch);
poll_wait(fp, &dev_data->waitq, pt);
mutex_lock(&dev_data->io_lock);
if (tegra_ivc_can_read(&ch->ivc))
ret |= (EPOLLIN | EPOLLRDNORM);
if (tegra_ivc_can_write(&ch->ivc))
ret |= (EPOLLOUT | EPOLLWRNORM);
mutex_unlock(&dev_data->io_lock);
return ret;
}
static ssize_t tegra_camchar_read(struct file *fp, char __user *buffer, size_t len,
loff_t *offset)
{
struct tegra_ivc_channel *ch = fp->private_data;
struct tegra_camchar_data *dev_data = tegra_ivc_channel_get_drvdata(ch);
DEFINE_WAIT(wait);
ssize_t ret;
if (WARN_ON(!ch->is_ready))
return -EIO;
len = min_t(size_t, len, ch->ivc.frame_size);
if (len == 0)
return 0;
do {
ret = mutex_lock_interruptible(&dev_data->io_lock);
if (ret)
break;
prepare_to_wait(&dev_data->waitq, &wait, TASK_INTERRUPTIBLE);
ret = tegra_ivc_read_user(&ch->ivc, buffer, len);
mutex_unlock(&dev_data->io_lock);
if (ret != -ENOMEM)
;
else if (signal_pending(current))
ret = -EINTR;
else if (fp->f_flags & O_NONBLOCK)
ret = -EAGAIN;
else
schedule();
finish_wait(&dev_data->waitq, &wait);
} while (ret == -ENOMEM);
return ret;
}
static ssize_t tegra_camchar_write(struct file *fp, const char __user *buffer,
size_t len, loff_t *offset)
{
struct tegra_ivc_channel *ch = fp->private_data;
struct tegra_camchar_data *dev_data = tegra_ivc_channel_get_drvdata(ch);
DEFINE_WAIT(wait);
ssize_t ret;
if (WARN_ON(!ch->is_ready))
return -EIO;
len = min_t(size_t, len, ch->ivc.frame_size);
if (len == 0)
return 0;
do {
ret = mutex_lock_interruptible(&dev_data->io_lock);
if (ret)
break;
prepare_to_wait(&dev_data->waitq, &wait, TASK_INTERRUPTIBLE);
ret = tegra_ivc_write_user(&ch->ivc, buffer, len);
mutex_unlock(&dev_data->io_lock);
if (ret > 0)
dev_data->is_established = true;
if (ret != -ENOMEM && ret != -ECONNRESET)
;
else if (ret == -ECONNRESET && dev_data->is_established)
;
else if (signal_pending(current))
ret = -EINTR;
else if (fp->f_flags & O_NONBLOCK)
ret = -EAGAIN;
else
schedule();
finish_wait(&dev_data->waitq, &wait);
if (ret == -ECONNRESET && dev_data->is_established)
break;
} while (ret == -ENOMEM || ret == -ECONNRESET);
return ret;
}
static long tegra_camchar_ioctl(struct file *fp, unsigned int cmd,
unsigned long arg)
{
struct tegra_ivc_channel *ch = fp->private_data;
struct tegra_camchar_data *dev_data = tegra_ivc_channel_get_drvdata(ch);
long ret;
int val = 0;
mutex_lock(&dev_data->io_lock);
switch (cmd) {
/* generic serial port ioctls */
case FIONREAD:
ret = 0;
if (tegra_ivc_can_read(&ch->ivc))
val = ch->ivc.frame_size;
ret = put_user(val, (int __user *)arg);
break;
/* ioctls specific to this device */
case CCIOGNFRAMES:
val = ch->ivc.nframes;
ret = put_user(val, (int __user *)arg);
break;
case CCIOGNBYTES:
val = ch->ivc.frame_size;
ret = put_user(val, (int __user *)arg);
break;
default:
ret = -ENOTTY;
}
mutex_unlock(&dev_data->io_lock);
return ret;
}
static const struct file_operations tegra_camchar_fops = {
.open = tegra_camchar_open,
.poll = tegra_camchar_poll,
.read = tegra_camchar_read,
.write = tegra_camchar_write,
.release = tegra_camchar_release,
.unlocked_ioctl = tegra_camchar_ioctl,
.compat_ioctl = tegra_camchar_ioctl,
.llseek = no_llseek,
};
static int __init tegra_camchar_init(struct tegra_ivc_driver *drv)
{
int ret;
dev_t start;
ret = alloc_chrdev_region(&start, 0, DEVICE_COUNT, "camchar");
if (ret) {
pr_alert("camchar: failed to allocate device numbers\n");
return ret;
}
tegra_camchar_major_number = MAJOR(start);
tegra_camchar_class = class_create(THIS_MODULE, "camchar_class");
if (IS_ERR(tegra_camchar_class)) {
pr_alert("camchar: failed to create class\n");
ret = PTR_ERR(tegra_camchar_class);
goto init_err_class;
}
ret = tegra_ivc_driver_register(drv);
if (ret) {
pr_alert("camchar: ivc driver registration failed\n");
goto init_err_ivc;
}
pr_info("camchar: rtcpu character device driver loaded\n");
return 0;
init_err_ivc:
class_destroy(tegra_camchar_class);
init_err_class:
unregister_chrdev_region(start, DEVICE_COUNT);
return ret;
}
static void __exit tegra_camchar_exit(struct tegra_ivc_driver *drv)
{
dev_t num = MKDEV(tegra_camchar_major_number, 0);
tegra_ivc_driver_unregister(drv);
class_destroy(tegra_camchar_class);
unregister_chrdev_region(num, DEVICE_COUNT);
tegra_camchar_major_number = 0;
pr_info("camchar: unloaded rtcpu character device driver\n");
}
static void tegra_camchar_notify(struct tegra_ivc_channel *ch)
{
struct tegra_camchar_data *dev_data = tegra_ivc_channel_get_drvdata(ch);
wake_up_interruptible(&dev_data->waitq);
}
static int tegra_camchar_get_minor(void)
{
int minor;
spin_lock(&tegra_camchar_lock);
minor = find_first_zero_bit(tegra_camchar_minor_map, DEVICE_COUNT);
if (minor < DEVICE_COUNT)
set_bit(minor, tegra_camchar_minor_map);
else
minor = -ENODEV;
spin_unlock(&tegra_camchar_lock);
return minor;
}
static void tegra_camchar_put_minor(unsigned int minor)
{
spin_lock(&tegra_camchar_lock);
if (minor < DEVICE_COUNT)
clear_bit(minor, tegra_camchar_minor_map);
spin_unlock(&tegra_camchar_lock);
}
static int tegra_camchar_probe(struct tegra_ivc_channel *ch)
{
const char *devname;
struct tegra_camchar_data *data;
int ret, minor;
dev_t num;
struct device *dummy;
devname = of_device_get_match_data(&ch->dev);
if (devname == NULL) {
ret = of_property_read_string(ch->dev.of_node,
"nvidia,devname", &devname);
if (ret != 0)
return ret;
}
dev_dbg(&ch->dev, "probing /dev/%s", devname);
data = devm_kzalloc(&ch->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->ch = ch;
cdev_init(&data->cdev, &tegra_camchar_fops);
data->cdev.owner = THIS_MODULE;
init_waitqueue_head(&data->waitq);
mutex_init(&data->io_lock);
tegra_ivc_channel_set_drvdata(ch, data);
minor = tegra_camchar_get_minor();
if (minor < 0)
return minor;
num = MKDEV(tegra_camchar_major_number, minor);
ret = cdev_add(&data->cdev, num, 1);
if (ret) {
dev_warn(&ch->dev, "cannot add /dev/%s\n", devname);
tegra_camchar_put_minor(minor);
return ret;
}
dummy = device_create(tegra_camchar_class, &ch->dev, num, NULL,
"%s", devname);
if (IS_ERR(dummy)) {
dev_err(&ch->dev, "cannot create /dev/%s\n", devname);
tegra_camchar_put_minor(minor);
return PTR_ERR(dummy);
}
return ret;
}
static void tegra_camchar_remove(struct tegra_ivc_channel *ch)
{
struct tegra_camchar_data *data = tegra_ivc_channel_get_drvdata(ch);
dev_t num = data->cdev.dev;
device_destroy(tegra_camchar_class, num);
cdev_del(&data->cdev);
tegra_camchar_put_minor(MINOR(num));
}
static const struct tegra_ivc_channel_ops tegra_ivc_channel_chardev_ops = {
.probe = tegra_camchar_probe,
.remove = tegra_camchar_remove,
.notify = tegra_camchar_notify,
};
static const struct of_device_id camchar_of_match[] = {
{ .compatible = "nvidia,tegra-ivc-cdev" },
{ .compatible = "nvidia,tegra186-camera-ivc-protocol-echo",
.data = (void *)"camchar-echo", },
{ .compatible = "nvidia,tegra186-camera-ivc-protocol-dbg",
.data = (void *)"camchar-dbg", },
{ },
};
static struct tegra_ivc_driver camchar_driver = {
.driver = {
.owner = THIS_MODULE,
.bus = &tegra_ivc_bus_type,
.name = "tegra-ivc-cdev",
.of_match_table = camchar_of_match,
},
.dev_type = &tegra_ivc_channel_type,
.ops.channel = &tegra_ivc_channel_chardev_ops,
};
tegra_ivc_subsys_driver(camchar_driver, tegra_camchar_init, tegra_camchar_exit);
MODULE_AUTHOR("Jan Solanti <jsolanti@nvidia.com>");
MODULE_DESCRIPTION("The character device for ivc-bus");
MODULE_LICENSE("GPL v2");
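For reference, a minimal user-space client of this device could look like the
following sketch; the /dev/camchar-echo node name comes from the of_match data
above, and error handling is abbreviated:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <unistd.h>

/* Mirror the driver's private ioctls (see CCIOGNFRAMES/CCIOGNBYTES above). */
#define CCIOGNFRAMES _IOR('c', 1, int)  /* IVC queue depth in frames */
#define CCIOGNBYTES  _IOR('c', 2, int)  /* IVC frame size in bytes */

int main(void)
{
        char buf[64] = "ping";
        int nframes = 0, nbytes = 0;
        int fd = open("/dev/camchar-echo", O_RDWR);

        if (fd < 0)
                return 1;
        ioctl(fd, CCIOGNFRAMES, &nframes);
        ioctl(fd, CCIOGNBYTES, &nbytes);
        printf("IVC queue: %d frames of %d bytes\n", nframes, nbytes);

        /* Each write()/read() moves at most one IVC frame. */
        write(fd, buf, strlen(buf) + 1);
        read(fd, buf, sizeof(buf));
        close(fd);
        return 0;
}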


@@ -0,0 +1,50 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/tegra-ivc.h>
#include <linux/tegra-ivc-bus.h>
#include <linux/tegra-ivc-instance.h>
static int tegra_camera_diagnostics_probe(struct tegra_ivc_channel *ch)
{
(void)ch;
return 0;
}
static void tegra_camera_diagnostics_remove(struct tegra_ivc_channel *ch)
{
(void)ch;
}
static const struct tegra_ivc_channel_ops
tegra_camera_diagnostics_channel_ops = {
.probe = tegra_camera_diagnostics_probe,
.remove = tegra_camera_diagnostics_remove,
};
static const struct of_device_id camera_diagnostics_of_match[] = {
{ .compatible = "nvidia,tegra186-camera-diagnostics", },
{ },
};
static struct tegra_ivc_driver camera_diagnostics_driver = {
.driver = {
.owner = THIS_MODULE,
.bus = &tegra_ivc_bus_type,
.name = "tegra-camera-diagnostics",
.of_match_table = camera_diagnostics_of_match,
},
.dev_type = &tegra_ivc_channel_type,
.ops.channel = &tegra_camera_diagnostics_channel_ops,
};
tegra_ivc_subsys_driver_default(camera_diagnostics_driver);
MODULE_AUTHOR("Pekka Pessi <ppessi@nvidia.com>");
MODULE_DESCRIPTION("Dummy device driver for Camera Diagnostics IVC Channel");
MODULE_LICENSE("GPL v2");


@@ -0,0 +1,132 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#ifndef __CAPTURE_IVC_PRIV_H__
#define __CAPTURE_IVC_PRIV_H__
/** Total number of capture channels (2 * VI + ISP). T234 has two VI units */
#define NUM_CAPTURE_CHANNELS 88
/** Temporary ids for the clients whose channel-id is not yet allocated */
#define NUM_CAPTURE_TRANSACTION_IDS NUM_CAPTURE_CHANNELS
/** Total number of channels including Temporary IDs */
#define TOTAL_CHANNELS (NUM_CAPTURE_CHANNELS + NUM_CAPTURE_TRANSACTION_IDS)
#define TRANS_ID_START_IDX NUM_CAPTURE_CHANNELS
/**
* @brief Callback context of an IVC channel.
*/
struct tegra_capture_ivc_cb_ctx {
/** Linked list of callback contexts */
struct list_head node;
/** Callback function registered by client */
tegra_capture_ivc_cb_func cb_func;
/** Private context of a VI/ISP capture context */
const void *priv_context;
};
/**
* @brief IVC channel context.
*/
struct tegra_capture_ivc {
/** Pointer to IVC channel */
struct tegra_ivc_channel *chan;
/** Callback context lock */
struct mutex cb_ctx_lock;
/** Channel write lock */
struct mutex ivc_wr_lock;
/** Deferred work */
struct work_struct work;
/** Channel work queue head */
wait_queue_head_t write_q;
/** Array holding callbacks registered by each channel */
struct tegra_capture_ivc_cb_ctx cb_ctx[TOTAL_CHANNELS];
/** spinlock protecting access to linked list */
spinlock_t avl_ctx_list_lock;
/** Linked list holding callback contexts */
struct list_head avl_ctx_list;
};
/**
* @brief Standard message header for all capture IVC messages.
*/
struct tegra_capture_ivc_msg_header {
/** Message identifier. */
uint32_t msg_id;
union {
/** Channel identifier. */
uint32_t channel_id;
/** Transaction id */
uint32_t transaction;
};
} __aligned(8);
/**
* @brief Response of IVC msg
*/
struct tegra_capture_ivc_resp {
/** IVC msg header. See @ref tegra_capture_ivc_msg_header */
struct tegra_capture_ivc_msg_header header;
};
/** Pointer holding the Control IVC channel context, created during probe call */
static struct tegra_capture_ivc *__scivc_control;
/** Pointer holding the Capture IVC channel context, created during probe call */
static struct tegra_capture_ivc *__scivc_capture;
/**
* @brief Worker thread to handle asynchronous messages on the IVC channel.
* It then invokes the callbacks registered by the channel drivers.
*
* @param[in] work work_struct pointer
*/
static void tegra_capture_ivc_worker(
struct work_struct *work);
/**
* @brief Implementation of the IVC notify operation, which gets called when a
* new message arrives on the bus for the channel. This signals the worker thread.
*
* @param[in] chan tegra_ivc_channel channel pointer
*/
static void tegra_capture_ivc_notify(
struct tegra_ivc_channel *chan);
/**
* @brief Implementation of probe operation which gets called during boot
*
* @param[in,out] chan tegra_ivc_channel channel pointer
*
* @returns 0 (success), neg. errno (failure)
*/
static int tegra_capture_ivc_probe(
struct tegra_ivc_channel *chan);
/**
* @brief Implementation of remove operation
*
* @param[in] chan tegra_ivc_channel channel pointer
*/
static void tegra_capture_ivc_remove(
struct tegra_ivc_channel *chan);
/**
* @brief Transmit an IVC message once the channel is writable,
* using the Tegra IVC core library APIs.
*
* @param[in] civc IVC channel on which the msg needs to be transmitted.
* @param[in] req IVC msg blob.
* @param[in] len IVC msg length.
*
* @returns 0 (success), neg. errno (failure)
*/
static int tegra_capture_ivc_tx(
struct tegra_capture_ivc *civc,
const void *req,
size_t len);
#endif /* __CAPTURE_IVC_PRIV_H__ */
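The transaction-id scheme above implies a three-step client flow, sketched
here against the APIs exported by capture-ivc.c; the chan_id value is a
placeholder for the id the RTCPU returns in its setup response:

#include <linux/tegra-capture-ivc.h>

static void example_control_resp_cb(const void *resp, const void *priv)
{
        /* Decode the control response for this client. */
}

static int example_channel_setup(void)
{
        uint32_t trans_id;
        uint32_t chan_id;
        int err;

        /* 1. Borrow a temporary transaction context. */
        err = tegra_capture_ivc_register_control_cb(example_control_resp_cb,
                                                    &trans_id, NULL);
        if (err)
                return err;

        /* 2. Submit a channel-setup request carrying trans_id, then parse
         * the RTCPU-allocated channel id from the response (elided).
         */
        chan_id = 0;    /* placeholder */

        /* 3. Move the callback from the temporary trans_id to chan_id. */
        err = tegra_capture_ivc_notify_chan_id(chan_id, trans_id);
        if (err)
                tegra_capture_ivc_unregister_control_cb(trans_id);
        return err;
}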


@@ -0,0 +1,510 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <linux/tegra-capture-ivc.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/tegra-ivc.h>
#include <linux/tegra-ivc-bus.h>
#include <linux/nospec.h>
#include <asm/barrier.h>
#include <trace/events/tegra_capture.h>
#include "capture-ivc-priv.h"
static int tegra_capture_ivc_tx_(struct tegra_capture_ivc *civc,
const void *req, size_t len)
{
struct tegra_ivc_channel *chan;
int ret;
if (civc == NULL)
return -ENODEV;
chan = civc->chan;
if (WARN_ON(!chan->is_ready))
return -EIO;
ret = mutex_lock_interruptible(&civc->ivc_wr_lock);
if (unlikely(ret == -EINTR))
return -ERESTARTSYS;
if (unlikely(ret))
return ret;
ret = wait_event_interruptible(civc->write_q,
tegra_ivc_can_write(&chan->ivc));
if (likely(ret == 0))
ret = tegra_ivc_write(&chan->ivc, req, len);
mutex_unlock(&civc->ivc_wr_lock);
if (unlikely(ret < 0))
dev_err(&chan->dev, "tegra_ivc_write: error %d\n", ret);
return ret;
}
static int tegra_capture_ivc_tx(struct tegra_capture_ivc *civc,
const void *req, size_t len)
{
int ret;
struct tegra_capture_ivc_msg_header hdr;
size_t hdrlen = sizeof(hdr);
char const *ch_name = "NULL";
if (civc && civc->chan)
ch_name = dev_name(&civc->chan->dev);
if (len < hdrlen) {
memset(&hdr, 0, hdrlen);
memcpy(&hdr, req, len);
} else {
memcpy(&hdr, req, hdrlen);
}
ret = tegra_capture_ivc_tx_(civc, req, len);
if (ret < 0)
trace_capture_ivc_send_error(ch_name, hdr.msg_id, hdr.channel_id, ret);
else
trace_capture_ivc_send(ch_name, hdr.msg_id, hdr.channel_id);
return ret;
}
int tegra_capture_ivc_control_submit(const void *control_desc, size_t len)
{
WARN_ON(__scivc_control == NULL);
return tegra_capture_ivc_tx(__scivc_control, control_desc, len);
}
EXPORT_SYMBOL(tegra_capture_ivc_control_submit);
int tegra_capture_ivc_capture_submit(const void *capture_desc, size_t len)
{
WARN_ON(__scivc_capture == NULL);
return tegra_capture_ivc_tx(__scivc_capture, capture_desc, len);
}
EXPORT_SYMBOL(tegra_capture_ivc_capture_submit);
int tegra_capture_ivc_register_control_cb(
tegra_capture_ivc_cb_func control_resp_cb,
uint32_t *trans_id, const void *priv_context)
{
struct tegra_capture_ivc *civc;
struct tegra_capture_ivc_cb_ctx *cb_ctx;
size_t ctx_id;
int ret;
/* Check if inputs are valid */
if (WARN(control_resp_cb == NULL, "callback function is NULL"))
return -EINVAL;
if (WARN(trans_id == NULL, "return value trans_id is NULL"))
return -EINVAL;
if (WARN_ON(!__scivc_control))
return -ENODEV;
civc = __scivc_control;
ret = tegra_ivc_channel_runtime_get(civc->chan);
if (unlikely(ret < 0))
return ret;
spin_lock(&civc->avl_ctx_list_lock);
if (unlikely(list_empty(&civc->avl_ctx_list))) {
spin_unlock(&civc->avl_ctx_list_lock);
ret = -EAGAIN;
goto fail;
}
cb_ctx = list_first_entry(&civc->avl_ctx_list,
struct tegra_capture_ivc_cb_ctx, node);
list_del(&cb_ctx->node);
spin_unlock(&civc->avl_ctx_list_lock);
ctx_id = cb_ctx - &civc->cb_ctx[0];
if (WARN(ctx_id < TRANS_ID_START_IDX ||
ctx_id >= ARRAY_SIZE(civc->cb_ctx),
"invalid cb_ctx %zu", ctx_id)) {
ret = -EIO;
goto fail;
}
mutex_lock(&civc->cb_ctx_lock);
if (WARN(cb_ctx->cb_func != NULL, "cb_ctx is busy")) {
ret = -EIO;
goto locked_fail;
}
*trans_id = (uint32_t)ctx_id;
cb_ctx->cb_func = control_resp_cb;
cb_ctx->priv_context = priv_context;
mutex_unlock(&civc->cb_ctx_lock);
return 0;
locked_fail:
mutex_unlock(&civc->cb_ctx_lock);
fail:
tegra_ivc_channel_runtime_put(civc->chan);
return ret;
}
EXPORT_SYMBOL(tegra_capture_ivc_register_control_cb);
int tegra_capture_ivc_notify_chan_id(uint32_t chan_id, uint32_t trans_id)
{
struct tegra_capture_ivc *civc;
if (WARN(chan_id >= NUM_CAPTURE_CHANNELS, "invalid chan_id"))
return -EINVAL;
if (WARN(trans_id < TRANS_ID_START_IDX ||
trans_id >= TOTAL_CHANNELS, "invalid trans_id"))
return -EINVAL;
if (WARN_ON(!__scivc_control))
return -ENODEV;
chan_id = array_index_nospec(chan_id, NUM_CAPTURE_CHANNELS);
trans_id = array_index_nospec(trans_id, TOTAL_CHANNELS);
civc = __scivc_control;
mutex_lock(&civc->cb_ctx_lock);
if (WARN(civc->cb_ctx[trans_id].cb_func == NULL,
"transaction context at %u is idle", trans_id)) {
mutex_unlock(&civc->cb_ctx_lock);
return -EBADF;
}
if (WARN(civc->cb_ctx[chan_id].cb_func != NULL,
"channel context at %u is busy", chan_id)) {
mutex_unlock(&civc->cb_ctx_lock);
return -EBUSY;
}
/* Update cb_ctx index */
civc->cb_ctx[chan_id].cb_func = civc->cb_ctx[trans_id].cb_func;
civc->cb_ctx[chan_id].priv_context =
civc->cb_ctx[trans_id].priv_context;
/* Reset trans_id cb_ctx fields */
civc->cb_ctx[trans_id].cb_func = NULL;
civc->cb_ctx[trans_id].priv_context = NULL;
mutex_unlock(&civc->cb_ctx_lock);
spin_lock(&civc->avl_ctx_list_lock);
list_add_tail(&civc->cb_ctx[trans_id].node, &civc->avl_ctx_list);
spin_unlock(&civc->avl_ctx_list_lock);
return 0;
}
EXPORT_SYMBOL(tegra_capture_ivc_notify_chan_id);
int tegra_capture_ivc_register_capture_cb(
tegra_capture_ivc_cb_func capture_status_ind_cb,
uint32_t chan_id, const void *priv_context)
{
struct tegra_capture_ivc *civc;
int ret;
if (WARN(capture_status_ind_cb == NULL, "callback function is NULL"))
return -EINVAL;
if (WARN(chan_id >= NUM_CAPTURE_CHANNELS,
"invalid channel id %u", chan_id))
return -EINVAL;
chan_id = array_index_nospec(chan_id, NUM_CAPTURE_CHANNELS);
if (!__scivc_capture)
return -ENODEV;
civc = __scivc_capture;
ret = tegra_ivc_channel_runtime_get(civc->chan);
if (ret < 0)
return ret;
mutex_lock(&civc->cb_ctx_lock);
if (WARN(civc->cb_ctx[chan_id].cb_func != NULL,
"capture channel %u is busy", chan_id)) {
ret = -EBUSY;
goto fail;
}
civc->cb_ctx[chan_id].cb_func = capture_status_ind_cb;
civc->cb_ctx[chan_id].priv_context = priv_context;
mutex_unlock(&civc->cb_ctx_lock);
return 0;
fail:
mutex_unlock(&civc->cb_ctx_lock);
tegra_ivc_channel_runtime_put(civc->chan);
return ret;
}
EXPORT_SYMBOL(tegra_capture_ivc_register_capture_cb);
int tegra_capture_ivc_unregister_control_cb(uint32_t id)
{
struct tegra_capture_ivc *civc;
/* id could be temporary trans_id or rtcpu-allocated chan_id */
if (WARN(id >= TOTAL_CHANNELS, "invalid id %u", id))
return -EINVAL;
if (WARN_ON(!__scivc_control))
return -ENODEV;
id = array_index_nospec(id, TOTAL_CHANNELS);
civc = __scivc_control;
mutex_lock(&civc->cb_ctx_lock);
if (WARN(civc->cb_ctx[id].cb_func == NULL,
"control channel %u is idle", id)) {
mutex_unlock(&civc->cb_ctx_lock);
return -EBADF;
}
civc->cb_ctx[id].cb_func = NULL;
civc->cb_ctx[id].priv_context = NULL;
mutex_unlock(&civc->cb_ctx_lock);
/*
* If it's trans_id, client encountered an error before or during
* chan_id update; in that case the corresponding cb_ctx
* needs to be added back to the available cb_ctx list.
*/
if (id >= TRANS_ID_START_IDX) {
spin_lock(&civc->avl_ctx_list_lock);
list_add_tail(&civc->cb_ctx[id].node, &civc->avl_ctx_list);
spin_unlock(&civc->avl_ctx_list_lock);
}
tegra_ivc_channel_runtime_put(civc->chan);
return 0;
}
EXPORT_SYMBOL(tegra_capture_ivc_unregister_control_cb);
int tegra_capture_ivc_unregister_capture_cb(uint32_t chan_id)
{
struct tegra_capture_ivc *civc;
if (chan_id >= NUM_CAPTURE_CHANNELS)
return -EINVAL;
if (!__scivc_capture)
return -ENODEV;
chan_id = array_index_nospec(chan_id, NUM_CAPTURE_CHANNELS);
civc = __scivc_capture;
mutex_lock(&civc->cb_ctx_lock);
if (WARN(civc->cb_ctx[chan_id].cb_func == NULL,
"capture channel %u is idle", chan_id)) {
mutex_unlock(&civc->cb_ctx_lock);
return -EBADF;
}
civc->cb_ctx[chan_id].cb_func = NULL;
civc->cb_ctx[chan_id].priv_context = NULL;
mutex_unlock(&civc->cb_ctx_lock);
tegra_ivc_channel_runtime_put(civc->chan);
return 0;
}
EXPORT_SYMBOL(tegra_capture_ivc_unregister_capture_cb);
static inline void tegra_capture_ivc_recv_msg(
struct tegra_capture_ivc *civc,
uint32_t id,
const void *msg)
{
struct device *dev = &civc->chan->dev;
/* Check if callback function available */
if (unlikely(!civc->cb_ctx[id].cb_func)) {
dev_dbg(dev, "No callback for id %u\n", id);
} else {
/* Invoke client callback. */
civc->cb_ctx[id].cb_func(msg, civc->cb_ctx[id].priv_context);
}
}
static inline void tegra_capture_ivc_recv(struct tegra_capture_ivc *civc)
{
struct ivc *ivc = &civc->chan->ivc;
struct device *dev = &civc->chan->dev;
while (tegra_ivc_can_read(ivc)) {
const void *msg = tegra_ivc_read_get_next_frame(ivc);
const struct tegra_capture_ivc_msg_header *hdr = msg;
uint32_t id = hdr->channel_id;
trace_capture_ivc_recv(dev_name(dev), hdr->msg_id, id);
/* Check if message is valid */
if (id < TOTAL_CHANNELS) {
id = array_index_nospec(id, TOTAL_CHANNELS);
tegra_capture_ivc_recv_msg(civc, id, msg);
} else {
dev_WARN(dev, "Invalid rtcpu channel id %u", id);
}
tegra_ivc_read_advance(ivc);
}
}
static void tegra_capture_ivc_worker(struct work_struct *work)
{
struct tegra_capture_ivc *civc;
struct tegra_ivc_channel *chan;
civc = container_of(work, struct tegra_capture_ivc, work);
chan = civc->chan;
/*
* Do not process IVC events if worker gets woken up while
* this channel is suspended. There is a Christmas tree
* notify when RCE resumes and IVC bus gets set up.
*/
if (pm_runtime_get_if_in_use(&chan->dev) > 0) {
WARN_ON(!chan->is_ready);
tegra_capture_ivc_recv(civc);
pm_runtime_put(&chan->dev);
} else {
dev_dbg(&chan->dev, "extra wakeup");
}
}
static void tegra_capture_ivc_notify(struct tegra_ivc_channel *chan)
{
struct tegra_capture_ivc *civc = tegra_ivc_channel_get_drvdata(chan);
trace_capture_ivc_notify(dev_name(&chan->dev));
/* Only one thread can wait on write_q; the rest wait for ivc_wr_lock */
wake_up(&civc->write_q);
schedule_work(&civc->work);
}
#define NV(x) "nvidia," #x
static int tegra_capture_ivc_probe(struct tegra_ivc_channel *chan)
{
struct device *dev = &chan->dev;
struct tegra_capture_ivc *civc;
const char *service;
int ret;
uint32_t i;
civc = devm_kzalloc(dev, sizeof(*civc), GFP_KERNEL);
if (unlikely(civc == NULL))
return -ENOMEM;
ret = of_property_read_string(dev->of_node, NV(service),
&service);
if (unlikely(ret)) {
dev_err(dev, "missing <%s> property\n", NV(service));
return ret;
}
civc->chan = chan;
mutex_init(&civc->cb_ctx_lock);
mutex_init(&civc->ivc_wr_lock);
/* Initialize ivc_work */
INIT_WORK(&civc->work, tegra_capture_ivc_worker);
/* Initialize wait queue */
init_waitqueue_head(&civc->write_q);
/* transaction-id list of available callback contexts */
spin_lock_init(&civc->avl_ctx_list_lock);
INIT_LIST_HEAD(&civc->avl_ctx_list);
/* Add the transaction cb-contexts to the available list */
for (i = TRANS_ID_START_IDX; i < ARRAY_SIZE(civc->cb_ctx); i++)
list_add_tail(&civc->cb_ctx[i].node, &civc->avl_ctx_list);
tegra_ivc_channel_set_drvdata(chan, civc);
if (!strcmp("capture-control", service)) {
if (WARN_ON(__scivc_control != NULL))
return -EEXIST;
__scivc_control = civc;
} else if (!strcmp("capture", service)) {
if (WARN_ON(__scivc_capture != NULL))
return -EEXIST;
__scivc_capture = civc;
} else {
dev_err(dev, "Unknown ivc channel %s\n", service);
return -EINVAL;
}
return 0;
}
static void tegra_capture_ivc_remove(struct tegra_ivc_channel *chan)
{
struct tegra_capture_ivc *civc = tegra_ivc_channel_get_drvdata(chan);
cancel_work_sync(&civc->work);
if (__scivc_control == civc)
__scivc_control = NULL;
else if (__scivc_capture == civc)
__scivc_capture = NULL;
else
dev_warn(&chan->dev, "Unknown ivc channel\n");
}
static const struct of_device_id tegra_capture_ivc_channel_of_match[] = {
{ .compatible = "nvidia,tegra186-camera-ivc-protocol-capture-control" },
{ .compatible = "nvidia,tegra186-camera-ivc-protocol-capture" },
{ },
};
static const struct tegra_ivc_channel_ops tegra_capture_ivc_ops = {
.probe = tegra_capture_ivc_probe,
.remove = tegra_capture_ivc_remove,
.notify = tegra_capture_ivc_notify,
};
static struct tegra_ivc_driver tegra_capture_ivc_driver = {
.driver = {
.name = "tegra-capture-ivc",
.bus = &tegra_ivc_bus_type,
.owner = THIS_MODULE,
.of_match_table = tegra_capture_ivc_channel_of_match,
},
.dev_type = &tegra_ivc_channel_type,
.ops.channel = &tegra_capture_ivc_ops,
};
tegra_ivc_subsys_driver_default(tegra_capture_ivc_driver);
MODULE_AUTHOR("Sudhir Vyas <svyas@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra Capture IVC driver");
MODULE_LICENSE("GPL v2");


@@ -0,0 +1,246 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "clk-group.h"
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/module.h>
struct camrtc_clk_group {
struct device *device;
int nclocks;
struct clk **clocks;
struct {
struct clk *slow;
struct clk *fast;
} parents;
struct {
u32 slow;
u32 fast;
} *rates;
};
static void camrtc_clk_group_release(struct device *dev, void *res)
{
const struct camrtc_clk_group *grp = res;
int i;
for (i = 0; i < grp->nclocks; i++) {
if (grp->clocks[i])
clk_put(grp->clocks[i]);
}
if (grp->parents.slow)
clk_put(grp->parents.slow);
if (grp->parents.fast)
clk_put(grp->parents.fast);
}
static int camrtc_clk_group_get_parent(
struct device_node *np,
int index,
struct clk **return_clk)
{
struct of_phandle_args clkspec;
struct clk *clk;
int ret;
if (index < 0)
return -EINVAL;
ret = of_parse_phandle_with_args(np,
"nvidia,clock-parents", "#clock-cells", index,
&clkspec);
if (ret < 0)
return ret;
clk = of_clk_get_from_provider(&clkspec);
of_node_put(clkspec.np);
if (IS_ERR(clk))
return PTR_ERR(clk);
*return_clk = clk;
return 0;
}
struct camrtc_clk_group *camrtc_clk_group_get(
struct device *dev)
{
struct camrtc_clk_group *grp;
struct device_node *np;
int nclocks;
int nrates;
int nparents;
int index;
int ret;
if (!dev || !dev->of_node)
return ERR_PTR(-EINVAL);
np = dev->of_node;
nclocks = of_property_count_strings(np, "clock-names");
if (nclocks < 0)
return ERR_PTR(-ENOENT);
/* This has pairs of u32s: slow and fast rate for each clock */
nrates = of_property_count_u64_elems(np, "nvidia,clock-rates");
nparents = of_count_phandle_with_args(np, "nvidia,clock-parents",
"#clock-cells");
if (nparents > 0 && nparents != 2)
dev_warn(dev, "expecting exactly two \"%s\"\n",
"nvidia,clock-parents");
grp = devres_alloc(camrtc_clk_group_release,
sizeof(*grp) +
nclocks * sizeof(grp->clocks[0]) +
nclocks * sizeof(grp->rates[0]),
GFP_KERNEL);
if (!grp)
return ERR_PTR(-ENOMEM);
grp->nclocks = nclocks;
grp->device = dev;
grp->clocks = (struct clk **)(grp + 1);
grp->rates = (void *)(grp->clocks + nclocks);
for (index = 0; index < grp->nclocks; index++) {
struct clk *clk;
clk = of_clk_get(np, index);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
goto error;
}
grp->clocks[index] = clk;
if (index >= nrates)
continue;
if (of_property_read_u32_index(np, "nvidia,clock-rates",
2 * index, &grp->rates[index].slow))
dev_warn(dev, "clock-rates property not found\n");
if (of_property_read_u32_index(np, "nvidia,clock-rates",
2 * index + 1, &grp->rates[index].fast))
dev_warn(dev, "clock-rates property not found\n");
}
if (nparents == 2) {
ret = camrtc_clk_group_get_parent(np, 0, &grp->parents.slow);
if (ret < 0)
goto error;
ret = camrtc_clk_group_get_parent(np, 1, &grp->parents.fast);
if (ret < 0)
goto error;
}
devres_add(dev, grp);
return grp;
error:
devres_free(grp);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(camrtc_clk_group_get);
static void camrtc_clk_group_error(
const struct camrtc_clk_group *grp,
char const *op,
int index,
int error)
{
const char *name = "unnamed";
of_property_read_string_index(grp->device->of_node,
"clock-names", index, &name);
dev_warn(grp->device, "%s clk %s (at [%d]): failed (%d)\n",
op, name, index, error);
}
int camrtc_clk_group_enable(const struct camrtc_clk_group *grp)
{
int index, err;
if (IS_ERR_OR_NULL(grp))
return -ENODEV;
for (index = 0; index < grp->nclocks; index++) {
err = clk_prepare_enable(grp->clocks[index]);
if (err) {
camrtc_clk_group_error(grp, "enable", index, err);
return err;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(camrtc_clk_group_enable);
void camrtc_clk_group_disable(const struct camrtc_clk_group *grp)
{
int index;
if (IS_ERR_OR_NULL(grp))
return;
for (index = 0; index < grp->nclocks; index++)
clk_disable_unprepare(grp->clocks[index]);
}
EXPORT_SYMBOL_GPL(camrtc_clk_group_disable);
int camrtc_clk_group_adjust_slow(const struct camrtc_clk_group *grp)
{
int index;
if (IS_ERR_OR_NULL(grp))
return -ENODEV;
for (index = 0; index < grp->nclocks; index++) {
u32 slow = grp->rates[index].slow;
if (slow != 0)
clk_set_rate(grp->clocks[index], slow);
}
if (grp->parents.slow != NULL) {
for (index = 0; index < grp->nclocks; index++)
clk_set_parent(grp->clocks[index],
grp->parents.slow);
}
return 0;
}
EXPORT_SYMBOL_GPL(camrtc_clk_group_adjust_slow);
int camrtc_clk_group_adjust_fast(const struct camrtc_clk_group *grp)
{
int index;
if (IS_ERR_OR_NULL(grp))
return -ENODEV;
if (grp->parents.fast != NULL) {
for (index = 0; index < grp->nclocks; index++)
clk_set_parent(grp->clocks[index],
grp->parents.fast);
}
for (index = 0; index < grp->nclocks; index++) {
u32 fast = grp->rates[index].fast;
if (fast != 0)
clk_set_rate(grp->clocks[index], fast);
}
return 0;
}
EXPORT_SYMBOL_GPL(camrtc_clk_group_adjust_fast);
MODULE_LICENSE("GPL v2");


@@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#ifndef INCLUDE_CLK_GROUP_H
#define INCLUDE_CLK_GROUP_H
struct device;
struct camrtc_clk_group;
struct camrtc_clk_group *camrtc_clk_group_get(struct device *dev);
int camrtc_clk_group_enable(const struct camrtc_clk_group *grp);
void camrtc_clk_group_disable(const struct camrtc_clk_group *grp);
int camrtc_clk_group_adjust_fast(const struct camrtc_clk_group *grp);
int camrtc_clk_group_adjust_slow(const struct camrtc_clk_group *grp);
#endif /* INCLUDE_CLK_GROUP_H */
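A typical caller pairs these helpers with its power-up path, roughly as in
this sketch (pdev is the caller's platform device; error unwinding is
abbreviated):

#include <linux/err.h>
#include <linux/platform_device.h>
#include "clk-group.h"

static int example_power_up(struct platform_device *pdev)
{
        struct camrtc_clk_group *grp;
        int err;

        /* Resources are devres-managed; no explicit put is needed. */
        grp = camrtc_clk_group_get(&pdev->dev);
        if (IS_ERR(grp))
                return PTR_ERR(grp);

        err = camrtc_clk_group_enable(grp);
        if (err)
                return err;

        /* Switch to the fast rates/parent while the unit is active. */
        return camrtc_clk_group_adjust_fast(grp);
}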


File diff suppressed because it is too large.


@@ -0,0 +1,129 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "device-group.h"
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/module.h>
#include "drivers/video/tegra/host/nvhost_acm.h"
struct camrtc_device_group {
struct device *dev;
char const *names_name;
int ndevices;
struct platform_device *devices[];
};
static int get_grouped_device(struct camrtc_device_group *grp,
struct device *dev, char const *name, int index)
{
struct device_node *np;
struct platform_device *pdev;
np = of_parse_phandle(dev->of_node, name, index);
if (np == NULL)
return 0;
if (!of_device_is_available(np)) {
dev_info(dev, "%s[%u] is disabled\n", name, index);
of_node_put(np);
return 0;
}
pdev = of_find_device_by_node(np);
of_node_put(np);
if (pdev == NULL) {
dev_warn(dev, "%s[%u] node has no device\n", name, index);
return 0;
}
grp->devices[index] = pdev;
return 0;
}
static void camrtc_device_group_release(struct device *dev, void *res)
{
const struct camrtc_device_group *grp = res;
int i;
put_device(grp->dev);
for (i = 0; i < grp->ndevices; i++)
platform_device_put(grp->devices[i]);
}
struct camrtc_device_group *camrtc_device_group_get(
struct device *dev,
char const *property_name,
char const *names_property_name)
{
int index, err;
struct camrtc_device_group *grp;
int ndevices;
if (!dev || !dev->of_node)
return ERR_PTR(-EINVAL);
ndevices = of_count_phandle_with_args(dev->of_node,
property_name, NULL);
if (ndevices <= 0)
return ERR_PTR(-ENOENT);
grp = devres_alloc(camrtc_device_group_release,
offsetof(struct camrtc_device_group, devices[ndevices]),
GFP_KERNEL | __GFP_ZERO);
if (!grp)
return ERR_PTR(-ENOMEM);
grp->dev = get_device(dev);
grp->ndevices = ndevices;
grp->names_name = names_property_name;
for (index = 0; index < grp->ndevices; index++) {
err = get_grouped_device(grp, dev, property_name, index);
if (err) {
devres_free(grp);
return ERR_PTR(err);
}
}
devres_add(dev, grp);
return grp;
}
EXPORT_SYMBOL(camrtc_device_group_get);
static inline struct platform_device *platform_device_get(
struct platform_device *pdev)
{
if (pdev != NULL)
get_device(&pdev->dev);
return pdev;
}
struct platform_device *camrtc_device_get_byname(
struct camrtc_device_group *grp,
const char *device_name)
{
int index;
if (grp == NULL)
return ERR_PTR(-EINVAL);
if (grp->names_name == NULL)
return ERR_PTR(-ENOENT);
index = of_property_match_string(grp->dev->of_node, grp->names_name,
device_name);
if (index < 0)
return ERR_PTR(-ENODEV);
if (index >= grp->ndevices)
return ERR_PTR(-ENODEV);
return platform_device_get(grp->devices[index]);
}
MODULE_LICENSE("GPL v2");


@@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#ifndef INCLUDE_DEVICE_GROUP_H
#define INCLUDE_DEVICE_GROUP_H
struct device;
struct platform_device;
struct camrtc_device_group *camrtc_device_group_get(
struct device *dev,
const char *property_name,
const char *names_property_name);
struct platform_device *camrtc_device_get_byname(
struct camrtc_device_group *grp,
const char *device_name);
int camrtc_device_group_busy(const struct camrtc_device_group *grp);
void camrtc_device_group_idle(const struct camrtc_device_group *grp);
void camrtc_device_group_reset(const struct camrtc_device_group *grp);
#endif /* INCLUDE_DEVICE_GROUP_H */
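Usage is roughly as below; the two device-tree property names are
hypothetical placeholders for whatever binding the caller defines:

#include <linux/err.h>
#include <linux/platform_device.h>
#include "device-group.h"

static struct platform_device *example_find_vi(struct device *dev)
{
        struct camrtc_device_group *grp;
        struct platform_device *pdev;

        /* Property names here are illustrative only. */
        grp = camrtc_device_group_get(dev, "nvidia,camera-devices",
                                      "nvidia,camera-device-names");
        if (IS_ERR(grp))
                return NULL;

        /* Reference counted: the caller puts the device when done. */
        pdev = camrtc_device_get_byname(grp, "vi");
        return IS_ERR(pdev) ? NULL : pdev;
}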


@@ -0,0 +1,34 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#ifndef INCLUDE_RTCPU_HSP_COMBO_H
#define INCLUDE_RTCPU_HSP_COMBO_H
#include <linux/types.h>
struct camrtc_hsp;
struct device;
struct camrtc_hsp *camrtc_hsp_create(
struct device *dev,
void (*group_notify)(struct device *dev, u16 group),
long cmd_timeout);
void camrtc_hsp_free(struct camrtc_hsp *camhsp);
void camrtc_hsp_group_ring(struct camrtc_hsp *camhsp,
u16 group);
int camrtc_hsp_sync(struct camrtc_hsp *camhsp);
int camrtc_hsp_resume(struct camrtc_hsp *camhsp);
int camrtc_hsp_suspend(struct camrtc_hsp *camhsp);
int camrtc_hsp_bye(struct camrtc_hsp *camhsp);
int camrtc_hsp_ch_setup(struct camrtc_hsp *camhsp, dma_addr_t iova);
int camrtc_hsp_ping(struct camrtc_hsp *camhsp, u32 data, long timeout);
int camrtc_hsp_get_fw_hash(struct camrtc_hsp *camhsp,
u8 hash[], size_t hash_size);
#endif /* INCLUDE_RTCPU_HSP_COMBO_H */
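A sketch of the bring-up sequence a consumer (such as the camera RTCPU
driver) might use; the notify callback body and the 500 ms timeout are
illustrative choices:

#include <linux/err.h>
#include <linux/jiffies.h>
#include "hsp-combo.h"

static void example_group_notify(struct device *dev, u16 group)
{
        /* Ring the matching IVC channels, e.g. via tegra_ivc_bus_notify(). */
}

static int example_hsp_bringup(struct device *dev)
{
        struct camrtc_hsp *camhsp;
        int err;

        camhsp = camrtc_hsp_create(dev, example_group_notify,
                                   msecs_to_jiffies(500));
        if (IS_ERR(camhsp))
                return PTR_ERR(camhsp);

        err = camrtc_hsp_sync(camhsp);          /* HELLO + PROTOCOL */
        if (err >= 0)
                err = camrtc_hsp_ping(camhsp, 0xabcdU, 0);
        if (err < 0) {
                camrtc_hsp_free(camhsp);
                return err;
        }
        return 0;
}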


@@ -0,0 +1,644 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "hsp-combo.h"
#include <linux/version.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/mailbox_client.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/err.h>
#include "soc/tegra/camrtc-commands.h"
typedef struct mbox_client mbox_client;
struct camrtc_hsp_mbox {
struct mbox_client client;
struct mbox_chan *chan;
};
struct camrtc_hsp_op;
struct camrtc_hsp {
const struct camrtc_hsp_op *op;
struct camrtc_hsp_mbox rx;
struct camrtc_hsp_mbox tx;
u32 cookie;
spinlock_t sendlock;
void (*group_notify)(struct device *dev, u16 group);
struct device dev;
struct mutex mutex;
struct completion emptied;
wait_queue_head_t response_waitq;
atomic_t response;
long timeout;
};
struct camrtc_hsp_op {
int (*send)(struct camrtc_hsp *, int msg, long *timeout);
void (*group_ring)(struct camrtc_hsp *, u16 group);
int (*sync)(struct camrtc_hsp *, long *timeout);
int (*resume)(struct camrtc_hsp *, long *timeout);
int (*suspend)(struct camrtc_hsp *, long *timeout);
int (*bye)(struct camrtc_hsp *, long *timeout);
int (*ch_setup)(struct camrtc_hsp *, dma_addr_t iova, long *timeout);
int (*ping)(struct camrtc_hsp *, u32 data, long *timeout);
int (*get_fw_hash)(struct camrtc_hsp *, u32 index, long *timeout);
};
static int camrtc_hsp_send(struct camrtc_hsp *camhsp,
int request, long *timeout)
{
int ret = camhsp->op->send(camhsp, request, timeout);
if (ret == -ETIME) {
dev_err(&camhsp->dev,
"request 0x%08x: empty mailbox timeout\n", request);
} else if (ret == -EINVAL) {
dev_err(&camhsp->dev,
"request 0x%08x: invalid mbox channel\n", request);
} else if (ret == -ENOBUFS) {
dev_err(&camhsp->dev,
"request 0x%08x: no space left in mbox msg queue\n", request);
} else {
dev_dbg(&camhsp->dev,
"request sent: 0x%08x\n", request);
}
return ret;
}
static int camrtc_hsp_recv(struct camrtc_hsp *camhsp,
int command, long *timeout)
{
int response;
*timeout = wait_event_timeout(
camhsp->response_waitq,
(response = atomic_xchg(&camhsp->response, -1)) >= 0,
*timeout);
if (*timeout <= 0) {
dev_err(&camhsp->dev,
"request 0x%08x: response timeout\n", command);
return -ETIMEDOUT;
}
dev_dbg(&camhsp->dev, "request 0x%08x: response 0x%08x\n",
command, response);
return response;
}
static int camrtc_hsp_sendrecv(struct camrtc_hsp *camhsp,
int command, long *timeout)
{
int response;
response = camrtc_hsp_send(camhsp, command, timeout);
if (response >= 0)
response = camrtc_hsp_recv(camhsp, command, timeout);
return response;
}
/* ---------------------------------------------------------------------- */
/* Protocol nvidia,tegra-camrtc-hsp-vm */
static void camrtc_hsp_rx_full_notify(mbox_client *cl, void *data)
{
struct camrtc_hsp *camhsp = dev_get_drvdata(cl->dev);
u32 status, group;
u32 msg = (u32) (unsigned long) data;
status = CAMRTC_HSP_SS_FW_MASK;
status >>= CAMRTC_HSP_SS_FW_SHIFT;
group = status & CAMRTC_HSP_SS_IVC_MASK;
if (CAMRTC_HSP_MSG_ID(msg) == CAMRTC_HSP_UNKNOWN)
dev_dbg(&camhsp->dev, "request message unknown 0x%08x\n", msg);
if (group != 0)
camhsp->group_notify(camhsp->dev.parent, (u16)group);
/* Other interrupt bits are ignored for now */
if (CAMRTC_HSP_MSG_ID(msg) == CAMRTC_HSP_IRQ) {
/* We are done here */
} else if (CAMRTC_HSP_MSG_ID(msg) < CAMRTC_HSP_HELLO) {
/* Rest of the unidirectional messages are now ignored */
dev_info(&camhsp->dev, "unknown message 0x%08x\n", msg);
} else {
atomic_set(&camhsp->response, msg);
wake_up(&camhsp->response_waitq);
}
}
static void camrtc_hsp_tx_empty_notify(mbox_client *cl, void *data, int empty_value)
{
struct camrtc_hsp *camhsp = dev_get_drvdata(cl->dev);
(void)empty_value; /* ignored */
complete(&camhsp->emptied);
}
static int camrtc_hsp_vm_send(struct camrtc_hsp *camhsp,
int request, long *timeout);
static void camrtc_hsp_vm_group_ring(struct camrtc_hsp *camhsp, u16 group);
static void camrtc_hsp_vm_send_irqmsg(struct camrtc_hsp *camhsp);
static int camrtc_hsp_vm_sync(struct camrtc_hsp *camhsp, long *timeout);
static int camrtc_hsp_vm_hello(struct camrtc_hsp *camhsp, long *timeout);
static int camrtc_hsp_vm_protocol(struct camrtc_hsp *camhsp, long *timeout);
static int camrtc_hsp_vm_resume(struct camrtc_hsp *camhsp, long *timeout);
static int camrtc_hsp_vm_suspend(struct camrtc_hsp *camhsp, long *timeout);
static int camrtc_hsp_vm_bye(struct camrtc_hsp *camhsp, long *timeout);
static int camrtc_hsp_vm_ch_setup(struct camrtc_hsp *camhsp,
dma_addr_t iova, long *timeout);
static int camrtc_hsp_vm_ping(struct camrtc_hsp *camhsp,
u32 data, long *timeout);
static int camrtc_hsp_vm_get_fw_hash(struct camrtc_hsp *camhsp,
u32 index, long *timeout);
static const struct camrtc_hsp_op camrtc_hsp_vm_ops = {
.send = camrtc_hsp_vm_send,
.group_ring = camrtc_hsp_vm_group_ring,
.sync = camrtc_hsp_vm_sync,
.resume = camrtc_hsp_vm_resume,
.suspend = camrtc_hsp_vm_suspend,
.bye = camrtc_hsp_vm_bye,
.ping = camrtc_hsp_vm_ping,
.ch_setup = camrtc_hsp_vm_ch_setup,
.get_fw_hash = camrtc_hsp_vm_get_fw_hash,
};
static int camrtc_hsp_vm_send(struct camrtc_hsp *camhsp,
int request, long *timeout)
{
int response;
unsigned long flags;
spin_lock_irqsave(&camhsp->sendlock, flags);
atomic_set(&camhsp->response, -1);
response = mbox_send_message(camhsp->tx.chan, (void *)(unsigned long) request);
spin_unlock_irqrestore(&camhsp->sendlock, flags);
return response;
}
static void camrtc_hsp_vm_group_ring(struct camrtc_hsp *camhsp,
u16 group)
{
camrtc_hsp_vm_send_irqmsg(camhsp);
}
static void camrtc_hsp_vm_send_irqmsg(struct camrtc_hsp *camhsp)
{
int irqmsg = CAMRTC_HSP_MSG(CAMRTC_HSP_IRQ, 1);
int response;
unsigned long flags;
spin_lock_irqsave(&camhsp->sendlock, flags);
response = mbox_send_message(camhsp->tx.chan, (void *)(unsigned long) irqmsg);
spin_unlock_irqrestore(&camhsp->sendlock, flags);
}
static int camrtc_hsp_vm_sendrecv(struct camrtc_hsp *camhsp,
int request, long *timeout)
{
int response = camrtc_hsp_sendrecv(camhsp, request, timeout);
if (response < 0)
return response;
if (CAMRTC_HSP_MSG_ID(request) != CAMRTC_HSP_MSG_ID(response)) {
dev_err(&camhsp->dev,
"request 0x%08x mismatch with response 0x%08x\n",
request, response);
return -EIO;
}
/* Return the 24-bit parameter only */
return CAMRTC_HSP_MSG_PARAM(response);
}
static int camrtc_hsp_vm_sync(struct camrtc_hsp *camhsp, long *timeout)
{
int response = camrtc_hsp_vm_hello(camhsp, timeout);
if (response >= 0) {
camhsp->cookie = response;
response = camrtc_hsp_vm_protocol(camhsp, timeout);
}
return response;
}
static u32 camrtc_hsp_vm_cookie(void)
{
u32 value = CAMRTC_HSP_MSG_PARAM(sched_clock() >> 5U);
if (value == 0)
value++;
return value;
}
static int camrtc_hsp_vm_hello(struct camrtc_hsp *camhsp, long *timeout)
{
int request = CAMRTC_HSP_MSG(CAMRTC_HSP_HELLO, camrtc_hsp_vm_cookie());
int response = camrtc_hsp_send(camhsp, request, timeout);
if (response < 0)
return response;
for (;;) {
response = camrtc_hsp_recv(camhsp, request, timeout);
/* Wait until we get the HELLO message we sent */
if (response == request)
break;
/* ...or timeout */
if (response < 0)
break;
}
return response;
}
static int camrtc_hsp_vm_protocol(struct camrtc_hsp *camhsp, long *timeout)
{
int request = CAMRTC_HSP_MSG(CAMRTC_HSP_PROTOCOL,
RTCPU_DRIVER_SM6_VERSION);
return camrtc_hsp_vm_sendrecv(camhsp, request, timeout);
}
static int camrtc_hsp_vm_resume(struct camrtc_hsp *camhsp, long *timeout)
{
int request = CAMRTC_HSP_MSG(CAMRTC_HSP_RESUME, camhsp->cookie);
return camrtc_hsp_vm_sendrecv(camhsp, request, timeout);
}
static int camrtc_hsp_vm_suspend(struct camrtc_hsp *camhsp, long *timeout)
{
u32 request = CAMRTC_HSP_MSG(CAMRTC_HSP_SUSPEND, 0);
return camrtc_hsp_vm_sendrecv(camhsp, request, timeout);
}
static int camrtc_hsp_vm_bye(struct camrtc_hsp *camhsp, long *timeout)
{
u32 request = CAMRTC_HSP_MSG(CAMRTC_HSP_BYE, 0);
camhsp->cookie = 0U;
return camrtc_hsp_vm_sendrecv(camhsp, request, timeout);
}
static int camrtc_hsp_vm_ch_setup(struct camrtc_hsp *camhsp,
dma_addr_t iova, long *timeout)
{
u32 request = CAMRTC_HSP_MSG(CAMRTC_HSP_CH_SETUP, iova >> 8);
return camrtc_hsp_vm_sendrecv(camhsp, request, timeout);
}
static int camrtc_hsp_vm_ping(struct camrtc_hsp *camhsp, u32 data,
long *timeout)
{
u32 request = CAMRTC_HSP_MSG(CAMRTC_HSP_PING, data);
return camrtc_hsp_vm_sendrecv(camhsp, request, timeout);
}
static int camrtc_hsp_vm_get_fw_hash(struct camrtc_hsp *camhsp, u32 index,
long *timeout)
{
u32 request = CAMRTC_HSP_MSG(CAMRTC_HSP_FW_HASH, index);
return camrtc_hsp_vm_sendrecv(camhsp, request, timeout);
}
static int camrtc_hsp_vm_probe(struct camrtc_hsp *camhsp)
{
struct device_node *np = camhsp->dev.parent->of_node;
int err = -ENOTSUPP;
const char *obtain = "";
np = of_get_compatible_child(np, "nvidia,tegra-camrtc-hsp-vm");
if (!of_device_is_available(np)) {
of_node_put(np);
dev_err(&camhsp->dev, "no hsp protocol \"%s\"\n",
"nvidia,tegra-camrtc-hsp-vm");
return -ENOTSUPP;
}
camhsp->dev.of_node = np;
obtain = "vm-rx";
camhsp->rx.chan = mbox_request_channel_byname(&camhsp->rx.client, "vm-rx");
if (IS_ERR(camhsp->rx.chan)) {
err = PTR_ERR(camhsp->rx.chan);
goto fail;
}
obtain = "vm-tx";
camhsp->tx.chan = mbox_request_channel_byname(&camhsp->tx.client, "vm-tx");
if (IS_ERR(camhsp->tx.chan)) {
err = PTR_ERR(camhsp->tx.chan);
goto fail;
}
camhsp->op = &camrtc_hsp_vm_ops;
dev_set_name(&camhsp->dev, "%s:%s",
dev_name(camhsp->dev.parent), camhsp->dev.of_node->name);
dev_dbg(&camhsp->dev, "probed\n");
return 0;
fail:
if (err != -EPROBE_DEFER) {
dev_err(&camhsp->dev, "%s: failed to obtain %s: %d\n",
np->name, obtain, err);
}
of_node_put(np);
return err;
}
/* ---------------------------------------------------------------------- */
/* Public interface */
void camrtc_hsp_group_ring(struct camrtc_hsp *camhsp,
u16 group)
{
if (!WARN_ON(camhsp == NULL))
camhsp->op->group_ring(camhsp, group);
}
EXPORT_SYMBOL(camrtc_hsp_group_ring);
/*
* Synchronize the HSP
*/
int camrtc_hsp_sync(struct camrtc_hsp *camhsp)
{
long timeout;
int response;
if (WARN_ON(camhsp == NULL))
return -EINVAL;
timeout = camhsp->timeout;
mutex_lock(&camhsp->mutex);
response = camhsp->op->sync(camhsp, &timeout);
mutex_unlock(&camhsp->mutex);
return response;
}
EXPORT_SYMBOL(camrtc_hsp_sync);
/*
* Resume: resume the firmware
*/
int camrtc_hsp_resume(struct camrtc_hsp *camhsp)
{
long timeout;
int response;
if (WARN_ON(camhsp == NULL))
return -EINVAL;
timeout = camhsp->timeout;
mutex_lock(&camhsp->mutex);
response = camhsp->op->resume(camhsp, &timeout);
mutex_unlock(&camhsp->mutex);
return response;
}
EXPORT_SYMBOL(camrtc_hsp_resume);
/*
* Suspend: set firmware to idle.
*/
int camrtc_hsp_suspend(struct camrtc_hsp *camhsp)
{
long timeout;
int response;
if (WARN_ON(camhsp == NULL))
return -EINVAL;
timeout = camhsp->timeout;
mutex_lock(&camhsp->mutex);
response = camhsp->op->suspend(camhsp, &timeout);
mutex_unlock(&camhsp->mutex);
if (response != 0)
dev_info(&camhsp->dev, "PM_SUSPEND failed: 0x%08x\n",
response);
return response <= 0 ? response : -EIO;
}
EXPORT_SYMBOL(camrtc_hsp_suspend);
/*
* Bye: tell firmware that VM mappings are going away
*/
int camrtc_hsp_bye(struct camrtc_hsp *camhsp)
{
long timeout;
int response;
if (WARN_ON(camhsp == NULL))
return -EINVAL;
timeout = camhsp->timeout;
mutex_lock(&camhsp->mutex);
response = camhsp->op->bye(camhsp, &timeout);
mutex_unlock(&camhsp->mutex);
if (response != 0)
dev_warn(&camhsp->dev, "BYE failed: 0x%08x\n", response);
return response;
}
EXPORT_SYMBOL(camrtc_hsp_bye);
int camrtc_hsp_ch_setup(struct camrtc_hsp *camhsp, dma_addr_t iova)
{
long timeout;
int response;
if (WARN_ON(camhsp == NULL))
return -EINVAL;
if (iova >= BIT_ULL(32) || (iova & 0xffU) != 0) {
dev_warn(&camhsp->dev,
"CH_SETUP invalid iova: 0x%08llx\n", iova);
return -EINVAL;
}
timeout = camhsp->timeout;
mutex_lock(&camhsp->mutex);
response = camhsp->op->ch_setup(camhsp, iova, &timeout);
mutex_unlock(&camhsp->mutex);
if (response > 0)
dev_dbg(&camhsp->dev, "CH_SETUP failed: 0x%08x\n", response);
return response;
}
EXPORT_SYMBOL(camrtc_hsp_ch_setup);
int camrtc_hsp_ping(struct camrtc_hsp *camhsp, u32 data, long timeout)
{
long left = timeout;
int response;
if (WARN_ON(camhsp == NULL))
return -EINVAL;
if (left == 0L)
left = camhsp->timeout;
mutex_lock(&camhsp->mutex);
response = camhsp->op->ping(camhsp, data, &left);
mutex_unlock(&camhsp->mutex);
return response;
}
EXPORT_SYMBOL(camrtc_hsp_ping);
int camrtc_hsp_get_fw_hash(struct camrtc_hsp *camhsp,
u8 hash[], size_t hash_size)
{
int i;
int ret = 0;
long timeout;
if (WARN_ON(camhsp == NULL))
return -EINVAL;
memset(hash, 0, hash_size);
timeout = camhsp->timeout;
mutex_lock(&camhsp->mutex);
for (i = 0; i < hash_size; i++) {
int value = camhsp->op->get_fw_hash(camhsp, i, &timeout);
if (value < 0 || value > 255) {
dev_info(&camhsp->dev,
"FW_HASH failed: 0x%08x\n", value);
ret = value < 0 ? value : -EIO;
goto fail;
}
hash[i] = value;
}
fail:
mutex_unlock(&camhsp->mutex);
return ret;
}
EXPORT_SYMBOL(camrtc_hsp_get_fw_hash);
static const struct device_type camrtc_hsp_combo_dev_type = {
.name = "camrtc-hsp-protocol",
};
static void camrtc_hsp_combo_dev_release(struct device *dev)
{
struct camrtc_hsp *camhsp = container_of(dev, struct camrtc_hsp, dev);
if (!IS_ERR_OR_NULL(camhsp->rx.chan))
mbox_free_channel(camhsp->rx.chan);
if (!IS_ERR_OR_NULL(camhsp->tx.chan))
mbox_free_channel(camhsp->tx.chan);
of_node_put(dev->of_node);
kfree(camhsp);
}
static int camrtc_hsp_probe(struct camrtc_hsp *camhsp)
{
int ret;
ret = camrtc_hsp_vm_probe(camhsp);
if (ret != -ENOTSUPP)
return ret;
return -ENODEV;
}
struct camrtc_hsp *camrtc_hsp_create(
struct device *dev,
void (*group_notify)(struct device *dev, u16 group),
long cmd_timeout)
{
struct camrtc_hsp *camhsp;
int ret = -EINVAL;
camhsp = kzalloc(sizeof(*camhsp), GFP_KERNEL);
if (camhsp == NULL)
return ERR_PTR(-ENOMEM);
camhsp->dev.parent = dev;
camhsp->group_notify = group_notify;
camhsp->timeout = cmd_timeout;
mutex_init(&camhsp->mutex);
spin_lock_init(&camhsp->sendlock);
init_waitqueue_head(&camhsp->response_waitq);
init_completion(&camhsp->emptied);
atomic_set(&camhsp->response, -1);
camhsp->dev.type = &camrtc_hsp_combo_dev_type;
camhsp->dev.release = camrtc_hsp_combo_dev_release;
device_initialize(&camhsp->dev);
dev_set_name(&camhsp->dev, "%s:%s", dev_name(dev), "hsp");
pm_runtime_no_callbacks(&camhsp->dev);
pm_runtime_enable(&camhsp->dev);
camhsp->tx.client.tx_block = false;
camhsp->rx.client.rx_callback = camrtc_hsp_rx_full_notify;
camhsp->tx.client.tx_done = camrtc_hsp_tx_empty_notify;
camhsp->rx.client.dev = camhsp->tx.client.dev = &(camhsp->dev);
ret = camrtc_hsp_probe(camhsp);
if (ret < 0)
goto fail;
ret = device_add(&camhsp->dev);
if (ret < 0)
goto fail;
dev_set_drvdata(&camhsp->dev, camhsp);
return camhsp;
fail:
camrtc_hsp_free(camhsp);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(camrtc_hsp_create);
void camrtc_hsp_free(struct camrtc_hsp *camhsp)
{
if (IS_ERR_OR_NULL(camhsp))
return;
pm_runtime_disable(&camhsp->dev);
if (dev_get_drvdata(&camhsp->dev) != NULL)
device_unregister(&camhsp->dev);
else
put_device(&camhsp->dev);
}
EXPORT_SYMBOL(camrtc_hsp_free);
MODULE_LICENSE("GPL v2");


@@ -0,0 +1,623 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/tegra-ivc.h>
#include <linux/tegra-ivc-bus.h>
#include <linux/tegra-camera-rtcpu.h>
#include <linux/bitops.h>
#include "soc/tegra/camrtc-channels.h"
#include "soc/tegra/camrtc-commands.h"
#define NV(p) "nvidia," #p
#define CAMRTC_IVC_CONFIG_SIZE 4096
struct tegra_ivc_region {
uintptr_t base;
size_t size;
dma_addr_t iova;
size_t config_size;
size_t ivc_size;
};
struct tegra_ivc_bus {
struct device dev;
struct tegra_ivc_channel *chans;
unsigned int num_regions;
struct tegra_ivc_region regions[];
};
static void tegra_ivc_channel_ring(struct ivc *ivc)
{
struct tegra_ivc_channel *chan =
container_of(ivc, struct tegra_ivc_channel, ivc);
struct tegra_ivc_bus *bus =
container_of(chan->dev.parent, struct tegra_ivc_bus, dev);
tegra_camrtc_ivc_ring(bus->dev.parent, chan->group);
}
struct device_type tegra_ivc_channel_type = {
.name = "tegra-ivc-channel",
};
EXPORT_SYMBOL(tegra_ivc_channel_type);
int tegra_ivc_channel_runtime_get(struct tegra_ivc_channel *ch)
{
BUG_ON(ch == NULL);
return pm_runtime_get_sync(&ch->dev);
}
EXPORT_SYMBOL(tegra_ivc_channel_runtime_get);
void tegra_ivc_channel_runtime_put(struct tegra_ivc_channel *ch)
{
BUG_ON(ch == NULL);
pm_runtime_put(&ch->dev);
}
EXPORT_SYMBOL(tegra_ivc_channel_runtime_put);
static void tegra_ivc_channel_release(struct device *dev)
{
struct tegra_ivc_channel *chan =
container_of(dev, struct tegra_ivc_channel, dev);
of_node_put(dev->of_node);
kfree(chan);
}
static struct tegra_ivc_channel *tegra_ivc_channel_create(
struct tegra_ivc_bus *bus, struct device_node *ch_node,
struct tegra_ivc_region *region)
{
struct device *peer_device = bus->dev.parent;
struct camrtc_tlv_ivc_setup *tlv;
struct {
u32 rx;
u32 tx;
} start, end;
u32 version, channel_group, nframes, frame_size, queue_size;
const char *service;
int ret;
struct tegra_ivc_channel *chan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (unlikely(chan == NULL))
return ERR_PTR(-ENOMEM);
chan->dev.parent = &bus->dev;
chan->dev.type = &tegra_ivc_channel_type;
chan->dev.bus = &tegra_ivc_bus_type;
chan->dev.of_node = of_node_get(ch_node);
chan->dev.release = tegra_ivc_channel_release;
dev_set_name(&chan->dev, "%s:%s", dev_name(&bus->dev),
kbasename(ch_node->full_name));
device_initialize(&chan->dev);
pm_runtime_no_callbacks(&chan->dev);
pm_runtime_enable(&chan->dev);
ret = of_property_read_string(ch_node, NV(service), &service);
if (ret) {
dev_err(&chan->dev, "missing <%s> property\n",
NV(service));
goto error;
}
ret = of_property_read_u32(ch_node, NV(version), &version);
if (ret)
version = 0;
ret = of_property_read_u32(ch_node, NV(group), &channel_group);
if (ret) {
dev_err(&chan->dev, "missing <%s> property\n", NV(group));
goto error;
}
/* We have 15 channel group bits available */
if ((channel_group & 0x7FFFU) != channel_group) {
dev_err(&chan->dev, "invalid property %s = 0x%x\n",
NV(group), channel_group);
ret = -EINVAL;
goto error;
}
ret = of_property_read_u32(ch_node, NV(frame-count), &nframes);
if (ret || !nframes) {
dev_err(&chan->dev, "missing <%s> property\n",
NV(frame-count));
ret = ret ?: -EINVAL;
goto error;
}
nframes = 1 << fls(nframes - 1); /* Round up to a power of two */
ret = of_property_read_u32(ch_node, NV(frame-size), &frame_size);
if (ret || !frame_size) {
dev_err(&chan->dev, "missing <%s> property\n", NV(frame-size));
ret = ret ?: -EINVAL;
goto error;
}
if (region->config_size + sizeof(*tlv) > CAMRTC_IVC_CONFIG_SIZE) {
dev_err(&chan->dev, "IVC config size exceeded\n");
ret = -ENOSPC;
goto error;
}
queue_size = tegra_ivc_total_queue_size(nframes * frame_size);
if (region->ivc_size + 2 * queue_size > region->size) {
dev_err(&chan->dev, "buffers exceed IVC region\n");
ret = -ENOSPC;
goto error;
}
start.rx = region->ivc_size;
region->ivc_size += queue_size;
end.rx = region->ivc_size;
start.tx = end.rx;
region->ivc_size += queue_size;
end.tx = region->ivc_size;
/* Init IVC */
ret = tegra_ivc_init_with_dma_handle(&chan->ivc,
region->base + start.rx, region->iova + start.rx,
region->base + start.tx, region->iova + start.tx,
nframes, frame_size,
/* Device used to allocate the shared memory for IVC */
peer_device,
tegra_ivc_channel_ring);
if (ret) {
dev_err(&chan->dev, "IVC initialization error: %d\n", ret);
goto error;
}
chan->group = channel_group;
tegra_ivc_channel_reset(&chan->ivc);
/* Fill channel descriptor */
tlv = (struct camrtc_tlv_ivc_setup *)
(region->base + region->config_size);
tlv->tag = CAMRTC_TAG_IVC_SETUP;
tlv->len = sizeof(*tlv);
tlv->rx_iova = region->iova + start.rx;
tlv->rx_frame_size = frame_size;
tlv->rx_nframes = nframes;
tlv->tx_iova = region->iova + start.tx;
tlv->tx_frame_size = frame_size;
tlv->tx_nframes = nframes;
tlv->channel_group = channel_group;
tlv->ivc_version = version;
if (strscpy(tlv->ivc_service, service, sizeof(tlv->ivc_service)) < 0)
dev_warn(&chan->dev, "service name <%s> too long\n", service);
region->config_size += sizeof(*tlv);
(++tlv)->tag = 0; /* terminator */
dev_info(&chan->dev,
"%s: ver=%u grp=%u RX[%ux%u]=0x%x-0x%x TX[%ux%u]=0x%x-0x%x\n",
ch_node->name, version, channel_group,
nframes, frame_size, start.rx, end.rx,
nframes, frame_size, start.tx, end.tx);
ret = device_add(&chan->dev);
if (ret) {
dev_err(&chan->dev, "channel device error: %d\n", ret);
goto error;
}
return chan;
error:
put_device(&chan->dev);
return ERR_PTR(ret);
}
static void tegra_ivc_channel_notify(struct tegra_ivc_channel *chan)
{
const struct tegra_ivc_channel_ops *ops;
if (tegra_ivc_channel_notified(&chan->ivc) != 0)
return;
if (!chan->is_ready)
return;
rcu_read_lock();
ops = rcu_dereference(chan->ops);
if (ops != NULL && ops->notify != NULL)
ops->notify(chan);
rcu_read_unlock();
}
void tegra_ivc_bus_notify(struct tegra_ivc_bus *bus, u16 group)
{
struct tegra_ivc_channel *chan;
for (chan = bus->chans; chan != NULL; chan = chan->next) {
if ((chan->group & group) != 0)
tegra_ivc_channel_notify(chan);
}
}
EXPORT_SYMBOL(tegra_ivc_bus_notify);
struct device_type tegra_ivc_bus_dev_type = {
.name = "tegra-ivc-bus",
};
EXPORT_SYMBOL(tegra_ivc_bus_dev_type);
static void tegra_ivc_bus_release(struct device *dev)
{
struct tegra_ivc_bus *bus =
container_of(dev, struct tegra_ivc_bus, dev);
int i;
of_node_put(dev->of_node);
for (i = 0; i < bus->num_regions; i++) {
if (!bus->regions[i].base)
continue;
dma_free_coherent(dev->parent, bus->regions[i].size,
(void *)bus->regions[i].base,
bus->regions[i].iova);
}
kfree(bus);
}
static int tegra_ivc_bus_match(struct device *dev, struct device_driver *drv)
{
struct tegra_ivc_driver *ivcdrv = to_tegra_ivc_driver(drv);
if (dev->type != ivcdrv->dev_type)
return 0;
return of_driver_match_device(dev, drv);
}
static void tegra_ivc_bus_stop(struct tegra_ivc_bus *bus)
{
while (bus->chans != NULL) {
struct tegra_ivc_channel *chan = bus->chans;
bus->chans = chan->next;
pm_runtime_disable(&chan->dev);
device_unregister(&chan->dev);
}
}
static int tegra_ivc_bus_start(struct tegra_ivc_bus *bus)
{
struct device_node *dn = bus->dev.parent->of_node;
struct of_phandle_args reg_spec;
const char *status;
int i, ret;
for (i = 0;
of_parse_phandle_with_fixed_args(dn, NV(ivc-channels), 3,
i, &reg_spec) == 0;
i++) {
struct device_node *ch_node;
for_each_child_of_node(reg_spec.np, ch_node) {
struct tegra_ivc_channel *chan;
ret = of_property_read_string(ch_node,
"status", &status);
if (ret == 0) {
ret = strcmp(status, "disabled");
if (ret == 0)
continue;
}
chan = tegra_ivc_channel_create(bus, ch_node,
&bus->regions[i]);
if (IS_ERR(chan)) {
ret = PTR_ERR(chan);
of_node_put(ch_node);
goto error;
}
chan->next = bus->chans;
bus->chans = chan;
}
}
return 0;
error:
tegra_ivc_bus_stop(bus);
return ret;
}
/*
* This is called during RTCPU boot to synchronize
* (or re-synchronize in the case of PM resume).
*/
int tegra_ivc_bus_boot_sync(struct tegra_ivc_bus *bus)
{
int i;
if (IS_ERR_OR_NULL(bus))
return 0;
for (i = 0; i < bus->num_regions; i++) {
int ret = tegra_camrtc_iovm_setup(bus->dev.parent,
bus->regions[i].iova);
if (ret != 0) {
dev_info(&bus->dev, "IOVM setup error: %d\n", ret);
return -EIO;
}
}
return 0;
}
EXPORT_SYMBOL(tegra_ivc_bus_boot_sync);
static int tegra_ivc_bus_probe(struct device *dev)
{
int ret = -ENXIO;
if (dev->type == &tegra_ivc_channel_type) {
struct tegra_ivc_driver *drv = to_tegra_ivc_driver(dev->driver);
struct tegra_ivc_channel *chan = to_tegra_ivc_channel(dev);
const struct tegra_ivc_channel_ops *ops = drv->ops.channel;
mutex_init(&chan->ivc_wr_lock);
BUG_ON(ops == NULL);
if (ops->probe != NULL) {
ret = ops->probe(chan);
if (ret)
return ret;
}
rcu_assign_pointer(chan->ops, ops);
ret = 0;
}
return ret;
}
static void tegra_ivc_bus_remove(struct device *dev)
{
if (dev->type == &tegra_ivc_channel_type) {
struct tegra_ivc_driver *drv = to_tegra_ivc_driver(dev->driver);
struct tegra_ivc_channel *chan = to_tegra_ivc_channel(dev);
const struct tegra_ivc_channel_ops *ops = drv->ops.channel;
WARN_ON(rcu_access_pointer(chan->ops) != ops);
RCU_INIT_POINTER(chan->ops, NULL);
synchronize_rcu();
if (ops->remove != NULL)
ops->remove(chan);
}
}
static int tegra_ivc_bus_ready_child(struct device *dev, void *data)
{
struct tegra_ivc_driver *drv = to_tegra_ivc_driver(dev->driver);
bool is_ready = (data != NULL) ? *(bool *)data : true;
if (dev->type == &tegra_ivc_channel_type) {
struct tegra_ivc_channel *chan = to_tegra_ivc_channel(dev);
const struct tegra_ivc_channel_ops *ops;
chan->is_ready = is_ready;
if (!is_ready)
atomic_inc(&chan->bus_resets);
smp_wmb();
if (drv != NULL) {
rcu_read_lock();
ops = rcu_dereference(chan->ops);
if (ops != NULL && ops->ready != NULL)
ops->ready(chan, is_ready);
rcu_read_unlock();
} else {
dev_warn(dev, "ivc channel driver missing\n");
}
}
return 0;
}
struct bus_type tegra_ivc_bus_type = {
.name = "tegra-ivc-bus",
.match = tegra_ivc_bus_match,
.probe = tegra_ivc_bus_probe,
.remove = tegra_ivc_bus_remove,
};
EXPORT_SYMBOL(tegra_ivc_bus_type);
int tegra_ivc_driver_register(struct tegra_ivc_driver *drv)
{
return driver_register(&drv->driver);
}
EXPORT_SYMBOL(tegra_ivc_driver_register);
void tegra_ivc_driver_unregister(struct tegra_ivc_driver *drv)
{
return driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(tegra_ivc_driver_unregister);
static int tegra_ivc_bus_parse_regions(struct tegra_ivc_bus *bus,
struct device_node *dev_node)
{
struct of_phandle_args reg_spec;
int i;
/* Parse out all regions in a node */
for (i = 0;
of_parse_phandle_with_fixed_args(dev_node, NV(ivc-channels), 3,
i, &reg_spec) == 0;
i++) {
struct device_node *ch_node;
struct tegra_ivc_region *region = &bus->regions[i];
u32 nframes, frame_size, size = CAMRTC_IVC_CONFIG_SIZE;
int ret = -ENODEV;
if (reg_spec.args_count < 3) {
of_node_put(reg_spec.np);
dev_err(&bus->dev, "invalid region specification\n");
return -EINVAL;
}
for_each_child_of_node(reg_spec.np, ch_node) {
ret = of_property_read_u32(ch_node, NV(frame-count),
&nframes);
if (ret || !nframes) {
dev_err(&bus->dev, "missing <%s> property\n",
NV(frame-count));
ret = ret ?: -EINVAL;
of_node_put(ch_node);
break;
}
/* Round up to a power of two */
nframes = 1 << fls(nframes - 1);
ret = of_property_read_u32(ch_node, NV(frame-size),
&frame_size);
if (ret || !frame_size) {
dev_err(&bus->dev, "missing <%s> property\n",
NV(frame-size));
ret = ret ?: -EINVAL;
of_node_put(ch_node);
break;
}
size += 2 * tegra_ivc_total_queue_size(nframes *
frame_size);
}
of_node_put(reg_spec.np);
if (ret)
return ret;
region->base =
(uintptr_t)dma_alloc_coherent(bus->dev.parent,
size, &region->iova,
GFP_KERNEL | __GFP_ZERO);
if (!region->base)
return -ENOMEM;
region->size = size;
region->config_size = 0;
region->ivc_size = CAMRTC_IVC_CONFIG_SIZE;
dev_info(&bus->dev, "region %u: iova=0x%x-0x%x size=%u\n",
i, (u32)region->iova, (u32)region->iova + size - 1,
size);
}
return 0;
}
static unsigned int tegra_ivc_bus_count_regions(const struct device_node *dev_node)
{
unsigned int i;
for (i = 0; of_parse_phandle_with_fixed_args(dev_node,
NV(ivc-channels), 3, i, NULL) == 0; i++)
;
return i;
}
struct tegra_ivc_bus *tegra_ivc_bus_create(struct device *dev)
{
struct tegra_ivc_bus *bus;
unsigned int num;
int ret;
num = tegra_ivc_bus_count_regions(dev->of_node);
bus = kzalloc(sizeof(*bus) + num * sizeof(*bus->regions), GFP_KERNEL);
if (unlikely(bus == NULL))
return ERR_PTR(-ENOMEM);
bus->num_regions = num;
bus->dev.parent = dev;
bus->dev.type = &tegra_ivc_bus_dev_type;
bus->dev.bus = &tegra_ivc_bus_type;
bus->dev.of_node = of_get_child_by_name(dev->of_node, "hsp");
bus->dev.release = tegra_ivc_bus_release;
dev_set_name(&bus->dev, "%s:ivc-bus", dev_name(dev));
device_initialize(&bus->dev);
pm_runtime_no_callbacks(&bus->dev);
pm_runtime_enable(&bus->dev);
ret = tegra_ivc_bus_parse_regions(bus, dev->of_node);
if (ret) {
dev_err(&bus->dev, "IVC regions setup failed: %d\n", ret);
goto error;
}
ret = device_add(&bus->dev);
if (ret) {
dev_err(&bus->dev, "IVC instance error: %d\n", ret);
goto error;
}
ret = tegra_ivc_bus_start(bus);
if (ret) {
dev_err(&bus->dev, "bus start failed: %d\n", ret);
goto error;
}
return bus;
error:
put_device(&bus->dev);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(tegra_ivc_bus_create);
/*
* Communicate RTCPU UP/DOWN state to IVC devices.
*/
void tegra_ivc_bus_ready(struct tegra_ivc_bus *bus, bool online)
{
if (IS_ERR_OR_NULL(bus))
return;
device_for_each_child(&bus->dev, &online, tegra_ivc_bus_ready_child);
if (online)
tegra_ivc_bus_notify(bus, 0xFFFFU);
}
EXPORT_SYMBOL(tegra_ivc_bus_ready);
void tegra_ivc_bus_destroy(struct tegra_ivc_bus *bus)
{
if (IS_ERR_OR_NULL(bus))
return;
pm_runtime_disable(&bus->dev);
tegra_ivc_bus_stop(bus);
device_unregister(&bus->dev);
}
EXPORT_SYMBOL(tegra_ivc_bus_destroy);
static __init int tegra_ivc_bus_init(void)
{
return bus_register(&tegra_ivc_bus_type);
}
static __exit void tegra_ivc_bus_exit(void)
{
bus_unregister(&tegra_ivc_bus_type);
}
module_init(tegra_ivc_bus_init);
module_exit(tegra_ivc_bus_exit);
MODULE_AUTHOR("Remi Denis-Courmont <remid@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra IVC generic bus driver");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,142 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "reset-group.h"
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/reset.h>
#include <linux/module.h>
struct camrtc_reset_group {
struct device *device;
const char *group_name;
int nresets;
struct reset_control *resets[];
};
static void camrtc_reset_group_release(struct device *dev, void *res)
{
const struct camrtc_reset_group *grp = res;
int i;
for (i = 0; i < grp->nresets; i++) {
if (grp->resets[i])
reset_control_put(grp->resets[i]);
}
}
struct camrtc_reset_group *camrtc_reset_group_get(
struct device *dev,
const char *group_name)
{
struct camrtc_reset_group *grp;
struct device_node *np;
const char *group_property;
size_t group_name_len;
int index;
int ret;
if (!dev || !dev->of_node)
return ERR_PTR(-EINVAL);
np = dev->of_node;
group_property = group_name ? group_name : "reset-names";
group_name_len = group_name ? strlen(group_name) : 0;
ret = of_property_count_strings(np, group_property);
if (ret < 0)
return ERR_PTR(-ENOENT);
grp = devres_alloc(camrtc_reset_group_release,
offsetof(struct camrtc_reset_group, resets[ret]) +
group_name_len + 1,
GFP_KERNEL);
if (!grp)
return ERR_PTR(-ENOMEM);
grp->nresets = ret;
grp->device = dev;
grp->group_name = (char *)&grp->resets[grp->nresets];
if (group_name != NULL)
memcpy((char *)grp->group_name, group_name, group_name_len);
for (index = 0; index < grp->nresets; index++) {
char const *name;
struct reset_control *reset;
ret = of_property_read_string_index(np, group_property,
index, &name);
if (ret < 0)
goto error;
reset = of_reset_control_get(np, name);
if (IS_ERR(reset)) {
ret = PTR_ERR(reset);
goto error;
}
grp->resets[index] = reset;
}
devres_add(dev, grp);
return grp;
error:
devres_free(grp);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(camrtc_reset_group_get);
static void camrtc_reset_group_error(
const struct camrtc_reset_group *grp,
char const *op,
int index,
int error)
{
const char *name = "unnamed";
of_property_read_string_index(grp->device->of_node,
grp->group_name, index, &name);
dev_warn(grp->device, "%s reset %s (at %s[%d]): %d\n",
op, name, grp->group_name, index, error);
}
void camrtc_reset_group_assert(const struct camrtc_reset_group *grp)
{
int index, index0, err;
if (IS_ERR_OR_NULL(grp))
return;
for (index = 1; index <= grp->nresets; index++) {
index0 = grp->nresets - index;
err = reset_control_assert(grp->resets[index0]);
if (err < 0)
camrtc_reset_group_error(grp, "assert", index0, err);
}
}
EXPORT_SYMBOL_GPL(camrtc_reset_group_assert);
int camrtc_reset_group_deassert(const struct camrtc_reset_group *grp)
{
int index, err;
if (!grp)
return 0;
if (IS_ERR(grp))
return -ENODEV;
for (index = 0; index < grp->nresets; index++) {
err = reset_control_deassert(grp->resets[index]);
if (err < 0) {
camrtc_reset_group_error(grp, "deassert", index, err);
return err;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(camrtc_reset_group_deassert);
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#ifndef INCLUDE_RESET_GROUP_H
#define INCLUDE_RESET_GROUP_H
struct device;
struct camrtc_reset_group;
struct camrtc_reset_group *camrtc_reset_group_get(
struct device *dev,
const char *group_name);
void camrtc_reset_group_assert(const struct camrtc_reset_group *grp);
int camrtc_reset_group_deassert(const struct camrtc_reset_group *grp);
#endif /* INCLUDE_RESET_GROUP_H */

View File

@@ -0,0 +1,115 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/tegra-camera-rtcpu.h>
#include <linux/tegra-rtcpu-monitor.h>
#include "drivers/video/tegra/host/vi/vi_notify.h"
#include "vi-notify.h"
struct tegra_camrtc_mon {
struct device *rce_dev;
int wdt_irq;
struct work_struct wdt_work;
};
int tegra_camrtc_mon_restore_rtcpu(struct tegra_camrtc_mon *cam_rtcpu_mon)
{
/* (Re)boot the rtcpu */
/* rtcpu-down and rtcpu-up events are broadcast to all ivc channels */
return tegra_camrtc_reboot(cam_rtcpu_mon->rce_dev);
}
EXPORT_SYMBOL(tegra_camrtc_mon_restore_rtcpu);
static void tegra_camrtc_mon_wdt_worker(struct work_struct *work)
{
struct tegra_camrtc_mon *cam_rtcpu_mon = container_of(work,
struct tegra_camrtc_mon, wdt_work);
dev_info(cam_rtcpu_mon->rce_dev,
"Alert: Camera RTCPU gone bad! restoring it immediately!!\n");
tegra_camrtc_mon_restore_rtcpu(cam_rtcpu_mon);
/* Enable WDT IRQ */
enable_irq(cam_rtcpu_mon->wdt_irq);
}
static irqreturn_t tegra_camrtc_mon_wdt_remote_isr(int irq, void *data)
{
struct tegra_camrtc_mon *cam_rtcpu_mon = data;
disable_irq_nosync(irq);
schedule_work(&cam_rtcpu_mon->wdt_work);
return IRQ_HANDLED;
}
static int tegra_camrtc_mon_wdt_irq_setup(
struct tegra_camrtc_mon *cam_rtcpu_mon)
{
struct platform_device *pdev =
to_platform_device(cam_rtcpu_mon->rce_dev);
int ret;
cam_rtcpu_mon->wdt_irq = platform_get_irq_byname(pdev, "wdt-remote");
if (cam_rtcpu_mon->wdt_irq < 0) {
dev_warn(&pdev->dev, "missing irq wdt-remote\n");
return -ENODEV;
}
ret = devm_request_threaded_irq(&pdev->dev, cam_rtcpu_mon->wdt_irq,
NULL, tegra_camrtc_mon_wdt_remote_isr, IRQF_ONESHOT,
dev_name(cam_rtcpu_mon->rce_dev), cam_rtcpu_mon);
if (ret)
return ret;
dev_info(&pdev->dev, "using cam RTCPU IRQ (%d)\n",
cam_rtcpu_mon->wdt_irq);
return 0;
}
struct tegra_camrtc_mon *tegra_camrtc_mon_create(struct device *dev)
{
struct tegra_camrtc_mon *cam_rtcpu_mon;
cam_rtcpu_mon = devm_kzalloc(dev, sizeof(*cam_rtcpu_mon), GFP_KERNEL);
if (unlikely(cam_rtcpu_mon == NULL))
return ERR_PTR(-ENOMEM);
cam_rtcpu_mon->rce_dev = dev;
/* Initialize wdt_work */
INIT_WORK(&cam_rtcpu_mon->wdt_work, tegra_camrtc_mon_wdt_worker);
tegra_camrtc_mon_wdt_irq_setup(cam_rtcpu_mon);
dev_info(dev, "tegra_camrtc_mon_create is successful\n");
return cam_rtcpu_mon;
}
EXPORT_SYMBOL(tegra_camrtc_mon_create);
int tegra_cam_rtcpu_mon_destroy(struct tegra_camrtc_mon *cam_rtcpu_mon)
{
if (IS_ERR_OR_NULL(cam_rtcpu_mon))
return -EINVAL;
devm_kfree(cam_rtcpu_mon->rce_dev, cam_rtcpu_mon);
return 0;
}
EXPORT_SYMBOL(tegra_cam_rtcpu_mon_destroy);
MODULE_DESCRIPTION("CAMERA RTCPU monitor driver");
MODULE_AUTHOR("Sudhir Vyas <svyas@nvidia.com>");
MODULE_LICENSE("GPL v2");

View File

File diff suppressed because it is too large

View File

File diff suppressed because it is too large

View File

@@ -0,0 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#ifndef _LINUX_TEGRA_CAMERA_RTCPU_H_
#define _LINUX_TEGRA_CAMERA_RTCPU_H_
#include <linux/types.h>
struct device;
int tegra_camrtc_iovm_setup(struct device *dev, dma_addr_t iova);
ssize_t tegra_camrtc_print_version(struct device *dev, char *buf, size_t size);
int tegra_camrtc_reboot(struct device *dev);
int tegra_camrtc_restore(struct device *dev);
bool tegra_camrtc_is_rtcpu_alive(struct device *dev);
void tegra_camrtc_flush_trace(struct device *dev);
bool tegra_camrtc_is_rtcpu_powered(void);
#define TEGRA_CAMRTC_VERSION_LEN 128
int tegra_camrtc_ping(struct device *dev, u32 data, long timeout);
void tegra_camrtc_ivc_ring(struct device *dev, u16 group);
#endif
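
As an illustration of this API (not part of the patch), a client holding the camera RTCPU device could check liveness and log the firmware version roughly as below; the payload and timeout values are arbitrary, and the timeout units follow the driver implementation rather than this header:

#include <linux/device.h>
#include <linux/tegra-camera-rtcpu.h>

static void example_health_check(struct device *rce_dev)
{
    char version[TEGRA_CAMRTC_VERSION_LEN];

    /* Round-trip an arbitrary payload through the RTCPU. */
    if (tegra_camrtc_ping(rce_dev, 0x1234, 100) < 0)
        dev_warn(rce_dev, "RTCPU ping failed\n");

    /* Fetch and log the firmware version string. */
    tegra_camrtc_print_version(rce_dev, version, sizeof(version));
    dev_info(rce_dev, "%s\n", version);
}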

View File

@@ -0,0 +1,136 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#ifndef INCLUDE_CAPTURE_IVC_H
#define INCLUDE_CAPTURE_IVC_H
#include <linux/types.h>
/**
* @brief Submit the control message binary blob to capture-IVC driver,
* which is to be transferred over control IVC channel to RTCPU.
*
* @param[in] control_desc binary blob containing control message
* descriptor; opaque to the capture-IVC driver.
* @param[in] len size of control_desc.
*
* @returns 0 (success), neg. errno (failure)
*/
int tegra_capture_ivc_control_submit(
const void *control_desc,
size_t len);
/**
* @brief Submit the capture message binary blob to capture-IVC driver,
* which is to be transferred over capture IVC channel to RTCPU.
*
* @param[in] capture_desc binary blob containing capture message
* descriptor; opaque to KMDs.
* @param[in] len size of capture_desc.
*
* @returns 0 (success), neg. errno (failure)
*/
int tegra_capture_ivc_capture_submit(
const void *capture_desc,
size_t len);
/**
* @brief Callback function to be registered by client to receive the rtcpu
* notifications through control or capture IVC channel.
*
* @param[in] resp_desc binary blob containing the response message
* received from rtcpu through control or capture
* IVC channel; it is opaque to KMDs.
* @param[in] priv_context Client's private context, opaque to
* capture-IVC driver.
*/
typedef void (*tegra_capture_ivc_cb_func)(
const void *resp_desc,
const void *priv_context);
/**
* @brief Register callback function to receive response messages from rtcpu
* through control IVC channel.
*
* @param[in] control_resp_cb callback function to be registered for
* control IVC channel.
* @param[in] priv_context client's private context, opaque to
* capture-IVC driver.
* @param[out] trans_id temporary id assigned by capture-IVC driver,
* for the clients whose unique chan_id is not
* yet allocated by RTCPU, to match their
* responses with the requests.
*
* @returns 0 (success), neg. errno (failure)
*/
int tegra_capture_ivc_register_control_cb(
tegra_capture_ivc_cb_func control_resp_cb,
uint32_t *trans_id,
const void *priv_context);
/**
* @brief Notify the client's channel ID to the capture-IVC driver.
* Once a client receives its newly allocated channel ID from the RTCPU,
* it must also report it to the capture-IVC driver, so that the driver
* can replace the temporary ID trans_id with the new channel ID chan_id
* in its internal context. The driver uses this unique channel ID to map
* upcoming responses to the client requests.
*
* @param[in] chan_id new channel ID allocated by the RTCPU for the
* client; the capture-IVC driver uses it to refer
* to the client in future control responses.
* @param[in] trans_id temporary id assigned by capture-IVC driver,
* for the client.
*
* @returns 0 (success), neg. errno (failure)
*/
int tegra_capture_ivc_notify_chan_id(
uint32_t chan_id,
uint32_t trans_id);
/**
* @brief Register callback function to receive status-indication messages from
* rtcpu through capture IVC channel.
*
* @param[in] capture_status_ind_cb callback function to be registered for
* capture ivc channel.
* @param[in] chan_id client's channel ID; the capture-IVC driver
* uses it to refer to the client for its capture
* responses.
* @param[in] priv_context client's private context, opaque to
* capture-IVC driver.
*
* @returns 0 (success), neg. errno (failure)
*/
int tegra_capture_ivc_register_capture_cb(
tegra_capture_ivc_cb_func capture_status_ind_cb,
uint32_t chan_id,
const void *priv_context);
/**
* @brief Unregister the callback function to stop receiving messages over
* the control IVC channel.
*
* @param[in] id client's channel id or transaction id, for which the
* callback needs to be unregistered.
*
* @returns 0 (success), neg. errno (failure)
*/
int tegra_capture_ivc_unregister_control_cb(
uint32_t id);
/**
* @brief Unregister the callback function to stop receiving messages over
* the capture IVC channel.
*
* @param[in] chan_id client's channel id, for which the callback needs to be
* unregistered.
*
* @returns 0 (success), neg. errno (failure)
*/
int tegra_capture_ivc_unregister_capture_cb(
uint32_t chan_id);
#endif /* INCLUDE_CAPTURE_IVC_H */
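
For orientation, a minimal sketch of the intended registration flow; the client type, callback body, and descriptor contents are hypothetical, and the control-submit step is elided:

#include <linux/types.h>
#include "capture-ivc.h"

struct example_client {
    uint32_t trans_id;
    uint32_t chan_id;   /* allocated later by the RTCPU */
};

static void example_control_cb(const void *resp_desc, const void *priv_context)
{
    /* Decode the opaque response here; a channel-allocation reply
     * would carry the chan_id assigned by the RTCPU (format not shown). */
}

static int example_open_channel(struct example_client *cl)
{
    int err;

    /* 1. Register for control responses under a temporary trans_id. */
    err = tegra_capture_ivc_register_control_cb(example_control_cb,
            &cl->trans_id, cl);
    if (err)
        return err;

    /* 2. Submit an opaque channel-setup descriptor with
     *    tegra_capture_ivc_control_submit() (contents elided). */

    /* 3. Once the reply delivers chan_id, retire the temporary ID. */
    return tegra_capture_ivc_notify_chan_id(cl->chan_id, cl->trans_id);
}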

View File

@@ -0,0 +1,137 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#ifndef _LINUX_TEGRA_IVC_BUS_H
#define _LINUX_TEGRA_IVC_BUS_H
#include <linux/tegra-ivc-instance.h>
#include <linux/types.h>
extern struct bus_type tegra_ivc_bus_type;
extern struct device_type tegra_ivc_bus_dev_type;
struct tegra_ivc_bus;
struct tegra_ivc_rpc_data;
struct tegra_ivc_bus *tegra_ivc_bus_create(struct device *);
void tegra_ivc_bus_ready(struct tegra_ivc_bus *bus, bool online);
void tegra_ivc_bus_destroy(struct tegra_ivc_bus *bus);
int tegra_ivc_bus_boot_sync(struct tegra_ivc_bus *bus);
void tegra_ivc_bus_notify(struct tegra_ivc_bus *bus, u16 group);
struct tegra_ivc_driver {
struct device_driver driver;
struct device_type *dev_type;
union {
const struct tegra_ivc_channel_ops *channel;
} ops;
};
static inline struct tegra_ivc_driver *to_tegra_ivc_driver(
struct device_driver *drv)
{
if (drv == NULL)
return NULL;
return container_of(drv, struct tegra_ivc_driver, driver);
}
int tegra_ivc_driver_register(struct tegra_ivc_driver *drv);
void tegra_ivc_driver_unregister(struct tegra_ivc_driver *drv);
#define tegra_ivc_module_driver(drv) \
module_driver(drv, tegra_ivc_driver_register, \
tegra_ivc_driver_unregister)
#define tegra_ivc_subsys_driver(__driver, __register, __unregister, ...) \
static int __init __driver##_init(void) \
{ \
return __register(&(__driver), ##__VA_ARGS__); \
} \
subsys_initcall_sync(__driver##_init);
#define tegra_ivc_subsys_driver_default(__driver) \
tegra_ivc_subsys_driver(__driver, \
tegra_ivc_driver_register, \
tegra_ivc_driver_unregister)
/* IVC channel driver support */
extern struct device_type tegra_ivc_channel_type;
struct tegra_ivc_channel {
struct ivc ivc;
struct device dev;
const struct tegra_ivc_channel_ops __rcu *ops;
struct tegra_ivc_channel *next;
struct mutex ivc_wr_lock;
struct tegra_ivc_rpc_data *rpc_priv;
atomic_t bus_resets;
u16 group;
bool is_ready;
};
static inline bool tegra_ivc_channel_online_check(
struct tegra_ivc_channel *chan)
{
atomic_set(&chan->bus_resets, 0);
smp_wmb();
smp_rmb();
return chan->is_ready;
}
static inline bool tegra_ivc_channel_has_been_reset(
struct tegra_ivc_channel *chan)
{
smp_rmb();
return atomic_read(&chan->bus_resets) != 0;
}
static inline void *tegra_ivc_channel_get_drvdata(
struct tegra_ivc_channel *chan)
{
return dev_get_drvdata(&chan->dev);
}
static inline void tegra_ivc_channel_set_drvdata(
struct tegra_ivc_channel *chan, void *data)
{
dev_set_drvdata(&chan->dev, data);
}
static inline struct tegra_ivc_channel *to_tegra_ivc_channel(
struct device *dev)
{
return container_of(dev, struct tegra_ivc_channel, dev);
}
static inline struct device *tegra_ivc_channel_to_camrtc_dev(
struct tegra_ivc_channel *ch)
{
if (unlikely(ch == NULL))
return NULL;
BUG_ON(ch->dev.parent == NULL);
BUG_ON(ch->dev.parent->parent == NULL);
return ch->dev.parent->parent;
}
int tegra_ivc_channel_runtime_get(struct tegra_ivc_channel *chan);
void tegra_ivc_channel_runtime_put(struct tegra_ivc_channel *chan);
struct tegra_ivc_channel_ops {
int (*probe)(struct tegra_ivc_channel *);
void (*ready)(struct tegra_ivc_channel *, bool online);
void (*remove)(struct tegra_ivc_channel *);
void (*notify)(struct tegra_ivc_channel *);
};
/* Legacy mailbox support */
struct tegra_ivc_mbox_msg {
int length;
void *data;
};
#endif
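
To show how these pieces fit together, a hypothetical channel driver skeleton; every example_* name and the compatible string are invented for illustration. Note that binding requires both the matching device type and an OF match, per tegra_ivc_bus_match() in the bus driver:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/tegra-ivc-bus.h>

static int example_probe(struct tegra_ivc_channel *chan)
{
    dev_info(&chan->dev, "example channel bound\n");
    return 0;
}

static void example_notify(struct tegra_ivc_channel *chan)
{
    /* Called after the peer rings this channel's group: drain RX here. */
}

static const struct tegra_ivc_channel_ops example_ops = {
    .probe = example_probe,
    .notify = example_notify,
};

static const struct of_device_id example_of_match[] = {
    { .compatible = "nvidia,example-ivc-service" },  /* hypothetical */
    { },
};

static struct tegra_ivc_driver example_driver = {
    .driver = {
        .name = "tegra-ivc-example",
        .bus = &tegra_ivc_bus_type,
        .owner = THIS_MODULE,
        .of_match_table = example_of_match,
    },
    .dev_type = &tegra_ivc_channel_type,
    .ops.channel = &example_ops,
};
tegra_ivc_module_driver(example_driver);
MODULE_LICENSE("GPL v2");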

View File

@@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#ifndef INCLUDE_RTCPU_MONITOR_H
#define INCLUDE_RTCPU_MONITOR_H
struct device;
struct tegra_camrtc_mon;
int tegra_camrtc_mon_restore_rtcpu(struct tegra_camrtc_mon *);
struct tegra_camrtc_mon *tegra_camrtc_mon_create(struct device *);
int tegra_cam_rtcpu_mon_destroy(struct tegra_camrtc_mon *);
#endif /* INCLUDE_RTCPU_MONITOR_H */

View File

@@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#ifndef _LINUX_TEGRA_RTCPU_TRACE_H_
#define _LINUX_TEGRA_RTCPU_TRACE_H_
#include <linux/types.h>
struct tegra_rtcpu_trace;
struct camrtc_device_group;
struct tegra_rtcpu_trace *tegra_rtcpu_trace_create(
struct device *dev,
struct camrtc_device_group *camera_devices);
int tegra_rtcpu_trace_boot_sync(struct tegra_rtcpu_trace *tracer);
void tegra_rtcpu_trace_flush(struct tegra_rtcpu_trace *tracer);
void tegra_rtcpu_trace_destroy(struct tegra_rtcpu_trace *tracer);
#endif

View File

@@ -0,0 +1,106 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
/**
* @file camrtc-channels.h
*
* @brief RCE channel setup tags & structures.
*/
#ifndef INCLUDE_CAMRTC_CHANNELS_H
#define INCLUDE_CAMRTC_CHANNELS_H
#include "camrtc-common.h"
/**
* @defgroup RceTags RCE tags
*
* All the enums and the struct fields described in this header
* file use only uintX_t types, where X can be 8, 16, 32, or 64.
* @{
*/
#define CAMRTC_TAG64(s0, s1, s2, s3, s4, s5, s6, s7) ( \
((uint64_t)(s0) << 0U) | ((uint64_t)(s1) << 8U) | \
((uint64_t)(s2) << 16U) | ((uint64_t)(s3) << 24U) | \
((uint64_t)(s4) << 32U) | ((uint64_t)(s5) << 40U) | \
((uint64_t)(s6) << 48U) | ((uint64_t)(s7) << 56U))
#define CAMRTC_TAG_IVC_SETUP CAMRTC_TAG64('I', 'V', 'C', '-', 'S', 'E', 'T', 'U')
#define CAMRTC_TAG_NV_TRACE CAMRTC_TAG64('N', 'V', ' ', 'T', 'R', 'A', 'C', 'E')
#define CAMRTC_TAG_NV_CAM_TRACE CAMRTC_TAG64('N', 'V', ' ', 'C', 'A', 'M', 'T', 'R')
#define CAMRTC_TAG_NV_COVERAGE CAMRTC_TAG64('N', 'V', ' ', 'C', 'O', 'V', 'E', 'R')
/** @} */
/**
* @brief RCE Tag, length, and value (TLV)
*/
struct camrtc_tlv {
/** Command tag. See @ref RceTags "RCE Tags" */
uint64_t tag;
/** Length of the tag specific data */
uint64_t len;
};
/**
* @brief Setup TLV for IVC
*
* Multiple setup structures can follow each other.
*/
struct camrtc_tlv_ivc_setup {
/** Command tag. See @ref RceTags "RCE Tags" */
uint64_t tag;
/** Length of the tag specific data */
uint64_t len;
/** Base address of write header. RX from CCPLEX point of view */
uint64_t rx_iova;
/** Size of IVC write frame */
uint32_t rx_frame_size;
/** Number of IVC write frames */
uint32_t rx_nframes;
/** Base address of read header. TX from CCPLEX point of view */
uint64_t tx_iova;
/** Size of IVC read frame */
uint32_t tx_frame_size;
/** Number of IVC read frames */
uint32_t tx_nframes;
/** IVC channel group */
uint32_t channel_group;
/** IVC version */
uint32_t ivc_version;
/** IVC service name */
char ivc_service[32];
};
/**
* @defgroup CamRTCChannelErrors Channel setup error codes
* @{
*/
#define RTCPU_CH_SUCCESS MK_U32(0)
#define RTCPU_CH_ERR_NO_SERVICE MK_U32(128)
#define RTCPU_CH_ERR_ALREADY MK_U32(129)
#define RTCPU_CH_ERR_UNKNOWN_TAG MK_U32(130)
#define RTCPU_CH_ERR_INVALID_IOVA MK_U32(131)
#define RTCPU_CH_ERR_INVALID_PARAM MK_U32(132)
/** @} */
/**
* @brief Code coverage memory header
*/
struct camrtc_coverage_memory_header {
/** Code coverage tag. Should be CAMRTC_TAG_NV_COVERAGE */
uint64_t signature;
/** Size of camrtc_coverage_memory_header */
uint64_t length;
/** Header revision */
uint32_t revision;
/** Size of the coverage memory buffer */
uint32_t coverage_buffer_size;
/** Coverage data inside the memory buffer in bytes */
uint32_t coverage_total_bytes;
/** Reserved */
uint32_t reserved;
};
#endif /* INCLUDE_CAMRTC_CHANNELS_H */
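
Two points worth illustrating here. CAMRTC_TAG64() packs eight ASCII characters little-endian into a 64-bit word, so CAMRTC_TAG_IVC_SETUP evaluates to 0x555445532d435649 with 'I' in the lowest byte. And since setup TLVs are laid out back to back and terminated by a zero tag, a consumer can walk them with a sketch like the following (hypothetical code, not part of this patch):

#include <stdint.h>
#include <stddef.h>
#include "camrtc-channels.h"

static void example_walk_tlvs(const void *config, size_t size)
{
    const uint8_t *pos = config;
    const uint8_t *end = pos + size;

    while ((size_t)(end - pos) >= sizeof(struct camrtc_tlv)) {
        const struct camrtc_tlv *tlv = (const void *)pos;

        if (tlv->tag == 0)
            break;  /* zero-tag terminator */
        if (tlv->len < sizeof(*tlv) || tlv->len > (size_t)(end - pos))
            break;  /* malformed entry */
        if (tlv->tag == CAMRTC_TAG_IVC_SETUP)
            ;       /* cast to struct camrtc_tlv_ivc_setup and consume */
        pos += tlv->len;
    }
}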

View File

@@ -0,0 +1,165 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
/**
* @file camrtc-commands.h
*
* @brief Commands used with "nvidia,tegra-camrtc-hsp-vm" & "nvidia,tegra-hsp-mailbox"
* protocol
*/
#ifndef INCLUDE_CAMRTC_COMMANDS_H
#define INCLUDE_CAMRTC_COMMANDS_H
#include "camrtc-common.h"
/**
* @defgroup HspVmMsgs Definitions for "nvidia,tegra-camrtc-hsp-vm" protocol
* @{
*/
#define CAMRTC_HSP_MSG(_id, _param) ( \
((uint32_t)(_id) << MK_U32(24)) | \
((uint32_t)(_param) & MK_U32(0xffffff)))
#define CAMRTC_HSP_MSG_ID(_msg) \
(((_msg) >> MK_U32(24)) & MK_U32(0x7f))
#define CAMRTC_HSP_MSG_PARAM(_msg) \
((uint32_t)(_msg) & MK_U32(0xffffff))
/**
* The IRQ message is sent when no other HSP-VM protocol message is being sent
* (i.e., no message of a higher-level protocol layered on HSP, such as the
* IVC channel protocol) and the sender has updated its shared semaphore bits.
*/
#define CAMRTC_HSP_IRQ MK_U32(0x00)
/**
* The HELLO messages are exchanged at the beginning of a VM/RCE FW session.
* The HELLO message exchange ensures there are no unprocessed messages
* in transit within the VM or the RCE FW.
*/
#define CAMRTC_HSP_HELLO MK_U32(0x40)
/**
* VM session close is indicated using the BYE message;
* RCE FW then reclaims the resources assigned to the given VM.
* It must be sent before the Camera VM shuts itself down.
*/
#define CAMRTC_HSP_BYE MK_U32(0x41)
/**
* The RESUME message is sent when VM wants to activate the RCE FW
* and access the camera hardware through it.
*/
#define CAMRTC_HSP_RESUME MK_U32(0x42)
/**
* Power off the camera HW and switch to idle state. The VM initiates this during runtime suspend or SC7 entry.
*/
#define CAMRTC_HSP_SUSPEND MK_U32(0x43)
/**
* Used to set up a shared memory area (such as IVC channels, trace buffer, etc.)
* between Camera VM and RCE FW.
*/
#define CAMRTC_HSP_CH_SETUP MK_U32(0x44)
/**
* The Camera VM can use the PING message to check the liveness of the RCE FW and of the HSP protocol.
*/
#define CAMRTC_HSP_PING MK_U32(0x45)
/**
* SHA1 hash code for RCE FW binary.
*/
#define CAMRTC_HSP_FW_HASH MK_U32(0x46)
/**
* The VM includes its protocol version as a parameter to PROTOCOL message.
* FW responds with its protocol version, or RTCPU_FW_INVALID_VERSION
* if the VM protocol is not supported.
*/
#define CAMRTC_HSP_PROTOCOL MK_U32(0x47)
#define CAMRTC_HSP_RESERVED_5E MK_U32(0x5E) /* bug 200395605 */
#define CAMRTC_HSP_UNKNOWN MK_U32(0x7F)
/** Shared semaphore bits (FW->VM) */
#define CAMRTC_HSP_SS_FW_MASK MK_U32(0xFFFF)
#define CAMRTC_HSP_SS_FW_SHIFT MK_U32(0)
/** Shared semaphore bits (VM->FW) */
#define CAMRTC_HSP_SS_VM_MASK MK_U32(0x7FFF0000)
#define CAMRTC_HSP_SS_VM_SHIFT MK_U32(16)
/** Bits used by IVC channels */
#define CAMRTC_HSP_SS_IVC_MASK MK_U32(0xFF)
/** @} */
/**
* @defgroup HspMailboxMsgs Definitions for "nvidia,tegra-hsp-mailbox" protocol
* @{
*/
#define RTCPU_COMMAND(id, value) \
(((RTCPU_CMD_ ## id) << MK_U32(24)) | ((uint32_t)value))
#define RTCPU_GET_COMMAND_ID(value) \
((((uint32_t)value) >> MK_U32(24)) & MK_U32(0x7f))
#define RTCPU_GET_COMMAND_VALUE(value) \
(((uint32_t)value) & MK_U32(0xffffff))
/**
* RCE FW waits until the VM client initiates boot sync with the INIT command.
*/
#define RTCPU_CMD_INIT MK_U32(0)
/**
* The VM client sends the host driver version and expects the RCE FW to
* respond with its current FW version, as part of boot sync.
*/
#define RTCPU_CMD_FW_VERSION MK_U32(1)
#define RTCPU_CMD_RESERVED_02 MK_U32(2)
#define RTCPU_CMD_RESERVED_03 MK_U32(3)
/**
* Release the RCE FW resources assigned to the given VM client during runtime suspend or SC7.
*/
#define RTCPU_CMD_PM_SUSPEND MK_U32(4)
#define RTCPU_CMD_RESERVED_05 MK_U32(5)
/**
* Used to set up a shared memory area (such as IVC channels, trace buffer, etc.)
* between Camera VM and RCE FW.
*/
#define RTCPU_CMD_CH_SETUP MK_U32(6)
#define RTCPU_CMD_RESERVED_5E MK_U32(0x5E) /* bug 200395605 */
#define RTCPU_CMD_RESERVED_7D MK_U32(0x7d)
#define RTCPU_CMD_RESERVED_7E MK_U32(0x7e)
#define RTCPU_CMD_ERROR MK_U32(0x7f)
#define RTCPU_FW_DB_VERSION MK_U32(0)
#define RTCPU_FW_VERSION MK_U32(1)
#define RTCPU_FW_SM2_VERSION MK_U32(2)
#define RTCPU_FW_SM3_VERSION MK_U32(3)
/** SM4 firmware can restore itself after suspend */
#define RTCPU_FW_SM4_VERSION MK_U32(4)
/** SM5 firmware supports IVC synchronization */
#define RTCPU_FW_SM5_VERSION MK_U32(5)
/** SM5 driver supports IVC synchronization */
#define RTCPU_DRIVER_SM5_VERSION MK_U32(5)
/** SM6 firmware/driver supports camrtc-hsp-vm protocol */
#define RTCPU_FW_SM6_VERSION MK_U32(6)
#define RTCPU_DRIVER_SM6_VERSION MK_U32(6)
#define RTCPU_IVC_SANS_TRACE MK_U32(1)
#define RTCPU_IVC_WITH_TRACE MK_U32(2)
#define RTCPU_FW_HASH_SIZE MK_U32(20)
#define RTCPU_FW_HASH_ERROR MK_U32(0xFFFFFF)
#define RTCPU_PM_SUSPEND_SUCCESS MK_U32(0x100)
#define RTCPU_PM_SUSPEND_FAILURE MK_U32(0x001)
#define RTCPU_FW_CURRENT_VERSION RTCPU_FW_SM6_VERSION
#define RTCPU_FW_INVALID_VERSION MK_U32(0xFFFFFF)
#define RTCPU_RESUME_ERROR MK_U32(0xFFFFFF)
/** @} */
#endif /* INCLUDE_CAMRTC_COMMANDS_H */
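
A small worked example of the 8-bit-ID / 24-bit-parameter encoding used by the HSP-VM messages (the payload value is chosen arbitrarily):

#include "camrtc-commands.h"

static void example_hsp_roundtrip(void)
{
    /* Compose a PING carrying 0x1234: the ID lands in bits 31..24,
     * so msg == 0x45001234. */
    uint32_t msg = CAMRTC_HSP_MSG(CAMRTC_HSP_PING, 0x1234);

    /* The receiver splits the word back apart. */
    uint32_t id = CAMRTC_HSP_MSG_ID(msg);        /* 0x45 == CAMRTC_HSP_PING */
    uint32_t param = CAMRTC_HSP_MSG_PARAM(msg);  /* 0x001234 */

    (void)id;
    (void)param;
}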

View File

@@ -0,0 +1,88 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
/**
* @file camrtc-common.h
*
* @brief RCE common header file
*/
#ifndef INCLUDE_CAMRTC_COMMON_H
#define INCLUDE_CAMRTC_COMMON_H
#if defined(__KERNEL__)
#include <linux/types.h>
#include <linux/compiler.h>
#define CAMRTC_PACKED __packed
#define CAMRTC_ALIGN __aligned
#else
#include <stdint.h>
#include <stdbool.h>
#ifndef CAMRTC_PACKED
#define CAMRTC_PACKED __attribute__((packed))
#endif
#ifndef CAMRTC_ALIGN
#define CAMRTC_ALIGN(_n) __attribute__((aligned(_n)))
#endif
#ifndef U64_C
#define U64_C(_x_) ((uint64_t)(_x_##ULL))
#endif
#ifndef U32_C
#define U32_C(_x_) ((uint32_t)(_x_##UL))
#endif
#ifndef U16_C
#define U16_C(_x_) ((uint16_t)(_x_##U))
#endif
#ifndef U8_C
#define U8_C(_x_) ((uint8_t)(_x_##U))
#endif
#endif
/**
* @defgroup MK_xxx Macros for defining constants
*
* These macros are used to define constants in the camera/firmware-api
* headers.
*
* The user of the header files can predefine them and override the
* types of the constants.
*
* @{
*/
#ifndef MK_U64
#define MK_U64(_x_) U64_C(_x_)
#endif
#ifndef MK_U32
#define MK_U32(_x_) U32_C(_x_)
#endif
#ifndef MK_U16
#define MK_U16(_x_) U16_C(_x_)
#endif
#ifndef MK_U8
#define MK_U8(_x_) U8_C(_x_)
#endif
#ifndef MK_BIT32
#define MK_BIT32(_x_) (MK_U32(1) << MK_U32(_x_))
#endif
#ifndef MK_BIT64
#define MK_BIT64(_x_) (MK_U64(1) << MK_U64(_x_))
#endif
#ifndef MK_ALIGN
#define MK_ALIGN(_x_) _x_
#endif
#ifndef MK_SIZE
#define MK_SIZE(_x_) MK_U32(_x_)
#endif
/** @} */
#endif /* INCLUDE_CAMRTC_COMMON_H */

View File

@@ -0,0 +1,408 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#ifndef INCLUDE_CAMRTC_DBG_MESSAGES_H
#define INCLUDE_CAMRTC_DBG_MESSAGES_H
#include "camrtc-common.h"
#pragma GCC diagnostic error "-Wpadded"
/*
* Message identifiers.
*/
#define CAMRTC_REQ_PING MK_U32(0x01) /* Ping request. */
#define CAMRTC_REQ_PM_SLEEP MK_U32(0x02) /* Never implemented */
#define CAMRTC_REQ_MODS_TEST MK_U32(0x03) /* Run MODS test */
#define CAMRTC_REQ_SET_LOGLEVEL MK_U32(0x04) /* Set log level */
#define CAMRTC_REQ_LOGLEVEL CAMRTC_REQ_SET_LOGLEVEL
#define CAMRTC_REQ_RTOS_STATE MK_U32(0x05) /* Get FreeRTOS state */
#define CAMRTC_REQ_READ_MEMORY_32BIT MK_U32(0x06) /* Read memory */
#define CAMRTC_REQ_READ_MEMORY MK_U32(0x07)
#define CAMRTC_REQ_SET_PERF_COUNTERS MK_U32(0x08) /* ARM Performance counter */
#define CAMRTC_REQ_GET_PERF_COUNTERS MK_U32(0x09)
#define CAMRTC_REQ_GET_LOGLEVEL MK_U32(0x0A)
#define CAMRTC_REQ_RUN_TEST MK_U32(0x0B) /* Run functional test (obsolete) */
#define CAMRTC_REQ_GET_TASK_STAT MK_U32(0x0C)
#define CAMRTC_REQ_ENABLE_VI_STAT MK_U32(0x0D)
#define CAMRTC_REQ_GET_VI_STAT MK_U32(0x0E)
#define CAMRTC_REQ_GET_MEM_USAGE MK_U32(0x0F)
#define CAMRTC_REQ_RUN_MEM_TEST MK_U32(0x10) /* Run functional test */
#define CAMRTC_REQ_GET_IRQ_STAT MK_U32(0x11)
#define CAMRTC_REQ_SET_FALCON_COVERAGE MK_U32(0x12)
#define CAMRTC_REQ_GET_COVERAGE_SUPPORT MK_U32(0x13)
#define CAMRTC_REQUEST_TYPE_MAX MK_U32(0x14)
/* MODS test cases */
#define CAMRTC_MODS_TEST_BASIC MK_U32(0x00) /* Basic MODS tests */
#define CAMRTC_MODS_TEST_DMA MK_U32(0x01) /* MODS DMA test */
/* Deprecated */
#define CAMRTC_RESP_PONG CAMRTC_REQ_PING
#define CAMRTC_RESP_PM_SLEEP CAMRTC_REQ_PM_SLEEP
#define CAMRTC_RESP_MODS_RESULT CAMRTC_REQ_MODS_TEST
#define CAMRTC_RESP_LOGLEVEL CAMRTC_REQ_SET_LOGLEVEL
#define CAMRTC_RESP_RTOS_STATE CAMRTC_REQ_RTOS_STATE
#define CAMRTC_RESP_READ_MEMORY_32BIT CAMRTC_REQ_READ_MEMORY_32BIT
#define CAMRTC_RESP_READ_MEMORY CAMRTC_REQ_READ_MEMORY
#define CAMRTC_RESP_SET_PERF_COUNTERS CAMRTC_REQ_SET_PERF_COUNTERS
#define CAMRTC_RESP_GET_PERF_COUNTERS CAMRTC_REQ_GET_PERF_COUNTERS
/* Return statuses */
#define CAMRTC_STATUS_OK MK_U32(0)
#define CAMRTC_STATUS_ERROR MK_U32(1) /* Generic error */
#define CAMRTC_STATUS_REQ_UNKNOWN MK_U32(2) /* Unknown req_type */
#define CAMRTC_STATUS_NOT_IMPLEMENTED MK_U32(3) /* Request not implemented */
#define CAMRTC_STATUS_INVALID_PARAM MK_U32(4) /* Invalid parameter */
#define CAMRTC_DBG_FRAME_SIZE MK_U32(448)
#define CAMRTC_DBG_MAX_DATA MK_U32(440)
#define CAMRTC_DBG_TASK_STAT_MAX MK_U32(16)
/*
* This struct is used to query or set the wake timeout for the target.
* Fields:
* force_entry: when set forces the target to sleep for a set time
*/
struct camrtc_pm_data {
uint32_t force_entry;
};
/* This struct is used to configure the MODS test run on the target.
* Fields:
* mods_case: MODS test case to run (see CAMRTC_MODS_TEST_*)
* mods_loops: number of times the MODS test should be run
* mods_dma_channels: number of DMA channels used by the DMA test
*/
struct camrtc_mods_data {
uint32_t mods_case;
uint32_t mods_loops;
uint32_t mods_dma_channels;
};
/* This struct carries the ping payload and its timestamps.
* Fields:
* ts_req: requestor timestamp
* ts_resp: response timestamp
* data: opaque payload buffer
*/
struct camrtc_ping_data {
uint64_t ts_req; /* requestor timestamp */
uint64_t ts_resp; /* response timestamp */
uint8_t data[64]; /* data */
};
struct camrtc_log_data {
uint32_t level;
};
struct camrtc_rtos_state_data {
uint8_t rtos_state[CAMRTC_DBG_MAX_DATA]; /* string data */
};
/* This structure is used to read 32 bit data from firmware address space.
* Fields:
* addr: address to read from; should be 4-byte aligned.
* data: 32 bit value read from memory.
*/
struct camrtc_dbg_read_memory_32bit {
uint32_t addr;
};
struct camrtc_dbg_read_memory_32bit_result {
uint32_t data;
};
#define CAMRTC_DBG_READ_MEMORY_COUNT_MAX MK_U32(256)
/* This structure is used to read memory in firmware address space.
* Fields:
* addr: starting address. no alignment requirement
* count: number of bytes to read. limited to CAMRTC_DBG_READ_MEMORY_COUNT_MAX
* data: contents read from memory.
*/
struct camrtc_dbg_read_memory {
uint32_t addr;
uint32_t count;
};
struct camrtc_dbg_read_memory_result {
uint8_t data[CAMRTC_DBG_READ_MEMORY_COUNT_MAX];
};
#define CAMRTC_DBG_MAX_PERF_COUNTERS MK_U32(31)
/* This structure is used to set the event type that each performance counter
* will monitor. This doesn't include the fixed performance counter: if there
* are 4 counters available, only 3 of them are configurable.
* Fields:
* number: Number of performance counters to set.
* This excludes a fixed performance counter: cycle counter
* do_reset: Whether to reset counters
* cycle_counter_div64: Whether to enable cycle counter divider
* events: Event type to monitor
*/
struct camrtc_dbg_set_perf_counters {
uint32_t number;
uint32_t do_reset;
uint32_t cycle_counter_div64;
uint32_t events[CAMRTC_DBG_MAX_PERF_COUNTERS];
};
/* This structure is used to get performance counters.
* Fields:
* number: Number of performance counters.
* This includes a fixed performance counter: cycle counter
* counters: Descriptors of event counters. First entry is for cycle counter.
* event: Event type that the value represents.
* For first entry, this field is don't care.
* value: Value of performance counter.
* cycle_counter_div64: Nonzero if cycle counter divider is active
*/
struct camrtc_dbg_get_perf_counters_result {
uint32_t number;
struct {
uint32_t event;
uint32_t value;
} counters[CAMRTC_DBG_MAX_PERF_COUNTERS];
uint32_t cycle_counter_div64;
};
#define CAMRTC_DBG_MAX_TEST_DATA (CAMRTC_DBG_MAX_DATA - sizeof(uint64_t))
/* This structure is used to pass textual input data to a functional test
* case and to get back the test output, including the verdict.
*
* Fields:
* timeout: maximum time test may run in nanoseconds
* data: textual data (e.g., test name, verdict)
*/
struct camrtc_dbg_run_test_data {
uint64_t timeout; /* Time in nanoseconds */
char data[CAMRTC_DBG_MAX_TEST_DATA];
};
/* Number of memory areas */
#define CAMRTC_DBG_NUM_MEM_TEST_MEM MK_U32(8)
#define CAMRTC_DBG_MAX_MEM_TEST_DATA (\
CAMRTC_DBG_MAX_DATA \
- sizeof(uint64_t) - sizeof(struct camrtc_dbg_streamids) \
- (sizeof(struct camrtc_dbg_test_mem) * CAMRTC_DBG_NUM_MEM_TEST_MEM))
struct camrtc_dbg_test_mem {
uint32_t size;
uint32_t page_size;
uint64_t phys_addr;
uint64_t rtcpu_iova;
uint64_t vi_iova;
uint64_t vi2_iova;
uint64_t isp_iova;
};
struct camrtc_dbg_streamids {
uint8_t rtcpu;
uint8_t vi;
uint8_t vi2;
uint8_t isp;
};
/* This structure is used to pass memory areas and textual input data to
* a functional test case and to get back the test output, including the
* verdict.
*
* Fields:
* timeout: maximum time test may run in nanoseconds
* mem[]: address and size of memory areas passed to the test
* data: textual data (e.g., test name, verdict)
*/
struct camrtc_dbg_run_mem_test_data {
uint64_t timeout; /* Time in nanoseconds */
struct camrtc_dbg_test_mem mem[CAMRTC_DBG_NUM_MEM_TEST_MEM];
struct camrtc_dbg_streamids streamids;
char data[CAMRTC_DBG_MAX_MEM_TEST_DATA];
};
/* This structure is used to get information on system tasks.
* Fields:
* n_task: number of reported tasks
* total_count: total runtime
* task: array of reported tasks
* id: task name
* count: runtime allocated to task
* number: unique task number
* priority: priority of task when this structure was populated
*/
struct camrtc_dbg_task_stat {
uint32_t n_task;
uint32_t total_count;
struct {
uint32_t id[2];
uint32_t count;
uint32_t number;
uint32_t priority;
} task[CAMRTC_DBG_TASK_STAT_MAX];
};
/* Limit for default CAMRTC_DBG_FRAME_SIZE */
#define CAMRTC_DBG_NUM_IRQ_STAT MK_U32(11)
/*
* This structure is used to get information on interrupts.
*
* Fields:
* n_active: number of active interrupts
* total_called: total number of interrupts handled
* total_runtime: total runtime
* n_irq: number of reported interrupts
* irqs: array of reported interrupts
* irq_num: irq number
* num_called: times this interrupt has been handled
* runtime: total runtime for this interrupt
* max_runtime: longest single runtime for this interrupt
* name: name of the interrupt (may not be NUL-terminated)
*/
struct camrtc_dbg_irq_stat {
uint32_t n_active;
uint32_t n_irq;
uint64_t total_called;
uint64_t total_runtime;
struct {
uint32_t irq_num;
char name[12];
uint64_t runtime;
uint32_t max_runtime;
uint32_t num_called;
} irqs[CAMRTC_DBG_NUM_IRQ_STAT];
};
/* This structure is used to enable or disable collection of VI message
* statistics.
* Fields:
* enable: enable/disable collecting VI message statistics
*/
struct camrtc_dbg_enable_vi_stat {
uint32_t enable;
};
/* This structure is used to get VI message statistics.
* Fields:
* avg: running average of VI message latency.
* max: maximum VI message latency observed so far.
*/
struct camrtc_dbg_vi_stat {
uint32_t avg;
uint32_t max;
};
/* This structure is used to get memory usage.
* Fields:
* text: code memory usage
* bss: global/static (zero-initialized) memory usage.
* data: global/static (initialized) memory usage.
* heap: heap memory usage.
* stack: cpu stack memory usage.
* free_mem: remaining free memory.
*/
struct camrtc_dbg_mem_usage {
uint32_t text;
uint32_t bss;
uint32_t data;
uint32_t heap;
uint32_t stack;
uint32_t free_mem;
};
#define CAMRTC_DBG_FALCON_ID_VI MK_U32(0x00)
#define CAMRTC_DBG_FALCON_ID_ISP MK_U32(0x80)
/* This structure is used to set falcon code coverage configuration data.
* Fields:
* falcon_id: Which falcon to set up the coverage for.
* flush: Flush coverage data action bit.
* reset: Reset coverage data action bit. If flush is also set, it runs first.
* size: Size of the coverage data buffer.
* iova: Address of the coverage data buffer in falcon IOVA space.
*
* NOTE: Setting iova and/or size to 0 will disable coverage.
*/
struct camrtc_dbg_coverage_data {
uint8_t falcon_id;
uint8_t flush;
uint8_t reset;
uint8_t pad__;
uint32_t size;
uint64_t iova;
};
/* This structure is used to reply code coverage status.
* Fields:
* falcon_id: Which falcon the status is for
* enabled: Coverage output is configured properly and enabled
* full: Coverage output buffer is full
* bytes_written: Bytes written to buffer so far.
*/
struct camrtc_dbg_coverage_stat {
uint8_t falcon_id;
uint8_t enabled;
uint8_t full;
uint8_t pad__;
uint32_t bytes_written;
};
/* This struct encapsulates the type of the request and the respective
* data associated with that request.
* Fields:
* req_type: indicates the type of the request, be it PM-related,
* MODS, or ping.
* data: Union of structs of all the request types.
*/
struct camrtc_dbg_request {
uint32_t req_type;
uint32_t reserved;
union {
struct camrtc_pm_data pm_data;
struct camrtc_mods_data mods_data;
struct camrtc_ping_data ping_data;
struct camrtc_log_data log_data;
struct camrtc_dbg_read_memory_32bit rm_32bit_data;
struct camrtc_dbg_read_memory rm_data;
struct camrtc_dbg_set_perf_counters set_perf_data;
struct camrtc_dbg_run_test_data run_test_data;
struct camrtc_dbg_run_mem_test_data run_mem_test_data;
struct camrtc_dbg_enable_vi_stat enable_vi_stat;
struct camrtc_dbg_coverage_data coverage_data;
} data;
};
/* This struct encapsulates the type of the response and the respective
* data associated with that response.
* Fields:
* resp_type: indicates the type of the response, be it PM-related,
* MODS, or ping.
* status: result of the request, i.e. success/failure.
* In case of MODS, this field is the test result.
* data: Union of structs of all the request/response types.
*/
struct camrtc_dbg_response {
uint32_t resp_type;
uint32_t status;
union {
struct camrtc_pm_data pm_data;
struct camrtc_ping_data ping_data;
struct camrtc_log_data log_data;
struct camrtc_rtos_state_data rtos_state_data;
struct camrtc_dbg_read_memory_32bit_result rm_32bit_data;
struct camrtc_dbg_read_memory_result rm_data;
struct camrtc_dbg_get_perf_counters_result get_perf_data;
struct camrtc_dbg_run_test_data run_test_data;
struct camrtc_dbg_run_mem_test_data run_mem_test_data;
struct camrtc_dbg_task_stat task_stat_data;
struct camrtc_dbg_vi_stat vi_stat;
struct camrtc_dbg_mem_usage mem_usage;
struct camrtc_dbg_irq_stat irq_stat;
struct camrtc_dbg_coverage_stat coverage_stat;
} data;
};
#pragma GCC diagnostic ignored "-Wpadded"
#endif /* INCLUDE_CAMRTC_DBG_MESSAGES_H */
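
To make the request/response framing concrete, a hypothetical sketch of filling a ping request; the transport carrying the frame is not shown, and a reply arrives as a camrtc_dbg_response whose resp_type mirrors the request type (cf. the deprecated CAMRTC_RESP_* aliases above) with status CAMRTC_STATUS_OK on success:

#include <string.h>
#include "camrtc-dbg-messages.h"

static void example_fill_ping(struct camrtc_dbg_request *req, uint64_t now)
{
    memset(req, 0, sizeof(*req));
    req->req_type = CAMRTC_REQ_PING;
    req->data.ping_data.ts_req = now;  /* requestor timestamp */
    memcpy(req->data.ping_data.data, "ping", sizeof("ping"));
}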

View File

@@ -0,0 +1,464 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#ifndef INCLUDE_CAMRTC_TRACE_H
#define INCLUDE_CAMRTC_TRACE_H
#include "camrtc-common.h"
#include "camrtc-channels.h"
#pragma GCC diagnostic error "-Wpadded"
/*
* Trace memory consists of three parts.
*
* 1. Trace memory header: This describes the layout of trace memory,
* and latest activities.
*
* 2. Exception memory: This is an array of exception entries. Each
* entry describes an exception occurred in the firmware.
*
* 3. Event memory: This is an array of event entries. This is implemented
* as a ring buffer.
*
* The next index gets updated when new messages are committed to the
* trace memory. The next index points to the entry to be written at
* the next occurrence of an exception or event.
*
* Trace memory layout
*
* 0x00000 +-------------------------------+
* | Trace Memory Header |
* 0x01000 +-------------------------------+
* | |
* | Exception Memory | <- exception_next_idx
* | |
* 0x10000 +-------------------------------+
* | |
* | |
* | Event Memory |
* | | <- event_next_idx
* | |
* +-------------------------------+
*/
/* Offset of each memory */
#define CAMRTC_TRACE_NEXT_IDX_SIZE MK_SIZE(64)
#define CAMRTC_TRACE_EXCEPTION_OFFSET MK_U32(0x01000)
#define CAMRTC_TRACE_EVENT_OFFSET MK_U32(0x10000)
/* Size of each entry */
#define CAMRTC_TRACE_EXCEPTION_SIZE MK_SIZE(1024)
#define CAMRTC_TRACE_EVENT_SIZE MK_SIZE(64)
/* Depth of call stack */
#define CAMRTC_TRACE_CALLSTACK_MAX MK_SIZE(32)
#define CAMRTC_TRACE_CALLSTACK_MIN MK_SIZE(4)
/*
* Trace memory header
*/
#define CAMRTC_TRACE_SIGNATURE_1 MK_U32(0x5420564e)
#define CAMRTC_TRACE_SIGNATURE_2 MK_U32(0x45434152)
#define CAMRTC_TRACE_ALIGNOF MK_ALIGN(64)
#define CAMRTC_TRACE_ALIGN CAMRTC_ALIGN(CAMRTC_TRACE_ALIGNOF)
struct camrtc_trace_memory_header {
/* layout: offset 0 */
union {
/*
* Temporary union to provide source compatibility
* during the transition to new header format.
*/
struct camrtc_tlv tlv;
uint32_t signature[4] __attribute__((deprecated));
};
uint32_t revision;
uint32_t reserved1;
uint32_t exception_offset;
uint32_t exception_size;
uint32_t exception_entries;
uint32_t reserved2;
uint32_t event_offset;
uint32_t event_size;
uint32_t event_entries;
uint32_t reserved3;
uint32_t reserved4[0xc8 / 4];
/* pointer: offset 0x100 */
uint32_t exception_next_idx;
uint32_t event_next_idx;
uint32_t reserved_ptrs[0x38 / 4];
} CAMRTC_TRACE_ALIGN;
/*
* Exception entry
*/
/* Reset = 0 */
#define CAMRTC_ARMV7_EXCEPTION_UNDEFINED_INSTRUCTION MK_U32(1)
/* SWI = 2 */
#define CAMRTC_ARMV7_EXCEPTION_PREFETCH_ABORT MK_U32(3)
#define CAMRTC_ARMV7_EXCEPTION_DATA_ABORT MK_U32(4)
/* RSVD, IRQ, FIQ should never happen */
#define CAMRTC_ARMV7_EXCEPTION_RSVD MK_U32(5)
#define CAMRTC_ARMV7_EXCEPTION_IRQ MK_U32(6)
#define CAMRTC_ARMV7_EXCEPTION_FIQ MK_U32(7)
struct camrtc_trace_callstack {
uint32_t lr_stack_addr; /* address in stack where lr is saved */
uint32_t lr; /* value of saved lr */
};
struct camrtc_trace_armv7_exception {
uint32_t len; /* length in bytes including this field */
uint32_t type; /* CAMRTC_TRACE_ARMV7_EXCEPTION_* above */
union {
uint32_t data[24];
struct {
uint32_t r0, r1, r2, r3;
uint32_t r4, r5, r6, r7;
uint32_t r8, r9, r10, r11;
uint32_t r12, sp, lr, pc;
uint32_t r8_prev, r9_prev, r10_prev, r11_prev, r12_prev;
uint32_t sp_prev, lr_prev;
uint32_t reserved;
};
} gpr;
/* program status registers */
uint32_t cpsr, spsr;
/* data fault status/address register */
uint32_t dfsr, dfar, adfsr;
/* instruction fault status/address register */
uint32_t ifsr, ifar, aifsr;
struct camrtc_trace_callstack callstack[CAMRTC_TRACE_CALLSTACK_MAX];
};
/*
* Each trace event shares the header.
* The format of event data is determined by event type.
*/
#define CAMRTC_TRACE_EVENT_HEADER_SIZE MK_SIZE(16)
#define CAMRTC_TRACE_EVENT_PAYLOAD_SIZE \
(CAMRTC_TRACE_EVENT_SIZE - CAMRTC_TRACE_EVENT_HEADER_SIZE)
#define CAMRTC_EVENT_TYPE_OFFSET MK_U32(24)
#define CAMRTC_EVENT_TYPE_MASK \
(MK_U32(0xff) << CAMRTC_EVENT_TYPE_OFFSET)
#define CAMRTC_EVENT_TYPE_FROM_ID(id) \
(((id) & CAMRTC_EVENT_TYPE_MASK) >> CAMRTC_EVENT_TYPE_OFFSET)
#define CAMRTC_EVENT_MODULE_OFFSET MK_U32(16)
#define CAMRTC_EVENT_MODULE_MASK \
(MK_U32(0xff) << CAMRTC_EVENT_MODULE_OFFSET)
#define CAMRTC_EVENT_MODULE_FROM_ID(id) \
(((id) & CAMRTC_EVENT_MODULE_MASK) >> CAMRTC_EVENT_MODULE_OFFSET)
#define CAMRTC_EVENT_SUBID_OFFSET MK_U32(0)
#define CAMRTC_EVENT_SUBID_MASK \
(MK_U32(0xffff) << CAMRTC_EVENT_SUBID_OFFSET)
#define CAMRTC_EVENT_SUBID_FROM_ID(id) \
(((id) & CAMRTC_EVENT_SUBID_MASK) >> CAMRTC_EVENT_SUBID_OFFSET)
#define CAMRTC_EVENT_MAKE_ID(type, module, subid) \
(((uint32_t)(type) << CAMRTC_EVENT_TYPE_OFFSET) | \
((uint32_t)(module) << CAMRTC_EVENT_MODULE_OFFSET) | (uint32_t)(subid))
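/*
 * Worked example (illustration only): camrtc_trace_type_string below
 * expands to CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_STRING,
 * CAMRTC_EVENT_MODULE_BASE, 0) == (4 << 24) | (1 << 16) | 0
 * == 0x04010000, and CAMRTC_EVENT_TYPE_FROM_ID() /
 * CAMRTC_EVENT_MODULE_FROM_ID() recover 4 and 1 from that value.
 */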
struct camrtc_event_header {
uint32_t len; /* Size in bytes including this field */
uint32_t id; /* Event ID */
uint64_t tstamp; /* Timestamp from TKE TSC */
};
struct camrtc_event_struct {
struct camrtc_event_header header;
union {
uint8_t data8[CAMRTC_TRACE_EVENT_PAYLOAD_SIZE];
uint32_t data32[CAMRTC_TRACE_EVENT_PAYLOAD_SIZE / 4];
} data;
};
// camrtc_event_type
#define CAMRTC_EVENT_TYPE_ARRAY MK_U32(0)
#define CAMRTC_EVENT_TYPE_ARMV7_EXCEPTION MK_U32(1)
#define CAMRTC_EVENT_TYPE_PAD MK_U32(2)
#define CAMRTC_EVENT_TYPE_START MK_U32(3)
#define CAMRTC_EVENT_TYPE_STRING MK_U32(4)
#define CAMRTC_EVENT_TYPE_BULK MK_U32(5)
// camrtc_event_module
#define CAMRTC_EVENT_MODULE_UNKNOWN MK_U32(0)
#define CAMRTC_EVENT_MODULE_BASE MK_U32(1)
#define CAMRTC_EVENT_MODULE_RTOS MK_U32(2)
#define CAMRTC_EVENT_MODULE_HEARTBEAT MK_U32(3)
#define CAMRTC_EVENT_MODULE_DBG MK_U32(4)
#define CAMRTC_EVENT_MODULE_MODS MK_U32(5)
#define CAMRTC_EVENT_MODULE_VINOTIFY MK_U32(6)
#define CAMRTC_EVENT_MODULE_I2C MK_U32(7)
#define CAMRTC_EVENT_MODULE_VI MK_U32(8)
#define CAMRTC_EVENT_MODULE_ISP MK_U32(9)
#define CAMRTC_EVENT_MODULE_NVCSI MK_U32(10)
#define CAMRTC_EVENT_MODULE_CAPTURE MK_U32(11)
#define CAMRTC_EVENT_MODULE_PERF MK_U32(12)
// camrtc_trace_event_type_ids
#define camrtc_trace_type_exception \
CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_ARMV7_EXCEPTION, \
CAMRTC_EVENT_MODULE_BASE, 0)
#define camrtc_trace_type_pad \
CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_PAD, \
CAMRTC_EVENT_MODULE_BASE, 0)
#define camrtc_trace_type_start \
CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_START, \
CAMRTC_EVENT_MODULE_BASE, 0)
#define camrtc_trace_type_string \
CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_STRING, \
CAMRTC_EVENT_MODULE_BASE, 0)
// camrtc_trace_base_ids
#define camrtc_trace_base_id(_subid) \
CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_ARRAY, \
CAMRTC_EVENT_MODULE_BASE, (_subid))
#define camrtc_trace_base_target_init \
camrtc_trace_base_id(1)
#define camrtc_trace_base_start_scheduler \
camrtc_trace_base_id(2)
// camrtc_trace_event_rtos_ids
#define camrtc_trace_rtos_id(_subid) \
CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_ARRAY, \
CAMRTC_EVENT_MODULE_RTOS, (_subid))
#define camrtc_trace_rtos_task_switched_in \
camrtc_trace_rtos_id(1)
#define camrtc_trace_rtos_increase_tick_count \
camrtc_trace_rtos_id(2)
#define camrtc_trace_rtos_low_power_idle_begin \
camrtc_trace_rtos_id(3)
#define camrtc_trace_rtos_low_power_idle_end \
camrtc_trace_rtos_id(4)
#define camrtc_trace_rtos_task_switched_out \
camrtc_trace_rtos_id(5)
#define camrtc_trace_rtos_task_priority_inherit \
camrtc_trace_rtos_id(6)
#define camrtc_trace_rtos_task_priority_disinherit \
camrtc_trace_rtos_id(7)
#define camrtc_trace_rtos_blocking_on_queue_receive \
camrtc_trace_rtos_id(8)
#define camrtc_trace_rtos_blocking_on_queue_send \
camrtc_trace_rtos_id(9)
#define camrtc_trace_rtos_moved_task_to_ready_state \
camrtc_trace_rtos_id(10)
#define camrtc_trace_rtos_queue_create \
camrtc_trace_rtos_id(11)
#define camrtc_trace_rtos_queue_create_failed \
camrtc_trace_rtos_id(12)
#define camrtc_trace_rtos_create_mutex \
camrtc_trace_rtos_id(13)
#define camrtc_trace_rtos_create_mutex_failed \
camrtc_trace_rtos_id(14)
#define camrtc_trace_rtos_give_mutex_recursive \
camrtc_trace_rtos_id(15)
#define camrtc_trace_rtos_give_mutex_recursive_failed \
camrtc_trace_rtos_id(16)
#define camrtc_trace_rtos_take_mutex_recursive \
camrtc_trace_rtos_id(17)
#define camrtc_trace_rtos_take_mutex_recursive_failed \
camrtc_trace_rtos_id(18)
#define camrtc_trace_rtos_create_counting_semaphore \
camrtc_trace_rtos_id(19)
#define camrtc_trace_rtos_create_counting_semaphore_failed \
camrtc_trace_rtos_id(20)
#define camrtc_trace_rtos_queue_send \
camrtc_trace_rtos_id(21)
#define camrtc_trace_rtos_queue_send_failed \
camrtc_trace_rtos_id(22)
#define camrtc_trace_rtos_queue_receive \
camrtc_trace_rtos_id(23)
#define camrtc_trace_rtos_queue_peek \
camrtc_trace_rtos_id(24)
#define camrtc_trace_rtos_queue_peek_from_isr \
camrtc_trace_rtos_id(25)
#define camrtc_trace_rtos_queue_receive_failed \
camrtc_trace_rtos_id(26)
#define camrtc_trace_rtos_queue_send_from_isr \
camrtc_trace_rtos_id(27)
#define camrtc_trace_rtos_queue_send_from_isr_failed \
camrtc_trace_rtos_id(28)
#define camrtc_trace_rtos_queue_receive_from_isr \
camrtc_trace_rtos_id(29)
#define camrtc_trace_rtos_queue_receive_from_isr_failed \
camrtc_trace_rtos_id(30)
#define camrtc_trace_rtos_queue_peek_from_isr_failed \
camrtc_trace_rtos_id(31)
#define camrtc_trace_rtos_queue_delete \
camrtc_trace_rtos_id(32)
#define camrtc_trace_rtos_task_create \
camrtc_trace_rtos_id(33)
#define camrtc_trace_rtos_task_create_failed \
camrtc_trace_rtos_id(34)
#define camrtc_trace_rtos_task_delete \
camrtc_trace_rtos_id(35)
#define camrtc_trace_rtos_task_delay_until \
camrtc_trace_rtos_id(36)
#define camrtc_trace_rtos_task_delay \
camrtc_trace_rtos_id(37)
#define camrtc_trace_rtos_task_priority_set \
camrtc_trace_rtos_id(38)
#define camrtc_trace_rtos_task_suspend \
camrtc_trace_rtos_id(39)
#define camrtc_trace_rtos_task_resume \
camrtc_trace_rtos_id(40)
#define camrtc_trace_rtos_task_resume_from_isr \
camrtc_trace_rtos_id(41)
#define camrtc_trace_rtos_task_increment_tick \
camrtc_trace_rtos_id(42)
#define camrtc_trace_rtos_timer_create \
camrtc_trace_rtos_id(43)
#define camrtc_trace_rtos_timer_create_failed \
camrtc_trace_rtos_id(44)
#define camrtc_trace_rtos_timer_command_send \
camrtc_trace_rtos_id(45)
#define camrtc_trace_rtos_timer_expired \
camrtc_trace_rtos_id(46)
#define camrtc_trace_rtos_timer_command_received \
camrtc_trace_rtos_id(47)
#define camrtc_trace_rtos_malloc \
camrtc_trace_rtos_id(48)
#define camrtc_trace_rtos_free \
camrtc_trace_rtos_id(49)
#define camrtc_trace_rtos_event_group_create \
camrtc_trace_rtos_id(50)
#define camrtc_trace_rtos_event_group_create_failed \
camrtc_trace_rtos_id(51)
#define camrtc_trace_rtos_event_group_sync_block \
camrtc_trace_rtos_id(52)
#define camrtc_trace_rtos_event_group_sync_end \
camrtc_trace_rtos_id(53)
#define camrtc_trace_rtos_event_group_wait_bits_block \
camrtc_trace_rtos_id(54)
#define camrtc_trace_rtos_event_group_wait_bits_end \
camrtc_trace_rtos_id(55)
#define camrtc_trace_rtos_event_group_clear_bits \
camrtc_trace_rtos_id(56)
#define camrtc_trace_rtos_event_group_clear_bits_from_isr \
camrtc_trace_rtos_id(57)
#define camrtc_trace_rtos_event_group_set_bits \
camrtc_trace_rtos_id(58)
#define camrtc_trace_rtos_event_group_set_bits_from_isr \
camrtc_trace_rtos_id(59)
#define camrtc_trace_rtos_event_group_delete \
camrtc_trace_rtos_id(60)
#define camrtc_trace_rtos_pend_func_call \
camrtc_trace_rtos_id(61)
#define camrtc_trace_rtos_pend_func_call_from_isr \
camrtc_trace_rtos_id(62)
#define camrtc_trace_rtos_queue_registry_add \
camrtc_trace_rtos_id(63)
// camrtc_trace_dbg_ids
#define camrtc_trace_dbg_id(_subid) \
CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_ARRAY, \
CAMRTC_EVENT_MODULE_DBG, (_subid))
#define camrtc_trace_dbg_unknown \
camrtc_trace_dbg_id(1)
#define camrtc_trace_dbg_enter \
camrtc_trace_dbg_id(2)
#define camrtc_trace_dbg_exit \
camrtc_trace_dbg_id(3)
#define camrtc_trace_dbg_set_loglevel \
camrtc_trace_dbg_id(4)
// camrtc_trace_vinotify_ids
#define camrtc_trace_vinotify_id(_subid) \
CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_ARRAY, \
CAMRTC_EVENT_MODULE_VINOTIFY, (_subid))
#define camrtc_trace_vinotify_event_ts64 \
camrtc_trace_vinotify_id(1)
#define camrtc_trace_vinotify_event \
camrtc_trace_vinotify_id(2)
#define camrtc_trace_vinotify_error \
camrtc_trace_vinotify_id(3)
// camrtc_trace_vi_ids
#define camrtc_trace_vi_id(_subid) \
CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_ARRAY, \
CAMRTC_EVENT_MODULE_VI, (_subid))
#define camrtc_trace_vi_frame_begin \
camrtc_trace_vi_id(1)
#define camrtc_trace_vi_frame_end \
camrtc_trace_vi_id(2)
// camrtc_trace_isp_ids
#define camrtc_trace_isp_id(_subid) \
CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_ARRAY, \
CAMRTC_EVENT_MODULE_ISP, (_subid))
#define camrtc_trace_isp_task_begin \
camrtc_trace_isp_id(1)
#define camrtc_trace_isp_task_end \
camrtc_trace_isp_id(2)
#define camrtc_trace_isp_falcon_traces_event \
camrtc_trace_isp_id(3)
// camrtc_trace_nvcsi_ids
#define camrtc_trace_nvcsi_id(_subid) \
CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_ARRAY, \
CAMRTC_EVENT_MODULE_NVCSI, (_subid))
#define camrtc_trace_nvcsi_intr \
camrtc_trace_nvcsi_id(1)
// camrtc_trace_capture_ids
#define camrtc_trace_capture_event_id(_subid) \
CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_ARRAY, \
CAMRTC_EVENT_MODULE_CAPTURE, (_subid))
#define camrtc_trace_capture_event_sof \
camrtc_trace_capture_event_id(0)
#define camrtc_trace_capture_event_eof \
camrtc_trace_capture_event_id(1)
#define camrtc_trace_capture_event_error \
camrtc_trace_capture_event_id(2)
#define camrtc_trace_capture_event_reschedule \
camrtc_trace_capture_event_id(3)
#define camrtc_trace_capture_event_sensor \
camrtc_trace_capture_event_id(4)
#define camrtc_trace_capture_event_reschedule_isp \
camrtc_trace_capture_event_id(5)
#define camrtc_trace_capture_event_isp_done \
camrtc_trace_capture_event_id(6)
#define camrtc_trace_capture_event_isp_error \
camrtc_trace_capture_event_id(7)
#define camrtc_trace_capture_event_inject \
camrtc_trace_capture_event_id(8)
#define camrtc_trace_capture_event_wdt \
camrtc_trace_capture_event_id(9)
#define camrtc_trace_capture_event_report_program \
camrtc_trace_capture_event_id(10)
#define camrtc_trace_capture_event_suspend \
camrtc_trace_capture_event_id(14)
#define camrtc_trace_capture_event_suspend_isp \
camrtc_trace_capture_event_id(15)
// camrtc_trace_perf_ids
#define camrtc_trace_perf_id(_subid) \
CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_ARRAY, \
CAMRTC_EVENT_MODULE_PERF, (_subid))
#define camrtc_trace_perf_counters \
camrtc_trace_perf_id(0)
#define camrtc_trace_perf_reset \
camrtc_trace_perf_id(1)
struct camrtc_trace_perf_counter_data {
uint64_t cycles;
uint32_t counters[3];
uint8_t events[3];
uint8_t name[25];
};
#pragma GCC diagnostic ignored "-Wpadded"
#endif /* INCLUDE_CAMRTC_TRACE_H */

View File

@@ -0,0 +1,692 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM freertos
#if !defined(_TRACE_FREERTOS_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_FREERTOS_H_
#include <linux/tracepoint.h>
/*
* Classes with no argument
*/
DECLARE_EVENT_CLASS(rtos__noarg,
TP_PROTO(u64 tstamp),
TP_ARGS(tstamp),
TP_STRUCT__entry(
__field(u64, tstamp)
),
TP_fast_assign(
__entry->tstamp = tstamp;
),
TP_printk("tstamp:%llu", __entry->tstamp)
);
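/*
 * Sketch (illustrative, not part of this patch): DECLARE_EVENT_CLASS()
 * defines the record layout and format string once; each DEFINE_EVENT()
 * below stamps out a named tracepoint that reuses it. A decoder of the
 * RTCPU trace buffer would then emit, e.g.:
 *
 *   trace_rtos_task_switched_in(tstamp);
 *
 * where tstamp is the 64-bit TSC timestamp from the event header.
 */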
/*
* Classes with 1 argument
*/
DECLARE_EVENT_CLASS(rtos__count,
TP_PROTO(u64 tstamp, u32 count),
TP_ARGS(tstamp, count),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, count)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->count = count;
),
TP_printk("tstamp:%llu count:%u", __entry->tstamp, __entry->count)
);
DECLARE_EVENT_CLASS(rtos__type,
TP_PROTO(u64 tstamp, u32 type),
TP_ARGS(tstamp, type),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, type)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->type = type;
),
TP_printk("tstamp:%llu type:%u", __entry->tstamp, __entry->type)
);
DECLARE_EVENT_CLASS(rtos__queue,
TP_PROTO(u64 tstamp, u32 queue),
TP_ARGS(tstamp, queue),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, queue)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->queue = queue;
),
TP_printk("tstamp:%llu queue:0x%08x", __entry->tstamp, __entry->queue)
);
DECLARE_EVENT_CLASS(rtos__tcb,
TP_PROTO(u64 tstamp, u32 tcb),
TP_ARGS(tstamp, tcb),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, tcb)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->tcb = tcb;
),
TP_printk("tstamp:%llu tcb:0x%08x", __entry->tstamp, __entry->tcb)
);
DECLARE_EVENT_CLASS(rtos__mutex,
TP_PROTO(u64 tstamp, u32 mutex),
TP_ARGS(tstamp, mutex),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, mutex)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->mutex = mutex;
),
TP_printk("tstamp:%llu mutex:0x%08x", __entry->tstamp, __entry->mutex)
);
DECLARE_EVENT_CLASS(rtos__timer,
TP_PROTO(u64 tstamp, u32 timer),
TP_ARGS(tstamp, timer),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, timer)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->timer = timer;
),
TP_printk("tstamp:%llu timer:0x%08x", __entry->tstamp, __entry->timer)
);
DECLARE_EVENT_CLASS(rtos__eventgroup,
TP_PROTO(u64 tstamp, u32 eventgroup),
TP_ARGS(tstamp, eventgroup),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, eventgroup)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->eventgroup = eventgroup;
),
TP_printk("tstamp:%llu eventgroup:%u", __entry->tstamp,
__entry->eventgroup)
);
/*
* Classes with 2 arguments
*/
DECLARE_EVENT_CLASS(rtos__tcb_priority,
TP_PROTO(u64 tstamp, u32 tcb, u32 priority),
TP_ARGS(tstamp, tcb, priority),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, tcb)
__field(u32, priority)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->tcb = tcb;
__entry->priority = priority;
),
TP_printk("tstamp:%llu tcb:%u priority:%u",
__entry->tstamp, __entry->tcb, __entry->priority)
);
DECLARE_EVENT_CLASS(rtos__addr_size,
TP_PROTO(u64 tstamp, u32 addr, u32 size),
TP_ARGS(tstamp, addr, size),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, addr)
__field(u32, size)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->addr = addr;
__entry->size = size;
),
TP_printk("tstamp:%llu addr:%u size:%u",
__entry->tstamp, __entry->addr, __entry->size)
);
DECLARE_EVENT_CLASS(rtos__eventgroup_wait,
TP_PROTO(u64 tstamp, u32 eventgroup, u32 wait),
TP_ARGS(tstamp, eventgroup, wait),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, eventgroup)
__field(u32, wait)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->eventgroup = eventgroup;
__entry->wait = wait;
),
TP_printk("tstamp:%llu eventgroup:%u wait:%u",
__entry->tstamp, __entry->eventgroup, __entry->wait)
);
DECLARE_EVENT_CLASS(rtos__eventgroup_clear,
TP_PROTO(u64 tstamp, u32 eventgroup, u32 clear),
TP_ARGS(tstamp, eventgroup, clear),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, eventgroup)
__field(u32, clear)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->eventgroup = eventgroup;
__entry->clear = clear;
),
TP_printk("tstamp:%llu eventgroup:%u clear:%u",
__entry->tstamp, __entry->eventgroup, __entry->clear)
);
DECLARE_EVENT_CLASS(rtos__eventgroup_set,
TP_PROTO(u64 tstamp, u32 eventgroup, u32 set),
TP_ARGS(tstamp, eventgroup, set),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, eventgroup)
__field(u32, set)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->eventgroup = eventgroup;
__entry->set = set;
),
TP_printk("tstamp:%llu eventgroup:%u set:%u",
__entry->tstamp, __entry->eventgroup, __entry->set)
);
DECLARE_EVENT_CLASS(rtos__queue_name,
TP_PROTO(u64 tstamp, u32 queue, u32 name),
TP_ARGS(tstamp, queue, name),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, queue)
__field(u32, name)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->queue = queue;
__entry->name = name;
),
TP_printk("tstamp:%llu queue:%u name:0x%08x",
__entry->tstamp, __entry->queue, __entry->name)
);
/*
* Classes with 3 arguments
*/
DECLARE_EVENT_CLASS(rtos__ptimer_msgid_value,
TP_PROTO(u64 tstamp, u32 ptimer, u32 msgid, u32 value),
TP_ARGS(tstamp, ptimer, msgid, value),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, ptimer)
__field(u32, msgid)
__field(u32, value)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->ptimer = ptimer;
__entry->msgid = msgid;
__entry->value = value;
),
TP_printk("tstamp:%llu timer:0x%08x msgid:%u value:%u",
__entry->tstamp, __entry->ptimer, __entry->msgid,
__entry->value)
);
DECLARE_EVENT_CLASS(rtos__eventgroup_set_wait,
TP_PROTO(u64 tstamp, u32 eventgroup, u32 set, u32 wait),
TP_ARGS(tstamp, eventgroup, set, wait),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, eventgroup)
__field(u32, set)
__field(u32, wait)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->eventgroup = eventgroup;
__entry->set = set;
__entry->wait = wait;
),
TP_printk("tstamp:%llu eventgroup:%u set:%u wait:%u",
__entry->tstamp, __entry->eventgroup, __entry->set,
__entry->wait)
);
DECLARE_EVENT_CLASS(rtos__eventgroup_wait_timeout,
TP_PROTO(u64 tstamp, u32 eventgroup, u32 wait, u32 timeout),
TP_ARGS(tstamp, eventgroup, wait, timeout),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, eventgroup)
__field(u32, wait)
__field(u32, timeout)
),
	TP_fast_assign(
		__entry->tstamp = tstamp;
		__entry->eventgroup = eventgroup;
		__entry->wait = wait;
		__entry->timeout = timeout;
	),
TP_printk("tstamp:%llu eventgroup:%u wait:%u timeout:%u",
__entry->tstamp, __entry->eventgroup,
__entry->wait, __entry->timeout)
);
/*
* Classes with 4 arguments
*/
DECLARE_EVENT_CLASS(rtos__timer_msgid_value_return,
TP_PROTO(u64 tstamp, u32 timer, u32 msgid, u32 value, u32 ret),
TP_ARGS(tstamp, timer, msgid, value, ret),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, timer)
__field(u32, msgid)
__field(u32, value)
__field(u32, ret)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->timer = timer;
__entry->msgid = msgid;
__entry->value = value;
__entry->ret = ret;
),
TP_printk("tstamp:%llu timer:%u msgid:%u value:%u return:%u",
__entry->tstamp, __entry->timer, __entry->msgid,
__entry->value, __entry->ret)
);
DECLARE_EVENT_CLASS(rtos__eventgroup_set_wait_timeout,
TP_PROTO(u64 tstamp, u32 eventgroup, u32 set, u32 wait, u32 timeout),
TP_ARGS(tstamp, eventgroup, set, wait, timeout),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, eventgroup)
__field(u32, set)
__field(u32, wait)
__field(u32, timeout)
),
	TP_fast_assign(
		__entry->tstamp = tstamp;
		__entry->eventgroup = eventgroup;
		__entry->set = set;
		__entry->wait = wait;
		__entry->timeout = timeout;
	),
TP_printk("tstamp:%llu eventgroup:%u set:%u wait:%u timeout:%u",
__entry->tstamp, __entry->eventgroup, __entry->set,
__entry->wait, __entry->timeout)
);
DECLARE_EVENT_CLASS(rtos__function_param1_param2_ret,
TP_PROTO(u64 tstamp, u32 function, u32 param1, u32 param2, u32 ret),
TP_ARGS(tstamp, function, param1, param2, ret),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, function)
__field(u32, param1)
__field(u32, param2)
__field(u32, ret)
),
	TP_fast_assign(
		__entry->tstamp = tstamp;
		__entry->function = function;
		__entry->param1 = param1;
		__entry->param2 = param2;
		__entry->ret = ret;
	),
TP_printk(
"tstamp:%llu function:0x%08x param1:0x%08x param2:0x%08x ret:%u",
__entry->tstamp, __entry->function, __entry->param1,
__entry->param2, __entry->ret)
);
/*
* Events
*/
DEFINE_EVENT(rtos__noarg, rtos_task_switched_in,
TP_PROTO(u64 tstamp),
TP_ARGS(tstamp)
);
DEFINE_EVENT(rtos__count, rtos_increase_tick_count,
TP_PROTO(u64 tstamp, u32 count),
TP_ARGS(tstamp, count)
);
DEFINE_EVENT(rtos__noarg, rtos_low_power_idle_begin,
TP_PROTO(u64 tstamp),
TP_ARGS(tstamp)
);
DEFINE_EVENT(rtos__noarg, rtos_low_power_idle_end,
TP_PROTO(u64 tstamp),
TP_ARGS(tstamp)
);
DEFINE_EVENT(rtos__noarg, rtos_task_switched_out,
TP_PROTO(u64 tstamp),
TP_ARGS(tstamp)
);
DEFINE_EVENT(rtos__tcb_priority, rtos_task_priority_inherit,
TP_PROTO(u64 tstamp, u32 tcb, u32 priority),
TP_ARGS(tstamp, tcb, priority)
);
DEFINE_EVENT(rtos__tcb_priority, rtos_task_priority_disinherit,
TP_PROTO(u64 tstamp, u32 tcb, u32 priority),
TP_ARGS(tstamp, tcb, priority)
);
DEFINE_EVENT(rtos__queue, rtos_blocking_on_queue_receive,
TP_PROTO(u64 tstamp, u32 queue),
TP_ARGS(tstamp, queue)
);
DEFINE_EVENT(rtos__queue, rtos_blocking_on_queue_send,
TP_PROTO(u64 tstamp, u32 queue),
TP_ARGS(tstamp, queue)
);
DEFINE_EVENT(rtos__tcb, rtos_moved_task_to_ready_state,
TP_PROTO(u64 tstamp, u32 tcb),
TP_ARGS(tstamp, tcb)
);
DEFINE_EVENT(rtos__queue, rtos_queue_create,
TP_PROTO(u64 tstamp, u32 queue),
TP_ARGS(tstamp, queue)
);
DEFINE_EVENT(rtos__type, rtos_queue_create_failed,
TP_PROTO(u64 tstamp, u32 type),
TP_ARGS(tstamp, type)
);
DEFINE_EVENT(rtos__queue, rtos_create_mutex,
TP_PROTO(u64 tstamp, u32 queue),
TP_ARGS(tstamp, queue)
);
DEFINE_EVENT(rtos__noarg, rtos_create_mutex_failed,
TP_PROTO(u64 tstamp),
TP_ARGS(tstamp)
);
DEFINE_EVENT(rtos__mutex, rtos_give_mutex_recursive,
TP_PROTO(u64 tstamp, u32 mutex),
TP_ARGS(tstamp, mutex)
);
DEFINE_EVENT(rtos__mutex, rtos_give_mutex_recursive_failed,
TP_PROTO(u64 tstamp, u32 mutex),
TP_ARGS(tstamp, mutex)
);
DEFINE_EVENT(rtos__mutex, rtos_take_mutex_recursive,
TP_PROTO(u64 tstamp, u32 mutex),
TP_ARGS(tstamp, mutex)
);
DEFINE_EVENT(rtos__mutex, rtos_take_mutex_recursive_failed,
TP_PROTO(u64 tstamp, u32 mutex),
TP_ARGS(tstamp, mutex)
);
DEFINE_EVENT(rtos__noarg, rtos_create_counting_semaphore,
TP_PROTO(u64 tstamp),
TP_ARGS(tstamp)
);
DEFINE_EVENT(rtos__noarg, rtos_create_counting_semaphore_failed,
TP_PROTO(u64 tstamp),
TP_ARGS(tstamp)
);
DEFINE_EVENT(rtos__queue, rtos_queue_send,
TP_PROTO(u64 tstamp, u32 queue),
TP_ARGS(tstamp, queue)
);
DEFINE_EVENT(rtos__queue, rtos_queue_send_failed,
TP_PROTO(u64 tstamp, u32 queue),
TP_ARGS(tstamp, queue)
);
DEFINE_EVENT(rtos__queue, rtos_queue_receive,
TP_PROTO(u64 tstamp, u32 queue),
TP_ARGS(tstamp, queue)
);
DEFINE_EVENT(rtos__queue, rtos_queue_peek,
TP_PROTO(u64 tstamp, u32 queue),
TP_ARGS(tstamp, queue)
);
DEFINE_EVENT(rtos__queue, rtos_queue_peek_from_isr,
TP_PROTO(u64 tstamp, u32 queue),
TP_ARGS(tstamp, queue)
);
DEFINE_EVENT(rtos__queue, rtos_queue_receive_failed,
TP_PROTO(u64 tstamp, u32 queue),
TP_ARGS(tstamp, queue)
);
DEFINE_EVENT(rtos__queue, rtos_queue_send_from_isr,
TP_PROTO(u64 tstamp, u32 queue),
TP_ARGS(tstamp, queue)
);
DEFINE_EVENT(rtos__queue, rtos_queue_send_from_isr_failed,
TP_PROTO(u64 tstamp, u32 queue),
TP_ARGS(tstamp, queue)
);
DEFINE_EVENT(rtos__queue, rtos_queue_receive_from_isr,
TP_PROTO(u64 tstamp, u32 queue),
TP_ARGS(tstamp, queue)
);
DEFINE_EVENT(rtos__queue, rtos_queue_receive_from_isr_failed,
TP_PROTO(u64 tstamp, u32 queue),
TP_ARGS(tstamp, queue)
);
DEFINE_EVENT(rtos__queue, rtos_queue_peek_from_isr_failed,
TP_PROTO(u64 tstamp, u32 queue),
TP_ARGS(tstamp, queue)
);
DEFINE_EVENT(rtos__queue, rtos_queue_delete,
TP_PROTO(u64 tstamp, u32 queue),
TP_ARGS(tstamp, queue)
);
DEFINE_EVENT(rtos__tcb, rtos_task_create,
TP_PROTO(u64 tstamp, u32 tcb),
TP_ARGS(tstamp, tcb)
);
DEFINE_EVENT(rtos__noarg, rtos_task_create_failed,
TP_PROTO(u64 tstamp),
TP_ARGS(tstamp)
);
DEFINE_EVENT(rtos__tcb, rtos_task_delete,
TP_PROTO(u64 tstamp, u32 tcb),
TP_ARGS(tstamp, tcb)
);
DEFINE_EVENT(rtos__noarg, rtos_task_delay_until,
TP_PROTO(u64 tstamp),
TP_ARGS(tstamp)
);
DEFINE_EVENT(rtos__noarg, rtos_task_delay,
TP_PROTO(u64 tstamp),
TP_ARGS(tstamp)
);
DEFINE_EVENT(rtos__tcb_priority, rtos_task_priority_set,
TP_PROTO(u64 tstamp, u32 tcb, u32 priority),
TP_ARGS(tstamp, tcb, priority)
);
DEFINE_EVENT(rtos__tcb, rtos_task_suspend,
TP_PROTO(u64 tstamp, u32 tcb),
TP_ARGS(tstamp, tcb)
);
DEFINE_EVENT(rtos__tcb, rtos_task_resume,
TP_PROTO(u64 tstamp, u32 tcb),
TP_ARGS(tstamp, tcb)
);
DEFINE_EVENT(rtos__tcb, rtos_task_resume_from_isr,
TP_PROTO(u64 tstamp, u32 tcb),
TP_ARGS(tstamp, tcb)
);
DEFINE_EVENT(rtos__count, rtos_task_increment_tick,
TP_PROTO(u64 tstamp, u32 count),
TP_ARGS(tstamp, count)
);
DEFINE_EVENT(rtos__timer, rtos_timer_create,
TP_PROTO(u64 tstamp, u32 timer),
TP_ARGS(tstamp, timer)
);
DEFINE_EVENT(rtos__noarg, rtos_timer_create_failed,
TP_PROTO(u64 tstamp),
TP_ARGS(tstamp)
);
DEFINE_EVENT(rtos__timer_msgid_value_return, rtos_timer_command_send,
TP_PROTO(u64 tstamp, u32 timer, u32 msgid, u32 value, u32 ret),
TP_ARGS(tstamp, timer, msgid, value, ret)
);
DEFINE_EVENT(rtos__timer, rtos_timer_expired,
TP_PROTO(u64 tstamp, u32 timer),
TP_ARGS(tstamp, timer)
);
DEFINE_EVENT(rtos__ptimer_msgid_value, rtos_timer_command_received,
TP_PROTO(u64 tstamp, u32 ptimer, u32 msgid, u32 value),
TP_ARGS(tstamp, ptimer, msgid, value)
);
DEFINE_EVENT(rtos__addr_size, rtos_malloc,
TP_PROTO(u64 tstamp, u32 addr, u32 size),
TP_ARGS(tstamp, addr, size)
);
DEFINE_EVENT(rtos__addr_size, rtos_free,
TP_PROTO(u64 tstamp, u32 addr, u32 size),
TP_ARGS(tstamp, addr, size)
);
DEFINE_EVENT(rtos__eventgroup, rtos_event_group_create,
TP_PROTO(u64 tstamp, u32 eventgroup),
TP_ARGS(tstamp, eventgroup)
);
DEFINE_EVENT(rtos__noarg, rtos_event_group_create_failed,
TP_PROTO(u64 tstamp),
TP_ARGS(tstamp)
);
DEFINE_EVENT(rtos__eventgroup_set_wait, rtos_event_group_sync_block,
TP_PROTO(u64 tstamp, u32 eventgroup, u32 set, u32 wait),
TP_ARGS(tstamp, eventgroup, set, wait)
);
DEFINE_EVENT(rtos__eventgroup_set_wait_timeout, rtos_event_group_sync_end,
TP_PROTO(u64 tstamp, u32 eventgroup, u32 set, u32 wait, u32 timeout),
TP_ARGS(tstamp, eventgroup, set, wait, timeout)
);
DEFINE_EVENT(rtos__eventgroup_wait, rtos_event_group_wait_bits_block,
TP_PROTO(u64 tstamp, u32 eventgroup, u32 wait),
TP_ARGS(tstamp, eventgroup, wait)
);
DEFINE_EVENT(rtos__eventgroup_wait_timeout, rtos_event_group_wait_bits_end,
TP_PROTO(u64 tstamp, u32 eventgroup, u32 wait, u32 timeout),
TP_ARGS(tstamp, eventgroup, wait, timeout)
);
DEFINE_EVENT(rtos__eventgroup_clear, rtos_event_group_clear_bits,
TP_PROTO(u64 tstamp, u32 eventgroup, u32 clear),
TP_ARGS(tstamp, eventgroup, clear)
);
DEFINE_EVENT(rtos__eventgroup_clear, rtos_event_group_clear_bits_from_isr,
TP_PROTO(u64 tstamp, u32 eventgroup, u32 clear),
TP_ARGS(tstamp, eventgroup, clear)
);
DEFINE_EVENT(rtos__eventgroup_set, rtos_event_group_set_bits,
TP_PROTO(u64 tstamp, u32 eventgroup, u32 set),
TP_ARGS(tstamp, eventgroup, set)
);
DEFINE_EVENT(rtos__eventgroup_set, rtos_event_group_set_bits_from_isr,
TP_PROTO(u64 tstamp, u32 eventgroup, u32 set),
TP_ARGS(tstamp, eventgroup, set)
);
DEFINE_EVENT(rtos__eventgroup, rtos_event_group_delete,
TP_PROTO(u64 tstamp, u32 eventgroup),
TP_ARGS(tstamp, eventgroup)
);
DEFINE_EVENT(rtos__function_param1_param2_ret, rtos_pend_func_call,
TP_PROTO(u64 tstamp, u32 function, u32 param1, u32 param2, u32 ret),
TP_ARGS(tstamp, function, param1, param2, ret)
);
DEFINE_EVENT(rtos__function_param1_param2_ret, rtos_pend_func_call_from_isr,
TP_PROTO(u64 tstamp, u32 function, u32 param1, u32 param2, u32 ret),
TP_ARGS(tstamp, function, param1, param2, ret)
);
DEFINE_EVENT(rtos__queue_name, rtos_queue_registry_add,
TP_PROTO(u64 tstamp, u32 queue, u32 name),
TP_ARGS(tstamp, queue, name)
);
#endif /* _TRACE_FREERTOS_H_ */
#include <trace/define_trace.h>
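/*
 * Usage note (hedged sketch): as with any tracepoint header, exactly one
 * translation unit in the module is expected to define CREATE_TRACE_POINTS
 * before including this file, so that <trace/define_trace.h> emits the
 * tracepoint bodies once:
 *
 *   #define CREATE_TRACE_POINTS
 *   #include <trace/events/freertos.h>   // illustrative path only
 *
 * All other users include the header normally and call trace_rtos_*().
 */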

View File

@@ -0,0 +1,230 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM tegra_capture
#if !defined(_TRACE_TEGRA_CAPTURE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_TEGRA_CAPTURE_H
#include <linux/tracepoint.h>
/*
* Classes
*/
#ifndef IVC_NAME_LEN
#define IVC_NAME_LEN 16
#endif
DECLARE_EVENT_CLASS(capture__msg,
TP_PROTO(const char *ivc_name, u32 msg_id, u32 ch_id),
TP_ARGS(ivc_name, msg_id, ch_id),
TP_STRUCT__entry(
__array(char, ivc_name, IVC_NAME_LEN)
__field(u32, msg_id)
__field(u32, ch_id)
),
TP_fast_assign(
strlcpy(__entry->ivc_name, ivc_name, sizeof(__entry->ivc_name));
__entry->msg_id = msg_id;
__entry->ch_id = ch_id;
),
TP_printk("ivc:\"%s\" msg:0x%02x ch:0x%02x",
__entry->ivc_name,
__entry->msg_id,
__entry->ch_id)
);
/*
* Events for capture and capture control protocol
*/
TRACE_EVENT(capture_ivc_notify,
TP_PROTO(const char *ivc_name),
TP_ARGS(ivc_name),
TP_STRUCT__entry(
__array(char, ivc_name, IVC_NAME_LEN)
),
TP_fast_assign(
strlcpy(__entry->ivc_name, ivc_name, sizeof(__entry->ivc_name));
),
TP_printk("ivc:\"%s\"", __entry->ivc_name)
);
DEFINE_EVENT(capture__msg, capture_ivc_recv,
TP_PROTO(const char *ivc_name, u32 msg_id, u32 ch_id),
TP_ARGS(ivc_name, msg_id, ch_id)
);
DEFINE_EVENT(capture__msg, capture_ivc_send,
TP_PROTO(const char *ivc_name, u32 msg_id, u32 ch_id),
TP_ARGS(ivc_name, msg_id, ch_id)
);
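/*
 * Illustrative call site (hedged; the actual caller lives in the IVC
 * transport code, outside this hunk): as a capture message crosses the
 * channel, the driver would emit
 *
 *   trace_capture_ivc_send(ivc_name, msg_id, ch_id);
 *   trace_capture_ivc_recv(ivc_name, msg_id, ch_id);
 *
 * with msg_id and ch_id taken from the capture message header.
 */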
TRACE_EVENT(capture_ivc_send_error,
TP_PROTO(const char *ivc_name, u32 msg_id, u32 ch_id, int err),
TP_ARGS(ivc_name, msg_id, ch_id, err),
TP_STRUCT__entry(
__array(char, ivc_name, IVC_NAME_LEN)
__field(u32, msg_id)
__field(u32, ch_id)
__field(int, err)
),
TP_fast_assign(
strlcpy(__entry->ivc_name, ivc_name, sizeof(__entry->ivc_name));
__entry->msg_id = msg_id;
__entry->ch_id = ch_id;
__entry->err = err;
),
TP_printk("ivc:\"%s\" msg:0x%02x ch:0x%02x: err:%d",
__entry->ivc_name,
__entry->msg_id,
__entry->ch_id,
__entry->err)
);
/*
* Capture scheduler events from RCE
*/
DECLARE_EVENT_CLASS(capture__progress_event,
TP_PROTO(u64 ts, u32 channel_id, u32 sequence),
TP_ARGS(ts, channel_id, sequence),
TP_STRUCT__entry(
__field(u64, ts)
__field(u32, channel_id)
__field(u32, sequence)
),
TP_fast_assign(
__entry->ts = ts;
__entry->channel_id = channel_id;
__entry->sequence = sequence;
),
TP_printk("ts:%llu ch:0x%02x seq:%u",
__entry->ts,
__entry->channel_id,
__entry->sequence)
);
DECLARE_EVENT_CLASS(capture__isp_event,
TP_PROTO(u64 ts, u32 channel_id, u32 prog_sequence, u32 cap_sequence,
u8 isp_settings_id, u8 vi_channel_id),
TP_ARGS(ts, channel_id, prog_sequence, cap_sequence, isp_settings_id, vi_channel_id),
TP_STRUCT__entry(
__field(u64, ts)
__field(u32, channel_id)
__field(u32, prog_sequence)
__field(u32, cap_sequence)
__field(u8, isp_settings_id)
__field(u8, vi_channel_id)
),
	TP_fast_assign(
		__entry->ts = ts;
		__entry->channel_id = channel_id;
		__entry->prog_sequence = prog_sequence;
		__entry->cap_sequence = cap_sequence;
		__entry->isp_settings_id = isp_settings_id;
		__entry->vi_channel_id = vi_channel_id;
	),
TP_printk("ts:%llu ch:0x%02x seq:%u prog:%u set:%u vi:%u",
__entry->ts,
__entry->channel_id,
__entry->cap_sequence,
__entry->prog_sequence,
__entry->isp_settings_id,
__entry->vi_channel_id)
);
DECLARE_EVENT_CLASS(capture__suspend_event,
TP_PROTO(u64 ts, bool suspend),
TP_ARGS(ts, suspend),
TP_STRUCT__entry(
__field(u64, ts)
__field(bool, suspend)
),
TP_fast_assign(
__entry->ts = ts;
__entry->suspend = suspend;
),
TP_printk("ts:%llu suspend:%s",
__entry->ts,
__entry->suspend ? "true" : "false")
);
DEFINE_EVENT(capture__progress_event, capture_event_sof,
TP_PROTO(u64 ts, u32 channel_id, u32 sequence),
TP_ARGS(ts, channel_id, sequence)
);
DEFINE_EVENT(capture__progress_event, capture_event_eof,
TP_PROTO(u64 ts, u32 channel_id, u32 sequence),
TP_ARGS(ts, channel_id, sequence)
);
DEFINE_EVENT(capture__progress_event, capture_event_error,
TP_PROTO(u64 ts, u32 channel_id, u32 sequence),
TP_ARGS(ts, channel_id, sequence)
);
DEFINE_EVENT(capture__progress_event, capture_event_reschedule,
TP_PROTO(u64 ts, u32 channel_id, u32 sequence),
TP_ARGS(ts, channel_id, sequence)
);
DEFINE_EVENT(capture__isp_event, capture_event_reschedule_isp,
TP_PROTO(u64 ts, u32 channel_id, u32 prog_sequence, u32 cap_sequence,
u8 isp_settings_id, u8 vi_channel_id),
TP_ARGS(ts, channel_id, prog_sequence, cap_sequence, isp_settings_id, vi_channel_id)
);
DEFINE_EVENT(capture__isp_event, capture_event_isp_done,
TP_PROTO(u64 ts, u32 channel_id, u32 prog_sequence, u32 cap_sequence,
u8 isp_settings_id, u8 vi_channel_id),
TP_ARGS(ts, channel_id, prog_sequence, cap_sequence, isp_settings_id, vi_channel_id)
);
DEFINE_EVENT(capture__isp_event, capture_event_isp_error,
TP_PROTO(u64 ts, u32 channel_id, u32 prog_sequence, u32 cap_sequence,
u8 isp_settings_id, u8 vi_channel_id),
TP_ARGS(ts, channel_id, prog_sequence, cap_sequence, isp_settings_id, vi_channel_id)
);
DEFINE_EVENT(capture__progress_event, capture_event_report_program,
TP_PROTO(u64 ts, u32 channel_id, u32 sequence),
TP_ARGS(ts, channel_id, sequence)
);
TRACE_EVENT(capture_event_wdt,
TP_PROTO(u64 ts),
TP_ARGS(ts),
TP_STRUCT__entry(
__field(u64, ts)
),
TP_fast_assign(
__entry->ts = ts;
),
TP_printk("ts:%llu",
__entry->ts)
);
DEFINE_EVENT(capture__suspend_event, capture_event_suspend,
TP_PROTO(u64 ts, bool suspend),
TP_ARGS(ts, suspend)
);
DEFINE_EVENT(capture__suspend_event, capture_event_suspend_isp,
TP_PROTO(u64 ts, bool suspend),
TP_ARGS(ts, suspend)
);
#endif /* _TRACE_TEGRA_CAPTURE_H */
#include <trace/define_trace.h>

View File

@@ -0,0 +1,510 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM tegra_rtcpu
#if !defined(_TRACE_TEGRA_RTCPU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_TEGRA_RTCPU_H
#include <linux/tracepoint.h>
#include <soc/tegra/camrtc-trace.h>
/*
* Classes
*/
DECLARE_EVENT_CLASS(rtcpu__noarg,
TP_PROTO(u64 tstamp),
TP_ARGS(tstamp),
TP_STRUCT__entry(
__field(u64, tstamp)
),
TP_fast_assign(
__entry->tstamp = tstamp;
),
TP_printk("tstamp:%llu", __entry->tstamp)
);
DECLARE_EVENT_CLASS(rtcpu__arg1,
TP_PROTO(u64 tstamp, u32 data1),
TP_ARGS(tstamp, data1),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, data1)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->data1 = data1;
),
TP_printk("tstamp:%llu, data:%u", __entry->tstamp,
__entry->data1)
);
DECLARE_EVENT_CLASS(rtcpu__dump,
TP_PROTO(u64 tstamp, u32 id, u32 len, void *data),
TP_ARGS(tstamp, id, len, data),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, id)
__field(u32, len)
__dynamic_array(__u8, data, len)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->id = id;
__entry->len = len;
memcpy(__get_dynamic_array(data), data, len);
),
TP_printk("tstamp:%llu id:0x%08x len:%u data:%s",
__entry->tstamp, __entry->id, __entry->len,
__print_hex(__get_dynamic_array(data), __entry->len))
);
/*
* Unknown events
*/
DEFINE_EVENT(rtcpu__dump, rtcpu_unknown,
TP_PROTO(u64 tstamp, u32 id, u32 len, void *data),
TP_ARGS(tstamp, id, len, data)
);
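/*
 * Sketch (illustrative): rtcpu__dump records a variable-length payload;
 * __dynamic_array() reserves len bytes in the ring-buffer record and the
 * memcpy() in TP_fast_assign fills it. Using camrtc_event_struct from
 * camrtc-trace.h, a decoder falling through to the unknown case might call:
 *
 *   trace_rtcpu_unknown(event->header.tstamp, event->header.id,
 *                       event->header.len - sizeof(event->header),
 *                       event->data.data8);
 *
 * (The exact call site is outside this hunk.)
 */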
/*
* Non ARRAY event types
*/
TRACE_EVENT(rtcpu_armv7_exception,
TP_PROTO(u64 tstamp, u32 type),
TP_ARGS(tstamp, type),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, type)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->type = type;
),
TP_printk("tstamp:%llu type:%u", __entry->tstamp, __entry->type)
);
TRACE_EVENT(rtcpu_start,
TP_PROTO(u64 tstamp),
TP_ARGS(tstamp),
TP_STRUCT__entry(
__field(u64, tstamp)
),
TP_fast_assign(
__entry->tstamp = tstamp;
),
TP_printk("tstamp:%llu", __entry->tstamp)
);
#ifndef TEGRA_RTCPU_TRACE_STRING_SIZE
#define TEGRA_RTCPU_TRACE_STRING_SIZE 48
#endif
TRACE_EVENT(rtcpu_string,
TP_PROTO(u64 tstamp, u32 id, u32 len, const char *data),
TP_ARGS(tstamp, id, len, data),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, id)
__field(u32, len)
__array(char, data, TEGRA_RTCPU_TRACE_STRING_SIZE)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->id = id;
__entry->len = len;
		strlcpy(__entry->data, data, sizeof(__entry->data));
),
TP_printk("tstamp:%llu id:0x%08x str:\"%.*s\"",
__entry->tstamp, __entry->id,
(int)__entry->len, __entry->data)
);
DEFINE_EVENT(rtcpu__dump, rtcpu_bulk,
TP_PROTO(u64 tstamp, u32 id, u32 len, void *data),
TP_ARGS(tstamp, id, len, data)
);
/*
* Base events
*/
DEFINE_EVENT(rtcpu__noarg, rtcpu_target_init,
TP_PROTO(u64 tstamp),
TP_ARGS(tstamp)
);
DEFINE_EVENT(rtcpu__noarg, rtcpu_start_scheduler,
TP_PROTO(u64 tstamp),
TP_ARGS(tstamp)
);
/*
* Debug interface
*/
DEFINE_EVENT(rtcpu__arg1, rtcpu_dbg_unknown,
TP_PROTO(u64 tstamp, u32 data1),
TP_ARGS(tstamp, data1)
);
DEFINE_EVENT(rtcpu__arg1, rtcpu_dbg_enter,
TP_PROTO(u64 tstamp, u32 req_type),
TP_ARGS(tstamp, req_type)
);
DEFINE_EVENT(rtcpu__noarg, rtcpu_dbg_exit,
TP_PROTO(u64 tstamp),
TP_ARGS(tstamp)
);
TRACE_EVENT(rtcpu_dbg_set_loglevel,
TP_PROTO(u64 tstamp, u32 old_level, u32 new_level),
TP_ARGS(tstamp, old_level, new_level),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, old_level)
__field(u32, new_level)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->old_level = old_level;
__entry->new_level = new_level;
),
TP_printk("tstamp:%llu old:%u new:%u", __entry->tstamp,
__entry->old_level, __entry->new_level)
);
/*
* Perf events
*/
DECLARE_EVENT_CLASS(rtcpu__perf,
TP_PROTO(u64 tstamp, const struct camrtc_trace_perf_counter_data *perf),
TP_ARGS(tstamp, perf),
TP_STRUCT__entry(
__field(u64, tstamp)
__field_struct(struct camrtc_trace_perf_counter_data, perf)
),
TP_fast_assign(
__entry->tstamp = tstamp;
if (perf)
__entry->perf = *perf;
else
			memset(&__entry->perf, 0, sizeof(__entry->perf));
),
TP_printk("ts:%llu name:%.*s cc:%llu e%u:%u e%u:%u e%u:%u",
__entry->tstamp, (int)sizeof(__entry->perf.name),
__entry->perf.name, __entry->perf.cycles,
__entry->perf.events[0], __entry->perf.counters[0],
__entry->perf.events[1], __entry->perf.counters[1],
__entry->perf.events[2], __entry->perf.counters[2])
);
DEFINE_EVENT(rtcpu__perf, rtcpu_perf_counters,
TP_PROTO(u64 tstamp, const struct camrtc_trace_perf_counter_data *perf),
TP_ARGS(tstamp, perf)
);
DEFINE_EVENT(rtcpu__perf, rtcpu_perf_reset,
TP_PROTO(u64 tstamp, const struct camrtc_trace_perf_counter_data *perf),
TP_ARGS(tstamp, perf)
);
/*
* VI Notify events
*/
extern const char * const g_trace_vinotify_tag_strs[];
extern const unsigned int g_trace_vinotify_tag_str_count;
TRACE_EVENT(rtcpu_vinotify_event_ts64,
TP_PROTO(u64 tstamp, u8 tag, u32 ch_frame, u64 vi_tstamp, u32 data),
TP_ARGS(tstamp, tag, ch_frame, vi_tstamp, data),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u8, tag)
__field(u32, ch_frame)
__field(u64, vi_tstamp)
__field(u32, data)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->tag = tag;
__entry->ch_frame = ch_frame;
__entry->vi_tstamp = vi_tstamp;
__entry->data = data;
),
TP_printk(
"tstamp:%llu tag:%s channel:0x%02x frame:%u vi_tstamp:%llu data:0x%08x",
__entry->tstamp,
(__entry->tag < g_trace_vinotify_tag_str_count) ?
g_trace_vinotify_tag_strs[__entry->tag] :
__print_hex(&__entry->tag, 1),
(__entry->ch_frame >> 8) & 0xff,
(__entry->ch_frame >> 16) & 0xffff,
__entry->vi_tstamp, __entry->data)
);
TRACE_EVENT(rtcpu_vinotify_event,
TP_PROTO(u64 tstamp, u32 channel_id, u32 unit,
u32 tag, u32 vi_ts_hi, u32 vi_ts_lo, u32 ext_data, u32 data),
TP_ARGS(tstamp, channel_id, unit, tag, vi_ts_hi, vi_ts_lo, ext_data, data),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, channel_id)
__field(u32, unit)
__field(u8, tag_tag)
__field(u8, tag_channel)
__field(u16, tag_frame)
__field(u64, vi_ts)
__field(u64, data)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->channel_id = channel_id;
__entry->unit = unit;
__entry->tag_tag = tag & 0xff;
__entry->tag_channel = (tag >> 8) & 0xff;
__entry->tag_frame = (tag >> 16) & 0xffff;
__entry->vi_ts = ((u64)vi_ts_hi << 32) | vi_ts_lo;
__entry->data = ((u64)ext_data << 32) | data;
),
TP_printk(
"tstamp:%llu cch:%d vi:%u tag:%s channel:0x%02x frame:%u "
"vi_tstamp:%llu data:0x%016llx",
__entry->tstamp,
__entry->channel_id,
__entry->unit,
((__entry->tag_tag >> 1) < g_trace_vinotify_tag_str_count) ?
g_trace_vinotify_tag_strs[__entry->tag_tag >> 1] :
__print_hex(&__entry->tag_tag, 1),
__entry->tag_channel, __entry->tag_frame,
__entry->vi_ts, __entry->data)
);
TRACE_EVENT(rtcpu_vinotify_error,
TP_PROTO(u64 tstamp, u32 channel_id, u32 unit,
u32 tag, u32 vi_ts_hi, u32 vi_ts_lo, u32 ext_data, u32 data),
TP_ARGS(tstamp, channel_id, unit, tag, vi_ts_hi, vi_ts_lo, ext_data, data),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u32, channel_id)
__field(u32, unit)
__field(u8, tag_tag)
__field(u8, tag_channel)
__field(u16, tag_frame)
__field(u64, vi_ts)
__field(u64, data)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->channel_id = channel_id;
__entry->unit = unit;
__entry->tag_tag = tag & 0xff;
__entry->tag_channel = (tag >> 8) & 0xff;
__entry->tag_frame = (tag >> 16) & 0xffff;
__entry->vi_ts = ((u64)vi_ts_hi << 32) | vi_ts_lo;
__entry->data = ((u64)ext_data << 32) | data;
),
TP_printk(
"tstamp:%llu cch:%d vi:%u tag:%s channel:0x%02x frame:%u "
"vi_tstamp:%llu data:0x%016llx",
__entry->tstamp,
__entry->channel_id,
__entry->unit,
((__entry->tag_tag >> 1) < g_trace_vinotify_tag_str_count) ?
g_trace_vinotify_tag_strs[__entry->tag_tag >> 1] :
__print_hex(&__entry->tag_tag, 1),
__entry->tag_channel, __entry->tag_frame,
__entry->vi_ts, __entry->data)
);
/*
* NVCSI events
*/
extern const char * const g_trace_nvcsi_intr_class_strs[];
extern const unsigned int g_trace_nvcsi_intr_class_str_count;
extern const char * const g_trace_nvcsi_intr_type_strs[];
extern const unsigned int g_trace_nvcsi_intr_type_str_count;
TRACE_EVENT(rtcpu_nvcsi_intr,
TP_PROTO(u64 tstamp, u8 intr_class, u8 intr_type, u32 index,
u32 status),
TP_ARGS(tstamp, intr_class, intr_type, index, status),
TP_STRUCT__entry(
__field(u64, tstamp)
__field(u8, intr_class)
__field(u8, intr_type)
__field(u32, index)
__field(u32, status)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->intr_class = intr_class;
__entry->intr_type = intr_type;
__entry->index = index;
__entry->status = status;
),
TP_printk(
"tstamp:%llu class:%s type:%s phy:%u cil:%u st:%u vc:%u status:0x%08x",
__entry->tstamp,
(__entry->intr_class < g_trace_nvcsi_intr_class_str_count) ?
g_trace_nvcsi_intr_class_strs[__entry->intr_class] :
__print_hex(&__entry->intr_class, 1),
(__entry->intr_type < g_trace_nvcsi_intr_type_str_count) ?
g_trace_nvcsi_intr_type_strs[__entry->intr_type] :
__print_hex(&__entry->intr_type, 1),
(__entry->index >> 24) & 0xff,
(__entry->index >> 16) & 0xff,
(__entry->index >> 8) & 0xff,
__entry->index & 0xff,
__entry->status)
);
/*
* ISP events
*/
TRACE_EVENT(rtcpu_isp_falcon,
TP_PROTO(u8 tag, u8 ch, u8 seq, u32 tstamp, u32 data, u32 ext_data),
TP_ARGS(tag, ch, seq, tstamp, data, ext_data),
TP_STRUCT__entry(
__field(u8, tag)
__field(u8, ch)
__field(u8, seq)
__field(u32, tstamp)
__field(u32, data)
__field(u32, ext_data)
),
TP_fast_assign(
__entry->tag = tag;
__entry->ch = ch;
__entry->seq = seq;
__entry->tstamp = tstamp;
__entry->data = data;
__entry->ext_data = ext_data;
),
TP_printk(
"tag:0x%x tstamp:%u ch:%u seq:%u data:0x%08x ext_data:0x%08x",
__entry->tag, __entry->tstamp, __entry->ch, __entry->seq,
__entry->data, __entry->ext_data
)
);
extern const char * const g_trace_isp_falcon_task_strs[];
extern const unsigned int g_trace_isp_falcon_task_str_count;
TRACE_EVENT(rtcpu_isp_falcon_task_start,
TP_PROTO(u8 ch, u32 tstamp, u32 task),
TP_ARGS(ch, tstamp, task),
TP_STRUCT__entry(
__field(u8, ch)
__field(u32, tstamp)
__field(u32, task)
),
TP_fast_assign(
__entry->ch = ch;
__entry->tstamp = tstamp;
__entry->task = task;
),
TP_printk(
"tstamp:%u ch:%u task:%s",
__entry->tstamp, __entry->ch,
(__entry->task < g_trace_isp_falcon_task_str_count) ?
g_trace_isp_falcon_task_strs[__entry->task] :
"UNKNOWN"
)
);
TRACE_EVENT(rtcpu_isp_falcon_task_end,
TP_PROTO(u32 tstamp, u32 task),
TP_ARGS(tstamp, task),
TP_STRUCT__entry(
__field(u32, tstamp)
__field(u32, task)
),
TP_fast_assign(
__entry->tstamp = tstamp;
__entry->task = task;
),
TP_printk(
"tstamp:%u task:%s",
__entry->tstamp,
(__entry->task < g_trace_isp_falcon_task_str_count) ?
g_trace_isp_falcon_task_strs[__entry->task] :
"UNKNOWN"
)
);
TRACE_EVENT(rtcpu_isp_falcon_tile_start,
TP_PROTO(
u8 ch, u8 seq, u32 tstamp,
u8 tile_x, u8 tile_y,
u16 tile_w, u16 tile_h),
TP_ARGS(ch, seq, tstamp, tile_x, tile_y, tile_w, tile_h),
TP_STRUCT__entry(
__field(u8, ch)
__field(u8, seq)
__field(u32, tstamp)
__field(u8, tile_x)
__field(u8, tile_y)
__field(u16, tile_w)
__field(u16, tile_h)
),
TP_fast_assign(
__entry->ch = ch;
__entry->seq = seq;
__entry->tstamp = tstamp;
__entry->tile_x = tile_x;
__entry->tile_y = tile_y;
__entry->tile_w = tile_w;
__entry->tile_h = tile_h;
),
TP_printk(
"tstamp:%u ch:%u seq:%u tile_x:%u tile_y:%u tile_w:%u tile_h:%u",
__entry->tstamp, __entry->ch, __entry->seq,
__entry->tile_x, __entry->tile_y,
__entry->tile_w, __entry->tile_h
)
);
TRACE_EVENT(rtcpu_isp_falcon_tile_end,
TP_PROTO(u8 ch, u8 seq, u32 tstamp, u8 tile_x, u8 tile_y),
TP_ARGS(ch, seq, tstamp, tile_x, tile_y),
TP_STRUCT__entry(
__field(u8, ch)
__field(u8, seq)
__field(u32, tstamp)
__field(u8, tile_x)
__field(u8, tile_y)
),
TP_fast_assign(
__entry->ch = ch;
__entry->seq = seq;
__entry->tstamp = tstamp;
__entry->tile_x = tile_x;
__entry->tile_y = tile_y;
),
TP_printk(
"tstamp:%u ch:%u seq:%u tile_x:%u tile_y:%u",
__entry->tstamp, __entry->ch, __entry->seq,
__entry->tile_x, __entry->tile_y
)
);
#endif /* _TRACE_TEGRA_RTCPU_H */
#include <trace/define_trace.h>