diff --git a/drivers/platform/tegra/Makefile b/drivers/platform/tegra/Makefile index 3f4aa5f2..1b9d7952 100644 --- a/drivers/platform/tegra/Makefile +++ b/drivers/platform/tegra/Makefile @@ -1,6 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 # Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +LINUXINCLUDE += -I$(srctree.nvidia-oot) + obj-m += bad.o obj-m += firmwares-class.o @@ -15,6 +17,8 @@ tegra-cactmon-objs += actmon_common.o obj-m += tegra-cactmon.o obj-m += tegra-fsicom.o +obj-m += tegra-camera-rtcpu.o + obj-m += cvnas/ obj-m += hwpm/ obj-m += mce/ @@ -22,3 +26,4 @@ obj-m += uncore_pmu/ obj-m += mc-hwpm.o obj-m += mc-utils.o obj-m += dce/ +obj-m += rtcpu/ diff --git a/drivers/platform/tegra/rtcpu/Makefile b/drivers/platform/tegra/rtcpu/Makefile new file mode 100644 index 00000000..43c1943d --- /dev/null +++ b/drivers/platform/tegra/rtcpu/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +LINUXINCLUDE += -I$(srctree.nvidia-oot)/include +LINUXINCLUDE += -I$(srctree)/include +LINUXINCLUDE += -I$(srctree.nvidia-oot)/drivers/platform/tegra/rtcpu +LINUXINCLUDE += -I$(srctree.nvidia-oot)/drivers/platform/tegra + +obj-m += capture-ivc.o +obj-m += ivc-bus.o +obj-m += camchar.o +obj-m += camera-diagnostics.o +obj-m += debug.o +obj-m += clk-group.o +obj-m += hsp-mailbox-client.o +obj-m += tegra-rtcpu-trace.o +obj-m += rtcpu-monitor.o +obj-m += reset-group.o +obj-m += device-group.o diff --git a/drivers/platform/tegra/rtcpu/camchar.c b/drivers/platform/tegra/rtcpu/camchar.c new file mode 100644 index 00000000..eb689501 --- /dev/null +++ b/drivers/platform/tegra/rtcpu/camchar.c @@ -0,0 +1,403 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+
+#include <linux/bitmap.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/poll.h>
+#include <linux/sched/signal.h>
+#include <linux/spinlock.h>
+#include <linux/tegra-ivc.h>
+#include <linux/tegra-ivc-bus.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#define CCIOGNFRAMES _IOR('c', 1, int)
+#define CCIOGNBYTES _IOR('c', 2, int)
+
+struct tegra_camchar_data {
+	struct cdev cdev;
+	struct tegra_ivc_channel *ch;
+	struct mutex io_lock;
+	wait_queue_head_t waitq;
+	bool is_open;
+	bool is_established;
+};
+
+#define DEVICE_COUNT (128)
+
+static DECLARE_BITMAP(tegra_camchar_minor_map, DEVICE_COUNT);
+static DEFINE_SPINLOCK(tegra_camchar_lock);
+static dev_t tegra_camchar_major_number;
+static struct class *tegra_camchar_class;
+
+static int tegra_camchar_open(struct inode *in, struct file *f)
+{
+	struct tegra_camchar_data *data;
+	int ret;
+
+	data = container_of(in->i_cdev, struct tegra_camchar_data, cdev);
+	if (data->is_open)
+		return -EBUSY;
+
+	ret = tegra_ivc_channel_runtime_get(data->ch);
+	if (ret < 0)
+		return ret;
+
+	data->is_open = true;
+	data->is_established = false;
+	f->private_data = data->ch;
+
+	return nonseekable_open(in, f);
+}
+
+static int tegra_camchar_release(struct inode *in, struct file *fp)
+{
+	struct tegra_ivc_channel *ch = fp->private_data;
+	struct tegra_camchar_data *data;
+
+	data = tegra_ivc_channel_get_drvdata(ch);
+	tegra_ivc_channel_runtime_put(ch);
+	data->is_open = false;
+
+	return 0;
+}
+
+static __poll_t tegra_camchar_poll(struct file *fp, struct poll_table_struct *pt)
+{
+	__poll_t ret = 0;
+	struct tegra_ivc_channel *ch = fp->private_data;
+	struct tegra_camchar_data *dev_data = tegra_ivc_channel_get_drvdata(ch);
+
+	poll_wait(fp, &dev_data->waitq, pt);
+
+	mutex_lock(&dev_data->io_lock);
+	if (tegra_ivc_can_read(&ch->ivc))
+		ret |= (EPOLLIN | EPOLLRDNORM);
+	if (tegra_ivc_can_write(&ch->ivc))
+		ret |= (EPOLLOUT | EPOLLWRNORM);
+	mutex_unlock(&dev_data->io_lock);
+
+	return ret;
+}
+
+static ssize_t tegra_camchar_read(struct file *fp, char __user *buffer, size_t len,
+		loff_t *offset)
+{
+	struct tegra_ivc_channel *ch = fp->private_data;
+	struct tegra_camchar_data *dev_data = tegra_ivc_channel_get_drvdata(ch);
+	DEFINE_WAIT(wait);
+	ssize_t ret;
+
+	if (WARN_ON(!ch->is_ready))
+		return -EIO;
+
+	len = min_t(size_t, len, ch->ivc.frame_size);
+	if (len == 0)
+		return 0;
+
+	do {
+		ret = mutex_lock_interruptible(&dev_data->io_lock);
+		if (ret)
+			break;
+		prepare_to_wait(&dev_data->waitq, &wait, TASK_INTERRUPTIBLE);
+
+		ret = tegra_ivc_read_user(&ch->ivc, buffer, len);
+		mutex_unlock(&dev_data->io_lock);
+
+		if (ret != -ENOMEM)
+			;
+		else if (signal_pending(current))
+			ret = -EINTR;
+		else if (fp->f_flags & O_NONBLOCK)
+			ret = -EAGAIN;
+		else
+			schedule();
+
+		finish_wait(&dev_data->waitq, &wait);
+
+	} while (ret == -ENOMEM);
+
+	return ret;
+}
+
+static ssize_t tegra_camchar_write(struct file *fp, const char __user *buffer,
+		size_t len, loff_t *offset)
+{
+	struct tegra_ivc_channel *ch = fp->private_data;
+	struct tegra_camchar_data *dev_data = tegra_ivc_channel_get_drvdata(ch);
+	DEFINE_WAIT(wait);
+	ssize_t ret;
+
+	if (WARN_ON(!ch->is_ready))
+		return -EIO;
+
+	len = min_t(size_t, len, ch->ivc.frame_size);
+	if (len == 0)
+		return 0;
+
+	do {
+		ret = mutex_lock_interruptible(&dev_data->io_lock);
+		if (ret)
+			break;
+
+		prepare_to_wait(&dev_data->waitq, &wait, TASK_INTERRUPTIBLE);
+		ret = tegra_ivc_write_user(&ch->ivc, buffer, len);
+		mutex_unlock(&dev_data->io_lock);
+
+		if (ret > 0)
+			dev_data->is_established = true;
+
+		if (ret != -ENOMEM && ret != -ECONNRESET)
+			;
+		else if (ret == -ECONNRESET && dev_data->is_established)
+			;
+		else if (signal_pending(current))
+			ret = -EINTR;
+		else if (fp->f_flags & O_NONBLOCK)
+			ret = -EAGAIN;
+		else
+			schedule();
+
+		finish_wait(&dev_data->waitq, &wait);
+
+		if (ret == -ECONNRESET && dev_data->is_established)
+			break;
+
+	} while (ret == -ENOMEM || ret == -ECONNRESET);
+
+	return ret;
+}
+
+static long tegra_camchar_ioctl(struct file *fp, unsigned int cmd,
+		unsigned long arg)
+{
+	struct tegra_ivc_channel *ch = fp->private_data;
+	struct tegra_camchar_data *dev_data = tegra_ivc_channel_get_drvdata(ch);
+	long ret;
+	int val = 0;
+
+	mutex_lock(&dev_data->io_lock);
+
+	switch (cmd) {
+	/* generic serial port ioctls */
+	case FIONREAD:
+		if (tegra_ivc_can_read(&ch->ivc))
+			val = ch->ivc.frame_size;
+		ret = put_user(val, (int __user *)arg);
+		break;
+	/* ioctls specific to this device */
+	case CCIOGNFRAMES:
+		val = ch->ivc.nframes;
+		ret = put_user(val, (int __user *)arg);
+		break;
+	case CCIOGNBYTES:
+		val = ch->ivc.frame_size;
+		ret = put_user(val, (int __user *)arg);
+		break;
+
+	default:
+		ret = -ENOTTY;
+	}
+
+	mutex_unlock(&dev_data->io_lock);
+	return ret;
+}
+
+static const struct file_operations tegra_camchar_fops = {
+	.open = tegra_camchar_open,
+	.poll = tegra_camchar_poll,
+	.read = tegra_camchar_read,
+	.write = tegra_camchar_write,
+	.release = tegra_camchar_release,
+	.unlocked_ioctl = tegra_camchar_ioctl,
+	.compat_ioctl = tegra_camchar_ioctl,
+	.llseek = no_llseek,
+};
+
+static int __init tegra_camchar_init(struct tegra_ivc_driver *drv)
+{
+	int ret;
+	dev_t start;
+
+	ret = alloc_chrdev_region(&start, 0, DEVICE_COUNT, "camchar");
+	if (ret) {
+		pr_alert("camchar: failed to allocate device numbers\n");
+		return ret;
+	}
+	tegra_camchar_major_number = MAJOR(start);
+
+	tegra_camchar_class = class_create(THIS_MODULE, "camchar_class");
+	if (IS_ERR(tegra_camchar_class)) {
+		pr_alert("camchar: failed to create class\n");
+		ret = PTR_ERR(tegra_camchar_class);
+		goto init_err_class;
+	}
+
+	ret = tegra_ivc_driver_register(drv);
+	if (ret) {
+		pr_alert("camchar: ivc driver registration failed\n");
+		goto init_err_ivc;
+	}
+
+	pr_info("camchar: rtcpu character device driver loaded\n");
+	return 0;
+
+init_err_ivc:
+	class_destroy(tegra_camchar_class);
+init_err_class:
+	unregister_chrdev_region(start, DEVICE_COUNT);
+	return ret;
+}
+
+static void __exit tegra_camchar_exit(struct tegra_ivc_driver *drv)
+{
+	dev_t num = MKDEV(tegra_camchar_major_number, 0);
+
+	tegra_ivc_driver_unregister(drv);
+	class_destroy(tegra_camchar_class);
+	unregister_chrdev_region(num, DEVICE_COUNT);
+	tegra_camchar_major_number = 0;
+
+	pr_info("camchar: unloaded rtcpu character device driver\n");
+}
+
+static void tegra_camchar_notify(struct tegra_ivc_channel *ch)
+{
+	struct tegra_camchar_data *dev_data = tegra_ivc_channel_get_drvdata(ch);
+
+	wake_up_interruptible(&dev_data->waitq);
+}
+
+static int tegra_camchar_get_minor(void)
+{
+	int minor;
+
+	spin_lock(&tegra_camchar_lock);
+
+	minor = find_first_zero_bit(tegra_camchar_minor_map, DEVICE_COUNT);
+	if (minor < DEVICE_COUNT)
+		set_bit(minor, tegra_camchar_minor_map);
+	else
+		minor = -ENODEV;
+
+	spin_unlock(&tegra_camchar_lock);
+
+	return minor;
+}
+
+static void tegra_camchar_put_minor(unsigned minor)
+{
+	spin_lock(&tegra_camchar_lock);
+
+	if (minor < DEVICE_COUNT)
+		clear_bit(minor, tegra_camchar_minor_map);
+
+	spin_unlock(&tegra_camchar_lock);
+}
+
+static int tegra_camchar_probe(struct tegra_ivc_channel *ch)
+{
+	const char *devname;
+	struct tegra_camchar_data *data;
+	int ret, minor;
+	dev_t num;
+	struct device *dummy;
+
+	devname = of_device_get_match_data(&ch->dev);
+	if (devname == NULL) {
+		ret = of_property_read_string(ch->dev.of_node,
+				"nvidia,devname", &devname);
+		if (ret != 0)
+			return ret;
+	}
+
+	dev_dbg(&ch->dev, "probing /dev/%s", devname);
+
+	data = devm_kzalloc(&ch->dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->ch = ch;
+	cdev_init(&data->cdev, &tegra_camchar_fops);
+	data->cdev.owner = THIS_MODULE;
+	init_waitqueue_head(&data->waitq);
+	mutex_init(&data->io_lock);
+
+	tegra_ivc_channel_set_drvdata(ch, data);
+
+	minor = tegra_camchar_get_minor();
+	if (minor < 0)
+		return minor;
+
+	num = MKDEV(tegra_camchar_major_number, minor);
+	ret = cdev_add(&data->cdev, num, 1);
+	if (ret) {
+		dev_warn(&ch->dev, "cannot add /dev/%s\n", devname);
+		tegra_camchar_put_minor(minor);
+		return ret;
+	}
+
+	dummy = device_create(tegra_camchar_class, &ch->dev, num, NULL,
+			"%s", devname);
+	if (IS_ERR(dummy)) {
+		dev_err(&ch->dev, "cannot create /dev/%s\n", devname);
+		cdev_del(&data->cdev);
+		tegra_camchar_put_minor(minor);
+		return PTR_ERR(dummy);
+	}
+
+	return ret;
+}
+
+static void tegra_camchar_remove(struct tegra_ivc_channel *ch)
+{
+	struct tegra_camchar_data *data = tegra_ivc_channel_get_drvdata(ch);
+	dev_t num = data->cdev.dev;
+
+	device_destroy(tegra_camchar_class, num);
+	cdev_del(&data->cdev);
+	tegra_camchar_put_minor(MINOR(num));
+}
+
+static const struct tegra_ivc_channel_ops tegra_ivc_channel_chardev_ops = {
+	.probe = tegra_camchar_probe,
+	.remove = tegra_camchar_remove,
+	.notify = tegra_camchar_notify,
+};
+
+static const struct of_device_id camchar_of_match[] = {
+	{ .compatible = "nvidia,tegra-ivc-cdev" },
+	{ .compatible = "nvidia,tegra186-camera-ivc-protocol-echo",
+		.data = (void *)"camchar-echo", },
+	{ .compatible = "nvidia,tegra186-camera-ivc-protocol-dbg",
+		.data = (void *)"camchar-dbg", },
+	{ },
+};
+
+static struct tegra_ivc_driver camchar_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.bus = &tegra_ivc_bus_type,
+		.name = "tegra-ivc-cdev",
+		.of_match_table = camchar_of_match,
+	},
+	.dev_type = &tegra_ivc_channel_type,
+	.ops.channel = &tegra_ivc_channel_chardev_ops,
+};
+
+tegra_ivc_subsys_driver(camchar_driver, tegra_camchar_init, tegra_camchar_exit);
+MODULE_AUTHOR("Jan Solanti ");
+MODULE_DESCRIPTION("Character device driver for ivc-bus");
+MODULE_LICENSE("GPL v2");
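For reference, a minimal userspace sketch of the interface camchar exposes: one IVC frame per read()/write(), the CCIOGNFRAMES/CCIOGNBYTES ioctls to query queue geometry, and poll() for readiness. The device node name (/dev/camchar-echo, taken from the of_match data above) and the echo-back behaviour of the remote end are assumptions, not guaranteed by this patch:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Same encodings as in camchar.c. */
#define CCIOGNFRAMES _IOR('c', 1, int)
#define CCIOGNBYTES  _IOR('c', 2, int)

int main(void)
{
	int nframes = 0, nbytes = 0;
	/* Node name comes from DT match data ("camchar-echo" here). */
	int fd = open("/dev/camchar-echo", O_RDWR);

	if (fd < 0)
		return 1;

	if (ioctl(fd, CCIOGNFRAMES, &nframes) == 0 &&
	    ioctl(fd, CCIOGNBYTES, &nbytes) == 0)
		printf("IVC queue: %d frames of %d bytes\n", nframes, nbytes);

	/* Each write() sends at most one IVC frame to the RTCPU. */
	(void)write(fd, "ping", 4);

	/* Wait up to 1 s for the (assumed) echo response, then read it. */
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	if (poll(&pfd, 1, 1000) == 1) {
		char buf[64];
		ssize_t n = read(fd, buf, sizeof(buf));
		if (n > 0)
			printf("got %zd bytes back\n", n);
	}

	close(fd);
	return 0;
}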
diff --git a/drivers/platform/tegra/rtcpu/camera-diagnostics.c b/drivers/platform/tegra/rtcpu/camera-diagnostics.c
new file mode 100644
index 00000000..8b6dd7d9
--- /dev/null
+++ b/drivers/platform/tegra/rtcpu/camera-diagnostics.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/tegra-ivc-bus.h>
+
+static int tegra_camera_diagnostics_probe(struct tegra_ivc_channel *ch)
+{
+	(void)ch;
+	return 0;
+}
+
+static void tegra_camera_diagnostics_remove(struct tegra_ivc_channel *ch)
+{
+	(void)ch;
+}
+
+static const struct tegra_ivc_channel_ops
+tegra_camera_diagnostics_channel_ops = {
+	.probe = tegra_camera_diagnostics_probe,
+	.remove = tegra_camera_diagnostics_remove,
+};
+
+static const struct of_device_id camera_diagnostics_of_match[] = {
+	{ .compatible = "nvidia,tegra186-camera-diagnostics", },
+	{ },
+};
+
+static struct tegra_ivc_driver camera_diagnostics_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.bus = &tegra_ivc_bus_type,
+		.name = "tegra-camera-diagnostics",
+		.of_match_table = camera_diagnostics_of_match,
+	},
+	.dev_type = &tegra_ivc_channel_type,
+	.ops.channel = &tegra_camera_diagnostics_channel_ops,
+};
+
+tegra_ivc_subsys_driver_default(camera_diagnostics_driver);
+MODULE_AUTHOR("Pekka Pessi ");
+MODULE_DESCRIPTION("Dummy device driver for Camera Diagnostics IVC Channel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/tegra/rtcpu/capture-ivc-priv.h b/drivers/platform/tegra/rtcpu/capture-ivc-priv.h
new file mode 100644
index 00000000..c5557e22
--- /dev/null
+++ b/drivers/platform/tegra/rtcpu/capture-ivc-priv.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ */
+
+#ifndef __CAPTURE_IVC_PRIV_H__
+#define __CAPTURE_IVC_PRIV_H__
+
+/** Total number of capture channels (2 * VI + ISP). T234 has two VI units. */
+#define NUM_CAPTURE_CHANNELS 88
+
+/** Temporary ids for clients whose channel id is not yet allocated */
+#define NUM_CAPTURE_TRANSACTION_IDS NUM_CAPTURE_CHANNELS
+
+/** Total number of channels, including temporary ids */
+#define TOTAL_CHANNELS (NUM_CAPTURE_CHANNELS + NUM_CAPTURE_TRANSACTION_IDS)
+#define TRANS_ID_START_IDX NUM_CAPTURE_CHANNELS
+
+/**
+ * @brief Callback context of an IVC channel.
+ */
+struct tegra_capture_ivc_cb_ctx {
+	/** Linked list of callback contexts */
+	struct list_head node;
+	/** Callback function registered by client */
+	tegra_capture_ivc_cb_func cb_func;
+	/** Private context of a VI/ISP capture context */
+	const void *priv_context;
+};
+
+/**
+ * @brief IVC channel context.
+ */
+struct tegra_capture_ivc {
+	/** Pointer to IVC channel */
+	struct tegra_ivc_channel *chan;
+	/** Callback context lock */
+	struct mutex cb_ctx_lock;
+	/** Channel write lock */
+	struct mutex ivc_wr_lock;
+	/** Deferred work */
+	struct work_struct work;
+	/** Wait queue for channel writers */
+	wait_queue_head_t write_q;
+	/** Array holding callbacks registered by each channel */
+	struct tegra_capture_ivc_cb_ctx cb_ctx[TOTAL_CHANNELS];
+	/** Spinlock protecting access to the available-context list */
+	spinlock_t avl_ctx_list_lock;
+	/** Linked list holding the available callback contexts */
+	struct list_head avl_ctx_list;
+};
+
+/**
+ * @brief Standard message header for all capture IVC messages.
+ */
+struct tegra_capture_ivc_msg_header {
+	/** Message identifier. */
+	uint32_t msg_id;
+	union {
+		/** Channel identifier. */
+		uint32_t channel_id;
+		/** Transaction id. */
+		uint32_t transaction;
+	};
+} __aligned(8);
+
+/**
+ * @brief Response to an IVC message.
+ */
+struct tegra_capture_ivc_resp {
+	/** IVC message header. See @ref tegra_capture_ivc_msg_header */
+	struct tegra_capture_ivc_msg_header header;
+};
+
+/** Control IVC channel context, created during the probe call */
+static struct tegra_capture_ivc *__scivc_control;
+
+/** Capture IVC channel context, created during the probe call */
+static struct tegra_capture_ivc *__scivc_capture;
+
+/**
+ * @brief Worker to handle asynchronous messages on the IVC channel.
+ *	It invokes the callbacks registered by the channel drivers.
+ *
+ * @param[in] work work_struct pointer
+ */
+static void tegra_capture_ivc_worker(
+	struct work_struct *work);
+
+/**
+ * @brief IVC notify operation, called whenever a new message arrives on
+ *	the bus for this channel. It signals the worker thread.
+ *
+ * @param[in] chan tegra_ivc_channel channel pointer
+ */
+static void tegra_capture_ivc_notify(
+	struct tegra_ivc_channel *chan);
+
+/**
+ * @brief Probe operation, called when the IVC channel device is bound.
+ *
+ * @param[in,out] chan tegra_ivc_channel channel pointer
+ *
+ * @returns 0 (success), neg. errno (failure)
+ */
+static int tegra_capture_ivc_probe(
+	struct tegra_ivc_channel *chan);
+
+/**
+ * @brief Remove operation.
+ *
+ * @param[in] chan tegra_ivc_channel channel pointer
+ */
+static void tegra_capture_ivc_remove(
+	struct tegra_ivc_channel *chan);
+
+/**
+ * @brief Transmit an IVC message once the channel is writable, using the
+ *	Tegra IVC core library APIs.
+ *
+ * @param[in] civc IVC channel on which the message is transmitted.
+ * @param[in] req IVC message blob.
+ * @param[in] len IVC message length.
+ *
+ * @returns 0 (success), neg. errno (failure)
+ */
+static int tegra_capture_ivc_tx(
+	struct tegra_capture_ivc *civc,
+	const void *req,
+	size_t len);
+
+#endif /* __CAPTURE_IVC_PRIV_H__ */
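The split between channel ids (0..87) and transaction ids (88..175) in cb_ctx[] drives a two-step handshake: a client first registers a control callback under a borrowed transaction id, and once the RTCPU has allocated a real channel id it re-keys the callback. A hypothetical client sketch follows; the function names and control flow are illustrative, but the three calls are the ones exported by capture-ivc.c below (it assumes the driver's public header is included):

/* Hypothetical client of the capture-control channel. */
static void my_control_resp_cb(const void *resp, const void *priv)
{
	/* Parse the response; a channel-setup reply is assumed to carry
	 * the RTCPU-allocated channel id. */
}

static int my_setup_channel(uint32_t rtcpu_chan_id)
{
	uint32_t trans_id;
	int err;

	/* Step 1: borrow a temporary transaction id (one of the cb_ctx
	 * slots TRANS_ID_START_IDX..TOTAL_CHANNELS-1). */
	err = tegra_capture_ivc_register_control_cb(my_control_resp_cb,
			&trans_id, NULL);
	if (err)
		return err;

	/* Step 2: submit the setup request tagged with trans_id and wait
	 * for my_control_resp_cb() to deliver the allocated chan_id. */

	/* Step 3: re-key the callback from trans_id to chan_id; this also
	 * returns the transaction slot to the available list. */
	err = tegra_capture_ivc_notify_chan_id(rtcpu_chan_id, trans_id);
	if (err)
		tegra_capture_ivc_unregister_control_cb(trans_id);

	return err;
}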
diff --git a/drivers/platform/tegra/rtcpu/capture-ivc.c b/drivers/platform/tegra/rtcpu/capture-ivc.c
new file mode 100644
index 00000000..8914400b
--- /dev/null
+++ b/drivers/platform/tegra/rtcpu/capture-ivc.c
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <soc/tegra/capture-ivc.h>
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nospec.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include <linux/tegra-ivc-bus.h>
+
+#include <trace/events/tegra_capture.h>
+
+#include "capture-ivc-priv.h"
+
+static int tegra_capture_ivc_tx_(struct tegra_capture_ivc *civc,
+		const void *req, size_t len)
+{
+	struct tegra_ivc_channel *chan;
+	int ret;
+
+	if (civc == NULL)
+		return -ENODEV;
+
+	chan = civc->chan;
+	if (WARN_ON(!chan->is_ready))
+		return -EIO;
+
+	ret = mutex_lock_interruptible(&civc->ivc_wr_lock);
+	if (unlikely(ret == -EINTR))
+		return -ERESTARTSYS;
+	if (unlikely(ret))
+		return ret;
+
+	ret = wait_event_interruptible(civc->write_q,
+			tegra_ivc_can_write(&chan->ivc));
+	if (likely(ret == 0))
+		ret = tegra_ivc_write(&chan->ivc, req, len);
+
+	mutex_unlock(&civc->ivc_wr_lock);
+
+	if (unlikely(ret < 0))
+		dev_err(&chan->dev, "tegra_ivc_write: error %d\n", ret);
+
+	return ret;
+}
+
+static int tegra_capture_ivc_tx(struct tegra_capture_ivc *civc,
+		const void *req, size_t len)
+{
+	int ret;
+	struct tegra_capture_ivc_msg_header hdr;
+	size_t hdrlen = sizeof(hdr);
+	char const *ch_name = "NULL";
+
+	if (civc && civc->chan)
+		ch_name = dev_name(&civc->chan->dev);
+
+	if (len < hdrlen) {
+		memset(&hdr, 0, hdrlen);
+		memcpy(&hdr, req, len);
+	} else {
+		memcpy(&hdr, req, hdrlen);
+	}
+
+	ret = tegra_capture_ivc_tx_(civc, req, len);
+
+	if (ret < 0)
+		trace_capture_ivc_send_error(ch_name, hdr.msg_id, hdr.channel_id, ret);
+	else
+		trace_capture_ivc_send(ch_name, hdr.msg_id, hdr.channel_id);
+
+	return ret;
+}
+
+int tegra_capture_ivc_control_submit(const void *control_desc, size_t len)
+{
+	WARN_ON(__scivc_control == NULL);
+	return tegra_capture_ivc_tx(__scivc_control, control_desc, len);
+}
+EXPORT_SYMBOL(tegra_capture_ivc_control_submit);
+
+int tegra_capture_ivc_capture_submit(const void *capture_desc, size_t len)
+{
+	WARN_ON(__scivc_capture == NULL);
+	return tegra_capture_ivc_tx(__scivc_capture, capture_desc, len);
+}
+EXPORT_SYMBOL(tegra_capture_ivc_capture_submit);
+
+int tegra_capture_ivc_register_control_cb(
+		tegra_capture_ivc_cb_func control_resp_cb,
+		uint32_t *trans_id, const void *priv_context)
+{
+	struct tegra_capture_ivc *civc;
+	struct tegra_capture_ivc_cb_ctx *cb_ctx;
+	size_t ctx_id;
+	int ret;
+
+	/* Check if inputs are valid */
+	if (WARN(control_resp_cb == NULL, "callback function is NULL"))
+		return -EINVAL;
+	if (WARN(trans_id == NULL, "return value trans_id is NULL"))
+		return -EINVAL;
+	if (WARN_ON(!__scivc_control))
+		return -ENODEV;
+
+	civc = __scivc_control;
+
+	ret = tegra_ivc_channel_runtime_get(civc->chan);
+	if (unlikely(ret < 0))
+		return ret;
+
+	spin_lock(&civc->avl_ctx_list_lock);
+	if (unlikely(list_empty(&civc->avl_ctx_list))) {
+		spin_unlock(&civc->avl_ctx_list_lock);
+		ret = -EAGAIN;
+		goto fail;
+	}
+
+	cb_ctx = list_first_entry(&civc->avl_ctx_list,
+			struct tegra_capture_ivc_cb_ctx, node);
+
+	list_del(&cb_ctx->node);
+	spin_unlock(&civc->avl_ctx_list_lock);
+
+	ctx_id = cb_ctx - &civc->cb_ctx[0];
+
+	if (WARN(ctx_id < TRANS_ID_START_IDX ||
+			ctx_id >= ARRAY_SIZE(civc->cb_ctx),
+			"invalid cb_ctx %zu", ctx_id)) {
+		ret = -EIO;
+		goto fail;
+	}
+
+	mutex_lock(&civc->cb_ctx_lock);
+
+	if (WARN(cb_ctx->cb_func != NULL, "cb_ctx is busy")) {
+		ret = -EIO;
+		goto locked_fail;
+	}
+
+	*trans_id = (uint32_t)ctx_id;
+	cb_ctx->cb_func = control_resp_cb;
+	cb_ctx->priv_context = priv_context;
+
+	mutex_unlock(&civc->cb_ctx_lock);
+
+	return 0;
+
+locked_fail:
+	mutex_unlock(&civc->cb_ctx_lock);
+fail:
+	tegra_ivc_channel_runtime_put(civc->chan);
+	return ret;
+}
+EXPORT_SYMBOL(tegra_capture_ivc_register_control_cb);
+
+int tegra_capture_ivc_notify_chan_id(uint32_t chan_id, uint32_t trans_id)
+{
+	struct tegra_capture_ivc *civc;
+
+	if (WARN(chan_id >= NUM_CAPTURE_CHANNELS, "invalid chan_id"))
+		return -EINVAL;
+	if (WARN(trans_id < TRANS_ID_START_IDX ||
+			trans_id >= TOTAL_CHANNELS, "invalid trans_id"))
+		return -EINVAL;
+	if (WARN_ON(!__scivc_control))
+		return -ENODEV;
+
+	chan_id = array_index_nospec(chan_id, NUM_CAPTURE_CHANNELS);
+	trans_id = array_index_nospec(trans_id, TOTAL_CHANNELS);
+
+	civc = __scivc_control;
+
+	mutex_lock(&civc->cb_ctx_lock);
+
+	if (WARN(civc->cb_ctx[trans_id].cb_func == NULL,
+			"transaction context at %u is idle", trans_id)) {
+		mutex_unlock(&civc->cb_ctx_lock);
+		return -EBADF;
+	}
+
+	if (WARN(civc->cb_ctx[chan_id].cb_func != NULL,
+			"channel context at %u is busy", chan_id)) {
+		mutex_unlock(&civc->cb_ctx_lock);
+		return -EBUSY;
+	}
+
+	/* Update cb_ctx index */
+	civc->cb_ctx[chan_id].cb_func = civc->cb_ctx[trans_id].cb_func;
+	civc->cb_ctx[chan_id].priv_context =
+		civc->cb_ctx[trans_id].priv_context;
+
+	/* Reset trans_id cb_ctx fields */
+	civc->cb_ctx[trans_id].cb_func = NULL;
+	civc->cb_ctx[trans_id].priv_context = NULL;
+
+	mutex_unlock(&civc->cb_ctx_lock);
+
+	spin_lock(&civc->avl_ctx_list_lock);
+	list_add_tail(&civc->cb_ctx[trans_id].node, &civc->avl_ctx_list);
+	spin_unlock(&civc->avl_ctx_list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_capture_ivc_notify_chan_id);
+
+int tegra_capture_ivc_register_capture_cb(
+		tegra_capture_ivc_cb_func capture_status_ind_cb,
+		uint32_t chan_id, const void *priv_context)
+{
+	struct tegra_capture_ivc *civc;
+	int ret;
+
+	if (WARN(capture_status_ind_cb == NULL, "callback function is NULL"))
+		return -EINVAL;
+
+	if (WARN(chan_id >= NUM_CAPTURE_CHANNELS,
+			"invalid channel id %u", chan_id))
+		return -EINVAL;
+	chan_id = array_index_nospec(chan_id, NUM_CAPTURE_CHANNELS);
+
+	if (!__scivc_capture)
+		return -ENODEV;
+
+	civc = __scivc_capture;
+
+	ret = tegra_ivc_channel_runtime_get(civc->chan);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&civc->cb_ctx_lock);
+
+	if (WARN(civc->cb_ctx[chan_id].cb_func != NULL,
+			"capture channel %u is busy", chan_id)) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	civc->cb_ctx[chan_id].cb_func = capture_status_ind_cb;
+	civc->cb_ctx[chan_id].priv_context = priv_context;
+	mutex_unlock(&civc->cb_ctx_lock);
+
+	return 0;
+fail:
+	mutex_unlock(&civc->cb_ctx_lock);
+	tegra_ivc_channel_runtime_put(civc->chan);
+
+	return ret;
+}
+EXPORT_SYMBOL(tegra_capture_ivc_register_capture_cb);
+
+int tegra_capture_ivc_unregister_control_cb(uint32_t id)
+{
+	struct tegra_capture_ivc *civc;
+
+	/* id could be a temporary trans_id or an rtcpu-allocated chan_id */
+	if (WARN(id >= TOTAL_CHANNELS, "invalid id %u", id))
+		return -EINVAL;
+	if (WARN_ON(!__scivc_control))
+		return -ENODEV;
+
+	id = array_index_nospec(id, TOTAL_CHANNELS);
+
+	civc = __scivc_control;
+
+	mutex_lock(&civc->cb_ctx_lock);
+
+	if (WARN(civc->cb_ctx[id].cb_func == NULL,
+			"control channel %u is idle", id)) {
+		mutex_unlock(&civc->cb_ctx_lock);
+		return -EBADF;
+	}
+
+	civc->cb_ctx[id].cb_func = NULL;
+	civc->cb_ctx[id].priv_context = NULL;
+
+	mutex_unlock(&civc->cb_ctx_lock);
+
+	/*
+	 * If id is a trans_id, the client hit an error before or during
+	 * the chan_id update; in that case the corresponding cb_ctx
+	 * needs to be added back to the available cb_ctx list.
+ */ + if (id >= TRANS_ID_START_IDX) { + spin_lock(&civc->avl_ctx_list_lock); + list_add_tail(&civc->cb_ctx[id].node, &civc->avl_ctx_list); + spin_unlock(&civc->avl_ctx_list_lock); + } + + tegra_ivc_channel_runtime_put(civc->chan); + + return 0; +} +EXPORT_SYMBOL(tegra_capture_ivc_unregister_control_cb); + +int tegra_capture_ivc_unregister_capture_cb(uint32_t chan_id) +{ + struct tegra_capture_ivc *civc; + + if (chan_id >= NUM_CAPTURE_CHANNELS) + return -EINVAL; + + if (!__scivc_capture) + return -ENODEV; + + chan_id = array_index_nospec(chan_id, NUM_CAPTURE_CHANNELS); + + civc = __scivc_capture; + + mutex_lock(&civc->cb_ctx_lock); + + if (WARN(civc->cb_ctx[chan_id].cb_func == NULL, + "capture channel %u is idle", chan_id)) { + mutex_unlock(&civc->cb_ctx_lock); + return -EBADF; + } + + civc->cb_ctx[chan_id].cb_func = NULL; + civc->cb_ctx[chan_id].priv_context = NULL; + + mutex_unlock(&civc->cb_ctx_lock); + + tegra_ivc_channel_runtime_put(civc->chan); + + return 0; +} +EXPORT_SYMBOL(tegra_capture_ivc_unregister_capture_cb); + +static inline void tegra_capture_ivc_recv_msg( + struct tegra_capture_ivc *civc, + uint32_t id, + const void *msg) +{ + struct device *dev = &civc->chan->dev; + + /* Check if callback function available */ + if (unlikely(!civc->cb_ctx[id].cb_func)) { + dev_dbg(dev, "No callback for id %u\n", id); + } else { + /* Invoke client callback. */ + civc->cb_ctx[id].cb_func(msg, civc->cb_ctx[id].priv_context); + } +} + +static inline void tegra_capture_ivc_recv(struct tegra_capture_ivc *civc) +{ + struct ivc *ivc = &civc->chan->ivc; + struct device *dev = &civc->chan->dev; + + while (tegra_ivc_can_read(ivc)) { + const void *msg = tegra_ivc_read_get_next_frame(ivc); + const struct tegra_capture_ivc_msg_header *hdr = msg; + uint32_t id = hdr->channel_id; + + trace_capture_ivc_recv(dev_name(dev), hdr->msg_id, id); + + /* Check if message is valid */ + if (id < TOTAL_CHANNELS) { + id = array_index_nospec(id, TOTAL_CHANNELS); + tegra_capture_ivc_recv_msg(civc, id, msg); + } else { + dev_WARN(dev, "Invalid rtcpu channel id %u", id); + } + + tegra_ivc_read_advance(ivc); + } +} + +static void tegra_capture_ivc_worker(struct work_struct *work) +{ + struct tegra_capture_ivc *civc; + struct tegra_ivc_channel *chan; + + civc = container_of(work, struct tegra_capture_ivc, work); + chan = civc->chan; + + /* + * Do not process IVC events if worker gets woken up while + * this channel is suspended. There is a Christmas tree + * notify when RCE resumes and IVC bus gets set up. 
+	 */
+	if (pm_runtime_get_if_in_use(&chan->dev) > 0) {
+		WARN_ON(!chan->is_ready);
+
+		tegra_capture_ivc_recv(civc);
+
+		pm_runtime_put(&chan->dev);
+	} else {
+		dev_dbg(&chan->dev, "extra wakeup");
+	}
+}
+
+static void tegra_capture_ivc_notify(struct tegra_ivc_channel *chan)
+{
+	struct tegra_capture_ivc *civc = tegra_ivc_channel_get_drvdata(chan);
+
+	trace_capture_ivc_notify(dev_name(&chan->dev));
+
+	/* Only 1 thread can wait on write_q, rest wait for write_lock */
+	wake_up(&civc->write_q);
+	schedule_work(&civc->work);
+}
+
+#define NV(x) "nvidia," #x
+
+static int tegra_capture_ivc_probe(struct tegra_ivc_channel *chan)
+{
+	struct device *dev = &chan->dev;
+	struct tegra_capture_ivc *civc;
+	const char *service;
+	int ret;
+	uint32_t i;
+
+	civc = devm_kzalloc(dev, sizeof(*civc), GFP_KERNEL);
+	if (unlikely(civc == NULL))
+		return -ENOMEM;
+
+	ret = of_property_read_string(dev->of_node, NV(service),
+			&service);
+	if (unlikely(ret)) {
+		dev_err(dev, "missing <%s> property\n", NV(service));
+		return ret;
+	}
+
+	civc->chan = chan;
+
+	mutex_init(&civc->cb_ctx_lock);
+	mutex_init(&civc->ivc_wr_lock);
+
+	/* Initialize ivc_work */
+	INIT_WORK(&civc->work, tegra_capture_ivc_worker);
+
+	/* Initialize wait queue */
+	init_waitqueue_head(&civc->write_q);
+
+	/* transaction-id list of available callback contexts */
+	spin_lock_init(&civc->avl_ctx_list_lock);
+	INIT_LIST_HEAD(&civc->avl_ctx_list);
+
+	/* Add the transaction cb-contexts to the available list */
+	for (i = TRANS_ID_START_IDX; i < ARRAY_SIZE(civc->cb_ctx); i++)
+		list_add_tail(&civc->cb_ctx[i].node, &civc->avl_ctx_list);
+
+	tegra_ivc_channel_set_drvdata(chan, civc);
+
+	if (!strcmp("capture-control", service)) {
+		if (WARN_ON(__scivc_control != NULL))
+			return -EEXIST;
+		__scivc_control = civc;
+	} else if (!strcmp("capture", service)) {
+		if (WARN_ON(__scivc_capture != NULL))
+			return -EEXIST;
+		__scivc_capture = civc;
+	} else {
+		dev_err(dev, "Unknown ivc channel %s\n", service);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void tegra_capture_ivc_remove(struct tegra_ivc_channel *chan)
+{
+	struct tegra_capture_ivc *civc = tegra_ivc_channel_get_drvdata(chan);
+
+	cancel_work_sync(&civc->work);
+
+	if (__scivc_control == civc)
+		__scivc_control = NULL;
+	else if (__scivc_capture == civc)
+		__scivc_capture = NULL;
+	else
+		dev_warn(&chan->dev, "Unknown ivc channel\n");
+}
+
+static const struct of_device_id tegra_capture_ivc_channel_of_match[] = {
+	{ .compatible = "nvidia,tegra186-camera-ivc-protocol-capture-control" },
+	{ .compatible = "nvidia,tegra186-camera-ivc-protocol-capture" },
+	{ },
+};
+
+static const struct tegra_ivc_channel_ops tegra_capture_ivc_ops = {
+	.probe = tegra_capture_ivc_probe,
+	.remove = tegra_capture_ivc_remove,
+	.notify = tegra_capture_ivc_notify,
+};
+
+static struct tegra_ivc_driver tegra_capture_ivc_driver = {
+	.driver = {
+		.name = "tegra-capture-ivc",
+		.bus = &tegra_ivc_bus_type,
+		.owner = THIS_MODULE,
+		.of_match_table = tegra_capture_ivc_channel_of_match,
+	},
+	.dev_type = &tegra_ivc_channel_type,
+	.ops.channel = &tegra_capture_ivc_ops,
+};
+
+tegra_ivc_subsys_driver_default(tegra_capture_ivc_driver);
+MODULE_AUTHOR("Sudhir Vyas ");
+MODULE_DESCRIPTION("NVIDIA Tegra Capture IVC driver");
+MODULE_LICENSE("GPL v2");
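Every message on these channels must begin with the standard header, because the receive path above dispatches purely on header.channel_id (or the transaction id stored in the same union field). A hypothetical sketch of a control submission, with the header layout restated from capture-ivc-priv.h; a real client would use the message definitions from the camrtc capture message headers rather than this illustrative struct:

/* Hypothetical control request: payload layout is made up for the example. */
struct my_control_req {
	struct tegra_capture_ivc_msg_header header;
	uint32_t payload[4];
};

static int my_send_control(uint32_t msg_id, uint32_t trans_id)
{
	struct my_control_req req = {
		/* .transaction aliases .channel_id in the header union, so
		 * the response is routed back to the cb_ctx slot registered
		 * under trans_id. */
		.header = { .msg_id = msg_id, .transaction = trans_id },
	};

	return tegra_capture_ivc_control_submit(&req, sizeof(req));
}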
diff --git a/drivers/platform/tegra/rtcpu/clk-group.c b/drivers/platform/tegra/rtcpu/clk-group.c
new file mode 100644
index 00000000..dbff0448
--- /dev/null
+++ b/drivers/platform/tegra/rtcpu/clk-group.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "clk-group.h"
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+struct camrtc_clk_group {
+	struct device *device;
+	int nclocks;
+	struct clk **clocks;
+	struct {
+		struct clk *slow;
+		struct clk *fast;
+	} parents;
+	struct {
+		u32 slow;
+		u32 fast;
+	} *rates;
+};
+
+static void camrtc_clk_group_release(struct device *dev, void *res)
+{
+	const struct camrtc_clk_group *grp = res;
+	int i;
+
+	for (i = 0; i < grp->nclocks; i++) {
+		if (grp->clocks[i])
+			clk_put(grp->clocks[i]);
+	}
+
+	if (grp->parents.slow)
+		clk_put(grp->parents.slow);
+	if (grp->parents.fast)
+		clk_put(grp->parents.fast);
+}
+
+static int camrtc_clk_group_get_parent(
+	struct device_node *np,
+	int index,
+	struct clk **return_clk)
+{
+	struct of_phandle_args clkspec;
+	struct clk *clk;
+	int ret;
+
+	if (index < 0)
+		return -EINVAL;
+
+	ret = of_parse_phandle_with_args(np,
+			"nvidia,clock-parents", "#clock-cells", index,
+			&clkspec);
+	if (ret < 0)
+		return ret;
+
+	clk = of_clk_get_from_provider(&clkspec);
+	of_node_put(clkspec.np);
+
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	*return_clk = clk;
+
+	return 0;
+}
+
+struct camrtc_clk_group *camrtc_clk_group_get(
+	struct device *dev)
+{
+	struct camrtc_clk_group *grp;
+	struct device_node *np;
+	int nclocks;
+	int nrates;
+	int nparents;
+	int index;
+	int ret;
+
+	if (!dev || !dev->of_node)
+		return ERR_PTR(-EINVAL);
+
+	np = dev->of_node;
+
+	nclocks = of_property_count_strings(np, "clock-names");
+	if (nclocks < 0)
+		return ERR_PTR(-ENOENT);
+
+	/* This has pairs of u32s: slow and fast rate for each clock */
+	nrates = of_property_count_u64_elems(np, "nvidia,clock-rates");
+
+	nparents = of_count_phandle_with_args(np, "nvidia,clock-parents",
+			"#clock-cells");
+	if (nparents > 0 && nparents != 2)
+		dev_warn(dev, "expecting exactly two \"%s\"\n",
+			"nvidia,clock-parents");
+
+	grp = devres_alloc(camrtc_clk_group_release,
+			sizeof(*grp) +
+			nclocks * sizeof(grp->clocks[0]) +
+			nclocks * sizeof(grp->rates[0]),
+			GFP_KERNEL);
+	if (!grp)
+		return ERR_PTR(-ENOMEM);
+
+	grp->nclocks = nclocks;
+	grp->device = dev;
+	grp->clocks = (struct clk **)(grp + 1);
+	grp->rates = (void *)(grp->clocks + nclocks);
+
+	for (index = 0; index < grp->nclocks; index++) {
+		struct clk *clk;
+
+		clk = of_clk_get(np, index);
+		if (IS_ERR(clk)) {
+			ret = PTR_ERR(clk);
+			goto error;
+		}
+
+		grp->clocks[index] = clk;
+
+		if (index >= nrates)
+			continue;
+
+		if (of_property_read_u32_index(np, "nvidia,clock-rates",
+				2 * index, &grp->rates[index].slow))
+			dev_warn(dev, "clock-rates property not found\n");
+		if (of_property_read_u32_index(np, "nvidia,clock-rates",
+				2 * index + 1, &grp->rates[index].fast))
+			dev_warn(dev, "clock-rates property not found\n");
+	}
+
+	if (nparents == 2) {
+		ret = camrtc_clk_group_get_parent(np, 0, &grp->parents.slow);
+		if (ret < 0)
+			goto error;
+
+		ret = camrtc_clk_group_get_parent(np, 1, &grp->parents.fast);
+		if (ret < 0)
+			goto error;
+	}
+
+	devres_add(dev, grp);
+	return grp;
+
+error:
+	devres_free(grp);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(camrtc_clk_group_get);
+
+static void camrtc_clk_group_error(
+	const struct camrtc_clk_group *grp,
+	char const *op,
+	int index,
+	int error)
+{
+	const char *name = "unnamed";
+
+	of_property_read_string_index(grp->device->of_node,
+			"clock-names", index, &name);
+	dev_warn(grp->device, "%s clk %s (at [%d]): failed (%d)\n",
+			op, name, index, error);
+}
+
+int camrtc_clk_group_enable(const struct camrtc_clk_group *grp)
+{
+	int index, err;
+
+	if (IS_ERR_OR_NULL(grp))
+		return -ENODEV;
+
+	for (index = 0; index < grp->nclocks; index++) {
+		err = clk_prepare_enable(grp->clocks[index]);
+		if (err) {
+			camrtc_clk_group_error(grp, "enable", index, err);
+			return err;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(camrtc_clk_group_enable);
+
+void camrtc_clk_group_disable(const struct camrtc_clk_group *grp)
+{
+	int index;
+
+	if (IS_ERR_OR_NULL(grp))
+		return;
+
+	for (index = 0; index < grp->nclocks; index++)
+		clk_disable_unprepare(grp->clocks[index]);
+}
+EXPORT_SYMBOL_GPL(camrtc_clk_group_disable);
+
+int camrtc_clk_group_adjust_slow(const struct camrtc_clk_group *grp)
+{
+	int index;
+
+	if (IS_ERR_OR_NULL(grp))
+		return -ENODEV;
+
+	for (index = 0; index < grp->nclocks; index++) {
+		u32 slow = grp->rates[index].slow;
+
+		if (slow != 0)
+			clk_set_rate(grp->clocks[index], slow);
+	}
+
+	if (grp->parents.slow != NULL) {
+		for (index = 0; index < grp->nclocks; index++)
+			clk_set_parent(grp->clocks[index],
+					grp->parents.slow);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(camrtc_clk_group_adjust_slow);
+
+int camrtc_clk_group_adjust_fast(const struct camrtc_clk_group *grp)
+{
+	int index;
+
+	if (IS_ERR_OR_NULL(grp))
+		return -ENODEV;
+
+	if (grp->parents.fast != NULL) {
+		for (index = 0; index < grp->nclocks; index++)
+			clk_set_parent(grp->clocks[index],
+					grp->parents.fast);
+	}
+
+	for (index = 0; index < grp->nclocks; index++) {
+		u32 fast = grp->rates[index].fast;
+
+		if (fast != 0)
+			clk_set_rate(grp->clocks[index], fast);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(camrtc_clk_group_adjust_fast);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/tegra/rtcpu/clk-group.h b/drivers/platform/tegra/rtcpu/clk-group.h
new file mode 100644
index 00000000..37ed292a
--- /dev/null
+++ b/drivers/platform/tegra/rtcpu/clk-group.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ */
+
+#ifndef INCLUDE_CLK_GROUP_H
+#define INCLUDE_CLK_GROUP_H
+
+struct device;
+struct camrtc_clk_group;
+
+struct camrtc_clk_group *camrtc_clk_group_get(struct device *dev);
+
+int camrtc_clk_group_enable(const struct camrtc_clk_group *grp);
+void camrtc_clk_group_disable(const struct camrtc_clk_group *grp);
+
+int camrtc_clk_group_adjust_fast(const struct camrtc_clk_group *grp);
+int camrtc_clk_group_adjust_slow(const struct camrtc_clk_group *grp);
+
+#endif /* INCLUDE_CLK_GROUP_H */
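A hypothetical consumer of this helper, assuming the device's DT node provides clock-names plus the optional nvidia,clock-rates (one slow/fast u32 pair per clock) and nvidia,clock-parents (exactly two phandles: slow, fast) properties parsed above; the probe function and its placement are illustrative only:

/* Sketch: probe-time lookup plus rate/parent switching. */
static int my_rtcpu_probe(struct device *dev)
{
	/* devres-managed: no explicit put is needed on the group. */
	struct camrtc_clk_group *clocks = camrtc_clk_group_get(dev);
	int err;

	if (IS_ERR(clocks))
		return PTR_ERR(clocks);

	err = camrtc_clk_group_enable(clocks);
	if (err)
		return err;

	/* Raise rates and reparent to the fast parent while busy... */
	camrtc_clk_group_adjust_fast(clocks);

	/* ...and drop back to the slow rates/parent when idle. */
	camrtc_clk_group_adjust_slow(clocks);

	camrtc_clk_group_disable(clocks);
	return 0;
}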
+ +#include "soc/tegra/camrtc-dbg-messages.h" + +#include +#if IS_ENABLED(CONFIG_INTERCONNECT) +#include +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if IS_ENABLED(CONFIG_TEGRA_BWMGR) +#include +#endif +#include + +#include + +#define CAMRTC_TEST_CAM_DEVICES 4 + +struct camrtc_test_device { + /* device handle */ + struct device *dev; + /* device iova for the memory in context */ + dma_addr_t dev_iova; +}; + +struct camrtc_test_mem { + /* access id in memory array */ + u32 index; + /* occupied memory size */ + size_t used; + /* total size */ + size_t size; + /* CPU address */ + void *ptr; + /* Physical base address, offsets valid for first page only */ + phys_addr_t phys_addr; + /* base iova for device used for allocation */ + dma_addr_t iova; + /* device index */ + u32 dev_index; + /* metadata for all the devices using this memory */ + struct camrtc_test_device devices[CAMRTC_TEST_CAM_DEVICES]; +}; + +struct camrtc_falcon_coverage { + u8 id; + bool enabled; + struct camrtc_test_mem mem; + struct sg_table sgt; + u64 falc_iova; + struct tegra_ivc_channel *ch; + struct device *mem_dev; + struct device *falcon_dev; +}; + +struct camrtc_debug { + struct tegra_ivc_channel *channel; + struct mutex mutex; + struct dentry *root; + wait_queue_head_t waitq; + struct { + u32 completion_timeout; + u32 mods_case; + u32 mods_loops; + u32 mods_dma_channels; + char *test_case; + size_t test_case_size; + u32 test_timeout; + u32 test_bw; + } parameters; + struct camrtc_falcon_coverage vi_falc_coverage; + struct camrtc_falcon_coverage isp_falc_coverage; + + struct camrtc_test_mem mem[CAMRTC_DBG_NUM_MEM_TEST_MEM]; + struct device *mem_devices[CAMRTC_TEST_CAM_DEVICES]; + struct ast_regset { + struct debugfs_regset32 common, region[8]; + } ast_regsets[2]; +}; + +#define NV(x) "nvidia," #x +#define FALCON_COVERAGE_MEM_SIZE (1024 * 128) /* 128kB */ + +struct camrtc_dbgfs_rmem { + /* reserved memory base address */ + phys_addr_t base_address; + /* reserved memory size */ + unsigned long total_size; + /* if reserved memory enabled */ + bool enabled; + /* memory contexts */ + struct camrtc_rmem_ctx { + phys_addr_t address; + unsigned long size; + } mem_ctxs[CAMRTC_DBG_NUM_MEM_TEST_MEM]; +}; + +static struct camrtc_dbgfs_rmem _camdbg_rmem; + +static int __init camrtc_dbgfs_rmem_init(struct reserved_mem *rmem) +{ + int i; + phys_addr_t curr_address = rmem->base; + unsigned long ctx_size = rmem->size/CAMRTC_DBG_NUM_MEM_TEST_MEM; + + _camdbg_rmem.base_address = rmem->base; + _camdbg_rmem.total_size = rmem->size; + + for (i = 0; i < CAMRTC_DBG_NUM_MEM_TEST_MEM; i++) { + _camdbg_rmem.mem_ctxs[i].address = curr_address; + _camdbg_rmem.mem_ctxs[i].size = ctx_size; + curr_address += ctx_size; + } + + _camdbg_rmem.enabled = true; + + return 0; +} + +RESERVEDMEM_OF_DECLARE(tegra_cam_rtcpu, + "nvidia,camdbg_carveout", camrtc_dbgfs_rmem_init); + +/* Get a camera-rtcpu device */ +static struct device *camrtc_get_device(struct tegra_ivc_channel *ch) +{ + if (unlikely(ch == NULL)) + return NULL; + + BUG_ON(ch->dev.parent == NULL); + BUG_ON(ch->dev.parent->parent == NULL); + + return ch->dev.parent->parent; +} + +#define INIT_OPEN_FOPS(_open) { \ + .open = _open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release \ +} + +#define DEFINE_SEQ_FOPS(_fops_, _show_) \ +static int _fops_ ## _open(struct inode *inode, struct file *file) \ +{ \ + return 
+
+static int camrtc_show_version(struct seq_file *file, void *data)
+{
+	struct tegra_ivc_channel *ch = file->private;
+	struct device *rce_dev = camrtc_get_device(ch);
+	char version[TEGRA_CAMRTC_VERSION_LEN];
+
+	tegra_camrtc_print_version(rce_dev, version, sizeof(version));
+
+	seq_puts(file, version);
+	seq_puts(file, "\n");
+
+	return 0;
+}
+
+DEFINE_SEQ_FOPS(camrtc_dbgfs_fops_version, camrtc_show_version);
+
+static int camrtc_show_reboot(struct seq_file *file, void *data)
+{
+	struct tegra_ivc_channel *ch = file->private;
+	struct device *rce_dev = camrtc_get_device(ch);
+	int ret;
+
+	/* Make rtcpu online */
+	ret = tegra_ivc_channel_runtime_get(ch);
+	if (ret < 0)
+		return ret;
+
+	ret = tegra_camrtc_reboot(rce_dev);
+	if (ret)
+		goto error;
+
+	seq_puts(file, "0\n");
+
+error:
+	tegra_ivc_channel_runtime_put(ch);
+	return ret;
+}
+
+DEFINE_SEQ_FOPS(camrtc_dbgfs_fops_reboot, camrtc_show_reboot);
+
+static void camrtc_debug_notify(struct tegra_ivc_channel *ch)
+{
+	struct camrtc_debug *crd = tegra_ivc_channel_get_drvdata(ch);
+
+	wake_up_all(&crd->waitq);
+}
+
+static int camrtc_show_forced_reset_restore(struct seq_file *file, void *data)
+{
+	struct tegra_ivc_channel *ch = file->private;
+	struct device *rce_dev = camrtc_get_device(ch);
+	int ret;
+
+	/* Make rtcpu online */
+	ret = tegra_ivc_channel_runtime_get(ch);
+	if (ret < 0)
+		return ret;
+
+	ret = tegra_camrtc_restore(rce_dev);
+	if (ret)
+		goto error;
+
+	seq_puts(file, "0\n");
+
+error:
+	tegra_ivc_channel_runtime_put(ch);
+	return ret;
+}
+
+DEFINE_SEQ_FOPS(camrtc_dbgfs_fops_forced_reset_restore,
+		camrtc_show_forced_reset_restore);
+
+static int camrtc_ivc_dbg_full_frame_xact(
+	struct tegra_ivc_channel *ch,
+	struct camrtc_dbg_request *req,
+	size_t req_size,
+	struct camrtc_dbg_response *resp,
+	size_t resp_size,
+	long timeout)
+{
+	struct camrtc_debug *crd = tegra_ivc_channel_get_drvdata(ch);
+	int ret;
+
+	if (req == NULL || resp == NULL)
+		return -ENOMEM;
+
+	if (timeout == 0)
+		timeout = crd->parameters.completion_timeout;
+
+	timeout = msecs_to_jiffies(timeout);
+
+	ret = mutex_lock_interruptible(&crd->mutex);
+	if (ret)
+		return ret;
+
+	ret = tegra_ivc_channel_runtime_get(ch);
+	if (ret < 0)
+		goto unlock;
+
+	if (!tegra_ivc_channel_online_check(ch)) {
+		ret = -ECONNRESET;
+		goto out;
+	}
+
+	while (tegra_ivc_can_read(&ch->ivc)) {
+		tegra_ivc_read_advance(&ch->ivc);
+		dev_warn(&ch->dev, "stray response\n");
+	}
+
+	timeout = wait_event_interruptible_timeout(crd->waitq,
+			tegra_ivc_channel_has_been_reset(ch) ||
+			tegra_ivc_can_write(&ch->ivc), timeout);
+	if (timeout <= 0) {
+		ret = timeout ?: -ETIMEDOUT;
+		goto out;
+	}
+	if (tegra_ivc_channel_has_been_reset(ch)) {
+		ret = -ECONNRESET;
+		goto out;
+	}
+
+	ret = tegra_ivc_write(&ch->ivc, req, req_size);
+	if (ret < 0) {
+		dev_err(&ch->dev, "IVC write error: %d\n", ret);
+		goto out;
+	}
+
+	for (;;) {
+		timeout = wait_event_interruptible_timeout(crd->waitq,
+				tegra_ivc_channel_has_been_reset(ch) ||
+				tegra_ivc_can_read(&ch->ivc),
+				timeout);
+		if (timeout <= 0) {
+			ret = timeout ?: -ETIMEDOUT;
+			break;
+		}
+		if (tegra_ivc_channel_has_been_reset(ch)) {
+			ret = -ECONNRESET;
+			break;
+		}
+
+		dev_dbg(&ch->dev, "rx msg\n");
+
+		ret = tegra_ivc_read_peek(&ch->ivc, resp, 0, resp_size);
+		if (ret < 0) {
+			dev_err(&ch->dev, "IVC read error: %d\n", ret);
+			break;
+		}
+
+		tegra_ivc_read_advance(&ch->ivc);
+
+		if (resp->resp_type ==
req->req_type) { + ret = 0; + break; + } + + dev_err(&ch->dev, "unexpected response\n"); + } + +out: + tegra_ivc_channel_runtime_put(ch); +unlock: + mutex_unlock(&crd->mutex); + return ret; +} + +static inline int camrtc_ivc_dbg_xact( + struct tegra_ivc_channel *ch, + struct camrtc_dbg_request *req, + struct camrtc_dbg_response *resp, + long timeout) +{ + return camrtc_ivc_dbg_full_frame_xact(ch, req, sizeof(*req), + resp, sizeof(*resp), + timeout); +} + +static int camrtc_show_ping(struct seq_file *file, void *data) +{ + struct tegra_ivc_channel *ch = file->private; + struct camrtc_dbg_request req = { + .req_type = CAMRTC_REQ_PING, + }; + struct camrtc_dbg_response resp; + u64 sent, recv, tsc; + int ret; + + sent = sched_clock(); + req.data.ping_data.ts_req = sent; + + ret = camrtc_ivc_dbg_xact(ch, &req, &resp, 0); + if (ret) + return ret; + + recv = sched_clock(); + tsc = resp.data.ping_data.ts_resp; + seq_printf(file, + "roundtrip=%llu.%03llu us " + "(sent=%llu.%09llu recv=%llu.%09llu)\n", + (recv - sent) / 1000, (recv - sent) % 1000, + sent / 1000000000, sent % 1000000000, + recv / 1000000000, recv % 1000000000); + seq_printf(file, + "rtcpu tsc=%llu.%09llu offset=%llu.%09llu\n", + tsc / (1000000000 / 32), tsc % (1000000000 / 32), + (tsc * 32ULL - sent) / 1000000000, + (tsc * 32ULL - sent) % 1000000000); + seq_printf(file, "%.*s\n", + (int)sizeof(resp.data.ping_data.data), + (char *)resp.data.ping_data.data); + + return 0; +} + +DEFINE_SEQ_FOPS(camrtc_dbgfs_fops_ping, camrtc_show_ping); + +static int camrtc_show_sm_ping(struct seq_file *file, void *data) +{ + struct tegra_ivc_channel *ch = file->private; + struct device *camrtc = camrtc_get_device(ch); + u64 sent, recv; + int err; + + err = tegra_ivc_channel_runtime_get(ch); + if (err < 0) + return err; + + sent = sched_clock(); + + err = tegra_camrtc_ping(camrtc, (uint32_t)sent & 0xffffffU, 0); + if (err < 0) + goto error; + + recv = sched_clock(); + err = 0; + + seq_printf(file, + "roundtrip=%llu.%03llu us " + "(sent=%llu.%09llu recv=%llu.%09llu)\n", + (recv - sent) / 1000, (recv - sent) % 1000, + sent / 1000000000, sent % 1000000000, + recv / 1000000000, recv % 1000000000); + +error: + tegra_ivc_channel_runtime_put(ch); + + return err; +} + +DEFINE_SEQ_FOPS(camrtc_dbgfs_fops_sm_ping, camrtc_show_sm_ping); + +static int camrtc_dbgfs_show_loglevel(void *data, u64 *val) +{ + struct tegra_ivc_channel *ch = data; + struct camrtc_dbg_request req = { + .req_type = CAMRTC_REQ_GET_LOGLEVEL, + }; + struct camrtc_dbg_response resp; + int ret; + + ret = camrtc_ivc_dbg_xact(ch, &req, &resp, 0); + if (ret) + return ret; + + if (resp.status != CAMRTC_STATUS_OK) + return -EPROTO; + + *val = resp.data.log_data.level; + + return 0; +} + +static int camrtc_dbgfs_store_loglevel(void *data, u64 val) +{ + struct tegra_ivc_channel *ch = data; + struct camrtc_dbg_request req = { + .req_type = CAMRTC_REQ_SET_LOGLEVEL, + }; + struct camrtc_dbg_response resp; + int ret; + + if ((u32)val != val) + return -EINVAL; + + req.data.log_data.level = val; + + ret = camrtc_ivc_dbg_xact(ch, &req, &resp, 0); + if (ret) + return ret; + + if (resp.status == CAMRTC_STATUS_INVALID_PARAM) + return -EINVAL; + else if (resp.status != CAMRTC_STATUS_OK) + return -EPROTO; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(camrtc_dbgfs_fops_loglevel, + camrtc_dbgfs_show_loglevel, + camrtc_dbgfs_store_loglevel, + "%lld\n"); + +static int camrtc_show_mods_result(struct seq_file *file, void *data) +{ + struct tegra_ivc_channel *ch = file->private; + struct camrtc_debug *crd = 
tegra_ivc_channel_get_drvdata(ch); + struct camrtc_dbg_request req = { + .req_type = CAMRTC_REQ_MODS_TEST, + }; + struct camrtc_dbg_response resp; + int ret; + unsigned long timeout = crd->parameters.completion_timeout; + u32 loops = crd->parameters.mods_loops; + + req.data.mods_data.mods_case = crd->parameters.mods_case; + req.data.mods_data.mods_loops = loops; + req.data.mods_data.mods_dma_channels = crd->parameters.mods_dma_channels; + + ret = camrtc_ivc_dbg_xact(ch, &req, &resp, loops * timeout); + if (ret == 0) + seq_printf(file, "mods=%u\n", resp.status); + + return ret; +} + +DEFINE_SEQ_FOPS(camrtc_dbgfs_fops_mods_result, camrtc_show_mods_result); + +static int camrtc_dbgfs_show_freertos_state(struct seq_file *file, void *data) +{ + struct tegra_ivc_channel *ch = file->private; + struct camrtc_dbg_request req = { + .req_type = CAMRTC_REQ_RTOS_STATE, + }; + struct camrtc_dbg_response resp; + int ret = 0; + + ret = camrtc_ivc_dbg_xact(ch, &req, &resp, 0); + if (ret == 0) { + seq_printf(file, "%.*s", + (int) sizeof(resp.data.rtos_state_data.rtos_state), + resp.data.rtos_state_data.rtos_state); + } + + return ret; +} + +DEFINE_SEQ_FOPS(camrtc_dbgfs_fops_freertos_state, + camrtc_dbgfs_show_freertos_state); + +static int camrtc_dbgfs_show_memstat(struct seq_file *file, void *data) +{ + struct tegra_ivc_channel *ch = file->private; + struct camrtc_dbg_request req = { + .req_type = CAMRTC_REQ_GET_MEM_USAGE, + }; + struct camrtc_dbg_response resp; + int ret = 0; + + ret = camrtc_ivc_dbg_xact(ch, &req, &resp, 0); + if (ret == 0) { + const struct camrtc_dbg_mem_usage *m = &resp.data.mem_usage; + uint32_t total = m->text + m->bss + m->data + m->heap + + m->stack + m->free_mem; + +#define K(x) (((x) + 1023) / 1024) + seq_printf(file, "%7s %7s %7s %7s %7s %7s %7s\n", + "text", "bss", "data", "heap", "sys", "free", "TOTAL"); + seq_printf(file, "%7u\t%7u\t%7u\t%7u\t%7u\t%7u\t%7u\n", + m->text, m->bss, m->data, m->heap, m->stack, m->free_mem, total); + seq_printf(file, "%7u\t%7u\t%7u\t%7u\t%7u\t%7u\t%7u (in kilobytes)\n", + K(m->text), K(m->bss), K(m->data), K(m->heap), + K(m->stack), K(m->free_mem), K(total)); + } + + return ret; +} + +DEFINE_SEQ_FOPS(camrtc_dbgfs_fops_memstat, camrtc_dbgfs_show_memstat); + +static int camrtc_dbgfs_show_irqstat(struct seq_file *file, void *data) +{ + int ret = -ENOMSG; +#ifdef CAMRTC_REQ_GET_IRQ_STAT + struct tegra_ivc_channel *ch = file->private; + struct camrtc_dbg_request req = { + .req_type = CAMRTC_REQ_GET_IRQ_STAT, + }; + void *mem = kzalloc(ch->ivc.frame_size, GFP_KERNEL | __GFP_ZERO); + struct camrtc_dbg_response *resp = mem; + const struct camrtc_dbg_irq_stat *stat = &resp->data.irq_stat; + uint32_t i; + uint32_t max_runtime = 0; + + ret = camrtc_ivc_dbg_full_frame_xact(ch, &req, sizeof(req), + resp, ch->ivc.frame_size, 0); + if (ret != 0) + goto done; + + seq_printf(file, "Irq#\tCount\tRuntime\tMax rt\tName\n"); + + for (i = 0; i < stat->n_irq; i++) { + seq_printf(file, "%u\t%u\t%llu\t%u\t%.*s\n", + stat->irqs[i].irq_num, + stat->irqs[i].num_called, + stat->irqs[i].runtime, + stat->irqs[i].max_runtime, + (int)sizeof(stat->irqs[i].name), stat->irqs[i].name); + + if (max_runtime < stat->irqs[i].max_runtime) + max_runtime = stat->irqs[i].max_runtime; + } + + seq_printf(file, "-\t%llu\t%llu\t%u\t%s\n", stat->total_called, + stat->total_runtime, max_runtime, "total"); + +done: + kfree(mem); +#endif + return ret; +} + +DEFINE_SEQ_FOPS(camrtc_dbgfs_fops_irqstat, camrtc_dbgfs_show_irqstat); + +static size_t camrtc_dbgfs_get_max_test_size( + const struct 
tegra_ivc_channel *ch) +{ + return ch->ivc.frame_size - offsetof(struct camrtc_dbg_request, + data.run_mem_test_data.data); +} + +static ssize_t camrtc_dbgfs_read_test_case(struct file *file, + char __user *buf, size_t count, loff_t *f_pos) +{ + struct tegra_ivc_channel *ch = file->f_inode->i_private; + struct camrtc_debug *crd = tegra_ivc_channel_get_drvdata(ch); + + return simple_read_from_buffer(buf, count, f_pos, + crd->parameters.test_case, + crd->parameters.test_case_size); +} + +static ssize_t camrtc_dbgfs_write_test_case(struct file *file, + const char __user *buf, size_t count, loff_t *f_pos) +{ + struct tegra_ivc_channel *ch = file->f_inode->i_private; + struct camrtc_debug *crd = tegra_ivc_channel_get_drvdata(ch); + char *test_case = crd->parameters.test_case; + size_t max_size = camrtc_dbgfs_get_max_test_size(ch); + int i; + ssize_t ret; + + ret = simple_write_to_buffer(test_case, max_size, f_pos, buf, count); + + if (ret >= 0) + crd->parameters.test_case_size = *f_pos; + + /* Mark input buffers empty */ + for (i = 0; i < ARRAY_SIZE(crd->mem); i++) + crd->mem[i].used = 0; + + return ret; +} + +static const struct file_operations camrtc_dbgfs_fops_test_case = { + .read = camrtc_dbgfs_read_test_case, + .write = camrtc_dbgfs_write_test_case, +}; + +static struct device *camrtc_dbgfs_memory_dev( + const struct camrtc_debug *crd) +{ + /* + * If VI misses stage-1 SMMU translation, the allocations need + * to be contiguous. Just allocate everything through VI and + * map it to other contexts separately. + */ + if (crd->mem_devices[1] != NULL) + return crd->mem_devices[1]; + else + return crd->mem_devices[0]; +} + +static ssize_t camrtc_dbgfs_read_test_mem(struct file *file, + char __user *buf, size_t count, loff_t *f_pos) +{ + struct camrtc_test_mem *mem = file->f_inode->i_private; + + return simple_read_from_buffer(buf, count, f_pos, mem->ptr, mem->used); +} + +static ssize_t camrtc_dbgfs_write_test_mem(struct file *file, + const char __user *buf, size_t count, loff_t *f_pos) +{ + struct camrtc_test_mem *mem = file->f_inode->i_private; + struct camrtc_debug *crd = container_of( + mem, struct camrtc_debug, mem[mem->index]); + struct device *mem_dev = camrtc_dbgfs_memory_dev(crd); + struct iommu_domain *domain = iommu_get_domain_for_dev(mem_dev); + ssize_t ret; + + if ((*f_pos + count) > mem->size) { + if (_camdbg_rmem.enabled) { + size_t size = round_up(*f_pos + count, 64 * 1024); + void *ptr = phys_to_virt( + _camdbg_rmem.mem_ctxs[mem->index].address); + unsigned long rmem_size = + _camdbg_rmem.mem_ctxs[mem->index].size; + + if (size > rmem_size) { + pr_err("%s: not enough memory\n", __func__); + return -ENOMEM; + } + + if (mem->ptr) + dma_unmap_single(mem_dev, mem->iova, mem->size, + DMA_BIDIRECTIONAL); + + /* same addr, no overwrite concern */ + mem->ptr = ptr; + mem->size = size; + + mem->iova = dma_map_single(mem_dev, mem->ptr, + mem->size, DMA_BIDIRECTIONAL); + if (dma_mapping_error(mem_dev, mem->iova)) { + pr_err("%s: dma map failed\n", __func__); + return -ENOMEM; + } + } else { + size_t size = round_up(*f_pos + count, 64 * 1024); + dma_addr_t iova; + void *ptr = dma_alloc_coherent(mem_dev, size, &iova, + GFP_KERNEL | __GFP_ZERO); + if (ptr == NULL) + return -ENOMEM; + if (mem->ptr) { + memcpy(ptr, mem->ptr, mem->used); + dma_free_coherent(mem_dev, mem->size, mem->ptr, + mem->iova); + } + mem->ptr = ptr; + mem->size = size; + mem->iova = iova; + } + + /* If mem_dev is not connected to SMMU, the iova is physical */ + if (domain) + mem->phys_addr = iommu_iova_to_phys(domain, 
mem->iova); + else + mem->phys_addr = mem->iova; + } + + ret = simple_write_to_buffer(mem->ptr, mem->size, f_pos, buf, count); + + if (ret >= 0) { + mem->used = *f_pos; + + if (mem->used == 0 && mem->ptr != NULL) { + if (_camdbg_rmem.enabled) + dma_unmap_single(mem_dev, mem->iova, mem->size, + DMA_BIDIRECTIONAL); + else + dma_free_coherent(mem_dev, mem->size, mem->ptr, + mem->iova); + + memset(mem, 0, sizeof(*mem)); + } + } + + return ret; +} + +static const struct file_operations camrtc_dbgfs_fops_test_mem = { + .read = camrtc_dbgfs_read_test_mem, + .write = camrtc_dbgfs_write_test_mem, +}; + +#define BUILD_BUG_ON_MISMATCH(s1, f1, s2, f2) \ + BUILD_BUG_ON(offsetof(s1, data.f1) != offsetof(s2, data.f2)) + +static int camrtc_test_run_and_show_result(struct seq_file *file, + struct camrtc_dbg_request *req, + struct camrtc_dbg_response *resp, + size_t data_offset) +{ + struct tegra_ivc_channel *ch = file->private; + struct camrtc_debug *crd = tegra_ivc_channel_get_drvdata(ch); + const char *test_case = crd->parameters.test_case; + size_t test_case_size = crd->parameters.test_case_size; + unsigned long timeout = crd->parameters.test_timeout; + uint64_t ns; + size_t req_size = ch->ivc.frame_size; + size_t resp_size = ch->ivc.frame_size; + int ret; + const char *result = (const void *)resp + data_offset; + size_t result_size = resp_size - data_offset; + const char *nul; + + if (WARN_ON(test_case_size > camrtc_dbgfs_get_max_test_size(ch))) + test_case_size = camrtc_dbgfs_get_max_test_size(ch); + + memcpy((char *)req + data_offset, test_case, test_case_size); + + /* Timeout is in ms, run_test_data.timeout in ns */ + if (timeout > 40) + ns = 1000000ULL * (timeout - 20); + else + ns = 1000000ULL * (timeout / 2); + + BUILD_BUG_ON_MISMATCH( + struct camrtc_dbg_request, run_mem_test_data.timeout, + struct camrtc_dbg_request, run_test_data.timeout); + + ret = tegra_ivc_channel_runtime_get(ch); + if (ret < 0) + return ret; + + req->data.run_test_data.timeout = ns; + + ret = camrtc_ivc_dbg_full_frame_xact(ch, req, req_size, + resp, resp_size, timeout); + + tegra_camrtc_flush_trace(camrtc_get_device(ch)); + + if (ret < 0) { + if (ret != -ECONNRESET) { + dev_info(&ch->dev, "rebooting after a failed test run"); + (void)tegra_camrtc_reboot(camrtc_get_device(ch)); + } + goto runtime_put; + } + + BUILD_BUG_ON_MISMATCH( + struct camrtc_dbg_response, run_mem_test_data.timeout, + struct camrtc_dbg_response, run_test_data.timeout); + + ns = resp->data.run_test_data.timeout; + + seq_printf(file, "result=%u runtime=%llu.%06llu ms\n\n", + resp->status, ns / 1000000, ns % 1000000); + + nul = memchr(result, '\0', result_size); + if (nul) + seq_write(file, result, nul - result); + else + seq_write(file, result, result_size); + +runtime_put: + tegra_ivc_channel_runtime_put(ch); + + return ret; +} + +static void camrtc_run_rmem_unmap_all(struct camrtc_debug *crd, + struct camrtc_test_mem *mem, bool all) +{ + int i; + struct device *mem_dev = camrtc_dbgfs_memory_dev(crd); + + /* Nothing to unmap */ + if (mem->ptr == NULL) + return; + + for (i = 0; i < mem->dev_index; i++) { + struct device *dev = mem->devices[i].dev; + dma_addr_t iova = mem->devices[i].dev_iova; + + if (dev == NULL) + break; + + /* keep mem_dev mapped unless forced */ + if (!all && (dev == mem_dev)) + continue; + + dma_unmap_single(dev, iova, + mem->size, DMA_BIDIRECTIONAL); + } +} + +static int camrtc_run_mem_map(struct tegra_ivc_channel *ch, + struct device *mem_dev, + struct device *dev, + struct sg_table *sgt, + struct camrtc_test_mem *mem, + uint64_t 
*return_iova) +{ + int ret = 0; + + *return_iova = 0ULL; + + if (dev == NULL) + return 0; + + if (mem->dev_index >= CAMRTC_TEST_CAM_DEVICES) { + pr_err("%s: device list exhausted\n", __func__); + return -ENOMEM; + } + + if (mem_dev == dev) { + *return_iova = mem->iova; + goto done; + } + + if (_camdbg_rmem.enabled) { + *return_iova = dma_map_single(dev, mem->ptr, + mem->size, DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, *return_iova)) { + pr_err("%s: dma map failed\n", __func__); + *return_iova = 0ULL; + return -ENOMEM; + } + } else { + ret = dma_get_sgtable(dev, sgt, mem->ptr, mem->iova, mem->size); + if (ret < 0) { + dev_err(&ch->dev, "dma_get_sgtable for %s failed\n", + dev_name(dev)); + return ret; + } + + if (!dma_map_sg(dev, sgt->sgl, sgt->orig_nents, + DMA_BIDIRECTIONAL)) { + dev_err(&ch->dev, "failed to map %s mem at 0x%llx\n", + dev_name(dev), (u64)mem->iova); + sg_free_table(sgt); + /* the table is freed: bail out before sgt->sgl is dereferenced below */ + return -ENXIO; + } + + *return_iova = sgt->sgl->dma_address; + } + +done: + mem->devices[mem->dev_index].dev = dev; + mem->devices[mem->dev_index++].dev_iova = *return_iova; + + return ret; +} + +struct camrtc_run_membw { + struct device *dev; +#if IS_ENABLED(CONFIG_INTERCONNECT) + struct icc_path *icc_path; +#endif +#if IS_ENABLED(CONFIG_TEGRA_BWMGR) + struct tegra_bwmgr_client *bwmgr; +#endif +}; + +static void camrtc_membw_set(struct camrtc_run_membw *membw, u32 bw) +{ + struct device *dev = membw->dev; + + if (bw == 0) { + ; + } else if (tegra_get_chip_id() == TEGRA234) { +#if IS_ENABLED(CONFIG_INTERCONNECT) + struct icc_path *icc_path; + int ret; + + icc_path = icc_get(dev, TEGRA_ICC_RCE, TEGRA_ICC_PRIMARY); + + if (!IS_ERR_OR_NULL(icc_path)) { + ret = icc_set_bw(icc_path, 0, bw); + + if (ret) + dev_err(dev, "set icc bw [%u] failed: %d\n", bw, ret); + else + dev_dbg(dev, "requested icc bw %u\n", bw); + + membw->icc_path = icc_path; + } +#endif + } else { +#if IS_ENABLED(CONFIG_TEGRA_BWMGR) + struct tegra_bwmgr_client *bwmgr; + unsigned long emc_rate; + int ret; + + bwmgr = tegra_bwmgr_register(TEGRA_BWMGR_CLIENT_CAMERA_NON_ISO); + + if (!IS_ERR_OR_NULL(bwmgr)) { + if (bw == 0xFFFFFFFFU) + emc_rate = tegra_bwmgr_get_max_emc_rate(); + else + emc_rate = tegra_bwmgr_round_rate(bw); + + ret = tegra_bwmgr_set_emc(bwmgr, + emc_rate, TEGRA_BWMGR_SET_EMC_SHARED_BW); + + if (ret < 0) + dev_info(dev, "emc request rate %lu failed, %d\n", emc_rate, ret); + else + dev_dbg(dev, "requested emc rate %lu\n", emc_rate); + + membw->bwmgr = bwmgr; + } +#endif + } +} + +static void camrtc_membw_reset(struct camrtc_run_membw *membw) +{ +#if IS_ENABLED(CONFIG_INTERCONNECT) + if (membw->icc_path) + icc_put(membw->icc_path); +#endif + +#if IS_ENABLED(CONFIG_TEGRA_BWMGR) + if (membw->bwmgr) + tegra_bwmgr_unregister(membw->bwmgr); +#endif +} + +static int camrtc_run_mem_test(struct seq_file *file, + struct camrtc_dbg_request *req, + struct camrtc_dbg_response *resp) +{ + struct tegra_ivc_channel *ch = file->private; + struct camrtc_debug *crd = tegra_ivc_channel_get_drvdata(ch); + struct camrtc_run_membw membw = { .dev = crd->mem_devices[0], }; + struct camrtc_dbg_test_mem *testmem; + size_t i; + int ret = 0; + struct device *mem_dev = camrtc_dbgfs_memory_dev(crd); + struct device *rce_dev = crd->mem_devices[0]; + struct sg_table rce_sgt[ARRAY_SIZE(crd->mem)]; + struct device *vi_dev = crd->mem_devices[1]; + struct sg_table vi_sgt[ARRAY_SIZE(crd->mem)]; + struct device *isp_dev = crd->mem_devices[2]; + struct sg_table isp_sgt[ARRAY_SIZE(crd->mem)]; + struct device *vi2_dev = crd->mem_devices[3]; + struct sg_table
vi2_sgt[ARRAY_SIZE(crd->mem)]; + struct camrtc_test_mem *mem0 = &crd->mem[0]; + + memset(rce_sgt, 0, sizeof(rce_sgt)); + memset(vi_sgt, 0, sizeof(vi_sgt)); + memset(isp_sgt, 0, sizeof(isp_sgt)); + memset(vi2_sgt, 0, sizeof(vi2_sgt)); + + req->req_type = CAMRTC_REQ_RUN_MEM_TEST; + + /* Allocate 6MB scratch memory in mem0 by default */ + if (!mem0->used) { + const size_t size = 6U << 20U; /* 6 MB */ + dma_addr_t iova; + void *ptr; + struct iommu_domain *domain = iommu_get_domain_for_dev(mem_dev); + + if (mem0->ptr) { + if (_camdbg_rmem.enabled) + camrtc_run_rmem_unmap_all(crd, mem0, true); + else + dma_free_coherent(mem_dev, mem0->size, + mem0->ptr, mem0->iova); + + memset(mem0, 0, sizeof(*mem0)); + } + + if (_camdbg_rmem.enabled) { + if (_camdbg_rmem.mem_ctxs[0].size < size) { + pr_err( + "%s: mem [%lu] < req size [%lu]\n", + __func__, _camdbg_rmem.mem_ctxs[0].size, + size); + return -ENOMEM; + } + + ptr = phys_to_virt(_camdbg_rmem.mem_ctxs[0].address); + + iova = dma_map_single(mem_dev, ptr, size, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(mem_dev, iova)) { + pr_err("%s: dma map failed\n", __func__); + return -ENOMEM; + } + } else { + ptr = dma_alloc_coherent(mem_dev, size, &iova, + GFP_KERNEL | __GFP_ZERO); + if (ptr == NULL) + return -ENOMEM; + } + + mem0->ptr = ptr; + mem0->size = size; + + /* If mem_dev is not connected to SMMU, the iova is physical */ + if (domain) + mem0->phys_addr = iommu_iova_to_phys(domain, iova); + else + mem0->phys_addr = iova; + + mem0->iova = iova; + mem0->used = size; + } + + camrtc_membw_set(&membw, crd->parameters.test_bw); + + for (i = 0; i < ARRAY_SIZE(crd->mem); i++) { + struct camrtc_test_mem *mem = &crd->mem[i]; + + if (mem->used == 0) + continue; + + testmem = &req->data.run_mem_test_data.mem[i]; + + testmem->size = mem->used; + testmem->page_size = PAGE_SIZE; + testmem->phys_addr = mem->phys_addr; + + ret = camrtc_run_mem_map(ch, mem_dev, rce_dev, &rce_sgt[i], mem, + &testmem->rtcpu_iova); + if (ret < 0) + goto unmap; + + ret = camrtc_run_mem_map(ch, mem_dev, vi_dev, &vi_sgt[i], mem, + &testmem->vi_iova); + if (ret < 0) + goto unmap; + + ret = camrtc_run_mem_map(ch, mem_dev, isp_dev, &isp_sgt[i], mem, + &testmem->isp_iova); + if (ret < 0) + goto unmap; + + ret = camrtc_run_mem_map(ch, mem_dev, vi2_dev, &vi2_sgt[i], mem, + &testmem->vi2_iova); + if (ret < 0) + goto unmap; + + dma_sync_single_for_device(mem_dev, mem->iova, mem->used, + DMA_BIDIRECTIONAL); + } + + BUILD_BUG_ON_MISMATCH( + struct camrtc_dbg_request, run_mem_test_data.data, + struct camrtc_dbg_response, run_mem_test_data.data); + + ret = camrtc_test_run_and_show_result(file, req, resp, + offsetof(struct camrtc_dbg_response, + data.run_mem_test_data.data)); + if (ret < 0) + goto unmap; + + for (i = 0; i < ARRAY_SIZE(crd->mem); i++) { + struct camrtc_test_mem *mem = &crd->mem[i]; + + if (mem->size == 0) + continue; + + testmem = &resp->data.run_mem_test_data.mem[i]; + if (!WARN_ON(testmem->size > mem->size)) + mem->used = testmem->size; + + dma_sync_single_for_cpu(mem_dev, mem->iova, mem->used, + DMA_BIDIRECTIONAL); + } + +unmap: + camrtc_membw_reset(&membw); + + for (i = 0; i < ARRAY_SIZE(vi_sgt); i++) { + if (rce_sgt[i].sgl) { + dma_unmap_sg(rce_dev, rce_sgt[i].sgl, + rce_sgt[i].orig_nents, DMA_BIDIRECTIONAL); + sg_free_table(&rce_sgt[i]); + } + if (vi_sgt[i].sgl) { + dma_unmap_sg(vi_dev, vi_sgt[i].sgl, + vi_sgt[i].orig_nents, DMA_BIDIRECTIONAL); + sg_free_table(&vi_sgt[i]); + } + if (isp_sgt[i].sgl) { + dma_unmap_sg(isp_dev, isp_sgt[i].sgl, + isp_sgt[i].orig_nents, DMA_BIDIRECTIONAL); 
+ sg_free_table(&isp_sgt[i]); + } + if (vi2_sgt[i].sgl) { + dma_unmap_sg(vi2_dev, vi2_sgt[i].sgl, + vi2_sgt[i].orig_nents, DMA_BIDIRECTIONAL); + sg_free_table(&vi2_sgt[i]); + } + } + + if (_camdbg_rmem.enabled) { + for (i = 0; i < ARRAY_SIZE(crd->mem); i++) { + struct camrtc_test_mem *mem = &crd->mem[i]; + camrtc_run_rmem_unmap_all(crd, mem, false); + } + } + + /* Reset mapping info, memory can still be used by cpu tests */ + for (i = 0; i < ARRAY_SIZE(crd->mem); i++) { + crd->mem[i].dev_index = 0U; + memset(&crd->mem[i].devices, 0, + (ARRAY_SIZE(crd->mem[i].devices) * + sizeof(struct camrtc_test_device))); + } + + return ret; +} + +static int camrtc_dbgfs_show_test_result(struct seq_file *file, void *data) +{ + struct tegra_ivc_channel *ch = file->private; + void *mem = kzalloc(2 * ch->ivc.frame_size, GFP_KERNEL | __GFP_ZERO); + struct camrtc_dbg_request *req = mem; + struct camrtc_dbg_response *resp = mem + ch->ivc.frame_size; + int ret; + + if (mem == NULL) + return -ENOMEM; + + ret = camrtc_run_mem_test(file, req, resp); + kfree(mem); + + return ret; +} + +DEFINE_SEQ_FOPS(camrtc_dbgfs_fops_test_result, camrtc_dbgfs_show_test_result); + +static int camrtc_dbgfs_show_test_list(struct seq_file *file, void *data) +{ + struct tegra_ivc_channel *ch = file->private; + struct camrtc_dbg_request req = { + .req_type = CAMRTC_REQ_RUN_TEST, + }; + struct camrtc_dbg_response *resp; + int ret; + + resp = kzalloc(ch->ivc.frame_size, GFP_KERNEL | __GFP_ZERO); + if (resp == NULL) + return -ENOMEM; + + memset(req.data.run_test_data.data, 0, + sizeof(req.data.run_test_data.data)); + strcpy(req.data.run_test_data.data, "list\n"); + + ret = camrtc_ivc_dbg_full_frame_xact(ch, &req, sizeof(req), + resp, ch->ivc.frame_size, 0); + if (ret == 0 && resp->status == CAMRTC_STATUS_OK) { + char const *list = (char const *)resp->data.run_test_data.data; + size_t textsize = ch->ivc.frame_size - + offsetof(struct camrtc_dbg_response, + data.run_test_data.data); + size_t i; + + /* Remove first line */ + for (i = 0; i < textsize; i++) + if (list[i] == '\n') + break; + for (; i < textsize; i++) + if (list[i] != '\n' && list[i] != '\r') + break; + + seq_printf(file, "%.*s", (int)(textsize - i), list + i); + } + + kfree(resp); + + return ret; +} + +DEFINE_SEQ_FOPS(camrtc_dbgfs_fops_test_list, camrtc_dbgfs_show_test_list); + +static int camrtc_coverage_msg(struct camrtc_falcon_coverage *cov, + struct camrtc_dbg_response *resp, + bool flush, bool reset) +{ + struct camrtc_dbg_request req = { + .req_type = CAMRTC_REQ_SET_FALCON_COVERAGE, + .data = { + .coverage_data = { + .falcon_id = cov->id, + .size = cov->enabled ? cov->mem.size : 0, + .iova = cov->enabled ? cov->falc_iova : 0, + .flush = flush ? 1 : 0, + .reset = reset ? 
1 : 0, + }, + }, + }; + struct tegra_ivc_channel *ch = cov->ch; + int ret; + + ret = camrtc_ivc_dbg_xact(ch, &req, resp, 200); + + if (ret || (resp->status != CAMRTC_STATUS_OK)) { + dev_warn(&ch->dev, "Coverage IVC error: %d, status %u, id %u\n", + ret, resp->status, cov->id); + ret = -ENODEV; + } else if (resp->data.coverage_stat.full == 1) { + ret = -EOVERFLOW; + } + + return ret; +} + +static bool camrtc_coverage_is_supported(struct camrtc_falcon_coverage *cov) +{ + struct camrtc_dbg_response resp; + + /* resp holds valid data only when the transaction itself went through */ + if (camrtc_coverage_msg(cov, &resp, false, false) == -ENODEV) + return false; + + return (resp.status == CAMRTC_STATUS_OK); +} + +static ssize_t camrtc_read_falcon_coverage(struct file *file, + char __user *buf, size_t count, loff_t *f_pos) +{ + struct camrtc_falcon_coverage *cov = file->f_inode->i_private; + struct camrtc_dbg_response resp; + ssize_t ret = 0; + + if (!cov->enabled) { + ret = -ENODEV; + goto done; + } + + /* In the beginning, do a flush */ + if (*f_pos == 0) { + /* Flush falcon buffer */ + ret = camrtc_coverage_msg(cov, &resp, true, false); + + if (ret) + goto done; + + cov->mem.used = resp.data.coverage_stat.bytes_written; + + dma_sync_single_for_cpu(cov->mem_dev, cov->mem.iova, + cov->mem.size, DMA_BIDIRECTIONAL); + } + + ret = simple_read_from_buffer(buf, count, f_pos, + cov->mem.ptr, cov->mem.used); +done: + return ret; +} + +static ssize_t camrtc_write_falcon_coverage(struct file *file, + const char __user *buf, size_t count, loff_t *f_pos) +{ + struct camrtc_falcon_coverage *cov = file->f_inode->i_private; + struct camrtc_dbg_response resp; + ssize_t ret = count; + + if (cov->enabled) { + memset(cov->mem.ptr, 0, cov->mem.size); + if (camrtc_coverage_msg(cov, &resp, false, true)) + ret = -ENODEV; + else + *f_pos += count; + } else { + ret = -ENODEV; + } + + return ret; +} +static const struct file_operations camrtc_dbgfs_fops_falcon_coverage = { + .read = camrtc_read_falcon_coverage, + .write = camrtc_write_falcon_coverage, +}; + +static int camrtc_falcon_coverage_enable(struct camrtc_falcon_coverage *cov) +{ + struct tegra_ivc_channel *ch = cov->ch; + struct device *mem_dev = cov->mem_dev; + struct device *falcon_dev = cov->falcon_dev; + struct camrtc_dbg_response resp; + int ret = 0; + + if (cov->enabled) + goto done; + + if (!camrtc_coverage_is_supported(cov)) { + ret = -ENODEV; + goto done; + } + + cov->mem.ptr = dma_alloc_coherent(mem_dev, + FALCON_COVERAGE_MEM_SIZE, + &cov->mem.iova, + GFP_KERNEL | __GFP_ZERO); + if (cov->mem.ptr == NULL) { + dev_warn(&ch->dev, + "Failed to allocate Falcon 0x%02x coverage memory!\n", + cov->id); + ret = -ENOMEM; + goto error; + } + + cov->mem.size = FALCON_COVERAGE_MEM_SIZE; + + ret = camrtc_run_mem_map(ch, cov->mem_dev, falcon_dev, + &cov->sgt, &cov->mem, + &cov->falc_iova); + if (ret) { + dev_warn(&ch->dev, + "Failed to map Falcon 0x%02x coverage memory\n", + cov->id); + goto clean_mem; + } + + /* Keep rtcpu alive when falcon coverage is in use.
*/ + ret = tegra_ivc_channel_runtime_get(ch); + if (ret < 0) + goto clean_mem; + + cov->enabled = true; + + /* Sync state with rtcpu */ + camrtc_coverage_msg(cov, &resp, false, false); + + dev_dbg(&ch->dev, "Falcon 0x%02x code coverage enabled.\n", + cov->id); + +done: + return ret; + +clean_mem: + dma_free_coherent(mem_dev, cov->mem.size, cov->mem.ptr, cov->mem.iova); + memset(&cov->mem, 0, sizeof(struct camrtc_test_mem)); + cov->enabled = false; + +error: + return ret; +} + +static void camrtc_falcon_coverage_disable(struct camrtc_falcon_coverage *cov) +{ + struct tegra_ivc_channel *ch = cov->ch; + struct device *mem_dev = cov->mem_dev; + struct device *falcon_dev = cov->falcon_dev; + struct camrtc_dbg_response resp; + + if (!cov->enabled) + return; + + /* Disable and sync with rtcpu */ + cov->enabled = false; + camrtc_coverage_msg(cov, &resp, false, false); + + if (cov->sgt.sgl) { + dma_unmap_sg(falcon_dev, cov->sgt.sgl, + cov->sgt.orig_nents, DMA_BIDIRECTIONAL); + sg_free_table(&cov->sgt); + } + + if (cov->mem.ptr) { + dma_free_coherent(mem_dev, cov->mem.size, + cov->mem.ptr, cov->mem.iova); + memset(&cov->mem, 0, sizeof(struct camrtc_test_mem)); + } + + tegra_ivc_channel_runtime_put(ch); +} + +static int camrtc_dbgfs_show_coverage_enable(void *data, u64 *val) +{ + struct camrtc_falcon_coverage *cov = data; + + *val = cov->enabled ? 1 : 0; + + return 0; +} + +static int camrtc_dbgfs_store_coverage_enable(void *data, u64 val) +{ + struct camrtc_falcon_coverage *cov = data; + bool enable = (val != 0) ? true : false; + int ret = 0; + + if (cov->enabled != enable) { + if (enable) + ret = camrtc_falcon_coverage_enable(cov); + else + camrtc_falcon_coverage_disable(cov); + } + + return ret; +} + +DEFINE_SIMPLE_ATTRIBUTE(camrtc_dbgfs_fops_coverage_enable, + camrtc_dbgfs_show_coverage_enable, + camrtc_dbgfs_store_coverage_enable, + "%lld\n"); + +#define TEGRA_APS_AST_CONTROL 0x0 +#define TEGRA_APS_AST_STREAMID_CTL 0x20 +#define TEGRA_APS_AST_REGION_0_SLAVE_BASE_LO 0x100 +#define TEGRA_APS_AST_REGION_0_SLAVE_BASE_HI 0x104 +#define TEGRA_APS_AST_REGION_0_MASK_LO 0x108 +#define TEGRA_APS_AST_REGION_0_MASK_HI 0x10c +#define TEGRA_APS_AST_REGION_0_MASTER_BASE_LO 0x110 +#define TEGRA_APS_AST_REGION_0_MASTER_BASE_HI 0x114 +#define TEGRA_APS_AST_REGION_0_CONTROL 0x118 + +#define TEGRA_APS_AST_REGION_STRIDE 0x20 + +#define AST_RGN_CTRL_VM_INDEX 15 +#define AST_RGN_CTRL_SNOOP BIT(2) + +#define AST_ADDR_MASK64 (~0xfffULL) + +struct tegra_ast_region_info { + u8 enabled; + u8 lock; + u8 snoop; + u8 non_secure; + + u8 ns_passthru; + u8 carveout_id; + u8 carveout_al; + u8 vpr_rd; + + u8 vpr_wr; + u8 vpr_passthru; + u8 vm_index; + u8 physical; + + u8 stream_id; + u8 stream_id_enabled; + u8 pad[2]; + + u64 slave; + u64 mask; + u64 master; + u32 control; +}; + +static void tegra_ast_get_region_info(void __iomem *base, + u32 region, + struct tegra_ast_region_info *info) +{ + u32 offset = region * TEGRA_APS_AST_REGION_STRIDE; + u32 vmidx, stream_id, gcontrol, control; + u64 lo, hi; + + control = readl(base + TEGRA_APS_AST_REGION_0_CONTROL + offset); + info->control = control; + + info->lock = (control & BIT(0)) != 0; + info->snoop = (control & BIT(2)) != 0; + info->non_secure = (control & BIT(3)) != 0; + info->ns_passthru = (control & BIT(4)) != 0; + info->carveout_id = (control >> 5) & (0x1f); + info->carveout_al = (control >> 10) & 0x3; + info->vpr_rd = (control & BIT(12)) != 0; + info->vpr_wr = (control & BIT(13)) != 0; + info->vpr_passthru = (control & BIT(14)) != 0; + vmidx = (control >>
AST_RGN_CTRL_VM_INDEX) & 0xf; + info->vm_index = vmidx; + info->physical = (control & BIT(19)) != 0; + + if (info->physical) { + gcontrol = readl(base + TEGRA_APS_AST_CONTROL); + info->stream_id = (gcontrol >> 22) & 0x7F; + info->stream_id_enabled = 1; + } else { + stream_id = readl(base + TEGRA_APS_AST_STREAMID_CTL + + (4 * vmidx)); + info->stream_id = (stream_id >> 8) & 0xFF; + info->stream_id_enabled = (stream_id & BIT(0)) != 0; + } + + lo = readl(base + TEGRA_APS_AST_REGION_0_SLAVE_BASE_LO + offset); + hi = readl(base + TEGRA_APS_AST_REGION_0_SLAVE_BASE_HI + offset); + + info->slave = ((hi << 32U) + lo) & AST_ADDR_MASK64; + info->enabled = (lo & BIT(0)) != 0; + + hi = readl(base + TEGRA_APS_AST_REGION_0_MASK_HI + offset); + lo = readl(base + TEGRA_APS_AST_REGION_0_MASK_LO + offset); + + info->mask = ((hi << 32U) + lo) | ~AST_ADDR_MASK64; + + hi = readl(base + TEGRA_APS_AST_REGION_0_MASTER_BASE_HI + offset); + lo = readl(base + TEGRA_APS_AST_REGION_0_MASTER_BASE_LO + offset); + + info->master = ((hi << 32U) + lo) & AST_ADDR_MASK64; +} + +static void __iomem *iomap_byname(struct device *dev, const char *name) +{ + int index = of_property_match_string(dev->of_node, "reg-names", name); + if (index < 0) + return IOMEM_ERR_PTR(-ENOENT); + + return of_iomap(dev->of_node, index); +} + +static void camrtc_dbgfs_show_ast_region(struct seq_file *file, + void __iomem *base, u32 index) +{ + struct tegra_ast_region_info info; + + tegra_ast_get_region_info(base, index, &info); + + seq_printf(file, "ast region %u %s\n", index, + info.enabled ? "enabled" : "disabled"); + + if (!info.enabled) + return; + + seq_printf(file, + "\tslave=0x%llx\n" + "\tmaster=0x%llx\n" + "\tsize=0x%llx\n" + "\tlock=%u snoop=%u non_secure=%u ns_passthru=%u\n" + "\tcarveout_id=%u carveout_al=%u\n" + "\tvpr_rd=%u vpr_wr=%u vpr_passthru=%u\n" + "\tvm_index=%u physical=%u\n" + "\tstream_id=%u (enabled=%u)\n", + info.slave, info.master, info.mask + 1, + info.lock, info.snoop, + info.non_secure, info.ns_passthru, + info.carveout_id, info.carveout_al, + info.vpr_rd, info.vpr_wr, info.vpr_passthru, + info.vm_index, info.physical, + info.stream_id, info.stream_id_enabled); +} + +struct camrtc_dbgfs_ast_node { + struct tegra_ivc_channel *ch; + const char *name; + uint8_t mask; +}; + +static int camrtc_dbgfs_show_ast(struct seq_file *file, + void *data) +{ + struct camrtc_dbgfs_ast_node *node = file->private; + void __iomem *ast; + int i; + + ast = iomap_byname(camrtc_get_device(node->ch), node->name); + /* iomap_byname() can return an ERR_PTR as well as NULL */ + if (IS_ERR_OR_NULL(ast)) + return -ENOMEM; + + for (i = 0; i <= 7; i++) { + if (!(node->mask & BIT(i))) + continue; + + camrtc_dbgfs_show_ast_region(file, ast, i); + + if (node->mask & (node->mask - 1)) /* are multiple bits set?
*/ + seq_puts(file, "\n"); + } + + iounmap(ast); + return 0; +} + +DEFINE_SEQ_FOPS(camrtc_dbgfs_fops_ast, camrtc_dbgfs_show_ast); + +static const struct debugfs_reg32 ast_common_regs[] = { + { .name = "control", 0x0 }, + { .name = "error_status", 0x4 }, + { .name = "error_addr_lo", 0x8 }, + { .name = "error_addr_h", 0xC }, + { .name = "streamid_ctl_0", 0x20 }, + { .name = "streamid_ctl_1", 0x24 }, + { .name = "streamid_ctl_2", 0x28 }, + { .name = "streamid_ctl_3", 0x2C }, + { .name = "streamid_ctl_4", 0x30 }, + { .name = "streamid_ctl_5", 0x34 }, + { .name = "streamid_ctl_6", 0x38 }, + { .name = "streamid_ctl_7", 0x3C }, + { .name = "streamid_ctl_8", 0x40 }, + { .name = "streamid_ctl_9", 0x44 }, + { .name = "streamid_ctl_10", 0x48 }, + { .name = "streamid_ctl_11", 0x4C }, + { .name = "streamid_ctl_12", 0x50 }, + { .name = "streamid_ctl_13", 0x54 }, + { .name = "streamid_ctl_14", 0x58 }, + { .name = "streamid_ctl_15", 0x5C }, + { .name = "write_block_status", 0x60 }, + { .name = "read_block_status", 0x64 }, +}; + +static const struct debugfs_reg32 ast_region_regs[] = { + { .name = "slave_lo", 0x100 }, + { .name = "slave_hi", 0x104 }, + { .name = "mask_lo", 0x108 }, + { .name = "mask_hi", 0x10C }, + { .name = "master_lo", 0x110 }, + { .name = "master_hi", 0x114 }, + { .name = "control", 0x118 }, +}; + +static int ast_regset_create_files(struct tegra_ivc_channel *ch, + struct dentry *dir, + struct ast_regset *ars, + char const *ast_name) +{ + void __iomem *base; + int i; + + base = iomap_byname(camrtc_get_device(ch), ast_name); + if (IS_ERR_OR_NULL(base)) + return -ENOMEM; + + ars->common.base = base; + ars->common.regs = ast_common_regs; + ars->common.nregs = ARRAY_SIZE(ast_common_regs); + + debugfs_create_regset32("regs-common", 0444, dir, &ars->common); + + for (i = 0; i < ARRAY_SIZE(ars->region); i++) { + char name[16]; + + snprintf(name, sizeof(name), "regs-region%u", i); + + ars->region[i].base = base + i * TEGRA_APS_AST_REGION_STRIDE; + ars->region[i].regs = ast_region_regs; + ars->region[i].nregs = ARRAY_SIZE(ast_region_regs); + + debugfs_create_regset32(name, 0444, dir, &ars->region[i]); + } + + return 0; +} + +static int camrtc_debug_populate(struct tegra_ivc_channel *ch) +{ + struct camrtc_debug *crd = tegra_ivc_channel_get_drvdata(ch); + struct dentry *dir; + struct dentry *coverage; + struct dentry *vi; + struct dentry *isp; + struct camrtc_dbgfs_ast_node *ast_nodes; + unsigned int i, dma, region; + char const *name = "camrtc"; + + of_property_read_string(ch->dev.of_node, NV(debugfs), &name); + + crd->root = dir = debugfs_create_dir(name, NULL); + if (dir == NULL) + return -ENOMEM; + + coverage = debugfs_create_dir("coverage", dir); + if (coverage == NULL) + goto error; + vi = debugfs_create_dir("vi", coverage); + if (vi == NULL) + goto error; + isp = debugfs_create_dir("isp", coverage); + if (isp == NULL) + goto error; + if (!debugfs_create_file("data", 0600, vi, + &crd->vi_falc_coverage, + &camrtc_dbgfs_fops_falcon_coverage)) + goto error; + if (!debugfs_create_file("enable", 0600, vi, + &crd->vi_falc_coverage, + &camrtc_dbgfs_fops_coverage_enable)) + goto error; + if (!debugfs_create_file("data", 0600, isp, + &crd->isp_falc_coverage, + &camrtc_dbgfs_fops_falcon_coverage)) + goto error; + if (!debugfs_create_file("enable", 0600, isp, + &crd->isp_falc_coverage, + &camrtc_dbgfs_fops_coverage_enable)) + goto error; + + if (!debugfs_create_file("version", 0444, dir, ch, + &camrtc_dbgfs_fops_version)) + goto error; + if (!debugfs_create_file("reboot", 0400, dir, ch, + 
&camrtc_dbgfs_fops_reboot)) + goto error; + if (!debugfs_create_file("ping", 0444, dir, ch, + &camrtc_dbgfs_fops_ping)) + goto error; + if (!debugfs_create_file("sm-ping", 0444, dir, ch, + &camrtc_dbgfs_fops_sm_ping)) + goto error; + if (!debugfs_create_file("log-level", 0644, dir, ch, + &camrtc_dbgfs_fops_loglevel)) + goto error; + + debugfs_create_u32("timeout", 0644, dir, + &crd->parameters.completion_timeout); + + if (!debugfs_create_file("forced-reset-restore", 0400, dir, ch, + &camrtc_dbgfs_fops_forced_reset_restore)) + goto error; + + if (!debugfs_create_file("irqstat", 0444, dir, ch, + &camrtc_dbgfs_fops_irqstat)) + goto error; + if (!debugfs_create_file("memstat", 0444, dir, ch, + &camrtc_dbgfs_fops_memstat)) + goto error; + + dir = debugfs_create_dir("mods", crd->root); + if (!dir) + goto error; + + debugfs_create_u32("case", 0644, dir, + &crd->parameters.mods_case); + + debugfs_create_u32("loops", 0644, dir, + &crd->parameters.mods_loops); + + debugfs_create_x32("dma_channels", 0644, dir, + &crd->parameters.mods_dma_channels); + + if (!debugfs_create_file("result", 0400, dir, ch, + &camrtc_dbgfs_fops_mods_result)) + goto error; + + dir = debugfs_create_dir("rtos", crd->root); + if (!dir) + goto error; + if (!debugfs_create_file("state", 0444, dir, ch, + &camrtc_dbgfs_fops_freertos_state)) + goto error; + + dir = debugfs_create_dir("test", crd->root); + if (!dir) + goto error; + if (!debugfs_create_file("available", 0444, dir, ch, + &camrtc_dbgfs_fops_test_list)) + goto error; + if (!debugfs_create_file("case", 0644, dir, ch, + &camrtc_dbgfs_fops_test_case)) + goto error; + if (!debugfs_create_file("result", 0400, dir, ch, + &camrtc_dbgfs_fops_test_result)) + goto error; + + debugfs_create_u32("timeout", 0644, dir, + &crd->parameters.test_timeout); + + for (i = 0; i < ARRAY_SIZE(crd->mem); i++) { + char name[8]; + + crd->mem[i].index = i; + snprintf(name, sizeof(name), "mem%u", i); + if (!debugfs_create_file(name, 0644, dir, + &crd->mem[i], &camrtc_dbgfs_fops_test_mem)) + goto error; + } + + ast_nodes = devm_kzalloc(&ch->dev, 18 * sizeof(*ast_nodes), + GFP_KERNEL); + if (unlikely(ast_nodes == NULL)) + goto error; + + for (dma = 0; dma <= 1; dma++) { + const char *ast_name = dma ? 
"ast-dma" : "ast-cpu"; + + dir = debugfs_create_dir(ast_name, crd->root); + if (dir == NULL) + goto error; + + ast_regset_create_files(ch, dir, &crd->ast_regsets[dma], + ast_name); + + ast_nodes->ch = ch; + ast_nodes->name = ast_name; + ast_nodes->mask = 0xff; + + if (!debugfs_create_file("all", 0444, dir, ast_nodes, + &camrtc_dbgfs_fops_ast)) + goto error; + + ast_nodes++; + + for (region = 0; region < 8; region++) { + char name[8]; + + snprintf(name, sizeof name, "%u", region); + + ast_nodes->ch = ch; + ast_nodes->name = ast_name; + ast_nodes->mask = BIT(region); + + if (!debugfs_create_file(name, 0444, dir, ast_nodes, + &camrtc_dbgfs_fops_ast)) + goto error; + + ast_nodes++; + } + } + + return 0; +error: + debugfs_remove_recursive(crd->root); + return -ENOMEM; +} + +static struct device *camrtc_get_linked_device( + struct device *dev, char const *name, int index) +{ + struct device_node *np; + struct platform_device *pdev; + + np = of_parse_phandle(dev->of_node, name, index); + if (np == NULL) + return NULL; + + pdev = of_find_device_by_node(np); + of_node_put(np); + + if (pdev == NULL) { + dev_warn(dev, "%s[%u] node has no device\n", name, index); + return NULL; + } + + return &pdev->dev; +} + +static int camrtc_debug_probe(struct tegra_ivc_channel *ch) +{ + struct device *dev = &ch->dev; + struct camrtc_debug *crd; + uint32_t bw; + + BUG_ON(ch->ivc.frame_size < sizeof(struct camrtc_dbg_request)); + BUG_ON(ch->ivc.frame_size < sizeof(struct camrtc_dbg_response)); + + crd = devm_kzalloc(dev, sizeof(*crd) + ch->ivc.frame_size, GFP_KERNEL); + if (unlikely(crd == NULL)) + return -ENOMEM; + + crd->channel = ch; + crd->parameters.test_case = (char *)(crd + 1); + crd->parameters.mods_case = CAMRTC_MODS_TEST_BASIC; + crd->parameters.mods_loops = 20; + crd->parameters.mods_dma_channels = 0; + + if (of_property_read_u32(dev->of_node, + NV(ivc-timeout), + &crd->parameters.completion_timeout)) + crd->parameters.completion_timeout = 50; + + if (of_property_read_u32(dev->of_node, + NV(test-timeout), + &crd->parameters.test_timeout)) + crd->parameters.test_timeout = 1000; + + mutex_init(&crd->mutex); + init_waitqueue_head(&crd->waitq); + + tegra_ivc_channel_set_drvdata(ch, crd); + + crd->mem_devices[0] = camrtc_get_linked_device(dev, NV(mem-map), 0); + crd->mem_devices[1] = camrtc_get_linked_device(dev, NV(mem-map), 1); + crd->mem_devices[2] = camrtc_get_linked_device(dev, NV(mem-map), 2); + crd->mem_devices[3] = camrtc_get_linked_device(dev, NV(mem-map), 3); + + crd->vi_falc_coverage.id = CAMRTC_DBG_FALCON_ID_VI; + crd->vi_falc_coverage.mem_dev = camrtc_dbgfs_memory_dev(crd); + crd->vi_falc_coverage.falcon_dev = crd->mem_devices[1]; + crd->vi_falc_coverage.ch = ch; + + crd->isp_falc_coverage.id = CAMRTC_DBG_FALCON_ID_ISP; + crd->isp_falc_coverage.mem_dev = crd->mem_devices[0]; + crd->isp_falc_coverage.falcon_dev = crd->mem_devices[2]; + crd->isp_falc_coverage.ch = ch; + + if (of_property_read_u32(dev->of_node, NV(test-bw), &bw) == 0) { + crd->parameters.test_bw = bw; + + dev_dbg(dev, "using emc bw %u for tests\n", bw); + } + + if (crd->mem_devices[0] == NULL) { + dev_dbg(dev, "missing %s\n", NV(mem-map)); + crd->mem_devices[0] = get_device(camrtc_get_device(ch)); + } + + if (camrtc_debug_populate(ch)) + return -ENOMEM; + + return 0; +} + +static void camrtc_debug_remove(struct tegra_ivc_channel *ch) +{ + struct camrtc_debug *crd = tegra_ivc_channel_get_drvdata(ch); + int i; + struct device *mem_dev = camrtc_dbgfs_memory_dev(crd); + + camrtc_falcon_coverage_disable(&crd->vi_falc_coverage); + 
camrtc_falcon_coverage_disable(&crd->isp_falc_coverage); + + for (i = 0; i < ARRAY_SIZE(crd->mem); i++) { + struct camrtc_test_mem *mem = &crd->mem[i]; + + if (mem->size == 0) + continue; + + dma_free_coherent(mem_dev, mem->size, mem->ptr, mem->iova); + memset(mem, 0, sizeof(*mem)); + } + + put_device(crd->mem_devices[0]); + put_device(crd->mem_devices[1]); + put_device(crd->mem_devices[2]); + put_device(crd->mem_devices[3]); + + debugfs_remove_recursive(crd->root); +} + +static const struct tegra_ivc_channel_ops tegra_ivc_channel_debug_ops = { + .probe = camrtc_debug_probe, + .remove = camrtc_debug_remove, + .notify = camrtc_debug_notify, +}; + +static const struct of_device_id camrtc_debug_of_match[] = { + { .compatible = "nvidia,tegra186-camera-ivc-protocol-debug" }, + { }, +}; + +static struct tegra_ivc_driver camrtc_debug_driver = { + .driver = { + .owner = THIS_MODULE, + .bus = &tegra_ivc_bus_type, + .name = "tegra-camera-rtcpu-debugfs", + .of_match_table = camrtc_debug_of_match, + }, + .dev_type = &tegra_ivc_channel_type, + .ops.channel = &tegra_ivc_channel_debug_ops, +}; +tegra_ivc_subsys_driver_default(camrtc_debug_driver); + +MODULE_DESCRIPTION("Debug Driver for Camera RTCPU"); +MODULE_AUTHOR("Pekka Pessi "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/tegra/rtcpu/device-group.c b/drivers/platform/tegra/rtcpu/device-group.c new file mode 100644 index 00000000..fa684523 --- /dev/null +++ b/drivers/platform/tegra/rtcpu/device-group.c @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include "device-group.h" + +#include +#include +#include +#include +#include +#include +#include "drivers/video/tegra/host/nvhost_acm.h" + +struct camrtc_device_group { + struct device *dev; + char const *names_name; + int ndevices; + struct platform_device *devices[]; +}; + +static int get_grouped_device(struct camrtc_device_group *grp, + struct device *dev, char const *name, int index) +{ + struct device_node *np; + struct platform_device *pdev; + + np = of_parse_phandle(dev->of_node, name, index); + if (np == NULL) + return 0; + + if (!of_device_is_available(np)) { + dev_info(dev, "%s[%u] is disabled\n", name, index); + of_node_put(np); + return 0; + } + + pdev = of_find_device_by_node(np); + of_node_put(np); + + if (pdev == NULL) { + dev_warn(dev, "%s[%u] node has no device\n", name, index); + return 0; + } + + grp->devices[index] = pdev; + + return 0; +} + +static void camrtc_device_group_release(struct device *dev, void *res) +{ + const struct camrtc_device_group *grp = res; + int i; + + put_device(grp->dev); + + for (i = 0; i < grp->ndevices; i++) + platform_device_put(grp->devices[i]); +} + +struct camrtc_device_group *camrtc_device_group_get( + struct device *dev, + char const *property_name, + char const *names_property_name) +{ + int index, err; + struct camrtc_device_group *grp; + int ndevices; + + if (!dev || !dev->of_node) + return ERR_PTR(-EINVAL); + + ndevices = of_count_phandle_with_args(dev->of_node, + property_name, NULL); + if (ndevices <= 0) + return ERR_PTR(-ENOENT); + + grp = devres_alloc(camrtc_device_group_release, + offsetof(struct camrtc_device_group, devices[ndevices]), + GFP_KERNEL | __GFP_ZERO); + if (!grp) + return ERR_PTR(-ENOMEM); + + grp->dev = get_device(dev); + grp->ndevices = ndevices; + grp->names_name = names_property_name; + + for (index = 0; index < grp->ndevices; index++) { + err = get_grouped_device(grp, dev, property_name, index); + if (err) { + 
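/* grp has not been devres_add()ed yet, so devres_free() just frees it without running the release callback */ +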
devres_free(grp); + return ERR_PTR(err); + } + } + + devres_add(dev, grp); + return grp; +} +EXPORT_SYMBOL(camrtc_device_group_get); + +static inline struct platform_device *platform_device_get( + struct platform_device *pdev) +{ + if (pdev != NULL) + get_device(&pdev->dev); + return pdev; +} + +struct platform_device *camrtc_device_get_byname( + struct camrtc_device_group *grp, + const char *device_name) +{ + int index; + + if (grp == NULL) + return ERR_PTR(-EINVAL); + if (grp->names_name == NULL) + return ERR_PTR(-ENOENT); + + index = of_property_match_string(grp->dev->of_node, grp->names_name, + device_name); + if (index < 0) + return ERR_PTR(-ENODEV); + if (index >= grp->ndevices) + return ERR_PTR(-ENODEV); + + return platform_device_get(grp->devices[index]); +} +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/tegra/rtcpu/device-group.h b/drivers/platform/tegra/rtcpu/device-group.h new file mode 100644 index 00000000..2f9216f1 --- /dev/null +++ b/drivers/platform/tegra/rtcpu/device-group.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#ifndef INCLUDE_DEVICE_GROUP_H +#define INCLUDE_DEVICE_GROUP_H + +struct device; +struct platform_device; + +struct camrtc_device_group *camrtc_device_group_get( + struct device *dev, + const char *property_name, + const char *names_property_name); + +struct platform_device *camrtc_device_get_byname( + struct camrtc_device_group *grp, + const char *device_name); + +int camrtc_device_group_busy(const struct camrtc_device_group *grp); +void camrtc_device_group_idle(const struct camrtc_device_group *grp); +void camrtc_device_group_reset(const struct camrtc_device_group *grp); + +#endif /* INCLUDE_DEVICE_GROUP_H */ diff --git a/drivers/platform/tegra/rtcpu/hsp-combo.h b/drivers/platform/tegra/rtcpu/hsp-combo.h new file mode 100644 index 00000000..7c72da06 --- /dev/null +++ b/drivers/platform/tegra/rtcpu/hsp-combo.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#ifndef INCLUDE_RTCPU_HSP_COMBO_H +#define INCLUDE_RTCPU_HSP_COMBO_H + +#include + +struct camrtc_hsp; +struct device; + +struct camrtc_hsp *camrtc_hsp_create( + struct device *dev, + void (*group_notify)(struct device *dev, u16 group), + long cmd_timeout); + +void camrtc_hsp_free(struct camrtc_hsp *camhsp); + +void camrtc_hsp_group_ring(struct camrtc_hsp *camhsp, + u16 group); + +int camrtc_hsp_sync(struct camrtc_hsp *camhsp); +int camrtc_hsp_resume(struct camrtc_hsp *camhsp); +int camrtc_hsp_suspend(struct camrtc_hsp *camhsp); +int camrtc_hsp_bye(struct camrtc_hsp *camhsp); +int camrtc_hsp_ch_setup(struct camrtc_hsp *camhsp, dma_addr_t iova); +int camrtc_hsp_ping(struct camrtc_hsp *camhsp, u32 data, long timeout); +int camrtc_hsp_get_fw_hash(struct camrtc_hsp *camhsp, + u8 hash[], size_t hash_size); + + +#endif /* INCLUDE_RTCPU_HSP_COMBO_H */ diff --git a/drivers/platform/tegra/rtcpu/hsp-mailbox-client.c b/drivers/platform/tegra/rtcpu/hsp-mailbox-client.c new file mode 100644 index 00000000..22bc90c7 --- /dev/null +++ b/drivers/platform/tegra/rtcpu/hsp-mailbox-client.c @@ -0,0 +1,644 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
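+// +// Transport for the camrtc_hsp command/response interface (hsp-combo.h) on +// top of the Linux mailbox framework: requests go out on the "vm-tx" channel +// and responses arrive on "vm-rx" (see camrtc_hsp_vm_probe() below). A +// caller creates the transport and synchronizes with the firmware, e.g.: +// +//	camhsp = camrtc_hsp_create(dev, group_notify_fn, timeout); +//	err = camrtc_hsp_sync(camhsp); +// +// where group_notify_fn and timeout are placeholders for whatever the +// caller provides.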
+ +#include "hsp-combo.h" + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "soc/tegra/camrtc-commands.h" + +typedef struct mbox_client mbox_client; +struct camrtc_hsp_mbox { + struct mbox_client client; + struct mbox_chan *chan; +}; + +struct camrtc_hsp_op; + +struct camrtc_hsp { + const struct camrtc_hsp_op *op; + struct camrtc_hsp_mbox rx; + struct camrtc_hsp_mbox tx; + u32 cookie; + spinlock_t sendlock; + void (*group_notify)(struct device *dev, u16 group); + struct device dev; + struct mutex mutex; + struct completion emptied; + wait_queue_head_t response_waitq; + atomic_t response; + long timeout; +}; + +struct camrtc_hsp_op { + int (*send)(struct camrtc_hsp *, int msg, long *timeout); + void (*group_ring)(struct camrtc_hsp *, u16 group); + int (*sync)(struct camrtc_hsp *, long *timeout); + int (*resume)(struct camrtc_hsp *, long *timeout); + int (*suspend)(struct camrtc_hsp *, long *timeout); + int (*bye)(struct camrtc_hsp *, long *timeout); + int (*ch_setup)(struct camrtc_hsp *, dma_addr_t iova, long *timeout); + int (*ping)(struct camrtc_hsp *, u32 data, long *timeout); + int (*get_fw_hash)(struct camrtc_hsp *, u32 index, long *timeout); +}; + +static int camrtc_hsp_send(struct camrtc_hsp *camhsp, + int request, long *timeout) +{ + int ret = camhsp->op->send(camhsp, request, timeout); + + if (ret == -ETIME) { + dev_err(&camhsp->dev, + "request 0x%08x: empty mailbox timeout\n", request); + } else if (ret == -EINVAL) { + dev_err(&camhsp->dev, + "request 0x%08x: invalid mbox channel\n", request); + } else if (ret == -ENOBUFS) { + dev_err(&camhsp->dev, + "request 0x%08x: no space left in mbox msg queue\n", request); + } else + dev_dbg(&camhsp->dev, + "request sent: 0x%08x\n", request); + + return ret; +} + +static int camrtc_hsp_recv(struct camrtc_hsp *camhsp, + int command, long *timeout) +{ + int response; + + *timeout = wait_event_timeout( + camhsp->response_waitq, + (response = atomic_xchg(&camhsp->response, -1)) >= 0, + *timeout); + if (*timeout <= 0) { + dev_err(&camhsp->dev, + "request 0x%08x: response timeout\n", command); + return -ETIMEDOUT; + } + + dev_dbg(&camhsp->dev, "request 0x%08x: response 0x%08x\n", + command, response); + + return response; +} + +static int camrtc_hsp_sendrecv(struct camrtc_hsp *camhsp, + int command, long *timeout) +{ + int response; + response = camrtc_hsp_send(camhsp, command, timeout); + if (response >= 0) + response = camrtc_hsp_recv(camhsp, command, timeout); + + return response; +} + +/* ---------------------------------------------------------------------- */ +/* Protocol nvidia,tegra-camrtc-hsp-vm */ + +static void camrtc_hsp_rx_full_notify(mbox_client *cl, void *data) +{ + struct camrtc_hsp *camhsp = dev_get_drvdata(cl->dev); + u32 status, group; + + u32 msg = (u32) (unsigned long) data; + status = CAMRTC_HSP_SS_FW_MASK; + status >>= CAMRTC_HSP_SS_FW_SHIFT; + group = status & CAMRTC_HSP_SS_IVC_MASK; + + if (CAMRTC_HSP_MSG_ID(msg) == CAMRTC_HSP_UNKNOWN) + dev_dbg(&camhsp->dev, "request message unknown 0x%08x\n", msg); + + if (group != 0) + camhsp->group_notify(camhsp->dev.parent, (u16)group); + + /* Other interrupt bits are ignored for now */ + + if (CAMRTC_HSP_MSG_ID(msg) == CAMRTC_HSP_IRQ) { + /* We are done here */ + } else if (CAMRTC_HSP_MSG_ID(msg) < CAMRTC_HSP_HELLO) { + /* Rest of the unidirectional messages are now ignored */ + dev_info(&camhsp->dev, "unknown message 0x%08x\n", msg); + } else { + atomic_set(&camhsp->response, msg); + 
wake_up(&camhsp->response_waitq); + } +} + +static void camrtc_hsp_tx_empty_notify(mbox_client *cl, void *data, int empty_value) +{ + struct camrtc_hsp *camhsp = dev_get_drvdata(cl->dev); + + (void)empty_value; /* ignored */ + + complete(&camhsp->emptied); +} + +static int camrtc_hsp_vm_send(struct camrtc_hsp *camhsp, + int request, long *timeout); +static void camrtc_hsp_vm_group_ring(struct camrtc_hsp *camhsp, u16 group); +static void camrtc_hsp_vm_send_irqmsg(struct camrtc_hsp *camhsp); +static int camrtc_hsp_vm_sync(struct camrtc_hsp *camhsp, long *timeout); +static int camrtc_hsp_vm_hello(struct camrtc_hsp *camhsp, long *timeout); +static int camrtc_hsp_vm_protocol(struct camrtc_hsp *camhsp, long *timeout); +static int camrtc_hsp_vm_resume(struct camrtc_hsp *camhsp, long *timeout); +static int camrtc_hsp_vm_suspend(struct camrtc_hsp *camhsp, long *timeout); +static int camrtc_hsp_vm_bye(struct camrtc_hsp *camhsp, long *timeout); +static int camrtc_hsp_vm_ch_setup(struct camrtc_hsp *camhsp, + dma_addr_t iova, long *timeout); +static int camrtc_hsp_vm_ping(struct camrtc_hsp *camhsp, + u32 data, long *timeout); +static int camrtc_hsp_vm_get_fw_hash(struct camrtc_hsp *camhsp, + u32 index, long *timeout); + +static const struct camrtc_hsp_op camrtc_hsp_vm_ops = { + .send = camrtc_hsp_vm_send, + .group_ring = camrtc_hsp_vm_group_ring, + .sync = camrtc_hsp_vm_sync, + .resume = camrtc_hsp_vm_resume, + .suspend = camrtc_hsp_vm_suspend, + .bye = camrtc_hsp_vm_bye, + .ping = camrtc_hsp_vm_ping, + .ch_setup = camrtc_hsp_vm_ch_setup, + .get_fw_hash = camrtc_hsp_vm_get_fw_hash, +}; + +static int camrtc_hsp_vm_send(struct camrtc_hsp *camhsp, + int request, long *timeout) +{ + int response; + unsigned long flags; + + spin_lock_irqsave(&camhsp->sendlock, flags); + atomic_set(&camhsp->response, -1); + response = mbox_send_message(camhsp->tx.chan, (void *)(unsigned long) request); + spin_unlock_irqrestore(&camhsp->sendlock, flags); + + return response; +} + +static void camrtc_hsp_vm_group_ring(struct camrtc_hsp *camhsp, + u16 group) +{ + camrtc_hsp_vm_send_irqmsg(camhsp); +} + +static void camrtc_hsp_vm_send_irqmsg(struct camrtc_hsp *camhsp) +{ + int irqmsg = CAMRTC_HSP_MSG(CAMRTC_HSP_IRQ, 1); + int response; + unsigned long flags; + + spin_lock_irqsave(&camhsp->sendlock, flags); + response = mbox_send_message(camhsp->tx.chan, (void *)(unsigned long) irqmsg); + spin_unlock_irqrestore(&camhsp->sendlock, flags); +} + +static int camrtc_hsp_vm_sendrecv(struct camrtc_hsp *camhsp, + int request, long *timeout) +{ + int response = camrtc_hsp_sendrecv(camhsp, request, timeout); + + if (response < 0) + return response; + + if (CAMRTC_HSP_MSG_ID(request) != CAMRTC_HSP_MSG_ID(response)) { + dev_err(&camhsp->dev, + "request 0x%08x mismatch with response 0x%08x\n", + request, response); + return -EIO; + } + + /* Return the 24-bit parameter only */ + return CAMRTC_HSP_MSG_PARAM(response); +} + +static int camrtc_hsp_vm_sync(struct camrtc_hsp *camhsp, long *timeout) +{ + int response = camrtc_hsp_vm_hello(camhsp, timeout); + + if (response >= 0) { + camhsp->cookie = response; + response = camrtc_hsp_vm_protocol(camhsp, timeout); + } + + return response; +} + +static u32 camrtc_hsp_vm_cookie(void) +{ + u32 value = CAMRTC_HSP_MSG_PARAM(sched_clock() >> 5U); + + if (value == 0) + value++; + + return value; +} + +static int camrtc_hsp_vm_hello(struct camrtc_hsp *camhsp, long *timeout) +{ + int request = CAMRTC_HSP_MSG(CAMRTC_HSP_HELLO, camrtc_hsp_vm_cookie()); + int response = camrtc_hsp_send(camhsp, request, timeout); 
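+ /* The firmware echoes the HELLO cookie back; the loop below drains any stale responses until the echo arrives or the timeout expires. */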
+ + if (response < 0) + return response; + + for (;;) { + response = camrtc_hsp_recv(camhsp, request, timeout); + + /* Wait until we get the HELLO message we sent */ + if (response == request) + break; + + /* ...or timeout */ + if (response < 0) + break; + } + + return response; +} + +static int camrtc_hsp_vm_protocol(struct camrtc_hsp *camhsp, long *timeout) +{ + int request = CAMRTC_HSP_MSG(CAMRTC_HSP_PROTOCOL, + RTCPU_DRIVER_SM6_VERSION); + + return camrtc_hsp_vm_sendrecv(camhsp, request, timeout); +} + +static int camrtc_hsp_vm_resume(struct camrtc_hsp *camhsp, long *timeout) +{ + int request = CAMRTC_HSP_MSG(CAMRTC_HSP_RESUME, camhsp->cookie); + + return camrtc_hsp_vm_sendrecv(camhsp, request, timeout); +} + +static int camrtc_hsp_vm_suspend(struct camrtc_hsp *camhsp, long *timeout) +{ + u32 request = CAMRTC_HSP_MSG(CAMRTC_HSP_SUSPEND, 0); + + return camrtc_hsp_vm_sendrecv(camhsp, request, timeout); +} + +static int camrtc_hsp_vm_bye(struct camrtc_hsp *camhsp, long *timeout) +{ + u32 request = CAMRTC_HSP_MSG(CAMRTC_HSP_BYE, 0); + + camhsp->cookie = 0U; + + return camrtc_hsp_vm_sendrecv(camhsp, request, timeout); +} + +static int camrtc_hsp_vm_ch_setup(struct camrtc_hsp *camhsp, + dma_addr_t iova, long *timeout) +{ + u32 request = CAMRTC_HSP_MSG(CAMRTC_HSP_CH_SETUP, iova >> 8); + + return camrtc_hsp_vm_sendrecv(camhsp, request, timeout); +} + +static int camrtc_hsp_vm_ping(struct camrtc_hsp *camhsp, u32 data, + long *timeout) +{ + u32 request = CAMRTC_HSP_MSG(CAMRTC_HSP_PING, data); + + return camrtc_hsp_vm_sendrecv(camhsp, request, timeout); +} + +static int camrtc_hsp_vm_get_fw_hash(struct camrtc_hsp *camhsp, u32 index, + long *timeout) +{ + u32 request = CAMRTC_HSP_MSG(CAMRTC_HSP_FW_HASH, index); + + return camrtc_hsp_vm_sendrecv(camhsp, request, timeout); +} + +static int camrtc_hsp_vm_probe(struct camrtc_hsp *camhsp) +{ + struct device_node *np = camhsp->dev.parent->of_node; + int err = -ENOTSUPP; + const char *obtain = ""; + + np = of_get_compatible_child(np, "nvidia,tegra-camrtc-hsp-vm"); + if (!of_device_is_available(np)) { + of_node_put(np); + dev_err(&camhsp->dev, "no hsp protocol \"%s\"\n", + "nvidia,tegra-camrtc-hsp-vm"); + return -ENOTSUPP; + } + + camhsp->dev.of_node = np; + + camhsp->rx.chan = mbox_request_channel_byname(&camhsp->rx.client, "vm-rx"); + if (IS_ERR(camhsp->rx.chan)) { + err = PTR_ERR(camhsp->rx.chan); + obtain = "vm-rx mbox channel"; + goto fail; + } + + camhsp->tx.chan = mbox_request_channel_byname(&camhsp->tx.client, "vm-tx"); + if (IS_ERR(camhsp->tx.chan)) { + err = PTR_ERR(camhsp->tx.chan); + obtain = "vm-tx mbox channel"; + goto fail; + } + + camhsp->op = &camrtc_hsp_vm_ops; + dev_set_name(&camhsp->dev, "%s:%s", + dev_name(camhsp->dev.parent), camhsp->dev.of_node->name); + dev_dbg(&camhsp->dev, "probed\n"); + + return 0; + +fail: + if (err != -EPROBE_DEFER) { + dev_err(&camhsp->dev, "%s: failed to obtain %s: %d\n", + np->name, obtain, err); + } + /* np is owned by dev.of_node now and gets put by the device release callback */ + return err; +} + +/* ---------------------------------------------------------------------- */ +/* Public interface */ + +void camrtc_hsp_group_ring(struct camrtc_hsp *camhsp, + u16 group) +{ + if (!WARN_ON(camhsp == NULL)) + camhsp->op->group_ring(camhsp, group); +} +EXPORT_SYMBOL(camrtc_hsp_group_ring); + +/* + * Synchronize the HSP + */ +int camrtc_hsp_sync(struct camrtc_hsp *camhsp) +{ + long timeout; + int response; + + if (WARN_ON(camhsp == NULL)) + return -EINVAL; + + timeout = camhsp->timeout; + mutex_lock(&camhsp->mutex); + response = camhsp->op->sync(camhsp, &timeout); + mutex_unlock(&camhsp->mutex); + + return response; +}
+EXPORT_SYMBOL(camrtc_hsp_sync); + +/* + * Resume: resume the firmware + */ +int camrtc_hsp_resume(struct camrtc_hsp *camhsp) +{ + long timeout; + int response; + + if (WARN_ON(camhsp == NULL)) + return -EINVAL; + + timeout = camhsp->timeout; + mutex_lock(&camhsp->mutex); + response = camhsp->op->resume(camhsp, &timeout); + mutex_unlock(&camhsp->mutex); + + return response; +} +EXPORT_SYMBOL(camrtc_hsp_resume); + +/* + * Suspend: set firmware to idle. + */ +int camrtc_hsp_suspend(struct camrtc_hsp *camhsp) +{ + long timeout; + int response; + + if (WARN_ON(camhsp == NULL)) + return -EINVAL; + + timeout = camhsp->timeout; + mutex_lock(&camhsp->mutex); + response = camhsp->op->suspend(camhsp, &timeout); + mutex_unlock(&camhsp->mutex); + + if (response != 0) + dev_info(&camhsp->dev, "PM_SUSPEND failed: 0x%08x\n", + response); + + return response <= 0 ? response : -EIO; +} +EXPORT_SYMBOL(camrtc_hsp_suspend); + +/* + * Bye: tell firmware that VM mappings are going away + */ +int camrtc_hsp_bye(struct camrtc_hsp *camhsp) +{ + long timeout; + int response; + + if (WARN_ON(camhsp == NULL)) + return -EINVAL; + + timeout = camhsp->timeout; + mutex_lock(&camhsp->mutex); + response = camhsp->op->bye(camhsp, &timeout); + mutex_unlock(&camhsp->mutex); + + if (response != 0) + dev_warn(&camhsp->dev, "BYE failed: 0x%08x\n", response); + + return response; +} +EXPORT_SYMBOL(camrtc_hsp_bye); + +int camrtc_hsp_ch_setup(struct camrtc_hsp *camhsp, dma_addr_t iova) +{ + long timeout; + int response; + + if (WARN_ON(camhsp == NULL)) + return -EINVAL; + + if (iova >= BIT_ULL(32) || (iova & 0xffU) != 0) { + dev_warn(&camhsp->dev, + "CH_SETUP invalid iova: 0x%08llx\n", iova); + return -EINVAL; + } + + timeout = camhsp->timeout; + mutex_lock(&camhsp->mutex); + response = camhsp->op->ch_setup(camhsp, iova, &timeout); + mutex_unlock(&camhsp->mutex); + + if (response > 0) + dev_dbg(&camhsp->dev, "CH_SETUP failed: 0x%08x\n", response); + + return response; +} +EXPORT_SYMBOL(camrtc_hsp_ch_setup); + +int camrtc_hsp_ping(struct camrtc_hsp *camhsp, u32 data, long timeout) +{ + long left = timeout; + int response; + + if (WARN_ON(camhsp == NULL)) + return -EINVAL; + + if (left == 0L) + left = camhsp->timeout; + + mutex_lock(&camhsp->mutex); + response = camhsp->op->ping(camhsp, data, &left); + mutex_unlock(&camhsp->mutex); + + return response; +} +EXPORT_SYMBOL(camrtc_hsp_ping); + +int camrtc_hsp_get_fw_hash(struct camrtc_hsp *camhsp, + u8 hash[], size_t hash_size) +{ + int i; + int ret = 0; + long timeout; + + if (WARN_ON(camhsp == NULL)) + return -EINVAL; + + memset(hash, 0, hash_size); + timeout = camhsp->timeout; + mutex_lock(&camhsp->mutex); + + for (i = 0; i < hash_size; i++) { + int value = camhsp->op->get_fw_hash(camhsp, i, &timeout); + + if (value < 0 || value > 255) { + dev_info(&camhsp->dev, + "FW_HASH failed: 0x%08x\n", value); + ret = value < 0 ? 
value : -EIO; + goto fail; + } + + hash[i] = value; + } + +fail: + mutex_unlock(&camhsp->mutex); + + return ret; +} +EXPORT_SYMBOL(camrtc_hsp_get_fw_hash); + +static const struct device_type camrtc_hsp_combo_dev_type = { + .name = "camrtc-hsp-protocol", +}; + +static void camrtc_hsp_combo_dev_release(struct device *dev) +{ + struct camrtc_hsp *camhsp = container_of(dev, struct camrtc_hsp, dev); + + if (!IS_ERR_OR_NULL(camhsp->rx.chan)) + mbox_free_channel(camhsp->rx.chan); + if (!IS_ERR_OR_NULL(camhsp->tx.chan)) + mbox_free_channel(camhsp->tx.chan); + + of_node_put(dev->of_node); + kfree(camhsp); +} + +static int camrtc_hsp_probe(struct camrtc_hsp *camhsp) +{ + int ret; + + ret = camrtc_hsp_vm_probe(camhsp); + if (ret != -ENOTSUPP) + return ret; + + return -ENODEV; +} + +struct camrtc_hsp *camrtc_hsp_create( + struct device *dev, + void (*group_notify)(struct device *dev, u16 group), + long cmd_timeout) +{ + struct camrtc_hsp *camhsp; + int ret = -EINVAL; + + camhsp = kzalloc(sizeof(*camhsp), GFP_KERNEL); + if (camhsp == NULL) + return ERR_PTR(-ENOMEM); + + camhsp->dev.parent = dev; + camhsp->group_notify = group_notify; + camhsp->timeout = cmd_timeout; + mutex_init(&camhsp->mutex); + spin_lock_init(&camhsp->sendlock); + init_waitqueue_head(&camhsp->response_waitq); + init_completion(&camhsp->emptied); + atomic_set(&camhsp->response, -1); + + camhsp->dev.type = &camrtc_hsp_combo_dev_type; + camhsp->dev.release = camrtc_hsp_combo_dev_release; + device_initialize(&camhsp->dev); + + dev_set_name(&camhsp->dev, "%s:%s", dev_name(dev), "hsp"); + + pm_runtime_no_callbacks(&camhsp->dev); + pm_runtime_enable(&camhsp->dev); + + camhsp->tx.client.tx_block = false; + camhsp->rx.client.rx_callback = camrtc_hsp_rx_full_notify; + camhsp->tx.client.tx_done = camrtc_hsp_tx_empty_notify; + camhsp->rx.client.dev = camhsp->tx.client.dev = &(camhsp->dev); + + ret = camrtc_hsp_probe(camhsp); + if (ret < 0) + goto fail; + + ret = device_add(&camhsp->dev); + if (ret < 0) + goto fail; + + dev_set_drvdata(&camhsp->dev, camhsp); + + return camhsp; + +fail: + camrtc_hsp_free(camhsp); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(camrtc_hsp_create); + +void camrtc_hsp_free(struct camrtc_hsp *camhsp) +{ + if (IS_ERR_OR_NULL(camhsp)) + return; + + pm_runtime_disable(&camhsp->dev); + + if (dev_get_drvdata(&camhsp->dev) != NULL) + device_unregister(&camhsp->dev); + else + put_device(&camhsp->dev); +} +EXPORT_SYMBOL(camrtc_hsp_free); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/tegra/rtcpu/ivc-bus.c b/drivers/platform/tegra/rtcpu/ivc-bus.c new file mode 100644 index 00000000..594075ad --- /dev/null +++ b/drivers/platform/tegra/rtcpu/ivc-bus.c @@ -0,0 +1,623 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
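+// +// IVC bus driver: enumerates the "nvidia,ivc-channels" regions from the +// device tree, carves each shared-memory region into per-channel RX/TX +// queues, and registers one device per channel on tegra_ivc_bus_type. +// An illustrative channel node (the property names follow the parsing +// code below; the node name, service name and sizes are made-up example +// values): +// +//	echo@0 { +//		nvidia,service = "echo"; +//		nvidia,group = <1>; +//		nvidia,frame-count = <16>; +//		nvidia,frame-size = <64>; +//	};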
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "soc/tegra/camrtc-channels.h" +#include "soc/tegra/camrtc-commands.h" + +#define NV(p) "nvidia," #p + +#define CAMRTC_IVC_CONFIG_SIZE 4096 + +struct tegra_ivc_region { + uintptr_t base; + size_t size; + dma_addr_t iova; + size_t config_size; + size_t ivc_size; +}; + +struct tegra_ivc_bus { + struct device dev; + struct tegra_ivc_channel *chans; + unsigned num_regions; + struct tegra_ivc_region regions[]; +}; + +static void tegra_ivc_channel_ring(struct ivc *ivc) +{ + struct tegra_ivc_channel *chan = + container_of(ivc, struct tegra_ivc_channel, ivc); + struct tegra_ivc_bus *bus = + container_of(chan->dev.parent, struct tegra_ivc_bus, dev); + + tegra_camrtc_ivc_ring(bus->dev.parent, chan->group); +} + +struct device_type tegra_ivc_channel_type = { + .name = "tegra-ivc-channel", +}; +EXPORT_SYMBOL(tegra_ivc_channel_type); + +int tegra_ivc_channel_runtime_get(struct tegra_ivc_channel *ch) +{ + BUG_ON(ch == NULL); + + return pm_runtime_get_sync(&ch->dev); +} +EXPORT_SYMBOL(tegra_ivc_channel_runtime_get); + +void tegra_ivc_channel_runtime_put(struct tegra_ivc_channel *ch) +{ + BUG_ON(ch == NULL); + + pm_runtime_put(&ch->dev); +} +EXPORT_SYMBOL(tegra_ivc_channel_runtime_put); + +static void tegra_ivc_channel_release(struct device *dev) +{ + struct tegra_ivc_channel *chan = + container_of(dev, struct tegra_ivc_channel, dev); + + of_node_put(dev->of_node); + kfree(chan); +} + +static struct tegra_ivc_channel *tegra_ivc_channel_create( + struct tegra_ivc_bus *bus, struct device_node *ch_node, + struct tegra_ivc_region *region) +{ + struct device *peer_device = bus->dev.parent; + struct camrtc_tlv_ivc_setup *tlv; + struct { + u32 rx; + u32 tx; + } start, end; + u32 version, channel_group, nframes, frame_size, queue_size; + const char *service; + int ret; + + struct tegra_ivc_channel *chan = kzalloc(sizeof(*chan), GFP_KERNEL); + if (unlikely(chan == NULL)) + return ERR_PTR(-ENOMEM); + + chan->dev.parent = &bus->dev; + chan->dev.type = &tegra_ivc_channel_type; + chan->dev.bus = &tegra_ivc_bus_type; + chan->dev.of_node = of_node_get(ch_node); + chan->dev.release = tegra_ivc_channel_release; + dev_set_name(&chan->dev, "%s:%s", dev_name(&bus->dev), + kbasename(ch_node->full_name)); + device_initialize(&chan->dev); + pm_runtime_no_callbacks(&chan->dev); + pm_runtime_enable(&chan->dev); + + ret = of_property_read_string(ch_node, NV(service), &service); + if (ret) { + dev_err(&chan->dev, "missing <%s> property\n", + NV(service)); + goto error; + } + + ret = of_property_read_u32(ch_node, NV(version), &version); + if (ret) + version = 0; + + ret = of_property_read_u32(ch_node, NV(group), &channel_group); + if (ret) { + dev_err(&chan->dev, "missing <%s> property\n", NV(group)); + goto error; + } + + /* We have 15 channel group bits available */ + if ((channel_group & 0x7FFFU) != channel_group) { + dev_err(&chan->dev, "invalid property %s = 0x%x\n", + NV(group), channel_group); + goto error; + } + + ret = of_property_read_u32(ch_node, NV(frame-count), &nframes); + if (ret || !nframes) { + dev_err(&chan->dev, "missing <%s> property\n", + NV(frame-count)); + goto error; + } + nframes = 1 << fls(nframes - 1); /* Round up to a power of two */ + + ret = of_property_read_u32(ch_node, NV(frame-size), &frame_size); + if (ret || !frame_size) { + dev_err(&chan->dev, "missing <%s> property\n", NV(frame-size)); + goto error; + } + + if (region->config_size + sizeof(*tlv) > CAMRTC_IVC_CONFIG_SIZE) { + 
dev_err(&chan->dev, "IVC config size exceeded\n"); + ret = -ENOSPC; + goto error; + } + + queue_size = tegra_ivc_total_queue_size(nframes * frame_size); + if (region->ivc_size + 2 * queue_size > region->size) { + dev_err(&chan->dev, "buffers exceed IVC region\n"); + ret = -ENOSPC; + goto error; + } + + start.rx = region->ivc_size; + region->ivc_size += queue_size; + end.rx = region->ivc_size; + + start.tx = end.rx; + region->ivc_size += queue_size; + end.tx = region->ivc_size; + + /* Init IVC */ + ret = tegra_ivc_init_with_dma_handle(&chan->ivc, + region->base + start.rx, region->iova + start.rx, + region->base + start.tx, region->iova + start.tx, + nframes, frame_size, + /* Device used to allocate the shared memory for IVC */ + peer_device, + tegra_ivc_channel_ring); + if (ret) { + dev_err(&chan->dev, "IVC initialization error: %d\n", ret); + goto error; + } + + chan->group = channel_group; + + tegra_ivc_channel_reset(&chan->ivc); + + /* Fill channel descriptor */ + tlv = (struct camrtc_tlv_ivc_setup *) + (region->base + region->config_size); + + tlv->tag = CAMRTC_TAG_IVC_SETUP; + tlv->len = sizeof(*tlv); + tlv->rx_iova = region->iova + start.rx; + tlv->rx_frame_size = frame_size; + tlv->rx_nframes = nframes; + tlv->tx_iova = region->iova + start.tx; + tlv->tx_frame_size = frame_size; + tlv->tx_nframes = nframes; + tlv->channel_group = channel_group; + tlv->ivc_version = version; + if (strscpy(tlv->ivc_service, service, sizeof(tlv->ivc_service)) < 0) + dev_warn(&chan->dev, "service name <%s> too long\n", service); + + region->config_size += sizeof(*tlv); + (++tlv)->tag = 0; /* terminator */ + + dev_info(&chan->dev, + "%s: ver=%u grp=%u RX[%ux%u]=0x%x-0x%x TX[%ux%u]=0x%x-0x%x\n", + ch_node->name, version, channel_group, + nframes, frame_size, start.rx, end.rx, + nframes, frame_size, start.tx, end.tx); + + ret = device_add(&chan->dev); + if (ret) { + dev_err(&chan->dev, "channel device error: %d\n", ret); + goto error; + } + + return chan; +error: + put_device(&chan->dev); + return ERR_PTR(ret); +} + +static void tegra_ivc_channel_notify(struct tegra_ivc_channel *chan) +{ + const struct tegra_ivc_channel_ops *ops; + + if (tegra_ivc_channel_notified(&chan->ivc) != 0) + return; + + if (!chan->is_ready) + return; + + rcu_read_lock(); + ops = rcu_dereference(chan->ops); + + if (ops != NULL && ops->notify != NULL) + ops->notify(chan); + rcu_read_unlock(); +} + +void tegra_ivc_bus_notify(struct tegra_ivc_bus *bus, u16 group) +{ + struct tegra_ivc_channel *chan; + + for (chan = bus->chans; chan != NULL; chan = chan->next) { + if ((chan->group & group) != 0) + tegra_ivc_channel_notify(chan); + } +} +EXPORT_SYMBOL(tegra_ivc_bus_notify); + +struct device_type tegra_ivc_bus_dev_type = { + .name = "tegra-ivc-bus", +}; +EXPORT_SYMBOL(tegra_ivc_bus_dev_type); + +static void tegra_ivc_bus_release(struct device *dev) +{ + struct tegra_ivc_bus *bus = + container_of(dev, struct tegra_ivc_bus, dev); + int i; + + of_node_put(dev->of_node); + + for (i = 0; i < bus->num_regions; i++) { + if (!bus->regions[i].base) + continue; + + dma_free_coherent(dev->parent, bus->regions[i].size, + (void *)bus->regions[i].base, + bus->regions[i].iova); + } + + kfree(bus); +} + +static int tegra_ivc_bus_match(struct device *dev, struct device_driver *drv) +{ + struct tegra_ivc_driver *ivcdrv = to_tegra_ivc_driver(drv); + + if (dev->type != ivcdrv->dev_type) + return 0; + return of_driver_match_device(dev, drv); +} + +static void tegra_ivc_bus_stop(struct tegra_ivc_bus *bus) +{ + while (bus->chans != NULL) { + struct 
tegra_ivc_channel *chan = bus->chans; + + bus->chans = chan->next; + pm_runtime_disable(&chan->dev); + device_unregister(&chan->dev); + } +} + +static int tegra_ivc_bus_start(struct tegra_ivc_bus *bus) +{ + struct device_node *dn = bus->dev.parent->of_node; + struct of_phandle_args reg_spec; + const char *status; + int i, ret; + + for (i = 0; + of_parse_phandle_with_fixed_args(dn, NV(ivc-channels), 3, + i, ®_spec) == 0; + i++) { + struct device_node *ch_node; + + for_each_child_of_node(reg_spec.np, ch_node) { + struct tegra_ivc_channel *chan; + + ret = of_property_read_string(ch_node, + "status", &status); + + if (ret == 0) { + ret = strcmp(status, "disabled"); + + if (ret == 0) + continue; + } + + chan = tegra_ivc_channel_create(bus, ch_node, + &bus->regions[i]); + if (IS_ERR(chan)) { + ret = PTR_ERR(chan); + of_node_put(ch_node); + goto error; + } + + chan->next = bus->chans; + bus->chans = chan; + } + } + + return 0; +error: + tegra_ivc_bus_stop(bus); + return ret; +} + +/* + * This is called during RTCPU boot to synchronize + * (or re-synchronize in the case of PM resume). + */ +int tegra_ivc_bus_boot_sync(struct tegra_ivc_bus *bus) +{ + int i; + + if (IS_ERR_OR_NULL(bus)) + return 0; + + for (i = 0; i < bus->num_regions; i++) { + int ret = tegra_camrtc_iovm_setup(bus->dev.parent, + bus->regions[i].iova); + if (ret != 0) { + dev_info(&bus->dev, "IOVM setup error: %d\n", ret); + return -EIO; + } + } + + return 0; +} +EXPORT_SYMBOL(tegra_ivc_bus_boot_sync); + +static int tegra_ivc_bus_probe(struct device *dev) +{ + int ret = -ENXIO; + + if (dev->type == &tegra_ivc_channel_type) { + struct tegra_ivc_driver *drv = to_tegra_ivc_driver(dev->driver); + struct tegra_ivc_channel *chan = to_tegra_ivc_channel(dev); + const struct tegra_ivc_channel_ops *ops = drv->ops.channel; + + mutex_init(&chan->ivc_wr_lock); + + BUG_ON(ops == NULL); + if (ops->probe != NULL) { + ret = ops->probe(chan); + if (ret) + return ret; + } + + rcu_assign_pointer(chan->ops, ops); + ret = 0; + + } + + return ret; +} + +static void tegra_ivc_bus_remove(struct device *dev) +{ + if (dev->type == &tegra_ivc_channel_type) { + struct tegra_ivc_driver *drv = to_tegra_ivc_driver(dev->driver); + struct tegra_ivc_channel *chan = to_tegra_ivc_channel(dev); + const struct tegra_ivc_channel_ops *ops = drv->ops.channel; + + WARN_ON(rcu_access_pointer(chan->ops) != ops); + RCU_INIT_POINTER(chan->ops, NULL); + synchronize_rcu(); + + if (ops->remove != NULL) + ops->remove(chan); + + } + + return; +} + +static int tegra_ivc_bus_ready_child(struct device *dev, void *data) +{ + struct tegra_ivc_driver *drv = to_tegra_ivc_driver(dev->driver); + bool is_ready = (data != NULL) ? 
*(bool *)data : true; + + if (dev->type == &tegra_ivc_channel_type) { + struct tegra_ivc_channel *chan = to_tegra_ivc_channel(dev); + const struct tegra_ivc_channel_ops *ops; + + chan->is_ready = is_ready; + if (!is_ready) + atomic_inc(&chan->bus_resets); + smp_wmb(); + + if (drv != NULL) { + rcu_read_lock(); + ops = rcu_dereference(chan->ops); + if (ops->ready != NULL) + ops->ready(chan, is_ready); + rcu_read_unlock(); + } else { + dev_warn(dev, "ivc channel driver missing\n"); + } + } + + return 0; +} + +struct bus_type tegra_ivc_bus_type = { + .name = "tegra-ivc-bus", + .match = tegra_ivc_bus_match, + .probe = tegra_ivc_bus_probe, + .remove = tegra_ivc_bus_remove, +}; +EXPORT_SYMBOL(tegra_ivc_bus_type); + +int tegra_ivc_driver_register(struct tegra_ivc_driver *drv) +{ + return driver_register(&drv->driver); +} +EXPORT_SYMBOL(tegra_ivc_driver_register); + +void tegra_ivc_driver_unregister(struct tegra_ivc_driver *drv) +{ + return driver_unregister(&drv->driver); +} +EXPORT_SYMBOL(tegra_ivc_driver_unregister); + +static int tegra_ivc_bus_parse_regions(struct tegra_ivc_bus *bus, + struct device_node *dev_node) +{ + struct of_phandle_args reg_spec; + int i; + + /* Parse out all regions in a node */ + for (i = 0; + of_parse_phandle_with_fixed_args(dev_node, NV(ivc-channels), 3, + i, ®_spec) == 0; + i++) { + struct device_node *ch_node; + struct tegra_ivc_region *region = &bus->regions[i]; + u32 nframes, frame_size, size = CAMRTC_IVC_CONFIG_SIZE; + int ret = -ENODEV; + + if (reg_spec.args_count < 3) { + of_node_put(reg_spec.np); + dev_err(&bus->dev, "invalid region specification\n"); + return -EINVAL; + } + + for_each_child_of_node(reg_spec.np, ch_node) { + ret = of_property_read_u32(ch_node, NV(frame-count), + &nframes); + if (ret || !nframes) { + dev_err(&bus->dev, "missing <%s> property\n", + NV(frame-count)); + break; + } + /* Round up to a power of two */ + nframes = 1 << fls(nframes - 1); + + ret = of_property_read_u32(ch_node, NV(frame-size), + &frame_size); + if (ret || !frame_size) { + dev_err(&bus->dev, "missing <%s> property\n", + NV(frame-size)); + break; + } + + size += 2 * tegra_ivc_total_queue_size(nframes * + frame_size); + } + of_node_put(reg_spec.np); + + if (ret) + return ret; + + region->base = + (uintptr_t)dma_alloc_coherent(bus->dev.parent, + size, ®ion->iova, + GFP_KERNEL | __GFP_ZERO); + if (!region->base) + return -ENOMEM; + + region->size = size; + region->config_size = 0; + region->ivc_size = CAMRTC_IVC_CONFIG_SIZE; + + dev_info(&bus->dev, "region %u: iova=0x%x-0x%x size=%u\n", + i, (u32)region->iova, (u32)region->iova + size - 1, + size); + } + + return 0; +} + +static unsigned tegra_ivc_bus_count_regions(const struct device_node *dev_node) +{ + unsigned i; + + for (i = 0; of_parse_phandle_with_fixed_args(dev_node, + NV(ivc-channels), 3, i, NULL) == 0; i++) + ; + + return i; +} + +struct tegra_ivc_bus *tegra_ivc_bus_create(struct device *dev) +{ + struct tegra_ivc_bus *bus; + unsigned num; + int ret; + + num = tegra_ivc_bus_count_regions(dev->of_node); + + bus = kzalloc(sizeof(*bus) + num * sizeof(*bus->regions), GFP_KERNEL); + if (unlikely(bus == NULL)) + return ERR_PTR(-ENOMEM); + + bus->num_regions = num; + bus->dev.parent = dev; + bus->dev.type = &tegra_ivc_bus_dev_type; + bus->dev.bus = &tegra_ivc_bus_type; + bus->dev.of_node = of_get_child_by_name(dev->of_node, "hsp"); + bus->dev.release = tegra_ivc_bus_release; + dev_set_name(&bus->dev, "%s:ivc-bus", dev_name(dev)); + device_initialize(&bus->dev); + pm_runtime_no_callbacks(&bus->dev); + 
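+/*
+ * Runtime PM is enabled below with no callbacks of its own: the bus
+ * device only propagates PM state between the RTCPU parent and the
+ * per-channel devices.  The expected lifecycle, as a sketch of how the
+ * camera RTCPU platform driver is assumed to drive this API:
+ *
+ *   bus = tegra_ivc_bus_create(dev);   // allocate regions, add channels
+ *   tegra_ivc_bus_boot_sync(bus);      // hand region IOVAs to firmware
+ *   tegra_ivc_bus_ready(bus, true);    // firmware up: notify channels
+ *   ...
+ *   tegra_ivc_bus_destroy(bus);        // unregister channels, free regions
+ */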
pm_runtime_enable(&bus->dev); + + ret = tegra_ivc_bus_parse_regions(bus, dev->of_node); + if (ret) { + dev_err(&bus->dev, "IVC regions setup failed: %d\n", ret); + goto error; + } + + ret = device_add(&bus->dev); + if (ret) { + dev_err(&bus->dev, "IVC instance error: %d\n", ret); + goto error; + } + + ret = tegra_ivc_bus_start(bus); + if (ret) { + dev_err(&bus->dev, "bus start failed: %d\n", ret); + goto error; + } + + return bus; + +error: + put_device(&bus->dev); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(tegra_ivc_bus_create); + +/* + * Communicate RTCPU UP/DOWN state to IVC devices. + */ +void tegra_ivc_bus_ready(struct tegra_ivc_bus *bus, bool online) +{ + if (IS_ERR_OR_NULL(bus)) + return; + + device_for_each_child(&bus->dev, &online, tegra_ivc_bus_ready_child); + + if (online) + tegra_ivc_bus_notify(bus, 0xFFFFU); +} +EXPORT_SYMBOL(tegra_ivc_bus_ready); + +void tegra_ivc_bus_destroy(struct tegra_ivc_bus *bus) +{ + if (IS_ERR_OR_NULL(bus)) + return; + + pm_runtime_disable(&bus->dev); + tegra_ivc_bus_stop(bus); + device_unregister(&bus->dev); +} +EXPORT_SYMBOL(tegra_ivc_bus_destroy); + +static __init int tegra_ivc_bus_init(void) +{ + return bus_register(&tegra_ivc_bus_type); +} + +static __exit void tegra_ivc_bus_exit(void) +{ + bus_unregister(&tegra_ivc_bus_type); +} + +module_init(tegra_ivc_bus_init); +module_exit(tegra_ivc_bus_exit); +MODULE_AUTHOR("Remi Denis-Courmont "); +MODULE_DESCRIPTION("NVIDIA Tegra IVC generic bus driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/tegra/rtcpu/reset-group.c b/drivers/platform/tegra/rtcpu/reset-group.c new file mode 100644 index 00000000..776dad6b --- /dev/null +++ b/drivers/platform/tegra/rtcpu/reset-group.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include "reset-group.h" + +#include +#include +#include +#include +#include + +struct camrtc_reset_group { + struct device *device; + const char *group_name; + int nresets; + struct reset_control *resets[]; +}; + +static void camrtc_reset_group_release(struct device *dev, void *res) +{ + const struct camrtc_reset_group *grp = res; + int i; + + for (i = 0; i < grp->nresets; i++) { + if (grp->resets[i]) + reset_control_put(grp->resets[i]); + } +} + +struct camrtc_reset_group *camrtc_reset_group_get( + struct device *dev, + const char *group_name) +{ + struct camrtc_reset_group *grp; + struct device_node *np; + const char *group_property; + size_t group_name_len; + int index; + int ret; + + if (!dev || !dev->of_node) + return ERR_PTR(-EINVAL); + + np = dev->of_node; + + group_property = group_name ? group_name : "reset-names"; + group_name_len = group_name ? 
strlen(group_name) : 0; + + ret = of_property_count_strings(np, group_property); + if (ret < 0) + return ERR_PTR(-ENOENT); + + grp = devres_alloc(camrtc_reset_group_release, + offsetof(struct camrtc_reset_group, resets[ret]) + + group_name_len + 1, + GFP_KERNEL); + if (!grp) + return ERR_PTR(-ENOMEM); + + grp->nresets = ret; + grp->device = dev; + grp->group_name = (char *)&grp->resets[grp->nresets]; + memcpy((char *)grp->group_name, group_name, group_name_len); + + for (index = 0; index < grp->nresets; index++) { + char const *name; + struct reset_control *reset; + + ret = of_property_read_string_index(np, group_property, + index, &name); + if (ret < 0) + goto error; + + reset = of_reset_control_get(np, name); + if (IS_ERR(reset)) { + ret = PTR_ERR(reset); + goto error; + } + + grp->resets[index] = reset; + } + + devres_add(dev, grp); + return grp; + +error: + devres_free(grp); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(camrtc_reset_group_get); + +static void camrtc_reset_group_error( + const struct camrtc_reset_group *grp, + char const *op, + int index, + int error) +{ + const char *name = "unnamed"; + + of_property_read_string_index(grp->device->of_node, + grp->group_name, index, &name); + dev_warn(grp->device, "%s reset %s (at %s[%d]): %d\n", + op, name, grp->group_name, index, error); +} + +void camrtc_reset_group_assert(const struct camrtc_reset_group *grp) +{ + int index, index0, err; + + if (IS_ERR_OR_NULL(grp)) + return; + + for (index = 1; index <= grp->nresets; index++) { + index0 = grp->nresets - index; + err = reset_control_assert(grp->resets[index0]); + if (err < 0) + camrtc_reset_group_error(grp, "assert", index0, err); + } +} +EXPORT_SYMBOL_GPL(camrtc_reset_group_assert); + +int camrtc_reset_group_deassert(const struct camrtc_reset_group *grp) +{ + int index, err; + + if (!grp) + return 0; + if (IS_ERR(grp)) + return -ENODEV; + + for (index = 0; index < grp->nresets; index++) { + err = reset_control_deassert(grp->resets[index]); + if (err < 0) { + camrtc_reset_group_error(grp, "deassert", index, err); + return err; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(camrtc_reset_group_deassert); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/tegra/rtcpu/reset-group.h b/drivers/platform/tegra/rtcpu/reset-group.h new file mode 100644 index 00000000..62da9fd0 --- /dev/null +++ b/drivers/platform/tegra/rtcpu/reset-group.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#ifndef INCLUDE_RESET_GROUP_H +#define INCLUDE_RESET_GROUP_H + +struct device; +struct camrtc_reset_group; + +struct camrtc_reset_group *camrtc_reset_group_get( + struct device *dev, + const char *group_name); + +void camrtc_reset_group_assert(const struct camrtc_reset_group *grp); +int camrtc_reset_group_deassert(const struct camrtc_reset_group *grp); + +#endif /* INCLUDE_RESET_GROUP_H */ diff --git a/drivers/platform/tegra/rtcpu/rtcpu-monitor.c b/drivers/platform/tegra/rtcpu/rtcpu-monitor.c new file mode 100644 index 00000000..66667a77 --- /dev/null +++ b/drivers/platform/tegra/rtcpu/rtcpu-monitor.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
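+/*
+ * Overview: the monitor listens for the "wdt-remote" interrupt that
+ * fires when the RTCPU watchdog expires.  The hard IRQ path only
+ * disables the interrupt and defers the recovery to a work item, which
+ * reboots the RTCPU and re-arms the IRQ:
+ *
+ *   wdt-remote IRQ -> tegra_camrtc_mon_wdt_remote_isr()
+ *                         disable_irq_nosync(irq);
+ *                         schedule_work(&wdt_work);
+ *   workqueue      -> tegra_camrtc_mon_wdt_worker()
+ *                         tegra_camrtc_mon_restore_rtcpu(mon);
+ *                         enable_irq(mon->wdt_irq);
+ */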
+ +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "drivers/video/tegra/host/vi/vi_notify.h" +#include "vi-notify.h" + +struct tegra_camrtc_mon { + struct device *rce_dev; + int wdt_irq; + struct work_struct wdt_work; +}; + +int tegra_camrtc_mon_restore_rtcpu(struct tegra_camrtc_mon *cam_rtcpu_mon) +{ + /* (Re)boot the rtcpu */ + /* rtcpu-down and rtcpu-up events are broadcast to all ivc channels */ + return tegra_camrtc_reboot(cam_rtcpu_mon->rce_dev); +} +EXPORT_SYMBOL(tegra_camrtc_mon_restore_rtcpu); + +static void tegra_camrtc_mon_wdt_worker(struct work_struct *work) +{ + struct tegra_camrtc_mon *cam_rtcpu_mon = container_of(work, + struct tegra_camrtc_mon, wdt_work); + + dev_info(cam_rtcpu_mon->rce_dev, + "Alert: Camera RTCPU gone bad! restoring it immediately!!\n"); + + tegra_camrtc_mon_restore_rtcpu(cam_rtcpu_mon); + + /* Enable WDT IRQ */ + enable_irq(cam_rtcpu_mon->wdt_irq); +} + +static irqreturn_t tegra_camrtc_mon_wdt_remote_isr(int irq, void *data) +{ + struct tegra_camrtc_mon *cam_rtcpu_mon = data; + + disable_irq_nosync(irq); + + schedule_work(&cam_rtcpu_mon->wdt_work); + + return IRQ_HANDLED; +} + +static int tegra_camrtc_mon_wdt_irq_setup( + struct tegra_camrtc_mon *cam_rtcpu_mon) +{ + struct platform_device *pdev = + to_platform_device(cam_rtcpu_mon->rce_dev); + int ret; + + cam_rtcpu_mon->wdt_irq = platform_get_irq_byname(pdev, "wdt-remote"); + if (cam_rtcpu_mon->wdt_irq < 0) { + dev_warn(&pdev->dev, "missing irq wdt-remote\n"); + return -ENODEV; + } + + ret = devm_request_threaded_irq(&pdev->dev, cam_rtcpu_mon->wdt_irq, + NULL, tegra_camrtc_mon_wdt_remote_isr, IRQF_ONESHOT, + dev_name(cam_rtcpu_mon->rce_dev), cam_rtcpu_mon); + if (ret) + return ret; + + dev_info(&pdev->dev, "using cam RTCPU IRQ (%d)\n", + cam_rtcpu_mon->wdt_irq); + + return 0; +} + +struct tegra_camrtc_mon *tegra_camrtc_mon_create(struct device *dev) +{ + struct tegra_camrtc_mon *cam_rtcpu_mon; + + cam_rtcpu_mon = devm_kzalloc(dev, sizeof(*cam_rtcpu_mon), GFP_KERNEL); + if (unlikely(cam_rtcpu_mon == NULL)) + return ERR_PTR(-ENOMEM); + + cam_rtcpu_mon->rce_dev = dev; + + /* Initialize wdt_work */ + INIT_WORK(&cam_rtcpu_mon->wdt_work, tegra_camrtc_mon_wdt_worker); + + tegra_camrtc_mon_wdt_irq_setup(cam_rtcpu_mon); + + dev_info(dev, "tegra_camrtc_mon_create is successful\n"); + + return cam_rtcpu_mon; +} +EXPORT_SYMBOL(tegra_camrtc_mon_create); + +int tegra_cam_rtcpu_mon_destroy(struct tegra_camrtc_mon *cam_rtcpu_mon) +{ + if (IS_ERR_OR_NULL(cam_rtcpu_mon)) + return -EINVAL; + + devm_kfree(cam_rtcpu_mon->rce_dev, cam_rtcpu_mon); + + return 0; +} +EXPORT_SYMBOL(tegra_cam_rtcpu_mon_destroy); + +MODULE_DESCRIPTION("CAMERA RTCPU monitor driver"); +MODULE_AUTHOR("Sudhir Vyas "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/tegra/rtcpu/tegra-rtcpu-trace.c b/drivers/platform/tegra/rtcpu/tegra-rtcpu-trace.c new file mode 100644 index 00000000..494ab920 --- /dev/null +++ b/drivers/platform/tegra/rtcpu/tegra-rtcpu-trace.c @@ -0,0 +1,1529 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
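+/*
+ * Overview: the RTCPU firmware and this driver share one DMA-coherent
+ * trace buffer.  Its layout, as implied by the offsets used in
+ * rtcpu_trace_init_memory() below (constants from camrtc-trace.h):
+ *
+ *   +------------------------------+ 0
+ *   | camrtc_trace_memory_header   |  signature, next-index pointers
+ *   +------------------------------+ CAMRTC_TRACE_EXCEPTION_OFFSET
+ *   | exception ring buffer        |  exception_entries records
+ *   +------------------------------+ CAMRTC_TRACE_EVENT_OFFSET
+ *   | event ring buffer            |  event_entries records
+ *   +------------------------------+ trace_memory_size
+ *
+ * The firmware produces entries and advances the next-index fields;
+ * the delayed worker in this file consumes them.
+ */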
+ +#include "soc/tegra/camrtc-trace.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_EVENTLIB +#include +#include +#include "rtcpu/device-group.h" +#endif + +#define CREATE_TRACE_POINTS +#include +#include +#include + +#define NV(p) "nvidia," #p + +#define WORK_INTERVAL_DEFAULT 100 +#define EXCEPTION_STR_LENGTH 2048 + +/* + * Private driver data structure + */ + +struct tegra_rtcpu_trace { + struct device *dev; + struct device_node *of_node; + struct mutex lock; + + /* memory */ + void *trace_memory; + u32 trace_memory_size; + dma_addr_t dma_handle; + + /* pointers to each block */ + void *exceptions_base; + struct camrtc_event_struct *events; + dma_addr_t dma_handle_pointers; + dma_addr_t dma_handle_exceptions; + dma_addr_t dma_handle_events; + + /* limit */ + u32 exception_entries; + u32 event_entries; + + /* exception pointer */ + u32 exception_last_idx; + + /* last pointer */ + u32 event_last_idx; + + /* worker */ + struct delayed_work work; + unsigned long work_interval_jiffies; + + /* statistics */ + u32 n_exceptions; + u64 n_events; + + /* copy of the latest exception and event */ + char last_exception_str[EXCEPTION_STR_LENGTH]; + struct camrtc_event_struct copy_last_event; + + /* debugfs */ + struct dentry *debugfs_root; + + /* eventlib */ + struct platform_device *vi_platform_device; + struct platform_device *vi1_platform_device; + struct platform_device *isp_platform_device; + + /* printk logging */ + const char *log_prefix; + bool enable_printk; + u32 printk_used; + char printk[EXCEPTION_STR_LENGTH]; +}; + +/* + * Trace memory + */ + +static int rtcpu_trace_setup_memory(struct tegra_rtcpu_trace *tracer) +{ + struct device *dev = tracer->dev; + struct of_phandle_args reg_spec; + int ret; + void *trace_memory; + size_t mem_size; + dma_addr_t dma_addr; + + ret = of_parse_phandle_with_fixed_args(dev->of_node, NV(trace), + 3, 0, ®_spec); + if (unlikely(ret != 0)) { + dev_err(dev, "Cannot find trace entry\n"); + return -EINVAL; + } + + mem_size = reg_spec.args[2]; + trace_memory = dma_alloc_coherent(dev, mem_size, &dma_addr, + GFP_KERNEL | __GFP_ZERO); + if (trace_memory == NULL) { + ret = -ENOMEM; + goto error; + } + + /* Save the information */ + tracer->trace_memory = trace_memory; + tracer->trace_memory_size = mem_size; + tracer->dma_handle = dma_addr; + tracer->of_node = reg_spec.np; + + return 0; + +error: + of_node_put(reg_spec.np); + return ret; +} + +static void rtcpu_trace_init_memory(struct tegra_rtcpu_trace *tracer) +{ + /* memory map */ + tracer->dma_handle_pointers = tracer->dma_handle + + offsetof(struct camrtc_trace_memory_header, exception_next_idx); + tracer->exceptions_base = tracer->trace_memory + + CAMRTC_TRACE_EXCEPTION_OFFSET; + tracer->exception_entries = 7; + tracer->dma_handle_exceptions = tracer->dma_handle + + CAMRTC_TRACE_EXCEPTION_OFFSET; + tracer->events = tracer->trace_memory + CAMRTC_TRACE_EVENT_OFFSET; + tracer->event_entries = + (tracer->trace_memory_size - CAMRTC_TRACE_EVENT_OFFSET) / + CAMRTC_TRACE_EVENT_SIZE; + tracer->dma_handle_events = tracer->dma_handle + + CAMRTC_TRACE_EXCEPTION_OFFSET; + + { + struct camrtc_trace_memory_header header = { + .signature[0] = CAMRTC_TRACE_SIGNATURE_1, + .signature[1] = CAMRTC_TRACE_SIGNATURE_2, + .revision = 1, + .exception_offset = CAMRTC_TRACE_EXCEPTION_OFFSET, + .exception_size = CAMRTC_TRACE_EXCEPTION_SIZE, + .exception_entries 
= tracer->exception_entries, + .event_offset = CAMRTC_TRACE_EVENT_OFFSET, + .event_size = CAMRTC_TRACE_EVENT_SIZE, + .event_entries = tracer->event_entries, + }; + + memcpy(tracer->trace_memory, &header, sizeof(header)); + + dma_sync_single_for_device(tracer->dev, + tracer->dma_handle, sizeof(header), + DMA_TO_DEVICE); + } +} + +/* + * Worker + */ + +static void rtcpu_trace_invalidate_entries(struct tegra_rtcpu_trace *tracer, + dma_addr_t dma_handle, u32 old_next, u32 new_next, + u32 entry_size, u32 entry_count) +{ + /* invalidate cache */ + if (new_next > old_next) { + dma_sync_single_for_cpu(tracer->dev, + dma_handle + old_next * entry_size, + (new_next - old_next) * entry_size, + DMA_FROM_DEVICE); + } else { + dma_sync_single_for_cpu(tracer->dev, + dma_handle + old_next * entry_size, + (entry_count - old_next) * entry_size, + DMA_FROM_DEVICE); + dma_sync_single_for_cpu(tracer->dev, + dma_handle, new_next * entry_size, + DMA_FROM_DEVICE); + } +} + +static void rtcpu_trace_exception(struct tegra_rtcpu_trace *tracer, + struct camrtc_trace_armv7_exception *exc) +{ + static const char * const s_str_exc_type[] = { + "Invalid (Reset)", + "Undefined instruction", + "Invalid (SWI)", + "Prefetch abort", + "Data abort", + "Invalid (Reserved)", + "IRQ", + "FIQ", + }; + + struct seq_buf sb; + unsigned int i, count; + char *buf = tracer->last_exception_str; + size_t buf_size = sizeof(tracer->last_exception_str); + char const header[] = + "###################### RTCPU EXCEPTION ######################"; + char const trailer[] = + "#############################################################"; + + seq_buf_init(&sb, buf, buf_size); + + seq_buf_printf(&sb, "%s %s\n", + tracer->log_prefix, + (exc->type < ARRAY_SIZE(s_str_exc_type)) ? + s_str_exc_type[exc->type] : "Unknown"); + + seq_buf_printf(&sb, + " R0: %08x R1: %08x R2: %08x R3: %08x\n", + exc->gpr.r0, exc->gpr.r1, exc->gpr.r2, exc->gpr.r3); + seq_buf_printf(&sb, + " R4: %08x R5: %08x R6: %08x R7: %08x\n", + exc->gpr.r4, exc->gpr.r5, exc->gpr.r6, exc->gpr.r7); + seq_buf_printf(&sb, + " R8: %08x R9: %08x R10: %08x R11: %08x\n", + exc->gpr.r8, exc->gpr.r9, exc->gpr.r10, exc->gpr.r11); + seq_buf_printf(&sb, + " R12: %08x SP: %08x LR: %08x PC: %08x\n", + exc->gpr.r12, exc->gpr.sp, exc->gpr.lr, exc->gpr.pc); + + if (exc->type == CAMRTC_ARMV7_EXCEPTION_FIQ) { + seq_buf_printf(&sb, + " R8: %08x R9: %08x R10: %08x R11: %08x, R12: %08x\n", + exc->gpr.r8_prev, exc->gpr.r9_prev, + exc->gpr.r10_prev, exc->gpr.r11_prev, + exc->gpr.r12_prev); + } + seq_buf_printf(&sb, " SP: %08x LR: %08x\n", + exc->gpr.sp_prev, exc->gpr.lr_prev); + + seq_buf_printf(&sb, " CPSR: %08x SPSR: %08x\n", + exc->cpsr, exc->spsr); + + seq_buf_printf(&sb, " DFSR: %08x DFAR: %08x ADFSR: %08x\n", + exc->dfsr, exc->dfar, exc->adfsr); + seq_buf_printf(&sb, " IFSR: %08x IFAR: %08x AIFSR: %08x\n", + exc->ifsr, exc->ifar, exc->aifsr); + + count = (exc->len - + offsetof(struct camrtc_trace_armv7_exception, callstack)) / + sizeof(struct camrtc_trace_callstack); + + if (count > 0) + seq_buf_printf(&sb, "Callstack\n"); + + for (i = 0; i < count; ++i) { + if (i >= CAMRTC_TRACE_CALLSTACK_MAX) + break; + seq_buf_printf(&sb, " [%08x]: %08x\n", + exc->callstack[i].lr_stack_addr, exc->callstack[i].lr); + } + + if (i < count) + seq_buf_printf(&sb, " ... 
[skipping %u entries]\n", count - i); + + printk(KERN_INFO "%s\n%s\n%s\n%s%s%s\n%s\n", + " ", " ", header, buf, trailer, " ", " "); +} + +static inline void rtcpu_trace_exceptions(struct tegra_rtcpu_trace *tracer) +{ + const struct camrtc_trace_memory_header *header = tracer->trace_memory; + union { + struct camrtc_trace_armv7_exception exc; + uint8_t mem[CAMRTC_TRACE_EXCEPTION_SIZE]; + } exc; + u32 old_next = tracer->exception_last_idx; + u32 new_next = header->exception_next_idx; + + if (old_next == new_next) + return; + + if (new_next >= tracer->exception_entries) { + WARN_ON_ONCE(new_next >= tracer->exception_entries); + dev_warn_ratelimited(tracer->dev, + "exception entry %u outside range 0..%u\n", + new_next, tracer->exception_entries - 1); + return; + } + + new_next = array_index_nospec(new_next, tracer->exception_entries); + + rtcpu_trace_invalidate_entries(tracer, + tracer->dma_handle_exceptions, + old_next, new_next, + CAMRTC_TRACE_EXCEPTION_SIZE, + tracer->exception_entries); + + while (old_next != new_next) { + void *emem; + old_next = array_index_nospec(old_next, tracer->exception_entries); + emem = tracer->exceptions_base + + CAMRTC_TRACE_EXCEPTION_SIZE * old_next; + memcpy(&exc.mem, emem, CAMRTC_TRACE_EXCEPTION_SIZE); + rtcpu_trace_exception(tracer, &exc.exc); + ++tracer->n_exceptions; + if (++old_next == tracer->exception_entries) + old_next = 0; + } + + tracer->exception_last_idx = new_next; +} + +static void rtcpu_trace_base_event(struct camrtc_event_struct *event) +{ + switch (event->header.id) { + case camrtc_trace_base_target_init: + trace_rtcpu_target_init(event->header.tstamp); + break; + case camrtc_trace_base_start_scheduler: + trace_rtcpu_start_scheduler(event->header.tstamp); + break; + default: + trace_rtcpu_unknown(event->header.tstamp, + event->header.id, + event->header.len - CAMRTC_TRACE_EVENT_HEADER_SIZE, + event->data.data8); + break; + } +} + +static void rtcpu_trace_rtos_event(struct camrtc_event_struct *event) +{ + switch (event->header.id) { + case camrtc_trace_rtos_task_switched_in: + trace_rtos_task_switched_in(event->header.tstamp); + break; + case camrtc_trace_rtos_increase_tick_count: + trace_rtos_increase_tick_count(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_low_power_idle_begin: + trace_rtos_low_power_idle_begin(event->header.tstamp); + break; + case camrtc_trace_rtos_low_power_idle_end: + trace_rtos_low_power_idle_end(event->header.tstamp); + break; + case camrtc_trace_rtos_task_switched_out: + trace_rtos_task_switched_out(event->header.tstamp); + break; + case camrtc_trace_rtos_task_priority_inherit: + trace_rtos_task_priority_inherit(event->header.tstamp, + event->data.data32[0], + event->data.data32[1]); + break; + case camrtc_trace_rtos_task_priority_disinherit: + trace_rtos_task_priority_disinherit(event->header.tstamp, + event->data.data32[0], + event->data.data32[1]); + break; + case camrtc_trace_rtos_blocking_on_queue_receive: + trace_rtos_blocking_on_queue_receive(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_blocking_on_queue_send: + trace_rtos_blocking_on_queue_send(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_moved_task_to_ready_state: + trace_rtos_moved_task_to_ready_state(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_queue_create: + trace_rtos_queue_create(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_queue_create_failed: + 
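+/*
+ * All camrtc_trace_rtos_* IDs in this switch mirror FreeRTOS trace
+ * hooks one to one: each case forwards the RTCPU timestamp plus the
+ * raw payload words to the matching Linux tracepoint, with data32[0]
+ * typically holding the RTCPU-side object (queue, mutex, task) handle.
+ */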
trace_rtos_queue_create_failed(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_create_mutex: + trace_rtos_create_mutex(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_create_mutex_failed: + trace_rtos_create_mutex_failed(event->header.tstamp); + break; + case camrtc_trace_rtos_give_mutex_recursive: + trace_rtos_give_mutex_recursive(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_give_mutex_recursive_failed: + trace_rtos_give_mutex_recursive_failed(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_take_mutex_recursive: + trace_rtos_take_mutex_recursive(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_take_mutex_recursive_failed: + trace_rtos_take_mutex_recursive_failed(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_create_counting_semaphore: + trace_rtos_create_counting_semaphore(event->header.tstamp); + break; + case camrtc_trace_rtos_create_counting_semaphore_failed: + trace_rtos_create_counting_semaphore_failed( + event->header.tstamp); + break; + case camrtc_trace_rtos_queue_send: + trace_rtos_queue_send(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_queue_send_failed: + trace_rtos_queue_send_failed(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_queue_receive: + trace_rtos_queue_receive(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_queue_peek: + trace_rtos_queue_peek(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_queue_peek_from_isr: + trace_rtos_queue_peek_from_isr(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_queue_receive_failed: + trace_rtos_queue_receive_failed(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_queue_send_from_isr: + trace_rtos_queue_send_from_isr(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_queue_send_from_isr_failed: + trace_rtos_queue_send_from_isr_failed(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_queue_receive_from_isr: + trace_rtos_queue_receive_from_isr(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_queue_receive_from_isr_failed: + trace_rtos_queue_receive_from_isr_failed(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_queue_peek_from_isr_failed: + trace_rtos_queue_peek_from_isr_failed(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_queue_delete: + trace_rtos_queue_delete(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_task_create: + trace_rtos_task_create(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_task_create_failed: + trace_rtos_task_create_failed(event->header.tstamp); + break; + case camrtc_trace_rtos_task_delete: + trace_rtos_task_delete(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_task_delay_until: + trace_rtos_task_delay_until(event->header.tstamp); + break; + case camrtc_trace_rtos_task_delay: + trace_rtos_task_delay(event->header.tstamp); + break; + case camrtc_trace_rtos_task_priority_set: + trace_rtos_task_priority_set(event->header.tstamp, + event->data.data32[0], + event->data.data32[1]); + break; + case camrtc_trace_rtos_task_suspend: + 
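+/*
+ * Task lifecycle events (create/delete/suspend/resume) carry the
+ * RTCPU-side task handle in data32[0].
+ */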
trace_rtos_task_suspend(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_task_resume: + trace_rtos_task_resume(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_task_resume_from_isr: + trace_rtos_task_resume_from_isr(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_task_increment_tick: + trace_rtos_task_increment_tick(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_timer_create: + trace_rtos_timer_create(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_timer_create_failed: + trace_rtos_timer_create_failed(event->header.tstamp); + break; + case camrtc_trace_rtos_timer_command_send: + trace_rtos_timer_command_send(event->header.tstamp, + event->data.data32[0], + event->data.data32[1], + event->data.data32[2], + event->data.data32[3]); + break; + case camrtc_trace_rtos_timer_expired: + trace_rtos_timer_expired(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_timer_command_received: + trace_rtos_timer_command_received(event->header.tstamp, + event->data.data32[0], + event->data.data32[1], + event->data.data32[2]); + break; + case camrtc_trace_rtos_malloc: + trace_rtos_malloc(event->header.tstamp, + event->data.data32[0], + event->data.data32[1]); + break; + case camrtc_trace_rtos_free: + trace_rtos_free(event->header.tstamp, + event->data.data32[0], + event->data.data32[1]); + break; + case camrtc_trace_rtos_event_group_create: + trace_rtos_event_group_create(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_event_group_create_failed: + trace_rtos_event_group_create_failed(event->header.tstamp); + break; + case camrtc_trace_rtos_event_group_sync_block: + trace_rtos_event_group_sync_block(event->header.tstamp, + event->data.data32[0], + event->data.data32[1], + event->data.data32[2]); + break; + case camrtc_trace_rtos_event_group_sync_end: + trace_rtos_event_group_sync_end(event->header.tstamp, + event->data.data32[0], + event->data.data32[1], + event->data.data32[2], + event->data.data32[3]); + break; + case camrtc_trace_rtos_event_group_wait_bits_block: + trace_rtos_event_group_wait_bits_block(event->header.tstamp, + event->data.data32[0], + event->data.data32[1]); + break; + case camrtc_trace_rtos_event_group_wait_bits_end: + trace_rtos_event_group_wait_bits_end(event->header.tstamp, + event->data.data32[0], + event->data.data32[1], + event->data.data32[2]); + break; + case camrtc_trace_rtos_event_group_clear_bits: + trace_rtos_event_group_clear_bits(event->header.tstamp, + event->data.data32[0], + event->data.data32[1]); + break; + case camrtc_trace_rtos_event_group_clear_bits_from_isr: + trace_rtos_event_group_clear_bits_from_isr(event->header.tstamp, + event->data.data32[0], + event->data.data32[1]); + break; + case camrtc_trace_rtos_event_group_set_bits: + trace_rtos_event_group_set_bits(event->header.tstamp, + event->data.data32[0], + event->data.data32[1]); + break; + case camrtc_trace_rtos_event_group_set_bits_from_isr: + trace_rtos_event_group_set_bits_from_isr(event->header.tstamp, + event->data.data32[0], + event->data.data32[1]); + break; + case camrtc_trace_rtos_event_group_delete: + trace_rtos_event_group_delete(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_rtos_pend_func_call: + trace_rtos_pend_func_call(event->header.tstamp, + event->data.data32[0], + event->data.data32[1], + event->data.data32[2], + 
event->data.data32[3]); + break; + case camrtc_trace_rtos_pend_func_call_from_isr: + trace_rtos_pend_func_call_from_isr(event->header.tstamp, + event->data.data32[0], + event->data.data32[1], + event->data.data32[2], + event->data.data32[3]); + break; + case camrtc_trace_rtos_queue_registry_add: + trace_rtos_queue_registry_add(event->header.tstamp, + event->data.data32[0], + event->data.data32[1]); + break; + default: + trace_rtcpu_unknown(event->header.tstamp, + event->header.id, + event->header.len - CAMRTC_TRACE_EVENT_HEADER_SIZE, + event->data.data8); + break; + } +} + +static void rtcpu_trace_dbg_event(struct camrtc_event_struct *event) +{ + switch (event->header.id) { + case camrtc_trace_dbg_unknown: + trace_rtcpu_dbg_unknown(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_dbg_enter: + trace_rtcpu_dbg_enter(event->header.tstamp, + event->data.data32[0]); + break; + case camrtc_trace_dbg_exit: + trace_rtcpu_dbg_exit(event->header.tstamp); + break; + case camrtc_trace_dbg_set_loglevel: + trace_rtcpu_dbg_set_loglevel(event->header.tstamp, + event->data.data32[0], + event->data.data32[1]); + break; + default: + trace_rtcpu_unknown(event->header.tstamp, + event->header.id, + event->header.len - CAMRTC_TRACE_EVENT_HEADER_SIZE, + event->data.data8); + break; + } +} + +const char * const g_trace_vinotify_tag_strs[] = { + "FS", "FE", + "CSIMUX_FRAME", "CSIMUX_STREAM", + "CHANSEL_PXL_SOF", "CHANSEL_PXL_EOF", + "CHANSEL_EMBED_SOF", "CHANSEL_EMBED_EOF", + "CHANSEL_NLINES", "CHANSEL_FAULT", + "CHANSEL_FAULT_FE", "CHANSEL_NOMATCH", + "CHANSEL_COLLISION", "CHANSEL_SHORT_FRAME", + "CHANSEL_LOAD_FRAMED", "ATOMP_PACKER_OVERFLOW", + "ATOMP_FS", "ATOMP_FE", + "ATOMP_FRAME_DONE", "ATOMP_EMB_DATA_DONE", + "ATOMP_FRAME_NLINES_DONE", "ATOMP_FRAME_TRUNCATED", + "ATOMP_FRAME_TOSSED", "ATOMP_PDAF_DATA_DONE", + "VIFALC_TDSTATE", "VIFALC_ACTIONLST", + "ISPBUF_FIFO_OVERFLOW", "ISPBUF_FS", + "ISPBUF_FE", "VGP0_DONE", + "VGP1_DONE", "FMLITE_DONE", +}; +const unsigned int g_trace_vinotify_tag_str_count = + ARRAY_SIZE(g_trace_vinotify_tag_strs); + +static void rtcpu_trace_vinotify_event(struct camrtc_event_struct *event) +{ + switch (event->header.id) { + case camrtc_trace_vinotify_event_ts64: + trace_rtcpu_vinotify_event_ts64(event->header.tstamp, + (event->data.data32[0] >> 1) & 0x7f, event->data.data32[0], + ((u64)event->data.data32[3] << 32) | event->data.data32[1], + event->data.data32[2]); + break; + case camrtc_trace_vinotify_event: + trace_rtcpu_vinotify_event(event->header.tstamp, + event->data.data32[0], event->data.data32[1], + event->data.data32[2], event->data.data32[3], + event->data.data32[4], event->data.data32[5], + event->data.data32[6]); + break; + case camrtc_trace_vinotify_error: + trace_rtcpu_vinotify_error(event->header.tstamp, + event->data.data32[0], event->data.data32[1], + event->data.data32[2], event->data.data32[3], + event->data.data32[4], event->data.data32[5], + event->data.data32[6]); + break; + default: + trace_rtcpu_unknown(event->header.tstamp, + event->header.id, + event->header.len - CAMRTC_TRACE_EVENT_HEADER_SIZE, + event->data.data8); + break; + } +} + +static void rtcpu_trace_vi_eventlib_event(struct tegra_rtcpu_trace *tracer, + struct camrtc_event_struct *event) +{ +#ifdef CONFIG_EVENTLIB + struct nvhost_device_data *pdata = NULL; + struct nvhost_task_begin task_begin; + struct nvhost_task_end task_end; + u64 ts = 0; + u32 vi_unit_id = event->data.data32[6]; + + if (tracer->vi_platform_device == NULL) + return; + + if (vi_unit_id == 0) + pdata = 
platform_get_drvdata(tracer->vi_platform_device); + else if (vi_unit_id == 1) + pdata = platform_get_drvdata(tracer->vi1_platform_device); + if (pdata == NULL) + return; + + if (!pdata->eventlib_id) { + pr_warn("%s kernel eventlib id %d cannot be found\n", + __func__, pdata->eventlib_id); + return; + } + + switch (event->header.id) { + case camrtc_trace_vi_frame_begin: + /* Write task start event */ + task_begin.syncpt_id = event->data.data32[0]; + task_begin.syncpt_thresh = event->data.data32[1]; + task_begin.class_id = pdata->class; + task_begin.channel_id = event->data.data32[2]; + + ts = ((u64)event->data.data32[5] << 32) | + (u64)event->data.data32[4]; + keventlib_write(pdata->eventlib_id, + &task_begin, + sizeof(task_begin), + NVHOST_TASK_BEGIN, + ts); + break; + case camrtc_trace_vi_frame_end: + /* Write task end event */ + task_end.syncpt_id = event->data.data32[0]; + task_end.syncpt_thresh = event->data.data32[1]; + task_end.class_id = pdata->class; + task_end.channel_id = event->data.data32[2]; + + ts = ((u64)event->data.data32[5] << 32) | + (u64)event->data.data32[4]; + keventlib_write(pdata->eventlib_id, + &task_end, + sizeof(task_end), + NVHOST_TASK_END, + ts); + break; + default: + pr_warn("%pFn event id %d cannot be found\n", + __func__, pdata->eventlib_id); + break; + } +#endif +} + +static void rtcpu_trace_vi_event(struct tegra_rtcpu_trace *tracer, + struct camrtc_event_struct *event) +{ + switch (event->header.id) { + case camrtc_trace_vi_frame_begin: + case camrtc_trace_vi_frame_end: + rtcpu_trace_vi_eventlib_event(tracer, event); + break; + default: + trace_rtcpu_unknown(event->header.tstamp, + event->header.id, + event->header.len - CAMRTC_TRACE_EVENT_HEADER_SIZE, + event->data.data8); + break; + } +} + +const char * const g_trace_isp_falcon_task_strs[] = { + "UNUSED", + "SCHED_ERROR", + "SCHED_HANDLE_STAT", + "SCHED_FINISH_TILE", + "SCHED_FINISH_SLICE", + "HANDLE_EVENT", + "INPUT_ACTION", + "ISR" +}; + +const unsigned int g_trace_isp_falcon_task_str_count = + ARRAY_SIZE(g_trace_isp_falcon_task_strs); + +#define TRACE_ISP_FALCON_EVENT_TS 13U +#define TRACE_ISP_FALCON_EVENT_TE 14U +#define TRACE_ISP_FALCON_PROFILE_START 16U +#define TRACE_ISP_FALCON_PROFILE_END 17U + +static void rtcpu_trace_isp_eventlib_event(struct tegra_rtcpu_trace *tracer, + struct camrtc_event_struct *event) +{ +#ifdef CONFIG_EVENTLIB + struct nvhost_device_data *pdata = NULL; + struct nvhost_task_begin task_begin; + struct nvhost_task_end task_end; + + if (tracer->isp_platform_device == NULL) + return; + + pdata = platform_get_drvdata(tracer->isp_platform_device); + if (pdata == NULL) + return; + + if (!pdata->eventlib_id) { + pr_warn("%s kernel eventlib id %d cannot be found\n", + __func__, pdata->eventlib_id); + return; + } + + switch (event->header.id) { + case camrtc_trace_isp_task_begin: + /* Write task start event */ + task_begin.syncpt_id = event->data.data32[0]; + task_begin.syncpt_thresh = event->data.data32[1]; + task_begin.class_id = pdata->class; + task_begin.channel_id = event->data.data32[2]; + + keventlib_write(pdata->eventlib_id, + &task_begin, + sizeof(task_begin), + NVHOST_TASK_BEGIN, + event->header.tstamp); + break; + case camrtc_trace_isp_task_end: + /* Write task end event */ + task_end.syncpt_id = event->data.data32[0]; + task_end.syncpt_thresh = event->data.data32[1]; + task_end.class_id = pdata->class; + task_end.channel_id = event->data.data32[2]; + + keventlib_write(pdata->eventlib_id, + &task_end, + sizeof(task_end), + NVHOST_TASK_END, + event->header.tstamp); + break; 
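+/*
+ * No default case: rtcpu_trace_isp_event() routes only
+ * camrtc_trace_isp_task_begin/_end here, so no other IDs can reach
+ * this switch.
+ */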
+ } +#endif +} + +static void rtcpu_trace_isp_falcon_event(struct camrtc_event_struct *event) +{ + u8 ispfalcon_tag = (u8) ((event->data.data32[0] & 0xFF) >> 1U); + u8 ch = (u8) ((event->data.data32[0] & 0xFF00) >> 8U); + u8 seq = (u8) ((event->data.data32[0] & 0xFF0000) >> 16U); + u32 tstamp = event->data.data32[1]; + + switch (ispfalcon_tag) { + case TRACE_ISP_FALCON_EVENT_TS: + trace_rtcpu_isp_falcon_tile_start( + ch, seq, tstamp, + (u8) (event->data.data32[3] & 0xFF), + (u8) ((event->data.data32[3] & 0xFF00) >> 8U), + (u16) (event->data.data32[2] & 0xFFFF), + (u16) ((event->data.data32[2] & 0xFFFF0000) >> 16U)); + break; + case TRACE_ISP_FALCON_EVENT_TE: + trace_rtcpu_isp_falcon_tile_end( + ch, seq, tstamp, + (u8) (event->data.data32[3] & 0xFF), + (u8) ((event->data.data32[3] & 0xFF00) >> 8U)); + break; + case TRACE_ISP_FALCON_PROFILE_START: + trace_rtcpu_isp_falcon_task_start( + ch, tstamp, + event->data.data32[2]); + break; + case TRACE_ISP_FALCON_PROFILE_END: + trace_rtcpu_isp_falcon_task_end( + tstamp, + event->data.data32[2]); + break; + default: + trace_rtcpu_isp_falcon( + ispfalcon_tag, ch, seq, tstamp, + event->data.data32[2], + event->data.data32[3]); + break; + } + +} + +static void rtcpu_trace_isp_event(struct tegra_rtcpu_trace *tracer, + struct camrtc_event_struct *event) +{ + switch (event->header.id) { + case camrtc_trace_isp_task_begin: + case camrtc_trace_isp_task_end: + rtcpu_trace_isp_eventlib_event(tracer, event); + break; + case camrtc_trace_isp_falcon_traces_event: + rtcpu_trace_isp_falcon_event(event); + break; + default: + trace_rtcpu_unknown(event->header.tstamp, + event->header.id, + event->header.len - CAMRTC_TRACE_EVENT_HEADER_SIZE, + event->data.data8); + break; + } +} + +const char * const g_trace_nvcsi_intr_class_strs[] = { + "GLOBAL", + "CORRECTABLE_ERR", + "UNCORRECTABLE_ERR", +}; +const unsigned int g_trace_nvcsi_intr_class_str_count = + ARRAY_SIZE(g_trace_nvcsi_intr_class_strs); + +const char * const g_trace_nvcsi_intr_type_strs[] = { + "SW_DEBUG", + "HOST1X", + "PHY_INTR", "PHY_INTR0", "PHY_INTR1", + "STREAM_NOVC", "STREAM_VC", +}; +const unsigned int g_trace_nvcsi_intr_type_str_count = + ARRAY_SIZE(g_trace_nvcsi_intr_type_strs); + +static void rtcpu_trace_nvcsi_event(struct camrtc_event_struct *event) +{ + u64 ts_tsc = ((u64)event->data.data32[5] << 32) | + (u64)event->data.data32[4]; + + switch (event->header.id) { + case camrtc_trace_nvcsi_intr: + trace_rtcpu_nvcsi_intr(ts_tsc, + (event->data.data32[0] & 0xff), + (event->data.data32[1] & 0xff), + event->data.data32[2], + event->data.data32[3]); + break; + default: + trace_rtcpu_unknown(event->header.tstamp, + event->header.id, + event->header.len - CAMRTC_TRACE_EVENT_HEADER_SIZE, + event->data.data8); + break; + } +} + +struct capture_event_progress { + uint32_t channel_id; + uint32_t sequence; +}; + +struct capture_event_isp { + uint32_t channel_id; + uint32_t prog_sequence; + uint32_t cap_sequence; + uint8_t isp_settings_id; + uint8_t vi_channel_id; + uint8_t pad_[2]; +}; + +struct capture_event { + union { + struct capture_event_progress progress; + struct capture_event_isp isp; + bool suspend; + }; +}; + +static void rtcpu_trace_capture_event(struct camrtc_event_struct *event) +{ + const struct capture_event *ev = (const void *)&event->data; + + switch (event->header.id) { + case camrtc_trace_capture_event_sof: + trace_capture_event_sof(event->header.tstamp, + ev->progress.channel_id, ev->progress.sequence); + break; + case camrtc_trace_capture_event_eof: + 
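+/*
+ * The progress-style events (sof, eof, error, reschedule,
+ * report_program) all share the capture_event_progress layout:
+ * data32[0] = channel_id, data32[1] = sequence number.
+ */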
trace_capture_event_eof(event->header.tstamp, + ev->progress.channel_id, ev->progress.sequence); + break; + case camrtc_trace_capture_event_error: + trace_capture_event_error(event->header.tstamp, + ev->progress.channel_id, ev->progress.sequence); + break; + case camrtc_trace_capture_event_reschedule: + trace_capture_event_reschedule(event->header.tstamp, + ev->progress.channel_id, ev->progress.sequence); + break; + case camrtc_trace_capture_event_reschedule_isp: + trace_capture_event_reschedule_isp(event->header.tstamp, + ev->isp.channel_id, ev->isp.cap_sequence, ev->isp.prog_sequence, + ev->isp.isp_settings_id, ev->isp.vi_channel_id); + break; + case camrtc_trace_capture_event_isp_done: + trace_capture_event_isp_done(event->header.tstamp, + ev->isp.channel_id, ev->isp.cap_sequence, ev->isp.prog_sequence, + ev->isp.isp_settings_id, ev->isp.vi_channel_id); + break; + case camrtc_trace_capture_event_isp_error: + trace_capture_event_isp_error(event->header.tstamp, + ev->isp.channel_id, ev->isp.cap_sequence, ev->isp.prog_sequence, + ev->isp.isp_settings_id, ev->isp.vi_channel_id); + break; + case camrtc_trace_capture_event_wdt: + trace_capture_event_wdt(event->header.tstamp); + break; + case camrtc_trace_capture_event_report_program: + trace_capture_event_report_program(event->header.tstamp, + ev->progress.channel_id, ev->progress.sequence); + break; + case camrtc_trace_capture_event_suspend: + trace_capture_event_suspend(event->header.tstamp, ev->suspend); + break; + case camrtc_trace_capture_event_suspend_isp: + trace_capture_event_suspend_isp(event->header.tstamp, ev->suspend); + break; + + case camrtc_trace_capture_event_inject: + case camrtc_trace_capture_event_sensor: + default: + trace_rtcpu_unknown(event->header.tstamp, + event->header.id, + event->header.len - CAMRTC_TRACE_EVENT_HEADER_SIZE, + event->data.data8); + break; + } +} + +static void rtcpu_trace_perf_event(struct camrtc_event_struct *event) +{ + const struct camrtc_trace_perf_counter_data *perf = (const void *)&event->data; + + switch (event->header.id) { + case camrtc_trace_perf_reset: + trace_rtcpu_perf_reset(event->header.tstamp, perf); + break; + case camrtc_trace_perf_counters: + trace_rtcpu_perf_counters(event->header.tstamp, perf); + break; + + default: + trace_rtcpu_unknown(event->header.tstamp, + event->header.id, + event->header.len - CAMRTC_TRACE_EVENT_HEADER_SIZE, + event->data.data8); + break; + } +} + +static void rtcpu_trace_array_event(struct tegra_rtcpu_trace *tracer, + struct camrtc_event_struct *event) +{ + switch (CAMRTC_EVENT_MODULE_FROM_ID(event->header.id)) { + case CAMRTC_EVENT_MODULE_BASE: + rtcpu_trace_base_event(event); + break; + case CAMRTC_EVENT_MODULE_RTOS: + rtcpu_trace_rtos_event(event); + break; + case CAMRTC_EVENT_MODULE_DBG: + rtcpu_trace_dbg_event(event); + break; + case CAMRTC_EVENT_MODULE_VINOTIFY: + rtcpu_trace_vinotify_event(event); + break; + case CAMRTC_EVENT_MODULE_I2C: + break; + case CAMRTC_EVENT_MODULE_VI: + rtcpu_trace_vi_event(tracer, event); + break; + case CAMRTC_EVENT_MODULE_ISP: + rtcpu_trace_isp_event(tracer, event); + break; + case CAMRTC_EVENT_MODULE_NVCSI: + rtcpu_trace_nvcsi_event(event); + break; + case CAMRTC_EVENT_MODULE_CAPTURE: + rtcpu_trace_capture_event(event); + break; + case CAMRTC_EVENT_MODULE_PERF: + rtcpu_trace_perf_event(event); + break; + default: + trace_rtcpu_unknown(event->header.tstamp, + event->header.id, + event->header.len - CAMRTC_TRACE_EVENT_HEADER_SIZE, + event->data.data8); + break; + } +} + +static void trace_rtcpu_log(struct 
tegra_rtcpu_trace *tracer, + struct camrtc_event_struct *event) +{ + size_t len, used; + + if (unlikely(event->header.id != camrtc_trace_type_string)) + return; + + len = event->header.len - CAMRTC_TRACE_EVENT_HEADER_SIZE; + + if (len > CAMRTC_TRACE_EVENT_PAYLOAD_SIZE) { + pr_err("%s: invalid trace event len (%zu)\n", __func__, len); + len = CAMRTC_TRACE_EVENT_PAYLOAD_SIZE; + } + + if (len == CAMRTC_TRACE_EVENT_PAYLOAD_SIZE) + /* Ignore NULs at the end of buffer */ + len = strnlen(event->data.data8, len); + + used = tracer->printk_used; + + if (unlikely(used + len > sizeof(tracer->printk))) { + /* Too long concatenated message, print it out now */ + pr_info("%s %.*s\n", tracer->log_prefix, + (int)used, tracer->printk); + used = 0; + } + + memcpy(tracer->printk + used, event->data.data8, len); + + used += len; + + if (likely(used > 0)) { + char end = tracer->printk[used - 1]; + + /* + * Some log entries from rtcpu consists of multiple + * messages. If the string does not end with \r or + * \n, do not print it now but rather wait for the + * next piece. + */ + if (end == '\r' || end == '\n') { + while (--used > 0) { + end = tracer->printk[used - 1]; + if (!(end == '\r' || end == '\n')) + break; + } + + pr_info("%s %.*s\n", tracer->log_prefix, + (int)used, tracer->printk); + used = 0; + } + } + + tracer->printk_used = used; +} + +static void rtcpu_trace_event(struct tegra_rtcpu_trace *tracer, + struct camrtc_event_struct *event) +{ + switch (CAMRTC_EVENT_TYPE_FROM_ID(event->header.id)) { + case CAMRTC_EVENT_TYPE_ARRAY: + rtcpu_trace_array_event(tracer, event); + break; + case CAMRTC_EVENT_TYPE_ARMV7_EXCEPTION: + trace_rtcpu_armv7_exception(event->header.tstamp, + event->data.data32[0]); + break; + case CAMRTC_EVENT_TYPE_PAD: + /* ignore */ + break; + case CAMRTC_EVENT_TYPE_START: + trace_rtcpu_start(event->header.tstamp); + break; + case CAMRTC_EVENT_TYPE_STRING: + trace_rtcpu_string(event->header.tstamp, + event->header.id, + event->header.len - CAMRTC_TRACE_EVENT_HEADER_SIZE, + (char *) event->data.data8); + if (likely(tracer->enable_printk)) + trace_rtcpu_log(tracer, event); + break; + case CAMRTC_EVENT_TYPE_BULK: + trace_rtcpu_bulk(event->header.tstamp, + event->header.id, + event->header.len - CAMRTC_TRACE_EVENT_HEADER_SIZE, + event->data.data8); + break; + default: + trace_rtcpu_unknown(event->header.tstamp, + event->header.id, + event->header.len - CAMRTC_TRACE_EVENT_HEADER_SIZE, + event->data.data8); + break; + } +} + +static inline void rtcpu_trace_events(struct tegra_rtcpu_trace *tracer) +{ + const struct camrtc_trace_memory_header *header = tracer->trace_memory; + u32 old_next = tracer->event_last_idx; + u32 new_next = header->event_next_idx; + struct camrtc_event_struct *event, *last_event; + + if (new_next >= tracer->event_entries) { + WARN_ON_ONCE(new_next >= tracer->event_entries); + dev_warn_ratelimited(tracer->dev, + "trace entry %u outside range 0..%u\n", + new_next, tracer->event_entries - 1); + return; + } + + new_next = array_index_nospec(new_next, tracer->event_entries); + + if (old_next == new_next) + return; + + rtcpu_trace_invalidate_entries(tracer, + tracer->dma_handle_events, + old_next, new_next, + CAMRTC_TRACE_EVENT_SIZE, + tracer->event_entries); + + /* pull events */ + while (old_next != new_next) { + old_next = array_index_nospec(old_next, tracer->event_entries); + event = &tracer->events[old_next]; + last_event = event; + rtcpu_trace_event(tracer, event); + tracer->n_events++; + + if (++old_next == tracer->event_entries) + old_next = 0; + } + + 
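+/*
+ * Record where the firmware's write pointer stood; the next worker
+ * pass resumes from event_last_idx.  Indices wrap at event_entries,
+ * e.g. with 1024 entries, index 1023 is followed by 0.
+ */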
tracer->event_last_idx = new_next; + tracer->copy_last_event = *last_event; +} + +void tegra_rtcpu_trace_flush(struct tegra_rtcpu_trace *tracer) +{ + if (tracer == NULL) + return; + + mutex_lock(&tracer->lock); + + /* invalidate the cache line for the pointers */ + dma_sync_single_for_cpu(tracer->dev, tracer->dma_handle_pointers, + CAMRTC_TRACE_NEXT_IDX_SIZE, DMA_FROM_DEVICE); + + /* process exceptions and events */ + rtcpu_trace_exceptions(tracer); + rtcpu_trace_events(tracer); + + mutex_unlock(&tracer->lock); +} +EXPORT_SYMBOL(tegra_rtcpu_trace_flush); + +static void rtcpu_trace_worker(struct work_struct *work) +{ + struct tegra_rtcpu_trace *tracer; + + tracer = container_of(work, struct tegra_rtcpu_trace, work.work); + + tegra_rtcpu_trace_flush(tracer); + + /* reschedule */ + schedule_delayed_work(&tracer->work, tracer->work_interval_jiffies); +} + +/* + * Debugfs + */ + +#define DEFINE_SEQ_FOPS(_fops_, _show_) \ + static int _fops_ ## _open(struct inode *inode, struct file *file) \ + { \ + return single_open(file, _show_, inode->i_private); \ + } \ + static const struct file_operations _fops_ = { \ + .open = _fops_ ## _open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release } + +static int rtcpu_trace_debugfs_stats_read( + struct seq_file *file, void *data) +{ + struct tegra_rtcpu_trace *tracer = file->private; + + seq_printf(file, "Exceptions: %u\nEvents: %llu\n", + tracer->n_exceptions, tracer->n_events); + + return 0; +} + +DEFINE_SEQ_FOPS(rtcpu_trace_debugfs_stats, rtcpu_trace_debugfs_stats_read); + +static int rtcpu_trace_debugfs_last_exception_read( + struct seq_file *file, void *data) +{ + struct tegra_rtcpu_trace *tracer = file->private; + + seq_puts(file, tracer->last_exception_str); + + return 0; +} + +DEFINE_SEQ_FOPS(rtcpu_trace_debugfs_last_exception, + rtcpu_trace_debugfs_last_exception_read); + +static int rtcpu_trace_debugfs_last_event_read( + struct seq_file *file, void *data) +{ + struct tegra_rtcpu_trace *tracer = file->private; + struct camrtc_event_struct *event = &tracer->copy_last_event; + unsigned int i, payload_len; + + if (tracer->n_events == 0) + return 0; + + payload_len = event->header.len - CAMRTC_TRACE_EVENT_HEADER_SIZE; + + seq_printf(file, "Len: %u\nID: 0x%08x\nTimestamp: %llu\n", + event->header.len, event->header.id, event->header.tstamp); + + switch (CAMRTC_EVENT_TYPE_FROM_ID(event->header.id)) { + case CAMRTC_EVENT_TYPE_ARRAY: + for (i = 0; i < payload_len / 4; ++i) + seq_printf(file, "0x%08x ", event->data.data32[i]); + seq_puts(file, "\n"); + break; + case CAMRTC_EVENT_TYPE_ARMV7_EXCEPTION: + seq_puts(file, "Exception.\n"); + break; + case CAMRTC_EVENT_TYPE_PAD: + break; + case CAMRTC_EVENT_TYPE_START: + seq_puts(file, "Start.\n"); + break; + case CAMRTC_EVENT_TYPE_STRING: + seq_puts(file, (char *) event->data.data8); + break; + case CAMRTC_EVENT_TYPE_BULK: + for (i = 0; i < payload_len; ++i) + seq_printf(file, "0x%02x ", event->data.data8[i]); + seq_puts(file, "\n"); + break; + default: + seq_puts(file, "Unknown type.\n"); + break; + } + + return 0; +} + +DEFINE_SEQ_FOPS(rtcpu_trace_debugfs_last_event, + rtcpu_trace_debugfs_last_event_read); + +static void rtcpu_trace_debugfs_deinit(struct tegra_rtcpu_trace *tracer) +{ + debugfs_remove_recursive(tracer->debugfs_root); +} + +static void rtcpu_trace_debugfs_init(struct tegra_rtcpu_trace *tracer) +{ + struct dentry *entry; + + tracer->debugfs_root = debugfs_create_dir("tegra_rtcpu_trace", NULL); + if (IS_ERR_OR_NULL(tracer->debugfs_root)) + return; + + entry = 
debugfs_create_file("stats", S_IRUGO, + tracer->debugfs_root, tracer, &rtcpu_trace_debugfs_stats); + if (IS_ERR_OR_NULL(entry)) + goto failed_create; + + entry = debugfs_create_file("last_exception", S_IRUGO, + tracer->debugfs_root, tracer, &rtcpu_trace_debugfs_last_exception); + if (IS_ERR_OR_NULL(entry)) + goto failed_create; + + entry = debugfs_create_file("last_event", S_IRUGO, + tracer->debugfs_root, tracer, &rtcpu_trace_debugfs_last_event); + if (IS_ERR_OR_NULL(entry)) + goto failed_create; + + return; + +failed_create: + debugfs_remove_recursive(tracer->debugfs_root); +} + +/* + * Init/Cleanup + */ + +struct tegra_rtcpu_trace *tegra_rtcpu_trace_create(struct device *dev, + struct camrtc_device_group *camera_devices) +{ + struct tegra_rtcpu_trace *tracer; + u32 param; + int ret; + + tracer = kzalloc(sizeof(*tracer), GFP_KERNEL); + if (unlikely(tracer == NULL)) + return NULL; + + tracer->dev = dev; + mutex_init(&tracer->lock); + + /* Get the trace memory */ + ret = rtcpu_trace_setup_memory(tracer); + if (ret) { + dev_err(dev, "Trace memory setup failed: %d\n", ret); + kfree(tracer); + return NULL; + } + + /* Initialize the trace memory */ + rtcpu_trace_init_memory(tracer); + + /* Debugfs */ + rtcpu_trace_debugfs_init(tracer); + +#ifdef CONFIG_EVENTLIB + if (camera_devices != NULL) { + /* Eventlib */ + tracer->isp_platform_device = + camrtc_device_get_byname(camera_devices, "isp"); + if (IS_ERR(tracer->isp_platform_device)) { + dev_info(dev, "no camera-device \"%s\"\n", "isp"); + tracer->isp_platform_device = NULL; + } + tracer->vi_platform_device = + camrtc_device_get_byname(camera_devices, "vi0"); + if (IS_ERR(tracer->vi_platform_device)) { + dev_info(dev, "no camera-device \"%s\"\n", "vi0"); + tracer->vi_platform_device = NULL; + } + tracer->vi1_platform_device = + camrtc_device_get_byname(camera_devices, "vi1"); + if (IS_ERR(tracer->vi1_platform_device)) { + dev_info(dev, "no camera-device \"%s\"\n", "vi1"); + tracer->vi1_platform_device = NULL; + } + } +#endif + + /* Worker */ + param = WORK_INTERVAL_DEFAULT; + if (of_property_read_u32(tracer->of_node, NV(interval-ms), ¶m)) { + dev_err(dev, "interval-ms property not present\n"); + kfree(tracer); + return NULL; + } + + tracer->enable_printk = of_property_read_bool(tracer->of_node, + NV(enable-printk)); + + tracer->log_prefix = "[RTCPU]"; + if (of_property_read_string(tracer->of_node, NV(log-prefix), + &tracer->log_prefix)) { + dev_err(dev, "RTCPU property not present\n"); + kfree(tracer); + return NULL; + } + + INIT_DELAYED_WORK(&tracer->work, rtcpu_trace_worker); + tracer->work_interval_jiffies = msecs_to_jiffies(param); + + /* Done with initialization */ + schedule_delayed_work(&tracer->work, 0); + + dev_info(dev, "Trace buffer configured at IOVA=0x%08x\n", + (u32)tracer->dma_handle); + + return tracer; +} +EXPORT_SYMBOL(tegra_rtcpu_trace_create); + +int tegra_rtcpu_trace_boot_sync(struct tegra_rtcpu_trace *tracer) +{ + int ret; + + if (tracer == NULL) + return 0; + + ret = tegra_camrtc_iovm_setup(tracer->dev, tracer->dma_handle); + if (ret == 0) + return 0; + + dev_err(tracer->dev, "RTCPU trace: IOVM setup error: %d\n", ret); + + return -EIO; +} +EXPORT_SYMBOL(tegra_rtcpu_trace_boot_sync); + +void tegra_rtcpu_trace_destroy(struct tegra_rtcpu_trace *tracer) +{ + if (IS_ERR_OR_NULL(tracer)) + return; + platform_device_put(tracer->isp_platform_device); + platform_device_put(tracer->vi_platform_device); + platform_device_put(tracer->vi1_platform_device); + of_node_put(tracer->of_node); + cancel_delayed_work_sync(&tracer->work); + 
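+/*
+ * The delayed worker must be fully stopped before the trace memory is
+ * freed below, since rtcpu_trace_worker() both touches the buffer and
+ * reschedules itself.
+ */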
flush_delayed_work(&tracer->work); + rtcpu_trace_debugfs_deinit(tracer); + dma_free_coherent(tracer->dev, tracer->trace_memory_size, + tracer->trace_memory, tracer->dma_handle); + kfree(tracer); +} +EXPORT_SYMBOL(tegra_rtcpu_trace_destroy); + +MODULE_DESCRIPTION("NVIDIA Tegra RTCPU trace driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/tegra/tegra-camera-rtcpu.c b/drivers/platform/tegra/tegra-camera-rtcpu.c new file mode 100644 index 00000000..bef36b48 --- /dev/null +++ b/drivers/platform/tegra/tegra-camera-rtcpu.c @@ -0,0 +1,1160 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include + +#include +#include +#include +#if IS_ENABLED(CONFIG_INTERCONNECT) +#include +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if IS_ENABLED(CONFIG_TEGRA_BWMGR) +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "rtcpu/clk-group.h" +#include "rtcpu/device-group.h" +#include "rtcpu/reset-group.h" +#include "rtcpu/hsp-combo.h" + +#include "soc/tegra/camrtc-commands.h" +#include + +#define CAMRTC_NUM_REGS 2 +#define CAMRTC_NUM_RESETS 2 + +struct tegra_cam_rtcpu_pdata { + const char *name; + void (*assert_resets)(struct device *); + int (*deassert_resets)(struct device *); + int (*wait_for_idle)(struct device *); + const char * const *reset_names; + const char * const *reg_names; +}; + +/* Register specifics */ +#define TEGRA_APS_FRSC_SC_CTL_0 0x0 +#define TEGRA_APS_FRSC_SC_MODEIN_0 0x14 +#define TEGRA_PM_R5_CTRL_0 0x40 +#define TEGRA_PM_PWR_STATUS_0 0x20 + +#define TEGRA_R5R_SC_DISABLE 0x5 +#define TEGRA_FN_MODEIN 0x29527 +#define TEGRA_PM_FWLOADDONE 0x2 +#define TEGRA_PM_WFIPIPESTOPPED 0x200000 + +#define AMISC_ADSP_STATUS 0x14 +#define AMISC_ADSP_L2_IDLE BIT(31) +#define AMISC_ADSP_L2_CLKSTOPPED BIT(30) + +static int tegra_sce_cam_wait_for_idle(struct device *dev); +static void tegra_sce_cam_assert_resets(struct device *dev); +static int tegra_sce_cam_deassert_resets(struct device *dev); + +static int tegra_rce_cam_wait_for_idle(struct device *dev); +static void tegra_rce_cam_assert_resets(struct device *dev); +static int tegra_rce_cam_deassert_resets(struct device *dev); + +static const char * const sce_reset_names[] = { + "nvidia,reset-group-1", + "nvidia,reset-group-2", + NULL, +}; + +static const char * const sce_reg_names[] = { + "sce-pm", + "sce-cfg", + NULL +}; + +static const struct tegra_cam_rtcpu_pdata sce_pdata = { + .name = "sce", + .wait_for_idle = tegra_sce_cam_wait_for_idle, + .assert_resets = tegra_sce_cam_assert_resets, + .deassert_resets = tegra_sce_cam_deassert_resets, + .reset_names = sce_reset_names, + .reg_names = sce_reg_names, +}; + +static const char * const rce_reset_names[] = { + "reset-names", /* all named resets */ + NULL, +}; + +/* SCE and RCE share the PM regs */ +static const char * const rce_reg_names[] = { + "rce-pm", + NULL, +}; + +static const struct tegra_cam_rtcpu_pdata rce_pdata = { + .name = "rce", + .wait_for_idle = tegra_rce_cam_wait_for_idle, + .assert_resets = tegra_rce_cam_assert_resets, + .deassert_resets = tegra_rce_cam_deassert_resets, + .reset_names = rce_reset_names, + .reg_names = rce_reg_names, +}; + +#define NV(p) "nvidia," #p + +struct tegra_cam_rtcpu { + const char *name; + struct tegra_ivc_bus *ivc; + struct device_dma_parameters dma_parms; + struct camrtc_hsp *hsp; + struct 
tegra_rtcpu_trace *tracer; + struct tegra_rtcpu_coverage *coverage; + u32 cmd_timeout; + u32 fw_version; + u8 fw_hash[RTCPU_FW_HASH_SIZE]; + struct { + u64 reset_complete; + u64 boot_handshake; + } stats; + union { + void __iomem *regs[CAMRTC_NUM_REGS]; + struct { + void __iomem *pm_base; + void __iomem *cfg_base; + }; + }; + struct camrtc_clk_group *clocks; + struct camrtc_reset_group *resets[CAMRTC_NUM_RESETS]; + const struct tegra_cam_rtcpu_pdata *pdata; + struct camrtc_device_group *camera_devices; +#if IS_ENABLED(CONFIG_INTERCONNECT) + struct icc_path *icc_path; + u32 mem_bw; +#endif +#if IS_ENABLED(CONFIG_TEGRA_BWMGR) + struct tegra_bwmgr_client *bwmgr; + unsigned long full_bw; +#endif + struct tegra_camrtc_mon *monitor; + u32 max_reboot_retry; + bool powered; + bool boot_sync_done; + bool fw_active; + bool online; +}; + +static void __iomem *tegra_cam_ioremap(struct device *dev, int index) +{ + struct resource mem; + int err = of_address_to_resource(dev->of_node, index, &mem); + if (err) + return IOMEM_ERR_PTR(err); + + /* NOTE: assumes size is large enough for caller */ + return devm_ioremap_resource(dev, &mem); +} + +static void __iomem *tegra_cam_ioremap_byname(struct device *dev, + const char *name) +{ + int index = of_property_match_string(dev->of_node, "reg-names", name); + if (index < 0) + return IOMEM_ERR_PTR(-ENOENT); + return tegra_cam_ioremap(dev, index); +} + +static int tegra_camrtc_get_resources(struct device *dev) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + const struct tegra_cam_rtcpu_pdata *pdata = rtcpu->pdata; + struct camrtc_device_group *devgrp; + int i, err; + + rtcpu->clocks = camrtc_clk_group_get(dev); + if (IS_ERR(rtcpu->clocks)) { + err = PTR_ERR(rtcpu->clocks); + if (err == -EPROBE_DEFER) + dev_info(dev, "defer %s probe because of %s\n", + rtcpu->name, "clocks"); + else + dev_warn(dev, "clocks not available: %d\n", err); + return err; + } + + devgrp = camrtc_device_group_get(dev, "nvidia,camera-devices", + "nvidia,camera-device-names"); + if (!IS_ERR(devgrp)) { + rtcpu->camera_devices = devgrp; + } else { + err = PTR_ERR(devgrp); + if (err == -EPROBE_DEFER) + return err; + if (err != -ENODATA && err != -ENOENT) + dev_warn(dev, "get %s: failed: %d\n", + "nvidia,camera-devices", err); + } + +#define GET_RESOURCES(_res_, _get_, _null_, _toerr) \ + for (i = 0; i < ARRAY_SIZE(rtcpu->_res_##s); i++) { \ + if (!pdata->_res_##_names[i]) \ + break; \ + rtcpu->_res_##s[i] = _get_(dev, pdata->_res_##_names[i]); \ + err = _toerr(rtcpu->_res_##s[i]); \ + if (err == 0) \ + continue; \ + rtcpu->_res_##s[i] = _null_; \ + if (err == -EPROBE_DEFER) { \ + dev_info(dev, "defer %s probe because %s %s\n", \ + rtcpu->name, #_res_, pdata->_res_##_names[i]); \ + return err; \ + } \ + if (err != -ENODATA && err != -ENOENT) \ + dev_warn(dev, "%s %s not available: %d\n", #_res_, \ + pdata->_res_##_names[i], err); \ + } + +#define _PTR2ERR(x) (IS_ERR(x) ? 
PTR_ERR(x) : 0)
+
+	GET_RESOURCES(reset, camrtc_reset_group_get, NULL, _PTR2ERR);
+	GET_RESOURCES(reg, tegra_cam_ioremap_byname, NULL, _PTR2ERR);
+
+#undef _PTR2ERR
+
+	if (rtcpu->resets[0] == NULL) {
+		struct camrtc_reset_group *resets;
+
+		resets = camrtc_reset_group_get(dev, NULL);
+
+		if (!IS_ERR(resets))
+			rtcpu->resets[0] = resets;
+		else if (PTR_ERR(resets) == -EPROBE_DEFER) {
+			dev_info(dev, "defer %s probe because of %s\n",
+				rtcpu->name, "resets");
+			return -EPROBE_DEFER;
+		}
+	}
+
+	return 0;
+}
+
+static int tegra_camrtc_enable_clks(struct device *dev)
+{
+	struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev);
+
+	return camrtc_clk_group_enable(rtcpu->clocks);
+}
+
+static void tegra_camrtc_disable_clks(struct device *dev)
+{
+	struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev);
+
+	camrtc_clk_group_disable(rtcpu->clocks);
+}
+
+static void tegra_camrtc_assert_resets(struct device *dev)
+{
+	struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev);
+
+	if (rtcpu->pdata->assert_resets)
+		rtcpu->pdata->assert_resets(dev);
+}
+
+static int tegra_camrtc_deassert_resets(struct device *dev)
+{
+	struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev);
+	int ret = 0;
+
+	if (rtcpu->pdata->deassert_resets) {
+		ret = rtcpu->pdata->deassert_resets(dev);
+		rtcpu->stats.reset_complete = ktime_get_ns();
+		rtcpu->stats.boot_handshake = 0;
+	}
+
+	return ret;
+}
+
+#define CAMRTC_MAX_BW (0xFFFFFFFFU)
+
+#if IS_ENABLED(CONFIG_INTERCONNECT)
+
+#define RCE_MAX_BW_MBPS (160)
+
+static void tegra_camrtc_init_icc(struct device *dev, u32 bw)
+{
+	struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev);
+
+	if (bw == CAMRTC_MAX_BW)
+		rtcpu->mem_bw = MBps_to_icc(RCE_MAX_BW_MBPS);
+	else
+		rtcpu->mem_bw = bw;
+
+	rtcpu->icc_path = icc_get(dev, TEGRA_ICC_RCE, TEGRA_ICC_PRIMARY);
+
+	if (IS_ERR_OR_NULL(rtcpu->icc_path)) {
+		dev_warn(dev, "no interconnect control\n");
+		rtcpu->icc_path = NULL;
+		return;
+	}
+
+	dev_dbg(dev, "using icc rate %u for power-on\n", rtcpu->mem_bw);
+}
+#endif
+
+#if IS_ENABLED(CONFIG_TEGRA_BWMGR)
+static void tegra_camrtc_init_bwmgr(struct device *dev, u32 bw)
+{
+	struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev);
+
+	if (bw == CAMRTC_MAX_BW)
+		rtcpu->full_bw = tegra_bwmgr_get_max_emc_rate();
+	else
+		rtcpu->full_bw = tegra_bwmgr_round_rate(bw);
+
+	rtcpu->bwmgr = tegra_bwmgr_register(TEGRA_BWMGR_CLIENT_CAMRTC);
+
+	if (IS_ERR_OR_NULL(rtcpu->bwmgr)) {
+		dev_warn(dev, "no memory bw manager\n");
+		rtcpu->bwmgr = NULL;
+		return;
+	}
+
+	dev_dbg(dev, "using emc rate %lu for power-on\n", rtcpu->full_bw);
+}
+#endif
+
+static void tegra_camrtc_init_membw(struct device *dev)
+{
+	u32 bw = CAMRTC_MAX_BW;
+
+	if (of_property_read_u32(dev->of_node, "nvidia,memory-bw", &bw) != 0) {
+		/* No nvidia,memory-bw property: skip bandwidth management */
+	} else if (tegra_get_chip_id() == TEGRA234) {
+#if IS_ENABLED(CONFIG_INTERCONNECT)
+		tegra_camrtc_init_icc(dev, bw);
+#endif
+	} else {
+#if IS_ENABLED(CONFIG_TEGRA_BWMGR)
+		tegra_camrtc_init_bwmgr(dev, bw);
+#endif
+	}
+}
+
+static void tegra_camrtc_full_mem_bw(struct device *dev)
+{
+	struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev);
+
+#if IS_ENABLED(CONFIG_INTERCONNECT)
+	if (rtcpu->icc_path != NULL) {
+		int ret = icc_set_bw(rtcpu->icc_path, 0, rtcpu->mem_bw);
+
+		if (ret)
+			dev_err(dev, "set icc bw [%u] failed: %d\n", rtcpu->mem_bw, ret);
+		else
+			dev_dbg(dev, "requested icc bw %u\n", rtcpu->mem_bw);
+	}
+#endif
+
+#if IS_ENABLED(CONFIG_TEGRA_BWMGR)
+	if (rtcpu->bwmgr != NULL) {
+		int ret = tegra_bwmgr_set_emc(rtcpu->bwmgr, rtcpu->full_bw,
+				TEGRA_BWMGR_SET_EMC_FLOOR);
+		if (ret < 0)
dev_info(dev, "emc request rate %lu failed, %d\n", + rtcpu->full_bw, ret); + else + dev_dbg(dev, "requested emc rate %lu\n", + rtcpu->full_bw); + } +#endif +} + +static void tegra_camrtc_slow_mem_bw(struct device *dev) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + +#if IS_ENABLED(CONFIG_INTERCONNECT) + if (rtcpu->icc_path != NULL) + (void)icc_set_bw(rtcpu->icc_path, 0, 0); +#endif + +#if IS_ENABLED(CONFIG_TEGRA_BWMGR) + if (rtcpu->bwmgr != NULL) + (void)tegra_bwmgr_set_emc(rtcpu->bwmgr, 0, + TEGRA_BWMGR_SET_EMC_FLOOR); +#endif +} + +static void tegra_camrtc_set_fwloaddone(struct device *dev, bool fwloaddone) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + + if (rtcpu->pm_base != NULL) { + u32 val = readl(rtcpu->pm_base + TEGRA_PM_R5_CTRL_0); + + if (fwloaddone) + val |= TEGRA_PM_FWLOADDONE; + else + val &= ~TEGRA_PM_FWLOADDONE; + + writel(val, rtcpu->pm_base + TEGRA_PM_R5_CTRL_0); + } +} + +static int tegra_sce_cam_deassert_resets(struct device *dev) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + int err; + + err = camrtc_reset_group_deassert(rtcpu->resets[0]); + if (err) + return err; + + /* Configure R5 core */ + if (rtcpu->cfg_base != NULL) { + u32 val = readl(rtcpu->cfg_base + TEGRA_APS_FRSC_SC_CTL_0); + + if (val != TEGRA_R5R_SC_DISABLE) { + /* Disable R5R and smartcomp in camera mode */ + writel(TEGRA_R5R_SC_DISABLE, + rtcpu->cfg_base + TEGRA_APS_FRSC_SC_CTL_0); + + /* Enable JTAG/Coresight */ + writel(TEGRA_FN_MODEIN, + rtcpu->cfg_base + TEGRA_APS_FRSC_SC_MODEIN_0); + } + } + + /* Group 2 */ + err = camrtc_reset_group_deassert(rtcpu->resets[1]); + if (err) + return err; + + /* Group 3: nCPUHALT controlled by PM, not by CAR. */ + tegra_camrtc_set_fwloaddone(dev, true); + + return 0; +} + +static void tegra_sce_cam_assert_resets(struct device *dev) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + + tegra_camrtc_set_fwloaddone(dev, false); + + camrtc_reset_group_assert(rtcpu->resets[1]); + camrtc_reset_group_assert(rtcpu->resets[0]); +} + +static int tegra_sce_cam_wait_for_idle(struct device *dev) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + long timeout = rtcpu->cmd_timeout; + long delay_stride = HZ / 50; + + if (rtcpu->pm_base == NULL) + return 0; + + /* Poll for WFI assert.*/ + for (;;) { + u32 val = readl(rtcpu->pm_base + TEGRA_PM_PWR_STATUS_0); + + if ((val & TEGRA_PM_WFIPIPESTOPPED) == 0) + break; + + if (timeout < 0) { + dev_warn(dev, "timeout waiting for WFI\n"); + return -EBUSY; + } + + msleep(delay_stride); + timeout -= delay_stride; + } + + return 0; +} + +static int tegra_rce_cam_wait_for_idle(struct device *dev) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + long timeout = rtcpu->cmd_timeout; + long delay_stride = HZ / 50; + + if (rtcpu->pm_base == NULL) + return 0; + + /* Poll for WFI assert.*/ + for (;;) { + u32 val = readl(rtcpu->pm_base + TEGRA_PM_PWR_STATUS_0); + + if ((val & TEGRA_PM_WFIPIPESTOPPED) == 0) + break; + + if (timeout < 0) { + dev_info(dev, "timeout waiting for WFI\n"); + return -EBUSY; + } + + msleep(delay_stride); + timeout -= delay_stride; + } + + return 0; +} + +static int tegra_rce_cam_deassert_resets(struct device *dev) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + int err; + + err = camrtc_reset_group_deassert(rtcpu->resets[0]); + if (err) + return err; + + /* nCPUHALT is a reset controlled by PM, not by CAR. 
*/ + tegra_camrtc_set_fwloaddone(dev, true); + + return 0; +} + +static void tegra_rce_cam_assert_resets(struct device *dev) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + + camrtc_reset_group_assert(rtcpu->resets[0]); +} + +static int tegra_camrtc_wait_for_idle(struct device *dev) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + + return rtcpu->pdata->wait_for_idle(dev); +} + +static int tegra_camrtc_fw_suspend(struct device *dev) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + + if (!rtcpu->fw_active || !rtcpu->hsp) + return 0; + + rtcpu->fw_active = false; + + return camrtc_hsp_suspend(rtcpu->hsp); +} + +static int tegra_camrtc_setup_shared_memory(struct device *dev) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + int ret; + + /* + * Set-up trace + */ + ret = tegra_rtcpu_trace_boot_sync(rtcpu->tracer); + if (ret < 0) + dev_err(dev, "trace boot sync failed: %d\n", ret); + + /* + * Set-up coverage buffer + */ + ret = tegra_rtcpu_coverage_boot_sync(rtcpu->coverage); + if (ret < 0) { + /* + * Not a fatal error, don't stop the sync. + * But go ahead and remove the coverage debug FS + * entries and release the memory. + */ + tegra_rtcpu_coverage_destroy(rtcpu->coverage); + rtcpu->coverage = NULL; + } + + /* + * Set-up and activate the IVC services in firmware + */ + ret = tegra_ivc_bus_boot_sync(rtcpu->ivc); + if (ret < 0) + dev_err(dev, "ivc-bus boot sync failed: %d\n", ret); + + return ret; +} + +static void tegra_camrtc_set_online(struct device *dev, bool online) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + + if (online == rtcpu->online) + return; + + if (online) { + if (tegra_camrtc_setup_shared_memory(dev) < 0) + return; + } + + /* Postpone the online transition if still probing */ + if (!IS_ERR_OR_NULL(rtcpu->ivc)) { + rtcpu->online = online; + tegra_ivc_bus_ready(rtcpu->ivc, online); + } +} + +int tegra_camrtc_ping(struct device *dev, u32 data, long timeout) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + + return camrtc_hsp_ping(rtcpu->hsp, data, timeout); +} +EXPORT_SYMBOL(tegra_camrtc_ping); + +static void tegra_camrtc_ivc_notify(struct device *dev, u16 group) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + + if (rtcpu->ivc) + tegra_ivc_bus_notify(rtcpu->ivc, group); +} + +void tegra_camrtc_ivc_ring(struct device *dev, u16 group) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + + camrtc_hsp_group_ring(rtcpu->hsp, group); +} +EXPORT_SYMBOL(tegra_camrtc_ivc_ring); + +static int tegra_camrtc_poweron(struct device *dev, bool full_speed) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + int ret; + + if (rtcpu->powered) { + if (full_speed) + camrtc_clk_group_adjust_fast(rtcpu->clocks); + return 0; + } + + /* Power on and let core run */ + ret = tegra_camrtc_enable_clks(dev); + if (ret) { + dev_err(dev, "failed to turn on %s clocks: %d\n", + rtcpu->name, ret); + return ret; + } + + if (full_speed) + camrtc_clk_group_adjust_fast(rtcpu->clocks); + + ret = tegra_camrtc_deassert_resets(dev); + if (ret) + return ret; + + rtcpu->powered = true; + + return 0; +} + +static void tegra_camrtc_poweroff(struct device *dev) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + + if (!rtcpu->powered) + return; + + rtcpu->powered = false; + + tegra_camrtc_assert_resets(dev); + tegra_camrtc_disable_clks(dev); +} + +static int tegra_camrtc_boot_sync(struct device *dev) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + int ret; + + if (!rtcpu->boot_sync_done) { + ret = 
camrtc_hsp_sync(rtcpu->hsp); + if (ret < 0) + return ret; + + rtcpu->fw_version = ret; + rtcpu->boot_sync_done = true; + } + + if (!rtcpu->fw_active) { + ret = camrtc_hsp_resume(rtcpu->hsp); + if (ret < 0) + return ret; + + rtcpu->fw_active = true; + } + + return 0; +} + +/* + * RTCPU boot sequence + */ +static int tegra_camrtc_boot(struct device *dev) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + int retry = 0, max_retries = rtcpu->max_reboot_retry; + int ret; + + ret = tegra_camrtc_poweron(dev, true); + if (ret) + return ret; + + tegra_camrtc_full_mem_bw(dev); + + for (;;) { + ret = tegra_camrtc_boot_sync(dev); + + tegra_camrtc_set_online(dev, ret == 0); + + if (ret == 0) + break; + if (retry++ == max_retries) + break; + if (retry > 1) { + dev_warn(dev, "%s full reset, retry %u/%u\n", + rtcpu->name, retry, max_retries); + tegra_camrtc_assert_resets(dev); + usleep_range(10, 30); + tegra_camrtc_deassert_resets(dev); + } + } + + tegra_camrtc_slow_mem_bw(dev); + + return 0; +} + +int tegra_camrtc_iovm_setup(struct device *dev, dma_addr_t iova) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + + return camrtc_hsp_ch_setup(rtcpu->hsp, iova); +} +EXPORT_SYMBOL(tegra_camrtc_iovm_setup); + +ssize_t tegra_camrtc_print_version(struct device *dev, + char *buf, size_t size) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + struct seq_buf s; + int i; + + seq_buf_init(&s, buf, size); + seq_buf_printf(&s, "version cpu=%s cmd=%u sha1=", + rtcpu->name, rtcpu->fw_version); + + for (i = 0; i < RTCPU_FW_HASH_SIZE; i++) + seq_buf_printf(&s, "%02x", rtcpu->fw_hash[i]); + + return seq_buf_used(&s); +} +EXPORT_SYMBOL(tegra_camrtc_print_version); + +static void tegra_camrtc_log_fw_version(struct device *dev) +{ + char version[TEGRA_CAMRTC_VERSION_LEN]; + + tegra_camrtc_print_version(dev, version, sizeof(version)); + + dev_info(dev, "firmware %s\n", version); +} + +static int tegra_cam_rtcpu_runtime_suspend(struct device *dev) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + int err; + + err = tegra_camrtc_fw_suspend(dev); + /* Try full reset if an error occurred while suspending core. 
*/ + if (err < 0) { + + dev_info(dev, "RTCPU suspend failed, resetting it"); + + /* runtime_resume() powers RTCPU back on */ + tegra_camrtc_poweroff(dev); + + /* We want to boot sync IVC and trace when resuming */ + tegra_camrtc_set_online(dev, false); + } + + camrtc_clk_group_adjust_slow(rtcpu->clocks); + + return 0; +} + +static int tegra_cam_rtcpu_runtime_resume(struct device *dev) +{ + return tegra_camrtc_boot(dev); +} + +static int tegra_cam_rtcpu_runtime_idle(struct device *dev) +{ + pm_runtime_mark_last_busy(dev); + + return 0; +} + +static int tegra_camrtc_hsp_init(struct device *dev) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + int err; + + if (!IS_ERR_OR_NULL(rtcpu->hsp)) + return 0; + + rtcpu->hsp = camrtc_hsp_create(dev, tegra_camrtc_ivc_notify, + rtcpu->cmd_timeout); + if (IS_ERR(rtcpu->hsp)) { + err = PTR_ERR(rtcpu->hsp); + rtcpu->hsp = NULL; + return err; + } + + return 0; +} + +static int tegra_cam_rtcpu_remove(struct platform_device *pdev) +{ + struct tegra_cam_rtcpu *rtcpu = platform_get_drvdata(pdev); + bool online = rtcpu->online; + bool pm_is_active = pm_runtime_active(&pdev->dev); + + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + + tegra_camrtc_set_online(&pdev->dev, false); + + if (rtcpu->hsp) { + if (pm_is_active) + tegra_cam_rtcpu_runtime_suspend(&pdev->dev); + if (online) + camrtc_hsp_bye(rtcpu->hsp); + camrtc_hsp_free(rtcpu->hsp); + rtcpu->hsp = NULL; + } + + tegra_rtcpu_trace_destroy(rtcpu->tracer); + rtcpu->tracer = NULL; + tegra_rtcpu_coverage_destroy(rtcpu->coverage); + rtcpu->coverage = NULL; + + tegra_camrtc_poweroff(&pdev->dev); +#if IS_ENABLED(CONFIG_TEGRA_BWMGR) + if (rtcpu->bwmgr != NULL) + tegra_bwmgr_unregister(rtcpu->bwmgr); + rtcpu->bwmgr = NULL; +#endif +#if IS_ENABLED(CONFIG_INTERCONNECT) + icc_put(rtcpu->icc_path); + rtcpu->icc_path = NULL; +#endif + pm_genpd_remove_device(&pdev->dev); + tegra_cam_rtcpu_mon_destroy(rtcpu->monitor); + tegra_ivc_bus_destroy(rtcpu->ivc); + + pdev->dev.dma_parms = NULL; + + return 0; +} + +static struct device *s_dev; + +static int tegra_cam_rtcpu_probe(struct platform_device *pdev) +{ + struct tegra_cam_rtcpu *rtcpu; + const struct tegra_cam_rtcpu_pdata *pdata; + struct device *dev = &pdev->dev; + int ret; + const char *name; + uint32_t timeout; + + pdata = of_device_get_match_data(dev); + if (pdata == NULL) { + dev_err(dev, "no device match\n"); + return -ENODEV; + } + + name = pdata->name; + of_property_read_string(dev->of_node, "nvidia,cpu-name", &name); + + dev_dbg(dev, "probing RTCPU on %s\n", name); + + rtcpu = devm_kzalloc(dev, sizeof(*rtcpu), GFP_KERNEL); + if (rtcpu == NULL) + return -ENOMEM; + + rtcpu->pdata = pdata; + rtcpu->name = name; + platform_set_drvdata(pdev, rtcpu); + + (void) dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + + /* Enable runtime power management */ + pm_runtime_enable(dev); + + ret = tegra_camrtc_get_resources(dev); + if (ret) + goto fail; + + rtcpu->max_reboot_retry = 3; + (void)of_property_read_u32(dev->of_node, NV(max-reboot), + &rtcpu->max_reboot_retry); + timeout = 2000; +#if 0 /* disabled for OOT kernel */ + if (tegra_platform_is_vdk()) + timeout = 5000; +#endif + + (void)of_property_read_u32(dev->of_node, "nvidia,cmd-timeout", &timeout); + + rtcpu->cmd_timeout = msecs_to_jiffies(timeout); + + timeout = 60000; + ret = of_property_read_u32(dev->of_node, NV(autosuspend-delay-ms), &timeout); + if (ret == 0) { + pm_runtime_use_autosuspend(dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, timeout); + } + + 
tegra_camrtc_init_membw(dev); + + dev->dma_parms = &rtcpu->dma_parms; + dma_set_max_seg_size(dev, UINT_MAX); + + rtcpu->tracer = tegra_rtcpu_trace_create(dev, rtcpu->camera_devices); + + rtcpu->coverage = tegra_rtcpu_coverage_create(dev); + + ret = tegra_camrtc_hsp_init(dev); + if (ret) + goto fail; + + /* Power on device */ + ret = pm_runtime_get_sync(dev); + if (ret < 0) + goto fail; + + rtcpu->ivc = tegra_ivc_bus_create(dev); + if (IS_ERR(rtcpu->ivc)) { + ret = PTR_ERR(rtcpu->ivc); + rtcpu->ivc = NULL; + goto put_and_fail; + } + + rtcpu->monitor = tegra_camrtc_mon_create(dev); + if (IS_ERR(rtcpu->monitor)) { + ret = PTR_ERR(rtcpu->monitor); + goto put_and_fail; + } + + if (of_property_read_bool(dev->of_node, "nvidia,disable-runtime-pm")) + pm_runtime_get(dev); + + ret = camrtc_hsp_get_fw_hash(rtcpu->hsp, + rtcpu->fw_hash, sizeof(rtcpu->fw_hash)); + if (ret == 0) + devm_tegrafw_register(dev, + name != pdata->name ? name : "camrtc", + TFW_NORMAL, tegra_camrtc_print_version, NULL); + + tegra_camrtc_set_online(dev, true); + + pm_runtime_put(dev); + + /* Print firmware version */ + tegra_camrtc_log_fw_version(dev); + + s_dev = dev; + + dev_dbg(dev, "successfully probed RTCPU on %s\n", name); + + return 0; + +put_and_fail: + pm_runtime_dont_use_autosuspend(dev); + pm_runtime_put_sync_suspend(dev); +fail: + tegra_cam_rtcpu_remove(pdev); + return ret; +} + +int tegra_camrtc_reboot(struct device *dev) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + + if (pm_runtime_suspended(dev)) { + dev_info(dev, "cannot reboot while suspended\n"); + return -EIO; + } + + if (!rtcpu->powered) + return -EIO; + + rtcpu->boot_sync_done = false; + rtcpu->fw_active = false; + + pm_runtime_mark_last_busy(dev); + + tegra_camrtc_set_online(dev, false); + + tegra_camrtc_assert_resets(dev); + + rtcpu->powered = false; + + return tegra_camrtc_boot(dev); +} +EXPORT_SYMBOL(tegra_camrtc_reboot); + +int tegra_camrtc_restore(struct device *dev) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + + if (rtcpu->monitor) + return tegra_camrtc_mon_restore_rtcpu(rtcpu->monitor); + else + return tegra_camrtc_reboot(dev); +} +EXPORT_SYMBOL(tegra_camrtc_restore); + +bool tegra_camrtc_is_rtcpu_alive(struct device *dev) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + + return rtcpu->online; +} +EXPORT_SYMBOL(tegra_camrtc_is_rtcpu_alive); + +bool tegra_camrtc_is_rtcpu_powered(void) +{ + struct tegra_cam_rtcpu *rtcpu; + + if (s_dev) { + rtcpu = dev_get_drvdata(s_dev); + return rtcpu->powered; + } + + return false; +} +EXPORT_SYMBOL(tegra_camrtc_is_rtcpu_powered); + +void tegra_camrtc_flush_trace(struct device *dev) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + + tegra_rtcpu_trace_flush(rtcpu->tracer); +} +EXPORT_SYMBOL(tegra_camrtc_flush_trace); + +static int tegra_camrtc_halt(struct device *dev) +{ + struct tegra_cam_rtcpu *rtcpu = dev_get_drvdata(dev); + bool online = rtcpu->online; + int err = 0; + + tegra_camrtc_set_online(dev, false); + + if (!rtcpu->powered) + return 0; + + if (!pm_runtime_suspended(dev)) + /* Tell CAMRTC that we power down camera devices */ + err = tegra_camrtc_fw_suspend(dev); + + if (online && rtcpu->hsp && err == 0) + /* Tell CAMRTC that shared memory is going away */ + err = camrtc_hsp_bye(rtcpu->hsp); + + if (err == 0) + /* Don't bother to check for WFI if core is unresponsive */ + tegra_camrtc_wait_for_idle(dev); + + tegra_camrtc_poweroff(dev); + + /* Ensure peer sync happens on system wakeup */ + rtcpu->fw_active = false; + rtcpu->boot_sync_done = false; + + 
return 0;
+}
+
+static int tegra_camrtc_resume(struct device *dev)
+{
+	int err;
+
+	err = tegra_camrtc_poweron(dev, false);
+
+	if (err == 0)
+		err = tegra_camrtc_boot(dev);
+
+	return err;
+}
+
+static void tegra_cam_rtcpu_shutdown(struct platform_device *pdev)
+{
+	tegra_camrtc_halt(&pdev->dev);
+}
+
+static const struct of_device_id tegra_cam_rtcpu_of_match[] = {
+	{
+		.compatible = NV(tegra186-sce-ivc), .data = &sce_pdata
+	},
+	{
+		.compatible = NV(tegra194-rce), .data = &rce_pdata
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tegra_cam_rtcpu_of_match);
+
+static const struct dev_pm_ops tegra_cam_rtcpu_pm_ops = {
+	.suspend = tegra_camrtc_halt,
+	.resume = tegra_camrtc_resume,
+	.runtime_suspend = tegra_cam_rtcpu_runtime_suspend,
+	.runtime_resume = tegra_cam_rtcpu_runtime_resume,
+	.runtime_idle = tegra_cam_rtcpu_runtime_idle,
+};
+
+static struct platform_driver tegra_cam_rtcpu_driver = {
+	.driver = {
+		.name = "tegra186-cam-rtcpu",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(tegra_cam_rtcpu_of_match),
+#ifdef CONFIG_PM
+		.pm = &tegra_cam_rtcpu_pm_ops,
+#endif
+	},
+	.probe = tegra_cam_rtcpu_probe,
+	.remove = tegra_cam_rtcpu_remove,
+	.shutdown = tegra_cam_rtcpu_shutdown,
+};
+module_platform_driver(tegra_cam_rtcpu_driver);
+
+MODULE_DESCRIPTION("CAMERA RTCPU driver");
+MODULE_AUTHOR("NVIDIA");
+MODULE_LICENSE("GPL v2");
diff --git a/include/linux/tegra-camera-rtcpu.h b/include/linux/tegra-camera-rtcpu.h
new file mode 100644
index 00000000..4fa1a9b1
--- /dev/null
+++ b/include/linux/tegra-camera-rtcpu.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ */
+
+#ifndef _LINUX_TEGRA_CAMERA_RTCPU_H_
+#define _LINUX_TEGRA_CAMERA_RTCPU_H_
+
+#include
+
+struct device;
+
+int tegra_camrtc_iovm_setup(struct device *dev, dma_addr_t iova);
+ssize_t tegra_camrtc_print_version(struct device *dev, char *buf, size_t size);
+int tegra_camrtc_reboot(struct device *dev);
+int tegra_camrtc_restore(struct device *dev);
+bool tegra_camrtc_is_rtcpu_alive(struct device *dev);
+void tegra_camrtc_flush_trace(struct device *dev);
+
+bool tegra_camrtc_is_rtcpu_powered(void);
+
+#define TEGRA_CAMRTC_VERSION_LEN 128
+
+int tegra_camrtc_ping(struct device *dev, u32 data, long timeout);
+void tegra_camrtc_ivc_ring(struct device *dev, u16 group);
+
+#endif
diff --git a/include/linux/tegra-capture-ivc.h b/include/linux/tegra-capture-ivc.h
new file mode 100644
index 00000000..ac0c04b5
--- /dev/null
+++ b/include/linux/tegra-capture-ivc.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ */
+
+#ifndef INCLUDE_CAPTURE_IVC_H
+#define INCLUDE_CAPTURE_IVC_H
+
+#include
+
+/**
+ * @brief Submit the control message binary blob to the capture-IVC driver,
+ * to be transferred over the control IVC channel to RTCPU.
+ *
+ * @param[in]	control_desc	binary blob containing the control message
+ *				descriptor; it is opaque to the capture-IVC
+ *				driver.
+ * @param[in]	len		size of control_desc.
+ *
+ * @returns	0 (success), neg. errno (failure)
+ */
+int tegra_capture_ivc_control_submit(
+	const void *control_desc,
+	size_t len);
+
+/**
+ * @brief Submit the capture message binary blob to the capture-IVC driver,
+ * to be transferred over the capture IVC channel to RTCPU.
+ *
+ * @param[in]	capture_desc	binary blob containing the capture message
+ *				descriptor; it is opaque to KMDs.
+ * @param[in]	len		size of capture_desc.
+ *
+ * @returns	0 (success), neg. errno (failure)
+ */
+int tegra_capture_ivc_capture_submit(
+	const void *capture_desc,
+	size_t len);
+
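+/*
+ * Minimal usage sketch; struct my_control_desc, fill_my_control_desc() and
+ * the dev pointer are hypothetical, and the descriptor layout itself is
+ * defined by the RTCPU ABI rather than by this header:
+ *
+ *	struct my_control_desc desc;
+ *	int err;
+ *
+ *	fill_my_control_desc(&desc);
+ *	err = tegra_capture_ivc_control_submit(&desc, sizeof(desc));
+ *	if (err)
+ *		dev_err(dev, "control submit failed: %d\n", err);
+ */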
+/**
+ * @brief Callback function to be registered by a client to receive RTCPU
+ * notifications through the control or capture IVC channel.
+ *
+ * @param[in]	resp_desc	binary blob containing the response message
+ *				received from RTCPU through the control or
+ *				capture IVC channel; it is opaque to KMDs.
+ * @param[in]	priv_context	client's private context, opaque to the
+ *				capture-IVC driver.
+ */
+typedef void (*tegra_capture_ivc_cb_func)(
+	const void *resp_desc,
+	const void *priv_context);
+
+/**
+ * @brief Register a callback function to receive response messages from
+ * RTCPU through the control IVC channel.
+ *
+ * @param[in]	control_resp_cb	callback function to be registered for
+ *				the control IVC channel.
+ * @param[out]	trans_id	temporary id assigned by the capture-IVC
+ *				driver to clients whose unique chan_id is
+ *				not yet allocated by RTCPU, used to match
+ *				their responses with the requests.
+ * @param[in]	priv_context	client's private context, opaque to the
+ *				capture-IVC driver.
+ *
+ * @returns	0 (success), neg. errno (failure)
+ */
+int tegra_capture_ivc_register_control_cb(
+	tegra_capture_ivc_cb_func control_resp_cb,
+	uint32_t *trans_id,
+	const void *priv_context);
+
+/**
+ * @brief Notify the client's channel ID to the capture-IVC driver.
+ * Once a client gets the newly allocated channel ID from RTCPU, it must
+ * also notify the capture-IVC driver, so that the driver can replace the
+ * temporary ID trans_id with the new channel ID chan_id in its internal
+ * context. The IVC driver uses this unique channel ID to map upcoming
+ * responses to the client requests.
+ *
+ * @param[in]	chan_id		new channel id allocated by RTCPU for the
+ *				client, used by the capture-IVC driver to
+ *				refer to the client for its future control
+ *				responses.
+ * @param[in]	trans_id	temporary id assigned by the capture-IVC
+ *				driver for the client.
+ *
+ * @returns	0 (success), neg. errno (failure)
+ */
+int tegra_capture_ivc_notify_chan_id(
+	uint32_t chan_id,
+	uint32_t trans_id);
+
+/**
+ * @brief Register a callback function to receive status-indication messages
+ * from RTCPU through the capture IVC channel.
+ *
+ * @param[in]	capture_status_ind_cb	callback function to be registered
+ *				for the capture IVC channel.
+ * @param[in]	chan_id		client's channel id, used by the capture-IVC
+ *				driver to refer to the client for its capture
+ *				responses.
+ * @param[in]	priv_context	client's private context, opaque to the
+ *				capture-IVC driver.
+ *
+ * @returns	0 (success), neg. errno (failure)
+ */
+int tegra_capture_ivc_register_capture_cb(
+	tegra_capture_ivc_cb_func capture_status_ind_cb,
+	uint32_t chan_id,
+	const void *priv_context);
+
+/**
+ * @brief Unregister a callback function to stop receiving messages over
+ * the control IVC channel.
+ *
+ * @param[in]	id	client's channel id or transaction id for which the
+ *			callback needs to be unregistered.
+ *
+ * @returns	0 (success), neg. errno (failure)
+ */
+int tegra_capture_ivc_unregister_control_cb(
+	uint32_t id);
+
+/**
+ * @brief Unregister a callback function to stop receiving messages over
+ * the capture IVC channel.
+ *
+ * @param[in]	chan_id	client's channel id for which the callback needs to
+ *			be unregistered.
+ *
+ * @returns	0 (success), neg. errno (failure)
+ */
+int tegra_capture_ivc_unregister_capture_cb(
+	uint32_t chan_id);
+
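+/*
+ * Sketch of the expected call order for a hypothetical client (my_ctrl_cb,
+ * my_capture_cb and my_client are illustrative names): register the control
+ * callback under a temporary trans_id, trade it for the RTCPU-allocated
+ * chan_id, then register the capture callback under that chan_id:
+ *
+ *	uint32_t trans_id, chan_id;
+ *	int err;
+ *
+ *	err = tegra_capture_ivc_register_control_cb(my_ctrl_cb,
+ *			&trans_id, my_client);
+ *	(request a channel from RTCPU; the control response delivered
+ *	 to my_ctrl_cb carries the newly allocated chan_id)
+ *	err = tegra_capture_ivc_notify_chan_id(chan_id, trans_id);
+ *	err = tegra_capture_ivc_register_capture_cb(my_capture_cb,
+ *			chan_id, my_client);
+ *	...
+ *	tegra_capture_ivc_unregister_capture_cb(chan_id);
+ *	tegra_capture_ivc_unregister_control_cb(chan_id);
+ */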
+#endif /* INCLUDE_CAPTURE_IVC_H */
diff --git a/include/linux/tegra-ivc-bus.h b/include/linux/tegra-ivc-bus.h
new file mode 100644
index 00000000..d0332566
--- /dev/null
+++ b/include/linux/tegra-ivc-bus.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ */
+
+#ifndef _LINUX_TEGRA_IVC_BUS_H
+#define _LINUX_TEGRA_IVC_BUS_H
+
+#include
+#include
+
+extern struct bus_type tegra_ivc_bus_type;
+extern struct device_type tegra_ivc_bus_dev_type;
+struct tegra_ivc_bus;
+struct tegra_ivc_rpc_data;
+
+struct tegra_ivc_bus *tegra_ivc_bus_create(struct device *);
+
+void tegra_ivc_bus_ready(struct tegra_ivc_bus *bus, bool online);
+void tegra_ivc_bus_destroy(struct tegra_ivc_bus *bus);
+int tegra_ivc_bus_boot_sync(struct tegra_ivc_bus *bus);
+void tegra_ivc_bus_notify(struct tegra_ivc_bus *bus, u16 group);
+
+struct tegra_ivc_driver {
+	struct device_driver driver;
+	struct device_type *dev_type;
+	union {
+		const struct tegra_ivc_channel_ops *channel;
+	} ops;
+};
+
+static inline struct tegra_ivc_driver *to_tegra_ivc_driver(
+	struct device_driver *drv)
+{
+	if (drv == NULL)
+		return NULL;
+	return container_of(drv, struct tegra_ivc_driver, driver);
+}
+
+int tegra_ivc_driver_register(struct tegra_ivc_driver *drv);
+void tegra_ivc_driver_unregister(struct tegra_ivc_driver *drv);
+#define tegra_ivc_module_driver(drv) \
+	module_driver(drv, tegra_ivc_driver_register, \
+			tegra_ivc_driver_unregister)
+
+#define tegra_ivc_subsys_driver(__driver, __register, __unregister, ...) \
+static int __init __driver##_init(void) \
+{ \
+	return __register(&(__driver), ##__VA_ARGS__); \
+} \
+subsys_initcall_sync(__driver##_init);
+
+#define tegra_ivc_subsys_driver_default(__driver) \
+tegra_ivc_subsys_driver(__driver, \
+	tegra_ivc_driver_register, \
+	tegra_ivc_driver_unregister)
+
+/* IVC channel driver support */
+extern struct device_type tegra_ivc_channel_type;
+
+struct tegra_ivc_channel {
+	struct ivc ivc;
+	struct device dev;
+	const struct tegra_ivc_channel_ops __rcu *ops;
+	struct tegra_ivc_channel *next;
+	struct mutex ivc_wr_lock;
+	struct tegra_ivc_rpc_data *rpc_priv;
+	atomic_t bus_resets;
+	u16 group;
+	bool is_ready;
+};
+
+static inline bool tegra_ivc_channel_online_check(
+	struct tegra_ivc_channel *chan)
+{
+	atomic_set(&chan->bus_resets, 0);
+
+	smp_wmb();
+	smp_rmb();
+
+	return chan->is_ready;
+}
+
+static inline bool tegra_ivc_channel_has_been_reset(
+	struct tegra_ivc_channel *chan)
+{
+	smp_rmb();
+	return atomic_read(&chan->bus_resets) != 0;
+}
+
+static inline void *tegra_ivc_channel_get_drvdata(
+	struct tegra_ivc_channel *chan)
+{
+	return dev_get_drvdata(&chan->dev);
+}
+
+static inline void tegra_ivc_channel_set_drvdata(
+	struct tegra_ivc_channel *chan, void *data)
+{
+	dev_set_drvdata(&chan->dev, data);
+}
+
+static inline struct tegra_ivc_channel *to_tegra_ivc_channel(
+	struct device *dev)
+{
+	return container_of(dev, struct tegra_ivc_channel, dev);
+}
+
+static inline struct device *tegra_ivc_channel_to_camrtc_dev(
+	struct tegra_ivc_channel *ch)
+{
+	if (unlikely(ch == NULL))
+		return NULL;
+
+	BUG_ON(ch->dev.parent == NULL);
+	BUG_ON(ch->dev.parent->parent == NULL);
+
+	return ch->dev.parent->parent;
+}
+
+int tegra_ivc_channel_runtime_get(struct tegra_ivc_channel *chan);
+void tegra_ivc_channel_runtime_put(struct tegra_ivc_channel *chan);
+
+struct tegra_ivc_channel_ops {
+	int (*probe)(struct tegra_ivc_channel *);
+	void (*ready)(struct tegra_ivc_channel *, bool online);
+	void (*remove)(struct tegra_ivc_channel *);
+	void (*notify)(struct tegra_ivc_channel *);
+};
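+
+/*
+ * Minimal sketch of a channel driver built on these hooks; the name
+ * "my-ivc-service", the my_* callbacks and my_state are hypothetical,
+ * and registration (via tegra_ivc_module_driver below) is assumed to
+ * attach the driver to tegra_ivc_bus_type:
+ *
+ *	static int my_probe(struct tegra_ivc_channel *ch)
+ *	{
+ *		tegra_ivc_channel_set_drvdata(ch, my_state);
+ *		return 0;
+ *	}
+ *
+ *	static const struct tegra_ivc_channel_ops my_ops = {
+ *		.probe = my_probe,
+ *		.remove = my_remove,
+ *		.notify = my_notify,
+ *	};
+ *
+ *	static struct tegra_ivc_driver my_driver = {
+ *		.driver = {
+ *			.name = "my-ivc-service",
+ *			.owner = THIS_MODULE,
+ *		},
+ *		.dev_type = &tegra_ivc_channel_type,
+ *		.ops.channel = &my_ops,
+ *	};
+ *	tegra_ivc_module_driver(my_driver);
+ */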
+
+/* Legacy mailbox support */
+struct tegra_ivc_mbox_msg {
+	int length;
+	void *data;
+};
+
+#endif
diff --git a/include/linux/tegra-rtcpu-monitor.h b/include/linux/tegra-rtcpu-monitor.h
new file mode 100644
index 00000000..6e8ed5e9
--- /dev/null
+++ b/include/linux/tegra-rtcpu-monitor.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ */
+
+#ifndef INCLUDE_RTCPU_MONITOR_H
+#define INCLUDE_RTCPU_MONITOR_H
+
+struct device;
+struct tegra_camrtc_mon;
+
+int tegra_camrtc_mon_restore_rtcpu(struct tegra_camrtc_mon *);
+struct tegra_camrtc_mon *tegra_camrtc_mon_create(struct device *);
+int tegra_cam_rtcpu_mon_destroy(struct tegra_camrtc_mon *);
+
+#endif /* INCLUDE_RTCPU_MONITOR_H */
diff --git a/include/linux/tegra-rtcpu-trace.h b/include/linux/tegra-rtcpu-trace.h
new file mode 100644
index 00000000..96884ad0
--- /dev/null
+++ b/include/linux/tegra-rtcpu-trace.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ */
+
+#ifndef _LINUX_TEGRA_RTCPU_TRACE_H_
+#define _LINUX_TEGRA_RTCPU_TRACE_H_
+
+#include
+
+struct tegra_rtcpu_trace;
+struct camrtc_device_group;
+
+struct tegra_rtcpu_trace *tegra_rtcpu_trace_create(
+	struct device *dev,
+	struct camrtc_device_group *camera_devices);
+int tegra_rtcpu_trace_boot_sync(struct tegra_rtcpu_trace *tracer);
+void tegra_rtcpu_trace_flush(struct tegra_rtcpu_trace *tracer);
+void tegra_rtcpu_trace_destroy(struct tegra_rtcpu_trace *tracer);
+
+#endif
diff --git a/include/soc/tegra/camrtc-channels.h b/include/soc/tegra/camrtc-channels.h
new file mode 100644
index 00000000..55922659
--- /dev/null
+++ b/include/soc/tegra/camrtc-channels.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ */
+
+/**
+ * @file camrtc-channels.h
+ *
+ * @brief RCE channel setup tags & structures.
+ */
+
+#ifndef INCLUDE_CAMRTC_CHANNELS_H
+#define INCLUDE_CAMRTC_CHANNELS_H
+
+#include "camrtc-common.h"
+
+/**
+ * @defgroup RceTags RCE tags
+ *
+ * All the enums and the fields inside the structs described in this header
+ * file support only uintX_t types, where X can be 8, 16, 32, or 64.
+ * @{
+ */
+#define CAMRTC_TAG64(s0, s1, s2, s3, s4, s5, s6, s7) ( \
+	((uint64_t)(s0) << 0U) | ((uint64_t)(s1) << 8U) | \
+	((uint64_t)(s2) << 16U) | ((uint64_t)(s3) << 24U) | \
+	((uint64_t)(s4) << 32U) | ((uint64_t)(s5) << 40U) | \
+	((uint64_t)(s6) << 48U) | ((uint64_t)(s7) << 56U))
+
+#define CAMRTC_TAG_IVC_SETUP	CAMRTC_TAG64('I', 'V', 'C', '-', 'S', 'E', 'T', 'U')
+#define CAMRTC_TAG_NV_TRACE	CAMRTC_TAG64('N', 'V', ' ', 'T', 'R', 'A', 'C', 'E')
+#define CAMRTC_TAG_NV_CAM_TRACE	CAMRTC_TAG64('N', 'V', ' ', 'C', 'A', 'M', 'T', 'R')
+#define CAMRTC_TAG_NV_COVERAGE	CAMRTC_TAG64('N', 'V', ' ', 'C', 'O', 'V', 'E', 'R')
+/** @} */
+
+/**
+ * @brief RCE Tag, length, and value (TLV)
+ */
+struct camrtc_tlv {
+	/** Command tag. See @ref RceTags "RCE Tags" */
+	uint64_t tag;
+	/** Length of the tag specific data */
+	uint64_t len;
+};
+
+/**
+ * @brief Setup TLV for IVC
+ *
+ * Multiple setup structures can follow each other.
+ */
+struct camrtc_tlv_ivc_setup {
+	/** Command tag. See @ref RceTags "RCE Tags" */
+	uint64_t tag;
+	/** Length of the tag specific data */
+	uint64_t len;
+	/** Base address of write header. RX from CCPLEX point of view */
+	uint64_t rx_iova;
+	/** Size of IVC write frame */
+	uint32_t rx_frame_size;
+	/** Number of IVC write frames */
+	uint32_t rx_nframes;
+	/** Base address of read header. TX from CCPLEX point of view */
+	uint64_t tx_iova;
+	/** Size of IVC read frame */
+	uint32_t tx_frame_size;
+	/** Number of IVC read frames */
+	uint32_t tx_nframes;
+	/** IVC channel group */
+	uint32_t channel_group;
+	/** IVC version */
+	uint32_t ivc_version;
+	/** IVC service name */
+	char ivc_service[32];
+};
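+
+/*
+ * Sketch of one setup TLV as a VM might fill it before handing the memory
+ * over with a CH_SETUP command (sizes, counts and the "echo" service name
+ * are illustrative):
+ *
+ *	struct camrtc_tlv_ivc_setup tlv = {
+ *		.tag = CAMRTC_TAG_IVC_SETUP,
+ *		.len = sizeof(tlv),
+ *		.rx_iova = rx_iova,
+ *		.rx_frame_size = 64,
+ *		.rx_nframes = 16,
+ *		.tx_iova = tx_iova,
+ *		.tx_frame_size = 64,
+ *		.tx_nframes = 16,
+ *		.channel_group = 1,
+ *		.ivc_version = 1,
+ *		.ivc_service = "echo",
+ *	};
+ *
+ * Since setup structures can follow each other, a parser can walk the
+ * memory using each entry's leading tag/len pair (struct camrtc_tlv).
+ */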
+
+/**
+ * @defgroup CamRTCChannelErrors Channel setup error codes
+ * @{
+ */
+#define RTCPU_CH_SUCCESS		MK_U32(0)
+#define RTCPU_CH_ERR_NO_SERVICE		MK_U32(128)
+#define RTCPU_CH_ERR_ALREADY		MK_U32(129)
+#define RTCPU_CH_ERR_UNKNOWN_TAG	MK_U32(130)
+#define RTCPU_CH_ERR_INVALID_IOVA	MK_U32(131)
+#define RTCPU_CH_ERR_INVALID_PARAM	MK_U32(132)
+/** @} */
+
+/**
+ * @brief Code coverage memory header
+ */
+struct camrtc_coverage_memory_header {
+	/** Code coverage tag. Should be CAMRTC_TAG_NV_COVERAGE */
+	uint64_t signature;
+	/** Size of camrtc_coverage_memory_header */
+	uint64_t length;
+	/** Header revision */
+	uint32_t revision;
+	/** Size of the coverage memory buffer */
+	uint32_t coverage_buffer_size;
+	/** Bytes of coverage data currently in the memory buffer */
+	uint32_t coverage_total_bytes;
+	/** Reserved */
+	uint32_t reserved;
+};
+
+#endif /* INCLUDE_CAMRTC_CHANNELS_H */
diff --git a/include/soc/tegra/camrtc-commands.h b/include/soc/tegra/camrtc-commands.h
new file mode 100644
index 00000000..f6121256
--- /dev/null
+++ b/include/soc/tegra/camrtc-commands.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ */
+
+/**
+ * @file camrtc-commands.h
+ *
+ * @brief Commands used with the "nvidia,tegra-camrtc-hsp-vm" &
+ * "nvidia,tegra-hsp-mailbox" protocols
+ */
+
+#ifndef INCLUDE_CAMRTC_COMMANDS_H
+#define INCLUDE_CAMRTC_COMMANDS_H
+
+#include "camrtc-common.h"
+
+/**
+ * @defgroup HspVmMsgs Definitions for "nvidia,tegra-camrtc-hsp-vm" protocol
+ * @{
+ */
+#define CAMRTC_HSP_MSG(_id, _param) ( \
+	((uint32_t)(_id) << MK_U32(24)) | \
+	((uint32_t)(_param) & MK_U32(0xffffff)))
+#define CAMRTC_HSP_MSG_ID(_msg) \
+	(((_msg) >> MK_U32(24)) & MK_U32(0x7f))
+#define CAMRTC_HSP_MSG_PARAM(_msg) \
+	((uint32_t)(_msg) & MK_U32(0xffffff))
+/*
+ * A message is a single 32-bit word: bits 30..24 carry the message ID
+ * and bits 23..0 the parameter (bit 31 is left unused by these macros).
+ */
+
+/**
+ * The IRQ message is sent when no other HSP-VM protocol message is being sent
+ * (i.e. the messages for higher level protocols implementing HSP such as IVC
+ * channel protocol) and the sender has updated its shared semaphore bits.
+ */
+#define CAMRTC_HSP_IRQ MK_U32(0x00)
+
+/**
+ * The HELLO messages are exchanged at the beginning of a VM/RCE FW session.
+ * The HELLO message exchange ensures there are no unprocessed messages
+ * in transit within VM or RCE FW.
+ */
+#define CAMRTC_HSP_HELLO MK_U32(0x40)
+/**
+ * VM session close is indicated using the BYE message;
+ * RCE FW then reclaims the resources assigned to the given VM.
+ * It must be sent before the Camera VM shuts itself down.
+ */
+#define CAMRTC_HSP_BYE MK_U32(0x41)
+/**
+ * The RESUME message is sent when the VM wants to activate the RCE FW
+ * and access the camera hardware through it.
+ */
+#define CAMRTC_HSP_RESUME MK_U32(0x42)
+/**
+ * Power off camera HW, switch to idle state. VM initiates it during runtime suspend or SC7.
+ */ +#define CAMRTC_HSP_SUSPEND MK_U32(0x43) +/** + * Used to set up a shared memory area (such as IVC channels, trace buffer etc) + * between Camera VM and RCE FW. + */ +#define CAMRTC_HSP_CH_SETUP MK_U32(0x44) +/** + * The Camera VM can use the PING message to check aliveness of RCE FW and the HSP protocol. + */ +#define CAMRTC_HSP_PING MK_U32(0x45) +/** + * SHA1 hash code for RCE FW binary. + */ +#define CAMRTC_HSP_FW_HASH MK_U32(0x46) +/** + * The VM includes its protocol version as a parameter to PROTOCOL message. + * FW responds with its protocol version, or RTCPU_FW_INVALID_VERSION + * if the VM protocol is not supported. + */ +#define CAMRTC_HSP_PROTOCOL MK_U32(0x47) +#define CAMRTC_HSP_RESERVED_5E MK_U32(0x5E) /* bug 200395605 */ +#define CAMRTC_HSP_UNKNOWN MK_U32(0x7F) + +/** Shared semaphore bits (FW->VM) */ +#define CAMRTC_HSP_SS_FW_MASK MK_U32(0xFFFF) +#define CAMRTC_HSP_SS_FW_SHIFT MK_U32(0) + +/** Shared semaphore bits (VM->FW) */ +#define CAMRTC_HSP_SS_VM_MASK MK_U32(0x7FFF0000) +#define CAMRTC_HSP_SS_VM_SHIFT MK_U32(16) + +/** Bits used by IVC channels */ +#define CAMRTC_HSP_SS_IVC_MASK MK_U32(0xFF) + +/** @} */ + +/** + * @defgroup HspMailboxMsgs Definitions for "nvidia,tegra-hsp-mailbox" protocol + * @{ + */ +#define RTCPU_COMMAND(id, value) \ + (((RTCPU_CMD_ ## id) << MK_U32(24)) | ((uint32_t)value)) + +#define RTCPU_GET_COMMAND_ID(value) \ + ((((uint32_t)value) >> MK_U32(24)) & MK_U32(0x7f)) + +#define RTCPU_GET_COMMAND_VALUE(value) \ + (((uint32_t)value) & MK_U32(0xffffff)) +/** + * RCE FW waits until VM client initiates boot sync with INIT HSP command. + */ +#define RTCPU_CMD_INIT MK_U32(0) +/** + * VM client sends host version and expects RCE FW to respond back with + * current FW version, as part of boot sync. + */ +#define RTCPU_CMD_FW_VERSION MK_U32(1) +#define RTCPU_CMD_RESERVED_02 MK_U32(2) +#define RTCPU_CMD_RESERVED_03 MK_U32(3) +/** + * Release RCE FW resources assigned to given VM client, during runtime suspend or SC7. + */ +#define RTCPU_CMD_PM_SUSPEND MK_U32(4) +#define RTCPU_CMD_RESERVED_05 MK_U32(5) +/** + * Used to set up a shared memory area (such as IVC channels, trace buffer etc) + * between Camera VM and RCE FW. 
+ */ +#define RTCPU_CMD_CH_SETUP MK_U32(6) +#define RTCPU_CMD_RESERVED_5E MK_U32(0x5E) /* bug 200395605 */ +#define RTCPU_CMD_RESERVED_7D MK_U32(0x7d) +#define RTCPU_CMD_RESERVED_7E MK_U32(0x7e) +#define RTCPU_CMD_ERROR MK_U32(0x7f) + +#define RTCPU_FW_DB_VERSION MK_U32(0) +#define RTCPU_FW_VERSION MK_U32(1) +#define RTCPU_FW_SM2_VERSION MK_U32(2) +#define RTCPU_FW_SM3_VERSION MK_U32(3) +/** SM4 firmware can restore itself after suspend */ +#define RTCPU_FW_SM4_VERSION MK_U32(4) + +/** SM5 firmware supports IVC synchronization */ +#define RTCPU_FW_SM5_VERSION MK_U32(5) +/** SM5 driver supports IVC synchronization */ +#define RTCPU_DRIVER_SM5_VERSION MK_U32(5) + +/** SM6 firmware/driver supports camrtc-hsp-vm protocol */ +#define RTCPU_FW_SM6_VERSION MK_U32(6) +#define RTCPU_DRIVER_SM6_VERSION MK_U32(6) + +#define RTCPU_IVC_SANS_TRACE MK_U32(1) +#define RTCPU_IVC_WITH_TRACE MK_U32(2) + +#define RTCPU_FW_HASH_SIZE MK_U32(20) + +#define RTCPU_FW_HASH_ERROR MK_U32(0xFFFFFF) + +#define RTCPU_PM_SUSPEND_SUCCESS MK_U32(0x100) +#define RTCPU_PM_SUSPEND_FAILURE MK_U32(0x001) + +#define RTCPU_FW_CURRENT_VERSION RTCPU_FW_SM6_VERSION + +#define RTCPU_FW_INVALID_VERSION MK_U32(0xFFFFFF) + +#define RTCPU_RESUME_ERROR MK_U32(0xFFFFFF) + +/** @} */ + +#endif /* INCLUDE_CAMRTC_COMMANDS_H */ diff --git a/include/soc/tegra/camrtc-common.h b/include/soc/tegra/camrtc-common.h new file mode 100644 index 00000000..63dd4cc4 --- /dev/null +++ b/include/soc/tegra/camrtc-common.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +/** + * @file camrtc-common.h + * + * @brief RCE common header file + */ + +#ifndef INCLUDE_CAMRTC_COMMON_H +#define INCLUDE_CAMRTC_COMMON_H + +#if defined(__KERNEL__) +#include +#include +#define CAMRTC_PACKED __packed +#define CAMRTC_ALIGN __aligned +#else +#include +#include +#ifndef CAMRTC_PACKED +#define CAMRTC_PACKED __attribute__((packed)) +#endif +#ifndef CAMRTC_ALIGN +#define CAMRTC_ALIGN(_n) __attribute__((aligned(_n))) +#endif +#ifndef U64_C +#define U64_C(_x_) ((uint64_t)(_x_##ULL)) +#endif +#ifndef U32_C +#define U32_C(_x_) ((uint32_t)(_x_##UL)) +#endif +#ifndef U16_C +#define U16_C(_x_) ((uint16_t)(_x_##U)) +#endif +#ifndef U8_C +#define U8_C(_x_) ((uint8_t)(_x_##U)) +#endif +#endif + +/** + * @defgroup MK_xxx Macros for defining constants + * + * These macros are used to define constants in the camera/firmware-api + * headers. + * + * The user of the header files can predefine them and override the + * types of the constants. + * + * @{ + */ +#ifndef MK_U64 +#define MK_U64(_x_) U64_C(_x_) +#endif + +#ifndef MK_U32 +#define MK_U32(_x_) U32_C(_x_) +#endif + +#ifndef MK_U16 +#define MK_U16(_x_) U16_C(_x_) +#endif + +#ifndef MK_U8 +#define MK_U8(_x_) U8_C(_x_) +#endif + +#ifndef MK_BIT32 +#define MK_BIT32(_x_) (MK_U32(1) << MK_U32(_x_)) +#endif + +#ifndef MK_BIT64 +#define MK_BIT64(_x_) (MK_U64(1) << MK_U64(_x_)) +#endif + +#ifndef MK_ALIGN +#define MK_ALIGN(_x_) _x_ +#endif + +#ifndef MK_SIZE +#define MK_SIZE(_x_) MK_U32(_x_) +#endif + +/** @} */ + +#endif /* INCLUDE_CAMRTC_COMMON_H */ diff --git a/include/soc/tegra/camrtc-dbg-messages.h b/include/soc/tegra/camrtc-dbg-messages.h new file mode 100644 index 00000000..b5f29113 --- /dev/null +++ b/include/soc/tegra/camrtc-dbg-messages.h @@ -0,0 +1,408 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ */
+
+#ifndef INCLUDE_CAMRTC_DBG_MESSAGES_H
+#define INCLUDE_CAMRTC_DBG_MESSAGES_H
+
+#include "camrtc-common.h"
+
+#pragma GCC diagnostic error "-Wpadded"
+
+/*
+ * Message identifiers.
+ */
+#define CAMRTC_REQ_PING			MK_U32(0x01) /* Ping request. */
+#define CAMRTC_REQ_PM_SLEEP		MK_U32(0x02) /* Never implemented */
+#define CAMRTC_REQ_MODS_TEST		MK_U32(0x03) /* Run MODS test */
+#define CAMRTC_REQ_SET_LOGLEVEL		MK_U32(0x04) /* Set log level */
+#define CAMRTC_REQ_LOGLEVEL		CAMRTC_REQ_SET_LOGLEVEL
+#define CAMRTC_REQ_RTOS_STATE		MK_U32(0x05) /* Get FreeRTOS state */
+#define CAMRTC_REQ_READ_MEMORY_32BIT	MK_U32(0x06) /* Read memory */
+#define CAMRTC_REQ_READ_MEMORY		MK_U32(0x07)
+#define CAMRTC_REQ_SET_PERF_COUNTERS	MK_U32(0x08) /* ARM Performance counter */
+#define CAMRTC_REQ_GET_PERF_COUNTERS	MK_U32(0x09)
+#define CAMRTC_REQ_GET_LOGLEVEL		MK_U32(0x0A)
+#define CAMRTC_REQ_RUN_TEST		MK_U32(0x0B) /* Run functional test (obsolete) */
+#define CAMRTC_REQ_GET_TASK_STAT	MK_U32(0x0C)
+#define CAMRTC_REQ_ENABLE_VI_STAT	MK_U32(0x0D)
+#define CAMRTC_REQ_GET_VI_STAT		MK_U32(0x0E)
+#define CAMRTC_REQ_GET_MEM_USAGE	MK_U32(0x0F)
+#define CAMRTC_REQ_RUN_MEM_TEST		MK_U32(0x10) /* Run functional test */
+#define CAMRTC_REQ_GET_IRQ_STAT		MK_U32(0x11)
+#define CAMRTC_REQ_SET_FALCON_COVERAGE	MK_U32(0x12)
+#define CAMRTC_REQ_GET_COVERAGE_SUPPORT	MK_U32(0x13)
+#define CAMRTC_REQUEST_TYPE_MAX		MK_U32(0x14)
+
+/* MODS test cases */
+#define CAMRTC_MODS_TEST_BASIC	MK_U32(0x00) /* Basic MODS tests */
+#define CAMRTC_MODS_TEST_DMA	MK_U32(0x01) /* MODS DMA test */
+
+/* Deprecated */
+#define CAMRTC_RESP_PONG		CAMRTC_REQ_PING
+#define CAMRTC_RESP_PM_SLEEP		CAMRTC_REQ_PM_SLEEP
+#define CAMRTC_RESP_MODS_RESULT		CAMRTC_REQ_MODS_TEST
+#define CAMRTC_RESP_LOGLEVEL		CAMRTC_REQ_SET_LOGLEVEL
+#define CAMRTC_RESP_RTOS_STATE		CAMRTC_REQ_RTOS_STATE
+#define CAMRTC_RESP_READ_MEMORY_32BIT	CAMRTC_REQ_READ_MEMORY_32BIT
+#define CAMRTC_RESP_READ_MEMORY		CAMRTC_REQ_READ_MEMORY
+#define CAMRTC_RESP_SET_PERF_COUNTERS	CAMRTC_REQ_SET_PERF_COUNTERS
+#define CAMRTC_RESP_GET_PERF_COUNTERS	CAMRTC_REQ_GET_PERF_COUNTERS
+
+/* Return statuses */
+#define CAMRTC_STATUS_OK		MK_U32(0)
+#define CAMRTC_STATUS_ERROR		MK_U32(1) /* Generic error */
+#define CAMRTC_STATUS_REQ_UNKNOWN	MK_U32(2) /* Unknown req_type */
+#define CAMRTC_STATUS_NOT_IMPLEMENTED	MK_U32(3) /* Request not implemented */
+#define CAMRTC_STATUS_INVALID_PARAM	MK_U32(4) /* Invalid parameter */
+
+#define CAMRTC_DBG_FRAME_SIZE		MK_U32(448)
+#define CAMRTC_DBG_MAX_DATA		MK_U32(440)
+#define CAMRTC_DBG_TASK_STAT_MAX	MK_U32(16)
+
+/*
+ * This struct is used to query or set the wake timeout for the target.
+ * Fields:
+ *	force_entry: when set, forces the target to sleep for a set time
+ */
+struct camrtc_pm_data {
+	uint32_t force_entry;
+};
+
+/* This struct is used to send the parameters for the MODS test to be run
+ * on the target.
+ * Fields:
+ *	mods_case:	MODS test case to run (CAMRTC_MODS_TEST_*)
+ *	mods_loops:	number of times the MODS test should be run
+ *	mods_dma_channels: DMA channels to use for the MODS DMA test
+ */
+struct camrtc_mods_data {
+	uint32_t mods_case;
+	uint32_t mods_loops;
+	uint32_t mods_dma_channels;
+};
+
+/* This struct carries the ping payload.
+ * Fields:
+ *	ts_req:	requestor timestamp
+ *	ts_resp: response timestamp
+ *	data:	payload buffer (uint8_t)
+ */
+struct camrtc_ping_data {
+	uint64_t ts_req;	/* requestor timestamp */
+	uint64_t ts_resp;	/* response timestamp */
+	uint8_t data[64];	/* data */
+};
+
+struct camrtc_log_data {
+	uint32_t level;
+};
+
+struct camrtc_rtos_state_data {
+	uint8_t rtos_state[CAMRTC_DBG_MAX_DATA]; /* string data */
+};
+
+/* This structure is used to read 32-bit data from the firmware address space.
+ * Fields:
+ *	addr: address to read from; must be 4-byte aligned
+ *	data: 32-bit value read from memory
+ */
+struct camrtc_dbg_read_memory_32bit {
+	uint32_t addr;
+};
+
+struct camrtc_dbg_read_memory_32bit_result {
+	uint32_t data;
+};
+
+#define CAMRTC_DBG_READ_MEMORY_COUNT_MAX MK_U32(256)
+
+/* This structure is used to read memory in the firmware address space.
+ * Fields:
+ *	addr: starting address; no alignment requirement
+ *	count: number of bytes to read, limited to CAMRTC_DBG_READ_MEMORY_COUNT_MAX
+ *	data: contents read from memory
+ */
+struct camrtc_dbg_read_memory {
+	uint32_t addr;
+	uint32_t count;
+};
+
+struct camrtc_dbg_read_memory_result {
+	uint8_t data[CAMRTC_DBG_READ_MEMORY_COUNT_MAX];
+};
+
+#define CAMRTC_DBG_MAX_PERF_COUNTERS MK_U32(31)
+
+/* This structure is used to set the event type that each performance counter
+ * will monitor. This does not include the fixed performance counter. If there
+ * are 4 counters available, only 3 of them are configurable.
+ * Fields:
+ *	number: Number of performance counters to set.
+ *		This excludes a fixed performance counter: the cycle counter
+ *	do_reset: Whether to reset counters
+ *	cycle_counter_div64: Whether to enable the cycle counter divider
+ *	events: Event type to monitor
+ */
+struct camrtc_dbg_set_perf_counters {
+	uint32_t number;
+	uint32_t do_reset;
+	uint32_t cycle_counter_div64;
+	uint32_t events[CAMRTC_DBG_MAX_PERF_COUNTERS];
+};
+
+/* This structure is used to get performance counters.
+ * Fields:
+ *	number: Number of performance counters.
+ *		This includes a fixed performance counter: the cycle counter
+ *	counters: Descriptors of event counters. First entry is for the cycle
+ *		counter.
+ *		event: Event type that the value represents.
+ *			For the first entry, this field is don't care.
+ *		value: Value of the performance counter.
+ *	cycle_counter_div64: Nonzero if the cycle counter divider is active
+ */
+struct camrtc_dbg_get_perf_counters_result {
+	uint32_t number;
+	struct {
+		uint32_t event;
+		uint32_t value;
+	} counters[CAMRTC_DBG_MAX_PERF_COUNTERS];
+	uint32_t cycle_counter_div64;
+};
+
+
+#define CAMRTC_DBG_MAX_TEST_DATA (CAMRTC_DBG_MAX_DATA - sizeof(uint64_t))
+
+/* This structure is used to pass textual input data to a functional test
+ * case and get back the test output, including the verdict.
+ *
+ * Fields:
+ *	timeout: maximum time the test may run, in nanoseconds
+ *	data: textual data (e.g., test name, verdict)
+ */
+struct camrtc_dbg_run_test_data {
+	uint64_t timeout;	/* Time in nanoseconds */
+	char data[CAMRTC_DBG_MAX_TEST_DATA];
+};
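+
+/*
+ * Sketch of configuring two configurable counters (the event numbers 0x03
+ * and 0x04 are illustrative ARM PMU event IDs, not defined in this header):
+ *
+ *	struct camrtc_dbg_set_perf_counters cfg = {
+ *		.number = 2,
+ *		.do_reset = 1,
+ *		.cycle_counter_div64 = 0,
+ *		.events = { 0x03, 0x04 },
+ *	};
+ *
+ * Note that .number excludes the fixed cycle counter; the matching
+ * camrtc_dbg_get_perf_counters_result reports the cycle counter in
+ * counters[0] followed by the configured events.
+ */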
+
+/* Number of memory areas */
+#define CAMRTC_DBG_NUM_MEM_TEST_MEM MK_U32(8)
+
+#define CAMRTC_DBG_MAX_MEM_TEST_DATA (\
+	CAMRTC_DBG_MAX_DATA \
+	- sizeof(uint64_t) - sizeof(struct camrtc_dbg_streamids) \
+	- (sizeof(struct camrtc_dbg_test_mem) * CAMRTC_DBG_NUM_MEM_TEST_MEM))
+
+struct camrtc_dbg_test_mem {
+	uint32_t size;
+	uint32_t page_size;
+	uint64_t phys_addr;
+	uint64_t rtcpu_iova;
+	uint64_t vi_iova;
+	uint64_t vi2_iova;
+	uint64_t isp_iova;
+};
+
+struct camrtc_dbg_streamids {
+	uint8_t rtcpu;
+	uint8_t vi;
+	uint8_t vi2;
+	uint8_t isp;
+};
+
+/* This structure is used to pass memory areas and textual input data to
+ * a functional test case and get back the test output, including the
+ * verdict.
+ *
+ * Fields:
+ *	timeout: maximum time the test may run, in nanoseconds
+ *	mem[]: address and size of memory areas passed to the test
+ *	data: textual data (e.g., test name, verdict)
+ */
+struct camrtc_dbg_run_mem_test_data {
+	uint64_t timeout;	/* Time in nanoseconds */
+	struct camrtc_dbg_test_mem mem[CAMRTC_DBG_NUM_MEM_TEST_MEM];
+	struct camrtc_dbg_streamids streamids;
+	char data[CAMRTC_DBG_MAX_MEM_TEST_DATA];
+};
+
+/* This structure is used to get information on system tasks.
+ * Fields:
+ *	n_task: number of reported tasks
+ *	total_count: total runtime
+ *	task: array of reported tasks
+ *		id: task name
+ *		count: runtime allocated to task
+ *		number: unique task number
+ *		priority: priority of task when this structure was populated
+ */
+struct camrtc_dbg_task_stat {
+	uint32_t n_task;
+	uint32_t total_count;
+	struct {
+		uint32_t id[2];
+		uint32_t count;
+		uint32_t number;
+		uint32_t priority;
+	} task[CAMRTC_DBG_TASK_STAT_MAX];
+};
+
+/* Limit for default CAMRTC_DBG_FRAME_SIZE */
+#define CAMRTC_DBG_NUM_IRQ_STAT MK_U32(11)
+
+/*
+ * This structure is used to get information on interrupts.
+ *
+ * Fields:
+ *	n_active: number of active interrupts
+ *	total_called: total number of interrupts handled
+ *	total_runtime: total runtime
+ *	n_irq: number of reported interrupts
+ *	irqs: array of reported interrupts
+ *		irq_num: irq number
+ *		num_called: times this interrupt has been handled
+ *		runtime: runtime for this interrupt
+ *		name: name of the interrupt (may not be NUL-terminated)
+ */
+struct camrtc_dbg_irq_stat {
+	uint32_t n_active;
+	uint32_t n_irq;
+	uint64_t total_called;
+	uint64_t total_runtime;
+	struct {
+		uint32_t irq_num;
+		char name[12];
+		uint64_t runtime;
+		uint32_t max_runtime;
+		uint32_t num_called;
+	} irqs[CAMRTC_DBG_NUM_IRQ_STAT];
+};
+
+/* This structure is used to enable or disable collection of VI message
+ * statistics.
+ * Fields:
+ *	enable: enable/disable collecting VI message statistics
+ */
+struct camrtc_dbg_enable_vi_stat {
+	uint32_t enable;
+};
+
+/* This structure is used to get VI message statistics.
+ * Fields:
+ *	avg: running average of VI message latency.
+ *	max: maximum VI message latency observed so far.
+ */
+struct camrtc_dbg_vi_stat {
+	uint32_t avg;
+	uint32_t max;
+};
+
+/* This structure is used to get memory usage.
+ * Fields:
+ *	text: code memory usage
+ *	bss: global/static memory usage.
+ *	data: global/static memory usage.
+ *	heap: heap memory usage.
+ *	stack: cpu stack memory usage.
+ *	free_mem: remaining free memory.
+ */
+struct camrtc_dbg_mem_usage {
+	uint32_t text;
+	uint32_t bss;
+	uint32_t data;
+	uint32_t heap;
+	uint32_t stack;
+	uint32_t free_mem;
+};
+
+#define CAMRTC_DBG_FALCON_ID_VI		MK_U32(0x00)
+#define CAMRTC_DBG_FALCON_ID_ISP	MK_U32(0x80)
+
+/* This structure is used to set falcon code coverage configuration data.
+ * Fields:
+ *   falcon_id: which falcon to set up the coverage for.
+ *   flush: flush coverage data action bit.
+ *   reset: reset coverage data action bit. If flush is also set, flush runs first.
+ *   size: size of the coverage data buffer.
+ *   iova: address of the coverage data buffer in the falcon IOVA space.
+ *
+ * NOTE: Setting iova and/or size to 0 will disable coverage.
+ */
+struct camrtc_dbg_coverage_data {
+	uint8_t falcon_id;
+	uint8_t flush;
+	uint8_t reset;
+	uint8_t pad__;
+	uint32_t size;
+	uint64_t iova;
+};
+
+/* This structure is used to report code coverage status.
+ * Fields:
+ *   falcon_id: which falcon the status is for
+ *   enabled: coverage output is configured properly and enabled
+ *   full: coverage output buffer is full
+ *   bytes_written: bytes written to the buffer so far.
+ */
+struct camrtc_dbg_coverage_stat {
+	uint8_t falcon_id;
+	uint8_t enabled;
+	uint8_t full;
+	uint8_t pad__;
+	uint32_t bytes_written;
+};
+
+/* This struct encapsulates the type of the request and the data
+ * associated with that request.
+ * Fields:
+ *   req_type: indicates the type of the request, e.g. PM-related,
+ *     MODS, or ping.
+ *   data: union of the structs for all request types.
+ */
+struct camrtc_dbg_request {
+	uint32_t req_type;
+	uint32_t reserved;
+	union {
+		struct camrtc_pm_data pm_data;
+		struct camrtc_mods_data mods_data;
+		struct camrtc_ping_data ping_data;
+		struct camrtc_log_data log_data;
+		struct camrtc_dbg_read_memory_32bit rm_32bit_data;
+		struct camrtc_dbg_read_memory rm_data;
+		struct camrtc_dbg_set_perf_counters set_perf_data;
+		struct camrtc_dbg_run_test_data run_test_data;
+		struct camrtc_dbg_run_mem_test_data run_mem_test_data;
+		struct camrtc_dbg_enable_vi_stat enable_vi_stat;
+		struct camrtc_dbg_coverage_data coverage_data;
+	} data;
+};
+
+/* This struct encapsulates the type of the response and the data
+ * associated with that response.
+ * Fields:
+ *   resp_type: indicates the type of the response, e.g. PM-related,
+ *     MODS, or ping.
+ *   status: result of the request, i.e. success/failure.
+ *     In the case of MODS, this field is the test result.
+ *   data: union of the structs for all request/response types.
+ */
+struct camrtc_dbg_response {
+	uint32_t resp_type;
+	uint32_t status;
+	union {
+		struct camrtc_pm_data pm_data;
+		struct camrtc_ping_data ping_data;
+		struct camrtc_log_data log_data;
+		struct camrtc_rtos_state_data rtos_state_data;
+		struct camrtc_dbg_read_memory_32bit_result rm_32bit_data;
+		struct camrtc_dbg_read_memory_result rm_data;
+		struct camrtc_dbg_get_perf_counters_result get_perf_data;
+		struct camrtc_dbg_run_test_data run_test_data;
+		struct camrtc_dbg_run_mem_test_data run_mem_test_data;
+		struct camrtc_dbg_task_stat task_stat_data;
+		struct camrtc_dbg_vi_stat vi_stat;
+		struct camrtc_dbg_mem_usage mem_usage;
+		struct camrtc_dbg_irq_stat irq_stat;
+		struct camrtc_dbg_coverage_stat coverage_stat;
+	} data;
+};
+
+#pragma GCC diagnostic ignored "-Wpadded"
+
+#endif /* INCLUDE_CAMRTC_DBG_MESSAGES_H */
diff --git a/include/soc/tegra/camrtc-trace.h b/include/soc/tegra/camrtc-trace.h
new file mode 100644
index 00000000..5ca6c175
--- /dev/null
+++ b/include/soc/tegra/camrtc-trace.h
@@ -0,0 +1,464 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ */
+
+#ifndef INCLUDE_CAMRTC_TRACE_H
+#define INCLUDE_CAMRTC_TRACE_H
+
+#include "camrtc-common.h"
+#include "camrtc-channels.h"
+
+#pragma GCC diagnostic error "-Wpadded"
+
+/*
+ * Trace memory consists of three parts.
+ *
+ * 1. Trace memory header: describes the layout of the trace memory
+ *    and the latest activity.
+ *
+ * 2. Exception memory: an array of exception entries. Each entry
+ *    describes an exception that occurred in the firmware.
+ *
+ * 3. Event memory: an array of event entries, implemented as a
+ *    ring buffer.
+ *
+ * The next indices get updated when new messages are committed to the
+ * trace memory. Each next index points to the entry to be written at
+ * the next occurrence of an exception or event.
+ *
+ * Trace memory layout
+ *
+ * 0x00000 +-------------------------------+
+ *         |      Trace Memory Header      |
+ * 0x01000 +-------------------------------+
+ *         |                               |
+ *         |        Exception Memory       | <- exception_next_idx
+ *         |                               |
+ * 0x10000 +-------------------------------+
+ *         |                               |
+ *         |                               |
+ *         |          Event Memory         |
+ *         |                               | <- event_next_idx
+ *         |                               |
+ *         +-------------------------------+
+ */
+
+/* Offset of each memory */
+#define CAMRTC_TRACE_NEXT_IDX_SIZE	MK_SIZE(64)
+#define CAMRTC_TRACE_EXCEPTION_OFFSET	MK_U32(0x01000)
+#define CAMRTC_TRACE_EVENT_OFFSET	MK_U32(0x10000)
+
+/* Size of each entry */
+#define CAMRTC_TRACE_EXCEPTION_SIZE	MK_SIZE(1024)
+#define CAMRTC_TRACE_EVENT_SIZE		MK_SIZE(64)
+
+/* Depth of call stack */
+#define CAMRTC_TRACE_CALLSTACK_MAX	MK_SIZE(32)
+#define CAMRTC_TRACE_CALLSTACK_MIN	MK_SIZE(4)
+
+/*
+ * Trace memory header
+ */
+
+#define CAMRTC_TRACE_SIGNATURE_1	MK_U32(0x5420564e)
+#define CAMRTC_TRACE_SIGNATURE_2	MK_U32(0x45434152)
+
+#define CAMRTC_TRACE_ALIGNOF		MK_ALIGN(64)
+
+#define CAMRTC_TRACE_ALIGN	CAMRTC_ALIGN(CAMRTC_TRACE_ALIGNOF)
+
+struct camrtc_trace_memory_header {
+	/* layout: offset 0 */
+	union {
+		/*
+		 * Temporary union to provide source compatibility
+		 * during the transition to the new header format.
+		 */
+		struct camrtc_tlv tlv;
+		uint32_t signature[4] __attribute__((deprecated));
+	};
+	uint32_t revision;
+	uint32_t reserved1;
+	uint32_t exception_offset;
+	uint32_t exception_size;
+	uint32_t exception_entries;
+	uint32_t reserved2;
+	uint32_t event_offset;
+	uint32_t event_size;
+	uint32_t event_entries;
+	uint32_t reserved3;
+	uint32_t reserved4[0xc8 / 4];
+
+	/* pointer: offset 0x100 */
+	uint32_t exception_next_idx;
+	uint32_t event_next_idx;
+	uint32_t reserved_ptrs[0x38 / 4];
+} CAMRTC_TRACE_ALIGN;
+
+/*
+ * Exception entry
+ */
+/* Reset = 0 */
+#define CAMRTC_ARMV7_EXCEPTION_UNDEFINED_INSTRUCTION	MK_U32(1)
+/* SWI = 2 */
+#define CAMRTC_ARMV7_EXCEPTION_PREFETCH_ABORT	MK_U32(3)
+#define CAMRTC_ARMV7_EXCEPTION_DATA_ABORT	MK_U32(4)
+/* RSVD, IRQ, FIQ should never happen */
+#define CAMRTC_ARMV7_EXCEPTION_RSVD	MK_U32(5)
+#define CAMRTC_ARMV7_EXCEPTION_IRQ	MK_U32(6)
+#define CAMRTC_ARMV7_EXCEPTION_FIQ	MK_U32(7)
+
+struct camrtc_trace_callstack {
+	uint32_t lr_stack_addr;	/* address in stack where lr is saved */
+	uint32_t lr;		/* value of saved lr */
+};
+
+struct camrtc_trace_armv7_exception {
+	uint32_t len;	/* length in bytes, including this field */
+	uint32_t type;	/* CAMRTC_ARMV7_EXCEPTION_* above */
+	union {
+		uint32_t data[24];
+		struct {
+			uint32_t r0, r1, r2, r3;
+			uint32_t r4, r5, r6, r7;
+			uint32_t r8, r9, r10, r11;
+			uint32_t r12, sp, lr, pc;
+			uint32_t r8_prev, r9_prev, r10_prev, r11_prev, r12_prev;
+			uint32_t sp_prev, lr_prev;
+			uint32_t reserved;
+		};
+	} gpr;
+	/* program status registers */
+	uint32_t cpsr, spsr;
+	/* data fault status/address registers */
+	uint32_t dfsr, dfar, adfsr;
+	/* instruction fault status/address registers */
+	uint32_t ifsr, ifar, aifsr;
+	struct camrtc_trace_callstack callstack[CAMRTC_TRACE_CALLSTACK_MAX];
+};
+
+/*
+ * All trace events share a common header.
+ * The format of the event data is determined by the event type.
+ */ + +#define CAMRTC_TRACE_EVENT_HEADER_SIZE MK_SIZE(16) +#define CAMRTC_TRACE_EVENT_PAYLOAD_SIZE \ + (CAMRTC_TRACE_EVENT_SIZE - CAMRTC_TRACE_EVENT_HEADER_SIZE) + +#define CAMRTC_EVENT_TYPE_OFFSET MK_U32(24) +#define CAMRTC_EVENT_TYPE_MASK \ + (MK_U32(0xff) << CAMRTC_EVENT_TYPE_OFFSET) +#define CAMRTC_EVENT_TYPE_FROM_ID(id) \ + (((id) & CAMRTC_EVENT_TYPE_MASK) >> CAMRTC_EVENT_TYPE_OFFSET) + +#define CAMRTC_EVENT_MODULE_OFFSET MK_U32(16) +#define CAMRTC_EVENT_MODULE_MASK \ + (MK_U32(0xff) << CAMRTC_EVENT_MODULE_OFFSET) +#define CAMRTC_EVENT_MODULE_FROM_ID(id) \ + (((id) & CAMRTC_EVENT_MODULE_MASK) >> CAMRTC_EVENT_MODULE_OFFSET) + +#define CAMRTC_EVENT_SUBID_OFFSET MK_U32(0) +#define CAMRTC_EVENT_SUBID_MASK \ + (MK_U32(0xffff) << CAMRTC_EVENT_SUBID_OFFSET) +#define CAMRTC_EVENT_SUBID_FROM_ID(id) \ + (((id) & CAMRTC_EVENT_SUBID_MASK) >> CAMRTC_EVENT_SUBID_OFFSET) + +#define CAMRTC_EVENT_MAKE_ID(type, module, subid) \ + (((uint32_t)(type) << CAMRTC_EVENT_TYPE_OFFSET) | \ + ((uint32_t)(module) << CAMRTC_EVENT_MODULE_OFFSET) | (uint32_t)(subid)) + +struct camrtc_event_header { + uint32_t len; /* Size in bytes including this field */ + uint32_t id; /* Event ID */ + uint64_t tstamp; /* Timestamp from TKE TSC */ +}; + +struct camrtc_event_struct { + struct camrtc_event_header header; + union { + uint8_t data8[CAMRTC_TRACE_EVENT_PAYLOAD_SIZE]; + uint32_t data32[CAMRTC_TRACE_EVENT_PAYLOAD_SIZE / 4]; + } data; +}; + +// camrtc_event_type +#define CAMRTC_EVENT_TYPE_ARRAY MK_U32(0) +#define CAMRTC_EVENT_TYPE_ARMV7_EXCEPTION MK_U32(1) +#define CAMRTC_EVENT_TYPE_PAD MK_U32(2) +#define CAMRTC_EVENT_TYPE_START MK_U32(3) +#define CAMRTC_EVENT_TYPE_STRING MK_U32(4) +#define CAMRTC_EVENT_TYPE_BULK MK_U32(5) + +// camrtc_event_module +#define CAMRTC_EVENT_MODULE_UNKNOWN MK_U32(0) +#define CAMRTC_EVENT_MODULE_BASE MK_U32(1) +#define CAMRTC_EVENT_MODULE_RTOS MK_U32(2) +#define CAMRTC_EVENT_MODULE_HEARTBEAT MK_U32(3) +#define CAMRTC_EVENT_MODULE_DBG MK_U32(4) +#define CAMRTC_EVENT_MODULE_MODS MK_U32(5) +#define CAMRTC_EVENT_MODULE_VINOTIFY MK_U32(6) +#define CAMRTC_EVENT_MODULE_I2C MK_U32(7) +#define CAMRTC_EVENT_MODULE_VI MK_U32(8) +#define CAMRTC_EVENT_MODULE_ISP MK_U32(9) +#define CAMRTC_EVENT_MODULE_NVCSI MK_U32(10) +#define CAMRTC_EVENT_MODULE_CAPTURE MK_U32(11) +#define CAMRTC_EVENT_MODULE_PERF MK_U32(12) + +// camrtc_trace_event_type_ids +#define camrtc_trace_type_exception \ + CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_ARMV7_EXCEPTION, \ + CAMRTC_EVENT_MODULE_BASE, 0) +#define camrtc_trace_type_pad \ + CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_PAD, \ + CAMRTC_EVENT_MODULE_BASE, 0) +#define camrtc_trace_type_start \ + CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_START, \ + CAMRTC_EVENT_MODULE_BASE, 0) +#define camrtc_trace_type_string \ + CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_STRING, \ + CAMRTC_EVENT_MODULE_BASE, 0) + +// camrtc_trace_base_ids +#define camrtc_trace_base_id(_subid) \ + CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_ARRAY, \ + CAMRTC_EVENT_MODULE_BASE, (_subid)) +#define camrtc_trace_base_target_init \ + camrtc_trace_base_id(1) +#define camrtc_trace_base_start_scheduler \ + camrtc_trace_base_id(2) + +// camrtc_trace_event_rtos_ids +#define camrtc_trace_rtos_id(_subid) \ + CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_ARRAY, \ + CAMRTC_EVENT_MODULE_RTOS, (_subid)) +#define camrtc_trace_rtos_task_switched_in \ + camrtc_trace_rtos_id(1) +#define camrtc_trace_rtos_increase_tick_count \ + camrtc_trace_rtos_id(2) +#define camrtc_trace_rtos_low_power_idle_begin \ + camrtc_trace_rtos_id(3) +#define 
camrtc_trace_rtos_low_power_idle_end \ + camrtc_trace_rtos_id(4) +#define camrtc_trace_rtos_task_switched_out \ + camrtc_trace_rtos_id(5) +#define camrtc_trace_rtos_task_priority_inherit \ + camrtc_trace_rtos_id(6) +#define camrtc_trace_rtos_task_priority_disinherit \ + camrtc_trace_rtos_id(7) +#define camrtc_trace_rtos_blocking_on_queue_receive \ + camrtc_trace_rtos_id(8) +#define camrtc_trace_rtos_blocking_on_queue_send \ + camrtc_trace_rtos_id(9) +#define camrtc_trace_rtos_moved_task_to_ready_state \ + camrtc_trace_rtos_id(10) +#define camrtc_trace_rtos_queue_create \ + camrtc_trace_rtos_id(11) +#define camrtc_trace_rtos_queue_create_failed \ + camrtc_trace_rtos_id(12) +#define camrtc_trace_rtos_create_mutex \ + camrtc_trace_rtos_id(13) +#define camrtc_trace_rtos_create_mutex_failed \ + camrtc_trace_rtos_id(14) +#define camrtc_trace_rtos_give_mutex_recursive \ + camrtc_trace_rtos_id(15) +#define camrtc_trace_rtos_give_mutex_recursive_failed \ + camrtc_trace_rtos_id(16) +#define camrtc_trace_rtos_take_mutex_recursive \ + camrtc_trace_rtos_id(17) +#define camrtc_trace_rtos_take_mutex_recursive_failed \ + camrtc_trace_rtos_id(18) +#define camrtc_trace_rtos_create_counting_semaphore \ + camrtc_trace_rtos_id(19) +#define camrtc_trace_rtos_create_counting_semaphore_failed \ + camrtc_trace_rtos_id(20) +#define camrtc_trace_rtos_queue_send \ + camrtc_trace_rtos_id(21) +#define camrtc_trace_rtos_queue_send_failed \ + camrtc_trace_rtos_id(22) +#define camrtc_trace_rtos_queue_receive \ + camrtc_trace_rtos_id(23) +#define camrtc_trace_rtos_queue_peek \ + camrtc_trace_rtos_id(24) +#define camrtc_trace_rtos_queue_peek_from_isr \ + camrtc_trace_rtos_id(25) +#define camrtc_trace_rtos_queue_receive_failed \ + camrtc_trace_rtos_id(26) +#define camrtc_trace_rtos_queue_send_from_isr \ + camrtc_trace_rtos_id(27) +#define camrtc_trace_rtos_queue_send_from_isr_failed \ + camrtc_trace_rtos_id(28) +#define camrtc_trace_rtos_queue_receive_from_isr \ + camrtc_trace_rtos_id(29) +#define camrtc_trace_rtos_queue_receive_from_isr_failed \ + camrtc_trace_rtos_id(30) +#define camrtc_trace_rtos_queue_peek_from_isr_failed \ + camrtc_trace_rtos_id(31) +#define camrtc_trace_rtos_queue_delete \ + camrtc_trace_rtos_id(32) +#define camrtc_trace_rtos_task_create \ + camrtc_trace_rtos_id(33) +#define camrtc_trace_rtos_task_create_failed \ + camrtc_trace_rtos_id(34) +#define camrtc_trace_rtos_task_delete \ + camrtc_trace_rtos_id(35) +#define camrtc_trace_rtos_task_delay_until \ + camrtc_trace_rtos_id(36) +#define camrtc_trace_rtos_task_delay \ + camrtc_trace_rtos_id(37) +#define camrtc_trace_rtos_task_priority_set \ + camrtc_trace_rtos_id(38) +#define camrtc_trace_rtos_task_suspend \ + camrtc_trace_rtos_id(39) +#define camrtc_trace_rtos_task_resume \ + camrtc_trace_rtos_id(40) +#define camrtc_trace_rtos_task_resume_from_isr \ + camrtc_trace_rtos_id(41) +#define camrtc_trace_rtos_task_increment_tick \ + camrtc_trace_rtos_id(42) +#define camrtc_trace_rtos_timer_create \ + camrtc_trace_rtos_id(43) +#define camrtc_trace_rtos_timer_create_failed \ + camrtc_trace_rtos_id(44) +#define camrtc_trace_rtos_timer_command_send \ + camrtc_trace_rtos_id(45) +#define camrtc_trace_rtos_timer_expired \ + camrtc_trace_rtos_id(46) +#define camrtc_trace_rtos_timer_command_received \ + camrtc_trace_rtos_id(47) +#define camrtc_trace_rtos_malloc \ + camrtc_trace_rtos_id(48) +#define camrtc_trace_rtos_free \ + camrtc_trace_rtos_id(49) +#define camrtc_trace_rtos_event_group_create \ + camrtc_trace_rtos_id(50) +#define 
camrtc_trace_rtos_event_group_create_failed \ + camrtc_trace_rtos_id(51) +#define camrtc_trace_rtos_event_group_sync_block \ + camrtc_trace_rtos_id(52) +#define camrtc_trace_rtos_event_group_sync_end \ + camrtc_trace_rtos_id(53) +#define camrtc_trace_rtos_event_group_wait_bits_block \ + camrtc_trace_rtos_id(54) +#define camrtc_trace_rtos_event_group_wait_bits_end \ + camrtc_trace_rtos_id(55) +#define camrtc_trace_rtos_event_group_clear_bits \ + camrtc_trace_rtos_id(56) +#define camrtc_trace_rtos_event_group_clear_bits_from_isr \ + camrtc_trace_rtos_id(57) +#define camrtc_trace_rtos_event_group_set_bits \ + camrtc_trace_rtos_id(58) +#define camrtc_trace_rtos_event_group_set_bits_from_isr \ + camrtc_trace_rtos_id(59) +#define camrtc_trace_rtos_event_group_delete \ + camrtc_trace_rtos_id(60) +#define camrtc_trace_rtos_pend_func_call \ + camrtc_trace_rtos_id(61) +#define camrtc_trace_rtos_pend_func_call_from_isr \ + camrtc_trace_rtos_id(62) +#define camrtc_trace_rtos_queue_registry_add \ + camrtc_trace_rtos_id(63) + +// camrtc_trace_dbg_ids +#define camrtc_trace_dbg_id(_subid) \ + CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_ARRAY, \ + CAMRTC_EVENT_MODULE_DBG, (_subid)) +#define camrtc_trace_dbg_unknown \ + camrtc_trace_dbg_id(1) +#define camrtc_trace_dbg_enter \ + camrtc_trace_dbg_id(2) +#define camrtc_trace_dbg_exit \ + camrtc_trace_dbg_id(3) +#define camrtc_trace_dbg_set_loglevel \ + camrtc_trace_dbg_id(4) + +// camrtc_trace_vinotify_ids +#define camrtc_trace_vinotify_id(_subid) \ + CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_ARRAY, \ + CAMRTC_EVENT_MODULE_VINOTIFY, (_subid)) +#define camrtc_trace_vinotify_event_ts64 \ + camrtc_trace_vinotify_id(1) +#define camrtc_trace_vinotify_event \ + camrtc_trace_vinotify_id(2) +#define camrtc_trace_vinotify_error \ + camrtc_trace_vinotify_id(3) + +// camrtc_trace_vi_ids +#define camrtc_trace_vi_id(_subid) \ + CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_ARRAY, \ + CAMRTC_EVENT_MODULE_VI, (_subid)) +#define camrtc_trace_vi_frame_begin \ + camrtc_trace_vi_id(1) +#define camrtc_trace_vi_frame_end \ + camrtc_trace_vi_id(2) + +// camrtc_trace_isp_ids +#define camrtc_trace_isp_id(_subid) \ + CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_ARRAY, \ + CAMRTC_EVENT_MODULE_ISP, (_subid)) +#define camrtc_trace_isp_task_begin \ + camrtc_trace_isp_id(1) +#define camrtc_trace_isp_task_end \ + camrtc_trace_isp_id(2) +#define camrtc_trace_isp_falcon_traces_event \ + camrtc_trace_isp_id(3) + +// camrtc_trace_nvcsi_ids +#define camrtc_trace_nvcsi_id(_subid) \ + CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_ARRAY, \ + CAMRTC_EVENT_MODULE_NVCSI, (_subid)) +#define camrtc_trace_nvcsi_intr \ + camrtc_trace_nvcsi_id(1) + +// camrtc_trace_capture_ids +#define camrtc_trace_capture_event_id(_subid) \ + CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_ARRAY, \ + CAMRTC_EVENT_MODULE_CAPTURE, (_subid)) +#define camrtc_trace_capture_event_sof \ + camrtc_trace_capture_event_id(0) +#define camrtc_trace_capture_event_eof \ + camrtc_trace_capture_event_id(1) +#define camrtc_trace_capture_event_error \ + camrtc_trace_capture_event_id(2) +#define camrtc_trace_capture_event_reschedule \ + camrtc_trace_capture_event_id(3) +#define camrtc_trace_capture_event_sensor \ + camrtc_trace_capture_event_id(4) +#define camrtc_trace_capture_event_reschedule_isp \ + camrtc_trace_capture_event_id(5) +#define camrtc_trace_capture_event_isp_done \ + camrtc_trace_capture_event_id(6) +#define camrtc_trace_capture_event_isp_error \ + camrtc_trace_capture_event_id(7) +#define camrtc_trace_capture_event_inject \ + camrtc_trace_capture_event_id(8) 
+#define camrtc_trace_capture_event_wdt \
+	camrtc_trace_capture_event_id(9)
+#define camrtc_trace_capture_event_report_program \
+	camrtc_trace_capture_event_id(10)
+
+#define camrtc_trace_capture_event_suspend \
+	camrtc_trace_capture_event_id(14)
+#define camrtc_trace_capture_event_suspend_isp \
+	camrtc_trace_capture_event_id(15)
+
+// camrtc_trace_perf id
+#define camrtc_trace_perf_id(_subid) \
+	CAMRTC_EVENT_MAKE_ID(CAMRTC_EVENT_TYPE_ARRAY, \
+		CAMRTC_EVENT_MODULE_PERF, (_subid))
+#define camrtc_trace_perf_counters \
+	camrtc_trace_perf_id(0)
+#define camrtc_trace_perf_reset \
+	camrtc_trace_perf_id(1)
+
+struct camrtc_trace_perf_counter_data {
+	uint64_t cycles;
+	uint32_t counters[3];
+	uint8_t events[3];
+	uint8_t name[25];
+};
+
+#pragma GCC diagnostic ignored "-Wpadded"
+
+#endif /* INCLUDE_CAMRTC_TRACE_H */
diff --git a/include/trace/events/freertos.h b/include/trace/events/freertos.h
new file mode 100644
index 00000000..8f91a6c1
--- /dev/null
+++ b/include/trace/events/freertos.h
@@ -0,0 +1,692 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM freertos
+
+#if !defined(_TRACE_FREERTOS_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FREERTOS_H_
+
+#include <linux/tracepoint.h>
+
+/*
+ * Classes with no argument
+ */
+
+DECLARE_EVENT_CLASS(rtos__noarg,
+	TP_PROTO(u64 tstamp),
+	TP_ARGS(tstamp),
+	TP_STRUCT__entry(
+		__field(u64, tstamp)
+	),
+	TP_fast_assign(
+		__entry->tstamp = tstamp;
+	),
+	TP_printk("tstamp:%llu", __entry->tstamp)
+);
+
+/*
+ * Classes with 1 argument
+ */
+
+DECLARE_EVENT_CLASS(rtos__count,
+	TP_PROTO(u64 tstamp, u32 count),
+	TP_ARGS(tstamp, count),
+	TP_STRUCT__entry(
+		__field(u64, tstamp)
+		__field(u32, count)
+	),
+	TP_fast_assign(
+		__entry->tstamp = tstamp;
+		__entry->count = count;
+	),
+	TP_printk("tstamp:%llu count:%u", __entry->tstamp, __entry->count)
+);
+
+DECLARE_EVENT_CLASS(rtos__type,
+	TP_PROTO(u64 tstamp, u32 type),
+	TP_ARGS(tstamp, type),
+	TP_STRUCT__entry(
+		__field(u64, tstamp)
+		__field(u32, type)
+	),
+	TP_fast_assign(
+		__entry->tstamp = tstamp;
+		__entry->type = type;
+	),
+	TP_printk("tstamp:%llu type:%u", __entry->tstamp, __entry->type)
+);
+
+DECLARE_EVENT_CLASS(rtos__queue,
+	TP_PROTO(u64 tstamp, u32 queue),
+	TP_ARGS(tstamp, queue),
+	TP_STRUCT__entry(
+		__field(u64, tstamp)
+		__field(u32, queue)
+	),
+	TP_fast_assign(
+		__entry->tstamp = tstamp;
+		__entry->queue = queue;
+	),
+	TP_printk("tstamp:%llu queue:0x%08x", __entry->tstamp, __entry->queue)
+);
+
+DECLARE_EVENT_CLASS(rtos__tcb,
+	TP_PROTO(u64 tstamp, u32 tcb),
+	TP_ARGS(tstamp, tcb),
+	TP_STRUCT__entry(
+		__field(u64, tstamp)
+		__field(u32, tcb)
+	),
+	TP_fast_assign(
+		__entry->tstamp = tstamp;
+		__entry->tcb = tcb;
+	),
+	TP_printk("tstamp:%llu tcb:0x%08x", __entry->tstamp, __entry->tcb)
+);
+
+DECLARE_EVENT_CLASS(rtos__mutex,
+	TP_PROTO(u64 tstamp, u32 mutex),
+	TP_ARGS(tstamp, mutex),
+	TP_STRUCT__entry(
+		__field(u64, tstamp)
+		__field(u32, mutex)
+	),
+	TP_fast_assign(
+		__entry->tstamp = tstamp;
+		__entry->mutex = mutex;
+	),
+	TP_printk("tstamp:%llu mutex:0x%08x", __entry->tstamp, __entry->mutex)
+);
+
+DECLARE_EVENT_CLASS(rtos__timer,
+	TP_PROTO(u64 tstamp, u32 timer),
+	TP_ARGS(tstamp, timer),
+	TP_STRUCT__entry(
+		__field(u64, tstamp)
+		__field(u32, timer)
+	),
+	TP_fast_assign(
+		__entry->tstamp = tstamp;
+		__entry->timer = timer;
+	),
+	TP_printk("tstamp:%llu timer:0x%08x", __entry->tstamp, __entry->timer)
+);
+
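The ring protocol behind all of these firmware tracepoints is the trace-memory layout declared in camrtc-trace.h above: a host-side consumer keeps a private read index and chases event_next_idx in the header. A minimal sketch of such a reader, not part of this patch; it assumes the trace memory has already been mapped and validated, and the function and variable names are illustrative:

/*
 * Illustrative host-side reader for the event ring described in
 * camrtc-trace.h. "mem" points at the shared trace memory and
 * "read_idx" is the consumer's private position between calls.
 */
static void drain_rtcpu_events(const void *mem, u32 *read_idx)
{
	const struct camrtc_trace_memory_header *hdr = mem;
	const struct camrtc_event_struct *events =
		(const void *)((const char *)mem + hdr->event_offset);

	while (*read_idx != hdr->event_next_idx) {
		const struct camrtc_event_struct *ev = &events[*read_idx];

		/* The packed event ID decomposes into type, module and subid */
		pr_debug("type=%u module=%u subid=%u ts=%llu\n",
			 CAMRTC_EVENT_TYPE_FROM_ID(ev->header.id),
			 CAMRTC_EVENT_MODULE_FROM_ID(ev->header.id),
			 CAMRTC_EVENT_SUBID_FROM_ID(ev->header.id),
			 ev->header.tstamp);

		/* Wrap at the end of the ring */
		*read_idx = (*read_idx + 1) % hdr->event_entries;
	}
}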
+DECLARE_EVENT_CLASS(rtos__eventgroup, + TP_PROTO(u64 tstamp, u32 eventgroup), + TP_ARGS(tstamp, eventgroup), + TP_STRUCT__entry( + __field(u64, tstamp) + __field(u32, eventgroup) + ), + TP_fast_assign( + __entry->tstamp = tstamp; + __entry->eventgroup = eventgroup; + ), + TP_printk("tstamp:%llu eventgroup:%u", __entry->tstamp, + __entry->eventgroup) +); + +/* + * Classes with 2 arguments + */ + +DECLARE_EVENT_CLASS(rtos__tcb_priority, + TP_PROTO(u64 tstamp, u32 tcb, u32 priority), + TP_ARGS(tstamp, tcb, priority), + TP_STRUCT__entry( + __field(u64, tstamp) + __field(u32, tcb) + __field(u32, priority) + ), + TP_fast_assign( + __entry->tstamp = tstamp; + __entry->tcb = tcb; + __entry->priority = priority; + ), + TP_printk("tstamp:%llu tcb:%u priority:%u", + __entry->tstamp, __entry->tcb, __entry->priority) +); + +DECLARE_EVENT_CLASS(rtos__addr_size, + TP_PROTO(u64 tstamp, u32 addr, u32 size), + TP_ARGS(tstamp, addr, size), + TP_STRUCT__entry( + __field(u64, tstamp) + __field(u32, addr) + __field(u32, size) + ), + TP_fast_assign( + __entry->tstamp = tstamp; + __entry->addr = addr; + __entry->size = size; + ), + TP_printk("tstamp:%llu addr:%u size:%u", + __entry->tstamp, __entry->addr, __entry->size) +); + +DECLARE_EVENT_CLASS(rtos__eventgroup_wait, + TP_PROTO(u64 tstamp, u32 eventgroup, u32 wait), + TP_ARGS(tstamp, eventgroup, wait), + TP_STRUCT__entry( + __field(u64, tstamp) + __field(u32, eventgroup) + __field(u32, wait) + ), + TP_fast_assign( + __entry->tstamp = tstamp; + __entry->eventgroup = eventgroup; + __entry->wait = wait; + ), + TP_printk("tstamp:%llu eventgroup:%u wait:%u", + __entry->tstamp, __entry->eventgroup, __entry->wait) +); + +DECLARE_EVENT_CLASS(rtos__eventgroup_clear, + TP_PROTO(u64 tstamp, u32 eventgroup, u32 clear), + TP_ARGS(tstamp, eventgroup, clear), + TP_STRUCT__entry( + __field(u64, tstamp) + __field(u32, eventgroup) + __field(u32, clear) + ), + TP_fast_assign( + __entry->tstamp = tstamp; + __entry->eventgroup = eventgroup; + __entry->clear = clear; + ), + TP_printk("tstamp:%llu eventgroup:%u clear:%u", + __entry->tstamp, __entry->eventgroup, __entry->clear) +); + +DECLARE_EVENT_CLASS(rtos__eventgroup_set, + TP_PROTO(u64 tstamp, u32 eventgroup, u32 set), + TP_ARGS(tstamp, eventgroup, set), + TP_STRUCT__entry( + __field(u64, tstamp) + __field(u32, eventgroup) + __field(u32, set) + ), + TP_fast_assign( + __entry->tstamp = tstamp; + __entry->eventgroup = eventgroup; + __entry->set = set; + ), + TP_printk("tstamp:%llu eventgroup:%u set:%u", + __entry->tstamp, __entry->eventgroup, __entry->set) +); + +DECLARE_EVENT_CLASS(rtos__queue_name, + TP_PROTO(u64 tstamp, u32 queue, u32 name), + TP_ARGS(tstamp, queue, name), + TP_STRUCT__entry( + __field(u64, tstamp) + __field(u32, queue) + __field(u32, name) + ), + TP_fast_assign( + __entry->tstamp = tstamp; + __entry->queue = queue; + __entry->name = name; + ), + TP_printk("tstamp:%llu queue:%u name:0x%08x", + __entry->tstamp, __entry->queue, __entry->name) +); + +/* + * Classes with 3 arguments + */ + +DECLARE_EVENT_CLASS(rtos__ptimer_msgid_value, + TP_PROTO(u64 tstamp, u32 ptimer, u32 msgid, u32 value), + TP_ARGS(tstamp, ptimer, msgid, value), + TP_STRUCT__entry( + __field(u64, tstamp) + __field(u32, ptimer) + __field(u32, msgid) + __field(u32, value) + ), + TP_fast_assign( + __entry->tstamp = tstamp; + __entry->ptimer = ptimer; + __entry->msgid = msgid; + __entry->value = value; + ), + TP_printk("tstamp:%llu timer:0x%08x msgid:%u value:%u", + __entry->tstamp, __entry->ptimer, __entry->msgid, + __entry->value) +); + 
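Each DECLARE_EVENT_CLASS above generates the record and print logic once, and every DEFINE_EVENT further down reuses it, which keeps the generated code small. To make the flow concrete, here is a hypothetical sketch, not part of this patch, of how a decoder could route a firmware event record (camrtc_event_struct and the camrtc_trace_rtos_* IDs come from camrtc-trace.h) to the trace_rtos_*() calls generated from this header; treating data32[0] and data32[1] as the event payload is an assumption made for illustration:

/*
 * Hypothetical dispatcher sketch: map a firmware trace record to the
 * freertos tracepoints declared in this header.
 */
static void rtcpu_trace_rtos_event(const struct camrtc_event_struct *event)
{
	u64 ts = event->header.tstamp;

	switch (event->header.id) {
	case camrtc_trace_rtos_task_switched_in:
		trace_rtos_task_switched_in(ts);
		break;
	case camrtc_trace_rtos_queue_send:
		/* Assumed payload layout: data32[0] holds the queue handle */
		trace_rtos_queue_send(ts, event->data.data32[0]);
		break;
	case camrtc_trace_rtos_malloc:
		/* Assumed payload layout: address followed by size */
		trace_rtos_malloc(ts, event->data.data32[0],
				  event->data.data32[1]);
		break;
	default:
		break;
	}
}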
+DECLARE_EVENT_CLASS(rtos__eventgroup_set_wait,
+	TP_PROTO(u64 tstamp, u32 eventgroup, u32 set, u32 wait),
+	TP_ARGS(tstamp, eventgroup, set, wait),
+	TP_STRUCT__entry(
+		__field(u64, tstamp)
+		__field(u32, eventgroup)
+		__field(u32, set)
+		__field(u32, wait)
+	),
+	TP_fast_assign(
+		__entry->tstamp = tstamp;
+		__entry->eventgroup = eventgroup;
+		__entry->set = set;
+		__entry->wait = wait;
+	),
+	TP_printk("tstamp:%llu eventgroup:%u set:%u wait:%u",
+		__entry->tstamp, __entry->eventgroup, __entry->set,
+		__entry->wait)
+);
+
+DECLARE_EVENT_CLASS(rtos__eventgroup_wait_timeout,
+	TP_PROTO(u64 tstamp, u32 eventgroup, u32 wait, u32 timeout),
+	TP_ARGS(tstamp, eventgroup, wait, timeout),
+	TP_STRUCT__entry(
+		__field(u64, tstamp)
+		__field(u32, eventgroup)
+		__field(u32, wait)
+		__field(u32, timeout)
+	),
+	TP_fast_assign(
+		__entry->tstamp = tstamp;
+		__entry->eventgroup = eventgroup;
+		__entry->wait = wait;
+		__entry->timeout = timeout;
+	),
+	TP_printk("tstamp:%llu eventgroup:%u wait:%u timeout:%u",
+		__entry->tstamp, __entry->eventgroup,
+		__entry->wait, __entry->timeout)
+);
+
+/*
+ * Classes with 4 arguments
+ */
+
+DECLARE_EVENT_CLASS(rtos__timer_msgid_value_return,
+	TP_PROTO(u64 tstamp, u32 timer, u32 msgid, u32 value, u32 ret),
+	TP_ARGS(tstamp, timer, msgid, value, ret),
+	TP_STRUCT__entry(
+		__field(u64, tstamp)
+		__field(u32, timer)
+		__field(u32, msgid)
+		__field(u32, value)
+		__field(u32, ret)
+	),
+	TP_fast_assign(
+		__entry->tstamp = tstamp;
+		__entry->timer = timer;
+		__entry->msgid = msgid;
+		__entry->value = value;
+		__entry->ret = ret;
+	),
+	TP_printk("tstamp:%llu timer:%u msgid:%u value:%u return:%u",
+		__entry->tstamp, __entry->timer, __entry->msgid,
+		__entry->value, __entry->ret)
+);
+
+DECLARE_EVENT_CLASS(rtos__eventgroup_set_wait_timeout,
+	TP_PROTO(u64 tstamp, u32 eventgroup, u32 set, u32 wait, u32 timeout),
+	TP_ARGS(tstamp, eventgroup, set, wait, timeout),
+	TP_STRUCT__entry(
+		__field(u64, tstamp)
+		__field(u32, eventgroup)
+		__field(u32, set)
+		__field(u32, wait)
+		__field(u32, timeout)
+	),
+	TP_fast_assign(
+		__entry->tstamp = tstamp;
+		__entry->eventgroup = eventgroup;
+		__entry->set = set;
+		__entry->wait = wait;
+		__entry->timeout = timeout;
+	),
+	TP_printk("tstamp:%llu eventgroup:%u set:%u wait:%u timeout:%u",
+		__entry->tstamp, __entry->eventgroup, __entry->set,
+		__entry->wait, __entry->timeout)
+);
+
+DECLARE_EVENT_CLASS(rtos__function_param1_param2_ret,
+	TP_PROTO(u64 tstamp, u32 function, u32 param1, u32 param2, u32 ret),
+	TP_ARGS(tstamp, function, param1, param2, ret),
+	TP_STRUCT__entry(
+		__field(u64, tstamp)
+		__field(u32, function)
+		__field(u32, param1)
+		__field(u32, param2)
+		__field(u32, ret)
+	),
+	TP_fast_assign(
+		__entry->tstamp = tstamp;
+		__entry->function = function;
+		__entry->param1 = param1;
+		__entry->param2 = param2;
+		__entry->ret = ret;
+	),
+	TP_printk(
+		"tstamp:%llu function:0x%08x param1:0x%08x param2:0x%08x ret:%u",
+		__entry->tstamp, __entry->function, __entry->param1,
+		__entry->param2, __entry->ret)
+);
+
+/*
+ * Events
+ */
+
+DEFINE_EVENT(rtos__noarg, rtos_task_switched_in,
+	TP_PROTO(u64 tstamp),
+	TP_ARGS(tstamp)
+);
+
+DEFINE_EVENT(rtos__count, rtos_increase_tick_count,
+	TP_PROTO(u64 tstamp, u32 count),
+	TP_ARGS(tstamp, count)
+);
+
+DEFINE_EVENT(rtos__noarg, rtos_low_power_idle_begin,
+	TP_PROTO(u64 tstamp),
+	TP_ARGS(tstamp)
+);
+
+DEFINE_EVENT(rtos__noarg, rtos_low_power_idle_end,
+	TP_PROTO(u64 tstamp),
+	TP_ARGS(tstamp)
+);
+
+DEFINE_EVENT(rtos__noarg, rtos_task_switched_out,
+	TP_PROTO(u64 tstamp),
+	TP_ARGS(tstamp)
+);
+
+DEFINE_EVENT(rtos__tcb_priority,
rtos_task_priority_inherit, + TP_PROTO(u64 tstamp, u32 tcb, u32 priority), + TP_ARGS(tstamp, tcb, priority) +); + +DEFINE_EVENT(rtos__tcb_priority, rtos_task_priority_disinherit, + TP_PROTO(u64 tstamp, u32 tcb, u32 priority), + TP_ARGS(tstamp, tcb, priority) +); + +DEFINE_EVENT(rtos__queue, rtos_blocking_on_queue_receive, + TP_PROTO(u64 tstamp, u32 queue), + TP_ARGS(tstamp, queue) +); + +DEFINE_EVENT(rtos__queue, rtos_blocking_on_queue_send, + TP_PROTO(u64 tstamp, u32 queue), + TP_ARGS(tstamp, queue) +); + +DEFINE_EVENT(rtos__tcb, rtos_moved_task_to_ready_state, + TP_PROTO(u64 tstamp, u32 tcb), + TP_ARGS(tstamp, tcb) +); + +DEFINE_EVENT(rtos__queue, rtos_queue_create, + TP_PROTO(u64 tstamp, u32 queue), + TP_ARGS(tstamp, queue) +); + +DEFINE_EVENT(rtos__type, rtos_queue_create_failed, + TP_PROTO(u64 tstamp, u32 type), + TP_ARGS(tstamp, type) +); + +DEFINE_EVENT(rtos__queue, rtos_create_mutex, + TP_PROTO(u64 tstamp, u32 queue), + TP_ARGS(tstamp, queue) +); + +DEFINE_EVENT(rtos__noarg, rtos_create_mutex_failed, + TP_PROTO(u64 tstamp), + TP_ARGS(tstamp) +); + +DEFINE_EVENT(rtos__mutex, rtos_give_mutex_recursive, + TP_PROTO(u64 tstamp, u32 mutex), + TP_ARGS(tstamp, mutex) +); + +DEFINE_EVENT(rtos__mutex, rtos_give_mutex_recursive_failed, + TP_PROTO(u64 tstamp, u32 mutex), + TP_ARGS(tstamp, mutex) +); + +DEFINE_EVENT(rtos__mutex, rtos_take_mutex_recursive, + TP_PROTO(u64 tstamp, u32 mutex), + TP_ARGS(tstamp, mutex) +); + +DEFINE_EVENT(rtos__mutex, rtos_take_mutex_recursive_failed, + TP_PROTO(u64 tstamp, u32 mutex), + TP_ARGS(tstamp, mutex) +); + +DEFINE_EVENT(rtos__noarg, rtos_create_counting_semaphore, + TP_PROTO(u64 tstamp), + TP_ARGS(tstamp) +); + +DEFINE_EVENT(rtos__noarg, rtos_create_counting_semaphore_failed, + TP_PROTO(u64 tstamp), + TP_ARGS(tstamp) +); + +DEFINE_EVENT(rtos__queue, rtos_queue_send, + TP_PROTO(u64 tstamp, u32 queue), + TP_ARGS(tstamp, queue) +); + +DEFINE_EVENT(rtos__queue, rtos_queue_send_failed, + TP_PROTO(u64 tstamp, u32 queue), + TP_ARGS(tstamp, queue) +); + +DEFINE_EVENT(rtos__queue, rtos_queue_receive, + TP_PROTO(u64 tstamp, u32 queue), + TP_ARGS(tstamp, queue) +); + +DEFINE_EVENT(rtos__queue, rtos_queue_peek, + TP_PROTO(u64 tstamp, u32 queue), + TP_ARGS(tstamp, queue) +); + +DEFINE_EVENT(rtos__queue, rtos_queue_peek_from_isr, + TP_PROTO(u64 tstamp, u32 queue), + TP_ARGS(tstamp, queue) +); + +DEFINE_EVENT(rtos__queue, rtos_queue_receive_failed, + TP_PROTO(u64 tstamp, u32 queue), + TP_ARGS(tstamp, queue) +); + +DEFINE_EVENT(rtos__queue, rtos_queue_send_from_isr, + TP_PROTO(u64 tstamp, u32 queue), + TP_ARGS(tstamp, queue) +); + +DEFINE_EVENT(rtos__queue, rtos_queue_send_from_isr_failed, + TP_PROTO(u64 tstamp, u32 queue), + TP_ARGS(tstamp, queue) +); + +DEFINE_EVENT(rtos__queue, rtos_queue_receive_from_isr, + TP_PROTO(u64 tstamp, u32 queue), + TP_ARGS(tstamp, queue) +); + +DEFINE_EVENT(rtos__queue, rtos_queue_receive_from_isr_failed, + TP_PROTO(u64 tstamp, u32 queue), + TP_ARGS(tstamp, queue) +); + +DEFINE_EVENT(rtos__queue, rtos_queue_peek_from_isr_failed, + TP_PROTO(u64 tstamp, u32 queue), + TP_ARGS(tstamp, queue) +); + +DEFINE_EVENT(rtos__queue, rtos_queue_delete, + TP_PROTO(u64 tstamp, u32 queue), + TP_ARGS(tstamp, queue) +); + +DEFINE_EVENT(rtos__tcb, rtos_task_create, + TP_PROTO(u64 tstamp, u32 tcb), + TP_ARGS(tstamp, tcb) +); + +DEFINE_EVENT(rtos__noarg, rtos_task_create_failed, + TP_PROTO(u64 tstamp), + TP_ARGS(tstamp) +); + +DEFINE_EVENT(rtos__tcb, rtos_task_delete, + TP_PROTO(u64 tstamp, u32 tcb), + TP_ARGS(tstamp, tcb) +); + +DEFINE_EVENT(rtos__noarg, 
rtos_task_delay_until, + TP_PROTO(u64 tstamp), + TP_ARGS(tstamp) +); + +DEFINE_EVENT(rtos__noarg, rtos_task_delay, + TP_PROTO(u64 tstamp), + TP_ARGS(tstamp) +); + +DEFINE_EVENT(rtos__tcb_priority, rtos_task_priority_set, + TP_PROTO(u64 tstamp, u32 tcb, u32 priority), + TP_ARGS(tstamp, tcb, priority) +); + +DEFINE_EVENT(rtos__tcb, rtos_task_suspend, + TP_PROTO(u64 tstamp, u32 tcb), + TP_ARGS(tstamp, tcb) +); + +DEFINE_EVENT(rtos__tcb, rtos_task_resume, + TP_PROTO(u64 tstamp, u32 tcb), + TP_ARGS(tstamp, tcb) +); + +DEFINE_EVENT(rtos__tcb, rtos_task_resume_from_isr, + TP_PROTO(u64 tstamp, u32 tcb), + TP_ARGS(tstamp, tcb) +); + +DEFINE_EVENT(rtos__count, rtos_task_increment_tick, + TP_PROTO(u64 tstamp, u32 count), + TP_ARGS(tstamp, count) +); + +DEFINE_EVENT(rtos__timer, rtos_timer_create, + TP_PROTO(u64 tstamp, u32 timer), + TP_ARGS(tstamp, timer) +); + +DEFINE_EVENT(rtos__noarg, rtos_timer_create_failed, + TP_PROTO(u64 tstamp), + TP_ARGS(tstamp) +); + +DEFINE_EVENT(rtos__timer_msgid_value_return, rtos_timer_command_send, + TP_PROTO(u64 tstamp, u32 timer, u32 msgid, u32 value, u32 ret), + TP_ARGS(tstamp, timer, msgid, value, ret) +); + +DEFINE_EVENT(rtos__timer, rtos_timer_expired, + TP_PROTO(u64 tstamp, u32 timer), + TP_ARGS(tstamp, timer) +); + +DEFINE_EVENT(rtos__ptimer_msgid_value, rtos_timer_command_received, + TP_PROTO(u64 tstamp, u32 ptimer, u32 msgid, u32 value), + TP_ARGS(tstamp, ptimer, msgid, value) +); + +DEFINE_EVENT(rtos__addr_size, rtos_malloc, + TP_PROTO(u64 tstamp, u32 addr, u32 size), + TP_ARGS(tstamp, addr, size) +); + +DEFINE_EVENT(rtos__addr_size, rtos_free, + TP_PROTO(u64 tstamp, u32 addr, u32 size), + TP_ARGS(tstamp, addr, size) +); + +DEFINE_EVENT(rtos__eventgroup, rtos_event_group_create, + TP_PROTO(u64 tstamp, u32 eventgroup), + TP_ARGS(tstamp, eventgroup) +); + +DEFINE_EVENT(rtos__noarg, rtos_event_group_create_failed, + TP_PROTO(u64 tstamp), + TP_ARGS(tstamp) +); + +DEFINE_EVENT(rtos__eventgroup_set_wait, rtos_event_group_sync_block, + TP_PROTO(u64 tstamp, u32 eventgroup, u32 set, u32 wait), + TP_ARGS(tstamp, eventgroup, set, wait) +); + +DEFINE_EVENT(rtos__eventgroup_set_wait_timeout, rtos_event_group_sync_end, + TP_PROTO(u64 tstamp, u32 eventgroup, u32 set, u32 wait, u32 timeout), + TP_ARGS(tstamp, eventgroup, set, wait, timeout) +); + +DEFINE_EVENT(rtos__eventgroup_wait, rtos_event_group_wait_bits_block, + TP_PROTO(u64 tstamp, u32 eventgroup, u32 wait), + TP_ARGS(tstamp, eventgroup, wait) +); + +DEFINE_EVENT(rtos__eventgroup_wait_timeout, rtos_event_group_wait_bits_end, + TP_PROTO(u64 tstamp, u32 eventgroup, u32 wait, u32 timeout), + TP_ARGS(tstamp, eventgroup, wait, timeout) +); + +DEFINE_EVENT(rtos__eventgroup_clear, rtos_event_group_clear_bits, + TP_PROTO(u64 tstamp, u32 eventgroup, u32 clear), + TP_ARGS(tstamp, eventgroup, clear) +); + +DEFINE_EVENT(rtos__eventgroup_clear, rtos_event_group_clear_bits_from_isr, + TP_PROTO(u64 tstamp, u32 eventgroup, u32 clear), + TP_ARGS(tstamp, eventgroup, clear) +); + +DEFINE_EVENT(rtos__eventgroup_set, rtos_event_group_set_bits, + TP_PROTO(u64 tstamp, u32 eventgroup, u32 set), + TP_ARGS(tstamp, eventgroup, set) +); + +DEFINE_EVENT(rtos__eventgroup_set, rtos_event_group_set_bits_from_isr, + TP_PROTO(u64 tstamp, u32 eventgroup, u32 set), + TP_ARGS(tstamp, eventgroup, set) +); + +DEFINE_EVENT(rtos__eventgroup, rtos_event_group_delete, + TP_PROTO(u64 tstamp, u32 eventgroup), + TP_ARGS(tstamp, eventgroup) +); + +DEFINE_EVENT(rtos__function_param1_param2_ret, rtos_pend_func_call, + TP_PROTO(u64 tstamp, u32 function, u32 param1, 
u32 param2, u32 ret),
+	TP_ARGS(tstamp, function, param1, param2, ret)
+);
+
+DEFINE_EVENT(rtos__function_param1_param2_ret, rtos_pend_func_call_from_isr,
+	TP_PROTO(u64 tstamp, u32 function, u32 param1, u32 param2, u32 ret),
+	TP_ARGS(tstamp, function, param1, param2, ret)
+);
+
+DEFINE_EVENT(rtos__queue_name, rtos_queue_registry_add,
+	TP_PROTO(u64 tstamp, u32 queue, u32 name),
+	TP_ARGS(tstamp, queue, name)
+);
+
+#endif /* _TRACE_FREERTOS_H_ */
+
+#include <trace/define_trace.h>
diff --git a/include/trace/events/tegra_capture.h b/include/trace/events/tegra_capture.h
new file mode 100644
index 00000000..40e732d7
--- /dev/null
+++ b/include/trace/events/tegra_capture.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM tegra_capture
+
+#if !defined(_TRACE_TEGRA_CAPTURE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TEGRA_CAPTURE_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * Classes
+ */
+#ifndef IVC_NAME_LEN
+#define IVC_NAME_LEN 16
+#endif
+
+DECLARE_EVENT_CLASS(capture__msg,
+	TP_PROTO(const char *ivc_name, u32 msg_id, u32 ch_id),
+	TP_ARGS(ivc_name, msg_id, ch_id),
+	TP_STRUCT__entry(
+		__array(char, ivc_name, IVC_NAME_LEN)
+		__field(u32, msg_id)
+		__field(u32, ch_id)
+	),
+	TP_fast_assign(
+		strlcpy(__entry->ivc_name, ivc_name, sizeof(__entry->ivc_name));
+		__entry->msg_id = msg_id;
+		__entry->ch_id = ch_id;
+	),
+	TP_printk("ivc:\"%s\" msg:0x%02x ch:0x%02x",
+		__entry->ivc_name,
+		__entry->msg_id,
+		__entry->ch_id)
+);
+
+/*
+ * Events for capture and capture control protocol
+ */
+TRACE_EVENT(capture_ivc_notify,
+	TP_PROTO(const char *ivc_name),
+	TP_ARGS(ivc_name),
+	TP_STRUCT__entry(
+		__array(char, ivc_name, IVC_NAME_LEN)
+	),
+	TP_fast_assign(
+		strlcpy(__entry->ivc_name, ivc_name, sizeof(__entry->ivc_name));
+	),
+	TP_printk("ivc:\"%s\"", __entry->ivc_name)
+);
+
+DEFINE_EVENT(capture__msg, capture_ivc_recv,
+	TP_PROTO(const char *ivc_name, u32 msg_id, u32 ch_id),
+	TP_ARGS(ivc_name, msg_id, ch_id)
+);
+
+DEFINE_EVENT(capture__msg, capture_ivc_send,
+	TP_PROTO(const char *ivc_name, u32 msg_id, u32 ch_id),
+	TP_ARGS(ivc_name, msg_id, ch_id)
+);
+
+TRACE_EVENT(capture_ivc_send_error,
+	TP_PROTO(const char *ivc_name, u32 msg_id, u32 ch_id, int err),
+	TP_ARGS(ivc_name, msg_id, ch_id, err),
+	TP_STRUCT__entry(
+		__array(char, ivc_name, IVC_NAME_LEN)
+		__field(u32, msg_id)
+		__field(u32, ch_id)
+		__field(int, err)
+	),
+	TP_fast_assign(
+		strlcpy(__entry->ivc_name, ivc_name, sizeof(__entry->ivc_name));
+		__entry->msg_id = msg_id;
+		__entry->ch_id = ch_id;
+		__entry->err = err;
+	),
+	TP_printk("ivc:\"%s\" msg:0x%02x ch:0x%02x: err:%d",
+		__entry->ivc_name,
+		__entry->msg_id,
+		__entry->ch_id,
+		__entry->err)
+);
+
+/*
+ * Capture scheduler events from RCE
+ */
+DECLARE_EVENT_CLASS(capture__progress_event,
+	TP_PROTO(u64 ts, u32 channel_id, u32 sequence),
+	TP_ARGS(ts, channel_id, sequence),
+	TP_STRUCT__entry(
+		__field(u64, ts)
+		__field(u32, channel_id)
+		__field(u32, sequence)
+	),
+	TP_fast_assign(
+		__entry->ts = ts;
+		__entry->channel_id = channel_id;
+		__entry->sequence = sequence;
+	),
+	TP_printk("ts:%llu ch:0x%02x seq:%u",
+		__entry->ts,
+		__entry->channel_id,
+		__entry->sequence)
+);
+
+DECLARE_EVENT_CLASS(capture__isp_event,
+	TP_PROTO(u64 ts, u32 channel_id, u32 prog_sequence, u32 cap_sequence,
+		u8 isp_settings_id, u8 vi_channel_id),
+	TP_ARGS(ts, channel_id, prog_sequence, cap_sequence, isp_settings_id, vi_channel_id),
+	TP_STRUCT__entry(
+		__field(u64, ts)
+		__field(u32, channel_id)
+		__field(u32, prog_sequence)
+		__field(u32, cap_sequence)
+		__field(u8, isp_settings_id)
+		__field(u8, vi_channel_id)
+	),
+	TP_fast_assign(
+		__entry->ts = ts;
+		__entry->channel_id = channel_id;
+		__entry->prog_sequence = prog_sequence;
+		__entry->cap_sequence = cap_sequence;
+		__entry->isp_settings_id = isp_settings_id;
+		__entry->vi_channel_id = vi_channel_id;
+	),
+	TP_printk("ts:%llu ch:0x%02x seq:%u prog:%u set:%u vi:%u",
+		__entry->ts,
+		__entry->channel_id,
+		__entry->cap_sequence,
+		__entry->prog_sequence,
+		__entry->isp_settings_id,
+		__entry->vi_channel_id)
+);
+
+DECLARE_EVENT_CLASS(capture__suspend_event,
+	TP_PROTO(u64 ts, bool suspend),
+	TP_ARGS(ts, suspend),
+	TP_STRUCT__entry(
+		__field(u64, ts)
+		__field(bool, suspend)
+	),
+	TP_fast_assign(
+		__entry->ts = ts;
+		__entry->suspend = suspend;
+	),
+	TP_printk("ts:%llu suspend:%s",
+		__entry->ts,
+		__entry->suspend ? "true" : "false")
+);
+
+DEFINE_EVENT(capture__progress_event, capture_event_sof,
+	TP_PROTO(u64 ts, u32 channel_id, u32 sequence),
+	TP_ARGS(ts, channel_id, sequence)
+);
+
+DEFINE_EVENT(capture__progress_event, capture_event_eof,
+	TP_PROTO(u64 ts, u32 channel_id, u32 sequence),
+	TP_ARGS(ts, channel_id, sequence)
+);
+
+DEFINE_EVENT(capture__progress_event, capture_event_error,
+	TP_PROTO(u64 ts, u32 channel_id, u32 sequence),
+	TP_ARGS(ts, channel_id, sequence)
+);
+
+DEFINE_EVENT(capture__progress_event, capture_event_reschedule,
+	TP_PROTO(u64 ts, u32 channel_id, u32 sequence),
+	TP_ARGS(ts, channel_id, sequence)
+);
+
+DEFINE_EVENT(capture__isp_event, capture_event_reschedule_isp,
+	TP_PROTO(u64 ts, u32 channel_id, u32 prog_sequence, u32 cap_sequence,
+		u8 isp_settings_id, u8 vi_channel_id),
+	TP_ARGS(ts, channel_id, prog_sequence, cap_sequence, isp_settings_id, vi_channel_id)
+);
+
+DEFINE_EVENT(capture__isp_event, capture_event_isp_done,
+	TP_PROTO(u64 ts, u32 channel_id, u32 prog_sequence, u32 cap_sequence,
+		u8 isp_settings_id, u8 vi_channel_id),
+	TP_ARGS(ts, channel_id, prog_sequence, cap_sequence, isp_settings_id, vi_channel_id)
+);
+
+DEFINE_EVENT(capture__isp_event, capture_event_isp_error,
+	TP_PROTO(u64 ts, u32 channel_id, u32 prog_sequence, u32 cap_sequence,
+		u8 isp_settings_id, u8 vi_channel_id),
+	TP_ARGS(ts, channel_id, prog_sequence, cap_sequence, isp_settings_id, vi_channel_id)
+);
+
+DEFINE_EVENT(capture__progress_event, capture_event_report_program,
+	TP_PROTO(u64 ts, u32 channel_id, u32 sequence),
+	TP_ARGS(ts, channel_id, sequence)
+);
+
+TRACE_EVENT(capture_event_wdt,
+	TP_PROTO(u64 ts),
+	TP_ARGS(ts),
+	TP_STRUCT__entry(
+		__field(u64, ts)
+	),
+	TP_fast_assign(
+		__entry->ts = ts;
+	),
+	TP_printk("ts:%llu",
+		__entry->ts)
+);
+
+DEFINE_EVENT(capture__suspend_event, capture_event_suspend,
+	TP_PROTO(u64 ts, bool suspend),
+	TP_ARGS(ts, suspend)
+);
+
+DEFINE_EVENT(capture__suspend_event, capture_event_suspend_isp,
+	TP_PROTO(u64 ts, bool suspend),
+	TP_ARGS(ts, suspend)
+);
+
+#endif /* _TRACE_TEGRA_CAPTURE_H */
+
+#include <trace/define_trace.h>
diff --git a/include/trace/events/tegra_rtcpu.h b/include/trace/events/tegra_rtcpu.h
new file mode 100644
index 00000000..5f5e2022
--- /dev/null
+++ b/include/trace/events/tegra_rtcpu.h
@@ -0,0 +1,510 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM tegra_rtcpu
+
+#if !defined(_TRACE_TEGRA_RTCPU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TEGRA_RTCPU_H
+
+#include <linux/tracepoint.h>
+#include <soc/tegra/camrtc-trace.h>
+
+/*
+ * Classes
+ */
+
+DECLARE_EVENT_CLASS(rtcpu__noarg,
+	TP_PROTO(u64 tstamp),
+	TP_ARGS(tstamp),
+	TP_STRUCT__entry(
+		__field(u64, tstamp)
+	),
+	TP_fast_assign(
+		__entry->tstamp = tstamp;
+	),
+	TP_printk("tstamp:%llu", __entry->tstamp)
+);
+
+DECLARE_EVENT_CLASS(rtcpu__arg1,
+	TP_PROTO(u64 tstamp, u32 data1),
+	TP_ARGS(tstamp, data1),
+	TP_STRUCT__entry(
+		__field(u64, tstamp)
+		__field(u32, data1)
+	),
+	TP_fast_assign(
+		__entry->tstamp = tstamp;
+		__entry->data1 = data1;
+	),
+	TP_printk("tstamp:%llu data:%u", __entry->tstamp,
+		__entry->data1)
+);
+
+DECLARE_EVENT_CLASS(rtcpu__dump,
+	TP_PROTO(u64 tstamp, u32 id, u32 len, void *data),
+	TP_ARGS(tstamp, id, len, data),
+	TP_STRUCT__entry(
+		__field(u64, tstamp)
+		__field(u32, id)
+		__field(u32, len)
+		__dynamic_array(__u8, data, len)
+	),
+	TP_fast_assign(
+		__entry->tstamp = tstamp;
+		__entry->id = id;
+		__entry->len = len;
+		memcpy(__get_dynamic_array(data), data, len);
+	),
+	TP_printk("tstamp:%llu id:0x%08x len:%u data:%s",
+		__entry->tstamp, __entry->id, __entry->len,
+		__print_hex(__get_dynamic_array(data), __entry->len))
+);
+
+/*
+ * Unknown events
+ */
+
+DEFINE_EVENT(rtcpu__dump, rtcpu_unknown,
+	TP_PROTO(u64 tstamp, u32 id, u32 len, void *data),
+	TP_ARGS(tstamp, id, len, data)
+);
+
+/*
+ * Non ARRAY event types
+ */
+
+TRACE_EVENT(rtcpu_armv7_exception,
+	TP_PROTO(u64 tstamp, u32 type),
+	TP_ARGS(tstamp, type),
+	TP_STRUCT__entry(
+		__field(u64, tstamp)
+		__field(u32, type)
+	),
+	TP_fast_assign(
+		__entry->tstamp = tstamp;
+		__entry->type = type;
+	),
+	TP_printk("tstamp:%llu type:%u", __entry->tstamp, __entry->type)
+);
+
+TRACE_EVENT(rtcpu_start,
+	TP_PROTO(u64 tstamp),
+	TP_ARGS(tstamp),
+	TP_STRUCT__entry(
+		__field(u64, tstamp)
+	),
+	TP_fast_assign(
+		__entry->tstamp = tstamp;
+	),
+	TP_printk("tstamp:%llu", __entry->tstamp)
+);
+
+#ifndef TEGRA_RTCPU_TRACE_STRING_SIZE
+#define TEGRA_RTCPU_TRACE_STRING_SIZE 48
+#endif
+
+TRACE_EVENT(rtcpu_string,
+	TP_PROTO(u64 tstamp, u32 id, u32 len, const char *data),
+	TP_ARGS(tstamp, id, len, data),
+	TP_STRUCT__entry(
+		__field(u64, tstamp)
+		__field(u32, id)
+		__field(u32, len)
+		__array(char, data, TEGRA_RTCPU_TRACE_STRING_SIZE)
+	),
+	TP_fast_assign(
+		__entry->tstamp = tstamp;
+		__entry->id = id;
+		__entry->len = len;
+		strncpy(__entry->data, data, sizeof(__entry->data));
+	),
+	TP_printk("tstamp:%llu id:0x%08x str:\"%.*s\"",
+		__entry->tstamp, __entry->id,
+		(int)__entry->len, __entry->data)
+);
+
+DEFINE_EVENT(rtcpu__dump, rtcpu_bulk,
+	TP_PROTO(u64 tstamp, u32 id, u32 len, void *data),
+	TP_ARGS(tstamp, id, len, data)
+);
+
+/*
+ * Base events
+ */
+
+DEFINE_EVENT(rtcpu__noarg, rtcpu_target_init,
+	TP_PROTO(u64 tstamp),
+	TP_ARGS(tstamp)
+);
+
+DEFINE_EVENT(rtcpu__noarg, rtcpu_start_scheduler,
+	TP_PROTO(u64 tstamp),
+	TP_ARGS(tstamp)
+);
+
+/*
+ * Debug interface
+ */
+
+DEFINE_EVENT(rtcpu__arg1, rtcpu_dbg_unknown,
+	TP_PROTO(u64 tstamp, u32 data1),
+	TP_ARGS(tstamp, data1)
+);
+
+DEFINE_EVENT(rtcpu__arg1, rtcpu_dbg_enter,
+	TP_PROTO(u64 tstamp, u32 req_type),
+	TP_ARGS(tstamp, req_type)
+);
+
+DEFINE_EVENT(rtcpu__noarg, rtcpu_dbg_exit,
+	TP_PROTO(u64 tstamp),
+	TP_ARGS(tstamp)
+);
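The rtcpu__dump class above copies a variable-length payload into a __dynamic_array and hex-dumps it, which makes it a natural fallback for records the decoder does not recognize. The following sketch shows that use; it is illustrative only (the helper name and the payload-length computation are assumptions, not part of this patch):

/*
 * Illustrative fallback: forward an unrecognized firmware record
 * verbatim through the rtcpu_unknown tracepoint defined above.
 */
static void rtcpu_trace_fallback(struct camrtc_event_struct *event)
{
	/* Assumption: header.len counts the whole entry, header included */
	u32 payload = event->header.len - sizeof(event->header);

	trace_rtcpu_unknown(event->header.tstamp, event->header.id,
			    payload, event->data.data8);
}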
+ +TRACE_EVENT(rtcpu_dbg_set_loglevel, + TP_PROTO(u64 tstamp, u32 old_level, u32 new_level), + TP_ARGS(tstamp, old_level, new_level), + TP_STRUCT__entry( + __field(u64, tstamp) + __field(u32, old_level) + __field(u32, new_level) + ), + TP_fast_assign( + __entry->tstamp = tstamp; + __entry->old_level = old_level; + __entry->new_level = new_level; + ), + TP_printk("tstamp:%llu old:%u new:%u", __entry->tstamp, + __entry->old_level, __entry->new_level) +); + +/* + * Perf events + */ +DECLARE_EVENT_CLASS(rtcpu__perf, + TP_PROTO(u64 tstamp, const struct camrtc_trace_perf_counter_data *perf), + TP_ARGS(tstamp, perf), + TP_STRUCT__entry( + __field(u64, tstamp) + __field_struct(struct camrtc_trace_perf_counter_data, perf) + ), + TP_fast_assign( + __entry->tstamp = tstamp; + if (perf) + __entry->perf = *perf; + else + memset(&__entry->perf, 0, sizeof(*perf)); + ), + TP_printk("ts:%llu name:%.*s cc:%llu e%u:%u e%u:%u e%u:%u", + __entry->tstamp, (int)sizeof(__entry->perf.name), + __entry->perf.name, __entry->perf.cycles, + __entry->perf.events[0], __entry->perf.counters[0], + __entry->perf.events[1], __entry->perf.counters[1], + __entry->perf.events[2], __entry->perf.counters[2]) +); + +DEFINE_EVENT(rtcpu__perf, rtcpu_perf_counters, + TP_PROTO(u64 tstamp, const struct camrtc_trace_perf_counter_data *perf), + TP_ARGS(tstamp, perf) +); + +DEFINE_EVENT(rtcpu__perf, rtcpu_perf_reset, + TP_PROTO(u64 tstamp, const struct camrtc_trace_perf_counter_data *perf), + TP_ARGS(tstamp, perf) +); + + +/* + * VI Notify events + */ + +extern const char * const g_trace_vinotify_tag_strs[]; +extern const unsigned int g_trace_vinotify_tag_str_count; + +TRACE_EVENT(rtcpu_vinotify_event_ts64, + TP_PROTO(u64 tstamp, u8 tag, u32 ch_frame, u64 vi_tstamp, u32 data), + TP_ARGS(tstamp, tag, ch_frame, vi_tstamp, data), + TP_STRUCT__entry( + __field(u64, tstamp) + __field(u8, tag) + __field(u32, ch_frame) + __field(u64, vi_tstamp) + __field(u32, data) + ), + TP_fast_assign( + __entry->tstamp = tstamp; + __entry->tag = tag; + __entry->ch_frame = ch_frame; + __entry->vi_tstamp = vi_tstamp; + __entry->data = data; + ), + TP_printk( + "tstamp:%llu tag:%s channel:0x%02x frame:%u vi_tstamp:%llu data:0x%08x", + __entry->tstamp, + (__entry->tag < g_trace_vinotify_tag_str_count) ? + g_trace_vinotify_tag_strs[__entry->tag] : + __print_hex(&__entry->tag, 1), + (__entry->ch_frame >> 8) & 0xff, + (__entry->ch_frame >> 16) & 0xffff, + __entry->vi_tstamp, __entry->data) +); + +TRACE_EVENT(rtcpu_vinotify_event, + TP_PROTO(u64 tstamp, u32 channel_id, u32 unit, + u32 tag, u32 vi_ts_hi, u32 vi_ts_lo, u32 ext_data, u32 data), + TP_ARGS(tstamp, channel_id, unit, tag, vi_ts_hi, vi_ts_lo, ext_data, data), + TP_STRUCT__entry( + __field(u64, tstamp) + __field(u32, channel_id) + __field(u32, unit) + __field(u8, tag_tag) + __field(u8, tag_channel) + __field(u16, tag_frame) + __field(u64, vi_ts) + __field(u64, data) + ), + TP_fast_assign( + __entry->tstamp = tstamp; + __entry->channel_id = channel_id; + __entry->unit = unit; + __entry->tag_tag = tag & 0xff; + __entry->tag_channel = (tag >> 8) & 0xff; + __entry->tag_frame = (tag >> 16) & 0xffff; + __entry->vi_ts = ((u64)vi_ts_hi << 32) | vi_ts_lo; + __entry->data = ((u64)ext_data << 32) | data; + ), + TP_printk( + "tstamp:%llu cch:%d vi:%u tag:%s channel:0x%02x frame:%u " + "vi_tstamp:%llu data:0x%016llx", + __entry->tstamp, + __entry->channel_id, + __entry->unit, + ((__entry->tag_tag >> 1) < g_trace_vinotify_tag_str_count) ? 
+ g_trace_vinotify_tag_strs[__entry->tag_tag >> 1] : + __print_hex(&__entry->tag_tag, 1), + __entry->tag_channel, __entry->tag_frame, + __entry->vi_ts, __entry->data) +); + +TRACE_EVENT(rtcpu_vinotify_error, + TP_PROTO(u64 tstamp, u32 channel_id, u32 unit, + u32 tag, u32 vi_ts_hi, u32 vi_ts_lo, u32 ext_data, u32 data), + TP_ARGS(tstamp, channel_id, unit, tag, vi_ts_hi, vi_ts_lo, ext_data, data), + TP_STRUCT__entry( + __field(u64, tstamp) + __field(u32, channel_id) + __field(u32, unit) + __field(u8, tag_tag) + __field(u8, tag_channel) + __field(u16, tag_frame) + __field(u64, vi_ts) + __field(u64, data) + ), + TP_fast_assign( + __entry->tstamp = tstamp; + __entry->channel_id = channel_id; + __entry->unit = unit; + __entry->tag_tag = tag & 0xff; + __entry->tag_channel = (tag >> 8) & 0xff; + __entry->tag_frame = (tag >> 16) & 0xffff; + __entry->vi_ts = ((u64)vi_ts_hi << 32) | vi_ts_lo; + __entry->data = ((u64)ext_data << 32) | data; + ), + TP_printk( + "tstamp:%llu cch:%d vi:%u tag:%s channel:0x%02x frame:%u " + "vi_tstamp:%llu data:0x%016llx", + __entry->tstamp, + __entry->channel_id, + __entry->unit, + ((__entry->tag_tag >> 1) < g_trace_vinotify_tag_str_count) ? + g_trace_vinotify_tag_strs[__entry->tag_tag >> 1] : + __print_hex(&__entry->tag_tag, 1), + __entry->tag_channel, __entry->tag_frame, + __entry->vi_ts, __entry->data) +); + +/* + * NVCSI events + */ + +extern const char * const g_trace_nvcsi_intr_class_strs[]; +extern const unsigned int g_trace_nvcsi_intr_class_str_count; + +extern const char * const g_trace_nvcsi_intr_type_strs[]; +extern const unsigned int g_trace_nvcsi_intr_type_str_count; + +TRACE_EVENT(rtcpu_nvcsi_intr, + TP_PROTO(u64 tstamp, u8 intr_class, u8 intr_type, u32 index, + u32 status), + TP_ARGS(tstamp, intr_class, intr_type, index, status), + TP_STRUCT__entry( + __field(u64, tstamp) + __field(u8, intr_class) + __field(u8, intr_type) + __field(u32, index) + __field(u32, status) + ), + TP_fast_assign( + __entry->tstamp = tstamp; + __entry->intr_class = intr_class; + __entry->intr_type = intr_type; + __entry->index = index; + __entry->status = status; + ), + TP_printk( + "tstamp:%llu class:%s type:%s phy:%u cil:%u st:%u vc:%u status:0x%08x", + __entry->tstamp, + (__entry->intr_class < g_trace_nvcsi_intr_class_str_count) ? + g_trace_nvcsi_intr_class_strs[__entry->intr_class] : + __print_hex(&__entry->intr_class, 1), + (__entry->intr_type < g_trace_nvcsi_intr_type_str_count) ? 
+		g_trace_nvcsi_intr_type_strs[__entry->intr_type] :
+		__print_hex(&__entry->intr_type, 1),
+		(__entry->index >> 24) & 0xff,
+		(__entry->index >> 16) & 0xff,
+		(__entry->index >> 8) & 0xff,
+		__entry->index & 0xff,
+		__entry->status)
+);
+
+/*
+ * ISP events
+ */
+
+TRACE_EVENT(rtcpu_isp_falcon,
+	TP_PROTO(u8 tag, u8 ch, u8 seq, u32 tstamp, u32 data, u32 ext_data),
+	TP_ARGS(tag, ch, seq, tstamp, data, ext_data),
+	TP_STRUCT__entry(
+		__field(u8, tag)
+		__field(u8, ch)
+		__field(u8, seq)
+		__field(u32, tstamp)
+		__field(u32, data)
+		__field(u32, ext_data)
+	),
+	TP_fast_assign(
+		__entry->tag = tag;
+		__entry->ch = ch;
+		__entry->seq = seq;
+		__entry->tstamp = tstamp;
+		__entry->data = data;
+		__entry->ext_data = ext_data;
+	),
+	TP_printk(
+		"tag:0x%x tstamp:%u ch:%u seq:%u data:0x%08x ext_data:0x%08x",
+		__entry->tag, __entry->tstamp, __entry->ch, __entry->seq,
+		__entry->data, __entry->ext_data
+	)
+);
+
+extern const char * const g_trace_isp_falcon_task_strs[];
+extern const unsigned int g_trace_isp_falcon_task_str_count;
+
+TRACE_EVENT(rtcpu_isp_falcon_task_start,
+	TP_PROTO(u8 ch, u32 tstamp, u32 task),
+	TP_ARGS(ch, tstamp, task),
+	TP_STRUCT__entry(
+		__field(u8, ch)
+		__field(u32, tstamp)
+		__field(u32, task)
+	),
+	TP_fast_assign(
+		__entry->ch = ch;
+		__entry->tstamp = tstamp;
+		__entry->task = task;
+	),
+	TP_printk(
+		"tstamp:%u ch:%u task:%s",
+		__entry->tstamp, __entry->ch,
+		(__entry->task < g_trace_isp_falcon_task_str_count) ?
+			g_trace_isp_falcon_task_strs[__entry->task] :
+			"UNKNOWN"
+	)
+);
+
+TRACE_EVENT(rtcpu_isp_falcon_task_end,
+	TP_PROTO(u32 tstamp, u32 task),
+	TP_ARGS(tstamp, task),
+	TP_STRUCT__entry(
+		__field(u32, tstamp)
+		__field(u32, task)
+	),
+	TP_fast_assign(
+		__entry->tstamp = tstamp;
+		__entry->task = task;
+	),
+	TP_printk(
+		"tstamp:%u task:%s",
+		__entry->tstamp,
+		(__entry->task < g_trace_isp_falcon_task_str_count) ?
+			g_trace_isp_falcon_task_strs[__entry->task] :
+			"UNKNOWN"
+	)
+);
+
+TRACE_EVENT(rtcpu_isp_falcon_tile_start,
+	TP_PROTO(
+		u8 ch, u8 seq, u32 tstamp,
+		u8 tile_x, u8 tile_y,
+		u16 tile_w, u16 tile_h),
+	TP_ARGS(ch, seq, tstamp, tile_x, tile_y, tile_w, tile_h),
+	TP_STRUCT__entry(
+		__field(u8, ch)
+		__field(u8, seq)
+		__field(u32, tstamp)
+		__field(u8, tile_x)
+		__field(u8, tile_y)
+		__field(u16, tile_w)
+		__field(u16, tile_h)
+	),
+	TP_fast_assign(
+		__entry->ch = ch;
+		__entry->seq = seq;
+		__entry->tstamp = tstamp;
+		__entry->tile_x = tile_x;
+		__entry->tile_y = tile_y;
+		__entry->tile_w = tile_w;
+		__entry->tile_h = tile_h;
+	),
+	TP_printk(
+		"tstamp:%u ch:%u seq:%u tile_x:%u tile_y:%u tile_w:%u tile_h:%u",
+		__entry->tstamp, __entry->ch, __entry->seq,
+		__entry->tile_x, __entry->tile_y,
+		__entry->tile_w, __entry->tile_h
+	)
+);
+
+TRACE_EVENT(rtcpu_isp_falcon_tile_end,
+	TP_PROTO(u8 ch, u8 seq, u32 tstamp, u8 tile_x, u8 tile_y),
+	TP_ARGS(ch, seq, tstamp, tile_x, tile_y),
+	TP_STRUCT__entry(
+		__field(u8, ch)
+		__field(u8, seq)
+		__field(u32, tstamp)
+		__field(u8, tile_x)
+		__field(u8, tile_y)
+	),
+	TP_fast_assign(
+		__entry->ch = ch;
+		__entry->seq = seq;
+		__entry->tstamp = tstamp;
+		__entry->tile_x = tile_x;
+		__entry->tile_y = tile_y;
+	),
+	TP_printk(
+		"tstamp:%u ch:%u seq:%u tile_x:%u tile_y:%u",
+		__entry->tstamp, __entry->ch, __entry->seq,
+		__entry->tile_x, __entry->tile_y
+	)
+);
+
+#endif /* _TRACE_TEGRA_RTCPU_H */
+
+#include <trace/define_trace.h>
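As with any kernel trace header, the three files above only declare the tracepoints; exactly one compilation unit must define CREATE_TRACE_POINTS before including them so that the tracepoint bodies are emitted once. A minimal sketch of that translation unit follows; the suggested file name is illustrative and not part of this patch:

/* In a single .c file of the driver, e.g. tegra-rtcpu-trace.c: */
#define CREATE_TRACE_POINTS
#include <trace/events/freertos.h>
#include <trace/events/tegra_rtcpu.h>
#include <trace/events/tegra_capture.h>

Every other source file that fires these events includes the headers without the CREATE_TRACE_POINTS define.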